path: root/library
Diffstat
-rw-r--r--library/alloc/benches/lib.rs1
-rw-r--r--library/alloc/src/alloc.rs18
-rw-r--r--library/alloc/src/alloc/tests.rs2
-rw-r--r--library/alloc/src/borrow.rs1
-rw-r--r--library/alloc/src/boxed.rs350
-rw-r--r--library/alloc/src/boxed/thin.rs8
-rw-r--r--library/alloc/src/collections/binary_heap.rs3
-rw-r--r--library/alloc/src/collections/btree/dedup_sorted_iter.rs4
-rw-r--r--library/alloc/src/collections/btree/map.rs41
-rw-r--r--library/alloc/src/collections/btree/map/entry.rs10
-rw-r--r--library/alloc/src/collections/btree/node.rs22
-rw-r--r--library/alloc/src/collections/btree/set.rs26
-rw-r--r--library/alloc/src/collections/linked_list.rs4
-rw-r--r--library/alloc/src/collections/mod.rs3
-rw-r--r--library/alloc/src/collections/vec_deque/drain.rs44
-rw-r--r--library/alloc/src/collections/vec_deque/mod.rs24
-rw-r--r--library/alloc/src/ffi/c_str.rs29
-rw-r--r--library/alloc/src/fmt.rs2
-rw-r--r--library/alloc/src/lib.rs30
-rw-r--r--library/alloc/src/macros.rs2
-rw-r--r--library/alloc/src/raw_vec.rs12
-rw-r--r--library/alloc/src/rc.rs14
-rw-r--r--library/alloc/src/slice.rs94
-rw-r--r--library/alloc/src/str.rs24
-rw-r--r--library/alloc/src/string.rs67
-rw-r--r--library/alloc/src/sync.rs81
-rw-r--r--library/alloc/src/sync/tests.rs19
-rw-r--r--library/alloc/src/task.rs6
-rw-r--r--library/alloc/src/vec/drain.rs75
-rw-r--r--library/alloc/src/vec/drain_filter.rs60
-rw-r--r--library/alloc/src/vec/in_place_collect.rs20
-rw-r--r--library/alloc/src/vec/in_place_drop.rs15
-rw-r--r--library/alloc/src/vec/into_iter.rs48
-rw-r--r--library/alloc/src/vec/is_zero.rs38
-rw-r--r--library/alloc/src/vec/mod.rs171
-rw-r--r--library/alloc/src/vec/spec_extend.rs2
-rw-r--r--library/alloc/tests/autotraits.rs293
-rw-r--r--library/alloc/tests/lib.rs9
-rw-r--r--library/alloc/tests/str.rs10
-rw-r--r--library/alloc/tests/string.rs126
-rw-r--r--library/alloc/tests/thin_box.rs8
-rw-r--r--library/alloc/tests/vec.rs341
-rw-r--r--library/alloc/tests/vec_deque.rs161
-rw-r--r--library/backtrace/.github/workflows/main.yml4
-rw-r--r--library/backtrace/Cargo.toml4
-rw-r--r--library/backtrace/src/backtrace/miri.rs6
-rw-r--r--library/core/benches/iter.rs27
-rw-r--r--library/core/benches/lib.rs1
-rw-r--r--library/core/benches/num/int_log/mod.rs6
-rw-r--r--library/core/src/alloc/global.rs2
-rw-r--r--library/core/src/alloc/layout.rs114
-rw-r--r--library/core/src/alloc/mod.rs9
-rw-r--r--library/core/src/any.rs244
-rw-r--r--library/core/src/array/equality.rs7
-rw-r--r--library/core/src/array/iter.rs59
-rw-r--r--library/core/src/array/mod.rs68
-rw-r--r--library/core/src/bool.rs29
-rw-r--r--library/core/src/borrow.rs4
-rw-r--r--library/core/src/cell.rs74
-rw-r--r--library/core/src/char/decode.rs9
-rw-r--r--library/core/src/char/methods.rs62
-rw-r--r--library/core/src/char/mod.rs8
-rw-r--r--library/core/src/cmp.rs125
-rw-r--r--library/core/src/const_closure.rs77
-rw-r--r--library/core/src/convert/mod.rs224
-rw-r--r--library/core/src/default.rs3
-rw-r--r--library/core/src/error.md137
-rw-r--r--library/core/src/error.rs508
-rw-r--r--library/core/src/ffi/c_double.md4
-rw-r--r--library/core/src/ffi/c_float.md4
-rw-r--r--library/core/src/ffi/c_str.rs105
-rw-r--r--library/core/src/fmt/builders.rs18
-rw-r--r--library/core/src/fmt/mod.rs18
-rw-r--r--library/core/src/fmt/num.rs92
-rw-r--r--library/core/src/future/poll_fn.rs11
-rw-r--r--library/core/src/future/ready.rs24
-rw-r--r--library/core/src/hash/mod.rs2
-rw-r--r--library/core/src/hint.rs28
-rw-r--r--library/core/src/intrinsics.rs530
-rw-r--r--library/core/src/iter/adapters/array_chunks.rs170
-rw-r--r--library/core/src/iter/adapters/by_ref_sized.rs42
-rw-r--r--library/core/src/iter/adapters/copied.rs74
-rw-r--r--library/core/src/iter/adapters/flatten.rs375
-rw-r--r--library/core/src/iter/adapters/map_while.rs14
-rw-r--r--library/core/src/iter/adapters/mod.rs14
-rw-r--r--library/core/src/iter/adapters/scan.rs14
-rw-r--r--library/core/src/iter/adapters/skip.rs39
-rw-r--r--library/core/src/iter/adapters/take.rs14
-rw-r--r--library/core/src/iter/adapters/take_while.rs14
-rw-r--r--library/core/src/iter/mod.rs25
-rw-r--r--library/core/src/iter/range.rs28
-rw-r--r--library/core/src/iter/traits/collect.rs3
-rw-r--r--library/core/src/iter/traits/iterator.rs215
-rw-r--r--library/core/src/lazy.rs1
-rw-r--r--library/core/src/lib.rs23
-rw-r--r--library/core/src/macros/mod.rs53
-rw-r--r--library/core/src/marker.rs134
-rw-r--r--library/core/src/mem/maybe_uninit.rs54
-rw-r--r--library/core/src/mem/mod.rs95
-rw-r--r--library/core/src/mem/transmutability.rs88
-rw-r--r--library/core/src/mem/valid_align.rs247
-rw-r--r--library/core/src/num/bignum.rs2
-rw-r--r--library/core/src/num/dec2flt/decimal.rs2
-rw-r--r--library/core/src/num/dec2flt/lemire.rs4
-rw-r--r--library/core/src/num/error.rs17
-rw-r--r--library/core/src/num/f32.rs153
-rw-r--r--library/core/src/num/f64.rs153
-rw-r--r--library/core/src/num/flt2dec/strategy/grisu.rs4
-rw-r--r--library/core/src/num/int_log10.rs2
-rw-r--r--library/core/src/num/int_macros.rs212
-rw-r--r--library/core/src/num/mod.rs76
-rw-r--r--library/core/src/num/nonzero.rs203
-rw-r--r--library/core/src/num/uint_macros.rs189
-rw-r--r--library/core/src/ops/arith.rs50
-rw-r--r--library/core/src/ops/bit.rs11
-rw-r--r--library/core/src/ops/control_flow.rs9
-rw-r--r--library/core/src/ops/deref.rs2
-rw-r--r--library/core/src/ops/drop.rs3
-rw-r--r--library/core/src/ops/function.rs28
-rw-r--r--library/core/src/ops/generator.rs1
-rw-r--r--library/core/src/ops/index.rs2
-rw-r--r--library/core/src/ops/index_range.rs171
-rw-r--r--library/core/src/ops/mod.rs3
-rw-r--r--library/core/src/ops/range.rs2
-rw-r--r--library/core/src/ops/try_trait.rs36
-rw-r--r--library/core/src/option.rs52
-rw-r--r--library/core/src/panic/location.rs9
-rw-r--r--library/core/src/panicking.rs130
-rw-r--r--library/core/src/primitive_docs.rs95
-rw-r--r--library/core/src/ptr/alignment.rs326
-rw-r--r--library/core/src/ptr/const_ptr.rs111
-rw-r--r--library/core/src/ptr/metadata.rs32
-rw-r--r--library/core/src/ptr/mod.rs130
-rw-r--r--library/core/src/ptr/mut_ptr.rs105
-rw-r--r--library/core/src/ptr/non_null.rs6
-rw-r--r--library/core/src/result.rs73
-rw-r--r--library/core/src/slice/ascii.rs2
-rw-r--r--library/core/src/slice/index.rs112
-rw-r--r--library/core/src/slice/iter.rs25
-rw-r--r--library/core/src/slice/iter/macros.rs26
-rw-r--r--library/core/src/slice/memchr.rs41
-rw-r--r--library/core/src/slice/mod.rs177
-rw-r--r--library/core/src/slice/raw.rs38
-rw-r--r--library/core/src/slice/rotate.rs4
-rw-r--r--library/core/src/slice/sort.rs42
-rw-r--r--library/core/src/str/error.rs17
-rw-r--r--library/core/src/str/lossy.rs244
-rw-r--r--library/core/src/str/mod.rs13
-rw-r--r--library/core/src/str/pattern.rs2
-rw-r--r--library/core/src/str/traits.rs18
-rw-r--r--library/core/src/str/validations.rs4
-rw-r--r--library/core/src/sync/atomic.rs134
-rw-r--r--library/core/src/sync/exclusive.rs9
-rw-r--r--library/core/src/task/wake.rs59
-rw-r--r--library/core/src/time.rs198
-rw-r--r--library/core/src/tuple.rs5
-rw-r--r--library/core/src/unicode/printable.rs131
-rw-r--r--library/core/src/unicode/unicode_data.rs293
-rw-r--r--library/core/tests/alloc.rs44
-rw-r--r--library/core/tests/any.rs2
-rw-r--r--library/core/tests/ascii.rs18
-rw-r--r--library/core/tests/atomic.rs2
-rw-r--r--library/core/tests/const_ptr.rs6
-rw-r--r--library/core/tests/iter/adapters/array_chunks.rs179
-rw-r--r--library/core/tests/iter/adapters/by_ref_sized.rs20
-rw-r--r--library/core/tests/iter/adapters/flatten.rs42
-rw-r--r--library/core/tests/iter/adapters/mod.rs24
-rw-r--r--library/core/tests/iter/adapters/skip.rs31
-rw-r--r--library/core/tests/lib.rs15
-rw-r--r--library/core/tests/mem.rs12
-rw-r--r--library/core/tests/num/int_log.rs186
-rw-r--r--library/core/tests/num/int_macros.rs26
-rw-r--r--library/core/tests/num/mod.rs2
-rw-r--r--library/core/tests/num/uint_macros.rs22
-rw-r--r--library/core/tests/num/wrapping.rs2
-rw-r--r--library/core/tests/option.rs1
-rw-r--r--library/core/tests/panic.rs1
-rw-r--r--library/core/tests/panic/location.rs31
-rw-r--r--library/core/tests/ptr.rs2
-rw-r--r--library/core/tests/result.rs9
-rw-r--r--library/core/tests/slice.rs4
-rw-r--r--library/core/tests/str_lossy.rs138
-rw-r--r--library/core/tests/task.rs17
-rw-r--r--library/core/tests/time.rs32
-rw-r--r--library/panic_abort/src/android.rs2
-rw-r--r--library/panic_abort/src/lib.rs33
-rw-r--r--library/panic_unwind/src/emcc.rs56
-rw-r--r--library/panic_unwind/src/gcc.rs292
-rw-r--r--library/panic_unwind/src/lib.rs8
-rw-r--r--library/panic_unwind/src/seh.rs45
-rw-r--r--library/portable-simd/crates/core_simd/src/masks/to_bitmask.rs2
-rw-r--r--library/portable-simd/crates/std_float/src/lib.rs6
-rw-r--r--library/proc_macro/src/bridge/client.rs6
-rw-r--r--library/proc_macro/src/bridge/mod.rs29
-rw-r--r--library/proc_macro/src/bridge/server.rs39
-rw-r--r--library/proc_macro/src/diagnostic.rs21
-rw-r--r--library/proc_macro/src/lib.rs11
-rw-r--r--library/rtstartup/rsbegin.rs19
-rw-r--r--library/std/Cargo.toml8
-rw-r--r--library/std/src/alloc.rs5
-rw-r--r--library/std/src/backtrace.rs33
-rw-r--r--library/std/src/collections/hash/map.rs14
-rw-r--r--library/std/src/collections/hash/map/tests.rs30
-rw-r--r--library/std/src/collections/hash/set.rs7
-rw-r--r--library/std/src/collections/hash/set/tests.rs6
-rw-r--r--library/std/src/env.rs2
-rw-r--r--library/std/src/error.rs1196
-rw-r--r--library/std/src/f32.rs2
-rw-r--r--library/std/src/f32/tests.rs78
-rw-r--r--library/std/src/f64.rs2
-rw-r--r--library/std/src/f64/tests.rs76
-rw-r--r--library/std/src/ffi/os_str.rs3
-rw-r--r--library/std/src/fs.rs108
-rw-r--r--library/std/src/io/buffered/bufreader.rs22
-rw-r--r--library/std/src/io/buffered/bufreader/buffer.rs27
-rw-r--r--library/std/src/io/buffered/tests.rs58
-rw-r--r--library/std/src/io/copy.rs34
-rw-r--r--library/std/src/io/cursor.rs10
-rw-r--r--library/std/src/io/error.rs16
-rw-r--r--library/std/src/io/error/repr_bitpacked.rs4
-rw-r--r--library/std/src/io/error/tests.rs2
-rw-r--r--library/std/src/io/impls.rs22
-rw-r--r--library/std/src/io/mod.rs100
-rw-r--r--library/std/src/io/readbuf.rs312
-rw-r--r--library/std/src/io/readbuf/tests.rs220
-rw-r--r--library/std/src/io/stdio.rs108
-rw-r--r--library/std/src/io/tests.rs23
-rw-r--r--library/std/src/io/util.rs14
-rw-r--r--library/std/src/io/util/tests.rs48
-rw-r--r--library/std/src/keyword_docs.rs157
-rw-r--r--library/std/src/lazy.rs1
-rw-r--r--library/std/src/lib.rs48
-rw-r--r--library/std/src/macros.rs36
-rw-r--r--library/std/src/net/addr.rs988
-rw-r--r--library/std/src/net/addr/tests.rs237
-rw-r--r--library/std/src/net/display_buffer.rs40
-rw-r--r--library/std/src/net/ip.rs2040
-rw-r--r--library/std/src/net/ip/tests.rs969
-rw-r--r--library/std/src/net/ip_addr.rs2095
-rw-r--r--library/std/src/net/ip_addr/tests.rs1039
-rw-r--r--library/std/src/net/mod.rs13
-rw-r--r--library/std/src/net/parser.rs138
-rw-r--r--library/std/src/net/socket_addr.rs974
-rw-r--r--library/std/src/net/socket_addr/tests.rs306
-rw-r--r--library/std/src/os/android/mod.rs1
-rw-r--r--library/std/src/os/android/net.rs4
-rw-r--r--library/std/src/os/fd/mod.rs13
-rw-r--r--library/std/src/os/fd/owned.rs78
-rw-r--r--library/std/src/os/fd/raw.rs26
-rw-r--r--library/std/src/os/fortanix_sgx/mod.rs5
-rw-r--r--library/std/src/os/linux/mod.rs1
-rw-r--r--library/std/src/os/linux/net.rs4
-rw-r--r--library/std/src/os/mod.rs7
-rw-r--r--library/std/src/os/net/mod.rs7
-rw-r--r--library/std/src/os/net/tcp.rs70
-rw-r--r--library/std/src/os/net/tests.rs29
-rw-r--r--library/std/src/os/unix/io/fd.rs8
-rw-r--r--library/std/src/os/unix/io/mod.rs11
-rw-r--r--library/std/src/os/unix/io/raw.rs6
-rw-r--r--library/std/src/os/unix/io/tests.rs (renamed from library/std/src/os/unix/io/fd/tests.rs)0
-rw-r--r--library/std/src/os/unix/mod.rs2
-rw-r--r--library/std/src/os/unix/net/addr.rs18
-rw-r--r--library/std/src/os/unix/net/datagram.rs25
-rw-r--r--library/std/src/os/unix/net/listener.rs10
-rw-r--r--library/std/src/os/unix/net/stream.rs25
-rw-r--r--library/std/src/os/wasi/io/mod.rs12
-rw-r--r--library/std/src/os/watchos/fs.rs142
-rw-r--r--library/std/src/os/watchos/mod.rs6
-rw-r--r--library/std/src/os/watchos/raw.rs83
-rw-r--r--library/std/src/os/windows/io/handle.rs17
-rw-r--r--library/std/src/panic.rs33
-rw-r--r--library/std/src/panicking.rs160
-rw-r--r--library/std/src/path.rs4
-rw-r--r--library/std/src/path/tests.rs5
-rw-r--r--library/std/src/personality.rs46
-rw-r--r--library/std/src/personality/dwarf/eh.rs (renamed from library/panic_unwind/src/dwarf/eh.rs)9
-rw-r--r--library/std/src/personality/dwarf/mod.rs (renamed from library/panic_unwind/src/dwarf/mod.rs)0
-rw-r--r--library/std/src/personality/dwarf/tests.rs (renamed from library/panic_unwind/src/dwarf/tests.rs)0
-rw-r--r--library/std/src/personality/emcc.rs20
-rw-r--r--library/std/src/personality/gcc.rs279
-rw-r--r--library/std/src/primitive_docs.rs95
-rw-r--r--library/std/src/process.rs28
-rw-r--r--library/std/src/rt.rs28
-rw-r--r--library/std/src/sync/mpsc/mpsc_queue/tests.rs2
-rw-r--r--library/std/src/sync/mpsc/spsc_queue/tests.rs5
-rw-r--r--library/std/src/sync/mpsc/stream.rs2
-rw-r--r--library/std/src/sync/mpsc/sync_tests.rs21
-rw-r--r--library/std/src/sync/mpsc/tests.rs12
-rw-r--r--library/std/src/sync/mutex.rs1
-rw-r--r--library/std/src/sync/once.rs312
-rw-r--r--library/std/src/sync/once_lock.rs55
-rw-r--r--library/std/src/sync/rwlock.rs66
-rw-r--r--library/std/src/sync/rwlock/tests.rs2
-rw-r--r--library/std/src/sys/common/mod.rs4
-rw-r--r--library/std/src/sys/common/small_c_string.rs58
-rw-r--r--library/std/src/sys/common/tests.rs66
-rw-r--r--library/std/src/sys/hermit/args.rs74
-rw-r--r--library/std/src/sys/hermit/condvar.rs90
-rw-r--r--library/std/src/sys/hermit/fs.rs31
-rw-r--r--library/std/src/sys/hermit/futex.rs39
-rw-r--r--library/std/src/sys/hermit/mod.rs21
-rw-r--r--library/std/src/sys/hermit/mutex.rs216
-rw-r--r--library/std/src/sys/hermit/net.rs2
-rw-r--r--library/std/src/sys/hermit/rwlock.rs144
-rw-r--r--library/std/src/sys/itron/mutex.rs6
-rw-r--r--library/std/src/sys/mod.rs2
-rw-r--r--library/std/src/sys/sgx/abi/thread.rs8
-rw-r--r--library/std/src/sys/sgx/abi/tls/mod.rs1
-rw-r--r--library/std/src/sys/sgx/abi/usercalls/alloc.rs183
-rw-r--r--library/std/src/sys/sgx/abi/usercalls/mod.rs8
-rw-r--r--library/std/src/sys/sgx/abi/usercalls/raw.rs24
-rw-r--r--library/std/src/sys/sgx/abi/usercalls/tests.rs34
-rw-r--r--library/std/src/sys/sgx/mod.rs2
-rw-r--r--library/std/src/sys/sgx/mutex.rs3
-rw-r--r--library/std/src/sys/sgx/thread_local_key.rs5
-rw-r--r--library/std/src/sys/solid/fs.rs44
-rw-r--r--library/std/src/sys/solid/mod.rs2
-rw-r--r--library/std/src/sys/solid/os.rs44
-rw-r--r--library/std/src/sys/solid/thread_local_key.rs5
-rw-r--r--library/std/src/sys/unix/fd.rs11
-rw-r--r--library/std/src/sys/unix/fs.rs497
-rw-r--r--library/std/src/sys/unix/io.rs6
-rw-r--r--library/std/src/sys/unix/kernel_copy.rs2
-rw-r--r--library/std/src/sys/unix/locks/fuchsia_mutex.rs5
-rw-r--r--library/std/src/sys/unix/locks/futex_mutex.rs5
-rw-r--r--library/std/src/sys/unix/locks/futex_rwlock.rs2
-rw-r--r--library/std/src/sys/unix/locks/mod.rs6
-rw-r--r--library/std/src/sys/unix/locks/pthread_condvar.rs2
-rw-r--r--library/std/src/sys/unix/locks/pthread_mutex.rs2
-rw-r--r--library/std/src/sys/unix/mod.rs76
-rw-r--r--library/std/src/sys/unix/net.rs22
-rw-r--r--library/std/src/sys/unix/os.rs59
-rw-r--r--library/std/src/sys/unix/os_str.rs40
-rw-r--r--library/std/src/sys/unix/os_str/tests.rs8
-rw-r--r--library/std/src/sys/unix/process/process_common.rs61
-rw-r--r--library/std/src/sys/unix/process/process_common/tests.rs103
-rw-r--r--library/std/src/sys/unix/process/process_fuchsia.rs2
-rw-r--r--library/std/src/sys/unix/process/process_unix.rs80
-rw-r--r--library/std/src/sys/unix/rand.rs18
-rw-r--r--library/std/src/sys/unix/stdio.rs50
-rw-r--r--library/std/src/sys/unix/thread.rs59
-rw-r--r--library/std/src/sys/unix/thread_local_dtor.rs1
-rw-r--r--library/std/src/sys/unix/thread_local_key.rs5
-rw-r--r--library/std/src/sys/unix/thread_parker.rs281
-rw-r--r--library/std/src/sys/unix/thread_parker/darwin.rs131
-rw-r--r--library/std/src/sys/unix/thread_parker/mod.rs32
-rw-r--r--library/std/src/sys/unix/thread_parker/netbsd.rs113
-rw-r--r--library/std/src/sys/unix/thread_parker/pthread.rs271
-rw-r--r--library/std/src/sys/unix/time.rs34
-rw-r--r--library/std/src/sys/unsupported/alloc.rs7
-rw-r--r--library/std/src/sys/unsupported/common.rs2
-rw-r--r--library/std/src/sys/unsupported/fs.rs4
-rw-r--r--library/std/src/sys/unsupported/io.rs4
-rw-r--r--library/std/src/sys/unsupported/locks/condvar.rs1
-rw-r--r--library/std/src/sys/unsupported/locks/mod.rs2
-rw-r--r--library/std/src/sys/unsupported/locks/mutex.rs4
-rw-r--r--library/std/src/sys/unsupported/locks/rwlock.rs1
-rw-r--r--library/std/src/sys/unsupported/process.rs3
-rw-r--r--library/std/src/sys/unsupported/thread_local_dtor.rs1
-rw-r--r--library/std/src/sys/unsupported/thread_local_key.rs5
-rw-r--r--library/std/src/sys/wasi/fs.rs111
-rw-r--r--library/std/src/sys/wasi/io.rs6
-rw-r--r--library/std/src/sys/wasi/mod.rs3
-rw-r--r--library/std/src/sys/wasi/os.rs81
-rw-r--r--library/std/src/sys/wasi/time.rs4
-rw-r--r--library/std/src/sys/wasm/mod.rs2
-rw-r--r--library/std/src/sys/windows/alloc.rs5
-rw-r--r--library/std/src/sys/windows/c.rs65
-rw-r--r--library/std/src/sys/windows/cmath.rs2
-rw-r--r--library/std/src/sys/windows/compat.rs232
-rw-r--r--library/std/src/sys/windows/fs.rs156
-rw-r--r--library/std/src/sys/windows/handle.rs12
-rw-r--r--library/std/src/sys/windows/io.rs76
-rw-r--r--library/std/src/sys/windows/locks/mod.rs2
-rw-r--r--library/std/src/sys/windows/locks/mutex.rs2
-rw-r--r--library/std/src/sys/windows/mod.rs28
-rw-r--r--library/std/src/sys/windows/os.rs6
-rw-r--r--library/std/src/sys/windows/os_str.rs4
-rw-r--r--library/std/src/sys/windows/path/tests.rs2
-rw-r--r--library/std/src/sys/windows/process.rs6
-rw-r--r--library/std/src/sys/windows/rand.rs117
-rw-r--r--library/std/src/sys/windows/stdio.rs41
-rw-r--r--library/std/src/sys/windows/thread_local_dtor.rs4
-rw-r--r--library/std/src/sys/windows/thread_local_key.rs196
-rw-r--r--library/std/src/sys/windows/thread_local_key/tests.rs53
-rw-r--r--library/std/src/sys/windows/thread_parker.rs22
-rw-r--r--library/std/src/sys_common/backtrace.rs9
-rw-r--r--library/std/src/sys_common/condvar.rs1
-rw-r--r--library/std/src/sys_common/condvar/check.rs1
-rw-r--r--library/std/src/sys_common/mod.rs10
-rw-r--r--library/std/src/sys_common/mutex.rs45
-rw-r--r--library/std/src/sys_common/net.rs28
-rw-r--r--library/std/src/sys_common/once/futex.rs134
-rw-r--r--library/std/src/sys_common/once/generic.rs282
-rw-r--r--library/std/src/sys_common/once/mod.rs43
-rw-r--r--library/std/src/sys_common/remutex.rs46
-rw-r--r--library/std/src/sys_common/remutex/tests.rs37
-rw-r--r--library/std/src/sys_common/rwlock.rs61
-rw-r--r--library/std/src/sys_common/thread_local_key.rs26
-rw-r--r--library/std/src/sys_common/thread_local_key/tests.rs9
-rw-r--r--library/std/src/sys_common/thread_parker/mod.rs1
-rw-r--r--library/std/src/sys_common/wtf8.rs95
-rw-r--r--library/std/src/sys_common/wtf8/tests.rs295
-rw-r--r--library/std/src/thread/local.rs9
-rw-r--r--library/std/src/thread/mod.rs151
-rw-r--r--library/std/src/thread/tests.rs72
-rw-r--r--library/std/src/time.rs6
-rw-r--r--library/std/src/time/tests.rs3
-rw-r--r--library/std/tests/run-time-detect.rs86
-rw-r--r--library/stdarch/CONTRIBUTING.md2
-rw-r--r--library/stdarch/ci/android-install-sdk.sh4
-rw-r--r--library/stdarch/ci/docker/aarch64-linux-android/Dockerfile11
-rw-r--r--library/stdarch/ci/docker/aarch64-unknown-linux-gnu/Dockerfile2
-rw-r--r--library/stdarch/ci/docker/arm-linux-androideabi/Dockerfile11
-rw-r--r--library/stdarch/ci/docker/armv7-unknown-linux-gnueabihf/Dockerfile2
-rw-r--r--library/stdarch/ci/docker/riscv64gc-unknown-linux-gnu/Dockerfile2
-rw-r--r--library/stdarch/ci/docker/x86_64-linux-android/Dockerfile4
-rwxr-xr-xlibrary/stdarch/ci/dox.sh9
-rw-r--r--library/stdarch/crates/core_arch/Cargo.toml1
-rw-r--r--library/stdarch/crates/core_arch/build.rs3
-rw-r--r--library/stdarch/crates/core_arch/src/aarch64/crc.rs4
-rw-r--r--library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs2538
-rw-r--r--library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs74
-rw-r--r--library/stdarch/crates/core_arch/src/arm/neon.rs21
-rw-r--r--library/stdarch/crates/core_arch/src/arm/v7.rs1
-rw-r--r--library/stdarch/crates/core_arch/src/arm_shared/crc.rs12
-rw-r--r--library/stdarch/crates/core_arch/src/arm_shared/crypto.rs28
-rw-r--r--library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs4060
-rw-r--r--library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs376
-rw-r--r--library/stdarch/crates/core_arch/src/lib.rs5
-rw-r--r--library/stdarch/crates/core_arch/src/macros.rs24
-rw-r--r--library/stdarch/crates/core_arch/src/mod.rs7
-rw-r--r--library/stdarch/crates/core_arch/src/powerpc/altivec.rs5
-rw-r--r--library/stdarch/crates/core_arch/src/riscv_shared/mod.rs83
-rw-r--r--library/stdarch/crates/core_arch/src/riscv_shared/p.rs1061
-rw-r--r--library/stdarch/crates/core_arch/src/simd_llvm.rs8
-rw-r--r--library/stdarch/crates/core_arch/src/x86/avx2.rs6
-rw-r--r--library/stdarch/crates/core_arch/src/x86/avx512bw.rs16
-rw-r--r--library/stdarch/crates/core_arch/src/x86/avx512gfni.rs6
-rw-r--r--library/stdarch/crates/core_arch/src/x86/cpuid.rs28
-rw-r--r--library/stdarch/crates/core_arch/src/x86/mod.rs4
-rw-r--r--library/stdarch/crates/core_arch/src/x86/sse.rs32
-rw-r--r--library/stdarch/crates/core_arch/src/x86/sse2.rs8
-rw-r--r--library/stdarch/crates/core_arch/src/x86/sse3.rs6
-rw-r--r--library/stdarch/crates/core_arch/src/x86_64/cmpxchg16b.rs34
-rw-r--r--library/stdarch/crates/intrinsic-test/missing_aarch64.txt14
-rw-r--r--library/stdarch/crates/intrinsic-test/src/argument.rs107
-rw-r--r--library/stdarch/crates/intrinsic-test/src/intrinsic.rs65
-rw-r--r--library/stdarch/crates/intrinsic-test/src/main.rs54
-rw-r--r--library/stdarch/crates/intrinsic-test/src/types.rs44
-rw-r--r--library/stdarch/crates/intrinsic-test/src/values.rs9
-rw-r--r--library/stdarch/crates/std_detect/Cargo.toml2
-rw-r--r--library/stdarch/crates/std_detect/src/detect/arch/aarch64.rs3
-rw-r--r--library/stdarch/crates/std_detect/src/detect/macros.rs27
-rw-r--r--library/stdarch/crates/std_detect/src/detect/os/linux/aarch64.rs227
-rw-r--r--library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs61
-rw-r--r--library/stdarch/crates/std_detect/src/detect/test_data/linux-artificial-aarch64.auxvbin0 -> 336 bytes
-rw-r--r--library/stdarch/crates/std_detect/src/detect/test_data/linux-empty-hwcap2-aarch64.auxvbin0 -> 336 bytes
-rw-r--r--library/stdarch/crates/std_detect/src/detect/test_data/linux-hwcap2-aarch64.auxvbin0 -> 336 bytes
-rw-r--r--library/stdarch/crates/std_detect/src/detect/test_data/linux-no-hwcap2-aarch64.auxvbin0 -> 320 bytes
-rw-r--r--library/stdarch/crates/std_detect/src/detect/test_data/linux-x64-i7-6850k.auxvbin304 -> 0 bytes
-rw-r--r--library/stdarch/crates/stdarch-gen/neon.spec24
-rw-r--r--library/stdarch/crates/stdarch-gen/src/main.rs55
-rw-r--r--library/stdarch/crates/stdarch-test/Cargo.toml2
-rw-r--r--library/stdarch/crates/stdarch-test/src/lib.rs1
-rw-r--r--library/stdarch/examples/hex.rs27
-rw-r--r--library/test/src/bench.rs21
-rw-r--r--library/test/src/cli.rs4
-rw-r--r--library/test/src/console.rs2
-rw-r--r--library/test/src/helpers/isatty.rs32
-rw-r--r--library/test/src/helpers/mod.rs1
-rw-r--r--library/test/src/lib.rs105
-rw-r--r--library/test/src/stats.rs2
-rw-r--r--library/test/src/term.rs2
-rw-r--r--library/test/src/term/terminfo/mod.rs20
-rw-r--r--library/test/src/term/win.rs7
-rw-r--r--library/test/src/tests.rs117
-rw-r--r--library/test/src/types.rs15
-rw-r--r--library/unwind/build.rs15
-rw-r--r--library/unwind/src/lib.rs20
480 files changed, 27597 insertions, 13410 deletions
diff --git a/library/alloc/benches/lib.rs b/library/alloc/benches/lib.rs
index 72ac897d4..d418965cd 100644
--- a/library/alloc/benches/lib.rs
+++ b/library/alloc/benches/lib.rs
@@ -3,7 +3,6 @@
#![cfg(not(target_os = "android"))]
#![feature(btree_drain_filter)]
#![feature(iter_next_chunk)]
-#![feature(map_first_last)]
#![feature(repr_simd)]
#![feature(slice_partition_dedup)]
#![feature(test)]
diff --git a/library/alloc/src/alloc.rs b/library/alloc/src/alloc.rs
index cc8da7bcc..8187517cc 100644
--- a/library/alloc/src/alloc.rs
+++ b/library/alloc/src/alloc.rs
@@ -28,16 +28,20 @@ extern "Rust" {
// The rustc fork of LLVM 14 and earlier also special-cases these function names to be able to optimize them
// like `malloc`, `realloc`, and `free`, respectively.
#[rustc_allocator]
- #[rustc_allocator_nounwind]
+ #[cfg_attr(not(bootstrap), rustc_nounwind)]
+ #[cfg_attr(bootstrap, rustc_allocator_nounwind)]
fn __rust_alloc(size: usize, align: usize) -> *mut u8;
- #[cfg_attr(not(bootstrap), rustc_deallocator)]
- #[rustc_allocator_nounwind]
+ #[rustc_deallocator]
+ #[cfg_attr(not(bootstrap), rustc_nounwind)]
+ #[cfg_attr(bootstrap, rustc_allocator_nounwind)]
fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
- #[cfg_attr(not(bootstrap), rustc_reallocator)]
- #[rustc_allocator_nounwind]
+ #[rustc_reallocator]
+ #[cfg_attr(not(bootstrap), rustc_nounwind)]
+ #[cfg_attr(bootstrap, rustc_allocator_nounwind)]
fn __rust_realloc(ptr: *mut u8, old_size: usize, align: usize, new_size: usize) -> *mut u8;
- #[cfg_attr(not(bootstrap), rustc_allocator_zeroed)]
- #[rustc_allocator_nounwind]
+ #[rustc_allocator_zeroed]
+ #[cfg_attr(not(bootstrap), rustc_nounwind)]
+ #[cfg_attr(bootstrap, rustc_allocator_nounwind)]
fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8;
}
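
The alloc.rs hunk above leans entirely on `cfg_attr`, which applies the inner attribute only when its predicate holds; that is how the allocator shims carry the new `rustc_nounwind` attribute while still compiling under the bootstrap compiler, which only knows `rustc_allocator_nounwind`. A trivial, purely illustrative sketch of the same mechanism using the built-in `test` cfg (not part of the patch):

    // Under `cfg(test)` the function may go unused without a warning;
    // in a normal build it is marked #[inline] instead.
    #[cfg_attr(test, allow(dead_code))]
    #[cfg_attr(not(test), inline)]
    fn add(a: u32, b: u32) -> u32 {
        a + b
    }

    fn main() {
        assert_eq!(add(2, 3), 5);
    }
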
diff --git a/library/alloc/src/alloc/tests.rs b/library/alloc/src/alloc/tests.rs
index 7d560964d..b2f019459 100644
--- a/library/alloc/src/alloc/tests.rs
+++ b/library/alloc/src/alloc/tests.rs
@@ -15,7 +15,7 @@ fn allocate_zeroed() {
let end = i.add(layout.size());
while i < end {
assert_eq!(*i, 0);
- i = i.offset(1);
+ i = i.add(1);
}
Global.deallocate(ptr.as_non_null_ptr(), layout);
}
diff --git a/library/alloc/src/borrow.rs b/library/alloc/src/borrow.rs
index 904a53bb4..83a138559 100644
--- a/library/alloc/src/borrow.rs
+++ b/library/alloc/src/borrow.rs
@@ -21,7 +21,6 @@ use Cow::*;
impl<'a, B: ?Sized> Borrow<B> for Cow<'a, B>
where
B: ToOwned,
- <B as ToOwned>::Owned: 'a,
{
fn borrow(&self) -> &B {
&**self
diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs
index c1ceeb0de..d6681a317 100644
--- a/library/alloc/src/boxed.rs
+++ b/library/alloc/src/boxed.rs
@@ -1,4 +1,4 @@
-//! A pointer type for heap allocation.
+//! The `Box<T>` type for heap allocation.
//!
//! [`Box<T>`], casually referred to as a 'box', provides the simplest form of
//! heap allocation in Rust. Boxes provide ownership for this allocation, and
@@ -151,6 +151,7 @@ use core::async_iter::AsyncIterator;
use core::borrow;
use core::cmp::Ordering;
use core::convert::{From, TryFrom};
+use core::error::Error;
use core::fmt;
use core::future::Future;
use core::hash::{Hash, Hasher};
@@ -175,6 +176,8 @@ use crate::raw_vec::RawVec;
#[cfg(not(no_global_oom_handling))]
use crate::str::from_boxed_utf8_unchecked;
#[cfg(not(no_global_oom_handling))]
+use crate::string::String;
+#[cfg(not(no_global_oom_handling))]
use crate::vec::Vec;
#[unstable(feature = "thin_box", issue = "92791")]
@@ -1480,7 +1483,7 @@ impl<T: Copy> From<&[T]> for Box<[T]> {
/// Converts a `&[T]` into a `Box<[T]>`
///
/// This conversion allocates on the heap
- /// and performs a copy of `slice`.
+ /// and performs a copy of `slice` and its contents.
///
/// # Examples
/// ```rust
@@ -1617,6 +1620,22 @@ impl<T, const N: usize> From<[T; N]> for Box<[T]> {
}
}
+/// Casts a boxed slice to a boxed array.
+///
+/// # Safety
+///
+/// `boxed_slice.len()` must be exactly `N`.
+unsafe fn boxed_slice_as_array_unchecked<T, A: Allocator, const N: usize>(
+ boxed_slice: Box<[T], A>,
+) -> Box<[T; N], A> {
+ debug_assert_eq!(boxed_slice.len(), N);
+
+ let (ptr, alloc) = Box::into_raw_with_allocator(boxed_slice);
+ // SAFETY: Pointer and allocator came from an existing box,
+ // and our safety condition requires that the length is exactly `N`
+ unsafe { Box::from_raw_in(ptr as *mut [T; N], alloc) }
+}
+
#[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
impl<T, const N: usize> TryFrom<Box<[T]>> for Box<[T; N]> {
type Error = Box<[T]>;
@@ -1632,13 +1651,46 @@ impl<T, const N: usize> TryFrom<Box<[T]>> for Box<[T; N]> {
/// `boxed_slice.len()` does not equal `N`.
fn try_from(boxed_slice: Box<[T]>) -> Result<Self, Self::Error> {
if boxed_slice.len() == N {
- Ok(unsafe { Box::from_raw(Box::into_raw(boxed_slice) as *mut [T; N]) })
+ Ok(unsafe { boxed_slice_as_array_unchecked(boxed_slice) })
} else {
Err(boxed_slice)
}
}
}
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "boxed_array_try_from_vec", since = "1.66.0")]
+impl<T, const N: usize> TryFrom<Vec<T>> for Box<[T; N]> {
+ type Error = Vec<T>;
+
+ /// Attempts to convert a `Vec<T>` into a `Box<[T; N]>`.
+ ///
+ /// Like [`Vec::into_boxed_slice`], this is in-place if `vec.capacity() == N`,
+ /// but will require a reallocation otherwise.
+ ///
+ /// # Errors
+ ///
+ /// Returns the original `Vec<T>` in the `Err` variant if
+ /// `boxed_slice.len()` does not equal `N`.
+ ///
+ /// # Examples
+ ///
+ /// This can be used with [`vec!`] to create an array on the heap:
+ ///
+ /// ```
+ /// let state: Box<[f32; 100]> = vec![1.0; 100].try_into().unwrap();
+ /// assert_eq!(state.len(), 100);
+ /// ```
+ fn try_from(vec: Vec<T>) -> Result<Self, Self::Error> {
+ if vec.len() == N {
+ let boxed_slice = vec.into_boxed_slice();
+ Ok(unsafe { boxed_slice_as_array_unchecked(boxed_slice) })
+ } else {
+ Err(vec)
+ }
+ }
+}
+
impl<A: Allocator> Box<dyn Any, A> {
/// Attempt to downcast the box to a concrete type.
///
@@ -2032,8 +2084,7 @@ impl<T: ?Sized, A: Allocator> AsMut<T> for Box<T, A> {
* could have a method to project a Pin<T> from it.
*/
#[stable(feature = "pin", since = "1.33.0")]
-#[rustc_const_unstable(feature = "const_box", issue = "92521")]
-impl<T: ?Sized, A: Allocator> const Unpin for Box<T, A> where A: 'static {}
+impl<T: ?Sized, A: Allocator> Unpin for Box<T, A> where A: 'static {}
#[unstable(feature = "generator_trait", issue = "43122")]
impl<G: ?Sized + Generator<R> + Unpin, R, A: Allocator> Generator<R> for Box<G, A>
@@ -2085,3 +2136,292 @@ impl<S: ?Sized + AsyncIterator + Unpin> AsyncIterator for Box<S> {
(**self).size_hint()
}
}
+
+impl dyn Error {
+ #[inline]
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[rustc_allow_incoherent_impl]
+ /// Attempts to downcast the box to a concrete type.
+ pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<dyn Error>> {
+ if self.is::<T>() {
+ unsafe {
+ let raw: *mut dyn Error = Box::into_raw(self);
+ Ok(Box::from_raw(raw as *mut T))
+ }
+ } else {
+ Err(self)
+ }
+ }
+}
+
+impl dyn Error + Send {
+ #[inline]
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[rustc_allow_incoherent_impl]
+ /// Attempts to downcast the box to a concrete type.
+ pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<dyn Error + Send>> {
+ let err: Box<dyn Error> = self;
+ <dyn Error>::downcast(err).map_err(|s| unsafe {
+ // Reapply the `Send` marker.
+ mem::transmute::<Box<dyn Error>, Box<dyn Error + Send>>(s)
+ })
+ }
+}
+
+impl dyn Error + Send + Sync {
+ #[inline]
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[rustc_allow_incoherent_impl]
+ /// Attempts to downcast the box to a concrete type.
+ pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<Self>> {
+ let err: Box<dyn Error> = self;
+ <dyn Error>::downcast(err).map_err(|s| unsafe {
+ // Reapply the `Send + Sync` marker.
+ mem::transmute::<Box<dyn Error>, Box<dyn Error + Send + Sync>>(s)
+ })
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, E: Error + 'a> From<E> for Box<dyn Error + 'a> {
+ /// Converts a type of [`Error`] into a box of dyn [`Error`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::fmt;
+ /// use std::mem;
+ ///
+ /// #[derive(Debug)]
+ /// struct AnError;
+ ///
+ /// impl fmt::Display for AnError {
+ /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// write!(f, "An error")
+ /// }
+ /// }
+ ///
+ /// impl Error for AnError {}
+ ///
+ /// let an_error = AnError;
+ /// assert!(0 == mem::size_of_val(&an_error));
+ /// let a_boxed_error = Box::<dyn Error>::from(an_error);
+ /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(err: E) -> Box<dyn Error + 'a> {
+ Box::new(err)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, E: Error + Send + Sync + 'a> From<E> for Box<dyn Error + Send + Sync + 'a> {
+ /// Converts a type of [`Error`] + [`Send`] + [`Sync`] into a box of
+ /// dyn [`Error`] + [`Send`] + [`Sync`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::fmt;
+ /// use std::mem;
+ ///
+ /// #[derive(Debug)]
+ /// struct AnError;
+ ///
+ /// impl fmt::Display for AnError {
+ /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// write!(f, "An error")
+ /// }
+ /// }
+ ///
+ /// impl Error for AnError {}
+ ///
+ /// unsafe impl Send for AnError {}
+ ///
+ /// unsafe impl Sync for AnError {}
+ ///
+ /// let an_error = AnError;
+ /// assert!(0 == mem::size_of_val(&an_error));
+ /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(an_error);
+ /// assert!(
+ /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(err: E) -> Box<dyn Error + Send + Sync + 'a> {
+ Box::new(err)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl From<String> for Box<dyn Error + Send + Sync> {
+ /// Converts a [`String`] into a box of dyn [`Error`] + [`Send`] + [`Sync`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ ///
+ /// let a_string_error = "a string error".to_string();
+ /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_string_error);
+ /// assert!(
+ /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ #[inline]
+ fn from(err: String) -> Box<dyn Error + Send + Sync> {
+ struct StringError(String);
+
+ impl Error for StringError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ &self.0
+ }
+ }
+
+ impl fmt::Display for StringError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&self.0, f)
+ }
+ }
+
+ // Purposefully skip printing "StringError(..)"
+ impl fmt::Debug for StringError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&self.0, f)
+ }
+ }
+
+ Box::new(StringError(err))
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "string_box_error", since = "1.6.0")]
+impl From<String> for Box<dyn Error> {
+ /// Converts a [`String`] into a box of dyn [`Error`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ ///
+ /// let a_string_error = "a string error".to_string();
+ /// let a_boxed_error = Box::<dyn Error>::from(a_string_error);
+ /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(str_err: String) -> Box<dyn Error> {
+ let err1: Box<dyn Error + Send + Sync> = From::from(str_err);
+ let err2: Box<dyn Error> = err1;
+ err2
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> From<&str> for Box<dyn Error + Send + Sync + 'a> {
+ /// Converts a [`str`] into a box of dyn [`Error`] + [`Send`] + [`Sync`].
+ ///
+ /// [`str`]: prim@str
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ ///
+ /// let a_str_error = "a str error";
+ /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_str_error);
+ /// assert!(
+ /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ #[inline]
+ fn from(err: &str) -> Box<dyn Error + Send + Sync + 'a> {
+ From::from(String::from(err))
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "string_box_error", since = "1.6.0")]
+impl From<&str> for Box<dyn Error> {
+ /// Converts a [`str`] into a box of dyn [`Error`].
+ ///
+ /// [`str`]: prim@str
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ ///
+ /// let a_str_error = "a str error";
+ /// let a_boxed_error = Box::<dyn Error>::from(a_str_error);
+ /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(err: &str) -> Box<dyn Error> {
+ From::from(String::from(err))
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_box_error", since = "1.22.0")]
+impl<'a, 'b> From<Cow<'b, str>> for Box<dyn Error + Send + Sync + 'a> {
+ /// Converts a [`Cow`] into a box of dyn [`Error`] + [`Send`] + [`Sync`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ /// use std::borrow::Cow;
+ ///
+ /// let a_cow_str_error = Cow::from("a str error");
+ /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_cow_str_error);
+ /// assert!(
+ /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(err: Cow<'b, str>) -> Box<dyn Error + Send + Sync + 'a> {
+ From::from(String::from(err))
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_box_error", since = "1.22.0")]
+impl<'a> From<Cow<'a, str>> for Box<dyn Error> {
+ /// Converts a [`Cow`] into a box of dyn [`Error`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ /// use std::borrow::Cow;
+ ///
+ /// let a_cow_str_error = Cow::from("a str error");
+ /// let a_boxed_error = Box::<dyn Error>::from(a_cow_str_error);
+ /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(err: Cow<'a, str>) -> Box<dyn Error> {
+ From::from(String::from(err))
+ }
+}
+
+#[stable(feature = "box_error", since = "1.8.0")]
+impl<T: core::error::Error> core::error::Error for Box<T> {
+ #[allow(deprecated, deprecated_in_future)]
+ fn description(&self) -> &str {
+ core::error::Error::description(&**self)
+ }
+
+ #[allow(deprecated)]
+ fn cause(&self) -> Option<&dyn core::error::Error> {
+ core::error::Error::cause(&**self)
+ }
+
+ fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
+ core::error::Error::source(&**self)
+ }
+}
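
Beyond the `Error` plumbing, the boxed.rs diff above adds the `TryFrom<Vec<T>> for Box<[T; N]>` impl (stabilized as `boxed_array_try_from_vec`). A short usage sketch, assuming a compiler new enough to carry that impl:

    fn main() {
        // Succeeds only when the vector's length is exactly N; per the doc
        // comment above, the conversion is in-place when capacity equals N.
        let exact: Box<[u8; 4]> = vec![1u8, 2, 3, 4].try_into().unwrap();
        assert_eq!(*exact, [1, 2, 3, 4]);

        // On a length mismatch the original Vec comes back in the Err variant.
        let mismatch: Result<Box<[u8; 4]>, Vec<u8>> = vec![1u8, 2, 3].try_into();
        assert_eq!(mismatch.unwrap_err(), vec![1, 2, 3]);
    }
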
diff --git a/library/alloc/src/boxed/thin.rs b/library/alloc/src/boxed/thin.rs
index 649ccfcaa..c477c4490 100644
--- a/library/alloc/src/boxed/thin.rs
+++ b/library/alloc/src/boxed/thin.rs
@@ -2,6 +2,7 @@
// https://github.com/matthieu-m/rfc2580/blob/b58d1d3cba0d4b5e859d3617ea2d0943aaa31329/examples/thin.rs
// by matthieu-m
use crate::alloc::{self, Layout, LayoutError};
+use core::error::Error;
use core::fmt::{self, Debug, Display, Formatter};
use core::marker::PhantomData;
#[cfg(not(no_global_oom_handling))]
@@ -271,3 +272,10 @@ impl<H> WithHeader<H> {
Layout::new::<H>().extend(value_layout)
}
}
+
+#[unstable(feature = "thin_box", issue = "92791")]
+impl<T: ?Sized + Error> Error for ThinBox<T> {
+ fn source(&self) -> Option<&(dyn Error + 'static)> {
+ self.deref().source()
+ }
+}
diff --git a/library/alloc/src/collections/binary_heap.rs b/library/alloc/src/collections/binary_heap.rs
index 197e7aaac..4583bc9a1 100644
--- a/library/alloc/src/collections/binary_heap.rs
+++ b/library/alloc/src/collections/binary_heap.rs
@@ -1010,7 +1010,8 @@ impl<T> BinaryHeap<T> {
/// current length. The allocator may reserve more space to speculatively
/// avoid frequent allocations. After calling `try_reserve`, capacity will be
/// greater than or equal to `self.len() + additional` if it returns
- /// `Ok(())`. Does nothing if capacity is already sufficient.
+ /// `Ok(())`. Does nothing if capacity is already sufficient. This method
+ /// preserves the contents even if an error occurs.
///
/// # Errors
///
diff --git a/library/alloc/src/collections/btree/dedup_sorted_iter.rs b/library/alloc/src/collections/btree/dedup_sorted_iter.rs
index 60bf83b83..17ee78045 100644
--- a/library/alloc/src/collections/btree/dedup_sorted_iter.rs
+++ b/library/alloc/src/collections/btree/dedup_sorted_iter.rs
@@ -3,7 +3,9 @@ use core::iter::Peekable;
/// A iterator for deduping the key of a sorted iterator.
/// When encountering the duplicated key, only the last key-value pair is yielded.
///
-/// Used by [`BTreeMap::bulk_build_from_sorted_iter`].
+/// Used by [`BTreeMap::bulk_build_from_sorted_iter`][1].
+///
+/// [1]: crate::collections::BTreeMap::bulk_build_from_sorted_iter
pub struct DedupSortedIter<K, V, I>
where
I: Iterator<Item = (K, V)>,
diff --git a/library/alloc/src/collections/btree/map.rs b/library/alloc/src/collections/btree/map.rs
index cacbd54b6..8a7719347 100644
--- a/library/alloc/src/collections/btree/map.rs
+++ b/library/alloc/src/collections/btree/map.rs
@@ -580,7 +580,7 @@ impl<K, V> BTreeMap<K, V> {
/// map.insert(1, "a");
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")]
+ #[rustc_const_stable(feature = "const_btree_new", since = "1.66.0")]
#[must_use]
pub const fn new() -> BTreeMap<K, V> {
BTreeMap { root: None, length: 0, alloc: ManuallyDrop::new(Global), _marker: PhantomData }
@@ -703,7 +703,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// Basic usage:
///
/// ```
- /// #![feature(map_first_last)]
/// use std::collections::BTreeMap;
///
/// let mut map = BTreeMap::new();
@@ -712,7 +711,7 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// map.insert(2, "a");
/// assert_eq!(map.first_key_value(), Some((&1, &"b")));
/// ```
- #[unstable(feature = "map_first_last", issue = "62924")]
+ #[stable(feature = "map_first_last", since = "1.66.0")]
pub fn first_key_value(&self) -> Option<(&K, &V)>
where
K: Ord,
@@ -727,7 +726,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// # Examples
///
/// ```
- /// #![feature(map_first_last)]
/// use std::collections::BTreeMap;
///
/// let mut map = BTreeMap::new();
@@ -741,7 +739,7 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// assert_eq!(*map.get(&1).unwrap(), "first");
/// assert_eq!(*map.get(&2).unwrap(), "b");
/// ```
- #[unstable(feature = "map_first_last", issue = "62924")]
+ #[stable(feature = "map_first_last", since = "1.66.0")]
pub fn first_entry(&mut self) -> Option<OccupiedEntry<'_, K, V, A>>
where
K: Ord,
@@ -765,7 +763,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// Draining elements in ascending order, while keeping a usable map each iteration.
///
/// ```
- /// #![feature(map_first_last)]
/// use std::collections::BTreeMap;
///
/// let mut map = BTreeMap::new();
@@ -776,7 +773,7 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// }
/// assert!(map.is_empty());
/// ```
- #[unstable(feature = "map_first_last", issue = "62924")]
+ #[stable(feature = "map_first_last", since = "1.66.0")]
pub fn pop_first(&mut self) -> Option<(K, V)>
where
K: Ord,
@@ -792,7 +789,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// Basic usage:
///
/// ```
- /// #![feature(map_first_last)]
/// use std::collections::BTreeMap;
///
/// let mut map = BTreeMap::new();
@@ -800,7 +796,7 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// map.insert(2, "a");
/// assert_eq!(map.last_key_value(), Some((&2, &"a")));
/// ```
- #[unstable(feature = "map_first_last", issue = "62924")]
+ #[stable(feature = "map_first_last", since = "1.66.0")]
pub fn last_key_value(&self) -> Option<(&K, &V)>
where
K: Ord,
@@ -815,7 +811,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// # Examples
///
/// ```
- /// #![feature(map_first_last)]
/// use std::collections::BTreeMap;
///
/// let mut map = BTreeMap::new();
@@ -829,7 +824,7 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// assert_eq!(*map.get(&1).unwrap(), "a");
/// assert_eq!(*map.get(&2).unwrap(), "last");
/// ```
- #[unstable(feature = "map_first_last", issue = "62924")]
+ #[stable(feature = "map_first_last", since = "1.66.0")]
pub fn last_entry(&mut self) -> Option<OccupiedEntry<'_, K, V, A>>
where
K: Ord,
@@ -853,7 +848,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// Draining elements in descending order, while keeping a usable map each iteration.
///
/// ```
- /// #![feature(map_first_last)]
/// use std::collections::BTreeMap;
///
/// let mut map = BTreeMap::new();
@@ -864,7 +858,7 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// }
/// assert!(map.is_empty());
/// ```
- #[unstable(feature = "map_first_last", issue = "62924")]
+ #[stable(feature = "map_first_last", since = "1.66.0")]
pub fn pop_last(&mut self) -> Option<(K, V)>
where
K: Ord,
@@ -1099,6 +1093,9 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// Moves all elements from `other` into `self`, leaving `other` empty.
///
+ /// If a key from `other` is already present in `self`, the respective
+ /// value from `self` will be overwritten with the respective value from `other`.
+ ///
/// # Examples
///
/// ```
@@ -1107,10 +1104,10 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// let mut a = BTreeMap::new();
/// a.insert(1, "a");
/// a.insert(2, "b");
- /// a.insert(3, "c");
+ /// a.insert(3, "c"); // Note: Key (3) also present in b.
///
/// let mut b = BTreeMap::new();
- /// b.insert(3, "d");
+ /// b.insert(3, "d"); // Note: Key (3) also present in a.
/// b.insert(4, "e");
/// b.insert(5, "f");
///
@@ -1121,7 +1118,7 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
///
/// assert_eq!(a[&1], "a");
/// assert_eq!(a[&2], "b");
- /// assert_eq!(a[&3], "d");
+ /// assert_eq!(a[&3], "d"); // Note: "c" has been overwritten.
/// assert_eq!(a[&4], "e");
/// assert_eq!(a[&5], "f");
/// ```
@@ -2392,7 +2389,11 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// ```
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")]
+ #[rustc_const_unstable(
+ feature = "const_btree_len",
+ issue = "71835",
+ implied_by = "const_btree_new"
+ )]
pub const fn len(&self) -> usize {
self.length
}
@@ -2413,7 +2414,11 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// ```
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")]
+ #[rustc_const_unstable(
+ feature = "const_btree_len",
+ issue = "71835",
+ implied_by = "const_btree_new"
+ )]
pub const fn is_empty(&self) -> bool {
self.len() == 0
}
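
With `const_btree_new` stabilized in the hunks above, `BTreeMap::new()` can now run in a const initializer. A minimal sketch (assuming Rust 1.66+ for the const constructor and 1.63+ for `Mutex::new` in const) of a static map guarded by a `Mutex` for mutation:

    use std::collections::BTreeMap;
    use std::sync::Mutex;

    // Both constructors are const, so the whole initializer is evaluated at
    // compile time; no lazy-initialization wrapper is needed.
    static REGISTRY: Mutex<BTreeMap<u32, &'static str>> =
        Mutex::new(BTreeMap::new());

    fn main() {
        let mut map = REGISTRY.lock().unwrap();
        map.insert(1, "one");
        // first_key_value / pop_first are also stabilized by the hunks above.
        assert_eq!(map.first_key_value(), Some((&1, &"one")));
        assert_eq!(map.pop_first(), Some((1, "one")));
    }
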
diff --git a/library/alloc/src/collections/btree/map/entry.rs b/library/alloc/src/collections/btree/map/entry.rs
index b6eecf9b0..370b58864 100644
--- a/library/alloc/src/collections/btree/map/entry.rs
+++ b/library/alloc/src/collections/btree/map/entry.rs
@@ -133,6 +133,16 @@ impl<'a, K: Debug + Ord, V: Debug, A: Allocator + Clone> fmt::Display
}
}
+#[unstable(feature = "map_try_insert", issue = "82766")]
+impl<'a, K: core::fmt::Debug + Ord, V: core::fmt::Debug> core::error::Error
+ for crate::collections::btree_map::OccupiedError<'a, K, V>
+{
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "key already exists"
+ }
+}
+
impl<'a, K: Ord, V, A: Allocator + Clone> Entry<'a, K, V, A> {
/// Ensures a value is in the entry by inserting the default if empty, and returns
/// a mutable reference to the value in the entry.
diff --git a/library/alloc/src/collections/btree/node.rs b/library/alloc/src/collections/btree/node.rs
index d831161bc..da766b67a 100644
--- a/library/alloc/src/collections/btree/node.rs
+++ b/library/alloc/src/collections/btree/node.rs
@@ -206,9 +206,9 @@ impl<'a, K: 'a, V: 'a, Type> Clone for NodeRef<marker::Immut<'a>, K, V, Type> {
unsafe impl<BorrowType, K: Sync, V: Sync, Type> Sync for NodeRef<BorrowType, K, V, Type> {}
-unsafe impl<'a, K: Sync + 'a, V: Sync + 'a, Type> Send for NodeRef<marker::Immut<'a>, K, V, Type> {}
-unsafe impl<'a, K: Send + 'a, V: Send + 'a, Type> Send for NodeRef<marker::Mut<'a>, K, V, Type> {}
-unsafe impl<'a, K: Send + 'a, V: Send + 'a, Type> Send for NodeRef<marker::ValMut<'a>, K, V, Type> {}
+unsafe impl<K: Sync, V: Sync, Type> Send for NodeRef<marker::Immut<'_>, K, V, Type> {}
+unsafe impl<K: Send, V: Send, Type> Send for NodeRef<marker::Mut<'_>, K, V, Type> {}
+unsafe impl<K: Send, V: Send, Type> Send for NodeRef<marker::ValMut<'_>, K, V, Type> {}
unsafe impl<K: Send, V: Send, Type> Send for NodeRef<marker::Owned, K, V, Type> {}
unsafe impl<K: Send, V: Send, Type> Send for NodeRef<marker::Dying, K, V, Type> {}
@@ -318,7 +318,7 @@ impl<BorrowType: marker::BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type>
pub fn ascend(
self,
) -> Result<Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::Edge>, Self> {
- assert!(BorrowType::PERMITS_TRAVERSAL);
+ let _ = BorrowType::TRAVERSAL_PERMIT;
// We need to use raw pointers to nodes because, if BorrowType is marker::ValMut,
// there might be outstanding mutable references to values that we must not invalidate.
let leaf_ptr: *const _ = Self::as_leaf_ptr(&self);
@@ -1003,7 +1003,7 @@ impl<BorrowType: marker::BorrowType, K, V>
/// `edge.descend().ascend().unwrap()` and `node.ascend().unwrap().descend()` should
/// both, upon success, do nothing.
pub fn descend(self) -> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
- assert!(BorrowType::PERMITS_TRAVERSAL);
+ let _ = BorrowType::TRAVERSAL_PERMIT;
// We need to use raw pointers to nodes because, if BorrowType is
// marker::ValMut, there might be outstanding mutable references to
// values that we must not invalidate. There's no worry accessing the
@@ -1666,15 +1666,17 @@ pub mod marker {
pub struct ValMut<'a>(PhantomData<&'a mut ()>);
pub trait BorrowType {
- // Whether node references of this borrow type allow traversing
- // to other nodes in the tree.
- const PERMITS_TRAVERSAL: bool = true;
+ // If node references of this borrow type allow traversing to other
+ // nodes in the tree, this constant can be evaluated. Thus reading it
+ // serves as a compile-time assertion.
+ const TRAVERSAL_PERMIT: () = ();
}
impl BorrowType for Owned {
- // Traversal isn't needed, it happens using the result of `borrow_mut`.
+ // Reject evaluation, because traversal isn't needed. Instead traversal
+ // happens using the result of `borrow_mut`.
// By disabling traversal, and only creating new references to roots,
// we know that every reference of the `Owned` type is to a root node.
- const PERMITS_TRAVERSAL: bool = false;
+ const TRAVERSAL_PERMIT: () = panic!();
}
impl BorrowType for Dying {}
impl<'a> BorrowType for Immut<'a> {}
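
The node.rs hunk above swaps the boolean `PERMITS_TRAVERSAL` runtime assert for an associated `()` constant whose evaluation fails for the `Owned` borrow type, so a forbidden traversal becomes a compile-time (post-monomorphization) error instead of a runtime panic. A stripped-down sketch of the same pattern, not the stdlib code:

    trait BorrowType {
        // Defaults to a unit value that always evaluates.
        const TRAVERSAL_PERMIT: () = ();
    }

    struct Immut;
    struct Owned;

    impl BorrowType for Immut {}
    impl BorrowType for Owned {
        // Evaluating this constant is a const-eval error, so any
        // instantiation that reads it is rejected at compile time.
        const TRAVERSAL_PERMIT: () = panic!("traversal is not permitted for Owned");
    }

    fn ascend<B: BorrowType>() {
        // Reading the constant forces its evaluation for the concrete `B`.
        let _ = B::TRAVERSAL_PERMIT;
    }

    fn main() {
        ascend::<Immut>(); // fine
        // ascend::<Owned>(); // uncommenting this fails to compile
    }
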
diff --git a/library/alloc/src/collections/btree/set.rs b/library/alloc/src/collections/btree/set.rs
index 2cfc08074..4ddb21192 100644
--- a/library/alloc/src/collections/btree/set.rs
+++ b/library/alloc/src/collections/btree/set.rs
@@ -343,7 +343,7 @@ impl<T> BTreeSet<T> {
/// let mut set: BTreeSet<i32> = BTreeSet::new();
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")]
+ #[rustc_const_stable(feature = "const_btree_new", since = "1.66.0")]
#[must_use]
pub const fn new() -> BTreeSet<T> {
BTreeSet { map: BTreeMap::new() }
@@ -786,7 +786,6 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// Basic usage:
///
/// ```
- /// #![feature(map_first_last)]
/// use std::collections::BTreeSet;
///
/// let mut set = BTreeSet::new();
@@ -797,7 +796,7 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// assert_eq!(set.first(), Some(&1));
/// ```
#[must_use]
- #[unstable(feature = "map_first_last", issue = "62924")]
+ #[stable(feature = "map_first_last", since = "1.66.0")]
pub fn first(&self) -> Option<&T>
where
T: Ord,
@@ -813,7 +812,6 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// Basic usage:
///
/// ```
- /// #![feature(map_first_last)]
/// use std::collections::BTreeSet;
///
/// let mut set = BTreeSet::new();
@@ -824,7 +822,7 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// assert_eq!(set.last(), Some(&2));
/// ```
#[must_use]
- #[unstable(feature = "map_first_last", issue = "62924")]
+ #[stable(feature = "map_first_last", since = "1.66.0")]
pub fn last(&self) -> Option<&T>
where
T: Ord,
@@ -838,7 +836,6 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// # Examples
///
/// ```
- /// #![feature(map_first_last)]
/// use std::collections::BTreeSet;
///
/// let mut set = BTreeSet::new();
@@ -849,7 +846,7 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// }
/// assert!(set.is_empty());
/// ```
- #[unstable(feature = "map_first_last", issue = "62924")]
+ #[stable(feature = "map_first_last", since = "1.66.0")]
pub fn pop_first(&mut self) -> Option<T>
where
T: Ord,
@@ -863,7 +860,6 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// # Examples
///
/// ```
- /// #![feature(map_first_last)]
/// use std::collections::BTreeSet;
///
/// let mut set = BTreeSet::new();
@@ -874,7 +870,7 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// }
/// assert!(set.is_empty());
/// ```
- #[unstable(feature = "map_first_last", issue = "62924")]
+ #[stable(feature = "map_first_last", since = "1.66.0")]
pub fn pop_last(&mut self) -> Option<T>
where
T: Ord,
@@ -1174,7 +1170,11 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// ```
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")]
+ #[rustc_const_unstable(
+ feature = "const_btree_len",
+ issue = "71835",
+ implied_by = "const_btree_new"
+ )]
pub const fn len(&self) -> usize {
self.map.len()
}
@@ -1193,7 +1193,11 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// ```
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")]
+ #[rustc_const_unstable(
+ feature = "const_btree_len",
+ issue = "71835",
+ implied_by = "const_btree_new"
+ )]
pub const fn is_empty(&self) -> bool {
self.len() == 0
}
diff --git a/library/alloc/src/collections/linked_list.rs b/library/alloc/src/collections/linked_list.rs
index e21c8aa3b..f2f5dffc2 100644
--- a/library/alloc/src/collections/linked_list.rs
+++ b/library/alloc/src/collections/linked_list.rs
@@ -1570,7 +1570,7 @@ impl<'a, T> CursorMut<'a, T> {
/// that the cursor points to is unchanged, even if it is the "ghost" node.
///
/// This operation should compute in *O*(1) time.
- // `push_front` continues to point to "ghost" when it addes a node to mimic
+ // `push_front` continues to point to "ghost" when it adds a node to mimic
// the behavior of `insert_before` on an empty list.
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn push_front(&mut self, elt: T) {
@@ -1613,7 +1613,7 @@ impl<'a, T> CursorMut<'a, T> {
None
} else {
// We can't point to the node that we pop. Copying the behavior of
- // `remove_current`, we move on the the next node in the sequence.
+ // `remove_current`, we move on to the next node in the sequence.
// If the list is of length 1 then we end pointing to the "ghost"
// node at index 0, which is expected.
if self.list.head == self.current {
diff --git a/library/alloc/src/collections/mod.rs b/library/alloc/src/collections/mod.rs
index 628a5b155..161a37573 100644
--- a/library/alloc/src/collections/mod.rs
+++ b/library/alloc/src/collections/mod.rs
@@ -152,3 +152,6 @@ trait SpecExtend<I: IntoIterator> {
/// Extends `self` with the contents of the given iterator.
fn spec_extend(&mut self, iter: I);
}
+
+#[stable(feature = "try_reserve", since = "1.57.0")]
+impl core::error::Error for TryReserveError {}
diff --git a/library/alloc/src/collections/vec_deque/drain.rs b/library/alloc/src/collections/vec_deque/drain.rs
index 05f94da6d..41baa7102 100644
--- a/library/alloc/src/collections/vec_deque/drain.rs
+++ b/library/alloc/src/collections/vec_deque/drain.rs
@@ -1,10 +1,12 @@
+use core::fmt;
use core::iter::FusedIterator;
+use core::marker::PhantomData;
+use core::mem::{self, MaybeUninit};
use core::ptr::{self, NonNull};
-use core::{fmt, mem};
use crate::alloc::{Allocator, Global};
-use super::{count, Iter, VecDeque};
+use super::{count, wrap_index, VecDeque};
/// A draining iterator over the elements of a `VecDeque`.
///
@@ -20,18 +22,24 @@ pub struct Drain<
> {
after_tail: usize,
after_head: usize,
- iter: Iter<'a, T>,
+ ring: NonNull<[T]>,
+ tail: usize,
+ head: usize,
deque: NonNull<VecDeque<T, A>>,
+ _phantom: PhantomData<&'a T>,
}
impl<'a, T, A: Allocator> Drain<'a, T, A> {
pub(super) unsafe fn new(
after_tail: usize,
after_head: usize,
- iter: Iter<'a, T>,
+ ring: &'a [MaybeUninit<T>],
+ tail: usize,
+ head: usize,
deque: NonNull<VecDeque<T, A>>,
) -> Self {
- Drain { after_tail, after_head, iter, deque }
+ let ring = unsafe { NonNull::new_unchecked(ring as *const [MaybeUninit<T>] as *mut _) };
+ Drain { after_tail, after_head, ring, tail, head, deque, _phantom: PhantomData }
}
}
@@ -41,7 +49,9 @@ impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
f.debug_tuple("Drain")
.field(&self.after_tail)
.field(&self.after_head)
- .field(&self.iter)
+ .field(&self.ring)
+ .field(&self.tail)
+ .field(&self.head)
.finish()
}
}
@@ -118,12 +128,21 @@ impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
#[inline]
fn next(&mut self) -> Option<T> {
- self.iter.next().map(|elt| unsafe { ptr::read(elt) })
+ if self.tail == self.head {
+ return None;
+ }
+ let tail = self.tail;
+ self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
+ // Safety:
+ // - `self.tail` in a ring buffer is always a valid index.
+ // - `self.head` and `self.tail` equality is checked above.
+ unsafe { Some(ptr::read(self.ring.as_ptr().get_unchecked_mut(tail))) }
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
- self.iter.size_hint()
+ let len = count(self.tail, self.head, self.ring.len());
+ (len, Some(len))
}
}
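
For context, a sketch (not part of the patch) of the ring arithmetic the rewritten `next`/`size_hint` rely on. The real `wrap_index` and `count` helpers live in the parent module; this assumes, as the old `VecDeque` layout does, that the buffer capacity is a power of two:

```rust
// Sketch of the ring-buffer index math; helper bodies are assumptions.
fn wrap_index(index: usize, size: usize) -> usize {
    debug_assert!(size.is_power_of_two());
    index & (size - 1)
}

fn count(tail: usize, head: usize, size: usize) -> usize {
    // Number of live elements between tail (inclusive) and head (exclusive).
    head.wrapping_sub(tail) & (size - 1)
}

fn main() {
    let size = 8;
    // head has wrapped around past the end of the buffer:
    let (tail, head) = (6, 2);
    assert_eq!(count(tail, head, size), 4);
    assert_eq!(wrap_index(tail + 1, size), 7);
    assert_eq!(wrap_index(tail + 2, size), 0);
}
```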
@@ -131,7 +150,14 @@ impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
#[inline]
fn next_back(&mut self) -> Option<T> {
- self.iter.next_back().map(|elt| unsafe { ptr::read(elt) })
+ if self.tail == self.head {
+ return None;
+ }
+ self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
+ // Safety:
+ // - `self.head` in a ring buffer is always a valid index.
+ // - `self.head` and `self.tail` equality is checked above.
+ unsafe { Some(ptr::read(self.ring.as_ptr().get_unchecked_mut(self.head))) }
}
}
diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs
index 4d895d837..2a57dad89 100644
--- a/library/alloc/src/collections/vec_deque/mod.rs
+++ b/library/alloc/src/collections/vec_deque/mod.rs
@@ -12,11 +12,17 @@ use core::fmt;
use core::hash::{Hash, Hasher};
use core::iter::{repeat_with, FromIterator};
use core::marker::PhantomData;
-use core::mem::{self, ManuallyDrop, MaybeUninit};
+use core::mem::{ManuallyDrop, MaybeUninit, SizedTypeProperties};
use core::ops::{Index, IndexMut, Range, RangeBounds};
use core::ptr::{self, NonNull};
use core::slice;
+// This is used in a bunch of intra-doc links.
+// FIXME: For some reason, `#[cfg(doc)]` wasn't sufficient, resulting in
+// failures in linkchecker even though rustdoc built the docs just fine.
+#[allow(unused_imports)]
+use core::mem;
+
use crate::alloc::{Allocator, Global};
use crate::collections::TryReserveError;
use crate::collections::TryReserveErrorKind;
@@ -177,7 +183,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// Marginally more convenient
#[inline]
fn cap(&self) -> usize {
- if mem::size_of::<T>() == 0 {
+ if T::IS_ZST {
// For zero sized types, we are always at maximum capacity
MAXIMUM_ZST_CAPACITY
} else {
@@ -794,7 +800,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// in the given deque. The collection may reserve more space to speculatively avoid
/// frequent reallocations. After calling `try_reserve`, capacity will be
/// greater than or equal to `self.len() + additional` if it returns
- /// `Ok(())`. Does nothing if capacity is already sufficient.
+ /// `Ok(())`. Does nothing if capacity is already sufficient. This method
+ /// preserves the contents even if an error occurs.
///
/// # Errors
///
@@ -1333,9 +1340,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
// it. We do not write to `self` nor reborrow to a mutable reference.
// Hence the raw pointer we created above, for `deque`, remains valid.
let ring = self.buffer_as_slice();
- let iter = Iter::new(ring, drain_tail, drain_head);
- Drain::new(drain_head, head, iter, deque)
+ Drain::new(drain_head, head, ring, drain_tail, drain_head, deque)
}
}
@@ -2447,8 +2453,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
let mut right_offset = 0;
for i in left_edge..right_edge {
right_offset = (i - left_edge) % (cap - right_edge);
- let src: isize = (right_edge + right_offset) as isize;
- ptr::swap(buf.add(i), buf.offset(src));
+ let src = right_edge + right_offset;
+ ptr::swap(buf.add(i), buf.add(src));
}
let n_ops = right_edge - left_edge;
left_edge += n_ops;
@@ -3038,7 +3044,7 @@ impl<T, A: Allocator> From<Vec<T, A>> for VecDeque<T, A> {
/// `Vec<T>` came from `From<VecDeque<T>>` and hasn't been reallocated.
fn from(mut other: Vec<T, A>) -> Self {
let len = other.len();
- if mem::size_of::<T>() == 0 {
+ if T::IS_ZST {
// There's no actual allocation for ZSTs to worry about capacity,
// but `VecDeque` can't handle as much length as `Vec`.
assert!(len < MAXIMUM_ZST_CAPACITY, "capacity overflow");
@@ -3124,7 +3130,7 @@ impl<T, const N: usize> From<[T; N]> for VecDeque<T> {
fn from(arr: [T; N]) -> Self {
let mut deq = VecDeque::with_capacity(N);
let arr = ManuallyDrop::new(arr);
- if mem::size_of::<T>() != 0 {
+ if !<T>::IS_ZST {
// SAFETY: VecDeque::with_capacity ensures that there is enough capacity.
unsafe {
ptr::copy_nonoverlapping(arr.as_ptr(), deq.ptr(), N);
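
For reference: `T::IS_ZST` comes from the unstable `core::mem::SizedTypeProperties` trait enabled later in this patch. A stand-in sketch (trait and names hypothetical) of what the replaced `size_of` checks compute:

```rust
// Not the real core::mem::SizedTypeProperties; just an illustration of the
// associated-const shorthand replacing `mem::size_of::<T>() == 0` above.
trait SizedProps: Sized {
    const IS_ZST: bool = std::mem::size_of::<Self>() == 0;
}

impl<T> SizedProps for T {}

fn main() {
    assert!(<() as SizedProps>::IS_ZST);
    assert!(<[u8; 0] as SizedProps>::IS_ZST);
    assert!(!<u64 as SizedProps>::IS_ZST);
}
```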
diff --git a/library/alloc/src/ffi/c_str.rs b/library/alloc/src/ffi/c_str.rs
index ae61b1f1e..11bd4c4dc 100644
--- a/library/alloc/src/ffi/c_str.rs
+++ b/library/alloc/src/ffi/c_str.rs
@@ -436,9 +436,9 @@ impl CString {
///
/// unsafe {
/// assert_eq!(b'f', *ptr as u8);
- /// assert_eq!(b'o', *ptr.offset(1) as u8);
- /// assert_eq!(b'o', *ptr.offset(2) as u8);
- /// assert_eq!(b'\0', *ptr.offset(3) as u8);
+ /// assert_eq!(b'o', *ptr.add(1) as u8);
+ /// assert_eq!(b'o', *ptr.add(2) as u8);
+ /// assert_eq!(b'\0', *ptr.add(3) as u8);
///
/// // retake pointer to free memory
/// let _ = CString::from_raw(ptr);
@@ -1121,3 +1121,26 @@ impl CStr {
CString::from(self)
}
}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl core::error::Error for NulError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "nul byte found in data"
+ }
+}
+
+#[stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")]
+impl core::error::Error for FromVecWithNulError {}
+
+#[stable(feature = "cstring_into", since = "1.7.0")]
+impl core::error::Error for IntoStringError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "C string contained non-utf8 bytes"
+ }
+
+ fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
+ Some(self.__source())
+ }
+}
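
A usage sketch (not part of the patch) of the `IntoStringError` impl above, where `source()` surfaces the underlying `Utf8Error`:

```rust
use std::error::Error;
use std::ffi::CString;

fn main() {
    // Bytes with no interior NUL, but not valid UTF-8.
    let c = CString::new(vec![0xf0_u8, 0x28, 0x8c, 0x28]).unwrap();
    let err = c.into_string().unwrap_err();
    // The impl above wires source() to the underlying Utf8Error.
    assert!(err.source().is_some());
}
```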
diff --git a/library/alloc/src/fmt.rs b/library/alloc/src/fmt.rs
index ed398b566..799ce9d5d 100644
--- a/library/alloc/src/fmt.rs
+++ b/library/alloc/src/fmt.rs
@@ -327,7 +327,7 @@
//! - `text` must not contain any `'{'` or `'}'` characters,
//! - `ws` is any character for which [`char::is_whitespace`] returns `true`, has no semantic
//! meaning and is completely optional,
-//! - `integer` is a decimal integer that may contain leading zeroes and
+//! - `integer` is a decimal integer that may contain leading zeroes and must fit into a `usize` and
//! - `identifier` is an `IDENTIFIER_OR_KEYWORD` (not an `IDENTIFIER`) as defined by the [Rust language reference](https://doc.rust-lang.org/reference/identifiers.html).
//!
//! # Formatting traits
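
A short illustration (not part of the patch) of the `integer` and `identifier` argument forms described by this grammar:

```rust
fn main() {
    // `integer` selects a positional argument by index; `identifier` selects a named one.
    let s = format!("{0}-{1}, {name}", "a", "b", name = "c");
    assert_eq!(s, "a-b, c");
}
```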
diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs
index 8b6f40548..ce36b116f 100644
--- a/library/alloc/src/lib.rs
+++ b/library/alloc/src/lib.rs
@@ -56,10 +56,6 @@
//! [`Rc`]: rc
//! [`RefCell`]: core::cell
-// To run liballoc tests without x.py without ending up with two copies of liballoc, Miri needs to be
-// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
-// rustc itself never sets the feature, so this line has no affect there.
-#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
#![allow(unused_attributes)]
#![stable(feature = "alloc", since = "1.36.0")]
#![doc(
@@ -73,10 +69,16 @@
any(not(feature = "miri-test-libstd"), test, doctest),
no_global_oom_handling,
not(no_global_oom_handling),
+ not(no_rc),
+ not(no_sync),
target_has_atomic = "ptr"
))]
#![no_std]
#![needs_allocator]
+// To run liballoc tests without x.py without ending up with two copies of liballoc, Miri needs to be
+// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
+// rustc itself never sets the feature, so this line has no effect there.
+#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
//
// Lints:
#![deny(unsafe_op_in_unsafe_fn)]
@@ -97,7 +99,7 @@
#![feature(coerce_unsized)]
#![cfg_attr(not(no_global_oom_handling), feature(const_alloc_error))]
#![feature(const_box)]
-#![cfg_attr(not(no_global_oom_handling), feature(const_btree_new))]
+#![cfg_attr(not(no_global_oom_handling), feature(const_btree_len))]
#![feature(const_cow_is_borrowed)]
#![feature(const_convert)]
#![feature(const_size_of_val)]
@@ -109,8 +111,11 @@
#![feature(core_intrinsics)]
#![feature(const_eval_select)]
#![feature(const_pin)]
+#![feature(const_waker)]
#![feature(cstr_from_bytes_until_nul)]
#![feature(dispatch_from_dyn)]
+#![feature(error_generic_member_access)]
+#![feature(error_in_core)]
#![feature(exact_size_is_empty)]
#![feature(extend_one)]
#![feature(fmt_internals)]
@@ -120,18 +125,21 @@
#![feature(iter_advance_by)]
#![feature(iter_next_chunk)]
#![feature(layout_for_ptr)]
-#![feature(maybe_uninit_array_assume_init)]
#![feature(maybe_uninit_slice)]
#![feature(maybe_uninit_uninit_array)]
+#![feature(maybe_uninit_uninit_array_transpose)]
#![cfg_attr(test, feature(new_uninit))]
#![feature(nonnull_slice_from_raw_parts)]
#![feature(pattern)]
#![feature(pointer_byte_offsets)]
+#![feature(provide_any)]
#![feature(ptr_internals)]
#![feature(ptr_metadata)]
#![feature(ptr_sub_ptr)]
#![feature(receiver_trait)]
+#![feature(saturating_int_impl)]
#![feature(set_ptr_value)]
+#![feature(sized_type_properties)]
#![feature(slice_from_ptr_range)]
#![feature(slice_group_by)]
#![feature(slice_ptr_get)]
@@ -145,6 +153,7 @@
#![feature(unchecked_math)]
#![feature(unicode_internals)]
#![feature(unsize)]
+#![feature(utf8_chunks)]
#![feature(std_internals)]
//
// Language features:
@@ -164,7 +173,6 @@
#![cfg_attr(not(test), feature(generator_trait))]
#![feature(hashmap_internals)]
#![feature(lang_items)]
-#![feature(let_else)]
#![feature(min_specialization)]
#![feature(negative_impls)]
#![feature(never_type)]
@@ -178,6 +186,7 @@
#![feature(unboxed_closures)]
#![feature(unsized_fn_params)]
#![feature(c_unwind)]
+#![feature(with_negative_coherence)]
//
// Rustdoc features:
#![feature(doc_cfg)]
@@ -218,16 +227,17 @@ mod boxed {
}
pub mod borrow;
pub mod collections;
-#[cfg(not(no_global_oom_handling))]
+#[cfg(all(not(no_rc), not(no_sync), not(no_global_oom_handling)))]
pub mod ffi;
pub mod fmt;
+#[cfg(not(no_rc))]
pub mod rc;
pub mod slice;
pub mod str;
pub mod string;
-#[cfg(target_has_atomic = "ptr")]
+#[cfg(all(not(no_rc), not(no_sync), target_has_atomic = "ptr"))]
pub mod sync;
-#[cfg(all(not(no_global_oom_handling), target_has_atomic = "ptr"))]
+#[cfg(all(not(no_global_oom_handling), not(no_rc), not(no_sync), target_has_atomic = "ptr"))]
pub mod task;
#[cfg(test)]
mod tests;
diff --git a/library/alloc/src/macros.rs b/library/alloc/src/macros.rs
index 88eb6aa7a..5198bf297 100644
--- a/library/alloc/src/macros.rs
+++ b/library/alloc/src/macros.rs
@@ -107,6 +107,8 @@ macro_rules! vec {
/// format!("test");
/// format!("hello {}", "world!");
/// format!("x = {}, y = {y}", 10, y = 30);
+/// let (x, y) = (1, 2);
+/// format!("{x} + {y} = 3");
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
diff --git a/library/alloc/src/raw_vec.rs b/library/alloc/src/raw_vec.rs
index b0f4529ab..5a10121bb 100644
--- a/library/alloc/src/raw_vec.rs
+++ b/library/alloc/src/raw_vec.rs
@@ -3,7 +3,7 @@
use core::alloc::LayoutError;
use core::cmp;
use core::intrinsics;
-use core::mem::{self, ManuallyDrop, MaybeUninit};
+use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
use core::ops::Drop;
use core::ptr::{self, NonNull, Unique};
use core::slice;
@@ -168,7 +168,7 @@ impl<T, A: Allocator> RawVec<T, A> {
#[cfg(not(no_global_oom_handling))]
fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self {
// Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
- if mem::size_of::<T>() == 0 || capacity == 0 {
+ if T::IS_ZST || capacity == 0 {
Self::new_in(alloc)
} else {
// We avoid `unwrap_or_else` here because it bloats the amount of
@@ -229,7 +229,7 @@ impl<T, A: Allocator> RawVec<T, A> {
/// This will always be `usize::MAX` if `T` is zero-sized.
#[inline(always)]
pub fn capacity(&self) -> usize {
- if mem::size_of::<T>() == 0 { usize::MAX } else { self.cap }
+ if T::IS_ZST { usize::MAX } else { self.cap }
}
/// Returns a shared reference to the allocator backing this `RawVec`.
@@ -238,7 +238,7 @@ impl<T, A: Allocator> RawVec<T, A> {
}
fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
- if mem::size_of::<T>() == 0 || self.cap == 0 {
+ if T::IS_ZST || self.cap == 0 {
None
} else {
// We have an allocated chunk of memory, so we can bypass runtime
@@ -380,7 +380,7 @@ impl<T, A: Allocator> RawVec<T, A> {
// This is ensured by the calling contexts.
debug_assert!(additional > 0);
- if mem::size_of::<T>() == 0 {
+ if T::IS_ZST {
// Since we return a capacity of `usize::MAX` when `elem_size` is
// 0, getting to here necessarily means the `RawVec` is overfull.
return Err(CapacityOverflow.into());
@@ -406,7 +406,7 @@ impl<T, A: Allocator> RawVec<T, A> {
// `grow_amortized`, but this method is usually instantiated less often so
// it's less critical.
fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
- if mem::size_of::<T>() == 0 {
+ if T::IS_ZST {
// Since we return a capacity of `usize::MAX` when the type size is
// 0, getting to here necessarily means the `RawVec` is overfull.
return Err(CapacityOverflow.into());
diff --git a/library/alloc/src/rc.rs b/library/alloc/src/rc.rs
index b89b03683..006d813e5 100644
--- a/library/alloc/src/rc.rs
+++ b/library/alloc/src/rc.rs
@@ -1110,8 +1110,8 @@ impl<T: ?Sized> Rc<T> {
#[inline]
#[stable(feature = "ptr_eq", since = "1.17.0")]
- /// Returns `true` if the two `Rc`s point to the same allocation
- /// (in a vein similar to [`ptr::eq`]).
+ /// Returns `true` if the two `Rc`s point to the same allocation in a vein similar to
+ /// [`ptr::eq`]. See [that function][`ptr::eq`] for caveats when comparing `dyn Trait` pointers.
///
/// # Examples
///
@@ -1142,7 +1142,7 @@ impl<T: Clone> Rc<T> {
/// be cloned.
///
/// See also [`get_mut`], which will fail rather than cloning the inner value
- /// or diassociating [`Weak`] pointers.
+ /// or disassociating [`Weak`] pointers.
///
/// [`clone`]: Clone::clone
/// [`get_mut`]: Rc::get_mut
@@ -1386,7 +1386,7 @@ impl<T: ?Sized> Rc<T> {
Self::allocate_for_layout(
Layout::for_value(&*ptr),
|layout| Global.allocate(layout),
- |mem| mem.with_metadata_of(ptr as *mut RcBox<T>),
+ |mem| mem.with_metadata_of(ptr as *const RcBox<T>),
)
}
}
@@ -2419,9 +2419,9 @@ impl<T: ?Sized> Weak<T> {
}
}
- /// Returns `true` if the two `Weak`s point to the same allocation (similar to
- /// [`ptr::eq`]), or if both don't point to any allocation
- /// (because they were created with `Weak::new()`).
+ /// Returns `true` if the two `Weak`s point to the same allocation similar to [`ptr::eq`], or if
+ /// both don't point to any allocation (because they were created with `Weak::new()`). See [that
+ /// function][`ptr::eq`] for caveats when comparing `dyn Trait` pointers.
///
/// # Notes
///
diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs
index 63d4d9452..a5e7bf2a1 100644
--- a/library/alloc/src/slice.rs
+++ b/library/alloc/src/slice.rs
@@ -1,82 +1,12 @@
-//! A dynamically-sized view into a contiguous sequence, `[T]`.
+//! Utilities for the slice primitive type.
//!
//! *[See also the slice primitive type](slice).*
//!
-//! Slices are a view into a block of memory represented as a pointer and a
-//! length.
+//! Most of the structs in this module are iterator types which can only be created
+//! using a certain function. For example, `slice.iter()` yields an [`Iter`].
//!
-//! ```
-//! // slicing a Vec
-//! let vec = vec![1, 2, 3];
-//! let int_slice = &vec[..];
-//! // coercing an array to a slice
-//! let str_slice: &[&str] = &["one", "two", "three"];
-//! ```
-//!
-//! Slices are either mutable or shared. The shared slice type is `&[T]`,
-//! while the mutable slice type is `&mut [T]`, where `T` represents the element
-//! type. For example, you can mutate the block of memory that a mutable slice
-//! points to:
-//!
-//! ```
-//! let x = &mut [1, 2, 3];
-//! x[1] = 7;
-//! assert_eq!(x, &[1, 7, 3]);
-//! ```
-//!
-//! Here are some of the things this module contains:
-//!
-//! ## Structs
-//!
-//! There are several structs that are useful for slices, such as [`Iter`], which
-//! represents iteration over a slice.
-//!
-//! ## Trait Implementations
-//!
-//! There are several implementations of common traits for slices. Some examples
-//! include:
-//!
-//! * [`Clone`]
-//! * [`Eq`], [`Ord`] - for slices whose element type are [`Eq`] or [`Ord`].
-//! * [`Hash`] - for slices whose element type is [`Hash`].
-//!
-//! ## Iteration
-//!
-//! The slices implement `IntoIterator`. The iterator yields references to the
-//! slice elements.
-//!
-//! ```
-//! let numbers = &[0, 1, 2];
-//! for n in numbers {
-//! println!("{n} is a number!");
-//! }
-//! ```
-//!
-//! The mutable slice yields mutable references to the elements:
-//!
-//! ```
-//! let mut scores = [7, 8, 9];
-//! for score in &mut scores[..] {
-//! *score += 1;
-//! }
-//! ```
-//!
-//! This iterator yields mutable references to the slice's elements, so while
-//! the element type of the slice is `i32`, the element type of the iterator is
-//! `&mut i32`.
-//!
-//! * [`.iter`] and [`.iter_mut`] are the explicit methods to return the default
-//! iterators.
-//! * Further methods that return iterators are [`.split`], [`.splitn`],
-//! [`.chunks`], [`.windows`] and more.
-//!
-//! [`Hash`]: core::hash::Hash
-//! [`.iter`]: slice::iter
-//! [`.iter_mut`]: slice::iter_mut
-//! [`.split`]: slice::split
-//! [`.splitn`]: slice::splitn
-//! [`.chunks`]: slice::chunks
-//! [`.windows`]: slice::windows
+//! A few functions are provided to create a slice from a value reference
+//! or from a raw pointer.
#![stable(feature = "rust1", since = "1.0.0")]
// Many of the usings in this module are only used in the test configuration.
// It's cleaner to just turn off the unused_imports warning than to fix them.
@@ -86,9 +16,7 @@ use core::borrow::{Borrow, BorrowMut};
#[cfg(not(no_global_oom_handling))]
use core::cmp::Ordering::{self, Less};
#[cfg(not(no_global_oom_handling))]
-use core::mem;
-#[cfg(not(no_global_oom_handling))]
-use core::mem::size_of;
+use core::mem::{self, SizedTypeProperties};
#[cfg(not(no_global_oom_handling))]
use core::ptr;
@@ -275,7 +203,7 @@ impl<T> [T] {
where
T: Ord,
{
- merge_sort(self, |a, b| a.lt(b));
+ merge_sort(self, T::lt);
}
/// Sorts the slice with a comparator function.
@@ -1024,7 +952,7 @@ where
// Consume the greater side.
// If equal, prefer the right run to maintain stability.
unsafe {
- let to_copy = if is_less(&*right.offset(-1), &*left.offset(-1)) {
+ let to_copy = if is_less(&*right.sub(1), &*left.sub(1)) {
decrement_and_get(left)
} else {
decrement_and_get(right)
@@ -1038,12 +966,12 @@ where
unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
let old = *ptr;
- *ptr = unsafe { ptr.offset(1) };
+ *ptr = unsafe { ptr.add(1) };
old
}
unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
- *ptr = unsafe { ptr.offset(-1) };
+ *ptr = unsafe { ptr.sub(1) };
*ptr
}
@@ -1088,7 +1016,7 @@ where
const MIN_RUN: usize = 10;
// Sorting has no meaningful behavior on zero-sized types.
- if size_of::<T>() == 0 {
+ if T::IS_ZST {
return;
}
diff --git a/library/alloc/src/str.rs b/library/alloc/src/str.rs
index d5ed2c4ad..b28d20cda 100644
--- a/library/alloc/src/str.rs
+++ b/library/alloc/src/str.rs
@@ -1,26 +1,6 @@
-//! Unicode string slices.
+//! Utilities for the `str` primitive type.
//!
//! *[See also the `str` primitive type](str).*
-//!
-//! The `&str` type is one of the two main string types, the other being `String`.
-//! Unlike its `String` counterpart, its contents are borrowed.
-//!
-//! # Basic Usage
-//!
-//! A basic string declaration of `&str` type:
-//!
-//! ```
-//! let hello_world = "Hello, World!";
-//! ```
-//!
-//! Here we have declared a string literal, also known as a string slice.
-//! String literals have a static lifetime, which means the string `hello_world`
-//! is guaranteed to be valid for the duration of the entire program.
-//! We can explicitly specify `hello_world`'s lifetime as well:
-//!
-//! ```
-//! let hello_world: &'static str = "Hello, world!";
-//! ```
#![stable(feature = "rust1", since = "1.0.0")]
// Many of the usings in this module are only used in the test configuration.
@@ -71,6 +51,8 @@ pub use core::str::{RSplit, Split};
pub use core::str::{RSplitN, SplitN};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{RSplitTerminator, SplitTerminator};
+#[unstable(feature = "utf8_chunks", issue = "99543")]
+pub use core::str::{Utf8Chunk, Utf8Chunks};
/// Note: `str` in `Concat<str>` is not meaningful here.
/// This type parameter of the trait only exists to enable another impl.
diff --git a/library/alloc/src/string.rs b/library/alloc/src/string.rs
index a5118e533..c436adf70 100644
--- a/library/alloc/src/string.rs
+++ b/library/alloc/src/string.rs
@@ -44,6 +44,7 @@
#[cfg(not(no_global_oom_handling))]
use core::char::{decode_utf16, REPLACEMENT_CHARACTER};
+use core::error::Error;
use core::fmt;
use core::hash;
use core::iter::FusedIterator;
@@ -58,15 +59,15 @@ use core::ops::Bound::{Excluded, Included, Unbounded};
use core::ops::{self, Index, IndexMut, Range, RangeBounds};
use core::ptr;
use core::slice;
-#[cfg(not(no_global_oom_handling))]
-use core::str::lossy;
use core::str::pattern::Pattern;
+#[cfg(not(no_global_oom_handling))]
+use core::str::Utf8Chunks;
#[cfg(not(no_global_oom_handling))]
use crate::borrow::{Cow, ToOwned};
use crate::boxed::Box;
use crate::collections::TryReserveError;
-use crate::str::{self, Chars, Utf8Error};
+use crate::str::{self, from_utf8_unchecked_mut, Chars, Utf8Error};
#[cfg(not(no_global_oom_handling))]
use crate::str::{from_boxed_utf8_unchecked, FromStr};
use crate::vec::Vec;
@@ -628,11 +629,11 @@ impl String {
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn from_utf8_lossy(v: &[u8]) -> Cow<'_, str> {
- let mut iter = lossy::Utf8Lossy::from_bytes(v).chunks();
+ let mut iter = Utf8Chunks::new(v);
let first_valid = if let Some(chunk) = iter.next() {
- let lossy::Utf8LossyChunk { valid, broken } = chunk;
- if broken.is_empty() {
+ let valid = chunk.valid();
+ if chunk.invalid().is_empty() {
debug_assert_eq!(valid.len(), v.len());
return Cow::Borrowed(valid);
}
@@ -647,9 +648,9 @@ impl String {
res.push_str(first_valid);
res.push_str(REPLACEMENT);
- for lossy::Utf8LossyChunk { valid, broken } in iter {
- res.push_str(valid);
- if !broken.is_empty() {
+ for chunk in iter {
+ res.push_str(chunk.valid());
+ if !chunk.invalid().is_empty() {
res.push_str(REPLACEMENT);
}
}
@@ -1080,7 +1081,8 @@ impl String {
/// current length. The allocator may reserve more space to speculatively
/// avoid frequent allocations. After calling `try_reserve`, capacity will be
/// greater than or equal to `self.len() + additional` if it returns
- /// `Ok(())`. Does nothing if capacity is already sufficient.
+ /// `Ok(())`. Does nothing if capacity is already sufficient. This method
+ /// preserves the contents even if an error occurs.
///
/// # Errors
///
@@ -1847,6 +1849,35 @@ impl String {
let slice = self.vec.into_boxed_slice();
unsafe { from_boxed_utf8_unchecked(slice) }
}
+
+ /// Consumes and leaks the `String`, returning a mutable reference to the contents,
+ /// `&'static mut str`.
+ ///
+ /// This is mainly useful for data that lives for the remainder of
+ /// the program's life. Dropping the returned reference will cause a memory
+ /// leak.
+ ///
+ /// It does not reallocate or shrink the `String`,
+ /// so the leaked allocation may include unused capacity that is not part
+ /// of the returned slice.
+ ///
+ /// # Examples
+ ///
+ /// Simple usage:
+ ///
+ /// ```
+ /// #![feature(string_leak)]
+ ///
+ /// let x = String::from("bucket");
+ /// let static_ref: &'static mut str = x.leak();
+ /// assert_eq!(static_ref, "bucket");
+ /// ```
+ #[unstable(feature = "string_leak", issue = "102929")]
+ #[inline]
+ pub fn leak(self) -> &'static mut str {
+ let slice = self.vec.leak();
+ unsafe { from_utf8_unchecked_mut(slice) }
+ }
}
impl FromUtf8Error {
@@ -1938,6 +1969,22 @@ impl fmt::Display for FromUtf16Error {
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Error for FromUtf8Error {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "invalid utf-8"
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Error for FromUtf16Error {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "invalid utf-16"
+ }
+}
+
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
impl Clone for String {
diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs
index 4c03cc3ed..81cd77074 100644
--- a/library/alloc/src/sync.rs
+++ b/library/alloc/src/sync.rs
@@ -3,6 +3,10 @@
//! Thread-safe reference-counting pointers.
//!
//! See the [`Arc<T>`][Arc] documentation for more details.
+//!
+//! **Note**: This module is only available on platforms that support atomic
+//! loads and stores of pointers. This may be detected at compile time using
+//! `#[cfg(target_has_atomic = "ptr")]`.
use core::any::Any;
use core::borrow;
@@ -82,6 +86,11 @@ macro_rules! acquire {
/// [`Mutex`][mutex], [`RwLock`][rwlock], or one of the [`Atomic`][atomic]
/// types.
///
+/// **Note**: This type is only available on platforms that support atomic
+/// loads and stores of pointers, which includes all platforms that support
+/// the `std` crate but not all those which only support [`alloc`](crate).
+/// This may be detected at compile time using `#[cfg(target_has_atomic = "ptr")]`.
+///
/// ## Thread Safety
///
/// Unlike [`Rc<T>`], `Arc<T>` uses atomic operations for its reference
@@ -1108,8 +1117,8 @@ impl<T: ?Sized> Arc<T> {
drop(Weak { ptr: self.ptr });
}
- /// Returns `true` if the two `Arc`s point to the same allocation
- /// (in a vein similar to [`ptr::eq`]).
+ /// Returns `true` if the two `Arc`s point to the same allocation in a vein similar to
+ /// [`ptr::eq`]. See [that function][`ptr::eq`] for caveats when comparing `dyn Trait` pointers.
///
/// # Examples
///
@@ -1195,7 +1204,7 @@ impl<T: ?Sized> Arc<T> {
Self::allocate_for_layout(
Layout::for_value(&*ptr),
|layout| Global.allocate(layout),
- |mem| mem.with_metadata_of(ptr as *mut ArcInner<T>),
+ |mem| mem.with_metadata_of(ptr as *const ArcInner<T>),
)
}
}
@@ -1980,33 +1989,26 @@ impl<T: ?Sized> Weak<T> {
// We use a CAS loop to increment the strong count instead of a
// fetch_add as this function should never take the reference count
// from zero to one.
- let inner = self.inner()?;
-
- // Relaxed load because any write of 0 that we can observe
- // leaves the field in a permanently zero state (so a
- // "stale" read of 0 is fine), and any other value is
- // confirmed via the CAS below.
- let mut n = inner.strong.load(Relaxed);
-
- loop {
- if n == 0 {
- return None;
- }
-
- // See comments in `Arc::clone` for why we do this (for `mem::forget`).
- if n > MAX_REFCOUNT {
- abort();
- }
-
+ self.inner()?
+ .strong
// Relaxed is fine for the failure case because we don't have any expectations about the new state.
// Acquire is necessary for the success case to synchronise with `Arc::new_cyclic`, when the inner
// value can be initialized after `Weak` references have already been created. In that case, we
// expect to observe the fully initialized value.
- match inner.strong.compare_exchange_weak(n, n + 1, Acquire, Relaxed) {
- Ok(_) => return Some(unsafe { Arc::from_inner(self.ptr) }), // null checked above
- Err(old) => n = old,
- }
- }
+ .fetch_update(Acquire, Relaxed, |n| {
+                // Any write of 0 we can observe leaves the field in a permanently zero state.
+ if n == 0 {
+ return None;
+ }
+ // See comments in `Arc::clone` for why we do this (for `mem::forget`).
+ if n > MAX_REFCOUNT {
+ abort();
+ }
+ Some(n + 1)
+ })
+ .ok()
+ // null checked above
+ .map(|_| unsafe { Arc::from_inner(self.ptr) })
}
/// Gets the number of strong (`Arc`) pointers pointing to this allocation.
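
For context, a sketch (not part of the patch) of the `fetch_update` pattern that replaces the manual CAS loop: the closure refuses to move the count from zero to one, and the orderings match the ones discussed above (`try_increment` is a hypothetical name; the `MAX_REFCOUNT` abort is omitted):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Increment a strong count unless it is already zero.
fn try_increment(strong: &AtomicUsize) -> Option<usize> {
    strong
        .fetch_update(Ordering::Acquire, Ordering::Relaxed, |n| {
            if n == 0 { None } else { Some(n + 1) }
        })
        .ok()
}

fn main() {
    let count = AtomicUsize::new(1);
    assert_eq!(try_increment(&count), Some(1)); // previous value; count is now 2
    count.store(0, Ordering::Relaxed);
    assert_eq!(try_increment(&count), None);    // refuses to resurrect from zero
}
```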
@@ -2067,9 +2069,9 @@ impl<T: ?Sized> Weak<T> {
}
}
- /// Returns `true` if the two `Weak`s point to the same allocation (similar to
- /// [`ptr::eq`]), or if both don't point to any allocation
- /// (because they were created with `Weak::new()`).
+ /// Returns `true` if the two `Weak`s point to the same allocation similar to [`ptr::eq`], or if
+ /// both don't point to any allocation (because they were created with `Weak::new()`). See [that
+ /// function][`ptr::eq`] for caveats when comparing `dyn Trait` pointers.
///
/// # Notes
///
@@ -2763,3 +2765,24 @@ fn data_offset_align(align: usize) -> usize {
let layout = Layout::new::<ArcInner<()>>();
layout.size() + layout.padding_needed_for(align)
}
+
+#[stable(feature = "arc_error", since = "1.52.0")]
+impl<T: core::error::Error + ?Sized> core::error::Error for Arc<T> {
+ #[allow(deprecated, deprecated_in_future)]
+ fn description(&self) -> &str {
+ core::error::Error::description(&**self)
+ }
+
+ #[allow(deprecated)]
+ fn cause(&self) -> Option<&dyn core::error::Error> {
+ core::error::Error::cause(&**self)
+ }
+
+ fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
+ core::error::Error::source(&**self)
+ }
+
+ fn provide<'a>(&'a self, req: &mut core::any::Demand<'a>) {
+ core::error::Error::provide(&**self, req);
+ }
+}
diff --git a/library/alloc/src/sync/tests.rs b/library/alloc/src/sync/tests.rs
index 202d0e7f0..0fae8953a 100644
--- a/library/alloc/src/sync/tests.rs
+++ b/library/alloc/src/sync/tests.rs
@@ -618,3 +618,22 @@ fn test_arc_cyclic_two_refs() {
assert_eq!(Arc::strong_count(&two_refs), 3);
assert_eq!(Arc::weak_count(&two_refs), 2);
}
+
+/// Test for Arc::drop bug (https://github.com/rust-lang/rust/issues/55005)
+#[test]
+#[cfg(miri)] // relies on Stacked Borrows in Miri
+fn arc_drop_dereferenceable_race() {
+ // The bug seems to take up to 700 iterations to reproduce with most seeds (tested 0-9).
+ for _ in 0..750 {
+ let arc_1 = Arc::new(());
+ let arc_2 = arc_1.clone();
+ let thread = thread::spawn(|| drop(arc_2));
+ // Spin a bit; makes the race more likely to appear
+ let mut i = 0;
+ while i < 256 {
+ i += 1;
+ }
+ drop(arc_1);
+ thread.join().unwrap();
+ }
+}
diff --git a/library/alloc/src/task.rs b/library/alloc/src/task.rs
index 528ee4ff1..9d8e309a9 100644
--- a/library/alloc/src/task.rs
+++ b/library/alloc/src/task.rs
@@ -1,5 +1,11 @@
#![stable(feature = "wake_trait", since = "1.51.0")]
+
//! Types and Traits for working with asynchronous tasks.
+//!
+//! **Note**: This module is only available on platforms that support atomic
+//! loads and stores of pointers. This may be detected at compile time using
+//! `#[cfg(target_has_atomic = "ptr")]`.
+
use core::mem::ManuallyDrop;
use core::task::{RawWaker, RawWakerVTable, Waker};
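
A small sketch (not part of the patch) of gating waker construction on atomic pointer support, using the `Wake` trait this module provides (shown via its `std` re-export):

```rust
use std::sync::Arc;
use std::task::{Wake, Waker};

// A no-op waker built with the Wake trait.
struct NoopWaker;

impl Wake for NoopWaker {
    fn wake(self: Arc<Self>) {}
}

// The cfg from the note above: only compile this where pointer atomics exist.
#[cfg(target_has_atomic = "ptr")]
fn noop_waker() -> Waker {
    Waker::from(Arc::new(NoopWaker))
}

fn main() {
    #[cfg(target_has_atomic = "ptr")]
    {
        let waker = noop_waker();
        waker.wake_by_ref();
    }
}
```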
diff --git a/library/alloc/src/vec/drain.rs b/library/alloc/src/vec/drain.rs
index 5cdee0bd4..541f99bcf 100644
--- a/library/alloc/src/vec/drain.rs
+++ b/library/alloc/src/vec/drain.rs
@@ -1,7 +1,7 @@
use crate::alloc::{Allocator, Global};
use core::fmt;
use core::iter::{FusedIterator, TrustedLen};
-use core::mem;
+use core::mem::{self, ManuallyDrop, SizedTypeProperties};
use core::ptr::{self, NonNull};
use core::slice::{self};
@@ -65,6 +65,77 @@ impl<'a, T, A: Allocator> Drain<'a, T, A> {
pub fn allocator(&self) -> &A {
unsafe { self.vec.as_ref().allocator() }
}
+
+ /// Keep unyielded elements in the source `Vec`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(drain_keep_rest)]
+ ///
+ /// let mut vec = vec!['a', 'b', 'c'];
+ /// let mut drain = vec.drain(..);
+ ///
+ /// assert_eq!(drain.next().unwrap(), 'a');
+ ///
+ /// // This call keeps 'b' and 'c' in the vec.
+ /// drain.keep_rest();
+ ///
+    /// // If we had not called `keep_rest()`,
+ /// // `vec` would be empty.
+ /// assert_eq!(vec, ['b', 'c']);
+ /// ```
+ #[unstable(feature = "drain_keep_rest", issue = "101122")]
+ pub fn keep_rest(self) {
+ // At this moment layout looks like this:
+ //
+ // [head] [yielded by next] [unyielded] [yielded by next_back] [tail]
+ // ^-- start \_________/-- unyielded_len \____/-- self.tail_len
+ // ^-- unyielded_ptr ^-- tail
+ //
+        // Normally the `Drop` impl would drop [unyielded] and then move [tail] to the `start`.
+ // Here we want to
+ // 1. Move [unyielded] to `start`
+ // 2. Move [tail] to a new start at `start + len(unyielded)`
+ // 3. Update length of the original vec to `len(head) + len(unyielded) + len(tail)`
+ // a. In case of ZST, this is the only thing we want to do
+        // 4. Do *not* drop self, as everything is already in a consistent state; there is nothing to do
+ let mut this = ManuallyDrop::new(self);
+
+ unsafe {
+ let source_vec = this.vec.as_mut();
+
+ let start = source_vec.len();
+ let tail = this.tail_start;
+
+ let unyielded_len = this.iter.len();
+ let unyielded_ptr = this.iter.as_slice().as_ptr();
+
+ // ZSTs have no identity, so we don't need to move them around.
+ let needs_move = mem::size_of::<T>() != 0;
+
+ if needs_move {
+ let start_ptr = source_vec.as_mut_ptr().add(start);
+
+ // memmove back unyielded elements
+ if unyielded_ptr != start_ptr {
+ let src = unyielded_ptr;
+ let dst = start_ptr;
+
+ ptr::copy(src, dst, unyielded_len);
+ }
+
+ // memmove back untouched tail
+ if tail != (start + unyielded_len) {
+ let src = source_vec.as_ptr().add(tail);
+ let dst = start_ptr.add(unyielded_len);
+ ptr::copy(src, dst, this.tail_len);
+ }
+ }
+
+ source_vec.set_len(start + unyielded_len + this.tail_len);
+ }
+ }
}
#[stable(feature = "vec_drain_as_slice", since = "1.46.0")]
@@ -131,7 +202,7 @@ impl<T, A: Allocator> Drop for Drain<'_, T, A> {
let mut vec = self.vec;
- if mem::size_of::<T>() == 0 {
+ if T::IS_ZST {
// ZSTs have no identity, so we don't need to move them around, we only need to drop the correct amount.
// this can be achieved by manipulating the Vec length instead of moving values out from `iter`.
unsafe {
diff --git a/library/alloc/src/vec/drain_filter.rs b/library/alloc/src/vec/drain_filter.rs
index 3c37c92ae..8c03f1692 100644
--- a/library/alloc/src/vec/drain_filter.rs
+++ b/library/alloc/src/vec/drain_filter.rs
@@ -1,6 +1,7 @@
use crate::alloc::{Allocator, Global};
-use core::ptr::{self};
-use core::slice::{self};
+use core::mem::{self, ManuallyDrop};
+use core::ptr;
+use core::slice;
use super::Vec;
@@ -54,6 +55,61 @@ where
pub fn allocator(&self) -> &A {
self.vec.allocator()
}
+
+ /// Keep unyielded elements in the source `Vec`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(drain_filter)]
+ /// #![feature(drain_keep_rest)]
+ ///
+ /// let mut vec = vec!['a', 'b', 'c'];
+ /// let mut drain = vec.drain_filter(|_| true);
+ ///
+ /// assert_eq!(drain.next().unwrap(), 'a');
+ ///
+ /// // This call keeps 'b' and 'c' in the vec.
+ /// drain.keep_rest();
+ ///
+    /// // If we had not called `keep_rest()`,
+ /// // `vec` would be empty.
+ /// assert_eq!(vec, ['b', 'c']);
+ /// ```
+ #[unstable(feature = "drain_keep_rest", issue = "101122")]
+ pub fn keep_rest(self) {
+ // At this moment layout looks like this:
+ //
+ // _____________________/-- old_len
+ // / \
+ // [kept] [yielded] [tail]
+ // \_______/ ^-- idx
+ // \-- del
+ //
+        // Normally the `Drop` impl would drop [tail] (via .for_each(drop), i.e. still calling `pred`)
+ //
+ // 1. Move [tail] after [kept]
+ // 2. Update length of the original vec to `old_len - del`
+ // a. In case of ZST, this is the only thing we want to do
+        // 3. Do *not* drop self, as everything is already in a consistent state; there is nothing to do
+ let mut this = ManuallyDrop::new(self);
+
+ unsafe {
+ // ZSTs have no identity, so we don't need to move them around.
+ let needs_move = mem::size_of::<T>() != 0;
+
+ if needs_move && this.idx < this.old_len && this.del > 0 {
+ let ptr = this.vec.as_mut_ptr();
+ let src = ptr.add(this.idx);
+ let dst = src.sub(this.del);
+ let tail_len = this.old_len - this.idx;
+ src.copy_to(dst, tail_len);
+ }
+
+ let new_len = this.old_len - this.del;
+ this.vec.set_len(new_len);
+ }
+ }
}
#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
diff --git a/library/alloc/src/vec/in_place_collect.rs b/library/alloc/src/vec/in_place_collect.rs
index 55dcb84ad..87d61deb1 100644
--- a/library/alloc/src/vec/in_place_collect.rs
+++ b/library/alloc/src/vec/in_place_collect.rs
@@ -55,6 +55,9 @@
//! This is handled by the [`InPlaceDrop`] guard for sink items (`U`) and by
//! [`vec::IntoIter::forget_allocation_drop_remaining()`] for remaining source items (`T`).
//!
+//! If dropping any remaining source item (`T`) panics then [`InPlaceDstBufDrop`] will handle dropping
+//! the already collected sink items (`U`) and freeing the allocation.
+//!
//! [`vec::IntoIter::forget_allocation_drop_remaining()`]: super::IntoIter::forget_allocation_drop_remaining()
//!
//! # O(1) collect
@@ -135,10 +138,10 @@
//! vec.truncate(write_idx);
//! ```
use core::iter::{InPlaceIterable, SourceIter, TrustedRandomAccessNoCoerce};
-use core::mem::{self, ManuallyDrop};
+use core::mem::{self, ManuallyDrop, SizedTypeProperties};
use core::ptr::{self};
-use super::{InPlaceDrop, SpecFromIter, SpecFromIterNested, Vec};
+use super::{InPlaceDrop, InPlaceDstBufDrop, SpecFromIter, SpecFromIterNested, Vec};
/// Specialization marker for collecting an iterator pipeline into a Vec while reusing the
/// source allocation, i.e. executing the pipeline in place.
@@ -154,7 +157,7 @@ where
default fn from_iter(mut iterator: I) -> Self {
// See "Layout constraints" section in the module documentation. We rely on const
// optimization here since these conditions currently cannot be expressed as trait bounds
- if mem::size_of::<T>() == 0
+ if T::IS_ZST
|| mem::size_of::<T>()
!= mem::size_of::<<<I as SourceIter>::Source as AsVecIntoIter>::Item>()
|| mem::align_of::<T>()
@@ -191,14 +194,17 @@ where
);
}
- // Drop any remaining values at the tail of the source but prevent drop of the allocation
- // itself once IntoIter goes out of scope.
- // If the drop panics then we also leak any elements collected into dst_buf.
+ // The ownership of the allocation and the new `T` values is temporarily moved into `dst_guard`.
+ // This is safe because `forget_allocation_drop_remaining` immediately forgets the allocation
+ // before any panic can occur in order to avoid any double free, and then proceeds to drop
+ // any remaining values at the tail of the source.
//
// Note: This access to the source wouldn't be allowed by the TrustedRandomIteratorNoCoerce
// contract (used by SpecInPlaceCollect below). But see the "O(1) collect" section in the
// module documentation for why this is ok anyway.
+ let dst_guard = InPlaceDstBufDrop { ptr: dst_buf, len, cap };
src.forget_allocation_drop_remaining();
+ mem::forget(dst_guard);
let vec = unsafe { Vec::from_raw_parts(dst_buf, len, cap) };
@@ -267,7 +273,7 @@ where
// one slot in the underlying storage will have been freed up and we can immediately
// write back the result.
unsafe {
- let dst = dst_buf.offset(i as isize);
+ let dst = dst_buf.add(i);
debug_assert!(dst as *const _ <= end, "InPlaceIterable contract violation");
ptr::write(dst, self.__iterator_get_unchecked(i));
// Since this executes user code which can panic we have to bump the pointer
diff --git a/library/alloc/src/vec/in_place_drop.rs b/library/alloc/src/vec/in_place_drop.rs
index 1b1ef9130..25ca33c6a 100644
--- a/library/alloc/src/vec/in_place_drop.rs
+++ b/library/alloc/src/vec/in_place_drop.rs
@@ -22,3 +22,18 @@ impl<T> Drop for InPlaceDrop<T> {
}
}
}
+
+// A helper struct for in-place collection that drops the destination allocation and elements,
+// to avoid leaking them if some other destructor panics.
+pub(super) struct InPlaceDstBufDrop<T> {
+ pub(super) ptr: *mut T,
+ pub(super) len: usize,
+ pub(super) cap: usize,
+}
+
+impl<T> Drop for InPlaceDstBufDrop<T> {
+ #[inline]
+ fn drop(&mut self) {
+ unsafe { super::Vec::from_raw_parts(self.ptr, self.len, self.cap) };
+ }
+}
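
For context, a sketch (not part of the patch) of the same guard-then-forget idiom in miniature; the names here are hypothetical:

```rust
// The guard's Drop cleans up only if the fallible step panics;
// on success we forget the guard so its Drop never runs.
struct CleanupGuard<'a>(&'a mut Vec<i32>);

impl Drop for CleanupGuard<'_> {
    fn drop(&mut self) {
        // Runs only on the panic path.
        self.0.clear();
    }
}

fn extend_guarded(dst: &mut Vec<i32>, items: impl IntoIterator<Item = i32>) {
    let guard = CleanupGuard(dst);
    for x in items {
        guard.0.push(x); // user code here could panic
    }
    // Everything is in a consistent state: disarm the guard.
    std::mem::forget(guard);
}

fn main() {
    let mut v = Vec::new();
    extend_guarded(&mut v, [1, 2, 3]);
    assert_eq!(v, [1, 2, 3]);
}
```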
diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs
index 1b483e3fc..02cc7691a 100644
--- a/library/alloc/src/vec/into_iter.rs
+++ b/library/alloc/src/vec/into_iter.rs
@@ -4,12 +4,11 @@ use crate::alloc::{Allocator, Global};
use crate::raw_vec::RawVec;
use core::array;
use core::fmt;
-use core::intrinsics::arith_offset;
use core::iter::{
FusedIterator, InPlaceIterable, SourceIter, TrustedLen, TrustedRandomAccessNoCoerce,
};
use core::marker::PhantomData;
-use core::mem::{self, ManuallyDrop, MaybeUninit};
+use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
#[cfg(not(no_global_oom_handling))]
use core::ops::Deref;
use core::ptr::{self, NonNull};
@@ -96,13 +95,16 @@ impl<T, A: Allocator> IntoIter<T, A> {
}
/// Drops remaining elements and relinquishes the backing allocation.
+ /// This method guarantees it won't panic before relinquishing
+ /// the backing allocation.
///
/// This is roughly equivalent to the following, but more efficient
///
/// ```
/// # let mut into_iter = Vec::<u8>::with_capacity(10).into_iter();
+ /// let mut into_iter = std::mem::replace(&mut into_iter, Vec::new().into_iter());
/// (&mut into_iter).for_each(core::mem::drop);
- /// unsafe { core::ptr::write(&mut into_iter, Vec::new().into_iter()); }
+ /// std::mem::forget(into_iter);
/// ```
///
/// This method is used by in-place iteration, refer to the vec::in_place_collect
@@ -119,6 +121,8 @@ impl<T, A: Allocator> IntoIter<T, A> {
self.ptr = self.buf.as_ptr();
self.end = self.buf.as_ptr();
+ // Dropping the remaining elements can panic, so this needs to be
+ // done only after updating the other fields.
unsafe {
ptr::drop_in_place(remaining);
}
@@ -148,19 +152,19 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
#[inline]
fn next(&mut self) -> Option<T> {
- if self.ptr as *const _ == self.end {
+ if self.ptr == self.end {
None
- } else if mem::size_of::<T>() == 0 {
+ } else if T::IS_ZST {
// purposefully don't use 'ptr.offset' because for
// vectors with 0-size elements this would return the
// same pointer.
- self.ptr = unsafe { arith_offset(self.ptr as *const i8, 1) as *mut T };
+ self.ptr = self.ptr.wrapping_byte_add(1);
// Make up a value of this ZST.
Some(unsafe { mem::zeroed() })
} else {
let old = self.ptr;
- self.ptr = unsafe { self.ptr.offset(1) };
+ self.ptr = unsafe { self.ptr.add(1) };
Some(unsafe { ptr::read(old) })
}
@@ -168,7 +172,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
- let exact = if mem::size_of::<T>() == 0 {
+ let exact = if T::IS_ZST {
self.end.addr().wrapping_sub(self.ptr.addr())
} else {
unsafe { self.end.sub_ptr(self.ptr) }
@@ -180,11 +184,11 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
fn advance_by(&mut self, n: usize) -> Result<(), usize> {
let step_size = self.len().min(n);
let to_drop = ptr::slice_from_raw_parts_mut(self.ptr as *mut T, step_size);
- if mem::size_of::<T>() == 0 {
+ if T::IS_ZST {
// SAFETY: due to unchecked casts of unsigned amounts to signed offsets the wraparound
// effectively results in unsigned pointers representing positions 0..usize::MAX,
// which is valid for ZSTs.
- self.ptr = unsafe { arith_offset(self.ptr as *const i8, step_size as isize) as *mut T }
+ self.ptr = self.ptr.wrapping_byte_add(step_size);
} else {
// SAFETY: the min() above ensures that step_size is in bounds
self.ptr = unsafe { self.ptr.add(step_size) };
@@ -210,16 +214,16 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
let len = self.len();
- if mem::size_of::<T>() == 0 {
+ if T::IS_ZST {
if len < N {
self.forget_remaining_elements();
// Safety: ZSTs can be conjured ex nihilo, only the amount has to be correct
return Err(unsafe { array::IntoIter::new_unchecked(raw_ary, 0..len) });
}
- self.ptr = unsafe { arith_offset(self.ptr as *const i8, N as isize) as *mut T };
+ self.ptr = self.ptr.wrapping_byte_add(N);
// Safety: ditto
- return Ok(unsafe { MaybeUninit::array_assume_init(raw_ary) });
+ return Ok(unsafe { raw_ary.transpose().assume_init() });
}
if len < N {
@@ -237,7 +241,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
return unsafe {
ptr::copy_nonoverlapping(self.ptr, raw_ary.as_mut_ptr() as *mut T, N);
self.ptr = self.ptr.add(N);
- Ok(MaybeUninit::array_assume_init(raw_ary))
+ Ok(raw_ary.transpose().assume_init())
};
}
@@ -254,7 +258,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
// that `T: Copy` so reading elements from the buffer doesn't invalidate
// them for `Drop`.
unsafe {
- if mem::size_of::<T>() == 0 { mem::zeroed() } else { ptr::read(self.ptr.add(i)) }
+ if T::IS_ZST { mem::zeroed() } else { ptr::read(self.ptr.add(i)) }
}
}
}
@@ -265,14 +269,14 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
fn next_back(&mut self) -> Option<T> {
if self.end == self.ptr {
None
- } else if mem::size_of::<T>() == 0 {
+ } else if T::IS_ZST {
// See above for why 'ptr.offset' isn't used
- self.end = unsafe { arith_offset(self.end as *const i8, -1) as *mut T };
+ self.end = self.end.wrapping_byte_sub(1);
// Make up a value of this ZST.
Some(unsafe { mem::zeroed() })
} else {
- self.end = unsafe { self.end.offset(-1) };
+ self.end = unsafe { self.end.sub(1) };
Some(unsafe { ptr::read(self.end) })
}
@@ -281,14 +285,12 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
#[inline]
fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
let step_size = self.len().min(n);
- if mem::size_of::<T>() == 0 {
+ if T::IS_ZST {
// SAFETY: same as for advance_by()
- self.end = unsafe {
- arith_offset(self.end as *const i8, step_size.wrapping_neg() as isize) as *mut T
- }
+ self.end = self.end.wrapping_byte_sub(step_size);
} else {
// SAFETY: same as for advance_by()
- self.end = unsafe { self.end.offset(step_size.wrapping_neg() as isize) };
+ self.end = unsafe { self.end.sub(step_size) };
}
let to_drop = ptr::slice_from_raw_parts_mut(self.end as *mut T, step_size);
// SAFETY: same as for advance_by()
diff --git a/library/alloc/src/vec/is_zero.rs b/library/alloc/src/vec/is_zero.rs
index 92a32779b..8e652d676 100644
--- a/library/alloc/src/vec/is_zero.rs
+++ b/library/alloc/src/vec/is_zero.rs
@@ -1,3 +1,5 @@
+use core::num::{Saturating, Wrapping};
+
use crate::boxed::Box;
#[rustc_specialization_trait]
@@ -144,3 +146,39 @@ impl_is_zero_option_of_nonzero!(
NonZeroUsize,
NonZeroIsize,
);
+
+unsafe impl<T: IsZero> IsZero for Wrapping<T> {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ self.0.is_zero()
+ }
+}
+
+unsafe impl<T: IsZero> IsZero for Saturating<T> {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ self.0.is_zero()
+ }
+}
+
+macro_rules! impl_for_optional_bool {
+ ($($t:ty,)+) => {$(
+ unsafe impl IsZero for $t {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ // SAFETY: This is *not* a stable layout guarantee, but
+ // inside `core` we're allowed to rely on the current rustc
+ // behaviour that options of bools will be one byte with
+ // no padding, so long as they're nested less than 254 deep.
+ let raw: u8 = unsafe { core::mem::transmute(*self) };
+ raw == 0
+ }
+ }
+ )+};
+}
+impl_for_optional_bool! {
+ Option<bool>,
+ Option<Option<bool>>,
+ Option<Option<Option<bool>>>,
+ // Could go further, but not worth the metadata overhead
+}
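
For reference (not part of the patch), the layout property the SAFETY comment relies on, observable with current rustc but not a stable guarantee:

```rust
use std::mem::size_of;

fn main() {
    // Nested Option<bool> stays one byte thanks to niche optimization.
    assert_eq!(size_of::<Option<bool>>(), 1);
    assert_eq!(size_of::<Option<Option<bool>>>(), 1);
    assert_eq!(size_of::<Option<Option<Option<bool>>>>(), 1);
}
```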
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs
index fa9f2131c..bbbdc3aa2 100644
--- a/library/alloc/src/vec/mod.rs
+++ b/library/alloc/src/vec/mod.rs
@@ -59,12 +59,12 @@ use core::cmp::Ordering;
use core::convert::TryFrom;
use core::fmt;
use core::hash::{Hash, Hasher};
-use core::intrinsics::{arith_offset, assume};
+use core::intrinsics::assume;
use core::iter;
#[cfg(not(no_global_oom_handling))]
use core::iter::FromIterator;
use core::marker::PhantomData;
-use core::mem::{self, ManuallyDrop, MaybeUninit};
+use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
use core::ops::{self, Index, IndexMut, Range, RangeBounds};
use core::ptr::{self, NonNull};
use core::slice::{self, SliceIndex};
@@ -125,7 +125,7 @@ use self::set_len_on_drop::SetLenOnDrop;
mod set_len_on_drop;
#[cfg(not(no_global_oom_handling))]
-use self::in_place_drop::InPlaceDrop;
+use self::in_place_drop::{InPlaceDrop, InPlaceDstBufDrop};
#[cfg(not(no_global_oom_handling))]
mod in_place_drop;
@@ -436,7 +436,7 @@ impl<T> Vec<T> {
/// an explanation of the difference between length and capacity, see
/// *[Capacity and reallocation]*.
///
- /// If it is imporant to know the exact allocated capacity of a `Vec`,
+ /// If it is important to know the exact allocated capacity of a `Vec`,
/// always use the [`capacity`] method after construction.
///
/// For `Vec<T>` where `T` is a zero-sized type, there will be no allocation
@@ -483,15 +483,13 @@ impl<T> Vec<T> {
Self::with_capacity_in(capacity, Global)
}
- /// Creates a `Vec<T>` directly from the raw components of another vector.
+ /// Creates a `Vec<T>` directly from a pointer, a capacity, and a length.
///
/// # Safety
///
/// This is highly unsafe, due to the number of invariants that aren't
/// checked:
///
- /// * `ptr` needs to have been previously allocated via [`String`]/`Vec<T>`
- /// (at least, it's highly likely to be incorrect if it wasn't).
/// * `T` needs to have the same alignment as what `ptr` was allocated with.
/// (`T` having a less strict alignment is not sufficient, the alignment really
/// needs to be equal to satisfy the [`dealloc`] requirement that memory must be
@@ -500,6 +498,14 @@ impl<T> Vec<T> {
/// to be the same size as the pointer was allocated with. (Because similar to
/// alignment, [`dealloc`] must be called with the same layout `size`.)
/// * `length` needs to be less than or equal to `capacity`.
+ /// * The first `length` values must be properly initialized values of type `T`.
+ /// * `capacity` needs to be the capacity that the pointer was allocated with.
+ /// * The allocated size in bytes must be no larger than `isize::MAX`.
+ /// See the safety documentation of [`pointer::offset`].
+ ///
+ /// These requirements are always upheld by any `ptr` that has been allocated
+ /// via `Vec<T>`. Other allocation sources are allowed if the invariants are
+ /// upheld.
///
/// Violating these may cause problems like corrupting the allocator's
/// internal data structures. For example it is normally **not** safe
@@ -542,8 +548,8 @@ impl<T> Vec<T> {
///
/// unsafe {
/// // Overwrite memory with 4, 5, 6
- /// for i in 0..len as isize {
- /// ptr::write(p.offset(i), 4 + i);
+ /// for i in 0..len {
+ /// ptr::write(p.add(i), 4 + i);
/// }
///
/// // Put everything back together into a Vec
@@ -551,6 +557,32 @@ impl<T> Vec<T> {
/// assert_eq!(rebuilt, [4, 5, 6]);
/// }
/// ```
+ ///
+ /// Using memory that was allocated elsewhere:
+ ///
+ /// ```rust
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::{AllocError, Allocator, Global, Layout};
+ ///
+ /// fn main() {
+ /// let layout = Layout::array::<u32>(16).expect("overflow cannot happen");
+ ///
+ /// let vec = unsafe {
+ /// let mem = match Global.allocate(layout) {
+ /// Ok(mem) => mem.cast::<u32>().as_ptr(),
+ /// Err(AllocError) => return,
+ /// };
+ ///
+ /// mem.write(1_000_000);
+ ///
+ /// Vec::from_raw_parts_in(mem, 1, 16, Global)
+ /// };
+ ///
+ /// assert_eq!(vec, &[1_000_000]);
+ /// assert_eq!(vec.capacity(), 16);
+ /// }
+ /// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn from_raw_parts(ptr: *mut T, length: usize, capacity: usize) -> Self {
@@ -591,7 +623,7 @@ impl<T, A: Allocator> Vec<T, A> {
/// an explanation of the difference between length and capacity, see
/// *[Capacity and reallocation]*.
///
- /// If it is imporant to know the exact allocated capacity of a `Vec`,
+ /// If it is important to know the exact allocated capacity of a `Vec`,
/// always use the [`capacity`] method after construction.
///
/// For `Vec<T, A>` where `T` is a zero-sized type, there will be no allocation
@@ -641,21 +673,30 @@ impl<T, A: Allocator> Vec<T, A> {
Vec { buf: RawVec::with_capacity_in(capacity, alloc), len: 0 }
}
- /// Creates a `Vec<T, A>` directly from the raw components of another vector.
+ /// Creates a `Vec<T, A>` directly from a pointer, a capacity, a length,
+ /// and an allocator.
///
/// # Safety
///
/// This is highly unsafe, due to the number of invariants that aren't
/// checked:
///
- /// * `ptr` needs to have been previously allocated via [`String`]/`Vec<T>`
- /// (at least, it's highly likely to be incorrect if it wasn't).
- /// * `T` needs to have the same size and alignment as what `ptr` was allocated with.
+ /// * `T` needs to have the same alignment as what `ptr` was allocated with.
/// (`T` having a less strict alignment is not sufficient, the alignment really
/// needs to be equal to satisfy the [`dealloc`] requirement that memory must be
/// allocated and deallocated with the same layout.)
+    /// * The size of `T` times the `capacity` (i.e. the allocated size in bytes) needs
+ /// to be the same size as the pointer was allocated with. (Because similar to
+ /// alignment, [`dealloc`] must be called with the same layout `size`.)
/// * `length` needs to be less than or equal to `capacity`.
- /// * `capacity` needs to be the capacity that the pointer was allocated with.
+ /// * The first `length` values must be properly initialized values of type `T`.
+ /// * `capacity` needs to [*fit*] the layout size that the pointer was allocated with.
+ /// * The allocated size in bytes must be no larger than `isize::MAX`.
+ /// See the safety documentation of [`pointer::offset`].
+ ///
+ /// These requirements are always upheld by any `ptr` that has been allocated
+ /// via `Vec<T, A>`. Other allocation sources are allowed if the invariants are
+ /// upheld.
///
/// Violating these may cause problems like corrupting the allocator's
/// internal data structures. For example it is **not** safe
@@ -673,6 +714,7 @@ impl<T, A: Allocator> Vec<T, A> {
///
/// [`String`]: crate::string::String
/// [`dealloc`]: crate::alloc::GlobalAlloc::dealloc
+ /// [*fit*]: crate::alloc::Allocator#memory-fitting
///
/// # Examples
///
@@ -702,8 +744,8 @@ impl<T, A: Allocator> Vec<T, A> {
///
/// unsafe {
/// // Overwrite memory with 4, 5, 6
- /// for i in 0..len as isize {
- /// ptr::write(p.offset(i), 4 + i);
+ /// for i in 0..len {
+ /// ptr::write(p.add(i), 4 + i);
/// }
///
/// // Put everything back together into a Vec
@@ -711,6 +753,29 @@ impl<T, A: Allocator> Vec<T, A> {
/// assert_eq!(rebuilt, [4, 5, 6]);
/// }
/// ```
+ ///
+ /// Using memory that was allocated elsewhere:
+ ///
+ /// ```rust
+ /// use std::alloc::{alloc, Layout};
+ ///
+ /// fn main() {
+ /// let layout = Layout::array::<u32>(16).expect("overflow cannot happen");
+ /// let vec = unsafe {
+ /// let mem = alloc(layout).cast::<u32>();
+ /// if mem.is_null() {
+ /// return;
+ /// }
+ ///
+ /// mem.write(1_000_000);
+ ///
+ /// Vec::from_raw_parts(mem, 1, 16)
+ /// };
+ ///
+ /// assert_eq!(vec, &[1_000_000]);
+ /// assert_eq!(vec.capacity(), 16);
+ /// }
+ /// ```
#[inline]
#[unstable(feature = "allocator_api", issue = "32838")]
pub unsafe fn from_raw_parts_in(ptr: *mut T, length: usize, capacity: usize, alloc: A) -> Self {
@@ -803,13 +868,14 @@ impl<T, A: Allocator> Vec<T, A> {
(ptr, len, capacity, alloc)
}
- /// Returns the number of elements the vector can hold without
+ /// Returns the total number of elements the vector can hold without
/// reallocating.
///
/// # Examples
///
/// ```
- /// let vec: Vec<i32> = Vec::with_capacity(10);
+ /// let mut vec: Vec<i32> = Vec::with_capacity(10);
+ /// vec.push(42);
/// assert_eq!(vec.capacity(), 10);
/// ```
#[inline]
@@ -875,7 +941,8 @@ impl<T, A: Allocator> Vec<T, A> {
/// in the given `Vec<T>`. The collection may reserve more space to speculatively avoid
/// frequent reallocations. After calling `try_reserve`, capacity will be
/// greater than or equal to `self.len() + additional` if it returns
- /// `Ok(())`. Does nothing if capacity is already sufficient.
+ /// `Ok(())`. Does nothing if capacity is already sufficient. This method
+ /// preserves the contents even if an error occurs.
///
/// # Errors
///
@@ -1393,7 +1460,7 @@ impl<T, A: Allocator> Vec<T, A> {
if index < len {
// Shift everything over to make space. (Duplicating the
// `index`th element into two consecutive places.)
- ptr::copy(p, p.offset(1), len - index);
+ ptr::copy(p, p.add(1), len - index);
} else if index == len {
// No elements need shifting.
} else {
@@ -1455,7 +1522,7 @@ impl<T, A: Allocator> Vec<T, A> {
ret = ptr::read(ptr);
// Shift everything down to fill in that spot.
- ptr::copy(ptr.offset(1), ptr, len - index - 1);
+ ptr::copy(ptr.add(1), ptr, len - index - 1);
}
self.set_len(len - 1);
ret
@@ -1773,6 +1840,51 @@ impl<T, A: Allocator> Vec<T, A> {
}
}
+ /// Appends an element if there is sufficient spare capacity, otherwise an error is returned
+ /// with the element.
+ ///
+ /// Unlike [`push`], this method will not reallocate when there is insufficient capacity.
+ /// The caller should use [`reserve`] or [`try_reserve`] to ensure that there is enough capacity.
+ ///
+ /// [`push`]: Vec::push
+ /// [`reserve`]: Vec::reserve
+ /// [`try_reserve`]: Vec::try_reserve
+ ///
+ /// # Examples
+ ///
+ /// A manual, panic-free alternative to [`FromIterator`]:
+ ///
+ /// ```
+ /// #![feature(vec_push_within_capacity)]
+ ///
+ /// use std::collections::TryReserveError;
+ /// fn from_iter_fallible<T>(iter: impl Iterator<Item=T>) -> Result<Vec<T>, TryReserveError> {
+ /// let mut vec = Vec::new();
+ /// for value in iter {
+ /// if let Err(value) = vec.push_within_capacity(value) {
+ /// vec.try_reserve(1)?;
+ /// // This cannot fail: the previous line either returned or added at least 1 free slot.
+ /// let _ = vec.push_within_capacity(value);
+ /// }
+ /// }
+ /// Ok(vec)
+ /// }
+ /// assert_eq!(from_iter_fallible(0..100), Ok(Vec::from_iter(0..100)));
+ /// ```
+ #[inline]
+ #[unstable(feature = "vec_push_within_capacity", issue = "100486")]
+ pub fn push_within_capacity(&mut self, value: T) -> Result<(), T> {
+ if self.len == self.buf.capacity() {
+ return Err(value);
+ }
+ unsafe {
+ let end = self.as_mut_ptr().add(self.len);
+ ptr::write(end, value);
+ self.len += 1;
+ }
+ Ok(())
+ }
+
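A quick usage sketch of the new method (nightly-only, behind the `vec_push_within_capacity` feature shown above): filling a vector up to its current capacity never reallocates, and a rejected value is handed back to the caller.

```rust
#![feature(vec_push_within_capacity)]

let mut v: Vec<u32> = Vec::with_capacity(2);
// Fill every spare slot without ever reallocating.
while v.len() < v.capacity() {
    v.push_within_capacity(v.len() as u32).unwrap();
}
// The buffer is full, so the value is returned instead of triggering a reallocation.
assert_eq!(v.push_within_capacity(99), Err(99));
```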
/// Removes the last element from a vector and returns it, or [`None`] if it
/// is empty.
///
@@ -1888,9 +2000,7 @@ impl<T, A: Allocator> Vec<T, A> {
unsafe {
// set self.vec's length to start, to be safe in case Drain is leaked
self.set_len(start);
- // Use the borrow in the IterMut to indicate borrowing behavior of the
- // whole Drain iterator (like &mut T).
- let range_slice = slice::from_raw_parts_mut(self.as_mut_ptr().add(start), end - start);
+ let range_slice = slice::from_raw_parts(self.as_ptr().add(start), end - start);
Drain {
tail_start: end,
tail_len: len - end,
@@ -2082,7 +2192,6 @@ impl<T, A: Allocator> Vec<T, A> {
/// static_ref[0] += 1;
/// assert_eq!(static_ref, &[2, 2, 3]);
/// ```
- #[cfg(not(no_global_oom_handling))]
#[stable(feature = "vec_leak", since = "1.47.0")]
#[inline]
pub fn leak<'a>(self) -> &'a mut [T]
@@ -2346,7 +2455,7 @@ impl<T, A: Allocator, const N: usize> Vec<[T; N], A> {
#[unstable(feature = "slice_flatten", issue = "95629")]
pub fn into_flattened(self) -> Vec<T, A> {
let (ptr, len, cap, alloc) = self.into_raw_parts_with_alloc();
- let (new_len, new_cap) = if mem::size_of::<T>() == 0 {
+ let (new_len, new_cap) = if T::IS_ZST {
(len.checked_mul(N).expect("vec len overflow"), usize::MAX)
} else {
// SAFETY:
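For context, a small usage sketch of `into_flattened` (behind the `slice_flatten` feature named above); the `T::IS_ZST` branch only changes how the new length and capacity are computed when the element type is zero-sized.

```rust
#![feature(slice_flatten)]

let v = vec![[1, 2, 3], [4, 5, 6]];
let flat: Vec<i32> = v.into_flattened();
assert_eq!(flat, [1, 2, 3, 4, 5, 6]);
```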
@@ -2408,7 +2517,7 @@ impl<T, A: Allocator> Vec<T, A> {
// Write all elements except the last one
for _ in 1..n {
ptr::write(ptr, value.next());
- ptr = ptr.offset(1);
+ ptr = ptr.add(1);
// Increment the length in every step in case next() panics
local_len.increment_len(1);
}
@@ -2676,8 +2785,8 @@ impl<T, A: Allocator> IntoIterator for Vec<T, A> {
let mut me = ManuallyDrop::new(self);
let alloc = ManuallyDrop::new(ptr::read(me.allocator()));
let begin = me.as_mut_ptr();
- let end = if mem::size_of::<T>() == 0 {
- arith_offset(begin as *const i8, me.len() as isize) as *const T
+ let end = if T::IS_ZST {
+ begin.wrapping_byte_add(me.len())
} else {
begin.add(me.len()) as *const T
};
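For zero-sized element types every element shares the same dangling address, so the iterator cannot measure progress by pointer distance; the wrapping byte offset makes the `end` pointer encode the element count as a byte offset from `begin`. A small illustration of the observable behavior:

```rust
let mut it = vec![(); 3].into_iter();
assert_eq!(it.len(), 3);
it.next();
// The remaining length is still tracked correctly for a ZST.
assert_eq!(it.len(), 2);
```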
@@ -2927,6 +3036,8 @@ unsafe impl<#[may_dangle] T, A: Allocator> Drop for Vec<T, A> {
#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
impl<T> const Default for Vec<T> {
/// Creates an empty `Vec<T>`.
+ ///
+ /// The vector will not allocate until elements are pushed onto it.
fn default() -> Vec<T> {
Vec::new()
}
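The added sentence mirrors the guarantee of `Vec::new`; a trivial check:

```rust
let v: Vec<i32> = Vec::default();
assert_eq!(v.capacity(), 0); // nothing has been allocated yet
```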
diff --git a/library/alloc/src/vec/spec_extend.rs b/library/alloc/src/vec/spec_extend.rs
index 506ee0ecf..1ea9c827a 100644
--- a/library/alloc/src/vec/spec_extend.rs
+++ b/library/alloc/src/vec/spec_extend.rs
@@ -39,7 +39,7 @@ where
let mut local_len = SetLenOnDrop::new(&mut self.len);
iterator.for_each(move |element| {
ptr::write(ptr, element);
- ptr = ptr.offset(1);
+ ptr = ptr.add(1);
// Since the loop executes user code which can panic we have to bump the pointer
// after each step.
// NB can't overflow since we would have had to alloc the address space
diff --git a/library/alloc/tests/autotraits.rs b/library/alloc/tests/autotraits.rs
new file mode 100644
index 000000000..8ff5f0abe
--- /dev/null
+++ b/library/alloc/tests/autotraits.rs
@@ -0,0 +1,293 @@
+fn require_sync<T: Sync>(_: T) {}
+fn require_send_sync<T: Send + Sync>(_: T) {}
+
+struct NotSend(*const ());
+unsafe impl Sync for NotSend {}
+
+#[test]
+fn test_btree_map() {
+ // Tests of this form are prone to https://github.com/rust-lang/rust/issues/64552.
+ //
+ // In theory the async block's future would be Send if the value we hold
+ // across the await point is Send, and Sync if the value we hold across the
+ // await point is Sync.
+ //
+ // We test autotraits in this convoluted way, instead of a straightforward
+ // `require_send_sync::<TypeIWantToTest>()`, because the interaction with
+ // generators exposes some current limitations in rustc's ability to prove a
+ // lifetime bound on the erased generator witness types. See the above link.
+ //
+ // A typical way this would surface in real code is:
+ //
+ // fn spawn<T: Future + Send>(_: T) {}
+ //
+ // async fn f() {
+ // let map = BTreeMap::<u32, Box<dyn Send + Sync>>::new();
+ // for _ in &map {
+ // async {}.await;
+ // }
+ // }
+ //
+ // fn main() {
+ // spawn(f());
+ // }
+ //
+ // where with some unintentionally overconstrained Send impls in liballoc's
+ // internals, the future might incorrectly not be Send even though every
+ // single type involved in the program is Send and Sync.
+ require_send_sync(async {
+ let _v = None::<alloc::collections::btree_map::Iter<'_, &u32, &u32>>;
+ async {}.await;
+ });
+
+ // Testing like this would not catch all issues that the above form catches.
+ require_send_sync(None::<alloc::collections::btree_map::Iter<'_, &u32, &u32>>);
+
+ require_sync(async {
+ let _v = None::<alloc::collections::btree_map::Iter<'_, u32, NotSend>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::btree_map::BTreeMap<&u32, &u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<
+ alloc::collections::btree_map::DrainFilter<
+ '_,
+ &u32,
+ &u32,
+ fn(&&u32, &mut &u32) -> bool,
+ >,
+ >;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::btree_map::Entry<'_, &u32, &u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::btree_map::IntoIter<&u32, &u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::btree_map::IntoKeys<&u32, &u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::btree_map::IntoValues<&u32, &u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::btree_map::Iter<'_, &u32, &u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::btree_map::IterMut<'_, &u32, &u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::btree_map::Keys<'_, &u32, &u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::btree_map::OccupiedEntry<'_, &u32, &u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::btree_map::OccupiedError<'_, &u32, &u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::btree_map::Range<'_, &u32, &u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::btree_map::RangeMut<'_, &u32, &u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::btree_map::VacantEntry<'_, &u32, &u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::btree_map::Values<'_, &u32, &u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::btree_map::ValuesMut<'_, &u32, &u32>>;
+ async {}.await;
+ });
+}
+
+#[test]
+fn test_btree_set() {
+ require_send_sync(async {
+ let _v = None::<alloc::collections::btree_set::BTreeSet<&u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::btree_set::Difference<'_, &u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::btree_set::DrainFilter<'_, &u32, fn(&&u32) -> bool>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::btree_set::Intersection<'_, &u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::btree_set::IntoIter<&u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::btree_set::Iter<'_, &u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::btree_set::Range<'_, &u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::btree_set::SymmetricDifference<'_, &u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::btree_set::Union<'_, &u32>>;
+ async {}.await;
+ });
+}
+
+#[test]
+fn test_binary_heap() {
+ require_send_sync(async {
+ let _v = None::<alloc::collections::binary_heap::BinaryHeap<&u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::binary_heap::Drain<'_, &u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::binary_heap::DrainSorted<'_, &u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::binary_heap::IntoIter<&u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::binary_heap::IntoIterSorted<&u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::binary_heap::Iter<'_, &u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::binary_heap::PeekMut<'_, &u32>>;
+ async {}.await;
+ });
+}
+
+#[test]
+fn test_linked_list() {
+ require_send_sync(async {
+ let _v = None::<alloc::collections::linked_list::Cursor<'_, &u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::linked_list::CursorMut<'_, &u32>>;
+ async {}.await;
+ });
+
+ // FIXME
+ /*
+ require_send_sync(async {
+ let _v =
+ None::<alloc::collections::linked_list::DrainFilter<'_, &u32, fn(&mut &u32) -> bool>>;
+ async {}.await;
+ });
+ */
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::linked_list::IntoIter<&u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::linked_list::Iter<'_, &u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::linked_list::IterMut<'_, &u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::linked_list::LinkedList<&u32>>;
+ async {}.await;
+ });
+}
+
+#[test]
+fn test_vec_deque() {
+ require_send_sync(async {
+ let _v = None::<alloc::collections::vec_deque::Drain<'_, &u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::vec_deque::IntoIter<&u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::vec_deque::Iter<'_, &u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::vec_deque::IterMut<'_, &u32>>;
+ async {}.await;
+ });
+
+ require_send_sync(async {
+ let _v = None::<alloc::collections::vec_deque::VecDeque<&u32>>;
+ async {}.await;
+ });
+}
diff --git a/library/alloc/tests/lib.rs b/library/alloc/tests/lib.rs
index d83cd29dd..ffc5ca7a5 100644
--- a/library/alloc/tests/lib.rs
+++ b/library/alloc/tests/lib.rs
@@ -2,6 +2,7 @@
#![feature(alloc_layout_extra)]
#![feature(assert_matches)]
#![feature(box_syntax)]
+#![feature(btree_drain_filter)]
#![feature(cow_is_borrowed)]
#![feature(const_box)]
#![feature(const_convert)]
@@ -14,6 +15,8 @@
#![feature(core_intrinsics)]
#![feature(drain_filter)]
#![feature(exact_size_is_empty)]
+#![feature(linked_list_cursors)]
+#![feature(map_try_insert)]
#![feature(new_uninit)]
#![feature(pattern)]
#![feature(trusted_len)]
@@ -32,22 +35,24 @@
#![feature(slice_group_by)]
#![feature(slice_partition_dedup)]
#![feature(string_remove_matches)]
-#![feature(const_btree_new)]
+#![feature(const_btree_len)]
#![feature(const_default_impls)]
#![feature(const_trait_impl)]
#![feature(const_str_from_utf8)]
#![feature(nonnull_slice_from_raw_parts)]
#![feature(panic_update_hook)]
+#![feature(pointer_is_aligned)]
#![feature(slice_flatten)]
#![feature(thin_box)]
-#![feature(bench_black_box)]
#![feature(strict_provenance)]
#![feature(once_cell)]
+#![feature(drain_keep_rest)]
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
mod arc;
+mod autotraits;
mod borrow;
mod boxed;
mod btree_set_hash;
diff --git a/library/alloc/tests/str.rs b/library/alloc/tests/str.rs
index 7379569dd..e30329aa1 100644
--- a/library/alloc/tests/str.rs
+++ b/library/alloc/tests/str.rs
@@ -1010,11 +1010,11 @@ fn test_as_bytes_fail() {
fn test_as_ptr() {
let buf = "hello".as_ptr();
unsafe {
- assert_eq!(*buf.offset(0), b'h');
- assert_eq!(*buf.offset(1), b'e');
- assert_eq!(*buf.offset(2), b'l');
- assert_eq!(*buf.offset(3), b'l');
- assert_eq!(*buf.offset(4), b'o');
+ assert_eq!(*buf.add(0), b'h');
+ assert_eq!(*buf.add(1), b'e');
+ assert_eq!(*buf.add(2), b'l');
+ assert_eq!(*buf.add(3), b'l');
+ assert_eq!(*buf.add(4), b'o');
}
}
diff --git a/library/alloc/tests/string.rs b/library/alloc/tests/string.rs
index b6836fdc8..99d1296a4 100644
--- a/library/alloc/tests/string.rs
+++ b/library/alloc/tests/string.rs
@@ -693,12 +693,6 @@ fn test_try_reserve() {
const MAX_CAP: usize = isize::MAX as usize;
const MAX_USIZE: usize = usize::MAX;
- // On 16/32-bit, we check that allocations don't exceed isize::MAX,
- // on 64-bit, we assume the OS will give an OOM for such a ridiculous size.
- // Any platform that succeeds for these requests is technically broken with
- // ptr::offset because LLVM is the worst.
- let guards_against_isize = usize::BITS < 64;
-
{
// Note: basic stuff is checked by test_reserve
let mut empty_string: String = String::new();
@@ -712,35 +706,19 @@ fn test_try_reserve() {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- // Check isize::MAX + 1 does count as overflow
- assert_matches!(
- empty_string.try_reserve(MAX_CAP + 1).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
-
- // Check usize::MAX does count as overflow
- assert_matches!(
- empty_string.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "usize::MAX should trigger an overflow!"
- );
- } else {
- // Check isize::MAX + 1 is an OOM
- assert_matches!(
- empty_string.try_reserve(MAX_CAP + 1).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
-
- // Check usize::MAX is an OOM
- assert_matches!(
- empty_string.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "usize::MAX should trigger an OOM!"
- );
- }
+ // Check isize::MAX + 1 does count as overflow
+ assert_matches!(
+ empty_string.try_reserve(MAX_CAP + 1).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
+ // Check usize::MAX does count as overflow
+ assert_matches!(
+ empty_string.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "usize::MAX should trigger an overflow!"
+ );
}
{
@@ -753,19 +731,13 @@ fn test_try_reserve() {
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- assert_matches!(
- ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
- } else {
- assert_matches!(
- ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
- }
+
+ assert_matches!(
+ ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
// Should always overflow in the add-to-len
assert_matches!(
ten_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
@@ -785,8 +757,6 @@ fn test_try_reserve_exact() {
const MAX_CAP: usize = isize::MAX as usize;
const MAX_USIZE: usize = usize::MAX;
- let guards_against_isize = usize::BITS < 64;
-
{
let mut empty_string: String = String::new();
@@ -799,31 +769,17 @@ fn test_try_reserve_exact() {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- assert_matches!(
- empty_string.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
-
- assert_matches!(
- empty_string.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "usize::MAX should trigger an overflow!"
- );
- } else {
- assert_matches!(
- empty_string.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
-
- assert_matches!(
- empty_string.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "usize::MAX should trigger an OOM!"
- );
- }
+ assert_matches!(
+ empty_string.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
+ assert_matches!(
+ empty_string.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "usize::MAX should trigger an overflow!"
+ );
}
{
@@ -839,19 +795,13 @@ fn test_try_reserve_exact() {
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- assert_matches!(
- ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
- } else {
- assert_matches!(
- ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
- }
+
+ assert_matches!(
+ ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
assert_matches!(
ten_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
Err(CapacityOverflow),
diff --git a/library/alloc/tests/thin_box.rs b/library/alloc/tests/thin_box.rs
index 368aa564f..e008b0cc3 100644
--- a/library/alloc/tests/thin_box.rs
+++ b/library/alloc/tests/thin_box.rs
@@ -48,11 +48,11 @@ fn verify_aligned<T>(ptr: *const T) {
// practice these checks are mostly just smoke-detectors for an extremely
// broken `ThinBox` impl, since it's an extremely subtle piece of code.
let ptr = core::hint::black_box(ptr);
- let align = core::mem::align_of::<T>();
assert!(
- (ptr.addr() & (align - 1)) == 0 && !ptr.is_null(),
- "misaligned ThinBox data; valid pointers to `{}` should be aligned to {align}: {ptr:p}",
- core::any::type_name::<T>(),
+ ptr.is_aligned() && !ptr.is_null(),
+ "misaligned ThinBox data; valid pointers to `{ty}` should be aligned to {align}: {ptr:p}",
+ ty = core::any::type_name::<T>(),
+ align = core::mem::align_of::<T>(),
);
}
diff --git a/library/alloc/tests/vec.rs b/library/alloc/tests/vec.rs
index b797e2375..e02711870 100644
--- a/library/alloc/tests/vec.rs
+++ b/library/alloc/tests/vec.rs
@@ -294,6 +294,22 @@ fn test_retain() {
}
#[test]
+fn test_retain_predicate_order() {
+ for to_keep in [true, false] {
+ let mut number_of_executions = 0;
+ let mut vec = vec![1, 2, 3, 4];
+ let mut next_expected = 1;
+ vec.retain(|&x| {
+ assert_eq!(next_expected, x);
+ next_expected += 1;
+ number_of_executions += 1;
+ to_keep
+ });
+ assert_eq!(number_of_executions, 4);
+ }
+}
+
+#[test]
fn test_retain_pred_panic_with_hole() {
let v = (0..5).map(Rc::new).collect::<Vec<_>>();
catch_unwind(AssertUnwindSafe(|| {
@@ -355,6 +371,35 @@ fn test_retain_drop_panic() {
}
#[test]
+fn test_retain_maybeuninits() {
+ // This test is intended to be run under Miri.
+ use core::mem::MaybeUninit;
+ let mut vec: Vec<_> = [1i32, 2, 3, 4].map(|v| MaybeUninit::new(vec![v])).into();
+ vec.retain(|x| {
+ // SAFETY: Retain must visit every element of the Vec in the original order and exactly once.
+ // Our values are initialized at the creation of the Vec.
+ let v = unsafe { x.assume_init_ref()[0] };
+ if v & 1 == 0 {
+ return true;
+ }
+ // SAFETY: The value is initialized.
+ // It won't be dropped by `Vec::retain`
+ // because `MaybeUninit` doesn't drop its content.
+ drop(unsafe { x.assume_init_read() });
+ false
+ });
+ let vec: Vec<i32> = vec
+ .into_iter()
+ .map(|x| unsafe {
+ // SAFETY: All values dropped in the retain predicate have been removed by `Vec::retain`.
+ // Remaining values are initialized.
+ x.assume_init()[0]
+ })
+ .collect();
+ assert_eq!(vec, [2, 4]);
+}
+
+#[test]
fn test_dedup() {
fn case(a: Vec<i32>, b: Vec<i32>) {
let mut v = a;
@@ -795,6 +840,36 @@ fn test_drain_leak() {
}
#[test]
+fn test_drain_keep_rest() {
+ let mut v = vec![0, 1, 2, 3, 4, 5, 6];
+ let mut drain = v.drain(1..6);
+ assert_eq!(drain.next(), Some(1));
+ assert_eq!(drain.next_back(), Some(5));
+ assert_eq!(drain.next(), Some(2));
+
+ drain.keep_rest();
+ assert_eq!(v, &[0, 3, 4, 6]);
+}
+
+#[test]
+fn test_drain_keep_rest_all() {
+ let mut v = vec![0, 1, 2, 3, 4, 5, 6];
+ v.drain(1..6).keep_rest();
+ assert_eq!(v, &[0, 1, 2, 3, 4, 5, 6]);
+}
+
+#[test]
+fn test_drain_keep_rest_none() {
+ let mut v = vec![0, 1, 2, 3, 4, 5, 6];
+ let mut drain = v.drain(1..6);
+
+ drain.by_ref().for_each(drop);
+
+ drain.keep_rest();
+ assert_eq!(v, &[0, 6]);
+}
+
+#[test]
fn test_splice() {
let mut v = vec![1, 2, 3, 4, 5];
let a = [10, 11, 12];
@@ -1030,6 +1105,12 @@ fn test_into_iter_drop_allocator() {
}
#[test]
+fn test_into_iter_zst() {
+ for _ in vec![[0u64; 0]].into_iter() {}
+ for _ in vec![[0u64; 0]; 5].into_iter().rev() {}
+}
+
+#[test]
fn test_from_iter_specialization() {
let src: Vec<usize> = vec![0usize; 1];
let srcptr = src.as_ptr();
@@ -1110,48 +1191,53 @@ fn test_from_iter_specialization_panic_during_iteration_drops() {
}
#[test]
-fn test_from_iter_specialization_panic_during_drop_leaks() {
- static mut DROP_COUNTER: usize = 0;
+fn test_from_iter_specialization_panic_during_drop_doesnt_leak() {
+ static mut DROP_COUNTER_OLD: [usize; 5] = [0; 5];
+ static mut DROP_COUNTER_NEW: [usize; 2] = [0; 2];
#[derive(Debug)]
- enum Droppable {
- DroppedTwice(Box<i32>),
- PanicOnDrop,
- }
+ struct Old(usize);
- impl Drop for Droppable {
+ impl Drop for Old {
fn drop(&mut self) {
- match self {
- Droppable::DroppedTwice(_) => {
- unsafe {
- DROP_COUNTER += 1;
- }
- println!("Dropping!")
- }
- Droppable::PanicOnDrop => {
- if !std::thread::panicking() {
- panic!();
- }
- }
+ unsafe {
+ DROP_COUNTER_OLD[self.0] += 1;
}
+
+ if self.0 == 3 {
+ panic!();
+ }
+
+ println!("Dropped Old: {}", self.0);
}
}
- let mut to_free: *mut Droppable = core::ptr::null_mut();
- let mut cap = 0;
+ #[derive(Debug)]
+ struct New(usize);
+
+ impl Drop for New {
+ fn drop(&mut self) {
+ unsafe {
+ DROP_COUNTER_NEW[self.0] += 1;
+ }
+
+ println!("Dropped New: {}", self.0);
+ }
+ }
let _ = std::panic::catch_unwind(AssertUnwindSafe(|| {
- let mut v = vec![Droppable::DroppedTwice(Box::new(123)), Droppable::PanicOnDrop];
- to_free = v.as_mut_ptr();
- cap = v.capacity();
- let _ = v.into_iter().take(0).collect::<Vec<_>>();
+ let v = vec![Old(0), Old(1), Old(2), Old(3), Old(4)];
+ let _ = v.into_iter().map(|x| New(x.0)).take(2).collect::<Vec<_>>();
}));
- assert_eq!(unsafe { DROP_COUNTER }, 1);
- // clean up the leak to keep miri happy
- unsafe {
- drop(Vec::from_raw_parts(to_free, 0, cap));
- }
+ assert_eq!(unsafe { DROP_COUNTER_OLD[0] }, 1);
+ assert_eq!(unsafe { DROP_COUNTER_OLD[1] }, 1);
+ assert_eq!(unsafe { DROP_COUNTER_OLD[2] }, 1);
+ assert_eq!(unsafe { DROP_COUNTER_OLD[3] }, 1);
+ assert_eq!(unsafe { DROP_COUNTER_OLD[4] }, 1);
+
+ assert_eq!(unsafe { DROP_COUNTER_NEW[0] }, 1);
+ assert_eq!(unsafe { DROP_COUNTER_NEW[1] }, 1);
}
// regression test for issue #85322. Peekable previously implemented InPlaceIterable,
@@ -1489,6 +1575,35 @@ fn drain_filter_unconsumed() {
}
#[test]
+fn test_drain_filter_keep_rest() {
+ let mut v = vec![0, 1, 2, 3, 4, 5, 6];
+ let mut drain = v.drain_filter(|&mut x| x % 2 == 0);
+ assert_eq!(drain.next(), Some(0));
+ assert_eq!(drain.next(), Some(2));
+
+ drain.keep_rest();
+ assert_eq!(v, &[1, 3, 4, 5, 6]);
+}
+
+#[test]
+fn test_drain_filter_keep_rest_all() {
+ let mut v = vec![0, 1, 2, 3, 4, 5, 6];
+ v.drain_filter(|_| true).keep_rest();
+ assert_eq!(v, &[0, 1, 2, 3, 4, 5, 6]);
+}
+
+#[test]
+fn test_drain_filter_keep_rest_none() {
+ let mut v = vec![0, 1, 2, 3, 4, 5, 6];
+ let mut drain = v.drain_filter(|_| true);
+
+ drain.by_ref().for_each(drop);
+
+ drain.keep_rest();
+ assert_eq!(v, &[]);
+}
+
+#[test]
fn test_reserve_exact() {
// This is all the same as test_reserve
@@ -1527,12 +1642,6 @@ fn test_try_reserve() {
const MAX_CAP: usize = isize::MAX as usize;
const MAX_USIZE: usize = usize::MAX;
- // On 16/32-bit, we check that allocations don't exceed isize::MAX,
- // on 64-bit, we assume the OS will give an OOM for such a ridiculous size.
- // Any platform that succeeds for these requests is technically broken with
- // ptr::offset because LLVM is the worst.
- let guards_against_isize = usize::BITS < 64;
-
{
// Note: basic stuff is checked by test_reserve
let mut empty_bytes: Vec<u8> = Vec::new();
@@ -1546,35 +1655,19 @@ fn test_try_reserve() {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- // Check isize::MAX + 1 does count as overflow
- assert_matches!(
- empty_bytes.try_reserve(MAX_CAP + 1).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
-
- // Check usize::MAX does count as overflow
- assert_matches!(
- empty_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "usize::MAX should trigger an overflow!"
- );
- } else {
- // Check isize::MAX + 1 is an OOM
- assert_matches!(
- empty_bytes.try_reserve(MAX_CAP + 1).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
-
- // Check usize::MAX is an OOM
- assert_matches!(
- empty_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "usize::MAX should trigger an OOM!"
- );
- }
+ // Check isize::MAX + 1 does count as overflow
+ assert_matches!(
+ empty_bytes.try_reserve(MAX_CAP + 1).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
+ // Check usize::MAX does count as overflow
+ assert_matches!(
+ empty_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "usize::MAX should trigger an overflow!"
+ );
}
{
@@ -1587,19 +1680,13 @@ fn test_try_reserve() {
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- assert_matches!(
- ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
- } else {
- assert_matches!(
- ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
- }
+
+ assert_matches!(
+ ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
// Should always overflow in the add-to-len
assert_matches!(
ten_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
@@ -1620,19 +1707,13 @@ fn test_try_reserve() {
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- assert_matches!(
- ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
- } else {
- assert_matches!(
- ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
- }
+
+ assert_matches!(
+ ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
// Should fail in the mul-by-size
assert_matches!(
ten_u32s.try_reserve(MAX_USIZE - 20).map_err(|e| e.kind()),
@@ -1652,8 +1733,6 @@ fn test_try_reserve_exact() {
const MAX_CAP: usize = isize::MAX as usize;
const MAX_USIZE: usize = usize::MAX;
- let guards_against_isize = size_of::<usize>() < 8;
-
{
let mut empty_bytes: Vec<u8> = Vec::new();
@@ -1666,31 +1745,17 @@ fn test_try_reserve_exact() {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- assert_matches!(
- empty_bytes.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
-
- assert_matches!(
- empty_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "usize::MAX should trigger an overflow!"
- );
- } else {
- assert_matches!(
- empty_bytes.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
-
- assert_matches!(
- empty_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "usize::MAX should trigger an OOM!"
- );
- }
+ assert_matches!(
+ empty_bytes.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
+ assert_matches!(
+ empty_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "usize::MAX should trigger an overflow!"
+ );
}
{
@@ -1706,19 +1771,13 @@ fn test_try_reserve_exact() {
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- assert_matches!(
- ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
- } else {
- assert_matches!(
- ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
- }
+
+ assert_matches!(
+ ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
assert_matches!(
ten_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
Err(CapacityOverflow),
@@ -1739,19 +1798,13 @@ fn test_try_reserve_exact() {
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- assert_matches!(
- ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
- } else {
- assert_matches!(
- ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
- }
+
+ assert_matches!(
+ ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
assert_matches!(
ten_u32s.try_reserve_exact(MAX_USIZE - 20).map_err(|e| e.kind()),
Err(CapacityOverflow),
diff --git a/library/alloc/tests/vec_deque.rs b/library/alloc/tests/vec_deque.rs
index 89cc7f905..019d73c0b 100644
--- a/library/alloc/tests/vec_deque.rs
+++ b/library/alloc/tests/vec_deque.rs
@@ -2,7 +2,6 @@ use std::assert_matches::assert_matches;
use std::collections::TryReserveErrorKind::*;
use std::collections::{vec_deque::Drain, VecDeque};
use std::fmt::Debug;
-use std::mem::size_of;
use std::ops::Bound::*;
use std::panic::{catch_unwind, AssertUnwindSafe};
@@ -1161,12 +1160,6 @@ fn test_try_reserve() {
const MAX_CAP: usize = (isize::MAX as usize + 1) / 2 - 1;
const MAX_USIZE: usize = usize::MAX;
- // On 16/32-bit, we check that allocations don't exceed isize::MAX,
- // on 64-bit, we assume the OS will give an OOM for such a ridiculous size.
- // Any platform that succeeds for these requests is technically broken with
- // ptr::offset because LLVM is the worst.
- let guards_against_isize = size_of::<usize>() < 8;
-
{
// Note: basic stuff is checked by test_reserve
let mut empty_bytes: VecDeque<u8> = VecDeque::new();
@@ -1180,31 +1173,19 @@ fn test_try_reserve() {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- // Check isize::MAX + 1 does count as overflow
- assert_matches!(
- empty_bytes.try_reserve(MAX_CAP + 1).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
-
- // Check usize::MAX does count as overflow
- assert_matches!(
- empty_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "usize::MAX should trigger an overflow!"
- );
- } else {
- // Check isize::MAX is an OOM
- // VecDeque starts with capacity 7, always adds 1 to the capacity
- // and also rounds the number to next power of 2 so this is the
- // furthest we can go without triggering CapacityOverflow
- assert_matches!(
- empty_bytes.try_reserve(MAX_CAP).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
- }
+ // Check isize::MAX + 1 does count as overflow
+ assert_matches!(
+ empty_bytes.try_reserve(MAX_CAP + 1).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
+ // Check usize::MAX does count as overflow
+ assert_matches!(
+ empty_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "usize::MAX should trigger an overflow!"
+ );
}
{
@@ -1217,19 +1198,13 @@ fn test_try_reserve() {
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- assert_matches!(
- ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
- } else {
- assert_matches!(
- ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
- }
+
+ assert_matches!(
+ ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
// Should always overflow in the add-to-len
assert_matches!(
ten_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
@@ -1250,19 +1225,13 @@ fn test_try_reserve() {
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- assert_matches!(
- ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
- } else {
- assert_matches!(
- ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
- }
+
+ assert_matches!(
+ ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
// Should fail in the mul-by-size
assert_matches!(
ten_u32s.try_reserve(MAX_USIZE - 20).map_err(|e| e.kind()),
@@ -1282,8 +1251,6 @@ fn test_try_reserve_exact() {
const MAX_CAP: usize = (isize::MAX as usize + 1) / 2 - 1;
const MAX_USIZE: usize = usize::MAX;
- let guards_against_isize = size_of::<usize>() < 8;
-
{
let mut empty_bytes: VecDeque<u8> = VecDeque::new();
@@ -1296,29 +1263,17 @@ fn test_try_reserve_exact() {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- assert_matches!(
- empty_bytes.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
-
- assert_matches!(
- empty_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "usize::MAX should trigger an overflow!"
- );
- } else {
- // Check isize::MAX is an OOM
- // VecDeque starts with capacity 7, always adds 1 to the capacity
- // and also rounds the number to next power of 2 so this is the
- // furthest we can go without triggering CapacityOverflow
- assert_matches!(
- empty_bytes.try_reserve_exact(MAX_CAP).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
- }
+ assert_matches!(
+ empty_bytes.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
+ assert_matches!(
+ empty_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "usize::MAX should trigger an overflow!"
+ );
}
{
@@ -1334,19 +1289,13 @@ fn test_try_reserve_exact() {
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- assert_matches!(
- ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
- } else {
- assert_matches!(
- ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
- }
+
+ assert_matches!(
+ ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
assert_matches!(
ten_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
Err(CapacityOverflow),
@@ -1367,19 +1316,13 @@ fn test_try_reserve_exact() {
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- assert_matches!(
- ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
- } else {
- assert_matches!(
- ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
- }
+
+ assert_matches!(
+ ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
assert_matches!(
ten_u32s.try_reserve_exact(MAX_USIZE - 20).map_err(|e| e.kind()),
Err(CapacityOverflow),
diff --git a/library/backtrace/.github/workflows/main.yml b/library/backtrace/.github/workflows/main.yml
index 5f4bd505b..c11b08dfd 100644
--- a/library/backtrace/.github/workflows/main.yml
+++ b/library/backtrace/.github/workflows/main.yml
@@ -33,8 +33,6 @@ jobs:
rust: stable-i686-msvc
- os: windows-latest
rust: stable-x86_64-gnu
- - os: windows-latest
- rust: stable-i686-gnu
steps:
- uses: actions/checkout@v1
with:
@@ -244,4 +242,4 @@ jobs:
rustup toolchain install nightly --component miri
rustup override set nightly
cargo miri setup
- - run: MIRIFLAGS="-Zmiri-disable-isolation -Zmiri-strict-provenance" cargo miri test
+ - run: MIRIFLAGS="-Zmiri-disable-isolation" cargo miri test
diff --git a/library/backtrace/Cargo.toml b/library/backtrace/Cargo.toml
index 2881c8e8f..ef1c5ec00 100644
--- a/library/backtrace/Cargo.toml
+++ b/library/backtrace/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "backtrace"
-version = "0.3.65"
+version = "0.3.66"
authors = ["The Rust Project Developers"]
build = "build.rs"
license = "MIT/Apache-2.0"
@@ -44,7 +44,7 @@ addr2line = { version = "0.17.0", default-features = false }
miniz_oxide = { version = "0.5.0", default-features = false }
[dependencies.object]
-version = "0.28.0"
+version = "0.29.0"
default-features = false
features = ['read_core', 'elf', 'macho', 'pe', 'unaligned', 'archive']
diff --git a/library/backtrace/src/backtrace/miri.rs b/library/backtrace/src/backtrace/miri.rs
index 9a5f65b80..f8c496428 100644
--- a/library/backtrace/src/backtrace/miri.rs
+++ b/library/backtrace/src/backtrace/miri.rs
@@ -91,7 +91,7 @@ pub fn resolve_addr(ptr: *mut c_void) -> Frame {
}
}
-pub unsafe fn trace_unsynchronized<F: FnMut(&super::Frame) -> bool>(mut cb: F) {
+unsafe fn trace_unsynchronized<F: FnMut(&super::Frame) -> bool>(mut cb: F) {
let len = miri_backtrace_size(0);
let mut frames = Vec::with_capacity(len);
@@ -102,6 +102,8 @@ pub unsafe fn trace_unsynchronized<F: FnMut(&super::Frame) -> bool>(mut cb: F) {
for ptr in frames.iter() {
let frame = resolve_addr(*ptr as *mut c_void);
- cb(&super::Frame { inner: frame });
+ if !cb(&super::Frame { inner: frame }) {
+ return;
+ }
}
}
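The fix makes the Miri backend honor the callback's return value like the native backends do. As a rough illustration of that contract through the crate's public `trace` entry point (an assumption about how a caller would exercise it), returning `false` stops the walk:

```rust
let mut frames = 0;
backtrace::trace(|_frame| {
    frames += 1;
    false // stop after the first frame
});
assert!(frames <= 1);
```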
diff --git a/library/core/benches/iter.rs b/library/core/benches/iter.rs
index 0abe20e4c..38887f29a 100644
--- a/library/core/benches/iter.rs
+++ b/library/core/benches/iter.rs
@@ -1,4 +1,6 @@
use core::iter::*;
+use core::mem;
+use core::num::Wrapping;
use test::{black_box, Bencher};
#[bench]
@@ -364,6 +366,13 @@ fn bench_partial_cmp(b: &mut Bencher) {
}
#[bench]
+fn bench_chain_partial_cmp(b: &mut Bencher) {
+ b.iter(|| {
+ (0..50000).chain(50000..100000).map(black_box).partial_cmp((0..100000).map(black_box))
+ })
+}
+
+#[bench]
fn bench_lt(b: &mut Bencher) {
b.iter(|| (0..100000).map(black_box).lt((0..100000).map(black_box)))
}
@@ -391,3 +400,21 @@ fn bench_trusted_random_access_adapters(b: &mut Bencher) {
acc
})
}
+
+/// Exercises the iter::Copied specialization for slice::Iter
+#[bench]
+fn bench_copied_array_chunks(b: &mut Bencher) {
+ let v = vec![1u8; 1024];
+
+ b.iter(|| {
+ black_box(&v)
+ .iter()
+ .copied()
+ .array_chunks::<{ mem::size_of::<u64>() }>()
+ .map(|ary| {
+ let d = u64::from_ne_bytes(ary);
+ Wrapping(d.rotate_left(7).wrapping_add(1))
+ })
+ .sum::<Wrapping<u64>>()
+ })
+}
diff --git a/library/core/benches/lib.rs b/library/core/benches/lib.rs
index a6c174d2f..1e462e3fc 100644
--- a/library/core/benches/lib.rs
+++ b/library/core/benches/lib.rs
@@ -4,6 +4,7 @@
#![feature(int_log)]
#![feature(test)]
#![feature(trusted_random_access)]
+#![feature(iter_array_chunks)]
extern crate test;
diff --git a/library/core/benches/num/int_log/mod.rs b/library/core/benches/num/int_log/mod.rs
index 19864d2d4..3c01e2998 100644
--- a/library/core/benches/num/int_log/mod.rs
+++ b/library/core/benches/num/int_log/mod.rs
@@ -9,7 +9,7 @@ macro_rules! int_log_bench {
for n in 0..(<$t>::BITS / 8) {
for i in 1..=(100 as $t) {
let x = black_box(i << (n * 8));
- black_box(x.log10());
+ black_box(x.ilog10());
}
}
});
@@ -27,7 +27,7 @@ macro_rules! int_log_bench {
.collect();
bench.iter(|| {
for x in &numbers {
- black_box(black_box(x).log10());
+ black_box(black_box(x).ilog10());
}
});
}
@@ -44,7 +44,7 @@ macro_rules! int_log_bench {
.collect();
bench.iter(|| {
for x in &numbers {
- black_box(black_box(x).log10());
+ black_box(black_box(x).ilog10());
}
});
}
diff --git a/library/core/src/alloc/global.rs b/library/core/src/alloc/global.rs
index 887246c60..6756eecd0 100644
--- a/library/core/src/alloc/global.rs
+++ b/library/core/src/alloc/global.rs
@@ -74,7 +74,7 @@ use crate::ptr;
/// {
/// return null_mut();
/// };
-/// (self.arena.get() as *mut u8).add(allocated)
+/// self.arena.get().cast::<u8>().add(allocated)
/// }
/// unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {}
/// }
diff --git a/library/core/src/alloc/layout.rs b/library/core/src/alloc/layout.rs
index 2f378836c..920e559cc 100644
--- a/library/core/src/alloc/layout.rs
+++ b/library/core/src/alloc/layout.rs
@@ -1,4 +1,11 @@
+// Seemingly inconsequential code changes to this file can lead to measurable
+// performance impact on compilation times, due at least in part to the fact
+// that the layout code gets called from many instantiations of the various
+// collections, resulting in having to optimize down excess IR multiple times.
+// Your performance intuition is useless. Run perf.
+
use crate::cmp;
+use crate::error::Error;
use crate::fmt;
use crate::mem::{self, ValidAlign};
use crate::ptr::NonNull;
@@ -52,16 +59,23 @@ impl Layout {
/// * `align` must be a power of two,
///
/// * `size`, when rounded up to the nearest multiple of `align`,
- /// must not overflow (i.e., the rounded value must be less than
- /// or equal to `usize::MAX`).
+ /// must not overflow isize (i.e., the rounded value must be
+ /// less than or equal to `isize::MAX`).
#[stable(feature = "alloc_layout", since = "1.28.0")]
#[rustc_const_stable(feature = "const_alloc_layout_size_align", since = "1.50.0")]
#[inline]
+ #[rustc_allow_const_fn_unstable(ptr_alignment_type)]
pub const fn from_size_align(size: usize, align: usize) -> Result<Self, LayoutError> {
if !align.is_power_of_two() {
return Err(LayoutError);
}
+ // SAFETY: just checked that align is a power of two.
+ Layout::from_size_valid_align(size, unsafe { ValidAlign::new_unchecked(align) })
+ }
+
+ #[inline(always)]
+ const fn max_size_for_align(align: ValidAlign) -> usize {
// (power-of-two implies align != 0.)
// Rounded up size is:
@@ -76,13 +90,18 @@ impl Layout {
//
// Above implies that checking for summation overflow is both
// necessary and sufficient.
- if size > usize::MAX - (align - 1) {
+ isize::MAX as usize - (align.as_usize() - 1)
+ }
+
+ /// Internal helper constructor to skip revalidating alignment validity.
+ #[inline]
+ const fn from_size_valid_align(size: usize, align: ValidAlign) -> Result<Self, LayoutError> {
+ if size > Self::max_size_for_align(align) {
return Err(LayoutError);
}
- // SAFETY: the conditions for `from_size_align_unchecked` have been
- // checked above.
- unsafe { Ok(Layout::from_size_align_unchecked(size, align)) }
+ // SAFETY: Layout::size invariants checked above.
+ Ok(Layout { size, align })
}
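The tightened invariant is observable through the safe constructor; a minimal sketch of the behavior after this change:

```rust
use std::alloc::Layout;

// `isize::MAX` bytes at alignment 1 rounds up to itself and is accepted...
assert!(Layout::from_size_align(isize::MAX as usize, 1).is_ok());
// ...but the same size at alignment 2 would round up past `isize::MAX`.
assert!(Layout::from_size_align(isize::MAX as usize, 2).is_err());
```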
/// Creates a layout, bypassing all checks.
@@ -95,9 +114,10 @@ impl Layout {
#[rustc_const_stable(feature = "const_alloc_layout_unchecked", since = "1.36.0")]
#[must_use]
#[inline]
+ #[rustc_allow_const_fn_unstable(ptr_alignment_type)]
pub const unsafe fn from_size_align_unchecked(size: usize, align: usize) -> Self {
- // SAFETY: the caller must ensure that `align` is a power of two.
- Layout { size, align: unsafe { ValidAlign::new_unchecked(align) } }
+ // SAFETY: the caller is required to uphold the preconditions.
+ unsafe { Layout { size, align: ValidAlign::new_unchecked(align) } }
}
/// The minimum size in bytes for a memory block of this layout.
@@ -115,8 +135,9 @@ impl Layout {
#[must_use = "this returns the minimum alignment, \
without modifying the layout"]
#[inline]
+ #[rustc_allow_const_fn_unstable(ptr_alignment_type)]
pub const fn align(&self) -> usize {
- self.align.as_nonzero().get()
+ self.align.as_usize()
}
/// Constructs a `Layout` suitable for holding a value of type `T`.
@@ -126,10 +147,9 @@ impl Layout {
#[inline]
pub const fn new<T>() -> Self {
let (size, align) = size_align::<T>();
- // SAFETY: the align is guaranteed by Rust to be a power of two and
- // the size+align combo is guaranteed to fit in our address space. As a
- // result use the unchecked constructor here to avoid inserting code
- // that panics if it isn't optimized well enough.
+ // SAFETY: if the type is instantiated, rustc already ensures that its
+ // layout is valid. Use the unchecked constructor to avoid inserting a
+ // panicking codepath that needs to be optimized out.
unsafe { Layout::from_size_align_unchecked(size, align) }
}
@@ -141,7 +161,6 @@ impl Layout {
#[inline]
pub fn for_value<T: ?Sized>(t: &T) -> Self {
let (size, align) = (mem::size_of_val(t), mem::align_of_val(t));
- debug_assert!(Layout::from_size_align(size, align).is_ok());
// SAFETY: see rationale in `new` for why this is using the unsafe variant
unsafe { Layout::from_size_align_unchecked(size, align) }
}
@@ -176,7 +195,6 @@ impl Layout {
pub unsafe fn for_value_raw<T: ?Sized>(t: *const T) -> Self {
// SAFETY: we pass along the prerequisites of these functions to the caller
let (size, align) = unsafe { (mem::size_of_val_raw(t), mem::align_of_val_raw(t)) };
- debug_assert!(Layout::from_size_align(size, align).is_ok());
// SAFETY: see rationale in `new` for why this is using the unsafe variant
unsafe { Layout::from_size_align_unchecked(size, align) }
}
@@ -276,12 +294,11 @@ impl Layout {
let pad = self.padding_needed_for(self.align());
// This cannot overflow. Quoting from the invariant of Layout:
// > `size`, when rounded up to the nearest multiple of `align`,
- // > must not overflow (i.e., the rounded value must be less than
- // > `usize::MAX`)
+ // > must not overflow isize (i.e., the rounded value must be
+ // > less than or equal to `isize::MAX`)
let new_size = self.size() + pad;
- // SAFETY: self.align is already known to be valid and new_size has been
- // padded already.
+ // SAFETY: padded size is guaranteed to not exceed `isize::MAX`.
unsafe { Layout::from_size_align_unchecked(new_size, self.align()) }
}
@@ -298,14 +315,13 @@ impl Layout {
pub fn repeat(&self, n: usize) -> Result<(Self, usize), LayoutError> {
// This cannot overflow. Quoting from the invariant of Layout:
// > `size`, when rounded up to the nearest multiple of `align`,
- // > must not overflow (i.e., the rounded value must be less than
- // > `usize::MAX`)
+ // > must not overflow isize (i.e., the rounded value must be
+ // > less than or equal to `isize::MAX`)
let padded_size = self.size() + self.padding_needed_for(self.align());
let alloc_size = padded_size.checked_mul(n).ok_or(LayoutError)?;
- // SAFETY: self.align is already known to be valid and alloc_size has been
- // padded already.
- unsafe { Ok((Layout::from_size_align_unchecked(alloc_size, self.align()), padded_size)) }
+ // The safe constructor is called here to enforce the isize size limit.
+ Layout::from_size_valid_align(alloc_size, self.align).map(|layout| (layout, padded_size))
}
/// Creates a layout describing the record for `self` followed by
@@ -356,13 +372,14 @@ impl Layout {
#[stable(feature = "alloc_layout_manipulation", since = "1.44.0")]
#[inline]
pub fn extend(&self, next: Self) -> Result<(Self, usize), LayoutError> {
- let new_align = cmp::max(self.align(), next.align());
+ let new_align = cmp::max(self.align, next.align);
let pad = self.padding_needed_for(next.align());
let offset = self.size().checked_add(pad).ok_or(LayoutError)?;
let new_size = offset.checked_add(next.size()).ok_or(LayoutError)?;
- let layout = Layout::from_size_align(new_size, new_align)?;
+ // The safe constructor is called here to enforce the isize size limit.
+ let layout = Layout::from_size_valid_align(new_size, new_align)?;
Ok((layout, offset))
}
@@ -382,7 +399,8 @@ impl Layout {
#[inline]
pub fn repeat_packed(&self, n: usize) -> Result<Self, LayoutError> {
let size = self.size().checked_mul(n).ok_or(LayoutError)?;
- Layout::from_size_align(size, self.align())
+ // The safe constructor is called here to enforce the isize size limit.
+ Layout::from_size_valid_align(size, self.align)
}
/// Creates a layout describing the record for `self` followed by
@@ -395,26 +413,39 @@ impl Layout {
#[inline]
pub fn extend_packed(&self, next: Self) -> Result<Self, LayoutError> {
let new_size = self.size().checked_add(next.size()).ok_or(LayoutError)?;
- Layout::from_size_align(new_size, self.align())
+ // The safe constructor is called here to enforce the isize size limit.
+ Layout::from_size_valid_align(new_size, self.align)
}
/// Creates a layout describing the record for a `[T; n]`.
///
- /// On arithmetic overflow, returns `LayoutError`.
+ /// On arithmetic overflow or when the total size would exceed
+ /// `isize::MAX`, returns `LayoutError`.
#[stable(feature = "alloc_layout_manipulation", since = "1.44.0")]
#[inline]
pub fn array<T>(n: usize) -> Result<Self, LayoutError> {
- let array_size = mem::size_of::<T>().checked_mul(n).ok_or(LayoutError)?;
-
- // SAFETY:
- // - Size: `array_size` cannot be too big because `size_of::<T>()` must
- // be a multiple of `align_of::<T>()`. Therefore, `array_size`
- // rounded up to the nearest multiple of `align_of::<T>()` is just
- // `array_size`. And `array_size` cannot be too big because it was
- // just checked by the `checked_mul()`.
- // - Alignment: `align_of::<T>()` will always give an acceptable
- // (non-zero, power of two) alignment.
- Ok(unsafe { Layout::from_size_align_unchecked(array_size, mem::align_of::<T>()) })
+ // Reduce the amount of code we need to monomorphize per `T`.
+ return inner(mem::size_of::<T>(), ValidAlign::of::<T>(), n);
+
+ #[inline]
+ fn inner(element_size: usize, align: ValidAlign, n: usize) -> Result<Layout, LayoutError> {
+ // We need to check two things about the size:
+ // - That the total size won't overflow a `usize`, and
+ // - That the total size still fits in an `isize`.
+ // By using division we can check them both with a single threshold.
+ // That'd usually be a bad idea, but thankfully here the element size
+ // and alignment are constants, so the compiler will fold all of it.
+ if element_size != 0 && n > Layout::max_size_for_align(align) / element_size {
+ return Err(LayoutError);
+ }
+
+ let array_size = element_size * n;
+
+ // SAFETY: We just checked above that the `array_size` will not
+ // exceed `isize::MAX` even when rounded up to the alignment.
+ // And `ValidAlign` guarantees it's a power of two.
+ unsafe { Ok(Layout::from_size_align_unchecked(array_size, align.as_usize())) }
+ }
}
}
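A small sketch of the new `isize::MAX` rejection from the caller's point of view (the element count assumes a 64-bit target):

```rust
use std::alloc::Layout;

// Comfortably within the limit.
assert!(Layout::array::<u16>(1_000).is_ok());
// Two bytes per element times this count would exceed `isize::MAX`,
// so the single division-based threshold check rejects it.
assert!(Layout::array::<u16>(isize::MAX as usize / 2 + 1).is_err());
```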
@@ -434,6 +465,9 @@ pub type LayoutErr = LayoutError;
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct LayoutError;
+#[stable(feature = "alloc_layout", since = "1.28.0")]
+impl Error for LayoutError {}
+
// (we need this for downstream impl of trait Error)
#[stable(feature = "alloc_layout", since = "1.28.0")]
impl fmt::Display for LayoutError {
diff --git a/library/core/src/alloc/mod.rs b/library/core/src/alloc/mod.rs
index 6cc6e359e..a4bf6a853 100644
--- a/library/core/src/alloc/mod.rs
+++ b/library/core/src/alloc/mod.rs
@@ -21,6 +21,7 @@ pub use self::layout::LayoutErr;
#[stable(feature = "alloc_layout_error", since = "1.50.0")]
pub use self::layout::LayoutError;
+use crate::error::Error;
use crate::fmt;
use crate::ptr::{self, NonNull};
@@ -32,6 +33,13 @@ use crate::ptr::{self, NonNull};
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct AllocError;
+#[unstable(
+ feature = "allocator_api",
+ reason = "the precise API and guarantees it provides may be tweaked.",
+ issue = "32838"
+)]
+impl Error for AllocError {}
+
// (we need this for downstream impl of trait Error)
#[unstable(feature = "allocator_api", issue = "32838")]
impl fmt::Display for AllocError {
@@ -97,6 +105,7 @@ impl fmt::Display for AllocError {
///
/// [*currently allocated*]: #currently-allocated-memory
#[unstable(feature = "allocator_api", issue = "32838")]
+#[const_trait]
pub unsafe trait Allocator {
/// Attempts to allocate a block of memory.
///
diff --git a/library/core/src/any.rs b/library/core/src/any.rs
index f20c497a1..1a379ecc1 100644
--- a/library/core/src/any.rs
+++ b/library/core/src/any.rs
@@ -1,7 +1,4 @@
-//! This module contains the `Any` trait, which enables dynamic typing
-//! of any `'static` type through runtime reflection. It also contains the
-//! `Provider` trait and accompanying API, which enable trait objects to provide
-//! data based on typed requests, an alternate form of runtime reflection.
+//! Utilities for dynamic typing or type reflection.
//!
//! # `Any` and `TypeId`
//!
@@ -799,7 +796,7 @@ pub trait Provider {
/// impl Provider for SomeConcreteType {
/// fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
/// demand.provide_ref::<str>(&self.field)
- /// .provide_value::<i32>(|| self.num_field);
+ /// .provide_value::<i32>(self.num_field);
/// }
/// }
/// ```
@@ -884,28 +881,55 @@ impl<'a> Demand<'a> {
///
/// # Examples
///
+ /// Provides a `u8`.
+ ///
+ /// ```rust
+ /// #![feature(provide_any)]
+ ///
+ /// use std::any::{Provider, Demand};
+ /// # struct SomeConcreteType { field: u8 }
+ ///
+ /// impl Provider for SomeConcreteType {
+ /// fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
+ /// demand.provide_value::<u8>(self.field);
+ /// }
+ /// }
+ /// ```
+ #[unstable(feature = "provide_any", issue = "96024")]
+ pub fn provide_value<T>(&mut self, value: T) -> &mut Self
+ where
+ T: 'static,
+ {
+ self.provide::<tags::Value<T>>(value)
+ }
+
+ /// Provide a value or other type with only static lifetimes, computed using a closure.
+ ///
+ /// # Examples
+ ///
/// Provides a `String` by cloning.
///
/// ```rust
- /// # #![feature(provide_any)]
+ /// #![feature(provide_any)]
+ ///
/// use std::any::{Provider, Demand};
/// # struct SomeConcreteType { field: String }
///
/// impl Provider for SomeConcreteType {
/// fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
- /// demand.provide_value::<String>(|| self.field.clone());
+ /// demand.provide_value_with::<String>(|| self.field.clone());
/// }
/// }
/// ```
#[unstable(feature = "provide_any", issue = "96024")]
- pub fn provide_value<T>(&mut self, fulfil: impl FnOnce() -> T) -> &mut Self
+ pub fn provide_value_with<T>(&mut self, fulfil: impl FnOnce() -> T) -> &mut Self
where
T: 'static,
{
self.provide_with::<tags::Value<T>>(fulfil)
}
- /// Provide a reference, note that the referee type must be bounded by `'static`,
+ /// Provide a reference. The referee type must be bounded by `'static`,
/// but may be unsized.
///
/// # Examples
@@ -913,7 +937,8 @@ impl<'a> Demand<'a> {
/// Provides a reference to a field as a `&str`.
///
/// ```rust
- /// # #![feature(provide_any)]
+ /// #![feature(provide_any)]
+ ///
/// use std::any::{Provider, Demand};
/// # struct SomeConcreteType { field: String }
///
@@ -928,6 +953,40 @@ impl<'a> Demand<'a> {
self.provide::<tags::Ref<tags::MaybeSizedValue<T>>>(value)
}
+ /// Provide a reference computed using a closure. The referee type
+ /// must be bounded by `'static`, but may be unsized.
+ ///
+ /// # Examples
+ ///
+ /// Provides a reference to a field as a `&str`.
+ ///
+ /// ```rust
+ /// #![feature(provide_any)]
+ ///
+ /// use std::any::{Provider, Demand};
+ /// # struct SomeConcreteType { business: String, party: String }
+ /// # fn today_is_a_weekday() -> bool { true }
+ ///
+ /// impl Provider for SomeConcreteType {
+ /// fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
+ /// demand.provide_ref_with::<str>(|| {
+ /// if today_is_a_weekday() {
+ /// &self.business
+ /// } else {
+ /// &self.party
+ /// }
+ /// });
+ /// }
+ /// }
+ /// ```
+ #[unstable(feature = "provide_any", issue = "96024")]
+ pub fn provide_ref_with<T: ?Sized + 'static>(
+ &mut self,
+ fulfil: impl FnOnce() -> &'a T,
+ ) -> &mut Self {
+ self.provide_with::<tags::Ref<tags::MaybeSizedValue<T>>>(fulfil)
+ }
+
/// Provide a value with the given `Type` tag.
fn provide<I>(&mut self, value: I::Reified) -> &mut Self
where
@@ -949,6 +1008,156 @@ impl<'a> Demand<'a> {
}
self
}
+
+ /// Check if the `Demand` would be satisfied if provided with a
+ /// value of the specified type. If the type does not match or has
+ /// already been provided, returns false.
+ ///
+ /// # Examples
+ ///
+ /// Check if a `u8` still needs to be provided and then provides
+ /// it.
+ ///
+ /// ```rust
+ /// #![feature(provide_any)]
+ ///
+ /// use std::any::{Provider, Demand};
+ ///
+ /// struct Parent(Option<u8>);
+ ///
+ /// impl Provider for Parent {
+ /// fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
+ /// if let Some(v) = self.0 {
+ /// demand.provide_value::<u8>(v);
+ /// }
+ /// }
+ /// }
+ ///
+ /// struct Child {
+ /// parent: Parent,
+ /// }
+ ///
+ /// impl Child {
+ /// // Pretend that this takes a lot of resources to evaluate.
+ /// fn an_expensive_computation(&self) -> Option<u8> {
+ /// Some(99)
+ /// }
+ /// }
+ ///
+ /// impl Provider for Child {
+ /// fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
+ /// // In general, we don't know if this call will provide
+ /// // a `u8` value or not...
+ /// self.parent.provide(demand);
+ ///
+ /// // ...so we check to see if the `u8` is needed before
+ /// // we run our expensive computation.
+ /// if demand.would_be_satisfied_by_value_of::<u8>() {
+ /// if let Some(v) = self.an_expensive_computation() {
+ /// demand.provide_value::<u8>(v);
+ /// }
+ /// }
+ ///
+ /// // The demand will be satisfied now, regardless of whether
+ /// // the parent provided the value or we did.
+ /// assert!(!demand.would_be_satisfied_by_value_of::<u8>());
+ /// }
+ /// }
+ ///
+ /// let parent = Parent(Some(42));
+ /// let child = Child { parent };
+ /// assert_eq!(Some(42), std::any::request_value::<u8>(&child));
+ ///
+ /// let parent = Parent(None);
+ /// let child = Child { parent };
+ /// assert_eq!(Some(99), std::any::request_value::<u8>(&child));
+ /// ```
+ #[unstable(feature = "provide_any", issue = "96024")]
+ pub fn would_be_satisfied_by_value_of<T>(&self) -> bool
+ where
+ T: 'static,
+ {
+ self.would_be_satisfied_by::<tags::Value<T>>()
+ }
+
+ /// Check if the `Demand` would be satisfied if provided with a
+ /// reference to a value of the specified type. If the type does
+ /// not match or has already been provided, returns false.
+ ///
+ /// # Examples
+ ///
+ /// Check if a `&str` still needs to be provided and then provides
+ /// it.
+ ///
+ /// ```rust
+ /// #![feature(provide_any)]
+ ///
+ /// use std::any::{Provider, Demand};
+ ///
+ /// struct Parent(Option<String>);
+ ///
+ /// impl Provider for Parent {
+ /// fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
+ /// if let Some(v) = &self.0 {
+ /// demand.provide_ref::<str>(v);
+ /// }
+ /// }
+ /// }
+ ///
+ /// struct Child {
+ /// parent: Parent,
+ /// name: String,
+ /// }
+ ///
+ /// impl Child {
+ /// // Pretend that this takes a lot of resources to evaluate.
+ /// fn an_expensive_computation(&self) -> Option<&str> {
+ /// Some(&self.name)
+ /// }
+ /// }
+ ///
+ /// impl Provider for Child {
+ /// fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
+ /// // In general, we don't know if this call will provide
+ /// // a `str` reference or not...
+ /// self.parent.provide(demand);
+ ///
+ /// // ...so we check to see if the `&str` is needed before
+ /// // we run our expensive computation.
+ /// if demand.would_be_satisfied_by_ref_of::<str>() {
+ /// if let Some(v) = self.an_expensive_computation() {
+ /// demand.provide_ref::<str>(v);
+ /// }
+ /// }
+ ///
+ /// // The demand will be satisfied now, regardless of whether
+ /// // the parent provided the reference or we did.
+ /// assert!(!demand.would_be_satisfied_by_ref_of::<str>());
+ /// }
+ /// }
+ ///
+ /// let parent = Parent(Some("parent".into()));
+ /// let child = Child { parent, name: "child".into() };
+ /// assert_eq!(Some("parent"), std::any::request_ref::<str>(&child));
+ ///
+ /// let parent = Parent(None);
+ /// let child = Child { parent, name: "child".into() };
+ /// assert_eq!(Some("child"), std::any::request_ref::<str>(&child));
+ /// ```
+ #[unstable(feature = "provide_any", issue = "96024")]
+ pub fn would_be_satisfied_by_ref_of<T>(&self) -> bool
+ where
+ T: ?Sized + 'static,
+ {
+ self.would_be_satisfied_by::<tags::Ref<tags::MaybeSizedValue<T>>>()
+ }
+
+ fn would_be_satisfied_by<I>(&self) -> bool
+ where
+ I: tags::Type<'a>,
+ {
+ matches!(self.0.downcast::<I>(), Some(TaggedOption(None)))
+ }
}
#[unstable(feature = "provide_any", issue = "96024")]
@@ -1053,6 +1262,21 @@ impl<'a> dyn Erased<'a> + 'a {
/// Returns some reference to the dynamic value if it is tagged with `I`,
/// or `None` otherwise.
#[inline]
+ fn downcast<I>(&self) -> Option<&TaggedOption<'a, I>>
+ where
+ I: tags::Type<'a>,
+ {
+ if self.tag_id() == TypeId::of::<I>() {
+ // SAFETY: Just checked whether we're pointing to an I.
+ Some(unsafe { &*(self as *const Self).cast::<TaggedOption<'a, I>>() })
+ } else {
+ None
+ }
+ }
+
+ /// Returns some mutable reference to the dynamic value if it is tagged with `I`,
+ /// or `None` otherwise.
+ #[inline]
fn downcast_mut<I>(&mut self) -> Option<&mut TaggedOption<'a, I>>
where
I: tags::Type<'a>,
diff --git a/library/core/src/array/equality.rs b/library/core/src/array/equality.rs
index 33f7f494e..b2c895f88 100644
--- a/library/core/src/array/equality.rs
+++ b/library/core/src/array/equality.rs
@@ -173,13 +173,14 @@ macro_rules! is_raw_eq_comparable {
)+};
}
-// SAFETY: All the ordinary integer types allow all bit patterns as distinct values
+// SAFETY: All the ordinary integer types have no padding, and are not pointers.
is_raw_eq_comparable!(u8, u16, u32, u64, u128, usize, i8, i16, i32, i64, i128, isize);
-// SAFETY: bool and char have *niches*, but no *padding*, so this is sound
+// SAFETY: bool and char have *niches*, but no *padding* (and these are not pointer types), so this
+// is sound
is_raw_eq_comparable!(bool, char);
-// SAFETY: Similarly, the non-zero types have a niche, but no undef,
+// SAFETY: Similarly, the non-zero types have a niche, but no undef and no pointers,
// and they compare like their underlying numeric type.
is_raw_eq_comparable!(
NonZeroU8,
diff --git a/library/core/src/array/iter.rs b/library/core/src/array/iter.rs
index f4885ed9f..b91c63018 100644
--- a/library/core/src/array/iter.rs
+++ b/library/core/src/array/iter.rs
@@ -1,10 +1,10 @@
//! Defines the `IntoIter` owned iterator for arrays.
use crate::{
- cmp, fmt,
+ fmt,
iter::{self, ExactSizeIterator, FusedIterator, TrustedLen},
mem::{self, MaybeUninit},
- ops::Range,
+ ops::{IndexRange, Range},
ptr,
};
@@ -29,9 +29,10 @@ pub struct IntoIter<T, const N: usize> {
/// The elements in `data` that have not been yielded yet.
///
/// Invariants:
- /// - `alive.start <= alive.end`
/// - `alive.end <= N`
- alive: Range<usize>,
+ ///
+ /// (And the `IndexRange` type requires `alive.start <= alive.end`.)
+ alive: IndexRange,
}
// Note: the `#[rustc_skip_array_during_method_dispatch]` on `trait IntoIterator`
@@ -69,7 +70,7 @@ impl<T, const N: usize> IntoIterator for [T; N] {
// Until then, we can use `mem::transmute_copy` to create a bitwise copy
// as a different type, then forget `array` so that it is not dropped.
unsafe {
- let iter = IntoIter { data: mem::transmute_copy(&self), alive: 0..N };
+ let iter = IntoIter { data: mem::transmute_copy(&self), alive: IndexRange::zero_to(N) };
mem::forget(self);
iter
}
@@ -103,8 +104,7 @@ impl<T, const N: usize> IntoIter<T, N> {
///
/// ```
/// #![feature(array_into_iter_constructors)]
- ///
- /// #![feature(maybe_uninit_array_assume_init)]
+ /// #![feature(maybe_uninit_uninit_array_transpose)]
/// #![feature(maybe_uninit_uninit_array)]
/// use std::array::IntoIter;
/// use std::mem::MaybeUninit;
@@ -133,7 +133,7 @@ impl<T, const N: usize> IntoIter<T, N> {
/// }
///
/// // SAFETY: We've initialized all N items
- /// unsafe { Ok(MaybeUninit::array_assume_init(buffer)) }
+ /// unsafe { Ok(buffer.transpose().assume_init()) }
/// }
///
/// let r: [_; 4] = next_chunk(&mut (10..16)).unwrap();
@@ -147,7 +147,9 @@ impl<T, const N: usize> IntoIter<T, N> {
buffer: [MaybeUninit<T>; N],
initialized: Range<usize>,
) -> Self {
- Self { data: buffer, alive: initialized }
+ // SAFETY: one of our safety conditions is that the range is canonical.
+ let alive = unsafe { IndexRange::new_unchecked(initialized.start, initialized.end) };
+ Self { data: buffer, alive }
}
/// Creates an iterator over `T` which returns no elements.
@@ -283,16 +285,11 @@ impl<T, const N: usize> Iterator for IntoIter<T, N> {
}
fn advance_by(&mut self, n: usize) -> Result<(), usize> {
- let len = self.len();
-
- // The number of elements to drop. Always in-bounds by construction.
- let delta = cmp::min(n, len);
+ let original_len = self.len();
- let range_to_drop = self.alive.start..(self.alive.start + delta);
-
- // Moving the start marks them as conceptually "dropped", so if anything
- // goes bad then our drop impl won't double-free them.
- self.alive.start += delta;
+ // This also moves the start, which marks them as conceptually "dropped",
+ // so if anything goes bad then our drop impl won't double-free them.
+ let range_to_drop = self.alive.take_prefix(n);
// SAFETY: These elements are currently initialized, so it's fine to drop them.
unsafe {
@@ -300,7 +297,7 @@ impl<T, const N: usize> Iterator for IntoIter<T, N> {
ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(slice));
}
- if n > len { Err(len) } else { Ok(()) }
+ if n > original_len { Err(original_len) } else { Ok(()) }
}
}
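`IndexRange` is a core-internal range type whose `start <= end` invariant lets `len` and the new `take_prefix`/`take_suffix` calls drop the manual `cmp::min` and underflow reasoning the old code needed. As a rough model of the `take_prefix(n)` behavior relied on here (the type and method below are an illustrative stand-in, not the actual core item):

```rust
/// A simplified stand-in for the core-internal `IndexRange` (illustration only).
struct SimpleIndexRange {
    start: usize,
    end: usize, // invariant: start <= end
}

impl SimpleIndexRange {
    fn len(&self) -> usize {
        self.end - self.start
    }

    /// Splits off up to `n` leading indices, advancing `start` past them.
    fn take_prefix(&mut self, n: usize) -> std::ops::Range<usize> {
        let taken = n.min(self.len());
        let prefix = self.start..self.start + taken;
        self.start += taken;
        prefix
    }
}

fn main() {
    let mut alive = SimpleIndexRange { start: 2, end: 7 };
    assert_eq!(alive.take_prefix(3), 2..5);
    assert_eq!(alive.len(), 2);
    // Requesting more than remains yields only what is left, never underflowing.
    assert_eq!(alive.take_prefix(10), 5..7);
    assert_eq!(alive.len(), 0);
}
```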
@@ -338,16 +335,11 @@ impl<T, const N: usize> DoubleEndedIterator for IntoIter<T, N> {
}
fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
- let len = self.len();
-
- // The number of elements to drop. Always in-bounds by construction.
- let delta = cmp::min(n, len);
-
- let range_to_drop = (self.alive.end - delta)..self.alive.end;
+ let original_len = self.len();
- // Moving the end marks them as conceptually "dropped", so if anything
- // goes bad then our drop impl won't double-free them.
- self.alive.end -= delta;
+ // This also moves the end, which marks them as conceptually "dropped",
+ // so if anything goes bad then our drop impl won't double-free them.
+ let range_to_drop = self.alive.take_suffix(n);
// SAFETY: These elements are currently initialized, so it's fine to drop them.
unsafe {
@@ -355,7 +347,7 @@ impl<T, const N: usize> DoubleEndedIterator for IntoIter<T, N> {
ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(slice));
}
- if n > len { Err(len) } else { Ok(()) }
+ if n > original_len { Err(original_len) } else { Ok(()) }
}
}
@@ -372,9 +364,7 @@ impl<T, const N: usize> Drop for IntoIter<T, N> {
#[stable(feature = "array_value_iter_impls", since = "1.40.0")]
impl<T, const N: usize> ExactSizeIterator for IntoIter<T, N> {
fn len(&self) -> usize {
- // Will never underflow due to the invariant `alive.start <=
- // alive.end`.
- self.alive.end - self.alive.start
+ self.alive.len()
}
fn is_empty(&self) -> bool {
self.alive.is_empty()
@@ -396,14 +386,15 @@ impl<T: Clone, const N: usize> Clone for IntoIter<T, N> {
fn clone(&self) -> Self {
// Note, we don't really need to match the exact same alive range, so
// we can just clone into offset 0 regardless of where `self` is.
- let mut new = Self { data: MaybeUninit::uninit_array(), alive: 0..0 };
+ let mut new = Self { data: MaybeUninit::uninit_array(), alive: IndexRange::zero_to(0) };
// Clone all alive elements.
for (src, dst) in iter::zip(self.as_slice(), &mut new.data) {
// Write a clone into the new array, then update its alive range.
// If cloning panics, we'll correctly drop the previous items.
dst.write(src.clone());
- new.alive.end += 1;
+ // This addition cannot overflow as we're iterating a slice
+ new.alive = IndexRange::zero_to(new.alive.end() + 1);
}
new
diff --git a/library/core/src/array/mod.rs b/library/core/src/array/mod.rs
index c9823a136..eae0e1c76 100644
--- a/library/core/src/array/mod.rs
+++ b/library/core/src/array/mod.rs
@@ -1,4 +1,4 @@
-//! Helper functions and types for fixed-length arrays.
+//! Utilities for the array primitive type.
//!
//! *[See also the array primitive type](array).*
@@ -7,6 +7,7 @@
use crate::borrow::{Borrow, BorrowMut};
use crate::cmp::Ordering;
use crate::convert::{Infallible, TryFrom};
+use crate::error::Error;
use crate::fmt;
use crate::hash::{self, Hash};
use crate::iter::TrustedLen;
@@ -31,6 +32,10 @@ pub use iter::IntoIter;
/// # Example
///
/// ```rust
+/// // Type inference helps here: `from_fn` learns how many elements to
+/// // produce from the length of the array below. Only arrays of equal
+/// // length can be compared, so the const generic parameter `N` is
+/// // inferred as 5, producing an array of 5 elements.
/// let array = core::array::from_fn(|i| i);
/// assert_eq!(array, [0, 1, 2, 3, 4]);
/// ```
@@ -119,6 +124,14 @@ impl fmt::Display for TryFromSliceError {
}
}
+#[stable(feature = "try_from", since = "1.34.0")]
+impl Error for TryFromSliceError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ self.__description()
+ }
+}
+
impl TryFromSliceError {
#[unstable(
feature = "array_error_internals",
@@ -173,6 +186,18 @@ impl<T, const N: usize> const BorrowMut<[T]> for [T; N] {
}
}
+/// Tries to create an array `[T; N]` by copying from a slice `&[T]`. Succeeds if
+/// `slice.len() == N`.
+///
+/// ```
+/// let bytes: [u8; 3] = [1, 0, 2];
+///
+/// let bytes_head: [u8; 2] = <[u8; 2]>::try_from(&bytes[0..2]).unwrap();
+/// assert_eq!(1, u16::from_le_bytes(bytes_head));
+///
+/// let bytes_tail: [u8; 2] = bytes[1..3].try_into().unwrap();
+/// assert_eq!(512, u16::from_le_bytes(bytes_tail));
+/// ```
#[stable(feature = "try_from", since = "1.34.0")]
impl<T, const N: usize> TryFrom<&[T]> for [T; N]
where
@@ -185,6 +210,18 @@ where
}
}
+/// Tries to create an array `[T; N]` by copying from a mutable slice `&mut [T]`.
+/// Succeeds if `slice.len() == N`.
+///
+/// ```
+/// let mut bytes: [u8; 3] = [1, 0, 2];
+///
+/// let bytes_head: [u8; 2] = <[u8; 2]>::try_from(&mut bytes[0..2]).unwrap();
+/// assert_eq!(1, u16::from_le_bytes(bytes_head));
+///
+/// let bytes_tail: [u8; 2] = (&mut bytes[1..3]).try_into().unwrap();
+/// assert_eq!(512, u16::from_le_bytes(bytes_tail));
+/// ```
#[stable(feature = "try_from_mut_slice_to_array", since = "1.59.0")]
impl<T, const N: usize> TryFrom<&mut [T]> for [T; N]
where
@@ -197,6 +234,18 @@ where
}
}
+/// Tries to create an array ref `&[T; N]` from a slice ref `&[T]`. Succeeds if
+/// `slice.len() == N`.
+///
+/// ```
+/// let bytes: [u8; 3] = [1, 0, 2];
+///
+/// let bytes_head: &[u8; 2] = <&[u8; 2]>::try_from(&bytes[0..2]).unwrap();
+/// assert_eq!(1, u16::from_le_bytes(*bytes_head));
+///
+/// let bytes_tail: &[u8; 2] = bytes[1..3].try_into().unwrap();
+/// assert_eq!(512, u16::from_le_bytes(*bytes_tail));
+/// ```
#[stable(feature = "try_from", since = "1.34.0")]
impl<'a, T, const N: usize> TryFrom<&'a [T]> for &'a [T; N] {
type Error = TryFromSliceError;
@@ -212,6 +261,18 @@ impl<'a, T, const N: usize> TryFrom<&'a [T]> for &'a [T; N] {
}
}
+/// Tries to create a mutable array ref `&mut [T; N]` from a mutable slice ref
+/// `&mut [T]`. Succeeds if `slice.len() == N`.
+///
+/// ```
+/// let mut bytes: [u8; 3] = [1, 0, 2];
+///
+/// let bytes_head: &mut [u8; 2] = <&mut [u8; 2]>::try_from(&mut bytes[0..2]).unwrap();
+/// assert_eq!(1, u16::from_le_bytes(*bytes_head));
+///
+/// let bytes_tail: &mut [u8; 2] = (&mut bytes[1..3]).try_into().unwrap();
+/// assert_eq!(512, u16::from_le_bytes(*bytes_tail));
+/// ```
#[stable(feature = "try_from", since = "1.34.0")]
impl<'a, T, const N: usize> TryFrom<&'a mut [T]> for &'a mut [T; N] {
type Error = TryFromSliceError;
@@ -375,7 +436,8 @@ impl<T: Copy> SpecArrayClone for T {
macro_rules! array_impl_default {
{$n:expr, $t:ident $($ts:ident)*} => {
#[stable(since = "1.4.0", feature = "array_default")]
- impl<T> Default for [T; $n] where T: Default {
+ #[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
+ impl<T> const Default for [T; $n] where T: ~const Default {
fn default() -> [T; $n] {
[$t::default(), $($ts::default()),*]
}
@@ -854,7 +916,7 @@ where
mem::forget(guard);
// SAFETY: All elements of the array were populated in the loop above.
- let output = unsafe { MaybeUninit::array_assume_init(array) };
+ let output = unsafe { array.transpose().assume_init() };
Ok(Try::from_output(output))
}
diff --git a/library/core/src/bool.rs b/library/core/src/bool.rs
index f7a8aa0d9..db1c505ba 100644
--- a/library/core/src/bool.rs
+++ b/library/core/src/bool.rs
@@ -6,12 +6,30 @@ impl bool {
/// Returns `Some(t)` if the `bool` is [`true`](../std/keyword.true.html),
/// or `None` otherwise.
///
+ /// Arguments passed to `then_some` are eagerly evaluated; if you are
+ /// passing the result of a function call, it is recommended to use
+ /// [`then`], which is lazily evaluated.
+ ///
+ /// [`then`]: bool::then
+ ///
/// # Examples
///
/// ```
/// assert_eq!(false.then_some(0), None);
/// assert_eq!(true.then_some(0), Some(0));
/// ```
+ ///
+ /// ```
+ /// let mut a = 0;
+ /// let mut function_with_side_effects = || { a += 1; };
+ ///
+ /// true.then_some(function_with_side_effects());
+ /// false.then_some(function_with_side_effects());
+ ///
+ /// // `a` is incremented twice because the value passed to `then_some` is
+ /// // evaluated eagerly.
+ /// assert_eq!(a, 2);
+ /// ```
#[stable(feature = "bool_to_option", since = "1.62.0")]
#[rustc_const_unstable(feature = "const_bool_to_option", issue = "91917")]
#[inline]
@@ -31,6 +49,17 @@ impl bool {
/// assert_eq!(false.then(|| 0), None);
/// assert_eq!(true.then(|| 0), Some(0));
/// ```
+ ///
+ /// ```
+ /// let mut a = 0;
+ ///
+ /// true.then(|| { a += 1; });
+ /// false.then(|| { a += 1; });
+ ///
+ /// // `a` is incremented once because the closure is evaluated lazily by
+ /// // `then`.
+ /// assert_eq!(a, 1);
+ /// ```
#[stable(feature = "lazy_bool_to_option", since = "1.50.0")]
#[rustc_const_unstable(feature = "const_bool_to_option", issue = "91917")]
#[inline]
diff --git a/library/core/src/borrow.rs b/library/core/src/borrow.rs
index 58eabecf3..fdd56cb4e 100644
--- a/library/core/src/borrow.rs
+++ b/library/core/src/borrow.rs
@@ -1,4 +1,4 @@
-//! A module for working with borrowed data.
+//! Utilities for working with borrowed data.
#![stable(feature = "rust1", since = "1.0.0")]
@@ -154,6 +154,7 @@
/// [`String`]: ../../std/string/struct.String.html
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_diagnostic_item = "Borrow"]
+#[const_trait]
pub trait Borrow<Borrowed: ?Sized> {
/// Immutably borrows from an owned value.
///
@@ -184,6 +185,7 @@ pub trait Borrow<Borrowed: ?Sized> {
/// an underlying type by providing a mutable reference. See [`Borrow<T>`]
/// for more information on borrowing as another type.
#[stable(feature = "rust1", since = "1.0.0")]
+#[const_trait]
pub trait BorrowMut<Borrowed: ?Sized>: Borrow<Borrowed> {
/// Mutably borrows from an owned value.
///
diff --git a/library/core/src/cell.rs b/library/core/src/cell.rs
index fb4454c94..7bf32cb0d 100644
--- a/library/core/src/cell.rs
+++ b/library/core/src/cell.rs
@@ -405,6 +405,7 @@ impl<T> Cell<T> {
/// assert_eq!(cell.replace(10), 5);
/// assert_eq!(cell.get(), 10);
/// ```
+ #[inline]
#[stable(feature = "move_cell", since = "1.17.0")]
pub fn replace(&self, val: T) -> T {
// SAFETY: This can cause data races if called from a separate thread,
@@ -614,6 +615,7 @@ impl<T, const N: usize> Cell<[T; N]> {
/// A mutable memory location with dynamically checked borrow rules
///
/// See the [module-level documentation](self) for more.
+#[cfg_attr(not(test), rustc_diagnostic_item = "RefCell")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RefCell<T: ?Sized> {
borrow: Cell<BorrowFlag>,
@@ -1021,15 +1023,18 @@ impl<T: ?Sized> RefCell<T> {
/// Returns a mutable reference to the underlying data.
///
- /// This call borrows `RefCell` mutably (at compile-time) so there is no
- /// need for dynamic checks.
+ /// Since this method borrows `RefCell` mutably, it is statically guaranteed
+ /// that no borrows of the underlying data exist. The dynamic checks inherent
+ /// in [`borrow_mut`] and most other methods of `RefCell` are therefore
+ /// unnecessary.
///
- /// However be cautious: this method expects `self` to be mutable, which is
- /// generally not the case when using a `RefCell`. Take a look at the
- /// [`borrow_mut`] method instead if `self` isn't mutable.
+ /// This method can only be called if `RefCell` can be mutably borrowed,
+ /// which in general is only the case directly after the `RefCell` has
+ /// been created. In these situations, skipping the aforementioned dynamic
+ /// borrowing checks may yield better ergonomics and runtime performance.
///
- /// Also, please be aware that this method is only for special circumstances and is usually
- /// not what you want. In case of doubt, use [`borrow_mut`] instead.
+ /// In most situations where `RefCell` is used, it can't be borrowed mutably.
+ /// Use [`borrow_mut`] in those cases to get mutable access to the underlying data.
///
/// [`borrow_mut`]: RefCell::borrow_mut()
///
@@ -1811,6 +1816,61 @@ impl<T: ?Sized + fmt::Display> fmt::Display for RefMut<'_, T> {
///
/// [`.get_mut()`]: `UnsafeCell::get_mut`
///
+/// # Memory layout
+///
+/// `UnsafeCell<T>` has the same in-memory representation as its inner type `T`. A consequence
+/// of this guarantee is that it is possible to convert between `T` and `UnsafeCell<T>`.
+/// Special care has to be taken when converting a nested `T` inside of an `Outer<T>` type
+/// to an `Outer<UnsafeCell<T>>` type: this is not sound when the `Outer<T>` type enables [niche]
+/// optimizations. For example, the type `Option<NonNull<u8>>` is typically 8 bytes large on
+/// 64-bit platforms, but the type `Option<UnsafeCell<NonNull<u8>>>` takes up 16 bytes of space.
+/// Therefore this is not a valid conversion, despite `NonNull<u8>` and `UnsafeCell<NonNull<u8>>`
+/// having the same memory layout. This is because `UnsafeCell` disables niche optimizations in
+/// order to keep its interior mutability property from spreading from `T` into the `Outer` type,
+/// which can distort the size of the outer type in these cases.
+///
+/// Note that the only valid way to obtain a `*mut T` pointer to the contents of a
+/// _shared_ `UnsafeCell<T>` is through [`.get()`] or [`.raw_get()`]. A `&mut T` reference
+/// can be obtained by either dereferencing this pointer or by calling [`.get_mut()`]
+/// on an _exclusive_ `UnsafeCell<T>`. Even though `T` and `UnsafeCell<T>` have the
+/// same memory layout, the following is not allowed and undefined behavior:
+///
+/// ```rust,no_run
+/// # use std::cell::UnsafeCell;
+/// unsafe fn not_allowed<T>(ptr: &UnsafeCell<T>) -> &mut T {
+/// let t = ptr as *const UnsafeCell<T> as *mut T;
+/// // This is undefined behavior, because the `*mut T` pointer
+/// // was not obtained through `.get()` nor `.raw_get()`:
+/// unsafe { &mut *t }
+/// }
+/// ```
+///
+/// Instead, do this:
+///
+/// ```rust
+/// # use std::cell::UnsafeCell;
+/// // Safety: the caller must ensure that there are no references that
+/// // point to the *contents* of the `UnsafeCell`.
+/// unsafe fn get_mut<T>(ptr: &UnsafeCell<T>) -> &mut T {
+/// unsafe { &mut *ptr.get() }
+/// }
+/// ```
+///
+/// Converting in the other direction from a `&mut T`
+/// to an `&UnsafeCell<T>` is allowed:
+///
+/// ```rust
+/// # use std::cell::UnsafeCell;
+/// fn get_shared<T>(ptr: &mut T) -> &UnsafeCell<T> {
+/// let t = ptr as *mut T as *const UnsafeCell<T>;
+/// // SAFETY: `T` and `UnsafeCell<T>` have the same memory layout
+/// unsafe { &*t }
+/// }
+/// ```
+///
+/// [niche]: https://rust-lang.github.io/unsafe-code-guidelines/glossary.html#niche
+/// [`.raw_get()`]: `UnsafeCell::raw_get`
+///
/// # Examples
///
/// Here is an example showcasing how to soundly mutate the contents of an `UnsafeCell<_>` despite
diff --git a/library/core/src/char/decode.rs b/library/core/src/char/decode.rs
index 71297acd1..11f1c30f6 100644
--- a/library/core/src/char/decode.rs
+++ b/library/core/src/char/decode.rs
@@ -1,5 +1,6 @@
//! UTF-8 and UTF-16 decoding iterators
+use crate::error::Error;
use crate::fmt;
use super::from_u32_unchecked;
@@ -121,3 +122,11 @@ impl fmt::Display for DecodeUtf16Error {
write!(f, "unpaired surrogate found: {:x}", self.code)
}
}
+
+#[stable(feature = "decode_utf16", since = "1.9.0")]
+impl Error for DecodeUtf16Error {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "unpaired surrogate found"
+ }
+}
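`DecodeUtf16Error` can now flow through generic `Error`-based handling. A short sketch of how such an error is produced and reported:

```rust
use std::error::Error;

fn main() {
    // 0xD800 is an unpaired high surrogate, so decoding reports an error
    // carrying the offending code unit.
    let err = std::char::decode_utf16([0xD800u16]).next().unwrap().unwrap_err();
    assert_eq!(err.unpaired_surrogate(), 0xD800);

    // The new impl lets it be handled as a `dyn Error` like any other error.
    let _as_dyn: &dyn Error = &err;
    println!("{err}");
}
```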
diff --git a/library/core/src/char/methods.rs b/library/core/src/char/methods.rs
index eae567cad..bb8359936 100644
--- a/library/core/src/char/methods.rs
+++ b/library/core/src/char/methods.rs
@@ -597,9 +597,14 @@ impl char {
/// Returns the number of 16-bit code units this `char` would need if
/// encoded in UTF-16.
///
+ /// That number of code units is always either 1 or 2, for Unicode scalar values in
+ /// the [basic multilingual plane] or [supplementary planes] respectively.
+ ///
/// See the documentation for [`len_utf8()`] for more explanation of this
/// concept. This function is a mirror, but for UTF-16 instead of UTF-8.
///
+ /// [basic multilingual plane]: http://www.unicode.org/glossary/#basic_multilingual_plane
+ /// [supplementary planes]: http://www.unicode.org/glossary/#supplementary_planes
/// [`len_utf8()`]: #method.len_utf8
///
/// # Examples
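A quick check of the added claim that `len_utf16` returns 1 for scalar values in the Basic Multilingual Plane and 2 for supplementary-plane ones:

```rust
fn main() {
    // U+00DF LATIN SMALL LETTER SHARP S sits in the BMP: one code unit.
    assert_eq!('ß'.len_utf16(), 1);
    // U+1D11E MUSICAL SYMBOL G CLEF is in a supplementary plane: two code units.
    assert_eq!('𝄞'.len_utf16(), 2);
}
```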
@@ -746,10 +751,19 @@ impl char {
/// assert!(!'中'.is_lowercase());
/// assert!(!' '.is_lowercase());
/// ```
+ ///
+ /// In a const context:
+ ///
+ /// ```
+ /// #![feature(const_unicode_case_lookup)]
+ /// const CAPITAL_DELTA_IS_LOWERCASE: bool = 'Δ'.is_lowercase();
+ /// assert!(!CAPITAL_DELTA_IS_LOWERCASE);
+ /// ```
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_unicode_case_lookup", issue = "101400")]
#[inline]
- pub fn is_lowercase(self) -> bool {
+ pub const fn is_lowercase(self) -> bool {
match self {
'a'..='z' => true,
c => c > '\x7f' && unicode::Lowercase(c),
@@ -779,10 +793,19 @@ impl char {
/// assert!(!'中'.is_uppercase());
/// assert!(!' '.is_uppercase());
/// ```
+ ///
+ /// In a const context:
+ ///
+ /// ```
+ /// #![feature(const_unicode_case_lookup)]
+ /// const CAPITAL_DELTA_IS_UPPERCASE: bool = 'Δ'.is_uppercase();
+ /// assert!(CAPITAL_DELTA_IS_UPPERCASE);
+ /// ```
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_unicode_case_lookup", issue = "101400")]
#[inline]
- pub fn is_uppercase(self) -> bool {
+ pub const fn is_uppercase(self) -> bool {
match self {
'A'..='Z' => true,
c => c > '\x7f' && unicode::Uppercase(c),
@@ -892,8 +915,7 @@ impl char {
///
/// The general categories for numbers (`Nd` for decimal digits, `Nl` for letter-like numeric
/// characters, and `No` for other numeric characters) are specified in the [Unicode Character
- /// Database][ucd] [`UnicodeData.txt`]. Note that this means ideographic numbers like '三'
- /// are considered alphabetic, not numeric. Please consider to use `is_ascii_digit` or `is_digit`.
+ /// Database][ucd] [`UnicodeData.txt`].
///
/// This method doesn't cover everything that could be considered a number, e.g. ideographic numbers like '三'.
/// If you want everything including characters with overlapping purposes then you might want to use
@@ -1427,6 +1449,38 @@ impl char {
matches!(*self, '0'..='9')
}
+ /// Checks if the value is an ASCII octal digit:
+ /// U+0030 '0' ..= U+0037 '7'.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(is_ascii_octdigit)]
+ ///
+ /// let uppercase_a = 'A';
+ /// let a = 'a';
+ /// let zero = '0';
+ /// let seven = '7';
+ /// let nine = '9';
+ /// let percent = '%';
+ /// let lf = '\n';
+ ///
+ /// assert!(!uppercase_a.is_ascii_octdigit());
+ /// assert!(!a.is_ascii_octdigit());
+ /// assert!(zero.is_ascii_octdigit());
+ /// assert!(seven.is_ascii_octdigit());
+ /// assert!(!nine.is_ascii_octdigit());
+ /// assert!(!percent.is_ascii_octdigit());
+ /// assert!(!lf.is_ascii_octdigit());
+ /// ```
+ #[must_use]
+ #[unstable(feature = "is_ascii_octdigit", issue = "101288")]
+ #[rustc_const_unstable(feature = "is_ascii_octdigit", issue = "101288")]
+ #[inline]
+ pub const fn is_ascii_octdigit(&self) -> bool {
+ matches!(*self, '0'..='7')
+ }
+
/// Checks if the value is an ASCII hexadecimal digit:
///
/// - U+0030 '0' ..= U+0039 '9', or
diff --git a/library/core/src/char/mod.rs b/library/core/src/char/mod.rs
index 0df23e7bb..b34a71216 100644
--- a/library/core/src/char/mod.rs
+++ b/library/core/src/char/mod.rs
@@ -1,4 +1,6 @@
-//! A character type.
+//! Utilities for the `char` primitive type.
+//!
+//! *[See also the `char` primitive type](primitive@char).*
//!
//! The `char` type represents a single character. More specifically, since
//! 'character' isn't a well-defined concept in Unicode, `char` is a '[Unicode
@@ -36,6 +38,7 @@ pub use self::methods::encode_utf16_raw;
#[unstable(feature = "char_internals", reason = "exposed only for libstd", issue = "none")]
pub use self::methods::encode_utf8_raw;
+use crate::error::Error;
use crate::fmt::{self, Write};
use crate::iter::FusedIterator;
@@ -582,3 +585,6 @@ impl fmt::Display for TryFromCharError {
"unicode code point out of range".fmt(fmt)
}
}
+
+#[stable(feature = "u8_from_char", since = "1.59.0")]
+impl Error for TryFromCharError {}
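`TryFromCharError` is the error returned by the `u8: TryFrom<char>` conversion; the new impl lets it be handled through `dyn Error`. A small sketch:

```rust
use std::error::Error;

fn main() {
    // Only U+0000..=U+00FF fit into a `u8`.
    assert_eq!(u8::try_from('a').unwrap(), b'a');

    let err = u8::try_from('π').unwrap_err();
    // With the impl above, the error can be reported generically.
    let _as_dyn: &dyn Error = &err;
    println!("{err}");
}
```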
diff --git a/library/core/src/cmp.rs b/library/core/src/cmp.rs
index 20bb67687..f0fa2e1d2 100644
--- a/library/core/src/cmp.rs
+++ b/library/core/src/cmp.rs
@@ -1,6 +1,6 @@
-//! Functionality for ordering and comparison.
+//! Utilities for comparing and ordering values.
//!
-//! This module contains various tools for ordering and comparing values. In
+//! This module contains various tools for comparing and ordering values. In
//! summary:
//!
//! * [`Eq`] and [`PartialEq`] are traits that allow you to define total and
@@ -22,7 +22,9 @@
#![stable(feature = "rust1", since = "1.0.0")]
+use crate::const_closure::ConstFnMutClosure;
use crate::marker::Destruct;
+use crate::marker::StructuralPartialEq;
use self::Ordering::*;
@@ -38,8 +40,10 @@ use self::Ordering::*;
///
/// Implementations must ensure that `eq` and `ne` are consistent with each other:
///
-/// - `a != b` if and only if `!(a == b)`
-/// (ensured by the default implementation).
+/// - `a != b` if and only if `!(a == b)`.
+///
+/// The default implementation of `ne` provides this consistency and is almost
+/// always sufficient. It should not be overridden without very good reason.
///
/// If [`PartialOrd`] or [`Ord`] are also implemented for `Self` and `Rhs`, their methods must also
/// be consistent with `PartialEq` (see the documentation of those traits for the exact
@@ -201,20 +205,10 @@ use self::Ordering::*;
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(alias = "==")]
#[doc(alias = "!=")]
-#[cfg_attr(
- bootstrap,
- rustc_on_unimplemented(
- message = "can't compare `{Self}` with `{Rhs}`",
- label = "no implementation for `{Self} == {Rhs}`"
- )
-)]
-#[cfg_attr(
- not(bootstrap),
- rustc_on_unimplemented(
- message = "can't compare `{Self}` with `{Rhs}`",
- label = "no implementation for `{Self} == {Rhs}`",
- append_const_msg,
- )
+#[rustc_on_unimplemented(
+ message = "can't compare `{Self}` with `{Rhs}`",
+ label = "no implementation for `{Self} == {Rhs}`",
+ append_const_msg
)]
#[const_trait]
#[rustc_diagnostic_item = "PartialEq"]
@@ -225,7 +219,8 @@ pub trait PartialEq<Rhs: ?Sized = Self> {
#[stable(feature = "rust1", since = "1.0.0")]
fn eq(&self, other: &Rhs) -> bool;
- /// This method tests for `!=`.
+ /// This method tests for `!=`. The default implementation is almost always
+ /// sufficient, and should not be overridden without very good reason.
#[inline]
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
@@ -335,7 +330,7 @@ pub struct AssertParamIsEq<T: Eq + ?Sized> {
/// let result = 2.cmp(&1);
/// assert_eq!(Ordering::Greater, result);
/// ```
-#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]
+#[derive(Clone, Copy, Eq, Debug, Hash)]
#[stable(feature = "rust1", since = "1.0.0")]
#[repr(i8)]
pub enum Ordering {
@@ -882,6 +877,18 @@ pub macro Ord($item:item) {
}
#[stable(feature = "rust1", since = "1.0.0")]
+impl StructuralPartialEq for Ordering {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+impl const PartialEq for Ordering {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ (*self as i32).eq(&(*other as i32))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
impl const Ord for Ordering {
#[inline]
@@ -1060,20 +1067,10 @@ impl const PartialOrd for Ordering {
#[doc(alias = "<")]
#[doc(alias = "<=")]
#[doc(alias = ">=")]
-#[cfg_attr(
- bootstrap,
- rustc_on_unimplemented(
- message = "can't compare `{Self}` with `{Rhs}`",
- label = "no implementation for `{Self} < {Rhs}` and `{Self} > {Rhs}`",
- )
-)]
-#[cfg_attr(
- not(bootstrap),
- rustc_on_unimplemented(
- message = "can't compare `{Self}` with `{Rhs}`",
- label = "no implementation for `{Self} < {Rhs}` and `{Self} > {Rhs}`",
- append_const_msg,
- )
+#[rustc_on_unimplemented(
+ message = "can't compare `{Self}` with `{Rhs}`",
+ label = "no implementation for `{Self} < {Rhs}` and `{Self} > {Rhs}`",
+ append_const_msg
)]
#[const_trait]
#[rustc_diagnostic_item = "PartialOrd"]
@@ -1139,11 +1136,7 @@ pub trait PartialOrd<Rhs: ?Sized = Self>: PartialEq<Rhs> {
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
fn le(&self, other: &Rhs) -> bool {
- // Pattern `Some(Less | Eq)` optimizes worse than negating `None | Some(Greater)`.
- // FIXME: The root cause was fixed upstream in LLVM with:
- // https://github.com/llvm/llvm-project/commit/9bad7de9a3fb844f1ca2965f35d0c2a3d1e11775
- // Revert this workaround once support for LLVM 12 gets dropped.
- !matches!(self.partial_cmp(other), None | Some(Greater))
+ matches!(self.partial_cmp(other), Some(Less | Equal))
}
/// This method tests greater than (for `self` and `other`) and is used by the `>` operator.
@@ -1230,7 +1223,12 @@ pub const fn min<T: ~const Ord + ~const Destruct>(v1: T, v2: T) -> T {
#[inline]
#[must_use]
#[stable(feature = "cmp_min_max_by", since = "1.53.0")]
-pub fn min_by<T, F: FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T {
+#[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+pub const fn min_by<T, F: ~const FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T
+where
+ T: ~const Destruct,
+ F: ~const Destruct,
+{
match compare(&v1, &v2) {
Ordering::Less | Ordering::Equal => v1,
Ordering::Greater => v2,
@@ -1252,8 +1250,24 @@ pub fn min_by<T, F: FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T {
#[inline]
#[must_use]
#[stable(feature = "cmp_min_max_by", since = "1.53.0")]
-pub fn min_by_key<T, F: FnMut(&T) -> K, K: Ord>(v1: T, v2: T, mut f: F) -> T {
- min_by(v1, v2, |v1, v2| f(v1).cmp(&f(v2)))
+#[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+pub const fn min_by_key<T, F: ~const FnMut(&T) -> K, K: ~const Ord>(v1: T, v2: T, mut f: F) -> T
+where
+ T: ~const Destruct,
+ F: ~const Destruct,
+ K: ~const Destruct,
+{
+ const fn imp<T, F: ~const FnMut(&T) -> K, K: ~const Ord>(
+ f: &mut F,
+ (v1, v2): (&T, &T),
+ ) -> Ordering
+ where
+ T: ~const Destruct,
+ K: ~const Destruct,
+ {
+ f(v1).cmp(&f(v2))
+ }
+ min_by(v1, v2, ConstFnMutClosure::new(&mut f, imp))
}
/// Compares and returns the maximum of two values.
@@ -1294,7 +1308,12 @@ pub const fn max<T: ~const Ord + ~const Destruct>(v1: T, v2: T) -> T {
#[inline]
#[must_use]
#[stable(feature = "cmp_min_max_by", since = "1.53.0")]
-pub fn max_by<T, F: FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T {
+#[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+pub const fn max_by<T, F: ~const FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T
+where
+ T: ~const Destruct,
+ F: ~const Destruct,
+{
match compare(&v1, &v2) {
Ordering::Less | Ordering::Equal => v2,
Ordering::Greater => v1,
@@ -1316,8 +1335,24 @@ pub fn max_by<T, F: FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T {
#[inline]
#[must_use]
#[stable(feature = "cmp_min_max_by", since = "1.53.0")]
-pub fn max_by_key<T, F: FnMut(&T) -> K, K: Ord>(v1: T, v2: T, mut f: F) -> T {
- max_by(v1, v2, |v1, v2| f(v1).cmp(&f(v2)))
+#[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+pub const fn max_by_key<T, F: ~const FnMut(&T) -> K, K: ~const Ord>(v1: T, v2: T, mut f: F) -> T
+where
+ T: ~const Destruct,
+ F: ~const Destruct,
+ K: ~const Destruct,
+{
+ const fn imp<T, F: ~const FnMut(&T) -> K, K: ~const Ord>(
+ f: &mut F,
+ (v1, v2): (&T, &T),
+ ) -> Ordering
+ where
+ T: ~const Destruct,
+ K: ~const Destruct,
+ {
+ f(v1).cmp(&f(v2))
+ }
+ max_by(v1, v2, ConstFnMutClosure::new(&mut f, imp))
}
// Implementation of PartialEq, Eq, PartialOrd and Ord for primitive types
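`min_by`, `min_by_key`, `max_by`, and `max_by_key` keep their stable runtime behavior; the hunks above only make them callable in `const` contexts (behind `const_cmp`) by routing the key comparison through `ConstFnMutClosure` instead of a closure. For reference, the unchanged runtime semantics:

```rust
use std::cmp;

fn main() {
    // The key is the absolute value: |1| = 1 is the smaller key, |-2| = 2 the larger.
    assert_eq!(cmp::min_by_key(-2, 1, |x: &i32| x.abs()), 1);
    assert_eq!(cmp::max_by_key(-2, 1, |x: &i32| x.abs()), -2);
}
```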
diff --git a/library/core/src/const_closure.rs b/library/core/src/const_closure.rs
new file mode 100644
index 000000000..9e9c02093
--- /dev/null
+++ b/library/core/src/const_closure.rs
@@ -0,0 +1,77 @@
+use crate::marker::Destruct;
+
+/// Struct representing a closure with mutably borrowed data.
+///
+/// Example:
+/// ```no_build
+/// #![feature(const_mut_refs)]
+/// use crate::const_closure::ConstFnMutClosure;
+/// const fn imp(state: &mut i32, (arg,): (i32,)) -> i32 {
+/// *state += arg;
+/// *state
+/// }
+/// let mut i = 5;
+/// let mut cl = ConstFnMutClosure::new(&mut i, imp);
+///
+/// assert!(7 == cl(2));
+/// assert!(8 == cl(1));
+/// ```
+pub(crate) struct ConstFnMutClosure<CapturedData, Function> {
+ /// The data captured by the closure.
+ /// Must be either a (mutable) reference or a tuple of (mutable) references.
+ pub data: CapturedData,
+ /// The function of the closure; must be `Fn(CapturedData, ClosureArgs) -> ClosureReturn`.
+ pub func: Function,
+}
+impl<'a, CapturedData: ?Sized, Function> ConstFnMutClosure<&'a mut CapturedData, Function> {
+ /// Function for creating a new closure.
+ ///
+ /// `data` is a mutable borrow of the data captured from the environment.
+ /// If you want `data` to be a tuple of mutable borrows, the struct must be constructed manually.
+ ///
+ /// `func` is the function of the closure; it receives the captured data and a tuple of the
+ /// closure's arguments, and returns the closure's return value.
+ pub(crate) const fn new<ClosureArguments, ClosureReturnValue>(
+ data: &'a mut CapturedData,
+ func: Function,
+ ) -> Self
+ where
+ Function: ~const Fn(&mut CapturedData, ClosureArguments) -> ClosureReturnValue,
+ {
+ Self { data, func }
+ }
+}
+
+macro_rules! impl_fn_mut_tuple {
+ ($($var:ident)*) => {
+ #[allow(unused_parens)]
+ impl<'a, $($var,)* ClosureArguments, Function, ClosureReturnValue> const
+ FnOnce<ClosureArguments> for ConstFnMutClosure<($(&'a mut $var),*), Function>
+ where
+ Function: ~const Fn(($(&mut $var),*), ClosureArguments) -> ClosureReturnValue+ ~const Destruct,
+ {
+ type Output = ClosureReturnValue;
+
+ extern "rust-call" fn call_once(mut self, args: ClosureArguments) -> Self::Output {
+ self.call_mut(args)
+ }
+ }
+ #[allow(unused_parens)]
+ impl<'a, $($var,)* ClosureArguments, Function, ClosureReturnValue> const
+ FnMut<ClosureArguments> for ConstFnMutClosure<($(&'a mut $var),*), Function>
+ where
+ Function: ~const Fn(($(&mut $var),*), ClosureArguments)-> ClosureReturnValue,
+ {
+ extern "rust-call" fn call_mut(&mut self, args: ClosureArguments) -> Self::Output {
+ #[allow(non_snake_case)]
+ let ($($var),*) = &mut self.data;
+ (self.func)(($($var),*), args)
+ }
+ }
+ };
+}
+impl_fn_mut_tuple!(A);
+impl_fn_mut_tuple!(A B);
+impl_fn_mut_tuple!(A B C);
+impl_fn_mut_tuple!(A B C D);
+impl_fn_mut_tuple!(A B C D E);
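The helper above exists because ordinary closures cannot yet implement the `Fn*` traits in `const` contexts, so the captured state and the closure body are split into a struct plus a plain function. A stable-Rust sketch of the same shape, without the nightly-only `const` `Fn*` impls (the names below are illustrative, not the real core item):

```rust
// Bundle mutably borrowed state with a plain `fn` so the pair can stand in
// for a stateful closure; the real `ConstFnMutClosure` additionally
// implements `FnOnce`/`FnMut` so it can be passed where closures are expected.
struct FnMutClosureSketch<'a, State, Args, Ret> {
    data: &'a mut State,
    func: fn(&mut State, Args) -> Ret,
}

impl<'a, State, Args, Ret> FnMutClosureSketch<'a, State, Args, Ret> {
    fn call(&mut self, args: Args) -> Ret {
        (self.func)(self.data, args)
    }
}

fn add(state: &mut i32, (arg,): (i32,)) -> i32 {
    *state += arg;
    *state
}

fn main() {
    let mut total = 5;
    let mut cl = FnMutClosureSketch { data: &mut total, func: add };
    assert_eq!(cl.call((2,)), 7);
    assert_eq!(cl.call((1,)), 8);
}
```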
diff --git a/library/core/src/convert/mod.rs b/library/core/src/convert/mod.rs
index b30c8a4ae..33493964b 100644
--- a/library/core/src/convert/mod.rs
+++ b/library/core/src/convert/mod.rs
@@ -25,6 +25,7 @@
//! # Generic Implementations
//!
//! - [`AsRef`] and [`AsMut`] auto-dereference if the inner type is a reference
+//! (but not generally for all [dereferenceable types][core::ops::Deref])
//! - [`From`]`<U> for T` implies [`Into`]`<T> for U`
//! - [`TryFrom`]`<U> for T` implies [`TryInto`]`<T> for U`
//! - [`From`] and [`Into`] are reflexive, which means that all types can
@@ -34,6 +35,7 @@
#![stable(feature = "rust1", since = "1.0.0")]
+use crate::error::Error;
use crate::fmt;
use crate::hash::{Hash, Hasher};
@@ -108,10 +110,12 @@ pub const fn identity<T>(x: T) -> T {
/// If you need to do a costly conversion it is better to implement [`From`] with type
/// `&T` or write a custom function.
///
+/// # Relation to `Borrow`
+///
/// `AsRef` has the same signature as [`Borrow`], but [`Borrow`] is different in a few aspects:
///
/// - Unlike `AsRef`, [`Borrow`] has a blanket impl for any `T`, and can be used to accept either
-/// a reference or a value.
+/// a reference or a value. (See also the note on `AsRef`'s reflexivity below.)
/// - [`Borrow`] also requires that [`Hash`], [`Eq`] and [`Ord`] for a borrowed value are
/// equivalent to those of the owned value. For this reason, if you want to
/// borrow only a single field of a struct you can implement `AsRef`, but not [`Borrow`].
@@ -121,9 +125,66 @@ pub const fn identity<T>(x: T) -> T {
///
/// # Generic Implementations
///
-/// - `AsRef` auto-dereferences if the inner type is a reference or a mutable
-/// reference (e.g.: `foo.as_ref()` will work the same if `foo` has type
-/// `&mut Foo` or `&&mut Foo`)
+/// `AsRef` auto-dereferences if the inner type is a reference or a mutable reference
+/// (e.g.: `foo.as_ref()` will work the same if `foo` has type `&mut Foo` or `&&mut Foo`).
+///
+/// Note that, for historical reasons, the above currently does not hold generally for all
+/// [dereferenceable types], e.g. `foo.as_ref()` will *not* work the same as
+/// `Box::new(foo).as_ref()`. Instead, many smart pointers provide an `as_ref` implementation which
+/// simply returns a reference to the [pointed-to value] (but does not perform a cheap
+/// reference-to-reference conversion for that value). However, [`AsRef::as_ref`] should not be
+/// used for the sole purpose of dereferencing; instead ['`Deref` coercion'] can be used:
+///
+/// [dereferenceable types]: core::ops::Deref
+/// [pointed-to value]: core::ops::Deref::Target
+/// ['`Deref` coercion']: core::ops::Deref#more-on-deref-coercion
+///
+/// ```
+/// let x = Box::new(5i32);
+/// // Avoid this:
+/// // let y: &i32 = x.as_ref();
+/// // Better just write:
+/// let y: &i32 = &x;
+/// ```
+///
+/// Types which implement [`Deref`] should consider implementing `AsRef<T>` as follows:
+///
+/// [`Deref`]: core::ops::Deref
+///
+/// ```
+/// # use core::ops::Deref;
+/// # struct SomeType;
+/// # impl Deref for SomeType {
+/// # type Target = [u8];
+/// # fn deref(&self) -> &[u8] {
+/// # &[]
+/// # }
+/// # }
+/// impl<T> AsRef<T> for SomeType
+/// where
+/// T: ?Sized,
+/// <SomeType as Deref>::Target: AsRef<T>,
+/// {
+/// fn as_ref(&self) -> &T {
+/// self.deref().as_ref()
+/// }
+/// }
+/// ```
+///
+/// # Reflexivity
+///
+/// Ideally, `AsRef` would be reflexive, i.e. there would be an `impl<T: ?Sized> AsRef<T> for T`
+/// with [`as_ref`] simply returning its argument unchanged.
+/// Such a blanket implementation is currently *not* provided due to technical restrictions of
+/// Rust's type system (it would be overlapping with another existing blanket implementation for
+/// `&T where T: AsRef<U>` which allows `AsRef` to auto-dereference, see "Generic Implementations"
+/// above).
+///
+/// [`as_ref`]: AsRef::as_ref
+///
+/// A trivial implementation of `AsRef<T> for T` must be added explicitly for a particular type `T`
+/// where needed or desired. Note, however, that not all types from `std` contain such an
+/// implementation, and those cannot be added by external code due to orphan rules.
///
/// # Examples
///
@@ -153,6 +214,7 @@ pub const fn identity<T>(x: T) -> T {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "AsRef")]
+#[const_trait]
pub trait AsRef<T: ?Sized> {
/// Converts this type into a shared reference of the (usually inferred) input type.
#[stable(feature = "rust1", since = "1.0.0")]
@@ -170,31 +232,141 @@ pub trait AsRef<T: ?Sized> {
///
/// # Generic Implementations
///
-/// - `AsMut` auto-dereferences if the inner type is a mutable reference
-/// (e.g.: `foo.as_mut()` will work the same if `foo` has type `&mut Foo`
-/// or `&mut &mut Foo`)
+/// `AsMut` auto-dereferences if the inner type is a mutable reference
+/// (e.g.: `foo.as_mut()` will work the same if `foo` has type `&mut Foo` or `&mut &mut Foo`).
+///
+/// Note that, for historical reasons, the above currently does not hold generally for all
+/// [mutably dereferenceable types], e.g. `foo.as_mut()` will *not* work the same as
+/// `Box::new(foo).as_mut()`. Instead, many smart pointers provide an `as_mut` implementation which
+/// simply returns a reference to the [pointed-to value] (but does not perform a cheap
+/// reference-to-reference conversion for that value). However, [`AsMut::as_mut`] should not be
+/// used for the sole purpose of mutable dereferencing; instead ['`Deref` coercion'] can be used:
+///
+/// [mutably dereferenceable types]: core::ops::DerefMut
+/// [pointed-to value]: core::ops::Deref::Target
+/// ['`Deref` coercion']: core::ops::DerefMut#more-on-deref-coercion
+///
+/// ```
+/// let mut x = Box::new(5i32);
+/// // Avoid this:
+/// // let y: &mut i32 = x.as_mut();
+/// // Better just write:
+/// let y: &mut i32 = &mut x;
+/// ```
+///
+/// Types which implement [`DerefMut`] should consider adding an implementation of `AsMut<T>` as
+/// follows:
+///
+/// [`DerefMut`]: core::ops::DerefMut
+///
+/// ```
+/// # use core::ops::{Deref, DerefMut};
+/// # struct SomeType;
+/// # impl Deref for SomeType {
+/// # type Target = [u8];
+/// # fn deref(&self) -> &[u8] {
+/// # &[]
+/// # }
+/// # }
+/// # impl DerefMut for SomeType {
+/// # fn deref_mut(&mut self) -> &mut [u8] {
+/// # &mut []
+/// # }
+/// # }
+/// impl<T> AsMut<T> for SomeType
+/// where
+/// <SomeType as Deref>::Target: AsMut<T>,
+/// {
+/// fn as_mut(&mut self) -> &mut T {
+/// self.deref_mut().as_mut()
+/// }
+/// }
+/// ```
+///
+/// # Reflexivity
+///
+/// Ideally, `AsMut` would be reflexive, i.e. there would be an `impl<T: ?Sized> AsMut<T> for T`
+/// with [`as_mut`] simply returning its argument unchanged.
+/// Such a blanket implementation is currently *not* provided due to technical restrictions of
+/// Rust's type system (it would be overlapping with another existing blanket implementation for
+/// `&mut T where T: AsMut<U>` which allows `AsMut` to auto-dereference, see "Generic
+/// Implementations" above).
+///
+/// [`as_mut`]: AsMut::as_mut
+///
+/// A trivial implementation of `AsMut<T> for T` must be added explicitly for a particular type `T`
+/// where needed or desired. Note, however, that not all types from `std` contain such an
+/// implementation, and those cannot be added by external code due to orphan rules.
///
/// # Examples
///
-/// Using `AsMut` as trait bound for a generic function we can accept all mutable references
-/// that can be converted to type `&mut T`. Because [`Box<T>`] implements `AsMut<T>` we can
-/// write a function `add_one` that takes all arguments that can be converted to `&mut u64`.
-/// Because [`Box<T>`] implements `AsMut<T>`, `add_one` accepts arguments of type
-/// `&mut Box<u64>` as well:
+/// Using `AsMut` as trait bound for a generic function, we can accept all mutable references that
+/// can be converted to type `&mut T`. Unlike [dereference], which has a single [target type],
+/// there can be multiple implementations of `AsMut` for a type. In particular, `Vec<T>` implements
+/// both `AsMut<Vec<T>>` and `AsMut<[T]>`.
+///
+/// In the following, the example functions `caesar` and `null_terminate` provide a generic
+/// interface which works with any type that can be converted by cheap mutable-to-mutable conversion
+/// into a byte slice (`[u8]`) or byte vector (`Vec<u8>`), respectively.
+///
+/// [dereference]: core::ops::DerefMut
+/// [target type]: core::ops::Deref::Target
///
/// ```
-/// fn add_one<T: AsMut<u64>>(num: &mut T) {
-/// *num.as_mut() += 1;
+/// struct Document {
+/// info: String,
+/// content: Vec<u8>,
+/// }
+///
+/// impl<T: ?Sized> AsMut<T> for Document
+/// where
+/// Vec<u8>: AsMut<T>,
+/// {
+/// fn as_mut(&mut self) -> &mut T {
+/// self.content.as_mut()
+/// }
/// }
///
-/// let mut boxed_num = Box::new(0);
-/// add_one(&mut boxed_num);
-/// assert_eq!(*boxed_num, 1);
+/// fn caesar<T: AsMut<[u8]>>(data: &mut T, key: u8) {
+/// for byte in data.as_mut() {
+/// *byte = byte.wrapping_add(key);
+/// }
+/// }
+///
+/// fn null_terminate<T: AsMut<Vec<u8>>>(data: &mut T) {
+/// // Using a non-generic inner function, which contains most of the
+/// // functionality, helps to minimize monomorphization overhead.
+/// fn doit(data: &mut Vec<u8>) {
+/// let len = data.len();
+/// if len == 0 || data[len-1] != 0 {
+/// data.push(0);
+/// }
+/// }
+/// doit(data.as_mut());
+/// }
+///
+/// fn main() {
+/// let mut v: Vec<u8> = vec![1, 2, 3];
+/// caesar(&mut v, 5);
+/// assert_eq!(v, [6, 7, 8]);
+/// null_terminate(&mut v);
+/// assert_eq!(v, [6, 7, 8, 0]);
+/// let mut doc = Document {
+/// info: String::from("Example"),
+/// content: vec![17, 19, 8],
+/// };
+/// caesar(&mut doc, 1);
+/// assert_eq!(doc.content, [18, 20, 9]);
+/// null_terminate(&mut doc);
+/// assert_eq!(doc.content, [18, 20, 9, 0]);
+/// }
/// ```
///
-/// [`Box<T>`]: ../../std/boxed/struct.Box.html
+/// Note, however, that APIs don't need to be generic. In many cases taking a `&mut [u8]` or
+/// `&mut Vec<u8>`, for example, is the better choice (callers then need to pass the correct type).
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "AsMut")]
+#[const_trait]
pub trait AsMut<T: ?Sized> {
/// Converts this type into a mutable reference of the (usually inferred) input type.
#[stable(feature = "rust1", since = "1.0.0")]
@@ -271,6 +443,7 @@ pub trait AsMut<T: ?Sized> {
/// [`Vec`]: ../../std/vec/struct.Vec.html
#[rustc_diagnostic_item = "Into"]
#[stable(feature = "rust1", since = "1.0.0")]
+#[const_trait]
pub trait Into<T>: Sized {
/// Converts this type into the (usually inferred) input type.
#[must_use]
@@ -366,12 +539,13 @@ pub trait Into<T>: Sized {
all(_Self = "&str", T = "std::string::String"),
note = "to coerce a `{T}` into a `{Self}`, use `&*` as a prefix",
))]
+#[const_trait]
pub trait From<T>: Sized {
/// Converts to this type from the input type.
#[lang = "from"]
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
- fn from(_: T) -> Self;
+ fn from(value: T) -> Self;
}
/// An attempted conversion that consumes `self`, which may or may not be
@@ -390,6 +564,7 @@ pub trait From<T>: Sized {
/// [`Into`], see there for details.
#[rustc_diagnostic_item = "TryInto"]
#[stable(feature = "try_from", since = "1.34.0")]
+#[const_trait]
pub trait TryInto<T>: Sized {
/// The type returned in the event of a conversion error.
#[stable(feature = "try_from", since = "1.34.0")]
@@ -434,7 +609,7 @@ pub trait TryInto<T>: Sized {
///
/// fn try_from(value: i32) -> Result<Self, Self::Error> {
/// if value <= 0 {
-/// Err("GreaterThanZero only accepts value superior than zero!")
+/// Err("GreaterThanZero only accepts values greater than zero!")
/// } else {
/// Ok(GreaterThanZero(value))
/// }
@@ -466,6 +641,7 @@ pub trait TryInto<T>: Sized {
/// [`try_from`]: TryFrom::try_from
#[rustc_diagnostic_item = "TryFrom"]
#[stable(feature = "try_from", since = "1.34.0")]
+#[const_trait]
pub trait TryFrom<T>: Sized {
/// The type returned in the event of a conversion error.
#[stable(feature = "try_from", since = "1.34.0")]
@@ -556,6 +732,7 @@ where
#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
impl<T> const From<T> for T {
/// Returns the argument unchanged.
+ #[inline(always)]
fn from(t: T) -> T {
t
}
@@ -715,6 +892,13 @@ impl fmt::Display for Infallible {
}
}
+#[stable(feature = "str_parse_error2", since = "1.8.0")]
+impl Error for Infallible {
+ fn description(&self) -> &str {
+ match *self {}
+ }
+}
+
#[stable(feature = "convert_infallible", since = "1.34.0")]
impl PartialEq for Infallible {
fn eq(&self, _: &Infallible) -> bool {
diff --git a/library/core/src/default.rs b/library/core/src/default.rs
index 1ce00828b..a5b4e9655 100644
--- a/library/core/src/default.rs
+++ b/library/core/src/default.rs
@@ -1,4 +1,4 @@
-//! The `Default` trait for types which may have meaningful default values.
+//! The `Default` trait for types with a default value.
#![stable(feature = "rust1", since = "1.0.0")]
@@ -99,6 +99,7 @@
/// ```
#[cfg_attr(not(test), rustc_diagnostic_item = "Default")]
#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(bootstrap), const_trait)]
pub trait Default: Sized {
/// Returns the "default value" for a type.
///
diff --git a/library/core/src/error.md b/library/core/src/error.md
new file mode 100644
index 000000000..891abebbf
--- /dev/null
+++ b/library/core/src/error.md
@@ -0,0 +1,137 @@
+Interfaces for working with Errors.
+
+# Error Handling In Rust
+
+The Rust language provides two complementary systems for constructing /
+representing, reporting, propagating, reacting to, and discarding errors.
+These responsibilities are collectively known as "error handling." The
+components of the first system, the panic runtime and interfaces, are most
+commonly used to represent bugs that have been detected in your program. The
+components of the second system, `Result`, the error traits, and user
+defined types, are used to represent anticipated runtime failure modes of
+your program.
+
+## The Panic Interfaces
+
+The following are the primary interfaces of the panic system and the
+responsibilities they cover:
+
+* [`panic!`] and [`panic_any`] (Constructing, Propagated automatically)
+* [`PanicInfo`] (Reporting)
+* [`set_hook`], [`take_hook`], and [`#[panic_handler]`][panic-handler] (Reporting)
+* [`catch_unwind`] and [`resume_unwind`] (Discarding, Propagating)
+
+## The Error Interfaces
+
+The following are the primary interfaces of the error system and the
+responsibilities they cover:
+
+* [`Result`] (Propagating, Reacting)
+* The [`Error`] trait (Reporting)
+* User defined types (Constructing / Representing)
+* [`match`] and [`downcast`] (Reacting)
+* The question mark operator ([`?`]) (Propagating)
+* The partially stable [`Try`] traits (Propagating, Constructing)
+* [`Termination`] (Reporting)
+
+## Converting Errors into Panics
+
+The panic and error systems are not entirely distinct. Oftentimes, errors
+that are anticipated runtime failures in an API might instead represent bugs
+to a caller. For these situations the standard library provides APIs for
+constructing panics with an `Error` as it's source.
+
+* [`Result::unwrap`]
+* [`Result::expect`]
+
+These functions are equivalent: they either return the inner value if the
+`Result` is `Ok` or panic if the `Result` is `Err`, printing the inner error
+as the source. The only difference between them is that with `expect` you
+provide a panic error message to be printed alongside the source, whereas
+`unwrap` has a default message indicating only that you unwrapped an `Err`.
+
+Of the two, `expect` is generally preferred since its `msg` field allows you
+to convey your intent and assumptions, which makes tracking down the source
+of a panic easier. `unwrap` on the other hand can still be a good fit in
+situations where you can trivially show that a piece of code will never
+panic, such as `"127.0.0.1".parse::<std::net::IpAddr>().unwrap()` or early
+prototyping.
+
+# Common Message Styles
+
+There are two common styles for how people word `expect` messages: using
+the message to present information to users encountering a panic
+("expect as error message"), or using the message to present information
+to developers debugging the panic ("expect as precondition").
+
+In the former case the expect message is used to describe the error that
+has occurred which is considered a bug. Consider the following example:
+
+```should_panic
+// Read environment variable, panic if it is not present
+let path = std::env::var("IMPORTANT_PATH").unwrap();
+```
+
+In the "expect as error message" style we would use expect to describe
+that the environment variable was not set when it should have been:
+
+```should_panic
+let path = std::env::var("IMPORTANT_PATH")
+ .expect("env variable `IMPORTANT_PATH` is not set");
+```
+
+In the "expect as precondition" style, we would instead describe the
+reason we _expect_ the `Result` should be `Ok`. With this style we would
+prefer to write:
+
+```should_panic
+let path = std::env::var("IMPORTANT_PATH")
+ .expect("env variable `IMPORTANT_PATH` should be set by `wrapper_script.sh`");
+```
+
+The "expect as error message" style does not work as well with the
+default output of the std panic hooks, and often ends up repeating
+information that is already communicated by the source error being
+unwrapped:
+
+```text
+thread 'main' panicked at 'env variable `IMPORTANT_PATH` is not set: NotPresent', src/main.rs:4:6
+```
+
+In this example we end up mentioning that an env variable is not set,
+followed by our source message that says the env is not present; the
+only additional information we're communicating is the name of the
+environment variable being checked.
+
+The "expect as precondition" style instead focuses on source code
+readability, making it easier to understand what must have gone wrong in
+situations where panics are being used to represent bugs exclusively.
+Also, by framing our expect in terms of what "SHOULD" have happened to
+prevent the source error, we end up introducing new information that is
+independent from our source error.
+
+```text
+thread 'main' panicked at 'env variable `IMPORTANT_PATH` should be set by `wrapper_script.sh`: NotPresent', src/main.rs:4:6
+```
+
+In this example we are communicating not only the name of the
+environment variable that should have been set, but also an explanation
+for why it should have been set, and we let the source error display as
+a clear contradiction to our expectation.
+
+**Hint**: If you're having trouble remembering how to phrase
+expect-as-precondition style error messages, remember to focus on the word
+"should" as in "env variable should be set by blah" or "the given binary
+should be available and executable by the current user".
+
+[`panic_any`]: ../../std/panic/fn.panic_any.html
+[`PanicInfo`]: crate::panic::PanicInfo
+[`catch_unwind`]: ../../std/panic/fn.catch_unwind.html
+[`resume_unwind`]: ../../std/panic/fn.resume_unwind.html
+[`downcast`]: crate::error::Error
+[`Termination`]: ../../std/process/trait.Termination.html
+[`Try`]: crate::ops::Try
+[panic hook]: ../../std/panic/fn.set_hook.html
+[`set_hook`]: ../../std/panic/fn.set_hook.html
+[`take_hook`]: ../../std/panic/fn.take_hook.html
+[panic-handler]: <https://doc.rust-lang.org/nomicon/panic-handler.html>
+[`match`]: ../../std/keyword.match.html
+[`?`]: ../../std/result/index.html#the-question-mark-operator-
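As a quick, hypothetical sketch of the roles listed above (user-defined types for construction, `Result` and `?` for propagation, the `Error` trait for reporting) — `ConfigError`, `parse_port`, and `startup` are invented names for illustration, not part of this change:

```rust
use std::fmt;
use std::num::ParseIntError;

// Hypothetical error type: owns a lower-level source error.
#[derive(Debug)]
struct ConfigError {
    source: ParseIntError,
}

impl fmt::Display for ConfigError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "invalid port in configuration")
    }
}

impl std::error::Error for ConfigError {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        Some(&self.source)
    }
}

// Construction: wrap the lower-level error into our own type.
fn parse_port(raw: &str) -> Result<u16, ConfigError> {
    raw.parse::<u16>().map_err(|source| ConfigError { source })
}

// Propagation: `?` forwards the error to the caller.
fn startup(raw_port: &str) -> Result<(), ConfigError> {
    let port = parse_port(raw_port)?;
    println!("listening on port {port}");
    Ok(())
}

fn main() {
    assert!(startup("8080").is_ok());
    assert!(startup("eighty").is_err());
}
```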
diff --git a/library/core/src/error.rs b/library/core/src/error.rs
new file mode 100644
index 000000000..2738b4994
--- /dev/null
+++ b/library/core/src/error.rs
@@ -0,0 +1,508 @@
+#![doc = include_str!("error.md")]
+#![unstable(feature = "error_in_core", issue = "none")]
+
+#[cfg(test)]
+mod tests;
+
+use crate::any::{Demand, Provider, TypeId};
+use crate::fmt::{Debug, Display};
+
+/// `Error` is a trait representing the basic expectations for error values,
+/// i.e., values of type `E` in [`Result<T, E>`].
+///
+/// Errors must describe themselves through the [`Display`] and [`Debug`]
+/// traits. Error messages are typically concise lowercase sentences without
+/// trailing punctuation:
+///
+/// ```
+/// let err = "NaN".parse::<u32>().unwrap_err();
+/// assert_eq!(err.to_string(), "invalid digit found in string");
+/// ```
+///
+/// Errors may provide cause information. [`Error::source()`] is generally
+/// used when errors cross "abstraction boundaries". If one module must report
+/// an error that is caused by an error from a lower-level module, it can allow
+/// accessing that error via [`Error::source()`]. This makes it possible for the
+/// high-level module to provide its own errors while also revealing some of the
+/// implementation for debugging.
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "Error")]
+#[rustc_has_incoherent_inherent_impls]
+pub trait Error: Debug + Display {
+ /// The lower-level source of this error, if any.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::fmt;
+ ///
+ /// #[derive(Debug)]
+ /// struct SuperError {
+ /// source: SuperErrorSideKick,
+ /// }
+ ///
+ /// impl fmt::Display for SuperError {
+ /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// write!(f, "SuperError is here!")
+ /// }
+ /// }
+ ///
+ /// impl Error for SuperError {
+ /// fn source(&self) -> Option<&(dyn Error + 'static)> {
+ /// Some(&self.source)
+ /// }
+ /// }
+ ///
+ /// #[derive(Debug)]
+ /// struct SuperErrorSideKick;
+ ///
+ /// impl fmt::Display for SuperErrorSideKick {
+ /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// write!(f, "SuperErrorSideKick is here!")
+ /// }
+ /// }
+ ///
+ /// impl Error for SuperErrorSideKick {}
+ ///
+ /// fn get_super_error() -> Result<(), SuperError> {
+ /// Err(SuperError { source: SuperErrorSideKick })
+ /// }
+ ///
+ /// fn main() {
+ /// match get_super_error() {
+ /// Err(e) => {
+ /// println!("Error: {e}");
+ /// println!("Caused by: {}", e.source().unwrap());
+ /// }
+ /// _ => println!("No error"),
+ /// }
+ /// }
+ /// ```
+ #[stable(feature = "error_source", since = "1.30.0")]
+ fn source(&self) -> Option<&(dyn Error + 'static)> {
+ None
+ }
+
+ /// Gets the `TypeId` of `self`.
+ #[doc(hidden)]
+ #[unstable(
+ feature = "error_type_id",
+ reason = "this is memory-unsafe to override in user code",
+ issue = "60784"
+ )]
+ fn type_id(&self, _: private::Internal) -> TypeId
+ where
+ Self: 'static,
+ {
+ TypeId::of::<Self>()
+ }
+
+ /// ```
+ /// if let Err(e) = "xc".parse::<u32>() {
+ /// // Print `e` itself, no need for description().
+ /// eprintln!("Error: {e}");
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[deprecated(since = "1.42.0", note = "use the Display impl or to_string()")]
+ fn description(&self) -> &str {
+ "description() is deprecated; use Display"
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[deprecated(
+ since = "1.33.0",
+ note = "replaced by Error::source, which can support downcasting"
+ )]
+ #[allow(missing_docs)]
+ fn cause(&self) -> Option<&dyn Error> {
+ self.source()
+ }
+
+ /// Provides type based access to context intended for error reports.
+ ///
+ /// Used in conjunction with [`Demand::provide_value`] and [`Demand::provide_ref`] to extract
+ /// references to member variables from `dyn Error` trait objects.
+ ///
+ /// # Example
+ ///
+ /// ```rust
+ /// #![feature(provide_any)]
+ /// #![feature(error_generic_member_access)]
+ /// use core::fmt;
+ /// use core::any::Demand;
+ ///
+ /// #[derive(Debug)]
+ /// struct MyBacktrace {
+ /// // ...
+ /// }
+ ///
+ /// impl MyBacktrace {
+ /// fn new() -> MyBacktrace {
+ /// // ...
+ /// # MyBacktrace {}
+ /// }
+ /// }
+ ///
+ /// #[derive(Debug)]
+ /// struct SourceError {
+ /// // ...
+ /// }
+ ///
+ /// impl fmt::Display for SourceError {
+ /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// write!(f, "Example Source Error")
+ /// }
+ /// }
+ ///
+ /// impl std::error::Error for SourceError {}
+ ///
+ /// #[derive(Debug)]
+ /// struct Error {
+ /// source: SourceError,
+ /// backtrace: MyBacktrace,
+ /// }
+ ///
+ /// impl fmt::Display for Error {
+ /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// write!(f, "Example Error")
+ /// }
+ /// }
+ ///
+ /// impl std::error::Error for Error {
+ /// fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
+ /// demand
+ /// .provide_ref::<MyBacktrace>(&self.backtrace)
+ /// .provide_ref::<dyn std::error::Error + 'static>(&self.source);
+ /// }
+ /// }
+ ///
+ /// fn main() {
+ /// let backtrace = MyBacktrace::new();
+ /// let source = SourceError {};
+ /// let error = Error { source, backtrace };
+ /// let dyn_error = &error as &dyn std::error::Error;
+ /// let backtrace_ref = dyn_error.request_ref::<MyBacktrace>().unwrap();
+ ///
+ /// assert!(core::ptr::eq(&error.backtrace, backtrace_ref));
+ /// }
+ /// ```
+ #[unstable(feature = "error_generic_member_access", issue = "99301")]
+ #[allow(unused_variables)]
+ fn provide<'a>(&'a self, demand: &mut Demand<'a>) {}
+}
+
+#[unstable(feature = "error_generic_member_access", issue = "99301")]
+impl<E> Provider for E
+where
+ E: Error + ?Sized,
+{
+ fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
+ self.provide(demand)
+ }
+}
+
+mod private {
+ // This is a hack to prevent `type_id` from being overridden by `Error`
+ // implementations, since that can enable unsound downcasting.
+ #[unstable(feature = "error_type_id", issue = "60784")]
+ #[derive(Debug)]
+ pub struct Internal;
+}
+
+#[unstable(feature = "never_type", issue = "35121")]
+impl Error for ! {}
+
+impl<'a> dyn Error + 'a {
+ /// Request a reference of type `T` as context about this error.
+ #[unstable(feature = "error_generic_member_access", issue = "99301")]
+ pub fn request_ref<T: ?Sized + 'static>(&'a self) -> Option<&'a T> {
+ core::any::request_ref(self)
+ }
+
+ /// Request a value of type `T` as context about this error.
+ #[unstable(feature = "error_generic_member_access", issue = "99301")]
+ pub fn request_value<T: 'static>(&'a self) -> Option<T> {
+ core::any::request_value(self)
+ }
+}
+
+// Copied from `any.rs`.
+impl dyn Error + 'static {
+ /// Returns `true` if the inner type is the same as `T`.
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[inline]
+ pub fn is<T: Error + 'static>(&self) -> bool {
+ // Get `TypeId` of the type this function is instantiated with.
+ let t = TypeId::of::<T>();
+
+ // Get `TypeId` of the type in the trait object (`self`).
+ let concrete = self.type_id(private::Internal);
+
+ // Compare both `TypeId`s on equality.
+ t == concrete
+ }
+
+ /// Returns some reference to the inner value if it is of type `T`, or
+ /// `None` if it isn't.
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[inline]
+ pub fn downcast_ref<T: Error + 'static>(&self) -> Option<&T> {
+ if self.is::<T>() {
+ // SAFETY: `is` ensures this type cast is correct
+ unsafe { Some(&*(self as *const dyn Error as *const T)) }
+ } else {
+ None
+ }
+ }
+
+ /// Returns some mutable reference to the inner value if it is of type `T`, or
+ /// `None` if it isn't.
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[inline]
+ pub fn downcast_mut<T: Error + 'static>(&mut self) -> Option<&mut T> {
+ if self.is::<T>() {
+ // SAFETY: `is` ensures this type cast is correct
+ unsafe { Some(&mut *(self as *mut dyn Error as *mut T)) }
+ } else {
+ None
+ }
+ }
+}
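A small usage sketch of the downcasting methods above, using only stable `std` APIs; `ParseIntError` is just a convenient concrete error type for illustration:

```rust
use std::error::Error;
use std::num::ParseIntError;

// Recover the concrete type behind a `&dyn Error`, if it matches.
fn describe(err: &(dyn Error + 'static)) -> String {
    if let Some(parse_err) = err.downcast_ref::<ParseIntError>() {
        format!("integer parsing failed: {parse_err}")
    } else {
        format!("some other error: {err}")
    }
}

fn main() {
    let err = "NaN".parse::<u32>().unwrap_err();
    assert!(describe(&err).starts_with("integer parsing failed"));
}
```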
+
+impl dyn Error + 'static + Send {
+ /// Forwards to the method defined on the type `dyn Error`.
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[inline]
+ pub fn is<T: Error + 'static>(&self) -> bool {
+ <dyn Error + 'static>::is::<T>(self)
+ }
+
+ /// Forwards to the method defined on the type `dyn Error`.
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[inline]
+ pub fn downcast_ref<T: Error + 'static>(&self) -> Option<&T> {
+ <dyn Error + 'static>::downcast_ref::<T>(self)
+ }
+
+ /// Forwards to the method defined on the type `dyn Error`.
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[inline]
+ pub fn downcast_mut<T: Error + 'static>(&mut self) -> Option<&mut T> {
+ <dyn Error + 'static>::downcast_mut::<T>(self)
+ }
+
+ /// Request a reference of type `T` as context about this error.
+ #[unstable(feature = "error_generic_member_access", issue = "99301")]
+ pub fn request_ref<T: ?Sized + 'static>(&self) -> Option<&T> {
+ <dyn Error>::request_ref(self)
+ }
+
+ /// Request a value of type `T` as context about this error.
+ #[unstable(feature = "error_generic_member_access", issue = "99301")]
+ pub fn request_value<T: 'static>(&self) -> Option<T> {
+ <dyn Error>::request_value(self)
+ }
+}
+
+impl dyn Error + 'static + Send + Sync {
+ /// Forwards to the method defined on the type `dyn Error`.
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[inline]
+ pub fn is<T: Error + 'static>(&self) -> bool {
+ <dyn Error + 'static>::is::<T>(self)
+ }
+
+ /// Forwards to the method defined on the type `dyn Error`.
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[inline]
+ pub fn downcast_ref<T: Error + 'static>(&self) -> Option<&T> {
+ <dyn Error + 'static>::downcast_ref::<T>(self)
+ }
+
+ /// Forwards to the method defined on the type `dyn Error`.
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[inline]
+ pub fn downcast_mut<T: Error + 'static>(&mut self) -> Option<&mut T> {
+ <dyn Error + 'static>::downcast_mut::<T>(self)
+ }
+
+ /// Request a reference of type `T` as context about this error.
+ #[unstable(feature = "error_generic_member_access", issue = "99301")]
+ pub fn request_ref<T: ?Sized + 'static>(&self) -> Option<&T> {
+ <dyn Error>::request_ref(self)
+ }
+
+ /// Request a value of type `T` as context about this error.
+ #[unstable(feature = "error_generic_member_access", issue = "99301")]
+ pub fn request_value<T: 'static>(&self) -> Option<T> {
+ <dyn Error>::request_value(self)
+ }
+}
+
+impl dyn Error {
+ /// Returns an iterator starting with the current error and continuing with
+ /// recursively calling [`Error::source`].
+ ///
+ /// If you want to omit the current error and only use its sources,
+ /// use `skip(1)`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(error_iter)]
+ /// use std::error::Error;
+ /// use std::fmt;
+ ///
+ /// #[derive(Debug)]
+ /// struct A;
+ ///
+ /// #[derive(Debug)]
+ /// struct B(Option<Box<dyn Error + 'static>>);
+ ///
+ /// impl fmt::Display for A {
+ /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// write!(f, "A")
+ /// }
+ /// }
+ ///
+ /// impl fmt::Display for B {
+ /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// write!(f, "B")
+ /// }
+ /// }
+ ///
+ /// impl Error for A {}
+ ///
+ /// impl Error for B {
+ /// fn source(&self) -> Option<&(dyn Error + 'static)> {
+ /// self.0.as_ref().map(|e| e.as_ref())
+ /// }
+ /// }
+ ///
+ /// let b = B(Some(Box::new(A)));
+ ///
+ /// // let err : Box<Error> = b.into(); // or
+ /// let err = &b as &(dyn Error);
+ ///
+ /// let mut iter = err.sources();
+ ///
+ /// assert_eq!("B".to_string(), iter.next().unwrap().to_string());
+ /// assert_eq!("A".to_string(), iter.next().unwrap().to_string());
+ /// assert!(iter.next().is_none());
+ /// assert!(iter.next().is_none());
+ /// ```
+ #[unstable(feature = "error_iter", issue = "58520")]
+ #[inline]
+ pub fn sources(&self) -> Source<'_> {
+ // You may think this method would be better in the Error trait, and you'd be right.
+ // Unfortunately that doesn't work, not because of the object safety rules but because we
+ // save a reference to self in Sources below as a trait object. If this method was
+ // declared in Error, then self would have the type &T where T is some concrete type which
+ // implements Error. We would need to coerce self to have type &dyn Error, but that requires
+ // that Self has a known size (i.e., Self: Sized). We can't put that bound on Error
+ // since that would forbid Error trait objects, and we can't put that bound on the method
+ // because that means the method can't be called on trait objects (we'd also need the
+ // 'static bound, but that isn't allowed because methods with bounds on Self other than
+ // Sized are not object-safe). Requiring an Unsize bound is not backwards compatible.
+
+ Source { current: Some(self) }
+ }
+}
+
+/// An iterator over an [`Error`] and its sources.
+///
+/// If you want to omit the initial error and only process
+/// its sources, use `skip(1)`.
+#[unstable(feature = "error_iter", issue = "58520")]
+#[derive(Clone, Debug)]
+pub struct Source<'a> {
+ current: Option<&'a (dyn Error + 'static)>,
+}
+
+#[unstable(feature = "error_iter", issue = "58520")]
+impl<'a> Iterator for Source<'a> {
+ type Item = &'a (dyn Error + 'static);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ let current = self.current;
+ self.current = self.current.and_then(Error::source);
+ current
+ }
+}
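For comparison, a minimal sketch of what this iterator does, written as a plain loop over `Error::source` (usable on stable without the `error_iter` feature):

```rust
use std::error::Error;

// Walk the chain: the error itself first, then each `source()` in turn.
fn print_chain(err: &(dyn Error + 'static)) {
    let mut current = Some(err);
    while let Some(e) = current {
        eprintln!("{e}");
        current = e.source();
    }
}

fn main() {
    let err = "NaN".parse::<u32>().unwrap_err();
    print_chain(&err); // prints one line; ParseIntError has no source
}
```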
+
+#[stable(feature = "error_by_ref", since = "1.51.0")]
+impl<'a, T: Error + ?Sized> Error for &'a T {
+ #[allow(deprecated, deprecated_in_future)]
+ fn description(&self) -> &str {
+ Error::description(&**self)
+ }
+
+ #[allow(deprecated)]
+ fn cause(&self) -> Option<&dyn Error> {
+ Error::cause(&**self)
+ }
+
+ fn source(&self) -> Option<&(dyn Error + 'static)> {
+ Error::source(&**self)
+ }
+
+ fn provide<'b>(&'b self, demand: &mut Demand<'b>) {
+ Error::provide(&**self, demand);
+ }
+}
+
+#[stable(feature = "fmt_error", since = "1.11.0")]
+impl Error for crate::fmt::Error {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "an error occurred when formatting an argument"
+ }
+}
+
+#[stable(feature = "try_borrow", since = "1.13.0")]
+impl Error for crate::cell::BorrowError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "already mutably borrowed"
+ }
+}
+
+#[stable(feature = "try_borrow", since = "1.13.0")]
+impl Error for crate::cell::BorrowMutError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "already borrowed"
+ }
+}
+
+#[stable(feature = "try_from", since = "1.34.0")]
+impl Error for crate::char::CharTryFromError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "converted integer out of range for `char`"
+ }
+}
+
+#[stable(feature = "char_from_str", since = "1.20.0")]
+impl Error for crate::char::ParseCharError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ self.__description()
+ }
+}
+
+#[stable(feature = "duration_checked_float", since = "1.66.0")]
+impl Error for crate::time::TryFromFloatSecsError {}
+
+#[stable(feature = "frombyteswithnulerror_impls", since = "1.17.0")]
+impl Error for crate::ffi::FromBytesWithNulError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ self.__description()
+ }
+}
+
+#[unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")]
+impl Error for crate::ffi::FromBytesUntilNulError {}
diff --git a/library/core/src/ffi/c_double.md b/library/core/src/ffi/c_double.md
index 57f453482..d49e29b6e 100644
--- a/library/core/src/ffi/c_double.md
+++ b/library/core/src/ffi/c_double.md
@@ -1,6 +1,6 @@
Equivalent to C's `double` type.
-This type will almost always be [`f64`], which is guaranteed to be an [IEEE-754 double-precision float] in Rust. That said, the standard technically only guarantees that it be a floating-point number with at least the precision of a [`float`], and it may be `f32` or something entirely different from the IEEE-754 standard.
+This type will almost always be [`f64`], which is guaranteed to be an [IEEE 754 double-precision float] in Rust. That said, the standard technically only guarantees that it be a floating-point number with at least the precision of a [`float`], and it may be `f32` or something entirely different from the IEEE-754 standard.
-[IEEE-754 double-precision float]: https://en.wikipedia.org/wiki/IEEE_754
+[IEEE 754 double-precision float]: https://en.wikipedia.org/wiki/IEEE_754
[`float`]: c_float
diff --git a/library/core/src/ffi/c_float.md b/library/core/src/ffi/c_float.md
index 61e2abc05..36374ef43 100644
--- a/library/core/src/ffi/c_float.md
+++ b/library/core/src/ffi/c_float.md
@@ -1,5 +1,5 @@
Equivalent to C's `float` type.
-This type will almost always be [`f32`], which is guaranteed to be an [IEEE-754 single-precision float] in Rust. That said, the standard technically only guarantees that it be a floating-point number, and it may have less precision than `f32` or not follow the IEEE-754 standard at all.
+This type will almost always be [`f32`], which is guaranteed to be an [IEEE 754 single-precision float] in Rust. That said, the standard technically only guarantees that it be a floating-point number, and it may have less precision than `f32` or not follow the IEEE-754 standard at all.
-[IEEE-754 single-precision float]: https://en.wikipedia.org/wiki/IEEE_754
+[IEEE 754 single-precision float]: https://en.wikipedia.org/wiki/IEEE_754
diff --git a/library/core/src/ffi/c_str.rs b/library/core/src/ffi/c_str.rs
index 82e63a7fe..8923f548a 100644
--- a/library/core/src/ffi/c_str.rs
+++ b/library/core/src/ffi/c_str.rs
@@ -1,7 +1,6 @@
-use crate::ascii;
use crate::cmp::Ordering;
use crate::ffi::c_char;
-use crate::fmt::{self, Write};
+use crate::fmt;
use crate::intrinsics;
use crate::ops;
use crate::slice;
@@ -121,10 +120,10 @@ enum FromBytesWithNulErrorKind {
}
impl FromBytesWithNulError {
- fn interior_nul(pos: usize) -> FromBytesWithNulError {
+ const fn interior_nul(pos: usize) -> FromBytesWithNulError {
FromBytesWithNulError { kind: FromBytesWithNulErrorKind::InteriorNul(pos) }
}
- fn not_nul_terminated() -> FromBytesWithNulError {
+ const fn not_nul_terminated() -> FromBytesWithNulError {
FromBytesWithNulError { kind: FromBytesWithNulErrorKind::NotNulTerminated }
}
@@ -161,11 +160,7 @@ impl fmt::Display for FromBytesUntilNulError {
#[stable(feature = "cstr_debug", since = "1.3.0")]
impl fmt::Debug for CStr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(f, "\"")?;
- for byte in self.to_bytes().iter().flat_map(|&b| ascii::escape_default(b)) {
- f.write_char(byte as char)?;
- }
- write!(f, "\"")
+ write!(f, "\"{}\"", self.to_bytes().escape_ascii())
}
}
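The new `Debug` body relies on `<[u8]>::escape_ascii` (stable since 1.60); a quick sketch of what that adapter renders for non-printable bytes:

```rust
fn main() {
    let bytes = b"ok\n\x01";
    // Non-printable bytes come out as backslash escapes, printable ASCII as-is.
    assert_eq!(bytes.escape_ascii().to_string(), "ok\\n\\x01");
}
```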
@@ -226,9 +221,7 @@ impl CStr {
/// # Examples
///
/// ```ignore (extern-declaration)
- /// # fn main() {
- /// use std::ffi::CStr;
- /// use std::os::raw::c_char;
+ /// use std::ffi::{c_char, CStr};
///
/// extern "C" {
/// fn my_string() -> *const c_char;
@@ -238,14 +231,26 @@ impl CStr {
/// let slice = CStr::from_ptr(my_string());
/// println!("string returned: {}", slice.to_str().unwrap());
/// }
- /// # }
+ /// ```
+ ///
+ /// ```
+ /// #![feature(const_cstr_methods)]
+ ///
+ /// use std::ffi::{c_char, CStr};
+ ///
+ /// const HELLO_PTR: *const c_char = {
+ /// const BYTES: &[u8] = b"Hello, world!\0";
+ /// BYTES.as_ptr().cast()
+ /// };
+ /// const HELLO: &CStr = unsafe { CStr::from_ptr(HELLO_PTR) };
/// ```
///
/// [valid]: core::ptr#safety
#[inline]
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
- pub unsafe fn from_ptr<'a>(ptr: *const c_char) -> &'a CStr {
+ #[rustc_const_unstable(feature = "const_cstr_methods", issue = "101719")]
+ pub const unsafe fn from_ptr<'a>(ptr: *const c_char) -> &'a CStr {
// SAFETY: The caller has provided a pointer that points to a valid C
// string with a NUL terminator of size less than `isize::MAX`, whose
// content remain valid and doesn't change for the lifetime of the
@@ -257,13 +262,29 @@ impl CStr {
//
// The cast from c_char to u8 is ok because a c_char is always one byte.
unsafe {
- extern "C" {
- /// Provided by libc or compiler_builtins.
- fn strlen(s: *const c_char) -> usize;
+ const fn strlen_ct(s: *const c_char) -> usize {
+ let mut len = 0;
+
+ // SAFETY: Outer caller has provided a pointer to a valid C string.
+ while unsafe { *s.add(len) } != 0 {
+ len += 1;
+ }
+
+ len
}
- let len = strlen(ptr);
- let ptr = ptr as *const u8;
- CStr::from_bytes_with_nul_unchecked(slice::from_raw_parts(ptr, len as usize + 1))
+
+ fn strlen_rt(s: *const c_char) -> usize {
+ extern "C" {
+ /// Provided by libc or compiler_builtins.
+ fn strlen(s: *const c_char) -> usize;
+ }
+
+ // SAFETY: Outer caller has provided a pointer to a valid C string.
+ unsafe { strlen(s) }
+ }
+
+ let len = intrinsics::const_eval_select((ptr,), strlen_ct, strlen_rt);
+ Self::from_bytes_with_nul_unchecked(slice::from_raw_parts(ptr.cast(), len + 1))
}
}
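A rough standalone sketch of the compile-time branch: a const-evaluable byte-counting loop in the spirit of `strlen_ct` above, written over a byte slice so it runs on stable Rust (it is not the `const_eval_select` plumbing itself):

```rust
// Counts bytes before the first NUL terminator.
const fn strlen(bytes: &[u8]) -> usize {
    let mut len = 0;
    while bytes[len] != 0 {
        len += 1;
    }
    len
}

const LEN: usize = strlen(b"Hello, world!\0");

fn main() {
    assert_eq!(LEN, 13);
}
```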
@@ -299,7 +320,8 @@ impl CStr {
/// ```
///
#[unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")]
- pub fn from_bytes_until_nul(bytes: &[u8]) -> Result<&CStr, FromBytesUntilNulError> {
+ #[rustc_const_unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")]
+ pub const fn from_bytes_until_nul(bytes: &[u8]) -> Result<&CStr, FromBytesUntilNulError> {
let nul_pos = memchr::memchr(0, bytes);
match nul_pos {
Some(nul_pos) => {
@@ -348,7 +370,8 @@ impl CStr {
/// assert!(cstr.is_err());
/// ```
#[stable(feature = "cstr_from_bytes", since = "1.10.0")]
- pub fn from_bytes_with_nul(bytes: &[u8]) -> Result<&Self, FromBytesWithNulError> {
+ #[rustc_const_unstable(feature = "const_cstr_methods", issue = "101719")]
+ pub const fn from_bytes_with_nul(bytes: &[u8]) -> Result<&Self, FromBytesWithNulError> {
let nul_pos = memchr::memchr(0, bytes);
match nul_pos {
Some(nul_pos) if nul_pos + 1 == bytes.len() => {
@@ -387,6 +410,7 @@ impl CStr {
#[rustc_const_stable(feature = "const_cstr_unchecked", since = "1.59.0")]
#[rustc_allow_const_fn_unstable(const_eval_select)]
pub const unsafe fn from_bytes_with_nul_unchecked(bytes: &[u8]) -> &CStr {
+ #[inline]
fn rt_impl(bytes: &[u8]) -> &CStr {
// Chance at catching some UB at runtime with debug builds.
debug_assert!(!bytes.is_empty() && bytes[bytes.len() - 1] == 0);
@@ -476,6 +500,34 @@ impl CStr {
self.inner.as_ptr()
}
+ /// Returns `true` if `self.to_bytes()` has a length of 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(cstr_is_empty)]
+ ///
+ /// use std::ffi::CStr;
+ /// # use std::ffi::FromBytesWithNulError;
+ ///
+ /// # fn main() { test().unwrap(); }
+ /// # fn test() -> Result<(), FromBytesWithNulError> {
+ /// let cstr = CStr::from_bytes_with_nul(b"foo\0")?;
+ /// assert!(!cstr.is_empty());
+ ///
+ /// let empty_cstr = CStr::from_bytes_with_nul(b"\0")?;
+ /// assert!(empty_cstr.is_empty());
+ /// # Ok(())
+ /// # }
+ /// ```
+ #[inline]
+ #[unstable(feature = "cstr_is_empty", issue = "102444")]
+ pub const fn is_empty(&self) -> bool {
+ // SAFETY: We know there is at least one byte; for empty strings it
+ // is the NUL terminator.
+ (unsafe { self.inner.get_unchecked(0) }) == &0
+ }
+
/// Converts this C string to a byte slice.
///
/// The returned slice will **not** contain the trailing nul terminator that this C
@@ -497,7 +549,8 @@ impl CStr {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn to_bytes(&self) -> &[u8] {
+ #[rustc_const_unstable(feature = "const_cstr_methods", issue = "101719")]
+ pub const fn to_bytes(&self) -> &[u8] {
let bytes = self.to_bytes_with_nul();
// SAFETY: to_bytes_with_nul returns slice with length at least 1
unsafe { bytes.get_unchecked(..bytes.len() - 1) }
@@ -524,7 +577,8 @@ impl CStr {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn to_bytes_with_nul(&self) -> &[u8] {
+ #[rustc_const_unstable(feature = "const_cstr_methods", issue = "101719")]
+ pub const fn to_bytes_with_nul(&self) -> &[u8] {
// SAFETY: Transmuting a slice of `c_char`s to a slice of `u8`s
// is safe on all supported targets.
unsafe { &*(&self.inner as *const [c_char] as *const [u8]) }
@@ -547,7 +601,8 @@ impl CStr {
/// assert_eq!(cstr.to_str(), Ok("foo"));
/// ```
#[stable(feature = "cstr_to_str", since = "1.4.0")]
- pub fn to_str(&self) -> Result<&str, str::Utf8Error> {
+ #[rustc_const_unstable(feature = "const_cstr_methods", issue = "101719")]
+ pub const fn to_str(&self) -> Result<&str, str::Utf8Error> {
// N.B., when `CStr` is changed to perform the length check in `.to_bytes()`
// instead of in `from_ptr()`, it may be worth considering if this should
// be rewritten to do the UTF-8 check inline with the length calculation
diff --git a/library/core/src/fmt/builders.rs b/library/core/src/fmt/builders.rs
index 32d1a4e55..7da49b04a 100644
--- a/library/core/src/fmt/builders.rs
+++ b/library/core/src/fmt/builders.rs
@@ -28,24 +28,14 @@ impl<'buf, 'state> PadAdapter<'buf, 'state> {
}
impl fmt::Write for PadAdapter<'_, '_> {
- fn write_str(&mut self, mut s: &str) -> fmt::Result {
- while !s.is_empty() {
+ fn write_str(&mut self, s: &str) -> fmt::Result {
+ for s in s.split_inclusive('\n') {
if self.state.on_newline {
self.buf.write_str(" ")?;
}
- let split = match s.find('\n') {
- Some(pos) => {
- self.state.on_newline = true;
- pos + 1
- }
- None => {
- self.state.on_newline = false;
- s.len()
- }
- };
- self.buf.write_str(&s[..split])?;
- s = &s[split..];
+ self.state.on_newline = s.ends_with('\n');
+ self.buf.write_str(s)?;
}
Ok(())
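The rewritten `write_str` leans on `str::split_inclusive`, which keeps the matched delimiter at the end of each piece; a quick sketch of that behaviour:

```rust
fn main() {
    let text = "alpha\nbeta\ngamma";
    let pieces: Vec<&str> = text.split_inclusive('\n').collect();
    // Each piece keeps its trailing '\n'; the last one has none, which is
    // exactly what the `s.ends_with('\n')` check above tracks.
    assert_eq!(pieces, ["alpha\n", "beta\n", "gamma"]);
}
```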
diff --git a/library/core/src/fmt/mod.rs b/library/core/src/fmt/mod.rs
index 372141e09..c8d285505 100644
--- a/library/core/src/fmt/mod.rs
+++ b/library/core/src/fmt/mod.rs
@@ -119,6 +119,10 @@ pub trait Write {
///
/// This function will return an instance of [`Error`] on error.
///
+ /// The purpose of std::fmt::Error is to abort the formatting operation when the underlying
+ /// destination encounters some error preventing it from accepting more text; it should
+ /// generally be propagated rather than handled, at least when implementing formatting traits.
+ ///
/// # Examples
///
/// ```
@@ -705,12 +709,19 @@ pub use macros::Debug;
/// Format trait for an empty format, `{}`.
///
+/// Implementing this trait for a type will automatically implement the
+/// [`ToString`][tostring] trait for the type, allowing the use
+/// of the [`.to_string()`][tostring_function] method. Prefer implementing
+/// the `Display` trait for a type, rather than [`ToString`][tostring].
+///
/// `Display` is similar to [`Debug`], but `Display` is for user-facing
/// output, and so cannot be derived.
///
/// For more information on formatters, see [the module-level documentation][module].
///
/// [module]: ../../std/fmt/index.html
+/// [tostring]: ../../std/string/trait.ToString.html
+/// [tostring_function]: ../../std/string/trait.ToString.html#tymethod.to_string
///
/// # Examples
///
@@ -1815,7 +1826,7 @@ impl<'a> Formatter<'a> {
/// write!(formatter,
/// "Foo({}{})",
/// if self.0 < 0 { '-' } else { '+' },
- /// self.0)
+ /// self.0.abs())
/// } else {
/// write!(formatter, "Foo({})", self.0)
/// }
@@ -1823,6 +1834,7 @@ impl<'a> Formatter<'a> {
/// }
///
/// assert_eq!(&format!("{:+}", Foo(23)), "Foo(+23)");
+ /// assert_eq!(&format!("{:+}", Foo(-23)), "Foo(-23)");
/// assert_eq!(&format!("{}", Foo(23)), "Foo(23)");
/// ```
#[must_use]
@@ -2562,7 +2574,7 @@ macro_rules! tuple {
macro_rules! maybe_tuple_doc {
($a:ident @ #[$meta:meta] $item:item) => {
- #[cfg_attr(not(bootstrap), doc(fake_variadic))]
+ #[doc(fake_variadic)]
#[doc = "This trait is implemented for tuples up to twelve items long."]
#[$meta]
$item
@@ -2598,7 +2610,7 @@ impl Debug for () {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Debug for PhantomData<T> {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
- f.debug_struct("PhantomData").finish()
+ write!(f, "PhantomData<{}>", crate::any::type_name::<T>())
}
}
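A brief sketch of the effect of the `PhantomData` change; the exact text comes from `type_name`, so it can vary for more complex type parameters:

```rust
use std::marker::PhantomData;

fn main() {
    let tag: PhantomData<u32> = PhantomData;
    // With the change above, Debug output names the type parameter,
    // e.g. "PhantomData<u32>" rather than just "PhantomData".
    println!("{tag:?}");
}
```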
diff --git a/library/core/src/fmt/num.rs b/library/core/src/fmt/num.rs
index 25789d37c..d8365ae9b 100644
--- a/library/core/src/fmt/num.rs
+++ b/library/core/src/fmt/num.rs
@@ -211,7 +211,7 @@ macro_rules! impl_Display {
fn $name(mut n: $u, is_nonnegative: bool, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// 2^128 is about 3*10^38, so 39 gives an extra byte of space
let mut buf = [MaybeUninit::<u8>::uninit(); 39];
- let mut curr = buf.len() as isize;
+ let mut curr = buf.len();
let buf_ptr = MaybeUninit::slice_as_mut_ptr(&mut buf);
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
@@ -228,7 +228,7 @@ macro_rules! impl_Display {
// eagerly decode 4 characters at a time
while n >= 10000 {
- let rem = (n % 10000) as isize;
+ let rem = (n % 10000) as usize;
n /= 10000;
let d1 = (rem / 100) << 1;
@@ -238,29 +238,29 @@ macro_rules! impl_Display {
// We are allowed to copy to `buf_ptr[curr..curr + 3]` here since
// otherwise `curr < 0`. But then `n` was originally at least `10000^10`
// which is `10^40 > 2^128 > n`.
- ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
- ptr::copy_nonoverlapping(lut_ptr.offset(d2), buf_ptr.offset(curr + 2), 2);
+ ptr::copy_nonoverlapping(lut_ptr.add(d1), buf_ptr.add(curr), 2);
+ ptr::copy_nonoverlapping(lut_ptr.add(d2), buf_ptr.add(curr + 2), 2);
}
// if we reach here numbers are <= 9999, so at most 4 chars long
- let mut n = n as isize; // possibly reduce 64bit math
+ let mut n = n as usize; // possibly reduce 64bit math
// decode 2 more chars, if > 2 chars
if n >= 100 {
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
- ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
+ ptr::copy_nonoverlapping(lut_ptr.add(d1), buf_ptr.add(curr), 2);
}
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
- *buf_ptr.offset(curr) = (n as u8) + b'0';
+ *buf_ptr.add(curr) = (n as u8) + b'0';
} else {
let d1 = n << 1;
curr -= 2;
- ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
+ ptr::copy_nonoverlapping(lut_ptr.add(d1), buf_ptr.add(curr), 2);
}
}
@@ -268,7 +268,7 @@ macro_rules! impl_Display {
// UTF-8 since `DEC_DIGITS_LUT` is
let buf_slice = unsafe {
str::from_utf8_unchecked(
- slice::from_raw_parts(buf_ptr.offset(curr), buf.len() - curr as usize))
+ slice::from_raw_parts(buf_ptr.add(curr), buf.len() - curr))
};
f.pad_integral(is_nonnegative, "", buf_slice)
}
@@ -339,18 +339,18 @@ macro_rules! impl_Exp {
// Since `curr` always decreases by the number of digits copied, this means
// that `curr >= 0`.
let mut buf = [MaybeUninit::<u8>::uninit(); 40];
- let mut curr = buf.len() as isize; //index for buf
+ let mut curr = buf.len(); //index for buf
let buf_ptr = MaybeUninit::slice_as_mut_ptr(&mut buf);
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
// decode 2 chars at a time
while n >= 100 {
- let d1 = ((n % 100) as isize) << 1;
+ let d1 = ((n % 100) as usize) << 1;
curr -= 2;
// SAFETY: `d1 <= 198`, so we can copy from `lut_ptr[d1..d1 + 2]` since
// `DEC_DIGITS_LUT` has a length of 200.
unsafe {
- ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
+ ptr::copy_nonoverlapping(lut_ptr.add(d1), buf_ptr.add(curr), 2);
}
n /= 100;
exponent += 2;
@@ -362,7 +362,7 @@ macro_rules! impl_Exp {
curr -= 1;
// SAFETY: Safe since `40 > curr >= 0` (see comment)
unsafe {
- *buf_ptr.offset(curr) = (n as u8 % 10_u8) + b'0';
+ *buf_ptr.add(curr) = (n as u8 % 10_u8) + b'0';
}
n /= 10;
exponent += 1;
@@ -372,7 +372,7 @@ macro_rules! impl_Exp {
curr -= 1;
// SAFETY: Safe since `40 > curr >= 0`
unsafe {
- *buf_ptr.offset(curr) = b'.';
+ *buf_ptr.add(curr) = b'.';
}
}
@@ -380,10 +380,10 @@ macro_rules! impl_Exp {
let buf_slice = unsafe {
// decode last character
curr -= 1;
- *buf_ptr.offset(curr) = (n as u8) + b'0';
+ *buf_ptr.add(curr) = (n as u8) + b'0';
let len = buf.len() - curr as usize;
- slice::from_raw_parts(buf_ptr.offset(curr), len)
+ slice::from_raw_parts(buf_ptr.add(curr), len)
};
// stores 'e' (or 'E') and the up to 2-digit exponent
@@ -392,13 +392,13 @@ macro_rules! impl_Exp {
// SAFETY: In either case, `exp_buf` is written within bounds and `exp_ptr[..len]`
// is contained within `exp_buf` since `len <= 3`.
let exp_slice = unsafe {
- *exp_ptr.offset(0) = if upper { b'E' } else { b'e' };
+ *exp_ptr.add(0) = if upper { b'E' } else { b'e' };
let len = if exponent < 10 {
- *exp_ptr.offset(1) = (exponent as u8) + b'0';
+ *exp_ptr.add(1) = (exponent as u8) + b'0';
2
} else {
let off = exponent << 1;
- ptr::copy_nonoverlapping(lut_ptr.offset(off), exp_ptr.offset(1), 2);
+ ptr::copy_nonoverlapping(lut_ptr.add(off), exp_ptr.add(1), 2);
3
};
slice::from_raw_parts(exp_ptr, len)
@@ -479,7 +479,7 @@ mod imp {
impl_Exp!(i128, u128 as u128 via to_u128 named exp_u128);
/// Helper function for writing a u64 into `buf` going from last to first, with `curr`.
-fn parse_u64_into<const N: usize>(mut n: u64, buf: &mut [MaybeUninit<u8>; N], curr: &mut isize) {
+fn parse_u64_into<const N: usize>(mut n: u64, buf: &mut [MaybeUninit<u8>; N], curr: &mut usize) {
let buf_ptr = MaybeUninit::slice_as_mut_ptr(buf);
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
assert!(*curr > 19);
@@ -505,14 +505,14 @@ fn parse_u64_into<const N: usize>(mut n: u64, buf: &mut [MaybeUninit<u8>; N], cu
*curr -= 16;
- ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(*curr + 0), 2);
- ptr::copy_nonoverlapping(lut_ptr.offset(d2 as isize), buf_ptr.offset(*curr + 2), 2);
- ptr::copy_nonoverlapping(lut_ptr.offset(d3 as isize), buf_ptr.offset(*curr + 4), 2);
- ptr::copy_nonoverlapping(lut_ptr.offset(d4 as isize), buf_ptr.offset(*curr + 6), 2);
- ptr::copy_nonoverlapping(lut_ptr.offset(d5 as isize), buf_ptr.offset(*curr + 8), 2);
- ptr::copy_nonoverlapping(lut_ptr.offset(d6 as isize), buf_ptr.offset(*curr + 10), 2);
- ptr::copy_nonoverlapping(lut_ptr.offset(d7 as isize), buf_ptr.offset(*curr + 12), 2);
- ptr::copy_nonoverlapping(lut_ptr.offset(d8 as isize), buf_ptr.offset(*curr + 14), 2);
+ ptr::copy_nonoverlapping(lut_ptr.add(d1 as usize), buf_ptr.add(*curr + 0), 2);
+ ptr::copy_nonoverlapping(lut_ptr.add(d2 as usize), buf_ptr.add(*curr + 2), 2);
+ ptr::copy_nonoverlapping(lut_ptr.add(d3 as usize), buf_ptr.add(*curr + 4), 2);
+ ptr::copy_nonoverlapping(lut_ptr.add(d4 as usize), buf_ptr.add(*curr + 6), 2);
+ ptr::copy_nonoverlapping(lut_ptr.add(d5 as usize), buf_ptr.add(*curr + 8), 2);
+ ptr::copy_nonoverlapping(lut_ptr.add(d6 as usize), buf_ptr.add(*curr + 10), 2);
+ ptr::copy_nonoverlapping(lut_ptr.add(d7 as usize), buf_ptr.add(*curr + 12), 2);
+ ptr::copy_nonoverlapping(lut_ptr.add(d8 as usize), buf_ptr.add(*curr + 14), 2);
}
if n >= 1e8 as u64 {
let to_parse = n % 1e8 as u64;
@@ -525,10 +525,10 @@ fn parse_u64_into<const N: usize>(mut n: u64, buf: &mut [MaybeUninit<u8>; N], cu
let d4 = ((to_parse / 1e0 as u64) % 100) << 1;
*curr -= 8;
- ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(*curr + 0), 2);
- ptr::copy_nonoverlapping(lut_ptr.offset(d2 as isize), buf_ptr.offset(*curr + 2), 2);
- ptr::copy_nonoverlapping(lut_ptr.offset(d3 as isize), buf_ptr.offset(*curr + 4), 2);
- ptr::copy_nonoverlapping(lut_ptr.offset(d4 as isize), buf_ptr.offset(*curr + 6), 2);
+ ptr::copy_nonoverlapping(lut_ptr.add(d1 as usize), buf_ptr.add(*curr + 0), 2);
+ ptr::copy_nonoverlapping(lut_ptr.add(d2 as usize), buf_ptr.add(*curr + 2), 2);
+ ptr::copy_nonoverlapping(lut_ptr.add(d3 as usize), buf_ptr.add(*curr + 4), 2);
+ ptr::copy_nonoverlapping(lut_ptr.add(d4 as usize), buf_ptr.add(*curr + 6), 2);
}
// `n` < 1e8 < (1 << 32)
let mut n = n as u32;
@@ -540,8 +540,8 @@ fn parse_u64_into<const N: usize>(mut n: u64, buf: &mut [MaybeUninit<u8>; N], cu
let d2 = (to_parse % 100) << 1;
*curr -= 4;
- ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(*curr + 0), 2);
- ptr::copy_nonoverlapping(lut_ptr.offset(d2 as isize), buf_ptr.offset(*curr + 2), 2);
+ ptr::copy_nonoverlapping(lut_ptr.add(d1 as usize), buf_ptr.add(*curr + 0), 2);
+ ptr::copy_nonoverlapping(lut_ptr.add(d2 as usize), buf_ptr.add(*curr + 2), 2);
}
// `n` < 1e4 < (1 << 16)
@@ -550,17 +550,17 @@ fn parse_u64_into<const N: usize>(mut n: u64, buf: &mut [MaybeUninit<u8>; N], cu
let d1 = (n % 100) << 1;
n /= 100;
*curr -= 2;
- ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(*curr), 2);
+ ptr::copy_nonoverlapping(lut_ptr.add(d1 as usize), buf_ptr.add(*curr), 2);
}
// decode last 1 or 2 chars
if n < 10 {
*curr -= 1;
- *buf_ptr.offset(*curr) = (n as u8) + b'0';
+ *buf_ptr.add(*curr) = (n as u8) + b'0';
} else {
let d1 = n << 1;
*curr -= 2;
- ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(*curr), 2);
+ ptr::copy_nonoverlapping(lut_ptr.add(d1 as usize), buf_ptr.add(*curr), 2);
}
}
}
@@ -593,21 +593,21 @@ impl fmt::Display for i128 {
fn fmt_u128(n: u128, is_nonnegative: bool, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// 2^128 is about 3*10^38, so 39 gives an extra byte of space
let mut buf = [MaybeUninit::<u8>::uninit(); 39];
- let mut curr = buf.len() as isize;
+ let mut curr = buf.len();
let (n, rem) = udiv_1e19(n);
parse_u64_into(rem, &mut buf, &mut curr);
if n != 0 {
// 0 pad up to point
- let target = (buf.len() - 19) as isize;
+ let target = buf.len() - 19;
// SAFETY: Guaranteed that we wrote at most 19 bytes, and there must be space
// remaining since it has length 39
unsafe {
ptr::write_bytes(
- MaybeUninit::slice_as_mut_ptr(&mut buf).offset(target),
+ MaybeUninit::slice_as_mut_ptr(&mut buf).add(target),
b'0',
- (curr - target) as usize,
+ curr - target,
);
}
curr = target;
@@ -616,16 +616,16 @@ fn fmt_u128(n: u128, is_nonnegative: bool, f: &mut fmt::Formatter<'_>) -> fmt::R
parse_u64_into(rem, &mut buf, &mut curr);
// Should this following branch be annotated with unlikely?
if n != 0 {
- let target = (buf.len() - 38) as isize;
+ let target = buf.len() - 38;
// The raw `buf_ptr` pointer is only valid until `buf` is used the next time,
        // but `buf` is not used in this scope so we are good.
let buf_ptr = MaybeUninit::slice_as_mut_ptr(&mut buf);
// SAFETY: At this point we wrote at most 38 bytes, pad up to that point,
// There can only be at most 1 digit remaining.
unsafe {
- ptr::write_bytes(buf_ptr.offset(target), b'0', (curr - target) as usize);
+ ptr::write_bytes(buf_ptr.add(target), b'0', curr - target);
curr = target - 1;
- *buf_ptr.offset(curr) = (n as u8) + b'0';
+ *buf_ptr.add(curr) = (n as u8) + b'0';
}
}
}
@@ -634,8 +634,8 @@ fn fmt_u128(n: u128, is_nonnegative: bool, f: &mut fmt::Formatter<'_>) -> fmt::R
// UTF-8 since `DEC_DIGITS_LUT` is
let buf_slice = unsafe {
str::from_utf8_unchecked(slice::from_raw_parts(
- MaybeUninit::slice_as_mut_ptr(&mut buf).offset(curr),
- buf.len() - curr as usize,
+ MaybeUninit::slice_as_mut_ptr(&mut buf).add(curr),
+ buf.len() - curr,
))
};
f.pad_integral(is_nonnegative, "", buf_slice)
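The `offset` → `add` conversions throughout this file are behaviour-preserving: for in-bounds, non-negative steps, `p.add(n)` is equivalent to `p.offset(n as isize)`, which is why `curr` can be kept as a `usize`. A minimal sketch:

```rust
fn main() {
    let buf = [10u8, 20, 30, 40];
    let p = buf.as_ptr();
    // SAFETY: index 2 is in bounds of `buf`.
    unsafe {
        assert_eq!(*p.add(2), *p.offset(2));
        assert_eq!(*p.add(2), 30);
    }
}
```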
diff --git a/library/core/src/future/poll_fn.rs b/library/core/src/future/poll_fn.rs
index db2a52332..90cb79739 100644
--- a/library/core/src/future/poll_fn.rs
+++ b/library/core/src/future/poll_fn.rs
@@ -5,7 +5,9 @@ use crate::task::{Context, Poll};
/// Creates a future that wraps a function returning [`Poll`].
///
-/// Polling the future delegates to the wrapped function.
+/// Polling the future delegates to the wrapped function. If the returned future is pinned, then the
+/// captured environment of the wrapped function is also pinned in-place, so as long as the closure
+/// does not move out of its captures it can soundly create pinned references to them.
///
/// # Examples
///
@@ -41,7 +43,7 @@ pub struct PollFn<F> {
}
#[stable(feature = "future_poll_fn", since = "1.64.0")]
-impl<F> Unpin for PollFn<F> {}
+impl<F: Unpin> Unpin for PollFn<F> {}
#[stable(feature = "future_poll_fn", since = "1.64.0")]
impl<F> fmt::Debug for PollFn<F> {
@@ -57,7 +59,8 @@ where
{
type Output = T;
- fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<T> {
- (&mut self.f)(cx)
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<T> {
+ // SAFETY: We are not moving out of the pinned field.
+ (unsafe { &mut self.get_unchecked_mut().f })(cx)
}
}
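For context, a minimal use of `poll_fn` (stable since 1.64). The pinning note added above matters when the closure wants to create pinned references into its own captures; this trivial sketch does not need that:

```rust
use std::future::poll_fn;
use std::task::Poll;

async fn demo() -> u32 {
    // A closure-backed future that is immediately ready.
    poll_fn(|_cx| Poll::Ready(42)).await
}

fn main() {
    // Driving the future needs an executor; constructing it is enough
    // to illustrate the shape of a `poll_fn`-based future.
    let _future = demo();
}
```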
diff --git a/library/core/src/future/ready.rs b/library/core/src/future/ready.rs
index 48f20f90a..a07b63fb6 100644
--- a/library/core/src/future/ready.rs
+++ b/library/core/src/future/ready.rs
@@ -24,6 +24,30 @@ impl<T> Future for Ready<T> {
}
}
+impl<T> Ready<T> {
+ /// Consumes the `Ready`, returning the wrapped value.
+ ///
+ /// # Panics
+ ///
+ /// Will panic if this [`Ready`] was already polled to completion.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ready_into_inner)]
+ /// use std::future;
+ ///
+ /// let a = future::ready(1);
+ /// assert_eq!(a.into_inner(), 1);
+ /// ```
+ #[unstable(feature = "ready_into_inner", issue = "101196")]
+ #[must_use]
+ #[inline]
+ pub fn into_inner(self) -> T {
+ self.0.expect("Called `into_inner()` on `Ready` after completion")
+ }
+}
+
/// Creates a future that is immediately ready with a value.
///
/// Futures created through this function are functionally similar to those
diff --git a/library/core/src/hash/mod.rs b/library/core/src/hash/mod.rs
index 5974562ac..aa13435e6 100644
--- a/library/core/src/hash/mod.rs
+++ b/library/core/src/hash/mod.rs
@@ -900,7 +900,7 @@ mod impls {
macro_rules! maybe_tuple_doc {
($a:ident @ #[$meta:meta] $item:item) => {
- #[cfg_attr(not(bootstrap), doc(fake_variadic))]
+ #[doc(fake_variadic)]
#[doc = "This trait is implemented for tuples up to twelve items long."]
#[$meta]
$item
diff --git a/library/core/src/hint.rs b/library/core/src/hint.rs
index 81b6d5737..c53175ba4 100644
--- a/library/core/src/hint.rs
+++ b/library/core/src/hint.rs
@@ -31,7 +31,7 @@ use crate::intrinsics;
///
/// `unreachable_unchecked()` can be used in situations where the compiler
/// can't prove invariants that were previously established. Such situations
-/// have a higher chance of occuring if those invariants are upheld by
+/// have a higher chance of occurring if those invariants are upheld by
/// external code that the compiler can't analyze.
/// ```
/// fn prepare_inputs(divisors: &mut Vec<u32>) {
@@ -100,7 +100,10 @@ use crate::intrinsics;
pub const unsafe fn unreachable_unchecked() -> ! {
// SAFETY: the safety contract for `intrinsics::unreachable` must
// be upheld by the caller.
- unsafe { intrinsics::unreachable() }
+ unsafe {
+ intrinsics::assert_unsafe_precondition!("hint::unreachable_unchecked must never be reached", () => false);
+ intrinsics::unreachable()
+ }
}
/// Emits a machine instruction to signal the processor that it is running in
@@ -160,19 +163,16 @@ pub const unsafe fn unreachable_unchecked() -> ! {
#[inline]
#[stable(feature = "renamed_spin_loop", since = "1.49.0")]
pub fn spin_loop() {
- #[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "sse2"))]
+ #[cfg(target_arch = "x86")]
{
- #[cfg(target_arch = "x86")]
- {
- // SAFETY: the `cfg` attr ensures that we only execute this on x86 targets.
- unsafe { crate::arch::x86::_mm_pause() };
- }
+ // SAFETY: the `cfg` attr ensures that we only execute this on x86 targets.
+ unsafe { crate::arch::x86::_mm_pause() };
+ }
- #[cfg(target_arch = "x86_64")]
- {
- // SAFETY: the `cfg` attr ensures that we only execute this on x86_64 targets.
- unsafe { crate::arch::x86_64::_mm_pause() };
- }
+ #[cfg(target_arch = "x86_64")]
+ {
+ // SAFETY: the `cfg` attr ensures that we only execute this on x86_64 targets.
+ unsafe { crate::arch::x86_64::_mm_pause() };
}
// RISC-V platform spin loop hint implementation
@@ -220,7 +220,7 @@ pub fn spin_loop() {
///
/// [`std::convert::identity`]: crate::convert::identity
#[inline]
-#[unstable(feature = "bench_black_box", issue = "64102")]
+#[stable(feature = "bench_black_box", since = "1.66.0")]
#[rustc_const_unstable(feature = "const_black_box", issue = "none")]
pub const fn black_box<T>(dummy: T) -> T {
crate::intrinsics::black_box(dummy)
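Since `black_box` is stabilized here, a small sketch of its intended use in micro-benchmarks, where it keeps the optimizer from constant-folding the measured work (the timing loop itself is omitted):

```rust
use std::hint::black_box;

fn sum_to(n: u64) -> u64 {
    (1..=n).sum()
}

fn main() {
    // `black_box` hides both the input and the result from the optimizer,
    // so the call cannot be folded away or hoisted out of a timing loop.
    let result = black_box(sum_to(black_box(1_000)));
    assert_eq!(result, 500_500);
}
```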
diff --git a/library/core/src/intrinsics.rs b/library/core/src/intrinsics.rs
index cabc5017f..1dc79afe8 100644
--- a/library/core/src/intrinsics.rs
+++ b/library/core/src/intrinsics.rs
@@ -54,7 +54,7 @@
)]
#![allow(missing_docs)]
-use crate::marker::{Destruct, DiscriminantKind};
+use crate::marker::DiscriminantKind;
use crate::mem;
// These imports are used for simplifying intra-doc links
@@ -63,7 +63,7 @@ use crate::mem;
use crate::sync::atomic::{self, AtomicBool, AtomicI32, AtomicIsize, AtomicU32, Ordering};
#[stable(feature = "drop_in_place", since = "1.8.0")]
-#[cfg_attr(not(bootstrap), rustc_allowed_through_unstable_modules)]
+#[rustc_allowed_through_unstable_modules]
#[deprecated(note = "no longer an intrinsic - use `ptr::drop_in_place` directly", since = "1.52.0")]
#[inline]
pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
@@ -71,214 +71,6 @@ pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
unsafe { crate::ptr::drop_in_place(to_drop) }
}
-// These have been renamed.
-#[cfg(bootstrap)]
-extern "rust-intrinsic" {
- pub fn atomic_cxchg<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
- pub fn atomic_cxchg_acq<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
- pub fn atomic_cxchg_rel<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
- pub fn atomic_cxchg_acqrel<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
- pub fn atomic_cxchg_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
- pub fn atomic_cxchg_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
- pub fn atomic_cxchg_failacq<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
- pub fn atomic_cxchg_acq_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
- pub fn atomic_cxchg_acqrel_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
- pub fn atomic_cxchgweak<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
- pub fn atomic_cxchgweak_acq<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
- pub fn atomic_cxchgweak_rel<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
- pub fn atomic_cxchgweak_acqrel<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
- pub fn atomic_cxchgweak_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
- pub fn atomic_cxchgweak_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
- pub fn atomic_cxchgweak_failacq<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
- pub fn atomic_cxchgweak_acq_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
- pub fn atomic_cxchgweak_acqrel_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
- pub fn atomic_load<T: Copy>(src: *const T) -> T;
- pub fn atomic_load_acq<T: Copy>(src: *const T) -> T;
- pub fn atomic_load_relaxed<T: Copy>(src: *const T) -> T;
- pub fn atomic_load_unordered<T: Copy>(src: *const T) -> T;
- pub fn atomic_store<T: Copy>(dst: *mut T, val: T);
- pub fn atomic_store_rel<T: Copy>(dst: *mut T, val: T);
- pub fn atomic_store_relaxed<T: Copy>(dst: *mut T, val: T);
- pub fn atomic_store_unordered<T: Copy>(dst: *mut T, val: T);
- pub fn atomic_xchg<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_xchg_acq<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_xchg_rel<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_xchg_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_xchg_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_xadd<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_xadd_acq<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_xadd_rel<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_xadd_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_xadd_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_xsub<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_xsub_acq<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_xsub_rel<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_xsub_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_xsub_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_and<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_and_acq<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_and_rel<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_and_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_and_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_nand<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_nand_acq<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_nand_rel<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_nand_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_nand_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_or<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_or_acq<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_or_rel<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_or_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_or_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_xor<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_xor_acq<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_xor_rel<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_xor_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_xor_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_max<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_max_acq<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_max_rel<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_max_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_max_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_min<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_min_acq<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_min_rel<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_min_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_min_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_umin<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_umin_acq<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_umin_rel<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_umin_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_umin_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_umax<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_umax_acq<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_umax_rel<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_umax_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_umax_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
- pub fn atomic_fence();
- pub fn atomic_fence_acq();
- pub fn atomic_fence_rel();
- pub fn atomic_fence_acqrel();
- pub fn atomic_singlethreadfence();
- pub fn atomic_singlethreadfence_acq();
- pub fn atomic_singlethreadfence_rel();
- pub fn atomic_singlethreadfence_acqrel();
-}
-
-// These have been renamed.
-#[cfg(bootstrap)]
-mod atomics {
- pub use super::atomic_cxchg as atomic_cxchg_seqcst_seqcst;
- pub use super::atomic_cxchg_acq as atomic_cxchg_acquire_acquire;
- pub use super::atomic_cxchg_acq_failrelaxed as atomic_cxchg_acquire_relaxed;
- pub use super::atomic_cxchg_acqrel as atomic_cxchg_acqrel_acquire;
- pub use super::atomic_cxchg_acqrel_failrelaxed as atomic_cxchg_acqrel_relaxed;
- pub use super::atomic_cxchg_failacq as atomic_cxchg_seqcst_acquire;
- pub use super::atomic_cxchg_failrelaxed as atomic_cxchg_seqcst_relaxed;
- pub use super::atomic_cxchg_rel as atomic_cxchg_release_relaxed;
- pub use super::atomic_cxchg_relaxed as atomic_cxchg_relaxed_relaxed;
-
- pub use super::atomic_cxchgweak as atomic_cxchgweak_seqcst_seqcst;
- pub use super::atomic_cxchgweak_acq as atomic_cxchgweak_acquire_acquire;
- pub use super::atomic_cxchgweak_acq_failrelaxed as atomic_cxchgweak_acquire_relaxed;
- pub use super::atomic_cxchgweak_acqrel as atomic_cxchgweak_acqrel_acquire;
- pub use super::atomic_cxchgweak_acqrel_failrelaxed as atomic_cxchgweak_acqrel_relaxed;
- pub use super::atomic_cxchgweak_failacq as atomic_cxchgweak_seqcst_acquire;
- pub use super::atomic_cxchgweak_failrelaxed as atomic_cxchgweak_seqcst_relaxed;
- pub use super::atomic_cxchgweak_rel as atomic_cxchgweak_release_relaxed;
- pub use super::atomic_cxchgweak_relaxed as atomic_cxchgweak_relaxed_relaxed;
-
- pub use super::atomic_load as atomic_load_seqcst;
- pub use super::atomic_load_acq as atomic_load_acquire;
- pub use super::atomic_load_relaxed;
- pub use super::atomic_load_unordered;
-
- pub use super::atomic_store as atomic_store_seqcst;
- pub use super::atomic_store_rel as atomic_store_release;
- pub use super::atomic_store_relaxed;
- pub use super::atomic_store_unordered;
-
- pub use super::atomic_xchg as atomic_xchg_seqcst;
- pub use super::atomic_xchg_acq as atomic_xchg_acquire;
- pub use super::atomic_xchg_acqrel;
- pub use super::atomic_xchg_rel as atomic_xchg_release;
- pub use super::atomic_xchg_relaxed;
-
- pub use super::atomic_xadd as atomic_xadd_seqcst;
- pub use super::atomic_xadd_acq as atomic_xadd_acquire;
- pub use super::atomic_xadd_acqrel;
- pub use super::atomic_xadd_rel as atomic_xadd_release;
- pub use super::atomic_xadd_relaxed;
-
- pub use super::atomic_xsub as atomic_xsub_seqcst;
- pub use super::atomic_xsub_acq as atomic_xsub_acquire;
- pub use super::atomic_xsub_acqrel;
- pub use super::atomic_xsub_rel as atomic_xsub_release;
- pub use super::atomic_xsub_relaxed;
-
- pub use super::atomic_and as atomic_and_seqcst;
- pub use super::atomic_and_acq as atomic_and_acquire;
- pub use super::atomic_and_acqrel;
- pub use super::atomic_and_rel as atomic_and_release;
- pub use super::atomic_and_relaxed;
-
- pub use super::atomic_nand as atomic_nand_seqcst;
- pub use super::atomic_nand_acq as atomic_nand_acquire;
- pub use super::atomic_nand_acqrel;
- pub use super::atomic_nand_rel as atomic_nand_release;
- pub use super::atomic_nand_relaxed;
-
- pub use super::atomic_or as atomic_or_seqcst;
- pub use super::atomic_or_acq as atomic_or_acquire;
- pub use super::atomic_or_acqrel;
- pub use super::atomic_or_rel as atomic_or_release;
- pub use super::atomic_or_relaxed;
-
- pub use super::atomic_xor as atomic_xor_seqcst;
- pub use super::atomic_xor_acq as atomic_xor_acquire;
- pub use super::atomic_xor_acqrel;
- pub use super::atomic_xor_rel as atomic_xor_release;
- pub use super::atomic_xor_relaxed;
-
- pub use super::atomic_max as atomic_max_seqcst;
- pub use super::atomic_max_acq as atomic_max_acquire;
- pub use super::atomic_max_acqrel;
- pub use super::atomic_max_rel as atomic_max_release;
- pub use super::atomic_max_relaxed;
-
- pub use super::atomic_min as atomic_min_seqcst;
- pub use super::atomic_min_acq as atomic_min_acquire;
- pub use super::atomic_min_acqrel;
- pub use super::atomic_min_rel as atomic_min_release;
- pub use super::atomic_min_relaxed;
-
- pub use super::atomic_umin as atomic_umin_seqcst;
- pub use super::atomic_umin_acq as atomic_umin_acquire;
- pub use super::atomic_umin_acqrel;
- pub use super::atomic_umin_rel as atomic_umin_release;
- pub use super::atomic_umin_relaxed;
-
- pub use super::atomic_umax as atomic_umax_seqcst;
- pub use super::atomic_umax_acq as atomic_umax_acquire;
- pub use super::atomic_umax_acqrel;
- pub use super::atomic_umax_rel as atomic_umax_release;
- pub use super::atomic_umax_relaxed;
-
- pub use super::atomic_fence as atomic_fence_seqcst;
- pub use super::atomic_fence_acq as atomic_fence_acquire;
- pub use super::atomic_fence_acqrel;
- pub use super::atomic_fence_rel as atomic_fence_release;
-
- pub use super::atomic_singlethreadfence as atomic_singlethreadfence_seqcst;
- pub use super::atomic_singlethreadfence_acq as atomic_singlethreadfence_acquire;
- pub use super::atomic_singlethreadfence_acqrel;
- pub use super::atomic_singlethreadfence_rel as atomic_singlethreadfence_release;
-}
-
-#[cfg(bootstrap)]
-pub use atomics::*;
-
-#[cfg(not(bootstrap))]
extern "rust-intrinsic" {
// N.B., these intrinsics take raw pointers because they mutate aliased
// memory, which is not valid for either `&` or `&mut`.
@@ -945,30 +737,7 @@ extern "rust-intrinsic" {
/// [`atomic::compiler_fence`] by passing [`Ordering::AcqRel`]
/// as the `order`.
pub fn atomic_singlethreadfence_acqrel();
-}
-
-// These have been renamed.
-//
-// These are the aliases for the old names.
-// To be removed when stdarch and panic_unwind have been updated.
-#[cfg(not(bootstrap))]
-mod atomics {
- pub use super::atomic_cxchg_acqrel_acquire as atomic_cxchg_acqrel;
- pub use super::atomic_cxchg_acqrel_relaxed as atomic_cxchg_acqrel_failrelaxed;
- pub use super::atomic_cxchg_acquire_acquire as atomic_cxchg_acq;
- pub use super::atomic_cxchg_acquire_relaxed as atomic_cxchg_acq_failrelaxed;
- pub use super::atomic_cxchg_relaxed_relaxed as atomic_cxchg_relaxed;
- pub use super::atomic_cxchg_release_relaxed as atomic_cxchg_rel;
- pub use super::atomic_cxchg_seqcst_acquire as atomic_cxchg_failacq;
- pub use super::atomic_cxchg_seqcst_relaxed as atomic_cxchg_failrelaxed;
- pub use super::atomic_cxchg_seqcst_seqcst as atomic_cxchg;
- pub use super::atomic_store_seqcst as atomic_store;
-}
-
-#[cfg(not(bootstrap))]
-pub use atomics::*;
-extern "rust-intrinsic" {
/// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction
/// if supported; otherwise, it is a no-op.
/// Prefetches have no effect on the behavior of the program but can change its performance
@@ -1019,6 +788,7 @@ extern "rust-intrinsic" {
/// uninitialized at that point in the control flow.
///
/// This intrinsic should not be used outside of the compiler.
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn rustc_peek<T>(_: T) -> T;
/// Aborts the execution of the process.
@@ -1036,6 +806,7 @@ extern "rust-intrinsic" {
/// On Unix, the
/// process will probably terminate with a signal like `SIGABRT`, `SIGILL`, `SIGTRAP`, `SIGSEGV` or
/// `SIGBUS`. The precise behaviour is not guaranteed and not stable.
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn abort() -> !;
/// Informs the optimizer that this point in the code is not reachable,
@@ -1074,6 +845,7 @@ extern "rust-intrinsic" {
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_likely", issue = "none")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn likely(b: bool) -> bool;
/// Hints to the compiler that branch condition is likely to be false.
@@ -1088,6 +860,7 @@ extern "rust-intrinsic" {
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_likely", issue = "none")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn unlikely(b: bool) -> bool;
/// Executes a breakpoint trap, for inspection by a debugger.
@@ -1107,6 +880,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is [`core::mem::size_of`].
#[rustc_const_stable(feature = "const_size_of", since = "1.40.0")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn size_of<T>() -> usize;
/// The minimum alignment of a type.
@@ -1118,6 +892,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is [`core::mem::align_of`].
#[rustc_const_stable(feature = "const_min_align_of", since = "1.40.0")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn min_align_of<T>() -> usize;
/// The preferred alignment of a type.
///
@@ -1146,6 +921,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is [`core::any::type_name`].
#[rustc_const_unstable(feature = "const_type_name", issue = "63084")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn type_name<T: ?Sized>() -> &'static str;
/// Gets an identifier which is globally unique to the specified type. This
@@ -1159,6 +935,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is [`core::any::TypeId::of`].
#[rustc_const_unstable(feature = "const_type_id", issue = "77125")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn type_id<T: ?Sized + 'static>() -> u64;
/// A guard for unsafe functions that cannot ever be executed if `T` is uninhabited:
@@ -1166,6 +943,7 @@ extern "rust-intrinsic" {
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_stable(feature = "const_assert_type", since = "1.59.0")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn assert_inhabited<T>();
/// A guard for unsafe functions that cannot ever be executed if `T` does not permit
@@ -1173,6 +951,7 @@ extern "rust-intrinsic" {
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_assert_type2", issue = "none")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn assert_zero_valid<T>();
/// A guard for unsafe functions that cannot ever be executed if `T` has invalid
@@ -1180,6 +959,7 @@ extern "rust-intrinsic" {
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_assert_type2", issue = "none")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn assert_uninit_valid<T>();
/// Gets a reference to a static `Location` indicating where it was called.
@@ -1191,6 +971,7 @@ extern "rust-intrinsic" {
///
/// Consider using [`core::panic::Location::caller`] instead.
#[rustc_const_unstable(feature = "const_caller_location", issue = "76156")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn caller_location() -> &'static crate::panic::Location<'static>;
/// Moves a value out of scope without running drop glue.
@@ -1203,6 +984,7 @@ extern "rust-intrinsic" {
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
#[rustc_const_unstable(feature = "const_intrinsic_forget", issue = "none")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn forget<T: ?Sized>(_: T);
/// Reinterprets the bits of a value of one type as another type.
@@ -1212,14 +994,14 @@ extern "rust-intrinsic" {
/// `transmute` is semantically equivalent to a bitwise move of one type
/// into another. It copies the bits from the source value into the
/// destination value, then forgets the original. Note that source and destination
- /// are passed by-value, which means if `T` or `U` contain padding, that padding
+ /// are passed by-value, which means if `Src` or `Dst` contain padding, that padding
/// is *not* guaranteed to be preserved by `transmute`.
///
/// Both the argument and the result must be [valid](../../nomicon/what-unsafe-does.html) at
/// their given type. Violating this condition leads to [undefined behavior][ub]. The compiler
/// will generate code *assuming that you, the programmer, ensure that there will never be
/// undefined behavior*. It is therefore your responsibility to guarantee that every value
- /// passed to `transmute` is valid at both types `T` and `U`. Failing to uphold this condition
+ /// passed to `transmute` is valid at both types `Src` and `Dst`. Failing to uphold this condition
/// may lead to unexpected and unstable compilation results. This makes `transmute` **incredibly
/// unsafe**. `transmute` should be the absolute last resort.
///
@@ -1230,7 +1012,7 @@ extern "rust-intrinsic" {
///
/// Because `transmute` is a by-value operation, alignment of the *transmuted values
/// themselves* is not a concern. As with any other function, the compiler already ensures
- /// both `T` and `U` are properly aligned. However, when transmuting values that *point
+ /// both `Src` and `Dst` are properly aligned. However, when transmuting values that *point
/// elsewhere* (such as pointers, references, boxes…), the caller has to ensure proper
/// alignment of the pointed-to values.
///
@@ -1313,7 +1095,7 @@ extern "rust-intrinsic" {
/// Note that using `transmute` to turn a pointer to a `usize` is (as noted above) [undefined
/// behavior][ub] in `const` contexts. Also outside of consts, this operation might not behave
/// as expected -- this is touching on many unspecified aspects of the Rust memory model.
- /// Depending on what the code is doing, the following alternatives are preferrable to
+ /// Depending on what the code is doing, the following alternatives are preferable to
/// pointer-to-integer transmutation:
/// - If the code just wants to store data of arbitrary type in some buffer and needs to pick a
/// type for that buffer, it can use [`MaybeUninit`][mem::MaybeUninit].
@@ -1463,10 +1245,10 @@ extern "rust-intrinsic" {
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
- #[cfg_attr(not(bootstrap), rustc_allowed_through_unstable_modules)]
+ #[rustc_allowed_through_unstable_modules]
#[rustc_const_stable(feature = "const_transmute", since = "1.56.0")]
#[rustc_diagnostic_item = "transmute"]
- pub fn transmute<T, U>(e: T) -> U;
+ pub fn transmute<Src, Dst>(src: Src) -> Dst;
/// Returns `true` if the actual type given as `T` requires drop
/// glue; returns `false` if the actual type provided for `T`
@@ -1482,6 +1264,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is [`mem::needs_drop`](crate::mem::needs_drop).
#[rustc_const_stable(feature = "const_needs_drop", since = "1.40.0")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn needs_drop<T: ?Sized>() -> bool;
/// Calculates the offset from a pointer.
@@ -1518,6 +1301,17 @@ extern "rust-intrinsic" {
#[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
pub fn arith_offset<T>(dst: *const T, offset: isize) -> *const T;
+ /// Masks out bits of the pointer according to a mask.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// Consider using [`pointer::mask`] instead.
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ pub fn ptr_mask<T>(ptr: *const T, mask: usize) -> *const T;
+
/// Equivalent to the appropriate `llvm.memcpy.p0i8.0i8.*` intrinsic, with
/// a size of `count` * `size_of::<T>()` and an alignment of
/// `min_align_of::<T>()`
@@ -1707,6 +1501,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is
/// [`f32::min`]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn minnumf32(x: f32, y: f32) -> f32;
/// Returns the minimum of two `f64` values.
///
@@ -1717,6 +1512,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is
/// [`f64::min`]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn minnumf64(x: f64, y: f64) -> f64;
/// Returns the maximum of two `f32` values.
///
@@ -1727,6 +1523,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is
/// [`f32::max`]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn maxnumf32(x: f32, y: f32) -> f32;
/// Returns the maximum of two `f64` values.
///
@@ -1737,6 +1534,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is
/// [`f64::max`]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn maxnumf64(x: f64, y: f64) -> f64;
/// Copies the sign from `y` to `x` for `f32` values.
@@ -1857,6 +1655,7 @@ extern "rust-intrinsic" {
/// primitives via the `count_ones` method. For example,
/// [`u32::count_ones`]
#[rustc_const_stable(feature = "const_ctpop", since = "1.40.0")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn ctpop<T: Copy>(x: T) -> T;
/// Returns the number of leading unset bits (zeroes) in an integer type `T`.
@@ -1894,6 +1693,7 @@ extern "rust-intrinsic" {
/// assert_eq!(num_leading, 16);
/// ```
#[rustc_const_stable(feature = "const_ctlz", since = "1.40.0")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn ctlz<T: Copy>(x: T) -> T;
/// Like `ctlz`, but extra-unsafe as it returns `undef` when
@@ -1950,6 +1750,7 @@ extern "rust-intrinsic" {
/// assert_eq!(num_trailing, 16);
/// ```
#[rustc_const_stable(feature = "const_cttz", since = "1.40.0")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn cttz<T: Copy>(x: T) -> T;
/// Like `cttz`, but extra-unsafe as it returns `undef` when
@@ -1982,6 +1783,7 @@ extern "rust-intrinsic" {
/// primitives via the `swap_bytes` method. For example,
/// [`u32::swap_bytes`]
#[rustc_const_stable(feature = "const_bswap", since = "1.40.0")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn bswap<T: Copy>(x: T) -> T;
/// Reverses the bits in an integer type `T`.
@@ -1995,6 +1797,7 @@ extern "rust-intrinsic" {
/// primitives via the `reverse_bits` method. For example,
/// [`u32::reverse_bits`]
#[rustc_const_stable(feature = "const_bitreverse", since = "1.40.0")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn bitreverse<T: Copy>(x: T) -> T;
/// Performs checked integer addition.
@@ -2008,6 +1811,7 @@ extern "rust-intrinsic" {
/// primitives via the `overflowing_add` method. For example,
/// [`u32::overflowing_add`]
#[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn add_with_overflow<T: Copy>(x: T, y: T) -> (T, bool);
/// Performs checked integer subtraction
@@ -2021,6 +1825,7 @@ extern "rust-intrinsic" {
/// primitives via the `overflowing_sub` method. For example,
/// [`u32::overflowing_sub`]
#[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn sub_with_overflow<T: Copy>(x: T, y: T) -> (T, bool);
/// Performs checked integer multiplication
@@ -2034,6 +1839,7 @@ extern "rust-intrinsic" {
/// primitives via the `overflowing_mul` method. For example,
/// [`u32::overflowing_mul`]
#[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn mul_with_overflow<T: Copy>(x: T, y: T) -> (T, bool);
/// Performs an exact division, resulting in undefined behavior where
@@ -2108,6 +1914,7 @@ extern "rust-intrinsic" {
/// primitives via the `rotate_left` method. For example,
/// [`u32::rotate_left`]
#[rustc_const_stable(feature = "const_int_rotate", since = "1.40.0")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn rotate_left<T: Copy>(x: T, y: T) -> T;
/// Performs rotate right.
@@ -2121,6 +1928,7 @@ extern "rust-intrinsic" {
/// primitives via the `rotate_right` method. For example,
/// [`u32::rotate_right`]
#[rustc_const_stable(feature = "const_int_rotate", since = "1.40.0")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn rotate_right<T: Copy>(x: T, y: T) -> T;
/// Returns (a + b) mod 2<sup>N</sup>, where N is the width of T in bits.
@@ -2134,6 +1942,7 @@ extern "rust-intrinsic" {
/// primitives via the `wrapping_add` method. For example,
/// [`u32::wrapping_add`]
#[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn wrapping_add<T: Copy>(a: T, b: T) -> T;
/// Returns (a - b) mod 2<sup>N</sup>, where N is the width of T in bits.
///
@@ -2146,6 +1955,7 @@ extern "rust-intrinsic" {
/// primitives via the `wrapping_sub` method. For example,
/// [`u32::wrapping_sub`]
#[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn wrapping_sub<T: Copy>(a: T, b: T) -> T;
/// Returns (a * b) mod 2<sup>N</sup>, where N is the width of T in bits.
///
@@ -2158,6 +1968,7 @@ extern "rust-intrinsic" {
/// primitives via the `wrapping_mul` method. For example,
/// [`u32::wrapping_mul`]
#[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn wrapping_mul<T: Copy>(a: T, b: T) -> T;
/// Computes `a + b`, saturating at numeric bounds.
@@ -2171,6 +1982,7 @@ extern "rust-intrinsic" {
/// primitives via the `saturating_add` method. For example,
/// [`u32::saturating_add`]
#[rustc_const_stable(feature = "const_int_saturating", since = "1.40.0")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn saturating_add<T: Copy>(a: T, b: T) -> T;
/// Computes `a - b`, saturating at numeric bounds.
///
@@ -2183,6 +1995,7 @@ extern "rust-intrinsic" {
/// primitives via the `saturating_sub` method. For example,
/// [`u32::saturating_sub`]
#[rustc_const_stable(feature = "const_int_saturating", since = "1.40.0")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn saturating_sub<T: Copy>(a: T, b: T) -> T;
/// Returns the value of the discriminant for the variant in 'v';
@@ -2195,6 +2008,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is [`core::mem::discriminant`].
#[rustc_const_unstable(feature = "const_discriminant", issue = "69821")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn discriminant_value<T>(v: &T) -> <T as DiscriminantKind>::Discriminant;
/// Returns the number of variants of the type `T` cast to a `usize`;
@@ -2207,6 +2021,7 @@ extern "rust-intrinsic" {
///
/// The to-be-stabilized version of this intrinsic is [`mem::variant_count`].
#[rustc_const_unstable(feature = "variant_count", issue = "73662")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn variant_count<T>() -> usize;
/// Rust's "try catch" construct which invokes the function pointer `try_fn`
@@ -2223,30 +2038,25 @@ extern "rust-intrinsic" {
pub fn nontemporal_store<T>(ptr: *mut T, val: T);
/// See documentation of `<*const T>::offset_from` for details.
- #[rustc_const_unstable(feature = "const_ptr_offset_from", issue = "92980")]
+ #[rustc_const_stable(feature = "const_ptr_offset_from", since = "1.65.0")]
pub fn ptr_offset_from<T>(ptr: *const T, base: *const T) -> isize;
/// See documentation of `<*const T>::sub_ptr` for details.
- #[rustc_const_unstable(feature = "const_ptr_offset_from", issue = "92980")]
+ #[rustc_const_unstable(feature = "const_ptr_sub_ptr", issue = "95892")]
pub fn ptr_offset_from_unsigned<T>(ptr: *const T, base: *const T) -> usize;
/// See documentation of `<*const T>::guaranteed_eq` for details.
+ /// Returns `2` if the result is unknown.
+ /// Returns `1` if the pointers are guaranteed equal.
+ /// Returns `0` if the pointers are guaranteed not equal.
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
#[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
- pub fn ptr_guaranteed_eq<T>(ptr: *const T, other: *const T) -> bool;
-
- /// See documentation of `<*const T>::guaranteed_ne` for details.
- ///
- /// Note that, unlike most intrinsics, this is safe to call;
- /// it does not require an `unsafe` block.
- /// Therefore, implementations must not require the user to uphold
- /// any safety invariants.
- #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
- pub fn ptr_guaranteed_ne<T>(ptr: *const T, other: *const T) -> bool;
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ pub fn ptr_guaranteed_cmp<T>(ptr: *const T, other: *const T) -> u8;
/// Allocates a block of memory at compile time.
/// At runtime, just returns a null pointer.
@@ -2282,7 +2092,8 @@ extern "rust-intrinsic" {
///
/// # Safety
///
- /// It's UB to call this if any of the *bytes* in `*a` or `*b` are uninitialized.
+ /// It's UB to call this if any of the *bytes* in `*a` or `*b` are uninitialized or carry a
+ /// pointer value.
/// Note that this is a stricter criterion than just the *values* being
/// fully-initialized: if `T` has padding, it's UB to call this intrinsic.
///
@@ -2295,17 +2106,74 @@ extern "rust-intrinsic" {
///
/// [`std::hint::black_box`]: crate::hint::black_box
#[rustc_const_unstable(feature = "const_black_box", issue = "none")]
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
pub fn black_box<T>(dummy: T) -> T;
/// `ptr` must point to a vtable.
/// The intrinsic will return the size stored in that vtable.
- #[cfg(not(bootstrap))]
pub fn vtable_size(ptr: *const ()) -> usize;
/// `ptr` must point to a vtable.
/// The intrinsic will return the alignment stored in that vtable.
- #[cfg(not(bootstrap))]
pub fn vtable_align(ptr: *const ()) -> usize;
+
+ /// Selects which function to call depending on the context.
+ ///
+ /// If this function is evaluated at compile-time, then a call to this
+ /// intrinsic will be replaced with a call to `called_in_const`. It gets
+ /// replaced with a call to `called_at_rt` otherwise.
+ ///
+ /// # Type Requirements
+ ///
+ /// The two functions must both be function items. They cannot be function
+ /// pointers or closures. The first function must be a `const fn`.
+ ///
+ /// `arg` will be the tupled arguments passed to whichever of the two functions
+ /// is selected; therefore, both functions must accept the same type of
+ /// arguments. Both functions must return `RET`.
+ ///
+ /// # Safety
+ ///
+ /// The two functions must be observably equivalent. Safe code in other
+ /// crates may assume that calling a `const fn` at compile-time and at run-time
+ /// produces the same result. A function that produces a different result when
+ /// evaluated at run-time, or has any other observable side-effects, is
+ /// *unsound*.
+ ///
+ /// Here is an example of how this could cause a problem:
+ /// ```no_run
+ /// #![feature(const_eval_select)]
+ /// #![feature(core_intrinsics)]
+ /// use std::hint::unreachable_unchecked;
+ /// use std::intrinsics::const_eval_select;
+ ///
+ /// // Crate A
+ /// pub const fn inconsistent() -> i32 {
+ /// fn runtime() -> i32 { 1 }
+ /// const fn compiletime() -> i32 { 2 }
+ ///
+ /// unsafe {
+ /// // ⚠ This code violates the required equivalence of `compiletime`
+ /// // and `runtime`.
+ /// const_eval_select((), compiletime, runtime)
+ /// }
+ /// }
+ ///
+ /// // Crate B
+ /// const X: i32 = inconsistent();
+ /// let x = inconsistent();
+ /// if x != X { unsafe { unreachable_unchecked(); }}
+ /// ```
+ ///
+ /// This code causes Undefined Behavior when being run, since the
+ /// `unreachable_unchecked` is actually being reached. The bug is in *crate A*,
+ /// which violates the principle that a `const fn` must behave the same at
+ /// compile-time and at run-time. The unsafe code in crate B is fine.
+ #[rustc_const_unstable(feature = "const_eval_select", issue = "none")]
+ pub fn const_eval_select<ARG, F, G, RET>(arg: ARG, called_in_const: F, called_at_rt: G) -> RET
+ where
+ G: FnOnce<ARG, Output = RET>,
+ F: FnOnce<ARG, Output = RET>;
}
// Some functions are defined here because they accidentally got made
@@ -2316,6 +2184,11 @@ extern "rust-intrinsic" {
/// Check that the preconditions of an unsafe function are followed, if debug_assertions are on,
/// and only at runtime.
///
+/// This macro should be called as `assert_unsafe_precondition!([Generics](name: Type) => Expression)`,
+/// where the named values are moved into the macro as captured variables and used to define an item
+/// on which `const_eval_select` is called. The tokens inside the square brackets denote the generics
+/// of the generated function declarations and can be omitted if there are no generics.
+///
/// # Safety
///
/// Invoking this macro is only sound if the following code is already UB when the passed
@@ -2330,18 +2203,23 @@ extern "rust-intrinsic" {
/// the occasional mistake, and this check should help them figure things out.
#[allow_internal_unstable(const_eval_select)] // permit this to be called in stably-const fn
macro_rules! assert_unsafe_precondition {
- ($e:expr) => {
+ ($name:expr, $([$($tt:tt)*])?($($i:ident:$ty:ty),*$(,)?) => $e:expr) => {
if cfg!(debug_assertions) {
- // Use a closure so that we can capture arbitrary expressions from the invocation
- let runtime = || {
+ // allow non_snake_case so that const generic parameters can be captured
+ #[allow(non_snake_case)]
+ #[inline(always)]
+ fn runtime$(<$($tt)*>)?($($i:$ty),*) {
if !$e {
- // abort instead of panicking to reduce impact on code size
- ::core::intrinsics::abort();
+ // don't unwind to reduce impact on code size
+ ::core::panicking::panic_str_nounwind(
+ concat!("unsafe precondition(s) violated: ", $name)
+ );
}
- };
- const fn comptime() {}
+ }
+ #[allow(non_snake_case)]
+ const fn comptime$(<$($tt)*>)?($(_:$ty),*) {}
- ::core::intrinsics::const_eval_select((), comptime, runtime);
+ ::core::intrinsics::const_eval_select(($($i,)*), comptime, runtime);
}
};
}
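
The new calling shape passes the checked values as explicit parameters instead of capturing them in a closure. Below is a minimal, runtime-only sketch of that shape; the helper name and the particular precondition are illustrative, and the real macro additionally dispatches through `const_eval_select` so the check disappears during const evaluation.

```rust
// Illustrative sketch only: mirrors the `name, [Generics](args) => expr` shape by
// passing the checked values as parameters to an inner item instead of capturing them.
fn check_aligned_and_not_null<T>(ptr: *const T) {
    if cfg!(debug_assertions) {
        #[inline(always)]
        fn runtime<T>(ptr: *const T) {
            if ptr.is_null() || (ptr as usize) % core::mem::align_of::<T>() != 0 {
                panic!("unsafe precondition(s) violated: pointer must be aligned and non-null");
            }
        }
        runtime(ptr);
    }
}

fn main() {
    let x = 5u32;
    check_aligned_and_not_null(&x as *const u32); // aligned and non-null: no panic
}
```

In the real macro, the bracketed generics feed the generated `runtime`/`comptime` items, matching the uses added below for `copy_nonoverlapping`, `copy`, and `write_bytes`.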
@@ -2350,7 +2228,17 @@ pub(crate) use assert_unsafe_precondition;
/// Checks whether `ptr` is properly aligned with respect to
/// `align_of::<T>()`.
pub(crate) fn is_aligned_and_not_null<T>(ptr: *const T) -> bool {
- !ptr.is_null() && ptr.addr() % mem::align_of::<T>() == 0
+ !ptr.is_null() && ptr.is_aligned()
+}
+
+/// Checks whether an allocation of `len` instances of `T` exceeds
+/// the maximum allowed allocation size.
+pub(crate) fn is_valid_allocation_size<T>(len: usize) -> bool {
+ let max_len = const {
+ let size = crate::mem::size_of::<T>();
+ if size == 0 { usize::MAX } else { isize::MAX as usize / size }
+ };
+ len <= max_len
}
/// Checks whether the regions of memory starting at `src` and `dst` of size
@@ -2420,9 +2308,9 @@ pub(crate) fn is_nonoverlapping<T>(src: *const T, dst: *const T, count: usize) -
/// dst.reserve(src_len);
///
/// unsafe {
-/// // The call to offset is always safe because `Vec` will never
+/// // The call to add is always safe because `Vec` will never
/// // allocate more than `isize::MAX` bytes.
-/// let dst_ptr = dst.as_mut_ptr().offset(dst_len as isize);
+/// let dst_ptr = dst.as_mut_ptr().add(dst_len);
/// let src_ptr = src.as_ptr();
///
/// // Truncate `src` without dropping its contents. We do this first,
@@ -2451,7 +2339,7 @@ pub(crate) fn is_nonoverlapping<T>(src: *const T, dst: *const T, count: usize) -
/// [`Vec::append`]: ../../std/vec/struct.Vec.html#method.append
#[doc(alias = "memcpy")]
#[stable(feature = "rust1", since = "1.0.0")]
-#[cfg_attr(not(bootstrap), rustc_allowed_through_unstable_modules)]
+#[rustc_allowed_through_unstable_modules]
#[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
@@ -2465,6 +2353,9 @@ pub const unsafe fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: us
// upheld by the caller.
unsafe {
assert_unsafe_precondition!(
+ "ptr::copy_nonoverlapping requires that both pointer arguments are aligned and non-null \
+ and the specified memory ranges do not overlap",
+ [T](src: *const T, dst: *mut T, count: usize) =>
is_aligned_and_not_null(src)
&& is_aligned_and_not_null(dst)
&& is_nonoverlapping(src, dst, count)
@@ -2538,7 +2429,7 @@ pub const unsafe fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: us
/// ```
#[doc(alias = "memmove")]
#[stable(feature = "rust1", since = "1.0.0")]
-#[cfg_attr(not(bootstrap), rustc_allowed_through_unstable_modules)]
+#[rustc_allowed_through_unstable_modules]
#[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
@@ -2550,7 +2441,11 @@ pub const unsafe fn copy<T>(src: *const T, dst: *mut T, count: usize) {
// SAFETY: the safety contract for `copy` must be upheld by the caller.
unsafe {
- assert_unsafe_precondition!(is_aligned_and_not_null(src) && is_aligned_and_not_null(dst));
+ assert_unsafe_precondition!(
+ "ptr::copy requires that both pointer arguments are aligned aligned and non-null",
+ [T](src: *const T, dst: *mut T) =>
+ is_aligned_and_not_null(src) && is_aligned_and_not_null(dst)
+ );
copy(src, dst, count)
}
}
@@ -2606,7 +2501,7 @@ pub const unsafe fn copy<T>(src: *const T, dst: *mut T, count: usize) {
/// ```
#[doc(alias = "memset")]
#[stable(feature = "rust1", since = "1.0.0")]
-#[cfg_attr(not(bootstrap), rustc_allowed_through_unstable_modules)]
+#[rustc_allowed_through_unstable_modules]
#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
@@ -2618,99 +2513,10 @@ pub const unsafe fn write_bytes<T>(dst: *mut T, val: u8, count: usize) {
// SAFETY: the safety contract for `write_bytes` must be upheld by the caller.
unsafe {
- assert_unsafe_precondition!(is_aligned_and_not_null(dst));
+ assert_unsafe_precondition!(
+ "ptr::write_bytes requires that the destination pointer is aligned and non-null",
+ [T](dst: *mut T) => is_aligned_and_not_null(dst)
+ );
write_bytes(dst, val, count)
}
}
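
For reference, a small safe-usage sketch of the preconditions the new messages spell out: aligned, non-null pointers, and non-overlapping ranges for `copy_nonoverlapping`.

```rust
use std::ptr;

fn main() {
    let src = [1u8, 2, 3, 4];
    let mut dst = [0u8; 4];
    // SAFETY: both pointers are aligned and non-null, the ranges are in-bounds,
    // and two distinct local arrays never overlap.
    unsafe {
        ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), src.len());
    }
    assert_eq!(dst, src);
    // SAFETY: `dst` is aligned, non-null, and large enough for `dst.len()` bytes.
    unsafe {
        ptr::write_bytes(dst.as_mut_ptr(), 0xFF, dst.len());
    }
    assert_eq!(dst, [0xFF; 4]);
}
```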
-
-/// Selects which function to call depending on the context.
-///
-/// If this function is evaluated at compile-time, then a call to this
-/// intrinsic will be replaced with a call to `called_in_const`. It gets
-/// replaced with a call to `called_at_rt` otherwise.
-///
-/// # Type Requirements
-///
-/// The two functions must be both function items. They cannot be function
-/// pointers or closures.
-///
-/// `arg` will be the arguments that will be passed to either one of the
-/// two functions, therefore, both functions must accept the same type of
-/// arguments. Both functions must return RET.
-///
-/// # Safety
-///
-/// The two functions must behave observably equivalent. Safe code in other
-/// crates may assume that calling a `const fn` at compile-time and at run-time
-/// produces the same result. A function that produces a different result when
-/// evaluated at run-time, or has any other observable side-effects, is
-/// *unsound*.
-///
-/// Here is an example of how this could cause a problem:
-/// ```no_run
-/// #![feature(const_eval_select)]
-/// #![feature(core_intrinsics)]
-/// use std::hint::unreachable_unchecked;
-/// use std::intrinsics::const_eval_select;
-///
-/// // Crate A
-/// pub const fn inconsistent() -> i32 {
-/// fn runtime() -> i32 { 1 }
-/// const fn compiletime() -> i32 { 2 }
-///
-/// unsafe {
-// // ⚠ This code violates the required equivalence of `compiletime`
-/// // and `runtime`.
-/// const_eval_select((), compiletime, runtime)
-/// }
-/// }
-///
-/// // Crate B
-/// const X: i32 = inconsistent();
-/// let x = inconsistent();
-/// if x != X { unsafe { unreachable_unchecked(); }}
-/// ```
-///
-/// This code causes Undefined Behavior when being run, since the
-/// `unreachable_unchecked` is actually being reached. The bug is in *crate A*,
-/// which violates the principle that a `const fn` must behave the same at
-/// compile-time and at run-time. The unsafe code in crate B is fine.
-#[unstable(
- feature = "const_eval_select",
- issue = "none",
- reason = "const_eval_select will never be stable"
-)]
-#[rustc_const_unstable(feature = "const_eval_select", issue = "none")]
-#[lang = "const_eval_select"]
-#[rustc_do_not_const_check]
-#[inline]
-pub const unsafe fn const_eval_select<ARG, F, G, RET>(
- arg: ARG,
- _called_in_const: F,
- called_at_rt: G,
-) -> RET
-where
- F: ~const FnOnce<ARG, Output = RET>,
- G: FnOnce<ARG, Output = RET> + ~const Destruct,
-{
- called_at_rt.call_once(arg)
-}
-
-#[unstable(
- feature = "const_eval_select",
- issue = "none",
- reason = "const_eval_select will never be stable"
-)]
-#[rustc_const_unstable(feature = "const_eval_select", issue = "none")]
-#[lang = "const_eval_select_ct"]
-pub const unsafe fn const_eval_select_ct<ARG, F, G, RET>(
- arg: ARG,
- called_in_const: F,
- _called_at_rt: G,
-) -> RET
-where
- F: ~const FnOnce<ARG, Output = RET>,
- G: FnOnce<ARG, Output = RET> + ~const Destruct,
-{
- called_in_const.call_once(arg)
-}
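
The removed free functions are replaced by a single intrinsic with the same contract. A nightly-only sketch of a *consistent* pair, i.e. one that satisfies the observable-equivalence requirement documented above (feature gates as of this change; they may shift):

```rust
#![feature(const_eval_select)]
#![feature(core_intrinsics)]
use std::intrinsics::const_eval_select;

pub const fn strlen(bytes: &[u8]) -> usize {
    const fn compiletime(bytes: &[u8]) -> usize {
        let mut i = 0;
        while i < bytes.len() && bytes[i] != 0 {
            i += 1;
        }
        i
    }
    fn runtime(bytes: &[u8]) -> usize {
        bytes.iter().position(|&b| b == 0).unwrap_or(bytes.len())
    }
    // SAFETY: both branches compute the same value for every input, which is
    // exactly the observable-equivalence requirement stated above.
    unsafe { const_eval_select((bytes,), compiletime, runtime) }
}

const AT_COMPILE_TIME: usize = strlen(b"abc\0def");

fn main() {
    assert_eq!(AT_COMPILE_TIME, 3);
    assert_eq!(strlen(b"abc\0def"), 3);
}
```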
diff --git a/library/core/src/iter/adapters/array_chunks.rs b/library/core/src/iter/adapters/array_chunks.rs
new file mode 100644
index 000000000..d4fb88610
--- /dev/null
+++ b/library/core/src/iter/adapters/array_chunks.rs
@@ -0,0 +1,170 @@
+use crate::array;
+use crate::iter::{ByRefSized, FusedIterator, Iterator};
+use crate::ops::{ControlFlow, Try};
+
+/// An iterator over `N` elements of the iterator at a time.
+///
+/// The chunks do not overlap. If `N` does not divide the length of the
+/// iterator, then the last up to `N-1` elements will be omitted.
+///
+/// This `struct` is created by the [`array_chunks`][Iterator::array_chunks]
+/// method on [`Iterator`]. See its documentation for more.
+#[derive(Debug, Clone)]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[unstable(feature = "iter_array_chunks", reason = "recently added", issue = "100450")]
+pub struct ArrayChunks<I: Iterator, const N: usize> {
+ iter: I,
+ remainder: Option<array::IntoIter<I::Item, N>>,
+}
+
+impl<I, const N: usize> ArrayChunks<I, N>
+where
+ I: Iterator,
+{
+ #[track_caller]
+ pub(in crate::iter) fn new(iter: I) -> Self {
+ assert!(N != 0, "chunk size must be non-zero");
+ Self { iter, remainder: None }
+ }
+
+ /// Returns an iterator over the remaining elements of the original iterator
+ /// that are not going to be returned by this iterator. The returned
+ /// iterator will yield at most `N-1` elements.
+ #[unstable(feature = "iter_array_chunks", reason = "recently added", issue = "100450")]
+ #[inline]
+ pub fn into_remainder(self) -> Option<array::IntoIter<I::Item, N>> {
+ self.remainder
+ }
+}
+
+#[unstable(feature = "iter_array_chunks", reason = "recently added", issue = "100450")]
+impl<I, const N: usize> Iterator for ArrayChunks<I, N>
+where
+ I: Iterator,
+{
+ type Item = [I::Item; N];
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ self.try_for_each(ControlFlow::Break).break_value()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (lower, upper) = self.iter.size_hint();
+
+ (lower / N, upper.map(|n| n / N))
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.iter.count() / N
+ }
+
+ fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ let mut acc = init;
+ loop {
+ match self.iter.next_chunk() {
+ Ok(chunk) => acc = f(acc, chunk)?,
+ Err(remainder) => {
+ // Make sure to not override `self.remainder` with an empty array
+ // when `next` is called after `ArrayChunks` exhaustion.
+ self.remainder.get_or_insert(remainder);
+
+ break try { acc };
+ }
+ }
+ }
+ }
+
+ impl_fold_via_try_fold! { fold -> try_fold }
+}
+
+#[unstable(feature = "iter_array_chunks", reason = "recently added", issue = "100450")]
+impl<I, const N: usize> DoubleEndedIterator for ArrayChunks<I, N>
+where
+ I: DoubleEndedIterator + ExactSizeIterator,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.try_rfold((), |(), x| ControlFlow::Break(x)).break_value()
+ }
+
+ fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ // Since we are iterating from the back, we need to handle the remainder first.
+ self.next_back_remainder();
+
+ let mut acc = init;
+ let mut iter = ByRefSized(&mut self.iter).rev();
+
+ // NB remainder is handled by `next_back_remainder`, so
+ // `next_chunk` can't return `Err` with non-empty remainder
+ // (assuming correct `I as ExactSizeIterator` impl).
+ while let Ok(mut chunk) = iter.next_chunk() {
+ // FIXME: do not do double reverse
+ // (we could instead add `next_chunk_back` for example)
+ chunk.reverse();
+ acc = f(acc, chunk)?
+ }
+
+ try { acc }
+ }
+
+ impl_fold_via_try_fold! { rfold -> try_rfold }
+}
+
+impl<I, const N: usize> ArrayChunks<I, N>
+where
+ I: DoubleEndedIterator + ExactSizeIterator,
+{
+ /// Updates `self.remainder` such that `self.iter.len()` is divisible by `N`.
+ fn next_back_remainder(&mut self) {
+ // Make sure to not override `self.remainder` with an empty array
+ // when `next_back` is called after `ArrayChunks` exhaustion.
+ if self.remainder.is_some() {
+ return;
+ }
+
+ // We use the `ExactSizeIterator` implementation of the underlying
+ // iterator to know how many remaining elements there are.
+ let rem = self.iter.len() % N;
+
+ // Take the last `rem` elements out of `self.iter`.
+ let mut remainder =
+ // SAFETY: x % N < N for all x, so `next_chunk` always returns `Err` and `unwrap_err_unchecked` is sound.
+ unsafe { self.iter.by_ref().rev().take(rem).next_chunk().unwrap_err_unchecked() };
+
+ // We used `.rev()` above, so we need to re-reverse the remainder.
+ remainder.as_mut_slice().reverse();
+ self.remainder = Some(remainder);
+ }
+}
+
+#[unstable(feature = "iter_array_chunks", reason = "recently added", issue = "100450")]
+impl<I, const N: usize> FusedIterator for ArrayChunks<I, N> where I: FusedIterator {}
+
+#[unstable(feature = "iter_array_chunks", reason = "recently added", issue = "100450")]
+impl<I, const N: usize> ExactSizeIterator for ArrayChunks<I, N>
+where
+ I: ExactSizeIterator,
+{
+ #[inline]
+ fn len(&self) -> usize {
+ self.iter.len() / N
+ }
+
+ #[inline]
+ fn is_empty(&self) -> bool {
+ self.iter.len() < N
+ }
+}
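
A short usage sketch of the new adapter (nightly-only; `iter_array_chunks` is unstable under issue #100450), including the `into_remainder` accessor described above:

```rust
#![feature(iter_array_chunks)]

fn main() {
    let mut chunks = (0..7).array_chunks::<3>();
    assert_eq!(chunks.next(), Some([0, 1, 2]));
    assert_eq!(chunks.next(), Some([3, 4, 5]));
    assert_eq!(chunks.next(), None);
    // Elements that did not fill a whole chunk are reachable afterwards.
    let leftover: Vec<_> = chunks.into_remainder().unwrap().collect();
    assert_eq!(leftover, [6]);
}
```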
diff --git a/library/core/src/iter/adapters/by_ref_sized.rs b/library/core/src/iter/adapters/by_ref_sized.rs
index cc1e8e8a2..1945e402f 100644
--- a/library/core/src/iter/adapters/by_ref_sized.rs
+++ b/library/core/src/iter/adapters/by_ref_sized.rs
@@ -1,4 +1,7 @@
-use crate::ops::Try;
+use crate::{
+ const_closure::ConstFnMutClosure,
+ ops::{NeverShortCircuit, Try},
+};
/// Like `Iterator::by_ref`, but requiring `Sized` so it can forward generics.
///
@@ -8,36 +11,41 @@ use crate::ops::Try;
#[derive(Debug)]
pub struct ByRefSized<'a, I>(pub &'a mut I);
+// The following implementations use UFCS-style calls, rather than trusting autoderef,
+// to avoid accidentally calling the `&mut Iterator` implementations.
+
#[unstable(feature = "std_internals", issue = "none")]
impl<I: Iterator> Iterator for ByRefSized<'_, I> {
type Item = I::Item;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
- self.0.next()
+ I::next(self.0)
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
- self.0.size_hint()
+ I::size_hint(self.0)
}
#[inline]
fn advance_by(&mut self, n: usize) -> Result<(), usize> {
- self.0.advance_by(n)
+ I::advance_by(self.0, n)
}
#[inline]
fn nth(&mut self, n: usize) -> Option<Self::Item> {
- self.0.nth(n)
+ I::nth(self.0, n)
}
#[inline]
- fn fold<B, F>(self, init: B, f: F) -> B
+ fn fold<B, F>(self, init: B, mut f: F) -> B
where
F: FnMut(B, Self::Item) -> B,
{
- self.0.fold(init, f)
+ // `fold` needs ownership, so this can't forward directly.
+ I::try_fold(self.0, init, ConstFnMutClosure::new(&mut f, NeverShortCircuit::wrap_mut_2_imp))
+ .0
}
#[inline]
@@ -46,7 +54,7 @@ impl<I: Iterator> Iterator for ByRefSized<'_, I> {
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
- self.0.try_fold(init, f)
+ I::try_fold(self.0, init, f)
}
}
@@ -54,25 +62,31 @@ impl<I: Iterator> Iterator for ByRefSized<'_, I> {
impl<I: DoubleEndedIterator> DoubleEndedIterator for ByRefSized<'_, I> {
#[inline]
fn next_back(&mut self) -> Option<Self::Item> {
- self.0.next_back()
+ I::next_back(self.0)
}
#[inline]
fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
- self.0.advance_back_by(n)
+ I::advance_back_by(self.0, n)
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
- self.0.nth_back(n)
+ I::nth_back(self.0, n)
}
#[inline]
- fn rfold<B, F>(self, init: B, f: F) -> B
+ fn rfold<B, F>(self, init: B, mut f: F) -> B
where
F: FnMut(B, Self::Item) -> B,
{
- self.0.rfold(init, f)
+ // `rfold` needs ownership, so this can't forward directly.
+ I::try_rfold(
+ self.0,
+ init,
+ ConstFnMutClosure::new(&mut f, NeverShortCircuit::wrap_mut_2_imp),
+ )
+ .0
}
#[inline]
@@ -81,6 +95,6 @@ impl<I: DoubleEndedIterator> DoubleEndedIterator for ByRefSized<'_, I> {
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
- self.0.try_rfold(init, f)
+ I::try_rfold(self.0, init, f)
}
}
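
A standalone sketch of why the fully qualified `I::...` calls matter: a method call on the inner `&mut I` would dispatch through the blanket `impl Iterator for &mut I`, which only overrides a handful of methods, while UFCS reaches the concrete iterator's own (possibly specialized) implementations. The wrapper below is illustrative, not the std-internal `ByRefSized` itself:

```rust
// Toy version of the forwarding pattern: fully qualified calls hit `I`'s methods directly.
struct ByRefSketch<'a, I>(&'a mut I);

impl<I: Iterator> Iterator for ByRefSketch<'_, I> {
    type Item = I::Item;

    fn next(&mut self) -> Option<Self::Item> {
        I::next(self.0)
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        I::size_hint(self.0)
    }
}

fn main() {
    let mut it = 0..5;
    let first_two: Vec<_> = ByRefSketch(&mut it).take(2).collect();
    assert_eq!(first_two, vec![0, 1]);
    assert_eq!(it.next(), Some(2)); // the original iterator keeps its position
}
```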
diff --git a/library/core/src/iter/adapters/copied.rs b/library/core/src/iter/adapters/copied.rs
index f9bfd77d7..62d3afb81 100644
--- a/library/core/src/iter/adapters/copied.rs
+++ b/library/core/src/iter/adapters/copied.rs
@@ -2,7 +2,10 @@ use crate::iter::adapters::{
zip::try_get_unchecked, TrustedRandomAccess, TrustedRandomAccessNoCoerce,
};
use crate::iter::{FusedIterator, TrustedLen};
+use crate::mem::MaybeUninit;
+use crate::mem::SizedTypeProperties;
use crate::ops::Try;
+use crate::{array, ptr};
/// An iterator that copies the elements of an underlying iterator.
///
@@ -44,6 +47,15 @@ where
self.it.next().copied()
}
+ fn next_chunk<const N: usize>(
+ &mut self,
+ ) -> Result<[Self::Item; N], array::IntoIter<Self::Item, N>>
+ where
+ Self: Sized,
+ {
+ <I as SpecNextChunk<'_, N, T>>::spec_next_chunk(&mut self.it)
+ }
+
fn size_hint(&self) -> (usize, Option<usize>) {
self.it.size_hint()
}
@@ -166,3 +178,65 @@ where
T: Copy,
{
}
+
+trait SpecNextChunk<'a, const N: usize, T: 'a>: Iterator<Item = &'a T>
+where
+ T: Copy,
+{
+ fn spec_next_chunk(&mut self) -> Result<[T; N], array::IntoIter<T, N>>;
+}
+
+impl<'a, const N: usize, I, T: 'a> SpecNextChunk<'a, N, T> for I
+where
+ I: Iterator<Item = &'a T>,
+ T: Copy,
+{
+ default fn spec_next_chunk(&mut self) -> Result<[T; N], array::IntoIter<T, N>> {
+ array::iter_next_chunk(&mut self.map(|e| *e))
+ }
+}
+
+impl<'a, const N: usize, T: 'a> SpecNextChunk<'a, N, T> for crate::slice::Iter<'a, T>
+where
+ T: Copy,
+{
+ fn spec_next_chunk(&mut self) -> Result<[T; N], array::IntoIter<T, N>> {
+ let mut raw_array = MaybeUninit::uninit_array();
+
+ let len = self.len();
+
+ if T::IS_ZST {
+ if len < N {
+ let _ = self.advance_by(len);
+ // SAFETY: ZSTs can be conjured ex nihilo; only the amount has to be correct
+ return Err(unsafe { array::IntoIter::new_unchecked(raw_array, 0..len) });
+ }
+
+ let _ = self.advance_by(N);
+ // SAFETY: ditto
+ return Ok(unsafe { MaybeUninit::array_assume_init(raw_array) });
+ }
+
+ if len < N {
+ // SAFETY: `len` indicates that this many elements are available and we just checked that
+ // it fits into the array.
+ unsafe {
+ ptr::copy_nonoverlapping(
+ self.as_ref().as_ptr(),
+ raw_array.as_mut_ptr() as *mut T,
+ len,
+ );
+ let _ = self.advance_by(len);
+ return Err(array::IntoIter::new_unchecked(raw_array, 0..len));
+ }
+ }
+
+ // SAFETY: `len` is at least as large as the array size. Copy a fixed amount here to fully initialize
+ // the array.
+ unsafe {
+ ptr::copy_nonoverlapping(self.as_ref().as_ptr(), raw_array.as_mut_ptr() as *mut T, N);
+ let _ = self.advance_by(N);
+ Ok(MaybeUninit::array_assume_init(raw_array))
+ }
+ }
+}
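
A usage sketch of the path this specialization serves (nightly-only; `Iterator::next_chunk` is unstable): for `Copied<slice::Iter<'_, T>>`, a whole chunk can be produced with a single `copy_nonoverlapping` instead of element-by-element copies.

```rust
#![feature(iter_next_chunk)]

fn main() {
    let data = [1u32, 2, 3, 4, 5];
    let mut iter = data.iter().copied();
    assert_eq!(iter.next_chunk::<2>().unwrap(), [1, 2]);
    assert_eq!(iter.next_chunk::<2>().unwrap(), [3, 4]);
    // Not enough elements left: the remainder comes back as an `array::IntoIter`.
    let remainder = iter.next_chunk::<2>().unwrap_err();
    assert_eq!(remainder.as_slice(), &[5]);
}
```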
diff --git a/library/core/src/iter/adapters/flatten.rs b/library/core/src/iter/adapters/flatten.rs
index 15a120e35..307016c26 100644
--- a/library/core/src/iter/adapters/flatten.rs
+++ b/library/core/src/iter/adapters/flatten.rs
@@ -1,6 +1,6 @@
use crate::fmt;
use crate::iter::{DoubleEndedIterator, Fuse, FusedIterator, Iterator, Map, TrustedLen};
-use crate::ops::Try;
+use crate::ops::{ControlFlow, Try};
/// An iterator that maps each element to an iterator, and yields the elements
/// of the produced iterators.
@@ -73,6 +73,21 @@ where
{
self.inner.fold(init, fold)
}
+
+ #[inline]
+ fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ self.inner.advance_by(n)
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.inner.count()
+ }
+
+ #[inline]
+ fn last(self) -> Option<Self::Item> {
+ self.inner.last()
+ }
}
#[stable(feature = "rust1", since = "1.0.0")]
@@ -103,6 +118,11 @@ where
{
self.inner.rfold(init, fold)
}
+
+ #[inline]
+ fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ self.inner.advance_back_by(n)
+ }
}
#[stable(feature = "fused", since = "1.26.0")]
@@ -214,6 +234,21 @@ where
{
self.inner.fold(init, fold)
}
+
+ #[inline]
+ fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ self.inner.advance_by(n)
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.inner.count()
+ }
+
+ #[inline]
+ fn last(self) -> Option<Self::Item> {
+ self.inner.last()
+ }
}
#[stable(feature = "iterator_flatten", since = "1.29.0")]
@@ -244,6 +279,11 @@ where
{
self.inner.rfold(init, fold)
}
+
+ #[inline]
+ fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ self.inner.advance_back_by(n)
+ }
}
#[stable(feature = "iterator_flatten", since = "1.29.0")]
@@ -280,6 +320,144 @@ where
}
}
+impl<I, U> FlattenCompat<I, U>
+where
+ I: Iterator<Item: IntoIterator<IntoIter = U>>,
+{
+ /// Folds the inner iterators into an accumulator by applying an operation.
+ ///
+ /// Folds over the inner iterators, not over their elements. Is used by the `fold`, `count`,
+ /// and `last` methods.
+ #[inline]
+ fn iter_fold<Acc, Fold>(self, mut acc: Acc, mut fold: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, U) -> Acc,
+ {
+ #[inline]
+ fn flatten<T: IntoIterator, Acc>(
+ fold: &mut impl FnMut(Acc, T::IntoIter) -> Acc,
+ ) -> impl FnMut(Acc, T) -> Acc + '_ {
+ move |acc, iter| fold(acc, iter.into_iter())
+ }
+
+ if let Some(iter) = self.frontiter {
+ acc = fold(acc, iter);
+ }
+
+ acc = self.iter.fold(acc, flatten(&mut fold));
+
+ if let Some(iter) = self.backiter {
+ acc = fold(acc, iter);
+ }
+
+ acc
+ }
+
+ /// Folds over the inner iterators as long as the given function returns successfully,
+ /// always storing the most recent inner iterator in `self.frontiter`.
+ ///
+ /// Folds over the inner iterators, not over their elements. Is used by the `try_fold` and
+ /// `advance_by` methods.
+ #[inline]
+ fn iter_try_fold<Acc, Fold, R>(&mut self, mut acc: Acc, mut fold: Fold) -> R
+ where
+ Fold: FnMut(Acc, &mut U) -> R,
+ R: Try<Output = Acc>,
+ {
+ #[inline]
+ fn flatten<'a, T: IntoIterator, Acc, R: Try<Output = Acc>>(
+ frontiter: &'a mut Option<T::IntoIter>,
+ fold: &'a mut impl FnMut(Acc, &mut T::IntoIter) -> R,
+ ) -> impl FnMut(Acc, T) -> R + 'a {
+ move |acc, iter| fold(acc, frontiter.insert(iter.into_iter()))
+ }
+
+ if let Some(iter) = &mut self.frontiter {
+ acc = fold(acc, iter)?;
+ }
+ self.frontiter = None;
+
+ acc = self.iter.try_fold(acc, flatten(&mut self.frontiter, &mut fold))?;
+ self.frontiter = None;
+
+ if let Some(iter) = &mut self.backiter {
+ acc = fold(acc, iter)?;
+ }
+ self.backiter = None;
+
+ try { acc }
+ }
+}
+
+impl<I, U> FlattenCompat<I, U>
+where
+ I: DoubleEndedIterator<Item: IntoIterator<IntoIter = U>>,
+{
+ /// Folds the inner iterators into an accumulator by applying an operation, starting from the
+ /// back.
+ ///
+ /// Folds over the inner iterators, not over their elements. Is used by the `rfold` method.
+ #[inline]
+ fn iter_rfold<Acc, Fold>(self, mut acc: Acc, mut fold: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, U) -> Acc,
+ {
+ #[inline]
+ fn flatten<T: IntoIterator, Acc>(
+ fold: &mut impl FnMut(Acc, T::IntoIter) -> Acc,
+ ) -> impl FnMut(Acc, T) -> Acc + '_ {
+ move |acc, iter| fold(acc, iter.into_iter())
+ }
+
+ if let Some(iter) = self.backiter {
+ acc = fold(acc, iter);
+ }
+
+ acc = self.iter.rfold(acc, flatten(&mut fold));
+
+ if let Some(iter) = self.frontiter {
+ acc = fold(acc, iter);
+ }
+
+ acc
+ }
+
+ /// Folds over the inner iterators in reverse order as long as the given function returns
+ /// successfully, always storing the most recent inner iterator in `self.backiter`.
+ ///
+ /// Folds over the inner iterators, not over their elements. Is used by the `try_rfold` and
+ /// `advance_back_by` methods.
+ #[inline]
+ fn iter_try_rfold<Acc, Fold, R>(&mut self, mut acc: Acc, mut fold: Fold) -> R
+ where
+ Fold: FnMut(Acc, &mut U) -> R,
+ R: Try<Output = Acc>,
+ {
+ #[inline]
+ fn flatten<'a, T: IntoIterator, Acc, R: Try>(
+ backiter: &'a mut Option<T::IntoIter>,
+ fold: &'a mut impl FnMut(Acc, &mut T::IntoIter) -> R,
+ ) -> impl FnMut(Acc, T) -> R + 'a {
+ move |acc, iter| fold(acc, backiter.insert(iter.into_iter()))
+ }
+
+ if let Some(iter) = &mut self.backiter {
+ acc = fold(acc, iter)?;
+ }
+ self.backiter = None;
+
+ acc = self.iter.try_rfold(acc, flatten(&mut self.backiter, &mut fold))?;
+ self.backiter = None;
+
+ if let Some(iter) = &mut self.frontiter {
+ acc = fold(acc, iter)?;
+ }
+ self.frontiter = None;
+
+ try { acc }
+ }
+}
+
impl<I, U> Iterator for FlattenCompat<I, U>
where
I: Iterator<Item: IntoIterator<IntoIter = U, Item = U::Item>>,
@@ -323,99 +501,74 @@ where
}
#[inline]
- fn try_fold<Acc, Fold, R>(&mut self, mut init: Acc, mut fold: Fold) -> R
+ fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
where
Self: Sized,
Fold: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
{
#[inline]
- fn flatten<'a, T: IntoIterator, Acc, R: Try<Output = Acc>>(
- frontiter: &'a mut Option<T::IntoIter>,
- fold: &'a mut impl FnMut(Acc, T::Item) -> R,
- ) -> impl FnMut(Acc, T) -> R + 'a {
- move |acc, x| {
- let mut mid = x.into_iter();
- let r = mid.try_fold(acc, &mut *fold);
- *frontiter = Some(mid);
- r
- }
- }
-
- if let Some(ref mut front) = self.frontiter {
- init = front.try_fold(init, &mut fold)?;
+ fn flatten<U: Iterator, Acc, R: Try<Output = Acc>>(
+ mut fold: impl FnMut(Acc, U::Item) -> R,
+ ) -> impl FnMut(Acc, &mut U) -> R {
+ move |acc, iter| iter.try_fold(acc, &mut fold)
}
- self.frontiter = None;
- init = self.iter.try_fold(init, flatten(&mut self.frontiter, &mut fold))?;
- self.frontiter = None;
-
- if let Some(ref mut back) = self.backiter {
- init = back.try_fold(init, &mut fold)?;
- }
- self.backiter = None;
-
- try { init }
+ self.iter_try_fold(init, flatten(fold))
}
#[inline]
- fn fold<Acc, Fold>(self, mut init: Acc, mut fold: Fold) -> Acc
+ fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
where
Fold: FnMut(Acc, Self::Item) -> Acc,
{
#[inline]
- fn flatten<T: IntoIterator, Acc>(
- fold: &mut impl FnMut(Acc, T::Item) -> Acc,
- ) -> impl FnMut(Acc, T) -> Acc + '_ {
- move |acc, x| x.into_iter().fold(acc, &mut *fold)
- }
-
- if let Some(front) = self.frontiter {
- init = front.fold(init, &mut fold);
- }
-
- init = self.iter.fold(init, flatten(&mut fold));
-
- if let Some(back) = self.backiter {
- init = back.fold(init, &mut fold);
+ fn flatten<U: Iterator, Acc>(
+ mut fold: impl FnMut(Acc, U::Item) -> Acc,
+ ) -> impl FnMut(Acc, U) -> Acc {
+ move |acc, iter| iter.fold(acc, &mut fold)
}
- init
+ self.iter_fold(init, flatten(fold))
}
#[inline]
#[rustc_inherit_overflow_checks]
fn advance_by(&mut self, n: usize) -> Result<(), usize> {
- let mut rem = n;
- loop {
- if let Some(ref mut front) = self.frontiter {
- match front.advance_by(rem) {
- ret @ Ok(_) => return ret,
- Err(advanced) => rem -= advanced,
- }
- }
- self.frontiter = match self.iter.next() {
- Some(iterable) => Some(iterable.into_iter()),
- _ => break,
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ fn advance<U: Iterator>(n: usize, iter: &mut U) -> ControlFlow<(), usize> {
+ match iter.advance_by(n) {
+ Ok(()) => ControlFlow::BREAK,
+ Err(advanced) => ControlFlow::Continue(n - advanced),
}
}
- self.frontiter = None;
-
- if let Some(ref mut back) = self.backiter {
- match back.advance_by(rem) {
- ret @ Ok(_) => return ret,
- Err(advanced) => rem -= advanced,
- }
+ match self.iter_try_fold(n, advance) {
+ ControlFlow::Continue(remaining) if remaining > 0 => Err(n - remaining),
+ _ => Ok(()),
}
+ }
- if rem > 0 {
- return Err(n - rem);
+ #[inline]
+ fn count(self) -> usize {
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ fn count<U: Iterator>(acc: usize, iter: U) -> usize {
+ acc + iter.count()
}
- self.backiter = None;
+ self.iter_fold(0, count)
+ }
+
+ #[inline]
+ fn last(self) -> Option<Self::Item> {
+ #[inline]
+ fn last<U: Iterator>(last: Option<U::Item>, iter: U) -> Option<U::Item> {
+ iter.last().or(last)
+ }
- Ok(())
+ self.iter_fold(None, last)
}
}
@@ -438,105 +591,53 @@ where
}
#[inline]
- fn try_rfold<Acc, Fold, R>(&mut self, mut init: Acc, mut fold: Fold) -> R
+ fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
where
Self: Sized,
Fold: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
{
#[inline]
- fn flatten<'a, T: IntoIterator, Acc, R: Try<Output = Acc>>(
- backiter: &'a mut Option<T::IntoIter>,
- fold: &'a mut impl FnMut(Acc, T::Item) -> R,
- ) -> impl FnMut(Acc, T) -> R + 'a
- where
- T::IntoIter: DoubleEndedIterator,
- {
- move |acc, x| {
- let mut mid = x.into_iter();
- let r = mid.try_rfold(acc, &mut *fold);
- *backiter = Some(mid);
- r
- }
+ fn flatten<U: DoubleEndedIterator, Acc, R: Try<Output = Acc>>(
+ mut fold: impl FnMut(Acc, U::Item) -> R,
+ ) -> impl FnMut(Acc, &mut U) -> R {
+ move |acc, iter| iter.try_rfold(acc, &mut fold)
}
- if let Some(ref mut back) = self.backiter {
- init = back.try_rfold(init, &mut fold)?;
- }
- self.backiter = None;
-
- init = self.iter.try_rfold(init, flatten(&mut self.backiter, &mut fold))?;
- self.backiter = None;
-
- if let Some(ref mut front) = self.frontiter {
- init = front.try_rfold(init, &mut fold)?;
- }
- self.frontiter = None;
-
- try { init }
+ self.iter_try_rfold(init, flatten(fold))
}
#[inline]
- fn rfold<Acc, Fold>(self, mut init: Acc, mut fold: Fold) -> Acc
+ fn rfold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
where
Fold: FnMut(Acc, Self::Item) -> Acc,
{
#[inline]
- fn flatten<T: IntoIterator, Acc>(
- fold: &mut impl FnMut(Acc, T::Item) -> Acc,
- ) -> impl FnMut(Acc, T) -> Acc + '_
- where
- T::IntoIter: DoubleEndedIterator,
- {
- move |acc, x| x.into_iter().rfold(acc, &mut *fold)
- }
-
- if let Some(back) = self.backiter {
- init = back.rfold(init, &mut fold);
+ fn flatten<U: DoubleEndedIterator, Acc>(
+ mut fold: impl FnMut(Acc, U::Item) -> Acc,
+ ) -> impl FnMut(Acc, U) -> Acc {
+ move |acc, iter| iter.rfold(acc, &mut fold)
}
- init = self.iter.rfold(init, flatten(&mut fold));
-
- if let Some(front) = self.frontiter {
- init = front.rfold(init, &mut fold);
- }
-
- init
+ self.iter_rfold(init, flatten(fold))
}
#[inline]
#[rustc_inherit_overflow_checks]
fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
- let mut rem = n;
- loop {
- if let Some(ref mut back) = self.backiter {
- match back.advance_back_by(rem) {
- ret @ Ok(_) => return ret,
- Err(advanced) => rem -= advanced,
- }
- }
- match self.iter.next_back() {
- Some(iterable) => self.backiter = Some(iterable.into_iter()),
- _ => break,
- }
- }
-
- self.backiter = None;
-
- if let Some(ref mut front) = self.frontiter {
- match front.advance_back_by(rem) {
- ret @ Ok(_) => return ret,
- Err(advanced) => rem -= advanced,
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ fn advance<U: DoubleEndedIterator>(n: usize, iter: &mut U) -> ControlFlow<(), usize> {
+ match iter.advance_back_by(n) {
+ Ok(()) => ControlFlow::BREAK,
+ Err(advanced) => ControlFlow::Continue(n - advanced),
}
}
- if rem > 0 {
- return Err(n - rem);
+ match self.iter_try_rfold(n, advance) {
+ ControlFlow::Continue(remaining) if remaining > 0 => Err(n - remaining),
+ _ => Ok(()),
}
-
- self.frontiter = None;
-
- Ok(())
}
}
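A quick illustration of the behavior the rewritten `advance_by`/`advance_back_by` have to preserve (a standalone sketch, not taken from the patch, assuming a nightly toolchain with `iter_advance_by`): advancing a flattened iterator walks across inner-iterator boundaries in a single call.

#![feature(iter_advance_by)]

fn main() {
    let mut front = [vec![1, 2], vec![3, 4, 5]].into_iter().flatten();
    // Advancing by 3 crosses from the first inner vector into the second.
    assert_eq!(front.advance_by(3), Ok(()));
    assert_eq!(front.next(), Some(4));

    let mut back = [vec![1, 2], vec![3, 4, 5]].into_iter().flatten();
    // The double-ended form consumes from the back across the same boundary.
    assert_eq!(back.advance_back_by(4), Ok(()));
    assert_eq!(back.next_back(), Some(1));
}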
diff --git a/library/core/src/iter/adapters/map_while.rs b/library/core/src/iter/adapters/map_while.rs
index 1e8d6bf3e..fbdeca4d4 100644
--- a/library/core/src/iter/adapters/map_while.rs
+++ b/library/core/src/iter/adapters/map_while.rs
@@ -64,19 +64,7 @@ where
.into_try()
}
- #[inline]
- fn fold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc
- where
- Self: Sized,
- Fold: FnMut(Acc, Self::Item) -> Acc,
- {
- #[inline]
- fn ok<B, T>(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result<B, !> {
- move |acc, x| Ok(f(acc, x))
- }
-
- self.try_fold(init, ok(fold)).unwrap()
- }
+ impl_fold_via_try_fold! { fold -> try_fold }
}
#[unstable(issue = "none", feature = "inplace_iteration")]
diff --git a/library/core/src/iter/adapters/mod.rs b/library/core/src/iter/adapters/mod.rs
index 916a26e24..8cc2b7cec 100644
--- a/library/core/src/iter/adapters/mod.rs
+++ b/library/core/src/iter/adapters/mod.rs
@@ -1,6 +1,7 @@
use crate::iter::{InPlaceIterable, Iterator};
-use crate::ops::{ChangeOutputType, ControlFlow, FromResidual, NeverShortCircuit, Residual, Try};
+use crate::ops::{ChangeOutputType, ControlFlow, FromResidual, Residual, Try};
+mod array_chunks;
mod by_ref_sized;
mod chain;
mod cloned;
@@ -32,6 +33,9 @@ pub use self::{
scan::Scan, skip::Skip, skip_while::SkipWhile, take::Take, take_while::TakeWhile, zip::Zip,
};
+#[unstable(feature = "iter_array_chunks", reason = "recently added", issue = "100450")]
+pub use self::array_chunks::ArrayChunks;
+
#[unstable(feature = "std_internals", issue = "none")]
pub use self::by_ref_sized::ByRefSized;
@@ -199,13 +203,7 @@ where
.into_try()
}
- fn fold<B, F>(mut self, init: B, fold: F) -> B
- where
- Self: Sized,
- F: FnMut(B, Self::Item) -> B,
- {
- self.try_fold(init, NeverShortCircuit::wrap_mut_2(fold)).0
- }
+ impl_fold_via_try_fold! { fold -> try_fold }
}
#[unstable(issue = "none", feature = "inplace_iteration")]
diff --git a/library/core/src/iter/adapters/scan.rs b/library/core/src/iter/adapters/scan.rs
index 80bfd2231..62470512c 100644
--- a/library/core/src/iter/adapters/scan.rs
+++ b/library/core/src/iter/adapters/scan.rs
@@ -74,19 +74,7 @@ where
self.iter.try_fold(init, scan(state, f, fold)).into_try()
}
- #[inline]
- fn fold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc
- where
- Self: Sized,
- Fold: FnMut(Acc, Self::Item) -> Acc,
- {
- #[inline]
- fn ok<B, T>(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result<B, !> {
- move |acc, x| Ok(f(acc, x))
- }
-
- self.try_fold(init, ok(fold)).unwrap()
- }
+ impl_fold_via_try_fold! { fold -> try_fold }
}
#[unstable(issue = "none", feature = "inplace_iteration")]
diff --git a/library/core/src/iter/adapters/skip.rs b/library/core/src/iter/adapters/skip.rs
index 2c283100f..c6334880d 100644
--- a/library/core/src/iter/adapters/skip.rs
+++ b/library/core/src/iter/adapters/skip.rs
@@ -33,21 +33,32 @@ where
#[inline]
fn next(&mut self) -> Option<I::Item> {
if unlikely(self.n > 0) {
- self.iter.nth(crate::mem::take(&mut self.n) - 1)?;
+ self.iter.nth(crate::mem::take(&mut self.n))
+ } else {
+ self.iter.next()
}
- self.iter.next()
}
#[inline]
fn nth(&mut self, n: usize) -> Option<I::Item> {
- // Can't just add n + self.n due to overflow.
if self.n > 0 {
- let to_skip = self.n;
- self.n = 0;
- // nth(n) skips n+1
- self.iter.nth(to_skip - 1)?;
+ let skip: usize = crate::mem::take(&mut self.n);
+ // Checked add to handle overflow case.
+ let n = match skip.checked_add(n) {
+ Some(nth) => nth,
+ None => {
+ // In case of overflow, load skip value, before loading `n`.
+ // Because the amount of elements to iterate is beyond `usize::MAX`, this
+ // is split into two `nth` calls where the `skip` `nth` call is discarded.
+ self.iter.nth(skip - 1)?;
+ n
+ }
+ };
+ // Load nth element including skip.
+ self.iter.nth(n)
+ } else {
+ self.iter.nth(n)
}
- self.iter.nth(n)
}
#[inline]
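The `checked_add` path above guards the composition of the skipped prefix with the requested index; a minimal sanity check of the semantics it must keep (stable Rust, names local to the example):

fn main() {
    let mut it = (0..10).skip(3);
    // Element 0 of the skipped iterator is 3, so `nth(2)` yields 3 + 2 = 5 ...
    assert_eq!(it.nth(2), Some(5));
    // ... and iteration resumes immediately after it.
    assert_eq!(it.next(), Some(6));
}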
@@ -195,17 +206,7 @@ where
if n == 0 { try { init } } else { self.iter.try_rfold(init, check(n, fold)).into_try() }
}
- fn rfold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc
- where
- Fold: FnMut(Acc, Self::Item) -> Acc,
- {
- #[inline]
- fn ok<Acc, T>(mut f: impl FnMut(Acc, T) -> Acc) -> impl FnMut(Acc, T) -> Result<Acc, !> {
- move |acc, x| Ok(f(acc, x))
- }
-
- self.try_rfold(init, ok(fold)).unwrap()
- }
+ impl_fold_via_try_fold! { rfold -> try_rfold }
#[inline]
fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
diff --git a/library/core/src/iter/adapters/take.rs b/library/core/src/iter/adapters/take.rs
index 2962e0104..58a0b9d7b 100644
--- a/library/core/src/iter/adapters/take.rs
+++ b/library/core/src/iter/adapters/take.rs
@@ -98,19 +98,7 @@ where
}
}
- #[inline]
- fn fold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc
- where
- Self: Sized,
- Fold: FnMut(Acc, Self::Item) -> Acc,
- {
- #[inline]
- fn ok<B, T>(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result<B, !> {
- move |acc, x| Ok(f(acc, x))
- }
-
- self.try_fold(init, ok(fold)).unwrap()
- }
+ impl_fold_via_try_fold! { fold -> try_fold }
#[inline]
#[rustc_inherit_overflow_checks]
diff --git a/library/core/src/iter/adapters/take_while.rs b/library/core/src/iter/adapters/take_while.rs
index ded216da9..ec66dc3ae 100644
--- a/library/core/src/iter/adapters/take_while.rs
+++ b/library/core/src/iter/adapters/take_while.rs
@@ -94,19 +94,7 @@ where
}
}
- #[inline]
- fn fold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc
- where
- Self: Sized,
- Fold: FnMut(Acc, Self::Item) -> Acc,
- {
- #[inline]
- fn ok<B, T>(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result<B, !> {
- move |acc, x| Ok(f(acc, x))
- }
-
- self.try_fold(init, ok(fold)).unwrap()
- }
+ impl_fold_via_try_fold! { fold -> try_fold }
}
#[stable(feature = "fused", since = "1.26.0")]
diff --git a/library/core/src/iter/mod.rs b/library/core/src/iter/mod.rs
index d5c6aed5b..ef0f39782 100644
--- a/library/core/src/iter/mod.rs
+++ b/library/core/src/iter/mod.rs
@@ -352,6 +352,29 @@
#![stable(feature = "rust1", since = "1.0.0")]
+// This needs to be up here in order to be usable in the child modules
+macro_rules! impl_fold_via_try_fold {
+ (fold -> try_fold) => {
+ impl_fold_via_try_fold! { @internal fold -> try_fold }
+ };
+ (rfold -> try_rfold) => {
+ impl_fold_via_try_fold! { @internal rfold -> try_rfold }
+ };
+ (@internal $fold:ident -> $try_fold:ident) => {
+ #[inline]
+ fn $fold<AAA, FFF>(mut self, init: AAA, mut fold: FFF) -> AAA
+ where
+ FFF: FnMut(AAA, Self::Item) -> AAA,
+ {
+ use crate::const_closure::ConstFnMutClosure;
+ use crate::ops::NeverShortCircuit;
+
+ let fold = ConstFnMutClosure::new(&mut fold, NeverShortCircuit::wrap_mut_2_imp);
+ self.$try_fold(init, fold).0
+ }
+ };
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::traits::Iterator;
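A minimal standalone sketch of the pattern the macro expands to, using `Result` with an uninhabited error in place of the internal `NeverShortCircuit`/`ConstFnMutClosure` helpers (the `fold_via_try_fold` name is only for illustration): `fold` is routed through `try_fold` with a residual that can never occur, so only one folding code path has to be written and optimized.

use std::convert::Infallible;

fn fold_via_try_fold<I, Acc, F>(mut iter: I, init: Acc, mut f: F) -> Acc
where
    I: Iterator,
    F: FnMut(Acc, I::Item) -> Acc,
{
    // The error type is uninhabited, so the `Err` arm is statically unreachable
    // and the adapter's `try_fold` does all the work.
    let folded: Result<Acc, Infallible> = iter.try_fold(init, |acc, x| Ok(f(acc, x)));
    match folded {
        Ok(acc) => acc,
        Err(never) => match never {},
    }
}

fn main() {
    assert_eq!(fold_via_try_fold([1, 2, 3].into_iter(), 0, |a, b| a + b), 6);
}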
@@ -398,6 +421,8 @@ pub use self::traits::{
#[stable(feature = "iter_zip", since = "1.59.0")]
pub use self::adapters::zip;
+#[unstable(feature = "iter_array_chunks", reason = "recently added", issue = "100450")]
+pub use self::adapters::ArrayChunks;
#[unstable(feature = "std_internals", issue = "none")]
pub use self::adapters::ByRefSized;
#[stable(feature = "iter_cloned", since = "1.1.0")]
diff --git a/library/core/src/iter/range.rs b/library/core/src/iter/range.rs
index f7aeee8c9..ac7b389b1 100644
--- a/library/core/src/iter/range.rs
+++ b/library/core/src/iter/range.rs
@@ -1150,19 +1150,7 @@ impl<A: Step> Iterator for ops::RangeInclusive<A> {
self.spec_try_fold(init, f)
}
- #[inline]
- fn fold<B, F>(mut self, init: B, f: F) -> B
- where
- Self: Sized,
- F: FnMut(B, Self::Item) -> B,
- {
- #[inline]
- fn ok<B, T>(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result<B, !> {
- move |acc, x| Ok(f(acc, x))
- }
-
- self.try_fold(init, ok(f)).unwrap()
- }
+ impl_fold_via_try_fold! { fold -> try_fold }
#[inline]
fn last(mut self) -> Option<A> {
@@ -1230,19 +1218,7 @@ impl<A: Step> DoubleEndedIterator for ops::RangeInclusive<A> {
self.spec_try_rfold(init, f)
}
- #[inline]
- fn rfold<B, F>(mut self, init: B, f: F) -> B
- where
- Self: Sized,
- F: FnMut(B, Self::Item) -> B,
- {
- #[inline]
- fn ok<B, T>(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result<B, !> {
- move |acc, x| Ok(f(acc, x))
- }
-
- self.try_rfold(init, ok(f)).unwrap()
- }
+ impl_fold_via_try_fold! { rfold -> try_rfold }
}
// Safety: See above implementation for `ops::Range<A>`
diff --git a/library/core/src/iter/traits/collect.rs b/library/core/src/iter/traits/collect.rs
index 12ca508be..e099700e3 100644
--- a/library/core/src/iter/traits/collect.rs
+++ b/library/core/src/iter/traits/collect.rs
@@ -228,6 +228,7 @@ pub trait FromIterator<A>: Sized {
#[rustc_diagnostic_item = "IntoIterator"]
#[rustc_skip_array_during_method_dispatch]
#[stable(feature = "rust1", since = "1.0.0")]
+#[const_trait]
pub trait IntoIterator {
/// The type of the elements being iterated over.
#[stable(feature = "rust1", since = "1.0.0")]
@@ -263,7 +264,7 @@ pub trait IntoIterator {
#[rustc_const_unstable(feature = "const_intoiterator_identity", issue = "90603")]
#[stable(feature = "rust1", since = "1.0.0")]
-impl<I: ~const Iterator> const IntoIterator for I {
+impl<I: Iterator> const IntoIterator for I {
type Item = I::Item;
type IntoIter = I;
diff --git a/library/core/src/iter/traits/iterator.rs b/library/core/src/iter/traits/iterator.rs
index 275412b57..789a87968 100644
--- a/library/core/src/iter/traits/iterator.rs
+++ b/library/core/src/iter/traits/iterator.rs
@@ -5,7 +5,7 @@ use crate::ops::{ChangeOutputType, ControlFlow, FromResidual, Residual, Try};
use super::super::try_process;
use super::super::ByRefSized;
use super::super::TrustedRandomAccessNoCoerce;
-use super::super::{Chain, Cloned, Copied, Cycle, Enumerate, Filter, FilterMap, Fuse};
+use super::super::{ArrayChunks, Chain, Cloned, Copied, Cycle, Enumerate, Filter, FilterMap, Fuse};
use super::super::{FlatMap, Flatten};
use super::super::{FromIterator, Intersperse, IntersperseWith, Product, Sum, Zip};
use super::super::{
@@ -692,7 +692,7 @@ pub trait Iterator {
/// assert_eq!(it.next(), Some(NotClone(99))); // The separator.
/// assert_eq!(it.next(), Some(NotClone(1))); // The next element from `v`.
/// assert_eq!(it.next(), Some(NotClone(99))); // The separator.
- /// assert_eq!(it.next(), Some(NotClone(2))); // The last element from from `v`.
+ /// assert_eq!(it.next(), Some(NotClone(2))); // The last element from `v`.
/// assert_eq!(it.next(), None); // The iterator is finished.
/// ```
///
@@ -2431,22 +2431,13 @@ pub trait Iterator {
///
/// # Example
///
- /// Find the maximum value:
- ///
/// ```
- /// fn find_max<I>(iter: I) -> Option<I::Item>
- /// where I: Iterator,
- /// I::Item: Ord,
- /// {
- /// iter.reduce(|accum, item| {
- /// if accum >= item { accum } else { item }
- /// })
- /// }
- /// let a = [10, 20, 5, -23, 0];
- /// let b: [u32; 0] = [];
+ /// let reduced: i32 = (1..10).reduce(|acc, e| acc + e).unwrap();
+ /// assert_eq!(reduced, 45);
///
- /// assert_eq!(find_max(a.iter()), Some(&20));
- /// assert_eq!(find_max(b.iter()), None);
+ /// // Which is equivalent to doing it with `fold`:
+ /// let folded: i32 = (1..10).fold(0, |acc, e| acc + e);
+ /// assert_eq!(reduced, folded);
/// ```
#[inline]
#[stable(feature = "iterator_fold_self", since = "1.51.0")]
@@ -2906,14 +2897,14 @@ pub trait Iterator {
/// Stopping at the first `true`:
///
/// ```
- /// let a = [1, 2, 3];
+ /// let a = [-1, 2, 3, 4];
///
/// let mut iter = a.iter();
///
- /// assert_eq!(iter.rposition(|&x| x == 2), Some(1));
+ /// assert_eq!(iter.rposition(|&x| x >= 2), Some(3));
///
/// // we can still use `iter`, as there are more elements.
- /// assert_eq!(iter.next(), Some(&1));
+ /// assert_eq!(iter.next(), Some(&-1));
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
@@ -3316,6 +3307,49 @@ pub trait Iterator {
Cycle::new(self)
}
+ /// Returns an iterator over `N` elements of the iterator at a time.
+ ///
+ /// The chunks do not overlap. If `N` does not divide the length of the
+ /// iterator, then the last up to `N-1` elements will be omitted and can be
+ /// retrieved from the [`.into_remainder()`][ArrayChunks::into_remainder]
+ /// function of the iterator.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `N` is 0.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(iter_array_chunks)]
+ ///
+ /// let mut iter = "lorem".chars().array_chunks();
+ /// assert_eq!(iter.next(), Some(['l', 'o']));
+ /// assert_eq!(iter.next(), Some(['r', 'e']));
+ /// assert_eq!(iter.next(), None);
+ /// assert_eq!(iter.into_remainder().unwrap().as_slice(), &['m']);
+ /// ```
+ ///
+ /// ```
+ /// #![feature(iter_array_chunks)]
+ ///
+ /// let data = [1, 1, 2, -2, 6, 0, 3, 1];
+ /// // ^-----^ ^------^
+ /// for [x, y, z] in data.iter().array_chunks() {
+ /// assert_eq!(x + y + z, 4);
+ /// }
+ /// ```
+ #[track_caller]
+ #[unstable(feature = "iter_array_chunks", reason = "recently added", issue = "100450")]
+ fn array_chunks<const N: usize>(self) -> ArrayChunks<Self, N>
+ where
+ Self: Sized,
+ {
+ ArrayChunks::new(self)
+ }
+
/// Sums the elements of an iterator.
///
/// Takes each element, adds them together, and returns the result.
@@ -3418,36 +3452,27 @@ pub trait Iterator {
/// assert_eq!(xs.iter().cmp_by(&ys, |&x, &y| (2 * x).cmp(&y)), Ordering::Greater);
/// ```
#[unstable(feature = "iter_order_by", issue = "64295")]
- fn cmp_by<I, F>(mut self, other: I, mut cmp: F) -> Ordering
+ fn cmp_by<I, F>(self, other: I, cmp: F) -> Ordering
where
Self: Sized,
I: IntoIterator,
F: FnMut(Self::Item, I::Item) -> Ordering,
{
- let mut other = other.into_iter();
-
- loop {
- let x = match self.next() {
- None => {
- if other.next().is_none() {
- return Ordering::Equal;
- } else {
- return Ordering::Less;
- }
- }
- Some(val) => val,
- };
-
- let y = match other.next() {
- None => return Ordering::Greater,
- Some(val) => val,
- };
-
- match cmp(x, y) {
- Ordering::Equal => (),
- non_eq => return non_eq,
+ #[inline]
+ fn compare<X, Y, F>(mut cmp: F) -> impl FnMut(X, Y) -> ControlFlow<Ordering>
+ where
+ F: FnMut(X, Y) -> Ordering,
+ {
+ move |x, y| match cmp(x, y) {
+ Ordering::Equal => ControlFlow::CONTINUE,
+ non_eq => ControlFlow::Break(non_eq),
}
}
+
+ match iter_compare(self, other.into_iter(), compare(cmp)) {
+ ControlFlow::Continue(ord) => ord,
+ ControlFlow::Break(ord) => ord,
+ }
}
/// [Lexicographically](Ord#lexicographical-comparison) compares the elements of this [`Iterator`] with those
@@ -3503,36 +3528,27 @@ pub trait Iterator {
/// );
/// ```
#[unstable(feature = "iter_order_by", issue = "64295")]
- fn partial_cmp_by<I, F>(mut self, other: I, mut partial_cmp: F) -> Option<Ordering>
+ fn partial_cmp_by<I, F>(self, other: I, partial_cmp: F) -> Option<Ordering>
where
Self: Sized,
I: IntoIterator,
F: FnMut(Self::Item, I::Item) -> Option<Ordering>,
{
- let mut other = other.into_iter();
-
- loop {
- let x = match self.next() {
- None => {
- if other.next().is_none() {
- return Some(Ordering::Equal);
- } else {
- return Some(Ordering::Less);
- }
- }
- Some(val) => val,
- };
-
- let y = match other.next() {
- None => return Some(Ordering::Greater),
- Some(val) => val,
- };
-
- match partial_cmp(x, y) {
- Some(Ordering::Equal) => (),
- non_eq => return non_eq,
+ #[inline]
+ fn compare<X, Y, F>(mut partial_cmp: F) -> impl FnMut(X, Y) -> ControlFlow<Option<Ordering>>
+ where
+ F: FnMut(X, Y) -> Option<Ordering>,
+ {
+ move |x, y| match partial_cmp(x, y) {
+ Some(Ordering::Equal) => ControlFlow::CONTINUE,
+ non_eq => ControlFlow::Break(non_eq),
}
}
+
+ match iter_compare(self, other.into_iter(), compare(partial_cmp)) {
+ ControlFlow::Continue(ord) => Some(ord),
+ ControlFlow::Break(ord) => ord,
+ }
}
/// Determines if the elements of this [`Iterator`] are equal to those of
@@ -3570,29 +3586,26 @@ pub trait Iterator {
/// assert!(xs.iter().eq_by(&ys, |&x, &y| x * x == y));
/// ```
#[unstable(feature = "iter_order_by", issue = "64295")]
- fn eq_by<I, F>(mut self, other: I, mut eq: F) -> bool
+ fn eq_by<I, F>(self, other: I, eq: F) -> bool
where
Self: Sized,
I: IntoIterator,
F: FnMut(Self::Item, I::Item) -> bool,
{
- let mut other = other.into_iter();
-
- loop {
- let x = match self.next() {
- None => return other.next().is_none(),
- Some(val) => val,
- };
-
- let y = match other.next() {
- None => return false,
- Some(val) => val,
- };
-
- if !eq(x, y) {
- return false;
+ #[inline]
+ fn compare<X, Y, F>(mut eq: F) -> impl FnMut(X, Y) -> ControlFlow<()>
+ where
+ F: FnMut(X, Y) -> bool,
+ {
+ move |x, y| {
+ if eq(x, y) { ControlFlow::CONTINUE } else { ControlFlow::BREAK }
}
}
+
+ match iter_compare(self, other.into_iter(), compare(eq)) {
+ ControlFlow::Continue(ord) => ord == Ordering::Equal,
+ ControlFlow::Break(()) => false,
+ }
}
/// Determines if the elements of this [`Iterator`] are unequal to those of
@@ -3817,6 +3830,46 @@ pub trait Iterator {
}
}
+/// Compares two iterators element-wise using the given function.
+///
+/// If `ControlFlow::CONTINUE` is returned from the function, the comparison moves on to the next
+/// elements of both iterators. Returning `ControlFlow::Break(x)` short-circuits the iteration and
+/// returns `ControlFlow::Break(x)`. If one of the iterators runs out of elements,
+/// `ControlFlow::Continue(ord)` is returned where `ord` is the result of comparing the lengths of
+/// the iterators.
+///
+/// Isolates the logic shared by [`cmp_by`](Iterator::cmp_by),
+/// [`partial_cmp_by`](Iterator::partial_cmp_by), and [`eq_by`](Iterator::eq_by).
+#[inline]
+fn iter_compare<A, B, F, T>(mut a: A, mut b: B, f: F) -> ControlFlow<T, Ordering>
+where
+ A: Iterator,
+ B: Iterator,
+ F: FnMut(A::Item, B::Item) -> ControlFlow<T>,
+{
+ #[inline]
+ fn compare<'a, B, X, T>(
+ b: &'a mut B,
+ mut f: impl FnMut(X, B::Item) -> ControlFlow<T> + 'a,
+ ) -> impl FnMut(X) -> ControlFlow<ControlFlow<T, Ordering>> + 'a
+ where
+ B: Iterator,
+ {
+ move |x| match b.next() {
+ None => ControlFlow::Break(ControlFlow::Continue(Ordering::Greater)),
+ Some(y) => f(x, y).map_break(ControlFlow::Break),
+ }
+ }
+
+ match a.try_for_each(compare(&mut b, f)) {
+ ControlFlow::Continue(()) => ControlFlow::Continue(match b.next() {
+ None => Ordering::Equal,
+ Some(_) => Ordering::Less,
+ }),
+ ControlFlow::Break(x) => x,
+ }
+}
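The contract the helper encodes is the one already observable through the stable comparison methods; a short reference check (not part of the change):

use std::cmp::Ordering;

fn main() {
    let xs = [1, 2, 3];
    let ys = [1, 2, 3, 4];
    // Equal prefix, but `ys` has elements left over, so `xs` compares Less.
    assert_eq!(xs.iter().cmp(ys.iter()), Ordering::Less);
    assert_eq!(ys.iter().cmp(xs.iter()), Ordering::Greater);
    // The first differing element decides before lengths are considered.
    assert_eq!([9].iter().cmp([1, 2, 3].iter()), Ordering::Greater);
    assert!(!xs.iter().eq(ys.iter()));
}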
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: Iterator + ?Sized> Iterator for &mut I {
type Item = I::Item;
diff --git a/library/core/src/lazy.rs b/library/core/src/lazy.rs
deleted file mode 100644
index f8c06c3f9..000000000
--- a/library/core/src/lazy.rs
+++ /dev/null
@@ -1 +0,0 @@
-//! Lazy values and one-time initialization of static data.
diff --git a/library/core/src/lib.rs b/library/core/src/lib.rs
index 24742bb49..659409557 100644
--- a/library/core/src/lib.rs
+++ b/library/core/src/lib.rs
@@ -93,6 +93,7 @@
#![warn(missing_debug_implementations)]
#![warn(missing_docs)]
#![allow(explicit_outlives_requirements)]
+#![allow(incomplete_features)]
//
// Library features:
#![feature(const_align_offset)]
@@ -113,6 +114,7 @@
#![feature(const_fmt_arguments_new)]
#![feature(const_heap)]
#![feature(const_convert)]
+#![feature(const_index_range_slice_index)]
#![feature(const_inherent_unchecked_arith)]
#![feature(const_int_unchecked_arith)]
#![feature(const_intrinsic_forget)]
@@ -130,25 +132,30 @@
#![feature(const_replace)]
#![feature(const_ptr_as_ref)]
#![feature(const_ptr_is_null)]
-#![feature(const_ptr_offset_from)]
#![feature(const_ptr_read)]
#![feature(const_ptr_write)]
#![feature(const_raw_ptr_comparison)]
#![feature(const_size_of_val)]
#![feature(const_slice_from_raw_parts_mut)]
#![feature(const_slice_ptr_len)]
+#![feature(const_slice_split_at_mut)]
#![feature(const_str_from_utf8_unchecked_mut)]
#![feature(const_swap)]
#![feature(const_trait_impl)]
+#![feature(const_try)]
#![feature(const_type_id)]
#![feature(const_type_name)]
#![feature(const_default_impls)]
+#![feature(const_unicode_case_lookup)]
#![feature(const_unsafecell_get_mut)]
+#![feature(const_waker)]
#![feature(core_panic)]
#![feature(duration_consts_float)]
#![feature(maybe_uninit_uninit_array)]
+#![feature(ptr_alignment_type)]
#![feature(ptr_metadata)]
#![feature(slice_ptr_get)]
+#![feature(slice_split_at_unchecked)]
#![feature(str_internals)]
#![feature(utf16_extra)]
#![feature(utf16_extra_const)]
@@ -157,13 +164,17 @@
#![feature(const_slice_from_ref)]
#![feature(const_slice_index)]
#![feature(const_is_char_boundary)]
+#![feature(const_cstr_methods)]
+#![feature(is_ascii_octdigit)]
//
// Language features:
#![feature(abi_unadjusted)]
+#![feature(adt_const_params)]
#![feature(allow_internal_unsafe)]
#![feature(allow_internal_unstable)]
#![feature(associated_type_bounds)]
#![feature(auto_traits)]
+#![feature(c_unwind)]
#![feature(cfg_sanitize)]
#![feature(cfg_target_has_atomic)]
#![feature(cfg_target_has_atomic_equal_alignment)]
@@ -181,13 +192,13 @@
#![feature(extern_types)]
#![feature(fundamental)]
#![feature(if_let_guard)]
+#![feature(inline_const)]
#![feature(intra_doc_pointers)]
#![feature(intrinsics)]
#![feature(lang_items)]
#![feature(link_llvm_intrinsics)]
#![feature(macro_metavar_expr)]
#![feature(min_specialization)]
-#![feature(mixed_integer_ops)]
#![feature(must_not_suspend)]
#![feature(negative_impls)]
#![feature(never_type)]
@@ -201,12 +212,14 @@
#![feature(simd_ffi)]
#![feature(staged_api)]
#![feature(stmt_expr_attributes)]
+#![feature(target_feature_11)]
#![feature(trait_alias)]
#![feature(transparent_unions)]
#![feature(try_blocks)]
#![feature(unboxed_closures)]
#![feature(unsized_fn_params)]
#![feature(asm_const)]
+#![feature(const_transmute_copy)]
//
// Target features:
#![feature(arm_target_feature)]
@@ -216,6 +229,7 @@
#![feature(hexagon_target_feature)]
#![feature(mips_target_feature)]
#![feature(powerpc_target_feature)]
+#![feature(riscv_target_feature)]
#![feature(rtm_target_feature)]
#![feature(sse4a_target_feature)]
#![feature(tbm_target_feature)]
@@ -302,6 +316,7 @@ pub mod clone;
pub mod cmp;
pub mod convert;
pub mod default;
+pub mod error;
pub mod marker;
pub mod ops;
@@ -317,8 +332,6 @@ pub mod cell;
pub mod char;
pub mod ffi;
pub mod iter;
-#[unstable(feature = "once_cell", issue = "74465")]
-pub mod lazy;
pub mod option;
pub mod panic;
pub mod panicking;
@@ -347,6 +360,8 @@ mod bool;
mod tuple;
mod unit;
+mod const_closure;
+
#[stable(feature = "core_primitive", since = "1.43.0")]
pub mod primitive;
diff --git a/library/core/src/macros/mod.rs b/library/core/src/macros/mod.rs
index 3a115a8b8..fd96e1ff7 100644
--- a/library/core/src/macros/mod.rs
+++ b/library/core/src/macros/mod.rs
@@ -350,10 +350,12 @@ macro_rules! matches {
/// Unwraps a result or propagates its error.
///
-/// The `?` operator was added to replace `try!` and should be used instead.
-/// Furthermore, `try` is a reserved word in Rust 2018, so if you must use
-/// it, you will need to use the [raw-identifier syntax][ris]: `r#try`.
+/// The [`?` operator][propagating-errors] was added to replace `try!`
+/// and should be used instead. Furthermore, `try` is a reserved word
+/// in Rust 2018, so if you must use it, you will need to use the
+/// [raw-identifier syntax][ris]: `r#try`.
///
+/// [propagating-errors]: https://doc.rust-lang.org/book/ch09-02-recoverable-errors-with-result.html#a-shortcut-for-propagating-errors-the--operator
/// [ris]: https://doc.rust-lang.org/nightly/rust-by-example/compatibility/raw_identifiers.html
///
/// `try!` matches the given [`Result`]. In case of the `Ok` variant, the
@@ -457,11 +459,12 @@ macro_rules! r#try {
///
/// A module can import both `std::fmt::Write` and `std::io::Write` and call `write!` on objects
/// implementing either, as objects do not typically implement both. However, the module must
-/// import the traits qualified so their names do not conflict:
+/// avoid conflict between the trait names, such as by importing them as `_` or otherwise renaming
+/// them:
///
/// ```
-/// use std::fmt::Write as FmtWrite;
-/// use std::io::Write as IoWrite;
+/// use std::fmt::Write as _;
+/// use std::io::Write as _;
///
/// fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let mut s = String::new();
@@ -474,6 +477,23 @@ macro_rules! r#try {
/// }
/// ```
///
+/// If you also need the trait names themselves, such as to implement one or both on your types,
+/// import the containing module and then name them with a prefix:
+///
+/// ```
+/// # #![allow(unused_imports)]
+/// use std::fmt::{self, Write as _};
+/// use std::io::{self, Write as _};
+///
+/// struct Example;
+///
+/// impl fmt::Write for Example {
+/// fn write_str(&mut self, _s: &str) -> core::fmt::Result {
+/// unimplemented!();
+/// }
+/// }
+/// ```
+///
/// Note: This macro can be used in `no_std` setups as well.
/// In a `no_std` setup you are responsible for the implementation details of the components.
///
@@ -526,25 +546,6 @@ macro_rules! write {
/// Ok(())
/// }
/// ```
-///
-/// A module can import both `std::fmt::Write` and `std::io::Write` and call `write!` on objects
-/// implementing either, as objects do not typically implement both. However, the module must
-/// import the traits qualified so their names do not conflict:
-///
-/// ```
-/// use std::fmt::Write as FmtWrite;
-/// use std::io::Write as IoWrite;
-///
-/// fn main() -> Result<(), Box<dyn std::error::Error>> {
-/// let mut s = String::new();
-/// let mut v = Vec::new();
-///
-/// writeln!(&mut s, "{} {}", "abc", 123)?; // uses fmt::Write::write_fmt
-/// writeln!(&mut v, "s = {:?}", s)?; // uses io::Write::write_fmt
-/// assert_eq!(v, b"s = \"abc 123\\n\"\n");
-/// Ok(())
-/// }
-/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "writeln_macro")]
@@ -1015,7 +1016,7 @@ pub(crate) mod builtin {
/// Concatenates literals into a byte slice.
///
/// This macro takes any number of comma-separated literals, and concatenates them all into
- /// one, yielding an expression of type `&[u8, _]`, which represents all of the literals
+ /// one, yielding an expression of type `&[u8; _]`, which represents all of the literals
/// concatenated left-to-right. The literals passed can be any combination of:
///
/// - byte literals (`b'r'`)
diff --git a/library/core/src/marker.rs b/library/core/src/marker.rs
index 2c5789795..ae4ebf444 100644
--- a/library/core/src/marker.rs
+++ b/library/core/src/marker.rs
@@ -44,6 +44,12 @@ impl<T: ?Sized> !Send for *const T {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> !Send for *mut T {}
+// Most instances arise automatically, but this instance is needed to link up `T: Sync` with
+// `&T: Send` (and it also removes the unsound default instance `T: Send` -> `&T: Send` that would
+// otherwise exist).
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Sync + ?Sized> Send for &T {}
+
/// Types with a constant size known at compile time.
///
/// All type parameters have an implicit bound of `Sized`. The special syntax
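What the explicit impl provides can be checked at compile time; a small sketch (the `assert_send` helper is local to the example): shared references are `Send` exactly when the referent is `Sync`.

fn assert_send<T: Send>() {}

fn main() {
    // `i32: Sync`, so `&i32: Send` via the impl above.
    assert_send::<&i32>();
    // `std::cell::Cell<i32>` is not `Sync`, so `&Cell<i32>` is not `Send`;
    // uncommenting the next line fails to compile.
    // assert_send::<&std::cell::Cell<i32>>();
}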
@@ -81,6 +87,7 @@ impl<T: ?Sized> !Send for *mut T {}
/// ```
///
/// [trait object]: ../../book/ch17-02-trait-objects.html
+#[doc(alias = "?", alias = "?Sized")]
#[stable(feature = "rust1", since = "1.0.0")]
#[lang = "sized"]
#[rustc_on_unimplemented(
@@ -343,7 +350,7 @@ pub trait StructuralEq {
/// If you try to implement `Copy` on a struct or enum containing non-`Copy` data, you will get
/// the error [E0204].
///
-/// [E0204]: ../../error-index.html#E0204
+/// [E0204]: ../../error_codes/E0204.html
///
/// ## When *should* my type be `Copy`?
///
@@ -482,64 +489,6 @@ impl<T: ?Sized> !Sync for *const T {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> !Sync for *mut T {}
-macro_rules! impls {
- ($t: ident) => {
- #[stable(feature = "rust1", since = "1.0.0")]
- impl<T: ?Sized> Hash for $t<T> {
- #[inline]
- fn hash<H: Hasher>(&self, _: &mut H) {}
- }
-
- #[stable(feature = "rust1", since = "1.0.0")]
- impl<T: ?Sized> cmp::PartialEq for $t<T> {
- fn eq(&self, _other: &$t<T>) -> bool {
- true
- }
- }
-
- #[stable(feature = "rust1", since = "1.0.0")]
- impl<T: ?Sized> cmp::Eq for $t<T> {}
-
- #[stable(feature = "rust1", since = "1.0.0")]
- impl<T: ?Sized> cmp::PartialOrd for $t<T> {
- fn partial_cmp(&self, _other: &$t<T>) -> Option<cmp::Ordering> {
- Option::Some(cmp::Ordering::Equal)
- }
- }
-
- #[stable(feature = "rust1", since = "1.0.0")]
- impl<T: ?Sized> cmp::Ord for $t<T> {
- fn cmp(&self, _other: &$t<T>) -> cmp::Ordering {
- cmp::Ordering::Equal
- }
- }
-
- #[stable(feature = "rust1", since = "1.0.0")]
- impl<T: ?Sized> Copy for $t<T> {}
-
- #[stable(feature = "rust1", since = "1.0.0")]
- impl<T: ?Sized> Clone for $t<T> {
- fn clone(&self) -> Self {
- Self
- }
- }
-
- #[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
- impl<T: ?Sized> const Default for $t<T> {
- fn default() -> Self {
- Self
- }
- }
-
- #[unstable(feature = "structural_match", issue = "31434")]
- impl<T: ?Sized> StructuralPartialEq for $t<T> {}
-
- #[unstable(feature = "structural_match", issue = "31434")]
- impl<T: ?Sized> StructuralEq for $t<T> {}
- };
-}
-
/// Zero-sized type used to mark things that "act like" they own a `T`.
///
/// Adding a `PhantomData<T>` field to your type tells the compiler that your
@@ -677,15 +626,60 @@ macro_rules! impls {
#[stable(feature = "rust1", since = "1.0.0")]
pub struct PhantomData<T: ?Sized>;
-impls! { PhantomData }
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Hash for PhantomData<T> {
+ #[inline]
+ fn hash<H: Hasher>(&self, _: &mut H) {}
+}
-mod impls {
- #[stable(feature = "rust1", since = "1.0.0")]
- unsafe impl<T: Sync + ?Sized> Send for &T {}
- #[stable(feature = "rust1", since = "1.0.0")]
- unsafe impl<T: Send + ?Sized> Send for &mut T {}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> cmp::PartialEq for PhantomData<T> {
+ fn eq(&self, _other: &PhantomData<T>) -> bool {
+ true
+ }
}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> cmp::Eq for PhantomData<T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> cmp::PartialOrd for PhantomData<T> {
+ fn partial_cmp(&self, _other: &PhantomData<T>) -> Option<cmp::Ordering> {
+ Option::Some(cmp::Ordering::Equal)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> cmp::Ord for PhantomData<T> {
+ fn cmp(&self, _other: &PhantomData<T>) -> cmp::Ordering {
+ cmp::Ordering::Equal
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Copy for PhantomData<T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Clone for PhantomData<T> {
+ fn clone(&self) -> Self {
+ Self
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
+impl<T: ?Sized> const Default for PhantomData<T> {
+ fn default() -> Self {
+ Self
+ }
+}
+
+#[unstable(feature = "structural_match", issue = "31434")]
+impl<T: ?Sized> StructuralPartialEq for PhantomData<T> {}
+
+#[unstable(feature = "structural_match", issue = "31434")]
+impl<T: ?Sized> StructuralEq for PhantomData<T> {}
+
/// Compiler-internal trait used to indicate the type of enum discriminants.
///
/// This trait is automatically implemented for every type and does not add any
@@ -798,8 +792,18 @@ impl<T: ?Sized> Unpin for *mut T {}
#[unstable(feature = "const_trait_impl", issue = "67792")]
#[lang = "destruct"]
#[rustc_on_unimplemented(message = "can't drop `{Self}`", append_const_msg)]
+#[const_trait]
pub trait Destruct {}
+/// A marker for tuple types.
+///
+/// The implementation of this trait is built-in and cannot be implemented
+/// for any user type.
+#[unstable(feature = "tuple_trait", issue = "none")]
+#[lang = "tuple_trait"]
+#[rustc_on_unimplemented(message = "`{Self}` is not a tuple")]
+pub trait Tuple {}
+
/// Implementations of `Copy` for primitive types.
///
/// Implementations that cannot be described in Rust
diff --git a/library/core/src/mem/maybe_uninit.rs b/library/core/src/mem/maybe_uninit.rs
index b4ea53608..7757c95de 100644
--- a/library/core/src/mem/maybe_uninit.rs
+++ b/library/core/src/mem/maybe_uninit.rs
@@ -54,9 +54,6 @@ use crate::slice;
/// // The equivalent code with `MaybeUninit<i32>`:
/// let x: i32 = unsafe { MaybeUninit::uninit().assume_init() }; // undefined behavior! ⚠️
/// ```
-/// (Notice that the rules around uninitialized integers are not finalized yet, but
-/// until they are, it is advisable to avoid them.)
-///
/// On top of that, remember that most types have additional invariants beyond merely
/// being considered initialized at the type level. For example, a `1`-initialized [`Vec<T>`]
/// is considered initialized (under the current implementation; this does not constitute
@@ -130,11 +127,8 @@ use crate::slice;
/// MaybeUninit::uninit().assume_init()
/// };
///
-/// // Dropping a `MaybeUninit` does nothing. Thus using raw pointer
-/// // assignment instead of `ptr::write` does not cause the old
-/// // uninitialized value to be dropped. Also if there is a panic during
-/// // this loop, we have a memory leak, but there is no memory safety
-/// // issue.
+/// // Dropping a `MaybeUninit` does nothing, so if there is a panic during this loop,
+/// // we have a memory leak, but there is no memory safety issue.
/// for elem in &mut data[..] {
/// elem.write(vec![42]);
/// }
@@ -152,7 +146,6 @@ use crate::slice;
///
/// ```
/// use std::mem::MaybeUninit;
-/// use std::ptr;
///
/// // Create an uninitialized array of `MaybeUninit`. The `assume_init` is
/// // safe because the type we are claiming to have initialized here is a
@@ -168,7 +161,7 @@ use crate::slice;
///
/// // For each item in the array, drop if we allocated it.
/// for elem in &mut data[0..data_len] {
-/// unsafe { ptr::drop_in_place(elem.as_mut_ptr()); }
+/// unsafe { elem.assume_init_drop(); }
/// }
/// ```
///
@@ -653,7 +646,7 @@ impl<T> MaybeUninit<T> {
/// implements the [`Copy`] trait or not. When using multiple copies of the
/// data (by calling `assume_init_read` multiple times, or first calling
/// `assume_init_read` and then [`assume_init`]), it is your responsibility
- /// to ensure that that data may indeed be duplicated.
+ /// to ensure that data may indeed be duplicated.
///
/// [inv]: #initialization-invariant
/// [`assume_init`]: MaybeUninit::assume_init
@@ -1290,3 +1283,42 @@ impl<T> MaybeUninit<T> {
}
}
}
+
+impl<T, const N: usize> MaybeUninit<[T; N]> {
+ /// Transposes a `MaybeUninit<[T; N]>` into a `[MaybeUninit<T>; N]`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(maybe_uninit_uninit_array_transpose)]
+ /// # use std::mem::MaybeUninit;
+ ///
+ /// let data: [MaybeUninit<u8>; 1000] = MaybeUninit::uninit().transpose();
+ /// ```
+ #[unstable(feature = "maybe_uninit_uninit_array_transpose", issue = "96097")]
+ #[inline]
+ pub const fn transpose(self) -> [MaybeUninit<T>; N] {
+ // SAFETY: T and MaybeUninit<T> have the same layout
+ unsafe { super::transmute_copy(&ManuallyDrop::new(self)) }
+ }
+}
+
+impl<T, const N: usize> [MaybeUninit<T>; N] {
+ /// Transposes a `[MaybeUninit<T>; N]` into a `MaybeUninit<[T; N]>`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(maybe_uninit_uninit_array_transpose)]
+ /// # use std::mem::MaybeUninit;
+ ///
+ /// let data = [MaybeUninit::<u8>::uninit(); 1000];
+ /// let data: MaybeUninit<[u8; 1000]> = data.transpose();
+ /// ```
+ #[unstable(feature = "maybe_uninit_uninit_array_transpose", issue = "96097")]
+ #[inline]
+ pub const fn transpose(self) -> MaybeUninit<[T; N]> {
+ // SAFETY: T and MaybeUninit<T> have the same layout
+ unsafe { super::transmute_copy(&ManuallyDrop::new(self)) }
+ }
+}
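A nightly-only sketch of the two transposes used together (requires the `maybe_uninit_uninit_array_transpose` feature added here): view one uninitialized array as per-element cells, initialize them, then view the result as a single initialized array.

#![feature(maybe_uninit_uninit_array_transpose)]
use std::mem::MaybeUninit;

fn main() {
    // One uninitialized array, viewed as an array of uninitialized elements.
    let mut data: [MaybeUninit<u32>; 4] = MaybeUninit::uninit().transpose();
    for (i, elem) in data.iter_mut().enumerate() {
        elem.write(i as u32 * 2);
    }
    // SAFETY: every element was written in the loop above.
    let data: [u32; 4] = unsafe { data.transpose().assume_init() };
    assert_eq!(data, [0, 2, 4, 6]);
}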
diff --git a/library/core/src/mem/mod.rs b/library/core/src/mem/mod.rs
index 20b2d5e26..9195da5a4 100644
--- a/library/core/src/mem/mod.rs
+++ b/library/core/src/mem/mod.rs
@@ -21,11 +21,10 @@ mod maybe_uninit;
#[stable(feature = "maybe_uninit", since = "1.36.0")]
pub use maybe_uninit::MaybeUninit;
-mod valid_align;
-// For now this type is left crate-local. It could potentially make sense to expose
-// it publicly, as it would be a nice parameter type for methods which need to take
-// alignment as a parameter, such as `Layout::padding_needed_for`.
-pub(crate) use valid_align::ValidAlign;
+// FIXME: This is left here for now to avoid complications around pending reverts.
+// Once <https://github.com/rust-lang/rust/issues/101899> is fully resolved,
+// this should be removed and the references in `alloc::Layout` updated.
+pub(crate) use ptr::Alignment as ValidAlign;
mod transmutability;
#[unstable(feature = "transmutability", issue = "99571")]
@@ -665,14 +664,14 @@ pub unsafe fn zeroed<T>() -> T {
/// correctly: it has the same effect as [`MaybeUninit::uninit().assume_init()`][uninit].
/// As the [`assume_init` documentation][assume_init] explains,
/// [the Rust compiler assumes][inv] that values are properly initialized.
-/// As a consequence, calling e.g. `mem::uninitialized::<bool>()` causes immediate
-/// undefined behavior for returning a `bool` that is not definitely either `true`
-/// or `false`. Worse, truly uninitialized memory like what gets returned here
+///
+/// Truly uninitialized memory like what gets returned here
/// is special in that the compiler knows that it does not have a fixed value.
/// This makes it undefined behavior to have uninitialized data in a variable even
/// if that variable has an integer type.
-/// (Notice that the rules around uninitialized integers are not finalized yet, but
-/// until they are, it is advisable to avoid them.)
+///
+/// Therefore, it is immediate undefined behavior to call this function on nearly all types,
+/// including integer types and arrays of integer types, and even if the result is unused.
///
/// [uninit]: MaybeUninit::uninit
/// [assume_init]: MaybeUninit::assume_init
@@ -1009,18 +1008,18 @@ pub fn copy<T: Copy>(x: &T) -> T {
*x
}
-/// Interprets `src` as having type `&U`, and then reads `src` without moving
+/// Interprets `src` as having type `&Dst`, and then reads `src` without moving
/// the contained value.
///
-/// This function will unsafely assume the pointer `src` is valid for [`size_of::<U>`][size_of]
-/// bytes by transmuting `&T` to `&U` and then reading the `&U` (except that this is done in a way
-/// that is correct even when `&U` has stricter alignment requirements than `&T`). It will also
-/// unsafely create a copy of the contained value instead of moving out of `src`.
+/// This function will unsafely assume the pointer `src` is valid for [`size_of::<Dst>`][size_of]
+/// bytes by transmuting `&Src` to `&Dst` and then reading the `&Dst` (except that this is done
+/// in a way that is correct even when `&Dst` has stricter alignment requirements than `&Src`).
+/// It will also unsafely create a copy of the contained value instead of moving out of `src`.
///
-/// It is not a compile-time error if `T` and `U` have different sizes, but it
-/// is highly encouraged to only invoke this function where `T` and `U` have the
-/// same size. This function triggers [undefined behavior][ub] if `U` is larger than
-/// `T`.
+/// It is not a compile-time error if `Src` and `Dst` have different sizes, but it
+/// is highly encouraged to only invoke this function where `Src` and `Dst` have the
+/// same size. This function triggers [undefined behavior][ub] if `Dst` is larger than
+/// `Src`.
///
/// [ub]: ../../reference/behavior-considered-undefined.html
///
@@ -1053,19 +1052,22 @@ pub fn copy<T: Copy>(x: &T) -> T {
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_transmute_copy", issue = "83165")]
-pub const unsafe fn transmute_copy<T, U>(src: &T) -> U {
- assert!(size_of::<T>() >= size_of::<U>(), "cannot transmute_copy if U is larger than T");
+pub const unsafe fn transmute_copy<Src, Dst>(src: &Src) -> Dst {
+ assert!(
+ size_of::<Src>() >= size_of::<Dst>(),
+ "cannot transmute_copy if Dst is larger than Src"
+ );
- // If U has a higher alignment requirement, src might not be suitably aligned.
- if align_of::<U>() > align_of::<T>() {
+ // If Dst has a higher alignment requirement, src might not be suitably aligned.
+ if align_of::<Dst>() > align_of::<Src>() {
// SAFETY: `src` is a reference which is guaranteed to be valid for reads.
// The caller must guarantee that the actual transmutation is safe.
- unsafe { ptr::read_unaligned(src as *const T as *const U) }
+ unsafe { ptr::read_unaligned(src as *const Src as *const Dst) }
} else {
// SAFETY: `src` is a reference which is guaranteed to be valid for reads.
- // We just checked that `src as *const U` was properly aligned.
+ // We just checked that `src as *const Dst` was properly aligned.
// The caller must guarantee that the actual transmutation is safe.
- unsafe { ptr::read(src as *const T as *const U) }
+ unsafe { ptr::read(src as *const Src as *const Dst) }
}
}
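A usage sketch with the safety argument spelled out in the renamed `Src`/`Dst` terms (the `Meters` wrapper is only for illustration):

use std::mem;

#[repr(transparent)]
struct Meters(u32);

fn main() {
    let m = Meters(7);
    // SAFETY: `Meters` is `repr(transparent)` over `u32`, so `Dst = u32` is no
    // larger than `Src = Meters` and every `Meters` bit pattern is a valid `u32`.
    let raw: u32 = unsafe { mem::transmute_copy(&m) };
    assert_eq!(raw, 7);
}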
@@ -1178,3 +1180,44 @@ pub const fn discriminant<T>(v: &T) -> Discriminant<T> {
pub const fn variant_count<T>() -> usize {
intrinsics::variant_count::<T>()
}
+
+/// Provides associated constants for various useful properties of types,
+/// to give them a canonical form in our code and make them easier to read.
+///
+/// This is here only to simplify all the ZST checks we need in the library.
+/// It's not on a stabilization track right now.
+#[doc(hidden)]
+#[unstable(feature = "sized_type_properties", issue = "none")]
+pub trait SizedTypeProperties: Sized {
+ /// `true` if this type requires no storage.
+ /// `false` if its [size](size_of) is greater than zero.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(sized_type_properties)]
+ /// use core::mem::SizedTypeProperties;
+ ///
+ /// fn do_something_with<T>() {
+ /// if T::IS_ZST {
+ /// // ... special approach ...
+ /// } else {
+ /// // ... the normal thing ...
+ /// }
+ /// }
+ ///
+ /// struct MyUnit;
+ /// assert!(MyUnit::IS_ZST);
+ ///
+ /// // For negative checks, consider using UFCS to emphasize the negation
+ /// assert!(!<i32>::IS_ZST);
+ /// // As it can sometimes hide in the type otherwise
+ /// assert!(!String::IS_ZST);
+ /// ```
+ #[doc(hidden)]
+ #[unstable(feature = "sized_type_properties", issue = "none")]
+ const IS_ZST: bool = size_of::<Self>() == 0;
+}
+#[doc(hidden)]
+#[unstable(feature = "sized_type_properties", issue = "none")]
+impl<T> SizedTypeProperties for T {}
diff --git a/library/core/src/mem/transmutability.rs b/library/core/src/mem/transmutability.rs
index b59a5b89d..3b98efff2 100644
--- a/library/core/src/mem/transmutability.rs
+++ b/library/core/src/mem/transmutability.rs
@@ -4,25 +4,20 @@
/// any value of type `Self` are safely transmutable into a value of type `Dst`, in a given `Context`,
/// notwithstanding whatever safety checks you have asked the compiler to [`Assume`] are satisfied.
#[unstable(feature = "transmutability", issue = "99571")]
-#[cfg_attr(not(bootstrap), lang = "transmute_trait")]
+#[lang = "transmute_trait"]
#[rustc_on_unimplemented(
message = "`{Src}` cannot be safely transmuted into `{Self}` in the defining scope of `{Context}`.",
label = "`{Src}` cannot be safely transmuted into `{Self}` in the defining scope of `{Context}`."
)]
-pub unsafe trait BikeshedIntrinsicFrom<
- Src,
- Context,
- const ASSUME_ALIGNMENT: bool,
- const ASSUME_LIFETIMES: bool,
- const ASSUME_VALIDITY: bool,
- const ASSUME_VISIBILITY: bool,
-> where
+pub unsafe trait BikeshedIntrinsicFrom<Src, Context, const ASSUME: Assume = { Assume::NOTHING }>
+where
Src: ?Sized,
{
}
/// What transmutation safety conditions shall the compiler assume that *you* are checking?
#[unstable(feature = "transmutability", issue = "99571")]
+#[lang = "transmute_opts"]
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub struct Assume {
/// When `true`, the compiler assumes that *you* are ensuring (either dynamically or statically) that
@@ -33,11 +28,80 @@ pub struct Assume {
/// that violates Rust's memory model.
pub lifetimes: bool,
+ /// When `true`, the compiler assumes that *you* have ensured that it is safe for you to violate the
+ /// type and field privacy of the destination type (and sometimes of the source type, too).
+ pub safety: bool,
+
/// When `true`, the compiler assumes that *you* are ensuring that the source type is actually a valid
/// instance of the destination type.
pub validity: bool,
+}
- /// When `true`, the compiler assumes that *you* have ensured that it is safe for you to violate the
- /// type and field privacy of the destination type (and sometimes of the source type, too).
- pub visibility: bool,
+impl Assume {
+ /// Do not assume that *you* have ensured any safety properties are met.
+ #[unstable(feature = "transmutability", issue = "99571")]
+ pub const NOTHING: Self =
+ Self { alignment: false, lifetimes: false, safety: false, validity: false };
+
+ /// Assume only that alignment conditions are met.
+ #[unstable(feature = "transmutability", issue = "99571")]
+ pub const ALIGNMENT: Self = Self { alignment: true, ..Self::NOTHING };
+
+ /// Assume only that lifetime conditions are met.
+ #[unstable(feature = "transmutability", issue = "99571")]
+ pub const LIFETIMES: Self = Self { lifetimes: true, ..Self::NOTHING };
+
+ /// Assume only that safety conditions are met.
+ #[unstable(feature = "transmutability", issue = "99571")]
+ pub const SAFETY: Self = Self { safety: true, ..Self::NOTHING };
+
+ /// Assume only that dynamically-satisfiable validity conditions are met.
+ #[unstable(feature = "transmutability", issue = "99571")]
+ pub const VALIDITY: Self = Self { validity: true, ..Self::NOTHING };
+
+ /// Assume both `self` and `other_assumptions`.
+ #[unstable(feature = "transmutability", issue = "99571")]
+ pub const fn and(self, other_assumptions: Self) -> Self {
+ Self {
+ alignment: self.alignment || other_assumptions.alignment,
+ lifetimes: self.lifetimes || other_assumptions.lifetimes,
+ safety: self.safety || other_assumptions.safety,
+ validity: self.validity || other_assumptions.validity,
+ }
+ }
+
+ /// Assume `self`, excepting `other_assumptions`.
+ #[unstable(feature = "transmutability", issue = "99571")]
+ pub const fn but_not(self, other_assumptions: Self) -> Self {
+ Self {
+ alignment: self.alignment && !other_assumptions.alignment,
+ lifetimes: self.lifetimes && !other_assumptions.lifetimes,
+ safety: self.safety && !other_assumptions.safety,
+ validity: self.validity && !other_assumptions.validity,
+ }
+ }
+}
+
+// FIXME(jswrenn): This const op is not actually usable. Why?
+// https://github.com/rust-lang/rust/pull/100726#issuecomment-1219928926
+#[unstable(feature = "transmutability", issue = "99571")]
+#[rustc_const_unstable(feature = "transmutability", issue = "99571")]
+impl const core::ops::Add for Assume {
+ type Output = Assume;
+
+ fn add(self, other_assumptions: Assume) -> Assume {
+ self.and(other_assumptions)
+ }
+}
+
+// FIXME(jswrenn): This const op is not actually usable. Why?
+// https://github.com/rust-lang/rust/pull/100726#issuecomment-1219928926
+#[unstable(feature = "transmutability", issue = "99571")]
+#[rustc_const_unstable(feature = "transmutability", issue = "99571")]
+impl const core::ops::Sub for Assume {
+ type Output = Assume;
+
+ fn sub(self, other_assumptions: Assume) -> Assume {
+ self.but_not(other_assumptions)
+ }
}
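A nightly-only sketch of how the new constants and combinators compose (requires the `transmutability` feature and assumes the unstable `std::mem::Assume` re-export); the `Add`/`Sub` impls mirror `and`/`but_not`.

#![feature(transmutability)]
use std::mem::Assume;

fn main() {
    let both = Assume::ALIGNMENT.and(Assume::VALIDITY);
    assert!(both.alignment && both.validity);
    assert!(!both.safety && !both.lifetimes);

    let alignment_only = both.but_not(Assume::VALIDITY);
    assert!(alignment_only.alignment && !alignment_only.validity);
}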
diff --git a/library/core/src/mem/valid_align.rs b/library/core/src/mem/valid_align.rs
deleted file mode 100644
index fcfa95120..000000000
--- a/library/core/src/mem/valid_align.rs
+++ /dev/null
@@ -1,247 +0,0 @@
-use crate::convert::TryFrom;
-use crate::num::NonZeroUsize;
-use crate::{cmp, fmt, hash, mem, num};
-
-/// A type storing a `usize` which is a power of two, and thus
-/// represents a possible alignment in the rust abstract machine.
-///
-/// Note that particularly large alignments, while representable in this type,
-/// are likely not to be supported by actual allocators and linkers.
-#[derive(Copy, Clone)]
-#[repr(transparent)]
-pub(crate) struct ValidAlign(ValidAlignEnum);
-
-// ValidAlign is `repr(usize)`, but via extra steps.
-const _: () = assert!(mem::size_of::<ValidAlign>() == mem::size_of::<usize>());
-const _: () = assert!(mem::align_of::<ValidAlign>() == mem::align_of::<usize>());
-
-impl ValidAlign {
- /// Creates a `ValidAlign` from a power-of-two `usize`.
- ///
- /// # Safety
- ///
- /// `align` must be a power of two.
- ///
- /// Equivalently, it must be `1 << exp` for some `exp` in `0..usize::BITS`.
- /// It must *not* be zero.
- #[inline]
- pub(crate) const unsafe fn new_unchecked(align: usize) -> Self {
- debug_assert!(align.is_power_of_two());
-
- // SAFETY: By precondition, this must be a power of two, and
- // our variants encompass all possible powers of two.
- unsafe { mem::transmute::<usize, ValidAlign>(align) }
- }
-
- #[inline]
- pub(crate) const fn as_nonzero(self) -> NonZeroUsize {
- // SAFETY: All the discriminants are non-zero.
- unsafe { NonZeroUsize::new_unchecked(self.0 as usize) }
- }
-
- /// Returns the base 2 logarithm of the alignment.
- ///
- /// This is always exact, as `self` represents a power of two.
- #[inline]
- pub(crate) fn log2(self) -> u32 {
- self.as_nonzero().trailing_zeros()
- }
-}
-
-impl fmt::Debug for ValidAlign {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(f, "{:?} (1 << {:?})", self.as_nonzero(), self.log2())
- }
-}
-
-impl TryFrom<NonZeroUsize> for ValidAlign {
- type Error = num::TryFromIntError;
-
- #[inline]
- fn try_from(align: NonZeroUsize) -> Result<ValidAlign, Self::Error> {
- if align.is_power_of_two() {
- // SAFETY: Just checked for power-of-two
- unsafe { Ok(ValidAlign::new_unchecked(align.get())) }
- } else {
- Err(num::TryFromIntError(()))
- }
- }
-}
-
-impl TryFrom<usize> for ValidAlign {
- type Error = num::TryFromIntError;
-
- #[inline]
- fn try_from(align: usize) -> Result<ValidAlign, Self::Error> {
- if align.is_power_of_two() {
- // SAFETY: Just checked for power-of-two
- unsafe { Ok(ValidAlign::new_unchecked(align)) }
- } else {
- Err(num::TryFromIntError(()))
- }
- }
-}
-
-impl cmp::Eq for ValidAlign {}
-
-impl cmp::PartialEq for ValidAlign {
- #[inline]
- fn eq(&self, other: &Self) -> bool {
- self.as_nonzero() == other.as_nonzero()
- }
-}
-
-impl cmp::Ord for ValidAlign {
- #[inline]
- fn cmp(&self, other: &Self) -> cmp::Ordering {
- self.as_nonzero().cmp(&other.as_nonzero())
- }
-}
-
-impl cmp::PartialOrd for ValidAlign {
- #[inline]
- fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
- Some(self.cmp(other))
- }
-}
-
-impl hash::Hash for ValidAlign {
- #[inline]
- fn hash<H: hash::Hasher>(&self, state: &mut H) {
- self.as_nonzero().hash(state)
- }
-}
-
-#[cfg(target_pointer_width = "16")]
-type ValidAlignEnum = ValidAlignEnum16;
-#[cfg(target_pointer_width = "32")]
-type ValidAlignEnum = ValidAlignEnum32;
-#[cfg(target_pointer_width = "64")]
-type ValidAlignEnum = ValidAlignEnum64;
-
-#[derive(Copy, Clone)]
-#[repr(u16)]
-enum ValidAlignEnum16 {
- _Align1Shl0 = 1 << 0,
- _Align1Shl1 = 1 << 1,
- _Align1Shl2 = 1 << 2,
- _Align1Shl3 = 1 << 3,
- _Align1Shl4 = 1 << 4,
- _Align1Shl5 = 1 << 5,
- _Align1Shl6 = 1 << 6,
- _Align1Shl7 = 1 << 7,
- _Align1Shl8 = 1 << 8,
- _Align1Shl9 = 1 << 9,
- _Align1Shl10 = 1 << 10,
- _Align1Shl11 = 1 << 11,
- _Align1Shl12 = 1 << 12,
- _Align1Shl13 = 1 << 13,
- _Align1Shl14 = 1 << 14,
- _Align1Shl15 = 1 << 15,
-}
-
-#[derive(Copy, Clone)]
-#[repr(u32)]
-enum ValidAlignEnum32 {
- _Align1Shl0 = 1 << 0,
- _Align1Shl1 = 1 << 1,
- _Align1Shl2 = 1 << 2,
- _Align1Shl3 = 1 << 3,
- _Align1Shl4 = 1 << 4,
- _Align1Shl5 = 1 << 5,
- _Align1Shl6 = 1 << 6,
- _Align1Shl7 = 1 << 7,
- _Align1Shl8 = 1 << 8,
- _Align1Shl9 = 1 << 9,
- _Align1Shl10 = 1 << 10,
- _Align1Shl11 = 1 << 11,
- _Align1Shl12 = 1 << 12,
- _Align1Shl13 = 1 << 13,
- _Align1Shl14 = 1 << 14,
- _Align1Shl15 = 1 << 15,
- _Align1Shl16 = 1 << 16,
- _Align1Shl17 = 1 << 17,
- _Align1Shl18 = 1 << 18,
- _Align1Shl19 = 1 << 19,
- _Align1Shl20 = 1 << 20,
- _Align1Shl21 = 1 << 21,
- _Align1Shl22 = 1 << 22,
- _Align1Shl23 = 1 << 23,
- _Align1Shl24 = 1 << 24,
- _Align1Shl25 = 1 << 25,
- _Align1Shl26 = 1 << 26,
- _Align1Shl27 = 1 << 27,
- _Align1Shl28 = 1 << 28,
- _Align1Shl29 = 1 << 29,
- _Align1Shl30 = 1 << 30,
- _Align1Shl31 = 1 << 31,
-}
-
-#[derive(Copy, Clone)]
-#[repr(u64)]
-enum ValidAlignEnum64 {
- _Align1Shl0 = 1 << 0,
- _Align1Shl1 = 1 << 1,
- _Align1Shl2 = 1 << 2,
- _Align1Shl3 = 1 << 3,
- _Align1Shl4 = 1 << 4,
- _Align1Shl5 = 1 << 5,
- _Align1Shl6 = 1 << 6,
- _Align1Shl7 = 1 << 7,
- _Align1Shl8 = 1 << 8,
- _Align1Shl9 = 1 << 9,
- _Align1Shl10 = 1 << 10,
- _Align1Shl11 = 1 << 11,
- _Align1Shl12 = 1 << 12,
- _Align1Shl13 = 1 << 13,
- _Align1Shl14 = 1 << 14,
- _Align1Shl15 = 1 << 15,
- _Align1Shl16 = 1 << 16,
- _Align1Shl17 = 1 << 17,
- _Align1Shl18 = 1 << 18,
- _Align1Shl19 = 1 << 19,
- _Align1Shl20 = 1 << 20,
- _Align1Shl21 = 1 << 21,
- _Align1Shl22 = 1 << 22,
- _Align1Shl23 = 1 << 23,
- _Align1Shl24 = 1 << 24,
- _Align1Shl25 = 1 << 25,
- _Align1Shl26 = 1 << 26,
- _Align1Shl27 = 1 << 27,
- _Align1Shl28 = 1 << 28,
- _Align1Shl29 = 1 << 29,
- _Align1Shl30 = 1 << 30,
- _Align1Shl31 = 1 << 31,
- _Align1Shl32 = 1 << 32,
- _Align1Shl33 = 1 << 33,
- _Align1Shl34 = 1 << 34,
- _Align1Shl35 = 1 << 35,
- _Align1Shl36 = 1 << 36,
- _Align1Shl37 = 1 << 37,
- _Align1Shl38 = 1 << 38,
- _Align1Shl39 = 1 << 39,
- _Align1Shl40 = 1 << 40,
- _Align1Shl41 = 1 << 41,
- _Align1Shl42 = 1 << 42,
- _Align1Shl43 = 1 << 43,
- _Align1Shl44 = 1 << 44,
- _Align1Shl45 = 1 << 45,
- _Align1Shl46 = 1 << 46,
- _Align1Shl47 = 1 << 47,
- _Align1Shl48 = 1 << 48,
- _Align1Shl49 = 1 << 49,
- _Align1Shl50 = 1 << 50,
- _Align1Shl51 = 1 << 51,
- _Align1Shl52 = 1 << 52,
- _Align1Shl53 = 1 << 53,
- _Align1Shl54 = 1 << 54,
- _Align1Shl55 = 1 << 55,
- _Align1Shl56 = 1 << 56,
- _Align1Shl57 = 1 << 57,
- _Align1Shl58 = 1 << 58,
- _Align1Shl59 = 1 << 59,
- _Align1Shl60 = 1 << 60,
- _Align1Shl61 = 1 << 61,
- _Align1Shl62 = 1 << 62,
- _Align1Shl63 = 1 << 63,
-}
diff --git a/library/core/src/num/bignum.rs b/library/core/src/num/bignum.rs
index de85fdd6e..d2a21b6b3 100644
--- a/library/core/src/num/bignum.rs
+++ b/library/core/src/num/bignum.rs
@@ -137,7 +137,7 @@ macro_rules! define_bignum {
// Find the most significant non-zero digit.
let msd = digits.iter().rposition(|&x| x != 0);
match msd {
- Some(msd) => msd * digitbits + digits[msd].log2() as usize + 1,
+ Some(msd) => msd * digitbits + digits[msd].ilog2() as usize + 1,
// There are no non-zero digits, i.e., the number is zero.
_ => 0,
}
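For reference, `ilog2` (the renamed method) returns the floor of the base-2 logarithm, which is exactly what the bit-length computation in the `msd` arm above relies on; a small check (on a toolchain where the integer log methods are available):

fn main() {
    // Floor of log2: 8 and 9 both have their highest set bit at position 3.
    assert_eq!(8u32.ilog2(), 3);
    assert_eq!(9u32.ilog2(), 3);
    // Bit length of a non-zero digit, as in the `msd` computation above.
    let digit: u32 = 0b1010;
    assert_eq!(digit.ilog2() as usize + 1, 4);
}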
diff --git a/library/core/src/num/dec2flt/decimal.rs b/library/core/src/num/dec2flt/decimal.rs
index f8edc3625..2019f71e6 100644
--- a/library/core/src/num/dec2flt/decimal.rs
+++ b/library/core/src/num/dec2flt/decimal.rs
@@ -32,7 +32,7 @@ impl Default for Decimal {
impl Decimal {
/// The maximum number of digits required to unambiguously round a float.
///
- /// For a double-precision IEEE-754 float, this required 767 digits,
+ /// For a double-precision IEEE 754 float, this required 767 digits,
/// so we store the max digits + 1.
///
/// We can exactly represent a float in radix `b` from radix 2 if
diff --git a/library/core/src/num/dec2flt/lemire.rs b/library/core/src/num/dec2flt/lemire.rs
index 75405f471..9f7594460 100644
--- a/library/core/src/num/dec2flt/lemire.rs
+++ b/library/core/src/num/dec2flt/lemire.rs
@@ -6,7 +6,7 @@ use crate::num::dec2flt::table::{
LARGEST_POWER_OF_FIVE, POWER_OF_FIVE_128, SMALLEST_POWER_OF_FIVE,
};
-/// Compute a float using an extended-precision representation.
+/// Compute w * 10^q using an extended-precision float representation.
///
/// Fast conversion of the significant digits and decimal exponent of
/// a float to an extended representation with a binary float. This
@@ -76,7 +76,7 @@ pub fn compute_float<F: RawFloat>(q: i64, mut w: u64) -> BiasedFp {
return BiasedFp { f: mantissa, e: power2 };
}
// Need to handle rounding ties. Normally, we need to round up,
- // but if we fall right in between and and we have an even basis, we
+ // but if we fall right in between and we have an even basis, we
// need to round down.
//
// This will only occur if:
diff --git a/library/core/src/num/error.rs b/library/core/src/num/error.rs
index 1a223016d..768dd8781 100644
--- a/library/core/src/num/error.rs
+++ b/library/core/src/num/error.rs
@@ -1,6 +1,7 @@
//! Error types for conversion to integral types.
use crate::convert::Infallible;
+use crate::error::Error;
use crate::fmt;
/// The error type returned when a checked integral type conversion fails.
@@ -144,3 +145,19 @@ impl fmt::Display for ParseIntError {
self.__description().fmt(f)
}
}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Error for ParseIntError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ self.__description()
+ }
+}
+
+#[stable(feature = "try_from", since = "1.34.0")]
+impl Error for TryFromIntError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ self.__description()
+ }
+}
diff --git a/library/core/src/num/f32.rs b/library/core/src/num/f32.rs
index 6548ad2e5..2c6a0ba64 100644
--- a/library/core/src/num/f32.rs
+++ b/library/core/src/num/f32.rs
@@ -1,4 +1,4 @@
-//! Constants specific to the `f32` single-precision floating point type.
+//! Constants for the `f32` single-precision floating point type.
//!
//! *[See also the `f32` primitive type][f32].*
//!
@@ -394,7 +394,7 @@ impl f32 {
/// Not a Number (NaN).
///
- /// Note that IEEE-745 doesn't define just a single NaN value;
+ /// Note that IEEE 754 doesn't define just a single NaN value;
/// a plethora of bit patterns are considered to be NaN.
/// Furthermore, the standard makes a difference
/// between a "signaling" and a "quiet" NaN,
@@ -632,7 +632,7 @@ impl f32 {
}
/// Returns `true` if `self` has a positive sign, including `+0.0`, NaNs with
- /// positive sign bit and positive infinity. Note that IEEE-745 doesn't assign any
+ /// positive sign bit and positive infinity. Note that IEEE 754 doesn't assign any
/// meaning to the sign bit in case of a NaN, and as Rust doesn't guarantee that
/// the bit pattern of NaNs are conserved over arithmetic operations, the result of
/// `is_sign_positive` on a NaN might produce an unexpected result in some cases.
@@ -654,7 +654,7 @@ impl f32 {
}
/// Returns `true` if `self` has a negative sign, including `-0.0`, NaNs with
- /// negative sign bit and negative infinity. Note that IEEE-745 doesn't assign any
+ /// negative sign bit and negative infinity. Note that IEEE 754 doesn't assign any
/// meaning to the sign bit in case of a NaN, and as Rust doesn't guarantee that
/// the bit pattern of NaNs are conserved over arithmetic operations, the result of
/// `is_sign_negative` on a NaN might produce an unexpected result in some cases.
@@ -678,6 +678,106 @@ impl f32 {
unsafe { mem::transmute::<f32, u32>(self) & 0x8000_0000 != 0 }
}
+ /// Returns the least number greater than `self`.
+ ///
+ /// Let `TINY` be the smallest representable positive `f32`. Then,
+ /// - if `self.is_nan()`, this returns `self`;
+ /// - if `self` is [`NEG_INFINITY`], this returns [`MIN`];
+ /// - if `self` is `-TINY`, this returns -0.0;
+ /// - if `self` is -0.0 or +0.0, this returns `TINY`;
+ /// - if `self` is [`MAX`] or [`INFINITY`], this returns [`INFINITY`];
+ /// - otherwise the unique least value greater than `self` is returned.
+ ///
+ /// The identity `x.next_up() == -(-x).next_down()` holds for all non-NaN `x`. When `x`
+ /// is finite `x == x.next_up().next_down()` also holds.
+ ///
+ /// ```rust
+ /// #![feature(float_next_up_down)]
+ /// // f32::EPSILON is the difference between 1.0 and the next number up.
+ /// assert_eq!(1.0f32.next_up(), 1.0 + f32::EPSILON);
+ /// // But not for most numbers.
+ /// assert!(0.1f32.next_up() < 0.1 + f32::EPSILON);
+ /// assert_eq!(16777216f32.next_up(), 16777218.0);
+ /// ```
+ ///
+ /// [`NEG_INFINITY`]: Self::NEG_INFINITY
+ /// [`INFINITY`]: Self::INFINITY
+ /// [`MIN`]: Self::MIN
+ /// [`MAX`]: Self::MAX
+ #[unstable(feature = "float_next_up_down", issue = "91399")]
+ #[rustc_const_unstable(feature = "float_next_up_down", issue = "91399")]
+ pub const fn next_up(self) -> Self {
+ // We must use strictly integer arithmetic to prevent denormals from
+ // flushing to zero after an arithmetic operation on some platforms.
+ const TINY_BITS: u32 = 0x1; // Smallest positive f32.
+ const CLEAR_SIGN_MASK: u32 = 0x7fff_ffff;
+
+ let bits = self.to_bits();
+ if self.is_nan() || bits == Self::INFINITY.to_bits() {
+ return self;
+ }
+
+ let abs = bits & CLEAR_SIGN_MASK;
+ let next_bits = if abs == 0 {
+ TINY_BITS
+ } else if bits == abs {
+ bits + 1
+ } else {
+ bits - 1
+ };
+ Self::from_bits(next_bits)
+ }
+
+ /// Returns the greatest number less than `self`.
+ ///
+ /// Let `TINY` be the smallest representable positive `f32`. Then,
+ /// - if `self.is_nan()`, this returns `self`;
+ /// - if `self` is [`INFINITY`], this returns [`MAX`];
+ /// - if `self` is `TINY`, this returns 0.0;
+ /// - if `self` is -0.0 or +0.0, this returns `-TINY`;
+ /// - if `self` is [`MIN`] or [`NEG_INFINITY`], this returns [`NEG_INFINITY`];
+ /// - otherwise the unique greatest value less than `self` is returned.
+ ///
+ /// The identity `x.next_down() == -(-x).next_up()` holds for all non-NaN `x`. When `x`
+ /// is finite `x == x.next_down().next_up()` also holds.
+ ///
+ /// ```rust
+ /// #![feature(float_next_up_down)]
+ /// let x = 1.0f32;
+ /// // Clamp value into range [0, 1).
+ /// let clamped = x.clamp(0.0, 1.0f32.next_down());
+ /// assert!(clamped < 1.0);
+ /// assert_eq!(clamped.next_up(), 1.0);
+ /// ```
+ ///
+ /// [`NEG_INFINITY`]: Self::NEG_INFINITY
+ /// [`INFINITY`]: Self::INFINITY
+ /// [`MIN`]: Self::MIN
+ /// [`MAX`]: Self::MAX
+ #[unstable(feature = "float_next_up_down", issue = "91399")]
+ #[rustc_const_unstable(feature = "float_next_up_down", issue = "91399")]
+ pub const fn next_down(self) -> Self {
+ // We must use strictly integer arithmetic to prevent denormals from
+ // flushing to zero after an arithmetic operation on some platforms.
+ const NEG_TINY_BITS: u32 = 0x8000_0001; // Smallest (in magnitude) negative f32.
+ const CLEAR_SIGN_MASK: u32 = 0x7fff_ffff;
+
+ let bits = self.to_bits();
+ if self.is_nan() || bits == Self::NEG_INFINITY.to_bits() {
+ return self;
+ }
+
+ let abs = bits & CLEAR_SIGN_MASK;
+ let next_bits = if abs == 0 {
+ NEG_TINY_BITS
+ } else if bits == abs {
+ bits - 1
+ } else {
+ bits + 1
+ };
+ Self::from_bits(next_bits)
+ }
+
/// Takes the reciprocal (inverse) of a number, `1/x`.
///
/// ```
@@ -733,7 +833,7 @@ impl f32 {
/// Returns the maximum of the two numbers, ignoring NaN.
///
/// If one of the arguments is NaN, then the other argument is returned.
- /// This follows the IEEE-754 2008 semantics for maxNum, except for handling of signaling NaNs;
+ /// This follows the IEEE 754-2008 semantics for maxNum, except for handling of signaling NaNs;
/// this function handles all NaNs the same way and avoids maxNum's problems with associativity.
/// This also matches the behavior of libm’s fmax.
///
@@ -753,7 +853,7 @@ impl f32 {
/// Returns the minimum of the two numbers, ignoring NaN.
///
/// If one of the arguments is NaN, then the other argument is returned.
- /// This follows the IEEE-754 2008 semantics for minNum, except for handling of signaling NaNs;
+ /// This follows the IEEE 754-2008 semantics for minNum, except for handling of signaling NaNs;
/// this function handles all NaNs the same way and avoids minNum's problems with associativity.
/// This also matches the behavior of libm’s fmin.
///
@@ -933,10 +1033,14 @@ impl f32 {
}
}
}
- // SAFETY: `u32` is a plain old datatype so we can always... uh...
- // ...look, just pretend you forgot what you just read.
- // Stability concerns.
- let rt_f32_to_u32 = |rt| unsafe { mem::transmute::<f32, u32>(rt) };
+
+ #[inline(always)] // See https://github.com/rust-lang/compiler-builtins/issues/491
+ fn rt_f32_to_u32(x: f32) -> u32 {
+ // SAFETY: `u32` is a plain old datatype so we can always... uh...
+ // ...look, just pretend you forgot what you just read.
+ // Stability concerns.
+ unsafe { mem::transmute(x) }
+ }
// SAFETY: We use internal implementations that either always work or fail at compile time.
unsafe { intrinsics::const_eval_select((self,), ct_f32_to_u32, rt_f32_to_u32) }
}
@@ -947,9 +1051,9 @@ impl f32 {
/// It turns out this is incredibly portable, for two reasons:
///
/// * Floats and Ints have the same endianness on all supported platforms.
- /// * IEEE-754 very precisely specifies the bit layout of floats.
+ /// * IEEE 754 very precisely specifies the bit layout of floats.
///
- /// However there is one caveat: prior to the 2008 version of IEEE-754, how
+ /// However there is one caveat: prior to the 2008 version of IEEE 754, how
/// to interpret the NaN signaling bit wasn't actually specified. Most platforms
/// (notably x86 and ARM) picked the interpretation that was ultimately
/// standardized in 2008, but some didn't (notably MIPS). As a result, all
@@ -1021,10 +1125,14 @@ impl f32 {
}
}
}
- // SAFETY: `u32` is a plain old datatype so we can always... uh...
- // ...look, just pretend you forgot what you just read.
- // Stability concerns.
- let rt_u32_to_f32 = |rt| unsafe { mem::transmute::<u32, f32>(rt) };
+
+ #[inline(always)] // See https://github.com/rust-lang/compiler-builtins/issues/491
+ fn rt_u32_to_f32(x: u32) -> f32 {
+ // SAFETY: `u32` is a plain old datatype so we can always... uh...
+ // ...look, just pretend you forgot what you just read.
+ // Stability concerns.
+ unsafe { mem::transmute(x) }
+ }
// SAFETY: We use internal implementations that either always work or fail at compile time.
unsafe { intrinsics::const_eval_select((v,), ct_u32_to_f32, rt_u32_to_f32) }
}
@@ -1282,15 +1390,14 @@ impl f32 {
#[must_use = "method returns a new number and does not mutate the original value"]
#[stable(feature = "clamp", since = "1.50.0")]
#[inline]
- pub fn clamp(self, min: f32, max: f32) -> f32 {
+ pub fn clamp(mut self, min: f32, max: f32) -> f32 {
assert!(min <= max);
- let mut x = self;
- if x < min {
- x = min;
+ if self < min {
+ self = min;
}
- if x > max {
- x = max;
+ if self > max {
+ self = max;
}
- x
+ self
}
}
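
The new `next_up`/`next_down` methods above are gated behind `float_next_up_down`, but the same bit manipulation can be reproduced on stable Rust with `to_bits`/`from_bits`. A minimal, non-const sketch of `next_up` for `f32`, mirroring the method added in this patch (illustrative only, not part of the change):

```rust
// Next representable f32 above `x`, using the same integer-bit stepping as the
// new method: staying in integer arithmetic avoids denormals being flushed to
// zero by a floating-point operation on some platforms.
fn next_up(x: f32) -> f32 {
    const TINY_BITS: u32 = 0x1;               // smallest positive f32 (subnormal)
    const CLEAR_SIGN_MASK: u32 = 0x7fff_ffff; // clears the sign bit

    let bits = x.to_bits();
    if x.is_nan() || bits == f32::INFINITY.to_bits() {
        return x; // NaN and +inf are returned unchanged
    }
    let abs = bits & CLEAR_SIGN_MASK;
    let next_bits = if abs == 0 {
        TINY_BITS      // -0.0 and +0.0 step to the smallest positive value
    } else if bits == abs {
        bits + 1       // positive: the next bit pattern is the next larger float
    } else {
        bits - 1       // negative: stepping the pattern down moves toward zero
    };
    f32::from_bits(next_bits)
}

fn main() {
    assert_eq!(next_up(1.0), 1.0 + f32::EPSILON);
    assert_eq!(next_up(-0.0), f32::from_bits(1));
    assert_eq!(next_up(f32::MAX), f32::INFINITY);
}
```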
diff --git a/library/core/src/num/f64.rs b/library/core/src/num/f64.rs
index 75c92c2f8..fd3c18ce2 100644
--- a/library/core/src/num/f64.rs
+++ b/library/core/src/num/f64.rs
@@ -1,4 +1,4 @@
-//! Constants specific to the `f64` double-precision floating point type.
+//! Constants for the `f64` double-precision floating point type.
//!
//! *[See also the `f64` primitive type][f64].*
//!
@@ -393,7 +393,7 @@ impl f64 {
/// Not a Number (NaN).
///
- /// Note that IEEE-745 doesn't define just a single NaN value;
+ /// Note that IEEE 754 doesn't define just a single NaN value;
/// a plethora of bit patterns are considered to be NaN.
/// Furthermore, the standard makes a difference
/// between a "signaling" and a "quiet" NaN,
@@ -624,7 +624,7 @@ impl f64 {
}
/// Returns `true` if `self` has a positive sign, including `+0.0`, NaNs with
- /// positive sign bit and positive infinity. Note that IEEE-745 doesn't assign any
+ /// positive sign bit and positive infinity. Note that IEEE 754 doesn't assign any
/// meaning to the sign bit in case of a NaN, and as Rust doesn't guarantee that
/// the bit pattern of NaNs are conserved over arithmetic operations, the result of
/// `is_sign_positive` on a NaN might produce an unexpected result in some cases.
@@ -655,7 +655,7 @@ impl f64 {
}
/// Returns `true` if `self` has a negative sign, including `-0.0`, NaNs with
- /// negative sign bit and negative infinity. Note that IEEE-745 doesn't assign any
+ /// negative sign bit and negative infinity. Note that IEEE 754 doesn't assign any
/// meaning to the sign bit in case of a NaN, and as Rust doesn't guarantee that
/// the bit pattern of NaNs are conserved over arithmetic operations, the result of
/// `is_sign_negative` on a NaN might produce an unexpected result in some cases.
@@ -688,6 +688,106 @@ impl f64 {
self.is_sign_negative()
}
+ /// Returns the least number greater than `self`.
+ ///
+ /// Let `TINY` be the smallest representable positive `f64`. Then,
+ /// - if `self.is_nan()`, this returns `self`;
+ /// - if `self` is [`NEG_INFINITY`], this returns [`MIN`];
+ /// - if `self` is `-TINY`, this returns -0.0;
+ /// - if `self` is -0.0 or +0.0, this returns `TINY`;
+ /// - if `self` is [`MAX`] or [`INFINITY`], this returns [`INFINITY`];
+ /// - otherwise the unique least value greater than `self` is returned.
+ ///
+ /// The identity `x.next_up() == -(-x).next_down()` holds for all non-NaN `x`. When `x`
+ /// is finite `x == x.next_up().next_down()` also holds.
+ ///
+ /// ```rust
+ /// #![feature(float_next_up_down)]
+ /// // f64::EPSILON is the difference between 1.0 and the next number up.
+ /// assert_eq!(1.0f64.next_up(), 1.0 + f64::EPSILON);
+ /// // But not for most numbers.
+ /// assert!(0.1f64.next_up() < 0.1 + f64::EPSILON);
+ /// assert_eq!(9007199254740992f64.next_up(), 9007199254740994.0);
+ /// ```
+ ///
+ /// [`NEG_INFINITY`]: Self::NEG_INFINITY
+ /// [`INFINITY`]: Self::INFINITY
+ /// [`MIN`]: Self::MIN
+ /// [`MAX`]: Self::MAX
+ #[unstable(feature = "float_next_up_down", issue = "91399")]
+ #[rustc_const_unstable(feature = "float_next_up_down", issue = "91399")]
+ pub const fn next_up(self) -> Self {
+ // We must use strictly integer arithmetic to prevent denormals from
+ // flushing to zero after an arithmetic operation on some platforms.
+ const TINY_BITS: u64 = 0x1; // Smallest positive f64.
+ const CLEAR_SIGN_MASK: u64 = 0x7fff_ffff_ffff_ffff;
+
+ let bits = self.to_bits();
+ if self.is_nan() || bits == Self::INFINITY.to_bits() {
+ return self;
+ }
+
+ let abs = bits & CLEAR_SIGN_MASK;
+ let next_bits = if abs == 0 {
+ TINY_BITS
+ } else if bits == abs {
+ bits + 1
+ } else {
+ bits - 1
+ };
+ Self::from_bits(next_bits)
+ }
+
+ /// Returns the greatest number less than `self`.
+ ///
+ /// Let `TINY` be the smallest representable positive `f64`. Then,
+ /// - if `self.is_nan()`, this returns `self`;
+ /// - if `self` is [`INFINITY`], this returns [`MAX`];
+ /// - if `self` is `TINY`, this returns 0.0;
+ /// - if `self` is -0.0 or +0.0, this returns `-TINY`;
+ /// - if `self` is [`MIN`] or [`NEG_INFINITY`], this returns [`NEG_INFINITY`];
+ /// - otherwise the unique greatest value less than `self` is returned.
+ ///
+ /// The identity `x.next_down() == -(-x).next_up()` holds for all non-NaN `x`. When `x`
+ /// is finite `x == x.next_down().next_up()` also holds.
+ ///
+ /// ```rust
+ /// #![feature(float_next_up_down)]
+ /// let x = 1.0f64;
+ /// // Clamp value into range [0, 1).
+ /// let clamped = x.clamp(0.0, 1.0f64.next_down());
+ /// assert!(clamped < 1.0);
+ /// assert_eq!(clamped.next_up(), 1.0);
+ /// ```
+ ///
+ /// [`NEG_INFINITY`]: Self::NEG_INFINITY
+ /// [`INFINITY`]: Self::INFINITY
+ /// [`MIN`]: Self::MIN
+ /// [`MAX`]: Self::MAX
+ #[unstable(feature = "float_next_up_down", issue = "91399")]
+ #[rustc_const_unstable(feature = "float_next_up_down", issue = "91399")]
+ pub const fn next_down(self) -> Self {
+ // We must use strictly integer arithmetic to prevent denormals from
+ // flushing to zero after an arithmetic operation on some platforms.
+ const NEG_TINY_BITS: u64 = 0x8000_0000_0000_0001; // Smallest (in magnitude) negative f64.
+ const CLEAR_SIGN_MASK: u64 = 0x7fff_ffff_ffff_ffff;
+
+ let bits = self.to_bits();
+ if self.is_nan() || bits == Self::NEG_INFINITY.to_bits() {
+ return self;
+ }
+
+ let abs = bits & CLEAR_SIGN_MASK;
+ let next_bits = if abs == 0 {
+ NEG_TINY_BITS
+ } else if bits == abs {
+ bits - 1
+ } else {
+ bits + 1
+ };
+ Self::from_bits(next_bits)
+ }
+
/// Takes the reciprocal (inverse) of a number, `1/x`.
///
/// ```
@@ -744,7 +844,7 @@ impl f64 {
/// Returns the maximum of the two numbers, ignoring NaN.
///
/// If one of the arguments is NaN, then the other argument is returned.
- /// This follows the IEEE-754 2008 semantics for maxNum, except for handling of signaling NaNs;
+ /// This follows the IEEE 754-2008 semantics for maxNum, except for handling of signaling NaNs;
/// this function handles all NaNs the same way and avoids maxNum's problems with associativity.
/// This also matches the behavior of libm’s fmax.
///
@@ -764,7 +864,7 @@ impl f64 {
/// Returns the minimum of the two numbers, ignoring NaN.
///
/// If one of the arguments is NaN, then the other argument is returned.
- /// This follows the IEEE-754 2008 semantics for minNum, except for handling of signaling NaNs;
+ /// This follows the IEEE 754-2008 semantics for minNum, except for handling of signaling NaNs;
/// this function handles all NaNs the same way and avoids minNum's problems with associativity.
/// This also matches the behavior of libm’s fmin.
///
@@ -926,10 +1026,14 @@ impl f64 {
}
}
}
- // SAFETY: `u64` is a plain old datatype so we can always... uh...
- // ...look, just pretend you forgot what you just read.
- // Stability concerns.
- let rt_f64_to_u64 = |rt| unsafe { mem::transmute::<f64, u64>(rt) };
+
+ #[inline(always)] // See https://github.com/rust-lang/compiler-builtins/issues/491
+ fn rt_f64_to_u64(rt: f64) -> u64 {
+ // SAFETY: `u64` is a plain old datatype so we can always... uh...
+ // ...look, just pretend you forgot what you just read.
+ // Stability concerns.
+ unsafe { mem::transmute::<f64, u64>(rt) }
+ }
// SAFETY: We use internal implementations that either always work or fail at compile time.
unsafe { intrinsics::const_eval_select((self,), ct_f64_to_u64, rt_f64_to_u64) }
}
@@ -940,9 +1044,9 @@ impl f64 {
/// It turns out this is incredibly portable, for two reasons:
///
/// * Floats and Ints have the same endianness on all supported platforms.
- /// * IEEE-754 very precisely specifies the bit layout of floats.
+ /// * IEEE 754 very precisely specifies the bit layout of floats.
///
- /// However there is one caveat: prior to the 2008 version of IEEE-754, how
+ /// However there is one caveat: prior to the 2008 version of IEEE 754, how
/// to interpret the NaN signaling bit wasn't actually specified. Most platforms
/// (notably x86 and ARM) picked the interpretation that was ultimately
/// standardized in 2008, but some didn't (notably MIPS). As a result, all
@@ -1019,10 +1123,14 @@ impl f64 {
}
}
}
- // SAFETY: `u64` is a plain old datatype so we can always... uh...
- // ...look, just pretend you forgot what you just read.
- // Stability concerns.
- let rt_u64_to_f64 = |rt| unsafe { mem::transmute::<u64, f64>(rt) };
+
+ #[inline(always)] // See https://github.com/rust-lang/compiler-builtins/issues/491
+ fn rt_u64_to_f64(rt: u64) -> f64 {
+ // SAFETY: `u64` is a plain old datatype so we can always... uh...
+ // ...look, just pretend you forgot what you just read.
+ // Stability concerns.
+ unsafe { mem::transmute::<u64, f64>(rt) }
+ }
// SAFETY: We use internal implementations that either always work or fail at compile time.
unsafe { intrinsics::const_eval_select((v,), ct_u64_to_f64, rt_u64_to_f64) }
}
@@ -1280,15 +1388,14 @@ impl f64 {
#[must_use = "method returns a new number and does not mutate the original value"]
#[stable(feature = "clamp", since = "1.50.0")]
#[inline]
- pub fn clamp(self, min: f64, max: f64) -> f64 {
+ pub fn clamp(mut self, min: f64, max: f64) -> f64 {
assert!(min <= max);
- let mut x = self;
- if x < min {
- x = min;
+ if self < min {
+ self = min;
}
- if x > max {
- x = max;
+ if self > max {
+ self = max;
}
- x
+ self
}
}
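
The `clamp` change above is purely a style rewrite (reusing `self` as the accumulator rather than a local `x`); the observable semantics, including NaN propagation and the `min <= max` assertion, are untouched. A quick stable-Rust check of that behavior (illustrative only):

```rust
fn main() {
    assert_eq!(3.0f64.clamp(-2.0, 1.0), 1.0);     // above the range: clamped to max
    assert_eq!((-8.0f64).clamp(-2.0, 1.0), -2.0); // below the range: clamped to min
    assert_eq!(0.5f64.clamp(-2.0, 1.0), 0.5);     // inside the range: unchanged
    assert!(f64::NAN.clamp(-2.0, 1.0).is_nan());  // NaN compares false both ways, so it passes through
}
```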
diff --git a/library/core/src/num/flt2dec/strategy/grisu.rs b/library/core/src/num/flt2dec/strategy/grisu.rs
index a4cb51c62..ed3e0edaf 100644
--- a/library/core/src/num/flt2dec/strategy/grisu.rs
+++ b/library/core/src/num/flt2dec/strategy/grisu.rs
@@ -253,7 +253,6 @@ pub fn format_shortest_opt<'a>(
let delta1frac = delta1 & ((1 << e) - 1);
// render integral parts, while checking for the accuracy at each step.
- let mut kappa = max_kappa as i16;
let mut ten_kappa = max_ten_kappa; // 10^kappa
let mut remainder = plus1int; // digits yet to be rendered
loop {
@@ -290,12 +289,10 @@ pub fn format_shortest_opt<'a>(
// the exact number of digits is `max_kappa + 1` as `plus1 < 10^(max_kappa+1)`.
if i > max_kappa as usize {
debug_assert_eq!(ten_kappa, 1);
- debug_assert_eq!(kappa, 0);
break;
}
// restore invariants
- kappa -= 1;
ten_kappa /= 10;
remainder = r;
}
@@ -338,7 +335,6 @@ pub fn format_shortest_opt<'a>(
}
// restore invariants
- kappa -= 1;
remainder = r;
}
diff --git a/library/core/src/num/int_log10.rs b/library/core/src/num/int_log10.rs
index cc26c04a5..80472528f 100644
--- a/library/core/src/num/int_log10.rs
+++ b/library/core/src/num/int_log10.rs
@@ -1,5 +1,5 @@
/// These functions compute the integer logarithm of their type, assuming
-/// that someone has already checked that the the value is strictly positive.
+/// that someone has already checked that the value is strictly positive.
// 0 < val <= u8::MAX
#[inline]
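
The helpers in this file compute floor(log10(val)) under the documented precondition that the value is strictly positive. A naive reference version by repeated division, handy for sanity-checking the optimized per-width implementations (this sketch is not how the file actually computes it):

```rust
// floor(log10(val)) by repeated division; assumes val > 0, matching the
// precondition stated in the comment above.
fn ilog10_naive(mut val: u32) -> u32 {
    assert!(val > 0, "value must be strictly positive");
    let mut log = 0;
    while val >= 10 {
        val /= 10;
        log += 1;
    }
    log
}

fn main() {
    assert_eq!(ilog10_naive(1), 0);
    assert_eq!(ilog10_naive(9), 0);
    assert_eq!(ilog10_naive(10), 1);
    assert_eq!(ilog10_naive(100), 2);
    assert_eq!(ilog10_naive(u32::MAX), 9); // 4_294_967_295 has 10 decimal digits
}
```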
diff --git a/library/core/src/num/int_macros.rs b/library/core/src/num/int_macros.rs
index a66de19ba..914dca61b 100644
--- a/library/core/src/num/int_macros.rs
+++ b/library/core/src/num/int_macros.rs
@@ -464,12 +464,11 @@ macro_rules! int_impl {
/// Basic usage:
///
/// ```
- /// # #![feature(mixed_integer_ops)]
#[doc = concat!("assert_eq!(1", stringify!($SelfT), ".checked_add_unsigned(2), Some(3));")]
#[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX - 2).checked_add_unsigned(3), None);")]
/// ```
- #[unstable(feature = "mixed_integer_ops", issue = "87840")]
- #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[stable(feature = "mixed_integer_ops", since = "1.66.0")]
+ #[rustc_const_stable(feature = "mixed_integer_ops", since = "1.66.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -533,12 +532,11 @@ macro_rules! int_impl {
/// Basic usage:
///
/// ```
- /// # #![feature(mixed_integer_ops)]
#[doc = concat!("assert_eq!(1", stringify!($SelfT), ".checked_sub_unsigned(2), Some(-1));")]
#[doc = concat!("assert_eq!((", stringify!($SelfT), "::MIN + 2).checked_sub_unsigned(3), None);")]
/// ```
- #[unstable(feature = "mixed_integer_ops", issue = "87840")]
- #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[stable(feature = "mixed_integer_ops", since = "1.66.0")]
+ #[rustc_const_stable(feature = "mixed_integer_ops", since = "1.66.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -654,7 +652,6 @@ macro_rules! int_impl {
/// Basic usage:
///
/// ```
- ///
#[doc = concat!("assert_eq!(5", stringify!($SelfT), ".checked_rem(2), Some(1));")]
#[doc = concat!("assert_eq!(5", stringify!($SelfT), ".checked_rem(0), None);")]
#[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.checked_rem(-1), None);")]
@@ -706,7 +703,6 @@ macro_rules! int_impl {
/// Basic usage:
///
/// ```
- ///
#[doc = concat!("assert_eq!(5", stringify!($SelfT), ".checked_neg(), Some(-5));")]
#[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.checked_neg(), None);")]
/// ```
@@ -822,7 +818,6 @@ macro_rules! int_impl {
/// Basic usage:
///
/// ```
- ///
#[doc = concat!("assert_eq!((-5", stringify!($SelfT), ").checked_abs(), Some(5));")]
#[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.checked_abs(), None);")]
/// ```
@@ -874,7 +869,7 @@ macro_rules! int_impl {
// Deal with the final bit of the exponent separately, since
// squaring the base afterwards is not necessary and may cause a
// needless overflow.
- Some(try_opt!(acc.checked_mul(base)))
+ acc.checked_mul(base)
}
/// Saturating integer addition. Computes `self + rhs`, saturating at the numeric
@@ -907,12 +902,11 @@ macro_rules! int_impl {
/// Basic usage:
///
/// ```
- /// # #![feature(mixed_integer_ops)]
#[doc = concat!("assert_eq!(1", stringify!($SelfT), ".saturating_add_unsigned(2), 3);")]
#[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.saturating_add_unsigned(100), ", stringify!($SelfT), "::MAX);")]
/// ```
- #[unstable(feature = "mixed_integer_ops", issue = "87840")]
- #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[stable(feature = "mixed_integer_ops", since = "1.66.0")]
+ #[rustc_const_stable(feature = "mixed_integer_ops", since = "1.66.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -954,12 +948,11 @@ macro_rules! int_impl {
/// Basic usage:
///
/// ```
- /// # #![feature(mixed_integer_ops)]
#[doc = concat!("assert_eq!(100", stringify!($SelfT), ".saturating_sub_unsigned(127), -27);")]
#[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.saturating_sub_unsigned(100), ", stringify!($SelfT), "::MIN);")]
/// ```
- #[unstable(feature = "mixed_integer_ops", issue = "87840")]
- #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[stable(feature = "mixed_integer_ops", since = "1.66.0")]
+ #[rustc_const_stable(feature = "mixed_integer_ops", since = "1.66.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -1030,7 +1023,6 @@ macro_rules! int_impl {
/// Basic usage:
///
/// ```
- ///
#[doc = concat!("assert_eq!(10", stringify!($SelfT), ".saturating_mul(12), 120);")]
#[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.saturating_mul(10), ", stringify!($SelfT), "::MAX);")]
#[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.saturating_mul(10), ", stringify!($SelfT), "::MIN);")]
@@ -1089,7 +1081,6 @@ macro_rules! int_impl {
/// Basic usage:
///
/// ```
- ///
#[doc = concat!("assert_eq!((-4", stringify!($SelfT), ").saturating_pow(3), -64);")]
#[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.saturating_pow(2), ", stringify!($SelfT), "::MAX);")]
#[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.saturating_pow(3), ", stringify!($SelfT), "::MIN);")]
@@ -1135,12 +1126,11 @@ macro_rules! int_impl {
/// Basic usage:
///
/// ```
- /// # #![feature(mixed_integer_ops)]
#[doc = concat!("assert_eq!(100", stringify!($SelfT), ".wrapping_add_unsigned(27), 127);")]
#[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.wrapping_add_unsigned(2), ", stringify!($SelfT), "::MIN + 1);")]
/// ```
- #[unstable(feature = "mixed_integer_ops", issue = "87840")]
- #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[stable(feature = "mixed_integer_ops", since = "1.66.0")]
+ #[rustc_const_stable(feature = "mixed_integer_ops", since = "1.66.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline(always)]
@@ -1176,12 +1166,11 @@ macro_rules! int_impl {
/// Basic usage:
///
/// ```
- /// # #![feature(mixed_integer_ops)]
#[doc = concat!("assert_eq!(0", stringify!($SelfT), ".wrapping_sub_unsigned(127), -127);")]
#[doc = concat!("assert_eq!((-2", stringify!($SelfT), ").wrapping_sub_unsigned(", stringify!($UnsignedT), "::MAX), -1);")]
/// ```
- #[unstable(feature = "mixed_integer_ops", issue = "87840")]
- #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[stable(feature = "mixed_integer_ops", since = "1.66.0")]
+ #[rustc_const_stable(feature = "mixed_integer_ops", since = "1.66.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline(always)]
@@ -1504,7 +1493,6 @@ macro_rules! int_impl {
/// Basic usage:
///
/// ```
- ///
#[doc = concat!("assert_eq!(5", stringify!($SelfT), ".overflowing_add(2), (7, false));")]
#[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.overflowing_add(1), (", stringify!($SelfT), "::MIN, true));")]
/// ```
@@ -1518,6 +1506,51 @@ macro_rules! int_impl {
(a as Self, b)
}
+ /// Calculates `self + rhs + carry` without the ability to overflow.
+ ///
+ /// Performs "signed ternary addition" which takes in an extra bit to add, and may return an
+ /// additional bit of overflow. This signed function is used only on the highest-ordered data,
+ /// for which the signed overflow result indicates whether the big integer overflowed or not.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(bigint_helper_methods)]
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".carrying_add(2, false), (7, false));")]
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".carrying_add(2, true), (8, false));")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.carrying_add(1, false), (", stringify!($SelfT), "::MIN, true));")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.carrying_add(0, true), (", stringify!($SelfT), "::MIN, true));")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.carrying_add(1, true), (", stringify!($SelfT), "::MIN + 1, true));")]
+ #[doc = concat!("assert_eq!(",
+ stringify!($SelfT), "::MAX.carrying_add(", stringify!($SelfT), "::MAX, true), ",
+ "(-1, true));"
+ )]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.carrying_add(-1, true), (", stringify!($SelfT), "::MIN, false));")]
+ #[doc = concat!("assert_eq!(0", stringify!($SelfT), ".carrying_add(", stringify!($SelfT), "::MAX, true), (", stringify!($SelfT), "::MIN, true));")]
+ /// ```
+ ///
+ /// If `carry` is false, this method is equivalent to [`overflowing_add`](Self::overflowing_add):
+ ///
+ /// ```
+ /// #![feature(bigint_helper_methods)]
+ #[doc = concat!("assert_eq!(5_", stringify!($SelfT), ".carrying_add(2, false), 5_", stringify!($SelfT), ".overflowing_add(2));")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.carrying_add(1, false), ", stringify!($SelfT), "::MAX.overflowing_add(1));")]
+ /// ```
+ #[unstable(feature = "bigint_helper_methods", issue = "85532")]
+ #[rustc_const_unstable(feature = "const_bigint_helper_methods", issue = "85532")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn carrying_add(self, rhs: Self, carry: bool) -> (Self, bool) {
+ // note: longer-term this should be done via an intrinsic.
+ // note: no intermediate overflow is required (https://github.com/rust-lang/rust/issues/85532#issuecomment-1032214946).
+ let (a, b) = self.overflowing_add(rhs);
+ let (c, d) = a.overflowing_add(carry as $SelfT);
+ (c, b != d)
+ }
+
/// Calculates `self` + `rhs` with an unsigned `rhs`
///
/// Returns a tuple of the addition along with a boolean indicating
@@ -1529,13 +1562,12 @@ macro_rules! int_impl {
/// Basic usage:
///
/// ```
- /// # #![feature(mixed_integer_ops)]
#[doc = concat!("assert_eq!(1", stringify!($SelfT), ".overflowing_add_unsigned(2), (3, false));")]
#[doc = concat!("assert_eq!((", stringify!($SelfT), "::MIN).overflowing_add_unsigned(", stringify!($UnsignedT), "::MAX), (", stringify!($SelfT), "::MAX, false));")]
#[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX - 2).overflowing_add_unsigned(3), (", stringify!($SelfT), "::MIN, true));")]
/// ```
- #[unstable(feature = "mixed_integer_ops", issue = "87840")]
- #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[stable(feature = "mixed_integer_ops", since = "1.66.0")]
+ #[rustc_const_stable(feature = "mixed_integer_ops", since = "1.66.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -1555,7 +1587,6 @@ macro_rules! int_impl {
/// Basic usage:
///
/// ```
- ///
#[doc = concat!("assert_eq!(5", stringify!($SelfT), ".overflowing_sub(2), (3, false));")]
#[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.overflowing_sub(1), (", stringify!($SelfT), "::MAX, true));")]
/// ```
@@ -1569,6 +1600,39 @@ macro_rules! int_impl {
(a as Self, b)
}
+ /// Calculates `self - rhs - borrow` without the ability to overflow.
+ ///
+ /// Performs "signed ternary subtraction" which takes in an extra bit to subtract, and may return an
+ /// additional bit of overflow. This signed function is used only on the highest-ordered data,
+ /// for which the signed overflow result indicates whether the big integer overflowed or not.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(bigint_helper_methods)]
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".borrowing_sub(2, false), (3, false));")]
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".borrowing_sub(2, true), (2, false));")]
+ #[doc = concat!("assert_eq!(0", stringify!($SelfT), ".borrowing_sub(1, false), (-1, false));")]
+ #[doc = concat!("assert_eq!(0", stringify!($SelfT), ".borrowing_sub(1, true), (-2, false));")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.borrowing_sub(1, true), (", stringify!($SelfT), "::MAX - 1, true));")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.borrowing_sub(-1, false), (", stringify!($SelfT), "::MIN, true));")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.borrowing_sub(-1, true), (", stringify!($SelfT), "::MAX, false));")]
+ /// ```
+ #[unstable(feature = "bigint_helper_methods", issue = "85532")]
+ #[rustc_const_unstable(feature = "const_bigint_helper_methods", issue = "85532")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn borrowing_sub(self, rhs: Self, borrow: bool) -> (Self, bool) {
+ // note: longer-term this should be done via an intrinsic.
+ // note: no intermediate overflow is required (https://github.com/rust-lang/rust/issues/85532#issuecomment-1032214946).
+ let (a, b) = self.overflowing_sub(rhs);
+ let (c, d) = a.overflowing_sub(borrow as $SelfT);
+ (c, b != d)
+ }
+
/// Calculates `self` - `rhs` with an unsigned `rhs`
///
/// Returns a tuple of the subtraction along with a boolean indicating
@@ -1580,13 +1644,12 @@ macro_rules! int_impl {
/// Basic usage:
///
/// ```
- /// # #![feature(mixed_integer_ops)]
#[doc = concat!("assert_eq!(1", stringify!($SelfT), ".overflowing_sub_unsigned(2), (-1, false));")]
#[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX).overflowing_sub_unsigned(", stringify!($UnsignedT), "::MAX), (", stringify!($SelfT), "::MIN, false));")]
#[doc = concat!("assert_eq!((", stringify!($SelfT), "::MIN + 2).overflowing_sub_unsigned(3), (", stringify!($SelfT), "::MAX, true));")]
/// ```
- #[unstable(feature = "mixed_integer_ops", issue = "87840")]
- #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[stable(feature = "mixed_integer_ops", since = "1.66.0")]
+ #[rustc_const_stable(feature = "mixed_integer_ops", since = "1.66.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -1633,7 +1696,6 @@ macro_rules! int_impl {
/// Basic usage:
///
/// ```
- ///
#[doc = concat!("assert_eq!(5", stringify!($SelfT), ".overflowing_div(2), (2, false));")]
#[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.overflowing_div(-1), (", stringify!($SelfT), "::MIN, true));")]
/// ```
@@ -1696,7 +1758,6 @@ macro_rules! int_impl {
/// Basic usage:
///
/// ```
- ///
#[doc = concat!("assert_eq!(5", stringify!($SelfT), ".overflowing_rem(2), (1, false));")]
#[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.overflowing_rem(-1), (0, true));")]
/// ```
@@ -2204,105 +2265,70 @@ macro_rules! int_impl {
/// rounded down.
///
/// This method might not be optimized owing to implementation details;
- /// `log2` can produce results more efficiently for base 2, and `log10`
+ /// `ilog2` can produce results more efficiently for base 2, and `ilog10`
/// can produce results more efficiently for base 10.
///
/// # Panics
///
- /// When the number is negative, zero, or if the base is not at least 2; it
- /// panics in debug mode and the return value is 0 in release
- /// mode.
+ /// This function will panic if `self` is less than or equal to zero,
+ /// or if `base` is less than 2.
///
/// # Examples
///
/// ```
/// #![feature(int_log)]
- #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".log(5), 1);")]
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".ilog(5), 1);")]
/// ```
#[unstable(feature = "int_log", issue = "70887")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
#[track_caller]
- #[rustc_inherit_overflow_checks]
- #[allow(arithmetic_overflow)]
- pub const fn log(self, base: Self) -> u32 {
- match self.checked_log(base) {
- Some(n) => n,
- None => {
- // In debug builds, trigger a panic on None.
- // This should optimize completely out in release builds.
- let _ = Self::MAX + 1;
-
- 0
- },
- }
+ pub const fn ilog(self, base: Self) -> u32 {
+ assert!(base >= 2, "base of integer logarithm must be at least 2");
+ self.checked_ilog(base).expect("argument of integer logarithm must be positive")
}
/// Returns the base 2 logarithm of the number, rounded down.
///
/// # Panics
///
- /// When the number is negative or zero it panics in debug mode and the return value
- /// is 0 in release mode.
+ /// This function will panic if `self` is less than or equal to zero.
///
/// # Examples
///
/// ```
/// #![feature(int_log)]
- #[doc = concat!("assert_eq!(2", stringify!($SelfT), ".log2(), 1);")]
+ #[doc = concat!("assert_eq!(2", stringify!($SelfT), ".ilog2(), 1);")]
/// ```
#[unstable(feature = "int_log", issue = "70887")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
#[track_caller]
- #[rustc_inherit_overflow_checks]
- #[allow(arithmetic_overflow)]
- pub const fn log2(self) -> u32 {
- match self.checked_log2() {
- Some(n) => n,
- None => {
- // In debug builds, trigger a panic on None.
- // This should optimize completely out in release builds.
- let _ = Self::MAX + 1;
-
- 0
- },
- }
+ pub const fn ilog2(self) -> u32 {
+ self.checked_ilog2().expect("argument of integer logarithm must be positive")
}
/// Returns the base 10 logarithm of the number, rounded down.
///
/// # Panics
///
- /// When the number is negative or zero it panics in debug mode and the return value
- /// is 0 in release mode.
+ /// This function will panic if `self` is less than or equal to zero.
///
/// # Example
///
/// ```
/// #![feature(int_log)]
- #[doc = concat!("assert_eq!(10", stringify!($SelfT), ".log10(), 1);")]
+ #[doc = concat!("assert_eq!(10", stringify!($SelfT), ".ilog10(), 1);")]
/// ```
#[unstable(feature = "int_log", issue = "70887")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
#[track_caller]
- #[rustc_inherit_overflow_checks]
- #[allow(arithmetic_overflow)]
- pub const fn log10(self) -> u32 {
- match self.checked_log10() {
- Some(n) => n,
- None => {
- // In debug builds, trigger a panic on None.
- // This should optimize completely out in release builds.
- let _ = Self::MAX + 1;
-
- 0
- },
- }
+ pub const fn ilog10(self) -> u32 {
+ self.checked_ilog10().expect("argument of integer logarithm must be positive")
}
/// Returns the logarithm of the number with respect to an arbitrary base,
@@ -2311,20 +2337,20 @@ macro_rules! int_impl {
/// Returns `None` if the number is negative or zero, or if the base is not at least 2.
///
/// This method might not be optimized owing to implementation details;
- /// `checked_log2` can produce results more efficiently for base 2, and
- /// `checked_log10` can produce results more efficiently for base 10.
+ /// `checked_ilog2` can produce results more efficiently for base 2, and
+ /// `checked_ilog10` can produce results more efficiently for base 10.
///
/// # Examples
///
/// ```
/// #![feature(int_log)]
- #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".checked_log(5), Some(1));")]
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".checked_ilog(5), Some(1));")]
/// ```
#[unstable(feature = "int_log", issue = "70887")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
- pub const fn checked_log(self, base: Self) -> Option<u32> {
+ pub const fn checked_ilog(self, base: Self) -> Option<u32> {
if self <= 0 || base <= 1 {
None
} else {
@@ -2333,7 +2359,7 @@ macro_rules! int_impl {
// Optimization for 128 bit wide integers.
if Self::BITS == 128 {
- let b = Self::log2(self) / (Self::log2(base) + 1);
+ let b = Self::ilog2(self) / (Self::ilog2(base) + 1);
n += b;
r /= base.pow(b as u32);
}
@@ -2354,13 +2380,13 @@ macro_rules! int_impl {
///
/// ```
/// #![feature(int_log)]
- #[doc = concat!("assert_eq!(2", stringify!($SelfT), ".checked_log2(), Some(1));")]
+ #[doc = concat!("assert_eq!(2", stringify!($SelfT), ".checked_ilog2(), Some(1));")]
/// ```
#[unstable(feature = "int_log", issue = "70887")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
- pub const fn checked_log2(self) -> Option<u32> {
+ pub const fn checked_ilog2(self) -> Option<u32> {
if self <= 0 {
None
} else {
@@ -2378,13 +2404,13 @@ macro_rules! int_impl {
///
/// ```
/// #![feature(int_log)]
- #[doc = concat!("assert_eq!(10", stringify!($SelfT), ".checked_log10(), Some(1));")]
+ #[doc = concat!("assert_eq!(10", stringify!($SelfT), ".checked_ilog10(), Some(1));")]
/// ```
#[unstable(feature = "int_log", issue = "70887")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
- pub const fn checked_log10(self) -> Option<u32> {
+ pub const fn checked_ilog10(self) -> Option<u32> {
if self > 0 {
Some(int_log10::$ActualT(self as $ActualT))
} else {
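
The new `carrying_add`/`borrowing_sub` helpers exist so that word-wise arithmetic can be chained through a multi-word big integer. A small sketch of that chaining for unsigned 64-bit words, composed from stable `overflowing_add` exactly the way the method above composes it (the helper names here, such as `add_words`, are illustrative and not part of this patch):

```rust
// Unsigned word addition with carry-in and carry-out, built the same way as
// carrying_add above: at most one of the two partial additions can overflow.
fn carrying_add_u64(a: u64, b: u64, carry: bool) -> (u64, bool) {
    let (sum, c1) = a.overflowing_add(b);
    let (sum, c2) = sum.overflowing_add(carry as u64);
    (sum, c1 != c2)
}

// Adds two equal-length little-endian multi-word integers in place,
// returning the final carry out of the top word.
fn add_words(lhs: &mut [u64], rhs: &[u64]) -> bool {
    let mut carry = false;
    for (l, &r) in lhs.iter_mut().zip(rhs) {
        let (sum, c) = carrying_add_u64(*l, r, carry);
        *l = sum;
        carry = c;
    }
    carry
}

fn main() {
    // (2^64 - 1) + 1: the low word wraps to 0 and the carry ripples into the high word.
    let mut a = [u64::MAX, 0];
    let carry = add_words(&mut a, &[1, 0]);
    assert_eq!(a, [0, 1]);
    assert!(!carry);
}
```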
diff --git a/library/core/src/num/mod.rs b/library/core/src/num/mod.rs
index f481399fd..311c5fa5b 100644
--- a/library/core/src/num/mod.rs
+++ b/library/core/src/num/mod.rs
@@ -3,6 +3,7 @@
#![stable(feature = "rust1", since = "1.0.0")]
use crate::ascii;
+use crate::error::Error;
use crate::intrinsics;
use crate::mem;
use crate::ops::{Add, Mul, Sub};
@@ -57,6 +58,15 @@ pub use wrapping::Wrapping;
#[cfg(not(no_fp_fmt_parse))]
pub use dec2flt::ParseFloatError;
+#[cfg(not(no_fp_fmt_parse))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Error for ParseFloatError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ self.__description()
+ }
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
pub use error::ParseIntError;
@@ -101,6 +111,9 @@ macro_rules! widening_impl {
/// This returns the low-order (wrapping) bits and the high-order (overflow) bits
/// of the result as two separate values, in that order.
///
+ /// If you also need to add a carry to the wide result, then you want
+ /// [`Self::carrying_mul`] instead.
+ ///
/// # Examples
///
/// Basic usage:
@@ -136,6 +149,8 @@ macro_rules! widening_impl {
/// additional amount of overflow. This allows for chaining together multiple
/// multiplications to create "big integers" which represent larger values.
///
+ /// If you don't need the `carry`, then you can use [`Self::widening_mul`] instead.
+ ///
/// # Examples
///
/// Basic usage:
@@ -155,6 +170,31 @@ macro_rules! widening_impl {
)]
/// ```
///
+ /// This is the core operation needed for scalar multiplication when
+ /// implementing it for wider-than-native types.
+ ///
+ /// ```
+ /// #![feature(bigint_helper_methods)]
+ /// fn scalar_mul_eq(little_endian_digits: &mut Vec<u16>, multiplicand: u16) {
+ /// let mut carry = 0;
+ /// for d in little_endian_digits.iter_mut() {
+ /// (*d, carry) = d.carrying_mul(multiplicand, carry);
+ /// }
+ /// if carry != 0 {
+ /// little_endian_digits.push(carry);
+ /// }
+ /// }
+ ///
+ /// let mut v = vec![10, 20];
+ /// scalar_mul_eq(&mut v, 3);
+ /// assert_eq!(v, [30, 60]);
+ ///
+ /// assert_eq!(0x87654321_u64 * 0xFEED, 0x86D3D159E38D);
+ /// let mut v = vec![0x4321, 0x8765];
+ /// scalar_mul_eq(&mut v, 0xFEED);
+ /// assert_eq!(v, [0xE38D, 0xD159, 0x86D3]);
+ /// ```
+ ///
/// If `carry` is zero, this is similar to [`overflowing_mul`](Self::overflowing_mul),
/// except that it gives the value of the overflow instead of just whether one happened:
///
@@ -582,6 +622,38 @@ impl u8 {
matches!(*self, b'0'..=b'9')
}
+ /// Checks if the value is an ASCII octal digit:
+ /// U+0030 '0' ..= U+0037 '7'.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(is_ascii_octdigit)]
+ ///
+ /// let uppercase_a = b'A';
+ /// let a = b'a';
+ /// let zero = b'0';
+ /// let seven = b'7';
+ /// let nine = b'9';
+ /// let percent = b'%';
+ /// let lf = b'\n';
+ ///
+ /// assert!(!uppercase_a.is_ascii_octdigit());
+ /// assert!(!a.is_ascii_octdigit());
+ /// assert!(zero.is_ascii_octdigit());
+ /// assert!(seven.is_ascii_octdigit());
+ /// assert!(!nine.is_ascii_octdigit());
+ /// assert!(!percent.is_ascii_octdigit());
+ /// assert!(!lf.is_ascii_octdigit());
+ /// ```
+ #[must_use]
+ #[unstable(feature = "is_ascii_octdigit", issue = "101288")]
+ #[rustc_const_unstable(feature = "is_ascii_octdigit", issue = "101288")]
+ #[inline]
+ pub const fn is_ascii_octdigit(&self) -> bool {
+ matches!(*self, b'0'..=b'7')
+ }
+
/// Checks if the value is an ASCII hexadecimal digit:
///
/// - U+0030 '0' ..= U+0039 '9', or
@@ -623,7 +695,7 @@ impl u8 {
///
/// - U+0021 ..= U+002F `! " # $ % & ' ( ) * + , - . /`, or
/// - U+003A ..= U+0040 `: ; < = > ? @`, or
- /// - U+005B ..= U+0060 ``[ \ ] ^ _ ` ``, or
+ /// - U+005B ..= U+0060 `` [ \ ] ^ _ ` ``, or
/// - U+007B ..= U+007E `{ | } ~`
///
/// # Examples
@@ -936,8 +1008,8 @@ impl usize {
/// assert_eq!(num.classify(), FpCategory::Normal);
/// assert_eq!(inf.classify(), FpCategory::Infinite);
/// assert_eq!(zero.classify(), FpCategory::Zero);
-/// assert_eq!(nan.classify(), FpCategory::Nan);
/// assert_eq!(sub.classify(), FpCategory::Subnormal);
+/// assert_eq!(nan.classify(), FpCategory::Nan);
/// ```
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
diff --git a/library/core/src/num/nonzero.rs b/library/core/src/num/nonzero.rs
index 4de0a0cf5..6b6f3417f 100644
--- a/library/core/src/num/nonzero.rs
+++ b/library/core/src/num/nonzero.rs
@@ -56,7 +56,10 @@ macro_rules! nonzero_integers {
pub const unsafe fn new_unchecked(n: $Int) -> Self {
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
- core::intrinsics::assert_unsafe_precondition!(n != 0);
+ core::intrinsics::assert_unsafe_precondition!(
+ concat!(stringify!($Ty), "::new_unchecked requires a non-zero argument"),
+ (n: $Int) => n != 0
+ );
Self(n)
}
}
@@ -309,8 +312,8 @@ macro_rules! nonzero_unsigned_operations {
( $( $Ty: ident($Int: ident); )+ ) => {
$(
impl $Ty {
- /// Add an unsigned integer to a non-zero value.
- /// Check for overflow and return [`None`] on overflow
+ /// Adds an unsigned integer to a non-zero value.
+ /// Checks for overflow and returns [`None`] on overflow.
/// As a consequence, the result cannot wrap to zero.
///
///
@@ -346,7 +349,7 @@ macro_rules! nonzero_unsigned_operations {
}
}
- /// Add an unsigned integer to a non-zero value.
+ /// Adds an unsigned integer to a non-zero value.
#[doc = concat!("Return [`", stringify!($Int), "::MAX`] on overflow.")]
///
/// # Examples
@@ -377,7 +380,7 @@ macro_rules! nonzero_unsigned_operations {
unsafe { $Ty::new_unchecked(self.get().saturating_add(other)) }
}
- /// Add an unsigned integer to a non-zero value,
+ /// Adds an unsigned integer to a non-zero value,
/// assuming overflow cannot occur.
/// Overflow is unchecked, and it is undefined behaviour to overflow
/// *even if the result would wrap to a non-zero value*.
@@ -409,7 +412,7 @@ macro_rules! nonzero_unsigned_operations {
}
/// Returns the smallest power of two greater than or equal to n.
- /// Check for overflow and return [`None`]
+ /// Checks for overflow and returns [`None`]
/// if the next power of two is greater than the type’s maximum value.
/// As a consequence, the result cannot wrap to zero.
///
@@ -450,7 +453,7 @@ macro_rules! nonzero_unsigned_operations {
/// Returns the base 2 logarithm of the number, rounded down.
///
/// This is the same operation as
- #[doc = concat!("[`", stringify!($Int), "::log2`],")]
+ #[doc = concat!("[`", stringify!($Int), "::ilog2`],")]
/// except that it has no failure cases to worry about
/// since this value can never be zero.
///
@@ -460,22 +463,22 @@ macro_rules! nonzero_unsigned_operations {
/// #![feature(int_log)]
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
- #[doc = concat!("assert_eq!(", stringify!($Ty), "::new(7).unwrap().log2(), 2);")]
- #[doc = concat!("assert_eq!(", stringify!($Ty), "::new(8).unwrap().log2(), 3);")]
- #[doc = concat!("assert_eq!(", stringify!($Ty), "::new(9).unwrap().log2(), 3);")]
+ #[doc = concat!("assert_eq!(", stringify!($Ty), "::new(7).unwrap().ilog2(), 2);")]
+ #[doc = concat!("assert_eq!(", stringify!($Ty), "::new(8).unwrap().ilog2(), 3);")]
+ #[doc = concat!("assert_eq!(", stringify!($Ty), "::new(9).unwrap().ilog2(), 3);")]
/// ```
#[unstable(feature = "int_log", issue = "70887")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
- pub const fn log2(self) -> u32 {
+ pub const fn ilog2(self) -> u32 {
Self::BITS - 1 - self.leading_zeros()
}
/// Returns the base 10 logarithm of the number, rounded down.
///
/// This is the same operation as
- #[doc = concat!("[`", stringify!($Int), "::log10`],")]
+ #[doc = concat!("[`", stringify!($Int), "::ilog10`],")]
/// except that it has no failure cases to worry about
/// since this value can never be zero.
///
@@ -485,15 +488,15 @@ macro_rules! nonzero_unsigned_operations {
/// #![feature(int_log)]
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
- #[doc = concat!("assert_eq!(", stringify!($Ty), "::new(99).unwrap().log10(), 1);")]
- #[doc = concat!("assert_eq!(", stringify!($Ty), "::new(100).unwrap().log10(), 2);")]
- #[doc = concat!("assert_eq!(", stringify!($Ty), "::new(101).unwrap().log10(), 2);")]
+ #[doc = concat!("assert_eq!(", stringify!($Ty), "::new(99).unwrap().ilog10(), 1);")]
+ #[doc = concat!("assert_eq!(", stringify!($Ty), "::new(100).unwrap().ilog10(), 2);")]
+ #[doc = concat!("assert_eq!(", stringify!($Ty), "::new(101).unwrap().ilog10(), 2);")]
/// ```
#[unstable(feature = "int_log", issue = "70887")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
- pub const fn log10(self) -> u32 {
+ pub const fn ilog10(self) -> u32 {
super::int_log10::$Int(self.0)
}
}
@@ -545,7 +548,7 @@ macro_rules! nonzero_signed_operations {
}
/// Checked absolute value.
- /// Check for overflow and returns [`None`] if
+ /// Checks for overflow and returns [`None`] if
#[doc = concat!("`self == ", stringify!($Int), "::MIN`.")]
/// The result cannot be zero.
///
@@ -721,6 +724,160 @@ macro_rules! nonzero_signed_operations {
// SAFETY: absolute value of nonzero cannot yield zero values.
unsafe { $Uty::new_unchecked(self.get().unsigned_abs()) }
}
+
+ /// Returns `true` if `self` is negative and `false` if the
+ /// number is positive.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// #![feature(nonzero_negation_ops)]
+ ///
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+ /// # fn main() { test().unwrap(); }
+ /// # fn test() -> Option<()> {
+ #[doc = concat!("let pos_five = ", stringify!($Ty), "::new(5)?;")]
+ #[doc = concat!("let neg_five = ", stringify!($Ty), "::new(-5)?;")]
+ ///
+ /// assert!(neg_five.is_negative());
+ /// assert!(!pos_five.is_negative());
+ /// # Some(())
+ /// # }
+ /// ```
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "nonzero_negation_ops", issue = "102443")]
+ pub const fn is_negative(self) -> bool {
+ self.get().is_negative()
+ }
+
+ /// Checked negation. Computes `-self`, returning `None` if
+ #[doc = concat!("`self == ", stringify!($Int), "::MIN`.")]
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// #![feature(nonzero_negation_ops)]
+ ///
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+ /// # fn main() { test().unwrap(); }
+ /// # fn test() -> Option<()> {
+ #[doc = concat!("let pos_five = ", stringify!($Ty), "::new(5)?;")]
+ #[doc = concat!("let neg_five = ", stringify!($Ty), "::new(-5)?;")]
+ #[doc = concat!("let min = ", stringify!($Ty), "::new(",
+ stringify!($Int), "::MIN)?;")]
+ ///
+ /// assert_eq!(pos_five.checked_neg(), Some(neg_five));
+ /// assert_eq!(min.checked_neg(), None);
+ /// # Some(())
+ /// # }
+ /// ```
+ #[inline]
+ #[unstable(feature = "nonzero_negation_ops", issue = "102443")]
+ pub const fn checked_neg(self) -> Option<$Ty> {
+ if let Some(result) = self.get().checked_neg() {
+ // SAFETY: negation of nonzero cannot yield zero values.
+ return Some(unsafe { $Ty::new_unchecked(result) });
+ }
+ None
+ }
+
+ /// Negates self, overflowing if this is equal to the minimum value.
+ ///
+ #[doc = concat!("See [`", stringify!($Int), "::overflowing_neg`]")]
+ /// for documentation on overflow behaviour.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// #![feature(nonzero_negation_ops)]
+ ///
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+ /// # fn main() { test().unwrap(); }
+ /// # fn test() -> Option<()> {
+ #[doc = concat!("let pos_five = ", stringify!($Ty), "::new(5)?;")]
+ #[doc = concat!("let neg_five = ", stringify!($Ty), "::new(-5)?;")]
+ #[doc = concat!("let min = ", stringify!($Ty), "::new(",
+ stringify!($Int), "::MIN)?;")]
+ ///
+ /// assert_eq!(pos_five.overflowing_neg(), (neg_five, false));
+ /// assert_eq!(min.overflowing_neg(), (min, true));
+ /// # Some(())
+ /// # }
+ /// ```
+ #[inline]
+ #[unstable(feature = "nonzero_negation_ops", issue = "102443")]
+ pub const fn overflowing_neg(self) -> ($Ty, bool) {
+ let (result, overflow) = self.get().overflowing_neg();
+ // SAFETY: negation of nonzero cannot yield zero values.
+ ((unsafe { $Ty::new_unchecked(result) }), overflow)
+ }
+
+ /// Saturating negation. Computes `-self`, returning `MAX` if
+ #[doc = concat!("`self == ", stringify!($Int), "::MIN` instead of overflowing.")]
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// #![feature(nonzero_negation_ops)]
+ ///
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+ /// # fn main() { test().unwrap(); }
+ /// # fn test() -> Option<()> {
+ #[doc = concat!("let pos_five = ", stringify!($Ty), "::new(5)?;")]
+ #[doc = concat!("let neg_five = ", stringify!($Ty), "::new(-5)?;")]
+ #[doc = concat!("let min = ", stringify!($Ty), "::new(",
+ stringify!($Int), "::MIN)?;")]
+ #[doc = concat!("let min_plus_one = ", stringify!($Ty), "::new(",
+ stringify!($Int), "::MIN + 1)?;")]
+ #[doc = concat!("let max = ", stringify!($Ty), "::new(",
+ stringify!($Int), "::MAX)?;")]
+ ///
+ /// assert_eq!(pos_five.saturating_neg(), neg_five);
+ /// assert_eq!(min.saturating_neg(), max);
+ /// assert_eq!(max.saturating_neg(), min_plus_one);
+ /// # Some(())
+ /// # }
+ /// ```
+ #[inline]
+ #[unstable(feature = "nonzero_negation_ops", issue = "102443")]
+ pub const fn saturating_neg(self) -> $Ty {
+ if let Some(result) = self.checked_neg() {
+ return result;
+ }
+ $Ty::MAX
+ }
+
+ /// Wrapping (modular) negation. Computes `-self`, wrapping around at the boundary
+ /// of the type.
+ ///
+ #[doc = concat!("See [`", stringify!($Int), "::wrapping_neg`]")]
+ /// for documentation on overflow behaviour.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// #![feature(nonzero_negation_ops)]
+ ///
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+ /// # fn main() { test().unwrap(); }
+ /// # fn test() -> Option<()> {
+ #[doc = concat!("let pos_five = ", stringify!($Ty), "::new(5)?;")]
+ #[doc = concat!("let neg_five = ", stringify!($Ty), "::new(-5)?;")]
+ #[doc = concat!("let min = ", stringify!($Ty), "::new(",
+ stringify!($Int), "::MIN)?;")]
+ ///
+ /// assert_eq!(pos_five.wrapping_neg(), neg_five);
+ /// assert_eq!(min.wrapping_neg(), min);
+ /// # Some(())
+ /// # }
+ /// ```
+ #[inline]
+ #[unstable(feature = "nonzero_negation_ops", issue = "102443")]
+ pub const fn wrapping_neg(self) -> $Ty {
+ let result = self.get().wrapping_neg();
+ // SAFETY: negation of nonzero cannot yield zero values.
+ unsafe { $Ty::new_unchecked(result) }
+ }
}
)+
}
@@ -740,8 +897,8 @@ macro_rules! nonzero_unsigned_signed_operations {
( $( $signedness:ident $Ty: ident($Int: ty); )+ ) => {
$(
impl $Ty {
- /// Multiply two non-zero integers together.
- /// Check for overflow and return [`None`] on overflow.
+ /// Multiplies two non-zero integers together.
+ /// Checks for overflow and returns [`None`] on overflow.
/// As a consequence, the result cannot wrap to zero.
///
/// # Examples
@@ -777,7 +934,7 @@ macro_rules! nonzero_unsigned_signed_operations {
}
}
- /// Multiply two non-zero integers together.
+ /// Multiplies two non-zero integers together.
#[doc = concat!("Return [`", stringify!($Int), "::MAX`] on overflow.")]
///
/// # Examples
@@ -809,7 +966,7 @@ macro_rules! nonzero_unsigned_signed_operations {
unsafe { $Ty::new_unchecked(self.get().saturating_mul(other.get())) }
}
- /// Multiply two non-zero integers together,
+ /// Multiplies two non-zero integers together,
/// assuming overflow cannot occur.
/// Overflow is unchecked, and it is undefined behaviour to overflow
/// *even if the result would wrap to a non-zero value*.
@@ -849,8 +1006,8 @@ macro_rules! nonzero_unsigned_signed_operations {
unsafe { $Ty::new_unchecked(self.get().unchecked_mul(other.get())) }
}
- /// Raise non-zero value to an integer power.
- /// Check for overflow and return [`None`] on overflow.
+ /// Raises non-zero value to an integer power.
+ /// Checks for overflow and returns [`None`] on overflow.
/// As a consequence, the result cannot wrap to zero.
///
/// # Examples
diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs
index 733655442..335cc5124 100644
--- a/library/core/src/num/uint_macros.rs
+++ b/library/core/src/num/uint_macros.rs
@@ -474,13 +474,12 @@ macro_rules! uint_impl {
/// Basic usage:
///
/// ```
- /// # #![feature(mixed_integer_ops)]
#[doc = concat!("assert_eq!(1", stringify!($SelfT), ".checked_add_signed(2), Some(3));")]
#[doc = concat!("assert_eq!(1", stringify!($SelfT), ".checked_add_signed(-2), None);")]
#[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX - 2).checked_add_signed(3), None);")]
/// ```
- #[unstable(feature = "mixed_integer_ops", issue = "87840")]
- #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[stable(feature = "mixed_integer_ops", since = "1.66.0")]
+ #[rustc_const_stable(feature = "mixed_integer_ops", since = "1.66.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
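Since `mixed_integer_ops` is stabilized by this change, the `*_add_signed` family no longer needs a feature gate. A small sketch of the behavior on `u8` (the concrete type is only an illustration; the macro applies this to every unsigned integer):

```rust
fn main() {
    // Unsigned + signed addition, in its checked, saturating and wrapping forms.
    assert_eq!(3u8.checked_add_signed(-2), Some(1));
    assert_eq!(3u8.checked_add_signed(-4), None); // would underflow
    assert_eq!(3u8.saturating_add_signed(-4), 0); // clamps at the bound
    assert_eq!(3u8.wrapping_add_signed(-4), u8::MAX); // wraps around
}
```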
@@ -688,104 +687,69 @@ macro_rules! uint_impl {
/// rounded down.
///
/// This method might not be optimized owing to implementation details;
- /// `log2` can produce results more efficiently for base 2, and `log10`
+ /// `ilog2` can produce results more efficiently for base 2, and `ilog10`
/// can produce results more efficiently for base 10.
///
/// # Panics
///
- /// When the number is zero, or if the base is not at least 2;
- /// it panics in debug mode and the return value is 0 in release mode.
+ /// This function will panic if `self` is zero, or if `base` is less than 2.
///
/// # Examples
///
/// ```
/// #![feature(int_log)]
- #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".log(5), 1);")]
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".ilog(5), 1);")]
/// ```
#[unstable(feature = "int_log", issue = "70887")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
#[track_caller]
- #[rustc_inherit_overflow_checks]
- #[allow(arithmetic_overflow)]
- pub const fn log(self, base: Self) -> u32 {
- match self.checked_log(base) {
- Some(n) => n,
- None => {
- // In debug builds, trigger a panic on None.
- // This should optimize completely out in release builds.
- let _ = Self::MAX + 1;
-
- 0
- },
- }
+ pub const fn ilog(self, base: Self) -> u32 {
+ assert!(base >= 2, "base of integer logarithm must be at least 2");
+ self.checked_ilog(base).expect("argument of integer logarithm must be positive")
}
/// Returns the base 2 logarithm of the number, rounded down.
///
/// # Panics
///
- /// When the number is zero it panics in debug mode and
- /// the return value is 0 in release mode.
+ /// This function will panic if `self` is zero.
///
/// # Examples
///
/// ```
/// #![feature(int_log)]
- #[doc = concat!("assert_eq!(2", stringify!($SelfT), ".log2(), 1);")]
+ #[doc = concat!("assert_eq!(2", stringify!($SelfT), ".ilog2(), 1);")]
/// ```
#[unstable(feature = "int_log", issue = "70887")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
#[track_caller]
- #[rustc_inherit_overflow_checks]
- #[allow(arithmetic_overflow)]
- pub const fn log2(self) -> u32 {
- match self.checked_log2() {
- Some(n) => n,
- None => {
- // In debug builds, trigger a panic on None.
- // This should optimize completely out in release builds.
- let _ = Self::MAX + 1;
-
- 0
- },
- }
+ pub const fn ilog2(self) -> u32 {
+ self.checked_ilog2().expect("argument of integer logarithm must be positive")
}
/// Returns the base 10 logarithm of the number, rounded down.
///
/// # Panics
///
- /// When the number is zero it panics in debug mode and the
- /// return value is 0 in release mode.
+ /// This function will panic if `self` is zero.
///
/// # Example
///
/// ```
/// #![feature(int_log)]
- #[doc = concat!("assert_eq!(10", stringify!($SelfT), ".log10(), 1);")]
+ #[doc = concat!("assert_eq!(10", stringify!($SelfT), ".ilog10(), 1);")]
/// ```
#[unstable(feature = "int_log", issue = "70887")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
#[track_caller]
- #[rustc_inherit_overflow_checks]
- #[allow(arithmetic_overflow)]
- pub const fn log10(self) -> u32 {
- match self.checked_log10() {
- Some(n) => n,
- None => {
- // In debug builds, trigger a panic on None.
- // This should optimize completely out in release builds.
- let _ = Self::MAX + 1;
-
- 0
- },
- }
+ pub const fn ilog10(self) -> u32 {
+ self.checked_ilog10().expect("argument of integer logarithm must be positive")
}
/// Returns the logarithm of the number with respect to an arbitrary base,
@@ -794,20 +758,20 @@ macro_rules! uint_impl {
/// Returns `None` if the number is zero, or if the base is not at least 2.
///
/// This method might not be optimized owing to implementation details;
- /// `checked_log2` can produce results more efficiently for base 2, and
- /// `checked_log10` can produce results more efficiently for base 10.
+ /// `checked_ilog2` can produce results more efficiently for base 2, and
+ /// `checked_ilog10` can produce results more efficiently for base 10.
///
/// # Examples
///
/// ```
/// #![feature(int_log)]
- #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".checked_log(5), Some(1));")]
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".checked_ilog(5), Some(1));")]
/// ```
#[unstable(feature = "int_log", issue = "70887")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
- pub const fn checked_log(self, base: Self) -> Option<u32> {
+ pub const fn checked_ilog(self, base: Self) -> Option<u32> {
if self <= 0 || base <= 1 {
None
} else {
@@ -816,7 +780,7 @@ macro_rules! uint_impl {
// Optimization for 128 bit wide integers.
if Self::BITS == 128 {
- let b = Self::log2(self) / (Self::log2(base) + 1);
+ let b = Self::ilog2(self) / (Self::ilog2(base) + 1);
n += b;
r /= base.pow(b as u32);
}
@@ -837,15 +801,15 @@ macro_rules! uint_impl {
///
/// ```
/// #![feature(int_log)]
- #[doc = concat!("assert_eq!(2", stringify!($SelfT), ".checked_log2(), Some(1));")]
+ #[doc = concat!("assert_eq!(2", stringify!($SelfT), ".checked_ilog2(), Some(1));")]
/// ```
#[unstable(feature = "int_log", issue = "70887")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
- pub const fn checked_log2(self) -> Option<u32> {
+ pub const fn checked_ilog2(self) -> Option<u32> {
if let Some(x) = <$NonZeroT>::new(self) {
- Some(x.log2())
+ Some(x.ilog2())
} else {
None
}
@@ -859,15 +823,15 @@ macro_rules! uint_impl {
///
/// ```
/// #![feature(int_log)]
- #[doc = concat!("assert_eq!(10", stringify!($SelfT), ".checked_log10(), Some(1));")]
+ #[doc = concat!("assert_eq!(10", stringify!($SelfT), ".checked_ilog10(), Some(1));")]
/// ```
#[unstable(feature = "int_log", issue = "70887")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
- pub const fn checked_log10(self) -> Option<u32> {
+ pub const fn checked_ilog10(self) -> Option<u32> {
if let Some(x) = <$NonZeroT>::new(self) {
- Some(x.log10())
+ Some(x.ilog10())
} else {
None
}
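A small sketch of the renamed integer-logarithm methods, assuming a nightly toolchain from this period that still uses the `int_log` gate shown in the doc examples above:

```rust
#![feature(int_log)]

fn main() {
    // All logarithms are rounded down, as documented above.
    assert_eq!(1024u32.ilog2(), 10);
    assert_eq!(100u32.ilog10(), 2);
    assert_eq!(80u32.ilog(3), 3); // 3^3 = 27 <= 80 < 81 = 3^4

    // The checked variants return `None` instead of panicking.
    assert_eq!(0u32.checked_ilog2(), None);
    assert_eq!(10u32.checked_ilog(1), None); // base must be at least 2
}
```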
@@ -1026,7 +990,7 @@ macro_rules! uint_impl {
// squaring the base afterwards is not necessary and may cause a
// needless overflow.
- Some(try_opt!(acc.checked_mul(base)))
+ acc.checked_mul(base)
}
/// Saturating integer addition. Computes `self + rhs`, saturating at
@@ -1057,13 +1021,12 @@ macro_rules! uint_impl {
/// Basic usage:
///
/// ```
- /// # #![feature(mixed_integer_ops)]
#[doc = concat!("assert_eq!(1", stringify!($SelfT), ".saturating_add_signed(2), 3);")]
#[doc = concat!("assert_eq!(1", stringify!($SelfT), ".saturating_add_signed(-2), 0);")]
#[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX - 2).saturating_add_signed(4), ", stringify!($SelfT), "::MAX);")]
/// ```
- #[unstable(feature = "mixed_integer_ops", issue = "87840")]
- #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[stable(feature = "mixed_integer_ops", since = "1.66.0")]
+ #[rustc_const_stable(feature = "mixed_integer_ops", since = "1.66.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -1198,13 +1161,12 @@ macro_rules! uint_impl {
/// Basic usage:
///
/// ```
- /// # #![feature(mixed_integer_ops)]
#[doc = concat!("assert_eq!(1", stringify!($SelfT), ".wrapping_add_signed(2), 3);")]
#[doc = concat!("assert_eq!(1", stringify!($SelfT), ".wrapping_add_signed(-2), ", stringify!($SelfT), "::MAX);")]
#[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX - 2).wrapping_add_signed(4), 1);")]
/// ```
- #[unstable(feature = "mixed_integer_ops", issue = "87840")]
- #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[stable(feature = "mixed_integer_ops", since = "1.66.0")]
+ #[rustc_const_stable(feature = "mixed_integer_ops", since = "1.66.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -1494,7 +1456,6 @@ macro_rules! uint_impl {
/// Basic usage
///
/// ```
- ///
#[doc = concat!("assert_eq!(5", stringify!($SelfT), ".overflowing_add(2), (7, false));")]
#[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.overflowing_add(1), (0, true));")]
/// ```
@@ -1508,37 +1469,42 @@ macro_rules! uint_impl {
(a as Self, b)
}
- /// Calculates `self + rhs + carry` without the ability to overflow.
+ /// Calculates `self` + `rhs` + `carry` and returns a tuple containing
+ /// the sum and the output carry.
///
- /// Performs "ternary addition" which takes in an extra bit to add, and may return an
- /// additional bit of overflow. This allows for chaining together multiple additions
- /// to create "big integers" which represent larger values.
+ /// Performs "ternary addition" of two integer operands and a carry-in
+ /// bit, and returns an output integer and a carry-out bit. This allows
+ /// chaining together multiple additions to create a wider addition, and
+ /// can be useful for bignum addition.
///
#[doc = concat!("This can be thought of as a ", stringify!($BITS), "-bit \"full adder\", in the electronics sense.")]
///
- /// # Examples
+ /// If the input carry is false, this method is equivalent to
+ /// [`overflowing_add`](Self::overflowing_add), and the output carry is
+ /// equal to the overflow flag. Note that although carry and overflow
+ /// flags are similar for unsigned integers, they are different for
+ /// signed integers.
///
- /// Basic usage
+ /// # Examples
///
/// ```
/// #![feature(bigint_helper_methods)]
- #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".carrying_add(2, false), (7, false));")]
- #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".carrying_add(2, true), (8, false));")]
- #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.carrying_add(1, false), (0, true));")]
- #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.carrying_add(0, true), (0, true));")]
- #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.carrying_add(1, true), (1, true));")]
- #[doc = concat!("assert_eq!(",
- stringify!($SelfT), "::MAX.carrying_add(", stringify!($SelfT), "::MAX, true), ",
- "(", stringify!($SelfT), "::MAX, true));"
- )]
- /// ```
///
- /// If `carry` is false, this method is equivalent to [`overflowing_add`](Self::overflowing_add):
+ #[doc = concat!("// 3 MAX (a = 3 × 2^", stringify!($BITS), " + 2^", stringify!($BITS), " - 1)")]
+ #[doc = concat!("// + 5 7 (b = 5 × 2^", stringify!($BITS), " + 7)")]
+ /// // ---------
+ #[doc = concat!("// 9 6 (sum = 9 × 2^", stringify!($BITS), " + 6)")]
///
- /// ```
- /// #![feature(bigint_helper_methods)]
- #[doc = concat!("assert_eq!(5_", stringify!($SelfT), ".carrying_add(2, false), 5_", stringify!($SelfT), ".overflowing_add(2));")]
- #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.carrying_add(1, false), ", stringify!($SelfT), "::MAX.overflowing_add(1));")]
+ #[doc = concat!("let (a1, a0): (", stringify!($SelfT), ", ", stringify!($SelfT), ") = (3, ", stringify!($SelfT), "::MAX);")]
+ #[doc = concat!("let (b1, b0): (", stringify!($SelfT), ", ", stringify!($SelfT), ") = (5, 7);")]
+ /// let carry0 = false;
+ ///
+ /// let (sum0, carry1) = a0.carrying_add(b0, carry0);
+ /// assert_eq!(carry1, true);
+ /// let (sum1, carry2) = a1.carrying_add(b1, carry1);
+ /// assert_eq!(carry2, false);
+ ///
+ /// assert_eq!((sum1, sum0), (9, 6));
/// ```
#[unstable(feature = "bigint_helper_methods", issue = "85532")]
#[rustc_const_unstable(feature = "const_bigint_helper_methods", issue = "85532")]
@@ -1564,13 +1530,12 @@ macro_rules! uint_impl {
/// Basic usage:
///
/// ```
- /// # #![feature(mixed_integer_ops)]
#[doc = concat!("assert_eq!(1", stringify!($SelfT), ".overflowing_add_signed(2), (3, false));")]
#[doc = concat!("assert_eq!(1", stringify!($SelfT), ".overflowing_add_signed(-2), (", stringify!($SelfT), "::MAX, true));")]
#[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX - 2).overflowing_add_signed(4), (1, true));")]
/// ```
- #[unstable(feature = "mixed_integer_ops", issue = "87840")]
- #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[stable(feature = "mixed_integer_ops", since = "1.66.0")]
+ #[rustc_const_stable(feature = "mixed_integer_ops", since = "1.66.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -1590,7 +1555,6 @@ macro_rules! uint_impl {
/// Basic usage
///
/// ```
- ///
#[doc = concat!("assert_eq!(5", stringify!($SelfT), ".overflowing_sub(2), (3, false));")]
#[doc = concat!("assert_eq!(0", stringify!($SelfT), ".overflowing_sub(1), (", stringify!($SelfT), "::MAX, true));")]
/// ```
@@ -1604,22 +1568,35 @@ macro_rules! uint_impl {
(a as Self, b)
}
- /// Calculates `self - rhs - borrow` without the ability to overflow.
+ /// Calculates `self` &minus; `rhs` &minus; `borrow` and returns a tuple
+ /// containing the difference and the output borrow.
///
- /// Performs "ternary subtraction" which takes in an extra bit to subtract, and may return
- /// an additional bit of overflow. This allows for chaining together multiple subtractions
- /// to create "big integers" which represent larger values.
+ /// Performs "ternary subtraction" by subtracting both an integer
+ /// operand and a borrow-in bit from `self`, and returns an output
+ /// integer and a borrow-out bit. This allows chaining together multiple
+ /// subtractions to create a wider subtraction, and can be useful for
+ /// bignum subtraction.
///
/// # Examples
///
- /// Basic usage
- ///
/// ```
/// #![feature(bigint_helper_methods)]
- #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".borrowing_sub(2, false), (3, false));")]
- #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".borrowing_sub(2, true), (2, false));")]
- #[doc = concat!("assert_eq!(0", stringify!($SelfT), ".borrowing_sub(1, false), (", stringify!($SelfT), "::MAX, true));")]
- #[doc = concat!("assert_eq!(0", stringify!($SelfT), ".borrowing_sub(1, true), (", stringify!($SelfT), "::MAX - 1, true));")]
+ ///
+ #[doc = concat!("// 9 6 (a = 9 × 2^", stringify!($BITS), " + 6)")]
+ #[doc = concat!("// - 5 7 (b = 5 × 2^", stringify!($BITS), " + 7)")]
+ /// // ---------
+ #[doc = concat!("// 3 MAX (diff = 3 × 2^", stringify!($BITS), " + 2^", stringify!($BITS), " - 1)")]
+ ///
+ #[doc = concat!("let (a1, a0): (", stringify!($SelfT), ", ", stringify!($SelfT), ") = (9, 6);")]
+ #[doc = concat!("let (b1, b0): (", stringify!($SelfT), ", ", stringify!($SelfT), ") = (5, 7);")]
+ /// let borrow0 = false;
+ ///
+ /// let (diff0, borrow1) = a0.borrowing_sub(b0, borrow0);
+ /// assert_eq!(borrow1, true);
+ /// let (diff1, borrow2) = a1.borrowing_sub(b1, borrow1);
+ /// assert_eq!(borrow2, false);
+ ///
+ #[doc = concat!("assert_eq!((diff1, diff0), (3, ", stringify!($SelfT), "::MAX));")]
/// ```
#[unstable(feature = "bigint_helper_methods", issue = "85532")]
#[rustc_const_unstable(feature = "const_bigint_helper_methods", issue = "85532")]
diff --git a/library/core/src/ops/arith.rs b/library/core/src/ops/arith.rs
index e367be8c1..75c52d3ec 100644
--- a/library/core/src/ops/arith.rs
+++ b/library/core/src/ops/arith.rs
@@ -65,38 +65,15 @@
/// ```
#[lang = "add"]
#[stable(feature = "rust1", since = "1.0.0")]
-#[cfg_attr(
- bootstrap,
- rustc_on_unimplemented(
- on(
- all(_Self = "{integer}", Rhs = "{float}"),
- message = "cannot add a float to an integer",
- ),
- on(
- all(_Self = "{float}", Rhs = "{integer}"),
- message = "cannot add an integer to a float",
- ),
- message = "cannot add `{Rhs}` to `{Self}`",
- label = "no implementation for `{Self} + {Rhs}`"
- )
-)]
-#[cfg_attr(
- not(bootstrap),
- rustc_on_unimplemented(
- on(
- all(_Self = "{integer}", Rhs = "{float}"),
- message = "cannot add a float to an integer",
- ),
- on(
- all(_Self = "{float}", Rhs = "{integer}"),
- message = "cannot add an integer to a float",
- ),
- message = "cannot add `{Rhs}` to `{Self}`",
- label = "no implementation for `{Self} + {Rhs}`",
- append_const_msg,
- )
+#[rustc_on_unimplemented(
+ on(all(_Self = "{integer}", Rhs = "{float}"), message = "cannot add a float to an integer",),
+ on(all(_Self = "{float}", Rhs = "{integer}"), message = "cannot add an integer to a float",),
+ message = "cannot add `{Rhs}` to `{Self}`",
+ label = "no implementation for `{Self} + {Rhs}`",
+ append_const_msg
)]
#[doc(alias = "+")]
+#[const_trait]
pub trait Add<Rhs = Self> {
/// The resulting type after applying the `+` operator.
#[stable(feature = "rust1", since = "1.0.0")]
@@ -201,9 +178,11 @@ add_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_on_unimplemented(
message = "cannot subtract `{Rhs}` from `{Self}`",
- label = "no implementation for `{Self} - {Rhs}`"
+ label = "no implementation for `{Self} - {Rhs}`",
+ append_const_msg
)]
#[doc(alias = "-")]
+#[const_trait]
pub trait Sub<Rhs = Self> {
/// The resulting type after applying the `-` operator.
#[stable(feature = "rust1", since = "1.0.0")]
@@ -333,6 +312,7 @@ sub_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
label = "no implementation for `{Self} * {Rhs}`"
)]
#[doc(alias = "*")]
+#[const_trait]
pub trait Mul<Rhs = Self> {
/// The resulting type after applying the `*` operator.
#[stable(feature = "rust1", since = "1.0.0")]
@@ -466,6 +446,7 @@ mul_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
label = "no implementation for `{Self} / {Rhs}`"
)]
#[doc(alias = "/")]
+#[const_trait]
pub trait Div<Rhs = Self> {
/// The resulting type after applying the `/` operator.
#[stable(feature = "rust1", since = "1.0.0")]
@@ -568,6 +549,7 @@ div_impl_float! { f32 f64 }
label = "no implementation for `{Self} % {Rhs}`"
)]
#[doc(alias = "%")]
+#[const_trait]
pub trait Rem<Rhs = Self> {
/// The resulting type after applying the `%` operator.
#[stable(feature = "rust1", since = "1.0.0")]
@@ -682,6 +664,7 @@ rem_impl_float! { f32 f64 }
#[lang = "neg"]
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(alias = "-")]
+#[const_trait]
pub trait Neg {
/// The resulting type after applying the `-` operator.
#[stable(feature = "rust1", since = "1.0.0")]
@@ -755,6 +738,7 @@ neg_impl! { isize i8 i16 i32 i64 i128 f32 f64 }
)]
#[doc(alias = "+")]
#[doc(alias = "+=")]
+#[const_trait]
pub trait AddAssign<Rhs = Self> {
/// Performs the `+=` operation.
///
@@ -822,6 +806,7 @@ add_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
)]
#[doc(alias = "-")]
#[doc(alias = "-=")]
+#[const_trait]
pub trait SubAssign<Rhs = Self> {
/// Performs the `-=` operation.
///
@@ -880,6 +865,7 @@ sub_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
)]
#[doc(alias = "*")]
#[doc(alias = "*=")]
+#[const_trait]
pub trait MulAssign<Rhs = Self> {
/// Performs the `*=` operation.
///
@@ -938,6 +924,7 @@ mul_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
)]
#[doc(alias = "/")]
#[doc(alias = "/=")]
+#[const_trait]
pub trait DivAssign<Rhs = Self> {
/// Performs the `/=` operation.
///
@@ -999,6 +986,7 @@ div_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
)]
#[doc(alias = "%")]
#[doc(alias = "%=")]
+#[const_trait]
pub trait RemAssign<Rhs = Self> {
/// Performs the `%=` operation.
///
diff --git a/library/core/src/ops/bit.rs b/library/core/src/ops/bit.rs
index 7c664226f..327009801 100644
--- a/library/core/src/ops/bit.rs
+++ b/library/core/src/ops/bit.rs
@@ -31,6 +31,7 @@
#[lang = "not"]
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(alias = "!")]
+#[const_trait]
pub trait Not {
/// The resulting type after applying the `!` operator.
#[stable(feature = "rust1", since = "1.0.0")]
@@ -143,6 +144,7 @@ impl const Not for ! {
message = "no implementation for `{Self} & {Rhs}`",
label = "no implementation for `{Self} & {Rhs}`"
)]
+#[const_trait]
pub trait BitAnd<Rhs = Self> {
/// The resulting type after applying the `&` operator.
#[stable(feature = "rust1", since = "1.0.0")]
@@ -244,6 +246,7 @@ bitand_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
message = "no implementation for `{Self} | {Rhs}`",
label = "no implementation for `{Self} | {Rhs}`"
)]
+#[const_trait]
pub trait BitOr<Rhs = Self> {
/// The resulting type after applying the `|` operator.
#[stable(feature = "rust1", since = "1.0.0")]
@@ -345,6 +348,7 @@ bitor_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
message = "no implementation for `{Self} ^ {Rhs}`",
label = "no implementation for `{Self} ^ {Rhs}`"
)]
+#[const_trait]
pub trait BitXor<Rhs = Self> {
/// The resulting type after applying the `^` operator.
#[stable(feature = "rust1", since = "1.0.0")]
@@ -445,6 +449,7 @@ bitxor_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
message = "no implementation for `{Self} << {Rhs}`",
label = "no implementation for `{Self} << {Rhs}`"
)]
+#[const_trait]
pub trait Shl<Rhs = Self> {
/// The resulting type after applying the `<<` operator.
#[stable(feature = "rust1", since = "1.0.0")]
@@ -564,6 +569,7 @@ shl_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 isize i128 }
message = "no implementation for `{Self} >> {Rhs}`",
label = "no implementation for `{Self} >> {Rhs}`"
)]
+#[const_trait]
pub trait Shr<Rhs = Self> {
/// The resulting type after applying the `>>` operator.
#[stable(feature = "rust1", since = "1.0.0")]
@@ -692,6 +698,7 @@ shr_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize }
message = "no implementation for `{Self} &= {Rhs}`",
label = "no implementation for `{Self} &= {Rhs}`"
)]
+#[const_trait]
pub trait BitAndAssign<Rhs = Self> {
/// Performs the `&=` operation.
///
@@ -764,6 +771,7 @@ bitand_assign_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
message = "no implementation for `{Self} |= {Rhs}`",
label = "no implementation for `{Self} |= {Rhs}`"
)]
+#[const_trait]
pub trait BitOrAssign<Rhs = Self> {
/// Performs the `|=` operation.
///
@@ -836,6 +844,7 @@ bitor_assign_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
message = "no implementation for `{Self} ^= {Rhs}`",
label = "no implementation for `{Self} ^= {Rhs}`"
)]
+#[const_trait]
pub trait BitXorAssign<Rhs = Self> {
/// Performs the `^=` operation.
///
@@ -906,6 +915,7 @@ bitxor_assign_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
message = "no implementation for `{Self} <<= {Rhs}`",
label = "no implementation for `{Self} <<= {Rhs}`"
)]
+#[const_trait]
pub trait ShlAssign<Rhs = Self> {
/// Performs the `<<=` operation.
///
@@ -989,6 +999,7 @@ shl_assign_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize }
message = "no implementation for `{Self} >>= {Rhs}`",
label = "no implementation for `{Self} >>= {Rhs}`"
)]
+#[const_trait]
pub trait ShrAssign<Rhs = Self> {
/// Performs the `>>=` operation.
///
diff --git a/library/core/src/ops/control_flow.rs b/library/core/src/ops/control_flow.rs
index b1f5559dc..72ebe653c 100644
--- a/library/core/src/ops/control_flow.rs
+++ b/library/core/src/ops/control_flow.rs
@@ -95,7 +95,8 @@ pub enum ControlFlow<B, C = ()> {
}
#[unstable(feature = "try_trait_v2", issue = "84277")]
-impl<B, C> ops::Try for ControlFlow<B, C> {
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl<B, C> const ops::Try for ControlFlow<B, C> {
type Output = C;
type Residual = ControlFlow<B, convert::Infallible>;
@@ -114,7 +115,8 @@ impl<B, C> ops::Try for ControlFlow<B, C> {
}
#[unstable(feature = "try_trait_v2", issue = "84277")]
-impl<B, C> ops::FromResidual for ControlFlow<B, C> {
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl<B, C> const ops::FromResidual for ControlFlow<B, C> {
#[inline]
fn from_residual(residual: ControlFlow<B, convert::Infallible>) -> Self {
match residual {
@@ -124,7 +126,8 @@ impl<B, C> ops::FromResidual for ControlFlow<B, C> {
}
#[unstable(feature = "try_trait_v2_residual", issue = "91285")]
-impl<B, C> ops::Residual<C> for ControlFlow<B, convert::Infallible> {
+#[rustc_const_unstable(feature = "const_try", issue = "74935")]
+impl<B, C> const ops::Residual<C> for ControlFlow<B, convert::Infallible> {
type TryType = ControlFlow<B, C>;
}
diff --git a/library/core/src/ops/deref.rs b/library/core/src/ops/deref.rs
index d68932402..4f4c99c4a 100644
--- a/library/core/src/ops/deref.rs
+++ b/library/core/src/ops/deref.rs
@@ -61,6 +61,7 @@
#[doc(alias = "&*")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_diagnostic_item = "Deref"]
+#[cfg_attr(not(bootstrap), const_trait)]
pub trait Deref {
/// The resulting type after dereferencing.
#[stable(feature = "rust1", since = "1.0.0")]
@@ -169,6 +170,7 @@ impl<T: ?Sized> const Deref for &mut T {
#[lang = "deref_mut"]
#[doc(alias = "*")]
#[stable(feature = "rust1", since = "1.0.0")]
+#[const_trait]
pub trait DerefMut: Deref {
/// Mutably dereferences the value.
#[stable(feature = "rust1", since = "1.0.0")]
diff --git a/library/core/src/ops/drop.rs b/library/core/src/ops/drop.rs
index aa654aa55..a2c3d978c 100644
--- a/library/core/src/ops/drop.rs
+++ b/library/core/src/ops/drop.rs
@@ -134,6 +134,7 @@
/// these types cannot have destructors.
#[lang = "drop"]
#[stable(feature = "rust1", since = "1.0.0")]
+#[const_trait]
pub trait Drop {
/// Executes the destructor for this type.
///
@@ -156,7 +157,7 @@ pub trait Drop {
/// handled by the compiler, but when using unsafe code, can sometimes occur
/// unintentionally, particularly when using [`ptr::drop_in_place`].
///
- /// [E0040]: ../../error-index.html#E0040
+ /// [E0040]: ../../error_codes/E0040.html
/// [`panic!`]: crate::panic!
/// [`mem::drop`]: drop
/// [`ptr::drop_in_place`]: crate::ptr::drop_in_place
diff --git a/library/core/src/ops/function.rs b/library/core/src/ops/function.rs
index c5a194b7d..2e0a752c8 100644
--- a/library/core/src/ops/function.rs
+++ b/library/core/src/ops/function.rs
@@ -71,6 +71,7 @@
)]
#[fundamental] // so that regex can rely that `&str: !FnMut`
#[must_use = "closures are lazy and do nothing unless called"]
+#[cfg_attr(not(bootstrap), const_trait)]
pub trait Fn<Args>: FnMut<Args> {
/// Performs the call operation.
#[unstable(feature = "fn_traits", issue = "29625")]
@@ -158,6 +159,7 @@ pub trait Fn<Args>: FnMut<Args> {
)]
#[fundamental] // so that regex can rely that `&str: !FnMut`
#[must_use = "closures are lazy and do nothing unless called"]
+#[cfg_attr(not(bootstrap), const_trait)]
pub trait FnMut<Args>: FnOnce<Args> {
/// Performs the call operation.
#[unstable(feature = "fn_traits", issue = "29625")]
@@ -237,6 +239,7 @@ pub trait FnMut<Args>: FnOnce<Args> {
)]
#[fundamental] // so that regex can rely that `&str: !FnMut`
#[must_use = "closures are lazy and do nothing unless called"]
+#[cfg_attr(not(bootstrap), const_trait)]
pub trait FnOnce<Args> {
/// The returned type after the call operator is used.
#[lang = "fn_once_output"]
@@ -250,9 +253,10 @@ pub trait FnOnce<Args> {
mod impls {
#[stable(feature = "rust1", since = "1.0.0")]
- impl<A, F: ?Sized> Fn<A> for &F
+ #[rustc_const_unstable(feature = "const_fn_trait_ref_impls", issue = "101803")]
+ impl<A, F: ?Sized> const Fn<A> for &F
where
- F: Fn<A>,
+ F: ~const Fn<A>,
{
extern "rust-call" fn call(&self, args: A) -> F::Output {
(**self).call(args)
@@ -260,9 +264,10 @@ mod impls {
}
#[stable(feature = "rust1", since = "1.0.0")]
- impl<A, F: ?Sized> FnMut<A> for &F
+ #[rustc_const_unstable(feature = "const_fn_trait_ref_impls", issue = "101803")]
+ impl<A, F: ?Sized> const FnMut<A> for &F
where
- F: Fn<A>,
+ F: ~const Fn<A>,
{
extern "rust-call" fn call_mut(&mut self, args: A) -> F::Output {
(**self).call(args)
@@ -270,9 +275,10 @@ mod impls {
}
#[stable(feature = "rust1", since = "1.0.0")]
- impl<A, F: ?Sized> FnOnce<A> for &F
+ #[rustc_const_unstable(feature = "const_fn_trait_ref_impls", issue = "101803")]
+ impl<A, F: ?Sized> const FnOnce<A> for &F
where
- F: Fn<A>,
+ F: ~const Fn<A>,
{
type Output = F::Output;
@@ -282,9 +288,10 @@ mod impls {
}
#[stable(feature = "rust1", since = "1.0.0")]
- impl<A, F: ?Sized> FnMut<A> for &mut F
+ #[rustc_const_unstable(feature = "const_fn_trait_ref_impls", issue = "101803")]
+ impl<A, F: ?Sized> const FnMut<A> for &mut F
where
- F: FnMut<A>,
+ F: ~const FnMut<A>,
{
extern "rust-call" fn call_mut(&mut self, args: A) -> F::Output {
(*self).call_mut(args)
@@ -292,9 +299,10 @@ mod impls {
}
#[stable(feature = "rust1", since = "1.0.0")]
- impl<A, F: ?Sized> FnOnce<A> for &mut F
+ #[rustc_const_unstable(feature = "const_fn_trait_ref_impls", issue = "101803")]
+ impl<A, F: ?Sized> const FnOnce<A> for &mut F
where
- F: FnMut<A>,
+ F: ~const FnMut<A>,
{
type Output = F::Output;
extern "rust-call" fn call_once(self, args: A) -> F::Output {
diff --git a/library/core/src/ops/generator.rs b/library/core/src/ops/generator.rs
index b651b7b23..fee4beb1e 100644
--- a/library/core/src/ops/generator.rs
+++ b/library/core/src/ops/generator.rs
@@ -83,7 +83,6 @@ pub trait Generator<R = ()> {
/// `return` statement or implicitly as the last expression of a generator
/// literal. For example futures would use this as `Result<T, E>` as it
/// represents a completed future.
- #[lang = "generator_return"]
type Return;
/// Resumes the execution of this generator.
diff --git a/library/core/src/ops/index.rs b/library/core/src/ops/index.rs
index e2e569cb7..dd4e3ac1c 100644
--- a/library/core/src/ops/index.rs
+++ b/library/core/src/ops/index.rs
@@ -55,6 +55,7 @@
#[doc(alias = "]")]
#[doc(alias = "[")]
#[doc(alias = "[]")]
+#[cfg_attr(not(bootstrap), const_trait)]
pub trait Index<Idx: ?Sized> {
/// The returned type after indexing.
#[stable(feature = "rust1", since = "1.0.0")]
@@ -163,6 +164,7 @@ see chapter in The Book <https://doc.rust-lang.org/book/ch08-02-strings.html#ind
#[doc(alias = "[")]
#[doc(alias = "]")]
#[doc(alias = "[]")]
+#[cfg_attr(not(bootstrap), const_trait)]
pub trait IndexMut<Idx: ?Sized>: Index<Idx> {
/// Performs the mutable indexing (`container[index]`) operation.
///
diff --git a/library/core/src/ops/index_range.rs b/library/core/src/ops/index_range.rs
new file mode 100644
index 000000000..3e06776d2
--- /dev/null
+++ b/library/core/src/ops/index_range.rs
@@ -0,0 +1,171 @@
+use crate::intrinsics::{assert_unsafe_precondition, unchecked_add, unchecked_sub};
+use crate::iter::{FusedIterator, TrustedLen};
+
+/// Like a `Range<usize>`, but with a safety invariant that `start <= end`.
+///
+/// This means that `end - start` cannot overflow, allowing some μoptimizations.
+///
+/// (Normal `Range` code needs to handle degenerate ranges like `10..0`,
+/// which takes extra checks compared to only handling the canonical form.)
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub(crate) struct IndexRange {
+ start: usize,
+ end: usize,
+}
+
+impl IndexRange {
+ /// # Safety
+ /// - `start <= end`
+ #[inline]
+ pub const unsafe fn new_unchecked(start: usize, end: usize) -> Self {
+ // SAFETY: comparisons on usize are pure
+ unsafe {
+ assert_unsafe_precondition!(
+ "IndexRange::new_unchecked requires `start <= end`",
+ (start: usize, end: usize) => start <= end
+ )
+ };
+ IndexRange { start, end }
+ }
+
+ #[inline]
+ pub const fn zero_to(end: usize) -> Self {
+ IndexRange { start: 0, end }
+ }
+
+ #[inline]
+ pub const fn start(&self) -> usize {
+ self.start
+ }
+
+ #[inline]
+ pub const fn end(&self) -> usize {
+ self.end
+ }
+
+ #[inline]
+ pub const fn len(&self) -> usize {
+ // SAFETY: By invariant, this cannot wrap
+ unsafe { unchecked_sub(self.end, self.start) }
+ }
+
+ /// # Safety
+ /// - Can only be called when `start < end`, aka when `len > 0`.
+ #[inline]
+ unsafe fn next_unchecked(&mut self) -> usize {
+ debug_assert!(self.start < self.end);
+
+ let value = self.start;
+ // SAFETY: The range isn't empty, so this cannot overflow
+ self.start = unsafe { unchecked_add(value, 1) };
+ value
+ }
+
+ /// # Safety
+ /// - Can only be called when `start < end`, aka when `len > 0`.
+ #[inline]
+ unsafe fn next_back_unchecked(&mut self) -> usize {
+ debug_assert!(self.start < self.end);
+
+ // SAFETY: The range isn't empty, so this cannot overflow
+ let value = unsafe { unchecked_sub(self.end, 1) };
+ self.end = value;
+ value
+ }
+
+ /// Removes the first `n` items from this range, returning them as an `IndexRange`.
+ /// If there are fewer than `n`, then the whole range is returned and
+ /// `self` is left empty.
+ ///
+ /// This is designed to help implement `Iterator::advance_by`.
+ #[inline]
+ pub fn take_prefix(&mut self, n: usize) -> Self {
+ let mid = if n <= self.len() {
+ // SAFETY: We just checked that this will be between start and end,
+ // and thus the addition cannot overflow.
+ unsafe { unchecked_add(self.start, n) }
+ } else {
+ self.end
+ };
+ let prefix = Self { start: self.start, end: mid };
+ self.start = mid;
+ prefix
+ }
+
+ /// Removes the last `n` items from this range, returning them as an `IndexRange`.
+ /// If there are fewer than `n`, then the whole range is returned and
+ /// `self` is left empty.
+ ///
+ /// This is designed to help implement `Iterator::advance_back_by`.
+ #[inline]
+ pub fn take_suffix(&mut self, n: usize) -> Self {
+ let mid = if n <= self.len() {
+ // SAFETY: We just checked that this will be between start and end,
+ // and thus the addition cannot overflow.
+ unsafe { unchecked_sub(self.end, n) }
+ } else {
+ self.start
+ };
+ let suffix = Self { start: mid, end: self.end };
+ self.end = mid;
+ suffix
+ }
+}
+
+impl Iterator for IndexRange {
+ type Item = usize;
+
+ #[inline]
+ fn next(&mut self) -> Option<usize> {
+ if self.len() > 0 {
+ // SAFETY: We just checked that the range is non-empty
+ unsafe { Some(self.next_unchecked()) }
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.len();
+ (len, Some(len))
+ }
+
+ #[inline]
+ fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ let original_len = self.len();
+ self.take_prefix(n);
+ if n > original_len { Err(original_len) } else { Ok(()) }
+ }
+}
+
+impl DoubleEndedIterator for IndexRange {
+ #[inline]
+ fn next_back(&mut self) -> Option<usize> {
+ if self.len() > 0 {
+ // SAFETY: We just checked that the range is non-empty
+ unsafe { Some(self.next_back_unchecked()) }
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ let original_len = self.len();
+ self.take_suffix(n);
+ if n > original_len { Err(original_len) } else { Ok(()) }
+ }
+}
+
+impl ExactSizeIterator for IndexRange {
+ #[inline]
+ fn len(&self) -> usize {
+ self.len()
+ }
+}
+
+// SAFETY: Because we only deal in `usize`, our `len` is always perfect.
+unsafe impl TrustedLen for IndexRange {}
+
+impl FusedIterator for IndexRange {}
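Because `IndexRange` is `pub(crate)`, it cannot be used directly from outside `core`. The following standalone sketch (the type name and the simplified, non-`unsafe` arithmetic are illustrative only) shows the `take_prefix` contract that `advance_by` above relies on:

```rust
#[derive(Debug, PartialEq)]
struct SketchRange {
    start: usize,
    end: usize, // invariant: start <= end
}

impl SketchRange {
    fn len(&self) -> usize {
        self.end - self.start
    }

    /// Splits off up to `n` leading indices, leaving the rest in `self`.
    fn take_prefix(&mut self, n: usize) -> SketchRange {
        let mid = if n <= self.len() { self.start + n } else { self.end };
        let prefix = SketchRange { start: self.start, end: mid };
        self.start = mid;
        prefix
    }
}

fn main() {
    let mut r = SketchRange { start: 2, end: 10 };
    assert_eq!(r.take_prefix(3), SketchRange { start: 2, end: 5 });
    assert_eq!(r, SketchRange { start: 5, end: 10 });

    // Asking for more than remains drains the range, which is how
    // `advance_by` detects that it ran out of elements.
    assert_eq!(r.take_prefix(100).len(), 5);
    assert_eq!(r.len(), 0);
}
```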
diff --git a/library/core/src/ops/mod.rs b/library/core/src/ops/mod.rs
index 31c1a1d09..a5e5b13b3 100644
--- a/library/core/src/ops/mod.rs
+++ b/library/core/src/ops/mod.rs
@@ -146,6 +146,7 @@ mod drop;
mod function;
mod generator;
mod index;
+mod index_range;
mod range;
mod try_trait;
mod unsize;
@@ -178,6 +179,8 @@ pub use self::index::{Index, IndexMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::range::{Range, RangeFrom, RangeFull, RangeTo};
+pub(crate) use self::index_range::IndexRange;
+
#[stable(feature = "inclusive_range", since = "1.26.0")]
pub use self::range::{Bound, RangeBounds, RangeInclusive, RangeToInclusive};
diff --git a/library/core/src/ops/range.rs b/library/core/src/ops/range.rs
index a3b148473..d29ae3561 100644
--- a/library/core/src/ops/range.rs
+++ b/library/core/src/ops/range.rs
@@ -677,7 +677,7 @@ pub enum Bound<T> {
impl<T> Bound<T> {
/// Converts from `&Bound<T>` to `Bound<&T>`.
#[inline]
- #[unstable(feature = "bound_as_ref", issue = "80996")]
+ #[stable(feature = "bound_as_ref_shared", since = "1.65.0")]
pub fn as_ref(&self) -> Bound<&T> {
match *self {
Included(ref x) => Included(x),
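`Bound::as_ref`, marked stable above, borrows the bound's payload without consuming it; a minimal usage sketch:

```rust
use std::ops::Bound;

fn main() {
    let owned: Bound<String> = Bound::Included(String::from("a"));

    // Borrow the contained value; `owned` is still usable afterwards.
    let borrowed: Bound<&String> = owned.as_ref();
    assert!(matches!(borrowed, Bound::Included(s) if s == "a"));
    assert_eq!(owned, Bound::Included(String::from("a")));
}
```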
diff --git a/library/core/src/ops/try_trait.rs b/library/core/src/ops/try_trait.rs
index 02f7f62bf..84a690468 100644
--- a/library/core/src/ops/try_trait.rs
+++ b/library/core/src/ops/try_trait.rs
@@ -128,7 +128,8 @@ use crate::ops::ControlFlow;
)]
#[doc(alias = "?")]
#[lang = "Try"]
-pub trait Try: FromResidual {
+#[const_trait]
+pub trait Try: ~const FromResidual {
/// The type of the value produced by `?` when *not* short-circuiting.
#[unstable(feature = "try_trait_v2", issue = "84277")]
type Output;
@@ -232,7 +233,7 @@ pub trait Try: FromResidual {
message = "the `?` operator can only be used on `Result`s, not `Option`s, \
in {ItemContext} that returns `Result`",
label = "use `.ok_or(...)?` to provide an error compatible with `{Self}`",
- enclosing_scope = "this function returns a `Result`"
+ parent_label = "this function returns a `Result`"
),
on(
all(
@@ -245,7 +246,7 @@ pub trait Try: FromResidual {
message = "the `?` operator can only be used on `Result`s \
in {ItemContext} that returns `Result`",
label = "this `?` produces `{R}`, which is incompatible with `{Self}`",
- enclosing_scope = "this function returns a `Result`"
+ parent_label = "this function returns a `Result`"
),
on(
all(
@@ -256,7 +257,7 @@ pub trait Try: FromResidual {
message = "the `?` operator can only be used on `Option`s, not `Result`s, \
in {ItemContext} that returns `Option`",
label = "use `.ok()?` if you want to discard the `{R}` error information",
- enclosing_scope = "this function returns an `Option`"
+ parent_label = "this function returns an `Option`"
),
on(
all(
@@ -268,7 +269,7 @@ pub trait Try: FromResidual {
message = "the `?` operator can only be used on `Option`s \
in {ItemContext} that returns `Option`",
label = "this `?` produces `{R}`, which is incompatible with `{Self}`",
- enclosing_scope = "this function returns an `Option`"
+ parent_label = "this function returns an `Option`"
),
on(
all(
@@ -279,7 +280,7 @@ pub trait Try: FromResidual {
message = "the `?` operator in {ItemContext} that returns `ControlFlow<B, _>` \
can only be used on other `ControlFlow<B, _>`s (with the same Break type)",
label = "this `?` produces `{R}`, which is incompatible with `{Self}`",
- enclosing_scope = "this function returns a `ControlFlow`",
+ parent_label = "this function returns a `ControlFlow`",
note = "unlike `Result`, there's no `From`-conversion performed for `ControlFlow`"
),
on(
@@ -291,7 +292,7 @@ pub trait Try: FromResidual {
message = "the `?` operator can only be used on `ControlFlow`s \
in {ItemContext} that returns `ControlFlow`",
label = "this `?` produces `{R}`, which is incompatible with `{Self}`",
- enclosing_scope = "this function returns a `ControlFlow`",
+ parent_label = "this function returns a `ControlFlow`",
),
on(
all(from_desugaring = "QuestionMark"),
@@ -299,11 +300,12 @@ pub trait Try: FromResidual {
that returns `Result` or `Option` \
(or another type that implements `{FromResidual}`)",
label = "cannot use the `?` operator in {ItemContext} that returns `{Self}`",
- enclosing_scope = "this function should return `Result` or `Option` to accept `?`"
+ parent_label = "this function should return `Result` or `Option` to accept `?`"
),
)]
#[rustc_diagnostic_item = "FromResidual"]
#[unstable(feature = "try_trait_v2", issue = "84277")]
+#[const_trait]
pub trait FromResidual<R = <Self as Try>::Residual> {
/// Constructs the type from a compatible `Residual` type.
///
@@ -356,10 +358,11 @@ where
/// and in the other direction,
/// `<Result<Infallible, E> as Residual<T>>::TryType = Result<T, E>`.
#[unstable(feature = "try_trait_v2_residual", issue = "91285")]
+#[const_trait]
pub trait Residual<O> {
/// The "return" type of this meta-function.
#[unstable(feature = "try_trait_v2_residual", issue = "91285")]
- type TryType: Try<Output = O, Residual = Self>;
+ type TryType: ~const Try<Output = O, Residual = Self>;
}
#[unstable(feature = "pub_crate_should_not_need_unstable_attr", issue = "none")]
@@ -376,16 +379,19 @@ pub(crate) type ChangeOutputType<T, V> = <<T as Try>::Residual as Residual<V>>::
pub(crate) struct NeverShortCircuit<T>(pub T);
impl<T> NeverShortCircuit<T> {
- /// Wrap a binary `FnMut` to return its result wrapped in a `NeverShortCircuit`.
+ /// Wraps the output of a `~const FnMut` in a `NeverShortCircuit`; used when building a `ConstFnMutClosure`.
#[inline]
- pub fn wrap_mut_2<A, B>(mut f: impl FnMut(A, B) -> T) -> impl FnMut(A, B) -> Self {
- move |a, b| NeverShortCircuit(f(a, b))
+ pub const fn wrap_mut_2_imp<A, B, F: ~const FnMut(A, B) -> T>(
+ f: &mut F,
+ (a, b): (A, B),
+ ) -> NeverShortCircuit<T> {
+ NeverShortCircuit(f(a, b))
}
}
pub(crate) enum NeverShortCircuitResidual {}
-impl<T> Try for NeverShortCircuit<T> {
+impl<T> const Try for NeverShortCircuit<T> {
type Output = T;
type Residual = NeverShortCircuitResidual;
@@ -400,14 +406,14 @@ impl<T> Try for NeverShortCircuit<T> {
}
}
-impl<T> FromResidual for NeverShortCircuit<T> {
+impl<T> const FromResidual for NeverShortCircuit<T> {
#[inline]
fn from_residual(never: NeverShortCircuitResidual) -> Self {
match never {}
}
}
-impl<T> Residual<T> for NeverShortCircuitResidual {
+impl<T> const Residual<T> for NeverShortCircuitResidual {
type TryType = NeverShortCircuit<T>;
}
diff --git a/library/core/src/option.rs b/library/core/src/option.rs
index bca73cb77..f284b4359 100644
--- a/library/core/src/option.rs
+++ b/library/core/src/option.rs
@@ -559,22 +559,25 @@ impl<T> Option<T> {
/// # Examples
///
/// ```
- /// #![feature(is_some_with)]
+ /// #![feature(is_some_and)]
///
/// let x: Option<u32> = Some(2);
- /// assert_eq!(x.is_some_and(|&x| x > 1), true);
+ /// assert_eq!(x.is_some_and(|x| x > 1), true);
///
/// let x: Option<u32> = Some(0);
- /// assert_eq!(x.is_some_and(|&x| x > 1), false);
+ /// assert_eq!(x.is_some_and(|x| x > 1), false);
///
/// let x: Option<u32> = None;
- /// assert_eq!(x.is_some_and(|&x| x > 1), false);
+ /// assert_eq!(x.is_some_and(|x| x > 1), false);
/// ```
#[must_use]
#[inline]
- #[unstable(feature = "is_some_with", issue = "93050")]
- pub fn is_some_and(&self, f: impl FnOnce(&T) -> bool) -> bool {
- matches!(self, Some(x) if f(x))
+ #[unstable(feature = "is_some_and", issue = "93050")]
+ pub fn is_some_and(self, f: impl FnOnce(T) -> bool) -> bool {
+ match self {
+ None => false,
+ Some(x) => f(x),
+ }
}
/// Returns `true` if the option is a [`None`] value.
@@ -834,19 +837,12 @@ impl<T> Option<T> {
///
/// # Examples
///
- /// Converts a string to an integer, turning poorly-formed strings
- /// into 0 (the default value for integers). [`parse`] converts
- /// a string to any other type that implements [`FromStr`], returning
- /// [`None`] on error.
- ///
/// ```
- /// let good_year_from_input = "1909";
- /// let bad_year_from_input = "190blarg";
- /// let good_year = good_year_from_input.parse().ok().unwrap_or_default();
- /// let bad_year = bad_year_from_input.parse().ok().unwrap_or_default();
+ /// let x: Option<u32> = None;
+ /// let y: Option<u32> = Some(12);
///
- /// assert_eq!(1909, good_year);
- /// assert_eq!(0, bad_year);
+ /// assert_eq!(x.unwrap_or_default(), 0);
+ /// assert_eq!(y.unwrap_or_default(), 12);
/// ```
///
/// [default value]: Default::default
@@ -1189,6 +1185,12 @@ impl<T> Option<T> {
/// Returns [`None`] if the option is [`None`], otherwise returns `optb`.
///
+ /// Arguments passed to `and` are eagerly evaluated; if you are passing the
+ /// result of a function call, it is recommended to use [`and_then`], which is
+ /// lazily evaluated.
+ ///
+ /// [`and_then`]: Option::and_then
+ ///
/// # Examples
///
/// ```
@@ -1711,8 +1713,6 @@ impl<T, U> Option<(T, U)> {
/// # Examples
///
/// ```
- /// #![feature(unzip_option)]
- ///
/// let x = Some((1, "hi"));
/// let y = None::<(u8, u32)>;
///
@@ -1720,8 +1720,13 @@ impl<T, U> Option<(T, U)> {
/// assert_eq!(y.unzip(), (None, None));
/// ```
#[inline]
- #[unstable(feature = "unzip_option", issue = "87800", reason = "recently added")]
- pub const fn unzip(self) -> (Option<T>, Option<U>) {
+ #[stable(feature = "unzip_option", since = "1.66.0")]
+ #[rustc_const_unstable(feature = "const_option", issue = "67441")]
+ pub const fn unzip(self) -> (Option<T>, Option<U>)
+ where
+ T: ~const Destruct,
+ U: ~const Destruct,
+ {
match self {
Some((a, b)) => (Some(a), Some(b)),
None => (None, None),
@@ -2315,7 +2320,8 @@ impl<T> ops::FromResidual<ops::Yeet<()>> for Option<T> {
}
#[unstable(feature = "try_trait_v2_residual", issue = "91285")]
-impl<T> ops::Residual<T> for Option<convert::Infallible> {
+#[rustc_const_unstable(feature = "const_try", issue = "74935")]
+impl<T> const ops::Residual<T> for Option<convert::Infallible> {
type TryType = Option<T>;
}
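Two of the `Option` changes above in one sketch: `is_some_and` now takes the contained value by value, and `unzip` no longer needs its own feature gate. Assuming a nightly toolchain from this period, where `is_some_and` is still unstable:

```rust
#![feature(is_some_and)]

fn main() {
    let x: Option<u32> = Some(2);
    // The predicate receives `u32`, not `&u32`, after this change.
    assert!(x.is_some_and(|v| v > 1));
    assert!(!None::<u32>.is_some_and(|v| v > 1));

    // `unzip` splits an optional pair into a pair of options.
    let pair = Some((1, "hi"));
    assert_eq!(pair.unzip(), (Some(1), Some("hi")));
    assert_eq!(None::<(u8, u32)>.unzip(), (None, None));
}
```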
diff --git a/library/core/src/panic/location.rs b/library/core/src/panic/location.rs
index 8eefd9ff2..6dcf23dde 100644
--- a/library/core/src/panic/location.rs
+++ b/library/core/src/panic/location.rs
@@ -123,8 +123,9 @@ impl<'a> Location<'a> {
/// ```
#[must_use]
#[stable(feature = "panic_hooks", since = "1.10.0")]
+ #[rustc_const_unstable(feature = "const_location_fields", issue = "102911")]
#[inline]
- pub fn file(&self) -> &str {
+ pub const fn file(&self) -> &str {
self.file
}
@@ -147,8 +148,9 @@ impl<'a> Location<'a> {
/// ```
#[must_use]
#[stable(feature = "panic_hooks", since = "1.10.0")]
+ #[rustc_const_unstable(feature = "const_location_fields", issue = "102911")]
#[inline]
- pub fn line(&self) -> u32 {
+ pub const fn line(&self) -> u32 {
self.line
}
@@ -171,8 +173,9 @@ impl<'a> Location<'a> {
/// ```
#[must_use]
#[stable(feature = "panic_col", since = "1.25.0")]
+ #[rustc_const_unstable(feature = "const_location_fields", issue = "102911")]
#[inline]
- pub fn column(&self) -> u32 {
+ pub const fn column(&self) -> u32 {
self.col
}
}
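The accessors made `const` above are already usable at runtime on stable; a minimal sketch of reading a caller's location (the `const` aspect itself still needs the unstable `const_location_fields` feature):

```rust
use std::panic::Location;

#[track_caller]
fn where_am_i() -> &'static Location<'static> {
    // Reports the caller's location thanks to `#[track_caller]`.
    Location::caller()
}

fn main() {
    let loc = where_am_i();
    println!("called from {}:{}:{}", loc.file(), loc.line(), loc.column());
}
```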
diff --git a/library/core/src/panicking.rs b/library/core/src/panicking.rs
index 7a575a88e..a9de7c94e 100644
--- a/library/core/src/panicking.rs
+++ b/library/core/src/panicking.rs
@@ -29,6 +29,73 @@
use crate::fmt;
use crate::panic::{Location, PanicInfo};
+// First we define the two main entry points that all panics go through.
+// In the end both are just convenience wrappers around `panic_impl`.
+
+/// The entry point for panicking with a formatted message.
+///
+/// This is designed to reduce the amount of code required at the call
+/// site as much as possible (so that `panic!()` has as low an impact
+/// on (e.g.) the inlining of other functions as possible), by moving
+/// the actual formatting into this shared place.
+#[cold]
+// If panic_immediate_abort, inline the abort call,
+// otherwise avoid inlining because it is a cold path.
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
+#[track_caller]
+#[lang = "panic_fmt"] // needed for const-evaluated panics
+#[rustc_do_not_const_check] // hooked by const-eval
+#[rustc_const_unstable(feature = "core_panic", issue = "none")]
+pub const fn panic_fmt(fmt: fmt::Arguments<'_>) -> ! {
+ if cfg!(feature = "panic_immediate_abort") {
+ super::intrinsics::abort()
+ }
+
+ // NOTE This function never crosses the FFI boundary; it's a Rust-to-Rust call
+ // that gets resolved to the `#[panic_handler]` function.
+ extern "Rust" {
+ #[lang = "panic_impl"]
+ fn panic_impl(pi: &PanicInfo<'_>) -> !;
+ }
+
+ let pi = PanicInfo::internal_constructor(Some(&fmt), Location::caller(), true);
+
+ // SAFETY: `panic_impl` is defined in safe Rust code and thus is safe to call.
+ unsafe { panic_impl(&pi) }
+}
+
+/// Like `panic_fmt`, but without unwinding or `track_caller`, to reduce the impact on code size.
+/// Also operates only on `str`, as a `fmt::Arguments` needs more space to be passed.
+#[cold]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
+#[cfg_attr(not(bootstrap), rustc_nounwind)]
+#[cfg_attr(bootstrap, rustc_allocator_nounwind)]
+pub fn panic_str_nounwind(msg: &'static str) -> ! {
+ if cfg!(feature = "panic_immediate_abort") {
+ super::intrinsics::abort()
+ }
+
+ // NOTE This function never crosses the FFI boundary; it's a Rust-to-Rust call
+ // that gets resolved to the `#[panic_handler]` function.
+ extern "Rust" {
+ #[lang = "panic_impl"]
+ fn panic_impl(pi: &PanicInfo<'_>) -> !;
+ }
+
+ // PanicInfo with the `can_unwind` flag set to false forces an abort.
+ let pieces = [msg];
+ let fmt = fmt::Arguments::new_v1(&pieces, &[]);
+ let pi = PanicInfo::internal_constructor(Some(&fmt), Location::caller(), false);
+
+ // SAFETY: `panic_impl` is defined in safe Rust code and thus is safe to call.
+ unsafe { panic_impl(&pi) }
+}
+
+// Next we define a bunch of higher-level wrappers that all bottom out in the two core functions
+// above.
+
/// The underlying implementation of libcore's `panic!` macro when no formatting is used.
#[cold]
// never inline unless panic_immediate_abort to avoid code
@@ -84,62 +151,17 @@ fn panic_bounds_check(index: usize, len: usize) -> ! {
panic!("index out of bounds: the len is {len} but the index is {index}")
}
-// This function is called directly by the codegen backend, and must not have
-// any extra arguments (including those synthesized by track_caller).
+/// Panic because we cannot unwind out of a function.
+///
+/// This function is called directly by the codegen backend, and must not have
+/// any extra arguments (including those synthesized by track_caller).
#[cold]
#[inline(never)]
#[lang = "panic_no_unwind"] // needed by codegen for panic in nounwind function
+#[cfg_attr(not(bootstrap), rustc_nounwind)]
+#[cfg_attr(bootstrap, rustc_allocator_nounwind)]
fn panic_no_unwind() -> ! {
- if cfg!(feature = "panic_immediate_abort") {
- super::intrinsics::abort()
- }
-
- // NOTE This function never crosses the FFI boundary; it's a Rust-to-Rust call
- // that gets resolved to the `#[panic_handler]` function.
- extern "Rust" {
- #[lang = "panic_impl"]
- fn panic_impl(pi: &PanicInfo<'_>) -> !;
- }
-
- // PanicInfo with the `can_unwind` flag set to false forces an abort.
- let fmt = format_args!("panic in a function that cannot unwind");
- let pi = PanicInfo::internal_constructor(Some(&fmt), Location::caller(), false);
-
- // SAFETY: `panic_impl` is defined in safe Rust code and thus is safe to call.
- unsafe { panic_impl(&pi) }
-}
-
-/// The entry point for panicking with a formatted message.
-///
-/// This is designed to reduce the amount of code required at the call
-/// site as much as possible (so that `panic!()` has as low an impact
-/// on (e.g.) the inlining of other functions as possible), by moving
-/// the actual formatting into this shared place.
-#[cold]
-// If panic_immediate_abort, inline the abort call,
-// otherwise avoid inlining because of it is cold path.
-#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
-#[cfg_attr(feature = "panic_immediate_abort", inline)]
-#[track_caller]
-#[lang = "panic_fmt"] // needed for const-evaluated panics
-#[rustc_do_not_const_check] // hooked by const-eval
-#[rustc_const_unstable(feature = "core_panic", issue = "none")]
-pub const fn panic_fmt(fmt: fmt::Arguments<'_>) -> ! {
- if cfg!(feature = "panic_immediate_abort") {
- super::intrinsics::abort()
- }
-
- // NOTE This function never crosses the FFI boundary; it's a Rust-to-Rust call
- // that gets resolved to the `#[panic_handler]` function.
- extern "Rust" {
- #[lang = "panic_impl"]
- fn panic_impl(pi: &PanicInfo<'_>) -> !;
- }
-
- let pi = PanicInfo::internal_constructor(Some(&fmt), Location::caller(), true);
-
- // SAFETY: `panic_impl` is defined in safe Rust code and thus is safe to call.
- unsafe { panic_impl(&pi) }
+ panic_str_nounwind("panic in a function that cannot unwind")
}
/// This function is used instead of panic_fmt in const eval.
@@ -190,11 +212,11 @@ pub fn assert_matches_failed<T: fmt::Debug + ?Sized>(
right: &str,
args: Option<fmt::Arguments<'_>>,
) -> ! {
- // Use the Display implementation to display the pattern.
+ // The pattern is a string so it can be displayed directly.
struct Pattern<'a>(&'a str);
impl fmt::Debug for Pattern<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::Display::fmt(self.0, f)
+ f.write_str(self.0)
}
}
assert_failed_inner(AssertKind::Match, &left, &Pattern(right), args);
diff --git a/library/core/src/primitive_docs.rs b/library/core/src/primitive_docs.rs
index b8e546164..331714a99 100644
--- a/library/core/src/primitive_docs.rs
+++ b/library/core/src/primitive_docs.rs
@@ -611,7 +611,19 @@ mod prim_pointer {}
///
/// Arrays coerce to [slices (`[T]`)][slice], so a slice method may be called on
/// an array. Indeed, this provides most of the API for working with arrays.
-/// Slices have a dynamic size and do not coerce to arrays.
+///
+/// Slices have a dynamic size and do not coerce to arrays. Instead, use
+/// `slice.try_into().unwrap()` or `<ArrayType>::try_from(slice).unwrap()`.
+///
+/// Array's `try_from(slice)` implementations (and the corresponding `slice.try_into()`
+/// array implementations) succeed if the input slice length is the same as the result
+/// array length. They optimize especially well when the optimizer can easily determine
+/// the slice length, e.g. `<[u8; 4]>::try_from(&slice[4..8]).unwrap()`. Array implements
+/// [TryFrom](crate::convert::TryFrom) returning:
+///
+/// - `[T; N]` copies from the slice's elements
+/// - `&[T; N]` references the original slice's elements
+/// - `&mut [T; N]` references the original slice's elements
///
/// You can move elements out of an array with a [slice pattern]. If you want
/// one element, see [`mem::replace`].
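The list above mentions the borrowing conversions as well as the copying one; a small sketch exercising the `&[T; N]` and `&mut [T; N]` forms:

```rust
fn main() {
    let mut data = [1u8, 2, 3, 4, 5];

    // Shared reference to a fixed-size prefix of the slice.
    let head: &[u8; 2] = <&[u8; 2]>::try_from(&data[..2]).unwrap();
    assert_eq!(head, &[1, 2]);

    // Mutable reference to a fixed-size suffix; writes go to the original array.
    let tail: &mut [u8; 2] = <&mut [u8; 2]>::try_from(&mut data[3..]).unwrap();
    tail[0] = 9;
    assert_eq!(data, [1, 2, 3, 9, 5]);

    // Length mismatch is reported as an error rather than panicking.
    assert!(<&[u8; 4]>::try_from(&data[..2]).is_err());
}
```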
@@ -640,6 +652,15 @@ mod prim_pointer {}
/// for x in &array { }
/// ```
///
+/// You can use `<ArrayType>::try_from(slice)` or `slice.try_into()` to get an array from
+/// a slice:
+///
+/// ```
+/// let bytes: [u8; 3] = [1, 0, 2];
+/// assert_eq!(1, u16::from_le_bytes(<[u8; 2]>::try_from(&bytes[0..2]).unwrap()));
+/// assert_eq!(512, u16::from_le_bytes(bytes[1..3].try_into().unwrap()));
+/// ```
+///
/// You can use a [slice pattern] to move elements out of an array:
///
/// ```
@@ -801,11 +822,53 @@ mod prim_array {}
/// assert_eq!(2 * pointer_size, std::mem::size_of::<Box<[u8]>>());
/// assert_eq!(2 * pointer_size, std::mem::size_of::<Rc<[u8]>>());
/// ```
+///
+/// ## Trait Implementations
+///
+/// Some traits are implemented for slices if the element type implements
+/// that trait. This includes [`Eq`], [`Hash`] and [`Ord`].
+///
+/// ## Iteration
+///
+/// Slices implement `IntoIterator`. The iterator yields references to the
+/// slice elements.
+///
+/// ```
+/// let numbers: &[i32] = &[0, 1, 2];
+/// for n in numbers {
+/// println!("{n} is a number!");
+/// }
+/// ```
+///
+/// Iterating over a mutable slice yields mutable references to the elements:
+///
+/// ```
+/// let mut scores: &mut [i32] = &mut [7, 8, 9];
+/// for score in scores {
+/// *score += 1;
+/// }
+/// ```
+///
+/// This iterator yields mutable references to the slice's elements, so while
+/// the element type of the slice is `i32`, the element type of the iterator is
+/// `&mut i32`.
+///
+/// * [`.iter`] and [`.iter_mut`] are the explicit methods to return the default
+/// iterators.
+/// * Further methods that return iterators are [`.split`], [`.splitn`],
+/// [`.chunks`], [`.windows`] and more.
+///
+/// [`Hash`]: core::hash::Hash
+/// [`.iter`]: slice::iter
+/// [`.iter_mut`]: slice::iter_mut
+/// [`.split`]: slice::split
+/// [`.splitn`]: slice::splitn
+/// [`.chunks`]: slice::chunks
+/// [`.windows`]: slice::windows
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_slice {}
#[doc(primitive = "str")]
-//
/// String slices.
///
/// *[See also the `std::str` module](crate::str).*
@@ -816,19 +879,22 @@ mod prim_slice {}
///
/// String slices are always valid UTF-8.
///
-/// # Examples
+/// # Basic Usage
///
/// String literals are string slices:
///
/// ```
-/// let hello = "Hello, world!";
-///
-/// // with an explicit type annotation
-/// let hello: &'static str = "Hello, world!";
+/// let hello_world = "Hello, World!";
/// ```
///
-/// They are `'static` because they're stored directly in the final binary, and
-/// so will be valid for the `'static` duration.
+/// Here we have declared a string slice initialized with a string literal.
+/// String literals have a static lifetime, which means the string `hello_world`
+/// is guaranteed to be valid for the duration of the entire program.
+/// We can explicitly specify `hello_world`'s lifetime as well:
+///
+/// ```
+/// let hello_world: &'static str = "Hello, world!";
+/// ```
///
/// # Representation
///
@@ -996,7 +1062,7 @@ impl<T> (T,) {}
// Fake impl that's only really used for docs.
#[cfg(doc)]
#[stable(feature = "rust1", since = "1.0.0")]
-#[cfg_attr(not(bootstrap), doc(fake_variadic))]
+#[doc(fake_variadic)]
/// This trait is implemented on arbitrary-length tuples.
impl<T: Clone> Clone for (T,) {
fn clone(&self) -> Self {
@@ -1007,7 +1073,7 @@ impl<T: Clone> Clone for (T,) {
// Fake impl that's only really used for docs.
#[cfg(doc)]
#[stable(feature = "rust1", since = "1.0.0")]
-#[cfg_attr(not(bootstrap), doc(fake_variadic))]
+#[doc(fake_variadic)]
/// This trait is implemented on arbitrary-length tuples.
impl<T: Copy> Copy for (T,) {
// empty
@@ -1178,7 +1244,7 @@ mod prim_usize {}
#[doc(alias = "&")]
#[doc(alias = "&mut")]
//
-/// References, both shared and mutable.
+/// References, `&T` and `&mut T`.
///
/// A reference represents a borrow of some owned value. You can get one by using the `&` or `&mut`
/// operators on a value, or by using a [`ref`](../std/keyword.ref.html) or
@@ -1484,13 +1550,12 @@ mod prim_fn {}
// Required to make auto trait impls render.
// See src/librustdoc/passes/collect_trait_impls.rs:collect_trait_impls
#[doc(hidden)]
-#[cfg(not(bootstrap))]
impl<Ret, T> fn(T) -> Ret {}
// Fake impl that's only really used for docs.
#[cfg(doc)]
#[stable(feature = "rust1", since = "1.0.0")]
-#[cfg_attr(not(bootstrap), doc(fake_variadic))]
+#[doc(fake_variadic)]
/// This trait is implemented on function pointers with any number of arguments.
impl<Ret, T> Clone for fn(T) -> Ret {
fn clone(&self) -> Self {
@@ -1501,7 +1566,7 @@ impl<Ret, T> Clone for fn(T) -> Ret {
// Fake impl that's only really used for docs.
#[cfg(doc)]
#[stable(feature = "rust1", since = "1.0.0")]
-#[cfg_attr(not(bootstrap), doc(fake_variadic))]
+#[doc(fake_variadic)]
/// This trait is implemented on function pointers with any number of arguments.
impl<Ret, T> Copy for fn(T) -> Ret {
// empty
diff --git a/library/core/src/ptr/alignment.rs b/library/core/src/ptr/alignment.rs
new file mode 100644
index 000000000..1390e09dd
--- /dev/null
+++ b/library/core/src/ptr/alignment.rs
@@ -0,0 +1,326 @@
+use crate::convert::{TryFrom, TryInto};
+use crate::intrinsics::assert_unsafe_precondition;
+use crate::num::NonZeroUsize;
+use crate::{cmp, fmt, hash, mem, num};
+
+/// A type storing a `usize` which is a power of two, and thus
+/// represents a possible alignment in the Rust abstract machine.
+///
+/// Note that particularly large alignments, while representable in this type,
+/// are likely not to be supported by actual allocators and linkers.
+#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+#[derive(Copy, Clone, Eq, PartialEq)]
+#[repr(transparent)]
+pub struct Alignment(AlignmentEnum);
+
+// Alignment is `repr(usize)`, but via extra steps.
+const _: () = assert!(mem::size_of::<Alignment>() == mem::size_of::<usize>());
+const _: () = assert!(mem::align_of::<Alignment>() == mem::align_of::<usize>());
+
+fn _alignment_can_be_structurally_matched(a: Alignment) -> bool {
+ matches!(a, Alignment::MIN)
+}
+
+impl Alignment {
+ /// The smallest possible alignment, 1.
+ ///
+ /// All addresses are always aligned at least this much.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ptr_alignment_type)]
+ /// use std::ptr::Alignment;
+ ///
+ /// assert_eq!(Alignment::MIN.as_usize(), 1);
+ /// ```
+ #[unstable(feature = "ptr_alignment_type", issue = "102070")]
+ pub const MIN: Self = Self(AlignmentEnum::_Align1Shl0);
+
+ /// Returns the alignment for a type.
+ ///
+ /// This provides the same numerical value as [`mem::align_of`],
+    /// but in an `Alignment` instead of a `usize`.
+ #[unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[inline]
+ pub const fn of<T>() -> Self {
+ // SAFETY: rustc ensures that type alignment is always a power of two.
+ unsafe { Alignment::new_unchecked(mem::align_of::<T>()) }
+ }
+
+ /// Creates an `Alignment` from a `usize`, or returns `None` if it's
+ /// not a power of two.
+ ///
+ /// Note that `0` is not a power of two, nor a valid alignment.
+ #[unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[inline]
+ pub const fn new(align: usize) -> Option<Self> {
+ if align.is_power_of_two() {
+ // SAFETY: Just checked it only has one bit set
+ Some(unsafe { Self::new_unchecked(align) })
+ } else {
+ None
+ }
+ }
+
+ /// Creates an `Alignment` from a power-of-two `usize`.
+ ///
+ /// # Safety
+ ///
+ /// `align` must be a power of two.
+ ///
+ /// Equivalently, it must be `1 << exp` for some `exp` in `0..usize::BITS`.
+ /// It must *not* be zero.
+ #[unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[rustc_const_unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[inline]
+ pub const unsafe fn new_unchecked(align: usize) -> Self {
+ // SAFETY: Precondition passed to the caller.
+ unsafe {
+ assert_unsafe_precondition!(
+ "Alignment::new_unchecked requires a power of two",
+ (align: usize) => align.is_power_of_two()
+ )
+ };
+
+ // SAFETY: By precondition, this must be a power of two, and
+ // our variants encompass all possible powers of two.
+ unsafe { mem::transmute::<usize, Alignment>(align) }
+ }
+
+    /// Returns the alignment as a [`usize`].
+ #[unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[rustc_const_unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[inline]
+ pub const fn as_usize(self) -> usize {
+ self.0 as usize
+ }
+
+    /// Returns the alignment as a [`NonZeroUsize`].
+ #[unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[inline]
+ pub const fn as_nonzero(self) -> NonZeroUsize {
+ // SAFETY: All the discriminants are non-zero.
+ unsafe { NonZeroUsize::new_unchecked(self.as_usize()) }
+ }
+
+ /// Returns the base-2 logarithm of the alignment.
+ ///
+ /// This is always exact, as `self` represents a power of two.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ptr_alignment_type)]
+ /// use std::ptr::Alignment;
+ ///
+ /// assert_eq!(Alignment::of::<u8>().log2(), 0);
+ /// assert_eq!(Alignment::new(1024).unwrap().log2(), 10);
+ /// ```
+ #[unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[inline]
+ pub fn log2(self) -> u32 {
+ self.as_nonzero().trailing_zeros()
+ }
+}
+
+#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+impl fmt::Debug for Alignment {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:?} (1 << {:?})", self.as_nonzero(), self.log2())
+ }
+}
+
+#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+impl TryFrom<NonZeroUsize> for Alignment {
+ type Error = num::TryFromIntError;
+
+ #[inline]
+ fn try_from(align: NonZeroUsize) -> Result<Alignment, Self::Error> {
+ align.get().try_into()
+ }
+}
+
+#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+impl TryFrom<usize> for Alignment {
+ type Error = num::TryFromIntError;
+
+ #[inline]
+ fn try_from(align: usize) -> Result<Alignment, Self::Error> {
+ Self::new(align).ok_or(num::TryFromIntError(()))
+ }
+}
+
+#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+impl From<Alignment> for NonZeroUsize {
+ #[inline]
+ fn from(align: Alignment) -> NonZeroUsize {
+ align.as_nonzero()
+ }
+}
+
+#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+impl From<Alignment> for usize {
+ #[inline]
+ fn from(align: Alignment) -> usize {
+ align.as_usize()
+ }
+}
+
+#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+impl cmp::Ord for Alignment {
+ #[inline]
+ fn cmp(&self, other: &Self) -> cmp::Ordering {
+ self.as_nonzero().cmp(&other.as_nonzero())
+ }
+}
+
+#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+impl cmp::PartialOrd for Alignment {
+ #[inline]
+ fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+impl hash::Hash for Alignment {
+ #[inline]
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ self.as_nonzero().hash(state)
+ }
+}
+
+#[cfg(target_pointer_width = "16")]
+type AlignmentEnum = AlignmentEnum16;
+#[cfg(target_pointer_width = "32")]
+type AlignmentEnum = AlignmentEnum32;
+#[cfg(target_pointer_width = "64")]
+type AlignmentEnum = AlignmentEnum64;
+
+#[derive(Copy, Clone, Eq, PartialEq)]
+#[repr(u16)]
+enum AlignmentEnum16 {
+ _Align1Shl0 = 1 << 0,
+ _Align1Shl1 = 1 << 1,
+ _Align1Shl2 = 1 << 2,
+ _Align1Shl3 = 1 << 3,
+ _Align1Shl4 = 1 << 4,
+ _Align1Shl5 = 1 << 5,
+ _Align1Shl6 = 1 << 6,
+ _Align1Shl7 = 1 << 7,
+ _Align1Shl8 = 1 << 8,
+ _Align1Shl9 = 1 << 9,
+ _Align1Shl10 = 1 << 10,
+ _Align1Shl11 = 1 << 11,
+ _Align1Shl12 = 1 << 12,
+ _Align1Shl13 = 1 << 13,
+ _Align1Shl14 = 1 << 14,
+ _Align1Shl15 = 1 << 15,
+}
+
+#[derive(Copy, Clone, Eq, PartialEq)]
+#[repr(u32)]
+enum AlignmentEnum32 {
+ _Align1Shl0 = 1 << 0,
+ _Align1Shl1 = 1 << 1,
+ _Align1Shl2 = 1 << 2,
+ _Align1Shl3 = 1 << 3,
+ _Align1Shl4 = 1 << 4,
+ _Align1Shl5 = 1 << 5,
+ _Align1Shl6 = 1 << 6,
+ _Align1Shl7 = 1 << 7,
+ _Align1Shl8 = 1 << 8,
+ _Align1Shl9 = 1 << 9,
+ _Align1Shl10 = 1 << 10,
+ _Align1Shl11 = 1 << 11,
+ _Align1Shl12 = 1 << 12,
+ _Align1Shl13 = 1 << 13,
+ _Align1Shl14 = 1 << 14,
+ _Align1Shl15 = 1 << 15,
+ _Align1Shl16 = 1 << 16,
+ _Align1Shl17 = 1 << 17,
+ _Align1Shl18 = 1 << 18,
+ _Align1Shl19 = 1 << 19,
+ _Align1Shl20 = 1 << 20,
+ _Align1Shl21 = 1 << 21,
+ _Align1Shl22 = 1 << 22,
+ _Align1Shl23 = 1 << 23,
+ _Align1Shl24 = 1 << 24,
+ _Align1Shl25 = 1 << 25,
+ _Align1Shl26 = 1 << 26,
+ _Align1Shl27 = 1 << 27,
+ _Align1Shl28 = 1 << 28,
+ _Align1Shl29 = 1 << 29,
+ _Align1Shl30 = 1 << 30,
+ _Align1Shl31 = 1 << 31,
+}
+
+#[derive(Copy, Clone, Eq, PartialEq)]
+#[repr(u64)]
+enum AlignmentEnum64 {
+ _Align1Shl0 = 1 << 0,
+ _Align1Shl1 = 1 << 1,
+ _Align1Shl2 = 1 << 2,
+ _Align1Shl3 = 1 << 3,
+ _Align1Shl4 = 1 << 4,
+ _Align1Shl5 = 1 << 5,
+ _Align1Shl6 = 1 << 6,
+ _Align1Shl7 = 1 << 7,
+ _Align1Shl8 = 1 << 8,
+ _Align1Shl9 = 1 << 9,
+ _Align1Shl10 = 1 << 10,
+ _Align1Shl11 = 1 << 11,
+ _Align1Shl12 = 1 << 12,
+ _Align1Shl13 = 1 << 13,
+ _Align1Shl14 = 1 << 14,
+ _Align1Shl15 = 1 << 15,
+ _Align1Shl16 = 1 << 16,
+ _Align1Shl17 = 1 << 17,
+ _Align1Shl18 = 1 << 18,
+ _Align1Shl19 = 1 << 19,
+ _Align1Shl20 = 1 << 20,
+ _Align1Shl21 = 1 << 21,
+ _Align1Shl22 = 1 << 22,
+ _Align1Shl23 = 1 << 23,
+ _Align1Shl24 = 1 << 24,
+ _Align1Shl25 = 1 << 25,
+ _Align1Shl26 = 1 << 26,
+ _Align1Shl27 = 1 << 27,
+ _Align1Shl28 = 1 << 28,
+ _Align1Shl29 = 1 << 29,
+ _Align1Shl30 = 1 << 30,
+ _Align1Shl31 = 1 << 31,
+ _Align1Shl32 = 1 << 32,
+ _Align1Shl33 = 1 << 33,
+ _Align1Shl34 = 1 << 34,
+ _Align1Shl35 = 1 << 35,
+ _Align1Shl36 = 1 << 36,
+ _Align1Shl37 = 1 << 37,
+ _Align1Shl38 = 1 << 38,
+ _Align1Shl39 = 1 << 39,
+ _Align1Shl40 = 1 << 40,
+ _Align1Shl41 = 1 << 41,
+ _Align1Shl42 = 1 << 42,
+ _Align1Shl43 = 1 << 43,
+ _Align1Shl44 = 1 << 44,
+ _Align1Shl45 = 1 << 45,
+ _Align1Shl46 = 1 << 46,
+ _Align1Shl47 = 1 << 47,
+ _Align1Shl48 = 1 << 48,
+ _Align1Shl49 = 1 << 49,
+ _Align1Shl50 = 1 << 50,
+ _Align1Shl51 = 1 << 51,
+ _Align1Shl52 = 1 << 52,
+ _Align1Shl53 = 1 << 53,
+ _Align1Shl54 = 1 << 54,
+ _Align1Shl55 = 1 << 55,
+ _Align1Shl56 = 1 << 56,
+ _Align1Shl57 = 1 << 57,
+ _Align1Shl58 = 1 << 58,
+ _Align1Shl59 = 1 << 59,
+ _Align1Shl60 = 1 << 60,
+ _Align1Shl61 = 1 << 61,
+ _Align1Shl62 = 1 << 62,
+ _Align1Shl63 = 1 << 63,
+}
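Aside: a small usage sketch of the new `Alignment` type and its conversions, assuming a nightly toolchain with the unstable `ptr_alignment_type` feature:

```
#![feature(ptr_alignment_type)]
use std::ptr::Alignment;

fn main() {
    // Only powers of two are representable; `new` rejects everything else.
    assert!(Alignment::new(3).is_none());
    let a = Alignment::new(8).unwrap();
    assert_eq!(a.as_usize(), 8);
    assert_eq!(a.log2(), 3);

    // `of::<T>()` mirrors `mem::align_of::<T>()`.
    assert_eq!(Alignment::of::<u64>().as_usize(), std::mem::align_of::<u64>());

    // The fallible `TryFrom<usize>` conversion routes through `Alignment::new`.
    assert!(Alignment::try_from(24usize).is_err());
    assert_eq!(Alignment::try_from(16usize).unwrap().as_usize(), 16);
}
```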
diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs
index e0655d68d..5a083227b 100644
--- a/library/core/src/ptr/const_ptr.rs
+++ b/library/core/src/ptr/const_ptr.rs
@@ -36,7 +36,10 @@ impl<T: ?Sized> *const T {
pub const fn is_null(self) -> bool {
// Compare via a cast to a thin pointer, so fat pointers are only
// considering their "data" part for null-ness.
- (self as *const u8).guaranteed_eq(null())
+ match (self as *const u8).guaranteed_eq(null()) {
+ None => false,
+ Some(res) => res,
+ }
}
/// Casts to a pointer of another type.
@@ -95,8 +98,8 @@ impl<T: ?Sized> *const T {
///
/// This is a bit safer than `as` because it wouldn't silently change the type if the code is
/// refactored.
- #[unstable(feature = "ptr_const_cast", issue = "92675")]
- #[rustc_const_unstable(feature = "ptr_const_cast", issue = "92675")]
+ #[stable(feature = "ptr_const_cast", since = "1.65.0")]
+ #[rustc_const_stable(feature = "ptr_const_cast", since = "1.65.0")]
pub const fn cast_mut(self) -> *mut T {
self as _
}
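Aside: the stabilized `cast_mut`/`cast_const` pair in action; the point is that the pointee type is preserved, unlike a bare `as` cast that could silently change it during a refactor:

```
fn main() {
    let mut value = 5i32;
    // Start from a mutable raw pointer so the write below stays sound.
    let unique: *mut i32 = &mut value;
    let shared: *const i32 = unique.cast_const();

    // `cast_mut` recovers a mutable raw pointer without changing the pointee type.
    let back: *mut i32 = shared.cast_mut();
    unsafe { *back = 6 };
    assert_eq!(value, 6);
}
```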
@@ -154,7 +157,7 @@ impl<T: ?Sized> *const T {
/// This is similar to `self as usize`, which semantically discards *provenance* and
/// *address-space* information. However, unlike `self as usize`, casting the returned address
/// back to a pointer yields [`invalid`][], which is undefined behavior to dereference. To
- /// properly restore the lost information and obtain a dereferencable pointer, use
+ /// properly restore the lost information and obtain a dereferenceable pointer, use
/// [`with_addr`][pointer::with_addr] or [`map_addr`][pointer::map_addr].
///
/// If using those APIs is not possible because there is no way to preserve a pointer with the
@@ -249,7 +252,7 @@ impl<T: ?Sized> *const T {
let offset = dest_addr.wrapping_sub(self_addr);
// This is the canonical desugarring of this operation
- self.cast::<u8>().wrapping_offset(offset).cast::<T>()
+ self.wrapping_byte_offset(offset)
}
/// Creates a new pointer by mapping `self`'s address to a new one.
@@ -559,6 +562,20 @@ impl<T: ?Sized> *const T {
from_raw_parts::<T>(self.cast::<u8>().wrapping_offset(count).cast::<()>(), metadata(self))
}
+ /// Masks out bits of the pointer according to a mask.
+ ///
+ /// This is convenience for `ptr.map_addr(|a| a & mask)`.
+ ///
+ /// For non-`Sized` pointees this operation changes only the data pointer,
+ /// leaving the metadata untouched.
+ #[unstable(feature = "ptr_mask", issue = "98290")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[inline(always)]
+ pub fn mask(self, mask: usize) -> *const T {
+ let this = intrinsics::ptr_mask(self.cast::<()>(), mask);
+ from_raw_parts::<T>(this, metadata(self))
+ }
+
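Aside: a sketch of how the new `mask` method might be used for a low-bit pointer tag, assuming the unstable `ptr_mask` and `strict_provenance` features (the latter for `map_addr`):

```
#![feature(ptr_mask, strict_provenance)]

fn main() {
    let value = 0u32; // 4-byte aligned, so the two low address bits are free for a tag
    let tagged: *const u32 = (&value as *const u32).map_addr(|a| a | 0b01);

    // `mask` clears the tag bits while keeping the original provenance,
    // exactly like `tagged.map_addr(|a| a & !0b11)`.
    let untagged = tagged.mask(!0b11);
    assert_eq!(unsafe { *untagged }, 0);
}
```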
/// Calculates the distance between two pointers. The returned value is in
/// units of T: the distance in bytes divided by `mem::size_of::<T>()`.
///
@@ -641,7 +658,7 @@ impl<T: ?Sized> *const T {
/// }
/// ```
#[stable(feature = "ptr_offset_from", since = "1.47.0")]
- #[rustc_const_unstable(feature = "const_ptr_offset_from", issue = "92980")]
+ #[rustc_const_stable(feature = "const_ptr_offset_from", since = "1.65.0")]
#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn offset_from(self, origin: *const T) -> isize
@@ -677,7 +694,7 @@ impl<T: ?Sized> *const T {
/// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
///
/// This computes the same value that [`offset_from`](#method.offset_from)
- /// would compute, but with the added precondition that that the offset is
+ /// would compute, but with the added precondition that the offset is
/// guaranteed to be non-negative. This method is equivalent to
/// `usize::from(self.offset_from(origin)).unwrap_unchecked()`,
/// but it provides slightly more information to the optimizer, which can
@@ -740,9 +757,15 @@ impl<T: ?Sized> *const T {
where
T: Sized,
{
+ let this = self;
// SAFETY: The comparison has no side-effects, and the intrinsic
// does this check internally in the CTFE implementation.
- unsafe { assert_unsafe_precondition!(self >= origin) };
+ unsafe {
+ assert_unsafe_precondition!(
+ "ptr::sub_ptr requires `this >= origin`",
+ [T](this: *const T, origin: *const T) => this >= origin
+ )
+ };
let pointee_size = mem::size_of::<T>();
assert!(0 < pointee_size && pointee_size <= isize::MAX as usize);
@@ -752,20 +775,16 @@ impl<T: ?Sized> *const T {
/// Returns whether two pointers are guaranteed to be equal.
///
- /// At runtime this function behaves like `self == other`.
+ /// At runtime this function behaves like `Some(self == other)`.
/// However, in some contexts (e.g., compile-time evaluation),
/// it is not always possible to determine equality of two pointers, so this function may
- /// spuriously return `false` for pointers that later actually turn out to be equal.
- /// But when it returns `true`, the pointers are guaranteed to be equal.
+/// spuriously return `None` for pointers that later actually turn out to have their equality known.
+ /// But when it returns `Some`, the pointers' equality is guaranteed to be known.
///
- /// This function is the mirror of [`guaranteed_ne`], but not its inverse. There are pointer
- /// comparisons for which both functions return `false`.
- ///
- /// [`guaranteed_ne`]: #method.guaranteed_ne
- ///
- /// The return value may change depending on the compiler version and unsafe code must not
+ /// The return value may change from `Some` to `None` and vice versa depending on the compiler
+/// version, and unsafe code must not
/// rely on the result of this function for soundness. It is suggested to only use this function
- /// for performance optimizations where spurious `false` return values by this function do not
+ /// for performance optimizations where spurious `None` return values by this function do not
/// affect the outcome, but just the performance.
/// The consequences of using this method to make runtime and compile-time code behave
/// differently have not been explored. This method should not be used to introduce such
@@ -774,29 +793,28 @@ impl<T: ?Sized> *const T {
#[unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
#[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
#[inline]
- pub const fn guaranteed_eq(self, other: *const T) -> bool
+ pub const fn guaranteed_eq(self, other: *const T) -> Option<bool>
where
T: Sized,
{
- intrinsics::ptr_guaranteed_eq(self, other)
+ match intrinsics::ptr_guaranteed_cmp(self as _, other as _) {
+ 2 => None,
+ other => Some(other == 1),
+ }
}
- /// Returns whether two pointers are guaranteed to be unequal.
+ /// Returns whether two pointers are guaranteed to be inequal.
///
- /// At runtime this function behaves like `self != other`.
+ /// At runtime this function behaves like `Some(self != other)`.
/// However, in some contexts (e.g., compile-time evaluation),
- /// it is not always possible to determine the inequality of two pointers, so this function may
- /// spuriously return `false` for pointers that later actually turn out to be unequal.
- /// But when it returns `true`, the pointers are guaranteed to be unequal.
- ///
- /// This function is the mirror of [`guaranteed_eq`], but not its inverse. There are pointer
- /// comparisons for which both functions return `false`.
- ///
- /// [`guaranteed_eq`]: #method.guaranteed_eq
+ /// it is not always possible to determine inequality of two pointers, so this function may
+/// spuriously return `None` for pointers that later actually turn out to have their inequality known.
+ /// But when it returns `Some`, the pointers' inequality is guaranteed to be known.
///
- /// The return value may change depending on the compiler version and unsafe code must not
+ /// The return value may change from `Some` to `None` and vice versa depending on the compiler
+/// version, and unsafe code must not
/// rely on the result of this function for soundness. It is suggested to only use this function
- /// for performance optimizations where spurious `false` return values by this function do not
+ /// for performance optimizations where spurious `None` return values by this function do not
/// affect the outcome, but just the performance.
/// The consequences of using this method to make runtime and compile-time code behave
/// differently have not been explored. This method should not be used to introduce such
@@ -805,11 +823,14 @@ impl<T: ?Sized> *const T {
#[unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
#[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
#[inline]
- pub const fn guaranteed_ne(self, other: *const T) -> bool
+ pub const fn guaranteed_ne(self, other: *const T) -> Option<bool>
where
T: Sized,
{
- intrinsics::ptr_guaranteed_ne(self, other)
+ match self.guaranteed_eq(other) {
+ None => None,
+ Some(eq) => Some(!eq),
+ }
}
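Aside: a sketch of the new `Option<bool>` return shape, assuming the unstable `const_raw_ptr_comparison` feature; at runtime the answer is always `Some`, while const evaluation may legitimately report `None`:

```
#![feature(const_raw_ptr_comparison)]

fn main() {
    let x = 1u8;
    let p: *const u8 = &x;

    match p.guaranteed_eq(p) {
        Some(true) => println!("known equal"),
        Some(false) => println!("known unequal"),
        None => println!("equality could not be determined"),
    }

    // Callers that only need a conservative answer can collapse `None`,
    // as the updated `is_null` does above.
    let definitely_null = p.guaranteed_eq(std::ptr::null()).unwrap_or(false);
    assert!(!definitely_null);
}
```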
/// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
@@ -1267,20 +1288,21 @@ impl<T: ?Sized> *const T {
/// Accessing adjacent `u8` as `u16`
///
/// ```
- /// # fn foo(n: usize) {
- /// # use std::mem::align_of;
+ /// use std::mem::align_of;
+ ///
/// # unsafe {
- /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
- /// let ptr = x.as_ptr().add(n) as *const u8;
+ /// let x = [5_u8, 6, 7, 8, 9];
+ /// let ptr = x.as_ptr();
/// let offset = ptr.align_offset(align_of::<u16>());
- /// if offset < x.len() - n - 1 {
- /// let u16_ptr = ptr.add(offset) as *const u16;
- /// assert_ne!(*u16_ptr, 500);
+ ///
+ /// if offset < x.len() - 1 {
+ /// let u16_ptr = ptr.add(offset).cast::<u16>();
+ /// assert!(*u16_ptr == u16::from_ne_bytes([5, 6]) || *u16_ptr == u16::from_ne_bytes([6, 7]));
/// } else {
/// // while the pointer can be aligned via `offset`, it would point
/// // outside the allocation
/// }
- /// # } }
+ /// # }
/// ```
#[stable(feature = "align_offset", since = "1.36.0")]
#[rustc_const_unstable(feature = "const_align_offset", issue = "90962")]
@@ -1336,11 +1358,8 @@ impl<T: ?Sized> *const T {
panic!("is_aligned_to: align is not a power-of-two");
}
- // SAFETY: `is_power_of_two()` will return `false` for zero.
- unsafe { core::intrinsics::assume(align != 0) };
-
// Cast is needed for `T: !Sized`
- self.cast::<u8>().addr() % align == 0
+ self.cast::<u8>().addr() & align - 1 == 0
}
}
diff --git a/library/core/src/ptr/metadata.rs b/library/core/src/ptr/metadata.rs
index cd5edee04..caa10f181 100644
--- a/library/core/src/ptr/metadata.rs
+++ b/library/core/src/ptr/metadata.rs
@@ -135,16 +135,16 @@ pub const fn from_raw_parts_mut<T: ?Sized>(
}
#[repr(C)]
-pub(crate) union PtrRepr<T: ?Sized> {
- pub(crate) const_ptr: *const T,
- pub(crate) mut_ptr: *mut T,
- pub(crate) components: PtrComponents<T>,
+union PtrRepr<T: ?Sized> {
+ const_ptr: *const T,
+ mut_ptr: *mut T,
+ components: PtrComponents<T>,
}
#[repr(C)]
-pub(crate) struct PtrComponents<T: ?Sized> {
- pub(crate) data_address: *const (),
- pub(crate) metadata: <T as Pointee>::Metadata,
+struct PtrComponents<T: ?Sized> {
+ data_address: *const (),
+ metadata: <T as Pointee>::Metadata,
}
// Manual impl needed to avoid `T: Copy` bound.
@@ -180,7 +180,6 @@ pub struct DynMetadata<Dyn: ?Sized> {
phantom: crate::marker::PhantomData<Dyn>,
}
-#[cfg(not(bootstrap))]
extern "C" {
/// Opaque type for accessing vtables.
///
@@ -189,17 +188,6 @@ extern "C" {
type VTable;
}
-/// The common prefix of all vtables. It is followed by function pointers for trait methods.
-///
-/// Private implementation detail of `DynMetadata::size_of` etc.
-#[repr(C)]
-#[cfg(bootstrap)]
-struct VTable {
- drop_in_place: fn(*mut ()),
- size_of: usize,
- align_of: usize,
-}
-
impl<Dyn: ?Sized> DynMetadata<Dyn> {
/// Returns the size of the type associated with this vtable.
#[inline]
@@ -207,9 +195,6 @@ impl<Dyn: ?Sized> DynMetadata<Dyn> {
// Note that "size stored in vtable" is *not* the same as "result of size_of_val_raw".
// Consider a reference like `&(i32, dyn Send)`: the vtable will only store the size of the
// `Send` part!
- #[cfg(bootstrap)]
- return self.vtable_ptr.size_of;
- #[cfg(not(bootstrap))]
// SAFETY: DynMetadata always contains a valid vtable pointer
return unsafe {
crate::intrinsics::vtable_size(self.vtable_ptr as *const VTable as *const ())
@@ -219,9 +204,6 @@ impl<Dyn: ?Sized> DynMetadata<Dyn> {
/// Returns the alignment of the type associated with this vtable.
#[inline]
pub fn align_of(self) -> usize {
- #[cfg(bootstrap)]
- return self.vtable_ptr.align_of;
- #[cfg(not(bootstrap))]
// SAFETY: DynMetadata always contains a valid vtable pointer
return unsafe {
crate::intrinsics::vtable_align(self.vtable_ptr as *const VTable as *const ())
diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs
index 40e28e636..565c38d22 100644
--- a/library/core/src/ptr/mod.rs
+++ b/library/core/src/ptr/mod.rs
@@ -90,7 +90,7 @@
//! isn't *pointer*-sized but address-space/offset/allocation-sized (we'll probably continue
//! to conflate these notions). This would potentially make it possible to more efficiently
//! target platforms where pointers are larger than offsets, such as CHERI and maybe some
-//! segmented architecures.
+//! segmented architectures.
//!
//! ## Provenance
//!
@@ -172,7 +172,7 @@
//! a pointer to a usize is generally an operation which *only* extracts the address. It is
//! therefore *impossible* to construct a valid pointer from a usize because there is no way
//! to restore the address-space and provenance. In other words, pointer-integer-pointer
-//! roundtrips are not possible (in the sense that the resulting pointer is not dereferencable).
+//! roundtrips are not possible (in the sense that the resulting pointer is not dereferenceable).
//!
//! The key insight to making this model *at all* viable is the [`with_addr`][] method:
//!
@@ -272,7 +272,7 @@
//!
//! * Create an invalid pointer from just an address (see [`ptr::invalid`][]). This can
//! be used for sentinel values like `null` *or* to represent a tagged pointer that will
-//! never be dereferencable. In general, it is always sound for an integer to pretend
+//! never be dereferenceable. In general, it is always sound for an integer to pretend
//! to be a pointer "for fun" as long as you don't use operations on it which require
//! it to be valid (offset, read, write, etc).
//!
@@ -377,6 +377,10 @@ use crate::intrinsics::{
use crate::mem::{self, MaybeUninit};
+mod alignment;
+#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+pub use alignment::Alignment;
+
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(inline)]
pub use crate::intrinsics::copy_nonoverlapping;
@@ -390,7 +394,6 @@ pub use crate::intrinsics::copy;
pub use crate::intrinsics::write_bytes;
mod metadata;
-pub(crate) use metadata::PtrRepr;
#[unstable(feature = "ptr_metadata", issue = "81513")]
pub use metadata::{from_raw_parts, from_raw_parts_mut, metadata, DynMetadata, Pointee, Thin};
@@ -578,12 +581,21 @@ pub const fn invalid_mut<T>(addr: usize) -> *mut T {
/// Convert an address back to a pointer, picking up a previously 'exposed' provenance.
///
/// This is equivalent to `addr as *const T`. The provenance of the returned pointer is that of *any*
-/// pointer that was previously passed to [`expose_addr`][pointer::expose_addr] or a `ptr as usize`
-/// cast. If there is no previously 'exposed' provenance that justifies the way this pointer will be
-/// used, the program has undefined behavior. Note that there is no algorithm that decides which
-/// provenance will be used. You can think of this as "guessing" the right provenance, and the guess
-/// will be "maximally in your favor", in the sense that if there is any way to avoid undefined
-/// behavior, then that is the guess that will be taken.
+/// pointer that was previously exposed by passing it to [`expose_addr`][pointer::expose_addr],
+/// or a `ptr as usize` cast. In addition, memory which is outside the control of the Rust abstract
+/// machine (MMIO registers, for example) is always considered to be exposed, so long as this memory
+/// is disjoint from memory that will be used by the abstract machine such as the stack, heap,
+/// and statics.
+///
+/// If there is no 'exposed' provenance that justifies the way this pointer will be used,
+/// the program has undefined behavior. In particular, the aliasing rules still apply: pointers
+/// and references that have been invalidated due to aliasing accesses cannot be used any more,
+/// even if they have been exposed!
+///
+/// Note that there is no algorithm that decides which provenance will be used. You can think of this
+/// as "guessing" the right provenance, and the guess will be "maximally in your favor", in the sense
+/// that if there is any way to avoid undefined behavior (while upholding all aliasing requirements),
+/// then that is the guess that will be taken.
///
/// On platforms with multiple address spaces, it is your responsibility to ensure that the
/// address makes sense in the address space that this pointer will be used with.
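Aside: a minimal round-trip sketch of the exposed-provenance API described above, assuming the unstable `strict_provenance` feature of this vintage (which provides both `expose_addr` and `from_exposed_addr`):

```
#![feature(strict_provenance)]
use std::ptr;

fn main() {
    let x = 7u32;
    // Exposing the provenance records it and hands back a plain address.
    let addr = (&x as *const u32).expose_addr();

    // Reconstructing a pointer from that address is justified by the exposure above.
    let p = ptr::from_exposed_addr::<u32>(addr);
    assert_eq!(unsafe { *p }, 7);
}
```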
@@ -603,6 +615,7 @@ pub const fn invalid_mut<T>(addr: usize) -> *mut T {
#[must_use]
#[inline]
#[unstable(feature = "strict_provenance", issue = "95228")]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn from_exposed_addr<T>(addr: usize) -> *const T
where
T: Sized,
@@ -639,6 +652,7 @@ where
#[must_use]
#[inline]
#[unstable(feature = "strict_provenance", issue = "95228")]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn from_exposed_addr_mut<T>(addr: usize) -> *mut T
where
T: Sized,
@@ -885,6 +899,9 @@ pub const unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
// valid for writes and properly aligned.
unsafe {
assert_unsafe_precondition!(
+ "ptr::swap_nonoverlapping requires that both pointer arguments are aligned and non-null \
+ and the specified memory ranges do not overlap",
+ [T](x: *mut T, y: *mut T, count: usize) =>
is_aligned_and_not_null(x)
&& is_aligned_and_not_null(y)
&& is_nonoverlapping(x, y, count)
@@ -981,7 +998,10 @@ pub const unsafe fn replace<T>(dst: *mut T, mut src: T) -> T {
// and cannot overlap `src` since `dst` must point to a distinct
// allocated object.
unsafe {
- assert_unsafe_precondition!(is_aligned_and_not_null(dst));
+ assert_unsafe_precondition!(
+ "ptr::replace requires that the pointer argument is aligned and non-null",
+ [T](dst: *mut T) => is_aligned_and_not_null(dst)
+ );
mem::swap(&mut *dst, &mut src); // cannot overlap
}
src
@@ -1112,6 +1132,10 @@ pub const unsafe fn read<T>(src: *const T) -> T {
// Also, since we just wrote a valid value into `tmp`, it is guaranteed
// to be properly initialized.
unsafe {
+ assert_unsafe_precondition!(
+ "ptr::read requires that the pointer argument is aligned and non-null",
+ [T](src: *const T) => is_aligned_and_not_null(src)
+ );
copy_nonoverlapping(src, tmp.as_mut_ptr(), 1);
tmp.assume_init()
}
@@ -1305,6 +1329,10 @@ pub const unsafe fn write<T>(dst: *mut T, src: T) {
// `dst` cannot overlap `src` because the caller has mutable access
// to `dst` while `src` is owned by this function.
unsafe {
+ assert_unsafe_precondition!(
+ "ptr::write requires that the pointer argument is aligned and non-null",
+ [T](dst: *mut T) => is_aligned_and_not_null(dst)
+ );
copy_nonoverlapping(&src as *const T, dst, 1);
intrinsics::forget(src);
}
@@ -1468,7 +1496,10 @@ pub const unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
pub unsafe fn read_volatile<T>(src: *const T) -> T {
// SAFETY: the caller must uphold the safety contract for `volatile_load`.
unsafe {
- assert_unsafe_precondition!(is_aligned_and_not_null(src));
+ assert_unsafe_precondition!(
+ "ptr::read_volatile requires that the pointer argument is aligned and non-null",
+ [T](src: *const T) => is_aligned_and_not_null(src)
+ );
intrinsics::volatile_load(src)
}
}
@@ -1539,7 +1570,10 @@ pub unsafe fn read_volatile<T>(src: *const T) -> T {
pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
// SAFETY: the caller must uphold the safety contract for `volatile_store`.
unsafe {
- assert_unsafe_precondition!(is_aligned_and_not_null(dst));
+ assert_unsafe_precondition!(
+ "ptr::write_volatile requires that the pointer argument is aligned and non-null",
+ [T](dst: *mut T) => is_aligned_and_not_null(dst)
+ );
intrinsics::volatile_store(dst, src);
}
}
@@ -1726,6 +1760,12 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
/// by their address rather than comparing the values they point to
/// (which is what the `PartialEq for &T` implementation does).
///
+/// When comparing wide pointers, both the address and the metadata are tested for equality.
+/// However, note that comparing trait object pointers (`*const dyn Trait`) is unreliable: pointers
+/// to values of the same underlying type can compare unequal (because vtables are duplicated in
+/// multiple codegen units), and pointers to values of *different* underlying type can compare equal
+/// (since identical vtables can be deduplicated within a codegen unit).
+///
/// # Examples
///
/// ```
@@ -1752,41 +1792,6 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
/// assert!(!std::ptr::eq(&a[..2], &a[..3]));
/// assert!(!std::ptr::eq(&a[0..2], &a[1..3]));
/// ```
-///
-/// Traits are also compared by their implementation:
-///
-/// ```
-/// #[repr(transparent)]
-/// struct Wrapper { member: i32 }
-///
-/// trait Trait {}
-/// impl Trait for Wrapper {}
-/// impl Trait for i32 {}
-///
-/// let wrapper = Wrapper { member: 10 };
-///
-/// // Pointers have equal addresses.
-/// assert!(std::ptr::eq(
-/// &wrapper as *const Wrapper as *const u8,
-/// &wrapper.member as *const i32 as *const u8
-/// ));
-///
-/// // Objects have equal addresses, but `Trait` has different implementations.
-/// assert!(!std::ptr::eq(
-/// &wrapper as &dyn Trait,
-/// &wrapper.member as &dyn Trait,
-/// ));
-/// assert!(!std::ptr::eq(
-/// &wrapper as &dyn Trait as *const dyn Trait,
-/// &wrapper.member as &dyn Trait as *const dyn Trait,
-/// ));
-///
-/// // Converting the reference to a `*const u8` compares by address.
-/// assert!(std::ptr::eq(
-/// &wrapper as &dyn Trait as *const dyn Trait as *const u8,
-/// &wrapper.member as &dyn Trait as *const dyn Trait as *const u8,
-/// ));
-/// ```
#[stable(feature = "ptr_eq", since = "1.17.0")]
#[inline]
pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
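Aside: with the trait-object example removed as unreliable, a hedged replacement is to compare only the data addresses by casting the wide pointers to thin ones first (the same trick the removed example ended on):

```
use std::fmt::Display;
use std::ptr;

fn main() {
    let x = 5i32;
    let a: &dyn Display = &x;
    let b: &dyn Display = &x;

    // Casting to a thin pointer drops the vtable metadata, so only the
    // data addresses are compared; this is stable across codegen units.
    assert!(ptr::eq(
        a as *const dyn Display as *const u8,
        b as *const dyn Display as *const u8,
    ));
}
```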
@@ -1834,7 +1839,7 @@ macro_rules! maybe_fnptr_doc {
$item
};
($a:ident @ #[$meta:meta] $item:item) => {
- #[cfg_attr(not(bootstrap), doc(fake_variadic))]
+ #[doc(fake_variadic)]
#[doc = "This trait is implemented for function pointers with up to twelve arguments."]
#[$meta]
$item
@@ -1854,9 +1859,16 @@ macro_rules! maybe_fnptr_doc {
// Impls for function pointers
macro_rules! fnptr_impls_safety_abi {
($FnTy: ty, $($Arg: ident),*) => {
+ fnptr_impls_safety_abi! { #[stable(feature = "fnptr_impls", since = "1.4.0")] $FnTy, $($Arg),* }
+ };
+ (@c_unwind $FnTy: ty, $($Arg: ident),*) => {
+ #[cfg(not(bootstrap))]
+ fnptr_impls_safety_abi! { #[unstable(feature = "c_unwind", issue = "74990")] $FnTy, $($Arg),* }
+ };
+ (#[$meta:meta] $FnTy: ty, $($Arg: ident),*) => {
maybe_fnptr_doc! {
$($Arg)* @
- #[stable(feature = "fnptr_impls", since = "1.4.0")]
+ #[$meta]
impl<Ret, $($Arg),*> PartialEq for $FnTy {
#[inline]
fn eq(&self, other: &Self) -> bool {
@@ -1867,13 +1879,13 @@ macro_rules! fnptr_impls_safety_abi {
maybe_fnptr_doc! {
$($Arg)* @
- #[stable(feature = "fnptr_impls", since = "1.4.0")]
+ #[$meta]
impl<Ret, $($Arg),*> Eq for $FnTy {}
}
maybe_fnptr_doc! {
$($Arg)* @
- #[stable(feature = "fnptr_impls", since = "1.4.0")]
+ #[$meta]
impl<Ret, $($Arg),*> PartialOrd for $FnTy {
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
@@ -1884,7 +1896,7 @@ macro_rules! fnptr_impls_safety_abi {
maybe_fnptr_doc! {
$($Arg)* @
- #[stable(feature = "fnptr_impls", since = "1.4.0")]
+ #[$meta]
impl<Ret, $($Arg),*> Ord for $FnTy {
#[inline]
fn cmp(&self, other: &Self) -> Ordering {
@@ -1895,7 +1907,7 @@ macro_rules! fnptr_impls_safety_abi {
maybe_fnptr_doc! {
$($Arg)* @
- #[stable(feature = "fnptr_impls", since = "1.4.0")]
+ #[$meta]
impl<Ret, $($Arg),*> hash::Hash for $FnTy {
fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
state.write_usize(*self as usize)
@@ -1905,7 +1917,7 @@ macro_rules! fnptr_impls_safety_abi {
maybe_fnptr_doc! {
$($Arg)* @
- #[stable(feature = "fnptr_impls", since = "1.4.0")]
+ #[$meta]
impl<Ret, $($Arg),*> fmt::Pointer for $FnTy {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::pointer_fmt_inner(*self as usize, f)
@@ -1915,7 +1927,7 @@ macro_rules! fnptr_impls_safety_abi {
maybe_fnptr_doc! {
$($Arg)* @
- #[stable(feature = "fnptr_impls", since = "1.4.0")]
+ #[$meta]
impl<Ret, $($Arg),*> fmt::Debug for $FnTy {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::pointer_fmt_inner(*self as usize, f)
@@ -1930,16 +1942,22 @@ macro_rules! fnptr_impls_args {
fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ }
fnptr_impls_safety_abi! { extern "C" fn($($Arg),+) -> Ret, $($Arg),+ }
fnptr_impls_safety_abi! { extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
+ fnptr_impls_safety_abi! { @c_unwind extern "C-unwind" fn($($Arg),+) -> Ret, $($Arg),+ }
+ fnptr_impls_safety_abi! { @c_unwind extern "C-unwind" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ }
fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+) -> Ret, $($Arg),+ }
fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
+ fnptr_impls_safety_abi! { @c_unwind unsafe extern "C-unwind" fn($($Arg),+) -> Ret, $($Arg),+ }
+ fnptr_impls_safety_abi! { @c_unwind unsafe extern "C-unwind" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
};
() => {
// No variadic functions with 0 parameters
fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, }
fnptr_impls_safety_abi! { extern "C" fn() -> Ret, }
+ fnptr_impls_safety_abi! { @c_unwind extern "C-unwind" fn() -> Ret, }
fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, }
fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, }
+ fnptr_impls_safety_abi! { @c_unwind unsafe extern "C-unwind" fn() -> Ret, }
};
}
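Aside: a sketch of what the new `@c_unwind` expansions enable, assuming a nightly toolchain with the unstable `c_unwind` ABI; the generated impls make such function pointers comparable and printable:

```
#![feature(c_unwind)]

extern "C-unwind" fn callback() {}

fn main() {
    let f: extern "C-unwind" fn() = callback;

    // The impls added above give `C-unwind` function pointers the same
    // PartialEq/Hash/Pointer/Debug support as plain `extern "C"` ones.
    assert!(f == f);
    println!("{:p}", f);
}
```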
diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs
index fc3dd2a9b..6764002bc 100644
--- a/library/core/src/ptr/mut_ptr.rs
+++ b/library/core/src/ptr/mut_ptr.rs
@@ -35,7 +35,10 @@ impl<T: ?Sized> *mut T {
pub const fn is_null(self) -> bool {
// Compare via a cast to a thin pointer, so fat pointers are only
// considering their "data" part for null-ness.
- (self as *mut u8).guaranteed_eq(null_mut())
+ match (self as *mut u8).guaranteed_eq(null_mut()) {
+ None => false,
+ Some(res) => res,
+ }
}
/// Casts to a pointer of another type.
@@ -77,10 +80,14 @@ impl<T: ?Sized> *mut T {
#[unstable(feature = "set_ptr_value", issue = "75091")]
#[must_use = "returns a new pointer rather than modifying its argument"]
#[inline]
- pub fn with_metadata_of<U>(self, mut val: *mut U) -> *mut U
+ pub fn with_metadata_of<U>(self, val: *const U) -> *mut U
where
U: ?Sized,
{
+ // Prepare in the type system that we will replace the pointer value with a mutable
+ // pointer, taking the mutable provenance from the `self` pointer.
+ let mut val = val as *mut U;
+ // Pointer to the pointer value within the value.
let target = &mut val as *mut *mut U as *mut *mut u8;
// SAFETY: In case of a thin pointer, this operations is identical
// to a simple assignment. In case of a fat pointer, with the current
@@ -100,8 +107,8 @@ impl<T: ?Sized> *mut T {
/// coercion.
///
/// [`cast_mut`]: #method.cast_mut
- #[unstable(feature = "ptr_const_cast", issue = "92675")]
- #[rustc_const_unstable(feature = "ptr_const_cast", issue = "92675")]
+ #[stable(feature = "ptr_const_cast", since = "1.65.0")]
+ #[rustc_const_stable(feature = "ptr_const_cast", since = "1.65.0")]
pub const fn cast_const(self) -> *const T {
self as _
}
@@ -160,7 +167,7 @@ impl<T: ?Sized> *mut T {
/// This is similar to `self as usize`, which semantically discards *provenance* and
/// *address-space* information. However, unlike `self as usize`, casting the returned address
/// back to a pointer yields [`invalid`][], which is undefined behavior to dereference. To
- /// properly restore the lost information and obtain a dereferencable pointer, use
+ /// properly restore the lost information and obtain a dereferenceable pointer, use
/// [`with_addr`][pointer::with_addr] or [`map_addr`][pointer::map_addr].
///
/// If using those APIs is not possible because there is no way to preserve a pointer with the
@@ -255,7 +262,7 @@ impl<T: ?Sized> *mut T {
let offset = dest_addr.wrapping_sub(self_addr);
// This is the canonical desugarring of this operation
- self.cast::<u8>().wrapping_offset(offset).cast::<T>()
+ self.wrapping_byte_offset(offset)
}
/// Creates a new pointer by mapping `self`'s address to a new one.
@@ -575,6 +582,20 @@ impl<T: ?Sized> *mut T {
)
}
+ /// Masks out bits of the pointer according to a mask.
+ ///
+ /// This is convenience for `ptr.map_addr(|a| a & mask)`.
+ ///
+ /// For non-`Sized` pointees this operation changes only the data pointer,
+ /// leaving the metadata untouched.
+ #[unstable(feature = "ptr_mask", issue = "98290")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[inline(always)]
+ pub fn mask(self, mask: usize) -> *mut T {
+ let this = intrinsics::ptr_mask(self.cast::<()>(), mask) as *mut ();
+ from_raw_parts_mut::<T>(this, metadata(self))
+ }
+
/// Returns `None` if the pointer is null, or else returns a unique reference to
/// the value wrapped in `Some`. If the value may be uninitialized, [`as_uninit_mut`]
/// must be used instead.
@@ -682,20 +703,16 @@ impl<T: ?Sized> *mut T {
/// Returns whether two pointers are guaranteed to be equal.
///
- /// At runtime this function behaves like `self == other`.
+ /// At runtime this function behaves like `Some(self == other)`.
/// However, in some contexts (e.g., compile-time evaluation),
/// it is not always possible to determine equality of two pointers, so this function may
- /// spuriously return `false` for pointers that later actually turn out to be equal.
- /// But when it returns `true`, the pointers are guaranteed to be equal.
+    /// spuriously return `None` for pointers that later actually turn out to have their equality known.
+ /// But when it returns `Some`, the pointers' equality is guaranteed to be known.
///
- /// This function is the mirror of [`guaranteed_ne`], but not its inverse. There are pointer
- /// comparisons for which both functions return `false`.
- ///
- /// [`guaranteed_ne`]: #method.guaranteed_ne
- ///
- /// The return value may change depending on the compiler version and unsafe code might not
+ /// The return value may change from `Some` to `None` and vice versa depending on the compiler
+    /// version, and unsafe code must not
/// rely on the result of this function for soundness. It is suggested to only use this function
- /// for performance optimizations where spurious `false` return values by this function do not
+ /// for performance optimizations where spurious `None` return values by this function do not
/// affect the outcome, but just the performance.
/// The consequences of using this method to make runtime and compile-time code behave
/// differently have not been explored. This method should not be used to introduce such
@@ -704,29 +721,25 @@ impl<T: ?Sized> *mut T {
#[unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
#[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
#[inline]
- pub const fn guaranteed_eq(self, other: *mut T) -> bool
+ pub const fn guaranteed_eq(self, other: *mut T) -> Option<bool>
where
T: Sized,
{
- intrinsics::ptr_guaranteed_eq(self as *const _, other as *const _)
+ (self as *const T).guaranteed_eq(other as _)
}
- /// Returns whether two pointers are guaranteed to be unequal.
+ /// Returns whether two pointers are guaranteed to be inequal.
///
- /// At runtime this function behaves like `self != other`.
+ /// At runtime this function behaves like `Some(self != other)`.
/// However, in some contexts (e.g., compile-time evaluation),
- /// it is not always possible to determine the inequality of two pointers, so this function may
- /// spuriously return `false` for pointers that later actually turn out to be unequal.
- /// But when it returns `true`, the pointers are guaranteed to be unequal.
- ///
- /// This function is the mirror of [`guaranteed_eq`], but not its inverse. There are pointer
- /// comparisons for which both functions return `false`.
+ /// it is not always possible to determine inequality of two pointers, so this function may
+    /// spuriously return `None` for pointers that later actually turn out to have their inequality known.
+ /// But when it returns `Some`, the pointers' inequality is guaranteed to be known.
///
- /// [`guaranteed_eq`]: #method.guaranteed_eq
- ///
- /// The return value may change depending on the compiler version and unsafe code might not
+ /// The return value may change from `Some` to `None` and vice versa depending on the compiler
+    /// version, and unsafe code must not
/// rely on the result of this function for soundness. It is suggested to only use this function
- /// for performance optimizations where spurious `false` return values by this function do not
+ /// for performance optimizations where spurious `None` return values by this function do not
/// affect the outcome, but just the performance.
/// The consequences of using this method to make runtime and compile-time code behave
/// differently have not been explored. This method should not be used to introduce such
@@ -735,11 +748,11 @@ impl<T: ?Sized> *mut T {
#[unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
#[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
#[inline]
- pub const unsafe fn guaranteed_ne(self, other: *mut T) -> bool
+ pub const fn guaranteed_ne(self, other: *mut T) -> Option<bool>
where
T: Sized,
{
- intrinsics::ptr_guaranteed_ne(self as *const _, other as *const _)
+ (self as *const T).guaranteed_ne(other as _)
}
/// Calculates the distance between two pointers. The returned value is in
@@ -824,7 +837,7 @@ impl<T: ?Sized> *mut T {
/// }
/// ```
#[stable(feature = "ptr_offset_from", since = "1.47.0")]
- #[rustc_const_unstable(feature = "const_ptr_offset_from", issue = "92980")]
+ #[rustc_const_stable(feature = "const_ptr_offset_from", since = "1.65.0")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn offset_from(self, origin: *const T) -> isize
@@ -858,7 +871,7 @@ impl<T: ?Sized> *mut T {
/// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
///
/// This computes the same value that [`offset_from`](#method.offset_from)
- /// would compute, but with the added precondition that that the offset is
+ /// would compute, but with the added precondition that the offset is
/// guaranteed to be non-negative. This method is equivalent to
/// `usize::from(self.offset_from(origin)).unwrap_unchecked()`,
/// but it provides slightly more information to the optimizer, which can
@@ -1545,20 +1558,23 @@ impl<T: ?Sized> *mut T {
/// Accessing adjacent `u8` as `u16`
///
/// ```
- /// # fn foo(n: usize) {
- /// # use std::mem::align_of;
+ /// use std::mem::align_of;
+ ///
/// # unsafe {
- /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
- /// let ptr = x.as_ptr().add(n) as *const u8;
+ /// let mut x = [5_u8, 6, 7, 8, 9];
+ /// let ptr = x.as_mut_ptr();
/// let offset = ptr.align_offset(align_of::<u16>());
- /// if offset < x.len() - n - 1 {
- /// let u16_ptr = ptr.add(offset) as *const u16;
- /// assert_ne!(*u16_ptr, 500);
+ ///
+ /// if offset < x.len() - 1 {
+ /// let u16_ptr = ptr.add(offset).cast::<u16>();
+ /// *u16_ptr = 0;
+ ///
+ /// assert!(x == [0, 0, 7, 8, 9] || x == [5, 0, 0, 8, 9]);
/// } else {
/// // while the pointer can be aligned via `offset`, it would point
/// // outside the allocation
/// }
- /// # } }
+ /// # }
/// ```
#[stable(feature = "align_offset", since = "1.36.0")]
#[rustc_const_unstable(feature = "const_align_offset", issue = "90962")]
@@ -1614,11 +1630,8 @@ impl<T: ?Sized> *mut T {
panic!("is_aligned_to: align is not a power-of-two");
}
- // SAFETY: `is_power_of_two()` will return `false` for zero.
- unsafe { core::intrinsics::assume(align != 0) };
-
// Cast is needed for `T: !Sized`
- self.cast::<u8>().addr() % align == 0
+ self.cast::<u8>().addr() & align - 1 == 0
}
}
diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs
index f3ef094cb..c18264d13 100644
--- a/library/core/src/ptr/non_null.rs
+++ b/library/core/src/ptr/non_null.rs
@@ -2,6 +2,7 @@ use crate::cmp::Ordering;
use crate::convert::From;
use crate::fmt;
use crate::hash;
+use crate::intrinsics::assert_unsafe_precondition;
use crate::marker::Unsize;
use crate::mem::{self, MaybeUninit};
use crate::num::NonZeroUsize;
@@ -195,7 +196,10 @@ impl<T: ?Sized> NonNull<T> {
#[inline]
pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
// SAFETY: the caller must guarantee that `ptr` is non-null.
- unsafe { NonNull { pointer: ptr as _ } }
+ unsafe {
+ assert_unsafe_precondition!("NonNull::new_unchecked requires that the pointer is non-null", [T: ?Sized](ptr: *mut T) => !ptr.is_null());
+ NonNull { pointer: ptr as _ }
+ }
}
/// Creates a new `NonNull` if `ptr` is non-null.
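Aside: a sanity-check sketch of the two `NonNull` constructors, the checked one and the one now guarded by the debug precondition above:

```
use std::ptr::NonNull;

fn main() {
    let mut value = 10u32;

    // The checked constructor returns `None` for a null pointer.
    assert!(NonNull::new(std::ptr::null_mut::<u32>()).is_none());

    // `new_unchecked` skips the check; with debug assertions enabled, the
    // precondition added above would now catch a null argument here.
    let nn = unsafe { NonNull::new_unchecked(&mut value as *mut u32) };
    assert_eq!(unsafe { *nn.as_ptr() }, 10);
}
```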
diff --git a/library/core/src/result.rs b/library/core/src/result.rs
index 45b052c82..3f33c5fd6 100644
--- a/library/core/src/result.rs
+++ b/library/core/src/result.rs
@@ -548,22 +548,25 @@ impl<T, E> Result<T, E> {
/// # Examples
///
/// ```
- /// #![feature(is_some_with)]
+ /// #![feature(is_some_and)]
///
/// let x: Result<u32, &str> = Ok(2);
- /// assert_eq!(x.is_ok_and(|&x| x > 1), true);
+ /// assert_eq!(x.is_ok_and(|x| x > 1), true);
///
/// let x: Result<u32, &str> = Ok(0);
- /// assert_eq!(x.is_ok_and(|&x| x > 1), false);
+ /// assert_eq!(x.is_ok_and(|x| x > 1), false);
///
/// let x: Result<u32, &str> = Err("hey");
- /// assert_eq!(x.is_ok_and(|&x| x > 1), false);
+ /// assert_eq!(x.is_ok_and(|x| x > 1), false);
/// ```
#[must_use]
#[inline]
- #[unstable(feature = "is_some_with", issue = "93050")]
- pub fn is_ok_and(&self, f: impl FnOnce(&T) -> bool) -> bool {
- matches!(self, Ok(x) if f(x))
+ #[unstable(feature = "is_some_and", issue = "93050")]
+ pub fn is_ok_and(self, f: impl FnOnce(T) -> bool) -> bool {
+ match self {
+ Err(_) => false,
+ Ok(x) => f(x),
+ }
}
/// Returns `true` if the result is [`Err`].
@@ -592,7 +595,7 @@ impl<T, E> Result<T, E> {
/// # Examples
///
/// ```
- /// #![feature(is_some_with)]
+ /// #![feature(is_some_and)]
/// use std::io::{Error, ErrorKind};
///
/// let x: Result<u32, Error> = Err(Error::new(ErrorKind::NotFound, "!"));
@@ -606,9 +609,12 @@ impl<T, E> Result<T, E> {
/// ```
#[must_use]
#[inline]
- #[unstable(feature = "is_some_with", issue = "93050")]
- pub fn is_err_and(&self, f: impl FnOnce(&E) -> bool) -> bool {
- matches!(self, Err(x) if f(x))
+ #[unstable(feature = "is_some_and", issue = "93050")]
+ pub fn is_err_and(self, f: impl FnOnce(E) -> bool) -> bool {
+ match self {
+ Ok(_) => false,
+ Err(e) => f(e),
+ }
}
/////////////////////////////////////////////////////////////////////////
@@ -1285,6 +1291,11 @@ impl<T, E> Result<T, E> {
/// Returns `res` if the result is [`Ok`], otherwise returns the [`Err`] value of `self`.
///
+ /// Arguments passed to `and` are eagerly evaluated; if you are passing the
+ /// result of a function call, it is recommended to use [`and_then`], which is
+ /// lazily evaluated.
+ ///
+ /// [`and_then`]: Result::and_then
///
/// # Examples
///
@@ -1771,40 +1782,6 @@ impl<T, E> Result<Result<T, E>, E> {
}
}
-impl<T> Result<T, T> {
- /// Returns the [`Ok`] value if `self` is `Ok`, and the [`Err`] value if
- /// `self` is `Err`.
- ///
- /// In other words, this function returns the value (the `T`) of a
- /// `Result<T, T>`, regardless of whether or not that result is `Ok` or
- /// `Err`.
- ///
- /// This can be useful in conjunction with APIs such as
- /// [`Atomic*::compare_exchange`], or [`slice::binary_search`], but only in
- /// cases where you don't care if the result was `Ok` or not.
- ///
- /// [`Atomic*::compare_exchange`]: crate::sync::atomic::AtomicBool::compare_exchange
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(result_into_ok_or_err)]
- /// let ok: Result<u32, u32> = Ok(3);
- /// let err: Result<u32, u32> = Err(4);
- ///
- /// assert_eq!(ok.into_ok_or_err(), 3);
- /// assert_eq!(err.into_ok_or_err(), 4);
- /// ```
- #[inline]
- #[unstable(feature = "result_into_ok_or_err", reason = "newly added", issue = "82223")]
- pub const fn into_ok_or_err(self) -> T {
- match self {
- Ok(v) => v,
- Err(v) => v,
- }
- }
-}
-
// This is a separate function to reduce the code size of the methods
#[cfg(not(feature = "panic_immediate_abort"))]
#[inline(never)]
@@ -2095,9 +2072,6 @@ impl<A, E, V: FromIterator<A>> FromIterator<Result<A, E>> for Result<V, E> {
/// so the final value of `shared` is 6 (= `3 + 2 + 1`), not 16.
#[inline]
fn from_iter<I: IntoIterator<Item = Result<A, E>>>(iter: I) -> Result<V, E> {
- // FIXME(#11084): This could be replaced with Iterator::scan when this
- // performance bug is closed.
-
iter::try_process(iter.into_iter(), |i| i.collect())
}
}
@@ -2145,6 +2119,7 @@ impl<T, E, F: From<E>> ops::FromResidual<ops::Yeet<E>> for Result<T, F> {
}
#[unstable(feature = "try_trait_v2_residual", issue = "91285")]
-impl<T, E> ops::Residual<T> for Result<convert::Infallible, E> {
+#[rustc_const_unstable(feature = "const_try", issue = "74935")]
+impl<T, E> const ops::Residual<T> for Result<convert::Infallible, E> {
type TryType = Result<T, E>;
}
diff --git a/library/core/src/slice/ascii.rs b/library/core/src/slice/ascii.rs
index 63715a6b8..5e5399acc 100644
--- a/library/core/src/slice/ascii.rs
+++ b/library/core/src/slice/ascii.rs
@@ -215,8 +215,6 @@ impl<'a> iter::DoubleEndedIterator for EscapeAscii<'a> {
}
}
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
-impl<'a> iter::ExactSizeIterator for EscapeAscii<'a> {}
-#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> iter::FusedIterator for EscapeAscii<'a> {}
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> fmt::Display for EscapeAscii<'a> {
diff --git a/library/core/src/slice/index.rs b/library/core/src/slice/index.rs
index fd7ecf3da..6d2f7330d 100644
--- a/library/core/src/slice/index.rs
+++ b/library/core/src/slice/index.rs
@@ -48,10 +48,12 @@ const fn slice_start_index_len_fail(index: usize, len: usize) -> ! {
}
// FIXME const-hack
+#[track_caller]
fn slice_start_index_len_fail_rt(index: usize, len: usize) -> ! {
panic!("range start index {index} out of range for slice of length {len}");
}
+#[track_caller]
const fn slice_start_index_len_fail_ct(_: usize, _: usize) -> ! {
panic!("slice start index is out of range for slice");
}
@@ -69,10 +71,12 @@ const fn slice_end_index_len_fail(index: usize, len: usize) -> ! {
}
// FIXME const-hack
+#[track_caller]
fn slice_end_index_len_fail_rt(index: usize, len: usize) -> ! {
panic!("range end index {index} out of range for slice of length {len}");
}
+#[track_caller]
const fn slice_end_index_len_fail_ct(_: usize, _: usize) -> ! {
panic!("slice end index is out of range for slice");
}
@@ -88,10 +92,12 @@ const fn slice_index_order_fail(index: usize, end: usize) -> ! {
}
// FIXME const-hack
+#[track_caller]
fn slice_index_order_fail_rt(index: usize, end: usize) -> ! {
panic!("slice index starts at {index} but ends at {end}");
}
+#[track_caller]
const fn slice_index_order_fail_ct(_: usize, _: usize) -> ! {
panic!("slice index start is larger than end");
}
@@ -133,6 +139,8 @@ mod private_slice_index {
impl Sealed for ops::RangeToInclusive<usize> {}
#[stable(feature = "slice_index_with_ops_bound_pair", since = "1.53.0")]
impl Sealed for (ops::Bound<usize>, ops::Bound<usize>) {}
+
+ impl Sealed for ops::IndexRange {}
}
/// A helper trait used for indexing operations.
@@ -152,6 +160,7 @@ mod private_slice_index {
message = "the type `{T}` cannot be indexed by `{Self}`",
label = "slice indices are of type `usize` or ranges of `usize`"
)]
+#[const_trait]
pub unsafe trait SliceIndex<T: ?Sized>: private_slice_index::Sealed {
/// The output type returned by methods.
#[stable(feature = "slice_get_slice", since = "1.28.0")]
@@ -217,21 +226,29 @@ unsafe impl<T> const SliceIndex<[T]> for usize {
#[inline]
unsafe fn get_unchecked(self, slice: *const [T]) -> *const T {
+ let this = self;
// SAFETY: the caller guarantees that `slice` is not dangling, so it
// cannot be longer than `isize::MAX`. They also guarantee that
// `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
// so the call to `add` is safe.
unsafe {
- assert_unsafe_precondition!(self < slice.len());
+ assert_unsafe_precondition!(
+ "slice::get_unchecked requires that the index is within the slice",
+ [T](this: usize, slice: *const [T]) => this < slice.len()
+ );
slice.as_ptr().add(self)
}
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut T {
+ let this = self;
// SAFETY: see comments for `get_unchecked` above.
unsafe {
- assert_unsafe_precondition!(self < slice.len());
+ assert_unsafe_precondition!(
+ "slice::get_unchecked_mut requires that the index is within the slice",
+ [T](this: usize, slice: *mut [T]) => this < slice.len()
+ );
slice.as_mut_ptr().add(self)
}
}
@@ -249,6 +266,83 @@ unsafe impl<T> const SliceIndex<[T]> for usize {
}
}
+/// Because `IndexRange` guarantees `start <= end`, fewer checks are needed here
+/// than there are for a general `Range<usize>` (which might be `100..3`).
+#[rustc_const_unstable(feature = "const_index_range_slice_index", issue = "none")]
+unsafe impl<T> const SliceIndex<[T]> for ops::IndexRange {
+ type Output = [T];
+
+ #[inline]
+ fn get(self, slice: &[T]) -> Option<&[T]> {
+ if self.end() <= slice.len() {
+ // SAFETY: `self` is checked to be valid and in bounds above.
+ unsafe { Some(&*self.get_unchecked(slice)) }
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
+ if self.end() <= slice.len() {
+ // SAFETY: `self` is checked to be valid and in bounds above.
+ unsafe { Some(&mut *self.get_unchecked_mut(slice)) }
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
+ let end = self.end();
+ // SAFETY: the caller guarantees that `slice` is not dangling, so it
+ // cannot be longer than `isize::MAX`. They also guarantee that
+ // `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
+ // so the call to `add` is safe.
+
+ unsafe {
+ assert_unsafe_precondition!(
+ "slice::get_unchecked requires that the index is within the slice",
+ [T](end: usize, slice: *const [T]) => end <= slice.len()
+ );
+ ptr::slice_from_raw_parts(slice.as_ptr().add(self.start()), self.len())
+ }
+ }
+
+ #[inline]
+ unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
+ let end = self.end();
+ // SAFETY: see comments for `get_unchecked` above.
+ unsafe {
+ assert_unsafe_precondition!(
+ "slice::get_unchecked_mut requires that the index is within the slice",
+ [T](end: usize, slice: *mut [T]) => end <= slice.len()
+ );
+ ptr::slice_from_raw_parts_mut(slice.as_mut_ptr().add(self.start()), self.len())
+ }
+ }
+
+ #[inline]
+ fn index(self, slice: &[T]) -> &[T] {
+ if self.end() <= slice.len() {
+ // SAFETY: `self` is checked to be valid and in bounds above.
+ unsafe { &*self.get_unchecked(slice) }
+ } else {
+ slice_end_index_len_fail(self.end(), slice.len())
+ }
+ }
+
+ #[inline]
+ fn index_mut(self, slice: &mut [T]) -> &mut [T] {
+ if self.end() <= slice.len() {
+ // SAFETY: `self` is checked to be valid and in bounds above.
+ unsafe { &mut *self.get_unchecked_mut(slice) }
+ } else {
+ slice_end_index_len_fail(self.end(), slice.len())
+ }
+ }
+}
+
#[stable(feature = "slice_get_slice_impls", since = "1.15.0")]
#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
unsafe impl<T> const SliceIndex<[T]> for ops::Range<usize> {
@@ -276,22 +370,32 @@ unsafe impl<T> const SliceIndex<[T]> for ops::Range<usize> {
#[inline]
unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
+ let this = ops::Range { start: self.start, end: self.end };
// SAFETY: the caller guarantees that `slice` is not dangling, so it
// cannot be longer than `isize::MAX`. They also guarantee that
// `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
// so the call to `add` is safe.
unsafe {
- assert_unsafe_precondition!(self.end >= self.start && self.end <= slice.len());
+ assert_unsafe_precondition!(
+ "slice::get_unchecked requires that the range is within the slice",
+ [T](this: ops::Range<usize>, slice: *const [T]) =>
+ this.end >= this.start && this.end <= slice.len()
+ );
ptr::slice_from_raw_parts(slice.as_ptr().add(self.start), self.end - self.start)
}
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
+ let this = ops::Range { start: self.start, end: self.end };
// SAFETY: see comments for `get_unchecked` above.
unsafe {
- assert_unsafe_precondition!(self.end >= self.start && self.end <= slice.len());
+ assert_unsafe_precondition!(
+ "slice::get_unchecked_mut requires that the range is within the slice",
+ [T](this: ops::Range<usize>, slice: *mut [T]) =>
+ this.end >= this.start && this.end <= slice.len()
+ );
ptr::slice_from_raw_parts_mut(slice.as_mut_ptr().add(self.start), self.end - self.start)
}
}
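// Illustrative sketch, not part of the patch (the function name is made up): the
// public behaviour backed by the panic helpers and `#[track_caller]` additions above.
// Checked `get` reports a bad range as `None`, while `Index` panics with the messages
// shown, attributed to the caller's source location.
fn demo_range_checks() {
    let v = [10, 20, 30];
    assert_eq!(v.get(1..3), Some(&[20, 30][..]));
    assert_eq!(v.get(1..4), None); // indexing would panic: range end index out of range
    assert_eq!(v.get(2..1), None); // indexing would panic: slice index starts after it ends
}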
diff --git a/library/core/src/slice/iter.rs b/library/core/src/slice/iter.rs
index f1e659309..8a8962828 100644
--- a/library/core/src/slice/iter.rs
+++ b/library/core/src/slice/iter.rs
@@ -9,7 +9,7 @@ use crate::fmt;
use crate::intrinsics::{assume, exact_div, unchecked_sub};
use crate::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce};
use crate::marker::{PhantomData, Send, Sized, Sync};
-use crate::mem;
+use crate::mem::{self, SizedTypeProperties};
use crate::num::NonZeroUsize;
use crate::ptr::NonNull;
@@ -91,11 +91,8 @@ impl<'a, T> Iter<'a, T> {
unsafe {
assume(!ptr.is_null());
- let end = if mem::size_of::<T>() == 0 {
- (ptr as *const u8).wrapping_add(slice.len()) as *const T
- } else {
- ptr.add(slice.len())
- };
+ let end =
+ if T::IS_ZST { ptr.wrapping_byte_add(slice.len()) } else { ptr.add(slice.len()) };
Self { ptr: NonNull::new_unchecked(ptr as *mut T), end, _marker: PhantomData }
}
@@ -127,6 +124,7 @@ impl<'a, T> Iter<'a, T> {
/// ```
#[must_use]
#[stable(feature = "iter_to_slice", since = "1.4.0")]
+ #[inline]
pub fn as_slice(&self) -> &'a [T] {
self.make_slice()
}
@@ -146,6 +144,7 @@ iterator! {struct Iter -> *const T, &'a T, const, {/* no mut */}, {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Clone for Iter<'_, T> {
+ #[inline]
fn clone(&self) -> Self {
Iter { ptr: self.ptr, end: self.end, _marker: self._marker }
}
@@ -153,6 +152,7 @@ impl<T> Clone for Iter<'_, T> {
#[stable(feature = "slice_iter_as_ref", since = "1.13.0")]
impl<T> AsRef<[T]> for Iter<'_, T> {
+ #[inline]
fn as_ref(&self) -> &[T] {
self.as_slice()
}
@@ -227,11 +227,8 @@ impl<'a, T> IterMut<'a, T> {
unsafe {
assume(!ptr.is_null());
- let end = if mem::size_of::<T>() == 0 {
- (ptr as *mut u8).wrapping_add(slice.len()) as *mut T
- } else {
- ptr.add(slice.len())
- };
+ let end =
+ if T::IS_ZST { ptr.wrapping_byte_add(slice.len()) } else { ptr.add(slice.len()) };
Self { ptr: NonNull::new_unchecked(ptr), end, _marker: PhantomData }
}
@@ -303,6 +300,7 @@ impl<'a, T> IterMut<'a, T> {
/// ```
#[must_use]
#[stable(feature = "slice_iter_mut_as_slice", since = "1.53.0")]
+ #[inline]
pub fn as_slice(&self) -> &[T] {
self.make_slice()
}
@@ -351,6 +349,7 @@ impl<'a, T> IterMut<'a, T> {
#[stable(feature = "slice_iter_mut_as_slice", since = "1.53.0")]
impl<T> AsRef<[T]> for IterMut<'_, T> {
+ #[inline]
fn as_ref(&self) -> &[T] {
self.as_slice()
}
@@ -2754,10 +2753,10 @@ impl<'a, T> Iterator for RChunksMut<'a, T> {
None => 0,
};
// SAFETY: This type ensures that self.v is a valid pointer with a correct len.
- // Therefore the bounds check in split_at_mut guarantess the split point is inbounds.
+ // Therefore the bounds check in split_at_mut guarantees the split point is inbounds.
let (head, tail) = unsafe { self.v.split_at_mut(start) };
// SAFETY: This type ensures that self.v is a valid pointer with a correct len.
- // Therefore the bounds check in split_at_mut guarantess the split point is inbounds.
+ // Therefore the bounds check in split_at_mut guarantees the split point is inbounds.
let (nth, _) = unsafe { tail.split_at_mut(end - start) };
self.v = head;
// SAFETY: Nothing else points to or will point to the contents of this slice.
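// Illustrative sketch, not part of the patch: the zero-sized-type case the new
// `T::IS_ZST` branches above handle. A ZST iterator cannot use a real one-past-the-end
// address, so `end` is tracked via wrapping byte offsets, but the observable iteration
// behaviour is unchanged.
fn demo_zst_iteration() {
    let units = [(); 5];
    assert_eq!(units.iter().count(), 5);

    let mut it = units.iter();
    it.next();
    assert_eq!(it.as_slice().len(), 4); // `as_slice` still reports the remaining length
}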
diff --git a/library/core/src/slice/iter/macros.rs b/library/core/src/slice/iter/macros.rs
index c05242222..ce51d48e3 100644
--- a/library/core/src/slice/iter/macros.rs
+++ b/library/core/src/slice/iter/macros.rs
@@ -64,7 +64,7 @@ macro_rules! iterator {
// backwards by `n`. `n` must not exceed `self.len()`.
macro_rules! zst_shrink {
($self: ident, $n: ident) => {
- $self.end = ($self.end as * $raw_mut u8).wrapping_offset(-$n) as * $raw_mut T;
+ $self.end = $self.end.wrapping_byte_sub($n);
}
}
@@ -82,7 +82,7 @@ macro_rules! iterator {
// returning the old start.
// Unsafe because the offset must not exceed `self.len()`.
#[inline(always)]
- unsafe fn post_inc_start(&mut self, offset: isize) -> * $raw_mut T {
+ unsafe fn post_inc_start(&mut self, offset: usize) -> * $raw_mut T {
if mem::size_of::<T>() == 0 {
zst_shrink!(self, offset);
self.ptr.as_ptr()
@@ -90,7 +90,7 @@ macro_rules! iterator {
let old = self.ptr.as_ptr();
// SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
// so this new pointer is inside `self` and thus guaranteed to be non-null.
- self.ptr = unsafe { NonNull::new_unchecked(self.ptr.as_ptr().offset(offset)) };
+ self.ptr = unsafe { NonNull::new_unchecked(self.ptr.as_ptr().add(offset)) };
old
}
}
@@ -99,15 +99,15 @@ macro_rules! iterator {
// returning the new end.
// Unsafe because the offset must not exceed `self.len()`.
#[inline(always)]
- unsafe fn pre_dec_end(&mut self, offset: isize) -> * $raw_mut T {
- if mem::size_of::<T>() == 0 {
+ unsafe fn pre_dec_end(&mut self, offset: usize) -> * $raw_mut T {
+ if T::IS_ZST {
zst_shrink!(self, offset);
self.ptr.as_ptr()
} else {
// SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
// which is guaranteed to not overflow an `isize`. Also, the resulting pointer
// is in bounds of `slice`, which fulfills the other requirements for `offset`.
- self.end = unsafe { self.end.offset(-offset) };
+ self.end = unsafe { self.end.sub(offset) };
self.end
}
}
@@ -140,7 +140,7 @@ macro_rules! iterator {
// since we check if the iterator is empty first.
unsafe {
assume(!self.ptr.as_ptr().is_null());
- if mem::size_of::<T>() != 0 {
+ if !<T>::IS_ZST {
assume(!self.end.is_null());
}
if is_empty!(self) {
@@ -166,7 +166,7 @@ macro_rules! iterator {
fn nth(&mut self, n: usize) -> Option<$elem> {
if n >= len!(self) {
// This iterator is now empty.
- if mem::size_of::<T>() == 0 {
+ if T::IS_ZST {
// We have to do it this way as `ptr` may never be 0, but `end`
// could be (due to wrapping).
self.end = self.ptr.as_ptr();
@@ -180,7 +180,7 @@ macro_rules! iterator {
}
// SAFETY: We are in bounds. `post_inc_start` does the right thing even for ZSTs.
unsafe {
- self.post_inc_start(n as isize);
+ self.post_inc_start(n);
Some(next_unchecked!(self))
}
}
@@ -189,7 +189,7 @@ macro_rules! iterator {
fn advance_by(&mut self, n: usize) -> Result<(), usize> {
let advance = cmp::min(len!(self), n);
// SAFETY: By construction, `advance` does not exceed `self.len()`.
- unsafe { self.post_inc_start(advance as isize) };
+ unsafe { self.post_inc_start(advance) };
if advance == n { Ok(()) } else { Err(advance) }
}
@@ -355,7 +355,7 @@ macro_rules! iterator {
// empty first.
unsafe {
assume(!self.ptr.as_ptr().is_null());
- if mem::size_of::<T>() != 0 {
+ if !<T>::IS_ZST {
assume(!self.end.is_null());
}
if is_empty!(self) {
@@ -375,7 +375,7 @@ macro_rules! iterator {
}
// SAFETY: We are in bounds. `pre_dec_end` does the right thing even for ZSTs.
unsafe {
- self.pre_dec_end(n as isize);
+ self.pre_dec_end(n);
Some(next_back_unchecked!(self))
}
}
@@ -384,7 +384,7 @@ macro_rules! iterator {
fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
let advance = cmp::min(len!(self), n);
// SAFETY: By construction, `advance` does not exceed `self.len()`.
- unsafe { self.pre_dec_end(advance as isize) };
+ unsafe { self.pre_dec_end(advance) };
if advance == n { Ok(()) } else { Err(advance) }
}
}
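// Illustrative sketch, not part of the patch: `nth`, `nth_back` and `advance_by` are
// the callers of `post_inc_start`/`pre_dec_end`, which now take `usize` offsets and
// use `add`/`sub` instead of signed `offset`.
fn demo_nth() {
    let s = [1, 2, 3, 4, 5];
    let mut it = s.iter();
    assert_eq!(it.nth(2), Some(&3)); // skips 1 and 2, yields 3
    assert_eq!(it.next(), Some(&4));

    let mut back = s.iter();
    assert_eq!(back.nth_back(1), Some(&4)); // skips 5 from the back, yields 4
}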
diff --git a/library/core/src/slice/memchr.rs b/library/core/src/slice/memchr.rs
index dffeaf6a8..c848c2e18 100644
--- a/library/core/src/slice/memchr.rs
+++ b/library/core/src/slice/memchr.rs
@@ -16,35 +16,51 @@ const USIZE_BYTES: usize = mem::size_of::<usize>();
/// bytes where the borrow propagated all the way to the most significant
/// bit."
#[inline]
-fn contains_zero_byte(x: usize) -> bool {
+const fn contains_zero_byte(x: usize) -> bool {
x.wrapping_sub(LO_USIZE) & !x & HI_USIZE != 0
}
#[cfg(target_pointer_width = "16")]
#[inline]
-fn repeat_byte(b: u8) -> usize {
+const fn repeat_byte(b: u8) -> usize {
(b as usize) << 8 | b as usize
}
#[cfg(not(target_pointer_width = "16"))]
#[inline]
-fn repeat_byte(b: u8) -> usize {
+const fn repeat_byte(b: u8) -> usize {
(b as usize) * (usize::MAX / 255)
}
/// Returns the first index matching the byte `x` in `text`.
#[must_use]
#[inline]
-pub fn memchr(x: u8, text: &[u8]) -> Option<usize> {
- // Fast path for small slices
+pub const fn memchr(x: u8, text: &[u8]) -> Option<usize> {
+ // Fast path for small slices.
if text.len() < 2 * USIZE_BYTES {
- return text.iter().position(|elt| *elt == x);
+ return memchr_naive(x, text);
}
- memchr_general_case(x, text)
+ memchr_aligned(x, text)
}
-fn memchr_general_case(x: u8, text: &[u8]) -> Option<usize> {
+#[inline]
+const fn memchr_naive(x: u8, text: &[u8]) -> Option<usize> {
+ let mut i = 0;
+
+ // FIXME(const-hack): Replace with `text.iter().pos(|c| *c == x)`.
+ while i < text.len() {
+ if text[i] == x {
+ return Some(i);
+ }
+
+ i += 1;
+ }
+
+ None
+}
+
+const fn memchr_aligned(x: u8, text: &[u8]) -> Option<usize> {
// Scan for a single byte value by reading two `usize` words at a time.
//
// Split `text` in three parts
@@ -59,7 +75,7 @@ fn memchr_general_case(x: u8, text: &[u8]) -> Option<usize> {
if offset > 0 {
offset = cmp::min(offset, len);
- if let Some(index) = text[..offset].iter().position(|elt| *elt == x) {
+ if let Some(index) = memchr_naive(x, &text[..offset]) {
return Some(index);
}
}
@@ -84,7 +100,8 @@ fn memchr_general_case(x: u8, text: &[u8]) -> Option<usize> {
}
// Find the byte after the point the body loop stopped.
- text[offset..].iter().position(|elt| *elt == x).map(|i| offset + i)
+ // FIXME(const-hack): Use `?` instead.
+ if let Some(i) = memchr_naive(x, &text[offset..]) { Some(offset + i) } else { None }
}
/// Returns the last index matching the byte `x` in `text`.
@@ -124,8 +141,8 @@ pub fn memrchr(x: u8, text: &[u8]) -> Option<usize> {
// SAFETY: offset starts at len - suffix.len(), as long as it is greater than
// min_aligned_offset (prefix.len()) the remaining distance is at least 2 * chunk_bytes.
unsafe {
- let u = *(ptr.offset(offset as isize - 2 * chunk_bytes as isize) as *const Chunk);
- let v = *(ptr.offset(offset as isize - chunk_bytes as isize) as *const Chunk);
+ let u = *(ptr.add(offset - 2 * chunk_bytes) as *const Chunk);
+ let v = *(ptr.add(offset - chunk_bytes) as *const Chunk);
// Break if there is a matching byte.
let zu = contains_zero_byte(u ^ repeated_x);
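// A hedged sketch, not from the patch, of the word-at-a-time trick `memchr_aligned`
// relies on: XOR-ing a word with the needle byte repeated turns matching bytes into
// zero bytes, and the classic "(x - 0x01..01) & !x & 0x80..80" test detects those.
// The constants below assume a 64-bit word; core uses `usize`-sized ones.
const LO: u64 = 0x0101_0101_0101_0101;
const HI: u64 = 0x8080_8080_8080_8080;

const fn has_zero_byte(x: u64) -> bool {
    x.wrapping_sub(LO) & !x & HI != 0
}

const fn repeat_byte(b: u8) -> u64 {
    (b as u64) * (u64::MAX / 255)
}

fn demo_swar_search() {
    let word = u64::from_le_bytes(*b"abcdefgh");
    assert!(has_zero_byte(word ^ repeat_byte(b'e'))); // 'e' occurs in the word
    assert!(!has_zero_byte(word ^ repeat_byte(b'z'))); // 'z' does not
}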
diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs
index e6ca6ef82..4f1bb1734 100644
--- a/library/core/src/slice/mod.rs
+++ b/library/core/src/slice/mod.rs
@@ -9,7 +9,7 @@
use crate::cmp::Ordering::{self, Greater, Less};
use crate::intrinsics::{assert_unsafe_precondition, exact_div};
use crate::marker::Copy;
-use crate::mem;
+use crate::mem::{self, SizedTypeProperties};
use crate::num::NonZeroUsize;
use crate::ops::{Bound, FnMut, OneSidedRange, Range, RangeBounds};
use crate::option::Option;
@@ -123,18 +123,11 @@ impl<T> [T] {
#[lang = "slice_len_fn"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_len", since = "1.39.0")]
+ #[rustc_allow_const_fn_unstable(ptr_metadata)]
#[inline]
#[must_use]
- // SAFETY: const sound because we transmute out the length field as a usize (which it must be)
pub const fn len(&self) -> usize {
- // FIXME: Replace with `crate::ptr::metadata(self)` when that is const-stable.
- // As of this writing this causes a "Const-stable functions can only call other
- // const-stable functions" error.
-
- // SAFETY: Accessing the value from the `PtrRepr` union is safe since *const T
- // and PtrComponents<T> have the same memory layouts. Only std can make this
- // guarantee.
- unsafe { crate::ptr::PtrRepr { const_ptr: self }.components.metadata }
+ ptr::metadata(self)
}
/// Returns `true` if the slice has a length of 0.
@@ -656,10 +649,14 @@ impl<T> [T] {
#[unstable(feature = "slice_swap_unchecked", issue = "88539")]
#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
pub const unsafe fn swap_unchecked(&mut self, a: usize, b: usize) {
- let ptr = self.as_mut_ptr();
+ let this = self;
+ let ptr = this.as_mut_ptr();
// SAFETY: caller has to guarantee that `a < self.len()` and `b < self.len()`
unsafe {
- assert_unsafe_precondition!(a < self.len() && b < self.len());
+ assert_unsafe_precondition!(
+ "slice::swap_unchecked requires that the indices are within the slice",
+ [T](a: usize, b: usize, this: &mut [T]) => a < this.len() && b < this.len()
+ );
ptr::swap(ptr.add(a), ptr.add(b));
}
}
@@ -674,8 +671,9 @@ impl<T> [T] {
/// assert!(v == [3, 2, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_reverse", issue = "100784")]
#[inline]
- pub fn reverse(&mut self) {
+ pub const fn reverse(&mut self) {
let half_len = self.len() / 2;
let Range { start, end } = self.as_mut_ptr_range();
@@ -698,9 +696,9 @@ impl<T> [T] {
revswap(front_half, back_half, half_len);
#[inline]
- fn revswap<T>(a: &mut [T], b: &mut [T], n: usize) {
- debug_assert_eq!(a.len(), n);
- debug_assert_eq!(b.len(), n);
+ const fn revswap<T>(a: &mut [T], b: &mut [T], n: usize) {
+ debug_assert!(a.len() == n);
+ debug_assert!(b.len() == n);
// Because this function is first compiled in isolation,
// this check tells LLVM that the indexing below is
@@ -708,8 +706,10 @@ impl<T> [T] {
// lengths of the slices are known -- it's removed.
let (a, b) = (&mut a[..n], &mut b[..n]);
- for i in 0..n {
+ let mut i = 0;
+ while i < n {
mem::swap(&mut a[i], &mut b[n - 1 - i]);
+ i += 1;
}
}
}
@@ -969,9 +969,13 @@ impl<T> [T] {
#[inline]
#[must_use]
pub unsafe fn as_chunks_unchecked<const N: usize>(&self) -> &[[T; N]] {
+ let this = self;
// SAFETY: Caller must guarantee that `N` is nonzero and exactly divides the slice length
let new_len = unsafe {
- assert_unsafe_precondition!(N != 0 && self.len() % N == 0);
+ assert_unsafe_precondition!(
+ "slice::as_chunks_unchecked requires `N != 0` and the slice to split exactly into `N`-element chunks",
+ [T](this: &[T], N: usize) => N != 0 && this.len() % N == 0
+ );
exact_div(self.len(), N)
};
// SAFETY: We cast a slice of `new_len * N` elements into
@@ -1108,10 +1112,14 @@ impl<T> [T] {
#[inline]
#[must_use]
pub unsafe fn as_chunks_unchecked_mut<const N: usize>(&mut self) -> &mut [[T; N]] {
+ let this = &*self;
// SAFETY: Caller must guarantee that `N` is nonzero and exactly divides the slice length
let new_len = unsafe {
- assert_unsafe_precondition!(N != 0 && self.len() % N == 0);
- exact_div(self.len(), N)
+ assert_unsafe_precondition!(
+ "slice::as_chunks_unchecked_mut requires `N != 0` and the slice to split exactly into `N`-element chunks",
+ [T](this: &[T], N: usize) => N != 0 && this.len() % N == 0
+ );
+ exact_div(this.len(), N)
};
// SAFETY: We cast a slice of `new_len * N` elements into
// a slice of `new_len` many `N` elements chunks.
@@ -1538,13 +1546,14 @@ impl<T> [T] {
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_slice_split_at_not_mut", issue = "101158")]
#[inline]
#[track_caller]
#[must_use]
- pub fn split_at(&self, mid: usize) -> (&[T], &[T]) {
+ pub const fn split_at(&self, mid: usize) -> (&[T], &[T]) {
assert!(mid <= self.len());
// SAFETY: `[ptr; mid]` and `[mid; len]` are inside `self`, which
- // fulfills the requirements of `from_raw_parts_mut`.
+ // fulfills the requirements of `split_at_unchecked`.
unsafe { self.split_at_unchecked(mid) }
}
@@ -1573,7 +1582,8 @@ impl<T> [T] {
#[inline]
#[track_caller]
#[must_use]
- pub fn split_at_mut(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
+ #[rustc_const_unstable(feature = "const_slice_split_at_mut", issue = "101804")]
+ pub const fn split_at_mut(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
assert!(mid <= self.len());
// SAFETY: `[ptr; mid]` and `[mid; len]` are inside `self`, which
// fulfills the requirements of `from_raw_parts_mut`.
@@ -1623,11 +1633,19 @@ impl<T> [T] {
/// }
/// ```
#[unstable(feature = "slice_split_at_unchecked", reason = "new API", issue = "76014")]
+ #[rustc_const_unstable(feature = "slice_split_at_unchecked", issue = "76014")]
#[inline]
#[must_use]
- pub unsafe fn split_at_unchecked(&self, mid: usize) -> (&[T], &[T]) {
+ pub const unsafe fn split_at_unchecked(&self, mid: usize) -> (&[T], &[T]) {
+ // HACK: the const function `from_raw_parts` is used to make this
+ // function const; previously the implementation used
+ // `(self.get_unchecked(..mid), self.get_unchecked(mid..))`
+
+ let len = self.len();
+ let ptr = self.as_ptr();
+
// SAFETY: Caller has to check that `0 <= mid <= self.len()`
- unsafe { (self.get_unchecked(..mid), self.get_unchecked(mid..)) }
+ unsafe { (from_raw_parts(ptr, mid), from_raw_parts(ptr.add(mid), len - mid)) }
}
/// Divides one mutable slice into two at an index, without doing bounds checking.
@@ -1664,9 +1682,10 @@ impl<T> [T] {
/// assert_eq!(v, [1, 2, 3, 4, 5, 6]);
/// ```
#[unstable(feature = "slice_split_at_unchecked", reason = "new API", issue = "76014")]
+ #[rustc_const_unstable(feature = "const_slice_split_at_mut", issue = "101804")]
#[inline]
#[must_use]
- pub unsafe fn split_at_mut_unchecked(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
+ pub const unsafe fn split_at_mut_unchecked(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
let len = self.len();
let ptr = self.as_mut_ptr();
@@ -1675,7 +1694,10 @@ impl<T> [T] {
// `[ptr; mid]` and `[mid; len]` are not overlapping, so returning a mutable reference
// is fine.
unsafe {
- assert_unsafe_precondition!(mid <= len);
+ assert_unsafe_precondition!(
+ "slice::split_at_mut_unchecked requires the index to be within the slice",
+ (mid: usize, len: usize) => mid <= len
+ );
(from_raw_parts_mut(ptr, mid), from_raw_parts_mut(ptr.add(mid), len - mid))
}
}
@@ -2059,7 +2081,7 @@ impl<T> [T] {
SplitN::new(self.split(pred), n)
}
- /// Returns an iterator over subslices separated by elements that match
+ /// Returns an iterator over mutable subslices separated by elements that match
/// `pred`, limited to returning at most `n` items. The matched element is
/// not contained in the subslices.
///
@@ -2309,7 +2331,7 @@ impl<T> [T] {
}
/// Binary searches this slice for a given element.
- /// This behaves similary to [`contains`] if this slice is sorted.
+ /// This behaves similarly to [`contains`] if this slice is sorted.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
@@ -2342,6 +2364,28 @@ impl<T> [T] {
/// assert!(match r { Ok(1..=4) => true, _ => false, });
/// ```
///
+ /// If you want to find that whole *range* of matching items, rather than
+ /// an arbitrary matching one, that can be done using [`partition_point`]:
+ /// ```
+ /// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
+ ///
+ /// let low = s.partition_point(|x| x < &1);
+ /// assert_eq!(low, 1);
+ /// let high = s.partition_point(|x| x <= &1);
+ /// assert_eq!(high, 5);
+ /// let r = s.binary_search(&1);
+ /// assert!((low..high).contains(&r.unwrap()));
+ ///
+ /// assert!(s[..low].iter().all(|&x| x < 1));
+ /// assert!(s[low..high].iter().all(|&x| x == 1));
+ /// assert!(s[high..].iter().all(|&x| x > 1));
+ ///
+ /// // For something not found, the "range" of equal items is empty
+ /// assert_eq!(s.partition_point(|x| x < &11), 9);
+ /// assert_eq!(s.partition_point(|x| x <= &11), 9);
+ /// assert_eq!(s.binary_search(&11), Err(9));
+ /// ```
+ ///
/// If you want to insert an item to a sorted vector, while maintaining
/// sort order, consider using [`partition_point`]:
///
@@ -2409,15 +2453,20 @@ impl<T> [T] {
where
F: FnMut(&'a T) -> Ordering,
{
+ // INVARIANTS:
+ // - 0 <= left <= left + size = right <= self.len()
+ // - f returns Less for everything in self[..left]
+ // - f returns Greater for everything in self[right..]
let mut size = self.len();
let mut left = 0;
let mut right = size;
while left < right {
let mid = left + size / 2;
- // SAFETY: the call is made safe by the following invariants:
- // - `mid >= 0`
- // - `mid < size`: `mid` is limited by `[left; right)` bound.
+ // SAFETY: the while condition means `size` is strictly positive, so
+ // `size/2 < size`. Thus `left + size/2 < left + size`, which
+ // coupled with the `left + size <= self.len()` invariant means
+ // we have `left + size/2 < self.len()`, and this is in-bounds.
let cmp = f(unsafe { self.get_unchecked(mid) });
// The reason why we use if/else control flow rather than match
@@ -2435,6 +2484,10 @@ impl<T> [T] {
size = right - left;
}
+
+ // SAFETY: directly true from the overall invariant.
+ // Note that this is `<=`, unlike the assume in the `Ok` path.
+ unsafe { crate::intrinsics::assume(left <= self.len()) };
Err(left)
}
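// Illustrative, not part of the patch: the invariant encoded by the new
// `assume(left <= self.len())` is visible through the public API, since an `Err`
// from `binary_search` is always a valid insertion index, never past the length.
fn demo_insertion_index() {
    let s = [1, 3, 5, 7];
    assert_eq!(s.binary_search(&6), Err(3)); // 6 would be inserted before 7
    assert_eq!(s.binary_search(&9), Err(4)); // equal to s.len(), never beyond it
}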
@@ -2525,7 +2578,7 @@ impl<T> [T] {
where
T: Ord,
{
- sort::quicksort(self, |a, b| a.lt(b));
+ sort::quicksort(self, T::lt);
}
/// Sorts the slice with a comparator function, but might not preserve the order of equal
@@ -2628,9 +2681,10 @@ impl<T> [T] {
/// less than or equal to any value at a position `j > index`. Additionally, this reordering is
/// unstable (i.e. any number of equal elements may end up at position `index`), in-place
/// (i.e. does not allocate), and *O*(*n*) worst-case. This function is also known as "kth
- /// element" in other libraries. It returns a triplet of the following values: all elements less
- /// than the one at the given index, the value at the given index, and all elements greater than
- /// the one at the given index.
+ /// element" in other libraries. It returns a triplet of the following from the reordered slice:
+ /// the subslice prior to `index`, the element at `index`, and the subslice after `index`;
+ /// accordingly, the values in those two subslices will respectively all be less-than-or-equal-to
+ /// and greater-than-or-equal-to the value of the element at `index`.
///
/// # Current implementation
///
@@ -2664,8 +2718,7 @@ impl<T> [T] {
where
T: Ord,
{
- let mut f = |a: &T, b: &T| a.lt(b);
- sort::partition_at_index(self, index, &mut f)
+ sort::partition_at_index(self, index, T::lt)
}
/// Reorder the slice with a comparator function such that the element at `index` is at its
@@ -2675,10 +2728,11 @@ impl<T> [T] {
/// less than or equal to any value at a position `j > index` using the comparator function.
/// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at
/// position `index`), in-place (i.e. does not allocate), and *O*(*n*) worst-case. This function
- /// is also known as "kth element" in other libraries. It returns a triplet of the following
- /// values: all elements less than the one at the given index, the value at the given index,
- /// and all elements greater than the one at the given index, using the provided comparator
- /// function.
+ /// is also known as "kth element" in other libraries. It returns a triplet of the following from
+ /// the slice reordered according to the provided comparator function: the subslice prior to
+ /// `index`, the element at `index`, and the subslice after `index`; accordingly, the values in
+ /// those two subslices will respectively all be less-than-or-equal-to and greater-than-or-equal-to
+ /// the value of the element at `index`.
///
/// # Current implementation
///
@@ -2716,8 +2770,7 @@ impl<T> [T] {
where
F: FnMut(&T, &T) -> Ordering,
{
- let mut f = |a: &T, b: &T| compare(a, b) == Less;
- sort::partition_at_index(self, index, &mut f)
+ sort::partition_at_index(self, index, |a: &T, b: &T| compare(a, b) == Less)
}
/// Reorder the slice with a key extraction function such that the element at `index` is at its
@@ -2727,10 +2780,11 @@ impl<T> [T] {
/// less than or equal to any value at a position `j > index` using the key extraction function.
/// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at
/// position `index`), in-place (i.e. does not allocate), and *O*(*n*) worst-case. This function
- /// is also known as "kth element" in other libraries. It returns a triplet of the following
- /// values: all elements less than the one at the given index, the value at the given index, and
- /// all elements greater than the one at the given index, using the provided key extraction
- /// function.
+ /// is also known as "kth element" in other libraries. It returns a triplet of the following from
+ /// the slice reordered according to the provided key extraction function: the subslice prior to
+ /// `index`, the element at `index`, and the subslice after `index`; accordingly, the values in
+ /// those two subslices will respectively all be less-than-or-equal-to and greater-than-or-equal-to
+ /// the value of the element at `index`.
///
/// # Current implementation
///
@@ -2769,8 +2823,7 @@ impl<T> [T] {
F: FnMut(&T) -> K,
K: Ord,
{
- let mut g = |a: &T, b: &T| f(a).lt(&f(b));
- sort::partition_at_index(self, index, &mut g)
+ sort::partition_at_index(self, index, |a: &T, b: &T| f(a).lt(&f(b)))
}
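// Illustrative, not part of the patch: the reworded "triplet" description above, seen
// through the stable `select_nth_unstable` entry point.
fn demo_select_nth() {
    let mut v = [-5, 4, 1, -3, 2];
    let (lesser, median, greater) = v.select_nth_unstable(2);
    assert_eq!(*median, 1); // the element that ends up at index 2 after reordering
    assert!(lesser.iter().all(|&x| x <= 1));
    assert!(greater.iter().all(|&x| x >= 1));
}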
/// Moves all consecutive repeated elements to the end of the slice according to the
@@ -2921,7 +2974,7 @@ impl<T> [T] {
let prev_ptr_write = ptr.add(next_write - 1);
if !same_bucket(&mut *ptr_read, &mut *prev_ptr_write) {
if next_read != next_write {
- let ptr_write = prev_ptr_write.offset(1);
+ let ptr_write = prev_ptr_write.add(1);
mem::swap(&mut *ptr_read, &mut *ptr_write);
}
next_write += 1;
@@ -3444,7 +3497,7 @@ impl<T> [T] {
#[must_use]
pub unsafe fn align_to<U>(&self) -> (&[T], &[U], &[T]) {
// Note that most of this function will be constant-evaluated,
- if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 {
+ if U::IS_ZST || T::IS_ZST {
// handle ZSTs specially, which is – don't handle them at all.
return (self, &[], &[]);
}
@@ -3505,7 +3558,7 @@ impl<T> [T] {
#[must_use]
pub unsafe fn align_to_mut<U>(&mut self) -> (&mut [T], &mut [U], &mut [T]) {
// Note that most of this function will be constant-evaluated,
- if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 {
+ if U::IS_ZST || T::IS_ZST {
// handle ZSTs specially, which is – don't handle them at all.
return (self, &mut [], &mut []);
}
@@ -3518,7 +3571,7 @@ impl<T> [T] {
// alignment targeted for U.
// `crate::ptr::align_offset` is called with a correctly aligned and
// valid pointer `ptr` (it comes from a reference to `self`) and with
- // a size that is a power of two (since it comes from the alignement for U),
+ // a size that is a power of two (since it comes from the alignment for U),
// satisfying its safety constraints.
let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
if offset > self.len() {
@@ -3761,6 +3814,16 @@ impl<T> [T] {
/// assert!(v[i..].iter().all(|&x| !(x < 5)));
/// ```
///
+ /// If all elements of the slice match the predicate, including if the slice
+ /// is empty, then the length of the slice will be returned:
+ ///
+ /// ```
+ /// let a = [2, 4, 8];
+ /// assert_eq!(a.partition_point(|x| x < &100), a.len());
+ /// let a: [i32; 0] = [];
+ /// assert_eq!(a.partition_point(|x| x < &100), 0);
+ /// ```
+ ///
/// If you want to insert an item to a sorted vector, while maintaining
/// sort order:
///
@@ -4051,7 +4114,7 @@ impl<T, const N: usize> [[T; N]] {
/// ```
#[unstable(feature = "slice_flatten", issue = "95629")]
pub fn flatten(&self) -> &[T] {
- let len = if crate::mem::size_of::<T>() == 0 {
+ let len = if T::IS_ZST {
self.len().checked_mul(N).expect("slice len overflow")
} else {
// SAFETY: `self.len() * N` cannot overflow because `self` is
@@ -4089,7 +4152,7 @@ impl<T, const N: usize> [[T; N]] {
/// ```
#[unstable(feature = "slice_flatten", issue = "95629")]
pub fn flatten_mut(&mut self) -> &mut [T] {
- let len = if crate::mem::size_of::<T>() == 0 {
+ let len = if T::IS_ZST {
self.len().checked_mul(N).expect("slice len overflow")
} else {
// SAFETY: `self.len() * N` cannot overflow because `self` is
@@ -4101,7 +4164,6 @@ impl<T, const N: usize> [[T; N]] {
}
}
-#[cfg(not(bootstrap))]
#[cfg(not(test))]
impl [f32] {
/// Sorts the slice of floats.
@@ -4131,7 +4193,6 @@ impl [f32] {
}
}
-#[cfg(not(bootstrap))]
#[cfg(not(test))]
impl [f64] {
/// Sorts the slice of floats.
diff --git a/library/core/src/slice/raw.rs b/library/core/src/slice/raw.rs
index 107e71ab6..052fd34d0 100644
--- a/library/core/src/slice/raw.rs
+++ b/library/core/src/slice/raw.rs
@@ -1,7 +1,9 @@
//! Free functions to create `&[T]` and `&mut [T]`.
use crate::array;
-use crate::intrinsics::{assert_unsafe_precondition, is_aligned_and_not_null};
+use crate::intrinsics::{
+ assert_unsafe_precondition, is_aligned_and_not_null, is_valid_allocation_size,
+};
use crate::ops::Range;
use crate::ptr;
@@ -91,8 +93,9 @@ pub const unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T]
// SAFETY: the caller must uphold the safety contract for `from_raw_parts`.
unsafe {
assert_unsafe_precondition!(
- is_aligned_and_not_null(data)
- && crate::mem::size_of::<T>().saturating_mul(len) <= isize::MAX as usize
+ "slice::from_raw_parts requires the pointer to be aligned and non-null, and the total size of the slice not to exceed `isize::MAX`",
+ [T](data: *const T, len: usize) => is_aligned_and_not_null(data)
+ && is_valid_allocation_size::<T>(len)
);
&*ptr::slice_from_raw_parts(data, len)
}
@@ -135,8 +138,9 @@ pub const unsafe fn from_raw_parts_mut<'a, T>(data: *mut T, len: usize) -> &'a m
// SAFETY: the caller must uphold the safety contract for `from_raw_parts_mut`.
unsafe {
assert_unsafe_precondition!(
- is_aligned_and_not_null(data)
- && crate::mem::size_of::<T>().saturating_mul(len) <= isize::MAX as usize
+ "slice::from_raw_parts_mut requires the pointer to be aligned and non-null, and the total size of the slice not to exceed `isize::MAX`",
+ [T](data: *mut T, len: usize) => is_aligned_and_not_null(data)
+ && is_valid_allocation_size::<T>(len)
);
&mut *ptr::slice_from_raw_parts_mut(data, len)
}
@@ -188,6 +192,10 @@ pub const fn from_mut<T>(s: &mut T) -> &mut [T] {
///
/// Note that a range created from [`slice::as_ptr_range`] fulfills these requirements.
///
+/// # Panics
+///
+/// This function panics if `T` is a Zero-Sized Type (“ZST”).
+///
/// # Caveat
///
/// The lifetime for the returned slice is inferred from its usage. To
@@ -219,9 +227,15 @@ pub const unsafe fn from_ptr_range<'a, T>(range: Range<*const T>) -> &'a [T] {
unsafe { from_raw_parts(range.start, range.end.sub_ptr(range.start)) }
}
-/// Performs the same functionality as [`from_ptr_range`], except that a
+/// Forms a mutable slice from a pointer range.
+///
+/// This is the same functionality as [`from_ptr_range`], except that a
/// mutable slice is returned.
///
+/// This function is useful for interacting with foreign interfaces which
+/// use two pointers to refer to a range of elements in memory, as is
+/// common in C++.
+///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
@@ -247,6 +261,18 @@ pub const unsafe fn from_ptr_range<'a, T>(range: Range<*const T>) -> &'a [T] {
///
/// Note that a range created from [`slice::as_mut_ptr_range`] fulfills these requirements.
///
+/// # Panics
+///
+/// This function panics if `T` is a Zero-Sized Type (“ZST”).
+///
+/// # Caveat
+///
+/// The lifetime for the returned slice is inferred from its usage. To
+/// prevent accidental misuse, it's suggested to tie the lifetime to whichever
+/// source lifetime is safe in the context, such as by providing a helper
+/// function taking the lifetime of a host value for the slice, or by explicit
+/// annotation.
+///
/// # Examples
///
/// ```
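// A hedged sketch, not from the patch, of the contract the reworded precondition
// message describes for `from_raw_parts`: the pointer must be aligned and non-null,
// and the total size `len * size_of::<T>()` must not exceed `isize::MAX`.
fn demo_from_raw_parts() {
    let v = vec![1u32, 2, 3, 4];
    let ptr = v.as_ptr();
    // SAFETY: `ptr` comes from a live Vec of length 4, so it is aligned, non-null,
    // and the four elements are initialized within a single allocation.
    let s = unsafe { std::slice::from_raw_parts(ptr, 4) };
    assert_eq!(s, &[1, 2, 3, 4]);
}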
diff --git a/library/core/src/slice/rotate.rs b/library/core/src/slice/rotate.rs
index 4589c6c0f..fa8c238f8 100644
--- a/library/core/src/slice/rotate.rs
+++ b/library/core/src/slice/rotate.rs
@@ -1,5 +1,5 @@
use crate::cmp;
-use crate::mem::{self, MaybeUninit};
+use crate::mem::{self, MaybeUninit, SizedTypeProperties};
use crate::ptr;
/// Rotates the range `[mid-left, mid+right)` such that the element at `mid` becomes the first
@@ -63,7 +63,7 @@ use crate::ptr;
/// when `left < right` the swapping happens from the left instead.
pub unsafe fn ptr_rotate<T>(mut left: usize, mut mid: *mut T, mut right: usize) {
type BufType = [usize; 32];
- if mem::size_of::<T>() == 0 {
+ if T::IS_ZST {
return;
}
loop {
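// Illustrative, not part of the patch: `ptr_rotate` backs the stable `rotate_left`
// and `rotate_right` methods; the new `T::IS_ZST` early return only affects
// zero-sized elements, for which rotation has no observable effect.
fn demo_rotate() {
    let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
    a.rotate_left(2);
    assert_eq!(a, ['c', 'd', 'e', 'f', 'a', 'b']);
}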
diff --git a/library/core/src/slice/sort.rs b/library/core/src/slice/sort.rs
index 6a201834b..87f77b7f2 100644
--- a/library/core/src/slice/sort.rs
+++ b/library/core/src/slice/sort.rs
@@ -7,7 +7,7 @@
//! stable sorting implementation.
use crate::cmp;
-use crate::mem::{self, MaybeUninit};
+use crate::mem::{self, MaybeUninit, SizedTypeProperties};
use crate::ptr;
/// When dropped, copies from `src` into `dest`.
@@ -326,8 +326,8 @@ where
unsafe {
// Branchless comparison.
*end_l = i as u8;
- end_l = end_l.offset(!is_less(&*elem, pivot) as isize);
- elem = elem.offset(1);
+ end_l = end_l.add(!is_less(&*elem, pivot) as usize);
+ elem = elem.add(1);
}
}
}
@@ -352,9 +352,9 @@ where
// Plus, `block_r` was asserted to be less than `BLOCK` and `elem` will therefore at most be pointing to the beginning of the slice.
unsafe {
// Branchless comparison.
- elem = elem.offset(-1);
+ elem = elem.sub(1);
*end_r = i as u8;
- end_r = end_r.offset(is_less(&*elem, pivot) as isize);
+ end_r = end_r.add(is_less(&*elem, pivot) as usize);
}
}
}
@@ -365,12 +365,12 @@ where
if count > 0 {
macro_rules! left {
() => {
- l.offset(*start_l as isize)
+ l.add(usize::from(*start_l))
};
}
macro_rules! right {
() => {
- r.offset(-(*start_r as isize) - 1)
+ r.sub(usize::from(*start_r) + 1)
};
}
@@ -398,16 +398,16 @@ where
ptr::copy_nonoverlapping(right!(), left!(), 1);
for _ in 1..count {
- start_l = start_l.offset(1);
+ start_l = start_l.add(1);
ptr::copy_nonoverlapping(left!(), right!(), 1);
- start_r = start_r.offset(1);
+ start_r = start_r.add(1);
ptr::copy_nonoverlapping(right!(), left!(), 1);
}
ptr::copy_nonoverlapping(&tmp, right!(), 1);
mem::forget(tmp);
- start_l = start_l.offset(1);
- start_r = start_r.offset(1);
+ start_l = start_l.add(1);
+ start_r = start_r.add(1);
}
}
@@ -420,7 +420,7 @@ where
// safe. Otherwise, the debug assertions in the `is_done` case guarantee that
// `width(l, r) == block_l + block_r`, namely, that the block sizes have been adjusted to account
// for the smaller number of remaining elements.
- l = unsafe { l.offset(block_l as isize) };
+ l = unsafe { l.add(block_l) };
}
if start_r == end_r {
@@ -428,7 +428,7 @@ where
// SAFETY: Same argument as [block-width-guarantee]. Either this is a full block `2*BLOCK`-wide,
// or `block_r` has been adjusted for the last handful of elements.
- r = unsafe { r.offset(-(block_r as isize)) };
+ r = unsafe { r.sub(block_r) };
}
if is_done {
@@ -457,9 +457,9 @@ where
// - `offsets_l` contains valid offsets into `v` collected during the partitioning of
// the last block, so the `l.offset` calls are valid.
unsafe {
- end_l = end_l.offset(-1);
- ptr::swap(l.offset(*end_l as isize), r.offset(-1));
- r = r.offset(-1);
+ end_l = end_l.sub(1);
+ ptr::swap(l.add(usize::from(*end_l)), r.sub(1));
+ r = r.sub(1);
}
}
width(v.as_mut_ptr(), r)
@@ -470,9 +470,9 @@ where
while start_r < end_r {
// SAFETY: See the reasoning in [remaining-elements-safety].
unsafe {
- end_r = end_r.offset(-1);
- ptr::swap(l, r.offset(-(*end_r as isize) - 1));
- l = l.offset(1);
+ end_r = end_r.sub(1);
+ ptr::swap(l, r.sub(usize::from(*end_r) + 1));
+ l = l.add(1);
}
}
width(v.as_mut_ptr(), l)
@@ -813,7 +813,7 @@ where
F: FnMut(&T, &T) -> bool,
{
// Sorting has no meaningful behavior on zero-sized types.
- if mem::size_of::<T>() == 0 {
+ if T::IS_ZST {
return;
}
@@ -898,7 +898,7 @@ where
panic!("partition_at_index index {} greater than length of slice {}", index, v.len());
}
- if mem::size_of::<T>() == 0 {
+ if T::IS_ZST {
// Sorting has no meaningful behavior on zero-sized types. Do nothing.
} else if index == v.len() - 1 {
// Find max element and place it in the last position of the array. We're free to use
diff --git a/library/core/src/str/error.rs b/library/core/src/str/error.rs
index 4e569fcc8..a11b5add4 100644
--- a/library/core/src/str/error.rs
+++ b/library/core/src/str/error.rs
@@ -1,5 +1,6 @@
//! Defines utf8 error type.
+use crate::error::Error;
use crate::fmt;
/// Errors which can occur when attempting to interpret a sequence of [`u8`]
@@ -122,6 +123,14 @@ impl fmt::Display for Utf8Error {
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Error for Utf8Error {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "invalid utf-8: corrupt contents"
+ }
+}
+
/// An error returned when parsing a `bool` using [`from_str`] fails
///
/// [`from_str`]: super::FromStr::from_str
@@ -136,3 +145,11 @@ impl fmt::Display for ParseBoolError {
"provided string was not `true` or `false`".fmt(f)
}
}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Error for ParseBoolError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "failed to parse bool"
+ }
+}
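// Illustrative, not part of the patch: both error types are usable through the
// `Error` trait; on stable this is observable via `std::error::Error`.
fn demo_error_impls() {
    use std::error::Error;

    let bool_err = "maybe".parse::<bool>().unwrap_err();
    let _as_dyn: &dyn Error = &bool_err;
    assert_eq!(bool_err.to_string(), "provided string was not `true` or `false`");

    let utf8_err = std::str::from_utf8(b"\xF0\x90\x80").unwrap_err();
    let _as_dyn: &dyn Error = &utf8_err;
    assert_eq!(utf8_err.valid_up_to(), 0); // the very first byte starts an incomplete sequence
}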
diff --git a/library/core/src/str/lossy.rs b/library/core/src/str/lossy.rs
index 6ec1c9390..59f873d12 100644
--- a/library/core/src/str/lossy.rs
+++ b/library/core/src/str/lossy.rs
@@ -1,51 +1,170 @@
-use crate::char;
-use crate::fmt::{self, Write};
-use crate::mem;
+use crate::fmt;
+use crate::fmt::Formatter;
+use crate::fmt::Write;
+use crate::iter::FusedIterator;
use super::from_utf8_unchecked;
use super::validations::utf8_char_width;
-/// Lossy UTF-8 string.
-#[unstable(feature = "str_internals", issue = "none")]
-pub struct Utf8Lossy {
- bytes: [u8],
+/// An item returned by the [`Utf8Chunks`] iterator.
+///
+/// A `Utf8Chunk` stores a sequence of [`u8`] up to the first broken character
+/// when decoding a UTF-8 string.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(utf8_chunks)]
+///
+/// use std::str::Utf8Chunks;
+///
+/// // An invalid UTF-8 string
+/// let bytes = b"foo\xF1\x80bar";
+///
+/// // Decode the first `Utf8Chunk`
+/// let chunk = Utf8Chunks::new(bytes).next().unwrap();
+///
+/// // The first three characters are valid UTF-8
+/// assert_eq!("foo", chunk.valid());
+///
+/// // The fourth character is broken
+/// assert_eq!(b"\xF1\x80", chunk.invalid());
+/// ```
+#[unstable(feature = "utf8_chunks", issue = "99543")]
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct Utf8Chunk<'a> {
+ valid: &'a str,
+ invalid: &'a [u8],
}
-impl Utf8Lossy {
+impl<'a> Utf8Chunk<'a> {
+ /// Returns the next validated UTF-8 substring.
+ ///
+ /// This substring can be empty at the start of the string or between
+ /// broken UTF-8 characters.
#[must_use]
- pub fn from_bytes(bytes: &[u8]) -> &Utf8Lossy {
- // SAFETY: Both use the same memory layout, and UTF-8 correctness isn't required.
- unsafe { mem::transmute(bytes) }
+ #[unstable(feature = "utf8_chunks", issue = "99543")]
+ pub fn valid(&self) -> &'a str {
+ self.valid
}
- pub fn chunks(&self) -> Utf8LossyChunksIter<'_> {
- Utf8LossyChunksIter { source: &self.bytes }
+ /// Returns the invalid sequence that caused a failure.
+ ///
+ /// The returned slice will have a maximum length of 3 and starts after the
+ /// substring given by [`valid`]. Decoding will resume after this sequence.
+ ///
+ /// If empty, this is the last chunk in the string. If non-empty, an
+ /// unexpected byte was encountered or the end of the input was reached
+ /// unexpectedly.
+ ///
+ /// Lossy decoding would replace this sequence with [`U+FFFD REPLACEMENT
+ /// CHARACTER`].
+ ///
+ /// [`valid`]: Self::valid
+ /// [`U+FFFD REPLACEMENT CHARACTER`]: crate::char::REPLACEMENT_CHARACTER
+ #[must_use]
+ #[unstable(feature = "utf8_chunks", issue = "99543")]
+ pub fn invalid(&self) -> &'a [u8] {
+ self.invalid
}
}
-/// Iterator over lossy UTF-8 string
-#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[must_use]
+#[unstable(feature = "str_internals", issue = "none")]
+pub struct Debug<'a>(&'a [u8]);
+
#[unstable(feature = "str_internals", issue = "none")]
-#[allow(missing_debug_implementations)]
-pub struct Utf8LossyChunksIter<'a> {
+impl fmt::Debug for Debug<'_> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ f.write_char('"')?;
+
+ for chunk in Utf8Chunks::new(self.0) {
+ // Valid part.
+ // Here we partially parse UTF-8 again which is suboptimal.
+ {
+ let valid = chunk.valid();
+ let mut from = 0;
+ for (i, c) in valid.char_indices() {
+ let esc = c.escape_debug();
+ // If char needs escaping, flush backlog so far and write, else skip
+ if esc.len() != 1 {
+ f.write_str(&valid[from..i])?;
+ for c in esc {
+ f.write_char(c)?;
+ }
+ from = i + c.len_utf8();
+ }
+ }
+ f.write_str(&valid[from..])?;
+ }
+
+ // Broken parts of string as hex escape.
+ for &b in chunk.invalid() {
+ write!(f, "\\x{:02X}", b)?;
+ }
+ }
+
+ f.write_char('"')
+ }
+}
+
+/// An iterator used to decode a slice of mostly UTF-8 bytes to string slices
+/// ([`&str`]) and byte slices ([`&[u8]`][byteslice]).
+///
+/// If you want a simple conversion from UTF-8 byte slices to string slices,
+/// [`from_utf8`] is easier to use.
+///
+/// [byteslice]: slice
+/// [`from_utf8`]: super::from_utf8
+///
+/// # Examples
+///
+/// This can be used to create functionality similar to
+/// [`String::from_utf8_lossy`] without allocating heap memory:
+///
+/// ```
+/// #![feature(utf8_chunks)]
+///
+/// use std::str::Utf8Chunks;
+///
+/// fn from_utf8_lossy<F>(input: &[u8], mut push: F) where F: FnMut(&str) {
+/// for chunk in Utf8Chunks::new(input) {
+/// push(chunk.valid());
+///
+/// if !chunk.invalid().is_empty() {
+/// push("\u{FFFD}");
+/// }
+/// }
+/// }
+/// ```
+///
+/// [`String::from_utf8_lossy`]: ../../std/string/struct.String.html#method.from_utf8_lossy
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[unstable(feature = "utf8_chunks", issue = "99543")]
+#[derive(Clone)]
+pub struct Utf8Chunks<'a> {
source: &'a [u8],
}
-#[unstable(feature = "str_internals", issue = "none")]
-#[derive(PartialEq, Eq, Debug)]
-pub struct Utf8LossyChunk<'a> {
- /// Sequence of valid chars.
- /// Can be empty between broken UTF-8 chars.
- pub valid: &'a str,
- /// Single broken char, empty if none.
- /// Empty iff iterator item is last.
- pub broken: &'a [u8],
+impl<'a> Utf8Chunks<'a> {
+ /// Creates a new iterator to decode the bytes.
+ #[unstable(feature = "utf8_chunks", issue = "99543")]
+ pub fn new(bytes: &'a [u8]) -> Self {
+ Self { source: bytes }
+ }
+
+ #[doc(hidden)]
+ #[unstable(feature = "str_internals", issue = "none")]
+ pub fn debug(&self) -> Debug<'_> {
+ Debug(self.source)
+ }
}
-impl<'a> Iterator for Utf8LossyChunksIter<'a> {
- type Item = Utf8LossyChunk<'a>;
+#[unstable(feature = "utf8_chunks", issue = "99543")]
+impl<'a> Iterator for Utf8Chunks<'a> {
+ type Item = Utf8Chunk<'a>;
- fn next(&mut self) -> Option<Utf8LossyChunk<'a>> {
+ fn next(&mut self) -> Option<Utf8Chunk<'a>> {
if self.source.is_empty() {
return None;
}
@@ -130,71 +249,22 @@ impl<'a> Iterator for Utf8LossyChunksIter<'a> {
// SAFETY: `valid_up_to <= i` because it is only ever assigned via
// `valid_up_to = i` and `i` only increases.
- let (valid, broken) = unsafe { inspected.split_at_unchecked(valid_up_to) };
+ let (valid, invalid) = unsafe { inspected.split_at_unchecked(valid_up_to) };
- Some(Utf8LossyChunk {
+ Some(Utf8Chunk {
// SAFETY: All bytes up to `valid_up_to` are valid UTF-8.
valid: unsafe { from_utf8_unchecked(valid) },
- broken,
+ invalid,
})
}
}
-impl fmt::Display for Utf8Lossy {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- // If we're the empty string then our iterator won't actually yield
- // anything, so perform the formatting manually
- if self.bytes.is_empty() {
- return "".fmt(f);
- }
-
- for Utf8LossyChunk { valid, broken } in self.chunks() {
- // If we successfully decoded the whole chunk as a valid string then
- // we can return a direct formatting of the string which will also
- // respect various formatting flags if possible.
- if valid.len() == self.bytes.len() {
- assert!(broken.is_empty());
- return valid.fmt(f);
- }
-
- f.write_str(valid)?;
- if !broken.is_empty() {
- f.write_char(char::REPLACEMENT_CHARACTER)?;
- }
- }
- Ok(())
- }
-}
-
-impl fmt::Debug for Utf8Lossy {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.write_char('"')?;
+#[unstable(feature = "utf8_chunks", issue = "99543")]
+impl FusedIterator for Utf8Chunks<'_> {}
- for Utf8LossyChunk { valid, broken } in self.chunks() {
- // Valid part.
- // Here we partially parse UTF-8 again which is suboptimal.
- {
- let mut from = 0;
- for (i, c) in valid.char_indices() {
- let esc = c.escape_debug();
- // If char needs escaping, flush backlog so far and write, else skip
- if esc.len() != 1 {
- f.write_str(&valid[from..i])?;
- for c in esc {
- f.write_char(c)?;
- }
- from = i + c.len_utf8();
- }
- }
- f.write_str(&valid[from..])?;
- }
-
- // Broken parts of string as hex escape.
- for &b in broken {
- write!(f, "\\x{:02x}", b)?;
- }
- }
-
- f.write_char('"')
+#[unstable(feature = "utf8_chunks", issue = "99543")]
+impl fmt::Debug for Utf8Chunks<'_> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Utf8Chunks").field("source", &self.debug()).finish()
}
}
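// A hedged, nightly-only sketch, not from the patch (requires a crate-level
// `#![feature(utf8_chunks)]`): decoding b"foo\xF1\x80bar" yields a chunk with valid
// "foo" and invalid [0xF1, 0x80], then a final chunk with valid "bar" and an empty
// invalid part, which is exactly what a lossy decode replaces with U+FFFD.
fn demo_utf8_chunks() {
    use std::str::Utf8Chunks;

    let mut lossy = String::new();
    for chunk in Utf8Chunks::new(b"foo\xF1\x80bar") {
        lossy.push_str(chunk.valid());
        if !chunk.invalid().is_empty() {
            lossy.push('\u{FFFD}');
        }
    }
    assert_eq!(lossy, "foo\u{FFFD}bar");
}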
diff --git a/library/core/src/str/mod.rs b/library/core/src/str/mod.rs
index c4f2e283e..fbc0fc397 100644
--- a/library/core/src/str/mod.rs
+++ b/library/core/src/str/mod.rs
@@ -22,9 +22,9 @@ use crate::slice::{self, SliceIndex};
pub mod pattern;
-#[unstable(feature = "str_internals", issue = "none")]
-#[allow(missing_docs)]
-pub mod lossy;
+mod lossy;
+#[unstable(feature = "utf8_chunks", issue = "99543")]
+pub use lossy::{Utf8Chunk, Utf8Chunks};
#[stable(feature = "rust1", since = "1.0.0")]
pub use converts::{from_utf8, from_utf8_unchecked};
@@ -91,10 +91,12 @@ const fn slice_error_fail(s: &str, begin: usize, end: usize) -> ! {
}
}
+#[track_caller]
const fn slice_error_fail_ct(_: &str, _: usize, _: usize) -> ! {
panic!("failed to slice string");
}
+#[track_caller]
fn slice_error_fail_rt(s: &str, begin: usize, end: usize) -> ! {
const MAX_DISPLAY_LENGTH: usize = 256;
let trunc_len = s.floor_char_boundary(MAX_DISPLAY_LENGTH);
@@ -2353,7 +2355,7 @@ impl str {
#[inline]
pub fn is_ascii(&self) -> bool {
// We can treat each byte as character here: all multibyte characters
- // start with a byte that is not in the ascii range, so we will stop
+ // start with a byte that is not in the ASCII range, so we will stop
// there already.
self.as_bytes().is_ascii()
}
@@ -2638,3 +2640,6 @@ impl_fn_for_zst! {
unsafe { from_utf8_unchecked(bytes) }
};
}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl !crate::error::Error for &str {}
diff --git a/library/core/src/str/pattern.rs b/library/core/src/str/pattern.rs
index 031fb8e8b..ec2cb429e 100644
--- a/library/core/src/str/pattern.rs
+++ b/library/core/src/str/pattern.rs
@@ -267,7 +267,7 @@ pub unsafe trait Searcher<'a> {
/// The index ranges returned by this trait are not required
/// to exactly match those of the forward search in reverse.
///
-/// For the reason why this trait is marked unsafe, see them
+/// For the reason why this trait is marked unsafe, see the
/// parent trait [`Searcher`].
pub unsafe trait ReverseSearcher<'a>: Searcher<'a> {
/// Performs the next search step starting from the back.
diff --git a/library/core/src/str/traits.rs b/library/core/src/str/traits.rs
index e9649fc91..d3ed811b1 100644
--- a/library/core/src/str/traits.rs
+++ b/library/core/src/str/traits.rs
@@ -507,7 +507,6 @@ unsafe impl const SliceIndex<str> for ops::RangeToInclusive<usize> {
///
/// ```
/// use std::str::FromStr;
-/// use std::num::ParseIntError;
///
/// #[derive(Debug, PartialEq)]
/// struct Point {
@@ -515,18 +514,21 @@ unsafe impl const SliceIndex<str> for ops::RangeToInclusive<usize> {
/// y: i32
/// }
///
+/// #[derive(Debug, PartialEq, Eq)]
+/// struct ParsePointError;
+///
/// impl FromStr for Point {
-/// type Err = ParseIntError;
+/// type Err = ParsePointError;
///
/// fn from_str(s: &str) -> Result<Self, Self::Err> {
/// let (x, y) = s
/// .strip_prefix('(')
/// .and_then(|s| s.strip_suffix(')'))
/// .and_then(|s| s.split_once(','))
-/// .unwrap();
+/// .ok_or(ParsePointError)?;
///
-/// let x_fromstr = x.parse::<i32>()?;
-/// let y_fromstr = y.parse::<i32>()?;
+/// let x_fromstr = x.parse::<i32>().map_err(|_| ParsePointError)?;
+/// let y_fromstr = y.parse::<i32>().map_err(|_| ParsePointError)?;
///
/// Ok(Point { x: x_fromstr, y: y_fromstr })
/// }
@@ -538,6 +540,8 @@ unsafe impl const SliceIndex<str> for ops::RangeToInclusive<usize> {
/// // Implicit calls, through parse
/// assert_eq!("(1,2)".parse(), expected);
/// assert_eq!("(1,2)".parse::<Point>(), expected);
+/// // Invalid input string
+/// assert!(Point::from_str("(1 2)").is_err());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub trait FromStr: Sized {
@@ -573,8 +577,8 @@ impl FromStr for bool {
/// Parse a `bool` from a string.
///
- /// Yields a `Result<bool, ParseBoolError>`, because `s` may or may not
- /// actually be parseable.
+ /// The only accepted values are `"true"` and `"false"`. Any other input
+ /// will return an error.
///
/// # Examples
///
diff --git a/library/core/src/str/validations.rs b/library/core/src/str/validations.rs
index 04bc66523..2acef432f 100644
--- a/library/core/src/str/validations.rs
+++ b/library/core/src/str/validations.rs
@@ -216,12 +216,12 @@ pub(super) const fn run_utf8_validation(v: &[u8]) -> Result<(), Utf8Error> {
// SAFETY: since `align - index` and `ascii_block_size` are
// multiples of `usize_bytes`, `block = ptr.add(index)` is
// always aligned with a `usize` so it's safe to dereference
- // both `block` and `block.offset(1)`.
+ // both `block` and `block.add(1)`.
unsafe {
let block = ptr.add(index) as *const usize;
// break if there is a nonascii byte
let zu = contains_nonascii(*block);
- let zv = contains_nonascii(*block.offset(1));
+ let zv = contains_nonascii(*block.add(1));
if zu || zv {
break;
}
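// A hedged sketch, not from the patch, of the `contains_nonascii` word test used on
// `*block` and `*block.add(1)` above: a byte is non-ASCII exactly when its high bit is
// set, so masking a whole word with repeated 0x80 checks eight bytes at once. The
// constants assume a 64-bit word; core uses `usize`-sized ones.
const NONASCII_MASK: u64 = 0x8080_8080_8080_8080;

const fn contains_nonascii(x: u64) -> bool {
    x & NONASCII_MASK != 0
}

fn demo_contains_nonascii() {
    assert!(!contains_nonascii(u64::from_le_bytes(*b"ASCII ok")));
    assert!(contains_nonascii(u64::from_le_bytes(*b"caf\xC3\xA9 !!")));
}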
diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
index 5e2e0c4d8..edc68d6fa 100644
--- a/library/core/src/sync/atomic.rs
+++ b/library/core/src/sync/atomic.rs
@@ -294,7 +294,7 @@ impl AtomicBool {
/// ```
/// use std::sync::atomic::AtomicBool;
///
- /// let atomic_true = AtomicBool::new(true);
+ /// let atomic_true = AtomicBool::new(true);
/// let atomic_false = AtomicBool::new(false);
/// ```
#[inline]
@@ -955,6 +955,14 @@ impl AtomicBool {
/// **Note:** This method is only available on platforms that support atomic
/// operations on `u8`.
///
+ /// # Considerations
+ ///
+ /// This method is not magic; it is not provided by the hardware.
+ /// It is implemented in terms of [`AtomicBool::compare_exchange_weak`], and suffers from the same drawbacks.
+ /// In particular, this method will not circumvent the [ABA Problem].
+ ///
+ /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
+ ///
/// # Examples
///
/// ```rust
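// Illustrative, not part of the patch: `fetch_update` is the stable method the new
// "Considerations" text applies to; it is a `compare_exchange_weak` loop, so the
// closure may run more than once and the usual CAS caveats (including ABA) apply.
fn demo_fetch_update() {
    use std::sync::atomic::{AtomicBool, Ordering};

    let flag = AtomicBool::new(false);
    let prev = flag.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |v| Some(!v));
    assert_eq!(prev, Ok(false)); // returns the previous value on success
    assert!(flag.load(Ordering::SeqCst));
}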
@@ -1171,7 +1179,7 @@ impl<T> AtomicPtr<T> {
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let ptr = &mut 5;
- /// let some_ptr = AtomicPtr::new(ptr);
+ /// let some_ptr = AtomicPtr::new(ptr);
///
/// let value = some_ptr.load(Ordering::Relaxed);
/// ```
@@ -1198,7 +1206,7 @@ impl<T> AtomicPtr<T> {
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let ptr = &mut 5;
- /// let some_ptr = AtomicPtr::new(ptr);
+ /// let some_ptr = AtomicPtr::new(ptr);
///
/// let other_ptr = &mut 10;
///
@@ -1230,7 +1238,7 @@ impl<T> AtomicPtr<T> {
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let ptr = &mut 5;
- /// let some_ptr = AtomicPtr::new(ptr);
+ /// let some_ptr = AtomicPtr::new(ptr);
///
/// let other_ptr = &mut 10;
///
@@ -1282,9 +1290,9 @@ impl<T> AtomicPtr<T> {
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let ptr = &mut 5;
- /// let some_ptr = AtomicPtr::new(ptr);
+ /// let some_ptr = AtomicPtr::new(ptr);
///
- /// let other_ptr = &mut 10;
+ /// let other_ptr = &mut 10;
///
/// let value = some_ptr.compare_and_swap(ptr, other_ptr, Ordering::Relaxed);
/// ```
@@ -1325,9 +1333,9 @@ impl<T> AtomicPtr<T> {
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let ptr = &mut 5;
- /// let some_ptr = AtomicPtr::new(ptr);
+ /// let some_ptr = AtomicPtr::new(ptr);
///
- /// let other_ptr = &mut 10;
+ /// let other_ptr = &mut 10;
///
/// let value = some_ptr.compare_exchange(ptr, other_ptr,
/// Ordering::SeqCst, Ordering::Relaxed);
@@ -1422,6 +1430,14 @@ impl<T> AtomicPtr<T> {
/// **Note:** This method is only available on platforms that support atomic
/// operations on pointers.
///
+ /// # Considerations
+ ///
+ /// This method is not magic; it is not provided by the hardware.
+ /// It is implemented in terms of [`AtomicPtr::compare_exchange_weak`], and suffers from the same drawbacks.
+ /// In particular, this method will not circumvent the [ABA Problem].
+ ///
+ /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
+ ///
/// # Examples
///
/// ```rust
@@ -1554,8 +1570,8 @@ impl<T> AtomicPtr<T> {
/// Offsets the pointer's address by adding `val` *bytes*, returning the
/// previous pointer.
///
- /// This is equivalent to using [`wrapping_add`] and [`cast`] to atomically
- /// perform `ptr = ptr.cast::<u8>().wrapping_add(val).cast::<T>()`.
+ /// This is equivalent to using [`wrapping_byte_add`] to atomically
+ /// perform `ptr = ptr.wrapping_byte_add(val)`.
///
/// `fetch_byte_add` takes an [`Ordering`] argument which describes the
/// memory ordering of this operation. All ordering modes are possible. Note
@@ -1565,8 +1581,7 @@ impl<T> AtomicPtr<T> {
/// **Note**: This method is only available on platforms that support atomic
/// operations on [`AtomicPtr`].
///
- /// [`wrapping_add`]: pointer::wrapping_add
- /// [`cast`]: pointer::cast
+ /// [`wrapping_byte_add`]: pointer::wrapping_byte_add
///
/// # Examples
///
@@ -1584,23 +1599,15 @@ impl<T> AtomicPtr<T> {
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn fetch_byte_add(&self, val: usize, order: Ordering) -> *mut T {
- #[cfg(not(bootstrap))]
- // SAFETY: data races are prevented by atomic intrinsics.
- unsafe {
- atomic_add(self.p.get(), core::ptr::invalid_mut(val), order).cast()
- }
- #[cfg(bootstrap)]
// SAFETY: data races are prevented by atomic intrinsics.
- unsafe {
- atomic_add(self.p.get().cast::<usize>(), val, order) as *mut T
- }
+ unsafe { atomic_add(self.p.get(), core::ptr::invalid_mut(val), order).cast() }
}
/// Offsets the pointer's address by subtracting `val` *bytes*, returning the
/// previous pointer.
///
- /// This is equivalent to using [`wrapping_sub`] and [`cast`] to atomically
- /// perform `ptr = ptr.cast::<u8>().wrapping_sub(val).cast::<T>()`.
+ /// This is equivalent to using [`wrapping_byte_sub`] to atomically
+ /// perform `ptr = ptr.wrapping_byte_sub(val)`.
///
/// `fetch_byte_sub` takes an [`Ordering`] argument which describes the
/// memory ordering of this operation. All ordering modes are possible. Note
@@ -1610,8 +1617,7 @@ impl<T> AtomicPtr<T> {
/// **Note**: This method is only available on platforms that support atomic
/// operations on [`AtomicPtr`].
///
- /// [`wrapping_sub`]: pointer::wrapping_sub
- /// [`cast`]: pointer::cast
+ /// [`wrapping_byte_sub`]: pointer::wrapping_byte_sub
///
/// # Examples
///
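[Note: the reworded docs describe a *byte* offset. A small non-atomic illustration of the same semantics using `wrapping_byte_add`, assuming a toolchain where that pointer method is available (the atomic `fetch_byte_add` itself is still gated on `strict_provenance_atomic_ptr` in this patch):]

```rust
fn main() {
    let mut data = [1u16, 2, 3];
    let p: *mut u16 = data.as_mut_ptr();

    // `fetch_byte_add(2, ...)` on an `AtomicPtr<u16>` stores the address that
    // `wrapping_byte_add(2)` computes: the offset is in *bytes*, not elements.
    let q = p.wrapping_byte_add(2);
    assert_eq!(q as usize, p as usize + 2);

    // Two bytes past a `*mut u16` is exactly one element, i.e. `&mut data[1]`.
    assert_eq!(q, unsafe { p.add(1) });
}
```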
@@ -1628,24 +1634,16 @@ impl<T> AtomicPtr<T> {
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn fetch_byte_sub(&self, val: usize, order: Ordering) -> *mut T {
- #[cfg(not(bootstrap))]
// SAFETY: data races are prevented by atomic intrinsics.
- unsafe {
- atomic_sub(self.p.get(), core::ptr::invalid_mut(val), order).cast()
- }
- #[cfg(bootstrap)]
- // SAFETY: data races are prevented by atomic intrinsics.
- unsafe {
- atomic_sub(self.p.get().cast::<usize>(), val, order) as *mut T
- }
+ unsafe { atomic_sub(self.p.get(), core::ptr::invalid_mut(val), order).cast() }
}
/// Performs a bitwise "or" operation on the address of the current pointer,
/// and the argument `val`, and stores a pointer with provenance of the
/// current pointer and the resulting address.
///
- /// This is equivalent equivalent to using [`map_addr`] to atomically
- /// perform `ptr = ptr.map_addr(|a| a | val)`. This can be used in tagged
+ /// This is equivalent to using [`map_addr`] to atomically perform
+ /// `ptr = ptr.map_addr(|a| a | val)`. This can be used in tagged
/// pointer schemes to atomically set tag bits.
///
/// **Caveat**: This operation returns the previous value. To compute the
@@ -1687,24 +1685,16 @@ impl<T> AtomicPtr<T> {
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn fetch_or(&self, val: usize, order: Ordering) -> *mut T {
- #[cfg(not(bootstrap))]
- // SAFETY: data races are prevented by atomic intrinsics.
- unsafe {
- atomic_or(self.p.get(), core::ptr::invalid_mut(val), order).cast()
- }
- #[cfg(bootstrap)]
// SAFETY: data races are prevented by atomic intrinsics.
- unsafe {
- atomic_or(self.p.get().cast::<usize>(), val, order) as *mut T
- }
+ unsafe { atomic_or(self.p.get(), core::ptr::invalid_mut(val), order).cast() }
}
/// Performs a bitwise "and" operation on the address of the current
/// pointer, and the argument `val`, and stores a pointer with provenance of
/// the current pointer and the resulting address.
///
- /// This is equivalent equivalent to using [`map_addr`] to atomically
- /// perform `ptr = ptr.map_addr(|a| a & val)`. This can be used in tagged
+ /// This is equivalent to using [`map_addr`] to atomically perform
+ /// `ptr = ptr.map_addr(|a| a & val)`. This can be used in tagged
/// pointer schemes to atomically unset tag bits.
///
/// **Caveat**: This operation returns the previous value. To compute the
@@ -1745,24 +1735,16 @@ impl<T> AtomicPtr<T> {
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn fetch_and(&self, val: usize, order: Ordering) -> *mut T {
- #[cfg(not(bootstrap))]
- // SAFETY: data races are prevented by atomic intrinsics.
- unsafe {
- atomic_and(self.p.get(), core::ptr::invalid_mut(val), order).cast()
- }
- #[cfg(bootstrap)]
// SAFETY: data races are prevented by atomic intrinsics.
- unsafe {
- atomic_and(self.p.get().cast::<usize>(), val, order) as *mut T
- }
+ unsafe { atomic_and(self.p.get(), core::ptr::invalid_mut(val), order).cast() }
}
/// Performs a bitwise "xor" operation on the address of the current
/// pointer, and the argument `val`, and stores a pointer with provenance of
/// the current pointer and the resulting address.
///
- /// This is equivalent equivalent to using [`map_addr`] to atomically
- /// perform `ptr = ptr.map_addr(|a| a ^ val)`. This can be used in tagged
+ /// This is equivalent to using [`map_addr`] to atomically perform
+ /// `ptr = ptr.map_addr(|a| a ^ val)`. This can be used in tagged
/// pointer schemes to atomically toggle tag bits.
///
/// **Caveat**: This operation returns the previous value. To compute the
@@ -1801,16 +1783,8 @@ impl<T> AtomicPtr<T> {
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn fetch_xor(&self, val: usize, order: Ordering) -> *mut T {
- #[cfg(not(bootstrap))]
- // SAFETY: data races are prevented by atomic intrinsics.
- unsafe {
- atomic_xor(self.p.get(), core::ptr::invalid_mut(val), order).cast()
- }
- #[cfg(bootstrap)]
// SAFETY: data races are prevented by atomic intrinsics.
- unsafe {
- atomic_xor(self.p.get().cast::<usize>(), val, order) as *mut T
- }
+ unsafe { atomic_xor(self.p.get(), core::ptr::invalid_mut(val), order).cast() }
}
}
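[Note: the tagged-pointer use case mentioned in the `fetch_or`/`fetch_and`/`fetch_xor` docs boils down to address arithmetic that preserves provenance. A non-atomic sketch of that math using `map_addr`, assuming a toolchain where `map_addr` is available (the atomic methods remain unstable in this patch):]

```rust
fn main() {
    // `u64` is 8-byte aligned, so the low three address bits are free for tags.
    let p: *mut u64 = Box::into_raw(Box::new(0u64));

    // What `fetch_or(1, ...)` would store: same provenance, address | 1.
    let tagged = p.map_addr(|a| a | 1);
    assert_eq!(tagged as usize, (p as usize) | 1);

    // What `fetch_and(!0b111, ...)` would store: the tag bits cleared again.
    let untagged = tagged.map_addr(|a| a & !0b111);
    assert_eq!(untagged, p);

    // Reclaim the allocation through the untagged pointer.
    unsafe { drop(Box::from_raw(untagged)) };
}
```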
@@ -2552,6 +2526,16 @@ macro_rules! atomic_int {
/// **Note**: This method is only available on platforms that support atomic operations on
#[doc = concat!("[`", $s_int_type, "`].")]
///
+ /// # Considerations
+ ///
+ /// This method is not magic; it is not provided by the hardware.
+ /// It is implemented in terms of
+ #[doc = concat!("[`", stringify!($atomic_type), "::compare_exchange_weak`],")]
+ /// and suffers from the same drawbacks.
+ /// In particular, this method will not circumvent the [ABA Problem].
+ ///
+ /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
+ ///
/// # Examples
///
/// ```rust
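[Note: the ABA caveat added to the integer atomics' `fetch_update` docs can be made concrete: the underlying compare-exchange only checks the current *value*, so an intervening A -> B -> A change goes unnoticed. A small single-threaded demonstration in which the closure plays the role of a racing thread:]

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let x = AtomicUsize::new(1);

    // While the update is "in flight", simulate another thread doing 1 -> 2 -> 1.
    let res = x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |v| {
        x.store(2, Ordering::SeqCst);
        x.store(1, Ordering::SeqCst);
        Some(v + 10)
    });

    // The compare-exchange only sees that the value is still 1, so the
    // intermediate modification is not detected and the update succeeds.
    assert_eq!(res, Ok(1));
    assert_eq!(x.load(Ordering::SeqCst), 11);
}
```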
@@ -3073,30 +3057,22 @@ unsafe fn atomic_compare_exchange<T: Copy>(
let (val, ok) = unsafe {
match (success, failure) {
(Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed_relaxed(dst, old, new),
- #[cfg(not(bootstrap))]
(Relaxed, Acquire) => intrinsics::atomic_cxchg_relaxed_acquire(dst, old, new),
- #[cfg(not(bootstrap))]
(Relaxed, SeqCst) => intrinsics::atomic_cxchg_relaxed_seqcst(dst, old, new),
(Acquire, Relaxed) => intrinsics::atomic_cxchg_acquire_relaxed(dst, old, new),
(Acquire, Acquire) => intrinsics::atomic_cxchg_acquire_acquire(dst, old, new),
- #[cfg(not(bootstrap))]
(Acquire, SeqCst) => intrinsics::atomic_cxchg_acquire_seqcst(dst, old, new),
(Release, Relaxed) => intrinsics::atomic_cxchg_release_relaxed(dst, old, new),
- #[cfg(not(bootstrap))]
(Release, Acquire) => intrinsics::atomic_cxchg_release_acquire(dst, old, new),
- #[cfg(not(bootstrap))]
(Release, SeqCst) => intrinsics::atomic_cxchg_release_seqcst(dst, old, new),
(AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_relaxed(dst, old, new),
(AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel_acquire(dst, old, new),
- #[cfg(not(bootstrap))]
(AcqRel, SeqCst) => intrinsics::atomic_cxchg_acqrel_seqcst(dst, old, new),
(SeqCst, Relaxed) => intrinsics::atomic_cxchg_seqcst_relaxed(dst, old, new),
(SeqCst, Acquire) => intrinsics::atomic_cxchg_seqcst_acquire(dst, old, new),
(SeqCst, SeqCst) => intrinsics::atomic_cxchg_seqcst_seqcst(dst, old, new),
(_, AcqRel) => panic!("there is no such thing as an acquire-release failure ordering"),
(_, Release) => panic!("there is no such thing as a release failure ordering"),
- #[cfg(bootstrap)]
- _ => panic!("a failure ordering can't be stronger than a success ordering"),
}
};
if ok { Ok(val) } else { Err(val) }
@@ -3116,30 +3092,22 @@ unsafe fn atomic_compare_exchange_weak<T: Copy>(
let (val, ok) = unsafe {
match (success, failure) {
(Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed_relaxed(dst, old, new),
- #[cfg(not(bootstrap))]
(Relaxed, Acquire) => intrinsics::atomic_cxchgweak_relaxed_acquire(dst, old, new),
- #[cfg(not(bootstrap))]
(Relaxed, SeqCst) => intrinsics::atomic_cxchgweak_relaxed_seqcst(dst, old, new),
(Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acquire_relaxed(dst, old, new),
(Acquire, Acquire) => intrinsics::atomic_cxchgweak_acquire_acquire(dst, old, new),
- #[cfg(not(bootstrap))]
(Acquire, SeqCst) => intrinsics::atomic_cxchgweak_acquire_seqcst(dst, old, new),
(Release, Relaxed) => intrinsics::atomic_cxchgweak_release_relaxed(dst, old, new),
- #[cfg(not(bootstrap))]
(Release, Acquire) => intrinsics::atomic_cxchgweak_release_acquire(dst, old, new),
- #[cfg(not(bootstrap))]
(Release, SeqCst) => intrinsics::atomic_cxchgweak_release_seqcst(dst, old, new),
(AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_relaxed(dst, old, new),
(AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel_acquire(dst, old, new),
- #[cfg(not(bootstrap))]
(AcqRel, SeqCst) => intrinsics::atomic_cxchgweak_acqrel_seqcst(dst, old, new),
(SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_seqcst_relaxed(dst, old, new),
(SeqCst, Acquire) => intrinsics::atomic_cxchgweak_seqcst_acquire(dst, old, new),
(SeqCst, SeqCst) => intrinsics::atomic_cxchgweak_seqcst_seqcst(dst, old, new),
(_, AcqRel) => panic!("there is no such thing as an acquire-release failure ordering"),
(_, Release) => panic!("there is no such thing as a release failure ordering"),
- #[cfg(bootstrap)]
- _ => panic!("a failure ordering can't be stronger than a success ordering"),
}
};
if ok { Ok(val) } else { Err(val) }
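[Note: dropping the `#[cfg(bootstrap)]` fallback arm reflects that a failure ordering stronger than the success ordering is now accepted; only `Release` and `AcqRel` remain invalid on failure. A quick illustration, assuming the relaxed rules this hunk encodes:]

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let x = AtomicUsize::new(5);

    // Relaxed success paired with an Acquire failure ordering hits the
    // `(Relaxed, Acquire)` arm that is now unconditional.
    let r = x.compare_exchange(5, 10, Ordering::Relaxed, Ordering::Acquire);
    assert_eq!(r, Ok(5));
    assert_eq!(x.load(Ordering::Relaxed), 10);
}
```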
diff --git a/library/core/src/sync/exclusive.rs b/library/core/src/sync/exclusive.rs
index a7519ab5a..c65c27500 100644
--- a/library/core/src/sync/exclusive.rs
+++ b/library/core/src/sync/exclusive.rs
@@ -100,6 +100,7 @@ impl<T: Sized> Exclusive<T> {
/// Wrap a value in an `Exclusive`
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
#[must_use]
+ #[inline]
pub const fn new(t: T) -> Self {
Self { inner: t }
}
@@ -107,6 +108,7 @@ impl<T: Sized> Exclusive<T> {
/// Unwrap the value contained in the `Exclusive`
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
#[must_use]
+ #[inline]
pub const fn into_inner(self) -> T {
self.inner
}
@@ -116,6 +118,7 @@ impl<T: ?Sized> Exclusive<T> {
/// Get exclusive access to the underlying value.
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
#[must_use]
+ #[inline]
pub const fn get_mut(&mut self) -> &mut T {
&mut self.inner
}
@@ -128,6 +131,7 @@ impl<T: ?Sized> Exclusive<T> {
/// produce _pinned_ access to the underlying value.
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
#[must_use]
+ #[inline]
pub const fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut T> {
// SAFETY: `Exclusive` can only produce `&mut T` if itself is unpinned
// `Pin::map_unchecked_mut` is not const, so we do this conversion manually
@@ -139,6 +143,7 @@ impl<T: ?Sized> Exclusive<T> {
/// building an `Exclusive` with [`Exclusive::new`].
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
#[must_use]
+ #[inline]
pub const fn from_mut(r: &'_ mut T) -> &'_ mut Exclusive<T> {
// SAFETY: repr is ≥ C, so refs have the same layout; and `Exclusive` properties are `&mut`-agnostic
unsafe { &mut *(r as *mut T as *mut Exclusive<T>) }
@@ -149,6 +154,7 @@ impl<T: ?Sized> Exclusive<T> {
/// building an `Exclusive` with [`Exclusive::new`].
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
#[must_use]
+ #[inline]
pub const fn from_pin_mut(r: Pin<&'_ mut T>) -> Pin<&'_ mut Exclusive<T>> {
// SAFETY: `Exclusive` can only produce `&mut T` if itself is unpinned
// `Pin::map_unchecked_mut` is not const, so we do this conversion manually
@@ -158,6 +164,7 @@ impl<T: ?Sized> Exclusive<T> {
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T> From<T> for Exclusive<T> {
+ #[inline]
fn from(t: T) -> Self {
Self::new(t)
}
@@ -166,7 +173,7 @@ impl<T> From<T> for Exclusive<T> {
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T: Future + ?Sized> Future for Exclusive<T> {
type Output = T::Output;
-
+ #[inline]
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.get_pin_mut().poll(cx)
}
diff --git a/library/core/src/task/wake.rs b/library/core/src/task/wake.rs
index 87d4a25af..0cff972df 100644
--- a/library/core/src/task/wake.rs
+++ b/library/core/src/task/wake.rs
@@ -71,6 +71,12 @@ impl RawWaker {
/// pointer of a properly constructed [`RawWaker`] object from inside the
/// [`RawWaker`] implementation. Calling one of the contained functions using
/// any other `data` pointer will cause undefined behavior.
+///
+/// These functions must all be thread-safe (even though [`RawWaker`] is
+/// <code>\![Send] + \![Sync]</code>)
+/// because [`Waker`] is <code>[Send] + [Sync]</code>, and thus wakers may be moved to
+/// arbitrary threads or invoked by `&` reference. For example, this means that if the
+/// `clone` and `drop` functions manage a reference count, they must do so atomically.
#[stable(feature = "futures_api", since = "1.36.0")]
#[derive(PartialEq, Copy, Clone, Debug)]
pub struct RawWakerVTable {
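[Note: a minimal sketch of a vtable that meets the thread-safety requirement spelled out above: the `data` pointer is an `Arc`, so the `clone` and `drop` entries manage the reference count atomically. All names here are illustrative and not part of the patch:]

```rust
use std::sync::Arc;
use std::task::{RawWaker, RawWakerVTable, Waker};

struct MyTask;

impl MyTask {
    fn notify(&self) { /* tell the executor to poll again; omitted here */ }
}

static VTABLE: RawWakerVTable = RawWakerVTable::new(clone, wake, wake_by_ref, drop_waker);

unsafe fn clone(data: *const ()) -> RawWaker {
    // Atomic reference-count increment, so cloning is safe from any thread.
    Arc::increment_strong_count(data as *const MyTask);
    RawWaker::new(data, &VTABLE)
}

unsafe fn wake(data: *const ()) {
    // Takes ownership of one reference count and drops it after waking.
    let task = Arc::from_raw(data as *const MyTask);
    task.notify();
}

unsafe fn wake_by_ref(data: *const ()) {
    (*(data as *const MyTask)).notify();
}

unsafe fn drop_waker(data: *const ()) {
    // Atomic reference-count decrement.
    drop(Arc::from_raw(data as *const MyTask));
}

fn waker_for(task: Arc<MyTask>) -> Waker {
    let raw = RawWaker::new(Arc::into_raw(task) as *const (), &VTABLE);
    // SAFETY: the vtable above upholds the RawWaker contract for this pointer.
    unsafe { Waker::from_raw(raw) }
}

fn main() {
    let waker = waker_for(Arc::new(MyTask));
    let cloned = waker.clone(); // atomic increment via `clone`
    cloned.wake();              // consumes one reference count via `wake`
    drop(waker);                // atomic decrement via `drop_waker`
}
```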
@@ -110,6 +116,12 @@ impl RawWakerVTable {
/// Creates a new `RawWakerVTable` from the provided `clone`, `wake`,
/// `wake_by_ref`, and `drop` functions.
///
+ /// These functions must all be thread-safe (even though [`RawWaker`] is
+ /// <code>\![Send] + \![Sync]</code>)
+ /// because [`Waker`] is <code>[Send] + [Sync]</code>, and thus wakers may be moved to
+ /// arbitrary threads or invoked by `&` reference. For example, this means that if the
+ /// `clone` and `drop` functions manage a reference count, they must do so atomically.
+ ///
/// # `clone`
///
/// This function will be called when the [`RawWaker`] gets cloned, e.g. when
@@ -157,9 +169,9 @@ impl RawWakerVTable {
}
}
-/// The `Context` of an asynchronous task.
+/// The context of an asynchronous task.
///
-/// Currently, `Context` only serves to provide access to a `&Waker`
+/// Currently, `Context` only serves to provide access to a [`&Waker`](Waker)
/// which can be used to wake the current task.
#[stable(feature = "futures_api", since = "1.36.0")]
pub struct Context<'a> {
@@ -172,19 +184,21 @@ pub struct Context<'a> {
}
impl<'a> Context<'a> {
- /// Create a new `Context` from a `&Waker`.
+ /// Create a new `Context` from a [`&Waker`](Waker).
#[stable(feature = "futures_api", since = "1.36.0")]
+ #[rustc_const_unstable(feature = "const_waker", issue = "102012")]
#[must_use]
#[inline]
- pub fn from_waker(waker: &'a Waker) -> Self {
+ pub const fn from_waker(waker: &'a Waker) -> Self {
Context { waker, _marker: PhantomData }
}
- /// Returns a reference to the `Waker` for the current task.
+ /// Returns a reference to the [`Waker`] for the current task.
#[stable(feature = "futures_api", since = "1.36.0")]
+ #[rustc_const_unstable(feature = "const_waker", issue = "102012")]
#[must_use]
#[inline]
- pub fn waker(&self) -> &'a Waker {
+ pub const fn waker(&self) -> &'a Waker {
&self.waker
}
}
@@ -202,7 +216,18 @@ impl fmt::Debug for Context<'_> {
/// This handle encapsulates a [`RawWaker`] instance, which defines the
/// executor-specific wakeup behavior.
///
-/// Implements [`Clone`], [`Send`], and [`Sync`].
+/// The typical life of a `Waker` is that it is constructed by an executor, wrapped in a
+/// [`Context`], then passed to [`Future::poll()`]. Then, if the future chooses to return
+/// [`Poll::Pending`], it must also store the waker somehow and call [`Waker::wake()`] when
+/// the future should be polled again.
+///
+/// Implements [`Clone`], [`Send`], and [`Sync`]; therefore, a waker may be invoked
+/// from any thread, including ones not in any way managed by the executor. For example,
+/// this might be done to wake a future when a blocking function call completes on another
+/// thread.
+///
+/// [`Future::poll()`]: core::future::Future::poll
+/// [`Poll::Pending`]: core::task::Poll::Pending
#[repr(transparent)]
#[stable(feature = "futures_api", since = "1.36.0")]
pub struct Waker {
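[Note: the lifecycle described above (an executor builds a `Waker`, wraps it in a `Context`, the future stashes it, and `wake()` is called later, possibly from an unrelated thread) can be sketched end to end. This is a simplified toy executor and future, not anything from the patch:]

```rust
use std::future::Future;
use std::pin::{pin, Pin};
use std::sync::{Arc, Mutex};
use std::task::{Context, Poll, Wake, Waker};
use std::thread;
use std::time::Duration;

// A future that stores the waker it was polled with; a worker thread calls
// `wake()` later, from a thread the executor does not manage.
struct Delay {
    shared: Arc<Mutex<(bool, Option<Waker>)>>,
}

impl Delay {
    fn new(dur: Duration) -> Self {
        let shared = Arc::new(Mutex::new((false, None)));
        let worker = Arc::clone(&shared);
        thread::spawn(move || {
            thread::sleep(dur);
            let mut state = worker.lock().unwrap();
            state.0 = true;
            if let Some(waker) = state.1.take() {
                waker.wake(); // request that the task be polled again
            }
        });
        Delay { shared }
    }
}

impl Future for Delay {
    type Output = ();
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
        let mut state = self.shared.lock().unwrap();
        if state.0 {
            Poll::Ready(())
        } else {
            // Store the waker so the worker thread can wake this task.
            state.1 = Some(cx.waker().clone());
            Poll::Pending
        }
    }
}

// The executor side: build a `Waker`, wrap it in a `Context`, poll, and park
// the thread until woken.
fn block_on<F: Future>(fut: F) -> F::Output {
    struct ThreadWaker(thread::Thread);
    impl Wake for ThreadWaker {
        fn wake(self: Arc<Self>) {
            self.0.unpark();
        }
    }
    let waker = Waker::from(Arc::new(ThreadWaker(thread::current())));
    let mut cx = Context::from_waker(&waker);
    let mut fut = pin!(fut);
    loop {
        match fut.as_mut().poll(&mut cx) {
            Poll::Ready(out) => return out,
            Poll::Pending => thread::park(),
        }
    }
}

fn main() {
    block_on(Delay::new(Duration::from_millis(10)));
}
```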
@@ -219,18 +244,21 @@ unsafe impl Sync for Waker {}
impl Waker {
/// Wake up the task associated with this `Waker`.
///
- /// As long as the runtime keeps running and the task is not finished, it is
- /// guaranteed that each invocation of `wake` (or `wake_by_ref`) will be followed
- /// by at least one `poll` of the task to which this `Waker` belongs. This makes
+ /// As long as the executor keeps running and the task is not finished, it is
+ /// guaranteed that each invocation of [`wake()`](Self::wake) (or
+ /// [`wake_by_ref()`](Self::wake_by_ref)) will be followed by at least one
+ /// [`poll()`] of the task to which this `Waker` belongs. This makes
/// it possible to temporarily yield to other tasks while running potentially
/// unbounded processing loops.
///
/// Note that the above implies that multiple wake-ups may be coalesced into a
- /// single `poll` invocation by the runtime.
+ /// single [`poll()`] invocation by the runtime.
///
/// Also note that yielding to competing tasks is not guaranteed: it is the
/// executor’s choice which task to run and the executor may choose to run the
/// current task again.
+ ///
+ /// [`poll()`]: crate::future::Future::poll
#[inline]
#[stable(feature = "futures_api", since = "1.36.0")]
pub fn wake(self) {
@@ -250,8 +278,8 @@ impl Waker {
/// Wake up the task associated with this `Waker` without consuming the `Waker`.
///
- /// This is similar to `wake`, but may be slightly less efficient in the case
- /// where an owned `Waker` is available. This method should be preferred to
+ /// This is similar to [`wake()`](Self::wake), but may be slightly less efficient in
+ /// the case where an owned `Waker` is available. This method should be preferred to
/// calling `waker.clone().wake()`.
#[inline]
#[stable(feature = "futures_api", since = "1.36.0")]
@@ -263,7 +291,7 @@ impl Waker {
unsafe { (self.waker.vtable.wake_by_ref)(self.waker.data) }
}
- /// Returns `true` if this `Waker` and another `Waker` have awoken the same task.
+ /// Returns `true` if this `Waker` and another `Waker` would awake the same task.
///
/// This function works on a best-effort basis, and may return false even
/// when the `Waker`s would awaken the same task. However, if this function
@@ -285,7 +313,8 @@ impl Waker {
#[inline]
#[must_use]
#[stable(feature = "futures_api", since = "1.36.0")]
- pub unsafe fn from_raw(waker: RawWaker) -> Waker {
+ #[rustc_const_unstable(feature = "const_waker", issue = "102012")]
+ pub const unsafe fn from_raw(waker: RawWaker) -> Waker {
Waker { waker }
}
diff --git a/library/core/src/time.rs b/library/core/src/time.rs
index 756f1a166..ba1cb6efa 100644
--- a/library/core/src/time.rs
+++ b/library/core/src/time.rs
@@ -29,6 +29,20 @@ const NANOS_PER_MICRO: u32 = 1_000;
const MILLIS_PER_SEC: u64 = 1_000;
const MICROS_PER_SEC: u64 = 1_000_000;
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[repr(transparent)]
+#[rustc_layout_scalar_valid_range_start(0)]
+#[rustc_layout_scalar_valid_range_end(999_999_999)]
+struct Nanoseconds(u32);
+
+impl Default for Nanoseconds {
+ #[inline]
+ fn default() -> Self {
+ // SAFETY: 0 is within the valid range
+ unsafe { Nanoseconds(0) }
+ }
+}
+
/// A `Duration` type to represent a span of time, typically used for system
/// timeouts.
///
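[Note: the `rustc_layout_scalar_valid_range_*` attributes exist to give `Duration` a niche for layout optimization. A quick check of the effect, assuming a toolchain that includes this change:]

```rust
use std::mem::size_of;
use std::time::Duration;

fn main() {
    // With nanos restricted to 0..=999_999_999, the unused bit patterns form a
    // niche, so `Option<Duration>` needs no extra discriminant space.
    assert_eq!(size_of::<Option<Duration>>(), size_of::<Duration>());
}
```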
@@ -71,7 +85,7 @@ const MICROS_PER_SEC: u64 = 1_000_000;
#[cfg_attr(not(test), rustc_diagnostic_item = "Duration")]
pub struct Duration {
secs: u64,
- nanos: u32, // Always 0 <= nanos < NANOS_PER_SEC
+ nanos: Nanoseconds, // Always 0 <= nanos < NANOS_PER_SEC
}
impl Duration {
@@ -188,7 +202,8 @@ impl Duration {
None => panic!("overflow in Duration::new"),
};
let nanos = nanos % NANOS_PER_SEC;
- Duration { secs, nanos }
+ // SAFETY: nanos % NANOS_PER_SEC < NANOS_PER_SEC, therefore nanos is within the valid range
+ Duration { secs, nanos: unsafe { Nanoseconds(nanos) } }
}
/// Creates a new `Duration` from the specified number of whole seconds.
@@ -208,7 +223,7 @@ impl Duration {
#[inline]
#[rustc_const_stable(feature = "duration_consts", since = "1.32.0")]
pub const fn from_secs(secs: u64) -> Duration {
- Duration { secs, nanos: 0 }
+ Duration::new(secs, 0)
}
/// Creates a new `Duration` from the specified number of milliseconds.
@@ -228,10 +243,7 @@ impl Duration {
#[inline]
#[rustc_const_stable(feature = "duration_consts", since = "1.32.0")]
pub const fn from_millis(millis: u64) -> Duration {
- Duration {
- secs: millis / MILLIS_PER_SEC,
- nanos: ((millis % MILLIS_PER_SEC) as u32) * NANOS_PER_MILLI,
- }
+ Duration::new(millis / MILLIS_PER_SEC, ((millis % MILLIS_PER_SEC) as u32) * NANOS_PER_MILLI)
}
/// Creates a new `Duration` from the specified number of microseconds.
@@ -251,10 +263,7 @@ impl Duration {
#[inline]
#[rustc_const_stable(feature = "duration_consts", since = "1.32.0")]
pub const fn from_micros(micros: u64) -> Duration {
- Duration {
- secs: micros / MICROS_PER_SEC,
- nanos: ((micros % MICROS_PER_SEC) as u32) * NANOS_PER_MICRO,
- }
+ Duration::new(micros / MICROS_PER_SEC, ((micros % MICROS_PER_SEC) as u32) * NANOS_PER_MICRO)
}
/// Creates a new `Duration` from the specified number of nanoseconds.
@@ -274,10 +283,7 @@ impl Duration {
#[inline]
#[rustc_const_stable(feature = "duration_consts", since = "1.32.0")]
pub const fn from_nanos(nanos: u64) -> Duration {
- Duration {
- secs: nanos / (NANOS_PER_SEC as u64),
- nanos: (nanos % (NANOS_PER_SEC as u64)) as u32,
- }
+ Duration::new(nanos / (NANOS_PER_SEC as u64), (nanos % (NANOS_PER_SEC as u64)) as u32)
}
/// Returns true if this `Duration` spans no time.
@@ -301,7 +307,7 @@ impl Duration {
#[rustc_const_stable(feature = "duration_zero", since = "1.53.0")]
#[inline]
pub const fn is_zero(&self) -> bool {
- self.secs == 0 && self.nanos == 0
+ self.secs == 0 && self.nanos.0 == 0
}
/// Returns the number of _whole_ seconds contained by this `Duration`.
@@ -318,19 +324,11 @@ impl Duration {
/// assert_eq!(duration.as_secs(), 5);
/// ```
///
- /// To determine the total number of seconds represented by the `Duration`,
- /// use `as_secs` in combination with [`subsec_nanos`]:
- ///
- /// ```
- /// use std::time::Duration;
- ///
- /// let duration = Duration::new(5, 730023852);
- ///
- /// assert_eq!(5.730023852,
- /// duration.as_secs() as f64
- /// + duration.subsec_nanos() as f64 * 1e-9);
- /// ```
+ /// To determine the total number of seconds represented by the `Duration`
+ /// including the fractional part, use [`as_secs_f64`] or [`as_secs_f32`]
///
+ /// [`as_secs_f64`]: Duration::as_secs_f64
+ /// [`as_secs_f32`]: Duration::as_secs_f32
/// [`subsec_nanos`]: Duration::subsec_nanos
#[stable(feature = "duration", since = "1.3.0")]
#[rustc_const_stable(feature = "duration_consts", since = "1.32.0")]
@@ -360,7 +358,7 @@ impl Duration {
#[must_use]
#[inline]
pub const fn subsec_millis(&self) -> u32 {
- self.nanos / NANOS_PER_MILLI
+ self.nanos.0 / NANOS_PER_MILLI
}
/// Returns the fractional part of this `Duration`, in whole microseconds.
@@ -383,7 +381,7 @@ impl Duration {
#[must_use]
#[inline]
pub const fn subsec_micros(&self) -> u32 {
- self.nanos / NANOS_PER_MICRO
+ self.nanos.0 / NANOS_PER_MICRO
}
/// Returns the fractional part of this `Duration`, in nanoseconds.
@@ -406,7 +404,7 @@ impl Duration {
#[must_use]
#[inline]
pub const fn subsec_nanos(&self) -> u32 {
- self.nanos
+ self.nanos.0
}
/// Returns the total number of whole milliseconds contained by this `Duration`.
@@ -424,7 +422,7 @@ impl Duration {
#[must_use]
#[inline]
pub const fn as_millis(&self) -> u128 {
- self.secs as u128 * MILLIS_PER_SEC as u128 + (self.nanos / NANOS_PER_MILLI) as u128
+ self.secs as u128 * MILLIS_PER_SEC as u128 + (self.nanos.0 / NANOS_PER_MILLI) as u128
}
/// Returns the total number of whole microseconds contained by this `Duration`.
@@ -442,7 +440,7 @@ impl Duration {
#[must_use]
#[inline]
pub const fn as_micros(&self) -> u128 {
- self.secs as u128 * MICROS_PER_SEC as u128 + (self.nanos / NANOS_PER_MICRO) as u128
+ self.secs as u128 * MICROS_PER_SEC as u128 + (self.nanos.0 / NANOS_PER_MICRO) as u128
}
/// Returns the total number of nanoseconds contained by this `Duration`.
@@ -460,7 +458,7 @@ impl Duration {
#[must_use]
#[inline]
pub const fn as_nanos(&self) -> u128 {
- self.secs as u128 * NANOS_PER_SEC as u128 + self.nanos as u128
+ self.secs as u128 * NANOS_PER_SEC as u128 + self.nanos.0 as u128
}
/// Checked `Duration` addition. Computes `self + other`, returning [`None`]
@@ -483,7 +481,7 @@ impl Duration {
#[rustc_const_stable(feature = "duration_consts_2", since = "1.58.0")]
pub const fn checked_add(self, rhs: Duration) -> Option<Duration> {
if let Some(mut secs) = self.secs.checked_add(rhs.secs) {
- let mut nanos = self.nanos + rhs.nanos;
+ let mut nanos = self.nanos.0 + rhs.nanos.0;
if nanos >= NANOS_PER_SEC {
nanos -= NANOS_PER_SEC;
if let Some(new_secs) = secs.checked_add(1) {
@@ -493,7 +491,7 @@ impl Duration {
}
}
debug_assert!(nanos < NANOS_PER_SEC);
- Some(Duration { secs, nanos })
+ Some(Duration::new(secs, nanos))
} else {
None
}
@@ -543,16 +541,16 @@ impl Duration {
#[rustc_const_stable(feature = "duration_consts_2", since = "1.58.0")]
pub const fn checked_sub(self, rhs: Duration) -> Option<Duration> {
if let Some(mut secs) = self.secs.checked_sub(rhs.secs) {
- let nanos = if self.nanos >= rhs.nanos {
- self.nanos - rhs.nanos
+ let nanos = if self.nanos.0 >= rhs.nanos.0 {
+ self.nanos.0 - rhs.nanos.0
} else if let Some(sub_secs) = secs.checked_sub(1) {
secs = sub_secs;
- self.nanos + NANOS_PER_SEC - rhs.nanos
+ self.nanos.0 + NANOS_PER_SEC - rhs.nanos.0
} else {
return None;
};
debug_assert!(nanos < NANOS_PER_SEC);
- Some(Duration { secs, nanos })
+ Some(Duration::new(secs, nanos))
} else {
None
}
@@ -601,13 +599,13 @@ impl Duration {
#[rustc_const_stable(feature = "duration_consts_2", since = "1.58.0")]
pub const fn checked_mul(self, rhs: u32) -> Option<Duration> {
// Multiply nanoseconds as u64, because it cannot overflow that way.
- let total_nanos = self.nanos as u64 * rhs as u64;
+ let total_nanos = self.nanos.0 as u64 * rhs as u64;
let extra_secs = total_nanos / (NANOS_PER_SEC as u64);
let nanos = (total_nanos % (NANOS_PER_SEC as u64)) as u32;
if let Some(s) = self.secs.checked_mul(rhs as u64) {
if let Some(secs) = s.checked_add(extra_secs) {
debug_assert!(nanos < NANOS_PER_SEC);
- return Some(Duration { secs, nanos });
+ return Some(Duration::new(secs, nanos));
}
}
None
@@ -661,9 +659,9 @@ impl Duration {
let secs = self.secs / (rhs as u64);
let carry = self.secs - secs * (rhs as u64);
let extra_nanos = carry * (NANOS_PER_SEC as u64) / (rhs as u64);
- let nanos = self.nanos / rhs + (extra_nanos as u32);
+ let nanos = self.nanos.0 / rhs + (extra_nanos as u32);
debug_assert!(nanos < NANOS_PER_SEC);
- Some(Duration { secs, nanos })
+ Some(Duration::new(secs, nanos))
} else {
None
}
@@ -685,7 +683,7 @@ impl Duration {
#[inline]
#[rustc_const_unstable(feature = "duration_consts_float", issue = "72440")]
pub const fn as_secs_f64(&self) -> f64 {
- (self.secs as f64) + (self.nanos as f64) / (NANOS_PER_SEC as f64)
+ (self.secs as f64) + (self.nanos.0 as f64) / (NANOS_PER_SEC as f64)
}
/// Returns the number of seconds contained by this `Duration` as `f32`.
@@ -704,7 +702,7 @@ impl Duration {
#[inline]
#[rustc_const_unstable(feature = "duration_consts_float", issue = "72440")]
pub const fn as_secs_f32(&self) -> f32 {
- (self.secs as f32) + (self.nanos as f32) / (NANOS_PER_SEC as f32)
+ (self.secs as f32) + (self.nanos.0 as f32) / (NANOS_PER_SEC as f32)
}
/// Creates a new `Duration` from the specified number of seconds represented
@@ -995,13 +993,13 @@ macro_rules! sum_durations {
for entry in $iter {
total_secs =
total_secs.checked_add(entry.secs).expect("overflow in iter::sum over durations");
- total_nanos = match total_nanos.checked_add(entry.nanos as u64) {
+ total_nanos = match total_nanos.checked_add(entry.nanos.0 as u64) {
Some(n) => n,
None => {
total_secs = total_secs
.checked_add(total_nanos / NANOS_PER_SEC as u64)
.expect("overflow in iter::sum over durations");
- (total_nanos % NANOS_PER_SEC as u64) + entry.nanos as u64
+ (total_nanos % NANOS_PER_SEC as u64) + entry.nanos.0 as u64
}
};
}
@@ -1009,7 +1007,7 @@ macro_rules! sum_durations {
.checked_add(total_nanos / NANOS_PER_SEC as u64)
.expect("overflow in iter::sum over durations");
total_nanos = total_nanos % NANOS_PER_SEC as u64;
- Duration { secs: total_secs, nanos: total_nanos as u32 }
+ Duration::new(total_secs, total_nanos as u32)
}};
}
@@ -1045,7 +1043,7 @@ impl fmt::Debug for Duration {
/// to the formatter's `width`, if specified.
fn fmt_decimal(
f: &mut fmt::Formatter<'_>,
- mut integer_part: u64,
+ integer_part: u64,
mut fractional_part: u32,
mut divisor: u32,
prefix: &str,
@@ -1077,7 +1075,7 @@ impl fmt::Debug for Duration {
// normal floating point numbers. However, we only need to do work
// when rounding up. This happens if the first digit of the
// remaining ones is >= 5.
- if fractional_part > 0 && fractional_part >= divisor * 5 {
+ let integer_part = if fractional_part > 0 && fractional_part >= divisor * 5 {
// Round up the number contained in the buffer. We go through
// the buffer backwards and keep track of the carry.
let mut rev_pos = pos;
@@ -1101,9 +1099,18 @@ impl fmt::Debug for Duration {
// the whole buffer to '0's and need to increment the integer
// part.
if carry {
- integer_part += 1;
+ // If `integer_part == u64::MAX` and precision < 9, any
+ // carry of the overflow during rounding of the
+ // `fractional_part` into the `integer_part` will cause the
+ // `integer_part` itself to overflow. Avoid this by using an
+ // `Option<u64>`, with `None` representing `u64::MAX + 1`.
+ integer_part.checked_add(1)
+ } else {
+ Some(integer_part)
}
- }
+ } else {
+ Some(integer_part)
+ };
// Determine the end of the buffer: if precision is set, we just
// use as many digits from the buffer (capped to 9). If it isn't
@@ -1113,7 +1120,12 @@ impl fmt::Debug for Duration {
// This closure emits the formatted duration without emitting any
// padding (padding is calculated below).
let emit_without_padding = |f: &mut fmt::Formatter<'_>| {
- write!(f, "{}{}", prefix, integer_part)?;
+ if let Some(integer_part) = integer_part {
+ write!(f, "{}{}", prefix, integer_part)?;
+ } else {
+ // u64::MAX + 1 == 18446744073709551616
+ write!(f, "{}18446744073709551616", prefix)?;
+ }
// Write the decimal point and the fractional part (if any).
if end > 0 {
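[Note: the `Option<u64>` handling above matters for one input: rounding `Duration::MAX` at a low precision carries out of the fractional digits and past `u64::MAX`. A hedged illustration of the behaviour this hunk is expected to produce:]

```rust
use std::time::Duration;

fn main() {
    // Rounding the .999999999 fractional part up to zero digits carries into
    // the integer part, which no longer fits in a u64 and is printed as
    // u64::MAX + 1 instead of overflowing.
    let max = Duration::new(u64::MAX, 999_999_999);
    assert_eq!(format!("{max:.0?}"), "18446744073709551616s");
}
```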
@@ -1143,12 +1155,17 @@ impl fmt::Debug for Duration {
// 2. The postfix: can be "µs" so we have to count UTF8 characters.
let mut actual_w = prefix.len() + postfix.chars().count();
// 3. The integer part:
- if let Some(log) = integer_part.checked_log10() {
- // integer_part is > 0, so has length log10(x)+1
- actual_w += 1 + log as usize;
+ if let Some(integer_part) = integer_part {
+ if let Some(log) = integer_part.checked_ilog10() {
+ // integer_part is > 0, so has length log10(x)+1
+ actual_w += 1 + log as usize;
+ } else {
+ // integer_part is 0, so has length 1.
+ actual_w += 1;
+ }
} else {
- // integer_part is 0, so has length 1.
- actual_w += 1;
+ // integer_part is u64::MAX + 1, so has length 20
+ actual_w += 20;
}
// 4. The fractional part (if any):
if end > 0 {
@@ -1174,27 +1191,27 @@ impl fmt::Debug for Duration {
let prefix = if f.sign_plus() { "+" } else { "" };
if self.secs > 0 {
- fmt_decimal(f, self.secs, self.nanos, NANOS_PER_SEC / 10, prefix, "s")
- } else if self.nanos >= NANOS_PER_MILLI {
+ fmt_decimal(f, self.secs, self.nanos.0, NANOS_PER_SEC / 10, prefix, "s")
+ } else if self.nanos.0 >= NANOS_PER_MILLI {
fmt_decimal(
f,
- (self.nanos / NANOS_PER_MILLI) as u64,
- self.nanos % NANOS_PER_MILLI,
+ (self.nanos.0 / NANOS_PER_MILLI) as u64,
+ self.nanos.0 % NANOS_PER_MILLI,
NANOS_PER_MILLI / 10,
prefix,
"ms",
)
- } else if self.nanos >= NANOS_PER_MICRO {
+ } else if self.nanos.0 >= NANOS_PER_MICRO {
fmt_decimal(
f,
- (self.nanos / NANOS_PER_MICRO) as u64,
- self.nanos % NANOS_PER_MICRO,
+ (self.nanos.0 / NANOS_PER_MICRO) as u64,
+ self.nanos.0 % NANOS_PER_MICRO,
NANOS_PER_MICRO / 10,
prefix,
"µs",
)
} else {
- fmt_decimal(f, self.nanos as u64, 0, 1, prefix, "ns")
+ fmt_decimal(f, self.nanos.0 as u64, 0, 1, prefix, "ns")
}
}
}
@@ -1208,7 +1225,6 @@ impl fmt::Debug for Duration {
/// # Example
///
/// ```
-/// #![feature(duration_checked_float)]
/// use std::time::Duration;
///
/// if let Err(e) = Duration::try_from_secs_f32(-1.0) {
@@ -1216,33 +1232,33 @@ impl fmt::Debug for Duration {
/// }
/// ```
#[derive(Debug, Clone, PartialEq, Eq)]
-#[unstable(feature = "duration_checked_float", issue = "83400")]
-pub struct FromFloatSecsError {
- kind: FromFloatSecsErrorKind,
+#[stable(feature = "duration_checked_float", since = "1.66.0")]
+pub struct TryFromFloatSecsError {
+ kind: TryFromFloatSecsErrorKind,
}
-impl FromFloatSecsError {
+impl TryFromFloatSecsError {
const fn description(&self) -> &'static str {
match self.kind {
- FromFloatSecsErrorKind::Negative => {
+ TryFromFloatSecsErrorKind::Negative => {
"can not convert float seconds to Duration: value is negative"
}
- FromFloatSecsErrorKind::OverflowOrNan => {
+ TryFromFloatSecsErrorKind::OverflowOrNan => {
"can not convert float seconds to Duration: value is either too big or NaN"
}
}
}
}
-#[unstable(feature = "duration_checked_float", issue = "83400")]
-impl fmt::Display for FromFloatSecsError {
+#[stable(feature = "duration_checked_float", since = "1.66.0")]
+impl fmt::Display for TryFromFloatSecsError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.description().fmt(f)
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
-enum FromFloatSecsErrorKind {
+enum TryFromFloatSecsErrorKind {
// Value is negative.
Negative,
// Value is either too big to be represented as `Duration` or `NaN`.
@@ -1262,8 +1278,8 @@ macro_rules! try_from_secs {
const MANT_MASK: $bits_ty = (1 << $mant_bits) - 1;
const EXP_MASK: $bits_ty = (1 << $exp_bits) - 1;
- if $secs.is_sign_negative() {
- return Err(FromFloatSecsError { kind: FromFloatSecsErrorKind::Negative });
+ if $secs < 0.0 {
+ return Err(TryFromFloatSecsError { kind: TryFromFloatSecsErrorKind::Negative });
}
let bits = $secs.to_bits();
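[Note: replacing `is_sign_negative()` with `< 0.0` also changes how negative zero is treated: it is no longer rejected and converts to a zero duration, while genuinely negative or NaN inputs still fail. A small check, assuming the `try_from_secs_*` methods stabilized by this patch:]

```rust
use std::time::Duration;

fn main() {
    // -0.0 is not `< 0.0`, so it now converts cleanly to a zero duration.
    assert_eq!(Duration::try_from_secs_f32(-0.0), Ok(Duration::ZERO));
    // Genuinely negative and NaN inputs are still rejected.
    assert!(Duration::try_from_secs_f32(-1.0).is_err());
    assert!(Duration::try_from_secs_f64(f64::NAN).is_err());
}
```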
@@ -1288,7 +1304,7 @@ macro_rules! try_from_secs {
let rem_msb = nanos_tmp & rem_msb_mask == 0;
let add_ns = !(rem_msb || (is_even && is_tie));
- // f32 does not have enough presicion to trigger the second branch
+ // f32 does not have enough precision to trigger the second branch
// since it can not represent numbers between 0.999_999_940_395 and 1.0.
let nanos = nanos + add_ns as u32;
if ($mant_bits == 23) || (nanos != NANOS_PER_SEC) { (0, nanos) } else { (1, 0) }
@@ -1307,9 +1323,9 @@ macro_rules! try_from_secs {
let rem_msb = nanos_tmp & rem_msb_mask == 0;
let add_ns = !(rem_msb || (is_even && is_tie));
- // f32 does not have enough presicion to trigger the second branch.
+ // f32 does not have enough precision to trigger the second branch.
// For example, it can not represent numbers between 1.999_999_880...
- // and 2.0. Bigger values result in even smaller presicion of the
+ // and 2.0. Bigger values result in even smaller precision of the
// fractional part.
let nanos = nanos + add_ns as u32;
if ($mant_bits == 23) || (nanos != NANOS_PER_SEC) {
@@ -1322,10 +1338,10 @@ macro_rules! try_from_secs {
let secs = u64::from(mant) << (exp - $mant_bits);
(secs, 0)
} else {
- return Err(FromFloatSecsError { kind: FromFloatSecsErrorKind::OverflowOrNan });
+ return Err(TryFromFloatSecsError { kind: TryFromFloatSecsErrorKind::OverflowOrNan });
};
- Ok(Duration { secs, nanos })
+ Ok(Duration::new(secs, nanos))
}};
}
@@ -1338,8 +1354,6 @@ impl Duration {
///
/// # Examples
/// ```
- /// #![feature(duration_checked_float)]
- ///
/// use std::time::Duration;
///
/// let res = Duration::try_from_secs_f32(0.0);
@@ -1387,9 +1401,10 @@ impl Duration {
/// let res = Duration::try_from_secs_f32(val);
/// assert_eq!(res, Ok(Duration::new(1, 2_929_688)));
/// ```
- #[unstable(feature = "duration_checked_float", issue = "83400")]
+ #[stable(feature = "duration_checked_float", since = "1.66.0")]
+ #[rustc_const_unstable(feature = "duration_consts_float", issue = "72440")]
#[inline]
- pub const fn try_from_secs_f32(secs: f32) -> Result<Duration, FromFloatSecsError> {
+ pub const fn try_from_secs_f32(secs: f32) -> Result<Duration, TryFromFloatSecsError> {
try_from_secs!(
secs = secs,
mantissa_bits = 23,
@@ -1408,8 +1423,6 @@ impl Duration {
///
/// # Examples
/// ```
- /// #![feature(duration_checked_float)]
- ///
/// use std::time::Duration;
///
/// let res = Duration::try_from_secs_f64(0.0);
@@ -1465,9 +1478,10 @@ impl Duration {
/// let res = Duration::try_from_secs_f64(val);
/// assert_eq!(res, Ok(Duration::new(1, 2_929_688)));
/// ```
- #[unstable(feature = "duration_checked_float", issue = "83400")]
+ #[stable(feature = "duration_checked_float", since = "1.66.0")]
+ #[rustc_const_unstable(feature = "duration_consts_float", issue = "72440")]
#[inline]
- pub const fn try_from_secs_f64(secs: f64) -> Result<Duration, FromFloatSecsError> {
+ pub const fn try_from_secs_f64(secs: f64) -> Result<Duration, TryFromFloatSecsError> {
try_from_secs!(
secs = secs,
mantissa_bits = 52,
diff --git a/library/core/src/tuple.rs b/library/core/src/tuple.rs
index d189e6400..fc91fe468 100644
--- a/library/core/src/tuple.rs
+++ b/library/core/src/tuple.rs
@@ -93,7 +93,8 @@ macro_rules! tuple_impls {
maybe_tuple_doc! {
$($T)+ @
#[stable(feature = "rust1", since = "1.0.0")]
- impl<$($T:Default),+> Default for ($($T,)+) {
+ #[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
+ impl<$($T: ~const Default),+> const Default for ($($T,)+) {
#[inline]
fn default() -> ($($T,)+) {
($({ let x: $T = Default::default(); x},)+)
@@ -107,7 +108,7 @@ macro_rules! tuple_impls {
// Otherwise, it hides the docs entirely.
macro_rules! maybe_tuple_doc {
($a:ident @ #[$meta:meta] $item:item) => {
- #[cfg_attr(not(bootstrap), doc(fake_variadic))]
+ #[doc(fake_variadic)]
#[doc = "This trait is implemented for tuples up to twelve items long."]
#[$meta]
$item
diff --git a/library/core/src/unicode/printable.rs b/library/core/src/unicode/printable.rs
index 31cf88a41..ffb18a5ba 100644
--- a/library/core/src/unicode/printable.rs
+++ b/library/core/src/unicode/printable.rs
@@ -54,7 +54,7 @@ pub(crate) fn is_printable(x: char) -> bool {
if 0x2a6e0 <= x && x < 0x2a700 {
return false;
}
- if 0x2b739 <= x && x < 0x2b740 {
+ if 0x2b73a <= x && x < 0x2b740 {
return false;
}
if 0x2b81e <= x && x < 0x2b820 {
@@ -69,7 +69,10 @@ pub(crate) fn is_printable(x: char) -> bool {
if 0x2fa1e <= x && x < 0x30000 {
return false;
}
- if 0x3134b <= x && x < 0xe0100 {
+ if 0x3134b <= x && x < 0x31350 {
+ return false;
+ }
+ if 0x323b0 <= x && x < 0xe0100 {
return false;
}
if 0xe01f0 <= x && x < 0x110000 {
@@ -92,7 +95,7 @@ const SINGLETONS0U: &[(u8, u8)] = &[
(0x0b, 25),
(0x0c, 26),
(0x0d, 16),
- (0x0e, 13),
+ (0x0e, 12),
(0x0f, 4),
(0x10, 3),
(0x12, 18),
@@ -142,24 +145,24 @@ const SINGLETONS0L: &[u8] = &[
0xe4, 0xe5, 0xf0, 0x0d, 0x11, 0x45, 0x49, 0x64,
0x65, 0x80, 0x84, 0xb2, 0xbc, 0xbe, 0xbf, 0xd5,
0xd7, 0xf0, 0xf1, 0x83, 0x85, 0x8b, 0xa4, 0xa6,
- 0xbe, 0xbf, 0xc5, 0xc7, 0xce, 0xcf, 0xda, 0xdb,
- 0x48, 0x98, 0xbd, 0xcd, 0xc6, 0xce, 0xcf, 0x49,
- 0x4e, 0x4f, 0x57, 0x59, 0x5e, 0x5f, 0x89, 0x8e,
- 0x8f, 0xb1, 0xb6, 0xb7, 0xbf, 0xc1, 0xc6, 0xc7,
- 0xd7, 0x11, 0x16, 0x17, 0x5b, 0x5c, 0xf6, 0xf7,
- 0xfe, 0xff, 0x80, 0x6d, 0x71, 0xde, 0xdf, 0x0e,
- 0x1f, 0x6e, 0x6f, 0x1c, 0x1d, 0x5f, 0x7d, 0x7e,
- 0xae, 0xaf, 0x7f, 0xbb, 0xbc, 0x16, 0x17, 0x1e,
- 0x1f, 0x46, 0x47, 0x4e, 0x4f, 0x58, 0x5a, 0x5c,
- 0x5e, 0x7e, 0x7f, 0xb5, 0xc5, 0xd4, 0xd5, 0xdc,
- 0xf0, 0xf1, 0xf5, 0x72, 0x73, 0x8f, 0x74, 0x75,
- 0x96, 0x26, 0x2e, 0x2f, 0xa7, 0xaf, 0xb7, 0xbf,
- 0xc7, 0xcf, 0xd7, 0xdf, 0x9a, 0x40, 0x97, 0x98,
- 0x30, 0x8f, 0x1f, 0xd2, 0xd4, 0xce, 0xff, 0x4e,
- 0x4f, 0x5a, 0x5b, 0x07, 0x08, 0x0f, 0x10, 0x27,
- 0x2f, 0xee, 0xef, 0x6e, 0x6f, 0x37, 0x3d, 0x3f,
- 0x42, 0x45, 0x90, 0x91, 0x53, 0x67, 0x75, 0xc8,
- 0xc9, 0xd0, 0xd1, 0xd8, 0xd9, 0xe7, 0xfe, 0xff,
+ 0xbe, 0xbf, 0xc5, 0xc7, 0xcf, 0xda, 0xdb, 0x48,
+ 0x98, 0xbd, 0xcd, 0xc6, 0xce, 0xcf, 0x49, 0x4e,
+ 0x4f, 0x57, 0x59, 0x5e, 0x5f, 0x89, 0x8e, 0x8f,
+ 0xb1, 0xb6, 0xb7, 0xbf, 0xc1, 0xc6, 0xc7, 0xd7,
+ 0x11, 0x16, 0x17, 0x5b, 0x5c, 0xf6, 0xf7, 0xfe,
+ 0xff, 0x80, 0x6d, 0x71, 0xde, 0xdf, 0x0e, 0x1f,
+ 0x6e, 0x6f, 0x1c, 0x1d, 0x5f, 0x7d, 0x7e, 0xae,
+ 0xaf, 0x7f, 0xbb, 0xbc, 0x16, 0x17, 0x1e, 0x1f,
+ 0x46, 0x47, 0x4e, 0x4f, 0x58, 0x5a, 0x5c, 0x5e,
+ 0x7e, 0x7f, 0xb5, 0xc5, 0xd4, 0xd5, 0xdc, 0xf0,
+ 0xf1, 0xf5, 0x72, 0x73, 0x8f, 0x74, 0x75, 0x96,
+ 0x26, 0x2e, 0x2f, 0xa7, 0xaf, 0xb7, 0xbf, 0xc7,
+ 0xcf, 0xd7, 0xdf, 0x9a, 0x40, 0x97, 0x98, 0x30,
+ 0x8f, 0x1f, 0xd2, 0xd4, 0xce, 0xff, 0x4e, 0x4f,
+ 0x5a, 0x5b, 0x07, 0x08, 0x0f, 0x10, 0x27, 0x2f,
+ 0xee, 0xef, 0x6e, 0x6f, 0x37, 0x3d, 0x3f, 0x42,
+ 0x45, 0x90, 0x91, 0x53, 0x67, 0x75, 0xc8, 0xc9,
+ 0xd0, 0xd1, 0xd8, 0xd9, 0xe7, 0xfe, 0xff,
];
#[rustfmt::skip]
const SINGLETONS1U: &[(u8, u8)] = &[
@@ -184,10 +187,12 @@ const SINGLETONS1U: &[(u8, u8)] = &[
(0x19, 13),
(0x1c, 5),
(0x1d, 8),
+ (0x1f, 1),
(0x24, 1),
(0x6a, 4),
(0x6b, 2),
(0xaf, 3),
+ (0xb1, 2),
(0xbc, 2),
(0xcf, 2),
(0xd1, 2),
@@ -203,7 +208,7 @@ const SINGLETONS1U: &[(u8, u8)] = &[
(0xee, 32),
(0xf0, 4),
(0xf8, 2),
- (0xfa, 2),
+ (0xfa, 3),
(0xfb, 1),
];
#[rustfmt::skip]
@@ -220,18 +225,19 @@ const SINGLETONS1L: &[u8] = &[
0x0a, 0x0b, 0x14, 0x17, 0x36, 0x39, 0x3a, 0xa8,
0xa9, 0xd8, 0xd9, 0x09, 0x37, 0x90, 0x91, 0xa8,
0x07, 0x0a, 0x3b, 0x3e, 0x66, 0x69, 0x8f, 0x92,
- 0x6f, 0x5f, 0xbf, 0xee, 0xef, 0x5a, 0x62, 0xf4,
- 0xfc, 0xff, 0x9a, 0x9b, 0x2e, 0x2f, 0x27, 0x28,
- 0x55, 0x9d, 0xa0, 0xa1, 0xa3, 0xa4, 0xa7, 0xa8,
- 0xad, 0xba, 0xbc, 0xc4, 0x06, 0x0b, 0x0c, 0x15,
- 0x1d, 0x3a, 0x3f, 0x45, 0x51, 0xa6, 0xa7, 0xcc,
- 0xcd, 0xa0, 0x07, 0x19, 0x1a, 0x22, 0x25, 0x3e,
- 0x3f, 0xe7, 0xec, 0xef, 0xff, 0xc5, 0xc6, 0x04,
- 0x20, 0x23, 0x25, 0x26, 0x28, 0x33, 0x38, 0x3a,
- 0x48, 0x4a, 0x4c, 0x50, 0x53, 0x55, 0x56, 0x58,
- 0x5a, 0x5c, 0x5e, 0x60, 0x63, 0x65, 0x66, 0x6b,
- 0x73, 0x78, 0x7d, 0x7f, 0x8a, 0xa4, 0xaa, 0xaf,
- 0xb0, 0xc0, 0xd0, 0xae, 0xaf, 0x6e, 0x6f, 0x93,
+ 0x11, 0x6f, 0x5f, 0xbf, 0xee, 0xef, 0x5a, 0x62,
+ 0xf4, 0xfc, 0xff, 0x53, 0x54, 0x9a, 0x9b, 0x2e,
+ 0x2f, 0x27, 0x28, 0x55, 0x9d, 0xa0, 0xa1, 0xa3,
+ 0xa4, 0xa7, 0xa8, 0xad, 0xba, 0xbc, 0xc4, 0x06,
+ 0x0b, 0x0c, 0x15, 0x1d, 0x3a, 0x3f, 0x45, 0x51,
+ 0xa6, 0xa7, 0xcc, 0xcd, 0xa0, 0x07, 0x19, 0x1a,
+ 0x22, 0x25, 0x3e, 0x3f, 0xe7, 0xec, 0xef, 0xff,
+ 0xc5, 0xc6, 0x04, 0x20, 0x23, 0x25, 0x26, 0x28,
+ 0x33, 0x38, 0x3a, 0x48, 0x4a, 0x4c, 0x50, 0x53,
+ 0x55, 0x56, 0x58, 0x5a, 0x5c, 0x5e, 0x60, 0x63,
+ 0x65, 0x66, 0x6b, 0x73, 0x78, 0x7d, 0x7f, 0x8a,
+ 0xa4, 0xaa, 0xaf, 0xb0, 0xc0, 0xd0, 0xae, 0xaf,
+ 0x6e, 0x6f, 0xbe, 0x93,
];
#[rustfmt::skip]
const NORMAL0: &[u8] = &[
@@ -272,7 +278,7 @@ const NORMAL0: &[u8] = &[
0x1b, 0x07,
0x57, 0x07,
0x02, 0x06,
- 0x16, 0x0d,
+ 0x17, 0x0c,
0x50, 0x04,
0x43, 0x03,
0x2d, 0x03,
@@ -424,8 +430,8 @@ const NORMAL1: &[u8] = &[
0x33, 0x07,
0x2e, 0x08,
0x0a, 0x81, 0x26,
- 0x52, 0x4e,
- 0x28, 0x08,
+ 0x52, 0x4b,
+ 0x2b, 0x08,
0x2a, 0x16,
0x1a, 0x26,
0x1c, 0x14,
@@ -438,7 +444,7 @@ const NORMAL1: &[u8] = &[
0x48, 0x08,
0x27, 0x09,
0x75, 0x0b,
- 0x3f, 0x41,
+ 0x42, 0x3e,
0x2a, 0x06,
0x3b, 0x05,
0x0a, 0x06,
@@ -464,7 +470,8 @@ const NORMAL1: &[u8] = &[
0x45, 0x1b,
0x48, 0x08,
0x53, 0x0d,
- 0x49, 0x81, 0x07,
+ 0x49, 0x07,
+ 0x0a, 0x80, 0xf6,
0x46, 0x0a,
0x1d, 0x03,
0x47, 0x49,
@@ -473,14 +480,17 @@ const NORMAL1: &[u8] = &[
0x0a, 0x06,
0x39, 0x07,
0x0a, 0x81, 0x36,
- 0x19, 0x80, 0xb7,
+ 0x19, 0x07,
+ 0x3b, 0x03,
+ 0x1c, 0x56,
0x01, 0x0f,
0x32, 0x0d,
0x83, 0x9b, 0x66,
0x75, 0x0b,
0x80, 0xc4, 0x8a, 0x4c,
0x63, 0x0d,
- 0x84, 0x2f, 0x8f, 0xd1,
+ 0x84, 0x30, 0x10,
+ 0x16, 0x8f, 0xaa,
0x82, 0x47, 0xa1, 0xb9,
0x82, 0x39, 0x07,
0x2a, 0x04,
@@ -498,8 +508,9 @@ const NORMAL1: &[u8] = &[
0x97, 0xf8, 0x08,
0x84, 0xd6, 0x2a,
0x09, 0xa2, 0xe7,
- 0x81, 0x33, 0x2d,
- 0x03, 0x11,
+ 0x81, 0x33, 0x0f,
+ 0x01, 0x1d,
+ 0x06, 0x0e,
0x04, 0x08,
0x81, 0x8c, 0x89, 0x04,
0x6b, 0x05,
@@ -511,21 +522,26 @@ const NORMAL1: &[u8] = &[
0x80, 0xf6, 0x0a,
0x73, 0x08,
0x70, 0x15,
- 0x46, 0x80, 0x9a,
+ 0x46, 0x7a,
+ 0x14, 0x0c,
0x14, 0x0c,
0x57, 0x09,
0x19, 0x80, 0x87,
0x81, 0x47, 0x03,
0x85, 0x42, 0x0f,
0x15, 0x84, 0x50,
- 0x1f, 0x80, 0xe1,
- 0x2b, 0x80, 0xd5,
+ 0x1f, 0x06,
+ 0x06, 0x80, 0xd5,
+ 0x2b, 0x05,
+ 0x3e, 0x21,
+ 0x01, 0x70,
0x2d, 0x03,
0x1a, 0x04,
0x02, 0x81, 0x40,
0x1f, 0x11,
0x3a, 0x05,
- 0x01, 0x84, 0xe0,
+ 0x01, 0x81, 0xd0,
+ 0x2a, 0x82, 0xe6,
0x80, 0xf7, 0x29,
0x4c, 0x04,
0x0a, 0x04,
@@ -546,11 +562,11 @@ const NORMAL1: &[u8] = &[
0x09, 0x07,
0x02, 0x0e,
0x06, 0x80, 0x9a,
- 0x83, 0xd8, 0x05,
- 0x10, 0x03,
+ 0x83, 0xd8, 0x04,
+ 0x11, 0x03,
0x0d, 0x03,
- 0x74, 0x0c,
- 0x59, 0x07,
+ 0x77, 0x04,
+ 0x5f, 0x06,
0x0c, 0x04,
0x01, 0x0f,
0x0c, 0x04,
@@ -559,15 +575,12 @@ const NORMAL1: &[u8] = &[
0x28, 0x08,
0x22, 0x4e,
0x81, 0x54, 0x0c,
- 0x15, 0x03,
- 0x05, 0x03,
- 0x07, 0x09,
0x1d, 0x03,
- 0x0b, 0x05,
- 0x06, 0x0a,
- 0x0a, 0x06,
- 0x08, 0x08,
- 0x07, 0x09,
+ 0x09, 0x07,
+ 0x36, 0x08,
+ 0x0e, 0x04,
+ 0x09, 0x07,
+ 0x09, 0x07,
0x80, 0xcb, 0x25,
0x0a, 0x84, 0x06,
];
diff --git a/library/core/src/unicode/unicode_data.rs b/library/core/src/unicode/unicode_data.rs
index d2073f86c..bd69ca520 100644
--- a/library/core/src/unicode/unicode_data.rs
+++ b/library/core/src/unicode/unicode_data.rs
@@ -1,7 +1,8 @@
///! This file is generated by src/tools/unicode-table-generator; do not edit manually!
+#[rustc_const_unstable(feature = "const_unicode_case_lookup", issue = "101400")]
#[inline(always)]
-fn bitset_search<
+const fn bitset_search<
const N: usize,
const CHUNK_SIZE: usize,
const N1: usize,
@@ -17,14 +18,18 @@ fn bitset_search<
let bucket_idx = (needle / 64) as usize;
let chunk_map_idx = bucket_idx / CHUNK_SIZE;
let chunk_piece = bucket_idx % CHUNK_SIZE;
- let chunk_idx = if let Some(&v) = chunk_idx_map.get(chunk_map_idx) {
- v
+ // FIXME: const-hack: Revert to `slice::get` after `const_slice_index`
+ // feature stabilizes.
+ let chunk_idx = if chunk_map_idx < chunk_idx_map.len() {
+ chunk_idx_map[chunk_map_idx]
} else {
return false;
};
let idx = bitset_chunk_idx[chunk_idx as usize][chunk_piece] as usize;
- let word = if let Some(word) = bitset_canonical.get(idx) {
- *word
+ // FIXME: const-hack: Revert to `slice::get` after `const_slice_index`
+ // feature stabilizes.
+ let word = if idx < bitset_canonical.len() {
+ bitset_canonical[idx]
} else {
let (real_idx, mapping) = bitset_canonicalized[idx - bitset_canonical.len()];
let mut word = bitset_canonical[real_idx as usize];
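[Note: the FIXME const-hack above swaps `slice::get` for a hand-written bounds check because `get` was not yet callable in a `const fn`, while plain indexing is. A tiny stand-alone sketch of the same pattern (hypothetical helper, not from the patch):]

```rust
// Manual bounds check plus indexing: usable in `const fn` on stable, unlike
// `slice::get` at the time of this change.
const fn get_or_zero(xs: &[u32], i: usize) -> u32 {
    if i < xs.len() { xs[i] } else { 0 }
}

fn main() {
    const TABLE: [u32; 3] = [10, 20, 30];
    const IN_BOUNDS: u32 = get_or_zero(&TABLE, 1);
    const OUT_OF_BOUNDS: u32 = get_or_zero(&TABLE, 9);
    assert_eq!((IN_BOUNDS, OUT_OF_BOUNDS), (20, 0));
}
```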
@@ -94,21 +99,21 @@ fn skip_search<const SOR: usize, const OFFSETS: usize>(
offset_idx % 2 == 1
}
-pub const UNICODE_VERSION: (u8, u8, u8) = (14, 0, 0);
+pub const UNICODE_VERSION: (u8, u8, u8) = (15, 0, 0);
#[rustfmt::skip]
pub mod alphabetic {
- static SHORT_OFFSET_RUNS: [u32; 51] = [
- 706, 33559113, 876615277, 956309270, 1166025910, 1314925568, 1319120901, 1398813696,
- 1449151936, 1451271309, 1455465997, 1463867300, 1652619520, 1663105646, 1665203518,
- 1711342208, 1797326647, 1891700352, 2044795904, 2397118176, 2485199770, 2495688592,
- 2506175535, 2512471040, 2514568775, 2516674560, 2518772281, 2520870464, 2552334328,
- 2583792854, 2587996144, 2594287907, 2608968444, 2621553664, 2623656960, 2644629158,
- 2722225920, 2770461328, 2808211424, 2816601600, 2850156848, 2988572672, 3001198304,
- 3003299641, 3007499938, 3015896033, 3020093440, 3022191134, 3024289792, 3026391883,
- 3029603147,
+ static SHORT_OFFSET_RUNS: [u32; 53] = [
+ 706, 33559113, 872420973, 952114966, 1161831606, 1310731264, 1314926597, 1394619392,
+ 1444957632, 1447077005, 1451271693, 1459672996, 1648425216, 1658911342, 1661009214,
+ 1707147904, 1793132343, 1887506048, 2040601600, 2392923872, 2481005466, 2504077200,
+ 2514564144, 2520859648, 2527151687, 2529257472, 2531355193, 2533453376, 2564917240,
+ 2596375766, 2600579056, 2606870819, 2621551356, 2642525184, 2644628480, 2665600678,
+ 2743197440, 2791432848, 2841765072, 2850154464, 2854350336, 2887905584, 3026321408,
+ 3038947040, 3041048378, 3045248674, 3053644769, 3057842176, 3059939870, 3062038528,
+ 3064140619, 3066241968, 3071550384,
];
- static OFFSETS: [u8; 1445] = [
+ static OFFSETS: [u8; 1465] = [
65, 26, 6, 26, 47, 1, 10, 1, 4, 1, 5, 23, 1, 31, 1, 0, 4, 12, 14, 5, 7, 1, 1, 1, 86, 1, 42,
5, 1, 2, 2, 4, 1, 1, 6, 1, 1, 3, 1, 1, 1, 20, 1, 83, 1, 139, 8, 166, 1, 38, 2, 1, 6, 41, 39,
14, 1, 1, 1, 2, 1, 2, 1, 1, 8, 27, 4, 4, 29, 11, 5, 56, 1, 7, 14, 102, 1, 8, 4, 8, 4, 3, 10,
@@ -118,50 +123,51 @@ pub mod alphabetic {
2, 1, 2, 4, 5, 4, 2, 2, 2, 4, 1, 7, 4, 1, 1, 17, 6, 11, 3, 1, 9, 1, 3, 1, 22, 1, 7, 1, 2, 1,
5, 3, 9, 1, 3, 1, 2, 3, 1, 15, 4, 21, 4, 4, 3, 1, 8, 2, 2, 2, 22, 1, 7, 1, 2, 1, 5, 3, 8, 2,
2, 2, 2, 9, 2, 4, 2, 1, 5, 13, 1, 16, 2, 1, 6, 3, 3, 1, 4, 3, 2, 1, 1, 1, 2, 3, 2, 3, 3, 3,
- 12, 4, 5, 3, 3, 1, 3, 3, 1, 6, 1, 40, 4, 1, 8, 1, 3, 1, 23, 1, 16, 3, 8, 1, 3, 1, 3, 8, 2,
- 1, 3, 2, 1, 2, 4, 28, 4, 1, 8, 1, 3, 1, 23, 1, 10, 1, 5, 3, 8, 1, 3, 1, 3, 8, 2, 6, 2, 1, 4,
- 13, 2, 13, 13, 1, 3, 1, 41, 2, 8, 1, 3, 1, 3, 1, 1, 5, 4, 7, 5, 22, 6, 1, 3, 1, 18, 3, 24,
- 1, 9, 1, 1, 2, 7, 8, 6, 1, 1, 1, 8, 18, 2, 13, 58, 5, 7, 6, 1, 51, 2, 1, 1, 1, 5, 1, 24, 1,
- 1, 1, 19, 1, 3, 2, 5, 1, 1, 6, 1, 14, 4, 32, 1, 63, 8, 1, 36, 4, 17, 6, 16, 1, 36, 67, 55,
- 1, 1, 2, 5, 16, 64, 10, 4, 2, 38, 1, 1, 5, 1, 2, 43, 1, 0, 1, 4, 2, 7, 1, 1, 1, 4, 2, 41, 1,
- 4, 2, 33, 1, 4, 2, 7, 1, 1, 1, 4, 2, 15, 1, 57, 1, 4, 2, 67, 37, 16, 16, 86, 2, 6, 3, 0, 2,
- 17, 1, 26, 5, 75, 3, 11, 7, 20, 11, 21, 12, 20, 12, 13, 1, 3, 1, 2, 12, 52, 2, 19, 14, 1, 4,
- 1, 67, 89, 7, 43, 5, 70, 10, 31, 1, 12, 4, 9, 23, 30, 2, 5, 11, 44, 4, 26, 54, 28, 4, 63, 2,
- 20, 50, 1, 23, 2, 11, 3, 49, 52, 1, 15, 1, 8, 51, 42, 2, 4, 10, 44, 1, 11, 14, 55, 22, 3,
- 10, 36, 2, 9, 7, 43, 2, 3, 41, 4, 1, 6, 1, 2, 3, 1, 5, 192, 39, 14, 11, 0, 2, 6, 2, 38, 2,
- 6, 2, 8, 1, 1, 1, 1, 1, 1, 1, 31, 2, 53, 1, 7, 1, 1, 3, 3, 1, 7, 3, 4, 2, 6, 4, 13, 5, 3, 1,
- 7, 116, 1, 13, 1, 16, 13, 101, 1, 4, 1, 2, 10, 1, 1, 3, 5, 6, 1, 1, 1, 1, 1, 1, 4, 1, 11, 2,
- 4, 5, 5, 4, 1, 17, 41, 0, 52, 0, 229, 6, 4, 3, 2, 12, 38, 1, 1, 5, 1, 2, 56, 7, 1, 16, 23,
- 9, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 32, 47, 1, 0, 3, 25, 9, 7, 5, 2, 5, 4,
- 86, 6, 3, 1, 90, 1, 4, 5, 43, 1, 94, 17, 32, 48, 16, 0, 0, 64, 0, 67, 46, 2, 0, 3, 16, 10,
- 2, 20, 47, 5, 8, 3, 113, 39, 9, 2, 103, 2, 64, 5, 2, 1, 1, 1, 5, 24, 20, 1, 33, 24, 52, 12,
- 68, 1, 1, 44, 6, 3, 1, 1, 3, 10, 33, 5, 35, 13, 29, 3, 51, 1, 12, 15, 1, 16, 16, 10, 5, 1,
- 55, 9, 14, 18, 23, 3, 69, 1, 1, 1, 1, 24, 3, 2, 16, 2, 4, 11, 6, 2, 6, 2, 6, 9, 7, 1, 7, 1,
- 43, 1, 14, 6, 123, 21, 0, 12, 23, 4, 49, 0, 0, 2, 106, 38, 7, 12, 5, 5, 12, 1, 13, 1, 5, 1,
- 1, 1, 2, 1, 2, 1, 108, 33, 0, 18, 64, 2, 54, 40, 12, 116, 5, 1, 135, 36, 26, 6, 26, 11, 89,
- 3, 6, 2, 6, 2, 6, 2, 3, 35, 12, 1, 26, 1, 19, 1, 2, 1, 15, 2, 14, 34, 123, 69, 53, 0, 29, 3,
+ 12, 4, 5, 3, 3, 1, 3, 3, 1, 6, 1, 40, 13, 1, 3, 1, 23, 1, 16, 3, 8, 1, 3, 1, 3, 8, 2, 1, 3,
+ 2, 1, 2, 4, 28, 4, 1, 8, 1, 3, 1, 23, 1, 10, 1, 5, 3, 8, 1, 3, 1, 3, 8, 2, 6, 2, 1, 4, 13,
+ 3, 12, 13, 1, 3, 1, 41, 2, 8, 1, 3, 1, 3, 1, 1, 5, 4, 7, 5, 22, 6, 1, 3, 1, 18, 3, 24, 1, 9,
+ 1, 1, 2, 7, 8, 6, 1, 1, 1, 8, 18, 2, 13, 58, 5, 7, 6, 1, 51, 2, 1, 1, 1, 5, 1, 24, 1, 1, 1,
+ 19, 1, 3, 2, 5, 1, 1, 6, 1, 14, 4, 32, 1, 63, 8, 1, 36, 4, 19, 4, 16, 1, 36, 67, 55, 1, 1,
+ 2, 5, 16, 64, 10, 4, 2, 38, 1, 1, 5, 1, 2, 43, 1, 0, 1, 4, 2, 7, 1, 1, 1, 4, 2, 41, 1, 4, 2,
+ 33, 1, 4, 2, 7, 1, 1, 1, 4, 2, 15, 1, 57, 1, 4, 2, 67, 37, 16, 16, 86, 2, 6, 3, 0, 2, 17, 1,
+ 26, 5, 75, 3, 11, 7, 20, 11, 21, 12, 20, 12, 13, 1, 3, 1, 2, 12, 52, 2, 19, 14, 1, 4, 1, 67,
+ 89, 7, 43, 5, 70, 10, 31, 1, 12, 4, 9, 23, 30, 2, 5, 11, 44, 4, 26, 54, 28, 4, 63, 2, 20,
+ 50, 1, 23, 2, 11, 3, 49, 52, 1, 15, 1, 8, 51, 42, 2, 4, 10, 44, 1, 11, 14, 55, 22, 3, 10,
+ 36, 2, 9, 7, 43, 2, 3, 41, 4, 1, 6, 1, 2, 3, 1, 5, 192, 39, 14, 11, 0, 2, 6, 2, 38, 2, 6, 2,
+ 8, 1, 1, 1, 1, 1, 1, 1, 31, 2, 53, 1, 7, 1, 1, 3, 3, 1, 7, 3, 4, 2, 6, 4, 13, 5, 3, 1, 7,
+ 116, 1, 13, 1, 16, 13, 101, 1, 4, 1, 2, 10, 1, 1, 3, 5, 6, 1, 1, 1, 1, 1, 1, 4, 1, 11, 2, 4,
+ 5, 5, 4, 1, 17, 41, 0, 52, 0, 229, 6, 4, 3, 2, 12, 38, 1, 1, 5, 1, 2, 56, 7, 1, 16, 23, 9,
+ 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 32, 47, 1, 0, 3, 25, 9, 7, 5, 2, 5, 4, 86,
+ 6, 3, 1, 90, 1, 4, 5, 43, 1, 94, 17, 32, 48, 16, 0, 0, 64, 0, 67, 46, 2, 0, 3, 16, 10, 2,
+ 20, 47, 5, 8, 3, 113, 39, 9, 2, 103, 2, 64, 5, 2, 1, 1, 1, 5, 24, 20, 1, 33, 24, 52, 12, 68,
+ 1, 1, 44, 6, 3, 1, 1, 3, 10, 33, 5, 35, 13, 29, 3, 51, 1, 12, 15, 1, 16, 16, 10, 5, 1, 55,
+ 9, 14, 18, 23, 3, 69, 1, 1, 1, 1, 24, 3, 2, 16, 2, 4, 11, 6, 2, 6, 2, 6, 9, 7, 1, 7, 1, 43,
+ 1, 14, 6, 123, 21, 0, 12, 23, 4, 49, 0, 0, 2, 106, 38, 7, 12, 5, 5, 12, 1, 13, 1, 5, 1, 1,
+ 1, 2, 1, 2, 1, 108, 33, 0, 18, 64, 2, 54, 40, 12, 116, 5, 1, 135, 36, 26, 6, 26, 11, 89, 3,
+ 6, 2, 6, 2, 6, 2, 3, 35, 12, 1, 26, 1, 19, 1, 2, 1, 15, 2, 14, 34, 123, 69, 53, 0, 29, 3,
49, 47, 32, 13, 30, 5, 43, 5, 30, 2, 36, 4, 8, 1, 5, 42, 158, 18, 36, 4, 36, 4, 40, 8, 52,
12, 11, 1, 15, 1, 7, 1, 2, 1, 11, 1, 15, 1, 7, 1, 2, 67, 0, 9, 22, 10, 8, 24, 6, 1, 42, 1,
9, 69, 6, 2, 1, 1, 44, 1, 2, 3, 1, 2, 23, 10, 23, 9, 31, 65, 19, 1, 2, 10, 22, 10, 26, 70,
56, 6, 2, 64, 4, 1, 2, 5, 8, 1, 3, 1, 29, 42, 29, 3, 29, 35, 8, 1, 28, 27, 54, 10, 22, 10,
19, 13, 18, 110, 73, 55, 51, 13, 51, 13, 40, 0, 42, 1, 2, 3, 2, 78, 29, 10, 1, 8, 22, 42,
- 18, 46, 21, 27, 23, 9, 70, 43, 5, 12, 55, 9, 1, 13, 25, 23, 51, 17, 4, 8, 35, 3, 1, 9, 64,
- 1, 4, 9, 2, 10, 1, 1, 1, 35, 18, 1, 34, 2, 1, 6, 1, 65, 7, 1, 1, 1, 4, 1, 15, 1, 10, 7, 57,
+ 18, 46, 21, 27, 23, 9, 70, 43, 5, 10, 57, 9, 1, 13, 25, 23, 51, 17, 4, 8, 35, 3, 1, 9, 64,
+ 1, 4, 9, 2, 10, 1, 1, 1, 35, 18, 1, 34, 2, 1, 6, 4, 62, 7, 1, 1, 1, 4, 1, 15, 1, 10, 7, 57,
23, 4, 1, 8, 2, 2, 2, 22, 1, 7, 1, 2, 1, 5, 3, 8, 2, 2, 2, 2, 3, 1, 6, 1, 5, 7, 156, 66, 1,
3, 1, 4, 20, 3, 30, 66, 2, 2, 1, 1, 184, 54, 2, 7, 25, 6, 34, 63, 1, 1, 3, 1, 59, 54, 2, 1,
71, 27, 2, 14, 21, 7, 185, 57, 103, 64, 31, 8, 2, 1, 2, 8, 1, 2, 1, 30, 1, 2, 2, 2, 2, 4,
93, 8, 2, 46, 2, 6, 1, 1, 1, 2, 27, 51, 2, 10, 17, 72, 5, 1, 18, 73, 0, 9, 1, 45, 1, 7, 1,
1, 49, 30, 2, 22, 1, 14, 73, 7, 1, 2, 1, 44, 3, 1, 1, 2, 1, 3, 1, 1, 2, 2, 24, 6, 1, 2, 1,
- 37, 1, 2, 1, 4, 1, 1, 0, 23, 185, 1, 79, 0, 102, 111, 17, 196, 0, 97, 15, 0, 0, 0, 0, 0, 7,
- 31, 17, 79, 17, 30, 18, 48, 16, 4, 31, 21, 5, 19, 0, 64, 128, 75, 4, 57, 7, 17, 64, 2, 1, 1,
- 12, 2, 14, 0, 8, 0, 42, 9, 0, 4, 1, 7, 1, 2, 1, 0, 45, 3, 17, 4, 8, 0, 0, 107, 5, 13, 3, 9,
- 7, 10, 4, 1, 0, 85, 1, 71, 1, 2, 2, 1, 2, 2, 2, 4, 1, 12, 1, 1, 1, 7, 1, 65, 1, 4, 2, 8, 1,
- 7, 1, 28, 1, 4, 1, 5, 1, 1, 3, 7, 1, 0, 2, 25, 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 31, 1,
- 25, 1, 31, 1, 25, 1, 8, 0, 31, 225, 7, 1, 17, 2, 7, 1, 2, 1, 5, 213, 45, 10, 7, 16, 1, 0,
- 30, 18, 44, 0, 7, 1, 4, 1, 2, 1, 15, 1, 197, 59, 68, 3, 1, 3, 1, 0, 4, 1, 27, 1, 2, 1, 1, 2,
- 1, 1, 10, 1, 4, 1, 1, 1, 1, 6, 1, 4, 1, 1, 1, 1, 1, 1, 3, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 2, 1, 1, 2, 4, 1, 7, 1, 4, 1, 4, 1, 1, 1, 10, 1, 17, 5, 3, 1, 5, 1, 17, 0, 26,
- 6, 26, 6, 26, 0, 0, 32, 0, 7, 222, 2, 0, 14, 0, 0, 0, 0, 0, 0,
+ 37, 1, 2, 1, 4, 1, 1, 0, 23, 9, 17, 1, 41, 3, 3, 111, 1, 79, 0, 102, 111, 17, 196, 0, 97,
+ 15, 0, 17, 6, 0, 0, 0, 0, 7, 31, 17, 79, 17, 30, 18, 48, 16, 4, 31, 21, 5, 19, 0, 64, 128,
+ 75, 4, 57, 7, 17, 64, 2, 1, 1, 12, 2, 14, 0, 8, 0, 42, 9, 0, 4, 1, 7, 1, 2, 1, 0, 15, 1, 29,
+ 3, 2, 1, 14, 4, 8, 0, 0, 107, 5, 13, 3, 9, 7, 10, 4, 1, 0, 85, 1, 71, 1, 2, 2, 1, 2, 2, 2,
+ 4, 1, 12, 1, 1, 1, 7, 1, 65, 1, 4, 2, 8, 1, 7, 1, 28, 1, 4, 1, 5, 1, 1, 3, 7, 1, 0, 2, 25,
+ 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 8, 0, 31, 6, 6, 213, 7, 1,
+ 17, 2, 7, 1, 2, 1, 5, 5, 62, 33, 1, 112, 45, 10, 7, 16, 1, 0, 30, 18, 44, 0, 28, 0, 7, 1, 4,
+ 1, 2, 1, 15, 1, 197, 59, 68, 3, 1, 3, 1, 0, 4, 1, 27, 1, 2, 1, 1, 2, 1, 1, 10, 1, 4, 1, 1,
+ 1, 1, 6, 1, 4, 1, 1, 1, 1, 1, 1, 3, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2,
+ 4, 1, 7, 1, 4, 1, 4, 1, 1, 1, 10, 1, 17, 5, 3, 1, 5, 1, 17, 0, 26, 6, 26, 6, 26, 0, 0, 32,
+ 0, 6, 222, 2, 0, 14, 0, 0, 0, 0, 0, 5, 0, 0,
];
pub fn lookup(c: char) -> bool {
super::skip_search(
@@ -177,11 +183,11 @@ pub mod case_ignorable {
static SHORT_OFFSET_RUNS: [u32; 35] = [
688, 44045149, 572528402, 576724925, 807414908, 878718981, 903913493, 929080568, 933275148,
937491230, 1138818560, 1147208189, 1210124160, 1222707713, 1235291428, 1260457643,
- 1264654383, 1491147067, 1499536432, 1558257395, 1621177392, 1625385712, 1629581135,
- 1642180592, 1658961053, 1671548672, 1679937895, 1688328704, 1709301760, 1734467888,
- 1755439790, 1759635664, 1768027131, 1777205249, 1782514160,
+ 1264654383, 1499535675, 1507925040, 1566646003, 1629566000, 1650551536, 1658941263,
+ 1671540720, 1688321181, 1700908800, 1709298023, 1717688832, 1738661888, 1763828398,
+ 1797383403, 1805773008, 1809970171, 1819148289, 1824457200,
];
- static OFFSETS: [u8; 855] = [
+ static OFFSETS: [u8; 875] = [
39, 1, 6, 1, 11, 1, 35, 1, 1, 1, 71, 1, 4, 1, 1, 1, 4, 1, 2, 2, 0, 192, 4, 2, 4, 1, 9, 2,
1, 1, 251, 7, 207, 1, 5, 1, 49, 45, 1, 1, 1, 2, 1, 2, 1, 1, 44, 1, 11, 6, 10, 11, 1, 1, 35,
1, 10, 21, 16, 1, 101, 8, 1, 10, 1, 4, 33, 1, 1, 1, 30, 27, 91, 11, 58, 11, 4, 1, 2, 1, 24,
@@ -190,7 +196,7 @@ pub mod case_ignorable {
57, 1, 4, 5, 1, 2, 4, 1, 20, 2, 22, 6, 1, 1, 58, 1, 2, 1, 1, 4, 8, 1, 7, 2, 11, 2, 30, 1,
61, 1, 12, 1, 50, 1, 3, 1, 55, 1, 1, 3, 5, 3, 1, 4, 7, 2, 11, 2, 29, 1, 58, 1, 2, 1, 6, 1,
5, 2, 20, 2, 28, 2, 57, 2, 4, 4, 8, 1, 20, 2, 29, 1, 72, 1, 7, 3, 1, 1, 90, 1, 2, 7, 11, 9,
- 98, 1, 2, 9, 9, 1, 1, 6, 74, 2, 27, 1, 1, 1, 1, 1, 55, 14, 1, 5, 1, 2, 5, 11, 1, 36, 9, 1,
+ 98, 1, 2, 9, 9, 1, 1, 7, 73, 2, 27, 1, 1, 1, 1, 1, 55, 14, 1, 5, 1, 2, 5, 11, 1, 36, 9, 1,
102, 4, 1, 6, 1, 2, 2, 2, 25, 2, 4, 3, 16, 4, 13, 1, 2, 2, 6, 1, 15, 1, 94, 1, 0, 3, 0, 3,
29, 2, 30, 2, 30, 2, 64, 2, 1, 7, 8, 1, 2, 11, 3, 1, 5, 1, 45, 5, 51, 1, 65, 2, 34, 1, 118,
3, 4, 2, 9, 1, 6, 3, 219, 2, 2, 1, 58, 1, 1, 7, 1, 1, 1, 1, 2, 8, 6, 10, 2, 1, 39, 1, 8, 31,
@@ -204,15 +210,16 @@ pub mod case_ignorable {
1, 1, 27, 1, 14, 2, 5, 2, 1, 1, 100, 5, 9, 3, 121, 1, 2, 1, 4, 1, 0, 1, 147, 17, 0, 16, 3,
1, 12, 16, 34, 1, 2, 1, 169, 1, 7, 1, 6, 1, 11, 1, 35, 1, 1, 1, 47, 1, 45, 2, 67, 1, 21, 3,
0, 1, 226, 1, 149, 5, 0, 6, 1, 42, 1, 9, 0, 3, 1, 2, 5, 4, 40, 3, 4, 1, 165, 2, 0, 4, 0, 2,
- 153, 11, 49, 4, 123, 1, 54, 15, 41, 1, 2, 2, 10, 3, 49, 4, 2, 2, 2, 1, 4, 1, 10, 1, 50, 3,
- 36, 5, 1, 8, 62, 1, 12, 2, 52, 9, 10, 4, 2, 1, 95, 3, 2, 1, 1, 2, 6, 1, 160, 1, 3, 8, 21, 2,
- 57, 2, 3, 1, 37, 7, 3, 5, 195, 8, 2, 3, 1, 1, 23, 1, 84, 6, 1, 1, 4, 2, 1, 2, 238, 4, 6, 2,
- 1, 2, 27, 2, 85, 8, 2, 1, 1, 2, 106, 1, 1, 1, 2, 6, 1, 1, 101, 3, 2, 4, 1, 5, 0, 9, 1, 2, 0,
- 2, 1, 1, 4, 1, 144, 4, 2, 2, 4, 1, 32, 10, 40, 6, 2, 4, 8, 1, 9, 6, 2, 3, 46, 13, 1, 2, 0,
- 7, 1, 6, 1, 1, 82, 22, 2, 7, 1, 2, 1, 2, 122, 6, 3, 1, 1, 2, 1, 7, 1, 1, 72, 2, 3, 1, 1, 1,
- 0, 2, 0, 9, 0, 5, 59, 7, 9, 4, 0, 1, 63, 17, 64, 2, 1, 2, 0, 4, 1, 7, 1, 2, 0, 2, 1, 4, 0,
- 46, 2, 23, 0, 3, 9, 16, 2, 7, 30, 4, 148, 3, 0, 55, 4, 50, 8, 1, 14, 1, 22, 5, 1, 15, 0, 7,
- 1, 17, 2, 7, 1, 2, 1, 5, 0, 14, 0, 1, 61, 4, 0, 7, 109, 8, 0, 5, 0, 1, 30, 96, 128, 240, 0,
+ 80, 3, 70, 11, 49, 4, 123, 1, 54, 15, 41, 1, 2, 2, 10, 3, 49, 4, 2, 2, 2, 1, 4, 1, 10, 1,
+ 50, 3, 36, 5, 1, 8, 62, 1, 12, 2, 52, 9, 10, 4, 2, 1, 95, 3, 2, 1, 1, 2, 6, 1, 2, 1, 157, 1,
+ 3, 8, 21, 2, 57, 2, 3, 1, 37, 7, 3, 5, 195, 8, 2, 3, 1, 1, 23, 1, 84, 6, 1, 1, 4, 2, 1, 2,
+ 238, 4, 6, 2, 1, 2, 27, 2, 85, 8, 2, 1, 1, 2, 106, 1, 1, 1, 2, 6, 1, 1, 101, 3, 2, 4, 1, 5,
+ 0, 9, 1, 2, 0, 2, 1, 1, 4, 1, 144, 4, 2, 2, 4, 1, 32, 10, 40, 6, 2, 4, 8, 1, 9, 6, 2, 3, 46,
+ 13, 1, 2, 0, 7, 1, 6, 1, 1, 82, 22, 2, 7, 1, 2, 1, 2, 122, 6, 3, 1, 1, 2, 1, 7, 1, 1, 72, 2,
+ 3, 1, 1, 1, 0, 2, 11, 2, 52, 5, 5, 1, 1, 1, 0, 17, 6, 15, 0, 5, 59, 7, 9, 4, 0, 1, 63, 17,
+ 64, 2, 1, 2, 0, 4, 1, 7, 1, 2, 0, 2, 1, 4, 0, 46, 2, 23, 0, 3, 9, 16, 2, 7, 30, 4, 148, 3,
+ 0, 55, 4, 50, 8, 1, 14, 1, 22, 5, 1, 15, 0, 7, 1, 17, 2, 7, 1, 2, 1, 5, 5, 62, 33, 1, 160,
+ 14, 0, 1, 61, 4, 0, 5, 0, 7, 109, 8, 0, 5, 0, 1, 30, 96, 128, 240, 0,
];
pub fn lookup(c: char) -> bool {
super::skip_search(
@@ -225,24 +232,24 @@ pub mod case_ignorable {
#[rustfmt::skip]
pub mod cased {
- static SHORT_OFFSET_RUNS: [u32; 21] = [
+ static SHORT_OFFSET_RUNS: [u32; 22] = [
4256, 115348384, 136322176, 144711446, 163587254, 320875520, 325101120, 350268208,
392231680, 404815649, 413205504, 421595008, 467733632, 484513952, 492924480, 497144832,
- 501339814, 578936576, 627173632, 635564336, 640872842,
+ 501339814, 578936576, 627171376, 639756544, 643952944, 649261450,
];
- static OFFSETS: [u8; 311] = [
+ static OFFSETS: [u8; 315] = [
65, 26, 6, 26, 47, 1, 10, 1, 4, 1, 5, 23, 1, 31, 1, 195, 1, 4, 4, 208, 1, 36, 7, 2, 30, 5,
96, 1, 42, 4, 2, 2, 2, 4, 1, 1, 6, 1, 1, 3, 1, 1, 1, 20, 1, 83, 1, 139, 8, 166, 1, 38, 9,
- 41, 0, 38, 1, 1, 5, 1, 2, 43, 2, 3, 0, 86, 2, 6, 0, 9, 7, 43, 2, 3, 64, 192, 64, 0, 2, 6, 2,
+ 41, 0, 38, 1, 1, 5, 1, 2, 43, 1, 4, 0, 86, 2, 6, 0, 9, 7, 43, 2, 3, 64, 192, 64, 0, 2, 6, 2,
38, 2, 6, 2, 8, 1, 1, 1, 1, 1, 1, 1, 31, 2, 53, 1, 7, 1, 1, 3, 3, 1, 7, 3, 4, 2, 6, 4, 13,
5, 3, 1, 7, 116, 1, 13, 1, 16, 13, 101, 1, 4, 1, 2, 10, 1, 1, 3, 5, 6, 1, 1, 1, 1, 1, 1, 4,
1, 6, 4, 1, 2, 4, 5, 5, 4, 1, 17, 32, 3, 2, 0, 52, 0, 229, 6, 4, 3, 2, 12, 38, 1, 1, 5, 1,
- 0, 46, 18, 30, 132, 102, 3, 4, 1, 59, 5, 2, 1, 1, 1, 5, 27, 2, 1, 3, 0, 43, 1, 13, 7, 80, 0,
+ 0, 46, 18, 30, 132, 102, 3, 4, 1, 59, 5, 2, 1, 1, 1, 5, 24, 5, 1, 3, 0, 43, 1, 14, 6, 80, 0,
7, 12, 5, 0, 26, 6, 26, 0, 80, 96, 36, 4, 36, 116, 11, 1, 15, 1, 7, 1, 2, 1, 11, 1, 15, 1,
7, 1, 2, 0, 1, 2, 3, 1, 42, 1, 9, 0, 51, 13, 51, 0, 64, 0, 64, 0, 85, 1, 71, 1, 2, 2, 1, 2,
2, 2, 4, 1, 12, 1, 1, 1, 7, 1, 65, 1, 4, 2, 8, 1, 7, 1, 28, 1, 4, 1, 5, 1, 1, 3, 7, 1, 0, 2,
- 25, 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 8, 0, 10, 1, 20, 0,
- 68, 0, 26, 6, 26, 6, 26, 0,
+ 25, 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 8, 0, 10, 1, 20, 6, 6,
+ 0, 62, 0, 68, 0, 26, 6, 26, 6, 26, 0,
];
pub fn lookup(c: char) -> bool {
super::skip_search(
@@ -272,14 +279,14 @@ pub mod cc {
#[rustfmt::skip]
pub mod grapheme_extend {
- static SHORT_OFFSET_RUNS: [u32; 32] = [
+ static SHORT_OFFSET_RUNS: [u32; 33] = [
768, 2098307, 6292881, 10490717, 522196754, 526393356, 731917551, 740306986, 752920175,
761309186, 778107678, 908131840, 912326558, 920715773, 924912129, 937495844, 962662059,
- 966858799, 1205935152, 1277239027, 1340173040, 1344368463, 1352776861, 1365364480,
- 1369559397, 1377950208, 1407311872, 1432478000, 1453449902, 1457645776, 1466826784,
- 1476329968,
+ 966858799, 1214323760, 1285627635, 1348547648, 1369533168, 1377922895, 1386331293,
+ 1398918912, 1403113829, 1411504640, 1440866304, 1466032814, 1495393516, 1503783120,
+ 1508769824, 1518273008,
];
- static OFFSETS: [u8; 707] = [
+ static OFFSETS: [u8; 727] = [
0, 112, 0, 7, 0, 45, 1, 1, 1, 2, 1, 2, 1, 1, 72, 11, 48, 21, 16, 1, 101, 7, 2, 6, 2, 2, 1,
4, 35, 1, 30, 27, 91, 11, 58, 9, 9, 1, 24, 4, 1, 9, 1, 3, 1, 5, 43, 3, 60, 8, 42, 24, 1, 32,
55, 1, 1, 1, 4, 8, 4, 1, 3, 7, 10, 2, 29, 1, 58, 1, 1, 1, 2, 4, 8, 1, 9, 1, 10, 2, 26, 1, 2,
@@ -287,7 +294,7 @@ pub mod grapheme_extend {
1, 1, 58, 1, 1, 2, 1, 4, 8, 1, 7, 3, 10, 2, 30, 1, 59, 1, 1, 1, 12, 1, 9, 1, 40, 1, 3, 1,
55, 1, 1, 3, 5, 3, 1, 4, 7, 2, 11, 2, 29, 1, 58, 1, 2, 1, 2, 1, 3, 1, 5, 2, 7, 2, 11, 2, 28,
2, 57, 2, 1, 1, 2, 4, 8, 1, 9, 1, 10, 2, 29, 1, 72, 1, 4, 1, 2, 3, 1, 1, 8, 1, 81, 1, 2, 7,
- 12, 8, 98, 1, 2, 9, 11, 6, 74, 2, 27, 1, 1, 1, 1, 1, 55, 14, 1, 5, 1, 2, 5, 11, 1, 36, 9, 1,
+ 12, 8, 98, 1, 2, 9, 11, 7, 73, 2, 27, 1, 1, 1, 1, 1, 55, 14, 1, 5, 1, 2, 5, 11, 1, 36, 9, 1,
102, 4, 1, 6, 1, 2, 2, 2, 25, 2, 4, 3, 16, 4, 13, 1, 2, 2, 6, 1, 15, 1, 0, 3, 0, 3, 29, 2,
30, 2, 30, 2, 64, 2, 1, 7, 8, 1, 2, 11, 9, 1, 45, 3, 1, 1, 117, 2, 34, 1, 118, 3, 4, 2, 9,
1, 6, 3, 219, 2, 2, 1, 58, 1, 1, 7, 1, 1, 1, 1, 2, 8, 6, 10, 2, 1, 48, 31, 49, 4, 48, 7, 1,
@@ -296,16 +303,17 @@ pub mod grapheme_extend {
4, 1, 10, 32, 2, 80, 2, 0, 1, 3, 1, 4, 1, 25, 2, 5, 1, 151, 2, 26, 18, 13, 1, 38, 8, 25, 11,
46, 3, 48, 1, 2, 4, 2, 2, 39, 1, 67, 6, 2, 2, 2, 2, 12, 1, 8, 1, 47, 1, 51, 1, 1, 3, 2, 2,
5, 2, 1, 1, 42, 2, 8, 1, 238, 1, 2, 1, 4, 1, 0, 1, 0, 16, 16, 16, 0, 2, 0, 1, 226, 1, 149,
- 5, 0, 3, 1, 2, 5, 4, 40, 3, 4, 1, 165, 2, 0, 4, 0, 2, 153, 11, 49, 4, 123, 1, 54, 15, 41, 1,
- 2, 2, 10, 3, 49, 4, 2, 2, 7, 1, 61, 3, 36, 5, 1, 8, 62, 1, 12, 2, 52, 9, 10, 4, 2, 1, 95, 3,
- 2, 1, 1, 2, 6, 1, 160, 1, 3, 8, 21, 2, 57, 2, 1, 1, 1, 1, 22, 1, 14, 7, 3, 5, 195, 8, 2, 3,
- 1, 1, 23, 1, 81, 1, 2, 6, 1, 1, 2, 1, 1, 2, 1, 2, 235, 1, 2, 4, 6, 2, 1, 2, 27, 2, 85, 8, 2,
- 1, 1, 2, 106, 1, 1, 1, 2, 6, 1, 1, 101, 3, 2, 4, 1, 5, 0, 9, 1, 2, 245, 1, 10, 2, 1, 1, 4,
- 1, 144, 4, 2, 2, 4, 1, 32, 10, 40, 6, 2, 4, 8, 1, 9, 6, 2, 3, 46, 13, 1, 2, 0, 7, 1, 6, 1,
- 1, 82, 22, 2, 7, 1, 2, 1, 2, 122, 6, 3, 1, 1, 2, 1, 7, 1, 1, 72, 2, 3, 1, 1, 1, 0, 2, 0, 5,
- 59, 7, 0, 1, 63, 4, 81, 1, 0, 2, 0, 46, 2, 23, 0, 1, 1, 3, 4, 5, 8, 8, 2, 7, 30, 4, 148, 3,
- 0, 55, 4, 50, 8, 1, 14, 1, 22, 5, 1, 15, 0, 7, 1, 17, 2, 7, 1, 2, 1, 5, 0, 7, 0, 1, 61, 4,
- 0, 7, 109, 7, 0, 96, 128, 240, 0,
+ 5, 0, 3, 1, 2, 5, 4, 40, 3, 4, 1, 165, 2, 0, 4, 0, 2, 80, 3, 70, 11, 49, 4, 123, 1, 54, 15,
+ 41, 1, 2, 2, 10, 3, 49, 4, 2, 2, 7, 1, 61, 3, 36, 5, 1, 8, 62, 1, 12, 2, 52, 9, 10, 4, 2, 1,
+ 95, 3, 2, 1, 1, 2, 6, 1, 2, 1, 157, 1, 3, 8, 21, 2, 57, 2, 1, 1, 1, 1, 22, 1, 14, 7, 3, 5,
+ 195, 8, 2, 3, 1, 1, 23, 1, 81, 1, 2, 6, 1, 1, 2, 1, 1, 2, 1, 2, 235, 1, 2, 4, 6, 2, 1, 2,
+ 27, 2, 85, 8, 2, 1, 1, 2, 106, 1, 1, 1, 2, 6, 1, 1, 101, 3, 2, 4, 1, 5, 0, 9, 1, 2, 245, 1,
+ 10, 2, 1, 1, 4, 1, 144, 4, 2, 2, 4, 1, 32, 10, 40, 6, 2, 4, 8, 1, 9, 6, 2, 3, 46, 13, 1, 2,
+ 0, 7, 1, 6, 1, 1, 82, 22, 2, 7, 1, 2, 1, 2, 122, 6, 3, 1, 1, 2, 1, 7, 1, 1, 72, 2, 3, 1, 1,
+ 1, 0, 2, 11, 2, 52, 5, 5, 1, 1, 1, 0, 1, 6, 15, 0, 5, 59, 7, 0, 1, 63, 4, 81, 1, 0, 2, 0,
+ 46, 2, 23, 0, 1, 1, 3, 4, 5, 8, 8, 2, 7, 30, 4, 148, 3, 0, 55, 4, 50, 8, 1, 14, 1, 22, 5, 1,
+ 15, 0, 7, 1, 17, 2, 7, 1, 2, 1, 5, 100, 1, 160, 7, 0, 1, 61, 4, 0, 4, 0, 7, 109, 7, 0, 96,
+ 128, 240, 0,
];
pub fn lookup(c: char) -> bool {
super::skip_search(
@@ -318,54 +326,56 @@ pub mod grapheme_extend {
#[rustfmt::skip]
pub mod lowercase {
- static BITSET_CHUNKS_MAP: [u8; 123] = [
+ const BITSET_CHUNKS_MAP: &'static [u8; 123] = &[
14, 17, 0, 0, 9, 0, 0, 12, 13, 10, 0, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 4, 1, 0, 15, 0, 8, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0,
- 3, 0, 0, 7,
+ 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 19, 0,
+ 3, 18, 0, 7,
];
- static BITSET_INDEX_CHUNKS: [[u8; 16]; 19] = [
+ const BITSET_INDEX_CHUNKS: &'static [[u8; 16]; 20] = &[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 59, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 14, 55, 0],
- [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 41, 0, 0, 0],
- [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 43, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 40, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 44, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 0],
- [0, 0, 0, 0, 0, 0, 0, 0, 0, 65, 42, 0, 50, 46, 48, 32],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 65, 43, 0, 51, 47, 49, 33],
[0, 0, 0, 0, 10, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
- [0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
- [0, 0, 0, 53, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 26],
+ [0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 19, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 27],
[0, 0, 0, 60, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
- [0, 0, 0, 70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
- [0, 0, 57, 0, 55, 55, 55, 0, 21, 21, 67, 21, 35, 24, 23, 36],
- [0, 5, 74, 0, 28, 15, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0],
- [0, 64, 33, 17, 22, 51, 52, 47, 45, 8, 34, 40, 0, 27, 13, 30],
- [11, 58, 0, 4, 0, 0, 29, 0, 0, 0, 0, 0, 0, 0, 31, 0],
- [16, 25, 21, 37, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
- [16, 49, 2, 20, 66, 9, 57, 0, 0, 0, 0, 0, 0, 0, 0, 0],
- [63, 39, 54, 12, 73, 61, 18, 1, 6, 62, 71, 19, 68, 69, 3, 44],
+ [0, 0, 0, 69, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 57, 0, 55, 55, 55, 0, 22, 22, 67, 22, 36, 25, 24, 37],
+ [0, 5, 68, 0, 29, 15, 73, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 64, 34, 17, 23, 52, 53, 48, 46, 8, 35, 42, 0, 28, 13, 31],
+ [11, 58, 0, 6, 0, 0, 30, 0, 0, 0, 0, 0, 0, 0, 32, 0],
+ [16, 26, 22, 38, 39, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [16, 50, 2, 21, 66, 9, 57, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [16, 70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [63, 41, 54, 12, 75, 61, 18, 1, 7, 62, 74, 20, 71, 72, 4, 45],
];
- static BITSET_CANONICAL: [u64; 55] = [
+ const BITSET_CANONICAL: &'static [u64; 55] = &[
0b0000000000000000000000000000000000000000000000000000000000000000,
0b1111111111111111110000000000000000000000000011111111111111111111,
0b1010101010101010101010101010101010101010101010101010100000000010,
+ 0b0000000000000111111111111111111111111111111111111111111111111111,
0b1111111111111111111111000000000000000000000000001111110111111111,
- 0b0000111111111111111111111111111111111111000000000000000000000000,
0b1000000000000010000000000000000000000000000000000000000000000000,
+ 0b0000111111111111111111111111111111111111000000000000000000000000,
0b0000111111111111111111111111110000000000000000000000000011111111,
- 0b0000000000000111111111111111111111111111111111111111111111111111,
0b1111111111111111111111111111111111111111111111111010101010000101,
0b1111111111111111111111111111111100000000000000000000000000000000,
0b1111111111111111111111111111110000000000000000000000000000000000,
0b1111111111111111111111110000000000000000000000000000000000000000,
0b1111111111111111111111000000000000000000000000001111111111101111,
0b1111111111111111111100000000000000000000000000010000000000000000,
- 0b1111111111111111000000011111111111110111111111111111111111111111,
+ 0b1111111111111111000000111111111111110111111111111111111111111111,
0b1111111111111111000000000000000000000000000000000100001111000000,
0b1111111111111111000000000000000000000000000000000000000000000000,
0b1111111101111111111111111111111110000000000000000000000000000000,
0b1111110000000000000000000000000011111111111111111111111111000000,
+ 0b1111011111111111111111111111111111111111111111110000000000000000,
0b1111000000000000000000000000001111110111111111111111111111111100,
0b1010101010101010101010101010101010101010101010101101010101010100,
0b1010101010101010101010101010101010101010101010101010101010101010,
@@ -379,16 +389,16 @@ pub mod lowercase {
0b0001101111111011111111111111101111111111100000000000000000000000,
0b0001100100101111101010101010101010101010111000110111111111111111,
0b0000011111111101111111111111111111111111111111111111111110111001,
- 0b0000011101000000000000000000000000000010101010100000010100001010,
+ 0b0000011101011100000000000000000000000010101010100000010100001010,
0b0000010000100000000001000000000000000000000000000000000000000000,
0b0000000111111111111111111111111111111111111011111111111111111111,
0b0000000011111111000000001111111100000000001111110000000011111111,
0b0000000011011100000000001111111100000000110011110000000011011100,
0b0000000000001000010100000001101010101010101010101010101010101010,
0b0000000000000000001000001011111111111111111111111111111111111111,
+ 0b0000000000000000000001111110000001111111111111111111101111111111,
0b0000000000000000000000001111111111111111110111111100000000000000,
0b0000000000000000000000000001111100000000000000000000000000000011,
- 0b0000000000000000000000000000000001111111111111111111101111111111,
0b0000000000000000000000000000000000111010101010101010101010101010,
0b0000000000000000000000000000000000000000111110000000000001111111,
0b0000000000000000000000000000000000000000000000000000101111110111,
@@ -400,16 +410,16 @@ pub mod lowercase {
0b1010101010101011101010101010100000000000000000000000000000000000,
0b1101010010101010101010101010101010101010101010101010101101010101,
0b1110011001010001001011010010101001001110001001000011000100101001,
- 0b1110011111111111111111111111111111111111111111110000000000000000,
0b1110101111000000000000000000000000001111111111111111111111111100,
];
- static BITSET_MAPPING: [(u8, u8); 20] = [
+ const BITSET_MAPPING: &'static [(u8, u8); 21] = &[
(0, 64), (1, 188), (1, 183), (1, 176), (1, 109), (1, 124), (1, 126), (1, 66), (1, 70),
- (1, 77), (2, 146), (2, 144), (2, 83), (3, 12), (3, 6), (4, 156), (4, 78), (5, 187),
- (6, 132), (7, 93),
+ (1, 77), (2, 146), (2, 144), (2, 83), (3, 93), (3, 147), (3, 133), (4, 12), (4, 6),
+ (5, 187), (6, 78), (7, 132),
];
- pub fn lookup(c: char) -> bool {
+ #[rustc_const_unstable(feature = "const_unicode_case_lookup", issue = "101400")]
+ pub const fn lookup(c: char) -> bool {
super::bitset_search(
c as u32,
&BITSET_CHUNKS_MAP,
@@ -422,14 +432,14 @@ pub mod lowercase {
#[rustfmt::skip]
pub mod n {
- static SHORT_OFFSET_RUNS: [u32; 38] = [
+ static SHORT_OFFSET_RUNS: [u32; 39] = [
1632, 18876774, 31461440, 102765417, 111154926, 115349830, 132128880, 165684320, 186656630,
195046653, 199241735, 203436434, 216049184, 241215536, 249605104, 274792208, 278987015,
283181793, 295766104, 320933114, 383848032, 392238160, 434181712, 442570976, 455154768,
- 463544256, 476128256, 480340576, 484535936, 501338848, 505534414, 513925440, 518120176,
- 522315975, 526511217, 534900992, 555875312, 561183738,
+ 463544144, 476128256, 484534880, 488730240, 505533120, 509728718, 522314048, 526508784,
+ 530703600, 534898887, 539094129, 547483904, 568458224, 573766650,
];
- static OFFSETS: [u8; 269] = [
+ static OFFSETS: [u8; 275] = [
48, 10, 120, 2, 5, 1, 2, 3, 0, 10, 134, 10, 198, 10, 0, 10, 118, 10, 4, 6, 108, 10, 118,
10, 118, 10, 2, 6, 110, 13, 115, 10, 8, 7, 103, 10, 104, 7, 7, 19, 109, 10, 96, 10, 118, 10,
70, 20, 0, 10, 70, 10, 0, 20, 0, 3, 239, 10, 6, 10, 22, 10, 0, 10, 128, 11, 165, 10, 6, 10,
@@ -439,9 +449,9 @@ pub mod n {
29, 1, 8, 1, 134, 5, 202, 10, 0, 8, 25, 7, 39, 9, 75, 5, 22, 6, 160, 2, 2, 16, 2, 46, 64, 9,
52, 2, 30, 3, 75, 5, 104, 8, 24, 8, 41, 7, 0, 6, 48, 10, 0, 31, 158, 10, 42, 4, 112, 7, 134,
30, 128, 10, 60, 10, 144, 10, 7, 20, 251, 10, 0, 10, 118, 10, 0, 10, 102, 10, 102, 12, 0,
- 19, 93, 10, 0, 29, 227, 10, 70, 10, 0, 21, 0, 111, 0, 10, 86, 10, 134, 10, 1, 7, 0, 23, 0,
- 20, 108, 25, 0, 50, 0, 10, 0, 10, 0, 9, 128, 10, 0, 59, 1, 3, 1, 4, 76, 45, 1, 15, 0, 13, 0,
- 10, 0,
+ 19, 93, 10, 0, 29, 227, 10, 70, 10, 0, 10, 102, 21, 0, 111, 0, 10, 86, 10, 134, 10, 1, 7, 0,
+ 23, 0, 20, 12, 20, 108, 25, 0, 50, 0, 10, 0, 10, 0, 10, 0, 9, 128, 10, 0, 59, 1, 3, 1, 4,
+ 76, 45, 1, 15, 0, 13, 0, 10, 0,
];
pub fn lookup(c: char) -> bool {
super::skip_search(
@@ -454,14 +464,14 @@ pub mod n {
#[rustfmt::skip]
pub mod uppercase {
- static BITSET_CHUNKS_MAP: [u8; 125] = [
+ const BITSET_CHUNKS_MAP: &'static [u8; 125] = &[
12, 15, 6, 6, 0, 6, 6, 2, 4, 11, 6, 16, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 8, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 5, 6, 14, 6, 10, 6, 6, 1, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 7, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 13, 6, 6,
6, 6, 9, 6, 3,
];
- static BITSET_INDEX_CHUNKS: [[u8; 16]; 17] = [
+ const BITSET_INDEX_CHUNKS: &'static [[u8; 16]; 17] = &[
[43, 43, 5, 34, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 5, 1],
[43, 43, 5, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43],
[43, 43, 39, 43, 43, 43, 43, 43, 17, 17, 62, 17, 42, 29, 24, 23],
@@ -480,7 +490,7 @@ pub mod uppercase {
[57, 19, 2, 18, 10, 47, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43],
[57, 37, 17, 27, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43],
];
- static BITSET_CANONICAL: [u64; 43] = [
+ const BITSET_CANONICAL: &'static [u64; 43] = &[
0b0000011111111111111111111111111000000000000000000000000000000000,
0b0000000000111111111111111111111111111111111111111111111111111111,
0b0101010101010101010101010101010101010101010101010101010000000001,
@@ -525,13 +535,14 @@ pub mod uppercase {
0b1111011111111111000000000000000000000000000000000000000000000000,
0b1111111100000000111111110000000000111111000000001111111100000000,
];
- static BITSET_MAPPING: [(u8, u8); 25] = [
+ const BITSET_MAPPING: &'static [(u8, u8); 25] = &[
(0, 187), (0, 177), (0, 171), (0, 167), (0, 164), (0, 32), (0, 47), (0, 51), (0, 121),
(0, 117), (0, 109), (1, 150), (1, 148), (1, 142), (1, 134), (1, 131), (1, 64), (2, 164),
(2, 146), (2, 20), (3, 146), (3, 140), (3, 134), (4, 178), (4, 171),
];
- pub fn lookup(c: char) -> bool {
+ #[rustc_const_unstable(feature = "const_unicode_case_lookup", issue = "101400")]
+ pub const fn lookup(c: char) -> bool {
super::bitset_search(
c as u32,
&BITSET_CHUNKS_MAP,
@@ -544,18 +555,26 @@ pub mod uppercase {
#[rustfmt::skip]
pub mod white_space {
- static SHORT_OFFSET_RUNS: [u32; 4] = [
- 5760, 18882560, 23080960, 40972289,
- ];
- static OFFSETS: [u8; 21] = [
- 9, 5, 18, 1, 100, 1, 26, 1, 0, 1, 0, 11, 29, 2, 5, 1, 47, 1, 0, 1, 0,
+ static WHITESPACE_MAP: [u8; 256] = [
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 2, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
];
+ #[inline]
pub fn lookup(c: char) -> bool {
- super::skip_search(
- c as u32,
- &SHORT_OFFSET_RUNS,
- &OFFSETS,
- )
+ match c as u32 >> 8 {
+ 0 => WHITESPACE_MAP[c as usize & 0xff] & 1 != 0,
+ 22 => c as u32 == 0x1680,
+ 32 => WHITESPACE_MAP[c as usize & 0xff] & 2 != 0,
+ 48 => c as u32 == 0x3000,
+ _ => false,
+ }
}
}
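
The rewritten `white_space::lookup` above trades the generic skip-search tables for one 256-byte page map plus a dispatch on the high byte of the code point: bit 0 of the map marks whitespace in the U+00xx page, bit 1 marks whitespace in the U+20xx page, and U+1680 and U+3000 are handled as one-off cases. A minimal sketch of the same two-level idea, with the page map filled in by hand from the Unicode White_Space list rather than generated (an illustration, not the shipped table):

```rust
// Two-level whitespace lookup: dispatch on the high byte of the code point,
// then test a bit in a shared 256-entry map. The map is built at runtime here
// for readability; the generated code above bakes it into a static table.
fn is_white_space(c: char) -> bool {
    let mut map = [0u8; 256];
    // Bit 0: whitespace code points in U+0000..=U+00FF.
    for b in [0x09usize, 0x0A, 0x0B, 0x0C, 0x0D, 0x20, 0x85, 0xA0] {
        map[b] |= 1;
    }
    // Bit 1: whitespace code points in U+2000..=U+20FF.
    for b in (0x00usize..=0x0A).chain([0x28, 0x29, 0x2F, 0x5F]) {
        map[b] |= 2;
    }
    match c as u32 >> 8 {
        0 => map[c as usize & 0xff] & 1 != 0,
        22 => c as u32 == 0x1680, // OGHAM SPACE MARK
        32 => map[c as usize & 0xff] & 2 != 0,
        48 => c as u32 == 0x3000, // IDEOGRAPHIC SPACE
        _ => false,
    }
}

fn main() {
    for c in ['\t', ' ', '\u{00A0}', '\u{1680}', '\u{2003}', '\u{3000}'] {
        assert!(is_white_space(c) && c.is_whitespace());
    }
    assert!(!is_white_space('x') && !is_white_space('\u{2060}'));
}
```
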
diff --git a/library/core/tests/alloc.rs b/library/core/tests/alloc.rs
index 8a5a06b34..3ceaeadce 100644
--- a/library/core/tests/alloc.rs
+++ b/library/core/tests/alloc.rs
@@ -1,4 +1,5 @@
use core::alloc::Layout;
+use core::mem::size_of;
use core::ptr::{self, NonNull};
#[test]
@@ -13,6 +14,49 @@ fn const_unchecked_layout() {
}
#[test]
+fn layout_round_up_to_align_edge_cases() {
+ const MAX_SIZE: usize = isize::MAX as usize;
+
+ for shift in 0..usize::BITS {
+ let align = 1_usize << shift;
+ let edge = (MAX_SIZE + 1) - align;
+ let low = edge.saturating_sub(10);
+ let high = edge.saturating_add(10);
+ assert!(Layout::from_size_align(low, align).is_ok());
+ assert!(Layout::from_size_align(high, align).is_err());
+ for size in low..=high {
+ assert_eq!(
+ Layout::from_size_align(size, align).is_ok(),
+ size.next_multiple_of(align) <= MAX_SIZE,
+ );
+ }
+ }
+}
+
+#[test]
+fn layout_array_edge_cases() {
+ for_type::<i64>();
+ for_type::<[i32; 0b10101]>();
+ for_type::<[u8; 0b1010101]>();
+
+ // Make sure ZSTs don't lead to divide-by-zero
+ assert_eq!(Layout::array::<()>(usize::MAX).unwrap(), Layout::from_size_align(0, 1).unwrap());
+
+ fn for_type<T>() {
+ const MAX_SIZE: usize = isize::MAX as usize;
+
+ let edge = (MAX_SIZE + 1) / size_of::<T>();
+ let low = edge.saturating_sub(10);
+ let high = edge.saturating_add(10);
+ assert!(Layout::array::<T>(low).is_ok());
+ assert!(Layout::array::<T>(high).is_err());
+ for n in low..=high {
+ assert_eq!(Layout::array::<T>(n).is_ok(), n * size_of::<T>() <= MAX_SIZE);
+ }
+ }
+}
+
+#[test]
fn layout_debug_shows_log2_of_alignment() {
// `Debug` is not stable, but here's what it does right now
let layout = Layout::from_size_align(24576, 8192).unwrap();
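
The two new tests pin down the overflow boundary for `Layout`: `from_size_align(size, align)` must fail exactly when the size, rounded up to a multiple of the alignment, exceeds `isize::MAX`, and `Layout::array::<T>(n)` must fail as soon as `n * size_of::<T>()` crosses the same limit. A small stand-alone illustration of that rule with concrete numbers, using the same `isize::MAX` bound the tests assume:

```rust
use std::alloc::Layout;

fn main() {
    const MAX_SIZE: usize = isize::MAX as usize;

    // Largest size that still rounds up to at most isize::MAX for a 4 KiB alignment.
    let align = 4096;
    let fits = MAX_SIZE / align * align;
    assert!(Layout::from_size_align(fits, align).is_ok());
    // One byte more rounds up past isize::MAX and is rejected.
    assert!(Layout::from_size_align(fits + 1, align).is_err());

    // Layout::array fails once the total byte count would exceed the same limit.
    assert!(Layout::array::<u64>(MAX_SIZE / 8).is_ok());
    assert!(Layout::array::<u64>(MAX_SIZE / 8 + 1).is_err());
}
```
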
diff --git a/library/core/tests/any.rs b/library/core/tests/any.rs
index 8ed0c8880..9538b8139 100644
--- a/library/core/tests/any.rs
+++ b/library/core/tests/any.rs
@@ -142,7 +142,7 @@ impl Provider for SomeConcreteType {
demand
.provide_ref::<String>(&self.some_string)
.provide_ref::<str>(&self.some_string)
- .provide_value::<String>(|| "bye".to_owned());
+ .provide_value_with::<String>(|| "bye".to_owned());
}
}
diff --git a/library/core/tests/ascii.rs b/library/core/tests/ascii.rs
index 6d2cf3e83..f5f2dd047 100644
--- a/library/core/tests/ascii.rs
+++ b/library/core/tests/ascii.rs
@@ -252,6 +252,23 @@ fn test_is_ascii_digit() {
}
#[test]
+fn test_is_ascii_octdigit() {
+ assert_all!(is_ascii_octdigit, "", "01234567");
+ assert_none!(
+ is_ascii_octdigit,
+ "abcdefghijklmnopqrstuvwxyz",
+ "ABCDEFGHIJKLMNOQPRSTUVWXYZ",
+ "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~",
+ " \t\n\x0c\r",
+ "\x00\x01\x02\x03\x04\x05\x06\x07",
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ "\x10\x11\x12\x13\x14\x15\x16\x17",
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ "\x7f",
+ );
+}
+
+#[test]
fn test_is_ascii_hexdigit() {
assert_all!(is_ascii_hexdigit, "", "0123456789", "abcdefABCDEF",);
assert_none!(
@@ -454,6 +471,7 @@ fn ascii_ctype_const() {
is_ascii_lowercase => [true, false, false, false, false];
is_ascii_alphanumeric => [true, true, true, false, false];
is_ascii_digit => [false, false, true, false, false];
+ is_ascii_octdigit => [false, false, false, false, false];
is_ascii_hexdigit => [true, true, true, false, false];
is_ascii_punctuation => [false, false, false, true, false];
is_ascii_graphic => [true, true, true, true, false];
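
`is_ascii_octdigit` is unstable behind the `is_ascii_octdigit` feature at the time of this diff, and the whole contract the new test pins down is that only `'0'..='7'` qualifies. A stable stand-in makes the expected behaviour explicit:

```rust
// Stand-in for the unstable `is_ascii_octdigit` exercised by the test above.
fn is_ascii_octdigit(c: char) -> bool {
    matches!(c, '0'..='7')
}

fn main() {
    assert!("01234567".chars().all(is_ascii_octdigit));
    assert!("89abcdefABCDEF !\t\u{7f}".chars().all(|c| !is_ascii_octdigit(c)));
}
```
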
diff --git a/library/core/tests/atomic.rs b/library/core/tests/atomic.rs
index 13b12db20..94b031060 100644
--- a/library/core/tests/atomic.rs
+++ b/library/core/tests/atomic.rs
@@ -155,7 +155,7 @@ fn ptr_add_data() {
assert_eq!(atom.fetch_ptr_sub(1, SeqCst), n.wrapping_add(1));
assert_eq!(atom.load(SeqCst), n);
- let bytes_from_n = |b| n.cast::<u8>().wrapping_add(b).cast::<i64>();
+ let bytes_from_n = |b| n.wrapping_byte_add(b);
assert_eq!(atom.fetch_byte_add(1, SeqCst), n);
assert_eq!(atom.load(SeqCst), bytes_from_n(1));
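
`wrapping_byte_add` (gated behind `pointer_byte_offsets` at the time of this diff) computes the same address as the old cast-through-`*const u8` spelling while keeping the pointer's original type, which is why the closure in the test shrinks to a single call. A sketch of that equivalence with plain pointers:

```rust
// The old and new spellings of "offset this pointer by N bytes" agree.
fn main() {
    let n = &42_i64 as *const i64;

    let via_cast = n.cast::<u8>().wrapping_add(3).cast::<i64>();
    let via_byte_add = n.wrapping_byte_add(3);

    assert_eq!(via_cast, via_byte_add);
    assert_eq!(via_byte_add as usize, n as usize + 3);
}
```
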
diff --git a/library/core/tests/const_ptr.rs b/library/core/tests/const_ptr.rs
index 152fed803..d874f0831 100644
--- a/library/core/tests/const_ptr.rs
+++ b/library/core/tests/const_ptr.rs
@@ -3,7 +3,7 @@ const DATA: [u16; 2] = [u16::from_ne_bytes([0x01, 0x23]), u16::from_ne_bytes([0x
const fn unaligned_ptr() -> *const u16 {
// Since DATA.as_ptr() is aligned to two bytes, adding 1 byte to that produces an unaligned *const u16
- unsafe { (DATA.as_ptr() as *const u8).add(1) as *const u16 }
+ unsafe { DATA.as_ptr().byte_add(1) }
}
#[test]
@@ -67,7 +67,7 @@ fn write() {
const fn write_unaligned() -> [u16; 2] {
let mut two_aligned = [0u16; 2];
unsafe {
- let unaligned_ptr = (two_aligned.as_mut_ptr() as *mut u8).add(1) as *mut u16;
+ let unaligned_ptr = two_aligned.as_mut_ptr().byte_add(1);
ptr::write_unaligned(unaligned_ptr, u16::from_ne_bytes([0x23, 0x45]));
}
two_aligned
@@ -91,7 +91,7 @@ fn mut_ptr_write() {
const fn write_unaligned() -> [u16; 2] {
let mut two_aligned = [0u16; 2];
unsafe {
- let unaligned_ptr = (two_aligned.as_mut_ptr() as *mut u8).add(1) as *mut u16;
+ let unaligned_ptr = two_aligned.as_mut_ptr().byte_add(1);
unaligned_ptr.write_unaligned(u16::from_ne_bytes([0x23, 0x45]));
}
two_aligned
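
Both hunks replace the manual `as *mut u8` / `add(1)` / `as *mut u16` dance with `byte_add(1)` (also part of the `pointer_byte_offsets` API), which keeps the `u16` pointer type but moves it by one byte, deliberately producing an unaligned pointer that must then be used with `read_unaligned`/`write_unaligned`. A small runtime illustration, endianness-independent because it goes through `from_ne_bytes` like the `DATA` constant above:

```rust
fn main() {
    let data = [u16::from_ne_bytes([0x01, 0x23]), u16::from_ne_bytes([0x45, 0x67])];
    // `data` occupies the bytes 01 23 45 67 in memory; byte_add(1) points at 23 45,
    // which is not 2-byte aligned, so it must be read with read_unaligned.
    let v = unsafe { data.as_ptr().byte_add(1).read_unaligned() };
    assert_eq!(v, u16::from_ne_bytes([0x23, 0x45]));
}
```
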
diff --git a/library/core/tests/iter/adapters/array_chunks.rs b/library/core/tests/iter/adapters/array_chunks.rs
new file mode 100644
index 000000000..4e9d89e1e
--- /dev/null
+++ b/library/core/tests/iter/adapters/array_chunks.rs
@@ -0,0 +1,179 @@
+use core::cell::Cell;
+use core::iter::{self, Iterator};
+
+use super::*;
+
+#[test]
+fn test_iterator_array_chunks_infer() {
+ let xs = [1, 1, 2, -2, 6, 0, 3, 1];
+ for [a, b, c] in xs.iter().copied().array_chunks() {
+ assert_eq!(a + b + c, 4);
+ }
+}
+
+#[test]
+fn test_iterator_array_chunks_clone_and_drop() {
+ let count = Cell::new(0);
+ let mut it = (0..5).map(|_| CountDrop::new(&count)).array_chunks::<3>();
+ assert_eq!(it.by_ref().count(), 1);
+ assert_eq!(count.get(), 3);
+ let mut it2 = it.clone();
+ assert_eq!(count.get(), 3);
+ assert_eq!(it.into_remainder().unwrap().len(), 2);
+ assert_eq!(count.get(), 5);
+ assert!(it2.next().is_none());
+ assert_eq!(it2.into_remainder().unwrap().len(), 2);
+ assert_eq!(count.get(), 7);
+}
+
+#[test]
+fn test_iterator_array_chunks_remainder() {
+ let mut it = (0..11).array_chunks::<4>();
+ assert_eq!(it.next(), Some([0, 1, 2, 3]));
+ assert_eq!(it.next(), Some([4, 5, 6, 7]));
+ assert_eq!(it.next(), None);
+ assert_eq!(it.into_remainder().unwrap().as_slice(), &[8, 9, 10]);
+}
+
+#[test]
+fn test_iterator_array_chunks_size_hint() {
+ let it = (0..6).array_chunks::<1>();
+ assert_eq!(it.size_hint(), (6, Some(6)));
+
+ let it = (0..6).array_chunks::<3>();
+ assert_eq!(it.size_hint(), (2, Some(2)));
+
+ let it = (0..6).array_chunks::<5>();
+ assert_eq!(it.size_hint(), (1, Some(1)));
+
+ let it = (0..6).array_chunks::<7>();
+ assert_eq!(it.size_hint(), (0, Some(0)));
+
+ let it = (1..).array_chunks::<2>();
+ assert_eq!(it.size_hint(), (usize::MAX / 2, None));
+
+ let it = (1..).filter(|x| x % 2 != 0).array_chunks::<2>();
+ assert_eq!(it.size_hint(), (0, None));
+}
+
+#[test]
+fn test_iterator_array_chunks_count() {
+ let it = (0..6).array_chunks::<1>();
+ assert_eq!(it.count(), 6);
+
+ let it = (0..6).array_chunks::<3>();
+ assert_eq!(it.count(), 2);
+
+ let it = (0..6).array_chunks::<5>();
+ assert_eq!(it.count(), 1);
+
+ let it = (0..6).array_chunks::<7>();
+ assert_eq!(it.count(), 0);
+
+ let it = (0..6).filter(|x| x % 2 == 0).array_chunks::<2>();
+ assert_eq!(it.count(), 1);
+
+ let it = iter::empty::<i32>().array_chunks::<2>();
+ assert_eq!(it.count(), 0);
+
+ let it = [(); usize::MAX].iter().array_chunks::<2>();
+ assert_eq!(it.count(), usize::MAX / 2);
+}
+
+#[test]
+fn test_iterator_array_chunks_next_and_next_back() {
+ let mut it = (0..11).array_chunks::<3>();
+ assert_eq!(it.next(), Some([0, 1, 2]));
+ assert_eq!(it.next_back(), Some([6, 7, 8]));
+ assert_eq!(it.next(), Some([3, 4, 5]));
+ assert_eq!(it.next_back(), None);
+ assert_eq!(it.next(), None);
+ assert_eq!(it.next_back(), None);
+ assert_eq!(it.next(), None);
+ assert_eq!(it.into_remainder().unwrap().as_slice(), &[9, 10]);
+}
+
+#[test]
+fn test_iterator_array_chunks_rev_remainder() {
+ let mut it = (0..11).array_chunks::<4>();
+ {
+ let mut it = it.by_ref().rev();
+ assert_eq!(it.next(), Some([4, 5, 6, 7]));
+ assert_eq!(it.next(), Some([0, 1, 2, 3]));
+ assert_eq!(it.next(), None);
+ assert_eq!(it.next(), None);
+ }
+ assert_eq!(it.into_remainder().unwrap().as_slice(), &[8, 9, 10]);
+}
+
+#[test]
+fn test_iterator_array_chunks_try_fold() {
+ let count = Cell::new(0);
+ let mut it = (0..10).map(|_| CountDrop::new(&count)).array_chunks::<3>();
+ let result: Result<_, ()> = it.by_ref().try_fold(0, |acc, _item| Ok(acc + 1));
+ assert_eq!(result, Ok(3));
+ assert_eq!(count.get(), 9);
+ drop(it);
+ assert_eq!(count.get(), 10);
+
+ let count = Cell::new(0);
+ let mut it = (0..10).map(|_| CountDrop::new(&count)).array_chunks::<3>();
+ let result = it.by_ref().try_fold(0, |acc, _item| if acc < 2 { Ok(acc + 1) } else { Err(acc) });
+ assert_eq!(result, Err(2));
+ assert_eq!(count.get(), 9);
+ drop(it);
+ assert_eq!(count.get(), 9);
+}
+
+#[test]
+fn test_iterator_array_chunks_fold() {
+ let result = (1..11).array_chunks::<3>().fold(0, |acc, [a, b, c]| {
+ assert_eq!(acc + 1, a);
+ assert_eq!(acc + 2, b);
+ assert_eq!(acc + 3, c);
+ acc + 3
+ });
+ assert_eq!(result, 9);
+
+ let count = Cell::new(0);
+ let result =
+ (0..10).map(|_| CountDrop::new(&count)).array_chunks::<3>().fold(0, |acc, _item| acc + 1);
+ assert_eq!(result, 3);
+ assert_eq!(count.get(), 10);
+}
+
+#[test]
+fn test_iterator_array_chunks_try_rfold() {
+ let count = Cell::new(0);
+ let mut it = (0..10).map(|_| CountDrop::new(&count)).array_chunks::<3>();
+ let result: Result<_, ()> = it.try_rfold(0, |acc, _item| Ok(acc + 1));
+ assert_eq!(result, Ok(3));
+ assert_eq!(count.get(), 9);
+ drop(it);
+ assert_eq!(count.get(), 10);
+
+ let count = Cell::new(0);
+ let mut it = (0..10).map(|_| CountDrop::new(&count)).array_chunks::<3>();
+ let result = it.try_rfold(0, |acc, _item| if acc < 2 { Ok(acc + 1) } else { Err(acc) });
+ assert_eq!(result, Err(2));
+ assert_eq!(count.get(), 9);
+ drop(it);
+ assert_eq!(count.get(), 10);
+}
+
+#[test]
+fn test_iterator_array_chunks_rfold() {
+ let result = (1..11).array_chunks::<3>().rfold(0, |acc, [a, b, c]| {
+ assert_eq!(10 - (acc + 1), c);
+ assert_eq!(10 - (acc + 2), b);
+ assert_eq!(10 - (acc + 3), a);
+ acc + 3
+ });
+ assert_eq!(result, 9);
+
+ let count = Cell::new(0);
+ let result =
+ (0..10).map(|_| CountDrop::new(&count)).array_chunks::<3>().rfold(0, |acc, _item| acc + 1);
+ assert_eq!(result, 3);
+ assert_eq!(count.get(), 10);
+}
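
Condensed, the contract these tests document for the unstable `iter_array_chunks` adapter: `next` only ever yields full `[T; N]` chunks, the unfilled tail is reachable solely through `into_remainder`, and the size hints, counts and folds above all operate on whole chunks. A minimal nightly usage sketch:

```rust
#![feature(iter_array_chunks)]

fn main() {
    let mut chunks = (0..11).array_chunks::<4>();
    assert_eq!(chunks.next(), Some([0, 1, 2, 3]));
    assert_eq!(chunks.next(), Some([4, 5, 6, 7]));
    assert_eq!(chunks.next(), None); // only three items left: not a full chunk
    assert_eq!(chunks.into_remainder().unwrap().as_slice(), &[8, 9, 10]);
}
```
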
diff --git a/library/core/tests/iter/adapters/by_ref_sized.rs b/library/core/tests/iter/adapters/by_ref_sized.rs
new file mode 100644
index 000000000..a9c066f0e
--- /dev/null
+++ b/library/core/tests/iter/adapters/by_ref_sized.rs
@@ -0,0 +1,20 @@
+use core::iter::*;
+
+#[test]
+fn test_iterator_by_ref_sized() {
+ let a = ['a', 'b', 'c', 'd'];
+
+ let mut s = String::from("Z");
+ let mut it = a.iter().copied();
+ ByRefSized(&mut it).take(2).for_each(|x| s.push(x));
+ assert_eq!(s, "Zab");
+ ByRefSized(&mut it).fold((), |(), x| s.push(x));
+ assert_eq!(s, "Zabcd");
+
+ let mut s = String::from("Z");
+ let mut it = a.iter().copied();
+ ByRefSized(&mut it).rev().take(2).for_each(|x| s.push(x));
+ assert_eq!(s, "Zdc");
+ ByRefSized(&mut it).rfold((), |(), x| s.push(x));
+ assert_eq!(s, "Zdcba");
+}
diff --git a/library/core/tests/iter/adapters/flatten.rs b/library/core/tests/iter/adapters/flatten.rs
index f8ab8c9d4..690fd0c21 100644
--- a/library/core/tests/iter/adapters/flatten.rs
+++ b/library/core/tests/iter/adapters/flatten.rs
@@ -168,3 +168,45 @@ fn test_trusted_len_flatten() {
assert_trusted_len(&iter);
assert_eq!(iter.size_hint(), (20, Some(20)));
}
+
+#[test]
+fn test_flatten_count() {
+ let mut it = once(0..10).chain(once(10..30)).chain(once(30..40)).flatten();
+
+ assert_eq!(it.clone().count(), 40);
+ it.advance_by(5).unwrap();
+ assert_eq!(it.clone().count(), 35);
+ it.advance_back_by(5).unwrap();
+ assert_eq!(it.clone().count(), 30);
+ it.advance_by(10).unwrap();
+ assert_eq!(it.clone().count(), 20);
+ it.advance_back_by(8).unwrap();
+ assert_eq!(it.clone().count(), 12);
+ it.advance_by(4).unwrap();
+ assert_eq!(it.clone().count(), 8);
+ it.advance_back_by(5).unwrap();
+ assert_eq!(it.clone().count(), 3);
+ it.advance_by(3).unwrap();
+ assert_eq!(it.clone().count(), 0);
+}
+
+#[test]
+fn test_flatten_last() {
+ let mut it = once(0..10).chain(once(10..30)).chain(once(30..40)).flatten();
+
+ assert_eq!(it.clone().last(), Some(39));
+ it.advance_by(5).unwrap(); // 5..40
+ assert_eq!(it.clone().last(), Some(39));
+ it.advance_back_by(5).unwrap(); // 5..35
+ assert_eq!(it.clone().last(), Some(34));
+ it.advance_by(10).unwrap(); // 15..35
+ assert_eq!(it.clone().last(), Some(34));
+ it.advance_back_by(8).unwrap(); // 15..27
+ assert_eq!(it.clone().last(), Some(26));
+ it.advance_by(4).unwrap(); // 19..27
+ assert_eq!(it.clone().last(), Some(26));
+ it.advance_back_by(5).unwrap(); // 19..22
+ assert_eq!(it.clone().last(), Some(21));
+ it.advance_by(3).unwrap(); // 22..22
+ assert_eq!(it.clone().last(), None);
+}
diff --git a/library/core/tests/iter/adapters/mod.rs b/library/core/tests/iter/adapters/mod.rs
index 567d9fe49..ffd5f3857 100644
--- a/library/core/tests/iter/adapters/mod.rs
+++ b/library/core/tests/iter/adapters/mod.rs
@@ -1,3 +1,5 @@
+mod array_chunks;
+mod by_ref_sized;
mod chain;
mod cloned;
mod copied;
@@ -183,3 +185,25 @@ impl Clone for CountClone {
ret
}
}
+
+#[derive(Debug, Clone)]
+struct CountDrop<'a> {
+ dropped: bool,
+ count: &'a Cell<usize>,
+}
+
+impl<'a> CountDrop<'a> {
+ pub fn new(count: &'a Cell<usize>) -> Self {
+ Self { dropped: false, count }
+ }
+}
+
+impl Drop for CountDrop<'_> {
+ fn drop(&mut self) {
+ if self.dropped {
+ panic!("double drop");
+ }
+ self.dropped = true;
+ self.count.set(self.count.get() + 1);
+ }
+}
diff --git a/library/core/tests/iter/adapters/skip.rs b/library/core/tests/iter/adapters/skip.rs
index 65f235e86..754641834 100644
--- a/library/core/tests/iter/adapters/skip.rs
+++ b/library/core/tests/iter/adapters/skip.rs
@@ -201,3 +201,34 @@ fn test_skip_non_fused() {
// advance it further. `Unfuse` tests that this doesn't happen by panicking in that scenario.
let _ = non_fused.skip(20).next();
}
+
+#[test]
+fn test_skip_non_fused_nth_overflow() {
+ let non_fused = Unfuse::new(0..10);
+
+ // Ensures that calling skip and `nth` where the sum would overflow does not fail for non-fused
+ // iterators.
+ let _ = non_fused.skip(20).nth(usize::MAX);
+}
+
+#[test]
+fn test_skip_overflow_wrapping() {
+ // Test to ensure that even when `skip + nth` overflows, the correct number of elements is yielded.
+ struct WrappingIterator(usize);
+
+ impl Iterator for WrappingIterator {
+ type Item = usize;
+
+ fn next(&mut self) -> core::option::Option<Self::Item> {
+ <Self as Iterator>::nth(self, 0)
+ }
+
+ fn nth(&mut self, nth: usize) -> core::option::Option<Self::Item> {
+ self.0 = self.0.wrapping_add(nth.wrapping_add(1));
+ Some(self.0)
+ }
+ }
+
+ let wrap = WrappingIterator(0);
+ assert_eq!(wrap.skip(20).nth(usize::MAX), Some(20));
+}
diff --git a/library/core/tests/lib.rs b/library/core/tests/lib.rs
index db94368f6..51f858ade 100644
--- a/library/core/tests/lib.rs
+++ b/library/core/tests/lib.rs
@@ -2,11 +2,12 @@
#![feature(array_chunks)]
#![feature(array_methods)]
#![feature(array_windows)]
-#![feature(bench_black_box)]
+#![feature(bigint_helper_methods)]
#![feature(cell_update)]
#![feature(const_assume)]
#![feature(const_black_box)]
#![feature(const_bool_to_option)]
+#![feature(const_caller_location)]
#![feature(const_cell_into_inner)]
#![feature(const_convert)]
#![feature(const_heap)]
@@ -14,11 +15,13 @@
#![feature(const_maybe_uninit_assume_init_read)]
#![feature(const_nonnull_new)]
#![feature(const_num_from_num)]
+#![feature(const_pointer_byte_offsets)]
#![feature(const_ptr_as_ref)]
#![feature(const_ptr_read)]
#![feature(const_ptr_write)]
#![feature(const_trait_impl)]
#![feature(const_likely)]
+#![feature(const_location_fields)]
#![feature(core_intrinsics)]
#![feature(core_private_bignum)]
#![feature(core_private_diy_float)]
@@ -46,8 +49,8 @@
#![feature(slice_from_ptr_range)]
#![feature(split_as_slice)]
#![feature(maybe_uninit_uninit_array)]
-#![feature(maybe_uninit_array_assume_init)]
#![feature(maybe_uninit_write_slice)]
+#![feature(maybe_uninit_uninit_array_transpose)]
#![feature(min_specialization)]
#![feature(numfmt)]
#![feature(step_trait)]
@@ -61,6 +64,7 @@
#![feature(slice_partition_dedup)]
#![feature(int_log)]
#![feature(iter_advance_by)]
+#![feature(iter_array_chunks)]
#![feature(iter_collect_into)]
#![feature(iter_partition_in_place)]
#![feature(iter_intersperse)]
@@ -71,9 +75,10 @@
#![feature(iterator_try_reduce)]
#![feature(const_mut_refs)]
#![feature(const_pin)]
+#![feature(const_waker)]
#![feature(never_type)]
#![feature(unwrap_infallible)]
-#![feature(result_into_ok_or_err)]
+#![feature(pointer_byte_offsets)]
#![feature(portable_simd)]
#![feature(ptr_metadata)]
#![feature(once_cell)]
@@ -90,12 +95,13 @@
#![feature(strict_provenance_atomic_ptr)]
#![feature(trusted_random_access)]
#![feature(unsize)]
-#![feature(unzip_option)]
#![feature(const_array_from_ref)]
#![feature(const_slice_from_ref)]
#![feature(waker_getters)]
#![feature(slice_flatten)]
#![feature(provide_any)]
+#![feature(utf8_chunks)]
+#![feature(is_ascii_octdigit)]
#![deny(unsafe_op_in_unsafe_fn)]
extern crate test;
@@ -126,6 +132,7 @@ mod nonzero;
mod num;
mod ops;
mod option;
+mod panic;
mod pattern;
mod pin;
mod pin_macro;
diff --git a/library/core/tests/mem.rs b/library/core/tests/mem.rs
index 6856d1a1f..0362e1c8a 100644
--- a/library/core/tests/mem.rs
+++ b/library/core/tests/mem.rs
@@ -130,7 +130,11 @@ fn test_transmute_copy_grow_panics() {
payload
.downcast::<&'static str>()
.and_then(|s| {
- if *s == "cannot transmute_copy if U is larger than T" { Ok(s) } else { Err(s) }
+ if *s == "cannot transmute_copy if Dst is larger than Src" {
+ Ok(s)
+ } else {
+ Err(s)
+ }
})
.unwrap_or_else(|p| panic::resume_unwind(p));
}
@@ -163,18 +167,18 @@ fn assume_init_good() {
#[test]
fn uninit_array_assume_init() {
- let mut array: [MaybeUninit<i16>; 5] = MaybeUninit::uninit_array();
+ let mut array = [MaybeUninit::<i16>::uninit(); 5];
array[0].write(3);
array[1].write(1);
array[2].write(4);
array[3].write(1);
array[4].write(5);
- let array = unsafe { MaybeUninit::array_assume_init(array) };
+ let array = unsafe { array.transpose().assume_init() };
assert_eq!(array, [3, 1, 4, 1, 5]);
- let [] = unsafe { MaybeUninit::<!>::array_assume_init([]) };
+ let [] = unsafe { [MaybeUninit::<!>::uninit(); 0].transpose().assume_init() };
}
#[test]
diff --git a/library/core/tests/num/int_log.rs b/library/core/tests/num/int_log.rs
index dc3092e14..a1edb1a51 100644
--- a/library/core/tests/num/int_log.rs
+++ b/library/core/tests/num/int_log.rs
@@ -1,166 +1,196 @@
-//! This tests the `Integer::{log,log2,log10}` methods. These tests are in a
+//! This tests the `Integer::{ilog,ilog2,ilog10}` methods. These tests are in a
//! separate file because there's both a large number of them, and not all tests
-//! can be run on Android. This is because in Android `log2` uses an imprecise
+//! can be run on Android. This is because in Android `ilog2` uses an imprecise
//! approximation: https://github.com/rust-lang/rust/blob/4825e12fc9c79954aa0fe18f5521efa6c19c7539/src/libstd/sys/unix/android.rs#L27-L53
#[test]
-fn checked_log() {
- assert_eq!(999u32.checked_log(10), Some(2));
- assert_eq!(1000u32.checked_log(10), Some(3));
- assert_eq!(555u32.checked_log(13), Some(2));
- assert_eq!(63u32.checked_log(4), Some(2));
- assert_eq!(64u32.checked_log(4), Some(3));
- assert_eq!(10460353203u64.checked_log(3), Some(21));
- assert_eq!(10460353202u64.checked_log(3), Some(20));
- assert_eq!(147808829414345923316083210206383297601u128.checked_log(3), Some(80));
- assert_eq!(147808829414345923316083210206383297600u128.checked_log(3), Some(79));
- assert_eq!(22528399544939174411840147874772641u128.checked_log(19683), Some(8));
- assert_eq!(22528399544939174411840147874772631i128.checked_log(19683), Some(7));
-
- assert_eq!(0u8.checked_log(4), None);
- assert_eq!(0u16.checked_log(4), None);
- assert_eq!(0i8.checked_log(4), None);
- assert_eq!(0i16.checked_log(4), None);
+fn checked_ilog() {
+ assert_eq!(999u32.checked_ilog(10), Some(2));
+ assert_eq!(1000u32.checked_ilog(10), Some(3));
+ assert_eq!(555u32.checked_ilog(13), Some(2));
+ assert_eq!(63u32.checked_ilog(4), Some(2));
+ assert_eq!(64u32.checked_ilog(4), Some(3));
+ assert_eq!(10460353203u64.checked_ilog(3), Some(21));
+ assert_eq!(10460353202u64.checked_ilog(3), Some(20));
+ assert_eq!(147808829414345923316083210206383297601u128.checked_ilog(3), Some(80));
+ assert_eq!(147808829414345923316083210206383297600u128.checked_ilog(3), Some(79));
+ assert_eq!(22528399544939174411840147874772641u128.checked_ilog(19683), Some(8));
+ assert_eq!(22528399544939174411840147874772631i128.checked_ilog(19683), Some(7));
+
+ assert_eq!(0u8.checked_ilog(4), None);
+ assert_eq!(0u16.checked_ilog(4), None);
+ assert_eq!(0i8.checked_ilog(4), None);
+ assert_eq!(0i16.checked_ilog(4), None);
#[cfg(not(miri))] // Miri is too slow
for i in i16::MIN..=0 {
- assert_eq!(i.checked_log(4), None);
+ assert_eq!(i.checked_ilog(4), None);
}
#[cfg(not(miri))] // Miri is too slow
for i in 1..=i16::MAX {
- assert_eq!(i.checked_log(13), Some((i as f32).log(13.0) as u32));
+ assert_eq!(i.checked_ilog(13), Some((i as f32).log(13.0) as u32));
}
#[cfg(not(miri))] // Miri is too slow
for i in 1..=u16::MAX {
- assert_eq!(i.checked_log(13), Some((i as f32).log(13.0) as u32));
+ assert_eq!(i.checked_ilog(13), Some((i as f32).log(13.0) as u32));
}
}
#[test]
-fn checked_log2() {
- assert_eq!(5u32.checked_log2(), Some(2));
- assert_eq!(0u64.checked_log2(), None);
- assert_eq!(128i32.checked_log2(), Some(7));
- assert_eq!((-55i16).checked_log2(), None);
+fn checked_ilog2() {
+ assert_eq!(5u32.checked_ilog2(), Some(2));
+ assert_eq!(0u64.checked_ilog2(), None);
+ assert_eq!(128i32.checked_ilog2(), Some(7));
+ assert_eq!((-55i16).checked_ilog2(), None);
- assert_eq!(0u8.checked_log2(), None);
- assert_eq!(0u16.checked_log2(), None);
- assert_eq!(0i8.checked_log2(), None);
- assert_eq!(0i16.checked_log2(), None);
+ assert_eq!(0u8.checked_ilog2(), None);
+ assert_eq!(0u16.checked_ilog2(), None);
+ assert_eq!(0i8.checked_ilog2(), None);
+ assert_eq!(0i16.checked_ilog2(), None);
for i in 1..=u8::MAX {
- assert_eq!(i.checked_log2(), Some((i as f32).log2() as u32));
+ assert_eq!(i.checked_ilog2(), Some((i as f32).log2() as u32));
}
#[cfg(not(miri))] // Miri is too slow
for i in 1..=u16::MAX {
- // Guard against Android's imprecise f32::log2 implementation.
+ // Guard against Android's imprecise f32::ilog2 implementation.
if i != 8192 && i != 32768 {
- assert_eq!(i.checked_log2(), Some((i as f32).log2() as u32));
+ assert_eq!(i.checked_ilog2(), Some((i as f32).log2() as u32));
}
}
for i in i8::MIN..=0 {
- assert_eq!(i.checked_log2(), None);
+ assert_eq!(i.checked_ilog2(), None);
}
for i in 1..=i8::MAX {
- assert_eq!(i.checked_log2(), Some((i as f32).log2() as u32));
+ assert_eq!(i.checked_ilog2(), Some((i as f32).log2() as u32));
}
#[cfg(not(miri))] // Miri is too slow
for i in i16::MIN..=0 {
- assert_eq!(i.checked_log2(), None);
+ assert_eq!(i.checked_ilog2(), None);
}
#[cfg(not(miri))] // Miri is too slow
for i in 1..=i16::MAX {
- // Guard against Android's imprecise f32::log2 implementation.
+ // Guard against Android's imprecise f32::ilog2 implementation.
if i != 8192 {
- assert_eq!(i.checked_log2(), Some((i as f32).log2() as u32));
+ assert_eq!(i.checked_ilog2(), Some((i as f32).log2() as u32));
}
}
}
-// Validate cases that fail on Android's imprecise float log2 implementation.
+// Validate cases that fail on Android's imprecise float ilog2 implementation.
#[test]
#[cfg(not(target_os = "android"))]
-fn checked_log2_not_android() {
- assert_eq!(8192u16.checked_log2(), Some((8192f32).log2() as u32));
- assert_eq!(32768u16.checked_log2(), Some((32768f32).log2() as u32));
- assert_eq!(8192i16.checked_log2(), Some((8192f32).log2() as u32));
+fn checked_ilog2_not_android() {
+ assert_eq!(8192u16.checked_ilog2(), Some((8192f32).log2() as u32));
+ assert_eq!(32768u16.checked_ilog2(), Some((32768f32).log2() as u32));
+ assert_eq!(8192i16.checked_ilog2(), Some((8192f32).log2() as u32));
}
#[test]
-fn checked_log10() {
- assert_eq!(0u8.checked_log10(), None);
- assert_eq!(0u16.checked_log10(), None);
- assert_eq!(0i8.checked_log10(), None);
- assert_eq!(0i16.checked_log10(), None);
+fn checked_ilog10() {
+ assert_eq!(0u8.checked_ilog10(), None);
+ assert_eq!(0u16.checked_ilog10(), None);
+ assert_eq!(0i8.checked_ilog10(), None);
+ assert_eq!(0i16.checked_ilog10(), None);
#[cfg(not(miri))] // Miri is too slow
for i in i16::MIN..=0 {
- assert_eq!(i.checked_log10(), None);
+ assert_eq!(i.checked_ilog10(), None);
}
#[cfg(not(miri))] // Miri is too slow
for i in 1..=i16::MAX {
- assert_eq!(i.checked_log10(), Some((i as f32).log10() as u32));
+ assert_eq!(i.checked_ilog10(), Some((i as f32).log10() as u32));
}
#[cfg(not(miri))] // Miri is too slow
for i in 1..=u16::MAX {
- assert_eq!(i.checked_log10(), Some((i as f32).log10() as u32));
+ assert_eq!(i.checked_ilog10(), Some((i as f32).log10() as u32));
}
#[cfg(not(miri))] // Miri is too slow
for i in 1..=100_000u32 {
- assert_eq!(i.checked_log10(), Some((i as f32).log10() as u32));
+ assert_eq!(i.checked_ilog10(), Some((i as f32).log10() as u32));
}
}
-macro_rules! log10_loop {
- ($T:ty, $log10_max:expr) => {
- assert_eq!(<$T>::MAX.log10(), $log10_max);
- for i in 0..=$log10_max {
+macro_rules! ilog10_loop {
+ ($T:ty, $ilog10_max:expr) => {
+ assert_eq!(<$T>::MAX.ilog10(), $ilog10_max);
+ for i in 0..=$ilog10_max {
let p = (10 as $T).pow(i as u32);
if p >= 10 {
- assert_eq!((p - 9).log10(), i - 1);
- assert_eq!((p - 1).log10(), i - 1);
+ assert_eq!((p - 9).ilog10(), i - 1);
+ assert_eq!((p - 1).ilog10(), i - 1);
}
- assert_eq!(p.log10(), i);
- assert_eq!((p + 1).log10(), i);
+ assert_eq!(p.ilog10(), i);
+ assert_eq!((p + 1).ilog10(), i);
if p >= 10 {
- assert_eq!((p + 9).log10(), i);
+ assert_eq!((p + 9).ilog10(), i);
}
- // also check `x.log(10)`
+ // also check `x.ilog(10)`
if p >= 10 {
- assert_eq!((p - 9).log(10), i - 1);
- assert_eq!((p - 1).log(10), i - 1);
+ assert_eq!((p - 9).ilog(10), i - 1);
+ assert_eq!((p - 1).ilog(10), i - 1);
}
- assert_eq!(p.log(10), i);
- assert_eq!((p + 1).log(10), i);
+ assert_eq!(p.ilog(10), i);
+ assert_eq!((p + 1).ilog(10), i);
if p >= 10 {
- assert_eq!((p + 9).log(10), i);
+ assert_eq!((p + 9).ilog(10), i);
}
}
};
}
#[test]
-fn log10_u8() {
- log10_loop! { u8, 2 }
+fn ilog10_u8() {
+ ilog10_loop! { u8, 2 }
}
#[test]
-fn log10_u16() {
- log10_loop! { u16, 4 }
+fn ilog10_u16() {
+ ilog10_loop! { u16, 4 }
}
#[test]
-fn log10_u32() {
- log10_loop! { u32, 9 }
+fn ilog10_u32() {
+ ilog10_loop! { u32, 9 }
}
#[test]
-fn log10_u64() {
- log10_loop! { u64, 19 }
+fn ilog10_u64() {
+ ilog10_loop! { u64, 19 }
}
#[test]
-fn log10_u128() {
- log10_loop! { u128, 38 }
+fn ilog10_u128() {
+ ilog10_loop! { u128, 38 }
+}
+
+#[test]
+#[should_panic(expected = "argument of integer logarithm must be positive")]
+fn ilog2_of_0_panic() {
+ let _ = 0u32.ilog2();
+}
+
+#[test]
+#[should_panic(expected = "argument of integer logarithm must be positive")]
+fn ilog10_of_0_panic() {
+ let _ = 0u32.ilog10();
+}
+
+#[test]
+#[should_panic(expected = "argument of integer logarithm must be positive")]
+fn ilog3_of_0_panic() {
+ let _ = 0u32.ilog(3);
+}
+
+#[test]
+#[should_panic(expected = "base of integer logarithm must be at least 2")]
+fn ilog0_of_1_panic() {
+ let _ = 1u32.ilog(0);
+}
+
+#[test]
+#[should_panic(expected = "base of integer logarithm must be at least 2")]
+fn ilog1_of_1_panic() {
+ let _ = 1u32.ilog(1);
}
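
For reading the assertions above: `ilog`/`ilog2`/`ilog10` are the floor of the logarithm over the integers, so `999u32.checked_ilog(10)` is `Some(2)` because 10^2 <= 999 < 10^3, and zero (or a base below 2, for the checked variants) yields `None`. A reference definition by repeated division, which is not the library's actual algorithm but matches what the tests expect:

```rust
// Floor logarithm by repeated division; a readable reference, not the
// optimized implementation in core.
fn checked_ilog(mut x: u128, base: u128) -> Option<u32> {
    if x == 0 || base < 2 {
        return None;
    }
    let mut n = 0;
    while x >= base {
        x /= base;
        n += 1;
    }
    Some(n)
}

fn main() {
    assert_eq!(checked_ilog(999, 10), Some(2));
    assert_eq!(checked_ilog(1000, 10), Some(3));
    assert_eq!(checked_ilog(64, 4), Some(3));
    assert_eq!(checked_ilog(10_460_353_203, 3), Some(21));
    assert_eq!(checked_ilog(0, 10), None);
}
```
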
diff --git a/library/core/tests/num/int_macros.rs b/library/core/tests/num/int_macros.rs
index 8b84a78e6..18c55e43a 100644
--- a/library/core/tests/num/int_macros.rs
+++ b/library/core/tests/num/int_macros.rs
@@ -338,6 +338,32 @@ macro_rules! int_module {
assert_eq!(MIN.checked_next_multiple_of(-3), None);
assert_eq!(MIN.checked_next_multiple_of(-1), Some(MIN));
}
+
+ #[test]
+ fn test_carrying_add() {
+ assert_eq!($T::MAX.carrying_add(1, false), ($T::MIN, true));
+ assert_eq!($T::MAX.carrying_add(0, true), ($T::MIN, true));
+ assert_eq!($T::MAX.carrying_add(1, true), ($T::MIN + 1, true));
+ assert_eq!($T::MAX.carrying_add(-1, false), ($T::MAX - 1, false));
+ assert_eq!($T::MAX.carrying_add(-1, true), ($T::MAX, false)); // no intermediate overflow
+ assert_eq!($T::MIN.carrying_add(-1, false), ($T::MAX, true));
+ assert_eq!($T::MIN.carrying_add(-1, true), ($T::MIN, false)); // no intermediate overflow
+ assert_eq!((0 as $T).carrying_add($T::MAX, true), ($T::MIN, true));
+ assert_eq!((0 as $T).carrying_add($T::MIN, true), ($T::MIN + 1, false));
+ }
+
+ #[test]
+ fn test_borrowing_sub() {
+ assert_eq!($T::MIN.borrowing_sub(1, false), ($T::MAX, true));
+ assert_eq!($T::MIN.borrowing_sub(0, true), ($T::MAX, true));
+ assert_eq!($T::MIN.borrowing_sub(1, true), ($T::MAX - 1, true));
+ assert_eq!($T::MIN.borrowing_sub(-1, false), ($T::MIN + 1, false));
+ assert_eq!($T::MIN.borrowing_sub(-1, true), ($T::MIN, false)); // no intermediate overflow
+ assert_eq!($T::MAX.borrowing_sub(-1, false), ($T::MIN, true));
+ assert_eq!($T::MAX.borrowing_sub(-1, true), ($T::MAX, false)); // no intermediate overflow
+ assert_eq!((0 as $T).borrowing_sub($T::MIN, false), ($T::MIN, true));
+ assert_eq!((0 as $T).borrowing_sub($T::MIN, true), ($T::MAX, false));
+ }
}
};
}
diff --git a/library/core/tests/num/mod.rs b/library/core/tests/num/mod.rs
index 49580cdcc..c79e909e4 100644
--- a/library/core/tests/num/mod.rs
+++ b/library/core/tests/num/mod.rs
@@ -172,7 +172,7 @@ fn test_can_not_overflow() {
// Calculate the string length for the smallest overflowing number:
let max_len_string = format_radix(num, base as u128);
- // Ensure that that string length is deemed to potentially overflow:
+ // Ensure that string length is deemed to potentially overflow:
assert!(can_overflow::<$t>(base, &max_len_string));
}
)*)
diff --git a/library/core/tests/num/uint_macros.rs b/library/core/tests/num/uint_macros.rs
index 93ae620c2..15ae9f232 100644
--- a/library/core/tests/num/uint_macros.rs
+++ b/library/core/tests/num/uint_macros.rs
@@ -230,6 +230,28 @@ macro_rules! uint_module {
assert_eq!((1 as $T).checked_next_multiple_of(0), None);
assert_eq!(MAX.checked_next_multiple_of(2), None);
}
+
+ #[test]
+ fn test_carrying_add() {
+ assert_eq!($T::MAX.carrying_add(1, false), (0, true));
+ assert_eq!($T::MAX.carrying_add(0, true), (0, true));
+ assert_eq!($T::MAX.carrying_add(1, true), (1, true));
+
+ assert_eq!($T::MIN.carrying_add($T::MAX, false), ($T::MAX, false));
+ assert_eq!($T::MIN.carrying_add(0, true), (1, false));
+ assert_eq!($T::MIN.carrying_add($T::MAX, true), (0, true));
+ }
+
+ #[test]
+ fn test_borrowing_sub() {
+ assert_eq!($T::MIN.borrowing_sub(1, false), ($T::MAX, true));
+ assert_eq!($T::MIN.borrowing_sub(0, true), ($T::MAX, true));
+ assert_eq!($T::MIN.borrowing_sub(1, true), ($T::MAX - 1, true));
+
+ assert_eq!($T::MAX.borrowing_sub($T::MAX, false), (0, false));
+ assert_eq!($T::MAX.borrowing_sub(0, true), ($T::MAX - 1, false));
+ assert_eq!($T::MAX.borrowing_sub($T::MAX, true), ($T::MAX, true));
+ }
}
};
}
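
The new `carrying_add`/`borrowing_sub` tests (feature `bigint_helper_methods`) pin down the limb-chaining semantics: each call returns the wrapped result together with a single outgoing carry or borrow bit, and feeding the incoming carry in cannot produce a second, unreported overflow. A sketch of multi-limb addition using a stable stand-in with the same semantics:

```rust
// Stable stand-in for `u64::carrying_add`: wrapped sum plus outgoing carry.
fn carrying_add(a: u64, b: u64, carry: bool) -> (u64, bool) {
    let (s1, c1) = a.overflowing_add(b);
    let (s2, c2) = s1.overflowing_add(carry as u64);
    (s2, c1 || c2) // at most one of the two additions can overflow
}

// Add two little-endian multi-limb numbers, threading the carry through.
fn add_limbs(a: &[u64; 3], b: &[u64; 3]) -> ([u64; 3], bool) {
    let mut out = [0u64; 3];
    let mut carry = false;
    for i in 0..3 {
        (out[i], carry) = carrying_add(a[i], b[i], carry);
    }
    (out, carry)
}

fn main() {
    // (2^64 - 1) + 1 = 2^64: the low limb wraps to 0 and the carry moves up.
    let (sum, carry) = add_limbs(&[u64::MAX, 0, 0], &[1, 0, 0]);
    assert_eq!(sum, [0, 1, 0]);
    assert!(!carry);
}
```
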
diff --git a/library/core/tests/num/wrapping.rs b/library/core/tests/num/wrapping.rs
index 8ded139a1..c5a719883 100644
--- a/library/core/tests/num/wrapping.rs
+++ b/library/core/tests/num/wrapping.rs
@@ -75,8 +75,6 @@ wrapping_test!(test_wrapping_u64, u64, u64::MIN, u64::MAX);
wrapping_test!(test_wrapping_u128, u128, u128::MIN, u128::MAX);
wrapping_test!(test_wrapping_usize, usize, usize::MIN, usize::MAX);
-// Don't warn about overflowing ops on 32-bit platforms
-#[cfg_attr(target_pointer_width = "32", allow(const_err))]
#[test]
fn wrapping_int_api() {
assert_eq!(i8::MAX.wrapping_add(1), i8::MIN);
diff --git a/library/core/tests/option.rs b/library/core/tests/option.rs
index 9f5e537dc..f36f7c268 100644
--- a/library/core/tests/option.rs
+++ b/library/core/tests/option.rs
@@ -57,6 +57,7 @@ fn test_get_resource() {
}
#[test]
+#[cfg_attr(not(bootstrap), allow(for_loops_over_fallibles))]
fn test_option_dance() {
let x = Some(());
let mut y = Some(5);
diff --git a/library/core/tests/panic.rs b/library/core/tests/panic.rs
new file mode 100644
index 000000000..24b6c56b3
--- /dev/null
+++ b/library/core/tests/panic.rs
@@ -0,0 +1 @@
+mod location;
diff --git a/library/core/tests/panic/location.rs b/library/core/tests/panic/location.rs
new file mode 100644
index 000000000..d20241d83
--- /dev/null
+++ b/library/core/tests/panic/location.rs
@@ -0,0 +1,31 @@
+use core::panic::Location;
+
+// Note: Some of the following tests depend on the source location,
+// so please be careful when editing this file.
+
+#[test]
+fn location_const_caller() {
+ const _CALLER_REFERENCE: &Location<'static> = Location::caller();
+ const _CALLER: Location<'static> = *Location::caller();
+}
+
+#[test]
+fn location_const_file() {
+ const CALLER: &Location<'static> = Location::caller();
+ const FILE: &str = CALLER.file();
+ assert_eq!(FILE, file!());
+}
+
+#[test]
+fn location_const_line() {
+ const CALLER: &Location<'static> = Location::caller();
+ const LINE: u32 = CALLER.line();
+ assert_eq!(LINE, 21);
+}
+
+#[test]
+fn location_const_column() {
+ const CALLER: &Location<'static> = Location::caller();
+ const COLUMN: u32 = CALLER.column();
+ assert_eq!(COLUMN, 40);
+}
diff --git a/library/core/tests/ptr.rs b/library/core/tests/ptr.rs
index 12861794c..97a369810 100644
--- a/library/core/tests/ptr.rs
+++ b/library/core/tests/ptr.rs
@@ -650,7 +650,7 @@ fn thin_box() {
.unwrap_or_else(|| handle_alloc_error(layout))
.cast::<DynMetadata<T>>();
ptr.as_ptr().write(meta);
- ptr.cast::<u8>().as_ptr().add(offset).cast::<Value>().write(value);
+ ptr.as_ptr().byte_add(offset).cast::<Value>().write(value);
Self { ptr, phantom: PhantomData }
}
}
diff --git a/library/core/tests/result.rs b/library/core/tests/result.rs
index 103e8cc3a..50926da3c 100644
--- a/library/core/tests/result.rs
+++ b/library/core/tests/result.rs
@@ -96,15 +96,6 @@ fn test_unwrap_or() {
}
#[test]
-fn test_ok_or_err() {
- let ok: Result<isize, isize> = Ok(100);
- let err: Result<isize, isize> = Err(200);
-
- assert_eq!(ok.into_ok_or_err(), 100);
- assert_eq!(err.into_ok_or_err(), 200);
-}
-
-#[test]
fn test_unwrap_or_else() {
fn handler(msg: &'static str) -> isize {
if msg == "I got this." { 50 } else { panic!("BadBad") }
diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs
index 0656109e9..9e1fbea79 100644
--- a/library/core/tests/slice.rs
+++ b/library/core/tests/slice.rs
@@ -1197,7 +1197,6 @@ fn chunks_mut_are_send_and_sync() {
use std::slice::{ChunksExactMut, ChunksMut, RChunksExactMut, RChunksMut};
use std::sync::MutexGuard;
- #[allow(unused)]
fn assert_send_and_sync()
where
ChunksMut<'static, Cell<i32>>: Send,
@@ -1210,6 +1209,8 @@ fn chunks_mut_are_send_and_sync() {
RChunksExactMut<'static, MutexGuard<'static, u32>>: Sync,
{
}
+
+ assert_send_and_sync();
}
#[test]
@@ -1283,7 +1284,6 @@ fn test_windows_zip() {
}
#[test]
-#[allow(const_err)]
fn test_iter_ref_consistency() {
use std::fmt::Debug;
diff --git a/library/core/tests/str_lossy.rs b/library/core/tests/str_lossy.rs
index d4b47a470..9d3f0b65f 100644
--- a/library/core/tests/str_lossy.rs
+++ b/library/core/tests/str_lossy.rs
@@ -1,85 +1,85 @@
-use core::str::lossy::*;
+use core::str::Utf8Chunks;
#[test]
fn chunks() {
- let mut iter = Utf8Lossy::from_bytes(b"hello").chunks();
- assert_eq!(Some(Utf8LossyChunk { valid: "hello", broken: b"" }), iter.next());
- assert_eq!(None, iter.next());
+ macro_rules! assert_chunks {
+ ( $string:expr, $(($valid:expr, $invalid:expr)),* $(,)? ) => {{
+ let mut iter = Utf8Chunks::new($string);
+ $(
+ let chunk = iter.next().expect("missing chunk");
+ assert_eq!($valid, chunk.valid());
+ assert_eq!($invalid, chunk.invalid());
+ )*
+ assert_eq!(None, iter.next());
+ }};
+ }
- let mut iter = Utf8Lossy::from_bytes("ศไทย中华Việt Nam".as_bytes()).chunks();
- assert_eq!(Some(Utf8LossyChunk { valid: "ศไทย中华Việt Nam", broken: b"" }), iter.next());
- assert_eq!(None, iter.next());
-
- let mut iter = Utf8Lossy::from_bytes(b"Hello\xC2 There\xFF Goodbye").chunks();
- assert_eq!(Some(Utf8LossyChunk { valid: "Hello", broken: b"\xC2" }), iter.next());
- assert_eq!(Some(Utf8LossyChunk { valid: " There", broken: b"\xFF" }), iter.next());
- assert_eq!(Some(Utf8LossyChunk { valid: " Goodbye", broken: b"" }), iter.next());
- assert_eq!(None, iter.next());
-
- let mut iter = Utf8Lossy::from_bytes(b"Hello\xC0\x80 There\xE6\x83 Goodbye").chunks();
- assert_eq!(Some(Utf8LossyChunk { valid: "Hello", broken: b"\xC0" }), iter.next());
- assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\x80" }), iter.next());
- assert_eq!(Some(Utf8LossyChunk { valid: " There", broken: b"\xE6\x83" }), iter.next());
- assert_eq!(Some(Utf8LossyChunk { valid: " Goodbye", broken: b"" }), iter.next());
- assert_eq!(None, iter.next());
-
- let mut iter = Utf8Lossy::from_bytes(b"\xF5foo\xF5\x80bar").chunks();
- assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xF5" }), iter.next());
- assert_eq!(Some(Utf8LossyChunk { valid: "foo", broken: b"\xF5" }), iter.next());
- assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\x80" }), iter.next());
- assert_eq!(Some(Utf8LossyChunk { valid: "bar", broken: b"" }), iter.next());
- assert_eq!(None, iter.next());
-
- let mut iter = Utf8Lossy::from_bytes(b"\xF1foo\xF1\x80bar\xF1\x80\x80baz").chunks();
- assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xF1" }), iter.next());
- assert_eq!(Some(Utf8LossyChunk { valid: "foo", broken: b"\xF1\x80" }), iter.next());
- assert_eq!(Some(Utf8LossyChunk { valid: "bar", broken: b"\xF1\x80\x80" }), iter.next());
- assert_eq!(Some(Utf8LossyChunk { valid: "baz", broken: b"" }), iter.next());
- assert_eq!(None, iter.next());
-
- let mut iter = Utf8Lossy::from_bytes(b"\xF4foo\xF4\x80bar\xF4\xBFbaz").chunks();
- assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xF4" }), iter.next());
- assert_eq!(Some(Utf8LossyChunk { valid: "foo", broken: b"\xF4\x80" }), iter.next());
- assert_eq!(Some(Utf8LossyChunk { valid: "bar", broken: b"\xF4" }), iter.next());
- assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xBF" }), iter.next());
- assert_eq!(Some(Utf8LossyChunk { valid: "baz", broken: b"" }), iter.next());
- assert_eq!(None, iter.next());
-
- let mut iter = Utf8Lossy::from_bytes(b"\xF0\x80\x80\x80foo\xF0\x90\x80\x80bar").chunks();
- assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xF0" }), iter.next());
- assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\x80" }), iter.next());
- assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\x80" }), iter.next());
- assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\x80" }), iter.next());
- assert_eq!(Some(Utf8LossyChunk { valid: "foo\u{10000}bar", broken: b"" }), iter.next());
- assert_eq!(None, iter.next());
+ assert_chunks!(b"hello", ("hello", b""));
+ assert_chunks!("ศไทย中华Việt Nam".as_bytes(), ("ศไทย中华Việt Nam", b""));
+ assert_chunks!(
+ b"Hello\xC2 There\xFF Goodbye",
+ ("Hello", b"\xC2"),
+ (" There", b"\xFF"),
+ (" Goodbye", b""),
+ );
+ assert_chunks!(
+ b"Hello\xC0\x80 There\xE6\x83 Goodbye",
+ ("Hello", b"\xC0"),
+ ("", b"\x80"),
+ (" There", b"\xE6\x83"),
+ (" Goodbye", b""),
+ );
+ assert_chunks!(
+ b"\xF5foo\xF5\x80bar",
+ ("", b"\xF5"),
+ ("foo", b"\xF5"),
+ ("", b"\x80"),
+ ("bar", b""),
+ );
+ assert_chunks!(
+ b"\xF1foo\xF1\x80bar\xF1\x80\x80baz",
+ ("", b"\xF1"),
+ ("foo", b"\xF1\x80"),
+ ("bar", b"\xF1\x80\x80"),
+ ("baz", b""),
+ );
+ assert_chunks!(
+ b"\xF4foo\xF4\x80bar\xF4\xBFbaz",
+ ("", b"\xF4"),
+ ("foo", b"\xF4\x80"),
+ ("bar", b"\xF4"),
+ ("", b"\xBF"),
+ ("baz", b""),
+ );
+ assert_chunks!(
+ b"\xF0\x80\x80\x80foo\xF0\x90\x80\x80bar",
+ ("", b"\xF0"),
+ ("", b"\x80"),
+ ("", b"\x80"),
+ ("", b"\x80"),
+ ("foo\u{10000}bar", b""),
+ );
// surrogates
- let mut iter = Utf8Lossy::from_bytes(b"\xED\xA0\x80foo\xED\xBF\xBFbar").chunks();
- assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xED" }), iter.next());
- assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xA0" }), iter.next());
- assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\x80" }), iter.next());
- assert_eq!(Some(Utf8LossyChunk { valid: "foo", broken: b"\xED" }), iter.next());
- assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xBF" }), iter.next());
- assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xBF" }), iter.next());
- assert_eq!(Some(Utf8LossyChunk { valid: "bar", broken: b"" }), iter.next());
- assert_eq!(None, iter.next());
-}
-
-#[test]
-fn display() {
- assert_eq!(
- "Hello\u{FFFD}\u{FFFD} There\u{FFFD} Goodbye",
- &Utf8Lossy::from_bytes(b"Hello\xC0\x80 There\xE6\x83 Goodbye").to_string()
+ assert_chunks!(
+ b"\xED\xA0\x80foo\xED\xBF\xBFbar",
+ ("", b"\xED"),
+ ("", b"\xA0"),
+ ("", b"\x80"),
+ ("foo", b"\xED"),
+ ("", b"\xBF"),
+ ("", b"\xBF"),
+ ("bar", b""),
);
}
#[test]
fn debug() {
assert_eq!(
- "\"Hello\\xc0\\x80 There\\xe6\\x83 Goodbye\\u{10d4ea}\"",
+ "\"Hello\\xC0\\x80 There\\xE6\\x83 Goodbye\\u{10d4ea}\"",
&format!(
"{:?}",
- Utf8Lossy::from_bytes(b"Hello\xC0\x80 There\xE6\x83 Goodbye\xf4\x8d\x93\xaa")
- )
+ Utf8Chunks::new(b"Hello\xC0\x80 There\xE6\x83 Goodbye\xf4\x8d\x93\xaa").debug(),
+ ),
);
}
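The rewritten test above exercises `Utf8Chunks`, the iterator-based replacement for `Utf8Lossy`: each chunk pairs a maximal valid UTF-8 prefix with the invalid bytes that follow it. A minimal standalone sketch of the same API on nightly (the `utf8_chunks` feature-gate name is an assumption, not taken from this patch):

    // Nightly-only sketch; the feature-gate name below is assumed.
    #![feature(utf8_chunks)]

    use core::str::Utf8Chunks;

    fn main() {
        // Mirrors the assert_chunks! cases above: "Hello" is valid, \xC2 is not, etc.
        for chunk in Utf8Chunks::new(b"Hello\xC2 There\xFF Goodbye") {
            println!("valid: {:?}, invalid: {:?}", chunk.valid(), chunk.invalid());
        }
    }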
diff --git a/library/core/tests/task.rs b/library/core/tests/task.rs
index d71fef9e5..56be30e92 100644
--- a/library/core/tests/task.rs
+++ b/library/core/tests/task.rs
@@ -1,4 +1,4 @@
-use core::task::Poll;
+use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
#[test]
fn poll_const() {
@@ -12,3 +12,18 @@ fn poll_const() {
const IS_PENDING: bool = POLL.is_pending();
assert!(IS_PENDING);
}
+
+#[test]
+fn waker_const() {
+ const VOID_TABLE: RawWakerVTable = RawWakerVTable::new(|_| VOID_WAKER, |_| {}, |_| {}, |_| {});
+
+ const VOID_WAKER: RawWaker = RawWaker::new(&(), &VOID_TABLE);
+
+ static WAKER: Waker = unsafe { Waker::from_raw(VOID_WAKER) };
+
+ static CONTEXT: Context<'static> = Context::from_waker(&WAKER);
+
+ static WAKER_REF: &'static Waker = CONTEXT.waker();
+
+ WAKER_REF.wake_by_ref();
+}
diff --git a/library/core/tests/time.rs b/library/core/tests/time.rs
index fe2d2f241..a05128de4 100644
--- a/library/core/tests/time.rs
+++ b/library/core/tests/time.rs
@@ -197,9 +197,31 @@ fn correct_sum() {
#[test]
fn debug_formatting_extreme_values() {
assert_eq!(
- format!("{:?}", Duration::new(18_446_744_073_709_551_615, 123_456_789)),
+ format!("{:?}", Duration::new(u64::MAX, 123_456_789)),
"18446744073709551615.123456789s"
);
+ assert_eq!(format!("{:.0?}", Duration::MAX), "18446744073709551616s");
+ assert_eq!(format!("{:.0?}", Duration::new(u64::MAX, 500_000_000)), "18446744073709551616s");
+ assert_eq!(format!("{:.0?}", Duration::new(u64::MAX, 499_999_999)), "18446744073709551615s");
+ assert_eq!(
+ format!("{:.3?}", Duration::new(u64::MAX, 999_500_000)),
+ "18446744073709551616.000s"
+ );
+ assert_eq!(
+ format!("{:.3?}", Duration::new(u64::MAX, 999_499_999)),
+ "18446744073709551615.999s"
+ );
+ assert_eq!(
+ format!("{:.8?}", Duration::new(u64::MAX, 999_999_995)),
+ "18446744073709551616.00000000s"
+ );
+ assert_eq!(
+ format!("{:.8?}", Duration::new(u64::MAX, 999_999_994)),
+ "18446744073709551615.99999999s"
+ );
+ assert_eq!(format!("{:21.0?}", Duration::MAX), "18446744073709551616s");
+ assert_eq!(format!("{:22.0?}", Duration::MAX), "18446744073709551616s ");
+ assert_eq!(format!("{:24.0?}", Duration::MAX), "18446744073709551616s ");
}
#[test]
@@ -445,3 +467,11 @@ fn duration_const() {
const SATURATING_MUL: Duration = MAX.saturating_mul(2);
assert_eq!(SATURATING_MUL, MAX);
}
+
+#[test]
+fn from_neg_zero() {
+ assert_eq!(Duration::try_from_secs_f32(-0.0), Ok(Duration::ZERO));
+ assert_eq!(Duration::try_from_secs_f64(-0.0), Ok(Duration::ZERO));
+ assert_eq!(Duration::from_secs_f32(-0.0), Duration::ZERO);
+ assert_eq!(Duration::from_secs_f64(-0.0), Duration::ZERO);
+}
diff --git a/library/panic_abort/src/android.rs b/library/panic_abort/src/android.rs
index 18bb932f1..0fd824f8a 100644
--- a/library/panic_abort/src/android.rs
+++ b/library/panic_abort/src/android.rs
@@ -42,7 +42,7 @@ pub(crate) unsafe fn android_set_abort_message(payload: *mut &mut dyn BoxMeUp) {
return; // allocation failure
}
copy_nonoverlapping(msg.as_ptr(), buf as *mut u8, msg.len());
- buf.offset(msg.len() as isize).write(0);
+ buf.add(msg.len()).write(0);
let func = transmute::<usize, SetAbortMessageType>(func_addr);
func(buf);
diff --git a/library/panic_abort/src/lib.rs b/library/panic_abort/src/lib.rs
index 8801c670b..cba8ef25d 100644
--- a/library/panic_abort/src/lib.rs
+++ b/library/panic_abort/src/lib.rs
@@ -113,27 +113,11 @@ pub unsafe fn __rust_start_panic(_payload: *mut &mut dyn BoxMeUp) -> u32 {
// binaries, but it should never be called as we don't link in an unwinding
// runtime at all.
pub mod personalities {
- #[rustc_std_internal_symbol]
- #[cfg(not(any(
- all(target_family = "wasm", not(target_os = "emscripten")),
- all(target_os = "windows", target_env = "gnu", target_arch = "x86_64",),
- )))]
- pub extern "C" fn rust_eh_personality() {}
-
- // On x86_64-pc-windows-gnu we use our own personality function that needs
- // to return `ExceptionContinueSearch` as we're passing on all our frames.
- #[rustc_std_internal_symbol]
- #[cfg(all(target_os = "windows", target_env = "gnu", target_arch = "x86_64"))]
- pub extern "C" fn rust_eh_personality(
- _record: usize,
- _frame: usize,
- _context: usize,
- _dispatcher: usize,
- ) -> u32 {
- 1 // `ExceptionContinueSearch`
- }
+    // This module used to contain stubs for the personality
+    // functions of various platforms, but these were removed when personality
+    // functions were moved to std.
- // Similar to above, this corresponds to the `eh_catch_typeinfo` lang item
+ // This corresponds to the `eh_catch_typeinfo` lang item
// that's only used on Emscripten currently.
//
// Since panics don't generate exceptions and foreign exceptions are
@@ -143,13 +127,4 @@ pub mod personalities {
#[allow(non_upper_case_globals)]
#[cfg(target_os = "emscripten")]
static rust_eh_catch_typeinfo: [usize; 2] = [0; 2];
-
- // These two are called by our startup objects on i686-pc-windows-gnu, but
- // they don't need to do anything so the bodies are nops.
- #[rustc_std_internal_symbol]
- #[cfg(all(target_os = "windows", target_env = "gnu", target_arch = "x86"))]
- pub extern "C" fn rust_eh_register_frames() {}
- #[rustc_std_internal_symbol]
- #[cfg(all(target_os = "windows", target_env = "gnu", target_arch = "x86"))]
- pub extern "C" fn rust_eh_unregister_frames() {}
}
diff --git a/library/panic_unwind/src/emcc.rs b/library/panic_unwind/src/emcc.rs
index 1ee69ff9c..c6d423085 100644
--- a/library/panic_unwind/src/emcc.rs
+++ b/library/panic_unwind/src/emcc.rs
@@ -12,7 +12,6 @@ use core::intrinsics;
use core::mem;
use core::ptr;
use core::sync::atomic::{AtomicBool, Ordering};
-use libc::{self, c_int};
use unwind as uw;
// This matches the layout of std::type_info in C++
@@ -48,7 +47,12 @@ static EXCEPTION_TYPE_INFO: TypeInfo = TypeInfo {
name: b"rust_panic\0".as_ptr(),
};
+// NOTE(nbdd0121): The `canary` field will be part of stable ABI after `c_unwind` stabilization.
+#[repr(C)]
struct Exception {
+    // See `gcc.rs` for why this is present. We already have a static here, so just use it.
+ canary: *const TypeInfo,
+
// This is necessary because C++ code can capture our exception with
// std::exception_ptr and rethrow it multiple times, possibly even in
// another thread.
@@ -71,27 +75,38 @@ pub unsafe fn cleanup(ptr: *mut u8) -> Box<dyn Any + Send> {
let catch_data = &*(ptr as *mut CatchData);
let adjusted_ptr = __cxa_begin_catch(catch_data.ptr as *mut libc::c_void) as *mut Exception;
- let out = if catch_data.is_rust_panic {
- let was_caught = (*adjusted_ptr).caught.swap(true, Ordering::SeqCst);
- if was_caught {
- // Since cleanup() isn't allowed to panic, we just abort instead.
- intrinsics::abort();
- }
- (*adjusted_ptr).data.take().unwrap()
- } else {
+ if !catch_data.is_rust_panic {
+ super::__rust_foreign_exception();
+ }
+
+ let canary = ptr::addr_of!((*adjusted_ptr).canary).read();
+ if !ptr::eq(canary, &EXCEPTION_TYPE_INFO) {
super::__rust_foreign_exception();
- };
+ }
+
+ let was_caught = (*adjusted_ptr).caught.swap(true, Ordering::SeqCst);
+ if was_caught {
+ // Since cleanup() isn't allowed to panic, we just abort instead.
+ intrinsics::abort();
+ }
+ let out = (*adjusted_ptr).data.take().unwrap();
__cxa_end_catch();
out
}
pub unsafe fn panic(data: Box<dyn Any + Send>) -> u32 {
- let sz = mem::size_of_val(&data);
- let exception = __cxa_allocate_exception(sz) as *mut Exception;
+ let exception = __cxa_allocate_exception(mem::size_of::<Exception>()) as *mut Exception;
if exception.is_null() {
return uw::_URC_FATAL_PHASE1_ERROR as u32;
}
- ptr::write(exception, Exception { caught: AtomicBool::new(false), data: Some(data) });
+ ptr::write(
+ exception,
+ Exception {
+ canary: &EXCEPTION_TYPE_INFO,
+ caught: AtomicBool::new(false),
+ data: Some(data),
+ },
+ );
__cxa_throw(exception as *mut _, &EXCEPTION_TYPE_INFO, exception_cleanup);
}
@@ -105,21 +120,6 @@ extern "C" fn exception_cleanup(ptr: *mut libc::c_void) -> *mut libc::c_void {
}
}
-// This is required by the compiler to exist (e.g., it's a lang item), but it's
-// never actually called by the compiler. Emscripten EH doesn't use a
-// personality function at all, it instead uses __cxa_find_matching_catch.
-// Wasm error handling would use __gxx_personality_wasm0.
-#[lang = "eh_personality"]
-unsafe extern "C" fn rust_eh_personality(
- _version: c_int,
- _actions: uw::_Unwind_Action,
- _exception_class: uw::_Unwind_Exception_Class,
- _exception_object: *mut uw::_Unwind_Exception,
- _context: *mut uw::_Unwind_Context,
-) -> uw::_Unwind_Reason_Code {
- core::intrinsics::abort()
-}
-
extern "C" {
fn __cxa_allocate_exception(thrown_size: libc::size_t) -> *mut libc::c_void;
fn __cxa_begin_catch(thrown_exception: *mut libc::c_void) -> *mut libc::c_void;
diff --git a/library/panic_unwind/src/gcc.rs b/library/panic_unwind/src/gcc.rs
index a59659231..0b7a873a6 100644
--- a/library/panic_unwind/src/gcc.rs
+++ b/library/panic_unwind/src/gcc.rs
@@ -38,14 +38,23 @@
use alloc::boxed::Box;
use core::any::Any;
+use core::ptr;
-use crate::dwarf::eh::{self, EHAction, EHContext};
-use libc::{c_int, uintptr_t};
use unwind as uw;
+// In case where multiple copies of std exist in a single process,
+// we use address of this static variable to distinguish an exception raised by
+// this copy and some other copy (which needs to be treated as foreign exception).
+static CANARY: u8 = 0;
+
+// NOTE(nbdd0121)
+// Once the `c_unwind` feature is stabilized, there will be an ABI stability requirement
+// on this struct. The first two fields must be `_Unwind_Exception` and `canary`,
+// as it may be accessed by a different version of std built with a different compiler.
#[repr(C)]
struct Exception {
_uwe: uw::_Unwind_Exception,
+ canary: *const u8,
cause: Box<dyn Any + Send>,
}
@@ -56,6 +65,7 @@ pub unsafe fn panic(data: Box<dyn Any + Send>) -> u32 {
exception_cleanup,
private: [0; uw::unwinder_private_data_size],
},
+ canary: &CANARY,
cause: data,
});
let exception_param = Box::into_raw(exception) as *mut uw::_Unwind_Exception;
@@ -77,10 +87,22 @@ pub unsafe fn cleanup(ptr: *mut u8) -> Box<dyn Any + Send> {
if (*exception).exception_class != rust_exception_class() {
uw::_Unwind_DeleteException(exception);
super::__rust_foreign_exception();
- } else {
- let exception = Box::from_raw(exception as *mut Exception);
- exception.cause
}
+
+ let exception = exception.cast::<Exception>();
+    // Access just the canary field; avoid accessing the entire `Exception`, as
+    // it can be a foreign Rust exception.
+ let canary = ptr::addr_of!((*exception).canary).read();
+ if !ptr::eq(canary, &CANARY) {
+        // A foreign Rust exception; treat it slightly differently from other
+        // foreign exceptions, because calling `_Unwind_DeleteException` would
+        // call `__rust_drop_panic`, which produces a confusing
+        // "Rust panic must be rethrown" message.
+ super::__rust_foreign_exception();
+ }
+
+ let exception = Box::from_raw(exception as *mut Exception);
+ exception.cause
}
// Rust's exception class identifier. This is used by personality routines to
@@ -89,263 +111,3 @@ fn rust_exception_class() -> uw::_Unwind_Exception_Class {
// M O Z \0 R U S T -- vendor, language
0x4d4f5a_00_52555354
}
-
-// Register ids were lifted from LLVM's TargetLowering::getExceptionPointerRegister()
-// and TargetLowering::getExceptionSelectorRegister() for each architecture,
-// then mapped to DWARF register numbers via register definition tables
-// (typically <arch>RegisterInfo.td, search for "DwarfRegNum").
-// See also https://llvm.org/docs/WritingAnLLVMBackend.html#defining-a-register.
-
-#[cfg(target_arch = "x86")]
-const UNWIND_DATA_REG: (i32, i32) = (0, 2); // EAX, EDX
-
-#[cfg(target_arch = "x86_64")]
-const UNWIND_DATA_REG: (i32, i32) = (0, 1); // RAX, RDX
-
-#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
-const UNWIND_DATA_REG: (i32, i32) = (0, 1); // R0, R1 / X0, X1
-
-#[cfg(target_arch = "m68k")]
-const UNWIND_DATA_REG: (i32, i32) = (0, 1); // D0, D1
-
-#[cfg(any(target_arch = "mips", target_arch = "mips64"))]
-const UNWIND_DATA_REG: (i32, i32) = (4, 5); // A0, A1
-
-#[cfg(any(target_arch = "powerpc", target_arch = "powerpc64"))]
-const UNWIND_DATA_REG: (i32, i32) = (3, 4); // R3, R4 / X3, X4
-
-#[cfg(target_arch = "s390x")]
-const UNWIND_DATA_REG: (i32, i32) = (6, 7); // R6, R7
-
-#[cfg(any(target_arch = "sparc", target_arch = "sparc64"))]
-const UNWIND_DATA_REG: (i32, i32) = (24, 25); // I0, I1
-
-#[cfg(target_arch = "hexagon")]
-const UNWIND_DATA_REG: (i32, i32) = (0, 1); // R0, R1
-
-#[cfg(any(target_arch = "riscv64", target_arch = "riscv32"))]
-const UNWIND_DATA_REG: (i32, i32) = (10, 11); // x10, x11
-
-// The following code is based on GCC's C and C++ personality routines. For reference, see:
-// https://github.com/gcc-mirror/gcc/blob/master/libstdc++-v3/libsupc++/eh_personality.cc
-// https://github.com/gcc-mirror/gcc/blob/trunk/libgcc/unwind-c.c
-
-cfg_if::cfg_if! {
- if #[cfg(all(target_arch = "arm", not(target_os = "ios"), not(target_os = "watchos"), not(target_os = "netbsd")))] {
- // ARM EHABI personality routine.
- // https://infocenter.arm.com/help/topic/com.arm.doc.ihi0038b/IHI0038B_ehabi.pdf
- //
- // iOS uses the default routine instead since it uses SjLj unwinding.
- #[lang = "eh_personality"]
- unsafe extern "C" fn rust_eh_personality(state: uw::_Unwind_State,
- exception_object: *mut uw::_Unwind_Exception,
- context: *mut uw::_Unwind_Context)
- -> uw::_Unwind_Reason_Code {
- let state = state as c_int;
- let action = state & uw::_US_ACTION_MASK as c_int;
- let search_phase = if action == uw::_US_VIRTUAL_UNWIND_FRAME as c_int {
- // Backtraces on ARM will call the personality routine with
- // state == _US_VIRTUAL_UNWIND_FRAME | _US_FORCE_UNWIND. In those cases
- // we want to continue unwinding the stack, otherwise all our backtraces
- // would end at __rust_try
- if state & uw::_US_FORCE_UNWIND as c_int != 0 {
- return continue_unwind(exception_object, context);
- }
- true
- } else if action == uw::_US_UNWIND_FRAME_STARTING as c_int {
- false
- } else if action == uw::_US_UNWIND_FRAME_RESUME as c_int {
- return continue_unwind(exception_object, context);
- } else {
- return uw::_URC_FAILURE;
- };
-
- // The DWARF unwinder assumes that _Unwind_Context holds things like the function
- // and LSDA pointers, however ARM EHABI places them into the exception object.
- // To preserve signatures of functions like _Unwind_GetLanguageSpecificData(), which
- // take only the context pointer, GCC personality routines stash a pointer to
- // exception_object in the context, using location reserved for ARM's
- // "scratch register" (r12).
- uw::_Unwind_SetGR(context,
- uw::UNWIND_POINTER_REG,
- exception_object as uw::_Unwind_Ptr);
- // ...A more principled approach would be to provide the full definition of ARM's
- // _Unwind_Context in our libunwind bindings and fetch the required data from there
- // directly, bypassing DWARF compatibility functions.
-
- let eh_action = match find_eh_action(context) {
- Ok(action) => action,
- Err(_) => return uw::_URC_FAILURE,
- };
- if search_phase {
- match eh_action {
- EHAction::None |
- EHAction::Cleanup(_) => return continue_unwind(exception_object, context),
- EHAction::Catch(_) => {
- // EHABI requires the personality routine to update the
- // SP value in the barrier cache of the exception object.
- (*exception_object).private[5] =
- uw::_Unwind_GetGR(context, uw::UNWIND_SP_REG);
- return uw::_URC_HANDLER_FOUND;
- }
- EHAction::Terminate => return uw::_URC_FAILURE,
- }
- } else {
- match eh_action {
- EHAction::None => return continue_unwind(exception_object, context),
- EHAction::Cleanup(lpad) |
- EHAction::Catch(lpad) => {
- uw::_Unwind_SetGR(context, UNWIND_DATA_REG.0,
- exception_object as uintptr_t);
- uw::_Unwind_SetGR(context, UNWIND_DATA_REG.1, 0);
- uw::_Unwind_SetIP(context, lpad);
- return uw::_URC_INSTALL_CONTEXT;
- }
- EHAction::Terminate => return uw::_URC_FAILURE,
- }
- }
-
- // On ARM EHABI the personality routine is responsible for actually
- // unwinding a single stack frame before returning (ARM EHABI Sec. 6.1).
- unsafe fn continue_unwind(exception_object: *mut uw::_Unwind_Exception,
- context: *mut uw::_Unwind_Context)
- -> uw::_Unwind_Reason_Code {
- if __gnu_unwind_frame(exception_object, context) == uw::_URC_NO_REASON {
- uw::_URC_CONTINUE_UNWIND
- } else {
- uw::_URC_FAILURE
- }
- }
- // defined in libgcc
- extern "C" {
- fn __gnu_unwind_frame(exception_object: *mut uw::_Unwind_Exception,
- context: *mut uw::_Unwind_Context)
- -> uw::_Unwind_Reason_Code;
- }
- }
- } else {
- // Default personality routine, which is used directly on most targets
- // and indirectly on Windows x86_64 via SEH.
- unsafe extern "C" fn rust_eh_personality_impl(version: c_int,
- actions: uw::_Unwind_Action,
- _exception_class: uw::_Unwind_Exception_Class,
- exception_object: *mut uw::_Unwind_Exception,
- context: *mut uw::_Unwind_Context)
- -> uw::_Unwind_Reason_Code {
- if version != 1 {
- return uw::_URC_FATAL_PHASE1_ERROR;
- }
- let eh_action = match find_eh_action(context) {
- Ok(action) => action,
- Err(_) => return uw::_URC_FATAL_PHASE1_ERROR,
- };
- if actions as i32 & uw::_UA_SEARCH_PHASE as i32 != 0 {
- match eh_action {
- EHAction::None |
- EHAction::Cleanup(_) => uw::_URC_CONTINUE_UNWIND,
- EHAction::Catch(_) => uw::_URC_HANDLER_FOUND,
- EHAction::Terminate => uw::_URC_FATAL_PHASE1_ERROR,
- }
- } else {
- match eh_action {
- EHAction::None => uw::_URC_CONTINUE_UNWIND,
- EHAction::Cleanup(lpad) |
- EHAction::Catch(lpad) => {
- uw::_Unwind_SetGR(context, UNWIND_DATA_REG.0,
- exception_object as uintptr_t);
- uw::_Unwind_SetGR(context, UNWIND_DATA_REG.1, 0);
- uw::_Unwind_SetIP(context, lpad);
- uw::_URC_INSTALL_CONTEXT
- }
- EHAction::Terminate => uw::_URC_FATAL_PHASE2_ERROR,
- }
- }
- }
-
- cfg_if::cfg_if! {
- if #[cfg(all(windows, target_arch = "x86_64", target_env = "gnu"))] {
- // On x86_64 MinGW targets, the unwinding mechanism is SEH however the unwind
- // handler data (aka LSDA) uses GCC-compatible encoding.
- #[lang = "eh_personality"]
- #[allow(nonstandard_style)]
- unsafe extern "C" fn rust_eh_personality(exceptionRecord: *mut uw::EXCEPTION_RECORD,
- establisherFrame: uw::LPVOID,
- contextRecord: *mut uw::CONTEXT,
- dispatcherContext: *mut uw::DISPATCHER_CONTEXT)
- -> uw::EXCEPTION_DISPOSITION {
- uw::_GCC_specific_handler(exceptionRecord,
- establisherFrame,
- contextRecord,
- dispatcherContext,
- rust_eh_personality_impl)
- }
- } else {
- // The personality routine for most of our targets.
- #[lang = "eh_personality"]
- unsafe extern "C" fn rust_eh_personality(version: c_int,
- actions: uw::_Unwind_Action,
- exception_class: uw::_Unwind_Exception_Class,
- exception_object: *mut uw::_Unwind_Exception,
- context: *mut uw::_Unwind_Context)
- -> uw::_Unwind_Reason_Code {
- rust_eh_personality_impl(version,
- actions,
- exception_class,
- exception_object,
- context)
- }
- }
- }
- }
-}
-
-unsafe fn find_eh_action(context: *mut uw::_Unwind_Context) -> Result<EHAction, ()> {
- let lsda = uw::_Unwind_GetLanguageSpecificData(context) as *const u8;
- let mut ip_before_instr: c_int = 0;
- let ip = uw::_Unwind_GetIPInfo(context, &mut ip_before_instr);
- let eh_context = EHContext {
- // The return address points 1 byte past the call instruction,
- // which could be in the next IP range in LSDA range table.
- //
- // `ip = -1` has special meaning, so use wrapping sub to allow for that
- ip: if ip_before_instr != 0 { ip } else { ip.wrapping_sub(1) },
- func_start: uw::_Unwind_GetRegionStart(context),
- get_text_start: &|| uw::_Unwind_GetTextRelBase(context),
- get_data_start: &|| uw::_Unwind_GetDataRelBase(context),
- };
- eh::find_eh_action(lsda, &eh_context)
-}
-
-// Frame unwind info registration
-//
-// Each module's image contains a frame unwind info section (usually
-// ".eh_frame"). When a module is loaded/unloaded into the process, the
-// unwinder must be informed about the location of this section in memory. The
-// methods of achieving that vary by the platform. On some (e.g., Linux), the
-// unwinder can discover unwind info sections on its own (by dynamically
-// enumerating currently loaded modules via the dl_iterate_phdr() API and
-// finding their ".eh_frame" sections); Others, like Windows, require modules
-// to actively register their unwind info sections via unwinder API.
-//
-// This module defines two symbols which are referenced and called from
-// rsbegin.rs to register our information with the GCC runtime. The
-// implementation of stack unwinding is (for now) deferred to libgcc_eh, however
-// Rust crates use these Rust-specific entry points to avoid potential clashes
-// with any GCC runtime.
-#[cfg(all(target_os = "windows", target_arch = "x86", target_env = "gnu"))]
-pub mod eh_frame_registry {
- extern "C" {
- fn __register_frame_info(eh_frame_begin: *const u8, object: *mut u8);
- fn __deregister_frame_info(eh_frame_begin: *const u8, object: *mut u8);
- }
-
- #[rustc_std_internal_symbol]
- pub unsafe extern "C" fn rust_eh_register_frames(eh_frame_begin: *const u8, object: *mut u8) {
- __register_frame_info(eh_frame_begin, object);
- }
-
- #[rustc_std_internal_symbol]
- pub unsafe extern "C" fn rust_eh_unregister_frames(eh_frame_begin: *const u8, object: *mut u8) {
- __deregister_frame_info(eh_frame_begin, object);
- }
-}
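The `canary` fields added in `emcc.rs`, `gcc.rs`, and `seh.rs` above all rely on the same idea: the address of a private static identifies this copy of std, so an exception payload carrying that address can later be distinguished from one raised by a different copy. A minimal standalone sketch of that pointer-identity check (not the std implementation itself):

    use std::ptr;

    // The address of this static identifies "our" copy of the code.
    static CANARY: u8 = 0;

    struct Payload {
        canary: *const u8,
        value: i32,
    }

    fn make_payload(value: i32) -> Payload {
        // Stash the canary's address alongside the data when the payload is created.
        Payload { canary: &CANARY, value }
    }

    fn is_ours(p: &Payload) -> bool {
        // Compare addresses, not contents: another copy of this code would have
        // its own CANARY at a different address.
        ptr::eq(p.canary, &CANARY)
    }

    fn main() {
        let p = make_payload(42);
        assert!(is_ours(&p));
        println!("payload value: {}", p.value);
    }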
diff --git a/library/panic_unwind/src/lib.rs b/library/panic_unwind/src/lib.rs
index f9acb42c4..7e7180a38 100644
--- a/library/panic_unwind/src/lib.rs
+++ b/library/panic_unwind/src/lib.rs
@@ -42,7 +42,8 @@ cfg_if::cfg_if! {
// L4Re is unix family but does not yet support unwinding.
#[path = "dummy.rs"]
mod real_imp;
- } else if #[cfg(target_env = "msvc")] {
+ } else if #[cfg(all(target_env = "msvc", not(target_arch = "arm")))] {
+        // LLVM does not support unwinding on 32-bit ARM MSVC (thumbv7a-pc-windows-msvc)
#[path = "seh.rs"]
mod real_imp;
} else if #[cfg(any(
@@ -52,9 +53,6 @@ cfg_if::cfg_if! {
all(target_family = "unix", not(target_os = "espidf")),
all(target_vendor = "fortanix", target_env = "sgx"),
))] {
- // Rust runtime's startup objects depend on these symbols, so make them public.
- #[cfg(all(target_os="windows", target_arch = "x86", target_env="gnu"))]
- pub use real_imp::eh_frame_registry::*;
#[path = "gcc.rs"]
mod real_imp;
} else {
@@ -92,8 +90,6 @@ extern "C" {
fn __rust_foreign_exception() -> !;
}
-mod dwarf;
-
#[rustc_std_internal_symbol]
#[allow(improper_ctypes_definitions)]
pub unsafe extern "C" fn __rust_panic_cleanup(payload: *mut u8) -> *mut (dyn Any + Send + 'static) {
diff --git a/library/panic_unwind/src/seh.rs b/library/panic_unwind/src/seh.rs
index 9f1eb411f..651115a82 100644
--- a/library/panic_unwind/src/seh.rs
+++ b/library/panic_unwind/src/seh.rs
@@ -49,9 +49,15 @@
use alloc::boxed::Box;
use core::any::Any;
use core::mem::{self, ManuallyDrop};
+use core::ptr;
use libc::{c_int, c_uint, c_void};
+// NOTE(nbdd0121): The `canary` field will be part of stable ABI after `c_unwind` stabilization.
+#[repr(C)]
struct Exception {
+    // See `gcc.rs` for why this is present. We already have a static here, so just use it.
+ canary: *const _TypeDescriptor,
+
// This needs to be an Option because we catch the exception by reference
// and its destructor is executed by the C++ runtime. When we take the Box
// out of the exception, we need to leave the exception in a valid state
@@ -235,7 +241,7 @@ static mut TYPE_DESCRIPTOR: _TypeDescriptor = _TypeDescriptor {
macro_rules! define_cleanup {
($abi:tt $abi2:tt) => {
unsafe extern $abi fn exception_cleanup(e: *mut Exception) {
- if let Exception { data: Some(b) } = e.read() {
+ if let Exception { data: Some(b), .. } = e.read() {
drop(b);
super::__rust_drop_panic();
}
@@ -256,7 +262,7 @@ cfg_if::cfg_if! {
}
pub unsafe fn panic(data: Box<dyn Any + Send>) -> u32 {
- use core::intrinsics::atomic_store;
+ use core::intrinsics::atomic_store_seqcst;
// _CxxThrowException executes entirely on this stack frame, so there's no
// need to otherwise transfer `data` to the heap. We just pass a stack
@@ -265,7 +271,7 @@ pub unsafe fn panic(data: Box<dyn Any + Send>) -> u32 {
// The ManuallyDrop is needed here since we don't want Exception to be
// dropped when unwinding. Instead it will be dropped by exception_cleanup
// which is invoked by the C++ runtime.
- let mut exception = ManuallyDrop::new(Exception { data: Some(data) });
+ let mut exception = ManuallyDrop::new(Exception { canary: &TYPE_DESCRIPTOR, data: Some(data) });
let throw_ptr = &mut exception as *mut _ as *mut _;
    // This... may seem surprising, and justifiably so. On 32-bit MSVC the
@@ -288,20 +294,23 @@ pub unsafe fn panic(data: Box<dyn Any + Send>) -> u32 {
//
// In any case, we basically need to do something like this until we can
// express more operations in statics (and we may never be able to).
- atomic_store(&mut THROW_INFO.pmfnUnwind as *mut _ as *mut u32, ptr!(exception_cleanup) as u32);
- atomic_store(
+ atomic_store_seqcst(
+ &mut THROW_INFO.pmfnUnwind as *mut _ as *mut u32,
+ ptr!(exception_cleanup) as u32,
+ );
+ atomic_store_seqcst(
&mut THROW_INFO.pCatchableTypeArray as *mut _ as *mut u32,
ptr!(&CATCHABLE_TYPE_ARRAY as *const _) as u32,
);
- atomic_store(
+ atomic_store_seqcst(
&mut CATCHABLE_TYPE_ARRAY.arrayOfCatchableTypes[0] as *mut _ as *mut u32,
ptr!(&CATCHABLE_TYPE as *const _) as u32,
);
- atomic_store(
+ atomic_store_seqcst(
&mut CATCHABLE_TYPE.pType as *mut _ as *mut u32,
ptr!(&TYPE_DESCRIPTOR as *const _) as u32,
);
- atomic_store(
+ atomic_store_seqcst(
&mut CATCHABLE_TYPE.copyFunction as *mut _ as *mut u32,
ptr!(exception_copy) as u32,
);
@@ -318,18 +327,12 @@ pub unsafe fn cleanup(payload: *mut u8) -> Box<dyn Any + Send> {
// __rust_try. This happens when a non-Rust foreign exception is caught.
if payload.is_null() {
super::__rust_foreign_exception();
- } else {
- let exception = &mut *(payload as *mut Exception);
- exception.data.take().unwrap()
}
-}
-
-// This is required by the compiler to exist (e.g., it's a lang item), but
-// it's never actually called by the compiler because __C_specific_handler
-// or _except_handler3 is the personality function that is always used.
-// Hence this is just an aborting stub.
-#[lang = "eh_personality"]
-#[cfg(not(test))]
-fn rust_eh_personality() {
- core::intrinsics::abort()
+ let exception = payload as *mut Exception;
+ let canary = ptr::addr_of!((*exception).canary).read();
+ if !ptr::eq(canary, &TYPE_DESCRIPTOR) {
+ // A foreign Rust exception.
+ super::__rust_foreign_exception();
+ }
+ (*exception).data.take().unwrap()
}
diff --git a/library/portable-simd/crates/core_simd/src/masks/to_bitmask.rs b/library/portable-simd/crates/core_simd/src/masks/to_bitmask.rs
index 65d3ce9be..2235f016c 100644
--- a/library/portable-simd/crates/core_simd/src/masks/to_bitmask.rs
+++ b/library/portable-simd/crates/core_simd/src/masks/to_bitmask.rs
@@ -70,7 +70,7 @@ impl_integer_intrinsic! {
impl ToBitMask<BitMask=u64> for Mask<_, 64>
}
-/// Returns the minimum numnber of bytes in a bitmask with `lanes` lanes.
+/// Returns the minimum number of bytes in a bitmask with `lanes` lanes.
#[cfg(feature = "generic_const_exprs")]
pub const fn bitmask_len(lanes: usize) -> usize {
(lanes + 7) / 8
diff --git a/library/portable-simd/crates/std_float/src/lib.rs b/library/portable-simd/crates/std_float/src/lib.rs
index 4bd4d4c05..4ac60b10c 100644
--- a/library/portable-simd/crates/std_float/src/lib.rs
+++ b/library/portable-simd/crates/std_float/src/lib.rs
@@ -1,9 +1,5 @@
#![cfg_attr(feature = "as_crate", no_std)] // We are std!
-#![cfg_attr(
- feature = "as_crate",
- feature(platform_intrinsics),
- feature(portable_simd)
-)]
+#![cfg_attr(feature = "as_crate", feature(platform_intrinsics), feature(portable_simd))]
#[cfg(not(feature = "as_crate"))]
use core::simd;
#[cfg(feature = "as_crate")]
diff --git a/library/proc_macro/src/bridge/client.rs b/library/proc_macro/src/bridge/client.rs
index 1516f084a..506b2a773 100644
--- a/library/proc_macro/src/bridge/client.rs
+++ b/library/proc_macro/src/bridge/client.rs
@@ -176,8 +176,6 @@ define_handles! {
FreeFunctions,
TokenStream,
SourceFile,
- MultiSpan,
- Diagnostic,
'interned:
Span,
@@ -225,10 +223,10 @@ pub(crate) use super::symbol::Symbol;
macro_rules! define_client_side {
($($name:ident {
- $(fn $method:ident($($arg:ident: $arg_ty:ty),* $(,)?) $(-> $ret_ty:ty)*;)*
+ $(fn $method:ident($($arg:ident: $arg_ty:ty),* $(,)?) $(-> $ret_ty:ty)?;)*
}),* $(,)?) => {
$(impl $name {
- $(pub(crate) fn $method($($arg: $arg_ty),*) $(-> $ret_ty)* {
+ $(pub(crate) fn $method($($arg: $arg_ty),*) $(-> $ret_ty)? {
Bridge::with(|bridge| {
let mut buf = bridge.cached_buffer.take();
diff --git a/library/proc_macro/src/bridge/mod.rs b/library/proc_macro/src/bridge/mod.rs
index 5cde966bf..4c1e196b5 100644
--- a/library/proc_macro/src/bridge/mod.rs
+++ b/library/proc_macro/src/bridge/mod.rs
@@ -57,6 +57,7 @@ macro_rules! with_api {
fn track_env_var(var: &str, value: Option<&str>);
fn track_path(path: &str);
fn literal_from_str(s: &str) -> Result<Literal<$S::Span, $S::Symbol>, ()>;
+ fn emit_diagnostic(diagnostic: Diagnostic<$S::Span>);
},
TokenStream {
fn drop($self: $S::TokenStream);
@@ -87,22 +88,6 @@ macro_rules! with_api {
fn path($self: &$S::SourceFile) -> String;
fn is_real($self: &$S::SourceFile) -> bool;
},
- MultiSpan {
- fn drop($self: $S::MultiSpan);
- fn new() -> $S::MultiSpan;
- fn push($self: &mut $S::MultiSpan, span: $S::Span);
- },
- Diagnostic {
- fn drop($self: $S::Diagnostic);
- fn new(level: Level, msg: &str, span: $S::MultiSpan) -> $S::Diagnostic;
- fn sub(
- $self: &mut $S::Diagnostic,
- level: Level,
- msg: &str,
- span: $S::MultiSpan,
- );
- fn emit($self: $S::Diagnostic);
- },
Span {
fn debug($self: $S::Span) -> String;
fn source_file($self: $S::Span) -> $S::SourceFile;
@@ -510,6 +495,18 @@ compound_traits!(
}
);
+#[derive(Clone, Debug)]
+pub struct Diagnostic<Span> {
+ pub level: Level,
+ pub message: String,
+ pub spans: Vec<Span>,
+ pub children: Vec<Diagnostic<Span>>,
+}
+
+compound_traits!(
+ struct Diagnostic<Span> { level, message, spans, children }
+);
+
/// Globals provided alongside the initial inputs for a macro expansion.
/// Provides values such as spans which are used frequently to avoid RPC.
#[derive(Clone)]
diff --git a/library/proc_macro/src/bridge/server.rs b/library/proc_macro/src/bridge/server.rs
index e068ec60b..8202c40d6 100644
--- a/library/proc_macro/src/bridge/server.rs
+++ b/library/proc_macro/src/bridge/server.rs
@@ -2,6 +2,7 @@
use super::*;
+use std::cell::Cell;
use std::marker::PhantomData;
// FIXME(eddyb) generate the definition of `HandleStore` in `server.rs`.
@@ -11,8 +12,6 @@ pub trait Types {
type FreeFunctions: 'static;
type TokenStream: 'static + Clone;
type SourceFile: 'static + Clone;
- type MultiSpan: 'static;
- type Diagnostic: 'static;
type Span: 'static + Copy + Eq + Hash;
type Symbol: 'static;
}
@@ -145,6 +144,38 @@ pub trait ExecutionStrategy {
) -> Buffer;
}
+thread_local! {
+ /// While running a proc-macro with the same-thread executor, this flag will
+ /// be set, forcing nested proc-macro invocations (e.g. due to
+ /// `TokenStream::expand_expr`) to be run using a cross-thread executor.
+ ///
+ /// This is required as the thread-local state in the proc_macro client does
+ /// not handle being re-entered, and will invalidate all `Symbol`s when
+ /// entering a nested macro.
+ static ALREADY_RUNNING_SAME_THREAD: Cell<bool> = Cell::new(false);
+}
+
+/// Keep `ALREADY_RUNNING_SAME_THREAD` (see also its documentation)
+/// set to `true`, preventing same-thread reentrance.
+struct RunningSameThreadGuard(());
+
+impl RunningSameThreadGuard {
+ fn new() -> Self {
+ let already_running = ALREADY_RUNNING_SAME_THREAD.replace(true);
+ assert!(
+ !already_running,
+ "same-thread nesting (\"reentrance\") of proc macro executions is not supported"
+ );
+ RunningSameThreadGuard(())
+ }
+}
+
+impl Drop for RunningSameThreadGuard {
+ fn drop(&mut self) {
+ ALREADY_RUNNING_SAME_THREAD.set(false);
+ }
+}
+
pub struct MaybeCrossThread<P> {
cross_thread: bool,
marker: PhantomData<P>,
@@ -167,7 +198,7 @@ where
run_client: extern "C" fn(BridgeConfig<'_>) -> Buffer,
force_show_panics: bool,
) -> Buffer {
- if self.cross_thread {
+ if self.cross_thread || ALREADY_RUNNING_SAME_THREAD.get() {
<CrossThread<P>>::new().run_bridge_and_client(
dispatcher,
input,
@@ -190,6 +221,8 @@ impl ExecutionStrategy for SameThread {
run_client: extern "C" fn(BridgeConfig<'_>) -> Buffer,
force_show_panics: bool,
) -> Buffer {
+ let _guard = RunningSameThreadGuard::new();
+
let mut dispatch = |buf| dispatcher.dispatch(buf);
run_client(BridgeConfig {
diff --git a/library/proc_macro/src/diagnostic.rs b/library/proc_macro/src/diagnostic.rs
index 6e46dc036..5a209f7c7 100644
--- a/library/proc_macro/src/diagnostic.rs
+++ b/library/proc_macro/src/diagnostic.rs
@@ -161,22 +161,15 @@ impl Diagnostic {
/// Emit the diagnostic.
#[unstable(feature = "proc_macro_diagnostic", issue = "54140")]
pub fn emit(self) {
- fn to_internal(spans: Vec<Span>) -> crate::bridge::client::MultiSpan {
- let mut multi_span = crate::bridge::client::MultiSpan::new();
- for span in spans {
- multi_span.push(span.0);
+ fn to_internal(diag: Diagnostic) -> crate::bridge::Diagnostic<crate::bridge::client::Span> {
+ crate::bridge::Diagnostic {
+ level: diag.level,
+ message: diag.message,
+ spans: diag.spans.into_iter().map(|s| s.0).collect(),
+ children: diag.children.into_iter().map(to_internal).collect(),
}
- multi_span
}
- let mut diag = crate::bridge::client::Diagnostic::new(
- self.level,
- &self.message[..],
- to_internal(self.spans),
- );
- for c in self.children {
- diag.sub(c.level, &c.message[..], to_internal(c.spans));
- }
- diag.emit();
+ crate::bridge::client::FreeFunctions::emit_diagnostic(to_internal(self));
}
}
diff --git a/library/proc_macro/src/lib.rs b/library/proc_macro/src/lib.rs
index 8e478cd7b..0d3fc2c52 100644
--- a/library/proc_macro/src/lib.rs
+++ b/library/proc_macro/src/lib.rs
@@ -533,7 +533,7 @@ impl Span {
other.resolved_at(*self)
}
- /// Compares to spans to see if they're equal.
+ /// Compares two spans to see if they're equal.
#[unstable(feature = "proc_macro_span", issue = "54725")]
pub fn eq(&self, other: &Span) -> bool {
self.0 == other.0
@@ -546,7 +546,7 @@ impl Span {
/// Note: The observable result of a macro should only rely on the tokens and
/// not on this source text. The result of this function is a best effort to
/// be used for diagnostics only.
- #[unstable(feature = "proc_macro_span", issue = "54725")]
+ #[stable(feature = "proc_macro_source_text", since = "1.66.0")]
pub fn source_text(&self) -> Option<String> {
self.0.source_text()
}
@@ -1353,12 +1353,7 @@ impl Literal {
/// Byte string literal.
#[stable(feature = "proc_macro_lib2", since = "1.29.0")]
pub fn byte_string(bytes: &[u8]) -> Literal {
- let string = bytes
- .iter()
- .cloned()
- .flat_map(std::ascii::escape_default)
- .map(Into::<char>::into)
- .collect::<String>();
+ let string = bytes.escape_ascii().to_string();
Literal::new(bridge::LitKind::ByteStr, &string, None)
}
diff --git a/library/rtstartup/rsbegin.rs b/library/rtstartup/rsbegin.rs
index c6a4548ec..1df0c8970 100644
--- a/library/rtstartup/rsbegin.rs
+++ b/library/rtstartup/rsbegin.rs
@@ -35,6 +35,16 @@ pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
drop_in_place(to_drop);
}
+// Frame unwind info registration
+//
+// Each module's image contains a frame unwind info section (usually
+// ".eh_frame"). When a module is loaded/unloaded into the process, the
+// unwinder must be informed about the location of this section in memory. The
+// methods of achieving that vary by the platform. On some (e.g., Linux), the
+// unwinder can discover unwind info sections on its own (by dynamically
+// enumerating currently loaded modules via the dl_iterate_phdr() API and
+// finding their ".eh_frame" sections); Others, like Windows, require modules
+// to actively register their unwind info sections via unwinder API.
#[cfg(all(target_os = "windows", target_arch = "x86", target_env = "gnu"))]
pub mod eh_frames {
#[no_mangle]
@@ -62,20 +72,19 @@ pub mod eh_frames {
}
// Unwind info registration/deregistration routines.
- // See the docs of libpanic_unwind.
extern "C" {
- fn rust_eh_register_frames(eh_frame_begin: *const u8, object: *mut u8);
- fn rust_eh_unregister_frames(eh_frame_begin: *const u8, object: *mut u8);
+ fn __register_frame_info(eh_frame_begin: *const u8, object: *mut u8);
+ fn __deregister_frame_info(eh_frame_begin: *const u8, object: *mut u8);
}
unsafe extern "C" fn init() {
// register unwind info on module startup
- rust_eh_register_frames(&__EH_FRAME_BEGIN__ as *const u8, &mut OBJ as *mut _ as *mut u8);
+ __register_frame_info(&__EH_FRAME_BEGIN__ as *const u8, &mut OBJ as *mut _ as *mut u8);
}
unsafe extern "C" fn uninit() {
// unregister on shutdown
- rust_eh_unregister_frames(&__EH_FRAME_BEGIN__ as *const u8, &mut OBJ as *mut _ as *mut u8);
+ __deregister_frame_info(&__EH_FRAME_BEGIN__ as *const u8, &mut OBJ as *mut _ as *mut u8);
}
// MinGW-specific init/uninit routine registration
diff --git a/library/std/Cargo.toml b/library/std/Cargo.toml
index 229e546e0..bc10b12ec 100644
--- a/library/std/Cargo.toml
+++ b/library/std/Cargo.toml
@@ -11,11 +11,11 @@ crate-type = ["dylib", "rlib"]
[dependencies]
alloc = { path = "../alloc" }
-cfg-if = { version = "0.1.8", features = ['rustc-dep-of-std'] }
+cfg-if = { version = "1.0", features = ['rustc-dep-of-std'] }
panic_unwind = { path = "../panic_unwind", optional = true }
panic_abort = { path = "../panic_abort" }
core = { path = "../core" }
-libc = { version = "0.2.126", default-features = false, features = ['rustc-dep-of-std'] }
+libc = { version = "0.2.135", default-features = false, features = ['rustc-dep-of-std'] }
compiler_builtins = { version = "0.1.73" }
profiler_builtins = { path = "../profiler_builtins", optional = true }
unwind = { path = "../unwind" }
@@ -39,10 +39,10 @@ rand = "0.7"
dlmalloc = { version = "0.2.3", features = ['rustc-dep-of-std'] }
[target.x86_64-fortanix-unknown-sgx.dependencies]
-fortanix-sgx-abi = { version = "0.3.2", features = ['rustc-dep-of-std'] }
+fortanix-sgx-abi = { version = "0.5.0", features = ['rustc-dep-of-std'] }
[target.'cfg(target_os = "hermit")'.dependencies]
-hermit-abi = { version = "0.2.0", features = ['rustc-dep-of-std'] }
+hermit-abi = { version = "0.2.6", features = ['rustc-dep-of-std'] }
[target.wasm32-wasi.dependencies]
wasi = { version = "0.11.0", features = ['rustc-dep-of-std'], default-features = false }
diff --git a/library/std/src/alloc.rs b/library/std/src/alloc.rs
index a05e0db3a..61c1ff578 100644
--- a/library/std/src/alloc.rs
+++ b/library/std/src/alloc.rs
@@ -68,7 +68,10 @@ pub use alloc_crate::alloc::*;
/// The default memory allocator provided by the operating system.
///
/// This is based on `malloc` on Unix platforms and `HeapAlloc` on Windows,
-/// plus related functions.
+/// plus related functions. However, it is not valid to mix use of the backing
+/// system allocator with `System`, as this implementation may include extra
+/// work, such as to serve alignment requests greater than the alignment
+/// provided directly by the backing system allocator.
///
/// This type implements the `GlobalAlloc` trait and Rust programs by default
/// work as if they had this definition:
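The doc comment above ends at the hunk boundary just before the definition it refers to. To illustrate its point about not mixing allocators, a program can opt into `System` as its global allocator explicitly; memory obtained this way must be released through Rust's allocation APIs, never handed to the platform allocator's `free` directly. A hedged usage sketch:

    use std::alloc::System;

    // Route every allocation in this program through `System`. Memory allocated
    // here must also be freed through Rust's allocator (typically by letting the
    // owning value drop), not by calling the platform allocator's free() directly.
    #[global_allocator]
    static GLOBAL: System = System;

    fn main() {
        let v = vec![1u8, 2, 3];
        assert_eq!(v.iter().sum::<u8>(), 6);
    }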
diff --git a/library/std/src/backtrace.rs b/library/std/src/backtrace.rs
index 05e9b2eb6..9cb74f951 100644
--- a/library/std/src/backtrace.rs
+++ b/library/std/src/backtrace.rs
@@ -9,19 +9,13 @@
//! implementing `std::error::Error`) to get a causal chain of where an error
//! was generated.
//!
-//! > **Note**: this module is unstable and is designed in [RFC 2504], and you
-//! > can learn more about its status in the [tracking issue].
-//!
-//! [RFC 2504]: https://github.com/rust-lang/rfcs/blob/master/text/2504-fix-error.md
-//! [tracking issue]: https://github.com/rust-lang/rust/issues/53487
-//!
//! ## Accuracy
//!
//! Backtraces are attempted to be as accurate as possible, but no guarantees
//! are provided about the exact accuracy of a backtrace. Instruction pointers,
//! symbol names, filenames, line numbers, etc, may all be incorrect when
-//! reported. Accuracy is attempted on a best-effort basis, however, and bugs
-//! are always welcome to indicate areas of improvement!
+//! reported. Accuracy is attempted on a best-effort basis; however, any bug
+//! reports are always welcome to indicate areas of improvement!
//!
//! For most platforms a backtrace with a filename/line number requires that
//! programs be compiled with debug information. Without debug information
@@ -45,7 +39,7 @@
//! default. Its behavior is governed by two environment variables:
//!
//! * `RUST_LIB_BACKTRACE` - if this is set to `0` then `Backtrace::capture`
-//! will never capture a backtrace. Any other value this is set to will enable
+//! will never capture a backtrace. Any other value will enable
//! `Backtrace::capture`.
//!
//! * `RUST_BACKTRACE` - if `RUST_LIB_BACKTRACE` is not set, then this variable
@@ -64,7 +58,7 @@
//! `RUST_LIB_BACKTRACE` or `RUST_BACKTRACE` at runtime might not actually change
//! how backtraces are captured.
-#![unstable(feature = "backtrace", issue = "53487")]
+#![stable(feature = "backtrace", since = "1.65.0")]
#[cfg(test)]
mod tests;
@@ -110,6 +104,7 @@ use crate::vec::Vec;
/// previous point in time. In some instances the `Backtrace` type may
/// internally be empty due to configuration. For more information see
/// `Backtrace::capture`.
+#[stable(feature = "backtrace", since = "1.65.0")]
#[must_use]
pub struct Backtrace {
inner: Inner,
@@ -117,17 +112,21 @@ pub struct Backtrace {
/// The current status of a backtrace, indicating whether it was captured or
/// whether it is empty for some other reason.
+#[stable(feature = "backtrace", since = "1.65.0")]
#[non_exhaustive]
#[derive(Debug, PartialEq, Eq)]
pub enum BacktraceStatus {
/// Capturing a backtrace is not supported, likely because it's not
/// implemented for the current platform.
+ #[stable(feature = "backtrace", since = "1.65.0")]
Unsupported,
/// Capturing a backtrace has been disabled through either the
/// `RUST_LIB_BACKTRACE` or `RUST_BACKTRACE` environment variables.
+ #[stable(feature = "backtrace", since = "1.65.0")]
Disabled,
/// A backtrace has been captured and the `Backtrace` should print
/// reasonable information when rendered.
+ #[stable(feature = "backtrace", since = "1.65.0")]
Captured,
}
@@ -174,6 +173,7 @@ enum BytesOrWide {
Wide(Vec<u16>),
}
+#[stable(feature = "backtrace", since = "1.65.0")]
impl fmt::Debug for Backtrace {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
let capture = match &self.inner {
@@ -200,6 +200,7 @@ impl fmt::Debug for Backtrace {
}
}
+#[unstable(feature = "backtrace_frames", issue = "79676")]
impl fmt::Debug for BacktraceFrame {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut dbg = fmt.debug_list();
@@ -288,6 +289,7 @@ impl Backtrace {
///
/// To forcibly capture a backtrace regardless of environment variables, use
/// the `Backtrace::force_capture` function.
+ #[stable(feature = "backtrace", since = "1.65.0")]
#[inline(never)] // want to make sure there's a frame here to remove
pub fn capture() -> Backtrace {
if !Backtrace::enabled() {
@@ -306,6 +308,7 @@ impl Backtrace {
/// Note that capturing a backtrace can be an expensive operation on some
/// platforms, so this should be used with caution in performance-sensitive
/// parts of code.
+ #[stable(feature = "backtrace", since = "1.65.0")]
#[inline(never)] // want to make sure there's a frame here to remove
pub fn force_capture() -> Backtrace {
Backtrace::create(Backtrace::force_capture as usize)
@@ -313,6 +316,8 @@ impl Backtrace {
/// Forcibly captures a disabled backtrace, regardless of environment
/// variable configuration.
+ #[stable(feature = "backtrace", since = "1.65.0")]
+ #[rustc_const_stable(feature = "backtrace", since = "1.65.0")]
pub const fn disabled() -> Backtrace {
Backtrace { inner: Inner::Disabled }
}
@@ -320,8 +325,7 @@ impl Backtrace {
// Capture a backtrace which start just before the function addressed by
// `ip`
fn create(ip: usize) -> Backtrace {
- // SAFETY: We don't attempt to lock this reentrantly.
- let _lock = unsafe { lock() };
+ let _lock = lock();
let mut frames = Vec::new();
let mut actual_start = None;
unsafe {
@@ -356,6 +360,7 @@ impl Backtrace {
/// Returns the status of this backtrace, indicating whether this backtrace
/// request was unsupported, disabled, or a stack trace was actually
/// captured.
+ #[stable(feature = "backtrace", since = "1.65.0")]
#[must_use]
pub fn status(&self) -> BacktraceStatus {
match self.inner {
@@ -375,6 +380,7 @@ impl<'a> Backtrace {
}
}
+#[stable(feature = "backtrace", since = "1.65.0")]
impl fmt::Display for Backtrace {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
let capture = match &self.inner {
@@ -462,8 +468,7 @@ impl Capture {
// Use the global backtrace lock to synchronize this as it's a
// requirement of the `backtrace` crate, and then actually resolve
// everything.
- // SAFETY: We don't attempt to lock this reentrantly.
- let _lock = unsafe { lock() };
+ let _lock = lock();
for frame in self.frames.iter_mut() {
let symbols = &mut frame.symbols;
let frame = match &frame.frame {
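With the module stabilized in 1.65 as shown above, `Backtrace::capture` honors the `RUST_BACKTRACE` / `RUST_LIB_BACKTRACE` variables, while `force_capture` ignores them. A small usage sketch:

    use std::backtrace::{Backtrace, BacktraceStatus};

    fn main() {
        // capture() may return a disabled backtrace depending on the
        // RUST_BACKTRACE / RUST_LIB_BACKTRACE environment variables.
        let bt = Backtrace::capture();
        if bt.status() == BacktraceStatus::Captured {
            println!("{bt}");
        }

        // force_capture() captures regardless of environment configuration.
        println!("{}", Backtrace::force_capture());
    }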
diff --git a/library/std/src/collections/hash/map.rs b/library/std/src/collections/hash/map.rs
index db811343f..708edc5de 100644
--- a/library/std/src/collections/hash/map.rs
+++ b/library/std/src/collections/hash/map.rs
@@ -9,6 +9,7 @@ use crate::borrow::Borrow;
use crate::cell::Cell;
use crate::collections::TryReserveError;
use crate::collections::TryReserveErrorKind;
+use crate::error::Error;
use crate::fmt::{self, Debug};
#[allow(deprecated)]
use crate::hash::{BuildHasher, Hash, Hasher, SipHasher13};
@@ -279,7 +280,8 @@ impl<K, V, S> HashMap<K, V, S> {
/// ```
#[inline]
#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
- pub fn with_hasher(hash_builder: S) -> HashMap<K, V, S> {
+ #[rustc_const_unstable(feature = "const_collections_with_hasher", issue = "102575")]
+ pub const fn with_hasher(hash_builder: S) -> HashMap<K, V, S> {
HashMap { base: base::HashMap::with_hasher(hash_builder) }
}
@@ -757,7 +759,7 @@ where
/// Tries to reserve capacity for at least `additional` more elements to be inserted
/// in the `HashMap`. The collection may reserve more space to speculatively
- /// avoid frequent reallocations. After calling `reserve`,
+ /// avoid frequent reallocations. After calling `try_reserve`,
/// capacity will be greater than or equal to `self.len() + additional` if
/// it returns `Ok(())`.
/// Does nothing if capacity is already sufficient.
@@ -2158,6 +2160,14 @@ impl<'a, K: Debug, V: Debug> fmt::Display for OccupiedError<'a, K, V> {
}
}
+#[unstable(feature = "map_try_insert", issue = "82766")]
+impl<'a, K: fmt::Debug, V: fmt::Debug> Error for OccupiedError<'a, K, V> {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "key already exists"
+ }
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V, S> IntoIterator for &'a HashMap<K, V, S> {
type Item = (&'a K, &'a V);
diff --git a/library/std/src/collections/hash/map/tests.rs b/library/std/src/collections/hash/map/tests.rs
index 7ebc41588..65634f206 100644
--- a/library/std/src/collections/hash/map/tests.rs
+++ b/library/std/src/collections/hash/map/tests.rs
@@ -268,10 +268,13 @@ fn test_lots_of_insertions() {
// Try this a few times to make sure we never screw up the hashmap's
// internal state.
- for _ in 0..10 {
+ let loops = if cfg!(miri) { 2 } else { 10 };
+ for _ in 0..loops {
assert!(m.is_empty());
- for i in 1..1001 {
+ let count = if cfg!(miri) { 101 } else { 1001 };
+
+ for i in 1..count {
assert!(m.insert(i, i).is_none());
for j in 1..=i {
@@ -279,42 +282,42 @@ fn test_lots_of_insertions() {
assert_eq!(r, Some(&j));
}
- for j in i + 1..1001 {
+ for j in i + 1..count {
let r = m.get(&j);
assert_eq!(r, None);
}
}
- for i in 1001..2001 {
+ for i in count..(2 * count) {
assert!(!m.contains_key(&i));
}
// remove forwards
- for i in 1..1001 {
+ for i in 1..count {
assert!(m.remove(&i).is_some());
for j in 1..=i {
assert!(!m.contains_key(&j));
}
- for j in i + 1..1001 {
+ for j in i + 1..count {
assert!(m.contains_key(&j));
}
}
- for i in 1..1001 {
+ for i in 1..count {
assert!(!m.contains_key(&i));
}
- for i in 1..1001 {
+ for i in 1..count {
assert!(m.insert(i, i).is_none());
}
// remove backwards
- for i in (1..1001).rev() {
+ for i in (1..count).rev() {
assert!(m.remove(&i).is_some());
- for j in i..1001 {
+ for j in i..count {
assert!(!m.contains_key(&j));
}
@@ -817,6 +820,7 @@ fn test_retain() {
}
#[test]
+#[cfg_attr(miri, ignore)] // Miri does not support signalling OOM
#[cfg_attr(target_os = "android", ignore)] // Android used in CI has a broken dlmalloc
fn test_try_reserve() {
let mut empty_bytes: HashMap<u8, u8> = HashMap::new();
@@ -1111,3 +1115,9 @@ fn from_array() {
// that's a problem!
let _must_not_require_type_annotation = HashMap::from([(1, 2)]);
}
+
+#[test]
+fn const_with_hasher() {
+ const X: HashMap<(), (), ()> = HashMap::with_hasher(());
+ assert_eq!(X.len(), 0);
+}
diff --git a/library/std/src/collections/hash/set.rs b/library/std/src/collections/hash/set.rs
index abff82788..cee884145 100644
--- a/library/std/src/collections/hash/set.rs
+++ b/library/std/src/collections/hash/set.rs
@@ -239,7 +239,7 @@ impl<T, S> HashSet<T, S> {
///
/// If the returned iterator is dropped before being fully consumed, it
/// drops the remaining elements. The returned iterator keeps a mutable
- /// borrow on the vector to optimize its implementation.
+ /// borrow on the set to optimize its implementation.
///
/// # Examples
///
@@ -376,7 +376,8 @@ impl<T, S> HashSet<T, S> {
/// ```
#[inline]
#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
- pub fn with_hasher(hasher: S) -> HashSet<T, S> {
+ #[rustc_const_unstable(feature = "const_collections_with_hasher", issue = "102575")]
+ pub const fn with_hasher(hasher: S) -> HashSet<T, S> {
HashSet { base: base::HashSet::with_hasher(hasher) }
}
@@ -461,7 +462,7 @@ where
/// Tries to reserve capacity for at least `additional` more elements to be inserted
/// in the `HashSet`. The collection may reserve more space to speculatively
- /// avoid frequent reallocations. After calling `reserve`,
+ /// avoid frequent reallocations. After calling `try_reserve`,
/// capacity will be greater than or equal to `self.len() + additional` if
/// it returns `Ok(())`.
/// Does nothing if capacity is already sufficient.
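
The `try_reserve` contract restated in the corrected doc above — capacity of at least `self.len() + additional` on `Ok(())`, no change on error — is what makes the fallible-allocation pattern below possible. A small sketch on stable APIs; `add_batch` and the element type are illustrative:

```rust
use std::collections::{HashSet, TryReserveError};

// Reserve up front so a failed allocation surfaces as an error instead of
// aborting, then extend knowing no further reallocation is needed.
fn add_batch(set: &mut HashSet<u64>, batch: &[u64]) -> Result<(), TryReserveError> {
    set.try_reserve(batch.len())?;
    set.extend(batch.iter().copied());
    Ok(())
}

fn main() {
    let mut set = HashSet::new();
    add_batch(&mut set, &[1, 2, 3]).expect("a tiny reservation should succeed");
    assert_eq!(set.len(), 3);
    assert!(set.capacity() >= set.len());
}
```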
diff --git a/library/std/src/collections/hash/set/tests.rs b/library/std/src/collections/hash/set/tests.rs
index 233db276b..941a0450c 100644
--- a/library/std/src/collections/hash/set/tests.rs
+++ b/library/std/src/collections/hash/set/tests.rs
@@ -496,3 +496,9 @@ fn from_array() {
// that's a problem!
let _must_not_require_type_annotation = HashSet::from([1, 2]);
}
+
+#[test]
+fn const_with_hasher() {
+ const X: HashSet<(), ()> = HashSet::with_hasher(());
+ assert_eq!(X.len(), 0);
+}
diff --git a/library/std/src/env.rs b/library/std/src/env.rs
index 463f71406..6eb7cbea6 100644
--- a/library/std/src/env.rs
+++ b/library/std/src/env.rs
@@ -603,7 +603,7 @@ pub fn home_dir() -> Option<PathBuf> {
/// # Platform-specific behavior
///
/// On Unix, returns the value of the `TMPDIR` environment variable if it is
-/// set, otherwise for non-Android it returns `/tmp`. If Android, since there
+/// set, otherwise for non-Android it returns `/tmp`. On Android, since there
/// is no global temporary folder (it is usually allocated per-app), it returns
/// `/data/local/tmp`.
/// On Windows, the behavior is equivalent to that of [`GetTempPath2`][GetTempPath2] /
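
A brief usage sketch of `env::temp_dir` with the platform resolution described above; the file name is arbitrary and no cleanup is attempted:

```rust
use std::env;
use std::fs;
use std::io::Write;

fn main() -> std::io::Result<()> {
    // Resolves to $TMPDIR, /tmp, /data/local/tmp, or the Windows temp path
    // depending on the platform, as documented above.
    let mut path = env::temp_dir();
    path.push("scratch-example.txt");

    let mut file = fs::File::create(&path)?;
    writeln!(file, "temporary data")?;
    println!("wrote {}", path.display());
    Ok(())
}
```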
diff --git a/library/std/src/error.rs b/library/std/src/error.rs
index 722df119d..05f8fd8de 100644
--- a/library/std/src/error.rs
+++ b/library/std/src/error.rs
@@ -1,366 +1,14 @@
-//! Interfaces for working with Errors.
-//!
-//! # Error Handling In Rust
-//!
-//! The Rust language provides two complementary systems for constructing /
-//! representing, reporting, propagating, reacting to, and discarding errors.
-//! These responsibilities are collectively known as "error handling." The
-//! components of the first system, the panic runtime and interfaces, are most
-//! commonly used to represent bugs that have been detected in your program. The
-//! components of the second system, `Result`, the error traits, and user
-//! defined types, are used to represent anticipated runtime failure modes of
-//! your program.
-//!
-//! ## The Panic Interfaces
-//!
-//! The following are the primary interfaces of the panic system and the
-//! responsibilities they cover:
-//!
-//! * [`panic!`] and [`panic_any`] (Constructing, Propagated automatically)
-//! * [`PanicInfo`] (Reporting)
-//! * [`set_hook`], [`take_hook`], and [`#[panic_handler]`][panic-handler] (Reporting)
-//! * [`catch_unwind`] and [`resume_unwind`] (Discarding, Propagating)
-//!
-//! The following are the primary interfaces of the error system and the
-//! responsibilities they cover:
-//!
-//! * [`Result`] (Propagating, Reacting)
-//! * The [`Error`] trait (Reporting)
-//! * User defined types (Constructing / Representing)
-//! * [`match`] and [`downcast`] (Reacting)
-//! * The question mark operator ([`?`]) (Propagating)
-//! * The partially stable [`Try`] traits (Propagating, Constructing)
-//! * [`Termination`] (Reporting)
-//!
-//! ## Converting Errors into Panics
-//!
-//! The panic and error systems are not entirely distinct. Often times errors
-//! that are anticipated runtime failures in an API might instead represent bugs
-//! to a caller. For these situations the standard library provides APIs for
-//! constructing panics with an `Error` as its source.
-//!
-//! * [`Result::unwrap`]
-//! * [`Result::expect`]
-//!
-//! These functions are equivalent: they either return the inner value if the
-//! `Result` is `Ok` or panic if the `Result` is `Err` printing the inner error
-//! as the source. The only difference between them is that with `expect` you
-//! provide a panic error message to be printed alongside the source, whereas
-//! `unwrap` has a default message indicating only that you unwrapped an `Err`.
-//!
-//! Of the two, `expect` is generally preferred since its `msg` field allows you
-//! to convey your intent and assumptions which makes tracking down the source
-//! of a panic easier. `unwrap` on the other hand can still be a good fit in
-//! situations where you can trivially show that a piece of code will never
-//! panic, such as `"127.0.0.1".parse::<std::net::IpAddr>().unwrap()` or early
-//! prototyping.
-//!
-//! # Common Message Styles
-//!
-//! There are two common styles for how people word `expect` messages. Using
-//! the message to present information to users encountering a panic
-//! ("expect as error message") or using the message to present information
-//! to developers debugging the panic ("expect as precondition").
-//!
-//! In the former case the expect message is used to describe the error that
-//! has occurred which is considered a bug. Consider the following example:
-//!
-//! ```should_panic
-//! // Read environment variable, panic if it is not present
-//! let path = std::env::var("IMPORTANT_PATH").unwrap();
-//! ```
-//!
-//! In the "expect as error message" style we would use expect to describe
-//! that the environment variable was not set when it should have been:
-//!
-//! ```should_panic
-//! let path = std::env::var("IMPORTANT_PATH")
-//! .expect("env variable `IMPORTANT_PATH` is not set");
-//! ```
-//!
-//! In the "expect as precondition" style, we would instead describe the
-//! reason we _expect_ the `Result` should be `Ok`. With this style we would
-//! prefer to write:
-//!
-//! ```should_panic
-//! let path = std::env::var("IMPORTANT_PATH")
-//! .expect("env variable `IMPORTANT_PATH` should be set by `wrapper_script.sh`");
-//! ```
-//!
-//! The "expect as error message" style does not work as well with the
-//! default output of the std panic hooks, and often ends up repeating
-//! information that is already communicated by the source error being
-//! unwrapped:
-//!
-//! ```text
-//! thread 'main' panicked at 'env variable `IMPORTANT_PATH` is not set: NotPresent', src/main.rs:4:6
-//! ```
-//!
-//! In this example we end up mentioning that an env variable is not set,
-//! followed by our source message that says the env is not present, the
-//! only additional information we're communicating is the name of the
-//! environment variable being checked.
-//!
-//! The "expect as precondition" style instead focuses on source code
-//! readability, making it easier to understand what must have gone wrong in
-//! situations where panics are being used to represent bugs exclusively.
-//! Also, by framing our expect in terms of what "SHOULD" have happened to
-//! prevent the source error, we end up introducing new information that is
-//! independent from our source error.
-//!
-//! ```text
-//! thread 'main' panicked at 'env variable `IMPORTANT_PATH` should be set by `wrapper_script.sh`: NotPresent', src/main.rs:4:6
-//! ```
-//!
-//! In this example we are communicating not only the name of the
-//! environment variable that should have been set, but also an explanation
-//! for why it should have been set, and we let the source error display as
-//! a clear contradiction to our expectation.
-//!
-//! **Hint**: If you're having trouble remembering how to phrase
-//! expect-as-precondition style error messages remember to focus on the word
-//! "should" as in "env variable should be set by blah" or "the given binary
-//! should be available and executable by the current user".
-//!
-//! [`panic_any`]: crate::panic::panic_any
-//! [`PanicInfo`]: crate::panic::PanicInfo
-//! [`catch_unwind`]: crate::panic::catch_unwind
-//! [`resume_unwind`]: crate::panic::resume_unwind
-//! [`downcast`]: crate::error::Error
-//! [`Termination`]: crate::process::Termination
-//! [`Try`]: crate::ops::Try
-//! [panic hook]: crate::panic::set_hook
-//! [`set_hook`]: crate::panic::set_hook
-//! [`take_hook`]: crate::panic::take_hook
-//! [panic-handler]: <https://doc.rust-lang.org/nomicon/panic-handler.html>
-//! [`match`]: ../../std/keyword.match.html
-//! [`?`]: ../../std/result/index.html#the-question-mark-operator-
-
+#![doc = include_str!("../../core/src/error.md")]
#![stable(feature = "rust1", since = "1.0.0")]
-// A note about crates and the facade:
-//
-// Originally, the `Error` trait was defined in libcore, and the impls
-// were scattered about. However, coherence objected to this
-// arrangement, because to create the blanket impls for `Box` required
-// knowing that `&str: !Error`, and we have no means to deal with that
-// sort of conflict just now. Therefore, for the time being, we have
-// moved the `Error` trait into libstd. As we evolve a sol'n to the
-// coherence challenge (e.g., specialization, neg impls, etc) we can
-// reconsider what crate these items belong in.
-
#[cfg(test)]
mod tests;
-use core::array;
-use core::convert::Infallible;
-
-use crate::alloc::{AllocError, LayoutError};
-use crate::any::{Demand, Provider, TypeId};
use crate::backtrace::Backtrace;
-use crate::borrow::Cow;
-use crate::cell;
-use crate::char;
-use crate::fmt::{self, Debug, Display, Write};
-use crate::io;
-use crate::mem::transmute;
-use crate::num;
-use crate::str;
-use crate::string;
-use crate::sync::Arc;
-use crate::time;
+use crate::fmt::{self, Write};
-/// `Error` is a trait representing the basic expectations for error values,
-/// i.e., values of type `E` in [`Result<T, E>`].
-///
-/// Errors must describe themselves through the [`Display`] and [`Debug`]
-/// traits. Error messages are typically concise lowercase sentences without
-/// trailing punctuation:
-///
-/// ```
-/// let err = "NaN".parse::<u32>().unwrap_err();
-/// assert_eq!(err.to_string(), "invalid digit found in string");
-/// ```
-///
-/// Errors may provide cause chain information. [`Error::source()`] is generally
-/// used when errors cross "abstraction boundaries". If one module must report
-/// an error that is caused by an error from a lower-level module, it can allow
-/// accessing that error via [`Error::source()`]. This makes it possible for the
-/// high-level module to provide its own errors while also revealing some of the
-/// implementation for debugging via `source` chains.
#[stable(feature = "rust1", since = "1.0.0")]
-#[cfg_attr(not(test), rustc_diagnostic_item = "Error")]
-pub trait Error: Debug + Display {
- /// The lower-level source of this error, if any.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::error::Error;
- /// use std::fmt;
- ///
- /// #[derive(Debug)]
- /// struct SuperError {
- /// source: SuperErrorSideKick,
- /// }
- ///
- /// impl fmt::Display for SuperError {
- /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- /// write!(f, "SuperError is here!")
- /// }
- /// }
- ///
- /// impl Error for SuperError {
- /// fn source(&self) -> Option<&(dyn Error + 'static)> {
- /// Some(&self.source)
- /// }
- /// }
- ///
- /// #[derive(Debug)]
- /// struct SuperErrorSideKick;
- ///
- /// impl fmt::Display for SuperErrorSideKick {
- /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- /// write!(f, "SuperErrorSideKick is here!")
- /// }
- /// }
- ///
- /// impl Error for SuperErrorSideKick {}
- ///
- /// fn get_super_error() -> Result<(), SuperError> {
- /// Err(SuperError { source: SuperErrorSideKick })
- /// }
- ///
- /// fn main() {
- /// match get_super_error() {
- /// Err(e) => {
- /// println!("Error: {e}");
- /// println!("Caused by: {}", e.source().unwrap());
- /// }
- /// _ => println!("No error"),
- /// }
- /// }
- /// ```
- #[stable(feature = "error_source", since = "1.30.0")]
- fn source(&self) -> Option<&(dyn Error + 'static)> {
- None
- }
-
- /// Gets the `TypeId` of `self`.
- #[doc(hidden)]
- #[unstable(
- feature = "error_type_id",
- reason = "this is memory-unsafe to override in user code",
- issue = "60784"
- )]
- fn type_id(&self, _: private::Internal) -> TypeId
- where
- Self: 'static,
- {
- TypeId::of::<Self>()
- }
-
- /// ```
- /// if let Err(e) = "xc".parse::<u32>() {
- /// // Print `e` itself, no need for description().
- /// eprintln!("Error: {e}");
- /// }
- /// ```
- #[stable(feature = "rust1", since = "1.0.0")]
- #[deprecated(since = "1.42.0", note = "use the Display impl or to_string()")]
- fn description(&self) -> &str {
- "description() is deprecated; use Display"
- }
-
- #[stable(feature = "rust1", since = "1.0.0")]
- #[deprecated(
- since = "1.33.0",
- note = "replaced by Error::source, which can support downcasting"
- )]
- #[allow(missing_docs)]
- fn cause(&self) -> Option<&dyn Error> {
- self.source()
- }
-
- /// Provides type based access to context intended for error reports.
- ///
- /// Used in conjunction with [`Demand::provide_value`] and [`Demand::provide_ref`] to extract
- /// references to member variables from `dyn Error` trait objects.
- ///
- /// # Example
- ///
- /// ```rust
- /// #![feature(provide_any)]
- /// #![feature(error_generic_member_access)]
- /// use core::fmt;
- /// use core::any::Demand;
- ///
- /// #[derive(Debug)]
- /// struct MyBacktrace {
- /// // ...
- /// }
- ///
- /// impl MyBacktrace {
- /// fn new() -> MyBacktrace {
- /// // ...
- /// # MyBacktrace {}
- /// }
- /// }
- ///
- /// #[derive(Debug)]
- /// struct SourceError {
- /// // ...
- /// }
- ///
- /// impl fmt::Display for SourceError {
- /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- /// write!(f, "Example Source Error")
- /// }
- /// }
- ///
- /// impl std::error::Error for SourceError {}
- ///
- /// #[derive(Debug)]
- /// struct Error {
- /// source: SourceError,
- /// backtrace: MyBacktrace,
- /// }
- ///
- /// impl fmt::Display for Error {
- /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- /// write!(f, "Example Error")
- /// }
- /// }
- ///
- /// impl std::error::Error for Error {
- /// fn provide<'a>(&'a self, req: &mut Demand<'a>) {
- /// req
- /// .provide_ref::<MyBacktrace>(&self.backtrace)
- /// .provide_ref::<dyn std::error::Error + 'static>(&self.source);
- /// }
- /// }
- ///
- /// fn main() {
- /// let backtrace = MyBacktrace::new();
- /// let source = SourceError {};
- /// let error = Error { source, backtrace };
- /// let dyn_error = &error as &dyn std::error::Error;
- /// let backtrace_ref = dyn_error.request_ref::<MyBacktrace>().unwrap();
- ///
- /// assert!(core::ptr::eq(&error.backtrace, backtrace_ref));
- /// }
- /// ```
- #[unstable(feature = "error_generic_member_access", issue = "99301")]
- #[allow(unused_variables)]
- fn provide<'a>(&'a self, req: &mut Demand<'a>) {}
-}
-
-#[unstable(feature = "error_generic_member_access", issue = "99301")]
-impl<'b> Provider for dyn Error + 'b {
- fn provide<'a>(&'a self, req: &mut Demand<'a>) {
- self.provide(req)
- }
-}
+pub use core::error::Error;
mod private {
// This is a hack to prevent `type_id` from being overridden by `Error`
@@ -370,742 +18,10 @@ mod private {
pub struct Internal;
}
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a, E: Error + 'a> From<E> for Box<dyn Error + 'a> {
- /// Converts a type of [`Error`] into a box of dyn [`Error`].
- ///
- /// # Examples
- ///
- /// ```
- /// use std::error::Error;
- /// use std::fmt;
- /// use std::mem;
- ///
- /// #[derive(Debug)]
- /// struct AnError;
- ///
- /// impl fmt::Display for AnError {
- /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- /// write!(f, "An error")
- /// }
- /// }
- ///
- /// impl Error for AnError {}
- ///
- /// let an_error = AnError;
- /// assert!(0 == mem::size_of_val(&an_error));
- /// let a_boxed_error = Box::<dyn Error>::from(an_error);
- /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
- /// ```
- fn from(err: E) -> Box<dyn Error + 'a> {
- Box::new(err)
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a, E: Error + Send + Sync + 'a> From<E> for Box<dyn Error + Send + Sync + 'a> {
- /// Converts a type of [`Error`] + [`Send`] + [`Sync`] into a box of
- /// dyn [`Error`] + [`Send`] + [`Sync`].
- ///
- /// # Examples
- ///
- /// ```
- /// use std::error::Error;
- /// use std::fmt;
- /// use std::mem;
- ///
- /// #[derive(Debug)]
- /// struct AnError;
- ///
- /// impl fmt::Display for AnError {
- /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- /// write!(f, "An error")
- /// }
- /// }
- ///
- /// impl Error for AnError {}
- ///
- /// unsafe impl Send for AnError {}
- ///
- /// unsafe impl Sync for AnError {}
- ///
- /// let an_error = AnError;
- /// assert!(0 == mem::size_of_val(&an_error));
- /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(an_error);
- /// assert!(
- /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
- /// ```
- fn from(err: E) -> Box<dyn Error + Send + Sync + 'a> {
- Box::new(err)
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl From<String> for Box<dyn Error + Send + Sync> {
- /// Converts a [`String`] into a box of dyn [`Error`] + [`Send`] + [`Sync`].
- ///
- /// # Examples
- ///
- /// ```
- /// use std::error::Error;
- /// use std::mem;
- ///
- /// let a_string_error = "a string error".to_string();
- /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_string_error);
- /// assert!(
- /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
- /// ```
- #[inline]
- fn from(err: String) -> Box<dyn Error + Send + Sync> {
- struct StringError(String);
-
- impl Error for StringError {
- #[allow(deprecated)]
- fn description(&self) -> &str {
- &self.0
- }
- }
-
- impl Display for StringError {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- Display::fmt(&self.0, f)
- }
- }
-
- // Purposefully skip printing "StringError(..)"
- impl Debug for StringError {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- Debug::fmt(&self.0, f)
- }
- }
-
- Box::new(StringError(err))
- }
-}
-
-#[stable(feature = "string_box_error", since = "1.6.0")]
-impl From<String> for Box<dyn Error> {
- /// Converts a [`String`] into a box of dyn [`Error`].
- ///
- /// # Examples
- ///
- /// ```
- /// use std::error::Error;
- /// use std::mem;
- ///
- /// let a_string_error = "a string error".to_string();
- /// let a_boxed_error = Box::<dyn Error>::from(a_string_error);
- /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
- /// ```
- fn from(str_err: String) -> Box<dyn Error> {
- let err1: Box<dyn Error + Send + Sync> = From::from(str_err);
- let err2: Box<dyn Error> = err1;
- err2
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a> From<&str> for Box<dyn Error + Send + Sync + 'a> {
- /// Converts a [`str`] into a box of dyn [`Error`] + [`Send`] + [`Sync`].
- ///
- /// [`str`]: prim@str
- ///
- /// # Examples
- ///
- /// ```
- /// use std::error::Error;
- /// use std::mem;
- ///
- /// let a_str_error = "a str error";
- /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_str_error);
- /// assert!(
- /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
- /// ```
- #[inline]
- fn from(err: &str) -> Box<dyn Error + Send + Sync + 'a> {
- From::from(String::from(err))
- }
-}
-
-#[stable(feature = "string_box_error", since = "1.6.0")]
-impl From<&str> for Box<dyn Error> {
- /// Converts a [`str`] into a box of dyn [`Error`].
- ///
- /// [`str`]: prim@str
- ///
- /// # Examples
- ///
- /// ```
- /// use std::error::Error;
- /// use std::mem;
- ///
- /// let a_str_error = "a str error";
- /// let a_boxed_error = Box::<dyn Error>::from(a_str_error);
- /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
- /// ```
- fn from(err: &str) -> Box<dyn Error> {
- From::from(String::from(err))
- }
-}
-
-#[stable(feature = "cow_box_error", since = "1.22.0")]
-impl<'a, 'b> From<Cow<'b, str>> for Box<dyn Error + Send + Sync + 'a> {
- /// Converts a [`Cow`] into a box of dyn [`Error`] + [`Send`] + [`Sync`].
- ///
- /// # Examples
- ///
- /// ```
- /// use std::error::Error;
- /// use std::mem;
- /// use std::borrow::Cow;
- ///
- /// let a_cow_str_error = Cow::from("a str error");
- /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_cow_str_error);
- /// assert!(
- /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
- /// ```
- fn from(err: Cow<'b, str>) -> Box<dyn Error + Send + Sync + 'a> {
- From::from(String::from(err))
- }
-}
-
-#[stable(feature = "cow_box_error", since = "1.22.0")]
-impl<'a> From<Cow<'a, str>> for Box<dyn Error> {
- /// Converts a [`Cow`] into a box of dyn [`Error`].
- ///
- /// # Examples
- ///
- /// ```
- /// use std::error::Error;
- /// use std::mem;
- /// use std::borrow::Cow;
- ///
- /// let a_cow_str_error = Cow::from("a str error");
- /// let a_boxed_error = Box::<dyn Error>::from(a_cow_str_error);
- /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
- /// ```
- fn from(err: Cow<'a, str>) -> Box<dyn Error> {
- From::from(String::from(err))
- }
-}
-
-#[unstable(feature = "never_type", issue = "35121")]
-impl Error for ! {}
-
-#[unstable(
- feature = "allocator_api",
- reason = "the precise API and guarantees it provides may be tweaked.",
- issue = "32838"
-)]
-impl Error for AllocError {}
-
-#[stable(feature = "alloc_layout", since = "1.28.0")]
-impl Error for LayoutError {}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl Error for str::ParseBoolError {
- #[allow(deprecated)]
- fn description(&self) -> &str {
- "failed to parse bool"
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl Error for str::Utf8Error {
- #[allow(deprecated)]
- fn description(&self) -> &str {
- "invalid utf-8: corrupt contents"
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl Error for num::ParseIntError {
- #[allow(deprecated)]
- fn description(&self) -> &str {
- self.__description()
- }
-}
-
-#[stable(feature = "try_from", since = "1.34.0")]
-impl Error for num::TryFromIntError {
- #[allow(deprecated)]
- fn description(&self) -> &str {
- self.__description()
- }
-}
-
-#[stable(feature = "try_from", since = "1.34.0")]
-impl Error for array::TryFromSliceError {
- #[allow(deprecated)]
- fn description(&self) -> &str {
- self.__description()
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl Error for num::ParseFloatError {
- #[allow(deprecated)]
- fn description(&self) -> &str {
- self.__description()
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl Error for string::FromUtf8Error {
- #[allow(deprecated)]
- fn description(&self) -> &str {
- "invalid utf-8"
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl Error for string::FromUtf16Error {
- #[allow(deprecated)]
- fn description(&self) -> &str {
- "invalid utf-16"
- }
-}
-
-#[stable(feature = "str_parse_error2", since = "1.8.0")]
-impl Error for Infallible {
- fn description(&self) -> &str {
- match *self {}
- }
-}
-
-#[stable(feature = "decode_utf16", since = "1.9.0")]
-impl Error for char::DecodeUtf16Error {
- #[allow(deprecated)]
- fn description(&self) -> &str {
- "unpaired surrogate found"
- }
-}
-
-#[stable(feature = "u8_from_char", since = "1.59.0")]
-impl Error for char::TryFromCharError {}
-
-#[unstable(feature = "map_try_insert", issue = "82766")]
-impl<'a, K: Debug + Ord, V: Debug> Error
- for crate::collections::btree_map::OccupiedError<'a, K, V>
-{
- #[allow(deprecated)]
- fn description(&self) -> &str {
- "key already exists"
- }
-}
-
-#[unstable(feature = "map_try_insert", issue = "82766")]
-impl<'a, K: Debug, V: Debug> Error for crate::collections::hash_map::OccupiedError<'a, K, V> {
- #[allow(deprecated)]
- fn description(&self) -> &str {
- "key already exists"
- }
-}
-
-#[stable(feature = "box_error", since = "1.8.0")]
-impl<T: Error> Error for Box<T> {
- #[allow(deprecated, deprecated_in_future)]
- fn description(&self) -> &str {
- Error::description(&**self)
- }
-
- #[allow(deprecated)]
- fn cause(&self) -> Option<&dyn Error> {
- Error::cause(&**self)
- }
-
- fn source(&self) -> Option<&(dyn Error + 'static)> {
- Error::source(&**self)
- }
-}
-
-#[unstable(feature = "thin_box", issue = "92791")]
-impl<T: ?Sized + crate::error::Error> crate::error::Error for crate::boxed::ThinBox<T> {
- fn source(&self) -> Option<&(dyn crate::error::Error + 'static)> {
- use core::ops::Deref;
- self.deref().source()
- }
-}
-
-#[stable(feature = "error_by_ref", since = "1.51.0")]
-impl<'a, T: Error + ?Sized> Error for &'a T {
- #[allow(deprecated, deprecated_in_future)]
- fn description(&self) -> &str {
- Error::description(&**self)
- }
-
- #[allow(deprecated)]
- fn cause(&self) -> Option<&dyn Error> {
- Error::cause(&**self)
- }
-
- fn source(&self) -> Option<&(dyn Error + 'static)> {
- Error::source(&**self)
- }
-
- fn provide<'b>(&'b self, req: &mut Demand<'b>) {
- Error::provide(&**self, req);
- }
-}
-
-#[stable(feature = "arc_error", since = "1.52.0")]
-impl<T: Error + ?Sized> Error for Arc<T> {
- #[allow(deprecated, deprecated_in_future)]
- fn description(&self) -> &str {
- Error::description(&**self)
- }
-
- #[allow(deprecated)]
- fn cause(&self) -> Option<&dyn Error> {
- Error::cause(&**self)
- }
-
- fn source(&self) -> Option<&(dyn Error + 'static)> {
- Error::source(&**self)
- }
-
- fn provide<'a>(&'a self, req: &mut Demand<'a>) {
- Error::provide(&**self, req);
- }
-}
-
-#[stable(feature = "fmt_error", since = "1.11.0")]
-impl Error for fmt::Error {
- #[allow(deprecated)]
- fn description(&self) -> &str {
- "an error occurred when formatting an argument"
- }
-}
-
-#[stable(feature = "try_borrow", since = "1.13.0")]
-impl Error for cell::BorrowError {
- #[allow(deprecated)]
- fn description(&self) -> &str {
- "already mutably borrowed"
- }
-}
-
-#[stable(feature = "try_borrow", since = "1.13.0")]
-impl Error for cell::BorrowMutError {
- #[allow(deprecated)]
- fn description(&self) -> &str {
- "already borrowed"
- }
-}
-
-#[stable(feature = "try_from", since = "1.34.0")]
-impl Error for char::CharTryFromError {
- #[allow(deprecated)]
- fn description(&self) -> &str {
- "converted integer out of range for `char`"
- }
-}
-
-#[stable(feature = "char_from_str", since = "1.20.0")]
-impl Error for char::ParseCharError {
- #[allow(deprecated)]
- fn description(&self) -> &str {
- self.__description()
- }
-}
-
-#[stable(feature = "try_reserve", since = "1.57.0")]
-impl Error for alloc::collections::TryReserveError {}
-
-#[unstable(feature = "duration_checked_float", issue = "83400")]
-impl Error for time::FromFloatSecsError {}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl Error for alloc::ffi::NulError {
- #[allow(deprecated)]
- fn description(&self) -> &str {
- "nul byte found in data"
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl From<alloc::ffi::NulError> for io::Error {
- /// Converts a [`alloc::ffi::NulError`] into a [`io::Error`].
- fn from(_: alloc::ffi::NulError) -> io::Error {
- io::const_io_error!(io::ErrorKind::InvalidInput, "data provided contains a nul byte")
- }
-}
-
-#[stable(feature = "frombyteswithnulerror_impls", since = "1.17.0")]
-impl Error for core::ffi::FromBytesWithNulError {
- #[allow(deprecated)]
- fn description(&self) -> &str {
- self.__description()
- }
-}
-
-#[unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")]
-impl Error for core::ffi::FromBytesUntilNulError {}
-
-#[stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")]
-impl Error for alloc::ffi::FromVecWithNulError {}
-
-#[stable(feature = "cstring_into", since = "1.7.0")]
-impl Error for alloc::ffi::IntoStringError {
- #[allow(deprecated)]
- fn description(&self) -> &str {
- "C string contained non-utf8 bytes"
- }
-
- fn source(&self) -> Option<&(dyn Error + 'static)> {
- Some(self.__source())
- }
-}
-
-impl<'a> dyn Error + 'a {
- /// Request a reference of type `T` as context about this error.
- #[unstable(feature = "error_generic_member_access", issue = "99301")]
- pub fn request_ref<T: ?Sized + 'static>(&'a self) -> Option<&'a T> {
- core::any::request_ref(self)
- }
-
- /// Request a value of type `T` as context about this error.
- #[unstable(feature = "error_generic_member_access", issue = "99301")]
- pub fn request_value<T: 'static>(&'a self) -> Option<T> {
- core::any::request_value(self)
- }
-}
-
-// Copied from `any.rs`.
-impl dyn Error + 'static {
- /// Returns `true` if the inner type is the same as `T`.
- #[stable(feature = "error_downcast", since = "1.3.0")]
- #[inline]
- pub fn is<T: Error + 'static>(&self) -> bool {
- // Get `TypeId` of the type this function is instantiated with.
- let t = TypeId::of::<T>();
-
- // Get `TypeId` of the type in the trait object (`self`).
- let concrete = self.type_id(private::Internal);
-
- // Compare both `TypeId`s on equality.
- t == concrete
- }
-
- /// Returns some reference to the inner value if it is of type `T`, or
- /// `None` if it isn't.
- #[stable(feature = "error_downcast", since = "1.3.0")]
- #[inline]
- pub fn downcast_ref<T: Error + 'static>(&self) -> Option<&T> {
- if self.is::<T>() {
- unsafe { Some(&*(self as *const dyn Error as *const T)) }
- } else {
- None
- }
- }
-
- /// Returns some mutable reference to the inner value if it is of type `T`, or
- /// `None` if it isn't.
- #[stable(feature = "error_downcast", since = "1.3.0")]
- #[inline]
- pub fn downcast_mut<T: Error + 'static>(&mut self) -> Option<&mut T> {
- if self.is::<T>() {
- unsafe { Some(&mut *(self as *mut dyn Error as *mut T)) }
- } else {
- None
- }
- }
-}
-
-impl dyn Error + 'static + Send {
- /// Forwards to the method defined on the type `dyn Error`.
- #[stable(feature = "error_downcast", since = "1.3.0")]
- #[inline]
- pub fn is<T: Error + 'static>(&self) -> bool {
- <dyn Error + 'static>::is::<T>(self)
- }
-
- /// Forwards to the method defined on the type `dyn Error`.
- #[stable(feature = "error_downcast", since = "1.3.0")]
- #[inline]
- pub fn downcast_ref<T: Error + 'static>(&self) -> Option<&T> {
- <dyn Error + 'static>::downcast_ref::<T>(self)
- }
-
- /// Forwards to the method defined on the type `dyn Error`.
- #[stable(feature = "error_downcast", since = "1.3.0")]
- #[inline]
- pub fn downcast_mut<T: Error + 'static>(&mut self) -> Option<&mut T> {
- <dyn Error + 'static>::downcast_mut::<T>(self)
- }
-
- /// Request a reference of type `T` as context about this error.
- #[unstable(feature = "error_generic_member_access", issue = "99301")]
- pub fn request_ref<T: ?Sized + 'static>(&self) -> Option<&T> {
- <dyn Error>::request_ref(self)
- }
-
- /// Request a value of type `T` as context about this error.
- #[unstable(feature = "error_generic_member_access", issue = "99301")]
- pub fn request_value<T: 'static>(&self) -> Option<T> {
- <dyn Error>::request_value(self)
- }
-}
-
-impl dyn Error + 'static + Send + Sync {
- /// Forwards to the method defined on the type `dyn Error`.
- #[stable(feature = "error_downcast", since = "1.3.0")]
- #[inline]
- pub fn is<T: Error + 'static>(&self) -> bool {
- <dyn Error + 'static>::is::<T>(self)
- }
-
- /// Forwards to the method defined on the type `dyn Error`.
- #[stable(feature = "error_downcast", since = "1.3.0")]
- #[inline]
- pub fn downcast_ref<T: Error + 'static>(&self) -> Option<&T> {
- <dyn Error + 'static>::downcast_ref::<T>(self)
- }
-
- /// Forwards to the method defined on the type `dyn Error`.
- #[stable(feature = "error_downcast", since = "1.3.0")]
- #[inline]
- pub fn downcast_mut<T: Error + 'static>(&mut self) -> Option<&mut T> {
- <dyn Error + 'static>::downcast_mut::<T>(self)
- }
-
- /// Request a reference of type `T` as context about this error.
- #[unstable(feature = "error_generic_member_access", issue = "99301")]
- pub fn request_ref<T: ?Sized + 'static>(&self) -> Option<&T> {
- <dyn Error>::request_ref(self)
- }
-
- /// Request a value of type `T` as context about this error.
- #[unstable(feature = "error_generic_member_access", issue = "99301")]
- pub fn request_value<T: 'static>(&self) -> Option<T> {
- <dyn Error>::request_value(self)
- }
-}
-
-impl dyn Error {
- #[inline]
- #[stable(feature = "error_downcast", since = "1.3.0")]
- /// Attempts to downcast the box to a concrete type.
- pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<dyn Error>> {
- if self.is::<T>() {
- unsafe {
- let raw: *mut dyn Error = Box::into_raw(self);
- Ok(Box::from_raw(raw as *mut T))
- }
- } else {
- Err(self)
- }
- }
-
- /// Returns an iterator starting with the current error and continuing with
- /// recursively calling [`Error::source`].
- ///
- /// If you want to omit the current error and only use its sources,
- /// use `skip(1)`.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(error_iter)]
- /// use std::error::Error;
- /// use std::fmt;
- ///
- /// #[derive(Debug)]
- /// struct A;
- ///
- /// #[derive(Debug)]
- /// struct B(Option<Box<dyn Error + 'static>>);
- ///
- /// impl fmt::Display for A {
- /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- /// write!(f, "A")
- /// }
- /// }
- ///
- /// impl fmt::Display for B {
- /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- /// write!(f, "B")
- /// }
- /// }
- ///
- /// impl Error for A {}
- ///
- /// impl Error for B {
- /// fn source(&self) -> Option<&(dyn Error + 'static)> {
- /// self.0.as_ref().map(|e| e.as_ref())
- /// }
- /// }
- ///
- /// let b = B(Some(Box::new(A)));
- ///
- /// // let err : Box<Error> = b.into(); // or
- /// let err = &b as &(dyn Error);
- ///
- /// let mut iter = err.chain();
- ///
- /// assert_eq!("B".to_string(), iter.next().unwrap().to_string());
- /// assert_eq!("A".to_string(), iter.next().unwrap().to_string());
- /// assert!(iter.next().is_none());
- /// assert!(iter.next().is_none());
- /// ```
- #[unstable(feature = "error_iter", issue = "58520")]
- #[inline]
- pub fn chain(&self) -> Chain<'_> {
- Chain { current: Some(self) }
- }
-}
-
-/// An iterator over an [`Error`] and its sources.
-///
-/// If you want to omit the initial error and only process
-/// its sources, use `skip(1)`.
-#[unstable(feature = "error_iter", issue = "58520")]
-#[derive(Clone, Debug)]
-pub struct Chain<'a> {
- current: Option<&'a (dyn Error + 'static)>,
-}
-
-#[unstable(feature = "error_iter", issue = "58520")]
-impl<'a> Iterator for Chain<'a> {
- type Item = &'a (dyn Error + 'static);
-
- fn next(&mut self) -> Option<Self::Item> {
- let current = self.current;
- self.current = self.current.and_then(Error::source);
- current
- }
-}
-
-impl dyn Error + Send {
- #[inline]
- #[stable(feature = "error_downcast", since = "1.3.0")]
- /// Attempts to downcast the box to a concrete type.
- pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<dyn Error + Send>> {
- let err: Box<dyn Error> = self;
- <dyn Error>::downcast(err).map_err(|s| unsafe {
- // Reapply the `Send` marker.
- transmute::<Box<dyn Error>, Box<dyn Error + Send>>(s)
- })
- }
-}
-
-impl dyn Error + Send + Sync {
- #[inline]
- #[stable(feature = "error_downcast", since = "1.3.0")]
- /// Attempts to downcast the box to a concrete type.
- pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<Self>> {
- let err: Box<dyn Error> = self;
- <dyn Error>::downcast(err).map_err(|s| unsafe {
- // Reapply the `Send + Sync` marker.
- transmute::<Box<dyn Error>, Box<dyn Error + Send + Sync>>(s)
- })
- }
-}
-
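
Everything deleted above now lives in `core::error` (pulled in through the `include_str!` doc and the `pub use` near the top of this file), so downstream code that names `std::error::Error` keeps working unchanged. A quick sketch on stable APIs; the `parse` helper is illustrative:

```rust
use std::error::Error; // now a re-export of `core::error::Error`
use std::num::ParseIntError;

fn parse(input: &str) -> Result<u32, Box<dyn Error>> {
    Ok(input.parse::<u32>()?)
}

fn main() {
    let err = parse("not a number").unwrap_err();
    // Downcasting through the re-exported trait behaves exactly as before.
    assert!(err.downcast_ref::<ParseIntError>().is_some());
    println!("error: {err}");
}
```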
/// An error reporter that prints an error and its sources.
///
-/// Report also exposes configuration options for formatting the error chain, either entirely on a
-/// single line, or in multi-line format with each cause in the error chain on a new line.
+/// Report also exposes configuration options for formatting the error sources, either entirely on a
+/// single line, or in multi-line format with each source on a new line.
///
/// `Report` only requires that the wrapped error implement `Error`. It doesn't require that the
/// wrapped error be `Send`, `Sync`, or `'static`.
@@ -1246,7 +162,7 @@ impl dyn Error + Send + Sync {
/// # Err(SuperError { source: SuperErrorSideKick })
/// # }
///
-/// fn main() -> Result<(), Report> {
+/// fn main() -> Result<(), Report<SuperError>> {
/// get_super_error()?;
/// Ok(())
/// }
@@ -1293,7 +209,7 @@ impl dyn Error + Send + Sync {
/// # Err(SuperError { source: SuperErrorSideKick })
/// # }
///
-/// fn main() -> Result<(), Report> {
+/// fn main() -> Result<(), Report<SuperError>> {
/// get_super_error()
/// .map_err(Report::from)
/// .map_err(|r| r.pretty(true).show_backtrace(true))?;
@@ -1450,11 +366,10 @@ impl<E> Report<E> {
///
/// **Note**: Report will search for the first `Backtrace` it can find starting from the
/// outermost error. In this example it will display the backtrace from the second error in the
- /// chain, `SuperErrorSideKick`.
+ /// sources, `SuperErrorSideKick`.
///
/// ```rust
/// #![feature(error_reporter)]
- /// #![feature(backtrace)]
/// #![feature(provide_any)]
/// #![feature(error_generic_member_access)]
/// # use std::error::Error;
@@ -1489,9 +404,8 @@ impl<E> Report<E> {
/// }
///
/// impl Error for SuperErrorSideKick {
- /// fn provide<'a>(&'a self, req: &mut Demand<'a>) {
- /// req
- /// .provide_ref::<Backtrace>(&self.backtrace);
+ /// fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
+ /// demand.provide_ref::<Backtrace>(&self.backtrace);
/// }
/// }
///
@@ -1548,73 +462,7 @@ where
let backtrace = backtrace.or_else(|| {
self.error
.source()
- .map(|source| source.chain().find_map(|source| source.request_ref()))
- .flatten()
- });
- backtrace
- }
-
- /// Format the report as a single line.
- #[unstable(feature = "error_reporter", issue = "90172")]
- fn fmt_singleline(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(f, "{}", self.error)?;
-
- let sources = self.error.source().into_iter().flat_map(<dyn Error>::chain);
-
- for cause in sources {
- write!(f, ": {cause}")?;
- }
-
- Ok(())
- }
-
- /// Format the report as multiple lines, with each error cause on its own line.
- #[unstable(feature = "error_reporter", issue = "90172")]
- fn fmt_multiline(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- let error = &self.error;
-
- write!(f, "{error}")?;
-
- if let Some(cause) = error.source() {
- write!(f, "\n\nCaused by:")?;
-
- let multiple = cause.source().is_some();
-
- for (ind, error) in cause.chain().enumerate() {
- writeln!(f)?;
- let mut indented = Indented { inner: f };
- if multiple {
- write!(indented, "{ind: >4}: {error}")?;
- } else {
- write!(indented, " {error}")?;
- }
- }
- }
-
- if self.show_backtrace {
- let backtrace = self.backtrace();
-
- if let Some(backtrace) = backtrace {
- let backtrace = backtrace.to_string();
-
- f.write_str("\n\nStack backtrace:\n")?;
- f.write_str(backtrace.trim_end())?;
- }
- }
-
- Ok(())
- }
-}
-
-impl Report<Box<dyn Error>> {
- fn backtrace(&self) -> Option<&Backtrace> {
- // have to grab the backtrace on the first error directly since that error may not be
- // 'static
- let backtrace = self.error.request_ref();
- let backtrace = backtrace.or_else(|| {
- self.error
- .source()
- .map(|source| source.chain().find_map(|source| source.request_ref()))
+ .map(|source| source.sources().find_map(|source| source.request_ref()))
.flatten()
});
backtrace
@@ -1625,7 +473,7 @@ impl Report<Box<dyn Error>> {
fn fmt_singleline(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.error)?;
- let sources = self.error.source().into_iter().flat_map(<dyn Error>::chain);
+ let sources = self.error.source().into_iter().flat_map(<dyn Error>::sources);
for cause in sources {
write!(f, ": {cause}")?;
@@ -1646,7 +494,7 @@ impl Report<Box<dyn Error>> {
let multiple = cause.source().is_some();
- for (ind, error) in cause.chain().enumerate() {
+ for (ind, error) in cause.sources().enumerate() {
writeln!(f)?;
let mut indented = Indented { inner: f };
if multiple {
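
The `chain` → `sources` rename above is mechanical; the iterator still yields the error itself first and then each `source()` in turn. A hedged sketch, assuming a nightly toolchain with the `error_iter` feature; the `High`/`Low` types are illustrative:

```rust
#![feature(error_iter)]

use std::error::Error;
use std::fmt;

#[derive(Debug)]
struct Low;

#[derive(Debug)]
struct High(Low);

impl fmt::Display for Low {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "low-level cause")
    }
}

impl fmt::Display for High {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "high-level failure")
    }
}

impl Error for Low {}

impl Error for High {
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        Some(&self.0)
    }
}

fn main() {
    let err = High(Low);
    // `sources()` starts at the error itself and then follows `source()`.
    let rendered: Vec<String> =
        (&err as &dyn Error).sources().map(|e| e.to_string()).collect();
    assert_eq!(rendered, ["high-level failure", "low-level cause"]);
}
```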
@@ -1683,17 +531,6 @@ where
}
#[unstable(feature = "error_reporter", issue = "90172")]
-impl<'a, E> From<E> for Report<Box<dyn Error + 'a>>
-where
- E: Error + 'a,
-{
- fn from(error: E) -> Self {
- let error = box error;
- Report { error, show_backtrace: false, pretty: false }
- }
-}
-
-#[unstable(feature = "error_reporter", issue = "90172")]
impl<E> fmt::Display for Report<E>
where
E: Error,
@@ -1703,13 +540,6 @@ where
}
}
-#[unstable(feature = "error_reporter", issue = "90172")]
-impl fmt::Display for Report<Box<dyn Error>> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- if self.pretty { self.fmt_multiline(f) } else { self.fmt_singleline(f) }
- }
-}
-
// This type intentionally outputs the same format for `Display` and `Debug` for
// situations where you unwrap a `Report` or return it from main.
#[unstable(feature = "error_reporter", issue = "90172")]
diff --git a/library/std/src/f32.rs b/library/std/src/f32.rs
index 933b52b4d..3dd5b1250 100644
--- a/library/std/src/f32.rs
+++ b/library/std/src/f32.rs
@@ -1,4 +1,4 @@
-//! Constants specific to the `f32` single-precision floating point type.
+//! Constants for the `f32` single-precision floating point type.
//!
//! *[See also the `f32` primitive type](primitive@f32).*
//!
diff --git a/library/std/src/f32/tests.rs b/library/std/src/f32/tests.rs
index 69fa203ff..4ec16c84a 100644
--- a/library/std/src/f32/tests.rs
+++ b/library/std/src/f32/tests.rs
@@ -299,6 +299,84 @@ fn test_is_sign_negative() {
assert!((-f32::NAN).is_sign_negative());
}
+#[allow(unused_macros)]
+macro_rules! assert_f32_biteq {
+ ($left : expr, $right : expr) => {
+ let l: &f32 = &$left;
+ let r: &f32 = &$right;
+ let lb = l.to_bits();
+ let rb = r.to_bits();
+ assert_eq!(lb, rb, "float {} ({:#x}) is not equal to {} ({:#x})", *l, lb, *r, rb);
+ };
+}
+
+// Ignore test on x87 floating point, these platforms do not guarantee NaN
+// payloads are preserved and flush denormals to zero, failing the tests.
+#[cfg(not(target_arch = "x86"))]
+#[test]
+fn test_next_up() {
+ let tiny = f32::from_bits(1);
+ let tiny_up = f32::from_bits(2);
+ let max_down = f32::from_bits(0x7f7f_fffe);
+ let largest_subnormal = f32::from_bits(0x007f_ffff);
+ let smallest_normal = f32::from_bits(0x0080_0000);
+ assert_f32_biteq!(f32::NEG_INFINITY.next_up(), f32::MIN);
+ assert_f32_biteq!(f32::MIN.next_up(), -max_down);
+ assert_f32_biteq!((-1.0 - f32::EPSILON).next_up(), -1.0);
+ assert_f32_biteq!((-smallest_normal).next_up(), -largest_subnormal);
+ assert_f32_biteq!((-tiny_up).next_up(), -tiny);
+ assert_f32_biteq!((-tiny).next_up(), -0.0f32);
+ assert_f32_biteq!((-0.0f32).next_up(), tiny);
+ assert_f32_biteq!(0.0f32.next_up(), tiny);
+ assert_f32_biteq!(tiny.next_up(), tiny_up);
+ assert_f32_biteq!(largest_subnormal.next_up(), smallest_normal);
+ assert_f32_biteq!(1.0f32.next_up(), 1.0 + f32::EPSILON);
+ assert_f32_biteq!(f32::MAX.next_up(), f32::INFINITY);
+ assert_f32_biteq!(f32::INFINITY.next_up(), f32::INFINITY);
+
+ // Check that NaNs roundtrip.
+ let nan0 = f32::NAN;
+ let nan1 = f32::from_bits(f32::NAN.to_bits() ^ 0x002a_aaaa);
+ let nan2 = f32::from_bits(f32::NAN.to_bits() ^ 0x0055_5555);
+ assert_f32_biteq!(nan0.next_up(), nan0);
+ assert_f32_biteq!(nan1.next_up(), nan1);
+ assert_f32_biteq!(nan2.next_up(), nan2);
+}
+
+// Ignore test on x87 floating point, these platforms do not guarantee NaN
+// payloads are preserved and flush denormals to zero, failing the tests.
+#[cfg(not(target_arch = "x86"))]
+#[test]
+fn test_next_down() {
+ let tiny = f32::from_bits(1);
+ let tiny_up = f32::from_bits(2);
+ let max_down = f32::from_bits(0x7f7f_fffe);
+ let largest_subnormal = f32::from_bits(0x007f_ffff);
+ let smallest_normal = f32::from_bits(0x0080_0000);
+ assert_f32_biteq!(f32::NEG_INFINITY.next_down(), f32::NEG_INFINITY);
+ assert_f32_biteq!(f32::MIN.next_down(), f32::NEG_INFINITY);
+ assert_f32_biteq!((-max_down).next_down(), f32::MIN);
+ assert_f32_biteq!((-1.0f32).next_down(), -1.0 - f32::EPSILON);
+ assert_f32_biteq!((-largest_subnormal).next_down(), -smallest_normal);
+ assert_f32_biteq!((-tiny).next_down(), -tiny_up);
+ assert_f32_biteq!((-0.0f32).next_down(), -tiny);
+ assert_f32_biteq!((0.0f32).next_down(), -tiny);
+ assert_f32_biteq!(tiny.next_down(), 0.0f32);
+ assert_f32_biteq!(tiny_up.next_down(), tiny);
+ assert_f32_biteq!(smallest_normal.next_down(), largest_subnormal);
+ assert_f32_biteq!((1.0 + f32::EPSILON).next_down(), 1.0f32);
+ assert_f32_biteq!(f32::MAX.next_down(), max_down);
+ assert_f32_biteq!(f32::INFINITY.next_down(), f32::MAX);
+
+ // Check that NaNs roundtrip.
+ let nan0 = f32::NAN;
+ let nan1 = f32::from_bits(f32::NAN.to_bits() ^ 0x002a_aaaa);
+ let nan2 = f32::from_bits(f32::NAN.to_bits() ^ 0x0055_5555);
+ assert_f32_biteq!(nan0.next_down(), nan0);
+ assert_f32_biteq!(nan1.next_down(), nan1);
+ assert_f32_biteq!(nan2.next_down(), nan2);
+}
+
#[test]
fn test_mul_add() {
let nan: f32 = f32::NAN;
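
The new tests above pin down the edge cases of `next_up`/`next_down`. A condensed sketch of the intended semantics, assuming a nightly toolchain with the `float_next_up_down` feature:

```rust
#![feature(float_next_up_down)]

fn main() {
    let tiny = f32::from_bits(1); // smallest positive subnormal

    // Both zeros step up to the smallest positive value...
    assert_eq!(0.0f32.next_up(), tiny);
    assert_eq!((-0.0f32).next_up(), tiny);
    // ...and stepping back down from it returns to zero.
    assert_eq!(tiny.next_down(), 0.0);

    // Around 1.0 the step is exactly one machine epsilon.
    assert_eq!(1.0f32.next_up(), 1.0 + f32::EPSILON);

    // The top of the range saturates at infinity.
    assert_eq!(f32::MAX.next_up(), f32::INFINITY);
    assert_eq!(f32::INFINITY.next_up(), f32::INFINITY);

    // A practical use: widen an interval bound by exactly one ULP.
    let upper = 2.5f32;
    assert!(upper.next_up() > upper);
}
```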
diff --git a/library/std/src/f64.rs b/library/std/src/f64.rs
index a9aa84f70..31351a879 100644
--- a/library/std/src/f64.rs
+++ b/library/std/src/f64.rs
@@ -1,4 +1,4 @@
-//! Constants specific to the `f64` double-precision floating point type.
+//! Constants for the `f64` double-precision floating point type.
//!
//! *[See also the `f64` primitive type](primitive@f64).*
//!
diff --git a/library/std/src/f64/tests.rs b/library/std/src/f64/tests.rs
index 5c163cfe9..12baa68f4 100644
--- a/library/std/src/f64/tests.rs
+++ b/library/std/src/f64/tests.rs
@@ -289,6 +289,82 @@ fn test_is_sign_negative() {
assert!((-f64::NAN).is_sign_negative());
}
+#[allow(unused_macros)]
+macro_rules! assert_f64_biteq {
+ ($left : expr, $right : expr) => {
+ let l: &f64 = &$left;
+ let r: &f64 = &$right;
+ let lb = l.to_bits();
+ let rb = r.to_bits();
+ assert_eq!(lb, rb, "float {} ({:#x}) is not equal to {} ({:#x})", *l, lb, *r, rb);
+ };
+}
+
+// Ignore test on x87 floating point, these platforms do not guarantee NaN
+// payloads are preserved and flush denormals to zero, failing the tests.
+#[cfg(not(target_arch = "x86"))]
+#[test]
+fn test_next_up() {
+ let tiny = f64::from_bits(1);
+ let tiny_up = f64::from_bits(2);
+ let max_down = f64::from_bits(0x7fef_ffff_ffff_fffe);
+ let largest_subnormal = f64::from_bits(0x000f_ffff_ffff_ffff);
+ let smallest_normal = f64::from_bits(0x0010_0000_0000_0000);
+ assert_f64_biteq!(f64::NEG_INFINITY.next_up(), f64::MIN);
+ assert_f64_biteq!(f64::MIN.next_up(), -max_down);
+ assert_f64_biteq!((-1.0 - f64::EPSILON).next_up(), -1.0);
+ assert_f64_biteq!((-smallest_normal).next_up(), -largest_subnormal);
+ assert_f64_biteq!((-tiny_up).next_up(), -tiny);
+ assert_f64_biteq!((-tiny).next_up(), -0.0f64);
+ assert_f64_biteq!((-0.0f64).next_up(), tiny);
+ assert_f64_biteq!(0.0f64.next_up(), tiny);
+ assert_f64_biteq!(tiny.next_up(), tiny_up);
+ assert_f64_biteq!(largest_subnormal.next_up(), smallest_normal);
+ assert_f64_biteq!(1.0f64.next_up(), 1.0 + f64::EPSILON);
+ assert_f64_biteq!(f64::MAX.next_up(), f64::INFINITY);
+ assert_f64_biteq!(f64::INFINITY.next_up(), f64::INFINITY);
+
+ let nan0 = f64::NAN;
+ let nan1 = f64::from_bits(f64::NAN.to_bits() ^ 0x000a_aaaa_aaaa_aaaa);
+ let nan2 = f64::from_bits(f64::NAN.to_bits() ^ 0x0005_5555_5555_5555);
+ assert_f64_biteq!(nan0.next_up(), nan0);
+ assert_f64_biteq!(nan1.next_up(), nan1);
+ assert_f64_biteq!(nan2.next_up(), nan2);
+}
+
+// Ignore test on x87 floating point, these platforms do not guarantee NaN
+// payloads are preserved and flush denormals to zero, failing the tests.
+#[cfg(not(target_arch = "x86"))]
+#[test]
+fn test_next_down() {
+ let tiny = f64::from_bits(1);
+ let tiny_up = f64::from_bits(2);
+ let max_down = f64::from_bits(0x7fef_ffff_ffff_fffe);
+ let largest_subnormal = f64::from_bits(0x000f_ffff_ffff_ffff);
+ let smallest_normal = f64::from_bits(0x0010_0000_0000_0000);
+ assert_f64_biteq!(f64::NEG_INFINITY.next_down(), f64::NEG_INFINITY);
+ assert_f64_biteq!(f64::MIN.next_down(), f64::NEG_INFINITY);
+ assert_f64_biteq!((-max_down).next_down(), f64::MIN);
+ assert_f64_biteq!((-1.0f64).next_down(), -1.0 - f64::EPSILON);
+ assert_f64_biteq!((-largest_subnormal).next_down(), -smallest_normal);
+ assert_f64_biteq!((-tiny).next_down(), -tiny_up);
+ assert_f64_biteq!((-0.0f64).next_down(), -tiny);
+ assert_f64_biteq!((0.0f64).next_down(), -tiny);
+ assert_f64_biteq!(tiny.next_down(), 0.0f64);
+ assert_f64_biteq!(tiny_up.next_down(), tiny);
+ assert_f64_biteq!(smallest_normal.next_down(), largest_subnormal);
+ assert_f64_biteq!((1.0 + f64::EPSILON).next_down(), 1.0f64);
+ assert_f64_biteq!(f64::MAX.next_down(), max_down);
+ assert_f64_biteq!(f64::INFINITY.next_down(), f64::MAX);
+
+ let nan0 = f64::NAN;
+ let nan1 = f64::from_bits(f64::NAN.to_bits() ^ 0x000a_aaaa_aaaa_aaaa);
+ let nan2 = f64::from_bits(f64::NAN.to_bits() ^ 0x0005_5555_5555_5555);
+ assert_f64_biteq!(nan0.next_down(), nan0);
+ assert_f64_biteq!(nan1.next_down(), nan1);
+ assert_f64_biteq!(nan2.next_down(), nan2);
+}
+
#[test]
fn test_mul_add() {
let nan: f64 = f64::NAN;
diff --git a/library/std/src/ffi/os_str.rs b/library/std/src/ffi/os_str.rs
index a0a5c003d..80ed34157 100644
--- a/library/std/src/ffi/os_str.rs
+++ b/library/std/src/ffi/os_str.rs
@@ -290,7 +290,8 @@ impl OsString {
/// in the given `OsString`. The string may reserve more space to speculatively avoid
/// frequent reallocations. After calling `try_reserve`, capacity will be
/// greater than or equal to `self.len() + additional` if it returns `Ok(())`.
- /// Does nothing if capacity is already sufficient.
+ /// Does nothing if capacity is already sufficient. This method preserves
+ /// the contents even if an error occurs.
///
/// See the main `OsString` documentation for information about encoding and capacity units.
///
diff --git a/library/std/src/fs.rs b/library/std/src/fs.rs
index c8e131b6e..188ff00e1 100644
--- a/library/std/src/fs.rs
+++ b/library/std/src/fs.rs
@@ -13,13 +13,13 @@ mod tests;
use crate::ffi::OsString;
use crate::fmt;
-use crate::io::{self, IoSlice, IoSliceMut, Read, ReadBuf, Seek, SeekFrom, Write};
+use crate::io::{self, BorrowedCursor, IoSlice, IoSliceMut, Read, Seek, SeekFrom, Write};
use crate::path::{Path, PathBuf};
use crate::sys::fs as fs_imp;
use crate::sys_common::{AsInner, AsInnerMut, FromInner, IntoInner};
use crate::time::SystemTime;
-/// A reference to an open file on the filesystem.
+/// An object providing access to an open file on the filesystem.
///
/// An instance of a `File` can be read and/or written depending on what options
/// it was opened with. Files also implement [`Seek`] to alter the logical cursor
@@ -377,6 +377,35 @@ impl File {
OpenOptions::new().write(true).create(true).truncate(true).open(path.as_ref())
}
+ /// Creates a new file in read-write mode; error if the file exists.
+ ///
+ /// This function will create a file if it does not exist, or return an error if it does. This
+ /// way, if the call succeeds, the file returned is guaranteed to be new.
+ ///
+ /// This option is useful because it is atomic. Otherwise between checking whether a file
+ /// exists and creating a new one, the file may have been created by another process (a TOCTOU
+ /// race condition / attack).
+ ///
+ /// This can also be written using
+ /// `File::options().read(true).write(true).create_new(true).open(...)`.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// #![feature(file_create_new)]
+ ///
+ /// use std::fs::File;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let mut f = File::create_new("foo.txt")?;
+ /// Ok(())
+ /// }
+ /// ```
+ #[unstable(feature = "file_create_new", issue = "none")]
+ pub fn create_new<P: AsRef<Path>>(path: P) -> io::Result<File> {
+ OpenOptions::new().read(true).write(true).create_new(true).open(path.as_ref())
+ }
+
/// Returns a new OpenOptions object.
///
/// This function returns a new OpenOptions object that you can use to
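
As the doc comment for `create_new` above notes, the same atomic create-or-fail behaviour is available on stable through `OpenOptions::create_new`, which is also the natural place to react to the already-exists race. A sketch; the file name is arbitrary:

```rust
use std::fs::OpenOptions;
use std::io::ErrorKind;

fn main() -> std::io::Result<()> {
    let result = OpenOptions::new()
        .read(true)
        .write(true)
        .create_new(true) // error with `AlreadyExists` instead of truncating
        .open("lockfile");

    match result {
        Ok(_file) => println!("created a brand-new file"),
        Err(e) if e.kind() == ErrorKind::AlreadyExists => {
            println!("another process got there first");
        }
        Err(e) => return Err(e),
    }
    Ok(())
}
```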
@@ -703,8 +732,8 @@ impl Read for File {
self.inner.read_vectored(bufs)
}
- fn read_buf(&mut self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
- self.inner.read_buf(buf)
+ fn read_buf(&mut self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
+ self.inner.read_buf(cursor)
}
#[inline]
@@ -755,8 +784,8 @@ impl Read for &File {
self.inner.read(buf)
}
- fn read_buf(&mut self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
- self.inner.read_buf(buf)
+ fn read_buf(&mut self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
+ self.inner.read_buf(cursor)
}
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
@@ -1336,6 +1365,34 @@ impl FileTimes {
impl Permissions {
/// Returns `true` if these permissions describe a readonly (unwritable) file.
///
+ /// # Note
+ ///
+ /// This function does not take Access Control Lists (ACLs) or Unix group
+ /// membership into account.
+ ///
+ /// # Windows
+ ///
+ /// On Windows this returns [`FILE_ATTRIBUTE_READONLY`](https://docs.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants).
+ /// If `FILE_ATTRIBUTE_READONLY` is set then writes to the file will fail
+ /// but the user may still have permission to change this flag. If
+ /// `FILE_ATTRIBUTE_READONLY` is *not* set then writes may still fail due
+ /// to lack of write permission.
+ /// The behavior of this attribute for directories depends on the Windows
+ /// version.
+ ///
+ /// # Unix (including macOS)
+ ///
+ /// On Unix-based platforms this checks if *any* of the owner, group or others
+ /// write permission bits are set. It does not check if the current
+ /// user is in the file's assigned group. It also does not check ACLs.
+ /// Therefore even if this returns true you may not be able to write to the
+ /// file, and vice versa. The [`PermissionsExt`] trait gives direct access
+ /// to the permission bits but also does not read ACLs. If you need to
+ /// accurately know whether or not a file is writable use the `access()`
+ /// function from libc.
+ ///
+ /// [`PermissionsExt`]: crate::os::unix::fs::PermissionsExt
+ ///
/// # Examples
///
/// ```no_run
@@ -1361,8 +1418,40 @@ impl Permissions {
/// using the resulting `Permission` will update file permissions to allow
/// writing.
///
- /// This operation does **not** modify the filesystem. To modify the
- /// filesystem use the [`set_permissions`] function.
+ /// This operation does **not** modify the file's attributes. This only
+ /// changes the in-memory value of these attributes for this `Permissions`
+ /// instance. To modify the file's attributes, use the [`set_permissions`]
+ /// function, which commits these attribute changes to the file.
+ ///
+ /// # Note
+ ///
+ /// `set_readonly(false)` makes the file *world-writable* on Unix.
+ /// You can use the [`PermissionsExt`] trait on Unix to avoid this issue.
+ ///
+ /// It also does not take Access Control Lists (ACLs) or Unix group
+ /// membership into account.
+ ///
+ /// # Windows
+ ///
+ /// On Windows this sets or clears [`FILE_ATTRIBUTE_READONLY`](https://docs.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants).
+ /// If `FILE_ATTRIBUTE_READONLY` is set then writes to the file will fail
+ /// but the user may still have permission to change this flag. If
+ /// `FILE_ATTRIBUTE_READONLY` is *not* set then the write may still fail if
+ /// the user does not have permission to write to the file.
+ ///
+ /// In Windows 7 and earlier this attribute prevents deleting empty
+ /// directories. It does not prevent modifying the directory contents.
+ /// On later versions of Windows this attribute is ignored for directories.
+ ///
+ /// # Unix (including macOS)
+ ///
+ /// On Unix-based platforms this sets or clears the write access bit for
+ /// the owner, group *and* others, equivalent to `chmod a+w <file>`
+ /// or `chmod a-w <file>` respectively. The latter will grant write access
+ /// to all users! You can use the [`PermissionsExt`] trait on Unix
+ /// to avoid this issue.
+ ///
+ /// [`PermissionsExt`]: crate::os::unix::fs::PermissionsExt
///
/// # Examples
///
@@ -1376,7 +1465,8 @@ impl Permissions {
///
/// permissions.set_readonly(true);
///
- /// // filesystem doesn't change
+ /// // filesystem doesn't change, only the in memory state of the
+ /// // readonly permission
/// assert_eq!(false, metadata.permissions().readonly());
///
/// // just this particular `permissions`.
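
The expanded docs above warn that `set_readonly(false)` grants write access to owner, group and others on Unix. Below is a hedged, Unix-only sketch of the `PermissionsExt` alternative they point to, adjusting just the owner's write bit; the path and helper name are illustrative:

```rust
#[cfg(unix)]
fn make_owner_writable(path: &std::path::Path) -> std::io::Result<()> {
    use std::os::unix::fs::PermissionsExt;

    let metadata = std::fs::metadata(path)?;
    let mut permissions = metadata.permissions();
    // Add u+w only, leaving the group and other bits untouched.
    let mode = permissions.mode() | 0o200;
    permissions.set_mode(mode);
    std::fs::set_permissions(path, permissions)
}

#[cfg(unix)]
fn main() -> std::io::Result<()> {
    make_owner_writable(std::path::Path::new("example.txt"))
}

#[cfg(not(unix))]
fn main() {}
```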
diff --git a/library/std/src/io/buffered/bufreader.rs b/library/std/src/io/buffered/bufreader.rs
index f7fbaa9c2..4f339a18a 100644
--- a/library/std/src/io/buffered/bufreader.rs
+++ b/library/std/src/io/buffered/bufreader.rs
@@ -2,7 +2,7 @@ mod buffer;
use crate::fmt;
use crate::io::{
- self, BufRead, IoSliceMut, Read, ReadBuf, Seek, SeekFrom, SizeHint, DEFAULT_BUF_SIZE,
+ self, BorrowedCursor, BufRead, IoSliceMut, Read, Seek, SeekFrom, SizeHint, DEFAULT_BUF_SIZE,
};
use buffer::Buffer;
@@ -224,6 +224,14 @@ impl<R> BufReader<R> {
}
}
+// This is only used by a test which asserts that the initialization-tracking is correct.
+#[cfg(test)]
+impl<R> BufReader<R> {
+ pub fn initialized(&self) -> usize {
+ self.buf.initialized()
+ }
+}
+
impl<R: Seek> BufReader<R> {
/// Seeks relative to the current position. If the new position lies within the buffer,
/// the buffer will not be flushed, allowing for more efficient seeks.
@@ -266,21 +274,21 @@ impl<R: Read> Read for BufReader<R> {
Ok(nread)
}
- fn read_buf(&mut self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
+ fn read_buf(&mut self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
// If we don't have any buffered data and we're doing a massive read
// (larger than our internal buffer), bypass our internal buffer
// entirely.
- if self.buf.pos() == self.buf.filled() && buf.remaining() >= self.capacity() {
+ if self.buf.pos() == self.buf.filled() && cursor.capacity() >= self.capacity() {
self.discard_buffer();
- return self.inner.read_buf(buf);
+ return self.inner.read_buf(cursor);
}
- let prev = buf.filled_len();
+ let prev = cursor.written();
let mut rem = self.fill_buf()?;
- rem.read_buf(buf)?;
+ rem.read_buf(cursor.reborrow())?;
- self.consume(buf.filled_len() - prev); //slice impl of read_buf known to never unfill buf
+ self.consume(cursor.written() - prev); //slice impl of read_buf known to never unfill buf
Ok(())
}
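The hunk above forwards `read_buf` straight to the inner reader whenever the internal buffer is empty and the destination cursor has at least `capacity()` bytes of space. A minimal sketch (not part of this diff, nightly `read_buf` feature assumed) that exercises the bypass path:

```rust
// Sketch: a destination larger than BufReader's internal capacity is read
// directly from the inner reader, skipping the internal buffer.
#![feature(read_buf)]
use std::io::{BorrowedBuf, BufReader, Read};
use std::mem::MaybeUninit;

fn read_large(r: impl Read) -> std::io::Result<usize> {
    let mut reader = BufReader::with_capacity(64, r);
    let storage: &mut [_] = &mut [MaybeUninit::<u8>::uninit(); 1024];
    let mut buf: BorrowedBuf<'_> = storage.into();
    // 1024 >= 64, so this call bypasses the internal buffer entirely.
    reader.read_buf(buf.unfilled())?;
    Ok(buf.len())
}
```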
diff --git a/library/std/src/io/buffered/bufreader/buffer.rs b/library/std/src/io/buffered/bufreader/buffer.rs
index 8ae01f3b0..e9e29d60c 100644
--- a/library/std/src/io/buffered/bufreader/buffer.rs
+++ b/library/std/src/io/buffered/bufreader/buffer.rs
@@ -9,7 +9,7 @@
/// that user code which wants to do reads from a `BufReader` via `buffer` + `consume` can do so
/// without encountering any runtime bounds checks.
use crate::cmp;
-use crate::io::{self, Read, ReadBuf};
+use crate::io::{self, BorrowedBuf, Read};
use crate::mem::MaybeUninit;
pub struct Buffer {
@@ -20,13 +20,19 @@ pub struct Buffer {
// Each call to `fill_buf` sets `filled` to indicate how many bytes at the start of `buf` are
// initialized with bytes from a read.
filled: usize,
+ // This is the max number of bytes returned across all `fill_buf` calls. We track this so that we
+ // can accurately tell `read_buf` how many bytes of buf are initialized, to bypass as much of its
+ // defensive initialization as possible. Note that while this is often the same as `filled`, it
+ // doesn't need to be. Calls to `fill_buf` are not required to actually fill the buffer, and
+ // omitting this tracking is a huge perf regression for `Read` impls that do not fill the buffer.
+ initialized: usize,
}
impl Buffer {
#[inline]
pub fn with_capacity(capacity: usize) -> Self {
let buf = Box::new_uninit_slice(capacity);
- Self { buf, pos: 0, filled: 0 }
+ Self { buf, pos: 0, filled: 0, initialized: 0 }
}
#[inline]
@@ -51,6 +57,12 @@ impl Buffer {
self.pos
}
+ // This is only used by a test which asserts that the initialization-tracking is correct.
+ #[cfg(test)]
+ pub fn initialized(&self) -> usize {
+ self.initialized
+ }
+
#[inline]
pub fn discard_buffer(&mut self) {
self.pos = 0;
@@ -93,12 +105,17 @@ impl Buffer {
if self.pos >= self.filled {
debug_assert!(self.pos == self.filled);
- let mut readbuf = ReadBuf::uninit(&mut self.buf);
+ let mut buf = BorrowedBuf::from(&mut *self.buf);
+ // SAFETY: `self.filled` bytes will always have been initialized.
+ unsafe {
+ buf.set_init(self.initialized);
+ }
- reader.read_buf(&mut readbuf)?;
+ reader.read_buf(buf.unfilled())?;
- self.filled = readbuf.filled_len();
self.pos = 0;
+ self.filled = buf.len();
+ self.initialized = buf.init_len();
}
Ok(self.buffer())
}
diff --git a/library/std/src/io/buffered/tests.rs b/library/std/src/io/buffered/tests.rs
index fe45b1326..f4e688eb9 100644
--- a/library/std/src/io/buffered/tests.rs
+++ b/library/std/src/io/buffered/tests.rs
@@ -1,5 +1,7 @@
use crate::io::prelude::*;
-use crate::io::{self, BufReader, BufWriter, ErrorKind, IoSlice, LineWriter, ReadBuf, SeekFrom};
+use crate::io::{
+ self, BorrowedBuf, BufReader, BufWriter, ErrorKind, IoSlice, LineWriter, SeekFrom,
+};
use crate::mem::MaybeUninit;
use crate::panic;
use crate::sync::atomic::{AtomicUsize, Ordering};
@@ -61,48 +63,48 @@ fn test_buffered_reader_read_buf() {
let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
let mut reader = BufReader::with_capacity(2, inner);
- let mut buf = [MaybeUninit::uninit(); 3];
- let mut buf = ReadBuf::uninit(&mut buf);
+ let buf: &mut [_] = &mut [MaybeUninit::uninit(); 3];
+ let mut buf: BorrowedBuf<'_> = buf.into();
- reader.read_buf(&mut buf).unwrap();
+ reader.read_buf(buf.unfilled()).unwrap();
assert_eq!(buf.filled(), [5, 6, 7]);
assert_eq!(reader.buffer(), []);
- let mut buf = [MaybeUninit::uninit(); 2];
- let mut buf = ReadBuf::uninit(&mut buf);
+ let buf: &mut [_] = &mut [MaybeUninit::uninit(); 2];
+ let mut buf: BorrowedBuf<'_> = buf.into();
- reader.read_buf(&mut buf).unwrap();
+ reader.read_buf(buf.unfilled()).unwrap();
assert_eq!(buf.filled(), [0, 1]);
assert_eq!(reader.buffer(), []);
- let mut buf = [MaybeUninit::uninit(); 1];
- let mut buf = ReadBuf::uninit(&mut buf);
+ let buf: &mut [_] = &mut [MaybeUninit::uninit(); 1];
+ let mut buf: BorrowedBuf<'_> = buf.into();
- reader.read_buf(&mut buf).unwrap();
+ reader.read_buf(buf.unfilled()).unwrap();
assert_eq!(buf.filled(), [2]);
assert_eq!(reader.buffer(), [3]);
- let mut buf = [MaybeUninit::uninit(); 3];
- let mut buf = ReadBuf::uninit(&mut buf);
+ let buf: &mut [_] = &mut [MaybeUninit::uninit(); 3];
+ let mut buf: BorrowedBuf<'_> = buf.into();
- reader.read_buf(&mut buf).unwrap();
+ reader.read_buf(buf.unfilled()).unwrap();
assert_eq!(buf.filled(), [3]);
assert_eq!(reader.buffer(), []);
- reader.read_buf(&mut buf).unwrap();
+ reader.read_buf(buf.unfilled()).unwrap();
assert_eq!(buf.filled(), [3, 4]);
assert_eq!(reader.buffer(), []);
buf.clear();
- reader.read_buf(&mut buf).unwrap();
+ reader.read_buf(buf.unfilled()).unwrap();
- assert_eq!(buf.filled_len(), 0);
+ assert!(buf.filled().is_empty());
}
#[test]
@@ -1037,3 +1039,27 @@ fn single_formatted_write() {
writeln!(&mut writer, "{}, {}!", "hello", "world").unwrap();
assert_eq!(writer.get_ref().events, [RecordedEvent::Write("hello, world!\n".to_string())]);
}
+
+#[test]
+fn bufreader_full_initialize() {
+ struct OneByteReader;
+ impl Read for OneByteReader {
+ fn read(&mut self, buf: &mut [u8]) -> crate::io::Result<usize> {
+ if buf.len() > 0 {
+ buf[0] = 0;
+ Ok(1)
+ } else {
+ Ok(0)
+ }
+ }
+ }
+ let mut reader = BufReader::new(OneByteReader);
+ // Nothing is initialized yet.
+ assert_eq!(reader.initialized(), 0);
+
+ let buf = reader.fill_buf().unwrap();
+ // We read one byte...
+ assert_eq!(buf.len(), 1);
+ // But we initialized the whole buffer!
+ assert_eq!(reader.initialized(), reader.capacity());
+}
diff --git a/library/std/src/io/copy.rs b/library/std/src/io/copy.rs
index 1a10245e4..38b98afff 100644
--- a/library/std/src/io/copy.rs
+++ b/library/std/src/io/copy.rs
@@ -1,4 +1,4 @@
-use super::{BufWriter, ErrorKind, Read, ReadBuf, Result, Write, DEFAULT_BUF_SIZE};
+use super::{BorrowedBuf, BufWriter, ErrorKind, Read, Result, Write, DEFAULT_BUF_SIZE};
use crate::mem::MaybeUninit;
/// Copies the entire contents of a reader into a writer.
@@ -97,37 +97,39 @@ impl<I: Write> BufferedCopySpec for BufWriter<I> {
loop {
let buf = writer.buffer_mut();
- let mut read_buf = ReadBuf::uninit(buf.spare_capacity_mut());
+ let mut read_buf: BorrowedBuf<'_> = buf.spare_capacity_mut().into();
- // SAFETY: init is either 0 or the initialized_len of the previous iteration
unsafe {
- read_buf.assume_init(init);
+ // SAFETY: init is either 0 or the init_len from the previous iteration.
+ read_buf.set_init(init);
}
if read_buf.capacity() >= DEFAULT_BUF_SIZE {
- match reader.read_buf(&mut read_buf) {
+ let mut cursor = read_buf.unfilled();
+ match reader.read_buf(cursor.reborrow()) {
Ok(()) => {
- let bytes_read = read_buf.filled_len();
+ let bytes_read = cursor.written();
if bytes_read == 0 {
return Ok(len);
}
- init = read_buf.initialized_len() - bytes_read;
+ init = read_buf.init_len() - bytes_read;
+ len += bytes_read as u64;
- // SAFETY: ReadBuf guarantees all of its filled bytes are init
+ // SAFETY: BorrowedBuf guarantees all of its filled bytes are init
unsafe { buf.set_len(buf.len() + bytes_read) };
- len += bytes_read as u64;
+
// Read again if the buffer still has enough capacity, as BufWriter itself would do
// This will occur if the reader returns short reads
- continue;
}
- Err(ref e) if e.kind() == ErrorKind::Interrupted => continue,
+ Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
Err(e) => return Err(e),
}
+ } else {
+ writer.flush_buf()?;
+ init = 0;
}
-
- writer.flush_buf()?;
}
}
}
@@ -136,13 +138,13 @@ fn stack_buffer_copy<R: Read + ?Sized, W: Write + ?Sized>(
reader: &mut R,
writer: &mut W,
) -> Result<u64> {
- let mut buf = [MaybeUninit::uninit(); DEFAULT_BUF_SIZE];
- let mut buf = ReadBuf::uninit(&mut buf);
+ let buf: &mut [_] = &mut [MaybeUninit::uninit(); DEFAULT_BUF_SIZE];
+ let mut buf: BorrowedBuf<'_> = buf.into();
let mut len = 0;
loop {
- match reader.read_buf(&mut buf) {
+ match reader.read_buf(buf.unfilled()) {
Ok(()) => {}
Err(e) if e.kind() == ErrorKind::Interrupted => continue,
Err(e) => return Err(e),
diff --git a/library/std/src/io/cursor.rs b/library/std/src/io/cursor.rs
index f3fbfc447..d98ab021c 100644
--- a/library/std/src/io/cursor.rs
+++ b/library/std/src/io/cursor.rs
@@ -5,7 +5,7 @@ use crate::io::prelude::*;
use crate::alloc::Allocator;
use crate::cmp;
-use crate::io::{self, ErrorKind, IoSlice, IoSliceMut, ReadBuf, SeekFrom};
+use crate::io::{self, BorrowedCursor, ErrorKind, IoSlice, IoSliceMut, SeekFrom};
/// A `Cursor` wraps an in-memory buffer and provides it with a
/// [`Seek`] implementation.
@@ -323,12 +323,12 @@ where
Ok(n)
}
- fn read_buf(&mut self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
- let prev_filled = buf.filled_len();
+ fn read_buf(&mut self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
+ let prev_written = cursor.written();
- Read::read_buf(&mut self.fill_buf()?, buf)?;
+ Read::read_buf(&mut self.fill_buf()?, cursor.reborrow())?;
- self.pos += (buf.filled_len() - prev_filled) as u64;
+ self.pos += (cursor.written() - prev_written) as u64;
Ok(())
}
diff --git a/library/std/src/io/error.rs b/library/std/src/io/error.rs
index ff7fdcae1..3cabf2449 100644
--- a/library/std/src/io/error.rs
+++ b/library/std/src/io/error.rs
@@ -76,6 +76,14 @@ impl fmt::Debug for Error {
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl From<alloc::ffi::NulError> for Error {
+ /// Converts an [`alloc::ffi::NulError`] into an [`Error`].
+ fn from(_: alloc::ffi::NulError) -> Error {
+ const_io_error!(ErrorKind::InvalidInput, "data provided contains a nul byte")
+ }
+}
+
// Only derive debug in tests, to make sure it
// doesn't accidentally get printed.
#[cfg_attr(test, derive(Debug))]
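With the new `From<alloc::ffi::NulError>` impl above, an interior nul byte found by `CString::new` can be propagated with `?` from a function returning `io::Result`. A minimal sketch (not part of this diff):

```rust
// Sketch: `?` now converts the error from `CString::new` into an
// `io::Error` with ErrorKind::InvalidInput, via the impl added above.
use std::ffi::CString;
use std::io;

fn to_c_string(bytes: &[u8]) -> io::Result<CString> {
    Ok(CString::new(bytes)?)
}
```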
@@ -379,7 +387,7 @@ pub enum ErrorKind {
impl ErrorKind {
pub(crate) fn as_str(&self) -> &'static str {
use ErrorKind::*;
- // Strictly alphabetical, please. (Sadly rustfmt cannot do this yet.)
+ // tidy-alphabetical-start
match *self {
AddrInUse => "address in use",
AddrNotAvailable => "address not available",
@@ -423,6 +431,7 @@ impl ErrorKind {
WouldBlock => "operation would block",
WriteZero => "write zero",
}
+ // tidy-alphabetical-end
}
}
@@ -473,6 +482,7 @@ impl Error {
/// originate from the OS itself. The `error` argument is an arbitrary
/// payload which will be contained in this [`Error`].
///
+ /// Note that this function allocates memory on the heap.
/// If no extra payload is required, use the `From` conversion from
/// `ErrorKind`.
///
@@ -487,7 +497,7 @@ impl Error {
/// // errors can also be created from other errors
/// let custom_error2 = Error::new(ErrorKind::Interrupted, custom_error);
///
- /// // creating an error without payload
+ /// // creating an error without payload (and without memory allocation)
/// let eof_error = Error::from(ErrorKind::UnexpectedEof);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
@@ -564,6 +574,8 @@ impl Error {
/// println!("last OS error: {os_error:?}");
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
+ #[doc(alias = "GetLastError")]
+ #[doc(alias = "errno")]
#[must_use]
#[inline]
pub fn last_os_error() -> Error {
diff --git a/library/std/src/io/error/repr_bitpacked.rs b/library/std/src/io/error/repr_bitpacked.rs
index 292bf4826..781ae03ad 100644
--- a/library/std/src/io/error/repr_bitpacked.rs
+++ b/library/std/src/io/error/repr_bitpacked.rs
@@ -269,10 +269,10 @@ where
}
TAG_SIMPLE_MESSAGE => ErrorData::SimpleMessage(&*ptr.cast::<SimpleMessage>().as_ptr()),
TAG_CUSTOM => {
- // It would be correct for us to use `ptr::sub` here (see the
+ // It would be correct for us to use `ptr::byte_sub` here (see the
// comment above the `wrapping_add` call in `new_custom` for why),
// but it isn't clear that it makes a difference, so we don't.
- let custom = ptr.as_ptr().cast::<u8>().wrapping_sub(TAG_CUSTOM).cast::<Custom>();
+ let custom = ptr.as_ptr().wrapping_byte_sub(TAG_CUSTOM).cast::<Custom>();
ErrorData::Custom(make_custom(custom))
}
_ => {
diff --git a/library/std/src/io/error/tests.rs b/library/std/src/io/error/tests.rs
index c897a5e87..16c634e9a 100644
--- a/library/std/src/io/error/tests.rs
+++ b/library/std/src/io/error/tests.rs
@@ -86,7 +86,7 @@ fn test_errorkind_packing() {
assert_eq!(Error::from(ErrorKind::NotFound).kind(), ErrorKind::NotFound);
assert_eq!(Error::from(ErrorKind::PermissionDenied).kind(), ErrorKind::PermissionDenied);
assert_eq!(Error::from(ErrorKind::Uncategorized).kind(), ErrorKind::Uncategorized);
- // Check that the innards look like like what we want.
+ // Check that the innards look like what we want.
assert_matches!(
Error::from(ErrorKind::OutOfMemory).repr.data(),
ErrorData::Simple(ErrorKind::OutOfMemory),
diff --git a/library/std/src/io/impls.rs b/library/std/src/io/impls.rs
index 950725473..e5048dcc8 100644
--- a/library/std/src/io/impls.rs
+++ b/library/std/src/io/impls.rs
@@ -6,7 +6,7 @@ use crate::cmp;
use crate::collections::VecDeque;
use crate::fmt;
use crate::io::{
- self, BufRead, ErrorKind, IoSlice, IoSliceMut, Read, ReadBuf, Seek, SeekFrom, Write,
+ self, BorrowedCursor, BufRead, ErrorKind, IoSlice, IoSliceMut, Read, Seek, SeekFrom, Write,
};
use crate::mem;
@@ -21,8 +21,8 @@ impl<R: Read + ?Sized> Read for &mut R {
}
#[inline]
- fn read_buf(&mut self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
- (**self).read_buf(buf)
+ fn read_buf(&mut self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
+ (**self).read_buf(cursor)
}
#[inline]
@@ -125,8 +125,8 @@ impl<R: Read + ?Sized> Read for Box<R> {
}
#[inline]
- fn read_buf(&mut self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
- (**self).read_buf(buf)
+ fn read_buf(&mut self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
+ (**self).read_buf(cursor)
}
#[inline]
@@ -249,11 +249,11 @@ impl Read for &[u8] {
}
#[inline]
- fn read_buf(&mut self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
- let amt = cmp::min(buf.remaining(), self.len());
+ fn read_buf(&mut self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
+ let amt = cmp::min(cursor.capacity(), self.len());
let (a, b) = self.split_at(amt);
- buf.append(a);
+ cursor.append(a);
*self = b;
Ok(())
@@ -427,10 +427,10 @@ impl<A: Allocator> Read for VecDeque<u8, A> {
}
#[inline]
- fn read_buf(&mut self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
+ fn read_buf(&mut self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
let (ref mut front, _) = self.as_slices();
- let n = cmp::min(buf.remaining(), front.len());
- Read::read_buf(front, buf)?;
+ let n = cmp::min(cursor.capacity(), front.len());
+ Read::read_buf(front, cursor)?;
self.drain(..n);
Ok(())
}
diff --git a/library/std/src/io/mod.rs b/library/std/src/io/mod.rs
index 96addbd1a..23a13523f 100644
--- a/library/std/src/io/mod.rs
+++ b/library/std/src/io/mod.rs
@@ -262,9 +262,12 @@ use crate::sys_common::memchr;
#[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
pub use self::buffered::WriterPanicked;
+pub(crate) use self::stdio::attempt_print_to_stderr;
#[unstable(feature = "internal_output_capture", issue = "none")]
#[doc(no_inline, hidden)]
pub use self::stdio::set_output_capture;
+#[unstable(feature = "is_terminal", issue = "98070")]
+pub use self::stdio::IsTerminal;
#[unstable(feature = "print_internals", issue = "none")]
pub use self::stdio::{_eprint, _print};
#[stable(feature = "rust1", since = "1.0.0")]
@@ -278,7 +281,7 @@ pub use self::{
};
#[unstable(feature = "read_buf", issue = "78485")]
-pub use self::readbuf::ReadBuf;
+pub use self::readbuf::{BorrowedBuf, BorrowedCursor};
pub(crate) use error::const_io_error;
mod buffered;
@@ -362,29 +365,30 @@ pub(crate) fn default_read_to_end<R: Read + ?Sized>(r: &mut R, buf: &mut Vec<u8>
buf.reserve(32); // buf is full, need more space
}
- let mut read_buf = ReadBuf::uninit(buf.spare_capacity_mut());
+ let mut read_buf: BorrowedBuf<'_> = buf.spare_capacity_mut().into();
// SAFETY: These bytes were initialized but not filled in the previous loop
unsafe {
- read_buf.assume_init(initialized);
+ read_buf.set_init(initialized);
}
- match r.read_buf(&mut read_buf) {
+ let mut cursor = read_buf.unfilled();
+ match r.read_buf(cursor.reborrow()) {
Ok(()) => {}
Err(e) if e.kind() == ErrorKind::Interrupted => continue,
Err(e) => return Err(e),
}
- if read_buf.filled_len() == 0 {
+ if cursor.written() == 0 {
return Ok(buf.len() - start_len);
}
// store how much was initialized but not filled
- initialized = read_buf.initialized_len() - read_buf.filled_len();
- let new_len = read_buf.filled_len() + buf.len();
+ initialized = cursor.init_ref().len();
- // SAFETY: ReadBuf's invariants mean this much memory is init
+ // SAFETY: BorrowedBuf's invariants mean this much memory is initialized.
unsafe {
+ let new_len = read_buf.filled().len() + buf.len();
buf.set_len(new_len);
}
@@ -461,12 +465,15 @@ pub(crate) fn default_read_exact<R: Read + ?Sized>(this: &mut R, mut buf: &mut [
}
}
-pub(crate) fn default_read_buf<F>(read: F, buf: &mut ReadBuf<'_>) -> Result<()>
+pub(crate) fn default_read_buf<F>(read: F, mut cursor: BorrowedCursor<'_>) -> Result<()>
where
F: FnOnce(&mut [u8]) -> Result<usize>,
{
- let n = read(buf.initialize_unfilled())?;
- buf.add_filled(n);
+ let n = read(cursor.ensure_init().init_mut())?;
+ unsafe {
+ // SAFETY: we initialized using `ensure_init`, so there is no uninit data to advance over.
+ cursor.advance(n);
+ }
Ok(())
}
@@ -576,7 +583,7 @@ pub trait Read {
/// `n > buf.len()`.
///
/// No guarantees are provided about the contents of `buf` when this
- /// function is called, implementations cannot rely on any property of the
+ /// function is called, so implementations cannot rely on any property of the
/// contents of `buf` being true. It is recommended that *implementations*
/// only write data to `buf` instead of reading its contents.
///
@@ -752,7 +759,7 @@ pub trait Read {
/// specified buffer `buf`.
///
/// No guarantees are provided about the contents of `buf` when this
- /// function is called, implementations cannot rely on any property of the
+ /// function is called, so implementations cannot rely on any property of the
/// contents of `buf` being true. It is recommended that implementations
/// only write data to `buf` instead of reading its contents. The
/// documentation on [`read`] has a more detailed explanation on this
@@ -803,30 +810,30 @@ pub trait Read {
/// Pull some bytes from this source into the specified buffer.
///
- /// This is equivalent to the [`read`](Read::read) method, except that it is passed a [`ReadBuf`] rather than `[u8]` to allow use
+ /// This is equivalent to the [`read`](Read::read) method, except that it is passed a [`BorrowedCursor`] rather than `[u8]` to allow use
/// with uninitialized buffers. The new data will be appended to any existing contents of `buf`.
///
/// The default implementation delegates to `read`.
#[unstable(feature = "read_buf", issue = "78485")]
- fn read_buf(&mut self, buf: &mut ReadBuf<'_>) -> Result<()> {
+ fn read_buf(&mut self, buf: BorrowedCursor<'_>) -> Result<()> {
default_read_buf(|b| self.read(b), buf)
}
- /// Read the exact number of bytes required to fill `buf`.
+ /// Read the exact number of bytes required to fill `cursor`.
///
- /// This is equivalent to the [`read_exact`](Read::read_exact) method, except that it is passed a [`ReadBuf`] rather than `[u8]` to
+ /// This is equivalent to the [`read_exact`](Read::read_exact) method, except that it is passed a [`BorrowedCursor`] rather than `[u8]` to
/// allow use with uninitialized buffers.
#[unstable(feature = "read_buf", issue = "78485")]
- fn read_buf_exact(&mut self, buf: &mut ReadBuf<'_>) -> Result<()> {
- while buf.remaining() > 0 {
- let prev_filled = buf.filled().len();
- match self.read_buf(buf) {
+ fn read_buf_exact(&mut self, mut cursor: BorrowedCursor<'_>) -> Result<()> {
+ while cursor.capacity() > 0 {
+ let prev_written = cursor.written();
+ match self.read_buf(cursor.reborrow()) {
Ok(()) => {}
Err(e) if e.kind() == ErrorKind::Interrupted => continue,
Err(e) => return Err(e),
}
- if buf.filled().len() == prev_filled {
+ if cursor.written() == prev_written {
return Err(Error::new(ErrorKind::UnexpectedEof, "failed to fill buffer"));
}
}
@@ -883,6 +890,10 @@ pub trait Read {
/// The yielded item is [`Ok`] if a byte was successfully read and [`Err`]
/// otherwise. EOF is mapped to returning [`None`] from this iterator.
///
+ /// The default implementation calls `read` for each byte,
+ /// which can be very inefficient for data that's not in memory,
+ /// such as [`File`]. Consider using a [`BufReader`] in such cases.
+ ///
/// # Examples
///
/// [`File`]s implement `Read`:
@@ -895,10 +906,11 @@ pub trait Read {
/// ```no_run
/// use std::io;
/// use std::io::prelude::*;
+ /// use std::io::BufReader;
/// use std::fs::File;
///
/// fn main() -> io::Result<()> {
- /// let f = File::open("foo.txt")?;
+ /// let f = BufReader::new(File::open("foo.txt")?);
///
/// for byte in f.bytes() {
/// println!("{}", byte.unwrap());
@@ -1028,8 +1040,6 @@ pub trait Read {
/// # Examples
///
/// ```no_run
-/// #![feature(io_read_to_string)]
-///
/// # use std::io;
/// fn main() -> io::Result<()> {
/// let stdin = io::read_to_string(io::stdin())?;
@@ -1038,7 +1048,7 @@ pub trait Read {
/// Ok(())
/// }
/// ```
-#[unstable(feature = "io_read_to_string", issue = "80218")]
+#[stable(feature = "io_read_to_string", since = "1.65.0")]
pub fn read_to_string<R: Read>(mut reader: R) -> Result<String> {
let mut buf = String::new();
reader.read_to_string(&mut buf)?;
@@ -2582,50 +2592,48 @@ impl<T: Read> Read for Take<T> {
Ok(n)
}
- fn read_buf(&mut self, buf: &mut ReadBuf<'_>) -> Result<()> {
+ fn read_buf(&mut self, mut buf: BorrowedCursor<'_>) -> Result<()> {
// Don't call into inner reader at all at EOF because it may still block
if self.limit == 0 {
return Ok(());
}
- let prev_filled = buf.filled_len();
-
- if self.limit <= buf.remaining() as u64 {
+ if self.limit <= buf.capacity() as u64 {
// if we just use an as cast to convert, limit may wrap around on a 32 bit target
let limit = cmp::min(self.limit, usize::MAX as u64) as usize;
- let extra_init = cmp::min(limit as usize, buf.initialized_len() - buf.filled_len());
+ let extra_init = cmp::min(limit as usize, buf.init_ref().len());
// SAFETY: no uninit data is written to ibuf
- let ibuf = unsafe { &mut buf.unfilled_mut()[..limit] };
+ let ibuf = unsafe { &mut buf.as_mut()[..limit] };
- let mut sliced_buf = ReadBuf::uninit(ibuf);
+ let mut sliced_buf: BorrowedBuf<'_> = ibuf.into();
// SAFETY: extra_init bytes of ibuf are known to be initialized
unsafe {
- sliced_buf.assume_init(extra_init);
+ sliced_buf.set_init(extra_init);
}
- self.inner.read_buf(&mut sliced_buf)?;
+ let mut cursor = sliced_buf.unfilled();
+ self.inner.read_buf(cursor.reborrow())?;
- let new_init = sliced_buf.initialized_len();
- let filled = sliced_buf.filled_len();
+ let new_init = cursor.init_ref().len();
+ let filled = sliced_buf.len();
- // sliced_buf / ibuf must drop here
+ // cursor / sliced_buf / ibuf must drop here
- // SAFETY: new_init bytes of buf's unfilled buffer have been initialized
unsafe {
- buf.assume_init(new_init);
+ // SAFETY: filled bytes have been filled and therefore initialized
+ buf.advance(filled);
+ // SAFETY: new_init bytes of buf's unfilled buffer have been initialized
+ buf.set_init(new_init);
}
- buf.add_filled(filled);
-
self.limit -= filled as u64;
} else {
- self.inner.read_buf(buf)?;
-
- //inner may unfill
- self.limit -= buf.filled_len().saturating_sub(prev_filled) as u64;
+ let written = buf.written();
+ self.inner.read_buf(buf.reborrow())?;
+ self.limit -= (buf.written() - written) as u64;
}
Ok(())
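The `Take` and slice implementations above show the two ways of writing through a `BorrowedCursor`: the safe `append` for already-initialized data, and `advance`/`set_init` when writing through the raw `MaybeUninit` view. A minimal sketch (not part of this diff, nightly `read_buf` feature assumed) of a custom reader overriding `read_buf` with the `append` path:

```rust
// Sketch: overriding `read_buf` with the new cursor API. `append` both
// initializes and fills, so no unsafe code is needed here.
#![feature(read_buf)]
use std::io::{self, BorrowedCursor, Read};

struct ZeroReader;

impl Read for ZeroReader {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        buf.fill(0);
        Ok(buf.len())
    }

    fn read_buf(&mut self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
        let zeros = vec![0u8; cursor.capacity()];
        cursor.append(&zeros);
        Ok(())
    }
}
```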
diff --git a/library/std/src/io/readbuf.rs b/library/std/src/io/readbuf.rs
index 78d1113f8..4800eeda0 100644
--- a/library/std/src/io/readbuf.rs
+++ b/library/std/src/io/readbuf.rs
@@ -3,11 +3,12 @@
#[cfg(test)]
mod tests;
-use crate::cmp;
use crate::fmt::{self, Debug, Formatter};
-use crate::mem::MaybeUninit;
+use crate::io::{Result, Write};
+use crate::mem::{self, MaybeUninit};
+use crate::{cmp, ptr};
-/// A wrapper around a byte buffer that is incrementally filled and initialized.
+/// A borrowed byte buffer which is incrementally filled and initialized.
///
/// This type is a sort of "double cursor". It tracks three regions in the buffer: a region at the beginning of the
/// buffer that has been logically filled with data, a region that has been initialized at some point but not yet
@@ -20,230 +21,289 @@ use crate::mem::MaybeUninit;
/// [ filled | unfilled ]
/// [ initialized | uninitialized ]
/// ```
-pub struct ReadBuf<'a> {
- buf: &'a mut [MaybeUninit<u8>],
+///
+/// A `BorrowedBuf` is created around some existing data (or capacity for data) via a unique reference
+/// (`&mut`). The `BorrowedBuf` can be configured (e.g., using `clear` or `set_init`), but cannot be
+/// directly written. To write into the buffer, use `unfilled` to create a `BorrowedCursor`. The cursor
+/// has write-only access to the unfilled portion of the buffer (you can think of it as a
+/// write-only iterator).
+///
+/// The lifetime `'data` is a bound on the lifetime of the underlying data.
+pub struct BorrowedBuf<'data> {
+ /// The buffer's underlying data.
+ buf: &'data mut [MaybeUninit<u8>],
+ /// The length of `self.buf` which is known to be filled.
filled: usize,
- initialized: usize,
+ /// The length of `self.buf` which is known to be initialized.
+ init: usize,
}
-impl Debug for ReadBuf<'_> {
+impl Debug for BorrowedBuf<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
- f.debug_struct("ReadBuf")
- .field("init", &self.initialized())
+ f.debug_struct("BorrowedBuf")
+ .field("init", &self.init)
.field("filled", &self.filled)
.field("capacity", &self.capacity())
.finish()
}
}
-impl<'a> ReadBuf<'a> {
- /// Creates a new `ReadBuf` from a fully initialized buffer.
+/// Create a new `BorrowedBuf` from a fully initialized slice.
+impl<'data> From<&'data mut [u8]> for BorrowedBuf<'data> {
#[inline]
- pub fn new(buf: &'a mut [u8]) -> ReadBuf<'a> {
- let len = buf.len();
+ fn from(slice: &'data mut [u8]) -> BorrowedBuf<'data> {
+ let len = slice.len();
- ReadBuf {
- //SAFETY: initialized data never becoming uninitialized is an invariant of ReadBuf
- buf: unsafe { (buf as *mut [u8]).as_uninit_slice_mut().unwrap() },
+ BorrowedBuf {
+ // SAFETY: initialized data never becoming uninitialized is an invariant of BorrowedBuf
+ buf: unsafe { (slice as *mut [u8]).as_uninit_slice_mut().unwrap() },
filled: 0,
- initialized: len,
+ init: len,
}
}
+}
- /// Creates a new `ReadBuf` from a fully uninitialized buffer.
- ///
- /// Use `assume_init` if part of the buffer is known to be already initialized.
+/// Create a new `BorrowedBuf` from an uninitialized buffer.
+///
+/// Use `set_init` if part of the buffer is known to be already initialized.
+impl<'data> From<&'data mut [MaybeUninit<u8>]> for BorrowedBuf<'data> {
#[inline]
- pub fn uninit(buf: &'a mut [MaybeUninit<u8>]) -> ReadBuf<'a> {
- ReadBuf { buf, filled: 0, initialized: 0 }
+ fn from(buf: &'data mut [MaybeUninit<u8>]) -> BorrowedBuf<'data> {
+ BorrowedBuf { buf, filled: 0, init: 0 }
}
+}
+impl<'data> BorrowedBuf<'data> {
/// Returns the total capacity of the buffer.
#[inline]
pub fn capacity(&self) -> usize {
self.buf.len()
}
+ /// Returns the length of the filled part of the buffer.
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.filled
+ }
+
+ /// Returns the length of the initialized part of the buffer.
+ #[inline]
+ pub fn init_len(&self) -> usize {
+ self.init
+ }
+
/// Returns a shared reference to the filled portion of the buffer.
#[inline]
pub fn filled(&self) -> &[u8] {
- //SAFETY: We only slice the filled part of the buffer, which is always valid
+ // SAFETY: We only slice the filled part of the buffer, which is always valid
unsafe { MaybeUninit::slice_assume_init_ref(&self.buf[0..self.filled]) }
}
- /// Returns a mutable reference to the filled portion of the buffer.
+ /// Returns a cursor over the unfilled part of the buffer.
#[inline]
- pub fn filled_mut(&mut self) -> &mut [u8] {
- //SAFETY: We only slice the filled part of the buffer, which is always valid
- unsafe { MaybeUninit::slice_assume_init_mut(&mut self.buf[0..self.filled]) }
+ pub fn unfilled<'this>(&'this mut self) -> BorrowedCursor<'this> {
+ BorrowedCursor {
+ start: self.filled,
+ // SAFETY: we never assign into `BorrowedCursor::buf`, so treating its
+ // lifetime covariantly is safe.
+ buf: unsafe {
+ mem::transmute::<&'this mut BorrowedBuf<'data>, &'this mut BorrowedBuf<'this>>(self)
+ },
+ }
}
- /// Returns a shared reference to the initialized portion of the buffer.
+ /// Clears the buffer, resetting the filled region to empty.
///
- /// This includes the filled portion.
+ /// The number of initialized bytes is not changed, and the contents of the buffer are not modified.
#[inline]
- pub fn initialized(&self) -> &[u8] {
- //SAFETY: We only slice the initialized part of the buffer, which is always valid
- unsafe { MaybeUninit::slice_assume_init_ref(&self.buf[0..self.initialized]) }
+ pub fn clear(&mut self) -> &mut Self {
+ self.filled = 0;
+ self
}
- /// Returns a mutable reference to the initialized portion of the buffer.
+ /// Asserts that the first `n` bytes of the buffer are initialized.
///
- /// This includes the filled portion.
- #[inline]
- pub fn initialized_mut(&mut self) -> &mut [u8] {
- //SAFETY: We only slice the initialized part of the buffer, which is always valid
- unsafe { MaybeUninit::slice_assume_init_mut(&mut self.buf[0..self.initialized]) }
- }
-
- /// Returns a mutable reference to the unfilled part of the buffer without ensuring that it has been fully
- /// initialized.
+ /// `BorrowedBuf` assumes that bytes are never de-initialized, so this method does nothing when called with fewer
+ /// bytes than are already known to be initialized.
///
/// # Safety
///
- /// The caller must not de-initialize portions of the buffer that have already been initialized.
+ /// The caller must ensure that the first `n` unfilled bytes of the buffer have already been initialized.
#[inline]
- pub unsafe fn unfilled_mut(&mut self) -> &mut [MaybeUninit<u8>] {
- &mut self.buf[self.filled..]
+ pub unsafe fn set_init(&mut self, n: usize) -> &mut Self {
+ self.init = cmp::max(self.init, n);
+ self
}
+}
- /// Returns a mutable reference to the uninitialized part of the buffer.
+/// A writeable view of the unfilled portion of a [`BorrowedBuf`](BorrowedBuf).
+///
+/// Provides access to the initialized and uninitialized parts of the underlying `BorrowedBuf`.
+/// Data can be written directly to the cursor by using [`append`](BorrowedCursor::append) or
+/// indirectly by getting a slice of part or all of the cursor and writing into the slice. In the
+/// indirect case, the caller must call [`advance`](BorrowedCursor::advance) after writing to inform
+/// the cursor how many bytes have been written.
+///
+/// Once data is written to the cursor, it becomes part of the filled portion of the underlying
+/// `BorrowedBuf` and can no longer be accessed or re-written by the cursor. I.e., the cursor tracks
+/// the unfilled part of the underlying `BorrowedBuf`.
+///
+/// The lifetime `'a` is a bound on the lifetime of the underlying buffer (which means it is a bound
+/// on the data in that buffer by transitivity).
+#[derive(Debug)]
+pub struct BorrowedCursor<'a> {
+ /// The underlying buffer.
+ // Safety invariant: we treat the type of buf as covariant in the lifetime of `BorrowedBuf` when
+ // we create a `BorrowedCursor`. This is only safe if we never replace `buf` by assigning into
+ // it, so don't do that!
+ buf: &'a mut BorrowedBuf<'a>,
+ /// The length of the filled portion of the underlying buffer at the time of the cursor's
+ /// creation.
+ start: usize,
+}
+
+impl<'a> BorrowedCursor<'a> {
+ /// Reborrow this cursor by cloning it with a smaller lifetime.
///
- /// It is safe to uninitialize any of these bytes.
+ /// Since a cursor maintains unique access to its underlying buffer, the borrowed cursor is
+ /// not accessible while the new cursor exists.
#[inline]
- pub fn uninitialized_mut(&mut self) -> &mut [MaybeUninit<u8>] {
- &mut self.buf[self.initialized..]
+ pub fn reborrow<'this>(&'this mut self) -> BorrowedCursor<'this> {
+ BorrowedCursor {
+ // SAFETY: we never assign into `BorrowedCursor::buf`, so treating its
+ // lifetime covariantly is safe.
+ buf: unsafe {
+ mem::transmute::<&'this mut BorrowedBuf<'a>, &'this mut BorrowedBuf<'this>>(
+ self.buf,
+ )
+ },
+ start: self.start,
+ }
}
- /// Returns a mutable reference to the unfilled part of the buffer, ensuring it is fully initialized.
- ///
- /// Since `ReadBuf` tracks the region of the buffer that has been initialized, this is effectively "free" after
- /// the first use.
+ /// Returns the available space in the cursor.
#[inline]
- pub fn initialize_unfilled(&mut self) -> &mut [u8] {
- // should optimize out the assertion
- self.initialize_unfilled_to(self.remaining())
+ pub fn capacity(&self) -> usize {
+ self.buf.capacity() - self.buf.filled
}
- /// Returns a mutable reference to the first `n` bytes of the unfilled part of the buffer, ensuring it is
- /// fully initialized.
+ /// Returns the number of bytes written to this cursor since it was created from a `BorrowedBuf`.
///
- /// # Panics
- ///
- /// Panics if `self.remaining()` is less than `n`.
+ /// Note that if this cursor is a reborrowed clone of another, then the count returned is the
+ /// count written via either cursor, not the count since the cursor was reborrowed.
#[inline]
- pub fn initialize_unfilled_to(&mut self, n: usize) -> &mut [u8] {
- assert!(self.remaining() >= n);
-
- let extra_init = self.initialized - self.filled;
- // If we don't have enough initialized, do zeroing
- if n > extra_init {
- let uninit = n - extra_init;
- let unfilled = &mut self.uninitialized_mut()[0..uninit];
-
- for byte in unfilled.iter_mut() {
- byte.write(0);
- }
-
- // SAFETY: we just initialized uninit bytes, and the previous bytes were already init
- unsafe {
- self.assume_init(n);
- }
- }
-
- let filled = self.filled;
+ pub fn written(&self) -> usize {
+ self.buf.filled - self.start
+ }
- &mut self.initialized_mut()[filled..filled + n]
+ /// Returns a shared reference to the initialized portion of the cursor.
+ #[inline]
+ pub fn init_ref(&self) -> &[u8] {
+ // SAFETY: We only slice the initialized part of the buffer, which is always valid
+ unsafe { MaybeUninit::slice_assume_init_ref(&self.buf.buf[self.buf.filled..self.buf.init]) }
}
- /// Returns the number of bytes at the end of the slice that have not yet been filled.
+ /// Returns a mutable reference to the initialized portion of the cursor.
#[inline]
- pub fn remaining(&self) -> usize {
- self.capacity() - self.filled
+ pub fn init_mut(&mut self) -> &mut [u8] {
+ // SAFETY: We only slice the initialized part of the buffer, which is always valid
+ unsafe {
+ MaybeUninit::slice_assume_init_mut(&mut self.buf.buf[self.buf.filled..self.buf.init])
+ }
}
- /// Clears the buffer, resetting the filled region to empty.
+ /// Returns a mutable reference to the uninitialized part of the cursor.
///
- /// The number of initialized bytes is not changed, and the contents of the buffer are not modified.
+ /// It is safe to uninitialize any of these bytes.
#[inline]
- pub fn clear(&mut self) -> &mut Self {
- self.set_filled(0) // The assertion in `set_filled` is optimized out
+ pub fn uninit_mut(&mut self) -> &mut [MaybeUninit<u8>] {
+ &mut self.buf.buf[self.buf.init..]
}
- /// Increases the size of the filled region of the buffer.
+ /// Returns a mutable reference to the whole cursor.
///
- /// The number of initialized bytes is not changed.
- ///
- /// # Panics
+ /// # Safety
///
- /// Panics if the filled region of the buffer would become larger than the initialized region.
+ /// The caller must not uninitialize any bytes in the initialized portion of the cursor.
#[inline]
- pub fn add_filled(&mut self, n: usize) -> &mut Self {
- self.set_filled(self.filled + n)
+ pub unsafe fn as_mut(&mut self) -> &mut [MaybeUninit<u8>] {
+ &mut self.buf.buf[self.buf.filled..]
}
- /// Sets the size of the filled region of the buffer.
- ///
- /// The number of initialized bytes is not changed.
+ /// Advance the cursor by asserting that `n` bytes have been filled.
///
- /// Note that this can be used to *shrink* the filled region of the buffer in addition to growing it (for
- /// example, by a `Read` implementation that compresses data in-place).
+ /// After advancing, the `n` bytes are no longer accessible via the cursor and can only be
+ /// accessed via the underlying buffer. I.e., the buffer's filled portion grows by `n` elements
+ /// and its unfilled portion (and the capacity of this cursor) shrinks by `n` elements.
///
- /// # Panics
+ /// # Safety
///
- /// Panics if the filled region of the buffer would become larger than the initialized region.
+ /// The caller must ensure that the first `n` bytes of the cursor have been properly
+ /// initialized.
+ #[inline]
+ pub unsafe fn advance(&mut self, n: usize) -> &mut Self {
+ self.buf.filled += n;
+ self.buf.init = cmp::max(self.buf.init, self.buf.filled);
+ self
+ }
+
+ /// Initializes all bytes in the cursor.
#[inline]
- pub fn set_filled(&mut self, n: usize) -> &mut Self {
- assert!(n <= self.initialized);
+ pub fn ensure_init(&mut self) -> &mut Self {
+ let uninit = self.uninit_mut();
+ // SAFETY: 0 is a valid value for MaybeUninit<u8> and the length matches the allocation
+ // since it comes from a slice reference.
+ unsafe {
+ ptr::write_bytes(uninit.as_mut_ptr(), 0, uninit.len());
+ }
+ self.buf.init = self.buf.capacity();
- self.filled = n;
self
}
- /// Asserts that the first `n` unfilled bytes of the buffer are initialized.
+ /// Asserts that the first `n` unfilled bytes of the cursor are initialized.
///
- /// `ReadBuf` assumes that bytes are never de-initialized, so this method does nothing when called with fewer
- /// bytes than are already known to be initialized.
+ /// `BorrowedBuf` assumes that bytes are never de-initialized, so this method does nothing when
+ /// called with fewer bytes than are already known to be initialized.
///
/// # Safety
///
- /// The caller must ensure that the first `n` unfilled bytes of the buffer have already been initialized.
+ /// The caller must ensure that the first `n` bytes of the cursor have already been initialized.
#[inline]
- pub unsafe fn assume_init(&mut self, n: usize) -> &mut Self {
- self.initialized = cmp::max(self.initialized, self.filled + n);
+ pub unsafe fn set_init(&mut self, n: usize) -> &mut Self {
+ self.buf.init = cmp::max(self.buf.init, self.buf.filled + n);
self
}
- /// Appends data to the buffer, advancing the written position and possibly also the initialized position.
+ /// Appends data to the cursor, advancing the position within its buffer.
///
/// # Panics
///
- /// Panics if `self.remaining()` is less than `buf.len()`.
+ /// Panics if `self.capacity()` is less than `buf.len()`.
#[inline]
pub fn append(&mut self, buf: &[u8]) {
- assert!(self.remaining() >= buf.len());
+ assert!(self.capacity() >= buf.len());
// SAFETY: we do not de-initialize any of the elements of the slice
unsafe {
- MaybeUninit::write_slice(&mut self.unfilled_mut()[..buf.len()], buf);
+ MaybeUninit::write_slice(&mut self.as_mut()[..buf.len()], buf);
}
// SAFETY: We just added the entire contents of buf to the filled section.
unsafe {
- self.assume_init(buf.len());
+ self.set_init(buf.len());
}
- self.add_filled(buf.len());
+ self.buf.filled += buf.len();
}
+}
- /// Returns the amount of bytes that have been filled.
- #[inline]
- pub fn filled_len(&self) -> usize {
- self.filled
+impl<'a> Write for BorrowedCursor<'a> {
+ fn write(&mut self, buf: &[u8]) -> Result<usize> {
+ self.append(buf);
+ Ok(buf.len())
}
- /// Returns the amount of bytes that have been initialized.
- #[inline]
- pub fn initialized_len(&self) -> usize {
- self.initialized
+ fn flush(&mut self) -> Result<()> {
+ Ok(())
}
}
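A minimal usage sketch (not part of this diff, nightly `read_buf` feature assumed) of the `BorrowedBuf`/`BorrowedCursor` pair documented above: build a buffer over uninitialized storage, write through a cursor, then read back the filled region.

```rust
// Sketch: basic BorrowedBuf / BorrowedCursor usage.
#![feature(read_buf)]
use std::io::BorrowedBuf;
use std::mem::MaybeUninit;

fn main() {
    let storage: &mut [_] = &mut [MaybeUninit::<u8>::uninit(); 8];
    let mut buf: BorrowedBuf<'_> = storage.into();

    let mut cursor = buf.unfilled();
    cursor.append(&[1, 2, 3]);
    assert_eq!(cursor.written(), 3);
    drop(cursor);

    // The appended bytes are now the filled (and initialized) region.
    assert_eq!(buf.filled(), [1, 2, 3]);
    assert_eq!(buf.init_len(), 3);
    assert_eq!(buf.len(), 3);
}
```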
diff --git a/library/std/src/io/readbuf/tests.rs b/library/std/src/io/readbuf/tests.rs
index 3b7a5a56d..cc1b423f2 100644
--- a/library/std/src/io/readbuf/tests.rs
+++ b/library/std/src/io/readbuf/tests.rs
@@ -1,181 +1,175 @@
-use super::ReadBuf;
+use super::BorrowedBuf;
use crate::mem::MaybeUninit;
-/// Test that ReadBuf has the correct numbers when created with new
+/// Test that BorrowedBuf has the correct numbers when created with new
#[test]
fn new() {
- let mut buf = [0; 16];
- let rbuf = ReadBuf::new(&mut buf);
+ let buf: &mut [_] = &mut [0; 16];
+ let mut rbuf: BorrowedBuf<'_> = buf.into();
- assert_eq!(rbuf.filled_len(), 0);
- assert_eq!(rbuf.initialized_len(), 16);
+ assert_eq!(rbuf.filled().len(), 0);
+ assert_eq!(rbuf.init_len(), 16);
assert_eq!(rbuf.capacity(), 16);
- assert_eq!(rbuf.remaining(), 16);
+ assert_eq!(rbuf.unfilled().capacity(), 16);
}
-/// Test that ReadBuf has the correct numbers when created with uninit
+/// Test that BorrowedBuf has the correct numbers when created with uninit
#[test]
fn uninit() {
- let mut buf = [MaybeUninit::uninit(); 16];
- let rbuf = ReadBuf::uninit(&mut buf);
+ let buf: &mut [_] = &mut [MaybeUninit::uninit(); 16];
+ let mut rbuf: BorrowedBuf<'_> = buf.into();
- assert_eq!(rbuf.filled_len(), 0);
- assert_eq!(rbuf.initialized_len(), 0);
+ assert_eq!(rbuf.filled().len(), 0);
+ assert_eq!(rbuf.init_len(), 0);
assert_eq!(rbuf.capacity(), 16);
- assert_eq!(rbuf.remaining(), 16);
+ assert_eq!(rbuf.unfilled().capacity(), 16);
}
#[test]
fn initialize_unfilled() {
- let mut buf = [MaybeUninit::uninit(); 16];
- let mut rbuf = ReadBuf::uninit(&mut buf);
+ let buf: &mut [_] = &mut [MaybeUninit::uninit(); 16];
+ let mut rbuf: BorrowedBuf<'_> = buf.into();
- rbuf.initialize_unfilled();
+ rbuf.unfilled().ensure_init();
- assert_eq!(rbuf.initialized_len(), 16);
+ assert_eq!(rbuf.init_len(), 16);
}
#[test]
-fn initialize_unfilled_to() {
- let mut buf = [MaybeUninit::uninit(); 16];
- let mut rbuf = ReadBuf::uninit(&mut buf);
+fn advance_filled() {
+ let buf: &mut [_] = &mut [0; 16];
+ let mut rbuf: BorrowedBuf<'_> = buf.into();
- rbuf.initialize_unfilled_to(8);
-
- assert_eq!(rbuf.initialized_len(), 8);
-
- rbuf.initialize_unfilled_to(4);
-
- assert_eq!(rbuf.initialized_len(), 8);
-
- rbuf.set_filled(8);
-
- rbuf.initialize_unfilled_to(6);
-
- assert_eq!(rbuf.initialized_len(), 14);
-
- rbuf.initialize_unfilled_to(8);
-
- assert_eq!(rbuf.initialized_len(), 16);
-}
-
-#[test]
-fn add_filled() {
- let mut buf = [0; 16];
- let mut rbuf = ReadBuf::new(&mut buf);
-
- rbuf.add_filled(1);
-
- assert_eq!(rbuf.filled_len(), 1);
- assert_eq!(rbuf.remaining(), 15);
-}
-
-#[test]
-#[should_panic]
-fn add_filled_panic() {
- let mut buf = [MaybeUninit::uninit(); 16];
- let mut rbuf = ReadBuf::uninit(&mut buf);
-
- rbuf.add_filled(1);
-}
-
-#[test]
-fn set_filled() {
- let mut buf = [0; 16];
- let mut rbuf = ReadBuf::new(&mut buf);
-
- rbuf.set_filled(16);
-
- assert_eq!(rbuf.filled_len(), 16);
- assert_eq!(rbuf.remaining(), 0);
-
- rbuf.set_filled(6);
-
- assert_eq!(rbuf.filled_len(), 6);
- assert_eq!(rbuf.remaining(), 10);
-}
-
-#[test]
-#[should_panic]
-fn set_filled_panic() {
- let mut buf = [MaybeUninit::uninit(); 16];
- let mut rbuf = ReadBuf::uninit(&mut buf);
+ unsafe {
+ rbuf.unfilled().advance(1);
+ }
- rbuf.set_filled(16);
+ assert_eq!(rbuf.filled().len(), 1);
+ assert_eq!(rbuf.unfilled().capacity(), 15);
}
#[test]
fn clear() {
- let mut buf = [255; 16];
- let mut rbuf = ReadBuf::new(&mut buf);
+ let buf: &mut [_] = &mut [255; 16];
+ let mut rbuf: BorrowedBuf<'_> = buf.into();
- rbuf.set_filled(16);
+ unsafe {
+ rbuf.unfilled().advance(16);
+ }
- assert_eq!(rbuf.filled_len(), 16);
- assert_eq!(rbuf.remaining(), 0);
+ assert_eq!(rbuf.filled().len(), 16);
+ assert_eq!(rbuf.unfilled().capacity(), 0);
rbuf.clear();
- assert_eq!(rbuf.filled_len(), 0);
- assert_eq!(rbuf.remaining(), 16);
+ assert_eq!(rbuf.filled().len(), 0);
+ assert_eq!(rbuf.unfilled().capacity(), 16);
- assert_eq!(rbuf.initialized(), [255; 16]);
+ assert_eq!(rbuf.unfilled().init_ref(), [255; 16]);
}
#[test]
-fn assume_init() {
- let mut buf = [MaybeUninit::uninit(); 16];
- let mut rbuf = ReadBuf::uninit(&mut buf);
+fn set_init() {
+ let buf: &mut [_] = &mut [MaybeUninit::uninit(); 16];
+ let mut rbuf: BorrowedBuf<'_> = buf.into();
unsafe {
- rbuf.assume_init(8);
+ rbuf.set_init(8);
}
- assert_eq!(rbuf.initialized_len(), 8);
+ assert_eq!(rbuf.init_len(), 8);
- rbuf.add_filled(4);
+ unsafe {
+ rbuf.unfilled().advance(4);
+ }
unsafe {
- rbuf.assume_init(2);
+ rbuf.set_init(2);
}
- assert_eq!(rbuf.initialized_len(), 8);
+ assert_eq!(rbuf.init_len(), 8);
unsafe {
- rbuf.assume_init(8);
+ rbuf.set_init(8);
}
- assert_eq!(rbuf.initialized_len(), 12);
+ assert_eq!(rbuf.init_len(), 8);
}
#[test]
fn append() {
- let mut buf = [MaybeUninit::new(255); 16];
- let mut rbuf = ReadBuf::uninit(&mut buf);
+ let buf: &mut [_] = &mut [MaybeUninit::new(255); 16];
+ let mut rbuf: BorrowedBuf<'_> = buf.into();
- rbuf.append(&[0; 8]);
+ rbuf.unfilled().append(&[0; 8]);
- assert_eq!(rbuf.initialized_len(), 8);
- assert_eq!(rbuf.filled_len(), 8);
+ assert_eq!(rbuf.init_len(), 8);
+ assert_eq!(rbuf.filled().len(), 8);
assert_eq!(rbuf.filled(), [0; 8]);
rbuf.clear();
- rbuf.append(&[1; 16]);
+ rbuf.unfilled().append(&[1; 16]);
- assert_eq!(rbuf.initialized_len(), 16);
- assert_eq!(rbuf.filled_len(), 16);
+ assert_eq!(rbuf.init_len(), 16);
+ assert_eq!(rbuf.filled().len(), 16);
assert_eq!(rbuf.filled(), [1; 16]);
}
#[test]
-fn filled_mut() {
- let mut buf = [0; 16];
- let mut rbuf = ReadBuf::new(&mut buf);
+fn reborrow_written() {
+ let buf: &mut [_] = &mut [MaybeUninit::new(0); 32];
+ let mut buf: BorrowedBuf<'_> = buf.into();
+
+ let mut cursor = buf.unfilled();
+ cursor.append(&[1; 16]);
+
+ let mut cursor2 = cursor.reborrow();
+ cursor2.append(&[2; 16]);
+
+ assert_eq!(cursor2.written(), 32);
+ assert_eq!(cursor.written(), 32);
+
+ assert_eq!(buf.unfilled().written(), 0);
+ assert_eq!(buf.init_len(), 32);
+ assert_eq!(buf.filled().len(), 32);
+ let filled = buf.filled();
+ assert_eq!(&filled[..16], [1; 16]);
+ assert_eq!(&filled[16..], [2; 16]);
+}
+
+#[test]
+fn cursor_set_init() {
+ let buf: &mut [_] = &mut [MaybeUninit::uninit(); 16];
+ let mut rbuf: BorrowedBuf<'_> = buf.into();
+
+ unsafe {
+ rbuf.unfilled().set_init(8);
+ }
- rbuf.add_filled(8);
+ assert_eq!(rbuf.init_len(), 8);
+ assert_eq!(rbuf.unfilled().init_ref().len(), 8);
+ assert_eq!(rbuf.unfilled().init_mut().len(), 8);
+ assert_eq!(rbuf.unfilled().uninit_mut().len(), 8);
+ assert_eq!(unsafe { rbuf.unfilled().as_mut() }.len(), 16);
+
+ unsafe {
+ rbuf.unfilled().advance(4);
+ }
- let filled = rbuf.filled().to_vec();
+ unsafe {
+ rbuf.unfilled().set_init(2);
+ }
+
+ assert_eq!(rbuf.init_len(), 8);
+
+ unsafe {
+ rbuf.unfilled().set_init(8);
+ }
- assert_eq!(&*filled, &*rbuf.filled_mut());
+ assert_eq!(rbuf.init_len(), 12);
+ assert_eq!(rbuf.unfilled().init_ref().len(), 8);
+ assert_eq!(rbuf.unfilled().init_mut().len(), 8);
+ assert_eq!(rbuf.unfilled().uninit_mut().len(), 4);
+ assert_eq!(unsafe { rbuf.unfilled().as_mut() }.len(), 12);
}
diff --git a/library/std/src/io/stdio.rs b/library/std/src/io/stdio.rs
index 4d3736f79..1141a957d 100644
--- a/library/std/src/io/stdio.rs
+++ b/library/std/src/io/stdio.rs
@@ -7,8 +7,8 @@ use crate::io::prelude::*;
use crate::cell::{Cell, RefCell};
use crate::fmt;
+use crate::fs::File;
use crate::io::{self, BufReader, IoSlice, IoSliceMut, LineWriter, Lines};
-use crate::pin::Pin;
use crate::sync::atomic::{AtomicBool, Ordering};
use crate::sync::{Arc, Mutex, MutexGuard, OnceLock};
use crate::sys::stdio;
@@ -526,7 +526,7 @@ pub struct Stdout {
// FIXME: this should be LineWriter or BufWriter depending on the state of
// stdout (tty or not). Note that if this is not line buffered it
// should also flush-on-panic or some form of flush-on-abort.
- inner: Pin<&'static ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>>,
+ inner: &'static ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>,
}
/// A locked reference to the [`Stdout`] handle.
@@ -603,22 +603,27 @@ static STDOUT: OnceLock<ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>> = OnceLo
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stdout() -> Stdout {
Stdout {
- inner: Pin::static_ref(&STDOUT).get_or_init_pin(
- || unsafe { ReentrantMutex::new(RefCell::new(LineWriter::new(stdout_raw()))) },
- |mutex| unsafe { mutex.init() },
- ),
+ inner: STDOUT
+ .get_or_init(|| ReentrantMutex::new(RefCell::new(LineWriter::new(stdout_raw())))),
}
}
+// Flush the data and disable buffering during shutdown
+// by replacing the line writer by one with zero
+// buffering capacity.
pub fn cleanup() {
- if let Some(instance) = STDOUT.get() {
- // Flush the data and disable buffering during shutdown
- // by replacing the line writer by one with zero
- // buffering capacity.
+ let mut initialized = false;
+ let stdout = STDOUT.get_or_init(|| {
+ initialized = true;
+ ReentrantMutex::new(RefCell::new(LineWriter::with_capacity(0, stdout_raw())))
+ });
+
+ if !initialized {
+ // The buffer was previously initialized, so overwrite it here.
// We use try_lock() instead of lock(), because someone
// might have leaked a StdoutLock, which would
// otherwise cause a deadlock here.
- if let Some(lock) = Pin::static_ref(instance).try_lock() {
+ if let Some(lock) = stdout.try_lock() {
*lock.borrow_mut() = LineWriter::with_capacity(0, stdout_raw());
}
}
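The `stdout()` and `cleanup()` changes above drop the pinned `get_or_init_pin` dance in favor of plain `OnceLock::get_or_init`, now that `ReentrantMutex` no longer needs pinned initialization. A minimal sketch of the same lazy-initialization pattern (not part of this diff, shown with an ordinary `Mutex` and assuming a toolchain where `std::sync::OnceLock` is available):

```rust
// Sketch: lazy one-time initialization with OnceLock, the pattern that now
// backs STDOUT (shown here with an ordinary Mutex).
use std::sync::{Mutex, OnceLock};

static GLOBAL: OnceLock<Mutex<Vec<u8>>> = OnceLock::new();

fn global_buffer() -> &'static Mutex<Vec<u8>> {
    // The closure runs at most once; every caller gets the same reference.
    GLOBAL.get_or_init(|| Mutex::new(Vec::new()))
}
```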
@@ -761,7 +766,7 @@ impl fmt::Debug for StdoutLock<'_> {
/// standard library or via raw Windows API calls, will fail.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Stderr {
- inner: Pin<&'static ReentrantMutex<RefCell<StderrRaw>>>,
+ inner: &'static ReentrantMutex<RefCell<StderrRaw>>,
}
/// A locked reference to the [`Stderr`] handle.
@@ -834,16 +839,12 @@ pub struct StderrLock<'a> {
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stderr() -> Stderr {
// Note that unlike `stdout()` we don't use `at_exit` here to register a
- // destructor. Stderr is not buffered , so there's no need to run a
+ // destructor. Stderr is not buffered, so there's no need to run a
// destructor for flushing the buffer
- static INSTANCE: OnceLock<ReentrantMutex<RefCell<StderrRaw>>> = OnceLock::new();
+ static INSTANCE: ReentrantMutex<RefCell<StderrRaw>> =
+ ReentrantMutex::new(RefCell::new(stderr_raw()));
- Stderr {
- inner: Pin::static_ref(&INSTANCE).get_or_init_pin(
- || unsafe { ReentrantMutex::new(RefCell::new(stderr_raw())) },
- |mutex| unsafe { mutex.init() },
- ),
- }
+ Stderr { inner: &INSTANCE }
}
impl Stderr {
@@ -986,17 +987,31 @@ pub fn set_output_capture(sink: Option<LocalStream>) -> Option<LocalStream> {
/// otherwise. `label` identifies the stream in a panic message.
///
/// This function is used to print error messages, so it takes extra
-/// care to avoid causing a panic when `local_s` is unusable.
-/// For instance, if the TLS key for the local stream is
-/// already destroyed, or if the local stream is locked by another
-/// thread, it will just fall back to the global stream.
+/// care to avoid causing a panic when `OUTPUT_CAPTURE` is unusable.
+/// For instance, if the TLS key for output capturing is already destroyed, or
+/// if the local stream is in use by another thread, it will just fall back to
+/// the global stream.
///
/// However, if the actual I/O causes an error, this function does panic.
+///
+/// Writing to non-blocking stdout/stderr can cause an error, which will lead
+/// this function to panic.
fn print_to<T>(args: fmt::Arguments<'_>, global_s: fn() -> T, label: &str)
where
T: Write,
{
- if OUTPUT_CAPTURE_USED.load(Ordering::Relaxed)
+ if print_to_buffer_if_capture_used(args) {
+ // Successfully wrote to capture buffer.
+ return;
+ }
+
+ if let Err(e) = global_s().write_fmt(args) {
+ panic!("failed printing to {label}: {e}");
+ }
+}
+
+fn print_to_buffer_if_capture_used(args: fmt::Arguments<'_>) -> bool {
+ OUTPUT_CAPTURE_USED.load(Ordering::Relaxed)
&& OUTPUT_CAPTURE.try_with(|s| {
// Note that we completely remove a local sink to write to in case
// our printing recursively panics/prints, so the recursive
@@ -1006,16 +1021,49 @@ where
s.set(Some(w));
})
}) == Ok(Some(()))
- {
- // Successfully wrote to capture buffer.
+}
+
+/// Used by impl Termination for Result to print an error after `main` or a test
+/// has returned. Should avoid panicking, although we can't help it if one of
+/// the Display impls inside args decides to.
+pub(crate) fn attempt_print_to_stderr(args: fmt::Arguments<'_>) {
+ if print_to_buffer_if_capture_used(args) {
return;
}
- if let Err(e) = global_s().write_fmt(args) {
- panic!("failed printing to {label}: {e}");
- }
+ // Ignore error if the write fails, for example because stderr is already
+ // closed. There is not much point panicking at this point.
+ let _ = stderr().write_fmt(args);
+}
+
+/// Trait to determine if a descriptor/handle refers to a terminal/tty.
+#[unstable(feature = "is_terminal", issue = "98070")]
+pub trait IsTerminal: crate::sealed::Sealed {
+ /// Returns `true` if the descriptor/handle refers to a terminal/tty.
+ ///
+ /// On platforms where Rust does not know how to detect a terminal yet, this will return
+ /// `false`. This will also return `false` if an unexpected error occurred, such as from
+ /// passing an invalid file descriptor.
+ fn is_terminal(&self) -> bool;
+}
+
+macro_rules! impl_is_terminal {
+ ($($t:ty),*$(,)?) => {$(
+ #[unstable(feature = "sealed", issue = "none")]
+ impl crate::sealed::Sealed for $t {}
+
+ #[unstable(feature = "is_terminal", issue = "98070")]
+ impl IsTerminal for $t {
+ #[inline]
+ fn is_terminal(&self) -> bool {
+ crate::sys::io::is_terminal(self)
+ }
+ }
+ )*}
}
+impl_is_terminal!(File, Stdin, StdinLock<'_>, Stdout, StdoutLock<'_>, Stderr, StderrLock<'_>);
+
#[unstable(
feature = "print_internals",
reason = "implementation detail which may disappear or be replaced at any time",
diff --git a/library/std/src/io/tests.rs b/library/std/src/io/tests.rs
index f357f33ec..f4a886d88 100644
--- a/library/std/src/io/tests.rs
+++ b/library/std/src/io/tests.rs
@@ -1,4 +1,4 @@
-use super::{repeat, Cursor, ReadBuf, SeekFrom};
+use super::{repeat, BorrowedBuf, Cursor, SeekFrom};
use crate::cmp::{self, min};
use crate::io::{self, IoSlice, IoSliceMut};
use crate::io::{BufRead, BufReader, Read, Seek, Write};
@@ -94,7 +94,7 @@ fn read_to_end() {
assert_eq!(c.read_to_end(&mut v).unwrap(), 1);
assert_eq!(v, b"1");
- let cap = 1024 * 1024;
+ let cap = if cfg!(miri) { 1024 } else { 1024 * 1024 };
let data = (0..cap).map(|i| (i / 3) as u8).collect::<Vec<_>>();
let mut v = Vec::new();
let (a, b) = data.split_at(data.len() / 2);
@@ -159,24 +159,24 @@ fn read_exact_slice() {
#[test]
fn read_buf_exact() {
- let mut buf = [0; 4];
- let mut buf = ReadBuf::new(&mut buf);
+ let buf: &mut [_] = &mut [0; 4];
+ let mut buf: BorrowedBuf<'_> = buf.into();
let mut c = Cursor::new(&b""[..]);
- assert_eq!(c.read_buf_exact(&mut buf).unwrap_err().kind(), io::ErrorKind::UnexpectedEof);
+ assert_eq!(c.read_buf_exact(buf.unfilled()).unwrap_err().kind(), io::ErrorKind::UnexpectedEof);
let mut c = Cursor::new(&b"123456789"[..]);
- c.read_buf_exact(&mut buf).unwrap();
+ c.read_buf_exact(buf.unfilled()).unwrap();
assert_eq!(buf.filled(), b"1234");
buf.clear();
- c.read_buf_exact(&mut buf).unwrap();
+ c.read_buf_exact(buf.unfilled()).unwrap();
assert_eq!(buf.filled(), b"5678");
buf.clear();
- assert_eq!(c.read_buf_exact(&mut buf).unwrap_err().kind(), io::ErrorKind::UnexpectedEof);
+ assert_eq!(c.read_buf_exact(buf.unfilled()).unwrap_err().kind(), io::ErrorKind::UnexpectedEof);
}
#[test]
@@ -309,6 +309,7 @@ fn chain_zero_length_read_is_not_eof() {
#[bench]
#[cfg_attr(target_os = "emscripten", ignore)]
+#[cfg_attr(miri, ignore)] // Miri isn't fast...
fn bench_read_to_end(b: &mut test::Bencher) {
b.iter(|| {
let mut lr = repeat(1).take(10000000);
@@ -614,10 +615,10 @@ fn bench_take_read(b: &mut test::Bencher) {
#[bench]
fn bench_take_read_buf(b: &mut test::Bencher) {
b.iter(|| {
- let mut buf = [MaybeUninit::uninit(); 64];
+ let buf: &mut [_] = &mut [MaybeUninit::uninit(); 64];
- let mut rbuf = ReadBuf::uninit(&mut buf);
+ let mut buf: BorrowedBuf<'_> = buf.into();
- [255; 128].take(64).read_buf(&mut rbuf).unwrap();
+ [255; 128].take(64).read_buf(buf.unfilled()).unwrap();
});
}
diff --git a/library/std/src/io/util.rs b/library/std/src/io/util.rs
index c1300cd67..f076ee092 100644
--- a/library/std/src/io/util.rs
+++ b/library/std/src/io/util.rs
@@ -5,7 +5,7 @@ mod tests;
use crate::fmt;
use crate::io::{
- self, BufRead, IoSlice, IoSliceMut, Read, ReadBuf, Seek, SeekFrom, SizeHint, Write,
+ self, BorrowedCursor, BufRead, IoSlice, IoSliceMut, Read, Seek, SeekFrom, SizeHint, Write,
};
/// A reader which is always at EOF.
@@ -47,7 +47,7 @@ impl Read for Empty {
}
#[inline]
- fn read_buf(&mut self, _buf: &mut ReadBuf<'_>) -> io::Result<()> {
+ fn read_buf(&mut self, _cursor: BorrowedCursor<'_>) -> io::Result<()> {
Ok(())
}
}
@@ -130,21 +130,19 @@ impl Read for Repeat {
Ok(buf.len())
}
- fn read_buf(&mut self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
+ fn read_buf(&mut self, mut buf: BorrowedCursor<'_>) -> io::Result<()> {
// SAFETY: No uninit bytes are being written
- for slot in unsafe { buf.unfilled_mut() } {
+ for slot in unsafe { buf.as_mut() } {
slot.write(self.byte);
}
- let remaining = buf.remaining();
+ let remaining = buf.capacity();
// SAFETY: the entire unfilled portion of buf has been initialized
unsafe {
- buf.assume_init(remaining);
+ buf.advance(remaining);
}
- buf.add_filled(remaining);
-
Ok(())
}
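On the implementer side, the rewritten `Repeat::read_buf` above shows the new
cursor protocol: write the uninitialized slots, then `advance` past them, rather
than the old `assume_init` plus `add_filled` pair. A sketch of a custom reader
following the same pattern (illustrative only; nightly `read_buf` feature assumed):

    #![feature(read_buf)]
    use std::io::{self, BorrowedCursor, Read};

    struct Zeroes;

    impl Read for Zeroes {
        fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
            buf.fill(0);
            Ok(buf.len())
        }

        fn read_buf(&mut self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
            let n = cursor.capacity();
            // SAFETY: every slot in the unfilled region is written with an
            // initialized byte below.
            for slot in unsafe { cursor.as_mut() } {
                slot.write(0);
            }
            // SAFETY: all `n` remaining bytes were just initialized.
            unsafe { cursor.advance(n) };
            Ok(())
        }
    }
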
diff --git a/library/std/src/io/util/tests.rs b/library/std/src/io/util/tests.rs
index 08972a59a..ce5e2c9da 100644
--- a/library/std/src/io/util/tests.rs
+++ b/library/std/src/io/util/tests.rs
@@ -1,7 +1,7 @@
use crate::cmp::{max, min};
use crate::io::prelude::*;
use crate::io::{
- copy, empty, repeat, sink, BufWriter, Empty, ReadBuf, Repeat, Result, SeekFrom, Sink,
+ copy, empty, repeat, sink, BorrowedBuf, BufWriter, Empty, Repeat, Result, SeekFrom, Sink,
DEFAULT_BUF_SIZE,
};
@@ -79,29 +79,29 @@ fn empty_reads() {
assert_eq!(e.read(&mut [0; 1024]).unwrap(), 0);
assert_eq!(e.by_ref().read(&mut [0; 1024]).unwrap(), 0);
- let mut buf = [];
- let mut buf = ReadBuf::uninit(&mut buf);
- e.read_buf(&mut buf).unwrap();
- assert_eq!(buf.filled_len(), 0);
- assert_eq!(buf.initialized_len(), 0);
-
- let mut buf = [MaybeUninit::uninit()];
- let mut buf = ReadBuf::uninit(&mut buf);
- e.read_buf(&mut buf).unwrap();
- assert_eq!(buf.filled_len(), 0);
- assert_eq!(buf.initialized_len(), 0);
-
- let mut buf = [MaybeUninit::uninit(); 1024];
- let mut buf = ReadBuf::uninit(&mut buf);
- e.read_buf(&mut buf).unwrap();
- assert_eq!(buf.filled_len(), 0);
- assert_eq!(buf.initialized_len(), 0);
-
- let mut buf = [MaybeUninit::uninit(); 1024];
- let mut buf = ReadBuf::uninit(&mut buf);
- e.by_ref().read_buf(&mut buf).unwrap();
- assert_eq!(buf.filled_len(), 0);
- assert_eq!(buf.initialized_len(), 0);
+ let buf: &mut [MaybeUninit<_>] = &mut [];
+ let mut buf: BorrowedBuf<'_> = buf.into();
+ e.read_buf(buf.unfilled()).unwrap();
+ assert_eq!(buf.len(), 0);
+ assert_eq!(buf.init_len(), 0);
+
+ let buf: &mut [_] = &mut [MaybeUninit::uninit()];
+ let mut buf: BorrowedBuf<'_> = buf.into();
+ e.read_buf(buf.unfilled()).unwrap();
+ assert_eq!(buf.len(), 0);
+ assert_eq!(buf.init_len(), 0);
+
+ let buf: &mut [_] = &mut [MaybeUninit::uninit(); 1024];
+ let mut buf: BorrowedBuf<'_> = buf.into();
+ e.read_buf(buf.unfilled()).unwrap();
+ assert_eq!(buf.len(), 0);
+ assert_eq!(buf.init_len(), 0);
+
+ let buf: &mut [_] = &mut [MaybeUninit::uninit(); 1024];
+ let mut buf: BorrowedBuf<'_> = buf.into();
+ e.by_ref().read_buf(buf.unfilled()).unwrap();
+ assert_eq!(buf.len(), 0);
+ assert_eq!(buf.init_len(), 0);
}
#[test]
diff --git a/library/std/src/keyword_docs.rs b/library/std/src/keyword_docs.rs
index 7157b5af0..e35145c4a 100644
--- a/library/std/src/keyword_docs.rs
+++ b/library/std/src/keyword_docs.rs
@@ -1867,11 +1867,15 @@ mod type_keyword {}
/// Code or interfaces whose [memory safety] cannot be verified by the type
/// system.
///
-/// The `unsafe` keyword has two uses: to declare the existence of contracts the
-/// compiler can't check (`unsafe fn` and `unsafe trait`), and to declare that a
-/// programmer has checked that these contracts have been upheld (`unsafe {}`
-/// and `unsafe impl`, but also `unsafe fn` -- see below). They are not mutually
-/// exclusive, as can be seen in `unsafe fn`.
+/// The `unsafe` keyword has two uses:
+/// - to declare the existence of contracts the compiler can't check (`unsafe fn` and `unsafe
+/// trait`),
+/// - and to declare that a programmer has checked that these contracts have been upheld (`unsafe
+/// {}` and `unsafe impl`, but also `unsafe fn` -- see below).
+///
+/// They are not mutually exclusive, as can be seen in `unsafe fn`: the body of an `unsafe fn` is,
+/// by default, treated like an unsafe block. The `unsafe_op_in_unsafe_fn` lint can be enabled to
+/// change that.
///
/// # Unsafe abilities
///
@@ -1914,14 +1918,14 @@ mod type_keyword {}
/// - `unsafe impl`: the contract necessary to implement the trait has been
/// checked by the programmer and is guaranteed to be respected.
///
-/// `unsafe fn` also acts like an `unsafe {}` block
+/// By default, `unsafe fn` also acts like an `unsafe {}` block
/// around the code inside the function. This means it is not just a signal to
/// the caller, but also promises that the preconditions for the operations
-/// inside the function are upheld. Mixing these two meanings can be confusing
-/// and [proposal]s exist to use `unsafe {}` blocks inside such functions when
-/// making `unsafe` operations.
+/// inside the function are upheld. Mixing these two meanings can be confusing, so the
+/// `unsafe_op_in_unsafe_fn` lint can be enabled to warn against that and require explicit unsafe
+/// blocks even inside `unsafe fn`.
///
-/// See the [Rustnomicon] and the [Reference] for more informations.
+/// See the [Rustnomicon] and the [Reference] for more information.
///
/// # Examples
///
@@ -1987,13 +1991,16 @@ mod type_keyword {}
///
/// ```rust
/// # #![allow(dead_code)]
+/// #![deny(unsafe_op_in_unsafe_fn)]
+///
/// /// Dereference the given pointer.
/// ///
/// /// # Safety
/// ///
/// /// `ptr` must be aligned and must not be dangling.
/// unsafe fn deref_unchecked(ptr: *const i32) -> i32 {
-/// *ptr
+/// // SAFETY: the caller is required to ensure that `ptr` is aligned and dereferenceable.
+/// unsafe { *ptr }
/// }
///
/// let a = 3;
@@ -2003,35 +2010,118 @@ mod type_keyword {}
/// unsafe { assert_eq!(*b, deref_unchecked(b)); };
/// ```
///
-/// Traits marked as `unsafe` must be [`impl`]emented using `unsafe impl`. This
-/// makes a guarantee to other `unsafe` code that the implementation satisfies
-/// the trait's safety contract. The [Send] and [Sync] traits are examples of
-/// this behaviour in the standard library.
+/// ## `unsafe` and traits
+///
+/// The interactions of `unsafe` and traits can be surprising, so let us contrast the
+/// two combinations of safe `fn` in `unsafe trait` and `unsafe fn` in safe trait using two
+/// examples:
+///
+/// ```rust
+/// /// # Safety
+/// ///
+/// /// `make_even` must return an even number.
+/// unsafe trait MakeEven {
+/// fn make_even(&self) -> i32;
+/// }
+///
+/// // SAFETY: Our `make_even` always returns something even.
+/// unsafe impl MakeEven for i32 {
+/// fn make_even(&self) -> i32 {
+/// self << 1
+/// }
+/// }
+///
+/// fn use_make_even(x: impl MakeEven) {
+/// if x.make_even() % 2 == 1 {
+/// // SAFETY: this can never happen, because all `MakeEven` implementations
+/// // ensure that `make_even` returns something even.
+/// unsafe { std::hint::unreachable_unchecked() };
+/// }
+/// }
+/// ```
+///
+/// Note how the safety contract of the trait is upheld by the implementation, and is itself used to
+/// uphold the safety contract of the unsafe function `unreachable_unchecked` called by
+/// `use_make_even`. `make_even` itself is a safe function because its *callers* do not have to
+/// worry about any contract, only the *implementation* of `MakeEven` is required to uphold a
+/// certain contract. `use_make_even` is safe because it can use the promise made by `MakeEven`
+/// implementations to uphold the safety contract of the `unsafe fn unreachable_unchecked` it calls.
+///
+/// It is also possible to have `unsafe fn` in a regular safe `trait`:
///
/// ```rust
-/// /// Implementors of this trait must guarantee an element is always
-/// /// accessible with index 3.
-/// unsafe trait ThreeIndexable<T> {
-/// /// Returns a reference to the element with index 3 in `&self`.
-/// fn three(&self) -> &T;
+/// # #![feature(never_type)]
+/// #![deny(unsafe_op_in_unsafe_fn)]
+///
+/// trait Indexable {
+/// const LEN: usize;
+///
+/// /// # Safety
+/// ///
+/// /// The caller must ensure that `idx < LEN`.
+/// unsafe fn idx_unchecked(&self, idx: usize) -> i32;
/// }
///
-/// // The implementation of `ThreeIndexable` for `[T; 4]` is `unsafe`
-/// // because the implementor must abide by a contract the compiler cannot
-/// // check but as a programmer we know there will always be a valid element
-/// // at index 3 to access.
-/// unsafe impl<T> ThreeIndexable<T> for [T; 4] {
-/// fn three(&self) -> &T {
-/// // SAFETY: implementing the trait means there always is an element
-/// // with index 3 accessible.
-/// unsafe { self.get_unchecked(3) }
+/// // The implementation for `i32` doesn't need to do any contract reasoning.
+/// impl Indexable for i32 {
+/// const LEN: usize = 1;
+///
+/// unsafe fn idx_unchecked(&self, idx: usize) -> i32 {
+/// debug_assert_eq!(idx, 0);
+/// *self
/// }
/// }
///
-/// let a = [1, 2, 4, 8];
-/// assert_eq!(a.three(), &8);
+/// // The implementation for arrays exploits the function contract to
+/// // make use of `get_unchecked` on slices and avoid a run-time check.
+/// impl Indexable for [i32; 42] {
+/// const LEN: usize = 42;
+///
+/// unsafe fn idx_unchecked(&self, idx: usize) -> i32 {
+/// // SAFETY: As per this trait's documentation, the caller ensures
+/// // that `idx < 42`.
+/// unsafe { *self.get_unchecked(idx) }
+/// }
+/// }
+///
+/// // The implementation for the never type declares a length of 0,
+/// // which means `idx_unchecked` can never be called.
+/// impl Indexable for ! {
+/// const LEN: usize = 0;
+///
+/// unsafe fn idx_unchecked(&self, idx: usize) -> i32 {
+/// // SAFETY: As per this trait's documentation, the caller ensures
+/// // that `idx < 0`, which is impossible, so this is dead code.
+/// unsafe { std::hint::unreachable_unchecked() }
+/// }
+/// }
+///
+/// fn use_indexable<I: Indexable>(x: I, idx: usize) -> i32 {
+/// if idx < I::LEN {
+/// // SAFETY: We have checked that `idx < I::LEN`.
+/// unsafe { x.idx_unchecked(idx) }
+/// } else {
+/// panic!("index out-of-bounds")
+/// }
+/// }
/// ```
///
+/// This time, `use_indexable` is safe because it uses a run-time check to discharge the safety
+/// contract of `idx_unchecked`. Implementing `Indexable` is safe because when writing
+/// `idx_unchecked`, we don't have to worry: our *callers* need to discharge a proof obligation
+/// (like `use_indexable` does), but the *implementation* of `idx_unchecked` has no proof obligation
+/// to contend with. Of course, the implementation of `Indexable` may choose to call other unsafe
+/// operations, and then it needs an `unsafe` *block* to indicate it discharged the proof
+/// obligations of its callees. (We enabled `unsafe_op_in_unsafe_fn`, so the body of `idx_unchecked`
+/// is not implicitly an unsafe block.) For that purpose it can make use of the contract that all
+/// its callers must uphold -- the fact that `idx < LEN`.
+///
+/// Formally speaking, an `unsafe fn` in a trait is a function with *preconditions* that go beyond
+/// those encoded by the argument types (such as `idx < LEN`), whereas an `unsafe trait` can declare
+/// that some of its functions have *postconditions* that go beyond those encoded in the return type
+/// (such as returning an even integer). If a trait needs a function with both extra precondition
+/// and extra postcondition, then it needs an `unsafe fn` in an `unsafe trait`.
+///
/// [`extern`]: keyword.extern.html
/// [`trait`]: keyword.trait.html
/// [`static`]: keyword.static.html
@@ -2043,7 +2133,6 @@ mod type_keyword {}
/// [nomicon-soundness]: ../nomicon/safe-unsafe-meaning.html
/// [soundness]: https://rust-lang.github.io/unsafe-code-guidelines/glossary.html#soundness-of-code--of-a-library
/// [Reference]: ../reference/unsafety.html
-/// [proposal]: https://github.com/rust-lang/rfcs/pull/2585
/// [discussion on Rust Internals]: https://internals.rust-lang.org/t/what-does-unsafe-mean/6696
mod unsafe_keyword {}
@@ -2113,7 +2202,7 @@ mod use_keyword {}
/// Add constraints that must be upheld to use an item.
///
/// `where` allows specifying constraints on lifetime and generic parameters.
-/// The [RFC] introducing `where` contains detailed informations about the
+/// The [RFC] introducing `where` contains detailed information about the
/// keyword.
///
/// # Examples
@@ -2355,7 +2444,7 @@ mod dyn_keyword {}
/// println!("f = {f} and i = {i}");
/// ```
///
-/// See the [Reference][union] for more informations on `union`s.
+/// See the [Reference][union] for more information on `union`s.
///
/// [`struct`]: keyword.struct.html
/// [union]: ../reference/items/unions.html
diff --git a/library/std/src/lazy.rs b/library/std/src/lazy.rs
deleted file mode 100644
index f8c06c3f9..000000000
--- a/library/std/src/lazy.rs
+++ /dev/null
@@ -1 +0,0 @@
-//! Lazy values and one-time initialization of static data.
diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs
index 20d25a608..385585dad 100644
--- a/library/std/src/lib.rs
+++ b/library/std/src/lib.rs
@@ -145,8 +145,8 @@
//! abstracting over differences in common platforms, most notably Windows and
//! Unix derivatives.
//!
-//! Common types of I/O, including [files], [TCP], [UDP], are defined in the
-//! [`io`], [`fs`], and [`net`] modules.
+//! Common types of I/O, including [files], [TCP], and [UDP], are defined in
+//! the [`io`], [`fs`], and [`net`] modules.
//!
//! The [`thread`] module contains Rust's threading abstractions. [`sync`]
//! contains further primitive shared memory types, including [`atomic`] and
@@ -187,6 +187,7 @@
//! [rust-discord]: https://discord.gg/rust-lang
//! [array]: prim@array
//! [slice]: prim@slice
+
#![cfg_attr(not(feature = "restricted-std"), stable(feature = "rust1", since = "1.0.0"))]
#![cfg_attr(feature = "restricted-std", unstable(feature = "restricted_std", issue = "none"))]
#![doc(
@@ -201,25 +202,35 @@
no_global_oom_handling,
not(no_global_oom_handling)
))]
+// To run libstd tests without x.py without ending up with two copies of libstd, Miri needs to be
+// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
+// rustc itself never sets the feature, so this line has no effect there.
+#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
+// miri-test-libstd also prefers to make std use the sysroot versions of the dependencies.
+#![cfg_attr(feature = "miri-test-libstd", feature(rustc_private))]
// Don't link to std. We are std.
#![no_std]
+// Tell the compiler to link to either panic_abort or panic_unwind
+#![needs_panic_runtime]
+//
+// Lints:
#![warn(deprecated_in_future)]
#![warn(missing_docs)]
#![warn(missing_debug_implementations)]
#![allow(explicit_outlives_requirements)]
#![allow(unused_lifetimes)]
-// Tell the compiler to link to either panic_abort or panic_unwind
-#![needs_panic_runtime]
+#![deny(rustc::existing_doc_keyword)]
// Ensure that std can be linked against panic_abort despite compiled with `-C panic=unwind`
-#![cfg_attr(not(bootstrap), deny(ffi_unwind_calls))]
+#![deny(ffi_unwind_calls)]
// std may use features in a platform-specific way
#![allow(unused_features)]
+//
+// Features:
#![cfg_attr(test, feature(internal_output_capture, print_internals, update_panic_count, rt))]
#![cfg_attr(
all(target_vendor = "fortanix", target_env = "sgx"),
feature(slice_index_methods, coerce_unsized, sgx_platform)
)]
-#![deny(rustc::existing_doc_keyword)]
//
// Language features:
#![feature(alloc_error_handler)]
@@ -240,12 +251,13 @@
#![feature(doc_notable_trait)]
#![feature(dropck_eyepatch)]
#![feature(exhaustive_patterns)]
+#![feature(if_let_guard)]
#![feature(intra_doc_pointers)]
-#![feature(label_break_value)]
+#![feature(is_terminal)]
#![feature(lang_items)]
#![feature(let_chains)]
-#![feature(let_else)]
#![feature(linkage)]
+#![feature(link_cfg)]
#![feature(min_specialization)]
#![feature(must_not_suspend)]
#![feature(needs_panic_runtime)]
@@ -258,6 +270,7 @@
#![feature(staged_api)]
#![feature(thread_local)]
#![feature(try_blocks)]
+#![feature(utf8_chunks)]
//
// Library features (core):
#![feature(array_error_internals)]
@@ -267,23 +280,27 @@
#![feature(core_intrinsics)]
#![feature(cstr_from_bytes_until_nul)]
#![feature(cstr_internals)]
-#![feature(duration_checked_float)]
#![feature(duration_constants)]
+#![feature(error_generic_member_access)]
+#![feature(error_in_core)]
+#![feature(error_iter)]
#![feature(exact_size_is_empty)]
#![feature(exclusive_wrapper)]
#![feature(extend_one)]
#![feature(float_minimum_maximum)]
+#![feature(float_next_up_down)]
#![feature(hasher_prefixfree_extras)]
#![feature(hashmap_internals)]
#![feature(int_error_internals)]
-#![feature(is_some_with)]
+#![feature(is_some_and)]
#![feature(maybe_uninit_slice)]
#![feature(maybe_uninit_write_slice)]
-#![feature(mixed_integer_ops)]
#![feature(nonnull_slice_from_raw_parts)]
#![feature(panic_can_unwind)]
#![feature(panic_info_message)]
#![feature(panic_internals)]
+#![feature(pointer_byte_offsets)]
+#![feature(pointer_is_aligned)]
#![feature(portable_simd)]
#![feature(prelude_2024)]
#![feature(provide_any)]
@@ -294,6 +311,9 @@
#![feature(std_internals)]
#![feature(str_internals)]
#![feature(strict_provenance)]
+#![feature(maybe_uninit_uninit_array)]
+#![feature(const_maybe_uninit_uninit_array)]
+#![feature(const_waker)]
//
// Library features (alloc):
#![feature(alloc_layout_extra)]
@@ -329,9 +349,9 @@
#![feature(trace_macros)]
//
// Only used in tests/benchmarks:
-#![feature(bench_black_box)]
//
// Only for const-ness:
+#![feature(const_collections_with_hasher)]
#![feature(const_io_structs)]
#![feature(const_ip)]
#![feature(const_ipv4)]
@@ -509,9 +529,6 @@ pub mod process;
pub mod sync;
pub mod time;
-#[unstable(feature = "once_cell", issue = "74465")]
-pub mod lazy;
-
// Pull in `std_float` crate into libstd. The contents of
// `std_float` are in a different repository: rust-lang/portable-simd.
#[path = "../../portable-simd/crates/std_float/src/lib.rs"]
@@ -576,6 +593,7 @@ pub mod alloc;
// Private support modules
mod panicking;
+mod personality;
#[path = "../../backtrace/src/lib.rs"]
#[allow(dead_code, unused_attributes)]
diff --git a/library/std/src/macros.rs b/library/std/src/macros.rs
index 0cb21ef53..6e4ba1404 100644
--- a/library/std/src/macros.rs
+++ b/library/std/src/macros.rs
@@ -27,17 +27,31 @@ macro_rules! panic {
/// necessary to use [`io::stdout().flush()`][flush] to ensure the output is emitted
/// immediately.
///
+/// The `print!` macro will lock the standard output on each call. If you call
+/// `print!` within a hot loop, this behavior may be the bottleneck of the loop.
+/// To avoid this, lock stdout with [`io::stdout().lock()`][lock]:
+/// ```
+/// use std::io::{stdout, Write};
+///
+/// let mut lock = stdout().lock();
+/// write!(lock, "hello world").unwrap();
+/// ```
+///
/// Use `print!` only for the primary output of your program. Use
/// [`eprint!`] instead to print error and progress messages.
///
/// [flush]: crate::io::Write::flush
/// [`println!`]: crate::println
/// [`eprint!`]: crate::eprint
+/// [lock]: crate::io::Stdout
///
/// # Panics
///
/// Panics if writing to `io::stdout()` fails.
///
+/// Writing to non-blocking stdout can cause an error, which will lead
+/// this macro to panic.
+///
/// # Examples
///
/// ```
@@ -75,16 +89,30 @@ macro_rules! print {
/// This macro uses the same syntax as [`format!`], but writes to the standard output instead.
/// See [`std::fmt`] for more information.
///
+/// The `println!` macro will lock the standard output on each call. If you call
+/// `println!` within a hot loop, this behavior may be the bottleneck of the loop.
+/// To avoid this, lock stdout with [`io::stdout().lock()`][lock]:
+/// ```
+/// use std::io::{stdout, Write};
+///
+/// let mut lock = stdout().lock();
+/// writeln!(lock, "hello world").unwrap();
+/// ```
+///
/// Use `println!` only for the primary output of your program. Use
/// [`eprintln!`] instead to print error and progress messages.
///
/// [`std::fmt`]: crate::fmt
/// [`eprintln!`]: crate::eprintln
+/// [lock]: crate::io::Stdout
///
/// # Panics
///
/// Panics if writing to [`io::stdout`] fails.
///
+/// Writing to non-blocking stdout can cause an error, which will lead
+/// this macro to panic.
+///
/// [`io::stdout`]: crate::io::stdout
///
/// # Examples
@@ -93,6 +121,8 @@ macro_rules! print {
/// println!(); // prints just a newline
/// println!("hello there!");
/// println!("format {} arguments", "some");
+/// let local_variable = "some";
+/// println!("format {local_variable} arguments");
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
@@ -123,6 +153,9 @@ macro_rules! println {
///
/// Panics if writing to `io::stderr` fails.
///
+/// Writing to non-blocking stderr can cause an error, which will lead
+/// this macro to panic.
+///
/// # Examples
///
/// ```
@@ -155,6 +188,9 @@ macro_rules! eprint {
///
/// Panics if writing to `io::stderr` fails.
///
+/// Writing to non-blocking stderr can cause an error, which will lead
+/// this macro to panic.
+///
/// # Examples
///
/// ```
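The locking note added to `print!` and `println!` above matters mostly in hot
loops; an illustrative sketch (not part of the diff) of taking the lock once
outside the loop:

    use std::io::{stdout, Write};

    fn print_all(lines: &[String]) {
        // Lock stdout once instead of re-locking on every println! call.
        let mut out = stdout().lock();
        for line in lines {
            writeln!(out, "{line}").unwrap();
        }
    }
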
diff --git a/library/std/src/net/addr.rs b/library/std/src/net/addr.rs
deleted file mode 100644
index 53fee952a..000000000
--- a/library/std/src/net/addr.rs
+++ /dev/null
@@ -1,988 +0,0 @@
-#[cfg(all(test, not(target_os = "emscripten")))]
-mod tests;
-
-use crate::cmp::Ordering;
-use crate::fmt;
-use crate::hash;
-use crate::io::{self, Write};
-use crate::iter;
-use crate::mem;
-use crate::net::{IpAddr, Ipv4Addr, Ipv6Addr};
-use crate::option;
-use crate::slice;
-use crate::sys::net::netc as c;
-use crate::sys_common::net::LookupHost;
-use crate::sys_common::{FromInner, IntoInner};
-use crate::vec;
-
-/// An internet socket address, either IPv4 or IPv6.
-///
-/// Internet socket addresses consist of an [IP address], a 16-bit port number, as well
-/// as possibly some version-dependent additional information. See [`SocketAddrV4`]'s and
-/// [`SocketAddrV6`]'s respective documentation for more details.
-///
-/// The size of a `SocketAddr` instance may vary depending on the target operating
-/// system.
-///
-/// [IP address]: IpAddr
-///
-/// # Examples
-///
-/// ```
-/// use std::net::{IpAddr, Ipv4Addr, SocketAddr};
-///
-/// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
-///
-/// assert_eq!("127.0.0.1:8080".parse(), Ok(socket));
-/// assert_eq!(socket.port(), 8080);
-/// assert_eq!(socket.is_ipv4(), true);
-/// ```
-#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
-#[stable(feature = "rust1", since = "1.0.0")]
-pub enum SocketAddr {
- /// An IPv4 socket address.
- #[stable(feature = "rust1", since = "1.0.0")]
- V4(#[stable(feature = "rust1", since = "1.0.0")] SocketAddrV4),
- /// An IPv6 socket address.
- #[stable(feature = "rust1", since = "1.0.0")]
- V6(#[stable(feature = "rust1", since = "1.0.0")] SocketAddrV6),
-}
-
-/// An IPv4 socket address.
-///
-/// IPv4 socket addresses consist of an [`IPv4` address] and a 16-bit port number, as
-/// stated in [IETF RFC 793].
-///
-/// See [`SocketAddr`] for a type encompassing both IPv4 and IPv6 socket addresses.
-///
-/// The size of a `SocketAddrV4` struct may vary depending on the target operating
-/// system. Do not assume that this type has the same memory layout as the underlying
-/// system representation.
-///
-/// [IETF RFC 793]: https://tools.ietf.org/html/rfc793
-/// [`IPv4` address]: Ipv4Addr
-///
-/// # Examples
-///
-/// ```
-/// use std::net::{Ipv4Addr, SocketAddrV4};
-///
-/// let socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080);
-///
-/// assert_eq!("127.0.0.1:8080".parse(), Ok(socket));
-/// assert_eq!(socket.ip(), &Ipv4Addr::new(127, 0, 0, 1));
-/// assert_eq!(socket.port(), 8080);
-/// ```
-#[derive(Copy, Clone, Eq, PartialEq)]
-#[stable(feature = "rust1", since = "1.0.0")]
-pub struct SocketAddrV4 {
- ip: Ipv4Addr,
- port: u16,
-}
-
-/// An IPv6 socket address.
-///
-/// IPv6 socket addresses consist of an [`IPv6` address], a 16-bit port number, as well
-/// as fields containing the traffic class, the flow label, and a scope identifier
-/// (see [IETF RFC 2553, Section 3.3] for more details).
-///
-/// See [`SocketAddr`] for a type encompassing both IPv4 and IPv6 socket addresses.
-///
-/// The size of a `SocketAddrV6` struct may vary depending on the target operating
-/// system. Do not assume that this type has the same memory layout as the underlying
-/// system representation.
-///
-/// [IETF RFC 2553, Section 3.3]: https://tools.ietf.org/html/rfc2553#section-3.3
-/// [`IPv6` address]: Ipv6Addr
-///
-/// # Examples
-///
-/// ```
-/// use std::net::{Ipv6Addr, SocketAddrV6};
-///
-/// let socket = SocketAddrV6::new(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1), 8080, 0, 0);
-///
-/// assert_eq!("[2001:db8::1]:8080".parse(), Ok(socket));
-/// assert_eq!(socket.ip(), &Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1));
-/// assert_eq!(socket.port(), 8080);
-/// ```
-#[derive(Copy, Clone, Eq, PartialEq)]
-#[stable(feature = "rust1", since = "1.0.0")]
-pub struct SocketAddrV6 {
- ip: Ipv6Addr,
- port: u16,
- flowinfo: u32,
- scope_id: u32,
-}
-
-impl SocketAddr {
- /// Creates a new socket address from an [IP address] and a port number.
- ///
- /// [IP address]: IpAddr
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{IpAddr, Ipv4Addr, SocketAddr};
- ///
- /// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
- /// assert_eq!(socket.ip(), IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
- /// assert_eq!(socket.port(), 8080);
- /// ```
- #[stable(feature = "ip_addr", since = "1.7.0")]
- #[must_use]
- #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
- pub const fn new(ip: IpAddr, port: u16) -> SocketAddr {
- match ip {
- IpAddr::V4(a) => SocketAddr::V4(SocketAddrV4::new(a, port)),
- IpAddr::V6(a) => SocketAddr::V6(SocketAddrV6::new(a, port, 0, 0)),
- }
- }
-
- /// Returns the IP address associated with this socket address.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{IpAddr, Ipv4Addr, SocketAddr};
- ///
- /// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
- /// assert_eq!(socket.ip(), IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
- /// ```
- #[must_use]
- #[stable(feature = "ip_addr", since = "1.7.0")]
- #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
- pub const fn ip(&self) -> IpAddr {
- match *self {
- SocketAddr::V4(ref a) => IpAddr::V4(*a.ip()),
- SocketAddr::V6(ref a) => IpAddr::V6(*a.ip()),
- }
- }
-
- /// Changes the IP address associated with this socket address.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{IpAddr, Ipv4Addr, SocketAddr};
- ///
- /// let mut socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
- /// socket.set_ip(IpAddr::V4(Ipv4Addr::new(10, 10, 0, 1)));
- /// assert_eq!(socket.ip(), IpAddr::V4(Ipv4Addr::new(10, 10, 0, 1)));
- /// ```
- #[stable(feature = "sockaddr_setters", since = "1.9.0")]
- pub fn set_ip(&mut self, new_ip: IpAddr) {
- // `match (*self, new_ip)` would have us mutate a copy of self only to throw it away.
- match (self, new_ip) {
- (&mut SocketAddr::V4(ref mut a), IpAddr::V4(new_ip)) => a.set_ip(new_ip),
- (&mut SocketAddr::V6(ref mut a), IpAddr::V6(new_ip)) => a.set_ip(new_ip),
- (self_, new_ip) => *self_ = Self::new(new_ip, self_.port()),
- }
- }
-
- /// Returns the port number associated with this socket address.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{IpAddr, Ipv4Addr, SocketAddr};
- ///
- /// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
- /// assert_eq!(socket.port(), 8080);
- /// ```
- #[must_use]
- #[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
- pub const fn port(&self) -> u16 {
- match *self {
- SocketAddr::V4(ref a) => a.port(),
- SocketAddr::V6(ref a) => a.port(),
- }
- }
-
- /// Changes the port number associated with this socket address.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{IpAddr, Ipv4Addr, SocketAddr};
- ///
- /// let mut socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
- /// socket.set_port(1025);
- /// assert_eq!(socket.port(), 1025);
- /// ```
- #[stable(feature = "sockaddr_setters", since = "1.9.0")]
- pub fn set_port(&mut self, new_port: u16) {
- match *self {
- SocketAddr::V4(ref mut a) => a.set_port(new_port),
- SocketAddr::V6(ref mut a) => a.set_port(new_port),
- }
- }
-
- /// Returns [`true`] if the [IP address] in this `SocketAddr` is an
- /// [`IPv4` address], and [`false`] otherwise.
- ///
- /// [IP address]: IpAddr
- /// [`IPv4` address]: IpAddr::V4
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{IpAddr, Ipv4Addr, SocketAddr};
- ///
- /// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
- /// assert_eq!(socket.is_ipv4(), true);
- /// assert_eq!(socket.is_ipv6(), false);
- /// ```
- #[must_use]
- #[stable(feature = "sockaddr_checker", since = "1.16.0")]
- #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
- pub const fn is_ipv4(&self) -> bool {
- matches!(*self, SocketAddr::V4(_))
- }
-
- /// Returns [`true`] if the [IP address] in this `SocketAddr` is an
- /// [`IPv6` address], and [`false`] otherwise.
- ///
- /// [IP address]: IpAddr
- /// [`IPv6` address]: IpAddr::V6
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{IpAddr, Ipv6Addr, SocketAddr};
- ///
- /// let socket = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 65535, 0, 1)), 8080);
- /// assert_eq!(socket.is_ipv4(), false);
- /// assert_eq!(socket.is_ipv6(), true);
- /// ```
- #[must_use]
- #[stable(feature = "sockaddr_checker", since = "1.16.0")]
- #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
- pub const fn is_ipv6(&self) -> bool {
- matches!(*self, SocketAddr::V6(_))
- }
-}
-
-impl SocketAddrV4 {
- /// Creates a new socket address from an [`IPv4` address] and a port number.
- ///
- /// [`IPv4` address]: Ipv4Addr
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{SocketAddrV4, Ipv4Addr};
- ///
- /// let socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080);
- /// ```
- #[stable(feature = "rust1", since = "1.0.0")]
- #[must_use]
- #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
- pub const fn new(ip: Ipv4Addr, port: u16) -> SocketAddrV4 {
- SocketAddrV4 { ip, port }
- }
-
- /// Returns the IP address associated with this socket address.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{SocketAddrV4, Ipv4Addr};
- ///
- /// let socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080);
- /// assert_eq!(socket.ip(), &Ipv4Addr::new(127, 0, 0, 1));
- /// ```
- #[must_use]
- #[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
- pub const fn ip(&self) -> &Ipv4Addr {
- &self.ip
- }
-
- /// Changes the IP address associated with this socket address.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{SocketAddrV4, Ipv4Addr};
- ///
- /// let mut socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080);
- /// socket.set_ip(Ipv4Addr::new(192, 168, 0, 1));
- /// assert_eq!(socket.ip(), &Ipv4Addr::new(192, 168, 0, 1));
- /// ```
- #[stable(feature = "sockaddr_setters", since = "1.9.0")]
- pub fn set_ip(&mut self, new_ip: Ipv4Addr) {
- self.ip = new_ip;
- }
-
- /// Returns the port number associated with this socket address.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{SocketAddrV4, Ipv4Addr};
- ///
- /// let socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080);
- /// assert_eq!(socket.port(), 8080);
- /// ```
- #[must_use]
- #[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
- pub const fn port(&self) -> u16 {
- self.port
- }
-
- /// Changes the port number associated with this socket address.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{SocketAddrV4, Ipv4Addr};
- ///
- /// let mut socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080);
- /// socket.set_port(4242);
- /// assert_eq!(socket.port(), 4242);
- /// ```
- #[stable(feature = "sockaddr_setters", since = "1.9.0")]
- pub fn set_port(&mut self, new_port: u16) {
- self.port = new_port;
- }
-}
-
-impl SocketAddrV6 {
- /// Creates a new socket address from an [`IPv6` address], a 16-bit port number,
- /// and the `flowinfo` and `scope_id` fields.
- ///
- /// For more information on the meaning and layout of the `flowinfo` and `scope_id`
- /// parameters, see [IETF RFC 2553, Section 3.3].
- ///
- /// [IETF RFC 2553, Section 3.3]: https://tools.ietf.org/html/rfc2553#section-3.3
- /// [`IPv6` address]: Ipv6Addr
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{SocketAddrV6, Ipv6Addr};
- ///
- /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0);
- /// ```
- #[stable(feature = "rust1", since = "1.0.0")]
- #[must_use]
- #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
- pub const fn new(ip: Ipv6Addr, port: u16, flowinfo: u32, scope_id: u32) -> SocketAddrV6 {
- SocketAddrV6 { ip, port, flowinfo, scope_id }
- }
-
- /// Returns the IP address associated with this socket address.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{SocketAddrV6, Ipv6Addr};
- ///
- /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0);
- /// assert_eq!(socket.ip(), &Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1));
- /// ```
- #[must_use]
- #[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
- pub const fn ip(&self) -> &Ipv6Addr {
- &self.ip
- }
-
- /// Changes the IP address associated with this socket address.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{SocketAddrV6, Ipv6Addr};
- ///
- /// let mut socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0);
- /// socket.set_ip(Ipv6Addr::new(76, 45, 0, 0, 0, 0, 0, 0));
- /// assert_eq!(socket.ip(), &Ipv6Addr::new(76, 45, 0, 0, 0, 0, 0, 0));
- /// ```
- #[stable(feature = "sockaddr_setters", since = "1.9.0")]
- pub fn set_ip(&mut self, new_ip: Ipv6Addr) {
- self.ip = new_ip;
- }
-
- /// Returns the port number associated with this socket address.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{SocketAddrV6, Ipv6Addr};
- ///
- /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0);
- /// assert_eq!(socket.port(), 8080);
- /// ```
- #[must_use]
- #[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
- pub const fn port(&self) -> u16 {
- self.port
- }
-
- /// Changes the port number associated with this socket address.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{SocketAddrV6, Ipv6Addr};
- ///
- /// let mut socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0);
- /// socket.set_port(4242);
- /// assert_eq!(socket.port(), 4242);
- /// ```
- #[stable(feature = "sockaddr_setters", since = "1.9.0")]
- pub fn set_port(&mut self, new_port: u16) {
- self.port = new_port;
- }
-
- /// Returns the flow information associated with this address.
- ///
- /// This information corresponds to the `sin6_flowinfo` field in C's `netinet/in.h`,
- /// as specified in [IETF RFC 2553, Section 3.3].
- /// It combines information about the flow label and the traffic class as specified
- /// in [IETF RFC 2460], respectively [Section 6] and [Section 7].
- ///
- /// [IETF RFC 2553, Section 3.3]: https://tools.ietf.org/html/rfc2553#section-3.3
- /// [IETF RFC 2460]: https://tools.ietf.org/html/rfc2460
- /// [Section 6]: https://tools.ietf.org/html/rfc2460#section-6
- /// [Section 7]: https://tools.ietf.org/html/rfc2460#section-7
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{SocketAddrV6, Ipv6Addr};
- ///
- /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 10, 0);
- /// assert_eq!(socket.flowinfo(), 10);
- /// ```
- #[must_use]
- #[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
- pub const fn flowinfo(&self) -> u32 {
- self.flowinfo
- }
-
- /// Changes the flow information associated with this socket address.
- ///
- /// See [`SocketAddrV6::flowinfo`]'s documentation for more details.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{SocketAddrV6, Ipv6Addr};
- ///
- /// let mut socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 10, 0);
- /// socket.set_flowinfo(56);
- /// assert_eq!(socket.flowinfo(), 56);
- /// ```
- #[stable(feature = "sockaddr_setters", since = "1.9.0")]
- pub fn set_flowinfo(&mut self, new_flowinfo: u32) {
- self.flowinfo = new_flowinfo;
- }
-
- /// Returns the scope ID associated with this address.
- ///
- /// This information corresponds to the `sin6_scope_id` field in C's `netinet/in.h`,
- /// as specified in [IETF RFC 2553, Section 3.3].
- ///
- /// [IETF RFC 2553, Section 3.3]: https://tools.ietf.org/html/rfc2553#section-3.3
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{SocketAddrV6, Ipv6Addr};
- ///
- /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 78);
- /// assert_eq!(socket.scope_id(), 78);
- /// ```
- #[must_use]
- #[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
- pub const fn scope_id(&self) -> u32 {
- self.scope_id
- }
-
- /// Changes the scope ID associated with this socket address.
- ///
- /// See [`SocketAddrV6::scope_id`]'s documentation for more details.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{SocketAddrV6, Ipv6Addr};
- ///
- /// let mut socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 78);
- /// socket.set_scope_id(42);
- /// assert_eq!(socket.scope_id(), 42);
- /// ```
- #[stable(feature = "sockaddr_setters", since = "1.9.0")]
- pub fn set_scope_id(&mut self, new_scope_id: u32) {
- self.scope_id = new_scope_id;
- }
-}
-
-impl FromInner<c::sockaddr_in> for SocketAddrV4 {
- fn from_inner(addr: c::sockaddr_in) -> SocketAddrV4 {
- SocketAddrV4 { ip: Ipv4Addr::from_inner(addr.sin_addr), port: u16::from_be(addr.sin_port) }
- }
-}
-
-impl FromInner<c::sockaddr_in6> for SocketAddrV6 {
- fn from_inner(addr: c::sockaddr_in6) -> SocketAddrV6 {
- SocketAddrV6 {
- ip: Ipv6Addr::from_inner(addr.sin6_addr),
- port: u16::from_be(addr.sin6_port),
- flowinfo: addr.sin6_flowinfo,
- scope_id: addr.sin6_scope_id,
- }
- }
-}
-
-impl IntoInner<c::sockaddr_in> for SocketAddrV4 {
- fn into_inner(self) -> c::sockaddr_in {
- c::sockaddr_in {
- sin_family: c::AF_INET as c::sa_family_t,
- sin_port: self.port.to_be(),
- sin_addr: self.ip.into_inner(),
- ..unsafe { mem::zeroed() }
- }
- }
-}
-
-impl IntoInner<c::sockaddr_in6> for SocketAddrV6 {
- fn into_inner(self) -> c::sockaddr_in6 {
- c::sockaddr_in6 {
- sin6_family: c::AF_INET6 as c::sa_family_t,
- sin6_port: self.port.to_be(),
- sin6_addr: self.ip.into_inner(),
- sin6_flowinfo: self.flowinfo,
- sin6_scope_id: self.scope_id,
- ..unsafe { mem::zeroed() }
- }
- }
-}
-
-#[stable(feature = "ip_from_ip", since = "1.16.0")]
-impl From<SocketAddrV4> for SocketAddr {
- /// Converts a [`SocketAddrV4`] into a [`SocketAddr::V4`].
- fn from(sock4: SocketAddrV4) -> SocketAddr {
- SocketAddr::V4(sock4)
- }
-}
-
-#[stable(feature = "ip_from_ip", since = "1.16.0")]
-impl From<SocketAddrV6> for SocketAddr {
- /// Converts a [`SocketAddrV6`] into a [`SocketAddr::V6`].
- fn from(sock6: SocketAddrV6) -> SocketAddr {
- SocketAddr::V6(sock6)
- }
-}
-
-#[stable(feature = "addr_from_into_ip", since = "1.17.0")]
-impl<I: Into<IpAddr>> From<(I, u16)> for SocketAddr {
- /// Converts a tuple struct (Into<[`IpAddr`]>, `u16`) into a [`SocketAddr`].
- ///
- /// This conversion creates a [`SocketAddr::V4`] for an [`IpAddr::V4`]
- /// and creates a [`SocketAddr::V6`] for an [`IpAddr::V6`].
- ///
- /// `u16` is treated as port of the newly created [`SocketAddr`].
- fn from(pieces: (I, u16)) -> SocketAddr {
- SocketAddr::new(pieces.0.into(), pieces.1)
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl fmt::Display for SocketAddr {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- match *self {
- SocketAddr::V4(ref a) => a.fmt(f),
- SocketAddr::V6(ref a) => a.fmt(f),
- }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl fmt::Debug for SocketAddr {
- fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::Display::fmt(self, fmt)
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl fmt::Display for SocketAddrV4 {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- // Fast path: if there's no alignment stuff, write to the output buffer
- // directly
- if f.precision().is_none() && f.width().is_none() {
- write!(f, "{}:{}", self.ip(), self.port())
- } else {
- const IPV4_SOCKET_BUF_LEN: usize = (3 * 4) // the segments
- + 3 // the separators
- + 1 + 5; // the port
- let mut buf = [0; IPV4_SOCKET_BUF_LEN];
- let mut buf_slice = &mut buf[..];
-
- // Unwrap is fine because writing to a sufficiently-sized
- // buffer is infallible
- write!(buf_slice, "{}:{}", self.ip(), self.port()).unwrap();
- let len = IPV4_SOCKET_BUF_LEN - buf_slice.len();
-
- // This unsafe is OK because we know what is being written to the buffer
- let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) };
- f.pad(buf)
- }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl fmt::Debug for SocketAddrV4 {
- fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::Display::fmt(self, fmt)
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl fmt::Display for SocketAddrV6 {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- // Fast path: if there's no alignment stuff, write to the output
- // buffer directly
- if f.precision().is_none() && f.width().is_none() {
- match self.scope_id() {
- 0 => write!(f, "[{}]:{}", self.ip(), self.port()),
- scope_id => write!(f, "[{}%{}]:{}", self.ip(), scope_id, self.port()),
- }
- } else {
- const IPV6_SOCKET_BUF_LEN: usize = (4 * 8) // The address
- + 7 // The colon separators
- + 2 // The brackets
- + 1 + 10 // The scope id
- + 1 + 5; // The port
-
- let mut buf = [0; IPV6_SOCKET_BUF_LEN];
- let mut buf_slice = &mut buf[..];
-
- match self.scope_id() {
- 0 => write!(buf_slice, "[{}]:{}", self.ip(), self.port()),
- scope_id => write!(buf_slice, "[{}%{}]:{}", self.ip(), scope_id, self.port()),
- }
- // Unwrap is fine because writing to a sufficiently-sized
- // buffer is infallible
- .unwrap();
- let len = IPV6_SOCKET_BUF_LEN - buf_slice.len();
-
- // This unsafe is OK because we know what is being written to the buffer
- let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) };
- f.pad(buf)
- }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl fmt::Debug for SocketAddrV6 {
- fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::Display::fmt(self, fmt)
- }
-}
-
-#[stable(feature = "socketaddr_ordering", since = "1.45.0")]
-impl PartialOrd for SocketAddrV4 {
- fn partial_cmp(&self, other: &SocketAddrV4) -> Option<Ordering> {
- Some(self.cmp(other))
- }
-}
-
-#[stable(feature = "socketaddr_ordering", since = "1.45.0")]
-impl PartialOrd for SocketAddrV6 {
- fn partial_cmp(&self, other: &SocketAddrV6) -> Option<Ordering> {
- Some(self.cmp(other))
- }
-}
-
-#[stable(feature = "socketaddr_ordering", since = "1.45.0")]
-impl Ord for SocketAddrV4 {
- fn cmp(&self, other: &SocketAddrV4) -> Ordering {
- self.ip().cmp(other.ip()).then(self.port().cmp(&other.port()))
- }
-}
-
-#[stable(feature = "socketaddr_ordering", since = "1.45.0")]
-impl Ord for SocketAddrV6 {
- fn cmp(&self, other: &SocketAddrV6) -> Ordering {
- self.ip().cmp(other.ip()).then(self.port().cmp(&other.port()))
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl hash::Hash for SocketAddrV4 {
- fn hash<H: hash::Hasher>(&self, s: &mut H) {
- (self.port, self.ip).hash(s)
- }
-}
-#[stable(feature = "rust1", since = "1.0.0")]
-impl hash::Hash for SocketAddrV6 {
- fn hash<H: hash::Hasher>(&self, s: &mut H) {
- (self.port, &self.ip, self.flowinfo, self.scope_id).hash(s)
- }
-}
-
-/// A trait for objects which can be converted or resolved to one or more
-/// [`SocketAddr`] values.
-///
-/// This trait is used for generic address resolution when constructing network
-/// objects. By default it is implemented for the following types:
-///
-/// * [`SocketAddr`]: [`to_socket_addrs`] is the identity function.
-///
-/// * [`SocketAddrV4`], [`SocketAddrV6`], <code>([IpAddr], [u16])</code>,
-/// <code>([Ipv4Addr], [u16])</code>, <code>([Ipv6Addr], [u16])</code>:
-/// [`to_socket_addrs`] constructs a [`SocketAddr`] trivially.
-///
-/// * <code>(&[str], [u16])</code>: <code>&[str]</code> should be either a string representation
-/// of an [`IpAddr`] address as expected by its [`FromStr`] implementation or a host
-/// name. [`u16`] is the port number.
-///
-/// * <code>&[str]</code>: the string should be either a string representation of a
-/// [`SocketAddr`] as expected by its [`FromStr`] implementation or a string like
-/// `<host_name>:<port>` pair where `<port>` is a [`u16`] value.
-///
-/// This trait allows constructing network objects like [`TcpStream`] or
-/// [`UdpSocket`] easily with values of various types for the bind/connection
-/// address. It is needed because sometimes one type is more appropriate than
-/// the other: for simple uses a string like `"localhost:12345"` is much nicer
-/// than manual construction of the corresponding [`SocketAddr`], but sometimes
-/// a [`SocketAddr`] value is *the* main source of the address, and converting it to
-/// some other type (e.g., a string) just for it to be converted back to
-/// [`SocketAddr`] in constructor methods is pointless.
-///
-/// Addresses returned by the operating system that are not IP addresses are
-/// silently ignored.
-///
-/// [`FromStr`]: crate::str::FromStr "std::str::FromStr"
-/// [`TcpStream`]: crate::net::TcpStream "net::TcpStream"
-/// [`to_socket_addrs`]: ToSocketAddrs::to_socket_addrs
-/// [`UdpSocket`]: crate::net::UdpSocket "net::UdpSocket"
-///
-/// # Examples
-///
-/// Creating a [`SocketAddr`] iterator that yields one item:
-///
-/// ```
-/// use std::net::{ToSocketAddrs, SocketAddr};
-///
-/// let addr = SocketAddr::from(([127, 0, 0, 1], 443));
-/// let mut addrs_iter = addr.to_socket_addrs().unwrap();
-///
-/// assert_eq!(Some(addr), addrs_iter.next());
-/// assert!(addrs_iter.next().is_none());
-/// ```
-///
-/// Creating a [`SocketAddr`] iterator from a hostname:
-///
-/// ```no_run
-/// use std::net::{SocketAddr, ToSocketAddrs};
-///
-/// // assuming 'localhost' resolves to 127.0.0.1
-/// let mut addrs_iter = "localhost:443".to_socket_addrs().unwrap();
-/// assert_eq!(addrs_iter.next(), Some(SocketAddr::from(([127, 0, 0, 1], 443))));
-/// assert!(addrs_iter.next().is_none());
-///
-/// // assuming 'foo' does not resolve
-/// assert!("foo:443".to_socket_addrs().is_err());
-/// ```
-///
-/// Creating a [`SocketAddr`] iterator that yields multiple items:
-///
-/// ```
-/// use std::net::{SocketAddr, ToSocketAddrs};
-///
-/// let addr1 = SocketAddr::from(([0, 0, 0, 0], 80));
-/// let addr2 = SocketAddr::from(([127, 0, 0, 1], 443));
-/// let addrs = vec![addr1, addr2];
-///
-/// let mut addrs_iter = (&addrs[..]).to_socket_addrs().unwrap();
-///
-/// assert_eq!(Some(addr1), addrs_iter.next());
-/// assert_eq!(Some(addr2), addrs_iter.next());
-/// assert!(addrs_iter.next().is_none());
-/// ```
-///
-/// Attempting to create a [`SocketAddr`] iterator from an improperly formatted
-/// socket address `&str` (missing the port):
-///
-/// ```
-/// use std::io;
-/// use std::net::ToSocketAddrs;
-///
-/// let err = "127.0.0.1".to_socket_addrs().unwrap_err();
-/// assert_eq!(err.kind(), io::ErrorKind::InvalidInput);
-/// ```
-///
-/// [`TcpStream::connect`] is an example of a function that utilizes
-/// `ToSocketAddrs` as a trait bound on its parameter in order to accept
-/// different types:
-///
-/// ```no_run
-/// use std::net::{TcpStream, Ipv4Addr};
-///
-/// let stream = TcpStream::connect(("127.0.0.1", 443));
-/// // or
-/// let stream = TcpStream::connect("127.0.0.1:443");
-/// // or
-/// let stream = TcpStream::connect((Ipv4Addr::new(127, 0, 0, 1), 443));
-/// ```
-///
-/// [`TcpStream::connect`]: crate::net::TcpStream::connect
-#[stable(feature = "rust1", since = "1.0.0")]
-pub trait ToSocketAddrs {
- /// Returned iterator over socket addresses which this type may correspond
- /// to.
- #[stable(feature = "rust1", since = "1.0.0")]
- type Iter: Iterator<Item = SocketAddr>;
-
- /// Converts this object to an iterator of resolved [`SocketAddr`]s.
- ///
- /// The returned iterator might not actually yield any values depending on the
- /// outcome of any resolution performed.
- ///
- /// Note that this function may block the current thread while resolution is
- /// performed.
- #[stable(feature = "rust1", since = "1.0.0")]
- fn to_socket_addrs(&self) -> io::Result<Self::Iter>;
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl ToSocketAddrs for SocketAddr {
- type Iter = option::IntoIter<SocketAddr>;
- fn to_socket_addrs(&self) -> io::Result<option::IntoIter<SocketAddr>> {
- Ok(Some(*self).into_iter())
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl ToSocketAddrs for SocketAddrV4 {
- type Iter = option::IntoIter<SocketAddr>;
- fn to_socket_addrs(&self) -> io::Result<option::IntoIter<SocketAddr>> {
- SocketAddr::V4(*self).to_socket_addrs()
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl ToSocketAddrs for SocketAddrV6 {
- type Iter = option::IntoIter<SocketAddr>;
- fn to_socket_addrs(&self) -> io::Result<option::IntoIter<SocketAddr>> {
- SocketAddr::V6(*self).to_socket_addrs()
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl ToSocketAddrs for (IpAddr, u16) {
- type Iter = option::IntoIter<SocketAddr>;
- fn to_socket_addrs(&self) -> io::Result<option::IntoIter<SocketAddr>> {
- let (ip, port) = *self;
- match ip {
- IpAddr::V4(ref a) => (*a, port).to_socket_addrs(),
- IpAddr::V6(ref a) => (*a, port).to_socket_addrs(),
- }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl ToSocketAddrs for (Ipv4Addr, u16) {
- type Iter = option::IntoIter<SocketAddr>;
- fn to_socket_addrs(&self) -> io::Result<option::IntoIter<SocketAddr>> {
- let (ip, port) = *self;
- SocketAddrV4::new(ip, port).to_socket_addrs()
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl ToSocketAddrs for (Ipv6Addr, u16) {
- type Iter = option::IntoIter<SocketAddr>;
- fn to_socket_addrs(&self) -> io::Result<option::IntoIter<SocketAddr>> {
- let (ip, port) = *self;
- SocketAddrV6::new(ip, port, 0, 0).to_socket_addrs()
- }
-}
-
-fn resolve_socket_addr(lh: LookupHost) -> io::Result<vec::IntoIter<SocketAddr>> {
- let p = lh.port();
- let v: Vec<_> = lh
- .map(|mut a| {
- a.set_port(p);
- a
- })
- .collect();
- Ok(v.into_iter())
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl ToSocketAddrs for (&str, u16) {
- type Iter = vec::IntoIter<SocketAddr>;
- fn to_socket_addrs(&self) -> io::Result<vec::IntoIter<SocketAddr>> {
- let (host, port) = *self;
-
- // try to parse the host as a regular IP address first
- if let Ok(addr) = host.parse::<Ipv4Addr>() {
- let addr = SocketAddrV4::new(addr, port);
- return Ok(vec![SocketAddr::V4(addr)].into_iter());
- }
- if let Ok(addr) = host.parse::<Ipv6Addr>() {
- let addr = SocketAddrV6::new(addr, port, 0, 0);
- return Ok(vec![SocketAddr::V6(addr)].into_iter());
- }
-
- resolve_socket_addr((host, port).try_into()?)
- }
-}
-
-#[stable(feature = "string_u16_to_socket_addrs", since = "1.46.0")]
-impl ToSocketAddrs for (String, u16) {
- type Iter = vec::IntoIter<SocketAddr>;
- fn to_socket_addrs(&self) -> io::Result<vec::IntoIter<SocketAddr>> {
- (&*self.0, self.1).to_socket_addrs()
- }
-}
-
-// accepts strings like 'localhost:12345'
-#[stable(feature = "rust1", since = "1.0.0")]
-impl ToSocketAddrs for str {
- type Iter = vec::IntoIter<SocketAddr>;
- fn to_socket_addrs(&self) -> io::Result<vec::IntoIter<SocketAddr>> {
- // try to parse as a regular SocketAddr first
- if let Ok(addr) = self.parse() {
- return Ok(vec![addr].into_iter());
- }
-
- resolve_socket_addr(self.try_into()?)
- }
-}
-
-#[stable(feature = "slice_to_socket_addrs", since = "1.8.0")]
-impl<'a> ToSocketAddrs for &'a [SocketAddr] {
- type Iter = iter::Cloned<slice::Iter<'a, SocketAddr>>;
-
- fn to_socket_addrs(&self) -> io::Result<Self::Iter> {
- Ok(self.iter().cloned())
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ToSocketAddrs + ?Sized> ToSocketAddrs for &T {
- type Iter = T::Iter;
- fn to_socket_addrs(&self) -> io::Result<T::Iter> {
- (**self).to_socket_addrs()
- }
-}
-
-#[stable(feature = "string_to_socket_addrs", since = "1.16.0")]
-impl ToSocketAddrs for String {
- type Iter = vec::IntoIter<SocketAddr>;
- fn to_socket_addrs(&self) -> io::Result<vec::IntoIter<SocketAddr>> {
- (&**self).to_socket_addrs()
- }
-}
diff --git a/library/std/src/net/addr/tests.rs b/library/std/src/net/addr/tests.rs
deleted file mode 100644
index 585a17451..000000000
--- a/library/std/src/net/addr/tests.rs
+++ /dev/null
@@ -1,237 +0,0 @@
-use crate::net::test::{sa4, sa6, tsa};
-use crate::net::*;
-
-#[test]
-fn to_socket_addr_ipaddr_u16() {
- let a = Ipv4Addr::new(77, 88, 21, 11);
- let p = 12345;
- let e = SocketAddr::V4(SocketAddrV4::new(a, p));
- assert_eq!(Ok(vec![e]), tsa((a, p)));
-}
-
-#[test]
-fn to_socket_addr_str_u16() {
- let a = sa4(Ipv4Addr::new(77, 88, 21, 11), 24352);
- assert_eq!(Ok(vec![a]), tsa(("77.88.21.11", 24352)));
-
- let a = sa6(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 53);
- assert_eq!(Ok(vec![a]), tsa(("2a02:6b8:0:1::1", 53)));
-
- let a = sa4(Ipv4Addr::new(127, 0, 0, 1), 23924);
- #[cfg(not(target_env = "sgx"))]
- assert!(tsa(("localhost", 23924)).unwrap().contains(&a));
- #[cfg(target_env = "sgx")]
- let _ = a;
-}
-
-#[test]
-fn to_socket_addr_str() {
- let a = sa4(Ipv4Addr::new(77, 88, 21, 11), 24352);
- assert_eq!(Ok(vec![a]), tsa("77.88.21.11:24352"));
-
- let a = sa6(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 53);
- assert_eq!(Ok(vec![a]), tsa("[2a02:6b8:0:1::1]:53"));
-
- let a = sa4(Ipv4Addr::new(127, 0, 0, 1), 23924);
- #[cfg(not(target_env = "sgx"))]
- assert!(tsa("localhost:23924").unwrap().contains(&a));
- #[cfg(target_env = "sgx")]
- let _ = a;
-}
-
-#[test]
-fn to_socket_addr_string() {
- let a = sa4(Ipv4Addr::new(77, 88, 21, 11), 24352);
- assert_eq!(Ok(vec![a]), tsa(&*format!("{}:{}", "77.88.21.11", "24352")));
- assert_eq!(Ok(vec![a]), tsa(&format!("{}:{}", "77.88.21.11", "24352")));
- assert_eq!(Ok(vec![a]), tsa(format!("{}:{}", "77.88.21.11", "24352")));
-
- let s = format!("{}:{}", "77.88.21.11", "24352");
- assert_eq!(Ok(vec![a]), tsa(s));
- // s has been moved into the tsa call
-}
-
-#[test]
-fn bind_udp_socket_bad() {
- // rust-lang/rust#53957: This is a regression test for a parsing problem
- // discovered as part of issue rust-lang/rust#23076, where we were
- // incorrectly parsing invalid input and then that would result in a
- // successful `UdpSocket` binding when we would expect failure.
- //
- // At one time, this test was written as a call to `tsa` with
- // INPUT_23076. However, that structure yields an unreliable test,
- // because it ends up passing junk input to the DNS server, and some DNS
- // servers will respond with `Ok` to such input, with the ip address of
- // the DNS server itself.
- //
- // This form of the test is more robust: even when the DNS server
- // returns its own address, it is still an error to bind a UDP socket to
- // a non-local address, and so we still get an error here in that case.
-
- const INPUT_23076: &str = "1200::AB00:1234::2552:7777:1313:34300";
-
- assert!(crate::net::UdpSocket::bind(INPUT_23076).is_err())
-}
-
-#[test]
-fn set_ip() {
- fn ip4(low: u8) -> Ipv4Addr {
- Ipv4Addr::new(77, 88, 21, low)
- }
- fn ip6(low: u16) -> Ipv6Addr {
- Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, low)
- }
-
- let mut v4 = SocketAddrV4::new(ip4(11), 80);
- assert_eq!(v4.ip(), &ip4(11));
- v4.set_ip(ip4(12));
- assert_eq!(v4.ip(), &ip4(12));
-
- let mut addr = SocketAddr::V4(v4);
- assert_eq!(addr.ip(), IpAddr::V4(ip4(12)));
- addr.set_ip(IpAddr::V4(ip4(13)));
- assert_eq!(addr.ip(), IpAddr::V4(ip4(13)));
- addr.set_ip(IpAddr::V6(ip6(14)));
- assert_eq!(addr.ip(), IpAddr::V6(ip6(14)));
-
- let mut v6 = SocketAddrV6::new(ip6(1), 80, 0, 0);
- assert_eq!(v6.ip(), &ip6(1));
- v6.set_ip(ip6(2));
- assert_eq!(v6.ip(), &ip6(2));
-
- let mut addr = SocketAddr::V6(v6);
- assert_eq!(addr.ip(), IpAddr::V6(ip6(2)));
- addr.set_ip(IpAddr::V6(ip6(3)));
- assert_eq!(addr.ip(), IpAddr::V6(ip6(3)));
- addr.set_ip(IpAddr::V4(ip4(4)));
- assert_eq!(addr.ip(), IpAddr::V4(ip4(4)));
-}
-
-#[test]
-fn set_port() {
- let mut v4 = SocketAddrV4::new(Ipv4Addr::new(77, 88, 21, 11), 80);
- assert_eq!(v4.port(), 80);
- v4.set_port(443);
- assert_eq!(v4.port(), 443);
-
- let mut addr = SocketAddr::V4(v4);
- assert_eq!(addr.port(), 443);
- addr.set_port(8080);
- assert_eq!(addr.port(), 8080);
-
- let mut v6 = SocketAddrV6::new(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 80, 0, 0);
- assert_eq!(v6.port(), 80);
- v6.set_port(443);
- assert_eq!(v6.port(), 443);
-
- let mut addr = SocketAddr::V6(v6);
- assert_eq!(addr.port(), 443);
- addr.set_port(8080);
- assert_eq!(addr.port(), 8080);
-}
-
-#[test]
-fn set_flowinfo() {
- let mut v6 = SocketAddrV6::new(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 80, 10, 0);
- assert_eq!(v6.flowinfo(), 10);
- v6.set_flowinfo(20);
- assert_eq!(v6.flowinfo(), 20);
-}
-
-#[test]
-fn set_scope_id() {
- let mut v6 = SocketAddrV6::new(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 80, 0, 10);
- assert_eq!(v6.scope_id(), 10);
- v6.set_scope_id(20);
- assert_eq!(v6.scope_id(), 20);
-}
-
-#[test]
-fn is_v4() {
- let v4 = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(77, 88, 21, 11), 80));
- assert!(v4.is_ipv4());
- assert!(!v4.is_ipv6());
-}
-
-#[test]
-fn is_v6() {
- let v6 = SocketAddr::V6(SocketAddrV6::new(
- Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1),
- 80,
- 10,
- 0,
- ));
- assert!(!v6.is_ipv4());
- assert!(v6.is_ipv6());
-}
-
-#[test]
-fn socket_v4_to_str() {
- let socket = SocketAddrV4::new(Ipv4Addr::new(192, 168, 0, 1), 8080);
-
- assert_eq!(format!("{socket}"), "192.168.0.1:8080");
- assert_eq!(format!("{socket:<20}"), "192.168.0.1:8080 ");
- assert_eq!(format!("{socket:>20}"), " 192.168.0.1:8080");
- assert_eq!(format!("{socket:^20}"), " 192.168.0.1:8080 ");
- assert_eq!(format!("{socket:.10}"), "192.168.0.");
-}
-
-#[test]
-fn socket_v6_to_str() {
- let mut socket = SocketAddrV6::new(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 53, 0, 0);
-
- assert_eq!(format!("{socket}"), "[2a02:6b8:0:1::1]:53");
- assert_eq!(format!("{socket:<24}"), "[2a02:6b8:0:1::1]:53 ");
- assert_eq!(format!("{socket:>24}"), " [2a02:6b8:0:1::1]:53");
- assert_eq!(format!("{socket:^24}"), " [2a02:6b8:0:1::1]:53 ");
- assert_eq!(format!("{socket:.15}"), "[2a02:6b8:0:1::");
-
- socket.set_scope_id(5);
-
- assert_eq!(format!("{socket}"), "[2a02:6b8:0:1::1%5]:53");
- assert_eq!(format!("{socket:<24}"), "[2a02:6b8:0:1::1%5]:53 ");
- assert_eq!(format!("{socket:>24}"), " [2a02:6b8:0:1::1%5]:53");
- assert_eq!(format!("{socket:^24}"), " [2a02:6b8:0:1::1%5]:53 ");
- assert_eq!(format!("{socket:.18}"), "[2a02:6b8:0:1::1%5");
-}
-
-#[test]
-fn compare() {
- let v4_1 = "224.120.45.1:23456".parse::<SocketAddrV4>().unwrap();
- let v4_2 = "224.210.103.5:12345".parse::<SocketAddrV4>().unwrap();
- let v4_3 = "224.210.103.5:23456".parse::<SocketAddrV4>().unwrap();
- let v6_1 = "[2001:db8:f00::1002]:23456".parse::<SocketAddrV6>().unwrap();
- let v6_2 = "[2001:db8:f00::2001]:12345".parse::<SocketAddrV6>().unwrap();
- let v6_3 = "[2001:db8:f00::2001]:23456".parse::<SocketAddrV6>().unwrap();
-
- // equality
- assert_eq!(v4_1, v4_1);
- assert_eq!(v6_1, v6_1);
- assert_eq!(SocketAddr::V4(v4_1), SocketAddr::V4(v4_1));
- assert_eq!(SocketAddr::V6(v6_1), SocketAddr::V6(v6_1));
- assert!(v4_1 != v4_2);
- assert!(v6_1 != v6_2);
-
- // compare different addresses
- assert!(v4_1 < v4_2);
- assert!(v6_1 < v6_2);
- assert!(v4_2 > v4_1);
- assert!(v6_2 > v6_1);
-
- // compare the same address with different ports
- assert!(v4_2 < v4_3);
- assert!(v6_2 < v6_3);
- assert!(v4_3 > v4_2);
- assert!(v6_3 > v6_2);
-
- // compare different addresses with the same port
- assert!(v4_1 < v4_3);
- assert!(v6_1 < v6_3);
- assert!(v4_3 > v4_1);
- assert!(v6_3 > v6_1);
-
- // compare with an inferred right-hand side
- assert_eq!(v4_1, "224.120.45.1:23456".parse().unwrap());
- assert_eq!(v6_1, "[2001:db8:f00::1002]:23456".parse().unwrap());
- assert_eq!(SocketAddr::V4(v4_1), "224.120.45.1:23456".parse().unwrap());
-}
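The deleted tests above exercise the `SocketAddr` mutators and the width/precision handling of the `Display` impls; a condensed, stand-alone sketch of the same checks (illustrative only, not part of the patch):

```
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};

fn main() {
    // Mutators are available on both the concrete type and the enum wrapper.
    let mut v4 = SocketAddrV4::new(Ipv4Addr::new(192, 168, 0, 1), 80);
    v4.set_port(443);
    let mut addr = SocketAddr::V4(v4);
    addr.set_port(8080);
    assert_eq!(addr.port(), 8080);

    // `Display` honours width and precision flags, which the alignment tests verify.
    assert_eq!(format!("{v4:<20}"), "192.168.0.1:443     ");
    assert_eq!(format!("{v4:.10}"), "192.168.0.");
}
```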
diff --git a/library/std/src/net/display_buffer.rs b/library/std/src/net/display_buffer.rs
new file mode 100644
index 000000000..7aadf06e9
--- /dev/null
+++ b/library/std/src/net/display_buffer.rs
@@ -0,0 +1,40 @@
+use crate::fmt;
+use crate::mem::MaybeUninit;
+use crate::str;
+
+/// Used for slow path in `Display` implementations when alignment is required.
+pub struct DisplayBuffer<const SIZE: usize> {
+ buf: [MaybeUninit<u8>; SIZE],
+ len: usize,
+}
+
+impl<const SIZE: usize> DisplayBuffer<SIZE> {
+ #[inline]
+ pub const fn new() -> Self {
+ Self { buf: MaybeUninit::uninit_array(), len: 0 }
+ }
+
+ #[inline]
+ pub fn as_str(&self) -> &str {
+ // SAFETY: `buf` is only written to by the `fmt::Write::write_str` implementation
+ // which writes a valid UTF-8 string to `buf` and correctly sets `len`.
+ unsafe {
+ let s = MaybeUninit::slice_assume_init_ref(&self.buf[..self.len]);
+ str::from_utf8_unchecked(s)
+ }
+ }
+}
+
+impl<const SIZE: usize> fmt::Write for DisplayBuffer<SIZE> {
+ fn write_str(&mut self, s: &str) -> fmt::Result {
+ let bytes = s.as_bytes();
+
+ if let Some(buf) = self.buf.get_mut(self.len..(self.len + bytes.len())) {
+ MaybeUninit::write_slice(buf, bytes);
+ self.len += bytes.len();
+ Ok(())
+ } else {
+ Err(fmt::Error)
+ }
+ }
+}
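As background for the new helper added above (again, not part of the patch): `DisplayBuffer` lets the address `Display` impls format into a small stack buffer and then hand the finished `&str` to `Formatter::pad`, so width, precision, and alignment apply to the whole rendered address rather than to individual `write!` fragments. Below is a stable-API analogue of the same pattern, with a hypothetical `StackBuffer` type standing in for the unstable `MaybeUninit` machinery:

```
use std::fmt::{self, Write};

// Illustrative stand-in for `DisplayBuffer`, built only on stable APIs:
// a zero-initialised stack buffer instead of `MaybeUninit` storage.
struct StackBuffer<const N: usize> {
    buf: [u8; N],
    len: usize,
}

impl<const N: usize> StackBuffer<N> {
    fn new() -> Self {
        Self { buf: [0; N], len: 0 }
    }

    fn as_str(&self) -> &str {
        // `write_str` only ever copies in valid UTF-8, so this cannot fail.
        std::str::from_utf8(&self.buf[..self.len]).unwrap()
    }
}

impl<const N: usize> Write for StackBuffer<N> {
    fn write_str(&mut self, s: &str) -> fmt::Result {
        let bytes = s.as_bytes();
        match self.buf.get_mut(self.len..self.len + bytes.len()) {
            Some(dst) => {
                dst.copy_from_slice(bytes);
                self.len += bytes.len();
                Ok(())
            }
            // Overflowing the fixed buffer is reported as a formatting error,
            // mirroring `DisplayBuffer::write_str` above.
            None => Err(fmt::Error),
        }
    }
}

fn main() {
    // 15 bytes is enough for the longest IPv4 text, "255.255.255.255".
    let mut buf = StackBuffer::<15>::new();
    write!(buf, "{}.{}.{}.{}", 192, 168, 0, 1).unwrap();
    assert_eq!(buf.as_str(), "192.168.0.1");
}
```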
diff --git a/library/std/src/net/ip.rs b/library/std/src/net/ip.rs
deleted file mode 100644
index 41ca9ba84..000000000
--- a/library/std/src/net/ip.rs
+++ /dev/null
@@ -1,2040 +0,0 @@
-// Tests for this module
-#[cfg(all(test, not(target_os = "emscripten")))]
-mod tests;
-
-use crate::cmp::Ordering;
-use crate::fmt::{self, Write as FmtWrite};
-use crate::io::Write as IoWrite;
-use crate::mem::transmute;
-use crate::sys::net::netc as c;
-use crate::sys_common::{FromInner, IntoInner};
-
-/// An IP address, either IPv4 or IPv6.
-///
-/// This enum can contain either an [`Ipv4Addr`] or an [`Ipv6Addr`], see their
-/// respective documentation for more details.
-///
-/// # Examples
-///
-/// ```
-/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
-///
-/// let localhost_v4 = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1));
-/// let localhost_v6 = IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1));
-///
-/// assert_eq!("127.0.0.1".parse(), Ok(localhost_v4));
-/// assert_eq!("::1".parse(), Ok(localhost_v6));
-///
-/// assert_eq!(localhost_v4.is_ipv6(), false);
-/// assert_eq!(localhost_v4.is_ipv4(), true);
-/// ```
-#[stable(feature = "ip_addr", since = "1.7.0")]
-#[derive(Copy, Clone, Eq, PartialEq, Hash, PartialOrd, Ord)]
-pub enum IpAddr {
- /// An IPv4 address.
- #[stable(feature = "ip_addr", since = "1.7.0")]
- V4(#[stable(feature = "ip_addr", since = "1.7.0")] Ipv4Addr),
- /// An IPv6 address.
- #[stable(feature = "ip_addr", since = "1.7.0")]
- V6(#[stable(feature = "ip_addr", since = "1.7.0")] Ipv6Addr),
-}
-
-/// An IPv4 address.
-///
-/// IPv4 addresses are defined as 32-bit integers in [IETF RFC 791].
-/// They are usually represented as four octets.
-///
-/// See [`IpAddr`] for a type encompassing both IPv4 and IPv6 addresses.
-///
-/// [IETF RFC 791]: https://tools.ietf.org/html/rfc791
-///
-/// # Textual representation
-///
-/// `Ipv4Addr` provides a [`FromStr`] implementation. The four octets are in decimal
-/// notation, divided by `.` (this is called "dot-decimal notation").
-/// Notably, octal numbers (which are indicated with a leading `0`) and hexadecimal numbers (which
-/// are indicated with a leading `0x`) are not allowed per [IETF RFC 6943].
-///
-/// [IETF RFC 6943]: https://tools.ietf.org/html/rfc6943#section-3.1.1
-/// [`FromStr`]: crate::str::FromStr
-///
-/// # Examples
-///
-/// ```
-/// use std::net::Ipv4Addr;
-///
-/// let localhost = Ipv4Addr::new(127, 0, 0, 1);
-/// assert_eq!("127.0.0.1".parse(), Ok(localhost));
-/// assert_eq!(localhost.is_loopback(), true);
-/// assert!("012.004.002.000".parse::<Ipv4Addr>().is_err()); // all octets are in octal
-/// assert!("0000000.0.0.0".parse::<Ipv4Addr>().is_err()); // first octet is a zero in octal
-/// assert!("0xcb.0x0.0x71.0x00".parse::<Ipv4Addr>().is_err()); // all octets are in hex
-/// ```
-#[derive(Copy, Clone, PartialEq, Eq, Hash)]
-#[stable(feature = "rust1", since = "1.0.0")]
-pub struct Ipv4Addr {
- octets: [u8; 4],
-}
-
-/// An IPv6 address.
-///
-/// IPv6 addresses are defined as 128-bit integers in [IETF RFC 4291].
-/// They are usually represented as eight 16-bit segments.
-///
-/// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291
-///
-/// # Embedding IPv4 Addresses
-///
-/// See [`IpAddr`] for a type encompassing both IPv4 and IPv6 addresses.
-///
-/// To assist in the transition from IPv4 to IPv6, two types of IPv6 addresses that embed an IPv4 address were defined:
-/// IPv4-compatible and IPv4-mapped addresses. Of these, IPv4-compatible addresses have been officially deprecated.
-///
-/// Both types of addresses are not assigned any special meaning by this implementation,
-/// other than what the relevant standards prescribe. This means that an address like `::ffff:127.0.0.1`,
-/// while representing an IPv4 loopback address, is not itself an IPv6 loopback address; only `::1` is.
-/// To handle these so-called "IPv4-in-IPv6" addresses, they first have to be converted to their canonical IPv4 address.
-///
-/// ### IPv4-Compatible IPv6 Addresses
-///
-/// IPv4-compatible IPv6 addresses are defined in [IETF RFC 4291 Section 2.5.5.1], and have been officially deprecated.
-/// The RFC describes the format of an "IPv4-Compatible IPv6 address" as follows:
-///
-/// ```text
-/// | 80 bits | 16 | 32 bits |
-/// +--------------------------------------+--------------------------+
-/// |0000..............................0000|0000| IPv4 address |
-/// +--------------------------------------+----+---------------------+
-/// ```
-/// So `::a.b.c.d` would be an IPv4-compatible IPv6 address representing the IPv4 address `a.b.c.d`.
-///
-/// To convert from an IPv4 address to an IPv4-compatible IPv6 address, use [`Ipv4Addr::to_ipv6_compatible`].
-/// Use [`Ipv6Addr::to_ipv4`] to convert an IPv4-compatible IPv6 address to the canonical IPv4 address.
-///
-/// [IETF RFC 4291 Section 2.5.5.1]: https://datatracker.ietf.org/doc/html/rfc4291#section-2.5.5.1
-///
-/// ### IPv4-Mapped IPv6 Addresses
-///
-/// IPv4-mapped IPv6 addresses are defined in [IETF RFC 4291 Section 2.5.5.2].
-/// The RFC describes the format of an "IPv4-Mapped IPv6 address" as follows:
-///
-/// ```text
-/// | 80 bits | 16 | 32 bits |
-/// +--------------------------------------+--------------------------+
-/// |0000..............................0000|FFFF| IPv4 address |
-/// +--------------------------------------+----+---------------------+
-/// ```
-/// So `::ffff:a.b.c.d` would be an IPv4-mapped IPv6 address representing the IPv4 address `a.b.c.d`.
-///
-/// To convert from an IPv4 address to an IPv4-mapped IPv6 address, use [`Ipv4Addr::to_ipv6_mapped`].
-/// Use [`Ipv6Addr::to_ipv4`] to convert an IPv4-mapped IPv6 address to the canonical IPv4 address.
-/// Note that this will also convert the IPv6 loopback address `::1` to `0.0.0.1`. Use
-/// [`Ipv6Addr::to_ipv4_mapped`] to avoid this.
-///
-/// [IETF RFC 4291 Section 2.5.5.2]: https://datatracker.ietf.org/doc/html/rfc4291#section-2.5.5.2
-///
-/// # Textual representation
-///
-/// `Ipv6Addr` provides a [`FromStr`] implementation. There are many ways to represent
-/// an IPv6 address in text, but in general, each segment is written in hexadecimal
-/// notation, and segments are separated by `:`. For more information, see
-/// [IETF RFC 5952].
-///
-/// [`FromStr`]: crate::str::FromStr
-/// [IETF RFC 5952]: https://tools.ietf.org/html/rfc5952
-///
-/// # Examples
-///
-/// ```
-/// use std::net::Ipv6Addr;
-///
-/// let localhost = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1);
-/// assert_eq!("::1".parse(), Ok(localhost));
-/// assert_eq!(localhost.is_loopback(), true);
-/// ```
-#[derive(Copy, Clone, PartialEq, Eq, Hash)]
-#[stable(feature = "rust1", since = "1.0.0")]
-pub struct Ipv6Addr {
- octets: [u8; 16],
-}
-
-/// Scope of an [IPv6 multicast address] as defined in [IETF RFC 7346 section 2].
-///
-/// # Stability Guarantees
-///
-/// Not all possible values for a multicast scope have been assigned.
-/// Future RFCs may introduce new scopes, which will be added as variants to this enum;
-/// because of this the enum is marked as `#[non_exhaustive]`.
-///
-/// # Examples
-/// ```
-/// #![feature(ip)]
-///
-/// use std::net::Ipv6Addr;
-/// use std::net::Ipv6MulticastScope::*;
-///
-/// // An IPv6 multicast address with global scope (`ff0e::`).
-/// let address = Ipv6Addr::new(0xff0e, 0, 0, 0, 0, 0, 0, 0);
-///
-/// // Will print "Global scope".
-/// match address.multicast_scope() {
-/// Some(InterfaceLocal) => println!("Interface-Local scope"),
-/// Some(LinkLocal) => println!("Link-Local scope"),
-/// Some(RealmLocal) => println!("Realm-Local scope"),
-/// Some(AdminLocal) => println!("Admin-Local scope"),
-/// Some(SiteLocal) => println!("Site-Local scope"),
-/// Some(OrganizationLocal) => println!("Organization-Local scope"),
-/// Some(Global) => println!("Global scope"),
-/// Some(_) => println!("Unknown scope"),
-/// None => println!("Not a multicast address!")
-/// }
-///
-/// ```
-///
-/// [IPv6 multicast address]: Ipv6Addr
-/// [IETF RFC 7346 section 2]: https://tools.ietf.org/html/rfc7346#section-2
-#[derive(Copy, PartialEq, Eq, Clone, Hash, Debug)]
-#[unstable(feature = "ip", issue = "27709")]
-#[non_exhaustive]
-pub enum Ipv6MulticastScope {
- /// Interface-Local scope.
- InterfaceLocal,
- /// Link-Local scope.
- LinkLocal,
- /// Realm-Local scope.
- RealmLocal,
- /// Admin-Local scope.
- AdminLocal,
- /// Site-Local scope.
- SiteLocal,
- /// Organization-Local scope.
- OrganizationLocal,
- /// Global scope.
- Global,
-}
-
-impl IpAddr {
- /// Returns [`true`] for the special 'unspecified' address.
- ///
- /// See the documentation for [`Ipv4Addr::is_unspecified()`] and
- /// [`Ipv6Addr::is_unspecified()`] for more details.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
- ///
- /// assert_eq!(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)).is_unspecified(), true);
- /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)).is_unspecified(), true);
- /// ```
- #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
- #[stable(feature = "ip_shared", since = "1.12.0")]
- #[must_use]
- #[inline]
- pub const fn is_unspecified(&self) -> bool {
- match self {
- IpAddr::V4(ip) => ip.is_unspecified(),
- IpAddr::V6(ip) => ip.is_unspecified(),
- }
- }
-
- /// Returns [`true`] if this is a loopback address.
- ///
- /// See the documentation for [`Ipv4Addr::is_loopback()`] and
- /// [`Ipv6Addr::is_loopback()`] for more details.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
- ///
- /// assert_eq!(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)).is_loopback(), true);
- /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0x1)).is_loopback(), true);
- /// ```
- #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
- #[stable(feature = "ip_shared", since = "1.12.0")]
- #[must_use]
- #[inline]
- pub const fn is_loopback(&self) -> bool {
- match self {
- IpAddr::V4(ip) => ip.is_loopback(),
- IpAddr::V6(ip) => ip.is_loopback(),
- }
- }
-
- /// Returns [`true`] if the address appears to be globally routable.
- ///
- /// See the documentation for [`Ipv4Addr::is_global()`] and
- /// [`Ipv6Addr::is_global()`] for more details.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(ip)]
- ///
- /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
- ///
- /// assert_eq!(IpAddr::V4(Ipv4Addr::new(80, 9, 12, 3)).is_global(), true);
- /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0x1c9, 0, 0, 0xafc8, 0, 0x1)).is_global(), true);
- /// ```
- #[rustc_const_unstable(feature = "const_ip", issue = "76205")]
- #[unstable(feature = "ip", issue = "27709")]
- #[must_use]
- #[inline]
- pub const fn is_global(&self) -> bool {
- match self {
- IpAddr::V4(ip) => ip.is_global(),
- IpAddr::V6(ip) => ip.is_global(),
- }
- }
-
- /// Returns [`true`] if this is a multicast address.
- ///
- /// See the documentation for [`Ipv4Addr::is_multicast()`] and
- /// [`Ipv6Addr::is_multicast()`] for more details.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
- ///
- /// assert_eq!(IpAddr::V4(Ipv4Addr::new(224, 254, 0, 0)).is_multicast(), true);
- /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0)).is_multicast(), true);
- /// ```
- #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
- #[stable(feature = "ip_shared", since = "1.12.0")]
- #[must_use]
- #[inline]
- pub const fn is_multicast(&self) -> bool {
- match self {
- IpAddr::V4(ip) => ip.is_multicast(),
- IpAddr::V6(ip) => ip.is_multicast(),
- }
- }
-
- /// Returns [`true`] if this address is in a range designated for documentation.
- ///
- /// See the documentation for [`Ipv4Addr::is_documentation()`] and
- /// [`Ipv6Addr::is_documentation()`] for more details.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(ip)]
- ///
- /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
- ///
- /// assert_eq!(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 6)).is_documentation(), true);
- /// assert_eq!(
- /// IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0)).is_documentation(),
- /// true
- /// );
- /// ```
- #[rustc_const_unstable(feature = "const_ip", issue = "76205")]
- #[unstable(feature = "ip", issue = "27709")]
- #[must_use]
- #[inline]
- pub const fn is_documentation(&self) -> bool {
- match self {
- IpAddr::V4(ip) => ip.is_documentation(),
- IpAddr::V6(ip) => ip.is_documentation(),
- }
- }
-
- /// Returns [`true`] if this address is in a range designated for benchmarking.
- ///
- /// See the documentation for [`Ipv4Addr::is_benchmarking()`] and
- /// [`Ipv6Addr::is_benchmarking()`] for more details.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(ip)]
- ///
- /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
- ///
- /// assert_eq!(IpAddr::V4(Ipv4Addr::new(198, 19, 255, 255)).is_benchmarking(), true);
- /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0x2001, 0x2, 0, 0, 0, 0, 0, 0)).is_benchmarking(), true);
- /// ```
- #[unstable(feature = "ip", issue = "27709")]
- #[must_use]
- #[inline]
- pub const fn is_benchmarking(&self) -> bool {
- match self {
- IpAddr::V4(ip) => ip.is_benchmarking(),
- IpAddr::V6(ip) => ip.is_benchmarking(),
- }
- }
-
- /// Returns [`true`] if this address is an [`IPv4` address], and [`false`]
- /// otherwise.
- ///
- /// [`IPv4` address]: IpAddr::V4
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
- ///
- /// assert_eq!(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 6)).is_ipv4(), true);
- /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0)).is_ipv4(), false);
- /// ```
- #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
- #[stable(feature = "ipaddr_checker", since = "1.16.0")]
- #[must_use]
- #[inline]
- pub const fn is_ipv4(&self) -> bool {
- matches!(self, IpAddr::V4(_))
- }
-
- /// Returns [`true`] if this address is an [`IPv6` address], and [`false`]
- /// otherwise.
- ///
- /// [`IPv6` address]: IpAddr::V6
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
- ///
- /// assert_eq!(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 6)).is_ipv6(), false);
- /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0)).is_ipv6(), true);
- /// ```
- #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
- #[stable(feature = "ipaddr_checker", since = "1.16.0")]
- #[must_use]
- #[inline]
- pub const fn is_ipv6(&self) -> bool {
- matches!(self, IpAddr::V6(_))
- }
-
- /// Converts this address to an `IpAddr::V4` if it is an IPv4-mapped IPv6 address;
- /// otherwise returns `self` as-is.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(ip)]
- /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
- ///
- /// assert_eq!(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)).to_canonical().is_loopback(), true);
- /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1)).is_loopback(), false);
- /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1)).to_canonical().is_loopback(), true);
- /// ```
- #[inline]
- #[must_use = "this returns the result of the operation, \
- without modifying the original"]
- #[rustc_const_unstable(feature = "const_ip", issue = "76205")]
- #[unstable(feature = "ip", issue = "27709")]
- pub const fn to_canonical(&self) -> IpAddr {
- match self {
- &v4 @ IpAddr::V4(_) => v4,
- IpAddr::V6(v6) => v6.to_canonical(),
- }
- }
-}
-
-impl Ipv4Addr {
- /// Creates a new IPv4 address from four eight-bit octets.
- ///
- /// The result will represent the IP address `a`.`b`.`c`.`d`.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv4Addr;
- ///
- /// let addr = Ipv4Addr::new(127, 0, 0, 1);
- /// ```
- #[rustc_const_stable(feature = "const_ip_32", since = "1.32.0")]
- #[stable(feature = "rust1", since = "1.0.0")]
- #[must_use]
- #[inline]
- pub const fn new(a: u8, b: u8, c: u8, d: u8) -> Ipv4Addr {
- Ipv4Addr { octets: [a, b, c, d] }
- }
-
- /// An IPv4 address with the address pointing to localhost: `127.0.0.1`
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv4Addr;
- ///
- /// let addr = Ipv4Addr::LOCALHOST;
- /// assert_eq!(addr, Ipv4Addr::new(127, 0, 0, 1));
- /// ```
- #[stable(feature = "ip_constructors", since = "1.30.0")]
- pub const LOCALHOST: Self = Ipv4Addr::new(127, 0, 0, 1);
-
- /// An IPv4 address representing an unspecified address: `0.0.0.0`
- ///
- /// This corresponds to the constant `INADDR_ANY` in other languages.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv4Addr;
- ///
- /// let addr = Ipv4Addr::UNSPECIFIED;
- /// assert_eq!(addr, Ipv4Addr::new(0, 0, 0, 0));
- /// ```
- #[doc(alias = "INADDR_ANY")]
- #[stable(feature = "ip_constructors", since = "1.30.0")]
- pub const UNSPECIFIED: Self = Ipv4Addr::new(0, 0, 0, 0);
-
- /// An IPv4 address representing the broadcast address: `255.255.255.255`
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv4Addr;
- ///
- /// let addr = Ipv4Addr::BROADCAST;
- /// assert_eq!(addr, Ipv4Addr::new(255, 255, 255, 255));
- /// ```
- #[stable(feature = "ip_constructors", since = "1.30.0")]
- pub const BROADCAST: Self = Ipv4Addr::new(255, 255, 255, 255);
-
- /// Returns the four eight-bit integers that make up this address.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv4Addr;
- ///
- /// let addr = Ipv4Addr::new(127, 0, 0, 1);
- /// assert_eq!(addr.octets(), [127, 0, 0, 1]);
- /// ```
- #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
- #[stable(feature = "rust1", since = "1.0.0")]
- #[must_use]
- #[inline]
- pub const fn octets(&self) -> [u8; 4] {
- self.octets
- }
-
- /// Returns [`true`] for the special 'unspecified' address (`0.0.0.0`).
- ///
- /// This property is defined in _UNIX Network Programming, Second Edition_,
- /// W. Richard Stevens, p. 891; see also [ip7].
- ///
- /// [ip7]: https://man7.org/linux/man-pages/man7/ip.7.html
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv4Addr;
- ///
- /// assert_eq!(Ipv4Addr::new(0, 0, 0, 0).is_unspecified(), true);
- /// assert_eq!(Ipv4Addr::new(45, 22, 13, 197).is_unspecified(), false);
- /// ```
- #[rustc_const_stable(feature = "const_ip_32", since = "1.32.0")]
- #[stable(feature = "ip_shared", since = "1.12.0")]
- #[must_use]
- #[inline]
- pub const fn is_unspecified(&self) -> bool {
- u32::from_be_bytes(self.octets) == 0
- }
-
- /// Returns [`true`] if this is a loopback address (`127.0.0.0/8`).
- ///
- /// This property is defined by [IETF RFC 1122].
- ///
- /// [IETF RFC 1122]: https://tools.ietf.org/html/rfc1122
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv4Addr;
- ///
- /// assert_eq!(Ipv4Addr::new(127, 0, 0, 1).is_loopback(), true);
- /// assert_eq!(Ipv4Addr::new(45, 22, 13, 197).is_loopback(), false);
- /// ```
- #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
- #[stable(since = "1.7.0", feature = "ip_17")]
- #[must_use]
- #[inline]
- pub const fn is_loopback(&self) -> bool {
- self.octets()[0] == 127
- }
-
- /// Returns [`true`] if this is a private address.
- ///
- /// The private address ranges are defined in [IETF RFC 1918] and include:
- ///
- /// - `10.0.0.0/8`
- /// - `172.16.0.0/12`
- /// - `192.168.0.0/16`
- ///
- /// [IETF RFC 1918]: https://tools.ietf.org/html/rfc1918
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv4Addr;
- ///
- /// assert_eq!(Ipv4Addr::new(10, 0, 0, 1).is_private(), true);
- /// assert_eq!(Ipv4Addr::new(10, 10, 10, 10).is_private(), true);
- /// assert_eq!(Ipv4Addr::new(172, 16, 10, 10).is_private(), true);
- /// assert_eq!(Ipv4Addr::new(172, 29, 45, 14).is_private(), true);
- /// assert_eq!(Ipv4Addr::new(172, 32, 0, 2).is_private(), false);
- /// assert_eq!(Ipv4Addr::new(192, 168, 0, 2).is_private(), true);
- /// assert_eq!(Ipv4Addr::new(192, 169, 0, 2).is_private(), false);
- /// ```
- #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
- #[stable(since = "1.7.0", feature = "ip_17")]
- #[must_use]
- #[inline]
- pub const fn is_private(&self) -> bool {
- match self.octets() {
- [10, ..] => true,
- [172, b, ..] if b >= 16 && b <= 31 => true,
- [192, 168, ..] => true,
- _ => false,
- }
- }
-
- /// Returns [`true`] if the address is link-local (`169.254.0.0/16`).
- ///
- /// This property is defined by [IETF RFC 3927].
- ///
- /// [IETF RFC 3927]: https://tools.ietf.org/html/rfc3927
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv4Addr;
- ///
- /// assert_eq!(Ipv4Addr::new(169, 254, 0, 0).is_link_local(), true);
- /// assert_eq!(Ipv4Addr::new(169, 254, 10, 65).is_link_local(), true);
- /// assert_eq!(Ipv4Addr::new(16, 89, 10, 65).is_link_local(), false);
- /// ```
- #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
- #[stable(since = "1.7.0", feature = "ip_17")]
- #[must_use]
- #[inline]
- pub const fn is_link_local(&self) -> bool {
- matches!(self.octets(), [169, 254, ..])
- }
-
- /// Returns [`true`] if the address appears to be globally routable.
- /// See [iana-ipv4-special-registry][ipv4-sr].
- ///
- /// The following return [`false`]:
- ///
- /// - private addresses (see [`Ipv4Addr::is_private()`])
- /// - the loopback address (see [`Ipv4Addr::is_loopback()`])
- /// - the link-local address (see [`Ipv4Addr::is_link_local()`])
- /// - the broadcast address (see [`Ipv4Addr::is_broadcast()`])
- /// - addresses used for documentation (see [`Ipv4Addr::is_documentation()`])
- /// - the unspecified address (see [`Ipv4Addr::is_unspecified()`]), and the whole
- /// `0.0.0.0/8` block
- /// - addresses reserved for future protocols, except
- /// `192.0.0.9/32` and `192.0.0.10/32` which are globally routable
- /// - addresses reserved for future use (see [`Ipv4Addr::is_reserved()`])
- /// - addresses reserved for networking devices benchmarking (see
- /// [`Ipv4Addr::is_benchmarking()`])
- ///
- /// [ipv4-sr]: https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(ip)]
- ///
- /// use std::net::Ipv4Addr;
- ///
- /// // private addresses are not global
- /// assert_eq!(Ipv4Addr::new(10, 254, 0, 0).is_global(), false);
- /// assert_eq!(Ipv4Addr::new(192, 168, 10, 65).is_global(), false);
- /// assert_eq!(Ipv4Addr::new(172, 16, 10, 65).is_global(), false);
- ///
- /// // the 0.0.0.0/8 block is not global
- /// assert_eq!(Ipv4Addr::new(0, 1, 2, 3).is_global(), false);
- /// // in particular, the unspecified address is not global
- /// assert_eq!(Ipv4Addr::new(0, 0, 0, 0).is_global(), false);
- ///
- /// // the loopback address is not global
- /// assert_eq!(Ipv4Addr::new(127, 0, 0, 1).is_global(), false);
- ///
- /// // link local addresses are not global
- /// assert_eq!(Ipv4Addr::new(169, 254, 45, 1).is_global(), false);
- ///
- /// // the broadcast address is not global
- /// assert_eq!(Ipv4Addr::new(255, 255, 255, 255).is_global(), false);
- ///
- /// // the address space designated for documentation is not global
- /// assert_eq!(Ipv4Addr::new(192, 0, 2, 255).is_global(), false);
- /// assert_eq!(Ipv4Addr::new(198, 51, 100, 65).is_global(), false);
- /// assert_eq!(Ipv4Addr::new(203, 0, 113, 6).is_global(), false);
- ///
- /// // shared addresses are not global
- /// assert_eq!(Ipv4Addr::new(100, 100, 0, 0).is_global(), false);
- ///
- /// // addresses reserved for protocol assignment are not global
- /// assert_eq!(Ipv4Addr::new(192, 0, 0, 0).is_global(), false);
- /// assert_eq!(Ipv4Addr::new(192, 0, 0, 255).is_global(), false);
- ///
- /// // addresses reserved for future use are not global
- /// assert_eq!(Ipv4Addr::new(250, 10, 20, 30).is_global(), false);
- ///
- /// // addresses reserved for network devices benchmarking are not global
- /// assert_eq!(Ipv4Addr::new(198, 18, 0, 0).is_global(), false);
- ///
- /// // All the other addresses are global
- /// assert_eq!(Ipv4Addr::new(1, 1, 1, 1).is_global(), true);
- /// assert_eq!(Ipv4Addr::new(80, 9, 12, 3).is_global(), true);
- /// ```
- #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")]
- #[unstable(feature = "ip", issue = "27709")]
- #[must_use]
- #[inline]
- pub const fn is_global(&self) -> bool {
- // check if this address is 192.0.0.9 or 192.0.0.10. These addresses are the only two
- // globally routable addresses in the 192.0.0.0/24 range.
- if u32::from_be_bytes(self.octets()) == 0xc0000009
- || u32::from_be_bytes(self.octets()) == 0xc000000a
- {
- return true;
- }
- !self.is_private()
- && !self.is_loopback()
- && !self.is_link_local()
- && !self.is_broadcast()
- && !self.is_documentation()
- && !self.is_shared()
- // addresses reserved for future protocols (`192.0.0.0/24`)
- && !(self.octets()[0] == 192 && self.octets()[1] == 0 && self.octets()[2] == 0)
- && !self.is_reserved()
- && !self.is_benchmarking()
- // Make sure the address is not in 0.0.0.0/8
- && self.octets()[0] != 0
- }
-
- /// Returns [`true`] if this address is part of the Shared Address Space defined in
- /// [IETF RFC 6598] (`100.64.0.0/10`).
- ///
- /// [IETF RFC 6598]: https://tools.ietf.org/html/rfc6598
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(ip)]
- /// use std::net::Ipv4Addr;
- ///
- /// assert_eq!(Ipv4Addr::new(100, 64, 0, 0).is_shared(), true);
- /// assert_eq!(Ipv4Addr::new(100, 127, 255, 255).is_shared(), true);
- /// assert_eq!(Ipv4Addr::new(100, 128, 0, 0).is_shared(), false);
- /// ```
- #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")]
- #[unstable(feature = "ip", issue = "27709")]
- #[must_use]
- #[inline]
- pub const fn is_shared(&self) -> bool {
- self.octets()[0] == 100 && (self.octets()[1] & 0b1100_0000 == 0b0100_0000)
- }
-
- /// Returns [`true`] if this address is part of the `198.18.0.0/15` range, which is reserved for
- /// network devices benchmarking. This range is defined in [IETF RFC 2544] as `192.18.0.0`
- /// through `198.19.255.255` but [errata 423] corrects it to `198.18.0.0/15`.
- ///
- /// [IETF RFC 2544]: https://tools.ietf.org/html/rfc2544
- /// [errata 423]: https://www.rfc-editor.org/errata/eid423
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(ip)]
- /// use std::net::Ipv4Addr;
- ///
- /// assert_eq!(Ipv4Addr::new(198, 17, 255, 255).is_benchmarking(), false);
- /// assert_eq!(Ipv4Addr::new(198, 18, 0, 0).is_benchmarking(), true);
- /// assert_eq!(Ipv4Addr::new(198, 19, 255, 255).is_benchmarking(), true);
- /// assert_eq!(Ipv4Addr::new(198, 20, 0, 0).is_benchmarking(), false);
- /// ```
- #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")]
- #[unstable(feature = "ip", issue = "27709")]
- #[must_use]
- #[inline]
- pub const fn is_benchmarking(&self) -> bool {
- self.octets()[0] == 198 && (self.octets()[1] & 0xfe) == 18
- }
-
- /// Returns [`true`] if this address is reserved by IANA for future use. [IETF RFC 1112]
- /// defines the block of reserved addresses as `240.0.0.0/4`. This range normally includes the
- /// broadcast address `255.255.255.255`, but this implementation explicitly excludes it, since
- /// it is obviously not reserved for future use.
- ///
- /// [IETF RFC 1112]: https://tools.ietf.org/html/rfc1112
- ///
- /// # Warning
- ///
- /// As IANA assigns new addresses, this method will be
- /// updated. This may result in non-reserved addresses being
- /// treated as reserved in code that relies on an outdated version
- /// of this method.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(ip)]
- /// use std::net::Ipv4Addr;
- ///
- /// assert_eq!(Ipv4Addr::new(240, 0, 0, 0).is_reserved(), true);
- /// assert_eq!(Ipv4Addr::new(255, 255, 255, 254).is_reserved(), true);
- ///
- /// assert_eq!(Ipv4Addr::new(239, 255, 255, 255).is_reserved(), false);
- /// // The broadcast address is not considered as reserved for future use by this implementation
- /// assert_eq!(Ipv4Addr::new(255, 255, 255, 255).is_reserved(), false);
- /// ```
- #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")]
- #[unstable(feature = "ip", issue = "27709")]
- #[must_use]
- #[inline]
- pub const fn is_reserved(&self) -> bool {
- self.octets()[0] & 240 == 240 && !self.is_broadcast()
- }
-
- /// Returns [`true`] if this is a multicast address (`224.0.0.0/4`).
- ///
- /// Multicast addresses have a most significant octet between `224` and `239`,
- /// a range defined by [IETF RFC 5771].
- ///
- /// [IETF RFC 5771]: https://tools.ietf.org/html/rfc5771
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv4Addr;
- ///
- /// assert_eq!(Ipv4Addr::new(224, 254, 0, 0).is_multicast(), true);
- /// assert_eq!(Ipv4Addr::new(236, 168, 10, 65).is_multicast(), true);
- /// assert_eq!(Ipv4Addr::new(172, 16, 10, 65).is_multicast(), false);
- /// ```
- #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
- #[stable(since = "1.7.0", feature = "ip_17")]
- #[must_use]
- #[inline]
- pub const fn is_multicast(&self) -> bool {
- self.octets()[0] >= 224 && self.octets()[0] <= 239
- }
-
- /// Returns [`true`] if this is a broadcast address (`255.255.255.255`).
- ///
- /// A broadcast address has all octets set to `255` as defined in [IETF RFC 919].
- ///
- /// [IETF RFC 919]: https://tools.ietf.org/html/rfc919
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv4Addr;
- ///
- /// assert_eq!(Ipv4Addr::new(255, 255, 255, 255).is_broadcast(), true);
- /// assert_eq!(Ipv4Addr::new(236, 168, 10, 65).is_broadcast(), false);
- /// ```
- #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
- #[stable(since = "1.7.0", feature = "ip_17")]
- #[must_use]
- #[inline]
- pub const fn is_broadcast(&self) -> bool {
- u32::from_be_bytes(self.octets()) == u32::from_be_bytes(Self::BROADCAST.octets())
- }
-
- /// Returns [`true`] if this address is in a range designated for documentation.
- ///
- /// This is defined in [IETF RFC 5737]:
- ///
- /// - `192.0.2.0/24` (TEST-NET-1)
- /// - `198.51.100.0/24` (TEST-NET-2)
- /// - `203.0.113.0/24` (TEST-NET-3)
- ///
- /// [IETF RFC 5737]: https://tools.ietf.org/html/rfc5737
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv4Addr;
- ///
- /// assert_eq!(Ipv4Addr::new(192, 0, 2, 255).is_documentation(), true);
- /// assert_eq!(Ipv4Addr::new(198, 51, 100, 65).is_documentation(), true);
- /// assert_eq!(Ipv4Addr::new(203, 0, 113, 6).is_documentation(), true);
- /// assert_eq!(Ipv4Addr::new(193, 34, 17, 19).is_documentation(), false);
- /// ```
- #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
- #[stable(since = "1.7.0", feature = "ip_17")]
- #[must_use]
- #[inline]
- pub const fn is_documentation(&self) -> bool {
- matches!(self.octets(), [192, 0, 2, _] | [198, 51, 100, _] | [203, 0, 113, _])
- }
-
- /// Converts this address to an [IPv4-compatible] [`IPv6` address].
- ///
- /// `a.b.c.d` becomes `::a.b.c.d`
- ///
- /// Note that IPv4-compatible addresses have been officially deprecated.
- /// If you don't explicitly need an IPv4-compatible address for legacy reasons, consider using `to_ipv6_mapped` instead.
- ///
- /// [IPv4-compatible]: Ipv6Addr#ipv4-compatible-ipv6-addresses
- /// [`IPv6` address]: Ipv6Addr
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{Ipv4Addr, Ipv6Addr};
- ///
- /// assert_eq!(
- /// Ipv4Addr::new(192, 0, 2, 255).to_ipv6_compatible(),
- /// Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0xc000, 0x2ff)
- /// );
- /// ```
- #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
- #[stable(feature = "rust1", since = "1.0.0")]
- #[must_use = "this returns the result of the operation, \
- without modifying the original"]
- #[inline]
- pub const fn to_ipv6_compatible(&self) -> Ipv6Addr {
- let [a, b, c, d] = self.octets();
- Ipv6Addr { octets: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, a, b, c, d] }
- }
-
- /// Converts this address to an [IPv4-mapped] [`IPv6` address].
- ///
- /// `a.b.c.d` becomes `::ffff:a.b.c.d`
- ///
- /// [IPv4-mapped]: Ipv6Addr#ipv4-mapped-ipv6-addresses
- /// [`IPv6` address]: Ipv6Addr
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{Ipv4Addr, Ipv6Addr};
- ///
- /// assert_eq!(Ipv4Addr::new(192, 0, 2, 255).to_ipv6_mapped(),
- /// Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc000, 0x2ff));
- /// ```
- #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
- #[stable(feature = "rust1", since = "1.0.0")]
- #[must_use = "this returns the result of the operation, \
- without modifying the original"]
- #[inline]
- pub const fn to_ipv6_mapped(&self) -> Ipv6Addr {
- let [a, b, c, d] = self.octets();
- Ipv6Addr { octets: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, a, b, c, d] }
- }
-}
-
-#[stable(feature = "ip_addr", since = "1.7.0")]
-impl fmt::Display for IpAddr {
- fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
- match self {
- IpAddr::V4(ip) => ip.fmt(fmt),
- IpAddr::V6(ip) => ip.fmt(fmt),
- }
- }
-}
-
-#[stable(feature = "ip_addr", since = "1.7.0")]
-impl fmt::Debug for IpAddr {
- fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::Display::fmt(self, fmt)
- }
-}
-
-#[stable(feature = "ip_from_ip", since = "1.16.0")]
-impl From<Ipv4Addr> for IpAddr {
- /// Copies this address to a new `IpAddr::V4`.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{IpAddr, Ipv4Addr};
- ///
- /// let addr = Ipv4Addr::new(127, 0, 0, 1);
- ///
- /// assert_eq!(
- /// IpAddr::V4(addr),
- /// IpAddr::from(addr)
- /// )
- /// ```
- #[inline]
- fn from(ipv4: Ipv4Addr) -> IpAddr {
- IpAddr::V4(ipv4)
- }
-}
-
-#[stable(feature = "ip_from_ip", since = "1.16.0")]
-impl From<Ipv6Addr> for IpAddr {
- /// Copies this address to a new `IpAddr::V6`.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{IpAddr, Ipv6Addr};
- ///
- /// let addr = Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff);
- ///
- /// assert_eq!(
- /// IpAddr::V6(addr),
- /// IpAddr::from(addr)
- /// );
- /// ```
- #[inline]
- fn from(ipv6: Ipv6Addr) -> IpAddr {
- IpAddr::V6(ipv6)
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl fmt::Display for Ipv4Addr {
- fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
- let octets = self.octets();
- // Fast Path: if there's no alignment stuff, write directly to the buffer
- if fmt.precision().is_none() && fmt.width().is_none() {
- write!(fmt, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3])
- } else {
- const IPV4_BUF_LEN: usize = 15; // Long enough for the longest possible IPv4 address
- let mut buf = [0u8; IPV4_BUF_LEN];
- let mut buf_slice = &mut buf[..];
-
- // Note: The call to write should never fail, hence the unwrap
- write!(buf_slice, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3]).unwrap();
- let len = IPV4_BUF_LEN - buf_slice.len();
-
- // This unsafe is OK because we know what is being written to the buffer
- let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) };
- fmt.pad(buf)
- }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl fmt::Debug for Ipv4Addr {
- fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::Display::fmt(self, fmt)
- }
-}
-
-#[stable(feature = "ip_cmp", since = "1.16.0")]
-impl PartialEq<Ipv4Addr> for IpAddr {
- #[inline]
- fn eq(&self, other: &Ipv4Addr) -> bool {
- match self {
- IpAddr::V4(v4) => v4 == other,
- IpAddr::V6(_) => false,
- }
- }
-}
-
-#[stable(feature = "ip_cmp", since = "1.16.0")]
-impl PartialEq<IpAddr> for Ipv4Addr {
- #[inline]
- fn eq(&self, other: &IpAddr) -> bool {
- match other {
- IpAddr::V4(v4) => self == v4,
- IpAddr::V6(_) => false,
- }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl PartialOrd for Ipv4Addr {
- #[inline]
- fn partial_cmp(&self, other: &Ipv4Addr) -> Option<Ordering> {
- Some(self.cmp(other))
- }
-}
-
-#[stable(feature = "ip_cmp", since = "1.16.0")]
-impl PartialOrd<Ipv4Addr> for IpAddr {
- #[inline]
- fn partial_cmp(&self, other: &Ipv4Addr) -> Option<Ordering> {
- match self {
- IpAddr::V4(v4) => v4.partial_cmp(other),
- IpAddr::V6(_) => Some(Ordering::Greater),
- }
- }
-}
-
-#[stable(feature = "ip_cmp", since = "1.16.0")]
-impl PartialOrd<IpAddr> for Ipv4Addr {
- #[inline]
- fn partial_cmp(&self, other: &IpAddr) -> Option<Ordering> {
- match other {
- IpAddr::V4(v4) => self.partial_cmp(v4),
- IpAddr::V6(_) => Some(Ordering::Less),
- }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl Ord for Ipv4Addr {
- #[inline]
- fn cmp(&self, other: &Ipv4Addr) -> Ordering {
- self.octets.cmp(&other.octets)
- }
-}
-
-impl IntoInner<c::in_addr> for Ipv4Addr {
- #[inline]
- fn into_inner(self) -> c::in_addr {
- // `s_addr` is stored as BE on all machines and the array is in BE order.
- // So the native endian conversion method is used so that it's never swapped.
- c::in_addr { s_addr: u32::from_ne_bytes(self.octets) }
- }
-}
-impl FromInner<c::in_addr> for Ipv4Addr {
- fn from_inner(addr: c::in_addr) -> Ipv4Addr {
- Ipv4Addr { octets: addr.s_addr.to_ne_bytes() }
- }
-}
-
-#[stable(feature = "ip_u32", since = "1.1.0")]
-impl From<Ipv4Addr> for u32 {
- /// Converts an `Ipv4Addr` into a host byte order `u32`.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv4Addr;
- ///
- /// let addr = Ipv4Addr::new(0x12, 0x34, 0x56, 0x78);
- /// assert_eq!(0x12345678, u32::from(addr));
- /// ```
- #[inline]
- fn from(ip: Ipv4Addr) -> u32 {
- u32::from_be_bytes(ip.octets)
- }
-}
-
-#[stable(feature = "ip_u32", since = "1.1.0")]
-impl From<u32> for Ipv4Addr {
- /// Converts a host byte order `u32` into an `Ipv4Addr`.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv4Addr;
- ///
- /// let addr = Ipv4Addr::from(0x12345678);
- /// assert_eq!(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78), addr);
- /// ```
- #[inline]
- fn from(ip: u32) -> Ipv4Addr {
- Ipv4Addr { octets: ip.to_be_bytes() }
- }
-}
-
-#[stable(feature = "from_slice_v4", since = "1.9.0")]
-impl From<[u8; 4]> for Ipv4Addr {
- /// Creates an `Ipv4Addr` from a four element byte array.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv4Addr;
- ///
- /// let addr = Ipv4Addr::from([13u8, 12u8, 11u8, 10u8]);
- /// assert_eq!(Ipv4Addr::new(13, 12, 11, 10), addr);
- /// ```
- #[inline]
- fn from(octets: [u8; 4]) -> Ipv4Addr {
- Ipv4Addr { octets }
- }
-}
-
-#[stable(feature = "ip_from_slice", since = "1.17.0")]
-impl From<[u8; 4]> for IpAddr {
- /// Creates an `IpAddr::V4` from a four element byte array.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{IpAddr, Ipv4Addr};
- ///
- /// let addr = IpAddr::from([13u8, 12u8, 11u8, 10u8]);
- /// assert_eq!(IpAddr::V4(Ipv4Addr::new(13, 12, 11, 10)), addr);
- /// ```
- #[inline]
- fn from(octets: [u8; 4]) -> IpAddr {
- IpAddr::V4(Ipv4Addr::from(octets))
- }
-}
-
-impl Ipv6Addr {
- /// Creates a new IPv6 address from eight 16-bit segments.
- ///
- /// The result will represent the IP address `a:b:c:d:e:f:g:h`.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv6Addr;
- ///
- /// let addr = Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff);
- /// ```
- #[rustc_const_stable(feature = "const_ip_32", since = "1.32.0")]
- #[stable(feature = "rust1", since = "1.0.0")]
- #[must_use]
- #[inline]
- pub const fn new(a: u16, b: u16, c: u16, d: u16, e: u16, f: u16, g: u16, h: u16) -> Ipv6Addr {
- let addr16 = [
- a.to_be(),
- b.to_be(),
- c.to_be(),
- d.to_be(),
- e.to_be(),
- f.to_be(),
- g.to_be(),
- h.to_be(),
- ];
- Ipv6Addr {
- // All elements in `addr16` are big endian.
- // SAFETY: `[u16; 8]` is always safe to transmute to `[u8; 16]`.
- octets: unsafe { transmute::<_, [u8; 16]>(addr16) },
- }
- }
-
- /// An IPv6 address representing localhost: `::1`.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv6Addr;
- ///
- /// let addr = Ipv6Addr::LOCALHOST;
- /// assert_eq!(addr, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1));
- /// ```
- #[stable(feature = "ip_constructors", since = "1.30.0")]
- pub const LOCALHOST: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1);
-
- /// An IPv6 address representing the unspecified address: `::`
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv6Addr;
- ///
- /// let addr = Ipv6Addr::UNSPECIFIED;
- /// assert_eq!(addr, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0));
- /// ```
- #[stable(feature = "ip_constructors", since = "1.30.0")]
- pub const UNSPECIFIED: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0);
-
- /// Returns the eight 16-bit segments that make up this address.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv6Addr;
- ///
- /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).segments(),
- /// [0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff]);
- /// ```
- #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
- #[stable(feature = "rust1", since = "1.0.0")]
- #[must_use]
- #[inline]
- pub const fn segments(&self) -> [u16; 8] {
- // All elements in `self.octets` must be big endian.
- // SAFETY: `[u8; 16]` is always safe to transmute to `[u16; 8]`.
- let [a, b, c, d, e, f, g, h] = unsafe { transmute::<_, [u16; 8]>(self.octets) };
- // We want native endian u16
- [
- u16::from_be(a),
- u16::from_be(b),
- u16::from_be(c),
- u16::from_be(d),
- u16::from_be(e),
- u16::from_be(f),
- u16::from_be(g),
- u16::from_be(h),
- ]
- }
-
- /// Returns [`true`] for the special 'unspecified' address (`::`).
- ///
- /// This property is defined in [IETF RFC 4291].
- ///
- /// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv6Addr;
- ///
- /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_unspecified(), false);
- /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0).is_unspecified(), true);
- /// ```
- #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
- #[stable(since = "1.7.0", feature = "ip_17")]
- #[must_use]
- #[inline]
- pub const fn is_unspecified(&self) -> bool {
- u128::from_be_bytes(self.octets()) == u128::from_be_bytes(Ipv6Addr::UNSPECIFIED.octets())
- }
-
- /// Returns [`true`] if this is the [loopback address] (`::1`),
- /// as defined in [IETF RFC 4291 section 2.5.3].
- ///
- /// Contrary to IPv4, in IPv6 there is only one loopback address.
- ///
- /// [loopback address]: Ipv6Addr::LOCALHOST
- /// [IETF RFC 4291 section 2.5.3]: https://tools.ietf.org/html/rfc4291#section-2.5.3
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv6Addr;
- ///
- /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_loopback(), false);
- /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0x1).is_loopback(), true);
- /// ```
- #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
- #[stable(since = "1.7.0", feature = "ip_17")]
- #[must_use]
- #[inline]
- pub const fn is_loopback(&self) -> bool {
- u128::from_be_bytes(self.octets()) == u128::from_be_bytes(Ipv6Addr::LOCALHOST.octets())
- }
-
- /// Returns [`true`] if the address appears to be globally routable.
- ///
- /// The following return [`false`]:
- ///
- /// - the loopback address
- /// - link-local and unique local unicast addresses
- /// - interface-, link-, realm-, admin- and site-local multicast addresses
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(ip)]
- ///
- /// use std::net::Ipv6Addr;
- ///
- /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_global(), true);
- /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0x1).is_global(), false);
- /// assert_eq!(Ipv6Addr::new(0, 0, 0x1c9, 0, 0, 0xafc8, 0, 0x1).is_global(), true);
- /// ```
- #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
- #[unstable(feature = "ip", issue = "27709")]
- #[must_use]
- #[inline]
- pub const fn is_global(&self) -> bool {
- match self.multicast_scope() {
- Some(Ipv6MulticastScope::Global) => true,
- None => self.is_unicast_global(),
- _ => false,
- }
- }
-
- /// Returns [`true`] if this is a unique local address (`fc00::/7`).
- ///
- /// This property is defined in [IETF RFC 4193].
- ///
- /// [IETF RFC 4193]: https://tools.ietf.org/html/rfc4193
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(ip)]
- ///
- /// use std::net::Ipv6Addr;
- ///
- /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_unique_local(), false);
- /// assert_eq!(Ipv6Addr::new(0xfc02, 0, 0, 0, 0, 0, 0, 0).is_unique_local(), true);
- /// ```
- #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
- #[unstable(feature = "ip", issue = "27709")]
- #[must_use]
- #[inline]
- pub const fn is_unique_local(&self) -> bool {
- (self.segments()[0] & 0xfe00) == 0xfc00
- }
-
- /// Returns [`true`] if this is a unicast address, as defined by [IETF RFC 4291].
- /// Any address that is not a [multicast address] (`ff00::/8`) is unicast.
- ///
- /// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291
- /// [multicast address]: Ipv6Addr::is_multicast
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(ip)]
- ///
- /// use std::net::Ipv6Addr;
- ///
- /// // The unspecified and loopback addresses are unicast.
- /// assert_eq!(Ipv6Addr::UNSPECIFIED.is_unicast(), true);
- /// assert_eq!(Ipv6Addr::LOCALHOST.is_unicast(), true);
- ///
- /// // Any address that is not a multicast address (`ff00::/8`) is unicast.
- /// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0).is_unicast(), true);
- /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).is_unicast(), false);
- /// ```
- #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
- #[unstable(feature = "ip", issue = "27709")]
- #[must_use]
- #[inline]
- pub const fn is_unicast(&self) -> bool {
- !self.is_multicast()
- }
-
- /// Returns `true` if the address is a unicast address with link-local scope,
- /// as defined in [RFC 4291].
- ///
- /// A unicast address has link-local scope if it has the prefix `fe80::/10`, as per [RFC 4291 section 2.4].
- /// Note that this encompasses more addresses than those defined in [RFC 4291 section 2.5.6],
- /// which describes "Link-Local IPv6 Unicast Addresses" as having the following stricter format:
- ///
- /// ```text
- /// | 10 bits | 54 bits | 64 bits |
- /// +----------+-------------------------+----------------------------+
- /// |1111111010| 0 | interface ID |
- /// +----------+-------------------------+----------------------------+
- /// ```
- /// So while currently the only addresses with link-local scope an application will encounter are all in `fe80::/64`,
- /// this might change in the future with the publication of new standards. More addresses in `fe80::/10` could be allocated,
- /// and those addresses will have link-local scope.
- ///
- /// Also note that while [RFC 4291 section 2.5.3] says of the [loopback address] (`::1`) that "it is treated as having Link-Local scope",
- /// this does not mean that the loopback address actually has link-local scope and this method will return `false` on it.
- ///
- /// [RFC 4291]: https://tools.ietf.org/html/rfc4291
- /// [RFC 4291 section 2.4]: https://tools.ietf.org/html/rfc4291#section-2.4
- /// [RFC 4291 section 2.5.3]: https://tools.ietf.org/html/rfc4291#section-2.5.3
- /// [RFC 4291 section 2.5.6]: https://tools.ietf.org/html/rfc4291#section-2.5.6
- /// [loopback address]: Ipv6Addr::LOCALHOST
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(ip)]
- ///
- /// use std::net::Ipv6Addr;
- ///
- /// // The loopback address (`::1`) does not actually have link-local scope.
- /// assert_eq!(Ipv6Addr::LOCALHOST.is_unicast_link_local(), false);
- ///
- /// // Only addresses in `fe80::/10` have link-local scope.
- /// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0).is_unicast_link_local(), false);
- /// assert_eq!(Ipv6Addr::new(0xfe80, 0, 0, 0, 0, 0, 0, 0).is_unicast_link_local(), true);
- ///
- /// // Addresses outside the stricter `fe80::/64` also have link-local scope.
- /// assert_eq!(Ipv6Addr::new(0xfe80, 0, 0, 1, 0, 0, 0, 0).is_unicast_link_local(), true);
- /// assert_eq!(Ipv6Addr::new(0xfe81, 0, 0, 0, 0, 0, 0, 0).is_unicast_link_local(), true);
- /// ```
- #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
- #[unstable(feature = "ip", issue = "27709")]
- #[must_use]
- #[inline]
- pub const fn is_unicast_link_local(&self) -> bool {
- (self.segments()[0] & 0xffc0) == 0xfe80
- }
-
- /// Returns [`true`] if this is an address reserved for documentation
- /// (`2001:db8::/32`).
- ///
- /// This property is defined in [IETF RFC 3849].
- ///
- /// [IETF RFC 3849]: https://tools.ietf.org/html/rfc3849
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(ip)]
- ///
- /// use std::net::Ipv6Addr;
- ///
- /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_documentation(), false);
- /// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0).is_documentation(), true);
- /// ```
- #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
- #[unstable(feature = "ip", issue = "27709")]
- #[must_use]
- #[inline]
- pub const fn is_documentation(&self) -> bool {
- (self.segments()[0] == 0x2001) && (self.segments()[1] == 0xdb8)
- }
-
- /// Returns [`true`] if this is an address reserved for benchmarking (`2001:2::/48`).
- ///
- /// This property is defined in [IETF RFC 5180], where it is mistakenly specified as covering the range `2001:0200::/48`.
- /// This is corrected in [IETF RFC Errata 1752] to `2001:0002::/48`.
- ///
- /// [IETF RFC 5180]: https://tools.ietf.org/html/rfc5180
- /// [IETF RFC Errata 1752]: https://www.rfc-editor.org/errata_search.php?eid=1752
- ///
- /// ```
- /// #![feature(ip)]
- ///
- /// use std::net::Ipv6Addr;
- ///
- /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc613, 0x0).is_benchmarking(), false);
- /// assert_eq!(Ipv6Addr::new(0x2001, 0x2, 0, 0, 0, 0, 0, 0).is_benchmarking(), true);
- /// ```
- #[unstable(feature = "ip", issue = "27709")]
- #[must_use]
- #[inline]
- pub const fn is_benchmarking(&self) -> bool {
- (self.segments()[0] == 0x2001) && (self.segments()[1] == 0x2) && (self.segments()[2] == 0)
- }
-
- /// Returns [`true`] if the address is a globally routable unicast address.
- ///
- /// The following return [`false`]:
- ///
- /// - the loopback address
- /// - the link-local addresses
- /// - unique local addresses
- /// - the unspecified address
- /// - the address range reserved for documentation
- ///
- /// This method returns [`true`] for site-local addresses as per [RFC 4291 section 2.5.7]
- ///
- /// ```no_rust
- /// The special behavior of [the site-local unicast] prefix defined in [RFC3513] must no longer
- /// be supported in new implementations (i.e., new implementations must treat this prefix as
- /// Global Unicast).
- /// ```
- ///
- /// [RFC 4291 section 2.5.7]: https://tools.ietf.org/html/rfc4291#section-2.5.7
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(ip)]
- ///
- /// use std::net::Ipv6Addr;
- ///
- /// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0).is_unicast_global(), false);
- /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_unicast_global(), true);
- /// ```
- #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
- #[unstable(feature = "ip", issue = "27709")]
- #[must_use]
- #[inline]
- pub const fn is_unicast_global(&self) -> bool {
- self.is_unicast()
- && !self.is_loopback()
- && !self.is_unicast_link_local()
- && !self.is_unique_local()
- && !self.is_unspecified()
- && !self.is_documentation()
- }
-
- /// Returns the address's multicast scope if the address is multicast.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(ip)]
- ///
- /// use std::net::{Ipv6Addr, Ipv6MulticastScope};
- ///
- /// assert_eq!(
- /// Ipv6Addr::new(0xff0e, 0, 0, 0, 0, 0, 0, 0).multicast_scope(),
- /// Some(Ipv6MulticastScope::Global)
- /// );
- /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).multicast_scope(), None);
- /// ```
- #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
- #[unstable(feature = "ip", issue = "27709")]
- #[must_use]
- #[inline]
- pub const fn multicast_scope(&self) -> Option<Ipv6MulticastScope> {
- if self.is_multicast() {
- match self.segments()[0] & 0x000f {
- 1 => Some(Ipv6MulticastScope::InterfaceLocal),
- 2 => Some(Ipv6MulticastScope::LinkLocal),
- 3 => Some(Ipv6MulticastScope::RealmLocal),
- 4 => Some(Ipv6MulticastScope::AdminLocal),
- 5 => Some(Ipv6MulticastScope::SiteLocal),
- 8 => Some(Ipv6MulticastScope::OrganizationLocal),
- 14 => Some(Ipv6MulticastScope::Global),
- _ => None,
- }
- } else {
- None
- }
- }
-
- /// Returns [`true`] if this is a multicast address (`ff00::/8`).
- ///
- /// This property is defined by [IETF RFC 4291].
- ///
- /// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv6Addr;
- ///
- /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).is_multicast(), true);
- /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_multicast(), false);
- /// ```
- #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
- #[stable(since = "1.7.0", feature = "ip_17")]
- #[must_use]
- #[inline]
- pub const fn is_multicast(&self) -> bool {
- (self.segments()[0] & 0xff00) == 0xff00
- }
-
- /// Converts this address to an [`IPv4` address] if it's an [IPv4-mapped] address,
- /// as defined in [IETF RFC 4291 section 2.5.5.2], otherwise returns [`None`].
- ///
- /// `::ffff:a.b.c.d` becomes `a.b.c.d`.
- /// All addresses *not* starting with `::ffff` will return `None`.
- ///
- /// [`IPv4` address]: Ipv4Addr
- /// [IPv4-mapped]: Ipv6Addr
- /// [IETF RFC 4291 section 2.5.5.2]: https://tools.ietf.org/html/rfc4291#section-2.5.5.2
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{Ipv4Addr, Ipv6Addr};
- ///
- /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).to_ipv4_mapped(), None);
- /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).to_ipv4_mapped(),
- /// Some(Ipv4Addr::new(192, 10, 2, 255)));
- /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).to_ipv4_mapped(), None);
- /// ```
- #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
- #[stable(feature = "ipv6_to_ipv4_mapped", since = "1.63.0")]
- #[must_use = "this returns the result of the operation, \
- without modifying the original"]
- #[inline]
- pub const fn to_ipv4_mapped(&self) -> Option<Ipv4Addr> {
- match self.octets() {
- [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, a, b, c, d] => {
- Some(Ipv4Addr::new(a, b, c, d))
- }
- _ => None,
- }
- }
-
- /// Converts this address to an [`IPv4` address] if it is either
- /// an [IPv4-compatible] address as defined in [IETF RFC 4291 section 2.5.5.1],
- /// or an [IPv4-mapped] address as defined in [IETF RFC 4291 section 2.5.5.2],
- /// otherwise returns [`None`].
- ///
- /// Note that this will return an [`IPv4` address] for the IPv6 loopback address `::1`. Use
- /// [`Ipv6Addr::to_ipv4_mapped`] to avoid this.
- ///
- /// `::a.b.c.d` and `::ffff:a.b.c.d` become `a.b.c.d`. `::1` becomes `0.0.0.1`.
- /// All addresses *not* starting with either all zeroes or `::ffff` will return `None`.
- ///
- /// [`IPv4` address]: Ipv4Addr
- /// [IPv4-compatible]: Ipv6Addr#ipv4-compatible-ipv6-addresses
- /// [IPv4-mapped]: Ipv6Addr#ipv4-mapped-ipv6-addresses
- /// [IETF RFC 4291 section 2.5.5.1]: https://tools.ietf.org/html/rfc4291#section-2.5.5.1
- /// [IETF RFC 4291 section 2.5.5.2]: https://tools.ietf.org/html/rfc4291#section-2.5.5.2
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{Ipv4Addr, Ipv6Addr};
- ///
- /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).to_ipv4(), None);
- /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).to_ipv4(),
- /// Some(Ipv4Addr::new(192, 10, 2, 255)));
- /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).to_ipv4(),
- /// Some(Ipv4Addr::new(0, 0, 0, 1)));
- /// ```
- #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
- #[stable(feature = "rust1", since = "1.0.0")]
- #[must_use = "this returns the result of the operation, \
- without modifying the original"]
- #[inline]
- pub const fn to_ipv4(&self) -> Option<Ipv4Addr> {
- if let [0, 0, 0, 0, 0, 0 | 0xffff, ab, cd] = self.segments() {
- let [a, b] = ab.to_be_bytes();
- let [c, d] = cd.to_be_bytes();
- Some(Ipv4Addr::new(a, b, c, d))
- } else {
- None
- }
- }
-
- /// Converts this address to an `IpAddr::V4` if it is an IPv4-mapped address,
- /// otherwise returns `self` wrapped in an `IpAddr::V6`.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(ip)]
- /// use std::net::Ipv6Addr;
- ///
- /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1).is_loopback(), false);
- /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1).to_canonical().is_loopback(), true);
- /// ```
- #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
- #[unstable(feature = "ip", issue = "27709")]
- #[must_use = "this returns the result of the operation, \
- without modifying the original"]
- #[inline]
- pub const fn to_canonical(&self) -> IpAddr {
- if let Some(mapped) = self.to_ipv4_mapped() {
- return IpAddr::V4(mapped);
- }
- IpAddr::V6(*self)
- }
-
- /// Returns the sixteen eight-bit integers the IPv6 address consists of.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv6Addr;
- ///
- /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).octets(),
- /// [255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
- /// ```
- #[rustc_const_stable(feature = "const_ip_32", since = "1.32.0")]
- #[stable(feature = "ipv6_to_octets", since = "1.12.0")]
- #[must_use]
- #[inline]
- pub const fn octets(&self) -> [u8; 16] {
- self.octets
- }
-}
-
-/// Write an Ipv6Addr, conforming to the canonical style described by
-/// [RFC 5952](https://tools.ietf.org/html/rfc5952).
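-///
-/// A couple of illustrative cases (a non-normative sketch of the rules the
-/// implementation below follows):
-///
-/// ```
-/// use std::net::Ipv6Addr;
-///
-/// // The longest run of two or more zero segments is compressed to `::`.
-/// assert_eq!(Ipv6Addr::new(1, 0, 0, 4, 0, 0, 0, 8).to_string(), "1:0:0:4::8");
-/// // IPv4-mapped addresses are written using the embedded dotted-quad form.
-/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc000, 0x280).to_string(), "::ffff:192.0.2.128");
-/// ```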
-#[stable(feature = "rust1", since = "1.0.0")]
-impl fmt::Display for Ipv6Addr {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- // If there are no alignment requirements, write out the IP address to
- // f. Otherwise, write it to a local buffer, then use f.pad.
- if f.precision().is_none() && f.width().is_none() {
- let segments = self.segments();
-
- // Special case for :: and ::1; otherwise they get written with the
- // IPv4 formatter
- if self.is_unspecified() {
- f.write_str("::")
- } else if self.is_loopback() {
- f.write_str("::1")
- } else if let Some(ipv4) = self.to_ipv4() {
- match segments[5] {
- // IPv4 Compatible address
- 0 => write!(f, "::{}", ipv4),
- // IPv4 Mapped address
- 0xffff => write!(f, "::ffff:{}", ipv4),
- _ => unreachable!(),
- }
- } else {
- #[derive(Copy, Clone, Default)]
- struct Span {
- start: usize,
- len: usize,
- }
-
- // Find the inner 0 span
- let zeroes = {
- let mut longest = Span::default();
- let mut current = Span::default();
-
- for (i, &segment) in segments.iter().enumerate() {
- if segment == 0 {
- if current.len == 0 {
- current.start = i;
- }
-
- current.len += 1;
-
- if current.len > longest.len {
- longest = current;
- }
- } else {
- current = Span::default();
- }
- }
-
- longest
- };
-
- /// Write a colon-separated part of the address
- #[inline]
- fn fmt_subslice(f: &mut fmt::Formatter<'_>, chunk: &[u16]) -> fmt::Result {
- if let Some((first, tail)) = chunk.split_first() {
- write!(f, "{:x}", first)?;
- for segment in tail {
- f.write_char(':')?;
- write!(f, "{:x}", segment)?;
- }
- }
- Ok(())
- }
-
- if zeroes.len > 1 {
- fmt_subslice(f, &segments[..zeroes.start])?;
- f.write_str("::")?;
- fmt_subslice(f, &segments[zeroes.start + zeroes.len..])
- } else {
- fmt_subslice(f, &segments)
- }
- }
- } else {
- // Slow path: write the address to a local buffer, then use f.pad.
- // Defined recursively by using the fast path to write to the
- // buffer.
-
- // This is the largest possible size of an IPv6 address
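- // (eight groups of four hex digits plus seven `:` separators = 39 bytes).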
- const IPV6_BUF_LEN: usize = (4 * 8) + 7;
- let mut buf = [0u8; IPV6_BUF_LEN];
- let mut buf_slice = &mut buf[..];
-
- // Note: This call to write should never fail, so unwrap is okay.
- write!(buf_slice, "{}", self).unwrap();
- let len = IPV6_BUF_LEN - buf_slice.len();
-
- // This is safe because we know exactly what can be in this buffer
- let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) };
- f.pad(buf)
- }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl fmt::Debug for Ipv6Addr {
- fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::Display::fmt(self, fmt)
- }
-}
-
-#[stable(feature = "ip_cmp", since = "1.16.0")]
-impl PartialEq<IpAddr> for Ipv6Addr {
- #[inline]
- fn eq(&self, other: &IpAddr) -> bool {
- match other {
- IpAddr::V4(_) => false,
- IpAddr::V6(v6) => self == v6,
- }
- }
-}
-
-#[stable(feature = "ip_cmp", since = "1.16.0")]
-impl PartialEq<Ipv6Addr> for IpAddr {
- #[inline]
- fn eq(&self, other: &Ipv6Addr) -> bool {
- match self {
- IpAddr::V4(_) => false,
- IpAddr::V6(v6) => v6 == other,
- }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl PartialOrd for Ipv6Addr {
- #[inline]
- fn partial_cmp(&self, other: &Ipv6Addr) -> Option<Ordering> {
- Some(self.cmp(other))
- }
-}
-
-#[stable(feature = "ip_cmp", since = "1.16.0")]
-impl PartialOrd<Ipv6Addr> for IpAddr {
- #[inline]
- fn partial_cmp(&self, other: &Ipv6Addr) -> Option<Ordering> {
- match self {
- IpAddr::V4(_) => Some(Ordering::Less),
- IpAddr::V6(v6) => v6.partial_cmp(other),
- }
- }
-}
-
-#[stable(feature = "ip_cmp", since = "1.16.0")]
-impl PartialOrd<IpAddr> for Ipv6Addr {
- #[inline]
- fn partial_cmp(&self, other: &IpAddr) -> Option<Ordering> {
- match other {
- IpAddr::V4(_) => Some(Ordering::Greater),
- IpAddr::V6(v6) => self.partial_cmp(v6),
- }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl Ord for Ipv6Addr {
- #[inline]
- fn cmp(&self, other: &Ipv6Addr) -> Ordering {
- self.segments().cmp(&other.segments())
- }
-}
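-
-// Note: together with the `PartialOrd` impls above, this orders any IPv4 address
-// before any IPv6 address when the two are compared across families (for example,
-// `IpAddr::V4(Ipv4Addr::BROADCAST) < Ipv6Addr::UNSPECIFIED` holds), and compares
-// two IPv6 addresses by their segment values.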
-
-impl IntoInner<c::in6_addr> for Ipv6Addr {
- fn into_inner(self) -> c::in6_addr {
- c::in6_addr { s6_addr: self.octets }
- }
-}
-impl FromInner<c::in6_addr> for Ipv6Addr {
- #[inline]
- fn from_inner(addr: c::in6_addr) -> Ipv6Addr {
- Ipv6Addr { octets: addr.s6_addr }
- }
-}
-
-#[stable(feature = "i128", since = "1.26.0")]
-impl From<Ipv6Addr> for u128 {
- /// Converts an `Ipv6Addr` into a host byte order `u128`.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv6Addr;
- ///
- /// let addr = Ipv6Addr::new(
- /// 0x1020, 0x3040, 0x5060, 0x7080,
- /// 0x90A0, 0xB0C0, 0xD0E0, 0xF00D,
- /// );
- /// assert_eq!(0x102030405060708090A0B0C0D0E0F00D_u128, u128::from(addr));
- /// ```
- #[inline]
- fn from(ip: Ipv6Addr) -> u128 {
- u128::from_be_bytes(ip.octets)
- }
-}
-#[stable(feature = "i128", since = "1.26.0")]
-impl From<u128> for Ipv6Addr {
- /// Converts a host byte order `u128` into an `Ipv6Addr`.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv6Addr;
- ///
- /// let addr = Ipv6Addr::from(0x102030405060708090A0B0C0D0E0F00D_u128);
- /// assert_eq!(
- /// Ipv6Addr::new(
- /// 0x1020, 0x3040, 0x5060, 0x7080,
- /// 0x90A0, 0xB0C0, 0xD0E0, 0xF00D,
- /// ),
- /// addr);
- /// ```
- #[inline]
- fn from(ip: u128) -> Ipv6Addr {
- Ipv6Addr::from(ip.to_be_bytes())
- }
-}
-
-#[stable(feature = "ipv6_from_octets", since = "1.9.0")]
-impl From<[u8; 16]> for Ipv6Addr {
- /// Creates an `Ipv6Addr` from a sixteen-element byte array.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv6Addr;
- ///
- /// let addr = Ipv6Addr::from([
- /// 25u8, 24u8, 23u8, 22u8, 21u8, 20u8, 19u8, 18u8,
- /// 17u8, 16u8, 15u8, 14u8, 13u8, 12u8, 11u8, 10u8,
- /// ]);
- /// assert_eq!(
- /// Ipv6Addr::new(
- /// 0x1918, 0x1716,
- /// 0x1514, 0x1312,
- /// 0x1110, 0x0f0e,
- /// 0x0d0c, 0x0b0a
- /// ),
- /// addr
- /// );
- /// ```
- #[inline]
- fn from(octets: [u8; 16]) -> Ipv6Addr {
- Ipv6Addr { octets }
- }
-}
-
-#[stable(feature = "ipv6_from_segments", since = "1.16.0")]
-impl From<[u16; 8]> for Ipv6Addr {
- /// Creates an `Ipv6Addr` from an eight-element 16-bit array.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv6Addr;
- ///
- /// let addr = Ipv6Addr::from([
- /// 525u16, 524u16, 523u16, 522u16,
- /// 521u16, 520u16, 519u16, 518u16,
- /// ]);
- /// assert_eq!(
- /// Ipv6Addr::new(
- /// 0x20d, 0x20c,
- /// 0x20b, 0x20a,
- /// 0x209, 0x208,
- /// 0x207, 0x206
- /// ),
- /// addr
- /// );
- /// ```
- #[inline]
- fn from(segments: [u16; 8]) -> Ipv6Addr {
- let [a, b, c, d, e, f, g, h] = segments;
- Ipv6Addr::new(a, b, c, d, e, f, g, h)
- }
-}
-
-#[stable(feature = "ip_from_slice", since = "1.17.0")]
-impl From<[u8; 16]> for IpAddr {
- /// Creates an `IpAddr::V6` from a sixteen-element byte array.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{IpAddr, Ipv6Addr};
- ///
- /// let addr = IpAddr::from([
- /// 25u8, 24u8, 23u8, 22u8, 21u8, 20u8, 19u8, 18u8,
- /// 17u8, 16u8, 15u8, 14u8, 13u8, 12u8, 11u8, 10u8,
- /// ]);
- /// assert_eq!(
- /// IpAddr::V6(Ipv6Addr::new(
- /// 0x1918, 0x1716,
- /// 0x1514, 0x1312,
- /// 0x1110, 0x0f0e,
- /// 0x0d0c, 0x0b0a
- /// )),
- /// addr
- /// );
- /// ```
- #[inline]
- fn from(octets: [u8; 16]) -> IpAddr {
- IpAddr::V6(Ipv6Addr::from(octets))
- }
-}
-
-#[stable(feature = "ip_from_slice", since = "1.17.0")]
-impl From<[u16; 8]> for IpAddr {
- /// Creates an `IpAddr::V6` from an eight-element 16-bit array.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{IpAddr, Ipv6Addr};
- ///
- /// let addr = IpAddr::from([
- /// 525u16, 524u16, 523u16, 522u16,
- /// 521u16, 520u16, 519u16, 518u16,
- /// ]);
- /// assert_eq!(
- /// IpAddr::V6(Ipv6Addr::new(
- /// 0x20d, 0x20c,
- /// 0x20b, 0x20a,
- /// 0x209, 0x208,
- /// 0x207, 0x206
- /// )),
- /// addr
- /// );
- /// ```
- #[inline]
- fn from(segments: [u16; 8]) -> IpAddr {
- IpAddr::V6(Ipv6Addr::from(segments))
- }
-}
diff --git a/library/std/src/net/ip/tests.rs b/library/std/src/net/ip/tests.rs
deleted file mode 100644
index c29509331..000000000
--- a/library/std/src/net/ip/tests.rs
+++ /dev/null
@@ -1,969 +0,0 @@
-use crate::net::test::{sa4, sa6, tsa};
-use crate::net::*;
-use crate::str::FromStr;
-
-#[test]
-fn test_from_str_ipv4() {
- assert_eq!(Ok(Ipv4Addr::new(127, 0, 0, 1)), "127.0.0.1".parse());
- assert_eq!(Ok(Ipv4Addr::new(255, 255, 255, 255)), "255.255.255.255".parse());
- assert_eq!(Ok(Ipv4Addr::new(0, 0, 0, 0)), "0.0.0.0".parse());
-
- // out of range
- let none: Option<Ipv4Addr> = "256.0.0.1".parse().ok();
- assert_eq!(None, none);
- // too short
- let none: Option<Ipv4Addr> = "255.0.0".parse().ok();
- assert_eq!(None, none);
- // too long
- let none: Option<Ipv4Addr> = "255.0.0.1.2".parse().ok();
- assert_eq!(None, none);
- // no number between dots
- let none: Option<Ipv4Addr> = "255.0..1".parse().ok();
- assert_eq!(None, none);
- // octal
- let none: Option<Ipv4Addr> = "255.0.0.01".parse().ok();
- assert_eq!(None, none);
- // octal zero
- let none: Option<Ipv4Addr> = "255.0.0.00".parse().ok();
- assert_eq!(None, none);
- let none: Option<Ipv4Addr> = "255.0.00.0".parse().ok();
- assert_eq!(None, none);
-}
-
-#[test]
-fn test_from_str_ipv6() {
- assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), "0:0:0:0:0:0:0:0".parse());
- assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), "0:0:0:0:0:0:0:1".parse());
-
- assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), "::1".parse());
- assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), "::".parse());
-
- assert_eq!(Ok(Ipv6Addr::new(0x2a02, 0x6b8, 0, 0, 0, 0, 0x11, 0x11)), "2a02:6b8::11:11".parse());
-
- // too long group
- let none: Option<Ipv6Addr> = "::00000".parse().ok();
- assert_eq!(None, none);
- // too short
- let none: Option<Ipv6Addr> = "1:2:3:4:5:6:7".parse().ok();
- assert_eq!(None, none);
- // too long
- let none: Option<Ipv6Addr> = "1:2:3:4:5:6:7:8:9".parse().ok();
- assert_eq!(None, none);
- // triple colon
- let none: Option<Ipv6Addr> = "1:2:::6:7:8".parse().ok();
- assert_eq!(None, none);
- // two double colons
- let none: Option<Ipv6Addr> = "1:2::6::8".parse().ok();
- assert_eq!(None, none);
- // `::` indicating zero groups of zeros
- let none: Option<Ipv6Addr> = "1:2:3:4::5:6:7:8".parse().ok();
- assert_eq!(None, none);
-}
-
-#[test]
-fn test_from_str_ipv4_in_ipv6() {
- assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 49152, 545)), "::192.0.2.33".parse());
- assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0xFFFF, 49152, 545)), "::FFFF:192.0.2.33".parse());
- assert_eq!(
- Ok(Ipv6Addr::new(0x64, 0xff9b, 0, 0, 0, 0, 49152, 545)),
- "64:ff9b::192.0.2.33".parse()
- );
- assert_eq!(
- Ok(Ipv6Addr::new(0x2001, 0xdb8, 0x122, 0xc000, 0x2, 0x2100, 49152, 545)),
- "2001:db8:122:c000:2:2100:192.0.2.33".parse()
- );
-
- // colon after v4
- let none: Option<Ipv4Addr> = "::127.0.0.1:".parse().ok();
- assert_eq!(None, none);
- // not enough groups
- let none: Option<Ipv6Addr> = "1:2:3:4:5:127.0.0.1".parse().ok();
- assert_eq!(None, none);
- // too many groups
- let none: Option<Ipv6Addr> = "1:2:3:4:5:6:7:127.0.0.1".parse().ok();
- assert_eq!(None, none);
-}
-
-#[test]
-fn test_from_str_socket_addr() {
- assert_eq!(Ok(sa4(Ipv4Addr::new(77, 88, 21, 11), 80)), "77.88.21.11:80".parse());
- assert_eq!(Ok(SocketAddrV4::new(Ipv4Addr::new(77, 88, 21, 11), 80)), "77.88.21.11:80".parse());
- assert_eq!(
- Ok(sa6(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 53)),
- "[2a02:6b8:0:1::1]:53".parse()
- );
- assert_eq!(
- Ok(SocketAddrV6::new(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 53, 0, 0)),
- "[2a02:6b8:0:1::1]:53".parse()
- );
- assert_eq!(Ok(sa6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x7F00, 1), 22)), "[::127.0.0.1]:22".parse());
- assert_eq!(
- Ok(SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x7F00, 1), 22, 0, 0)),
- "[::127.0.0.1]:22".parse()
- );
-
- // without port
- let none: Option<SocketAddr> = "127.0.0.1".parse().ok();
- assert_eq!(None, none);
- // without port
- let none: Option<SocketAddr> = "127.0.0.1:".parse().ok();
- assert_eq!(None, none);
- // wrong brackets around v4
- let none: Option<SocketAddr> = "[127.0.0.1]:22".parse().ok();
- assert_eq!(None, none);
- // port out of range
- let none: Option<SocketAddr> = "127.0.0.1:123456".parse().ok();
- assert_eq!(None, none);
-}
-
-#[test]
-fn ipv4_addr_to_string() {
- assert_eq!(Ipv4Addr::new(127, 0, 0, 1).to_string(), "127.0.0.1");
- // Short address
- assert_eq!(Ipv4Addr::new(1, 1, 1, 1).to_string(), "1.1.1.1");
- // Long address
- assert_eq!(Ipv4Addr::new(127, 127, 127, 127).to_string(), "127.127.127.127");
-
- // Test padding
- assert_eq!(&format!("{:16}", Ipv4Addr::new(1, 1, 1, 1)), "1.1.1.1 ");
- assert_eq!(&format!("{:>16}", Ipv4Addr::new(1, 1, 1, 1)), " 1.1.1.1");
-}
-
-#[test]
-fn ipv6_addr_to_string() {
- // ipv4-mapped address
- let a1 = Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc000, 0x280);
- assert_eq!(a1.to_string(), "::ffff:192.0.2.128");
-
- // ipv4-compatible address
- let a1 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0xc000, 0x280);
- assert_eq!(a1.to_string(), "::192.0.2.128");
-
- // v6 address with no zero segments
- assert_eq!(Ipv6Addr::new(8, 9, 10, 11, 12, 13, 14, 15).to_string(), "8:9:a:b:c:d:e:f");
-
- // longest possible textual representation of an IPv6 address
- assert_eq!(
- Ipv6Addr::new(0x1111, 0x2222, 0x3333, 0x4444, 0x5555, 0x6666, 0x7777, 0x8888).to_string(),
- "1111:2222:3333:4444:5555:6666:7777:8888"
- );
- // padding
- assert_eq!(&format!("{:20}", Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8)), "1:2:3:4:5:6:7:8 ");
- assert_eq!(&format!("{:>20}", Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8)), " 1:2:3:4:5:6:7:8");
-
- // reduce a single run of zeros
- assert_eq!(
- "ae::ffff:102:304",
- Ipv6Addr::new(0xae, 0, 0, 0, 0, 0xffff, 0x0102, 0x0304).to_string()
- );
-
- // don't reduce just a single zero segment
- assert_eq!("1:2:3:4:5:6:0:8", Ipv6Addr::new(1, 2, 3, 4, 5, 6, 0, 8).to_string());
-
- // 'any' address
- assert_eq!("::", Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0).to_string());
-
- // loopback address
- assert_eq!("::1", Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).to_string());
-
- // ends in zeros
- assert_eq!("1::", Ipv6Addr::new(1, 0, 0, 0, 0, 0, 0, 0).to_string());
-
- // two runs of zeros, second one is longer
- assert_eq!("1:0:0:4::8", Ipv6Addr::new(1, 0, 0, 4, 0, 0, 0, 8).to_string());
-
- // two runs of zeros, equal length
- assert_eq!("1::4:5:0:0:8", Ipv6Addr::new(1, 0, 0, 4, 5, 0, 0, 8).to_string());
-
- // don't prefix `0x` to each segment in `dbg!`.
- assert_eq!("1::4:5:0:0:8", &format!("{:#?}", Ipv6Addr::new(1, 0, 0, 4, 5, 0, 0, 8)));
-}
-
-#[test]
-fn ipv4_to_ipv6() {
- assert_eq!(
- Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678),
- Ipv4Addr::new(0x12, 0x34, 0x56, 0x78).to_ipv6_mapped()
- );
- assert_eq!(
- Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x1234, 0x5678),
- Ipv4Addr::new(0x12, 0x34, 0x56, 0x78).to_ipv6_compatible()
- );
-}
-
-#[test]
-fn ipv6_to_ipv4_mapped() {
- assert_eq!(
- Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678).to_ipv4_mapped(),
- Some(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78))
- );
- assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x1234, 0x5678).to_ipv4_mapped(), None);
-}
-
-#[test]
-fn ipv6_to_ipv4() {
- assert_eq!(
- Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678).to_ipv4(),
- Some(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78))
- );
- assert_eq!(
- Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x1234, 0x5678).to_ipv4(),
- Some(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78))
- );
- assert_eq!(Ipv6Addr::new(0, 0, 1, 0, 0, 0, 0x1234, 0x5678).to_ipv4(), None);
-}
-
-#[test]
-fn ip_properties() {
- macro_rules! ip {
- ($s:expr) => {
- IpAddr::from_str($s).unwrap()
- };
- }
-
- macro_rules! check {
- ($s:expr) => {
- check!($s, 0);
- };
-
- ($s:expr, $mask:expr) => {{
- let unspec: u8 = 1 << 0;
- let loopback: u8 = 1 << 1;
- let global: u8 = 1 << 2;
- let multicast: u8 = 1 << 3;
- let doc: u8 = 1 << 4;
- let benchmarking: u8 = 1 << 5;
-
- if ($mask & unspec) == unspec {
- assert!(ip!($s).is_unspecified());
- } else {
- assert!(!ip!($s).is_unspecified());
- }
-
- if ($mask & loopback) == loopback {
- assert!(ip!($s).is_loopback());
- } else {
- assert!(!ip!($s).is_loopback());
- }
-
- if ($mask & global) == global {
- assert!(ip!($s).is_global());
- } else {
- assert!(!ip!($s).is_global());
- }
-
- if ($mask & multicast) == multicast {
- assert!(ip!($s).is_multicast());
- } else {
- assert!(!ip!($s).is_multicast());
- }
-
- if ($mask & doc) == doc {
- assert!(ip!($s).is_documentation());
- } else {
- assert!(!ip!($s).is_documentation());
- }
-
- if ($mask & benchmarking) == benchmarking {
- assert!(ip!($s).is_benchmarking());
- } else {
- assert!(!ip!($s).is_benchmarking());
- }
- }};
- }
-
- let unspec: u8 = 1 << 0;
- let loopback: u8 = 1 << 1;
- let global: u8 = 1 << 2;
- let multicast: u8 = 1 << 3;
- let doc: u8 = 1 << 4;
- let benchmarking: u8 = 1 << 5;
-
- check!("0.0.0.0", unspec);
- check!("0.0.0.1");
- check!("0.1.0.0");
- check!("10.9.8.7");
- check!("127.1.2.3", loopback);
- check!("172.31.254.253");
- check!("169.254.253.242");
- check!("192.0.2.183", doc);
- check!("192.1.2.183", global);
- check!("192.168.254.253");
- check!("198.51.100.0", doc);
- check!("203.0.113.0", doc);
- check!("203.2.113.0", global);
- check!("224.0.0.0", global | multicast);
- check!("239.255.255.255", global | multicast);
- check!("255.255.255.255");
- // make sure benchmarking addresses are not global
- check!("198.18.0.0", benchmarking);
- check!("198.18.54.2", benchmarking);
- check!("198.19.255.255", benchmarking);
- // make sure addresses reserved for protocol assignment are not global
- check!("192.0.0.0");
- check!("192.0.0.255");
- check!("192.0.0.100");
- // make sure reserved addresses are not global
- check!("240.0.0.0");
- check!("251.54.1.76");
- check!("254.255.255.255");
- // make sure shared addresses are not global
- check!("100.64.0.0");
- check!("100.127.255.255");
- check!("100.100.100.0");
-
- check!("::", unspec);
- check!("::1", loopback);
- check!("::0.0.0.2", global);
- check!("1::", global);
- check!("fc00::");
- check!("fdff:ffff::");
- check!("fe80:ffff::");
- check!("febf:ffff::");
- check!("fec0::", global);
- check!("ff01::", multicast);
- check!("ff02::", multicast);
- check!("ff03::", multicast);
- check!("ff04::", multicast);
- check!("ff05::", multicast);
- check!("ff08::", multicast);
- check!("ff0e::", global | multicast);
- check!("2001:db8:85a3::8a2e:370:7334", doc);
- check!("2001:2::ac32:23ff:21", global | benchmarking);
- check!("102:304:506:708:90a:b0c:d0e:f10", global);
-}
-
-#[test]
-fn ipv4_properties() {
- macro_rules! ip {
- ($s:expr) => {
- Ipv4Addr::from_str($s).unwrap()
- };
- }
-
- macro_rules! check {
- ($s:expr) => {
- check!($s, 0);
- };
-
- ($s:expr, $mask:expr) => {{
- let unspec: u16 = 1 << 0;
- let loopback: u16 = 1 << 1;
- let private: u16 = 1 << 2;
- let link_local: u16 = 1 << 3;
- let global: u16 = 1 << 4;
- let multicast: u16 = 1 << 5;
- let broadcast: u16 = 1 << 6;
- let documentation: u16 = 1 << 7;
- let benchmarking: u16 = 1 << 8;
- let reserved: u16 = 1 << 10;
- let shared: u16 = 1 << 11;
-
- if ($mask & unspec) == unspec {
- assert!(ip!($s).is_unspecified());
- } else {
- assert!(!ip!($s).is_unspecified());
- }
-
- if ($mask & loopback) == loopback {
- assert!(ip!($s).is_loopback());
- } else {
- assert!(!ip!($s).is_loopback());
- }
-
- if ($mask & private) == private {
- assert!(ip!($s).is_private());
- } else {
- assert!(!ip!($s).is_private());
- }
-
- if ($mask & link_local) == link_local {
- assert!(ip!($s).is_link_local());
- } else {
- assert!(!ip!($s).is_link_local());
- }
-
- if ($mask & global) == global {
- assert!(ip!($s).is_global());
- } else {
- assert!(!ip!($s).is_global());
- }
-
- if ($mask & multicast) == multicast {
- assert!(ip!($s).is_multicast());
- } else {
- assert!(!ip!($s).is_multicast());
- }
-
- if ($mask & broadcast) == broadcast {
- assert!(ip!($s).is_broadcast());
- } else {
- assert!(!ip!($s).is_broadcast());
- }
-
- if ($mask & documentation) == documentation {
- assert!(ip!($s).is_documentation());
- } else {
- assert!(!ip!($s).is_documentation());
- }
-
- if ($mask & benchmarking) == benchmarking {
- assert!(ip!($s).is_benchmarking());
- } else {
- assert!(!ip!($s).is_benchmarking());
- }
-
- if ($mask & reserved) == reserved {
- assert!(ip!($s).is_reserved());
- } else {
- assert!(!ip!($s).is_reserved());
- }
-
- if ($mask & shared) == shared {
- assert!(ip!($s).is_shared());
- } else {
- assert!(!ip!($s).is_shared());
- }
- }};
- }
-
- let unspec: u16 = 1 << 0;
- let loopback: u16 = 1 << 1;
- let private: u16 = 1 << 2;
- let link_local: u16 = 1 << 3;
- let global: u16 = 1 << 4;
- let multicast: u16 = 1 << 5;
- let broadcast: u16 = 1 << 6;
- let documentation: u16 = 1 << 7;
- let benchmarking: u16 = 1 << 8;
- let reserved: u16 = 1 << 10;
- let shared: u16 = 1 << 11;
-
- check!("0.0.0.0", unspec);
- check!("0.0.0.1");
- check!("0.1.0.0");
- check!("10.9.8.7", private);
- check!("127.1.2.3", loopback);
- check!("172.31.254.253", private);
- check!("169.254.253.242", link_local);
- check!("192.0.2.183", documentation);
- check!("192.1.2.183", global);
- check!("192.168.254.253", private);
- check!("198.51.100.0", documentation);
- check!("203.0.113.0", documentation);
- check!("203.2.113.0", global);
- check!("224.0.0.0", global | multicast);
- check!("239.255.255.255", global | multicast);
- check!("255.255.255.255", broadcast);
- check!("198.18.0.0", benchmarking);
- check!("198.18.54.2", benchmarking);
- check!("198.19.255.255", benchmarking);
- check!("192.0.0.0");
- check!("192.0.0.255");
- check!("192.0.0.100");
- check!("240.0.0.0", reserved);
- check!("251.54.1.76", reserved);
- check!("254.255.255.255", reserved);
- check!("100.64.0.0", shared);
- check!("100.127.255.255", shared);
- check!("100.100.100.0", shared);
-}
-
-#[test]
-fn ipv6_properties() {
- macro_rules! ip {
- ($s:expr) => {
- Ipv6Addr::from_str($s).unwrap()
- };
- }
-
- macro_rules! check {
- ($s:expr, &[$($octet:expr),*], $mask:expr) => {
- assert_eq!($s, ip!($s).to_string());
- let octets = &[$($octet),*];
- assert_eq!(&ip!($s).octets(), octets);
- assert_eq!(Ipv6Addr::from(*octets), ip!($s));
-
- let unspecified: u32 = 1 << 0;
- let loopback: u32 = 1 << 1;
- let unique_local: u32 = 1 << 2;
- let global: u32 = 1 << 3;
- let unicast_link_local: u32 = 1 << 4;
- let unicast_global: u32 = 1 << 7;
- let documentation: u32 = 1 << 8;
- let benchmarking: u32 = 1 << 16;
- let multicast_interface_local: u32 = 1 << 9;
- let multicast_link_local: u32 = 1 << 10;
- let multicast_realm_local: u32 = 1 << 11;
- let multicast_admin_local: u32 = 1 << 12;
- let multicast_site_local: u32 = 1 << 13;
- let multicast_organization_local: u32 = 1 << 14;
- let multicast_global: u32 = 1 << 15;
- let multicast: u32 = multicast_interface_local
- | multicast_admin_local
- | multicast_global
- | multicast_link_local
- | multicast_realm_local
- | multicast_site_local
- | multicast_organization_local;
-
- if ($mask & unspecified) == unspecified {
- assert!(ip!($s).is_unspecified());
- } else {
- assert!(!ip!($s).is_unspecified());
- }
- if ($mask & loopback) == loopback {
- assert!(ip!($s).is_loopback());
- } else {
- assert!(!ip!($s).is_loopback());
- }
- if ($mask & unique_local) == unique_local {
- assert!(ip!($s).is_unique_local());
- } else {
- assert!(!ip!($s).is_unique_local());
- }
- if ($mask & global) == global {
- assert!(ip!($s).is_global());
- } else {
- assert!(!ip!($s).is_global());
- }
- if ($mask & unicast_link_local) == unicast_link_local {
- assert!(ip!($s).is_unicast_link_local());
- } else {
- assert!(!ip!($s).is_unicast_link_local());
- }
- if ($mask & unicast_global) == unicast_global {
- assert!(ip!($s).is_unicast_global());
- } else {
- assert!(!ip!($s).is_unicast_global());
- }
- if ($mask & documentation) == documentation {
- assert!(ip!($s).is_documentation());
- } else {
- assert!(!ip!($s).is_documentation());
- }
- if ($mask & benchmarking) == benchmarking {
- assert!(ip!($s).is_benchmarking());
- } else {
- assert!(!ip!($s).is_benchmarking());
- }
- if ($mask & multicast) != 0 {
- assert!(ip!($s).multicast_scope().is_some());
- assert!(ip!($s).is_multicast());
- } else {
- assert!(ip!($s).multicast_scope().is_none());
- assert!(!ip!($s).is_multicast());
- }
- if ($mask & multicast_interface_local) == multicast_interface_local {
- assert_eq!(ip!($s).multicast_scope().unwrap(),
- Ipv6MulticastScope::InterfaceLocal);
- }
- if ($mask & multicast_link_local) == multicast_link_local {
- assert_eq!(ip!($s).multicast_scope().unwrap(),
- Ipv6MulticastScope::LinkLocal);
- }
- if ($mask & multicast_realm_local) == multicast_realm_local {
- assert_eq!(ip!($s).multicast_scope().unwrap(),
- Ipv6MulticastScope::RealmLocal);
- }
- if ($mask & multicast_admin_local) == multicast_admin_local {
- assert_eq!(ip!($s).multicast_scope().unwrap(),
- Ipv6MulticastScope::AdminLocal);
- }
- if ($mask & multicast_site_local) == multicast_site_local {
- assert_eq!(ip!($s).multicast_scope().unwrap(),
- Ipv6MulticastScope::SiteLocal);
- }
- if ($mask & multicast_organization_local) == multicast_organization_local {
- assert_eq!(ip!($s).multicast_scope().unwrap(),
- Ipv6MulticastScope::OrganizationLocal);
- }
- if ($mask & multicast_global) == multicast_global {
- assert_eq!(ip!($s).multicast_scope().unwrap(),
- Ipv6MulticastScope::Global);
- }
- }
- }
-
- let unspecified: u32 = 1 << 0;
- let loopback: u32 = 1 << 1;
- let unique_local: u32 = 1 << 2;
- let global: u32 = 1 << 3;
- let unicast_link_local: u32 = 1 << 4;
- let unicast_global: u32 = 1 << 7;
- let documentation: u32 = 1 << 8;
- let benchmarking: u32 = 1 << 16;
- let multicast_interface_local: u32 = 1 << 9;
- let multicast_link_local: u32 = 1 << 10;
- let multicast_realm_local: u32 = 1 << 11;
- let multicast_admin_local: u32 = 1 << 12;
- let multicast_site_local: u32 = 1 << 13;
- let multicast_organization_local: u32 = 1 << 14;
- let multicast_global: u32 = 1 << 15;
-
- check!("::", &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unspecified);
-
- check!("::1", &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], loopback);
-
- check!("::0.0.0.2", &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], global | unicast_global);
-
- check!("1::", &[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], global | unicast_global);
-
- check!("fc00::", &[0xfc, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unique_local);
-
- check!(
- "fdff:ffff::",
- &[0xfd, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
- unique_local
- );
-
- check!(
- "fe80:ffff::",
- &[0xfe, 0x80, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
- unicast_link_local
- );
-
- check!("fe80::", &[0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unicast_link_local);
-
- check!(
- "febf:ffff::",
- &[0xfe, 0xbf, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
- unicast_link_local
- );
-
- check!("febf::", &[0xfe, 0xbf, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unicast_link_local);
-
- check!(
- "febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff",
- &[
- 0xfe, 0xbf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff
- ],
- unicast_link_local
- );
-
- check!(
- "fe80::ffff:ffff:ffff:ffff",
- &[
- 0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff
- ],
- unicast_link_local
- );
-
- check!(
- "fe80:0:0:1::",
- &[0xfe, 0x80, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
- unicast_link_local
- );
-
- check!(
- "fec0::",
- &[0xfe, 0xc0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
- unicast_global | global
- );
-
- check!(
- "ff01::",
- &[0xff, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
- multicast_interface_local
- );
-
- check!("ff02::", &[0xff, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], multicast_link_local);
-
- check!("ff03::", &[0xff, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], multicast_realm_local);
-
- check!("ff04::", &[0xff, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], multicast_admin_local);
-
- check!("ff05::", &[0xff, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], multicast_site_local);
-
- check!(
- "ff08::",
- &[0xff, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
- multicast_organization_local
- );
-
- check!(
- "ff0e::",
- &[0xff, 0xe, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
- multicast_global | global
- );
-
- check!(
- "2001:db8:85a3::8a2e:370:7334",
- &[0x20, 1, 0xd, 0xb8, 0x85, 0xa3, 0, 0, 0, 0, 0x8a, 0x2e, 3, 0x70, 0x73, 0x34],
- documentation
- );
-
- check!(
- "2001:2::ac32:23ff:21",
- &[0x20, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0xac, 0x32, 0x23, 0xff, 0, 0x21],
- global | unicast_global | benchmarking
- );
-
- check!(
- "102:304:506:708:90a:b0c:d0e:f10",
- &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
- global | unicast_global
- );
-}
-
-#[test]
-fn to_socket_addr_socketaddr() {
- let a = sa4(Ipv4Addr::new(77, 88, 21, 11), 12345);
- assert_eq!(Ok(vec![a]), tsa(a));
-}
-
-#[test]
-fn test_ipv4_to_int() {
- let a = Ipv4Addr::new(0x11, 0x22, 0x33, 0x44);
- assert_eq!(u32::from(a), 0x11223344);
-}
-
-#[test]
-fn test_int_to_ipv4() {
- let a = Ipv4Addr::new(0x11, 0x22, 0x33, 0x44);
- assert_eq!(Ipv4Addr::from(0x11223344), a);
-}
-
-#[test]
-fn test_ipv6_to_int() {
- let a = Ipv6Addr::new(0x1122, 0x3344, 0x5566, 0x7788, 0x99aa, 0xbbcc, 0xddee, 0xff11);
- assert_eq!(u128::from(a), 0x112233445566778899aabbccddeeff11u128);
-}
-
-#[test]
-fn test_int_to_ipv6() {
- let a = Ipv6Addr::new(0x1122, 0x3344, 0x5566, 0x7788, 0x99aa, 0xbbcc, 0xddee, 0xff11);
- assert_eq!(Ipv6Addr::from(0x112233445566778899aabbccddeeff11u128), a);
-}
-
-#[test]
-fn ipv4_from_constructors() {
- assert_eq!(Ipv4Addr::LOCALHOST, Ipv4Addr::new(127, 0, 0, 1));
- assert!(Ipv4Addr::LOCALHOST.is_loopback());
- assert_eq!(Ipv4Addr::UNSPECIFIED, Ipv4Addr::new(0, 0, 0, 0));
- assert!(Ipv4Addr::UNSPECIFIED.is_unspecified());
- assert_eq!(Ipv4Addr::BROADCAST, Ipv4Addr::new(255, 255, 255, 255));
- assert!(Ipv4Addr::BROADCAST.is_broadcast());
-}
-
-#[test]
-fn ipv6_from_constructors() {
- assert_eq!(Ipv6Addr::LOCALHOST, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1));
- assert!(Ipv6Addr::LOCALHOST.is_loopback());
- assert_eq!(Ipv6Addr::UNSPECIFIED, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0));
- assert!(Ipv6Addr::UNSPECIFIED.is_unspecified());
-}
-
-#[test]
-fn ipv4_from_octets() {
- assert_eq!(Ipv4Addr::from([127, 0, 0, 1]), Ipv4Addr::new(127, 0, 0, 1))
-}
-
-#[test]
-fn ipv6_from_segments() {
- let from_u16s =
- Ipv6Addr::from([0x0011, 0x2233, 0x4455, 0x6677, 0x8899, 0xaabb, 0xccdd, 0xeeff]);
- let new = Ipv6Addr::new(0x0011, 0x2233, 0x4455, 0x6677, 0x8899, 0xaabb, 0xccdd, 0xeeff);
- assert_eq!(new, from_u16s);
-}
-
-#[test]
-fn ipv6_from_octets() {
- let from_u16s =
- Ipv6Addr::from([0x0011, 0x2233, 0x4455, 0x6677, 0x8899, 0xaabb, 0xccdd, 0xeeff]);
- let from_u8s = Ipv6Addr::from([
- 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee,
- 0xff,
- ]);
- assert_eq!(from_u16s, from_u8s);
-}
-
-#[test]
-fn cmp() {
- let v41 = Ipv4Addr::new(100, 64, 3, 3);
- let v42 = Ipv4Addr::new(192, 0, 2, 2);
- let v61 = "2001:db8:f00::1002".parse::<Ipv6Addr>().unwrap();
- let v62 = "2001:db8:f00::2001".parse::<Ipv6Addr>().unwrap();
- assert!(v41 < v42);
- assert!(v61 < v62);
-
- assert_eq!(v41, IpAddr::V4(v41));
- assert_eq!(v61, IpAddr::V6(v61));
- assert!(v41 != IpAddr::V4(v42));
- assert!(v61 != IpAddr::V6(v62));
-
- assert!(v41 < IpAddr::V4(v42));
- assert!(v61 < IpAddr::V6(v62));
- assert!(IpAddr::V4(v41) < v42);
- assert!(IpAddr::V6(v61) < v62);
-
- assert!(v41 < IpAddr::V6(v61));
- assert!(IpAddr::V4(v41) < v61);
-}
-
-#[test]
-fn is_v4() {
- let ip = IpAddr::V4(Ipv4Addr::new(100, 64, 3, 3));
- assert!(ip.is_ipv4());
- assert!(!ip.is_ipv6());
-}
-
-#[test]
-fn is_v6() {
- let ip = IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678));
- assert!(!ip.is_ipv4());
- assert!(ip.is_ipv6());
-}
-
-#[test]
-fn ipv4_const() {
- // test that the methods of `Ipv4Addr` are usable in a const context
-
- const IP_ADDRESS: Ipv4Addr = Ipv4Addr::new(127, 0, 0, 1);
- assert_eq!(IP_ADDRESS, Ipv4Addr::LOCALHOST);
-
- const OCTETS: [u8; 4] = IP_ADDRESS.octets();
- assert_eq!(OCTETS, [127, 0, 0, 1]);
-
- const IS_UNSPECIFIED: bool = IP_ADDRESS.is_unspecified();
- assert!(!IS_UNSPECIFIED);
-
- const IS_LOOPBACK: bool = IP_ADDRESS.is_loopback();
- assert!(IS_LOOPBACK);
-
- const IS_PRIVATE: bool = IP_ADDRESS.is_private();
- assert!(!IS_PRIVATE);
-
- const IS_LINK_LOCAL: bool = IP_ADDRESS.is_link_local();
- assert!(!IS_LINK_LOCAL);
-
- const IS_GLOBAL: bool = IP_ADDRESS.is_global();
- assert!(!IS_GLOBAL);
-
- const IS_SHARED: bool = IP_ADDRESS.is_shared();
- assert!(!IS_SHARED);
-
- const IS_BENCHMARKING: bool = IP_ADDRESS.is_benchmarking();
- assert!(!IS_BENCHMARKING);
-
- const IS_RESERVED: bool = IP_ADDRESS.is_reserved();
- assert!(!IS_RESERVED);
-
- const IS_MULTICAST: bool = IP_ADDRESS.is_multicast();
- assert!(!IS_MULTICAST);
-
- const IS_BROADCAST: bool = IP_ADDRESS.is_broadcast();
- assert!(!IS_BROADCAST);
-
- const IS_DOCUMENTATION: bool = IP_ADDRESS.is_documentation();
- assert!(!IS_DOCUMENTATION);
-
- const IP_V6_COMPATIBLE: Ipv6Addr = IP_ADDRESS.to_ipv6_compatible();
- assert_eq!(
- IP_V6_COMPATIBLE,
- Ipv6Addr::from([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 0, 0, 1])
- );
-
- const IP_V6_MAPPED: Ipv6Addr = IP_ADDRESS.to_ipv6_mapped();
- assert_eq!(
- IP_V6_MAPPED,
- Ipv6Addr::from([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 127, 0, 0, 1])
- );
-}
-
-#[test]
-fn ipv6_const() {
- // test that the methods of `Ipv6Addr` are usable in a const context
-
- const IP_ADDRESS: Ipv6Addr = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1);
- assert_eq!(IP_ADDRESS, Ipv6Addr::LOCALHOST);
-
- const SEGMENTS: [u16; 8] = IP_ADDRESS.segments();
- assert_eq!(SEGMENTS, [0, 0, 0, 0, 0, 0, 0, 1]);
-
- const OCTETS: [u8; 16] = IP_ADDRESS.octets();
- assert_eq!(OCTETS, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]);
-
- const IS_UNSPECIFIED: bool = IP_ADDRESS.is_unspecified();
- assert!(!IS_UNSPECIFIED);
-
- const IS_LOOPBACK: bool = IP_ADDRESS.is_loopback();
- assert!(IS_LOOPBACK);
-
- const IS_GLOBAL: bool = IP_ADDRESS.is_global();
- assert!(!IS_GLOBAL);
-
- const IS_UNIQUE_LOCAL: bool = IP_ADDRESS.is_unique_local();
- assert!(!IS_UNIQUE_LOCAL);
-
- const IS_UNICAST_LINK_LOCAL: bool = IP_ADDRESS.is_unicast_link_local();
- assert!(!IS_UNICAST_LINK_LOCAL);
-
- const IS_DOCUMENTATION: bool = IP_ADDRESS.is_documentation();
- assert!(!IS_DOCUMENTATION);
-
- const IS_BENCHMARKING: bool = IP_ADDRESS.is_benchmarking();
- assert!(!IS_BENCHMARKING);
-
- const IS_UNICAST_GLOBAL: bool = IP_ADDRESS.is_unicast_global();
- assert!(!IS_UNICAST_GLOBAL);
-
- const MULTICAST_SCOPE: Option<Ipv6MulticastScope> = IP_ADDRESS.multicast_scope();
- assert_eq!(MULTICAST_SCOPE, None);
-
- const IS_MULTICAST: bool = IP_ADDRESS.is_multicast();
- assert!(!IS_MULTICAST);
-
- const IP_V4: Option<Ipv4Addr> = IP_ADDRESS.to_ipv4();
- assert_eq!(IP_V4.unwrap(), Ipv4Addr::new(0, 0, 0, 1));
-}
-
-#[test]
-fn ip_const() {
- // test that the methods of `IpAddr` are usable in a const context
-
- const IP_ADDRESS: IpAddr = IpAddr::V4(Ipv4Addr::LOCALHOST);
-
- const IS_UNSPECIFIED: bool = IP_ADDRESS.is_unspecified();
- assert!(!IS_UNSPECIFIED);
-
- const IS_LOOPBACK: bool = IP_ADDRESS.is_loopback();
- assert!(IS_LOOPBACK);
-
- const IS_GLOBAL: bool = IP_ADDRESS.is_global();
- assert!(!IS_GLOBAL);
-
- const IS_MULTICAST: bool = IP_ADDRESS.is_multicast();
- assert!(!IS_MULTICAST);
-
- const IS_IP_V4: bool = IP_ADDRESS.is_ipv4();
- assert!(IS_IP_V4);
-
- const IS_IP_V6: bool = IP_ADDRESS.is_ipv6();
- assert!(!IS_IP_V6);
-}
-
-#[test]
-fn structural_match() {
- // test that all IP types can be structurally matched upon
-
- const IPV4: Ipv4Addr = Ipv4Addr::LOCALHOST;
- match IPV4 {
- Ipv4Addr::LOCALHOST => {}
- _ => unreachable!(),
- }
-
- const IPV6: Ipv6Addr = Ipv6Addr::LOCALHOST;
- match IPV6 {
- Ipv6Addr::LOCALHOST => {}
- _ => unreachable!(),
- }
-
- const IP: IpAddr = IpAddr::V4(Ipv4Addr::LOCALHOST);
- match IP {
- IpAddr::V4(Ipv4Addr::LOCALHOST) => {}
- _ => unreachable!(),
- }
-}
diff --git a/library/std/src/net/ip_addr.rs b/library/std/src/net/ip_addr.rs
new file mode 100644
index 000000000..4f14fc280
--- /dev/null
+++ b/library/std/src/net/ip_addr.rs
@@ -0,0 +1,2095 @@
+// Tests for this module
+#[cfg(all(test, not(target_os = "emscripten")))]
+mod tests;
+
+use crate::cmp::Ordering;
+use crate::fmt::{self, Write};
+use crate::mem::transmute;
+use crate::sys::net::netc as c;
+use crate::sys_common::{FromInner, IntoInner};
+
+use super::display_buffer::DisplayBuffer;
+
+/// An IP address, either IPv4 or IPv6.
+///
+/// This enum can contain either an [`Ipv4Addr`] or an [`Ipv6Addr`], see their
+/// respective documentation for more details.
+///
+/// # Examples
+///
+/// ```
+/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
+///
+/// let localhost_v4 = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1));
+/// let localhost_v6 = IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1));
+///
+/// assert_eq!("127.0.0.1".parse(), Ok(localhost_v4));
+/// assert_eq!("::1".parse(), Ok(localhost_v6));
+///
+/// assert_eq!(localhost_v4.is_ipv6(), false);
+/// assert_eq!(localhost_v4.is_ipv4(), true);
+/// ```
+#[cfg_attr(not(test), rustc_diagnostic_item = "IpAddr")]
+#[stable(feature = "ip_addr", since = "1.7.0")]
+#[derive(Copy, Clone, Eq, PartialEq, Hash, PartialOrd, Ord)]
+pub enum IpAddr {
+ /// An IPv4 address.
+ #[stable(feature = "ip_addr", since = "1.7.0")]
+ V4(#[stable(feature = "ip_addr", since = "1.7.0")] Ipv4Addr),
+ /// An IPv6 address.
+ #[stable(feature = "ip_addr", since = "1.7.0")]
+ V6(#[stable(feature = "ip_addr", since = "1.7.0")] Ipv6Addr),
+}
+
+/// An IPv4 address.
+///
+/// IPv4 addresses are defined as 32-bit integers in [IETF RFC 791].
+/// They are usually represented as four octets.
+///
+/// See [`IpAddr`] for a type encompassing both IPv4 and IPv6 addresses.
+///
+/// [IETF RFC 791]: https://tools.ietf.org/html/rfc791
+///
+/// # Textual representation
+///
+/// `Ipv4Addr` provides a [`FromStr`] implementation. The four octets are in decimal
+/// notation, separated by `.` (this is called "dot-decimal notation").
+/// Notably, octal numbers (which are indicated with a leading `0`) and hexadecimal numbers (which
+/// are indicated with a leading `0x`) are not allowed per [IETF RFC 6943].
+///
+/// [IETF RFC 6943]: https://tools.ietf.org/html/rfc6943#section-3.1.1
+/// [`FromStr`]: crate::str::FromStr
+///
+/// # Examples
+///
+/// ```
+/// use std::net::Ipv4Addr;
+///
+/// let localhost = Ipv4Addr::new(127, 0, 0, 1);
+/// assert_eq!("127.0.0.1".parse(), Ok(localhost));
+/// assert_eq!(localhost.is_loopback(), true);
+/// assert!("012.004.002.000".parse::<Ipv4Addr>().is_err()); // all octets are in octal
+/// assert!("0000000.0.0.0".parse::<Ipv4Addr>().is_err()); // first octet is a zero in octal
+/// assert!("0xcb.0x0.0x71.0x00".parse::<Ipv4Addr>().is_err()); // all octets are in hex
+/// ```
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+#[cfg_attr(not(test), rustc_diagnostic_item = "Ipv4Addr")]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Ipv4Addr {
+ octets: [u8; 4],
+}
+
+/// An IPv6 address.
+///
+/// IPv6 addresses are defined as 128-bit integers in [IETF RFC 4291].
+/// They are usually represented as eight 16-bit segments.
+///
+/// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291
+///
+/// # Embedding IPv4 Addresses
+///
+/// See [`IpAddr`] for a type encompassing both IPv4 and IPv6 addresses.
+///
+/// To assist in the transition from IPv4 to IPv6, two types of IPv6 addresses that embed an IPv4 address were defined:
+/// IPv4-compatible and IPv4-mapped addresses. Of these, IPv4-compatible addresses have been officially deprecated.
+///
+/// Neither type of address is assigned any special meaning by this implementation,
+/// beyond what the relevant standards prescribe. This means that an address like `::ffff:127.0.0.1`,
+/// while representing an IPv4 loopback address, is not itself an IPv6 loopback address; only `::1` is.
+/// To handle these so-called "IPv4-in-IPv6" addresses, they must first be converted to their canonical IPv4 address.
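+///
+/// A brief sketch of the behavior just described:
+///
+/// ```
+/// #![feature(ip)]
+/// use std::net::Ipv6Addr;
+///
+/// // An IPv4-mapped address is not itself an IPv6 loopback address...
+/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1).is_loopback(), false);
+/// // ...but converting it to its canonical form recovers the IPv4 loopback property.
+/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1).to_canonical().is_loopback(), true);
+/// ```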
+///
+/// ### IPv4-Compatible IPv6 Addresses
+///
+/// IPv4-compatible IPv6 addresses are defined in [IETF RFC 4291 Section 2.5.5.1], and have been officially deprecated.
+/// The RFC describes the format of an "IPv4-Compatible IPv6 address" as follows:
+///
+/// ```text
+/// | 80 bits | 16 | 32 bits |
+/// +--------------------------------------+--------------------------+
+/// |0000..............................0000|0000| IPv4 address |
+/// +--------------------------------------+----+---------------------+
+/// ```
+/// So `::a.b.c.d` would be an IPv4-compatible IPv6 address representing the IPv4 address `a.b.c.d`.
+///
+/// To convert from an IPv4 address to an IPv4-compatible IPv6 address, use [`Ipv4Addr::to_ipv6_compatible`].
+/// Use [`Ipv6Addr::to_ipv4`] to convert an IPv4-compatible IPv6 address to the canonical IPv4 address.
+///
+/// [IETF RFC 4291 Section 2.5.5.1]: https://datatracker.ietf.org/doc/html/rfc4291#section-2.5.5.1
+///
+/// ### IPv4-Mapped IPv6 Addresses
+///
+/// IPv4-mapped IPv6 addresses are defined in [IETF RFC 4291 Section 2.5.5.2].
+/// The RFC describes the format of an "IPv4-Mapped IPv6 address" as follows:
+///
+/// ```text
+/// | 80 bits | 16 | 32 bits |
+/// +--------------------------------------+--------------------------+
+/// |0000..............................0000|FFFF| IPv4 address |
+/// +--------------------------------------+----+---------------------+
+/// ```
+/// So `::ffff:a.b.c.d` would be an IPv4-mapped IPv6 address representing the IPv4 address `a.b.c.d`.
+///
+/// To convert from an IPv4 address to an IPv4-mapped IPv6 address, use [`Ipv4Addr::to_ipv6_mapped`].
+/// Use [`Ipv6Addr::to_ipv4`] to convert an IPv4-mapped IPv6 address to the canonical IPv4 address.
+/// Note that this will also convert the IPv6 loopback address `::1` to `0.0.0.1`. Use
+/// [`Ipv6Addr::to_ipv4_mapped`] to avoid this.
+///
+/// [IETF RFC 4291 Section 2.5.5.2]: https://datatracker.ietf.org/doc/html/rfc4291#section-2.5.5.2
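+///
+/// A short, non-normative illustration of the difference between the two conversions:
+///
+/// ```
+/// use std::net::{Ipv4Addr, Ipv6Addr};
+///
+/// // `to_ipv4` also accepts the deprecated IPv4-compatible form, so `::1` becomes `0.0.0.1`...
+/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).to_ipv4(), Some(Ipv4Addr::new(0, 0, 0, 1)));
+/// // ...while `to_ipv4_mapped` only converts `::ffff:a.b.c.d` addresses and leaves `::1` alone.
+/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).to_ipv4_mapped(), None);
+/// ```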
+///
+/// # Textual representation
+///
+/// `Ipv6Addr` provides a [`FromStr`] implementation. There are many ways to represent
+/// an IPv6 address in text, but in general, each segment is written in hexadecimal
+/// notation, and segments are separated by `:`. For more information, see
+/// [IETF RFC 5952].
+///
+/// [`FromStr`]: crate::str::FromStr
+/// [IETF RFC 5952]: https://tools.ietf.org/html/rfc5952
+///
+/// # Examples
+///
+/// ```
+/// use std::net::Ipv6Addr;
+///
+/// let localhost = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1);
+/// assert_eq!("::1".parse(), Ok(localhost));
+/// assert_eq!(localhost.is_loopback(), true);
+/// ```
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+#[cfg_attr(not(test), rustc_diagnostic_item = "Ipv6Addr")]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Ipv6Addr {
+ octets: [u8; 16],
+}
+
+/// Scope of an [IPv6 multicast address] as defined in [IETF RFC 7346 section 2].
+///
+/// # Stability Guarantees
+///
+/// Not all possible values for a multicast scope have been assigned.
+/// Future RFCs may introduce new scopes, which will be added as variants to this enum;
+/// because of this, the enum is marked as `#[non_exhaustive]`.
+///
+/// # Examples
+/// ```
+/// #![feature(ip)]
+///
+/// use std::net::Ipv6Addr;
+/// use std::net::Ipv6MulticastScope::*;
+///
+/// // An IPv6 multicast address with global scope (`ff0e::`).
+/// let address = Ipv6Addr::new(0xff0e, 0, 0, 0, 0, 0, 0, 0);
+///
+/// // Will print "Global scope".
+/// match address.multicast_scope() {
+/// Some(InterfaceLocal) => println!("Interface-Local scope"),
+/// Some(LinkLocal) => println!("Link-Local scope"),
+/// Some(RealmLocal) => println!("Realm-Local scope"),
+/// Some(AdminLocal) => println!("Admin-Local scope"),
+/// Some(SiteLocal) => println!("Site-Local scope"),
+/// Some(OrganizationLocal) => println!("Organization-Local scope"),
+/// Some(Global) => println!("Global scope"),
+/// Some(_) => println!("Unknown scope"),
+/// None => println!("Not a multicast address!")
+/// }
+///
+/// ```
+///
+/// [IPv6 multicast address]: Ipv6Addr
+/// [IETF RFC 7346 section 2]: https://tools.ietf.org/html/rfc7346#section-2
+#[derive(Copy, PartialEq, Eq, Clone, Hash, Debug)]
+#[unstable(feature = "ip", issue = "27709")]
+#[non_exhaustive]
+pub enum Ipv6MulticastScope {
+ /// Interface-Local scope.
+ InterfaceLocal,
+ /// Link-Local scope.
+ LinkLocal,
+ /// Realm-Local scope.
+ RealmLocal,
+ /// Admin-Local scope.
+ AdminLocal,
+ /// Site-Local scope.
+ SiteLocal,
+ /// Organization-Local scope.
+ OrganizationLocal,
+ /// Global scope.
+ Global,
+}
+
+impl IpAddr {
+ /// Returns [`true`] for the special 'unspecified' address.
+ ///
+ /// See the documentation for [`Ipv4Addr::is_unspecified()`] and
+ /// [`Ipv6Addr::is_unspecified()`] for more details.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
+ ///
+ /// assert_eq!(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)).is_unspecified(), true);
+ /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)).is_unspecified(), true);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(feature = "ip_shared", since = "1.12.0")]
+ #[must_use]
+ #[inline]
+ pub const fn is_unspecified(&self) -> bool {
+ match self {
+ IpAddr::V4(ip) => ip.is_unspecified(),
+ IpAddr::V6(ip) => ip.is_unspecified(),
+ }
+ }
+
+ /// Returns [`true`] if this is a loopback address.
+ ///
+ /// See the documentation for [`Ipv4Addr::is_loopback()`] and
+ /// [`Ipv6Addr::is_loopback()`] for more details.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
+ ///
+ /// assert_eq!(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)).is_loopback(), true);
+ /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0x1)).is_loopback(), true);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(feature = "ip_shared", since = "1.12.0")]
+ #[must_use]
+ #[inline]
+ pub const fn is_loopback(&self) -> bool {
+ match self {
+ IpAddr::V4(ip) => ip.is_loopback(),
+ IpAddr::V6(ip) => ip.is_loopback(),
+ }
+ }
+
+ /// Returns [`true`] if the address appears to be globally routable.
+ ///
+ /// See the documentation for [`Ipv4Addr::is_global()`] and
+ /// [`Ipv6Addr::is_global()`] for more details.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ ///
+ /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
+ ///
+ /// assert_eq!(IpAddr::V4(Ipv4Addr::new(80, 9, 12, 3)).is_global(), true);
+ /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0x1c9, 0, 0, 0xafc8, 0, 0x1)).is_global(), true);
+ /// ```
+ #[rustc_const_unstable(feature = "const_ip", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn is_global(&self) -> bool {
+ match self {
+ IpAddr::V4(ip) => ip.is_global(),
+ IpAddr::V6(ip) => ip.is_global(),
+ }
+ }
+
+ /// Returns [`true`] if this is a multicast address.
+ ///
+ /// See the documentation for [`Ipv4Addr::is_multicast()`] and
+ /// [`Ipv6Addr::is_multicast()`] for more details.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
+ ///
+ /// assert_eq!(IpAddr::V4(Ipv4Addr::new(224, 254, 0, 0)).is_multicast(), true);
+ /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0)).is_multicast(), true);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(feature = "ip_shared", since = "1.12.0")]
+ #[must_use]
+ #[inline]
+ pub const fn is_multicast(&self) -> bool {
+ match self {
+ IpAddr::V4(ip) => ip.is_multicast(),
+ IpAddr::V6(ip) => ip.is_multicast(),
+ }
+ }
+
+ /// Returns [`true`] if this address is in a range designated for documentation.
+ ///
+ /// See the documentation for [`Ipv4Addr::is_documentation()`] and
+ /// [`Ipv6Addr::is_documentation()`] for more details.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ ///
+ /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
+ ///
+ /// assert_eq!(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 6)).is_documentation(), true);
+ /// assert_eq!(
+ /// IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0)).is_documentation(),
+ /// true
+ /// );
+ /// ```
+ #[rustc_const_unstable(feature = "const_ip", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn is_documentation(&self) -> bool {
+ match self {
+ IpAddr::V4(ip) => ip.is_documentation(),
+ IpAddr::V6(ip) => ip.is_documentation(),
+ }
+ }
+
+ /// Returns [`true`] if this address is in a range designated for benchmarking.
+ ///
+ /// See the documentation for [`Ipv4Addr::is_benchmarking()`] and
+ /// [`Ipv6Addr::is_benchmarking()`] for more details.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ ///
+ /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
+ ///
+ /// assert_eq!(IpAddr::V4(Ipv4Addr::new(198, 19, 255, 255)).is_benchmarking(), true);
+ /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0x2001, 0x2, 0, 0, 0, 0, 0, 0)).is_benchmarking(), true);
+ /// ```
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn is_benchmarking(&self) -> bool {
+ match self {
+ IpAddr::V4(ip) => ip.is_benchmarking(),
+ IpAddr::V6(ip) => ip.is_benchmarking(),
+ }
+ }
+
+ /// Returns [`true`] if this address is an [`IPv4` address], and [`false`]
+ /// otherwise.
+ ///
+ /// [`IPv4` address]: IpAddr::V4
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
+ ///
+ /// assert_eq!(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 6)).is_ipv4(), true);
+ /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0)).is_ipv4(), false);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(feature = "ipaddr_checker", since = "1.16.0")]
+ #[must_use]
+ #[inline]
+ pub const fn is_ipv4(&self) -> bool {
+ matches!(self, IpAddr::V4(_))
+ }
+
+ /// Returns [`true`] if this address is an [`IPv6` address], and [`false`]
+ /// otherwise.
+ ///
+ /// [`IPv6` address]: IpAddr::V6
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
+ ///
+ /// assert_eq!(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 6)).is_ipv6(), false);
+ /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0)).is_ipv6(), true);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(feature = "ipaddr_checker", since = "1.16.0")]
+ #[must_use]
+ #[inline]
+ pub const fn is_ipv6(&self) -> bool {
+ matches!(self, IpAddr::V6(_))
+ }
+
+ /// Converts this address to an `IpAddr::V4` if it is an IPv4-mapped IPv6 address,
+ /// otherwise returns `self` as-is.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
+ ///
+ /// assert_eq!(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)).to_canonical().is_loopback(), true);
+ /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1)).is_loopback(), false);
+ /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1)).to_canonical().is_loopback(), true);
+ /// ```
+ #[inline]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[rustc_const_unstable(feature = "const_ip", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ pub const fn to_canonical(&self) -> IpAddr {
+ match self {
+ &v4 @ IpAddr::V4(_) => v4,
+ IpAddr::V6(v6) => v6.to_canonical(),
+ }
+ }
+}
+
+impl Ipv4Addr {
+ /// Creates a new IPv4 address from four eight-bit octets.
+ ///
+ /// The result will represent the IP address `a`.`b`.`c`.`d`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// let addr = Ipv4Addr::new(127, 0, 0, 1);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_32", since = "1.32.0")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ #[inline]
+ pub const fn new(a: u8, b: u8, c: u8, d: u8) -> Ipv4Addr {
+ Ipv4Addr { octets: [a, b, c, d] }
+ }
+
+ /// An IPv4 address representing localhost: `127.0.0.1`
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// let addr = Ipv4Addr::LOCALHOST;
+ /// assert_eq!(addr, Ipv4Addr::new(127, 0, 0, 1));
+ /// ```
+ #[stable(feature = "ip_constructors", since = "1.30.0")]
+ pub const LOCALHOST: Self = Ipv4Addr::new(127, 0, 0, 1);
+
+ /// An IPv4 address representing an unspecified address: `0.0.0.0`
+ ///
+ /// This corresponds to the constant `INADDR_ANY` in other languages.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// let addr = Ipv4Addr::UNSPECIFIED;
+ /// assert_eq!(addr, Ipv4Addr::new(0, 0, 0, 0));
+ /// ```
+ #[doc(alias = "INADDR_ANY")]
+ #[stable(feature = "ip_constructors", since = "1.30.0")]
+ pub const UNSPECIFIED: Self = Ipv4Addr::new(0, 0, 0, 0);
+
+ /// An IPv4 address representing the broadcast address: `255.255.255.255`
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// let addr = Ipv4Addr::BROADCAST;
+ /// assert_eq!(addr, Ipv4Addr::new(255, 255, 255, 255));
+ /// ```
+ #[stable(feature = "ip_constructors", since = "1.30.0")]
+ pub const BROADCAST: Self = Ipv4Addr::new(255, 255, 255, 255);
+
+ /// Returns the four eight-bit integers that make up this address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// let addr = Ipv4Addr::new(127, 0, 0, 1);
+ /// assert_eq!(addr.octets(), [127, 0, 0, 1]);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ #[inline]
+ pub const fn octets(&self) -> [u8; 4] {
+ self.octets
+ }
+
+ /// Returns [`true`] for the special 'unspecified' address (`0.0.0.0`).
+ ///
+ /// This property is defined in _UNIX Network Programming, Second Edition_,
+ /// W. Richard Stevens, p. 891; see also [ip7].
+ ///
+ /// [ip7]: https://man7.org/linux/man-pages/man7/ip.7.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// assert_eq!(Ipv4Addr::new(0, 0, 0, 0).is_unspecified(), true);
+ /// assert_eq!(Ipv4Addr::new(45, 22, 13, 197).is_unspecified(), false);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_32", since = "1.32.0")]
+ #[stable(feature = "ip_shared", since = "1.12.0")]
+ #[must_use]
+ #[inline]
+ pub const fn is_unspecified(&self) -> bool {
+ u32::from_be_bytes(self.octets) == 0
+ }
+
+ /// Returns [`true`] if this is a loopback address (`127.0.0.0/8`).
+ ///
+ /// This property is defined by [IETF RFC 1122].
+ ///
+ /// [IETF RFC 1122]: https://tools.ietf.org/html/rfc1122
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// assert_eq!(Ipv4Addr::new(127, 0, 0, 1).is_loopback(), true);
+ /// assert_eq!(Ipv4Addr::new(45, 22, 13, 197).is_loopback(), false);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(since = "1.7.0", feature = "ip_17")]
+ #[must_use]
+ #[inline]
+ pub const fn is_loopback(&self) -> bool {
+ self.octets()[0] == 127
+ }
+
+ /// Returns [`true`] if this is a private address.
+ ///
+ /// The private address ranges are defined in [IETF RFC 1918] and include:
+ ///
+ /// - `10.0.0.0/8`
+ /// - `172.16.0.0/12`
+ /// - `192.168.0.0/16`
+ ///
+ /// [IETF RFC 1918]: https://tools.ietf.org/html/rfc1918
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// assert_eq!(Ipv4Addr::new(10, 0, 0, 1).is_private(), true);
+ /// assert_eq!(Ipv4Addr::new(10, 10, 10, 10).is_private(), true);
+ /// assert_eq!(Ipv4Addr::new(172, 16, 10, 10).is_private(), true);
+ /// assert_eq!(Ipv4Addr::new(172, 29, 45, 14).is_private(), true);
+ /// assert_eq!(Ipv4Addr::new(172, 32, 0, 2).is_private(), false);
+ /// assert_eq!(Ipv4Addr::new(192, 168, 0, 2).is_private(), true);
+ /// assert_eq!(Ipv4Addr::new(192, 169, 0, 2).is_private(), false);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(since = "1.7.0", feature = "ip_17")]
+ #[must_use]
+ #[inline]
+ pub const fn is_private(&self) -> bool {
+ match self.octets() {
+ [10, ..] => true,
+ [172, b, ..] if b >= 16 && b <= 31 => true,
+ [192, 168, ..] => true,
+ _ => false,
+ }
+ }
+
+ /// Returns [`true`] if the address is link-local (`169.254.0.0/16`).
+ ///
+ /// This property is defined by [IETF RFC 3927].
+ ///
+ /// [IETF RFC 3927]: https://tools.ietf.org/html/rfc3927
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// assert_eq!(Ipv4Addr::new(169, 254, 0, 0).is_link_local(), true);
+ /// assert_eq!(Ipv4Addr::new(169, 254, 10, 65).is_link_local(), true);
+ /// assert_eq!(Ipv4Addr::new(16, 89, 10, 65).is_link_local(), false);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(since = "1.7.0", feature = "ip_17")]
+ #[must_use]
+ #[inline]
+ pub const fn is_link_local(&self) -> bool {
+ matches!(self.octets(), [169, 254, ..])
+ }
+
+ /// Returns [`true`] if the address appears to be globally reachable
+ /// as specified by the [IANA IPv4 Special-Purpose Address Registry].
+ /// Whether or not an address is practically reachable will depend on your network configuration.
+ ///
+ /// Most IPv4 addresses are globally reachable,
+ /// unless they are specifically defined as *not* globally reachable.
+ ///
+ /// Non-exhaustive list of notable addresses that are not globally reachable:
+ ///
+ /// - The [unspecified address] ([`is_unspecified`](Ipv4Addr::is_unspecified))
+ /// - Addresses reserved for private use ([`is_private`](Ipv4Addr::is_private))
+ /// - Addresses in the shared address space ([`is_shared`](Ipv4Addr::is_shared))
+ /// - Loopback addresses ([`is_loopback`](Ipv4Addr::is_loopback))
+ /// - Link-local addresses ([`is_link_local`](Ipv4Addr::is_link_local))
+ /// - Addresses reserved for documentation ([`is_documentation`](Ipv4Addr::is_documentation))
+ /// - Addresses reserved for benchmarking ([`is_benchmarking`](Ipv4Addr::is_benchmarking))
+ /// - Reserved addresses ([`is_reserved`](Ipv4Addr::is_reserved))
+ /// - The [broadcast address] ([`is_broadcast`](Ipv4Addr::is_broadcast))
+ ///
+ /// For the complete overview of which addresses are globally reachable, see the table at the [IANA IPv4 Special-Purpose Address Registry].
+ ///
+ /// [IANA IPv4 Special-Purpose Address Registry]: https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml
+ /// [unspecified address]: Ipv4Addr::UNSPECIFIED
+ /// [broadcast address]: Ipv4Addr::BROADCAST
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ ///
+ /// use std::net::Ipv4Addr;
+ ///
+ /// // Most IPv4 addresses are globally reachable:
+ /// assert_eq!(Ipv4Addr::new(80, 9, 12, 3).is_global(), true);
+ ///
+ /// // However some addresses have been assigned a special meaning
+ /// // that makes them not globally reachable. Some examples are:
+ ///
+ /// // The unspecified address (`0.0.0.0`)
+ /// assert_eq!(Ipv4Addr::UNSPECIFIED.is_global(), false);
+ ///
+ /// // Addresses reserved for private use (`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`)
+ /// assert_eq!(Ipv4Addr::new(10, 254, 0, 0).is_global(), false);
+ /// assert_eq!(Ipv4Addr::new(192, 168, 10, 65).is_global(), false);
+ /// assert_eq!(Ipv4Addr::new(172, 16, 10, 65).is_global(), false);
+ ///
+ /// // Addresses in the shared address space (`100.64.0.0/10`)
+ /// assert_eq!(Ipv4Addr::new(100, 100, 0, 0).is_global(), false);
+ ///
+ /// // The loopback addresses (`127.0.0.0/8`)
+ /// assert_eq!(Ipv4Addr::LOCALHOST.is_global(), false);
+ ///
+ /// // Link-local addresses (`169.254.0.0/16`)
+ /// assert_eq!(Ipv4Addr::new(169, 254, 45, 1).is_global(), false);
+ ///
+ /// // Addresses reserved for documentation (`192.0.2.0/24`, `198.51.100.0/24`, `203.0.113.0/24`)
+ /// assert_eq!(Ipv4Addr::new(192, 0, 2, 255).is_global(), false);
+ /// assert_eq!(Ipv4Addr::new(198, 51, 100, 65).is_global(), false);
+ /// assert_eq!(Ipv4Addr::new(203, 0, 113, 6).is_global(), false);
+ ///
+ /// // Addresses reserved for benchmarking (`198.18.0.0/15`)
+ /// assert_eq!(Ipv4Addr::new(198, 18, 0, 0).is_global(), false);
+ ///
+ /// // Reserved addresses (`240.0.0.0/4`)
+ /// assert_eq!(Ipv4Addr::new(250, 10, 20, 30).is_global(), false);
+ ///
+ /// // The broadcast address (`255.255.255.255`)
+ /// assert_eq!(Ipv4Addr::BROADCAST.is_global(), false);
+ ///
+ /// // For a complete overview see the IANA IPv4 Special-Purpose Address Registry.
+ /// ```
+ #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn is_global(&self) -> bool {
+ !(self.octets()[0] == 0 // "This network"
+ || self.is_private()
+ || self.is_shared()
+ || self.is_loopback()
+ || self.is_link_local()
+ // addresses reserved for future protocols (`192.0.0.0/24`)
+ ||(self.octets()[0] == 192 && self.octets()[1] == 0 && self.octets()[2] == 0)
+ || self.is_documentation()
+ || self.is_benchmarking()
+ || self.is_reserved()
+ || self.is_broadcast())
+ }
+
+ /// Returns [`true`] if this address is part of the Shared Address Space defined in
+ /// [IETF RFC 6598] (`100.64.0.0/10`).
+ ///
+ /// [IETF RFC 6598]: https://tools.ietf.org/html/rfc6598
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ /// use std::net::Ipv4Addr;
+ ///
+ /// assert_eq!(Ipv4Addr::new(100, 64, 0, 0).is_shared(), true);
+ /// assert_eq!(Ipv4Addr::new(100, 127, 255, 255).is_shared(), true);
+ /// assert_eq!(Ipv4Addr::new(100, 128, 0, 0).is_shared(), false);
+ /// ```
+ #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn is_shared(&self) -> bool {
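+ // 100.64.0.0/10: the first octet is 100 and the top two bits of the second octet are 0b01.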
+ self.octets()[0] == 100 && (self.octets()[1] & 0b1100_0000 == 0b0100_0000)
+ }
+
+ /// Returns [`true`] if this address is part of the `198.18.0.0/15` range, which is reserved
+ /// for network device benchmarking. This range is defined in [IETF RFC 2544] as `192.18.0.0`
+ /// through `198.19.255.255` but [errata 423] corrects it to `198.18.0.0/15`.
+ ///
+ /// [IETF RFC 2544]: https://tools.ietf.org/html/rfc2544
+ /// [errata 423]: https://www.rfc-editor.org/errata/eid423
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ /// use std::net::Ipv4Addr;
+ ///
+ /// assert_eq!(Ipv4Addr::new(198, 17, 255, 255).is_benchmarking(), false);
+ /// assert_eq!(Ipv4Addr::new(198, 18, 0, 0).is_benchmarking(), true);
+ /// assert_eq!(Ipv4Addr::new(198, 19, 255, 255).is_benchmarking(), true);
+ /// assert_eq!(Ipv4Addr::new(198, 20, 0, 0).is_benchmarking(), false);
+ /// ```
+ #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn is_benchmarking(&self) -> bool {
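+ // 198.18.0.0/15: the first octet is 198 and the second octet is 18 or 19 (the mask clears the low bit).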
+ self.octets()[0] == 198 && (self.octets()[1] & 0xfe) == 18
+ }
+
+ /// Returns [`true`] if this address is reserved by IANA for future use. [IETF RFC 1112]
+ /// defines the block of reserved addresses as `240.0.0.0/4`. This range normally includes the
+ /// broadcast address `255.255.255.255`, but this implementation explicitly excludes it, since
+ /// it is obviously not reserved for future use.
+ ///
+ /// [IETF RFC 1112]: https://tools.ietf.org/html/rfc1112
+ ///
+ /// # Warning
+ ///
+ /// As IANA assigns new addresses, this method will be
+ /// updated. This may result in non-reserved addresses being
+ /// treated as reserved in code that relies on an outdated version
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ /// use std::net::Ipv4Addr;
+ ///
+ /// assert_eq!(Ipv4Addr::new(240, 0, 0, 0).is_reserved(), true);
+ /// assert_eq!(Ipv4Addr::new(255, 255, 255, 254).is_reserved(), true);
+ ///
+ /// assert_eq!(Ipv4Addr::new(239, 255, 255, 255).is_reserved(), false);
+ /// // The broadcast address is not considered as reserved for future use by this implementation
+ /// assert_eq!(Ipv4Addr::new(255, 255, 255, 255).is_reserved(), false);
+ /// ```
+ #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn is_reserved(&self) -> bool {
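+ // 240.0.0.0/4: the four most significant bits of the first octet are all set; the broadcast address is excluded.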
+ self.octets()[0] & 240 == 240 && !self.is_broadcast()
+ }
+
+ /// Returns [`true`] if this is a multicast address (`224.0.0.0/4`).
+ ///
+ /// Multicast addresses have a most significant octet between `224` and `239`,
+ /// as defined by [IETF RFC 5771].
+ ///
+ /// [IETF RFC 5771]: https://tools.ietf.org/html/rfc5771
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// assert_eq!(Ipv4Addr::new(224, 254, 0, 0).is_multicast(), true);
+ /// assert_eq!(Ipv4Addr::new(236, 168, 10, 65).is_multicast(), true);
+ /// assert_eq!(Ipv4Addr::new(172, 16, 10, 65).is_multicast(), false);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(since = "1.7.0", feature = "ip_17")]
+ #[must_use]
+ #[inline]
+ pub const fn is_multicast(&self) -> bool {
+ self.octets()[0] >= 224 && self.octets()[0] <= 239
+ }
+
+ /// Returns [`true`] if this is a broadcast address (`255.255.255.255`).
+ ///
+ /// A broadcast address has all octets set to `255` as defined in [IETF RFC 919].
+ ///
+ /// [IETF RFC 919]: https://tools.ietf.org/html/rfc919
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// assert_eq!(Ipv4Addr::new(255, 255, 255, 255).is_broadcast(), true);
+ /// assert_eq!(Ipv4Addr::new(236, 168, 10, 65).is_broadcast(), false);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(since = "1.7.0", feature = "ip_17")]
+ #[must_use]
+ #[inline]
+ pub const fn is_broadcast(&self) -> bool {
+ u32::from_be_bytes(self.octets()) == u32::from_be_bytes(Self::BROADCAST.octets())
+ }
+
+ /// Returns [`true`] if this address is in a range designated for documentation.
+ ///
+ /// This is defined in [IETF RFC 5737]:
+ ///
+ /// - `192.0.2.0/24` (TEST-NET-1)
+ /// - `198.51.100.0/24` (TEST-NET-2)
+ /// - `203.0.113.0/24` (TEST-NET-3)
+ ///
+ /// [IETF RFC 5737]: https://tools.ietf.org/html/rfc5737
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// assert_eq!(Ipv4Addr::new(192, 0, 2, 255).is_documentation(), true);
+ /// assert_eq!(Ipv4Addr::new(198, 51, 100, 65).is_documentation(), true);
+ /// assert_eq!(Ipv4Addr::new(203, 0, 113, 6).is_documentation(), true);
+ /// assert_eq!(Ipv4Addr::new(193, 34, 17, 19).is_documentation(), false);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(since = "1.7.0", feature = "ip_17")]
+ #[must_use]
+ #[inline]
+ pub const fn is_documentation(&self) -> bool {
+ matches!(self.octets(), [192, 0, 2, _] | [198, 51, 100, _] | [203, 0, 113, _])
+ }
+
+ /// Converts this address to an [IPv4-compatible] [`IPv6` address].
+ ///
+ /// `a.b.c.d` becomes `::a.b.c.d`
+ ///
+ /// Note that IPv4-compatible addresses have been officially deprecated.
+ /// If you don't explicitly need an IPv4-compatible address for legacy reasons, consider using `to_ipv6_mapped` instead.
+ ///
+ /// [IPv4-compatible]: Ipv6Addr#ipv4-compatible-ipv6-addresses
+ /// [`IPv6` address]: Ipv6Addr
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{Ipv4Addr, Ipv6Addr};
+ ///
+ /// assert_eq!(
+ /// Ipv4Addr::new(192, 0, 2, 255).to_ipv6_compatible(),
+ /// Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0xc000, 0x2ff)
+ /// );
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn to_ipv6_compatible(&self) -> Ipv6Addr {
+ let [a, b, c, d] = self.octets();
+ Ipv6Addr { octets: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, a, b, c, d] }
+ }
+
+ /// Converts this address to an [IPv4-mapped] [`IPv6` address].
+ ///
+ /// `a.b.c.d` becomes `::ffff:a.b.c.d`
+ ///
+ /// [IPv4-mapped]: Ipv6Addr#ipv4-mapped-ipv6-addresses
+ /// [`IPv6` address]: Ipv6Addr
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{Ipv4Addr, Ipv6Addr};
+ ///
+ /// assert_eq!(Ipv4Addr::new(192, 0, 2, 255).to_ipv6_mapped(),
+ /// Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc000, 0x2ff));
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn to_ipv6_mapped(&self) -> Ipv6Addr {
+ let [a, b, c, d] = self.octets();
+ Ipv6Addr { octets: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, a, b, c, d] }
+ }
+}
+
+#[stable(feature = "ip_addr", since = "1.7.0")]
+impl fmt::Display for IpAddr {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ IpAddr::V4(ip) => ip.fmt(fmt),
+ IpAddr::V6(ip) => ip.fmt(fmt),
+ }
+ }
+}
+
+#[stable(feature = "ip_addr", since = "1.7.0")]
+impl fmt::Debug for IpAddr {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self, fmt)
+ }
+}
+
+#[stable(feature = "ip_from_ip", since = "1.16.0")]
+impl From<Ipv4Addr> for IpAddr {
+ /// Copies this address to a new `IpAddr::V4`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv4Addr};
+ ///
+ /// let addr = Ipv4Addr::new(127, 0, 0, 1);
+ ///
+ /// assert_eq!(
+ /// IpAddr::V4(addr),
+ /// IpAddr::from(addr)
+ /// )
+ /// ```
+ #[inline]
+ fn from(ipv4: Ipv4Addr) -> IpAddr {
+ IpAddr::V4(ipv4)
+ }
+}
+
+#[stable(feature = "ip_from_ip", since = "1.16.0")]
+impl From<Ipv6Addr> for IpAddr {
+ /// Copies this address to a new `IpAddr::V6`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv6Addr};
+ ///
+ /// let addr = Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff);
+ ///
+ /// assert_eq!(
+ /// IpAddr::V6(addr),
+ /// IpAddr::from(addr)
+ /// );
+ /// ```
+ #[inline]
+ fn from(ipv6: Ipv6Addr) -> IpAddr {
+ IpAddr::V6(ipv6)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for Ipv4Addr {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let octets = self.octets();
+
+ // If there are no alignment requirements, write the IP address directly to `f`.
+ // Otherwise, write it to a local buffer and then use `f.pad`.
+ if fmt.precision().is_none() && fmt.width().is_none() {
+ write!(fmt, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3])
+ } else {
+ const LONGEST_IPV4_ADDR: &str = "255.255.255.255";
+
+ let mut buf = DisplayBuffer::<{ LONGEST_IPV4_ADDR.len() }>::new();
+ // Buffer is long enough for the longest possible IPv4 address, so this should never fail.
+ write!(buf, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3]).unwrap();
+
+ fmt.pad(buf.as_str())
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for Ipv4Addr {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self, fmt)
+ }
+}
+
+#[stable(feature = "ip_cmp", since = "1.16.0")]
+impl PartialEq<Ipv4Addr> for IpAddr {
+ #[inline]
+ fn eq(&self, other: &Ipv4Addr) -> bool {
+ match self {
+ IpAddr::V4(v4) => v4 == other,
+ IpAddr::V6(_) => false,
+ }
+ }
+}
+
+#[stable(feature = "ip_cmp", since = "1.16.0")]
+impl PartialEq<IpAddr> for Ipv4Addr {
+ #[inline]
+ fn eq(&self, other: &IpAddr) -> bool {
+ match other {
+ IpAddr::V4(v4) => self == v4,
+ IpAddr::V6(_) => false,
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl PartialOrd for Ipv4Addr {
+ #[inline]
+ fn partial_cmp(&self, other: &Ipv4Addr) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+#[stable(feature = "ip_cmp", since = "1.16.0")]
+impl PartialOrd<Ipv4Addr> for IpAddr {
+ #[inline]
+ fn partial_cmp(&self, other: &Ipv4Addr) -> Option<Ordering> {
+ match self {
+ IpAddr::V4(v4) => v4.partial_cmp(other),
+ IpAddr::V6(_) => Some(Ordering::Greater),
+ }
+ }
+}
+
+#[stable(feature = "ip_cmp", since = "1.16.0")]
+impl PartialOrd<IpAddr> for Ipv4Addr {
+ #[inline]
+ fn partial_cmp(&self, other: &IpAddr) -> Option<Ordering> {
+ match other {
+ IpAddr::V4(v4) => self.partial_cmp(v4),
+ IpAddr::V6(_) => Some(Ordering::Less),
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Ord for Ipv4Addr {
+ #[inline]
+ fn cmp(&self, other: &Ipv4Addr) -> Ordering {
+ self.octets.cmp(&other.octets)
+ }
+}
+
+impl IntoInner<c::in_addr> for Ipv4Addr {
+ #[inline]
+ fn into_inner(self) -> c::in_addr {
+ // `s_addr` is stored as BE on all machines and the array is in BE order.
+ // So the native endian conversion method is used so that it's never swapped.
+ c::in_addr { s_addr: u32::from_ne_bytes(self.octets) }
+ }
+}
+impl FromInner<c::in_addr> for Ipv4Addr {
+ fn from_inner(addr: c::in_addr) -> Ipv4Addr {
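+ // `s_addr` is already in network byte order (big-endian), so the native-endian
+ // byte conversion keeps the octets unchanged.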
+ Ipv4Addr { octets: addr.s_addr.to_ne_bytes() }
+ }
+}
+
+#[stable(feature = "ip_u32", since = "1.1.0")]
+impl From<Ipv4Addr> for u32 {
+ /// Converts an `Ipv4Addr` into a host byte order `u32`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// let addr = Ipv4Addr::new(0x12, 0x34, 0x56, 0x78);
+ /// assert_eq!(0x12345678, u32::from(addr));
+ /// ```
+ #[inline]
+ fn from(ip: Ipv4Addr) -> u32 {
+ u32::from_be_bytes(ip.octets)
+ }
+}
+
+#[stable(feature = "ip_u32", since = "1.1.0")]
+impl From<u32> for Ipv4Addr {
+ /// Converts a host byte order `u32` into an `Ipv4Addr`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// let addr = Ipv4Addr::from(0x12345678);
+ /// assert_eq!(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78), addr);
+ /// ```
+ #[inline]
+ fn from(ip: u32) -> Ipv4Addr {
+ Ipv4Addr { octets: ip.to_be_bytes() }
+ }
+}
+
+#[stable(feature = "from_slice_v4", since = "1.9.0")]
+impl From<[u8; 4]> for Ipv4Addr {
+ /// Creates an `Ipv4Addr` from a four element byte array.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// let addr = Ipv4Addr::from([13u8, 12u8, 11u8, 10u8]);
+ /// assert_eq!(Ipv4Addr::new(13, 12, 11, 10), addr);
+ /// ```
+ #[inline]
+ fn from(octets: [u8; 4]) -> Ipv4Addr {
+ Ipv4Addr { octets }
+ }
+}
+
+#[stable(feature = "ip_from_slice", since = "1.17.0")]
+impl From<[u8; 4]> for IpAddr {
+ /// Creates an `IpAddr::V4` from a four element byte array.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv4Addr};
+ ///
+ /// let addr = IpAddr::from([13u8, 12u8, 11u8, 10u8]);
+ /// assert_eq!(IpAddr::V4(Ipv4Addr::new(13, 12, 11, 10)), addr);
+ /// ```
+ #[inline]
+ fn from(octets: [u8; 4]) -> IpAddr {
+ IpAddr::V4(Ipv4Addr::from(octets))
+ }
+}
+
+impl Ipv6Addr {
+ /// Creates a new IPv6 address from eight 16-bit segments.
+ ///
+ /// The result will represent the IP address `a:b:c:d:e:f:g:h`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv6Addr;
+ ///
+ /// let addr = Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_32", since = "1.32.0")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ #[inline]
+ pub const fn new(a: u16, b: u16, c: u16, d: u16, e: u16, f: u16, g: u16, h: u16) -> Ipv6Addr {
+ let addr16 = [
+ a.to_be(),
+ b.to_be(),
+ c.to_be(),
+ d.to_be(),
+ e.to_be(),
+ f.to_be(),
+ g.to_be(),
+ h.to_be(),
+ ];
+ Ipv6Addr {
+ // All elements in `addr16` are big endian.
+ // SAFETY: `[u16; 8]` is always safe to transmute to `[u8; 16]`.
+ octets: unsafe { transmute::<_, [u8; 16]>(addr16) },
+ }
+ }
+
+ /// An IPv6 address representing localhost: `::1`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv6Addr;
+ ///
+ /// let addr = Ipv6Addr::LOCALHOST;
+ /// assert_eq!(addr, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1));
+ /// ```
+ #[stable(feature = "ip_constructors", since = "1.30.0")]
+ pub const LOCALHOST: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1);
+
+ /// An IPv6 address representing the unspecified address: `::`
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv6Addr;
+ ///
+ /// let addr = Ipv6Addr::UNSPECIFIED;
+ /// assert_eq!(addr, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0));
+ /// ```
+ #[stable(feature = "ip_constructors", since = "1.30.0")]
+ pub const UNSPECIFIED: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0);
+
+ /// Returns the eight 16-bit segments that make up this address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv6Addr;
+ ///
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).segments(),
+ /// [0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff]);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ #[inline]
+ pub const fn segments(&self) -> [u16; 8] {
+ // All elements in `self.octets` must be big endian.
+ // SAFETY: `[u8; 16]` is always safe to transmute to `[u16; 8]`.
+ let [a, b, c, d, e, f, g, h] = unsafe { transmute::<_, [u16; 8]>(self.octets) };
+ // We want native endian u16
+ [
+ u16::from_be(a),
+ u16::from_be(b),
+ u16::from_be(c),
+ u16::from_be(d),
+ u16::from_be(e),
+ u16::from_be(f),
+ u16::from_be(g),
+ u16::from_be(h),
+ ]
+ }
+
+ /// Returns [`true`] for the special 'unspecified' address (`::`).
+ ///
+ /// This property is defined in [IETF RFC 4291].
+ ///
+ /// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv6Addr;
+ ///
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_unspecified(), false);
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0).is_unspecified(), true);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(since = "1.7.0", feature = "ip_17")]
+ #[must_use]
+ #[inline]
+ pub const fn is_unspecified(&self) -> bool {
+ u128::from_be_bytes(self.octets()) == u128::from_be_bytes(Ipv6Addr::UNSPECIFIED.octets())
+ }
+
+ /// Returns [`true`] if this is the [loopback address] (`::1`),
+ /// as defined in [IETF RFC 4291 section 2.5.3].
+ ///
+ /// Contrary to IPv4, in IPv6 there is only one loopback address.
+ ///
+ /// [loopback address]: Ipv6Addr::LOCALHOST
+ /// [IETF RFC 4291 section 2.5.3]: https://tools.ietf.org/html/rfc4291#section-2.5.3
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv6Addr;
+ ///
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_loopback(), false);
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0x1).is_loopback(), true);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(since = "1.7.0", feature = "ip_17")]
+ #[must_use]
+ #[inline]
+ pub const fn is_loopback(&self) -> bool {
+ u128::from_be_bytes(self.octets()) == u128::from_be_bytes(Ipv6Addr::LOCALHOST.octets())
+ }
+
+ /// Returns [`true`] if the address appears to be globally reachable
+ /// as specified by the [IANA IPv6 Special-Purpose Address Registry].
+ /// Whether or not an address is practically reachable will depend on your network configuration.
+ ///
+ /// Most IPv6 addresses are globally reachable,
+ /// unless they are specifically defined as *not* globally reachable.
+ ///
+ /// Non-exhaustive list of notable addresses that are not globally reachable:
+ ///
+ /// - The [unspecified address] ([`is_unspecified`](Ipv6Addr::is_unspecified))
+ /// - The [loopback address] ([`is_loopback`](Ipv6Addr::is_loopback))
+ /// - IPv4-mapped addresses
+ /// - Addresses reserved for benchmarking
+ /// - Addresses reserved for documentation ([`is_documentation`](Ipv6Addr::is_documentation))
+ /// - Unique local addresses ([`is_unique_local`](Ipv6Addr::is_unique_local))
+ /// - Unicast addresses with link-local scope ([`is_unicast_link_local`](Ipv6Addr::is_unicast_link_local))
+ ///
+ /// For the complete overview of which addresses are globally reachable, see the table at the [IANA IPv6 Special-Purpose Address Registry].
+ ///
+ /// Note that an address having global scope is not the same as being globally reachable,
+ /// and there is no direct relation between the two concepts: There exist addresses with global scope
+ /// that are not globally reachable (for example unique local addresses),
+ /// and addresses that are globally reachable without having global scope
+ /// (multicast addresses with non-global scope).
+ ///
+ /// [IANA IPv6 Special-Purpose Address Registry]: https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml
+ /// [unspecified address]: Ipv6Addr::UNSPECIFIED
+ /// [loopback address]: Ipv6Addr::LOCALHOST
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ ///
+ /// use std::net::Ipv6Addr;
+ ///
+ /// // Most IPv6 addresses are globally reachable:
+ /// assert_eq!(Ipv6Addr::new(0x26, 0, 0x1c9, 0, 0, 0xafc8, 0x10, 0x1).is_global(), true);
+ ///
+ /// // However some addresses have been assigned a special meaning
+ /// // that makes them not globally reachable. Some examples are:
+ ///
+ /// // The unspecified address (`::`)
+ /// assert_eq!(Ipv6Addr::UNSPECIFIED.is_global(), false);
+ ///
+ /// // The loopback address (`::1`)
+ /// assert_eq!(Ipv6Addr::LOCALHOST.is_global(), false);
+ ///
+ /// // IPv4-mapped addresses (`::ffff:0:0/96`)
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_global(), false);
+ ///
+ /// // Addresses reserved for benchmarking (`2001:2::/48`)
+ /// assert_eq!(Ipv6Addr::new(0x2001, 2, 0, 0, 0, 0, 0, 1,).is_global(), false);
+ ///
+ /// // Addresses reserved for documentation (`2001:db8::/32`)
+ /// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1).is_global(), false);
+ ///
+ /// // Unique local addresses (`fc00::/7`)
+ /// assert_eq!(Ipv6Addr::new(0xfc02, 0, 0, 0, 0, 0, 0, 1).is_global(), false);
+ ///
+ /// // Unicast addresses with link-local scope (`fe80::/10`)
+ /// assert_eq!(Ipv6Addr::new(0xfe81, 0, 0, 0, 0, 0, 0, 1).is_global(), false);
+ ///
+ /// // For a complete overview see the IANA IPv6 Special-Purpose Address Registry.
+ /// ```
+ #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn is_global(&self) -> bool {
+ !(self.is_unspecified()
+ || self.is_loopback()
+ // IPv4-mapped Address (`::ffff:0:0/96`)
+ || matches!(self.segments(), [0, 0, 0, 0, 0, 0xffff, _, _])
+ // IPv4-IPv6 Translat. (`64:ff9b:1::/48`)
+ || matches!(self.segments(), [0x64, 0xff9b, 1, _, _, _, _, _])
+ // Discard-Only Address Block (`100::/64`)
+ || matches!(self.segments(), [0x100, 0, 0, 0, _, _, _, _])
+ // IETF Protocol Assignments (`2001::/23`)
+ || (matches!(self.segments(), [0x2001, b, _, _, _, _, _, _] if b < 0x200)
+ && !(
+ // Port Control Protocol Anycast (`2001:1::1`)
+ u128::from_be_bytes(self.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0001
+ // Traversal Using Relays around NAT Anycast (`2001:1::2`)
+ || u128::from_be_bytes(self.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0002
+ // AMT (`2001:3::/32`)
+ || matches!(self.segments(), [0x2001, 3, _, _, _, _, _, _])
+ // AS112-v6 (`2001:4:112::/48`)
+ || matches!(self.segments(), [0x2001, 4, 0x112, _, _, _, _, _])
+ // ORCHIDv2 (`2001:20::/28`)
+ || matches!(self.segments(), [0x2001, b, _, _, _, _, _, _] if b >= 0x20 && b <= 0x2F)
+ ))
+ || self.is_documentation()
+ || self.is_unique_local()
+ || self.is_unicast_link_local())
+ }
+
+ /// Returns [`true`] if this is a unique local address (`fc00::/7`).
+ ///
+ /// This property is defined in [IETF RFC 4193].
+ ///
+ /// [IETF RFC 4193]: https://tools.ietf.org/html/rfc4193
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ ///
+ /// use std::net::Ipv6Addr;
+ ///
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_unique_local(), false);
+ /// assert_eq!(Ipv6Addr::new(0xfc02, 0, 0, 0, 0, 0, 0, 0).is_unique_local(), true);
+ /// ```
+ #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn is_unique_local(&self) -> bool {
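+ // fc00::/7: the top seven bits of the first segment are 1111110, covering both fc00::/8 and fd00::/8.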
+ (self.segments()[0] & 0xfe00) == 0xfc00
+ }
+
+ /// Returns [`true`] if this is a unicast address, as defined by [IETF RFC 4291].
+ /// Any address that is not a [multicast address] (`ff00::/8`) is unicast.
+ ///
+ /// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291
+ /// [multicast address]: Ipv6Addr::is_multicast
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ ///
+ /// use std::net::Ipv6Addr;
+ ///
+ /// // The unspecified and loopback addresses are unicast.
+ /// assert_eq!(Ipv6Addr::UNSPECIFIED.is_unicast(), true);
+ /// assert_eq!(Ipv6Addr::LOCALHOST.is_unicast(), true);
+ ///
+ /// // Any address that is not a multicast address (`ff00::/8`) is unicast.
+ /// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0).is_unicast(), true);
+ /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).is_unicast(), false);
+ /// ```
+ #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn is_unicast(&self) -> bool {
+ !self.is_multicast()
+ }
+
+ /// Returns `true` if the address is a unicast address with link-local scope,
+ /// as defined in [RFC 4291].
+ ///
+ /// A unicast address has link-local scope if it has the prefix `fe80::/10`, as per [RFC 4291 section 2.4].
+ /// Note that this encompasses more addresses than those defined in [RFC 4291 section 2.5.6],
+ /// which describes "Link-Local IPv6 Unicast Addresses" as having the following stricter format:
+ ///
+ /// ```text
+ /// | 10 bits | 54 bits | 64 bits |
+ /// +----------+-------------------------+----------------------------+
+ /// |1111111010| 0 | interface ID |
+ /// +----------+-------------------------+----------------------------+
+ /// ```
+ /// So while currently the only addresses with link-local scope an application will encounter are all in `fe80::/64`,
+ /// this might change in the future with the publication of new standards. More addresses in `fe80::/10` could be allocated,
+ /// and those addresses will have link-local scope.
+ ///
+ /// Also note that while [RFC 4291 section 2.5.3] says about the [loopback address] (`::1`) that "it is treated as having Link-Local scope",
+ /// this does not mean that the loopback address actually has link-local scope and this method will return `false` on it.
+ ///
+ /// [RFC 4291]: https://tools.ietf.org/html/rfc4291
+ /// [RFC 4291 section 2.4]: https://tools.ietf.org/html/rfc4291#section-2.4
+ /// [RFC 4291 section 2.5.3]: https://tools.ietf.org/html/rfc4291#section-2.5.3
+ /// [RFC 4291 section 2.5.6]: https://tools.ietf.org/html/rfc4291#section-2.5.6
+ /// [loopback address]: Ipv6Addr::LOCALHOST
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ ///
+ /// use std::net::Ipv6Addr;
+ ///
+ /// // The loopback address (`::1`) does not actually have link-local scope.
+ /// assert_eq!(Ipv6Addr::LOCALHOST.is_unicast_link_local(), false);
+ ///
+ /// // Only addresses in `fe80::/10` have link-local scope.
+ /// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0).is_unicast_link_local(), false);
+ /// assert_eq!(Ipv6Addr::new(0xfe80, 0, 0, 0, 0, 0, 0, 0).is_unicast_link_local(), true);
+ ///
+ /// // Addresses outside the stricter `fe80::/64` also have link-local scope.
+ /// assert_eq!(Ipv6Addr::new(0xfe80, 0, 0, 1, 0, 0, 0, 0).is_unicast_link_local(), true);
+ /// assert_eq!(Ipv6Addr::new(0xfe81, 0, 0, 0, 0, 0, 0, 0).is_unicast_link_local(), true);
+ /// ```
+ #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn is_unicast_link_local(&self) -> bool {
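+ // fe80::/10: the top ten bits of the first segment are 1111111010.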
+ (self.segments()[0] & 0xffc0) == 0xfe80
+ }
+
+ /// Returns [`true`] if this is an address reserved for documentation
+ /// (`2001:db8::/32`).
+ ///
+ /// This property is defined in [IETF RFC 3849].
+ ///
+ /// [IETF RFC 3849]: https://tools.ietf.org/html/rfc3849
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ ///
+ /// use std::net::Ipv6Addr;
+ ///
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_documentation(), false);
+ /// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0).is_documentation(), true);
+ /// ```
+ #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn is_documentation(&self) -> bool {
+ (self.segments()[0] == 0x2001) && (self.segments()[1] == 0xdb8)
+ }
+
+ /// Returns [`true`] if this is an address reserved for benchmarking (`2001:2::/48`).
+ ///
+ /// This property is defined in [IETF RFC 5180], where it is mistakenly specified as covering the range `2001:0200::/48`.
+ /// This is corrected in [IETF RFC Errata 1752] to `2001:0002::/48`.
+ ///
+ /// [IETF RFC 5180]: https://tools.ietf.org/html/rfc5180
+ /// [IETF RFC Errata 1752]: https://www.rfc-editor.org/errata_search.php?eid=1752
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ ///
+ /// use std::net::Ipv6Addr;
+ ///
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc613, 0x0).is_benchmarking(), false);
+ /// assert_eq!(Ipv6Addr::new(0x2001, 0x2, 0, 0, 0, 0, 0, 0).is_benchmarking(), true);
+ /// ```
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn is_benchmarking(&self) -> bool {
+ (self.segments()[0] == 0x2001) && (self.segments()[1] == 0x2) && (self.segments()[2] == 0)
+ }
+
+ /// Returns [`true`] if the address is a globally routable unicast address.
+ ///
+ /// The following return [`false`]:
+ ///
+ /// - the loopback address
+ /// - the link-local addresses
+ /// - unique local addresses
+ /// - the unspecified address
+ /// - the address range reserved for documentation
+ ///
+ /// This method returns [`true`] for site-local addresses as per [RFC 4291 section 2.5.7]:
+ ///
+ /// ```text
+ /// The special behavior of [the site-local unicast] prefix defined in [RFC3513] must no longer
+ /// be supported in new implementations (i.e., new implementations must treat this prefix as
+ /// Global Unicast).
+ /// ```
+ ///
+ /// [RFC 4291 section 2.5.7]: https://tools.ietf.org/html/rfc4291#section-2.5.7
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ ///
+ /// use std::net::Ipv6Addr;
+ ///
+ /// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0).is_unicast_global(), false);
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_unicast_global(), true);
+ /// ```
+ #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn is_unicast_global(&self) -> bool {
+ self.is_unicast()
+ && !self.is_loopback()
+ && !self.is_unicast_link_local()
+ && !self.is_unique_local()
+ && !self.is_unspecified()
+ && !self.is_documentation()
+ && !self.is_benchmarking()
+ }
+
+ /// Returns the address's multicast scope if the address is multicast.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ ///
+ /// use std::net::{Ipv6Addr, Ipv6MulticastScope};
+ ///
+ /// assert_eq!(
+ /// Ipv6Addr::new(0xff0e, 0, 0, 0, 0, 0, 0, 0).multicast_scope(),
+ /// Some(Ipv6MulticastScope::Global)
+ /// );
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).multicast_scope(), None);
+ /// ```
+ #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn multicast_scope(&self) -> Option<Ipv6MulticastScope> {
+ if self.is_multicast() {
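+ // Multicast addresses have the form `ff{flags}{scope}::`; the scope is encoded in
+ // the low four bits of the first segment (RFC 4291 section 2.7).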
+ match self.segments()[0] & 0x000f {
+ 1 => Some(Ipv6MulticastScope::InterfaceLocal),
+ 2 => Some(Ipv6MulticastScope::LinkLocal),
+ 3 => Some(Ipv6MulticastScope::RealmLocal),
+ 4 => Some(Ipv6MulticastScope::AdminLocal),
+ 5 => Some(Ipv6MulticastScope::SiteLocal),
+ 8 => Some(Ipv6MulticastScope::OrganizationLocal),
+ 14 => Some(Ipv6MulticastScope::Global),
+ _ => None,
+ }
+ } else {
+ None
+ }
+ }
+
+ /// Returns [`true`] if this is a multicast address (`ff00::/8`).
+ ///
+ /// This property is defined by [IETF RFC 4291].
+ ///
+ /// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv6Addr;
+ ///
+ /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).is_multicast(), true);
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_multicast(), false);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(since = "1.7.0", feature = "ip_17")]
+ #[must_use]
+ #[inline]
+ pub const fn is_multicast(&self) -> bool {
+ (self.segments()[0] & 0xff00) == 0xff00
+ }
+
+ /// Converts this address to an [`IPv4` address] if it's an [IPv4-mapped] address,
+ /// as defined in [IETF RFC 4291 section 2.5.5.2], otherwise returns [`None`].
+ ///
+ /// `::ffff:a.b.c.d` becomes `a.b.c.d`.
+ /// All addresses *not* starting with `::ffff` will return `None`.
+ ///
+ /// [`IPv4` address]: Ipv4Addr
+ /// [IPv4-mapped]: Ipv6Addr#ipv4-mapped-ipv6-addresses
+ /// [IETF RFC 4291 section 2.5.5.2]: https://tools.ietf.org/html/rfc4291#section-2.5.5.2
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{Ipv4Addr, Ipv6Addr};
+ ///
+ /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).to_ipv4_mapped(), None);
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).to_ipv4_mapped(),
+ /// Some(Ipv4Addr::new(192, 10, 2, 255)));
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).to_ipv4_mapped(), None);
+ /// ```
+ #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
+ #[stable(feature = "ipv6_to_ipv4_mapped", since = "1.63.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn to_ipv4_mapped(&self) -> Option<Ipv4Addr> {
+ match self.octets() {
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, a, b, c, d] => {
+ Some(Ipv4Addr::new(a, b, c, d))
+ }
+ _ => None,
+ }
+ }
+
+ /// Converts this address to an [`IPv4` address] if it is either
+ /// an [IPv4-compatible] address as defined in [IETF RFC 4291 section 2.5.5.1],
+ /// or an [IPv4-mapped] address as defined in [IETF RFC 4291 section 2.5.5.2],
+ /// otherwise returns [`None`].
+ ///
+ /// Note that this will return an [`IPv4` address] for the IPv6 loopback address `::1`. Use
+ /// [`Ipv6Addr::to_ipv4_mapped`] to avoid this.
+ ///
+ /// `::a.b.c.d` and `::ffff:a.b.c.d` become `a.b.c.d`. `::1` becomes `0.0.0.1`.
+ /// All addresses *not* starting with either all zeroes or `::ffff` will return `None`.
+ ///
+ /// [`IPv4` address]: Ipv4Addr
+ /// [IPv4-compatible]: Ipv6Addr#ipv4-compatible-ipv6-addresses
+ /// [IPv4-mapped]: Ipv6Addr#ipv4-mapped-ipv6-addresses
+ /// [IETF RFC 4291 section 2.5.5.1]: https://tools.ietf.org/html/rfc4291#section-2.5.5.1
+ /// [IETF RFC 4291 section 2.5.5.2]: https://tools.ietf.org/html/rfc4291#section-2.5.5.2
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{Ipv4Addr, Ipv6Addr};
+ ///
+ /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).to_ipv4(), None);
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).to_ipv4(),
+ /// Some(Ipv4Addr::new(192, 10, 2, 255)));
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).to_ipv4(),
+ /// Some(Ipv4Addr::new(0, 0, 0, 1)));
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn to_ipv4(&self) -> Option<Ipv4Addr> {
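+ // Accept both IPv4-compatible (`::a.b.c.d`) and IPv4-mapped (`::ffff:a.b.c.d`) forms:
+ // the sixth segment is either 0 or 0xffff.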
+ if let [0, 0, 0, 0, 0, 0 | 0xffff, ab, cd] = self.segments() {
+ let [a, b] = ab.to_be_bytes();
+ let [c, d] = cd.to_be_bytes();
+ Some(Ipv4Addr::new(a, b, c, d))
+ } else {
+ None
+ }
+ }
+
+ /// Converts this address to an `IpAddr::V4` if it is an IPv4-mapped address, otherwise it
+ /// returns `self` wrapped in an `IpAddr::V6`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ /// use std::net::Ipv6Addr;
+ ///
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1).is_loopback(), false);
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1).to_canonical().is_loopback(), true);
+ /// ```
+ #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn to_canonical(&self) -> IpAddr {
+ if let Some(mapped) = self.to_ipv4_mapped() {
+ return IpAddr::V4(mapped);
+ }
+ IpAddr::V6(*self)
+ }
+
+ /// Returns the sixteen eight-bit integers the IPv6 address consists of.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv6Addr;
+ ///
+ /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).octets(),
+ /// [255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_32", since = "1.32.0")]
+ #[stable(feature = "ipv6_to_octets", since = "1.12.0")]
+ #[must_use]
+ #[inline]
+ pub const fn octets(&self) -> [u8; 16] {
+ self.octets
+ }
+}
+
+/// Writes an `Ipv6Addr`, conforming to the canonical style described by
+/// [RFC 5952](https://tools.ietf.org/html/rfc5952).
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for Ipv6Addr {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // If there are no alignment requirements, write the IP address directly to `f`.
+ // Otherwise, write it to a local buffer and then use `f.pad`.
+ if f.precision().is_none() && f.width().is_none() {
+ let segments = self.segments();
+
+ // Special case for :: and ::1; otherwise they get written with the
+ // IPv4 formatter
+ if self.is_unspecified() {
+ f.write_str("::")
+ } else if self.is_loopback() {
+ f.write_str("::1")
+ } else if let Some(ipv4) = self.to_ipv4() {
+ match segments[5] {
+ // IPv4 Compatible address
+ 0 => write!(f, "::{}", ipv4),
+ // IPv4 Mapped address
+ 0xffff => write!(f, "::ffff:{}", ipv4),
+ _ => unreachable!(),
+ }
+ } else {
+ #[derive(Copy, Clone, Default)]
+ struct Span {
+ start: usize,
+ len: usize,
+ }
+
+ // Find the inner 0 span
+ let zeroes = {
+ let mut longest = Span::default();
+ let mut current = Span::default();
+
+ for (i, &segment) in segments.iter().enumerate() {
+ if segment == 0 {
+ if current.len == 0 {
+ current.start = i;
+ }
+
+ current.len += 1;
+
+ if current.len > longest.len {
+ longest = current;
+ }
+ } else {
+ current = Span::default();
+ }
+ }
+
+ longest
+ };
+
+ /// Write a colon-separated part of the address
+ #[inline]
+ fn fmt_subslice(f: &mut fmt::Formatter<'_>, chunk: &[u16]) -> fmt::Result {
+ if let Some((first, tail)) = chunk.split_first() {
+ write!(f, "{:x}", first)?;
+ for segment in tail {
+ f.write_char(':')?;
+ write!(f, "{:x}", segment)?;
+ }
+ }
+ Ok(())
+ }
+
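+ // Per RFC 5952 section 4.2.2, "::" must not be used to shorten a single zero
+ // segment, so only compress runs of two or more.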
+ if zeroes.len > 1 {
+ fmt_subslice(f, &segments[..zeroes.start])?;
+ f.write_str("::")?;
+ fmt_subslice(f, &segments[zeroes.start + zeroes.len..])
+ } else {
+ fmt_subslice(f, &segments)
+ }
+ }
+ } else {
+ const LONGEST_IPV6_ADDR: &str = "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff";
+
+ let mut buf = DisplayBuffer::<{ LONGEST_IPV6_ADDR.len() }>::new();
+ // Buffer is long enough for the longest possible IPv6 address, so this should never fail.
+ write!(buf, "{}", self).unwrap();
+
+ f.pad(buf.as_str())
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for Ipv6Addr {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self, fmt)
+ }
+}
+
+#[stable(feature = "ip_cmp", since = "1.16.0")]
+impl PartialEq<IpAddr> for Ipv6Addr {
+ #[inline]
+ fn eq(&self, other: &IpAddr) -> bool {
+ match other {
+ IpAddr::V4(_) => false,
+ IpAddr::V6(v6) => self == v6,
+ }
+ }
+}
+
+#[stable(feature = "ip_cmp", since = "1.16.0")]
+impl PartialEq<Ipv6Addr> for IpAddr {
+ #[inline]
+ fn eq(&self, other: &Ipv6Addr) -> bool {
+ match self {
+ IpAddr::V4(_) => false,
+ IpAddr::V6(v6) => v6 == other,
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl PartialOrd for Ipv6Addr {
+ #[inline]
+ fn partial_cmp(&self, other: &Ipv6Addr) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+#[stable(feature = "ip_cmp", since = "1.16.0")]
+impl PartialOrd<Ipv6Addr> for IpAddr {
+ #[inline]
+ fn partial_cmp(&self, other: &Ipv6Addr) -> Option<Ordering> {
+ match self {
+ IpAddr::V4(_) => Some(Ordering::Less),
+ IpAddr::V6(v6) => v6.partial_cmp(other),
+ }
+ }
+}
+
+#[stable(feature = "ip_cmp", since = "1.16.0")]
+impl PartialOrd<IpAddr> for Ipv6Addr {
+ #[inline]
+ fn partial_cmp(&self, other: &IpAddr) -> Option<Ordering> {
+ match other {
+ IpAddr::V4(_) => Some(Ordering::Greater),
+ IpAddr::V6(v6) => self.partial_cmp(v6),
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Ord for Ipv6Addr {
+ #[inline]
+ fn cmp(&self, other: &Ipv6Addr) -> Ordering {
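+ // Comparing the native-endian segments yields the same ordering as a bytewise
+ // comparison of the big-endian octets.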
+ self.segments().cmp(&other.segments())
+ }
+}
+
+impl IntoInner<c::in6_addr> for Ipv6Addr {
+ fn into_inner(self) -> c::in6_addr {
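+ // `s6_addr` is a byte array in network order, the same representation as `octets`,
+ // so no conversion is needed.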
+ c::in6_addr { s6_addr: self.octets }
+ }
+}
+impl FromInner<c::in6_addr> for Ipv6Addr {
+ #[inline]
+ fn from_inner(addr: c::in6_addr) -> Ipv6Addr {
+ Ipv6Addr { octets: addr.s6_addr }
+ }
+}
+
+#[stable(feature = "i128", since = "1.26.0")]
+impl From<Ipv6Addr> for u128 {
+ /// Converts an `Ipv6Addr` into a host byte order `u128`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv6Addr;
+ ///
+ /// let addr = Ipv6Addr::new(
+ /// 0x1020, 0x3040, 0x5060, 0x7080,
+ /// 0x90A0, 0xB0C0, 0xD0E0, 0xF00D,
+ /// );
+ /// assert_eq!(0x102030405060708090A0B0C0D0E0F00D_u128, u128::from(addr));
+ /// ```
+ #[inline]
+ fn from(ip: Ipv6Addr) -> u128 {
+ u128::from_be_bytes(ip.octets)
+ }
+}
+#[stable(feature = "i128", since = "1.26.0")]
+impl From<u128> for Ipv6Addr {
+ /// Converts a host byte order `u128` into an `Ipv6Addr`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv6Addr;
+ ///
+ /// let addr = Ipv6Addr::from(0x102030405060708090A0B0C0D0E0F00D_u128);
+ /// assert_eq!(
+ /// Ipv6Addr::new(
+ /// 0x1020, 0x3040, 0x5060, 0x7080,
+ /// 0x90A0, 0xB0C0, 0xD0E0, 0xF00D,
+ /// ),
+ /// addr);
+ /// ```
+ #[inline]
+ fn from(ip: u128) -> Ipv6Addr {
+ Ipv6Addr::from(ip.to_be_bytes())
+ }
+}
+
+#[stable(feature = "ipv6_from_octets", since = "1.9.0")]
+impl From<[u8; 16]> for Ipv6Addr {
+ /// Creates an `Ipv6Addr` from a sixteen element byte array.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv6Addr;
+ ///
+ /// let addr = Ipv6Addr::from([
+ /// 25u8, 24u8, 23u8, 22u8, 21u8, 20u8, 19u8, 18u8,
+ /// 17u8, 16u8, 15u8, 14u8, 13u8, 12u8, 11u8, 10u8,
+ /// ]);
+ /// assert_eq!(
+ /// Ipv6Addr::new(
+ /// 0x1918, 0x1716,
+ /// 0x1514, 0x1312,
+ /// 0x1110, 0x0f0e,
+ /// 0x0d0c, 0x0b0a
+ /// ),
+ /// addr
+ /// );
+ /// ```
+ #[inline]
+ fn from(octets: [u8; 16]) -> Ipv6Addr {
+ Ipv6Addr { octets }
+ }
+}
+
+#[stable(feature = "ipv6_from_segments", since = "1.16.0")]
+impl From<[u16; 8]> for Ipv6Addr {
+ /// Creates an `Ipv6Addr` from an eight element 16-bit array.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv6Addr;
+ ///
+ /// let addr = Ipv6Addr::from([
+ /// 525u16, 524u16, 523u16, 522u16,
+ /// 521u16, 520u16, 519u16, 518u16,
+ /// ]);
+ /// assert_eq!(
+ /// Ipv6Addr::new(
+ /// 0x20d, 0x20c,
+ /// 0x20b, 0x20a,
+ /// 0x209, 0x208,
+ /// 0x207, 0x206
+ /// ),
+ /// addr
+ /// );
+ /// ```
+ #[inline]
+ fn from(segments: [u16; 8]) -> Ipv6Addr {
+ let [a, b, c, d, e, f, g, h] = segments;
+ Ipv6Addr::new(a, b, c, d, e, f, g, h)
+ }
+}
+
+#[stable(feature = "ip_from_slice", since = "1.17.0")]
+impl From<[u8; 16]> for IpAddr {
+ /// Creates an `IpAddr::V6` from a sixteen element byte array.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv6Addr};
+ ///
+ /// let addr = IpAddr::from([
+ /// 25u8, 24u8, 23u8, 22u8, 21u8, 20u8, 19u8, 18u8,
+ /// 17u8, 16u8, 15u8, 14u8, 13u8, 12u8, 11u8, 10u8,
+ /// ]);
+ /// assert_eq!(
+ /// IpAddr::V6(Ipv6Addr::new(
+ /// 0x1918, 0x1716,
+ /// 0x1514, 0x1312,
+ /// 0x1110, 0x0f0e,
+ /// 0x0d0c, 0x0b0a
+ /// )),
+ /// addr
+ /// );
+ /// ```
+ #[inline]
+ fn from(octets: [u8; 16]) -> IpAddr {
+ IpAddr::V6(Ipv6Addr::from(octets))
+ }
+}
+
+#[stable(feature = "ip_from_slice", since = "1.17.0")]
+impl From<[u16; 8]> for IpAddr {
+ /// Creates an `IpAddr::V6` from an eight element 16-bit array.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv6Addr};
+ ///
+ /// let addr = IpAddr::from([
+ /// 525u16, 524u16, 523u16, 522u16,
+ /// 521u16, 520u16, 519u16, 518u16,
+ /// ]);
+ /// assert_eq!(
+ /// IpAddr::V6(Ipv6Addr::new(
+ /// 0x20d, 0x20c,
+ /// 0x20b, 0x20a,
+ /// 0x209, 0x208,
+ /// 0x207, 0x206
+ /// )),
+ /// addr
+ /// );
+ /// ```
+ #[inline]
+ fn from(segments: [u16; 8]) -> IpAddr {
+ IpAddr::V6(Ipv6Addr::from(segments))
+ }
+}
diff --git a/library/std/src/net/ip_addr/tests.rs b/library/std/src/net/ip_addr/tests.rs
new file mode 100644
index 000000000..7c3430b2b
--- /dev/null
+++ b/library/std/src/net/ip_addr/tests.rs
@@ -0,0 +1,1039 @@
+use crate::net::test::{sa4, sa6, tsa};
+use crate::net::*;
+use crate::str::FromStr;
+
+#[test]
+fn test_from_str_ipv4() {
+ assert_eq!(Ok(Ipv4Addr::new(127, 0, 0, 1)), "127.0.0.1".parse());
+ assert_eq!(Ok(Ipv4Addr::new(255, 255, 255, 255)), "255.255.255.255".parse());
+ assert_eq!(Ok(Ipv4Addr::new(0, 0, 0, 0)), "0.0.0.0".parse());
+
+ // out of range
+ let none: Option<Ipv4Addr> = "256.0.0.1".parse().ok();
+ assert_eq!(None, none);
+ // too short
+ let none: Option<Ipv4Addr> = "255.0.0".parse().ok();
+ assert_eq!(None, none);
+ // too long
+ let none: Option<Ipv4Addr> = "255.0.0.1.2".parse().ok();
+ assert_eq!(None, none);
+ // no number between dots
+ let none: Option<Ipv4Addr> = "255.0..1".parse().ok();
+ assert_eq!(None, none);
+ // octal
+ let none: Option<Ipv4Addr> = "255.0.0.01".parse().ok();
+ assert_eq!(None, none);
+ // octal zero
+ let none: Option<Ipv4Addr> = "255.0.0.00".parse().ok();
+ assert_eq!(None, none);
+ let none: Option<Ipv4Addr> = "255.0.00.0".parse().ok();
+ assert_eq!(None, none);
+}
+
+#[test]
+fn test_from_str_ipv6() {
+ assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), "0:0:0:0:0:0:0:0".parse());
+ assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), "0:0:0:0:0:0:0:1".parse());
+
+ assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), "::1".parse());
+ assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), "::".parse());
+
+ assert_eq!(Ok(Ipv6Addr::new(0x2a02, 0x6b8, 0, 0, 0, 0, 0x11, 0x11)), "2a02:6b8::11:11".parse());
+
+ // too long group
+ let none: Option<Ipv6Addr> = "::00000".parse().ok();
+ assert_eq!(None, none);
+ // too short
+ let none: Option<Ipv6Addr> = "1:2:3:4:5:6:7".parse().ok();
+ assert_eq!(None, none);
+ // too long
+ let none: Option<Ipv6Addr> = "1:2:3:4:5:6:7:8:9".parse().ok();
+ assert_eq!(None, none);
+ // triple colon
+ let none: Option<Ipv6Addr> = "1:2:::6:7:8".parse().ok();
+ assert_eq!(None, none);
+ // two double colons
+ let none: Option<Ipv6Addr> = "1:2::6::8".parse().ok();
+ assert_eq!(None, none);
+ // `::` indicating zero groups of zeros
+ let none: Option<Ipv6Addr> = "1:2:3:4::5:6:7:8".parse().ok();
+ assert_eq!(None, none);
+}
+
+#[test]
+fn test_from_str_ipv4_in_ipv6() {
+ assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 49152, 545)), "::192.0.2.33".parse());
+ assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0xFFFF, 49152, 545)), "::FFFF:192.0.2.33".parse());
+ assert_eq!(
+ Ok(Ipv6Addr::new(0x64, 0xff9b, 0, 0, 0, 0, 49152, 545)),
+ "64:ff9b::192.0.2.33".parse()
+ );
+ assert_eq!(
+ Ok(Ipv6Addr::new(0x2001, 0xdb8, 0x122, 0xc000, 0x2, 0x2100, 49152, 545)),
+ "2001:db8:122:c000:2:2100:192.0.2.33".parse()
+ );
+
+ // colon after v4
+ let none: Option<Ipv4Addr> = "::127.0.0.1:".parse().ok();
+ assert_eq!(None, none);
+ // not enough groups
+ let none: Option<Ipv6Addr> = "1:2:3:4:5:127.0.0.1".parse().ok();
+ assert_eq!(None, none);
+ // too many groups
+ let none: Option<Ipv6Addr> = "1:2:3:4:5:6:7:127.0.0.1".parse().ok();
+ assert_eq!(None, none);
+}
+
+#[test]
+fn test_from_str_socket_addr() {
+ assert_eq!(Ok(sa4(Ipv4Addr::new(77, 88, 21, 11), 80)), "77.88.21.11:80".parse());
+ assert_eq!(Ok(SocketAddrV4::new(Ipv4Addr::new(77, 88, 21, 11), 80)), "77.88.21.11:80".parse());
+ assert_eq!(
+ Ok(sa6(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 53)),
+ "[2a02:6b8:0:1::1]:53".parse()
+ );
+ assert_eq!(
+ Ok(SocketAddrV6::new(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 53, 0, 0)),
+ "[2a02:6b8:0:1::1]:53".parse()
+ );
+ assert_eq!(Ok(sa6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x7F00, 1), 22)), "[::127.0.0.1]:22".parse());
+ assert_eq!(
+ Ok(SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x7F00, 1), 22, 0, 0)),
+ "[::127.0.0.1]:22".parse()
+ );
+
+ // without port
+ let none: Option<SocketAddr> = "127.0.0.1".parse().ok();
+ assert_eq!(None, none);
+ // without port
+ let none: Option<SocketAddr> = "127.0.0.1:".parse().ok();
+ assert_eq!(None, none);
+ // wrong brackets around v4
+ let none: Option<SocketAddr> = "[127.0.0.1]:22".parse().ok();
+ assert_eq!(None, none);
+ // port out of range
+ let none: Option<SocketAddr> = "127.0.0.1:123456".parse().ok();
+ assert_eq!(None, none);
+}
+
+#[test]
+fn ipv4_addr_to_string() {
+ assert_eq!(Ipv4Addr::new(127, 0, 0, 1).to_string(), "127.0.0.1");
+ // Short address
+ assert_eq!(Ipv4Addr::new(1, 1, 1, 1).to_string(), "1.1.1.1");
+ // Long address
+ assert_eq!(Ipv4Addr::new(127, 127, 127, 127).to_string(), "127.127.127.127");
+
+ // Test padding
+ assert_eq!(&format!("{:16}", Ipv4Addr::new(1, 1, 1, 1)), "1.1.1.1         ");
+ assert_eq!(&format!("{:>16}", Ipv4Addr::new(1, 1, 1, 1)), "         1.1.1.1");
+}
+
+#[test]
+fn ipv6_addr_to_string() {
+ // ipv4-mapped address
+ let a1 = Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc000, 0x280);
+ assert_eq!(a1.to_string(), "::ffff:192.0.2.128");
+
+ // ipv4-compatible address
+ let a1 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0xc000, 0x280);
+ assert_eq!(a1.to_string(), "::192.0.2.128");
+
+ // v6 address with no zero segments
+ assert_eq!(Ipv6Addr::new(8, 9, 10, 11, 12, 13, 14, 15).to_string(), "8:9:a:b:c:d:e:f");
+
+ // longest possible IPv6 length
+ assert_eq!(
+ Ipv6Addr::new(0x1111, 0x2222, 0x3333, 0x4444, 0x5555, 0x6666, 0x7777, 0x8888).to_string(),
+ "1111:2222:3333:4444:5555:6666:7777:8888"
+ );
+ // padding
+ assert_eq!(&format!("{:20}", Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8)), "1:2:3:4:5:6:7:8     ");
+ assert_eq!(&format!("{:>20}", Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8)), "     1:2:3:4:5:6:7:8");
+
+ // reduce a single run of zeros
+ assert_eq!(
+ "ae::ffff:102:304",
+ Ipv6Addr::new(0xae, 0, 0, 0, 0, 0xffff, 0x0102, 0x0304).to_string()
+ );
+
+ // don't reduce just a single zero segment
+ assert_eq!("1:2:3:4:5:6:0:8", Ipv6Addr::new(1, 2, 3, 4, 5, 6, 0, 8).to_string());
+
+ // 'any' address
+ assert_eq!("::", Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0).to_string());
+
+ // loopback address
+ assert_eq!("::1", Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).to_string());
+
+ // ends in zeros
+ assert_eq!("1::", Ipv6Addr::new(1, 0, 0, 0, 0, 0, 0, 0).to_string());
+
+ // two runs of zeros, second one is longer
+ assert_eq!("1:0:0:4::8", Ipv6Addr::new(1, 0, 0, 4, 0, 0, 0, 8).to_string());
+
+ // two runs of zeros, equal length
+ assert_eq!("1::4:5:0:0:8", Ipv6Addr::new(1, 0, 0, 4, 5, 0, 0, 8).to_string());
+
+ // don't prefix `0x` to each segment in `dbg!`.
+ assert_eq!("1::4:5:0:0:8", &format!("{:#?}", Ipv6Addr::new(1, 0, 0, 4, 5, 0, 0, 8)));
+}
+
+#[test]
+fn ipv4_to_ipv6() {
+ assert_eq!(
+ Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678),
+ Ipv4Addr::new(0x12, 0x34, 0x56, 0x78).to_ipv6_mapped()
+ );
+ assert_eq!(
+ Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x1234, 0x5678),
+ Ipv4Addr::new(0x12, 0x34, 0x56, 0x78).to_ipv6_compatible()
+ );
+}
+
+#[test]
+fn ipv6_to_ipv4_mapped() {
+ assert_eq!(
+ Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678).to_ipv4_mapped(),
+ Some(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78))
+ );
+ assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x1234, 0x5678).to_ipv4_mapped(), None);
+}
+
+#[test]
+fn ipv6_to_ipv4() {
+ assert_eq!(
+ Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678).to_ipv4(),
+ Some(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78))
+ );
+ assert_eq!(
+ Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x1234, 0x5678).to_ipv4(),
+ Some(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78))
+ );
+ assert_eq!(Ipv6Addr::new(0, 0, 1, 0, 0, 0, 0x1234, 0x5678).to_ipv4(), None);
+}
+
+#[test]
+fn ip_properties() {
+ macro_rules! ip {
+ ($s:expr) => {
+ IpAddr::from_str($s).unwrap()
+ };
+ }
+
+ macro_rules! check {
+ ($s:expr) => {
+ check!($s, 0);
+ };
+
+ ($s:expr, $mask:expr) => {{
+ let unspec: u8 = 1 << 0;
+ let loopback: u8 = 1 << 1;
+ let global: u8 = 1 << 2;
+ let multicast: u8 = 1 << 3;
+ let doc: u8 = 1 << 4;
+ let benchmarking: u8 = 1 << 5;
+
+ if ($mask & unspec) == unspec {
+ assert!(ip!($s).is_unspecified());
+ } else {
+ assert!(!ip!($s).is_unspecified());
+ }
+
+ if ($mask & loopback) == loopback {
+ assert!(ip!($s).is_loopback());
+ } else {
+ assert!(!ip!($s).is_loopback());
+ }
+
+ if ($mask & global) == global {
+ assert!(ip!($s).is_global());
+ } else {
+ assert!(!ip!($s).is_global());
+ }
+
+ if ($mask & multicast) == multicast {
+ assert!(ip!($s).is_multicast());
+ } else {
+ assert!(!ip!($s).is_multicast());
+ }
+
+ if ($mask & doc) == doc {
+ assert!(ip!($s).is_documentation());
+ } else {
+ assert!(!ip!($s).is_documentation());
+ }
+
+ if ($mask & benchmarking) == benchmarking {
+ assert!(ip!($s).is_benchmarking());
+ } else {
+ assert!(!ip!($s).is_benchmarking());
+ }
+ }};
+ }
+
+ let unspec: u8 = 1 << 0;
+ let loopback: u8 = 1 << 1;
+ let global: u8 = 1 << 2;
+ let multicast: u8 = 1 << 3;
+ let doc: u8 = 1 << 4;
+ let benchmarking: u8 = 1 << 5;
+
+ check!("0.0.0.0", unspec);
+ check!("0.0.0.1");
+ check!("0.1.0.0");
+ check!("10.9.8.7");
+ check!("127.1.2.3", loopback);
+ check!("172.31.254.253");
+ check!("169.254.253.242");
+ check!("192.0.2.183", doc);
+ check!("192.1.2.183", global);
+ check!("192.168.254.253");
+ check!("198.51.100.0", doc);
+ check!("203.0.113.0", doc);
+ check!("203.2.113.0", global);
+ check!("224.0.0.0", global | multicast);
+ check!("239.255.255.255", global | multicast);
+ check!("255.255.255.255");
+ // make sure benchmarking addresses are not global
+ check!("198.18.0.0", benchmarking);
+ check!("198.18.54.2", benchmarking);
+ check!("198.19.255.255", benchmarking);
+ // make sure addresses reserved for protocol assignment are not global
+ check!("192.0.0.0");
+ check!("192.0.0.255");
+ check!("192.0.0.100");
+ // make sure reserved addresses are not global
+ check!("240.0.0.0");
+ check!("251.54.1.76");
+ check!("254.255.255.255");
+ // make sure shared addresses are not global
+ check!("100.64.0.0");
+ check!("100.127.255.255");
+ check!("100.100.100.0");
+
+ check!("::", unspec);
+ check!("::1", loopback);
+ check!("::0.0.0.2", global);
+ check!("1::", global);
+ check!("fc00::");
+ check!("fdff:ffff::");
+ check!("fe80:ffff::");
+ check!("febf:ffff::");
+ check!("fec0::", global);
+ check!("ff01::", global | multicast);
+ check!("ff02::", global | multicast);
+ check!("ff03::", global | multicast);
+ check!("ff04::", global | multicast);
+ check!("ff05::", global | multicast);
+ check!("ff08::", global | multicast);
+ check!("ff0e::", global | multicast);
+ check!("2001:db8:85a3::8a2e:370:7334", doc);
+ check!("2001:2::ac32:23ff:21", benchmarking);
+ check!("102:304:506:708:90a:b0c:d0e:f10", global);
+}
+
+#[test]
+fn ipv4_properties() {
+ macro_rules! ip {
+ ($s:expr) => {
+ Ipv4Addr::from_str($s).unwrap()
+ };
+ }
+
+ macro_rules! check {
+ ($s:expr) => {
+ check!($s, 0);
+ };
+
+ ($s:expr, $mask:expr) => {{
+ let unspec: u16 = 1 << 0;
+ let loopback: u16 = 1 << 1;
+ let private: u16 = 1 << 2;
+ let link_local: u16 = 1 << 3;
+ let global: u16 = 1 << 4;
+ let multicast: u16 = 1 << 5;
+ let broadcast: u16 = 1 << 6;
+ let documentation: u16 = 1 << 7;
+ let benchmarking: u16 = 1 << 8;
+ let reserved: u16 = 1 << 10;
+ let shared: u16 = 1 << 11;
+
+ if ($mask & unspec) == unspec {
+ assert!(ip!($s).is_unspecified());
+ } else {
+ assert!(!ip!($s).is_unspecified());
+ }
+
+ if ($mask & loopback) == loopback {
+ assert!(ip!($s).is_loopback());
+ } else {
+ assert!(!ip!($s).is_loopback());
+ }
+
+ if ($mask & private) == private {
+ assert!(ip!($s).is_private());
+ } else {
+ assert!(!ip!($s).is_private());
+ }
+
+ if ($mask & link_local) == link_local {
+ assert!(ip!($s).is_link_local());
+ } else {
+ assert!(!ip!($s).is_link_local());
+ }
+
+ if ($mask & global) == global {
+ assert!(ip!($s).is_global());
+ } else {
+ assert!(!ip!($s).is_global());
+ }
+
+ if ($mask & multicast) == multicast {
+ assert!(ip!($s).is_multicast());
+ } else {
+ assert!(!ip!($s).is_multicast());
+ }
+
+ if ($mask & broadcast) == broadcast {
+ assert!(ip!($s).is_broadcast());
+ } else {
+ assert!(!ip!($s).is_broadcast());
+ }
+
+ if ($mask & documentation) == documentation {
+ assert!(ip!($s).is_documentation());
+ } else {
+ assert!(!ip!($s).is_documentation());
+ }
+
+ if ($mask & benchmarking) == benchmarking {
+ assert!(ip!($s).is_benchmarking());
+ } else {
+ assert!(!ip!($s).is_benchmarking());
+ }
+
+ if ($mask & reserved) == reserved {
+ assert!(ip!($s).is_reserved());
+ } else {
+ assert!(!ip!($s).is_reserved());
+ }
+
+ if ($mask & shared) == shared {
+ assert!(ip!($s).is_shared());
+ } else {
+ assert!(!ip!($s).is_shared());
+ }
+ }};
+ }
+
+ let unspec: u16 = 1 << 0;
+ let loopback: u16 = 1 << 1;
+ let private: u16 = 1 << 2;
+ let link_local: u16 = 1 << 3;
+ let global: u16 = 1 << 4;
+ let multicast: u16 = 1 << 5;
+ let broadcast: u16 = 1 << 6;
+ let documentation: u16 = 1 << 7;
+ let benchmarking: u16 = 1 << 8;
+ let reserved: u16 = 1 << 10;
+ let shared: u16 = 1 << 11;
+
+ check!("0.0.0.0", unspec);
+ check!("0.0.0.1");
+ check!("0.1.0.0");
+ check!("10.9.8.7", private);
+ check!("127.1.2.3", loopback);
+ check!("172.31.254.253", private);
+ check!("169.254.253.242", link_local);
+ check!("192.0.2.183", documentation);
+ check!("192.1.2.183", global);
+ check!("192.168.254.253", private);
+ check!("198.51.100.0", documentation);
+ check!("203.0.113.0", documentation);
+ check!("203.2.113.0", global);
+ check!("224.0.0.0", global | multicast);
+ check!("239.255.255.255", global | multicast);
+ check!("255.255.255.255", broadcast);
+ check!("198.18.0.0", benchmarking);
+ check!("198.18.54.2", benchmarking);
+ check!("198.19.255.255", benchmarking);
+ check!("192.0.0.0");
+ check!("192.0.0.255");
+ check!("192.0.0.100");
+ check!("240.0.0.0", reserved);
+ check!("251.54.1.76", reserved);
+ check!("254.255.255.255", reserved);
+ check!("100.64.0.0", shared);
+ check!("100.127.255.255", shared);
+ check!("100.100.100.0", shared);
+}
+
+#[test]
+fn ipv6_properties() {
+ macro_rules! ip {
+ ($s:expr) => {
+ Ipv6Addr::from_str($s).unwrap()
+ };
+ }
+
+ macro_rules! check {
+ ($s:expr, &[$($octet:expr),*], $mask:expr) => {
+ assert_eq!($s, ip!($s).to_string());
+ let octets = &[$($octet),*];
+ assert_eq!(&ip!($s).octets(), octets);
+ assert_eq!(Ipv6Addr::from(*octets), ip!($s));
+
+ let unspecified: u32 = 1 << 0;
+ let loopback: u32 = 1 << 1;
+ let unique_local: u32 = 1 << 2;
+ let global: u32 = 1 << 3;
+ let unicast_link_local: u32 = 1 << 4;
+ let unicast_global: u32 = 1 << 7;
+ let documentation: u32 = 1 << 8;
+ let benchmarking: u32 = 1 << 16;
+ let multicast_interface_local: u32 = 1 << 9;
+ let multicast_link_local: u32 = 1 << 10;
+ let multicast_realm_local: u32 = 1 << 11;
+ let multicast_admin_local: u32 = 1 << 12;
+ let multicast_site_local: u32 = 1 << 13;
+ let multicast_organization_local: u32 = 1 << 14;
+ let multicast_global: u32 = 1 << 15;
+ let multicast: u32 = multicast_interface_local
+ | multicast_admin_local
+ | multicast_global
+ | multicast_link_local
+ | multicast_realm_local
+ | multicast_site_local
+ | multicast_organization_local;
+
+ if ($mask & unspecified) == unspecified {
+ assert!(ip!($s).is_unspecified());
+ } else {
+ assert!(!ip!($s).is_unspecified());
+ }
+ if ($mask & loopback) == loopback {
+ assert!(ip!($s).is_loopback());
+ } else {
+ assert!(!ip!($s).is_loopback());
+ }
+ if ($mask & unique_local) == unique_local {
+ assert!(ip!($s).is_unique_local());
+ } else {
+ assert!(!ip!($s).is_unique_local());
+ }
+ if ($mask & global) == global {
+ assert!(ip!($s).is_global());
+ } else {
+ assert!(!ip!($s).is_global());
+ }
+ if ($mask & unicast_link_local) == unicast_link_local {
+ assert!(ip!($s).is_unicast_link_local());
+ } else {
+ assert!(!ip!($s).is_unicast_link_local());
+ }
+ if ($mask & unicast_global) == unicast_global {
+ assert!(ip!($s).is_unicast_global());
+ } else {
+ assert!(!ip!($s).is_unicast_global());
+ }
+ if ($mask & documentation) == documentation {
+ assert!(ip!($s).is_documentation());
+ } else {
+ assert!(!ip!($s).is_documentation());
+ }
+ if ($mask & benchmarking) == benchmarking {
+ assert!(ip!($s).is_benchmarking());
+ } else {
+ assert!(!ip!($s).is_benchmarking());
+ }
+ if ($mask & multicast) != 0 {
+ assert!(ip!($s).multicast_scope().is_some());
+ assert!(ip!($s).is_multicast());
+ } else {
+ assert!(ip!($s).multicast_scope().is_none());
+ assert!(!ip!($s).is_multicast());
+ }
+ if ($mask & multicast_interface_local) == multicast_interface_local {
+ assert_eq!(ip!($s).multicast_scope().unwrap(),
+ Ipv6MulticastScope::InterfaceLocal);
+ }
+ if ($mask & multicast_link_local) == multicast_link_local {
+ assert_eq!(ip!($s).multicast_scope().unwrap(),
+ Ipv6MulticastScope::LinkLocal);
+ }
+ if ($mask & multicast_realm_local) == multicast_realm_local {
+ assert_eq!(ip!($s).multicast_scope().unwrap(),
+ Ipv6MulticastScope::RealmLocal);
+ }
+ if ($mask & multicast_admin_local) == multicast_admin_local {
+ assert_eq!(ip!($s).multicast_scope().unwrap(),
+ Ipv6MulticastScope::AdminLocal);
+ }
+ if ($mask & multicast_site_local) == multicast_site_local {
+ assert_eq!(ip!($s).multicast_scope().unwrap(),
+ Ipv6MulticastScope::SiteLocal);
+ }
+ if ($mask & multicast_organization_local) == multicast_organization_local {
+ assert_eq!(ip!($s).multicast_scope().unwrap(),
+ Ipv6MulticastScope::OrganizationLocal);
+ }
+ if ($mask & multicast_global) == multicast_global {
+ assert_eq!(ip!($s).multicast_scope().unwrap(),
+ Ipv6MulticastScope::Global);
+ }
+ }
+ }
+
+ let unspecified: u32 = 1 << 0;
+ let loopback: u32 = 1 << 1;
+ let unique_local: u32 = 1 << 2;
+ let global: u32 = 1 << 3;
+ let unicast_link_local: u32 = 1 << 4;
+ let unicast_global: u32 = 1 << 7;
+ let documentation: u32 = 1 << 8;
+ let benchmarking: u32 = 1 << 16;
+ let multicast_interface_local: u32 = 1 << 9;
+ let multicast_link_local: u32 = 1 << 10;
+ let multicast_realm_local: u32 = 1 << 11;
+ let multicast_admin_local: u32 = 1 << 12;
+ let multicast_site_local: u32 = 1 << 13;
+ let multicast_organization_local: u32 = 1 << 14;
+ let multicast_global: u32 = 1 << 15;
+
+ check!("::", &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unspecified);
+
+ check!("::1", &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], loopback);
+
+ check!("::0.0.0.2", &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], global | unicast_global);
+
+ check!("1::", &[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], global | unicast_global);
+
+ check!(
+ "::ffff:127.0.0.1",
+ &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0x7f, 0, 0, 1],
+ unicast_global
+ );
+
+ check!(
+ "64:ff9b:1::",
+ &[0, 0x64, 0xff, 0x9b, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ unicast_global
+ );
+
+ check!("100::", &[0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unicast_global);
+
+ check!("2001::", &[0x20, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unicast_global);
+
+ check!(
+ "2001:1::1",
+ &[0x20, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
+ global | unicast_global
+ );
+
+ check!(
+ "2001:1::2",
+ &[0x20, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2],
+ global | unicast_global
+ );
+
+ check!(
+ "2001:3::",
+ &[0x20, 1, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ global | unicast_global
+ );
+
+ check!(
+ "2001:4:112::",
+ &[0x20, 1, 0, 4, 1, 0x12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ global | unicast_global
+ );
+
+ check!(
+ "2001:20::",
+ &[0x20, 1, 0, 0x20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ global | unicast_global
+ );
+
+ check!("2001:30::", &[0x20, 1, 0, 0x30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unicast_global);
+
+ check!(
+ "2001:200::",
+ &[0x20, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ global | unicast_global
+ );
+
+ check!("fc00::", &[0xfc, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unique_local);
+
+ check!(
+ "fdff:ffff::",
+ &[0xfd, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ unique_local
+ );
+
+ check!(
+ "fe80:ffff::",
+ &[0xfe, 0x80, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ unicast_link_local
+ );
+
+ check!("fe80::", &[0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unicast_link_local);
+
+ check!(
+ "febf:ffff::",
+ &[0xfe, 0xbf, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ unicast_link_local
+ );
+
+ check!("febf::", &[0xfe, 0xbf, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unicast_link_local);
+
+ check!(
+ "febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff",
+ &[
+ 0xfe, 0xbf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff
+ ],
+ unicast_link_local
+ );
+
+ check!(
+ "fe80::ffff:ffff:ffff:ffff",
+ &[
+ 0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff
+ ],
+ unicast_link_local
+ );
+
+ check!(
+ "fe80:0:0:1::",
+ &[0xfe, 0x80, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
+ unicast_link_local
+ );
+
+ check!(
+ "fec0::",
+ &[0xfe, 0xc0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ unicast_global | global
+ );
+
+ check!(
+ "ff01::",
+ &[0xff, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ multicast_interface_local | global
+ );
+
+ check!(
+ "ff02::",
+ &[0xff, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ multicast_link_local | global
+ );
+
+ check!(
+ "ff03::",
+ &[0xff, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ multicast_realm_local | global
+ );
+
+ check!(
+ "ff04::",
+ &[0xff, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ multicast_admin_local | global
+ );
+
+ check!(
+ "ff05::",
+ &[0xff, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ multicast_site_local | global
+ );
+
+ check!(
+ "ff08::",
+ &[0xff, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ multicast_organization_local | global
+ );
+
+ check!(
+ "ff0e::",
+ &[0xff, 0xe, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ multicast_global | global
+ );
+
+ check!(
+ "2001:db8:85a3::8a2e:370:7334",
+ &[0x20, 1, 0xd, 0xb8, 0x85, 0xa3, 0, 0, 0, 0, 0x8a, 0x2e, 3, 0x70, 0x73, 0x34],
+ documentation
+ );
+
+ check!(
+ "2001:2::ac32:23ff:21",
+ &[0x20, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0xac, 0x32, 0x23, 0xff, 0, 0x21],
+ benchmarking
+ );
+
+ check!(
+ "102:304:506:708:90a:b0c:d0e:f10",
+ &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
+ global | unicast_global
+ );
+}
+
+#[test]
+fn to_socket_addr_socketaddr() {
+ let a = sa4(Ipv4Addr::new(77, 88, 21, 11), 12345);
+ assert_eq!(Ok(vec![a]), tsa(a));
+}
+
+#[test]
+fn test_ipv4_to_int() {
+ let a = Ipv4Addr::new(0x11, 0x22, 0x33, 0x44);
+ assert_eq!(u32::from(a), 0x11223344);
+}
+
+#[test]
+fn test_int_to_ipv4() {
+ let a = Ipv4Addr::new(0x11, 0x22, 0x33, 0x44);
+ assert_eq!(Ipv4Addr::from(0x11223344), a);
+}
+
+#[test]
+fn test_ipv6_to_int() {
+ let a = Ipv6Addr::new(0x1122, 0x3344, 0x5566, 0x7788, 0x99aa, 0xbbcc, 0xddee, 0xff11);
+ assert_eq!(u128::from(a), 0x112233445566778899aabbccddeeff11u128);
+}
+
+#[test]
+fn test_int_to_ipv6() {
+ let a = Ipv6Addr::new(0x1122, 0x3344, 0x5566, 0x7788, 0x99aa, 0xbbcc, 0xddee, 0xff11);
+ assert_eq!(Ipv6Addr::from(0x112233445566778899aabbccddeeff11u128), a);
+}
+
+#[test]
+fn ipv4_from_constructors() {
+ assert_eq!(Ipv4Addr::LOCALHOST, Ipv4Addr::new(127, 0, 0, 1));
+ assert!(Ipv4Addr::LOCALHOST.is_loopback());
+ assert_eq!(Ipv4Addr::UNSPECIFIED, Ipv4Addr::new(0, 0, 0, 0));
+ assert!(Ipv4Addr::UNSPECIFIED.is_unspecified());
+ assert_eq!(Ipv4Addr::BROADCAST, Ipv4Addr::new(255, 255, 255, 255));
+ assert!(Ipv4Addr::BROADCAST.is_broadcast());
+}
+
+#[test]
+fn ipv6_from_constructors() {
+ assert_eq!(Ipv6Addr::LOCALHOST, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1));
+ assert!(Ipv6Addr::LOCALHOST.is_loopback());
+ assert_eq!(Ipv6Addr::UNSPECIFIED, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0));
+ assert!(Ipv6Addr::UNSPECIFIED.is_unspecified());
+}
+
+#[test]
+fn ipv4_from_octets() {
+ assert_eq!(Ipv4Addr::from([127, 0, 0, 1]), Ipv4Addr::new(127, 0, 0, 1))
+}
+
+#[test]
+fn ipv6_from_segments() {
+ let from_u16s =
+ Ipv6Addr::from([0x0011, 0x2233, 0x4455, 0x6677, 0x8899, 0xaabb, 0xccdd, 0xeeff]);
+ let new = Ipv6Addr::new(0x0011, 0x2233, 0x4455, 0x6677, 0x8899, 0xaabb, 0xccdd, 0xeeff);
+ assert_eq!(new, from_u16s);
+}
+
+#[test]
+fn ipv6_from_octets() {
+ let from_u16s =
+ Ipv6Addr::from([0x0011, 0x2233, 0x4455, 0x6677, 0x8899, 0xaabb, 0xccdd, 0xeeff]);
+ let from_u8s = Ipv6Addr::from([
+ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee,
+ 0xff,
+ ]);
+ assert_eq!(from_u16s, from_u8s);
+}
+
+#[test]
+fn cmp() {
+ let v41 = Ipv4Addr::new(100, 64, 3, 3);
+ let v42 = Ipv4Addr::new(192, 0, 2, 2);
+ let v61 = "2001:db8:f00::1002".parse::<Ipv6Addr>().unwrap();
+ let v62 = "2001:db8:f00::2001".parse::<Ipv6Addr>().unwrap();
+ assert!(v41 < v42);
+ assert!(v61 < v62);
+
+ assert_eq!(v41, IpAddr::V4(v41));
+ assert_eq!(v61, IpAddr::V6(v61));
+ assert!(v41 != IpAddr::V4(v42));
+ assert!(v61 != IpAddr::V6(v62));
+
+ assert!(v41 < IpAddr::V4(v42));
+ assert!(v61 < IpAddr::V6(v62));
+ assert!(IpAddr::V4(v41) < v42);
+ assert!(IpAddr::V6(v61) < v62);
+
+ assert!(v41 < IpAddr::V6(v61));
+ assert!(IpAddr::V4(v41) < v61);
+}
+
+#[test]
+fn is_v4() {
+ let ip = IpAddr::V4(Ipv4Addr::new(100, 64, 3, 3));
+ assert!(ip.is_ipv4());
+ assert!(!ip.is_ipv6());
+}
+
+#[test]
+fn is_v6() {
+ let ip = IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678));
+ assert!(!ip.is_ipv4());
+ assert!(ip.is_ipv6());
+}
+
+#[test]
+fn ipv4_const() {
+ // test that the methods of `Ipv4Addr` are usable in a const context
+
+ const IP_ADDRESS: Ipv4Addr = Ipv4Addr::new(127, 0, 0, 1);
+ assert_eq!(IP_ADDRESS, Ipv4Addr::LOCALHOST);
+
+ const OCTETS: [u8; 4] = IP_ADDRESS.octets();
+ assert_eq!(OCTETS, [127, 0, 0, 1]);
+
+ const IS_UNSPECIFIED: bool = IP_ADDRESS.is_unspecified();
+ assert!(!IS_UNSPECIFIED);
+
+ const IS_LOOPBACK: bool = IP_ADDRESS.is_loopback();
+ assert!(IS_LOOPBACK);
+
+ const IS_PRIVATE: bool = IP_ADDRESS.is_private();
+ assert!(!IS_PRIVATE);
+
+ const IS_LINK_LOCAL: bool = IP_ADDRESS.is_link_local();
+ assert!(!IS_LINK_LOCAL);
+
+ const IS_GLOBAL: bool = IP_ADDRESS.is_global();
+ assert!(!IS_GLOBAL);
+
+ const IS_SHARED: bool = IP_ADDRESS.is_shared();
+ assert!(!IS_SHARED);
+
+ const IS_BENCHMARKING: bool = IP_ADDRESS.is_benchmarking();
+ assert!(!IS_BENCHMARKING);
+
+ const IS_RESERVED: bool = IP_ADDRESS.is_reserved();
+ assert!(!IS_RESERVED);
+
+ const IS_MULTICAST: bool = IP_ADDRESS.is_multicast();
+ assert!(!IS_MULTICAST);
+
+ const IS_BROADCAST: bool = IP_ADDRESS.is_broadcast();
+ assert!(!IS_BROADCAST);
+
+ const IS_DOCUMENTATION: bool = IP_ADDRESS.is_documentation();
+ assert!(!IS_DOCUMENTATION);
+
+ const IP_V6_COMPATIBLE: Ipv6Addr = IP_ADDRESS.to_ipv6_compatible();
+ assert_eq!(
+ IP_V6_COMPATIBLE,
+ Ipv6Addr::from([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 0, 0, 1])
+ );
+
+ const IP_V6_MAPPED: Ipv6Addr = IP_ADDRESS.to_ipv6_mapped();
+ assert_eq!(
+ IP_V6_MAPPED,
+ Ipv6Addr::from([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 127, 0, 0, 1])
+ );
+}
+
+#[test]
+fn ipv6_const() {
+ // test that the methods of `Ipv6Addr` are usable in a const context
+
+ const IP_ADDRESS: Ipv6Addr = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1);
+ assert_eq!(IP_ADDRESS, Ipv6Addr::LOCALHOST);
+
+ const SEGMENTS: [u16; 8] = IP_ADDRESS.segments();
+ assert_eq!(SEGMENTS, [0, 0, 0, 0, 0, 0, 0, 1]);
+
+ const OCTETS: [u8; 16] = IP_ADDRESS.octets();
+ assert_eq!(OCTETS, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]);
+
+ const IS_UNSPECIFIED: bool = IP_ADDRESS.is_unspecified();
+ assert!(!IS_UNSPECIFIED);
+
+ const IS_LOOPBACK: bool = IP_ADDRESS.is_loopback();
+ assert!(IS_LOOPBACK);
+
+ const IS_GLOBAL: bool = IP_ADDRESS.is_global();
+ assert!(!IS_GLOBAL);
+
+ const IS_UNIQUE_LOCAL: bool = IP_ADDRESS.is_unique_local();
+ assert!(!IS_UNIQUE_LOCAL);
+
+ const IS_UNICAST_LINK_LOCAL: bool = IP_ADDRESS.is_unicast_link_local();
+ assert!(!IS_UNICAST_LINK_LOCAL);
+
+ const IS_DOCUMENTATION: bool = IP_ADDRESS.is_documentation();
+ assert!(!IS_DOCUMENTATION);
+
+ const IS_BENCHMARKING: bool = IP_ADDRESS.is_benchmarking();
+ assert!(!IS_BENCHMARKING);
+
+ const IS_UNICAST_GLOBAL: bool = IP_ADDRESS.is_unicast_global();
+ assert!(!IS_UNICAST_GLOBAL);
+
+ const MULTICAST_SCOPE: Option<Ipv6MulticastScope> = IP_ADDRESS.multicast_scope();
+ assert_eq!(MULTICAST_SCOPE, None);
+
+ const IS_MULTICAST: bool = IP_ADDRESS.is_multicast();
+ assert!(!IS_MULTICAST);
+
+ const IP_V4: Option<Ipv4Addr> = IP_ADDRESS.to_ipv4();
+ assert_eq!(IP_V4.unwrap(), Ipv4Addr::new(0, 0, 0, 1));
+}
+
+#[test]
+fn ip_const() {
+ // test that the methods of `IpAddr` are usable in a const context
+
+ const IP_ADDRESS: IpAddr = IpAddr::V4(Ipv4Addr::LOCALHOST);
+
+ const IS_UNSPECIFIED: bool = IP_ADDRESS.is_unspecified();
+ assert!(!IS_UNSPECIFIED);
+
+ const IS_LOOPBACK: bool = IP_ADDRESS.is_loopback();
+ assert!(IS_LOOPBACK);
+
+ const IS_GLOBAL: bool = IP_ADDRESS.is_global();
+ assert!(!IS_GLOBAL);
+
+ const IS_MULTICAST: bool = IP_ADDRESS.is_multicast();
+ assert!(!IS_MULTICAST);
+
+ const IS_IP_V4: bool = IP_ADDRESS.is_ipv4();
+ assert!(IS_IP_V4);
+
+ const IS_IP_V6: bool = IP_ADDRESS.is_ipv6();
+ assert!(!IS_IP_V6);
+}
+
+#[test]
+fn structural_match() {
+ // test that all IP types can be structurally matched upon
+
+ const IPV4: Ipv4Addr = Ipv4Addr::LOCALHOST;
+ match IPV4 {
+ Ipv4Addr::LOCALHOST => {}
+ _ => unreachable!(),
+ }
+
+ const IPV6: Ipv6Addr = Ipv6Addr::LOCALHOST;
+ match IPV6 {
+ Ipv6Addr::LOCALHOST => {}
+ _ => unreachable!(),
+ }
+
+ const IP: IpAddr = IpAddr::V4(Ipv4Addr::LOCALHOST);
+ match IP {
+ IpAddr::V4(Ipv4Addr::LOCALHOST) => {}
+ _ => unreachable!(),
+ }
+}
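
The property tests above drive every classification predicate from a single bitmask per address, asserting each `is_*` method both positively and negatively. The following is a minimal sketch of that pattern outside the macro machinery; the flag names and the `check` helper are illustrative only and are not part of the patch:

use std::net::Ipv4Addr;
use std::str::FromStr;

const LOOPBACK: u8 = 1 << 0;
const PRIVATE: u8 = 1 << 1;
const MULTICAST: u8 = 1 << 2;

// Assert every predicate for `s`: set bits must hold, cleared bits must not.
fn check(s: &str, mask: u8) {
    let ip = Ipv4Addr::from_str(s).unwrap();
    assert_eq!(ip.is_loopback(), (mask & LOOPBACK) != 0);
    assert_eq!(ip.is_private(), (mask & PRIVATE) != 0);
    assert_eq!(ip.is_multicast(), (mask & MULTICAST) != 0);
}

fn main() {
    check("127.0.0.1", LOOPBACK);
    check("10.0.0.1", PRIVATE);
    check("224.0.0.1", MULTICAST);
    check("192.0.2.1", 0); // documentation range: none of the three flags set
}
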
diff --git a/library/std/src/net/mod.rs b/library/std/src/net/mod.rs
index e7a40bdaf..01e3db9de 100644
--- a/library/std/src/net/mod.rs
+++ b/library/std/src/net/mod.rs
@@ -24,11 +24,11 @@
use crate::io::{self, ErrorKind};
#[stable(feature = "rust1", since = "1.0.0")]
-pub use self::addr::{SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs};
-#[stable(feature = "rust1", since = "1.0.0")]
-pub use self::ip::{IpAddr, Ipv4Addr, Ipv6Addr, Ipv6MulticastScope};
+pub use self::ip_addr::{IpAddr, Ipv4Addr, Ipv6Addr, Ipv6MulticastScope};
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::parser::AddrParseError;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::socket_addr::{SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs};
#[unstable(feature = "tcplistener_into_incoming", issue = "88339")]
pub use self::tcp::IntoIncoming;
#[stable(feature = "rust1", since = "1.0.0")]
@@ -36,12 +36,13 @@ pub use self::tcp::{Incoming, TcpListener, TcpStream};
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::udp::UdpSocket;
-mod addr;
-mod ip;
+mod display_buffer;
+mod ip_addr;
mod parser;
+mod socket_addr;
mod tcp;
#[cfg(test)]
-mod test;
+pub(crate) mod test;
mod udp;
/// Possible values which can be passed to the [`TcpStream::shutdown`] method.
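
The module reshuffle above is internal only: `ip` becomes `ip_addr`, `addr` becomes `socket_addr`, and a `display_buffer` helper module is added, while the stable re-exports keep the public paths unchanged. A small sketch of the unaffected user-facing API (nothing here is new in this patch):

use std::net::{IpAddr, Ipv4Addr, SocketAddr};

fn main() {
    // Still imported from `std::net`, regardless of which internal module now defines it.
    let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080);
    assert_eq!(addr.to_string(), "127.0.0.1:8080");
}
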
diff --git a/library/std/src/net/parser.rs b/library/std/src/net/parser.rs
index 069b66099..a38031c48 100644
--- a/library/std/src/net/parser.rs
+++ b/library/std/src/net/parser.rs
@@ -39,8 +39,8 @@ struct Parser<'a> {
}
impl<'a> Parser<'a> {
- fn new(input: &'a str) -> Parser<'a> {
- Parser { state: input.as_bytes() }
+ fn new(input: &'a [u8]) -> Parser<'a> {
+ Parser { state: input }
}
/// Run a parser, and restore the pre-parse state if it fails.
@@ -273,32 +273,106 @@ impl<'a> Parser<'a> {
}
}
+impl IpAddr {
+ /// Parse an IP address from a slice of bytes.
+ ///
+ /// ```
+ /// #![feature(addr_parse_ascii)]
+ ///
+ /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
+ ///
+ /// let localhost_v4 = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1));
+ /// let localhost_v6 = IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1));
+ ///
+ /// assert_eq!(IpAddr::parse_ascii(b"127.0.0.1"), Ok(localhost_v4));
+ /// assert_eq!(IpAddr::parse_ascii(b"::1"), Ok(localhost_v6));
+ /// ```
+ #[unstable(feature = "addr_parse_ascii", issue = "101035")]
+ pub fn parse_ascii(b: &[u8]) -> Result<Self, AddrParseError> {
+ Parser::new(b).parse_with(|p| p.read_ip_addr(), AddrKind::Ip)
+ }
+}
+
#[stable(feature = "ip_addr", since = "1.7.0")]
impl FromStr for IpAddr {
type Err = AddrParseError;
fn from_str(s: &str) -> Result<IpAddr, AddrParseError> {
- Parser::new(s).parse_with(|p| p.read_ip_addr(), AddrKind::Ip)
+ Self::parse_ascii(s.as_bytes())
}
}
-#[stable(feature = "rust1", since = "1.0.0")]
-impl FromStr for Ipv4Addr {
- type Err = AddrParseError;
- fn from_str(s: &str) -> Result<Ipv4Addr, AddrParseError> {
+impl Ipv4Addr {
+ /// Parse an IPv4 address from a slice of bytes.
+ ///
+ /// ```
+ /// #![feature(addr_parse_ascii)]
+ ///
+ /// use std::net::Ipv4Addr;
+ ///
+ /// let localhost = Ipv4Addr::new(127, 0, 0, 1);
+ ///
+ /// assert_eq!(Ipv4Addr::parse_ascii(b"127.0.0.1"), Ok(localhost));
+ /// ```
+ #[unstable(feature = "addr_parse_ascii", issue = "101035")]
+ pub fn parse_ascii(b: &[u8]) -> Result<Self, AddrParseError> {
// don't try to parse if too long
- if s.len() > 15 {
+ if b.len() > 15 {
Err(AddrParseError(AddrKind::Ipv4))
} else {
- Parser::new(s).parse_with(|p| p.read_ipv4_addr(), AddrKind::Ipv4)
+ Parser::new(b).parse_with(|p| p.read_ipv4_addr(), AddrKind::Ipv4)
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
+impl FromStr for Ipv4Addr {
+ type Err = AddrParseError;
+ fn from_str(s: &str) -> Result<Ipv4Addr, AddrParseError> {
+ Self::parse_ascii(s.as_bytes())
+ }
+}
+
+impl Ipv6Addr {
+ /// Parse an IPv6 address from a slice of bytes.
+ ///
+ /// ```
+ /// #![feature(addr_parse_ascii)]
+ ///
+ /// use std::net::Ipv6Addr;
+ ///
+ /// let localhost = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1);
+ ///
+ /// assert_eq!(Ipv6Addr::parse_ascii(b"::1"), Ok(localhost));
+ /// ```
+ #[unstable(feature = "addr_parse_ascii", issue = "101035")]
+ pub fn parse_ascii(b: &[u8]) -> Result<Self, AddrParseError> {
+ Parser::new(b).parse_with(|p| p.read_ipv6_addr(), AddrKind::Ipv6)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
impl FromStr for Ipv6Addr {
type Err = AddrParseError;
fn from_str(s: &str) -> Result<Ipv6Addr, AddrParseError> {
- Parser::new(s).parse_with(|p| p.read_ipv6_addr(), AddrKind::Ipv6)
+ Self::parse_ascii(s.as_bytes())
+ }
+}
+
+impl SocketAddrV4 {
+ /// Parse an IPv4 socket address from a slice of bytes.
+ ///
+ /// ```
+ /// #![feature(addr_parse_ascii)]
+ ///
+ /// use std::net::{Ipv4Addr, SocketAddrV4};
+ ///
+ /// let socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080);
+ ///
+ /// assert_eq!(SocketAddrV4::parse_ascii(b"127.0.0.1:8080"), Ok(socket));
+ /// ```
+ #[unstable(feature = "addr_parse_ascii", issue = "101035")]
+ pub fn parse_ascii(b: &[u8]) -> Result<Self, AddrParseError> {
+ Parser::new(b).parse_with(|p| p.read_socket_addr_v4(), AddrKind::SocketV4)
}
}
@@ -306,7 +380,25 @@ impl FromStr for Ipv6Addr {
impl FromStr for SocketAddrV4 {
type Err = AddrParseError;
fn from_str(s: &str) -> Result<SocketAddrV4, AddrParseError> {
- Parser::new(s).parse_with(|p| p.read_socket_addr_v4(), AddrKind::SocketV4)
+ Self::parse_ascii(s.as_bytes())
+ }
+}
+
+impl SocketAddrV6 {
+ /// Parse an IPv6 socket address from a slice of bytes.
+ ///
+ /// ```
+ /// #![feature(addr_parse_ascii)]
+ ///
+ /// use std::net::{Ipv6Addr, SocketAddrV6};
+ ///
+ /// let socket = SocketAddrV6::new(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1), 8080, 0, 0);
+ ///
+ /// assert_eq!(SocketAddrV6::parse_ascii(b"[2001:db8::1]:8080"), Ok(socket));
+ /// ```
+ #[unstable(feature = "addr_parse_ascii", issue = "101035")]
+ pub fn parse_ascii(b: &[u8]) -> Result<Self, AddrParseError> {
+ Parser::new(b).parse_with(|p| p.read_socket_addr_v6(), AddrKind::SocketV6)
}
}
@@ -314,7 +406,27 @@ impl FromStr for SocketAddrV4 {
impl FromStr for SocketAddrV6 {
type Err = AddrParseError;
fn from_str(s: &str) -> Result<SocketAddrV6, AddrParseError> {
- Parser::new(s).parse_with(|p| p.read_socket_addr_v6(), AddrKind::SocketV6)
+ Self::parse_ascii(s.as_bytes())
+ }
+}
+
+impl SocketAddr {
+ /// Parse a socket address from a slice of bytes.
+ ///
+ /// ```
+ /// #![feature(addr_parse_ascii)]
+ ///
+ /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
+ ///
+ /// let socket_v4 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
+ /// let socket_v6 = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080);
+ ///
+ /// assert_eq!(SocketAddr::parse_ascii(b"127.0.0.1:8080"), Ok(socket_v4));
+ /// assert_eq!(SocketAddr::parse_ascii(b"[::1]:8080"), Ok(socket_v6));
+ /// ```
+ #[unstable(feature = "addr_parse_ascii", issue = "101035")]
+ pub fn parse_ascii(b: &[u8]) -> Result<Self, AddrParseError> {
+ Parser::new(b).parse_with(|p| p.read_socket_addr(), AddrKind::Socket)
}
}
@@ -322,7 +434,7 @@ impl FromStr for SocketAddrV6 {
impl FromStr for SocketAddr {
type Err = AddrParseError;
fn from_str(s: &str) -> Result<SocketAddr, AddrParseError> {
- Parser::new(s).parse_with(|p| p.read_socket_addr(), AddrKind::Socket)
+ Self::parse_ascii(s.as_bytes())
}
}
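
The parser changes switch `Parser` to operate on byte slices and expose `parse_ascii` constructors, with the stable `FromStr` impls delegating to them via `s.as_bytes()`. A minimal usage sketch follows; it is nightly-only, since `addr_parse_ascii` is unstable under tracking issue #101035:

#![feature(addr_parse_ascii)]

use std::net::{Ipv4Addr, SocketAddr};

fn main() {
    // Parse straight from bytes (e.g. read off a socket) without building a &str first.
    let raw: &[u8] = b"127.0.0.1:8080";
    let addr = SocketAddr::parse_ascii(raw).unwrap();
    assert_eq!(addr.port(), 8080);

    // The stable string-based path is unchanged; it now simply delegates to the byte parser.
    let ip: Ipv4Addr = "127.0.0.1".parse().unwrap();
    assert_eq!(ip, Ipv4Addr::new(127, 0, 0, 1));
}
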
diff --git a/library/std/src/net/socket_addr.rs b/library/std/src/net/socket_addr.rs
new file mode 100644
index 000000000..33b0dfa03
--- /dev/null
+++ b/library/std/src/net/socket_addr.rs
@@ -0,0 +1,974 @@
+#[cfg(all(test, not(target_os = "emscripten")))]
+mod tests;
+
+use crate::cmp::Ordering;
+use crate::fmt::{self, Write};
+use crate::hash;
+use crate::io;
+use crate::iter;
+use crate::mem;
+use crate::net::{IpAddr, Ipv4Addr, Ipv6Addr};
+use crate::option;
+use crate::slice;
+use crate::sys::net::netc as c;
+use crate::sys_common::net::LookupHost;
+use crate::sys_common::{FromInner, IntoInner};
+use crate::vec;
+
+use super::display_buffer::DisplayBuffer;
+
+/// An internet socket address, either IPv4 or IPv6.
+///
+/// Internet socket addresses consist of an [IP address], a 16-bit port number, as well
+/// as possibly some version-dependent additional information. See [`SocketAddrV4`]'s and
+/// [`SocketAddrV6`]'s respective documentation for more details.
+///
+/// The size of a `SocketAddr` instance may vary depending on the target operating
+/// system.
+///
+/// [IP address]: IpAddr
+///
+/// # Examples
+///
+/// ```
+/// use std::net::{IpAddr, Ipv4Addr, SocketAddr};
+///
+/// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
+///
+/// assert_eq!("127.0.0.1:8080".parse(), Ok(socket));
+/// assert_eq!(socket.port(), 8080);
+/// assert_eq!(socket.is_ipv4(), true);
+/// ```
+#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub enum SocketAddr {
+ /// An IPv4 socket address.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ V4(#[stable(feature = "rust1", since = "1.0.0")] SocketAddrV4),
+ /// An IPv6 socket address.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ V6(#[stable(feature = "rust1", since = "1.0.0")] SocketAddrV6),
+}
+
+/// An IPv4 socket address.
+///
+/// IPv4 socket addresses consist of an [`IPv4` address] and a 16-bit port number, as
+/// stated in [IETF RFC 793].
+///
+/// See [`SocketAddr`] for a type encompassing both IPv4 and IPv6 socket addresses.
+///
+/// The size of a `SocketAddrV4` struct may vary depending on the target operating
+/// system. Do not assume that this type has the same memory layout as the underlying
+/// system representation.
+///
+/// [IETF RFC 793]: https://tools.ietf.org/html/rfc793
+/// [`IPv4` address]: Ipv4Addr
+///
+/// # Examples
+///
+/// ```
+/// use std::net::{Ipv4Addr, SocketAddrV4};
+///
+/// let socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080);
+///
+/// assert_eq!("127.0.0.1:8080".parse(), Ok(socket));
+/// assert_eq!(socket.ip(), &Ipv4Addr::new(127, 0, 0, 1));
+/// assert_eq!(socket.port(), 8080);
+/// ```
+#[derive(Copy, Clone, Eq, PartialEq)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct SocketAddrV4 {
+ ip: Ipv4Addr,
+ port: u16,
+}
+
+/// An IPv6 socket address.
+///
+/// IPv6 socket addresses consist of an [`IPv6` address], a 16-bit port number, as well
+/// as fields containing the traffic class, the flow label, and a scope identifier
+/// (see [IETF RFC 2553, Section 3.3] for more details).
+///
+/// See [`SocketAddr`] for a type encompassing both IPv4 and IPv6 socket addresses.
+///
+/// The size of a `SocketAddrV6` struct may vary depending on the target operating
+/// system. Do not assume that this type has the same memory layout as the underlying
+/// system representation.
+///
+/// [IETF RFC 2553, Section 3.3]: https://tools.ietf.org/html/rfc2553#section-3.3
+/// [`IPv6` address]: Ipv6Addr
+///
+/// # Examples
+///
+/// ```
+/// use std::net::{Ipv6Addr, SocketAddrV6};
+///
+/// let socket = SocketAddrV6::new(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1), 8080, 0, 0);
+///
+/// assert_eq!("[2001:db8::1]:8080".parse(), Ok(socket));
+/// assert_eq!(socket.ip(), &Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1));
+/// assert_eq!(socket.port(), 8080);
+/// ```
+#[derive(Copy, Clone, Eq, PartialEq)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct SocketAddrV6 {
+ ip: Ipv6Addr,
+ port: u16,
+ flowinfo: u32,
+ scope_id: u32,
+}
+
+impl SocketAddr {
+ /// Creates a new socket address from an [IP address] and a port number.
+ ///
+ /// [IP address]: IpAddr
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv4Addr, SocketAddr};
+ ///
+ /// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
+ /// assert_eq!(socket.ip(), IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
+ /// assert_eq!(socket.port(), 8080);
+ /// ```
+ #[stable(feature = "ip_addr", since = "1.7.0")]
+ #[must_use]
+ #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
+ pub const fn new(ip: IpAddr, port: u16) -> SocketAddr {
+ match ip {
+ IpAddr::V4(a) => SocketAddr::V4(SocketAddrV4::new(a, port)),
+ IpAddr::V6(a) => SocketAddr::V6(SocketAddrV6::new(a, port, 0, 0)),
+ }
+ }
+
+ /// Returns the IP address associated with this socket address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv4Addr, SocketAddr};
+ ///
+ /// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
+ /// assert_eq!(socket.ip(), IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
+ /// ```
+ #[must_use]
+ #[stable(feature = "ip_addr", since = "1.7.0")]
+ #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
+ pub const fn ip(&self) -> IpAddr {
+ match *self {
+ SocketAddr::V4(ref a) => IpAddr::V4(*a.ip()),
+ SocketAddr::V6(ref a) => IpAddr::V6(*a.ip()),
+ }
+ }
+
+ /// Changes the IP address associated with this socket address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv4Addr, SocketAddr};
+ ///
+ /// let mut socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
+ /// socket.set_ip(IpAddr::V4(Ipv4Addr::new(10, 10, 0, 1)));
+ /// assert_eq!(socket.ip(), IpAddr::V4(Ipv4Addr::new(10, 10, 0, 1)));
+ /// ```
+ #[stable(feature = "sockaddr_setters", since = "1.9.0")]
+ pub fn set_ip(&mut self, new_ip: IpAddr) {
+ // `match (*self, new_ip)` would have us mutate a copy of self only to throw it away.
+ match (self, new_ip) {
+ (&mut SocketAddr::V4(ref mut a), IpAddr::V4(new_ip)) => a.set_ip(new_ip),
+ (&mut SocketAddr::V6(ref mut a), IpAddr::V6(new_ip)) => a.set_ip(new_ip),
+ (self_, new_ip) => *self_ = Self::new(new_ip, self_.port()),
+ }
+ }
+
+ /// Returns the port number associated with this socket address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv4Addr, SocketAddr};
+ ///
+ /// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
+ /// assert_eq!(socket.port(), 8080);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
+ pub const fn port(&self) -> u16 {
+ match *self {
+ SocketAddr::V4(ref a) => a.port(),
+ SocketAddr::V6(ref a) => a.port(),
+ }
+ }
+
+ /// Changes the port number associated with this socket address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv4Addr, SocketAddr};
+ ///
+ /// let mut socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
+ /// socket.set_port(1025);
+ /// assert_eq!(socket.port(), 1025);
+ /// ```
+ #[stable(feature = "sockaddr_setters", since = "1.9.0")]
+ pub fn set_port(&mut self, new_port: u16) {
+ match *self {
+ SocketAddr::V4(ref mut a) => a.set_port(new_port),
+ SocketAddr::V6(ref mut a) => a.set_port(new_port),
+ }
+ }
+
+ /// Returns [`true`] if the [IP address] in this `SocketAddr` is an
+ /// [`IPv4` address], and [`false`] otherwise.
+ ///
+ /// [IP address]: IpAddr
+ /// [`IPv4` address]: IpAddr::V4
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv4Addr, SocketAddr};
+ ///
+ /// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
+ /// assert_eq!(socket.is_ipv4(), true);
+ /// assert_eq!(socket.is_ipv6(), false);
+ /// ```
+ #[must_use]
+ #[stable(feature = "sockaddr_checker", since = "1.16.0")]
+ #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
+ pub const fn is_ipv4(&self) -> bool {
+ matches!(*self, SocketAddr::V4(_))
+ }
+
+ /// Returns [`true`] if the [IP address] in this `SocketAddr` is an
+ /// [`IPv6` address], and [`false`] otherwise.
+ ///
+ /// [IP address]: IpAddr
+ /// [`IPv6` address]: IpAddr::V6
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv6Addr, SocketAddr};
+ ///
+ /// let socket = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 65535, 0, 1)), 8080);
+ /// assert_eq!(socket.is_ipv4(), false);
+ /// assert_eq!(socket.is_ipv6(), true);
+ /// ```
+ #[must_use]
+ #[stable(feature = "sockaddr_checker", since = "1.16.0")]
+ #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
+ pub const fn is_ipv6(&self) -> bool {
+ matches!(*self, SocketAddr::V6(_))
+ }
+}
+
+impl SocketAddrV4 {
+ /// Creates a new socket address from an [`IPv4` address] and a port number.
+ ///
+ /// [`IPv4` address]: Ipv4Addr
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{SocketAddrV4, Ipv4Addr};
+ ///
+ /// let socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
+ pub const fn new(ip: Ipv4Addr, port: u16) -> SocketAddrV4 {
+ SocketAddrV4 { ip, port }
+ }
+
+ /// Returns the IP address associated with this socket address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{SocketAddrV4, Ipv4Addr};
+ ///
+ /// let socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080);
+ /// assert_eq!(socket.ip(), &Ipv4Addr::new(127, 0, 0, 1));
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
+ pub const fn ip(&self) -> &Ipv4Addr {
+ &self.ip
+ }
+
+ /// Changes the IP address associated with this socket address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{SocketAddrV4, Ipv4Addr};
+ ///
+ /// let mut socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080);
+ /// socket.set_ip(Ipv4Addr::new(192, 168, 0, 1));
+ /// assert_eq!(socket.ip(), &Ipv4Addr::new(192, 168, 0, 1));
+ /// ```
+ #[stable(feature = "sockaddr_setters", since = "1.9.0")]
+ pub fn set_ip(&mut self, new_ip: Ipv4Addr) {
+ self.ip = new_ip;
+ }
+
+ /// Returns the port number associated with this socket address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{SocketAddrV4, Ipv4Addr};
+ ///
+ /// let socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080);
+ /// assert_eq!(socket.port(), 8080);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
+ pub const fn port(&self) -> u16 {
+ self.port
+ }
+
+ /// Changes the port number associated with this socket address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{SocketAddrV4, Ipv4Addr};
+ ///
+ /// let mut socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080);
+ /// socket.set_port(4242);
+ /// assert_eq!(socket.port(), 4242);
+ /// ```
+ #[stable(feature = "sockaddr_setters", since = "1.9.0")]
+ pub fn set_port(&mut self, new_port: u16) {
+ self.port = new_port;
+ }
+}
+
+impl SocketAddrV6 {
+ /// Creates a new socket address from an [`IPv6` address], a 16-bit port number,
+ /// and the `flowinfo` and `scope_id` fields.
+ ///
+ /// For more information on the meaning and layout of the `flowinfo` and `scope_id`
+ /// parameters, see [IETF RFC 2553, Section 3.3].
+ ///
+ /// [IETF RFC 2553, Section 3.3]: https://tools.ietf.org/html/rfc2553#section-3.3
+ /// [`IPv6` address]: Ipv6Addr
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{SocketAddrV6, Ipv6Addr};
+ ///
+ /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
+ pub const fn new(ip: Ipv6Addr, port: u16, flowinfo: u32, scope_id: u32) -> SocketAddrV6 {
+ SocketAddrV6 { ip, port, flowinfo, scope_id }
+ }
+
+ /// Returns the IP address associated with this socket address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{SocketAddrV6, Ipv6Addr};
+ ///
+ /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0);
+ /// assert_eq!(socket.ip(), &Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1));
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
+ pub const fn ip(&self) -> &Ipv6Addr {
+ &self.ip
+ }
+
+ /// Changes the IP address associated with this socket address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{SocketAddrV6, Ipv6Addr};
+ ///
+ /// let mut socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0);
+ /// socket.set_ip(Ipv6Addr::new(76, 45, 0, 0, 0, 0, 0, 0));
+ /// assert_eq!(socket.ip(), &Ipv6Addr::new(76, 45, 0, 0, 0, 0, 0, 0));
+ /// ```
+ #[stable(feature = "sockaddr_setters", since = "1.9.0")]
+ pub fn set_ip(&mut self, new_ip: Ipv6Addr) {
+ self.ip = new_ip;
+ }
+
+ /// Returns the port number associated with this socket address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{SocketAddrV6, Ipv6Addr};
+ ///
+ /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0);
+ /// assert_eq!(socket.port(), 8080);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
+ pub const fn port(&self) -> u16 {
+ self.port
+ }
+
+ /// Changes the port number associated with this socket address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{SocketAddrV6, Ipv6Addr};
+ ///
+ /// let mut socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0);
+ /// socket.set_port(4242);
+ /// assert_eq!(socket.port(), 4242);
+ /// ```
+ #[stable(feature = "sockaddr_setters", since = "1.9.0")]
+ pub fn set_port(&mut self, new_port: u16) {
+ self.port = new_port;
+ }
+
+ /// Returns the flow information associated with this address.
+ ///
+ /// This information corresponds to the `sin6_flowinfo` field in C's `netinet/in.h`,
+ /// as specified in [IETF RFC 2553, Section 3.3].
+ /// It combines information about the flow label and the traffic class as specified
+ /// in [IETF RFC 2460], respectively [Section 6] and [Section 7].
+ ///
+ /// [IETF RFC 2553, Section 3.3]: https://tools.ietf.org/html/rfc2553#section-3.3
+ /// [IETF RFC 2460]: https://tools.ietf.org/html/rfc2460
+ /// [Section 6]: https://tools.ietf.org/html/rfc2460#section-6
+ /// [Section 7]: https://tools.ietf.org/html/rfc2460#section-7
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{SocketAddrV6, Ipv6Addr};
+ ///
+ /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 10, 0);
+ /// assert_eq!(socket.flowinfo(), 10);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
+ pub const fn flowinfo(&self) -> u32 {
+ self.flowinfo
+ }
+
+ /// Changes the flow information associated with this socket address.
+ ///
+ /// See [`SocketAddrV6::flowinfo`]'s documentation for more details.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{SocketAddrV6, Ipv6Addr};
+ ///
+ /// let mut socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 10, 0);
+ /// socket.set_flowinfo(56);
+ /// assert_eq!(socket.flowinfo(), 56);
+ /// ```
+ #[stable(feature = "sockaddr_setters", since = "1.9.0")]
+ pub fn set_flowinfo(&mut self, new_flowinfo: u32) {
+ self.flowinfo = new_flowinfo;
+ }
+
+ /// Returns the scope ID associated with this address.
+ ///
+ /// This information corresponds to the `sin6_scope_id` field in C's `netinet/in.h`,
+ /// as specified in [IETF RFC 2553, Section 3.3].
+ ///
+ /// [IETF RFC 2553, Section 3.3]: https://tools.ietf.org/html/rfc2553#section-3.3
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{SocketAddrV6, Ipv6Addr};
+ ///
+ /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 78);
+ /// assert_eq!(socket.scope_id(), 78);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
+ pub const fn scope_id(&self) -> u32 {
+ self.scope_id
+ }
+
+ /// Changes the scope ID associated with this socket address.
+ ///
+ /// See [`SocketAddrV6::scope_id`]'s documentation for more details.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{SocketAddrV6, Ipv6Addr};
+ ///
+ /// let mut socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 78);
+ /// socket.set_scope_id(42);
+ /// assert_eq!(socket.scope_id(), 42);
+ /// ```
+ #[stable(feature = "sockaddr_setters", since = "1.9.0")]
+ pub fn set_scope_id(&mut self, new_scope_id: u32) {
+ self.scope_id = new_scope_id;
+ }
+}
+
+impl FromInner<c::sockaddr_in> for SocketAddrV4 {
+ fn from_inner(addr: c::sockaddr_in) -> SocketAddrV4 {
+ SocketAddrV4 { ip: Ipv4Addr::from_inner(addr.sin_addr), port: u16::from_be(addr.sin_port) }
+ }
+}
+
+impl FromInner<c::sockaddr_in6> for SocketAddrV6 {
+ fn from_inner(addr: c::sockaddr_in6) -> SocketAddrV6 {
+ SocketAddrV6 {
+ ip: Ipv6Addr::from_inner(addr.sin6_addr),
+ port: u16::from_be(addr.sin6_port),
+ flowinfo: addr.sin6_flowinfo,
+ scope_id: addr.sin6_scope_id,
+ }
+ }
+}
+
+impl IntoInner<c::sockaddr_in> for SocketAddrV4 {
+ fn into_inner(self) -> c::sockaddr_in {
+ c::sockaddr_in {
+ sin_family: c::AF_INET as c::sa_family_t,
+ sin_port: self.port.to_be(),
+ sin_addr: self.ip.into_inner(),
+ ..unsafe { mem::zeroed() }
+ }
+ }
+}
+
+impl IntoInner<c::sockaddr_in6> for SocketAddrV6 {
+ fn into_inner(self) -> c::sockaddr_in6 {
+ c::sockaddr_in6 {
+ sin6_family: c::AF_INET6 as c::sa_family_t,
+ sin6_port: self.port.to_be(),
+ sin6_addr: self.ip.into_inner(),
+ sin6_flowinfo: self.flowinfo,
+ sin6_scope_id: self.scope_id,
+ ..unsafe { mem::zeroed() }
+ }
+ }
+}
+
+#[stable(feature = "ip_from_ip", since = "1.16.0")]
+impl From<SocketAddrV4> for SocketAddr {
+ /// Converts a [`SocketAddrV4`] into a [`SocketAddr::V4`].
+ fn from(sock4: SocketAddrV4) -> SocketAddr {
+ SocketAddr::V4(sock4)
+ }
+}
+
+#[stable(feature = "ip_from_ip", since = "1.16.0")]
+impl From<SocketAddrV6> for SocketAddr {
+ /// Converts a [`SocketAddrV6`] into a [`SocketAddr::V6`].
+ fn from(sock6: SocketAddrV6) -> SocketAddr {
+ SocketAddr::V6(sock6)
+ }
+}
+
+#[stable(feature = "addr_from_into_ip", since = "1.17.0")]
+impl<I: Into<IpAddr>> From<(I, u16)> for SocketAddr {
+ /// Converts a tuple (Into<[`IpAddr`]>, `u16`) into a [`SocketAddr`].
+ ///
+ /// This conversion creates a [`SocketAddr::V4`] for an [`IpAddr::V4`]
+ /// and creates a [`SocketAddr::V6`] for an [`IpAddr::V6`].
+ ///
+ /// The `u16` is treated as the port of the newly created [`SocketAddr`].
+ fn from(pieces: (I, u16)) -> SocketAddr {
+ SocketAddr::new(pieces.0.into(), pieces.1)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for SocketAddr {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ SocketAddr::V4(ref a) => a.fmt(f),
+ SocketAddr::V6(ref a) => a.fmt(f),
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for SocketAddr {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self, fmt)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for SocketAddrV4 {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // If there are no alignment requirements, write the socket address directly to `f`.
+ // Otherwise, write it to a local buffer and then use `f.pad`.
+ if f.precision().is_none() && f.width().is_none() {
+ write!(f, "{}:{}", self.ip(), self.port())
+ } else {
+ const LONGEST_IPV4_SOCKET_ADDR: &str = "255.255.255.255:65536";
+
+ let mut buf = DisplayBuffer::<{ LONGEST_IPV4_SOCKET_ADDR.len() }>::new();
+ // Buffer is long enough for the longest possible IPv4 socket address, so this should never fail.
+ write!(buf, "{}:{}", self.ip(), self.port()).unwrap();
+
+ f.pad(buf.as_str())
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for SocketAddrV4 {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self, fmt)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for SocketAddrV6 {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // If there are no alignment requirements, write the socket address directly to `f`.
+ // Otherwise, write it to a local buffer and then use `f.pad`.
+ if f.precision().is_none() && f.width().is_none() {
+ match self.scope_id() {
+ 0 => write!(f, "[{}]:{}", self.ip(), self.port()),
+ scope_id => write!(f, "[{}%{}]:{}", self.ip(), scope_id, self.port()),
+ }
+ } else {
+ const LONGEST_IPV6_SOCKET_ADDR: &str =
+ "[ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff%4294967296]:65536";
+
+ let mut buf = DisplayBuffer::<{ LONGEST_IPV6_SOCKET_ADDR.len() }>::new();
+ match self.scope_id() {
+ 0 => write!(buf, "[{}]:{}", self.ip(), self.port()),
+ scope_id => write!(buf, "[{}%{}]:{}", self.ip(), scope_id, self.port()),
+ }
+ // Buffer is long enough for the longest possible IPv6 socket address, so this should never fail.
+ .unwrap();
+
+ f.pad(buf.as_str())
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for SocketAddrV6 {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self, fmt)
+ }
+}
+
+#[stable(feature = "socketaddr_ordering", since = "1.45.0")]
+impl PartialOrd for SocketAddrV4 {
+ fn partial_cmp(&self, other: &SocketAddrV4) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+#[stable(feature = "socketaddr_ordering", since = "1.45.0")]
+impl PartialOrd for SocketAddrV6 {
+ fn partial_cmp(&self, other: &SocketAddrV6) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+#[stable(feature = "socketaddr_ordering", since = "1.45.0")]
+impl Ord for SocketAddrV4 {
+ fn cmp(&self, other: &SocketAddrV4) -> Ordering {
+ self.ip().cmp(other.ip()).then(self.port().cmp(&other.port()))
+ }
+}
+
+#[stable(feature = "socketaddr_ordering", since = "1.45.0")]
+impl Ord for SocketAddrV6 {
+ fn cmp(&self, other: &SocketAddrV6) -> Ordering {
+ self.ip().cmp(other.ip()).then(self.port().cmp(&other.port()))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl hash::Hash for SocketAddrV4 {
+ fn hash<H: hash::Hasher>(&self, s: &mut H) {
+ (self.port, self.ip).hash(s)
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl hash::Hash for SocketAddrV6 {
+ fn hash<H: hash::Hasher>(&self, s: &mut H) {
+ (self.port, &self.ip, self.flowinfo, self.scope_id).hash(s)
+ }
+}
+
+/// A trait for objects which can be converted or resolved to one or more
+/// [`SocketAddr`] values.
+///
+/// This trait is used for generic address resolution when constructing network
+/// objects. By default it is implemented for the following types:
+///
+/// * [`SocketAddr`]: [`to_socket_addrs`] is the identity function.
+///
+/// * [`SocketAddrV4`], [`SocketAddrV6`], <code>([IpAddr], [u16])</code>,
+/// <code>([Ipv4Addr], [u16])</code>, <code>([Ipv6Addr], [u16])</code>:
+/// [`to_socket_addrs`] constructs a [`SocketAddr`] trivially.
+///
+/// * <code>(&[str], [u16])</code>: <code>&[str]</code> should be either a string representation
+/// of an [`IpAddr`] address as expected by its [`FromStr`] implementation or a host
+/// name. [`u16`] is the port number.
+///
+/// * <code>&[str]</code>: the string should be either a string representation of a
+/// [`SocketAddr`] as expected by its [`FromStr`] implementation or a
+/// `<host_name>:<port>` pair where `<port>` is a [`u16`] value.
+///
+/// This trait allows constructing network objects like [`TcpStream`] or
+/// [`UdpSocket`] easily with values of various types for the bind/connection
+/// address. It is needed because sometimes one type is more appropriate than
+/// the other: for simple uses a string like `"localhost:12345"` is much nicer
+/// than manual construction of the corresponding [`SocketAddr`], but sometimes
+/// a [`SocketAddr`] value is *the* main source of the address, and converting it to
+/// some other type (e.g., a string) just for it to be converted back to
+/// [`SocketAddr`] in constructor methods is pointless.
+///
+/// Addresses returned by the operating system that are not IP addresses are
+/// silently ignored.
+///
+/// [`FromStr`]: crate::str::FromStr "std::str::FromStr"
+/// [`TcpStream`]: crate::net::TcpStream "net::TcpStream"
+/// [`to_socket_addrs`]: ToSocketAddrs::to_socket_addrs
+/// [`UdpSocket`]: crate::net::UdpSocket "net::UdpSocket"
+///
+/// # Examples
+///
+/// Creating a [`SocketAddr`] iterator that yields one item:
+///
+/// ```
+/// use std::net::{ToSocketAddrs, SocketAddr};
+///
+/// let addr = SocketAddr::from(([127, 0, 0, 1], 443));
+/// let mut addrs_iter = addr.to_socket_addrs().unwrap();
+///
+/// assert_eq!(Some(addr), addrs_iter.next());
+/// assert!(addrs_iter.next().is_none());
+/// ```
+///
+/// Creating a [`SocketAddr`] iterator from a hostname:
+///
+/// ```no_run
+/// use std::net::{SocketAddr, ToSocketAddrs};
+///
+/// // assuming 'localhost' resolves to 127.0.0.1
+/// let mut addrs_iter = "localhost:443".to_socket_addrs().unwrap();
+/// assert_eq!(addrs_iter.next(), Some(SocketAddr::from(([127, 0, 0, 1], 443))));
+/// assert!(addrs_iter.next().is_none());
+///
+/// // assuming 'foo' does not resolve
+/// assert!("foo:443".to_socket_addrs().is_err());
+/// ```
+///
+/// Creating a [`SocketAddr`] iterator that yields multiple items:
+///
+/// ```
+/// use std::net::{SocketAddr, ToSocketAddrs};
+///
+/// let addr1 = SocketAddr::from(([0, 0, 0, 0], 80));
+/// let addr2 = SocketAddr::from(([127, 0, 0, 1], 443));
+/// let addrs = vec![addr1, addr2];
+///
+/// let mut addrs_iter = (&addrs[..]).to_socket_addrs().unwrap();
+///
+/// assert_eq!(Some(addr1), addrs_iter.next());
+/// assert_eq!(Some(addr2), addrs_iter.next());
+/// assert!(addrs_iter.next().is_none());
+/// ```
+///
+/// Attempting to create a [`SocketAddr`] iterator from an improperly formatted
+/// socket address `&str` (missing the port):
+///
+/// ```
+/// use std::io;
+/// use std::net::ToSocketAddrs;
+///
+/// let err = "127.0.0.1".to_socket_addrs().unwrap_err();
+/// assert_eq!(err.kind(), io::ErrorKind::InvalidInput);
+/// ```
+///
+/// [`TcpStream::connect`] is an example of a function that utilizes
+/// `ToSocketAddrs` as a trait bound on its parameter in order to accept
+/// different types:
+///
+/// ```no_run
+/// use std::net::{TcpStream, Ipv4Addr};
+///
+/// let stream = TcpStream::connect(("127.0.0.1", 443));
+/// // or
+/// let stream = TcpStream::connect("127.0.0.1:443");
+/// // or
+/// let stream = TcpStream::connect((Ipv4Addr::new(127, 0, 0, 1), 443));
+/// ```
+///
+/// [`TcpStream::connect`]: crate::net::TcpStream::connect
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait ToSocketAddrs {
+ /// Returned iterator over socket addresses which this type may correspond
+ /// to.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ type Iter: Iterator<Item = SocketAddr>;
+
+ /// Converts this object to an iterator of resolved [`SocketAddr`]s.
+ ///
+ /// The returned iterator might not actually yield any values depending on the
+ /// outcome of any resolution performed.
+ ///
+ /// Note that this function may block the current thread while resolution is
+ /// performed.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn to_socket_addrs(&self) -> io::Result<Self::Iter>;
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ToSocketAddrs for SocketAddr {
+ type Iter = option::IntoIter<SocketAddr>;
+ fn to_socket_addrs(&self) -> io::Result<option::IntoIter<SocketAddr>> {
+ Ok(Some(*self).into_iter())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ToSocketAddrs for SocketAddrV4 {
+ type Iter = option::IntoIter<SocketAddr>;
+ fn to_socket_addrs(&self) -> io::Result<option::IntoIter<SocketAddr>> {
+ SocketAddr::V4(*self).to_socket_addrs()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ToSocketAddrs for SocketAddrV6 {
+ type Iter = option::IntoIter<SocketAddr>;
+ fn to_socket_addrs(&self) -> io::Result<option::IntoIter<SocketAddr>> {
+ SocketAddr::V6(*self).to_socket_addrs()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ToSocketAddrs for (IpAddr, u16) {
+ type Iter = option::IntoIter<SocketAddr>;
+ fn to_socket_addrs(&self) -> io::Result<option::IntoIter<SocketAddr>> {
+ let (ip, port) = *self;
+ match ip {
+ IpAddr::V4(ref a) => (*a, port).to_socket_addrs(),
+ IpAddr::V6(ref a) => (*a, port).to_socket_addrs(),
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ToSocketAddrs for (Ipv4Addr, u16) {
+ type Iter = option::IntoIter<SocketAddr>;
+ fn to_socket_addrs(&self) -> io::Result<option::IntoIter<SocketAddr>> {
+ let (ip, port) = *self;
+ SocketAddrV4::new(ip, port).to_socket_addrs()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ToSocketAddrs for (Ipv6Addr, u16) {
+ type Iter = option::IntoIter<SocketAddr>;
+ fn to_socket_addrs(&self) -> io::Result<option::IntoIter<SocketAddr>> {
+ let (ip, port) = *self;
+ SocketAddrV6::new(ip, port, 0, 0).to_socket_addrs()
+ }
+}
+
+fn resolve_socket_addr(lh: LookupHost) -> io::Result<vec::IntoIter<SocketAddr>> {
+ let p = lh.port();
+ let v: Vec<_> = lh
+ .map(|mut a| {
+ a.set_port(p);
+ a
+ })
+ .collect();
+ Ok(v.into_iter())
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ToSocketAddrs for (&str, u16) {
+ type Iter = vec::IntoIter<SocketAddr>;
+ fn to_socket_addrs(&self) -> io::Result<vec::IntoIter<SocketAddr>> {
+ let (host, port) = *self;
+
+ // try to parse the host as a regular IP address first
+ if let Ok(addr) = host.parse::<Ipv4Addr>() {
+ let addr = SocketAddrV4::new(addr, port);
+ return Ok(vec![SocketAddr::V4(addr)].into_iter());
+ }
+ if let Ok(addr) = host.parse::<Ipv6Addr>() {
+ let addr = SocketAddrV6::new(addr, port, 0, 0);
+ return Ok(vec![SocketAddr::V6(addr)].into_iter());
+ }
+
+ resolve_socket_addr((host, port).try_into()?)
+ }
+}
+
+#[stable(feature = "string_u16_to_socket_addrs", since = "1.46.0")]
+impl ToSocketAddrs for (String, u16) {
+ type Iter = vec::IntoIter<SocketAddr>;
+ fn to_socket_addrs(&self) -> io::Result<vec::IntoIter<SocketAddr>> {
+ (&*self.0, self.1).to_socket_addrs()
+ }
+}
+
+// accepts strings like 'localhost:12345'
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ToSocketAddrs for str {
+ type Iter = vec::IntoIter<SocketAddr>;
+ fn to_socket_addrs(&self) -> io::Result<vec::IntoIter<SocketAddr>> {
+ // try to parse as a regular SocketAddr first
+ if let Ok(addr) = self.parse() {
+ return Ok(vec![addr].into_iter());
+ }
+
+ resolve_socket_addr(self.try_into()?)
+ }
+}
+
+#[stable(feature = "slice_to_socket_addrs", since = "1.8.0")]
+impl<'a> ToSocketAddrs for &'a [SocketAddr] {
+ type Iter = iter::Cloned<slice::Iter<'a, SocketAddr>>;
+
+ fn to_socket_addrs(&self) -> io::Result<Self::Iter> {
+ Ok(self.iter().cloned())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ToSocketAddrs + ?Sized> ToSocketAddrs for &T {
+ type Iter = T::Iter;
+ fn to_socket_addrs(&self) -> io::Result<T::Iter> {
+ (**self).to_socket_addrs()
+ }
+}
+
+#[stable(feature = "string_to_socket_addrs", since = "1.16.0")]
+impl ToSocketAddrs for String {
+ type Iter = vec::IntoIter<SocketAddr>;
+ fn to_socket_addrs(&self) -> io::Result<vec::IntoIter<SocketAddr>> {
+ (&**self).to_socket_addrs()
+ }
+}
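
// Standalone sketch, not part of the patch above: consuming `ToSocketAddrs` as a
// trait bound, the same way `TcpStream::connect` does. `first_addr` is a hypothetical
// helper used only for illustration.

use std::io;
use std::net::{SocketAddr, ToSocketAddrs};

fn first_addr<A: ToSocketAddrs>(addr: A) -> io::Result<SocketAddr> {
    addr.to_socket_addrs()?
        .next()
        .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidInput, "no addresses resolved"))
}

fn main() -> io::Result<()> {
    // Tuples, string slices and ready-made SocketAddrs are all accepted.
    assert_eq!(first_addr(("127.0.0.1", 443))?, SocketAddr::from(([127, 0, 0, 1], 443)));
    assert_eq!(first_addr("127.0.0.1:443")?, SocketAddr::from(([127, 0, 0, 1], 443)));
    Ok(())
}
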
diff --git a/library/std/src/net/socket_addr/tests.rs b/library/std/src/net/socket_addr/tests.rs
new file mode 100644
index 000000000..15211f819
--- /dev/null
+++ b/library/std/src/net/socket_addr/tests.rs
@@ -0,0 +1,306 @@
+use crate::net::test::{sa4, sa6, tsa};
+use crate::net::*;
+
+#[test]
+fn to_socket_addr_ipaddr_u16() {
+ let a = Ipv4Addr::new(77, 88, 21, 11);
+ let p = 12345;
+ let e = SocketAddr::V4(SocketAddrV4::new(a, p));
+ assert_eq!(Ok(vec![e]), tsa((a, p)));
+}
+
+#[test]
+fn to_socket_addr_str_u16() {
+ let a = sa4(Ipv4Addr::new(77, 88, 21, 11), 24352);
+ assert_eq!(Ok(vec![a]), tsa(("77.88.21.11", 24352)));
+
+ let a = sa6(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 53);
+ assert_eq!(Ok(vec![a]), tsa(("2a02:6b8:0:1::1", 53)));
+
+ let a = sa4(Ipv4Addr::new(127, 0, 0, 1), 23924);
+ #[cfg(not(target_env = "sgx"))]
+ assert!(tsa(("localhost", 23924)).unwrap().contains(&a));
+ #[cfg(target_env = "sgx")]
+ let _ = a;
+}
+
+#[test]
+fn to_socket_addr_str() {
+ let a = sa4(Ipv4Addr::new(77, 88, 21, 11), 24352);
+ assert_eq!(Ok(vec![a]), tsa("77.88.21.11:24352"));
+
+ let a = sa6(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 53);
+ assert_eq!(Ok(vec![a]), tsa("[2a02:6b8:0:1::1]:53"));
+
+ let a = sa4(Ipv4Addr::new(127, 0, 0, 1), 23924);
+ #[cfg(not(target_env = "sgx"))]
+ assert!(tsa("localhost:23924").unwrap().contains(&a));
+ #[cfg(target_env = "sgx")]
+ let _ = a;
+}
+
+#[test]
+fn to_socket_addr_string() {
+ let a = sa4(Ipv4Addr::new(77, 88, 21, 11), 24352);
+ assert_eq!(Ok(vec![a]), tsa(&*format!("{}:{}", "77.88.21.11", "24352")));
+ assert_eq!(Ok(vec![a]), tsa(&format!("{}:{}", "77.88.21.11", "24352")));
+ assert_eq!(Ok(vec![a]), tsa(format!("{}:{}", "77.88.21.11", "24352")));
+
+ let s = format!("{}:{}", "77.88.21.11", "24352");
+ assert_eq!(Ok(vec![a]), tsa(s));
+ // s has been moved into the tsa call
+}
+
+#[test]
+fn ipv4_socket_addr_to_string() {
+ // Shortest possible IPv4 length.
+ assert_eq!(SocketAddrV4::new(Ipv4Addr::new(0, 0, 0, 0), 0).to_string(), "0.0.0.0:0");
+
+ // Longest possible IPv4 length.
+ assert_eq!(
+ SocketAddrV4::new(Ipv4Addr::new(255, 255, 255, 255), u16::MAX).to_string(),
+ "255.255.255.255:65535"
+ );
+
+ // Test padding.
+ assert_eq!(
+ &format!("{:16}", SocketAddrV4::new(Ipv4Addr::new(1, 1, 1, 1), 53)),
+ "1.1.1.1:53 "
+ );
+ assert_eq!(
+ &format!("{:>16}", SocketAddrV4::new(Ipv4Addr::new(1, 1, 1, 1), 53)),
+ " 1.1.1.1:53"
+ );
+}
+
+#[test]
+fn ipv6_socket_addr_to_string() {
+ // IPv4-mapped address.
+ assert_eq!(
+ SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc000, 0x280), 8080, 0, 0)
+ .to_string(),
+ "[::ffff:192.0.2.128]:8080"
+ );
+
+ // IPv4-compatible address.
+ assert_eq!(
+ SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0xc000, 0x280), 8080, 0, 0).to_string(),
+ "[::192.0.2.128]:8080"
+ );
+
+ // IPv6 address with no zero segments.
+ assert_eq!(
+ SocketAddrV6::new(Ipv6Addr::new(8, 9, 10, 11, 12, 13, 14, 15), 80, 0, 0).to_string(),
+ "[8:9:a:b:c:d:e:f]:80"
+ );
+
+ // Shortest possible IPv6 length.
+ assert_eq!(SocketAddrV6::new(Ipv6Addr::UNSPECIFIED, 0, 0, 0).to_string(), "[::]:0");
+
+ // Longest possible IPv6 length.
+ assert_eq!(
+ SocketAddrV6::new(
+ Ipv6Addr::new(0x1111, 0x2222, 0x3333, 0x4444, 0x5555, 0x6666, 0x7777, 0x8888),
+ u16::MAX,
+ u32::MAX,
+ u32::MAX,
+ )
+ .to_string(),
+ "[1111:2222:3333:4444:5555:6666:7777:8888%4294967295]:65535"
+ );
+
+ // Test padding.
+ assert_eq!(
+ &format!("{:22}", SocketAddrV6::new(Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8), 9, 0, 0)),
+ "[1:2:3:4:5:6:7:8]:9 "
+ );
+ assert_eq!(
+ &format!("{:>22}", SocketAddrV6::new(Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8), 9, 0, 0)),
+ " [1:2:3:4:5:6:7:8]:9"
+ );
+}
+
+#[test]
+fn bind_udp_socket_bad() {
+ // rust-lang/rust#53957: This is a regression test for a parsing problem
+ // discovered as part of issue rust-lang/rust#23076, where we were
+ // incorrectly parsing invalid input and then that would result in a
+ // successful `UdpSocket` binding when we would expect failure.
+ //
+ // At one time, this test was written as a call to `tsa` with
+ // INPUT_23076. However, that structure yields an unreliable test,
+ // because it ends up passing junk input to the DNS server, and some DNS
+ // servers will respond with `Ok` to such input, with the ip address of
+ // the DNS server itself.
+ //
+ // This form of the test is more robust: even when the DNS server
+ // returns its own address, it is still an error to bind a UDP socket to
+ // a non-local address, and so we still get an error here in that case.
+
+ const INPUT_23076: &str = "1200::AB00:1234::2552:7777:1313:34300";
+
+ assert!(crate::net::UdpSocket::bind(INPUT_23076).is_err())
+}
+
+#[test]
+fn set_ip() {
+ fn ip4(low: u8) -> Ipv4Addr {
+ Ipv4Addr::new(77, 88, 21, low)
+ }
+ fn ip6(low: u16) -> Ipv6Addr {
+ Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, low)
+ }
+
+ let mut v4 = SocketAddrV4::new(ip4(11), 80);
+ assert_eq!(v4.ip(), &ip4(11));
+ v4.set_ip(ip4(12));
+ assert_eq!(v4.ip(), &ip4(12));
+
+ let mut addr = SocketAddr::V4(v4);
+ assert_eq!(addr.ip(), IpAddr::V4(ip4(12)));
+ addr.set_ip(IpAddr::V4(ip4(13)));
+ assert_eq!(addr.ip(), IpAddr::V4(ip4(13)));
+ addr.set_ip(IpAddr::V6(ip6(14)));
+ assert_eq!(addr.ip(), IpAddr::V6(ip6(14)));
+
+ let mut v6 = SocketAddrV6::new(ip6(1), 80, 0, 0);
+ assert_eq!(v6.ip(), &ip6(1));
+ v6.set_ip(ip6(2));
+ assert_eq!(v6.ip(), &ip6(2));
+
+ let mut addr = SocketAddr::V6(v6);
+ assert_eq!(addr.ip(), IpAddr::V6(ip6(2)));
+ addr.set_ip(IpAddr::V6(ip6(3)));
+ assert_eq!(addr.ip(), IpAddr::V6(ip6(3)));
+ addr.set_ip(IpAddr::V4(ip4(4)));
+ assert_eq!(addr.ip(), IpAddr::V4(ip4(4)));
+}
+
+#[test]
+fn set_port() {
+ let mut v4 = SocketAddrV4::new(Ipv4Addr::new(77, 88, 21, 11), 80);
+ assert_eq!(v4.port(), 80);
+ v4.set_port(443);
+ assert_eq!(v4.port(), 443);
+
+ let mut addr = SocketAddr::V4(v4);
+ assert_eq!(addr.port(), 443);
+ addr.set_port(8080);
+ assert_eq!(addr.port(), 8080);
+
+ let mut v6 = SocketAddrV6::new(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 80, 0, 0);
+ assert_eq!(v6.port(), 80);
+ v6.set_port(443);
+ assert_eq!(v6.port(), 443);
+
+ let mut addr = SocketAddr::V6(v6);
+ assert_eq!(addr.port(), 443);
+ addr.set_port(8080);
+ assert_eq!(addr.port(), 8080);
+}
+
+#[test]
+fn set_flowinfo() {
+ let mut v6 = SocketAddrV6::new(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 80, 10, 0);
+ assert_eq!(v6.flowinfo(), 10);
+ v6.set_flowinfo(20);
+ assert_eq!(v6.flowinfo(), 20);
+}
+
+#[test]
+fn set_scope_id() {
+ let mut v6 = SocketAddrV6::new(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 80, 0, 10);
+ assert_eq!(v6.scope_id(), 10);
+ v6.set_scope_id(20);
+ assert_eq!(v6.scope_id(), 20);
+}
+
+#[test]
+fn is_v4() {
+ let v4 = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(77, 88, 21, 11), 80));
+ assert!(v4.is_ipv4());
+ assert!(!v4.is_ipv6());
+}
+
+#[test]
+fn is_v6() {
+ let v6 = SocketAddr::V6(SocketAddrV6::new(
+ Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1),
+ 80,
+ 10,
+ 0,
+ ));
+ assert!(!v6.is_ipv4());
+ assert!(v6.is_ipv6());
+}
+
+#[test]
+fn socket_v4_to_str() {
+ let socket = SocketAddrV4::new(Ipv4Addr::new(192, 168, 0, 1), 8080);
+
+ assert_eq!(format!("{socket}"), "192.168.0.1:8080");
+ assert_eq!(format!("{socket:<20}"), "192.168.0.1:8080 ");
+ assert_eq!(format!("{socket:>20}"), " 192.168.0.1:8080");
+ assert_eq!(format!("{socket:^20}"), " 192.168.0.1:8080 ");
+ assert_eq!(format!("{socket:.10}"), "192.168.0.");
+}
+
+#[test]
+fn socket_v6_to_str() {
+ let mut socket = SocketAddrV6::new(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 53, 0, 0);
+
+ assert_eq!(format!("{socket}"), "[2a02:6b8:0:1::1]:53");
+ assert_eq!(format!("{socket:<24}"), "[2a02:6b8:0:1::1]:53 ");
+ assert_eq!(format!("{socket:>24}"), " [2a02:6b8:0:1::1]:53");
+ assert_eq!(format!("{socket:^24}"), " [2a02:6b8:0:1::1]:53 ");
+ assert_eq!(format!("{socket:.15}"), "[2a02:6b8:0:1::");
+
+ socket.set_scope_id(5);
+
+ assert_eq!(format!("{socket}"), "[2a02:6b8:0:1::1%5]:53");
+ assert_eq!(format!("{socket:<24}"), "[2a02:6b8:0:1::1%5]:53 ");
+ assert_eq!(format!("{socket:>24}"), " [2a02:6b8:0:1::1%5]:53");
+ assert_eq!(format!("{socket:^24}"), " [2a02:6b8:0:1::1%5]:53 ");
+ assert_eq!(format!("{socket:.18}"), "[2a02:6b8:0:1::1%5");
+}
+
+#[test]
+fn compare() {
+ let v4_1 = "224.120.45.1:23456".parse::<SocketAddrV4>().unwrap();
+ let v4_2 = "224.210.103.5:12345".parse::<SocketAddrV4>().unwrap();
+ let v4_3 = "224.210.103.5:23456".parse::<SocketAddrV4>().unwrap();
+ let v6_1 = "[2001:db8:f00::1002]:23456".parse::<SocketAddrV6>().unwrap();
+ let v6_2 = "[2001:db8:f00::2001]:12345".parse::<SocketAddrV6>().unwrap();
+ let v6_3 = "[2001:db8:f00::2001]:23456".parse::<SocketAddrV6>().unwrap();
+
+ // equality
+ assert_eq!(v4_1, v4_1);
+ assert_eq!(v6_1, v6_1);
+ assert_eq!(SocketAddr::V4(v4_1), SocketAddr::V4(v4_1));
+ assert_eq!(SocketAddr::V6(v6_1), SocketAddr::V6(v6_1));
+ assert!(v4_1 != v4_2);
+ assert!(v6_1 != v6_2);
+
+ // compare different addresses
+ assert!(v4_1 < v4_2);
+ assert!(v6_1 < v6_2);
+ assert!(v4_2 > v4_1);
+ assert!(v6_2 > v6_1);
+
+ // compare the same address with different ports
+ assert!(v4_2 < v4_3);
+ assert!(v6_2 < v6_3);
+ assert!(v4_3 > v4_2);
+ assert!(v6_3 > v6_2);
+
+ // compare different addresses with the same port
+ assert!(v4_1 < v4_3);
+ assert!(v6_1 < v6_3);
+ assert!(v4_3 > v4_1);
+ assert!(v6_3 > v6_1);
+
+ // compare with an inferred right-hand side
+ assert_eq!(v4_1, "224.120.45.1:23456".parse().unwrap());
+ assert_eq!(v6_1, "[2001:db8:f00::1002]:23456".parse().unwrap());
+ assert_eq!(SocketAddr::V4(v4_1), "224.120.45.1:23456".parse().unwrap());
+}
diff --git a/library/std/src/os/android/mod.rs b/library/std/src/os/android/mod.rs
index dbb0127f3..5adcb82b6 100644
--- a/library/std/src/os/android/mod.rs
+++ b/library/std/src/os/android/mod.rs
@@ -3,4 +3,5 @@
#![stable(feature = "raw_ext", since = "1.1.0")]
pub mod fs;
+pub mod net;
pub mod raw;
diff --git a/library/std/src/os/android/net.rs b/library/std/src/os/android/net.rs
new file mode 100644
index 000000000..ff96125c3
--- /dev/null
+++ b/library/std/src/os/android/net.rs
@@ -0,0 +1,4 @@
+//! Linux and Android-specific definitions for socket options.
+
+#![unstable(feature = "tcp_quickack", issue = "96256")]
+pub use crate::os::net::tcp::TcpStreamExt;
diff --git a/library/std/src/os/fd/mod.rs b/library/std/src/os/fd/mod.rs
index a45694753..c6aa7c77d 100644
--- a/library/std/src/os/fd/mod.rs
+++ b/library/std/src/os/fd/mod.rs
@@ -1,16 +1,25 @@
//! Owned and borrowed Unix-like file descriptors.
+//!
+//! This module is supported on Unix platforms and WASI, which both use a
+//! similar file descriptor system for referencing OS resources.
#![stable(feature = "io_safety", since = "1.63.0")]
#![deny(unsafe_op_in_unsafe_fn)]
// `RawFd`, `AsRawFd`, etc.
-pub mod raw;
+mod raw;
// `OwnedFd`, `AsFd`, etc.
-pub mod owned;
+mod owned;
// Implementations for `AsRawFd` etc. for network types.
mod net;
#[cfg(test)]
mod tests;
+
+// Export the types and traits for the public API.
+#[unstable(feature = "os_fd", issue = "98699")]
+pub use owned::*;
+#[unstable(feature = "os_fd", issue = "98699")]
+pub use raw::*;
diff --git a/library/std/src/os/fd/owned.rs b/library/std/src/os/fd/owned.rs
index a463bc41d..c16518577 100644
--- a/library/std/src/os/fd/owned.rs
+++ b/library/std/src/os/fd/owned.rs
@@ -6,6 +6,7 @@
use super::raw::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
use crate::fmt;
use crate::fs;
+use crate::io;
use crate::marker::PhantomData;
use crate::mem::forget;
#[cfg(not(any(target_arch = "wasm32", target_env = "sgx")))]
@@ -104,7 +105,8 @@ impl BorrowedFd<'_> {
#[cfg(target_os = "espidf")]
let cmd = libc::F_DUPFD;
- let fd = cvt(unsafe { libc::fcntl(self.as_raw_fd(), cmd, 0) })?;
+ // Avoid using file descriptors below 3 as they are used for stdio
+ let fd = cvt(unsafe { libc::fcntl(self.as_raw_fd(), cmd, 3) })?;
Ok(unsafe { OwnedFd::from_raw_fd(fd) })
}
@@ -191,6 +193,23 @@ impl fmt::Debug for OwnedFd {
}
}
+macro_rules! impl_is_terminal {
+ ($($t:ty),*$(,)?) => {$(
+ #[unstable(feature = "sealed", issue = "none")]
+ impl crate::sealed::Sealed for $t {}
+
+ #[unstable(feature = "is_terminal", issue = "98070")]
+ impl crate::io::IsTerminal for $t {
+ #[inline]
+ fn is_terminal(&self) -> bool {
+ crate::sys::io::is_terminal(self)
+ }
+ }
+ )*}
+}
+
+impl_is_terminal!(BorrowedFd<'_>, OwnedFd);
+
/// A trait to borrow the file descriptor from an underlying object.
///
/// This is only available on unix platforms and must be imported in order to
@@ -205,10 +224,8 @@ pub trait AsFd {
/// ```rust,no_run
/// use std::fs::File;
/// # use std::io;
- /// # #[cfg(target_os = "wasi")]
- /// # use std::os::wasi::io::{AsFd, BorrowedFd};
- /// # #[cfg(unix)]
- /// # use std::os::unix::io::{AsFd, BorrowedFd};
+ /// # #[cfg(any(unix, target_os = "wasi"))]
+ /// # use std::os::fd::{AsFd, BorrowedFd};
///
/// let mut f = File::open("foo.txt")?;
/// # #[cfg(any(unix, target_os = "wasi"))]
@@ -386,3 +403,54 @@ impl<T: AsFd> AsFd for Box<T> {
(**self).as_fd()
}
}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsFd for io::Stdin {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ unsafe { BorrowedFd::borrow_raw(0) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl<'a> AsFd for io::StdinLock<'a> {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ // SAFETY: user code should not close stdin out from under the standard library
+ unsafe { BorrowedFd::borrow_raw(0) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsFd for io::Stdout {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ unsafe { BorrowedFd::borrow_raw(1) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl<'a> AsFd for io::StdoutLock<'a> {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ // SAFETY: user code should not close stdout out from under the standard library
+ unsafe { BorrowedFd::borrow_raw(1) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsFd for io::Stderr {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ unsafe { BorrowedFd::borrow_raw(2) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl<'a> AsFd for io::StderrLock<'a> {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ // SAFETY: user code should not close stderr out from under the standard library
+ unsafe { BorrowedFd::borrow_raw(2) }
+ }
+}
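
// Standalone sketch, not part of the patch above: with the new impls, the standard
// streams can be handed to anything that takes `impl AsFd` on Unix, without juggling
// raw descriptors by hand.

use std::io;
use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd};

fn describe(fd: BorrowedFd<'_>) -> String {
    format!("fd {}", fd.as_raw_fd())
}

fn main() {
    assert_eq!(describe(io::stdin().as_fd()), "fd 0");
    assert_eq!(describe(io::stdout().as_fd()), "fd 1");
    assert_eq!(describe(io::stderr().as_fd()), "fd 2");
}
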
diff --git a/library/std/src/os/fd/raw.rs b/library/std/src/os/fd/raw.rs
index 081915ed1..f92a05066 100644
--- a/library/std/src/os/fd/raw.rs
+++ b/library/std/src/os/fd/raw.rs
@@ -14,7 +14,7 @@ use crate::os::wasi::io::OwnedFd;
use crate::sys_common::{AsInner, IntoInner};
/// Raw file descriptors.
-#[cfg_attr(not(bootstrap), rustc_allowed_through_unstable_modules)]
+#[rustc_allowed_through_unstable_modules]
#[stable(feature = "rust1", since = "1.0.0")]
pub type RawFd = raw::c_int;
@@ -23,7 +23,7 @@ pub type RawFd = raw::c_int;
/// This is only available on unix and WASI platforms and must be imported in
/// order to call the method. Windows platforms have a corresponding
/// `AsRawHandle` and `AsRawSocket` set of traits.
-#[cfg_attr(not(bootstrap), rustc_allowed_through_unstable_modules)]
+#[rustc_allowed_through_unstable_modules]
#[stable(feature = "rust1", since = "1.0.0")]
pub trait AsRawFd {
/// Extracts the raw file descriptor.
@@ -42,10 +42,8 @@ pub trait AsRawFd {
/// ```no_run
/// use std::fs::File;
/// # use std::io;
- /// #[cfg(unix)]
- /// use std::os::unix::io::{AsRawFd, RawFd};
- /// #[cfg(target_os = "wasi")]
- /// use std::os::wasi::io::{AsRawFd, RawFd};
+ /// #[cfg(any(unix, target_os = "wasi"))]
+ /// use std::os::fd::{AsRawFd, RawFd};
///
/// let mut f = File::open("foo.txt")?;
/// // Note that `raw_fd` is only valid as long as `f` exists.
@@ -59,7 +57,7 @@ pub trait AsRawFd {
/// A trait to express the ability to construct an object from a raw file
/// descriptor.
-#[cfg_attr(not(bootstrap), rustc_allowed_through_unstable_modules)]
+#[rustc_allowed_through_unstable_modules]
#[stable(feature = "from_raw_os", since = "1.1.0")]
pub trait FromRawFd {
/// Constructs a new instance of `Self` from the given raw file
@@ -83,10 +81,8 @@ pub trait FromRawFd {
/// ```no_run
/// use std::fs::File;
/// # use std::io;
- /// #[cfg(unix)]
- /// use std::os::unix::io::{FromRawFd, IntoRawFd, RawFd};
- /// #[cfg(target_os = "wasi")]
- /// use std::os::wasi::io::{FromRawFd, IntoRawFd, RawFd};
+ /// #[cfg(any(unix, target_os = "wasi"))]
+ /// use std::os::fd::{FromRawFd, IntoRawFd, RawFd};
///
/// let f = File::open("foo.txt")?;
/// # #[cfg(any(unix, target_os = "wasi"))]
@@ -103,7 +99,7 @@ pub trait FromRawFd {
/// A trait to express the ability to consume an object and acquire ownership of
/// its raw file descriptor.
-#[cfg_attr(not(bootstrap), rustc_allowed_through_unstable_modules)]
+#[rustc_allowed_through_unstable_modules]
#[stable(feature = "into_raw_os", since = "1.4.0")]
pub trait IntoRawFd {
/// Consumes this object, returning the raw underlying file descriptor.
@@ -121,10 +117,8 @@ pub trait IntoRawFd {
/// ```no_run
/// use std::fs::File;
/// # use std::io;
- /// #[cfg(unix)]
- /// use std::os::unix::io::{IntoRawFd, RawFd};
- /// #[cfg(target_os = "wasi")]
- /// use std::os::wasi::io::{IntoRawFd, RawFd};
+ /// #[cfg(any(unix, target_os = "wasi"))]
+ /// use std::os::fd::{IntoRawFd, RawFd};
///
/// let f = File::open("foo.txt")?;
/// #[cfg(any(unix, target_os = "wasi"))]
diff --git a/library/std/src/os/fortanix_sgx/mod.rs b/library/std/src/os/fortanix_sgx/mod.rs
index a40dabe19..39a42f4e1 100644
--- a/library/std/src/os/fortanix_sgx/mod.rs
+++ b/library/std/src/os/fortanix_sgx/mod.rs
@@ -26,10 +26,13 @@ pub mod usercalls {
free, insecure_time, launch_thread, read, read_alloc, send, wait, write,
};
pub use crate::sys::abi::usercalls::raw::{do_usercall, Usercalls as UsercallNrs};
+ pub use crate::sys::abi::usercalls::raw::{Register, RegisterArgument, ReturnValue};
// fortanix-sgx-abi re-exports
pub use crate::sys::abi::usercalls::raw::Error;
- pub use crate::sys::abi::usercalls::raw::{ByteBuffer, FifoDescriptor, Return, Usercall};
+ pub use crate::sys::abi::usercalls::raw::{
+ ByteBuffer, Cancel, FifoDescriptor, Return, Usercall,
+ };
pub use crate::sys::abi::usercalls::raw::{Fd, Result, Tcs};
pub use crate::sys::abi::usercalls::raw::{
EV_RETURNQ_NOT_EMPTY, EV_UNPARK, EV_USERCALLQ_NOT_FULL, FD_STDERR, FD_STDIN, FD_STDOUT,
diff --git a/library/std/src/os/linux/mod.rs b/library/std/src/os/linux/mod.rs
index 8e7776f66..c17053011 100644
--- a/library/std/src/os/linux/mod.rs
+++ b/library/std/src/os/linux/mod.rs
@@ -4,5 +4,6 @@
#![doc(cfg(target_os = "linux"))]
pub mod fs;
+pub mod net;
pub mod process;
pub mod raw;
diff --git a/library/std/src/os/linux/net.rs b/library/std/src/os/linux/net.rs
new file mode 100644
index 000000000..ff96125c3
--- /dev/null
+++ b/library/std/src/os/linux/net.rs
@@ -0,0 +1,4 @@
+//! Linux and Android-specific definitions for socket options.
+
+#![unstable(feature = "tcp_quickack", issue = "96256")]
+pub use crate::os::net::tcp::TcpStreamExt;
diff --git a/library/std/src/os/mod.rs b/library/std/src/os/mod.rs
index 6fbaa42c7..42773805c 100644
--- a/library/std/src/os/mod.rs
+++ b/library/std/src/os/mod.rs
@@ -145,6 +145,11 @@ pub mod solaris;
pub mod solid;
#[cfg(target_os = "vxworks")]
pub mod vxworks;
+#[cfg(target_os = "watchos")]
+pub(crate) mod watchos;
#[cfg(any(unix, target_os = "wasi", doc))]
-mod fd;
+pub mod fd;
+
+#[cfg(any(target_os = "linux", target_os = "android", doc))]
+mod net;
diff --git a/library/std/src/os/net/mod.rs b/library/std/src/os/net/mod.rs
new file mode 100644
index 000000000..d6d84d24e
--- /dev/null
+++ b/library/std/src/os/net/mod.rs
@@ -0,0 +1,7 @@
+//! Linux and Android-specific definitions for socket options.
+
+#![unstable(feature = "tcp_quickack", issue = "96256")]
+#![doc(cfg(any(target_os = "linux", target_os = "android",)))]
+pub mod tcp;
+#[cfg(test)]
+mod tests;
diff --git a/library/std/src/os/net/tcp.rs b/library/std/src/os/net/tcp.rs
new file mode 100644
index 000000000..5e9ee65a4
--- /dev/null
+++ b/library/std/src/os/net/tcp.rs
@@ -0,0 +1,70 @@
+//! Linux and Android-specific TCP extensions to primitives in the [`std::net`] module.
+//!
+//! [`std::net`]: crate::net
+
+use crate::io;
+use crate::net;
+use crate::sealed::Sealed;
+use crate::sys_common::AsInner;
+
+/// OS-specific extensions for [`TcpStream`].
+///
+/// [`TcpStream`]: net::TcpStream
+#[unstable(feature = "tcp_quickack", issue = "96256")]
+pub trait TcpStreamExt: Sealed {
+ /// Enable or disable `TCP_QUICKACK`.
+ ///
+ /// This flag causes Linux to eagerly send ACKs rather than delaying them.
+ /// Linux may reset this flag after further operations on the socket.
+ ///
+ /// See [`man 7 tcp`](https://man7.org/linux/man-pages/man7/tcp.7.html) and
+ /// [TCP delayed acknowledgement](https://en.wikipedia.org/wiki/TCP_delayed_acknowledgment)
+ /// for more information.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// #![feature(tcp_quickack)]
+ /// use std::net::TcpStream;
+ /// use std::os::linux::net::TcpStreamExt;
+ ///
+ /// let stream = TcpStream::connect("127.0.0.1:8080")
+ /// .expect("Couldn't connect to the server...");
+ /// stream.set_quickack(true).expect("set_quickack call failed");
+ /// ```
+ #[unstable(feature = "tcp_quickack", issue = "96256")]
+ fn set_quickack(&self, quickack: bool) -> io::Result<()>;
+
+ /// Gets the value of the `TCP_QUICKACK` option on this socket.
+ ///
+ /// For more information about this option, see [`TcpStreamExt::set_quickack`].
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// #![feature(tcp_quickack)]
+ /// use std::net::TcpStream;
+ /// use std::os::linux::net::TcpStreamExt;
+ ///
+ /// let stream = TcpStream::connect("127.0.0.1:8080")
+ /// .expect("Couldn't connect to the server...");
+ /// stream.set_quickack(true).expect("set_quickack call failed");
+ /// assert_eq!(stream.quickack().unwrap_or(false), true);
+ /// ```
+ #[unstable(feature = "tcp_quickack", issue = "96256")]
+ fn quickack(&self) -> io::Result<bool>;
+}
+
+#[unstable(feature = "tcp_quickack", issue = "96256")]
+impl Sealed for net::TcpStream {}
+
+#[unstable(feature = "tcp_quickack", issue = "96256")]
+impl TcpStreamExt for net::TcpStream {
+ fn set_quickack(&self, quickack: bool) -> io::Result<()> {
+ self.as_inner().as_inner().set_quickack(quickack)
+ }
+
+ fn quickack(&self) -> io::Result<bool> {
+ self.as_inner().as_inner().quickack()
+ }
+}
diff --git a/library/std/src/os/net/tests.rs b/library/std/src/os/net/tests.rs
new file mode 100644
index 000000000..4704e3156
--- /dev/null
+++ b/library/std/src/os/net/tests.rs
@@ -0,0 +1,29 @@
+#[cfg(any(target_os = "android", target_os = "linux",))]
+#[test]
+fn quickack() {
+ use crate::{
+ net::{test::next_test_ip4, TcpListener, TcpStream},
+ os::net::tcp::TcpStreamExt,
+ };
+
+ macro_rules! t {
+ ($e:expr) => {
+ match $e {
+ Ok(t) => t,
+ Err(e) => panic!("received error for `{}`: {}", stringify!($e), e),
+ }
+ };
+ }
+
+ let addr = next_test_ip4();
+ let _listener = t!(TcpListener::bind(&addr));
+
+ let stream = t!(TcpStream::connect(&("localhost", addr.port())));
+
+ t!(stream.set_quickack(false));
+ assert_eq!(false, t!(stream.quickack()));
+ t!(stream.set_quickack(true));
+ assert_eq!(true, t!(stream.quickack()));
+ t!(stream.set_quickack(false));
+ assert_eq!(false, t!(stream.quickack()));
+}
diff --git a/library/std/src/os/unix/io/fd.rs b/library/std/src/os/unix/io/fd.rs
deleted file mode 100644
index d4cb69645..000000000
--- a/library/std/src/os/unix/io/fd.rs
+++ /dev/null
@@ -1,8 +0,0 @@
-//! Owned and borrowed file descriptors.
-
-// Tests for this module
-#[cfg(test)]
-mod tests;
-
-#[stable(feature = "io_safety", since = "1.63.0")]
-pub use crate::os::fd::owned::*;
diff --git a/library/std/src/os/unix/io/mod.rs b/library/std/src/os/unix/io/mod.rs
index 3ab5606f8..25b5dbff1 100644
--- a/library/std/src/os/unix/io/mod.rs
+++ b/library/std/src/os/unix/io/mod.rs
@@ -77,10 +77,9 @@
#![stable(feature = "rust1", since = "1.0.0")]
-mod fd;
-mod raw;
-
-#[stable(feature = "io_safety", since = "1.63.0")]
-pub use fd::*;
#[stable(feature = "rust1", since = "1.0.0")]
-pub use raw::*;
+pub use crate::os::fd::*;
+
+// Tests for this module
+#[cfg(test)]
+mod tests;
diff --git a/library/std/src/os/unix/io/raw.rs b/library/std/src/os/unix/io/raw.rs
deleted file mode 100644
index a4d2ba797..000000000
--- a/library/std/src/os/unix/io/raw.rs
+++ /dev/null
@@ -1,6 +0,0 @@
-//! Unix-specific extensions to general I/O primitives.
-
-#![stable(feature = "rust1", since = "1.0.0")]
-
-#[stable(feature = "rust1", since = "1.0.0")]
-pub use crate::os::fd::raw::*;
diff --git a/library/std/src/os/unix/io/fd/tests.rs b/library/std/src/os/unix/io/tests.rs
index 84d2a7a1a..84d2a7a1a 100644
--- a/library/std/src/os/unix/io/fd/tests.rs
+++ b/library/std/src/os/unix/io/tests.rs
diff --git a/library/std/src/os/unix/mod.rs b/library/std/src/os/unix/mod.rs
index 411cc0925..f97fa0fb0 100644
--- a/library/std/src/os/unix/mod.rs
+++ b/library/std/src/os/unix/mod.rs
@@ -73,6 +73,8 @@ mod platform {
pub use crate::os::solaris::*;
#[cfg(target_os = "vxworks")]
pub use crate::os::vxworks::*;
+ #[cfg(target_os = "watchos")]
+ pub use crate::os::watchos::*;
}
pub mod ffi;
diff --git a/library/std/src/os/unix/net/addr.rs b/library/std/src/os/unix/net/addr.rs
index 9aeae4b2c..094085e19 100644
--- a/library/std/src/os/unix/net/addr.rs
+++ b/library/std/src/os/unix/net/addr.rs
@@ -2,7 +2,7 @@ use crate::ffi::OsStr;
use crate::os::unix::ffi::OsStrExt;
use crate::path::Path;
use crate::sys::cvt;
-use crate::{ascii, fmt, io, mem, ptr};
+use crate::{fmt, io, mem, ptr};
// FIXME(#43348): Make libc adapt #[doc(cfg(...))] so we don't need these fake definitions here?
#[cfg(not(unix))]
@@ -64,18 +64,6 @@ enum AddressKind<'a> {
Abstract(&'a [u8]),
}
-struct AsciiEscaped<'a>(&'a [u8]);
-
-impl<'a> fmt::Display for AsciiEscaped<'a> {
- fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(fmt, "\"")?;
- for byte in self.0.iter().cloned().flat_map(ascii::escape_default) {
- write!(fmt, "{}", byte as char)?;
- }
- write!(fmt, "\"")
- }
-}
-
/// An address associated with a Unix socket.
///
/// # Examples
@@ -329,7 +317,7 @@ impl SocketAddr {
crate::ptr::copy_nonoverlapping(
namespace.as_ptr(),
- addr.sun_path.as_mut_ptr().offset(1) as *mut u8,
+ addr.sun_path.as_mut_ptr().add(1) as *mut u8,
namespace.len(),
);
let len = (sun_path_offset(&addr) + 1 + namespace.len()) as libc::socklen_t;
@@ -343,7 +331,7 @@ impl fmt::Debug for SocketAddr {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.address() {
AddressKind::Unnamed => write!(fmt, "(unnamed)"),
- AddressKind::Abstract(name) => write!(fmt, "{} (abstract)", AsciiEscaped(name)),
+ AddressKind::Abstract(name) => write!(fmt, "\"{}\" (abstract)", name.escape_ascii()),
AddressKind::Pathname(path) => write!(fmt, "{path:?} (pathname)"),
}
}
diff --git a/library/std/src/os/unix/net/datagram.rs b/library/std/src/os/unix/net/datagram.rs
index 8008acfd1..f758f88d0 100644
--- a/library/std/src/os/unix/net/datagram.rs
+++ b/library/std/src/os/unix/net/datagram.rs
@@ -838,6 +838,31 @@ impl UnixDatagram {
self.0.passcred()
}
+ /// Sets the id of the socket for network filtering purposes.
+ ///
+ #[cfg_attr(
+ any(target_os = "linux", target_os = "freebsd", target_os = "openbsd"),
+ doc = "```no_run"
+ )]
+ #[cfg_attr(
+ not(any(target_os = "linux", target_os = "freebsd", target_os = "openbsd")),
+ doc = "```ignore"
+ )]
+ /// #![feature(unix_set_mark)]
+ /// use std::os::unix::net::UnixDatagram;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let sock = UnixDatagram::unbound()?;
+ /// sock.set_mark(32)?;
+ /// Ok(())
+ /// }
+ /// ```
+ #[cfg(any(doc, target_os = "linux", target_os = "freebsd", target_os = "openbsd",))]
+ #[unstable(feature = "unix_set_mark", issue = "96467")]
+ pub fn set_mark(&self, mark: u32) -> io::Result<()> {
+ self.0.set_mark(mark)
+ }
+
/// Returns the value of the `SO_ERROR` option.
///
/// # Examples
diff --git a/library/std/src/os/unix/net/listener.rs b/library/std/src/os/unix/net/listener.rs
index 7c0d53950..02090afc8 100644
--- a/library/std/src/os/unix/net/listener.rs
+++ b/library/std/src/os/unix/net/listener.rs
@@ -73,9 +73,11 @@ impl UnixListener {
unsafe {
let inner = Socket::new_raw(libc::AF_UNIX, libc::SOCK_STREAM)?;
let (addr, len) = sockaddr_un(path.as_ref())?;
+ const backlog: libc::c_int =
+ if cfg!(any(target_os = "linux", target_os = "freebsd")) { -1 } else { 128 };
cvt(libc::bind(inner.as_inner().as_raw_fd(), &addr as *const _ as *const _, len as _))?;
- cvt(libc::listen(inner.as_inner().as_raw_fd(), 128))?;
+ cvt(libc::listen(inner.as_inner().as_raw_fd(), backlog))?;
Ok(UnixListener(inner))
}
@@ -109,12 +111,16 @@ impl UnixListener {
pub fn bind_addr(socket_addr: &SocketAddr) -> io::Result<UnixListener> {
unsafe {
let inner = Socket::new_raw(libc::AF_UNIX, libc::SOCK_STREAM)?;
+ #[cfg(target_os = "linux")]
+ const backlog: libc::c_int = -1;
+ #[cfg(not(target_os = "linux"))]
+ const backlog: libc::c_int = 128;
cvt(libc::bind(
inner.as_raw_fd(),
&socket_addr.addr as *const _ as *const _,
socket_addr.len as _,
))?;
- cvt(libc::listen(inner.as_raw_fd(), 128))?;
+ cvt(libc::listen(inner.as_raw_fd(), backlog))?;
Ok(UnixListener(inner))
}
}
diff --git a/library/std/src/os/unix/net/stream.rs b/library/std/src/os/unix/net/stream.rs
index cc3a88587..dff8f6e85 100644
--- a/library/std/src/os/unix/net/stream.rs
+++ b/library/std/src/os/unix/net/stream.rs
@@ -427,6 +427,31 @@ impl UnixStream {
self.0.passcred()
}
+ /// Sets the id of the socket for network filtering purposes.
+ ///
+ #[cfg_attr(
+ any(target_os = "linux", target_os = "freebsd", target_os = "openbsd"),
+ doc = "```no_run"
+ )]
+ #[cfg_attr(
+ not(any(target_os = "linux", target_os = "freebsd", target_os = "openbsd")),
+ doc = "```ignore"
+ )]
+ /// #![feature(unix_set_mark)]
+ /// use std::os::unix::net::UnixStream;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let sock = UnixStream::connect("/tmp/sock")?;
+ /// sock.set_mark(32)?;
+ /// Ok(())
+ /// }
+ /// ```
+ #[cfg(any(doc, target_os = "linux", target_os = "freebsd", target_os = "openbsd",))]
+ #[unstable(feature = "unix_set_mark", issue = "96467")]
+ pub fn set_mark(&self, mark: u32) -> io::Result<()> {
+ self.0.set_mark(mark)
+ }
+
/// Returns the value of the `SO_ERROR` option.
///
/// # Examples
diff --git a/library/std/src/os/wasi/io/mod.rs b/library/std/src/os/wasi/io/mod.rs
index 6c884e2ea..57bd842a5 100644
--- a/library/std/src/os/wasi/io/mod.rs
+++ b/library/std/src/os/wasi/io/mod.rs
@@ -1,12 +1,6 @@
//! WASI-specific extensions to general I/O primitives.
-#![deny(unsafe_op_in_unsafe_fn)]
-#![unstable(feature = "wasi_ext", issue = "71213")]
+#![stable(feature = "io_safety", since = "1.63.0")]
-mod fd;
-mod raw;
-
-#[unstable(feature = "wasi_ext", issue = "71213")]
-pub use fd::*;
-#[unstable(feature = "wasi_ext", issue = "71213")]
-pub use raw::*;
+#[stable(feature = "io_safety", since = "1.63.0")]
+pub use crate::os::fd::*;
diff --git a/library/std/src/os/watchos/fs.rs b/library/std/src/os/watchos/fs.rs
new file mode 100644
index 000000000..a14fe35a7
--- /dev/null
+++ b/library/std/src/os/watchos/fs.rs
@@ -0,0 +1,142 @@
+#![stable(feature = "metadata_ext", since = "1.1.0")]
+
+use crate::fs::Metadata;
+use crate::sys_common::AsInner;
+
+#[allow(deprecated)]
+use crate::os::watchos::raw;
+
+/// OS-specific extensions to [`fs::Metadata`].
+///
+/// [`fs::Metadata`]: crate::fs::Metadata
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+pub trait MetadataExt {
+ /// Gain a reference to the underlying `stat` structure which contains
+ /// the raw information returned by the OS.
+ ///
+ /// The contents of the returned `stat` are **not** consistent across
+ /// Unix platforms. The `os::unix::fs::MetadataExt` trait contains the
+ /// cross-Unix abstractions contained within the raw stat.
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ #[deprecated(
+ since = "1.8.0",
+ note = "deprecated in favor of the accessor \
+ methods of this trait"
+ )]
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat;
+
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_dev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ino(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mode(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_nlink(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_uid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_rdev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_size(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_birthtime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_birthtime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blksize(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blocks(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_flags(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gen(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_lspare(&self) -> u32;
+}
+
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+impl MetadataExt for Metadata {
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat {
+ unsafe { &*(self.as_inner().as_inner() as *const libc::stat as *const raw::stat) }
+ }
+ fn st_dev(&self) -> u64 {
+ self.as_inner().as_inner().st_dev as u64
+ }
+ fn st_ino(&self) -> u64 {
+ self.as_inner().as_inner().st_ino as u64
+ }
+ fn st_mode(&self) -> u32 {
+ self.as_inner().as_inner().st_mode as u32
+ }
+ fn st_nlink(&self) -> u64 {
+ self.as_inner().as_inner().st_nlink as u64
+ }
+ fn st_uid(&self) -> u32 {
+ self.as_inner().as_inner().st_uid as u32
+ }
+ fn st_gid(&self) -> u32 {
+ self.as_inner().as_inner().st_gid as u32
+ }
+ fn st_rdev(&self) -> u64 {
+ self.as_inner().as_inner().st_rdev as u64
+ }
+ fn st_size(&self) -> u64 {
+ self.as_inner().as_inner().st_size as u64
+ }
+ fn st_atime(&self) -> i64 {
+ self.as_inner().as_inner().st_atime as i64
+ }
+ fn st_atime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_atime_nsec as i64
+ }
+ fn st_mtime(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime as i64
+ }
+ fn st_mtime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime_nsec as i64
+ }
+ fn st_ctime(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime as i64
+ }
+ fn st_ctime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime_nsec as i64
+ }
+ fn st_birthtime(&self) -> i64 {
+ self.as_inner().as_inner().st_birthtime as i64
+ }
+ fn st_birthtime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_birthtime_nsec as i64
+ }
+ fn st_blksize(&self) -> u64 {
+ self.as_inner().as_inner().st_blksize as u64
+ }
+ fn st_blocks(&self) -> u64 {
+ self.as_inner().as_inner().st_blocks as u64
+ }
+ fn st_gen(&self) -> u32 {
+ self.as_inner().as_inner().st_gen as u32
+ }
+ fn st_flags(&self) -> u32 {
+ self.as_inner().as_inner().st_flags as u32
+ }
+ fn st_lspare(&self) -> u32 {
+ self.as_inner().as_inner().st_lspare as u32
+ }
+}
diff --git a/library/std/src/os/watchos/mod.rs b/library/std/src/os/watchos/mod.rs
new file mode 100644
index 000000000..cd6454ebb
--- /dev/null
+++ b/library/std/src/os/watchos/mod.rs
@@ -0,0 +1,6 @@
+//! watchOS-specific definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+
+pub mod fs;
+pub mod raw;
diff --git a/library/std/src/os/watchos/raw.rs b/library/std/src/os/watchos/raw.rs
new file mode 100644
index 000000000..630a533d9
--- /dev/null
+++ b/library/std/src/os/watchos/raw.rs
@@ -0,0 +1,83 @@
+//! watchOS-specific raw type definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+#![deprecated(
+ since = "1.8.0",
+ note = "these type aliases are no longer supported by \
+ the standard library, the `libc` crate on \
+ crates.io should be used instead for the correct \
+ definitions"
+)]
+#![allow(deprecated)]
+
+use crate::os::raw::c_long;
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blkcnt_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blksize_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type dev_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type ino_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type mode_t = u32;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type nlink_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type off_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type time_t = i64;
+
+#[stable(feature = "pthread_t", since = "1.8.0")]
+pub type pthread_t = usize;
+
+#[repr(C)]
+#[derive(Clone)]
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: u16,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: u16,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_birthtime: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_birthtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_flags: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gen: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_lspare: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_qspare: [i64; 2],
+}
diff --git a/library/std/src/os/windows/io/handle.rs b/library/std/src/os/windows/io/handle.rs
index 16cc8fa27..1dfecc573 100644
--- a/library/std/src/os/windows/io/handle.rs
+++ b/library/std/src/os/windows/io/handle.rs
@@ -384,6 +384,23 @@ impl fmt::Debug for OwnedHandle {
}
}
+macro_rules! impl_is_terminal {
+ ($($t:ty),*$(,)?) => {$(
+ #[unstable(feature = "sealed", issue = "none")]
+ impl crate::sealed::Sealed for $t {}
+
+ #[unstable(feature = "is_terminal", issue = "98070")]
+ impl crate::io::IsTerminal for $t {
+ #[inline]
+ fn is_terminal(&self) -> bool {
+ crate::sys::io::is_terminal(self)
+ }
+ }
+ )*}
+}
+
+impl_is_terminal!(BorrowedHandle<'_>, OwnedHandle);
+
/// A trait to borrow the handle from an underlying object.
#[stable(feature = "io_safety", since = "1.63.0")]
pub trait AsHandle {
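
// Standalone sketch, not part of the patch above: the `impl_is_terminal!` invocations
// here and in os/fd/owned.rs back the unstable `IsTerminal` trait. This assumes a
// nightly toolchain and the companion impls std provides for the standard streams.

#![feature(is_terminal)]
use std::io::{self, IsTerminal};

fn main() {
    // Only emit ANSI colors when stdout is really a terminal, not a pipe or a file.
    if io::stdout().is_terminal() {
        println!("\x1b[32mok\x1b[0m");
    } else {
        println!("ok");
    }
}
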
diff --git a/library/std/src/panic.rs b/library/std/src/panic.rs
index 45bc56efb..c4f022de0 100644
--- a/library/std/src/panic.rs
+++ b/library/std/src/panic.rs
@@ -295,23 +295,22 @@ pub fn get_backtrace_style() -> Option<BacktraceStyle> {
return Some(style);
}
- // Setting environment variables for Fuchsia components isn't a standard
- // or easily supported workflow. For now, display backtraces by default.
- let format = if cfg!(target_os = "fuchsia") {
- BacktraceStyle::Full
- } else {
- crate::env::var_os("RUST_BACKTRACE")
- .map(|x| {
- if &x == "0" {
- BacktraceStyle::Off
- } else if &x == "full" {
- BacktraceStyle::Full
- } else {
- BacktraceStyle::Short
- }
- })
- .unwrap_or(BacktraceStyle::Off)
- };
+ let format = crate::env::var_os("RUST_BACKTRACE")
+ .map(|x| {
+ if &x == "0" {
+ BacktraceStyle::Off
+ } else if &x == "full" {
+ BacktraceStyle::Full
+ } else {
+ BacktraceStyle::Short
+ }
+ })
+ .unwrap_or(if cfg!(target_os = "fuchsia") {
+ // Fuchsia components default to full backtrace.
+ BacktraceStyle::Full
+ } else {
+ BacktraceStyle::Off
+ });
set_backtrace_style(format);
Some(format)
}
diff --git a/library/std/src/panicking.rs b/library/std/src/panicking.rs
index 25c9201f2..d4976a469 100644
--- a/library/std/src/panicking.rs
+++ b/library/std/src/panicking.rs
@@ -18,9 +18,9 @@ use crate::intrinsics;
use crate::mem::{self, ManuallyDrop};
use crate::process;
use crate::sync::atomic::{AtomicBool, Ordering};
+use crate::sync::{PoisonError, RwLock};
use crate::sys::stdio::panic_output;
use crate::sys_common::backtrace;
-use crate::sys_common::rwlock::StaticRwLock;
use crate::sys_common::thread_info;
use crate::thread;
@@ -71,20 +71,29 @@ extern "C" fn __rust_foreign_exception() -> ! {
rtabort!("Rust cannot catch foreign exceptions");
}
-#[derive(Copy, Clone)]
enum Hook {
Default,
- Custom(*mut (dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send)),
+ Custom(Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send>),
}
impl Hook {
- fn custom(f: impl Fn(&PanicInfo<'_>) + 'static + Sync + Send) -> Self {
- Self::Custom(Box::into_raw(Box::new(f)))
+ #[inline]
+ fn into_box(self) -> Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send> {
+ match self {
+ Hook::Default => Box::new(default_hook),
+ Hook::Custom(hook) => hook,
+ }
+ }
+}
+
+impl Default for Hook {
+ #[inline]
+ fn default() -> Hook {
+ Hook::Default
}
}
-static HOOK_LOCK: StaticRwLock = StaticRwLock::new();
-static mut HOOK: Hook = Hook::Default;
+static HOOK: RwLock<Hook> = RwLock::new(Hook::Default);
/// Registers a custom panic hook, replacing any that was previously registered.
///
@@ -125,24 +134,13 @@ pub fn set_hook(hook: Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send>) {
panic!("cannot modify the panic hook from a panicking thread");
}
- // SAFETY:
- //
- // - `HOOK` can only be modified while holding write access to `HOOK_LOCK`.
- // - The argument of `Box::from_raw` is always a valid pointer that was created using
- // `Box::into_raw`.
- unsafe {
- let guard = HOOK_LOCK.write();
- let old_hook = HOOK;
- HOOK = Hook::Custom(Box::into_raw(hook));
- drop(guard);
-
- if let Hook::Custom(ptr) = old_hook {
- #[allow(unused_must_use)]
- {
- Box::from_raw(ptr);
- }
- }
- }
+ let new = Hook::Custom(hook);
+ let mut hook = HOOK.write().unwrap_or_else(PoisonError::into_inner);
+ let old = mem::replace(&mut *hook, new);
+ drop(hook);
+ // Only drop the old hook after releasing the lock to avoid deadlocking
+ // if its destructor panics.
+ drop(old);
}
/// Unregisters the current panic hook, returning it.
@@ -179,22 +177,11 @@ pub fn take_hook() -> Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send> {
panic!("cannot modify the panic hook from a panicking thread");
}
- // SAFETY:
- //
- // - `HOOK` can only be modified while holding write access to `HOOK_LOCK`.
- // - The argument of `Box::from_raw` is always a valid pointer that was created using
- // `Box::into_raw`.
- unsafe {
- let guard = HOOK_LOCK.write();
- let hook = HOOK;
- HOOK = Hook::Default;
- drop(guard);
+ let mut hook = HOOK.write().unwrap_or_else(PoisonError::into_inner);
+ let old_hook = mem::take(&mut *hook);
+ drop(hook);
- match hook {
- Hook::Default => Box::new(default_hook),
- Hook::Custom(ptr) => Box::from_raw(ptr),
- }
- }
+ old_hook.into_box()
}
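
// Standalone sketch, not part of the patch above: the public behavior of `set_hook`
// and `take_hook` is unchanged; the hook is simply stored as an owned `Box` behind a
// standard `RwLock` instead of a raw pointer guarded by `HOOK_LOCK`.

use std::panic;

fn main() {
    panic::set_hook(Box::new(|info| {
        eprintln!("custom panic hook: {info}");
    }));

    // `take_hook` hands the boxed closure back and reinstalls the default hook.
    let hook = panic::take_hook();
    let _ = panic::catch_unwind(|| panic!("boom"));
    drop(hook);
}
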
/// Atomic combination of [`take_hook`] and [`set_hook`]. Use this to replace the panic handler with
@@ -240,24 +227,9 @@ where
panic!("cannot modify the panic hook from a panicking thread");
}
- // SAFETY:
- //
- // - `HOOK` can only be modified while holding write access to `HOOK_LOCK`.
- // - The argument of `Box::from_raw` is always a valid pointer that was created using
- // `Box::into_raw`.
- unsafe {
- let guard = HOOK_LOCK.write();
- let old_hook = HOOK;
- HOOK = Hook::Default;
-
- let prev = match old_hook {
- Hook::Default => Box::new(default_hook),
- Hook::Custom(ptr) => Box::from_raw(ptr),
- };
-
- HOOK = Hook::custom(move |info| hook_fn(&prev, info));
- drop(guard);
- }
+ let mut hook = HOOK.write().unwrap_or_else(PoisonError::into_inner);
+ let prev = mem::take(&mut *hook).into_box();
+ *hook = Hook::Custom(Box::new(move |info| hook_fn(&prev, info)));
}
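
// Standalone sketch, not part of the patch above: `update_hook` (unstable behind
// `panic_update_hook`) wraps the currently installed hook under a single write-lock
// acquisition instead of a separate take_hook/set_hook pair.

#![feature(panic_update_hook)]
use std::panic;

fn main() {
    panic::update_hook(|prev, info| {
        eprintln!("something went wrong, details follow");
        // Delegate to whatever hook was installed before (the default one here).
        prev(info);
    });
}
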
fn default_hook(info: &PanicInfo<'_>) {
@@ -328,7 +300,7 @@ pub mod panic_count {
thread_local! { static LOCAL_PANIC_COUNT: Cell<usize> = const { Cell::new(0) } }
// Sum of panic counts from all threads. The purpose of this is to have
- // a fast path in `is_zero` (which is used by `panicking`). In any particular
+ // a fast path in `count_is_zero` (which is used by `panicking`). In any particular
// thread, if that thread currently views `GLOBAL_PANIC_COUNT` as being zero,
// then `LOCAL_PANIC_COUNT` in that thread is zero. This invariant holds before
// and after increase and decrease, but not necessarily during their execution.
@@ -336,6 +308,14 @@ pub mod panic_count {
// Additionally, the top bit of GLOBAL_PANIC_COUNT (GLOBAL_ALWAYS_ABORT_FLAG)
// records whether panic::always_abort() has been called. This can only be
// set, never cleared.
+ // panic::always_abort() is usually called to prevent memory allocations done by
+ // the panic handling in the child created by `libc::fork`.
+ // Memory allocations performed in a child created with `libc::fork` are undefined
+ // behavior in most operating systems.
+ // Accessing LOCAL_PANIC_COUNT in a child created by `libc::fork` would lead to a memory
+ // allocation. Only GLOBAL_PANIC_COUNT can be accessed in this situation. This is
+ // sufficient because a child process will always have exactly one thread.
+ // See also #85261 for details.
//
// This could be viewed as a struct containing a single bit and an n-1-bit
// value, but if we wrote it like that it would be more than a single word,
@@ -346,15 +326,26 @@ pub mod panic_count {
// panicking thread consumes at least 2 bytes of address space.
static GLOBAL_PANIC_COUNT: AtomicUsize = AtomicUsize::new(0);
+ // Return the state of the ALWAYS_ABORT_FLAG and number of panics.
+ //
+ // If ALWAYS_ABORT_FLAG is not set, the number is determined on a per-thread
+ // basis (stored in LOCAL_PANIC_COUNT), i.e. it is the number of recursive panic
+ // calls made by the calling thread.
+ // If ALWAYS_ABORT_FLAG is set, the number equals the *global* number of panic
+ // calls. See above why LOCAL_PANIC_COUNT is not used.
pub fn increase() -> (bool, usize) {
- (
- GLOBAL_PANIC_COUNT.fetch_add(1, Ordering::Relaxed) & ALWAYS_ABORT_FLAG != 0,
+ let global_count = GLOBAL_PANIC_COUNT.fetch_add(1, Ordering::Relaxed);
+ let must_abort = global_count & ALWAYS_ABORT_FLAG != 0;
+ let panics = if must_abort {
+ global_count & !ALWAYS_ABORT_FLAG
+ } else {
LOCAL_PANIC_COUNT.with(|c| {
let next = c.get() + 1;
c.set(next);
next
- }),
- )
+ })
+ };
+ (must_abort, panics)
}
pub fn decrease() {
@@ -397,7 +388,7 @@ pub mod panic_count {
}
// Slow path is in a separate function to reduce the amount of code
- // inlined from `is_zero`.
+ // inlined from `count_is_zero`.
#[inline(never)]
#[cold]
fn is_zero_slow_path() -> bool {
@@ -682,27 +673,26 @@ fn rust_panic_with_hook(
crate::sys::abort_internal();
}
- unsafe {
- let mut info = PanicInfo::internal_constructor(message, location, can_unwind);
- let _guard = HOOK_LOCK.read();
- match HOOK {
- // Some platforms (like wasm) know that printing to stderr won't ever actually
- // print anything, and if that's the case we can skip the default
- // hook. Since string formatting happens lazily when calling `payload`
- // methods, this means we avoid formatting the string at all!
- // (The panic runtime might still call `payload.take_box()` though and trigger
- // formatting.)
- Hook::Default if panic_output().is_none() => {}
- Hook::Default => {
- info.set_payload(payload.get());
- default_hook(&info);
- }
- Hook::Custom(ptr) => {
- info.set_payload(payload.get());
- (*ptr)(&info);
- }
- };
- }
+ let mut info = PanicInfo::internal_constructor(message, location, can_unwind);
+ let hook = HOOK.read().unwrap_or_else(PoisonError::into_inner);
+ match *hook {
+ // Some platforms (like wasm) know that printing to stderr won't ever actually
+ // print anything, and if that's the case we can skip the default
+ // hook. Since string formatting happens lazily when calling `payload`
+ // methods, this means we avoid formatting the string at all!
+ // (The panic runtime might still call `payload.take_box()` though and trigger
+ // formatting.)
+ Hook::Default if panic_output().is_none() => {}
+ Hook::Default => {
+ info.set_payload(payload.get());
+ default_hook(&info);
+ }
+ Hook::Custom(ref hook) => {
+ info.set_payload(payload.get());
+ hook(&info);
+ }
+ };
+ drop(hook);
if panics > 1 || !can_unwind {
// If a thread panics while it's already unwinding then we
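The hook dispatch above ultimately serves the public `std::panic::set_hook` API; the `Hook::Custom` arm invokes whatever closure was registered there, and string formatting of the payload stays lazy until the hook asks for it. A small usage sketch of that public API (the message text and the `&str` downcast are illustrative):

```rust
use std::panic;

fn main() {
    // The payload is only formatted when the hook actually asks for it,
    // which is what the "lazy formatting" comment above refers to.
    panic::set_hook(Box::new(|info| {
        let msg = info
            .payload()
            .downcast_ref::<&str>()
            .copied()
            .unwrap_or("Box<dyn Any>");
        eprintln!("panicked at '{msg}', {:?}", info.location());
    }));

    let result = panic::catch_unwind(|| panic!("boom"));
    assert!(result.is_err());
}
```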
diff --git a/library/std/src/path.rs b/library/std/src/path.rs
index 5dfeb517a..9d6328162 100644
--- a/library/std/src/path.rs
+++ b/library/std/src/path.rs
@@ -2158,6 +2158,7 @@ impl Path {
/// assert_eq!(grand_parent.parent(), None);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
+ #[doc(alias = "dirname")]
#[must_use]
pub fn parent(&self) -> Option<&Path> {
let mut comps = self.components();
@@ -2225,6 +2226,7 @@ impl Path {
/// assert_eq!(None, Path::new("/").file_name());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
+ #[doc(alias = "basename")]
#[must_use]
pub fn file_name(&self) -> Option<&OsStr> {
self.components().next_back().and_then(|p| match p {
@@ -2401,7 +2403,7 @@ impl Path {
self.file_name().map(split_file_at_dot).and_then(|(before, _after)| Some(before))
}
- /// Extracts the extension of [`self.file_name`], if possible.
+ /// Extracts the extension (without the leading dot) of [`self.file_name`], if possible.
///
/// The extension is:
///
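For context on the hunks above (the new `dirname`/`basename` doc aliases and the clarified `extension` wording), here is a short example of these stable `Path` accessors; the concrete path is arbitrary:

```rust
use std::ffi::OsStr;
use std::path::Path;

fn main() {
    let path = Path::new("/tmp/foo/archive.tar.gz");

    // `parent` corresponds to the Unix `dirname`, `file_name` to `basename`.
    assert_eq!(path.parent(), Some(Path::new("/tmp/foo")));
    assert_eq!(path.file_name(), Some(OsStr::new("archive.tar.gz")));

    // The extension is everything after the final dot, without the dot itself.
    assert_eq!(path.extension(), Some(OsStr::new("gz")));
    assert_eq!(path.file_stem(), Some(OsStr::new("archive.tar")));
}
```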
diff --git a/library/std/src/path/tests.rs b/library/std/src/path/tests.rs
index 351cf6988..dd307022c 100644
--- a/library/std/src/path/tests.rs
+++ b/library/std/src/path/tests.rs
@@ -1768,6 +1768,7 @@ fn test_windows_absolute() {
}
#[bench]
+#[cfg_attr(miri, ignore)] // Miri isn't fast...
fn bench_path_cmp_fast_path_buf_sort(b: &mut test::Bencher) {
let prefix = "my/home";
let mut paths: Vec<_> =
@@ -1781,6 +1782,7 @@ fn bench_path_cmp_fast_path_buf_sort(b: &mut test::Bencher) {
}
#[bench]
+#[cfg_attr(miri, ignore)] // Miri isn't fast...
fn bench_path_cmp_fast_path_long(b: &mut test::Bencher) {
let prefix = "/my/home/is/my/castle/and/my/castle/has/a/rusty/workbench/";
let paths: Vec<_> =
@@ -1799,6 +1801,7 @@ fn bench_path_cmp_fast_path_long(b: &mut test::Bencher) {
}
#[bench]
+#[cfg_attr(miri, ignore)] // Miri isn't fast...
fn bench_path_cmp_fast_path_short(b: &mut test::Bencher) {
let prefix = "my/home";
let paths: Vec<_> =
@@ -1817,6 +1820,7 @@ fn bench_path_cmp_fast_path_short(b: &mut test::Bencher) {
}
#[bench]
+#[cfg_attr(miri, ignore)] // Miri isn't fast...
fn bench_path_hashset(b: &mut test::Bencher) {
let prefix = "/my/home/is/my/castle/and/my/castle/has/a/rusty/workbench/";
let paths: Vec<_> =
@@ -1835,6 +1839,7 @@ fn bench_path_hashset(b: &mut test::Bencher) {
}
#[bench]
+#[cfg_attr(miri, ignore)] // Miri isn't fast...
fn bench_path_hashset_miss(b: &mut test::Bencher) {
let prefix = "/my/home/is/my/castle/and/my/castle/has/a/rusty/workbench/";
let paths: Vec<_> =
diff --git a/library/std/src/personality.rs b/library/std/src/personality.rs
new file mode 100644
index 000000000..63f0ad4f1
--- /dev/null
+++ b/library/std/src/personality.rs
@@ -0,0 +1,46 @@
+//! This module contains the implementation of the `eh_personality` lang item.
+//!
+//! The actual implementation is heavily dependent on the target since Rust
+//! tries to use the native stack unwinding mechanism whenever possible.
+//!
+//! This personality function is still required with `-C panic=abort` because
+//! it is used to catch foreign exceptions from `extern "C-unwind"` and turn
+//! them into aborts.
+//!
+//! Additionally, ARM EHABI uses the personality function when generating
+//! backtraces.
+
+mod dwarf;
+
+#[cfg(not(test))]
+cfg_if::cfg_if! {
+ if #[cfg(target_os = "emscripten")] {
+ mod emcc;
+ } else if #[cfg(target_env = "msvc")] {
+ // This is required by the compiler to exist (i.e., it's a lang item),
+ // but it's never actually called by the compiler because
+ // _CxxFrameHandler3 is the personality function that is always used.
+ // Hence this is just an aborting stub.
+ #[lang = "eh_personality"]
+ fn rust_eh_personality() {
+ core::intrinsics::abort()
+ }
+ } else if #[cfg(any(
+ all(target_family = "windows", target_env = "gnu"),
+ target_os = "psp",
+ target_os = "solid_asp3",
+ all(target_family = "unix", not(target_os = "espidf")),
+ all(target_vendor = "fortanix", target_env = "sgx"),
+ ))] {
+ mod gcc;
+ } else {
+ // Targets that don't support unwinding.
+ // - family=wasm
+ // - os=none ("bare metal" targets)
+ // - os=uefi
+ // - os=espidf
+ // - os=hermit
+ // - nvptx64-nvidia-cuda
+ // - arch=avr
+ }
+}
diff --git a/library/panic_unwind/src/dwarf/eh.rs b/library/std/src/personality/dwarf/eh.rs
index 7394feab8..27b50c13b 100644
--- a/library/panic_unwind/src/dwarf/eh.rs
+++ b/library/std/src/personality/dwarf/eh.rs
@@ -11,7 +11,7 @@
#![allow(non_upper_case_globals)]
#![allow(unused)]
-use crate::dwarf::DwarfReader;
+use super::DwarfReader;
use core::mem;
pub const DW_EH_PE_omit: u8 = 0xFF;
@@ -75,7 +75,7 @@ pub unsafe fn find_eh_action(lsda: *const u8, context: &EHContext<'_>) -> Result
let call_site_encoding = reader.read::<u8>();
let call_site_table_length = reader.read_uleb128();
- let action_table = reader.ptr.offset(call_site_table_length as isize);
+ let action_table = reader.ptr.add(call_site_table_length as usize);
let ip = context.ip;
if !USING_SJLJ_EXCEPTIONS {
@@ -98,9 +98,8 @@ pub unsafe fn find_eh_action(lsda: *const u8, context: &EHContext<'_>) -> Result
}
}
}
- // Ip is not present in the table. This should not happen... but it does: issue #35011.
- // So rather than returning EHAction::Terminate, we do this.
- Ok(EHAction::None)
+ // Ip is not present in the table. This indicates a nounwind call.
+ Ok(EHAction::Terminate)
} else {
// SjLj version:
// The "IP" is an index into the call-site table, with two exceptions:
diff --git a/library/panic_unwind/src/dwarf/mod.rs b/library/std/src/personality/dwarf/mod.rs
index 652fbe95a..652fbe95a 100644
--- a/library/panic_unwind/src/dwarf/mod.rs
+++ b/library/std/src/personality/dwarf/mod.rs
diff --git a/library/panic_unwind/src/dwarf/tests.rs b/library/std/src/personality/dwarf/tests.rs
index 1644f3708..1644f3708 100644
--- a/library/panic_unwind/src/dwarf/tests.rs
+++ b/library/std/src/personality/dwarf/tests.rs
diff --git a/library/std/src/personality/emcc.rs b/library/std/src/personality/emcc.rs
new file mode 100644
index 000000000..f942bdf18
--- /dev/null
+++ b/library/std/src/personality/emcc.rs
@@ -0,0 +1,20 @@
+//! On Emscripten Rust panics are wrapped in C++ exceptions, so we just forward
+//! to `__gxx_personality_v0` which is provided by Emscripten.
+
+use libc::c_int;
+use unwind as uw;
+
+// This is required by the compiler to exist (i.e., it's a lang item), but it's
+// never actually called by the compiler. Emscripten EH doesn't use a
+// personality function at all; it instead uses __cxa_find_matching_catch.
+// Wasm error handling would use __gxx_personality_wasm0.
+#[lang = "eh_personality"]
+unsafe extern "C" fn rust_eh_personality(
+ _version: c_int,
+ _actions: uw::_Unwind_Action,
+ _exception_class: uw::_Unwind_Exception_Class,
+ _exception_object: *mut uw::_Unwind_Exception,
+ _context: *mut uw::_Unwind_Context,
+) -> uw::_Unwind_Reason_Code {
+ core::intrinsics::abort()
+}
diff --git a/library/std/src/personality/gcc.rs b/library/std/src/personality/gcc.rs
new file mode 100644
index 000000000..7f0b0439c
--- /dev/null
+++ b/library/std/src/personality/gcc.rs
@@ -0,0 +1,279 @@
+//! Implementation of panics backed by libgcc/libunwind (in some form).
+//!
+//! For background on exception handling and stack unwinding please see
+//! "Exception Handling in LLVM" (llvm.org/docs/ExceptionHandling.html) and
+//! documents linked from it.
+//! These are also good reads:
+//! * <https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html>
+//! * <https://monoinfinito.wordpress.com/series/exception-handling-in-c/>
+//! * <https://www.airs.com/blog/index.php?s=exception+frames>
+//!
+//! ## A brief summary
+//!
+//! Exception handling happens in two phases: a search phase and a cleanup
+//! phase.
+//!
+//! In both phases the unwinder walks stack frames from top to bottom using
+//! information from the stack frame unwind sections of the current process's
+//! modules ("module" here refers to an OS module, i.e., an executable or a
+//! dynamic library).
+//!
+//! For each stack frame, it invokes the associated "personality routine", whose
+//! address is also stored in the unwind info section.
+//!
+//! In the search phase, the job of a personality routine is to examine the
+//! exception object being thrown, and to decide whether it should be caught at
+//! that stack frame. Once the handler frame has been identified, the cleanup
+//! phase begins.
+//!
+//! In the cleanup phase, the unwinder invokes each personality routine again.
+//! This time it decides which (if any) cleanup code needs to be run for
+//! the current stack frame. If there is any, control is transferred to a special
+//! branch in the function body, the "landing pad", which invokes destructors,
+//! frees memory, etc. At the end of the landing pad, control is transferred
+//! back to the unwinder and unwinding resumes.
+//!
+//! Once the stack has been unwound down to the handler frame level, unwinding
+//! stops and the last personality routine transfers control to the catch block.
+
+use super::dwarf::eh::{self, EHAction, EHContext};
+use libc::{c_int, uintptr_t};
+use unwind as uw;
+
+// Register ids were lifted from LLVM's TargetLowering::getExceptionPointerRegister()
+// and TargetLowering::getExceptionSelectorRegister() for each architecture,
+// then mapped to DWARF register numbers via register definition tables
+// (typically <arch>RegisterInfo.td, search for "DwarfRegNum").
+// See also https://llvm.org/docs/WritingAnLLVMBackend.html#defining-a-register.
+
+#[cfg(target_arch = "x86")]
+const UNWIND_DATA_REG: (i32, i32) = (0, 2); // EAX, EDX
+
+#[cfg(target_arch = "x86_64")]
+const UNWIND_DATA_REG: (i32, i32) = (0, 1); // RAX, RDX
+
+#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
+const UNWIND_DATA_REG: (i32, i32) = (0, 1); // R0, R1 / X0, X1
+
+#[cfg(target_arch = "m68k")]
+const UNWIND_DATA_REG: (i32, i32) = (0, 1); // D0, D1
+
+#[cfg(any(target_arch = "mips", target_arch = "mips64"))]
+const UNWIND_DATA_REG: (i32, i32) = (4, 5); // A0, A1
+
+#[cfg(any(target_arch = "powerpc", target_arch = "powerpc64"))]
+const UNWIND_DATA_REG: (i32, i32) = (3, 4); // R3, R4 / X3, X4
+
+#[cfg(target_arch = "s390x")]
+const UNWIND_DATA_REG: (i32, i32) = (6, 7); // R6, R7
+
+#[cfg(any(target_arch = "sparc", target_arch = "sparc64"))]
+const UNWIND_DATA_REG: (i32, i32) = (24, 25); // I0, I1
+
+#[cfg(target_arch = "hexagon")]
+const UNWIND_DATA_REG: (i32, i32) = (0, 1); // R0, R1
+
+#[cfg(any(target_arch = "riscv64", target_arch = "riscv32"))]
+const UNWIND_DATA_REG: (i32, i32) = (10, 11); // x10, x11
+
+// The following code is based on GCC's C and C++ personality routines. For reference, see:
+// https://github.com/gcc-mirror/gcc/blob/master/libstdc++-v3/libsupc++/eh_personality.cc
+// https://github.com/gcc-mirror/gcc/blob/trunk/libgcc/unwind-c.c
+
+cfg_if::cfg_if! {
+ if #[cfg(all(target_arch = "arm", not(target_os = "ios"), not(target_os = "watchos"), not(target_os = "netbsd")))] {
+ // ARM EHABI personality routine.
+ // https://infocenter.arm.com/help/topic/com.arm.doc.ihi0038b/IHI0038B_ehabi.pdf
+ //
+ // iOS uses the default routine instead since it uses SjLj unwinding.
+ #[lang = "eh_personality"]
+ unsafe extern "C" fn rust_eh_personality(
+ state: uw::_Unwind_State,
+ exception_object: *mut uw::_Unwind_Exception,
+ context: *mut uw::_Unwind_Context,
+ ) -> uw::_Unwind_Reason_Code {
+ let state = state as c_int;
+ let action = state & uw::_US_ACTION_MASK as c_int;
+ let search_phase = if action == uw::_US_VIRTUAL_UNWIND_FRAME as c_int {
+ // Backtraces on ARM will call the personality routine with
+ // state == _US_VIRTUAL_UNWIND_FRAME | _US_FORCE_UNWIND. In those cases
+ // we want to continue unwinding the stack; otherwise all our backtraces
+ // would end at __rust_try.
+ if state & uw::_US_FORCE_UNWIND as c_int != 0 {
+ return continue_unwind(exception_object, context);
+ }
+ true
+ } else if action == uw::_US_UNWIND_FRAME_STARTING as c_int {
+ false
+ } else if action == uw::_US_UNWIND_FRAME_RESUME as c_int {
+ return continue_unwind(exception_object, context);
+ } else {
+ return uw::_URC_FAILURE;
+ };
+
+ // The DWARF unwinder assumes that _Unwind_Context holds things like the function
+ // and LSDA pointers; however, ARM EHABI places them into the exception object.
+ // To preserve the signatures of functions like _Unwind_GetLanguageSpecificData(),
+ // which take only the context pointer, GCC personality routines stash a pointer
+ // to the exception_object in the context, using the location reserved for ARM's
+ // "scratch register" (r12).
+ uw::_Unwind_SetGR(context, uw::UNWIND_POINTER_REG, exception_object as uw::_Unwind_Ptr);
+ // ...A more principled approach would be to provide the full definition of ARM's
+ // _Unwind_Context in our libunwind bindings and fetch the required data from there
+ // directly, bypassing DWARF compatibility functions.
+
+ let eh_action = match find_eh_action(context) {
+ Ok(action) => action,
+ Err(_) => return uw::_URC_FAILURE,
+ };
+ if search_phase {
+ match eh_action {
+ EHAction::None | EHAction::Cleanup(_) => {
+ return continue_unwind(exception_object, context);
+ }
+ EHAction::Catch(_) => {
+ // EHABI requires the personality routine to update the
+ // SP value in the barrier cache of the exception object.
+ (*exception_object).private[5] =
+ uw::_Unwind_GetGR(context, uw::UNWIND_SP_REG);
+ return uw::_URC_HANDLER_FOUND;
+ }
+ EHAction::Terminate => return uw::_URC_FAILURE,
+ }
+ } else {
+ match eh_action {
+ EHAction::None => return continue_unwind(exception_object, context),
+ EHAction::Cleanup(lpad) | EHAction::Catch(lpad) => {
+ uw::_Unwind_SetGR(
+ context,
+ UNWIND_DATA_REG.0,
+ exception_object as uintptr_t,
+ );
+ uw::_Unwind_SetGR(context, UNWIND_DATA_REG.1, 0);
+ uw::_Unwind_SetIP(context, lpad);
+ return uw::_URC_INSTALL_CONTEXT;
+ }
+ EHAction::Terminate => return uw::_URC_FAILURE,
+ }
+ }
+
+ // On ARM EHABI the personality routine is responsible for actually
+ // unwinding a single stack frame before returning (ARM EHABI Sec. 6.1).
+ unsafe fn continue_unwind(
+ exception_object: *mut uw::_Unwind_Exception,
+ context: *mut uw::_Unwind_Context,
+ ) -> uw::_Unwind_Reason_Code {
+ if __gnu_unwind_frame(exception_object, context) == uw::_URC_NO_REASON {
+ uw::_URC_CONTINUE_UNWIND
+ } else {
+ uw::_URC_FAILURE
+ }
+ }
+ // defined in libgcc
+ extern "C" {
+ fn __gnu_unwind_frame(
+ exception_object: *mut uw::_Unwind_Exception,
+ context: *mut uw::_Unwind_Context,
+ ) -> uw::_Unwind_Reason_Code;
+ }
+ }
+ } else {
+ // Default personality routine, which is used directly on most targets
+ // and indirectly on Windows x86_64 via SEH.
+ unsafe extern "C" fn rust_eh_personality_impl(
+ version: c_int,
+ actions: uw::_Unwind_Action,
+ _exception_class: uw::_Unwind_Exception_Class,
+ exception_object: *mut uw::_Unwind_Exception,
+ context: *mut uw::_Unwind_Context,
+ ) -> uw::_Unwind_Reason_Code {
+ if version != 1 {
+ return uw::_URC_FATAL_PHASE1_ERROR;
+ }
+ let eh_action = match find_eh_action(context) {
+ Ok(action) => action,
+ Err(_) => return uw::_URC_FATAL_PHASE1_ERROR,
+ };
+ if actions as i32 & uw::_UA_SEARCH_PHASE as i32 != 0 {
+ match eh_action {
+ EHAction::None | EHAction::Cleanup(_) => uw::_URC_CONTINUE_UNWIND,
+ EHAction::Catch(_) => uw::_URC_HANDLER_FOUND,
+ EHAction::Terminate => uw::_URC_FATAL_PHASE1_ERROR,
+ }
+ } else {
+ match eh_action {
+ EHAction::None => uw::_URC_CONTINUE_UNWIND,
+ EHAction::Cleanup(lpad) | EHAction::Catch(lpad) => {
+ uw::_Unwind_SetGR(
+ context,
+ UNWIND_DATA_REG.0,
+ exception_object as uintptr_t,
+ );
+ uw::_Unwind_SetGR(context, UNWIND_DATA_REG.1, 0);
+ uw::_Unwind_SetIP(context, lpad);
+ uw::_URC_INSTALL_CONTEXT
+ }
+ EHAction::Terminate => uw::_URC_FATAL_PHASE2_ERROR,
+ }
+ }
+ }
+
+ cfg_if::cfg_if! {
+ if #[cfg(all(windows, target_arch = "x86_64", target_env = "gnu"))] {
+ // On x86_64 MinGW targets, the unwinding mechanism is SEH; however, the unwind
+ // handler data (aka LSDA) uses GCC-compatible encoding.
+ #[lang = "eh_personality"]
+ #[allow(nonstandard_style)]
+ unsafe extern "C" fn rust_eh_personality(
+ exceptionRecord: *mut uw::EXCEPTION_RECORD,
+ establisherFrame: uw::LPVOID,
+ contextRecord: *mut uw::CONTEXT,
+ dispatcherContext: *mut uw::DISPATCHER_CONTEXT,
+ ) -> uw::EXCEPTION_DISPOSITION {
+ uw::_GCC_specific_handler(
+ exceptionRecord,
+ establisherFrame,
+ contextRecord,
+ dispatcherContext,
+ rust_eh_personality_impl,
+ )
+ }
+ } else {
+ // The personality routine for most of our targets.
+ #[lang = "eh_personality"]
+ unsafe extern "C" fn rust_eh_personality(
+ version: c_int,
+ actions: uw::_Unwind_Action,
+ exception_class: uw::_Unwind_Exception_Class,
+ exception_object: *mut uw::_Unwind_Exception,
+ context: *mut uw::_Unwind_Context,
+ ) -> uw::_Unwind_Reason_Code {
+ rust_eh_personality_impl(
+ version,
+ actions,
+ exception_class,
+ exception_object,
+ context,
+ )
+ }
+ }
+ }
+ }
+}
+
+unsafe fn find_eh_action(context: *mut uw::_Unwind_Context) -> Result<EHAction, ()> {
+ let lsda = uw::_Unwind_GetLanguageSpecificData(context) as *const u8;
+ let mut ip_before_instr: c_int = 0;
+ let ip = uw::_Unwind_GetIPInfo(context, &mut ip_before_instr);
+ let eh_context = EHContext {
+ // The return address points 1 byte past the call instruction,
+ // which could be in the next IP range in the LSDA range table.
+ //
+ // `ip = -1` has special meaning, so use wrapping sub to allow for that
+ ip: if ip_before_instr != 0 { ip } else { ip.wrapping_sub(1) },
+ func_start: uw::_Unwind_GetRegionStart(context),
+ get_text_start: &|| uw::_Unwind_GetTextRelBase(context),
+ get_data_start: &|| uw::_Unwind_GetDataRelBase(context),
+ };
+ eh::find_eh_action(lsda, &eh_context)
+}
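The module docs at the top of this new file describe the search and cleanup phases in prose. The following is a rough, purely illustrative model of that decision flow; the types and function are hypothetical and deliberately unrelated to the real `uw`/`EHAction` API used above:

```rust
// Hypothetical, simplified model of the two-phase personality decision.
#[allow(dead_code)]
#[derive(Clone, Copy)]
enum Action {
    None,
    Cleanup(usize),
    Catch(usize),
}

enum Phase {
    Search,
    Cleanup,
}

enum Outcome {
    ContinueUnwind,
    HandlerFound,
    InstallContext { landing_pad: usize },
}

fn personality_decision(phase: Phase, action: Action) -> Outcome {
    match phase {
        // Search phase: only report whether this frame will catch the exception.
        Phase::Search => match action {
            Action::Catch(_) => Outcome::HandlerFound,
            _ => Outcome::ContinueUnwind,
        },
        // Cleanup phase: transfer control to the landing pad if there is one.
        Phase::Cleanup => match action {
            Action::Cleanup(lp) | Action::Catch(lp) => Outcome::InstallContext { landing_pad: lp },
            Action::None => Outcome::ContinueUnwind,
        },
    }
}

fn main() {
    assert!(matches!(
        personality_decision(Phase::Search, Action::Catch(0x40)),
        Outcome::HandlerFound
    ));
    assert!(matches!(
        personality_decision(Phase::Cleanup, Action::None),
        Outcome::ContinueUnwind
    ));
}
```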
diff --git a/library/std/src/primitive_docs.rs b/library/std/src/primitive_docs.rs
index b8e546164..331714a99 100644
--- a/library/std/src/primitive_docs.rs
+++ b/library/std/src/primitive_docs.rs
@@ -611,7 +611,19 @@ mod prim_pointer {}
///
/// Arrays coerce to [slices (`[T]`)][slice], so a slice method may be called on
/// an array. Indeed, this provides most of the API for working with arrays.
-/// Slices have a dynamic size and do not coerce to arrays.
+///
+/// Slices have a dynamic size and do not coerce to arrays. Instead, use
+/// `slice.try_into().unwrap()` or `<ArrayType>::try_from(slice).unwrap()`.
+///
+/// Array's `try_from(slice)` implementations (and the corresponding `slice.try_into()`
+/// array implementations) succeed if the input slice length is the same as the result
+/// array length. They optimize especially well when the optimizer can easily determine
+/// the slice length, e.g. `<[u8; 4]>::try_from(&slice[4..8]).unwrap()`. Array implements
+/// [TryFrom](crate::convert::TryFrom) returning:
+///
+/// - `[T; N]` copies from the slice's elements
+/// - `&[T; N]` references the original slice's elements
+/// - `&mut [T; N]` references the original slice's elements
///
/// You can move elements out of an array with a [slice pattern]. If you want
/// one element, see [`mem::replace`].
@@ -640,6 +652,15 @@ mod prim_pointer {}
/// for x in &array { }
/// ```
///
+/// You can use `<ArrayType>::try_from(slice)` or `slice.try_into()` to get an array from
+/// a slice:
+///
+/// ```
+/// let bytes: [u8; 3] = [1, 0, 2];
+/// assert_eq!(1, u16::from_le_bytes(<[u8; 2]>::try_from(&bytes[0..2]).unwrap()));
+/// assert_eq!(512, u16::from_le_bytes(bytes[1..3].try_into().unwrap()));
+/// ```
+///
/// You can use a [slice pattern] to move elements out of an array:
///
/// ```
@@ -801,11 +822,53 @@ mod prim_array {}
/// assert_eq!(2 * pointer_size, std::mem::size_of::<Box<[u8]>>());
/// assert_eq!(2 * pointer_size, std::mem::size_of::<Rc<[u8]>>());
/// ```
+///
+/// ## Trait Implementations
+///
+/// Some traits are implemented for slices if the element type implements
+/// that trait. This includes [`Eq`], [`Hash`] and [`Ord`].
+///
+/// ## Iteration
+///
+/// Slices implement `IntoIterator`. The iterator yields references to the
+/// slice's elements.
+///
+/// ```
+/// let numbers: &[i32] = &[0, 1, 2];
+/// for n in numbers {
+/// println!("{n} is a number!");
+/// }
+/// ```
+///
+/// Iterating over a mutable slice yields mutable references to its elements:
+///
+/// ```
+/// let mut scores: &mut [i32] = &mut [7, 8, 9];
+/// for score in scores {
+/// *score += 1;
+/// }
+/// ```
+///
+/// This iterator yields mutable references to the slice's elements, so while
+/// the element type of the slice is `i32`, the element type of the iterator is
+/// `&mut i32`.
+///
+/// * [`.iter`] and [`.iter_mut`] are the explicit methods to return the default
+/// iterators.
+/// * Further methods that return iterators are [`.split`], [`.splitn`],
+/// [`.chunks`], [`.windows`] and more.
+///
+/// [`Hash`]: core::hash::Hash
+/// [`.iter`]: slice::iter
+/// [`.iter_mut`]: slice::iter_mut
+/// [`.split`]: slice::split
+/// [`.splitn`]: slice::splitn
+/// [`.chunks`]: slice::chunks
+/// [`.windows`]: slice::windows
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_slice {}
#[doc(primitive = "str")]
-//
/// String slices.
///
/// *[See also the `std::str` module](crate::str).*
@@ -816,19 +879,22 @@ mod prim_slice {}
///
/// String slices are always valid UTF-8.
///
-/// # Examples
+/// # Basic Usage
///
/// String literals are string slices:
///
/// ```
-/// let hello = "Hello, world!";
-///
-/// // with an explicit type annotation
-/// let hello: &'static str = "Hello, world!";
+/// let hello_world = "Hello, World!";
/// ```
///
-/// They are `'static` because they're stored directly in the final binary, and
-/// so will be valid for the `'static` duration.
+/// Here we have declared a string slice initialized with a string literal.
+/// String literals have a static lifetime, which means the string `hello_world`
+/// is guaranteed to be valid for the duration of the entire program.
+/// We can explicitly specify `hello_world`'s lifetime as well:
+///
+/// ```
+/// let hello_world: &'static str = "Hello, World!";
+/// ```
///
/// # Representation
///
@@ -996,7 +1062,7 @@ impl<T> (T,) {}
// Fake impl that's only really used for docs.
#[cfg(doc)]
#[stable(feature = "rust1", since = "1.0.0")]
-#[cfg_attr(not(bootstrap), doc(fake_variadic))]
+#[doc(fake_variadic)]
/// This trait is implemented on arbitrary-length tuples.
impl<T: Clone> Clone for (T,) {
fn clone(&self) -> Self {
@@ -1007,7 +1073,7 @@ impl<T: Clone> Clone for (T,) {
// Fake impl that's only really used for docs.
#[cfg(doc)]
#[stable(feature = "rust1", since = "1.0.0")]
-#[cfg_attr(not(bootstrap), doc(fake_variadic))]
+#[doc(fake_variadic)]
/// This trait is implemented on arbitrary-length tuples.
impl<T: Copy> Copy for (T,) {
// empty
@@ -1178,7 +1244,7 @@ mod prim_usize {}
#[doc(alias = "&")]
#[doc(alias = "&mut")]
//
-/// References, both shared and mutable.
+/// References, `&T` and `&mut T`.
///
/// A reference represents a borrow of some owned value. You can get one by using the `&` or `&mut`
/// operators on a value, or by using a [`ref`](../std/keyword.ref.html) or
@@ -1484,13 +1550,12 @@ mod prim_fn {}
// Required to make auto trait impls render.
// See src/librustdoc/passes/collect_trait_impls.rs:collect_trait_impls
#[doc(hidden)]
-#[cfg(not(bootstrap))]
impl<Ret, T> fn(T) -> Ret {}
// Fake impl that's only really used for docs.
#[cfg(doc)]
#[stable(feature = "rust1", since = "1.0.0")]
-#[cfg_attr(not(bootstrap), doc(fake_variadic))]
+#[doc(fake_variadic)]
/// This trait is implemented on function pointers with any number of arguments.
impl<Ret, T> Clone for fn(T) -> Ret {
fn clone(&self) -> Self {
@@ -1501,7 +1566,7 @@ impl<Ret, T> Clone for fn(T) -> Ret {
// Fake impl that's only really used for docs.
#[cfg(doc)]
#[stable(feature = "rust1", since = "1.0.0")]
-#[cfg_attr(not(bootstrap), doc(fake_variadic))]
+#[doc(fake_variadic)]
/// This trait is implemented on function pointers with any number of arguments.
impl<Ret, T> Copy for fn(T) -> Ret {
// empty
diff --git a/library/std/src/process.rs b/library/std/src/process.rs
index d6cba7e75..400d25beb 100644
--- a/library/std/src/process.rs
+++ b/library/std/src/process.rs
@@ -169,15 +169,15 @@ use crate::sys_common::{AsInner, AsInnerMut, FromInner, IntoInner};
pub struct Child {
pub(crate) handle: imp::Process,
- /// The handle for writing to the child's standard input (stdin), if it has
- /// been captured. To avoid partially moving
- /// the `child` and thus blocking yourself from calling
- /// functions on `child` while using `stdin`,
- /// you might find it helpful:
+ /// The handle for writing to the child's standard input (stdin), if it
+ /// has been captured. You might find it helpful to do
///
/// ```compile_fail,E0425
/// let stdin = child.stdin.take().unwrap();
/// ```
+ ///
+ /// to avoid partially moving the `child` and thus blocking yourself from calling
+ /// functions on `child` while using `stdin`.
#[stable(feature = "process", since = "1.0.0")]
pub stdin: Option<ChildStdin>,
@@ -1629,7 +1629,7 @@ impl ExitStatusError {
///
/// This is exactly like [`code()`](Self::code), except that it returns a `NonZeroI32`.
///
- /// Plain `code`, returning a plain integer, is provided because is is often more convenient.
+ /// Plain `code`, returning a plain integer, is provided because it is often more convenient.
/// The returned value from `code()` is indeed also nonzero; use `code_nonzero()` when you want
/// a type-level guarantee of nonzeroness.
///
@@ -2154,8 +2154,16 @@ pub fn id() -> u32 {
#[cfg_attr(not(test), lang = "termination")]
#[stable(feature = "termination_trait_lib", since = "1.61.0")]
#[rustc_on_unimplemented(
- message = "`main` has invalid return type `{Self}`",
- label = "`main` can only return types that implement `{Termination}`"
+ on(
+ all(not(bootstrap), cause = "MainFunctionType"),
+ message = "`main` has invalid return type `{Self}`",
+ label = "`main` can only return types that implement `{Termination}`"
+ ),
+ on(
+ bootstrap,
+ message = "`main` has invalid return type `{Self}`",
+ label = "`main` can only return types that implement `{Termination}`"
+ )
)]
pub trait Termination {
/// Is called to get the representation of the value as status code.
@@ -2200,9 +2208,7 @@ impl<T: Termination, E: fmt::Debug> Termination for Result<T, E> {
match self {
Ok(val) => val.report(),
Err(err) => {
- // Ignore error if the write fails, for example because stderr is
- // already closed. There is not much point panicking at this point.
- let _ = writeln!(io::stderr(), "Error: {err:?}");
+ io::attempt_print_to_stderr(format_args_nl!("Error: {err:?}"));
ExitCode::FAILURE
}
}
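As a reminder of what the `Termination` diagnostics and the `Result` impl in this hunk are about: `main` may return any type implementing `Termination`, and for `Result` the `Err` value is reported on stderr and maps to `ExitCode::FAILURE`. A small example (the file name is arbitrary):

```rust
use std::fs;
use std::io;

// With `Termination`, `main` can return a `Result`; an `Err` is printed to
// stderr and the process exits with a failure code, exactly as the impl
// shown in the hunk above describes.
fn main() -> Result<(), io::Error> {
    let contents = fs::read_to_string("Cargo.toml")?;
    println!("read {} bytes", contents.len());
    Ok(())
}
```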
diff --git a/library/std/src/rt.rs b/library/std/src/rt.rs
index 663537a05..9c2f0c1dd 100644
--- a/library/std/src/rt.rs
+++ b/library/std/src/rt.rs
@@ -72,10 +72,29 @@ macro_rules! rtunwrap {
// Runs before `main`.
// SAFETY: must be called only once during runtime initialization.
// NOTE: this is not guaranteed to run, for example when Rust code is called externally.
+//
+// # The `sigpipe` parameter
+//
+// Since 2014, the Rust runtime on Unix has set the `SIGPIPE` handler to
+// `SIG_IGN`. Applications have good reasons to want a different behavior
+// though, so there is a `#[unix_sigpipe = "..."]` attribute on `fn main()` that
+// can be used to select how `SIGPIPE` shall be set up (if changed at all) before
+// `fn main()` is called. See <https://github.com/rust-lang/rust/issues/97889>
+// for more info.
+//
+// The `sigpipe` parameter to this function gets its value via the code that
+// rustc generates to invoke `fn lang_start()`. The reason we have `sigpipe` for
+// all platforms, and not only Unix, is that std is not allowed to have `cfg`
+// directives at this high a level. See the module docs in
+// `src/tools/tidy/src/pal.rs` for more info. On platforms other than Unix,
+// `sigpipe` still has a value, but that value is ignored.
+//
+// Even though it is a `u8`, it only ever has 4 values. These are documented in
+// `compiler/rustc_session/src/config/sigpipe.rs`.
#[cfg_attr(test, allow(dead_code))]
-unsafe fn init(argc: isize, argv: *const *const u8) {
+unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) {
unsafe {
- sys::init(argc, argv);
+ sys::init(argc, argv, sigpipe);
let main_guard = sys::thread::guard::init();
// Next, set up the current Thread with the guard information we just
@@ -107,6 +126,7 @@ fn lang_start_internal(
main: &(dyn Fn() -> i32 + Sync + crate::panic::RefUnwindSafe),
argc: isize,
argv: *const *const u8,
+ sigpipe: u8,
) -> Result<isize, !> {
use crate::{mem, panic};
let rt_abort = move |e| {
@@ -124,7 +144,7 @@ fn lang_start_internal(
// prevent libstd from accidentally introducing a panic to these functions. Another is from
// user code from `main` or, more nefariously, as described in e.g. issue #86030.
// SAFETY: Only called once during runtime initialization.
- panic::catch_unwind(move || unsafe { init(argc, argv) }).map_err(rt_abort)?;
+ panic::catch_unwind(move || unsafe { init(argc, argv, sigpipe) }).map_err(rt_abort)?;
let ret_code = panic::catch_unwind(move || panic::catch_unwind(main).unwrap_or(101) as isize)
.map_err(move |e| {
mem::forget(e);
@@ -140,11 +160,13 @@ fn lang_start<T: crate::process::Termination + 'static>(
main: fn() -> T,
argc: isize,
argv: *const *const u8,
+ sigpipe: u8,
) -> isize {
let Ok(v) = lang_start_internal(
&move || crate::sys_common::backtrace::__rust_begin_short_backtrace(main).report().to_i32(),
argc,
argv,
+ sigpipe,
);
v
}
diff --git a/library/std/src/sync/mpsc/mpsc_queue/tests.rs b/library/std/src/sync/mpsc/mpsc_queue/tests.rs
index 9f4f31ed0..34b2a9a98 100644
--- a/library/std/src/sync/mpsc/mpsc_queue/tests.rs
+++ b/library/std/src/sync/mpsc/mpsc_queue/tests.rs
@@ -13,7 +13,7 @@ fn test_full() {
#[test]
fn test() {
let nthreads = 8;
- let nmsgs = 1000;
+ let nmsgs = if cfg!(miri) { 100 } else { 1000 };
let q = Queue::new();
match q.pop() {
Empty => {}
diff --git a/library/std/src/sync/mpsc/spsc_queue/tests.rs b/library/std/src/sync/mpsc/spsc_queue/tests.rs
index 467ef3dbd..eb6d5c2cf 100644
--- a/library/std/src/sync/mpsc/spsc_queue/tests.rs
+++ b/library/std/src/sync/mpsc/spsc_queue/tests.rs
@@ -77,12 +77,13 @@ fn stress() {
}
unsafe fn stress_bound(bound: usize) {
+ let count = if cfg!(miri) { 1000 } else { 100000 };
let q = Arc::new(Queue::with_additions(bound, (), ()));
let (tx, rx) = channel();
let q2 = q.clone();
let _t = thread::spawn(move || {
- for _ in 0..100000 {
+ for _ in 0..count {
loop {
match q2.pop() {
Some(1) => break,
@@ -93,7 +94,7 @@ fn stress() {
}
tx.send(()).unwrap();
});
- for _ in 0..100000 {
+ for _ in 0..count {
q.push(1);
}
rx.recv().unwrap();
diff --git a/library/std/src/sync/mpsc/stream.rs b/library/std/src/sync/mpsc/stream.rs
index 4c3812c79..4592e9141 100644
--- a/library/std/src/sync/mpsc/stream.rs
+++ b/library/std/src/sync/mpsc/stream.rs
@@ -114,7 +114,7 @@ impl<T> Packet<T> {
match self.queue.producer_addition().cnt.fetch_add(1, Ordering::SeqCst) {
// As described in the mod's doc comment, -1 == wakeup
-1 => UpWoke(self.take_to_wake()),
- // As as described before, SPSC queues must be >= -2
+ // As described before, SPSC queues must be >= -2
-2 => UpSuccess,
// Be sure to preserve the disconnected state, and the return value
diff --git a/library/std/src/sync/mpsc/sync_tests.rs b/library/std/src/sync/mpsc/sync_tests.rs
index e58649bab..63c794369 100644
--- a/library/std/src/sync/mpsc/sync_tests.rs
+++ b/library/std/src/sync/mpsc/sync_tests.rs
@@ -113,23 +113,25 @@ fn chan_gone_concurrent() {
#[test]
fn stress() {
+ let count = if cfg!(miri) { 100 } else { 10000 };
let (tx, rx) = sync_channel::<i32>(0);
thread::spawn(move || {
- for _ in 0..10000 {
+ for _ in 0..count {
tx.send(1).unwrap();
}
});
- for _ in 0..10000 {
+ for _ in 0..count {
assert_eq!(rx.recv().unwrap(), 1);
}
}
#[test]
fn stress_recv_timeout_two_threads() {
+ let count = if cfg!(miri) { 100 } else { 10000 };
let (tx, rx) = sync_channel::<i32>(0);
thread::spawn(move || {
- for _ in 0..10000 {
+ for _ in 0..count {
tx.send(1).unwrap();
}
});
@@ -146,12 +148,12 @@ fn stress_recv_timeout_two_threads() {
}
}
- assert_eq!(recv_count, 10000);
+ assert_eq!(recv_count, count);
}
#[test]
fn stress_recv_timeout_shared() {
- const AMT: u32 = 1000;
+ const AMT: u32 = if cfg!(miri) { 100 } else { 1000 };
const NTHREADS: u32 = 8;
let (tx, rx) = sync_channel::<i32>(0);
let (dtx, drx) = sync_channel::<()>(0);
@@ -191,7 +193,7 @@ fn stress_recv_timeout_shared() {
#[test]
fn stress_shared() {
- const AMT: u32 = 1000;
+ const AMT: u32 = if cfg!(miri) { 100 } else { 1000 };
const NTHREADS: u32 = 8;
let (tx, rx) = sync_channel::<i32>(0);
let (dtx, drx) = sync_channel::<()>(0);
@@ -438,12 +440,13 @@ fn stream_send_recv_stress() {
#[test]
fn recv_a_lot() {
+ let count = if cfg!(miri) { 1000 } else { 10000 };
// Regression test that we don't run out of stack in scheduler context
- let (tx, rx) = sync_channel(10000);
- for _ in 0..10000 {
+ let (tx, rx) = sync_channel(count);
+ for _ in 0..count {
tx.send(()).unwrap();
}
- for _ in 0..10000 {
+ for _ in 0..count {
rx.recv().unwrap();
}
}
diff --git a/library/std/src/sync/mpsc/tests.rs b/library/std/src/sync/mpsc/tests.rs
index 4deb3e596..f6d0796f6 100644
--- a/library/std/src/sync/mpsc/tests.rs
+++ b/library/std/src/sync/mpsc/tests.rs
@@ -120,13 +120,14 @@ fn chan_gone_concurrent() {
#[test]
fn stress() {
+ let count = if cfg!(miri) { 100 } else { 10000 };
let (tx, rx) = channel::<i32>();
let t = thread::spawn(move || {
- for _ in 0..10000 {
+ for _ in 0..count {
tx.send(1).unwrap();
}
});
- for _ in 0..10000 {
+ for _ in 0..count {
assert_eq!(rx.recv().unwrap(), 1);
}
t.join().ok().expect("thread panicked");
@@ -134,7 +135,7 @@ fn stress() {
#[test]
fn stress_shared() {
- const AMT: u32 = 10000;
+ const AMT: u32 = if cfg!(miri) { 100 } else { 10000 };
const NTHREADS: u32 = 8;
let (tx, rx) = channel::<i32>();
@@ -504,12 +505,13 @@ fn very_long_recv_timeout_wont_panic() {
#[test]
fn recv_a_lot() {
+ let count = if cfg!(miri) { 1000 } else { 10000 };
// Regression test that we don't run out of stack in scheduler context
let (tx, rx) = channel();
- for _ in 0..10000 {
+ for _ in 0..count {
tx.send(()).unwrap();
}
- for _ in 0..10000 {
+ for _ in 0..count {
rx.recv().unwrap();
}
}
diff --git a/library/std/src/sync/mutex.rs b/library/std/src/sync/mutex.rs
index e0d13cd64..de851c8fb 100644
--- a/library/std/src/sync/mutex.rs
+++ b/library/std/src/sync/mutex.rs
@@ -192,6 +192,7 @@ unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
and cause Futures to not implement `Send`"]
#[stable(feature = "rust1", since = "1.0.0")]
#[clippy::has_significant_drop]
+#[cfg_attr(not(test), rustc_diagnostic_item = "MutexGuard")]
pub struct MutexGuard<'a, T: ?Sized + 'a> {
lock: &'a Mutex<T>,
poison: poison::Guard,
diff --git a/library/std/src/sync/once.rs b/library/std/src/sync/once.rs
index a7feea588..0f25417d6 100644
--- a/library/std/src/sync/once.rs
+++ b/library/std/src/sync/once.rs
@@ -3,99 +3,12 @@
//! This primitive is meant to be used to run one-time initialization. An
//! example use case would be for initializing an FFI library.
-// A "once" is a relatively simple primitive, and it's also typically provided
-// by the OS as well (see `pthread_once` or `InitOnceExecuteOnce`). The OS
-// primitives, however, tend to have surprising restrictions, such as the Unix
-// one doesn't allow an argument to be passed to the function.
-//
-// As a result, we end up implementing it ourselves in the standard library.
-// This also gives us the opportunity to optimize the implementation a bit which
-// should help the fast path on call sites. Consequently, let's explain how this
-// primitive works now!
-//
-// So to recap, the guarantees of a Once are that it will call the
-// initialization closure at most once, and it will never return until the one
-// that's running has finished running. This means that we need some form of
-// blocking here while the custom callback is running at the very least.
-// Additionally, we add on the restriction of **poisoning**. Whenever an
-// initialization closure panics, the Once enters a "poisoned" state which means
-// that all future calls will immediately panic as well.
-//
-// So to implement this, one might first reach for a `Mutex`, but those cannot
-// be put into a `static`. It also gets a lot harder with poisoning to figure
-// out when the mutex needs to be deallocated because it's not after the closure
-// finishes, but after the first successful closure finishes.
-//
-// All in all, this is instead implemented with atomics and lock-free
-// operations! Whee! Each `Once` has one word of atomic state, and this state is
-// CAS'd on to determine what to do. There are four possible state of a `Once`:
-//
-// * Incomplete - no initialization has run yet, and no thread is currently
-// using the Once.
-// * Poisoned - some thread has previously attempted to initialize the Once, but
-// it panicked, so the Once is now poisoned. There are no other
-// threads currently accessing this Once.
-// * Running - some thread is currently attempting to run initialization. It may
-// succeed, so all future threads need to wait for it to finish.
-// Note that this state is accompanied with a payload, described
-// below.
-// * Complete - initialization has completed and all future calls should finish
-// immediately.
-//
-// With 4 states we need 2 bits to encode this, and we use the remaining bits
-// in the word we have allocated as a queue of threads waiting for the thread
-// responsible for entering the RUNNING state. This queue is just a linked list
-// of Waiter nodes which is monotonically increasing in size. Each node is
-// allocated on the stack, and whenever the running closure finishes it will
-// consume the entire queue and notify all waiters they should try again.
-//
-// You'll find a few more details in the implementation, but that's the gist of
-// it!
-//
-// Atomic orderings:
-// When running `Once` we deal with multiple atomics:
-// `Once.state_and_queue` and an unknown number of `Waiter.signaled`.
-// * `state_and_queue` is used (1) as a state flag, (2) for synchronizing the
-// result of the `Once`, and (3) for synchronizing `Waiter` nodes.
-// - At the end of the `call_inner` function we have to make sure the result
-// of the `Once` is acquired. So every load which can be the only one to
-// load COMPLETED must have at least Acquire ordering, which means all
-// three of them.
-// - `WaiterQueue::Drop` is the only place that may store COMPLETED, and
-// must do so with Release ordering to make the result available.
-// - `wait` inserts `Waiter` nodes as a pointer in `state_and_queue`, and
-// needs to make the nodes available with Release ordering. The load in
-// its `compare_exchange` can be Relaxed because it only has to compare
-// the atomic, not to read other data.
-// - `WaiterQueue::Drop` must see the `Waiter` nodes, so it must load
-// `state_and_queue` with Acquire ordering.
-// - There is just one store where `state_and_queue` is used only as a
-// state flag, without having to synchronize data: switching the state
-// from INCOMPLETE to RUNNING in `call_inner`. This store can be Relaxed,
-// but the read has to be Acquire because of the requirements mentioned
-// above.
-// * `Waiter.signaled` is both used as a flag, and to protect a field with
-// interior mutability in `Waiter`. `Waiter.thread` is changed in
-// `WaiterQueue::Drop` which then sets `signaled` with Release ordering.
-// After `wait` loads `signaled` with Acquire and sees it is true, it needs to
-// see the changes to drop the `Waiter` struct correctly.
-// * There is one place where the two atomics `Once.state_and_queue` and
-// `Waiter.signaled` come together, and might be reordered by the compiler or
-// processor. Because both use Acquire ordering such a reordering is not
-// allowed, so no need for SeqCst.
-
#[cfg(all(test, not(target_os = "emscripten")))]
mod tests;
-use crate::cell::Cell;
use crate::fmt;
-use crate::marker;
use crate::panic::{RefUnwindSafe, UnwindSafe};
-use crate::ptr;
-use crate::sync::atomic::{AtomicBool, AtomicPtr, Ordering};
-use crate::thread::{self, Thread};
-
-type Masked = ();
+use crate::sys_common::once as sys;
/// A synchronization primitive which can be used to run a one-time global
/// initialization. Useful for one-time initialization for FFI or related
@@ -114,19 +27,9 @@ type Masked = ();
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Once {
- // `state_and_queue` is actually a pointer to a `Waiter` with extra state
- // bits, so we add the `PhantomData` appropriately.
- state_and_queue: AtomicPtr<Masked>,
- _marker: marker::PhantomData<*const Waiter>,
+ inner: sys::Once,
}
-// The `PhantomData` of a raw pointer removes these two auto traits, but we
-// enforce both below in the implementation so this should be safe to add.
-#[stable(feature = "rust1", since = "1.0.0")]
-unsafe impl Sync for Once {}
-#[stable(feature = "rust1", since = "1.0.0")]
-unsafe impl Send for Once {}
-
#[stable(feature = "sync_once_unwind_safe", since = "1.59.0")]
impl UnwindSafe for Once {}
@@ -136,10 +39,8 @@ impl RefUnwindSafe for Once {}
/// State yielded to [`Once::call_once_force()`]’s closure parameter. The state
/// can be used to query the poison status of the [`Once`].
#[stable(feature = "once_poison", since = "1.51.0")]
-#[derive(Debug)]
pub struct OnceState {
- poisoned: bool,
- set_state_on_drop_to: Cell<*mut Masked>,
+ pub(crate) inner: sys::OnceState,
}
/// Initialization value for static [`Once`] values.
@@ -159,38 +60,6 @@ pub struct OnceState {
)]
pub const ONCE_INIT: Once = Once::new();
-// Four states that a Once can be in, encoded into the lower bits of
-// `state_and_queue` in the Once structure.
-const INCOMPLETE: usize = 0x0;
-const POISONED: usize = 0x1;
-const RUNNING: usize = 0x2;
-const COMPLETE: usize = 0x3;
-
-// Mask to learn about the state. All other bits are the queue of waiters if
-// this is in the RUNNING state.
-const STATE_MASK: usize = 0x3;
-
-// Representation of a node in the linked list of waiters, used while in the
-// RUNNING state.
-// Note: `Waiter` can't hold a mutable pointer to the next thread, because then
-// `wait` would both hand out a mutable reference to its `Waiter` node, and keep
-// a shared reference to check `signaled`. Instead we hold shared references and
-// use interior mutability.
-#[repr(align(4))] // Ensure the two lower bits are free to use as state bits.
-struct Waiter {
- thread: Cell<Option<Thread>>,
- signaled: AtomicBool,
- next: *const Waiter,
-}
-
-// Head of a linked list of waiters.
-// Every node is a struct on the stack of a waiting thread.
-// Will wake up the waiters when it gets dropped, i.e. also on panic.
-struct WaiterQueue<'a> {
- state_and_queue: &'a AtomicPtr<Masked>,
- set_state_on_drop_to: *mut Masked,
-}
-
impl Once {
/// Creates a new `Once` value.
#[inline]
@@ -198,10 +67,7 @@ impl Once {
#[rustc_const_stable(feature = "const_once_new", since = "1.32.0")]
#[must_use]
pub const fn new() -> Once {
- Once {
- state_and_queue: AtomicPtr::new(ptr::invalid_mut(INCOMPLETE)),
- _marker: marker::PhantomData,
- }
+ Once { inner: sys::Once::new() }
}
/// Performs an initialization routine once and only once. The given closure
@@ -261,6 +127,7 @@ impl Once {
/// This is similar to [poisoning with mutexes][poison].
///
/// [poison]: struct.Mutex.html#poisoning
+ #[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[track_caller]
pub fn call_once<F>(&self, f: F)
@@ -268,12 +135,12 @@ impl Once {
F: FnOnce(),
{
// Fast path check
- if self.is_completed() {
+ if self.inner.is_completed() {
return;
}
let mut f = Some(f);
- self.call_inner(false, &mut |_| f.take().unwrap()());
+ self.inner.call(false, &mut |_| f.take().unwrap()());
}
/// Performs the same function as [`call_once()`] except ignores poisoning.
@@ -320,18 +187,19 @@ impl Once {
/// // once any success happens, we stop propagating the poison
/// INIT.call_once(|| {});
/// ```
+ #[inline]
#[stable(feature = "once_poison", since = "1.51.0")]
pub fn call_once_force<F>(&self, f: F)
where
F: FnOnce(&OnceState),
{
// Fast path check
- if self.is_completed() {
+ if self.inner.is_completed() {
return;
}
let mut f = Some(f);
- self.call_inner(true, &mut |p| f.take().unwrap()(p));
+ self.inner.call(true, &mut |p| f.take().unwrap()(p));
}
/// Returns `true` if some [`call_once()`] call has completed
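The public behaviour documented here is unchanged by the move to `sys::Once`; a quick usage sketch of `call_once`, `call_once_force`, and poisoning:

```rust
use std::sync::Once;

static INIT: Once = Once::new();

fn main() {
    // The first initializer panics, poisoning the `Once`.
    let _ = std::panic::catch_unwind(|| {
        INIT.call_once(|| panic!("initialization failed"));
    });
    assert!(!INIT.is_completed());

    // `call_once_force` ignores the poison and can observe it via `OnceState`.
    INIT.call_once_force(|state| {
        assert!(state.is_poisoned());
    });
    assert!(INIT.is_completed());
}
```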
@@ -378,119 +246,7 @@ impl Once {
#[stable(feature = "once_is_completed", since = "1.43.0")]
#[inline]
pub fn is_completed(&self) -> bool {
- // An `Acquire` load is enough because that makes all the initialization
- // operations visible to us, and, this being a fast path, weaker
- // ordering helps with performance. This `Acquire` synchronizes with
- // `Release` operations on the slow path.
- self.state_and_queue.load(Ordering::Acquire).addr() == COMPLETE
- }
-
- // This is a non-generic function to reduce the monomorphization cost of
- // using `call_once` (this isn't exactly a trivial or small implementation).
- //
- // Additionally, this is tagged with `#[cold]` as it should indeed be cold
- // and it helps let LLVM know that calls to this function should be off the
- // fast path. Essentially, this should help generate more straight line code
- // in LLVM.
- //
- // Finally, this takes an `FnMut` instead of a `FnOnce` because there's
- // currently no way to take an `FnOnce` and call it via virtual dispatch
- // without some allocation overhead.
- #[cold]
- #[track_caller]
- fn call_inner(&self, ignore_poisoning: bool, init: &mut dyn FnMut(&OnceState)) {
- let mut state_and_queue = self.state_and_queue.load(Ordering::Acquire);
- loop {
- match state_and_queue.addr() {
- COMPLETE => break,
- POISONED if !ignore_poisoning => {
- // Panic to propagate the poison.
- panic!("Once instance has previously been poisoned");
- }
- POISONED | INCOMPLETE => {
- // Try to register this thread as the one RUNNING.
- let exchange_result = self.state_and_queue.compare_exchange(
- state_and_queue,
- ptr::invalid_mut(RUNNING),
- Ordering::Acquire,
- Ordering::Acquire,
- );
- if let Err(old) = exchange_result {
- state_and_queue = old;
- continue;
- }
- // `waiter_queue` will manage other waiting threads, and
- // wake them up on drop.
- let mut waiter_queue = WaiterQueue {
- state_and_queue: &self.state_and_queue,
- set_state_on_drop_to: ptr::invalid_mut(POISONED),
- };
- // Run the initialization function, letting it know if we're
- // poisoned or not.
- let init_state = OnceState {
- poisoned: state_and_queue.addr() == POISONED,
- set_state_on_drop_to: Cell::new(ptr::invalid_mut(COMPLETE)),
- };
- init(&init_state);
- waiter_queue.set_state_on_drop_to = init_state.set_state_on_drop_to.get();
- break;
- }
- _ => {
- // All other values must be RUNNING with possibly a
- // pointer to the waiter queue in the more significant bits.
- assert!(state_and_queue.addr() & STATE_MASK == RUNNING);
- wait(&self.state_and_queue, state_and_queue);
- state_and_queue = self.state_and_queue.load(Ordering::Acquire);
- }
- }
- }
- }
-}
-
-fn wait(state_and_queue: &AtomicPtr<Masked>, mut current_state: *mut Masked) {
- // Note: the following code was carefully written to avoid creating a
- // mutable reference to `node` that gets aliased.
- loop {
- // Don't queue this thread if the status is no longer running,
- // otherwise we will not be woken up.
- if current_state.addr() & STATE_MASK != RUNNING {
- return;
- }
-
- // Create the node for our current thread.
- let node = Waiter {
- thread: Cell::new(Some(thread::current())),
- signaled: AtomicBool::new(false),
- next: current_state.with_addr(current_state.addr() & !STATE_MASK) as *const Waiter,
- };
- let me = &node as *const Waiter as *const Masked as *mut Masked;
-
- // Try to slide in the node at the head of the linked list, making sure
- // that another thread didn't just replace the head of the linked list.
- let exchange_result = state_and_queue.compare_exchange(
- current_state,
- me.with_addr(me.addr() | RUNNING),
- Ordering::Release,
- Ordering::Relaxed,
- );
- if let Err(old) = exchange_result {
- current_state = old;
- continue;
- }
-
- // We have enqueued ourselves, now lets wait.
- // It is important not to return before being signaled, otherwise we
- // would drop our `Waiter` node and leave a hole in the linked list
- // (and a dangling reference). Guard against spurious wakeups by
- // reparking ourselves until we are signaled.
- while !node.signaled.load(Ordering::Acquire) {
- // If the managing thread happens to signal and unpark us before we
- // can park ourselves, the result could be this thread never gets
- // unparked. Luckily `park` comes with the guarantee that if it got
- // an `unpark` just before on an unparked thread it does not park.
- thread::park();
- }
- break;
+ self.inner.is_completed()
}
}
@@ -501,37 +257,6 @@ impl fmt::Debug for Once {
}
}
-impl Drop for WaiterQueue<'_> {
- fn drop(&mut self) {
- // Swap out our state with however we finished.
- let state_and_queue =
- self.state_and_queue.swap(self.set_state_on_drop_to, Ordering::AcqRel);
-
- // We should only ever see an old state which was RUNNING.
- assert_eq!(state_and_queue.addr() & STATE_MASK, RUNNING);
-
- // Walk the entire linked list of waiters and wake them up (in lifo
- // order, last to register is first to wake up).
- unsafe {
- // Right after setting `node.signaled = true` the other thread may
- // free `node` if there happens to be has a spurious wakeup.
- // So we have to take out the `thread` field and copy the pointer to
- // `next` first.
- let mut queue =
- state_and_queue.with_addr(state_and_queue.addr() & !STATE_MASK) as *const Waiter;
- while !queue.is_null() {
- let next = (*queue).next;
- let thread = (*queue).thread.take().unwrap();
- (*queue).signaled.store(true, Ordering::Release);
- // ^- FIXME (maybe): This is another case of issue #55005
- // `store()` has a potentially dangling ref to `signaled`.
- queue = next;
- thread.unpark();
- }
- }
- }
-}
-
impl OnceState {
/// Returns `true` if the associated [`Once`] was poisoned prior to the
/// invocation of the closure passed to [`Once::call_once_force()`].
@@ -568,13 +293,22 @@ impl OnceState {
/// assert!(!state.is_poisoned());
/// });
#[stable(feature = "once_poison", since = "1.51.0")]
+ #[inline]
pub fn is_poisoned(&self) -> bool {
- self.poisoned
+ self.inner.is_poisoned()
}
/// Poison the associated [`Once`] without explicitly panicking.
- // NOTE: This is currently only exposed for the `lazy` module
+ // NOTE: This is currently only exposed for `OnceLock`.
+ #[inline]
pub(crate) fn poison(&self) {
- self.set_state_on_drop_to.set(ptr::invalid_mut(POISONED));
+ self.inner.poison();
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for OnceState {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("OnceState").field("poisoned", &self.is_poisoned()).finish()
}
}
diff --git a/library/std/src/sync/once_lock.rs b/library/std/src/sync/once_lock.rs
index 813516040..37413ec62 100644
--- a/library/std/src/sync/once_lock.rs
+++ b/library/std/src/sync/once_lock.rs
@@ -3,7 +3,6 @@ use crate::fmt;
use crate::marker::PhantomData;
use crate::mem::MaybeUninit;
use crate::panic::{RefUnwindSafe, UnwindSafe};
-use crate::pin::Pin;
use crate::sync::Once;
/// A synchronization primitive which can be written to only once.
@@ -223,60 +222,6 @@ impl<T> OnceLock<T> {
Ok(unsafe { self.get_unchecked() })
}
- /// Internal-only API that gets the contents of the cell, initializing it
- /// in two steps with `f` and `g` if the cell was empty.
- ///
- /// `f` is called to construct the value, which is then moved into the cell
- /// and given as a (pinned) mutable reference to `g` to finish
- /// initialization.
- ///
- /// This allows `g` to inspect an manipulate the value after it has been
- /// moved into its final place in the cell, but before the cell is
- /// considered initialized.
- ///
- /// # Panics
- ///
- /// If `f` or `g` panics, the panic is propagated to the caller, and the
- /// cell remains uninitialized.
- ///
- /// With the current implementation, if `g` panics, the value from `f` will
- /// not be dropped. This should probably be fixed if this is ever used for
- /// a type where this matters.
- ///
- /// It is an error to reentrantly initialize the cell from `f`. The exact
- /// outcome is unspecified. Current implementation deadlocks, but this may
- /// be changed to a panic in the future.
- pub(crate) fn get_or_init_pin<F, G>(self: Pin<&Self>, f: F, g: G) -> Pin<&T>
- where
- F: FnOnce() -> T,
- G: FnOnce(Pin<&mut T>),
- {
- if let Some(value) = self.get_ref().get() {
- // SAFETY: The inner value was already initialized, and will not be
- // moved anymore.
- return unsafe { Pin::new_unchecked(value) };
- }
-
- let slot = &self.value;
-
- // Ignore poisoning from other threads
- // If another thread panics, then we'll be able to run our closure
- self.once.call_once_force(|_| {
- let value = f();
- // SAFETY: We use the Once (self.once) to guarantee unique access
- // to the UnsafeCell (slot).
- let value: &mut T = unsafe { (&mut *slot.get()).write(value) };
- // SAFETY: The value has been written to its final place in
- // self.value. We do not to move it anymore, which we promise here
- // with a Pin<&mut T>.
- g(unsafe { Pin::new_unchecked(value) });
- });
-
- // SAFETY: The inner value has been initialized, and will not be moved
- // anymore.
- unsafe { Pin::new_unchecked(self.get_ref().get_unchecked()) }
- }
-
/// Consumes the `OnceLock`, returning the wrapped value. Returns
/// `None` if the cell was empty.
///
diff --git a/library/std/src/sync/rwlock.rs b/library/std/src/sync/rwlock.rs
index 6e4a2cfc8..8b3877607 100644
--- a/library/std/src/sync/rwlock.rs
+++ b/library/std/src/sync/rwlock.rs
@@ -76,6 +76,7 @@ use crate::sys_common::rwlock as sys;
///
/// [`Mutex`]: super::Mutex
#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "RwLock")]
pub struct RwLock<T: ?Sized> {
inner: sys::MovableRwLock,
poison: poison::Flag,
@@ -101,6 +102,7 @@ unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}
and cause Futures to not implement `Send`"]
#[stable(feature = "rust1", since = "1.0.0")]
#[clippy::has_significant_drop]
+#[cfg_attr(not(test), rustc_diagnostic_item = "RwLockReadGuard")]
pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
// NB: we use a pointer instead of `&'a T` to avoid `noalias` violations, because a
// `Ref` argument doesn't hold immutability for its whole scope, only until it drops.
@@ -130,6 +132,7 @@ unsafe impl<T: ?Sized + Sync> Sync for RwLockReadGuard<'_, T> {}
and cause Futures to not implement `Send`"]
#[stable(feature = "rust1", since = "1.0.0")]
#[clippy::has_significant_drop]
+#[cfg_attr(not(test), rustc_diagnostic_item = "RwLockWriteGuard")]
pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> {
lock: &'a RwLock<T>,
poison: poison::Guard,
@@ -164,7 +167,7 @@ impl<T> RwLock<T> {
}
impl<T: ?Sized> RwLock<T> {
- /// Locks this rwlock with shared read access, blocking the current thread
+ /// Locks this `RwLock` with shared read access, blocking the current thread
/// until it can be acquired.
///
/// The calling thread will be blocked until there are no more writers which
@@ -178,9 +181,10 @@ impl<T: ?Sized> RwLock<T> {
///
/// # Errors
///
- /// This function will return an error if the RwLock is poisoned. An RwLock
- /// is poisoned whenever a writer panics while holding an exclusive lock.
- /// The failure will occur immediately after the lock has been acquired.
+ /// This function will return an error if the `RwLock` is poisoned. An
+ /// `RwLock` is poisoned whenever a writer panics while holding an exclusive
+ /// lock. The failure will occur immediately after the lock has been
+ /// acquired.
///
/// # Panics
///
@@ -212,7 +216,7 @@ impl<T: ?Sized> RwLock<T> {
}
}
- /// Attempts to acquire this rwlock with shared read access.
+ /// Attempts to acquire this `RwLock` with shared read access.
///
/// If the access could not be granted at this time, then `Err` is returned.
/// Otherwise, an RAII guard is returned which will release the shared access
@@ -225,13 +229,13 @@ impl<T: ?Sized> RwLock<T> {
///
/// # Errors
///
- /// This function will return the [`Poisoned`] error if the RwLock is poisoned.
- /// An RwLock is poisoned whenever a writer panics while holding an exclusive
- /// lock. `Poisoned` will only be returned if the lock would have otherwise been
- /// acquired.
+ /// This function will return the [`Poisoned`] error if the `RwLock` is
+ /// poisoned. An `RwLock` is poisoned whenever a writer panics while holding
+ /// an exclusive lock. `Poisoned` will only be returned if the lock would
+ /// have otherwise been acquired.
///
- /// This function will return the [`WouldBlock`] error if the RwLock could not
- /// be acquired because it was already locked exclusively.
+ /// This function will return the [`WouldBlock`] error if the `RwLock` could
+ /// not be acquired because it was already locked exclusively.
///
/// [`Poisoned`]: TryLockError::Poisoned
/// [`WouldBlock`]: TryLockError::WouldBlock
@@ -260,20 +264,20 @@ impl<T: ?Sized> RwLock<T> {
}
}
- /// Locks this rwlock with exclusive write access, blocking the current
+ /// Locks this `RwLock` with exclusive write access, blocking the current
/// thread until it can be acquired.
///
/// This function will not return while other writers or other readers
/// currently have access to the lock.
///
- /// Returns an RAII guard which will drop the write access of this rwlock
+ /// Returns an RAII guard which will drop the write access of this `RwLock`
/// when dropped.
///
/// # Errors
///
- /// This function will return an error if the RwLock is poisoned. An RwLock
- /// is poisoned whenever a writer panics while holding an exclusive lock.
- /// An error will be returned when the lock is acquired.
+ /// This function will return an error if the `RwLock` is poisoned. An
+ /// `RwLock` is poisoned whenever a writer panics while holding an exclusive
+ /// lock. An error will be returned when the lock is acquired.
///
/// # Panics
///
@@ -300,7 +304,7 @@ impl<T: ?Sized> RwLock<T> {
}
}
- /// Attempts to lock this rwlock with exclusive write access.
+ /// Attempts to lock this `RwLock` with exclusive write access.
///
/// If the lock could not be acquired at this time, then `Err` is returned.
/// Otherwise, an RAII guard is returned which will release the lock when
@@ -313,13 +317,13 @@ impl<T: ?Sized> RwLock<T> {
///
/// # Errors
///
- /// This function will return the [`Poisoned`] error if the RwLock is
- /// poisoned. An RwLock is poisoned whenever a writer panics while holding
- /// an exclusive lock. `Poisoned` will only be returned if the lock would have
- /// otherwise been acquired.
+ /// This function will return the [`Poisoned`] error if the `RwLock` is
+ /// poisoned. An `RwLock` is poisoned whenever a writer panics while holding
+ /// an exclusive lock. `Poisoned` will only be returned if the lock would
+ /// have otherwise been acquired.
///
- /// This function will return the [`WouldBlock`] error if the RwLock could not
- /// be acquired because it was already locked exclusively.
+ /// This function will return the [`WouldBlock`] error if the `RwLock` could
+ /// not be acquired because it was already locked exclusively.
///
/// [`Poisoned`]: TryLockError::Poisoned
/// [`WouldBlock`]: TryLockError::WouldBlock
@@ -419,10 +423,10 @@ impl<T: ?Sized> RwLock<T> {
///
/// # Errors
///
- /// This function will return an error if the RwLock is poisoned. An RwLock
- /// is poisoned whenever a writer panics while holding an exclusive lock. An
- /// error will only be returned if the lock would have otherwise been
- /// acquired.
+ /// This function will return an error if the `RwLock` is poisoned. An
+ /// `RwLock` is poisoned whenever a writer panics while holding an exclusive
+ /// lock. An error will only be returned if the lock would have otherwise
+ /// been acquired.
///
/// # Examples
///
@@ -452,10 +456,10 @@ impl<T: ?Sized> RwLock<T> {
///
/// # Errors
///
- /// This function will return an error if the RwLock is poisoned. An RwLock
- /// is poisoned whenever a writer panics while holding an exclusive lock. An
- /// error will only be returned if the lock would have otherwise been
- /// acquired.
+ /// This function will return an error if the `RwLock` is poisoned. An
+ /// `RwLock` is poisoned whenever a writer panics while holding an exclusive
+ /// lock. An error will only be returned if the lock would have otherwise
+ /// been acquired.
///
/// # Examples
///
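
The doc rewordings above all restate the same poisoning contract: a writer that panics while holding the lock poisons it, and later `read`/`write` calls surface that as a `PoisonError` wrapping the guard. A minimal usage sketch of handling that case (an editorial illustration, not part of this diff):

    use std::sync::{Arc, RwLock};
    use std::thread;

    fn main() {
        let lock = Arc::new(RwLock::new(0u32));

        // A writer that panics while holding the lock poisons it.
        let l = Arc::clone(&lock);
        let _ = thread::spawn(move || {
            let _guard = l.write().unwrap();
            panic!("poison the lock");
        })
        .join();

        // Later reads report the poisoning; the data stays reachable through
        // PoisonError::into_inner if the caller decides it is still usable.
        let value = match lock.read() {
            Ok(guard) => *guard,
            Err(poisoned) => *poisoned.into_inner(),
        };
        assert_eq!(value, 0);
    }
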
diff --git a/library/std/src/sync/rwlock/tests.rs b/library/std/src/sync/rwlock/tests.rs
index 08255c985..b5b3ad989 100644
--- a/library/std/src/sync/rwlock/tests.rs
+++ b/library/std/src/sync/rwlock/tests.rs
@@ -19,7 +19,7 @@ fn smoke() {
#[test]
fn frob() {
const N: u32 = 10;
- const M: usize = 1000;
+ const M: usize = if cfg!(miri) { 100 } else { 1000 };
let r = Arc::new(RwLock::new(()));
diff --git a/library/std/src/sys/common/mod.rs b/library/std/src/sys/common/mod.rs
index ff64d2aa8..29fc0835d 100644
--- a/library/std/src/sys/common/mod.rs
+++ b/library/std/src/sys/common/mod.rs
@@ -11,3 +11,7 @@
#![allow(dead_code)]
pub mod alloc;
+pub mod small_c_string;
+
+#[cfg(test)]
+mod tests;
diff --git a/library/std/src/sys/common/small_c_string.rs b/library/std/src/sys/common/small_c_string.rs
new file mode 100644
index 000000000..01acd5191
--- /dev/null
+++ b/library/std/src/sys/common/small_c_string.rs
@@ -0,0 +1,58 @@
+use crate::ffi::{CStr, CString};
+use crate::mem::MaybeUninit;
+use crate::path::Path;
+use crate::slice;
+use crate::{io, ptr};
+
+// Make sure to stay under 4096 so the compiler doesn't insert a probe frame:
+// https://docs.rs/compiler_builtins/latest/compiler_builtins/probestack/index.html
+#[cfg(not(target_os = "espidf"))]
+const MAX_STACK_ALLOCATION: usize = 384;
+#[cfg(target_os = "espidf")]
+const MAX_STACK_ALLOCATION: usize = 32;
+
+const NUL_ERR: io::Error =
+ io::const_io_error!(io::ErrorKind::InvalidInput, "file name contained an unexpected NUL byte");
+
+#[inline]
+pub fn run_path_with_cstr<T, F>(path: &Path, f: F) -> io::Result<T>
+where
+ F: FnOnce(&CStr) -> io::Result<T>,
+{
+ run_with_cstr(path.as_os_str().bytes(), f)
+}
+
+#[inline]
+pub fn run_with_cstr<T, F>(bytes: &[u8], f: F) -> io::Result<T>
+where
+ F: FnOnce(&CStr) -> io::Result<T>,
+{
+ if bytes.len() >= MAX_STACK_ALLOCATION {
+ return run_with_cstr_allocating(bytes, f);
+ }
+
+ let mut buf = MaybeUninit::<[u8; MAX_STACK_ALLOCATION]>::uninit();
+ let buf_ptr = buf.as_mut_ptr() as *mut u8;
+
+ unsafe {
+ ptr::copy_nonoverlapping(bytes.as_ptr(), buf_ptr, bytes.len());
+ buf_ptr.add(bytes.len()).write(0);
+ }
+
+ match CStr::from_bytes_with_nul(unsafe { slice::from_raw_parts(buf_ptr, bytes.len() + 1) }) {
+ Ok(s) => f(s),
+ Err(_) => Err(NUL_ERR),
+ }
+}
+
+#[cold]
+#[inline(never)]
+fn run_with_cstr_allocating<T, F>(bytes: &[u8], f: F) -> io::Result<T>
+where
+ F: FnOnce(&CStr) -> io::Result<T>,
+{
+ match CString::new(bytes) {
+ Ok(s) => f(&s),
+ Err(_) => Err(NUL_ERR),
+ }
+}
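
run_with_cstr keeps the common case allocation-free: inputs shorter than MAX_STACK_ALLOCATION are copied into a stack buffer, NUL-terminated in place, and handed to the closure as a borrowed &CStr, while longer inputs take the #[cold] heap path. The closure shape is what lets the &CStr borrow from the stack frame instead of escaping it. A self-contained sketch of the same pattern (the constant and names here are illustrative, not the ones above):

    use std::ffi::{CStr, CString};
    use std::io;
    use std::mem::MaybeUninit;

    const MAX_STACK: usize = 384; // illustrative; std picks a target-specific limit

    fn with_cstr<T>(bytes: &[u8], f: impl FnOnce(&CStr) -> io::Result<T>) -> io::Result<T> {
        let nul_err = || io::Error::new(io::ErrorKind::InvalidInput, "unexpected NUL byte");
        if bytes.len() >= MAX_STACK {
            // Cold path: heap-allocate, like run_with_cstr_allocating above.
            return match CString::new(bytes) {
                Ok(s) => f(&s),
                Err(_) => Err(nul_err()),
            };
        }
        // Hot path: copy into a stack buffer and append the NUL terminator.
        let mut buf = MaybeUninit::<[u8; MAX_STACK]>::uninit();
        let ptr = buf.as_mut_ptr() as *mut u8;
        unsafe {
            std::ptr::copy_nonoverlapping(bytes.as_ptr(), ptr, bytes.len());
            ptr.add(bytes.len()).write(0);
        }
        let with_nul = unsafe { std::slice::from_raw_parts(ptr, bytes.len() + 1) };
        match CStr::from_bytes_with_nul(with_nul) {
            Ok(s) => f(s),
            Err(_) => Err(nul_err()),
        }
    }

    fn main() -> io::Result<()> {
        with_cstr(b"some/short/path", |c| {
            println!("{} bytes, no heap allocation", c.to_bytes().len());
            Ok(())
        })
    }
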
diff --git a/library/std/src/sys/common/tests.rs b/library/std/src/sys/common/tests.rs
new file mode 100644
index 000000000..fb6f5d6af
--- /dev/null
+++ b/library/std/src/sys/common/tests.rs
@@ -0,0 +1,66 @@
+use crate::ffi::CString;
+use crate::hint::black_box;
+use crate::path::Path;
+use crate::sys::common::small_c_string::run_path_with_cstr;
+use core::iter::repeat;
+
+#[test]
+fn stack_allocation_works() {
+ let path = Path::new("abc");
+ let result = run_path_with_cstr(path, |p| {
+ assert_eq!(p, &*CString::new(path.as_os_str().bytes()).unwrap());
+ Ok(42)
+ });
+ assert_eq!(result.unwrap(), 42);
+}
+
+#[test]
+fn stack_allocation_fails() {
+ let path = Path::new("ab\0");
+ assert!(run_path_with_cstr::<(), _>(path, |_| unreachable!()).is_err());
+}
+
+#[test]
+fn heap_allocation_works() {
+ let path = repeat("a").take(384).collect::<String>();
+ let path = Path::new(&path);
+ let result = run_path_with_cstr(path, |p| {
+ assert_eq!(p, &*CString::new(path.as_os_str().bytes()).unwrap());
+ Ok(42)
+ });
+ assert_eq!(result.unwrap(), 42);
+}
+
+#[test]
+fn heap_allocation_fails() {
+ let mut path = repeat("a").take(384).collect::<String>();
+ path.push('\0');
+ let path = Path::new(&path);
+ assert!(run_path_with_cstr::<(), _>(path, |_| unreachable!()).is_err());
+}
+
+#[bench]
+fn bench_stack_path_alloc(b: &mut test::Bencher) {
+ let path = repeat("a").take(383).collect::<String>();
+ let p = Path::new(&path);
+ b.iter(|| {
+ run_path_with_cstr(p, |cstr| {
+ black_box(cstr);
+ Ok(())
+ })
+ .unwrap();
+ });
+}
+
+#[bench]
+fn bench_heap_path_alloc(b: &mut test::Bencher) {
+ let path = repeat("a").take(384).collect::<String>();
+ let p = Path::new(&path);
+ b.iter(|| {
+ run_path_with_cstr(p, |cstr| {
+ black_box(cstr);
+ Ok(())
+ })
+ .unwrap();
+ });
+}
diff --git a/library/std/src/sys/hermit/args.rs b/library/std/src/sys/hermit/args.rs
index 1c7e1dd8d..afcae6c90 100644
--- a/library/std/src/sys/hermit/args.rs
+++ b/library/std/src/sys/hermit/args.rs
@@ -1,20 +1,37 @@
-use crate::ffi::OsString;
+use crate::ffi::{c_char, CStr, OsString};
use crate::fmt;
+use crate::os::unix::ffi::OsStringExt;
+use crate::ptr;
+use crate::sync::atomic::{
+ AtomicIsize, AtomicPtr,
+ Ordering::{Acquire, Relaxed, Release},
+};
use crate::vec;
+static ARGC: AtomicIsize = AtomicIsize::new(0);
+static ARGV: AtomicPtr<*const u8> = AtomicPtr::new(ptr::null_mut());
+
/// One-time global initialization.
pub unsafe fn init(argc: isize, argv: *const *const u8) {
- imp::init(argc, argv)
-}
-
-/// One-time global cleanup.
-pub unsafe fn cleanup() {
- imp::cleanup()
+ ARGC.store(argc, Relaxed);
+ // Use release ordering here to broadcast writes by the OS.
+ ARGV.store(argv as *mut *const u8, Release);
}
/// Returns the command line arguments
pub fn args() -> Args {
- imp::args()
+ // Synchronize with the store above.
+ let argv = ARGV.load(Acquire);
+ // If argv has not been initialized yet, do not return any arguments.
+ let argc = if argv.is_null() { 0 } else { ARGC.load(Relaxed) };
+ let args: Vec<OsString> = (0..argc)
+ .map(|i| unsafe {
+ let cstr = CStr::from_ptr(*argv.offset(i) as *const c_char);
+ OsStringExt::from_vec(cstr.to_bytes().to_vec())
+ })
+ .collect();
+
+ Args { iter: args.into_iter() }
}
pub struct Args {
@@ -51,44 +68,3 @@ impl DoubleEndedIterator for Args {
self.iter.next_back()
}
}
-
-mod imp {
- use super::Args;
- use crate::ffi::{CStr, OsString};
- use crate::os::unix::ffi::OsStringExt;
- use crate::ptr;
-
- use crate::sys_common::mutex::StaticMutex;
-
- static mut ARGC: isize = 0;
- static mut ARGV: *const *const u8 = ptr::null();
- static LOCK: StaticMutex = StaticMutex::new();
-
- pub unsafe fn init(argc: isize, argv: *const *const u8) {
- let _guard = LOCK.lock();
- ARGC = argc;
- ARGV = argv;
- }
-
- pub unsafe fn cleanup() {
- let _guard = LOCK.lock();
- ARGC = 0;
- ARGV = ptr::null();
- }
-
- pub fn args() -> Args {
- Args { iter: clone().into_iter() }
- }
-
- fn clone() -> Vec<OsString> {
- unsafe {
- let _guard = LOCK.lock();
- (0..ARGC)
- .map(|i| {
- let cstr = CStr::from_ptr(*ARGV.offset(i) as *const i8);
- OsStringExt::from_vec(cstr.to_bytes().to_vec())
- })
- .collect()
- }
- }
-}
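
The rewrite replaces the StaticMutex-guarded `static mut` globals with atomics: ARGC is stored first with Relaxed, then ARGV with Release, so a reader that acquire-loads a non-null ARGV also observes the matching count and the OS-written argument data. A minimal sketch of that publish/read pattern with a simpler payload (names are illustrative):

    use std::ptr;
    use std::sync::atomic::{
        AtomicIsize, AtomicPtr,
        Ordering::{Acquire, Relaxed, Release},
    };

    static COUNT: AtomicIsize = AtomicIsize::new(0);
    static DATA: AtomicPtr<u32> = AtomicPtr::new(ptr::null_mut());

    // Publisher: write the count first, then release-store the pointer.
    fn publish(values: &'static [u32]) {
        COUNT.store(values.len() as isize, Relaxed);
        DATA.store(values.as_ptr() as *mut u32, Release);
    }

    // Consumer: acquire-load the pointer; if it is non-null, the matching
    // count (and the pointed-to data) is guaranteed to be visible.
    fn read_all() -> Vec<u32> {
        let ptr = DATA.load(Acquire);
        if ptr.is_null() {
            return Vec::new();
        }
        let len = COUNT.load(Relaxed) as usize;
        (0..len).map(|i| unsafe { *ptr.add(i) }).collect()
    }

    fn main() {
        static VALUES: [u32; 3] = [1, 2, 3];
        publish(&VALUES);
        assert_eq!(read_all(), vec![1, 2, 3]);
    }
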
diff --git a/library/std/src/sys/hermit/condvar.rs b/library/std/src/sys/hermit/condvar.rs
deleted file mode 100644
index 22059ca0d..000000000
--- a/library/std/src/sys/hermit/condvar.rs
+++ /dev/null
@@ -1,90 +0,0 @@
-use crate::ffi::c_void;
-use crate::ptr;
-use crate::sync::atomic::{AtomicUsize, Ordering::SeqCst};
-use crate::sys::hermit::abi;
-use crate::sys::locks::Mutex;
-use crate::sys_common::lazy_box::{LazyBox, LazyInit};
-use crate::time::Duration;
-
-// The implementation is inspired by Andrew D. Birrell's paper
-// "Implementing Condition Variables with Semaphores"
-
-pub struct Condvar {
- counter: AtomicUsize,
- sem1: *const c_void,
- sem2: *const c_void,
-}
-
-pub(crate) type MovableCondvar = LazyBox<Condvar>;
-
-impl LazyInit for Condvar {
- fn init() -> Box<Self> {
- Box::new(Self::new())
- }
-}
-
-unsafe impl Send for Condvar {}
-unsafe impl Sync for Condvar {}
-
-impl Condvar {
- pub fn new() -> Self {
- let mut condvar =
- Self { counter: AtomicUsize::new(0), sem1: ptr::null(), sem2: ptr::null() };
- unsafe {
- let _ = abi::sem_init(&mut condvar.sem1, 0);
- let _ = abi::sem_init(&mut condvar.sem2, 0);
- }
- condvar
- }
-
- pub unsafe fn notify_one(&self) {
- if self.counter.load(SeqCst) > 0 {
- self.counter.fetch_sub(1, SeqCst);
- abi::sem_post(self.sem1);
- abi::sem_timedwait(self.sem2, 0);
- }
- }
-
- pub unsafe fn notify_all(&self) {
- let counter = self.counter.swap(0, SeqCst);
- for _ in 0..counter {
- abi::sem_post(self.sem1);
- }
- for _ in 0..counter {
- abi::sem_timedwait(self.sem2, 0);
- }
- }
-
- pub unsafe fn wait(&self, mutex: &Mutex) {
- self.counter.fetch_add(1, SeqCst);
- mutex.unlock();
- abi::sem_timedwait(self.sem1, 0);
- abi::sem_post(self.sem2);
- mutex.lock();
- }
-
- pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
- self.counter.fetch_add(1, SeqCst);
- mutex.unlock();
- let millis = dur.as_millis().min(u32::MAX as u128) as u32;
-
- let res = if millis > 0 {
- abi::sem_timedwait(self.sem1, millis)
- } else {
- abi::sem_trywait(self.sem1)
- };
-
- abi::sem_post(self.sem2);
- mutex.lock();
- res == 0
- }
-}
-
-impl Drop for Condvar {
- fn drop(&mut self) {
- unsafe {
- let _ = abi::sem_destroy(self.sem1);
- let _ = abi::sem_destroy(self.sem2);
- }
- }
-}
diff --git a/library/std/src/sys/hermit/fs.rs b/library/std/src/sys/hermit/fs.rs
index fa9a7fb19..af297ff1e 100644
--- a/library/std/src/sys/hermit/fs.rs
+++ b/library/std/src/sys/hermit/fs.rs
@@ -1,10 +1,12 @@
+use crate::convert::TryFrom;
use crate::ffi::{CStr, CString, OsString};
use crate::fmt;
use crate::hash::{Hash, Hasher};
use crate::io::{self, Error, ErrorKind};
-use crate::io::{IoSlice, IoSliceMut, ReadBuf, SeekFrom};
+use crate::io::{BorrowedCursor, IoSlice, IoSliceMut, SeekFrom};
use crate::os::unix::ffi::OsStrExt;
use crate::path::{Path, PathBuf};
+use crate::sys::common::small_c_string::run_path_with_cstr;
use crate::sys::cvt;
use crate::sys::hermit::abi;
use crate::sys::hermit::abi::{O_APPEND, O_CREAT, O_EXCL, O_RDONLY, O_RDWR, O_TRUNC, O_WRONLY};
@@ -15,10 +17,6 @@ use crate::sys::unsupported;
pub use crate::sys_common::fs::{copy, try_exists};
//pub use crate::sys_common::fs::remove_dir_all;
-fn cstr(path: &Path) -> io::Result<CString> {
- Ok(CString::new(path.as_os_str().as_bytes())?)
-}
-
#[derive(Debug)]
pub struct File(FileDesc);
@@ -41,6 +39,9 @@ pub struct OpenOptions {
mode: i32,
}
+#[derive(Copy, Clone, Debug, Default)]
+pub struct FileTimes {}
+
pub struct FilePermissions(!);
pub struct FileType(!);
@@ -110,6 +111,11 @@ impl fmt::Debug for FilePermissions {
}
}
+impl FileTimes {
+ pub fn set_accessed(&mut self, _t: SystemTime) {}
+ pub fn set_modified(&mut self, _t: SystemTime) {}
+}
+
impl FileType {
pub fn is_dir(&self) -> bool {
self.0
@@ -264,8 +270,7 @@ impl OpenOptions {
impl File {
pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> {
- let path = cstr(path)?;
- File::open_c(&path, opts)
+ run_path_with_cstr(path, |path| File::open_c(&path, opts))
}
pub fn open_c(path: &CStr, opts: &OpenOptions) -> io::Result<File> {
@@ -312,8 +317,8 @@ impl File {
false
}
- pub fn read_buf(&self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
- crate::io::default_read_buf(|buf| self.read(buf), buf)
+ pub fn read_buf(&self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
+ crate::io::default_read_buf(|buf| self.read(buf), cursor)
}
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
@@ -344,6 +349,10 @@ impl File {
pub fn set_permissions(&self, _perm: FilePermissions) -> io::Result<()> {
Err(Error::from_raw_os_error(22))
}
+
+ pub fn set_times(&self, _times: FileTimes) -> io::Result<()> {
+ Err(Error::from_raw_os_error(22))
+ }
}
impl DirBuilder {
@@ -361,9 +370,7 @@ pub fn readdir(_p: &Path) -> io::Result<ReadDir> {
}
pub fn unlink(path: &Path) -> io::Result<()> {
- let name = cstr(path)?;
- let _ = unsafe { cvt(abi::unlink(name.as_ptr()))? };
- Ok(())
+ run_path_with_cstr(path, |path| cvt(unsafe { abi::unlink(path.as_ptr()) }).map(|_| ()))
}
pub fn rename(_old: &Path, _new: &Path) -> io::Result<()> {
diff --git a/library/std/src/sys/hermit/futex.rs b/library/std/src/sys/hermit/futex.rs
new file mode 100644
index 000000000..b64c174b0
--- /dev/null
+++ b/library/std/src/sys/hermit/futex.rs
@@ -0,0 +1,39 @@
+use super::abi;
+use crate::ptr::null;
+use crate::sync::atomic::AtomicU32;
+use crate::time::Duration;
+
+pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -> bool {
+ // Calculate the timeout as a relative timespec.
+ //
+ // Overflows are rounded up to an infinite timeout (None).
+ let timespec = timeout.and_then(|dur| {
+ Some(abi::timespec {
+ tv_sec: dur.as_secs().try_into().ok()?,
+ tv_nsec: dur.subsec_nanos().into(),
+ })
+ });
+
+ let r = unsafe {
+ abi::futex_wait(
+ futex.as_mut_ptr(),
+ expected,
+ timespec.as_ref().map_or(null(), |t| t as *const abi::timespec),
+ abi::FUTEX_RELATIVE_TIMEOUT,
+ )
+ };
+
+ r != -abi::errno::ETIMEDOUT
+}
+
+#[inline]
+pub fn futex_wake(futex: &AtomicU32) -> bool {
+ unsafe { abi::futex_wake(futex.as_mut_ptr(), 1) > 0 }
+}
+
+#[inline]
+pub fn futex_wake_all(futex: &AtomicU32) {
+ unsafe {
+ abi::futex_wake(futex.as_mut_ptr(), i32::MAX);
+ }
+}
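
These two calls are all the port needs in order to switch to the shared futex-based locks (see the hermit/mod.rs hunk further down, which points `locks` at the unix futex implementations). A bare-bones sketch of how a mutex is typically layered on a wait/wake pair, with the platform calls stubbed out and the usual contended/uncontended bookkeeping omitted:

    use std::sync::atomic::{
        AtomicU32,
        Ordering::{Acquire, Release},
    };

    // Stand-ins for the platform calls above; the real ones block and wake.
    fn futex_wait(futex: &AtomicU32, expected: u32) {
        let _ = (futex, expected); // real call: block while *futex == expected
    }
    fn futex_wake(futex: &AtomicU32) {
        let _ = futex; // real call: wake one blocked thread
    }

    pub struct Mutex {
        state: AtomicU32, // 0 = unlocked, 1 = locked
    }

    impl Mutex {
        pub const fn new() -> Self {
            Self { state: AtomicU32::new(0) }
        }

        pub fn lock(&self) {
            // Try to flip 0 -> 1; if someone else holds it, sleep until woken.
            while self.state.compare_exchange(0, 1, Acquire, Acquire).is_err() {
                futex_wait(&self.state, 1);
            }
        }

        pub fn unlock(&self) {
            self.state.store(0, Release);
            // Wake one waiter. Real implementations track a "contended" state
            // so the uncontended unlock can skip this call.
            futex_wake(&self.state);
        }
    }

    fn main() {
        let m = Mutex::new();
        m.lock();
        m.unlock();
    }
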
diff --git a/library/std/src/sys/hermit/mod.rs b/library/std/src/sys/hermit/mod.rs
index 60b7a973c..e6534df89 100644
--- a/library/std/src/sys/hermit/mod.rs
+++ b/library/std/src/sys/hermit/mod.rs
@@ -25,6 +25,7 @@ pub mod cmath;
pub mod env;
pub mod fd;
pub mod fs;
+pub mod futex;
#[path = "../unsupported/io.rs"]
pub mod io;
pub mod memchr;
@@ -45,14 +46,14 @@ pub mod thread_local_dtor;
pub mod thread_local_key;
pub mod time;
-mod condvar;
-mod mutex;
-mod rwlock;
-
+#[path = "../unix/locks"]
pub mod locks {
- pub use super::condvar::*;
- pub use super::mutex::*;
- pub use super::rwlock::*;
+ mod futex_condvar;
+ mod futex_mutex;
+ mod futex_rwlock;
+ pub(crate) use futex_condvar::MovableCondvar;
+ pub(crate) use futex_mutex::{MovableMutex, Mutex};
+ pub(crate) use futex_rwlock::{MovableRwLock, RwLock};
}
use crate::io::ErrorKind;
@@ -98,16 +99,14 @@ pub extern "C" fn __rust_abort() {
// SAFETY: must be called only once during runtime initialization.
// NOTE: this is not guaranteed to run, for example when Rust code is called externally.
-pub unsafe fn init(argc: isize, argv: *const *const u8) {
+pub unsafe fn init(argc: isize, argv: *const *const u8, _sigpipe: u8) {
let _ = net::init();
args::init(argc, argv);
}
// SAFETY: must be called only once during runtime cleanup.
// NOTE: this is not guaranteed to run, for example when the program aborts.
-pub unsafe fn cleanup() {
- args::cleanup();
-}
+pub unsafe fn cleanup() {}
#[cfg(not(test))]
#[no_mangle]
diff --git a/library/std/src/sys/hermit/mutex.rs b/library/std/src/sys/hermit/mutex.rs
deleted file mode 100644
index eb15a04ff..000000000
--- a/library/std/src/sys/hermit/mutex.rs
+++ /dev/null
@@ -1,216 +0,0 @@
-use crate::cell::UnsafeCell;
-use crate::collections::VecDeque;
-use crate::hint;
-use crate::ops::{Deref, DerefMut, Drop};
-use crate::ptr;
-use crate::sync::atomic::{AtomicUsize, Ordering};
-use crate::sys::hermit::abi;
-
-/// This type provides a lock based on busy waiting to realize mutual exclusion
-///
-/// # Description
-///
-/// This structure behaves a lot like a common mutex. There are some differences:
-///
-/// - By using busy waiting, it can be used outside the runtime.
-/// - It is a so called ticket lock and is completely fair.
-#[cfg_attr(target_arch = "x86_64", repr(align(128)))]
-#[cfg_attr(not(target_arch = "x86_64"), repr(align(64)))]
-struct Spinlock<T: ?Sized> {
- queue: AtomicUsize,
- dequeue: AtomicUsize,
- data: UnsafeCell<T>,
-}
-
-unsafe impl<T: ?Sized + Send> Sync for Spinlock<T> {}
-unsafe impl<T: ?Sized + Send> Send for Spinlock<T> {}
-
-/// A guard to which the protected data can be accessed
-///
-/// When the guard falls out of scope it will release the lock.
-struct SpinlockGuard<'a, T: ?Sized + 'a> {
- dequeue: &'a AtomicUsize,
- data: &'a mut T,
-}
-
-impl<T> Spinlock<T> {
- pub const fn new(user_data: T) -> Spinlock<T> {
- Spinlock {
- queue: AtomicUsize::new(0),
- dequeue: AtomicUsize::new(1),
- data: UnsafeCell::new(user_data),
- }
- }
-
- #[inline]
- fn obtain_lock(&self) {
- let ticket = self.queue.fetch_add(1, Ordering::SeqCst) + 1;
- let mut counter: u16 = 0;
- while self.dequeue.load(Ordering::SeqCst) != ticket {
- counter += 1;
- if counter < 100 {
- hint::spin_loop();
- } else {
- counter = 0;
- unsafe {
- abi::yield_now();
- }
- }
- }
- }
-
- #[inline]
- pub unsafe fn lock(&self) -> SpinlockGuard<'_, T> {
- self.obtain_lock();
- SpinlockGuard { dequeue: &self.dequeue, data: &mut *self.data.get() }
- }
-}
-
-impl<T: ?Sized + Default> Default for Spinlock<T> {
- fn default() -> Spinlock<T> {
- Spinlock::new(Default::default())
- }
-}
-
-impl<'a, T: ?Sized> Deref for SpinlockGuard<'a, T> {
- type Target = T;
- fn deref(&self) -> &T {
- &*self.data
- }
-}
-
-impl<'a, T: ?Sized> DerefMut for SpinlockGuard<'a, T> {
- fn deref_mut(&mut self) -> &mut T {
- &mut *self.data
- }
-}
-
-impl<'a, T: ?Sized> Drop for SpinlockGuard<'a, T> {
- /// The dropping of the SpinlockGuard will release the lock it was created from.
- fn drop(&mut self) {
- self.dequeue.fetch_add(1, Ordering::SeqCst);
- }
-}
-
-/// Realize a priority queue for tasks
-struct PriorityQueue {
- queues: [Option<VecDeque<abi::Tid>>; abi::NO_PRIORITIES],
- prio_bitmap: u64,
-}
-
-impl PriorityQueue {
- pub const fn new() -> PriorityQueue {
- PriorityQueue {
- queues: [
- None, None, None, None, None, None, None, None, None, None, None, None, None, None,
- None, None, None, None, None, None, None, None, None, None, None, None, None, None,
- None, None, None,
- ],
- prio_bitmap: 0,
- }
- }
-
- /// Add a task id by its priority to the queue
- pub fn push(&mut self, prio: abi::Priority, id: abi::Tid) {
- let i: usize = prio.into().into();
- self.prio_bitmap |= (1 << i) as u64;
- if let Some(queue) = &mut self.queues[i] {
- queue.push_back(id);
- } else {
- let mut queue = VecDeque::new();
- queue.push_back(id);
- self.queues[i] = Some(queue);
- }
- }
-
- fn pop_from_queue(&mut self, queue_index: usize) -> Option<abi::Tid> {
- if let Some(queue) = &mut self.queues[queue_index] {
- let id = queue.pop_front();
-
- if queue.is_empty() {
- self.prio_bitmap &= !(1 << queue_index as u64);
- }
-
- id
- } else {
- None
- }
- }
-
- /// Pop the task handle with the highest priority from the queue
- pub fn pop(&mut self) -> Option<abi::Tid> {
- for i in 0..abi::NO_PRIORITIES {
- if self.prio_bitmap & (1 << i) != 0 {
- return self.pop_from_queue(i);
- }
- }
-
- None
- }
-}
-
-struct MutexInner {
- locked: bool,
- blocked_task: PriorityQueue,
-}
-
-impl MutexInner {
- pub const fn new() -> MutexInner {
- MutexInner { locked: false, blocked_task: PriorityQueue::new() }
- }
-}
-
-pub struct Mutex {
- inner: Spinlock<MutexInner>,
-}
-
-pub type MovableMutex = Mutex;
-
-unsafe impl Send for Mutex {}
-unsafe impl Sync for Mutex {}
-
-impl Mutex {
- pub const fn new() -> Mutex {
- Mutex { inner: Spinlock::new(MutexInner::new()) }
- }
-
- #[inline]
- pub unsafe fn init(&mut self) {}
-
- #[inline]
- pub unsafe fn lock(&self) {
- loop {
- let mut guard = self.inner.lock();
- if guard.locked == false {
- guard.locked = true;
- return;
- } else {
- let prio = abi::get_priority();
- let id = abi::getpid();
-
- guard.blocked_task.push(prio, id);
- abi::block_current_task();
- drop(guard);
- abi::yield_now();
- }
- }
- }
-
- #[inline]
- pub unsafe fn unlock(&self) {
- let mut guard = self.inner.lock();
- guard.locked = false;
- if let Some(tid) = guard.blocked_task.pop() {
- abi::wakeup_task(tid);
- }
- }
-
- #[inline]
- pub unsafe fn try_lock(&self) -> bool {
- let mut guard = self.inner.lock();
- if guard.locked == false {
- guard.locked = true;
- }
- guard.locked
- }
-}
diff --git a/library/std/src/sys/hermit/net.rs b/library/std/src/sys/hermit/net.rs
index 745476171..8a13879d8 100644
--- a/library/std/src/sys/hermit/net.rs
+++ b/library/std/src/sys/hermit/net.rs
@@ -487,6 +487,4 @@ pub mod netc {
#[derive(Copy, Clone)]
pub struct sockaddr {}
-
- pub type socklen_t = usize;
}
diff --git a/library/std/src/sys/hermit/rwlock.rs b/library/std/src/sys/hermit/rwlock.rs
deleted file mode 100644
index 9701bab1f..000000000
--- a/library/std/src/sys/hermit/rwlock.rs
+++ /dev/null
@@ -1,144 +0,0 @@
-use crate::cell::UnsafeCell;
-use crate::sys::locks::{MovableCondvar, Mutex};
-use crate::sys_common::lazy_box::{LazyBox, LazyInit};
-
-pub struct RwLock {
- lock: Mutex,
- cond: MovableCondvar,
- state: UnsafeCell<State>,
-}
-
-pub type MovableRwLock = RwLock;
-
-enum State {
- Unlocked,
- Reading(usize),
- Writing,
-}
-
-unsafe impl Send for RwLock {}
-unsafe impl Sync for RwLock {}
-
-// This rwlock implementation is a relatively simple implementation which has a
-// condition variable for readers/writers as well as a mutex protecting the
-// internal state of the lock. A current downside of the implementation is that
-// unlocking the lock will notify *all* waiters rather than just readers or just
-// writers. This can cause lots of "thundering stampede" problems. While
-// hopefully correct this implementation is very likely to want to be changed in
-// the future.
-
-impl RwLock {
- pub const fn new() -> RwLock {
- RwLock {
- lock: Mutex::new(),
- cond: MovableCondvar::new(),
- state: UnsafeCell::new(State::Unlocked),
- }
- }
-
- #[inline]
- pub unsafe fn read(&self) {
- self.lock.lock();
- while !(*self.state.get()).inc_readers() {
- self.cond.wait(&self.lock);
- }
- self.lock.unlock();
- }
-
- #[inline]
- pub unsafe fn try_read(&self) -> bool {
- self.lock.lock();
- let ok = (*self.state.get()).inc_readers();
- self.lock.unlock();
- return ok;
- }
-
- #[inline]
- pub unsafe fn write(&self) {
- self.lock.lock();
- while !(*self.state.get()).inc_writers() {
- self.cond.wait(&self.lock);
- }
- self.lock.unlock();
- }
-
- #[inline]
- pub unsafe fn try_write(&self) -> bool {
- self.lock.lock();
- let ok = (*self.state.get()).inc_writers();
- self.lock.unlock();
- return ok;
- }
-
- #[inline]
- pub unsafe fn read_unlock(&self) {
- self.lock.lock();
- let notify = (*self.state.get()).dec_readers();
- self.lock.unlock();
- if notify {
- // FIXME: should only wake up one of these some of the time
- self.cond.notify_all();
- }
- }
-
- #[inline]
- pub unsafe fn write_unlock(&self) {
- self.lock.lock();
- (*self.state.get()).dec_writers();
- self.lock.unlock();
- // FIXME: should only wake up one of these some of the time
- self.cond.notify_all();
- }
-}
-
-impl State {
- fn inc_readers(&mut self) -> bool {
- match *self {
- State::Unlocked => {
- *self = State::Reading(1);
- true
- }
- State::Reading(ref mut cnt) => {
- *cnt += 1;
- true
- }
- State::Writing => false,
- }
- }
-
- fn inc_writers(&mut self) -> bool {
- match *self {
- State::Unlocked => {
- *self = State::Writing;
- true
- }
- State::Reading(_) | State::Writing => false,
- }
- }
-
- fn dec_readers(&mut self) -> bool {
- let zero = match *self {
- State::Reading(ref mut cnt) => {
- *cnt -= 1;
- *cnt == 0
- }
- State::Unlocked | State::Writing => invalid(),
- };
- if zero {
- *self = State::Unlocked;
- }
- zero
- }
-
- fn dec_writers(&mut self) {
- match *self {
- State::Writing => {}
- State::Unlocked | State::Reading(_) => invalid(),
- }
- *self = State::Unlocked;
- }
-}
-
-fn invalid() -> ! {
- panic!("inconsistent rwlock");
-}
diff --git a/library/std/src/sys/itron/mutex.rs b/library/std/src/sys/itron/mutex.rs
index 715e94c3b..085662e6d 100644
--- a/library/std/src/sys/itron/mutex.rs
+++ b/library/std/src/sys/itron/mutex.rs
@@ -31,12 +31,6 @@ impl Mutex {
Mutex { mtx: SpinIdOnceCell::new() }
}
- pub unsafe fn init(&mut self) {
- // Initialize `self.mtx` eagerly
- let id = new_mtx().unwrap_or_else(|e| fail(e, &"acre_mtx"));
- unsafe { self.mtx.set_unchecked((id, ())) };
- }
-
/// Get the inner mutex's ID, which is lazily created.
fn raw(&self) -> abi::ID {
match self.mtx.get_or_try_init(|| new_mtx().map(|id| (id, ()))) {
diff --git a/library/std/src/sys/mod.rs b/library/std/src/sys/mod.rs
index 167c918c9..c080c176a 100644
--- a/library/std/src/sys/mod.rs
+++ b/library/std/src/sys/mod.rs
@@ -22,7 +22,7 @@
#![allow(missing_debug_implementations)]
-mod common;
+pub mod common;
cfg_if::cfg_if! {
if #[cfg(unix)] {
diff --git a/library/std/src/sys/sgx/abi/thread.rs b/library/std/src/sys/sgx/abi/thread.rs
index ef55b821a..2b23e368c 100644
--- a/library/std/src/sys/sgx/abi/thread.rs
+++ b/library/std/src/sys/sgx/abi/thread.rs
@@ -7,7 +7,11 @@ use fortanix_sgx_abi::Tcs;
#[unstable(feature = "sgx_platform", issue = "56975")]
pub fn current() -> Tcs {
extern "C" {
- fn get_tcs_addr() -> Tcs;
+ fn get_tcs_addr() -> *mut u8;
+ }
+ let addr = unsafe { get_tcs_addr() };
+ match Tcs::new(addr) {
+ Some(tcs) => tcs,
+ None => rtabort!("TCS must not be placed at address zero (this is a linker error)"),
}
- unsafe { get_tcs_addr() }
}
diff --git a/library/std/src/sys/sgx/abi/tls/mod.rs b/library/std/src/sys/sgx/abi/tls/mod.rs
index 13d96e9a6..09c4ab3d3 100644
--- a/library/std/src/sys/sgx/abi/tls/mod.rs
+++ b/library/std/src/sys/sgx/abi/tls/mod.rs
@@ -111,6 +111,7 @@ impl Tls {
rtabort!("TLS limit exceeded")
};
TLS_DESTRUCTOR[index].store(dtor.map_or(0, |f| f as usize), Ordering::Relaxed);
+ unsafe { Self::current() }.data[index].set(ptr::null_mut());
Key::from_index(index)
}
diff --git a/library/std/src/sys/sgx/abi/usercalls/alloc.rs b/library/std/src/sys/sgx/abi/usercalls/alloc.rs
index ea24fedd0..0d934318c 100644
--- a/library/std/src/sys/sgx/abi/usercalls/alloc.rs
+++ b/library/std/src/sys/sgx/abi/usercalls/alloc.rs
@@ -56,6 +56,8 @@ unsafe impl UserSafeSized for Usercall {}
#[unstable(feature = "sgx_platform", issue = "56975")]
unsafe impl UserSafeSized for Return {}
#[unstable(feature = "sgx_platform", issue = "56975")]
+unsafe impl UserSafeSized for Cancel {}
+#[unstable(feature = "sgx_platform", issue = "56975")]
unsafe impl<T: UserSafeSized> UserSafeSized for [T; 2] {}
/// A type that can be represented in memory as one or more `UserSafeSized`s.
@@ -115,7 +117,7 @@ pub unsafe trait UserSafe {
/// * the pointer is null.
/// * the pointed-to range is not in user memory.
unsafe fn check_ptr(ptr: *const Self) {
- let is_aligned = |p| -> bool { 0 == (p as usize) & (Self::align_of() - 1) };
+ let is_aligned = |p: *const u8| -> bool { p.is_aligned_to(Self::align_of()) };
assert!(is_aligned(ptr as *const u8));
assert!(is_user_range(ptr as _, mem::size_of_val(unsafe { &*ptr })));
@@ -305,6 +307,34 @@ where
}
}
+// Split a memory region ptr..ptr + len into three parts:
+// +--------+
+// | small0 | Chunk smaller than 8 bytes
+// +--------+
+// | big | Chunk 8-byte aligned, and size a multiple of 8 bytes
+// +--------+
+// | small1 | Chunk smaller than 8 bytes
+// +--------+
+fn region_as_aligned_chunks(ptr: *const u8, len: usize) -> (usize, usize, usize) {
+ let small0_size = if ptr.is_aligned_to(8) { 0 } else { 8 - ptr.addr() % 8 };
+ let small1_size = (len - small0_size) % 8;
+ let big_size = len - small0_size - small1_size;
+
+ (small0_size, big_size, small1_size)
+}
+
+unsafe fn copy_quadwords(src: *const u8, dst: *mut u8, len: usize) {
+ unsafe {
+ asm!(
+ "rep movsq (%rsi), (%rdi)",
+ inout("rcx") len / 8 => _,
+ inout("rdi") dst => _,
+ inout("rsi") src => _,
+ options(att_syntax, nostack, preserves_flags)
+ );
+ }
+}
+
/// Copies `len` bytes of data from enclave pointer `src` to userspace `dst`
///
/// This function mitigates stale data vulnerabilities by ensuring all writes to untrusted memory are either:
@@ -334,8 +364,8 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize)
mfence
lfence
",
- val = in(reg_byte) *src.offset(off as isize),
- dst = in(reg) dst.offset(off as isize),
+ val = in(reg_byte) *src.add(off),
+ dst = in(reg) dst.add(off),
seg_sel = in(reg) &mut seg_sel,
options(nostack, att_syntax)
);
@@ -343,34 +373,23 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize)
}
}
- unsafe fn copy_aligned_quadwords_to_userspace(src: *const u8, dst: *mut u8, len: usize) {
- unsafe {
- asm!(
- "rep movsq (%rsi), (%rdi)",
- inout("rcx") len / 8 => _,
- inout("rdi") dst => _,
- inout("rsi") src => _,
- options(att_syntax, nostack, preserves_flags)
- );
- }
- }
assert!(!src.is_null());
assert!(!dst.is_null());
assert!(is_enclave_range(src, len));
assert!(is_user_range(dst, len));
assert!(len < isize::MAX as usize);
- assert!(!(src as usize).overflowing_add(len).1);
- assert!(!(dst as usize).overflowing_add(len).1);
+ assert!(!src.addr().overflowing_add(len).1);
+ assert!(!dst.addr().overflowing_add(len).1);
if len < 8 {
// Can't align on 8 byte boundary: copy safely byte per byte
unsafe {
copy_bytewise_to_userspace(src, dst, len);
}
- } else if len % 8 == 0 && dst as usize % 8 == 0 {
+ } else if len % 8 == 0 && dst.is_aligned_to(8) {
// Copying 8-byte aligned quadwords: copy quad word per quad word
unsafe {
- copy_aligned_quadwords_to_userspace(src, dst, len);
+ copy_quadwords(src, dst, len);
}
} else {
// Split copies into three parts:
@@ -381,25 +400,121 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize)
// +--------+
// | small1 | Chunk smaller than 8 bytes
// +--------+
+ let (small0_size, big_size, small1_size) = region_as_aligned_chunks(dst, len);
unsafe {
// Copy small0
- let small0_size = (8 - dst as usize % 8) as u8;
- let small0_src = src;
- let small0_dst = dst;
- copy_bytewise_to_userspace(small0_src as _, small0_dst, small0_size as _);
+ copy_bytewise_to_userspace(src, dst, small0_size);
// Copy big
- let small1_size = ((len - small0_size as usize) % 8) as u8;
- let big_size = len - small0_size as usize - small1_size as usize;
- let big_src = src.offset(small0_size as _);
- let big_dst = dst.offset(small0_size as _);
- copy_aligned_quadwords_to_userspace(big_src as _, big_dst, big_size);
+ let big_src = src.add(small0_size);
+ let big_dst = dst.add(small0_size);
+ copy_quadwords(big_src, big_dst, big_size);
// Copy small1
- let small1_src = src.offset(big_size as isize + small0_size as isize);
- let small1_dst = dst.offset(big_size as isize + small0_size as isize);
- copy_bytewise_to_userspace(small1_src, small1_dst, small1_size as _);
+ let small1_src = src.add(big_size + small0_size);
+ let small1_dst = dst.add(big_size + small0_size);
+ copy_bytewise_to_userspace(small1_src, small1_dst, small1_size);
+ }
+ }
+}
+
+/// Copies `len` bytes of data from userspace pointer `src` to enclave pointer `dst`
+///
+/// This function mitigates AEPIC leak vulnerabilities by ensuring all reads from untrusted memory are 8-byte aligned
+///
+/// # Panics
+/// This function panics if:
+///
+/// * The `src` pointer is null
+/// * The `dst` pointer is null
+/// * The `src` memory range is not in user memory
+/// * The `dst` memory range is not in enclave memory
+///
+/// # References
+/// - https://www.intel.com/content/www/us/en/security-center/advisory/intel-sa-00657.html
+/// - https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/stale-data-read-from-xapic.html
+pub(crate) unsafe fn copy_from_userspace(src: *const u8, dst: *mut u8, len: usize) {
+ // Copies memory region `src..src + len` to the enclave at `dst`. The source memory region
+ // is:
+ // - strictly less than 8 bytes in size and may be
+ // - located at a misaligned memory location
+ fn copy_misaligned_chunk_to_enclave(src: *const u8, dst: *mut u8, len: usize) {
+ let mut tmp_buff = [0u8; 16];
+
+ unsafe {
+ // Compute an aligned memory region to read from
+ // +--------+ <-- aligned_src + aligned_len (8B-aligned)
+ // | pad1 |
+ // +--------+ <-- src + len (misaligned)
+ // | |
+ // | |
+ // | |
+ // +--------+ <-- src (misaligned)
+ // | pad0 |
+ // +--------+ <-- aligned_src (8B-aligned)
+ let pad0_size = src as usize % 8;
+ let aligned_src = src.sub(pad0_size);
+
+ let pad1_size = 8 - (src.add(len) as usize % 8);
+ let aligned_len = pad0_size + len + pad1_size;
+
+ debug_assert!(len < 8);
+ debug_assert_eq!(aligned_src as usize % 8, 0);
+ debug_assert_eq!(aligned_len % 8, 0);
+ debug_assert!(aligned_len <= 16);
+
+ // Copy the aligned buffer to a temporary buffer
+ // Note: copying from a slightly different memory location is a bit odd. In this case it
+ // can't lead to page faults or inadvertent copying from the enclave as we only ensured
+ // that the `src` pointer is aligned at an 8 byte boundary. As pages are 4096 bytes
+ // aligned, `aligned_src` must be on the same page as `src`. A similar argument can be made
+ // for `src + len`
+ copy_quadwords(aligned_src as _, tmp_buff.as_mut_ptr(), aligned_len);
+
+ // Copy the correct parts of the temporary buffer to the destination
+ ptr::copy(tmp_buff.as_ptr().add(pad0_size), dst, len);
+ }
+ }
+
+ assert!(!src.is_null());
+ assert!(!dst.is_null());
+ assert!(is_user_range(src, len));
+ assert!(is_enclave_range(dst, len));
+ assert!(!(src as usize).overflowing_add(len + 8).1);
+ assert!(!(dst as usize).overflowing_add(len + 8).1);
+
+ if len < 8 {
+ copy_misaligned_chunk_to_enclave(src, dst, len);
+ } else if len % 8 == 0 && src as usize % 8 == 0 {
+ // Copying 8-byte aligned quadwords: copy quad word per quad word
+ unsafe {
+ copy_quadwords(src, dst, len);
+ }
+ } else {
+ // Split copies into three parts:
+ // +--------+
+ // | small0 | Chunk smaller than 8 bytes
+ // +--------+
+ // | big | Chunk 8-byte aligned, and size a multiple of 8 bytes
+ // +--------+
+ // | small1 | Chunk smaller than 8 bytes
+ // +--------+
+ let (small0_size, big_size, small1_size) = region_as_aligned_chunks(dst, len);
+
+ unsafe {
+ // Copy small0
+ copy_misaligned_chunk_to_enclave(src, dst, small0_size);
+
+ // Copy big
+ let big_src = src.add(small0_size);
+ let big_dst = dst.add(small0_size);
+ copy_quadwords(big_src, big_dst, big_size);
+
+ // Copy small1
+ let small1_src = src.add(big_size + small0_size);
+ let small1_dst = dst.add(big_size + small0_size);
+ copy_misaligned_chunk_to_enclave(small1_src, small1_dst, small1_size);
}
}
}
@@ -468,7 +583,7 @@ where
pub fn copy_to_enclave(&self, dest: &mut T) {
unsafe {
assert_eq!(mem::size_of_val(dest), mem::size_of_val(&*self.0.get()));
- ptr::copy(
+ copy_from_userspace(
self.0.get() as *const T as *const u8,
dest as *mut T as *mut u8,
mem::size_of_val(dest),
@@ -494,7 +609,11 @@ where
{
/// Copies the value from user memory into enclave memory.
pub fn to_enclave(&self) -> T {
- unsafe { ptr::read(self.0.get()) }
+ unsafe {
+ let mut data: T = mem::MaybeUninit::uninit().assume_init();
+ copy_from_userspace(self.0.get() as _, &mut data as *mut T as _, mem::size_of::<T>());
+ data
+ }
}
}
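
Both copy directions now share the same decomposition: a misaligned head, an 8-byte-aligned middle moved with `rep movsq`, and a misaligned tail, as computed by region_as_aligned_chunks. A small standalone check of just that splitting arithmetic (pure computation, no enclave types involved):

    // Split a region starting at `addr` with length `len` into
    // (head, aligned_middle, tail), mirroring region_as_aligned_chunks above.
    fn split(addr: usize, len: usize) -> (usize, usize, usize) {
        let head = if addr % 8 == 0 { 0 } else { 8 - addr % 8 };
        let tail = (len - head) % 8;
        let middle = len - head - tail;
        (head, middle, tail)
    }

    fn main() {
        // Aligned start, length a multiple of 8: everything lands in the middle chunk.
        assert_eq!(split(0x1000, 64), (0, 64, 0));
        // Misaligned start: 3 head bytes reach the next 8-byte boundary,
        // then 56 aligned bytes, then a 5-byte tail.
        assert_eq!(split(0x1005, 64), (3, 56, 5));
        // The subtraction assumes len >= head, which the callers guarantee
        // by only taking this path when len >= 8.
        assert_eq!(split(0x1007, 9), (1, 8, 0));
    }
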
diff --git a/library/std/src/sys/sgx/abi/usercalls/mod.rs b/library/std/src/sys/sgx/abi/usercalls/mod.rs
index 79d1db5e1..e19e84326 100644
--- a/library/std/src/sys/sgx/abi/usercalls/mod.rs
+++ b/library/std/src/sys/sgx/abi/usercalls/mod.rs
@@ -292,12 +292,17 @@ fn check_os_error(err: Result) -> i32 {
}
}
-trait FromSgxResult {
+/// Translate the raw result of an SGX usercall.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub trait FromSgxResult {
+ /// Return type
type Return;
+ /// Translate the raw result of an SGX usercall.
fn from_sgx_result(self) -> IoResult<Self::Return>;
}
+#[unstable(feature = "sgx_platform", issue = "56975")]
impl<T> FromSgxResult for (Result, T) {
type Return = T;
@@ -310,6 +315,7 @@ impl<T> FromSgxResult for (Result, T) {
}
}
+#[unstable(feature = "sgx_platform", issue = "56975")]
impl FromSgxResult for Result {
type Return = ();
diff --git a/library/std/src/sys/sgx/abi/usercalls/raw.rs b/library/std/src/sys/sgx/abi/usercalls/raw.rs
index 4267b96cc..10c1456d4 100644
--- a/library/std/src/sys/sgx/abi/usercalls/raw.rs
+++ b/library/std/src/sys/sgx/abi/usercalls/raw.rs
@@ -37,14 +37,23 @@ pub unsafe fn do_usercall(
(a, b)
}
-type Register = u64;
+/// A value passed or returned in a CPU register.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub type Register = u64;
-trait RegisterArgument {
+/// Translate a type from/to Register to be used as an argument.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub trait RegisterArgument {
+ /// Translate a Register to Self.
fn from_register(_: Register) -> Self;
+ /// Translate self to a Register.
fn into_register(self) -> Register;
}
-trait ReturnValue {
+/// Translate a pair of Registers to the raw usercall return value.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub trait ReturnValue {
+ /// Translate a pair of Registers to the raw usercall return value.
fn from_registers(call: &'static str, regs: (Register, Register)) -> Self;
}
@@ -68,6 +77,7 @@ macro_rules! define_usercalls {
macro_rules! define_ra {
(< $i:ident > $t:ty) => {
+ #[unstable(feature = "sgx_platform", issue = "56975")]
impl<$i> RegisterArgument for $t {
fn from_register(a: Register) -> Self {
a as _
@@ -78,6 +88,7 @@ macro_rules! define_ra {
}
};
($i:ty as $t:ty) => {
+ #[unstable(feature = "sgx_platform", issue = "56975")]
impl RegisterArgument for $t {
fn from_register(a: Register) -> Self {
a as $i as _
@@ -88,6 +99,7 @@ macro_rules! define_ra {
}
};
($t:ty) => {
+ #[unstable(feature = "sgx_platform", issue = "56975")]
impl RegisterArgument for $t {
fn from_register(a: Register) -> Self {
a as _
@@ -112,6 +124,7 @@ define_ra!(usize as isize);
define_ra!(<T> *const T);
define_ra!(<T> *mut T);
+#[unstable(feature = "sgx_platform", issue = "56975")]
impl RegisterArgument for bool {
fn from_register(a: Register) -> bool {
if a != 0 { true } else { false }
@@ -121,6 +134,7 @@ impl RegisterArgument for bool {
}
}
+#[unstable(feature = "sgx_platform", issue = "56975")]
impl<T: RegisterArgument> RegisterArgument for Option<NonNull<T>> {
fn from_register(a: Register) -> Option<NonNull<T>> {
NonNull::new(a as _)
@@ -130,12 +144,14 @@ impl<T: RegisterArgument> RegisterArgument for Option<NonNull<T>> {
}
}
+#[unstable(feature = "sgx_platform", issue = "56975")]
impl ReturnValue for ! {
fn from_registers(call: &'static str, _regs: (Register, Register)) -> Self {
rtabort!("Usercall {call}: did not expect to be re-entered");
}
}
+#[unstable(feature = "sgx_platform", issue = "56975")]
impl ReturnValue for () {
fn from_registers(call: &'static str, usercall_retval: (Register, Register)) -> Self {
rtassert!(usercall_retval.0 == 0);
@@ -144,6 +160,7 @@ impl ReturnValue for () {
}
}
+#[unstable(feature = "sgx_platform", issue = "56975")]
impl<T: RegisterArgument> ReturnValue for T {
fn from_registers(call: &'static str, usercall_retval: (Register, Register)) -> Self {
rtassert!(usercall_retval.1 == 0);
@@ -151,6 +168,7 @@ impl<T: RegisterArgument> ReturnValue for T {
}
}
+#[unstable(feature = "sgx_platform", issue = "56975")]
impl<T: RegisterArgument, U: RegisterArgument> ReturnValue for (T, U) {
fn from_registers(_call: &'static str, regs: (Register, Register)) -> Self {
(T::from_register(regs.0), U::from_register(regs.1))
diff --git a/library/std/src/sys/sgx/abi/usercalls/tests.rs b/library/std/src/sys/sgx/abi/usercalls/tests.rs
index cbf7d7d54..58b8eb215 100644
--- a/library/std/src/sys/sgx/abi/usercalls/tests.rs
+++ b/library/std/src/sys/sgx/abi/usercalls/tests.rs
@@ -1,8 +1,8 @@
-use super::alloc::copy_to_userspace;
use super::alloc::User;
+use super::alloc::{copy_from_userspace, copy_to_userspace};
#[test]
-fn test_copy_function() {
+fn test_copy_to_userspace_function() {
let mut src = [0u8; 100];
let mut dst = User::<[u8]>::uninitialized(100);
@@ -17,12 +17,38 @@ fn test_copy_function() {
dst.copy_from_enclave(&[0u8; 100]);
// Copy src[0..size] to dst + offset
- unsafe { copy_to_userspace(src.as_ptr(), dst.as_mut_ptr().offset(offset), size) };
+ unsafe { copy_to_userspace(src.as_ptr(), dst.as_mut_ptr().add(offset), size) };
// Verify copy
for byte in 0..size {
unsafe {
- assert_eq!(*dst.as_ptr().offset(offset + byte as isize), src[byte as usize]);
+ assert_eq!(*dst.as_ptr().add(offset + byte), src[byte as usize]);
+ }
+ }
+ }
+ }
+}
+
+#[test]
+fn test_copy_from_userspace_function() {
+ let mut dst = [0u8; 100];
+ let mut src = User::<[u8]>::uninitialized(100);
+
+ src.copy_from_enclave(&[0u8; 100]);
+
+ for size in 0..48 {
+ // For all possible alignment
+ for offset in 0..8 {
+ // overwrite complete dst
+ dst = [0u8; 100];
+
+ // Copy src[0..size] to dst + offset
+ unsafe { copy_from_userspace(src.as_ptr().offset(offset), dst.as_mut_ptr(), size) };
+
+ // Verify copy
+ for byte in 0..size {
+ unsafe {
+ assert_eq!(dst[byte as usize], *src.as_ptr().offset(offset + byte as isize));
}
}
}
diff --git a/library/std/src/sys/sgx/mod.rs b/library/std/src/sys/sgx/mod.rs
index 696400670..b1d32929e 100644
--- a/library/std/src/sys/sgx/mod.rs
+++ b/library/std/src/sys/sgx/mod.rs
@@ -47,7 +47,7 @@ pub mod locks {
// SAFETY: must be called only once during runtime initialization.
// NOTE: this is not guaranteed to run, for example when Rust code is called externally.
-pub unsafe fn init(argc: isize, argv: *const *const u8) {
+pub unsafe fn init(argc: isize, argv: *const *const u8, _sigpipe: u8) {
unsafe {
args::init(argc, argv);
}
diff --git a/library/std/src/sys/sgx/mutex.rs b/library/std/src/sys/sgx/mutex.rs
index 513cd77fd..aa747d56b 100644
--- a/library/std/src/sys/sgx/mutex.rs
+++ b/library/std/src/sys/sgx/mutex.rs
@@ -21,9 +21,6 @@ impl Mutex {
}
#[inline]
- pub unsafe fn init(&mut self) {}
-
- #[inline]
pub unsafe fn lock(&self) {
let mut guard = self.inner.lock();
if *guard.lock_var() {
diff --git a/library/std/src/sys/sgx/thread_local_key.rs b/library/std/src/sys/sgx/thread_local_key.rs
index b21784475..c7a57d3a3 100644
--- a/library/std/src/sys/sgx/thread_local_key.rs
+++ b/library/std/src/sys/sgx/thread_local_key.rs
@@ -21,8 +21,3 @@ pub unsafe fn get(key: Key) -> *mut u8 {
pub unsafe fn destroy(key: Key) {
Tls::destroy(AbiKey::from_usize(key))
}
-
-#[inline]
-pub fn requires_synchronized_create() -> bool {
- false
-}
diff --git a/library/std/src/sys/solid/fs.rs b/library/std/src/sys/solid/fs.rs
index a2cbee4dc..6c66b93a3 100644
--- a/library/std/src/sys/solid/fs.rs
+++ b/library/std/src/sys/solid/fs.rs
@@ -2,7 +2,7 @@ use super::{abi, error};
use crate::{
ffi::{CStr, CString, OsStr, OsString},
fmt,
- io::{self, IoSlice, IoSliceMut, ReadBuf, SeekFrom},
+ io::{self, BorrowedCursor, IoSlice, IoSliceMut, SeekFrom},
mem::MaybeUninit,
os::raw::{c_int, c_short},
os::solid::ffi::OsStrExt,
@@ -77,6 +77,9 @@ pub struct OpenOptions {
custom_flags: i32,
}
+#[derive(Copy, Clone, Debug, Default)]
+pub struct FileTimes {}
+
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct FilePermissions(c_short);
@@ -126,6 +129,11 @@ impl FilePermissions {
}
}
+impl FileTimes {
+ pub fn set_accessed(&mut self, _t: SystemTime) {}
+ pub fn set_modified(&mut self, _t: SystemTime) {}
+}
+
impl FileType {
pub fn is_dir(&self) -> bool {
self.is(abi::S_IFDIR)
@@ -167,15 +175,19 @@ impl Iterator for ReadDir {
type Item = io::Result<DirEntry>;
fn next(&mut self) -> Option<io::Result<DirEntry>> {
- unsafe {
- let mut out_dirent = MaybeUninit::uninit();
- error::SolidError::err_if_negative(abi::SOLID_FS_ReadDir(
+ let entry = unsafe {
+ let mut out_entry = MaybeUninit::uninit();
+ match error::SolidError::err_if_negative(abi::SOLID_FS_ReadDir(
self.inner.dirp,
- out_dirent.as_mut_ptr(),
- ))
- .ok()?;
- Some(Ok(DirEntry { entry: out_dirent.assume_init(), inner: Arc::clone(&self.inner) }))
- }
+ out_entry.as_mut_ptr(),
+ )) {
+ Ok(_) => out_entry.assume_init(),
+ Err(e) if e.as_raw() == abi::SOLID_ERR_NOTFOUND => return None,
+ Err(e) => return Some(Err(e.as_io_error())),
+ }
+ };
+
+ (entry.d_name[0] != 0).then(|| Ok(DirEntry { entry, inner: Arc::clone(&self.inner) }))
}
}
@@ -358,13 +370,13 @@ impl File {
}
}
- pub fn read_buf(&self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
+ pub fn read_buf(&self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
unsafe {
- let len = buf.remaining();
+ let len = cursor.capacity();
let mut out_num_bytes = MaybeUninit::uninit();
error::SolidError::err_if_negative(abi::SOLID_FS_Read(
self.fd.raw(),
- buf.unfilled_mut().as_mut_ptr() as *mut u8,
+ cursor.as_mut().as_mut_ptr() as *mut u8,
len,
out_num_bytes.as_mut_ptr(),
))
@@ -376,9 +388,7 @@ impl File {
// Safety: `num_bytes_read` bytes were written to the unfilled
// portion of the buffer
- buf.assume_init(num_bytes_read);
-
- buf.add_filled(num_bytes_read);
+ cursor.advance(num_bytes_read);
Ok(())
}
@@ -452,6 +462,10 @@ impl File {
pub fn set_permissions(&self, _perm: FilePermissions) -> io::Result<()> {
unsupported()
}
+
+ pub fn set_times(&self, _times: FileTimes) -> io::Result<()> {
+ unsupported()
+ }
}
impl Drop for File {
diff --git a/library/std/src/sys/solid/mod.rs b/library/std/src/sys/solid/mod.rs
index 778a589d1..5867979a2 100644
--- a/library/std/src/sys/solid/mod.rs
+++ b/library/std/src/sys/solid/mod.rs
@@ -56,7 +56,7 @@ pub mod locks {
// SAFETY: must be called only once during runtime initialization.
// NOTE: this is not guaranteed to run, for example when Rust code is called externally.
-pub unsafe fn init(_argc: isize, _argv: *const *const u8) {}
+pub unsafe fn init(_argc: isize, _argv: *const *const u8, _sigpipe: u8) {}
// SAFETY: must be called only once during runtime cleanup.
pub unsafe fn cleanup() {}
diff --git a/library/std/src/sys/solid/os.rs b/library/std/src/sys/solid/os.rs
index b5649d6e0..4906c6268 100644
--- a/library/std/src/sys/solid/os.rs
+++ b/library/std/src/sys/solid/os.rs
@@ -1,4 +1,5 @@
use super::unsupported;
+use crate::convert::TryFrom;
use crate::error::Error as StdError;
use crate::ffi::{CStr, CString, OsStr, OsString};
use crate::fmt;
@@ -8,7 +9,8 @@ use crate::os::{
solid::ffi::{OsStrExt, OsStringExt},
};
use crate::path::{self, PathBuf};
-use crate::sys_common::rwlock::StaticRwLock;
+use crate::sync::RwLock;
+use crate::sys::common::small_c_string::run_with_cstr;
use crate::vec;
use super::{error, itron, memchr};
@@ -78,7 +80,7 @@ pub fn current_exe() -> io::Result<PathBuf> {
unsupported()
}
-static ENV_LOCK: StaticRwLock = StaticRwLock::new();
+static ENV_LOCK: RwLock<()> = RwLock::new(());
pub struct Env {
iter: vec::IntoIter<(OsString, OsString)>,
@@ -139,35 +141,33 @@ pub fn env() -> Env {
pub fn getenv(k: &OsStr) -> Option<OsString> {
// environment variables with a nul byte can't be set, so their value is
// always None as well
- let k = CString::new(k.as_bytes()).ok()?;
- unsafe {
+ let s = run_with_cstr(k.as_bytes(), |k| {
let _guard = ENV_LOCK.read();
- let s = libc::getenv(k.as_ptr()) as *const libc::c_char;
- if s.is_null() {
- None
- } else {
- Some(OsStringExt::from_vec(CStr::from_ptr(s).to_bytes().to_vec()))
- }
+ Ok(unsafe { libc::getenv(k.as_ptr()) } as *const libc::c_char)
+ })
+ .ok()?;
+
+ if s.is_null() {
+ None
+ } else {
+ Some(OsStringExt::from_vec(unsafe { CStr::from_ptr(s) }.to_bytes().to_vec()))
}
}
pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> {
- let k = CString::new(k.as_bytes())?;
- let v = CString::new(v.as_bytes())?;
-
- unsafe {
- let _guard = ENV_LOCK.write();
- cvt_env(libc::setenv(k.as_ptr(), v.as_ptr(), 1)).map(drop)
- }
+ run_with_cstr(k.as_bytes(), |k| {
+ run_with_cstr(v.as_bytes(), |v| {
+ let _guard = ENV_LOCK.write();
+ cvt_env(unsafe { libc::setenv(k.as_ptr(), v.as_ptr(), 1) }).map(drop)
+ })
+ })
}
pub fn unsetenv(n: &OsStr) -> io::Result<()> {
- let nbuf = CString::new(n.as_bytes())?;
-
- unsafe {
+ run_with_cstr(n.as_bytes(), |nbuf| {
let _guard = ENV_LOCK.write();
- cvt_env(libc::unsetenv(nbuf.as_ptr())).map(drop)
- }
+ cvt_env(unsafe { libc::unsetenv(nbuf.as_ptr()) }).map(drop)
+ })
}
/// In kmclib, `setenv` and `unsetenv` don't always set `errno`, so this
diff --git a/library/std/src/sys/solid/thread_local_key.rs b/library/std/src/sys/solid/thread_local_key.rs
index b17521f70..b37bf9996 100644
--- a/library/std/src/sys/solid/thread_local_key.rs
+++ b/library/std/src/sys/solid/thread_local_key.rs
@@ -19,8 +19,3 @@ pub unsafe fn get(_key: Key) -> *mut u8 {
pub unsafe fn destroy(_key: Key) {
panic!("should not be used on the solid target");
}
-
-#[inline]
-pub fn requires_synchronized_create() -> bool {
- panic!("should not be used on the solid target");
-}
diff --git a/library/std/src/sys/unix/fd.rs b/library/std/src/sys/unix/fd.rs
index 30812dabb..dbaa3c33e 100644
--- a/library/std/src/sys/unix/fd.rs
+++ b/library/std/src/sys/unix/fd.rs
@@ -4,7 +4,7 @@
mod tests;
use crate::cmp;
-use crate::io::{self, IoSlice, IoSliceMut, Read, ReadBuf};
+use crate::io::{self, BorrowedCursor, IoSlice, IoSliceMut, Read};
use crate::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
use crate::sys::cvt;
use crate::sys_common::{AsInner, FromInner, IntoInner};
@@ -131,20 +131,19 @@ impl FileDesc {
}
}
- pub fn read_buf(&self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
+ pub fn read_buf(&self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
let ret = cvt(unsafe {
libc::read(
self.as_raw_fd(),
- buf.unfilled_mut().as_mut_ptr() as *mut libc::c_void,
- cmp::min(buf.remaining(), READ_LIMIT),
+ cursor.as_mut().as_mut_ptr() as *mut libc::c_void,
+ cmp::min(cursor.capacity(), READ_LIMIT),
)
})?;
// Safety: `ret` bytes were written to the initialized portion of the buffer
unsafe {
- buf.assume_init(ret as usize);
+ cursor.advance(ret as usize);
}
- buf.add_filled(ret as usize);
Ok(())
}
diff --git a/library/std/src/sys/unix/fs.rs b/library/std/src/sys/unix/fs.rs
index b5cc8038c..37a49f2d7 100644
--- a/library/std/src/sys/unix/fs.rs
+++ b/library/std/src/sys/unix/fs.rs
@@ -1,13 +1,26 @@
+// miri has some special hacks here that make things unused.
+#![cfg_attr(miri, allow(unused))]
+
use crate::os::unix::prelude::*;
-use crate::ffi::{CStr, CString, OsStr, OsString};
+use crate::ffi::{CStr, OsStr, OsString};
use crate::fmt;
-use crate::io::{self, Error, IoSlice, IoSliceMut, ReadBuf, SeekFrom};
+use crate::io::{self, BorrowedCursor, Error, IoSlice, IoSliceMut, SeekFrom};
use crate::mem;
+#[cfg(any(
+ target_os = "android",
+ target_os = "linux",
+ target_os = "solaris",
+ target_os = "fuchsia",
+ target_os = "redox",
+ target_os = "illumos"
+))]
+use crate::mem::MaybeUninit;
use crate::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd};
use crate::path::{Path, PathBuf};
use crate::ptr;
use crate::sync::Arc;
+use crate::sys::common::small_c_string::run_path_with_cstr;
use crate::sys::fd::FileDesc;
use crate::sys::time::SystemTime;
use crate::sys::{cvt, cvt_r};
@@ -260,7 +273,7 @@ pub struct DirEntry {
// We need to store an owned copy of the entry name on platforms that use
// readdir() (not readdir_r()), because a) struct dirent may use a flexible
// array to store the name, b) it lives only until the next readdir() call.
- name: CString,
+ name: crate::ffi::CString,
}
// Define a minimal subset of fields we need from `dirent64`, especially since
@@ -313,8 +326,11 @@ pub struct FilePermissions {
mode: mode_t,
}
-#[derive(Copy, Clone)]
-pub struct FileTimes([libc::timespec; 2]);
+#[derive(Copy, Clone, Debug, Default)]
+pub struct FileTimes {
+ accessed: Option<SystemTime>,
+ modified: Option<SystemTime>,
+}
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct FileType {
@@ -512,45 +528,11 @@ impl FilePermissions {
impl FileTimes {
pub fn set_accessed(&mut self, t: SystemTime) {
- self.0[0] = t.t.to_timespec().expect("Invalid system time");
+ self.accessed = Some(t);
}
pub fn set_modified(&mut self, t: SystemTime) {
- self.0[1] = t.t.to_timespec().expect("Invalid system time");
- }
-}
-
-struct TimespecDebugAdapter<'a>(&'a libc::timespec);
-
-impl fmt::Debug for TimespecDebugAdapter<'_> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("timespec")
- .field("tv_sec", &self.0.tv_sec)
- .field("tv_nsec", &self.0.tv_nsec)
- .finish()
- }
-}
-
-impl fmt::Debug for FileTimes {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("FileTimes")
- .field("accessed", &TimespecDebugAdapter(&self.0[0]))
- .field("modified", &TimespecDebugAdapter(&self.0[1]))
- .finish()
- }
-}
-
-impl Default for FileTimes {
- fn default() -> Self {
- // Redox doesn't appear to support `UTIME_OMIT`, so we stub it out here, and always return
- // an error in `set_times`.
- // ESP-IDF does not support `futimens` at all and the behavior for that OS is therefore
- // the same as for Redox.
- #[cfg(any(target_os = "redox", target_os = "espidf"))]
- let omit = libc::timespec { tv_sec: 0, tv_nsec: 0 };
- #[cfg(not(any(target_os = "redox", target_os = "espidf")))]
- let omit = libc::timespec { tv_sec: 0, tv_nsec: libc::UTIME_OMIT as _ };
- Self([omit; 2])
+ self.modified = Some(t);
}
}
@@ -614,33 +596,69 @@ impl Iterator for ReadDir {
};
}
- // Only d_reclen bytes of *entry_ptr are valid, so we can't just copy the
- // whole thing (#93384). Instead, copy everything except the name.
- let mut copy: dirent64 = mem::zeroed();
- // Can't dereference entry_ptr, so use the local entry to get
- // offsetof(struct dirent, d_name)
- let copy_bytes = &mut copy as *mut _ as *mut u8;
- let copy_name = &mut copy.d_name as *mut _ as *mut u8;
- let name_offset = copy_name.offset_from(copy_bytes) as usize;
- let entry_bytes = entry_ptr as *const u8;
- let entry_name = entry_bytes.add(name_offset);
- ptr::copy_nonoverlapping(entry_bytes, copy_bytes, name_offset);
+ // The dirent64 struct is a weird imaginary thing that isn't ever supposed
+ // to be worked with by value. Its trailing d_name field is declared
+ // variously as [c_char; 256] or [c_char; 1] on different systems but
+ // either way that size is meaningless; only the offset of d_name is
+ // meaningful. The dirent64 pointers that libc returns from readdir64 are
+ // allowed to point to allocations smaller _or_ LARGER than implied by the
+ // definition of the struct.
+ //
+ // As such, we need to be even more careful with dirent64 than if its
+ // contents were "simply" partially initialized data.
+ //
+ // Like for uninitialized contents, converting entry_ptr to `&dirent64`
+ // would not be legal. However, unique to dirent64 is that we don't even
+ // get to use `addr_of!((*entry_ptr).d_name)` because that operation
+ // requires the full extent of *entry_ptr to be in bounds of the same
+ // allocation, which is not necessarily the case here.
+ //
+ // Absent any other way to obtain a pointer to `(*entry_ptr).d_name`
+ // legally in Rust analogously to how it would be done in C, we instead
+ // need to make our own non-libc allocation that conforms to the weird
+ // imaginary definition of dirent64, and use that for a field offset
+ // computation.
+ macro_rules! offset_ptr {
+ ($entry_ptr:expr, $field:ident) => {{
+ const OFFSET: isize = {
+ let delusion = MaybeUninit::<dirent64>::uninit();
+ let entry_ptr = delusion.as_ptr();
+ unsafe {
+ ptr::addr_of!((*entry_ptr).$field)
+ .cast::<u8>()
+ .offset_from(entry_ptr.cast::<u8>())
+ }
+ };
+ if true {
+ // Cast to the same type determined by the else branch.
+ $entry_ptr.byte_offset(OFFSET).cast::<_>()
+ } else {
+ #[allow(deref_nullptr)]
+ {
+ ptr::addr_of!((*ptr::null::<dirent64>()).$field)
+ }
+ }
+ }};
+ }
+
+ // d_name is guaranteed to be null-terminated.
+ let name = CStr::from_ptr(offset_ptr!(entry_ptr, d_name).cast());
+ let name_bytes = name.to_bytes();
+ if name_bytes == b"." || name_bytes == b".." {
+ continue;
+ }
let entry = dirent64_min {
- d_ino: copy.d_ino as u64,
+ d_ino: *offset_ptr!(entry_ptr, d_ino) as u64,
#[cfg(not(any(target_os = "solaris", target_os = "illumos")))]
- d_type: copy.d_type as u8,
+ d_type: *offset_ptr!(entry_ptr, d_type) as u8,
};
- let ret = DirEntry {
+ return Some(Ok(DirEntry {
entry,
- // d_name is guaranteed to be null-terminated.
- name: CStr::from_ptr(entry_name as *const _).to_owned(),
+ name: name.to_owned(),
dir: Arc::clone(&self.inner),
- };
- if ret.name_bytes() != b"." && ret.name_bytes() != b".." {
- return Some(Ok(ret));
- }
+ }));
}
}
}
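The field-offset trick that `offset_ptr!` packages up boils down to the sketch below (hypothetical `Record` type, not part of the patch): an uninitialized dummy value supplies a fully-sized allocation to measure the field offset against, so no pointer is ever formed past the end of the real, possibly shorter, allocation.

    use std::mem::MaybeUninit;
    use std::ptr;

    #[repr(C)]
    struct Record {
        id: u64,
        name: [u8; 1], // trailing array; real allocations may be shorter or longer
    }

    /// Returns a pointer to `name` without ever asserting that the full
    /// `Record` lies in bounds of the allocation behind `rec`.
    unsafe fn name_ptr(rec: *const Record) -> *const u8 {
        // The dummy is fully sized, so taking a field address inside it is fine.
        let dummy = MaybeUninit::<Record>::uninit();
        let base = dummy.as_ptr();
        let offset =
            unsafe { ptr::addr_of!((*base).name).cast::<u8>().offset_from(base.cast::<u8>()) };
        // Only the first `offset` bytes of `*rec` need to be in bounds here.
        unsafe { rec.cast::<u8>().offset(offset) }
    }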
@@ -687,7 +705,11 @@ impl Iterator for ReadDir {
impl Drop for Dir {
fn drop(&mut self) {
let r = unsafe { libc::closedir(self.0) };
- debug_assert_eq!(r, 0);
+ assert!(
+ r == 0 || crate::io::Error::last_os_error().kind() == crate::io::ErrorKind::Interrupted,
+ "unexpected error during closedir: {:?}",
+ crate::io::Error::last_os_error()
+ );
}
}
@@ -700,7 +722,10 @@ impl DirEntry {
self.file_name_os_str().to_os_string()
}
- #[cfg(any(target_os = "linux", target_os = "emscripten", target_os = "android"))]
+ #[cfg(all(
+ any(target_os = "linux", target_os = "emscripten", target_os = "android"),
+ not(miri)
+ ))]
pub fn metadata(&self) -> io::Result<FileAttr> {
let fd = cvt(unsafe { dirfd(self.dir.dirp.0) })?;
let name = self.name_cstr().as_ptr();
@@ -721,7 +746,10 @@ impl DirEntry {
Ok(FileAttr::from_stat64(stat))
}
- #[cfg(not(any(target_os = "linux", target_os = "emscripten", target_os = "android")))]
+ #[cfg(any(
+ not(any(target_os = "linux", target_os = "emscripten", target_os = "android")),
+ miri
+ ))]
pub fn metadata(&self) -> io::Result<FileAttr> {
lstat(&self.path())
}
@@ -925,8 +953,7 @@ impl OpenOptions {
impl File {
pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> {
- let path = cstr(path)?;
- File::open_c(&path, opts)
+ run_path_with_cstr(path, |path| File::open_c(path, opts))
}
pub fn open_c(path: &CStr, opts: &OpenOptions) -> io::Result<File> {
@@ -1031,8 +1058,8 @@ impl File {
self.0.read_at(buf, offset)
}
- pub fn read_buf(&self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
- self.0.read_buf(buf)
+ pub fn read_buf(&self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
+ self.0.read_buf(cursor)
}
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
@@ -1078,10 +1105,21 @@ impl File {
}
pub fn set_times(&self, times: FileTimes) -> io::Result<()> {
+ #[cfg(not(any(target_os = "redox", target_os = "espidf", target_os = "horizon")))]
+ let to_timespec = |time: Option<SystemTime>| {
+ match time {
+ Some(time) if let Some(ts) = time.t.to_timespec() => Ok(ts),
+ Some(time) if time > crate::sys::time::UNIX_EPOCH => Err(io::const_io_error!(io::ErrorKind::InvalidInput, "timestamp is too large to set as a file time")),
+ Some(_) => Err(io::const_io_error!(io::ErrorKind::InvalidInput, "timestamp is too small to set as a file time")),
+ None => Ok(libc::timespec { tv_sec: 0, tv_nsec: libc::UTIME_OMIT as _ }),
+ }
+ };
+ #[cfg(not(any(target_os = "redox", target_os = "espidf", target_os = "horizon")))]
+ let times = [to_timespec(times.accessed)?, to_timespec(times.modified)?];
cfg_if::cfg_if! {
- if #[cfg(any(target_os = "redox", target_os = "espidf"))] {
+ if #[cfg(any(target_os = "redox", target_os = "espidf", target_os = "horizon"))] {
// Redox doesn't appear to support `UTIME_OMIT`.
- // ESP-IDF does not support `futimens` at all and the behavior for that OS is therefore
+ // ESP-IDF and HorizonOS do not support `futimens` at all, and the behavior for those OSes is therefore
// the same as for Redox.
drop(times);
Err(io::const_io_error!(
@@ -1093,7 +1131,7 @@ impl File {
cvt(unsafe {
weak!(fn futimens(c_int, *const libc::timespec) -> c_int);
match futimens.get() {
- Some(futimens) => futimens(self.as_raw_fd(), times.0.as_ptr()),
+ Some(futimens) => futimens(self.as_raw_fd(), times.as_ptr()),
#[cfg(target_os = "macos")]
None => {
fn ts_to_tv(ts: &libc::timespec) -> libc::timeval {
@@ -1102,7 +1140,7 @@ impl File {
tv_usec: (ts.tv_nsec / 1000) as _
}
}
- let timevals = [ts_to_tv(&times.0[0]), ts_to_tv(&times.0[1])];
+ let timevals = [ts_to_tv(&times[0]), ts_to_tv(&times[1])];
libc::futimes(self.as_raw_fd(), timevals.as_ptr())
}
// futimes requires even newer Android.
@@ -1115,7 +1153,7 @@ impl File {
})?;
Ok(())
} else {
- cvt(unsafe { libc::futimens(self.as_raw_fd(), times.0.as_ptr()) })?;
+ cvt(unsafe { libc::futimens(self.as_raw_fd(), times.as_ptr()) })?;
Ok(())
}
}
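The timespec array assembled above is handed to `futimens` in the usual POSIX way; the standalone sketch below (using the `libc` crate directly, Linux-flavoured, not part of the patch) shows how `UTIME_OMIT` leaves one timestamp untouched.

    use std::io;
    use std::os::unix::io::RawFd;

    /// Update only the modification time of `fd`, leaving the access time as-is.
    fn set_mtime_only(fd: RawFd, mtime: libc::timespec) -> io::Result<()> {
        let times = [
            // accessed: UTIME_OMIT tells the kernel not to touch this field
            libc::timespec { tv_sec: 0, tv_nsec: libc::UTIME_OMIT as _ },
            // modified: the caller-provided timestamp
            mtime,
        ];
        if unsafe { libc::futimens(fd, times.as_ptr()) } == 0 {
            Ok(())
        } else {
            Err(io::Error::last_os_error())
        }
    }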
@@ -1128,9 +1166,7 @@ impl DirBuilder {
}
pub fn mkdir(&self, p: &Path) -> io::Result<()> {
- let p = cstr(p)?;
- cvt(unsafe { libc::mkdir(p.as_ptr(), self.mode) })?;
- Ok(())
+ run_path_with_cstr(p, |p| cvt(unsafe { libc::mkdir(p.as_ptr(), self.mode) }).map(|_| ()))
}
pub fn set_mode(&mut self, mode: u32) {
@@ -1138,10 +1174,6 @@ impl DirBuilder {
}
}
-fn cstr(path: &Path) -> io::Result<CString> {
- Ok(CString::new(path.as_os_str().as_bytes())?)
-}
-
impl AsInner<FileDesc> for File {
fn as_inner(&self) -> &FileDesc {
&self.0
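The `run_path_with_cstr` helper that replaces `cstr()` throughout this file is defined elsewhere in this series (sys/common/small_c_string.rs). Its contract is roughly the sketch below; the real helper first tries a small stack buffer so short paths avoid the heap allocation that `CString::new` incurs.

    use std::ffi::{CStr, CString};
    use std::io;
    use std::os::unix::ffi::OsStrExt;
    use std::path::Path;

    // Simplified stand-in: always heap-allocates, unlike the real helper.
    fn run_path_with_cstr_sketch<T>(
        path: &Path,
        f: impl FnOnce(&CStr) -> io::Result<T>,
    ) -> io::Result<T> {
        let cstr = CString::new(path.as_os_str().as_bytes())?; // NulError -> io::Error
        f(&cstr)
    }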
@@ -1192,7 +1224,12 @@ impl FromRawFd for File {
impl fmt::Debug for File {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- #[cfg(any(target_os = "linux", target_os = "netbsd"))]
+ #[cfg(any(
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "illumos",
+ target_os = "solaris"
+ ))]
fn get_path(fd: c_int) -> Option<PathBuf> {
let mut p = PathBuf::from("/proc/self/fd");
p.push(&fd.to_string());
@@ -1247,14 +1284,23 @@ impl fmt::Debug for File {
target_os = "macos",
target_os = "vxworks",
all(target_os = "freebsd", target_arch = "x86_64"),
- target_os = "netbsd"
+ target_os = "netbsd",
+ target_os = "illumos",
+ target_os = "solaris"
)))]
fn get_path(_fd: c_int) -> Option<PathBuf> {
// FIXME(#24570): implement this for other Unix platforms
None
}
- #[cfg(any(target_os = "linux", target_os = "macos", target_os = "vxworks"))]
+ #[cfg(any(
+ target_os = "linux",
+ target_os = "macos",
+ target_os = "freebsd",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ target_os = "vxworks"
+ ))]
fn get_mode(fd: c_int) -> Option<(bool, bool)> {
let mode = unsafe { libc::fcntl(fd, libc::F_GETFL) };
if mode == -1 {
@@ -1268,7 +1314,14 @@ impl fmt::Debug for File {
}
}
- #[cfg(not(any(target_os = "linux", target_os = "macos", target_os = "vxworks")))]
+ #[cfg(not(any(
+ target_os = "linux",
+ target_os = "macos",
+ target_os = "freebsd",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ target_os = "vxworks"
+ )))]
fn get_mode(_fd: c_int) -> Option<(bool, bool)> {
// FIXME(#24570): implement this for other Unix platforms
None
@@ -1287,173 +1340,170 @@ impl fmt::Debug for File {
}
}
-pub fn readdir(p: &Path) -> io::Result<ReadDir> {
- let root = p.to_path_buf();
- let p = cstr(p)?;
- unsafe {
- let ptr = libc::opendir(p.as_ptr());
- if ptr.is_null() {
- Err(Error::last_os_error())
- } else {
- let inner = InnerReadDir { dirp: Dir(ptr), root };
- Ok(ReadDir {
- inner: Arc::new(inner),
- #[cfg(not(any(
- target_os = "android",
- target_os = "linux",
- target_os = "solaris",
- target_os = "illumos",
- target_os = "fuchsia",
- target_os = "redox",
- )))]
- end_of_stream: false,
- })
- }
+pub fn readdir(path: &Path) -> io::Result<ReadDir> {
+ let ptr = run_path_with_cstr(path, |p| unsafe { Ok(libc::opendir(p.as_ptr())) })?;
+ if ptr.is_null() {
+ Err(Error::last_os_error())
+ } else {
+ let root = path.to_path_buf();
+ let inner = InnerReadDir { dirp: Dir(ptr), root };
+ Ok(ReadDir {
+ inner: Arc::new(inner),
+ #[cfg(not(any(
+ target_os = "android",
+ target_os = "linux",
+ target_os = "solaris",
+ target_os = "illumos",
+ target_os = "fuchsia",
+ target_os = "redox",
+ )))]
+ end_of_stream: false,
+ })
}
}
pub fn unlink(p: &Path) -> io::Result<()> {
- let p = cstr(p)?;
- cvt(unsafe { libc::unlink(p.as_ptr()) })?;
- Ok(())
+ run_path_with_cstr(p, |p| cvt(unsafe { libc::unlink(p.as_ptr()) }).map(|_| ()))
}
pub fn rename(old: &Path, new: &Path) -> io::Result<()> {
- let old = cstr(old)?;
- let new = cstr(new)?;
- cvt(unsafe { libc::rename(old.as_ptr(), new.as_ptr()) })?;
- Ok(())
+ run_path_with_cstr(old, |old| {
+ run_path_with_cstr(new, |new| {
+ cvt(unsafe { libc::rename(old.as_ptr(), new.as_ptr()) }).map(|_| ())
+ })
+ })
}
pub fn set_perm(p: &Path, perm: FilePermissions) -> io::Result<()> {
- let p = cstr(p)?;
- cvt_r(|| unsafe { libc::chmod(p.as_ptr(), perm.mode) })?;
- Ok(())
+ run_path_with_cstr(p, |p| cvt_r(|| unsafe { libc::chmod(p.as_ptr(), perm.mode) }).map(|_| ()))
}
pub fn rmdir(p: &Path) -> io::Result<()> {
- let p = cstr(p)?;
- cvt(unsafe { libc::rmdir(p.as_ptr()) })?;
- Ok(())
+ run_path_with_cstr(p, |p| cvt(unsafe { libc::rmdir(p.as_ptr()) }).map(|_| ()))
}
pub fn readlink(p: &Path) -> io::Result<PathBuf> {
- let c_path = cstr(p)?;
- let p = c_path.as_ptr();
+ run_path_with_cstr(p, |c_path| {
+ let p = c_path.as_ptr();
- let mut buf = Vec::with_capacity(256);
+ let mut buf = Vec::with_capacity(256);
- loop {
- let buf_read =
- cvt(unsafe { libc::readlink(p, buf.as_mut_ptr() as *mut _, buf.capacity()) })? as usize;
+ loop {
+ let buf_read =
+ cvt(unsafe { libc::readlink(p, buf.as_mut_ptr() as *mut _, buf.capacity()) })?
+ as usize;
- unsafe {
- buf.set_len(buf_read);
- }
+ unsafe {
+ buf.set_len(buf_read);
+ }
- if buf_read != buf.capacity() {
- buf.shrink_to_fit();
+ if buf_read != buf.capacity() {
+ buf.shrink_to_fit();
- return Ok(PathBuf::from(OsString::from_vec(buf)));
- }
+ return Ok(PathBuf::from(OsString::from_vec(buf)));
+ }
- // Trigger the internal buffer resizing logic of `Vec` by requiring
- // more space than the current capacity. The length is guaranteed to be
- // the same as the capacity due to the if statement above.
- buf.reserve(1);
- }
+ // Trigger the internal buffer resizing logic of `Vec` by requiring
+ // more space than the current capacity. The length is guaranteed to be
+ // the same as the capacity due to the if statement above.
+ buf.reserve(1);
+ }
+ })
}
pub fn symlink(original: &Path, link: &Path) -> io::Result<()> {
- let original = cstr(original)?;
- let link = cstr(link)?;
- cvt(unsafe { libc::symlink(original.as_ptr(), link.as_ptr()) })?;
- Ok(())
+ run_path_with_cstr(original, |original| {
+ run_path_with_cstr(link, |link| {
+ cvt(unsafe { libc::symlink(original.as_ptr(), link.as_ptr()) }).map(|_| ())
+ })
+ })
}
pub fn link(original: &Path, link: &Path) -> io::Result<()> {
- let original = cstr(original)?;
- let link = cstr(link)?;
- cfg_if::cfg_if! {
- if #[cfg(any(target_os = "vxworks", target_os = "redox", target_os = "android", target_os = "espidf", target_os = "horizon"))] {
- // VxWorks, Redox and ESP-IDF lack `linkat`, so use `link` instead. POSIX leaves
- // it implementation-defined whether `link` follows symlinks, so rely on the
- // `symlink_hard_link` test in library/std/src/fs/tests.rs to check the behavior.
- // Android has `linkat` on newer versions, but we happen to know `link`
- // always has the correct behavior, so it's here as well.
- cvt(unsafe { libc::link(original.as_ptr(), link.as_ptr()) })?;
- } else if #[cfg(target_os = "macos")] {
- // On MacOS, older versions (<=10.9) lack support for linkat while newer
- // versions have it. We want to use linkat if it is available, so we use weak!
- // to check. `linkat` is preferable to `link` because it gives us a flag to
- // specify how symlinks should be handled. We pass 0 as the flags argument,
- // meaning it shouldn't follow symlinks.
- weak!(fn linkat(c_int, *const c_char, c_int, *const c_char, c_int) -> c_int);
-
- if let Some(f) = linkat.get() {
- cvt(unsafe { f(libc::AT_FDCWD, original.as_ptr(), libc::AT_FDCWD, link.as_ptr(), 0) })?;
- } else {
- cvt(unsafe { libc::link(original.as_ptr(), link.as_ptr()) })?;
- };
- } else {
- // Where we can, use `linkat` instead of `link`; see the comment above
- // this one for details on why.
- cvt(unsafe { libc::linkat(libc::AT_FDCWD, original.as_ptr(), libc::AT_FDCWD, link.as_ptr(), 0) })?;
- }
- }
- Ok(())
+ run_path_with_cstr(original, |original| {
+ run_path_with_cstr(link, |link| {
+ cfg_if::cfg_if! {
+ if #[cfg(any(target_os = "vxworks", target_os = "redox", target_os = "android", target_os = "espidf", target_os = "horizon"))] {
+ // VxWorks, Redox and ESP-IDF lack `linkat`, so use `link` instead. POSIX leaves
+ // it implementation-defined whether `link` follows symlinks, so rely on the
+ // `symlink_hard_link` test in library/std/src/fs/tests.rs to check the behavior.
+ // Android has `linkat` on newer versions, but we happen to know `link`
+ // always has the correct behavior, so it's here as well.
+ cvt(unsafe { libc::link(original.as_ptr(), link.as_ptr()) })?;
+ } else if #[cfg(target_os = "macos")] {
+ // On macOS, older versions (<=10.9) lack support for linkat while newer
+ // versions have it. We want to use linkat if it is available, so we use weak!
+ // to check. `linkat` is preferable to `link` because it gives us a flag to
+ // specify how symlinks should be handled. We pass 0 as the flags argument,
+ // meaning it shouldn't follow symlinks.
+ weak!(fn linkat(c_int, *const c_char, c_int, *const c_char, c_int) -> c_int);
+
+ if let Some(f) = linkat.get() {
+ cvt(unsafe { f(libc::AT_FDCWD, original.as_ptr(), libc::AT_FDCWD, link.as_ptr(), 0) })?;
+ } else {
+ cvt(unsafe { libc::link(original.as_ptr(), link.as_ptr()) })?;
+ };
+ } else {
+ // Where we can, use `linkat` instead of `link`; see the comment above
+ // this one for details on why.
+ cvt(unsafe { libc::linkat(libc::AT_FDCWD, original.as_ptr(), libc::AT_FDCWD, link.as_ptr(), 0) })?;
+ }
+ }
+ Ok(())
+ })
+ })
}
pub fn stat(p: &Path) -> io::Result<FileAttr> {
- let p = cstr(p)?;
-
- cfg_has_statx! {
- if let Some(ret) = unsafe { try_statx(
- libc::AT_FDCWD,
- p.as_ptr(),
- libc::AT_STATX_SYNC_AS_STAT,
- libc::STATX_ALL,
- ) } {
- return ret;
+ run_path_with_cstr(p, |p| {
+ cfg_has_statx! {
+ if let Some(ret) = unsafe { try_statx(
+ libc::AT_FDCWD,
+ p.as_ptr(),
+ libc::AT_STATX_SYNC_AS_STAT,
+ libc::STATX_ALL,
+ ) } {
+ return ret;
+ }
}
- }
- let mut stat: stat64 = unsafe { mem::zeroed() };
- cvt(unsafe { stat64(p.as_ptr(), &mut stat) })?;
- Ok(FileAttr::from_stat64(stat))
+ let mut stat: stat64 = unsafe { mem::zeroed() };
+ cvt(unsafe { stat64(p.as_ptr(), &mut stat) })?;
+ Ok(FileAttr::from_stat64(stat))
+ })
}
pub fn lstat(p: &Path) -> io::Result<FileAttr> {
- let p = cstr(p)?;
-
- cfg_has_statx! {
- if let Some(ret) = unsafe { try_statx(
- libc::AT_FDCWD,
- p.as_ptr(),
- libc::AT_SYMLINK_NOFOLLOW | libc::AT_STATX_SYNC_AS_STAT,
- libc::STATX_ALL,
- ) } {
- return ret;
+ run_path_with_cstr(p, |p| {
+ cfg_has_statx! {
+ if let Some(ret) = unsafe { try_statx(
+ libc::AT_FDCWD,
+ p.as_ptr(),
+ libc::AT_SYMLINK_NOFOLLOW | libc::AT_STATX_SYNC_AS_STAT,
+ libc::STATX_ALL,
+ ) } {
+ return ret;
+ }
}
- }
- let mut stat: stat64 = unsafe { mem::zeroed() };
- cvt(unsafe { lstat64(p.as_ptr(), &mut stat) })?;
- Ok(FileAttr::from_stat64(stat))
+ let mut stat: stat64 = unsafe { mem::zeroed() };
+ cvt(unsafe { lstat64(p.as_ptr(), &mut stat) })?;
+ Ok(FileAttr::from_stat64(stat))
+ })
}
pub fn canonicalize(p: &Path) -> io::Result<PathBuf> {
- let path = CString::new(p.as_os_str().as_bytes())?;
- let buf;
- unsafe {
- let r = libc::realpath(path.as_ptr(), ptr::null_mut());
- if r.is_null() {
- return Err(io::Error::last_os_error());
- }
- buf = CStr::from_ptr(r).to_bytes().to_vec();
- libc::free(r as *mut _);
+ let r = run_path_with_cstr(p, |path| unsafe {
+ Ok(libc::realpath(path.as_ptr(), ptr::null_mut()))
+ })?;
+ if r.is_null() {
+ return Err(io::Error::last_os_error());
}
- Ok(PathBuf::from(OsString::from_vec(buf)))
+ Ok(PathBuf::from(OsString::from_vec(unsafe {
+ let buf = CStr::from_ptr(r).to_bytes().to_vec();
+ libc::free(r as *mut _);
+ buf
+ })))
}
fn open_from(from: &Path) -> io::Result<(crate::fs::File, crate::fs::Metadata)> {
@@ -1603,9 +1653,9 @@ pub fn copy(from: &Path, to: &Path) -> io::Result<u64> {
// Opportunistically attempt to create a copy-on-write clone of `from`
// using `fclonefileat`.
if HAS_FCLONEFILEAT.load(Ordering::Relaxed) {
- let to = cstr(to)?;
- let clonefile_result =
- cvt(unsafe { fclonefileat(reader.as_raw_fd(), libc::AT_FDCWD, to.as_ptr(), 0) });
+ let clonefile_result = run_path_with_cstr(to, |to| {
+ cvt(unsafe { fclonefileat(reader.as_raw_fd(), libc::AT_FDCWD, to.as_ptr(), 0) })
+ });
match clonefile_result {
Ok(_) => return Ok(reader_metadata.len()),
Err(err) => match err.raw_os_error() {
@@ -1649,9 +1699,10 @@ pub fn copy(from: &Path, to: &Path) -> io::Result<u64> {
}
pub fn chown(path: &Path, uid: u32, gid: u32) -> io::Result<()> {
- let path = cstr(path)?;
- cvt(unsafe { libc::chown(path.as_ptr(), uid as libc::uid_t, gid as libc::gid_t) })?;
- Ok(())
+ run_path_with_cstr(path, |path| {
+ cvt(unsafe { libc::chown(path.as_ptr(), uid as libc::uid_t, gid as libc::gid_t) })
+ .map(|_| ())
+ })
}
pub fn fchown(fd: c_int, uid: u32, gid: u32) -> io::Result<()> {
@@ -1660,16 +1711,15 @@ pub fn fchown(fd: c_int, uid: u32, gid: u32) -> io::Result<()> {
}
pub fn lchown(path: &Path, uid: u32, gid: u32) -> io::Result<()> {
- let path = cstr(path)?;
- cvt(unsafe { libc::lchown(path.as_ptr(), uid as libc::uid_t, gid as libc::gid_t) })?;
- Ok(())
+ run_path_with_cstr(path, |path| {
+ cvt(unsafe { libc::lchown(path.as_ptr(), uid as libc::uid_t, gid as libc::gid_t) })
+ .map(|_| ())
+ })
}
#[cfg(not(any(target_os = "fuchsia", target_os = "vxworks")))]
pub fn chroot(dir: &Path) -> io::Result<()> {
- let dir = cstr(dir)?;
- cvt(unsafe { libc::chroot(dir.as_ptr()) })?;
- Ok(())
+ run_path_with_cstr(dir, |dir| cvt(unsafe { libc::chroot(dir.as_ptr()) }).map(|_| ()))
}
pub use remove_dir_impl::remove_dir_all;
@@ -1683,13 +1733,14 @@ mod remove_dir_impl {
// Modern implementation using openat(), unlinkat() and fdopendir()
#[cfg(not(any(target_os = "redox", target_os = "espidf", target_os = "horizon", miri)))]
mod remove_dir_impl {
- use super::{cstr, lstat, Dir, DirEntry, InnerReadDir, ReadDir};
+ use super::{lstat, Dir, DirEntry, InnerReadDir, ReadDir};
use crate::ffi::CStr;
use crate::io;
use crate::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd};
use crate::os::unix::prelude::{OwnedFd, RawFd};
use crate::path::{Path, PathBuf};
use crate::sync::Arc;
+ use crate::sys::common::small_c_string::run_path_with_cstr;
use crate::sys::{cvt, cvt_r};
#[cfg(not(all(target_os = "macos", not(target_arch = "aarch64")),))]
@@ -1856,7 +1907,7 @@ mod remove_dir_impl {
if attr.file_type().is_symlink() {
crate::fs::remove_file(p)
} else {
- remove_dir_all_recursive(None, &cstr(p)?)
+ run_path_with_cstr(p, |p| remove_dir_all_recursive(None, &p))
}
}
diff --git a/library/std/src/sys/unix/io.rs b/library/std/src/sys/unix/io.rs
index deb5ee76b..29c340dd3 100644
--- a/library/std/src/sys/unix/io.rs
+++ b/library/std/src/sys/unix/io.rs
@@ -1,4 +1,5 @@
use crate::marker::PhantomData;
+use crate::os::fd::{AsFd, AsRawFd};
use crate::slice;
use libc::{c_void, iovec};
@@ -74,3 +75,8 @@ impl<'a> IoSliceMut<'a> {
unsafe { slice::from_raw_parts_mut(self.vec.iov_base as *mut u8, self.vec.iov_len) }
}
}
+
+pub fn is_terminal(fd: &impl AsFd) -> bool {
+ let fd = fd.as_fd();
+ unsafe { libc::isatty(fd.as_raw_fd()) != 0 }
+}
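The new helper underpins the `IsTerminal` trait (gated as `feature(is_terminal)` when this change landed, stabilized later); a typical caller looks roughly like this.

    use std::io::{self, IsTerminal};

    fn main() {
        if io::stdout().is_terminal() {
            println!("stdout is a TTY");
        } else {
            println!("stdout is redirected");
        }
    }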
diff --git a/library/std/src/sys/unix/kernel_copy.rs b/library/std/src/sys/unix/kernel_copy.rs
index 8f7abb55e..94546ca09 100644
--- a/library/std/src/sys/unix/kernel_copy.rs
+++ b/library/std/src/sys/unix/kernel_copy.rs
@@ -20,7 +20,7 @@
//! Since those syscalls have requirements that cannot be fully checked in advance and
//! gathering additional information about file descriptors would require additional syscalls
//! anyway it simply attempts to use them one after another (guided by inaccurate hints) to
-//! figure out which one works and and falls back to the generic read-write copy loop if none of them
+//! figure out which one works and falls back to the generic read-write copy loop if none of them
//! does.
//! Once a working syscall is found for a pair of file descriptors it will be called in a loop
//! until the copy operation is completed.
diff --git a/library/std/src/sys/unix/locks/fuchsia_mutex.rs b/library/std/src/sys/unix/locks/fuchsia_mutex.rs
index ce427599c..117611ce4 100644
--- a/library/std/src/sys/unix/locks/fuchsia_mutex.rs
+++ b/library/std/src/sys/unix/locks/fuchsia_mutex.rs
@@ -86,9 +86,6 @@ impl Mutex {
}
#[inline]
- pub unsafe fn init(&mut self) {}
-
- #[inline]
pub unsafe fn try_lock(&self) -> bool {
let thread_self = zx_thread_self();
self.futex.compare_exchange(UNLOCKED, to_state(thread_self), Acquire, Relaxed).is_ok()
@@ -138,7 +135,7 @@ impl Mutex {
}
}
- // The state has changed or a wakeup occured, try to lock the mutex.
+ // The state has changed or a wakeup occurred, try to lock the mutex.
match self.futex.compare_exchange(UNLOCKED, owned_state, Acquire, Relaxed) {
Ok(_) => return,
Err(updated) => state = updated,
diff --git a/library/std/src/sys/unix/locks/futex_mutex.rs b/library/std/src/sys/unix/locks/futex_mutex.rs
index 99ba86e5f..33b13dad4 100644
--- a/library/std/src/sys/unix/locks/futex_mutex.rs
+++ b/library/std/src/sys/unix/locks/futex_mutex.rs
@@ -20,9 +20,6 @@ impl Mutex {
}
#[inline]
- pub unsafe fn init(&mut self) {}
-
- #[inline]
pub unsafe fn try_lock(&self) -> bool {
self.futex.compare_exchange(0, 1, Acquire, Relaxed).is_ok()
}
@@ -53,7 +50,7 @@ impl Mutex {
// We avoid an unnecessary write if it was already set to 2,
// to be friendlier for the caches.
if state != 2 && self.futex.swap(2, Acquire) == 0 {
- // We changed it from 0 to 2, so we just succesfully locked it.
+ // We changed it from 0 to 2, so we just successfully locked it.
return;
}
diff --git a/library/std/src/sys/unix/locks/futex_rwlock.rs b/library/std/src/sys/unix/locks/futex_rwlock.rs
index b3bbbf743..0cc92244e 100644
--- a/library/std/src/sys/unix/locks/futex_rwlock.rs
+++ b/library/std/src/sys/unix/locks/futex_rwlock.rs
@@ -54,7 +54,7 @@ fn is_read_lockable(state: u32) -> bool {
// We don't allow read-locking if there are readers waiting, even if the lock is unlocked
// and there are no writers waiting. The only situation when this happens is after unlocking,
// at which point the unlocking thread might be waking up writers, which have priority over readers.
- // The unlocking thread will clear the readers waiting bit and wake up readers, if necssary.
+ // The unlocking thread will clear the readers waiting bit and wake up readers, if necessary.
state & MASK < MAX_READERS && !has_readers_waiting(state) && !has_writers_waiting(state)
}
diff --git a/library/std/src/sys/unix/locks/mod.rs b/library/std/src/sys/unix/locks/mod.rs
index f5f92f693..9bb314b70 100644
--- a/library/std/src/sys/unix/locks/mod.rs
+++ b/library/std/src/sys/unix/locks/mod.rs
@@ -11,21 +11,21 @@ cfg_if::cfg_if! {
mod futex_rwlock;
mod futex_condvar;
pub(crate) use futex_mutex::{Mutex, MovableMutex};
- pub(crate) use futex_rwlock::{RwLock, MovableRwLock};
+ pub(crate) use futex_rwlock::MovableRwLock;
pub(crate) use futex_condvar::MovableCondvar;
} else if #[cfg(target_os = "fuchsia")] {
mod fuchsia_mutex;
mod futex_rwlock;
mod futex_condvar;
pub(crate) use fuchsia_mutex::{Mutex, MovableMutex};
- pub(crate) use futex_rwlock::{RwLock, MovableRwLock};
+ pub(crate) use futex_rwlock::MovableRwLock;
pub(crate) use futex_condvar::MovableCondvar;
} else {
mod pthread_mutex;
mod pthread_rwlock;
mod pthread_condvar;
pub(crate) use pthread_mutex::{Mutex, MovableMutex};
- pub(crate) use pthread_rwlock::{RwLock, MovableRwLock};
+ pub(crate) use pthread_rwlock::MovableRwLock;
pub(crate) use pthread_condvar::MovableCondvar;
}
}
diff --git a/library/std/src/sys/unix/locks/pthread_condvar.rs b/library/std/src/sys/unix/locks/pthread_condvar.rs
index abf27e7db..4741c0c67 100644
--- a/library/std/src/sys/unix/locks/pthread_condvar.rs
+++ b/library/std/src/sys/unix/locks/pthread_condvar.rs
@@ -172,7 +172,7 @@ impl Condvar {
let mut sys_now = libc::timeval { tv_sec: 0, tv_usec: 0 };
let stable_now = Instant::now();
let r = libc::gettimeofday(&mut sys_now, ptr::null_mut());
- debug_assert_eq!(r, 0);
+ assert_eq!(r, 0, "unexpected error: {:?}", crate::io::Error::last_os_error());
let nsec = dur.subsec_nanos() as libc::c_long + (sys_now.tv_usec * 1000) as libc::c_long;
let extra = (nsec / 1_000_000_000) as libc::time_t;
diff --git a/library/std/src/sys/unix/locks/pthread_mutex.rs b/library/std/src/sys/unix/locks/pthread_mutex.rs
index 98afee69b..5964935dd 100644
--- a/library/std/src/sys/unix/locks/pthread_mutex.rs
+++ b/library/std/src/sys/unix/locks/pthread_mutex.rs
@@ -52,7 +52,7 @@ impl Mutex {
Mutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) }
}
#[inline]
- pub unsafe fn init(&mut self) {
+ unsafe fn init(&mut self) {
// Issue #33770
//
// A pthread mutex initialized with PTHREAD_MUTEX_INITIALIZER will have
diff --git a/library/std/src/sys/unix/mod.rs b/library/std/src/sys/unix/mod.rs
index 3d0d91460..9055a011c 100644
--- a/library/std/src/sys/unix/mod.rs
+++ b/library/std/src/sys/unix/mod.rs
@@ -44,12 +44,13 @@ pub mod thread_parker;
pub mod time;
#[cfg(target_os = "espidf")]
-pub fn init(argc: isize, argv: *const *const u8) {}
+pub fn init(argc: isize, argv: *const *const u8, _sigpipe: u8) {}
#[cfg(not(target_os = "espidf"))]
// SAFETY: must be called only once during runtime initialization.
// NOTE: this is not guaranteed to run, for example when Rust code is called externally.
-pub unsafe fn init(argc: isize, argv: *const *const u8) {
+// See `fn init()` in `library/std/src/rt.rs` for docs on `sigpipe`.
+pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) {
// The standard streams might be closed on application startup. To prevent
// std::io::{stdin, stdout, stderr} objects from using other unrelated file
// resources opened later, we reopen standard streams when they are closed.
@@ -61,8 +62,9 @@ pub unsafe fn init(argc: isize, argv: *const *const u8) {
// want!
//
// Hence, we set SIGPIPE to ignore when the program starts up in order
- // to prevent this problem.
- reset_sigpipe();
+ // to prevent this problem. Add `#[unix_sigpipe = "..."]` above `fn main()` to
+ // alter this behavior.
+ reset_sigpipe(sigpipe);
stack_overflow::init();
args::init(argc, argv);
@@ -151,12 +153,64 @@ pub unsafe fn init(argc: isize, argv: *const *const u8) {
}
}
- unsafe fn reset_sigpipe() {
+ unsafe fn reset_sigpipe(#[allow(unused_variables)] sigpipe: u8) {
#[cfg(not(any(target_os = "emscripten", target_os = "fuchsia", target_os = "horizon")))]
- rtassert!(signal(libc::SIGPIPE, libc::SIG_IGN) != libc::SIG_ERR);
+ {
+ // We don't want to add this as a public type to libstd, nor do we
+ // want to `include!` a file from the compiler (which would break
+ // Miri and xargo for example), so we choose to duplicate these
+ // constants from `compiler/rustc_session/src/config/sigpipe.rs`.
+ // See the other file for docs. NOTE: Make sure to keep them in
+ // sync!
+ mod sigpipe {
+ pub const DEFAULT: u8 = 0;
+ pub const INHERIT: u8 = 1;
+ pub const SIG_IGN: u8 = 2;
+ pub const SIG_DFL: u8 = 3;
+ }
+
+ let (sigpipe_attr_specified, handler) = match sigpipe {
+ sigpipe::DEFAULT => (false, Some(libc::SIG_IGN)),
+ sigpipe::INHERIT => (true, None),
+ sigpipe::SIG_IGN => (true, Some(libc::SIG_IGN)),
+ sigpipe::SIG_DFL => (true, Some(libc::SIG_DFL)),
+ _ => unreachable!(),
+ };
+ // The bootstrap compiler doesn't know about sigpipe::DEFAULT, and always passes in
+ // SIG_IGN. This causes some tests to fail because they expect SIGPIPE to be reset to
+ // default on process spawning (which doesn't happen if #[unix_sigpipe] is specified).
+ // Since we can't differentiate between the cases here, treat SIG_IGN as DEFAULT
+ // unconditionally.
+ if sigpipe_attr_specified && !(cfg!(bootstrap) && sigpipe == sigpipe::SIG_IGN) {
+ UNIX_SIGPIPE_ATTR_SPECIFIED.store(true, crate::sync::atomic::Ordering::Relaxed);
+ }
+ if let Some(handler) = handler {
+ rtassert!(signal(libc::SIGPIPE, handler) != libc::SIG_ERR);
+ }
+ }
}
}
+// This is set (up to once) in reset_sigpipe.
+#[cfg(not(any(
+ target_os = "espidf",
+ target_os = "emscripten",
+ target_os = "fuchsia",
+ target_os = "horizon"
+)))]
+static UNIX_SIGPIPE_ATTR_SPECIFIED: crate::sync::atomic::AtomicBool =
+ crate::sync::atomic::AtomicBool::new(false);
+
+#[cfg(not(any(
+ target_os = "espidf",
+ target_os = "emscripten",
+ target_os = "fuchsia",
+ target_os = "horizon"
+)))]
+pub(crate) fn unix_sigpipe_attr_specified() -> bool {
+ UNIX_SIGPIPE_ATTR_SPECIFIED.load(crate::sync::atomic::Ordering::Relaxed)
+}
+
// SAFETY: must be called only once during runtime cleanup.
// NOTE: this is not guaranteed to run, for example when the program aborts.
pub unsafe fn cleanup() {
@@ -295,8 +349,10 @@ pub fn abort_internal() -> ! {
cfg_if::cfg_if! {
if #[cfg(target_os = "android")] {
- #[link(name = "dl")]
- #[link(name = "log")]
+ #[link(name = "dl", kind = "static", modifiers = "-bundle",
+ cfg(target_feature = "crt-static"))]
+ #[link(name = "dl", cfg(not(target_feature = "crt-static")))]
+ #[link(name = "log", cfg(not(target_feature = "crt-static")))]
extern "C" {}
} else if #[cfg(target_os = "freebsd")] {
#[link(name = "execinfo")]
@@ -326,16 +382,12 @@ cfg_if::cfg_if! {
extern "C" {}
} else if #[cfg(target_os = "macos")] {
#[link(name = "System")]
- // res_init and friends require -lresolv on macOS/iOS.
- // See #41582 and https://blog.achernya.com/2013/03/os-x-has-silly-libsystem.html
- #[link(name = "resolv")]
extern "C" {}
} else if #[cfg(any(target_os = "ios", target_os = "watchos"))] {
#[link(name = "System")]
#[link(name = "objc")]
#[link(name = "Security", kind = "framework")]
#[link(name = "Foundation", kind = "framework")]
- #[link(name = "resolv")]
extern "C" {}
} else if #[cfg(target_os = "fuchsia")] {
#[link(name = "zircon")]
diff --git a/library/std/src/sys/unix/net.rs b/library/std/src/sys/unix/net.rs
index 462a45b01..b84bf8f92 100644
--- a/library/std/src/sys/unix/net.rs
+++ b/library/std/src/sys/unix/net.rs
@@ -393,6 +393,17 @@ impl Socket {
}
#[cfg(any(target_os = "android", target_os = "linux",))]
+ pub fn set_quickack(&self, quickack: bool) -> io::Result<()> {
+ setsockopt(self, libc::IPPROTO_TCP, libc::TCP_QUICKACK, quickack as c_int)
+ }
+
+ #[cfg(any(target_os = "android", target_os = "linux",))]
+ pub fn quickack(&self) -> io::Result<bool> {
+ let raw: c_int = getsockopt(self, libc::IPPROTO_TCP, libc::TCP_QUICKACK)?;
+ Ok(raw != 0)
+ }
+
+ #[cfg(any(target_os = "android", target_os = "linux",))]
pub fn set_passcred(&self, passcred: bool) -> io::Result<()> {
setsockopt(self, libc::SOL_SOCKET, libc::SO_PASSCRED, passcred as libc::c_int)
}
@@ -427,6 +438,17 @@ impl Socket {
self.0.set_nonblocking(nonblocking)
}
+ #[cfg(any(target_os = "linux", target_os = "freebsd", target_os = "openbsd"))]
+ pub fn set_mark(&self, mark: u32) -> io::Result<()> {
+ #[cfg(target_os = "linux")]
+ let option = libc::SO_MARK;
+ #[cfg(target_os = "freebsd")]
+ let option = libc::SO_USER_COOKIE;
+ #[cfg(target_os = "openbsd")]
+ let option = libc::SO_RTABLE;
+ setsockopt(self, libc::SOL_SOCKET, option, mark as libc::c_int)
+ }
+
pub fn take_error(&self) -> io::Result<Option<io::Error>> {
let raw: c_int = getsockopt(self, libc::SOL_SOCKET, libc::SO_ERROR)?;
if raw == 0 { Ok(None) } else { Ok(Some(io::Error::from_raw_os_error(raw as i32))) }
diff --git a/library/std/src/sys/unix/os.rs b/library/std/src/sys/unix/os.rs
index 46545a083..2f2663db6 100644
--- a/library/std/src/sys/unix/os.rs
+++ b/library/std/src/sys/unix/os.rs
@@ -7,6 +7,7 @@ mod tests;
use crate::os::unix::prelude::*;
+use crate::convert::TryFrom;
use crate::error::Error as StdError;
use crate::ffi::{CStr, CString, OsStr, OsString};
use crate::fmt;
@@ -17,10 +18,11 @@ use crate::path::{self, PathBuf};
use crate::ptr;
use crate::slice;
use crate::str;
+use crate::sync::{PoisonError, RwLock};
+use crate::sys::common::small_c_string::{run_path_with_cstr, run_with_cstr};
use crate::sys::cvt;
use crate::sys::fd;
use crate::sys::memchr;
-use crate::sys_common::rwlock::{StaticRwLock, StaticRwLockReadGuard};
use crate::vec;
#[cfg(all(target_env = "gnu", not(target_os = "vxworks")))]
@@ -125,7 +127,9 @@ pub fn error_string(errno: i32) -> String {
}
let p = p as *const _;
- str::from_utf8(CStr::from_ptr(p).to_bytes()).unwrap().to_owned()
+ // We can't always expect a UTF-8 environment. When we don't get that luxury,
+ // it's better to give a low-quality error message than none at all.
+ String::from_utf8_lossy(CStr::from_ptr(p).to_bytes()).into()
}
}
@@ -168,12 +172,8 @@ pub fn chdir(p: &path::Path) -> io::Result<()> {
#[cfg(not(target_os = "espidf"))]
pub fn chdir(p: &path::Path) -> io::Result<()> {
- let p: &OsStr = p.as_ref();
- let p = CString::new(p.as_bytes())?;
- if unsafe { libc::chdir(p.as_ptr()) } != 0 {
- return Err(io::Error::last_os_error());
- }
- Ok(())
+ let result = run_path_with_cstr(p, |p| unsafe { Ok(libc::chdir(p.as_ptr())) })?;
+ if result == 0 { Ok(()) } else { Err(io::Error::last_os_error()) }
}
pub struct SplitPaths<'a> {
@@ -501,10 +501,10 @@ pub unsafe fn environ() -> *mut *const *const c_char {
ptr::addr_of_mut!(environ)
}
-static ENV_LOCK: StaticRwLock = StaticRwLock::new();
+static ENV_LOCK: RwLock<()> = RwLock::new(());
-pub fn env_read_lock() -> StaticRwLockReadGuard {
- ENV_LOCK.read()
+pub fn env_read_lock() -> impl Drop {
+ ENV_LOCK.read().unwrap_or_else(PoisonError::into_inner)
}
/// Returns a vector of (variable, value) byte-vector pairs for all the
@@ -546,35 +546,32 @@ pub fn env() -> Env {
pub fn getenv(k: &OsStr) -> Option<OsString> {
// environment variables with a nul byte can't be set, so their value is
// always None as well
- let k = CString::new(k.as_bytes()).ok()?;
- unsafe {
+ let s = run_with_cstr(k.as_bytes(), |k| {
let _guard = env_read_lock();
- let s = libc::getenv(k.as_ptr()) as *const libc::c_char;
- if s.is_null() {
- None
- } else {
- Some(OsStringExt::from_vec(CStr::from_ptr(s).to_bytes().to_vec()))
- }
+ Ok(unsafe { libc::getenv(k.as_ptr()) } as *const libc::c_char)
+ })
+ .ok()?;
+ if s.is_null() {
+ None
+ } else {
+ Some(OsStringExt::from_vec(unsafe { CStr::from_ptr(s) }.to_bytes().to_vec()))
}
}
pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> {
- let k = CString::new(k.as_bytes())?;
- let v = CString::new(v.as_bytes())?;
-
- unsafe {
- let _guard = ENV_LOCK.write();
- cvt(libc::setenv(k.as_ptr(), v.as_ptr(), 1)).map(drop)
- }
+ run_with_cstr(k.as_bytes(), |k| {
+ run_with_cstr(v.as_bytes(), |v| {
+ let _guard = ENV_LOCK.write();
+ cvt(unsafe { libc::setenv(k.as_ptr(), v.as_ptr(), 1) }).map(drop)
+ })
+ })
}
pub fn unsetenv(n: &OsStr) -> io::Result<()> {
- let nbuf = CString::new(n.as_bytes())?;
-
- unsafe {
+ run_with_cstr(n.as_bytes(), |nbuf| {
let _guard = ENV_LOCK.write();
- cvt(libc::unsetenv(nbuf.as_ptr())).map(drop)
- }
+ cvt(unsafe { libc::unsetenv(nbuf.as_ptr()) }).map(drop)
+ })
}
#[cfg(not(target_os = "espidf"))]
diff --git a/library/std/src/sys/unix/os_str.rs b/library/std/src/sys/unix/os_str.rs
index ccbc18224..017e2af29 100644
--- a/library/std/src/sys/unix/os_str.rs
+++ b/library/std/src/sys/unix/os_str.rs
@@ -11,7 +11,7 @@ use crate::str;
use crate::sync::Arc;
use crate::sys_common::{AsInner, IntoInner};
-use core::str::lossy::{Utf8Lossy, Utf8LossyChunk};
+use core::str::Utf8Chunks;
#[cfg(test)]
#[path = "../unix/os_str/tests.rs"]
@@ -29,26 +29,32 @@ pub struct Slice {
}
impl fmt::Debug for Slice {
- fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
- // Writes out a valid unicode string with the correct escape sequences
-
- formatter.write_str("\"")?;
- for Utf8LossyChunk { valid, broken } in Utf8Lossy::from_bytes(&self.inner).chunks() {
- for c in valid.chars().flat_map(|c| c.escape_debug()) {
- formatter.write_char(c)?
- }
-
- for b in broken {
- write!(formatter, "\\x{:02X}", b)?;
- }
- }
- formatter.write_str("\"")
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&Utf8Chunks::new(&self.inner).debug(), f)
}
}
impl fmt::Display for Slice {
- fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::Display::fmt(&Utf8Lossy::from_bytes(&self.inner), formatter)
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // If we're the empty string then our iterator won't actually yield
+ // anything, so perform the formatting manually
+ if self.inner.is_empty() {
+ return "".fmt(f);
+ }
+
+ for chunk in Utf8Chunks::new(&self.inner) {
+ let valid = chunk.valid();
+ // If we successfully decoded the whole chunk as a valid string then
+ // we can return a direct formatting of the string which will also
+ // respect various formatting flags if possible.
+ if chunk.invalid().is_empty() {
+ return valid.fmt(f);
+ }
+
+ f.write_str(valid)?;
+ f.write_char(char::REPLACEMENT_CHARACTER)?;
+ }
+ Ok(())
}
}
diff --git a/library/std/src/sys/unix/os_str/tests.rs b/library/std/src/sys/unix/os_str/tests.rs
index 213277f01..22ba0c923 100644
--- a/library/std/src/sys/unix/os_str/tests.rs
+++ b/library/std/src/sys/unix/os_str/tests.rs
@@ -8,3 +8,11 @@ fn slice_debug_output() {
assert_eq!(output, expected);
}
+
+#[test]
+fn display() {
+ assert_eq!(
+ "Hello\u{FFFD}\u{FFFD} There\u{FFFD} Goodbye",
+ Slice::from_u8_slice(b"Hello\xC0\x80 There\xE6\x83 Goodbye").to_string(),
+ );
+}
diff --git a/library/std/src/sys/unix/process/process_common.rs b/library/std/src/sys/unix/process/process_common.rs
index bca1b65a7..848adca78 100644
--- a/library/std/src/sys/unix/process/process_common.rs
+++ b/library/std/src/sys/unix/process/process_common.rs
@@ -39,17 +39,39 @@ cfg_if::cfg_if! {
// https://github.com/aosp-mirror/platform_bionic/blob/ad8dcd6023294b646e5a8288c0ed431b0845da49/libc/include/android/legacy_signal_inlines.h
cfg_if::cfg_if! {
if #[cfg(target_os = "android")] {
+ #[allow(dead_code)]
pub unsafe fn sigemptyset(set: *mut libc::sigset_t) -> libc::c_int {
set.write_bytes(0u8, 1);
return 0;
}
+
#[allow(dead_code)]
pub unsafe fn sigaddset(set: *mut libc::sigset_t, signum: libc::c_int) -> libc::c_int {
- use crate::{slice, mem};
+ use crate::{
+ mem::{align_of, size_of},
+ slice,
+ };
+ use libc::{c_ulong, sigset_t};
+
+ // The implementations from bionic (android libc) type pun `sigset_t` as an
+ // array of `c_ulong`. This works, but let's add a smoke check to make sure
+ // that doesn't change.
+ const _: () = assert!(
+ align_of::<c_ulong>() == align_of::<sigset_t>()
+ && (size_of::<sigset_t>() % size_of::<c_ulong>()) == 0
+ );
- let raw = slice::from_raw_parts_mut(set as *mut u8, mem::size_of::<libc::sigset_t>());
let bit = (signum - 1) as usize;
- raw[bit / 8] |= 1 << (bit % 8);
+ if set.is_null() || bit >= (8 * size_of::<sigset_t>()) {
+ crate::sys::unix::os::set_errno(libc::EINVAL);
+ return -1;
+ }
+ let raw = slice::from_raw_parts_mut(
+ set as *mut c_ulong,
+ size_of::<sigset_t>() / size_of::<c_ulong>(),
+ );
+ const LONG_BIT: usize = size_of::<c_ulong>() * 8;
+ raw[bit / LONG_BIT] |= 1 << (bit % LONG_BIT);
return 0;
}
} else {
@@ -72,6 +94,7 @@ pub struct Command {
argv: Argv,
env: CommandEnv,
+ program_kind: ProgramKind,
cwd: Option<CString>,
uid: Option<uid_t>,
gid: Option<gid_t>,
@@ -128,15 +151,40 @@ pub enum Stdio {
Fd(FileDesc),
}
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum ProgramKind {
+ /// A program that would be looked up on the PATH (e.g. `ls`)
+ PathLookup,
+ /// A relative path (e.g. `my-dir/foo`, `../foo`, `./foo`)
+ Relative,
+ /// An absolute path.
+ Absolute,
+}
+
+impl ProgramKind {
+ fn new(program: &OsStr) -> Self {
+ if program.bytes().starts_with(b"/") {
+ Self::Absolute
+ } else if program.bytes().contains(&b'/') {
+ // If the program has more than one component in it, it is a relative path.
+ Self::Relative
+ } else {
+ Self::PathLookup
+ }
+ }
+}
+
impl Command {
#[cfg(not(target_os = "linux"))]
pub fn new(program: &OsStr) -> Command {
let mut saw_nul = false;
+ let program_kind = ProgramKind::new(program.as_ref());
let program = os2c(program, &mut saw_nul);
Command {
argv: Argv(vec![program.as_ptr(), ptr::null()]),
args: vec![program.clone()],
program,
+ program_kind,
env: Default::default(),
cwd: None,
uid: None,
@@ -154,11 +202,13 @@ impl Command {
#[cfg(target_os = "linux")]
pub fn new(program: &OsStr) -> Command {
let mut saw_nul = false;
+ let program_kind = ProgramKind::new(program.as_ref());
let program = os2c(program, &mut saw_nul);
Command {
argv: Argv(vec![program.as_ptr(), ptr::null()]),
args: vec![program.clone()],
program,
+ program_kind,
env: Default::default(),
cwd: None,
uid: None,
@@ -234,6 +284,11 @@ impl Command {
OsStr::from_bytes(self.program.as_bytes())
}
+ #[allow(dead_code)]
+ pub fn get_program_kind(&self) -> ProgramKind {
+ self.program_kind
+ }
+
pub fn get_args(&self) -> CommandArgs<'_> {
let mut iter = self.args.iter();
iter.next();
diff --git a/library/std/src/sys/unix/process/process_common/tests.rs b/library/std/src/sys/unix/process/process_common/tests.rs
index 1956b3692..03631e4e3 100644
--- a/library/std/src/sys/unix/process/process_common/tests.rs
+++ b/library/std/src/sys/unix/process/process_common/tests.rs
@@ -31,41 +31,54 @@ macro_rules! t {
ignore
)]
fn test_process_mask() {
- unsafe {
- // Test to make sure that a signal mask does not get inherited.
- let mut cmd = Command::new(OsStr::new("cat"));
-
- let mut set = mem::MaybeUninit::<libc::sigset_t>::uninit();
- let mut old_set = mem::MaybeUninit::<libc::sigset_t>::uninit();
- t!(cvt(sigemptyset(set.as_mut_ptr())));
- t!(cvt(sigaddset(set.as_mut_ptr(), libc::SIGINT)));
- t!(cvt_nz(libc::pthread_sigmask(libc::SIG_SETMASK, set.as_ptr(), old_set.as_mut_ptr())));
-
- cmd.stdin(Stdio::MakePipe);
- cmd.stdout(Stdio::MakePipe);
-
- let (mut cat, mut pipes) = t!(cmd.spawn(Stdio::Null, true));
- let stdin_write = pipes.stdin.take().unwrap();
- let stdout_read = pipes.stdout.take().unwrap();
-
- t!(cvt_nz(libc::pthread_sigmask(libc::SIG_SETMASK, old_set.as_ptr(), ptr::null_mut())));
-
- t!(cvt(libc::kill(cat.id() as libc::pid_t, libc::SIGINT)));
- // We need to wait until SIGINT is definitely delivered. The
- // easiest way is to write something to cat, and try to read it
- // back: if SIGINT is unmasked, it'll get delivered when cat is
- // next scheduled.
- let _ = stdin_write.write(b"Hello");
- drop(stdin_write);
-
- // Either EOF or failure (EPIPE) is okay.
- let mut buf = [0; 5];
- if let Ok(ret) = stdout_read.read(&mut buf) {
- assert_eq!(ret, 0);
+ // Test to make sure that a signal mask *does* get inherited.
+ fn test_inner(mut cmd: Command) {
+ unsafe {
+ let mut set = mem::MaybeUninit::<libc::sigset_t>::uninit();
+ let mut old_set = mem::MaybeUninit::<libc::sigset_t>::uninit();
+ t!(cvt(sigemptyset(set.as_mut_ptr())));
+ t!(cvt(sigaddset(set.as_mut_ptr(), libc::SIGINT)));
+ t!(cvt_nz(libc::pthread_sigmask(
+ libc::SIG_SETMASK,
+ set.as_ptr(),
+ old_set.as_mut_ptr()
+ )));
+
+ cmd.stdin(Stdio::MakePipe);
+ cmd.stdout(Stdio::MakePipe);
+
+ let (mut cat, mut pipes) = t!(cmd.spawn(Stdio::Null, true));
+ let stdin_write = pipes.stdin.take().unwrap();
+ let stdout_read = pipes.stdout.take().unwrap();
+
+ t!(cvt_nz(libc::pthread_sigmask(libc::SIG_SETMASK, old_set.as_ptr(), ptr::null_mut())));
+
+ t!(cvt(libc::kill(cat.id() as libc::pid_t, libc::SIGINT)));
+ // We need to wait until SIGINT is definitely delivered. The
+ // easiest way is to write something to cat, and try to read it
+ // back: if SIGINT is unmasked, it'll get delivered when cat is
+ // next scheduled.
+ let _ = stdin_write.write(b"Hello");
+ drop(stdin_write);
+
+ // Exactly 5 bytes should be read.
+ let mut buf = [0; 5];
+ let ret = t!(stdout_read.read(&mut buf));
+ assert_eq!(ret, 5);
+ assert_eq!(&buf, b"Hello");
+
+ t!(cat.wait());
}
-
- t!(cat.wait());
}
+
+ // A plain `Command::new` uses the posix_spawn path on many platforms.
+ let cmd = Command::new(OsStr::new("cat"));
+ test_inner(cmd);
+
+ // Specifying `pre_exec` forces the fork/exec path.
+ let mut cmd = Command::new(OsStr::new("cat"));
+ unsafe { cmd.pre_exec(Box::new(|| Ok(()))) };
+ test_inner(cmd);
}
#[test]
@@ -122,3 +135,27 @@ fn test_process_group_no_posix_spawn() {
t!(cat.wait());
}
}
+
+#[test]
+fn test_program_kind() {
+ let vectors = &[
+ ("foo", ProgramKind::PathLookup),
+ ("foo.out", ProgramKind::PathLookup),
+ ("./foo", ProgramKind::Relative),
+ ("../foo", ProgramKind::Relative),
+ ("dir/foo", ProgramKind::Relative),
+ // Note that file names on Unix can't contain / in them, so this is actually the directory "fo\\"
+ // followed by the file "o".
+ ("fo\\/o", ProgramKind::Relative),
+ ("/foo", ProgramKind::Absolute),
+ ("/dir/../foo", ProgramKind::Absolute),
+ ];
+
+ for (program, expected_kind) in vectors {
+ assert_eq!(
+ ProgramKind::new(program.as_ref()),
+ *expected_kind,
+ "actual != expected program kind for input {program}",
+ );
+ }
+}
diff --git a/library/std/src/sys/unix/process/process_fuchsia.rs b/library/std/src/sys/unix/process/process_fuchsia.rs
index 73f5d3a61..66ea3db20 100644
--- a/library/std/src/sys/unix/process/process_fuchsia.rs
+++ b/library/std/src/sys/unix/process/process_fuchsia.rs
@@ -287,7 +287,7 @@ impl ExitStatus {
// SuS and POSIX) say a wait status is, but Fuchsia apparently uses a u64, so it won't
// necessarily fit.
//
- // It seems to me that that the right answer would be to provide std::os::fuchsia with its
+ // It seems to me that the right answer would be to provide std::os::fuchsia with its
// own ExitStatusExt, rather than trying to provide a not very convincing imitation of
// Unix. I.e., std::os::unix::process::ExitStatusExt ought not to exist on Fuchsia. But
// fixing that up is beyond the scope of my efforts now.
diff --git a/library/std/src/sys/unix/process/process_unix.rs b/library/std/src/sys/unix/process/process_unix.rs
index 75bb92437..56a805cef 100644
--- a/library/std/src/sys/unix/process/process_unix.rs
+++ b/library/std/src/sys/unix/process/process_unix.rs
@@ -2,7 +2,6 @@ use crate::fmt;
use crate::io::{self, Error, ErrorKind};
use crate::mem;
use crate::num::NonZeroI32;
-use crate::ptr;
use crate::sys;
use crate::sys::cvt;
use crate::sys::process::process_common::*;
@@ -310,7 +309,7 @@ impl Command {
//FIXME: Redox kernel does not support setgroups yet
#[cfg(not(target_os = "redox"))]
if libc::getuid() == 0 && self.get_groups().is_none() {
- cvt(libc::setgroups(0, ptr::null()))?;
+ cvt(libc::setgroups(0, crate::ptr::null()))?;
}
cvt(libc::setuid(u as uid_t))?;
}
@@ -326,30 +325,26 @@ impl Command {
// emscripten has no signal support.
#[cfg(not(target_os = "emscripten"))]
{
- use crate::mem::MaybeUninit;
- use crate::sys::cvt_nz;
- // Reset signal handling so the child process starts in a
- // standardized state. libstd ignores SIGPIPE, and signal-handling
- // libraries often set a mask. Child processes inherit ignored
- // signals and the signal mask from their parent, but most
- // UNIX programs do not reset these things on their own, so we
- // need to clean things up now to avoid confusing the program
- // we're about to run.
- let mut set = MaybeUninit::<libc::sigset_t>::uninit();
- cvt(sigemptyset(set.as_mut_ptr()))?;
- cvt_nz(libc::pthread_sigmask(libc::SIG_SETMASK, set.as_ptr(), ptr::null_mut()))?;
-
- #[cfg(target_os = "android")] // see issue #88585
- {
- let mut action: libc::sigaction = mem::zeroed();
- action.sa_sigaction = libc::SIG_DFL;
- cvt(libc::sigaction(libc::SIGPIPE, &action, ptr::null_mut()))?;
- }
- #[cfg(not(target_os = "android"))]
- {
- let ret = sys::signal(libc::SIGPIPE, libc::SIG_DFL);
- if ret == libc::SIG_ERR {
- return Err(io::Error::last_os_error());
+ // Inherit the signal mask from the parent rather than resetting it (i.e. do not call
+ // pthread_sigmask).
+
+ // If #[unix_sigpipe] is specified, don't reset SIGPIPE to SIG_DFL.
+ // If #[unix_sigpipe] is not specified, reset SIGPIPE to SIG_DFL for backward compatibility.
+ //
+ // #[unix_sigpipe] is an opportunity to change the default here.
+ if !crate::sys::unix_sigpipe_attr_specified() {
+ #[cfg(target_os = "android")] // see issue #88585
+ {
+ let mut action: libc::sigaction = mem::zeroed();
+ action.sa_sigaction = libc::SIG_DFL;
+ cvt(libc::sigaction(libc::SIGPIPE, &action, crate::ptr::null_mut()))?;
+ }
+ #[cfg(not(target_os = "android"))]
+ {
+ let ret = sys::signal(libc::SIGPIPE, libc::SIG_DFL);
+ if ret == libc::SIG_ERR {
+ return Err(io::Error::last_os_error());
+ }
}
}
}
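The `unix_sigpipe_attr_specified()` check above reflects the (at the time nightly-only) `#[unix_sigpipe]` attribute on `fn main`; on the user side it is spelled roughly like this, with "inherit", "sig_ign", and "sig_dfl" as the accepted values.

    #![feature(unix_sigpipe)] // nightly-only illustration

    #[unix_sigpipe = "sig_dfl"] // keep SIGPIPE at SIG_DFL instead of std's usual SIG_IGN
    fn main() {
        // A write to a closed pipe now terminates the process with SIGPIPE
        // rather than surfacing ErrorKind::BrokenPipe.
        println!("hello");
    }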
@@ -411,7 +406,7 @@ impl Command {
envp: Option<&CStringArray>,
) -> io::Result<Option<Process>> {
use crate::mem::MaybeUninit;
- use crate::sys::{self, cvt_nz};
+ use crate::sys::{self, cvt_nz, unix_sigpipe_attr_specified};
if self.get_gid().is_some()
|| self.get_uid().is_some()
@@ -453,7 +448,9 @@ impl Command {
// successfully launch the program, but erroneously return
// ENOENT when used with posix_spawn_file_actions_addchdir_np
// which was introduced in macOS 10.15.
- return Ok(None);
+ if self.get_program_kind() == ProgramKind::Relative {
+ return Ok(None);
+ }
}
match posix_spawn_file_actions_addchdir_np.get() {
Some(f) => Some((f, cwd)),
@@ -529,13 +526,24 @@ impl Command {
cvt_nz(libc::posix_spawnattr_setpgroup(attrs.0.as_mut_ptr(), pgroup))?;
}
- let mut set = MaybeUninit::<libc::sigset_t>::uninit();
- cvt(sigemptyset(set.as_mut_ptr()))?;
- cvt_nz(libc::posix_spawnattr_setsigmask(attrs.0.as_mut_ptr(), set.as_ptr()))?;
- cvt(sigaddset(set.as_mut_ptr(), libc::SIGPIPE))?;
- cvt_nz(libc::posix_spawnattr_setsigdefault(attrs.0.as_mut_ptr(), set.as_ptr()))?;
+ // Inherit the signal mask from this process rather than resetting it (i.e. do not call
+ // posix_spawnattr_setsigmask).
+
+ // If #[unix_sigpipe] is specified, don't reset SIGPIPE to SIG_DFL.
+ // If #[unix_sigpipe] is not specified, reset SIGPIPE to SIG_DFL for backward compatibility.
+ //
+ // #[unix_sigpipe] is an opportunity to change the default here.
+ if !unix_sigpipe_attr_specified() {
+ let mut default_set = MaybeUninit::<libc::sigset_t>::uninit();
+ cvt(sigemptyset(default_set.as_mut_ptr()))?;
+ cvt(sigaddset(default_set.as_mut_ptr(), libc::SIGPIPE))?;
+ cvt_nz(libc::posix_spawnattr_setsigdefault(
+ attrs.0.as_mut_ptr(),
+ default_set.as_ptr(),
+ ))?;
+ flags |= libc::POSIX_SPAWN_SETSIGDEF;
+ }
- flags |= libc::POSIX_SPAWN_SETSIGDEF | libc::POSIX_SPAWN_SETSIGMASK;
cvt_nz(libc::posix_spawnattr_setflags(attrs.0.as_mut_ptr(), flags as _))?;
// Make sure we synchronize access to the global `environ` resource
@@ -820,14 +828,14 @@ impl crate::os::linux::process::ChildExt for crate::process::Child {
self.handle
.pidfd
.as_ref()
- .ok_or_else(|| Error::new(ErrorKind::Other, "No pidfd was created."))
+ .ok_or_else(|| Error::new(ErrorKind::Uncategorized, "No pidfd was created."))
}
fn take_pidfd(&mut self) -> io::Result<PidFd> {
self.handle
.pidfd
.take()
- .ok_or_else(|| Error::new(ErrorKind::Other, "No pidfd was created."))
+ .ok_or_else(|| Error::new(ErrorKind::Uncategorized, "No pidfd was created."))
}
}
diff --git a/library/std/src/sys/unix/rand.rs b/library/std/src/sys/unix/rand.rs
index bf4920488..a6fe07873 100644
--- a/library/std/src/sys/unix/rand.rs
+++ b/library/std/src/sys/unix/rand.rs
@@ -1,13 +1,13 @@
-use crate::mem;
-use crate::slice;
-
pub fn hashmap_random_keys() -> (u64, u64) {
- let mut v = (0, 0);
- unsafe {
- let view = slice::from_raw_parts_mut(&mut v as *mut _ as *mut u8, mem::size_of_val(&v));
- imp::fill_bytes(view);
- }
- v
+ const KEY_LEN: usize = core::mem::size_of::<u64>();
+
+ let mut v = [0u8; KEY_LEN * 2];
+ imp::fill_bytes(&mut v);
+
+ let key1 = v[0..KEY_LEN].try_into().unwrap();
+ let key2 = v[KEY_LEN..].try_into().unwrap();
+
+ (u64::from_ne_bytes(key1), u64::from_ne_bytes(key2))
}
#[cfg(all(
diff --git a/library/std/src/sys/unix/stdio.rs b/library/std/src/sys/unix/stdio.rs
index 329f9433d..b3626c564 100644
--- a/library/std/src/sys/unix/stdio.rs
+++ b/library/std/src/sys/unix/stdio.rs
@@ -1,6 +1,6 @@
use crate::io::{self, IoSlice, IoSliceMut};
use crate::mem::ManuallyDrop;
-use crate::os::unix::io::{AsFd, BorrowedFd, FromRawFd};
+use crate::os::unix::io::FromRawFd;
use crate::sys::fd::FileDesc;
pub struct Stdin(());
@@ -91,51 +91,3 @@ pub const STDIN_BUF_SIZE: usize = crate::sys_common::io::DEFAULT_BUF_SIZE;
pub fn panic_output() -> Option<impl io::Write> {
Some(Stderr::new())
}
-
-#[stable(feature = "io_safety", since = "1.63.0")]
-impl AsFd for io::Stdin {
- #[inline]
- fn as_fd(&self) -> BorrowedFd<'_> {
- unsafe { BorrowedFd::borrow_raw(libc::STDIN_FILENO) }
- }
-}
-
-#[stable(feature = "io_safety", since = "1.63.0")]
-impl<'a> AsFd for io::StdinLock<'a> {
- #[inline]
- fn as_fd(&self) -> BorrowedFd<'_> {
- unsafe { BorrowedFd::borrow_raw(libc::STDIN_FILENO) }
- }
-}
-
-#[stable(feature = "io_safety", since = "1.63.0")]
-impl AsFd for io::Stdout {
- #[inline]
- fn as_fd(&self) -> BorrowedFd<'_> {
- unsafe { BorrowedFd::borrow_raw(libc::STDOUT_FILENO) }
- }
-}
-
-#[stable(feature = "io_safety", since = "1.63.0")]
-impl<'a> AsFd for io::StdoutLock<'a> {
- #[inline]
- fn as_fd(&self) -> BorrowedFd<'_> {
- unsafe { BorrowedFd::borrow_raw(libc::STDOUT_FILENO) }
- }
-}
-
-#[stable(feature = "io_safety", since = "1.63.0")]
-impl AsFd for io::Stderr {
- #[inline]
- fn as_fd(&self) -> BorrowedFd<'_> {
- unsafe { BorrowedFd::borrow_raw(libc::STDERR_FILENO) }
- }
-}
-
-#[stable(feature = "io_safety", since = "1.63.0")]
-impl<'a> AsFd for io::StderrLock<'a> {
- #[inline]
- fn as_fd(&self) -> BorrowedFd<'_> {
- unsafe { BorrowedFd::borrow_raw(libc::STDERR_FILENO) }
- }
-}
diff --git a/library/std/src/sys/unix/thread.rs b/library/std/src/sys/unix/thread.rs
index 36a3fa602..c1d30dd9d 100644
--- a/library/std/src/sys/unix/thread.rs
+++ b/library/std/src/sys/unix/thread.rs
@@ -116,11 +116,9 @@ impl Thread {
debug_assert_eq!(ret, 0);
}
- #[cfg(any(target_os = "linux", target_os = "android"))]
+ #[cfg(target_os = "android")]
pub fn set_name(name: &CStr) {
const PR_SET_NAME: libc::c_int = 15;
- // pthread wrapper only appeared in glibc 2.12, so we use syscall
- // directly.
unsafe {
libc::prctl(
PR_SET_NAME,
@@ -132,6 +130,19 @@ impl Thread {
}
}
+ #[cfg(target_os = "linux")]
+ pub fn set_name(name: &CStr) {
+ const TASK_COMM_LEN: usize = 16;
+
+ unsafe {
+ // Available since glibc 2.12, musl 1.1.16, and uClibc 1.0.20.
+ let name = truncate_cstr(name, TASK_COMM_LEN);
+ let res = libc::pthread_setname_np(libc::pthread_self(), name.as_ptr());
+ // We have no good way of propagating errors here, but in debug builds let's check that this actually worked.
+ debug_assert_eq!(res, 0);
+ }
+ }
+
#[cfg(any(target_os = "freebsd", target_os = "dragonfly", target_os = "openbsd"))]
pub fn set_name(name: &CStr) {
unsafe {
@@ -142,20 +153,23 @@ impl Thread {
#[cfg(any(target_os = "macos", target_os = "ios", target_os = "watchos"))]
pub fn set_name(name: &CStr) {
unsafe {
- libc::pthread_setname_np(name.as_ptr());
+ let name = truncate_cstr(name, libc::MAXTHREADNAMESIZE);
+ let res = libc::pthread_setname_np(name.as_ptr());
+ // We have no good way of propagating errors here, but in debug builds let's check that this actually worked.
+ debug_assert_eq!(res, 0);
}
}
#[cfg(target_os = "netbsd")]
pub fn set_name(name: &CStr) {
- use crate::ffi::CString;
- let cname = CString::new(&b"%s"[..]).unwrap();
unsafe {
- libc::pthread_setname_np(
+ let cname = CStr::from_bytes_with_nul_unchecked(b"%s\0".as_slice());
+ let res = libc::pthread_setname_np(
libc::pthread_self(),
cname.as_ptr(),
name.as_ptr() as *mut libc::c_void,
);
+ debug_assert_eq!(res, 0);
}
}
@@ -168,9 +182,8 @@ impl Thread {
}
if let Some(f) = pthread_setname_np.get() {
- unsafe {
- f(libc::pthread_self(), name.as_ptr());
- }
+ let res = unsafe { f(libc::pthread_self(), name.as_ptr()) };
+ debug_assert_eq!(res, 0);
}
}
@@ -271,6 +284,20 @@ impl Drop for Thread {
}
}
+#[cfg(any(target_os = "linux", target_os = "macos", target_os = "ios", target_os = "watchos"))]
+fn truncate_cstr(cstr: &CStr, max_with_nul: usize) -> crate::borrow::Cow<'_, CStr> {
+ use crate::{borrow::Cow, ffi::CString};
+
+ if cstr.to_bytes_with_nul().len() > max_with_nul {
+ let bytes = cstr.to_bytes()[..max_with_nul - 1].to_vec();
+ // SAFETY: the non-nul bytes came straight from a CStr.
+ // (CString will add the terminating nul.)
+ Cow::Owned(unsafe { CString::from_vec_unchecked(bytes) })
+ } else {
+ Cow::Borrowed(cstr)
+ }
+}
+
pub fn available_parallelism() -> io::Result<NonZeroUsize> {
cfg_if::cfg_if! {
if #[cfg(any(
@@ -423,7 +450,7 @@ mod cgroups {
Some(b"") => Cgroup::V2,
Some(controllers)
if from_utf8(controllers)
- .is_ok_and(|c| c.split(",").any(|c| c == "cpu")) =>
+ .is_ok_and(|c| c.split(',').any(|c| c == "cpu")) =>
{
Cgroup::V1
}
@@ -761,6 +788,16 @@ pub mod guard {
const GUARD_PAGES: usize = 1;
let guard = guardaddr..guardaddr + GUARD_PAGES * page_size;
Some(guard)
+ } else if cfg!(target_os = "openbsd") {
+ // OpenBSD stack already includes a guard page, and stack is
+ // immutable.
+ //
+ // We'll just note where we expect rlimit to start
+ // faulting, so our handler can report "stack overflow", and
+ // trust that the kernel's own stack guard will work.
+ let stackptr = get_stack_start_aligned()?;
+ let stackaddr = stackptr.addr();
+ Some(stackaddr - page_size..stackaddr)
} else {
// Reallocate the last page of the stack.
// This ensures SIGBUS will be raised on
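The `truncate_cstr` helper added in this thread.rs change is easy to exercise on its own. Below is a standalone sketch of the same rule, kept outside `std` and using the checked `CString::new` in place of `from_vec_unchecked`; the long thread name is only an illustrative value.

    use std::borrow::Cow;
    use std::ffi::{CStr, CString};

    // Keep at most `max_with_nul - 1` name bytes so that, once the nul
    // terminator is re-added, the result fits a fixed-size kernel buffer
    // such as Linux's 16-byte TASK_COMM_LEN.
    fn truncate_cstr(cstr: &CStr, max_with_nul: usize) -> Cow<'_, CStr> {
        if cstr.to_bytes_with_nul().len() > max_with_nul {
            let bytes = cstr.to_bytes()[..max_with_nul - 1].to_vec();
            // The bytes come from a CStr, so they contain no interior nul and
            // CString::new cannot fail; it appends the terminator itself.
            Cow::Owned(CString::new(bytes).expect("no interior nul bytes"))
        } else {
            Cow::Borrowed(cstr)
        }
    }

    fn main() {
        let long = CString::new("a-rather-long-worker-thread-name").unwrap();
        let short = truncate_cstr(&long, 16);
        assert_eq!(short.to_bytes_with_nul().len(), 16);
        assert_eq!(short.to_bytes(), b"a-rather-long-w");
    }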
diff --git a/library/std/src/sys/unix/thread_local_dtor.rs b/library/std/src/sys/unix/thread_local_dtor.rs
index 6e8be2a91..d7fd2130f 100644
--- a/library/std/src/sys/unix/thread_local_dtor.rs
+++ b/library/std/src/sys/unix/thread_local_dtor.rs
@@ -17,6 +17,7 @@
target_os = "redox",
target_os = "emscripten"
))]
+#[cfg_attr(target_family = "wasm", allow(unused))] // might remain unused depending on target details (e.g. wasm32-unknown-emscripten)
pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
use crate::mem;
use crate::sys_common::thread_local_dtor::register_dtor_fallback;
diff --git a/library/std/src/sys/unix/thread_local_key.rs b/library/std/src/sys/unix/thread_local_key.rs
index 2c5b94b1e..2b2d079ee 100644
--- a/library/std/src/sys/unix/thread_local_key.rs
+++ b/library/std/src/sys/unix/thread_local_key.rs
@@ -27,8 +27,3 @@ pub unsafe fn destroy(key: Key) {
let r = libc::pthread_key_delete(key);
debug_assert_eq!(r, 0);
}
-
-#[inline]
-pub fn requires_synchronized_create() -> bool {
- false
-}
diff --git a/library/std/src/sys/unix/thread_parker.rs b/library/std/src/sys/unix/thread_parker.rs
deleted file mode 100644
index ca1a7138f..000000000
--- a/library/std/src/sys/unix/thread_parker.rs
+++ /dev/null
@@ -1,281 +0,0 @@
-//! Thread parking without `futex` using the `pthread` synchronization primitives.
-
-#![cfg(not(any(
- target_os = "linux",
- target_os = "android",
- all(target_os = "emscripten", target_feature = "atomics"),
- target_os = "freebsd",
- target_os = "openbsd",
- target_os = "dragonfly",
- target_os = "fuchsia",
-)))]
-
-use crate::cell::UnsafeCell;
-use crate::marker::PhantomPinned;
-use crate::pin::Pin;
-use crate::ptr::addr_of_mut;
-use crate::sync::atomic::AtomicUsize;
-use crate::sync::atomic::Ordering::SeqCst;
-use crate::time::Duration;
-
-const EMPTY: usize = 0;
-const PARKED: usize = 1;
-const NOTIFIED: usize = 2;
-
-unsafe fn lock(lock: *mut libc::pthread_mutex_t) {
- let r = libc::pthread_mutex_lock(lock);
- debug_assert_eq!(r, 0);
-}
-
-unsafe fn unlock(lock: *mut libc::pthread_mutex_t) {
- let r = libc::pthread_mutex_unlock(lock);
- debug_assert_eq!(r, 0);
-}
-
-unsafe fn notify_one(cond: *mut libc::pthread_cond_t) {
- let r = libc::pthread_cond_signal(cond);
- debug_assert_eq!(r, 0);
-}
-
-unsafe fn wait(cond: *mut libc::pthread_cond_t, lock: *mut libc::pthread_mutex_t) {
- let r = libc::pthread_cond_wait(cond, lock);
- debug_assert_eq!(r, 0);
-}
-
-const TIMESPEC_MAX: libc::timespec =
- libc::timespec { tv_sec: <libc::time_t>::MAX, tv_nsec: 1_000_000_000 - 1 };
-
-unsafe fn wait_timeout(
- cond: *mut libc::pthread_cond_t,
- lock: *mut libc::pthread_mutex_t,
- dur: Duration,
-) {
- // Use the system clock on systems that do not support pthread_condattr_setclock.
- // This unfortunately results in problems when the system time changes.
- #[cfg(any(
- target_os = "macos",
- target_os = "ios",
- target_os = "watchos",
- target_os = "espidf"
- ))]
- let (now, dur) = {
- use super::time::SystemTime;
- use crate::cmp::min;
-
- // OSX implementation of `pthread_cond_timedwait` is buggy
- // with super long durations. When duration is greater than
- // 0x100_0000_0000_0000 seconds, `pthread_cond_timedwait`
- // in macOS Sierra return error 316.
- //
- // This program demonstrates the issue:
- // https://gist.github.com/stepancheg/198db4623a20aad2ad7cddb8fda4a63c
- //
- // To work around this issue, and possible bugs of other OSes, timeout
- // is clamped to 1000 years, which is allowable per the API of `park_timeout`
- // because of spurious wakeups.
- let dur = min(dur, Duration::from_secs(1000 * 365 * 86400));
- let now = SystemTime::now().t;
- (now, dur)
- };
- // Use the monotonic clock on other systems.
- #[cfg(not(any(
- target_os = "macos",
- target_os = "ios",
- target_os = "watchos",
- target_os = "espidf"
- )))]
- let (now, dur) = {
- use super::time::Timespec;
-
- (Timespec::now(libc::CLOCK_MONOTONIC), dur)
- };
-
- let timeout =
- now.checked_add_duration(&dur).and_then(|t| t.to_timespec()).unwrap_or(TIMESPEC_MAX);
- let r = libc::pthread_cond_timedwait(cond, lock, &timeout);
- debug_assert!(r == libc::ETIMEDOUT || r == 0);
-}
-
-pub struct Parker {
- state: AtomicUsize,
- lock: UnsafeCell<libc::pthread_mutex_t>,
- cvar: UnsafeCell<libc::pthread_cond_t>,
- // The `pthread` primitives require a stable address, so make this struct `!Unpin`.
- _pinned: PhantomPinned,
-}
-
-impl Parker {
- /// Construct the UNIX parker in-place.
- ///
- /// # Safety
- /// The constructed parker must never be moved.
- pub unsafe fn new(parker: *mut Parker) {
- // Use the default mutex implementation to allow for simpler initialization.
- // This could lead to undefined behaviour when deadlocking. This is avoided
- // by not deadlocking. Note in particular the unlocking operation before any
- // panic, as code after the panic could try to park again.
- addr_of_mut!((*parker).state).write(AtomicUsize::new(EMPTY));
- addr_of_mut!((*parker).lock).write(UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER));
-
- cfg_if::cfg_if! {
- if #[cfg(any(
- target_os = "macos",
- target_os = "ios",
- target_os = "watchos",
- target_os = "l4re",
- target_os = "android",
- target_os = "redox"
- ))] {
- addr_of_mut!((*parker).cvar).write(UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER));
- } else if #[cfg(any(target_os = "espidf", target_os = "horizon"))] {
- let r = libc::pthread_cond_init(addr_of_mut!((*parker).cvar).cast(), crate::ptr::null());
- assert_eq!(r, 0);
- } else {
- use crate::mem::MaybeUninit;
- let mut attr = MaybeUninit::<libc::pthread_condattr_t>::uninit();
- let r = libc::pthread_condattr_init(attr.as_mut_ptr());
- assert_eq!(r, 0);
- let r = libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC);
- assert_eq!(r, 0);
- let r = libc::pthread_cond_init(addr_of_mut!((*parker).cvar).cast(), attr.as_ptr());
- assert_eq!(r, 0);
- let r = libc::pthread_condattr_destroy(attr.as_mut_ptr());
- assert_eq!(r, 0);
- }
- }
- }
-
- // This implementation doesn't require `unsafe`, but other implementations
- // may assume this is only called by the thread that owns the Parker.
- pub unsafe fn park(self: Pin<&Self>) {
- // If we were previously notified then we consume this notification and
- // return quickly.
- if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() {
- return;
- }
-
- // Otherwise we need to coordinate going to sleep
- lock(self.lock.get());
- match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
- Ok(_) => {}
- Err(NOTIFIED) => {
- // We must read here, even though we know it will be `NOTIFIED`.
- // This is because `unpark` may have been called again since we read
- // `NOTIFIED` in the `compare_exchange` above. We must perform an
- // acquire operation that synchronizes with that `unpark` to observe
- // any writes it made before the call to unpark. To do that we must
- // read from the write it made to `state`.
- let old = self.state.swap(EMPTY, SeqCst);
-
- unlock(self.lock.get());
-
- assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
- return;
- } // should consume this notification, so prohibit spurious wakeups in next park.
- Err(_) => {
- unlock(self.lock.get());
-
- panic!("inconsistent park state")
- }
- }
-
- loop {
- wait(self.cvar.get(), self.lock.get());
-
- match self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) {
- Ok(_) => break, // got a notification
- Err(_) => {} // spurious wakeup, go back to sleep
- }
- }
-
- unlock(self.lock.get());
- }
-
- // This implementation doesn't require `unsafe`, but other implementations
- // may assume this is only called by the thread that owns the Parker. Use
- // `Pin` to guarantee a stable address for the mutex and condition variable.
- pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) {
- // Like `park` above we have a fast path for an already-notified thread, and
- // afterwards we start coordinating for a sleep.
- // return quickly.
- if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() {
- return;
- }
-
- lock(self.lock.get());
- match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
- Ok(_) => {}
- Err(NOTIFIED) => {
- // We must read again here, see `park`.
- let old = self.state.swap(EMPTY, SeqCst);
- unlock(self.lock.get());
-
- assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
- return;
- } // should consume this notification, so prohibit spurious wakeups in next park.
- Err(_) => {
- unlock(self.lock.get());
- panic!("inconsistent park_timeout state")
- }
- }
-
- // Wait with a timeout, and if we spuriously wake up or otherwise wake up
- // from a notification we just want to unconditionally set the state back to
- // empty, either consuming a notification or un-flagging ourselves as
- // parked.
- wait_timeout(self.cvar.get(), self.lock.get(), dur);
-
- match self.state.swap(EMPTY, SeqCst) {
- NOTIFIED => unlock(self.lock.get()), // got a notification, hurray!
- PARKED => unlock(self.lock.get()), // no notification, alas
- n => {
- unlock(self.lock.get());
- panic!("inconsistent park_timeout state: {n}")
- }
- }
- }
-
- pub fn unpark(self: Pin<&Self>) {
- // To ensure the unparked thread will observe any writes we made
- // before this call, we must perform a release operation that `park`
- // can synchronize with. To do that we must write `NOTIFIED` even if
- // `state` is already `NOTIFIED`. That is why this must be a swap
- // rather than a compare-and-swap that returns if it reads `NOTIFIED`
- // on failure.
- match self.state.swap(NOTIFIED, SeqCst) {
- EMPTY => return, // no one was waiting
- NOTIFIED => return, // already unparked
- PARKED => {} // gotta go wake someone up
- _ => panic!("inconsistent state in unpark"),
- }
-
- // There is a period between when the parked thread sets `state` to
- // `PARKED` (or last checked `state` in the case of a spurious wake
- // up) and when it actually waits on `cvar`. If we were to notify
- // during this period it would be ignored and then when the parked
- // thread went to sleep it would never wake up. Fortunately, it has
- // `lock` locked at this stage so we can acquire `lock` to wait until
- // it is ready to receive the notification.
- //
- // Releasing `lock` before the call to `notify_one` means that when the
- // parked thread wakes it doesn't get woken only to have to wait for us
- // to release `lock`.
- unsafe {
- lock(self.lock.get());
- unlock(self.lock.get());
- notify_one(self.cvar.get());
- }
- }
-}
-
-impl Drop for Parker {
- fn drop(&mut self) {
- unsafe {
- libc::pthread_cond_destroy(self.cvar.get_mut());
- libc::pthread_mutex_destroy(self.lock.get_mut());
- }
- }
-}
-
-unsafe impl Sync for Parker {}
-unsafe impl Send for Parker {}
diff --git a/library/std/src/sys/unix/thread_parker/darwin.rs b/library/std/src/sys/unix/thread_parker/darwin.rs
new file mode 100644
index 000000000..2f5356fe2
--- /dev/null
+++ b/library/std/src/sys/unix/thread_parker/darwin.rs
@@ -0,0 +1,131 @@
+//! Thread parking for Darwin-based systems.
+//!
+//! Darwin actually has futex syscalls (`__ulock_wait`/`__ulock_wake`), but they
+//! cannot be used in `std` because they are non-public (their use will lead to
+//! rejection from the App Store) and because they are only available starting
+//! with macOS version 10.12, even though the minimum target version is 10.7.
+//!
+//! Therefore, we need to look for other synchronization primitives. Luckily, Darwin
+//! supports semaphores, which allow us to implement the behaviour we need with
+//! only one primitive (as opposed to a mutex-condvar pair). We use the semaphore
+//! provided by libdispatch, as the underlying Mach semaphore is only dubiously
+//! public.
+
+use crate::pin::Pin;
+use crate::sync::atomic::{
+ AtomicI8,
+ Ordering::{Acquire, Release},
+};
+use crate::time::Duration;
+
+type dispatch_semaphore_t = *mut crate::ffi::c_void;
+type dispatch_time_t = u64;
+
+const DISPATCH_TIME_NOW: dispatch_time_t = 0;
+const DISPATCH_TIME_FOREVER: dispatch_time_t = !0;
+
+// Contained in libSystem.dylib, which is linked by default.
+extern "C" {
+ fn dispatch_time(when: dispatch_time_t, delta: i64) -> dispatch_time_t;
+ fn dispatch_semaphore_create(val: isize) -> dispatch_semaphore_t;
+ fn dispatch_semaphore_wait(dsema: dispatch_semaphore_t, timeout: dispatch_time_t) -> isize;
+ fn dispatch_semaphore_signal(dsema: dispatch_semaphore_t) -> isize;
+ fn dispatch_release(object: *mut crate::ffi::c_void);
+}
+
+const EMPTY: i8 = 0;
+const NOTIFIED: i8 = 1;
+const PARKED: i8 = -1;
+
+pub struct Parker {
+ semaphore: dispatch_semaphore_t,
+ state: AtomicI8,
+}
+
+unsafe impl Sync for Parker {}
+unsafe impl Send for Parker {}
+
+impl Parker {
+ pub unsafe fn new(parker: *mut Parker) {
+ let semaphore = dispatch_semaphore_create(0);
+ assert!(
+ !semaphore.is_null(),
+ "failed to create dispatch semaphore for thread synchronization"
+ );
+ parker.write(Parker { semaphore, state: AtomicI8::new(EMPTY) })
+ }
+
+ // Does not need `Pin`, but other implementations do.
+ pub unsafe fn park(self: Pin<&Self>) {
+ // The semaphore counter must be zero at this point, because unparking
+ // threads will not actually increase it until we signalled that we
+ // are waiting.
+
+ // Change NOTIFIED to EMPTY and EMPTY to PARKED.
+ if self.state.fetch_sub(1, Acquire) == NOTIFIED {
+ return;
+ }
+
+ // Another thread may increase the semaphore counter from this point on.
+ // If it is faster than us, we will decrement it again immediately below.
+ // If we are faster, we wait.
+
+ // Ensure that the semaphore counter has actually been decremented, even
+ // if the call timed out for some reason.
+ while dispatch_semaphore_wait(self.semaphore, DISPATCH_TIME_FOREVER) != 0 {}
+
+ // At this point, the semaphore counter is zero again.
+
+ // We were definitely woken up, so we don't need to check the state.
+ // Still, we need to reset the state using a swap to observe the state
+ // change with acquire ordering.
+ self.state.swap(EMPTY, Acquire);
+ }
+
+ // Does not need `Pin`, but other implementations do.
+ pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) {
+ if self.state.fetch_sub(1, Acquire) == NOTIFIED {
+ return;
+ }
+
+ let nanos = dur.as_nanos().try_into().unwrap_or(i64::MAX);
+ let timeout = dispatch_time(DISPATCH_TIME_NOW, nanos);
+
+ let timeout = dispatch_semaphore_wait(self.semaphore, timeout) != 0;
+
+ let state = self.state.swap(EMPTY, Acquire);
+ if state == NOTIFIED && timeout {
+ // If the state was NOTIFIED but semaphore_wait returned without
+ // decrementing the count because of a timeout, it means another
+ // thread is about to call semaphore_signal. We must wait for that
+ // to happen to ensure the semaphore count is reset.
+ while dispatch_semaphore_wait(self.semaphore, DISPATCH_TIME_FOREVER) != 0 {}
+ } else {
+ // Either a timeout occurred and we reset the state before any thread
+ // tried to wake us up, or we were woken up and reset the state,
+ // making sure to observe the state change with acquire ordering.
+ // Either way, the semaphore counter is now zero again.
+ }
+ }
+
+ // Does not need `Pin`, but other implementations do.
+ pub fn unpark(self: Pin<&Self>) {
+ let state = self.state.swap(NOTIFIED, Release);
+ if state == PARKED {
+ unsafe {
+ dispatch_semaphore_signal(self.semaphore);
+ }
+ }
+ }
+}
+
+impl Drop for Parker {
+ fn drop(&mut self) {
+ // SAFETY:
+ // We always ensure that the semaphore count is reset, so this will
+ // never cause an exception.
+ unsafe {
+ dispatch_release(self.semaphore);
+ }
+ }
+}
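One detail of the new Darwin parker that is easy to miss is why the state constants are 0, 1 and -1: a single `fetch_sub(1, Acquire)` then covers both fast-path transitions of `park`. A small plain-`std` illustration of just that trick (no libdispatch involved):

    use std::sync::atomic::{AtomicI8, Ordering::Acquire};

    const EMPTY: i8 = 0;
    const NOTIFIED: i8 = 1;
    const PARKED: i8 = -1;

    fn main() {
        // NOTIFIED (1) -> EMPTY (0): a pending token is consumed, no need to block.
        let state = AtomicI8::new(NOTIFIED);
        assert_eq!(state.fetch_sub(1, Acquire), NOTIFIED);
        assert_eq!(state.load(Acquire), EMPTY);

        // EMPTY (0) -> PARKED (-1): the thread announces it is about to block.
        assert_eq!(state.fetch_sub(1, Acquire), EMPTY);
        assert_eq!(state.load(Acquire), PARKED);
    }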
diff --git a/library/std/src/sys/unix/thread_parker/mod.rs b/library/std/src/sys/unix/thread_parker/mod.rs
new file mode 100644
index 000000000..35f1e68a8
--- /dev/null
+++ b/library/std/src/sys/unix/thread_parker/mod.rs
@@ -0,0 +1,32 @@
+//! Thread parking on systems without futex support.
+
+#![cfg(not(any(
+ target_os = "linux",
+ target_os = "android",
+ all(target_os = "emscripten", target_feature = "atomics"),
+ target_os = "freebsd",
+ target_os = "openbsd",
+ target_os = "dragonfly",
+ target_os = "fuchsia",
+)))]
+
+cfg_if::cfg_if! {
+ if #[cfg(all(
+ any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+ target_os = "tvos",
+ ),
+ not(miri),
+ ))] {
+ mod darwin;
+ pub use darwin::Parker;
+ } else if #[cfg(target_os = "netbsd")] {
+ mod netbsd;
+ pub use netbsd::Parker;
+ } else {
+ mod pthread;
+ pub use pthread::Parker;
+ }
+}
diff --git a/library/std/src/sys/unix/thread_parker/netbsd.rs b/library/std/src/sys/unix/thread_parker/netbsd.rs
new file mode 100644
index 000000000..7657605b5
--- /dev/null
+++ b/library/std/src/sys/unix/thread_parker/netbsd.rs
@@ -0,0 +1,113 @@
+use crate::ffi::{c_int, c_void};
+use crate::pin::Pin;
+use crate::ptr::{null, null_mut};
+use crate::sync::atomic::{
+ AtomicU64,
+ Ordering::{Acquire, Relaxed, Release},
+};
+use crate::time::Duration;
+use libc::{_lwp_self, clockid_t, lwpid_t, time_t, timespec, CLOCK_MONOTONIC};
+
+extern "C" {
+ fn ___lwp_park60(
+ clock_id: clockid_t,
+ flags: c_int,
+ ts: *mut timespec,
+ unpark: lwpid_t,
+ hint: *const c_void,
+ unparkhint: *const c_void,
+ ) -> c_int;
+ fn _lwp_unpark(lwp: lwpid_t, hint: *const c_void) -> c_int;
+}
+
+/// The thread is not parked and the token is not available.
+///
+/// Zero cannot be a valid LWP id, since it is used as the empty value for the
+/// unpark argument in _lwp_park.
+const EMPTY: u64 = 0;
+/// The token is available. Do not park anymore.
+const NOTIFIED: u64 = u64::MAX;
+
+pub struct Parker {
+ /// The parker state. Contains either one of the two state values above or the LWP
+ /// id of the parked thread.
+ state: AtomicU64,
+}
+
+impl Parker {
+ pub unsafe fn new(parker: *mut Parker) {
+ parker.write(Parker { state: AtomicU64::new(EMPTY) })
+ }
+
+ // Does not actually need `unsafe` or `Pin`, but the pthread implementation does.
+ pub unsafe fn park(self: Pin<&Self>) {
+ // If the token has already been made available, we can skip
+ // a bit of work, so check for it here.
+ if self.state.load(Acquire) != NOTIFIED {
+ let parked = _lwp_self() as u64;
+ let hint = self.state.as_mut_ptr().cast();
+ if self.state.compare_exchange(EMPTY, parked, Relaxed, Acquire).is_ok() {
+ // Loop to guard against spurious wakeups.
+ loop {
+ ___lwp_park60(0, 0, null_mut(), 0, hint, null());
+ if self.state.load(Acquire) == NOTIFIED {
+ break;
+ }
+ }
+ }
+ }
+
+ // At this point, the change to NOTIFIED has always been observed with acquire
+ // ordering, so we can just use a relaxed store here (instead of a swap).
+ self.state.store(EMPTY, Relaxed);
+ }
+
+ // Does not actually need `unsafe` or `Pin`, but the pthread implementation does.
+ pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) {
+ if self.state.load(Acquire) != NOTIFIED {
+ let parked = _lwp_self() as u64;
+ let hint = self.state.as_mut_ptr().cast();
+ let mut timeout = timespec {
+ // Saturate so that the operation will definitely time out
+ // (even if it is after the heat death of the universe).
+ tv_sec: dur.as_secs().try_into().ok().unwrap_or(time_t::MAX),
+ tv_nsec: dur.subsec_nanos().into(),
+ };
+
+ if self.state.compare_exchange(EMPTY, parked, Relaxed, Acquire).is_ok() {
+ // Timeout needs to be mutable since it is modified on NetBSD 9.0 and
+ // above.
+ ___lwp_park60(CLOCK_MONOTONIC, 0, &mut timeout, 0, hint, null());
+ // Use a swap to get acquire ordering even if the token was set after
+ // the timeout occurred.
+ self.state.swap(EMPTY, Acquire);
+ return;
+ }
+ }
+
+ self.state.store(EMPTY, Relaxed);
+ }
+
+ // Does not actually need `Pin`, but the pthread implementation does.
+ pub fn unpark(self: Pin<&Self>) {
+ let state = self.state.swap(NOTIFIED, Release);
+ if !matches!(state, EMPTY | NOTIFIED) {
+ let lwp = state as lwpid_t;
+ let hint = self.state.as_mut_ptr().cast();
+
+ // If the parking thread terminated and did not actually park, this will
+ // probably return an error, which is OK. In the worst case, another
+ // thread has received the same LWP id. It will then receive a spurious
+ // wakeup, but those are allowable per the API contract. The same reasoning
+ // applies if a timeout occurred before this call, but the state was not
+ // yet reset.
+
+ // SAFETY:
+ // The syscall has no invariants to hold. Only unsafe because it is an
+ // extern function.
+ unsafe {
+ _lwp_unpark(lwp, hint);
+ }
+ }
+ }
+}
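The NetBSD parker gets away without a mutex or condvar by storing the parked thread's LWP id directly in the state word. A minimal sketch of the unpark-side decision, with `7` standing in for a real LWP id:

    use std::sync::atomic::{
        AtomicU64,
        Ordering::{Acquire, Release},
    };

    const EMPTY: u64 = 0;
    const NOTIFIED: u64 = u64::MAX;

    // Three cases share one word: 0 (nobody parked), u64::MAX (token already
    // available), or the LWP id of the parked thread, which the real code
    // passes straight to _lwp_unpark.
    fn wake_target(state: &AtomicU64) -> Option<u64> {
        match state.swap(NOTIFIED, Release) {
            EMPTY | NOTIFIED => None, // nobody to wake, or a token is already pending
            lwp => Some(lwp),         // this LWP must be woken
        }
    }

    fn main() {
        let state = AtomicU64::new(EMPTY);
        assert_eq!(wake_target(&state), None); // unpark before park just leaves a token

        state.store(7, Release); // pretend LWP 7 recorded itself as parked
        assert_eq!(wake_target(&state), Some(7));
        assert_eq!(state.load(Acquire), NOTIFIED);
    }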
diff --git a/library/std/src/sys/unix/thread_parker/pthread.rs b/library/std/src/sys/unix/thread_parker/pthread.rs
new file mode 100644
index 000000000..3dfc0026e
--- /dev/null
+++ b/library/std/src/sys/unix/thread_parker/pthread.rs
@@ -0,0 +1,271 @@
+//! Thread parking without `futex` using the `pthread` synchronization primitives.
+
+use crate::cell::UnsafeCell;
+use crate::marker::PhantomPinned;
+use crate::pin::Pin;
+use crate::ptr::addr_of_mut;
+use crate::sync::atomic::AtomicUsize;
+use crate::sync::atomic::Ordering::SeqCst;
+use crate::time::Duration;
+
+const EMPTY: usize = 0;
+const PARKED: usize = 1;
+const NOTIFIED: usize = 2;
+
+unsafe fn lock(lock: *mut libc::pthread_mutex_t) {
+ let r = libc::pthread_mutex_lock(lock);
+ debug_assert_eq!(r, 0);
+}
+
+unsafe fn unlock(lock: *mut libc::pthread_mutex_t) {
+ let r = libc::pthread_mutex_unlock(lock);
+ debug_assert_eq!(r, 0);
+}
+
+unsafe fn notify_one(cond: *mut libc::pthread_cond_t) {
+ let r = libc::pthread_cond_signal(cond);
+ debug_assert_eq!(r, 0);
+}
+
+unsafe fn wait(cond: *mut libc::pthread_cond_t, lock: *mut libc::pthread_mutex_t) {
+ let r = libc::pthread_cond_wait(cond, lock);
+ debug_assert_eq!(r, 0);
+}
+
+const TIMESPEC_MAX: libc::timespec =
+ libc::timespec { tv_sec: <libc::time_t>::MAX, tv_nsec: 1_000_000_000 - 1 };
+
+unsafe fn wait_timeout(
+ cond: *mut libc::pthread_cond_t,
+ lock: *mut libc::pthread_mutex_t,
+ dur: Duration,
+) {
+ // Use the system clock on systems that do not support pthread_condattr_setclock.
+ // This unfortunately results in problems when the system time changes.
+ #[cfg(any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+ target_os = "espidf"
+ ))]
+ let (now, dur) = {
+ use crate::cmp::min;
+ use crate::sys::time::SystemTime;
+
+ // OSX implementation of `pthread_cond_timedwait` is buggy
+ // with super long durations. When duration is greater than
+ // 0x100_0000_0000_0000 seconds, `pthread_cond_timedwait`
+ // in macOS Sierra returns error 316.
+ //
+ // This program demonstrates the issue:
+ // https://gist.github.com/stepancheg/198db4623a20aad2ad7cddb8fda4a63c
+ //
+ // To work around this issue, and possible bugs of other OSes, timeout
+ // is clamped to 1000 years, which is allowable per the API of `park_timeout`
+ // because of spurious wakeups.
+ let dur = min(dur, Duration::from_secs(1000 * 365 * 86400));
+ let now = SystemTime::now().t;
+ (now, dur)
+ };
+ // Use the monotonic clock on other systems.
+ #[cfg(not(any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+ target_os = "espidf"
+ )))]
+ let (now, dur) = {
+ use crate::sys::time::Timespec;
+
+ (Timespec::now(libc::CLOCK_MONOTONIC), dur)
+ };
+
+ let timeout =
+ now.checked_add_duration(&dur).and_then(|t| t.to_timespec()).unwrap_or(TIMESPEC_MAX);
+ let r = libc::pthread_cond_timedwait(cond, lock, &timeout);
+ debug_assert!(r == libc::ETIMEDOUT || r == 0);
+}
+
+pub struct Parker {
+ state: AtomicUsize,
+ lock: UnsafeCell<libc::pthread_mutex_t>,
+ cvar: UnsafeCell<libc::pthread_cond_t>,
+ // The `pthread` primitives require a stable address, so make this struct `!Unpin`.
+ _pinned: PhantomPinned,
+}
+
+impl Parker {
+ /// Construct the UNIX parker in-place.
+ ///
+ /// # Safety
+ /// The constructed parker must never be moved.
+ pub unsafe fn new(parker: *mut Parker) {
+ // Use the default mutex implementation to allow for simpler initialization.
+ // This could lead to undefined behaviour when deadlocking. This is avoided
+ // by not deadlocking. Note in particular the unlocking operation before any
+ // panic, as code after the panic could try to park again.
+ addr_of_mut!((*parker).state).write(AtomicUsize::new(EMPTY));
+ addr_of_mut!((*parker).lock).write(UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER));
+
+ cfg_if::cfg_if! {
+ if #[cfg(any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+ target_os = "l4re",
+ target_os = "android",
+ target_os = "redox"
+ ))] {
+ addr_of_mut!((*parker).cvar).write(UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER));
+ } else if #[cfg(any(target_os = "espidf", target_os = "horizon"))] {
+ let r = libc::pthread_cond_init(addr_of_mut!((*parker).cvar).cast(), crate::ptr::null());
+ assert_eq!(r, 0);
+ } else {
+ use crate::mem::MaybeUninit;
+ let mut attr = MaybeUninit::<libc::pthread_condattr_t>::uninit();
+ let r = libc::pthread_condattr_init(attr.as_mut_ptr());
+ assert_eq!(r, 0);
+ let r = libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC);
+ assert_eq!(r, 0);
+ let r = libc::pthread_cond_init(addr_of_mut!((*parker).cvar).cast(), attr.as_ptr());
+ assert_eq!(r, 0);
+ let r = libc::pthread_condattr_destroy(attr.as_mut_ptr());
+ assert_eq!(r, 0);
+ }
+ }
+ }
+
+ // This implementation doesn't require `unsafe`, but other implementations
+ // may assume this is only called by the thread that owns the Parker.
+ pub unsafe fn park(self: Pin<&Self>) {
+ // If we were previously notified then we consume this notification and
+ // return quickly.
+ if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() {
+ return;
+ }
+
+ // Otherwise we need to coordinate going to sleep
+ lock(self.lock.get());
+ match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
+ Ok(_) => {}
+ Err(NOTIFIED) => {
+ // We must read here, even though we know it will be `NOTIFIED`.
+ // This is because `unpark` may have been called again since we read
+ // `NOTIFIED` in the `compare_exchange` above. We must perform an
+ // acquire operation that synchronizes with that `unpark` to observe
+ // any writes it made before the call to unpark. To do that we must
+ // read from the write it made to `state`.
+ let old = self.state.swap(EMPTY, SeqCst);
+
+ unlock(self.lock.get());
+
+ assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
+ return;
+ } // should consume this notification, so prohibit spurious wakeups in next park.
+ Err(_) => {
+ unlock(self.lock.get());
+
+ panic!("inconsistent park state")
+ }
+ }
+
+ loop {
+ wait(self.cvar.get(), self.lock.get());
+
+ match self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) {
+ Ok(_) => break, // got a notification
+ Err(_) => {} // spurious wakeup, go back to sleep
+ }
+ }
+
+ unlock(self.lock.get());
+ }
+
+ // This implementation doesn't require `unsafe`, but other implementations
+ // may assume this is only called by the thread that owns the Parker. Use
+ // `Pin` to guarantee a stable address for the mutex and condition variable.
+ pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) {
+ // Like `park` above, we have a fast path for an already-notified thread
+ // and return quickly in that case; afterwards we start coordinating
+ // for a sleep.
+ if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() {
+ return;
+ }
+
+ lock(self.lock.get());
+ match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
+ Ok(_) => {}
+ Err(NOTIFIED) => {
+ // We must read again here, see `park`.
+ let old = self.state.swap(EMPTY, SeqCst);
+ unlock(self.lock.get());
+
+ assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
+ return;
+ } // should consume this notification, so prohibit spurious wakeups in next park.
+ Err(_) => {
+ unlock(self.lock.get());
+ panic!("inconsistent park_timeout state")
+ }
+ }
+
+ // Wait with a timeout, and if we spuriously wake up or otherwise wake up
+ // from a notification we just want to unconditionally set the state back to
+ // empty, either consuming a notification or un-flagging ourselves as
+ // parked.
+ wait_timeout(self.cvar.get(), self.lock.get(), dur);
+
+ match self.state.swap(EMPTY, SeqCst) {
+ NOTIFIED => unlock(self.lock.get()), // got a notification, hurray!
+ PARKED => unlock(self.lock.get()), // no notification, alas
+ n => {
+ unlock(self.lock.get());
+ panic!("inconsistent park_timeout state: {n}")
+ }
+ }
+ }
+
+ pub fn unpark(self: Pin<&Self>) {
+ // To ensure the unparked thread will observe any writes we made
+ // before this call, we must perform a release operation that `park`
+ // can synchronize with. To do that we must write `NOTIFIED` even if
+ // `state` is already `NOTIFIED`. That is why this must be a swap
+ // rather than a compare-and-swap that returns if it reads `NOTIFIED`
+ // on failure.
+ match self.state.swap(NOTIFIED, SeqCst) {
+ EMPTY => return, // no one was waiting
+ NOTIFIED => return, // already unparked
+ PARKED => {} // gotta go wake someone up
+ _ => panic!("inconsistent state in unpark"),
+ }
+
+ // There is a period between when the parked thread sets `state` to
+ // `PARKED` (or last checked `state` in the case of a spurious wake
+ // up) and when it actually waits on `cvar`. If we were to notify
+ // during this period it would be ignored and then when the parked
+ // thread went to sleep it would never wake up. Fortunately, it has
+ // `lock` locked at this stage so we can acquire `lock` to wait until
+ // it is ready to receive the notification.
+ //
+ // Releasing `lock` before the call to `notify_one` means that when the
+ // parked thread wakes it doesn't get woken only to have to wait for us
+ // to release `lock`.
+ unsafe {
+ lock(self.lock.get());
+ unlock(self.lock.get());
+ notify_one(self.cvar.get());
+ }
+ }
+}
+
+impl Drop for Parker {
+ fn drop(&mut self) {
+ unsafe {
+ libc::pthread_cond_destroy(self.cvar.get_mut());
+ libc::pthread_mutex_destroy(self.lock.get_mut());
+ }
+ }
+}
+
+unsafe impl Sync for Parker {}
+unsafe impl Send for Parker {}
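The pthread parker's EMPTY/PARKED/NOTIFIED handshake does not depend on pthread itself; the sketch below rebuilds it on std's own Mutex and Condvar. It is an analogue for reading along, not the in-tree implementation (std cannot use these types at this layer), and the 10 ms sleep in main is only there to make the slow path likely.

    use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
    use std::sync::{Arc, Condvar, Mutex};
    use std::thread;
    use std::time::Duration;

    const EMPTY: usize = 0;
    const PARKED: usize = 1;
    const NOTIFIED: usize = 2;

    // One Parker belongs to exactly one parking thread, as in std.
    struct Parker {
        state: AtomicUsize,
        lock: Mutex<()>,
        cvar: Condvar,
    }

    impl Parker {
        fn new() -> Self {
            Parker { state: AtomicUsize::new(EMPTY), lock: Mutex::new(()), cvar: Condvar::new() }
        }

        fn park(&self) {
            // Fast path: consume a pending token without touching the lock.
            if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() {
                return;
            }
            let mut guard = self.lock.lock().unwrap();
            if self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst).is_err() {
                // unpark() raced in between the two exchanges: take the token.
                self.state.swap(EMPTY, SeqCst);
                return;
            }
            while self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_err() {
                // Spurious wakeups just loop; wait() releases the lock while asleep.
                guard = self.cvar.wait(guard).unwrap();
            }
        }

        fn unpark(&self) {
            if self.state.swap(NOTIFIED, SeqCst) == PARKED {
                // Briefly take the lock so the parker is really inside wait(),
                // then signal it.
                drop(self.lock.lock().unwrap());
                self.cvar.notify_one();
            }
        }
    }

    fn main() {
        let parker = Arc::new(Parker::new());
        let waker = Arc::clone(&parker);
        let t = thread::spawn(move || parker.park());
        thread::sleep(Duration::from_millis(10));
        waker.unpark();
        t.join().unwrap();
    }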
diff --git a/library/std/src/sys/unix/time.rs b/library/std/src/sys/unix/time.rs
index dff973f59..cca9c6767 100644
--- a/library/std/src/sys/unix/time.rs
+++ b/library/std/src/sys/unix/time.rs
@@ -7,6 +7,12 @@ const NSEC_PER_SEC: u64 = 1_000_000_000;
pub const UNIX_EPOCH: SystemTime = SystemTime { t: Timespec::zero() };
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[repr(transparent)]
+#[rustc_layout_scalar_valid_range_start(0)]
+#[rustc_layout_scalar_valid_range_end(999_999_999)]
+struct Nanoseconds(u32);
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct SystemTime {
pub(in crate::sys::unix) t: Timespec,
}
@@ -14,7 +20,7 @@ pub struct SystemTime {
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub(in crate::sys::unix) struct Timespec {
tv_sec: i64,
- tv_nsec: i64,
+ tv_nsec: Nanoseconds,
}
impl SystemTime {
@@ -46,18 +52,20 @@ impl fmt::Debug for SystemTime {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SystemTime")
.field("tv_sec", &self.t.tv_sec)
- .field("tv_nsec", &self.t.tv_nsec)
+ .field("tv_nsec", &self.t.tv_nsec.0)
.finish()
}
}
impl Timespec {
pub const fn zero() -> Timespec {
- Timespec { tv_sec: 0, tv_nsec: 0 }
+ Timespec::new(0, 0)
}
- fn new(tv_sec: i64, tv_nsec: i64) -> Timespec {
- Timespec { tv_sec, tv_nsec }
+ const fn new(tv_sec: i64, tv_nsec: i64) -> Timespec {
+ assert!(tv_nsec >= 0 && tv_nsec < NSEC_PER_SEC as i64);
+ // SAFETY: The assert above checks tv_nsec is within the valid range
+ Timespec { tv_sec, tv_nsec: unsafe { Nanoseconds(tv_nsec as u32) } }
}
pub fn sub_timespec(&self, other: &Timespec) -> Result<Duration, Duration> {
@@ -75,12 +83,12 @@ impl Timespec {
//
// Ideally this code could be rearranged such that it more
// directly expresses the lower-cost behavior we want from it.
- let (secs, nsec) = if self.tv_nsec >= other.tv_nsec {
- ((self.tv_sec - other.tv_sec) as u64, (self.tv_nsec - other.tv_nsec) as u32)
+ let (secs, nsec) = if self.tv_nsec.0 >= other.tv_nsec.0 {
+ ((self.tv_sec - other.tv_sec) as u64, self.tv_nsec.0 - other.tv_nsec.0)
} else {
(
(self.tv_sec - other.tv_sec - 1) as u64,
- self.tv_nsec as u32 + (NSEC_PER_SEC as u32) - other.tv_nsec as u32,
+ self.tv_nsec.0 + (NSEC_PER_SEC as u32) - other.tv_nsec.0,
)
};
@@ -102,7 +110,7 @@ impl Timespec {
// Nano calculations can't overflow because nanos are <1B which fit
// in a u32.
- let mut nsec = other.subsec_nanos() + self.tv_nsec as u32;
+ let mut nsec = other.subsec_nanos() + self.tv_nsec.0;
if nsec >= NSEC_PER_SEC as u32 {
nsec -= NSEC_PER_SEC as u32;
secs = secs.checked_add(1)?;
@@ -118,7 +126,7 @@ impl Timespec {
.and_then(|secs| self.tv_sec.checked_sub(secs))?;
// Similar to above, nanos can't overflow.
- let mut nsec = self.tv_nsec as i32 - other.subsec_nanos() as i32;
+ let mut nsec = self.tv_nsec.0 as i32 - other.subsec_nanos() as i32;
if nsec < 0 {
nsec += NSEC_PER_SEC as i32;
secs = secs.checked_sub(1)?;
@@ -130,7 +138,7 @@ impl Timespec {
pub fn to_timespec(&self) -> Option<libc::timespec> {
Some(libc::timespec {
tv_sec: self.tv_sec.try_into().ok()?,
- tv_nsec: self.tv_nsec.try_into().ok()?,
+ tv_nsec: self.tv_nsec.0.try_into().ok()?,
})
}
}
@@ -293,7 +301,7 @@ mod inner {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Instant")
.field("tv_sec", &self.t.tv_sec)
- .field("tv_nsec", &self.t.tv_nsec)
+ .field("tv_nsec", &self.t.tv_nsec.0)
.finish()
}
}
@@ -334,7 +342,7 @@ mod inner {
let mut t = MaybeUninit::uninit();
cvt(unsafe { clock_gettime64(clock, t.as_mut_ptr()) }).unwrap();
let t = unsafe { t.assume_init() };
- return Timespec { tv_sec: t.tv_sec, tv_nsec: t.tv_nsec as i64 };
+ return Timespec::new(t.tv_sec, t.tv_nsec as i64);
}
}
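The point of the new `Nanoseconds` wrapper is the pair of `rustc_layout_scalar_valid_range_*` attributes: by promising that only 0..=999_999_999 are valid bit patterns, `Timespec` gains a niche that enum layouts can reuse. Those attributes are perma-unstable, but the same effect is visible on stable Rust through `NonZeroU32`, whose single invalid pattern (zero) plays the same role:

    use std::mem::size_of;
    use std::num::NonZeroU32;

    fn main() {
        // The forbidden bit pattern of NonZeroU32 stores Option's discriminant.
        assert_eq!(size_of::<Option<NonZeroU32>>(), size_of::<u32>());
        // A plain u32 has no invalid patterns, so Option must grow.
        assert!(size_of::<Option<u32>>() > size_of::<u32>());
    }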
diff --git a/library/std/src/sys/unsupported/alloc.rs b/library/std/src/sys/unsupported/alloc.rs
index 8d5d0a2f5..d715ae454 100644
--- a/library/std/src/sys/unsupported/alloc.rs
+++ b/library/std/src/sys/unsupported/alloc.rs
@@ -1,15 +1,16 @@
use crate::alloc::{GlobalAlloc, Layout, System};
+use crate::ptr::null_mut;
#[stable(feature = "alloc_system_type", since = "1.28.0")]
unsafe impl GlobalAlloc for System {
#[inline]
unsafe fn alloc(&self, _layout: Layout) -> *mut u8 {
- 0 as *mut u8
+ null_mut()
}
#[inline]
unsafe fn alloc_zeroed(&self, _layout: Layout) -> *mut u8 {
- 0 as *mut u8
+ null_mut()
}
#[inline]
@@ -17,6 +18,6 @@ unsafe impl GlobalAlloc for System {
#[inline]
unsafe fn realloc(&self, _ptr: *mut u8, _layout: Layout, _new_size: usize) -> *mut u8 {
- 0 as *mut u8
+ null_mut()
}
}
diff --git a/library/std/src/sys/unsupported/common.rs b/library/std/src/sys/unsupported/common.rs
index 4c9ade4a8..5cd9e57de 100644
--- a/library/std/src/sys/unsupported/common.rs
+++ b/library/std/src/sys/unsupported/common.rs
@@ -6,7 +6,7 @@ pub mod memchr {
// SAFETY: must be called only once during runtime initialization.
// NOTE: this is not guaranteed to run, for example when Rust code is called externally.
-pub unsafe fn init(_argc: isize, _argv: *const *const u8) {}
+pub unsafe fn init(_argc: isize, _argv: *const *const u8, _sigpipe: u8) {}
// SAFETY: must be called only once during runtime cleanup.
// NOTE: this is not guaranteed to run, for example when the program aborts.
diff --git a/library/std/src/sys/unsupported/fs.rs b/library/std/src/sys/unsupported/fs.rs
index 0e1a6257e..6ac1b5d2b 100644
--- a/library/std/src/sys/unsupported/fs.rs
+++ b/library/std/src/sys/unsupported/fs.rs
@@ -1,7 +1,7 @@
use crate::ffi::OsString;
use crate::fmt;
use crate::hash::{Hash, Hasher};
-use crate::io::{self, IoSlice, IoSliceMut, ReadBuf, SeekFrom};
+use crate::io::{self, BorrowedCursor, IoSlice, IoSliceMut, SeekFrom};
use crate::path::{Path, PathBuf};
use crate::sys::time::SystemTime;
use crate::sys::unsupported;
@@ -214,7 +214,7 @@ impl File {
self.0
}
- pub fn read_buf(&self, _buf: &mut ReadBuf<'_>) -> io::Result<()> {
+ pub fn read_buf(&self, _cursor: BorrowedCursor<'_>) -> io::Result<()> {
self.0
}
diff --git a/library/std/src/sys/unsupported/io.rs b/library/std/src/sys/unsupported/io.rs
index d5f475b43..82610ffab 100644
--- a/library/std/src/sys/unsupported/io.rs
+++ b/library/std/src/sys/unsupported/io.rs
@@ -45,3 +45,7 @@ impl<'a> IoSliceMut<'a> {
self.0
}
}
+
+pub fn is_terminal<T>(_: &T) -> bool {
+ false
+}
diff --git a/library/std/src/sys/unsupported/locks/condvar.rs b/library/std/src/sys/unsupported/locks/condvar.rs
index e703fd0d2..527a26a12 100644
--- a/library/std/src/sys/unsupported/locks/condvar.rs
+++ b/library/std/src/sys/unsupported/locks/condvar.rs
@@ -7,6 +7,7 @@ pub type MovableCondvar = Condvar;
impl Condvar {
#[inline]
+ #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
pub const fn new() -> Condvar {
Condvar {}
}
diff --git a/library/std/src/sys/unsupported/locks/mod.rs b/library/std/src/sys/unsupported/locks/mod.rs
index d412ff152..602a2d623 100644
--- a/library/std/src/sys/unsupported/locks/mod.rs
+++ b/library/std/src/sys/unsupported/locks/mod.rs
@@ -3,4 +3,4 @@ mod mutex;
mod rwlock;
pub use condvar::{Condvar, MovableCondvar};
pub use mutex::{MovableMutex, Mutex};
-pub use rwlock::{MovableRwLock, RwLock};
+pub use rwlock::MovableRwLock;
diff --git a/library/std/src/sys/unsupported/locks/mutex.rs b/library/std/src/sys/unsupported/locks/mutex.rs
index d7cb12e0c..87ea475c6 100644
--- a/library/std/src/sys/unsupported/locks/mutex.rs
+++ b/library/std/src/sys/unsupported/locks/mutex.rs
@@ -12,14 +12,12 @@ unsafe impl Sync for Mutex {} // no threads on this platform
impl Mutex {
#[inline]
+ #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
pub const fn new() -> Mutex {
Mutex { locked: Cell::new(false) }
}
#[inline]
- pub unsafe fn init(&mut self) {}
-
- #[inline]
pub unsafe fn lock(&self) {
assert_eq!(self.locked.replace(true), false, "cannot recursively acquire mutex");
}
diff --git a/library/std/src/sys/unsupported/locks/rwlock.rs b/library/std/src/sys/unsupported/locks/rwlock.rs
index aca5fb715..5292691b9 100644
--- a/library/std/src/sys/unsupported/locks/rwlock.rs
+++ b/library/std/src/sys/unsupported/locks/rwlock.rs
@@ -12,6 +12,7 @@ unsafe impl Sync for RwLock {} // no threads on this platform
impl RwLock {
#[inline]
+ #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
pub const fn new() -> RwLock {
RwLock { mode: Cell::new(0) }
}
diff --git a/library/std/src/sys/unsupported/process.rs b/library/std/src/sys/unsupported/process.rs
index 42a1ff730..633f17c05 100644
--- a/library/std/src/sys/unsupported/process.rs
+++ b/library/std/src/sys/unsupported/process.rs
@@ -200,6 +200,9 @@ impl<'a> Iterator for CommandArgs<'a> {
fn next(&mut self) -> Option<&'a OsStr> {
None
}
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, Some(0))
+ }
}
impl<'a> ExactSizeIterator for CommandArgs<'a> {}
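For an iterator that can never yield an item, the exact hint matters more than it looks: `ExactSizeIterator::len` is derived from `size_hint`, and its default body asserts that the upper bound equals the lower bound. A standalone sketch with the same shape as the stubbed-out `CommandArgs`:

    /// An iterator over nothing, mirroring the unsupported-target stub above.
    struct NoArgs;

    impl Iterator for NoArgs {
        type Item = &'static str;

        fn next(&mut self) -> Option<Self::Item> {
            None
        }

        // (0, Some(0)) is the honest hint here; the default (0, None) would
        // trip the assert_eq! inside the default ExactSizeIterator::len.
        fn size_hint(&self) -> (usize, Option<usize>) {
            (0, Some(0))
        }
    }

    impl ExactSizeIterator for NoArgs {}

    fn main() {
        assert_eq!(NoArgs.len(), 0);
        let collected: Vec<&str> = NoArgs.collect();
        assert!(collected.is_empty());
    }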
diff --git a/library/std/src/sys/unsupported/thread_local_dtor.rs b/library/std/src/sys/unsupported/thread_local_dtor.rs
index 85d660983..84660ea58 100644
--- a/library/std/src/sys/unsupported/thread_local_dtor.rs
+++ b/library/std/src/sys/unsupported/thread_local_dtor.rs
@@ -1,5 +1,6 @@
#![unstable(feature = "thread_local_internals", issue = "none")]
+#[cfg_attr(target_family = "wasm", allow(unused))] // unused on wasm32-unknown-unknown
pub unsafe fn register_dtor(_t: *mut u8, _dtor: unsafe extern "C" fn(*mut u8)) {
// FIXME: right now there is no concept of "thread exit", but this is likely
// going to show up at some point in the form of an exported symbol that the
diff --git a/library/std/src/sys/unsupported/thread_local_key.rs b/library/std/src/sys/unsupported/thread_local_key.rs
index c31b61cbf..b6e5e4cd2 100644
--- a/library/std/src/sys/unsupported/thread_local_key.rs
+++ b/library/std/src/sys/unsupported/thread_local_key.rs
@@ -19,8 +19,3 @@ pub unsafe fn get(_key: Key) -> *mut u8 {
pub unsafe fn destroy(_key: Key) {
panic!("should not be used on this target");
}
-
-#[inline]
-pub fn requires_synchronized_create() -> bool {
- panic!("should not be used on this target");
-}
diff --git a/library/std/src/sys/wasi/fs.rs b/library/std/src/sys/wasi/fs.rs
index 6614ae397..d4866bbc3 100644
--- a/library/std/src/sys/wasi/fs.rs
+++ b/library/std/src/sys/wasi/fs.rs
@@ -1,9 +1,9 @@
#![deny(unsafe_op_in_unsafe_fn)]
use super::fd::WasiFd;
-use crate::ffi::{CStr, CString, OsStr, OsString};
+use crate::ffi::{CStr, OsStr, OsString};
use crate::fmt;
-use crate::io::{self, IoSlice, IoSliceMut, ReadBuf, SeekFrom};
+use crate::io::{self, BorrowedCursor, IoSlice, IoSliceMut, SeekFrom};
use crate::iter;
use crate::mem::{self, ManuallyDrop};
use crate::os::raw::c_int;
@@ -12,6 +12,7 @@ use crate::os::wasi::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, RawFd
use crate::path::{Path, PathBuf};
use crate::ptr;
use crate::sync::Arc;
+use crate::sys::common::small_c_string::run_path_with_cstr;
use crate::sys::time::SystemTime;
use crate::sys::unsupported;
use crate::sys_common::{AsInner, FromInner, IntoInner};
@@ -65,8 +66,8 @@ pub struct FilePermissions {
#[derive(Copy, Clone, Debug, Default)]
pub struct FileTimes {
- accessed: Option<wasi::Timestamp>,
- modified: Option<wasi::Timestamp>,
+ accessed: Option<SystemTime>,
+ modified: Option<SystemTime>,
}
#[derive(PartialEq, Eq, Hash, Debug, Copy, Clone)]
@@ -120,11 +121,11 @@ impl FilePermissions {
impl FileTimes {
pub fn set_accessed(&mut self, t: SystemTime) {
- self.accessed = Some(t.to_wasi_timestamp_or_panic());
+ self.accessed = Some(t);
}
pub fn set_modified(&mut self, t: SystemTime) {
- self.modified = Some(t.to_wasi_timestamp_or_panic());
+ self.modified = Some(t);
}
}
@@ -439,8 +440,8 @@ impl File {
true
}
- pub fn read_buf(&self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
- crate::io::default_read_buf(|buf| self.read(buf), buf)
+ pub fn read_buf(&self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
+ crate::io::default_read_buf(|buf| self.read(buf), cursor)
}
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
@@ -476,9 +477,16 @@ impl File {
}
pub fn set_times(&self, times: FileTimes) -> io::Result<()> {
+ let to_timestamp = |time: Option<SystemTime>| {
+ match time {
+ Some(time) if let Some(ts) = time.to_wasi_timestamp() => Ok(ts),
+ Some(_) => Err(io::const_io_error!(io::ErrorKind::InvalidInput, "timestamp is too large to set as a file time")),
+ None => Ok(0),
+ }
+ };
self.fd.filestat_set_times(
- times.accessed.unwrap_or(0),
- times.modified.unwrap_or(0),
+ to_timestamp(times.accessed)?,
+ to_timestamp(times.modified)?,
times.accessed.map_or(0, |_| wasi::FSTFLAGS_ATIM)
| times.modified.map_or(0, |_| wasi::FSTFLAGS_MTIM),
)
@@ -687,51 +695,52 @@ fn open_at(fd: &WasiFd, path: &Path, opts: &OpenOptions) -> io::Result<File> {
/// Note that this can fail if `p` doesn't look like it can be opened relative
/// to any pre-opened file descriptor.
fn open_parent(p: &Path) -> io::Result<(ManuallyDrop<WasiFd>, PathBuf)> {
- let p = CString::new(p.as_os_str().as_bytes())?;
- let mut buf = Vec::<u8>::with_capacity(512);
- loop {
- unsafe {
- let mut relative_path = buf.as_ptr().cast();
- let mut abs_prefix = ptr::null();
- let fd = __wasilibc_find_relpath(
- p.as_ptr(),
- &mut abs_prefix,
- &mut relative_path,
- buf.capacity(),
- );
- if fd == -1 {
- if io::Error::last_os_error().raw_os_error() == Some(libc::ENOMEM) {
- // Trigger the internal buffer resizing logic of `Vec` by requiring
- // more space than the current capacity.
- let cap = buf.capacity();
- buf.set_len(cap);
- buf.reserve(1);
- continue;
- }
- let msg = format!(
- "failed to find a pre-opened file descriptor \
- through which {:?} could be opened",
- p
+ run_path_with_cstr(p, |p| {
+ let mut buf = Vec::<u8>::with_capacity(512);
+ loop {
+ unsafe {
+ let mut relative_path = buf.as_ptr().cast();
+ let mut abs_prefix = ptr::null();
+ let fd = __wasilibc_find_relpath(
+ p.as_ptr(),
+ &mut abs_prefix,
+ &mut relative_path,
+ buf.capacity(),
);
- return Err(io::Error::new(io::ErrorKind::Uncategorized, msg));
- }
- let relative = CStr::from_ptr(relative_path).to_bytes().to_vec();
+ if fd == -1 {
+ if io::Error::last_os_error().raw_os_error() == Some(libc::ENOMEM) {
+ // Trigger the internal buffer resizing logic of `Vec` by requiring
+ // more space than the current capacity.
+ let cap = buf.capacity();
+ buf.set_len(cap);
+ buf.reserve(1);
+ continue;
+ }
+ let msg = format!(
+ "failed to find a pre-opened file descriptor \
+ through which {:?} could be opened",
+ p
+ );
+ return Err(io::Error::new(io::ErrorKind::Uncategorized, msg));
+ }
+ let relative = CStr::from_ptr(relative_path).to_bytes().to_vec();
- return Ok((
- ManuallyDrop::new(WasiFd::from_raw_fd(fd as c_int)),
- PathBuf::from(OsString::from_vec(relative)),
- ));
+ return Ok((
+ ManuallyDrop::new(WasiFd::from_raw_fd(fd as c_int)),
+ PathBuf::from(OsString::from_vec(relative)),
+ ));
+ }
}
- }
- extern "C" {
- pub fn __wasilibc_find_relpath(
- path: *const libc::c_char,
- abs_prefix: *mut *const libc::c_char,
- relative_path: *mut *const libc::c_char,
- relative_path_len: libc::size_t,
- ) -> libc::c_int;
- }
+ extern "C" {
+ pub fn __wasilibc_find_relpath(
+ path: *const libc::c_char,
+ abs_prefix: *mut *const libc::c_char,
+ relative_path: *mut *const libc::c_char,
+ relative_path_len: libc::size_t,
+ ) -> libc::c_int;
+ }
+ })
}
pub fn osstr2str(f: &OsStr) -> io::Result<&str> {
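`run_path_with_cstr` lets `open_parent` hand libc a nul-terminated path without always allocating a `CString` first. A rough standalone sketch of that shape follows; the 384-byte stack buffer size is an assumption of this example rather than something taken from the diff, and the real helper works on `Path` values rather than raw byte slices.

    use std::ffi::{CStr, CString};
    use std::io;

    const STACK_BUF: usize = 384; // assumed small-buffer size for this sketch

    // Short inputs are nul-terminated in a stack buffer; long ones fall back
    // to an owned CString. The closure only ever sees a borrowed &CStr.
    fn run_with_cstr<T>(bytes: &[u8], f: impl FnOnce(&CStr) -> io::Result<T>) -> io::Result<T> {
        let invalid = || io::Error::new(io::ErrorKind::InvalidInput, "nul byte found");
        if bytes.len() < STACK_BUF {
            let mut buf = [0u8; STACK_BUF];
            buf[..bytes.len()].copy_from_slice(bytes);
            let cstr = CStr::from_bytes_with_nul(&buf[..=bytes.len()]).map_err(|_| invalid())?;
            f(cstr)
        } else {
            f(&CString::new(bytes).map_err(|_| invalid())?)
        }
    }

    fn main() -> io::Result<()> {
        run_with_cstr(b"/tmp/some/path", |c| {
            println!("borrowed C string: {:?}", c);
            Ok(())
        })
    }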
diff --git a/library/std/src/sys/wasi/io.rs b/library/std/src/sys/wasi/io.rs
index ee017d13a..2cd45df88 100644
--- a/library/std/src/sys/wasi/io.rs
+++ b/library/std/src/sys/wasi/io.rs
@@ -1,6 +1,7 @@
#![deny(unsafe_op_in_unsafe_fn)]
use crate::marker::PhantomData;
+use crate::os::fd::{AsFd, AsRawFd};
use crate::slice;
#[derive(Copy, Clone)]
@@ -71,3 +72,8 @@ impl<'a> IoSliceMut<'a> {
unsafe { slice::from_raw_parts_mut(self.vec.buf as *mut u8, self.vec.buf_len) }
}
}
+
+pub fn is_terminal(fd: &impl AsFd) -> bool {
+ let fd = fd.as_fd();
+ unsafe { libc::isatty(fd.as_raw_fd()) != 0 }
+}
diff --git a/library/std/src/sys/wasi/mod.rs b/library/std/src/sys/wasi/mod.rs
index 683a07a34..c8c47763a 100644
--- a/library/std/src/sys/wasi/mod.rs
+++ b/library/std/src/sys/wasi/mod.rs
@@ -25,6 +25,9 @@ pub mod cmath;
pub mod env;
pub mod fd;
pub mod fs;
+#[allow(unused)]
+#[path = "../wasm/atomics/futex.rs"]
+pub mod futex;
pub mod io;
#[path = "../unsupported/locks/mod.rs"]
pub mod locks;
diff --git a/library/std/src/sys/wasi/os.rs b/library/std/src/sys/wasi/os.rs
index c5229a188..f5513e999 100644
--- a/library/std/src/sys/wasi/os.rs
+++ b/library/std/src/sys/wasi/os.rs
@@ -1,14 +1,15 @@
#![deny(unsafe_op_in_unsafe_fn)]
-use crate::any::Any;
use crate::error::Error as StdError;
-use crate::ffi::{CStr, CString, OsStr, OsString};
+use crate::ffi::{CStr, OsStr, OsString};
use crate::fmt;
use crate::io;
use crate::marker::PhantomData;
+use crate::ops::Drop;
use crate::os::wasi::prelude::*;
use crate::path::{self, PathBuf};
use crate::str;
+use crate::sys::common::small_c_string::{run_path_with_cstr, run_with_cstr};
use crate::sys::memchr;
use crate::sys::unsupported;
use crate::vec;
@@ -23,10 +24,26 @@ mod libc {
}
}
-#[cfg(not(target_feature = "atomics"))]
-pub unsafe fn env_lock() -> impl Any {
- // No need for a lock if we're single-threaded, but this function will need
- // to get implemented for multi-threaded scenarios
+cfg_if::cfg_if! {
+ if #[cfg(target_feature = "atomics")] {
+ // Access to the environment must be protected by a lock in multi-threaded scenarios.
+ use crate::sync::{PoisonError, RwLock};
+ static ENV_LOCK: RwLock<()> = RwLock::new(());
+ pub fn env_read_lock() -> impl Drop {
+ ENV_LOCK.read().unwrap_or_else(PoisonError::into_inner)
+ }
+ pub fn env_write_lock() -> impl Drop {
+ ENV_LOCK.write().unwrap_or_else(PoisonError::into_inner)
+ }
+ } else {
+ // No need for a lock if we are single-threaded.
+ pub fn env_read_lock() -> impl Drop {
+ Box::new(())
+ }
+ pub fn env_write_lock() -> impl Drop {
+ Box::new(())
+ }
+ }
}
pub fn errno() -> i32 {
@@ -77,13 +94,10 @@ pub fn getcwd() -> io::Result<PathBuf> {
}
pub fn chdir(p: &path::Path) -> io::Result<()> {
- let p: &OsStr = p.as_ref();
- let p = CString::new(p.as_bytes())?;
- unsafe {
- match libc::chdir(p.as_ptr()) == (0 as libc::c_int) {
- true => Ok(()),
- false => Err(io::Error::last_os_error()),
- }
+ let result = run_path_with_cstr(p, |p| unsafe { Ok(libc::chdir(p.as_ptr())) })?;
+ match result == (0 as libc::c_int) {
+ true => Ok(()),
+ false => Err(io::Error::last_os_error()),
}
}
@@ -146,7 +160,7 @@ impl Iterator for Env {
pub fn env() -> Env {
unsafe {
- let _guard = env_lock();
+ let _guard = env_read_lock();
let mut environ = libc::environ;
let mut result = Vec::new();
if !environ.is_null() {
@@ -176,35 +190,32 @@ pub fn env() -> Env {
}
pub fn getenv(k: &OsStr) -> Option<OsString> {
- let k = CString::new(k.as_bytes()).ok()?;
- unsafe {
- let _guard = env_lock();
- let s = libc::getenv(k.as_ptr()) as *const libc::c_char;
- if s.is_null() {
- None
- } else {
- Some(OsStringExt::from_vec(CStr::from_ptr(s).to_bytes().to_vec()))
- }
+ let s = run_with_cstr(k.as_bytes(), |k| unsafe {
+ let _guard = env_read_lock();
+ Ok(libc::getenv(k.as_ptr()) as *const libc::c_char)
+ })
+ .ok()?;
+ if s.is_null() {
+ None
+ } else {
+ Some(OsStringExt::from_vec(unsafe { CStr::from_ptr(s) }.to_bytes().to_vec()))
}
}
pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> {
- let k = CString::new(k.as_bytes())?;
- let v = CString::new(v.as_bytes())?;
-
- unsafe {
- let _guard = env_lock();
- cvt(libc::setenv(k.as_ptr(), v.as_ptr(), 1)).map(drop)
- }
+ run_with_cstr(k.as_bytes(), |k| {
+ run_with_cstr(v.as_bytes(), |v| unsafe {
+ let _guard = env_write_lock();
+ cvt(libc::setenv(k.as_ptr(), v.as_ptr(), 1)).map(drop)
+ })
+ })
}
pub fn unsetenv(n: &OsStr) -> io::Result<()> {
- let nbuf = CString::new(n.as_bytes())?;
-
- unsafe {
- let _guard = env_lock();
+ run_with_cstr(n.as_bytes(), |nbuf| unsafe {
+ let _guard = env_write_lock();
cvt(libc::unsetenv(nbuf.as_ptr())).map(drop)
- }
+ })
}
pub fn temp_dir() -> PathBuf {
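With atomics enabled, WASI environment access now goes through a process-wide `RwLock`, and both lock functions deliberately return just `impl Drop` so the single-threaded fallback (a `Box<()>`) can share the same signature. The multi-threaded half of the pattern, reduced to a runnable sketch:

    use std::sync::{PoisonError, RwLock};

    static ENV_LOCK: RwLock<()> = RwLock::new(());

    // Callers only need "something that unlocks on drop", so the concrete
    // guard type stays hidden behind impl Drop.
    fn env_read_lock() -> impl Drop {
        ENV_LOCK.read().unwrap_or_else(PoisonError::into_inner)
    }

    fn env_write_lock() -> impl Drop {
        ENV_LOCK.write().unwrap_or_else(PoisonError::into_inner)
    }

    fn main() {
        {
            let _guard = env_read_lock(); // held until the end of this block
            // read the environment here
        }
        let _guard = env_write_lock();
        // mutate the environment here
    }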
diff --git a/library/std/src/sys/wasi/time.rs b/library/std/src/sys/wasi/time.rs
index 3d326e491..016b06efb 100644
--- a/library/std/src/sys/wasi/time.rs
+++ b/library/std/src/sys/wasi/time.rs
@@ -47,8 +47,8 @@ impl SystemTime {
SystemTime(Duration::from_nanos(ts))
}
- pub fn to_wasi_timestamp_or_panic(&self) -> wasi::Timestamp {
- self.0.as_nanos().try_into().expect("time does not fit in WASI timestamp")
+ pub fn to_wasi_timestamp(&self) -> Option<wasi::Timestamp> {
+ self.0.as_nanos().try_into().ok()
}
pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
diff --git a/library/std/src/sys/wasm/mod.rs b/library/std/src/sys/wasm/mod.rs
index 4159efe2a..93838390b 100644
--- a/library/std/src/sys/wasm/mod.rs
+++ b/library/std/src/sys/wasm/mod.rs
@@ -57,7 +57,7 @@ cfg_if::cfg_if! {
mod futex_rwlock;
pub(crate) use futex_condvar::{Condvar, MovableCondvar};
pub(crate) use futex_mutex::{Mutex, MovableMutex};
- pub(crate) use futex_rwlock::{RwLock, MovableRwLock};
+ pub(crate) use futex_rwlock::MovableRwLock;
}
#[path = "atomics/futex.rs"]
pub mod futex;
diff --git a/library/std/src/sys/windows/alloc.rs b/library/std/src/sys/windows/alloc.rs
index fdc81cdea..d53ea1600 100644
--- a/library/std/src/sys/windows/alloc.rs
+++ b/library/std/src/sys/windows/alloc.rs
@@ -16,6 +16,7 @@ mod tests;
// Flag to indicate that the memory returned by `HeapAlloc` should be zeroed.
const HEAP_ZERO_MEMORY: c::DWORD = 0x00000008;
+#[link(name = "kernel32")]
extern "system" {
// Get a handle to the default heap of the current process, or null if the operation fails.
//
@@ -168,7 +169,7 @@ unsafe fn allocate(layout: Layout, zeroed: bool) -> *mut u8 {
// SAFETY: Because the size and alignment of a header is <= `MIN_ALIGN` and `aligned`
// is aligned to at least `MIN_ALIGN` and has at least `MIN_ALIGN` bytes of padding before
// it, it is safe to write a header directly before it.
- unsafe { ptr::write((aligned as *mut Header).offset(-1), Header(ptr)) };
+ unsafe { ptr::write((aligned as *mut Header).sub(1), Header(ptr)) };
// SAFETY: The returned pointer does not point to the to the start of an allocated block,
// but there is a header readable directly before it containing the location of the start
@@ -213,7 +214,7 @@ unsafe impl GlobalAlloc for System {
// SAFETY: Because of the contract of `System`, `ptr` is guaranteed to be non-null
// and have a header readable directly before it.
- unsafe { ptr::read((ptr as *mut Header).offset(-1)).0 }
+ unsafe { ptr::read((ptr as *mut Header).sub(1)).0 }
}
};
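The allocator change from `offset(-1)` to `sub(1)` does not change the computed address; `sub` simply avoids the negative literal and reads as "one element before this pointer". A tiny check of the equivalence:

    fn main() {
        let data = [10u32, 20, 30];
        let last: *const u32 = &data[2];
        unsafe {
            assert_eq!(*last.offset(-1), 20); // "move by -1 elements"
            assert_eq!(*last.sub(1), 20);     // "move back by 1 element"
        }
    }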
diff --git a/library/std/src/sys/windows/c.rs b/library/std/src/sys/windows/c.rs
index 478068c73..be6fc2ebb 100644
--- a/library/std/src/sys/windows/c.rs
+++ b/library/std/src/sys/windows/c.rs
@@ -66,10 +66,12 @@ pub type LPSYSTEM_INFO = *mut SYSTEM_INFO;
pub type LPWSABUF = *mut WSABUF;
pub type LPWSAOVERLAPPED = *mut c_void;
pub type LPWSAOVERLAPPED_COMPLETION_ROUTINE = *mut c_void;
+pub type BCRYPT_ALG_HANDLE = LPVOID;
pub type PCONDITION_VARIABLE = *mut CONDITION_VARIABLE;
pub type PLARGE_INTEGER = *mut c_longlong;
pub type PSRWLOCK = *mut SRWLOCK;
+pub type LPINIT_ONCE = *mut INIT_ONCE;
pub type SOCKET = crate::os::windows::raw::SOCKET;
pub type socklen_t = c_int;
@@ -125,6 +127,10 @@ pub const SECURITY_SQOS_PRESENT: DWORD = 0x00100000;
pub const FIONBIO: c_ulong = 0x8004667e;
+pub const MAX_PATH: usize = 260;
+
+pub const FILE_TYPE_PIPE: u32 = 3;
+
#[repr(C)]
#[derive(Copy)]
pub struct WIN32_FIND_DATAW {
@@ -193,6 +199,9 @@ pub const DUPLICATE_SAME_ACCESS: DWORD = 0x00000002;
pub const CONDITION_VARIABLE_INIT: CONDITION_VARIABLE = CONDITION_VARIABLE { ptr: ptr::null_mut() };
pub const SRWLOCK_INIT: SRWLOCK = SRWLOCK { ptr: ptr::null_mut() };
+pub const INIT_ONCE_STATIC_INIT: INIT_ONCE = INIT_ONCE { ptr: ptr::null_mut() };
+
+pub const INIT_ONCE_INIT_FAILED: DWORD = 0x00000004;
pub const DETACHED_PROCESS: DWORD = 0x00000008;
pub const CREATE_NEW_PROCESS_GROUP: DWORD = 0x00000200;
@@ -285,6 +294,8 @@ pub fn nt_success(status: NTSTATUS) -> bool {
status >= 0
}
+// "RNG\0"
+pub const BCRYPT_RNG_ALGORITHM: &[u16] = &[b'R' as u16, b'N' as u16, b'G' as u16, 0];
pub const BCRYPT_USE_SYSTEM_PREFERRED_RNG: DWORD = 0x00000002;
#[repr(C)]
@@ -455,6 +466,12 @@ pub enum FILE_INFO_BY_HANDLE_CLASS {
}
#[repr(C)]
+pub struct FILE_ATTRIBUTE_TAG_INFO {
+ pub FileAttributes: DWORD,
+ pub ReparseTag: DWORD,
+}
+
+#[repr(C)]
pub struct FILE_DISPOSITION_INFO {
pub DeleteFile: BOOLEAN,
}
@@ -501,6 +518,8 @@ pub struct FILE_END_OF_FILE_INFO {
pub EndOfFile: LARGE_INTEGER,
}
+/// NB: Use carefully! In general using this as a reference is likely to get the
+/// provenance wrong for the `rest` field!
#[repr(C)]
pub struct REPARSE_DATA_BUFFER {
pub ReparseTag: c_uint,
@@ -509,6 +528,8 @@ pub struct REPARSE_DATA_BUFFER {
pub rest: (),
}
+/// NB: Use carefully! In general using this as a reference is likely to get the
+/// provenance wrong for the `PathBuffer` field!
#[repr(C)]
pub struct SYMBOLIC_LINK_REPARSE_BUFFER {
pub SubstituteNameOffset: c_ushort,
@@ -519,6 +540,14 @@ pub struct SYMBOLIC_LINK_REPARSE_BUFFER {
pub PathBuffer: WCHAR,
}
+/// NB: Use carefully! In general using this as a reference is likely to get the
+/// provenance wrong for the `PathBuffer` field!
+#[repr(C)]
+pub struct FILE_NAME_INFO {
+ pub FileNameLength: DWORD,
+ pub FileName: [WCHAR; 1],
+}
+
#[repr(C)]
pub struct MOUNT_POINT_REPARSE_BUFFER {
pub SubstituteNameOffset: c_ushort,
@@ -550,6 +579,10 @@ pub struct CONDITION_VARIABLE {
pub struct SRWLOCK {
pub ptr: LPVOID,
}
+#[repr(C)]
+pub struct INIT_ONCE {
+ pub ptr: LPVOID,
+}
#[repr(C)]
pub struct REPARSE_MOUNTPOINT_DATA_BUFFER {
@@ -802,10 +835,6 @@ if #[cfg(not(target_vendor = "uwp"))] {
#[link(name = "advapi32")]
extern "system" {
- // Forbidden when targeting UWP
- #[link_name = "SystemFunction036"]
- pub fn RtlGenRandom(RandomBuffer: *mut u8, RandomBufferLength: ULONG) -> BOOLEAN;
-
// Allowed but unused by UWP
pub fn OpenProcessToken(
ProcessHandle: HANDLE,
@@ -944,6 +973,7 @@ extern "system" {
pub fn TlsAlloc() -> DWORD;
pub fn TlsGetValue(dwTlsIndex: DWORD) -> LPVOID;
pub fn TlsSetValue(dwTlsIndex: DWORD, lpTlsvalue: LPVOID) -> BOOL;
+ pub fn TlsFree(dwTlsIndex: DWORD) -> BOOL;
pub fn GetLastError() -> DWORD;
pub fn QueryPerformanceFrequency(lpFrequency: *mut LARGE_INTEGER) -> BOOL;
pub fn QueryPerformanceCounter(lpPerformanceCount: *mut LARGE_INTEGER) -> BOOL;
@@ -1086,6 +1116,7 @@ extern "system" {
lpFileInformation: LPVOID,
dwBufferSize: DWORD,
) -> BOOL;
+ pub fn GetFileType(hfile: HANDLE) -> DWORD;
pub fn SleepConditionVariableSRW(
ConditionVariable: PCONDITION_VARIABLE,
SRWLock: PSRWLOCK,
@@ -1103,6 +1134,14 @@ extern "system" {
pub fn TryAcquireSRWLockExclusive(SRWLock: PSRWLOCK) -> BOOLEAN;
pub fn TryAcquireSRWLockShared(SRWLock: PSRWLOCK) -> BOOLEAN;
+ pub fn InitOnceBeginInitialize(
+ lpInitOnce: LPINIT_ONCE,
+ dwFlags: DWORD,
+ fPending: LPBOOL,
+ lpContext: *mut LPVOID,
+ ) -> BOOL;
+ pub fn InitOnceComplete(lpInitOnce: LPINIT_ONCE, dwFlags: DWORD, lpContext: LPVOID) -> BOOL;
+
pub fn CompareStringOrdinal(
lpString1: LPCWSTR,
cchCount1: c_int,
@@ -1217,11 +1256,18 @@ extern "system" {
// >= Vista / Server 2008
// https://docs.microsoft.com/en-us/windows/win32/api/bcrypt/nf-bcrypt-bcryptgenrandom
pub fn BCryptGenRandom(
- hAlgorithm: LPVOID,
+ hAlgorithm: BCRYPT_ALG_HANDLE,
pBuffer: *mut u8,
cbBuffer: ULONG,
dwFlags: ULONG,
) -> NTSTATUS;
+ pub fn BCryptOpenAlgorithmProvider(
+ phalgorithm: *mut BCRYPT_ALG_HANDLE,
+ pszAlgId: LPCWSTR,
+ pszimplementation: LPCWSTR,
+ dwflags: ULONG,
+ ) -> NTSTATUS;
+ pub fn BCryptCloseAlgorithmProvider(hAlgorithm: BCRYPT_ALG_HANDLE, dwFlags: ULONG) -> NTSTATUS;
}
// Functions that aren't available on every version of Windows that we support,
@@ -1251,17 +1297,14 @@ compat_fn_with_fallback! {
}
compat_fn_optional! {
- pub static SYNCH_API: &CStr = ansi_str!("api-ms-win-core-synch-l1-2-0");
-
- // >= Windows 8 / Server 2012
- // https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-waitonaddress
+ crate::sys::compat::load_synch_functions();
pub fn WaitOnAddress(
Address: LPVOID,
CompareAddress: LPVOID,
AddressSize: SIZE_T,
dwMilliseconds: DWORD
- ) -> BOOL;
- pub fn WakeByAddressSingle(Address: LPVOID) -> ();
+ );
+ pub fn WakeByAddressSingle(Address: LPVOID);
}
compat_fn_with_fallback! {
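The `BCRYPT_RNG_ALGORITHM` constant above spells out the wide string "RNG\0" one code unit at a time. As a quick illustration only (not how c.rs builds it), the same value can be produced by encoding the literal, which is an easy way to sanity-check hand-written UTF-16 constants:

// Illustration only: the hand-written wide string equals the encoded literal.
fn main() {
    let rng: Vec<u16> = "RNG\0".encode_utf16().collect();
    assert_eq!(rng, [b'R' as u16, b'N' as u16, b'G' as u16, 0]);
}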
diff --git a/library/std/src/sys/windows/cmath.rs b/library/std/src/sys/windows/cmath.rs
index 1a5421fac..43ab8c7ee 100644
--- a/library/std/src/sys/windows/cmath.rs
+++ b/library/std/src/sys/windows/cmath.rs
@@ -44,7 +44,7 @@ mod shims {
}
// On 32-bit x86 MSVC these functions aren't defined, so we just define shims
-// which promote everything fo f64, perform the calculation, and then demote
+// which promote everything to f64, perform the calculation, and then demote
// back to f32. While not precisely correct, this should be "correct enough" for now.
#[cfg(all(target_env = "msvc", target_arch = "x86"))]
mod shims {
diff --git a/library/std/src/sys/windows/compat.rs b/library/std/src/sys/windows/compat.rs
index ccc90177a..7dff81ecb 100644
--- a/library/std/src/sys/windows/compat.rs
+++ b/library/std/src/sys/windows/compat.rs
@@ -7,52 +7,66 @@
//! `GetModuleHandle` and `GetProcAddress` to look up DLL entry points at
//! runtime.
//!
-//! This implementation uses a static initializer to look up the DLL entry
-//! points. The CRT (C runtime) executes static initializers before `main`
-//! is called (for binaries) and before `DllMain` is called (for DLLs).
-//! This is the ideal time to look up DLL imports, because we are guaranteed
-//! that no other threads will attempt to call these entry points. Thus,
-//! we can look up the imports and store them in `static mut` fields
-//! without any synchronization.
+//! This is implemented simply by storing a function pointer in an atomic.
+//! Loading and calling this function will have little or no overhead
+//! compared with calling any other dynamically imported function.
//!
-//! This has an additional advantage: Because the DLL import lookup happens
-//! at module initialization, the cost of these lookups is deterministic,
-//! and is removed from the code paths that actually call the DLL imports.
-//! That is, there is no unpredictable "cache miss" that occurs when calling
-//! a DLL import. For applications that benefit from predictable delays,
-//! this is a benefit. This also eliminates the comparison-and-branch
-//! from the hot path.
-//!
-//! Currently, the standard library uses only a small number of dynamic
-//! DLL imports. If this number grows substantially, then the cost of
-//! performing all of the lookups at initialization time might become
-//! substantial.
-//!
-//! The mechanism of registering a static initializer with the CRT is
-//! documented in
-//! [CRT Initialization](https://docs.microsoft.com/en-us/cpp/c-runtime-library/crt-initialization?view=msvc-160).
-//! It works by contributing a global symbol to the `.CRT$XCU` section.
-//! The linker builds a table of all static initializer functions.
-//! The CRT startup code then iterates that table, calling each
-//! initializer function.
-//!
-//! # **WARNING!!*
-//! The environment that a static initializer function runs in is highly
-//! constrained. There are **many** restrictions on what static initializers
-//! can safely do. Static initializer functions **MUST NOT** do any of the
-//! following (this list is not comprehensive):
-//! * touch any other static field that is used by a different static
-//! initializer, because the order that static initializers run in
-//! is not defined.
-//! * call `LoadLibrary` or any other function that acquires the DLL
-//! loader lock.
-//! * call any Rust function or CRT function that touches any static
-//! (global) state.
+//! The stored function pointer starts out as an importer function which will
+//! swap itself with the real function when it's called for the first time. If
+//! the real function can't be imported then a fallback function is used in its
+//! place. While this is low cost for the happy path (where the function is
+//! already loaded) it does mean there's some overhead the first time the
+//! function is called. In the worst case, multiple threads may all end up
+//! importing the same function unnecessarily.
use crate::ffi::{c_void, CStr};
use crate::ptr::NonNull;
+use crate::sync::atomic::Ordering;
use crate::sys::c;
+// This uses a static initializer to preload some imported functions.
+// The CRT (C runtime) executes static initializers before `main`
+// is called (for binaries) and before `DllMain` is called (for DLLs).
+//
+// It works by contributing a global symbol to the `.CRT$XCT` section.
+// The linker builds a table of all static initializer functions.
+// The CRT startup code then iterates that table, calling each
+// initializer function.
+//
+// NOTE: User code should instead use .CRT$XCU to reliably run after std's initializer.
+// If you're reading this and would like a guarantee here, please
+// file an issue for discussion; currently we don't guarantee any functionality
+// before main.
+// See https://docs.microsoft.com/en-us/cpp/c-runtime-library/crt-initialization?view=msvc-170
+#[used]
+#[link_section = ".CRT$XCT"]
+static INIT_TABLE_ENTRY: unsafe extern "C" fn() = init;
+
+/// Preload some imported functions.
+///
+/// Note that any functions included here will be unconditionally loaded in
+/// the final binary, regardless of whether or not they're actually used.
+///
+/// Therefore, this should be limited to `compat_fn_optional` functions which
+/// must be preloaded or any functions where lazier loading demonstrates a
+/// negative performance impact in practical situations.
+///
+/// Currently we only preload `WaitOnAddress` and `WakeByAddressSingle`.
+unsafe extern "C" fn init() {
+ // In an exe this code is executed before main() so is single threaded.
+ // In a DLL the system's loader lock will be held thereby synchronizing
+ // access. So the same best practices apply here as they do to running in DllMain:
+ // https://docs.microsoft.com/en-us/windows/win32/dlls/dynamic-link-library-best-practices
+ //
+ // DO NOT do anything interesting or complicated in this function! DO NOT call
+ // any Rust functions or CRT functions if those functions touch any global state,
+ // because this function runs during global initialization. For example, DO NOT
+ // do any dynamic allocation, don't call LoadLibrary, etc.
+
+ // Attempt to preload the synch functions.
+ load_synch_functions();
+}
+
/// Helper macro for creating CStrs from literals and symbol names.
macro_rules! ansi_str {
(sym $ident:ident) => {{
@@ -85,39 +99,6 @@ pub(crate) const fn const_cstr_from_bytes(bytes: &'static [u8]) -> &'static CStr
unsafe { crate::ffi::CStr::from_bytes_with_nul_unchecked(bytes) }
}
-#[used]
-#[link_section = ".CRT$XCU"]
-static INIT_TABLE_ENTRY: unsafe extern "C" fn() = init;
-
-/// This is where the magic preloading of symbols happens.
-///
-/// Note that any functions included here will be unconditionally included in
-/// the final binary, regardless of whether or not they're actually used.
-///
-/// Therefore, this is limited to `compat_fn_optional` functions which must be
-/// preloaded and any functions which may be more time sensitive, even for the first call.
-unsafe extern "C" fn init() {
- // There is no locking here. This code is executed before main() is entered, and
- // is guaranteed to be single-threaded.
- //
- // DO NOT do anything interesting or complicated in this function! DO NOT call
- // any Rust functions or CRT functions if those functions touch any global state,
- // because this function runs during global initialization. For example, DO NOT
- // do any dynamic allocation, don't call LoadLibrary, etc.
-
- if let Some(synch) = Module::new(c::SYNCH_API) {
- // These are optional and so we must manually attempt to load them
- // before they can be used.
- c::WaitOnAddress::preload(synch);
- c::WakeByAddressSingle::preload(synch);
- }
-
- if let Some(kernel32) = Module::new(c::KERNEL32) {
- // Preloading this means getting a precise time will be as fast as possible.
- c::GetSystemTimePreciseAsFileTime::preload(kernel32);
- }
-}
-
/// Represents a loaded module.
///
/// Note that the modules std depends on must not be unloaded.
@@ -151,7 +132,7 @@ impl Module {
macro_rules! compat_fn_with_fallback {
(pub static $module:ident: &CStr = $name:expr; $(
$(#[$meta:meta])*
- pub fn $symbol:ident($($argname:ident: $argtype:ty),*) -> $rettype:ty $fallback_body:block
+ $vis:vis fn $symbol:ident($($argname:ident: $argtype:ty),*) -> $rettype:ty $fallback_body:block
)*) => (
pub static $module: &CStr = $name;
$(
@@ -196,11 +177,6 @@ macro_rules! compat_fn_with_fallback {
$fallback_body
}
- #[allow(unused)]
- pub(in crate::sys) fn preload(module: Module) {
- load_from_module(Some(module));
- }
-
#[inline(always)]
pub unsafe fn call($($argname: $argtype),*) -> $rettype {
let func: F = mem::transmute(PTR.load(Ordering::Relaxed));
@@ -208,66 +184,60 @@ macro_rules! compat_fn_with_fallback {
}
}
$(#[$meta])*
- pub use $symbol::call as $symbol;
+ $vis use $symbol::call as $symbol;
)*)
}
-/// A function that either exists or doesn't.
+/// Optionally loaded functions.
///
-/// NOTE: Optional functions must be preloaded in the `init` function above, or they will always be None.
+/// Actual loading of the function defers to $load_functions.
macro_rules! compat_fn_optional {
- (pub static $module:ident: &CStr = $name:expr; $(
- $(#[$meta:meta])*
- pub fn $symbol:ident($($argname:ident: $argtype:ty),*) -> $rettype:ty;
- )*) => (
- pub static $module: &CStr = $name;
+ ($load_functions:expr;
$(
- $(#[$meta])*
- pub mod $symbol {
- #[allow(unused_imports)]
- use super::*;
- use crate::mem;
- use crate::sync::atomic::{AtomicPtr, Ordering};
- use crate::sys::compat::Module;
- use crate::ptr::{self, NonNull};
-
- type F = unsafe extern "system" fn($($argtype),*) -> $rettype;
-
- /// `PTR` will either be `null()` or set to the loaded function.
- static PTR: AtomicPtr<c_void> = AtomicPtr::new(ptr::null_mut());
-
- /// Only allow access to the function if it has loaded successfully.
- #[inline(always)]
- #[cfg(not(miri))]
- pub fn option() -> Option<F> {
- unsafe {
- NonNull::new(PTR.load(Ordering::Relaxed)).map(|f| mem::transmute(f))
+ $(#[$meta:meta])*
+ $vis:vis fn $symbol:ident($($argname:ident: $argtype:ty),*) $(-> $rettype:ty)?;
+ )+) => (
+ $(
+ pub mod $symbol {
+ use super::*;
+ use crate::ffi::c_void;
+ use crate::mem;
+ use crate::ptr::{self, NonNull};
+ use crate::sync::atomic::{AtomicPtr, Ordering};
+
+ pub(in crate::sys) static PTR: AtomicPtr<c_void> = AtomicPtr::new(ptr::null_mut());
+
+ type F = unsafe extern "system" fn($($argtype),*) $(-> $rettype)?;
+
+ #[inline(always)]
+ pub fn option() -> Option<F> {
+ // Miri does not understand the way we do preloading
+ // therefore load the function here instead.
+ #[cfg(miri)] $load_functions;
+ NonNull::new(PTR.load(Ordering::Relaxed)).map(|f| unsafe { mem::transmute(f) })
}
}
+ )+
+ )
+}
- // Miri does not understand the way we do preloading
- // therefore load the function here instead.
- #[cfg(miri)]
- pub fn option() -> Option<F> {
- let mut func = NonNull::new(PTR.load(Ordering::Relaxed));
- if func.is_none() {
- unsafe { Module::new($module).map(preload) };
- func = NonNull::new(PTR.load(Ordering::Relaxed));
- }
- unsafe {
- func.map(|f| mem::transmute(f))
- }
- }
+/// Load all needed functions from "api-ms-win-core-synch-l1-2-0".
+pub(super) fn load_synch_functions() {
+ fn try_load() -> Option<()> {
+ const MODULE_NAME: &CStr = ansi_str!("api-ms-win-core-synch-l1-2-0");
+ const WAIT_ON_ADDRESS: &CStr = ansi_str!("WaitOnAddress");
+ const WAKE_BY_ADDRESS_SINGLE: &CStr = ansi_str!("WakeByAddressSingle");
+
+ // Try loading the library and all the required functions.
+ // If any step fails, then they all fail.
+ let library = unsafe { Module::new(MODULE_NAME) }?;
+ let wait_on_address = library.proc_address(WAIT_ON_ADDRESS)?;
+ let wake_by_address_single = library.proc_address(WAKE_BY_ADDRESS_SINGLE)?;
+
+ c::WaitOnAddress::PTR.store(wait_on_address.as_ptr(), Ordering::Relaxed);
+ c::WakeByAddressSingle::PTR.store(wake_by_address_single.as_ptr(), Ordering::Relaxed);
+ Some(())
+ }
- #[allow(unused)]
- pub(in crate::sys) fn preload(module: Module) {
- unsafe {
- static SYMBOL_NAME: &CStr = ansi_str!(sym $symbol);
- if let Some(f) = module.proc_address(SYMBOL_NAME) {
- PTR.store(f.as_ptr(), Ordering::Relaxed);
- }
- }
- }
- }
- )*)
+ try_load();
}
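For readers unfamiliar with the pattern the rewritten compat.rs doc comment describes, here is a minimal standalone sketch: an `AtomicPtr` holds the resolved symbol and an accessor hands the function out only once loading has succeeded. The names `WakeFn`, `preload` and `wake_by_address` are illustrative placeholders, not the macro-generated std items, and the actual symbol lookup (GetProcAddress in std) is elided:

use std::ffi::c_void;
use std::ptr::{self, NonNull};
use std::sync::atomic::{AtomicPtr, Ordering};

type WakeFn = unsafe extern "system" fn(*mut c_void);

// Starts out null; set once by whatever resolves the symbol (a CRT
// initializer in std's case).
static WAKE_PTR: AtomicPtr<c_void> = AtomicPtr::new(ptr::null_mut());

// Store a resolved address (in std this comes from GetProcAddress).
fn preload(addr: NonNull<c_void>) {
    WAKE_PTR.store(addr.as_ptr(), Ordering::Relaxed);
}

// Hand the function out only if it was successfully loaded.
fn wake_by_address() -> Option<WakeFn> {
    NonNull::new(WAKE_PTR.load(Ordering::Relaxed))
        // SAFETY: WAKE_PTR only ever holds a pointer with this signature.
        .map(|f| unsafe { std::mem::transmute::<*mut c_void, WakeFn>(f.as_ptr()) })
}

unsafe extern "system" fn noop(_: *mut c_void) {}

fn main() {
    assert!(wake_by_address().is_none());
    let resolved: WakeFn = noop;
    preload(NonNull::new(resolved as *mut c_void).unwrap());
    assert!(wake_by_address().is_some());
}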
diff --git a/library/std/src/sys/windows/fs.rs b/library/std/src/sys/windows/fs.rs
index aed082b3e..378098038 100644
--- a/library/std/src/sys/windows/fs.rs
+++ b/library/std/src/sys/windows/fs.rs
@@ -1,9 +1,10 @@
use crate::os::windows::prelude::*;
+use crate::borrow::Cow;
use crate::ffi::OsString;
use crate::fmt;
-use crate::io::{self, Error, IoSlice, IoSliceMut, ReadBuf, SeekFrom};
-use crate::mem;
+use crate::io::{self, BorrowedCursor, Error, IoSlice, IoSliceMut, SeekFrom};
+use crate::mem::{self, MaybeUninit};
use crate::os::windows::io::{AsHandle, BorrowedHandle};
use crate::path::{Path, PathBuf};
use crate::ptr;
@@ -11,7 +12,7 @@ use crate::slice;
use crate::sync::Arc;
use crate::sys::handle::Handle;
use crate::sys::time::SystemTime;
-use crate::sys::{c, cvt};
+use crate::sys::{c, cvt, Align8};
use crate::sys_common::{AsInner, FromInner, IntoInner};
use crate::thread;
@@ -326,9 +327,15 @@ impl File {
cvt(c::GetFileInformationByHandle(self.handle.as_raw_handle(), &mut info))?;
let mut reparse_tag = 0;
if info.dwFileAttributes & c::FILE_ATTRIBUTE_REPARSE_POINT != 0 {
- let mut b = [0; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
- if let Ok((_, buf)) = self.reparse_point(&mut b) {
- reparse_tag = buf.ReparseTag;
+ let mut attr_tag: c::FILE_ATTRIBUTE_TAG_INFO = mem::zeroed();
+ cvt(c::GetFileInformationByHandleEx(
+ self.handle.as_raw_handle(),
+ c::FileAttributeTagInfo,
+ ptr::addr_of_mut!(attr_tag).cast(),
+ mem::size_of::<c::FILE_ATTRIBUTE_TAG_INFO>().try_into().unwrap(),
+ ))?;
+ if attr_tag.FileAttributes & c::FILE_ATTRIBUTE_REPARSE_POINT != 0 {
+ reparse_tag = attr_tag.ReparseTag;
}
}
Ok(FileAttr {
@@ -389,9 +396,15 @@ impl File {
attr.file_size = info.AllocationSize as u64;
attr.number_of_links = Some(info.NumberOfLinks);
if attr.file_type().is_reparse_point() {
- let mut b = [0; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
- if let Ok((_, buf)) = self.reparse_point(&mut b) {
- attr.reparse_tag = buf.ReparseTag;
+ let mut attr_tag: c::FILE_ATTRIBUTE_TAG_INFO = mem::zeroed();
+ cvt(c::GetFileInformationByHandleEx(
+ self.handle.as_raw_handle(),
+ c::FileAttributeTagInfo,
+ ptr::addr_of_mut!(attr_tag).cast(),
+ mem::size_of::<c::FILE_ATTRIBUTE_TAG_INFO>().try_into().unwrap(),
+ ))?;
+ if attr_tag.FileAttributes & c::FILE_ATTRIBUTE_REPARSE_POINT != 0 {
+ attr.reparse_tag = attr_tag.ReparseTag;
}
}
Ok(attr)
@@ -415,8 +428,8 @@ impl File {
self.handle.read_at(buf, offset)
}
- pub fn read_buf(&self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
- self.handle.read_buf(buf)
+ pub fn read_buf(&self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
+ self.handle.read_buf(cursor)
}
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
@@ -458,38 +471,46 @@ impl File {
Ok(Self { handle: self.handle.try_clone()? })
}
- fn reparse_point<'a>(
+ // NB: returned pointer is derived from `space`, and has provenance to
+ // match. A raw pointer is returned rather than a reference in order to
+ // avoid narrowing provenance to the actual `REPARSE_DATA_BUFFER`.
+ fn reparse_point(
&self,
- space: &'a mut [u8; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE],
- ) -> io::Result<(c::DWORD, &'a c::REPARSE_DATA_BUFFER)> {
+ space: &mut Align8<[MaybeUninit<u8>]>,
+ ) -> io::Result<(c::DWORD, *const c::REPARSE_DATA_BUFFER)> {
unsafe {
let mut bytes = 0;
cvt({
+ // Grab this in advance to avoid it invalidating the pointer
+ // we get from `space.0.as_mut_ptr()`.
+ let len = space.0.len();
c::DeviceIoControl(
self.handle.as_raw_handle(),
c::FSCTL_GET_REPARSE_POINT,
ptr::null_mut(),
0,
- space.as_mut_ptr() as *mut _,
- space.len() as c::DWORD,
+ space.0.as_mut_ptr().cast(),
+ len as c::DWORD,
&mut bytes,
ptr::null_mut(),
)
})?;
- Ok((bytes, &*(space.as_ptr() as *const c::REPARSE_DATA_BUFFER)))
+ const _: () = assert!(core::mem::align_of::<c::REPARSE_DATA_BUFFER>() <= 8);
+ Ok((bytes, space.0.as_ptr().cast::<c::REPARSE_DATA_BUFFER>()))
}
}
fn readlink(&self) -> io::Result<PathBuf> {
- let mut space = [0u8; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
+ let mut space = Align8([MaybeUninit::<u8>::uninit(); c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE]);
let (_bytes, buf) = self.reparse_point(&mut space)?;
unsafe {
- let (path_buffer, subst_off, subst_len, relative) = match buf.ReparseTag {
+ let (path_buffer, subst_off, subst_len, relative) = match (*buf).ReparseTag {
c::IO_REPARSE_TAG_SYMLINK => {
let info: *const c::SYMBOLIC_LINK_REPARSE_BUFFER =
- &buf.rest as *const _ as *const _;
+ ptr::addr_of!((*buf).rest).cast();
+ assert!(info.is_aligned());
(
- &(*info).PathBuffer as *const _ as *const u16,
+ ptr::addr_of!((*info).PathBuffer).cast::<u16>(),
(*info).SubstituteNameOffset / 2,
(*info).SubstituteNameLength / 2,
(*info).Flags & c::SYMLINK_FLAG_RELATIVE != 0,
@@ -497,9 +518,10 @@ impl File {
}
c::IO_REPARSE_TAG_MOUNT_POINT => {
let info: *const c::MOUNT_POINT_REPARSE_BUFFER =
- &buf.rest as *const _ as *const _;
+ ptr::addr_of!((*buf).rest).cast();
+ assert!(info.is_aligned());
(
- &(*info).PathBuffer as *const _ as *const u16,
+ ptr::addr_of!((*info).PathBuffer).cast::<u16>(),
(*info).SubstituteNameOffset / 2,
(*info).SubstituteNameLength / 2,
false,
@@ -512,7 +534,7 @@ impl File {
));
}
};
- let subst_ptr = path_buffer.offset(subst_off as isize);
+ let subst_ptr = path_buffer.add(subst_off.into());
let mut subst = slice::from_raw_parts(subst_ptr, subst_len as usize);
// Absolute paths start with an NT internal namespace prefix `\??\`
// We should not let it leak through.
@@ -551,6 +573,14 @@ impl File {
"Cannot set file timestamp to 0",
));
}
+ let is_max =
+ |t: c::FILETIME| t.dwLowDateTime == c::DWORD::MAX && t.dwHighDateTime == c::DWORD::MAX;
+ if times.accessed.map_or(false, is_max) || times.modified.map_or(false, is_max) {
+ return Err(io::const_io_error!(
+ io::ErrorKind::InvalidInput,
+ "Cannot set file timestamp to 0xFFFF_FFFF_FFFF_FFFF",
+ ));
+ }
cvt(unsafe {
c::SetFileTime(self.as_handle(), None, times.accessed.as_ref(), times.modified.as_ref())
})?;
@@ -649,27 +679,31 @@ impl File {
/// A buffer for holding directory entries.
struct DirBuff {
- buffer: Vec<u8>,
+ buffer: Box<Align8<[MaybeUninit<u8>; Self::BUFFER_SIZE]>>,
}
impl DirBuff {
+ const BUFFER_SIZE: usize = 1024;
fn new() -> Self {
- const BUFFER_SIZE: usize = 1024;
- Self { buffer: vec![0_u8; BUFFER_SIZE] }
+ Self {
+ // Safety: `Align8<[MaybeUninit<u8>; N]>` does not need
+ // initialization.
+ buffer: unsafe { Box::new_uninit().assume_init() },
+ }
}
fn capacity(&self) -> usize {
- self.buffer.len()
+ self.buffer.0.len()
}
fn as_mut_ptr(&mut self) -> *mut u8 {
- self.buffer.as_mut_ptr().cast()
+ self.buffer.0.as_mut_ptr().cast()
}
/// Returns a `DirBuffIter`.
fn iter(&self) -> DirBuffIter<'_> {
DirBuffIter::new(self)
}
}
-impl AsRef<[u8]> for DirBuff {
- fn as_ref(&self) -> &[u8] {
- &self.buffer
+impl AsRef<[MaybeUninit<u8>]> for DirBuff {
+ fn as_ref(&self) -> &[MaybeUninit<u8>] {
+ &self.buffer.0
}
}
@@ -677,7 +711,7 @@ impl AsRef<[u8]> for DirBuff {
///
/// Currently only returns file names (UTF-16 encoded).
struct DirBuffIter<'a> {
- buffer: Option<&'a [u8]>,
+ buffer: Option<&'a [MaybeUninit<u8>]>,
cursor: usize,
}
impl<'a> DirBuffIter<'a> {
@@ -686,23 +720,34 @@ impl<'a> DirBuffIter<'a> {
}
}
impl<'a> Iterator for DirBuffIter<'a> {
- type Item = (&'a [u16], bool);
+ type Item = (Cow<'a, [u16]>, bool);
fn next(&mut self) -> Option<Self::Item> {
use crate::mem::size_of;
let buffer = &self.buffer?[self.cursor..];
// Get the name and next entry from the buffer.
- // SAFETY: The buffer contains a `FILE_ID_BOTH_DIR_INFO` struct but the
- // last field (the file name) is unsized. So an offset has to be
- // used to get the file name slice.
+ // SAFETY:
+ // - The buffer contains a `FILE_ID_BOTH_DIR_INFO` struct but the last
+ // field (the file name) is unsized. So an offset has to be used to
+ // get the file name slice.
+ // - The OS has guaranteed initialization of the fields of
+ // `FILE_ID_BOTH_DIR_INFO` and the trailing filename (for at least
+ // `FileNameLength` bytes)
let (name, is_directory, next_entry) = unsafe {
let info = buffer.as_ptr().cast::<c::FILE_ID_BOTH_DIR_INFO>();
- let next_entry = (*info).NextEntryOffset as usize;
- let name = crate::slice::from_raw_parts(
- (*info).FileName.as_ptr().cast::<u16>(),
- (*info).FileNameLength as usize / size_of::<u16>(),
+ // While this is guaranteed to be aligned in documentation for
+ // https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_id_both_dir_info
+ // it does not seem that reality is so kind, and assuming this
+ // caused crashes in some cases (https://github.com/rust-lang/rust/issues/104530)
+ // presumably, this can be blamed on buggy filesystem drivers, but who knows.
+ let next_entry = ptr::addr_of!((*info).NextEntryOffset).read_unaligned() as usize;
+ let length = ptr::addr_of!((*info).FileNameLength).read_unaligned() as usize;
+ let attrs = ptr::addr_of!((*info).FileAttributes).read_unaligned();
+ let name = from_maybe_unaligned(
+ ptr::addr_of!((*info).FileName).cast::<u16>(),
+ length / size_of::<u16>(),
);
- let is_directory = ((*info).FileAttributes & c::FILE_ATTRIBUTE_DIRECTORY) != 0;
+ let is_directory = (attrs & c::FILE_ATTRIBUTE_DIRECTORY) != 0;
(name, is_directory, next_entry)
};
@@ -715,13 +760,21 @@ impl<'a> Iterator for DirBuffIter<'a> {
// Skip `.` and `..` pseudo entries.
const DOT: u16 = b'.' as u16;
- match name {
+ match &name[..] {
[DOT] | [DOT, DOT] => self.next(),
_ => Some((name, is_directory)),
}
}
}
+unsafe fn from_maybe_unaligned<'a>(p: *const u16, len: usize) -> Cow<'a, [u16]> {
+ if p.is_aligned() {
+ Cow::Borrowed(crate::slice::from_raw_parts(p, len))
+ } else {
+ Cow::Owned((0..len).map(|i| p.add(i).read_unaligned()).collect())
+ }
+}
+
/// Open a link relative to the parent directory, ensure no symlinks are followed.
fn open_link_no_reparse(parent: &File, name: &[u16], access: u32) -> io::Result<File> {
// This is implemented using the lower level `NtCreateFile` function as
@@ -1077,13 +1130,13 @@ fn remove_dir_all_iterative(f: &File, delete: fn(&File) -> io::Result<()>) -> io
if is_directory {
let child_dir = open_link_no_reparse(
&dir,
- name,
+ &name,
c::SYNCHRONIZE | c::DELETE | c::FILE_LIST_DIRECTORY,
)?;
dirlist.push(child_dir);
} else {
for i in 1..=MAX_RETRIES {
- let result = open_link_no_reparse(&dir, name, c::SYNCHRONIZE | c::DELETE);
+ let result = open_link_no_reparse(&dir, &name, c::SYNCHRONIZE | c::DELETE);
match result {
Ok(f) => delete(&f)?,
// Already deleted, so skip.
@@ -1337,18 +1390,19 @@ fn symlink_junction_inner(original: &Path, junction: &Path) -> io::Result<()> {
let h = f.as_inner().as_raw_handle();
unsafe {
- let mut data = [0u8; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
- let db = data.as_mut_ptr() as *mut c::REPARSE_MOUNTPOINT_DATA_BUFFER;
- let buf = &mut (*db).ReparseTarget as *mut c::WCHAR;
+ let mut data = Align8([MaybeUninit::<u8>::uninit(); c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE]);
+ let data_ptr = data.0.as_mut_ptr();
+ let db = data_ptr.cast::<c::REPARSE_MOUNTPOINT_DATA_BUFFER>();
+ let buf = ptr::addr_of_mut!((*db).ReparseTarget).cast::<c::WCHAR>();
let mut i = 0;
// FIXME: this conversion is very hacky
let v = br"\??\";
let v = v.iter().map(|x| *x as u16);
for c in v.chain(original.as_os_str().encode_wide()) {
- *buf.offset(i) = c;
+ *buf.add(i) = c;
i += 1;
}
- *buf.offset(i) = 0;
+ *buf.add(i) = 0;
i += 1;
(*db).ReparseTag = c::IO_REPARSE_TAG_MOUNT_POINT;
(*db).ReparseTargetMaximumLength = (i * 2) as c::WORD;
@@ -1359,7 +1413,7 @@ fn symlink_junction_inner(original: &Path, junction: &Path) -> io::Result<()> {
cvt(c::DeviceIoControl(
h as *mut _,
c::FSCTL_SET_REPARSE_POINT,
- data.as_ptr() as *mut _,
+ data_ptr.cast(),
(*db).ReparseDataLength + 8,
ptr::null_mut(),
0,
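The fs.rs changes above consistently replace reference-based field access with `ptr::addr_of!` projections and `read_unaligned`, both to keep provenance over the whole buffer and because the OS does not always hand back aligned entries. A standalone sketch of that idiom, using a hypothetical `Entry` layout in place of `FILE_ID_BOTH_DIR_INFO`:

use std::ptr;

#[repr(C)]
struct Entry {
    name_len: u32,  // length of `name` in bytes; other header fields elided
    name: [u16; 1], // real data continues past the declared array
}

/// Read the name from an entry at the start of `buf`, which may be unaligned.
///
/// Safety: `buf` must contain a complete, initialized entry.
unsafe fn entry_name(buf: &[u8]) -> Vec<u16> {
    unsafe {
        let entry = buf.as_ptr().cast::<Entry>();
        // `addr_of!` projects the fields without creating references, so no
        // alignment requirement is imposed beyond the (unaligned) reads.
        let len = ptr::addr_of!((*entry).name_len).read_unaligned() as usize / 2;
        let name = ptr::addr_of!((*entry).name).cast::<u16>();
        (0..len).map(|i| name.add(i).read_unaligned()).collect()
    }
}

fn main() {
    // name_len = 4 bytes, followed by the name "hi", in native byte order.
    let mut buf = Vec::new();
    buf.extend_from_slice(&4u32.to_ne_bytes());
    buf.extend_from_slice(&(b'h' as u16).to_ne_bytes());
    buf.extend_from_slice(&(b'i' as u16).to_ne_bytes());
    let name = unsafe { entry_name(&buf) };
    assert_eq!(String::from_utf16_lossy(&name), "hi");
}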
diff --git a/library/std/src/sys/windows/handle.rs b/library/std/src/sys/windows/handle.rs
index e24b09cc9..ae33d48c6 100644
--- a/library/std/src/sys/windows/handle.rs
+++ b/library/std/src/sys/windows/handle.rs
@@ -4,7 +4,7 @@
mod tests;
use crate::cmp;
-use crate::io::{self, ErrorKind, IoSlice, IoSliceMut, Read, ReadBuf};
+use crate::io::{self, BorrowedCursor, ErrorKind, IoSlice, IoSliceMut, Read};
use crate::mem;
use crate::os::windows::io::{
AsHandle, AsRawHandle, BorrowedHandle, FromRawHandle, IntoRawHandle, OwnedHandle, RawHandle,
@@ -112,18 +112,16 @@ impl Handle {
}
}
- pub fn read_buf(&self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
- let res = unsafe {
- self.synchronous_read(buf.unfilled_mut().as_mut_ptr(), buf.remaining(), None)
- };
+ pub fn read_buf(&self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
+ let res =
+ unsafe { self.synchronous_read(cursor.as_mut().as_mut_ptr(), cursor.capacity(), None) };
match res {
Ok(read) => {
// Safety: `read` bytes were written to the initialized portion of the buffer
unsafe {
- buf.assume_init(read as usize);
+ cursor.advance(read as usize);
}
- buf.add_filled(read as usize);
Ok(())
}
diff --git a/library/std/src/sys/windows/io.rs b/library/std/src/sys/windows/io.rs
index fb06df1f8..2cc34c986 100644
--- a/library/std/src/sys/windows/io.rs
+++ b/library/std/src/sys/windows/io.rs
@@ -1,6 +1,10 @@
use crate::marker::PhantomData;
+use crate::mem::size_of;
+use crate::os::windows::io::{AsHandle, AsRawHandle, BorrowedHandle};
use crate::slice;
-use crate::sys::c;
+use crate::sys::{c, Align8};
+use core;
+use libc;
#[derive(Copy, Clone)]
#[repr(transparent)]
@@ -78,3 +82,73 @@ impl<'a> IoSliceMut<'a> {
unsafe { slice::from_raw_parts_mut(self.vec.buf as *mut u8, self.vec.len as usize) }
}
}
+
+pub fn is_terminal(h: &impl AsHandle) -> bool {
+ unsafe { handle_is_console(h.as_handle()) }
+}
+
+unsafe fn handle_is_console(handle: BorrowedHandle<'_>) -> bool {
+ let handle = handle.as_raw_handle();
+
+ // A null handle means the process has no console.
+ if handle.is_null() {
+ return false;
+ }
+
+ let mut out = 0;
+ if c::GetConsoleMode(handle, &mut out) != 0 {
+ // False positives aren't possible. If we got a console then we definitely have a console.
+ return true;
+ }
+
+ // At this point, we *could* have a false negative. We can determine that this is a true
+ // negative if we can detect the presence of a console on any of the standard I/O streams. If
+ // another stream has a console, then we know we're in a Windows console and can therefore
+ // trust the negative.
+ for std_handle in [c::STD_INPUT_HANDLE, c::STD_OUTPUT_HANDLE, c::STD_ERROR_HANDLE] {
+ let std_handle = c::GetStdHandle(std_handle);
+ if !std_handle.is_null()
+ && std_handle != handle
+ && c::GetConsoleMode(std_handle, &mut out) != 0
+ {
+ return false;
+ }
+ }
+
+ // Otherwise, we fall back to an msys hack to see if we can detect the presence of a pty.
+ msys_tty_on(handle)
+}
+
+unsafe fn msys_tty_on(handle: c::HANDLE) -> bool {
+ // Early return if the handle is not a pipe.
+ if c::GetFileType(handle) != c::FILE_TYPE_PIPE {
+ return false;
+ }
+
+ const SIZE: usize = size_of::<c::FILE_NAME_INFO>() + c::MAX_PATH * size_of::<c::WCHAR>();
+ let mut name_info_bytes = Align8([0u8; SIZE]);
+ let res = c::GetFileInformationByHandleEx(
+ handle,
+ c::FileNameInfo,
+ name_info_bytes.0.as_mut_ptr() as *mut libc::c_void,
+ SIZE as u32,
+ );
+ if res == 0 {
+ return false;
+ }
+ let name_info: &c::FILE_NAME_INFO = &*(name_info_bytes.0.as_ptr() as *const c::FILE_NAME_INFO);
+ let name_len = name_info.FileNameLength as usize / 2;
+ // Offset to get the `FileName` field.
+ let name_ptr = name_info_bytes.0.as_ptr().offset(size_of::<c::DWORD>() as isize).cast::<u16>();
+ let s = core::slice::from_raw_parts(name_ptr, name_len);
+ let name = String::from_utf16_lossy(s);
+ // Get the file name only.
+ let name = name.rsplit('\\').next().unwrap_or(&name);
+ // This checks whether 'pty' exists in the file name, which indicates that
+ // a pseudo-terminal is attached. To mitigate against false positives
+ // (e.g., an actual file name that contains 'pty'), we also require that
+ // the file name begins with either the strings 'msys-' or 'cygwin-'.
+ let is_msys = name.starts_with("msys-") || name.starts_with("cygwin-");
+ let is_pty = name.contains("-pty");
+ is_msys && is_pty
+}
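From a caller's point of view, this console/pty detection eventually backs the public `IsTerminal` trait. A small usage sketch of the kind of decision it feeds, assuming a toolchain where `std::io::IsTerminal` is available:

use std::io::{self, IsTerminal, Write};

fn main() -> io::Result<()> {
    let mut out = io::stdout();
    if out.is_terminal() {
        // A console or an msys/cygwin pty: ANSI colors are reasonable.
        writeln!(out, "\x1b[32mok\x1b[0m")?;
    } else {
        // Redirected to a file or a pipe: keep the output plain.
        writeln!(out, "ok")?;
    }
    Ok(())
}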
diff --git a/library/std/src/sys/windows/locks/mod.rs b/library/std/src/sys/windows/locks/mod.rs
index d412ff152..602a2d623 100644
--- a/library/std/src/sys/windows/locks/mod.rs
+++ b/library/std/src/sys/windows/locks/mod.rs
@@ -3,4 +3,4 @@ mod mutex;
mod rwlock;
pub use condvar::{Condvar, MovableCondvar};
pub use mutex::{MovableMutex, Mutex};
-pub use rwlock::{MovableRwLock, RwLock};
+pub use rwlock::MovableRwLock;
diff --git a/library/std/src/sys/windows/locks/mutex.rs b/library/std/src/sys/windows/locks/mutex.rs
index f91e8f9f5..91207f5f4 100644
--- a/library/std/src/sys/windows/locks/mutex.rs
+++ b/library/std/src/sys/windows/locks/mutex.rs
@@ -37,8 +37,6 @@ impl Mutex {
pub const fn new() -> Mutex {
Mutex { srwlock: UnsafeCell::new(c::SRWLOCK_INIT) }
}
- #[inline]
- pub unsafe fn init(&mut self) {}
#[inline]
pub unsafe fn lock(&self) {
diff --git a/library/std/src/sys/windows/mod.rs b/library/std/src/sys/windows/mod.rs
index b3f6d2d0a..eab9b9612 100644
--- a/library/std/src/sys/windows/mod.rs
+++ b/library/std/src/sys/windows/mod.rs
@@ -2,6 +2,7 @@
use crate::ffi::{CStr, OsStr, OsString};
use crate::io::ErrorKind;
+use crate::mem::MaybeUninit;
use crate::os::windows::ffi::{OsStrExt, OsStringExt};
use crate::path::PathBuf;
use crate::time::Duration;
@@ -47,7 +48,7 @@ cfg_if::cfg_if! {
// SAFETY: must be called only once during runtime initialization.
// NOTE: this is not guaranteed to run, for example when Rust code is called externally.
-pub unsafe fn init(_argc: isize, _argv: *const *const u8) {
+pub unsafe fn init(_argc: isize, _argv: *const *const u8, _sigpipe: u8) {
stack_overflow::init();
// Normally, `thread::spawn` will call `Thread::set_name` but since this thread already
@@ -204,8 +205,8 @@ where
// This initial size also works around `GetFullPathNameW` returning
// incorrect size hints for some short paths:
// https://github.com/dylni/normpath/issues/5
- let mut stack_buf = [0u16; 512];
- let mut heap_buf = Vec::new();
+ let mut stack_buf: [MaybeUninit<u16>; 512] = MaybeUninit::uninit_array();
+ let mut heap_buf: Vec<MaybeUninit<u16>> = Vec::new();
unsafe {
let mut n = stack_buf.len();
loop {
@@ -214,6 +215,11 @@ where
} else {
let extra = n - heap_buf.len();
heap_buf.reserve(extra);
+ // We used `reserve` and not `reserve_exact`, so in theory we
+ // may have gotten more than requested. If so, we'd like to use
+ // it... so long as we won't cause overflow.
+ n = heap_buf.capacity().min(c::DWORD::MAX as usize);
+ // Safety: MaybeUninit<u16> does not need initialization
heap_buf.set_len(n);
&mut heap_buf[..]
};
@@ -228,13 +234,13 @@ where
// error" is still 0 then we interpret it as a 0 length buffer and
// not an actual error.
c::SetLastError(0);
- let k = match f1(buf.as_mut_ptr(), n as c::DWORD) {
+ let k = match f1(buf.as_mut_ptr().cast::<u16>(), n as c::DWORD) {
0 if c::GetLastError() == 0 => 0,
0 => return Err(crate::io::Error::last_os_error()),
n => n,
} as usize;
if k == n && c::GetLastError() == c::ERROR_INSUFFICIENT_BUFFER {
- n *= 2;
+ n = n.saturating_mul(2).min(c::DWORD::MAX as usize);
} else if k > n {
n = k;
} else if k == n {
@@ -244,7 +250,9 @@ where
// Therefore k never equals n.
unreachable!();
} else {
- return Ok(f2(&buf[..k]));
+ // Safety: First `k` values are initialized.
+ let slice: &[u16] = MaybeUninit::slice_assume_init_ref(&buf[..k]);
+ return Ok(f2(slice));
}
}
}
@@ -321,3 +329,11 @@ pub fn abort_internal() -> ! {
}
crate::intrinsics::abort();
}
+
+/// Align the inner value to 8 bytes.
+///
+/// This is enough for almost all of the buffers we're likely to work with in
+/// the Windows APIs we use.
+#[repr(C, align(8))]
+#[derive(Copy, Clone)]
+pub(crate) struct Align8<T: ?Sized>(pub T);
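A quick sketch of what the `Align8` wrapper buys: a byte buffer whose start is guaranteed to satisfy any Windows structure with alignment of at most 8. The wrapper is re-declared locally here purely for illustration:

use std::mem::{align_of, MaybeUninit};

#[repr(C, align(8))]
struct Align8<T: ?Sized>(pub T);

fn main() {
    let buf = Align8([MaybeUninit::<u8>::uninit(); 32]);
    // The array starts at the start of the 8-aligned wrapper.
    assert_eq!(buf.0.as_ptr() as usize % 8, 0);
    // So the buffer may be reinterpreted as any type with alignment <= 8,
    // provided the relevant bytes have actually been initialized first.
    assert!(align_of::<u64>() <= 8);
}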
diff --git a/library/std/src/sys/windows/os.rs b/library/std/src/sys/windows/os.rs
index bcac996c0..352337ba3 100644
--- a/library/std/src/sys/windows/os.rs
+++ b/library/std/src/sys/windows/os.rs
@@ -99,11 +99,11 @@ impl Iterator for Env {
}
let p = self.cur as *const u16;
let mut len = 0;
- while *p.offset(len) != 0 {
+ while *p.add(len) != 0 {
len += 1;
}
- let s = slice::from_raw_parts(p, len as usize);
- self.cur = self.cur.offset(len + 1);
+ let s = slice::from_raw_parts(p, len);
+ self.cur = self.cur.add(len + 1);
// Windows allows environment variables to start with an equals
// symbol (in any other position, this is the separator between
diff --git a/library/std/src/sys/windows/os_str.rs b/library/std/src/sys/windows/os_str.rs
index 11883f150..4bdd8c505 100644
--- a/library/std/src/sys/windows/os_str.rs
+++ b/library/std/src/sys/windows/os_str.rs
@@ -164,9 +164,7 @@ impl Slice {
}
pub fn to_owned(&self) -> Buf {
- let mut buf = Wtf8Buf::with_capacity(self.inner.len());
- buf.push_wtf8(&self.inner);
- Buf { inner: buf }
+ Buf { inner: self.inner.to_owned() }
}
pub fn clone_into(&self, buf: &mut Buf) {
diff --git a/library/std/src/sys/windows/path/tests.rs b/library/std/src/sys/windows/path/tests.rs
index 6eab38cab..623c62361 100644
--- a/library/std/src/sys/windows/path/tests.rs
+++ b/library/std/src/sys/windows/path/tests.rs
@@ -105,7 +105,7 @@ fn test_parse_prefix_verbatim_device() {
assert_eq!(prefix, parse_prefix(r"\\?/C:\windows\system32\notepad.exe"));
}
-// See #93586 for more infomation.
+// See #93586 for more information.
#[test]
fn test_windows_prefix_components() {
use crate::path::Path;
diff --git a/library/std/src/sys/windows/process.rs b/library/std/src/sys/windows/process.rs
index 02d5af471..9cbb4ef19 100644
--- a/library/std/src/sys/windows/process.rs
+++ b/library/std/src/sys/windows/process.rs
@@ -16,6 +16,7 @@ use crate::os::windows::ffi::{OsStrExt, OsStringExt};
use crate::os::windows::io::{AsHandle, AsRawHandle, BorrowedHandle, FromRawHandle, IntoRawHandle};
use crate::path::{Path, PathBuf};
use crate::ptr;
+use crate::sync::Mutex;
use crate::sys::args::{self, Arg};
use crate::sys::c;
use crate::sys::c::NonZeroDWORD;
@@ -25,7 +26,6 @@ use crate::sys::handle::Handle;
use crate::sys::path;
use crate::sys::pipe::{self, AnonPipe};
use crate::sys::stdio;
-use crate::sys_common::mutex::StaticMutex;
use crate::sys_common::process::{CommandEnv, CommandEnvs};
use crate::sys_common::IntoInner;
@@ -301,9 +301,9 @@ impl Command {
//
// For more information, msdn also has an article about this race:
// https://support.microsoft.com/kb/315939
- static CREATE_PROCESS_LOCK: StaticMutex = StaticMutex::new();
+ static CREATE_PROCESS_LOCK: Mutex<()> = Mutex::new(());
- let _guard = unsafe { CREATE_PROCESS_LOCK.lock() };
+ let _guard = CREATE_PROCESS_LOCK.lock();
let mut pipes = StdioPipes { stdin: None, stdout: None, stderr: None };
let null = Stdio::Null;
diff --git a/library/std/src/sys/windows/rand.rs b/library/std/src/sys/windows/rand.rs
index f8fd93a73..b5a49489d 100644
--- a/library/std/src/sys/windows/rand.rs
+++ b/library/std/src/sys/windows/rand.rs
@@ -1,35 +1,106 @@
-use crate::io;
+//! # Random key generation
+//!
+//! This module wraps the RNG provided by the OS. There are a few different
+//! ways to interface with the OS RNG so it's worth exploring each of the options.
+//! Note that at the time of writing these all go through the (undocumented)
+//! `bcryptPrimitives.dll`, but they use different routes to get there.
+//!
+//! Originally we were using [`RtlGenRandom`], however that function is
+//! deprecated and warns it "may be altered or unavailable in subsequent versions".
+//!
+//! So we switched to [`BCryptGenRandom`] with the `BCRYPT_USE_SYSTEM_PREFERRED_RNG`
+//! flag to query and find the system configured RNG. However, this change caused a small
+//! but significant number of users to experience panics caused by a failure of
+//! this function. See [#94098].
+//!
+//! The current version falls back to using `BCryptOpenAlgorithmProvider` if
+//! `BCRYPT_USE_SYSTEM_PREFERRED_RNG` fails for any reason.
+//!
+//! [#94098]: https://github.com/rust-lang/rust/issues/94098
+//! [`RtlGenRandom`]: https://docs.microsoft.com/en-us/windows/win32/api/ntsecapi/nf-ntsecapi-rtlgenrandom
+//! [`BCryptGenRandom`]: https://docs.microsoft.com/en-us/windows/win32/api/bcrypt/nf-bcrypt-bcryptgenrandom
use crate::mem;
use crate::ptr;
use crate::sys::c;
+/// Generates high quality secure random keys for use by [`HashMap`].
+///
+/// This is used to seed the default [`RandomState`].
+///
+/// [`HashMap`]: crate::collections::HashMap
+/// [`RandomState`]: crate::collections::hash_map::RandomState
pub fn hashmap_random_keys() -> (u64, u64) {
- let mut v = (0, 0);
- let ret = unsafe {
- c::BCryptGenRandom(
- ptr::null_mut(),
- &mut v as *mut _ as *mut u8,
- mem::size_of_val(&v) as c::ULONG,
- c::BCRYPT_USE_SYSTEM_PREFERRED_RNG,
- )
- };
- if ret != 0 { fallback_rng() } else { v }
+ Rng::SYSTEM.gen_random_keys().unwrap_or_else(fallback_rng)
}
-/// Generate random numbers using the fallback RNG function (RtlGenRandom)
-#[cfg(not(target_vendor = "uwp"))]
-#[inline(never)]
-fn fallback_rng() -> (u64, u64) {
- let mut v = (0, 0);
- let ret =
- unsafe { c::RtlGenRandom(&mut v as *mut _ as *mut u8, mem::size_of_val(&v) as c::ULONG) };
+struct Rng {
+ algorithm: c::BCRYPT_ALG_HANDLE,
+ flags: u32,
+}
+impl Rng {
+ const SYSTEM: Self = unsafe { Self::new(ptr::null_mut(), c::BCRYPT_USE_SYSTEM_PREFERRED_RNG) };
+
+ /// Create the RNG from an existing algorithm handle.
+ ///
+ /// # Safety
+ ///
+ /// The handle must either be null or a valid algorithm handle.
+ const unsafe fn new(algorithm: c::BCRYPT_ALG_HANDLE, flags: u32) -> Self {
+ Self { algorithm, flags }
+ }
+
+ /// Open a handle to the RNG algorithm.
+ fn open() -> Result<Self, c::NTSTATUS> {
+ use crate::sync::atomic::AtomicPtr;
+ use crate::sync::atomic::Ordering::{Acquire, Release};
+
+ // An atomic is used so we don't need to reopen the handle every time.
+ static HANDLE: AtomicPtr<crate::ffi::c_void> = AtomicPtr::new(ptr::null_mut());
+
+ let mut handle = HANDLE.load(Acquire);
+ if handle.is_null() {
+ let status = unsafe {
+ c::BCryptOpenAlgorithmProvider(
+ &mut handle,
+ c::BCRYPT_RNG_ALGORITHM.as_ptr(),
+ ptr::null(),
+ 0,
+ )
+ };
+ if c::nt_success(status) {
+ // If another thread opens a handle first then use that handle instead.
+ let result = HANDLE.compare_exchange(ptr::null_mut(), handle, Release, Acquire);
+ if let Err(previous_handle) = result {
+ // Close our handle and return the previous one.
+ unsafe { c::BCryptCloseAlgorithmProvider(handle, 0) };
+ handle = previous_handle;
+ }
+ Ok(unsafe { Self::new(handle, 0) })
+ } else {
+ Err(status)
+ }
+ } else {
+ Ok(unsafe { Self::new(handle, 0) })
+ }
+ }
- if ret != 0 { v } else { panic!("fallback RNG broken: {}", io::Error::last_os_error()) }
+ fn gen_random_keys(self) -> Result<(u64, u64), c::NTSTATUS> {
+ let mut v = (0, 0);
+ let status = unsafe {
+ let size = mem::size_of_val(&v).try_into().unwrap();
+ c::BCryptGenRandom(self.algorithm, ptr::addr_of_mut!(v).cast(), size, self.flags)
+ };
+ if c::nt_success(status) { Ok(v) } else { Err(status) }
+ }
}
-/// We can't use RtlGenRandom with UWP, so there is no fallback
-#[cfg(target_vendor = "uwp")]
+/// Generate random numbers using the fallback RNG function
#[inline(never)]
-fn fallback_rng() -> (u64, u64) {
- panic!("fallback RNG broken: RtlGenRandom() not supported on UWP");
+fn fallback_rng(rng_status: c::NTSTATUS) -> (u64, u64) {
+ match Rng::open().and_then(|rng| rng.gen_random_keys()) {
+ Ok(keys) => keys,
+ Err(status) => {
+ panic!("RNG broken: {rng_status:#x}, fallback RNG broken: {status:#x}")
+ }
+ }
}
diff --git a/library/std/src/sys/windows/stdio.rs b/library/std/src/sys/windows/stdio.rs
index a001d6b98..70c9b14a0 100644
--- a/library/std/src/sys/windows/stdio.rs
+++ b/library/std/src/sys/windows/stdio.rs
@@ -3,6 +3,7 @@
use crate::char::decode_utf16;
use crate::cmp;
use crate::io;
+use crate::mem::MaybeUninit;
use crate::os::windows::io::{FromRawHandle, IntoRawHandle};
use crate::ptr;
use crate::str;
@@ -169,13 +170,14 @@ fn write(
}
fn write_valid_utf8_to_console(handle: c::HANDLE, utf8: &str) -> io::Result<usize> {
- let mut utf16 = [0u16; MAX_BUFFER_SIZE / 2];
+ let mut utf16 = [MaybeUninit::<u16>::uninit(); MAX_BUFFER_SIZE / 2];
let mut len_utf16 = 0;
for (chr, dest) in utf8.encode_utf16().zip(utf16.iter_mut()) {
- *dest = chr;
+ *dest = MaybeUninit::new(chr);
len_utf16 += 1;
}
- let utf16 = &utf16[..len_utf16];
+ // Safety: We've initialized `len_utf16` values.
+ let utf16: &[u16] = unsafe { MaybeUninit::slice_assume_init_ref(&utf16[..len_utf16]) };
let mut written = write_u16s(handle, &utf16)?;
@@ -250,11 +252,14 @@ impl io::Read for Stdin {
return Ok(bytes_copied);
} else if buf.len() - bytes_copied < 4 {
// Not enough space to get a UTF-8 byte. We will use the incomplete UTF8.
- let mut utf16_buf = [0u16; 1];
+ let mut utf16_buf = [MaybeUninit::new(0); 1];
// Read one u16 character.
let read = read_u16s_fixup_surrogates(handle, &mut utf16_buf, 1, &mut self.surrogate)?;
// Read bytes, using the (now-empty) self.incomplete_utf8 as extra space.
- let read_bytes = utf16_to_utf8(&utf16_buf[..read], &mut self.incomplete_utf8.bytes)?;
+ let read_bytes = utf16_to_utf8(
+ unsafe { MaybeUninit::slice_assume_init_ref(&utf16_buf[..read]) },
+ &mut self.incomplete_utf8.bytes,
+ )?;
// Read in the bytes from incomplete_utf8 until the buffer is full.
self.incomplete_utf8.len = read_bytes as u8;
@@ -262,15 +267,18 @@ impl io::Read for Stdin {
bytes_copied += self.incomplete_utf8.read(&mut buf[bytes_copied..]);
Ok(bytes_copied)
} else {
- let mut utf16_buf = [0u16; MAX_BUFFER_SIZE / 2];
+ let mut utf16_buf = [MaybeUninit::<u16>::uninit(); MAX_BUFFER_SIZE / 2];
+
// In the worst case, a UTF-8 string can take 3 bytes for every `u16` of a UTF-16. So
// we can read at most a third of `buf.len()` chars and uphold the guarantee no data gets
// lost.
let amount = cmp::min(buf.len() / 3, utf16_buf.len());
let read =
read_u16s_fixup_surrogates(handle, &mut utf16_buf, amount, &mut self.surrogate)?;
-
- match utf16_to_utf8(&utf16_buf[..read], buf) {
+ // Safety: `read_u16s_fixup_surrogates` returns the number of items
+ // initialized.
+ let utf16s = unsafe { MaybeUninit::slice_assume_init_ref(&utf16_buf[..read]) };
+ match utf16_to_utf8(utf16s, buf) {
Ok(value) => return Ok(bytes_copied + value),
Err(e) => return Err(e),
}
@@ -283,14 +291,14 @@ impl io::Read for Stdin {
// This is a best effort, and might not work if we are not the only reader on Stdin.
fn read_u16s_fixup_surrogates(
handle: c::HANDLE,
- buf: &mut [u16],
+ buf: &mut [MaybeUninit<u16>],
mut amount: usize,
surrogate: &mut u16,
) -> io::Result<usize> {
// Insert possibly remaining unpaired surrogate from last read.
let mut start = 0;
if *surrogate != 0 {
- buf[0] = *surrogate;
+ buf[0] = MaybeUninit::new(*surrogate);
*surrogate = 0;
start = 1;
if amount == 1 {
@@ -303,7 +311,10 @@ fn read_u16s_fixup_surrogates(
let mut amount = read_u16s(handle, &mut buf[start..amount])? + start;
if amount > 0 {
- let last_char = buf[amount - 1];
+ // Safety: The returned `amount` is the number of values initialized,
+ // and it is not 0, so we know that `buf[amount - 1]` has been
+ // initialized.
+ let last_char = unsafe { buf[amount - 1].assume_init() };
if last_char >= 0xD800 && last_char <= 0xDBFF {
// high surrogate
*surrogate = last_char;
@@ -313,7 +324,8 @@ fn read_u16s_fixup_surrogates(
Ok(amount)
}
-fn read_u16s(handle: c::HANDLE, buf: &mut [u16]) -> io::Result<usize> {
+// Returns `Ok(n)` if it initialized `n` values in `buf`.
+fn read_u16s(handle: c::HANDLE, buf: &mut [MaybeUninit<u16>]) -> io::Result<usize> {
// Configure the `pInputControl` parameter to not only return on `\r\n` but also Ctrl-Z, the
// traditional DOS method to indicate end of character stream / user input (SUB).
// See #38274 and https://stackoverflow.com/questions/43836040/win-api-readconsole.
@@ -346,8 +358,9 @@ fn read_u16s(handle: c::HANDLE, buf: &mut [u16]) -> io::Result<usize> {
}
break;
}
-
- if amount > 0 && buf[amount as usize - 1] == CTRL_Z {
+ // Safety: if `amount > 0`, then that many bytes were written, so
+ // `buf[amount as usize - 1]` has been initialized.
+ if amount > 0 && unsafe { buf[amount as usize - 1].assume_init() } == CTRL_Z {
amount -= 1;
}
Ok(amount as usize)
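The stdio changes repeatedly fill a `MaybeUninit` buffer and then view only the written prefix as initialized. Inside std this uses the unstable `MaybeUninit::slice_assume_init_ref`; the standalone sketch below shows the same pattern with a raw-pointer slice standing in for that helper:

use std::mem::MaybeUninit;

/// Encode as much of `src` as fits into `buf`; returns how many u16s were written.
fn encode_utf16_into(src: &str, buf: &mut [MaybeUninit<u16>]) -> usize {
    let mut len = 0;
    for (unit, slot) in src.encode_utf16().zip(buf.iter_mut()) {
        *slot = MaybeUninit::new(unit);
        len += 1;
    }
    len
}

fn main() {
    let mut buf = [MaybeUninit::<u16>::uninit(); 16];
    let len = encode_utf16_into("héllo", &mut buf);
    // SAFETY: the first `len` elements were just written above, and
    // MaybeUninit<u16> has the same layout as u16.
    let utf16: &[u16] = unsafe { std::slice::from_raw_parts(buf.as_ptr().cast(), len) };
    assert_eq!(String::from_utf16_lossy(utf16), "héllo");
}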
diff --git a/library/std/src/sys/windows/thread_local_dtor.rs b/library/std/src/sys/windows/thread_local_dtor.rs
index 25d1c6e8e..9707a95df 100644
--- a/library/std/src/sys/windows/thread_local_dtor.rs
+++ b/library/std/src/sys/windows/thread_local_dtor.rs
@@ -8,10 +8,14 @@
#[thread_local]
static mut DESTRUCTORS: Vec<(*mut u8, unsafe extern "C" fn(*mut u8))> = Vec::new();
+// Ensure this can never be inlined because otherwise this may break in dylibs.
+// See #44391.
+#[inline(never)]
pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
DESTRUCTORS.push((t, dtor));
}
+#[inline(never)] // See comment above
/// Runs destructors. This should not be called until thread exit.
pub unsafe fn run_keyless_dtors() {
// Drop all the destructors.
diff --git a/library/std/src/sys/windows/thread_local_key.rs b/library/std/src/sys/windows/thread_local_key.rs
index ec670238e..17628b757 100644
--- a/library/std/src/sys/windows/thread_local_key.rs
+++ b/library/std/src/sys/windows/thread_local_key.rs
@@ -1,11 +1,16 @@
-use crate::mem::ManuallyDrop;
+use crate::cell::UnsafeCell;
use crate::ptr;
-use crate::sync::atomic::AtomicPtr;
-use crate::sync::atomic::Ordering::SeqCst;
+use crate::sync::atomic::{
+ AtomicPtr, AtomicU32,
+ Ordering::{AcqRel, Acquire, Relaxed, Release},
+};
use crate::sys::c;
-pub type Key = c::DWORD;
-pub type Dtor = unsafe extern "C" fn(*mut u8);
+#[cfg(test)]
+mod tests;
+
+type Key = c::DWORD;
+type Dtor = unsafe extern "C" fn(*mut u8);
// Turns out, like pretty much everything, Windows is pretty close to the
// functionality that Unix provides, but slightly different! In the case of
@@ -22,60 +27,109 @@ pub type Dtor = unsafe extern "C" fn(*mut u8);
// To accomplish this feat, we perform a number of tricks, all contained
// within this module:
//
-// * All TLS destructors are tracked by *us*, not the windows runtime. This
+// * All TLS destructors are tracked by *us*, not the Windows runtime. This
// means that we have a global list of destructors for each TLS key that
// we know about.
// * When a thread exits, we run over the entire list and run dtors for all
// non-null keys. This attempts to match Unix semantics in this regard.
//
-// This ends up having the overhead of using a global list, having some
-// locks here and there, and in general just adding some more code bloat. We
-// attempt to optimize runtime by forgetting keys that don't have
-// destructors, but this only gets us so far.
-//
// For more details and nitty-gritty, see the code sections below!
//
// [1]: https://www.codeproject.com/Articles/8113/Thread-Local-Storage-The-C-Way
-// [2]: https://github.com/ChromiumWebApps/chromium/blob/master/base
-// /threading/thread_local_storage_win.cc#L42
+// [2]: https://github.com/ChromiumWebApps/chromium/blob/master/base/threading/thread_local_storage_win.cc#L42
-// -------------------------------------------------------------------------
-// Native bindings
-//
-// This section is just raw bindings to the native functions that Windows
-// provides, There's a few extra calls to deal with destructors.
+pub struct StaticKey {
+ /// The key value shifted up by one. Since TLS_OUT_OF_INDEXES == DWORD::MAX
+ /// is not a valid key value, this allows us to use zero as sentinel value
+ /// without risking overflow.
+ key: AtomicU32,
+ dtor: Option<Dtor>,
+ next: AtomicPtr<StaticKey>,
+ /// Currently, destructors cannot be unregistered, so we cannot use racy
+ /// initialization for keys. Instead, we need to synchronize initialization.
+ /// Use the Windows-provided `Once` since it does not require TLS.
+ once: UnsafeCell<c::INIT_ONCE>,
+}
-#[inline]
-pub unsafe fn create(dtor: Option<Dtor>) -> Key {
- let key = c::TlsAlloc();
- assert!(key != c::TLS_OUT_OF_INDEXES);
- if let Some(f) = dtor {
- register_dtor(key, f);
+impl StaticKey {
+ #[inline]
+ pub const fn new(dtor: Option<Dtor>) -> StaticKey {
+ StaticKey {
+ key: AtomicU32::new(0),
+ dtor,
+ next: AtomicPtr::new(ptr::null_mut()),
+ once: UnsafeCell::new(c::INIT_ONCE_STATIC_INIT),
+ }
}
- key
-}
-#[inline]
-pub unsafe fn set(key: Key, value: *mut u8) {
- let r = c::TlsSetValue(key, value as c::LPVOID);
- debug_assert!(r != 0);
-}
+ #[inline]
+ pub unsafe fn set(&'static self, val: *mut u8) {
+ let r = c::TlsSetValue(self.key(), val.cast());
+ debug_assert_eq!(r, c::TRUE);
+ }
-#[inline]
-pub unsafe fn get(key: Key) -> *mut u8 {
- c::TlsGetValue(key) as *mut u8
-}
+ #[inline]
+ pub unsafe fn get(&'static self) -> *mut u8 {
+ c::TlsGetValue(self.key()).cast()
+ }
-#[inline]
-pub unsafe fn destroy(_key: Key) {
- rtabort!("can't destroy tls keys on windows")
-}
+ #[inline]
+ unsafe fn key(&'static self) -> Key {
+ match self.key.load(Acquire) {
+ 0 => self.init(),
+ key => key - 1,
+ }
+ }
+
+ #[cold]
+ unsafe fn init(&'static self) -> Key {
+ if self.dtor.is_some() {
+ let mut pending = c::FALSE;
+ let r = c::InitOnceBeginInitialize(self.once.get(), 0, &mut pending, ptr::null_mut());
+ assert_eq!(r, c::TRUE);
-#[inline]
-pub fn requires_synchronized_create() -> bool {
- true
+ if pending == c::FALSE {
+ // Some other thread initialized the key, load it.
+ self.key.load(Relaxed) - 1
+ } else {
+ let key = c::TlsAlloc();
+ if key == c::TLS_OUT_OF_INDEXES {
+ // Wake up the waiting threads before panicking to avoid deadlock.
+ c::InitOnceComplete(self.once.get(), c::INIT_ONCE_INIT_FAILED, ptr::null_mut());
+ panic!("out of TLS indexes");
+ }
+
+ self.key.store(key + 1, Release);
+ register_dtor(self);
+
+ let r = c::InitOnceComplete(self.once.get(), 0, ptr::null_mut());
+ debug_assert_eq!(r, c::TRUE);
+
+ key
+ }
+ } else {
+ // If there is no destructor to clean up, we can use racy initialization.
+
+ let key = c::TlsAlloc();
+ assert_ne!(key, c::TLS_OUT_OF_INDEXES, "out of TLS indexes");
+
+ match self.key.compare_exchange(0, key + 1, AcqRel, Acquire) {
+ Ok(_) => key,
+ Err(new) => {
+ // Some other thread completed initialization first, so destroy
+ // our key and use theirs.
+ let r = c::TlsFree(key);
+ debug_assert_eq!(r, c::TRUE);
+ new - 1
+ }
+ }
+ }
+ }
}
+unsafe impl Send for StaticKey {}
+unsafe impl Sync for StaticKey {}
+
// -------------------------------------------------------------------------
// Dtor registration
//
@@ -96,29 +150,21 @@ pub fn requires_synchronized_create() -> bool {
// Typically processes have a statically known set of TLS keys which is pretty
// small, and we'd want to keep this memory alive for the whole process anyway
// really.
-//
-// Perhaps one day we can fold the `Box` here into a static allocation,
-// expanding the `StaticKey` structure to contain not only a slot for the TLS
-// key but also a slot for the destructor queue on windows. An optimization for
-// another day!
-
-static DTORS: AtomicPtr<Node> = AtomicPtr::new(ptr::null_mut());
-
-struct Node {
- dtor: Dtor,
- key: Key,
- next: *mut Node,
-}
-unsafe fn register_dtor(key: Key, dtor: Dtor) {
- let mut node = ManuallyDrop::new(Box::new(Node { key, dtor, next: ptr::null_mut() }));
+static DTORS: AtomicPtr<StaticKey> = AtomicPtr::new(ptr::null_mut());
- let mut head = DTORS.load(SeqCst);
+/// Should only be called once per key, otherwise loops or breaks may occur in
+/// the linked list.
+unsafe fn register_dtor(key: &'static StaticKey) {
+ let this = <*const StaticKey>::cast_mut(key);
+ // Use acquire ordering to pass along the changes done by the previously
+ // registered keys when we store the new head with release ordering.
+ let mut head = DTORS.load(Acquire);
loop {
- node.next = head;
- match DTORS.compare_exchange(head, &mut **node, SeqCst, SeqCst) {
- Ok(_) => return, // nothing to drop, we successfully added the node to the list
- Err(cur) => head = cur,
+ key.next.store(head, Relaxed);
+ match DTORS.compare_exchange_weak(head, this, Release, Acquire) {
+ Ok(_) => break,
+ Err(new) => head = new,
}
}
}
@@ -214,25 +260,29 @@ unsafe extern "system" fn on_tls_callback(h: c::LPVOID, dwReason: c::DWORD, pv:
unsafe fn reference_tls_used() {}
}
-#[allow(dead_code)] // actually called above
+#[allow(dead_code)] // actually called below
unsafe fn run_dtors() {
- let mut any_run = true;
for _ in 0..5 {
- if !any_run {
- break;
- }
- any_run = false;
- let mut cur = DTORS.load(SeqCst);
+ let mut any_run = false;
+
+ // Use acquire ordering to observe key initialization.
+ let mut cur = DTORS.load(Acquire);
while !cur.is_null() {
- let ptr = c::TlsGetValue((*cur).key);
+ let key = (*cur).key.load(Relaxed) - 1;
+ let dtor = (*cur).dtor.unwrap();
+ let ptr = c::TlsGetValue(key);
if !ptr.is_null() {
- c::TlsSetValue((*cur).key, ptr::null_mut());
- ((*cur).dtor)(ptr as *mut _);
+ c::TlsSetValue(key, ptr::null_mut());
+ dtor(ptr as *mut _);
any_run = true;
}
- cur = (*cur).next;
+ cur = (*cur).next.load(Relaxed);
+ }
+
+ if !any_run {
+ break;
}
}
}
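`register_dtor` above is an intrusive, lock-free push onto a singly linked list of `'static` nodes. A minimal standalone sketch of the same push loop, with a generic `Node` in place of `StaticKey`:

use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering::{Acquire, Relaxed, Release}};

struct Node {
    value: u32,
    next: AtomicPtr<Node>,
}

static HEAD: AtomicPtr<Node> = AtomicPtr::new(ptr::null_mut());

/// Link a `'static` node into the list. Pushing the same node twice would
/// create a cycle, which is why register_dtor documents the same restriction.
fn push(node: &'static Node) {
    let this = node as *const Node as *mut Node;
    // Acquire here pairs with the Release below so later traversals observe
    // the fields of previously pushed nodes.
    let mut head = HEAD.load(Acquire);
    loop {
        node.next.store(head, Relaxed);
        match HEAD.compare_exchange_weak(head, this, Release, Acquire) {
            Ok(_) => break,
            Err(new) => head = new,
        }
    }
}

fn main() {
    static N: Node = Node { value: 7, next: AtomicPtr::new(ptr::null_mut()) };
    push(&N);
    let head = HEAD.load(Acquire);
    assert_eq!(unsafe { (*head).value }, 7);
}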
diff --git a/library/std/src/sys/windows/thread_local_key/tests.rs b/library/std/src/sys/windows/thread_local_key/tests.rs
new file mode 100644
index 000000000..c95f383fb
--- /dev/null
+++ b/library/std/src/sys/windows/thread_local_key/tests.rs
@@ -0,0 +1,53 @@
+use super::StaticKey;
+use crate::ptr;
+
+#[test]
+fn smoke() {
+ static K1: StaticKey = StaticKey::new(None);
+ static K2: StaticKey = StaticKey::new(None);
+
+ unsafe {
+ assert!(K1.get().is_null());
+ assert!(K2.get().is_null());
+ K1.set(ptr::invalid_mut(1));
+ K2.set(ptr::invalid_mut(2));
+ assert_eq!(K1.get() as usize, 1);
+ assert_eq!(K2.get() as usize, 2);
+ }
+}
+
+#[test]
+fn destructors() {
+ use crate::mem::ManuallyDrop;
+ use crate::sync::Arc;
+ use crate::thread;
+
+ unsafe extern "C" fn destruct(ptr: *mut u8) {
+ drop(Arc::from_raw(ptr as *const ()));
+ }
+
+ static KEY: StaticKey = StaticKey::new(Some(destruct));
+
+ let shared1 = Arc::new(());
+ let shared2 = Arc::clone(&shared1);
+
+ unsafe {
+ assert!(KEY.get().is_null());
+ KEY.set(Arc::into_raw(shared1) as *mut u8);
+ }
+
+ thread::spawn(move || unsafe {
+ assert!(KEY.get().is_null());
+ KEY.set(Arc::into_raw(shared2) as *mut u8);
+ })
+ .join()
+ .unwrap();
+
+ // Leak the Arc, let the TLS destructor clean it up.
+ let shared1 = unsafe { ManuallyDrop::new(Arc::from_raw(KEY.get() as *const ())) };
+ assert_eq!(
+ Arc::strong_count(&shared1),
+ 1,
+ "destructor should have dropped the other reference on thread exit"
+ );
+}
diff --git a/library/std/src/sys/windows/thread_parker.rs b/library/std/src/sys/windows/thread_parker.rs
index d876e0f6f..2f7ae863b 100644
--- a/library/std/src/sys/windows/thread_parker.rs
+++ b/library/std/src/sys/windows/thread_parker.rs
@@ -197,19 +197,17 @@ impl Parker {
// purpose, to make sure every unpark() has a release-acquire ordering
// with park().
if self.state.swap(NOTIFIED, Release) == PARKED {
- if let Some(wake_by_address_single) = c::WakeByAddressSingle::option() {
- unsafe {
+ unsafe {
+ if let Some(wake_by_address_single) = c::WakeByAddressSingle::option() {
wake_by_address_single(self.ptr());
- }
- } else {
- // If we run NtReleaseKeyedEvent before the waiting thread runs
- // NtWaitForKeyedEvent, this (shortly) blocks until we can wake it up.
- // If the waiting thread wakes up before we run NtReleaseKeyedEvent
- // (e.g. due to a timeout), this blocks until we do wake up a thread.
- // To prevent this thread from blocking indefinitely in that case,
- // park_impl() will, after seeing the state set to NOTIFIED after
- // waking up, call NtWaitForKeyedEvent again to unblock us.
- unsafe {
+ } else {
+ // If we run NtReleaseKeyedEvent before the waiting thread runs
+ // NtWaitForKeyedEvent, this (shortly) blocks until we can wake it up.
+ // If the waiting thread wakes up before we run NtReleaseKeyedEvent
+ // (e.g. due to a timeout), this blocks until we do wake up a thread.
+ // To prevent this thread from blocking indefinitely in that case,
+ // park_impl() will, after seeing the state set to NOTIFIED after
+ // waking up, call NtWaitForKeyedEvent again to unblock us.
c::NtReleaseKeyedEvent(keyed_event_handle(), self.ptr(), 0, ptr::null_mut());
}
}
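
For context, `unpark` swaps the state to NOTIFIED with release ordering and only issues a wakeup when the previous state was PARKED. A simplified model of that state machine follows, using a mutex/condvar pair in place of WaitOnAddress/keyed events; the lock taken in unpark exists only to keep this sketch race-free and has no counterpart in the real implementation:

use std::sync::atomic::{AtomicI8, Ordering::{Acquire, Release}};
use std::sync::{Arc, Condvar, Mutex};

const EMPTY: i8 = 0;
const PARKED: i8 = -1;
const NOTIFIED: i8 = 1;

struct Parker {
    state: AtomicI8,
    lock: Mutex<()>,
    cvar: Condvar,
}

impl Parker {
    fn new() -> Self {
        Parker { state: AtomicI8::new(EMPTY), lock: Mutex::new(()), cvar: Condvar::new() }
    }

    fn park(&self) {
        let mut guard = self.lock.lock().unwrap();
        // Register as parked, or consume a notification that raced ahead of us.
        if self.state.compare_exchange(EMPTY, PARKED, Acquire, Acquire).is_err() {
            self.state.store(EMPTY, Release);
            return;
        }
        while self.state.load(Acquire) != NOTIFIED {
            guard = self.cvar.wait(guard).unwrap();
        }
        self.state.store(EMPTY, Release);
    }

    fn unpark(&self) {
        // Only wake a thread if one was actually parked.
        let _guard = self.lock.lock().unwrap();
        if self.state.swap(NOTIFIED, Release) == PARKED {
            self.cvar.notify_one();
        }
    }
}

fn main() {
    let p = Arc::new(Parker::new());
    let p2 = Arc::clone(&p);
    let t = std::thread::spawn(move || p2.park());
    p.unpark();
    t.join().unwrap();
}
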
diff --git a/library/std/src/sys_common/backtrace.rs b/library/std/src/sys_common/backtrace.rs
index 31164afdc..8807077cb 100644
--- a/library/std/src/sys_common/backtrace.rs
+++ b/library/std/src/sys_common/backtrace.rs
@@ -7,15 +7,14 @@ use crate::fmt;
use crate::io;
use crate::io::prelude::*;
use crate::path::{self, Path, PathBuf};
-use crate::sys_common::mutex::StaticMutex;
+use crate::sync::{Mutex, PoisonError};
/// Max number of frames to print.
const MAX_NB_FRAMES: usize = 100;
-// SAFETY: Don't attempt to lock this reentrantly.
-pub unsafe fn lock() -> impl Drop {
- static LOCK: StaticMutex = StaticMutex::new();
- LOCK.lock()
+pub fn lock() -> impl Drop {
+ static LOCK: Mutex<()> = Mutex::new(());
+ LOCK.lock().unwrap_or_else(PoisonError::into_inner)
}
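
The `unwrap_or_else(PoisonError::into_inner)` idiom means "take the lock even if a previous holder panicked", which is the right call for a lock that only serializes printing. A small stand-alone example of the same idiom (names are illustrative):

use std::sync::{Mutex, PoisonError};

static COUNTER: Mutex<u32> = Mutex::new(0);

fn bump() -> u32 {
    // Ignore poisoning: a plain counter cannot be left in a broken state, so
    // recover the guard from the PoisonError and carry on.
    let mut guard = COUNTER.lock().unwrap_or_else(PoisonError::into_inner);
    *guard += 1;
    *guard
}

fn main() {
    // Poison the mutex by panicking while it is held...
    let _ = std::thread::spawn(|| {
        let _guard = COUNTER.lock().unwrap();
        panic!("poison it");
    })
    .join();

    // ...and observe that bump() still works afterwards.
    assert_eq!(bump(), 1);
}
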
/// Prints the current backtrace.
diff --git a/library/std/src/sys_common/condvar.rs b/library/std/src/sys_common/condvar.rs
index f3ac1061b..8bc5b2411 100644
--- a/library/std/src/sys_common/condvar.rs
+++ b/library/std/src/sys_common/condvar.rs
@@ -15,6 +15,7 @@ pub struct Condvar {
impl Condvar {
/// Creates a new condition variable for use.
#[inline]
+ #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
pub const fn new() -> Self {
Self { inner: imp::MovableCondvar::new(), check: CondvarCheck::new() }
}
diff --git a/library/std/src/sys_common/condvar/check.rs b/library/std/src/sys_common/condvar/check.rs
index ce8f36704..4ac9e62bf 100644
--- a/library/std/src/sys_common/condvar/check.rs
+++ b/library/std/src/sys_common/condvar/check.rs
@@ -50,6 +50,7 @@ pub struct NoCheck;
#[allow(dead_code)]
impl NoCheck {
+ #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
pub const fn new() -> Self {
Self
}
diff --git a/library/std/src/sys_common/mod.rs b/library/std/src/sys_common/mod.rs
index 80f56bf75..8c19f9332 100644
--- a/library/std/src/sys_common/mod.rs
+++ b/library/std/src/sys_common/mod.rs
@@ -27,17 +27,25 @@ pub mod io;
pub mod lazy_box;
pub mod memchr;
pub mod mutex;
+pub mod once;
pub mod process;
pub mod remutex;
pub mod rwlock;
pub mod thread;
pub mod thread_info;
pub mod thread_local_dtor;
-pub mod thread_local_key;
pub mod thread_parker;
pub mod wtf8;
cfg_if::cfg_if! {
+ if #[cfg(target_os = "windows")] {
+ pub use crate::sys::thread_local_key;
+ } else {
+ pub mod thread_local_key;
+ }
+}
+
+cfg_if::cfg_if! {
if #[cfg(any(target_os = "l4re",
target_os = "hermit",
feature = "restricted-std",
diff --git a/library/std/src/sys_common/mutex.rs b/library/std/src/sys_common/mutex.rs
index 48479f5bd..98046f20f 100644
--- a/library/std/src/sys_common/mutex.rs
+++ b/library/std/src/sys_common/mutex.rs
@@ -1,49 +1,5 @@
use crate::sys::locks as imp;
-/// An OS-based mutual exclusion lock, meant for use in static variables.
-///
-/// This mutex has a const constructor ([`StaticMutex::new`]), does not
-/// implement `Drop` to cleanup resources, and causes UB when used reentrantly.
-///
-/// This mutex does not implement poisoning.
-///
-/// This is a wrapper around `imp::Mutex` that does *not* call `init()` and
-/// `destroy()`.
-pub struct StaticMutex(imp::Mutex);
-
-unsafe impl Sync for StaticMutex {}
-
-impl StaticMutex {
- /// Creates a new mutex for use.
- #[inline]
- pub const fn new() -> Self {
- Self(imp::Mutex::new())
- }
-
- /// Calls raw_lock() and then returns an RAII guard to guarantee the mutex
- /// will be unlocked.
- ///
- /// It is undefined behaviour to call this function while locked by the
- /// same thread.
- #[inline]
- pub unsafe fn lock(&'static self) -> StaticMutexGuard {
- self.0.lock();
- StaticMutexGuard(&self.0)
- }
-}
-
-#[must_use]
-pub struct StaticMutexGuard(&'static imp::Mutex);
-
-impl Drop for StaticMutexGuard {
- #[inline]
- fn drop(&mut self) {
- unsafe {
- self.0.unlock();
- }
- }
-}
-
/// An OS-based mutual exclusion lock.
///
/// This mutex cleans up its resources in its `Drop` implementation, may safely
@@ -61,6 +17,7 @@ unsafe impl Sync for MovableMutex {}
impl MovableMutex {
/// Creates a new mutex.
#[inline]
+ #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
pub const fn new() -> Self {
Self(imp::MovableMutex::new())
}
diff --git a/library/std/src/sys_common/net.rs b/library/std/src/sys_common/net.rs
index 33d336c43..fad4a6333 100644
--- a/library/std/src/sys_common/net.rs
+++ b/library/std/src/sys_common/net.rs
@@ -2,15 +2,16 @@
mod tests;
use crate::cmp;
-use crate::ffi::CString;
+use crate::convert::{TryFrom, TryInto};
use crate::fmt;
use crate::io::{self, ErrorKind, IoSlice, IoSliceMut};
use crate::mem;
use crate::net::{Ipv4Addr, Ipv6Addr, Shutdown, SocketAddr};
use crate::ptr;
+use crate::sys::common::small_c_string::run_with_cstr;
use crate::sys::net::netc as c;
use crate::sys::net::{cvt, cvt_gai, cvt_r, init, wrlen_t, Socket};
-use crate::sys_common::{FromInner, IntoInner};
+use crate::sys_common::{AsInner, FromInner, IntoInner};
use crate::time::Duration;
use libc::{c_int, c_void};
@@ -197,14 +198,15 @@ impl<'a> TryFrom<(&'a str, u16)> for LookupHost {
fn try_from((host, port): (&'a str, u16)) -> io::Result<LookupHost> {
init();
- let c_host = CString::new(host)?;
- let mut hints: c::addrinfo = unsafe { mem::zeroed() };
- hints.ai_socktype = c::SOCK_STREAM;
- let mut res = ptr::null_mut();
- unsafe {
- cvt_gai(c::getaddrinfo(c_host.as_ptr(), ptr::null(), &hints, &mut res))
- .map(|_| LookupHost { original: res, cur: res, port })
- }
+ run_with_cstr(host.as_bytes(), |c_host| {
+ let mut hints: c::addrinfo = unsafe { mem::zeroed() };
+ hints.ai_socktype = c::SOCK_STREAM;
+ let mut res = ptr::null_mut();
+ unsafe {
+ cvt_gai(c::getaddrinfo(c_host.as_ptr(), ptr::null(), &hints, &mut res))
+ .map(|_| LookupHost { original: res, cur: res, port })
+ }
+ })
}
}
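
`run_with_cstr` is an internal std helper whose point is to build the NUL-terminated host string in a stack buffer when it is short enough, falling back to a heap allocation otherwise; its exact signature is not shown here. A hedged sketch of that general pattern, with a hypothetical with_cstr helper:

use std::ffi::{CStr, CString};
use std::io;

const STACK_BUF: usize = 128;

// Hypothetical helper: build a NUL-terminated copy of `bytes` on the stack
// when it fits, falling back to a heap-allocated CString otherwise.
fn with_cstr<T>(bytes: &[u8], f: impl FnOnce(&CStr) -> io::Result<T>) -> io::Result<T> {
    if bytes.contains(&0) {
        return Err(io::Error::new(io::ErrorKind::InvalidInput, "nul byte in string"));
    }
    if bytes.len() < STACK_BUF {
        let mut buf = [0u8; STACK_BUF];
        buf[..bytes.len()].copy_from_slice(bytes);
        // The buffer is zero-initialized, so index `bytes.len()` holds the NUL.
        let cstr = CStr::from_bytes_with_nul(&buf[..bytes.len() + 1]).unwrap();
        f(cstr)
    } else {
        let cstring = CString::new(bytes).unwrap();
        f(&cstring)
    }
}

fn main() -> io::Result<()> {
    with_cstr(b"localhost", |c| {
        println!("looked up {:?}", c);
        Ok(())
    })
}
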
@@ -345,6 +347,12 @@ impl TcpStream {
}
}
+impl AsInner<Socket> for TcpStream {
+ fn as_inner(&self) -> &Socket {
+ &self.inner
+ }
+}
+
impl FromInner<Socket> for TcpStream {
fn from_inner(socket: Socket) -> TcpStream {
TcpStream { inner: socket }
diff --git a/library/std/src/sys_common/once/futex.rs b/library/std/src/sys_common/once/futex.rs
new file mode 100644
index 000000000..5c7e6c013
--- /dev/null
+++ b/library/std/src/sys_common/once/futex.rs
@@ -0,0 +1,134 @@
+use crate::cell::Cell;
+use crate::sync as public;
+use crate::sync::atomic::{
+ AtomicU32,
+ Ordering::{Acquire, Relaxed, Release},
+};
+use crate::sys::futex::{futex_wait, futex_wake_all};
+
+// On some platforms, the OS is very nice and handles the waiter queue for us.
+// This means we only need one atomic value with 5 states:
+
+/// No initialization has run yet, and no thread is currently using the Once.
+const INCOMPLETE: u32 = 0;
+/// Some thread has previously attempted to initialize the Once, but it panicked,
+/// so the Once is now poisoned. There are no other threads currently accessing
+/// this Once.
+const POISONED: u32 = 1;
+/// Some thread is currently attempting to run initialization. It may succeed,
+/// so all future threads need to wait for it to finish.
+const RUNNING: u32 = 2;
+/// Some thread is currently attempting to run initialization and there are threads
+/// waiting for it to finish.
+const QUEUED: u32 = 3;
+/// Initialization has completed and all future calls should finish immediately.
+const COMPLETE: u32 = 4;
+
+// Threads wait by setting the state to QUEUED and calling `futex_wait` on the state
+// variable. When the running thread finishes, it will wake all waiting threads using
+// `futex_wake_all`.
+
+pub struct OnceState {
+ poisoned: bool,
+ set_state_to: Cell<u32>,
+}
+
+impl OnceState {
+ #[inline]
+ pub fn is_poisoned(&self) -> bool {
+ self.poisoned
+ }
+
+ #[inline]
+ pub fn poison(&self) {
+ self.set_state_to.set(POISONED);
+ }
+}
+
+struct CompletionGuard<'a> {
+ state: &'a AtomicU32,
+ set_state_on_drop_to: u32,
+}
+
+impl<'a> Drop for CompletionGuard<'a> {
+ fn drop(&mut self) {
+ // Use release ordering to propagate changes to all threads checking
+ // up on the Once. `futex_wake_all` does its own synchronization, hence
+ // we do not need `AcqRel`.
+ if self.state.swap(self.set_state_on_drop_to, Release) == QUEUED {
+ futex_wake_all(&self.state);
+ }
+ }
+}
+
+pub struct Once {
+ state: AtomicU32,
+}
+
+impl Once {
+ #[inline]
+ pub const fn new() -> Once {
+ Once { state: AtomicU32::new(INCOMPLETE) }
+ }
+
+ #[inline]
+ pub fn is_completed(&self) -> bool {
+ // Use acquire ordering to make all initialization changes visible to the
+ // current thread.
+ self.state.load(Acquire) == COMPLETE
+ }
+
+ // This uses FnMut to match the API of the generic implementation. As this
+ // implementation is quite light-weight, it is generic over the closure and
+ // so avoids the cost of dynamic dispatch.
+ #[cold]
+ #[track_caller]
+ pub fn call(&self, ignore_poisoning: bool, f: &mut impl FnMut(&public::OnceState)) {
+ let mut state = self.state.load(Acquire);
+ loop {
+ match state {
+ POISONED if !ignore_poisoning => {
+ // Panic to propagate the poison.
+ panic!("Once instance has previously been poisoned");
+ }
+ INCOMPLETE | POISONED => {
+ // Try to register the current thread as the one running.
+ if let Err(new) =
+ self.state.compare_exchange_weak(state, RUNNING, Acquire, Acquire)
+ {
+ state = new;
+ continue;
+ }
+ // `waiter_queue` will manage other waiting threads, and
+ // wake them up on drop.
+ let mut waiter_queue =
+ CompletionGuard { state: &self.state, set_state_on_drop_to: POISONED };
+ // Run the function, letting it know if we're poisoned or not.
+ let f_state = public::OnceState {
+ inner: OnceState {
+ poisoned: state == POISONED,
+ set_state_to: Cell::new(COMPLETE),
+ },
+ };
+ f(&f_state);
+ waiter_queue.set_state_on_drop_to = f_state.inner.set_state_to.get();
+ return;
+ }
+ RUNNING | QUEUED => {
+ // Set the state to QUEUED if it is not already.
+ if state == RUNNING
+ && let Err(new) = self.state.compare_exchange_weak(RUNNING, QUEUED, Relaxed, Acquire)
+ {
+ state = new;
+ continue;
+ }
+
+ futex_wait(&self.state, QUEUED, None);
+ state = self.state.load(Acquire);
+ }
+ COMPLETE => return,
+ _ => unreachable!("state is never set to invalid values"),
+ }
+ }
+ }
+}
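
This backend ultimately sits behind `std::sync::Once`; from the caller's side the five states above are invisible. A quick usage example of the public API, unrelated to the private constants in this file:

use std::sync::Once;

static INIT: Once = Once::new();
static mut VALUE: u32 = 0;

fn value() -> u32 {
    // call_once blocks other callers until initialization finishes and runs
    // the closure at most once across all threads.
    INIT.call_once(|| unsafe { VALUE = 42 });
    unsafe { VALUE }
}

fn main() {
    let handles: Vec<_> = (0..4).map(|_| std::thread::spawn(value)).collect();
    for h in handles {
        assert_eq!(h.join().unwrap(), 42);
    }
    assert!(INIT.is_completed());
}
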
diff --git a/library/std/src/sys_common/once/generic.rs b/library/std/src/sys_common/once/generic.rs
new file mode 100644
index 000000000..acf5f2471
--- /dev/null
+++ b/library/std/src/sys_common/once/generic.rs
@@ -0,0 +1,282 @@
+// Each `Once` has one word of atomic state, and this state is CAS'd on to
+// determine what to do. There are four possible states of a `Once`:
+//
+// * Incomplete - no initialization has run yet, and no thread is currently
+// using the Once.
+// * Poisoned - some thread has previously attempted to initialize the Once, but
+// it panicked, so the Once is now poisoned. There are no other
+// threads currently accessing this Once.
+// * Running - some thread is currently attempting to run initialization. It may
+// succeed, so all future threads need to wait for it to finish.
+// Note that this state is accompanied with a payload, described
+// below.
+// * Complete - initialization has completed and all future calls should finish
+// immediately.
+//
+// With 4 states we need 2 bits to encode this, and we use the remaining bits
+// in the word we have allocated as a queue of threads waiting for the thread
+// responsible for entering the RUNNING state. This queue is just a linked list
+// of Waiter nodes which is monotonically increasing in size. Each node is
+// allocated on the stack, and whenever the running closure finishes it will
+// consume the entire queue and notify all waiters they should try again.
+//
+// You'll find a few more details in the implementation, but that's the gist of
+// it!
+//
+// Atomic orderings:
+// When running `Once` we deal with multiple atomics:
+// `Once.state_and_queue` and an unknown number of `Waiter.signaled`.
+// * `state_and_queue` is used (1) as a state flag, (2) for synchronizing the
+// result of the `Once`, and (3) for synchronizing `Waiter` nodes.
+// - At the end of the `call` function we have to make sure the result
+// of the `Once` is acquired. So every load which can be the only one to
+// load COMPLETE must have at least acquire ordering, which means all
+// three of them.
+// - `WaiterQueue::drop` is the only place that may store COMPLETE, and
+// must do so with release ordering to make the result available.
+// - `wait` inserts `Waiter` nodes as a pointer in `state_and_queue`, and
+// needs to make the nodes available with release ordering. The load in
+// its `compare_exchange` can be relaxed because it only has to compare
+// the atomic, not to read other data.
+// - `WaiterQueue::drop` must see the `Waiter` nodes, so it must load
+// `state_and_queue` with acquire ordering.
+// - There is just one store where `state_and_queue` is used only as a
+// state flag, without having to synchronize data: switching the state
+// from INCOMPLETE to RUNNING in `call`. This store can be Relaxed,
+// but the read has to be Acquire because of the requirements mentioned
+// above.
+// * `Waiter.signaled` is both used as a flag, and to protect a field with
+// interior mutability in `Waiter`. `Waiter.thread` is changed in
+// `WaiterQueue::drop` which then sets `signaled` with release ordering.
+// After `wait` loads `signaled` with acquire ordering and sees it is true,
+// it needs to see the changes to drop the `Waiter` struct correctly.
+// * There is one place where the two atomics `Once.state_and_queue` and
+// `Waiter.signaled` come together, and might be reordered by the compiler or
+// processor. Because both use acquire ordering such a reordering is not
+// allowed, so no need for `SeqCst`.
+
+use crate::cell::Cell;
+use crate::fmt;
+use crate::ptr;
+use crate::sync as public;
+use crate::sync::atomic::{AtomicBool, AtomicPtr, Ordering};
+use crate::thread::{self, Thread};
+
+type Masked = ();
+
+pub struct Once {
+ state_and_queue: AtomicPtr<Masked>,
+}
+
+pub struct OnceState {
+ poisoned: bool,
+ set_state_on_drop_to: Cell<*mut Masked>,
+}
+
+// Four states that a Once can be in, encoded into the lower bits of
+// `state_and_queue` in the Once structure.
+const INCOMPLETE: usize = 0x0;
+const POISONED: usize = 0x1;
+const RUNNING: usize = 0x2;
+const COMPLETE: usize = 0x3;
+
+// Mask to learn about the state. All other bits are the queue of waiters if
+// this is in the RUNNING state.
+const STATE_MASK: usize = 0x3;
+
+// Representation of a node in the linked list of waiters, used while in the
+// RUNNING state.
+// Note: `Waiter` can't hold a mutable pointer to the next thread, because then
+// `wait` would both hand out a mutable reference to its `Waiter` node, and keep
+// a shared reference to check `signaled`. Instead we hold shared references and
+// use interior mutability.
+#[repr(align(4))] // Ensure the two lower bits are free to use as state bits.
+struct Waiter {
+ thread: Cell<Option<Thread>>,
+ signaled: AtomicBool,
+ next: *const Waiter,
+}
+
+// Head of a linked list of waiters.
+// Every node is a struct on the stack of a waiting thread.
+// Will wake up the waiters when it gets dropped, i.e. also on panic.
+struct WaiterQueue<'a> {
+ state_and_queue: &'a AtomicPtr<Masked>,
+ set_state_on_drop_to: *mut Masked,
+}
+
+impl Once {
+ #[inline]
+ pub const fn new() -> Once {
+ Once { state_and_queue: AtomicPtr::new(ptr::invalid_mut(INCOMPLETE)) }
+ }
+
+ #[inline]
+ pub fn is_completed(&self) -> bool {
+ // An `Acquire` load is enough because that makes all the initialization
+ // operations visible to us, and, this being a fast path, weaker
+ // ordering helps with performance. This `Acquire` synchronizes with
+ // `Release` operations on the slow path.
+ self.state_and_queue.load(Ordering::Acquire).addr() == COMPLETE
+ }
+
+ // This is a non-generic function to reduce the monomorphization cost of
+ // using `call_once` (this isn't exactly a trivial or small implementation).
+ //
+ // Additionally, this is tagged with `#[cold]` as it should indeed be cold
+ // and it helps let LLVM know that calls to this function should be off the
+ // fast path. Essentially, this should help generate more straight line code
+ // in LLVM.
+ //
+ // Finally, this takes an `FnMut` instead of a `FnOnce` because there's
+ // currently no way to take an `FnOnce` and call it via virtual dispatch
+ // without some allocation overhead.
+ #[cold]
+ #[track_caller]
+ pub fn call(&self, ignore_poisoning: bool, init: &mut dyn FnMut(&public::OnceState)) {
+ let mut state_and_queue = self.state_and_queue.load(Ordering::Acquire);
+ loop {
+ match state_and_queue.addr() {
+ COMPLETE => break,
+ POISONED if !ignore_poisoning => {
+ // Panic to propagate the poison.
+ panic!("Once instance has previously been poisoned");
+ }
+ POISONED | INCOMPLETE => {
+ // Try to register this thread as the one RUNNING.
+ let exchange_result = self.state_and_queue.compare_exchange(
+ state_and_queue,
+ ptr::invalid_mut(RUNNING),
+ Ordering::Acquire,
+ Ordering::Acquire,
+ );
+ if let Err(old) = exchange_result {
+ state_and_queue = old;
+ continue;
+ }
+ // `waiter_queue` will manage other waiting threads, and
+ // wake them up on drop.
+ let mut waiter_queue = WaiterQueue {
+ state_and_queue: &self.state_and_queue,
+ set_state_on_drop_to: ptr::invalid_mut(POISONED),
+ };
+ // Run the initialization function, letting it know if we're
+ // poisoned or not.
+ let init_state = public::OnceState {
+ inner: OnceState {
+ poisoned: state_and_queue.addr() == POISONED,
+ set_state_on_drop_to: Cell::new(ptr::invalid_mut(COMPLETE)),
+ },
+ };
+ init(&init_state);
+ waiter_queue.set_state_on_drop_to = init_state.inner.set_state_on_drop_to.get();
+ break;
+ }
+ _ => {
+ // All other values must be RUNNING with possibly a
+ // pointer to the waiter queue in the more significant bits.
+ assert!(state_and_queue.addr() & STATE_MASK == RUNNING);
+ wait(&self.state_and_queue, state_and_queue);
+ state_and_queue = self.state_and_queue.load(Ordering::Acquire);
+ }
+ }
+ }
+ }
+}
+
+fn wait(state_and_queue: &AtomicPtr<Masked>, mut current_state: *mut Masked) {
+ // Note: the following code was carefully written to avoid creating a
+ // mutable reference to `node` that gets aliased.
+ loop {
+ // Don't queue this thread if the status is no longer running,
+ // otherwise we will not be woken up.
+ if current_state.addr() & STATE_MASK != RUNNING {
+ return;
+ }
+
+ // Create the node for our current thread.
+ let node = Waiter {
+ thread: Cell::new(Some(thread::current())),
+ signaled: AtomicBool::new(false),
+ next: current_state.with_addr(current_state.addr() & !STATE_MASK) as *const Waiter,
+ };
+ let me = &node as *const Waiter as *const Masked as *mut Masked;
+
+ // Try to slide in the node at the head of the linked list, making sure
+ // that another thread didn't just replace the head of the linked list.
+ let exchange_result = state_and_queue.compare_exchange(
+ current_state,
+ me.with_addr(me.addr() | RUNNING),
+ Ordering::Release,
+ Ordering::Relaxed,
+ );
+ if let Err(old) = exchange_result {
+ current_state = old;
+ continue;
+ }
+
+ // We have enqueued ourselves, now let's wait.
+ // It is important not to return before being signaled, otherwise we
+ // would drop our `Waiter` node and leave a hole in the linked list
+ // (and a dangling reference). Guard against spurious wakeups by
+ // reparking ourselves until we are signaled.
+ while !node.signaled.load(Ordering::Acquire) {
+ // If the managing thread happens to signal and unpark us before we
+ // can park ourselves, the result could be this thread never gets
+ // unparked. Luckily `park` comes with the guarantee that if an `unpark`
+ // arrived just beforehand (while we were not yet parked), it does not block.
+ thread::park();
+ }
+ break;
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for Once {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Once").finish_non_exhaustive()
+ }
+}
+
+impl Drop for WaiterQueue<'_> {
+ fn drop(&mut self) {
+ // Swap out our state with however we finished.
+ let state_and_queue =
+ self.state_and_queue.swap(self.set_state_on_drop_to, Ordering::AcqRel);
+
+ // We should only ever see an old state which was RUNNING.
+ assert_eq!(state_and_queue.addr() & STATE_MASK, RUNNING);
+
+ // Walk the entire linked list of waiters and wake them up (in LIFO
+ // order, last to register is first to wake up).
+ unsafe {
+ // Right after setting `node.signaled = true` the other thread may
+ // free `node` if there happens to be a spurious wakeup.
+ // So we have to take out the `thread` field and copy the pointer to
+ // `next` first.
+ let mut queue =
+ state_and_queue.with_addr(state_and_queue.addr() & !STATE_MASK) as *const Waiter;
+ while !queue.is_null() {
+ let next = (*queue).next;
+ let thread = (*queue).thread.take().unwrap();
+ (*queue).signaled.store(true, Ordering::Release);
+ // ^- FIXME (maybe): This is another case of issue #55005
+ // `store()` has a potentially dangling ref to `signaled`.
+ queue = next;
+ thread.unpark();
+ }
+ }
+ }
+}
+
+impl OnceState {
+ #[inline]
+ pub fn is_poisoned(&self) -> bool {
+ self.poisoned
+ }
+
+ #[inline]
+ pub fn poison(&self) {
+ self.set_state_on_drop_to.set(ptr::invalid_mut(POISONED));
+ }
+}
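
The central trick in this file is packing the 2-bit state into the low bits of a 4-byte-aligned pointer. The real code keeps the word in an AtomicPtr and uses the strict-provenance methods (addr, with_addr, ptr::invalid_mut); the sketch below uses plain integer casts only to stay self-contained and stable:

const STATE_MASK: usize = 0b11;
const RUNNING: usize = 0b10;

#[repr(align(4))] // the low two bits of any &Waiter are guaranteed to be zero
struct Waiter {
    id: u32,
}

// Pack the RUNNING state together with a pointer to the queue head.
fn pack(node: &Waiter, state: usize) -> usize {
    let addr = node as *const Waiter as usize;
    debug_assert_eq!(addr & STATE_MASK, 0);
    addr | state
}

// Split a packed word back into its state bits and (possibly null) pointer.
fn unpack(word: usize) -> (usize, *const Waiter) {
    (word & STATE_MASK, (word & !STATE_MASK) as *const Waiter)
}

fn main() {
    let w = Waiter { id: 7 };
    let word = pack(&w, RUNNING);
    let (state, ptr) = unpack(word);
    assert_eq!(state, RUNNING);
    assert_eq!(unsafe { (*ptr).id }, 7);
}
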
diff --git a/library/std/src/sys_common/once/mod.rs b/library/std/src/sys_common/once/mod.rs
new file mode 100644
index 000000000..8742e68cc
--- /dev/null
+++ b/library/std/src/sys_common/once/mod.rs
@@ -0,0 +1,43 @@
+// A "once" is a relatively simple primitive, and it's also typically provided
+// by the OS as well (see `pthread_once` or `InitOnceExecuteOnce`). The OS
+// primitives, however, tend to have surprising restrictions, such as the Unix
+// one doesn't allow an argument to be passed to the function.
+//
+// As a result, we end up implementing it ourselves in the standard library.
+// This also gives us the opportunity to optimize the implementation a bit which
+// should help the fast path on call sites.
+//
+// So to recap, the guarantees of a Once are that it will call the
+// initialization closure at most once, and it will never return until the one
+// that's running has finished running. This means that we need some form of
+// blocking here while the custom callback is running at the very least.
+// Additionally, we add on the restriction of **poisoning**. Whenever an
+// initialization closure panics, the Once enters a "poisoned" state which means
+// that all future calls will immediately panic as well.
+//
+// So to implement this, one might first reach for a `Mutex`, but those cannot
+// be put into a `static`. It also gets a lot harder with poisoning to figure
+// out when the mutex needs to be deallocated because it's not after the closure
+// finishes, but after the first successful closure finishes.
+//
+// All in all, this is instead implemented with atomics and lock-free
+// operations! Whee!
+
+cfg_if::cfg_if! {
+ if #[cfg(any(
+ target_os = "linux",
+ target_os = "android",
+ all(target_arch = "wasm32", target_feature = "atomics"),
+ target_os = "freebsd",
+ target_os = "openbsd",
+ target_os = "dragonfly",
+ target_os = "fuchsia",
+ target_os = "hermit",
+ ))] {
+ mod futex;
+ pub use futex::{Once, OnceState};
+ } else {
+ mod generic;
+ pub use generic::{Once, OnceState};
+ }
+}
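
A short demonstration of the poisoning rule described above, using the public Once API: a panicking initializer poisons the Once, a later call_once would propagate the panic, and call_once_force can observe and clear the poison.

use std::sync::Once;

fn main() {
    static INIT: Once = Once::new();

    // The first attempt panics, which poisons the Once.
    let _ = std::thread::spawn(|| {
        INIT.call_once(|| panic!("initialization failed"));
    })
    .join();
    assert!(!INIT.is_completed());

    // call_once_force still runs on a poisoned Once and reports the poison;
    // returning normally here clears it and completes the Once.
    INIT.call_once_force(|state| {
        assert!(state.is_poisoned());
    });
    assert!(INIT.is_completed());
}
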
diff --git a/library/std/src/sys_common/remutex.rs b/library/std/src/sys_common/remutex.rs
index 8921af311..b448ae3a9 100644
--- a/library/std/src/sys_common/remutex.rs
+++ b/library/std/src/sys_common/remutex.rs
@@ -1,13 +1,11 @@
#[cfg(all(test, not(target_os = "emscripten")))]
mod tests;
+use super::mutex as sys;
use crate::cell::UnsafeCell;
-use crate::marker::PhantomPinned;
use crate::ops::Deref;
use crate::panic::{RefUnwindSafe, UnwindSafe};
-use crate::pin::Pin;
use crate::sync::atomic::{AtomicUsize, Ordering::Relaxed};
-use crate::sys::locks as sys;
/// A re-entrant mutual exclusion
///
@@ -41,11 +39,10 @@ use crate::sys::locks as sys;
/// synchronization is left to the mutex, making relaxed memory ordering for
/// the `owner` field fine in all cases.
pub struct ReentrantMutex<T> {
- mutex: sys::Mutex,
+ mutex: sys::MovableMutex,
owner: AtomicUsize,
lock_count: UnsafeCell<u32>,
data: T,
- _pinned: PhantomPinned,
}
unsafe impl<T: Send> Send for ReentrantMutex<T> {}
@@ -68,39 +65,22 @@ impl<T> RefUnwindSafe for ReentrantMutex<T> {}
/// guarded data.
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct ReentrantMutexGuard<'a, T: 'a> {
- lock: Pin<&'a ReentrantMutex<T>>,
+ lock: &'a ReentrantMutex<T>,
}
impl<T> !Send for ReentrantMutexGuard<'_, T> {}
impl<T> ReentrantMutex<T> {
/// Creates a new reentrant mutex in an unlocked state.
- ///
- /// # Unsafety
- ///
- /// This function is unsafe because it is required that `init` is called
- /// once this mutex is in its final resting place, and only then are the
- /// lock/unlock methods safe.
- pub const unsafe fn new(t: T) -> ReentrantMutex<T> {
+ pub const fn new(t: T) -> ReentrantMutex<T> {
ReentrantMutex {
- mutex: sys::Mutex::new(),
+ mutex: sys::MovableMutex::new(),
owner: AtomicUsize::new(0),
lock_count: UnsafeCell::new(0),
data: t,
- _pinned: PhantomPinned,
}
}
- /// Initializes this mutex so it's ready for use.
- ///
- /// # Unsafety
- ///
- /// Unsafe to call more than once, and must be called after this will no
- /// longer move in memory.
- pub unsafe fn init(self: Pin<&mut Self>) {
- self.get_unchecked_mut().mutex.init()
- }
-
/// Acquires a mutex, blocking the current thread until it is able to do so.
///
/// This function will block the caller until it is available to acquire the mutex.
@@ -113,15 +93,14 @@ impl<T> ReentrantMutex<T> {
/// If another user of this mutex panicked while holding the mutex, then
/// this call will return failure if the mutex would otherwise be
/// acquired.
- pub fn lock(self: Pin<&Self>) -> ReentrantMutexGuard<'_, T> {
+ pub fn lock(&self) -> ReentrantMutexGuard<'_, T> {
let this_thread = current_thread_unique_ptr();
- // Safety: We only touch lock_count when we own the lock,
- // and since self is pinned we can safely call the lock() on the mutex.
+ // Safety: We only touch lock_count when we own the lock.
unsafe {
if self.owner.load(Relaxed) == this_thread {
self.increment_lock_count();
} else {
- self.mutex.lock();
+ self.mutex.raw_lock();
self.owner.store(this_thread, Relaxed);
debug_assert_eq!(*self.lock_count.get(), 0);
*self.lock_count.get() = 1;
@@ -142,10 +121,9 @@ impl<T> ReentrantMutex<T> {
/// If another user of this mutex panicked while holding the mutex, then
/// this call will return failure if the mutex would otherwise be
/// acquired.
- pub fn try_lock(self: Pin<&Self>) -> Option<ReentrantMutexGuard<'_, T>> {
+ pub fn try_lock(&self) -> Option<ReentrantMutexGuard<'_, T>> {
let this_thread = current_thread_unique_ptr();
- // Safety: We only touch lock_count when we own the lock,
- // and since self is pinned we can safely call the try_lock on the mutex.
+ // Safety: We only touch lock_count when we own the lock.
unsafe {
if self.owner.load(Relaxed) == this_thread {
self.increment_lock_count();
@@ -179,12 +157,12 @@ impl<T> Deref for ReentrantMutexGuard<'_, T> {
impl<T> Drop for ReentrantMutexGuard<'_, T> {
#[inline]
fn drop(&mut self) {
- // Safety: We own the lock, and the lock is pinned.
+ // Safety: We own the lock.
unsafe {
*self.lock.lock_count.get() -= 1;
if *self.lock.lock_count.get() == 0 {
self.lock.owner.store(0, Relaxed);
- self.lock.mutex.unlock();
+ self.lock.mutex.raw_unlock();
}
}
}
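
The `owner` field is compared against `current_thread_unique_ptr()`, which in the real code derives a cheap per-thread identifier from the address of a thread-local. A hedged sketch of that idea in isolation (the helper name below is illustrative):

use std::collections::HashSet;
use std::sync::Mutex;
use std::thread;

// The address of a thread-local is stable for the lifetime of its thread and
// distinct from the address in any other live thread, so it can serve as a
// cheap thread identifier. Addresses may be reused after a thread exits.
fn current_thread_unique_addr() -> usize {
    thread_local! { static X: u8 = 0; }
    X.with(|x| x as *const u8 as usize)
}

fn main() {
    let ids = Mutex::new(HashSet::new());
    thread::scope(|s| {
        for _ in 0..4 {
            s.spawn(|| {
                ids.lock().unwrap().insert(current_thread_unique_addr());
            });
        }
    });
    println!("distinct ids observed: {}", ids.lock().unwrap().len());
}
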
diff --git a/library/std/src/sys_common/remutex/tests.rs b/library/std/src/sys_common/remutex/tests.rs
index 64873b850..8e97ce11c 100644
--- a/library/std/src/sys_common/remutex/tests.rs
+++ b/library/std/src/sys_common/remutex/tests.rs
@@ -1,18 +1,11 @@
-use crate::boxed::Box;
use crate::cell::RefCell;
-use crate::pin::Pin;
use crate::sync::Arc;
use crate::sys_common::remutex::{ReentrantMutex, ReentrantMutexGuard};
use crate::thread;
#[test]
fn smoke() {
- let m = unsafe {
- let mut m = Box::pin(ReentrantMutex::new(()));
- m.as_mut().init();
- m
- };
- let m = m.as_ref();
+ let m = ReentrantMutex::new(());
{
let a = m.lock();
{
@@ -29,20 +22,15 @@ fn smoke() {
#[test]
fn is_mutex() {
- let m = unsafe {
- // FIXME: Simplify this if Arc gets an Arc::get_pin_mut.
- let mut m = Arc::new(ReentrantMutex::new(RefCell::new(0)));
- Pin::new_unchecked(Arc::get_mut_unchecked(&mut m)).init();
- Pin::new_unchecked(m)
- };
+ let m = Arc::new(ReentrantMutex::new(RefCell::new(0)));
let m2 = m.clone();
- let lock = m.as_ref().lock();
+ let lock = m.lock();
let child = thread::spawn(move || {
- let lock = m2.as_ref().lock();
+ let lock = m2.lock();
assert_eq!(*lock.borrow(), 4950);
});
for i in 0..100 {
- let lock = m.as_ref().lock();
+ let lock = m.lock();
*lock.borrow_mut() += i;
}
drop(lock);
@@ -51,22 +39,17 @@ fn is_mutex() {
#[test]
fn trylock_works() {
- let m = unsafe {
- // FIXME: Simplify this if Arc gets an Arc::get_pin_mut.
- let mut m = Arc::new(ReentrantMutex::new(()));
- Pin::new_unchecked(Arc::get_mut_unchecked(&mut m)).init();
- Pin::new_unchecked(m)
- };
+ let m = Arc::new(ReentrantMutex::new(()));
let m2 = m.clone();
- let _lock = m.as_ref().try_lock();
- let _lock2 = m.as_ref().try_lock();
+ let _lock = m.try_lock();
+ let _lock2 = m.try_lock();
thread::spawn(move || {
- let lock = m2.as_ref().try_lock();
+ let lock = m2.try_lock();
assert!(lock.is_none());
})
.join()
.unwrap();
- let _lock3 = m.as_ref().try_lock();
+ let _lock3 = m.try_lock();
}
pub struct Answer<'a>(pub ReentrantMutexGuard<'a, RefCell<u32>>);
diff --git a/library/std/src/sys_common/rwlock.rs b/library/std/src/sys_common/rwlock.rs
index ba56f3a8f..042981dac 100644
--- a/library/std/src/sys_common/rwlock.rs
+++ b/library/std/src/sys_common/rwlock.rs
@@ -1,65 +1,5 @@
use crate::sys::locks as imp;
-/// An OS-based reader-writer lock, meant for use in static variables.
-///
-/// This rwlock does not implement poisoning.
-///
-/// This rwlock has a const constructor ([`StaticRwLock::new`]), does not
-/// implement `Drop` to cleanup resources.
-pub struct StaticRwLock(imp::RwLock);
-
-impl StaticRwLock {
- /// Creates a new rwlock for use.
- #[inline]
- pub const fn new() -> Self {
- Self(imp::RwLock::new())
- }
-
- /// Acquires shared access to the underlying lock, blocking the current
- /// thread to do so.
- ///
- /// The lock is automatically unlocked when the returned guard is dropped.
- #[inline]
- pub fn read(&'static self) -> StaticRwLockReadGuard {
- unsafe { self.0.read() };
- StaticRwLockReadGuard(&self.0)
- }
-
- /// Acquires write access to the underlying lock, blocking the current thread
- /// to do so.
- ///
- /// The lock is automatically unlocked when the returned guard is dropped.
- #[inline]
- pub fn write(&'static self) -> StaticRwLockWriteGuard {
- unsafe { self.0.write() };
- StaticRwLockWriteGuard(&self.0)
- }
-}
-
-#[must_use]
-pub struct StaticRwLockReadGuard(&'static imp::RwLock);
-
-impl Drop for StaticRwLockReadGuard {
- #[inline]
- fn drop(&mut self) {
- unsafe {
- self.0.read_unlock();
- }
- }
-}
-
-#[must_use]
-pub struct StaticRwLockWriteGuard(&'static imp::RwLock);
-
-impl Drop for StaticRwLockWriteGuard {
- #[inline]
- fn drop(&mut self) {
- unsafe {
- self.0.write_unlock();
- }
- }
-}
-
/// An OS-based reader-writer lock.
///
/// This rwlock cleans up its resources in its `Drop` implementation and may
@@ -75,6 +15,7 @@ pub struct MovableRwLock(imp::MovableRwLock);
impl MovableRwLock {
/// Creates a new reader-writer lock for use.
#[inline]
+ #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
pub const fn new() -> Self {
Self(imp::MovableRwLock::new())
}
diff --git a/library/std/src/sys_common/thread_local_key.rs b/library/std/src/sys_common/thread_local_key.rs
index 70beebe86..747579f17 100644
--- a/library/std/src/sys_common/thread_local_key.rs
+++ b/library/std/src/sys_common/thread_local_key.rs
@@ -53,7 +53,6 @@ mod tests;
use crate::sync::atomic::{self, AtomicUsize, Ordering};
use crate::sys::thread_local_key as imp;
-use crate::sys_common::mutex::StaticMutex;
/// A type for TLS keys that are statically allocated.
///
@@ -69,8 +68,10 @@ use crate::sys_common::mutex::StaticMutex;
/// ```ignore (cannot-doctest-private-modules)
/// use tls::os::{StaticKey, INIT};
///
+/// // Use a regular global static to store the key.
/// static KEY: StaticKey = INIT;
///
+/// // The state provided via `get` and `set` is thread-local.
/// unsafe {
/// assert!(KEY.get().is_null());
/// KEY.set(1 as *mut u8);
@@ -149,25 +150,6 @@ impl StaticKey {
}
unsafe fn lazy_init(&self) -> usize {
- // Currently the Windows implementation of TLS is pretty hairy, and
- // it greatly simplifies creation if we just synchronize everything.
- //
- // Additionally a 0-index of a tls key hasn't been seen on windows, so
- // we just simplify the whole branch.
- if imp::requires_synchronized_create() {
- // We never call `INIT_LOCK.init()`, so it is UB to attempt to
- // acquire this mutex reentrantly!
- static INIT_LOCK: StaticMutex = StaticMutex::new();
- let _guard = INIT_LOCK.lock();
- let mut key = self.key.load(Ordering::SeqCst);
- if key == 0 {
- key = imp::create(self.dtor) as usize;
- self.key.store(key, Ordering::SeqCst);
- }
- rtassert!(key != 0);
- return key;
- }
-
// POSIX allows the key created here to be 0, but the compare_exchange
// below relies on using 0 as a sentinel value to check who won the
// race to set the shared TLS key. As far as I know, there is no
@@ -230,8 +212,6 @@ impl Key {
impl Drop for Key {
fn drop(&mut self) {
- // Right now Windows doesn't support TLS key destruction, but this also
- // isn't used anywhere other than tests, so just leak the TLS key.
- // unsafe { imp::destroy(self.key) }
+ unsafe { imp::destroy(self.key) }
}
}
diff --git a/library/std/src/sys_common/thread_local_key/tests.rs b/library/std/src/sys_common/thread_local_key/tests.rs
index 968738a41..6f32b858f 100644
--- a/library/std/src/sys_common/thread_local_key/tests.rs
+++ b/library/std/src/sys_common/thread_local_key/tests.rs
@@ -1,4 +1,5 @@
use super::{Key, StaticKey};
+use core::ptr;
fn assert_sync<T: Sync>() {}
fn assert_send<T: Send>() {}
@@ -12,8 +13,8 @@ fn smoke() {
let k2 = Key::new(None);
assert!(k1.get().is_null());
assert!(k2.get().is_null());
- k1.set(1 as *mut _);
- k2.set(2 as *mut _);
+ k1.set(ptr::invalid_mut(1));
+ k2.set(ptr::invalid_mut(2));
assert_eq!(k1.get() as usize, 1);
assert_eq!(k2.get() as usize, 2);
}
@@ -26,8 +27,8 @@ fn statik() {
unsafe {
assert!(K1.get().is_null());
assert!(K2.get().is_null());
- K1.set(1 as *mut _);
- K2.set(2 as *mut _);
+ K1.set(ptr::invalid_mut(1));
+ K2.set(ptr::invalid_mut(2));
assert_eq!(K1.get() as usize, 1);
assert_eq!(K2.get() as usize, 2);
}
diff --git a/library/std/src/sys_common/thread_parker/mod.rs b/library/std/src/sys_common/thread_parker/mod.rs
index cbd7832eb..f86a9a555 100644
--- a/library/std/src/sys_common/thread_parker/mod.rs
+++ b/library/std/src/sys_common/thread_parker/mod.rs
@@ -7,6 +7,7 @@ cfg_if::cfg_if! {
target_os = "openbsd",
target_os = "dragonfly",
target_os = "fuchsia",
+ target_os = "hermit",
))] {
mod futex;
pub use futex::Parker;
diff --git a/library/std/src/sys_common/wtf8.rs b/library/std/src/sys_common/wtf8.rs
index 57fa49893..dd53767d4 100644
--- a/library/std/src/sys_common/wtf8.rs
+++ b/library/std/src/sys_common/wtf8.rs
@@ -89,6 +89,24 @@ impl CodePoint {
self.value
}
+ /// Returns the numeric value of the code point if it is a leading surrogate.
+ #[inline]
+ pub fn to_lead_surrogate(&self) -> Option<u16> {
+ match self.value {
+ lead @ 0xD800..=0xDBFF => Some(lead as u16),
+ _ => None,
+ }
+ }
+
+ /// Returns the numeric value of the code point if it is a trailing surrogate.
+ #[inline]
+ pub fn to_trail_surrogate(&self) -> Option<u16> {
+ match self.value {
+ trail @ 0xDC00..=0xDFFF => Some(trail as u16),
+ _ => None,
+ }
+ }
+
/// Optionally returns a Unicode scalar value for the code point.
///
/// Returns `None` if the code point is a surrogate (from U+D800 to U+DFFF).
@@ -117,6 +135,14 @@ impl CodePoint {
#[derive(Eq, PartialEq, Ord, PartialOrd, Clone)]
pub struct Wtf8Buf {
bytes: Vec<u8>,
+
+ /// Do we know that `bytes` holds a valid UTF-8 encoding? We can easily
+ /// know this if we're constructed from a `String` or `&str`.
+ ///
+ /// It is possible for `bytes` to have valid UTF-8 without this being
+ /// set, such as when we're concatenating `&Wtf8`'s and surrogates become
+ /// paired, as we don't bother to rescan the entire string.
+ is_known_utf8: bool,
}
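
The flag is a conservative, one-way hint: true implies the bytes are valid UTF-8, while false says nothing either way. A tiny sketch of how such a hint is consumed (mirroring the shape of `Wtf8Buf::into_string` further down, not its exact code):

// Conservative cached-validity pattern: the flag may be stale-false but never
// stale-true, so a false flag only forces a re-check, never a wrong answer.
struct Buf {
    bytes: Vec<u8>,
    is_known_utf8: bool,
}

impl Buf {
    fn into_string(self) -> Result<String, Vec<u8>> {
        if self.is_known_utf8 {
            // Fast path: the invariant guarantees validity, so skip the scan.
            // (The real code uses from_utf8_unchecked here; we stay safe.)
            return Ok(String::from_utf8(self.bytes).expect("flag invariant violated"));
        }
        // Slow path: actually validate the bytes.
        String::from_utf8(self.bytes).map_err(|e| e.into_bytes())
    }
}

fn main() {
    let b = Buf { bytes: "aé".as_bytes().to_vec(), is_known_utf8: true };
    assert_eq!(b.into_string().unwrap(), "aé");

    let b = Buf { bytes: vec![0xED, 0xA0, 0x80], is_known_utf8: false };
    assert!(b.into_string().is_err());
}
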
impl ops::Deref for Wtf8Buf {
@@ -147,13 +173,13 @@ impl Wtf8Buf {
/// Creates a new, empty WTF-8 string.
#[inline]
pub fn new() -> Wtf8Buf {
- Wtf8Buf { bytes: Vec::new() }
+ Wtf8Buf { bytes: Vec::new(), is_known_utf8: true }
}
/// Creates a new, empty WTF-8 string with pre-allocated capacity for `capacity` bytes.
#[inline]
pub fn with_capacity(capacity: usize) -> Wtf8Buf {
- Wtf8Buf { bytes: Vec::with_capacity(capacity) }
+ Wtf8Buf { bytes: Vec::with_capacity(capacity), is_known_utf8: true }
}
/// Creates a WTF-8 string from a UTF-8 `String`.
@@ -163,7 +189,7 @@ impl Wtf8Buf {
/// Since WTF-8 is a superset of UTF-8, this always succeeds.
#[inline]
pub fn from_string(string: String) -> Wtf8Buf {
- Wtf8Buf { bytes: string.into_bytes() }
+ Wtf8Buf { bytes: string.into_bytes(), is_known_utf8: true }
}
/// Creates a WTF-8 string from a UTF-8 `&str` slice.
@@ -173,11 +199,12 @@ impl Wtf8Buf {
/// Since WTF-8 is a superset of UTF-8, this always succeeds.
#[inline]
pub fn from_str(str: &str) -> Wtf8Buf {
- Wtf8Buf { bytes: <[_]>::to_vec(str.as_bytes()) }
+ Wtf8Buf { bytes: <[_]>::to_vec(str.as_bytes()), is_known_utf8: true }
}
pub fn clear(&mut self) {
- self.bytes.clear()
+ self.bytes.clear();
+ self.is_known_utf8 = true;
}
/// Creates a WTF-8 string from a potentially ill-formed UTF-16 slice of 16-bit code units.
@@ -193,9 +220,11 @@ impl Wtf8Buf {
let surrogate = surrogate.unpaired_surrogate();
// Surrogates are known to be in the code point range.
let code_point = unsafe { CodePoint::from_u32_unchecked(surrogate as u32) };
+ // The string will now contain an unpaired surrogate.
+ string.is_known_utf8 = false;
// Skip the WTF-8 concatenation check,
// surrogate pairs are already decoded by decode_utf16
- string.push_code_point_unchecked(code_point)
+ string.push_code_point_unchecked(code_point);
}
}
}
@@ -203,7 +232,7 @@ impl Wtf8Buf {
}
/// Copied from String::push
- /// This does **not** include the WTF-8 concatenation check.
+ /// This does **not** include the WTF-8 concatenation check or `is_known_utf8` check.
fn push_code_point_unchecked(&mut self, code_point: CodePoint) {
let mut bytes = [0; 4];
let bytes = char::encode_utf8_raw(code_point.value, &mut bytes);
@@ -217,6 +246,9 @@ impl Wtf8Buf {
#[inline]
pub fn as_mut_slice(&mut self) -> &mut Wtf8 {
+ // Safety: `Wtf8` doesn't expose any way to mutate the bytes that would
+ // cause them to change from well-formed UTF-8 to ill-formed UTF-8,
+ // which would break the assumptions of the `is_known_utf8` field.
unsafe { Wtf8::from_mut_bytes_unchecked(&mut self.bytes) }
}
@@ -236,7 +268,8 @@ impl Wtf8Buf {
/// in the given `Wtf8Buf`. The `Wtf8Buf` may reserve more space to avoid
/// frequent reallocations. After calling `try_reserve`, capacity will be
/// greater than or equal to `self.len() + additional`. Does nothing if
- /// capacity is already sufficient.
+ /// capacity is already sufficient. This method preserves the contents even
+ /// if an error occurs.
///
/// # Errors
///
@@ -313,7 +346,15 @@ impl Wtf8Buf {
self.push_char(decode_surrogate_pair(lead, trail));
self.bytes.extend_from_slice(other_without_trail_surrogate);
}
- _ => self.bytes.extend_from_slice(&other.bytes),
+ _ => {
+ // If we'll be pushing a string containing a surrogate, we may
+ // no longer have UTF-8.
+ if other.next_surrogate(0).is_some() {
+ self.is_known_utf8 = false;
+ }
+
+ self.bytes.extend_from_slice(&other.bytes);
+ }
}
}
@@ -330,13 +371,19 @@ impl Wtf8Buf {
/// like concatenating ill-formed UTF-16 strings effectively would.
#[inline]
pub fn push(&mut self, code_point: CodePoint) {
- if let trail @ 0xDC00..=0xDFFF = code_point.to_u32() {
+ if let Some(trail) = code_point.to_trail_surrogate() {
if let Some(lead) = (&*self).final_lead_surrogate() {
let len_without_lead_surrogate = self.len() - 3;
self.bytes.truncate(len_without_lead_surrogate);
- self.push_char(decode_surrogate_pair(lead, trail as u16));
+ self.push_char(decode_surrogate_pair(lead, trail));
return;
}
+
+ // We're pushing a trailing surrogate.
+ self.is_known_utf8 = false;
+ } else if code_point.to_lead_surrogate().is_some() {
+ // We're pushing a leading surrogate.
+ self.is_known_utf8 = false;
}
// No newly paired surrogates at the boundary.
@@ -363,9 +410,10 @@ impl Wtf8Buf {
/// (that is, if the string contains surrogates),
/// the original WTF-8 string is returned instead.
pub fn into_string(self) -> Result<String, Wtf8Buf> {
- match self.next_surrogate(0) {
- None => Ok(unsafe { String::from_utf8_unchecked(self.bytes) }),
- Some(_) => Err(self),
+ if self.is_known_utf8 || self.next_surrogate(0).is_none() {
+ Ok(unsafe { String::from_utf8_unchecked(self.bytes) })
+ } else {
+ Err(self)
}
}
@@ -375,6 +423,11 @@ impl Wtf8Buf {
///
/// Surrogates are replaced with `"\u{FFFD}"` (the replacement character “�”)
pub fn into_string_lossy(mut self) -> String {
+ // Fast path: If we already have UTF-8, we can return it immediately.
+ if self.is_known_utf8 {
+ return unsafe { String::from_utf8_unchecked(self.bytes) };
+ }
+
let mut pos = 0;
loop {
match self.next_surrogate(pos) {
@@ -397,7 +450,7 @@ impl Wtf8Buf {
/// Converts a `Box<Wtf8>` into a `Wtf8Buf`.
pub fn from_box(boxed: Box<Wtf8>) -> Wtf8Buf {
let bytes: Box<[u8]> = unsafe { mem::transmute(boxed) };
- Wtf8Buf { bytes: bytes.into_vec() }
+ Wtf8Buf { bytes: bytes.into_vec(), is_known_utf8: false }
}
}
@@ -575,6 +628,11 @@ impl Wtf8 {
}
}
+ /// Creates an owned `Wtf8Buf` from a borrowed `Wtf8`.
+ pub fn to_owned(&self) -> Wtf8Buf {
+ Wtf8Buf { bytes: self.bytes.to_vec(), is_known_utf8: false }
+ }
+
/// Lossily converts the string to UTF-8.
/// Returns a UTF-8 `&str` slice if the contents are well-formed in UTF-8.
///
@@ -664,7 +722,8 @@ impl Wtf8 {
}
pub fn clone_into(&self, buf: &mut Wtf8Buf) {
- self.bytes.clone_into(&mut buf.bytes)
+ buf.is_known_utf8 = false;
+ self.bytes.clone_into(&mut buf.bytes);
}
/// Boxes this `Wtf8`.
@@ -704,12 +763,12 @@ impl Wtf8 {
#[inline]
pub fn to_ascii_lowercase(&self) -> Wtf8Buf {
- Wtf8Buf { bytes: self.bytes.to_ascii_lowercase() }
+ Wtf8Buf { bytes: self.bytes.to_ascii_lowercase(), is_known_utf8: false }
}
#[inline]
pub fn to_ascii_uppercase(&self) -> Wtf8Buf {
- Wtf8Buf { bytes: self.bytes.to_ascii_uppercase() }
+ Wtf8Buf { bytes: self.bytes.to_ascii_uppercase(), is_known_utf8: false }
}
#[inline]
diff --git a/library/std/src/sys_common/wtf8/tests.rs b/library/std/src/sys_common/wtf8/tests.rs
index 931996791..1a302d646 100644
--- a/library/std/src/sys_common/wtf8/tests.rs
+++ b/library/std/src/sys_common/wtf8/tests.rs
@@ -20,6 +20,36 @@ fn code_point_to_u32() {
}
#[test]
+fn code_point_to_lead_surrogate() {
+ fn c(value: u32) -> CodePoint {
+ CodePoint::from_u32(value).unwrap()
+ }
+ assert_eq!(c(0).to_lead_surrogate(), None);
+ assert_eq!(c(0xE9).to_lead_surrogate(), None);
+ assert_eq!(c(0xD800).to_lead_surrogate(), Some(0xD800));
+ assert_eq!(c(0xDBFF).to_lead_surrogate(), Some(0xDBFF));
+ assert_eq!(c(0xDC00).to_lead_surrogate(), None);
+ assert_eq!(c(0xDFFF).to_lead_surrogate(), None);
+ assert_eq!(c(0x1F4A9).to_lead_surrogate(), None);
+ assert_eq!(c(0x10FFFF).to_lead_surrogate(), None);
+}
+
+#[test]
+fn code_point_to_trail_surrogate() {
+ fn c(value: u32) -> CodePoint {
+ CodePoint::from_u32(value).unwrap()
+ }
+ assert_eq!(c(0).to_trail_surrogate(), None);
+ assert_eq!(c(0xE9).to_trail_surrogate(), None);
+ assert_eq!(c(0xD800).to_trail_surrogate(), None);
+ assert_eq!(c(0xDBFF).to_trail_surrogate(), None);
+ assert_eq!(c(0xDC00).to_trail_surrogate(), Some(0xDC00));
+ assert_eq!(c(0xDFFF).to_trail_surrogate(), Some(0xDFFF));
+ assert_eq!(c(0x1F4A9).to_trail_surrogate(), None);
+ assert_eq!(c(0x10FFFF).to_trail_surrogate(), None);
+}
+
+#[test]
fn code_point_from_char() {
assert_eq!(CodePoint::from_char('a').to_u32(), 0x61);
assert_eq!(CodePoint::from_char('💩').to_u32(), 0x1F4A9);
@@ -70,35 +100,66 @@ fn wtf8buf_from_string() {
#[test]
fn wtf8buf_from_wide() {
- assert_eq!(Wtf8Buf::from_wide(&[]).bytes, b"");
- assert_eq!(
- Wtf8Buf::from_wide(&[0x61, 0xE9, 0x20, 0xD83D, 0xD83D, 0xDCA9]).bytes,
- b"a\xC3\xA9 \xED\xA0\xBD\xF0\x9F\x92\xA9"
- );
+ let buf = Wtf8Buf::from_wide(&[]);
+ assert_eq!(buf.bytes, b"");
+ assert!(buf.is_known_utf8);
+
+ let buf = Wtf8Buf::from_wide(&[0x61, 0xE9, 0x20, 0xD83D, 0xDCA9]);
+ assert_eq!(buf.bytes, b"a\xC3\xA9 \xF0\x9F\x92\xA9");
+ assert!(buf.is_known_utf8);
+
+ let buf = Wtf8Buf::from_wide(&[0x61, 0xE9, 0x20, 0xD83D, 0xD83D, 0xDCA9]);
+ assert_eq!(buf.bytes, b"a\xC3\xA9 \xED\xA0\xBD\xF0\x9F\x92\xA9");
+ assert!(!buf.is_known_utf8);
+
+ let buf = Wtf8Buf::from_wide(&[0xD800]);
+ assert_eq!(buf.bytes, b"\xED\xA0\x80");
+ assert!(!buf.is_known_utf8);
+
+ let buf = Wtf8Buf::from_wide(&[0xDBFF]);
+ assert_eq!(buf.bytes, b"\xED\xAF\xBF");
+ assert!(!buf.is_known_utf8);
+
+ let buf = Wtf8Buf::from_wide(&[0xDC00]);
+ assert_eq!(buf.bytes, b"\xED\xB0\x80");
+ assert!(!buf.is_known_utf8);
+
+ let buf = Wtf8Buf::from_wide(&[0xDFFF]);
+ assert_eq!(buf.bytes, b"\xED\xBF\xBF");
+ assert!(!buf.is_known_utf8);
}
#[test]
fn wtf8buf_push_str() {
let mut string = Wtf8Buf::new();
assert_eq!(string.bytes, b"");
+ assert!(string.is_known_utf8);
+
string.push_str("aé 💩");
assert_eq!(string.bytes, b"a\xC3\xA9 \xF0\x9F\x92\xA9");
+ assert!(string.is_known_utf8);
}
#[test]
fn wtf8buf_push_char() {
let mut string = Wtf8Buf::from_str("aé ");
assert_eq!(string.bytes, b"a\xC3\xA9 ");
+ assert!(string.is_known_utf8);
+
string.push_char('💩');
assert_eq!(string.bytes, b"a\xC3\xA9 \xF0\x9F\x92\xA9");
+ assert!(string.is_known_utf8);
}
#[test]
fn wtf8buf_push() {
let mut string = Wtf8Buf::from_str("aé ");
assert_eq!(string.bytes, b"a\xC3\xA9 ");
+ assert!(string.is_known_utf8);
+
string.push(CodePoint::from_char('💩'));
assert_eq!(string.bytes, b"a\xC3\xA9 \xF0\x9F\x92\xA9");
+ assert!(string.is_known_utf8);
fn c(value: u32) -> CodePoint {
CodePoint::from_u32(value).unwrap()
@@ -106,37 +167,46 @@ fn wtf8buf_push() {
let mut string = Wtf8Buf::new();
string.push(c(0xD83D)); // lead
+ assert!(!string.is_known_utf8);
string.push(c(0xDCA9)); // trail
assert_eq!(string.bytes, b"\xF0\x9F\x92\xA9"); // Magic!
let mut string = Wtf8Buf::new();
string.push(c(0xD83D)); // lead
+ assert!(!string.is_known_utf8);
string.push(c(0x20)); // not surrogate
string.push(c(0xDCA9)); // trail
assert_eq!(string.bytes, b"\xED\xA0\xBD \xED\xB2\xA9");
let mut string = Wtf8Buf::new();
string.push(c(0xD800)); // lead
+ assert!(!string.is_known_utf8);
string.push(c(0xDBFF)); // lead
assert_eq!(string.bytes, b"\xED\xA0\x80\xED\xAF\xBF");
let mut string = Wtf8Buf::new();
string.push(c(0xD800)); // lead
+ assert!(!string.is_known_utf8);
string.push(c(0xE000)); // not surrogate
assert_eq!(string.bytes, b"\xED\xA0\x80\xEE\x80\x80");
let mut string = Wtf8Buf::new();
string.push(c(0xD7FF)); // not surrogate
+ assert!(string.is_known_utf8);
string.push(c(0xDC00)); // trail
+ assert!(!string.is_known_utf8);
assert_eq!(string.bytes, b"\xED\x9F\xBF\xED\xB0\x80");
let mut string = Wtf8Buf::new();
string.push(c(0x61)); // not surrogate, < 3 bytes
+ assert!(string.is_known_utf8);
string.push(c(0xDC00)); // trail
+ assert!(!string.is_known_utf8);
assert_eq!(string.bytes, b"\x61\xED\xB0\x80");
let mut string = Wtf8Buf::new();
string.push(c(0xDC00)); // trail
+ assert!(!string.is_known_utf8);
assert_eq!(string.bytes, b"\xED\xB0\x80");
}
@@ -146,6 +216,7 @@ fn wtf8buf_push_wtf8() {
assert_eq!(string.bytes, b"a\xC3\xA9");
string.push_wtf8(Wtf8::from_str(" 💩"));
assert_eq!(string.bytes, b"a\xC3\xA9 \xF0\x9F\x92\xA9");
+ assert!(string.is_known_utf8);
fn w(v: &[u8]) -> &Wtf8 {
unsafe { Wtf8::from_bytes_unchecked(v) }
@@ -161,37 +232,68 @@ fn wtf8buf_push_wtf8() {
string.push_wtf8(w(b" ")); // not surrogate
string.push_wtf8(w(b"\xED\xB2\xA9")); // trail
assert_eq!(string.bytes, b"\xED\xA0\xBD \xED\xB2\xA9");
+ assert!(!string.is_known_utf8);
let mut string = Wtf8Buf::new();
string.push_wtf8(w(b"\xED\xA0\x80")); // lead
string.push_wtf8(w(b"\xED\xAF\xBF")); // lead
assert_eq!(string.bytes, b"\xED\xA0\x80\xED\xAF\xBF");
+ assert!(!string.is_known_utf8);
let mut string = Wtf8Buf::new();
string.push_wtf8(w(b"\xED\xA0\x80")); // lead
string.push_wtf8(w(b"\xEE\x80\x80")); // not surrogate
assert_eq!(string.bytes, b"\xED\xA0\x80\xEE\x80\x80");
+ assert!(!string.is_known_utf8);
let mut string = Wtf8Buf::new();
string.push_wtf8(w(b"\xED\x9F\xBF")); // not surrogate
string.push_wtf8(w(b"\xED\xB0\x80")); // trail
assert_eq!(string.bytes, b"\xED\x9F\xBF\xED\xB0\x80");
+ assert!(!string.is_known_utf8);
let mut string = Wtf8Buf::new();
string.push_wtf8(w(b"a")); // not surrogate, < 3 bytes
string.push_wtf8(w(b"\xED\xB0\x80")); // trail
assert_eq!(string.bytes, b"\x61\xED\xB0\x80");
+ assert!(!string.is_known_utf8);
let mut string = Wtf8Buf::new();
string.push_wtf8(w(b"\xED\xB0\x80")); // trail
assert_eq!(string.bytes, b"\xED\xB0\x80");
+ assert!(!string.is_known_utf8);
}
#[test]
fn wtf8buf_truncate() {
let mut string = Wtf8Buf::from_str("aé");
+ assert!(string.is_known_utf8);
+
+ string.truncate(3);
+ assert_eq!(string.bytes, b"a\xC3\xA9");
+ assert!(string.is_known_utf8);
+
string.truncate(1);
assert_eq!(string.bytes, b"a");
+ assert!(string.is_known_utf8);
+
+ string.truncate(0);
+ assert_eq!(string.bytes, b"");
+ assert!(string.is_known_utf8);
+}
+
+#[test]
+fn wtf8buf_truncate_around_non_bmp() {
+ let mut string = Wtf8Buf::from_str("💩");
+ assert!(string.is_known_utf8);
+
+ string.truncate(4);
+ assert_eq!(string.bytes, b"\xF0\x9F\x92\xA9");
+ assert!(string.is_known_utf8);
+
+ string.truncate(0);
+ assert_eq!(string.bytes, b"");
+ assert!(string.is_known_utf8);
}
#[test]
@@ -209,10 +311,36 @@ fn wtf8buf_truncate_fail_longer() {
}
#[test]
+#[should_panic]
+fn wtf8buf_truncate_splitting_non_bmp3() {
+ let mut string = Wtf8Buf::from_str("💩");
+ assert!(string.is_known_utf8);
+ string.truncate(3);
+}
+
+#[test]
+#[should_panic]
+fn wtf8buf_truncate_splitting_non_bmp2() {
+ let mut string = Wtf8Buf::from_str("💩");
+ assert!(string.is_known_utf8);
+ string.truncate(2);
+}
+
+#[test]
+#[should_panic]
+fn wtf8buf_truncate_splitting_non_bmp1() {
+ let mut string = Wtf8Buf::from_str("💩");
+ assert!(string.is_known_utf8);
+ string.truncate(1);
+}
+
+#[test]
fn wtf8buf_into_string() {
let mut string = Wtf8Buf::from_str("aé 💩");
+ assert!(string.is_known_utf8);
assert_eq!(string.clone().into_string(), Ok(String::from("aé 💩")));
string.push(CodePoint::from_u32(0xD800).unwrap());
+ assert!(!string.is_known_utf8);
assert_eq!(string.clone().into_string(), Err(string));
}
@@ -229,15 +357,33 @@ fn wtf8buf_from_iterator() {
fn f(values: &[u32]) -> Wtf8Buf {
values.iter().map(|&c| CodePoint::from_u32(c).unwrap()).collect::<Wtf8Buf>()
}
- assert_eq!(f(&[0x61, 0xE9, 0x20, 0x1F4A9]).bytes, b"a\xC3\xA9 \xF0\x9F\x92\xA9");
+ assert_eq!(
+ f(&[0x61, 0xE9, 0x20, 0x1F4A9]),
+ Wtf8Buf { bytes: b"a\xC3\xA9 \xF0\x9F\x92\xA9".to_vec(), is_known_utf8: true }
+ );
assert_eq!(f(&[0xD83D, 0xDCA9]).bytes, b"\xF0\x9F\x92\xA9"); // Magic!
- assert_eq!(f(&[0xD83D, 0x20, 0xDCA9]).bytes, b"\xED\xA0\xBD \xED\xB2\xA9");
- assert_eq!(f(&[0xD800, 0xDBFF]).bytes, b"\xED\xA0\x80\xED\xAF\xBF");
- assert_eq!(f(&[0xD800, 0xE000]).bytes, b"\xED\xA0\x80\xEE\x80\x80");
- assert_eq!(f(&[0xD7FF, 0xDC00]).bytes, b"\xED\x9F\xBF\xED\xB0\x80");
- assert_eq!(f(&[0x61, 0xDC00]).bytes, b"\x61\xED\xB0\x80");
- assert_eq!(f(&[0xDC00]).bytes, b"\xED\xB0\x80");
+ assert_eq!(
+ f(&[0xD83D, 0x20, 0xDCA9]),
+ Wtf8Buf { bytes: b"\xED\xA0\xBD \xED\xB2\xA9".to_vec(), is_known_utf8: false }
+ );
+ assert_eq!(
+ f(&[0xD800, 0xDBFF]),
+ Wtf8Buf { bytes: b"\xED\xA0\x80\xED\xAF\xBF".to_vec(), is_known_utf8: false }
+ );
+ assert_eq!(
+ f(&[0xD800, 0xE000]),
+ Wtf8Buf { bytes: b"\xED\xA0\x80\xEE\x80\x80".to_vec(), is_known_utf8: false }
+ );
+ assert_eq!(
+ f(&[0xD7FF, 0xDC00]),
+ Wtf8Buf { bytes: b"\xED\x9F\xBF\xED\xB0\x80".to_vec(), is_known_utf8: false }
+ );
+ assert_eq!(
+ f(&[0x61, 0xDC00]),
+ Wtf8Buf { bytes: b"\x61\xED\xB0\x80".to_vec(), is_known_utf8: false }
+ );
+ assert_eq!(f(&[0xDC00]), Wtf8Buf { bytes: b"\xED\xB0\x80".to_vec(), is_known_utf8: false });
}
#[test]
@@ -251,15 +397,36 @@ fn wtf8buf_extend() {
string
}
- assert_eq!(e(&[0x61, 0xE9], &[0x20, 0x1F4A9]).bytes, b"a\xC3\xA9 \xF0\x9F\x92\xA9");
+ assert_eq!(
+ e(&[0x61, 0xE9], &[0x20, 0x1F4A9]),
+ Wtf8Buf { bytes: b"a\xC3\xA9 \xF0\x9F\x92\xA9".to_vec(), is_known_utf8: true }
+ );
assert_eq!(e(&[0xD83D], &[0xDCA9]).bytes, b"\xF0\x9F\x92\xA9"); // Magic!
- assert_eq!(e(&[0xD83D, 0x20], &[0xDCA9]).bytes, b"\xED\xA0\xBD \xED\xB2\xA9");
- assert_eq!(e(&[0xD800], &[0xDBFF]).bytes, b"\xED\xA0\x80\xED\xAF\xBF");
- assert_eq!(e(&[0xD800], &[0xE000]).bytes, b"\xED\xA0\x80\xEE\x80\x80");
- assert_eq!(e(&[0xD7FF], &[0xDC00]).bytes, b"\xED\x9F\xBF\xED\xB0\x80");
- assert_eq!(e(&[0x61], &[0xDC00]).bytes, b"\x61\xED\xB0\x80");
- assert_eq!(e(&[], &[0xDC00]).bytes, b"\xED\xB0\x80");
+ assert_eq!(
+ e(&[0xD83D, 0x20], &[0xDCA9]),
+ Wtf8Buf { bytes: b"\xED\xA0\xBD \xED\xB2\xA9".to_vec(), is_known_utf8: false }
+ );
+ assert_eq!(
+ e(&[0xD800], &[0xDBFF]),
+ Wtf8Buf { bytes: b"\xED\xA0\x80\xED\xAF\xBF".to_vec(), is_known_utf8: false }
+ );
+ assert_eq!(
+ e(&[0xD800], &[0xE000]),
+ Wtf8Buf { bytes: b"\xED\xA0\x80\xEE\x80\x80".to_vec(), is_known_utf8: false }
+ );
+ assert_eq!(
+ e(&[0xD7FF], &[0xDC00]),
+ Wtf8Buf { bytes: b"\xED\x9F\xBF\xED\xB0\x80".to_vec(), is_known_utf8: false }
+ );
+ assert_eq!(
+ e(&[0x61], &[0xDC00]),
+ Wtf8Buf { bytes: b"\x61\xED\xB0\x80".to_vec(), is_known_utf8: false }
+ );
+ assert_eq!(
+ e(&[], &[0xDC00]),
+ Wtf8Buf { bytes: b"\xED\xB0\x80".to_vec(), is_known_utf8: false }
+ );
}
#[test]
@@ -407,3 +574,93 @@ fn wtf8_encode_wide_size_hint() {
assert_eq!((0, Some(0)), iter.size_hint());
assert!(iter.next().is_none());
}
+
+#[test]
+fn wtf8_clone_into() {
+ let mut string = Wtf8Buf::new();
+ Wtf8::from_str("green").clone_into(&mut string);
+ assert_eq!(string.bytes, b"green");
+
+ let mut string = Wtf8Buf::from_str("green");
+ Wtf8::from_str("").clone_into(&mut string);
+ assert_eq!(string.bytes, b"");
+
+ let mut string = Wtf8Buf::from_str("red");
+ Wtf8::from_str("green").clone_into(&mut string);
+ assert_eq!(string.bytes, b"green");
+
+ let mut string = Wtf8Buf::from_str("green");
+ Wtf8::from_str("red").clone_into(&mut string);
+ assert_eq!(string.bytes, b"red");
+
+ let mut string = Wtf8Buf::from_str("green");
+ assert!(string.is_known_utf8);
+ unsafe { Wtf8::from_bytes_unchecked(b"\xED\xA0\x80").clone_into(&mut string) };
+ assert_eq!(string.bytes, b"\xED\xA0\x80");
+ assert!(!string.is_known_utf8);
+}
+
+#[test]
+fn wtf8_to_ascii_lowercase() {
+ let lowercase = Wtf8::from_str("").to_ascii_lowercase();
+ assert_eq!(lowercase.bytes, b"");
+
+ let lowercase = Wtf8::from_str("GrEeN gRaPeS! 🍇").to_ascii_lowercase();
+ assert_eq!(lowercase.bytes, b"green grapes! \xf0\x9f\x8d\x87");
+
+ let lowercase = unsafe { Wtf8::from_bytes_unchecked(b"\xED\xA0\x80").to_ascii_lowercase() };
+ assert_eq!(lowercase.bytes, b"\xED\xA0\x80");
+ assert!(!lowercase.is_known_utf8);
+}
+
+#[test]
+fn wtf8_to_ascii_uppercase() {
+ let uppercase = Wtf8::from_str("").to_ascii_uppercase();
+ assert_eq!(uppercase.bytes, b"");
+
+ let uppercase = Wtf8::from_str("GrEeN gRaPeS! 🍇").to_ascii_uppercase();
+ assert_eq!(uppercase.bytes, b"GREEN GRAPES! \xf0\x9f\x8d\x87");
+
+ let uppercase = unsafe { Wtf8::from_bytes_unchecked(b"\xED\xA0\x80").to_ascii_uppercase() };
+ assert_eq!(uppercase.bytes, b"\xED\xA0\x80");
+ assert!(!uppercase.is_known_utf8);
+}
+
+#[test]
+fn wtf8_make_ascii_lowercase() {
+ let mut lowercase = Wtf8Buf::from_str("");
+ lowercase.make_ascii_lowercase();
+ assert_eq!(lowercase.bytes, b"");
+
+ let mut lowercase = Wtf8Buf::from_str("GrEeN gRaPeS! 🍇");
+ lowercase.make_ascii_lowercase();
+ assert_eq!(lowercase.bytes, b"green grapes! \xf0\x9f\x8d\x87");
+
+ let mut lowercase = unsafe { Wtf8::from_bytes_unchecked(b"\xED\xA0\x80").to_owned() };
+ lowercase.make_ascii_lowercase();
+ assert_eq!(lowercase.bytes, b"\xED\xA0\x80");
+ assert!(!lowercase.is_known_utf8);
+}
+
+#[test]
+fn wtf8_make_ascii_uppercase() {
+ let mut uppercase = Wtf8Buf::from_str("");
+ uppercase.make_ascii_uppercase();
+ assert_eq!(uppercase.bytes, b"");
+
+ let mut uppercase = Wtf8Buf::from_str("GrEeN gRaPeS! 🍇");
+ uppercase.make_ascii_uppercase();
+ assert_eq!(uppercase.bytes, b"GREEN GRAPES! \xf0\x9f\x8d\x87");
+
+ let mut uppercase = unsafe { Wtf8::from_bytes_unchecked(b"\xED\xA0\x80").to_owned() };
+ uppercase.make_ascii_uppercase();
+ assert_eq!(uppercase.bytes, b"\xED\xA0\x80");
+ assert!(!uppercase.is_known_utf8);
+}
+
+#[test]
+fn wtf8_to_owned() {
+ let string = unsafe { Wtf8::from_bytes_unchecked(b"\xED\xA0\x80").to_owned() };
+ assert_eq!(string.bytes, b"\xED\xA0\x80");
+ assert!(!string.is_known_utf8);
+}
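
The tests above exercise a cached UTF-8 validity flag on the buffer: operations that only append valid UTF-8 keep `is_known_utf8` set, while pushing lone surrogates clears it, so converting to `String` can skip re-validation. A standalone sketch of that pattern (the `KnownUtf8Buf` name is hypothetical, not the std-internal type):

struct KnownUtf8Buf {
    bytes: Vec<u8>,
    is_known_utf8: bool,
}

impl KnownUtf8Buf {
    fn from_str(s: &str) -> Self {
        // Bytes taken from a &str are valid UTF-8 by construction.
        KnownUtf8Buf { bytes: s.as_bytes().to_vec(), is_known_utf8: true }
    }

    fn push_raw(&mut self, b: &[u8]) {
        // Arbitrary bytes (e.g. an unpaired surrogate) may break validity.
        self.bytes.extend_from_slice(b);
        self.is_known_utf8 = false;
    }

    fn into_string(self) -> Result<String, Vec<u8>> {
        if self.is_known_utf8 {
            // SAFETY: the flag is only ever set for bytes copied from valid UTF-8.
            Ok(unsafe { String::from_utf8_unchecked(self.bytes) })
        } else {
            String::from_utf8(self.bytes).map_err(|e| e.into_bytes())
        }
    }
}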
diff --git a/library/std/src/thread/local.rs b/library/std/src/thread/local.rs
index f4750cdf7..5d267891b 100644
--- a/library/std/src/thread/local.rs
+++ b/library/std/src/thread/local.rs
@@ -95,6 +95,7 @@ use crate::fmt;
/// [loader lock]: https://docs.microsoft.com/en-us/windows/win32/dlls/dynamic-link-library-best-practices
/// [`JoinHandle::join`]: crate::thread::JoinHandle::join
/// [`with`]: LocalKey::with
+#[cfg_attr(not(test), rustc_diagnostic_item = "LocalKey")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct LocalKey<T: 'static> {
// This outer `LocalKey<T>` type is what's going to be stored in statics,
@@ -900,7 +901,7 @@ pub mod statik {
}
#[doc(hidden)]
-#[cfg(target_thread_local)]
+#[cfg(all(target_thread_local, not(all(target_family = "wasm", not(target_feature = "atomics"))),))]
pub mod fast {
use super::lazy::LazyKeyInner;
use crate::cell::Cell;
@@ -1036,6 +1037,10 @@ pub mod fast {
}
#[doc(hidden)]
+#[cfg(all(
+ not(target_thread_local),
+ not(all(target_family = "wasm", not(target_feature = "atomics"))),
+))]
pub mod os {
use super::lazy::LazyKeyInner;
use crate::cell::Cell;
@@ -1044,6 +1049,8 @@ pub mod os {
use crate::ptr;
use crate::sys_common::thread_local_key::StaticKey as OsStaticKey;
+ /// Use a regular global static to store this key; the state provided will then be
+ /// thread-local.
pub struct Key<T> {
// OS-TLS key that we'll use to key off.
os: OsStaticKey,
diff --git a/library/std/src/thread/mod.rs b/library/std/src/thread/mod.rs
index 44c8a50fd..05023df1b 100644
--- a/library/std/src/thread/mod.rs
+++ b/library/std/src/thread/mod.rs
@@ -116,7 +116,7 @@
//! Threads are able to have associated names for identification purposes. By default, spawned
//! threads are unnamed. To specify a name for a thread, build the thread with [`Builder`] and pass
//! the desired thread name to [`Builder::name`]. To retrieve the thread name from within the
-//! thread, use [`Thread::name`]. A couple examples of where the name of a thread gets used:
+//! thread, use [`Thread::name`]. A couple of examples where the name of a thread gets used:
//!
//! * If a panic occurs in a named thread, the thread name will be printed in the panic message.
//! * The thread name is provided to the OS where applicable (e.g., `pthread_setname_np` in
@@ -150,6 +150,8 @@
#![stable(feature = "rust1", since = "1.0.0")]
#![deny(unsafe_op_in_unsafe_fn)]
+// Under `test`, `__FastLocalKeyInner` seems unused.
+#![cfg_attr(test, allow(dead_code))]
#[cfg(all(test, not(target_os = "emscripten")))]
mod tests;
@@ -160,7 +162,7 @@ use crate::ffi::{CStr, CString};
use crate::fmt;
use crate::io;
use crate::marker::PhantomData;
-use crate::mem;
+use crate::mem::{self, forget};
use crate::num::NonZeroU64;
use crate::num::NonZeroUsize;
use crate::panic;
@@ -170,7 +172,6 @@ use crate::ptr::addr_of_mut;
use crate::str;
use crate::sync::Arc;
use crate::sys::thread as imp;
-use crate::sys_common::mutex;
use crate::sys_common::thread;
use crate::sys_common::thread_info;
use crate::sys_common::thread_parker::Parker;
@@ -193,22 +194,40 @@ pub use scoped::{scope, Scope, ScopedJoinHandle};
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::local::{AccessError, LocalKey};
-// The types used by the thread_local! macro to access TLS keys. Note that there
-// are two types, the "OS" type and the "fast" type. The OS thread local key
-// type is accessed via platform-specific API calls and is slow, while the fast
+// Provide the type used by the thread_local! macro to access TLS keys. This
+// needs to be kept in sync with the macro itself (in `local.rs`).
+// There are three types: "static", "fast", "OS". The "OS" thread local key
+// type is accessed via platform-specific API calls and is slow, while the "fast"
// key type is accessed via code generated via LLVM, where TLS keys are set up
-// by the elf linker. Note that the OS TLS type is always available: on macOS
-// the standard library is compiled with support for older platform versions
-// where fast TLS was not available; end-user code is compiled with fast TLS
-// where available, but both are needed.
+// by the elf linker. "static" is for single-threaded platforms where a global
+// static is sufficient.
#[unstable(feature = "libstd_thread_internals", issue = "none")]
-#[cfg(target_thread_local)]
+#[cfg(not(test))]
+#[cfg(all(
+ target_thread_local,
+ not(all(target_family = "wasm", not(target_feature = "atomics"))),
+))]
#[doc(hidden)]
pub use self::local::fast::Key as __FastLocalKeyInner;
+
+// when building for tests, use real std's type
+#[unstable(feature = "libstd_thread_internals", issue = "none")]
+#[cfg(test)]
+#[cfg(all(
+ target_thread_local,
+ not(all(target_family = "wasm", not(target_feature = "atomics"))),
+))]
+pub use realstd::thread::__FastLocalKeyInner;
+
#[unstable(feature = "libstd_thread_internals", issue = "none")]
+#[cfg(all(
+ not(target_thread_local),
+ not(all(target_family = "wasm", not(target_feature = "atomics"))),
+))]
#[doc(hidden)]
pub use self::local::os::Key as __OsLocalKeyInner;
+
#[unstable(feature = "libstd_thread_internals", issue = "none")]
#[cfg(all(target_family = "wasm", not(target_feature = "atomics")))]
#[doc(hidden)]
@@ -490,6 +509,31 @@ impl Builder {
let output_capture = crate::io::set_output_capture(None);
crate::io::set_output_capture(output_capture.clone());
+ // Pass `f` in `MaybeUninit` because that closure might actually *run longer than the lifetime of `F`*.
+ // See <https://github.com/rust-lang/rust/issues/101983> for more details.
+ // To prevent leaks we use a wrapper that drops its contents.
+ #[repr(transparent)]
+ struct MaybeDangling<T>(mem::MaybeUninit<T>);
+ impl<T> MaybeDangling<T> {
+ fn new(x: T) -> Self {
+ MaybeDangling(mem::MaybeUninit::new(x))
+ }
+ fn into_inner(self) -> T {
+ // SAFETY: we are always initialized.
+ let ret = unsafe { self.0.assume_init_read() };
+ // Make sure we don't drop.
+ mem::forget(self);
+ ret
+ }
+ }
+ impl<T> Drop for MaybeDangling<T> {
+ fn drop(&mut self) {
+ // SAFETY: we are always initialized.
+ unsafe { self.0.assume_init_drop() };
+ }
+ }
+
+ let f = MaybeDangling::new(f);
let main = move || {
if let Some(name) = their_thread.cname() {
imp::Thread::set_name(name);
@@ -497,6 +541,8 @@ impl Builder {
crate::io::set_output_capture(output_capture);
+ // SAFETY: we constructed `f` initialized.
+ let f = f.into_inner();
// SAFETY: the stack guard passed is the one for the current thread.
// This means the current thread's stack and the new thread's stack
// are properly set and protected from each other.
@@ -509,6 +555,12 @@ impl Builder {
// same `JoinInner` as this closure meaning the mutation will be
// safe (not modify it and affect a value far away).
unsafe { *their_packet.result.get() = Some(try_result) };
+ // Here `their_packet` gets dropped, and if this is the last `Arc` for that packet,
+ // that will call `decrement_num_running_threads` and therefore signal that this thread
+ // is done.
+ drop(their_packet);
+ // Here, the lifetime `'a` and even `'scope` can end. `main` keeps running for a bit
+ // after that before returning itself.
};
if let Some(scope_data) = &my_packet.scope {
@@ -770,6 +822,8 @@ pub fn panicking() -> bool {
panicking::panicking()
}
+/// Use [`sleep`].
+///
/// Puts the current thread to sleep for at least the specified amount of time.
///
/// The thread may sleep longer than the duration specified due to scheduling
@@ -840,10 +894,22 @@ pub fn sleep(dur: Duration) {
imp::Thread::sleep(dur)
}
+/// Used to ensure that `park` and `park_timeout` do not unwind, as that can
+/// cause undefined behaviour if not handled correctly (see #102398 for context).
+struct PanicGuard;
+
+impl Drop for PanicGuard {
+ fn drop(&mut self) {
+ rtabort!("an irrecoverable error occurred while synchronizing threads")
+ }
+}
+
/// Blocks unless or until the current thread's token is made available.
///
/// A call to `park` does not guarantee that the thread will remain parked
-/// forever, and callers should be prepared for this possibility.
+/// forever, and callers should be prepared for this possibility. However,
+/// it is guaranteed that this function will not panic (it may abort the
+/// process if the implementation encounters some rare errors).
///
/// # park and unpark
///
@@ -928,10 +994,13 @@ pub fn sleep(dur: Duration) {
/// [`thread::park_timeout`]: park_timeout
#[stable(feature = "rust1", since = "1.0.0")]
pub fn park() {
+ let guard = PanicGuard;
// SAFETY: park_timeout is called on the parker owned by this thread.
unsafe {
current().inner.as_ref().parker().park();
}
+ // No panic occurred, do not abort.
+ forget(guard);
}
/// Use [`park_timeout`].
@@ -992,10 +1061,13 @@ pub fn park_timeout_ms(ms: u32) {
/// ```
#[stable(feature = "park_timeout", since = "1.4.0")]
pub fn park_timeout(dur: Duration) {
+ let guard = PanicGuard;
// SAFETY: park_timeout is called on the parker owned by this thread.
unsafe {
current().inner.as_ref().parker().park_timeout(dur);
}
+ // No panic occurred, do not abort.
+ forget(guard);
}
////////////////////////////////////////////////////////////////////////////////
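
For reference, the `PanicGuard` used by `park` and `park_timeout` above is the usual abort-on-unwind drop guard: constructed before the critical section and forgotten on the normal path, so its destructor only runs if unwinding escapes. A minimal standalone sketch (using `std::process::abort` in place of the internal `rtabort!` macro):

use std::process::abort;

struct PanicGuard;

impl Drop for PanicGuard {
    fn drop(&mut self) {
        // Reached only if the protected code unwound past the guard.
        abort();
    }
}

fn must_not_unwind(f: impl FnOnce()) {
    let guard = PanicGuard;
    f();
    // Normal exit: defuse the guard so the abort never happens.
    std::mem::forget(guard);
}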
@@ -1033,24 +1105,45 @@ pub struct ThreadId(NonZeroU64);
impl ThreadId {
// Generate a new unique thread ID.
fn new() -> ThreadId {
- // It is UB to attempt to acquire this mutex reentrantly!
- static GUARD: mutex::StaticMutex = mutex::StaticMutex::new();
- static mut COUNTER: u64 = 1;
-
- unsafe {
- let guard = GUARD.lock();
-
- // If we somehow use up all our bits, panic so that we're not
- // covering up subtle bugs of IDs being reused.
- if COUNTER == u64::MAX {
- drop(guard); // in case the panic handler ends up calling `ThreadId::new()`, avoid reentrant lock acquire.
- panic!("failed to generate unique thread ID: bitspace exhausted");
- }
-
- let id = COUNTER;
- COUNTER += 1;
+ #[cold]
+ fn exhausted() -> ! {
+ panic!("failed to generate unique thread ID: bitspace exhausted")
+ }
- ThreadId(NonZeroU64::new(id).unwrap())
+ cfg_if::cfg_if! {
+ if #[cfg(target_has_atomic = "64")] {
+ use crate::sync::atomic::{AtomicU64, Ordering::Relaxed};
+
+ static COUNTER: AtomicU64 = AtomicU64::new(0);
+
+ let mut last = COUNTER.load(Relaxed);
+ loop {
+ let Some(id) = last.checked_add(1) else {
+ exhausted();
+ };
+
+ match COUNTER.compare_exchange_weak(last, id, Relaxed, Relaxed) {
+ Ok(_) => return ThreadId(NonZeroU64::new(id).unwrap()),
+ Err(id) => last = id,
+ }
+ }
+ } else {
+ use crate::sync::{Mutex, PoisonError};
+
+ static COUNTER: Mutex<u64> = Mutex::new(0);
+
+ let mut counter = COUNTER.lock().unwrap_or_else(PoisonError::into_inner);
+ let Some(id) = counter.checked_add(1) else {
+ // in case the panic handler ends up calling `ThreadId::new()`,
+ // avoid reentrant lock acquire.
+ drop(counter);
+ exhausted();
+ };
+
+ *counter = id;
+ drop(counter);
+ ThreadId(NonZeroU64::new(id).unwrap())
+ }
}
}
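
The new `ThreadId::new` above replaces a static mutex with a lock-free compare-and-swap loop when 64-bit atomics are available; a standalone sketch of that allocation loop (the `new_id` helper is illustrative, not the std-internal API):

use std::sync::atomic::{AtomicU64, Ordering::Relaxed};

static COUNTER: AtomicU64 = AtomicU64::new(0);

fn new_id() -> u64 {
    let mut last = COUNTER.load(Relaxed);
    loop {
        // Refuse to wrap around rather than silently reuse IDs.
        let id = last.checked_add(1).expect("ID space exhausted");
        // compare_exchange_weak may fail spuriously or because another
        // thread won the race; retry with the freshly observed value.
        match COUNTER.compare_exchange_weak(last, id, Relaxed, Relaxed) {
            Ok(_) => return id,
            Err(observed) => last = observed,
        }
    }
}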
diff --git a/library/std/src/thread/tests.rs b/library/std/src/thread/tests.rs
index ec68b5291..6c9ce6fa0 100644
--- a/library/std/src/thread/tests.rs
+++ b/library/std/src/thread/tests.rs
@@ -37,6 +37,37 @@ fn test_named_thread() {
.unwrap();
}
+#[cfg(any(
+ // Note: musl didn't add pthread_getname_np until 1.2.3
+ all(target_os = "linux", target_env = "gnu"),
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos"
+))]
+#[test]
+fn test_named_thread_truncation() {
+ use crate::ffi::CStr;
+
+ let long_name = crate::iter::once("test_named_thread_truncation")
+ .chain(crate::iter::repeat(" yada").take(100))
+ .collect::<String>();
+
+ let result = Builder::new().name(long_name.clone()).spawn(move || {
+ // Rust remembers the full thread name itself.
+ assert_eq!(thread::current().name(), Some(long_name.as_str()));
+
+ // But the system is limited -- make sure the truncated name was successfully set.
+ let mut buf = vec![0u8; long_name.len() + 1];
+ unsafe {
+ libc::pthread_getname_np(libc::pthread_self(), buf.as_mut_ptr().cast(), buf.len());
+ }
+ let cstr = CStr::from_bytes_until_nul(&buf).unwrap();
+ assert!(cstr.to_bytes().len() > 0);
+ assert!(long_name.as_bytes().starts_with(cstr.to_bytes()));
+ });
+ result.unwrap().join().unwrap();
+}
+
#[test]
#[should_panic]
fn test_invalid_named_thread() {
@@ -245,6 +276,28 @@ fn test_try_panic_any_message_unit_struct() {
}
#[test]
+fn test_park_unpark_before() {
+ for _ in 0..10 {
+ thread::current().unpark();
+ thread::park();
+ }
+}
+
+#[test]
+fn test_park_unpark_called_other_thread() {
+ for _ in 0..10 {
+ let th = thread::current();
+
+ let _guard = thread::spawn(move || {
+ super::sleep(Duration::from_millis(50));
+ th.unpark();
+ });
+
+ thread::park();
+ }
+}
+
+#[test]
fn test_park_timeout_unpark_before() {
for _ in 0..10 {
thread::current().unpark();
@@ -329,3 +382,22 @@ fn test_scoped_threads_nll() {
let x = 42_u8;
foo(&x);
}
+
+// Regression test for https://github.com/rust-lang/rust/issues/98498.
+#[test]
+#[cfg(miri)] // relies on Miri's data race detector
+fn scope_join_race() {
+ for _ in 0..100 {
+ let a_bool = AtomicBool::new(false);
+
+ thread::scope(|s| {
+ for _ in 0..5 {
+ s.spawn(|| a_bool.load(Ordering::Relaxed));
+ }
+
+ for _ in 0..5 {
+ s.spawn(|| a_bool.load(Ordering::Relaxed));
+ }
+ });
+ }
+}
diff --git a/library/std/src/time.rs b/library/std/src/time.rs
index 759a59e1f..ecd06ebf7 100644
--- a/library/std/src/time.rs
+++ b/library/std/src/time.rs
@@ -43,8 +43,8 @@ use crate::sys_common::{FromInner, IntoInner};
#[stable(feature = "time", since = "1.3.0")]
pub use core::time::Duration;
-#[unstable(feature = "duration_checked_float", issue = "83400")]
-pub use core::time::FromFloatSecsError;
+#[stable(feature = "duration_checked_float", since = "1.66.0")]
+pub use core::time::TryFromFloatSecsError;
/// A measurement of a monotonically nondecreasing clock.
/// Opaque and useful only with [`Duration`].
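
The rename above tracks the stabilization of the `duration_checked_float` feature in 1.66; as a quick illustration (assuming that release), the checked constructors that return this error type behave like:

use std::time::Duration;

fn main() {
    // Finite, non-negative, in-range inputs succeed...
    assert!(Duration::try_from_secs_f64(2.5).is_ok());
    // ...while negative or non-finite inputs yield a TryFromFloatSecsError
    // instead of panicking like from_secs_f64 does.
    assert!(Duration::try_from_secs_f64(-1.0).is_err());
    assert!(Duration::try_from_secs_f64(f64::NAN).is_err());
}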
@@ -356,7 +356,7 @@ impl Instant {
///
/// # Panics
///
- /// Previous rust versions panicked when self was earlier than the current time. Currently this
+ /// Previous rust versions panicked when the current time was earlier than self. Currently this
/// method returns a Duration of zero in that case. Future versions may reintroduce the panic.
/// See [Monotonicity].
///
diff --git a/library/std/src/time/tests.rs b/library/std/src/time/tests.rs
index d710a5744..6229556c8 100644
--- a/library/std/src/time/tests.rs
+++ b/library/std/src/time/tests.rs
@@ -31,7 +31,8 @@ fn instant_monotonic_concurrent() -> crate::thread::Result<()> {
.map(|_| {
crate::thread::spawn(|| {
let mut old = Instant::now();
- for _ in 0..5_000_000 {
+ let count = if cfg!(miri) { 1_000 } else { 5_000_000 };
+ for _ in 0..count {
let new = Instant::now();
assert!(new >= old);
old = new;
diff --git a/library/std/tests/run-time-detect.rs b/library/std/tests/run-time-detect.rs
index a57a52d9b..02c076f1b 100644
--- a/library/std/tests/run-time-detect.rs
+++ b/library/std/tests/run-time-detect.rs
@@ -14,77 +14,85 @@
#[cfg(all(target_arch = "arm", any(target_os = "linux", target_os = "android")))]
fn arm_linux() {
use std::arch::is_arm_feature_detected;
+ // tidy-alphabetical-start
+ println!("aes: {}", is_arm_feature_detected!("aes"));
+ println!("crc: {}", is_arm_feature_detected!("crc"));
+ println!("crypto: {}", is_arm_feature_detected!("crypto"));
println!("neon: {}", is_arm_feature_detected!("neon"));
println!("pmull: {}", is_arm_feature_detected!("pmull"));
- println!("crypto: {}", is_arm_feature_detected!("crypto"));
- println!("crc: {}", is_arm_feature_detected!("crc"));
- println!("aes: {}", is_arm_feature_detected!("aes"));
println!("sha2: {}", is_arm_feature_detected!("sha2"));
+ // tidy-alphabetical-end
}
#[test]
#[cfg(all(target_arch = "aarch64", any(target_os = "linux", target_os = "android")))]
fn aarch64_linux() {
use std::arch::is_aarch64_feature_detected;
- println!("neon: {}", is_aarch64_feature_detected!("neon"));
+ // tidy-alphabetical-start
+ println!("aes: {}", is_aarch64_feature_detected!("aes"));
println!("asimd: {}", is_aarch64_feature_detected!("asimd"));
- println!("pmull: {}", is_aarch64_feature_detected!("pmull"));
- println!("fp16: {}", is_aarch64_feature_detected!("fp16"));
- println!("sve: {}", is_aarch64_feature_detected!("sve"));
+ println!("bf16: {}", is_aarch64_feature_detected!("bf16"));
+ println!("bti: {}", is_aarch64_feature_detected!("bti"));
println!("crc: {}", is_aarch64_feature_detected!("crc"));
- println!("lse: {}", is_aarch64_feature_detected!("lse"));
- println!("lse2: {}", is_aarch64_feature_detected!("lse2"));
- println!("rdm: {}", is_aarch64_feature_detected!("rdm"));
- println!("rcpc: {}", is_aarch64_feature_detected!("rcpc"));
- println!("rcpc2: {}", is_aarch64_feature_detected!("rcpc2"));
+ println!("dit: {}", is_aarch64_feature_detected!("dit"));
println!("dotprod: {}", is_aarch64_feature_detected!("dotprod"));
- println!("tme: {}", is_aarch64_feature_detected!("tme"));
+ println!("dpb2: {}", is_aarch64_feature_detected!("dpb2"));
+ println!("dpb: {}", is_aarch64_feature_detected!("dpb"));
+ println!("f32mm: {}", is_aarch64_feature_detected!("f32mm"));
+ println!("f64mm: {}", is_aarch64_feature_detected!("f64mm"));
+ println!("fcma: {}", is_aarch64_feature_detected!("fcma"));
println!("fhm: {}", is_aarch64_feature_detected!("fhm"));
- println!("dit: {}", is_aarch64_feature_detected!("dit"));
println!("flagm: {}", is_aarch64_feature_detected!("flagm"));
- println!("ssbs: {}", is_aarch64_feature_detected!("ssbs"));
- println!("sb: {}", is_aarch64_feature_detected!("sb"));
- println!("paca: {}", is_aarch64_feature_detected!("paca"));
- println!("pacg: {}", is_aarch64_feature_detected!("pacg"));
- println!("dpb: {}", is_aarch64_feature_detected!("dpb"));
- println!("dpb2: {}", is_aarch64_feature_detected!("dpb2"));
- println!("sve2: {}", is_aarch64_feature_detected!("sve2"));
- println!("sve2-aes: {}", is_aarch64_feature_detected!("sve2-aes"));
- println!("sve2-sm4: {}", is_aarch64_feature_detected!("sve2-sm4"));
- println!("sve2-sha3: {}", is_aarch64_feature_detected!("sve2-sha3"));
- println!("sve2-bitperm: {}", is_aarch64_feature_detected!("sve2-bitperm"));
+ println!("fp16: {}", is_aarch64_feature_detected!("fp16"));
println!("frintts: {}", is_aarch64_feature_detected!("frintts"));
println!("i8mm: {}", is_aarch64_feature_detected!("i8mm"));
- println!("f32mm: {}", is_aarch64_feature_detected!("f32mm"));
- println!("f64mm: {}", is_aarch64_feature_detected!("f64mm"));
- println!("bf16: {}", is_aarch64_feature_detected!("bf16"));
- println!("rand: {}", is_aarch64_feature_detected!("rand"));
- println!("bti: {}", is_aarch64_feature_detected!("bti"));
- println!("mte: {}", is_aarch64_feature_detected!("mte"));
println!("jsconv: {}", is_aarch64_feature_detected!("jsconv"));
- println!("fcma: {}", is_aarch64_feature_detected!("fcma"));
- println!("aes: {}", is_aarch64_feature_detected!("aes"));
+ println!("lse2: {}", is_aarch64_feature_detected!("lse2"));
+ println!("lse: {}", is_aarch64_feature_detected!("lse"));
+ println!("mte: {}", is_aarch64_feature_detected!("mte"));
+ println!("neon: {}", is_aarch64_feature_detected!("neon"));
+ println!("paca: {}", is_aarch64_feature_detected!("paca"));
+ println!("pacg: {}", is_aarch64_feature_detected!("pacg"));
+ println!("pmull: {}", is_aarch64_feature_detected!("pmull"));
+ println!("rand: {}", is_aarch64_feature_detected!("rand"));
+ println!("rcpc2: {}", is_aarch64_feature_detected!("rcpc2"));
+ println!("rcpc: {}", is_aarch64_feature_detected!("rcpc"));
+ println!("rdm: {}", is_aarch64_feature_detected!("rdm"));
+ println!("sb: {}", is_aarch64_feature_detected!("sb"));
println!("sha2: {}", is_aarch64_feature_detected!("sha2"));
println!("sha3: {}", is_aarch64_feature_detected!("sha3"));
println!("sm4: {}", is_aarch64_feature_detected!("sm4"));
+ println!("ssbs: {}", is_aarch64_feature_detected!("ssbs"));
+ println!("sve2-aes: {}", is_aarch64_feature_detected!("sve2-aes"));
+ println!("sve2-bitperm: {}", is_aarch64_feature_detected!("sve2-bitperm"));
+ println!("sve2-sha3: {}", is_aarch64_feature_detected!("sve2-sha3"));
+ println!("sve2-sm4: {}", is_aarch64_feature_detected!("sve2-sm4"));
+ println!("sve2: {}", is_aarch64_feature_detected!("sve2"));
+ println!("sve: {}", is_aarch64_feature_detected!("sve"));
+ println!("tme: {}", is_aarch64_feature_detected!("tme"));
+ // tidy-alphabetical-end
}
#[test]
#[cfg(all(target_arch = "powerpc", target_os = "linux"))]
fn powerpc_linux() {
use std::arch::is_powerpc_feature_detected;
+ // tidy-alphabetical-start
println!("altivec: {}", is_powerpc_feature_detected!("altivec"));
- println!("vsx: {}", is_powerpc_feature_detected!("vsx"));
println!("power8: {}", is_powerpc_feature_detected!("power8"));
+ println!("vsx: {}", is_powerpc_feature_detected!("vsx"));
+ // tidy-alphabetical-end
}
#[test]
#[cfg(all(target_arch = "powerpc64", target_os = "linux"))]
fn powerpc64_linux() {
use std::arch::is_powerpc64_feature_detected;
+ // tidy-alphabetical-start
println!("altivec: {}", is_powerpc64_feature_detected!("altivec"));
- println!("vsx: {}", is_powerpc64_feature_detected!("vsx"));
println!("power8: {}", is_powerpc64_feature_detected!("power8"));
+ println!("vsx: {}", is_powerpc64_feature_detected!("vsx"));
+ // tidy-alphabetical-end
}
#[test]
@@ -102,9 +110,9 @@ fn x86_all() {
// the below is in alphabetical order and matches
// the order of X86_ALLOWED_FEATURES in rustc_codegen_ssa's target_features.rs
+ // tidy-alphabetical-start
println!("adx: {:?}", is_x86_feature_detected!("adx"));
println!("aes: {:?}", is_x86_feature_detected!("aes"));
- println!("avx: {:?}", is_x86_feature_detected!("avx"));
println!("avx2: {:?}", is_x86_feature_detected!("avx2"));
println!("avx512bf16: {:?}", is_x86_feature_detected!("avx512bf16"));
println!("avx512bitalg: {:?}", is_x86_feature_detected!("avx512bitalg"));
@@ -117,13 +125,14 @@ fn x86_all() {
println!("avx512ifma: {:?}", is_x86_feature_detected!("avx512ifma"));
println!("avx512pf: {:?}", is_x86_feature_detected!("avx512pf"));
println!("avx512vaes: {:?}", is_x86_feature_detected!("avx512vaes"));
- println!("avx512vbmi: {:?}", is_x86_feature_detected!("avx512vbmi"));
println!("avx512vbmi2: {:?}", is_x86_feature_detected!("avx512vbmi2"));
+ println!("avx512vbmi: {:?}", is_x86_feature_detected!("avx512vbmi"));
println!("avx512vl: {:?}", is_x86_feature_detected!("avx512vl"));
println!("avx512vnni: {:?}", is_x86_feature_detected!("avx512vnni"));
println!("avx512vp2intersect: {:?}", is_x86_feature_detected!("avx512vp2intersect"));
println!("avx512vpclmulqdq: {:?}", is_x86_feature_detected!("avx512vpclmulqdq"));
println!("avx512vpopcntdq: {:?}", is_x86_feature_detected!("avx512vpopcntdq"));
+ println!("avx: {:?}", is_x86_feature_detected!("avx"));
println!("bmi1: {:?}", is_x86_feature_detected!("bmi1"));
println!("bmi2: {:?}", is_x86_feature_detected!("bmi2"));
println!("cmpxchg16b: {:?}", is_x86_feature_detected!("cmpxchg16b"));
@@ -138,16 +147,17 @@ fn x86_all() {
println!("rdseed: {:?}", is_x86_feature_detected!("rdseed"));
println!("rtm: {:?}", is_x86_feature_detected!("rtm"));
println!("sha: {:?}", is_x86_feature_detected!("sha"));
- println!("sse: {:?}", is_x86_feature_detected!("sse"));
println!("sse2: {:?}", is_x86_feature_detected!("sse2"));
println!("sse3: {:?}", is_x86_feature_detected!("sse3"));
println!("sse4.1: {:?}", is_x86_feature_detected!("sse4.1"));
println!("sse4.2: {:?}", is_x86_feature_detected!("sse4.2"));
println!("sse4a: {:?}", is_x86_feature_detected!("sse4a"));
+ println!("sse: {:?}", is_x86_feature_detected!("sse"));
println!("ssse3: {:?}", is_x86_feature_detected!("ssse3"));
println!("tbm: {:?}", is_x86_feature_detected!("tbm"));
println!("xsave: {:?}", is_x86_feature_detected!("xsave"));
println!("xsavec: {:?}", is_x86_feature_detected!("xsavec"));
println!("xsaveopt: {:?}", is_x86_feature_detected!("xsaveopt"));
println!("xsaves: {:?}", is_x86_feature_detected!("xsaves"));
+ // tidy-alphabetical-end
}
diff --git a/library/stdarch/CONTRIBUTING.md b/library/stdarch/CONTRIBUTING.md
index ebccd73ea..4212abcd7 100644
--- a/library/stdarch/CONTRIBUTING.md
+++ b/library/stdarch/CONTRIBUTING.md
@@ -9,7 +9,7 @@ $ cd stdarch
$ TARGET="<your-target-arch>" ci/run.sh
```
-Where `<your-target-arch>` is the target triple as used by `rustup`, e.g. `x86_x64-unknown-linux-gnu` (without any preceding `nightly-` or similar).
+Where `<your-target-arch>` is the target triple as used by `rustup`, e.g. `x86_64-unknown-linux-gnu` (without any preceding `nightly-` or similar).
Also remember that this repository requires the nightly channel of Rust!
The above tests do in fact require nightly rust to be the default on your system, to set that use `rustup default nightly` (and `rustup default stable` to revert).
diff --git a/library/stdarch/ci/android-install-sdk.sh b/library/stdarch/ci/android-install-sdk.sh
index 1beeb312a..3383dcb7f 100644
--- a/library/stdarch/ci/android-install-sdk.sh
+++ b/library/stdarch/ci/android-install-sdk.sh
@@ -19,8 +19,8 @@ set -ex
# which apparently magically accepts the licenses.
mkdir sdk
-curl --retry 5 https://dl.google.com/android/repository/sdk-tools-linux-3859397.zip -O
-unzip -d sdk sdk-tools-linux-3859397.zip
+curl --retry 5 https://dl.google.com/android/repository/sdk-tools-linux-4333796.zip -O
+unzip -d sdk sdk-tools-linux-4333796.zip
case "$1" in
arm | armv7)
diff --git a/library/stdarch/ci/docker/aarch64-linux-android/Dockerfile b/library/stdarch/ci/docker/aarch64-linux-android/Dockerfile
index 27bde89c5..6cf9b5061 100644
--- a/library/stdarch/ci/docker/aarch64-linux-android/Dockerfile
+++ b/library/stdarch/ci/docker/aarch64-linux-android/Dockerfile
@@ -1,17 +1,16 @@
-FROM ubuntu:16.04
+FROM ubuntu:22.04
-RUN dpkg --add-architecture i386 && \
- apt-get update && \
+RUN apt-get update && \
apt-get install -y --no-install-recommends \
file \
make \
curl \
ca-certificates \
- python \
+ python-is-python3 \
unzip \
expect \
- openjdk-9-jre \
- libstdc++6:i386 \
+ openjdk-8-jre \
+ libstdc++6-i386-cross \
libpulse0 \
gcc \
libc6-dev
diff --git a/library/stdarch/ci/docker/aarch64-unknown-linux-gnu/Dockerfile b/library/stdarch/ci/docker/aarch64-unknown-linux-gnu/Dockerfile
index 49464dacf..2f99999da 100644
--- a/library/stdarch/ci/docker/aarch64-unknown-linux-gnu/Dockerfile
+++ b/library/stdarch/ci/docker/aarch64-unknown-linux-gnu/Dockerfile
@@ -1,4 +1,4 @@
-FROM ubuntu:21.10
+FROM ubuntu:22.04
RUN apt-get update && apt-get install -y --no-install-recommends \
gcc \
g++ \
diff --git a/library/stdarch/ci/docker/arm-linux-androideabi/Dockerfile b/library/stdarch/ci/docker/arm-linux-androideabi/Dockerfile
index 995a9e30e..fb1a0cecf 100644
--- a/library/stdarch/ci/docker/arm-linux-androideabi/Dockerfile
+++ b/library/stdarch/ci/docker/arm-linux-androideabi/Dockerfile
@@ -1,17 +1,16 @@
-FROM ubuntu:16.04
+FROM ubuntu:22.04
-RUN dpkg --add-architecture i386 && \
- apt-get update && \
+RUN apt-get update && \
apt-get install -y --no-install-recommends \
file \
make \
curl \
ca-certificates \
- python \
+ python-is-python3 \
unzip \
expect \
- openjdk-9-jre \
- libstdc++6:i386 \
+ openjdk-8-jre \
+ libstdc++6-i386-cross \
libpulse0 \
gcc \
libc6-dev
diff --git a/library/stdarch/ci/docker/armv7-unknown-linux-gnueabihf/Dockerfile b/library/stdarch/ci/docker/armv7-unknown-linux-gnueabihf/Dockerfile
index 74181a4cb..b4cd0a68a 100644
--- a/library/stdarch/ci/docker/armv7-unknown-linux-gnueabihf/Dockerfile
+++ b/library/stdarch/ci/docker/armv7-unknown-linux-gnueabihf/Dockerfile
@@ -1,4 +1,4 @@
-FROM ubuntu:21.10
+FROM ubuntu:22.04
RUN apt-get update && apt-get install -y --no-install-recommends \
gcc \
g++ \
diff --git a/library/stdarch/ci/docker/riscv64gc-unknown-linux-gnu/Dockerfile b/library/stdarch/ci/docker/riscv64gc-unknown-linux-gnu/Dockerfile
index 1618db22f..b9b3c682e 100644
--- a/library/stdarch/ci/docker/riscv64gc-unknown-linux-gnu/Dockerfile
+++ b/library/stdarch/ci/docker/riscv64gc-unknown-linux-gnu/Dockerfile
@@ -1,4 +1,4 @@
-FROM ubuntu:21.10
+FROM ubuntu:22.04
RUN apt-get update && apt-get install -y --no-install-recommends \
gcc libc6-dev qemu-user ca-certificates \
diff --git a/library/stdarch/ci/docker/x86_64-linux-android/Dockerfile b/library/stdarch/ci/docker/x86_64-linux-android/Dockerfile
index c2830b15f..82119be74 100644
--- a/library/stdarch/ci/docker/x86_64-linux-android/Dockerfile
+++ b/library/stdarch/ci/docker/x86_64-linux-android/Dockerfile
@@ -1,4 +1,4 @@
-FROM ubuntu:16.04
+FROM ubuntu:22.04
RUN apt-get update && \
apt-get install -y --no-install-recommends \
@@ -6,7 +6,7 @@ RUN apt-get update && \
curl \
gcc \
libc-dev \
- python \
+ python-is-python3 \
unzip \
file \
make
diff --git a/library/stdarch/ci/dox.sh b/library/stdarch/ci/dox.sh
index e70a32b2d..3e507b456 100755
--- a/library/stdarch/ci/dox.sh
+++ b/library/stdarch/ci/dox.sh
@@ -22,9 +22,6 @@ dox() {
rm -rf "target/doc/${arch}"
mkdir "target/doc/${arch}"
- export RUSTFLAGS="--cfg core_arch_docs"
- export RUSTDOCFLAGS="--cfg core_arch_docs"
-
cargo build --verbose --target "${target}" --manifest-path crates/core_arch/Cargo.toml
cargo build --verbose --target "${target}" --manifest-path crates/std_detect/Cargo.toml
@@ -32,16 +29,14 @@ dox() {
-o "target/doc/${arch}" crates/core_arch/src/lib.rs \
--edition=2018 \
--crate-name core_arch \
- --library-path "target/${target}/debug/deps" \
- --cfg core_arch_docs
+ --library-path "target/${target}/debug/deps"
rustdoc --verbose --target "${target}" \
-o "target/doc/${arch}" crates/std_detect/src/lib.rs \
--edition=2018 \
--crate-name std_detect \
--library-path "target/${target}/debug/deps" \
--extern cfg_if="$(ls target/"${target}"/debug/deps/libcfg_if-*.rlib)" \
- --extern libc="$(ls target/"${target}"/debug/deps/liblibc-*.rlib)" \
- --cfg core_arch_docs
+ --extern libc="$(ls target/"${target}"/debug/deps/liblibc-*.rlib)"
}
dox i686 i686-unknown-linux-gnu
diff --git a/library/stdarch/crates/core_arch/Cargo.toml b/library/stdarch/crates/core_arch/Cargo.toml
index 14b5479d1..e2b332af2 100644
--- a/library/stdarch/crates/core_arch/Cargo.toml
+++ b/library/stdarch/crates/core_arch/Cargo.toml
@@ -13,7 +13,6 @@ readme = "README.md"
keywords = ["core", "simd", "arch", "intrinsics"]
categories = ["hardware-support", "no-std"]
license = "MIT OR Apache-2.0"
-build = "build.rs"
edition = "2018"
[badges]
diff --git a/library/stdarch/crates/core_arch/build.rs b/library/stdarch/crates/core_arch/build.rs
deleted file mode 100644
index 4d65e9ddc..000000000
--- a/library/stdarch/crates/core_arch/build.rs
+++ /dev/null
@@ -1,3 +0,0 @@
-fn main() {
- println!("cargo:rustc-cfg=core_arch_docs");
-}
diff --git a/library/stdarch/crates/core_arch/src/aarch64/crc.rs b/library/stdarch/crates/core_arch/src/aarch64/crc.rs
index 6e8128534..ac3f8d815 100644
--- a/library/stdarch/crates/core_arch/src/aarch64/crc.rs
+++ b/library/stdarch/crates/core_arch/src/aarch64/crc.rs
@@ -10,6 +10,8 @@ extern "unadjusted" {
use stdarch_test::assert_instr;
/// CRC32 single round checksum for quad words (64 bits).
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32d)
#[inline]
#[target_feature(enable = "crc")]
#[cfg_attr(test, assert_instr(crc32x))]
@@ -18,6 +20,8 @@ pub unsafe fn __crc32d(crc: u32, data: u64) -> u32 {
}
/// CRC32-C single round checksum for quad words (64 bits).
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cd)
#[inline]
#[target_feature(enable = "crc")]
#[cfg_attr(test, assert_instr(crc32cx))]
diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs
index 74ea2963c..ac05a0c23 100644
--- a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs
+++ b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs
@@ -10,6 +10,8 @@ use super::*;
use stdarch_test::assert_instr;
/// Three-way exclusive OR
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s8)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(eor3))]
@@ -23,6 +25,8 @@ pub unsafe fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
}
/// Three-way exclusive OR
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s16)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(eor3))]
@@ -36,6 +40,8 @@ pub unsafe fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t
}
/// Three-way exclusive OR
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s32)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(eor3))]
@@ -49,6 +55,8 @@ pub unsafe fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t
}
/// Three-way exclusive OR
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s64)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(eor3))]
@@ -62,6 +70,8 @@ pub unsafe fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t
}
/// Three-way exclusive OR
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u8)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(eor3))]
@@ -75,6 +85,8 @@ pub unsafe fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16
}
/// Three-way exclusive OR
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u16)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(eor3))]
@@ -88,6 +100,8 @@ pub unsafe fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x
}
/// Three-way exclusive OR
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u32)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(eor3))]
@@ -101,6 +115,8 @@ pub unsafe fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x
}
/// Three-way exclusive OR
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u64)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(eor3))]
@@ -114,6 +130,8 @@ pub unsafe fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x
}
/// Absolute difference between the arguments of Floating
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabd))]
@@ -128,6 +146,8 @@ pub unsafe fn vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
}
/// Absolute difference between the arguments of Floating
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabd))]
@@ -142,6 +162,8 @@ pub unsafe fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
}
/// Floating-point absolute difference
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabds_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabd))]
@@ -151,6 +173,8 @@ pub unsafe fn vabds_f32(a: f32, b: f32) -> f32 {
}
/// Floating-point absolute difference
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabd))]
@@ -160,6 +184,8 @@ pub unsafe fn vabdd_f64(a: f64, b: f64) -> f64 {
}
/// Unsigned Absolute difference Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl))]
@@ -171,6 +197,8 @@ pub unsafe fn vabdl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
}
/// Unsigned Absolute difference Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl))]
@@ -182,6 +210,8 @@ pub unsafe fn vabdl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
}
/// Unsigned Absolute difference Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl))]
@@ -193,6 +223,8 @@ pub unsafe fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
}
/// Signed Absolute difference Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sabdl))]
@@ -205,6 +237,8 @@ pub unsafe fn vabdl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
}
/// Signed Absolute difference Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sabdl))]
@@ -217,6 +251,8 @@ pub unsafe fn vabdl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
}
/// Signed Absolute difference Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sabdl))]
@@ -229,6 +265,8 @@ pub unsafe fn vabdl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
}
/// Compare bitwise Equal (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
@@ -238,6 +276,8 @@ pub unsafe fn vceq_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
}
/// Compare bitwise Equal (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
@@ -247,6 +287,8 @@ pub unsafe fn vceqq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
}
/// Compare bitwise Equal (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
@@ -256,6 +298,8 @@ pub unsafe fn vceq_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
}
/// Compare bitwise Equal (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
@@ -265,6 +309,8 @@ pub unsafe fn vceqq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
}
/// Compare bitwise Equal (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
@@ -274,6 +320,8 @@ pub unsafe fn vceq_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
}
/// Compare bitwise Equal (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
@@ -283,6 +331,8 @@ pub unsafe fn vceqq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
}
/// Floating-point compare equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
@@ -292,6 +342,8 @@ pub unsafe fn vceq_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
}
/// Floating-point compare equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
@@ -301,6 +353,8 @@ pub unsafe fn vceqq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
}
/// Compare bitwise equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
@@ -310,6 +364,8 @@ pub unsafe fn vceqd_s64(a: i64, b: i64) -> u64 {
}
/// Compare bitwise equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
@@ -319,6 +375,8 @@ pub unsafe fn vceqd_u64(a: u64, b: u64) -> u64 {
}
/// Floating-point compare equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqs_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
@@ -328,6 +386,8 @@ pub unsafe fn vceqs_f32(a: f32, b: f32) -> u32 {
}
/// Floating-point compare equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
@@ -337,6 +397,8 @@ pub unsafe fn vceqd_f64(a: f64, b: f64) -> u64 {
}
/// Signed compare bitwise equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
@@ -347,6 +409,8 @@ pub unsafe fn vceqz_s8(a: int8x8_t) -> uint8x8_t {
}
/// Signed compare bitwise equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
@@ -357,6 +421,8 @@ pub unsafe fn vceqzq_s8(a: int8x16_t) -> uint8x16_t {
}
/// Signed compare bitwise equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
@@ -367,6 +433,8 @@ pub unsafe fn vceqz_s16(a: int16x4_t) -> uint16x4_t {
}
/// Signed compare bitwise equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
@@ -377,6 +445,8 @@ pub unsafe fn vceqzq_s16(a: int16x8_t) -> uint16x8_t {
}
/// Signed compare bitwise equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
@@ -387,6 +457,8 @@ pub unsafe fn vceqz_s32(a: int32x2_t) -> uint32x2_t {
}
/// Signed compare bitwise equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
@@ -397,6 +469,8 @@ pub unsafe fn vceqzq_s32(a: int32x4_t) -> uint32x4_t {
}
/// Signed compare bitwise equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
@@ -407,6 +481,8 @@ pub unsafe fn vceqz_s64(a: int64x1_t) -> uint64x1_t {
}
/// Signed compare bitwise equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
@@ -417,6 +493,8 @@ pub unsafe fn vceqzq_s64(a: int64x2_t) -> uint64x2_t {
}
/// Signed compare bitwise equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
@@ -427,6 +505,8 @@ pub unsafe fn vceqz_p8(a: poly8x8_t) -> uint8x8_t {
}
/// Signed compare bitwise equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
@@ -437,6 +517,8 @@ pub unsafe fn vceqzq_p8(a: poly8x16_t) -> uint8x16_t {
}
/// Signed compare bitwise equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
@@ -447,6 +529,8 @@ pub unsafe fn vceqz_p64(a: poly64x1_t) -> uint64x1_t {
}
/// Signed compare bitwise equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
@@ -457,6 +541,8 @@ pub unsafe fn vceqzq_p64(a: poly64x2_t) -> uint64x2_t {
}
/// Unsigned compare bitwise equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
@@ -467,6 +553,8 @@ pub unsafe fn vceqz_u8(a: uint8x8_t) -> uint8x8_t {
}
/// Unsigned compare bitwise equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
@@ -477,6 +565,8 @@ pub unsafe fn vceqzq_u8(a: uint8x16_t) -> uint8x16_t {
}
/// Unsigned compare bitwise equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
@@ -487,6 +577,8 @@ pub unsafe fn vceqz_u16(a: uint16x4_t) -> uint16x4_t {
}
/// Unsigned compare bitwise equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
@@ -497,6 +589,8 @@ pub unsafe fn vceqzq_u16(a: uint16x8_t) -> uint16x8_t {
}
/// Unsigned compare bitwise equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
@@ -507,6 +601,8 @@ pub unsafe fn vceqz_u32(a: uint32x2_t) -> uint32x2_t {
}
/// Unsigned compare bitwise equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
@@ -517,6 +613,8 @@ pub unsafe fn vceqzq_u32(a: uint32x4_t) -> uint32x4_t {
}
/// Unsigned compare bitwise equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
@@ -527,6 +625,8 @@ pub unsafe fn vceqz_u64(a: uint64x1_t) -> uint64x1_t {
}
/// Unsigned compare bitwise equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
@@ -537,6 +637,8 @@ pub unsafe fn vceqzq_u64(a: uint64x2_t) -> uint64x2_t {
}
/// Floating-point compare bitwise equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
@@ -547,6 +649,8 @@ pub unsafe fn vceqz_f32(a: float32x2_t) -> uint32x2_t {
}
/// Floating-point compare bitwise equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
@@ -557,6 +661,8 @@ pub unsafe fn vceqzq_f32(a: float32x4_t) -> uint32x4_t {
}
/// Floating-point compare bitwise equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
@@ -567,6 +673,8 @@ pub unsafe fn vceqz_f64(a: float64x1_t) -> uint64x1_t {
}
/// Floating-point compare bitwise equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
@@ -577,6 +685,8 @@ pub unsafe fn vceqzq_f64(a: float64x2_t) -> uint64x2_t {
}
/// Compare bitwise equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
@@ -586,6 +696,8 @@ pub unsafe fn vceqzd_s64(a: i64) -> u64 {
}
/// Compare bitwise equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
@@ -595,6 +707,8 @@ pub unsafe fn vceqzd_u64(a: u64) -> u64 {
}
/// Floating-point compare bitwise equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzs_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
@@ -604,6 +718,8 @@ pub unsafe fn vceqzs_f32(a: f32) -> u32 {
}
/// Floating-point compare bitwise equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
@@ -613,6 +729,8 @@ pub unsafe fn vceqzd_f64(a: f64) -> u64 {
}
/// Signed compare bitwise Test bits nonzero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
@@ -624,6 +742,8 @@ pub unsafe fn vtst_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
}
/// Signed compare bitwise Test bits nonzero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
@@ -635,6 +755,8 @@ pub unsafe fn vtstq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
}
/// Signed compare bitwise Test bits nonzero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
@@ -646,6 +768,8 @@ pub unsafe fn vtst_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
}
/// Signed compare bitwise Test bits nonzero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
@@ -657,6 +781,8 @@ pub unsafe fn vtstq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
}
/// Unsigned compare bitwise Test bits nonzero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
@@ -668,6 +794,8 @@ pub unsafe fn vtst_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
}
/// Unsigned compare bitwise Test bits nonzero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
@@ -679,6 +807,8 @@ pub unsafe fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
}
/// Compare bitwise test bits nonzero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tst))]
@@ -688,6 +818,8 @@ pub unsafe fn vtstd_s64(a: i64, b: i64) -> u64 {
}
/// Compare bitwise test bits nonzero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tst))]
@@ -697,6 +829,8 @@ pub unsafe fn vtstd_u64(a: u64, b: u64) -> u64 {
}
/// Signed saturating accumulate of unsigned value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadds_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
@@ -711,6 +845,8 @@ pub unsafe fn vuqadds_s32(a: i32, b: u32) -> i32 {
}
/// Signed saturating accumulate of unsigned value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
@@ -725,6 +861,8 @@ pub unsafe fn vuqaddd_s64(a: i64, b: u64) -> i64 {
}
/// Signed saturating accumulate of unsigned value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddb_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
@@ -734,6 +872,8 @@ pub unsafe fn vuqaddb_s8(a: i8, b: u8) -> i8 {
}
/// Signed saturating accumulate of unsigned value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
@@ -743,6 +883,8 @@ pub unsafe fn vuqaddh_s16(a: i16, b: u16) -> i16 {
}
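A hedged sketch of the saturating accumulate intrinsics above (hypothetical `vuqadd_sketch` helper, aarch64/NEON assumed): `vuqaddd_s64` adds an unsigned value to a signed accumulator and saturates at `i64::MAX` instead of wrapping:
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn vuqadd_sketch() {
    use core::arch::aarch64::vuqaddd_s64;
    assert_eq!(vuqaddd_s64(-5, 3), -2);             // ordinary accumulate
    assert_eq!(vuqaddd_s64(i64::MAX, 1), i64::MAX); // saturates, no wrap
}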
/// Floating-point absolute value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabs))]
@@ -752,6 +894,8 @@ pub unsafe fn vabs_f64(a: float64x1_t) -> float64x1_t {
}
/// Floating-point absolute value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabs))]
@@ -761,6 +905,8 @@ pub unsafe fn vabsq_f64(a: float64x2_t) -> float64x2_t {
}
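A minimal sketch for the vector absolute-value form (hypothetical `vabs_sketch` helper; `vdup_n_f64` and `vget_lane_f64` are only used to build and read back a one-lane vector):
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn vabs_sketch() {
    use core::arch::aarch64::{vabs_f64, vdup_n_f64, vget_lane_f64};
    let v = vdup_n_f64(-2.5);
    assert_eq!(vget_lane_f64::<0>(vabs_f64(v)), 2.5);
}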
/// Compare signed greater than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
@@ -770,6 +916,8 @@ pub unsafe fn vcgt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
}
/// Compare signed greater than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
@@ -779,6 +927,8 @@ pub unsafe fn vcgtq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
}
/// Compare unsigned higher
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
@@ -788,6 +938,8 @@ pub unsafe fn vcgt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
}
/// Compare unsigned higher
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
@@ -797,6 +949,8 @@ pub unsafe fn vcgtq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
}
/// Floating-point compare greater than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
@@ -806,6 +960,8 @@ pub unsafe fn vcgt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
}
/// Floating-point compare greater than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
@@ -815,6 +971,8 @@ pub unsafe fn vcgtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
}
/// Compare greater than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
@@ -824,6 +982,8 @@ pub unsafe fn vcgtd_s64(a: i64, b: i64) -> u64 {
}
/// Compare greater than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
@@ -833,6 +993,8 @@ pub unsafe fn vcgtd_u64(a: u64, b: u64) -> u64 {
}
/// Floating-point compare greater than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgts_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
@@ -842,6 +1004,8 @@ pub unsafe fn vcgts_f32(a: f32, b: f32) -> u32 {
}
/// Floating-point compare greater than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
@@ -851,6 +1015,8 @@ pub unsafe fn vcgtd_f64(a: f64, b: f64) -> u64 {
}
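A sketch of the scalar comparison forms (hypothetical `vcgt_sketch` helper, aarch64/NEON assumed): they return an all-ones mask (`u64::MAX` / `u32::MAX`) when the comparison holds and 0 otherwise:
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn vcgt_sketch() {
    use core::arch::aarch64::{vcgtd_s64, vcgts_f32};
    assert_eq!(vcgtd_s64(2, 1), u64::MAX);
    assert_eq!(vcgtd_s64(1, 2), 0);
    assert_eq!(vcgts_f32(1.5, 0.5), u32::MAX);
}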
/// Compare signed less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
@@ -860,6 +1026,8 @@ pub unsafe fn vclt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
}
/// Compare signed less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
@@ -869,6 +1037,8 @@ pub unsafe fn vcltq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
}
/// Compare unsigned less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
@@ -878,6 +1048,8 @@ pub unsafe fn vclt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
}
/// Compare unsigned less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
@@ -887,6 +1059,8 @@ pub unsafe fn vcltq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
}
/// Floating-point compare less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
@@ -896,6 +1070,8 @@ pub unsafe fn vclt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
}
/// Floating-point compare less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
@@ -905,6 +1081,8 @@ pub unsafe fn vcltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
}
/// Compare less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
@@ -914,6 +1092,8 @@ pub unsafe fn vcltd_s64(a: i64, b: i64) -> u64 {
}
/// Compare less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
@@ -923,6 +1103,8 @@ pub unsafe fn vcltd_u64(a: u64, b: u64) -> u64 {
}
/// Floating-point compare less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclts_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
@@ -932,6 +1114,8 @@ pub unsafe fn vclts_f32(a: f32, b: f32) -> u32 {
}
/// Floating-point compare less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
@@ -941,6 +1125,8 @@ pub unsafe fn vcltd_f64(a: f64, b: f64) -> u64 {
}
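The less-than forms mirror the greater-than forms with swapped operands (which is why the vector variants above still assert `cmgt`/`cmhi`); a small sketch with a hypothetical `vclt_sketch` helper:
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn vclt_sketch() {
    use core::arch::aarch64::{vcgtd_s64, vcltd_s64};
    assert_eq!(vcltd_s64(1, 2), u64::MAX);
    assert_eq!(vcltd_s64(1, 2), vcgtd_s64(2, 1)); // a < b  <=>  b > a
}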
/// Compare signed less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
@@ -950,6 +1136,8 @@ pub unsafe fn vcle_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
}
/// Compare signed less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
@@ -959,6 +1147,8 @@ pub unsafe fn vcleq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
}
/// Compare greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
@@ -968,6 +1158,8 @@ pub unsafe fn vcged_s64(a: i64, b: i64) -> u64 {
}
/// Compare greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
@@ -977,6 +1169,8 @@ pub unsafe fn vcged_u64(a: u64, b: u64) -> u64 {
}
/// Floating-point compare greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcges_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
@@ -986,6 +1180,8 @@ pub unsafe fn vcges_f32(a: f32, b: f32) -> u32 {
}
/// Floating-point compare greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
@@ -995,6 +1191,8 @@ pub unsafe fn vcged_f64(a: f64, b: f64) -> u64 {
}
/// Compare unsigned less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
@@ -1004,6 +1202,8 @@ pub unsafe fn vcle_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
}
/// Compare unsigned less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
@@ -1013,6 +1213,8 @@ pub unsafe fn vcleq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
}
/// Floating-point compare less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
@@ -1022,6 +1224,8 @@ pub unsafe fn vcle_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
}
/// Floating-point compare less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
@@ -1031,6 +1235,8 @@ pub unsafe fn vcleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
}
/// Compare less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
@@ -1040,6 +1246,8 @@ pub unsafe fn vcled_s64(a: i64, b: i64) -> u64 {
}
/// Compare less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
@@ -1049,6 +1257,8 @@ pub unsafe fn vcled_u64(a: u64, b: u64) -> u64 {
}
/// Floating-point compare less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcles_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
@@ -1058,6 +1268,8 @@ pub unsafe fn vcles_f32(a: f32, b: f32) -> u32 {
}
/// Floating-point compare less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
@@ -1067,6 +1279,8 @@ pub unsafe fn vcled_f64(a: f64, b: f64) -> u64 {
}
/// Compare signed greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
@@ -1076,6 +1290,8 @@ pub unsafe fn vcge_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
}
/// Compare signed greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
@@ -1085,6 +1301,8 @@ pub unsafe fn vcgeq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
}
/// Compare unsigned greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
@@ -1094,6 +1312,8 @@ pub unsafe fn vcge_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
}
/// Compare unsigned greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
@@ -1103,6 +1323,8 @@ pub unsafe fn vcgeq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
}
/// Floating-point compare greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
@@ -1112,6 +1334,8 @@ pub unsafe fn vcge_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
}
/// Floating-point compare greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
@@ -1121,6 +1345,8 @@ pub unsafe fn vcgeq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
}
/// Compare signed greater than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
@@ -1131,6 +1357,8 @@ pub unsafe fn vcgez_s8(a: int8x8_t) -> uint8x8_t {
}
/// Compare signed greater than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
@@ -1141,6 +1369,8 @@ pub unsafe fn vcgezq_s8(a: int8x16_t) -> uint8x16_t {
}
/// Compare signed greater than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
@@ -1151,6 +1381,8 @@ pub unsafe fn vcgez_s16(a: int16x4_t) -> uint16x4_t {
}
/// Compare signed greater than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
@@ -1161,6 +1393,8 @@ pub unsafe fn vcgezq_s16(a: int16x8_t) -> uint16x8_t {
}
/// Compare signed greater than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
@@ -1171,6 +1405,8 @@ pub unsafe fn vcgez_s32(a: int32x2_t) -> uint32x2_t {
}
/// Compare signed greater than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
@@ -1181,6 +1417,8 @@ pub unsafe fn vcgezq_s32(a: int32x4_t) -> uint32x4_t {
}
/// Compare signed greater than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
@@ -1191,6 +1429,8 @@ pub unsafe fn vcgez_s64(a: int64x1_t) -> uint64x1_t {
}
/// Compare signed greater than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
@@ -1201,6 +1441,8 @@ pub unsafe fn vcgezq_s64(a: int64x2_t) -> uint64x2_t {
}
/// Floating-point compare greater than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
@@ -1211,6 +1453,8 @@ pub unsafe fn vcgez_f32(a: float32x2_t) -> uint32x2_t {
}
/// Floating-point compare greater than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
@@ -1221,6 +1465,8 @@ pub unsafe fn vcgezq_f32(a: float32x4_t) -> uint32x4_t {
}
/// Floating-point compare greater than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
@@ -1231,6 +1477,8 @@ pub unsafe fn vcgez_f64(a: float64x1_t) -> uint64x1_t {
}
/// Floating-point compare greater than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
@@ -1241,6 +1489,8 @@ pub unsafe fn vcgezq_f64(a: float64x2_t) -> uint64x2_t {
}
/// Compare signed greater than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(eor))]
@@ -1250,6 +1500,8 @@ pub unsafe fn vcgezd_s64(a: i64) -> u64 {
}
/// Floating-point compare greater than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezs_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
@@ -1259,6 +1511,8 @@ pub unsafe fn vcgezs_f32(a: f32) -> u32 {
}
/// Floating-point compare greater than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
@@ -1268,6 +1522,8 @@ pub unsafe fn vcgezd_f64(a: f64) -> u64 {
}
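The `z` variants compare against an implicit zero; a hedged sketch of the scalar forms (hypothetical `vcgez_sketch` helper):
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn vcgez_sketch() {
    use core::arch::aarch64::{vcgezd_s64, vcgezs_f32};
    assert_eq!(vcgezd_s64(0), u64::MAX); // 0 >= 0 holds
    assert_eq!(vcgezd_s64(-1), 0);
    assert_eq!(vcgezs_f32(-0.5), 0);
}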
/// Compare signed greater than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
@@ -1278,6 +1534,8 @@ pub unsafe fn vcgtz_s8(a: int8x8_t) -> uint8x8_t {
}
/// Compare signed greater than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
@@ -1288,6 +1546,8 @@ pub unsafe fn vcgtzq_s8(a: int8x16_t) -> uint8x16_t {
}
/// Compare signed greater than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
@@ -1298,6 +1558,8 @@ pub unsafe fn vcgtz_s16(a: int16x4_t) -> uint16x4_t {
}
/// Compare signed greater than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
@@ -1308,6 +1570,8 @@ pub unsafe fn vcgtzq_s16(a: int16x8_t) -> uint16x8_t {
}
/// Compare signed greater than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
@@ -1318,6 +1582,8 @@ pub unsafe fn vcgtz_s32(a: int32x2_t) -> uint32x2_t {
}
/// Compare signed greater than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
@@ -1328,6 +1594,8 @@ pub unsafe fn vcgtzq_s32(a: int32x4_t) -> uint32x4_t {
}
/// Compare signed greater than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
@@ -1338,6 +1606,8 @@ pub unsafe fn vcgtz_s64(a: int64x1_t) -> uint64x1_t {
}
/// Compare signed greater than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
@@ -1348,6 +1618,8 @@ pub unsafe fn vcgtzq_s64(a: int64x2_t) -> uint64x2_t {
}
/// Floating-point compare greater than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
@@ -1358,6 +1630,8 @@ pub unsafe fn vcgtz_f32(a: float32x2_t) -> uint32x2_t {
}
/// Floating-point compare greater than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
@@ -1368,6 +1642,8 @@ pub unsafe fn vcgtzq_f32(a: float32x4_t) -> uint32x4_t {
}
/// Floating-point compare greater than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
@@ -1378,6 +1654,8 @@ pub unsafe fn vcgtz_f64(a: float64x1_t) -> uint64x1_t {
}
/// Floating-point compare greater than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
@@ -1388,6 +1666,8 @@ pub unsafe fn vcgtzq_f64(a: float64x2_t) -> uint64x2_t {
}
/// Compare signed greater than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
@@ -1397,6 +1677,8 @@ pub unsafe fn vcgtzd_s64(a: i64) -> u64 {
}
/// Floating-point compare greater than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzs_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
@@ -1406,6 +1688,8 @@ pub unsafe fn vcgtzs_f32(a: f32) -> u32 {
}
/// Floating-point compare greater than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
@@ -1415,6 +1699,8 @@ pub unsafe fn vcgtzd_f64(a: f64) -> u64 {
}
/// Compare signed less than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
@@ -1425,6 +1711,8 @@ pub unsafe fn vclez_s8(a: int8x8_t) -> uint8x8_t {
}
/// Compare signed less than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
@@ -1435,6 +1723,8 @@ pub unsafe fn vclezq_s8(a: int8x16_t) -> uint8x16_t {
}
/// Compare signed less than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
@@ -1445,6 +1735,8 @@ pub unsafe fn vclez_s16(a: int16x4_t) -> uint16x4_t {
}
/// Compare signed less than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
@@ -1455,6 +1747,8 @@ pub unsafe fn vclezq_s16(a: int16x8_t) -> uint16x8_t {
}
/// Compare signed less than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
@@ -1465,6 +1759,8 @@ pub unsafe fn vclez_s32(a: int32x2_t) -> uint32x2_t {
}
/// Compare signed less than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
@@ -1475,6 +1771,8 @@ pub unsafe fn vclezq_s32(a: int32x4_t) -> uint32x4_t {
}
/// Compare signed less than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
@@ -1485,6 +1783,8 @@ pub unsafe fn vclez_s64(a: int64x1_t) -> uint64x1_t {
}
/// Compare signed less than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
@@ -1495,6 +1795,8 @@ pub unsafe fn vclezq_s64(a: int64x2_t) -> uint64x2_t {
}
/// Floating-point compare less than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
@@ -1505,6 +1807,8 @@ pub unsafe fn vclez_f32(a: float32x2_t) -> uint32x2_t {
}
/// Floating-point compare less than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
@@ -1515,6 +1819,8 @@ pub unsafe fn vclezq_f32(a: float32x4_t) -> uint32x4_t {
}
/// Floating-point compare less than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
@@ -1525,6 +1831,8 @@ pub unsafe fn vclez_f64(a: float64x1_t) -> uint64x1_t {
}
/// Floating-point compare less than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
@@ -1535,6 +1843,8 @@ pub unsafe fn vclezq_f64(a: float64x2_t) -> uint64x2_t {
}
/// Compare less than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
@@ -1544,6 +1854,8 @@ pub unsafe fn vclezd_s64(a: i64) -> u64 {
}
/// Floating-point compare less than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezs_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
@@ -1553,6 +1865,8 @@ pub unsafe fn vclezs_f32(a: f32) -> u32 {
}
/// Floating-point compare less than or equal to zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
@@ -1562,6 +1876,8 @@ pub unsafe fn vclezd_f64(a: f64) -> u64 {
}
/// Compare signed less than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
@@ -1572,6 +1888,8 @@ pub unsafe fn vcltz_s8(a: int8x8_t) -> uint8x8_t {
}
/// Compare signed less than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
@@ -1582,6 +1900,8 @@ pub unsafe fn vcltzq_s8(a: int8x16_t) -> uint8x16_t {
}
/// Compare signed less than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
@@ -1592,6 +1912,8 @@ pub unsafe fn vcltz_s16(a: int16x4_t) -> uint16x4_t {
}
/// Compare signed less than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
@@ -1602,6 +1924,8 @@ pub unsafe fn vcltzq_s16(a: int16x8_t) -> uint16x8_t {
}
/// Compare signed less than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
@@ -1612,6 +1936,8 @@ pub unsafe fn vcltz_s32(a: int32x2_t) -> uint32x2_t {
}
/// Compare signed less than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
@@ -1622,6 +1948,8 @@ pub unsafe fn vcltzq_s32(a: int32x4_t) -> uint32x4_t {
}
/// Compare signed less than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
@@ -1632,6 +1960,8 @@ pub unsafe fn vcltz_s64(a: int64x1_t) -> uint64x1_t {
}
/// Compare signed less than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
@@ -1642,6 +1972,8 @@ pub unsafe fn vcltzq_s64(a: int64x2_t) -> uint64x2_t {
}
/// Floating-point compare less than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
@@ -1652,6 +1984,8 @@ pub unsafe fn vcltz_f32(a: float32x2_t) -> uint32x2_t {
}
/// Floating-point compare less than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
@@ -1662,6 +1996,8 @@ pub unsafe fn vcltzq_f32(a: float32x4_t) -> uint32x4_t {
}
/// Floating-point compare less than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
@@ -1672,6 +2008,8 @@ pub unsafe fn vcltz_f64(a: float64x1_t) -> uint64x1_t {
}
/// Floating-point compare less than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
@@ -1682,6 +2020,8 @@ pub unsafe fn vcltzq_f64(a: float64x2_t) -> uint64x2_t {
}
/// Compare less than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(asr))]
@@ -1691,6 +2031,8 @@ pub unsafe fn vcltzd_s64(a: i64) -> u64 {
}
/// Floating-point compare less than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzs_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
@@ -1700,6 +2042,8 @@ pub unsafe fn vcltzs_f32(a: f32) -> u32 {
}
/// Floating-point compare less than zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
@@ -1709,6 +2053,8 @@ pub unsafe fn vcltzd_f64(a: f64) -> u64 {
}
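For signed less-than-zero the scalar form reduces to broadcasting the sign bit, hence the `asr` assertion on `vcltzd_s64` above; a sketch with a hypothetical `vcltz_sketch` helper:
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn vcltz_sketch() {
    use core::arch::aarch64::vcltzd_s64;
    assert_eq!(vcltzd_s64(-1), u64::MAX); // sign bit set -> all ones
    assert_eq!(vcltzd_s64(0), 0);
}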
/// Floating-point absolute compare greater than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
@@ -1723,6 +2069,8 @@ pub unsafe fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
}
/// Floating-point absolute compare greater than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
@@ -1737,6 +2085,8 @@ pub unsafe fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
}
/// Floating-point absolute compare greater than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagts_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
@@ -1751,6 +2101,8 @@ pub unsafe fn vcagts_f32(a: f32, b: f32) -> u32 {
}
/// Floating-point absolute compare greater than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
@@ -1765,6 +2117,8 @@ pub unsafe fn vcagtd_f64(a: f64, b: f64) -> u64 {
}
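The absolute-compare intrinsics compare magnitudes and ignore sign; a sketch of the scalar form (hypothetical `vcagt_sketch` helper):
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn vcagt_sketch() {
    use core::arch::aarch64::vcagts_f32;
    assert_eq!(vcagts_f32(-3.0, 2.0), u32::MAX); // |-3.0| > |2.0|
    assert_eq!(vcagts_f32(1.0, -2.0), 0);
}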
/// Floating-point absolute compare greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
@@ -1779,6 +2133,8 @@ pub unsafe fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
}
/// Floating-point absolute compare greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
@@ -1793,6 +2149,8 @@ pub unsafe fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
}
/// Floating-point absolute compare greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcages_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
@@ -1807,6 +2165,8 @@ pub unsafe fn vcages_f32(a: f32, b: f32) -> u32 {
}
/// Floating-point absolute compare greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaged_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
@@ -1821,6 +2181,8 @@ pub unsafe fn vcaged_f64(a: f64, b: f64) -> u64 {
}
/// Floating-point absolute compare less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
@@ -1830,6 +2192,8 @@ pub unsafe fn vcalt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
}
/// Floating-point absolute compare less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
@@ -1839,6 +2203,8 @@ pub unsafe fn vcaltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
}
/// Floating-point absolute compare less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalts_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
@@ -1848,6 +2214,8 @@ pub unsafe fn vcalts_f32(a: f32, b: f32) -> u32 {
}
/// Floating-point absolute compare less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
@@ -1857,6 +2225,8 @@ pub unsafe fn vcaltd_f64(a: f64, b: f64) -> u64 {
}
/// Floating-point absolute compare less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
@@ -1866,6 +2236,8 @@ pub unsafe fn vcale_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
}
/// Floating-point absolute compare less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
@@ -1875,6 +2247,8 @@ pub unsafe fn vcaleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
}
/// Floating-point absolute compare less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcales_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
@@ -1884,6 +2258,8 @@ pub unsafe fn vcales_f32(a: f32, b: f32) -> u32 {
}
/// Floating-point absolute compare less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaled_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
@@ -1893,6 +2269,8 @@ pub unsafe fn vcaled_f64(a: f64, b: f64) -> u64 {
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -1915,6 +2293,8 @@ pub unsafe fn vcopy_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b:
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -1945,6 +2325,8 @@ pub unsafe fn vcopyq_laneq_s8<const LANE1: i32, const LANE2: i32>(a: int8x16_t,
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -1963,6 +2345,8 @@ pub unsafe fn vcopy_lane_s16<const LANE1: i32, const LANE2: i32>(a: int16x4_t, b
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -1985,6 +2369,8 @@ pub unsafe fn vcopyq_laneq_s16<const LANE1: i32, const LANE2: i32>(a: int16x8_t,
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2001,6 +2387,8 @@ pub unsafe fn vcopy_lane_s32<const LANE1: i32, const LANE2: i32>(a: int32x2_t, b
}
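A sketch of the lane-copy semantics (hypothetical `vcopy_sketch` helper; `vdup_n_s32`, `vset_lane_s32` and `vget_lane_s32` are only used to build and inspect the vectors): `vcopy_lane_s32::<LANE1, LANE2>(a, b)` returns `a` with lane `LANE1` replaced by lane `LANE2` of `b`:
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn vcopy_sketch() {
    use core::arch::aarch64::{vcopy_lane_s32, vdup_n_s32, vget_lane_s32, vset_lane_s32};
    let a = vdup_n_s32(1);                        // [1, 1]
    let b = vset_lane_s32::<1>(7, vdup_n_s32(0)); // [0, 7]
    let r = vcopy_lane_s32::<0, 1>(a, b);         // [7, 1]
    assert_eq!(vget_lane_s32::<0>(r), 7);
    assert_eq!(vget_lane_s32::<1>(r), 1);
}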
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2019,6 +2407,8 @@ pub unsafe fn vcopyq_laneq_s32<const LANE1: i32, const LANE2: i32>(a: int32x4_t,
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2035,6 +2425,8 @@ pub unsafe fn vcopyq_laneq_s64<const LANE1: i32, const LANE2: i32>(a: int64x2_t,
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2057,6 +2449,8 @@ pub unsafe fn vcopy_lane_u8<const LANE1: i32, const LANE2: i32>(a: uint8x8_t, b:
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2087,6 +2481,8 @@ pub unsafe fn vcopyq_laneq_u8<const LANE1: i32, const LANE2: i32>(a: uint8x16_t,
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2105,6 +2501,8 @@ pub unsafe fn vcopy_lane_u16<const LANE1: i32, const LANE2: i32>(a: uint16x4_t,
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2127,6 +2525,8 @@ pub unsafe fn vcopyq_laneq_u16<const LANE1: i32, const LANE2: i32>(a: uint16x8_t
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2143,6 +2543,8 @@ pub unsafe fn vcopy_lane_u32<const LANE1: i32, const LANE2: i32>(a: uint32x2_t,
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2161,6 +2563,8 @@ pub unsafe fn vcopyq_laneq_u32<const LANE1: i32, const LANE2: i32>(a: uint32x4_t
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2177,6 +2581,8 @@ pub unsafe fn vcopyq_laneq_u64<const LANE1: i32, const LANE2: i32>(a: uint64x2_t
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2199,6 +2605,8 @@ pub unsafe fn vcopy_lane_p8<const LANE1: i32, const LANE2: i32>(a: poly8x8_t, b:
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2229,6 +2637,8 @@ pub unsafe fn vcopyq_laneq_p8<const LANE1: i32, const LANE2: i32>(a: poly8x16_t,
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2247,6 +2657,8 @@ pub unsafe fn vcopy_lane_p16<const LANE1: i32, const LANE2: i32>(a: poly16x4_t,
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2269,6 +2681,8 @@ pub unsafe fn vcopyq_laneq_p16<const LANE1: i32, const LANE2: i32>(a: poly16x8_t
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2285,6 +2699,8 @@ pub unsafe fn vcopyq_laneq_p64<const LANE1: i32, const LANE2: i32>(a: poly64x2_t
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2301,6 +2717,8 @@ pub unsafe fn vcopy_lane_f32<const LANE1: i32, const LANE2: i32>(a: float32x2_t,
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2319,6 +2737,8 @@ pub unsafe fn vcopyq_laneq_f32<const LANE1: i32, const LANE2: i32>(a: float32x4_
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2335,6 +2755,8 @@ pub unsafe fn vcopyq_laneq_f64<const LANE1: i32, const LANE2: i32>(a: float64x2_
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2358,6 +2780,8 @@ pub unsafe fn vcopy_laneq_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b:
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2377,6 +2801,8 @@ pub unsafe fn vcopy_laneq_s16<const LANE1: i32, const LANE2: i32>(a: int16x4_t,
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2394,6 +2820,8 @@ pub unsafe fn vcopy_laneq_s32<const LANE1: i32, const LANE2: i32>(a: int32x2_t,
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2417,6 +2845,8 @@ pub unsafe fn vcopy_laneq_u8<const LANE1: i32, const LANE2: i32>(a: uint8x8_t, b
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2436,6 +2866,8 @@ pub unsafe fn vcopy_laneq_u16<const LANE1: i32, const LANE2: i32>(a: uint16x4_t,
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2453,6 +2885,8 @@ pub unsafe fn vcopy_laneq_u32<const LANE1: i32, const LANE2: i32>(a: uint32x2_t,
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2476,6 +2910,8 @@ pub unsafe fn vcopy_laneq_p8<const LANE1: i32, const LANE2: i32>(a: poly8x8_t, b
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2495,6 +2931,8 @@ pub unsafe fn vcopy_laneq_p16<const LANE1: i32, const LANE2: i32>(a: poly16x4_t,
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2512,6 +2950,8 @@ pub unsafe fn vcopy_laneq_f32<const LANE1: i32, const LANE2: i32>(a: float32x2_t
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2543,6 +2983,8 @@ pub unsafe fn vcopyq_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x16_t, b
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2566,6 +3008,8 @@ pub unsafe fn vcopyq_lane_s16<const LANE1: i32, const LANE2: i32>(a: int16x8_t,
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2585,6 +3029,8 @@ pub unsafe fn vcopyq_lane_s32<const LANE1: i32, const LANE2: i32>(a: int32x4_t,
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2616,6 +3062,8 @@ pub unsafe fn vcopyq_lane_u8<const LANE1: i32, const LANE2: i32>(a: uint8x16_t,
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2639,6 +3087,8 @@ pub unsafe fn vcopyq_lane_u16<const LANE1: i32, const LANE2: i32>(a: uint16x8_t,
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2658,6 +3108,8 @@ pub unsafe fn vcopyq_lane_u32<const LANE1: i32, const LANE2: i32>(a: uint32x4_t,
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2689,6 +3141,8 @@ pub unsafe fn vcopyq_lane_p8<const LANE1: i32, const LANE2: i32>(a: poly8x16_t,
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
@@ -2712,6 +3166,8 @@ pub unsafe fn vcopyq_lane_p16<const LANE1: i32, const LANE2: i32>(a: poly16x8_t,
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
@@ -2729,6 +3185,8 @@ pub unsafe fn vcopyq_lane_s64<const LANE1: i32, const LANE2: i32>(a: int64x2_t,
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
@@ -2746,6 +3204,8 @@ pub unsafe fn vcopyq_lane_u64<const LANE1: i32, const LANE2: i32>(a: uint64x2_t,
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
@@ -2763,6 +3223,8 @@ pub unsafe fn vcopyq_lane_p64<const LANE1: i32, const LANE2: i32>(a: poly64x2_t,
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
@@ -2782,6 +3244,8 @@ pub unsafe fn vcopyq_lane_f32<const LANE1: i32, const LANE2: i32>(a: float32x4_t
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
@@ -2799,6 +3263,8 @@ pub unsafe fn vcopyq_lane_f64<const LANE1: i32, const LANE2: i32>(a: float64x2_t
}
/// Create a vector from a 64-bit pattern
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -2808,6 +3274,8 @@ pub unsafe fn vcreate_f64(a: u64) -> float64x1_t {
}
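A sketch (hypothetical `vcreate_sketch` helper): `vcreate_f64` reinterprets a `u64` bit pattern as a one-lane `float64x1_t`, so feeding it the IEEE-754 bits of a value round-trips that value:
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn vcreate_sketch() {
    use core::arch::aarch64::{vcreate_f64, vget_lane_f64};
    let v = vcreate_f64(1.0_f64.to_bits());
    assert_eq!(vget_lane_f64::<0>(v), 1.0);
}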
/// Fixed-point convert to floating-point
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
@@ -2817,6 +3285,8 @@ pub unsafe fn vcvt_f64_s64(a: int64x1_t) -> float64x1_t {
}
/// Fixed-point convert to floating-point
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
@@ -2826,6 +3296,8 @@ pub unsafe fn vcvtq_f64_s64(a: int64x2_t) -> float64x2_t {
}
/// Fixed-point convert to floating-point
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
@@ -2835,6 +3307,8 @@ pub unsafe fn vcvt_f64_u64(a: uint64x1_t) -> float64x1_t {
}
/// Fixed-point convert to floating-point
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
@@ -2844,6 +3318,8 @@ pub unsafe fn vcvtq_f64_u64(a: uint64x2_t) -> float64x2_t {
}
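A sketch of the plain integer-to-float conversions (hypothetical `vcvt_sketch` helper):
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn vcvt_sketch() {
    use core::arch::aarch64::{vcvt_f64_s64, vdup_n_s64, vget_lane_f64};
    let v = vcvt_f64_s64(vdup_n_s64(-3));
    assert_eq!(vget_lane_f64::<0>(v), -3.0);
}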
/// Floating-point convert to higher precision long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl))]
@@ -2853,6 +3329,8 @@ pub unsafe fn vcvt_f64_f32(a: float32x2_t) -> float64x2_t {
}
/// Floating-point convert to higher precision long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f64_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl))]
@@ -2863,6 +3341,8 @@ pub unsafe fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t {
}
/// Floating-point convert to lower precision narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn))]
@@ -2872,6 +3352,8 @@ pub unsafe fn vcvt_f32_f64(a: float64x2_t) -> float32x2_t {
}
/// Floating-point convert to lower precision narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn))]
@@ -2881,6 +3363,8 @@ pub unsafe fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
}
/// Floating-point convert to lower precision narrow, rounding to odd
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_f32_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn))]
@@ -2895,6 +3379,8 @@ pub unsafe fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t {
}
/// Floating-point convert to lower precision narrow, rounding to odd
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtxd_f32_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn))]
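The round-to-odd converts exist for two-step narrowing (f64 to f32 to f16) without double-rounding error. A sketch of the scalar form, using an exactly representable value so the result is easy to state:

    #[cfg(target_arch = "aarch64")]
    fn round_to_odd_demo() {
        use core::arch::aarch64::*;
        unsafe {
            // fcvtxn: 1.5 is exact in f32, so it passes through unchanged;
            // inexact inputs round to the nearest f32 with an odd mantissa bit.
            assert_eq!(vcvtxd_f32_f64(1.5f64), 1.5f32);
        }
    }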
@@ -2904,6 +3390,8 @@ pub unsafe fn vcvtxd_f32_f64(a: f64) -> f32 {
}
/// Floating-point convert to lower precision narrow, rounding to odd
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_high_f32_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn))]
@@ -2913,6 +3401,8 @@ pub unsafe fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t
}
/// Fixed-point convert to floating-point
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
@@ -2929,6 +3419,8 @@ pub unsafe fn vcvt_n_f64_s64<const N: i32>(a: int64x1_t) -> float64x1_t {
}
/// Fixed-point convert to floating-point
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
@@ -2945,6 +3437,8 @@ pub unsafe fn vcvtq_n_f64_s64<const N: i32>(a: int64x2_t) -> float64x2_t {
}
/// Fixed-point convert to floating-point
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
@@ -2961,6 +3455,8 @@ pub unsafe fn vcvts_n_f32_s32<const N: i32>(a: i32) -> f32 {
}
/// Fixed-point convert to floating-point
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
@@ -2977,6 +3473,8 @@ pub unsafe fn vcvtd_n_f64_s64<const N: i32>(a: i64) -> f64 {
}
/// Fixed-point convert to floating-point
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
@@ -2993,6 +3491,8 @@ pub unsafe fn vcvt_n_f64_u64<const N: i32>(a: uint64x1_t) -> float64x1_t {
}
/// Fixed-point convert to floating-point
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
@@ -3009,6 +3509,8 @@ pub unsafe fn vcvtq_n_f64_u64<const N: i32>(a: uint64x2_t) -> float64x2_t {
}
/// Fixed-point convert to floating-point
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
@@ -3025,6 +3527,8 @@ pub unsafe fn vcvts_n_f32_u32<const N: i32>(a: u32) -> f32 {
}
/// Fixed-point convert to floating-point
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
@@ -3041,6 +3545,8 @@ pub unsafe fn vcvtd_n_f64_u64<const N: i32>(a: u64) -> f64 {
}
/// Floating-point convert to fixed-point, rounding toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
@@ -3057,6 +3563,8 @@ pub unsafe fn vcvt_n_s64_f64<const N: i32>(a: float64x1_t) -> int64x1_t {
}
/// Floating-point convert to fixed-point, rounding toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
@@ -3073,6 +3581,8 @@ pub unsafe fn vcvtq_n_s64_f64<const N: i32>(a: float64x2_t) -> int64x2_t {
}
/// Floating-point convert to fixed-point, rounding toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
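The `_n_` converts treat the integer operand as a fixed-point number with N fractional bits, i.e. they scale by 2^-N on the way to floating-point and by 2^N on the way back. A scalar sketch with N = 4 (illustrative values):

    #[cfg(target_arch = "aarch64")]
    fn fixed_point_demo() {
        use core::arch::aarch64::*;
        unsafe {
            // scvtf #4: interpret 24 as 24 / 2^4 = 1.5.
            assert_eq!(vcvts_n_f32_s32::<4>(24), 1.5);
            // fcvtzs #4: the inverse direction, 1.5 * 2^4 = 24.
            assert_eq!(vcvts_n_s32_f32::<4>(1.5), 24);
        }
    }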
@@ -3089,6 +3599,8 @@ pub unsafe fn vcvts_n_s32_f32<const N: i32>(a: f32) -> i32 {
}
/// Floating-point convert to fixed-point, rounding toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
@@ -3105,6 +3617,8 @@ pub unsafe fn vcvtd_n_s64_f64<const N: i32>(a: f64) -> i64 {
}
/// Floating-point convert to fixed-point, rounding toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
@@ -3121,6 +3635,8 @@ pub unsafe fn vcvt_n_u64_f64<const N: i32>(a: float64x1_t) -> uint64x1_t {
}
/// Floating-point convert to fixed-point, rounding toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
@@ -3137,6 +3653,8 @@ pub unsafe fn vcvtq_n_u64_f64<const N: i32>(a: float64x2_t) -> uint64x2_t {
}
/// Floating-point convert to fixed-point, rounding toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_u32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
@@ -3153,6 +3671,8 @@ pub unsafe fn vcvts_n_u32_f32<const N: i32>(a: f32) -> u32 {
}
/// Floating-point convert to fixed-point, rounding toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
@@ -3169,6 +3689,8 @@ pub unsafe fn vcvtd_n_u64_f64<const N: i32>(a: f64) -> u64 {
}
/// Fixed-point convert to floating-point
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
@@ -3178,6 +3700,8 @@ pub unsafe fn vcvts_f32_s32(a: i32) -> f32 {
}
/// Fixed-point convert to floating-point
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
@@ -3187,6 +3711,8 @@ pub unsafe fn vcvtd_f64_s64(a: i64) -> f64 {
}
/// Fixed-point convert to floating-point
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
@@ -3196,6 +3722,8 @@ pub unsafe fn vcvts_f32_u32(a: u32) -> f32 {
}
/// Fixed-point convert to floating-point
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
@@ -3205,6 +3733,8 @@ pub unsafe fn vcvtd_f64_u64(a: u64) -> f64 {
}
/// Floating-point convert to signed fixed-point, rounding toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
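A sketch of the plain scalar converts in both directions (illustrative values):

    #[cfg(target_arch = "aarch64")]
    fn scalar_convert_demo() {
        use core::arch::aarch64::*;
        unsafe {
            // scvtf: signed integer to float.
            assert_eq!(vcvts_f32_s32(-7), -7.0);
            // fcvtzs: float back to signed integer, rounding toward zero.
            assert_eq!(vcvts_s32_f32(-7.9), -7);
        }
    }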
@@ -3214,6 +3744,8 @@ pub unsafe fn vcvts_s32_f32(a: f32) -> i32 {
}
/// Floating-point convert to signed fixed-point, rounding toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
@@ -3223,6 +3755,8 @@ pub unsafe fn vcvtd_s64_f64(a: f64) -> i64 {
}
/// Floating-point convert to unsigned fixed-point, rounding toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_u32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
@@ -3232,6 +3766,8 @@ pub unsafe fn vcvts_u32_f32(a: f32) -> u32 {
}
/// Floating-point convert to unsigned fixed-point, rounding toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
@@ -3241,6 +3777,8 @@ pub unsafe fn vcvtd_u64_f64(a: f64) -> u64 {
}
/// Floating-point convert to signed fixed-point, rounding toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
@@ -3255,6 +3793,8 @@ pub unsafe fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t {
}
/// Floating-point convert to signed fixed-point, rounding toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
@@ -3269,6 +3809,8 @@ pub unsafe fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t {
}
/// Floating-point convert to unsigned fixed-point, rounding toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
@@ -3283,6 +3825,8 @@ pub unsafe fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t {
}
/// Floating-point convert to unsigned fixed-point, rounding toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
@@ -3297,6 +3841,8 @@ pub unsafe fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t {
}
/// Floating-point convert to signed integer, rounding to nearest with ties to away
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
@@ -3311,6 +3857,8 @@ pub unsafe fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t {
}
/// Floating-point convert to signed integer, rounding to nearest with ties to away
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
@@ -3325,6 +3873,8 @@ pub unsafe fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t {
}
/// Floating-point convert to signed integer, rounding to nearest with ties to away
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
@@ -3339,6 +3889,8 @@ pub unsafe fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t {
}
/// Floating-point convert to signed integer, rounding to nearest with ties to away
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
@@ -3353,6 +3905,8 @@ pub unsafe fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t {
}
/// Floating-point convert to integer, rounding to nearest with ties to away
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
@@ -3367,6 +3921,8 @@ pub unsafe fn vcvtas_s32_f32(a: f32) -> i32 {
}
/// Floating-point convert to integer, rounding to nearest with ties to away
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
@@ -3381,6 +3937,8 @@ pub unsafe fn vcvtad_s64_f64(a: f64) -> i64 {
}
/// Floating-point convert to integer, rounding to nearest with ties to away
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_u32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
@@ -3395,6 +3953,8 @@ pub unsafe fn vcvtas_u32_f32(a: f32) -> u32 {
}
/// Floating-point convert to integer, rounding to nearest with ties to away
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
@@ -3409,6 +3969,8 @@ pub unsafe fn vcvtad_u64_f64(a: f64) -> u64 {
}
/// Floating-point convert to signed integer, rounding to nearest with ties to even
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
@@ -3423,6 +3985,8 @@ pub unsafe fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t {
}
/// Floating-point convert to signed integer, rounding to nearest with ties to even
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
@@ -3437,6 +4001,8 @@ pub unsafe fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t {
}
/// Floating-point convert to signed integer, rounding to nearest with ties to even
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
@@ -3451,6 +4017,8 @@ pub unsafe fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t {
}
/// Floating-point convert to signed integer, rounding to nearest with ties to even
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
@@ -3465,6 +4033,8 @@ pub unsafe fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t {
}
/// Floating-point convert to signed integer, rounding to nearest with ties to even
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
@@ -3479,6 +4049,8 @@ pub unsafe fn vcvtns_s32_f32(a: f32) -> i32 {
}
/// Floating-point convert to signed integer, rounding to nearest with ties to even
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
@@ -3493,6 +4065,8 @@ pub unsafe fn vcvtnd_s64_f64(a: f64) -> i64 {
}
/// Floating-point convert to signed integer, rounding toward minus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
@@ -3507,6 +4081,8 @@ pub unsafe fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t {
}
/// Floating-point convert to signed integer, rounding toward minus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
@@ -3521,6 +4097,8 @@ pub unsafe fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t {
}
/// Floating-point convert to signed integer, rounding toward minus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
@@ -3535,6 +4113,8 @@ pub unsafe fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t {
}
/// Floating-point convert to signed integer, rounding toward minus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
@@ -3549,6 +4129,8 @@ pub unsafe fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t {
}
/// Floating-point convert to signed integer, rounding toward minus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
@@ -3563,6 +4145,8 @@ pub unsafe fn vcvtms_s32_f32(a: f32) -> i32 {
}
/// Floating-point convert to signed integer, rounding toward minus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
@@ -3577,6 +4161,8 @@ pub unsafe fn vcvtmd_s64_f64(a: f64) -> i64 {
}
/// Floating-point convert to signed integer, rounding toward plus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
@@ -3591,6 +4177,8 @@ pub unsafe fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t {
}
/// Floating-point convert to signed integer, rounding toward plus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
@@ -3605,6 +4193,8 @@ pub unsafe fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t {
}
/// Floating-point convert to signed integer, rounding toward plus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
@@ -3619,6 +4209,8 @@ pub unsafe fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t {
}
/// Floating-point convert to signed integer, rounding toward plus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
@@ -3633,6 +4225,8 @@ pub unsafe fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t {
}
/// Floating-point convert to signed integer, rounding toward plus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
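The explicit-rounding converts differ only in how they break ties or which direction they round; a scalar sketch comparing them on the same inputs (illustrative values):

    #[cfg(target_arch = "aarch64")]
    fn rounding_mode_demo() {
        use core::arch::aarch64::*;
        unsafe {
            assert_eq!(vcvtas_s32_f32(2.5), 3);   // fcvtas: ties away from zero
            assert_eq!(vcvtns_s32_f32(2.5), 2);   // fcvtns: ties to even
            assert_eq!(vcvtms_s32_f32(2.5), 2);   // fcvtms: toward minus infinity
            assert_eq!(vcvtps_s32_f32(2.5), 3);   // fcvtps: toward plus infinity
            assert_eq!(vcvtms_s32_f32(-2.5), -3);
            assert_eq!(vcvtps_s32_f32(-2.5), -2);
        }
    }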
@@ -3647,6 +4241,8 @@ pub unsafe fn vcvtps_s32_f32(a: f32) -> i32 {
}
/// Floating-point convert to signed integer, rounding toward plus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
@@ -3661,6 +4257,8 @@ pub unsafe fn vcvtpd_s64_f64(a: f64) -> i64 {
}
/// Floating-point convert to unsigned integer, rounding to nearest with ties to away
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
@@ -3675,6 +4273,8 @@ pub unsafe fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t {
}
/// Floating-point convert to unsigned integer, rounding to nearest with ties to away
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
@@ -3689,6 +4289,8 @@ pub unsafe fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t {
}
/// Floating-point convert to unsigned integer, rounding to nearest with ties to away
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
@@ -3703,6 +4305,8 @@ pub unsafe fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t {
}
/// Floating-point convert to unsigned integer, rounding to nearest with ties to away
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
@@ -3717,6 +4321,8 @@ pub unsafe fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t {
}
/// Floating-point convert to unsigned integer, rounding to nearest with ties to even
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
@@ -3731,6 +4337,8 @@ pub unsafe fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t {
}
/// Floating-point convert to unsigned integer, rounding to nearest with ties to even
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
@@ -3745,6 +4353,8 @@ pub unsafe fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t {
}
/// Floating-point convert to unsigned integer, rounding to nearest with ties to even
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
@@ -3759,6 +4369,8 @@ pub unsafe fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t {
}
/// Floating-point convert to unsigned integer, rounding to nearest with ties to even
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
@@ -3773,6 +4385,8 @@ pub unsafe fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t {
}
/// Floating-point convert to unsigned integer, rounding to nearest with ties to even
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_u32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
@@ -3787,6 +4401,8 @@ pub unsafe fn vcvtns_u32_f32(a: f32) -> u32 {
}
/// Floating-point convert to unsigned integer, rounding to nearest with ties to even
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
@@ -3801,6 +4417,8 @@ pub unsafe fn vcvtnd_u64_f64(a: f64) -> u64 {
}
/// Floating-point convert to unsigned integer, rounding toward minus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
@@ -3815,6 +4433,8 @@ pub unsafe fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t {
}
/// Floating-point convert to unsigned integer, rounding toward minus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
@@ -3829,6 +4449,8 @@ pub unsafe fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t {
}
/// Floating-point convert to unsigned integer, rounding toward minus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
@@ -3843,6 +4465,8 @@ pub unsafe fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t {
}
/// Floating-point convert to unsigned integer, rounding toward minus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
@@ -3857,6 +4481,8 @@ pub unsafe fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t {
}
/// Floating-point convert to unsigned integer, rounding toward minus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_u32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
@@ -3871,6 +4497,8 @@ pub unsafe fn vcvtms_u32_f32(a: f32) -> u32 {
}
/// Floating-point convert to unsigned integer, rounding toward minus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
@@ -3885,6 +4513,8 @@ pub unsafe fn vcvtmd_u64_f64(a: f64) -> u64 {
}
/// Floating-point convert to unsigned integer, rounding toward plus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
@@ -3899,6 +4529,8 @@ pub unsafe fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t {
}
/// Floating-point convert to unsigned integer, rounding toward plus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
@@ -3913,6 +4545,8 @@ pub unsafe fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t {
}
/// Floating-point convert to unsigned integer, rounding toward plus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
@@ -3927,6 +4561,8 @@ pub unsafe fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t {
}
/// Floating-point convert to unsigned integer, rounding toward plus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
@@ -3941,6 +4577,8 @@ pub unsafe fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t {
}
/// Floating-point convert to unsigned integer, rounding toward plus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_u32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
@@ -3955,6 +4593,8 @@ pub unsafe fn vcvtps_u32_f32(a: f32) -> u32 {
}
/// Floating-point convert to unsigned integer, rounding toward plus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
@@ -3969,6 +4609,8 @@ pub unsafe fn vcvtpd_u64_f64(a: f64) -> u64 {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 1))]
@@ -3980,6 +4622,8 @@ pub unsafe fn vdupq_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x2_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 0))]
@@ -3991,6 +4635,8 @@ pub unsafe fn vdupq_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x2_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 1))]
@@ -4002,6 +4648,8 @@ pub unsafe fn vdupq_laneq_f64<const N: i32>(a: float64x2_t) -> float64x2_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 0))]
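A sketch of the lane-broadcast forms (illustrative values):

    #[cfg(target_arch = "aarch64")]
    fn broadcast_lane_demo() {
        use core::arch::aarch64::*;
        unsafe {
            let a = vld1q_f64([1.0f64, 2.0].as_ptr());
            // dup: broadcast lane 1 of `a` into every lane of the result.
            let r = vdupq_laneq_f64::<1>(a);
            assert_eq!(vgetq_lane_f64::<0>(r), 2.0);
            assert_eq!(vgetq_lane_f64::<1>(r), 2.0);
        }
    }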
@@ -4013,6 +4661,8 @@ pub unsafe fn vdupq_lane_f64<const N: i32>(a: float64x1_t) -> float64x2_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
@@ -4024,6 +4674,8 @@ pub unsafe fn vdup_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x1_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
@@ -4035,6 +4687,8 @@ pub unsafe fn vdup_lane_f64<const N: i32>(a: float64x1_t) -> float64x1_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
@@ -4046,6 +4700,8 @@ pub unsafe fn vdup_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x1_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
@@ -4057,6 +4713,8 @@ pub unsafe fn vdup_laneq_f64<const N: i32>(a: float64x2_t) -> float64x1_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
@@ -4068,6 +4726,8 @@ pub unsafe fn vdupb_lane_s8<const N: i32>(a: int8x8_t) -> i8 {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
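Despite the `dup` naming, the scalar-suffixed forms (`vdupb_*`, `vduph_*`, `vdups_*`, `vdupd_*`) simply return one lane as a scalar; a sketch (illustrative values):

    #[cfg(target_arch = "aarch64")]
    fn lane_to_scalar_demo() {
        use core::arch::aarch64::*;
        unsafe {
            let bytes: [i8; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
            let v = vld1q_s8(bytes.as_ptr());
            assert_eq!(vdupb_laneq_s8::<8>(v), 8);             // lane 8 of the 128-bit vector
            assert_eq!(vdupb_lane_s8::<4>(vget_low_s8(v)), 4); // lane 4 of its low half
        }
    }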
@@ -4079,6 +4739,8 @@ pub unsafe fn vdupb_laneq_s8<const N: i32>(a: int8x16_t) -> i8 {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
@@ -4090,6 +4752,8 @@ pub unsafe fn vduph_lane_s16<const N: i32>(a: int16x4_t) -> i16 {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
@@ -4101,6 +4765,8 @@ pub unsafe fn vduph_laneq_s16<const N: i32>(a: int16x8_t) -> i16 {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
@@ -4112,6 +4778,8 @@ pub unsafe fn vdups_lane_s32<const N: i32>(a: int32x2_t) -> i32 {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
@@ -4123,6 +4791,8 @@ pub unsafe fn vdups_laneq_s32<const N: i32>(a: int32x4_t) -> i32 {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
@@ -4134,6 +4804,8 @@ pub unsafe fn vdupd_lane_s64<const N: i32>(a: int64x1_t) -> i64 {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
@@ -4145,6 +4817,8 @@ pub unsafe fn vdupd_laneq_s64<const N: i32>(a: int64x2_t) -> i64 {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
@@ -4156,6 +4830,8 @@ pub unsafe fn vdupb_lane_u8<const N: i32>(a: uint8x8_t) -> u8 {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
@@ -4167,6 +4843,8 @@ pub unsafe fn vdupb_laneq_u8<const N: i32>(a: uint8x16_t) -> u8 {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
@@ -4178,6 +4856,8 @@ pub unsafe fn vduph_lane_u16<const N: i32>(a: uint16x4_t) -> u16 {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
@@ -4189,6 +4869,8 @@ pub unsafe fn vduph_laneq_u16<const N: i32>(a: uint16x8_t) -> u16 {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
@@ -4200,6 +4882,8 @@ pub unsafe fn vdups_lane_u32<const N: i32>(a: uint32x2_t) -> u32 {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
@@ -4211,6 +4895,8 @@ pub unsafe fn vdups_laneq_u32<const N: i32>(a: uint32x4_t) -> u32 {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
@@ -4222,6 +4908,8 @@ pub unsafe fn vdupd_lane_u64<const N: i32>(a: uint64x1_t) -> u64 {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
@@ -4233,6 +4921,8 @@ pub unsafe fn vdupd_laneq_u64<const N: i32>(a: uint64x2_t) -> u64 {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
@@ -4244,6 +4934,8 @@ pub unsafe fn vdupb_lane_p8<const N: i32>(a: poly8x8_t) -> p8 {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
@@ -4255,6 +4947,8 @@ pub unsafe fn vdupb_laneq_p8<const N: i32>(a: poly8x16_t) -> p8 {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
@@ -4266,6 +4960,8 @@ pub unsafe fn vduph_lane_p16<const N: i32>(a: poly16x4_t) -> p16 {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
@@ -4277,6 +4973,8 @@ pub unsafe fn vduph_laneq_p16<const N: i32>(a: poly16x8_t) -> p16 {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
@@ -4288,6 +4986,8 @@ pub unsafe fn vdups_lane_f32<const N: i32>(a: float32x2_t) -> f32 {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
@@ -4299,6 +4999,8 @@ pub unsafe fn vdups_laneq_f32<const N: i32>(a: float32x4_t) -> f32 {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
@@ -4310,6 +5012,8 @@ pub unsafe fn vdupd_lane_f64<const N: i32>(a: float64x1_t) -> f64 {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
@@ -4321,6 +5025,8 @@ pub unsafe fn vdupd_laneq_f64<const N: i32>(a: float64x2_t) -> f64 {
}
/// Extract vector from pair of vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ext, N = 1))]
@@ -4336,6 +5042,8 @@ pub unsafe fn vextq_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_
}
/// Extract vector from pair of vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ext, N = 1))]
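EXT treats the pair (a, b) as one concatenated vector and re-reads it starting at lane N; a sketch with N = 1 (illustrative values):

    #[cfg(target_arch = "aarch64")]
    fn extract_pair_demo() {
        use core::arch::aarch64::*;
        unsafe {
            let a = vld1q_f64([1.0f64, 2.0].as_ptr());
            let b = vld1q_f64([3.0f64, 4.0].as_ptr());
            // ext #1: lanes are taken from a:b starting at lane 1, i.e. [a[1], b[0]].
            let r = vextq_f64::<1>(a, b);
            assert_eq!(vgetq_lane_f64::<0>(r), 2.0);
            assert_eq!(vgetq_lane_f64::<1>(r), 3.0);
        }
    }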
@@ -4351,6 +5059,8 @@ pub unsafe fn vextq_f64<const N: i32>(a: float64x2_t, b: float64x2_t) -> float64
}
/// Floating-point multiply-add to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
@@ -4360,6 +5070,8 @@ pub unsafe fn vmla_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float6
}
/// Floating-point multiply-add to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
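vmla/vmls compute a plus or minus b * c per lane (as the assert_instr hints, via a separate fmul rather than a fused multiply-add); a sketch (illustrative values):

    #[cfg(target_arch = "aarch64")]
    fn mul_accumulate_demo() {
        use core::arch::aarch64::*;
        unsafe {
            let a = vdup_n_f64(1.0);
            let b = vdup_n_f64(2.0);
            let c = vdup_n_f64(3.0);
            assert_eq!(vget_lane_f64::<0>(vmla_f64(a, b, c)), 7.0);  // 1 + 2*3
            assert_eq!(vget_lane_f64::<0>(vmls_f64(a, b, c)), -5.0); // 1 - 2*3
        }
    }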
@@ -4369,6 +5081,8 @@ pub unsafe fn vmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float
}
/// Signed multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
@@ -4380,6 +5094,8 @@ pub unsafe fn vmlal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8
}
/// Signed multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
@@ -4391,6 +5107,8 @@ pub unsafe fn vmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x
}
/// Signed multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
@@ -4402,6 +5120,8 @@ pub unsafe fn vmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x
}
/// Unsigned multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
@@ -4413,6 +5133,8 @@ pub unsafe fn vmlal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint
}
/// Unsigned multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
@@ -4424,6 +5146,8 @@ pub unsafe fn vmlal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uin
}
/// Unsigned multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
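The `_high` multiply-add-long forms widen the upper halves of the two narrow inputs, multiply them, and add the products to the wide accumulator; a sketch for the s16 variant (illustrative values):

    #[cfg(target_arch = "aarch64")]
    fn widening_mac_demo() {
        use core::arch::aarch64::*;
        unsafe {
            let acc = vdupq_n_s32(10);
            let b = vld1q_s16([0i16, 0, 0, 0, 1, 2, 3, 4].as_ptr());
            let c = vdupq_n_s16(100);
            // smlal2: only lanes 4..7 of b and c participate, widened to 32 bits.
            let r = vmlal_high_s16(acc, b, c);
            assert_eq!(vgetq_lane_s32::<0>(r), 110); // 10 + 1 * 100
            assert_eq!(vgetq_lane_s32::<3>(r), 410); // 10 + 4 * 100
        }
    }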
@@ -4435,6 +5159,8 @@ pub unsafe fn vmlal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uin
}
/// Multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
@@ -4444,6 +5170,8 @@ pub unsafe fn vmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t
}
/// Multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
@@ -4453,6 +5181,8 @@ pub unsafe fn vmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t
}
/// Multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
@@ -4462,6 +5192,8 @@ pub unsafe fn vmlal_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4
}
/// Multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
@@ -4471,6 +5203,8 @@ pub unsafe fn vmlal_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2
}
/// Multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
@@ -4482,6 +5216,8 @@ pub unsafe fn vmlal_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c
}
/// Multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
@@ -4493,6 +5229,8 @@ pub unsafe fn vmlal_high_laneq_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t,
}
/// Multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
@@ -4504,6 +5242,8 @@ pub unsafe fn vmlal_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c
}
/// Multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
@@ -4515,6 +5255,8 @@ pub unsafe fn vmlal_high_laneq_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t,
}
/// Multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
@@ -4526,6 +5268,8 @@ pub unsafe fn vmlal_high_lane_u16<const LANE: i32>(a: uint32x4_t, b: uint16x8_t,
}
/// Multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
@@ -4537,6 +5281,8 @@ pub unsafe fn vmlal_high_laneq_u16<const LANE: i32>(a: uint32x4_t, b: uint16x8_t
}
/// Multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
@@ -4548,6 +5294,8 @@ pub unsafe fn vmlal_high_lane_u32<const LANE: i32>(a: uint64x2_t, b: uint32x4_t,
}
/// Multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
@@ -4559,6 +5307,8 @@ pub unsafe fn vmlal_high_laneq_u32<const LANE: i32>(a: uint64x2_t, b: uint32x4_t
}
/// Floating-point multiply-subtract from accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
@@ -4568,6 +5318,8 @@ pub unsafe fn vmls_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float6
}
/// Floating-point multiply-subtract from accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
@@ -4577,6 +5329,8 @@ pub unsafe fn vmlsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float
}
/// Signed multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
@@ -4588,6 +5342,8 @@ pub unsafe fn vmlsl_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8
}
/// Signed multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
@@ -4599,6 +5355,8 @@ pub unsafe fn vmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x
}
/// Signed multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
@@ -4610,6 +5368,8 @@ pub unsafe fn vmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x
}
/// Unsigned multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
@@ -4621,6 +5381,8 @@ pub unsafe fn vmlsl_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint
}
/// Unsigned multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
@@ -4632,6 +5394,8 @@ pub unsafe fn vmlsl_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uin
}
/// Unsigned multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
@@ -4643,6 +5407,8 @@ pub unsafe fn vmlsl_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uin
}
/// Multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
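The `_n` multiply-subtract-long variants broadcast one scalar operand across all lanes before the widening multiply; a sketch (illustrative values):

    #[cfg(target_arch = "aarch64")]
    fn widening_msub_demo() {
        use core::arch::aarch64::*;
        unsafe {
            let acc = vdupq_n_s32(1000);
            let b = vld1q_s16([0i16, 0, 0, 0, 1, 2, 3, 4].as_ptr());
            // smlsl2: subtract (upper half of b, widened) * 10 from each accumulator lane.
            let r = vmlsl_high_n_s16(acc, b, 10);
            assert_eq!(vgetq_lane_s32::<0>(r), 990); // 1000 - 1 * 10
            assert_eq!(vgetq_lane_s32::<3>(r), 960); // 1000 - 4 * 10
        }
    }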
@@ -4652,6 +5418,8 @@ pub unsafe fn vmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t
}
/// Multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
@@ -4661,6 +5429,8 @@ pub unsafe fn vmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t
}
/// Multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
@@ -4670,6 +5440,8 @@ pub unsafe fn vmlsl_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4
}
/// Multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
@@ -4679,6 +5451,8 @@ pub unsafe fn vmlsl_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2
}
/// Multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
@@ -4690,6 +5464,8 @@ pub unsafe fn vmlsl_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c
}
/// Multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
@@ -4701,6 +5477,8 @@ pub unsafe fn vmlsl_high_laneq_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t,
}
/// Multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
@@ -4712,6 +5490,8 @@ pub unsafe fn vmlsl_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c
}
/// Multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
@@ -4723,6 +5503,8 @@ pub unsafe fn vmlsl_high_laneq_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t,
}
/// Multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
@@ -4734,6 +5516,8 @@ pub unsafe fn vmlsl_high_lane_u16<const LANE: i32>(a: uint32x4_t, b: uint16x8_t,
}
/// Multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
@@ -4745,6 +5529,8 @@ pub unsafe fn vmlsl_high_laneq_u16<const LANE: i32>(a: uint32x4_t, b: uint16x8_t
}
/// Multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
@@ -4756,6 +5542,8 @@ pub unsafe fn vmlsl_high_lane_u32<const LANE: i32>(a: uint64x2_t, b: uint32x4_t,
}
/// Multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
@@ -4767,6 +5555,8 @@ pub unsafe fn vmlsl_high_laneq_u32<const LANE: i32>(a: uint64x2_t, b: uint32x4_t
}
/// Extract narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(xtn2))]
@@ -4777,6 +5567,8 @@ pub unsafe fn vmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
}
/// Extract narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(xtn2))]
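The `_high` narrowing moves truncate the wide input and append the result after an existing narrow vector, yielding a full 128-bit value; a sketch (illustrative values):

    #[cfg(target_arch = "aarch64")]
    fn narrow_high_demo() {
        use core::arch::aarch64::*;
        unsafe {
            let low = vdup_n_s8(7);
            let wide = vdupq_n_s16(0x0201);
            // xtn2: keep `low` as lanes 0..7, truncate each i16 of `wide` into lanes 8..15.
            let r = vmovn_high_s16(low, wide);
            assert_eq!(vgetq_lane_s8::<0>(r), 7);
            assert_eq!(vgetq_lane_s8::<8>(r), 0x01); // 0x0201 truncated to its low byte
        }
    }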
@@ -4787,6 +5579,8 @@ pub unsafe fn vmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
}
/// Extract narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(xtn2))]
@@ -4797,6 +5591,8 @@ pub unsafe fn vmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
}
/// Extract narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(xtn2))]
@@ -4807,6 +5603,8 @@ pub unsafe fn vmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
}
/// Extract narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(xtn2))]
@@ -4817,6 +5615,8 @@ pub unsafe fn vmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
}
/// Extract narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(xtn2))]
@@ -4827,6 +5627,8 @@ pub unsafe fn vmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
}
/// Negate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(neg))]
@@ -4836,6 +5638,8 @@ pub unsafe fn vneg_s64(a: int64x1_t) -> int64x1_t {
}
/// Negate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(neg))]
@@ -4845,6 +5649,8 @@ pub unsafe fn vnegq_s64(a: int64x2_t) -> int64x2_t {
}
/// Negate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(neg))]
@@ -4854,6 +5660,8 @@ pub unsafe fn vnegd_s64(a: i64) -> i64 {
}
/// Negate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fneg))]
@@ -4863,6 +5671,8 @@ pub unsafe fn vneg_f64(a: float64x1_t) -> float64x1_t {
}
/// Negate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fneg))]
@@ -4872,6 +5682,8 @@ pub unsafe fn vnegq_f64(a: float64x2_t) -> float64x2_t {
}
/// Signed saturating negate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqneg))]
@@ -4886,6 +5698,8 @@ pub unsafe fn vqneg_s64(a: int64x1_t) -> int64x1_t {
}
/// Signed saturating negate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqneg))]
@@ -4900,6 +5714,8 @@ pub unsafe fn vqnegq_s64(a: int64x2_t) -> int64x2_t {
}
/// Signed saturating negate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegb_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqneg))]
@@ -4909,6 +5725,8 @@ pub unsafe fn vqnegb_s8(a: i8) -> i8 {
}
/// Signed saturating negate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqneg))]
@@ -4918,6 +5736,8 @@ pub unsafe fn vqnegh_s16(a: i16) -> i16 {
}
/// Signed saturating negate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegs_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqneg))]
@@ -4927,6 +5747,8 @@ pub unsafe fn vqnegs_s32(a: i32) -> i32 {
}
/// Signed saturating negate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqneg))]
@@ -4936,6 +5758,8 @@ pub unsafe fn vqnegd_s64(a: i64) -> i64 {
}
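Illustrative check of the saturating scalar negate documented above (helper name assumed); the interesting case is the type minimum, where plain negation would overflow.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn qneg_demo() {
    use core::arch::aarch64::vqnegd_s64;
    assert_eq!(vqnegd_s64(5), -5);
    assert_eq!(vqnegd_s64(i64::MIN), i64::MAX); // saturates instead of wrapping
}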
/// Saturating subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqsub))]
@@ -4947,6 +5771,8 @@ pub unsafe fn vqsubb_s8(a: i8, b: i8) -> i8 {
}
/// Saturating subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqsub))]
@@ -4958,6 +5784,8 @@ pub unsafe fn vqsubh_s16(a: i16, b: i16) -> i16 {
}
/// Saturating subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqsub))]
@@ -4969,6 +5797,8 @@ pub unsafe fn vqsubb_u8(a: u8, b: u8) -> u8 {
}
/// Saturating subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqsub))]
@@ -4980,6 +5810,8 @@ pub unsafe fn vqsubh_u16(a: u16, b: u16) -> u16 {
}
/// Saturating subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqsub))]
@@ -4994,6 +5826,8 @@ pub unsafe fn vqsubs_u32(a: u32, b: u32) -> u32 {
}
/// Saturating subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqsub))]
@@ -5008,6 +5842,8 @@ pub unsafe fn vqsubd_u64(a: u64, b: u64) -> u64 {
}
/// Saturating subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqsub))]
@@ -5022,6 +5858,8 @@ pub unsafe fn vqsubs_s32(a: i32, b: i32) -> i32 {
}
/// Saturating subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqsub))]
@@ -5036,6 +5874,8 @@ pub unsafe fn vqsubd_s64(a: i64, b: i64) -> i64 {
}
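Illustrative sketch of the scalar saturating subtracts documented above (helper name assumed): results clamp at the numeric bounds instead of wrapping.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn qsub_demo() {
    use core::arch::aarch64::{vqsubb_s8, vqsubb_u8};
    assert_eq!(vqsubb_u8(3, 10), 0);            // unsigned difference floors at 0
    assert_eq!(vqsubb_s8(i8::MIN, 1), i8::MIN); // signed difference clamps at i8::MIN
}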
/// Reverse bit order
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rbit))]
@@ -5050,6 +5890,8 @@ pub unsafe fn vrbit_s8(a: int8x8_t) -> int8x8_t {
}
/// Reverse bit order
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rbit))]
@@ -5064,6 +5906,8 @@ pub unsafe fn vrbitq_s8(a: int8x16_t) -> int8x16_t {
}
/// Reverse bit order
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rbit))]
@@ -5073,6 +5917,8 @@ pub unsafe fn vrbit_u8(a: uint8x8_t) -> uint8x8_t {
}
/// Reverse bit order
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rbit))]
@@ -5082,6 +5928,8 @@ pub unsafe fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t {
}
/// Reverse bit order
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rbit))]
@@ -5091,6 +5939,8 @@ pub unsafe fn vrbit_p8(a: poly8x8_t) -> poly8x8_t {
}
/// Reverse bit order
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rbit))]
@@ -5100,6 +5950,8 @@ pub unsafe fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t {
}
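Illustrative sketch of the bit-reverse intrinsics documented above (helper name and input value assumed): the bit order within each byte lane is mirrored.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn rbit_demo() {
    use core::arch::aarch64::*;
    let v = vdup_n_u8(0b0000_0001);
    let r = vrbit_u8(v);
    assert_eq!(vget_lane_u8::<0>(r), 0b1000_0000); // bit 0 moves to bit 7
}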
/// Floating-point round to integral exact, using current rounding mode
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintx))]
@@ -5114,6 +5966,8 @@ pub unsafe fn vrndx_f32(a: float32x2_t) -> float32x2_t {
}
/// Floating-point round to integral exact, using current rounding mode
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintx))]
@@ -5128,6 +5982,8 @@ pub unsafe fn vrndxq_f32(a: float32x4_t) -> float32x4_t {
}
/// Floating-point round to integral exact, using current rounding mode
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintx))]
@@ -5142,6 +5998,8 @@ pub unsafe fn vrndx_f64(a: float64x1_t) -> float64x1_t {
}
/// Floating-point round to integral exact, using current rounding mode
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintx))]
@@ -5156,6 +6014,8 @@ pub unsafe fn vrndxq_f64(a: float64x2_t) -> float64x2_t {
}
/// Floating-point round to integral, to nearest with ties to away
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frinta))]
@@ -5170,6 +6030,8 @@ pub unsafe fn vrnda_f32(a: float32x2_t) -> float32x2_t {
}
/// Floating-point round to integral, to nearest with ties to away
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frinta))]
@@ -5184,6 +6046,8 @@ pub unsafe fn vrndaq_f32(a: float32x4_t) -> float32x4_t {
}
/// Floating-point round to integral, to nearest with ties to away
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frinta))]
@@ -5198,6 +6062,8 @@ pub unsafe fn vrnda_f64(a: float64x1_t) -> float64x1_t {
}
/// Floating-point round to integral, to nearest with ties to away
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frinta))]
@@ -5212,6 +6078,8 @@ pub unsafe fn vrndaq_f64(a: float64x2_t) -> float64x2_t {
}
/// Floating-point round to integral, to nearest with ties to even
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintn))]
@@ -5226,6 +6094,8 @@ pub unsafe fn vrndn_f64(a: float64x1_t) -> float64x1_t {
}
/// Floating-point round to integral, to nearest with ties to even
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintn))]
@@ -5240,6 +6110,8 @@ pub unsafe fn vrndnq_f64(a: float64x2_t) -> float64x2_t {
}
/// Floating-point round to integral, to nearest with ties to even
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndns_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintn))]
@@ -5254,6 +6126,8 @@ pub unsafe fn vrndns_f32(a: f32) -> f32 {
}
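Illustrative sketch of the ties-to-even scalar rounding documented above (helper name assumed): exact halves round toward the even neighbour.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn rndn_demo() {
    use core::arch::aarch64::vrndns_f32;
    assert_eq!(vrndns_f32(2.5), 2.0); // 2.5 ties to the even value 2.0
    assert_eq!(vrndns_f32(3.5), 4.0); // 3.5 ties to the even value 4.0
}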
/// Floating-point round to integral, toward minus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintm))]
@@ -5268,6 +6142,8 @@ pub unsafe fn vrndm_f32(a: float32x2_t) -> float32x2_t {
}
/// Floating-point round to integral, toward minus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintm))]
@@ -5282,6 +6158,8 @@ pub unsafe fn vrndmq_f32(a: float32x4_t) -> float32x4_t {
}
/// Floating-point round to integral, toward minus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintm))]
@@ -5296,6 +6174,8 @@ pub unsafe fn vrndm_f64(a: float64x1_t) -> float64x1_t {
}
/// Floating-point round to integral, toward minus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintm))]
@@ -5310,6 +6190,8 @@ pub unsafe fn vrndmq_f64(a: float64x2_t) -> float64x2_t {
}
/// Floating-point round to integral, toward plus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintp))]
@@ -5324,6 +6206,8 @@ pub unsafe fn vrndp_f32(a: float32x2_t) -> float32x2_t {
}
/// Floating-point round to integral, toward plus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintp))]
@@ -5338,6 +6222,8 @@ pub unsafe fn vrndpq_f32(a: float32x4_t) -> float32x4_t {
}
/// Floating-point round to integral, toward plus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintp))]
@@ -5352,6 +6238,8 @@ pub unsafe fn vrndp_f64(a: float64x1_t) -> float64x1_t {
}
/// Floating-point round to integral, toward plus infinity
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintp))]
@@ -5366,6 +6254,8 @@ pub unsafe fn vrndpq_f64(a: float64x2_t) -> float64x2_t {
}
/// Floating-point round to integral, toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintz))]
@@ -5380,6 +6270,8 @@ pub unsafe fn vrnd_f32(a: float32x2_t) -> float32x2_t {
}
/// Floating-point round to integral, toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintz))]
@@ -5394,6 +6286,8 @@ pub unsafe fn vrndq_f32(a: float32x4_t) -> float32x4_t {
}
/// Floating-point round to integral, toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintz))]
@@ -5408,6 +6302,8 @@ pub unsafe fn vrnd_f64(a: float64x1_t) -> float64x1_t {
}
/// Floating-point round to integral, toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintz))]
@@ -5422,6 +6318,8 @@ pub unsafe fn vrndq_f64(a: float64x2_t) -> float64x2_t {
}
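Illustrative comparison of the directed rounding modes documented above on a single negative value (helper name assumed): toward minus infinity, toward plus infinity, and toward zero give different results at -1.5.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn directed_rounding_demo() {
    use core::arch::aarch64::*;
    let x = vdup_n_f64(-1.5);
    assert_eq!(vget_lane_f64::<0>(vrndm_f64(x)), -2.0); // toward minus infinity
    assert_eq!(vget_lane_f64::<0>(vrndp_f64(x)), -1.0); // toward plus infinity
    assert_eq!(vget_lane_f64::<0>(vrnd_f64(x)), -1.0);  // toward zero
}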
/// Floating-point round to integral, using current rounding mode
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frinti))]
@@ -5436,6 +6334,8 @@ pub unsafe fn vrndi_f32(a: float32x2_t) -> float32x2_t {
}
/// Floating-point round to integral, using current rounding mode
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frinti))]
@@ -5450,6 +6350,8 @@ pub unsafe fn vrndiq_f32(a: float32x4_t) -> float32x4_t {
}
/// Floating-point round to integral, using current rounding mode
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frinti))]
@@ -5464,6 +6366,8 @@ pub unsafe fn vrndi_f64(a: float64x1_t) -> float64x1_t {
}
/// Floating-point round to integral, using current rounding mode
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frinti))]
@@ -5478,6 +6382,8 @@ pub unsafe fn vrndiq_f64(a: float64x2_t) -> float64x2_t {
}
/// Saturating add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqadd))]
@@ -5489,6 +6395,8 @@ pub unsafe fn vqaddb_s8(a: i8, b: i8) -> i8 {
}
/// Saturating add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqadd))]
@@ -5500,6 +6408,8 @@ pub unsafe fn vqaddh_s16(a: i16, b: i16) -> i16 {
}
/// Saturating add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqadd))]
@@ -5511,6 +6421,8 @@ pub unsafe fn vqaddb_u8(a: u8, b: u8) -> u8 {
}
/// Saturating add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqadd))]
@@ -5522,6 +6434,8 @@ pub unsafe fn vqaddh_u16(a: u16, b: u16) -> u16 {
}
/// Saturating add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqadd))]
@@ -5536,6 +6450,8 @@ pub unsafe fn vqadds_u32(a: u32, b: u32) -> u32 {
}
/// Saturating add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqadd))]
@@ -5550,6 +6466,8 @@ pub unsafe fn vqaddd_u64(a: u64, b: u64) -> u64 {
}
/// Saturating add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqadd))]
@@ -5564,6 +6482,8 @@ pub unsafe fn vqadds_s32(a: i32, b: i32) -> i32 {
}
/// Saturating add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqadd))]
@@ -5578,6 +6498,8 @@ pub unsafe fn vqaddd_s64(a: i64, b: i64) -> i64 {
}
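Illustrative sketch of the scalar saturating adds documented above (helper name assumed): sums clamp at the type maximum instead of wrapping.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn qadd_demo() {
    use core::arch::aarch64::{vqaddb_s8, vqaddb_u8};
    assert_eq!(vqaddb_u8(200, 100), u8::MAX);   // 300 clamps to 255
    assert_eq!(vqaddb_s8(i8::MAX, 1), i8::MAX); // signed sum clamps at 127
}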
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld1))]
@@ -5592,6 +6514,8 @@ pub unsafe fn vld1_f64_x2(a: *const f64) -> float64x1x2_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld1))]
@@ -5606,6 +6530,8 @@ pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld1))]
@@ -5620,6 +6546,8 @@ pub unsafe fn vld1_f64_x3(a: *const f64) -> float64x1x3_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld1))]
@@ -5634,6 +6562,8 @@ pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld1))]
@@ -5648,6 +6578,8 @@ pub unsafe fn vld1_f64_x4(a: *const f64) -> float64x1x4_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld1))]
@@ -5662,6 +6594,8 @@ pub unsafe fn vld1q_f64_x4(a: *const f64) -> float64x2x4_t {
}
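Illustrative sketch of the multi-register contiguous loads documented above (helper name and buffer contents assumed): the _x2 form fills two registers from consecutive memory without any de-interleaving.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn ld1_x2_demo() {
    use core::arch::aarch64::*;
    let data = [1.0f64, 2.0, 3.0, 4.0];
    let pair: float64x2x2_t = vld1q_f64_x2(data.as_ptr()); // contiguous load
    assert_eq!(vgetq_lane_f64::<0>(pair.0), 1.0);
    assert_eq!(vgetq_lane_f64::<1>(pair.1), 4.0);
}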
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2))]
@@ -5676,6 +6610,8 @@ pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t {
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2))]
@@ -5685,6 +6621,8 @@ pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t {
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2))]
@@ -5694,6 +6632,8 @@ pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t {
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -5708,6 +6648,8 @@ pub unsafe fn vld2_f64(a: *const f64) -> float64x1x2_t {
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2))]
@@ -5722,6 +6664,8 @@ pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t {
}
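Illustrative sketch of the de-interleaving 2-element load documented above (helper name and data assumed): element 0 of each pair lands in the first vector, element 1 in the second.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn ld2_demo() {
    use core::arch::aarch64::*;
    let xy = [1.0f64, 10.0, 2.0, 20.0]; // interleaved x0, y0, x1, y1
    let v: float64x2x2_t = vld2q_f64(xy.as_ptr());
    assert_eq!(vgetq_lane_f64::<1>(v.0), 2.0);  // v.0 = {x0, x1}
    assert_eq!(vgetq_lane_f64::<1>(v.1), 20.0); // v.1 = {y0, y1}
}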
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
@@ -5736,6 +6680,8 @@ pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t {
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
@@ -5745,6 +6691,8 @@ pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t {
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2r))]
@@ -5754,6 +6702,8 @@ pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t {
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
@@ -5768,6 +6718,8 @@ pub unsafe fn vld2_dup_f64(a: *const f64) -> float64x1x2_t {
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
@@ -5782,6 +6734,8 @@ pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t {
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
@@ -5798,6 +6752,8 @@ pub unsafe fn vld2q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x2_t) -> in
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
@@ -5814,6 +6770,8 @@ pub unsafe fn vld2_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x2_t) -> i
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
@@ -5830,6 +6788,8 @@ pub unsafe fn vld2q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x2_t) ->
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
@@ -5841,6 +6801,8 @@ pub unsafe fn vld2_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x2_t) ->
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
@@ -5852,6 +6814,8 @@ pub unsafe fn vld2q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x2_t) ->
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
@@ -5863,6 +6827,8 @@ pub unsafe fn vld2q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x2_t) -> u
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
@@ -5874,6 +6840,8 @@ pub unsafe fn vld2_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x2_t) ->
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
@@ -5885,6 +6853,8 @@ pub unsafe fn vld2q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x2_t) ->
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
@@ -5896,6 +6866,8 @@ pub unsafe fn vld2q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x2_t) -> p
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
@@ -5912,6 +6884,8 @@ pub unsafe fn vld2_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x2_t) ->
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
@@ -5928,6 +6902,8 @@ pub unsafe fn vld2q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x2_t) -
}
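Illustrative sketch of the lane-load variants documented above (helper name assumed): only the selected lane of each vector is refilled from memory, the other lanes keep the values already in the passed-in registers.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn ld2_lane_demo(src: *const f64, regs: core::arch::aarch64::float64x2x2_t)
                        -> core::arch::aarch64::float64x2x2_t {
    use core::arch::aarch64::*;
    // Refill lane 1 of both vectors from the two f64 values at `src`;
    // lane 0 of each vector is taken unchanged from `regs`.
    vld2q_lane_f64::<1>(src, regs)
}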
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3))]
@@ -5942,6 +6918,8 @@ pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t {
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3))]
@@ -5951,6 +6929,8 @@ pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t {
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3))]
@@ -5960,6 +6940,8 @@ pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t {
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -5974,6 +6956,8 @@ pub unsafe fn vld3_f64(a: *const f64) -> float64x1x3_t {
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3))]
@@ -5988,6 +6972,8 @@ pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t {
}
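Illustrative sketch of the de-interleaving 3-element load documented above (helper name and data assumed): interleaved x/y/z triples are split into three component vectors.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn ld3_demo() {
    use core::arch::aarch64::*;
    let xyz = [1.0f64, 2.0, 3.0, 4.0, 5.0, 6.0]; // x0, y0, z0, x1, y1, z1
    let v: float64x2x3_t = vld3q_f64(xyz.as_ptr());
    assert_eq!(vgetq_lane_f64::<0>(v.2), 3.0); // v.2 = {z0, z1}
    assert_eq!(vgetq_lane_f64::<1>(v.0), 4.0); // v.0 = {x0, x1}
}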
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
@@ -6002,6 +6988,8 @@ pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t {
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
@@ -6011,6 +6999,8 @@ pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t {
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3r))]
@@ -6020,6 +7010,8 @@ pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t {
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
@@ -6034,6 +7026,8 @@ pub unsafe fn vld3_dup_f64(a: *const f64) -> float64x1x3_t {
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
@@ -6048,6 +7042,8 @@ pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t {
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
@@ -6064,6 +7060,8 @@ pub unsafe fn vld3q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x3_t) -> in
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
@@ -6080,6 +7078,8 @@ pub unsafe fn vld3_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x3_t) -> i
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
@@ -6096,6 +7096,8 @@ pub unsafe fn vld3q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x3_t) ->
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
@@ -6107,6 +7109,8 @@ pub unsafe fn vld3_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x3_t) ->
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
@@ -6118,6 +7122,8 @@ pub unsafe fn vld3q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x3_t) ->
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
@@ -6129,6 +7135,8 @@ pub unsafe fn vld3q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x3_t) -> p
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
@@ -6140,6 +7148,8 @@ pub unsafe fn vld3q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x3_t) -> u
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
@@ -6151,6 +7161,8 @@ pub unsafe fn vld3_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x3_t) ->
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
@@ -6162,6 +7174,8 @@ pub unsafe fn vld3q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x3_t) ->
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
@@ -6178,6 +7192,8 @@ pub unsafe fn vld3_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x3_t) ->
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
@@ -6194,6 +7210,8 @@ pub unsafe fn vld3q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x3_t) -
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4))]
@@ -6208,6 +7226,8 @@ pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t {
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4))]
@@ -6217,6 +7237,8 @@ pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t {
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4))]
@@ -6226,6 +7248,8 @@ pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t {
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -6240,6 +7264,8 @@ pub unsafe fn vld4_f64(a: *const f64) -> float64x1x4_t {
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4))]
@@ -6254,6 +7280,8 @@ pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t {
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
@@ -6268,6 +7296,8 @@ pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t {
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
@@ -6277,6 +7307,8 @@ pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t {
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4r))]
@@ -6286,6 +7318,8 @@ pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t {
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
@@ -6300,6 +7334,8 @@ pub unsafe fn vld4_dup_f64(a: *const f64) -> float64x1x4_t {
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
@@ -6314,6 +7350,8 @@ pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t {
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
@@ -6330,6 +7368,8 @@ pub unsafe fn vld4q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x4_t) -> in
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
@@ -6346,6 +7386,8 @@ pub unsafe fn vld4_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x4_t) -> i
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
@@ -6362,6 +7404,8 @@ pub unsafe fn vld4q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x4_t) ->
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
@@ -6373,6 +7417,8 @@ pub unsafe fn vld4_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x4_t) ->
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
@@ -6384,6 +7430,8 @@ pub unsafe fn vld4q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x4_t) ->
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
@@ -6395,6 +7443,8 @@ pub unsafe fn vld4q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x4_t) -> p
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
@@ -6406,6 +7456,8 @@ pub unsafe fn vld4q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x4_t) -> u
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
@@ -6417,6 +7469,8 @@ pub unsafe fn vld4_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x4_t) ->
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
@@ -6428,6 +7482,8 @@ pub unsafe fn vld4q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x4_t) ->
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
@@ -6444,6 +7500,8 @@ pub unsafe fn vld4_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x4_t) ->
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
@@ -6460,6 +7518,8 @@ pub unsafe fn vld4q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x4_t) -
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
@@ -6471,6 +7531,8 @@ pub unsafe fn vst1_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
@@ -6482,6 +7544,8 @@ pub unsafe fn vst1q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
@@ -6496,6 +7560,8 @@ pub unsafe fn vst1_f64_x2(a: *mut f64, b: float64x1x2_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
@@ -6510,6 +7576,8 @@ pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
@@ -6524,6 +7592,8 @@ pub unsafe fn vst1_f64_x3(a: *mut f64, b: float64x1x3_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
@@ -6538,6 +7608,8 @@ pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
@@ -6552,6 +7624,8 @@ pub unsafe fn vst1_f64_x4(a: *mut f64, b: float64x1x4_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
@@ -6566,6 +7640,8 @@ pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) {
}
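Illustrative round-trip sketch of the multi-register contiguous stores documented above (helper name and data assumed): the _x2 store writes two registers back to consecutive memory without interleaving.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn st1_x2_demo() {
    use core::arch::aarch64::*;
    let src = [1.0f64, 2.0, 3.0, 4.0];
    let regs = vld1q_f64_x2(src.as_ptr());
    let mut dst = [0.0f64; 4];
    vst1q_f64_x2(dst.as_mut_ptr(), regs); // contiguous store, no interleaving
    assert_eq!(dst, src);
}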
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2))]
@@ -6580,6 +7656,8 @@ pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2))]
@@ -6589,6 +7667,8 @@ pub unsafe fn vst2q_u64(a: *mut u64, b: uint64x2x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2))]
@@ -6598,6 +7678,8 @@ pub unsafe fn vst2q_p64(a: *mut p64, b: poly64x2x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
@@ -6612,6 +7694,8 @@ pub unsafe fn vst2_f64(a: *mut f64, b: float64x1x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2))]
@@ -6626,6 +7710,8 @@ pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) {
}
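Illustrative sketch of the interleaving 2-element store documented above (helper name and values assumed): lanes from the two source vectors are written alternately to memory.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn st2_demo() {
    use core::arch::aarch64::*;
    let x = vdupq_n_f64(1.0);
    let y = vdupq_n_f64(2.0);
    let mut out = [0.0f64; 4];
    vst2q_f64(out.as_mut_ptr(), float64x2x2_t(x, y)); // writes x0, y0, x1, y1
    assert_eq!(out, [1.0, 2.0, 1.0, 2.0]);
}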
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
@@ -6642,6 +7728,8 @@ pub unsafe fn vst2q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
@@ -6658,6 +7746,8 @@ pub unsafe fn vst2_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
@@ -6674,6 +7764,8 @@ pub unsafe fn vst2q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
@@ -6685,6 +7777,8 @@ pub unsafe fn vst2q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
@@ -6696,6 +7790,8 @@ pub unsafe fn vst2_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
@@ -6707,6 +7803,8 @@ pub unsafe fn vst2q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
@@ -6718,6 +7816,8 @@ pub unsafe fn vst2q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
@@ -6729,6 +7829,8 @@ pub unsafe fn vst2_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
@@ -6740,6 +7842,8 @@ pub unsafe fn vst2q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
@@ -6756,6 +7860,8 @@ pub unsafe fn vst2_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
@@ -6772,6 +7878,8 @@ pub unsafe fn vst2q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x2_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3))]
@@ -6786,6 +7894,8 @@ pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3))]
@@ -6795,6 +7905,8 @@ pub unsafe fn vst3q_u64(a: *mut u64, b: uint64x2x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3))]
@@ -6804,6 +7916,8 @@ pub unsafe fn vst3q_p64(a: *mut p64, b: poly64x2x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -6818,6 +7932,8 @@ pub unsafe fn vst3_f64(a: *mut f64, b: float64x1x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3))]
@@ -6832,6 +7948,8 @@ pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
@@ -6848,6 +7966,8 @@ pub unsafe fn vst3q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
@@ -6864,6 +7984,8 @@ pub unsafe fn vst3_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
@@ -6880,6 +8002,8 @@ pub unsafe fn vst3q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
@@ -6891,6 +8015,8 @@ pub unsafe fn vst3q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
@@ -6902,6 +8028,8 @@ pub unsafe fn vst3_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
@@ -6913,6 +8041,8 @@ pub unsafe fn vst3q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
@@ -6924,6 +8054,8 @@ pub unsafe fn vst3q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
@@ -6935,6 +8067,8 @@ pub unsafe fn vst3_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
@@ -6946,6 +8080,8 @@ pub unsafe fn vst3q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
@@ -6962,6 +8098,8 @@ pub unsafe fn vst3_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
@@ -6978,6 +8116,8 @@ pub unsafe fn vst3q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x3_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4))]
@@ -6992,6 +8132,8 @@ pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4))]
@@ -7001,6 +8143,8 @@ pub unsafe fn vst4q_u64(a: *mut u64, b: uint64x2x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4))]
@@ -7010,6 +8154,8 @@ pub unsafe fn vst4q_p64(a: *mut p64, b: poly64x2x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -7024,6 +8170,8 @@ pub unsafe fn vst4_f64(a: *mut f64, b: float64x1x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4))]
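A minimal usage sketch for the 4-element structure store (editorial illustration, not part of this patch), assuming an aarch64 target where std::arch::aarch64 and the `neon` feature are available; the helper name and values are made up for the example:

use std::arch::aarch64::*;
use std::arch::is_aarch64_feature_detected;

fn main() {
    if is_aarch64_feature_detected!("neon") {
        unsafe { st4_roundtrip() }
    }
}

#[target_feature(enable = "neon")]
unsafe fn st4_roundtrip() {
    // Eight interleaved f64 values in memory: x0 y0 z0 w0 x1 y1 z1 w1.
    let src = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0_f64];
    // vld4q_f64 de-interleaves them into four float64x2_t registers.
    let regs: float64x2x4_t = vld4q_f64(src.as_ptr());
    // vst4q_f64 re-interleaves the four registers back into memory.
    let mut dst = [0.0_f64; 8];
    vst4q_f64(dst.as_mut_ptr(), regs);
    assert_eq!(src, dst);
}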
@@ -7038,6 +8186,8 @@ pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
@@ -7054,6 +8204,8 @@ pub unsafe fn vst4q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
@@ -7070,6 +8222,8 @@ pub unsafe fn vst4_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
@@ -7086,6 +8240,8 @@ pub unsafe fn vst4q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
@@ -7097,6 +8253,8 @@ pub unsafe fn vst4q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
@@ -7108,6 +8266,8 @@ pub unsafe fn vst4_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
@@ -7119,6 +8279,8 @@ pub unsafe fn vst4q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
@@ -7130,6 +8292,8 @@ pub unsafe fn vst4q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
@@ -7141,6 +8305,8 @@ pub unsafe fn vst4_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
@@ -7152,6 +8318,8 @@ pub unsafe fn vst4q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
@@ -7168,6 +8336,8 @@ pub unsafe fn vst4_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
@@ -7184,6 +8354,8 @@ pub unsafe fn vst4q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x4_t) {
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
@@ -7193,6 +8365,8 @@ pub unsafe fn vmul_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
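As a usage sketch (editorial, not part of this patch): lane-wise f64 multiplication with vmulq_f64, assuming an aarch64 target with the `neon` feature; the helper name and inputs are illustrative:

use std::arch::aarch64::*;

#[target_feature(enable = "neon")]
unsafe fn mul_demo() -> [f64; 2] {
    let a = vld1q_f64([2.0, 3.0_f64].as_ptr());
    let b = vld1q_f64([10.0, 100.0_f64].as_ptr());
    // Lane-wise product: [2.0 * 10.0, 3.0 * 100.0] = [20.0, 300.0].
    let prod = vmulq_f64(a, b);
    let mut out = [0.0_f64; 2];
    vst1q_f64(out.as_mut_ptr(), prod);
    out
}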
@@ -7202,6 +8376,8 @@ pub unsafe fn vmulq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
}
/// Vector multiply by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
@@ -7211,6 +8387,8 @@ pub unsafe fn vmul_n_f64(a: float64x1_t, b: f64) -> float64x1_t {
}
/// Vector multiply by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
@@ -7220,6 +8398,8 @@ pub unsafe fn vmulq_n_f64(a: float64x2_t, b: f64) -> float64x2_t {
}
/// Floating-point multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
@@ -7231,6 +8411,8 @@ pub unsafe fn vmul_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) ->
}
/// Floating-point multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
@@ -7242,6 +8424,8 @@ pub unsafe fn vmul_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) ->
}
/// Floating-point multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
@@ -7253,6 +8437,8 @@ pub unsafe fn vmulq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) ->
}
/// Floating-point multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
@@ -7264,6 +8450,8 @@ pub unsafe fn vmulq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -
}
/// Floating-point multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
@@ -7276,6 +8464,8 @@ pub unsafe fn vmuls_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
}
/// Floating-point multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
@@ -7288,6 +8478,8 @@ pub unsafe fn vmuls_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
}
/// Floating-point multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
@@ -7300,6 +8492,8 @@ pub unsafe fn vmuld_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
}
/// Floating-point multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
@@ -7312,6 +8506,8 @@ pub unsafe fn vmuld_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
}
/// Signed multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
@@ -7323,6 +8519,8 @@ pub unsafe fn vmull_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
}
/// Signed multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
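A sketch of the "high" widening multiply (not part of this patch): vmull_high_s16 multiplies only the upper four i16 lanes of each operand and widens the products to i32, assuming an aarch64/neon target; names and values are illustrative:

use std::arch::aarch64::*;

#[target_feature(enable = "neon")]
unsafe fn mull_high_demo() -> [i32; 4] {
    let a = vld1q_s16([1, 2, 3, 4, 5, 6, 7, 8_i16].as_ptr());
    let b = vld1q_s16([10, 10, 10, 10, 100, 100, 100, 100_i16].as_ptr());
    // Only lanes 4..8 participate: [5*100, 6*100, 7*100, 8*100] as i32.
    let wide: int32x4_t = vmull_high_s16(a, b);
    let mut out = [0_i32; 4];
    vst1q_s32(out.as_mut_ptr(), wide);
    out // [500, 600, 700, 800]
}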
@@ -7334,6 +8532,8 @@ pub unsafe fn vmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
}
/// Signed multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
@@ -7345,6 +8545,8 @@ pub unsafe fn vmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
}
/// Unsigned multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2))]
@@ -7356,6 +8558,8 @@ pub unsafe fn vmull_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
}
/// Unsigned multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2))]
@@ -7367,6 +8571,8 @@ pub unsafe fn vmull_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
}
/// Unsigned multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2))]
@@ -7378,6 +8584,8 @@ pub unsafe fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
}
/// Polynomial multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(pmull))]
@@ -7392,6 +8600,8 @@ pub unsafe fn vmull_p64(a: p64, b: p64) -> p128 {
}
/// Polynomial multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(pmull))]
@@ -7403,6 +8613,8 @@ pub unsafe fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t {
}
/// Polynomial multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(pmull))]
@@ -7412,6 +8624,8 @@ pub unsafe fn vmull_high_p64(a: poly64x2_t, b: poly64x2_t) -> p128 {
}
/// Multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
@@ -7421,6 +8635,8 @@ pub unsafe fn vmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
}
/// Multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
@@ -7430,6 +8646,8 @@ pub unsafe fn vmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
}
/// Multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2))]
@@ -7439,6 +8657,8 @@ pub unsafe fn vmull_high_n_u16(a: uint16x8_t, b: u16) -> uint32x4_t {
}
/// Multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2))]
@@ -7448,6 +8668,8 @@ pub unsafe fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t {
}
/// Multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
@@ -7459,6 +8681,8 @@ pub unsafe fn vmull_high_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -
}
/// Multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
@@ -7470,6 +8694,8 @@ pub unsafe fn vmull_high_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t)
}
/// Multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
@@ -7481,6 +8707,8 @@ pub unsafe fn vmull_high_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -
}
/// Multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
@@ -7492,6 +8720,8 @@ pub unsafe fn vmull_high_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t)
}
/// Multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
@@ -7503,6 +8733,8 @@ pub unsafe fn vmull_high_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x4_t)
}
/// Multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
@@ -7514,6 +8746,8 @@ pub unsafe fn vmull_high_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t
}
/// Multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
@@ -7525,6 +8759,8 @@ pub unsafe fn vmull_high_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x2_t)
}
/// Multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
@@ -7536,6 +8772,8 @@ pub unsafe fn vmull_high_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t
}
/// Floating-point multiply extended
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx))]
@@ -7550,6 +8788,8 @@ pub unsafe fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
}
/// Floating-point multiply extended
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx))]
@@ -7564,6 +8804,8 @@ pub unsafe fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
}
/// Floating-point multiply extended
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx))]
@@ -7578,6 +8820,8 @@ pub unsafe fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
}
/// Floating-point multiply extended
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx))]
@@ -7592,6 +8836,8 @@ pub unsafe fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
}
/// Floating-point multiply extended
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
@@ -7603,6 +8849,8 @@ pub unsafe fn vmulx_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) ->
}
/// Floating-point multiply extended
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
@@ -7614,6 +8862,8 @@ pub unsafe fn vmulx_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -
}
/// Floating-point multiply extended
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
@@ -7625,6 +8875,8 @@ pub unsafe fn vmulx_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t) ->
}
/// Floating-point multiply extended
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
@@ -7636,6 +8888,8 @@ pub unsafe fn vmulx_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x4_t) -
}
/// Floating-point multiply extended
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
@@ -7647,6 +8901,8 @@ pub unsafe fn vmulxq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x2_t) -
}
/// Floating-point multiply extended
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
@@ -7658,6 +8914,8 @@ pub unsafe fn vmulxq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t)
}
/// Floating-point multiply extended
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
@@ -7669,6 +8927,8 @@ pub unsafe fn vmulxq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -
}
/// Floating-point multiply extended
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
@@ -7680,6 +8940,8 @@ pub unsafe fn vmulxq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t)
}
/// Floating-point multiply extended
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx))]
@@ -7694,6 +8956,8 @@ pub unsafe fn vmulxs_f32(a: f32, b: f32) -> f32 {
}
/// Floating-point multiply extended
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx))]
@@ -7708,6 +8972,8 @@ pub unsafe fn vmulxd_f64(a: f64, b: f64) -> f64 {
}
/// Floating-point multiply extended
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
@@ -7719,6 +8985,8 @@ pub unsafe fn vmulxs_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
}
/// Floating-point multiply extended
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
@@ -7730,6 +8998,8 @@ pub unsafe fn vmulxs_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
}
/// Floating-point multiply extended
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
@@ -7741,6 +9011,8 @@ pub unsafe fn vmulxd_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
}
/// Floating-point multiply extended
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
@@ -7752,6 +9024,8 @@ pub unsafe fn vmulxd_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
}
/// Floating-point fused Multiply-Add to accumulator(vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd))]
@@ -7766,6 +9040,8 @@ pub unsafe fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float6
}
/// Floating-point fused Multiply-Add to accumulator(vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla))]
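A usage sketch (not part of this patch): vfmaq_f64 computes a + b * c per lane with a single rounding step, assuming an aarch64/neon target; the helper name and inputs are illustrative:

use std::arch::aarch64::*;

#[target_feature(enable = "neon")]
unsafe fn fma_demo() -> [f64; 2] {
    let acc = vld1q_f64([1.0, 2.0_f64].as_ptr());
    let b = vld1q_f64([3.0, 4.0_f64].as_ptr());
    let c = vld1q_f64([10.0, 10.0_f64].as_ptr());
    // Fused multiply-add: acc + b * c = [1 + 30, 2 + 40] = [31.0, 42.0].
    let r = vfmaq_f64(acc, b, c);
    let mut out = [0.0_f64; 2];
    vst1q_f64(out.as_mut_ptr(), r);
    out
}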
@@ -7780,6 +9056,8 @@ pub unsafe fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float
}
/// Floating-point fused Multiply-Add to accumulator(vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd))]
@@ -7789,6 +9067,8 @@ pub unsafe fn vfma_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t
}
/// Floating-point fused Multiply-Add to accumulator(vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla))]
@@ -7798,6 +9078,8 @@ pub unsafe fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t
}
/// Floating-point fused multiply-add to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
@@ -7809,6 +9091,8 @@ pub unsafe fn vfma_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c:
}
/// Floating-point fused multiply-add to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
@@ -7820,6 +9104,8 @@ pub unsafe fn vfma_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c:
}
/// Floating-point fused multiply-add to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
@@ -7831,6 +9117,8 @@ pub unsafe fn vfmaq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c:
}
/// Floating-point fused multiply-add to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
@@ -7842,6 +9130,8 @@ pub unsafe fn vfmaq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c
}
/// Floating-point fused multiply-add to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
@@ -7853,6 +9143,8 @@ pub unsafe fn vfma_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t, c:
}
/// Floating-point fused multiply-add to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
@@ -7864,6 +9156,8 @@ pub unsafe fn vfma_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t, c:
}
/// Floating-point fused multiply-add to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
@@ -7875,6 +9169,8 @@ pub unsafe fn vfmaq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t, c:
}
/// Floating-point fused multiply-add to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
@@ -7886,6 +9182,8 @@ pub unsafe fn vfmaq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t, c
}
/// Floating-point fused multiply-add to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
@@ -7903,6 +9201,8 @@ pub unsafe fn vfmas_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) ->
}
/// Floating-point fused multiply-add to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
@@ -7920,6 +9220,8 @@ pub unsafe fn vfmas_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -
}
/// Floating-point fused multiply-add to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
@@ -7937,6 +9239,8 @@ pub unsafe fn vfmad_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) ->
}
/// Floating-point fused multiply-add to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
@@ -7954,6 +9258,8 @@ pub unsafe fn vfmad_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -
}
/// Floating-point fused multiply-subtract from accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub))]
@@ -7964,6 +9270,8 @@ pub unsafe fn vfms_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float6
}
/// Floating-point fused multiply-subtract from accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls))]
@@ -7974,6 +9282,8 @@ pub unsafe fn vfmsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float
}
/// Floating-point fused Multiply-subtract to accumulator(vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub))]
@@ -7983,6 +9293,8 @@ pub unsafe fn vfms_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t
}
/// Floating-point fused Multiply-subtract to accumulator(vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls))]
@@ -7992,6 +9304,8 @@ pub unsafe fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t
}
/// Floating-point fused multiply-subtract to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
@@ -8003,6 +9317,8 @@ pub unsafe fn vfms_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c:
}
/// Floating-point fused multiply-subtract to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
@@ -8014,6 +9330,8 @@ pub unsafe fn vfms_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c:
}
/// Floating-point fused multiply-subtract to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
@@ -8025,6 +9343,8 @@ pub unsafe fn vfmsq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c:
}
/// Floating-point fused multiply-subtract to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
@@ -8036,6 +9356,8 @@ pub unsafe fn vfmsq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c
}
/// Floating-point fused multiply-subtract to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
@@ -8047,6 +9369,8 @@ pub unsafe fn vfms_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t, c:
}
/// Floating-point fused multiply-subtract to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
@@ -8058,6 +9382,8 @@ pub unsafe fn vfms_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t, c:
}
/// Floating-point fused multiply-subtract to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
@@ -8069,6 +9395,8 @@ pub unsafe fn vfmsq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t, c:
}
/// Floating-point fused multiply-subtract to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
@@ -8080,6 +9408,8 @@ pub unsafe fn vfmsq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t, c
}
/// Floating-point fused multiply-subtract to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
@@ -8090,6 +9420,8 @@ pub unsafe fn vfmss_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) ->
}
/// Floating-point fused multiply-subtract to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
@@ -8100,6 +9432,8 @@ pub unsafe fn vfmss_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -
}
/// Floating-point fused multiply-subtract to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
@@ -8110,6 +9444,8 @@ pub unsafe fn vfmsd_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) ->
}
/// Floating-point fused multiply-subtract to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
@@ -8120,6 +9456,8 @@ pub unsafe fn vfmsd_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -
}
/// Divide
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fdiv))]
@@ -8129,6 +9467,8 @@ pub unsafe fn vdiv_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
}
/// Divide
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fdiv))]
@@ -8138,6 +9478,8 @@ pub unsafe fn vdivq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
}
/// Divide
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fdiv))]
@@ -8147,6 +9489,8 @@ pub unsafe fn vdiv_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
}
/// Divide
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fdiv))]
@@ -8156,6 +9500,8 @@ pub unsafe fn vdivq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
}
/// Subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsub))]
@@ -8165,6 +9511,8 @@ pub unsafe fn vsub_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
}
/// Subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsub))]
@@ -8174,6 +9522,8 @@ pub unsafe fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
}
/// Subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -8183,6 +9533,8 @@ pub unsafe fn vsubd_s64(a: i64, b: i64) -> i64 {
}
/// Subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -8192,6 +9544,8 @@ pub unsafe fn vsubd_u64(a: u64, b: u64) -> u64 {
}
/// Add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -8201,6 +9555,8 @@ pub unsafe fn vaddd_s64(a: i64, b: i64) -> i64 {
}
/// Add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -8210,6 +9566,8 @@ pub unsafe fn vaddd_u64(a: u64, b: u64) -> u64 {
}
/// Floating-point add across vector
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(faddp))]
@@ -8224,6 +9582,8 @@ pub unsafe fn vaddv_f32(a: float32x2_t) -> f32 {
}
/// Floating-point add across vector
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(faddp))]
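A usage sketch (not part of this patch): vaddvq_f32 reduces all four f32 lanes to a single scalar sum, assuming an aarch64/neon target; the helper name is illustrative:

use std::arch::aarch64::*;

#[target_feature(enable = "neon")]
unsafe fn horizontal_sum(xs: &[f32; 4]) -> f32 {
    let v = vld1q_f32(xs.as_ptr());
    // Add across vector: xs[0] + xs[1] + xs[2] + xs[3].
    vaddvq_f32(v)
}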
@@ -8238,6 +9598,8 @@ pub unsafe fn vaddvq_f32(a: float32x4_t) -> f32 {
}
/// Floating-point add across vector
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(faddp))]
@@ -8252,6 +9614,8 @@ pub unsafe fn vaddvq_f64(a: float64x2_t) -> f64 {
}
/// Signed Add Long across Vector
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(saddlv))]
@@ -8266,6 +9630,8 @@ pub unsafe fn vaddlv_s16(a: int16x4_t) -> i32 {
}
/// Signed Add Long across Vector
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(saddlv))]
@@ -8280,6 +9646,8 @@ pub unsafe fn vaddlvq_s16(a: int16x8_t) -> i32 {
}
/// Signed Add Long across Vector
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(saddlp))]
@@ -8294,6 +9662,8 @@ pub unsafe fn vaddlv_s32(a: int32x2_t) -> i64 {
}
/// Signed Add Long across Vector
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(saddlv))]
@@ -8308,6 +9678,8 @@ pub unsafe fn vaddlvq_s32(a: int32x4_t) -> i64 {
}
/// Unsigned Add Long across Vector
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uaddlv))]
@@ -8322,6 +9694,8 @@ pub unsafe fn vaddlv_u16(a: uint16x4_t) -> u32 {
}
/// Unsigned Add Long across Vector
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uaddlv))]
@@ -8336,6 +9710,8 @@ pub unsafe fn vaddlvq_u16(a: uint16x8_t) -> u32 {
}
/// Unsigned Add Long across Vector
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uaddlp))]
@@ -8350,6 +9726,8 @@ pub unsafe fn vaddlv_u32(a: uint32x2_t) -> u64 {
}
/// Unsigned Add Long across Vector
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uaddlv))]
@@ -8364,6 +9742,8 @@ pub unsafe fn vaddlvq_u32(a: uint32x4_t) -> u64 {
}
/// Signed Subtract Wide
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ssubw))]
@@ -8374,6 +9754,8 @@ pub unsafe fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t {
}
/// Signed Subtract Wide
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ssubw))]
@@ -8384,6 +9766,8 @@ pub unsafe fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t {
}
/// Signed Subtract Wide
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ssubw))]
@@ -8394,6 +9778,8 @@ pub unsafe fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t {
}
/// Unsigned Subtract Wide
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usubw))]
@@ -8404,6 +9790,8 @@ pub unsafe fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
}
/// Unsigned Subtract Wide
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usubw))]
@@ -8414,6 +9802,8 @@ pub unsafe fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
}
/// Unsigned Subtract Wide
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usubw))]
@@ -8424,6 +9814,8 @@ pub unsafe fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t {
}
/// Signed Subtract Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ssubl))]
@@ -8437,6 +9829,8 @@ pub unsafe fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
}
/// Signed Subtract Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ssubl))]
@@ -8450,6 +9844,8 @@ pub unsafe fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
}
/// Signed Subtract Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ssubl))]
@@ -8463,6 +9859,8 @@ pub unsafe fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
}
/// Unsigned Subtract Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usubl))]
@@ -8476,6 +9874,8 @@ pub unsafe fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
}
/// Unsigned Subtract Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usubl))]
@@ -8489,6 +9889,8 @@ pub unsafe fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
}
/// Unsigned Subtract Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usubl))]
@@ -8502,6 +9904,8 @@ pub unsafe fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
}
/// Bit clear and exclusive OR
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s8)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(bcax))]
@@ -8515,6 +9919,8 @@ pub unsafe fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
}
/// Bit clear and exclusive OR
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s16)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(bcax))]
@@ -8528,6 +9934,8 @@ pub unsafe fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t
}
/// Bit clear and exclusive OR
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s32)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(bcax))]
@@ -8541,6 +9949,8 @@ pub unsafe fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t
}
/// Bit clear and exclusive OR
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s64)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(bcax))]
@@ -8554,6 +9964,8 @@ pub unsafe fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t
}
/// Bit clear and exclusive OR
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u8)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(bcax))]
@@ -8567,6 +9979,8 @@ pub unsafe fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16
}
/// Bit clear and exclusive OR
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u16)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(bcax))]
@@ -8580,6 +9994,8 @@ pub unsafe fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x
}
/// Bit clear and exclusive OR
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u32)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(bcax))]
@@ -8593,6 +10009,8 @@ pub unsafe fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x
}
/// Bit clear and exclusive OR
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u64)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(bcax))]
@@ -8606,6 +10024,8 @@ pub unsafe fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x
}
/// Floating-point complex add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcadd))]
@@ -8619,6 +10039,8 @@ pub unsafe fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
}
/// Floating-point complex add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcadd))]
@@ -8632,6 +10054,8 @@ pub unsafe fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
}
/// Floating-point complex add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f64)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcadd))]
@@ -8645,6 +10069,8 @@ pub unsafe fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
}
/// Floating-point complex add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcadd))]
@@ -8658,6 +10084,8 @@ pub unsafe fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
}
/// Floating-point complex add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcadd))]
@@ -8671,6 +10099,8 @@ pub unsafe fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
}
/// Floating-point complex add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f64)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcadd))]
@@ -8684,6 +10114,8 @@ pub unsafe fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
}
/// Floating-point complex multiply accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla))]
@@ -8697,6 +10129,8 @@ pub unsafe fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float
}
/// Floating-point complex multiply accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla))]
@@ -8710,6 +10144,8 @@ pub unsafe fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> floa
}
/// Floating-point complex multiply accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f64)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla))]
@@ -8723,6 +10159,8 @@ pub unsafe fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> floa
}
/// Floating-point complex multiply accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla))]
@@ -8736,6 +10174,8 @@ pub unsafe fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) ->
}
/// Floating-point complex multiply accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla))]
@@ -8749,6 +10189,8 @@ pub unsafe fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -
}
/// Floating-point complex multiply accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f64)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla))]
@@ -8762,6 +10204,8 @@ pub unsafe fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -
}
/// Floating-point complex multiply accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla))]
@@ -8775,6 +10219,8 @@ pub unsafe fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -
}
/// Floating-point complex multiply accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla))]
@@ -8788,6 +10234,8 @@ pub unsafe fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t)
}
/// Floating-point complex multiply accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f64)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla))]
@@ -8801,6 +10249,8 @@ pub unsafe fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t)
}
/// Floating-point complex multiply accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla))]
@@ -8814,6 +10264,8 @@ pub unsafe fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -
}
/// Floating-point complex multiply accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla))]
@@ -8827,6 +10279,8 @@ pub unsafe fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t)
}
/// Floating-point complex multiply accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f64)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla))]
@@ -8840,6 +10294,8 @@ pub unsafe fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t)
}
/// Floating-point complex multiply accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
@@ -8851,6 +10307,8 @@ pub unsafe fn vcmla_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c:
}
/// Floating-point complex multiply accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
@@ -8862,6 +10320,8 @@ pub unsafe fn vcmla_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c
}
/// Floating-point complex multiply accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
@@ -8873,6 +10333,8 @@ pub unsafe fn vcmlaq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c
}
/// Floating-point complex multiply accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
@@ -8884,6 +10346,8 @@ pub unsafe fn vcmlaq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t,
}
/// Floating-point complex multiply accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
@@ -8895,6 +10359,8 @@ pub unsafe fn vcmla_rot90_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2
}
/// Floating-point complex multiply accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
@@ -8906,6 +10372,8 @@ pub unsafe fn vcmla_rot90_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x
}
/// Floating-point complex multiply accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
@@ -8917,6 +10385,8 @@ pub unsafe fn vcmlaq_rot90_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x
}
/// Floating-point complex multiply accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
@@ -8928,6 +10398,8 @@ pub unsafe fn vcmlaq_rot90_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32
}
/// Floating-point complex multiply accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
@@ -8939,6 +10411,8 @@ pub unsafe fn vcmla_rot180_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x
}
/// Floating-point complex multiply accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
@@ -8950,6 +10424,8 @@ pub unsafe fn vcmla_rot180_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32
}
/// Floating-point complex multiply accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
@@ -8961,6 +10437,8 @@ pub unsafe fn vcmlaq_rot180_lane_f32<const LANE: i32>(a: float32x4_t, b: float32
}
/// Floating-point complex multiply accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
@@ -8972,6 +10450,8 @@ pub unsafe fn vcmlaq_rot180_laneq_f32<const LANE: i32>(a: float32x4_t, b: float3
}
/// Floating-point complex multiply accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
@@ -8983,6 +10463,8 @@ pub unsafe fn vcmla_rot270_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x
}
/// Floating-point complex multiply accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
@@ -8994,6 +10476,8 @@ pub unsafe fn vcmla_rot270_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32
}
/// Floating-point complex multiply accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
@@ -9005,6 +10489,8 @@ pub unsafe fn vcmlaq_rot270_lane_f32<const LANE: i32>(a: float32x4_t, b: float32
}
/// Floating-point complex multiply accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
@@ -9016,6 +10502,8 @@ pub unsafe fn vcmlaq_rot270_laneq_f32<const LANE: i32>(a: float32x4_t, b: float3
}
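A minimal usage sketch for the fcma intrinsics documented above (illustrative only, not taken from the patched source; on some toolchains these intrinsics may still sit behind a nightly feature gate). Chaining the 0 and 90 degree rotations of FCMLA yields a full complex multiply-accumulate:

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon,fcma")]
unsafe fn complex_mla(acc: [f32; 2], x: [f32; 2], y: [f32; 2]) -> [f32; 2] {
    use core::arch::aarch64::*;
    let a = vld1_f32(acc.as_ptr()); // (re, im) accumulator
    let b = vld1_f32(x.as_ptr());   // (re, im) multiplicand
    let c = vld1_f32(y.as_ptr());   // (re, im) multiplier; LANE = 0 is its only pair
    let partial = vcmla_lane_f32::<0>(a, b, c);          // rotation 0
    let full = vcmla_rot90_lane_f32::<0>(partial, b, c); // rotation 90
    let mut out = [0.0f32; 2];
    vst1_f32(out.as_mut_ptr(), full);
    out
}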
/// Dot product arithmetic
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_s32)
#[inline]
#[target_feature(enable = "neon,dotprod")]
#[cfg_attr(test, assert_instr(sdot))]
@@ -9029,6 +10517,8 @@ pub unsafe fn vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t {
}
/// Dot product arithmetic
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_s32)
#[inline]
#[target_feature(enable = "neon,dotprod")]
#[cfg_attr(test, assert_instr(sdot))]
@@ -9042,6 +10532,8 @@ pub unsafe fn vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t {
}
/// Dot product arithmetic
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_u32)
#[inline]
#[target_feature(enable = "neon,dotprod")]
#[cfg_attr(test, assert_instr(udot))]
@@ -9055,6 +10547,8 @@ pub unsafe fn vdot_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t
}
/// Dot product arithmetic
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_u32)
#[inline]
#[target_feature(enable = "neon,dotprod")]
#[cfg_attr(test, assert_instr(udot))]
@@ -9068,6 +10562,8 @@ pub unsafe fn vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4
}
/// Dot product arithmetic
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_s32)
#[inline]
#[target_feature(enable = "neon,dotprod")]
#[cfg_attr(test, assert_instr(sdot, LANE = 0))]
@@ -9079,6 +10575,8 @@ pub unsafe fn vdot_lane_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: int8x
}
/// Dot product arithmetic
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_s32)
#[inline]
#[target_feature(enable = "neon,dotprod")]
#[cfg_attr(test, assert_instr(sdot, LANE = 0))]
@@ -9090,6 +10588,8 @@ pub unsafe fn vdot_laneq_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: int8
}
/// Dot product arithmetic
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_s32)
#[inline]
#[target_feature(enable = "neon,dotprod")]
#[cfg_attr(test, assert_instr(sdot, LANE = 0))]
@@ -9101,6 +10601,8 @@ pub unsafe fn vdotq_lane_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: int
}
/// Dot product arithmetic
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_s32)
#[inline]
#[target_feature(enable = "neon,dotprod")]
#[cfg_attr(test, assert_instr(sdot, LANE = 0))]
@@ -9112,6 +10614,8 @@ pub unsafe fn vdotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: in
}
/// Dot product arithmetic
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_u32)
#[inline]
#[target_feature(enable = "neon,dotprod")]
#[cfg_attr(test, assert_instr(udot, LANE = 0))]
@@ -9123,6 +10627,8 @@ pub unsafe fn vdot_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint8x8_t, c: uin
}
/// Dot product arithmetic
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_u32)
#[inline]
#[target_feature(enable = "neon,dotprod")]
#[cfg_attr(test, assert_instr(udot, LANE = 0))]
@@ -9134,6 +10640,8 @@ pub unsafe fn vdot_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint8x8_t, c: ui
}
/// Dot product arithmetic
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_u32)
#[inline]
#[target_feature(enable = "neon,dotprod")]
#[cfg_attr(test, assert_instr(udot, LANE = 0))]
@@ -9145,6 +10653,8 @@ pub unsafe fn vdotq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint8x16_t, c: u
}
/// Dot product arithmetic
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_u32)
#[inline]
#[target_feature(enable = "neon,dotprod")]
#[cfg_attr(test, assert_instr(udot, LANE = 0))]
@@ -9156,6 +10666,8 @@ pub unsafe fn vdotq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint8x16_t, c:
}
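The dotprod intrinsics above accumulate a four-element byte dot product into each 32-bit lane. A hedged sketch, assuming vdot_s32 is exposed by core::arch::aarch64 on the toolchain in use:

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon,dotprod")]
unsafe fn dot_accumulate(acc: [i32; 2], b: [i8; 8], c: [i8; 8]) -> [i32; 2] {
    use core::arch::aarch64::*;
    // out[i] = acc[i] + b[4*i]*c[4*i] + ... + b[4*i+3]*c[4*i+3]
    let r = vdot_s32(vld1_s32(acc.as_ptr()), vld1_s8(b.as_ptr()), vld1_s8(c.as_ptr()));
    let mut out = [0i32; 2];
    vst1_s32(out.as_mut_ptr(), r);
    out
}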
/// Maximum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmax))]
@@ -9170,6 +10682,8 @@ pub unsafe fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
}
/// Maximum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmax))]
@@ -9184,6 +10698,8 @@ pub unsafe fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
}
/// Floating-point Maximum Number (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnm))]
@@ -9198,6 +10714,8 @@ pub unsafe fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
}
/// Floating-point Maximum Number (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnm))]
@@ -9212,6 +10730,8 @@ pub unsafe fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
}
/// Floating-point maximum number across vector
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
@@ -9226,6 +10746,8 @@ pub unsafe fn vmaxnmv_f32(a: float32x2_t) -> f32 {
}
/// Floating-point maximum number across vector
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
@@ -9240,6 +10762,8 @@ pub unsafe fn vmaxnmvq_f64(a: float64x2_t) -> f64 {
}
/// Floating-point maximum number across vector
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmv))]
@@ -9254,6 +10778,8 @@ pub unsafe fn vmaxnmvq_f32(a: float32x4_t) -> f32 {
}
/// Floating-point Maximum Number Pairwise (vector).
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
@@ -9268,6 +10794,8 @@ pub unsafe fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
}
/// Floating-point Maximum Number Pairwise (vector).
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
@@ -9282,6 +10810,8 @@ pub unsafe fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
}
/// Floating-point Maximum Number Pairwise (vector).
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
@@ -9296,6 +10826,8 @@ pub unsafe fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
}
/// Floating-point maximum number pairwise
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnms_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
@@ -9310,6 +10842,8 @@ pub unsafe fn vpmaxnms_f32(a: float32x2_t) -> f32 {
}
/// Floating-point maximum number pairwise
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmqd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
@@ -9324,6 +10858,8 @@ pub unsafe fn vpmaxnmqd_f64(a: float64x2_t) -> f64 {
}
/// Floating-point maximum pairwise
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxs_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxp))]
@@ -9338,6 +10874,8 @@ pub unsafe fn vpmaxs_f32(a: float32x2_t) -> f32 {
}
/// Floating-point maximum pairwise
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxqd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxp))]
@@ -9352,6 +10890,8 @@ pub unsafe fn vpmaxqd_f64(a: float64x2_t) -> f64 {
}
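A small sketch of the max family above (illustrative, assuming only the "neon" feature from the attributes in this patch): vmax_* propagates NaN, the *nm variants follow IEEE maxNum, and vmaxnmv* reduces a whole vector to one scalar.

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn max_demo(a: f64, b: f64, xs: [f32; 4]) -> (f64, f32) {
    use core::arch::aarch64::*;
    // maxNum of two scalars: a quiet NaN operand is ignored in favour of the number.
    let scalar_max = vget_lane_f64::<0>(vmaxnm_f64(vdup_n_f64(a), vdup_n_f64(b)));
    // Horizontal maxNum across all four f32 lanes.
    let reduced = vmaxnmvq_f32(vld1q_f32(xs.as_ptr()));
    (scalar_max, reduced)
}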
/// Minimum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmin))]
@@ -9366,6 +10906,8 @@ pub unsafe fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
}
/// Minimum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmin))]
@@ -9380,6 +10922,8 @@ pub unsafe fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
}
/// Floating-point Minimum Number (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnm))]
@@ -9394,6 +10938,8 @@ pub unsafe fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
}
/// Floating-point Minimum Number (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnm))]
@@ -9408,6 +10954,8 @@ pub unsafe fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
}
/// Floating-point minimum number across vector
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
@@ -9422,6 +10970,8 @@ pub unsafe fn vminnmv_f32(a: float32x2_t) -> f32 {
}
/// Floating-point minimum number across vector
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
@@ -9436,6 +10986,8 @@ pub unsafe fn vminnmvq_f64(a: float64x2_t) -> f64 {
}
/// Floating-point minimum number across vector
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmv))]
@@ -9450,6 +11002,8 @@ pub unsafe fn vminnmvq_f32(a: float32x4_t) -> f32 {
}
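The min family mirrors the max family above; a short sketch under the same assumptions:

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn min_demo(a: f64, b: f64, xs: [f32; 4]) -> (f64, f32) {
    use core::arch::aarch64::*;
    // IEEE minNum of two scalars, then a horizontal minNum over four f32 lanes.
    let scalar_min = vget_lane_f64::<0>(vminnm_f64(vdup_n_f64(a), vdup_n_f64(b)));
    let reduced = vminnmvq_f32(vld1q_f32(xs.as_ptr()));
    (scalar_min, reduced)
}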
/// Vector move
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sxtl2))]
@@ -9460,6 +11014,8 @@ pub unsafe fn vmovl_high_s8(a: int8x16_t) -> int16x8_t {
}
/// Vector move
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sxtl2))]
@@ -9470,6 +11026,8 @@ pub unsafe fn vmovl_high_s16(a: int16x8_t) -> int32x4_t {
}
/// Vector move
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sxtl2))]
@@ -9480,6 +11038,8 @@ pub unsafe fn vmovl_high_s32(a: int32x4_t) -> int64x2_t {
}
/// Vector move
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uxtl2))]
@@ -9490,6 +11050,8 @@ pub unsafe fn vmovl_high_u8(a: uint8x16_t) -> uint16x8_t {
}
/// Vector move
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uxtl2))]
@@ -9500,6 +11062,8 @@ pub unsafe fn vmovl_high_u16(a: uint16x8_t) -> uint32x4_t {
}
/// Vector move
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uxtl2))]
@@ -9510,6 +11074,8 @@ pub unsafe fn vmovl_high_u32(a: uint32x4_t) -> uint64x2_t {
}
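vmovl_high_* widens the upper half of a 128-bit vector in one instruction (sxtl2/uxtl2), avoiding a separate vget_high_* step. A minimal sketch, not from the patched source:

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn widen_upper_half(bytes: [i8; 16]) -> [i16; 8] {
    use core::arch::aarch64::*;
    // Sign-extend lanes 8..=15 of the input to 16 bits.
    let hi = vmovl_high_s8(vld1q_s8(bytes.as_ptr()));
    let mut out = [0i16; 8];
    vst1q_s16(out.as_mut_ptr(), hi);
    out
}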
/// Floating-point add pairwise
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(faddp))]
@@ -9524,6 +11090,8 @@ pub unsafe fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
}
/// Floating-point add pairwise
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(faddp))]
@@ -9538,6 +11106,8 @@ pub unsafe fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
}
/// Floating-point add pairwise
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadds_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -9549,6 +11119,8 @@ pub unsafe fn vpadds_f32(a: float32x2_t) -> f32 {
}
/// Floating-point add pairwise
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -9560,6 +11132,8 @@ pub unsafe fn vpaddd_f64(a: float64x2_t) -> f64 {
}
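The pairwise-add intrinsics above add adjacent lanes, which makes for a compact horizontal sum. An illustrative sketch:

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn horizontal_sum(xs: [f32; 4]) -> f32 {
    use core::arch::aarch64::*;
    let v = vld1q_f32(xs.as_ptr());
    // faddp: [x0+x1, x2+x3, x0+x1, x2+x3]
    let pairs = vpaddq_f32(v, v);
    // Add the two remaining partial sums.
    vpadds_f32(vget_low_f32(pairs))
}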
/// Floating-point Minimum Number Pairwise (vector).
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
@@ -9574,6 +11148,8 @@ pub unsafe fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
}
/// Floating-point Minimum Number Pairwise (vector).
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
@@ -9588,6 +11164,8 @@ pub unsafe fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
}
/// Floating-point Minimum Number Pairwise (vector).
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
@@ -9602,6 +11180,8 @@ pub unsafe fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
}
/// Floating-point minimum number pairwise
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnms_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
@@ -9616,6 +11196,8 @@ pub unsafe fn vpminnms_f32(a: float32x2_t) -> f32 {
}
/// Floating-point minimum number pairwise
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmqd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
@@ -9630,6 +11212,8 @@ pub unsafe fn vpminnmqd_f64(a: float64x2_t) -> f64 {
}
/// Floating-point minimum pairwise
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmins_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminp))]
@@ -9644,6 +11228,8 @@ pub unsafe fn vpmins_f32(a: float32x2_t) -> f32 {
}
/// Floating-point minimum pairwise
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminqd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminp))]
@@ -9658,6 +11244,8 @@ pub unsafe fn vpminqd_f64(a: float64x2_t) -> f64 {
}
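vpmin*/vpminnm* are the minimum counterparts of the pairwise max intrinsics earlier in this patch; the scalar-returning forms reduce a two-lane vector directly. A brief sketch:

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn smaller_lane(xs: [f32; 2]) -> f32 {
    use core::arch::aarch64::*;
    // fminp: min(xs[0], xs[1]); vpminnms_f32 would apply IEEE minNum NaN handling instead.
    vpmins_f32(vld1_f32(xs.as_ptr()))
}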
/// Signed saturating doubling multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull))]
@@ -9669,6 +11257,8 @@ pub unsafe fn vqdmullh_s16(a: i16, b: i16) -> i32 {
}
/// Signed saturating doubling multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull))]
@@ -9683,6 +11273,8 @@ pub unsafe fn vqdmulls_s32(a: i32, b: i32) -> i64 {
}
/// Signed saturating doubling multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
@@ -9694,6 +11286,8 @@ pub unsafe fn vqdmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
}
/// Signed saturating doubling multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
@@ -9705,6 +11299,8 @@ pub unsafe fn vqdmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
}
/// Signed saturating doubling multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
@@ -9716,6 +11312,8 @@ pub unsafe fn vqdmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
}
/// Signed saturating doubling multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
@@ -9727,6 +11325,8 @@ pub unsafe fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
}
/// Vector saturating doubling long multiply by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
@@ -9739,6 +11339,8 @@ pub unsafe fn vqdmull_laneq_s16<const N: i32>(a: int16x4_t, b: int16x8_t) -> int
}
/// Vector saturating doubling long multiply by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
@@ -9751,6 +11353,8 @@ pub unsafe fn vqdmull_laneq_s32<const N: i32>(a: int32x2_t, b: int32x4_t) -> int
}
/// Signed saturating doubling multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
@@ -9763,6 +11367,8 @@ pub unsafe fn vqdmullh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i32 {
}
/// Signed saturating doubling multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
@@ -9775,6 +11381,8 @@ pub unsafe fn vqdmullh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i32 {
}
/// Signed saturating doubling multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 1))]
@@ -9787,6 +11395,8 @@ pub unsafe fn vqdmulls_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i64 {
}
/// Signed saturating doubling multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
@@ -9799,6 +11409,8 @@ pub unsafe fn vqdmulls_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i64 {
}
/// Signed saturating doubling multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
@@ -9812,6 +11424,8 @@ pub unsafe fn vqdmull_high_lane_s16<const N: i32>(a: int16x8_t, b: int16x4_t) ->
}
/// Signed saturating doubling multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 1))]
@@ -9825,6 +11439,8 @@ pub unsafe fn vqdmull_high_lane_s32<const N: i32>(a: int32x4_t, b: int32x2_t) ->
}
/// Signed saturating doubling multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 4))]
@@ -9838,6 +11454,8 @@ pub unsafe fn vqdmull_high_laneq_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -
}
/// Signed saturating doubling multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
@@ -9851,6 +11469,8 @@ pub unsafe fn vqdmull_high_laneq_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -
}
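vqdmull* computes 2*a*b widened to the next element size, saturating the single overflow case; this is a building block of Q15/Q31 fixed-point math. A scalar sketch (illustrative only):

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn doubling_mul_long(a: i16, b: i16) -> i32 {
    use core::arch::aarch64::*;
    // saturate(2 * a * b) as i32; only i16::MIN * i16::MIN saturates (to i32::MAX).
    vqdmullh_s16(a, b)
}

For example, doubling_mul_long(i16::MIN, i16::MIN) returns i32::MAX rather than wrapping.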
/// Signed saturating doubling multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
@@ -9860,6 +11480,8 @@ pub unsafe fn vqdmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int3
}
/// Signed saturating doubling multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
@@ -9869,6 +11491,8 @@ pub unsafe fn vqdmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int6
}
/// Signed saturating doubling multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
@@ -9878,6 +11502,8 @@ pub unsafe fn vqdmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_
}
/// Signed saturating doubling multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
@@ -9887,6 +11513,8 @@ pub unsafe fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_
}
/// Vector widening saturating doubling multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, N = 2))]
@@ -9898,6 +11526,8 @@ pub unsafe fn vqdmlal_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int
}
/// Vector widening saturating doubling multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, N = 1))]
@@ -9909,6 +11539,8 @@ pub unsafe fn vqdmlal_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int
}
/// Signed saturating doubling multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
@@ -9920,6 +11552,8 @@ pub unsafe fn vqdmlal_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c:
}
/// Signed saturating doubling multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
@@ -9931,6 +11565,8 @@ pub unsafe fn vqdmlal_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c
}
/// Signed saturating doubling multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
@@ -9942,6 +11578,8 @@ pub unsafe fn vqdmlal_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c:
}
/// Signed saturating doubling multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
@@ -9953,6 +11591,8 @@ pub unsafe fn vqdmlal_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c
}
/// Signed saturating doubling multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull))]
@@ -9963,6 +11603,8 @@ pub unsafe fn vqdmlalh_s16(a: i32, b: i16, c: i16) -> i32 {
}
/// Signed saturating doubling multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull))]
@@ -9973,6 +11615,8 @@ pub unsafe fn vqdmlals_s32(a: i64, b: i32, c: i32) -> i64 {
}
/// Signed saturating doubling multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
@@ -9984,6 +11628,8 @@ pub unsafe fn vqdmlalh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -
}
/// Signed saturating doubling multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
@@ -9995,6 +11641,8 @@ pub unsafe fn vqdmlalh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t)
}
/// Signed saturating doubling multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, LANE = 0))]
@@ -10006,6 +11654,8 @@ pub unsafe fn vqdmlals_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -
}
/// Signed saturating doubling multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, LANE = 0))]
@@ -10017,6 +11667,8 @@ pub unsafe fn vqdmlals_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t)
}
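vqdmlal* folds the doubling multiply into a saturating accumulate (acc + 2*b*c). A scalar sketch under the same assumptions as the earlier examples:

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn doubling_mul_acc(acc: i32, b: i16, c: i16) -> i32 {
    use core::arch::aarch64::*;
    // Both the doubling multiply and the accumulate saturate instead of wrapping.
    vqdmlalh_s16(acc, b, c)
}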
/// Signed saturating doubling multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
@@ -10026,6 +11678,8 @@ pub unsafe fn vqdmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int3
}
/// Signed saturating doubling multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
@@ -10035,6 +11689,8 @@ pub unsafe fn vqdmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int6
}
/// Signed saturating doubling multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
@@ -10044,6 +11700,8 @@ pub unsafe fn vqdmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_
}
/// Signed saturating doubling multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
@@ -10053,6 +11711,8 @@ pub unsafe fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_
}
/// Vector widening saturating doubling multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, N = 2))]
@@ -10064,6 +11724,8 @@ pub unsafe fn vqdmlsl_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int
}
/// Vector widening saturating doubling multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, N = 1))]
@@ -10075,6 +11737,8 @@ pub unsafe fn vqdmlsl_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int
}
/// Signed saturating doubling multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
@@ -10086,6 +11750,8 @@ pub unsafe fn vqdmlsl_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c:
}
/// Signed saturating doubling multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
@@ -10097,6 +11763,8 @@ pub unsafe fn vqdmlsl_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c
}
/// Signed saturating doubling multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
@@ -10108,6 +11776,8 @@ pub unsafe fn vqdmlsl_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c:
}
/// Signed saturating doubling multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
@@ -10119,6 +11789,8 @@ pub unsafe fn vqdmlsl_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c
}
/// Signed saturating doubling multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull))]
@@ -10129,6 +11801,8 @@ pub unsafe fn vqdmlslh_s16(a: i32, b: i16, c: i16) -> i32 {
}
/// Signed saturating doubling multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull))]
@@ -10139,6 +11813,8 @@ pub unsafe fn vqdmlsls_s32(a: i64, b: i32, c: i32) -> i64 {
}
/// Signed saturating doubling multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
@@ -10150,6 +11826,8 @@ pub unsafe fn vqdmlslh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -
}
/// Signed saturating doubling multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
@@ -10161,6 +11839,8 @@ pub unsafe fn vqdmlslh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t)
}
/// Signed saturating doubling multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, LANE = 0))]
@@ -10172,6 +11852,8 @@ pub unsafe fn vqdmlsls_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -
}
/// Signed saturating doubling multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, LANE = 0))]
@@ -10183,6 +11865,8 @@ pub unsafe fn vqdmlsls_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t)
}
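vqdmlsl* is the subtracting counterpart (acc - 2*b*c), again saturating at every step:

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn doubling_mul_sub(acc: i32, b: i16, c: i16) -> i32 {
    use core::arch::aarch64::*;
    // acc - saturate(2 * b * c), with a saturating subtract.
    vqdmlslh_s16(acc, b, c)
}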
/// Signed saturating doubling multiply returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh))]
@@ -10194,6 +11878,8 @@ pub unsafe fn vqdmulhh_s16(a: i16, b: i16) -> i16 {
}
/// Signed saturating doubling multiply returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh))]
@@ -10205,6 +11891,8 @@ pub unsafe fn vqdmulhs_s32(a: i32, b: i32) -> i32 {
}
/// Signed saturating doubling multiply returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
@@ -10217,6 +11905,8 @@ pub unsafe fn vqdmulhh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i16 {
}
/// Signed saturating doubling multiply returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
@@ -10229,6 +11919,8 @@ pub unsafe fn vqdmulhh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i16 {
}
/// Signed saturating doubling multiply returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
@@ -10241,6 +11933,8 @@ pub unsafe fn vqdmulhs_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i32 {
}
/// Signed saturating doubling multiply returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
@@ -10253,6 +11947,8 @@ pub unsafe fn vqdmulhs_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i32 {
}
/// Vector saturating doubling multiply high by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
@@ -10264,6 +11960,8 @@ pub unsafe fn vqdmulh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> i
}
/// Vector saturating doubling multiply high by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
@@ -10275,6 +11973,8 @@ pub unsafe fn vqdmulhq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) ->
}
/// Vector saturating doubling multiply high by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
@@ -10286,6 +11986,8 @@ pub unsafe fn vqdmulh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> i
}
/// Vector saturating doubling multiply high by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
@@ -10297,6 +11999,8 @@ pub unsafe fn vqdmulhq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) ->
}
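vqdmulh* keeps only the high half of the doubled product, which is exactly a Q15 (or Q31) fixed-point multiply. A sketch plus a worked value:

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn q15_mul(a: i16, b: i16) -> i16 {
    use core::arch::aarch64::*;
    // High 16 bits of saturate(2 * a * b).
    vqdmulhh_s16(a, b)
}

0.5 * 0.5 in Q15: q15_mul(16384, 16384) == 8192, i.e. 0.25.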
/// Saturating extract narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
@@ -10306,6 +12010,8 @@ pub unsafe fn vqmovnh_s16(a: i16) -> i8 {
}
/// Saturating extract narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
@@ -10315,6 +12021,8 @@ pub unsafe fn vqmovns_s32(a: i32) -> i16 {
}
/// Saturating extract narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
@@ -10324,6 +12032,8 @@ pub unsafe fn vqmovnh_u16(a: u16) -> u8 {
}
/// Saturating extract narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
@@ -10333,6 +12043,8 @@ pub unsafe fn vqmovns_u32(a: u32) -> u16 {
}
/// Saturating extract narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
@@ -10347,6 +12059,8 @@ pub unsafe fn vqmovnd_s64(a: i64) -> i32 {
}
/// Saturating extract narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
@@ -10361,6 +12075,8 @@ pub unsafe fn vqmovnd_u64(a: u64) -> u32 {
}
/// Signed saturating extract narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
@@ -10370,6 +12086,8 @@ pub unsafe fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
}
/// Signed saturating extract narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
@@ -10379,6 +12097,8 @@ pub unsafe fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
}
/// Signed saturating extract narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
@@ -10388,6 +12108,8 @@ pub unsafe fn vqmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
}
/// Unsigned saturating extract narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
@@ -10397,6 +12119,8 @@ pub unsafe fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
}
/// Unsigned saturating extract narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
@@ -10406,6 +12130,8 @@ pub unsafe fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
}
/// Unsigned saturating extract narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
@@ -10415,6 +12141,8 @@ pub unsafe fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
}
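The saturating-narrow intrinsics clamp to the smaller type's range instead of truncating; the *_high forms pack the narrowed result into the upper half of an existing vector. A scalar sketch:

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn narrow_saturating(x: i16) -> i8 {
    use core::arch::aarch64::*;
    // 300 clamps to 127, -300 clamps to -128, in-range values pass through.
    vqmovnh_s16(x)
}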
/// Signed saturating extract unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovunh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
@@ -10424,6 +12152,8 @@ pub unsafe fn vqmovunh_s16(a: i16) -> u8 {
}
/// Signed saturating extract unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovuns_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
@@ -10433,6 +12163,8 @@ pub unsafe fn vqmovuns_s32(a: i32) -> u16 {
}
/// Signed saturating extract unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovund_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
@@ -10442,6 +12174,8 @@ pub unsafe fn vqmovund_s64(a: i64) -> u32 {
}
/// Signed saturating extract unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
@@ -10451,6 +12185,8 @@ pub unsafe fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
}
/// Signed saturating extract unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
@@ -10460,6 +12196,8 @@ pub unsafe fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
}
/// Signed saturating extract unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
@@ -10469,6 +12207,8 @@ pub unsafe fn vqmovun_high_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
}
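vqmovun* narrows signed input to an unsigned result, clamping negatives to zero. A scalar sketch:

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn narrow_signed_to_unsigned(x: i16) -> u8 {
    use core::arch::aarch64::*;
    // -5 clamps to 0, 300 clamps to 255.
    vqmovunh_s16(x)
}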
/// Signed saturating rounding doubling multiply returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
@@ -10478,6 +12218,8 @@ pub unsafe fn vqrdmulhh_s16(a: i16, b: i16) -> i16 {
}
/// Signed saturating rounding doubling multiply returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
@@ -10487,6 +12229,8 @@ pub unsafe fn vqrdmulhs_s32(a: i32, b: i32) -> i32 {
}
/// Signed saturating rounding doubling multiply returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
@@ -10498,6 +12242,8 @@ pub unsafe fn vqrdmulhh_lane_s16<const LANE: i32>(a: i16, b: int16x4_t) -> i16 {
}
/// Signed saturating rounding doubling multiply returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
@@ -10509,6 +12255,8 @@ pub unsafe fn vqrdmulhh_laneq_s16<const LANE: i32>(a: i16, b: int16x8_t) -> i16
}
/// Signed saturating rounding doubling multiply returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
@@ -10520,6 +12268,8 @@ pub unsafe fn vqrdmulhs_lane_s32<const LANE: i32>(a: i32, b: int32x2_t) -> i32 {
}
/// Signed saturating rounding doubling multiply returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
@@ -10531,6 +12281,8 @@ pub unsafe fn vqrdmulhs_laneq_s32<const LANE: i32>(a: i32, b: int32x4_t) -> i32
}
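vqrdmulh* is the rounding variant of vqdmulh*: a rounding constant is added before the high half is taken, reducing bias in long fixed-point chains. An illustrative sketch:

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn q15_mul_rounded(a: i16, b: i16) -> i16 {
    use core::arch::aarch64::*;
    // round(2 * a * b / 2^16) with saturation.
    vqrdmulhh_s16(a, b)
}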
/// Signed saturating rounding doubling multiply accumulate returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
@@ -10545,6 +12297,8 @@ pub unsafe fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_
}
/// Signed saturating rounding doubling multiply accumulate returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
@@ -10559,6 +12313,8 @@ pub unsafe fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8
}
/// Signed saturating rounding doubling multiply accumulate returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
@@ -10573,6 +12329,8 @@ pub unsafe fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_
}
/// Signed saturating rounding doubling multiply accumulate returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
@@ -10587,6 +12345,8 @@ pub unsafe fn vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4
}
/// Signed saturating rounding doubling multiply accumulate returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
@@ -10599,6 +12359,8 @@ pub unsafe fn vqrdmlahh_s16(a: i16, b: i16, c: i16) -> i16 {
}
/// Signed saturating rounding doubling multiply accumulate returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
@@ -10611,6 +12373,8 @@ pub unsafe fn vqrdmlahs_s32(a: i32, b: i32, c: i32) -> i32 {
}
/// Signed saturating rounding doubling multiply accumulate returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
@@ -10623,6 +12387,8 @@ pub unsafe fn vqrdmlah_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c:
}
/// Signed saturating rounding doubling multiply accumulate returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
@@ -10635,6 +12401,8 @@ pub unsafe fn vqrdmlah_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c:
}
/// Signed saturating rounding doubling multiply accumulate returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
@@ -10647,6 +12415,8 @@ pub unsafe fn vqrdmlahq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c:
}
/// Signed saturating rounding doubling multiply accumulate returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
@@ -10659,6 +12429,8 @@ pub unsafe fn vqrdmlahq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c
}
/// Signed saturating rounding doubling multiply accumulate returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
@@ -10671,6 +12443,8 @@ pub unsafe fn vqrdmlah_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c:
}
/// Signed saturating rounding doubling multiply accumulate returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
@@ -10683,6 +12457,8 @@ pub unsafe fn vqrdmlah_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c:
}
/// Signed saturating rounding doubling multiply accumulate returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
@@ -10695,6 +12471,8 @@ pub unsafe fn vqrdmlahq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c:
}
/// Signed saturating rounding doubling multiply accumulate returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
@@ -10707,6 +12485,8 @@ pub unsafe fn vqrdmlahq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c
}
/// Signed saturating rounding doubling multiply accumulate returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_lane_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
@@ -10718,6 +12498,8 @@ pub unsafe fn vqrdmlahh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t)
}
/// Signed saturating rounding doubling multiply accumulate returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_laneq_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
@@ -10729,6 +12511,8 @@ pub unsafe fn vqrdmlahh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t)
}
/// Signed saturating rounding doubling multiply accumulate returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_lane_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
@@ -10740,6 +12524,8 @@ pub unsafe fn vqrdmlahs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t)
}
/// Signed saturating rounding doubling multiply accumulate returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_laneq_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
@@ -10751,6 +12537,8 @@ pub unsafe fn vqrdmlahs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t)
}
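vqrdmlah* fuses the rounding doubling multiply with a saturating accumulate, the core of many Q15 filter kernels; the vqrdmlsh* block that follows is its subtracting counterpart. A hedged sketch, assuming the rdm intrinsics are available on the toolchain in use (this patch gates them on the "rdm" target feature, and some toolchains may additionally require a nightly feature gate):

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon,rdm")]
unsafe fn q15_fused_mla(acc: [i16; 4], b: [i16; 4], c: [i16; 4]) -> [i16; 4] {
    use core::arch::aarch64::*;
    // Per lane: saturate(acc + round(2 * b * c / 2^16)).
    let r = vqrdmlah_s16(vld1_s16(acc.as_ptr()), vld1_s16(b.as_ptr()), vld1_s16(c.as_ptr()));
    let mut out = [0i16; 4];
    vst1_s16(out.as_mut_ptr(), r);
    out
}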
/// Signed saturating rounding doubling multiply subtract returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
@@ -10765,6 +12553,8 @@ pub unsafe fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_
}
/// Signed saturating rounding doubling multiply subtract returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
@@ -10779,6 +12569,8 @@ pub unsafe fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8
}
/// Signed saturating rounding doubling multiply subtract returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
@@ -10793,6 +12585,8 @@ pub unsafe fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_
}
/// Signed saturating rounding doubling multiply subtract returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
@@ -10807,6 +12601,8 @@ pub unsafe fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4
}
/// Signed saturating rounding doubling multiply subtract returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
@@ -10819,6 +12615,8 @@ pub unsafe fn vqrdmlshh_s16(a: i16, b: i16, c: i16) -> i16 {
}
/// Signed saturating rounding doubling multiply subtract returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
@@ -10831,6 +12629,8 @@ pub unsafe fn vqrdmlshs_s32(a: i32, b: i32, c: i32) -> i32 {
}
/// Signed saturating rounding doubling multiply subtract returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
@@ -10843,6 +12643,8 @@ pub unsafe fn vqrdmlsh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c:
}
/// Signed saturating rounding doubling multiply subtract returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
@@ -10855,6 +12657,8 @@ pub unsafe fn vqrdmlsh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c:
}
/// Signed saturating rounding doubling multiply subtract returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
@@ -10867,6 +12671,8 @@ pub unsafe fn vqrdmlshq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c:
}
/// Signed saturating rounding doubling multiply subtract returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
@@ -10879,6 +12685,8 @@ pub unsafe fn vqrdmlshq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c
}
/// Signed saturating rounding doubling multiply subtract returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
@@ -10891,6 +12699,8 @@ pub unsafe fn vqrdmlsh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c:
}
/// Signed saturating rounding doubling multiply subtract returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
@@ -10903,6 +12713,8 @@ pub unsafe fn vqrdmlsh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c:
}
/// Signed saturating rounding doubling multiply subtract returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
@@ -10915,6 +12727,8 @@ pub unsafe fn vqrdmlshq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c:
}
/// Signed saturating rounding doubling multiply subtract returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
@@ -10927,6 +12741,8 @@ pub unsafe fn vqrdmlshq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c
}
/// Signed saturating rounding doubling multiply subtract returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_lane_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
@@ -10938,6 +12754,8 @@ pub unsafe fn vqrdmlshh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t)
}
/// Signed saturating rounding doubling multiply subtract returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_laneq_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
@@ -10949,6 +12767,8 @@ pub unsafe fn vqrdmlshh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t)
}
/// Signed saturating rounding doubling multiply subtract returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_lane_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
@@ -10960,6 +12780,8 @@ pub unsafe fn vqrdmlshs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t)
}
/// Signed saturating rounding doubling multiply subtract returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_laneq_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
@@ -10971,6 +12793,8 @@ pub unsafe fn vqrdmlshs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t)
}
/// Signed saturating rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
@@ -10985,6 +12809,8 @@ pub unsafe fn vqrshls_s32(a: i32, b: i32) -> i32 {
}
/// Signed saturating rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
@@ -10999,6 +12825,8 @@ pub unsafe fn vqrshld_s64(a: i64, b: i64) -> i64 {
}
/// Signed saturating rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
@@ -11010,6 +12838,8 @@ pub unsafe fn vqrshlb_s8(a: i8, b: i8) -> i8 {
}
/// Signed saturating rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
@@ -11021,6 +12851,8 @@ pub unsafe fn vqrshlh_s16(a: i16, b: i16) -> i16 {
}
/// Unsigned saturating rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
@@ -11035,6 +12867,8 @@ pub unsafe fn vqrshls_u32(a: u32, b: i32) -> u32 {
}
/// Unsigned saturating rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
@@ -11049,6 +12883,8 @@ pub unsafe fn vqrshld_u64(a: u64, b: i64) -> u64 {
}
/// Unsigned saturating rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
@@ -11060,6 +12896,8 @@ pub unsafe fn vqrshlb_u8(a: u8, b: i8) -> u8 {
}
/// Unsigned saturating rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
@@ -11071,6 +12909,8 @@ pub unsafe fn vqrshlh_u16(a: u16, b: i16) -> u16 {
}
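The scalar saturating rounding shift-left intrinsics above take a value in the first operand and a signed shift count in the second; negative counts shift right with rounding. A small sketch, assuming NEON is available (it is mandatory on mainstream aarch64 targets) and using an illustrative function name:

    #[cfg(target_arch = "aarch64")]
    fn vqrshl_scalar_demo() {
        use core::arch::aarch64::*;
        unsafe {
            // 64 << 2 = 256 saturates to i8::MAX.
            assert_eq!(vqrshlb_s8(64, 2), 127);
            // Negative count: rounding right shift, round(5 / 2) = 3.
            assert_eq!(vqrshlb_s8(5, -1), 3);
            // Unsigned value, signed count: 200 << 1 saturates to u8::MAX.
            assert_eq!(vqrshlb_u8(200, 1), 255);
        }
    }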
/// Signed saturating rounded shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
@@ -11083,6 +12923,8 @@ pub unsafe fn vqrshrnh_n_s16<const N: i32>(a: i16) -> i8 {
}
/// Signed saturating rounded shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
@@ -11095,6 +12937,8 @@ pub unsafe fn vqrshrns_n_s32<const N: i32>(a: i32) -> i16 {
}
/// Signed saturating rounded shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
@@ -11107,6 +12951,8 @@ pub unsafe fn vqrshrnd_n_s64<const N: i32>(a: i64) -> i32 {
}
/// Signed saturating rounded shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
@@ -11118,6 +12964,8 @@ pub unsafe fn vqrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int
}
/// Signed saturating rounded shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
@@ -11129,6 +12977,8 @@ pub unsafe fn vqrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> in
}
/// Signed saturating rounded shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
@@ -11140,6 +12990,8 @@ pub unsafe fn vqrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> in
}
/// Unsigned saturating rounded shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
@@ -11152,6 +13004,8 @@ pub unsafe fn vqrshrnh_n_u16<const N: i32>(a: u16) -> u8 {
}
/// Unsigned saturating rounded shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
@@ -11164,6 +13018,8 @@ pub unsafe fn vqrshrns_n_u32<const N: i32>(a: u32) -> u16 {
}
/// Unsigned saturating rounded shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
@@ -11176,6 +13032,8 @@ pub unsafe fn vqrshrnd_n_u64<const N: i32>(a: u64) -> u32 {
}
/// Unsigned saturating rounded shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
@@ -11187,6 +13045,8 @@ pub unsafe fn vqrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> u
}
/// Unsigned saturating rounded shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
@@ -11198,6 +13058,8 @@ pub unsafe fn vqrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) ->
}
/// Unsigned saturating rounded shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
@@ -11209,6 +13071,8 @@ pub unsafe fn vqrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) ->
}
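A short sketch of the scalar saturating rounded narrowing shifts above: the wide value is shifted right by the const generic N with rounding, then saturated into the narrower type (aarch64 target assumed, demo name illustrative):

    #[cfg(target_arch = "aarch64")]
    fn vqrshrn_scalar_demo() {
        use core::arch::aarch64::*;
        unsafe {
            // round(100 / 4) = 25 fits in i8.
            assert_eq!(vqrshrnh_n_s16::<2>(100), 25);
            // round(1000 / 4) = 250 saturates to i8::MAX.
            assert_eq!(vqrshrnh_n_s16::<2>(1000), 127);
        }
    }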
/// Signed saturating rounded shift right unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrunh_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
@@ -11221,6 +13085,8 @@ pub unsafe fn vqrshrunh_n_s16<const N: i32>(a: i16) -> u8 {
}
/// Signed saturating rounded shift right unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshruns_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
@@ -11233,6 +13099,8 @@ pub unsafe fn vqrshruns_n_s32<const N: i32>(a: i32) -> u16 {
}
/// Signed saturating rounded shift right unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrund_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
@@ -11245,6 +13113,8 @@ pub unsafe fn vqrshrund_n_s64<const N: i32>(a: i64) -> u32 {
}
/// Signed saturating rounded shift right unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
@@ -11256,6 +13126,8 @@ pub unsafe fn vqrshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> u
}
/// Signed saturating rounded shift right unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
@@ -11267,6 +13139,8 @@ pub unsafe fn vqrshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) ->
}
/// Signed saturating rounded shift right unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
@@ -11278,6 +13152,8 @@ pub unsafe fn vqrshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) ->
}
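The unsigned-narrow variants above accept a signed input but saturate the rounded, shifted result into an unsigned type, clamping negatives to zero. A hedged sketch under the same assumptions as the earlier examples:

    #[cfg(target_arch = "aarch64")]
    fn vqrshrun_scalar_demo() {
        use core::arch::aarch64::*;
        unsafe {
            // Negative inputs clamp to 0 in the unsigned result.
            assert_eq!(vqrshrunh_n_s16::<2>(-8), 0);
            // round(2000 / 4) = 500 saturates to u8::MAX.
            assert_eq!(vqrshrunh_n_s16::<2>(2000), 255);
        }
    }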
/// Signed saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
@@ -11292,6 +13168,8 @@ pub unsafe fn vqshld_s64(a: i64, b: i64) -> i64 {
}
/// Signed saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
@@ -11302,6 +13180,8 @@ pub unsafe fn vqshlb_s8(a: i8, b: i8) -> i8 {
}
/// Signed saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
@@ -11312,6 +13192,8 @@ pub unsafe fn vqshlh_s16(a: i16, b: i16) -> i16 {
}
/// Signed saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
@@ -11322,6 +13204,8 @@ pub unsafe fn vqshls_s32(a: i32, b: i32) -> i32 {
}
/// Unsigned saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
@@ -11336,6 +13220,8 @@ pub unsafe fn vqshld_u64(a: u64, b: i64) -> u64 {
}
/// Unsigned saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
@@ -11346,6 +13232,8 @@ pub unsafe fn vqshlb_u8(a: u8, b: i8) -> u8 {
}
/// Unsigned saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
@@ -11356,6 +13244,8 @@ pub unsafe fn vqshlh_u16(a: u16, b: i16) -> u16 {
}
/// Unsigned saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
@@ -11366,6 +13256,8 @@ pub unsafe fn vqshls_u32(a: u32, b: i32) -> u32 {
}
/// Signed saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
@@ -11377,6 +13269,8 @@ pub unsafe fn vqshlb_n_s8<const N: i32>(a: i8) -> i8 {
}
/// Signed saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
@@ -11388,6 +13282,8 @@ pub unsafe fn vqshlh_n_s16<const N: i32>(a: i16) -> i16 {
}
/// Signed saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
@@ -11399,6 +13295,8 @@ pub unsafe fn vqshls_n_s32<const N: i32>(a: i32) -> i32 {
}
/// Signed saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
@@ -11410,6 +13308,8 @@ pub unsafe fn vqshld_n_s64<const N: i32>(a: i64) -> i64 {
}
/// Unsigned saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
@@ -11421,6 +13321,8 @@ pub unsafe fn vqshlb_n_u8<const N: i32>(a: u8) -> u8 {
}
/// Unsigned saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
@@ -11432,6 +13334,8 @@ pub unsafe fn vqshlh_n_u16<const N: i32>(a: u16) -> u16 {
}
/// Unsigned saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
@@ -11443,6 +13347,8 @@ pub unsafe fn vqshls_n_u32<const N: i32>(a: u32) -> u32 {
}
/// Unsigned saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
@@ -11454,6 +13360,8 @@ pub unsafe fn vqshld_n_u64<const N: i32>(a: u64) -> u64 {
}
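The `_n` forms above shift left by a compile-time constant and saturate on overflow. A minimal sketch (aarch64 assumed, demo name illustrative):

    #[cfg(target_arch = "aarch64")]
    fn vqshl_n_scalar_demo() {
        use core::arch::aarch64::*;
        unsafe {
            // 5 << 2 = 20 fits in i8.
            assert_eq!(vqshlb_n_s8::<2>(5), 20);
            // 100 << 2 = 400 saturates to i8::MAX.
            assert_eq!(vqshlb_n_s8::<2>(100), 127);
        }
    }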
/// Signed saturating shift left unsigned
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlub_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
@@ -11465,6 +13373,8 @@ pub unsafe fn vqshlub_n_s8<const N: i32>(a: i8) -> u8 {
}
/// Signed saturating shift left unsigned
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluh_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
@@ -11476,6 +13386,8 @@ pub unsafe fn vqshluh_n_s16<const N: i32>(a: i16) -> u16 {
}
/// Signed saturating shift left unsigned
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlus_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
@@ -11487,6 +13399,8 @@ pub unsafe fn vqshlus_n_s32<const N: i32>(a: i32) -> u32 {
}
/// Signed saturating shift left unsigned
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlud_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
@@ -11498,6 +13412,8 @@ pub unsafe fn vqshlud_n_s64<const N: i32>(a: i64) -> u64 {
}
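The `vqshlu*` forms shift a signed input left and saturate into an unsigned result, so negative inputs clamp to zero. A sketch under the same assumptions:

    #[cfg(target_arch = "aarch64")]
    fn vqshlu_n_scalar_demo() {
        use core::arch::aarch64::*;
        unsafe {
            assert_eq!(vqshlub_n_s8::<2>(-1), 0);    // negative clamps to 0
            assert_eq!(vqshlub_n_s8::<2>(50), 200);  // 50 << 2 fits in u8
            assert_eq!(vqshlub_n_s8::<2>(100), 255); // 100 << 2 saturates
        }
    }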
/// Signed saturating shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
@@ -11514,6 +13430,8 @@ pub unsafe fn vqshrnd_n_s64<const N: i32>(a: i64) -> i32 {
}
/// Signed saturating shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
@@ -11525,6 +13443,8 @@ pub unsafe fn vqshrnh_n_s16<const N: i32>(a: i16) -> i8 {
}
/// Signed saturating shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
@@ -11536,6 +13456,8 @@ pub unsafe fn vqshrns_n_s32<const N: i32>(a: i32) -> i16 {
}
/// Signed saturating shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
@@ -11547,6 +13469,8 @@ pub unsafe fn vqshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8
}
/// Signed saturating shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
@@ -11558,6 +13482,8 @@ pub unsafe fn vqshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int
}
/// Signed saturating shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
@@ -11569,6 +13495,8 @@ pub unsafe fn vqshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int
}
/// Unsigned saturating shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
@@ -11585,6 +13513,8 @@ pub unsafe fn vqshrnd_n_u64<const N: i32>(a: u64) -> u32 {
}
/// Unsigned saturating shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
@@ -11596,6 +13526,8 @@ pub unsafe fn vqshrnh_n_u16<const N: i32>(a: u16) -> u8 {
}
/// Unsigned saturating shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
@@ -11607,6 +13539,8 @@ pub unsafe fn vqshrns_n_u32<const N: i32>(a: u32) -> u16 {
}
/// Unsigned saturating shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
@@ -11618,6 +13552,8 @@ pub unsafe fn vqshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> ui
}
/// Unsigned saturating shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
@@ -11629,6 +13565,8 @@ pub unsafe fn vqshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> u
}
/// Unsigned saturating shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
@@ -11640,6 +13578,8 @@ pub unsafe fn vqshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> u
}
/// Signed saturating shift right unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrunh_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
@@ -11651,6 +13591,8 @@ pub unsafe fn vqshrunh_n_s16<const N: i32>(a: i16) -> u8 {
}
/// Signed saturating shift right unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshruns_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
@@ -11662,6 +13604,8 @@ pub unsafe fn vqshruns_n_s32<const N: i32>(a: i32) -> u16 {
}
/// Signed saturating shift right unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrund_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
@@ -11673,6 +13617,8 @@ pub unsafe fn vqshrund_n_s64<const N: i32>(a: i64) -> u32 {
}
/// Signed saturating shift right unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
@@ -11684,6 +13630,8 @@ pub unsafe fn vqshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> ui
}
/// Signed saturating shift right unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
@@ -11695,6 +13643,8 @@ pub unsafe fn vqshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> u
}
/// Signed saturating shift right unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
@@ -11706,6 +13656,8 @@ pub unsafe fn vqshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> u
}
/// Unsigned saturating accumulate of signed value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddb_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
@@ -11715,6 +13667,8 @@ pub unsafe fn vsqaddb_u8(a: u8, b: i8) -> u8 {
}
/// Unsigned saturating accumulate of signed value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddh_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
@@ -11724,6 +13678,8 @@ pub unsafe fn vsqaddh_u16(a: u16, b: i16) -> u16 {
}
/// Unsigned saturating accumulate of signed value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadds_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
@@ -11738,6 +13694,8 @@ pub unsafe fn vsqadds_u32(a: u32, b: i32) -> u32 {
}
/// Unsigned saturating accumulate of signed value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
@@ -11752,6 +13710,8 @@ pub unsafe fn vsqaddd_u64(a: u64, b: i64) -> u64 {
}
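`vsqadd*` adds a signed value into an unsigned accumulator with saturation at both ends of the unsigned range. A brief sketch (aarch64 assumed, demo name illustrative):

    #[cfg(target_arch = "aarch64")]
    fn vsqadd_scalar_demo() {
        use core::arch::aarch64::*;
        unsafe {
            assert_eq!(vsqaddb_u8(10, -20), 0);   // would go negative, clamps to 0
            assert_eq!(vsqaddb_u8(250, 10), 255); // overflows, clamps to u8::MAX
            assert_eq!(vsqaddb_u8(100, -30), 70); // in range, ordinary add
        }
    }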
/// Calculates the square root of each lane.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
@@ -11761,6 +13721,8 @@ pub unsafe fn vsqrt_f32(a: float32x2_t) -> float32x2_t {
}
/// Calculates the square root of each lane.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
@@ -11770,6 +13732,8 @@ pub unsafe fn vsqrtq_f32(a: float32x4_t) -> float32x4_t {
}
/// Calculates the square root of each lane.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
@@ -11779,6 +13743,8 @@ pub unsafe fn vsqrt_f64(a: float64x1_t) -> float64x1_t {
}
/// Calculates the square root of each lane.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
@@ -11788,6 +13754,8 @@ pub unsafe fn vsqrtq_f64(a: float64x2_t) -> float64x2_t {
}
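A small sketch of the per-lane square root, assuming an aarch64 target; the exact comparisons below hold because the IEEE square roots of 4.0 and 9.0 are exact:

    #[cfg(target_arch = "aarch64")]
    fn vsqrt_demo() {
        use core::arch::aarch64::*;
        unsafe {
            let a = vld1_f32([4.0f32, 9.0].as_ptr());
            let r = vsqrt_f32(a);
            assert_eq!(vget_lane_f32::<0>(r), 2.0);
            assert_eq!(vget_lane_f32::<1>(r), 3.0);
        }
    }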
/// Reciprocal square-root estimate.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
@@ -11802,6 +13770,8 @@ pub unsafe fn vrsqrte_f64(a: float64x1_t) -> float64x1_t {
}
/// Reciprocal square-root estimate.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
@@ -11816,6 +13786,8 @@ pub unsafe fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t {
}
/// Reciprocal square-root estimate.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtes_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
@@ -11830,6 +13802,8 @@ pub unsafe fn vrsqrtes_f32(a: f32) -> f32 {
}
/// Reciprocal square-root estimate.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrted_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
@@ -11844,6 +13818,8 @@ pub unsafe fn vrsqrted_f64(a: f64) -> f64 {
}
/// Floating-point reciprocal square root step
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
@@ -11858,6 +13834,8 @@ pub unsafe fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
}
/// Floating-point reciprocal square root step
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
@@ -11872,6 +13850,8 @@ pub unsafe fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
}
/// Floating-point reciprocal square root step
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtss_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
@@ -11886,6 +13866,8 @@ pub unsafe fn vrsqrtss_f32(a: f32, b: f32) -> f32 {
}
/// Floating-point reciprocal square root step
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
@@ -11900,6 +13882,8 @@ pub unsafe fn vrsqrtsd_f64(a: f64, b: f64) -> f64 {
}
/// Reciprocal estimate.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
@@ -11914,6 +13898,8 @@ pub unsafe fn vrecpe_f64(a: float64x1_t) -> float64x1_t {
}
/// Reciprocal estimate.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
@@ -11928,6 +13914,8 @@ pub unsafe fn vrecpeq_f64(a: float64x2_t) -> float64x2_t {
}
/// Reciprocal estimate.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpes_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
@@ -11942,6 +13930,8 @@ pub unsafe fn vrecpes_f32(a: f32) -> f32 {
}
/// Reciprocal estimate.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecped_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
@@ -11956,6 +13946,8 @@ pub unsafe fn vrecped_f64(a: f64) -> f64 {
}
/// Floating-point reciprocal step
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
@@ -11970,6 +13962,8 @@ pub unsafe fn vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
}
/// Floating-point reciprocal step
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
@@ -11984,6 +13978,8 @@ pub unsafe fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
}
/// Floating-point reciprocal step
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpss_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
@@ -11998,6 +13994,8 @@ pub unsafe fn vrecpss_f32(a: f32, b: f32) -> f32 {
}
/// Floating-point reciprocal step
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
@@ -12012,6 +14010,8 @@ pub unsafe fn vrecpsd_f64(a: f64, b: f64) -> f64 {
}
/// Floating-point reciprocal exponent
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxs_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpx))]
@@ -12026,6 +14026,8 @@ pub unsafe fn vrecpxs_f32(a: f32) -> f32 {
}
/// Floating-point reciprocal exponent
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpx))]
@@ -12040,6 +14042,8 @@ pub unsafe fn vrecpxd_f64(a: f64) -> f64 {
}
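The reciprocal estimate and reciprocal step intrinsics above are meant to be combined: `vrecpes_f32` gives a rough 1/x, and each `vrecpss_f32(d, x)` factor (which computes 2.0 - d*x) is one Newton-Raphson refinement. A sketch of that standard pattern, with the function name chosen here only for illustration:

    #[cfg(target_arch = "aarch64")]
    fn approx_reciprocal(d: f32) -> f32 {
        use core::arch::aarch64::*;
        unsafe {
            let mut x = vrecpes_f32(d);   // low-precision initial estimate
            x *= vrecpss_f32(d, x);       // first Newton-Raphson step
            x *= vrecpss_f32(d, x);       // second step, close to full f32 precision
            x
        }
    }

The same structure applies to the `vrsqrte*`/`vrsqrts*` pair for reciprocal square roots.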
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12049,6 +14053,8 @@ pub unsafe fn vreinterpret_s64_p64(a: poly64x1_t) -> int64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12058,6 +14064,8 @@ pub unsafe fn vreinterpret_u64_p64(a: poly64x1_t) -> uint64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12067,6 +14075,8 @@ pub unsafe fn vreinterpret_p64_s64(a: int64x1_t) -> poly64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12076,6 +14086,8 @@ pub unsafe fn vreinterpret_p64_u64(a: uint64x1_t) -> poly64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12085,6 +14097,8 @@ pub unsafe fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12094,6 +14108,8 @@ pub unsafe fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12103,6 +14119,8 @@ pub unsafe fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12112,6 +14130,8 @@ pub unsafe fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12121,6 +14141,8 @@ pub unsafe fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12130,6 +14152,8 @@ pub unsafe fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12139,6 +14163,8 @@ pub unsafe fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12148,6 +14174,8 @@ pub unsafe fn vreinterpret_s64_f64(a: float64x1_t) -> int64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12157,6 +14185,8 @@ pub unsafe fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12166,6 +14196,8 @@ pub unsafe fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12175,6 +14207,8 @@ pub unsafe fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12184,6 +14218,8 @@ pub unsafe fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12193,6 +14229,8 @@ pub unsafe fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12202,6 +14240,8 @@ pub unsafe fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12211,6 +14251,8 @@ pub unsafe fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12220,6 +14262,8 @@ pub unsafe fn vreinterpret_u64_f64(a: float64x1_t) -> uint64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12229,6 +14273,8 @@ pub unsafe fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12238,6 +14284,8 @@ pub unsafe fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12247,6 +14295,8 @@ pub unsafe fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12256,6 +14306,8 @@ pub unsafe fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12265,6 +14317,8 @@ pub unsafe fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12274,6 +14328,8 @@ pub unsafe fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12283,6 +14339,8 @@ pub unsafe fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12292,6 +14350,8 @@ pub unsafe fn vreinterpret_p64_f64(a: float64x1_t) -> poly64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12301,6 +14361,8 @@ pub unsafe fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12310,6 +14372,8 @@ pub unsafe fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12319,6 +14383,8 @@ pub unsafe fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12328,6 +14394,8 @@ pub unsafe fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12337,6 +14405,8 @@ pub unsafe fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12346,6 +14416,8 @@ pub unsafe fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12355,6 +14427,8 @@ pub unsafe fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12364,6 +14438,8 @@ pub unsafe fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12373,6 +14449,8 @@ pub unsafe fn vreinterpret_f64_s64(a: int64x1_t) -> float64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12382,6 +14460,8 @@ pub unsafe fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12391,6 +14471,8 @@ pub unsafe fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12400,6 +14482,8 @@ pub unsafe fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12409,6 +14493,8 @@ pub unsafe fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12418,6 +14504,8 @@ pub unsafe fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12427,6 +14515,8 @@ pub unsafe fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12436,6 +14526,8 @@ pub unsafe fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12445,6 +14537,8 @@ pub unsafe fn vreinterpret_f64_u64(a: uint64x1_t) -> float64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12454,6 +14548,8 @@ pub unsafe fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12463,6 +14559,8 @@ pub unsafe fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12472,6 +14570,8 @@ pub unsafe fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12481,6 +14581,8 @@ pub unsafe fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12490,6 +14592,8 @@ pub unsafe fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12499,6 +14603,8 @@ pub unsafe fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12508,6 +14614,8 @@ pub unsafe fn vreinterpret_f64_p64(a: poly64x1_t) -> float64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12517,6 +14625,8 @@ pub unsafe fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12526,6 +14636,8 @@ pub unsafe fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12535,6 +14647,8 @@ pub unsafe fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12544,6 +14658,8 @@ pub unsafe fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12553,6 +14669,8 @@ pub unsafe fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12562,6 +14680,8 @@ pub unsafe fn vreinterpretq_f64_p128(a: p128) -> float64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12571,6 +14691,8 @@ pub unsafe fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12580,6 +14702,8 @@ pub unsafe fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12589,6 +14713,8 @@ pub unsafe fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
@@ -12598,6 +14724,8 @@ pub unsafe fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
}
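The reinterpret casts above change only the type, never the bits. A minimal sketch showing the bit pattern of 1.0f64 read back as an integer lane (aarch64 assumed, demo name illustrative):

    #[cfg(target_arch = "aarch64")]
    fn reinterpret_demo() {
        use core::arch::aarch64::*;
        unsafe {
            let f = vdup_n_f64(1.0);
            // Same 64 bits, viewed as a signed integer lane.
            let bits = vget_lane_s64::<0>(vreinterpret_s64_f64(f));
            assert_eq!(bits, 0x3FF0_0000_0000_0000);
        }
    }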
/// Signed rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshl))]
@@ -12612,6 +14740,8 @@ pub unsafe fn vrshld_s64(a: i64, b: i64) -> i64 {
}
/// Unsigned rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshl))]
@@ -12626,6 +14756,8 @@ pub unsafe fn vrshld_u64(a: u64, b: i64) -> u64 {
}
/// Signed rounding shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshr, N = 2))]
@@ -12637,6 +14769,8 @@ pub unsafe fn vrshrd_n_s64<const N: i32>(a: i64) -> i64 {
}
/// Unsigned rounding shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshr, N = 2))]
@@ -12648,6 +14782,8 @@ pub unsafe fn vrshrd_n_u64<const N: i32>(a: u64) -> u64 {
}
/// Rounding shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
@@ -12659,6 +14795,8 @@ pub unsafe fn vrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8
}
/// Rounding shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
@@ -12670,6 +14808,8 @@ pub unsafe fn vrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int
}
/// Rounding shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
@@ -12681,6 +14821,8 @@ pub unsafe fn vrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int
}
/// Rounding shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
@@ -12692,6 +14834,8 @@ pub unsafe fn vrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> ui
}
/// Rounding shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
@@ -12703,6 +14847,8 @@ pub unsafe fn vrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> u
}
/// Rounding shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
@@ -12714,6 +14860,8 @@ pub unsafe fn vrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> u
}
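// A minimal usage sketch (illustrative only, assuming an aarch64 target with NEON):
// the *_high_n narrowing intrinsics keep an already-narrowed vector in the low half
// of the result and append the newly narrowed lanes as the high half.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn rshrn_high_demo() -> core::arch::aarch64::int8x16_t {
    use core::arch::aarch64::*;
    let low: int8x8_t = vdup_n_s8(0);      // previously narrowed lanes
    let wide: int16x8_t = vdupq_n_s16(10); // lanes still to be narrowed
    // Low 8 lanes come from `low`; high 8 lanes are (10 >> 2) with rounding, expected 3.
    vrshrn_high_n_s16::<2>(low, wide)
}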
/// Signed rounding shift right and accumulate.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srsra, N = 2))]
@@ -12726,6 +14874,8 @@ pub unsafe fn vrsrad_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
}
/// Unsigned rounding shift right and accumulate.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ursra, N = 2))]
@@ -12738,6 +14888,8 @@ pub unsafe fn vrsrad_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
}
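// A minimal usage sketch (illustrative only, assuming an aarch64 target with NEON):
// vrsrad_n_s64 rounds b right by N and accumulates the result into a.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn rsra_demo() -> i64 {
    use core::arch::aarch64::*;
    // 10 + rounding_shift_right(7, 2): the shift is expected to yield 2, giving 12.
    vrsrad_n_s64::<2>(10, 7)
}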
/// Rounding subtract returning high narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rsubhn2))]
@@ -12748,6 +14900,8 @@ pub unsafe fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x
}
/// Rounding subtract returning high narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rsubhn2))]
@@ -12758,6 +14912,8 @@ pub unsafe fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int1
}
/// Rounding subtract returning high narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rsubhn2))]
@@ -12768,6 +14924,8 @@ pub unsafe fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int3
}
/// Rounding subtract returning high narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rsubhn2))]
@@ -12778,6 +14936,8 @@ pub unsafe fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> ui
}
/// Rounding subtract returning high narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rsubhn2))]
@@ -12788,6 +14948,8 @@ pub unsafe fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> u
}
/// Rounding subtract returning high narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rsubhn2))]
@@ -12798,6 +14960,8 @@ pub unsafe fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> u
}
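// A minimal usage sketch (illustrative only, assuming an aarch64 target with NEON):
// vrsubhn_high_* subtracts c from b, keeps the rounded high half of each lane's
// difference, and appends those narrowed lanes to a.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn rsubhn_high_demo() -> core::arch::aarch64::int8x16_t {
    use core::arch::aarch64::*;
    let low: int8x8_t = vdup_n_s8(0);       // already-narrowed lanes for the low half
    let b: int16x8_t = vdupq_n_s16(0x1200); // placeholder minuend lanes
    let c: int16x8_t = vdupq_n_s16(0x0100); // placeholder subtrahend lanes
    // High 8 lanes hold the rounded top byte of (b - c) in each lane.
    vrsubhn_high_s16(low, b, c)
}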
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
@@ -12809,6 +14973,8 @@ pub unsafe fn vset_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> float64x
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
@@ -12820,6 +14986,8 @@ pub unsafe fn vsetq_lane_f64<const LANE: i32>(a: f64, b: float64x2_t) -> float64
}
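// A minimal usage sketch (illustrative only, assuming an aarch64 target with NEON):
// vsetq_lane_f64 overwrites the single lane selected by the LANE const parameter and
// leaves the other lane untouched.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn set_lane_demo() -> f64 {
    use core::arch::aarch64::*;
    let zeros: float64x2_t = vdupq_n_f64(0.0);
    let v = vsetq_lane_f64::<1>(42.0, zeros); // expected lanes: [0.0, 42.0]
    vgetq_lane_f64::<1>(v)                    // expected 42.0
}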
/// Signed Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshl))]
@@ -12829,6 +14997,8 @@ pub unsafe fn vshld_s64(a: i64, b: i64) -> i64 {
}
/// Unsigned Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushl))]
@@ -12838,6 +15008,8 @@ pub unsafe fn vshld_u64(a: u64, b: i64) -> u64 {
}
/// Signed shift left long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
@@ -12850,6 +15022,8 @@ pub unsafe fn vshll_high_n_s8<const N: i32>(a: int8x16_t) -> int16x8_t {
}
/// Signed shift left long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
@@ -12862,6 +15036,8 @@ pub unsafe fn vshll_high_n_s16<const N: i32>(a: int16x8_t) -> int32x4_t {
}
/// Signed shift left long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
@@ -12874,6 +15050,8 @@ pub unsafe fn vshll_high_n_s32<const N: i32>(a: int32x4_t) -> int64x2_t {
}
/// Unsigned shift left long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
@@ -12886,6 +15064,8 @@ pub unsafe fn vshll_high_n_u8<const N: i32>(a: uint8x16_t) -> uint16x8_t {
}
/// Unsigned shift left long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
@@ -12898,6 +15078,8 @@ pub unsafe fn vshll_high_n_u16<const N: i32>(a: uint16x8_t) -> uint32x4_t {
}
/// Unsigned shift left long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
@@ -12910,6 +15092,8 @@ pub unsafe fn vshll_high_n_u32<const N: i32>(a: uint32x4_t) -> uint64x2_t {
}
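// A minimal usage sketch (illustrative only, assuming an aarch64 target with NEON):
// vshll_high_n_u8 widens the high eight u8 lanes to u16 and shifts each widened lane
// left by N.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn shll_high_demo() -> core::arch::aarch64::uint16x8_t {
    use core::arch::aarch64::*;
    let bytes: uint8x16_t = vdupq_n_u8(3);
    // Each result lane is expected to be 3 << 4 == 48.
    vshll_high_n_u8::<4>(bytes)
}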
/// Shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
@@ -12921,6 +15105,8 @@ pub unsafe fn vshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x
}
/// Shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
@@ -12932,6 +15118,8 @@ pub unsafe fn vshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int1
}
/// Shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
@@ -12943,6 +15131,8 @@ pub unsafe fn vshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int3
}
/// Shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
@@ -12954,6 +15144,8 @@ pub unsafe fn vshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uin
}
/// Shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
@@ -12965,6 +15157,8 @@ pub unsafe fn vshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> ui
}
/// Shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
@@ -12976,6 +15170,8 @@ pub unsafe fn vshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> ui
}
/// SM3PARTW1
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw1q_u32)
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3partw1))]
@@ -12989,6 +15185,8 @@ pub unsafe fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> ui
}
/// SM3PARTW2
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw2q_u32)
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3partw2))]
@@ -13002,6 +15200,8 @@ pub unsafe fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> ui
}
/// SM3SS1
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3ss1q_u32)
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3ss1))]
@@ -13015,6 +15215,8 @@ pub unsafe fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint3
}
/// SM4 key
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4ekeyq_u32)
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm4ekey))]
@@ -13028,6 +15230,8 @@ pub unsafe fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
}
/// SM4 encode
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4eq_u32)
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm4e))]
@@ -13041,6 +15245,8 @@ pub unsafe fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
}
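// A minimal call-shape sketch for the SM4 intrinsics (illustrative only; the values are
// placeholders, not real key or state material). Callers would normally confirm support
// at runtime, e.g. with std::arch::is_aarch64_feature_detected!("sm4").
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon,sm4")]
unsafe fn sm4_demo() -> core::arch::aarch64::uint32x4_t {
    use core::arch::aarch64::*;
    let state: uint32x4_t = vdupq_n_u32(0);   // placeholder cipher state
    let key: uint32x4_t = vdupq_n_u32(0);     // placeholder round-key words
    let round_keys = vsm4ekeyq_u32(key, key); // SM4 key-schedule step
    vsm4eq_u32(state, round_keys)             // SM4 encryption step on the state
}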
/// Rotate and exclusive OR
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrax1q_u64)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(rax1))]
@@ -13054,6 +15260,8 @@ pub unsafe fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
}
/// SHA512 hash update part 1
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512hq_u64)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h))]
@@ -13067,6 +15275,8 @@ pub unsafe fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint
}
/// SHA512 hash update part 2
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512h2q_u64)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h2))]
@@ -13080,6 +15290,8 @@ pub unsafe fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uin
}
/// SHA512 schedule update 0
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su0q_u64)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su0))]
@@ -13093,6 +15305,8 @@ pub unsafe fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
}
/// SHA512 schedule update 1
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su1q_u64)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su1))]
@@ -13106,6 +15320,8 @@ pub unsafe fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> ui
}
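// A minimal call-shape sketch for the SHA512 intrinsics above (illustrative only; the
// inputs are placeholders, not a real hash state). A full implementation would feed
// message-schedule and hash-state vectors through these steps for every block.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon,sha3")]
unsafe fn sha512_demo() -> core::arch::aarch64::uint64x2_t {
    use core::arch::aarch64::*;
    let w: uint64x2_t = vdupq_n_u64(0);     // placeholder message-schedule words
    let state: uint64x2_t = vdupq_n_u64(0); // placeholder hash-state words
    let schedule = vsha512su1q_u64(vsha512su0q_u64(w, w), w, w);        // schedule update
    vsha512h2q_u64(vsha512hq_u64(state, state, schedule), state, state) // hash update
}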
/// Floating-point round to 32-bit integer, using current rounding mode
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f32)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(test, assert_instr(frint32x))]
@@ -13119,6 +15335,8 @@ pub unsafe fn vrnd32x_f32(a: float32x2_t) -> float32x2_t {
}
/// Floating-point round to 32-bit integer, using current rounding mode
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f32)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(test, assert_instr(frint32x))]
@@ -13132,6 +15350,8 @@ pub unsafe fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t {
}
/// Floating-point round to 32-bit integer toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f32)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(test, assert_instr(frint32z))]
@@ -13145,6 +15365,8 @@ pub unsafe fn vrnd32z_f32(a: float32x2_t) -> float32x2_t {
}
/// Floating-point round to 32-bit integer toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f32)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(test, assert_instr(frint32z))]
@@ -13158,6 +15380,8 @@ pub unsafe fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t {
}
/// Floating-point round to 64-bit integer, using current rounding mode
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f32)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(test, assert_instr(frint64x))]
@@ -13171,6 +15395,8 @@ pub unsafe fn vrnd64x_f32(a: float32x2_t) -> float32x2_t {
}
/// Floating-point round to 64-bit integer, using current rounding mode
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f32)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(test, assert_instr(frint64x))]
@@ -13184,6 +15410,8 @@ pub unsafe fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t {
}
/// Floating-point round to 64-bit integer toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f32)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(test, assert_instr(frint64z))]
@@ -13197,6 +15425,8 @@ pub unsafe fn vrnd64z_f32(a: float32x2_t) -> float32x2_t {
}
/// Floating-point round to 64-bit integer toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f32)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(test, assert_instr(frint64z))]
@@ -13210,6 +15440,8 @@ pub unsafe fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t {
}
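// A minimal usage sketch (illustrative only, assuming an aarch64 target with the frintts
// extension): these intrinsics round each f32 lane to an integral value representable in
// a 32- or 64-bit integer; the results stay floating point.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon,frintts")]
unsafe fn frint_demo() -> (core::arch::aarch64::float32x2_t, core::arch::aarch64::float32x2_t) {
    use core::arch::aarch64::*;
    let v: float32x2_t = vdup_n_f32(2.5);
    let nearest = vrnd32x_f32(v);     // uses the current rounding mode (2.0 under the default round-to-nearest-even)
    let toward_zero = vrnd32z_f32(v); // always truncates, expected 2.0
    (nearest, toward_zero)
}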
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn1))]
@@ -13219,6 +15451,8 @@ pub unsafe fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn1))]
@@ -13228,6 +15462,8 @@ pub unsafe fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn1))]
@@ -13237,6 +15473,8 @@ pub unsafe fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn1))]
@@ -13246,6 +15484,8 @@ pub unsafe fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn1))]
@@ -13255,6 +15495,8 @@ pub unsafe fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn1))]
@@ -13264,6 +15506,8 @@ pub unsafe fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn1))]
@@ -13273,6 +15517,8 @@ pub unsafe fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn1))]
@@ -13282,6 +15528,8 @@ pub unsafe fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn1))]
@@ -13291,6 +15539,8 @@ pub unsafe fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn1))]
@@ -13300,6 +15550,8 @@ pub unsafe fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn1))]
@@ -13309,6 +15561,8 @@ pub unsafe fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn1))]
@@ -13318,6 +15572,8 @@ pub unsafe fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn1))]
@@ -13327,6 +15583,8 @@ pub unsafe fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn1))]
@@ -13336,6 +15594,8 @@ pub unsafe fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -13345,6 +15605,8 @@ pub unsafe fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -13354,6 +15616,8 @@ pub unsafe fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -13363,6 +15627,8 @@ pub unsafe fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -13372,6 +15638,8 @@ pub unsafe fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -13381,6 +15649,8 @@ pub unsafe fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn1))]
@@ -13390,6 +15660,8 @@ pub unsafe fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -13399,6 +15671,8 @@ pub unsafe fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -13408,6 +15682,8 @@ pub unsafe fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn2))]
@@ -13417,6 +15693,8 @@ pub unsafe fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn2))]
@@ -13426,6 +15704,8 @@ pub unsafe fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn2))]
@@ -13435,6 +15715,8 @@ pub unsafe fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn2))]
@@ -13444,6 +15726,8 @@ pub unsafe fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn2))]
@@ -13453,6 +15737,8 @@ pub unsafe fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn2))]
@@ -13462,6 +15748,8 @@ pub unsafe fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn2))]
@@ -13471,6 +15759,8 @@ pub unsafe fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn2))]
@@ -13480,6 +15770,8 @@ pub unsafe fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn2))]
@@ -13489,6 +15781,8 @@ pub unsafe fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn2))]
@@ -13498,6 +15792,8 @@ pub unsafe fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn2))]
@@ -13507,6 +15803,8 @@ pub unsafe fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn2))]
@@ -13516,6 +15814,8 @@ pub unsafe fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn2))]
@@ -13525,6 +15825,8 @@ pub unsafe fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn2))]
@@ -13534,6 +15836,8 @@ pub unsafe fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -13543,6 +15847,8 @@ pub unsafe fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -13552,6 +15858,8 @@ pub unsafe fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -13561,6 +15869,8 @@ pub unsafe fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -13570,6 +15880,8 @@ pub unsafe fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -13579,6 +15891,8 @@ pub unsafe fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(trn2))]
@@ -13588,6 +15902,8 @@ pub unsafe fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -13597,6 +15913,8 @@ pub unsafe fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
}
/// Transpose vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -13606,6 +15924,8 @@ pub unsafe fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
}
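// A minimal usage sketch (illustrative only, assuming an aarch64 target with NEON):
// TRN1 interleaves the even-indexed lanes of two vectors and TRN2 the odd-indexed ones.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn transpose_demo() -> ([i16; 4], [i16; 4]) {
    use core::arch::aarch64::*;
    let a = vld1_s16([0i16, 1, 2, 3].as_ptr());
    let b = vld1_s16([10i16, 11, 12, 13].as_ptr());
    let (mut even, mut odd) = ([0i16; 4], [0i16; 4]);
    vst1_s16(even.as_mut_ptr(), vtrn1_s16(a, b)); // expected [0, 10, 2, 12]
    vst1_s16(odd.as_mut_ptr(), vtrn2_s16(a, b));  // expected [1, 11, 3, 13]
    (even, odd)
}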
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -13615,6 +15935,8 @@ pub unsafe fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -13624,6 +15946,8 @@ pub unsafe fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -13633,6 +15957,8 @@ pub unsafe fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -13642,6 +15968,8 @@ pub unsafe fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -13651,6 +15979,8 @@ pub unsafe fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -13660,6 +15990,8 @@ pub unsafe fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -13669,6 +16001,8 @@ pub unsafe fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -13678,6 +16012,8 @@ pub unsafe fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -13687,6 +16023,8 @@ pub unsafe fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -13696,6 +16034,8 @@ pub unsafe fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -13705,6 +16045,8 @@ pub unsafe fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -13714,6 +16056,8 @@ pub unsafe fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -13723,6 +16067,8 @@ pub unsafe fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -13732,6 +16078,8 @@ pub unsafe fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -13741,6 +16089,8 @@ pub unsafe fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -13750,6 +16100,8 @@ pub unsafe fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -13759,6 +16111,8 @@ pub unsafe fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -13768,6 +16122,8 @@ pub unsafe fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -13777,6 +16133,8 @@ pub unsafe fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -13786,6 +16144,8 @@ pub unsafe fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -13795,6 +16155,8 @@ pub unsafe fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -13804,6 +16166,8 @@ pub unsafe fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -13813,6 +16177,8 @@ pub unsafe fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -13822,6 +16188,8 @@ pub unsafe fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -13831,6 +16199,8 @@ pub unsafe fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -13840,6 +16210,8 @@ pub unsafe fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -13849,6 +16221,8 @@ pub unsafe fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -13858,6 +16232,8 @@ pub unsafe fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -13867,6 +16243,8 @@ pub unsafe fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -13876,6 +16254,8 @@ pub unsafe fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -13885,6 +16265,8 @@ pub unsafe fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -13894,6 +16276,8 @@ pub unsafe fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -13903,6 +16287,8 @@ pub unsafe fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -13912,6 +16298,8 @@ pub unsafe fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -13921,6 +16309,8 @@ pub unsafe fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -13930,6 +16320,8 @@ pub unsafe fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -13939,6 +16331,8 @@ pub unsafe fn vzip2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -13948,6 +16342,8 @@ pub unsafe fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -13957,6 +16353,8 @@ pub unsafe fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -13966,6 +16364,8 @@ pub unsafe fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -13975,6 +16375,8 @@ pub unsafe fn vzip2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -13984,6 +16386,8 @@ pub unsafe fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -13993,6 +16397,8 @@ pub unsafe fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -14002,6 +16408,8 @@ pub unsafe fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
}
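// A minimal usage sketch (illustrative only, assuming an aarch64 target with NEON):
// ZIP1 interleaves the low halves of two vectors lane by lane and ZIP2 the high halves.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn zip_demo() -> ([i16; 4], [i16; 4]) {
    use core::arch::aarch64::*;
    let a = vld1_s16([0i16, 1, 2, 3].as_ptr());
    let b = vld1_s16([10i16, 11, 12, 13].as_ptr());
    let (mut low, mut high) = ([0i16; 4], [0i16; 4]);
    vst1_s16(low.as_mut_ptr(), vzip1_s16(a, b));  // expected [0, 10, 1, 11]
    vst1_s16(high.as_mut_ptr(), vzip2_s16(a, b)); // expected [2, 12, 3, 13]
    (low, high)
}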
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp1))]
@@ -14011,6 +16419,8 @@ pub unsafe fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp1))]
@@ -14020,6 +16430,8 @@ pub unsafe fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp1))]
@@ -14029,6 +16441,8 @@ pub unsafe fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp1))]
@@ -14038,6 +16452,8 @@ pub unsafe fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp1))]
@@ -14047,6 +16463,8 @@ pub unsafe fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp1))]
@@ -14056,6 +16474,8 @@ pub unsafe fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp1))]
@@ -14065,6 +16485,8 @@ pub unsafe fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp1))]
@@ -14074,6 +16496,8 @@ pub unsafe fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp1))]
@@ -14083,6 +16507,8 @@ pub unsafe fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp1))]
@@ -14092,6 +16518,8 @@ pub unsafe fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp1))]
@@ -14101,6 +16529,8 @@ pub unsafe fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp1))]
@@ -14110,6 +16540,8 @@ pub unsafe fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp1))]
@@ -14119,6 +16551,8 @@ pub unsafe fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp1))]
@@ -14128,6 +16562,8 @@ pub unsafe fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -14137,6 +16573,8 @@ pub unsafe fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -14146,6 +16584,8 @@ pub unsafe fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -14155,6 +16595,8 @@ pub unsafe fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -14164,6 +16606,8 @@ pub unsafe fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -14173,6 +16617,8 @@ pub unsafe fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp1))]
@@ -14182,6 +16628,8 @@ pub unsafe fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -14191,6 +16639,8 @@ pub unsafe fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip1))]
@@ -14200,6 +16650,8 @@ pub unsafe fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp2))]
@@ -14209,6 +16661,8 @@ pub unsafe fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp2))]
@@ -14218,6 +16672,8 @@ pub unsafe fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp2))]
@@ -14227,6 +16683,8 @@ pub unsafe fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp2))]
@@ -14236,6 +16694,8 @@ pub unsafe fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp2))]
@@ -14245,6 +16705,8 @@ pub unsafe fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp2))]
@@ -14254,6 +16716,8 @@ pub unsafe fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp2))]
@@ -14263,6 +16727,8 @@ pub unsafe fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp2))]
@@ -14272,6 +16738,8 @@ pub unsafe fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp2))]
@@ -14281,6 +16749,8 @@ pub unsafe fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp2))]
@@ -14290,6 +16760,8 @@ pub unsafe fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp2))]
@@ -14299,6 +16771,8 @@ pub unsafe fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp2))]
@@ -14308,6 +16782,8 @@ pub unsafe fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp2))]
@@ -14317,6 +16793,8 @@ pub unsafe fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp2))]
@@ -14326,6 +16804,8 @@ pub unsafe fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -14335,6 +16815,8 @@ pub unsafe fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -14344,6 +16826,8 @@ pub unsafe fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -14353,6 +16837,8 @@ pub unsafe fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -14362,6 +16848,8 @@ pub unsafe fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -14371,6 +16859,8 @@ pub unsafe fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uzp2))]
@@ -14380,6 +16870,8 @@ pub unsafe fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -14389,6 +16881,8 @@ pub unsafe fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(zip2))]
@@ -14398,6 +16892,8 @@ pub unsafe fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
}
/// Unsigned Absolute difference and Accumulate Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabal))]
@@ -14410,6 +16906,8 @@ pub unsafe fn vabal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint
}
/// Unsigned Absolute difference and Accumulate Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabal))]
@@ -14422,6 +16920,8 @@ pub unsafe fn vabal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uin
}
/// Unsigned Absolute difference and Accumulate Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabal))]
@@ -14434,6 +16934,8 @@ pub unsafe fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uin
}
/// Signed Absolute difference and Accumulate Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sabal))]
@@ -14447,6 +16949,8 @@ pub unsafe fn vabal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8
}
/// Signed Absolute difference and Accumulate Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sabal))]
@@ -14460,6 +16964,8 @@ pub unsafe fn vabal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x
}
/// Signed Absolute difference and Accumulate Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sabal))]
@@ -14473,6 +16979,8 @@ pub unsafe fn vabal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x
}
/// Signed saturating Absolute value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqabs))]
@@ -14487,6 +16995,8 @@ pub unsafe fn vqabs_s64(a: int64x1_t) -> int64x1_t {
}
/// Signed saturating Absolute value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqabs))]
@@ -14501,6 +17011,8 @@ pub unsafe fn vqabsq_s64(a: int64x2_t) -> int64x2_t {
}
/// Signed saturating absolute value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsb_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqabs))]
@@ -14510,6 +17022,8 @@ pub unsafe fn vqabsb_s8(a: i8) -> i8 {
}
/// Signed saturating absolute value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqabs))]
@@ -14519,6 +17033,8 @@ pub unsafe fn vqabsh_s16(a: i16) -> i16 {
}
/// Signed saturating absolute value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabss_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqabs))]
@@ -14533,6 +17049,8 @@ pub unsafe fn vqabss_s32(a: i32) -> i32 {
}
/// Signed saturating absolute value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqabs))]
@@ -14547,6 +17065,8 @@ pub unsafe fn vqabsd_s64(a: i64) -> i64 {
}
/// Shift left and insert
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 2))]
@@ -14558,6 +17078,8 @@ pub unsafe fn vslid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
}
/// Shift left and insert
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 2))]
@@ -14569,6 +17091,8 @@ pub unsafe fn vslid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
}
/// Shift right and insert
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 2))]
@@ -14580,6 +17104,8 @@ pub unsafe fn vsrid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
}
/// Shift right and insert
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 2))]
@@ -17603,18 +20129,18 @@ mod test {
#[simd_test(enable = "neon")]
unsafe fn test_vextq_p64() {
- let a: i64x2 = i64x2::new(0, 8);
- let b: i64x2 = i64x2::new(9, 11);
- let e: i64x2 = i64x2::new(8, 9);
+ let a: i64x2 = i64x2::new(1, 1);
+ let b: i64x2 = i64x2::new(2, 2);
+ let e: i64x2 = i64x2::new(1, 2);
let r: i64x2 = transmute(vextq_p64::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vextq_f64() {
- let a: f64x2 = f64x2::new(0., 2.);
- let b: f64x2 = f64x2::new(3., 4.);
- let e: f64x2 = f64x2::new(2., 3.);
+ let a: f64x2 = f64x2::new(1., 1.);
+ let b: f64x2 = f64x2::new(2., 2.);
+ let e: f64x2 = f64x2::new(1., 2.);
let r: f64x2 = transmute(vextq_f64::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
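For reference, the updated expectations follow directly from the extract semantics: vextq_*::<1> on two-lane vectors yields the last lane of `a` followed by the first lane of `b`. A minimal scalar sketch of that behaviour for the two-lane case (hypothetical helper, not part of the crate):

// Scalar model of vextq_*::<1> on two-lane vectors: the result is the
// window of length 2 starting at index 1 in the concatenation [a, b].
fn ext2_lane1<T: Copy>(a: [T; 2], b: [T; 2]) -> [T; 2] {
    [a[1], b[0]]
}
// ext2_lane1([1, 1], [2, 2]) == [1, 2], matching the new `e` values above.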
diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs
index 65ba527ee..9d9946b4f 100644
--- a/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs
+++ b/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs
@@ -28,14 +28,17 @@ types! {
}
/// ARM-specific type containing two `float64x1_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub struct float64x1x2_t(pub float64x1_t, pub float64x1_t);
/// ARM-specific type containing three `float64x1_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub struct float64x1x3_t(pub float64x1_t, pub float64x1_t, pub float64x1_t);
/// ARM-specific type containing four `float64x1_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub struct float64x1x4_t(
@@ -46,14 +49,17 @@ pub struct float64x1x4_t(
);
/// ARM-specific type containing two `float64x2_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub struct float64x2x2_t(pub float64x2_t, pub float64x2_t);
/// ARM-specific type containing three `float64x2_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub struct float64x2x3_t(pub float64x2_t, pub float64x2_t, pub float64x2_t);
/// ARM-specific type containing four `float64x2_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub struct float64x2x4_t(
@@ -658,6 +664,8 @@ pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t {
}
/// Load multiple single-element structures to one, two, three, or four registers.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ldr))]
@@ -667,6 +675,8 @@ pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t {
}
/// Load multiple single-element structures to one, two, three, or four registers.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ldr))]
@@ -953,6 +963,8 @@ pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(str))]
@@ -963,6 +975,8 @@ pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(str))]
@@ -1045,7 +1059,11 @@ pub unsafe fn vabsq_s64(a: int64x2_t) -> int64x2_t {
#[cfg_attr(test, assert_instr(bsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vbsl_f64(a: uint64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
- simd_select(transmute::<_, int64x1_t>(a), b, c)
+ let not = int64x1_t(-1);
+ transmute(simd_or(
+ simd_and(a, transmute(b)),
+ simd_and(simd_xor(a, transmute(not)), transmute(c)),
+ ))
}
/// Bitwise Select.
#[inline]
@@ -1053,7 +1071,11 @@ pub unsafe fn vbsl_f64(a: uint64x1_t, b: float64x1_t, c: float64x1_t) -> float64
#[cfg_attr(test, assert_instr(bsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vbsl_p64(a: poly64x1_t, b: poly64x1_t, c: poly64x1_t) -> poly64x1_t {
- simd_select(transmute::<_, int64x1_t>(a), b, c)
+ let not = int64x1_t(-1);
+ transmute(simd_or(
+ simd_and(a, transmute(b)),
+ simd_and(simd_xor(a, transmute(not)), transmute(c)),
+ ))
}
/// Bitwise Select. (128-bit)
#[inline]
@@ -1061,7 +1083,11 @@ pub unsafe fn vbsl_p64(a: poly64x1_t, b: poly64x1_t, c: poly64x1_t) -> poly64x1_
#[cfg_attr(test, assert_instr(bsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vbslq_f64(a: uint64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
- simd_select(transmute::<_, int64x2_t>(a), b, c)
+ let not = int64x2_t(-1, -1);
+ transmute(simd_or(
+ simd_and(a, transmute(b)),
+ simd_and(simd_xor(a, transmute(not)), transmute(c)),
+ ))
}
/// Bitwise Select. (128-bit)
#[inline]
@@ -1069,7 +1095,11 @@ pub unsafe fn vbslq_f64(a: uint64x2_t, b: float64x2_t, c: float64x2_t) -> float6
#[cfg_attr(test, assert_instr(bsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vbslq_p64(a: poly64x2_t, b: poly64x2_t, c: poly64x2_t) -> poly64x2_t {
- simd_select(transmute::<_, int64x2_t>(a), b, c)
+ let not = int64x2_t(-1, -1);
+ transmute(simd_or(
+ simd_and(a, transmute(b)),
+ simd_and(simd_xor(a, transmute(not)), transmute(c)),
+ ))
}
/// Signed saturating Accumulate of Unsigned value.
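The rewritten bodies above spell out the bitwise-select identity (a & b) | (!a & c) bit by bit, whereas simd_select chooses whole lanes from a boolean mask; NEON's BSL applies the mask per bit. A minimal scalar sketch of that per-bit behaviour on a plain u64 (not the NEON vector types):

// Per-bit select: take each bit from b where the mask bit is set, else from c.
fn bsl_u64(mask: u64, b: u64, c: u64) -> u64 {
    (mask & b) | (!mask & c)
}
// With mask = 0x8000_0000_0000_0000 only the sign bit of b is taken, so
// bsl_u64(mask, (-1.23f64).to_bits(), (2.34f64).to_bits()) encodes -2.34,
// which is the case the updated vbsl_f64 test below exercises.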
@@ -3365,7 +3395,10 @@ pub unsafe fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x
static_assert_imm4!(N);
transmute(vsliq_n_s16_(transmute(a), transmute(b), N))
}
+
/// Shift Left and Insert (immediate)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
@@ -3375,7 +3408,10 @@ pub unsafe fn vsli_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1
static_assert!(N: i32 where N >= 0 && N <= 63);
transmute(vsli_n_s64_(transmute(a), transmute(b), N))
}
+
/// Shift Left and Insert (immediate)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
@@ -3585,7 +3621,10 @@ pub unsafe fn vsriq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x
static_assert!(N: i32 where N >= 1 && N <= 16);
transmute(vsriq_n_s16_(transmute(a), transmute(b), N))
}
+
/// Shift Right and Insert (immediate)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
@@ -3595,7 +3634,10 @@ pub unsafe fn vsri_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1
static_assert!(N: i32 where N >= 1 && N <= 64);
transmute(vsri_n_s64_(transmute(a), transmute(b), N))
}
+
/// Shift Right and Insert (immediate)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
@@ -5136,37 +5178,37 @@ mod tests {
#[simd_test(enable = "neon")]
unsafe fn test_vbsl_f64() {
- let a = u64x1::new(u64::MAX);
- let b = f64x1::new(f64::MAX);
- let c = f64x1::new(f64::MIN);
- let e = f64x1::new(f64::MAX);
+ let a = u64x1::new(0x8000000000000000);
+ let b = f64x1::new(-1.23f64);
+ let c = f64x1::new(2.34f64);
+ let e = f64x1::new(-2.34f64);
let r: f64x1 = transmute(vbsl_f64(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vbsl_p64() {
- let a = u64x1::new(u64::MAX);
+ let a = u64x1::new(1);
let b = u64x1::new(u64::MAX);
let c = u64x1::new(u64::MIN);
- let e = u64x1::new(u64::MAX);
+ let e = u64x1::new(1);
let r: u64x1 = transmute(vbsl_p64(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vbslq_f64() {
- let a = u64x2::new(u64::MAX, 0);
- let b = f64x2::new(f64::MAX, f64::MAX);
- let c = f64x2::new(f64::MIN, f64::MIN);
- let e = f64x2::new(f64::MAX, f64::MIN);
+ let a = u64x2::new(1, 0x8000000000000000);
+ let b = f64x2::new(f64::MAX, -1.23f64);
+ let c = f64x2::new(f64::MIN, 2.34f64);
+ let e = f64x2::new(f64::MIN, -2.34f64);
let r: f64x2 = transmute(vbslq_f64(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vbslq_p64() {
- let a = u64x2::new(u64::MAX, 0);
+ let a = u64x2::new(u64::MAX, 1);
let b = u64x2::new(u64::MAX, u64::MAX);
let c = u64x2::new(u64::MIN, u64::MIN);
- let e = u64x2::new(u64::MAX, u64::MIN);
+ let e = u64x2::new(u64::MAX, 1);
let r: u64x2 = transmute(vbslq_p64(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
diff --git a/library/stdarch/crates/core_arch/src/arm/neon.rs b/library/stdarch/crates/core_arch/src/arm/neon.rs
index a0ad92c33..a6291c95c 100644
--- a/library/stdarch/crates/core_arch/src/arm/neon.rs
+++ b/library/stdarch/crates/core_arch/src/arm/neon.rs
@@ -289,6 +289,8 @@ pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t {
}
/// Load multiple single-element structures to one, two, three, or four registers.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(vldr))]
@@ -297,6 +299,8 @@ pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t {
}
/// Load multiple single-element structures to one, two, three, or four registers.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr("vld1.64"))]
@@ -481,6 +485,8 @@ pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64)
#[inline]
#[target_feature(enable = "neon,aes,v8")]
#[cfg_attr(test, assert_instr("vst1.64"))]
@@ -489,6 +495,8 @@ pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)
#[inline]
#[target_feature(enable = "neon,aes,v8")]
#[cfg_attr(test, assert_instr("vst1.64"))]
@@ -1033,6 +1041,7 @@ pub unsafe fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4
int16x4_t(n, n, n, n),
))
}
+
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon,v7")]
@@ -1047,7 +1056,10 @@ pub unsafe fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x
int16x8_t(n, n, n, n, n, n, n, n),
))
}
+
/// Shift Left and Insert (immediate)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p64)
#[inline]
#[target_feature(enable = "neon,v7,aes")]
#[cfg_attr(test, assert_instr("vsli.64", N = 1))]
@@ -1060,7 +1072,10 @@ pub unsafe fn vsli_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1
int64x1_t(N as i64),
))
}
+
/// Shift Left and Insert (immediate)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)
#[inline]
#[target_feature(enable = "neon,v7,aes")]
#[cfg_attr(test, assert_instr("vsli.64", N = 1))]
@@ -1317,7 +1332,10 @@ pub unsafe fn vsriq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x
int16x8_t(n, n, n, n, n, n, n, n),
))
}
+
/// Shift Right and Insert (immediate)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p64)
#[inline]
#[target_feature(enable = "neon,v7,aes")]
#[cfg_attr(test, assert_instr("vsri.64", N = 1))]
@@ -1330,7 +1348,10 @@ pub unsafe fn vsri_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1
int64x1_t(-N as i64),
))
}
+
/// Shift Right and Insert (immediate)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64)
#[inline]
#[target_feature(enable = "neon,v7,aes")]
#[cfg_attr(test, assert_instr("vsri.64", N = 1))]
diff --git a/library/stdarch/crates/core_arch/src/arm/v7.rs b/library/stdarch/crates/core_arch/src/arm/v7.rs
index e7507f9b9..59beaf722 100644
--- a/library/stdarch/crates/core_arch/src/arm/v7.rs
+++ b/library/stdarch/crates/core_arch/src/arm/v7.rs
@@ -76,7 +76,6 @@ mod tests {
}
#[test]
- #[cfg(dont_compile_me)] // FIXME need to add `v7` upstream in rustc
fn _rbit_u32() {
unsafe {
assert_eq!(
diff --git a/library/stdarch/crates/core_arch/src/arm_shared/crc.rs b/library/stdarch/crates/core_arch/src/arm_shared/crc.rs
index e0d0fbe35..779d1ed42 100644
--- a/library/stdarch/crates/core_arch/src/arm_shared/crc.rs
+++ b/library/stdarch/crates/core_arch/src/arm_shared/crc.rs
@@ -24,6 +24,8 @@ extern "unadjusted" {
use stdarch_test::assert_instr;
/// CRC32 single round checksum for bytes (8 bits).
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32b)
#[inline]
#[target_feature(enable = "crc")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
@@ -33,6 +35,8 @@ pub unsafe fn __crc32b(crc: u32, data: u8) -> u32 {
}
/// CRC32 single round checksum for half words (16 bits).
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32h)
#[inline]
#[target_feature(enable = "crc")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
@@ -42,6 +46,8 @@ pub unsafe fn __crc32h(crc: u32, data: u16) -> u32 {
}
/// CRC32 single round checksum for words (32 bits).
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32w)
#[inline]
#[target_feature(enable = "crc")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
@@ -51,6 +57,8 @@ pub unsafe fn __crc32w(crc: u32, data: u32) -> u32 {
}
/// CRC32-C single round checksum for bytes (8 bits).
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cb)
#[inline]
#[target_feature(enable = "crc")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
@@ -60,6 +68,8 @@ pub unsafe fn __crc32cb(crc: u32, data: u8) -> u32 {
}
/// CRC32-C single round checksum for half words (16 bits).
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32ch)
#[inline]
#[target_feature(enable = "crc")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
@@ -69,6 +79,8 @@ pub unsafe fn __crc32ch(crc: u32, data: u16) -> u32 {
}
/// CRC32-C single round checksum for words (32 bits).
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cw)
#[inline]
#[target_feature(enable = "crc")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
diff --git a/library/stdarch/crates/core_arch/src/arm_shared/crypto.rs b/library/stdarch/crates/core_arch/src/arm_shared/crypto.rs
index 3e9515e59..060091136 100644
--- a/library/stdarch/crates/core_arch/src/arm_shared/crypto.rs
+++ b/library/stdarch/crates/core_arch/src/arm_shared/crypto.rs
@@ -52,6 +52,8 @@ extern "unadjusted" {
use stdarch_test::assert_instr;
/// AES single round encryption.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaeseq_u8)
#[inline]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "aes"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
@@ -61,6 +63,8 @@ pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
}
/// AES single round decryption.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesdq_u8)
#[inline]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "aes"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
@@ -70,6 +74,8 @@ pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
}
/// AES mix columns.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesmcq_u8)
#[inline]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "aes"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
@@ -79,6 +85,8 @@ pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t {
}
/// AES inverse mix columns.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesimcq_u8)
#[inline]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "aes"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
@@ -88,6 +96,8 @@ pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t {
}
/// SHA1 fixed rotate.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1h_u32)
#[inline]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
@@ -97,6 +107,8 @@ pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 {
}
/// SHA1 hash update accelerator, choose.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1cq_u32)
#[inline]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
@@ -106,6 +118,8 @@ pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) ->
}
/// SHA1 hash update accelerator, majority.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1mq_u32)
#[inline]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
@@ -115,6 +129,8 @@ pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) ->
}
/// SHA1 hash update accelerator, parity.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1pq_u32)
#[inline]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
@@ -124,6 +140,8 @@ pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) ->
}
/// SHA1 schedule update accelerator, first part.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su0q_u32)
#[inline]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
@@ -133,6 +151,8 @@ pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_
}
/// SHA1 schedule update accelerator, second part.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su1q_u32)
#[inline]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
@@ -142,6 +162,8 @@ pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t
}
/// SHA256 hash update accelerator.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256hq_u32)
#[inline]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
@@ -155,6 +177,8 @@ pub unsafe fn vsha256hq_u32(
}
/// SHA256 hash update accelerator, upper part.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256h2q_u32)
#[inline]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
@@ -168,6 +192,8 @@ pub unsafe fn vsha256h2q_u32(
}
/// SHA256 schedule update accelerator, first part.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su0q_u32)
#[inline]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
@@ -177,6 +203,8 @@ pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t
}
/// SHA256 schedule update accelerator, second part.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su1q_u32)
#[inline]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs
index d69fbd8e8..ac2709744 100644
--- a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs
+++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs
@@ -10,6 +10,8 @@ use super::*;
use stdarch_test::assert_instr;
/// Vector bitwise and
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21,6 +23,8 @@ pub unsafe fn vand_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
}
/// Vector bitwise and
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -32,6 +36,8 @@ pub unsafe fn vandq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
}
/// Vector bitwise and
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -43,6 +49,8 @@ pub unsafe fn vand_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
}
/// Vector bitwise and
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -54,6 +62,8 @@ pub unsafe fn vandq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
}
/// Vector bitwise and
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -65,6 +75,8 @@ pub unsafe fn vand_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
}
/// Vector bitwise and
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -76,6 +88,8 @@ pub unsafe fn vandq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
}
/// Vector bitwise and
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -87,6 +101,8 @@ pub unsafe fn vand_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
}
/// Vector bitwise and
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -98,6 +114,8 @@ pub unsafe fn vandq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
}
/// Vector bitwise and
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -109,6 +127,8 @@ pub unsafe fn vand_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
}
/// Vector bitwise and
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -120,6 +140,8 @@ pub unsafe fn vandq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
}
/// Vector bitwise and
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -131,6 +153,8 @@ pub unsafe fn vand_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
}
/// Vector bitwise and
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -142,6 +166,8 @@ pub unsafe fn vandq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
}
/// Vector bitwise and
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -153,6 +179,8 @@ pub unsafe fn vand_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
}
/// Vector bitwise and
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -164,6 +192,8 @@ pub unsafe fn vandq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
}
/// Vector bitwise and
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -175,6 +205,8 @@ pub unsafe fn vand_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
}
/// Vector bitwise and
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -186,6 +218,8 @@ pub unsafe fn vandq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
}
/// Vector bitwise or (immediate, inclusive)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -197,6 +231,8 @@ pub unsafe fn vorr_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
}
/// Vector bitwise or (immediate, inclusive)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -208,6 +244,8 @@ pub unsafe fn vorrq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
}
/// Vector bitwise or (immediate, inclusive)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -219,6 +257,8 @@ pub unsafe fn vorr_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
}
/// Vector bitwise or (immediate, inclusive)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -230,6 +270,8 @@ pub unsafe fn vorrq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
}
/// Vector bitwise or (immediate, inclusive)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -241,6 +283,8 @@ pub unsafe fn vorr_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
}
/// Vector bitwise or (immediate, inclusive)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -252,6 +296,8 @@ pub unsafe fn vorrq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
}
/// Vector bitwise or (immediate, inclusive)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -263,6 +309,8 @@ pub unsafe fn vorr_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
}
/// Vector bitwise or (immediate, inclusive)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -274,6 +322,8 @@ pub unsafe fn vorrq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
}
/// Vector bitwise or (immediate, inclusive)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -285,6 +335,8 @@ pub unsafe fn vorr_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
}
/// Vector bitwise or (immediate, inclusive)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -296,6 +348,8 @@ pub unsafe fn vorrq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
}
/// Vector bitwise or (immediate, inclusive)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -307,6 +361,8 @@ pub unsafe fn vorr_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
}
/// Vector bitwise or (immediate, inclusive)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -318,6 +374,8 @@ pub unsafe fn vorrq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
}
/// Vector bitwise or (immediate, inclusive)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -329,6 +387,8 @@ pub unsafe fn vorr_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
}
/// Vector bitwise or (immediate, inclusive)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -340,6 +400,8 @@ pub unsafe fn vorrq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
}
/// Vector bitwise or (immediate, inclusive)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -351,6 +413,8 @@ pub unsafe fn vorr_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
}
/// Vector bitwise or (immediate, inclusive)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -362,6 +426,8 @@ pub unsafe fn vorrq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
}
/// Vector bitwise exclusive or (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -373,6 +439,8 @@ pub unsafe fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
}
/// Vector bitwise exclusive or (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -384,6 +452,8 @@ pub unsafe fn veorq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
}
/// Vector bitwise exclusive or (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -395,6 +465,8 @@ pub unsafe fn veor_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
}
/// Vector bitwise exclusive or (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -406,6 +478,8 @@ pub unsafe fn veorq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
}
/// Vector bitwise exclusive or (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -417,6 +491,8 @@ pub unsafe fn veor_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
}
/// Vector bitwise exclusive or (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -428,6 +504,8 @@ pub unsafe fn veorq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
}
/// Vector bitwise exclusive or (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -439,6 +517,8 @@ pub unsafe fn veor_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
}
/// Vector bitwise exclusive or (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -450,6 +530,8 @@ pub unsafe fn veorq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
}
/// Vector bitwise exclusive or (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -461,6 +543,8 @@ pub unsafe fn veor_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
}
/// Vector bitwise exclusive or (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -472,6 +556,8 @@ pub unsafe fn veorq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
}
/// Vector bitwise exclusive or (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -483,6 +569,8 @@ pub unsafe fn veor_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
}
/// Vector bitwise exclusive or (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -494,6 +582,8 @@ pub unsafe fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
}
/// Vector bitwise exclusive or (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -505,6 +595,8 @@ pub unsafe fn veor_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
}
/// Vector bitwise exclusive or (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -516,6 +608,8 @@ pub unsafe fn veorq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
}
/// Vector bitwise exclusive or (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -527,6 +621,8 @@ pub unsafe fn veor_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
}
/// Vector bitwise exclusive or (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -538,6 +634,8 @@ pub unsafe fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
}
/// Absolute difference between the arguments
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -555,6 +653,8 @@ vabd_s8_(a, b)
}
/// Absolute difference between the arguments
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -572,6 +672,8 @@ vabdq_s8_(a, b)
}
/// Absolute difference between the arguments
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -589,6 +691,8 @@ vabd_s16_(a, b)
}
/// Absolute difference between the arguments
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -606,6 +710,8 @@ vabdq_s16_(a, b)
}
/// Absolute difference between the arguments
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -623,6 +729,8 @@ vabd_s32_(a, b)
}
/// Absolute difference between the arguments
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -640,6 +748,8 @@ vabdq_s32_(a, b)
}
/// Absolute difference between the arguments
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -657,6 +767,8 @@ vabd_u8_(a, b)
}
/// Absolute difference between the arguments
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -674,6 +786,8 @@ vabdq_u8_(a, b)
}
/// Absolute difference between the arguments
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -691,6 +805,8 @@ vabd_u16_(a, b)
}
/// Absolute difference between the arguments
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -708,6 +824,8 @@ vabdq_u16_(a, b)
}
/// Absolute difference between the arguments
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -725,6 +843,8 @@ vabd_u32_(a, b)
}
/// Absolute difference between the arguments
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -742,6 +862,8 @@ vabdq_u32_(a, b)
}
/// Absolute difference between the floating-point arguments
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -759,6 +881,8 @@ vabd_f32_(a, b)
}
/// Absolute difference between the floating-point arguments
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -776,6 +900,8 @@ vabdq_f32_(a, b)
}
/// Unsigned Absolute difference Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -787,6 +913,8 @@ pub unsafe fn vabdl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t {
}
/// Unsigned Absolute difference Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -798,6 +926,8 @@ pub unsafe fn vabdl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t {
}
/// Unsigned Absolute difference Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -809,6 +939,8 @@ pub unsafe fn vabdl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t {
}
/// Signed Absolute difference Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -821,6 +953,8 @@ pub unsafe fn vabdl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t {
}
/// Signed Absolute difference Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -833,6 +967,8 @@ pub unsafe fn vabdl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t {
}
/// Signed Absolute difference Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -845,6 +981,8 @@ pub unsafe fn vabdl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t {
}
/// Compare bitwise Equal (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -856,6 +994,8 @@ pub unsafe fn vceq_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
}
/// Compare bitwise Equal (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -867,6 +1007,8 @@ pub unsafe fn vceqq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
}
/// Compare bitwise Equal (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -878,6 +1020,8 @@ pub unsafe fn vceq_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
}
/// Compare bitwise Equal (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -889,6 +1033,8 @@ pub unsafe fn vceqq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
}
/// Compare bitwise Equal (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -900,6 +1046,8 @@ pub unsafe fn vceq_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
}
/// Compare bitwise Equal (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -911,6 +1059,8 @@ pub unsafe fn vceqq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
}
/// Compare bitwise Equal (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -922,6 +1072,8 @@ pub unsafe fn vceq_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t {
}
/// Compare bitwise Equal (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -933,6 +1085,8 @@ pub unsafe fn vceqq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t {
}
/// Compare bitwise Equal (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -944,6 +1098,8 @@ pub unsafe fn vceq_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t {
}
/// Compare bitwise Equal (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -955,6 +1111,8 @@ pub unsafe fn vceqq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t {
}
/// Compare bitwise Equal (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -966,6 +1124,8 @@ pub unsafe fn vceq_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t {
}
/// Compare bitwise Equal (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -977,6 +1137,8 @@ pub unsafe fn vceqq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t {
}
/// Compare bitwise Equal (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -988,6 +1150,8 @@ pub unsafe fn vceq_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t {
}
/// Compare bitwise Equal (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -999,6 +1163,8 @@ pub unsafe fn vceqq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t {
}
/// Floating-point compare equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1010,6 +1176,8 @@ pub unsafe fn vceq_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t {
}
/// Floating-point compare equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1021,6 +1189,8 @@ pub unsafe fn vceqq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
}
/// Signed compare bitwise Test bits nonzero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1034,6 +1204,8 @@ pub unsafe fn vtst_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t {
}
/// Signed compare bitwise Test bits nonzero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1047,6 +1219,8 @@ pub unsafe fn vtstq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t {
}
/// Signed compare bitwise Test bits nonzero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1060,6 +1234,8 @@ pub unsafe fn vtst_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t {
}
/// Signed compare bitwise Test bits nonzero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1073,6 +1249,8 @@ pub unsafe fn vtstq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t {
}
/// Signed compare bitwise Test bits nonzero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1086,6 +1264,8 @@ pub unsafe fn vtst_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t {
}
/// Signed compare bitwise Test bits nonzero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1099,6 +1279,8 @@ pub unsafe fn vtstq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t {
}
/// Signed compare bitwise Test bits nonzero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1112,6 +1294,8 @@ pub unsafe fn vtst_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t {
}
/// Signed compare bitwise Test bits nonzero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1125,6 +1309,8 @@ pub unsafe fn vtstq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t {
}
/// Signed compare bitwise Test bits nonzero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1138,6 +1324,8 @@ pub unsafe fn vtst_p16(a: poly16x4_t, b: poly16x4_t) -> uint16x4_t {
}
/// Signed compare bitwise Test bits nonzero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1151,6 +1339,8 @@ pub unsafe fn vtstq_p16(a: poly16x8_t, b: poly16x8_t) -> uint16x8_t {
}
/// Unsigned compare bitwise Test bits nonzero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1164,6 +1354,8 @@ pub unsafe fn vtst_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
}
/// Unsigned compare bitwise Test bits nonzero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1177,6 +1369,8 @@ pub unsafe fn vtstq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
}
/// Unsigned compare bitwise Test bits nonzero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1190,6 +1384,8 @@ pub unsafe fn vtst_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
}
/// Unsigned compare bitwise Test bits nonzero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1203,6 +1399,8 @@ pub unsafe fn vtstq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
}
/// Unsigned compare bitwise Test bits nonzero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1216,6 +1414,8 @@ pub unsafe fn vtst_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
}
/// Unsigned compare bitwise Test bits nonzero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1229,6 +1429,8 @@ pub unsafe fn vtstq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
}
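
vtst_*/vtstq_* AND the two inputs and set a lane to all ones when the lanes share at least one set bit. A small sketch (illustrative, not from the patch) under the same aarch64 assumption:

    #[cfg(target_arch = "aarch64")]
    fn vtst_sketch() {
        use core::arch::aarch64::*;
        unsafe {
            let a = vdup_n_u8(0b0000_1100);
            let b = vdup_n_u8(0b0000_0100); // shares bit 2 with `a`
            let c = vdup_n_u8(0b0011_0000); // no bits in common with `a`
            assert_eq!(vget_lane_u8::<0>(vtst_u8(a, b)), u8::MAX); // a & b != 0
            assert_eq!(vget_lane_u8::<0>(vtst_u8(a, c)), 0);       // a & c == 0
        }
    }
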
/// Floating-point absolute value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1240,6 +1442,8 @@ pub unsafe fn vabs_f32(a: float32x2_t) -> float32x2_t {
}
/// Floating-point absolute value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1251,6 +1455,8 @@ pub unsafe fn vabsq_f32(a: float32x4_t) -> float32x4_t {
}
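
vabs_f32/vabsq_f32 take the lane-wise absolute value. A sketch (illustrative only), again assuming aarch64:

    #[cfg(target_arch = "aarch64")]
    fn vabs_sketch() {
        use core::arch::aarch64::*;
        unsafe {
            let a = vdupq_n_f32(-1.5);
            let r = vabsq_f32(a); // every lane becomes 1.5
            assert_eq!(vgetq_lane_f32::<0>(r), 1.5);
        }
    }
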
/// Compare signed greater than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1262,6 +1468,8 @@ pub unsafe fn vcgt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t {
}
/// Compare signed greater than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1273,6 +1481,8 @@ pub unsafe fn vcgtq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t {
}
/// Compare signed greater than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1284,6 +1494,8 @@ pub unsafe fn vcgt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t {
}
/// Compare signed greater than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1295,6 +1507,8 @@ pub unsafe fn vcgtq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t {
}
/// Compare signed greater than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1306,6 +1520,8 @@ pub unsafe fn vcgt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t {
}
/// Compare signed greater than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1317,6 +1533,8 @@ pub unsafe fn vcgtq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t {
}
/// Compare unsigned higher
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1328,6 +1546,8 @@ pub unsafe fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
}
/// Compare unsigned higher
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1339,6 +1559,8 @@ pub unsafe fn vcgtq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
}
/// Compare unsigned higher
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1350,6 +1572,8 @@ pub unsafe fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
}
/// Compare unsigned higher
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1361,6 +1585,8 @@ pub unsafe fn vcgtq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
}
/// Compare unsigned higher
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1372,6 +1598,8 @@ pub unsafe fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
}
/// Compare unsigned higher
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1383,6 +1611,8 @@ pub unsafe fn vcgtq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
}
/// Floating-point compare greater than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1394,6 +1624,8 @@ pub unsafe fn vcgt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t {
}
/// Floating-point compare greater than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1405,6 +1637,8 @@ pub unsafe fn vcgtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
}
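
The vcgt_* family ("greater than" for signed and float lanes, "higher" for unsigned lanes) again yields all-ones/zero lane masks. A sketch, same assumptions as above:

    #[cfg(target_arch = "aarch64")]
    fn vcgt_sketch() {
        use core::arch::aarch64::*;
        unsafe {
            let a = vcreate_s8(0x0807_0605_0403_0201); // lanes [1, 2, ..., 8]
            let b = vdup_n_s8(4);
            let gt = vcgt_s8(a, b);
            assert_eq!(vget_lane_u8::<3>(gt), 0);       // 4 > 4 is false
            assert_eq!(vget_lane_u8::<4>(gt), u8::MAX); // 5 > 4 is true
        }
    }
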
/// Compare signed less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1416,6 +1650,8 @@ pub unsafe fn vclt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t {
}
/// Compare signed less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1427,6 +1663,8 @@ pub unsafe fn vcltq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t {
}
/// Compare signed less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1438,6 +1676,8 @@ pub unsafe fn vclt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t {
}
/// Compare signed less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1449,6 +1689,8 @@ pub unsafe fn vcltq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t {
}
/// Compare signed less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1460,6 +1702,8 @@ pub unsafe fn vclt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t {
}
/// Compare signed less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1471,6 +1715,8 @@ pub unsafe fn vcltq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t {
}
/// Compare unsigned less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1482,6 +1728,8 @@ pub unsafe fn vclt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
}
/// Compare unsigned less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1493,6 +1741,8 @@ pub unsafe fn vcltq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
}
/// Compare unsigned less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1504,6 +1754,8 @@ pub unsafe fn vclt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
}
/// Compare unsigned less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1515,6 +1767,8 @@ pub unsafe fn vcltq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
}
/// Compare unsigned less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1526,6 +1780,8 @@ pub unsafe fn vclt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
}
/// Compare unsigned less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1537,6 +1793,8 @@ pub unsafe fn vcltq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
}
/// Floating-point compare less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1548,6 +1806,8 @@ pub unsafe fn vclt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t {
}
/// Floating-point compare less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1559,6 +1819,8 @@ pub unsafe fn vcltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
}
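
vclt_*(a, b) is the mirrored comparison: it marks the lanes where a is less than b, so it describes the same mask as vcgt_*(b, a). A short illustrative sketch:

    #[cfg(target_arch = "aarch64")]
    fn vclt_sketch() {
        use core::arch::aarch64::*;
        unsafe {
            let a = vdup_n_s16(1);
            let b = vdup_n_s16(2);
            assert_eq!(vget_lane_u16::<0>(vclt_s16(a, b)), u16::MAX);
            assert_eq!(vget_lane_u16::<0>(vcgt_s16(b, a)), u16::MAX); // same mask, operands swapped
        }
    }
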
/// Compare signed less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1570,6 +1832,8 @@ pub unsafe fn vcle_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t {
}
/// Compare signed less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1581,6 +1845,8 @@ pub unsafe fn vcleq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t {
}
/// Compare signed less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1592,6 +1858,8 @@ pub unsafe fn vcle_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t {
}
/// Compare signed less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1603,6 +1871,8 @@ pub unsafe fn vcleq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t {
}
/// Compare signed less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1614,6 +1884,8 @@ pub unsafe fn vcle_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t {
}
/// Compare signed less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1625,6 +1897,8 @@ pub unsafe fn vcleq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t {
}
/// Compare unsigned less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1636,6 +1910,8 @@ pub unsafe fn vcle_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
}
/// Compare unsigned less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1647,6 +1923,8 @@ pub unsafe fn vcleq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
}
/// Compare unsigned less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1658,6 +1936,8 @@ pub unsafe fn vcle_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
}
/// Compare unsigned less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1669,6 +1949,8 @@ pub unsafe fn vcleq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
}
/// Compare unsigned less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1680,6 +1962,8 @@ pub unsafe fn vcle_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
}
/// Compare unsigned less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1691,6 +1975,8 @@ pub unsafe fn vcleq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
}
/// Floating-point compare less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1702,6 +1988,8 @@ pub unsafe fn vcle_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t {
}
/// Floating-point compare less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1713,6 +2001,8 @@ pub unsafe fn vcleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
}
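
vcle_* adds equality to the "less than" test, so equal lanes also produce an all-ones mask. Illustrative sketch, aarch64 assumed:

    #[cfg(target_arch = "aarch64")]
    fn vcle_sketch() {
        use core::arch::aarch64::*;
        unsafe {
            let a = vdup_n_u32(7);
            let b = vdup_n_u32(7);
            assert_eq!(vget_lane_u32::<0>(vcle_u32(a, b)), u32::MAX); // 7 <= 7
            assert_eq!(vget_lane_u32::<0>(vclt_u32(a, b)), 0);        // 7 <  7 is false
        }
    }
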
/// Compare signed greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1724,6 +2014,8 @@ pub unsafe fn vcge_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t {
}
/// Compare signed greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1735,6 +2027,8 @@ pub unsafe fn vcgeq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t {
}
/// Compare signed greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1746,6 +2040,8 @@ pub unsafe fn vcge_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t {
}
/// Compare signed greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1757,6 +2053,8 @@ pub unsafe fn vcgeq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t {
}
/// Compare signed greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1768,6 +2066,8 @@ pub unsafe fn vcge_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t {
}
/// Compare signed greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1779,6 +2079,8 @@ pub unsafe fn vcgeq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t {
}
/// Compare unsigned greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1790,6 +2092,8 @@ pub unsafe fn vcge_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
}
/// Compare unsigned greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1801,6 +2105,8 @@ pub unsafe fn vcgeq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
}
/// Compare unsigned greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1812,6 +2118,8 @@ pub unsafe fn vcge_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
}
/// Compare unsigned greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1823,6 +2131,8 @@ pub unsafe fn vcgeq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
}
/// Compare unsigned greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1834,6 +2144,8 @@ pub unsafe fn vcge_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
}
/// Compare unsigned greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1845,6 +2157,8 @@ pub unsafe fn vcgeq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
}
/// Floating-point compare greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1856,6 +2170,8 @@ pub unsafe fn vcge_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t {
}
/// Floating-point compare greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1867,6 +2183,8 @@ pub unsafe fn vcgeq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
}
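
vcge_* is the complementary "greater than or equal" mask; for the floating-point forms, any comparison involving NaN is false and produces a zero lane. A sketch (illustrative only):

    #[cfg(target_arch = "aarch64")]
    fn vcge_sketch() {
        use core::arch::aarch64::*;
        unsafe {
            let a = vdup_n_f32(2.0);
            let b = vdup_n_f32(2.0);
            let nan = vdup_n_f32(f32::NAN);
            assert_eq!(vget_lane_u32::<0>(vcge_f32(a, b)), u32::MAX); // 2.0 >= 2.0
            assert_eq!(vget_lane_u32::<0>(vcge_f32(a, nan)), 0);      // NaN compares as false
        }
    }
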
/// Count leading sign bits
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1884,6 +2202,8 @@ vcls_s8_(a)
}
/// Count leading sign bits
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1901,6 +2221,8 @@ vclsq_s8_(a)
}
/// Count leading sign bits
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1918,6 +2240,8 @@ vcls_s16_(a)
}
/// Count leading sign bits
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1935,6 +2259,8 @@ vclsq_s16_(a)
}
/// Count leading sign bits
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1952,6 +2278,8 @@ vcls_s32_(a)
}
/// Count leading sign bits
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1969,6 +2297,8 @@ vclsq_s32_(a)
}
/// Count leading sign bits
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1980,6 +2310,8 @@ pub unsafe fn vcls_u8(a: uint8x8_t) -> int8x8_t {
}
/// Count leading sign bits
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1991,6 +2323,8 @@ pub unsafe fn vclsq_u8(a: uint8x16_t) -> int8x16_t {
}
/// Count leading sign bits
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2002,6 +2336,8 @@ pub unsafe fn vcls_u16(a: uint16x4_t) -> int16x4_t {
}
/// Count leading sign bits
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2013,6 +2349,8 @@ pub unsafe fn vclsq_u16(a: uint16x8_t) -> int16x8_t {
}
/// Count leading sign bits
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2024,6 +2362,8 @@ pub unsafe fn vcls_u32(a: uint32x2_t) -> int32x2_t {
}
/// Count leading sign bits
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2035,6 +2375,8 @@ pub unsafe fn vclsq_u32(a: uint32x4_t) -> int32x4_t {
}
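
vcls_* counts, per lane, how many bits directly below the sign bit repeat the sign bit; the sign bit itself is not counted. A small illustrative sketch:

    #[cfg(target_arch = "aarch64")]
    fn vcls_sketch() {
        use core::arch::aarch64::*;
        unsafe {
            assert_eq!(vget_lane_s8::<0>(vcls_s8(vdup_n_s8(0))), 7);  // 0b0000_0000
            assert_eq!(vget_lane_s8::<0>(vcls_s8(vdup_n_s8(-1))), 7); // 0b1111_1111
            assert_eq!(vget_lane_s8::<0>(vcls_s8(vdup_n_s8(1))), 6);  // 0b0000_0001
        }
    }
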
/// Count leading zero bits
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2046,6 +2388,8 @@ pub unsafe fn vclz_s8(a: int8x8_t) -> int8x8_t {
}
/// Count leading zero bits
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2057,6 +2401,8 @@ pub unsafe fn vclzq_s8(a: int8x16_t) -> int8x16_t {
}
/// Count leading zero bits
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2068,6 +2414,8 @@ pub unsafe fn vclz_s16(a: int16x4_t) -> int16x4_t {
}
/// Count leading zero bits
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2079,6 +2427,8 @@ pub unsafe fn vclzq_s16(a: int16x8_t) -> int16x8_t {
}
/// Count leading zero bits
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2090,6 +2440,8 @@ pub unsafe fn vclz_s32(a: int32x2_t) -> int32x2_t {
}
/// Count leading zero bits
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2101,6 +2453,8 @@ pub unsafe fn vclzq_s32(a: int32x4_t) -> int32x4_t {
}
/// Count leading zero bits
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2112,6 +2466,8 @@ pub unsafe fn vclz_u8(a: uint8x8_t) -> uint8x8_t {
}
/// Count leading zero bits
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2123,6 +2479,8 @@ pub unsafe fn vclzq_u8(a: uint8x16_t) -> uint8x16_t {
}
/// Count leading zero bits
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2134,6 +2492,8 @@ pub unsafe fn vclz_u16(a: uint16x4_t) -> uint16x4_t {
}
/// Count leading zero bits
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2145,6 +2505,8 @@ pub unsafe fn vclzq_u16(a: uint16x8_t) -> uint16x8_t {
}
/// Count leading zero bits
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2156,6 +2518,8 @@ pub unsafe fn vclz_u32(a: uint32x2_t) -> uint32x2_t {
}
/// Count leading zero bits
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2167,6 +2531,8 @@ pub unsafe fn vclzq_u32(a: uint32x4_t) -> uint32x4_t {
}
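
vclz_* is the per-lane count of leading zero bits, the vector counterpart of u8::leading_zeros and friends. Sketch (illustrative, aarch64 assumed):

    #[cfg(target_arch = "aarch64")]
    fn vclz_sketch() {
        use core::arch::aarch64::*;
        unsafe {
            assert_eq!(vget_lane_u8::<0>(vclz_u8(vdup_n_u8(1))), 7);    // 0b0000_0001
            assert_eq!(vget_lane_u8::<0>(vclz_u8(vdup_n_u8(0x80))), 0); // top bit already set
        }
    }
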
/// Floating-point absolute compare greater than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2184,6 +2550,8 @@ vcagt_f32_(a, b)
}
/// Floating-point absolute compare greater than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2201,6 +2569,8 @@ vcagtq_f32_(a, b)
}
/// Floating-point absolute compare greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2218,6 +2588,8 @@ vcage_f32_(a, b)
}
/// Floating-point absolute compare greater than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2235,6 +2607,8 @@ vcageq_f32_(a, b)
}
/// Floating-point absolute compare less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2246,6 +2620,8 @@ pub unsafe fn vcalt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t {
}
/// Floating-point absolute compare less than
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2257,6 +2633,8 @@ pub unsafe fn vcaltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
}
/// Floating-point absolute compare less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2268,6 +2646,8 @@ pub unsafe fn vcale_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t {
}
/// Floating-point absolute compare less than or equal
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2279,6 +2659,8 @@ pub unsafe fn vcaleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
}
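
The vcagt/vcage/vcalt/vcale intrinsics compare the absolute values of their operands, which makes magnitude tests such as |a| > |b| a single operation. A sketch, not part of the patch:

    #[cfg(target_arch = "aarch64")]
    fn vcagt_sketch() {
        use core::arch::aarch64::*;
        unsafe {
            let a = vdup_n_f32(-3.0);
            let b = vdup_n_f32(2.0);
            assert_eq!(vget_lane_u32::<0>(vcagt_f32(a, b)), u32::MAX); // |-3.0| > |2.0|
            assert_eq!(vget_lane_u32::<0>(vcgt_f32(a, b)), 0);         // but -3.0 > 2.0 is false
        }
    }
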
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2290,6 +2672,8 @@ pub unsafe fn vcreate_s8(a: u64) -> int8x8_t {
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2301,6 +2685,8 @@ pub unsafe fn vcreate_s16(a: u64) -> int16x4_t {
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2312,6 +2698,8 @@ pub unsafe fn vcreate_s32(a: u64) -> int32x2_t {
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2323,6 +2711,8 @@ pub unsafe fn vcreate_s64(a: u64) -> int64x1_t {
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2334,6 +2724,8 @@ pub unsafe fn vcreate_u8(a: u64) -> uint8x8_t {
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2345,6 +2737,8 @@ pub unsafe fn vcreate_u16(a: u64) -> uint16x4_t {
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2356,6 +2750,8 @@ pub unsafe fn vcreate_u32(a: u64) -> uint32x2_t {
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2367,6 +2763,8 @@ pub unsafe fn vcreate_u64(a: u64) -> uint64x1_t {
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2378,6 +2776,8 @@ pub unsafe fn vcreate_p8(a: u64) -> poly8x8_t {
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2389,6 +2789,8 @@ pub unsafe fn vcreate_p16(a: u64) -> poly16x4_t {
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -2400,6 +2802,8 @@ pub unsafe fn vcreate_p64(a: u64) -> poly64x1_t {
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2411,6 +2815,8 @@ pub unsafe fn vcreate_f32(a: u64) -> float32x2_t {
}
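
In practice the vcreate_* intrinsics reinterpret one u64 as a 64-bit vector of narrower lanes, with lane 0 taken from the least significant bits on little-endian targets. Illustrative sketch:

    #[cfg(target_arch = "aarch64")]
    fn vcreate_sketch() {
        use core::arch::aarch64::*;
        unsafe {
            let v = vcreate_u8(0x0807_0605_0403_0201);
            assert_eq!(vget_lane_u8::<0>(v), 0x01); // low byte  -> lane 0
            assert_eq!(vget_lane_u8::<7>(v), 0x08); // high byte -> lane 7
        }
    }
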
/// Fixed-point convert to floating-point
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2422,6 +2828,8 @@ pub unsafe fn vcvt_f32_s32(a: int32x2_t) -> float32x2_t {
}
/// Fixed-point convert to floating-point
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f32_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2433,6 +2841,8 @@ pub unsafe fn vcvtq_f32_s32(a: int32x4_t) -> float32x4_t {
}
/// Fixed-point convert to floating-point
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2444,6 +2854,8 @@ pub unsafe fn vcvt_f32_u32(a: uint32x2_t) -> float32x2_t {
}
/// Fixed-point convert to floating-point
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f32_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2455,6 +2867,8 @@ pub unsafe fn vcvtq_f32_u32(a: uint32x4_t) -> float32x4_t {
}
/// Fixed-point convert to floating-point
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -2471,6 +2885,8 @@ vcvt_n_f32_s32_(a, N)
}
/// Fixed-point convert to floating-point
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -2488,6 +2904,8 @@ vcvt_n_f32_s32_(a, N)
}
/// Fixed-point convert to floating-point
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -2504,6 +2922,8 @@ vcvtq_n_f32_s32_(a, N)
}
/// Fixed-point convert to floating-point
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -2521,6 +2941,8 @@ vcvtq_n_f32_s32_(a, N)
}
/// Fixed-point convert to floating-point
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_u32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -2537,6 +2959,8 @@ vcvt_n_f32_u32_(a, N)
}
/// Fixed-point convert to floating-point
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_u32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -2554,6 +2978,8 @@ vcvt_n_f32_u32_(a, N)
}
/// Fixed-point convert to floating-point
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_u32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -2570,6 +2996,8 @@ vcvtq_n_f32_u32_(a, N)
}
/// Fixed-point convert to floating-point
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_u32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -2587,6 +3015,8 @@ vcvtq_n_f32_u32_(a, N)
}
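
The vcvt_n_* conversions treat the integer input as fixed point with N fractional bits, i.e. they divide by 2^N after converting; in the Rust bindings N is the const generic shown above and must fit the lane width. A minimal sketch (illustrative only, aarch64 assumed):

    #[cfg(target_arch = "aarch64")]
    fn vcvt_n_sketch() {
        use core::arch::aarch64::*;
        unsafe {
            // Interpret 256 as a value with 8 fractional bits: 256 / 2^8 == 1.0.
            let a = vdup_n_s32(256);
            let f = vcvt_n_f32_s32::<8>(a);
            assert_eq!(vget_lane_f32::<0>(f), 1.0);
        }
    }
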
/// Floating-point convert to fixed-point, rounding toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s32_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -2603,6 +3033,8 @@ vcvt_n_s32_f32_(a, N)
}
/// Floating-point convert to fixed-point, rounding toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s32_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -2620,6 +3052,8 @@ vcvt_n_s32_f32_(a, N)
}
/// Floating-point convert to fixed-point, rounding toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s32_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -2636,6 +3070,8 @@ vcvtq_n_s32_f32_(a, N)
}
/// Floating-point convert to fixed-point, rounding toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s32_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -2653,6 +3089,8 @@ vcvtq_n_s32_f32_(a, N)
}
/// Floating-point convert to fixed-point, rounding toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u32_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -2669,6 +3107,8 @@ vcvt_n_u32_f32_(a, N)
}
/// Floating-point convert to fixed-point, rounding toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u32_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -2686,6 +3126,8 @@ vcvt_n_u32_f32_(a, N)
}
/// Floating-point convert to fixed-point, rounding toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u32_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -2702,6 +3144,8 @@ vcvtq_n_u32_f32_(a, N)
}
/// Floating-point convert to fixed-point, rounding toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u32_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -2719,6 +3163,8 @@ vcvtq_n_u32_f32_(a, N)
}
/// Floating-point convert to signed fixed-point, rounding toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2736,6 +3182,8 @@ vcvt_s32_f32_(a)
}
/// Floating-point convert to signed fixed-point, rounding toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2753,6 +3201,8 @@ vcvtq_s32_f32_(a)
}
/// Floating-point convert to unsigned fixed-point, rounding toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2770,6 +3220,8 @@ vcvt_u32_f32_(a)
}
/// Floating-point convert to unsigned fixed-point, rounding toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2787,6 +3239,8 @@ vcvtq_u32_f32_(a)
}
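
vcvt_s32_f32/vcvt_u32_f32 convert toward zero, matching the truncating behaviour of a scalar `as` cast rather than round-to-nearest. Sketch (illustrative, not from the patch):

    #[cfg(target_arch = "aarch64")]
    fn vcvt_trunc_sketch() {
        use core::arch::aarch64::*;
        unsafe {
            let f = vdup_n_f32(-1.9);
            let i = vcvt_s32_f32(f);
            assert_eq!(vget_lane_s32::<0>(i), -1); // truncates toward zero, like `-1.9f32 as i32`
        }
    }
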
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2800,6 +3254,8 @@ pub unsafe fn vdup_lane_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
}
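
vdup_lane_*::<N> broadcasts lane N of the source vector into every lane of the result; the lane index is a const generic checked at compile time. A short illustrative sketch covering the family that follows:

    #[cfg(target_arch = "aarch64")]
    fn vdup_lane_sketch() {
        use core::arch::aarch64::*;
        unsafe {
            let a = vcreate_s16(0x0004_0003_0002_0001); // lanes [1, 2, 3, 4]
            let r = vdup_lane_s16::<2>(a);              // broadcast lane 2 (value 3)
            assert_eq!(vget_lane_s16::<0>(r), 3);
            assert_eq!(vget_lane_s16::<3>(r), 3);
        }
    }
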
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2813,6 +3269,8 @@ pub unsafe fn vdupq_laneq_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2826,6 +3284,8 @@ pub unsafe fn vdup_lane_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2839,6 +3299,8 @@ pub unsafe fn vdupq_laneq_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2852,6 +3314,8 @@ pub unsafe fn vdup_lane_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2865,6 +3329,8 @@ pub unsafe fn vdupq_laneq_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2878,6 +3344,8 @@ pub unsafe fn vdup_laneq_s8<const N: i32>(a: int8x16_t) -> int8x8_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2891,6 +3359,8 @@ pub unsafe fn vdup_laneq_s16<const N: i32>(a: int16x8_t) -> int16x4_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2904,6 +3374,8 @@ pub unsafe fn vdup_laneq_s32<const N: i32>(a: int32x4_t) -> int32x2_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2917,6 +3389,8 @@ pub unsafe fn vdupq_lane_s8<const N: i32>(a: int8x8_t) -> int8x16_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2930,6 +3404,8 @@ pub unsafe fn vdupq_lane_s16<const N: i32>(a: int16x4_t) -> int16x8_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2943,6 +3419,8 @@ pub unsafe fn vdupq_lane_s32<const N: i32>(a: int32x2_t) -> int32x4_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2956,6 +3434,8 @@ pub unsafe fn vdup_lane_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2969,6 +3449,8 @@ pub unsafe fn vdupq_laneq_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2982,6 +3464,8 @@ pub unsafe fn vdup_lane_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -2995,6 +3479,8 @@ pub unsafe fn vdupq_laneq_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3008,6 +3494,8 @@ pub unsafe fn vdup_lane_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3021,6 +3509,8 @@ pub unsafe fn vdupq_laneq_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3034,6 +3524,8 @@ pub unsafe fn vdup_laneq_u8<const N: i32>(a: uint8x16_t) -> uint8x8_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3047,6 +3539,8 @@ pub unsafe fn vdup_laneq_u16<const N: i32>(a: uint16x8_t) -> uint16x4_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3060,6 +3554,8 @@ pub unsafe fn vdup_laneq_u32<const N: i32>(a: uint32x4_t) -> uint32x2_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3073,6 +3569,8 @@ pub unsafe fn vdupq_lane_u8<const N: i32>(a: uint8x8_t) -> uint8x16_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3086,6 +3584,8 @@ pub unsafe fn vdupq_lane_u16<const N: i32>(a: uint16x4_t) -> uint16x8_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3099,6 +3599,8 @@ pub unsafe fn vdupq_lane_u32<const N: i32>(a: uint32x2_t) -> uint32x4_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3112,6 +3614,8 @@ pub unsafe fn vdup_lane_p8<const N: i32>(a: poly8x8_t) -> poly8x8_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3125,6 +3629,8 @@ pub unsafe fn vdupq_laneq_p8<const N: i32>(a: poly8x16_t) -> poly8x16_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3138,6 +3644,8 @@ pub unsafe fn vdup_lane_p16<const N: i32>(a: poly16x4_t) -> poly16x4_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3151,6 +3659,8 @@ pub unsafe fn vdupq_laneq_p16<const N: i32>(a: poly16x8_t) -> poly16x8_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3164,6 +3674,8 @@ pub unsafe fn vdup_laneq_p8<const N: i32>(a: poly8x16_t) -> poly8x8_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3177,6 +3689,8 @@ pub unsafe fn vdup_laneq_p16<const N: i32>(a: poly16x8_t) -> poly16x4_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3190,6 +3704,8 @@ pub unsafe fn vdupq_lane_p8<const N: i32>(a: poly8x8_t) -> poly8x16_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3203,6 +3719,8 @@ pub unsafe fn vdupq_lane_p16<const N: i32>(a: poly16x4_t) -> poly16x8_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3216,6 +3734,8 @@ pub unsafe fn vdupq_laneq_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3229,6 +3749,8 @@ pub unsafe fn vdupq_lane_s64<const N: i32>(a: int64x1_t) -> int64x2_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3242,6 +3764,8 @@ pub unsafe fn vdupq_laneq_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3255,6 +3779,8 @@ pub unsafe fn vdupq_lane_u64<const N: i32>(a: uint64x1_t) -> uint64x2_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3268,6 +3794,8 @@ pub unsafe fn vdup_lane_f32<const N: i32>(a: float32x2_t) -> float32x2_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3281,6 +3809,8 @@ pub unsafe fn vdupq_laneq_f32<const N: i32>(a: float32x4_t) -> float32x4_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3294,6 +3824,8 @@ pub unsafe fn vdup_laneq_f32<const N: i32>(a: float32x4_t) -> float32x2_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3307,6 +3839,8 @@ pub unsafe fn vdupq_lane_f32<const N: i32>(a: float32x2_t) -> float32x4_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3320,6 +3854,8 @@ pub unsafe fn vdup_lane_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3333,6 +3869,8 @@ pub unsafe fn vdup_lane_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3346,6 +3884,8 @@ pub unsafe fn vdup_laneq_s64<const N: i32>(a: int64x2_t) -> int64x1_t {
}
/// Set all vector lanes to the same value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3359,11 +3899,13 @@ pub unsafe fn vdup_laneq_u64<const N: i32>(a: uint64x2_t) -> uint64x1_t {
}
/// Extract vector from pair of vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 4))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 4))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
@@ -3382,11 +3924,13 @@ pub unsafe fn vext_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
}
/// Extract vector from pair of vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 8))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 8))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 15))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
@@ -3413,11 +3957,13 @@ pub unsafe fn vextq_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
}
/// Extract vector from pair of vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 2))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
@@ -3432,11 +3978,13 @@ pub unsafe fn vext_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
}
/// Extract vector from pair of vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 4))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 4))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
@@ -3455,6 +4003,8 @@ pub unsafe fn vextq_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
}
/// Extract vector from pair of vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3472,11 +4022,13 @@ pub unsafe fn vext_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
}
/// Extract vector from pair of vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 2))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
@@ -3491,11 +4043,13 @@ pub unsafe fn vextq_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
}
/// Extract vector from pair of vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 4))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 4))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
@@ -3514,11 +4068,13 @@ pub unsafe fn vext_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
}
/// Extract vector from pair of vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 8))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 8))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 15))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
@@ -3545,11 +4101,13 @@ pub unsafe fn vextq_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t
}
/// Extract vector from pair of vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 2))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
@@ -3564,11 +4122,13 @@ pub unsafe fn vext_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t
}
/// Extract vector from pair of vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 4))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 4))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
@@ -3587,6 +4147,8 @@ pub unsafe fn vextq_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_
}
/// Extract vector from pair of vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3604,11 +4166,13 @@ pub unsafe fn vext_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t
}
/// Extract vector from pair of vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 2))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
@@ -3623,11 +4187,13 @@ pub unsafe fn vextq_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_
}
/// Extract vector from pair of vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 4))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 4))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
@@ -3646,11 +4212,13 @@ pub unsafe fn vext_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
}
/// Extract vector from pair of vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 8))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 8))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 15))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
@@ -3677,11 +4245,13 @@ pub unsafe fn vextq_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t
}
/// Extract vector from pair of vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 2))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
@@ -3696,11 +4266,13 @@ pub unsafe fn vext_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t
}
/// Extract vector from pair of vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 4))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 4))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
@@ -3719,6 +4291,8 @@ pub unsafe fn vextq_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_
}
/// Extract vector from pair of vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3736,6 +4310,8 @@ pub unsafe fn vextq_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
}
/// Extract vector from pair of vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3753,6 +4329,8 @@ pub unsafe fn vextq_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_
}
/// Extract vector from pair of vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3770,11 +4348,13 @@ pub unsafe fn vext_f32<const N: i32>(a: float32x2_t, b: float32x2_t) -> float32x
}
/// Extract vector from pair of vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 2))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_f32<const N: i32>(a: float32x4_t, b: float32x4_t) -> float32x4_t {
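
A brief sketch of the vext family documented above (illustrative, AArch64 assumed): vext_u8::<N> concatenates the upper 8-N lanes of the first operand with the low N lanes of the second.

    #[cfg(target_arch = "aarch64")]
    unsafe fn ext_sketch() {
        // Illustrative example, assuming a NEON-capable AArch64 target.
        use std::arch::aarch64::*;
        let a: uint8x8_t = vdup_n_u8(1);
        let b: uint8x8_t = vdup_n_u8(2);
        // Result is [a3, a4, a5, a6, a7, b0, b1, b2] = [1, 1, 1, 1, 1, 2, 2, 2].
        let r = vext_u8::<3>(a, b);
        assert_eq!(vget_lane_u8::<4>(r), 1);
        assert_eq!(vget_lane_u8::<5>(r), 2);
    }
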
@@ -3789,6 +4369,8 @@ pub unsafe fn vextq_f32<const N: i32>(a: float32x4_t, b: float32x4_t) -> float32
}
/// Multiply-add to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3800,6 +4382,8 @@ pub unsafe fn vmla_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
}
/// Multiply-add to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3811,6 +4395,8 @@ pub unsafe fn vmlaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
}
/// Multiply-add to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3822,6 +4408,8 @@ pub unsafe fn vmla_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
}
/// Multiply-add to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3833,6 +4421,8 @@ pub unsafe fn vmlaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
}
/// Multiply-add to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3844,6 +4434,8 @@ pub unsafe fn vmla_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
}
/// Multiply-add to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3855,6 +4447,8 @@ pub unsafe fn vmlaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
}
/// Multiply-add to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3866,6 +4460,8 @@ pub unsafe fn vmla_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
}
/// Multiply-add to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3877,6 +4473,8 @@ pub unsafe fn vmlaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_
}
/// Multiply-add to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3888,6 +4486,8 @@ pub unsafe fn vmla_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_
}
/// Multiply-add to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3899,6 +4499,8 @@ pub unsafe fn vmlaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8
}
/// Multiply-add to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3910,6 +4512,8 @@ pub unsafe fn vmla_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_
}
/// Multiply-add to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3921,6 +4525,8 @@ pub unsafe fn vmlaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4
}
/// Floating-point multiply-add to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3932,6 +4538,8 @@ pub unsafe fn vmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3
}
/// Floating-point multiply-add to accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
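
A sketch of the multiply-add-to-accumulator intrinsics above; each lane computes a + b * c (illustrative, AArch64 assumed):

    #[cfg(target_arch = "aarch64")]
    unsafe fn mla_sketch() {
        // Illustrative example, assuming a NEON-capable AArch64 target.
        use std::arch::aarch64::*;
        let acc = vdup_n_s16(10);
        let b = vdup_n_s16(3);
        let c = vdup_n_s16(4);
        // Every lane: 10 + 3 * 4 = 22.
        let r = vmla_s16(acc, b, c);
        assert_eq!(vget_lane_s16::<0>(r), 22);
    }
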
@@ -3943,6 +4551,8 @@ pub unsafe fn vmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3954,6 +4564,8 @@ pub unsafe fn vmla_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t {
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3965,6 +4577,8 @@ pub unsafe fn vmlaq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t {
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3976,6 +4590,8 @@ pub unsafe fn vmla_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t {
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3987,6 +4603,8 @@ pub unsafe fn vmlaq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t {
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3998,6 +4616,8 @@ pub unsafe fn vmla_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t {
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4009,6 +4629,8 @@ pub unsafe fn vmlaq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t {
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4020,6 +4642,8 @@ pub unsafe fn vmla_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t {
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4031,6 +4655,8 @@ pub unsafe fn vmlaq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t {
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4042,6 +4668,8 @@ pub unsafe fn vmla_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4053,6 +4681,8 @@ pub unsafe fn vmlaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4066,6 +4696,8 @@ pub unsafe fn vmla_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int1
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4079,6 +4711,8 @@ pub unsafe fn vmla_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4092,6 +4726,8 @@ pub unsafe fn vmlaq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4105,6 +4741,8 @@ pub unsafe fn vmlaq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: in
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4118,6 +4756,8 @@ pub unsafe fn vmla_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int3
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4131,6 +4771,8 @@ pub unsafe fn vmla_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4144,6 +4786,8 @@ pub unsafe fn vmlaq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4157,6 +4801,8 @@ pub unsafe fn vmlaq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: in
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4170,6 +4816,8 @@ pub unsafe fn vmla_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: ui
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4183,6 +4831,8 @@ pub unsafe fn vmla_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: u
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4196,6 +4846,8 @@ pub unsafe fn vmlaq_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c: u
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4209,6 +4861,8 @@ pub unsafe fn vmlaq_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c:
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4222,6 +4876,8 @@ pub unsafe fn vmla_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: ui
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4235,6 +4891,8 @@ pub unsafe fn vmla_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: u
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4248,6 +4906,8 @@ pub unsafe fn vmlaq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c: u
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4261,6 +4921,8 @@ pub unsafe fn vmlaq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c:
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4274,6 +4936,8 @@ pub unsafe fn vmla_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c:
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4287,6 +4951,8 @@ pub unsafe fn vmla_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c:
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4300,6 +4966,8 @@ pub unsafe fn vmlaq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c:
}
/// Vector multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
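
The `_n` and `_lane`/`_laneq` variants above follow the same a + b * c pattern, but take the multiplier as a broadcast scalar or as a single lane of a third vector. A hedged sketch (AArch64 assumed):

    #[cfg(target_arch = "aarch64")]
    unsafe fn mla_scalar_sketch() {
        // Illustrative example, assuming a NEON-capable AArch64 target.
        use std::arch::aarch64::*;
        let acc = vdup_n_s16(1);
        let b = vdup_n_s16(2);
        // `_n`: the scalar 5 is broadcast, so every lane is 1 + 2 * 5 = 11.
        let r_n = vmla_n_s16(acc, b, 5);
        // `_laneq`: lane 7 of the 128-bit vector `c` (= 6) is broadcast: 1 + 2 * 6 = 13.
        let c = vsetq_lane_s16::<7>(6, vdupq_n_s16(0));
        let r_lane = vmla_laneq_s16::<7>(acc, b, c);
        assert_eq!(vget_lane_s16::<0>(r_n), 11);
        assert_eq!(vget_lane_s16::<0>(r_lane), 13);
    }
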
@@ -4313,6 +4981,8 @@ pub unsafe fn vmlaq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c
}
/// Signed multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4324,6 +4994,8 @@ pub unsafe fn vmlal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t {
}
/// Signed multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4335,6 +5007,8 @@ pub unsafe fn vmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t {
}
/// Signed multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4346,6 +5020,8 @@ pub unsafe fn vmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t {
}
/// Unsigned multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4357,6 +5033,8 @@ pub unsafe fn vmlal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t
}
/// Unsigned multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4368,6 +5046,8 @@ pub unsafe fn vmlal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4
}
/// Unsigned multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4379,6 +5059,8 @@ pub unsafe fn vmlal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2
}
/// Vector widening multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4390,6 +5072,8 @@ pub unsafe fn vmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t {
}
/// Vector widening multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4401,6 +5085,8 @@ pub unsafe fn vmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t {
}
/// Vector widening multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4412,6 +5098,8 @@ pub unsafe fn vmlal_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t {
}
/// Vector widening multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4423,6 +5111,8 @@ pub unsafe fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t {
}
/// Vector widening multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4436,6 +5126,8 @@ pub unsafe fn vmlal_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: int
}
/// Vector widening multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4449,6 +5141,8 @@ pub unsafe fn vmlal_laneq_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: in
}
/// Vector widening multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4462,6 +5156,8 @@ pub unsafe fn vmlal_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x2_t, c: int
}
/// Vector widening multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4475,6 +5171,8 @@ pub unsafe fn vmlal_laneq_s32<const LANE: i32>(a: int64x2_t, b: int32x2_t, c: in
}
/// Vector widening multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4488,6 +5186,8 @@ pub unsafe fn vmlal_lane_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c: u
}
/// Vector widening multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4501,6 +5201,8 @@ pub unsafe fn vmlal_laneq_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c:
}
/// Vector widening multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4514,6 +5216,8 @@ pub unsafe fn vmlal_lane_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c: u
}
/// Vector widening multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
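
The vmlal family above is the widening form: 8/16/32-bit operands are multiplied into a 16/32/64-bit accumulator, and its `_n` and `_lane` variants mirror the vmla ones. A small sketch (AArch64 assumed):

    #[cfg(target_arch = "aarch64")]
    unsafe fn mlal_sketch() {
        // Illustrative example, assuming a NEON-capable AArch64 target.
        use std::arch::aarch64::*;
        let acc: int32x4_t = vdupq_n_s32(100);
        let b: int16x4_t = vdup_n_s16(300);
        let c: int16x4_t = vdup_n_s16(300);
        // 300 * 300 = 90_000 does not fit in i16, but the widening accumulate
        // is carried out in 32 bits: 100 + 90_000 = 90_100 in every lane.
        let r = vmlal_s16(acc, b, c);
        assert_eq!(vgetq_lane_s32::<0>(r), 90_100);
    }
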
@@ -4527,6 +5231,8 @@ pub unsafe fn vmlal_laneq_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c:
}
/// Multiply-subtract from accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4538,6 +5244,8 @@ pub unsafe fn vmls_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
}
/// Multiply-subtract from accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4549,6 +5257,8 @@ pub unsafe fn vmlsq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
}
/// Multiply-subtract from accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4560,6 +5270,8 @@ pub unsafe fn vmls_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
}
/// Multiply-subtract from accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4571,6 +5283,8 @@ pub unsafe fn vmlsq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
}
/// Multiply-subtract from accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4582,6 +5296,8 @@ pub unsafe fn vmls_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
}
/// Multiply-subtract from accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4593,6 +5309,8 @@ pub unsafe fn vmlsq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
}
/// Multiply-subtract from accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4604,6 +5322,8 @@ pub unsafe fn vmls_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
}
/// Multiply-subtract from accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4615,6 +5335,8 @@ pub unsafe fn vmlsq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_
}
/// Multiply-subtract from accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4626,6 +5348,8 @@ pub unsafe fn vmls_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_
}
/// Multiply-subtract from accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4637,6 +5361,8 @@ pub unsafe fn vmlsq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8
}
/// Multiply-subtract from accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4648,6 +5374,8 @@ pub unsafe fn vmls_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_
}
/// Multiply-subtract from accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4659,6 +5387,8 @@ pub unsafe fn vmlsq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4
}
/// Floating-point multiply-subtract from accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4670,6 +5400,8 @@ pub unsafe fn vmls_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3
}
/// Floating-point multiply-subtract from accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
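
vmls is the subtracting counterpart of vmla: each lane computes a - b * c. A minimal sketch (AArch64 assumed):

    #[cfg(target_arch = "aarch64")]
    unsafe fn mls_sketch() {
        // Illustrative example, assuming a NEON-capable AArch64 target.
        use std::arch::aarch64::*;
        let acc = vdup_n_f32(10.0);
        let b = vdup_n_f32(2.0);
        let c = vdup_n_f32(3.0);
        // Every lane: 10.0 - 2.0 * 3.0 = 4.0.
        let r = vmls_f32(acc, b, c);
        assert_eq!(vget_lane_f32::<0>(r), 4.0);
    }
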
@@ -4681,6 +5413,8 @@ pub unsafe fn vmlsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4692,6 +5426,8 @@ pub unsafe fn vmls_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t {
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4703,6 +5439,8 @@ pub unsafe fn vmlsq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t {
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4714,6 +5452,8 @@ pub unsafe fn vmls_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t {
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4725,6 +5465,8 @@ pub unsafe fn vmlsq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t {
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4736,6 +5478,8 @@ pub unsafe fn vmls_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t {
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4747,6 +5491,8 @@ pub unsafe fn vmlsq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t {
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4758,6 +5504,8 @@ pub unsafe fn vmls_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t {
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4769,6 +5517,8 @@ pub unsafe fn vmlsq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t {
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4780,6 +5530,8 @@ pub unsafe fn vmls_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4791,6 +5543,8 @@ pub unsafe fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4804,6 +5558,8 @@ pub unsafe fn vmls_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int1
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4817,6 +5573,8 @@ pub unsafe fn vmls_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4830,6 +5588,8 @@ pub unsafe fn vmlsq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4843,6 +5603,8 @@ pub unsafe fn vmlsq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: in
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4856,6 +5618,8 @@ pub unsafe fn vmls_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int3
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4869,6 +5633,8 @@ pub unsafe fn vmls_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4882,6 +5648,8 @@ pub unsafe fn vmlsq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4895,6 +5663,8 @@ pub unsafe fn vmlsq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: in
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4908,6 +5678,8 @@ pub unsafe fn vmls_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: ui
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4921,6 +5693,8 @@ pub unsafe fn vmls_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: u
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4934,6 +5708,8 @@ pub unsafe fn vmlsq_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c: u
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4947,6 +5723,8 @@ pub unsafe fn vmlsq_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c:
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4960,6 +5738,8 @@ pub unsafe fn vmls_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: ui
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4973,6 +5753,8 @@ pub unsafe fn vmls_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: u
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4986,6 +5768,8 @@ pub unsafe fn vmlsq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c: u
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -4999,6 +5783,8 @@ pub unsafe fn vmlsq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c:
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5012,6 +5798,8 @@ pub unsafe fn vmls_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c:
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5025,6 +5813,8 @@ pub unsafe fn vmls_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c:
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5038,6 +5828,8 @@ pub unsafe fn vmlsq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c:
}
/// Vector multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5051,6 +5843,8 @@ pub unsafe fn vmlsq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c
}
/// Signed multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5062,6 +5856,8 @@ pub unsafe fn vmlsl_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t {
}
/// Signed multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5073,6 +5869,8 @@ pub unsafe fn vmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t {
}
/// Signed multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5084,6 +5882,8 @@ pub unsafe fn vmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t {
}
/// Unsigned multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5095,6 +5895,8 @@ pub unsafe fn vmlsl_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t
}
/// Unsigned multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5106,6 +5908,8 @@ pub unsafe fn vmlsl_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4
}
/// Unsigned multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5117,6 +5921,8 @@ pub unsafe fn vmlsl_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2
}
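(Aside, not part of this patch: a minimal sketch of what the multiply-subtract-long family documented above computes, assuming an AArch64 target with NEON available; the helper name and values are invented for illustration.)

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn mlsl_demo() {
    use core::arch::aarch64::{vdup_n_s16, vdupq_n_s32, vgetq_lane_s32, vmlsl_s16};
    // Computes a - (b * c), widening the i16 products into the i32 accumulator.
    let a = vdupq_n_s32(1000);
    let b = vdup_n_s16(20);
    let c = vdup_n_s16(30);
    let r = vmlsl_s16(a, b, c);
    assert_eq!(vgetq_lane_s32::<0>(r), 400); // 1000 - 20 * 30
}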
/// Vector widening multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5128,6 +5934,8 @@ pub unsafe fn vmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t {
}
/// Vector widening multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5139,6 +5947,8 @@ pub unsafe fn vmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t {
}
/// Vector widening multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5150,6 +5960,8 @@ pub unsafe fn vmlsl_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t {
}
/// Vector widening multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5161,6 +5973,8 @@ pub unsafe fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t {
}
/// Vector widening multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5174,6 +5988,8 @@ pub unsafe fn vmlsl_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: int
}
/// Vector widening multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5187,6 +6003,8 @@ pub unsafe fn vmlsl_laneq_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: in
}
/// Vector widening multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5200,6 +6018,8 @@ pub unsafe fn vmlsl_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x2_t, c: int
}
/// Vector widening multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5213,6 +6033,8 @@ pub unsafe fn vmlsl_laneq_s32<const LANE: i32>(a: int64x2_t, b: int32x2_t, c: in
}
/// Vector widening multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5226,6 +6048,8 @@ pub unsafe fn vmlsl_lane_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c: u
}
/// Vector widening multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5239,6 +6063,8 @@ pub unsafe fn vmlsl_laneq_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c:
}
/// Vector widening multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5252,6 +6078,8 @@ pub unsafe fn vmlsl_lane_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c: u
}
/// Vector widening multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5265,6 +6093,8 @@ pub unsafe fn vmlsl_laneq_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c:
}
/// Negate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5276,6 +6106,8 @@ pub unsafe fn vneg_s8(a: int8x8_t) -> int8x8_t {
}
/// Negate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5287,6 +6119,8 @@ pub unsafe fn vnegq_s8(a: int8x16_t) -> int8x16_t {
}
/// Negate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5298,6 +6132,8 @@ pub unsafe fn vneg_s16(a: int16x4_t) -> int16x4_t {
}
/// Negate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5309,6 +6145,8 @@ pub unsafe fn vnegq_s16(a: int16x8_t) -> int16x8_t {
}
/// Negate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5320,6 +6158,8 @@ pub unsafe fn vneg_s32(a: int32x2_t) -> int32x2_t {
}
/// Negate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5331,6 +6171,8 @@ pub unsafe fn vnegq_s32(a: int32x4_t) -> int32x4_t {
}
/// Negate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5342,6 +6184,8 @@ pub unsafe fn vneg_f32(a: float32x2_t) -> float32x2_t {
}
/// Negate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5353,6 +6197,8 @@ pub unsafe fn vnegq_f32(a: float32x4_t) -> float32x4_t {
}
/// Signed saturating negate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5370,6 +6216,8 @@ vqneg_s8_(a)
}
/// Signed saturating negate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5387,6 +6235,8 @@ vqnegq_s8_(a)
}
/// Signed saturating negate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5404,6 +6254,8 @@ vqneg_s16_(a)
}
/// Signed saturating negate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5421,6 +6273,8 @@ vqnegq_s16_(a)
}
/// Signed saturating negate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5438,6 +6292,8 @@ vqneg_s32_(a)
}
/// Signed saturating negate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5455,6 +6311,8 @@ vqnegq_s32_(a)
}
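(Aside, not part of this patch: a hedged sketch of the saturating-negate behaviour, assuming an AArch64 target with NEON; the helper name is invented.)

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn qneg_demo() {
    use core::arch::aarch64::{vdup_n_s8, vget_lane_s8, vqneg_s8};
    // Plain negation of i8::MIN would wrap back to -128; vqneg saturates to 127.
    let r = vqneg_s8(vdup_n_s8(i8::MIN));
    assert_eq!(vget_lane_s8::<0>(r), i8::MAX);
}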
/// Saturating subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5472,6 +6330,8 @@ vqsub_u8_(a, b)
}
/// Saturating subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5489,6 +6349,8 @@ vqsubq_u8_(a, b)
}
/// Saturating subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5506,6 +6368,8 @@ vqsub_u16_(a, b)
}
/// Saturating subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5523,6 +6387,8 @@ vqsubq_u16_(a, b)
}
/// Saturating subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5540,6 +6406,8 @@ vqsub_u32_(a, b)
}
/// Saturating subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5557,6 +6425,8 @@ vqsubq_u32_(a, b)
}
/// Saturating subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5574,6 +6444,8 @@ vqsub_u64_(a, b)
}
/// Saturating subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5591,6 +6463,8 @@ vqsubq_u64_(a, b)
}
/// Saturating subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5608,6 +6482,8 @@ vqsub_s8_(a, b)
}
/// Saturating subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5625,6 +6501,8 @@ vqsubq_s8_(a, b)
}
/// Saturating subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5642,6 +6520,8 @@ vqsub_s16_(a, b)
}
/// Saturating subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5659,6 +6539,8 @@ vqsubq_s16_(a, b)
}
/// Saturating subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5676,6 +6558,8 @@ vqsub_s32_(a, b)
}
/// Saturating subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5693,6 +6577,8 @@ vqsubq_s32_(a, b)
}
/// Saturating subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5710,6 +6596,8 @@ vqsub_s64_(a, b)
}
/// Saturating subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5727,6 +6615,8 @@ vqsubq_s64_(a, b)
}
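(Aside, not part of this patch: a small illustrative sketch of the saturating subtraction documented above, assuming an AArch64 target with NEON; the helper name is invented.)

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn qsub_demo() {
    use core::arch::aarch64::{vdup_n_u8, vget_lane_u8, vqsub_u8};
    // 10 - 200 would wrap in plain u8 arithmetic; the saturating form clamps to 0.
    let r = vqsub_u8(vdup_n_u8(10), vdup_n_u8(200));
    assert_eq!(vget_lane_u8::<0>(r), 0);
}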
/// Halving add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5744,6 +6634,8 @@ vhadd_u8_(a, b)
}
/// Halving add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5761,6 +6653,8 @@ vhaddq_u8_(a, b)
}
/// Halving add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5778,6 +6672,8 @@ vhadd_u16_(a, b)
}
/// Halving add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5795,6 +6691,8 @@ vhaddq_u16_(a, b)
}
/// Halving add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5812,6 +6710,8 @@ vhadd_u32_(a, b)
}
/// Halving add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5829,6 +6729,8 @@ vhaddq_u32_(a, b)
}
/// Halving add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5846,6 +6748,8 @@ vhadd_s8_(a, b)
}
/// Halving add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5863,6 +6767,8 @@ vhaddq_s8_(a, b)
}
/// Halving add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5880,6 +6786,8 @@ vhadd_s16_(a, b)
}
/// Halving add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5897,6 +6805,8 @@ vhaddq_s16_(a, b)
}
/// Halving add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5914,6 +6824,8 @@ vhadd_s32_(a, b)
}
/// Halving add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5931,6 +6843,8 @@ vhaddq_s32_(a, b)
}
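(Aside, not part of this patch: a hedged sketch of the halving-add semantics, assuming an AArch64 target with NEON; the helper name is invented.)

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn hadd_demo() {
    use core::arch::aarch64::{vdup_n_u8, vget_lane_u8, vhadd_u8};
    // (250 + 251) >> 1 = 250, computed without the intermediate sum overflowing u8.
    let r = vhadd_u8(vdup_n_u8(250), vdup_n_u8(251));
    assert_eq!(vget_lane_u8::<0>(r), 250);
}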
/// Rounding halving add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5948,6 +6862,8 @@ vrhadd_u8_(a, b)
}
/// Rounding halving add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5965,6 +6881,8 @@ vrhaddq_u8_(a, b)
}
/// Rounding halving add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5982,6 +6900,8 @@ vrhadd_u16_(a, b)
}
/// Rounding halving add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -5999,6 +6919,8 @@ vrhaddq_u16_(a, b)
}
/// Rounding halving add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6016,6 +6938,8 @@ vrhadd_u32_(a, b)
}
/// Rounding halving add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6033,6 +6957,8 @@ vrhaddq_u32_(a, b)
}
/// Rounding halving add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6050,6 +6976,8 @@ vrhadd_s8_(a, b)
}
/// Rounding halving add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6067,6 +6995,8 @@ vrhaddq_s8_(a, b)
}
/// Rounding halving add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6084,6 +7014,8 @@ vrhadd_s16_(a, b)
}
/// Rounding halving add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6101,6 +7033,8 @@ vrhaddq_s16_(a, b)
}
/// Rounding halving add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6118,6 +7052,8 @@ vrhadd_s32_(a, b)
}
/// Rounding halving add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6135,6 +7071,8 @@ vrhaddq_s32_(a, b)
}
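(Aside, not part of this patch: a hedged sketch contrasting the rounding halving add with the truncating vhadd form above, assuming an AArch64 target with NEON; the helper name is invented.)

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn rhadd_demo() {
    use core::arch::aarch64::{vdup_n_u8, vget_lane_u8, vrhadd_u8};
    // Rounding halving add computes (a + b + 1) >> 1, so (1 + 2) averages to 2,
    // where the truncating halving add would give 1.
    let r = vrhadd_u8(vdup_n_u8(1), vdup_n_u8(2));
    assert_eq!(vget_lane_u8::<0>(r), 2);
}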
/// Floating-point round to integral, to nearest with ties to even
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))]
@@ -6152,6 +7090,8 @@ vrndn_f32_(a)
}
/// Floating-point round to integral, to nearest with ties to even
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))]
@@ -6169,6 +7109,8 @@ vrndnq_f32_(a)
}
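(Aside, not part of this patch: a hedged sketch of the ties-to-even rounding documented above, assuming an AArch64 target with NEON; the helper name is invented.)

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn rndn_demo() {
    use core::arch::aarch64::{vdup_n_f32, vget_lane_f32, vrndn_f32};
    // Ties go to the even value, so 2.5 rounds down to 2.0 (and 3.5 would round up to 4.0).
    let r = vrndn_f32(vdup_n_f32(2.5));
    assert_eq!(vget_lane_f32::<0>(r), 2.0);
}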
/// Saturating add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6186,6 +7128,8 @@ vqadd_u8_(a, b)
}
/// Saturating add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6203,6 +7147,8 @@ vqaddq_u8_(a, b)
}
/// Saturating add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6220,6 +7166,8 @@ vqadd_u16_(a, b)
}
/// Saturating add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6237,6 +7185,8 @@ vqaddq_u16_(a, b)
}
/// Saturating add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6254,6 +7204,8 @@ vqadd_u32_(a, b)
}
/// Saturating add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6271,6 +7223,8 @@ vqaddq_u32_(a, b)
}
/// Saturating add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6288,6 +7242,8 @@ vqadd_u64_(a, b)
}
/// Saturating add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6305,6 +7261,8 @@ vqaddq_u64_(a, b)
}
/// Saturating add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6322,6 +7280,8 @@ vqadd_s8_(a, b)
}
/// Saturating add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6339,6 +7299,8 @@ vqaddq_s8_(a, b)
}
/// Saturating add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6356,6 +7318,8 @@ vqadd_s16_(a, b)
}
/// Saturating add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6373,6 +7337,8 @@ vqaddq_s16_(a, b)
}
/// Saturating add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6390,6 +7356,8 @@ vqadd_s32_(a, b)
}
/// Saturating add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6407,6 +7375,8 @@ vqaddq_s32_(a, b)
}
/// Saturating add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6424,6 +7394,8 @@ vqadd_s64_(a, b)
}
/// Saturating add
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6441,6 +7413,8 @@ vqaddq_s64_(a, b)
}
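(Aside, not part of this patch: a hedged sketch of the saturating addition documented above, assuming an AArch64 target with NEON; the helper name is invented.)

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn qadd_demo() {
    use core::arch::aarch64::{vdup_n_u8, vget_lane_u8, vqadd_u8};
    // 200 + 100 exceeds u8::MAX; the saturating form clamps to 255 instead of wrapping to 44.
    let r = vqadd_u8(vdup_n_u8(200), vdup_n_u8(100));
    assert_eq!(vget_lane_u8::<0>(r), u8::MAX);
}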
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6458,6 +7432,8 @@ vld1_s8_x2_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6475,6 +7451,8 @@ vld1_s16_x2_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6492,6 +7470,8 @@ vld1_s32_x2_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6509,6 +7489,8 @@ vld1_s64_x2_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6526,6 +7508,8 @@ vld1q_s8_x2_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6543,6 +7527,8 @@ vld1q_s16_x2_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6560,6 +7546,8 @@ vld1q_s32_x2_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6577,6 +7565,8 @@ vld1q_s64_x2_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6594,6 +7584,8 @@ vld1_s8_x3_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6611,6 +7603,8 @@ vld1_s16_x3_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6628,6 +7622,8 @@ vld1_s32_x3_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6645,6 +7641,8 @@ vld1_s64_x3_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6662,6 +7660,8 @@ vld1q_s8_x3_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6679,6 +7679,8 @@ vld1q_s16_x3_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6696,6 +7698,8 @@ vld1q_s32_x3_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6713,6 +7717,8 @@ vld1q_s64_x3_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6730,6 +7736,8 @@ vld1_s8_x4_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6747,6 +7755,8 @@ vld1_s16_x4_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6764,6 +7774,8 @@ vld1_s32_x4_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6781,6 +7793,8 @@ vld1_s64_x4_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6798,6 +7812,8 @@ vld1q_s8_x4_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6815,6 +7831,8 @@ vld1q_s16_x4_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6832,6 +7850,8 @@ vld1q_s32_x4_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6849,6 +7869,8 @@ vld1q_s64_x4_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6860,6 +7882,8 @@ pub unsafe fn vld1_u8_x2(a: *const u8) -> uint8x8x2_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6871,6 +7895,8 @@ pub unsafe fn vld1_u16_x2(a: *const u16) -> uint16x4x2_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6882,6 +7908,8 @@ pub unsafe fn vld1_u32_x2(a: *const u32) -> uint32x2x2_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6893,6 +7921,8 @@ pub unsafe fn vld1_u64_x2(a: *const u64) -> uint64x1x2_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6904,6 +7934,8 @@ pub unsafe fn vld1q_u8_x2(a: *const u8) -> uint8x16x2_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6915,6 +7947,8 @@ pub unsafe fn vld1q_u16_x2(a: *const u16) -> uint16x8x2_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6926,6 +7960,8 @@ pub unsafe fn vld1q_u32_x2(a: *const u32) -> uint32x4x2_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6937,6 +7973,8 @@ pub unsafe fn vld1q_u64_x2(a: *const u64) -> uint64x2x2_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6948,6 +7986,8 @@ pub unsafe fn vld1_u8_x3(a: *const u8) -> uint8x8x3_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6959,6 +7999,8 @@ pub unsafe fn vld1_u16_x3(a: *const u16) -> uint16x4x3_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6970,6 +8012,8 @@ pub unsafe fn vld1_u32_x3(a: *const u32) -> uint32x2x3_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6981,6 +8025,8 @@ pub unsafe fn vld1_u64_x3(a: *const u64) -> uint64x1x3_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -6992,6 +8038,8 @@ pub unsafe fn vld1q_u8_x3(a: *const u8) -> uint8x16x3_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7003,6 +8051,8 @@ pub unsafe fn vld1q_u16_x3(a: *const u16) -> uint16x8x3_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7014,6 +8064,8 @@ pub unsafe fn vld1q_u32_x3(a: *const u32) -> uint32x4x3_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7025,6 +8077,8 @@ pub unsafe fn vld1q_u64_x3(a: *const u64) -> uint64x2x3_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7036,6 +8090,8 @@ pub unsafe fn vld1_u8_x4(a: *const u8) -> uint8x8x4_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7047,6 +8103,8 @@ pub unsafe fn vld1_u16_x4(a: *const u16) -> uint16x4x4_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7058,6 +8116,8 @@ pub unsafe fn vld1_u32_x4(a: *const u32) -> uint32x2x4_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7069,6 +8129,8 @@ pub unsafe fn vld1_u64_x4(a: *const u64) -> uint64x1x4_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7080,6 +8142,8 @@ pub unsafe fn vld1q_u8_x4(a: *const u8) -> uint8x16x4_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7091,6 +8155,8 @@ pub unsafe fn vld1q_u16_x4(a: *const u16) -> uint16x8x4_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7102,6 +8168,8 @@ pub unsafe fn vld1q_u32_x4(a: *const u32) -> uint32x4x4_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7113,6 +8181,8 @@ pub unsafe fn vld1q_u64_x4(a: *const u64) -> uint64x2x4_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7124,6 +8194,8 @@ pub unsafe fn vld1_p8_x2(a: *const p8) -> poly8x8x2_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7135,6 +8207,8 @@ pub unsafe fn vld1_p8_x3(a: *const p8) -> poly8x8x3_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7146,6 +8220,8 @@ pub unsafe fn vld1_p8_x4(a: *const p8) -> poly8x8x4_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7157,6 +8233,8 @@ pub unsafe fn vld1q_p8_x2(a: *const p8) -> poly8x16x2_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7168,6 +8246,8 @@ pub unsafe fn vld1q_p8_x3(a: *const p8) -> poly8x16x3_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7179,6 +8259,8 @@ pub unsafe fn vld1q_p8_x4(a: *const p8) -> poly8x16x4_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7190,6 +8272,8 @@ pub unsafe fn vld1_p16_x2(a: *const p16) -> poly16x4x2_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7201,6 +8285,8 @@ pub unsafe fn vld1_p16_x3(a: *const p16) -> poly16x4x3_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7212,6 +8298,8 @@ pub unsafe fn vld1_p16_x4(a: *const p16) -> poly16x4x4_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7223,6 +8311,8 @@ pub unsafe fn vld1q_p16_x2(a: *const p16) -> poly16x8x2_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7234,6 +8324,8 @@ pub unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7245,6 +8337,8 @@ pub unsafe fn vld1q_p16_x4(a: *const p16) -> poly16x8x4_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x2)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -7256,6 +8350,8 @@ pub unsafe fn vld1_p64_x2(a: *const p64) -> poly64x1x2_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x3)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -7267,6 +8363,8 @@ pub unsafe fn vld1_p64_x3(a: *const p64) -> poly64x1x3_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x4)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -7278,6 +8376,8 @@ pub unsafe fn vld1_p64_x4(a: *const p64) -> poly64x1x4_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x2)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -7289,6 +8389,8 @@ pub unsafe fn vld1q_p64_x2(a: *const p64) -> poly64x2x2_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x3)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -7300,6 +8402,8 @@ pub unsafe fn vld1q_p64_x3(a: *const p64) -> poly64x2x3_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x4)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -7311,6 +8415,8 @@ pub unsafe fn vld1q_p64_x4(a: *const p64) -> poly64x2x4_t {
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7328,6 +8434,8 @@ vld1_f32_x2_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7345,6 +8453,8 @@ vld1q_f32_x2_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7362,6 +8472,8 @@ vld1_f32_x3_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7379,6 +8491,8 @@ vld1q_f32_x3_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7396,6 +8510,8 @@ vld1_f32_x4_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7413,6 +8529,8 @@ vld1q_f32_x4_(a)
}
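(Aside, not part of this patch: a hedged sketch of the multi-register vld1 loads documented above, assuming an AArch64 target with NEON; the helper name and data are invented.)

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn ld1_x2_demo() {
    use core::arch::aarch64::{vget_lane_u8, vld1_u8_x2};
    let data: [u8; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
    // Loads two consecutive 8-byte vectors from one pointer (contiguous, no de-interleaving).
    let pair = vld1_u8_x2(data.as_ptr());
    assert_eq!(vget_lane_u8::<0>(pair.0), 0);
    assert_eq!(vget_lane_u8::<0>(pair.1), 8);
}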
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -7427,6 +8545,8 @@ vld2_s8_(a as *const i8, 1)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -7442,6 +8562,8 @@ vld2_s8_(a as _)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -7456,6 +8578,8 @@ vld2_s16_(a as *const i8, 2)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -7471,6 +8595,8 @@ vld2_s16_(a as _)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -7485,6 +8611,8 @@ vld2_s32_(a as *const i8, 4)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -7500,6 +8628,8 @@ vld2_s32_(a as _)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -7514,6 +8644,8 @@ vld2q_s8_(a as *const i8, 1)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -7529,6 +8661,8 @@ vld2q_s8_(a as _)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -7543,6 +8677,8 @@ vld2q_s16_(a as *const i8, 2)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -7558,6 +8694,8 @@ vld2q_s16_(a as _)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -7572,6 +8710,8 @@ vld2q_s32_(a as *const i8, 4)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -7587,6 +8727,8 @@ vld2q_s32_(a as _)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -7601,6 +8743,8 @@ vld2_s64_(a as *const i8, 8)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -7616,6 +8760,8 @@ vld2_s64_(a as _)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7627,6 +8773,8 @@ pub unsafe fn vld2_u8(a: *const u8) -> uint8x8x2_t {
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7638,6 +8786,8 @@ pub unsafe fn vld2_u16(a: *const u16) -> uint16x4x2_t {
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7649,6 +8799,8 @@ pub unsafe fn vld2_u32(a: *const u32) -> uint32x2x2_t {
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7660,6 +8812,8 @@ pub unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t {
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7671,6 +8825,8 @@ pub unsafe fn vld2q_u16(a: *const u16) -> uint16x8x2_t {
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7682,6 +8838,8 @@ pub unsafe fn vld2q_u32(a: *const u32) -> uint32x4x2_t {
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7693,6 +8851,8 @@ pub unsafe fn vld2_p8(a: *const p8) -> poly8x8x2_t {
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7704,6 +8864,8 @@ pub unsafe fn vld2_p16(a: *const p16) -> poly16x4x2_t {
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7715,6 +8877,8 @@ pub unsafe fn vld2q_p8(a: *const p8) -> poly8x16x2_t {
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7726,6 +8890,8 @@ pub unsafe fn vld2q_p16(a: *const p16) -> poly16x8x2_t {
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -7737,6 +8903,8 @@ pub unsafe fn vld2_u64(a: *const u64) -> uint64x1x2_t {
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -7748,6 +8916,8 @@ pub unsafe fn vld2_p64(a: *const p64) -> poly64x1x2_t {
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -7762,6 +8932,8 @@ vld2_f32_(a as *const i8, 4)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -7777,6 +8949,8 @@ vld2_f32_(a as _)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -7791,6 +8965,8 @@ vld2q_f32_(a as *const i8, 4)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -7806,6 +8982,8 @@ vld2q_f32_(a as _)
}
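// Illustrative sketch (not part of this patch): a minimal use of the interleaved vld2 loads
// above, assuming vld2_u8 is available from core::arch::aarch64 as declared here.
#[cfg(target_arch = "aarch64")]
unsafe fn deinterleave_pairs(src: &[u8; 16]) -> core::arch::aarch64::uint8x8x2_t {
    use core::arch::aarch64::vld2_u8;
    // Reads 16 bytes as eight {a, b} pairs: even-indexed bytes go to .0, odd-indexed bytes to .1.
    vld2_u8(src.as_ptr())
}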
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -7820,6 +8998,8 @@ vld2_dup_s8_(a as *const i8, 1)
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -7835,6 +9015,8 @@ vld2_dup_s8_(a as _)
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -7849,6 +9031,8 @@ vld2_dup_s16_(a as *const i8, 2)
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -7864,6 +9048,8 @@ vld2_dup_s16_(a as _)
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -7878,6 +9064,8 @@ vld2_dup_s32_(a as *const i8, 4)
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -7893,6 +9081,8 @@ vld2_dup_s32_(a as _)
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -7907,6 +9097,8 @@ vld2q_dup_s8_(a as *const i8, 1)
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -7922,6 +9114,8 @@ vld2q_dup_s8_(a as _)
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -7936,6 +9130,8 @@ vld2q_dup_s16_(a as *const i8, 2)
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -7951,6 +9147,8 @@ vld2q_dup_s16_(a as _)
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -7965,6 +9163,8 @@ vld2q_dup_s32_(a as *const i8, 4)
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -7980,6 +9180,8 @@ vld2q_dup_s32_(a as _)
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -7994,6 +9196,8 @@ vld2_dup_s64_(a as *const i8, 8)
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -8009,6 +9213,8 @@ vld2_dup_s64_(a as _)
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8020,6 +9226,8 @@ pub unsafe fn vld2_dup_u8(a: *const u8) -> uint8x8x2_t {
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8031,6 +9239,8 @@ pub unsafe fn vld2_dup_u16(a: *const u16) -> uint16x4x2_t {
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8042,6 +9252,8 @@ pub unsafe fn vld2_dup_u32(a: *const u32) -> uint32x2x2_t {
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8053,6 +9265,8 @@ pub unsafe fn vld2q_dup_u8(a: *const u8) -> uint8x16x2_t {
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8064,6 +9278,8 @@ pub unsafe fn vld2q_dup_u16(a: *const u16) -> uint16x8x2_t {
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8075,6 +9291,8 @@ pub unsafe fn vld2q_dup_u32(a: *const u32) -> uint32x4x2_t {
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8086,6 +9304,8 @@ pub unsafe fn vld2_dup_p8(a: *const p8) -> poly8x8x2_t {
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8097,6 +9317,8 @@ pub unsafe fn vld2_dup_p16(a: *const p16) -> poly16x4x2_t {
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8108,6 +9330,8 @@ pub unsafe fn vld2q_dup_p8(a: *const p8) -> poly8x16x2_t {
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8119,6 +9343,8 @@ pub unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t {
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8130,6 +9356,8 @@ pub unsafe fn vld2_dup_u64(a: *const u64) -> uint64x1x2_t {
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -8141,6 +9369,8 @@ pub unsafe fn vld2_dup_p64(a: *const p64) -> poly64x1x2_t {
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -8155,6 +9385,8 @@ vld2_dup_f32_(a as *const i8, 4)
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -8170,6 +9402,8 @@ vld2_dup_f32_(a as _)
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -8184,6 +9418,8 @@ vld2q_dup_f32_(a as *const i8, 4)
}
/// Load single 2-element structure and replicate to all lanes of two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -8199,6 +9435,8 @@ vld2q_dup_f32_(a as _)
}
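// Illustrative sketch (not part of this patch): the vld2_dup replicate-to-all-lanes loads,
// assuming vld2_dup_f32 is exposed from core::arch::aarch64 as declared here.
#[cfg(target_arch = "aarch64")]
unsafe fn splat_pair(src: &[f32; 2]) -> core::arch::aarch64::float32x2x2_t {
    use core::arch::aarch64::vld2_dup_f32;
    // Reads one {f32, f32} structure; src[0] is broadcast to every lane of .0, src[1] to every lane of .1.
    vld2_dup_f32(src.as_ptr())
}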
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -8215,6 +9453,8 @@ vld2_lane_s8_(a as _, b.0, b.1, LANE, 1)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -8232,6 +9472,8 @@ vld2_lane_s8_(b.0, b.1, LANE as i64, a as _)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -8248,6 +9490,8 @@ vld2_lane_s16_(a as _, b.0, b.1, LANE, 2)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -8265,6 +9509,8 @@ vld2_lane_s16_(b.0, b.1, LANE as i64, a as _)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -8281,6 +9527,8 @@ vld2_lane_s32_(a as _, b.0, b.1, LANE, 4)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -8298,6 +9546,8 @@ vld2_lane_s32_(b.0, b.1, LANE as i64, a as _)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -8314,6 +9564,8 @@ vld2q_lane_s16_(a as _, b.0, b.1, LANE, 2)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -8331,6 +9583,8 @@ vld2q_lane_s16_(b.0, b.1, LANE as i64, a as _)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -8347,6 +9601,8 @@ vld2q_lane_s32_(a as _, b.0, b.1, LANE, 4)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -8364,6 +9620,8 @@ vld2q_lane_s32_(b.0, b.1, LANE as i64, a as _)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8377,6 +9635,8 @@ pub unsafe fn vld2_lane_u8<const LANE: i32>(a: *const u8, b: uint8x8x2_t) -> uin
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8390,6 +9650,8 @@ pub unsafe fn vld2_lane_u16<const LANE: i32>(a: *const u16, b: uint16x4x2_t) ->
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8403,6 +9665,8 @@ pub unsafe fn vld2_lane_u32<const LANE: i32>(a: *const u32, b: uint32x2x2_t) ->
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8416,6 +9680,8 @@ pub unsafe fn vld2q_lane_u16<const LANE: i32>(a: *const u16, b: uint16x8x2_t) ->
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8429,6 +9695,8 @@ pub unsafe fn vld2q_lane_u32<const LANE: i32>(a: *const u32, b: uint32x4x2_t) ->
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8442,6 +9710,8 @@ pub unsafe fn vld2_lane_p8<const LANE: i32>(a: *const p8, b: poly8x8x2_t) -> pol
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8455,6 +9725,8 @@ pub unsafe fn vld2_lane_p16<const LANE: i32>(a: *const p16, b: poly16x4x2_t) ->
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8468,6 +9740,8 @@ pub unsafe fn vld2q_lane_p16<const LANE: i32>(a: *const p16, b: poly16x8x2_t) ->
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -8484,6 +9758,8 @@ vld2_lane_f32_(a as _, b.0, b.1, LANE, 4)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -8501,6 +9777,8 @@ vld2_lane_f32_(b.0, b.1, LANE as i64, a as _)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -8517,6 +9795,8 @@ vld2q_lane_f32_(a as _, b.0, b.1, LANE, 4)
}
/// Load multiple 2-element structures to two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -8534,6 +9814,8 @@ vld2q_lane_f32_(b.0, b.1, LANE as i64, a as _)
}
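// Illustrative sketch (not part of this patch): the lane-targeted vld2_lane loads, assuming
// vld2_lane_u32 is available from core::arch::aarch64 with the const-generic LANE parameter shown above.
#[cfg(target_arch = "aarch64")]
unsafe fn reload_lane_one(
    src: &[u32; 2],
    regs: core::arch::aarch64::uint32x2x2_t,
) -> core::arch::aarch64::uint32x2x2_t {
    use core::arch::aarch64::vld2_lane_u32;
    // Overwrites lane 1 of both vectors with src[0] and src[1]; all other lanes keep the values from `regs`.
    vld2_lane_u32::<1>(src.as_ptr(), regs)
}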
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -8548,6 +9830,8 @@ vld3_s8_(a as *const i8, 1)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -8563,6 +9847,8 @@ vld3_s8_(a as _)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -8577,6 +9863,8 @@ vld3_s16_(a as *const i8, 2)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -8592,6 +9880,8 @@ vld3_s16_(a as _)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -8606,6 +9896,8 @@ vld3_s32_(a as *const i8, 4)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -8621,6 +9913,8 @@ vld3_s32_(a as _)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -8635,6 +9929,8 @@ vld3q_s8_(a as *const i8, 1)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -8650,6 +9946,8 @@ vld3q_s8_(a as _)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -8664,6 +9962,8 @@ vld3q_s16_(a as *const i8, 2)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -8679,6 +9979,8 @@ vld3q_s16_(a as _)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -8693,6 +9995,8 @@ vld3q_s32_(a as *const i8, 4)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -8708,6 +10012,8 @@ vld3q_s32_(a as _)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -8722,6 +10028,8 @@ vld3_s64_(a as *const i8, 8)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -8737,6 +10045,8 @@ vld3_s64_(a as _)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8748,6 +10058,8 @@ pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t {
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8759,6 +10071,8 @@ pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t {
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8770,6 +10084,8 @@ pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t {
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8781,6 +10097,8 @@ pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t {
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8792,6 +10110,8 @@ pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t {
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8803,6 +10123,8 @@ pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t {
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8814,6 +10136,8 @@ pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t {
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8825,6 +10149,8 @@ pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t {
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8836,6 +10162,8 @@ pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t {
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8847,6 +10175,8 @@ pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t {
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -8858,6 +10188,8 @@ pub unsafe fn vld3_u64(a: *const u64) -> uint64x1x3_t {
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -8869,6 +10201,8 @@ pub unsafe fn vld3_p64(a: *const p64) -> poly64x1x3_t {
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -8883,6 +10217,8 @@ vld3_f32_(a as *const i8, 4)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -8898,6 +10234,8 @@ vld3_f32_(a as _)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -8912,6 +10250,8 @@ vld3q_f32_(a as *const i8, 4)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -8927,6 +10267,8 @@ vld3q_f32_(a as _)
}
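// Illustrative sketch (not part of this patch): the three-way interleaved vld3 loads, assuming
// vld3_u8 from core::arch::aarch64; deinterleaving packed RGB data is the typical use.
#[cfg(target_arch = "aarch64")]
unsafe fn split_rgb(src: &[u8; 24]) -> core::arch::aarch64::uint8x8x3_t {
    use core::arch::aarch64::vld3_u8;
    // Reads eight R,G,B triples: the R bytes end up in .0, G in .1, B in .2.
    vld3_u8(src.as_ptr())
}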
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -8941,6 +10283,8 @@ vld3_dup_s8_(a as *const i8, 1)
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -8956,6 +10300,8 @@ vld3_dup_s8_(a as _)
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -8970,6 +10316,8 @@ vld3_dup_s16_(a as *const i8, 2)
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -8985,6 +10333,8 @@ vld3_dup_s16_(a as _)
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -8999,6 +10349,8 @@ vld3_dup_s32_(a as *const i8, 4)
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -9014,6 +10366,8 @@ vld3_dup_s32_(a as _)
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -9028,6 +10382,8 @@ vld3q_dup_s8_(a as *const i8, 1)
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -9043,6 +10399,8 @@ vld3q_dup_s8_(a as _)
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -9057,6 +10415,8 @@ vld3q_dup_s16_(a as *const i8, 2)
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -9072,6 +10432,8 @@ vld3q_dup_s16_(a as _)
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -9086,6 +10448,8 @@ vld3q_dup_s32_(a as *const i8, 4)
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -9101,6 +10465,8 @@ vld3q_dup_s32_(a as _)
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -9115,6 +10481,8 @@ vld3_dup_s64_(a as *const i8, 8)
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -9130,6 +10498,8 @@ vld3_dup_s64_(a as _)
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9141,6 +10511,8 @@ pub unsafe fn vld3_dup_u8(a: *const u8) -> uint8x8x3_t {
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9152,6 +10524,8 @@ pub unsafe fn vld3_dup_u16(a: *const u16) -> uint16x4x3_t {
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9163,6 +10537,8 @@ pub unsafe fn vld3_dup_u32(a: *const u32) -> uint32x2x3_t {
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9174,6 +10550,8 @@ pub unsafe fn vld3q_dup_u8(a: *const u8) -> uint8x16x3_t {
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9185,6 +10563,8 @@ pub unsafe fn vld3q_dup_u16(a: *const u16) -> uint16x8x3_t {
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9196,6 +10576,8 @@ pub unsafe fn vld3q_dup_u32(a: *const u32) -> uint32x4x3_t {
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9207,6 +10589,8 @@ pub unsafe fn vld3_dup_p8(a: *const p8) -> poly8x8x3_t {
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9218,6 +10602,8 @@ pub unsafe fn vld3_dup_p16(a: *const p16) -> poly16x4x3_t {
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9229,6 +10615,8 @@ pub unsafe fn vld3q_dup_p8(a: *const p8) -> poly8x16x3_t {
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9240,6 +10628,8 @@ pub unsafe fn vld3q_dup_p16(a: *const p16) -> poly16x8x3_t {
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9251,6 +10641,8 @@ pub unsafe fn vld3_dup_u64(a: *const u64) -> uint64x1x3_t {
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -9262,6 +10654,8 @@ pub unsafe fn vld3_dup_p64(a: *const p64) -> poly64x1x3_t {
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -9276,6 +10670,8 @@ vld3_dup_f32_(a as *const i8, 4)
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -9291,6 +10687,8 @@ vld3_dup_f32_(a as _)
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -9305,6 +10703,8 @@ vld3q_dup_f32_(a as *const i8, 4)
}
/// Load single 3-element structure and replicate to all lanes of three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -9320,6 +10720,8 @@ vld3q_dup_f32_(a as _)
}
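// Illustrative sketch (not part of this patch): the vld3_dup loads, assuming vld3_dup_u16
// from core::arch::aarch64 as declared here.
#[cfg(target_arch = "aarch64")]
unsafe fn splat_triple(src: &[u16; 3]) -> core::arch::aarch64::uint16x4x3_t {
    use core::arch::aarch64::vld3_dup_u16;
    // Reads one {u16, u16, u16} structure and broadcasts each element to all lanes of its vector.
    vld3_dup_u16(src.as_ptr())
}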
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -9336,6 +10738,8 @@ vld3_lane_s8_(a as _, b.0, b.1, b.2, LANE, 1)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -9353,6 +10757,8 @@ vld3_lane_s8_(b.0, b.1, b.2, LANE as i64, a as _)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -9369,6 +10775,8 @@ vld3_lane_s16_(a as _, b.0, b.1, b.2, LANE, 2)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -9386,6 +10794,8 @@ vld3_lane_s16_(b.0, b.1, b.2, LANE as i64, a as _)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -9402,6 +10812,8 @@ vld3_lane_s32_(a as _, b.0, b.1, b.2, LANE, 4)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -9419,6 +10831,8 @@ vld3_lane_s32_(b.0, b.1, b.2, LANE as i64, a as _)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -9435,6 +10849,8 @@ vld3q_lane_s16_(a as _, b.0, b.1, b.2, LANE, 2)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -9452,6 +10868,8 @@ vld3q_lane_s16_(b.0, b.1, b.2, LANE as i64, a as _)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -9468,6 +10886,8 @@ vld3q_lane_s32_(a as _, b.0, b.1, b.2, LANE, 4)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -9485,6 +10905,8 @@ vld3q_lane_s32_(b.0, b.1, b.2, LANE as i64, a as _)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9498,6 +10920,8 @@ pub unsafe fn vld3_lane_u8<const LANE: i32>(a: *const u8, b: uint8x8x3_t) -> uin
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9511,6 +10935,8 @@ pub unsafe fn vld3_lane_u16<const LANE: i32>(a: *const u16, b: uint16x4x3_t) ->
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9524,6 +10950,8 @@ pub unsafe fn vld3_lane_u32<const LANE: i32>(a: *const u32, b: uint32x2x3_t) ->
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9537,6 +10965,8 @@ pub unsafe fn vld3q_lane_u16<const LANE: i32>(a: *const u16, b: uint16x8x3_t) ->
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9550,6 +10980,8 @@ pub unsafe fn vld3q_lane_u32<const LANE: i32>(a: *const u32, b: uint32x4x3_t) ->
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9563,6 +10995,8 @@ pub unsafe fn vld3_lane_p8<const LANE: i32>(a: *const p8, b: poly8x8x3_t) -> pol
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9576,6 +11010,8 @@ pub unsafe fn vld3_lane_p16<const LANE: i32>(a: *const p16, b: poly16x4x3_t) ->
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9589,6 +11025,8 @@ pub unsafe fn vld3q_lane_p16<const LANE: i32>(a: *const p16, b: poly16x8x3_t) ->
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -9605,6 +11043,8 @@ vld3_lane_f32_(a as _, b.0, b.1, b.2, LANE, 4)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -9622,6 +11062,8 @@ vld3_lane_f32_(b.0, b.1, b.2, LANE as i64, a as _)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -9638,6 +11080,8 @@ vld3q_lane_f32_(a as _, b.0, b.1, b.2, LANE, 4)
}
/// Load multiple 3-element structures to three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -9655,6 +11099,8 @@ vld3q_lane_f32_(b.0, b.1, b.2, LANE as i64, a as _)
}
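// Illustrative sketch (not part of this patch): the vld3_lane loads, assuming vld3_lane_u8
// from core::arch::aarch64 with the LANE const generic shown above.
#[cfg(target_arch = "aarch64")]
unsafe fn reload_rgb_lane_zero(
    src: &[u8; 3],
    regs: core::arch::aarch64::uint8x8x3_t,
) -> core::arch::aarch64::uint8x8x3_t {
    use core::arch::aarch64::vld3_lane_u8;
    // Replaces lane 0 of each of the three vectors with src[0], src[1], src[2]; other lanes are untouched.
    vld3_lane_u8::<0>(src.as_ptr(), regs)
}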
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -9669,6 +11115,8 @@ vld4_s8_(a as *const i8, 1)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -9684,6 +11132,8 @@ vld4_s8_(a as _)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -9698,6 +11148,8 @@ vld4_s16_(a as *const i8, 2)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -9713,6 +11165,8 @@ vld4_s16_(a as _)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -9727,6 +11181,8 @@ vld4_s32_(a as *const i8, 4)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -9742,6 +11198,8 @@ vld4_s32_(a as _)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -9756,6 +11214,8 @@ vld4q_s8_(a as *const i8, 1)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -9771,6 +11231,8 @@ vld4q_s8_(a as _)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -9785,6 +11247,8 @@ vld4q_s16_(a as *const i8, 2)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -9800,6 +11264,8 @@ vld4q_s16_(a as _)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -9814,6 +11280,8 @@ vld4q_s32_(a as *const i8, 4)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -9829,6 +11297,8 @@ vld4q_s32_(a as _)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -9843,6 +11313,8 @@ vld4_s64_(a as *const i8, 8)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -9858,6 +11330,8 @@ vld4_s64_(a as _)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9869,6 +11343,8 @@ pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t {
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9880,6 +11356,8 @@ pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t {
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9891,6 +11369,8 @@ pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t {
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9902,6 +11382,8 @@ pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t {
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9913,6 +11395,8 @@ pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t {
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9924,6 +11408,8 @@ pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t {
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9935,6 +11421,8 @@ pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t {
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9946,6 +11434,8 @@ pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t {
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9957,6 +11447,8 @@ pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t {
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9968,6 +11460,8 @@ pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t {
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -9979,6 +11473,8 @@ pub unsafe fn vld4_u64(a: *const u64) -> uint64x1x4_t {
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -9990,6 +11486,8 @@ pub unsafe fn vld4_p64(a: *const p64) -> poly64x1x4_t {
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -10004,6 +11502,8 @@ vld4_f32_(a as *const i8, 4)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -10019,6 +11519,8 @@ vld4_f32_(a as _)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -10033,6 +11535,8 @@ vld4q_f32_(a as *const i8, 4)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -10048,6 +11552,8 @@ vld4q_f32_(a as _)
}
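
A minimal usage sketch for the vld4 loads above (editorial note, not part of the diff): de-interleaving eight RGBA pixels with vld4_u8, whose signature appears in the hunks above. The horizontal add vaddlv_u8 is assumed to be available alongside it. AArch64 only.

    #[cfg(target_arch = "aarch64")]
    fn sum_red_channel(pixels: &[u8; 32]) -> u32 {
        use core::arch::aarch64::*;
        // SAFETY: `pixels` holds 32 readable bytes (8 interleaved RGBA pixels)
        // and NEON is part of the AArch64 baseline.
        unsafe {
            // v.0 = all R bytes, v.1 = G, v.2 = B, v.3 = A.
            let v: uint8x8x4_t = vld4_u8(pixels.as_ptr());
            u32::from(vaddlv_u8(v.0)) // horizontal sum of the red channel
        }
    }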
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -10062,6 +11568,8 @@ vld4_dup_s8_(a as *const i8, 1)
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -10077,6 +11585,8 @@ vld4_dup_s8_(a as _)
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -10091,6 +11601,8 @@ vld4_dup_s16_(a as *const i8, 2)
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -10106,6 +11618,8 @@ vld4_dup_s16_(a as _)
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -10120,6 +11634,8 @@ vld4_dup_s32_(a as *const i8, 4)
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -10135,6 +11651,8 @@ vld4_dup_s32_(a as _)
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -10149,6 +11667,8 @@ vld4q_dup_s8_(a as *const i8, 1)
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -10164,6 +11684,8 @@ vld4q_dup_s8_(a as _)
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -10178,6 +11700,8 @@ vld4q_dup_s16_(a as *const i8, 2)
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -10193,6 +11717,8 @@ vld4q_dup_s16_(a as _)
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -10207,6 +11733,8 @@ vld4q_dup_s32_(a as *const i8, 4)
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -10222,6 +11750,8 @@ vld4q_dup_s32_(a as _)
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -10236,6 +11766,8 @@ vld4_dup_s64_(a as *const i8, 8)
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -10251,6 +11783,8 @@ vld4_dup_s64_(a as _)
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10262,6 +11796,8 @@ pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t {
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10273,6 +11809,8 @@ pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t {
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10284,6 +11822,8 @@ pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t {
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10295,6 +11835,8 @@ pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t {
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10306,6 +11848,8 @@ pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t {
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10317,6 +11861,8 @@ pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t {
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10328,6 +11874,8 @@ pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t {
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10339,6 +11887,8 @@ pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t {
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10350,6 +11900,8 @@ pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t {
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10361,6 +11913,8 @@ pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t {
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10372,6 +11926,8 @@ pub unsafe fn vld4_dup_u64(a: *const u64) -> uint64x1x4_t {
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -10383,6 +11939,8 @@ pub unsafe fn vld4_dup_p64(a: *const p64) -> poly64x1x4_t {
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -10397,6 +11955,8 @@ vld4_dup_f32_(a as *const i8, 4)
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -10412,6 +11972,8 @@ vld4_dup_f32_(a as _)
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -10426,6 +11988,8 @@ vld4q_dup_f32_(a as *const i8, 4)
}
/// Load single 4-element structure and replicate to all lanes of four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -10441,6 +12005,8 @@ vld4q_dup_f32_(a as _)
}
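
A usage sketch for the vld4*_dup loads above (editorial note, not part of the diff): vld4_dup_u8 reads one 4-byte structure and replicates it across every lane of four registers. Both signatures used here, vld4_dup_u8 and vst1_lane_u8, appear elsewhere in this diff. AArch64 only.

    #[cfg(target_arch = "aarch64")]
    fn splat_alpha(color: &[u8; 4]) -> u8 {
        use core::arch::aarch64::*;
        // SAFETY: `color` provides 4 readable bytes, `out` is writable, and
        // NEON is part of the AArch64 baseline.
        unsafe {
            // Every lane of v.0 is color[0] (R), ..., every lane of v.3 is color[3] (A).
            let v: uint8x8x4_t = vld4_dup_u8(color.as_ptr());
            let mut out = 0u8;
            vst1_lane_u8::<7>(&mut out, v.3); // any lane works; they are all equal
            out
        }
    }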
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -10457,6 +12023,8 @@ vld4_lane_s8_(a as _, b.0, b.1, b.2, b.3, LANE, 1)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -10474,6 +12042,8 @@ vld4_lane_s8_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -10490,6 +12060,8 @@ vld4_lane_s16_(a as _, b.0, b.1, b.2, b.3, LANE, 2)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -10507,6 +12079,8 @@ vld4_lane_s16_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -10523,6 +12097,8 @@ vld4_lane_s32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -10540,6 +12116,8 @@ vld4_lane_s32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -10556,6 +12134,8 @@ vld4q_lane_s16_(a as _, b.0, b.1, b.2, b.3, LANE, 2)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -10573,6 +12153,8 @@ vld4q_lane_s16_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -10589,6 +12171,8 @@ vld4q_lane_s32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -10606,6 +12190,8 @@ vld4q_lane_s32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10619,6 +12205,8 @@ pub unsafe fn vld4_lane_u8<const LANE: i32>(a: *const u8, b: uint8x8x4_t) -> uin
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10632,6 +12220,8 @@ pub unsafe fn vld4_lane_u16<const LANE: i32>(a: *const u16, b: uint16x4x4_t) ->
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10645,6 +12235,8 @@ pub unsafe fn vld4_lane_u32<const LANE: i32>(a: *const u32, b: uint32x2x4_t) ->
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10658,6 +12250,8 @@ pub unsafe fn vld4q_lane_u16<const LANE: i32>(a: *const u16, b: uint16x8x4_t) ->
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10671,6 +12265,8 @@ pub unsafe fn vld4q_lane_u32<const LANE: i32>(a: *const u32, b: uint32x4x4_t) ->
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10684,6 +12280,8 @@ pub unsafe fn vld4_lane_p8<const LANE: i32>(a: *const p8, b: poly8x8x4_t) -> pol
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10697,6 +12295,8 @@ pub unsafe fn vld4_lane_p16<const LANE: i32>(a: *const p16, b: poly16x4x4_t) ->
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10710,6 +12310,8 @@ pub unsafe fn vld4q_lane_p16<const LANE: i32>(a: *const p16, b: poly16x8x4_t) ->
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -10726,6 +12328,8 @@ vld4_lane_f32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -10743,6 +12347,8 @@ vld4_lane_f32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -10759,6 +12365,8 @@ vld4q_lane_f32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
}
/// Load multiple 4-element structures to four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -10776,6 +12384,8 @@ vld4q_lane_f32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
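
A usage sketch for the vld4*_lane loads above (editorial note, not part of the diff): vld4_lane_u8 overwrites a single lane of four existing vectors with one 4-byte structure. vld4_u8 and vld4_lane_u8 are taken from this diff; the re-interleaving store vst4_u8 is an assumption. AArch64 only.

    #[cfg(target_arch = "aarch64")]
    fn replace_first_pixel(pixels: &mut [u8; 32], replacement: &[u8; 4]) {
        use core::arch::aarch64::*;
        // SAFETY: both buffers are valid for the accesses below and NEON is
        // part of the AArch64 baseline.
        unsafe {
            // De-interleave all eight pixels, then reload only lane 0 (the
            // first pixel) from `replacement`; lanes 1..=7 are untouched.
            let v = vld4_u8(pixels.as_ptr());
            let v = vld4_lane_u8::<0>(replacement.as_ptr(), v);
            vst4_u8(pixels.as_mut_ptr(), v); // interleave and write back
        }
    }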
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10789,6 +12399,8 @@ pub unsafe fn vst1_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10802,6 +12414,8 @@ pub unsafe fn vst1_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10815,6 +12429,8 @@ pub unsafe fn vst1_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10828,6 +12444,8 @@ pub unsafe fn vst1_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10841,6 +12459,8 @@ pub unsafe fn vst1q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10854,6 +12474,8 @@ pub unsafe fn vst1q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10867,6 +12489,8 @@ pub unsafe fn vst1q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10880,6 +12504,8 @@ pub unsafe fn vst1q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10893,6 +12519,8 @@ pub unsafe fn vst1_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10906,6 +12534,8 @@ pub unsafe fn vst1_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10919,6 +12549,8 @@ pub unsafe fn vst1_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10932,6 +12564,8 @@ pub unsafe fn vst1_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10945,6 +12579,8 @@ pub unsafe fn vst1q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10958,6 +12594,8 @@ pub unsafe fn vst1q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10971,6 +12609,8 @@ pub unsafe fn vst1q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10984,6 +12624,8 @@ pub unsafe fn vst1q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -10997,6 +12639,8 @@ pub unsafe fn vst1_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -11010,6 +12654,8 @@ pub unsafe fn vst1_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -11023,6 +12669,8 @@ pub unsafe fn vst1q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -11036,6 +12684,8 @@ pub unsafe fn vst1q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -11049,6 +12699,8 @@ pub unsafe fn vst1_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -11062,6 +12714,8 @@ pub unsafe fn vst1q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -11075,6 +12729,8 @@ pub unsafe fn vst1_lane_f32<const LANE: i32>(a: *mut f32, b: float32x2_t) {
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -11088,6 +12744,8 @@ pub unsafe fn vst1q_lane_f32<const LANE: i32>(a: *mut f32, b: float32x4_t) {
}
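
A usage sketch for the vst1*_lane stores above (editorial note, not part of the diff): after a vector computation, vst1q_lane_f32 writes a single chosen lane back to memory instead of the whole register. Its signature is in the hunk just above; vld1q_f32 and vmulq_f32 are assumed to be available alongside it. AArch64 only.

    #[cfg(target_arch = "aarch64")]
    fn last_product(a: &[f32; 4], b: &[f32; 4]) -> f32 {
        use core::arch::aarch64::*;
        let mut out = 0.0f32;
        // SAFETY: both inputs provide four readable f32s, `out` is writable,
        // and NEON is part of the AArch64 baseline.
        unsafe {
            let prod = vmulq_f32(vld1q_f32(a.as_ptr()), vld1q_f32(b.as_ptr()));
            vst1q_lane_f32::<3>(&mut out, prod); // store only lane 3 (a[3] * b[3])
        }
        out
    }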
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x2)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -11102,6 +12760,8 @@ vst1_s8_x2_(a, b.0, b.1)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x2)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -11117,6 +12777,8 @@ vst1_s8_x2_(b.0, b.1, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x2)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -11131,6 +12793,8 @@ vst1_s16_x2_(a, b.0, b.1)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x2)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -11146,6 +12810,8 @@ vst1_s16_x2_(b.0, b.1, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x2)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -11160,6 +12826,8 @@ vst1_s32_x2_(a, b.0, b.1)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x2)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -11175,6 +12843,8 @@ vst1_s32_x2_(b.0, b.1, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x2)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -11189,6 +12859,8 @@ vst1_s64_x2_(a, b.0, b.1)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x2)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -11204,6 +12876,8 @@ vst1_s64_x2_(b.0, b.1, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x2)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -11218,6 +12892,8 @@ vst1q_s8_x2_(a, b.0, b.1)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x2)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -11233,6 +12909,8 @@ vst1q_s8_x2_(b.0, b.1, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x2)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -11247,6 +12925,8 @@ vst1q_s16_x2_(a, b.0, b.1)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x2)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -11262,6 +12942,8 @@ vst1q_s16_x2_(b.0, b.1, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x2)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -11276,6 +12958,8 @@ vst1q_s32_x2_(a, b.0, b.1)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x2)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -11291,6 +12975,8 @@ vst1q_s32_x2_(b.0, b.1, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x2)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -11305,6 +12991,8 @@ vst1q_s64_x2_(a, b.0, b.1)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x2)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -11320,6 +13008,8 @@ vst1q_s64_x2_(b.0, b.1, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x3)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -11334,6 +13024,8 @@ vst1_s8_x3_(a, b.0, b.1, b.2)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x3)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -11349,6 +13041,8 @@ vst1_s8_x3_(b.0, b.1, b.2, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x3)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -11363,6 +13057,8 @@ vst1_s16_x3_(a, b.0, b.1, b.2)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x3)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -11378,6 +13074,8 @@ vst1_s16_x3_(b.0, b.1, b.2, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x3)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -11392,6 +13090,8 @@ vst1_s32_x3_(a, b.0, b.1, b.2)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x3)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -11407,6 +13107,8 @@ vst1_s32_x3_(b.0, b.1, b.2, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x3)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -11421,6 +13123,8 @@ vst1_s64_x3_(a, b.0, b.1, b.2)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x3)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -11436,6 +13140,8 @@ vst1_s64_x3_(b.0, b.1, b.2, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x3)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -11450,6 +13156,8 @@ vst1q_s8_x3_(a, b.0, b.1, b.2)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x3)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -11465,6 +13173,8 @@ vst1q_s8_x3_(b.0, b.1, b.2, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x3)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -11479,6 +13189,8 @@ vst1q_s16_x3_(a, b.0, b.1, b.2)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x3)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -11494,6 +13206,8 @@ vst1q_s16_x3_(b.0, b.1, b.2, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x3)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -11508,6 +13222,8 @@ vst1q_s32_x3_(a, b.0, b.1, b.2)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x3)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -11523,6 +13239,8 @@ vst1q_s32_x3_(b.0, b.1, b.2, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x3)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -11537,6 +13255,8 @@ vst1q_s64_x3_(a, b.0, b.1, b.2)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x3)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -11552,6 +13272,8 @@ vst1q_s64_x3_(b.0, b.1, b.2, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x4)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -11566,6 +13288,8 @@ vst1_s8_x4_(a, b.0, b.1, b.2, b.3)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -11581,6 +13305,8 @@ vst1_s8_x4_(b.0, b.1, b.2, b.3, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x4)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -11595,6 +13321,8 @@ vst1_s16_x4_(a, b.0, b.1, b.2, b.3)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -11610,6 +13338,8 @@ vst1_s16_x4_(b.0, b.1, b.2, b.3, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x4)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -11624,6 +13354,8 @@ vst1_s32_x4_(a, b.0, b.1, b.2, b.3)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -11639,6 +13371,8 @@ vst1_s32_x4_(b.0, b.1, b.2, b.3, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x4)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -11653,6 +13387,8 @@ vst1_s64_x4_(a, b.0, b.1, b.2, b.3)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -11668,6 +13404,8 @@ vst1_s64_x4_(b.0, b.1, b.2, b.3, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x4)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -11682,6 +13420,8 @@ vst1q_s8_x4_(a, b.0, b.1, b.2, b.3)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -11697,6 +13437,8 @@ vst1q_s8_x4_(b.0, b.1, b.2, b.3, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x4)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -11711,6 +13453,8 @@ vst1q_s16_x4_(a, b.0, b.1, b.2, b.3)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -11726,6 +13470,8 @@ vst1q_s16_x4_(b.0, b.1, b.2, b.3, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x4)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -11740,6 +13486,8 @@ vst1q_s32_x4_(a, b.0, b.1, b.2, b.3)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -11755,6 +13503,8 @@ vst1q_s32_x4_(b.0, b.1, b.2, b.3, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x4)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -11769,6 +13519,8 @@ vst1q_s64_x4_(a, b.0, b.1, b.2, b.3)
}
/// Store multiple single-element structures from one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -11784,6 +13536,8 @@ vst1q_s64_x4_(b.0, b.1, b.2, b.3, a)
}
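
A usage sketch for the vst1*_x2/_x3/_x4 stores above (editorial note, not part of the diff): these write two, three, or four whole vectors to consecutive memory in one call. The public signature of vst1_s8_x2 wrapped by the vst1_s8_x2_ shims above is assumed to be (a: *mut i8, b: int8x8x2_t), matching the unsigned variants documented below; vdup_n_s8 is also an assumption. AArch64 only.

    #[cfg(target_arch = "aarch64")]
    fn fill_pattern(out: &mut [i8; 16]) {
        use core::arch::aarch64::*;
        // SAFETY: `out` provides 16 writable bytes and NEON is part of the
        // AArch64 baseline.
        unsafe {
            let pair = int8x8x2_t(vdup_n_s8(1), vdup_n_s8(2));
            vst1_s8_x2(out.as_mut_ptr(), pair); // out = eight 1s followed by eight 2s
        }
    }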
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -11795,6 +13549,8 @@ pub unsafe fn vst1_u8_x2(a: *mut u8, b: uint8x8x2_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -11806,6 +13562,8 @@ pub unsafe fn vst1_u16_x2(a: *mut u16, b: uint16x4x2_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -11817,6 +13575,8 @@ pub unsafe fn vst1_u32_x2(a: *mut u32, b: uint32x2x2_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -11828,6 +13588,8 @@ pub unsafe fn vst1_u64_x2(a: *mut u64, b: uint64x1x2_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -11839,6 +13601,8 @@ pub unsafe fn vst1q_u8_x2(a: *mut u8, b: uint8x16x2_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -11850,6 +13614,8 @@ pub unsafe fn vst1q_u16_x2(a: *mut u16, b: uint16x8x2_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -11861,6 +13627,8 @@ pub unsafe fn vst1q_u32_x2(a: *mut u32, b: uint32x4x2_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -11872,6 +13640,8 @@ pub unsafe fn vst1q_u64_x2(a: *mut u64, b: uint64x2x2_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -11883,6 +13653,8 @@ pub unsafe fn vst1_u8_x3(a: *mut u8, b: uint8x8x3_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -11894,6 +13666,8 @@ pub unsafe fn vst1_u16_x3(a: *mut u16, b: uint16x4x3_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -11905,6 +13679,8 @@ pub unsafe fn vst1_u32_x3(a: *mut u32, b: uint32x2x3_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -11916,6 +13692,8 @@ pub unsafe fn vst1_u64_x3(a: *mut u64, b: uint64x1x3_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -11927,6 +13705,8 @@ pub unsafe fn vst1q_u8_x3(a: *mut u8, b: uint8x16x3_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -11938,6 +13718,8 @@ pub unsafe fn vst1q_u16_x3(a: *mut u16, b: uint16x8x3_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -11949,6 +13731,8 @@ pub unsafe fn vst1q_u32_x3(a: *mut u32, b: uint32x4x3_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -11960,6 +13744,8 @@ pub unsafe fn vst1q_u64_x3(a: *mut u64, b: uint64x2x3_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -11971,6 +13757,8 @@ pub unsafe fn vst1_u8_x4(a: *mut u8, b: uint8x8x4_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -11982,6 +13770,8 @@ pub unsafe fn vst1_u16_x4(a: *mut u16, b: uint16x4x4_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -11993,6 +13783,8 @@ pub unsafe fn vst1_u32_x4(a: *mut u32, b: uint32x2x4_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -12004,6 +13796,8 @@ pub unsafe fn vst1_u64_x4(a: *mut u64, b: uint64x1x4_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -12015,6 +13809,8 @@ pub unsafe fn vst1q_u8_x4(a: *mut u8, b: uint8x16x4_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -12026,6 +13822,8 @@ pub unsafe fn vst1q_u16_x4(a: *mut u16, b: uint16x8x4_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -12037,6 +13835,8 @@ pub unsafe fn vst1q_u32_x4(a: *mut u32, b: uint32x4x4_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -12048,6 +13848,8 @@ pub unsafe fn vst1q_u64_x4(a: *mut u64, b: uint64x2x4_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -12059,6 +13861,8 @@ pub unsafe fn vst1_p8_x2(a: *mut p8, b: poly8x8x2_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -12070,6 +13874,8 @@ pub unsafe fn vst1_p8_x3(a: *mut p8, b: poly8x8x3_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -12081,6 +13887,8 @@ pub unsafe fn vst1_p8_x4(a: *mut p8, b: poly8x8x4_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -12092,6 +13900,8 @@ pub unsafe fn vst1q_p8_x2(a: *mut p8, b: poly8x16x2_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -12103,6 +13913,8 @@ pub unsafe fn vst1q_p8_x3(a: *mut p8, b: poly8x16x3_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -12114,6 +13926,8 @@ pub unsafe fn vst1q_p8_x4(a: *mut p8, b: poly8x16x4_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -12125,6 +13939,8 @@ pub unsafe fn vst1_p16_x2(a: *mut p16, b: poly16x4x2_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -12136,6 +13952,8 @@ pub unsafe fn vst1_p16_x3(a: *mut p16, b: poly16x4x3_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -12147,6 +13965,8 @@ pub unsafe fn vst1_p16_x4(a: *mut p16, b: poly16x4x4_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -12158,6 +13978,8 @@ pub unsafe fn vst1q_p16_x2(a: *mut p16, b: poly16x8x2_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -12169,6 +13991,8 @@ pub unsafe fn vst1q_p16_x3(a: *mut p16, b: poly16x8x3_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -12180,6 +14004,8 @@ pub unsafe fn vst1q_p16_x4(a: *mut p16, b: poly16x8x4_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x2)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -12191,6 +14017,8 @@ pub unsafe fn vst1_p64_x2(a: *mut p64, b: poly64x1x2_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x3)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -12202,6 +14030,8 @@ pub unsafe fn vst1_p64_x3(a: *mut p64, b: poly64x1x3_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x4)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -12213,6 +14043,8 @@ pub unsafe fn vst1_p64_x4(a: *mut p64, b: poly64x1x4_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x2)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -12224,6 +14056,8 @@ pub unsafe fn vst1q_p64_x2(a: *mut p64, b: poly64x2x2_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x3)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -12235,6 +14069,8 @@ pub unsafe fn vst1q_p64_x3(a: *mut p64, b: poly64x2x3_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x4)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -12246,6 +14082,8 @@ pub unsafe fn vst1q_p64_x4(a: *mut p64, b: poly64x2x4_t) {
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x2)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -12260,6 +14098,8 @@ vst1_f32_x2_(a, b.0, b.1)
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x2)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -12275,6 +14115,8 @@ vst1_f32_x2_(b.0, b.1, a)
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x2)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -12289,6 +14131,8 @@ vst1q_f32_x2_(a, b.0, b.1)
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x2)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -12304,6 +14148,8 @@ vst1q_f32_x2_(b.0, b.1, a)
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x3)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -12318,6 +14164,8 @@ vst1_f32_x3_(a, b.0, b.1, b.2)
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x3)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -12333,6 +14181,8 @@ vst1_f32_x3_(b.0, b.1, b.2, a)
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x3)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -12347,6 +14197,8 @@ vst1q_f32_x3_(a, b.0, b.1, b.2)
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x3)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -12362,6 +14214,8 @@ vst1q_f32_x3_(b.0, b.1, b.2, a)
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x4)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -12376,6 +14230,8 @@ vst1_f32_x4_(a, b.0, b.1, b.2, b.3)
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -12391,6 +14247,8 @@ vst1_f32_x4_(b.0, b.1, b.2, b.3, a)
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x4)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -12405,6 +14263,8 @@ vst1q_f32_x4_(a, b.0, b.1, b.2, b.3)
}
/// Store multiple single-element structures to one, two, three, or four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -12420,6 +14280,8 @@ vst1q_f32_x4_(b.0, b.1, b.2, b.3, a)
}
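// A minimal sketch of how the vst1*_x2/_x3/_x4 family documented above can be
// used on aarch64, assuming the core::arch::aarch64 NEON intrinsics (including
// the _x2 variants) are available on the toolchain: load 32 contiguous bytes
// into a register pair and store them back with vst1q_u8_x2.
#[cfg(target_arch = "aarch64")]
fn copy_32_bytes(src: &[u8; 32], dst: &mut [u8; 32]) {
    use core::arch::aarch64::{vld1q_u8_x2, vst1q_u8_x2};
    // SAFETY: NEON is a baseline feature on aarch64, and both buffers hold
    // exactly the 32 bytes that the register pair reads and writes.
    unsafe {
        let pair = vld1q_u8_x2(src.as_ptr());
        vst1q_u8_x2(dst.as_mut_ptr(), pair);
    }
}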
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -12434,6 +14296,8 @@ vst2_s8_(a as _, b.0, b.1, 1)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -12449,6 +14313,8 @@ vst2_s8_(b.0, b.1, a as _)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -12463,6 +14329,8 @@ vst2_s16_(a as _, b.0, b.1, 2)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -12478,6 +14346,8 @@ vst2_s16_(b.0, b.1, a as _)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -12492,6 +14362,8 @@ vst2_s32_(a as _, b.0, b.1, 4)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -12507,6 +14379,8 @@ vst2_s32_(b.0, b.1, a as _)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -12521,6 +14395,8 @@ vst2q_s8_(a as _, b.0, b.1, 1)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -12536,6 +14412,8 @@ vst2q_s8_(b.0, b.1, a as _)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -12550,6 +14428,8 @@ vst2q_s16_(a as _, b.0, b.1, 2)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -12565,6 +14445,8 @@ vst2q_s16_(b.0, b.1, a as _)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -12579,6 +14461,8 @@ vst2q_s32_(a as _, b.0, b.1, 4)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -12594,6 +14478,8 @@ vst2q_s32_(b.0, b.1, a as _)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -12608,6 +14494,8 @@ vst2_s64_(a as _, b.0, b.1, 8)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -12623,6 +14511,8 @@ vst2_s64_(b.0, b.1, a as _)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -12634,6 +14524,8 @@ pub unsafe fn vst2_u8(a: *mut u8, b: uint8x8x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -12645,6 +14537,8 @@ pub unsafe fn vst2_u16(a: *mut u16, b: uint16x4x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -12656,6 +14550,8 @@ pub unsafe fn vst2_u32(a: *mut u32, b: uint32x2x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -12667,6 +14563,8 @@ pub unsafe fn vst2q_u8(a: *mut u8, b: uint8x16x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -12678,6 +14576,8 @@ pub unsafe fn vst2q_u16(a: *mut u16, b: uint16x8x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -12689,6 +14589,8 @@ pub unsafe fn vst2q_u32(a: *mut u32, b: uint32x4x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -12700,6 +14602,8 @@ pub unsafe fn vst2_p8(a: *mut p8, b: poly8x8x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -12711,6 +14615,8 @@ pub unsafe fn vst2_p16(a: *mut p16, b: poly16x4x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -12722,6 +14628,8 @@ pub unsafe fn vst2q_p8(a: *mut p8, b: poly8x16x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -12733,6 +14641,8 @@ pub unsafe fn vst2q_p16(a: *mut p16, b: poly16x8x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -12744,6 +14654,8 @@ pub unsafe fn vst2_u64(a: *mut u64, b: uint64x1x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -12755,6 +14667,8 @@ pub unsafe fn vst2_p64(a: *mut p64, b: poly64x1x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -12769,6 +14683,8 @@ vst2_f32_(a as _, b.0, b.1, 4)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -12784,6 +14700,8 @@ vst2_f32_(b.0, b.1, a as _)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -12798,6 +14716,8 @@ vst2q_f32_(a as _, b.0, b.1, 4)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -12813,6 +14733,8 @@ vst2q_f32_(b.0, b.1, a as _)
}
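// A minimal sketch of the interleaving vst2 stores documented above, assuming
// the core::arch::aarch64 NEON API: two planar f32 vectors (for example the
// real and imaginary parts of two complex numbers) are written back to memory
// in interleaved re, im, re, im order.
#[cfg(target_arch = "aarch64")]
unsafe fn store_interleaved(dst: *mut f32,
                            re: core::arch::aarch64::float32x2_t,
                            im: core::arch::aarch64::float32x2_t) {
    use core::arch::aarch64::{float32x2x2_t, vst2_f32};
    // vst2_f32 writes re[0], im[0], re[1], im[1] (four f32 values in total).
    vst2_f32(dst, float32x2x2_t(re, im));
}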
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -12829,6 +14751,8 @@ vst2_lane_s8_(a as _, b.0, b.1, LANE, 1)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -12846,6 +14770,8 @@ vst2_lane_s8_(b.0, b.1, LANE as i64, a as _)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -12862,6 +14788,8 @@ vst2_lane_s16_(a as _, b.0, b.1, LANE, 2)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -12879,6 +14807,8 @@ vst2_lane_s16_(b.0, b.1, LANE as i64, a as _)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -12895,6 +14825,8 @@ vst2_lane_s32_(a as _, b.0, b.1, LANE, 4)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -12912,6 +14844,8 @@ vst2_lane_s32_(b.0, b.1, LANE as i64, a as _)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -12928,6 +14862,8 @@ vst2q_lane_s16_(a as _, b.0, b.1, LANE, 2)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -12945,6 +14881,8 @@ vst2q_lane_s16_(b.0, b.1, LANE as i64, a as _)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -12961,6 +14899,8 @@ vst2q_lane_s32_(a as _, b.0, b.1, LANE, 4)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -12978,6 +14918,8 @@ vst2q_lane_s32_(b.0, b.1, LANE as i64, a as _)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -12991,6 +14933,8 @@ pub unsafe fn vst2_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -13004,6 +14948,8 @@ pub unsafe fn vst2_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -13017,6 +14963,8 @@ pub unsafe fn vst2_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -13030,6 +14978,8 @@ pub unsafe fn vst2q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -13043,6 +14993,8 @@ pub unsafe fn vst2q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -13056,6 +15008,8 @@ pub unsafe fn vst2_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -13069,6 +15023,8 @@ pub unsafe fn vst2_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -13082,6 +15038,8 @@ pub unsafe fn vst2q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8x2_t) {
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -13098,6 +15056,8 @@ vst2_lane_f32_(a as _, b.0, b.1, LANE, 4)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -13115,6 +15075,8 @@ vst2_lane_f32_(b.0, b.1, LANE as i64, a as _)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -13131,6 +15093,8 @@ vst2q_lane_f32_(a as _, b.0, b.1, LANE, 4)
}
/// Store multiple 2-element structures from two registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -13148,6 +15112,8 @@ vst2q_lane_f32_(b.0, b.1, LANE as i64, a as _)
}
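// A minimal sketch of the lane variants documented above, assuming the
// core::arch::aarch64 NEON API: vst2_lane_u16::<LANE> stores only the chosen
// lane from each of the two registers, here lane 3 of a uint16x4x2_t pair.
#[cfg(target_arch = "aarch64")]
unsafe fn store_lane_3(dst: *mut u16, pair: core::arch::aarch64::uint16x4x2_t) {
    use core::arch::aarch64::vst2_lane_u16;
    // Writes pair.0[3] followed by pair.1[3]; LANE must be in 0..4 for a
    // 4-lane vector, which is validated at compile time.
    vst2_lane_u16::<3>(dst, pair);
}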
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -13162,6 +15128,8 @@ vst3_s8_(a as _, b.0, b.1, b.2, 1)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -13177,6 +15145,8 @@ vst3_s8_(b.0, b.1, b.2, a as _)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -13191,6 +15161,8 @@ vst3_s16_(a as _, b.0, b.1, b.2, 2)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -13206,6 +15178,8 @@ vst3_s16_(b.0, b.1, b.2, a as _)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -13220,6 +15194,8 @@ vst3_s32_(a as _, b.0, b.1, b.2, 4)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -13235,6 +15211,8 @@ vst3_s32_(b.0, b.1, b.2, a as _)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -13249,6 +15227,8 @@ vst3q_s8_(a as _, b.0, b.1, b.2, 1)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -13264,6 +15244,8 @@ vst3q_s8_(b.0, b.1, b.2, a as _)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -13278,6 +15260,8 @@ vst3q_s16_(a as _, b.0, b.1, b.2, 2)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -13293,6 +15277,8 @@ vst3q_s16_(b.0, b.1, b.2, a as _)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -13307,6 +15293,8 @@ vst3q_s32_(a as _, b.0, b.1, b.2, 4)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -13322,6 +15310,8 @@ vst3q_s32_(b.0, b.1, b.2, a as _)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -13336,6 +15326,8 @@ vst3_s64_(a as _, b.0, b.1, b.2, 8)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -13351,6 +15343,8 @@ vst3_s64_(b.0, b.1, b.2, a as _)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -13362,6 +15356,8 @@ pub unsafe fn vst3_u8(a: *mut u8, b: uint8x8x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -13373,6 +15369,8 @@ pub unsafe fn vst3_u16(a: *mut u16, b: uint16x4x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -13384,6 +15382,8 @@ pub unsafe fn vst3_u32(a: *mut u32, b: uint32x2x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -13395,6 +15395,8 @@ pub unsafe fn vst3q_u8(a: *mut u8, b: uint8x16x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -13406,6 +15408,8 @@ pub unsafe fn vst3q_u16(a: *mut u16, b: uint16x8x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -13417,6 +15421,8 @@ pub unsafe fn vst3q_u32(a: *mut u32, b: uint32x4x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -13428,6 +15434,8 @@ pub unsafe fn vst3_p8(a: *mut p8, b: poly8x8x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -13439,6 +15447,8 @@ pub unsafe fn vst3_p16(a: *mut p16, b: poly16x4x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -13450,6 +15460,8 @@ pub unsafe fn vst3q_p8(a: *mut p8, b: poly8x16x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -13461,6 +15473,8 @@ pub unsafe fn vst3q_p16(a: *mut p16, b: poly16x8x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -13472,6 +15486,8 @@ pub unsafe fn vst3_u64(a: *mut u64, b: uint64x1x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -13483,6 +15499,8 @@ pub unsafe fn vst3_p64(a: *mut p64, b: poly64x1x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -13497,6 +15515,8 @@ vst3_f32_(a as _, b.0, b.1, b.2, 4)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -13512,6 +15532,8 @@ vst3_f32_(b.0, b.1, b.2, a as _)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -13526,6 +15548,8 @@ vst3q_f32_(a as _, b.0, b.1, b.2, 4)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -13541,6 +15565,8 @@ vst3q_f32_(b.0, b.1, b.2, a as _)
}
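// A minimal sketch of a 3-way interleaving store, assuming the
// core::arch::aarch64 NEON API: three planar byte vectors (for example R, G
// and B planes) are written back as packed RGBRGB... with vst3_u8.
#[cfg(target_arch = "aarch64")]
unsafe fn store_rgb(dst: *mut u8,
                    r: core::arch::aarch64::uint8x8_t,
                    g: core::arch::aarch64::uint8x8_t,
                    b: core::arch::aarch64::uint8x8_t) {
    use core::arch::aarch64::{uint8x8x3_t, vst3_u8};
    // vst3_u8 writes r[0], g[0], b[0], r[1], g[1], b[1], ... (24 bytes total).
    vst3_u8(dst, uint8x8x3_t(r, g, b));
}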
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -13557,6 +15583,8 @@ vst3_lane_s8_(a as _, b.0, b.1, b.2, LANE, 1)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -13574,6 +15602,8 @@ vst3_lane_s8_(b.0, b.1, b.2, LANE as i64, a as _)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -13590,6 +15620,8 @@ vst3_lane_s16_(a as _, b.0, b.1, b.2, LANE, 2)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -13607,6 +15639,8 @@ vst3_lane_s16_(b.0, b.1, b.2, LANE as i64, a as _)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -13623,6 +15657,8 @@ vst3_lane_s32_(a as _, b.0, b.1, b.2, LANE, 4)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -13640,6 +15676,8 @@ vst3_lane_s32_(b.0, b.1, b.2, LANE as i64, a as _)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -13656,6 +15694,8 @@ vst3q_lane_s16_(a as _, b.0, b.1, b.2, LANE, 2)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -13673,6 +15713,8 @@ vst3q_lane_s16_(b.0, b.1, b.2, LANE as i64, a as _)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -13689,6 +15731,8 @@ vst3q_lane_s32_(a as _, b.0, b.1, b.2, LANE, 4)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -13706,6 +15750,8 @@ vst3q_lane_s32_(b.0, b.1, b.2, LANE as i64, a as _)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -13719,6 +15765,8 @@ pub unsafe fn vst3_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -13732,6 +15780,8 @@ pub unsafe fn vst3_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -13745,6 +15795,8 @@ pub unsafe fn vst3_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -13758,6 +15810,8 @@ pub unsafe fn vst3q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -13771,6 +15825,8 @@ pub unsafe fn vst3q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -13784,6 +15840,8 @@ pub unsafe fn vst3_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -13797,6 +15855,8 @@ pub unsafe fn vst3_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -13810,6 +15870,8 @@ pub unsafe fn vst3q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8x3_t) {
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -13826,6 +15888,8 @@ vst3_lane_f32_(a as _, b.0, b.1, b.2, LANE, 4)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -13843,6 +15907,8 @@ vst3_lane_f32_(b.0, b.1, b.2, LANE as i64, a as _)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -13859,6 +15925,8 @@ vst3q_lane_f32_(a as _, b.0, b.1, b.2, LANE, 4)
}
/// Store multiple 3-element structures from three registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -13876,6 +15944,8 @@ vst3q_lane_f32_(b.0, b.1, b.2, LANE as i64, a as _)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -13890,6 +15960,8 @@ vst4_s8_(a as _, b.0, b.1, b.2, b.3, 1)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -13905,6 +15977,8 @@ vst4_s8_(b.0, b.1, b.2, b.3, a as _)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -13919,6 +15993,8 @@ vst4_s16_(a as _, b.0, b.1, b.2, b.3, 2)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -13934,6 +16010,8 @@ vst4_s16_(b.0, b.1, b.2, b.3, a as _)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -13948,6 +16026,8 @@ vst4_s32_(a as _, b.0, b.1, b.2, b.3, 4)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -13963,6 +16043,8 @@ vst4_s32_(b.0, b.1, b.2, b.3, a as _)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -13977,6 +16059,8 @@ vst4q_s8_(a as _, b.0, b.1, b.2, b.3, 1)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -13992,6 +16076,8 @@ vst4q_s8_(b.0, b.1, b.2, b.3, a as _)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -14006,6 +16092,8 @@ vst4q_s16_(a as _, b.0, b.1, b.2, b.3, 2)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -14021,6 +16109,8 @@ vst4q_s16_(b.0, b.1, b.2, b.3, a as _)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -14035,6 +16125,8 @@ vst4q_s32_(a as _, b.0, b.1, b.2, b.3, 4)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -14050,6 +16142,8 @@ vst4q_s32_(b.0, b.1, b.2, b.3, a as _)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -14064,6 +16158,8 @@ vst4_s64_(a as _, b.0, b.1, b.2, b.3, 8)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -14079,6 +16175,8 @@ vst4_s64_(b.0, b.1, b.2, b.3, a as _)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14090,6 +16188,8 @@ pub unsafe fn vst4_u8(a: *mut u8, b: uint8x8x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14101,6 +16201,8 @@ pub unsafe fn vst4_u16(a: *mut u16, b: uint16x4x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14112,6 +16214,8 @@ pub unsafe fn vst4_u32(a: *mut u32, b: uint32x2x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14123,6 +16227,8 @@ pub unsafe fn vst4q_u8(a: *mut u8, b: uint8x16x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14134,6 +16240,8 @@ pub unsafe fn vst4q_u16(a: *mut u16, b: uint16x8x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14145,6 +16253,8 @@ pub unsafe fn vst4q_u32(a: *mut u32, b: uint32x4x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14156,6 +16266,8 @@ pub unsafe fn vst4_p8(a: *mut p8, b: poly8x8x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14167,6 +16279,8 @@ pub unsafe fn vst4_p16(a: *mut p16, b: poly16x4x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14178,6 +16292,8 @@ pub unsafe fn vst4q_p8(a: *mut p8, b: poly8x16x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14189,6 +16305,8 @@ pub unsafe fn vst4q_p16(a: *mut p16, b: poly16x8x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14200,6 +16318,8 @@ pub unsafe fn vst4_u64(a: *mut u64, b: uint64x1x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -14211,6 +16331,8 @@ pub unsafe fn vst4_p64(a: *mut p64, b: poly64x1x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -14225,6 +16347,8 @@ vst4_f32_(a as _, b.0, b.1, b.2, b.3, 4)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -14240,6 +16364,8 @@ vst4_f32_(b.0, b.1, b.2, b.3, a as _)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -14254,6 +16380,8 @@ vst4q_f32_(a as _, b.0, b.1, b.2, b.3, 4)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -14269,6 +16397,8 @@ vst4q_f32_(b.0, b.1, b.2, b.3, a as _)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -14285,6 +16415,8 @@ vst4_lane_s8_(a as _, b.0, b.1, b.2, b.3, LANE, 1)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -14302,6 +16434,8 @@ vst4_lane_s8_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -14318,6 +16452,8 @@ vst4_lane_s16_(a as _, b.0, b.1, b.2, b.3, LANE, 2)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -14335,6 +16471,8 @@ vst4_lane_s16_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -14351,6 +16489,8 @@ vst4_lane_s32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -14368,6 +16508,8 @@ vst4_lane_s32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -14384,6 +16526,8 @@ vst4q_lane_s16_(a as _, b.0, b.1, b.2, b.3, LANE, 2)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -14401,6 +16545,8 @@ vst4q_lane_s16_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -14417,6 +16563,8 @@ vst4q_lane_s32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -14434,6 +16582,8 @@ vst4q_lane_s32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14447,6 +16597,8 @@ pub unsafe fn vst4_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14460,6 +16612,8 @@ pub unsafe fn vst4_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14473,6 +16627,8 @@ pub unsafe fn vst4_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14486,6 +16642,8 @@ pub unsafe fn vst4q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14499,6 +16657,8 @@ pub unsafe fn vst4q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14512,6 +16672,8 @@ pub unsafe fn vst4_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14525,6 +16687,8 @@ pub unsafe fn vst4_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14538,6 +16702,8 @@ pub unsafe fn vst4q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8x4_t) {
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -14554,6 +16720,8 @@ vst4_lane_f32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -14571,6 +16739,8 @@ vst4_lane_f32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -14587,6 +16757,8 @@ vst4q_lane_f32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
}
/// Store multiple 4-element structures from four registers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -14604,6 +16776,8 @@ vst4q_lane_f32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14615,6 +16789,8 @@ pub unsafe fn vmul_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
}
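A minimal sketch of the plain lane-wise multiply (vmul_s8), under the same assumptions (aarch64 target, core::arch::aarch64 paths, illustrative helper name):

#[cfg(target_arch = "aarch64")]
fn vmul_s8_sketch() {
    use core::arch::aarch64::*;
    let a: [i8; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
    let b: [i8; 8] = [2; 8];
    let mut r = [0i8; 8];
    unsafe {
        // r[i] = a[i] * b[i], lane by lane.
        vst1_s8(r.as_mut_ptr(), vmul_s8(vld1_s8(a.as_ptr()), vld1_s8(b.as_ptr())));
    }
    assert_eq!(r, [2, 4, 6, 8, 10, 12, 14, 16]);
}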
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14626,6 +16802,8 @@ pub unsafe fn vmulq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14637,6 +16815,8 @@ pub unsafe fn vmul_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14648,6 +16828,8 @@ pub unsafe fn vmulq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14659,6 +16841,8 @@ pub unsafe fn vmul_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14670,6 +16854,8 @@ pub unsafe fn vmulq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14681,6 +16867,8 @@ pub unsafe fn vmul_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14692,6 +16880,8 @@ pub unsafe fn vmulq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14703,6 +16893,8 @@ pub unsafe fn vmul_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14714,6 +16906,8 @@ pub unsafe fn vmulq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14725,6 +16919,8 @@ pub unsafe fn vmul_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14736,6 +16932,8 @@ pub unsafe fn vmulq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
}
/// Polynomial multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14753,6 +16951,8 @@ vmul_p8_(a, b)
}
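The polynomial variant multiplies each lane carry-lessly over GF(2); a hedged sketch (aarch64 assumed, helper name illustrative):

#[cfg(target_arch = "aarch64")]
fn vmul_p8_sketch() {
    use core::arch::aarch64::*;
    unsafe {
        let a = vdup_n_p8(0b0000_0011); // x + 1
        let b = vdup_n_p8(0b0000_0101); // x^2 + 1
        // Carry-less product: (x + 1)(x^2 + 1) = x^3 + x^2 + x + 1.
        let p = vmul_p8(a, b);
        assert_eq!(vget_lane_p8::<0>(p), 0b0000_1111);
    }
}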
/// Polynomial multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14770,6 +16970,8 @@ vmulq_p8_(a, b)
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14781,6 +16983,8 @@ pub unsafe fn vmul_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14792,6 +16996,8 @@ pub unsafe fn vmulq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
}
/// Vector multiply by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14803,6 +17009,8 @@ pub unsafe fn vmul_n_s16(a: int16x4_t, b: i16) -> int16x4_t {
}
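The _n forms multiply every lane by one scalar; a minimal sketch (aarch64 assumed):

#[cfg(target_arch = "aarch64")]
fn vmul_n_s16_sketch() {
    use core::arch::aarch64::*;
    let a: [i16; 4] = [10, 20, 30, 40];
    let mut r = [0i16; 4];
    unsafe {
        // r[i] = a[i] * 3 for every lane.
        vst1_s16(r.as_mut_ptr(), vmul_n_s16(vld1_s16(a.as_ptr()), 3));
    }
    assert_eq!(r, [30, 60, 90, 120]);
}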
/// Vector multiply by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14814,6 +17022,8 @@ pub unsafe fn vmulq_n_s16(a: int16x8_t, b: i16) -> int16x8_t {
}
/// Vector multiply by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14825,6 +17035,8 @@ pub unsafe fn vmul_n_s32(a: int32x2_t, b: i32) -> int32x2_t {
}
/// Vector multiply by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14836,6 +17048,8 @@ pub unsafe fn vmulq_n_s32(a: int32x4_t, b: i32) -> int32x4_t {
}
/// Vector multiply by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14847,6 +17061,8 @@ pub unsafe fn vmul_n_u16(a: uint16x4_t, b: u16) -> uint16x4_t {
}
/// Vector multiply by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14858,6 +17074,8 @@ pub unsafe fn vmulq_n_u16(a: uint16x8_t, b: u16) -> uint16x8_t {
}
/// Vector multiply by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14869,6 +17087,8 @@ pub unsafe fn vmul_n_u32(a: uint32x2_t, b: u32) -> uint32x2_t {
}
/// Vector multiply by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14880,6 +17100,8 @@ pub unsafe fn vmulq_n_u32(a: uint32x4_t, b: u32) -> uint32x4_t {
}
/// Vector multiply by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14891,6 +17113,8 @@ pub unsafe fn vmul_n_f32(a: float32x2_t, b: f32) -> float32x2_t {
}
/// Vector multiply by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14902,6 +17126,8 @@ pub unsafe fn vmulq_n_f32(a: float32x4_t, b: f32) -> float32x4_t {
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14915,6 +17141,8 @@ pub unsafe fn vmul_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int1
}
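The _lane forms instead pick the scalar out of another vector via the const generic; a sketch (aarch64 assumed):

#[cfg(target_arch = "aarch64")]
fn vmul_lane_s16_sketch() {
    use core::arch::aarch64::*;
    let a: [i16; 4] = [1, 2, 3, 4];
    let b: [i16; 4] = [10, 100, 1000, 10000];
    let mut r = [0i16; 4];
    unsafe {
        // LANE = 1 selects b[1] = 100, so r[i] = a[i] * 100.
        vst1_s16(r.as_mut_ptr(), vmul_lane_s16::<1>(vld1_s16(a.as_ptr()), vld1_s16(b.as_ptr())));
    }
    assert_eq!(r, [100, 200, 300, 400]);
}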
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14928,6 +17156,8 @@ pub unsafe fn vmul_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) -> int
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14941,6 +17171,8 @@ pub unsafe fn vmulq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14954,6 +17186,8 @@ pub unsafe fn vmulq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> in
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14967,6 +17201,8 @@ pub unsafe fn vmul_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int3
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14980,6 +17216,8 @@ pub unsafe fn vmul_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) -> int
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -14993,6 +17231,8 @@ pub unsafe fn vmulq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15006,6 +17246,8 @@ pub unsafe fn vmulq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> in
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15019,6 +17261,8 @@ pub unsafe fn vmul_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t) -> ui
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15032,6 +17276,8 @@ pub unsafe fn vmul_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x8_t) -> u
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15045,6 +17291,8 @@ pub unsafe fn vmulq_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x4_t) -> u
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15058,6 +17306,8 @@ pub unsafe fn vmulq_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t) ->
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15071,6 +17321,8 @@ pub unsafe fn vmul_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t) -> ui
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15084,6 +17336,8 @@ pub unsafe fn vmul_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint32x4_t) -> u
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15097,6 +17351,8 @@ pub unsafe fn vmulq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x2_t) -> u
}
/// Multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15110,6 +17366,8 @@ pub unsafe fn vmulq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t) ->
}
/// Floating-point multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15123,6 +17381,8 @@ pub unsafe fn vmul_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t) ->
}
/// Floating-point multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15136,6 +17396,8 @@ pub unsafe fn vmul_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x4_t) ->
}
/// Floating-point multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15149,6 +17411,8 @@ pub unsafe fn vmulq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x2_t) ->
}
/// Floating-point multiply
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15162,6 +17426,8 @@ pub unsafe fn vmulq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t) -
}
/// Signed multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15179,6 +17445,8 @@ vmull_s8_(a, b)
}
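The vmull_* family widens the result, so the products cannot overflow the source element type; a sketch using vmull_s16 (aarch64 assumed):

#[cfg(target_arch = "aarch64")]
fn vmull_s16_sketch() {
    use core::arch::aarch64::*;
    let a: [i16; 4] = [300, -300, 1000, 2000];
    let b: [i16; 4] = [300, 300, 1000, 2000];
    let mut r = [0i32; 4];
    unsafe {
        // i16 * i16 -> i32 per lane; 2000 * 2000 would overflow i16 but not i32.
        vst1q_s32(r.as_mut_ptr(), vmull_s16(vld1_s16(a.as_ptr()), vld1_s16(b.as_ptr())));
    }
    assert_eq!(r, [90_000, -90_000, 1_000_000, 4_000_000]);
}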
/// Signed multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15196,6 +17464,8 @@ vmull_s16_(a, b)
}
/// Signed multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15213,6 +17483,8 @@ vmull_s32_(a, b)
}
/// Unsigned multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15230,6 +17502,8 @@ vmull_u8_(a, b)
}
/// Unsigned multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15247,6 +17521,8 @@ vmull_u16_(a, b)
}
/// Unsigned multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15264,6 +17540,8 @@ vmull_u32_(a, b)
}
/// Polynomial multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15281,6 +17559,8 @@ vmull_p8_(a, b)
}
/// Vector long multiply with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15292,6 +17572,8 @@ pub unsafe fn vmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t {
}
/// Vector long multiply with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15303,6 +17585,8 @@ pub unsafe fn vmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t {
}
/// Vector long multiply with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15314,6 +17598,8 @@ pub unsafe fn vmull_n_u16(a: uint16x4_t, b: u16) -> uint32x4_t {
}
/// Vector long multiply with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15325,6 +17611,8 @@ pub unsafe fn vmull_n_u32(a: uint32x2_t, b: u32) -> uint64x2_t {
}
/// Vector long multiply by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15338,6 +17626,8 @@ pub unsafe fn vmull_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int
}
/// Vector long multiply by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15351,6 +17641,8 @@ pub unsafe fn vmull_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) -> in
}
/// Vector long multiply by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15364,6 +17656,8 @@ pub unsafe fn vmull_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int
}
/// Vector long multiply by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15377,6 +17671,8 @@ pub unsafe fn vmull_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) -> in
}
/// Vector long multiply by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15390,6 +17686,8 @@ pub unsafe fn vmull_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t) -> u
}
/// Vector long multiply by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15403,6 +17701,8 @@ pub unsafe fn vmull_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x8_t) ->
}
/// Vector long multiply by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15416,6 +17716,8 @@ pub unsafe fn vmull_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t) -> u
}
/// Vector long multiply by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15429,6 +17731,8 @@ pub unsafe fn vmull_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint32x4_t) ->
}
/// Floating-point fused Multiply-Add to accumulator (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))]
@@ -15446,6 +17750,8 @@ vfma_f32_(b, c, a)
}
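Per Arm's description, vfma_f32(a, b, c) computes a + b * c with a single rounding; a minimal sketch (aarch64 assumed):

#[cfg(target_arch = "aarch64")]
fn vfma_f32_sketch() {
    use core::arch::aarch64::*;
    let mut r = [0.0f32; 2];
    unsafe {
        let (a, b, c) = (vdup_n_f32(1.0), vdup_n_f32(2.0), vdup_n_f32(3.0));
        // Fused: 1.0 + 2.0 * 3.0, rounded once per lane.
        vst1_f32(r.as_mut_ptr(), vfma_f32(a, b, c));
    }
    assert_eq!(r, [7.0, 7.0]);
}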
/// Floating-point fused Multiply-Add to accumulator (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))]
@@ -15463,6 +17769,8 @@ vfmaq_f32_(b, c, a)
}
/// Floating-point fused Multiply-Add to accumulator (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))]
@@ -15474,6 +17782,8 @@ pub unsafe fn vfma_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t
}
/// Floating-point fused Multiply-Add to accumulator (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))]
@@ -15485,6 +17795,8 @@ pub unsafe fn vfmaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t
}
/// Floating-point fused multiply-subtract from accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))]
@@ -15497,6 +17809,8 @@ pub unsafe fn vfms_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3
}
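The fused multiply-subtract counterpart computes a - b * c; a sketch (aarch64 assumed):

#[cfg(target_arch = "aarch64")]
fn vfms_f32_sketch() {
    use core::arch::aarch64::*;
    let mut r = [0.0f32; 2];
    unsafe {
        let (a, b, c) = (vdup_n_f32(10.0), vdup_n_f32(2.0), vdup_n_f32(3.0));
        vst1_f32(r.as_mut_ptr(), vfms_f32(a, b, c)); // 10.0 - 2.0 * 3.0
    }
    assert_eq!(r, [4.0, 4.0]);
}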
/// Floating-point fused multiply-subtract from accumulator
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))]
@@ -15509,6 +17823,8 @@ pub unsafe fn vfmsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float
}
/// Floating-point fused Multiply-subtract to accumulator (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))]
@@ -15520,6 +17836,8 @@ pub unsafe fn vfms_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t
}
/// Floating-point fused Multiply-subtract to accumulator (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))]
@@ -15531,6 +17849,8 @@ pub unsafe fn vfmsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t
}
/// Subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15542,6 +17862,8 @@ pub unsafe fn vsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
}
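A minimal sketch of the plain lane-wise (wrapping) subtract (aarch64 assumed):

#[cfg(target_arch = "aarch64")]
fn vsub_s8_sketch() {
    use core::arch::aarch64::*;
    let a: [i8; 8] = [10, 20, 30, 40, 50, 60, 70, 80];
    let b: [i8; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
    let mut r = [0i8; 8];
    unsafe {
        // r[i] = a[i] - b[i], lane by lane.
        vst1_s8(r.as_mut_ptr(), vsub_s8(vld1_s8(a.as_ptr()), vld1_s8(b.as_ptr())));
    }
    assert_eq!(r, [9, 18, 27, 36, 45, 54, 63, 72]);
}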
/// Subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15553,6 +17875,8 @@ pub unsafe fn vsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
}
/// Subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15564,6 +17888,8 @@ pub unsafe fn vsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
}
/// Subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15575,6 +17901,8 @@ pub unsafe fn vsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
}
/// Subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15586,6 +17914,8 @@ pub unsafe fn vsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
}
/// Subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15597,6 +17927,8 @@ pub unsafe fn vsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
}
/// Subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15608,6 +17940,8 @@ pub unsafe fn vsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
}
/// Subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15619,6 +17953,8 @@ pub unsafe fn vsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
}
/// Subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15630,6 +17966,8 @@ pub unsafe fn vsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
}
/// Subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15641,6 +17979,8 @@ pub unsafe fn vsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
}
/// Subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15652,6 +17992,8 @@ pub unsafe fn vsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
}
/// Subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15663,6 +18005,8 @@ pub unsafe fn vsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
}
/// Subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15674,6 +18018,8 @@ pub unsafe fn vsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
}
/// Subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15685,6 +18031,8 @@ pub unsafe fn vsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
}
/// Subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15696,6 +18044,8 @@ pub unsafe fn vsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
}
/// Subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15707,6 +18057,8 @@ pub unsafe fn vsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
}
/// Subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15718,6 +18070,8 @@ pub unsafe fn vsub_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
}
/// Subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15729,6 +18083,8 @@ pub unsafe fn vsubq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
}
/// Bitwise exclusive OR
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15740,6 +18096,8 @@ pub unsafe fn vadd_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
}
/// Bitwise exclusive OR
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15751,6 +18109,8 @@ pub unsafe fn vadd_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
}
/// Bitwise exclusive OR
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15762,6 +18122,8 @@ pub unsafe fn vaddq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
}
/// Bitwise exclusive OR
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15773,6 +18135,8 @@ pub unsafe fn vaddq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
}
/// Bitwise exclusive OR
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15784,6 +18148,8 @@ pub unsafe fn vadd_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
}
/// Bitwise exclusive OR
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15795,6 +18161,8 @@ pub unsafe fn vaddq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
}
/// Bitwise exclusive OR
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p128)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15806,6 +18174,8 @@ pub unsafe fn vaddq_p128(a: p128, b: p128) -> p128 {
}
/// Subtract returning high narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15818,6 +18188,8 @@ pub unsafe fn vsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t {
}
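vsubhn_* subtracts at full width and then keeps only the high half of each difference; a sketch (aarch64 assumed):

#[cfg(target_arch = "aarch64")]
fn vsubhn_s16_sketch() {
    use core::arch::aarch64::*;
    let a: [i16; 8] = [0x1200; 8];
    let b: [i16; 8] = [0x0100; 8];
    let mut r = [0i8; 8];
    unsafe {
        // 0x1200 - 0x0100 = 0x1100; the high byte 0x11 is kept per lane.
        vst1_s8(r.as_mut_ptr(), vsubhn_s16(vld1q_s16(a.as_ptr()), vld1q_s16(b.as_ptr())));
    }
    assert_eq!(r, [0x11; 8]);
}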
/// Subtract returning high narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15830,6 +18202,8 @@ pub unsafe fn vsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t {
}
/// Subtract returning high narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15842,6 +18216,8 @@ pub unsafe fn vsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t {
}
/// Subtract returning high narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15854,6 +18230,8 @@ pub unsafe fn vsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t {
}
/// Subtract returning high narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15866,6 +18244,8 @@ pub unsafe fn vsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t {
}
/// Subtract returning high narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15878,6 +18258,8 @@ pub unsafe fn vsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t {
}
/// Subtract returning high narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15890,6 +18272,8 @@ pub unsafe fn vsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x1
}
/// Subtract returning high narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15902,6 +18286,8 @@ pub unsafe fn vsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16
}
/// Subtract returning high narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15914,6 +18300,8 @@ pub unsafe fn vsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32
}
/// Subtract returning high narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15926,6 +18314,8 @@ pub unsafe fn vsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uin
}
/// Subtract returning high narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15938,6 +18328,8 @@ pub unsafe fn vsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> ui
}
/// Subtract returning high narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15950,6 +18342,8 @@ pub unsafe fn vsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> ui
}
/// Unsigned halving subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15967,6 +18361,8 @@ vhsub_u8_(a, b)
}
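The halving subtract computes (a - b) >> 1 per lane, without losing the intermediate bit to overflow; a sketch (aarch64 assumed):

#[cfg(target_arch = "aarch64")]
fn vhsub_u8_sketch() {
    use core::arch::aarch64::*;
    let a: [u8; 8] = [200, 100, 50, 9, 255, 4, 2, 1];
    let b: [u8; 8] = [100, 40, 10, 3, 1, 0, 0, 1];
    let mut r = [0u8; 8];
    unsafe {
        // e.g. (255 - 1) >> 1 = 127, computed at full precision.
        vst1_u8(r.as_mut_ptr(), vhsub_u8(vld1_u8(a.as_ptr()), vld1_u8(b.as_ptr())));
    }
    assert_eq!(r, [50, 30, 20, 3, 127, 2, 1, 0]);
}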
/// Unsigned halving subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -15984,6 +18380,8 @@ vhsubq_u8_(a, b)
}
/// Unsigned halving subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16001,6 +18399,8 @@ vhsub_u16_(a, b)
}
/// Unsigned halving subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16018,6 +18418,8 @@ vhsubq_u16_(a, b)
}
/// Unsigned halving subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16035,6 +18437,8 @@ vhsub_u32_(a, b)
}
/// Unsigned halving subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16052,6 +18456,8 @@ vhsubq_u32_(a, b)
}
/// Signed halving subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16069,6 +18475,8 @@ vhsub_s8_(a, b)
}
/// Signed halving subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16086,6 +18494,8 @@ vhsubq_s8_(a, b)
}
/// Signed halving subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16103,6 +18513,8 @@ vhsub_s16_(a, b)
}
/// Signed halving subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16120,6 +18532,8 @@ vhsubq_s16_(a, b)
}
/// Signed halving subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16137,6 +18551,8 @@ vhsub_s32_(a, b)
}
/// Signed halving subtract
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16154,6 +18570,8 @@ vhsubq_s32_(a, b)
}
/// Signed Subtract Wide
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16165,6 +18583,8 @@ pub unsafe fn vsubw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t {
}
/// Signed Subtract Wide
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16176,6 +18596,8 @@ pub unsafe fn vsubw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t {
}
/// Signed Subtract Wide
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16187,6 +18609,8 @@ pub unsafe fn vsubw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t {
}
/// Unsigned Subtract Wide
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16198,6 +18622,8 @@ pub unsafe fn vsubw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t {
}
/// Unsigned Subtract Wide
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16209,6 +18635,8 @@ pub unsafe fn vsubw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t {
}
/// Unsigned Subtract Wide
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16220,6 +18648,8 @@ pub unsafe fn vsubw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t {
}
/// Signed Subtract Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16233,6 +18663,8 @@ pub unsafe fn vsubl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t {
}
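vsubl_* widens both operands before subtracting, so the difference cannot wrap; a sketch (aarch64 assumed):

#[cfg(target_arch = "aarch64")]
fn vsubl_s8_sketch() {
    use core::arch::aarch64::*;
    let a = [-128i8; 8];
    let b = [127i8; 8];
    let mut r = [0i16; 8];
    unsafe {
        // -128 - 127 = -255, representable in the widened i16 lanes.
        vst1q_s16(r.as_mut_ptr(), vsubl_s8(vld1_s8(a.as_ptr()), vld1_s8(b.as_ptr())));
    }
    assert_eq!(r, [-255; 8]);
}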
/// Signed Subtract Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16246,6 +18678,8 @@ pub unsafe fn vsubl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t {
}
/// Signed Subtract Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16259,6 +18693,8 @@ pub unsafe fn vsubl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t {
}
/// Unsigned Subtract Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16272,6 +18708,8 @@ pub unsafe fn vsubl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t {
}
/// Unsigned Subtract Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16285,6 +18723,8 @@ pub unsafe fn vsubl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t {
}
/// Unsigned Subtract Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16298,6 +18738,8 @@ pub unsafe fn vsubl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t {
}
/// Maximum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16315,6 +18757,8 @@ vmax_s8_(a, b)
}
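A minimal sketch of the lane-wise maximum (aarch64 assumed):

#[cfg(target_arch = "aarch64")]
fn vmax_s8_sketch() {
    use core::arch::aarch64::*;
    let a: [i8; 8] = [1, -2, 3, -4, 5, -6, 7, -8];
    let b = [0i8; 8];
    let mut r = [0i8; 8];
    unsafe {
        // Per-lane max against zero, i.e. a cheap ReLU-style clamp.
        vst1_s8(r.as_mut_ptr(), vmax_s8(vld1_s8(a.as_ptr()), vld1_s8(b.as_ptr())));
    }
    assert_eq!(r, [1, 0, 3, 0, 5, 0, 7, 0]);
}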
/// Maximum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16332,6 +18776,8 @@ vmaxq_s8_(a, b)
}
/// Maximum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16349,6 +18795,8 @@ vmax_s16_(a, b)
}
/// Maximum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16366,6 +18814,8 @@ vmaxq_s16_(a, b)
}
/// Maximum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16383,6 +18833,8 @@ vmax_s32_(a, b)
}
/// Maximum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16400,6 +18852,8 @@ vmaxq_s32_(a, b)
}
/// Maximum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16417,6 +18871,8 @@ vmax_u8_(a, b)
}
/// Maximum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16434,6 +18890,8 @@ vmaxq_u8_(a, b)
}
/// Maximum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16451,6 +18909,8 @@ vmax_u16_(a, b)
}
/// Maximum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16468,6 +18928,8 @@ vmaxq_u16_(a, b)
}
/// Maximum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16485,6 +18947,8 @@ vmax_u32_(a, b)
}
/// Maximum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16502,6 +18966,8 @@ vmaxq_u32_(a, b)
}
/// Maximum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16519,6 +18985,8 @@ vmax_f32_(a, b)
}
/// Maximum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16536,6 +19004,8 @@ vmaxq_f32_(a, b)
}
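Illustrative only (not in the diff): per-lane maximum with vmax_s8, assuming an AArch64 target with NEON available.

#[cfg(target_arch = "aarch64")]
fn lanewise_max_sketch() {
    use core::arch::aarch64::{int8x8_t, vmax_s8};
    use core::mem::transmute;
    unsafe {
        let a: int8x8_t = transmute([1i8, -2, 3, -4, 5, -6, 7, -8]);
        let b: int8x8_t = transmute([0i8; 8]);
        // Each lane keeps the larger value, so negative lanes of `a` become 0.
        let r: [i8; 8] = transmute(vmax_s8(a, b));
        assert_eq!(r, [1, 0, 3, 0, 5, 0, 7, 0]);
    }
}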
/// Floating-point Maximum Number (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))]
@@ -16553,6 +19023,8 @@ vmaxnm_f32_(a, b)
}
/// Floating-point Maximum Number (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))]
@@ -16570,6 +19042,8 @@ vmaxnmq_f32_(a, b)
}
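A hedged sketch (not from the patch) of why two floating-point maxima exist: vmax_f32 propagates NaN, while vmaxnm_f32 follows IEEE 754 maxNum and prefers the numeric operand. Assumes an AArch64 target.

#[cfg(target_arch = "aarch64")]
fn max_number_sketch() {
    use core::arch::aarch64::{float32x2_t, vmax_f32, vmaxnm_f32};
    use core::mem::transmute;
    unsafe {
        let a: float32x2_t = transmute([f32::NAN, 2.0f32]);
        let b: float32x2_t = transmute([1.0f32, 4.0f32]);
        let max: [f32; 2] = transmute(vmax_f32(a, b));
        let maxnm: [f32; 2] = transmute(vmaxnm_f32(a, b));
        assert!(max[0].is_nan());   // NaN wins with vmax
        assert_eq!(maxnm[0], 1.0);  // the number wins with vmaxnm
        assert_eq!((max[1], maxnm[1]), (4.0, 4.0));
    }
}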
/// Minimum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16587,6 +19061,8 @@ vmin_s8_(a, b)
}
/// Minimum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16604,6 +19080,8 @@ vminq_s8_(a, b)
}
/// Minimum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16621,6 +19099,8 @@ vmin_s16_(a, b)
}
/// Minimum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16638,6 +19118,8 @@ vminq_s16_(a, b)
}
/// Minimum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16655,6 +19137,8 @@ vmin_s32_(a, b)
}
/// Minimum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16672,6 +19156,8 @@ vminq_s32_(a, b)
}
/// Minimum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16689,6 +19175,8 @@ vmin_u8_(a, b)
}
/// Minimum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16706,6 +19194,8 @@ vminq_u8_(a, b)
}
/// Minimum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16723,6 +19213,8 @@ vmin_u16_(a, b)
}
/// Minimum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16740,6 +19232,8 @@ vminq_u16_(a, b)
}
/// Minimum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16757,6 +19251,8 @@ vmin_u32_(a, b)
}
/// Minimum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16774,6 +19270,8 @@ vminq_u32_(a, b)
}
/// Minimum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16791,6 +19289,8 @@ vmin_f32_(a, b)
}
/// Minimum (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16808,6 +19308,8 @@ vminq_f32_(a, b)
}
/// Floating-point Minimum Number (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))]
@@ -16825,6 +19327,8 @@ vminnm_f32_(a, b)
}
/// Floating-point Minimum Number (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))]
@@ -16842,6 +19346,8 @@ vminnmq_f32_(a, b)
}
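Not in the diff: a small sketch of the minimum intrinsics used as a branch-free upper clamp (vminnm_f32 relates to vmin_f32 exactly as the maxnm/max pair above). Assumes AArch64; the helper name is illustrative.

#[cfg(target_arch = "aarch64")]
fn lanewise_min_sketch() {
    use core::arch::aarch64::{uint8x8_t, vmin_u8};
    use core::mem::transmute;
    unsafe {
        let pixels: uint8x8_t = transmute([0u8, 50, 100, 150, 200, 250, 255, 7]);
        let cap: uint8x8_t = transmute([200u8; 8]);
        // Per-lane minimum clamps every lane to at most 200 without branching.
        let clamped: [u8; 8] = transmute(vmin_u8(pixels, cap));
        assert_eq!(clamped, [0, 50, 100, 150, 200, 200, 200, 7]);
    }
}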
/// Floating-point add pairwise
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16859,6 +19365,8 @@ vpadd_f32_(a, b)
}
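Illustrative sketch (not part of the patch) of the pairwise-add semantics: adjacent lanes of each input are summed, so the result is [a0 + a1, b0 + b1]. Assumes AArch64.

#[cfg(target_arch = "aarch64")]
fn pairwise_add_sketch() {
    use core::arch::aarch64::{float32x2_t, vpadd_f32};
    use core::mem::transmute;
    unsafe {
        let a: float32x2_t = transmute([1.0f32, 2.0]);
        let b: float32x2_t = transmute([10.0f32, 20.0]);
        let r: [f32; 2] = transmute(vpadd_f32(a, b));
        assert_eq!(r, [3.0, 30.0]); // horizontal sums of a and b
    }
}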
/// Signed saturating doubling multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16876,6 +19384,8 @@ vqdmull_s16_(a, b)
}
/// Signed saturating doubling multiply long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16893,6 +19403,8 @@ vqdmull_s32_(a, b)
}
/// Vector saturating doubling long multiply with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16904,6 +19416,8 @@ pub unsafe fn vqdmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t {
}
/// Vector saturating doubling long multiply with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16915,6 +19429,8 @@ pub unsafe fn vqdmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t {
}
/// Vector saturating doubling long multiply by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16929,6 +19445,8 @@ pub unsafe fn vqdmull_lane_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int3
}
/// Vector saturating doubling long multiply by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16943,6 +19461,8 @@ pub unsafe fn vqdmull_lane_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int6
}
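Not from the patch: a sketch of the saturating doubling multiply long, which computes 2*a*b per lane into a widened element; the only case that can overflow even the wide lane is MIN*MIN, which saturates. Assumes AArch64.

#[cfg(target_arch = "aarch64")]
fn doubling_multiply_long_sketch() {
    use core::arch::aarch64::{int16x4_t, vqdmull_s16};
    use core::mem::transmute;
    unsafe {
        let a: int16x4_t = transmute([1000i16, -1000, i16::MIN, 3]);
        let b: int16x4_t = transmute([2000i16, 2000, i16::MIN, 4]);
        // 2 * i16::MIN * i16::MIN = 2^31 does not fit in i32 and clamps to i32::MAX.
        let r: [i32; 4] = transmute(vqdmull_s16(a, b));
        assert_eq!(r, [4_000_000, -4_000_000, i32::MAX, 24]);
    }
}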
/// Signed saturating doubling multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16954,6 +19474,8 @@ pub unsafe fn vqdmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t
}
/// Signed saturating doubling multiply-add long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16965,6 +19487,8 @@ pub unsafe fn vqdmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t
}
/// Vector widening saturating doubling multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16976,6 +19500,8 @@ pub unsafe fn vqdmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t {
}
/// Vector widening saturating doubling multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -16987,6 +19513,8 @@ pub unsafe fn vqdmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t {
}
/// Vector widening saturating doubling multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17000,6 +19528,8 @@ pub unsafe fn vqdmlal_lane_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int1
}
/// Vector widening saturating doubling multiply accumulate with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17013,6 +19543,8 @@ pub unsafe fn vqdmlal_lane_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int3
}
/// Signed saturating doubling multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17024,6 +19556,8 @@ pub unsafe fn vqdmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t
}
/// Signed saturating doubling multiply-subtract long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17035,6 +19569,8 @@ pub unsafe fn vqdmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t
}
/// Vector widening saturating doubling multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17046,6 +19582,8 @@ pub unsafe fn vqdmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t {
}
/// Vector widening saturating doubling multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17057,6 +19595,8 @@ pub unsafe fn vqdmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t {
}
/// Vector widening saturating doubling multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17070,6 +19610,8 @@ pub unsafe fn vqdmlsl_lane_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int1
}
/// Vector widening saturating doubling multiply subtract with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17083,6 +19625,8 @@ pub unsafe fn vqdmlsl_lane_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int3
}
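A minimal sketch (not in the diff) of the accumulate form: each lane becomes acc + 2*b*c with saturation; the subtract form vqdmlsl_* computes acc - 2*b*c. Assumes AArch64.

#[cfg(target_arch = "aarch64")]
fn doubling_multiply_accumulate_sketch() {
    use core::arch::aarch64::{int16x4_t, int32x4_t, vqdmlal_s16};
    use core::mem::transmute;
    unsafe {
        let acc: int32x4_t = transmute([100i32, 200, 300, 400]);
        let b: int16x4_t = transmute([1i16, 2, 3, 4]);
        let c: int16x4_t = transmute([10i16; 4]);
        let r: [i32; 4] = transmute(vqdmlal_s16(acc, b, c));
        assert_eq!(r, [120, 240, 360, 480]); // acc + 2*b*c per lane
    }
}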
/// Signed saturating doubling multiply returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17100,6 +19644,8 @@ vqdmulh_s16_(a, b)
}
/// Signed saturating doubling multiply returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17117,6 +19663,8 @@ vqdmulhq_s16_(a, b)
}
/// Signed saturating doubling multiply returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17134,6 +19682,8 @@ vqdmulh_s32_(a, b)
}
/// Signed saturating doubling multiply returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17151,6 +19701,8 @@ vqdmulhq_s32_(a, b)
}
/// Vector saturating doubling multiply high with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17163,6 +19715,8 @@ pub unsafe fn vqdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t {
}
/// Vector saturating doubling multiply high with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17175,6 +19729,8 @@ pub unsafe fn vqdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t {
}
/// Vector saturating doubling multiply high with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17187,6 +19743,8 @@ pub unsafe fn vqdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t {
}
/// Vector saturating doubling multiply high with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17199,6 +19757,8 @@ pub unsafe fn vqdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t {
}
/// Vector saturating doubling multiply high by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17212,6 +19772,8 @@ pub unsafe fn vqdmulhq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) ->
}
/// Vector saturating doubling multiply high by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17225,6 +19787,8 @@ pub unsafe fn vqdmulh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) ->
}
/// Vector saturating doubling multiply high by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17238,6 +19802,8 @@ pub unsafe fn vqdmulhq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) ->
}
/// Vector saturating doubling multiply high by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17251,6 +19817,8 @@ pub unsafe fn vqdmulh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) ->
}
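Not part of the patch: the "returning high half" form is the usual Q15 fixed-point multiply, i.e. (2*a*b) >> 16 per i16 lane. A hedged sketch, assuming AArch64:

#[cfg(target_arch = "aarch64")]
fn doubling_multiply_high_sketch() {
    use core::arch::aarch64::{int16x4_t, vqdmulh_s16};
    use core::mem::transmute;
    unsafe {
        // In Q15, 0.5 is represented as 16384.
        let half: int16x4_t = transmute([16384i16; 4]);
        // (2 * 16384 * 16384) >> 16 = 8192, i.e. 0.25 in Q15.
        let r: [i16; 4] = transmute(vqdmulh_s16(half, half));
        assert_eq!(r, [8192i16; 4]);
    }
}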
/// Signed saturating extract narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17268,6 +19836,8 @@ vqmovn_s16_(a)
}
/// Signed saturating extract narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17285,6 +19855,8 @@ vqmovn_s32_(a)
}
/// Signed saturating extract narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17302,6 +19874,8 @@ vqmovn_s64_(a)
}
/// Unsigned saturating extract narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17319,6 +19893,8 @@ vqmovn_u16_(a)
}
/// Unsigned saturating extract narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17336,6 +19912,8 @@ vqmovn_u32_(a)
}
/// Unsigned saturating extract narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17353,6 +19931,8 @@ vqmovn_u64_(a)
}
/// Signed saturating extract unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17370,6 +19950,8 @@ vqmovun_s16_(a)
}
/// Signed saturating extract unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17387,6 +19969,8 @@ vqmovun_s32_(a)
}
/// Signed saturating extract unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17404,6 +19988,8 @@ vqmovun_s64_(a)
}
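Illustrative only (not in the diff): the two narrowing flavours differ in the target range; vqmovn_* clamps into the signed narrow type and vqmovun_* into the unsigned one. Assumes AArch64.

#[cfg(target_arch = "aarch64")]
fn saturating_narrow_sketch() {
    use core::arch::aarch64::{int16x8_t, vqmovn_s16, vqmovun_s16};
    use core::mem::transmute;
    unsafe {
        let wide: int16x8_t = transmute([-300i16, -1, 0, 42, 127, 128, 255, 300]);
        let signed: [i8; 8] = transmute(vqmovn_s16(wide));
        let unsigned: [u8; 8] = transmute(vqmovun_s16(wide));
        assert_eq!(signed, [-128, -1, 0, 42, 127, 127, 127, 127]);
        assert_eq!(unsigned, [0, 0, 0, 42, 127, 128, 255, 255]);
    }
}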
/// Signed saturating rounding doubling multiply returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17421,6 +20007,8 @@ vqrdmulh_s16_(a, b)
}
/// Signed saturating rounding doubling multiply returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17438,6 +20026,8 @@ vqrdmulhq_s16_(a, b)
}
/// Signed saturating rounding doubling multiply returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17455,6 +20045,8 @@ vqrdmulh_s32_(a, b)
}
/// Signed saturating rounding doubling multiply returning high half
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17472,6 +20064,8 @@ vqrdmulhq_s32_(a, b)
}
/// Vector saturating rounding doubling multiply high with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17483,6 +20077,8 @@ pub unsafe fn vqrdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t {
}
/// Vector saturating rounding doubling multiply high with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17494,6 +20090,8 @@ pub unsafe fn vqrdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t {
}
/// Vector saturating rounding doubling multiply high with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17505,6 +20103,8 @@ pub unsafe fn vqrdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t {
}
/// Vector saturating rounding doubling multiply high with scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17516,6 +20116,8 @@ pub unsafe fn vqrdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t {
}
/// Vector rounding saturating doubling multiply high by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17530,6 +20132,8 @@ pub unsafe fn vqrdmulh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) ->
}
/// Vector rounding saturating doubling multiply high by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17544,6 +20148,8 @@ pub unsafe fn vqrdmulh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) ->
}
/// Vector rounding saturating doubling multiply high by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17558,6 +20164,8 @@ pub unsafe fn vqrdmulhq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) ->
}
/// Vector rounding saturating doubling multiply high by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17572,6 +20180,8 @@ pub unsafe fn vqrdmulhq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -
}
/// Vector rounding saturating doubling multiply high by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17586,6 +20196,8 @@ pub unsafe fn vqrdmulh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) ->
}
/// Vector rounding saturating doubling multiply high by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17600,6 +20212,8 @@ pub unsafe fn vqrdmulh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) ->
}
/// Vector rounding saturating doubling multiply high by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17614,6 +20228,8 @@ pub unsafe fn vqrdmulhq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) ->
}
/// Vector rounding saturating doubling multiply high by scalar
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17628,6 +20244,8 @@ pub unsafe fn vqrdmulhq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -
}
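A hedged sketch (not from the patch) of what the extra "rounding" buys: vqrdmulh_* adds 2^15 before discarding the low half, so results round to nearest instead of truncating. Assumes AArch64.

#[cfg(target_arch = "aarch64")]
fn rounding_vs_truncating_high_half_sketch() {
    use core::arch::aarch64::{int16x4_t, vqdmulh_s16, vqrdmulh_s16};
    use core::mem::transmute;
    unsafe {
        let a: int16x4_t = transmute([16384i16; 4]); // 0.5 in Q15
        let b: int16x4_t = transmute([3i16; 4]);
        // Exact doubled product is 98304 = 1.5 * 2^16.
        let trunc: [i16; 4] = transmute(vqdmulh_s16(a, b));  // truncates to 1
        let round: [i16; 4] = transmute(vqrdmulh_s16(a, b)); // rounds to 2
        assert_eq!((trunc[0], round[0]), (1, 2));
    }
}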
/// Signed saturating rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17645,6 +20263,8 @@ vqrshl_s8_(a, b)
}
/// Signed saturating rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17662,6 +20282,8 @@ vqrshlq_s8_(a, b)
}
/// Signed saturating rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17679,6 +20301,8 @@ vqrshl_s16_(a, b)
}
/// Signed saturating rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17696,6 +20320,8 @@ vqrshlq_s16_(a, b)
}
/// Signed saturating rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17713,6 +20339,8 @@ vqrshl_s32_(a, b)
}
/// Signed saturating rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17730,6 +20358,8 @@ vqrshlq_s32_(a, b)
}
/// Signed saturating rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17747,6 +20377,8 @@ vqrshl_s64_(a, b)
}
/// Signed saturating rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17764,6 +20396,8 @@ vqrshlq_s64_(a, b)
}
/// Unsigned signed saturating rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17781,6 +20415,8 @@ vqrshl_u8_(a, b)
}
/// Unsigned signed saturating rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17798,6 +20434,8 @@ vqrshlq_u8_(a, b)
}
/// Unsigned signed saturating rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17815,6 +20453,8 @@ vqrshl_u16_(a, b)
}
/// Unsigned signed saturating rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17832,6 +20472,8 @@ vqrshlq_u16_(a, b)
}
/// Unsigned signed saturating rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17849,6 +20491,8 @@ vqrshl_u32_(a, b)
}
/// Unsigned signed saturating rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17866,6 +20510,8 @@ vqrshlq_u32_(a, b)
}
/// Unsigned signed saturating rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17883,6 +20529,8 @@ vqrshl_u64_(a, b)
}
/// Unsigned signed saturating rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -17900,6 +20548,8 @@ vqrshlq_u64_(a, b)
}
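Not in the diff: a sketch of the register-shift form, where the per-lane shift count is signed: positive counts shift left with saturation, negative counts shift right with rounding. Assumes AArch64.

#[cfg(target_arch = "aarch64")]
fn saturating_rounding_shift_sketch() {
    use core::arch::aarch64::{int8x8_t, vqrshl_s8};
    use core::mem::transmute;
    unsafe {
        let values: int8x8_t = transmute([1i8, 64, 100, -100, 5, 5, 7, 0]);
        let shifts: int8x8_t = transmute([2i8, 2, 1, 1, -1, -2, -1, 0]);
        let r: [i8; 8] = transmute(vqrshl_s8(values, shifts));
        // 64 << 2 and 100 << 1 saturate; 5 >> 1, 5 >> 2 and 7 >> 1 round.
        assert_eq!(r, [4, 127, 127, -128, 3, 1, 4, 0]);
    }
}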
/// Signed saturating rounded shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -17916,6 +20566,8 @@ vqrshrn_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i1
}
/// Signed saturating rounded shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -17933,6 +20585,8 @@ vqrshrn_n_s16_(a, N)
}
/// Signed saturating rounded shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -17949,6 +20603,8 @@ vqrshrn_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32))
}
/// Signed saturating rounded shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -17966,6 +20622,8 @@ vqrshrn_n_s32_(a, N)
}
/// Signed saturating rounded shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -17982,6 +20640,8 @@ vqrshrn_n_s64_(a, int64x2_t(-N as i64, -N as i64))
}
/// Signed saturating rounded shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -17999,6 +20659,8 @@ vqrshrn_n_s64_(a, N)
}
/// Unsigned signed saturating rounded shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -18015,6 +20677,8 @@ vqrshrn_n_u16_(a, uint16x8_t(-N as u16, -N as u16, -N as u16, -N as u16, -N as u
}
/// Unsigned signed saturating rounded shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -18032,6 +20696,8 @@ vqrshrn_n_u16_(a, N)
}
/// Unsigned signed saturating rounded shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -18048,6 +20714,8 @@ vqrshrn_n_u32_(a, uint32x4_t(-N as u32, -N as u32, -N as u32, -N as u32))
}
/// Unsigned signed saturating rounded shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -18065,6 +20733,8 @@ vqrshrn_n_u32_(a, N)
}
/// Unsigned signed saturating rounded shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -18081,6 +20751,8 @@ vqrshrn_n_u64_(a, uint64x2_t(-N as u64, -N as u64))
}
/// Unsigned signed saturating rounded shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -18098,6 +20770,8 @@ vqrshrn_n_u64_(a, N)
}
/// Signed saturating rounded shift right unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -18114,6 +20788,8 @@ vqrshrun_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i
}
/// Signed saturating rounded shift right unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -18131,6 +20807,8 @@ vqrshrun_n_s16_(a, N)
}
/// Signed saturating rounded shift right unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -18147,6 +20825,8 @@ vqrshrun_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32))
}
/// Signed saturating rounded shift right unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -18164,6 +20844,8 @@ vqrshrun_n_s32_(a, N)
}
/// Signed saturating rounded shift right unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -18180,6 +20862,8 @@ vqrshrun_n_s64_(a, int64x2_t(-N as i64, -N as i64))
}
/// Signed saturating rounded shift right unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -18197,6 +20881,8 @@ vqrshrun_n_s64_(a, N)
}
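A minimal sketch (not part of the patch) of the rounded narrowing shift: each wide lane is shifted right by the immediate with round-to-nearest, then saturated into the narrow type. Assumes AArch64.

#[cfg(target_arch = "aarch64")]
fn rounded_narrowing_shift_sketch() {
    use core::arch::aarch64::{int16x8_t, vqrshrn_n_s16};
    use core::mem::transmute;
    unsafe {
        let wide: int16x8_t = transmute([256i16, 300, 384, -384, 32767, -32768, 0, 1]);
        // (x + 128) >> 8, saturated into i8.
        let r: [i8; 8] = transmute(vqrshrn_n_s16::<8>(wide));
        assert_eq!(r, [1, 1, 2, -1, 127, -128, 0, 0]);
    }
}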
/// Signed saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18214,6 +20900,8 @@ vqshl_s8_(a, b)
}
/// Signed saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18231,6 +20919,8 @@ vqshlq_s8_(a, b)
}
/// Signed saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18248,6 +20938,8 @@ vqshl_s16_(a, b)
}
/// Signed saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18265,6 +20957,8 @@ vqshlq_s16_(a, b)
}
/// Signed saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18282,6 +20976,8 @@ vqshl_s32_(a, b)
}
/// Signed saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18299,6 +20995,8 @@ vqshlq_s32_(a, b)
}
/// Signed saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18316,6 +21014,8 @@ vqshl_s64_(a, b)
}
/// Signed saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18333,6 +21033,8 @@ vqshlq_s64_(a, b)
}
/// Unsigned saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18350,6 +21052,8 @@ vqshl_u8_(a, b)
}
/// Unsigned saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18367,6 +21071,8 @@ vqshlq_u8_(a, b)
}
/// Unsigned saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18384,6 +21090,8 @@ vqshl_u16_(a, b)
}
/// Unsigned saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18401,6 +21109,8 @@ vqshlq_u16_(a, b)
}
/// Unsigned saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18418,6 +21128,8 @@ vqshl_u32_(a, b)
}
/// Unsigned saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18435,6 +21147,8 @@ vqshlq_u32_(a, b)
}
/// Unsigned saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18452,6 +21166,8 @@ vqshl_u64_(a, b)
}
/// Unsigned saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18469,6 +21185,8 @@ vqshlq_u64_(a, b)
}
/// Signed saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18482,6 +21200,8 @@ pub unsafe fn vqshl_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
}
/// Signed saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18495,6 +21215,8 @@ pub unsafe fn vqshlq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
}
/// Signed saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18508,6 +21230,8 @@ pub unsafe fn vqshl_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
}
/// Signed saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18521,6 +21245,8 @@ pub unsafe fn vqshlq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
}
/// Signed saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18534,6 +21260,8 @@ pub unsafe fn vqshl_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
}
/// Signed saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18547,6 +21275,8 @@ pub unsafe fn vqshlq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
}
/// Signed saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18560,6 +21290,8 @@ pub unsafe fn vqshl_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
}
/// Signed saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18573,6 +21305,8 @@ pub unsafe fn vqshlq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
}
/// Unsigned saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18586,6 +21320,8 @@ pub unsafe fn vqshl_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
}
/// Unsigned saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18599,6 +21335,8 @@ pub unsafe fn vqshlq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
}
/// Unsigned saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18612,6 +21350,8 @@ pub unsafe fn vqshl_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
}
/// Unsigned saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18625,6 +21365,8 @@ pub unsafe fn vqshlq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
}
/// Unsigned saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18638,6 +21380,8 @@ pub unsafe fn vqshl_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
}
/// Unsigned saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18651,6 +21395,8 @@ pub unsafe fn vqshlq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
}
/// Unsigned saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18664,6 +21410,8 @@ pub unsafe fn vqshl_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
}
/// Unsigned saturating shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -18677,6 +21425,8 @@ pub unsafe fn vqshlq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
}
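Not from the patch: a sketch of the immediate form, contrasting it with a plain `<<`: results that would exceed the lane range saturate instead of wrapping. Assumes AArch64.

#[cfg(target_arch = "aarch64")]
fn saturating_shift_left_imm_sketch() {
    use core::arch::aarch64::{uint8x8_t, vqshl_n_u8};
    use core::mem::transmute;
    unsafe {
        let a: uint8x8_t = transmute([1u8, 15, 16, 31, 32, 63, 64, 255]);
        // Left shift by 3; 32 << 3 and above clamp to 255 rather than wrapping.
        let r: [u8; 8] = transmute(vqshl_n_u8::<3>(a));
        assert_eq!(r, [8, 120, 128, 248, 255, 255, 255, 255]);
    }
}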
/// Signed saturating shift left unsigned
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -18693,6 +21443,8 @@ vqshlu_n_s8_(a, int8x8_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N
}
/// Signed saturating shift left unsigned
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -18710,6 +21462,8 @@ vqshlu_n_s8_(a, int8x8_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N
}
/// Signed saturating shift left unsigned
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -18726,6 +21480,8 @@ vqshlu_n_s16_(a, int16x4_t(N as i16, N as i16, N as i16, N as i16))
}
/// Signed saturating shift left unsigned
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -18743,6 +21499,8 @@ vqshlu_n_s16_(a, int16x4_t(N as i16, N as i16, N as i16, N as i16))
}
/// Signed saturating shift left unsigned
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -18759,6 +21517,8 @@ vqshlu_n_s32_(a, int32x2_t(N as i32, N as i32))
}
/// Signed saturating shift left unsigned
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -18776,6 +21536,8 @@ vqshlu_n_s32_(a, int32x2_t(N as i32, N as i32))
}
/// Signed saturating shift left unsigned
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -18792,6 +21554,8 @@ vqshlu_n_s64_(a, int64x1_t(N as i64))
}
/// Signed saturating shift left unsigned
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -18809,6 +21573,8 @@ vqshlu_n_s64_(a, int64x1_t(N as i64))
}
/// Signed saturating shift left unsigned
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -18825,6 +21591,8 @@ vqshluq_n_s8_(a, int8x16_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8,
}
/// Signed saturating shift left unsigned
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -18842,6 +21610,8 @@ vqshluq_n_s8_(a, int8x16_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8,
}
/// Signed saturating shift left unsigned
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -18858,6 +21628,8 @@ vqshluq_n_s16_(a, int16x8_t(N as i16, N as i16, N as i16, N as i16, N as i16, N
}
/// Signed saturating shift left unsigned
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -18875,6 +21647,8 @@ vqshluq_n_s16_(a, int16x8_t(N as i16, N as i16, N as i16, N as i16, N as i16, N
}
/// Signed saturating shift left unsigned
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -18891,6 +21665,8 @@ vqshluq_n_s32_(a, int32x4_t(N as i32, N as i32, N as i32, N as i32))
}
/// Signed saturating shift left unsigned
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -18908,6 +21684,8 @@ vqshluq_n_s32_(a, int32x4_t(N as i32, N as i32, N as i32, N as i32))
}
/// Signed saturating shift left unsigned
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -18924,6 +21702,8 @@ vqshluq_n_s64_(a, int64x2_t(N as i64, N as i64))
}
/// Signed saturating shift left unsigned
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -18941,6 +21721,8 @@ vqshluq_n_s64_(a, int64x2_t(N as i64, N as i64))
}
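Illustrative only (not in the diff): the "shift left unsigned" variants take signed lanes but saturate into the unsigned range, so negative inputs clamp to zero. Assumes AArch64.

#[cfg(target_arch = "aarch64")]
fn signed_to_unsigned_shift_sketch() {
    use core::arch::aarch64::{int8x8_t, vqshlu_n_s8};
    use core::mem::transmute;
    unsafe {
        let a: int8x8_t = transmute([-5i8, -1, 0, 1, 16, 31, 32, 127]);
        let r: [u8; 8] = transmute(vqshlu_n_s8::<3>(a));
        // Negative lanes clamp to 0; 32 << 3 and 127 << 3 clamp to 255.
        assert_eq!(r, [0, 0, 0, 8, 128, 248, 255, 255]);
    }
}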
/// Signed saturating shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -18957,6 +21739,8 @@ vqshrn_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i16
}
/// Signed saturating shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -18974,6 +21758,8 @@ vqshrn_n_s16_(a, N)
}
/// Signed saturating shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -18990,6 +21776,8 @@ vqshrn_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32))
}
/// Signed saturating shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -19007,6 +21795,8 @@ vqshrn_n_s32_(a, N)
}
/// Signed saturating shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -19023,6 +21813,8 @@ vqshrn_n_s64_(a, int64x2_t(-N as i64, -N as i64))
}
/// Signed saturating shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -19040,6 +21832,8 @@ vqshrn_n_s64_(a, N)
}
/// Unsigned saturating shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -19056,6 +21850,8 @@ vqshrn_n_u16_(a, uint16x8_t(-N as u16, -N as u16, -N as u16, -N as u16, -N as u1
}
/// Unsigned saturating shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -19073,6 +21869,8 @@ vqshrn_n_u16_(a, N)
}
/// Unsigned saturating shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -19089,6 +21887,8 @@ vqshrn_n_u32_(a, uint32x4_t(-N as u32, -N as u32, -N as u32, -N as u32))
}
/// Unsigned saturating shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -19106,6 +21906,8 @@ vqshrn_n_u32_(a, N)
}
/// Unsigned saturating shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -19122,6 +21924,8 @@ vqshrn_n_u64_(a, uint64x2_t(-N as u64, -N as u64))
}
/// Unsigned saturating shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -19139,6 +21943,8 @@ vqshrn_n_u64_(a, N)
}
/// Signed saturating shift right unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -19155,6 +21961,8 @@ vqshrun_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i1
}
/// Signed saturating shift right unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -19172,6 +21980,8 @@ vqshrun_n_s16_(a, N)
}
/// Signed saturating shift right unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -19188,6 +21998,8 @@ vqshrun_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32))
}
/// Signed saturating shift right unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -19205,6 +22017,8 @@ vqshrun_n_s32_(a, N)
}
/// Signed saturating shift right unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -19221,6 +22035,8 @@ vqshrun_n_s64_(a, int64x2_t(-N as i64, -N as i64))
}
/// Signed saturating shift right unsigned narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -19238,6 +22054,8 @@ vqshrun_n_s64_(a, N)
}
/// Reciprocal square-root estimate.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19255,6 +22073,8 @@ vrsqrte_f32_(a)
}
/// Reciprocal square-root estimate.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19272,6 +22092,8 @@ vrsqrteq_f32_(a)
}
/// Unsigned reciprocal square root estimate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19289,6 +22111,8 @@ vrsqrte_u32_(a)
}
/// Unsigned reciprocal square root estimate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19306,6 +22130,8 @@ vrsqrteq_u32_(a)
}
/// Floating-point reciprocal square root step
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19323,6 +22149,8 @@ vrsqrts_f32_(a, b)
}
/// Floating-point reciprocal square root step
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19340,6 +22168,8 @@ vrsqrtsq_f32_(a, b)
}
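The estimate/step pairs above are normally combined via Newton-Raphson refinement; a sketch under the same assumptions (AArch64 target; vdup_n_f32, vmul_f32 and vget_lane_f32 are standard NEON intrinsics not part of this patch):

    #[cfg(target_arch = "aarch64")]
    unsafe fn inv_sqrt_demo(x: f32) -> f32 {
        use core::arch::aarch64::*;
        // vrsqrte_f32 gives a coarse (~8-bit) estimate of 1/sqrt(x);
        // vrsqrts_f32(a, b) computes (3 - a*b)/2, so e * vrsqrts_f32(v*e, e)
        // is one Newton-Raphson refinement step.
        let v = vdup_n_f32(x);
        let mut e = vrsqrte_f32(v);
        e = vmul_f32(e, vrsqrts_f32(vmul_f32(v, e), e));
        e = vmul_f32(e, vrsqrts_f32(vmul_f32(v, e), e));
        vget_lane_f32::<0>(e)
    }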
/// Reciprocal estimate.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19357,6 +22187,8 @@ vrecpe_f32_(a)
}
/// Reciprocal estimate.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19374,6 +22206,8 @@ vrecpeq_f32_(a)
}
/// Unsigned reciprocal estimate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19391,6 +22225,8 @@ vrecpe_u32_(a)
}
/// Unsigned reciprocal estimate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19408,6 +22244,8 @@ vrecpeq_u32_(a)
}
/// Floating-point reciprocal step
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19425,6 +22263,8 @@ vrecps_f32_(a, b)
}
/// Floating-point reciprocal step
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19442,6 +22282,8 @@ vrecpsq_f32_(a, b)
}
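The reciprocal estimate/step pair works the same way; a sketch under the same assumptions (AArch64 target; helper intrinsics not shown in this diff):

    #[cfg(target_arch = "aarch64")]
    unsafe fn recip_demo(x: f32) -> f32 {
        use core::arch::aarch64::*;
        // vrecpe_f32 gives a coarse estimate of 1/x; vrecps_f32(a, b) computes
        // 2 - a*b, so e * vrecps_f32(v, e) is one Newton-Raphson refinement step.
        let v = vdup_n_f32(x);
        let mut e = vrecpe_f32(v);
        e = vmul_f32(e, vrecps_f32(v, e));
        e = vmul_f32(e, vrecps_f32(v, e));
        vget_lane_f32::<0>(e)
    }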
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19453,6 +22295,8 @@ pub unsafe fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t {
}
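A small sketch of the reinterpret casts that follow (illustration only, not from the patch; AArch64 target assumed, vdup_n_u8 and vget_lane_s8 are standard helpers not in this diff):

    #[cfg(target_arch = "aarch64")]
    fn reinterpret_demo() {
        use core::arch::aarch64::*;
        unsafe {
            // vreinterpret_s8_u8 is a pure bit-level cast: the same 64-bit
            // register, retyped from unsigned to signed lanes, with no data
            // movement and no value conversion.
            let u = vdup_n_u8(0xFF);
            let s = vreinterpret_s8_u8(u);
            assert_eq!(vget_lane_s8::<0>(s), -1);
        }
    }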
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19464,6 +22308,8 @@ pub unsafe fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19475,6 +22321,8 @@ pub unsafe fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19486,6 +22334,8 @@ pub unsafe fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19497,6 +22347,8 @@ pub unsafe fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19508,6 +22360,8 @@ pub unsafe fn vreinterpret_s64_u64(a: uint64x1_t) -> int64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19519,6 +22373,8 @@ pub unsafe fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19530,6 +22386,8 @@ pub unsafe fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19541,6 +22399,8 @@ pub unsafe fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19552,6 +22412,8 @@ pub unsafe fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19563,6 +22425,8 @@ pub unsafe fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19574,6 +22438,8 @@ pub unsafe fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19585,6 +22451,8 @@ pub unsafe fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19596,6 +22464,8 @@ pub unsafe fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19607,6 +22477,8 @@ pub unsafe fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19618,6 +22490,8 @@ pub unsafe fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19629,6 +22503,8 @@ pub unsafe fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19640,6 +22516,8 @@ pub unsafe fn vreinterpret_u64_s64(a: int64x1_t) -> uint64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19651,6 +22529,8 @@ pub unsafe fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19662,6 +22542,8 @@ pub unsafe fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19673,6 +22555,8 @@ pub unsafe fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19684,6 +22568,8 @@ pub unsafe fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19695,6 +22581,8 @@ pub unsafe fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19706,6 +22594,8 @@ pub unsafe fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19717,6 +22607,8 @@ pub unsafe fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19728,6 +22620,8 @@ pub unsafe fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19739,6 +22633,8 @@ pub unsafe fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19750,6 +22646,8 @@ pub unsafe fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19761,6 +22659,8 @@ pub unsafe fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19772,6 +22672,8 @@ pub unsafe fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19783,6 +22685,8 @@ pub unsafe fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19794,6 +22698,8 @@ pub unsafe fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19805,6 +22711,8 @@ pub unsafe fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19816,6 +22724,8 @@ pub unsafe fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19827,6 +22737,8 @@ pub unsafe fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19838,6 +22750,8 @@ pub unsafe fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19849,6 +22763,8 @@ pub unsafe fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19860,6 +22776,8 @@ pub unsafe fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19871,6 +22789,8 @@ pub unsafe fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19882,6 +22802,8 @@ pub unsafe fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19893,6 +22815,8 @@ pub unsafe fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19904,6 +22828,8 @@ pub unsafe fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19915,6 +22841,8 @@ pub unsafe fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19926,6 +22854,8 @@ pub unsafe fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19937,6 +22867,8 @@ pub unsafe fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19948,6 +22880,8 @@ pub unsafe fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19959,6 +22893,8 @@ pub unsafe fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19970,6 +22906,8 @@ pub unsafe fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19981,6 +22919,8 @@ pub unsafe fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -19992,6 +22932,8 @@ pub unsafe fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20003,6 +22945,8 @@ pub unsafe fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20014,6 +22958,8 @@ pub unsafe fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20025,6 +22971,8 @@ pub unsafe fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20036,6 +22984,8 @@ pub unsafe fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20047,6 +22997,8 @@ pub unsafe fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20058,6 +23010,8 @@ pub unsafe fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20069,6 +23023,8 @@ pub unsafe fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20080,6 +23036,8 @@ pub unsafe fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20091,6 +23049,8 @@ pub unsafe fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20102,6 +23062,8 @@ pub unsafe fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20113,6 +23075,8 @@ pub unsafe fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20124,6 +23088,8 @@ pub unsafe fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20135,6 +23101,8 @@ pub unsafe fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20146,6 +23114,8 @@ pub unsafe fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20157,6 +23127,8 @@ pub unsafe fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20168,6 +23140,8 @@ pub unsafe fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20179,6 +23153,8 @@ pub unsafe fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20190,6 +23166,8 @@ pub unsafe fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20201,6 +23179,8 @@ pub unsafe fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20212,6 +23192,8 @@ pub unsafe fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -20223,6 +23205,8 @@ pub unsafe fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -20234,6 +23218,8 @@ pub unsafe fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -20245,6 +23231,8 @@ pub unsafe fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -20256,6 +23244,8 @@ pub unsafe fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -20267,6 +23257,8 @@ pub unsafe fn vreinterpretq_s64_p128(a: p128) -> int64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -20278,6 +23270,8 @@ pub unsafe fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -20289,6 +23283,8 @@ pub unsafe fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20300,6 +23296,8 @@ pub unsafe fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20311,6 +23309,8 @@ pub unsafe fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20322,6 +23322,8 @@ pub unsafe fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20333,6 +23335,8 @@ pub unsafe fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20344,6 +23348,8 @@ pub unsafe fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20355,6 +23361,8 @@ pub unsafe fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20366,6 +23374,8 @@ pub unsafe fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20377,6 +23387,8 @@ pub unsafe fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20388,6 +23400,8 @@ pub unsafe fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20399,6 +23413,8 @@ pub unsafe fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20410,6 +23426,8 @@ pub unsafe fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20421,6 +23439,8 @@ pub unsafe fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20432,6 +23452,8 @@ pub unsafe fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20443,6 +23465,8 @@ pub unsafe fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20454,6 +23478,8 @@ pub unsafe fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20465,6 +23491,8 @@ pub unsafe fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20476,6 +23504,8 @@ pub unsafe fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20487,6 +23517,8 @@ pub unsafe fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20498,6 +23530,8 @@ pub unsafe fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20509,6 +23543,8 @@ pub unsafe fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20520,6 +23556,8 @@ pub unsafe fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20531,6 +23569,8 @@ pub unsafe fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20542,6 +23582,8 @@ pub unsafe fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20553,6 +23595,8 @@ pub unsafe fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20564,6 +23608,8 @@ pub unsafe fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20575,6 +23621,8 @@ pub unsafe fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20586,6 +23634,8 @@ pub unsafe fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20597,6 +23647,8 @@ pub unsafe fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20608,6 +23660,8 @@ pub unsafe fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20619,6 +23673,8 @@ pub unsafe fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20630,6 +23686,8 @@ pub unsafe fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20641,6 +23699,8 @@ pub unsafe fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20652,6 +23712,8 @@ pub unsafe fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20663,6 +23725,8 @@ pub unsafe fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20674,6 +23738,8 @@ pub unsafe fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20685,6 +23751,8 @@ pub unsafe fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20696,6 +23764,8 @@ pub unsafe fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20707,6 +23777,8 @@ pub unsafe fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s32)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -20718,6 +23790,8 @@ pub unsafe fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u32)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -20729,6 +23803,8 @@ pub unsafe fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s32)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -20740,6 +23816,8 @@ pub unsafe fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u32)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -20751,6 +23829,8 @@ pub unsafe fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -20762,6 +23842,8 @@ pub unsafe fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -20773,6 +23855,8 @@ pub unsafe fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -20784,6 +23868,8 @@ pub unsafe fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20795,6 +23881,8 @@ pub unsafe fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20806,6 +23894,8 @@ pub unsafe fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20817,6 +23907,8 @@ pub unsafe fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20828,6 +23920,8 @@ pub unsafe fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20839,6 +23933,8 @@ pub unsafe fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20850,6 +23946,8 @@ pub unsafe fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20861,6 +23959,8 @@ pub unsafe fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20872,6 +23972,8 @@ pub unsafe fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20883,6 +23985,8 @@ pub unsafe fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20894,6 +23998,8 @@ pub unsafe fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20905,6 +24011,8 @@ pub unsafe fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20916,6 +24024,8 @@ pub unsafe fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20927,6 +24037,8 @@ pub unsafe fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20938,6 +24050,8 @@ pub unsafe fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20949,6 +24063,8 @@ pub unsafe fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20960,6 +24076,8 @@ pub unsafe fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20971,6 +24089,8 @@ pub unsafe fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20982,6 +24102,8 @@ pub unsafe fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -20993,6 +24115,8 @@ pub unsafe fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21004,6 +24128,8 @@ pub unsafe fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21015,6 +24141,8 @@ pub unsafe fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21026,6 +24154,8 @@ pub unsafe fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21037,6 +24167,8 @@ pub unsafe fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21048,6 +24180,8 @@ pub unsafe fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21059,6 +24193,8 @@ pub unsafe fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21070,6 +24206,8 @@ pub unsafe fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21081,6 +24219,8 @@ pub unsafe fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21092,6 +24232,8 @@ pub unsafe fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21103,6 +24245,8 @@ pub unsafe fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21114,6 +24258,8 @@ pub unsafe fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21125,6 +24271,8 @@ pub unsafe fn vreinterpretq_s32_p128(a: p128) -> int32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21136,6 +24284,8 @@ pub unsafe fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21147,6 +24297,8 @@ pub unsafe fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21158,6 +24310,8 @@ pub unsafe fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21169,6 +24323,8 @@ pub unsafe fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21180,6 +24336,8 @@ pub unsafe fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21191,6 +24349,8 @@ pub unsafe fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21202,6 +24362,8 @@ pub unsafe fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21213,6 +24375,8 @@ pub unsafe fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21224,6 +24388,8 @@ pub unsafe fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21235,6 +24401,8 @@ pub unsafe fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21246,6 +24414,8 @@ pub unsafe fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21257,6 +24427,8 @@ pub unsafe fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21268,6 +24440,8 @@ pub unsafe fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21279,6 +24453,8 @@ pub unsafe fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21290,6 +24466,8 @@ pub unsafe fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21301,6 +24479,8 @@ pub unsafe fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21312,6 +24492,8 @@ pub unsafe fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21323,6 +24505,8 @@ pub unsafe fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21334,6 +24518,8 @@ pub unsafe fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21345,6 +24531,8 @@ pub unsafe fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21356,6 +24544,8 @@ pub unsafe fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21367,6 +24557,8 @@ pub unsafe fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21378,6 +24570,8 @@ pub unsafe fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21389,6 +24583,8 @@ pub unsafe fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21400,6 +24596,8 @@ pub unsafe fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p16)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21411,6 +24609,8 @@ pub unsafe fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s16)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21422,6 +24622,8 @@ pub unsafe fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u16)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21433,6 +24635,8 @@ pub unsafe fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p16)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21444,6 +24648,8 @@ pub unsafe fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s16)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21455,6 +24661,8 @@ pub unsafe fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u16)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21466,6 +24674,8 @@ pub unsafe fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s32)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21477,6 +24687,8 @@ pub unsafe fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u32)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21488,6 +24700,8 @@ pub unsafe fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21499,6 +24713,8 @@ pub unsafe fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21510,6 +24726,8 @@ pub unsafe fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21521,6 +24739,8 @@ pub unsafe fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21532,6 +24752,8 @@ pub unsafe fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21543,6 +24765,8 @@ pub unsafe fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21554,6 +24778,8 @@ pub unsafe fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21565,6 +24791,8 @@ pub unsafe fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21576,6 +24804,8 @@ pub unsafe fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21587,6 +24817,8 @@ pub unsafe fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21598,6 +24830,8 @@ pub unsafe fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21609,6 +24843,8 @@ pub unsafe fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21620,6 +24856,8 @@ pub unsafe fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21631,6 +24869,8 @@ pub unsafe fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21642,6 +24882,8 @@ pub unsafe fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21653,6 +24895,8 @@ pub unsafe fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21664,6 +24908,8 @@ pub unsafe fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21675,6 +24921,8 @@ pub unsafe fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21686,6 +24934,8 @@ pub unsafe fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21697,6 +24947,8 @@ pub unsafe fn vreinterpretq_s16_p128(a: p128) -> int16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21708,6 +24960,8 @@ pub unsafe fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21719,6 +24973,8 @@ pub unsafe fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21730,6 +24986,8 @@ pub unsafe fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21741,6 +24999,8 @@ pub unsafe fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21752,6 +25012,8 @@ pub unsafe fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21763,6 +25025,8 @@ pub unsafe fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21774,6 +25038,8 @@ pub unsafe fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21785,6 +25051,8 @@ pub unsafe fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21796,6 +25064,8 @@ pub unsafe fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21807,6 +25077,8 @@ pub unsafe fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21818,6 +25090,8 @@ pub unsafe fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21829,6 +25103,8 @@ pub unsafe fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21840,6 +25116,8 @@ pub unsafe fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -21851,6 +25129,8 @@ pub unsafe fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p8)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21862,6 +25142,8 @@ pub unsafe fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s8)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21873,6 +25155,8 @@ pub unsafe fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u8)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21884,6 +25168,8 @@ pub unsafe fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p8)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21895,6 +25181,8 @@ pub unsafe fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s8)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21906,6 +25194,8 @@ pub unsafe fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u8)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21917,6 +25207,8 @@ pub unsafe fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s16)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21928,6 +25220,8 @@ pub unsafe fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u16)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21939,6 +25233,8 @@ pub unsafe fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p16)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21950,6 +25246,8 @@ pub unsafe fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s8)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21961,6 +25259,8 @@ pub unsafe fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u8)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21972,6 +25272,8 @@ pub unsafe fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p8)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21983,6 +25285,8 @@ pub unsafe fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -21994,6 +25298,8 @@ pub unsafe fn vreinterpretq_s8_p128(a: p128) -> int8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -22005,6 +25311,8 @@ pub unsafe fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -22016,6 +25324,8 @@ pub unsafe fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22027,6 +25337,8 @@ pub unsafe fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22038,6 +25350,8 @@ pub unsafe fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22049,6 +25363,8 @@ pub unsafe fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22060,6 +25376,8 @@ pub unsafe fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22071,6 +25389,8 @@ pub unsafe fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22082,6 +25402,8 @@ pub unsafe fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22093,6 +25415,8 @@ pub unsafe fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22104,6 +25428,8 @@ pub unsafe fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22115,6 +25441,8 @@ pub unsafe fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22126,6 +25454,8 @@ pub unsafe fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22137,6 +25467,8 @@ pub unsafe fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22148,6 +25480,8 @@ pub unsafe fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22159,6 +25493,8 @@ pub unsafe fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22170,6 +25506,8 @@ pub unsafe fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22181,6 +25519,8 @@ pub unsafe fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22192,6 +25532,8 @@ pub unsafe fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22203,6 +25545,8 @@ pub unsafe fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22214,6 +25558,8 @@ pub unsafe fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22225,6 +25571,8 @@ pub unsafe fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22236,6 +25584,8 @@ pub unsafe fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22247,6 +25597,8 @@ pub unsafe fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22258,6 +25610,8 @@ pub unsafe fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22269,6 +25623,8 @@ pub unsafe fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22280,6 +25636,8 @@ pub unsafe fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22291,6 +25649,8 @@ pub unsafe fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22302,6 +25662,8 @@ pub unsafe fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22313,6 +25675,8 @@ pub unsafe fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22324,6 +25688,8 @@ pub unsafe fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22335,6 +25701,8 @@ pub unsafe fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22346,6 +25714,8 @@ pub unsafe fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22357,6 +25727,8 @@ pub unsafe fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22368,6 +25740,8 @@ pub unsafe fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22379,6 +25753,8 @@ pub unsafe fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22390,6 +25766,8 @@ pub unsafe fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22401,6 +25779,8 @@ pub unsafe fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22412,6 +25792,8 @@ pub unsafe fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22423,6 +25805,8 @@ pub unsafe fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22434,6 +25818,8 @@ pub unsafe fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22445,6 +25831,8 @@ pub unsafe fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22456,6 +25844,8 @@ pub unsafe fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22467,6 +25857,8 @@ pub unsafe fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t {
}
/// Vector reinterpret cast operation
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p128)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22478,6 +25870,8 @@ pub unsafe fn vreinterpretq_f32_p128(a: p128) -> float32x4_t {
}
/// Signed rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22495,6 +25889,8 @@ vrshl_s8_(a, b)
}
/// Signed rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22512,6 +25908,8 @@ vrshlq_s8_(a, b)
}
/// Signed rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22529,6 +25927,8 @@ vrshl_s16_(a, b)
}
/// Signed rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22546,6 +25946,8 @@ vrshlq_s16_(a, b)
}
/// Signed rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22563,6 +25965,8 @@ vrshl_s32_(a, b)
}
/// Signed rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22580,6 +25984,8 @@ vrshlq_s32_(a, b)
}
/// Signed rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22597,6 +26003,8 @@ vrshl_s64_(a, b)
}
/// Signed rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22614,6 +26022,8 @@ vrshlq_s64_(a, b)
}
/// Unsigned rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22631,6 +26041,8 @@ vrshl_u8_(a, b)
}
/// Unsigned rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22648,6 +26060,8 @@ vrshlq_u8_(a, b)
}
/// Unsigned rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22665,6 +26079,8 @@ vrshl_u16_(a, b)
}
/// Unsigned rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22682,6 +26098,8 @@ vrshlq_u16_(a, b)
}
/// Unsigned rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22699,6 +26117,8 @@ vrshl_u32_(a, b)
}
/// Unsigned rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22716,6 +26136,8 @@ vrshlq_u32_(a, b)
}
/// Unsigned rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22733,6 +26155,8 @@ vrshl_u64_(a, b)
}
/// Unsigned rounding shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22750,6 +26174,8 @@ vrshlq_u64_(a, b)
}
/// Signed rounding shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22763,6 +26189,8 @@ pub unsafe fn vrshr_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
}
/// Signed rounding shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22776,6 +26204,8 @@ pub unsafe fn vrshrq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
}
/// Signed rounding shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22789,6 +26219,8 @@ pub unsafe fn vrshr_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
}
/// Signed rounding shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22802,6 +26234,8 @@ pub unsafe fn vrshrq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
}
/// Signed rounding shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22815,6 +26249,8 @@ pub unsafe fn vrshr_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
}
/// Signed rounding shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22828,6 +26264,8 @@ pub unsafe fn vrshrq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
}
/// Signed rounding shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22841,6 +26279,8 @@ pub unsafe fn vrshr_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
}
/// Signed rounding shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22854,6 +26294,8 @@ pub unsafe fn vrshrq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
}
/// Unsigned rounding shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22867,6 +26309,8 @@ pub unsafe fn vrshr_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
}
/// Unsigned rounding shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22880,6 +26324,8 @@ pub unsafe fn vrshrq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
}
/// Unsigned rounding shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22893,6 +26339,8 @@ pub unsafe fn vrshr_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
}
/// Unsigned rounding shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22906,6 +26354,8 @@ pub unsafe fn vrshrq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
}
/// Unsigned rounding shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22919,6 +26369,8 @@ pub unsafe fn vrshr_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
}
/// Unsigned rounding shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22932,6 +26384,8 @@ pub unsafe fn vrshrq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
}
/// Unsigned rounding shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22945,6 +26399,8 @@ pub unsafe fn vrshr_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
}
/// Unsigned rounding shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -22958,6 +26414,8 @@ pub unsafe fn vrshrq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
}
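For context, an illustrative sketch of the immediate rounding shift right (again not part of the patch; aarch64 assumed, names and constants chosen just for the demo). The rounding variant adds 1 << (N - 1) before shifting, the plain variant truncates.

#[cfg(target_arch = "aarch64")]
fn rounding_shift_right_imm() {
    use core::arch::aarch64::*;
    unsafe {
        let a = vdup_n_u8(7);
        let rounded = vrshr_n_u8::<2>(a);  // (7 + 2) >> 2 = 2
        let truncated = vshr_n_u8::<2>(a); // 7 >> 2 = 1
        assert_eq!(vget_lane_u8::<0>(rounded), 2);
        assert_eq!(vget_lane_u8::<0>(truncated), 1);
    }
}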
/// Rounding shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -22974,6 +26432,8 @@ vrshrn_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i16
}
/// Rounding shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -22991,6 +26451,8 @@ vrshrn_n_s16_(a, N)
}
/// Rounding shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -23007,6 +26469,8 @@ vrshrn_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32))
}
/// Rounding shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -23024,6 +26488,8 @@ vrshrn_n_s32_(a, N)
}
/// Rounding shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
@@ -23040,6 +26506,8 @@ vrshrn_n_s64_(a, int64x2_t(-N as i64, -N as i64))
}
/// Rounding shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
@@ -23057,6 +26525,8 @@ vrshrn_n_s64_(a, N)
}
/// Rounding shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23070,6 +26540,8 @@ pub unsafe fn vrshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t {
}
/// Rounding shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23083,6 +26555,8 @@ pub unsafe fn vrshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t {
}
/// Rounding shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23096,6 +26570,8 @@ pub unsafe fn vrshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t {
}
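A hypothetical usage sketch for the rounding narrow intrinsics above (aarch64 assumed, not part of this diff): shift right by N and narrow the lane type in one step, rounding the bits that are dropped.

#[cfg(target_arch = "aarch64")]
fn rounding_narrow() {
    use core::arch::aarch64::*;
    unsafe {
        let wide = vdupq_n_u16(384); // 0x0180 in every lane
        // Narrow u16 -> u8 while dropping 8 fraction bits.
        let rounded = vrshrn_n_u16::<8>(wide);  // (384 + 128) >> 8 = 2
        let truncated = vshrn_n_u16::<8>(wide); // 384 >> 8 = 1
        assert_eq!(vget_lane_u8::<0>(rounded), 2);
        assert_eq!(vget_lane_u8::<0>(truncated), 1);
    }
}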
/// Signed rounding shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23109,6 +26585,8 @@ pub unsafe fn vrsra_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
}
/// Signed rounding shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23122,6 +26600,8 @@ pub unsafe fn vrsraq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t
}
/// Signed rounding shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23135,6 +26615,8 @@ pub unsafe fn vrsra_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t
}
/// Signed rounding shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23148,6 +26630,8 @@ pub unsafe fn vrsraq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_
}
/// Signed rounding shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23161,6 +26645,8 @@ pub unsafe fn vrsra_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t
}
/// Signed rounding shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23174,6 +26660,8 @@ pub unsafe fn vrsraq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_
}
/// Signed rounding shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23187,6 +26675,8 @@ pub unsafe fn vrsra_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t
}
/// Signed rounding shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23200,6 +26690,8 @@ pub unsafe fn vrsraq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_
}
/// Unsigned rounding shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23213,6 +26705,8 @@ pub unsafe fn vrsra_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t
}
/// Unsigned rounding shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23226,6 +26720,8 @@ pub unsafe fn vrsraq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x1
}
/// Unsigned rounding shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23239,6 +26735,8 @@ pub unsafe fn vrsra_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x
}
/// Unsigned rounding shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23252,6 +26750,8 @@ pub unsafe fn vrsraq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16
}
/// Unsigned rounding shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23265,6 +26765,8 @@ pub unsafe fn vrsra_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x
}
/// Unsigned rounding shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23278,6 +26780,8 @@ pub unsafe fn vrsraq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32
}
/// Unsigned rounding shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23291,6 +26795,8 @@ pub unsafe fn vrsra_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x
}
/// Unsigned rounding shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23304,6 +26810,8 @@ pub unsafe fn vrsraq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64
}
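A small sketch of the rounding shift-right-and-accumulate pattern linked above (illustrative only, aarch64 assumed): the second operand is shifted right with rounding and added to the accumulator.

#[cfg(target_arch = "aarch64")]
fn rounding_shift_accumulate() {
    use core::arch::aarch64::*;
    unsafe {
        let acc = vdup_n_u8(10);
        let x = vdup_n_u8(7);
        // acc + round(x >> 2): 10 + ((7 + 2) >> 2) = 12 in every lane
        let r = vrsra_n_u8::<2>(acc, x);
        assert_eq!(vget_lane_u8::<0>(r), 12);
    }
}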
/// Rounding subtract returning high narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23321,6 +26829,8 @@ vrsubhn_s16_(a, b)
}
/// Rounding subtract returning high narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23338,6 +26848,8 @@ vrsubhn_s32_(a, b)
}
/// Rounding subtract returning high narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23355,6 +26867,8 @@ vrsubhn_s64_(a, b)
}
/// Rounding subtract returning high narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23366,6 +26880,8 @@ pub unsafe fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t {
}
/// Rounding subtract returning high narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23377,6 +26893,8 @@ pub unsafe fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t {
}
/// Rounding subtract returning high narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23388,6 +26906,8 @@ pub unsafe fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t {
}
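An illustrative sketch (not part of the patch, aarch64 assumed) of the rounding subtract-high-narrow intrinsics: the difference is narrowed to its high half, with the discarded half rounded rather than truncated.

#[cfg(target_arch = "aarch64")]
fn rounding_sub_high_narrow() {
    use core::arch::aarch64::*;
    unsafe {
        let a = vdupq_n_u16(8576); // 0x2180
        let b = vdupq_n_u16(256);  // 0x0100
        let r = vrsubhn_u16(a, b); // (8320 + 128) >> 8 = 33
        assert_eq!(vget_lane_u8::<0>(r), 33);
        // The non-rounding variant truncates the discarded low byte instead.
        assert_eq!(vget_lane_u8::<0>(vsubhn_u16(a, b)), 32);
    }
}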
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23401,6 +26921,8 @@ pub unsafe fn vset_lane_s8<const LANE: i32>(a: i8, b: int8x8_t) -> int8x8_t {
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23414,6 +26936,8 @@ pub unsafe fn vset_lane_s16<const LANE: i32>(a: i16, b: int16x4_t) -> int16x4_t
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23427,6 +26951,8 @@ pub unsafe fn vset_lane_s32<const LANE: i32>(a: i32, b: int32x2_t) -> int32x2_t
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23440,6 +26966,8 @@ pub unsafe fn vset_lane_s64<const LANE: i32>(a: i64, b: int64x1_t) -> int64x1_t
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23453,6 +26981,8 @@ pub unsafe fn vset_lane_u8<const LANE: i32>(a: u8, b: uint8x8_t) -> uint8x8_t {
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23466,6 +26996,8 @@ pub unsafe fn vset_lane_u16<const LANE: i32>(a: u16, b: uint16x4_t) -> uint16x4_
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23479,6 +27011,8 @@ pub unsafe fn vset_lane_u32<const LANE: i32>(a: u32, b: uint32x2_t) -> uint32x2_
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23492,6 +27026,8 @@ pub unsafe fn vset_lane_u64<const LANE: i32>(a: u64, b: uint64x1_t) -> uint64x1_
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23505,6 +27041,8 @@ pub unsafe fn vset_lane_p8<const LANE: i32>(a: p8, b: poly8x8_t) -> poly8x8_t {
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23518,6 +27056,8 @@ pub unsafe fn vset_lane_p16<const LANE: i32>(a: p16, b: poly16x4_t) -> poly16x4_
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -23531,6 +27071,8 @@ pub unsafe fn vset_lane_p64<const LANE: i32>(a: p64, b: poly64x1_t) -> poly64x1_
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23544,6 +27086,8 @@ pub unsafe fn vsetq_lane_s8<const LANE: i32>(a: i8, b: int8x16_t) -> int8x16_t {
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23557,6 +27101,8 @@ pub unsafe fn vsetq_lane_s16<const LANE: i32>(a: i16, b: int16x8_t) -> int16x8_t
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23570,6 +27116,8 @@ pub unsafe fn vsetq_lane_s32<const LANE: i32>(a: i32, b: int32x4_t) -> int32x4_t
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23583,6 +27131,8 @@ pub unsafe fn vsetq_lane_s64<const LANE: i32>(a: i64, b: int64x2_t) -> int64x2_t
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23596,6 +27146,8 @@ pub unsafe fn vsetq_lane_u8<const LANE: i32>(a: u8, b: uint8x16_t) -> uint8x16_t
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23609,6 +27161,8 @@ pub unsafe fn vsetq_lane_u16<const LANE: i32>(a: u16, b: uint16x8_t) -> uint16x8
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23622,6 +27176,8 @@ pub unsafe fn vsetq_lane_u32<const LANE: i32>(a: u32, b: uint32x4_t) -> uint32x4
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23635,6 +27191,8 @@ pub unsafe fn vsetq_lane_u64<const LANE: i32>(a: u64, b: uint64x2_t) -> uint64x2
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23648,6 +27206,8 @@ pub unsafe fn vsetq_lane_p8<const LANE: i32>(a: p8, b: poly8x16_t) -> poly8x16_t
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23661,6 +27221,8 @@ pub unsafe fn vsetq_lane_p16<const LANE: i32>(a: p16, b: poly16x8_t) -> poly16x8
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
@@ -23674,6 +27236,8 @@ pub unsafe fn vsetq_lane_p64<const LANE: i32>(a: p64, b: poly64x2_t) -> poly64x2
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23687,6 +27251,8 @@ pub unsafe fn vset_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> float32x
}
/// Insert vector element from another vector element
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23700,6 +27266,8 @@ pub unsafe fn vsetq_lane_f32<const LANE: i32>(a: f32, b: float32x4_t) -> float32
}
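For the lane-insert intrinsics above, a minimal sketch (illustrative, aarch64 assumed): write a scalar into one lane and leave the rest of the vector untouched.

#[cfg(target_arch = "aarch64")]
fn set_one_lane() {
    use core::arch::aarch64::*;
    unsafe {
        let v = vdupq_n_f32(1.0);
        // Overwrite lane 2 only; the other lanes keep their old value.
        let v = vsetq_lane_f32::<2>(5.0, v);
        assert_eq!(vgetq_lane_f32::<2>(v), 5.0);
        assert_eq!(vgetq_lane_f32::<0>(v), 1.0);
    }
}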
/// Signed Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23717,6 +27285,8 @@ vshl_s8_(a, b)
}
/// Signed Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23734,6 +27304,8 @@ vshlq_s8_(a, b)
}
/// Signed Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23751,6 +27323,8 @@ vshl_s16_(a, b)
}
/// Signed Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23768,6 +27342,8 @@ vshlq_s16_(a, b)
}
/// Signed Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23785,6 +27361,8 @@ vshl_s32_(a, b)
}
/// Signed Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23802,6 +27380,8 @@ vshlq_s32_(a, b)
}
/// Signed Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23819,6 +27399,8 @@ vshl_s64_(a, b)
}
/// Signed Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23836,6 +27418,8 @@ vshlq_s64_(a, b)
}
/// Unsigned Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23853,6 +27437,8 @@ vshl_u8_(a, b)
}
/// Unsigned Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23870,6 +27456,8 @@ vshlq_u8_(a, b)
}
/// Unsigned Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23887,6 +27475,8 @@ vshl_u16_(a, b)
}
/// Unsigned Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23904,6 +27494,8 @@ vshlq_u16_(a, b)
}
/// Unsigned Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23921,6 +27513,8 @@ vshl_u32_(a, b)
}
/// Unsigned Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23938,6 +27532,8 @@ vshlq_u32_(a, b)
}
/// Unsigned Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23955,6 +27551,8 @@ vshl_u64_(a, b)
}
/// Unsigned Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23972,6 +27570,8 @@ vshlq_u64_(a, b)
}
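A sketch of the register-operand shift left (not part of this patch, aarch64 assumed): each lane is shifted by its own signed count, and a negative count shifts right.

#[cfg(target_arch = "aarch64")]
fn per_lane_variable_shift() {
    use core::arch::aarch64::*;
    let a: [i16; 4] = [1, 1, -8, -8];
    let counts: [i16; 4] = [3, 0, -2, 1]; // negative counts shift right
    let mut out = [0i16; 4];
    unsafe {
        let v = vshl_s16(vld1_s16(a.as_ptr()), vld1_s16(counts.as_ptr()));
        vst1_s16(out.as_mut_ptr(), v);
    }
    assert_eq!(out, [8, 1, -2, -16]);
}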
/// Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23985,6 +27585,8 @@ pub unsafe fn vshl_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
}
/// Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -23998,6 +27600,8 @@ pub unsafe fn vshlq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
}
/// Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24011,6 +27615,8 @@ pub unsafe fn vshl_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
}
/// Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24024,6 +27630,8 @@ pub unsafe fn vshlq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
}
/// Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24037,6 +27645,8 @@ pub unsafe fn vshl_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
}
/// Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24050,6 +27660,8 @@ pub unsafe fn vshlq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
}
/// Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24063,6 +27675,8 @@ pub unsafe fn vshl_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
}
/// Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24076,6 +27690,8 @@ pub unsafe fn vshlq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
}
/// Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24089,6 +27705,8 @@ pub unsafe fn vshl_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
}
/// Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24102,6 +27720,8 @@ pub unsafe fn vshlq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
}
/// Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24115,6 +27735,8 @@ pub unsafe fn vshl_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
}
/// Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24128,6 +27750,8 @@ pub unsafe fn vshlq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
}
/// Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24141,6 +27765,8 @@ pub unsafe fn vshl_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
}
/// Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24154,6 +27780,8 @@ pub unsafe fn vshlq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
}
/// Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24167,6 +27795,8 @@ pub unsafe fn vshl_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
}
/// Shift left
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24180,6 +27810,8 @@ pub unsafe fn vshlq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
}
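The immediate shift-left intrinsics above take the count as a const generic; a one-line sketch (illustrative, aarch64 assumed):

#[cfg(target_arch = "aarch64")]
fn shift_left_imm() {
    use core::arch::aarch64::*;
    unsafe {
        let r = vshl_n_u16::<4>(vdup_n_u16(3)); // 3 << 4 = 48 in every lane
        assert_eq!(vget_lane_u16::<0>(r), 48);
    }
}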
/// Signed shift left long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24193,6 +27825,8 @@ pub unsafe fn vshll_n_s8<const N: i32>(a: int8x8_t) -> int16x8_t {
}
/// Signed shift left long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24206,6 +27840,8 @@ pub unsafe fn vshll_n_s16<const N: i32>(a: int16x4_t) -> int32x4_t {
}
/// Signed shift left long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24219,6 +27855,8 @@ pub unsafe fn vshll_n_s32<const N: i32>(a: int32x2_t) -> int64x2_t {
}
/// Unsigned shift left long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24232,6 +27870,8 @@ pub unsafe fn vshll_n_u8<const N: i32>(a: uint8x8_t) -> uint16x8_t {
}
/// Unsigned shift left long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24245,6 +27885,8 @@ pub unsafe fn vshll_n_u16<const N: i32>(a: uint16x4_t) -> uint32x4_t {
}
/// Unsigned shift left long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24258,6 +27900,8 @@ pub unsafe fn vshll_n_u32<const N: i32>(a: uint32x2_t) -> uint64x2_t {
}
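A sketch of the shift-left-long variants (illustrative, aarch64 assumed): lanes are widened to the next element size while shifting, so the shifted value cannot overflow the source type.

#[cfg(target_arch = "aarch64")]
fn widening_shift_left() {
    use core::arch::aarch64::*;
    unsafe {
        let bytes = vdup_n_u8(200);
        // u8 -> u16 widening shift: 200 << 4 = 3200 fits the wider lane.
        let wide = vshll_n_u8::<4>(bytes);
        assert_eq!(vgetq_lane_u16::<0>(wide), 3200);
    }
}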
/// Shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24272,6 +27916,8 @@ pub unsafe fn vshr_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
}
/// Shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24286,6 +27932,8 @@ pub unsafe fn vshrq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
}
/// Shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24300,6 +27948,8 @@ pub unsafe fn vshr_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
}
/// Shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24314,6 +27964,8 @@ pub unsafe fn vshrq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
}
/// Shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24328,6 +27980,8 @@ pub unsafe fn vshr_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
}
/// Shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24342,6 +27996,8 @@ pub unsafe fn vshrq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
}
/// Shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24356,6 +28012,8 @@ pub unsafe fn vshr_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
}
/// Shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24370,6 +28028,8 @@ pub unsafe fn vshrq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
}
/// Shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24384,6 +28044,8 @@ pub unsafe fn vshr_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
}
/// Shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24398,6 +28060,8 @@ pub unsafe fn vshrq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
}
/// Shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24412,6 +28076,8 @@ pub unsafe fn vshr_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
}
/// Shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24426,6 +28092,8 @@ pub unsafe fn vshrq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
}
/// Shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24440,6 +28108,8 @@ pub unsafe fn vshr_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
}
/// Shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24454,6 +28124,8 @@ pub unsafe fn vshrq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
}
/// Shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24468,6 +28140,8 @@ pub unsafe fn vshr_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
}
/// Shift right
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24482,6 +28156,8 @@ pub unsafe fn vshrq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
}
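For the immediate shift-right intrinsics above, a short sketch (illustrative, aarch64 assumed): signed lanes use an arithmetic shift, unsigned lanes a logical one.

#[cfg(target_arch = "aarch64")]
fn shift_right_imm() {
    use core::arch::aarch64::*;
    unsafe {
        let s = vshr_n_s16::<3>(vdup_n_s16(-100)); // arithmetic: -100 >> 3 = -13
        let u = vshr_n_u16::<3>(vdup_n_u16(100));  // logical: 100 >> 3 = 12
        assert_eq!(vget_lane_s16::<0>(s), -13);
        assert_eq!(vget_lane_u16::<0>(u), 12);
    }
}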
/// Shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24495,6 +28171,8 @@ pub unsafe fn vshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
}
/// Shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24508,6 +28186,8 @@ pub unsafe fn vshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
}
/// Shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24521,6 +28201,8 @@ pub unsafe fn vshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
}
/// Shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24534,6 +28216,8 @@ pub unsafe fn vshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t {
}
/// Shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24547,6 +28231,8 @@ pub unsafe fn vshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t {
}
/// Shift right narrow
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24560,6 +28246,8 @@ pub unsafe fn vshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t {
}
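A sketch of the (non-rounding) shift-right-narrow intrinsics (illustrative, aarch64 assumed); shifting by the full lane width of the result keeps only the high byte of each wide lane.

#[cfg(target_arch = "aarch64")]
fn narrow_to_high_bytes() {
    use core::arch::aarch64::*;
    unsafe {
        let wide = vdupq_n_u16(0xABCD);
        let high = vshrn_n_u16::<8>(wide); // keep the high byte of each u16 lane
        assert_eq!(vget_lane_u8::<0>(high), 0xAB);
    }
}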
/// Signed shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24573,6 +28261,8 @@ pub unsafe fn vsra_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
}
/// Signed shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24586,6 +28276,8 @@ pub unsafe fn vsraq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t
}
/// Signed shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24599,6 +28291,8 @@ pub unsafe fn vsra_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t
}
/// Signed shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24612,6 +28306,8 @@ pub unsafe fn vsraq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t
}
/// Signed shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24625,6 +28321,8 @@ pub unsafe fn vsra_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t
}
/// Signed shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24638,6 +28336,8 @@ pub unsafe fn vsraq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t
}
/// Signed shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24651,6 +28351,8 @@ pub unsafe fn vsra_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t
}
/// Signed shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24664,6 +28366,8 @@ pub unsafe fn vsraq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t
}
/// Unsigned shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24677,6 +28381,8 @@ pub unsafe fn vsra_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
}
/// Unsigned shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24690,6 +28396,8 @@ pub unsafe fn vsraq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16
}
/// Unsigned shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24703,6 +28411,8 @@ pub unsafe fn vsra_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4
}
/// Unsigned shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24716,6 +28426,8 @@ pub unsafe fn vsraq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x
}
/// Unsigned shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24729,6 +28441,8 @@ pub unsafe fn vsra_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2
}
/// Unsigned shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24742,6 +28456,8 @@ pub unsafe fn vsraq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x
}
/// Unsigned shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24755,6 +28471,8 @@ pub unsafe fn vsra_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1
}
/// Unsigned shift right and accumulate
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24768,6 +28486,8 @@ pub unsafe fn vsraq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x
}
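The plain shift-right-and-accumulate intrinsics behave like the rounding ones shown earlier, minus the rounding term; a minimal sketch (illustrative, aarch64 assumed):

#[cfg(target_arch = "aarch64")]
fn shift_right_accumulate() {
    use core::arch::aarch64::*;
    unsafe {
        let acc = vdup_n_u8(100);
        let x = vdup_n_u8(50);
        // acc + (x >> 1), truncating: 100 + 25 = 125 in every lane
        let r = vsra_n_u8::<1>(acc, x);
        assert_eq!(vget_lane_u8::<0>(r), 125);
    }
}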
/// Transpose elements
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24781,6 +28501,8 @@ pub unsafe fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t {
}
/// Transpose elements
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24794,6 +28516,8 @@ pub unsafe fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t {
}
/// Transpose elements
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24807,6 +28531,8 @@ pub unsafe fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t {
}
/// Transpose elements
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24820,6 +28546,8 @@ pub unsafe fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t {
}
/// Transpose elements
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24833,6 +28561,8 @@ pub unsafe fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t {
}
/// Transpose elements
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24846,6 +28576,8 @@ pub unsafe fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t {
}
/// Transpose elements
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24859,6 +28591,8 @@ pub unsafe fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t {
}
/// Transpose elements
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24872,6 +28606,8 @@ pub unsafe fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t {
}
/// Transpose elements
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24885,6 +28621,8 @@ pub unsafe fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t {
}
/// Transpose elements
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24898,6 +28636,8 @@ pub unsafe fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t {
}
/// Transpose elements
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24911,6 +28651,8 @@ pub unsafe fn vtrn_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t {
}
/// Transpose elements
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24924,6 +28666,8 @@ pub unsafe fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t {
}
/// Transpose elements
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24937,6 +28681,8 @@ pub unsafe fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t {
}
/// Transpose elements
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24950,6 +28696,8 @@ pub unsafe fn vtrnq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t {
}
/// Transpose elements
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24963,6 +28711,8 @@ pub unsafe fn vtrn_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t {
}
/// Transpose elements
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24976,6 +28726,8 @@ pub unsafe fn vtrn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t {
}
/// Transpose elements
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -24989,6 +28741,8 @@ pub unsafe fn vtrn_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t {
}
/// Transpose elements
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25002,6 +28756,8 @@ pub unsafe fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t {
}
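A sketch of the transpose intrinsics above (illustrative, aarch64 assumed): the two inputs are treated as rows of 2x2 element blocks, and the result pair holds the even-lane and odd-lane interleavings.

#[cfg(target_arch = "aarch64")]
fn transpose_pairs() {
    use core::arch::aarch64::*;
    let a: [u8; 8] = [0, 1, 2, 3, 4, 5, 6, 7];
    let b: [u8; 8] = [10, 11, 12, 13, 14, 15, 16, 17];
    let (mut even, mut odd) = ([0u8; 8], [0u8; 8]);
    unsafe {
        let t = vtrn_u8(vld1_u8(a.as_ptr()), vld1_u8(b.as_ptr()));
        vst1_u8(even.as_mut_ptr(), t.0);
        vst1_u8(odd.as_mut_ptr(), t.1);
    }
    assert_eq!(even, [0, 10, 2, 12, 4, 14, 6, 16]); // even lanes interleaved
    assert_eq!(odd, [1, 11, 3, 13, 5, 15, 7, 17]);  // odd lanes interleaved
}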
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25015,6 +28771,8 @@ pub unsafe fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25028,6 +28786,8 @@ pub unsafe fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25041,6 +28801,8 @@ pub unsafe fn vzip_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25054,6 +28816,8 @@ pub unsafe fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25067,6 +28831,8 @@ pub unsafe fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25080,6 +28846,8 @@ pub unsafe fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25093,6 +28861,8 @@ pub unsafe fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25106,6 +28876,8 @@ pub unsafe fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25119,6 +28891,8 @@ pub unsafe fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25132,6 +28906,8 @@ pub unsafe fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25145,6 +28921,8 @@ pub unsafe fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25158,6 +28936,8 @@ pub unsafe fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25171,6 +28951,8 @@ pub unsafe fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25184,6 +28966,8 @@ pub unsafe fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25197,6 +28981,8 @@ pub unsafe fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25210,6 +28996,8 @@ pub unsafe fn vzipq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25223,6 +29011,8 @@ pub unsafe fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t {
}
/// Zip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25236,6 +29026,8 @@ pub unsafe fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25249,6 +29041,8 @@ pub unsafe fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25262,6 +29056,8 @@ pub unsafe fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25275,6 +29071,8 @@ pub unsafe fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25288,6 +29086,8 @@ pub unsafe fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25301,6 +29101,8 @@ pub unsafe fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25314,6 +29116,8 @@ pub unsafe fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25327,6 +29131,8 @@ pub unsafe fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25340,6 +29146,8 @@ pub unsafe fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25353,6 +29161,8 @@ pub unsafe fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25366,6 +29176,8 @@ pub unsafe fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25379,6 +29191,8 @@ pub unsafe fn vuzp_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25392,6 +29206,8 @@ pub unsafe fn vuzp_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25405,6 +29221,8 @@ pub unsafe fn vuzpq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25418,6 +29236,8 @@ pub unsafe fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25431,6 +29251,8 @@ pub unsafe fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25444,6 +29266,8 @@ pub unsafe fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25457,6 +29281,8 @@ pub unsafe fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t {
}
/// Unzip vectors
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
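The links added above cover the whole lane-interleave family. As a quick aid to the one-line summaries ("Transpose elements", "Zip vectors", "Unzip vectors"), here is a scalar sketch of how the three shuffles rearrange two 4-lane inputs; the helper names and plain arrays are illustrative only, not part of the patch:

    // Hypothetical scalar models of the vtrn/vzip/vuzp lane shuffles.
    fn zip4(a: [i32; 4], b: [i32; 4]) -> ([i32; 4], [i32; 4]) {
        // Interleave lanes pairwise: low halves first, then high halves.
        ([a[0], b[0], a[1], b[1]], [a[2], b[2], a[3], b[3]])
    }
    fn unzip4(a: [i32; 4], b: [i32; 4]) -> ([i32; 4], [i32; 4]) {
        // Even lanes of the concatenation, then the odd lanes.
        ([a[0], a[2], b[0], b[2]], [a[1], a[3], b[1], b[3]])
    }
    fn trn4(a: [i32; 4], b: [i32; 4]) -> ([i32; 4], [i32; 4]) {
        // Treat the pair as a 2x2 grid of lane pairs and transpose it.
        ([a[0], b[0], a[2], b[2]], [a[1], b[1], a[3], b[3]])
    }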
@@ -25470,6 +29296,8 @@ pub unsafe fn vuzpq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t {
}
/// Unsigned Absolute difference and Accumulate Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25482,6 +29310,8 @@ pub unsafe fn vabal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t
}
/// Unsigned Absolute difference and Accumulate Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25494,6 +29324,8 @@ pub unsafe fn vabal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4
}
/// Unsigned Absolute difference and Accumulate Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25506,6 +29338,8 @@ pub unsafe fn vabal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2
}
/// Signed Absolute difference and Accumulate Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25519,6 +29353,8 @@ pub unsafe fn vabal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t {
}
/// Signed Absolute difference and Accumulate Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25532,6 +29368,8 @@ pub unsafe fn vabal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t {
}
/// Signed Absolute difference and Accumulate Long
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25545,6 +29383,8 @@ pub unsafe fn vabal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t {
}
/// Signed saturating Absolute value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25562,6 +29402,8 @@ vqabs_s8_(a)
}
/// Signed saturating Absolute value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25579,6 +29421,8 @@ vqabsq_s8_(a)
}
/// Signed saturating Absolute value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25596,6 +29440,8 @@ vqabs_s16_(a)
}
/// Signed saturating Absolute value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25613,6 +29459,8 @@ vqabsq_s16_(a)
}
/// Signed saturating Absolute value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -25630,6 +29478,8 @@ vqabs_s32_(a)
}
/// Signed saturating Absolute value
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
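For the `vqabs` family the summary is just "Signed saturating Absolute value"; the saturating part is what distinguishes it from a plain absolute value, since negating the minimum lane value would otherwise overflow. A minimal per-lane sketch (hypothetical helper, not the crate's implementation):

    // Illustrative model of one vqabs_s8 lane: |x| with saturation,
    // so i8::MIN becomes i8::MAX instead of wrapping back to i8::MIN.
    fn qabs_i8(x: i8) -> i8 {
        if x == i8::MIN { i8::MAX } else { x.abs() }
    }
    // qabs_i8(-128) == 127, qabs_i8(-5) == 5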
@@ -28096,181 +31946,181 @@ mod test {
#[simd_test(enable = "neon")]
unsafe fn test_vext_s8() {
- let a: i8x8 = i8x8::new(0, 8, 8, 9, 8, 9, 9, 11);
- let b: i8x8 = i8x8::new(9, 11, 14, 15, 16, 17, 18, 19);
- let e: i8x8 = i8x8::new(8, 9, 9, 11, 9, 11, 14, 15);
- let r: i8x8 = transmute(vext_s8::<4>(transmute(a), transmute(b)));
+ let a: i8x8 = i8x8::new(1, 1, 1, 1, 1, 1, 1, 1);
+ let b: i8x8 = i8x8::new(2, 2, 2, 2, 2, 2, 2, 2);
+ let e: i8x8 = i8x8::new(1, 2, 2, 2, 2, 2, 2, 2);
+ let r: i8x8 = transmute(vext_s8::<7>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vextq_s8() {
- let a: i8x16 = i8x16::new(0, 8, 8, 9, 8, 9, 9, 11, 8, 9, 9, 11, 9, 11, 14, 15);
- let b: i8x16 = i8x16::new(9, 11, 14, 15, 16, 17, 18, 19, 0, 8, 8, 9, 8, 9, 9, 11);
- let e: i8x16 = i8x16::new(8, 9, 9, 11, 9, 11, 14, 15, 9, 11, 14, 15, 16, 17, 18, 19);
- let r: i8x16 = transmute(vextq_s8::<8>(transmute(a), transmute(b)));
+ let a: i8x16 = i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1);
+ let b: i8x16 = i8x16::new(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
+ let e: i8x16 = i8x16::new(1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
+ let r: i8x16 = transmute(vextq_s8::<15>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vext_s16() {
- let a: i16x4 = i16x4::new(0, 8, 8, 9);
- let b: i16x4 = i16x4::new(9, 11, 14, 15);
- let e: i16x4 = i16x4::new(8, 9, 9, 11);
- let r: i16x4 = transmute(vext_s16::<2>(transmute(a), transmute(b)));
+ let a: i16x4 = i16x4::new(1, 1, 1, 1);
+ let b: i16x4 = i16x4::new(2, 2, 2, 2);
+ let e: i16x4 = i16x4::new(1, 2, 2, 2);
+ let r: i16x4 = transmute(vext_s16::<3>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vextq_s16() {
- let a: i16x8 = i16x8::new(0, 8, 8, 9, 8, 9, 9, 11);
- let b: i16x8 = i16x8::new(9, 11, 14, 15, 16, 17, 18, 19);
- let e: i16x8 = i16x8::new(8, 9, 9, 11, 9, 11, 14, 15);
- let r: i16x8 = transmute(vextq_s16::<4>(transmute(a), transmute(b)));
+ let a: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
+ let b: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
+ let e: i16x8 = i16x8::new(1, 2, 2, 2, 2, 2, 2, 2);
+ let r: i16x8 = transmute(vextq_s16::<7>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vext_s32() {
- let a: i32x2 = i32x2::new(0, 8);
- let b: i32x2 = i32x2::new(9, 11);
- let e: i32x2 = i32x2::new(8, 9);
+ let a: i32x2 = i32x2::new(1, 1);
+ let b: i32x2 = i32x2::new(2, 2);
+ let e: i32x2 = i32x2::new(1, 2);
let r: i32x2 = transmute(vext_s32::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vextq_s32() {
- let a: i32x4 = i32x4::new(0, 8, 8, 9);
- let b: i32x4 = i32x4::new(9, 11, 14, 15);
- let e: i32x4 = i32x4::new(8, 9, 9, 11);
- let r: i32x4 = transmute(vextq_s32::<2>(transmute(a), transmute(b)));
+ let a: i32x4 = i32x4::new(1, 1, 1, 1);
+ let b: i32x4 = i32x4::new(2, 2, 2, 2);
+ let e: i32x4 = i32x4::new(1, 2, 2, 2);
+ let r: i32x4 = transmute(vextq_s32::<3>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vext_u8() {
- let a: u8x8 = u8x8::new(0, 8, 8, 9, 8, 9, 9, 11);
- let b: u8x8 = u8x8::new(9, 11, 14, 15, 16, 17, 18, 19);
- let e: u8x8 = u8x8::new(8, 9, 9, 11, 9, 11, 14, 15);
- let r: u8x8 = transmute(vext_u8::<4>(transmute(a), transmute(b)));
+ let a: u8x8 = u8x8::new(1, 1, 1, 1, 1, 1, 1, 1);
+ let b: u8x8 = u8x8::new(2, 2, 2, 2, 2, 2, 2, 2);
+ let e: u8x8 = u8x8::new(1, 2, 2, 2, 2, 2, 2, 2);
+ let r: u8x8 = transmute(vext_u8::<7>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vextq_u8() {
- let a: u8x16 = u8x16::new(0, 8, 8, 9, 8, 9, 9, 11, 8, 9, 9, 11, 9, 11, 14, 15);
- let b: u8x16 = u8x16::new(9, 11, 14, 15, 16, 17, 18, 19, 0, 8, 8, 9, 8, 9, 9, 11);
- let e: u8x16 = u8x16::new(8, 9, 9, 11, 9, 11, 14, 15, 9, 11, 14, 15, 16, 17, 18, 19);
- let r: u8x16 = transmute(vextq_u8::<8>(transmute(a), transmute(b)));
+ let a: u8x16 = u8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1);
+ let b: u8x16 = u8x16::new(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
+ let e: u8x16 = u8x16::new(1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
+ let r: u8x16 = transmute(vextq_u8::<15>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vext_u16() {
- let a: u16x4 = u16x4::new(0, 8, 8, 9);
- let b: u16x4 = u16x4::new(9, 11, 14, 15);
- let e: u16x4 = u16x4::new(8, 9, 9, 11);
- let r: u16x4 = transmute(vext_u16::<2>(transmute(a), transmute(b)));
+ let a: u16x4 = u16x4::new(1, 1, 1, 1);
+ let b: u16x4 = u16x4::new(2, 2, 2, 2);
+ let e: u16x4 = u16x4::new(1, 2, 2, 2);
+ let r: u16x4 = transmute(vext_u16::<3>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vextq_u16() {
- let a: u16x8 = u16x8::new(0, 8, 8, 9, 8, 9, 9, 11);
- let b: u16x8 = u16x8::new(9, 11, 14, 15, 16, 17, 18, 19);
- let e: u16x8 = u16x8::new(8, 9, 9, 11, 9, 11, 14, 15);
- let r: u16x8 = transmute(vextq_u16::<4>(transmute(a), transmute(b)));
+ let a: u16x8 = u16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
+ let b: u16x8 = u16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
+ let e: u16x8 = u16x8::new(1, 2, 2, 2, 2, 2, 2, 2);
+ let r: u16x8 = transmute(vextq_u16::<7>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vext_u32() {
- let a: u32x2 = u32x2::new(0, 8);
- let b: u32x2 = u32x2::new(9, 11);
- let e: u32x2 = u32x2::new(8, 9);
+ let a: u32x2 = u32x2::new(1, 1);
+ let b: u32x2 = u32x2::new(2, 2);
+ let e: u32x2 = u32x2::new(1, 2);
let r: u32x2 = transmute(vext_u32::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vextq_u32() {
- let a: u32x4 = u32x4::new(0, 8, 8, 9);
- let b: u32x4 = u32x4::new(9, 11, 14, 15);
- let e: u32x4 = u32x4::new(8, 9, 9, 11);
- let r: u32x4 = transmute(vextq_u32::<2>(transmute(a), transmute(b)));
+ let a: u32x4 = u32x4::new(1, 1, 1, 1);
+ let b: u32x4 = u32x4::new(2, 2, 2, 2);
+ let e: u32x4 = u32x4::new(1, 2, 2, 2);
+ let r: u32x4 = transmute(vextq_u32::<3>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vext_p8() {
- let a: i8x8 = i8x8::new(0, 8, 8, 9, 8, 9, 9, 11);
- let b: i8x8 = i8x8::new(9, 11, 14, 15, 16, 17, 18, 19);
- let e: i8x8 = i8x8::new(8, 9, 9, 11, 9, 11, 14, 15);
- let r: i8x8 = transmute(vext_p8::<4>(transmute(a), transmute(b)));
+ let a: i8x8 = i8x8::new(1, 1, 1, 1, 1, 1, 1, 1);
+ let b: i8x8 = i8x8::new(2, 2, 2, 2, 2, 2, 2, 2);
+ let e: i8x8 = i8x8::new(1, 2, 2, 2, 2, 2, 2, 2);
+ let r: i8x8 = transmute(vext_p8::<7>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vextq_p8() {
- let a: i8x16 = i8x16::new(0, 8, 8, 9, 8, 9, 9, 11, 8, 9, 9, 11, 9, 11, 14, 15);
- let b: i8x16 = i8x16::new(9, 11, 14, 15, 16, 17, 18, 19, 0, 8, 8, 9, 8, 9, 9, 11);
- let e: i8x16 = i8x16::new(8, 9, 9, 11, 9, 11, 14, 15, 9, 11, 14, 15, 16, 17, 18, 19);
- let r: i8x16 = transmute(vextq_p8::<8>(transmute(a), transmute(b)));
+ let a: i8x16 = i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1);
+ let b: i8x16 = i8x16::new(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
+ let e: i8x16 = i8x16::new(1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
+ let r: i8x16 = transmute(vextq_p8::<15>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vext_p16() {
- let a: i16x4 = i16x4::new(0, 8, 8, 9);
- let b: i16x4 = i16x4::new(9, 11, 14, 15);
- let e: i16x4 = i16x4::new(8, 9, 9, 11);
- let r: i16x4 = transmute(vext_p16::<2>(transmute(a), transmute(b)));
+ let a: i16x4 = i16x4::new(1, 1, 1, 1);
+ let b: i16x4 = i16x4::new(2, 2, 2, 2);
+ let e: i16x4 = i16x4::new(1, 2, 2, 2);
+ let r: i16x4 = transmute(vext_p16::<3>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vextq_p16() {
- let a: i16x8 = i16x8::new(0, 8, 8, 9, 8, 9, 9, 11);
- let b: i16x8 = i16x8::new(9, 11, 14, 15, 16, 17, 18, 19);
- let e: i16x8 = i16x8::new(8, 9, 9, 11, 9, 11, 14, 15);
- let r: i16x8 = transmute(vextq_p16::<4>(transmute(a), transmute(b)));
+ let a: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
+ let b: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
+ let e: i16x8 = i16x8::new(1, 2, 2, 2, 2, 2, 2, 2);
+ let r: i16x8 = transmute(vextq_p16::<7>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vextq_s64() {
- let a: i64x2 = i64x2::new(0, 8);
- let b: i64x2 = i64x2::new(9, 11);
- let e: i64x2 = i64x2::new(8, 9);
+ let a: i64x2 = i64x2::new(1, 1);
+ let b: i64x2 = i64x2::new(2, 2);
+ let e: i64x2 = i64x2::new(1, 2);
let r: i64x2 = transmute(vextq_s64::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vextq_u64() {
- let a: u64x2 = u64x2::new(0, 8);
- let b: u64x2 = u64x2::new(9, 11);
- let e: u64x2 = u64x2::new(8, 9);
+ let a: u64x2 = u64x2::new(1, 1);
+ let b: u64x2 = u64x2::new(2, 2);
+ let e: u64x2 = u64x2::new(1, 2);
let r: u64x2 = transmute(vextq_u64::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vext_f32() {
- let a: f32x2 = f32x2::new(0., 2.);
- let b: f32x2 = f32x2::new(3., 4.);
- let e: f32x2 = f32x2::new(2., 3.);
+ let a: f32x2 = f32x2::new(1., 1.);
+ let b: f32x2 = f32x2::new(2., 2.);
+ let e: f32x2 = f32x2::new(1., 2.);
let r: f32x2 = transmute(vext_f32::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vextq_f32() {
- let a: f32x4 = f32x4::new(0., 2., 2., 3.);
- let b: f32x4 = f32x4::new(3., 4., 5., 6.);
- let e: f32x4 = f32x4::new(2., 3., 3., 4.);
- let r: f32x4 = transmute(vextq_f32::<2>(transmute(a), transmute(b)));
+ let a: f32x4 = f32x4::new(1., 1., 1., 1.);
+ let b: f32x4 = f32x4::new(2., 2., 2., 2.);
+ let e: f32x4 = f32x4::new(1., 2., 2., 2.);
+ let r: f32x4 = transmute(vextq_f32::<3>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
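The rewritten `vext` tests above use constant lanes (all 1s and all 2s), which makes the expectations easy to check by eye: `vext*::<N>` takes the lanes of `a` followed by the lanes of `b` and returns the window that starts at lane `N`. A hypothetical scalar model of the 8-lane case, not part of the patch:

    // Illustrative model of vext_s8::<N>(a, b): 8 consecutive lanes of
    // [a0..a7, b0..b7] starting at lane N.
    fn ext8<const N: usize>(a: [i8; 8], b: [i8; 8]) -> [i8; 8] {
        let cat: Vec<i8> = a.iter().chain(b.iter()).copied().collect();
        let mut out = [0i8; 8];
        out.copy_from_slice(&cat[N..N + 8]);
        out
    }
    // ext8::<7>([1; 8], [2; 8]) == [1, 2, 2, 2, 2, 2, 2, 2],
    // matching the updated expectation in test_vext_s8.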
diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs
index 952d1ca2e..0559aea83 100644
--- a/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs
+++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs
@@ -106,6 +106,7 @@ types! {
}
/// ARM-specific type containing two `int8x8_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -113,6 +114,7 @@ types! {
)]
pub struct int8x8x2_t(pub int8x8_t, pub int8x8_t);
/// ARM-specific type containing three `int8x8_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -120,6 +122,7 @@ pub struct int8x8x2_t(pub int8x8_t, pub int8x8_t);
)]
pub struct int8x8x3_t(pub int8x8_t, pub int8x8_t, pub int8x8_t);
/// ARM-specific type containing four `int8x8_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -128,6 +131,7 @@ pub struct int8x8x3_t(pub int8x8_t, pub int8x8_t, pub int8x8_t);
pub struct int8x8x4_t(pub int8x8_t, pub int8x8_t, pub int8x8_t, pub int8x8_t);
/// ARM-specific type containing two `int8x16_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -135,6 +139,7 @@ pub struct int8x8x4_t(pub int8x8_t, pub int8x8_t, pub int8x8_t, pub int8x8_t);
)]
pub struct int8x16x2_t(pub int8x16_t, pub int8x16_t);
/// ARM-specific type containing three `int8x16_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -142,6 +147,7 @@ pub struct int8x16x2_t(pub int8x16_t, pub int8x16_t);
)]
pub struct int8x16x3_t(pub int8x16_t, pub int8x16_t, pub int8x16_t);
/// ARM-specific type containing four `int8x16_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -150,6 +156,7 @@ pub struct int8x16x3_t(pub int8x16_t, pub int8x16_t, pub int8x16_t);
pub struct int8x16x4_t(pub int8x16_t, pub int8x16_t, pub int8x16_t, pub int8x16_t);
/// ARM-specific type containing two `uint8x8_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -157,6 +164,7 @@ pub struct int8x16x4_t(pub int8x16_t, pub int8x16_t, pub int8x16_t, pub int8x16_
)]
pub struct uint8x8x2_t(pub uint8x8_t, pub uint8x8_t);
/// ARM-specific type containing three `uint8x8_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -164,6 +172,7 @@ pub struct uint8x8x2_t(pub uint8x8_t, pub uint8x8_t);
)]
pub struct uint8x8x3_t(pub uint8x8_t, pub uint8x8_t, pub uint8x8_t);
/// ARM-specific type containing four `uint8x8_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -172,6 +181,7 @@ pub struct uint8x8x3_t(pub uint8x8_t, pub uint8x8_t, pub uint8x8_t);
pub struct uint8x8x4_t(pub uint8x8_t, pub uint8x8_t, pub uint8x8_t, pub uint8x8_t);
/// ARM-specific type containing two `uint8x16_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -179,6 +189,7 @@ pub struct uint8x8x4_t(pub uint8x8_t, pub uint8x8_t, pub uint8x8_t, pub uint8x8_
)]
pub struct uint8x16x2_t(pub uint8x16_t, pub uint8x16_t);
/// ARM-specific type containing three `uint8x16_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -186,6 +197,7 @@ pub struct uint8x16x2_t(pub uint8x16_t, pub uint8x16_t);
)]
pub struct uint8x16x3_t(pub uint8x16_t, pub uint8x16_t, pub uint8x16_t);
/// ARM-specific type containing four `uint8x16_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -199,6 +211,7 @@ pub struct uint8x16x4_t(
);
/// ARM-specific type containing two `poly8x8_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -206,6 +219,7 @@ pub struct uint8x16x4_t(
)]
pub struct poly8x8x2_t(pub poly8x8_t, pub poly8x8_t);
/// ARM-specific type containing three `poly8x8_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -213,6 +227,7 @@ pub struct poly8x8x2_t(pub poly8x8_t, pub poly8x8_t);
)]
pub struct poly8x8x3_t(pub poly8x8_t, pub poly8x8_t, pub poly8x8_t);
/// ARM-specific type containing four `poly8x8_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -221,6 +236,7 @@ pub struct poly8x8x3_t(pub poly8x8_t, pub poly8x8_t, pub poly8x8_t);
pub struct poly8x8x4_t(pub poly8x8_t, pub poly8x8_t, pub poly8x8_t, pub poly8x8_t);
/// ARM-specific type containing two `poly8x16_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -228,6 +244,7 @@ pub struct poly8x8x4_t(pub poly8x8_t, pub poly8x8_t, pub poly8x8_t, pub poly8x8_
)]
pub struct poly8x16x2_t(pub poly8x16_t, pub poly8x16_t);
/// ARM-specific type containing three `poly8x16_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -235,6 +252,7 @@ pub struct poly8x16x2_t(pub poly8x16_t, pub poly8x16_t);
)]
pub struct poly8x16x3_t(pub poly8x16_t, pub poly8x16_t, pub poly8x16_t);
/// ARM-specific type containing four `poly8x16_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -248,6 +266,7 @@ pub struct poly8x16x4_t(
);
/// ARM-specific type containing two `int16x4_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -255,6 +274,7 @@ pub struct poly8x16x4_t(
)]
pub struct int16x4x2_t(pub int16x4_t, pub int16x4_t);
/// ARM-specific type containing three `int16x4_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -262,6 +282,7 @@ pub struct int16x4x2_t(pub int16x4_t, pub int16x4_t);
)]
pub struct int16x4x3_t(pub int16x4_t, pub int16x4_t, pub int16x4_t);
/// ARM-specific type containing four `int16x4_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -270,6 +291,7 @@ pub struct int16x4x3_t(pub int16x4_t, pub int16x4_t, pub int16x4_t);
pub struct int16x4x4_t(pub int16x4_t, pub int16x4_t, pub int16x4_t, pub int16x4_t);
/// ARM-specific type containing two `int16x8_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -277,6 +299,7 @@ pub struct int16x4x4_t(pub int16x4_t, pub int16x4_t, pub int16x4_t, pub int16x4_
)]
pub struct int16x8x2_t(pub int16x8_t, pub int16x8_t);
/// ARM-specific type containing three `int16x8_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -284,6 +307,7 @@ pub struct int16x8x2_t(pub int16x8_t, pub int16x8_t);
)]
pub struct int16x8x3_t(pub int16x8_t, pub int16x8_t, pub int16x8_t);
/// ARM-specific type containing four `int16x8_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -292,6 +316,7 @@ pub struct int16x8x3_t(pub int16x8_t, pub int16x8_t, pub int16x8_t);
pub struct int16x8x4_t(pub int16x8_t, pub int16x8_t, pub int16x8_t, pub int16x8_t);
/// ARM-specific type containing two `uint16x4_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -299,6 +324,7 @@ pub struct int16x8x4_t(pub int16x8_t, pub int16x8_t, pub int16x8_t, pub int16x8_
)]
pub struct uint16x4x2_t(pub uint16x4_t, pub uint16x4_t);
/// ARM-specific type containing three `uint16x4_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -306,6 +332,7 @@ pub struct uint16x4x2_t(pub uint16x4_t, pub uint16x4_t);
)]
pub struct uint16x4x3_t(pub uint16x4_t, pub uint16x4_t, pub uint16x4_t);
/// ARM-specific type containing four `uint16x4_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -319,6 +346,7 @@ pub struct uint16x4x4_t(
);
/// ARM-specific type containing two `uint16x8_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -326,6 +354,7 @@ pub struct uint16x4x4_t(
)]
pub struct uint16x8x2_t(pub uint16x8_t, pub uint16x8_t);
/// ARM-specific type containing three `uint16x8_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -333,6 +362,7 @@ pub struct uint16x8x2_t(pub uint16x8_t, pub uint16x8_t);
)]
pub struct uint16x8x3_t(pub uint16x8_t, pub uint16x8_t, pub uint16x8_t);
/// ARM-specific type containing four `uint16x8_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -346,6 +376,7 @@ pub struct uint16x8x4_t(
);
/// ARM-specific type containing two `poly16x4_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -353,6 +384,7 @@ pub struct uint16x8x4_t(
)]
pub struct poly16x4x2_t(pub poly16x4_t, pub poly16x4_t);
/// ARM-specific type containing three `poly16x4_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -360,6 +392,7 @@ pub struct poly16x4x2_t(pub poly16x4_t, pub poly16x4_t);
)]
pub struct poly16x4x3_t(pub poly16x4_t, pub poly16x4_t, pub poly16x4_t);
/// ARM-specific type containing four `poly16x4_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -373,6 +406,7 @@ pub struct poly16x4x4_t(
);
/// ARM-specific type containing two `poly16x8_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -380,6 +414,7 @@ pub struct poly16x4x4_t(
)]
pub struct poly16x8x2_t(pub poly16x8_t, pub poly16x8_t);
/// ARM-specific type containing three `poly16x8_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -387,6 +422,7 @@ pub struct poly16x8x2_t(pub poly16x8_t, pub poly16x8_t);
)]
pub struct poly16x8x3_t(pub poly16x8_t, pub poly16x8_t, pub poly16x8_t);
/// ARM-specific type containing four `poly16x8_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -400,6 +436,7 @@ pub struct poly16x8x4_t(
);
/// ARM-specific type containing two `int32x2_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -407,6 +444,7 @@ pub struct poly16x8x4_t(
)]
pub struct int32x2x2_t(pub int32x2_t, pub int32x2_t);
/// ARM-specific type containing three `int32x2_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -414,6 +452,7 @@ pub struct int32x2x2_t(pub int32x2_t, pub int32x2_t);
)]
pub struct int32x2x3_t(pub int32x2_t, pub int32x2_t, pub int32x2_t);
/// ARM-specific type containing four `int32x2_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -422,6 +461,7 @@ pub struct int32x2x3_t(pub int32x2_t, pub int32x2_t, pub int32x2_t);
pub struct int32x2x4_t(pub int32x2_t, pub int32x2_t, pub int32x2_t, pub int32x2_t);
/// ARM-specific type containing two `int32x4_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -429,6 +469,7 @@ pub struct int32x2x4_t(pub int32x2_t, pub int32x2_t, pub int32x2_t, pub int32x2_
)]
pub struct int32x4x2_t(pub int32x4_t, pub int32x4_t);
/// ARM-specific type containing three `int32x4_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -436,6 +477,7 @@ pub struct int32x4x2_t(pub int32x4_t, pub int32x4_t);
)]
pub struct int32x4x3_t(pub int32x4_t, pub int32x4_t, pub int32x4_t);
/// ARM-specific type containing four `int32x4_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -444,6 +486,7 @@ pub struct int32x4x3_t(pub int32x4_t, pub int32x4_t, pub int32x4_t);
pub struct int32x4x4_t(pub int32x4_t, pub int32x4_t, pub int32x4_t, pub int32x4_t);
/// ARM-specific type containing two `uint32x2_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -451,6 +494,7 @@ pub struct int32x4x4_t(pub int32x4_t, pub int32x4_t, pub int32x4_t, pub int32x4_
)]
pub struct uint32x2x2_t(pub uint32x2_t, pub uint32x2_t);
/// ARM-specific type containing three `uint32x2_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -458,6 +502,7 @@ pub struct uint32x2x2_t(pub uint32x2_t, pub uint32x2_t);
)]
pub struct uint32x2x3_t(pub uint32x2_t, pub uint32x2_t, pub uint32x2_t);
/// ARM-specific type containing four `uint32x2_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -471,6 +516,7 @@ pub struct uint32x2x4_t(
);
/// ARM-specific type containing two `uint32x4_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -478,6 +524,7 @@ pub struct uint32x2x4_t(
)]
pub struct uint32x4x2_t(pub uint32x4_t, pub uint32x4_t);
/// ARM-specific type containing three `uint32x4_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -485,6 +532,7 @@ pub struct uint32x4x2_t(pub uint32x4_t, pub uint32x4_t);
)]
pub struct uint32x4x3_t(pub uint32x4_t, pub uint32x4_t, pub uint32x4_t);
/// ARM-specific type containing four `uint32x4_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -498,6 +546,7 @@ pub struct uint32x4x4_t(
);
/// ARM-specific type containing two `float32x2_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -505,6 +554,7 @@ pub struct uint32x4x4_t(
)]
pub struct float32x2x2_t(pub float32x2_t, pub float32x2_t);
/// ARM-specific type containing three `float32x2_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -512,6 +562,7 @@ pub struct float32x2x2_t(pub float32x2_t, pub float32x2_t);
)]
pub struct float32x2x3_t(pub float32x2_t, pub float32x2_t, pub float32x2_t);
/// ARM-specific type containing four `float32x2_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -525,6 +576,7 @@ pub struct float32x2x4_t(
);
/// ARM-specific type containing two `float32x4_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -532,6 +584,7 @@ pub struct float32x2x4_t(
)]
pub struct float32x4x2_t(pub float32x4_t, pub float32x4_t);
/// ARM-specific type containing three `float32x4_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -539,6 +592,7 @@ pub struct float32x4x2_t(pub float32x4_t, pub float32x4_t);
)]
pub struct float32x4x3_t(pub float32x4_t, pub float32x4_t, pub float32x4_t);
/// ARM-specific type containing four `float32x4_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -552,6 +606,7 @@ pub struct float32x4x4_t(
);
/// ARM-specific type containing two `int64x1_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -559,6 +614,7 @@ pub struct float32x4x4_t(
)]
pub struct int64x1x2_t(pub int64x1_t, pub int64x1_t);
/// ARM-specific type containing three `int64x1_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -566,6 +622,7 @@ pub struct int64x1x2_t(pub int64x1_t, pub int64x1_t);
)]
pub struct int64x1x3_t(pub int64x1_t, pub int64x1_t, pub int64x1_t);
/// ARM-specific type containing four `int64x1_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -574,6 +631,7 @@ pub struct int64x1x3_t(pub int64x1_t, pub int64x1_t, pub int64x1_t);
pub struct int64x1x4_t(pub int64x1_t, pub int64x1_t, pub int64x1_t, pub int64x1_t);
/// ARM-specific type containing two `int64x2_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -581,6 +639,7 @@ pub struct int64x1x4_t(pub int64x1_t, pub int64x1_t, pub int64x1_t, pub int64x1_
)]
pub struct int64x2x2_t(pub int64x2_t, pub int64x2_t);
/// ARM-specific type containing three `int64x2_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -588,6 +647,7 @@ pub struct int64x2x2_t(pub int64x2_t, pub int64x2_t);
)]
pub struct int64x2x3_t(pub int64x2_t, pub int64x2_t, pub int64x2_t);
/// ARM-specific type containing four `int64x2_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -596,6 +656,7 @@ pub struct int64x2x3_t(pub int64x2_t, pub int64x2_t, pub int64x2_t);
pub struct int64x2x4_t(pub int64x2_t, pub int64x2_t, pub int64x2_t, pub int64x2_t);
/// ARM-specific type containing two `uint64x1_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -603,6 +664,7 @@ pub struct int64x2x4_t(pub int64x2_t, pub int64x2_t, pub int64x2_t, pub int64x2_
)]
pub struct uint64x1x2_t(pub uint64x1_t, pub uint64x1_t);
/// ARM-specific type containing three `uint64x1_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -610,6 +672,7 @@ pub struct uint64x1x2_t(pub uint64x1_t, pub uint64x1_t);
)]
pub struct uint64x1x3_t(pub uint64x1_t, pub uint64x1_t, pub uint64x1_t);
/// ARM-specific type containing four `uint64x1_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -623,6 +686,7 @@ pub struct uint64x1x4_t(
);
/// ARM-specific type containing two `uint64x2_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -630,6 +694,7 @@ pub struct uint64x1x4_t(
)]
pub struct uint64x2x2_t(pub uint64x2_t, pub uint64x2_t);
/// ARM-specific type containing three `uint64x2_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -637,6 +702,7 @@ pub struct uint64x2x2_t(pub uint64x2_t, pub uint64x2_t);
)]
pub struct uint64x2x3_t(pub uint64x2_t, pub uint64x2_t, pub uint64x2_t);
/// ARM-specific type containing four `uint64x2_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -650,6 +716,7 @@ pub struct uint64x2x4_t(
);
/// ARM-specific type containing two `poly64x1_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -657,6 +724,7 @@ pub struct uint64x2x4_t(
)]
pub struct poly64x1x2_t(pub poly64x1_t, pub poly64x1_t);
/// ARM-specific type containing three `poly64x1_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -664,6 +732,7 @@ pub struct poly64x1x2_t(pub poly64x1_t, pub poly64x1_t);
)]
pub struct poly64x1x3_t(pub poly64x1_t, pub poly64x1_t, pub poly64x1_t);
/// ARM-specific type containing four `poly64x1_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -677,6 +746,7 @@ pub struct poly64x1x4_t(
);
/// ARM-specific type containing two `poly64x2_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -684,6 +754,7 @@ pub struct poly64x1x4_t(
)]
pub struct poly64x2x2_t(pub poly64x2_t, pub poly64x2_t);
/// ARM-specific type containing three `poly64x2_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
@@ -691,6 +762,7 @@ pub struct poly64x2x2_t(pub poly64x2_t, pub poly64x2_t);
)]
pub struct poly64x2x3_t(pub poly64x2_t, pub poly64x2_t, pub poly64x2_t);
/// ARM-specific type containing four `poly64x2_t` vectors.
+#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
target_arch = "aarch64",
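All of the multi-vector tuple structs above gain `#[repr(C)]`. Rust's default representation makes no promise about the layout of a tuple struct; `#[repr(C)]` fixes the fields in declaration order under C layout rules, so the constituent vectors are laid out back to back. A trivial sketch of the attribute on an unrelated, hypothetical type:

    // With #[repr(C)] the two fields are laid out in declaration order
    // under C rules, rather than whatever the compiler picks by default.
    #[repr(C)]
    #[derive(Copy, Clone, Debug)]
    struct Pair(u64, u64);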
@@ -1212,6 +1284,8 @@ pub unsafe fn vld1q_lane_p16<const LANE: i32>(ptr: *const p16, src: poly16x8_t)
}
/// Load one single-element structure to one lane of one register.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1228,6 +1302,8 @@ pub unsafe fn vld1_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x1_t) -
}
/// Load one single-element structure to one lane of one register.
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1603,6 +1679,8 @@ pub unsafe fn vld1_dup_f32(ptr: *const f32) -> float32x2_t {
}
/// Load one single-element structure and Replicate to all lanes (of one register).
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -1624,6 +1702,8 @@ pub unsafe fn vld1_dup_p64(ptr: *const p64) -> poly64x1_t {
}
/// Load one single-element structure and Replicate to all lanes (of one register).
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
@@ -3733,7 +3813,11 @@ pub unsafe fn vbicq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_s8(a: uint8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
- simd_select(transmute::<_, int8x8_t>(a), b, c)
+ let not = int8x8_t(-1, -1, -1, -1, -1, -1, -1, -1);
+ transmute(simd_or(
+ simd_and(a, transmute(b)),
+ simd_and(simd_xor(a, transmute(not)), transmute(c)),
+ ))
}
/// Bitwise Select.
@@ -3747,7 +3831,11 @@ pub unsafe fn vbsl_s8(a: uint8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_s16(a: uint16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
- simd_select(transmute::<_, int16x4_t>(a), b, c)
+ let not = int16x4_t(-1, -1, -1, -1);
+ transmute(simd_or(
+ simd_and(a, transmute(b)),
+ simd_and(simd_xor(a, transmute(not)), transmute(c)),
+ ))
}
/// Bitwise Select.
@@ -3761,7 +3849,11 @@ pub unsafe fn vbsl_s16(a: uint16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_s32(a: uint32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
- simd_select(transmute::<_, int32x2_t>(a), b, c)
+ let not = int32x2_t(-1, -1);
+ transmute(simd_or(
+ simd_and(a, transmute(b)),
+ simd_and(simd_xor(a, transmute(not)), transmute(c)),
+ ))
}
/// Bitwise Select.
@@ -3775,7 +3867,11 @@ pub unsafe fn vbsl_s32(a: uint32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_s64(a: uint64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t {
- simd_select(transmute::<_, int64x1_t>(a), b, c)
+ let not = int64x1_t(-1);
+ transmute(simd_or(
+ simd_and(a, transmute(b)),
+ simd_and(simd_xor(a, transmute(not)), transmute(c)),
+ ))
}
/// Bitwise Select.
@@ -3789,7 +3885,11 @@ pub unsafe fn vbsl_s64(a: uint64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
- simd_select(transmute::<_, int8x8_t>(a), b, c)
+ let not = int8x8_t(-1, -1, -1, -1, -1, -1, -1, -1);
+ transmute(simd_or(
+ simd_and(a, transmute(b)),
+ simd_and(simd_xor(a, transmute(not)), transmute(c)),
+ ))
}
/// Bitwise Select.
@@ -3803,7 +3903,11 @@ pub unsafe fn vbsl_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t {
- simd_select(transmute::<_, int16x4_t>(a), b, c)
+ let not = int16x4_t(-1, -1, -1, -1);
+ transmute(simd_or(
+ simd_and(a, transmute(b)),
+ simd_and(simd_xor(a, transmute(not)), transmute(c)),
+ ))
}
/// Bitwise Select.
@@ -3817,7 +3921,11 @@ pub unsafe fn vbsl_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t {
- simd_select(transmute::<_, int32x2_t>(a), b, c)
+ let not = int32x2_t(-1, -1);
+ transmute(simd_or(
+ simd_and(a, transmute(b)),
+ simd_and(simd_xor(a, transmute(not)), transmute(c)),
+ ))
}
/// Bitwise Select.
@@ -3831,7 +3939,11 @@ pub unsafe fn vbsl_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_u64(a: uint64x1_t, b: uint64x1_t, c: uint64x1_t) -> uint64x1_t {
- simd_select(transmute::<_, int64x1_t>(a), b, c)
+ let not = int64x1_t(-1);
+ transmute(simd_or(
+ simd_and(a, transmute(b)),
+ simd_and(simd_xor(a, transmute(not)), transmute(c)),
+ ))
}
/// Bitwise Select.
@@ -3845,7 +3957,11 @@ pub unsafe fn vbsl_u64(a: uint64x1_t, b: uint64x1_t, c: uint64x1_t) -> uint64x1_
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_f32(a: uint32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
- simd_select(transmute::<_, int32x2_t>(a), b, c)
+ let not = int32x2_t(-1, -1);
+ transmute(simd_or(
+ simd_and(a, transmute(b)),
+ simd_and(simd_xor(a, transmute(not)), transmute(c)),
+ ))
}
/// Bitwise Select.
@@ -3859,7 +3975,11 @@ pub unsafe fn vbsl_f32(a: uint32x2_t, b: float32x2_t, c: float32x2_t) -> float32
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_p8(a: uint8x8_t, b: poly8x8_t, c: poly8x8_t) -> poly8x8_t {
- simd_select(transmute::<_, int8x8_t>(a), b, c)
+ let not = int8x8_t(-1, -1, -1, -1, -1, -1, -1, -1);
+ transmute(simd_or(
+ simd_and(a, transmute(b)),
+ simd_and(simd_xor(a, transmute(not)), transmute(c)),
+ ))
}
/// Bitwise Select.
@@ -3873,7 +3993,11 @@ pub unsafe fn vbsl_p8(a: uint8x8_t, b: poly8x8_t, c: poly8x8_t) -> poly8x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbsl_p16(a: uint16x4_t, b: poly16x4_t, c: poly16x4_t) -> poly16x4_t {
- simd_select(transmute::<_, int16x4_t>(a), b, c)
+ let not = int16x4_t(-1, -1, -1, -1);
+ transmute(simd_or(
+ simd_and(a, transmute(b)),
+ simd_and(simd_xor(a, transmute(not)), transmute(c)),
+ ))
}
/// Bitwise Select. (128-bit)
@@ -3887,7 +4011,13 @@ pub unsafe fn vbsl_p16(a: uint16x4_t, b: poly16x4_t, c: poly16x4_t) -> poly16x4_
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_s8(a: uint8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
- simd_select(transmute::<_, int8x16_t>(a), b, c)
+ let not = int8x16_t(
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ );
+ transmute(simd_or(
+ simd_and(a, transmute(b)),
+ simd_and(simd_xor(a, transmute(not)), transmute(c)),
+ ))
}
/// Bitwise Select. (128-bit)
@@ -3901,7 +4031,11 @@ pub unsafe fn vbslq_s8(a: uint8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_s16(a: uint16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
- simd_select(transmute::<_, int16x8_t>(a), b, c)
+ let not = int16x8_t(-1, -1, -1, -1, -1, -1, -1, -1);
+ transmute(simd_or(
+ simd_and(a, transmute(b)),
+ simd_and(simd_xor(a, transmute(not)), transmute(c)),
+ ))
}
/// Bitwise Select. (128-bit)
@@ -3915,7 +4049,11 @@ pub unsafe fn vbslq_s16(a: uint16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_s32(a: uint32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
- simd_select(transmute::<_, int32x4_t>(a), b, c)
+ let not = int32x4_t(-1, -1, -1, -1);
+ transmute(simd_or(
+ simd_and(a, transmute(b)),
+ simd_and(simd_xor(a, transmute(not)), transmute(c)),
+ ))
}
/// Bitwise Select. (128-bit)
@@ -3929,7 +4067,11 @@ pub unsafe fn vbslq_s32(a: uint32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_s64(a: uint64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
- simd_select(transmute::<_, int64x2_t>(a), b, c)
+ let not = int64x2_t(-1, -1);
+ transmute(simd_or(
+ simd_and(a, transmute(b)),
+ simd_and(simd_xor(a, transmute(not)), transmute(c)),
+ ))
}
/// Bitwise Select. (128-bit)
@@ -3943,7 +4085,13 @@ pub unsafe fn vbslq_s64(a: uint64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
- simd_select(transmute::<_, int8x16_t>(a), b, c)
+ let not = int8x16_t(
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ );
+ transmute(simd_or(
+ simd_and(a, transmute(b)),
+ simd_and(simd_xor(a, transmute(not)), transmute(c)),
+ ))
}
/// Bitwise Select. (128-bit)
@@ -3957,7 +4105,11 @@ pub unsafe fn vbslq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
- simd_select(transmute::<_, int16x8_t>(a), b, c)
+ let not = int16x8_t(-1, -1, -1, -1, -1, -1, -1, -1);
+ transmute(simd_or(
+ simd_and(a, transmute(b)),
+ simd_and(simd_xor(a, transmute(not)), transmute(c)),
+ ))
}
/// Bitwise Select. (128-bit)
@@ -3971,7 +4123,11 @@ pub unsafe fn vbslq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
- simd_select(transmute::<_, int32x4_t>(a), b, c)
+ let not = int32x4_t(-1, -1, -1, -1);
+ transmute(simd_or(
+ simd_and(a, transmute(b)),
+ simd_and(simd_xor(a, transmute(not)), transmute(c)),
+ ))
}
/// Bitwise Select. (128-bit)
@@ -3985,7 +4141,11 @@ pub unsafe fn vbslq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
- simd_select(transmute::<_, int64x2_t>(a), b, c)
+ let not = int64x2_t(-1, -1);
+ transmute(simd_or(
+ simd_and(a, transmute(b)),
+ simd_and(simd_xor(a, transmute(not)), transmute(c)),
+ ))
}
/// Bitwise Select. (128-bit)
@@ -3999,7 +4159,13 @@ pub unsafe fn vbslq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_p8(a: uint8x16_t, b: poly8x16_t, c: poly8x16_t) -> poly8x16_t {
- simd_select(transmute::<_, int8x16_t>(a), b, c)
+ let not = int8x16_t(
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ );
+ transmute(simd_or(
+ simd_and(a, transmute(b)),
+ simd_and(simd_xor(a, transmute(not)), transmute(c)),
+ ))
}
/// Bitwise Select. (128-bit)
@@ -4013,7 +4179,11 @@ pub unsafe fn vbslq_p8(a: uint8x16_t, b: poly8x16_t, c: poly8x16_t) -> poly8x16_
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_p16(a: uint16x8_t, b: poly16x8_t, c: poly16x8_t) -> poly16x8_t {
- simd_select(transmute::<_, int16x8_t>(a), b, c)
+ let not = int16x8_t(-1, -1, -1, -1, -1, -1, -1, -1);
+ transmute(simd_or(
+ simd_and(a, transmute(b)),
+ simd_and(simd_xor(a, transmute(not)), transmute(c)),
+ ))
}
/// Bitwise Select. (128-bit)
@@ -4027,7 +4197,11 @@ pub unsafe fn vbslq_p16(a: uint16x8_t, b: poly16x8_t, c: poly16x8_t) -> poly16x8
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vbslq_f32(a: uint32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
- simd_select(transmute::<_, int32x4_t>(a), b, c)
+ let not = int32x4_t(-1, -1, -1, -1);
+ transmute(simd_or(
+ simd_and(a, transmute(b)),
+ simd_and(simd_xor(a, transmute(not)), transmute(c)),
+ ))
}
/// Vector bitwise inclusive OR NOT
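The `vbsl*` bodies above replace `simd_select`, which chooses whole lanes, with an explicit bit-level formula: each result bit comes from `b` where the mask `a` has a 1 bit and from `c` where it has a 0 bit, i.e. `(a & b) | (!a & c)`. The updated tests below use masks with individual bits set, which exercises that per-bit behaviour. A scalar sketch of the formula (illustrative helper only):

    // Per-element model of the bitwise-select formula used above:
    // result = (mask & b) | (!mask & c), applied bit by bit.
    fn bsl_u8(mask: u8, b: u8, c: u8) -> u8 {
        (mask & b) | (!mask & c)
    }
    // With mask = 0x01, b = 0x7F (i8::MAX) and c = 0x80 (i8::MIN) the
    // result is 0x81, i.e. i8::MIN | 1 as in the updated test_vbsl_s8.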
@@ -9206,7 +9380,7 @@ mod tests {
#[simd_test(enable = "neon")]
unsafe fn test_vbsl_s8() {
- let a = u8x8::new(u8::MAX, 0, u8::MAX, 0, u8::MAX, 0, u8::MAX, 0);
+ let a = u8x8::new(u8::MAX, 1, u8::MAX, 2, u8::MAX, 0, u8::MAX, 0);
let b = i8x8::new(
i8::MAX,
i8::MAX,
@@ -9229,9 +9403,9 @@ mod tests {
);
let e = i8x8::new(
i8::MAX,
- i8::MIN,
+ i8::MIN | 1,
i8::MAX,
- i8::MIN,
+ i8::MIN | 2,
i8::MAX,
i8::MIN,
i8::MAX,
@@ -9242,34 +9416,34 @@ mod tests {
}
#[simd_test(enable = "neon")]
unsafe fn test_vbsl_s16() {
- let a = u16x4::new(u16::MAX, 0, u16::MAX, 0);
+ let a = u16x4::new(u16::MAX, 0, 1, 2);
let b = i16x4::new(i16::MAX, i16::MAX, i16::MAX, i16::MAX);
let c = i16x4::new(i16::MIN, i16::MIN, i16::MIN, i16::MIN);
- let e = i16x4::new(i16::MAX, i16::MIN, i16::MAX, i16::MIN);
+ let e = i16x4::new(i16::MAX, i16::MIN, i16::MIN | 1, i16::MIN | 2);
let r: i16x4 = transmute(vbsl_s16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vbsl_s32() {
- let a = u32x2::new(u32::MAX, u32::MIN);
+ let a = u32x2::new(u32::MAX, 1);
let b = i32x2::new(i32::MAX, i32::MAX);
let c = i32x2::new(i32::MIN, i32::MIN);
- let e = i32x2::new(i32::MAX, i32::MIN);
+ let e = i32x2::new(i32::MAX, i32::MIN | 1);
let r: i32x2 = transmute(vbsl_s32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vbsl_s64() {
- let a = u64x1::new(u64::MAX);
+ let a = u64x1::new(1);
let b = i64x1::new(i64::MAX);
let c = i64x1::new(i64::MIN);
- let e = i64x1::new(i64::MAX);
+ let e = i64x1::new(i64::MIN | 1);
let r: i64x1 = transmute(vbsl_s64(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vbsl_u8() {
- let a = u8x8::new(u8::MAX, 0, u8::MAX, 0, u8::MAX, 0, u8::MAX, 0);
+ let a = u8x8::new(u8::MAX, 1, u8::MAX, 2, u8::MAX, 0, u8::MAX, 0);
let b = u8x8::new(
u8::MAX,
u8::MAX,
@@ -9290,58 +9464,49 @@ mod tests {
u8::MIN,
u8::MIN,
);
- let e = u8x8::new(
- u8::MAX,
- u8::MIN,
- u8::MAX,
- u8::MIN,
- u8::MAX,
- u8::MIN,
- u8::MAX,
- u8::MIN,
- );
+ let e = u8x8::new(u8::MAX, 1, u8::MAX, 2, u8::MAX, u8::MIN, u8::MAX, u8::MIN);
let r: u8x8 = transmute(vbsl_u8(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vbsl_u16() {
- let a = u16x4::new(u16::MAX, 0, u16::MAX, 0);
+ let a = u16x4::new(u16::MAX, 0, 1, 2);
let b = u16x4::new(u16::MAX, u16::MAX, u16::MAX, u16::MAX);
let c = u16x4::new(u16::MIN, u16::MIN, u16::MIN, u16::MIN);
- let e = u16x4::new(u16::MAX, u16::MIN, u16::MAX, u16::MIN);
+ let e = u16x4::new(u16::MAX, 0, 1, 2);
let r: u16x4 = transmute(vbsl_u16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vbsl_u32() {
- let a = u32x2::new(u32::MAX, 0);
+ let a = u32x2::new(u32::MAX, 2);
let b = u32x2::new(u32::MAX, u32::MAX);
let c = u32x2::new(u32::MIN, u32::MIN);
- let e = u32x2::new(u32::MAX, u32::MIN);
+ let e = u32x2::new(u32::MAX, 2);
let r: u32x2 = transmute(vbsl_u32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vbsl_u64() {
- let a = u64x1::new(u64::MAX);
+ let a = u64x1::new(2);
let b = u64x1::new(u64::MAX);
let c = u64x1::new(u64::MIN);
- let e = u64x1::new(u64::MAX);
+ let e = u64x1::new(2);
let r: u64x1 = transmute(vbsl_u64(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vbsl_f32() {
- let a = u32x2::new(u32::MAX, 0);
- let b = f32x2::new(f32::MAX, f32::MAX);
- let c = f32x2::new(f32::MIN, f32::MIN);
- let e = f32x2::new(f32::MAX, f32::MIN);
+ let a = u32x2::new(1, 0x80000000);
+ let b = f32x2::new(8388609f32, -1.23f32);
+ let c = f32x2::new(2097152f32, 2.34f32);
+ let e = f32x2::new(2097152.25f32, -2.34f32);
let r: f32x2 = transmute(vbsl_f32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
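The new float vectors exercise true bit-level blending: 8388609f32 and 2097152f32 differ only in the lowest mantissa bit, so a mask of 1 splices that bit into 2097152f32, while 0x80000000 splices only the sign bit of -1.23 into 2.34. A hedged standalone check of the first expected lane (not part of the test suite):

```rust
fn main() {
    let b = 8388609f32.to_bits(); // 0x4B00_0001
    let c = 2097152f32.to_bits(); // 0x4A00_0000
    let a = 1u32;                 // select only bit 0 from `b`
    let r = f32::from_bits((a & b) | (!a & c));
    assert_eq!(r, 2097152.25f32); // matches the first `e` lane in test_vbsl_f32
}
```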
#[simd_test(enable = "neon")]
unsafe fn test_vbsl_p8() {
- let a = u8x8::new(u8::MAX, 0, u8::MAX, 0, u8::MAX, 0, u8::MAX, 0);
+ let a = u8x8::new(u8::MAX, 1, u8::MAX, 2, u8::MAX, 0, u8::MAX, 0);
let b = u8x8::new(
u8::MAX,
u8::MAX,
@@ -9362,25 +9527,16 @@ mod tests {
u8::MIN,
u8::MIN,
);
- let e = u8x8::new(
- u8::MAX,
- u8::MIN,
- u8::MAX,
- u8::MIN,
- u8::MAX,
- u8::MIN,
- u8::MAX,
- u8::MIN,
- );
+ let e = u8x8::new(u8::MAX, 1, u8::MAX, 2, u8::MAX, u8::MIN, u8::MAX, u8::MIN);
let r: u8x8 = transmute(vbsl_p8(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vbsl_p16() {
- let a = u16x4::new(u16::MAX, 0, u16::MAX, 0);
+ let a = u16x4::new(u16::MAX, 0, 1, 2);
let b = u16x4::new(u16::MAX, u16::MAX, u16::MAX, u16::MAX);
let c = u16x4::new(u16::MIN, u16::MIN, u16::MIN, u16::MIN);
- let e = u16x4::new(u16::MAX, u16::MIN, u16::MAX, u16::MIN);
+ let e = u16x4::new(u16::MAX, 0, 1, 2);
let r: u16x4 = transmute(vbsl_p16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
@@ -9388,9 +9544,9 @@ mod tests {
unsafe fn test_vbslq_s8() {
let a = u8x16::new(
u8::MAX,
- 0,
+ 1,
u8::MAX,
- 0,
+ 2,
u8::MAX,
0,
u8::MAX,
@@ -9442,9 +9598,9 @@ mod tests {
);
let e = i8x16::new(
i8::MAX,
- i8::MIN,
+ i8::MIN | 1,
i8::MAX,
- i8::MIN,
+ i8::MIN | 2,
i8::MAX,
i8::MIN,
i8::MAX,
@@ -9463,7 +9619,7 @@ mod tests {
}
#[simd_test(enable = "neon")]
unsafe fn test_vbslq_s16() {
- let a = u16x8::new(u16::MAX, 0, u16::MAX, 0, u16::MAX, 0, u16::MAX, 0);
+ let a = u16x8::new(u16::MAX, 1, u16::MAX, 2, u16::MAX, 0, u16::MAX, 0);
let b = i16x8::new(
i16::MAX,
i16::MAX,
@@ -9486,9 +9642,9 @@ mod tests {
);
let e = i16x8::new(
i16::MAX,
- i16::MIN,
+ i16::MIN | 1,
i16::MAX,
- i16::MIN,
+ i16::MIN | 2,
i16::MAX,
i16::MIN,
i16::MAX,
@@ -9499,19 +9655,19 @@ mod tests {
}
#[simd_test(enable = "neon")]
unsafe fn test_vbslq_s32() {
- let a = u32x4::new(u32::MAX, 0, u32::MAX, 0);
+ let a = u32x4::new(u32::MAX, 1, u32::MAX, 2);
let b = i32x4::new(i32::MAX, i32::MAX, i32::MAX, i32::MAX);
let c = i32x4::new(i32::MIN, i32::MIN, i32::MIN, i32::MIN);
- let e = i32x4::new(i32::MAX, i32::MIN, i32::MAX, i32::MIN);
+ let e = i32x4::new(i32::MAX, i32::MIN | 1, i32::MAX, i32::MIN | 2);
let r: i32x4 = transmute(vbslq_s32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vbslq_s64() {
- let a = u64x2::new(u64::MAX, 0);
+ let a = u64x2::new(u64::MAX, 1);
let b = i64x2::new(i64::MAX, i64::MAX);
let c = i64x2::new(i64::MIN, i64::MIN);
- let e = i64x2::new(i64::MAX, i64::MIN);
+ let e = i64x2::new(i64::MAX, i64::MIN | 1);
let r: i64x2 = transmute(vbslq_s64(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
@@ -9519,9 +9675,9 @@ mod tests {
unsafe fn test_vbslq_u8() {
let a = u8x16::new(
u8::MAX,
- 0,
+ 1,
u8::MAX,
- 0,
+ 2,
u8::MAX,
0,
u8::MAX,
@@ -9573,9 +9729,9 @@ mod tests {
);
let e = u8x16::new(
u8::MAX,
- u8::MIN,
+ 1,
u8::MAX,
- u8::MIN,
+ 2,
u8::MAX,
u8::MIN,
u8::MAX,
@@ -9594,7 +9750,7 @@ mod tests {
}
#[simd_test(enable = "neon")]
unsafe fn test_vbslq_u16() {
- let a = u16x8::new(u16::MAX, 0, u16::MAX, 0, u16::MAX, 0, u16::MAX, 0);
+ let a = u16x8::new(u16::MAX, 1, u16::MAX, 2, u16::MAX, 0, u16::MAX, 0);
let b = u16x8::new(
u16::MAX,
u16::MAX,
@@ -9617,9 +9773,9 @@ mod tests {
);
let e = u16x8::new(
u16::MAX,
- u16::MIN,
+ 1,
u16::MAX,
- u16::MIN,
+ 2,
u16::MAX,
u16::MIN,
u16::MAX,
@@ -9630,28 +9786,28 @@ mod tests {
}
#[simd_test(enable = "neon")]
unsafe fn test_vbslq_u32() {
- let a = u32x4::new(u32::MAX, 0, u32::MAX, 0);
+ let a = u32x4::new(u32::MAX, 1, u32::MAX, 2);
let b = u32x4::new(u32::MAX, u32::MAX, u32::MAX, u32::MAX);
let c = u32x4::new(u32::MIN, u32::MIN, u32::MIN, u32::MIN);
- let e = u32x4::new(u32::MAX, u32::MIN, u32::MAX, u32::MIN);
+ let e = u32x4::new(u32::MAX, 1, u32::MAX, 2);
let r: u32x4 = transmute(vbslq_u32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vbslq_u64() {
- let a = u64x2::new(u64::MAX, 0);
+ let a = u64x2::new(u64::MAX, 1);
let b = u64x2::new(u64::MAX, u64::MAX);
let c = u64x2::new(u64::MIN, u64::MIN);
- let e = u64x2::new(u64::MAX, u64::MIN);
+ let e = u64x2::new(u64::MAX, 1);
let r: u64x2 = transmute(vbslq_u64(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vbslq_f32() {
- let a = u32x4::new(u32::MAX, 0, u32::MAX, 0);
- let b = f32x4::new(f32::MAX, f32::MAX, f32::MAX, f32::MAX);
- let c = f32x4::new(f32::MIN, f32::MIN, f32::MIN, f32::MIN);
- let e = f32x4::new(f32::MAX, f32::MIN, f32::MAX, f32::MIN);
+ let a = u32x4::new(u32::MAX, 0, 1, 0x80000000);
+ let b = f32x4::new(-1.23f32, -1.23f32, 8388609f32, -1.23f32);
+ let c = f32x4::new(2.34f32, 2.34f32, 2097152f32, 2.34f32);
+ let e = f32x4::new(-1.23f32, 2.34f32, 2097152.25f32, -2.34f32);
let r: f32x4 = transmute(vbslq_f32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
@@ -9659,9 +9815,9 @@ mod tests {
unsafe fn test_vbslq_p8() {
let a = u8x16::new(
u8::MAX,
- 0,
+ 1,
u8::MAX,
- 0,
+ 2,
u8::MAX,
0,
u8::MAX,
@@ -9713,9 +9869,9 @@ mod tests {
);
let e = u8x16::new(
u8::MAX,
- u8::MIN,
+ 1,
u8::MAX,
- u8::MIN,
+ 2,
u8::MAX,
u8::MIN,
u8::MAX,
@@ -9734,7 +9890,7 @@ mod tests {
}
#[simd_test(enable = "neon")]
unsafe fn test_vbslq_p16() {
- let a = u16x8::new(u16::MAX, 0, u16::MAX, 0, u16::MAX, 0, u16::MAX, 0);
+ let a = u16x8::new(u16::MAX, 1, u16::MAX, 2, u16::MAX, 0, u16::MAX, 0);
let b = u16x8::new(
u16::MAX,
u16::MAX,
@@ -9757,9 +9913,9 @@ mod tests {
);
let e = u16x8::new(
u16::MAX,
- u16::MIN,
+ 1,
u16::MAX,
- u16::MIN,
+ 2,
u16::MAX,
u16::MIN,
u16::MAX,
@@ -12305,30 +12461,30 @@ mod tests {
}
#[simd_test(enable = "neon,i8mm")]
unsafe fn test_vmmlaq_s32() {
- let a: i32x4 = i32x4::new(1, 3, 4, 9);
- let b: i8x16 = i8x16::new(1, 21, 31, 14, 5, 6, 17, 8, 9, 13, 15, 12, 13, 19, 20, 16);
- let c: i8x16 = i8x16::new(12, 22, 3, 4, 5, 56, 7, 8, 91, 10, 11, 15, 13, 14, 17, 16);
- let e: i32x4 = i32x4::new(1, 2, 3, 4);
+ let a = i32x4::new(1, 3, 4, -0x10000);
+ let b = i8x16::new(1, 21, 31, 14, 5, 6, -128, 8, 9, 13, 15, 12, 13, -1, 20, 16);
+ let c = i8x16::new(12, 22, 3, 4, -1, 56, 7, 8, 91, 10, -128, 15, 13, 14, 17, 16);
+ let e = i32x4::new(123, -5353, 690, -65576);
let r: i32x4 = transmute(vmmlaq_s32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon,i8mm")]
unsafe fn test_vmmlaq_u32() {
- let a: u32x4 = u32x4::new(1, 3, 4, 9);
- let b: i8x16 = i8x16::new(1, 21, 31, 14, 5, 6, 17, 8, 9, 13, 15, 12, 13, 19, 20, 16);
- let c: i8x16 = i8x16::new(12, 22, 3, 4, 5, 56, 7, 8, 91, 10, 11, 15, 13, 14, 17, 16);
- let e: u32x4 = u32x4::new(1, 2, 3, 4);
+ let a = u32x4::new(1, 3, 4, 0xffff0000);
+ let b = u8x16::new(1, 21, 31, 14, 5, 6, 128, 8, 9, 13, 15, 12, 13, 255, 20, 16);
+ let c = u8x16::new(12, 22, 3, 4, 255, 56, 7, 8, 91, 10, 128, 15, 13, 14, 17, 16);
+ let e = u32x4::new(3195, 6935, 18354, 4294909144);
let r: u32x4 = transmute(vmmlaq_u32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon,i8mm")]
unsafe fn test_vusmmlaq_s32() {
- let a: i32x4 = i32x4::new(1, 3, 4, 9);
- let b: i8x16 = i8x16::new(1, 21, 31, 14, 5, 6, 17, 8, 9, 13, 15, 12, 13, 19, 20, 16);
- let c: i8x16 = i8x16::new(12, 22, 3, 4, 5, 56, 7, 8, 91, 10, 11, 15, 13, 14, 17, 16);
- let e: i32x4 = i32x4::new(1, 2, 3, 4);
+ let a = i32x4::new(1, 3, 4, -0x10000);
+ let b = u8x16::new(1, 21, 31, 14, 5, 6, 128, 8, 9, 13, 15, 12, 13, 255, 20, 16);
+ let c = i8x16::new(12, 22, 3, 4, -1, 56, 7, 8, 91, 10, -128, 15, 13, 14, 17, 16);
+ let e = i32x4::new(1915, -1001, 15026, -61992);
let r: i32x4 = transmute(vusmmlaq_s32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
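The rewritten matrix-multiply-accumulate tests use expected values that follow from the instruction's definition: `a` is a row-major 2x2 accumulator, `b` and `c` are row-major 2x8 matrices of 8-bit elements, and the result is `a + b * transpose(c)`. A hedged scalar reference for the signed case (the unsigned and mixed-sign variants only change the element types; the helper name is mine, not from the patch):

```rust
/// Reference for SMMLA-style ops: acc (2x2, row-major) += b (2x8) * transpose(c) (8x2).
fn mmla_s32(acc: [i32; 4], b: [i8; 16], c: [i8; 16]) -> [i32; 4] {
    let mut out = acc;
    for i in 0..2 {
        for j in 0..2 {
            for k in 0..8 {
                out[i * 2 + j] += b[i * 8 + k] as i32 * c[j * 8 + k] as i32;
            }
        }
    }
    out
}

fn main() {
    // Reproduces the expected vector used in test_vmmlaq_s32 above.
    let a = [1, 3, 4, -0x10000];
    let b = [1, 21, 31, 14, 5, 6, -128, 8, 9, 13, 15, 12, 13, -1, 20, 16];
    let c = [12, 22, 3, 4, -1, 56, 7, 8, 91, 10, -128, 15, 13, 14, 17, 16];
    assert_eq!(mmla_s32(a, b, c), [123, -5353, 690, -65576]);
}
```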
diff --git a/library/stdarch/crates/core_arch/src/lib.rs b/library/stdarch/crates/core_arch/src/lib.rs
index 9240d0e84..5a9727a0a 100644
--- a/library/stdarch/crates/core_arch/src/lib.rs
+++ b/library/stdarch/crates/core_arch/src/lib.rs
@@ -19,6 +19,7 @@
doc_cfg,
tbm_target_feature,
sse4a_target_feature,
+ riscv_target_feature,
arm_target_feature,
cmpxchg16b_target_feature,
avx512_target_feature,
@@ -30,8 +31,8 @@
f16c_target_feature,
allow_internal_unstable,
decl_macro,
- bench_black_box,
- asm_const
+ asm_const,
+ target_feature_11
)]
#![cfg_attr(test, feature(test, abi_vectorcall))]
#![deny(clippy::missing_inline_in_public_items)]
diff --git a/library/stdarch/crates/core_arch/src/macros.rs b/library/stdarch/crates/core_arch/src/macros.rs
index 1e6a3f405..1c917c52b 100644
--- a/library/stdarch/crates/core_arch/src/macros.rs
+++ b/library/stdarch/crates/core_arch/src/macros.rs
@@ -101,11 +101,11 @@ macro_rules! simd_shuffle2 {
const IDX: [u32; 2] = $idx;
}
- simd_shuffle2($x, $y, ConstParam::<$($imm),+>::IDX)
+ simd_shuffle($x, $y, ConstParam::<$($imm),+>::IDX)
}};
($x:expr, $y:expr, $idx:expr $(,)?) => {{
const IDX: [u32; 2] = $idx;
- simd_shuffle2($x, $y, IDX)
+ simd_shuffle($x, $y, IDX)
}};
}
@@ -117,11 +117,11 @@ macro_rules! simd_shuffle4 {
const IDX: [u32; 4] = $idx;
}
- simd_shuffle4($x, $y, ConstParam::<$($imm),+>::IDX)
+ simd_shuffle($x, $y, ConstParam::<$($imm),+>::IDX)
}};
($x:expr, $y:expr, $idx:expr $(,)?) => {{
const IDX: [u32; 4] = $idx;
- simd_shuffle4($x, $y, IDX)
+ simd_shuffle($x, $y, IDX)
}};
}
@@ -133,11 +133,11 @@ macro_rules! simd_shuffle8 {
const IDX: [u32; 8] = $idx;
}
- simd_shuffle8($x, $y, ConstParam::<$($imm),+>::IDX)
+ simd_shuffle($x, $y, ConstParam::<$($imm),+>::IDX)
}};
($x:expr, $y:expr, $idx:expr $(,)?) => {{
const IDX: [u32; 8] = $idx;
- simd_shuffle8($x, $y, IDX)
+ simd_shuffle($x, $y, IDX)
}};
}
@@ -149,11 +149,11 @@ macro_rules! simd_shuffle16 {
const IDX: [u32; 16] = $idx;
}
- simd_shuffle16($x, $y, ConstParam::<$($imm),+>::IDX)
+ simd_shuffle($x, $y, ConstParam::<$($imm),+>::IDX)
}};
($x:expr, $y:expr, $idx:expr $(,)?) => {{
const IDX: [u32; 16] = $idx;
- simd_shuffle16($x, $y, IDX)
+ simd_shuffle($x, $y, IDX)
}};
}
@@ -165,11 +165,11 @@ macro_rules! simd_shuffle32 {
const IDX: [u32; 32] = $idx;
}
- simd_shuffle32($x, $y, ConstParam::<$($imm),+>::IDX)
+ simd_shuffle($x, $y, ConstParam::<$($imm),+>::IDX)
}};
($x:expr, $y:expr, $idx:expr $(,)?) => {{
const IDX: [u32; 32] = $idx;
- simd_shuffle32($x, $y, IDX)
+ simd_shuffle($x, $y, IDX)
}};
}
@@ -181,10 +181,10 @@ macro_rules! simd_shuffle64 {
const IDX: [u32; 64] = $idx;
}
- simd_shuffle64($x, $y, ConstParam::<$($imm),+>::IDX)
+ simd_shuffle($x, $y, ConstParam::<$($imm),+>::IDX)
}};
($x:expr, $y:expr, $idx:expr $(,)?) => {{
const IDX: [u32; 64] = $idx;
- simd_shuffle64($x, $y, IDX)
+ simd_shuffle($x, $y, IDX)
}};
}
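With the width-specific platform intrinsics gone, every `simd_shuffleN!` arm now funnels into the single generic `simd_shuffle`, differing only in the length of its const index array. The shuffle semantics are unchanged: indices `0..N` pick lanes from the first operand and `N..2N` from the second. A hedged scalar model of a two-lane shuffle (the real intrinsic is a compiler built-in, not this function):

```rust
/// Scalar model of a two-lane shuffle: indices 0..2 select from `x`, 2..4 from `y`.
fn shuffle2(x: [u32; 2], y: [u32; 2], idx: [u32; 2]) -> [u32; 2] {
    let concat = [x[0], x[1], y[0], y[1]];
    [concat[idx[0] as usize], concat[idx[1] as usize]]
}

fn main() {
    assert_eq!(shuffle2([10, 11], [20, 21], [1, 2]), [11, 20]);
}
```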
diff --git a/library/stdarch/crates/core_arch/src/mod.rs b/library/stdarch/crates/core_arch/src/mod.rs
index 20751eeec..2f7af22cb 100644
--- a/library/stdarch/crates/core_arch/src/mod.rs
+++ b/library/stdarch/crates/core_arch/src/mod.rs
@@ -3,6 +3,9 @@
#[macro_use]
mod macros;
+#[cfg(any(target_arch = "riscv32", target_arch = "riscv64", doc))]
+mod riscv_shared;
+
#[cfg(any(target_arch = "arm", target_arch = "aarch64", doc))]
mod arm_shared;
@@ -276,10 +279,6 @@ mod aarch64;
#[doc(cfg(any(target_arch = "arm")))]
mod arm;
-#[cfg(any(target_arch = "riscv32", target_arch = "riscv64", doc))]
-#[doc(cfg(any(target_arch = "riscv32", target_arch = "riscv64")))]
-mod riscv_shared;
-
#[cfg(any(target_arch = "riscv64", doc))]
#[doc(cfg(any(target_arch = "riscv64")))]
mod riscv64;
diff --git a/library/stdarch/crates/core_arch/src/powerpc/altivec.rs b/library/stdarch/crates/core_arch/src/powerpc/altivec.rs
index 8b2be39dc..70344c88b 100644
--- a/library/stdarch/crates/core_arch/src/powerpc/altivec.rs
+++ b/library/stdarch/crates/core_arch/src/powerpc/altivec.rs
@@ -51,6 +51,8 @@ types! {
#[allow(improper_ctypes)]
extern "C" {
+ #[link_name = "llvm.ppc.altivec.lvx"]
+ fn lvx(p: *const i8) -> vector_unsigned_int;
#[link_name = "llvm.ppc.altivec.vperm"]
fn vperm(
a: vector_signed_int,
@@ -442,8 +444,7 @@ mod sealed {
#[inline(always)]
unsafe fn load(off: i32, p: *const i8) -> u32x4 {
let addr = p.offset(off as isize);
-
- *(addr as *const u32x4)
+ transmute(lvx(addr))
}
pub trait VectorLd {
diff --git a/library/stdarch/crates/core_arch/src/riscv_shared/mod.rs b/library/stdarch/crates/core_arch/src/riscv_shared/mod.rs
index 347735df1..0e35fe1f1 100644
--- a/library/stdarch/crates/core_arch/src/riscv_shared/mod.rs
+++ b/library/stdarch/crates/core_arch/src/riscv_shared/mod.rs
@@ -1,4 +1,7 @@
//! Shared RISC-V intrinsics
+mod p;
+
+pub use p::*;
use crate::arch::asm;
@@ -469,6 +472,17 @@ pub unsafe fn hinval_gvma_vmid(vmid: usize) {
asm!(".insn r 0x73, 0, 0x33, x0, x0, {}", in(reg) vmid, options(nostack))
}
+/// Invalidate hypervisor translation cache for all virtual machines and guest physical addresses
+///
+/// This instruction invalidates any address-translation cache entries that an
+/// `HFENCE.GVMA` instruction with the same values of `gaddr` and `vmid` would invalidate.
+///
+/// This fence specifies all guest physical addresses and all virtual machines.
+#[inline]
+pub unsafe fn hinval_gvma_all() {
+ asm!(".insn r 0x73, 0, 0x33, x0, x0, x0", options(nostack))
+}
+
/// Reads the floating-point control and status register `fcsr`
///
/// Register `fcsr` is a 32-bit read/write register that selects the dynamic rounding mode
@@ -574,17 +588,6 @@ pub fn fsflags(value: u32) -> u32 {
original
}
-/// Invalidate hypervisor translation cache for all virtual machines and guest physical addresses
-///
-/// This instruction invalidates any address-translation cache entries that an
-/// `HFENCE.GVMA` instruction with the same values of `gaddr` and `vmid` would invalidate.
-///
-/// This fence specifies all guest physical addresses and all virtual machines.
-#[inline]
-pub unsafe fn hinval_gvma_all() {
- asm!(".insn r 0x73, 0, 0x33, x0, x0, x0", options(nostack))
-}
-
/// `P0` transformation function as is used in the SM3 hash algorithm
///
/// This function is included in `Zksh` extension. It's defined as:
@@ -602,12 +605,10 @@ pub unsafe fn hinval_gvma_all() {
/// According to RISC-V Cryptography Extensions, Volume I, the execution latency of
/// this instruction must always be independent from the data it operates on.
#[inline]
+#[target_feature(enable = "zksh")]
pub fn sm3p0(x: u32) -> u32 {
let ans: u32;
- unsafe {
- // asm!("sm3p0 {}, {}", out(reg) ans, in(reg) x, options(nomem, nostack))
- asm!(".insn i 0x13, 0x1, {}, {}, 0x108", out(reg) ans, in(reg) x, options(nomem, nostack))
- };
+ unsafe { asm!("sm3p0 {}, {}", lateout(reg) ans, in(reg) x, options(pure, nomem, nostack)) };
ans
}
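`sm3p0` (and `sm3p1` below) now emit the real mnemonic and are gated on the `zksh` target feature instead of hand-encoding `.insn` forms. The transformations themselves are plain rotate-xor functions; a hedged software reference, assuming the usual SM3 definitions P0(x) = x ^ (x <<< 9) ^ (x <<< 17) and P1(x) = x ^ (x <<< 15) ^ (x <<< 23):

```rust
// Software reference for the SM3 permutation functions, useful for
// cross-checking the zksh-accelerated intrinsics on any host.
fn sm3_p0(x: u32) -> u32 {
    x ^ x.rotate_left(9) ^ x.rotate_left(17)
}

fn sm3_p1(x: u32) -> u32 {
    x ^ x.rotate_left(15) ^ x.rotate_left(23)
}

fn main() {
    assert_eq!(sm3_p0(0), 0);
    assert_eq!(sm3_p1(1), 1 | (1 << 15) | (1 << 23));
}
```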
@@ -634,12 +635,10 @@ pub fn sm3p0(x: u32) -> u32 {
/// According to RISC-V Cryptography Extensions, Volume I, the execution latency of
/// this instruction must always be independent from the data it operates on.
#[inline]
+#[target_feature(enable = "zksh")]
pub fn sm3p1(x: u32) -> u32 {
let ans: u32;
- unsafe {
- // asm!("sm3p1 {}, {}", out(reg) ans, in(reg) x, options(nomem, nostack))
- asm!(".insn i 0x13, 0x1, {}, {}, 0x109", out(reg) ans, in(reg) x, options(nomem, nostack))
- };
+ unsafe { asm!("sm3p1 {}, {}", lateout(reg) ans, in(reg) x, options(pure, nomem, nostack)) };
ans
}
@@ -674,33 +673,28 @@ pub fn sm3p1(x: u32) -> u32 {
/// It can be implemented by `sm4ed` instruction like:
///
/// ```no_run
+/// # #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
+/// # fn round_function(x0: u32, x1: u32, x2: u32, x3: u32, rk: u32) -> u32 {
+/// # #[cfg(target_arch = "riscv32")] use core::arch::riscv32::sm4ed;
+/// # #[cfg(target_arch = "riscv64")] use core::arch::riscv64::sm4ed;
/// let a = x1 ^ x2 ^ x3 ^ rk;
/// let c0 = sm4ed::<0>(x0, a);
/// let c1 = sm4ed::<1>(c0, a); // c1 represents c[0..=1], etc.
/// let c2 = sm4ed::<2>(c1, a);
/// let c3 = sm4ed::<3>(c2, a);
/// return c3; // c3 represents c[0..=3]
+/// # }
/// ```
///
/// According to RISC-V Cryptography Extensions, Volume I, the execution latency of
/// this instruction must always be independent from the data it operates on.
+#[inline]
+#[target_feature(enable = "zksed")]
pub fn sm4ed<const BS: u8>(x: u32, a: u32) -> u32 {
static_assert!(BS: u8 where BS <= 3);
let ans: u32;
- match BS {
- 0 => unsafe {
- asm!(".insn r 0x33, 0, 0x18, {}, {}, {}", out(reg) ans, in(reg) x, in(reg) a, options(nomem, nostack))
- },
- 1 => unsafe {
- asm!(".insn r 0x33, 0, 0x38, {}, {}, {}", out(reg) ans, in(reg) x, in(reg) a, options(nomem, nostack))
- },
- 2 => unsafe {
- asm!(".insn r 0x33, 0, 0x58, {}, {}, {}", out(reg) ans, in(reg) x, in(reg) a, options(nomem, nostack))
- },
- 3 => unsafe {
- asm!(".insn r 0x33, 0, 0x78, {}, {}, {}", out(reg) ans, in(reg) x, in(reg) a, options(nomem, nostack))
- },
- _ => unreachable!(),
+ unsafe {
+ asm!("sm4ed {}, {}, {}, {}", lateout(reg) ans, in(reg) x, in(reg) a, const BS, options(pure, nomem, nostack))
};
ans
}
@@ -739,33 +733,28 @@ pub fn sm4ed<const BS: u8>(x: u32, a: u32) -> u32 {
/// Hence, the key schedule operation can be implemented by `sm4ks` instruction like:
///
/// ```no_run
+/// # #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
+/// # fn key_schedule(k0: u32, k1: u32, k2: u32, k3: u32, ck_i: u32) -> u32 {
+/// # #[cfg(target_arch = "riscv32")] use core::arch::riscv32::sm4ks;
+/// # #[cfg(target_arch = "riscv64")] use core::arch::riscv64::sm4ks;
/// let k = k1 ^ k2 ^ k3 ^ ck_i;
/// let c0 = sm4ks::<0>(k0, k);
/// let c1 = sm4ks::<1>(c0, k); // c1 represents c[0..=1], etc.
/// let c2 = sm4ks::<2>(c1, k);
/// let c3 = sm4ks::<3>(c2, k);
/// return c3; // c3 represents c[0..=3]
+/// # }
/// ```
///
/// According to RISC-V Cryptography Extensions, Volume I, the execution latency of
/// this instruction must always be independent from the data it operates on.
+#[inline]
+#[target_feature(enable = "zksed")]
pub fn sm4ks<const BS: u8>(x: u32, k: u32) -> u32 {
static_assert!(BS: u8 where BS <= 3);
let ans: u32;
- match BS {
- 0 => unsafe {
- asm!(".insn r 0x33, 0, 0x1A, {}, {}, {}", out(reg) ans, in(reg) x, in(reg) k, options(nomem, nostack))
- },
- 1 => unsafe {
- asm!(".insn r 0x33, 0, 0x3A, {}, {}, {}", out(reg) ans, in(reg) x, in(reg) k, options(nomem, nostack))
- },
- 2 => unsafe {
- asm!(".insn r 0x33, 0, 0x5A, {}, {}, {}", out(reg) ans, in(reg) x, in(reg) k, options(nomem, nostack))
- },
- 3 => unsafe {
- asm!(".insn r 0x33, 0, 0x7A, {}, {}, {}", out(reg) ans, in(reg) x, in(reg) k, options(nomem, nostack))
- },
- _ => unreachable!(),
+ unsafe {
+ asm!("sm4ks {}, {}, {}, {}", lateout(reg) ans, in(reg) x, in(reg) k, const BS, options(pure, nomem, nostack))
};
ans
}
diff --git a/library/stdarch/crates/core_arch/src/riscv_shared/p.rs b/library/stdarch/crates/core_arch/src/riscv_shared/p.rs
new file mode 100644
index 000000000..a26044aee
--- /dev/null
+++ b/library/stdarch/crates/core_arch/src/riscv_shared/p.rs
@@ -0,0 +1,1061 @@
+//! RISC-V Packed SIMD intrinsics; shared part.
+//!
+//! The RV64-only part is placed in the riscv64 folder.

+use crate::arch::asm;
+
+/// Adds packed 16-bit signed numbers, discarding overflow bits
+#[inline]
+pub fn add16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x20, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
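+
+// These wrappers pass whole registers through `usize`, so each call processes two
+// 16-bit lanes on RV32 and four on RV64. A hedged scalar model of `add16`
+// (lane-wise wrapping add; the helper name is mine, not part of this module):
+//
+//     /// Scalar model of ADD16: element-wise wrapping add of the 16-bit lanes of a usize.
+//     fn add16_model(a: usize, b: usize) -> usize {
+//         let mut out = 0usize;
+//         for lane in 0..(usize::BITS / 16) {
+//             let shift = lane * 16;
+//             let x = (a >> shift) as u16;
+//             let y = (b >> shift) as u16;
+//             out |= (x.wrapping_add(y) as usize) << shift;
+//         }
+//         out
+//     }
+//
+//     fn main() {
+//         // Low lane wraps (0xFFFF + 1 = 0x0000); the next lane is 1 + 2 = 3.
+//         assert_eq!(add16_model(0x0001_FFFF, 0x0002_0001) & 0xFFFF_FFFF, 0x0003_0000);
+//     }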
+
+/// Halves the sum of packed 16-bit signed numbers, dropping least bits
+#[inline]
+pub fn radd16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x00, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Halves the sum of packed 16-bit unsigned numbers, dropping least bits
+#[inline]
+pub fn uradd16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x10, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Adds packed 16-bit signed numbers, saturating at the numeric bounds
+#[inline]
+pub fn kadd16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x08, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Adds packed 16-bit unsigned numbers, saturating at the numeric bounds
+#[inline]
+pub fn ukadd16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x18, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
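+
+// The `radd`/`uradd` variants halve the lane sums and the `kadd`/`ukadd` variants
+// saturate instead of wrapping. A hedged single-lane model of the saturating forms
+// (assumed semantics, not generated code):
+//
+//     // Assumed per-lane behaviour of KADD16 (signed Q15 saturation) and UKADD16 (unsigned).
+//     fn kadd16_lane(x: i16, y: i16) -> i16 {
+//         x.saturating_add(y)
+//     }
+//
+//     fn ukadd16_lane(x: u16, y: u16) -> u16 {
+//         x.saturating_add(y)
+//     }
+//
+//     fn main() {
+//         assert_eq!(kadd16_lane(i16::MAX, 1), i16::MAX);  // clamps at 0x7FFF
+//         assert_eq!(ukadd16_lane(u16::MAX, 1), u16::MAX); // clamps at 0xFFFF
+//     }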
+
+/// Subtracts packed 16-bit signed numbers, discarding overflow bits
+#[inline]
+pub fn sub16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x21, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Halves the subtraction result of packed 16-bit signed numbers, dropping least bits
+#[inline]
+pub fn rsub16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x01, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Halves the subtraction result of packed 16-bit unsigned numbers, dropping least bits
+#[inline]
+pub fn ursub16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x11, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Subtracts packed 16-bit signed numbers, saturating at the numeric bounds
+#[inline]
+pub fn ksub16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x09, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Subtracts packed 16-bit unsigned numbers, saturating at the numeric bounds
+#[inline]
+pub fn uksub16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x19, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Cross adds and subtracts packed 16-bit signed numbers, discarding overflow bits
+#[inline]
+pub fn cras16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x22, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Cross adds and subtracts packed 16-bit signed numbers, then halves the results, dropping least bits
+#[inline]
+pub fn rcras16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x02, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Cross adds and subtracts packed 16-bit unsigned numbers, then halves the results, dropping least bits
+#[inline]
+pub fn urcras16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x12, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Cross adds and subtracts packed 16-bit signed numbers, saturating at the numeric bounds
+#[inline]
+pub fn kcras16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x0A, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Cross adds and subtracts packed 16-bit unsigned numbers, saturating at the numeric bounds
+#[inline]
+pub fn ukcras16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x1A, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Cross subtracts and adds packed 16-bit signed numbers, discarding overflow bits
+#[inline]
+pub fn crsa16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x23, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Cross subtracts and adds packed 16-bit signed numbers, then halves the results, dropping least bits
+#[inline]
+pub fn rcrsa16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x03, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Cross subtracts and adds packed 16-bit unsigned numbers, then halves the results, dropping least bits
+#[inline]
+pub fn urcrsa16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x13, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Cross subtracts and adds packed 16-bit signed numbers, saturating at the numeric bounds
+#[inline]
+pub fn kcrsa16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x0B, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Cross subtracts and adds packed 16-bit unsigned numbers, saturating at the numeric bounds
+#[inline]
+pub fn ukcrsa16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x1B, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Straight adds and subtracts packed 16-bit signed numbers, discarding overflow bits
+#[inline]
+pub fn stas16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x7A, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Straight adds and subtracts packed 16-bit signed numbers, then halves the results, dropping least bits
+#[inline]
+pub fn rstas16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x5A, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Straight adds and subtracts packed 16-bit unsigned numbers, then halves the results, dropping least bits
+#[inline]
+pub fn urstas16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x6A, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Straight adds and subtracts packed 16-bit signed numbers, saturating at the numeric bounds
+#[inline]
+pub fn kstas16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x62, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Straight adds and subtracts packed 16-bit unsigned numbers, saturating at the numeric bounds
+#[inline]
+pub fn ukstas16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x72, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Straight subtracts and adds packed 16-bit signed numbers, discarding overflow bits
+#[inline]
+pub fn stsa16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x7B, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Straight subtracts and adds packed 16-bit signed numbers, then halves the results, dropping least bits
+#[inline]
+pub fn rstsa16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x5B, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Straight subtracts and adds packed 16-bit unsigned numbers, then halves the results, dropping least bits
+#[inline]
+pub fn urstsa16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x6B, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Straight subtracts and adds packed 16-bit signed numbers, saturating at the numeric bounds
+#[inline]
+pub fn kstsa16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x63, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Straight subtracts and adds packed 16-bit unsigned numbers, saturating at the numeric bounds
+#[inline]
+pub fn ukstsa16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x73, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Adds packed 8-bit signed numbers, discarding overflow bits
+#[inline]
+pub fn add8(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x24, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Halves the sum of packed 8-bit signed numbers, dropping least bits
+#[inline]
+pub fn radd8(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x04, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Halves the sum of packed 8-bit unsigned numbers, dropping least bits
+#[inline]
+pub fn uradd8(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x14, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Adds packed 8-bit signed numbers, saturating at the numeric bounds
+#[inline]
+pub fn kadd8(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x0C, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Adds packed 8-bit unsigned numbers, saturating at the numeric bounds
+#[inline]
+pub fn ukadd8(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x1C, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Subtracts packed 8-bit signed numbers, discarding overflow bits
+#[inline]
+pub fn sub8(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x25, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Halves the subtraction result of packed 8-bit signed numbers, dropping least bits
+#[inline]
+pub fn rsub8(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x05, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Halves the subtraction result of packed 8-bit unsigned numbers, dropping least bits
+#[inline]
+pub fn ursub8(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x15, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Subtracts packed 8-bit signed numbers, saturating at the numeric bounds
+#[inline]
+pub fn ksub8(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x0D, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Subtracts packed 8-bit unsigned numbers, saturating at the numeric bounds
+#[inline]
+pub fn uksub8(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x1D, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Arithmetic right shift packed 16-bit elements without rounding up
+#[inline]
+pub fn sra16(a: usize, b: u32) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x28, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Arithmetic right shift packed 16-bit elements with rounding up
+#[inline]
+pub fn sra16u(a: usize, b: u32) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x30, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Logical right shift packed 16-bit elements without rounding up
+#[inline]
+pub fn srl16(a: usize, b: u32) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x29, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Logical right shift packed 16-bit elements with rounding up
+#[inline]
+pub fn srl16u(a: usize, b: u32) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x31, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Logical left shift packed 16-bit elements, discarding overflow bits
+#[inline]
+pub fn sll16(a: usize, b: u32) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x2A, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Logical left shift packed 16-bit elements, saturating at the numeric bounds
+#[inline]
+pub fn ksll16(a: usize, b: u32) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x32, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Logical saturating left then arithmetic right shift packed 16-bit elements
+#[inline]
+pub fn kslra16(a: usize, b: i32) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x2B, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Logical saturating left then arithmetic right shift packed 16-bit elements, with rounding up
+#[inline]
+pub fn kslra16u(a: usize, b: i32) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x33, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Arithmetic right shift packed 8-bit elements without rounding up
+#[inline]
+pub fn sra8(a: usize, b: u32) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x2C, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Arithmetic right shift packed 8-bit elements with rounding up
+#[inline]
+pub fn sra8u(a: usize, b: u32) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x34, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Logical right shift packed 8-bit elements without rounding up
+#[inline]
+pub fn srl8(a: usize, b: u32) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x2D, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Logical right shift packed 8-bit elements with rounding up
+#[inline]
+pub fn srl8u(a: usize, b: u32) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x35, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Logical left shift packed 8-bit elements, discarding overflow bits
+#[inline]
+pub fn sll8(a: usize, b: u32) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x2E, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Logical left shift packed 8-bit elements, saturating at the numeric bounds
+#[inline]
+pub fn ksll8(a: usize, b: u32) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x36, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Logical saturating left then arithmetic right shift packed 8-bit elements
+#[inline]
+pub fn kslra8(a: usize, b: i32) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x2F, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Logical saturating left then arithmetic right shift packed 8-bit elements, with rounding up
+#[inline]
+pub fn kslra8u(a: usize, b: i32) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x37, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Compare equality for packed 16-bit elements
+#[inline]
+pub fn cmpeq16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x26, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
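+
+// The compare wrappers below produce per-lane masks; assuming the P-extension draft
+// semantics, a lane becomes all-ones where the predicate holds and all-zeros otherwise.
+// A hedged single-lane sketch for `cmpeq16`:
+//
+//     // Assumed per-lane result of CMPEQ16: 0xFFFF where the 16-bit lanes are equal, 0x0000 otherwise.
+//     fn cmpeq16_lane(x: u16, y: u16) -> u16 {
+//         if x == y { 0xFFFF } else { 0x0000 }
+//     }
+//
+//     fn main() {
+//         assert_eq!(cmpeq16_lane(7, 7), 0xFFFF);
+//         assert_eq!(cmpeq16_lane(7, 8), 0x0000);
+//     }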
+
+/// Compare whether 16-bit packed signed integers are less than the others
+#[inline]
+pub fn scmplt16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x06, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Compare whether 16-bit packed signed integers are less than or equal to the others
+#[inline]
+pub fn scmple16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x0E, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Compare whether 16-bit packed unsigned integers are less than the others
+#[inline]
+pub fn ucmplt16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x16, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Compare whether 16-bit packed unsigned integers are less than or equal to the others
+#[inline]
+pub fn ucmple16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x1E, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Compare equality for packed 8-bit elements
+#[inline]
+pub fn cmpeq8(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x27, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Compare whether 8-bit packed signed integers are less than the others
+#[inline]
+pub fn scmplt8(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x07, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Compare whether 8-bit packed signed integers are less than or equal to the others
+#[inline]
+pub fn scmple8(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x0F, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Compare whether 8-bit packed unsigned integers are less than the others
+#[inline]
+pub fn ucmplt8(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x17, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Compare whether 8-bit packed unsigned integers are less than or equal to the others
+#[inline]
+pub fn ucmple8(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x1F, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Get minimum values from 16-bit packed signed integers
+#[inline]
+pub fn smin16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x40, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Get minimum values from 16-bit packed unsigned integers
+#[inline]
+pub fn umin16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x48, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Get maximum values from 16-bit packed signed integers
+#[inline]
+pub fn smax16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x41, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Get maximum values from 16-bit packed unsigned integers
+#[inline]
+pub fn umax16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x49, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/* todo: sclip16, uclip16 */
+
+/// Compute the absolute value of packed 16-bit signed integers
+#[inline]
+pub fn kabs16(a: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn i 0x77, 0x0, {}, {}, 0xAD1", lateout(reg) value, in(reg) a, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Count the number of redundant sign bits of the packed 16-bit elements
+#[inline]
+pub fn clrs16(a: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn i 0x77, 0x0, {}, {}, 0xAE8", lateout(reg) value, in(reg) a, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Count the number of leading zero bits of the packed 16-bit elements
+#[inline]
+pub fn clz16(a: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn i 0x77, 0x0, {}, {}, 0xAE9", lateout(reg) value, in(reg) a, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Swap the 16-bit halfwords within each 32-bit word of a register
+#[inline]
+pub fn swap16(a: usize) -> usize {
+ let value: usize;
+ // this instruction is an alias for `pkbt rd, rs1, rs1`.
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x0F, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) a, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Get minimum values from 8-bit packed signed integers
+#[inline]
+pub fn smin8(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x44, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Get minimum values from 8-bit packed unsigned integers
+#[inline]
+pub fn umin8(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x4C, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Get maximum values from 8-bit packed signed integers
+#[inline]
+pub fn smax8(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x45, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Get maximum values from 8-bit packed unsigned integers
+#[inline]
+pub fn umax8(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x4D, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/* todo: sclip8, uclip8 */
+
+/// Compute the absolute value of packed 8-bit signed integers
+#[inline]
+pub fn kabs8(a: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn i 0x77, 0x0, {}, {}, 0xAD0", lateout(reg) value, in(reg) a, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Count the number of redundant sign bits of the packed 8-bit elements
+#[inline]
+pub fn clrs8(a: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn i 0x77, 0x0, {}, {}, 0xAE0", lateout(reg) value, in(reg) a, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Count the number of leading zero bits of the packed 8-bit elements
+#[inline]
+pub fn clz8(a: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn i 0x77, 0x0, {}, {}, 0xAE1", lateout(reg) value, in(reg) a, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Swap the 8-bit bytes within each 16-bit halfword of a register.
+#[inline]
+pub fn swap8(a: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn i 0x77, 0x0, {}, {}, 0xAD8", lateout(reg) value, in(reg) a, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Unpack the first and zeroth bytes into two 16-bit signed halfwords in each 32-bit chunk
+#[inline]
+pub fn sunpkd810(a: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn i 0x77, 0x0, {}, {}, 0xAC8", lateout(reg) value, in(reg) a, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Unpack the second and zeroth bytes into two 16-bit signed halfwords in each 32-bit chunk
+#[inline]
+pub fn sunpkd820(a: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn i 0x77, 0x0, {}, {}, 0xAC9", lateout(reg) value, in(reg) a, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Unpack the third and zeroth bytes into two 16-bit signed halfwords in each 32-bit chunk
+#[inline]
+pub fn sunpkd830(a: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn i 0x77, 0x0, {}, {}, 0xACA", lateout(reg) value, in(reg) a, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Unpack the third and first bytes into two 16-bit signed halfwords in each 32-bit chunk
+#[inline]
+pub fn sunpkd831(a: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn i 0x77, 0x0, {}, {}, 0xACB", lateout(reg) value, in(reg) a, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Unpack the third and second bytes into two 16-bit signed halfwords in each 32-bit chunk
+#[inline]
+pub fn sunpkd832(a: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn i 0x77, 0x0, {}, {}, 0xAD3", lateout(reg) value, in(reg) a, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Unpack the first and zeroth bytes into two 16-bit unsigned halfwords in each 32-bit chunk
+#[inline]
+pub fn zunpkd810(a: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn i 0x77, 0x0, {}, {}, 0xACC", lateout(reg) value, in(reg) a, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Unpack the second and zeroth bytes into two 16-bit unsigned halfwords in each 32-bit chunk
+#[inline]
+pub fn zunpkd820(a: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn i 0x77, 0x0, {}, {}, 0xACD", lateout(reg) value, in(reg) a, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Unpack the third and zeroth bytes into two 16-bit unsigned halfwords in each 32-bit chunk
+#[inline]
+pub fn zunpkd830(a: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn i 0x77, 0x0, {}, {}, 0xACE", lateout(reg) value, in(reg) a, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Unpack the third and first bytes into two 16-bit unsigned halfwords in each 32-bit chunk
+#[inline]
+pub fn zunpkd831(a: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn i 0x77, 0x0, {}, {}, 0xACF", lateout(reg) value, in(reg) a, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Unpack the third and second bytes into two 16-bit unsigned halfwords in each 32-bit chunk
+#[inline]
+pub fn zunpkd832(a: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn i 0x77, 0x0, {}, {}, 0xAD7", lateout(reg) value, in(reg) a, options(pure, nomem, nostack))
+ }
+ value
+}
+
+// todo: pkbb16, pktt16
+
+/// Pack two 16-bit values from the bottom and top halves of each 32-bit chunk
+#[inline]
+pub fn pkbt16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x1, 0x0F, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Pack two 16-bit values from the top and bottom halves of each 32-bit chunk
+#[inline]
+pub fn pktb16(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x1, 0x1F, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Count the number of redundant sign bits of the packed 32-bit elements
+#[inline]
+pub fn clrs32(a: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn i 0x77, 0x0, {}, {}, 0xAF8", lateout(reg) value, in(reg) a, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Count the number of leading zero bits of the packed 32-bit elements
+#[inline]
+pub fn clz32(a: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn i 0x77, 0x0, {}, {}, 0xAF9", lateout(reg) value, in(reg) a, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Calculate the sum of absolute difference of unsigned 8-bit data elements
+#[inline]
+pub fn pbsad(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x7E, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
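+
+// A hedged scalar model of `pbsad`, which sums the absolute differences of the
+// unsigned 8-bit lanes (the helper name is mine; `pbsada` below additionally
+// accumulates into `t`):
+//
+//     /// Scalar model of PBSAD: sum of |a_i - b_i| over the unsigned 8-bit lanes of a usize.
+//     fn pbsad_model(a: usize, b: usize) -> usize {
+//         let mut acc = 0usize;
+//         for lane in 0..(usize::BITS / 8) {
+//             let x = (a >> (lane * 8)) as u8;
+//             let y = (b >> (lane * 8)) as u8;
+//             acc += x.abs_diff(y) as usize;
+//         }
+//         acc
+//     }
+//
+//     fn main() {
+//         // |0x03 - 0x01| + |0x10 - 0x30| = 2 + 32 = 34
+//         assert_eq!(pbsad_model(0x10_03, 0x30_01), 34);
+//     }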
+
+/// Calculate and accumulate the sum of absolute difference of unsigned 8-bit data elements
+#[inline]
+pub fn pbsada(t: usize, a: usize, b: usize) -> usize {
+ let mut value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x7F, {}, {}, {}", inlateout(reg) t => value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Multiply signed 8-bit elements and add the 16-bit products into the corresponding packed 32-bit chunks
+#[inline]
+pub fn smaqa(t: usize, a: usize, b: usize) -> usize {
+ let mut value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x64, {}, {}, {}", inlateout(reg) t => value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Multiply unsigned 8-bit elements and add the 16-bit products into the corresponding packed 32-bit chunks
+#[inline]
+pub fn umaqa(t: usize, a: usize, b: usize) -> usize {
+ let mut value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x66, {}, {}, {}", inlateout(reg) t => value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Multiply signed 8-bit elements by unsigned 8-bit elements and add the 16-bit products into the corresponding packed 32-bit chunks
+#[inline]
+pub fn smaqasu(t: usize, a: usize, b: usize) -> usize {
+ let mut value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x0, 0x65, {}, {}, {}", inlateout(reg) t => value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Adds signed lower 16-bit content of two registers with Q15 saturation
+#[inline]
+pub fn kaddh(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x1, 0x02, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Subtracts signed lower 16-bit content of two registers with Q15 saturation
+#[inline]
+pub fn ksubh(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x1, 0x03, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Adds signed lower 16-bit content of two registers with U16 saturation
+#[inline]
+pub fn ukaddh(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x1, 0x0A, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
+
+/// Subtracts signed lower 16-bit content of two registers with U16 saturation
+#[inline]
+pub fn uksubh(a: usize, b: usize) -> usize {
+ let value: usize;
+ unsafe {
+ asm!(".insn r 0x77, 0x1, 0x0B, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack))
+ }
+ value
+}
diff --git a/library/stdarch/crates/core_arch/src/simd_llvm.rs b/library/stdarch/crates/core_arch/src/simd_llvm.rs
index 1970e5c69..decdecaaf 100644
--- a/library/stdarch/crates/core_arch/src/simd_llvm.rs
+++ b/library/stdarch/crates/core_arch/src/simd_llvm.rs
@@ -9,13 +9,7 @@ extern "platform-intrinsic" {
pub fn simd_gt<T, U>(x: T, y: T) -> U;
pub fn simd_ge<T, U>(x: T, y: T) -> U;
- pub fn simd_shuffle2<T, U>(x: T, y: T, idx: [u32; 2]) -> U;
- pub fn simd_shuffle4<T, U>(x: T, y: T, idx: [u32; 4]) -> U;
- pub fn simd_shuffle8<T, U>(x: T, y: T, idx: [u32; 8]) -> U;
- pub fn simd_shuffle16<T, U>(x: T, y: T, idx: [u32; 16]) -> U;
- pub fn simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U;
- pub fn simd_shuffle64<T, U>(x: T, y: T, idx: [u32; 64]) -> U;
- pub fn simd_shuffle128<T, U>(x: T, y: T, idx: [u32; 128]) -> U;
+ pub fn simd_shuffle<T, U, V>(x: T, y: T, idx: U) -> V;
#[rustc_const_unstable(feature = "const_simd_insert", issue = "none")]
pub fn simd_insert<T, U>(x: T, idx: u32, val: U) -> T;
diff --git a/library/stdarch/crates/core_arch/src/x86/avx2.rs b/library/stdarch/crates/core_arch/src/x86/avx2.rs
index 081609ece..16add3dbb 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx2.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx2.rs
@@ -1195,7 +1195,7 @@ pub unsafe fn _mm_mask_i32gather_epi64<const SCALE: i32>(
/// Returns values from `slice` at offsets determined by `offsets * scale`,
/// where
-/// `scale` should be 1, 2, 4 and 8.
+/// `scale` should be 1, 2, 4 or 8.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_i32gather_epi64)
#[inline]
@@ -2001,7 +2001,7 @@ pub unsafe fn _mm256_min_epu8(a: __m256i, b: __m256i) -> __m256i {
#[cfg_attr(test, assert_instr(vpmovmskb))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_movemask_epi8(a: __m256i) -> i32 {
- pmovmskb(a.as_i8x32())
+ simd_bitmask::<_, u32>(a.as_i8x32()) as i32
}
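Replacing the `llvm.x86.avx2.pmovmskb` link with `simd_bitmask` does not change the result: the movemask is simply the sign bit of each of the 32 bytes packed into an `i32`. A hedged scalar model (not the actual implementation):

```rust
/// Scalar model of _mm256_movemask_epi8: bit i of the result is the most significant bit of byte i.
fn movemask_epi8(bytes: [i8; 32]) -> i32 {
    let mut mask = 0u32;
    for (i, b) in bytes.iter().enumerate() {
        if *b < 0 {
            mask |= 1 << i;
        }
    }
    mask as i32
}

fn main() {
    let mut v = [0i8; 32];
    v[0] = -1;
    v[31] = -128;
    assert_eq!(movemask_epi8(v) as u32, 0x8000_0001);
}
```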
/// Computes the sum of absolute differences (SADs) of quadruplets of unsigned
@@ -3642,8 +3642,6 @@ extern "C" {
fn pminud(a: u32x8, b: u32x8) -> u32x8;
#[link_name = "llvm.x86.avx2.pminu.b"]
fn pminub(a: u8x32, b: u8x32) -> u8x32;
- #[link_name = "llvm.x86.avx2.pmovmskb"]
- fn pmovmskb(a: i8x32) -> i32;
#[link_name = "llvm.x86.avx2.mpsadbw"]
fn mpsadbw(a: u8x32, b: u8x32, imm8: i32) -> u16x16;
#[link_name = "llvm.x86.avx2.pmulhu.w"]
diff --git a/library/stdarch/crates/core_arch/src/x86/avx512bw.rs b/library/stdarch/crates/core_arch/src/x86/avx512bw.rs
index 47d565cea..49d78ed60 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx512bw.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx512bw.rs
@@ -8545,9 +8545,6 @@ pub unsafe fn _mm_movm_epi8(k: __mmask16) -> __m128i {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kadd_mask32&expand=3207)
#[inline]
#[target_feature(enable = "avx512bw")]
-#[cfg_attr(all(test, target_arch = "x86"), assert_instr(add))]
-#[cfg_attr(all(test, target_arch = "x86_64"), assert_instr(lea))] // generate normal lea/add code instead of kaddd
- //llvm.x86.avx512.kadd.d
pub unsafe fn _kadd_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
transmute(a + b)
}
@@ -8557,9 +8554,6 @@ pub unsafe fn _kadd_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kadd_mask64&expand=3208)
#[inline]
#[target_feature(enable = "avx512bw")]
-#[cfg_attr(all(test, target_arch = "x86"), assert_instr(add))]
-#[cfg_attr(all(test, target_arch = "x86_64"), assert_instr(lea))] // generate normal lea/add code instead of kaddd
- //llvm.x86.avx512.kadd.d
pub unsafe fn _kadd_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
transmute(a + b)
}
@@ -8569,7 +8563,6 @@ pub unsafe fn _kadd_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kand_mask32&expand=3213)
#[inline]
#[target_feature(enable = "avx512bw")]
-#[cfg_attr(test, assert_instr(and))] // generate normal and code instead of kandd
pub unsafe fn _kand_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
transmute(a & b)
}
@@ -8579,7 +8572,6 @@ pub unsafe fn _kand_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kand_mask64&expand=3214)
#[inline]
#[target_feature(enable = "avx512bw")]
-#[cfg_attr(test, assert_instr(and))] // generate normal and code instead of kandq
pub unsafe fn _kand_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
transmute(a & b)
}
@@ -8607,7 +8599,6 @@ pub unsafe fn _knot_mask64(a: __mmask64) -> __mmask64 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kandn_mask32&expand=3219)
#[inline]
#[target_feature(enable = "avx512bw")]
-#[cfg_attr(test, assert_instr(not))] // generate normal and code instead of kandnd
pub unsafe fn _kandn_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
transmute(_knot_mask32(a) & b)
}
@@ -8617,7 +8608,6 @@ pub unsafe fn _kandn_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kandn_mask64&expand=3220)
#[inline]
#[target_feature(enable = "avx512bw")]
-#[cfg_attr(test, assert_instr(not))] // generate normal and code instead of kandnq
pub unsafe fn _kandn_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
transmute(_knot_mask64(a) & b)
}
@@ -8627,7 +8617,6 @@ pub unsafe fn _kandn_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kor_mask32&expand=3240)
#[inline]
#[target_feature(enable = "avx512bw")]
-#[cfg_attr(test, assert_instr(or))] // generate normal and code instead of kord
pub unsafe fn _kor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
transmute(a | b)
}
@@ -8637,7 +8626,6 @@ pub unsafe fn _kor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kor_mask64&expand=3241)
#[inline]
#[target_feature(enable = "avx512bw")]
-#[cfg_attr(test, assert_instr(or))] // generate normal and code instead of korq
pub unsafe fn _kor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
transmute(a | b)
}
@@ -8647,7 +8635,6 @@ pub unsafe fn _kor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kxor_mask32&expand=3292)
#[inline]
#[target_feature(enable = "avx512bw")]
-#[cfg_attr(test, assert_instr(xor))] // generate normal and code instead of kxord
pub unsafe fn _kxor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
transmute(a ^ b)
}
@@ -8657,7 +8644,6 @@ pub unsafe fn _kxor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kxor_mask64&expand=3293)
#[inline]
#[target_feature(enable = "avx512bw")]
-#[cfg_attr(test, assert_instr(xor))] // generate normal and code instead of kxorq
pub unsafe fn _kxor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
transmute(a ^ b)
}
@@ -8667,7 +8653,6 @@ pub unsafe fn _kxor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kxnor_mask32&expand=3286)
#[inline]
#[target_feature(enable = "avx512bw")]
-#[cfg_attr(test, assert_instr(xor))] // generate normal and code instead of kxnord
pub unsafe fn _kxnor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
transmute(_knot_mask32(a ^ b))
}
@@ -8677,7 +8662,6 @@ pub unsafe fn _kxnor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kxnor_mask64&expand=3287)
#[inline]
#[target_feature(enable = "avx512bw")]
-#[cfg_attr(test, assert_instr(xor))] // generate normal and code instead of kxnorq
pub unsafe fn _kxnor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
transmute(_knot_mask64(a ^ b))
}
diff --git a/library/stdarch/crates/core_arch/src/x86/avx512gfni.rs b/library/stdarch/crates/core_arch/src/x86/avx512gfni.rs
index d8ac5c29c..66fd1c2e1 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx512gfni.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx512gfni.rs
@@ -829,21 +829,21 @@ mod tests {
#[target_feature(enable = "sse2")]
unsafe fn load_m128i_word<T>(data: &[T], word_index: usize) -> __m128i {
let byte_offset = word_index * 16 / size_of::<T>();
- let pointer = data.as_ptr().offset(byte_offset as isize) as *const __m128i;
+ let pointer = data.as_ptr().add(byte_offset) as *const __m128i;
_mm_loadu_si128(black_box(pointer))
}
#[target_feature(enable = "avx")]
unsafe fn load_m256i_word<T>(data: &[T], word_index: usize) -> __m256i {
let byte_offset = word_index * 32 / size_of::<T>();
- let pointer = data.as_ptr().offset(byte_offset as isize) as *const __m256i;
+ let pointer = data.as_ptr().add(byte_offset) as *const __m256i;
_mm256_loadu_si256(black_box(pointer))
}
#[target_feature(enable = "avx512f")]
unsafe fn load_m512i_word<T>(data: &[T], word_index: usize) -> __m512i {
let byte_offset = word_index * 64 / size_of::<T>();
- let pointer = data.as_ptr().offset(byte_offset as isize) as *const i32;
+ let pointer = data.as_ptr().add(byte_offset) as *const i32;
_mm512_loadu_si512(black_box(pointer))
}
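(Aside: a recurring cleanup in this patch is replacing `ptr.offset(n as isize)` with `ptr.add(n)`. The two are equivalent for non-negative offsets; `add` simply takes a `usize`. A tiny illustration, not taken from stdarch:)

    fn main() {
        let data = [10u32, 20, 30, 40];
        let p = data.as_ptr();
        unsafe {
            // `add(n)` is shorthand for `offset(n as isize)` when n cannot be negative.
            assert_eq!(*p.add(2), *p.offset(2));
            assert_eq!(*p.add(2), 30);
        }
    }
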
diff --git a/library/stdarch/crates/core_arch/src/x86/cpuid.rs b/library/stdarch/crates/core_arch/src/x86/cpuid.rs
index 6b90295ef..2624e8bdf 100644
--- a/library/stdarch/crates/core_arch/src/x86/cpuid.rs
+++ b/library/stdarch/crates/core_arch/src/x86/cpuid.rs
@@ -62,27 +62,27 @@ pub unsafe fn __cpuid_count(leaf: u32, sub_leaf: u32) -> CpuidResult {
#[cfg(target_arch = "x86")]
{
asm!(
- "movl %ebx, {0}",
+ "mov {0}, ebx",
"cpuid",
- "xchgl %ebx, {0}",
- lateout(reg) ebx,
- inlateout("eax") leaf => eax,
- inlateout("ecx") sub_leaf => ecx,
- lateout("edx") edx,
- options(nostack, preserves_flags, att_syntax),
+ "xchg {0}, ebx",
+ out(reg) ebx,
+ inout("eax") leaf => eax,
+ inout("ecx") sub_leaf => ecx,
+ out("edx") edx,
+ options(nostack, preserves_flags),
);
}
#[cfg(target_arch = "x86_64")]
{
asm!(
- "movq %rbx, {0:r}",
+ "mov {0:r}, rbx",
"cpuid",
- "xchgq %rbx, {0:r}",
- lateout(reg) ebx,
- inlateout("eax") leaf => eax,
- inlateout("ecx") sub_leaf => ecx,
- lateout("edx") edx,
- options(nostack, preserves_flags, att_syntax),
+ "xchg {0:r}, rbx",
+ out(reg) ebx,
+ inout("eax") leaf => eax,
+ inout("ecx") sub_leaf => ecx,
+ out("edx") edx,
+ options(nostack, preserves_flags),
);
}
CpuidResult { eax, ebx, ecx, edx }
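(Aside: the rewritten inline assembly keeps the public `__cpuid`/`__cpuid_count` interface unchanged. A small usage sketch, assuming an x86_64 host; leaf 0 returns the maximum supported leaf in `eax` and the vendor string split across `ebx`, `edx`, `ecx`:)

    #[cfg(target_arch = "x86_64")]
    fn main() {
        use core::arch::x86_64::__cpuid;
        // Query leaf 0 of the CPUID instruction.
        let r = unsafe { __cpuid(0) };
        let mut vendor = Vec::new();
        vendor.extend_from_slice(&r.ebx.to_le_bytes());
        vendor.extend_from_slice(&r.edx.to_le_bytes());
        vendor.extend_from_slice(&r.ecx.to_le_bytes());
        println!("max leaf: {}, vendor: {}", r.eax, String::from_utf8_lossy(&vendor));
    }

    #[cfg(not(target_arch = "x86_64"))]
    fn main() {}
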
diff --git a/library/stdarch/crates/core_arch/src/x86/mod.rs b/library/stdarch/crates/core_arch/src/x86/mod.rs
index 547bfe67d..6b50e95b2 100644
--- a/library/stdarch/crates/core_arch/src/x86/mod.rs
+++ b/library/stdarch/crates/core_arch/src/x86/mod.rs
@@ -306,7 +306,7 @@ types! {
/// 256-bit wide set of 16 'u16' types, x86-specific
///
- /// This type is the same as the `__m128bh` type defined by Intel,
+ /// This type is the same as the `__m256bh` type defined by Intel,
/// representing a 256-bit SIMD register which internally consists of
/// 16 packed `u16` instances. Its purpose is for bf16 related intrinsic
/// implementations.
@@ -317,7 +317,7 @@ types! {
/// 512-bit wide set of 32 'u16' types, x86-specific
///
- /// This type is the same as the `__m128bh` type defined by Intel,
+ /// This type is the same as the `__m512bh` type defined by Intel,
/// representing a 512-bit SIMD register which internally consists of
/// 32 packed `u16` instances. Its purpose is for bf16 related intrinsic
/// implementations.
diff --git a/library/stdarch/crates/core_arch/src/x86/sse.rs b/library/stdarch/crates/core_arch/src/x86/sse.rs
index 2c4295ef6..03c3a14a5 100644
--- a/library/stdarch/crates/core_arch/src/x86/sse.rs
+++ b/library/stdarch/crates/core_arch/src/x86/sse.rs
@@ -1185,9 +1185,9 @@ pub unsafe fn _mm_loadu_ps(p: *const f32) -> __m128 {
///
/// ```text
/// let a0 = *p;
-/// let a1 = *p.offset(1);
-/// let a2 = *p.offset(2);
-/// let a3 = *p.offset(3);
+/// let a1 = *p.add(1);
+/// let a2 = *p.add(2);
+/// let a3 = *p.add(3);
/// __m128::new(a3, a2, a1, a0)
/// ```
///
@@ -1241,9 +1241,9 @@ pub unsafe fn _mm_store_ss(p: *mut f32, a: __m128) {
/// ```text
/// let x = a.extract(0);
/// *p = x;
-/// *p.offset(1) = x;
-/// *p.offset(2) = x;
-/// *p.offset(3) = x;
+/// *p.add(1) = x;
+/// *p.add(2) = x;
+/// *p.add(3) = x;
/// ```
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store1_ps)
@@ -1317,9 +1317,9 @@ pub unsafe fn _mm_storeu_ps(p: *mut f32, a: __m128) {
///
/// ```text
/// *p = a.extract(3);
-/// *p.offset(1) = a.extract(2);
-/// *p.offset(2) = a.extract(1);
-/// *p.offset(3) = a.extract(0);
+/// *p.add(1) = a.extract(2);
+/// *p.add(2) = a.extract(1);
+/// *p.add(3) = a.extract(0);
/// ```
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storer_ps)
@@ -3006,9 +3006,9 @@ mod tests {
let unalignment = (p as usize) & 0xf;
if unalignment != 0 {
- let delta = ((16 - unalignment) >> 2) as isize;
+ let delta = (16 - unalignment) >> 2;
fixup = delta as f32;
- p = p.offset(delta);
+ p = p.add(delta);
}
let r = _mm_load_ps(p);
@@ -3019,7 +3019,7 @@ mod tests {
#[simd_test(enable = "sse")]
unsafe fn test_mm_loadu_ps() {
let vals = &[1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
- let p = vals.as_ptr().offset(3);
+ let p = vals.as_ptr().add(3);
let r = _mm_loadu_ps(black_box(p));
assert_eq_m128(r, _mm_setr_ps(4.0, 5.0, 6.0, 7.0));
}
@@ -3036,9 +3036,9 @@ mod tests {
let unalignment = (p as usize) & 0xf;
if unalignment != 0 {
- let delta = ((16 - unalignment) >> 2) as isize;
+ let delta = (16 - unalignment) >> 2;
fixup = delta as f32;
- p = p.offset(delta);
+ p = p.add(delta);
}
let r = _mm_loadr_ps(p);
@@ -3057,7 +3057,7 @@ mod tests {
unsafe fn test_mm_store_ss() {
let mut vals = [0.0f32; 8];
let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0);
- _mm_store_ss(vals.as_mut_ptr().offset(1), a);
+ _mm_store_ss(vals.as_mut_ptr().add(1), a);
assert_eq!(vals[0], 0.0);
assert_eq!(vals[1], 1.0);
@@ -3152,7 +3152,7 @@ mod tests {
// Make sure p is **not** aligned to 16-byte boundary
if (p as usize) & 0xf == 0 {
ofs = 1;
- p = p.offset(1);
+ p = p.add(1);
}
_mm_storeu_ps(p, *black_box(&a));
diff --git a/library/stdarch/crates/core_arch/src/x86/sse2.rs b/library/stdarch/crates/core_arch/src/x86/sse2.rs
index 5a9120042..3e79b3539 100644
--- a/library/stdarch/crates/core_arch/src/x86/sse2.rs
+++ b/library/stdarch/crates/core_arch/src/x86/sse2.rs
@@ -1378,7 +1378,7 @@ pub unsafe fn _mm_insert_epi16<const IMM8: i32>(a: __m128i, i: i32) -> __m128i {
#[cfg_attr(test, assert_instr(pmovmskb))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_movemask_epi8(a: __m128i) -> i32 {
- pmovmskb(a.as_i8x16())
+ simd_bitmask::<_, u16>(a.as_i8x16()) as u32 as i32
}
/// Shuffles 32-bit integers in `a` using the control in `IMM8`.
@@ -2856,8 +2856,6 @@ extern "C" {
fn packssdw(a: i32x4, b: i32x4) -> i16x8;
#[link_name = "llvm.x86.sse2.packuswb.128"]
fn packuswb(a: i16x8, b: i16x8) -> u8x16;
- #[link_name = "llvm.x86.sse2.pmovmskb.128"]
- fn pmovmskb(a: i8x16) -> i32;
#[link_name = "llvm.x86.sse2.max.sd"]
fn maxsd(a: __m128d, b: __m128d) -> __m128d;
#[link_name = "llvm.x86.sse2.max.pd"]
@@ -4518,7 +4516,7 @@ mod tests {
// Make sure p is **not** aligned to 16-byte boundary
if (p as usize) & 0xf == 0 {
ofs = 1;
- p = p.offset(1);
+ p = p.add(1);
}
_mm_storeu_pd(p, *black_box(&a));
@@ -4606,7 +4604,7 @@ mod tests {
let mut offset = 0;
if (d as usize) & 0xf == 0 {
offset = 1;
- d = d.offset(offset as isize);
+ d = d.add(offset);
}
let r = _mm_loadu_pd(d);
diff --git a/library/stdarch/crates/core_arch/src/x86/sse3.rs b/library/stdarch/crates/core_arch/src/x86/sse3.rs
index ab0dd38fe..61f8a4e78 100644
--- a/library/stdarch/crates/core_arch/src/x86/sse3.rs
+++ b/library/stdarch/crates/core_arch/src/x86/sse3.rs
@@ -1,11 +1,7 @@
//! Streaming SIMD Extensions 3 (SSE3)
use crate::{
- core_arch::{
- simd::*,
- simd_llvm::{simd_shuffle2, simd_shuffle4},
- x86::*,
- },
+ core_arch::{simd::*, simd_llvm::simd_shuffle, x86::*},
mem::transmute,
};
diff --git a/library/stdarch/crates/core_arch/src/x86_64/cmpxchg16b.rs b/library/stdarch/crates/core_arch/src/x86_64/cmpxchg16b.rs
index 391daed20..a262932af 100644
--- a/library/stdarch/crates/core_arch/src/x86_64/cmpxchg16b.rs
+++ b/library/stdarch/crates/core_arch/src/x86_64/cmpxchg16b.rs
@@ -34,11 +34,11 @@ use stdarch_test::assert_instr;
/// support `cmpxchg16b` and the program enters an execution path that
/// eventually would reach this function the behavior is undefined.
///
-/// The `success` ordering must also be stronger or equal to `failure`, or this
-/// function call is undefined. See the `Atomic*` documentation's
-/// `compare_exchange` function for more information. When `compare_exchange`
-/// panics, this is undefined behavior. Currently this function aborts the
-/// process with an undefined instruction.
+/// The failure ordering must be [`Ordering::SeqCst`], [`Ordering::Acquire`] or
+/// [`Ordering::Relaxed`], or this function call is undefined. See the `Atomic*`
+/// documentation's `compare_exchange` function for more information. When
+/// `compare_exchange` panics, this is undefined behavior. Currently this
+/// function aborts the process with an undefined instruction.
#[inline]
#[cfg_attr(test, assert_instr(cmpxchg16b, success = Ordering::SeqCst, failure = Ordering::SeqCst))]
#[target_feature(enable = "cmpxchg16b")]
@@ -54,15 +54,21 @@ pub unsafe fn cmpxchg16b(
debug_assert!(dst as usize % 16 == 0);
let (val, _ok) = match (success, failure) {
- (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
- (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
- (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
- (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
- (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
- (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
- (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
- (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
- (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
+ (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed_relaxed(dst, old, new),
+ (Relaxed, Acquire) => intrinsics::atomic_cxchg_relaxed_acquire(dst, old, new),
+ (Relaxed, SeqCst) => intrinsics::atomic_cxchg_relaxed_seqcst(dst, old, new),
+ (Acquire, Relaxed) => intrinsics::atomic_cxchg_acquire_relaxed(dst, old, new),
+ (Acquire, Acquire) => intrinsics::atomic_cxchg_acquire_acquire(dst, old, new),
+ (Acquire, SeqCst) => intrinsics::atomic_cxchg_acquire_seqcst(dst, old, new),
+ (Release, Relaxed) => intrinsics::atomic_cxchg_release_relaxed(dst, old, new),
+ (Release, Acquire) => intrinsics::atomic_cxchg_release_acquire(dst, old, new),
+ (Release, SeqCst) => intrinsics::atomic_cxchg_release_seqcst(dst, old, new),
+ (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_relaxed(dst, old, new),
+ (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel_acquire(dst, old, new),
+ (AcqRel, SeqCst) => intrinsics::atomic_cxchg_acqrel_seqcst(dst, old, new),
+ (SeqCst, Relaxed) => intrinsics::atomic_cxchg_seqcst_relaxed(dst, old, new),
+ (SeqCst, Acquire) => intrinsics::atomic_cxchg_seqcst_acquire(dst, old, new),
+ (SeqCst, SeqCst) => intrinsics::atomic_cxchg_seqcst_seqcst(dst, old, new),
// The above block is all copied from libcore, and this statement is
// also copied from libcore except that it's a panic in libcore and we
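(Aside: the doc change and the expanded match mirror the ordering rules libcore's `compare_exchange` now uses -- the failure ordering only has to be `SeqCst`, `Acquire` or `Relaxed`, and may be stronger than the success ordering. The same rule on a stable atomic type, shown purely as an illustration and unrelated to the 128-bit intrinsic itself:)

    use std::sync::atomic::{AtomicU64, Ordering};

    fn main() {
        let x = AtomicU64::new(1);
        // A failure ordering stronger than the success ordering is accepted on
        // recent toolchains (the old "must be weaker" restriction was lifted)...
        let _ = x.compare_exchange(1, 2, Ordering::Release, Ordering::SeqCst);
        // ...and so is the traditional "weaker failure" pairing.
        let _ = x.compare_exchange(2, 3, Ordering::SeqCst, Ordering::Relaxed);
        assert_eq!(x.load(Ordering::SeqCst), 3);
    }
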
diff --git a/library/stdarch/crates/intrinsic-test/missing_aarch64.txt b/library/stdarch/crates/intrinsic-test/missing_aarch64.txt
index 56ec274b5..93fc126e5 100644
--- a/library/stdarch/crates/intrinsic-test/missing_aarch64.txt
+++ b/library/stdarch/crates/intrinsic-test/missing_aarch64.txt
@@ -67,20 +67,6 @@ vrnd64xq_f64
vrnd64z_f64
vrnd64zq_f64
-# Takes too long to compile tests
-vcopyq_laneq_u8
-vcopyq_laneq_s8
-vcopyq_laneq_p8
-vcopyq_lane_u8
-vcopyq_lane_s8
-vcopyq_lane_p8
-vcopy_laneq_u8
-vcopy_laneq_s8
-vcopy_laneq_p8
-vcopy_lane_u8
-vcopy_lane_s8
-vcopy_lane_p8
-
# QEMU 6.0 doesn't support these instructions
vmmlaq_s32
vmmlaq_u32
diff --git a/library/stdarch/crates/intrinsic-test/src/argument.rs b/library/stdarch/crates/intrinsic-test/src/argument.rs
index f4cb77992..798854c03 100644
--- a/library/stdarch/crates/intrinsic-test/src/argument.rs
+++ b/library/stdarch/crates/intrinsic-test/src/argument.rs
@@ -1,6 +1,6 @@
use std::ops::Range;
-use crate::types::IntrinsicType;
+use crate::types::{IntrinsicType, TypeKind};
use crate::Language;
/// An argument for the intrinsic.
@@ -90,49 +90,108 @@ impl ArgumentList {
.join(", ")
}
- /// Creates a line that initializes this argument for C code.
- /// e.g. `int32x2_t a = { 0x1, 0x2 };`
- pub fn init_random_values_c(&self, pass: usize) -> String {
+ /// Creates a line for each argument that initializes an array for C from which `loads` argument
+ /// values can be loaded as a sliding window.
+ /// e.g. `const int32x2_t a_vals = {0x3effffff, 0x3effffff, 0x3f7fffff}`, if loads=2.
+ pub fn gen_arglists_c(&self, loads: u32) -> String {
self.iter()
.filter_map(|arg| {
(!arg.has_constraint()).then(|| {
format!(
- "{ty} {name} = {{ {values} }};",
- ty = arg.to_c_type(),
+ "const {ty} {name}_vals[] = {{ {values} }};",
+ ty = arg.ty.c_scalar_type(),
name = arg.name,
- values = arg.ty.populate_random(pass, &Language::C)
+ values = arg.ty.populate_random(loads, &Language::C)
)
})
})
.collect::<Vec<_>>()
- .join("\n ")
+ .join("\n")
}
- /// Creates a line that initializes this argument for Rust code.
- /// e.g. `let a = transmute([0x1, 0x2]);`
- pub fn init_random_values_rust(&self, pass: usize) -> String {
+ /// Creates a line for each argument that initializes an array for Rust from which `loads` argument
+ /// values can be loaded as a sliding window, e.g. `const A_VALS: [u32; 20] = [...];`
+ pub fn gen_arglists_rust(&self, loads: u32) -> String {
self.iter()
.filter_map(|arg| {
(!arg.has_constraint()).then(|| {
- if arg.is_simd() {
- format!(
- "let {name} = ::std::mem::transmute([{values}]);",
- name = arg.name,
- values = arg.ty.populate_random(pass, &Language::Rust),
- )
- } else {
- format!(
- "let {name} = {value};",
- name = arg.name,
- value = arg.ty.populate_random(pass, &Language::Rust)
- )
- }
+ format!(
+ "const {upper_name}_VALS: [{ty}; {load_size}] = unsafe{{ [{values}] }};",
+ upper_name = arg.name.to_uppercase(),
+ ty = arg.ty.rust_scalar_type(),
+ load_size = arg.ty.num_lanes() * arg.ty.num_vectors() + loads - 1,
+ values = arg.ty.populate_random(loads, &Language::Rust)
+ )
+ })
+ })
+ .collect::<Vec<_>>()
+ .join("\n")
+ }
+
+ /// Creates a line for each argument that initializes the argument from an array [arg]_vals at
+ /// an offset i using a load intrinsic, in C.
+ /// e.g. `uint8x8_t a = vld1_u8(&a_vals[i]);`
+ pub fn load_values_c(&self, p64_armv7_workaround: bool) -> String {
+ self.iter()
+ .filter_map(|arg| {
+ // The ACLE doesn't support 64-bit polynomial loads on Armv7;
+ // this flag and the cast below are a workaround for that.
+ let armv7_p64 = if let TypeKind::Poly = arg.ty.kind() {
+ p64_armv7_workaround
+ } else {
+ false
+ };
+
+ (!arg.has_constraint()).then(|| {
+ format!(
+ "{ty} {name} = {open_cast}{load}(&{name}_vals[i]){close_cast};",
+ ty = arg.to_c_type(),
+ name = arg.name,
+ load = if arg.is_simd() {
+ arg.ty.get_load_function(p64_armv7_workaround)
+ } else {
+ "*".to_string()
+ },
+ open_cast = if armv7_p64 {
+ format!("cast<{}>(", arg.to_c_type())
+ } else {
+ "".to_string()
+ },
+ close_cast = if armv7_p64 {
+ ")".to_string()
+ } else {
+ "".to_string()
+ }
+ )
})
})
.collect::<Vec<_>>()
.join("\n ")
}
+ /// Creates a line for each argument that initializes the argument from array [ARG]_VALS at
+ /// an offset i using a load intrinsic, in Rust.
+ /// e.g. `let a = vld1_u8(A_VALS.as_ptr().offset(i));`
+ pub fn load_values_rust(&self) -> String {
+ self.iter()
+ .filter_map(|arg| {
+ (!arg.has_constraint()).then(|| {
+ format!(
+ "let {name} = {load}({upper_name}_VALS.as_ptr().offset(i));",
+ name = arg.name,
+ upper_name = arg.name.to_uppercase(),
+ load = if arg.is_simd() {
+ arg.ty.get_load_function(false)
+ } else {
+ "*".to_string()
+ },
+ )
+ })
+ })
+ .collect::<Vec<_>>()
+ .join("\n ")
+ }
+
pub fn iter(&self) -> std::slice::Iter<'_, Argument> {
self.args.iter()
}
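(Aside: the net effect of `gen_arglists_*` plus `load_values_*` is that each argument gets one constant array of `lanes + PASSES - 1` values, and pass `i` reads the window starting at index `i`. A plain-Rust sketch of that indexing -- hypothetical names, ordinary slices instead of NEON loads:)

    fn main() {
        const LANES: usize = 2;   // e.g. one uint32x2_t argument
        const PASSES: usize = 20; // matches the PASSES constant added in main.rs
        // One shared array; the PASSES windows of LANES values overlap.
        let a_vals: Vec<u32> = (0..LANES + PASSES - 1).map(|i| 0x1000 + i as u32).collect();
        for i in 0..PASSES {
            // Pass `i` sees a_vals[i..i + LANES], mimicking vld1_u32(&a_vals[i]).
            println!("pass {}: {:?}", i + 1, &a_vals[i..i + LANES]);
        }
    }
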
diff --git a/library/stdarch/crates/intrinsic-test/src/intrinsic.rs b/library/stdarch/crates/intrinsic-test/src/intrinsic.rs
index 2b7130440..e0645a36b 100644
--- a/library/stdarch/crates/intrinsic-test/src/intrinsic.rs
+++ b/library/stdarch/crates/intrinsic-test/src/intrinsic.rs
@@ -20,8 +20,9 @@ pub struct Intrinsic {
impl Intrinsic {
/// Generates a std::cout for the intrinsics results that will match the
- /// rust debug output format for the return type.
- pub fn print_result_c(&self, index: usize, additional: &str) -> String {
+ /// rust debug output format for the return type. The generated line assumes
+ /// there is an int i in scope which is the current pass number.
+ pub fn print_result_c(&self, additional: &str) -> String {
let lanes = if self.results.num_vectors() > 1 {
(0..self.results.num_vectors())
.map(|vector| {
@@ -72,7 +73,7 @@ impl Intrinsic {
};
format!(
- r#"std::cout << "Result {additional}-{idx}: {ty}" << std::fixed << std::setprecision(150) << {lanes} << "{close}" << std::endl;"#,
+ r#"std::cout << "Result {additional}-" << i+1 << ": {ty}" << std::fixed << std::setprecision(150) << {lanes} << "{close}" << std::endl;"#,
ty = if self.results.is_simd() {
format!("{}(", self.results.c_type())
} else {
@@ -81,11 +82,31 @@ impl Intrinsic {
close = if self.results.is_simd() { ")" } else { "" },
lanes = lanes,
additional = additional,
- idx = index,
)
}
- pub fn generate_pass_rust(&self, index: usize, additional: &str) -> String {
+ pub fn generate_loop_c(
+ &self,
+ additional: &str,
+ passes: u32,
+ p64_armv7_workaround: bool,
+ ) -> String {
+ format!(
+ r#" {{
+ for (int i=0; i<{passes}; i++) {{
+ {loaded_args}
+ auto __return_value = {intrinsic_call}({args});
+ {print_result}
+ }}
+ }}"#,
+ loaded_args = self.arguments.load_values_c(p64_armv7_workaround),
+ intrinsic_call = self.name,
+ args = self.arguments.as_call_param_c(),
+ print_result = self.print_result_c(additional)
+ )
+ }
+
+ pub fn generate_loop_rust(&self, additional: &str, passes: u32) -> String {
let constraints = self.arguments.as_constraint_parameters_rust();
let constraints = if !constraints.is_empty() {
format!("::<{}>", constraints)
@@ -94,32 +115,20 @@ impl Intrinsic {
};
format!(
- r#"
- unsafe {{
- {initialized_args}
- let res = {intrinsic_call}{const}({args});
- println!("Result {additional}-{idx}: {{:.150?}}", res);
- }}"#,
- initialized_args = self.arguments.init_random_values_rust(index),
- intrinsic_call = self.name,
- args = self.arguments.as_call_param_rust(),
- additional = additional,
- idx = index,
- const = constraints,
- )
- }
-
- pub fn generate_pass_c(&self, index: usize, additional: &str) -> String {
- format!(
r#" {{
- {initialized_args}
- auto __return_value = {intrinsic_call}({args});
- {print_result}
+ for i in 0..{passes} {{
+ unsafe {{
+ {loaded_args}
+ let __return_value = {intrinsic_call}{const}({args});
+ println!("Result {additional}-{{}}: {{:.150?}}", i+1, __return_value);
+ }}
+ }}
}}"#,
- initialized_args = self.arguments.init_random_values_c(index),
+ loaded_args = self.arguments.load_values_rust(),
intrinsic_call = self.name,
- args = self.arguments.as_call_param_c(),
- print_result = self.print_result_c(index, additional)
+ const = constraints,
+ args = self.arguments.as_call_param_rust(),
+ additional = additional,
)
}
}
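(Aside: both generated programs now print one result line per pass inside the loop, and the Rust and C outputs are compared textually, so the formats must stay in lockstep. A rough stand-in for the shape of the Rust side's output; `name` and the dummy return value are invented for the illustration:)

    fn main() {
        let name = "t1"; // the constraint-derived test name ("additional")
        for i in 0..3u32 {
            let __return_value = [1.0f32, 2.0];
            // Matches the "Result <name>-<pass>: ..." line emitted by the C program.
            println!("Result {}-{}: {:.150?}", name, i + 1, __return_value);
        }
    }
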
diff --git a/library/stdarch/crates/intrinsic-test/src/main.rs b/library/stdarch/crates/intrinsic-test/src/main.rs
index 1b58da2fd..43f2df08b 100644
--- a/library/stdarch/crates/intrinsic-test/src/main.rs
+++ b/library/stdarch/crates/intrinsic-test/src/main.rs
@@ -23,13 +23,21 @@ mod intrinsic;
mod types;
mod values;
+// The number of times each intrinsic will be called.
+const PASSES: u32 = 20;
+
#[derive(Debug, PartialEq)]
pub enum Language {
Rust,
C,
}
-fn gen_code_c(intrinsic: &Intrinsic, constraints: &[&Argument], name: String) -> String {
+fn gen_code_c(
+ intrinsic: &Intrinsic,
+ constraints: &[&Argument],
+ name: String,
+ p64_armv7_workaround: bool,
+) -> String {
if let Some((current, constraints)) = constraints.split_last() {
let range = current
.constraints
@@ -47,19 +55,25 @@ fn gen_code_c(intrinsic: &Intrinsic, constraints: &[&Argument], name: String) ->
name = current.name,
ty = current.ty.c_type(),
val = i,
- pass = gen_code_c(intrinsic, constraints, format!("{}-{}", name, i))
+ pass = gen_code_c(
+ intrinsic,
+ constraints,
+ format!("{}-{}", name, i),
+ p64_armv7_workaround
+ )
)
})
.collect()
} else {
- (1..20)
- .map(|idx| intrinsic.generate_pass_c(idx, &name))
- .collect::<Vec<_>>()
- .join("\n")
+ intrinsic.generate_loop_c(&name, PASSES, p64_armv7_workaround)
}
}
-fn generate_c_program(header_files: &[&str], intrinsic: &Intrinsic) -> String {
+fn generate_c_program(
+ header_files: &[&str],
+ intrinsic: &Intrinsic,
+ p64_armv7_workaround: bool,
+) -> String {
let constraints = intrinsic
.arguments
.iter()
@@ -75,7 +89,7 @@ fn generate_c_program(header_files: &[&str], intrinsic: &Intrinsic) -> String {
template<typename T1, typename T2> T1 cast(T2 x) {{
static_assert(sizeof(T1) == sizeof(T2), "sizeof T1 and T2 must be the same");
- T1 ret = 0;
+ T1 ret{{}};
memcpy(&ret, &x, sizeof(T1));
return ret;
}}
@@ -95,6 +109,8 @@ std::ostream& operator<<(std::ostream& os, poly128_t value) {{
}}
#endif
+{arglists}
+
int main(int argc, char **argv) {{
{passes}
return 0;
@@ -104,7 +120,13 @@ int main(int argc, char **argv) {{
.map(|header| format!("#include <{}>", header))
.collect::<Vec<_>>()
.join("\n"),
- passes = gen_code_c(intrinsic, constraints.as_slice(), Default::default()),
+ arglists = intrinsic.arguments.gen_arglists_c(PASSES),
+ passes = gen_code_c(
+ intrinsic,
+ constraints.as_slice(),
+ Default::default(),
+ p64_armv7_workaround
+ ),
)
}
@@ -131,10 +153,7 @@ fn gen_code_rust(intrinsic: &Intrinsic, constraints: &[&Argument], name: String)
})
.collect()
} else {
- (1..20)
- .map(|idx| intrinsic.generate_pass_rust(idx, &name))
- .collect::<Vec<_>>()
- .join("\n")
+ intrinsic.generate_loop_rust(&name, PASSES)
}
}
@@ -153,11 +172,14 @@ fn generate_rust_program(intrinsic: &Intrinsic, a32: bool) -> String {
#![allow(non_upper_case_globals)]
use core_arch::arch::{target_arch}::*;
+{arglists}
+
fn main() {{
{passes}
}}
"#,
target_arch = if a32 { "arm" } else { "aarch64" },
+ arglists = intrinsic.arguments.gen_arglists_rust(PASSES),
passes = gen_code_rust(intrinsic, &constraints, Default::default())
)
}
@@ -203,7 +225,7 @@ fn build_c(intrinsics: &Vec<Intrinsic>, compiler: &str, a32: bool) -> bool {
let c_filename = format!(r#"c_programs/{}.cpp"#, i.name);
let mut file = File::create(&c_filename).unwrap();
- let c_code = generate_c_program(&["arm_neon.h", "arm_acle.h"], &i);
+ let c_code = generate_c_program(&["arm_neon.h", "arm_acle.h"], &i, a32);
file.write_all(c_code.into_bytes().as_slice()).unwrap();
compile_c(&c_filename, &i, compiler, a32)
})
@@ -259,7 +281,7 @@ path = "{intrinsic}/main.rs""#,
.current_dir("rust_programs")
.arg("-c")
.arg(format!(
- "cargo {toolchain} build --target {target}",
+ "cargo {toolchain} build --target {target} --release",
toolchain = toolchain,
target = if a32 {
"armv7-unknown-linux-gnueabihf"
@@ -407,7 +429,7 @@ fn compare_outputs(intrinsics: &Vec<Intrinsic>, toolchain: &str, runner: &str, a
.current_dir("rust_programs")
.arg("-c")
.arg(format!(
- "cargo {toolchain} run --target {target} --bin {intrinsic}",
+ "cargo {toolchain} run --target {target} --bin {intrinsic} --release",
intrinsic = intrinsic.name,
toolchain = toolchain,
target = if a32 {
diff --git a/library/stdarch/crates/intrinsic-test/src/types.rs b/library/stdarch/crates/intrinsic-test/src/types.rs
index e51e61649..dd23586e7 100644
--- a/library/stdarch/crates/intrinsic-test/src/types.rs
+++ b/library/stdarch/crates/intrinsic-test/src/types.rs
@@ -1,7 +1,7 @@
use std::fmt;
use std::str::FromStr;
-use crate::values::values_for_pass;
+use crate::values::value_for_array;
use crate::Language;
#[derive(Debug, PartialEq, Copy, Clone)]
@@ -160,8 +160,7 @@ impl IntrinsicType {
}
}
- #[allow(unused)]
- fn c_scalar_type(&self) -> String {
+ pub fn c_scalar_type(&self) -> String {
format!(
"{prefix}{bits}_t",
prefix = self.kind().c_prefix(),
@@ -169,7 +168,7 @@ impl IntrinsicType {
)
}
- fn rust_scalar_type(&self) -> String {
+ pub fn rust_scalar_type(&self) -> String {
format!(
"{prefix}{bits}",
prefix = self.kind().rust_prefix(),
@@ -289,18 +288,19 @@ impl IntrinsicType {
}
}
- /// Generates a comma list of values that can be used to initialize an
- /// argument for the intrinsic call.
+ /// Generates a comma list of values that can be used to initialize the array that
+ /// an argument for the intrinsic call is loaded from.
/// This is deterministic based on the pass number.
///
- /// * `pass`: The pass index, i.e. the iteration index for the call to an intrinsic
+ /// * `loads`: The number of values that need to be loaded from the argument array
+ /// * e.g. for argument type uint32x2, loads=2 results in a string representing 3 32-bit values
///
/// Returns a string such as
/// * `0x1, 0x7F, 0xFF` if `language` is `Language::C`
/// * `0x1 as _, 0x7F as _, 0xFF as _` if `language` is `Language::Rust`
- pub fn populate_random(&self, pass: usize, language: &Language) -> String {
+ pub fn populate_random(&self, loads: u32, language: &Language) -> String {
match self {
- IntrinsicType::Ptr { child, .. } => child.populate_random(pass, language),
+ IntrinsicType::Ptr { child, .. } => child.populate_random(loads, language),
IntrinsicType::Type {
bit_len: Some(bit_len),
kind,
@@ -308,11 +308,11 @@ impl IntrinsicType {
vec_len,
..
} if kind == &TypeKind::Int || kind == &TypeKind::UInt || kind == &TypeKind::Poly => (0
- ..(simd_len.unwrap_or(1) * vec_len.unwrap_or(1)))
+ ..(simd_len.unwrap_or(1) * vec_len.unwrap_or(1) + loads - 1))
.map(|i| {
format!(
"{}{}",
- values_for_pass(*bit_len, i, pass),
+ value_for_array(*bit_len, i),
match language {
&Language::Rust => format!(" as {ty} ", ty = self.rust_scalar_type()),
&Language::C => String::from(""),
@@ -327,15 +327,15 @@ impl IntrinsicType {
simd_len,
vec_len,
..
- } => (0..(simd_len.unwrap_or(1) * vec_len.unwrap_or(1)))
+ } => (0..(simd_len.unwrap_or(1) * vec_len.unwrap_or(1) + loads - 1))
.map(|i| {
format!(
"{}({})",
match language {
- &Language::Rust => "f32::from_bits",
+ &Language::Rust => "std::mem::transmute",
&Language::C => "cast<float, uint32_t>",
},
- values_for_pass(32, i, pass),
+ value_for_array(32, i),
)
})
.collect::<Vec<_>>()
@@ -346,15 +346,15 @@ impl IntrinsicType {
simd_len,
vec_len,
..
- } => (0..(simd_len.unwrap_or(1) * vec_len.unwrap_or(1)))
+ } => (0..(simd_len.unwrap_or(1) * vec_len.unwrap_or(1) + loads - 1))
.map(|i| {
format!(
"{}({}{})",
match language {
- &Language::Rust => "f64::from_bits",
+ &Language::Rust => "std::mem::transmute",
&Language::C => "cast<double, uint64_t>",
},
- values_for_pass(64, i, pass),
+ value_for_array(64, i),
match language {
&Language::Rust => " as u64",
&Language::C => "",
@@ -368,10 +368,9 @@ impl IntrinsicType {
}
/// Determines the load function for this type.
- #[allow(unused)]
- pub fn get_load_function(&self) -> String {
+ pub fn get_load_function(&self, armv7_p64_workaround: bool) -> String {
match self {
- IntrinsicType::Ptr { child, .. } => child.get_load_function(),
+ IntrinsicType::Ptr { child, .. } => child.get_load_function(armv7_p64_workaround),
IntrinsicType::Type {
kind: k,
bit_len: Some(bl),
@@ -379,7 +378,7 @@ impl IntrinsicType {
vec_len,
..
} => {
- let quad = if (simd_len.unwrap_or(1) * bl) > 64 {
+ let quad = if simd_len.unwrap_or(1) * bl > 64 {
"q"
} else {
""
@@ -390,7 +389,8 @@ impl IntrinsicType {
TypeKind::UInt => "u",
TypeKind::Int => "s",
TypeKind::Float => "f",
- TypeKind::Poly => "p",
+ // The ACLE doesn't support 64-bit polynomial loads on Armv7
+ TypeKind::Poly => if armv7_p64_workaround && *bl == 64 {"s"} else {"p"},
x => todo!("get_load_function TypeKind: {:#?}", x),
},
size = bl,
diff --git a/library/stdarch/crates/intrinsic-test/src/values.rs b/library/stdarch/crates/intrinsic-test/src/values.rs
index 4565edca0..64b4d9fc9 100644
--- a/library/stdarch/crates/intrinsic-test/src/values.rs
+++ b/library/stdarch/crates/intrinsic-test/src/values.rs
@@ -1,9 +1,8 @@
-/// Gets a hex constant value for a single lane in in a determistic way
+/// Gets a hex constant value for a single value in the argument values array in a deterministic way
/// * `bits`: The number of bits for the type, only 8, 16, 32, 64 are valid values
-/// * `simd`: The index of the simd lane we are generating for
-/// * `pass`: The index of the pass we are generating the values for
-pub fn values_for_pass(bits: u32, simd: u32, pass: usize) -> String {
- let index = pass + (simd as usize);
+/// * `index`: The position in the array we are generating for
+pub fn value_for_array(bits: u32, index: u32) -> String {
+ let index = index as usize;
if bits == 8 {
format!("{:#X}", VALUES_8[index % VALUES_8.len()])
diff --git a/library/stdarch/crates/std_detect/Cargo.toml b/library/stdarch/crates/std_detect/Cargo.toml
index 1ca0d9c5d..3a482564e 100644
--- a/library/stdarch/crates/std_detect/Cargo.toml
+++ b/library/stdarch/crates/std_detect/Cargo.toml
@@ -22,7 +22,7 @@ maintenance = { status = "experimental" }
[dependencies]
libc = { version = "0.2", optional = true, default-features = false }
-cfg-if = "0.1.10"
+cfg-if = "1.0.0"
# When built as part of libstd
core = { version = "1.0.0", optional = true, package = "rustc-std-workspace-core" }
diff --git a/library/stdarch/crates/std_detect/src/detect/arch/aarch64.rs b/library/stdarch/crates/std_detect/src/detect/arch/aarch64.rs
index f32f961ae..5f46c7696 100644
--- a/library/stdarch/crates/std_detect/src/detect/arch/aarch64.rs
+++ b/library/stdarch/crates/std_detect/src/detect/arch/aarch64.rs
@@ -72,7 +72,8 @@ features! {
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] pmull: "pmull";
/// FEAT_PMULL (Polynomial Multiply)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] fp: "fp";
- /// FEAT_FP (Floating point support)
+ implied by target_features: ["neon"];
+ /// FEAT_FP (Floating point support) - Implied by `neon` target_feature
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] fp16: "fp16";
/// FEAT_FP16 (Half-float support)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] sve: "sve";
diff --git a/library/stdarch/crates/std_detect/src/detect/macros.rs b/library/stdarch/crates/std_detect/src/detect/macros.rs
index 7548c9780..a467f9db6 100644
--- a/library/stdarch/crates/std_detect/src/detect/macros.rs
+++ b/library/stdarch/crates/std_detect/src/detect/macros.rs
@@ -1,3 +1,15 @@
+#[macro_export]
+#[allow_internal_unstable(stdsimd)]
+macro_rules! detect_feature {
+ ($feature:tt, $feature_lit:tt) => {
+ $crate::detect_feature!($feature, $feature_lit : $feature_lit)
+ };
+ ($feature:tt, $feature_lit:tt : $($target_feature_lit:tt),*) => {
+ $(cfg!(target_feature = $target_feature_lit) ||)*
+ $crate::detect::__is_feature_detected::$feature()
+ };
+}
+
#[allow(unused)]
macro_rules! features {
(
@@ -7,7 +19,9 @@ macro_rules! features {
@MACRO_ATTRS: $(#[$macro_attrs:meta])*
$(@BIND_FEATURE_NAME: $bind_feature:tt; $feature_impl:tt; )*
$(@NO_RUNTIME_DETECTION: $nort_feature:tt; )*
- $(@FEATURE: #[$stability_attr:meta] $feature:ident: $feature_lit:tt; $(#[$feature_comment:meta])*)*
+ $(@FEATURE: #[$stability_attr:meta] $feature:ident: $feature_lit:tt;
+ $(implied by target_features: [$($target_feature_lit:tt),*];)?
+ $(#[$feature_comment:meta])*)*
) => {
#[macro_export]
$(#[$macro_attrs])*
@@ -17,12 +31,11 @@ macro_rules! features {
macro_rules! $macro_name {
$(
($feature_lit) => {
- cfg!(target_feature = $feature_lit) ||
- $crate::detect::__is_feature_detected::$feature()
+ $crate::detect_feature!($feature, $feature_lit $(: $($target_feature_lit),*)?)
};
)*
$(
- ($bind_feature) => { $macro_name!($feature_impl) };
+ ($bind_feature) => { $crate::$macro_name!($feature_impl) };
)*
$(
($nort_feature) => {
@@ -35,7 +48,7 @@ macro_rules! features {
};
)*
($t:tt,) => {
- $macro_name!($t);
+ $crate::$macro_name!($t);
};
($t:tt) => {
compile_error!(
@@ -66,7 +79,7 @@ macro_rules! features {
};
)*
$(
- ($bind_feature) => { $macro_name!($feature_impl) };
+ ($bind_feature) => { $crate::$macro_name!($feature_impl) };
)*
$(
($nort_feature) => {
@@ -79,7 +92,7 @@ macro_rules! features {
};
)*
($t:tt,) => {
- $macro_name!($t);
+ $crate::$macro_name!($t);
};
($t:tt) => {
compile_error!(
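(Aside: with the new `implied by target_features` clause, `is_aarch64_feature_detected!("fp")` can expand to a compile-time `cfg!` check on the implying feature OR'd with the runtime lookup. A conceptual sketch -- `runtime_detect_fp` is a made-up stand-in for `$crate::detect::__is_feature_detected::fp`:)

    // Made-up stand-in for the runtime detection path kept in std_detect's cache.
    fn runtime_detect_fp() -> bool {
        false
    }

    fn is_fp_detected() -> bool {
        // Roughly what the macro expands to for `fp`, which is implied by `neon`:
        // if the binary is already built with +neon, the check is statically true
        // and the runtime cache is never consulted.
        cfg!(target_feature = "neon") || runtime_detect_fp()
    }

    fn main() {
        println!("fp detected: {}", is_fp_detected());
    }
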
diff --git a/library/stdarch/crates/std_detect/src/detect/os/linux/aarch64.rs b/library/stdarch/crates/std_detect/src/detect/os/linux/aarch64.rs
index b6a2e5218..6c79ba86d 100644
--- a/library/stdarch/crates/std_detect/src/detect/os/linux/aarch64.rs
+++ b/library/stdarch/crates/std_detect/src/detect/os/linux/aarch64.rs
@@ -23,58 +23,62 @@ pub(crate) fn detect_features() -> cache::Initializer {
/// The names match those used for cpuinfo.
///
/// [hwcap]: https://github.com/torvalds/linux/blob/master/arch/arm64/include/uapi/asm/hwcap.h
+#[derive(Debug, Default, PartialEq)]
struct AtHwcap {
- fp: bool, // 0
- asimd: bool, // 1
- // evtstrm: bool, // 2 No LLVM support
- aes: bool, // 3
- pmull: bool, // 4
- sha1: bool, // 5
- sha2: bool, // 6
- crc32: bool, // 7
- atomics: bool, // 8
- fphp: bool, // 9
- asimdhp: bool, // 10
- // cpuid: bool, // 11 No LLVM support
- asimdrdm: bool, // 12
- jscvt: bool, // 13
- fcma: bool, // 14
- lrcpc: bool, // 15
- dcpop: bool, // 16
- sha3: bool, // 17
- sm3: bool, // 18
- sm4: bool, // 19
- asimddp: bool, // 20
- sha512: bool, // 21
- sve: bool, // 22
- fhm: bool, // 23
- dit: bool, // 24
- uscat: bool, // 25
- ilrcpc: bool, // 26
- flagm: bool, // 27
- ssbs: bool, // 28
- sb: bool, // 29
- paca: bool, // 30
- pacg: bool, // 31
- dcpodp: bool, // 32
- sve2: bool, // 33
- sveaes: bool, // 34
- // svepmull: bool, // 35 No LLVM support
- svebitperm: bool, // 36
- svesha3: bool, // 37
- svesm4: bool, // 38
- // flagm2: bool, // 39 No LLVM support
- frint: bool, // 40
- // svei8mm: bool, // 41 See i8mm feature
- svef32mm: bool, // 42
- svef64mm: bool, // 43
- // svebf16: bool, // 44 See bf16 feature
- i8mm: bool, // 45
- bf16: bool, // 46
- // dgh: bool, // 47 No LLVM support
- rng: bool, // 48
- bti: bool, // 49
- mte: bool, // 50
+ // AT_HWCAP
+ fp: bool,
+ asimd: bool,
+ // evtstrm: No LLVM support.
+ aes: bool,
+ pmull: bool,
+ sha1: bool,
+ sha2: bool,
+ crc32: bool,
+ atomics: bool,
+ fphp: bool,
+ asimdhp: bool,
+ // cpuid: No LLVM support.
+ asimdrdm: bool,
+ jscvt: bool,
+ fcma: bool,
+ lrcpc: bool,
+ dcpop: bool,
+ sha3: bool,
+ sm3: bool,
+ sm4: bool,
+ asimddp: bool,
+ sha512: bool,
+ sve: bool,
+ fhm: bool,
+ dit: bool,
+ uscat: bool,
+ ilrcpc: bool,
+ flagm: bool,
+ ssbs: bool,
+ sb: bool,
+ paca: bool,
+ pacg: bool,
+
+ // AT_HWCAP2
+ dcpodp: bool,
+ sve2: bool,
+ sveaes: bool,
+ // svepmull: No LLVM support.
+ svebitperm: bool,
+ svesha3: bool,
+ svesm4: bool,
+ // flagm2: No LLVM support.
+ frint: bool,
+ // svei8mm: See i8mm feature.
+ svef32mm: bool,
+ svef64mm: bool,
+ // svebf16: See bf16 feature.
+ i8mm: bool,
+ bf16: bool,
+ // dgh: No LLVM support.
+ rng: bool,
+ bti: bool,
+ mte: bool,
}
impl From<auxvec::AuxVec> for AtHwcap {
@@ -113,25 +117,25 @@ impl From<auxvec::AuxVec> for AtHwcap {
sb: bit::test(auxv.hwcap, 29),
paca: bit::test(auxv.hwcap, 30),
pacg: bit::test(auxv.hwcap, 31),
- dcpodp: bit::test(auxv.hwcap, 32),
- sve2: bit::test(auxv.hwcap, 33),
- sveaes: bit::test(auxv.hwcap, 34),
- // svepmull: bit::test(auxv.hwcap, 35),
- svebitperm: bit::test(auxv.hwcap, 36),
- svesha3: bit::test(auxv.hwcap, 37),
- svesm4: bit::test(auxv.hwcap, 38),
- // flagm2: bit::test(auxv.hwcap, 39),
- frint: bit::test(auxv.hwcap, 40),
- // svei8mm: bit::test(auxv.hwcap, 41),
- svef32mm: bit::test(auxv.hwcap, 42),
- svef64mm: bit::test(auxv.hwcap, 43),
- // svebf16: bit::test(auxv.hwcap, 44),
- i8mm: bit::test(auxv.hwcap, 45),
- bf16: bit::test(auxv.hwcap, 46),
- // dgh: bit::test(auxv.hwcap, 47),
- rng: bit::test(auxv.hwcap, 48),
- bti: bit::test(auxv.hwcap, 49),
- mte: bit::test(auxv.hwcap, 50),
+ dcpodp: bit::test(auxv.hwcap2, 0),
+ sve2: bit::test(auxv.hwcap2, 1),
+ sveaes: bit::test(auxv.hwcap2, 2),
+ // svepmull: bit::test(auxv.hwcap2, 3),
+ svebitperm: bit::test(auxv.hwcap2, 4),
+ svesha3: bit::test(auxv.hwcap2, 5),
+ svesm4: bit::test(auxv.hwcap2, 6),
+ // flagm2: bit::test(auxv.hwcap2, 7),
+ frint: bit::test(auxv.hwcap2, 8),
+ // svei8mm: bit::test(auxv.hwcap2, 9),
+ svef32mm: bit::test(auxv.hwcap2, 10),
+ svef64mm: bit::test(auxv.hwcap2, 11),
+ // svebf16: bit::test(auxv.hwcap2, 12),
+ i8mm: bit::test(auxv.hwcap2, 13),
+ bf16: bit::test(auxv.hwcap2, 14),
+ // dgh: bit::test(auxv.hwcap2, 15),
+ rng: bit::test(auxv.hwcap2, 16),
+ bti: bit::test(auxv.hwcap2, 17),
+ mte: bit::test(auxv.hwcap2, 18),
}
}
}
@@ -288,3 +292,86 @@ impl AtHwcap {
value
}
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[cfg(feature = "std_detect_file_io")]
+ mod auxv_from_file {
+ use super::auxvec::auxv_from_file;
+ use super::*;
+ // The baseline hwcaps used in the (artificial) auxv test files.
+ fn baseline_hwcaps() -> AtHwcap {
+ AtHwcap {
+ fp: true,
+ asimd: true,
+ aes: true,
+ pmull: true,
+ sha1: true,
+ sha2: true,
+ crc32: true,
+ atomics: true,
+ fphp: true,
+ asimdhp: true,
+ asimdrdm: true,
+ lrcpc: true,
+ dcpop: true,
+ asimddp: true,
+ ssbs: true,
+ ..AtHwcap::default()
+ }
+ }
+
+ #[test]
+ fn linux_empty_hwcap2_aarch64() {
+ let file = concat!(
+ env!("CARGO_MANIFEST_DIR"),
+ "/src/detect/test_data/linux-empty-hwcap2-aarch64.auxv"
+ );
+ println!("file: {}", file);
+ let v = auxv_from_file(file).unwrap();
+ println!("HWCAP : 0x{:0x}", v.hwcap);
+ println!("HWCAP2: 0x{:0x}", v.hwcap2);
+ assert_eq!(AtHwcap::from(v), baseline_hwcaps());
+ }
+ #[test]
+ fn linux_no_hwcap2_aarch64() {
+ let file = concat!(
+ env!("CARGO_MANIFEST_DIR"),
+ "/src/detect/test_data/linux-no-hwcap2-aarch64.auxv"
+ );
+ println!("file: {}", file);
+ let v = auxv_from_file(file).unwrap();
+ println!("HWCAP : 0x{:0x}", v.hwcap);
+ println!("HWCAP2: 0x{:0x}", v.hwcap2);
+ assert_eq!(AtHwcap::from(v), baseline_hwcaps());
+ }
+ #[test]
+ fn linux_hwcap2_aarch64() {
+ let file = concat!(
+ env!("CARGO_MANIFEST_DIR"),
+ "/src/detect/test_data/linux-hwcap2-aarch64.auxv"
+ );
+ println!("file: {}", file);
+ let v = auxv_from_file(file).unwrap();
+ println!("HWCAP : 0x{:0x}", v.hwcap);
+ println!("HWCAP2: 0x{:0x}", v.hwcap2);
+ assert_eq!(
+ AtHwcap::from(v),
+ AtHwcap {
+ // Some other HWCAP bits.
+ paca: true,
+ pacg: true,
+ // HWCAP2-only bits.
+ dcpodp: true,
+ frint: true,
+ rng: true,
+ bti: true,
+ mte: true,
+ ..baseline_hwcaps()
+ }
+ );
+ }
+ }
+}
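(Aside: the fix reads the capability bits where the kernel actually reports them -- `dcpodp` through `mte` live in bits 0..=18 of AT_HWCAP2, not bits 32..=50 of AT_HWCAP. A minimal sketch of the bit test involved, with invented auxv values:)

    // Mirrors the `bit::test` helper used by std_detect.
    fn bit_test(x: u64, bit: u32) -> bool {
        x & (1 << bit) != 0
    }

    fn main() {
        // Invented example values for the two auxv words.
        let hwcap: u64 = 0b11;                  // fp (bit 0), asimd (bit 1)
        let hwcap2: u64 = (1 << 0) | (1 << 18); // dcpodp (bit 0), mte (bit 18)

        assert!(bit_test(hwcap, 0));   // fp
        assert!(bit_test(hwcap2, 0));  // dcpodp: HWCAP2 bit 0, not HWCAP bit 32
        assert!(bit_test(hwcap2, 18)); // mte: HWCAP2 bit 18, not HWCAP bit 50
        assert!(!bit_test(hwcap, 50)); // the old, incorrect location stays clear
        println!("hwcap = {hwcap:#x}, hwcap2 = {hwcap2:#x}");
    }
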
diff --git a/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs b/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs
index e6447d0cd..c903903bd 100644
--- a/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs
+++ b/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs
@@ -7,6 +7,7 @@ pub(crate) const AT_NULL: usize = 0;
pub(crate) const AT_HWCAP: usize = 16;
/// Key to access the CPU Hardware capabilities 2 bitfield.
#[cfg(any(
+ target_arch = "aarch64",
target_arch = "arm",
target_arch = "powerpc",
target_arch = "powerpc64"
@@ -21,6 +22,7 @@ pub(crate) const AT_HWCAP2: usize = 26;
pub(crate) struct AuxVec {
pub hwcap: usize,
#[cfg(any(
+ target_arch = "aarch64",
target_arch = "arm",
target_arch = "powerpc",
target_arch = "powerpc64"
@@ -64,13 +66,14 @@ pub(crate) fn auxv() -> Result<AuxVec, ()> {
if let Ok(hwcap) = getauxval(AT_HWCAP) {
// Targets with only AT_HWCAP:
#[cfg(any(
- target_arch = "aarch64",
target_arch = "riscv32",
target_arch = "riscv64",
target_arch = "mips",
target_arch = "mips64"
))]
{
+ // Zero could indicate that no features were detected, but it's also used to
+ // indicate an error. In either case, try the fallback.
if hwcap != 0 {
return Ok(AuxVec { hwcap });
}
@@ -78,13 +81,18 @@ pub(crate) fn auxv() -> Result<AuxVec, ()> {
// Targets with AT_HWCAP and AT_HWCAP2:
#[cfg(any(
+ target_arch = "aarch64",
target_arch = "arm",
target_arch = "powerpc",
target_arch = "powerpc64"
))]
{
if let Ok(hwcap2) = getauxval(AT_HWCAP2) {
- if hwcap != 0 && hwcap2 != 0 {
+ // Zero could indicate that no features were detected, but it's also used to
+ // indicate an error. In particular, on many platforms AT_HWCAP2 will be
+ // legitimately zero, since it contains the most recent feature flags. Use the
+ // fallback only if no features were detected at all.
+ if hwcap != 0 || hwcap2 != 0 {
return Ok(AuxVec { hwcap, hwcap2 });
}
}
@@ -97,7 +105,6 @@ pub(crate) fn auxv() -> Result<AuxVec, ()> {
{
// Targets with only AT_HWCAP:
#[cfg(any(
- target_arch = "aarch64",
target_arch = "riscv32",
target_arch = "riscv64",
target_arch = "mips",
@@ -105,6 +112,8 @@ pub(crate) fn auxv() -> Result<AuxVec, ()> {
))]
{
let hwcap = unsafe { libc::getauxval(AT_HWCAP as libc::c_ulong) as usize };
+ // Zero could indicate that no features were detected, but it's also used to indicate
+ // an error. In either case, try the fallback.
if hwcap != 0 {
return Ok(AuxVec { hwcap });
}
@@ -112,6 +121,7 @@ pub(crate) fn auxv() -> Result<AuxVec, ()> {
// Targets with AT_HWCAP and AT_HWCAP2:
#[cfg(any(
+ target_arch = "aarch64",
target_arch = "arm",
target_arch = "powerpc",
target_arch = "powerpc64"
@@ -119,7 +129,11 @@ pub(crate) fn auxv() -> Result<AuxVec, ()> {
{
let hwcap = unsafe { libc::getauxval(AT_HWCAP as libc::c_ulong) as usize };
let hwcap2 = unsafe { libc::getauxval(AT_HWCAP2 as libc::c_ulong) as usize };
- if hwcap != 0 && hwcap2 != 0 {
+ // Zero could indicate that no features were detected, but it's also used to indicate
+ // an error. In particular, on many platforms AT_HWCAP2 will be legitimately zero,
+ // since it contains the most recent feature flags. Use the fallback only if no
+ // features were detected at all.
+ if hwcap != 0 || hwcap2 != 0 {
return Ok(AuxVec { hwcap, hwcap2 });
}
}
@@ -158,7 +172,7 @@ fn getauxval(key: usize) -> Result<usize, ()> {
/// Tries to read the auxiliary vector from the `file`. If this fails, this
/// function returns `Err`.
#[cfg(feature = "std_detect_file_io")]
-fn auxv_from_file(file: &str) -> Result<AuxVec, ()> {
+pub(super) fn auxv_from_file(file: &str) -> Result<AuxVec, ()> {
let file = super::read_file(file)?;
// See <https://github.com/torvalds/linux/blob/v3.19/include/uapi/linux/auxvec.h>.
@@ -181,7 +195,6 @@ fn auxv_from_file(file: &str) -> Result<AuxVec, ()> {
fn auxv_from_buf(buf: &[usize; 64]) -> Result<AuxVec, ()> {
// Targets with only AT_HWCAP:
#[cfg(any(
- target_arch = "aarch64",
target_arch = "riscv32",
target_arch = "riscv64",
target_arch = "mips",
@@ -198,23 +211,25 @@ fn auxv_from_buf(buf: &[usize; 64]) -> Result<AuxVec, ()> {
}
// Targets with AT_HWCAP and AT_HWCAP2:
#[cfg(any(
+ target_arch = "aarch64",
target_arch = "arm",
target_arch = "powerpc",
target_arch = "powerpc64"
))]
{
let mut hwcap = None;
- let mut hwcap2 = None;
+ // For some platforms, AT_HWCAP2 was added recently, so let it default to zero.
+ let mut hwcap2 = 0;
for el in buf.chunks(2) {
match el[0] {
AT_NULL => break,
AT_HWCAP => hwcap = Some(el[1]),
- AT_HWCAP2 => hwcap2 = Some(el[1]),
+ AT_HWCAP2 => hwcap2 = el[1],
_ => (),
}
}
- if let (Some(hwcap), Some(hwcap2)) = (hwcap, hwcap2) {
+ if let Some(hwcap) = hwcap {
return Ok(AuxVec { hwcap, hwcap2 });
}
}
@@ -256,7 +271,6 @@ mod tests {
// FIXME: on mips/mips64 getauxval returns 0, and /proc/self/auxv
// does not always contain the AT_HWCAP key under qemu.
#[cfg(any(
- target_arch = "aarch64",
target_arch = "arm",
target_arch = "powerpc",
target_arch = "powerpc64"
@@ -271,6 +285,7 @@ mod tests {
// Targets with AT_HWCAP and AT_HWCAP2:
#[cfg(any(
+ target_arch = "aarch64",
target_arch = "arm",
target_arch = "powerpc",
target_arch = "powerpc64"
@@ -305,24 +320,31 @@ mod tests {
}
#[test]
- #[should_panic]
fn linux_macos_vb() {
let file = concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/macos-virtualbox-linux-x86-4850HQ.auxv");
println!("file: {}", file);
+ // The file contains HWCAP but not HWCAP2. In that case, we treat HWCAP2 as zero.
let v = auxv_from_file(file).unwrap();
- // this file is incomplete (contains hwcap but not hwcap2), we
- // want to fall back to /proc/cpuinfo in this case, so
- // reading should fail. assert_eq!(v.hwcap, 126614527);
- // assert_eq!(v.hwcap2, 0);
- let _ = v;
+ assert_eq!(v.hwcap, 126614527);
+ assert_eq!(v.hwcap2, 0);
}
} else if #[cfg(target_arch = "aarch64")] {
#[test]
- fn linux_x64() {
- let file = concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-x64-i7-6850k.auxv");
+ fn linux_artificial_aarch64() {
+ let file = concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-artificial-aarch64.auxv");
println!("file: {}", file);
let v = auxv_from_file(file).unwrap();
- assert_eq!(v.hwcap, 3219913727);
+ assert_eq!(v.hwcap, 0x0123456789abcdef);
+ assert_eq!(v.hwcap2, 0x02468ace13579bdf);
+ }
+ #[test]
+ fn linux_no_hwcap2_aarch64() {
+ let file = concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-no-hwcap2-aarch64.auxv");
+ println!("file: {}", file);
+ let v = auxv_from_file(file).unwrap();
+ // An absent HWCAP2 is treated as zero, and does not prevent acceptance of HWCAP.
+ assert_ne!(v.hwcap, 0);
+ assert_eq!(v.hwcap2, 0);
}
}
}
@@ -353,6 +375,7 @@ mod tests {
// Targets with AT_HWCAP and AT_HWCAP2:
#[cfg(any(
+ target_arch = "aarch64",
target_arch = "arm",
target_arch = "powerpc",
target_arch = "powerpc64"
diff --git a/library/stdarch/crates/std_detect/src/detect/test_data/linux-artificial-aarch64.auxv b/library/stdarch/crates/std_detect/src/detect/test_data/linux-artificial-aarch64.auxv
new file mode 100644
index 000000000..ec826afcf
--- /dev/null
+++ b/library/stdarch/crates/std_detect/src/detect/test_data/linux-artificial-aarch64.auxv
Binary files differ
diff --git a/library/stdarch/crates/std_detect/src/detect/test_data/linux-empty-hwcap2-aarch64.auxv b/library/stdarch/crates/std_detect/src/detect/test_data/linux-empty-hwcap2-aarch64.auxv
new file mode 100644
index 000000000..95537b73f
--- /dev/null
+++ b/library/stdarch/crates/std_detect/src/detect/test_data/linux-empty-hwcap2-aarch64.auxv
Binary files differ
diff --git a/library/stdarch/crates/std_detect/src/detect/test_data/linux-hwcap2-aarch64.auxv b/library/stdarch/crates/std_detect/src/detect/test_data/linux-hwcap2-aarch64.auxv
new file mode 100644
index 000000000..1d87264b2
--- /dev/null
+++ b/library/stdarch/crates/std_detect/src/detect/test_data/linux-hwcap2-aarch64.auxv
Binary files differ
diff --git a/library/stdarch/crates/std_detect/src/detect/test_data/linux-no-hwcap2-aarch64.auxv b/library/stdarch/crates/std_detect/src/detect/test_data/linux-no-hwcap2-aarch64.auxv
new file mode 100644
index 000000000..35f01cc76
--- /dev/null
+++ b/library/stdarch/crates/std_detect/src/detect/test_data/linux-no-hwcap2-aarch64.auxv
Binary files differ
diff --git a/library/stdarch/crates/std_detect/src/detect/test_data/linux-x64-i7-6850k.auxv b/library/stdarch/crates/std_detect/src/detect/test_data/linux-x64-i7-6850k.auxv
deleted file mode 100644
index 6afe1b3b4..000000000
--- a/library/stdarch/crates/std_detect/src/detect/test_data/linux-x64-i7-6850k.auxv
+++ /dev/null
Binary files differ
diff --git a/library/stdarch/crates/stdarch-gen/neon.spec b/library/stdarch/crates/stdarch-gen/neon.spec
index 68a50fbe9..95fbc354c 100644
--- a/library/stdarch/crates/stdarch-gen/neon.spec
+++ b/library/stdarch/crates/stdarch-gen/neon.spec
@@ -1570,10 +1570,10 @@ name = vext
constn = N
multi_fn = static_assert_imm-out_exp_len-N
multi_fn = matchn-out_exp_len-N, simd_shuffle-out_len-!, a, b, {asc-n-out_len}
-a = 0, 8, 8, 9, 8, 9, 9, 11, 8, 9, 9, 11, 9, 11, 14, 15
-b = 9, 11, 14, 15, 16, 17, 18, 19, 0, 8, 8, 9, 8, 9, 9, 11
-n = HFLEN
-validate 8, 9, 9, 11, 9, 11, 14, 15, 9, 11, 14, 15, 16, 17, 18, 19
+a = 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+b = 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
+n = LEN_M1
+validate 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
arm = "vext.8"
aarch64 = ext
@@ -1584,10 +1584,10 @@ name = vext
constn = N
multi_fn = static_assert_imm-out_exp_len-N
multi_fn = matchn-out_exp_len-N, simd_shuffle-out_len-!, a, b, {asc-n-out_len}
-a = 0, 8, 8, 9, 8, 9, 9, 11, 8, 9, 9, 11, 9, 11, 14, 15
-b = 9, 11, 14, 15, 16, 17, 18, 19, 0, 8, 8, 9, 8, 9, 9, 11
-n = HFLEN
-validate 8, 9, 9, 11, 9, 11, 14, 15, 9, 11, 14, 15, 16, 17, 18, 19
+a = 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+b = 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
+n = LEN_M1
+validate 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
aarch64 = ext
generate poly64x2_t
@@ -1600,10 +1600,10 @@ name = vext
constn = N
multi_fn = static_assert_imm-out_exp_len-N
multi_fn = matchn-out_exp_len-N, simd_shuffle-out_len-!, a, b, {asc-n-out_len}
-a = 0., 2., 2., 3.
-b = 3., 4., 5., 6.,
-n = HFLEN
-validate 2., 3., 3., 4.
+a = 1., 1., 1., 1.
+b = 2., 2., 2., 2.,
+n = LEN_M1
+validate 1., 2., 2., 2.
aarch64 = ext
generate float64x2_t
diff --git a/library/stdarch/crates/stdarch-gen/src/main.rs b/library/stdarch/crates/stdarch-gen/src/main.rs
index a2ae250a7..d2f865753 100644
--- a/library/stdarch/crates/stdarch-gen/src/main.rs
+++ b/library/stdarch/crates/stdarch-gen/src/main.rs
@@ -856,6 +856,40 @@ fn type_len_str(t: &str) -> &'static str {
}
}
+fn type_len_minus_one_str(t: &str) -> &'static str {
+ match t {
+ "int8x8_t" => "7",
+ "int8x16_t" => "15",
+ "int16x4_t" => "3",
+ "int16x8_t" => "7",
+ "int32x2_t" => "1",
+ "int32x4_t" => "3",
+ "int64x1_t" => "0",
+ "int64x2_t" => "1",
+ "uint8x8_t" => "7",
+ "uint8x16_t" => "15",
+ "uint16x4_t" => "3",
+ "uint16x8_t" => "7",
+ "uint32x2_t" => "1",
+ "uint32x4_t" => "3",
+ "uint64x1_t" => "0",
+ "uint64x2_t" => "1",
+ "float16x4_t" => "3",
+ "float16x8_t" => "7",
+ "float32x2_t" => "1",
+ "float32x4_t" => "3",
+ "float64x1_t" => "0",
+ "float64x2_t" => "1",
+ "poly8x8_t" => "7",
+ "poly8x16_t" => "15",
+ "poly16x4_t" => "3",
+ "poly16x8_t" => "7",
+ "poly64x1_t" => "0",
+ "poly64x2_t" => "1",
+ _ => panic!("unknown type: {}", t),
+ }
+}
+
fn type_half_len_str(t: &str) -> &'static str {
match t {
"int8x8_t" => "4",
@@ -901,6 +935,7 @@ fn map_val<'v>(t: &str, v: &'v str) -> &'v str {
"BITS_M1" => bits_minus_one(t),
"HFBITS" => half_bits(t),
"LEN" => type_len_str(t),
+ "LEN_M1" => type_len_minus_one_str(t),
"HFLEN" => type_half_len_str(t),
o => o,
}
@@ -971,6 +1006,15 @@ fn is_vstx(name: &str) -> bool {
&& (s[1].starts_with("s") || s[1].starts_with("f"))
}
+fn create_doc_string(comment_string: &str, fn_name: &str) -> String {
+ format!(
+ r#"{}
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/{})"#,
+ comment_string, fn_name
+ )
+}
+
#[allow(clippy::too_many_arguments)]
fn gen_aarch64(
current_comment: &str,
@@ -1374,6 +1418,7 @@ fn gen_aarch64(
RDM => String::from("\n#[stable(feature = \"rdm_intrinsics\", since = \"1.62.0\")]"),
_ => String::new(),
};
+ let function_doc = create_doc_string(current_comment, &name);
let function = format!(
r#"
{}
@@ -1384,7 +1429,7 @@ fn gen_aarch64(
{}
}}
"#,
- current_comment,
+ function_doc,
current_target,
current_aarch64,
const_assert,
@@ -2342,6 +2387,7 @@ fn gen_arm(
RDM => String::from("\n#[stable(feature = \"rdm_intrinsics\", since = \"1.62.0\")]"),
_ => String::new(),
};
+ let function_doc = create_doc_string(current_comment, &name);
format!(
r#"
{}
@@ -2358,13 +2404,13 @@ fn gen_arm(
#[cfg_attr(test, assert_instr({}{}))]{}{}
{}
"#,
- current_comment,
+ function_doc,
current_target_arm,
expand_intrinsic(&current_arm, in_t[1]),
const_assert,
const_legacy,
call_arm,
- current_comment,
+ function_doc,
current_target_aarch64,
expand_intrinsic(&current_aarch64, in_t[1]),
const_assert,
@@ -2410,6 +2456,7 @@ fn gen_arm(
RDM => String::from("\n#[cfg_attr(target_arch = \"aarch64\", stable(feature = \"rdm_intrinsics\", since = \"1.62.0\"))]"),
_ => String::new(),
};
+ let function_doc = create_doc_string(current_comment, &name);
format!(
r#"
{}
@@ -2420,7 +2467,7 @@ fn gen_arm(
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr({}{}))]{}{}
{}
"#,
- current_comment,
+ function_doc,
current_target_aarch64,
current_target_arm,
expand_intrinsic(&current_arm, in_t[1]),
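(Aside: `create_doc_string` only decorates the existing comment with a link into Arm's intrinsics reference, keyed by the generated function's name. Reproducing it standalone shows the output shape; `vadd_u8` is just an example name:)

    fn create_doc_string(comment_string: &str, fn_name: &str) -> String {
        format!(
            "{}\n///\n/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/{})",
            comment_string, fn_name
        )
    }

    fn main() {
        // Appends a blank doc line and the per-intrinsic link after the comment.
        println!("{}", create_doc_string("/// Vector add", "vadd_u8"));
    }
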
diff --git a/library/stdarch/crates/stdarch-test/Cargo.toml b/library/stdarch/crates/stdarch-test/Cargo.toml
index 9ac1057be..012b4e959 100644
--- a/library/stdarch/crates/stdarch-test/Cargo.toml
+++ b/library/stdarch/crates/stdarch-test/Cargo.toml
@@ -10,7 +10,7 @@ simd-test-macro = { path = "../simd-test-macro" }
cc = "1.0"
lazy_static = "1.0"
rustc-demangle = "0.1.8"
-cfg-if = "0.1"
+cfg-if = "1.0"
# We use a crates.io dependency to disassemble wasm binaries to look for
# instructions for `#[assert_instr]`. Note that we use an `=` dependency here
diff --git a/library/stdarch/crates/stdarch-test/src/lib.rs b/library/stdarch/crates/stdarch-test/src/lib.rs
index 078736c66..eba17771c 100644
--- a/library/stdarch/crates/stdarch-test/src/lib.rs
+++ b/library/stdarch/crates/stdarch-test/src/lib.rs
@@ -3,7 +3,6 @@
//! This basically just disassembles the current executable and then parses the
//! output once globally and then provides the `assert` function which makes
//! assertions about the disassembly of a function.
-#![feature(bench_black_box)] // For black_box
#![deny(rust_2018_idioms)]
#![allow(clippy::missing_docs_in_private_items, clippy::print_stdout)]
diff --git a/library/stdarch/examples/hex.rs b/library/stdarch/examples/hex.rs
index 812836d66..d982a71b9 100644
--- a/library/stdarch/examples/hex.rs
+++ b/library/stdarch/examples/hex.rs
@@ -76,7 +76,7 @@ unsafe fn hex_encode_avx2<'a>(mut src: &[u8], dst: &'a mut [u8]) -> Result<&'a s
let ascii_a = _mm256_set1_epi8((b'a' - 9 - 1) as i8);
let and4bits = _mm256_set1_epi8(0xf);
- let mut i = 0_isize;
+ let mut i = 0_usize;
while src.len() >= 32 {
let invec = _mm256_loadu_si256(src.as_ptr() as *const _);
@@ -96,18 +96,17 @@ unsafe fn hex_encode_avx2<'a>(mut src: &[u8], dst: &'a mut [u8]) -> Result<&'a s
let res2 = _mm256_unpackhi_epi8(masked2, masked1);
// Store everything into the right destination now
- let base = dst.as_mut_ptr().offset(i * 2);
- let base1 = base.offset(0) as *mut _;
- let base2 = base.offset(16) as *mut _;
- let base3 = base.offset(32) as *mut _;
- let base4 = base.offset(48) as *mut _;
+ let base = dst.as_mut_ptr().add(i * 2);
+ let base1 = base.add(0) as *mut _;
+ let base2 = base.add(16) as *mut _;
+ let base3 = base.add(32) as *mut _;
+ let base4 = base.add(48) as *mut _;
_mm256_storeu2_m128i(base3, base1, res1);
_mm256_storeu2_m128i(base4, base2, res2);
src = &src[32..];
i += 32;
}
- let i = i as usize;
let _ = hex_encode_sse41(src, &mut dst[i * 2..]);
Ok(str::from_utf8_unchecked(&dst[..src.len() * 2 + i * 2]))
@@ -122,7 +121,7 @@ unsafe fn hex_encode_sse41<'a>(mut src: &[u8], dst: &'a mut [u8]) -> Result<&'a
let ascii_a = _mm_set1_epi8((b'a' - 9 - 1) as i8);
let and4bits = _mm_set1_epi8(0xf);
- let mut i = 0_isize;
+ let mut i = 0_usize;
while src.len() >= 16 {
let invec = _mm_loadu_si128(src.as_ptr() as *const _);
@@ -141,13 +140,12 @@ unsafe fn hex_encode_sse41<'a>(mut src: &[u8], dst: &'a mut [u8]) -> Result<&'a
let res1 = _mm_unpacklo_epi8(masked2, masked1);
let res2 = _mm_unpackhi_epi8(masked2, masked1);
- _mm_storeu_si128(dst.as_mut_ptr().offset(i * 2) as *mut _, res1);
- _mm_storeu_si128(dst.as_mut_ptr().offset(i * 2 + 16) as *mut _, res2);
+ _mm_storeu_si128(dst.as_mut_ptr().add(i * 2) as *mut _, res1);
+ _mm_storeu_si128(dst.as_mut_ptr().add(i * 2 + 16) as *mut _, res2);
src = &src[16..];
i += 16;
}
- let i = i as usize;
let _ = hex_encode_fallback(src, &mut dst[i * 2..]);
Ok(str::from_utf8_unchecked(&dst[..src.len() * 2 + i * 2]))
@@ -163,7 +161,7 @@ unsafe fn hex_encode_simd128<'a>(mut src: &[u8], dst: &'a mut [u8]) -> Result<&'
let ascii_a = u8x16_splat(b'a' - 9 - 1);
let and4bits = u8x16_splat(0xf);
- let mut i = 0_isize;
+ let mut i = 0_usize;
while src.len() >= 16 {
let invec = v128_load(src.as_ptr() as *const _);
@@ -189,13 +187,12 @@ unsafe fn hex_encode_simd128<'a>(mut src: &[u8], dst: &'a mut [u8]) -> Result<&'
masked2, masked1,
);
- v128_store(dst.as_mut_ptr().offset(i * 2) as *mut _, res1);
- v128_store(dst.as_mut_ptr().offset(i * 2 + 16) as *mut _, res2);
+ v128_store(dst.as_mut_ptr().add(i * 2) as *mut _, res1);
+ v128_store(dst.as_mut_ptr().add(i * 2 + 16) as *mut _, res2);
src = &src[16..];
i += 16;
}
- let i = i as usize;
let _ = hex_encode_fallback(src, &mut dst[i * 2..]);
Ok(str::from_utf8_unchecked(&dst[..src.len() * 2 + i * 2]))
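
Editor's note: the hex example now tracks the write offset as a `usize` and uses `pointer::add` instead of `offset`, which removes the `let i = i as usize;` casts after each loop. A minimal sketch of the equivalence, independent of the SIMD code above:

// `ptr.add(n)` takes an unsigned element count; `ptr.offset(n as isize)` takes
// a signed one. For in-bounds offsets the two produce the same pointer.
fn main() {
    let dst = [0u8; 64];
    let base = dst.as_ptr();
    let i: usize = 16;
    unsafe {
        let via_add = base.add(i * 2);
        let via_offset = base.offset((i * 2) as isize);
        assert_eq!(via_add, via_offset);
    }
}
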
diff --git a/library/test/src/bench.rs b/library/test/src/bench.rs
index 7869ba2c0..23925e6ea 100644
--- a/library/test/src/bench.rs
+++ b/library/test/src/bench.rs
@@ -49,12 +49,12 @@ impl Bencher {
self.summary = Some(iter(&mut inner));
}
- pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
+ pub fn bench<F>(&mut self, mut f: F) -> Result<Option<stats::Summary>, String>
where
- F: FnMut(&mut Bencher),
+ F: FnMut(&mut Bencher) -> Result<(), String>,
{
- f(self);
- self.summary
+ let result = f(self);
+ result.map(|_| self.summary)
}
}
@@ -195,7 +195,7 @@ pub fn benchmark<F>(
nocapture: bool,
f: F,
) where
- F: FnMut(&mut Bencher),
+ F: FnMut(&mut Bencher) -> Result<(), String>,
{
let mut bs = Bencher { mode: BenchMode::Auto, summary: None, bytes: 0 };
@@ -211,14 +211,14 @@ pub fn benchmark<F>(
let test_result = match result {
//bs.bench(f) {
- Ok(Some(ns_iter_summ)) => {
+ Ok(Ok(Some(ns_iter_summ))) => {
let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
let mb_s = bs.bytes * 1000 / ns_iter;
let bs = BenchSamples { ns_iter_summ, mb_s: mb_s as usize };
TestResult::TrBench(bs)
}
- Ok(None) => {
+ Ok(Ok(None)) => {
// iter not called, so no data.
// FIXME: error in this case?
let samples: &mut [f64] = &mut [0.0_f64; 1];
@@ -226,6 +226,7 @@ pub fn benchmark<F>(
TestResult::TrBench(bs)
}
Err(_) => TestResult::TrFailed,
+ Ok(Err(_)) => TestResult::TrFailed,
};
let stdout = data.lock().unwrap().to_vec();
@@ -233,10 +234,10 @@ pub fn benchmark<F>(
monitor_ch.send(message).unwrap();
}
-pub fn run_once<F>(f: F)
+pub fn run_once<F>(f: F) -> Result<(), String>
where
- F: FnMut(&mut Bencher),
+ F: FnMut(&mut Bencher) -> Result<(), String>,
{
let mut bs = Bencher { mode: BenchMode::Single, summary: None, bytes: 0 };
- bs.bench(f);
+ bs.bench(f).map(|_| ())
}
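
Editor's note: `Bencher::bench` now threads the closure's `Result` through, so a failing benchmark body can surface an error string instead of having to panic. A self-contained sketch of that Result-threading, using a stand-in `MiniBencher` so the snippet compiles outside libtest (the real types are `test::Bencher` and `stats::Summary`):

struct MiniBencher {
    summary: Option<u64>, // stand-in for stats::Summary
}

impl MiniBencher {
    fn iter<T>(&mut self, mut inner: impl FnMut() -> T) {
        inner();
        self.summary = Some(1);
    }

    // Mirrors the new Bencher::bench: the closure's error short-circuits,
    // otherwise the recorded summary is returned.
    fn bench<F>(&mut self, mut f: F) -> Result<Option<u64>, String>
    where
        F: FnMut(&mut MiniBencher) -> Result<(), String>,
    {
        let result = f(self);
        result.map(|_| self.summary)
    }
}

fn main() {
    let mut b = MiniBencher { summary: None };
    assert_eq!(b.bench(|b| { b.iter(|| 2 + 2); Ok(()) }), Ok(Some(1)));
    assert_eq!(b.bench(|_| Err("boom".to_string())), Err("boom".to_string()));
}
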
diff --git a/library/test/src/cli.rs b/library/test/src/cli.rs
index f981b9c49..8be32183f 100644
--- a/library/test/src/cli.rs
+++ b/library/test/src/cli.rs
@@ -3,9 +3,9 @@
use std::env;
use std::path::PathBuf;
-use super::helpers::isatty;
use super::options::{ColorConfig, Options, OutputFormat, RunIgnored};
use super::time::TestTimeOptions;
+use std::io::{self, IsTerminal};
#[derive(Debug)]
pub struct TestOpts {
@@ -32,7 +32,7 @@ pub struct TestOpts {
impl TestOpts {
pub fn use_color(&self) -> bool {
match self.color {
- ColorConfig::AutoColor => !self.nocapture && isatty::stdout_isatty(),
+ ColorConfig::AutoColor => !self.nocapture && io::stdout().is_terminal(),
ColorConfig::AlwaysColor => true,
ColorConfig::NeverColor => false,
}
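
Editor's note: the hand-rolled `isatty` helper is replaced by the `IsTerminal` trait, so the color decision no longer needs per-platform FFI. A minimal sketch of the trait as used in `use_color` (inside libtest this still requires the `is_terminal` feature gate added in lib.rs below; on current stable Rust it does not):

use std::io::{self, IsTerminal};

fn main() {
    // Mirrors TestOpts::use_color's AutoColor arm: only colorize when stdout
    // is attached to a terminal.
    let use_color = io::stdout().is_terminal();
    println!("auto color: {use_color}");
}
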
diff --git a/library/test/src/console.rs b/library/test/src/console.rs
index e9dda9896..b1270c272 100644
--- a/library/test/src/console.rs
+++ b/library/test/src/console.rs
@@ -147,7 +147,7 @@ pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Res
let mut ntest = 0;
let mut nbench = 0;
- for test in filter_tests(&opts, tests) {
+ for test in filter_tests(&opts, tests).into_iter() {
use crate::TestFn::*;
let TestDescAndFn { desc: TestDesc { name, .. }, testfn } = test;
diff --git a/library/test/src/helpers/isatty.rs b/library/test/src/helpers/isatty.rs
deleted file mode 100644
index 874ecc376..000000000
--- a/library/test/src/helpers/isatty.rs
+++ /dev/null
@@ -1,32 +0,0 @@
-//! Helper module which provides a function to test
-//! if stdout is a tty.
-
-cfg_if::cfg_if! {
- if #[cfg(unix)] {
- pub fn stdout_isatty() -> bool {
- unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
- }
- } else if #[cfg(windows)] {
- pub fn stdout_isatty() -> bool {
- type DWORD = u32;
- type BOOL = i32;
- type HANDLE = *mut u8;
- type LPDWORD = *mut u32;
- const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
- extern "system" {
- fn GetStdHandle(which: DWORD) -> HANDLE;
- fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
- }
- unsafe {
- let handle = GetStdHandle(STD_OUTPUT_HANDLE);
- let mut out = 0;
- GetConsoleMode(handle, &mut out) != 0
- }
- }
- } else {
- // FIXME: Implement isatty on SGX
- pub fn stdout_isatty() -> bool {
- false
- }
- }
-}
diff --git a/library/test/src/helpers/mod.rs b/library/test/src/helpers/mod.rs
index 049cadf86..6f366a911 100644
--- a/library/test/src/helpers/mod.rs
+++ b/library/test/src/helpers/mod.rs
@@ -3,6 +3,5 @@
pub mod concurrency;
pub mod exit_code;
-pub mod isatty;
pub mod metrics;
pub mod shuffle;
diff --git a/library/test/src/lib.rs b/library/test/src/lib.rs
index 3b7193adc..141f16d17 100644
--- a/library/test/src/lib.rs
+++ b/library/test/src/lib.rs
@@ -6,7 +6,8 @@
//! benchmarks themselves) should be done via the `#[test]` and
//! `#[bench]` attributes.
//!
-//! See the [Testing Chapter](../book/ch11-00-testing.html) of the book for more details.
+//! See the [Testing Chapter](../book/ch11-00-testing.html) of the book for more
+//! details.
// Currently, not much of this is meant for users. It is intended to
// support the simplest interface possible for representing and
@@ -15,10 +16,11 @@
#![unstable(feature = "test", issue = "50297")]
#![doc(test(attr(deny(warnings))))]
-#![feature(bench_black_box)]
#![feature(internal_output_capture)]
+#![feature(is_terminal)]
#![feature(staged_api)]
#![feature(process_exitcode_internals)]
+#![feature(panic_can_unwind)]
#![feature(test)]
// Public reexports
@@ -53,6 +55,7 @@ use std::{
collections::VecDeque,
env, io,
io::prelude::Write,
+ mem::ManuallyDrop,
panic::{self, catch_unwind, AssertUnwindSafe, PanicInfo},
process::{self, Command, Termination},
sync::mpsc::{channel, Sender},
@@ -77,6 +80,7 @@ mod types;
#[cfg(test)]
mod tests;
+use core::any::Any;
use event::{CompletedTest, TestEvent};
use helpers::concurrency::get_concurrency;
use helpers::exit_code::get_exit_code;
@@ -110,6 +114,29 @@ pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Option<Opt
process::exit(ERROR_EXIT_CODE);
}
} else {
+ if !opts.nocapture {
+ // If we encounter a non-unwinding panic, flush any captured output from the current test,
+ // and stop capturing output to ensure that the non-unwinding panic message is visible.
+ // We also acquire the locks for both output streams to prevent output from other threads
+ // from interleaving with the panic message or appearing after it.
+ let builtin_panic_hook = panic::take_hook();
+ let hook = Box::new({
+ move |info: &'_ PanicInfo<'_>| {
+ if !info.can_unwind() {
+ std::mem::forget(std::io::stderr().lock());
+ let mut stdout = ManuallyDrop::new(std::io::stdout().lock());
+ if let Some(captured) = io::set_output_capture(None) {
+ if let Ok(data) = captured.lock() {
+ let _ = stdout.write_all(&data);
+ let _ = stdout.flush();
+ }
+ }
+ }
+ builtin_panic_hook(info);
+ }
+ });
+ panic::set_hook(hook);
+ }
match console::run_tests_console(&opts, tests) {
Ok(true) => {}
Ok(false) => process::exit(ERROR_EXIT_CODE),
@@ -176,17 +203,20 @@ fn make_owned_test(test: &&TestDescAndFn) -> TestDescAndFn {
}
}
-/// Invoked when unit tests terminate. Should panic if the unit
-/// Tests is considered a failure. By default, invokes `report()`
-/// and checks for a `0` result.
-pub fn assert_test_result<T: Termination>(result: T) {
+/// Invoked when unit tests terminate. Returns `Result::Err` if the test is
+/// considered a failure. By default, invokes `report()` and checks for a `0`
+/// result.
+pub fn assert_test_result<T: Termination>(result: T) -> Result<(), String> {
let code = result.report().to_i32();
- assert_eq!(
- code, 0,
- "the test returned a termination value with a non-zero status code ({}) \
- which indicates a failure",
- code
- );
+ if code == 0 {
+ Ok(())
+ } else {
+ Err(format!(
+ "the test returned a termination value with a non-zero status code \
+ ({}) which indicates a failure",
+ code
+ ))
+ }
}
pub fn run_tests<F>(
@@ -242,7 +272,7 @@ where
let event = TestEvent::TeFiltered(filtered_descs, shuffle_seed);
notify_about_test_event(event)?;
- let (filtered_tests, filtered_benchs): (Vec<_>, _) = filtered_tests
+ let (mut filtered_tests, filtered_benchs): (Vec<_>, _) = filtered_tests
.into_iter()
.enumerate()
.map(|(i, e)| (TestId(i), e))
@@ -250,12 +280,12 @@ where
let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);
- let mut remaining = filtered_tests;
if let Some(shuffle_seed) = shuffle_seed {
- shuffle_tests(shuffle_seed, &mut remaining);
- } else {
- remaining.reverse();
+ shuffle_tests(shuffle_seed, &mut filtered_tests);
}
+ // Store the tests in a VecDeque so we can efficiently remove the first element to run the
+ // tests in the order they were passed (unless shuffled).
+ let mut remaining = VecDeque::from(filtered_tests);
let mut pending = 0;
let (tx, rx) = channel::<CompletedTest>();
@@ -295,7 +325,7 @@ where
if concurrency == 1 {
while !remaining.is_empty() {
- let (id, test) = remaining.pop().unwrap();
+ let (id, test) = remaining.pop_front().unwrap();
let event = TestEvent::TeWait(test.desc.clone());
notify_about_test_event(event)?;
let join_handle =
@@ -309,7 +339,7 @@ where
} else {
while pending > 0 || !remaining.is_empty() {
while pending < concurrency && !remaining.is_empty() {
- let (id, test) = remaining.pop().unwrap();
+ let (id, test) = remaining.pop_front().unwrap();
let timeout = time::get_default_test_timeout();
let desc = test.desc.clone();
@@ -421,9 +451,6 @@ pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescA
RunIgnored::No => {}
}
- // Sort the tests alphabetically
- filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
-
filtered
}
@@ -479,7 +506,7 @@ pub fn run_test(
id: TestId,
desc: TestDesc,
monitor_ch: Sender<CompletedTest>,
- testfn: Box<dyn FnOnce() + Send>,
+ testfn: Box<dyn FnOnce() -> Result<(), String> + Send>,
opts: TestRunOpts,
) -> Option<thread::JoinHandle<()>> {
let concurrency = opts.concurrency;
@@ -568,11 +595,11 @@ pub fn run_test(
/// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
#[inline(never)]
-fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
- f();
+fn __rust_begin_short_backtrace<T, F: FnOnce() -> T>(f: F) -> T {
+ let result = f();
// prevent this frame from being tail-call optimised away
- black_box(());
+ black_box(result)
}
fn run_test_in_process(
@@ -580,7 +607,7 @@ fn run_test_in_process(
desc: TestDesc,
nocapture: bool,
report_time: bool,
- testfn: Box<dyn FnOnce() + Send>,
+ testfn: Box<dyn FnOnce() -> Result<(), String> + Send>,
monitor_ch: Sender<CompletedTest>,
time_opts: Option<time::TestTimeOptions>,
) {
@@ -592,7 +619,7 @@ fn run_test_in_process(
}
let start = report_time.then(Instant::now);
- let result = catch_unwind(AssertUnwindSafe(testfn));
+ let result = fold_err(catch_unwind(AssertUnwindSafe(testfn)));
let exec_time = start.map(|start| {
let duration = start.elapsed();
TestExecTime(duration)
@@ -609,6 +636,19 @@ fn run_test_in_process(
monitor_ch.send(message).unwrap();
}
+fn fold_err<T, E>(
+ result: Result<Result<T, E>, Box<dyn Any + Send>>,
+) -> Result<T, Box<dyn Any + Send>>
+where
+ E: Send + 'static,
+{
+ match result {
+ Ok(Err(e)) => Err(Box::new(e)),
+ Ok(Ok(v)) => Ok(v),
+ Err(e) => Err(e),
+ }
+}
+
fn spawn_test_subprocess(
id: TestId,
desc: TestDesc,
@@ -664,7 +704,10 @@ fn spawn_test_subprocess(
monitor_ch.send(message).unwrap();
}
-fn run_test_in_spawned_subprocess(desc: TestDesc, testfn: Box<dyn FnOnce() + Send>) -> ! {
+fn run_test_in_spawned_subprocess(
+ desc: TestDesc,
+ testfn: Box<dyn FnOnce() -> Result<(), String> + Send>,
+) -> ! {
let builtin_panic_hook = panic::take_hook();
let record_result = Arc::new(move |panic_info: Option<&'_ PanicInfo<'_>>| {
let test_result = match panic_info {
@@ -690,7 +733,9 @@ fn run_test_in_spawned_subprocess(desc: TestDesc, testfn: Box<dyn FnOnce() + Sen
});
let record_result2 = record_result.clone();
panic::set_hook(Box::new(move |info| record_result2(Some(&info))));
- testfn();
+ if let Err(message) = testfn() {
+ panic!("{}", message);
+ }
record_result(None);
unreachable!("panic=abort callback should have exited the process")
}
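
Editor's note: most of the churn in lib.rs follows from test and bench functions now returning `Result<(), String>`: `assert_test_result` reports a non-zero `Termination` code as an `Err` instead of panicking, and the new `fold_err` collapses the nested result from `catch_unwind` so a returned error and an unwind payload take the same failure path. A small self-contained illustration of that collapsing; the helper is reproduced from the patch, the inputs are made up:

use std::any::Any;

fn fold_err<T, E>(
    result: Result<Result<T, E>, Box<dyn Any + Send>>,
) -> Result<T, Box<dyn Any + Send>>
where
    E: Send + 'static,
{
    match result {
        Ok(Err(e)) => Err(Box::new(e)),
        Ok(Ok(v)) => Ok(v),
        Err(e) => Err(e),
    }
}

fn main() {
    // A test body that returned Err(..) fails just like an unwinding panic.
    let returned_err: Result<Result<(), String>, Box<dyn Any + Send>> =
        Ok(Err("the test returned Err".to_string()));
    assert!(fold_err(returned_err).is_err());

    let passed: Result<Result<(), String>, Box<dyn Any + Send>> = Ok(Ok(()));
    assert!(fold_err(passed).is_ok());
}
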
diff --git a/library/test/src/stats.rs b/library/test/src/stats.rs
index 40b05704b..b33b08012 100644
--- a/library/test/src/stats.rs
+++ b/library/test/src/stats.rs
@@ -14,7 +14,7 @@ pub trait Stats {
/// Sum of the samples.
///
/// Note: this method sacrifices performance at the altar of accuracy
- /// Depends on IEEE-754 arithmetic guarantees. See proof of correctness at:
+ /// Depends on IEEE 754 arithmetic guarantees. See proof of correctness at:
/// ["Adaptive Precision Floating-Point Arithmetic and Fast Robust Geometric
/// Predicates"][paper]
///
diff --git a/library/test/src/term.rs b/library/test/src/term.rs
index b256ab7b8..a14b0d4f5 100644
--- a/library/test/src/term.rs
+++ b/library/test/src/term.rs
@@ -39,7 +39,7 @@ pub(crate) fn stdout() -> Option<Box<StdoutTerminal>> {
pub(crate) fn stdout() -> Option<Box<StdoutTerminal>> {
TerminfoTerminal::new(io::stdout())
.map(|t| Box::new(t) as Box<StdoutTerminal>)
- .or_else(|| WinConsole::new(io::stdout()).ok().map(|t| Box::new(t) as Box<StdoutTerminal>))
+ .or_else(|| Some(Box::new(WinConsole::new(io::stdout())) as Box<StdoutTerminal>))
}
/// Terminal color definitions
diff --git a/library/test/src/term/terminfo/mod.rs b/library/test/src/term/terminfo/mod.rs
index 694473f52..355859019 100644
--- a/library/test/src/term/terminfo/mod.rs
+++ b/library/test/src/term/terminfo/mod.rs
@@ -80,6 +80,17 @@ impl TermInfo {
/// Creates a TermInfo for the named terminal.
pub(crate) fn from_name(name: &str) -> Result<TermInfo, Error> {
+ if cfg!(miri) {
+ // Avoid all the work of parsing the terminfo (it's pretty slow under Miri), and just
+ // assume that the standard color codes work (like e.g. the 'colored' crate).
+ return Ok(TermInfo {
+ names: Default::default(),
+ bools: Default::default(),
+ numbers: Default::default(),
+ strings: Default::default(),
+ });
+ }
+
get_dbpath_for_term(name)
.ok_or_else(|| {
Error::IoError(io::Error::new(io::ErrorKind::NotFound, "terminfo file not found"))
@@ -119,6 +130,12 @@ pub(crate) struct TerminfoTerminal<T> {
impl<T: Write + Send> Terminal for TerminfoTerminal<T> {
fn fg(&mut self, color: color::Color) -> io::Result<bool> {
let color = self.dim_if_necessary(color);
+ if cfg!(miri) && color < 8 {
+ // The Miri logic for this only works for the most basic 8 colors, which we just assume
+ // the terminal will support. (`num_colors` is always 0 in Miri, so higher colors will
+ // just fail. But libtest doesn't use any higher colors anyway.)
+ return write!(self.out, "\x1B[3{color}m").and(Ok(true));
+ }
if self.num_colors > color {
return self.apply_cap("setaf", &[Param::Number(color as i32)]);
}
@@ -126,6 +143,9 @@ impl<T: Write + Send> Terminal for TerminfoTerminal<T> {
}
fn reset(&mut self) -> io::Result<bool> {
+ if cfg!(miri) {
+ return write!(self.out, "\x1B[0m").and(Ok(true));
+ }
// are there any terminals that have color/attrs and not sgr0?
// Try falling back to sgr, then op
let cmd = match ["sgr0", "sgr", "op"].iter().find_map(|cap| self.ti.strings.get(*cap)) {
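
Editor's note: under Miri, terminfo parsing is skipped and the terminal is simply assumed to understand the basic ANSI SGR sequences, so `fg` writes `ESC[3<color>m` directly and `reset` writes `ESC[0m`. A minimal sketch of those escape sequences, assuming an ANSI-capable terminal:

use std::io::{self, Write};

fn main() -> io::Result<()> {
    let mut out = io::stdout();
    // "\x1B[31m" selects basic foreground color 1 (red), the same family of
    // sequences as the write!(self.out, "\x1B[3{color}m") shortcut above;
    // "\x1B[0m" resets all attributes, matching the reset() shortcut.
    writeln!(out, "\x1B[31mred\x1B[0m plain")?;
    out.flush()
}
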
diff --git a/library/test/src/term/win.rs b/library/test/src/term/win.rs
index 4bdbd6ee7..55020141a 100644
--- a/library/test/src/term/win.rs
+++ b/library/test/src/term/win.rs
@@ -113,8 +113,7 @@ impl<T: Write + Send + 'static> WinConsole<T> {
}
}
- /// Returns `None` whenever the terminal cannot be created for some reason.
- pub(crate) fn new(out: T) -> io::Result<WinConsole<T>> {
+ pub(crate) fn new(out: T) -> WinConsole<T> {
use std::mem::MaybeUninit;
let fg;
@@ -132,13 +131,13 @@ impl<T: Write + Send + 'static> WinConsole<T> {
bg = color::BLACK;
}
}
- Ok(WinConsole {
+ WinConsole {
buf: out,
def_foreground: fg,
def_background: bg,
foreground: fg,
background: bg,
- })
+ }
}
}
diff --git a/library/test/src/tests.rs b/library/test/src/tests.rs
index 0b81aff59..b54be64ef 100644
--- a/library/test/src/tests.rs
+++ b/library/test/src/tests.rs
@@ -67,7 +67,7 @@ fn one_ignored_one_unignored_test() -> Vec<TestDescAndFn> {
no_run: false,
test_type: TestType::Unknown,
},
- testfn: DynTestFn(Box::new(move || {})),
+ testfn: DynTestFn(Box::new(move || Ok(()))),
},
TestDescAndFn {
desc: TestDesc {
@@ -79,14 +79,14 @@ fn one_ignored_one_unignored_test() -> Vec<TestDescAndFn> {
no_run: false,
test_type: TestType::Unknown,
},
- testfn: DynTestFn(Box::new(move || {})),
+ testfn: DynTestFn(Box::new(move || Ok(()))),
},
]
}
#[test]
pub fn do_not_run_ignored_tests() {
- fn f() {
+ fn f() -> Result<(), String> {
panic!();
}
let desc = TestDescAndFn {
@@ -109,7 +109,9 @@ pub fn do_not_run_ignored_tests() {
#[test]
pub fn ignored_tests_result_in_ignored() {
- fn f() {}
+ fn f() -> Result<(), String> {
+ Ok(())
+ }
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
@@ -132,7 +134,7 @@ pub fn ignored_tests_result_in_ignored() {
#[test]
#[cfg(not(target_os = "emscripten"))]
fn test_should_panic() {
- fn f() {
+ fn f() -> Result<(), String> {
panic!();
}
let desc = TestDescAndFn {
@@ -157,7 +159,7 @@ fn test_should_panic() {
#[test]
#[cfg(not(target_os = "emscripten"))]
fn test_should_panic_good_message() {
- fn f() {
+ fn f() -> Result<(), String> {
panic!("an error message");
}
let desc = TestDescAndFn {
@@ -183,7 +185,7 @@ fn test_should_panic_good_message() {
#[cfg(not(target_os = "emscripten"))]
fn test_should_panic_bad_message() {
use crate::tests::TrFailedMsg;
- fn f() {
+ fn f() -> Result<(), String> {
panic!("an error message");
}
let expected = "foobar";
@@ -214,7 +216,7 @@ fn test_should_panic_bad_message() {
fn test_should_panic_non_string_message_type() {
use crate::tests::TrFailedMsg;
use std::any::TypeId;
- fn f() {
+ fn f() -> Result<(), String> {
std::panic::panic_any(1i32);
}
let expected = "foobar";
@@ -249,7 +251,9 @@ fn test_should_panic_but_succeeds() {
let should_panic_variants = [ShouldPanic::Yes, ShouldPanic::YesWithMessage("error message")];
for &should_panic in should_panic_variants.iter() {
- fn f() {}
+ fn f() -> Result<(), String> {
+ Ok(())
+ }
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
@@ -283,7 +287,9 @@ fn test_should_panic_but_succeeds() {
}
fn report_time_test_template(report_time: bool) -> Option<TestExecTime> {
- fn f() {}
+ fn f() -> Result<(), String> {
+ Ok(())
+ }
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
@@ -318,7 +324,9 @@ fn test_should_report_time() {
}
fn time_test_failure_template(test_type: TestType) -> TestResult {
- fn f() {}
+ fn f() -> Result<(), String> {
+ Ok(())
+ }
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
@@ -480,7 +488,7 @@ pub fn exclude_should_panic_option() {
no_run: false,
test_type: TestType::Unknown,
},
- testfn: DynTestFn(Box::new(move || {})),
+ testfn: DynTestFn(Box::new(move || Ok(()))),
});
let filtered = filter_tests(&opts, tests);
@@ -504,7 +512,7 @@ pub fn exact_filter_match() {
no_run: false,
test_type: TestType::Unknown,
},
- testfn: DynTestFn(Box::new(move || {})),
+ testfn: DynTestFn(Box::new(move || Ok(()))),
})
.collect()
}
@@ -580,7 +588,9 @@ fn sample_tests() -> Vec<TestDescAndFn> {
"test::run_include_ignored_option".to_string(),
"test::sort_tests".to_string(),
];
- fn testfn() {}
+ fn testfn() -> Result<(), String> {
+ Ok(())
+ }
let mut tests = Vec::new();
for name in &names {
let test = TestDescAndFn {
@@ -601,33 +611,6 @@ fn sample_tests() -> Vec<TestDescAndFn> {
}
#[test]
-pub fn sort_tests() {
- let mut opts = TestOpts::new();
- opts.run_tests = true;
-
- let tests = sample_tests();
- let filtered = filter_tests(&opts, tests);
-
- let expected = vec![
- "isize::test_pow".to_string(),
- "isize::test_to_str".to_string(),
- "sha1::test".to_string(),
- "test::do_not_run_ignored_tests".to_string(),
- "test::filter_for_ignored_option".to_string(),
- "test::first_free_arg_should_be_a_filter".to_string(),
- "test::ignored_tests_result_in_ignored".to_string(),
- "test::parse_ignored_flag".to_string(),
- "test::parse_include_ignored_flag".to_string(),
- "test::run_include_ignored_option".to_string(),
- "test::sort_tests".to_string(),
- ];
-
- for (a, b) in expected.iter().zip(filtered) {
- assert_eq!(*a, b.desc.name.to_string());
- }
-}
-
-#[test]
pub fn shuffle_tests() {
let mut opts = TestOpts::new();
opts.shuffle = true;
@@ -717,21 +700,26 @@ pub fn test_metricmap_compare() {
#[test]
pub fn test_bench_once_no_iter() {
- fn f(_: &mut Bencher) {}
- bench::run_once(f);
+ fn f(_: &mut Bencher) -> Result<(), String> {
+ Ok(())
+ }
+ bench::run_once(f).unwrap();
}
#[test]
pub fn test_bench_once_iter() {
- fn f(b: &mut Bencher) {
- b.iter(|| {})
+ fn f(b: &mut Bencher) -> Result<(), String> {
+ b.iter(|| {});
+ Ok(())
}
- bench::run_once(f);
+ bench::run_once(f).unwrap();
}
#[test]
pub fn test_bench_no_iter() {
- fn f(_: &mut Bencher) {}
+ fn f(_: &mut Bencher) -> Result<(), String> {
+ Ok(())
+ }
let (tx, rx) = channel();
@@ -751,8 +739,9 @@ pub fn test_bench_no_iter() {
#[test]
pub fn test_bench_iter() {
- fn f(b: &mut Bencher) {
- b.iter(|| {})
+ fn f(b: &mut Bencher) -> Result<(), String> {
+ b.iter(|| {});
+ Ok(())
}
let (tx, rx) = channel();
@@ -821,3 +810,33 @@ fn should_sort_failures_before_printing_them() {
let bpos = s.find("b").unwrap();
assert!(apos < bpos);
}
+
+#[test]
+#[cfg(not(target_os = "emscripten"))]
+fn test_dyn_bench_returning_err_fails_when_run_as_test() {
+ fn f(_: &mut Bencher) -> Result<(), String> {
+ Result::Err("An error".into())
+ }
+ let desc = TestDescAndFn {
+ desc: TestDesc {
+ name: StaticTestName("whatever"),
+ ignore: false,
+ ignore_message: None,
+ should_panic: ShouldPanic::No,
+ compile_fail: false,
+ no_run: false,
+ test_type: TestType::Unknown,
+ },
+ testfn: DynBenchFn(Box::new(f)),
+ };
+ let (tx, rx) = channel();
+ let notify = move |event: TestEvent| {
+ if let TestEvent::TeResult(result) = event {
+ tx.send(result).unwrap();
+ }
+ Ok(())
+ };
+ run_tests(&TestOpts { run_tests: true, ..TestOpts::new() }, vec![desc], notify).unwrap();
+ let result = rx.recv().unwrap().result;
+ assert_eq!(result, TrFailed);
+}
diff --git a/library/test/src/types.rs b/library/test/src/types.rs
index ffb1efe18..888afff79 100644
--- a/library/test/src/types.rs
+++ b/library/test/src/types.rs
@@ -75,14 +75,15 @@ impl fmt::Display for TestName {
}
// A function that runs a test. If the function returns successfully,
-// the test succeeds; if the function panics then the test fails. We
-// may need to come up with a more clever definition of test in order
-// to support isolation of tests into threads.
+// the test succeeds; if the function panics or returns Result::Err
+// then the test fails. We may need to come up with a more clever
+// definition of test in order to support isolation of tests into
+// threads.
pub enum TestFn {
- StaticTestFn(fn()),
- StaticBenchFn(fn(&mut Bencher)),
- DynTestFn(Box<dyn FnOnce() + Send>),
- DynBenchFn(Box<dyn Fn(&mut Bencher) + Send>),
+ StaticTestFn(fn() -> Result<(), String>),
+ StaticBenchFn(fn(&mut Bencher) -> Result<(), String>),
+ DynTestFn(Box<dyn FnOnce() -> Result<(), String> + Send>),
+ DynBenchFn(Box<dyn Fn(&mut Bencher) -> Result<(), String> + Send>),
}
impl TestFn {
diff --git a/library/unwind/build.rs b/library/unwind/build.rs
index f88e6a924..31af39025 100644
--- a/library/unwind/build.rs
+++ b/library/unwind/build.rs
@@ -2,8 +2,14 @@ use std::env;
fn main() {
println!("cargo:rerun-if-changed=build.rs");
- let target = env::var("TARGET").expect("TARGET was not set");
+ println!("cargo:rerun-if-env-changed=CARGO_CFG_MIRI");
+
+ if env::var_os("CARGO_CFG_MIRI").is_some() {
+ // Miri doesn't need the linker flags or a libunwind build.
+ return;
+ }
+ let target = env::var("TARGET").expect("TARGET was not set");
if target.contains("android") {
let build = cc::Build::new();
@@ -13,13 +19,8 @@ fn main() {
let has_unwind = build.is_flag_supported("-lunwind").expect("Unable to invoke compiler");
if has_unwind {
- println!("cargo:rustc-link-lib=unwind");
- } else {
- println!("cargo:rustc-link-lib=gcc");
+ println!("cargo:rustc-cfg=feature=\"system-llvm-libunwind\"");
}
-
- // Android's unwinding library depends on dl_iterate_phdr in `libdl`.
- println!("cargo:rustc-link-lib=dl");
} else if target.contains("freebsd") {
println!("cargo:rustc-link-lib=gcc_s");
} else if target.contains("netbsd") {
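
Editor's note: on Android the build script no longer emits `cargo:rustc-link-lib` lines; it only probes for `-lunwind` and forwards the answer as a `system-llvm-libunwind` cfg, while the actual `#[link]` attributes (including the `libdl` dependency) move into unwind/src/lib.rs below, where they can carry `cfg(target_feature = "crt-static")` modifiers. A hedged sketch of the build-script side of that handshake; `HYPOTHETICAL_HAS_UNWIND` is a made-up stand-in for the real `is_flag_supported("-lunwind")` probe:

// Not the real build script: a probe result is forwarded to library code as a
// cfg, which the library then tests with #[cfg(feature = "system-llvm-libunwind")].
fn main() {
    println!("cargo:rerun-if-changed=build.rs");
    if std::env::var_os("HYPOTHETICAL_HAS_UNWIND").is_some() {
        println!("cargo:rustc-cfg=feature=\"system-llvm-libunwind\"");
    }
}
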
diff --git a/library/unwind/src/lib.rs b/library/unwind/src/lib.rs
index 4a6ba6e10..46fe50cb9 100644
--- a/library/unwind/src/lib.rs
+++ b/library/unwind/src/lib.rs
@@ -55,6 +55,26 @@ cfg_if::cfg_if! {
}
}
+#[cfg(target_os = "android")]
+cfg_if::cfg_if! {
+ if #[cfg(feature = "llvm-libunwind")] {
+ compile_error!("`llvm-libunwind` is not supported for Android targets");
+ } else if #[cfg(feature = "system-llvm-libunwind")] {
+ #[link(name = "unwind", kind = "static", modifiers = "-bundle", cfg(target_feature = "crt-static"))]
+ #[link(name = "unwind", cfg(not(target_feature = "crt-static")))]
+ extern "C" {}
+ } else {
+ #[link(name = "gcc", kind = "static", modifiers = "-bundle", cfg(target_feature = "crt-static"))]
+ #[link(name = "gcc", cfg(not(target_feature = "crt-static")))]
+ extern "C" {}
+ }
+}
+// Android's unwinding library depends on dl_iterate_phdr in `libdl`.
+#[cfg(target_os = "android")]
+#[link(name = "dl", kind = "static", modifiers = "-bundle", cfg(target_feature = "crt-static"))]
+#[link(name = "dl", cfg(not(target_feature = "crt-static")))]
+extern "C" {}
+
// When building with crt-static, we get `gcc_eh` from the `libc` crate, since
// glibc needs it, and needs it listed later on the linker command line. We
// don't want to duplicate it here.