Diffstat (limited to 'library')
-rw-r--r-- library/alloc/src/alloc.rs | 32
-rw-r--r-- library/alloc/src/collections/vec_deque/mod.rs | 4
-rw-r--r-- library/alloc/src/fmt.rs | 4
-rw-r--r-- library/alloc/src/lib.rs | 6
-rw-r--r-- library/alloc/src/macros.rs | 21
-rw-r--r-- library/alloc/src/rc.rs | 23
-rw-r--r-- library/alloc/src/slice.rs | 2
-rw-r--r-- library/alloc/src/sync.rs | 31
-rw-r--r-- library/alloc/src/vec/mod.rs | 87
-rw-r--r-- library/alloc/tests/vec.rs | 11
-rw-r--r-- library/backtrace/.github/workflows/check-binary-size.yml | 83
-rw-r--r-- library/backtrace/.github/workflows/main.yml | 2
-rw-r--r-- library/backtrace/Cargo.toml | 12
-rw-r--r-- library/backtrace/build.rs | 12
-rw-r--r-- library/backtrace/crates/as-if-std/Cargo.toml | 16
-rw-r--r-- library/backtrace/crates/as-if-std/build.rs | 8
-rw-r--r-- library/backtrace/src/print.rs | 3
-rw-r--r-- library/backtrace/src/print/fuchsia.rs | 7
-rw-r--r-- library/backtrace/src/symbolize/gimli/parse_running_mmaps_unix.rs | 81
-rw-r--r-- library/backtrace/src/symbolize/mod.rs | 2
-rw-r--r-- library/backtrace/src/windows.rs | 4
-rw-r--r-- library/core/primitive_docs/box_into_raw.md | 1
-rw-r--r-- library/core/primitive_docs/fs_file.md | 1
-rw-r--r-- library/core/primitive_docs/io_bufread.md | 1
-rw-r--r-- library/core/primitive_docs/io_read.md | 1
-rw-r--r-- library/core/primitive_docs/io_seek.md | 1
-rw-r--r-- library/core/primitive_docs/io_write.md | 1
-rw-r--r-- library/core/primitive_docs/net_tosocketaddrs.md | 1
-rw-r--r-- library/core/primitive_docs/process_exit.md | 1
-rw-r--r-- library/core/primitive_docs/string_string.md | 1
-rw-r--r-- library/core/src/ascii/ascii_char.rs | 41
-rw-r--r-- library/core/src/cell.rs | 51
-rw-r--r-- library/core/src/char/convert.rs | 38
-rw-r--r-- library/core/src/char/methods.rs | 52
-rw-r--r-- library/core/src/cmp.rs | 118
-rw-r--r-- library/core/src/convert/num.rs | 4
-rw-r--r-- library/core/src/error.md | 2
-rw-r--r-- library/core/src/ffi/c_str.rs | 107
-rw-r--r-- library/core/src/fmt/builders.rs | 8
-rw-r--r-- library/core/src/fmt/mod.rs | 28
-rw-r--r-- library/core/src/hash/mod.rs | 5
-rw-r--r-- library/core/src/hint.rs | 41
-rw-r--r-- library/core/src/intrinsics.rs | 31
-rw-r--r-- library/core/src/intrinsics/mir.rs | 45
-rw-r--r-- library/core/src/iter/adapters/take.rs | 101
-rw-r--r-- library/core/src/iter/range.rs | 130
-rw-r--r-- library/core/src/lib.rs | 31
-rw-r--r-- library/core/src/macros/mod.rs | 93
-rw-r--r-- library/core/src/macros/panic.md | 30
-rw-r--r-- library/core/src/marker.rs | 7
-rw-r--r-- library/core/src/mem/mod.rs | 13
-rw-r--r-- library/core/src/net/ip_addr.rs | 8
-rw-r--r-- library/core/src/num/dec2flt/fpu.rs | 11
-rw-r--r-- library/core/src/num/dec2flt/number.rs | 1
-rw-r--r-- library/core/src/num/f32.rs | 21
-rw-r--r-- library/core/src/num/f64.rs | 21
-rw-r--r-- library/core/src/num/int_macros.rs | 54
-rw-r--r-- library/core/src/num/mod.rs | 3
-rw-r--r-- library/core/src/num/nonzero.rs | 14
-rw-r--r-- library/core/src/num/saturating.rs | 438
-rw-r--r-- library/core/src/num/uint_macros.rs | 64
-rw-r--r-- library/core/src/num/wrapping.rs | 1
-rw-r--r-- library/core/src/ops/deref.rs | 10
-rw-r--r-- library/core/src/ops/drop.rs | 12
-rw-r--r-- library/core/src/ops/function.rs | 6
-rw-r--r-- library/core/src/ops/range.rs | 2
-rw-r--r-- library/core/src/option.rs | 2
-rw-r--r-- library/core/src/panic.rs | 4
-rw-r--r-- library/core/src/panic/panic_info.rs | 15
-rw-r--r-- library/core/src/panicking.rs | 57
-rw-r--r-- library/core/src/pin.rs | 5
-rw-r--r-- library/core/src/primitive_docs.rs | 29
-rw-r--r-- library/core/src/ptr/const_ptr.rs | 23
-rw-r--r-- library/core/src/ptr/mod.rs | 10
-rw-r--r-- library/core/src/ptr/mut_ptr.rs | 41
-rw-r--r-- library/core/src/ptr/non_null.rs | 20
-rw-r--r-- library/core/src/slice/ascii.rs | 2
-rw-r--r-- library/core/src/slice/mod.rs | 4
-rw-r--r-- library/core/src/str/mod.rs | 6
-rw-r--r-- library/core/src/sync/atomic.rs | 3
-rw-r--r-- library/core/src/sync/exclusive.rs | 46
-rw-r--r-- library/core/src/time.rs | 8
-rw-r--r-- library/core/src/tuple.rs | 2
-rw-r--r-- library/core/tests/iter/range.rs | 18
-rw-r--r-- library/core/tests/lib.rs | 5
-rw-r--r-- library/core/tests/macros.rs | 154
-rw-r--r-- library/core/tests/num/int_macros.rs | 32
-rw-r--r-- library/core/tests/num/uint_macros.rs | 29
-rw-r--r-- library/core/tests/time.rs | 1
-rw-r--r-- library/panic_abort/src/android.rs | 4
-rw-r--r-- library/panic_abort/src/lib.rs | 10
-rw-r--r-- library/panic_unwind/src/lib.rs | 7
-rw-r--r-- library/proc_macro/src/lib.rs | 19
-rw-r--r-- library/profiler_builtins/src/lib.rs | 2
-rw-r--r-- library/std/Cargo.toml | 10
-rw-r--r-- library/std/build.rs | 4
-rw-r--r-- library/std/primitive_docs/box_into_raw.md | 1
-rw-r--r-- library/std/primitive_docs/fs_file.md | 1
-rw-r--r-- library/std/primitive_docs/io_bufread.md | 1
-rw-r--r-- library/std/primitive_docs/io_read.md | 1
-rw-r--r-- library/std/primitive_docs/io_seek.md | 1
-rw-r--r-- library/std/primitive_docs/io_write.md | 1
-rw-r--r-- library/std/primitive_docs/net_tosocketaddrs.md | 1
-rw-r--r-- library/std/primitive_docs/process_exit.md | 1
-rw-r--r-- library/std/primitive_docs/string_string.md | 1
-rw-r--r-- library/std/src/alloc.rs | 26
-rw-r--r-- library/std/src/f32.rs | 5
-rw-r--r-- library/std/src/f64.rs | 5
-rw-r--r-- library/std/src/ffi/mod.rs | 4
-rw-r--r-- library/std/src/ffi/os_str.rs | 68
-rw-r--r-- library/std/src/fs.rs | 17
-rw-r--r-- library/std/src/io/buffered/bufwriter.rs | 2
-rw-r--r-- library/std/src/io/copy.rs | 51
-rw-r--r-- library/std/src/io/copy/tests.rs | 12
-rw-r--r-- library/std/src/io/error.rs | 25
-rw-r--r-- library/std/src/io/error/repr_bitpacked.rs | 3
-rw-r--r-- library/std/src/io/mod.rs | 99
-rw-r--r-- library/std/src/keyword_docs.rs | 10
-rw-r--r-- library/std/src/lib.rs | 37
-rw-r--r-- library/std/src/macros.rs | 12
-rw-r--r-- library/std/src/net/tcp.rs | 2
-rw-r--r-- library/std/src/net/udp.rs | 2
-rw-r--r-- library/std/src/num.rs | 2
-rw-r--r-- library/std/src/os/fd/owned.rs | 12
-rw-r--r-- library/std/src/os/fd/raw.rs | 5
-rw-r--r-- library/std/src/os/fortanix_sgx/io.rs | 23
-rw-r--r-- library/std/src/os/hurd/fs.rs | 348
-rw-r--r-- library/std/src/os/hurd/mod.rs | 6
-rw-r--r-- library/std/src/os/hurd/raw.rs | 33
-rw-r--r-- library/std/src/os/mod.rs | 6
-rw-r--r-- library/std/src/os/solid/io.rs | 22
-rw-r--r-- library/std/src/os/uefi/env.rs | 92
-rw-r--r-- library/std/src/os/uefi/mod.rs | 8
-rw-r--r-- library/std/src/os/unix/fs.rs | 6
-rw-r--r-- library/std/src/os/unix/io/mod.rs | 16
-rw-r--r-- library/std/src/os/unix/mod.rs | 2
-rw-r--r-- library/std/src/os/unix/net/tests.rs | 2
-rw-r--r-- library/std/src/os/unix/process.rs | 42
-rw-r--r-- library/std/src/os/wasi/fs.rs | 4
-rw-r--r-- library/std/src/os/windows/io/mod.rs | 10
-rw-r--r-- library/std/src/os/windows/io/raw.rs | 6
-rw-r--r-- library/std/src/os/windows/io/socket.rs | 8
-rw-r--r-- library/std/src/os/windows/process.rs | 108
-rw-r--r-- library/std/src/os/xous/ffi.rs | 647
-rw-r--r-- library/std/src/os/xous/ffi/definitions.rs | 283
-rw-r--r-- library/std/src/os/xous/ffi/definitions/memoryflags.rs | 176
-rw-r--r-- library/std/src/os/xous/mod.rs | 17
-rw-r--r-- library/std/src/os/xous/services.rs | 132
-rw-r--r-- library/std/src/os/xous/services/log.rs | 63
-rw-r--r-- library/std/src/os/xous/services/systime.rs | 28
-rw-r--r-- library/std/src/os/xous/services/ticktimer.rs | 42
-rw-r--r-- library/std/src/panicking.rs | 74
-rw-r--r-- library/std/src/path.rs | 34
-rw-r--r-- library/std/src/primitive_docs.rs | 1593
-rw-r--r-- library/std/src/process.rs | 210
-rw-r--r-- library/std/src/process/tests.rs | 132
-rw-r--r-- library/std/src/sync/mpsc/mod.rs | 15
-rw-r--r-- library/std/src/sys/common/small_c_string.rs | 2
-rw-r--r-- library/std/src/sys/common/tests.rs | 4
-rw-r--r-- library/std/src/sys/common/thread_local/mod.rs | 2
-rw-r--r-- library/std/src/sys/hermit/mod.rs | 8
-rw-r--r-- library/std/src/sys/hermit/net.rs | 10
-rw-r--r-- library/std/src/sys/itron/error.rs | 5
-rw-r--r-- library/std/src/sys/mod.rs | 9
-rw-r--r-- library/std/src/sys/sgx/mod.rs | 6
-rw-r--r-- library/std/src/sys/solid/mod.rs | 5
-rw-r--r-- library/std/src/sys/solid/net.rs | 5
-rw-r--r-- library/std/src/sys/solid/os.rs | 2
-rw-r--r-- library/std/src/sys/uefi/alloc.rs | 33
-rw-r--r-- library/std/src/sys/uefi/env.rs | 9
-rw-r--r-- library/std/src/sys/uefi/helpers.rs | 141
-rw-r--r-- library/std/src/sys/uefi/mod.rs | 244
-rw-r--r-- library/std/src/sys/uefi/os.rs | 237
-rw-r--r-- library/std/src/sys/uefi/path.rs | 25
-rw-r--r-- library/std/src/sys/uefi/tests.rs | 21
-rw-r--r-- library/std/src/sys/unix/alloc.rs | 6
-rw-r--r-- library/std/src/sys/unix/args.rs | 1
-rw-r--r-- library/std/src/sys/unix/env.rs | 11
-rw-r--r-- library/std/src/sys/unix/fd.rs | 18
-rw-r--r-- library/std/src/sys/unix/fs.rs | 62
-rw-r--r-- library/std/src/sys/unix/kernel_copy.rs | 4
-rw-r--r-- library/std/src/sys/unix/mod.rs | 11
-rw-r--r-- library/std/src/sys/unix/net.rs | 9
-rw-r--r-- library/std/src/sys/unix/os.rs | 15
-rw-r--r-- library/std/src/sys/unix/os_str.rs | 10
-rw-r--r-- library/std/src/sys/unix/os_str/tests.rs | 4
-rw-r--r-- library/std/src/sys/unix/path.rs | 2
-rw-r--r-- library/std/src/sys/unix/pipe.rs | 9
-rw-r--r-- library/std/src/sys/unix/process/process_common.rs | 51
-rw-r--r-- library/std/src/sys/unix/process/process_unix.rs | 15
-rw-r--r-- library/std/src/sys/unix/stack_overflow.rs | 2
-rw-r--r-- library/std/src/sys/unix/thread.rs | 37
-rw-r--r-- library/std/src/sys/unix/thread_local_dtor.rs | 4
-rw-r--r-- library/std/src/sys/unix/thread_parking/darwin.rs | 3
-rw-r--r-- library/std/src/sys/unix/time.rs | 2
-rw-r--r-- library/std/src/sys/unsupported/common.rs | 4
-rw-r--r-- library/std/src/sys/unsupported/process.rs | 20
-rw-r--r-- library/std/src/sys/wasi/mod.rs | 5
-rw-r--r-- library/std/src/sys/windows/args.rs | 4
-rw-r--r-- library/std/src/sys/windows/c.rs | 6
-rw-r--r-- library/std/src/sys/windows/c/windows_sys.lst | 21
-rw-r--r-- library/std/src/sys/windows/c/windows_sys.rs | 201
-rw-r--r-- library/std/src/sys/windows/handle.rs | 9
-rw-r--r-- library/std/src/sys/windows/mod.rs | 5
-rw-r--r-- library/std/src/sys/windows/net.rs | 38
-rw-r--r-- library/std/src/sys/windows/os_str.rs | 8
-rw-r--r-- library/std/src/sys/windows/path.rs | 28
-rw-r--r-- library/std/src/sys/windows/pipe.rs | 8
-rw-r--r-- library/std/src/sys/windows/process.rs | 151
-rw-r--r-- library/std/src/sys/xous/alloc.rs | 62
-rw-r--r-- library/std/src/sys/xous/locks/condvar.rs | 111
-rw-r--r-- library/std/src/sys/xous/locks/mod.rs | 7
-rw-r--r-- library/std/src/sys/xous/locks/mutex.rs | 116
-rw-r--r-- library/std/src/sys/xous/locks/rwlock.rs | 72
-rw-r--r-- library/std/src/sys/xous/mod.rs | 37
-rw-r--r-- library/std/src/sys/xous/os.rs | 147
-rw-r--r-- library/std/src/sys/xous/stdio.rs | 131
-rw-r--r-- library/std/src/sys/xous/thread.rs | 144
-rw-r--r-- library/std/src/sys/xous/thread_local_key.rs | 190
-rw-r--r-- library/std/src/sys/xous/time.rs | 57
-rw-r--r-- library/std/src/sys_common/mod.rs | 2
-rw-r--r-- library/std/src/sys_common/net.rs | 1
-rw-r--r-- library/std/src/sys_common/process.rs | 4
-rw-r--r-- library/std/src/sys_common/thread_info.rs | 37
-rw-r--r-- library/std/src/thread/mod.rs | 82
-rw-r--r-- library/std/src/time.rs | 26
-rw-r--r-- library/std/tests/env.rs | 20
-rw-r--r-- library/stdarch/.github/workflows/main.yml | 36
-rw-r--r-- library/stdarch/ci/docker/riscv64gc-unknown-linux-gnu/Dockerfile | 7
-rwxr-xr-x library/stdarch/ci/dox.sh | 6
-rwxr-xr-x library/stdarch/ci/run-docker.sh | 5
-rwxr-xr-x library/stdarch/ci/run.sh | 6
-rw-r--r-- library/stdarch/crates/core_arch/src/aarch64/armclang.rs | 23
-rw-r--r-- library/stdarch/crates/core_arch/src/aarch64/mod.rs | 14
-rw-r--r-- library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs | 758
-rw-r--r-- library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs | 24
-rw-r--r-- library/stdarch/crates/core_arch/src/aarch64/tme.rs | 4
-rw-r--r-- library/stdarch/crates/core_arch/src/aarch64/v8.rs | 104
-rw-r--r-- library/stdarch/crates/core_arch/src/arm/armclang.rs | 35
-rw-r--r-- library/stdarch/crates/core_arch/src/arm/ex.rs | 125
-rw-r--r-- library/stdarch/crates/core_arch/src/arm/mod.rs | 14
-rw-r--r-- library/stdarch/crates/core_arch/src/arm/neon.rs | 28
-rw-r--r-- library/stdarch/crates/core_arch/src/arm/v6.rs | 49
-rw-r--r-- library/stdarch/crates/core_arch/src/arm/v7.rs | 87
-rw-r--r-- library/stdarch/crates/core_arch/src/arm_shared/mod.rs | 3
-rw-r--r-- library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs | 846
-rw-r--r-- library/stdarch/crates/core_arch/src/arm_shared/registers/aarch32.rs | 9
-rw-r--r-- library/stdarch/crates/core_arch/src/arm_shared/registers/mod.rs | 121
-rw-r--r-- library/stdarch/crates/core_arch/src/arm_shared/registers/v6m.rs | 39
-rw-r--r-- library/stdarch/crates/core_arch/src/arm_shared/registers/v7m.rs | 17
-rw-r--r-- library/stdarch/crates/core_arch/src/lib.rs | 2
-rw-r--r-- library/stdarch/crates/core_arch/src/mod.rs | 9
-rw-r--r-- library/stdarch/crates/core_arch/src/powerpc/altivec.rs | 107
-rw-r--r-- library/stdarch/crates/core_arch/src/riscv32/mod.rs | 5
-rw-r--r-- library/stdarch/crates/core_arch/src/riscv32/zk.rs | 367
-rw-r--r-- library/stdarch/crates/core_arch/src/riscv64/mod.rs | 4
-rw-r--r-- library/stdarch/crates/core_arch/src/riscv64/zk.rs | 281
-rw-r--r-- library/stdarch/crates/core_arch/src/riscv_shared/mod.rs | 223
-rw-r--r-- library/stdarch/crates/core_arch/src/riscv_shared/zb.rs | 150
-rw-r--r-- library/stdarch/crates/core_arch/src/riscv_shared/zk.rs | 462
-rw-r--r-- library/stdarch/crates/core_arch/src/wasm32/relaxed_simd.rs | 4
-rw-r--r-- library/stdarch/crates/core_arch/src/wasm32/simd128.rs | 38
-rw-r--r-- library/stdarch/crates/core_arch/src/x86/avx.rs | 28
-rw-r--r-- library/stdarch/crates/core_arch/src/x86/avx2.rs | 58
-rw-r--r-- library/stdarch/crates/core_arch/src/x86/avx512bw.rs | 158
-rw-r--r-- library/stdarch/crates/core_arch/src/x86/avx512f.rs | 502
-rw-r--r-- library/stdarch/crates/core_arch/src/x86/mod.rs | 6
-rw-r--r-- library/stdarch/crates/core_arch/src/x86/sse2.rs | 64
-rw-r--r-- library/stdarch/crates/intrinsic-test/missing_aarch64.txt | 21
-rw-r--r-- library/stdarch/crates/intrinsic-test/missing_arm.txt | 18
-rw-r--r-- library/stdarch/crates/intrinsic-test/src/argument.rs | 8
-rw-r--r-- library/stdarch/crates/std_detect/src/detect/arch/arm.rs | 4
-rw-r--r-- library/stdarch/crates/std_detect/src/detect/os/linux/arm.rs | 8
-rw-r--r-- library/stdarch/crates/std_detect/src/lib.rs | 2
-rw-r--r-- library/stdarch/crates/std_detect/tests/cpu-detection.rs | 19
-rw-r--r-- library/stdarch/crates/stdarch-gen/neon.spec | 343
-rw-r--r-- library/stdarch/crates/stdarch-gen/src/main.rs | 21
-rw-r--r-- library/stdarch/crates/stdarch-test/Cargo.toml | 4
-rw-r--r-- library/stdarch/crates/stdarch-test/src/disassembly.rs | 58
-rw-r--r-- library/stdarch/crates/stdarch-test/src/lib.rs | 15
-rw-r--r-- library/stdarch/examples/connect5.rs | 2
-rw-r--r-- library/test/src/lib.rs | 2
-rw-r--r-- library/unwind/src/lib.rs | 6
283 files changed, 10841 insertions, 4586 deletions
diff --git a/library/alloc/src/alloc.rs b/library/alloc/src/alloc.rs
index 5205ed9fb..a548de814 100644
--- a/library/alloc/src/alloc.rs
+++ b/library/alloc/src/alloc.rs
@@ -343,18 +343,31 @@ extern "Rust" {
fn __rust_alloc_error_handler(size: usize, align: usize) -> !;
}
-/// Abort on memory allocation error or failure.
+/// Signal a memory allocation error.
///
-/// Callers of memory allocation APIs wishing to abort computation
+/// Callers of memory allocation APIs wishing to cease execution
/// in response to an allocation error are encouraged to call this function,
-/// rather than directly invoking `panic!` or similar.
+/// rather than directly invoking [`panic!`] or similar.
///
-/// The default behavior of this function is to print a message to standard error
-/// and abort the process.
-/// It can be replaced with [`set_alloc_error_hook`] and [`take_alloc_error_hook`].
+/// This function is guaranteed to diverge (not return normally with a value), but depending on
+/// global configuration, it may either panic (resulting in unwinding or aborting as per
+/// configuration for all panics), or abort the process (with no unwinding).
+///
+/// The default behavior is:
+///
+/// * If the binary links against `std` (typically the case), then
+/// print a message to standard error and abort the process.
+/// This behavior can be replaced with [`set_alloc_error_hook`] and [`take_alloc_error_hook`].
+/// Future versions of Rust may panic by default instead.
+///
+/// * If the binary does not link against `std` (all of its crates are marked
+/// [`#![no_std]`][no_std]), then call [`panic!`] with a message.
+/// [The panic handler] applies as to any panic.
///
/// [`set_alloc_error_hook`]: ../../std/alloc/fn.set_alloc_error_hook.html
/// [`take_alloc_error_hook`]: ../../std/alloc/fn.take_alloc_error_hook.html
+/// [The panic handler]: https://doc.rust-lang.org/reference/runtime.html#the-panic_handler-attribute
+/// [no_std]: https://doc.rust-lang.org/reference/names/preludes.html#the-no_std-attribute
#[stable(feature = "global_alloc", since = "1.28.0")]
#[rustc_const_unstable(feature = "const_alloc_error", issue = "92523")]
#[cfg(all(not(no_global_oom_handling), not(test)))]
@@ -395,9 +408,10 @@ pub mod __alloc_error_handler {
if unsafe { __rust_alloc_error_handler_should_panic != 0 } {
panic!("memory allocation of {size} bytes failed")
} else {
- core::panicking::panic_nounwind_fmt(format_args!(
- "memory allocation of {size} bytes failed"
- ))
+ core::panicking::panic_nounwind_fmt(
+ format_args!("memory allocation of {size} bytes failed"),
+ /* force_no_backtrace */ false,
+ )
}
}
}
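The revised documentation above describes when `handle_alloc_error` should be called, but the function itself carries no usage example. A minimal sketch of the intended call pattern, assuming a hand-rolled allocation path (the helper below is illustrative, not part of this change):

```rust
use std::alloc::{alloc, dealloc, handle_alloc_error, Layout};

/// Allocates space for `len` `i32`s, reporting failure through the global
/// allocation-error handler instead of panicking directly.
fn allocate_ints(len: usize) -> *mut i32 {
    let layout = Layout::array::<i32>(len).expect("layout computation overflowed");
    assert!(layout.size() != 0, "this sketch does not handle zero-size allocations");
    // SAFETY: `layout` has a non-zero size.
    let ptr = unsafe { alloc(layout) };
    if ptr.is_null() {
        // Diverges: by default prints to stderr and aborts; in a `#![no_std]`
        // binary it panics via the registered panic handler (see the docs above).
        handle_alloc_error(layout);
    }
    ptr.cast::<i32>()
}

fn main() {
    let p = allocate_ints(4);
    assert!(!p.is_null());
    // Give the memory back once done.
    unsafe { dealloc(p.cast(), Layout::array::<i32>(4).unwrap()) };
}
```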
diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs
index 5965ec2af..4ef8af9b0 100644
--- a/library/alloc/src/collections/vec_deque/mod.rs
+++ b/library/alloc/src/collections/vec_deque/mod.rs
@@ -1015,8 +1015,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// Shortens the deque, keeping the first `len` elements and dropping
/// the rest.
///
- /// If `len` is greater than the deque's current length, this has no
- /// effect.
+ /// If `len` is greater than or equal to the deque's current length, this
+ /// has no effect.
///
/// # Examples
///
diff --git a/library/alloc/src/fmt.rs b/library/alloc/src/fmt.rs
index fb8d00e8d..1e2c35bf7 100644
--- a/library/alloc/src/fmt.rs
+++ b/library/alloc/src/fmt.rs
@@ -177,8 +177,8 @@
//! These are all flags altering the behavior of the formatter.
//!
//! * `+` - This is intended for numeric types and indicates that the sign
-//! should always be printed. Positive signs are never printed by
-//! default, and the negative sign is only printed by default for signed values.
+//! should always be printed. By default only the negative sign of signed values
+//! is printed, and the sign of positive or unsigned values is omitted.
//! This flag indicates that the correct sign (`+` or `-`) should always be printed.
//! * `-` - Currently not used
//! * `#` - This flag indicates that the "alternate" form of printing should
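For reference, the reworded `+`-flag description above corresponds to the following behaviour (illustrative snippet, not part of the diff):

```rust
fn main() {
    assert_eq!(format!("{:+}", 5), "+5");   // `+` forces a sign on positive values
    assert_eq!(format!("{:+}", -5), "-5");  // negative values always carry their sign
    assert_eq!(format!("{}", 5), "5");      // default: no sign for non-negative values
}
```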
diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs
index ffe6d6373..f435f503f 100644
--- a/library/alloc/src/lib.rs
+++ b/library/alloc/src/lib.rs
@@ -88,8 +88,8 @@
#![warn(missing_docs)]
#![allow(explicit_outlives_requirements)]
#![warn(multiple_supertrait_upcastable)]
-#![cfg_attr(not(bootstrap), allow(internal_features))]
-#![cfg_attr(not(bootstrap), allow(rustdoc::redundant_explicit_links))]
+#![allow(internal_features)]
+#![allow(rustdoc::redundant_explicit_links)]
//
// Library features:
// tidy-alphabetical-start
@@ -120,6 +120,7 @@
#![feature(const_waker)]
#![feature(core_intrinsics)]
#![feature(core_panic)]
+#![feature(deprecated_suggestion)]
#![feature(dispatch_from_dyn)]
#![feature(error_generic_member_access)]
#![feature(error_in_core)]
@@ -143,7 +144,6 @@
#![feature(ptr_metadata)]
#![feature(ptr_sub_ptr)]
#![feature(receiver_trait)]
-#![feature(saturating_int_impl)]
#![feature(set_ptr_value)]
#![feature(sized_type_properties)]
#![feature(slice_from_ptr_range)]
diff --git a/library/alloc/src/macros.rs b/library/alloc/src/macros.rs
index 4c6ae8f25..0f767df60 100644
--- a/library/alloc/src/macros.rs
+++ b/library/alloc/src/macros.rs
@@ -79,23 +79,28 @@ macro_rules! vec {
///
/// The first argument `format!` receives is a format string. This must be a string
/// literal. The power of the formatting string is in the `{}`s contained.
-///
/// Additional parameters passed to `format!` replace the `{}`s within the
/// formatting string in the order given unless named or positional parameters
-/// are used; see [`std::fmt`] for more information.
+/// are used.
+///
+/// See [the formatting syntax documentation in `std::fmt`](../std/fmt/index.html)
+/// for details.
///
/// A common use for `format!` is concatenation and interpolation of strings.
/// The same convention is used with [`print!`] and [`write!`] macros,
-/// depending on the intended destination of the string.
+/// depending on the intended destination of the string; all these macros internally use [`format_args!`].
///
/// To convert a single value to a string, use the [`to_string`] method. This
/// will use the [`Display`] formatting trait.
///
-/// [`std::fmt`]: ../std/fmt/index.html
+/// To concatenate literals into a `&'static str`, use the [`concat!`] macro.
+///
/// [`print!`]: ../std/macro.print.html
/// [`write!`]: core::write
+/// [`format_args!`]: core::format_args
/// [`to_string`]: crate::string::ToString
/// [`Display`]: core::fmt::Display
+/// [`concat!`]: core::concat
///
/// # Panics
///
@@ -106,11 +111,11 @@ macro_rules! vec {
/// # Examples
///
/// ```
-/// format!("test");
-/// format!("hello {}", "world!");
-/// format!("x = {}, y = {y}", 10, y = 30);
+/// format!("test"); // => "test"
+/// format!("hello {}", "world!"); // => "hello world!"
+/// format!("x = {}, y = {val}", 10, val = 30); // => "x = 10, y = 30"
/// let (x, y) = (1, 2);
-/// format!("{x} + {y} = 3");
+/// format!("{x} + {y} = 3"); // => "1 + 2 = 3"
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
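The rewritten docs above point readers to `concat!` for building a `&'static str` from literals. A small sketch contrasting the two (illustrative, not taken from the diff):

```rust
fn main() {
    // `concat!` joins literals at compile time into a `&'static str`...
    const GREETING: &str = concat!("hello", " ", "world");
    // ...while `format!` interpolates at run time and allocates a `String`.
    let name = "world";
    let greeting = format!("hello {name}");
    assert_eq!(greeting, GREETING);
}
```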
diff --git a/library/alloc/src/rc.rs b/library/alloc/src/rc.rs
index c485680f9..38339117c 100644
--- a/library/alloc/src/rc.rs
+++ b/library/alloc/src/rc.rs
@@ -1304,6 +1304,7 @@ impl<T: ?Sized, A: Allocator> Rc<T, A> {
/// assert_eq!(unsafe { &*x_ptr }, "hello");
/// ```
#[stable(feature = "rc_raw", since = "1.17.0")]
+ #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
pub fn into_raw(this: Self) -> *const T {
let ptr = Self::as_ptr(&this);
mem::forget(this);
@@ -1327,6 +1328,7 @@ impl<T: ?Sized, A: Allocator> Rc<T, A> {
/// assert_eq!(unsafe { &*x_ptr }, "hello");
/// ```
#[stable(feature = "weak_into_raw", since = "1.45.0")]
+ #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
pub fn as_ptr(this: &Self) -> *const T {
let ptr: *mut RcBox<T> = NonNull::as_ptr(this.ptr);
@@ -2407,6 +2409,27 @@ impl<T> From<T> for Rc<T> {
}
#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_array", since = "1.74.0")]
+impl<T, const N: usize> From<[T; N]> for Rc<[T]> {
+ /// Converts a [`[T; N]`](prim@array) into an `Rc<[T]>`.
+ ///
+ /// The conversion moves the array into a newly allocated `Rc`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::rc::Rc;
+ /// let original: [i32; 3] = [1, 2, 3];
+ /// let shared: Rc<[i32]> = Rc::from(original);
+ /// assert_eq!(&[1, 2, 3], &shared[..]);
+ /// ```
+ #[inline]
+ fn from(v: [T; N]) -> Rc<[T]> {
+ Rc::<[T; N]>::from(v)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<T: Clone> From<&[T]> for Rc<[T]> {
/// Allocate a reference-counted slice and fill it by cloning `v`'s items.
diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs
index 093dcbbe8..aa3b7b7e1 100644
--- a/library/alloc/src/slice.rs
+++ b/library/alloc/src/slice.rs
@@ -592,7 +592,7 @@ impl<T> [T] {
/// ```
#[rustc_allow_incoherent_impl]
#[stable(feature = "rust1", since = "1.0.0")]
- #[deprecated(since = "1.3.0", note = "renamed to join")]
+ #[deprecated(since = "1.3.0", note = "renamed to join", suggestion = "join")]
pub fn connect<Separator>(&self, sep: Separator) -> <Self as Join<Separator>>::Output
where
Self: Join<Separator>,
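The `suggestion = "join"` key used here is gated behind the `deprecated_suggestion` feature enabled in `lib.rs` above. A minimal sketch of how the attribute looks in a standalone nightly crate (the item names below are illustrative, not from this change):

```rust
#![feature(deprecated_suggestion)]

#[deprecated(since = "0.2.0", note = "renamed to `join`", suggestion = "join")]
pub fn connect(parts: &[&str], sep: &str) -> String {
    parts.join(sep)
}

pub fn join(parts: &[&str], sep: &str) -> String {
    // With `suggestion`, rustc's deprecation warning can offer a structured
    // fix that rewrites `connect` call sites to `join`.
    parts.join(sep)
}
```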
diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs
index d3b755844..838987f67 100644
--- a/library/alloc/src/sync.rs
+++ b/library/alloc/src/sync.rs
@@ -1454,6 +1454,7 @@ impl<T: ?Sized, A: Allocator> Arc<T, A> {
/// ```
#[must_use = "losing the pointer will leak memory"]
#[stable(feature = "rc_raw", since = "1.17.0")]
+ #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
pub fn into_raw(this: Self) -> *const T {
let ptr = Self::as_ptr(&this);
mem::forget(this);
@@ -1478,6 +1479,7 @@ impl<T: ?Sized, A: Allocator> Arc<T, A> {
/// ```
#[must_use]
#[stable(feature = "rc_as_ptr", since = "1.45.0")]
+ #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
pub fn as_ptr(this: &Self) -> *const T {
let ptr: *mut ArcInner<T> = NonNull::as_ptr(this.ptr);
@@ -1616,7 +1618,7 @@ impl<T: ?Sized, A: Allocator> Arc<T, A> {
#[must_use]
#[stable(feature = "arc_counts", since = "1.15.0")]
pub fn weak_count(this: &Self) -> usize {
- let cnt = this.inner().weak.load(Acquire);
+ let cnt = this.inner().weak.load(Relaxed);
// If the weak count is currently locked, the value of the
// count was 0 just before taking the lock.
if cnt == usize::MAX { 0 } else { cnt - 1 }
@@ -1646,7 +1648,7 @@ impl<T: ?Sized, A: Allocator> Arc<T, A> {
#[must_use]
#[stable(feature = "arc_counts", since = "1.15.0")]
pub fn strong_count(this: &Self) -> usize {
- this.inner().strong.load(Acquire)
+ this.inner().strong.load(Relaxed)
}
/// Increments the strong reference count on the `Arc<T>` associated with the
@@ -2801,7 +2803,7 @@ impl<T: ?Sized, A: Allocator> Weak<T, A> {
#[must_use]
#[stable(feature = "weak_counts", since = "1.41.0")]
pub fn strong_count(&self) -> usize {
- if let Some(inner) = self.inner() { inner.strong.load(Acquire) } else { 0 }
+ if let Some(inner) = self.inner() { inner.strong.load(Relaxed) } else { 0 }
}
/// Gets an approximation of the number of `Weak` pointers pointing to this
@@ -2820,7 +2822,7 @@ impl<T: ?Sized, A: Allocator> Weak<T, A> {
pub fn weak_count(&self) -> usize {
if let Some(inner) = self.inner() {
let weak = inner.weak.load(Acquire);
- let strong = inner.strong.load(Acquire);
+ let strong = inner.strong.load(Relaxed);
if strong == 0 {
0
} else {
@@ -3268,6 +3270,27 @@ impl<T> From<T> for Arc<T> {
}
#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_array", since = "1.74.0")]
+impl<T, const N: usize> From<[T; N]> for Arc<[T]> {
+ /// Converts a [`[T; N]`](prim@array) into an `Arc<[T]>`.
+ ///
+ /// The conversion moves the array into a newly allocated `Arc`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::sync::Arc;
+ /// let original: [i32; 3] = [1, 2, 3];
+ /// let shared: Arc<[i32]> = Arc::from(original);
+ /// assert_eq!(&[1, 2, 3], &shared[..]);
+ /// ```
+ #[inline]
+ fn from(v: [T; N]) -> Arc<[T]> {
+ Arc::<[T; N]>::from(v)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<T: Clone> From<&[T]> for Arc<[T]> {
/// Allocate a reference-counted slice and fill it by cloning `v`'s items.
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs
index e45ddc789..56fc6bc40 100644
--- a/library/alloc/src/vec/mod.rs
+++ b/library/alloc/src/vec/mod.rs
@@ -1110,8 +1110,8 @@ impl<T, A: Allocator> Vec<T, A> {
/// Shortens the vector, keeping the first `len` elements and dropping
/// the rest.
///
- /// If `len` is greater than the vector's current length, this has no
- /// effect.
+ /// If `len` is greater than or equal to the vector's current length, this
+ /// has no effect.
///
/// The [`drain`] method can emulate `truncate`, but causes the excess
/// elements to be returned instead of dropped.
@@ -1218,6 +1218,15 @@ impl<T, A: Allocator> Vec<T, A> {
/// is never written to (except inside an `UnsafeCell`) using this pointer or any pointer
/// derived from it. If you need to mutate the contents of the slice, use [`as_mut_ptr`].
///
+ /// This method guarantees that, for the purpose of the aliasing model, it
+ /// does not materialize a reference to the underlying slice, and thus the returned pointer
+ /// will remain valid when mixed with other calls to [`as_ptr`] and [`as_mut_ptr`].
+ /// Note that calling other methods that materialize mutable references to the slice,
+ /// or mutable references to specific elements you are planning on accessing through this pointer,
+ /// as well as writing to those elements, may still invalidate this pointer.
+ /// See the second example below for how this guarantee can be used.
+ ///
+ ///
/// # Examples
///
/// ```
@@ -1231,8 +1240,25 @@ impl<T, A: Allocator> Vec<T, A> {
/// }
/// ```
///
+ /// Due to the aliasing guarantee, the following code is legal:
+ ///
+ /// ```rust
+ /// unsafe {
+ /// let mut v = vec![0, 1, 2];
+ /// let ptr1 = v.as_ptr();
+ /// let _ = ptr1.read();
+ /// let ptr2 = v.as_mut_ptr().offset(2);
+ /// ptr2.write(2);
+ /// // Notably, the write to `ptr2` did *not* invalidate `ptr1`
+ /// // because it mutated a different element:
+ /// let _ = ptr1.read();
+ /// }
+ /// ```
+ ///
/// [`as_mut_ptr`]: Vec::as_mut_ptr
+ /// [`as_ptr`]: Vec::as_ptr
#[stable(feature = "vec_as_ptr", since = "1.37.0")]
+ #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
#[inline]
pub fn as_ptr(&self) -> *const T {
// We shadow the slice method of the same name to avoid going through
@@ -1248,6 +1274,15 @@ impl<T, A: Allocator> Vec<T, A> {
/// Modifying the vector may cause its buffer to be reallocated,
/// which would also make any pointers to it invalid.
///
+ /// This method guarantees that, for the purpose of the aliasing model, it
+ /// does not materialize a reference to the underlying slice, and thus the returned pointer
+ /// will remain valid when mixed with other calls to [`as_ptr`] and [`as_mut_ptr`].
+ /// Note that calling other methods that materialize references to the slice,
+ /// or references to specific elements you are planning on accessing through this pointer,
+ /// may still invalidate this pointer.
+ /// See the second example below for how this guarantee can be used.
+ ///
+ ///
/// # Examples
///
/// ```
@@ -1265,7 +1300,25 @@ impl<T, A: Allocator> Vec<T, A> {
/// }
/// assert_eq!(&*x, &[0, 1, 2, 3]);
/// ```
+ ///
+ /// Due to the aliasing guarantee, the following code is legal:
+ ///
+ /// ```rust
+ /// unsafe {
+ /// let mut v = vec![0];
+ /// let ptr1 = v.as_mut_ptr();
+ /// ptr1.write(1);
+ /// let ptr2 = v.as_mut_ptr();
+ /// ptr2.write(2);
+ /// // Notably, the write to `ptr2` did *not* invalidate `ptr1`:
+ /// ptr1.write(3);
+ /// }
+ /// ```
+ ///
+ /// [`as_mut_ptr`]: Vec::as_mut_ptr
+ /// [`as_ptr`]: Vec::as_ptr
#[stable(feature = "vec_as_ptr", since = "1.37.0")]
+ #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
#[inline]
pub fn as_mut_ptr(&mut self) -> *mut T {
// We shadow the slice method of the same name to avoid going through
@@ -3102,6 +3155,36 @@ impl<T: Clone> From<&mut [T]> for Vec<T> {
}
#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "vec_from_array_ref", since = "1.74.0")]
+impl<T: Clone, const N: usize> From<&[T; N]> for Vec<T> {
+ /// Allocate a `Vec<T>` and fill it by cloning `s`'s items.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(Vec::from(&[1, 2, 3]), vec![1, 2, 3]);
+ /// ```
+ fn from(s: &[T; N]) -> Vec<T> {
+ Self::from(s.as_slice())
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "vec_from_array_ref", since = "1.74.0")]
+impl<T: Clone, const N: usize> From<&mut [T; N]> for Vec<T> {
+ /// Allocate a `Vec<T>` and fill it by cloning `s`'s items.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(Vec::from(&mut [1, 2, 3]), vec![1, 2, 3]);
+ /// ```
+ fn from(s: &mut [T; N]) -> Vec<T> {
+ Self::from(s.as_mut_slice())
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
#[stable(feature = "vec_from_array", since = "1.44.0")]
impl<T, const N: usize> From<[T; N]> for Vec<T> {
/// Allocate a `Vec<T>` and move `s`'s items into it.
diff --git a/library/alloc/tests/vec.rs b/library/alloc/tests/vec.rs
index 183dd8e6e..d44dcfbf6 100644
--- a/library/alloc/tests/vec.rs
+++ b/library/alloc/tests/vec.rs
@@ -2499,7 +2499,6 @@ fn test_into_flattened_size_overflow() {
let _ = v.into_flattened();
}
-#[cfg(not(bootstrap))]
#[test]
fn test_box_zero_allocator() {
use core::{alloc::AllocError, cell::RefCell};
@@ -2563,3 +2562,13 @@ fn test_box_zero_allocator() {
// Ensure all ZSTs have been freed.
assert!(alloc.state.borrow().0.is_empty());
}
+
+#[test]
+fn test_vec_from_array_ref() {
+ assert_eq!(Vec::from(&[1, 2, 3]), vec![1, 2, 3]);
+}
+
+#[test]
+fn test_vec_from_array_mut_ref() {
+ assert_eq!(Vec::from(&mut [1, 2, 3]), vec![1, 2, 3]);
+}
diff --git a/library/backtrace/.github/workflows/check-binary-size.yml b/library/backtrace/.github/workflows/check-binary-size.yml
new file mode 100644
index 000000000..0beae1da9
--- /dev/null
+++ b/library/backtrace/.github/workflows/check-binary-size.yml
@@ -0,0 +1,83 @@
+# This workflow checks if a PR commit has changed the size of a hello world Rust program.
+# It downloads Rustc and compiles two versions of a stage0 compiler - one using the base commit
+# of the PR, and one using the latest commit in the PR.
+# If the size of the hello world program has changed, it posts a comment to the PR.
+name: Check binary size
+
+on:
+ pull_request_target:
+ branches:
+ - master
+
+jobs:
+ test:
+ name: Check binary size
+ runs-on: ubuntu-latest
+ permissions:
+ pull-requests: write
+ steps:
+ - name: Print info
+ run: |
+ echo "Current SHA: ${{ github.event.pull_request.head.sha }}"
+ echo "Base SHA: ${{ github.event.pull_request.base.sha }}"
+ - name: Clone Rustc
+ uses: actions/checkout@v3
+ with:
+ repository: rust-lang/rust
+ fetch-depth: 1
+ - name: Fetch backtrace
+ run: git submodule update --init library/backtrace
+ - name: Create hello world program that uses backtrace
+ run: printf "fn main() { panic!(); }" > foo.rs
+ - name: Build binary with base version of backtrace
+ run: |
+ printf "[llvm]\ndownload-ci-llvm = true\n\n[rust]\nincremental = false\n" > config.toml
+ cd library/backtrace
+ git remote add head-pr https://github.com/${{ github.event.pull_request.head.repo.full_name }}
+ git fetch --all
+ git checkout ${{ github.event.pull_request.base.sha }}
+ cd ../..
+ git add library/backtrace
+ python3 x.py build library --stage 0
+ ./build/x86_64-unknown-linux-gnu/stage0-sysroot/bin/rustc -O foo.rs -o binary-reference
+ - name: Build binary with PR version of backtrace
+ run: |
+ cd library/backtrace
+ git checkout ${{ github.event.pull_request.head.sha }}
+ cd ../..
+ git add library/backtrace
+ rm -rf build/x86_64-unknown-linux-gnu/stage0-std
+ python3 x.py build library --stage 0
+ ./build/x86_64-unknown-linux-gnu/stage0-sysroot/bin/rustc -O foo.rs -o binary-updated
+ - name: Display binary size
+ run: |
+ ls -la binary-*
+ echo "SIZE_REFERENCE=$(stat -c '%s' binary-reference)" >> "$GITHUB_ENV"
+ echo "SIZE_UPDATED=$(stat -c '%s' binary-updated)" >> "$GITHUB_ENV"
+ - name: Post a PR comment if the size has changed
+ uses: actions/github-script@v6
+ with:
+ script: |
+ const reference = process.env.SIZE_REFERENCE;
+ const updated = process.env.SIZE_UPDATED;
+ const diff = updated - reference;
+ const plus = diff > 0 ? "+" : "";
+ const diff_str = `${plus}${diff}B`;
+
+ if (diff !== 0) {
+ const percent = (((updated / reference) - 1) * 100).toFixed(2);
+ // The body is created here and wrapped so "weirdly" to avoid whitespace at the start of the lines,
+ // which is interpreted as a code block by Markdown.
+ const body = `Below is the size of a hello-world Rust program linked with libstd with backtrace.
+
+ Original binary size: **${reference}B**
+ Updated binary size: **${updated}B**
+ Difference: **${diff_str}** (${percent}%)`;
+
+ github.rest.issues.createComment({
+ issue_number: context.issue.number,
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ body
+ })
+ }
diff --git a/library/backtrace/.github/workflows/main.yml b/library/backtrace/.github/workflows/main.yml
index 29fff2795..21d7cb492 100644
--- a/library/backtrace/.github/workflows/main.yml
+++ b/library/backtrace/.github/workflows/main.yml
@@ -229,7 +229,7 @@ jobs:
with:
submodules: true
- name: Install Rust
- run: rustup update 1.55.0 && rustup default 1.55.0
+ run: rustup update 1.65.0 && rustup default 1.65.0
- run: cargo build
miri:
diff --git a/library/backtrace/Cargo.toml b/library/backtrace/Cargo.toml
index cff2c9e66..6714b3b7d 100644
--- a/library/backtrace/Cargo.toml
+++ b/library/backtrace/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "backtrace"
-version = "0.3.68"
+version = "0.3.69"
authors = ["The Rust Project Developers"]
build = "build.rs"
license = "MIT OR Apache-2.0"
@@ -14,6 +14,7 @@ A library to acquire a stack trace (backtrace) at runtime in a Rust program.
autoexamples = true
autotests = true
edition = "2018"
+exclude = ["/ci/"]
[workspace]
members = ['crates/cpp_smoke_test', 'crates/as-if-std']
@@ -27,7 +28,6 @@ exclude = [
[dependencies]
cfg-if = "1.0"
rustc-demangle = "0.1.4"
-libc = { version = "0.2.146", default-features = false }
# Optionally enable the ability to serialize a `Backtrace`, controlled through
# the `serialize-*` features below.
@@ -37,11 +37,13 @@ rustc-serialize = { version = "0.3", optional = true }
# Optionally demangle C++ frames' symbols in backtraces.
cpp_demangle = { default-features = false, version = "0.4.0", optional = true, features = ["alloc"] }
-addr2line = { version = "0.20.0", default-features = false }
+[target.'cfg(not(all(windows, target_env = "msvc", not(target_vendor = "uwp"))))'.dependencies]
miniz_oxide = { version = "0.7.0", default-features = false }
+addr2line = { version = "0.21.0", default-features = false }
+libc = { version = "0.2.146", default-features = false }
-[dependencies.object]
-version = "0.31.1"
+[target.'cfg(not(all(windows, target_env = "msvc", not(target_vendor = "uwp"))))'.dependencies.object]
+version = "0.32.0"
default-features = false
features = ['read_core', 'elf', 'macho', 'pe', 'unaligned', 'archive']
diff --git a/library/backtrace/build.rs b/library/backtrace/build.rs
index 812fbb1fe..9bd3abd16 100644
--- a/library/backtrace/build.rs
+++ b/library/backtrace/build.rs
@@ -1,8 +1,10 @@
extern crate cc;
use std::env;
+use std::path::Path;
-fn main() {
+// Must be public so the build script of `std` can call it.
+pub fn main() {
match env::var("CARGO_CFG_TARGET_OS").unwrap_or_default().as_str() {
"android" => build_android(),
_ => {}
@@ -10,7 +12,13 @@ fn main() {
}
fn build_android() {
- let expansion = match cc::Build::new().file("src/android-api.c").try_expand() {
+ // Resolve `src/android-api.c` relative to this file.
+ // Required to support calling this from the `std` build script.
+ let android_api_c = Path::new(file!())
+ .parent()
+ .unwrap()
+ .join("src/android-api.c");
+ let expansion = match cc::Build::new().file(android_api_c).try_expand() {
Ok(result) => result,
Err(e) => {
println!("failed to run C compiler: {}", e);
diff --git a/library/backtrace/crates/as-if-std/Cargo.toml b/library/backtrace/crates/as-if-std/Cargo.toml
index 012e60f8f..bcbcfe159 100644
--- a/library/backtrace/crates/as-if-std/Cargo.toml
+++ b/library/backtrace/crates/as-if-std/Cargo.toml
@@ -15,15 +15,21 @@ bench = false
cfg-if = "1.0"
rustc-demangle = "0.1.21"
libc = { version = "0.2.146", default-features = false }
-addr2line = { version = "0.20.0", default-features = false, optional = true }
-miniz_oxide = { version = "0.7", default-features = false }
-[dependencies.object]
-version = "0.31.1"
+[target.'cfg(not(all(windows, target_env = "msvc", not(target_vendor = "uwp"))))'.dependencies]
+miniz_oxide = { version = "0.7.0", optional = true, default-features = false }
+addr2line = { version = "0.21.0", optional = true, default-features = false }
+
+[target.'cfg(not(all(windows, target_env = "msvc", not(target_vendor = "uwp"))))'.dependencies.object]
+version = "0.32.0"
default-features = false
optional = true
features = ['read_core', 'elf', 'macho', 'pe', 'unaligned', 'archive']
+[build-dependencies]
+# Dependency of the `backtrace` crate
+cc = "1.0.67"
+
[features]
default = ['backtrace']
-backtrace = ['addr2line', 'object']
+backtrace = ['addr2line', 'miniz_oxide', 'object']
diff --git a/library/backtrace/crates/as-if-std/build.rs b/library/backtrace/crates/as-if-std/build.rs
index 7018b1017..7669f555d 100644
--- a/library/backtrace/crates/as-if-std/build.rs
+++ b/library/backtrace/crates/as-if-std/build.rs
@@ -1,3 +1,11 @@
+// backtrace-rs requires a feature check on Android targets, so
+// we need to run its build.rs as well.
+#[allow(unused_extern_crates)]
+#[path = "../../build.rs"]
+mod backtrace_build_rs;
+
fn main() {
println!("cargo:rustc-cfg=backtrace_in_libstd");
+
+ backtrace_build_rs::main();
}
diff --git a/library/backtrace/src/print.rs b/library/backtrace/src/print.rs
index 8d9cbe3d4..395328a0a 100644
--- a/library/backtrace/src/print.rs
+++ b/library/backtrace/src/print.rs
@@ -83,7 +83,8 @@ impl<'a, 'b> BacktraceFmt<'a, 'b> {
/// This is currently a no-op but is added for future compatibility with
/// backtrace formats.
pub fn finish(&mut self) -> fmt::Result {
- // Currently a no-op-- including this hook to allow for future additions.
+ #[cfg(target_os = "fuchsia")]
+ fuchsia::finish_context(self.fmt)?;
Ok(())
}
diff --git a/library/backtrace/src/print/fuchsia.rs b/library/backtrace/src/print/fuchsia.rs
index ce3f17862..cb872697d 100644
--- a/library/backtrace/src/print/fuchsia.rs
+++ b/library/backtrace/src/print/fuchsia.rs
@@ -425,7 +425,7 @@ impl DsoPrinter<'_, '_> {
/// This function prints the Fuchsia symbolizer markup for all information contained in a DSO.
pub fn print_dso_context(out: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
- out.write_str("{{{reset}}}\n")?;
+ out.write_str("{{{reset:begin}}}\n")?;
let mut visitor = DsoPrinter {
writer: out,
module_count: 0,
@@ -434,3 +434,8 @@ pub fn print_dso_context(out: &mut core::fmt::Formatter<'_>) -> core::fmt::Resul
for_each_dso(&mut visitor);
visitor.error
}
+
+/// This function prints the Fuchsia symbolizer markup to end the backtrace.
+pub fn finish_context(out: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ out.write_str("{{{reset:end}}}\n")
+}
diff --git a/library/backtrace/src/symbolize/gimli/parse_running_mmaps_unix.rs b/library/backtrace/src/symbolize/gimli/parse_running_mmaps_unix.rs
index deeeb2971..5d4b34675 100644
--- a/library/backtrace/src/symbolize/gimli/parse_running_mmaps_unix.rs
+++ b/library/backtrace/src/symbolize/gimli/parse_running_mmaps_unix.rs
@@ -85,16 +85,37 @@ impl FromStr for MapsEntry {
// e.g.: "ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0 [vsyscall]"
// e.g.: "7f5985f46000-7f5985f48000 rw-p 00039000 103:06 76021795 /usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2"
// e.g.: "35b1a21000-35b1a22000 rw-p 00000000 00:00 0"
+ //
+ // Note that paths may contain spaces, so we can't use `str::split` for parsing (until
+ // Split::remainder is stabilized #77998).
fn from_str(s: &str) -> Result<Self, Self::Err> {
- let mut parts = s
- .split(' ') // space-separated fields
- .filter(|s| s.len() > 0); // multiple spaces implies empty strings that need to be skipped.
- let range_str = parts.next().ok_or("Couldn't find address")?;
- let perms_str = parts.next().ok_or("Couldn't find permissions")?;
- let offset_str = parts.next().ok_or("Couldn't find offset")?;
- let dev_str = parts.next().ok_or("Couldn't find dev")?;
- let inode_str = parts.next().ok_or("Couldn't find inode")?;
- let pathname_str = parts.next().unwrap_or(""); // pathname may be omitted.
+ let (range_str, s) = s.trim_start().split_once(' ').unwrap_or((s, ""));
+ if range_str.is_empty() {
+ return Err("Couldn't find address");
+ }
+
+ let (perms_str, s) = s.trim_start().split_once(' ').unwrap_or((s, ""));
+ if perms_str.is_empty() {
+ return Err("Couldn't find permissions");
+ }
+
+ let (offset_str, s) = s.trim_start().split_once(' ').unwrap_or((s, ""));
+ if offset_str.is_empty() {
+ return Err("Couldn't find offset");
+ }
+
+ let (dev_str, s) = s.trim_start().split_once(' ').unwrap_or((s, ""));
+ if dev_str.is_empty() {
+ return Err("Couldn't find dev");
+ }
+
+ let (inode_str, s) = s.trim_start().split_once(' ').unwrap_or((s, ""));
+ if inode_str.is_empty() {
+ return Err("Couldn't find inode");
+ }
+
+ // Pathname may be omitted in which case it will be empty
+ let pathname_str = s.trim_start();
let hex = |s| usize::from_str_radix(s, 16).map_err(|_| "Couldn't parse hex number");
let address = if let Some((start, limit)) = range_str.split_once('-') {
@@ -229,4 +250,46 @@ fn check_maps_entry_parsing_32bit() {
pathname: Default::default(),
}
);
+ assert_eq!(
+ "b7c79000-b7e02000 r--p 00000000 08:01 60662705 \
+ /executable/path/with some spaces"
+ .parse::<MapsEntry>()
+ .unwrap(),
+ MapsEntry {
+ address: (0xb7c79000, 0xb7e02000),
+ perms: ['r', '-', '-', 'p'],
+ offset: 0x00000000,
+ dev: (0x08, 0x01),
+ inode: 0x60662705,
+ pathname: "/executable/path/with some spaces".into(),
+ }
+ );
+ assert_eq!(
+ "b7c79000-b7e02000 r--p 00000000 08:01 60662705 \
+ /executable/path/with multiple-continuous spaces "
+ .parse::<MapsEntry>()
+ .unwrap(),
+ MapsEntry {
+ address: (0xb7c79000, 0xb7e02000),
+ perms: ['r', '-', '-', 'p'],
+ offset: 0x00000000,
+ dev: (0x08, 0x01),
+ inode: 0x60662705,
+ pathname: "/executable/path/with multiple-continuous spaces ".into(),
+ }
+ );
+ assert_eq!(
+ " b7c79000-b7e02000 r--p 00000000 08:01 60662705 \
+ /executable/path/starts-with-spaces"
+ .parse::<MapsEntry>()
+ .unwrap(),
+ MapsEntry {
+ address: (0xb7c79000, 0xb7e02000),
+ perms: ['r', '-', '-', 'p'],
+ offset: 0x00000000,
+ dev: (0x08, 0x01),
+ inode: 0x60662705,
+ pathname: "/executable/path/starts-with-spaces".into(),
+ }
+ );
}
diff --git a/library/backtrace/src/symbolize/mod.rs b/library/backtrace/src/symbolize/mod.rs
index dbc346522..a7c199506 100644
--- a/library/backtrace/src/symbolize/mod.rs
+++ b/library/backtrace/src/symbolize/mod.rs
@@ -471,7 +471,7 @@ cfg_if::cfg_if! {
mod dbghelp;
use dbghelp as imp;
} else if #[cfg(all(
- any(unix, windows),
+ any(unix, all(windows, target_env = "gnu")),
not(target_vendor = "uwp"),
not(target_os = "emscripten"),
any(not(backtrace_in_libstd), feature = "backtrace"),
diff --git a/library/backtrace/src/windows.rs b/library/backtrace/src/windows.rs
index 9ec3ba99b..92c2b2e66 100644
--- a/library/backtrace/src/windows.rs
+++ b/library/backtrace/src/windows.rs
@@ -177,9 +177,9 @@ macro_rules! ffi {
assert_eq!($name as usize, winapi::$name as usize);
let mut x: unsafe extern "system" fn($($args)*) -> $ret;
x = $name;
- drop(x);
+ let _ = x;
x = winapi::$name;
- drop(x);
+ let _ = x;
}
}
)*
diff --git a/library/core/primitive_docs/box_into_raw.md b/library/core/primitive_docs/box_into_raw.md
deleted file mode 100644
index 9dd0344c7..000000000
--- a/library/core/primitive_docs/box_into_raw.md
+++ /dev/null
@@ -1 +0,0 @@
-../std/boxed/struct.Box.html#method.into_raw
diff --git a/library/core/primitive_docs/fs_file.md b/library/core/primitive_docs/fs_file.md
deleted file mode 100644
index 4023e340a..000000000
--- a/library/core/primitive_docs/fs_file.md
+++ /dev/null
@@ -1 +0,0 @@
-../std/fs/struct.File.html
diff --git a/library/core/primitive_docs/io_bufread.md b/library/core/primitive_docs/io_bufread.md
deleted file mode 100644
index 7beda2cd3..000000000
--- a/library/core/primitive_docs/io_bufread.md
+++ /dev/null
@@ -1 +0,0 @@
-../std/io/trait.BufRead.html
diff --git a/library/core/primitive_docs/io_read.md b/library/core/primitive_docs/io_read.md
deleted file mode 100644
index b7ecf5e27..000000000
--- a/library/core/primitive_docs/io_read.md
+++ /dev/null
@@ -1 +0,0 @@
-../std/io/trait.Read.html
diff --git a/library/core/primitive_docs/io_seek.md b/library/core/primitive_docs/io_seek.md
deleted file mode 100644
index db0274d29..000000000
--- a/library/core/primitive_docs/io_seek.md
+++ /dev/null
@@ -1 +0,0 @@
-../std/io/trait.Seek.html
diff --git a/library/core/primitive_docs/io_write.md b/library/core/primitive_docs/io_write.md
deleted file mode 100644
index 92a3b88a7..000000000
--- a/library/core/primitive_docs/io_write.md
+++ /dev/null
@@ -1 +0,0 @@
-../std/io/trait.Write.html
diff --git a/library/core/primitive_docs/net_tosocketaddrs.md b/library/core/primitive_docs/net_tosocketaddrs.md
deleted file mode 100644
index 4daa10ddb..000000000
--- a/library/core/primitive_docs/net_tosocketaddrs.md
+++ /dev/null
@@ -1 +0,0 @@
-../std/net/trait.ToSocketAddrs.html
diff --git a/library/core/primitive_docs/process_exit.md b/library/core/primitive_docs/process_exit.md
deleted file mode 100644
index cae34d12d..000000000
--- a/library/core/primitive_docs/process_exit.md
+++ /dev/null
@@ -1 +0,0 @@
-../std/process/fn.exit.html
diff --git a/library/core/primitive_docs/string_string.md b/library/core/primitive_docs/string_string.md
deleted file mode 100644
index 303dc07b1..000000000
--- a/library/core/primitive_docs/string_string.md
+++ /dev/null
@@ -1 +0,0 @@
-../std/string/struct.String.html
diff --git a/library/core/src/ascii/ascii_char.rs b/library/core/src/ascii/ascii_char.rs
index 5378b210e..cc872a534 100644
--- a/library/core/src/ascii/ascii_char.rs
+++ b/library/core/src/ascii/ascii_char.rs
@@ -3,7 +3,7 @@
//! suggestions from rustc if you get anything slightly wrong in here, and overall
//! helps with clarity as we're also referring to `char` intentionally in here.
-use crate::fmt;
+use crate::fmt::{self, Write};
use crate::mem::transmute;
/// One of the 128 Unicode characters from U+0000 through U+007F,
@@ -54,7 +54,7 @@ use crate::mem::transmute;
/// [chart]: https://www.unicode.org/charts/PDF/U0000.pdf
/// [NIST FIPS 1-2]: https://nvlpubs.nist.gov/nistpubs/Legacy/FIPS/fipspub1-2-1977.pdf
/// [NamesList]: https://www.unicode.org/Public/15.0.0/ucd/NamesList.txt
-#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
+#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[unstable(feature = "ascii_char", issue = "110998")]
#[repr(u8)]
pub enum AsciiChar {
@@ -563,3 +563,40 @@ impl fmt::Display for AsciiChar {
<str as fmt::Display>::fmt(self.as_str(), f)
}
}
+
+#[unstable(feature = "ascii_char", issue = "110998")]
+impl fmt::Debug for AsciiChar {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ #[inline]
+ fn backslash(a: AsciiChar) -> ([AsciiChar; 4], u8) {
+ ([AsciiChar::ReverseSolidus, a, AsciiChar::Null, AsciiChar::Null], 2)
+ }
+
+ let (buf, len) = match self {
+ AsciiChar::Null => backslash(AsciiChar::Digit0),
+ AsciiChar::CharacterTabulation => backslash(AsciiChar::SmallT),
+ AsciiChar::CarriageReturn => backslash(AsciiChar::SmallR),
+ AsciiChar::LineFeed => backslash(AsciiChar::SmallN),
+ AsciiChar::ReverseSolidus => backslash(AsciiChar::ReverseSolidus),
+ AsciiChar::Apostrophe => backslash(AsciiChar::Apostrophe),
+ _ => {
+ let byte = self.to_u8();
+ if !byte.is_ascii_control() {
+ ([*self, AsciiChar::Null, AsciiChar::Null, AsciiChar::Null], 1)
+ } else {
+ const HEX_DIGITS: [AsciiChar; 16] = *b"0123456789abcdef".as_ascii().unwrap();
+
+ let hi = HEX_DIGITS[usize::from(byte >> 4)];
+ let lo = HEX_DIGITS[usize::from(byte & 0xf)];
+ ([AsciiChar::ReverseSolidus, AsciiChar::SmallX, hi, lo], 4)
+ }
+ }
+ };
+
+ f.write_char('\'')?;
+ for byte in &buf[..len as usize] {
+ f.write_str(byte.as_str())?;
+ }
+ f.write_char('\'')
+ }
+}
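A sketch of what the new `Debug` implementation prints (nightly-only; assumes the unstable `ascii_char` feature and the `u8::as_ascii` helper that this diff also relies on):

```rust
#![feature(ascii_char)]

fn main() {
    let a = b'a'.as_ascii().unwrap();
    let nl = b'\n'.as_ascii().unwrap();
    let esc = b'\x1b'.as_ascii().unwrap();
    assert_eq!(format!("{a:?}"), "'a'");       // printable characters are quoted as-is
    assert_eq!(format!("{nl:?}"), "'\\n'");    // named escapes use a backslash form
    assert_eq!(format!("{esc:?}"), "'\\x1b'"); // other control characters use \xNN
}
```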
diff --git a/library/core/src/cell.rs b/library/core/src/cell.rs
index bf4c682d3..3b4d99221 100644
--- a/library/core/src/cell.rs
+++ b/library/core/src/cell.rs
@@ -237,6 +237,7 @@
use crate::cmp::Ordering;
use crate::fmt::{self, Debug, Display};
+use crate::intrinsics::is_nonoverlapping;
use crate::marker::{PhantomData, Unsize};
use crate::mem;
use crate::ops::{CoerceUnsized, Deref, DerefMut, DispatchFromDyn};
@@ -415,6 +416,12 @@ impl<T> Cell<T> {
/// Swaps the values of two `Cell`s.
/// Difference with `std::mem::swap` is that this function doesn't require `&mut` reference.
///
+ /// # Panics
+ ///
+ /// This function will panic if `self` and `other` are different `Cell`s that partially overlap.
+ /// (Using just standard library methods, it is impossible to create such partially overlapping `Cell`s.
+ /// However, unsafe code is allowed to e.g. create two `&Cell<[i32; 2]>` that partially overlap.)
+ ///
/// # Examples
///
/// ```
@@ -430,14 +437,20 @@ impl<T> Cell<T> {
#[stable(feature = "move_cell", since = "1.17.0")]
pub fn swap(&self, other: &Self) {
if ptr::eq(self, other) {
+ // Swapping wouldn't change anything.
return;
}
+ if !is_nonoverlapping(self, other, 1) {
+ // See <https://github.com/rust-lang/rust/issues/80778> for why we need to stop here.
+ panic!("`Cell::swap` on overlapping non-identical `Cell`s");
+ }
// SAFETY: This can be risky if called from separate threads, but `Cell`
// is `!Sync` so this won't happen. This also won't invalidate any
// pointers since `Cell` makes sure nothing else will be pointing into
- // either of these `Cell`s.
+ // either of these `Cell`s. We also excluded shenanigans like partially overlapping `Cell`s,
+ // so `swap` will just properly copy two full values of type `T` back and forth.
unsafe {
- ptr::swap(self.value.get(), other.value.get());
+ mem::swap(&mut *self.value.get(), &mut *other.value.get());
}
}
@@ -543,6 +556,7 @@ impl<T: ?Sized> Cell<T> {
#[inline]
#[stable(feature = "cell_as_ptr", since = "1.12.0")]
#[rustc_const_stable(feature = "const_cell_as_ptr", since = "1.32.0")]
+ #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
pub const fn as_ptr(&self) -> *mut T {
self.value.get()
}
@@ -740,6 +754,22 @@ impl Display for BorrowMutError {
}
}
+// This ensures the panicking code is outlined from `borrow_mut` for `RefCell`.
+#[inline(never)]
+#[track_caller]
+#[cold]
+fn panic_already_borrowed(err: BorrowMutError) -> ! {
+ panic!("already borrowed: {:?}", err)
+}
+
+// This ensures the panicking code is outlined from `borrow` for `RefCell`.
+#[inline(never)]
+#[track_caller]
+#[cold]
+fn panic_already_mutably_borrowed(err: BorrowError) -> ! {
+ panic!("already mutably borrowed: {:?}", err)
+}
+
// Positive values represent the number of `Ref` active. Negative values
// represent the number of `RefMut` active. Multiple `RefMut`s can only be
// active at a time if they refer to distinct, nonoverlapping components of a
@@ -921,7 +951,10 @@ impl<T: ?Sized> RefCell<T> {
#[inline]
#[track_caller]
pub fn borrow(&self) -> Ref<'_, T> {
- self.try_borrow().expect("already mutably borrowed")
+ match self.try_borrow() {
+ Ok(b) => b,
+ Err(err) => panic_already_mutably_borrowed(err),
+ }
}
/// Immutably borrows the wrapped value, returning an error if the value is currently mutably
@@ -1014,7 +1047,10 @@ impl<T: ?Sized> RefCell<T> {
#[inline]
#[track_caller]
pub fn borrow_mut(&self) -> RefMut<'_, T> {
- self.try_borrow_mut().expect("already borrowed")
+ match self.try_borrow_mut() {
+ Ok(b) => b,
+ Err(err) => panic_already_borrowed(err),
+ }
}
/// Mutably borrows the wrapped value, returning an error if the value is currently borrowed.
@@ -1076,6 +1112,7 @@ impl<T: ?Sized> RefCell<T> {
/// ```
#[inline]
#[stable(feature = "cell_as_ptr", since = "1.12.0")]
+ #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
pub fn as_ptr(&self) -> *mut T {
self.value.get()
}
@@ -1893,8 +1930,7 @@ impl<T: ?Sized + fmt::Display> fmt::Display for RefMut<'_, T> {
/// on an _exclusive_ `UnsafeCell<T>`. Even though `T` and `UnsafeCell<T>` have the
/// same memory layout, the following is not allowed and undefined behavior:
///
-#[cfg_attr(bootstrap, doc = "```rust,no_run")]
-#[cfg_attr(not(bootstrap), doc = "```rust,compile_fail")]
+/// ```rust,compile_fail
/// # use std::cell::UnsafeCell;
/// unsafe fn not_allowed<T>(ptr: &UnsafeCell<T>) -> &mut T {
/// let t = ptr as *const UnsafeCell<T> as *mut T;
@@ -2071,6 +2107,7 @@ impl<T: ?Sized> UnsafeCell<T> {
#[inline(always)]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_unsafecell_get", since = "1.32.0")]
+ #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
pub const fn get(&self) -> *mut T {
// We can just cast the pointer from `UnsafeCell<T>` to `T` because of
// #[repr(transparent)]. This exploits std's special status, there is
@@ -2131,6 +2168,7 @@ impl<T: ?Sized> UnsafeCell<T> {
#[inline(always)]
#[stable(feature = "unsafe_cell_raw_get", since = "1.56.0")]
#[rustc_const_stable(feature = "unsafe_cell_raw_get", since = "1.56.0")]
+ #[rustc_diagnostic_item = "unsafe_cell_raw_get"]
pub const fn raw_get(this: *const Self) -> *mut T {
// We can just cast the pointer from `UnsafeCell<T>` to `T` because of
// #[repr(transparent)]. This exploits std's special status, there is
@@ -2213,6 +2251,7 @@ impl<T: ?Sized> SyncUnsafeCell<T> {
/// when casting to `&mut T`, and ensure that there are no mutations
/// or mutable aliases going on when casting to `&T`
#[inline]
+ #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
pub const fn get(&self) -> *mut T {
self.value.get()
}
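The new `# Panics` section for `Cell::swap` above concerns partially overlapping `Cell`s, which only unsafe code can create. A deliberately contrived sketch of the condition the new check guards against (illustrative only):

```rust
use std::cell::Cell;
use std::panic::{catch_unwind, AssertUnwindSafe};

fn main() {
    let backing = Cell::new([0i32; 3]);
    let ptr = &backing as *const Cell<[i32; 3]> as *const i32;
    // SAFETY (sketch only): both windows stay inside `backing`'s allocation.
    let a: &Cell<[i32; 2]> = unsafe { &*ptr.cast() };
    let b: &Cell<[i32; 2]> = unsafe { &*ptr.add(1).cast() };
    // `a` and `b` overlap in their middle element, so the new check makes
    // `swap` panic instead of performing a partially overlapping copy.
    assert!(catch_unwind(AssertUnwindSafe(|| a.swap(b))).is_err());
}
```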
diff --git a/library/core/src/char/convert.rs b/library/core/src/char/convert.rs
index b84e4b35b..453de9754 100644
--- a/library/core/src/char/convert.rs
+++ b/library/core/src/char/convert.rs
@@ -87,20 +87,54 @@ impl From<char> for u128 {
}
}
-/// Map `char` with code point in U+0000..=U+00FF to byte in 0x00..=0xFF with same value, failing
-/// if the code point is greater than U+00FF.
+/// Maps a `char` with code point in U+0000..=U+00FF to a byte in 0x00..=0xFF with same value,
+/// failing if the code point is greater than U+00FF.
///
/// See [`impl From<u8> for char`](char#impl-From<u8>-for-char) for details on the encoding.
#[stable(feature = "u8_from_char", since = "1.59.0")]
impl TryFrom<char> for u8 {
type Error = TryFromCharError;
+ /// Tries to convert a [`char`] into a [`u8`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let a = 'ÿ'; // U+00FF
+ /// let b = 'Ā'; // U+0100
+ /// assert_eq!(u8::try_from(a), Ok(0xFF_u8));
+ /// assert!(u8::try_from(b).is_err());
+ /// ```
#[inline]
fn try_from(c: char) -> Result<u8, Self::Error> {
u8::try_from(u32::from(c)).map_err(|_| TryFromCharError(()))
}
}
+/// Maps a `char` with code point in U+0000..=U+FFFF to a `u16` in 0x0000..=0xFFFF with the same value,
+/// failing if the code point is greater than U+FFFF.
+///
+/// This corresponds to the UCS-2 encoding, as specified in ISO/IEC 10646:2003.
+#[stable(feature = "u16_from_char", since = "1.74.0")]
+impl TryFrom<char> for u16 {
+ type Error = TryFromCharError;
+
+ /// Tries to convert a [`char`] into a [`u16`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let trans_rights = '⚧'; // U+26A7
+ /// let ninjas = '🥷'; // U+1F977
+ /// assert_eq!(u16::try_from(trans_rights), Ok(0x26A7_u16));
+ /// assert!(u16::try_from(ninjas).is_err());
+ /// ```
+ #[inline]
+ fn try_from(c: char) -> Result<u16, Self::Error> {
+ u16::try_from(u32::from(c)).map_err(|_| TryFromCharError(()))
+ }
+}
+
/// Maps a byte in 0x00..=0xFF to a `char` whose code point has the same value, in U+0000..=U+00FF.
///
/// Unicode is designed such that this effectively decodes bytes
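
As a quick illustration of the new `TryFrom<char> for u16` impl (on a toolchain where it has landed), the conversion succeeds exactly for code points in the Basic Multilingual Plane; anything above U+FFFF still needs UTF-16 surrogate pairs:

```rust
fn main() {
    assert_eq!(u16::try_from('ÿ'), Ok(0x00FF)); // U+00FF
    assert_eq!(u16::try_from('⚧'), Ok(0x26A7)); // U+26A7, inside the BMP
    assert!(u16::try_from('🦀').is_err());      // U+1F980, outside the BMP

    // Outside the BMP, encode to UTF-16 rather than converting the scalar value.
    let mut buf = [0u16; 2];
    assert_eq!('🦀'.encode_utf16(&mut buf).len(), 2);
}
```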
diff --git a/library/core/src/char/methods.rs b/library/core/src/char/methods.rs
index 515b8d20e..4ac956e7b 100644
--- a/library/core/src/char/methods.rs
+++ b/library/core/src/char/methods.rs
@@ -9,8 +9,58 @@ use crate::unicode::{self, conversions};
use super::*;
impl char {
+ /// The lowest valid code point a `char` can have, `'\0'`.
+ ///
+ /// Unlike integer types, `char` actually has a gap in the middle,
+ /// meaning that the range of possible `char`s is smaller than you
+ /// might expect. Ranges of `char` will automatically hop this gap
+ /// for you:
+ ///
+ /// ```
+ /// #![feature(char_min)]
+ /// let dist = u32::from(char::MAX) - u32::from(char::MIN);
+ /// let size = (char::MIN..=char::MAX).count() as u32;
+ /// assert!(size < dist);
+ /// ```
+ ///
+ /// Despite this gap, the `MIN` and [`MAX`] values can be used as bounds for
+ /// all `char` values.
+ ///
+ /// [`MAX`]: char::MAX
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(char_min)]
+ /// # fn something_which_returns_char() -> char { 'a' }
+ /// let c: char = something_which_returns_char();
+ /// assert!(char::MIN <= c);
+ ///
+ /// let value_at_min = u32::from(char::MIN);
+ /// assert_eq!(char::from_u32(value_at_min), Some('\0'));
+ /// ```
+ #[unstable(feature = "char_min", issue = "114298")]
+ pub const MIN: char = '\0';
+
/// The highest valid code point a `char` can have, `'\u{10FFFF}'`.
///
+ /// Unlike integer types, `char` actually has a gap in the middle,
+ /// meaning that the range of possible `char`s is smaller than you
+ /// might expect. Ranges of `char` will automatically hop this gap
+ /// for you:
+ ///
+ /// ```
+ /// #![feature(char_min)]
+ /// let dist = u32::from(char::MAX) - u32::from(char::MIN);
+ /// let size = (char::MIN..=char::MAX).count() as u32;
+ /// assert!(size < dist);
+ /// ```
+ ///
+ /// Despite this gap, the [`MIN`] and `MAX` values can be used as bounds for
+ /// all `char` values.
+ ///
+ /// [`MIN`]: char::MIN
+ ///
/// # Examples
///
/// ```
@@ -18,7 +68,7 @@ impl char {
/// let c: char = something_which_returns_char();
/// assert!(c <= char::MAX);
///
- /// let value_at_max = char::MAX as u32;
+ /// let value_at_max = u32::from(char::MAX);
/// assert_eq!(char::from_u32(value_at_max), Some('\u{10FFFF}'));
/// assert_eq!(char::from_u32(value_at_max + 1), None);
/// ```
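
A small nightly-only check of the surrogate gap described in the new `char::MIN`/`char::MAX` docs (requires `#![feature(char_min)]`): the number of scalar values falls 0x800 short of the raw distance between the endpoints.

```rust
#![feature(char_min)]

fn main() {
    let code_points = u32::from(char::MAX) - u32::from(char::MIN) + 1; // 0x110000
    let scalar_values = (char::MIN..=char::MAX).count() as u32;
    assert_eq!(code_points - scalar_values, 0x800); // the surrogate range
}
```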
diff --git a/library/core/src/cmp.rs b/library/core/src/cmp.rs
index 3c127efb3..360806167 100644
--- a/library/core/src/cmp.rs
+++ b/library/core/src/cmp.rs
@@ -63,6 +63,11 @@ use self::Ordering::*;
/// (transitive) impls are not forced to exist, but these requirements apply
/// whenever they do exist.
///
+/// Violating these requirements is a logic error. The behavior resulting from a logic error is not
+/// specified, but users of the trait must ensure that such logic errors do *not* result in
+/// undefined behavior. This means that `unsafe` code **must not** rely on the correctness of these
+/// methods.
+///
/// ## Derivable
///
/// This trait can be used with `#[derive]`. When `derive`d on structs, two
@@ -250,6 +255,11 @@ pub macro PartialEq($item:item) {
/// This property cannot be checked by the compiler, and therefore `Eq` implies
/// [`PartialEq`], and has no extra methods.
///
+/// Violating this property is a logic error. The behavior resulting from a logic error is not
+/// specified, but users of the trait must ensure that such logic errors do *not* result in
+/// undefined behavior. This means that `unsafe` code **must not** rely on the correctness of these
+/// methods.
+///
/// ## Derivable
///
/// This trait can be used with `#[derive]`. When `derive`d, because `Eq` has
@@ -281,15 +291,16 @@ pub macro PartialEq($item:item) {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_diagnostic_item = "Eq"]
pub trait Eq: PartialEq<Self> {
- // this method is used solely by #[deriving] to assert
- // that every component of a type implements #[deriving]
- // itself, the current deriving infrastructure means doing this
+ // this method is used solely by #[derive(Eq)] to assert
+ // that every component of a type implements `Eq`
+ // itself. The current deriving infrastructure means doing this
// assertion without using a method on this trait is nearly
// impossible.
//
// This should never be implemented by hand.
#[doc(hidden)]
- #[no_coverage] // rust-lang/rust#84605
+ #[cfg_attr(bootstrap, no_coverage)] // rust-lang/rust#84605
+ #[cfg_attr(not(bootstrap), coverage(off))] //
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn assert_receiver_is_total_eq(&self) {}
@@ -298,7 +309,9 @@ pub trait Eq: PartialEq<Self> {
/// Derive macro generating an impl of the trait [`Eq`].
#[rustc_builtin_macro]
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
-#[allow_internal_unstable(core_intrinsics, derive_eq, structural_match, no_coverage)]
+#[allow_internal_unstable(core_intrinsics, derive_eq, structural_match)]
+#[cfg_attr(bootstrap, allow_internal_unstable(no_coverage))]
+#[cfg_attr(not(bootstrap), allow_internal_unstable(coverage_attribute))]
pub macro Eq($item:item) {
/* compiler built-in */
}
@@ -656,6 +669,11 @@ impl<T: Clone> Clone for Reverse<T> {
/// It's easy to accidentally make `cmp` and `partial_cmp` disagree by
/// deriving some of the traits and manually implementing others.
///
+/// Violating these requirements is a logic error. The behavior resulting from a logic error is not
+/// specified, but users of the trait must ensure that such logic errors do *not* result in
+/// undefined behavior. This means that `unsafe` code **must not** rely on the correctness of these
+/// methods.
+///
/// ## Corollaries
///
/// From the above and the requirements of `PartialOrd`, it follows that `<` defines a strict total order.
@@ -889,6 +907,11 @@ pub macro Ord($item:item) {
/// transitively: if `T: PartialOrd<U>` and `U: PartialOrd<V>` then `U: PartialOrd<T>` and `T:
/// PartialOrd<V>`.
///
+/// Violating these requirements is a logic error. The behavior resulting from a logic error is not
+/// specified, but users of the trait must ensure that such logic errors do *not* result in
+/// undefined behavior. This means that `unsafe` code **must not** rely on the correctness of these
+/// methods.
+///
/// ## Corollaries
///
/// The following corollaries follow from the above requirements:
@@ -1266,6 +1289,91 @@ pub fn max_by_key<T, F: FnMut(&T) -> K, K: Ord>(v1: T, v2: T, mut f: F) -> T {
max_by(v1, v2, |v1, v2| f(v1).cmp(&f(v2)))
}
+/// Compares and sorts two values, returning minimum and maximum.
+///
+/// Returns `[v1, v2]` if the comparison determines them to be equal.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(cmp_minmax)]
+/// use std::cmp;
+///
+/// assert_eq!(cmp::minmax(1, 2), [1, 2]);
+/// assert_eq!(cmp::minmax(2, 2), [2, 2]);
+///
+/// // You can destructure the result using array patterns
+/// let [min, max] = cmp::minmax(42, 17);
+/// assert_eq!(min, 17);
+/// assert_eq!(max, 42);
+/// ```
+#[inline]
+#[must_use]
+#[unstable(feature = "cmp_minmax", issue = "115939")]
+pub fn minmax<T>(v1: T, v2: T) -> [T; 2]
+where
+ T: Ord,
+{
+ if v1 <= v2 { [v1, v2] } else { [v2, v1] }
+}
+
+/// Returns minimum and maximum values with respect to the specified comparison function.
+///
+/// Returns `[v1, v2]` if the comparison determines them to be equal.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(cmp_minmax)]
+/// use std::cmp;
+///
+/// assert_eq!(cmp::minmax_by(-2, 1, |x: &i32, y: &i32| x.abs().cmp(&y.abs())), [1, -2]);
+/// assert_eq!(cmp::minmax_by(-2, 2, |x: &i32, y: &i32| x.abs().cmp(&y.abs())), [-2, 2]);
+///
+/// // You can destructure the result using array patterns
+/// let [min, max] = cmp::minmax_by(-42, 17, |x: &i32, y: &i32| x.abs().cmp(&y.abs()));
+/// assert_eq!(min, 17);
+/// assert_eq!(max, -42);
+/// ```
+#[inline]
+#[must_use]
+#[unstable(feature = "cmp_minmax", issue = "115939")]
+pub fn minmax_by<T, F>(v1: T, v2: T, compare: F) -> [T; 2]
+where
+ F: FnOnce(&T, &T) -> Ordering,
+{
+ if compare(&v1, &v2).is_le() { [v1, v2] } else { [v2, v1] }
+}
+
+/// Returns minimum and maximum values with respect to the specified key function.
+///
+/// Returns `[v1, v2]` if the comparison determines them to be equal.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(cmp_minmax)]
+/// use std::cmp;
+///
+/// assert_eq!(cmp::minmax_by_key(-2, 1, |x: &i32| x.abs()), [1, -2]);
+/// assert_eq!(cmp::minmax_by_key(-2, 2, |x: &i32| x.abs()), [-2, 2]);
+///
+/// // You can destructure the result using array patterns
+/// let [min, max] = cmp::minmax_by_key(-42, 17, |x: &i32| x.abs());
+/// assert_eq!(min, 17);
+/// assert_eq!(max, -42);
+/// ```
+#[inline]
+#[must_use]
+#[unstable(feature = "cmp_minmax", issue = "115939")]
+pub fn minmax_by_key<T, F, K>(v1: T, v2: T, mut f: F) -> [T; 2]
+where
+ F: FnMut(&T) -> K,
+ K: Ord,
+{
+ minmax_by(v1, v2, |v1, v2| f(v1).cmp(&f(v2)))
+}
+
// Implementation of PartialEq, Eq, PartialOrd and Ord for primitive types
mod impls {
use crate::cmp::Ordering::{self, Equal, Greater, Less};
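
A nightly-only usage sketch of the new `cmp::minmax*` helpers (feature `cmp_minmax`), e.g. for normalizing an interval's endpoints with a single comparison:

```rust
#![feature(cmp_minmax)]
use std::cmp;

fn main() {
    // Normalize a possibly-reversed pair of endpoints.
    let [lo, hi] = cmp::minmax(7, 3);
    assert_eq!((lo, hi), (3, 7));

    // Key-based variant: order by absolute value; equal keys keep `[v1, v2]` order.
    assert_eq!(cmp::minmax_by_key(-2, 2, |x: &i32| x.abs()), [-2, 2]);
}
```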
diff --git a/library/core/src/convert/num.rs b/library/core/src/convert/num.rs
index 56ab63be2..b048b5135 100644
--- a/library/core/src/convert/num.rs
+++ b/library/core/src/convert/num.rs
@@ -142,9 +142,9 @@ impl_from! { i16, isize, #[stable(feature = "lossless_iusize_conv", since = "1.2
// RISC-V defines the possibility of a 128-bit address space (RV128).
-// CHERI proposes 256-bit “capabilities”. Unclear if this would be relevant to usize/isize.
+// CHERI proposes 128-bit “capabilities”. Unclear if this would be relevant to usize/isize.
// https://www.cl.cam.ac.uk/research/security/ctsrd/pdfs/20171017a-cheri-poster.pdf
-// https://www.csl.sri.com/users/neumann/2012resolve-cheri.pdf
+// https://www.cl.cam.ac.uk/techreports/UCAM-CL-TR-951.pdf
// Note: integers can only be represented with full precision in a float if
// they fit in the significand, which is 24 bits in f32 and 53 bits in f64.
diff --git a/library/core/src/error.md b/library/core/src/error.md
index 7771b8adc..a5deb71e6 100644
--- a/library/core/src/error.md
+++ b/library/core/src/error.md
@@ -37,7 +37,7 @@ responsibilities they cover:
The panic and error systems are not entirely distinct. Often times errors
that are anticipated runtime failures in an API might instead represent bugs
to a caller. For these situations the standard library provides APIs for
-constructing panics with an `Error` as it's source.
+constructing panics with an `Error` as its source.
* [`Result::unwrap`]
* [`Result::expect`]
diff --git a/library/core/src/ffi/c_str.rs b/library/core/src/ffi/c_str.rs
index 163a65c90..93a6716d7 100644
--- a/library/core/src/ffi/c_str.rs
+++ b/library/core/src/ffi/c_str.rs
@@ -214,6 +214,8 @@ impl CStr {
/// * The memory referenced by the returned `CStr` must not be mutated for
/// the duration of lifetime `'a`.
///
+ /// * The nul terminator must be within `isize::MAX` bytes from `ptr`.
+ ///
/// > **Note**: This operation is intended to be a 0-cost cast but it is
/// > currently implemented with an up-front calculation of the length of
/// > the string. This is not guaranteed to always be the case.
@@ -259,42 +261,16 @@ impl CStr {
#[rustc_const_unstable(feature = "const_cstr_from_ptr", issue = "113219")]
pub const unsafe fn from_ptr<'a>(ptr: *const c_char) -> &'a CStr {
// SAFETY: The caller has provided a pointer that points to a valid C
- // string with a NUL terminator of size less than `isize::MAX`, whose
- // content remain valid and doesn't change for the lifetime of the
- // returned `CStr`.
- //
- // Thus computing the length is fine (a NUL byte exists), the call to
- // from_raw_parts is safe because we know the length is at most `isize::MAX`, meaning
- // the call to `from_bytes_with_nul_unchecked` is correct.
+ // string with a NUL terminator less than `isize::MAX` from `ptr`.
+ let len = unsafe { const_strlen(ptr) };
+
+ // SAFETY: The caller has provided a valid pointer with length less than
+ // `isize::MAX`, so `from_raw_parts` is safe. The content remains valid
+ // and doesn't change for the lifetime of the returned `CStr`. This
+ // means the call to `from_bytes_with_nul_unchecked` is correct.
//
// The cast from c_char to u8 is ok because a c_char is always one byte.
- unsafe {
- const fn strlen_ct(s: *const c_char) -> usize {
- let mut len = 0;
-
- // SAFETY: Outer caller has provided a pointer to a valid C string.
- while unsafe { *s.add(len) } != 0 {
- len += 1;
- }
-
- len
- }
-
- // `inline` is necessary for codegen to see strlen.
- #[inline]
- fn strlen_rt(s: *const c_char) -> usize {
- extern "C" {
- /// Provided by libc or compiler_builtins.
- fn strlen(s: *const c_char) -> usize;
- }
-
- // SAFETY: Outer caller has provided a pointer to a valid C string.
- unsafe { strlen(s) }
- }
-
- let len = intrinsics::const_eval_select((ptr,), strlen_ct, strlen_rt);
- Self::from_bytes_with_nul_unchecked(slice::from_raw_parts(ptr.cast(), len + 1))
- }
+ unsafe { Self::from_bytes_with_nul_unchecked(slice::from_raw_parts(ptr.cast(), len + 1)) }
}
/// Creates a C string wrapper from a byte slice with any number of nuls.
@@ -511,10 +487,39 @@ impl CStr {
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_str_as_ptr", since = "1.32.0")]
+ #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
pub const fn as_ptr(&self) -> *const c_char {
self.inner.as_ptr()
}
+ /// Returns the length of `self`. Like C's `strlen`, this does not include the nul terminator.
+ ///
+ /// > **Note**: This method is currently implemented as a constant-time
+ /// > cast, but it is planned to alter its definition in the future to
+ /// > perform the length calculation whenever this method is called.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(cstr_count_bytes)]
+ ///
+ /// use std::ffi::CStr;
+ ///
+ /// let cstr = CStr::from_bytes_with_nul(b"foo\0").unwrap();
+ /// assert_eq!(cstr.count_bytes(), 3);
+ ///
+ /// let cstr = CStr::from_bytes_with_nul(b"\0").unwrap();
+ /// assert_eq!(cstr.count_bytes(), 0);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[doc(alias("len", "strlen"))]
+ #[unstable(feature = "cstr_count_bytes", issue = "114441")]
+ #[rustc_const_unstable(feature = "const_cstr_from_ptr", issue = "113219")]
+ pub const fn count_bytes(&self) -> usize {
+ self.inner.len() - 1
+ }
+
/// Returns `true` if `self.to_bytes()` has a length of 0.
///
/// # Examples
@@ -681,3 +686,37 @@ impl AsRef<CStr> for CStr {
self
}
}
+
+/// Calculate the length of a nul-terminated string. Defers to C's `strlen` when possible.
+///
+/// # Safety
+///
+/// The pointer must point to a valid buffer that contains a NUL terminator. The NUL must be
+/// located within `isize::MAX` bytes from `ptr`.
+#[inline]
+const unsafe fn const_strlen(ptr: *const c_char) -> usize {
+ const fn strlen_ct(s: *const c_char) -> usize {
+ let mut len = 0;
+
+ // SAFETY: Outer caller has provided a pointer to a valid C string.
+ while unsafe { *s.add(len) } != 0 {
+ len += 1;
+ }
+
+ len
+ }
+
+ #[inline]
+ fn strlen_rt(s: *const c_char) -> usize {
+ extern "C" {
+ /// Provided by libc or compiler_builtins.
+ fn strlen(s: *const c_char) -> usize;
+ }
+
+ // SAFETY: Outer caller has provided a pointer to a valid C string.
+ unsafe { strlen(s) }
+ }
+
+ // SAFETY: the two functions always provide equivalent functionality
+ unsafe { intrinsics::const_eval_select((ptr,), strlen_ct, strlen_rt) }
+}
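
A nightly-only example of `CStr::count_bytes` (feature `cstr_count_bytes`); because a `CStr` built from a byte slice carries its length in the slice metadata, the call is O(1) today:

```rust
#![feature(cstr_count_bytes)]
use std::ffi::CStr;

fn main() {
    let cstr = CStr::from_bytes_with_nul(b"hello\0").unwrap();
    assert_eq!(cstr.count_bytes(), 5);                      // excludes the nul
    assert_eq!(cstr.to_bytes().len(), cstr.count_bytes());
    assert_eq!(cstr.to_bytes_with_nul().len(), 6);          // includes the nul
}
```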
diff --git a/library/core/src/fmt/builders.rs b/library/core/src/fmt/builders.rs
index d2c9f9800..922724804 100644
--- a/library/core/src/fmt/builders.rs
+++ b/library/core/src/fmt/builders.rs
@@ -40,6 +40,14 @@ impl fmt::Write for PadAdapter<'_, '_> {
Ok(())
}
+
+ fn write_char(&mut self, c: char) -> fmt::Result {
+ if self.state.on_newline {
+ self.buf.write_str(" ")?;
+ }
+ self.state.on_newline = c == '\n';
+ self.buf.write_char(c)
+ }
}
/// A struct to help with [`fmt::Debug`](Debug) implementations.
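
The `write_char` override matters for pretty (`{:#?}`) output, where `PadAdapter` inserts the four-space indent after each newline; previously only `write_str` did this. A quick stable-Rust demonstration with a derived `Debug` impl:

```rust
fn main() {
    #[derive(Debug)]
    struct Point {
        x: i32,
        y: i32,
    }

    // Pretty-printing routes nested writes through the pad adapter, which now
    // also indents output produced one character at a time via `write_char`.
    let rendered = format!("{:#?}", Point { x: 1, y: 2 });
    assert_eq!(rendered, "Point {\n    x: 1,\n    y: 2,\n}");
}
```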
diff --git a/library/core/src/fmt/mod.rs b/library/core/src/fmt/mod.rs
index 9ce6093f1..fc91d1afc 100644
--- a/library/core/src/fmt/mod.rs
+++ b/library/core/src/fmt/mod.rs
@@ -112,9 +112,9 @@ pub trait Write {
///
/// # Errors
///
- /// This function will return an instance of [`Error`] on error.
+ /// This function will return an instance of [`std::fmt::Error`][Error] on error.
///
- /// The purpose of std::fmt::Error is to abort the formatting operation when the underlying
+ /// The purpose of that error is to abort the formatting operation when the underlying
/// destination encounters some error preventing it from accepting more text; it should
/// generally be propagated rather than handled, at least when implementing formatting traits.
///
@@ -188,8 +188,28 @@ pub trait Write {
/// assert_eq!(&buf, "world");
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
- fn write_fmt(mut self: &mut Self, args: Arguments<'_>) -> Result {
- write(&mut self, args)
+ fn write_fmt(&mut self, args: Arguments<'_>) -> Result {
+ // We use a specialization for `Sized` types to avoid an indirection
+ // through `&mut self`
+ trait SpecWriteFmt {
+ fn spec_write_fmt(self, args: Arguments<'_>) -> Result;
+ }
+
+ impl<W: Write + ?Sized> SpecWriteFmt for &mut W {
+ #[inline]
+ default fn spec_write_fmt(mut self, args: Arguments<'_>) -> Result {
+ write(&mut self, args)
+ }
+ }
+
+ impl<W: Write> SpecWriteFmt for &mut W {
+ #[inline]
+ fn spec_write_fmt(self, args: Arguments<'_>) -> Result {
+ write(self, args)
+ }
+ }
+
+ self.spec_write_fmt(args)
}
}
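
From the caller's side nothing changes: `write!` still expands to a `write_fmt` call, and for a sized writer such as `String` the specialization simply skips the extra `&mut` indirection. For reference:

```rust
use std::fmt::Write;

fn main() {
    let mut buf = String::new();
    write!(buf, "{} + {} = {}", 2, 2, 2 + 2).unwrap();
    assert_eq!(buf, "2 + 2 = 4");
}
```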
diff --git a/library/core/src/hash/mod.rs b/library/core/src/hash/mod.rs
index 794a57f09..35b757dc1 100644
--- a/library/core/src/hash/mod.rs
+++ b/library/core/src/hash/mod.rs
@@ -153,6 +153,11 @@ mod sip;
/// Thankfully, you won't need to worry about upholding this property when
/// deriving both [`Eq`] and `Hash` with `#[derive(PartialEq, Eq, Hash)]`.
///
+/// Violating this property is a logic error. The behavior resulting from a logic error is not
+/// specified, but users of the trait must ensure that such logic errors do *not* result in
+/// undefined behavior. This means that `unsafe` code **must not** rely on the correctness of these
+/// methods.
+///
/// ## Prefix collisions
///
/// Implementations of `hash` should ensure that the data they
diff --git a/library/core/src/hint.rs b/library/core/src/hint.rs
index 75c104ce2..4bf3da073 100644
--- a/library/core/src/hint.rs
+++ b/library/core/src/hint.rs
@@ -175,34 +175,27 @@ pub fn spin_loop() {
unsafe { crate::arch::x86_64::_mm_pause() };
}
- // RISC-V platform spin loop hint implementation
+ #[cfg(target_arch = "riscv32")]
{
- // RISC-V RV32 and RV64 share the same PAUSE instruction, but they are located in different
- // modules in `core::arch`.
- // In this case, here we call `pause` function in each core arch module.
- #[cfg(target_arch = "riscv32")]
- {
- crate::arch::riscv32::pause();
- }
- #[cfg(target_arch = "riscv64")]
- {
- crate::arch::riscv64::pause();
- }
+ crate::arch::riscv32::pause();
}
- #[cfg(any(target_arch = "aarch64", all(target_arch = "arm", target_feature = "v6")))]
+ #[cfg(target_arch = "riscv64")]
{
- #[cfg(target_arch = "aarch64")]
- {
- // SAFETY: the `cfg` attr ensures that we only execute this on aarch64 targets.
- unsafe { crate::arch::aarch64::__isb(crate::arch::aarch64::SY) };
- }
- #[cfg(target_arch = "arm")]
- {
- // SAFETY: the `cfg` attr ensures that we only execute this on arm targets
- // with support for the v6 feature.
- unsafe { crate::arch::arm::__yield() };
- }
+ crate::arch::riscv64::pause();
+ }
+
+ #[cfg(target_arch = "aarch64")]
+ {
+ // SAFETY: the `cfg` attr ensures that we only execute this on aarch64 targets.
+ unsafe { crate::arch::aarch64::__isb(crate::arch::aarch64::SY) };
+ }
+
+ #[cfg(all(target_arch = "arm", target_feature = "v6"))]
+ {
+ // SAFETY: the `cfg` attr ensures that we only execute this on arm targets
+ // with support for the v6 feature.
+ unsafe { crate::arch::arm::__yield() };
}
}
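
The restructuring above only flattens the per-architecture `cfg` blocks; the public behavior of `hint::spin_loop` is unchanged. Typical usage in a short busy-wait, for reference:

```rust
use std::hint;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;

fn main() {
    let ready = Arc::new(AtomicBool::new(false));
    let flag = Arc::clone(&ready);
    let worker = thread::spawn(move || flag.store(true, Ordering::Release));

    // Lowers to PAUSE / ISB / YIELD on the architectures handled above,
    // and is a no-op elsewhere.
    while !ready.load(Ordering::Acquire) {
        hint::spin_loop();
    }
    worker.join().unwrap();
}
```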
diff --git a/library/core/src/intrinsics.rs b/library/core/src/intrinsics.rs
index 676d4f2f3..4c76662ac 100644
--- a/library/core/src/intrinsics.rs
+++ b/library/core/src/intrinsics.rs
@@ -1130,7 +1130,10 @@ extern "rust-intrinsic" {
/// may lead to unexpected and unstable compilation results. This makes `transmute` **incredibly
/// unsafe**. `transmute` should be the absolute last resort.
///
- /// Transmuting pointers to integers in a `const` context is [undefined behavior][ub].
+ /// Transmuting pointers *to* integers in a `const` context is [undefined behavior][ub],
+ /// unless the pointer was originally created *from* an integer.
+ /// (That includes this function specifically, integer-to-pointer casts, and helpers like [`invalid`][crate::ptr::invalid],
+ /// but also semantically-equivalent conversions such as punning through `repr(C)` union fields.)
/// Any attempt to use the resulting value for integer operations will abort const-evaluation.
/// (And even outside `const`, such transmutation is touching on many unspecified aspects of the
/// Rust memory model and should be avoided. See below for alternatives.)
@@ -2399,7 +2402,6 @@ extern "rust-intrinsic" {
/// that differs. That allows optimizations that can read in large chunks.
///
/// [valid]: crate::ptr#safety
- #[cfg(not(bootstrap))]
#[rustc_const_unstable(feature = "const_intrinsic_compare_bytes", issue = "none")]
#[rustc_nounwind]
pub fn compare_bytes(left: *const u8, right: *const u8, bytes: usize) -> i32;
@@ -2568,7 +2570,7 @@ pub(crate) fn is_nonoverlapping<T>(src: *const T, dst: *const T, count: usize) -
let size = mem::size_of::<T>()
.checked_mul(count)
.expect("is_nonoverlapping: `size_of::<T>() * count` overflows a usize");
- let diff = if src_usize > dst_usize { src_usize - dst_usize } else { dst_usize - src_usize };
+ let diff = src_usize.abs_diff(dst_usize);
// If the absolute distance between the ptrs is at least as big as the size of the buffer,
// they do not overlap.
diff >= size
@@ -2705,9 +2707,13 @@ pub const unsafe fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: us
///
/// Behavior is undefined if any of the following conditions are violated:
///
-/// * `src` must be [valid] for reads of `count * size_of::<T>()` bytes.
+/// * `src` must be [valid] for reads of `count * size_of::<T>()` bytes, and must remain valid even
+/// when `dst` is written for `count * size_of::<T>()` bytes. (This means if the memory ranges
+/// overlap, the two pointers must not be subject to aliasing restrictions relative to each
+/// other.)
///
-/// * `dst` must be [valid] for writes of `count * size_of::<T>()` bytes.
+/// * `dst` must be [valid] for writes of `count * size_of::<T>()` bytes, and must remain valid even
+/// when `src` is read for `count * size_of::<T>()` bytes.
///
/// * Both `src` and `dst` must be properly aligned.
///
@@ -2844,18 +2850,3 @@ pub const unsafe fn write_bytes<T>(dst: *mut T, val: u8, count: usize) {
write_bytes(dst, val, count)
}
}
-
-/// Backfill for bootstrap
-#[cfg(bootstrap)]
-pub unsafe fn compare_bytes(left: *const u8, right: *const u8, bytes: usize) -> i32 {
- extern "C" {
- fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> crate::ffi::c_int;
- }
-
- if bytes != 0 {
- // SAFETY: Since bytes is non-zero, the caller has met `memcmp`'s requirements.
- unsafe { memcmp(left, right, bytes).into() }
- } else {
- 0
- }
-}
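
The relaxed validity wording above is what makes overlapping `ptr::copy` calls sound: both ranges must stay valid while the other is accessed, but they may alias. A small stable-Rust illustration:

```rust
fn main() {
    let mut buf = [1u8, 2, 3, 4, 5];
    let p = buf.as_mut_ptr();
    // SAFETY: both the source and destination ranges lie inside `buf`,
    // and `ptr::copy` (unlike `copy_nonoverlapping`) permits overlap.
    unsafe { std::ptr::copy(p, p.add(1), 4) };
    assert_eq!(buf, [1, 1, 2, 3, 4]);
}
```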
diff --git a/library/core/src/intrinsics/mir.rs b/library/core/src/intrinsics/mir.rs
index ef0a2fd4e..cab195dad 100644
--- a/library/core/src/intrinsics/mir.rs
+++ b/library/core/src/intrinsics/mir.rs
@@ -12,9 +12,10 @@
//!
//! Typical usage will look like this:
//!
-//! ```rust
+#![cfg_attr(bootstrap, doc = "```rust,ignore")]
+#![cfg_attr(not(bootstrap), doc = "```rust")]
//! #![feature(core_intrinsics, custom_mir)]
-#![cfg_attr(not(bootstrap), doc = "#![allow(internal_features)]")]
+//! #![allow(internal_features)]
//!
//! use core::intrinsics::mir::*;
//!
@@ -62,9 +63,10 @@
//!
//! # Examples
//!
-//! ```rust
+#![cfg_attr(bootstrap, doc = "```rust,ignore")]
+#![cfg_attr(not(bootstrap), doc = "```rust")]
//! #![feature(core_intrinsics, custom_mir)]
-#![cfg_attr(not(bootstrap), doc = "#![allow(internal_features)]")]
+//! #![allow(internal_features)]
//!
//! use core::intrinsics::mir::*;
//!
@@ -317,8 +319,9 @@ define!(
///
/// # Examples
///
- /// ```rust
- #[cfg_attr(not(bootstrap), doc = "#![allow(internal_features)]")]
+ #[cfg_attr(bootstrap, doc = "```rust,ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```rust")]
+ /// #![allow(internal_features)]
/// #![feature(custom_mir, core_intrinsics)]
///
/// use core::intrinsics::mir::*;
@@ -361,6 +364,11 @@ define!(
#[doc(hidden)]
fn __internal_make_place<T>(place: T) -> *mut T
);
+define!(
+ "mir_debuginfo",
+ #[doc(hidden)]
+ fn __debuginfo<T>(name: &'static str, s: T)
+);
/// Macro for generating custom MIR.
///
@@ -371,6 +379,7 @@ pub macro mir {
(
$(type RET = $ret_ty:ty ;)?
$(let $local_decl:ident $(: $local_decl_ty:ty)? ;)*
+ $(debug $dbg_name:ident => $dbg_data:expr ;)*
{
$($entry:tt)*
@@ -394,26 +403,32 @@ pub macro mir {
$(
let $local_decl $(: $local_decl_ty)? ;
)*
-
::core::intrinsics::mir::__internal_extract_let!($($entry)*);
$(
::core::intrinsics::mir::__internal_extract_let!($($block)*);
)*
{
- // Finally, the contents of the basic blocks
- ::core::intrinsics::mir::__internal_remove_let!({
- {}
- { $($entry)* }
- });
+ // Now debuginfo
$(
+ __debuginfo(stringify!($dbg_name), $dbg_data);
+ )*
+
+ {
+ // Finally, the contents of the basic blocks
::core::intrinsics::mir::__internal_remove_let!({
{}
- { $($block)* }
+ { $($entry)* }
});
- )*
+ $(
+ ::core::intrinsics::mir::__internal_remove_let!({
+ {}
+ { $($block)* }
+ });
+ )*
- RET
+ RET
+ }
}
}
}}
diff --git a/library/core/src/iter/adapters/take.rs b/library/core/src/iter/adapters/take.rs
index ce18bffe7..c1d8cc4ff 100644
--- a/library/core/src/iter/adapters/take.rs
+++ b/library/core/src/iter/adapters/take.rs
@@ -1,5 +1,7 @@
use crate::cmp;
-use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable, TrustedLen};
+use crate::iter::{
+ adapters::SourceIter, FusedIterator, InPlaceIterable, TrustedLen, TrustedRandomAccess,
+};
use crate::num::NonZeroUsize;
use crate::ops::{ControlFlow, Try};
@@ -98,26 +100,18 @@ where
}
}
- impl_fold_via_try_fold! { fold -> try_fold }
-
#[inline]
- fn for_each<F: FnMut(Self::Item)>(mut self, f: F) {
- // The default implementation would use a unit accumulator, so we can
- // avoid a stateful closure by folding over the remaining number
- // of items we wish to return instead.
- fn check<'a, Item>(
- mut action: impl FnMut(Item) + 'a,
- ) -> impl FnMut(usize, Item) -> Option<usize> + 'a {
- move |more, x| {
- action(x);
- more.checked_sub(1)
- }
- }
+ fn fold<B, F>(self, init: B, f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ Self::spec_fold(self, init, f)
+ }
- let remaining = self.n;
- if remaining > 0 {
- self.iter.try_fold(remaining - 1, check(f));
- }
+ #[inline]
+ fn for_each<F: FnMut(Self::Item)>(self, f: F) {
+ Self::spec_for_each(self, f)
}
#[inline]
@@ -249,3 +243,72 @@ impl<I> FusedIterator for Take<I> where I: FusedIterator {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<I: TrustedLen> TrustedLen for Take<I> {}
+
+trait SpecTake: Iterator {
+ fn spec_fold<B, F>(self, init: B, f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B;
+
+ fn spec_for_each<F: FnMut(Self::Item)>(self, f: F);
+}
+
+impl<I: Iterator> SpecTake for Take<I> {
+ #[inline]
+ default fn spec_fold<B, F>(mut self, init: B, f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ use crate::ops::NeverShortCircuit;
+ self.try_fold(init, NeverShortCircuit::wrap_mut_2(f)).0
+ }
+
+ #[inline]
+ default fn spec_for_each<F: FnMut(Self::Item)>(mut self, f: F) {
+ // The default implementation would use a unit accumulator, so we can
+ // avoid a stateful closure by folding over the remaining number
+ // of items we wish to return instead.
+ fn check<'a, Item>(
+ mut action: impl FnMut(Item) + 'a,
+ ) -> impl FnMut(usize, Item) -> Option<usize> + 'a {
+ move |more, x| {
+ action(x);
+ more.checked_sub(1)
+ }
+ }
+
+ let remaining = self.n;
+ if remaining > 0 {
+ self.iter.try_fold(remaining - 1, check(f));
+ }
+ }
+}
+
+impl<I: Iterator + TrustedRandomAccess> SpecTake for Take<I> {
+ #[inline]
+ fn spec_fold<B, F>(mut self, init: B, mut f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ let mut acc = init;
+ let end = self.n.min(self.iter.size());
+ for i in 0..end {
+ // SAFETY: i < end <= self.iter.size() and we discard the iterator at the end
+ let val = unsafe { self.iter.__iterator_get_unchecked(i) };
+ acc = f(acc, val);
+ }
+ acc
+ }
+
+ #[inline]
+ fn spec_for_each<F: FnMut(Self::Item)>(mut self, mut f: F) {
+ let end = self.n.min(self.iter.size());
+ for i in 0..end {
+ // SAFETY: i < end <= self.iter.size() and we discard the iterator at the end
+ let val = unsafe { self.iter.__iterator_get_unchecked(i) };
+ f(val);
+ }
+ }
+}
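
User code keeps calling `fold` and `for_each` as before; the `TrustedRandomAccess` path only changes how elements are fetched internally. For instance:

```rust
fn main() {
    let data = [1, 2, 3, 4, 5, 6];
    // Slice iterators implement TrustedRandomAccess, so this fold can read the
    // first three elements by index rather than repeatedly calling `next`.
    let sum: i32 = data.iter().copied().take(3).fold(0, |acc, x| acc + x);
    assert_eq!(sum, 6);
}
```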
diff --git a/library/core/src/iter/range.rs b/library/core/src/iter/range.rs
index 462f7170a..0e03d0c2d 100644
--- a/library/core/src/iter/range.rs
+++ b/library/core/src/iter/range.rs
@@ -1,5 +1,7 @@
+use crate::ascii::Char as AsciiChar;
use crate::convert::TryFrom;
use crate::mem;
+use crate::net::{Ipv4Addr, Ipv6Addr};
use crate::num::NonZeroUsize;
use crate::ops::{self, Try};
@@ -14,7 +16,7 @@ macro_rules! unsafe_impl_trusted_step {
unsafe impl TrustedStep for $type {}
)*};
}
-unsafe_impl_trusted_step![char i8 i16 i32 i64 i128 isize u8 u16 u32 u64 u128 usize];
+unsafe_impl_trusted_step![AsciiChar char i8 i16 i32 i64 i128 isize u8 u16 u32 u64 u128 usize Ipv4Addr Ipv6Addr];
/// Objects that have a notion of *successor* and *predecessor* operations.
///
@@ -484,6 +486,112 @@ impl Step for char {
}
}
+#[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
+impl Step for AsciiChar {
+ #[inline]
+ fn steps_between(&start: &AsciiChar, &end: &AsciiChar) -> Option<usize> {
+ Step::steps_between(&start.to_u8(), &end.to_u8())
+ }
+
+ #[inline]
+ fn forward_checked(start: AsciiChar, count: usize) -> Option<AsciiChar> {
+ let end = Step::forward_checked(start.to_u8(), count)?;
+ AsciiChar::from_u8(end)
+ }
+
+ #[inline]
+ fn backward_checked(start: AsciiChar, count: usize) -> Option<AsciiChar> {
+ let end = Step::backward_checked(start.to_u8(), count)?;
+
+ // SAFETY: Values below that of a valid ASCII character are also valid ASCII
+ Some(unsafe { AsciiChar::from_u8_unchecked(end) })
+ }
+
+ #[inline]
+ unsafe fn forward_unchecked(start: AsciiChar, count: usize) -> AsciiChar {
+ // SAFETY: Caller asserts that result is a valid ASCII character,
+ // and therefore it is a valid u8.
+ let end = unsafe { Step::forward_unchecked(start.to_u8(), count) };
+
+ // SAFETY: Caller asserts that result is a valid ASCII character.
+ unsafe { AsciiChar::from_u8_unchecked(end) }
+ }
+
+ #[inline]
+ unsafe fn backward_unchecked(start: AsciiChar, count: usize) -> AsciiChar {
+ // SAFETY: Caller asserts that result is a valid ASCII character,
+ // and therefore it is a valid u8.
+ let end = unsafe { Step::backward_unchecked(start.to_u8(), count) };
+
+ // SAFETY: Caller asserts that result is a valid ASCII character.
+ unsafe { AsciiChar::from_u8_unchecked(end) }
+ }
+}
+
+#[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
+impl Step for Ipv4Addr {
+ #[inline]
+ fn steps_between(&start: &Ipv4Addr, &end: &Ipv4Addr) -> Option<usize> {
+ u32::steps_between(&start.to_bits(), &end.to_bits())
+ }
+
+ #[inline]
+ fn forward_checked(start: Ipv4Addr, count: usize) -> Option<Ipv4Addr> {
+ u32::forward_checked(start.to_bits(), count).map(Ipv4Addr::from_bits)
+ }
+
+ #[inline]
+ fn backward_checked(start: Ipv4Addr, count: usize) -> Option<Ipv4Addr> {
+ u32::backward_checked(start.to_bits(), count).map(Ipv4Addr::from_bits)
+ }
+
+ #[inline]
+ unsafe fn forward_unchecked(start: Ipv4Addr, count: usize) -> Ipv4Addr {
+ // SAFETY: Since u32 and Ipv4Addr are losslessly convertible,
+ // this is as safe as the u32 version.
+ Ipv4Addr::from_bits(unsafe { u32::forward_unchecked(start.to_bits(), count) })
+ }
+
+ #[inline]
+ unsafe fn backward_unchecked(start: Ipv4Addr, count: usize) -> Ipv4Addr {
+ // SAFETY: Since u32 and Ipv4Addr are losslessly convertible,
+ // this is as safe as the u32 version.
+ Ipv4Addr::from_bits(unsafe { u32::backward_unchecked(start.to_bits(), count) })
+ }
+}
+
+#[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
+impl Step for Ipv6Addr {
+ #[inline]
+ fn steps_between(&start: &Ipv6Addr, &end: &Ipv6Addr) -> Option<usize> {
+ u128::steps_between(&start.to_bits(), &end.to_bits())
+ }
+
+ #[inline]
+ fn forward_checked(start: Ipv6Addr, count: usize) -> Option<Ipv6Addr> {
+ u128::forward_checked(start.to_bits(), count).map(Ipv6Addr::from_bits)
+ }
+
+ #[inline]
+ fn backward_checked(start: Ipv6Addr, count: usize) -> Option<Ipv6Addr> {
+ u128::backward_checked(start.to_bits(), count).map(Ipv6Addr::from_bits)
+ }
+
+ #[inline]
+ unsafe fn forward_unchecked(start: Ipv6Addr, count: usize) -> Ipv6Addr {
+ // SAFETY: Since u128 and Ipv6Addr are losslessly convertible,
+ // this is as safe as the u128 version.
+ Ipv6Addr::from_bits(unsafe { u128::forward_unchecked(start.to_bits(), count) })
+ }
+
+ #[inline]
+ unsafe fn backward_unchecked(start: Ipv6Addr, count: usize) -> Ipv6Addr {
+ // SAFETY: Since u128 and Ipv6Addr are losslessly convertible,
+ // this is as safe as the u128 version.
+ Ipv6Addr::from_bits(unsafe { u128::backward_unchecked(start.to_bits(), count) })
+ }
+}
+
macro_rules! range_exact_iter_impl {
($($t:ty)*) => ($(
#[stable(feature = "rust1", since = "1.0.0")]
@@ -723,6 +831,15 @@ impl<A: Step> Iterator for ops::Range<A> {
}
#[inline]
+ fn count(self) -> usize {
+ if self.start < self.end {
+ Step::steps_between(&self.start, &self.end).expect("count overflowed usize")
+ } else {
+ 0
+ }
+ }
+
+ #[inline]
fn nth(&mut self, n: usize) -> Option<A> {
self.spec_nth(n)
}
@@ -1120,6 +1237,17 @@ impl<A: Step> Iterator for ops::RangeInclusive<A> {
}
#[inline]
+ fn count(self) -> usize {
+ if self.is_empty() {
+ return 0;
+ }
+
+ Step::steps_between(&self.start, &self.end)
+ .and_then(|steps| steps.checked_add(1))
+ .expect("count overflowed usize")
+ }
+
+ #[inline]
fn nth(&mut self, n: usize) -> Option<A> {
if self.is_empty() {
return None;
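
With the new `Step` impls, address ranges become ordinary iterators, in the same way `char` ranges already are. A sketch of scanning a small IPv4 block (assuming a toolchain that includes these impls; the `step_trait` feature is only needed to name `Step` in bounds, not to iterate):

```rust
use std::net::Ipv4Addr;

fn main() {
    let start = Ipv4Addr::new(192, 168, 0, 1);
    let end = Ipv4Addr::new(192, 168, 0, 5);
    // Half-open range: yields .1 through .4.
    let addrs: Vec<Ipv4Addr> = (start..end).collect();
    assert_eq!(addrs.len(), 4);
    assert_eq!(addrs[0], Ipv4Addr::new(192, 168, 0, 1));
}
```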
diff --git a/library/core/src/lib.rs b/library/core/src/lib.rs
index a2729b374..8b15e8269 100644
--- a/library/core/src/lib.rs
+++ b/library/core/src/lib.rs
@@ -20,11 +20,19 @@
// FIXME: Fill me in with more detail when the interface settles
//! This library is built on the assumption of a few existing symbols:
//!
-//! * `memcpy`, `memcmp`, `memset`, `strlen` - These are core memory routines which are
-//! often generated by LLVM. Additionally, this library can make explicit
-//! calls to these functions. Their signatures are the same as found in C.
-//! These functions are often provided by the system libc, but can also be
-//! provided by the [compiler-builtins crate](https://crates.io/crates/compiler_builtins).
+//! * `memcpy`, `memmove`, `memset`, `memcmp`, `bcmp`, `strlen` - These are core memory routines
+//! which are generated by Rust codegen backends. Additionally, this library can make explicit
+//! calls to `strlen`. Their signatures are the same as found in C, but there are extra
+//! assumptions about their semantics: for `memcpy`, `memmove`, `memset`, `memcmp`, and `bcmp`, a
+//! call with an `n` parameter of 0 is assumed not to be UB. Furthermore, a `memcpy` call whose
+//! source and target pointers are equal is assumed not to be UB.
+//! (Note that these are [standard assumptions](https://reviews.llvm.org/D86993) among compilers.)
+//! These functions are often provided by the system libc, but can also be provided by the
+//! [compiler-builtins crate](https://crates.io/crates/compiler_builtins).
+//! Note that the library does not guarantee that it will always make these assumptions, so Rust
+//! user code directly calling the C functions should follow the C specification! The advice for
+//! Rust user code is to call the functions provided by this library instead (such as
+//! `ptr::copy`).
//!
//! * `rust_begin_panic` - This function takes four arguments, a
//! `fmt::Arguments`, a `&'static str`, and two `u32`'s. These four arguments
@@ -96,12 +104,14 @@
#![allow(explicit_outlives_requirements)]
#![allow(incomplete_features)]
#![warn(multiple_supertrait_upcastable)]
-#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![allow(internal_features)]
// Do not check link redundancy on bootstraping phase
-#![cfg_attr(not(bootstrap), allow(rustdoc::redundant_explicit_links))]
+#![allow(rustdoc::redundant_explicit_links)]
//
// Library features:
// tidy-alphabetical-start
+#![cfg_attr(bootstrap, feature(no_coverage))] // rust-lang/rust#84605
+#![cfg_attr(not(bootstrap), feature(coverage_attribute))] // rust-lang/rust#84605
#![feature(char_indices_offset)]
#![feature(const_align_of_val)]
#![feature(const_align_of_val_raw)]
@@ -152,12 +162,10 @@
#![feature(const_slice_from_raw_parts_mut)]
#![feature(const_slice_from_ref)]
#![feature(const_slice_index)]
-#![feature(const_slice_is_ascii)]
#![feature(const_slice_ptr_len)]
#![feature(const_slice_split_at_mut)]
#![feature(const_str_from_utf8_unchecked_mut)]
#![feature(const_swap)]
-#![feature(const_transmute_copy)]
#![feature(const_try)]
#![feature(const_type_id)]
#![feature(const_type_name)]
@@ -170,6 +178,7 @@
#![feature(ip)]
#![feature(ip_bits)]
#![feature(is_ascii_octdigit)]
+#![feature(isqrt)]
#![feature(maybe_uninit_uninit_array)]
#![feature(ptr_alignment_type)]
#![feature(ptr_metadata)]
@@ -228,7 +237,6 @@
#![feature(negative_impls)]
#![feature(never_type)]
#![feature(no_core)]
-#![feature(no_coverage)] // rust-lang/rust#84605
#![feature(platform_intrinsics)]
#![feature(prelude_import)]
#![feature(repr_simd)]
@@ -282,6 +290,9 @@ pub mod assert_matches {
pub use crate::macros::{assert_matches, debug_assert_matches};
}
+#[unstable(feature = "cfg_match", issue = "115585")]
+pub use crate::macros::cfg_match;
+
#[macro_use]
mod internal_macros;
diff --git a/library/core/src/macros/mod.rs b/library/core/src/macros/mod.rs
index 14cc523b0..123661b35 100644
--- a/library/core/src/macros/mod.rs
+++ b/library/core/src/macros/mod.rs
@@ -168,6 +168,94 @@ pub macro assert_matches {
},
}
+/// A macro for defining `#[cfg]` match-like statements.
+///
+/// It is similar to the C preprocessor's `#if`/`#elif` directives: it allows definition of a
+/// cascade of `#[cfg]` cases, emitting the implementation that matches first.
+///
+/// This allows you to conveniently provide a long list of `#[cfg]`'d blocks of code
+/// without having to rewrite each clause multiple times.
+///
+/// A trailing `_` wildcard match arm is **optional**; it provides a fallback branch that is used
+/// when none of the preceding predicates evaluates to true.
+///
+/// # Example
+///
+/// ```
+/// #![feature(cfg_match)]
+///
+/// cfg_match! {
+/// cfg(unix) => {
+/// fn foo() { /* unix specific functionality */ }
+/// }
+/// cfg(target_pointer_width = "32") => {
+/// fn foo() { /* non-unix, 32-bit functionality */ }
+/// }
+/// _ => {
+/// fn foo() { /* fallback implementation */ }
+/// }
+/// }
+/// ```
+#[unstable(feature = "cfg_match", issue = "115585")]
+#[rustc_diagnostic_item = "cfg_match"]
+pub macro cfg_match {
+ // with a final wildcard
+ (
+ $(cfg($initial_meta:meta) => { $($initial_tokens:item)* })+
+ _ => { $($extra_tokens:item)* }
+ ) => {
+ cfg_match! {
+ @__items ();
+ $((($initial_meta) ($($initial_tokens)*)),)+
+ (() ($($extra_tokens)*)),
+ }
+ },
+
+ // without a final wildcard
+ (
+ $(cfg($extra_meta:meta) => { $($extra_tokens:item)* })*
+ ) => {
+ cfg_match! {
+ @__items ();
+ $((($extra_meta) ($($extra_tokens)*)),)*
+ }
+ },
+
+ // Internal and recursive macro to emit all the items
+ //
+ // Collects all the previous cfgs in a list at the beginning, so they can be
+ // negated. After the semicolon is all the remaining items.
+ (@__items ($($_:meta,)*);) => {},
+ (
+ @__items ($($no:meta,)*);
+ (($($yes:meta)?) ($($tokens:item)*)),
+ $($rest:tt,)*
+ ) => {
+ // Emit all items within one block, applying an appropriate #[cfg]. The
+ // #[cfg] will require all `$yes` matchers specified and must also negate
+ // all previous matchers.
+ #[cfg(all(
+ $($yes,)?
+ not(any($($no),*))
+ ))]
+ cfg_match! { @__identity $($tokens)* }
+
+ // Recurse to emit all other items in `$rest`, and when we do so add all
+ // our `$yes` matchers to the list of `$no` matchers as future emissions
+ // will have to negate everything we just matched as well.
+ cfg_match! {
+ @__items ($($no,)* $($yes,)?);
+ $($rest,)*
+ }
+ },
+
+ // Internal macro to make item expansion work out right for different match types,
+ // because of how macros match/expand stuff.
+ (@__identity $($tokens:item)*) => {
+ $($tokens)*
+ }
+}
+
/// Asserts that a boolean expression is `true` at runtime.
///
/// This will invoke the [`panic!`] macro if the provided expression cannot be
@@ -849,7 +937,8 @@ pub(crate) mod builtin {
/// assert_eq!(display, debug);
/// ```
///
- /// For more information, see the documentation in [`std::fmt`].
+ /// See [the formatting documentation in `std::fmt`](../std/fmt/index.html)
+ /// for details of the macro argument syntax, and further information.
///
/// [`Display`]: crate::fmt::Display
/// [`Debug`]: crate::fmt::Debug
@@ -1043,7 +1132,7 @@ pub(crate) mod builtin {
/// expression of type `&'static str` which represents all of the literals
/// concatenated left-to-right.
///
- /// Integer and floating point literals are stringified in order to be
+ /// Integer and floating point literals are [stringified](core::stringify) in order to be
/// concatenated.
///
/// # Examples
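
A second, nightly-only usage sketch of `cfg_match!`, importing the macro from `core` (feature `cfg_match`) and relying on the wildcard arm as the fallback:

```rust
#![feature(cfg_match)]
use core::cfg_match;

cfg_match! {
    cfg(target_pointer_width = "64") => {
        fn word_bits() -> u32 { 64 }
    }
    _ => {
        fn word_bits() -> u32 { 32 }
    }
}

fn main() {
    println!("pointer width: {} bits", word_bits());
}
```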
diff --git a/library/core/src/macros/panic.md b/library/core/src/macros/panic.md
index 8b549e187..60100c265 100644
--- a/library/core/src/macros/panic.md
+++ b/library/core/src/macros/panic.md
@@ -8,8 +8,8 @@ tests. `panic!` is closely tied with the `unwrap` method of both
[`Option`][ounwrap] and [`Result`][runwrap] enums. Both implementations call
`panic!` when they are set to [`None`] or [`Err`] variants.
-When using `panic!()` you can specify a string payload, that is built using
-the [`format!`] syntax. That payload is used when injecting the panic into
+When using `panic!()` you can specify a string payload that is built using
+[formatting syntax]. That payload is used when injecting the panic into
the calling Rust thread, causing the thread to panic entirely.
The behavior of the default `std` hook, i.e. the code that runs directly
@@ -18,6 +18,7 @@ after the panic is invoked, is to print the message payload to
call. You can override the panic hook using [`std::panic::set_hook()`].
Inside the hook a panic can be accessed as a `&dyn Any + Send`,
which contains either a `&str` or `String` for regular `panic!()` invocations.
+(Whether a particular invocation contains the payload at type `&str` or `String` is unspecified and can change.)
To panic with a value of another type, [`panic_any`] can be used.
See also the macro [`compile_error!`], for raising errors during compilation.
@@ -55,7 +56,7 @@ For more detailed information about error handling check out the [book] or the
[`panic_any`]: ../std/panic/fn.panic_any.html
[`Box`]: ../std/boxed/struct.Box.html
[`Any`]: crate::any::Any
-[`format!`]: ../std/macro.format.html
+[formatting syntax]: ../std/fmt/index.html
[book]: ../book/ch09-00-error-handling.html
[`std::result`]: ../std/result/index.html
@@ -64,6 +65,29 @@ For more detailed information about error handling check out the [book] or the
If the main thread panics it will terminate all your threads and end your
program with code `101`.
+# Editions
+
+Behavior of the panic macros changed over editions.
+
+## 2021 and later
+
+In Rust 2021 and later, `panic!` always requires a format string and
+the applicable format arguments, and is the same in `core` and `std`.
+Use [`std::panic::panic_any(x)`](../std/panic/fn.panic_any.html) to
+panic with an arbitrary payload.
+
+## 2018 and 2015
+
+In Rust Editions prior to 2021, `std::panic!(x)` with a single
+argument directly uses that argument as a payload.
+This is true even if the argument is a string literal.
+For example, `panic!("problem: {reason}")` panics with a
+payload of literally `"problem: {reason}"` (a `&'static str`).
+
+`core::panic!(x)` with a single argument requires that `x` be `&str`,
+but otherwise behaves like `std::panic!`. In particular, the string
+need not be a literal, and is not interpreted as a format string.
+
# Examples
```should_panic
diff --git a/library/core/src/marker.rs b/library/core/src/marker.rs
index 5ec751e51..5ed82e26a 100644
--- a/library/core/src/marker.rs
+++ b/library/core/src/marker.rs
@@ -986,11 +986,16 @@ pub trait Tuple {}
pub trait PointerLike {}
/// A marker for types which can be used as types of `const` generic parameters.
+///
+/// These types must have a proper equivalence relation (`Eq`) and it must be automatically
+/// derived (`StructuralPartialEq`). There's a hard-coded check in the compiler ensuring
+/// that all fields are also `ConstParamTy`, which implies that recursively, all fields
+/// are `StructuralPartialEq`.
#[lang = "const_param_ty"]
#[unstable(feature = "adt_const_params", issue = "95174")]
#[rustc_on_unimplemented(message = "`{Self}` can't be used as a const parameter type")]
#[allow(multiple_supertrait_upcastable)]
-pub trait ConstParamTy: StructuralEq + StructuralPartialEq {}
+pub trait ConstParamTy: StructuralEq + StructuralPartialEq + Eq {}
/// Derive macro generating an impl of the trait `ConstParamTy`.
#[rustc_builtin_macro]
diff --git a/library/core/src/mem/mod.rs b/library/core/src/mem/mod.rs
index 2fff3f0ef..e478b217f 100644
--- a/library/core/src/mem/mod.rs
+++ b/library/core/src/mem/mod.rs
@@ -413,7 +413,7 @@ pub const unsafe fn size_of_val_raw<T: ?Sized>(val: *const T) -> usize {
#[inline]
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
-#[deprecated(note = "use `align_of` instead", since = "1.2.0")]
+#[deprecated(note = "use `align_of` instead", since = "1.2.0", suggestion = "align_of")]
pub fn min_align_of<T>() -> usize {
intrinsics::min_align_of::<T>()
}
@@ -436,7 +436,7 @@ pub fn min_align_of<T>() -> usize {
#[inline]
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
-#[deprecated(note = "use `align_of_val` instead", since = "1.2.0")]
+#[deprecated(note = "use `align_of_val` instead", since = "1.2.0", suggestion = "align_of_val")]
pub fn min_align_of_val<T: ?Sized>(val: &T) -> usize {
// SAFETY: val is a reference, so it's a valid raw pointer
unsafe { intrinsics::min_align_of_val(val) }
@@ -1051,7 +1051,7 @@ pub const fn copy<T: Copy>(x: &T) -> T {
#[inline]
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_transmute_copy", issue = "83165")]
+#[rustc_const_stable(feature = "const_transmute_copy", since = "1.74.0")]
pub const unsafe fn transmute_copy<Src, Dst>(src: &Src) -> Dst {
assert!(
size_of::<Src>() >= size_of::<Dst>(),
@@ -1126,6 +1126,13 @@ impl<T> fmt::Debug for Discriminant<T> {
///
/// [Reference]: ../../reference/items/enumerations.html#custom-discriminant-values-for-fieldless-enumerations
///
+/// The value of a [`Discriminant<T>`] is independent of any *free lifetimes* in `T`. As such,
+/// reading or writing a `Discriminant<Foo<'a>>` as a `Discriminant<Foo<'b>>` (whether via
+/// [`transmute`] or otherwise) is always sound. Note that this is **not** true for other kinds
+/// of generic parameters and for higher-ranked lifetimes; `Discriminant<Foo<A>>` and
+/// `Discriminant<Foo<B>>` as well as `Discriminant<Bar<dyn for<'a> Trait<'a>>>` and
+/// `Discriminant<Bar<dyn Trait<'static>>>` may be incompatible.
+///
/// # Examples
///
/// This can be used to compare enums that carry data, while disregarding
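
A stable-Rust sketch related to the new `Discriminant` wording: the discriminant of an enum that carries a borrowed field does not depend on the concrete free lifetime, so values built from borrows of different durations compare as expected.

```rust
use std::mem::discriminant;

#[allow(dead_code)]
enum Msg<'a> {
    Text(&'a str),
    Ping,
}

fn main() {
    let owned = String::from("hi");
    let a = discriminant(&Msg::Text(&owned));   // borrows `owned`
    let b = discriminant(&Msg::Text("static")); // borrows a 'static str
    assert_eq!(a, b);                           // same variant, equal discriminants
    assert_ne!(a, discriminant(&Msg::Ping));
}
```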
diff --git a/library/core/src/net/ip_addr.rs b/library/core/src/net/ip_addr.rs
index 56460c75e..6a36dfec0 100644
--- a/library/core/src/net/ip_addr.rs
+++ b/library/core/src/net/ip_addr.rs
@@ -1856,13 +1856,7 @@ impl fmt::Display for Ipv6Addr {
if f.precision().is_none() && f.width().is_none() {
let segments = self.segments();
- // Special case for :: and ::1; otherwise they get written with the
- // IPv4 formatter
- if self.is_unspecified() {
- f.write_str("::")
- } else if self.is_loopback() {
- f.write_str("::1")
- } else if let Some(ipv4) = self.to_ipv4_mapped() {
+ if let Some(ipv4) = self.to_ipv4_mapped() {
write!(f, "::ffff:{}", ipv4)
} else {
#[derive(Copy, Clone, Default)]
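
After this `Display` change, `::` and `::1` no longer need special cases: they are not IPv4-mapped, so they fall through to the general segment formatter, while genuine `::ffff:0:0/96` addresses keep the mixed notation. Expected output on a toolchain with this change:

```rust
use std::net::{Ipv4Addr, Ipv6Addr};

fn main() {
    assert_eq!(Ipv6Addr::UNSPECIFIED.to_string(), "::");
    assert_eq!(Ipv6Addr::LOCALHOST.to_string(), "::1");

    // IPv4-mapped addresses are still printed in mixed notation.
    let mapped = Ipv4Addr::new(192, 0, 2, 1).to_ipv6_mapped();
    assert_eq!(mapped.to_string(), "::ffff:192.0.2.1");
}
```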
diff --git a/library/core/src/num/dec2flt/fpu.rs b/library/core/src/num/dec2flt/fpu.rs
index 3806977f7..8d62684f8 100644
--- a/library/core/src/num/dec2flt/fpu.rs
+++ b/library/core/src/num/dec2flt/fpu.rs
@@ -8,6 +8,17 @@ pub use fpu_precision::set_precision;
// round to 80 bits causing double rounding to happen when values are eventually represented as
// 32/64 bit float values. To overcome this, the FPU control word can be set so that the
// computations are performed in the desired precision.
+//
+// Note that normally, it is Undefined Behavior to alter the FPU control word while Rust code runs.
+// The compiler assumes that the control word is always in its default state. However, in this
+// particular case the semantics with the altered control word are actually *more faithful*
+// to Rust semantics than the default -- arguably it is all the code that runs *outside* of the scope
+// of a `set_precision` guard that is wrong.
+// In other words, we are only using this to work around <https://github.com/rust-lang/rust/issues/114479>.
+// Sometimes killing UB with UB actually works...
+// (If this is used to set 32bit precision, there is still a risk that the compiler moves some 64bit
+// operation into the scope of the `set_precision` guard. So it's not like this is totally sound.
+// But it's not really any less sound than the default state of 80bit precision...)
#[cfg(all(target_arch = "x86", not(target_feature = "sse2")))]
mod fpu_precision {
use core::arch::asm;
diff --git a/library/core/src/num/dec2flt/number.rs b/library/core/src/num/dec2flt/number.rs
index 8589e2bbd..253899156 100644
--- a/library/core/src/num/dec2flt/number.rs
+++ b/library/core/src/num/dec2flt/number.rs
@@ -51,6 +51,7 @@ impl Number {
/// There is an exception: disguised fast-path cases, where we can shift
/// powers-of-10 from the exponent to the significant digits.
pub fn try_fast_path<F: RawFloat>(&self) -> Option<F> {
+ // Here we need to work around <https://github.com/rust-lang/rust/issues/114479>.
// The fast path crucially depends on arithmetic being rounded to the correct number of bits
// without any intermediate rounding. On x86 (without SSE or SSE2) this requires the precision
// of the x87 FPU stack to be changed so that it directly rounds to 64/32 bit.
diff --git a/library/core/src/num/f32.rs b/library/core/src/num/f32.rs
index d050d21c8..290f649f9 100644
--- a/library/core/src/num/f32.rs
+++ b/library/core/src/num/f32.rs
@@ -277,6 +277,14 @@ pub mod consts {
#[stable(feature = "tau_constant", since = "1.47.0")]
pub const TAU: f32 = 6.28318530717958647692528676655900577_f32;
+ /// The golden ratio (φ)
+ #[unstable(feature = "more_float_constants", issue = "103883")]
+ pub const PHI: f32 = 1.618033988749894848204586834365638118_f32;
+
+ /// The Euler-Mascheroni constant (γ)
+ #[unstable(feature = "more_float_constants", issue = "103883")]
+ pub const EGAMMA: f32 = 0.577215664901532860606512090082402431_f32;
+
/// π/2
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_2: f32 = 1.57079632679489661923132169163975144_f32;
@@ -301,6 +309,10 @@ pub mod consts {
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_1_PI: f32 = 0.318309886183790671537767526745028724_f32;
+ /// 1/sqrt(π)
+ #[unstable(feature = "more_float_constants", issue = "103883")]
+ pub const FRAC_1_SQRT_PI: f32 = 0.564189583547756286948079451560772586_f32;
+
/// 2/π
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_2_PI: f32 = 0.636619772367581343075535053490057448_f32;
@@ -317,6 +329,14 @@ pub mod consts {
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_1_SQRT_2: f32 = 0.707106781186547524400844362104849039_f32;
+ /// sqrt(3)
+ #[unstable(feature = "more_float_constants", issue = "103883")]
+ pub const SQRT_3: f32 = 1.732050807568877293527446341505872367_f32;
+
+ /// 1/sqrt(3)
+ #[unstable(feature = "more_float_constants", issue = "103883")]
+ pub const FRAC_1_SQRT_3: f32 = 0.577350269189625764509148780501957456_f32;
+
/// Euler's number (e)
#[stable(feature = "rust1", since = "1.0.0")]
pub const E: f32 = 2.71828182845904523536028747135266250_f32;
@@ -937,6 +957,7 @@ impl f32 {
} else if self == other {
if self.is_sign_negative() && other.is_sign_positive() { self } else { other }
} else {
+ // At least one input is NaN. Use `+` to perform NaN propagation and quieting.
self + other
}
}
diff --git a/library/core/src/num/f64.rs b/library/core/src/num/f64.rs
index d9a738191..7569d2cd6 100644
--- a/library/core/src/num/f64.rs
+++ b/library/core/src/num/f64.rs
@@ -277,6 +277,14 @@ pub mod consts {
#[stable(feature = "tau_constant", since = "1.47.0")]
pub const TAU: f64 = 6.28318530717958647692528676655900577_f64;
+ /// The golden ratio (φ)
+ #[unstable(feature = "more_float_constants", issue = "103883")]
+ pub const PHI: f64 = 1.618033988749894848204586834365638118_f64;
+
+ /// The Euler-Mascheroni constant (γ)
+ #[unstable(feature = "more_float_constants", issue = "103883")]
+ pub const EGAMMA: f64 = 0.577215664901532860606512090082402431_f64;
+
/// π/2
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_2: f64 = 1.57079632679489661923132169163975144_f64;
@@ -301,6 +309,10 @@ pub mod consts {
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_1_PI: f64 = 0.318309886183790671537767526745028724_f64;
+ /// 1/sqrt(π)
+ #[unstable(feature = "more_float_constants", issue = "103883")]
+ pub const FRAC_1_SQRT_PI: f64 = 0.564189583547756286948079451560772586_f64;
+
/// 2/π
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_2_PI: f64 = 0.636619772367581343075535053490057448_f64;
@@ -317,6 +329,14 @@ pub mod consts {
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_1_SQRT_2: f64 = 0.707106781186547524400844362104849039_f64;
+ /// sqrt(3)
+ #[unstable(feature = "more_float_constants", issue = "103883")]
+ pub const SQRT_3: f64 = 1.732050807568877293527446341505872367_f64;
+
+ /// 1/sqrt(3)
+ #[unstable(feature = "more_float_constants", issue = "103883")]
+ pub const FRAC_1_SQRT_3: f64 = 0.577350269189625764509148780501957456_f64;
+
/// Euler's number (e)
#[stable(feature = "rust1", since = "1.0.0")]
pub const E: f64 = 2.71828182845904523536028747135266250_f64;
@@ -948,6 +968,7 @@ impl f64 {
} else if self == other {
if self.is_sign_negative() && other.is_sign_positive() { self } else { other }
} else {
+ // At least one input is NaN. Use `+` to perform NaN propagation and quieting.
self + other
}
}
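
The new float constants are nightly-only (feature `more_float_constants`); a quick sanity check of two of them against their defining identities:

```rust
#![feature(more_float_constants)]

fn main() {
    // φ satisfies φ² = φ + 1.
    let phi = std::f64::consts::PHI;
    assert!((phi * phi - (phi + 1.0)).abs() < 1e-12);

    // (1/√3)² · 3 = 1.
    let x = std::f64::consts::FRAC_1_SQRT_3;
    assert!((x * x * 3.0 - 1.0).abs() < 1e-12);
}
```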
diff --git a/library/core/src/num/int_macros.rs b/library/core/src/num/int_macros.rs
index 1f43520e1..3cbb55af3 100644
--- a/library/core/src/num/int_macros.rs
+++ b/library/core/src/num/int_macros.rs
@@ -898,6 +898,30 @@ macro_rules! int_impl {
acc.checked_mul(base)
}
+ /// Returns the square root of the number, rounded down.
+ ///
+ /// Returns `None` if `self` is negative.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ /// ```
+ /// #![feature(isqrt)]
+ #[doc = concat!("assert_eq!(10", stringify!($SelfT), ".checked_isqrt(), Some(3));")]
+ /// ```
+ #[unstable(feature = "isqrt", issue = "116226")]
+ #[rustc_const_unstable(feature = "isqrt", issue = "116226")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_isqrt(self) -> Option<Self> {
+ if self < 0 {
+ None
+ } else {
+ Some((self as $UnsignedT).isqrt() as Self)
+ }
+ }
+
/// Saturating integer addition. Computes `self + rhs`, saturating at the numeric
/// bounds instead of overflowing.
///
@@ -2061,6 +2085,36 @@ macro_rules! int_impl {
acc * base
}
+ /// Returns the square root of the number, rounded down.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `self` is negative.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ /// ```
+ /// #![feature(isqrt)]
+ #[doc = concat!("assert_eq!(10", stringify!($SelfT), ".isqrt(), 3);")]
+ /// ```
+ #[unstable(feature = "isqrt", issue = "116226")]
+ #[rustc_const_unstable(feature = "isqrt", issue = "116226")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn isqrt(self) -> Self {
+ // I would like to implement it as
+ // ```
+ // self.checked_isqrt().expect("argument of integer square root must be non-negative")
+ // ```
+ // but `expect` is not yet stable as a `const fn`.
+ match self.checked_isqrt() {
+ Some(sqrt) => sqrt,
+ None => panic!("argument of integer square root must be non-negative"),
+ }
+ }
+
/// Calculates the quotient of Euclidean division of `self` by `rhs`.
///
/// This computes the integer `q` such that `self = q * rhs + r`, with
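Note: the signed `checked_isqrt`/`isqrt` additions above are gated on the unstable `isqrt` feature (tracking issue #116226). A minimal sketch of the intended API surface:
```rust
// Sketch only: exercises the unstable `checked_isqrt`/`isqrt` added above.
#![feature(isqrt)]

fn main() {
    assert_eq!(10_i32.checked_isqrt(), Some(3));
    assert_eq!((-4_i32).checked_isqrt(), None); // negative input yields None
    assert_eq!(16_i32.isqrt(), 4);              // `isqrt` panics on negatives instead
}
```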
diff --git a/library/core/src/num/mod.rs b/library/core/src/num/mod.rs
index 95dcaf5dd..8b127132c 100644
--- a/library/core/src/num/mod.rs
+++ b/library/core/src/num/mod.rs
@@ -44,11 +44,10 @@ mod uint_macros; // import uint_impl!
mod error;
mod int_log10;
mod nonzero;
-#[unstable(feature = "saturating_int_impl", issue = "87920")]
mod saturating;
mod wrapping;
-#[unstable(feature = "saturating_int_impl", issue = "87920")]
+#[stable(feature = "saturating_int_impl", since = "1.74.0")]
pub use saturating::Saturating;
#[stable(feature = "rust1", since = "1.0.0")]
pub use wrapping::Wrapping;
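Note: with the re-export stabilized above, `std::num::Saturating` no longer needs a feature gate. A minimal sketch, assuming the 1.74.0 stabilization ships as shown:
```rust
// Sketch only: no `#![feature(saturating_int_impl)]` gate is needed once this lands.
use std::num::Saturating;

fn main() {
    let a = Saturating(250_u8);
    assert_eq!((a + Saturating(10)).0, u8::MAX); // clamps instead of wrapping
}
```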
diff --git a/library/core/src/num/nonzero.rs b/library/core/src/num/nonzero.rs
index 5939dedbd..7f8d673c1 100644
--- a/library/core/src/num/nonzero.rs
+++ b/library/core/src/num/nonzero.rs
@@ -41,6 +41,20 @@ macro_rules! nonzero_integers {
/// with the exception that `0` is not a valid instance.
#[doc = concat!("`Option<", stringify!($Ty), ">` is guaranteed to be compatible with `", stringify!($Int), "`,")]
/// including in FFI.
+ ///
+ /// Thanks to the [null pointer optimization],
+ #[doc = concat!("`", stringify!($Ty), "` and `Option<", stringify!($Ty), ">`")]
+ /// are guaranteed to have the same size and alignment:
+ ///
+ /// ```
+ /// # use std::mem::{size_of, align_of};
+ #[doc = concat!("use std::num::", stringify!($Ty), ";")]
+ ///
+ #[doc = concat!("assert_eq!(size_of::<", stringify!($Ty), ">(), size_of::<Option<", stringify!($Ty), ">>());")]
+ #[doc = concat!("assert_eq!(align_of::<", stringify!($Ty), ">(), align_of::<Option<", stringify!($Ty), ">>());")]
+ /// ```
+ ///
+ /// [null pointer optimization]: crate::option#representation
#[$stability]
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[repr(transparent)]
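Note: the macro-generated docs above assert the size/alignment guarantee generically for each `NonZero*` type. A concrete instantiation of the same niche guarantee, for illustration:
```rust
// Sketch only: a concrete instance of the null pointer optimization documented above.
use std::mem::{align_of, size_of};
use std::num::NonZeroU32;

fn main() {
    assert_eq!(size_of::<NonZeroU32>(), size_of::<Option<NonZeroU32>>());
    assert_eq!(align_of::<NonZeroU32>(), align_of::<Option<NonZeroU32>>());
    // The all-zero bit pattern is reserved to represent `None`.
    assert_eq!(size_of::<Option<NonZeroU32>>(), 4);
}
```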
diff --git a/library/core/src/num/saturating.rs b/library/core/src/num/saturating.rs
index 8982473b2..d9ccc73c4 100644
--- a/library/core/src/num/saturating.rs
+++ b/library/core/src/num/saturating.rs
@@ -4,7 +4,7 @@ use crate::fmt;
use crate::ops::{Add, AddAssign, BitAnd, BitAndAssign, BitOr, BitOrAssign};
use crate::ops::{BitXor, BitXorAssign, Div, DivAssign};
use crate::ops::{Mul, MulAssign, Neg, Not, Rem, RemAssign};
-use crate::ops::{Shl, ShlAssign, Shr, ShrAssign, Sub, SubAssign};
+use crate::ops::{Sub, SubAssign};
/// Provides intentionally-saturating arithmetic on `T`.
///
@@ -24,7 +24,6 @@ use crate::ops::{Shl, ShlAssign, Shr, ShrAssign, Sub, SubAssign};
/// # Examples
///
/// ```
-/// #![feature(saturating_int_impl)]
/// use std::num::Saturating;
///
/// let max = Saturating(u32::MAX);
@@ -32,180 +31,186 @@ use crate::ops::{Shl, ShlAssign, Shr, ShrAssign, Sub, SubAssign};
///
/// assert_eq!(u32::MAX, (max + one).0);
/// ```
-#[unstable(feature = "saturating_int_impl", issue = "87920")]
+#[stable(feature = "saturating_int_impl", since = "1.74.0")]
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Default, Hash)]
#[repr(transparent)]
-pub struct Saturating<T>(#[unstable(feature = "saturating_int_impl", issue = "87920")] pub T);
+#[rustc_diagnostic_item = "Saturating"]
+pub struct Saturating<T>(
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")] pub T,
+);
-#[unstable(feature = "saturating_int_impl", issue = "87920")]
+#[stable(feature = "saturating_int_impl", since = "1.74.0")]
impl<T: fmt::Debug> fmt::Debug for Saturating<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.0.fmt(f)
}
}
-#[unstable(feature = "saturating_int_impl", issue = "87920")]
+#[stable(feature = "saturating_int_impl", since = "1.74.0")]
impl<T: fmt::Display> fmt::Display for Saturating<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.0.fmt(f)
}
}
-#[unstable(feature = "saturating_int_impl", issue = "87920")]
+#[stable(feature = "saturating_int_impl", since = "1.74.0")]
impl<T: fmt::Binary> fmt::Binary for Saturating<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.0.fmt(f)
}
}
-#[unstable(feature = "saturating_int_impl", issue = "87920")]
+#[stable(feature = "saturating_int_impl", since = "1.74.0")]
impl<T: fmt::Octal> fmt::Octal for Saturating<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.0.fmt(f)
}
}
-#[unstable(feature = "saturating_int_impl", issue = "87920")]
+#[stable(feature = "saturating_int_impl", since = "1.74.0")]
impl<T: fmt::LowerHex> fmt::LowerHex for Saturating<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.0.fmt(f)
}
}
-#[unstable(feature = "saturating_int_impl", issue = "87920")]
+#[stable(feature = "saturating_int_impl", since = "1.74.0")]
impl<T: fmt::UpperHex> fmt::UpperHex for Saturating<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.0.fmt(f)
}
}
-#[allow(unused_macros)]
-macro_rules! sh_impl_signed {
- ($t:ident, $f:ident) => {
- // FIXME what is the correct implementation here? see discussion https://github.com/rust-lang/rust/pull/87921#discussion_r695870065
- //
- // #[unstable(feature = "saturating_int_impl", issue = "87920")]
- // impl Shl<$f> for Saturating<$t> {
- // type Output = Saturating<$t>;
- //
- // #[inline]
- // fn shl(self, other: $f) -> Saturating<$t> {
- // if other < 0 {
- // Saturating(self.0.shr((-other & self::shift_max::$t as $f) as u32))
- // } else {
- // Saturating(self.0.shl((other & self::shift_max::$t as $f) as u32))
- // }
- // }
- // }
- // forward_ref_binop! { impl Shl, shl for Saturating<$t>, $f,
- // #[unstable(feature = "saturating_int_impl", issue = "87920")] }
- //
- // #[unstable(feature = "saturating_int_impl", issue = "87920")]
- // impl ShlAssign<$f> for Saturating<$t> {
- // #[inline]
- // fn shl_assign(&mut self, other: $f) {
- // *self = *self << other;
- // }
- // }
- // forward_ref_op_assign! { impl ShlAssign, shl_assign for Saturating<$t>, $f }
-
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
- impl Shr<$f> for Saturating<$t> {
- type Output = Saturating<$t>;
-
- #[inline]
- fn shr(self, other: $f) -> Saturating<$t> {
- if other < 0 {
- Saturating(self.0.shl((-other & self::shift_max::$t as $f) as u32))
- } else {
- Saturating(self.0.shr((other & self::shift_max::$t as $f) as u32))
- }
- }
- }
- forward_ref_binop! { impl Shr, shr for Saturating<$t>, $f,
- #[unstable(feature = "saturating_int_impl", issue = "87920")] }
-
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
- impl ShrAssign<$f> for Saturating<$t> {
- #[inline]
- fn shr_assign(&mut self, other: $f) {
- *self = *self >> other;
- }
- }
- forward_ref_op_assign! { impl ShrAssign, shr_assign for Saturating<$t>, $f }
- };
-}
-
-macro_rules! sh_impl_unsigned {
- ($t:ident, $f:ident) => {
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
- impl Shl<$f> for Saturating<$t> {
- type Output = Saturating<$t>;
-
- #[inline]
- fn shl(self, other: $f) -> Saturating<$t> {
- Saturating(self.0.wrapping_shl(other as u32))
- }
- }
- forward_ref_binop! { impl Shl, shl for Saturating<$t>, $f,
- #[unstable(feature = "saturating_int_impl", issue = "87920")] }
-
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
- impl ShlAssign<$f> for Saturating<$t> {
- #[inline]
- fn shl_assign(&mut self, other: $f) {
- *self = *self << other;
- }
- }
- forward_ref_op_assign! { impl ShlAssign, shl_assign for Saturating<$t>, $f }
-
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
- impl Shr<$f> for Saturating<$t> {
- type Output = Saturating<$t>;
-
- #[inline]
- fn shr(self, other: $f) -> Saturating<$t> {
- Saturating(self.0.wrapping_shr(other as u32))
- }
- }
- forward_ref_binop! { impl Shr, shr for Saturating<$t>, $f,
- #[unstable(feature = "saturating_int_impl", issue = "87920")] }
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
- impl ShrAssign<$f> for Saturating<$t> {
- #[inline]
- fn shr_assign(&mut self, other: $f) {
- *self = *self >> other;
- }
- }
- forward_ref_op_assign! { impl ShrAssign, shr_assign for Saturating<$t>, $f }
- };
-}
-
-// FIXME (#23545): uncomment the remaining impls
-macro_rules! sh_impl_all {
- ($($t:ident)*) => ($(
- //sh_impl_unsigned! { $t, u8 }
- //sh_impl_unsigned! { $t, u16 }
- //sh_impl_unsigned! { $t, u32 }
- //sh_impl_unsigned! { $t, u64 }
- //sh_impl_unsigned! { $t, u128 }
- sh_impl_unsigned! { $t, usize }
-
- //sh_impl_signed! { $t, i8 }
- //sh_impl_signed! { $t, i16 }
- //sh_impl_signed! { $t, i32 }
- //sh_impl_signed! { $t, i64 }
- //sh_impl_signed! { $t, i128 }
- //sh_impl_signed! { $t, isize }
- )*)
-}
-
-sh_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize }
+// FIXME the correct implementation is not clear. Waiting for a real-world use case at https://github.com/rust-lang/libs-team/issues/230
+//
+// #[allow(unused_macros)]
+// macro_rules! sh_impl_signed {
+// ($t:ident, $f:ident) => {
+// // FIXME what is the correct implementation here? see discussion https://github.com/rust-lang/rust/pull/87921#discussion_r695870065
+// //
+// // #[unstable(feature = "saturating_int_impl", issue = "87920")]
+// // impl Shl<$f> for Saturating<$t> {
+// // type Output = Saturating<$t>;
+// //
+// // #[inline]
+// // fn shl(self, other: $f) -> Saturating<$t> {
+// // if other < 0 {
+// // Saturating(self.0.shr((-other & self::shift_max::$t as $f) as u32))
+// // } else {
+// // Saturating(self.0.shl((other & self::shift_max::$t as $f) as u32))
+// // }
+// // }
+// // }
+// // forward_ref_binop! { impl Shl, shl for Saturating<$t>, $f,
+// // #[unstable(feature = "saturating_int_impl", issue = "87920")] }
+// //
+// // #[unstable(feature = "saturating_int_impl", issue = "87920")]
+// // impl ShlAssign<$f> for Saturating<$t> {
+// // #[inline]
+// // fn shl_assign(&mut self, other: $f) {
+// // *self = *self << other;
+// // }
+// // }
+// // forward_ref_op_assign! { impl ShlAssign, shl_assign for Saturating<$t>, $f }
+//
+// #[unstable(feature = "saturating_int_impl", issue = "87920")]
+// impl Shr<$f> for Saturating<$t> {
+// type Output = Saturating<$t>;
+//
+// #[inline]
+// fn shr(self, other: $f) -> Saturating<$t> {
+// if other < 0 {
+// Saturating(self.0.shl((-other & self::shift_max::$t as $f) as u32))
+// } else {
+// Saturating(self.0.shr((other & self::shift_max::$t as $f) as u32))
+// }
+// }
+// }
+// forward_ref_binop! { impl Shr, shr for Saturating<$t>, $f,
+// #[unstable(feature = "saturating_int_impl", issue = "87920")] }
+//
+// #[unstable(feature = "saturating_int_impl", issue = "87920")]
+// impl ShrAssign<$f> for Saturating<$t> {
+// #[inline]
+// fn shr_assign(&mut self, other: $f) {
+// *self = *self >> other;
+// }
+// }
+// forward_ref_op_assign! { impl ShrAssign, shr_assign for Saturating<$t>, $f }
+// };
+// }
+//
+// macro_rules! sh_impl_unsigned {
+// ($t:ident, $f:ident) => {
+// #[unstable(feature = "saturating_int_impl", issue = "87920")]
+// impl Shl<$f> for Saturating<$t> {
+// type Output = Saturating<$t>;
+//
+// #[inline]
+// fn shl(self, other: $f) -> Saturating<$t> {
+// Saturating(self.0.wrapping_shl(other as u32))
+// }
+// }
+// forward_ref_binop! { impl Shl, shl for Saturating<$t>, $f,
+// #[unstable(feature = "saturating_int_impl", issue = "87920")] }
+//
+// #[unstable(feature = "saturating_int_impl", issue = "87920")]
+// impl ShlAssign<$f> for Saturating<$t> {
+// #[inline]
+// fn shl_assign(&mut self, other: $f) {
+// *self = *self << other;
+// }
+// }
+// forward_ref_op_assign! { impl ShlAssign, shl_assign for Saturating<$t>, $f }
+//
+// #[unstable(feature = "saturating_int_impl", issue = "87920")]
+// impl Shr<$f> for Saturating<$t> {
+// type Output = Saturating<$t>;
+//
+// #[inline]
+// fn shr(self, other: $f) -> Saturating<$t> {
+// Saturating(self.0.wrapping_shr(other as u32))
+// }
+// }
+// forward_ref_binop! { impl Shr, shr for Saturating<$t>, $f,
+// #[unstable(feature = "saturating_int_impl", issue = "87920")] }
+//
+// #[unstable(feature = "saturating_int_impl", issue = "87920")]
+// impl ShrAssign<$f> for Saturating<$t> {
+// #[inline]
+// fn shr_assign(&mut self, other: $f) {
+// *self = *self >> other;
+// }
+// }
+// forward_ref_op_assign! { impl ShrAssign, shr_assign for Saturating<$t>, $f }
+// };
+// }
+//
+// // FIXME (#23545): uncomment the remaining impls
+// macro_rules! sh_impl_all {
+// ($($t:ident)*) => ($(
+// //sh_impl_unsigned! { $t, u8 }
+// //sh_impl_unsigned! { $t, u16 }
+// //sh_impl_unsigned! { $t, u32 }
+// //sh_impl_unsigned! { $t, u64 }
+// //sh_impl_unsigned! { $t, u128 }
+// sh_impl_unsigned! { $t, usize }
+//
+// //sh_impl_signed! { $t, i8 }
+// //sh_impl_signed! { $t, i16 }
+// //sh_impl_signed! { $t, i32 }
+// //sh_impl_signed! { $t, i64 }
+// //sh_impl_signed! { $t, i128 }
+// //sh_impl_signed! { $t, isize }
+// )*)
+// }
+//
+// sh_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize }
// FIXME(30524): impl Op<T> for Saturating<T>, impl OpAssign<T> for Saturating<T>
macro_rules! saturating_impl {
($($t:ty)*) => ($(
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
impl Add for Saturating<$t> {
type Output = Saturating<$t>;
@@ -215,9 +220,9 @@ macro_rules! saturating_impl {
}
}
forward_ref_binop! { impl Add, add for Saturating<$t>, Saturating<$t>,
- #[unstable(feature = "saturating_int_impl", issue = "87920")] }
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")] }
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
impl AddAssign for Saturating<$t> {
#[inline]
fn add_assign(&mut self, other: Saturating<$t>) {
@@ -226,7 +231,7 @@ macro_rules! saturating_impl {
}
forward_ref_op_assign! { impl AddAssign, add_assign for Saturating<$t>, Saturating<$t> }
- #[unstable(feature = "saturating_int_assign_impl", issue = "92354")]
+ #[stable(feature = "saturating_int_assign_impl", since = "1.74.0")]
impl AddAssign<$t> for Saturating<$t> {
#[inline]
fn add_assign(&mut self, other: $t) {
@@ -235,7 +240,7 @@ macro_rules! saturating_impl {
}
forward_ref_op_assign! { impl AddAssign, add_assign for Saturating<$t>, $t }
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
impl Sub for Saturating<$t> {
type Output = Saturating<$t>;
@@ -245,9 +250,9 @@ macro_rules! saturating_impl {
}
}
forward_ref_binop! { impl Sub, sub for Saturating<$t>, Saturating<$t>,
- #[unstable(feature = "saturating_int_impl", issue = "87920")] }
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")] }
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
impl SubAssign for Saturating<$t> {
#[inline]
fn sub_assign(&mut self, other: Saturating<$t>) {
@@ -256,7 +261,7 @@ macro_rules! saturating_impl {
}
forward_ref_op_assign! { impl SubAssign, sub_assign for Saturating<$t>, Saturating<$t> }
- #[unstable(feature = "saturating_int_assign_impl", issue = "92354")]
+ #[stable(feature = "saturating_int_assign_impl", since = "1.74.0")]
impl SubAssign<$t> for Saturating<$t> {
#[inline]
fn sub_assign(&mut self, other: $t) {
@@ -265,7 +270,7 @@ macro_rules! saturating_impl {
}
forward_ref_op_assign! { impl SubAssign, sub_assign for Saturating<$t>, $t }
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
impl Mul for Saturating<$t> {
type Output = Saturating<$t>;
@@ -275,9 +280,9 @@ macro_rules! saturating_impl {
}
}
forward_ref_binop! { impl Mul, mul for Saturating<$t>, Saturating<$t>,
- #[unstable(feature = "saturating_int_impl", issue = "87920")] }
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")] }
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
impl MulAssign for Saturating<$t> {
#[inline]
fn mul_assign(&mut self, other: Saturating<$t>) {
@@ -286,7 +291,7 @@ macro_rules! saturating_impl {
}
forward_ref_op_assign! { impl MulAssign, mul_assign for Saturating<$t>, Saturating<$t> }
- #[unstable(feature = "saturating_int_assign_impl", issue = "92354")]
+ #[stable(feature = "saturating_int_assign_impl", since = "1.74.0")]
impl MulAssign<$t> for Saturating<$t> {
#[inline]
fn mul_assign(&mut self, other: $t) {
@@ -300,7 +305,6 @@ macro_rules! saturating_impl {
/// Basic usage:
///
/// ```
- /// #![feature(saturating_int_impl)]
/// use std::num::Saturating;
///
#[doc = concat!("assert_eq!(Saturating(2", stringify!($t), "), Saturating(5", stringify!($t), ") / Saturating(2));")]
@@ -309,12 +313,11 @@ macro_rules! saturating_impl {
/// ```
///
/// ```should_panic
- /// #![feature(saturating_int_impl)]
/// use std::num::Saturating;
///
#[doc = concat!("let _ = Saturating(0", stringify!($t), ") / Saturating(0);")]
/// ```
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
impl Div for Saturating<$t> {
type Output = Saturating<$t>;
@@ -324,10 +327,10 @@ macro_rules! saturating_impl {
}
}
forward_ref_binop! { impl Div, div for Saturating<$t>, Saturating<$t>,
- #[unstable(feature = "saturating_int_impl", issue = "87920")] }
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")] }
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
impl DivAssign for Saturating<$t> {
#[inline]
fn div_assign(&mut self, other: Saturating<$t>) {
@@ -336,7 +339,7 @@ macro_rules! saturating_impl {
}
forward_ref_op_assign! { impl DivAssign, div_assign for Saturating<$t>, Saturating<$t> }
- #[unstable(feature = "saturating_int_assign_impl", issue = "92354")]
+ #[stable(feature = "saturating_int_assign_impl", since = "1.74.0")]
impl DivAssign<$t> for Saturating<$t> {
#[inline]
fn div_assign(&mut self, other: $t) {
@@ -345,7 +348,7 @@ macro_rules! saturating_impl {
}
forward_ref_op_assign! { impl DivAssign, div_assign for Saturating<$t>, $t }
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
impl Rem for Saturating<$t> {
type Output = Saturating<$t>;
@@ -355,9 +358,9 @@ macro_rules! saturating_impl {
}
}
forward_ref_binop! { impl Rem, rem for Saturating<$t>, Saturating<$t>,
- #[unstable(feature = "saturating_int_impl", issue = "87920")] }
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")] }
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
impl RemAssign for Saturating<$t> {
#[inline]
fn rem_assign(&mut self, other: Saturating<$t>) {
@@ -366,7 +369,7 @@ macro_rules! saturating_impl {
}
forward_ref_op_assign! { impl RemAssign, rem_assign for Saturating<$t>, Saturating<$t> }
- #[unstable(feature = "saturating_int_assign_impl", issue = "92354")]
+ #[stable(feature = "saturating_int_assign_impl", since = "1.74.0")]
impl RemAssign<$t> for Saturating<$t> {
#[inline]
fn rem_assign(&mut self, other: $t) {
@@ -375,7 +378,7 @@ macro_rules! saturating_impl {
}
forward_ref_op_assign! { impl RemAssign, rem_assign for Saturating<$t>, $t }
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
impl Not for Saturating<$t> {
type Output = Saturating<$t>;
@@ -385,9 +388,9 @@ macro_rules! saturating_impl {
}
}
forward_ref_unop! { impl Not, not for Saturating<$t>,
- #[unstable(feature = "saturating_int_impl", issue = "87920")] }
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")] }
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
impl BitXor for Saturating<$t> {
type Output = Saturating<$t>;
@@ -397,9 +400,9 @@ macro_rules! saturating_impl {
}
}
forward_ref_binop! { impl BitXor, bitxor for Saturating<$t>, Saturating<$t>,
- #[unstable(feature = "saturating_int_impl", issue = "87920")] }
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")] }
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
impl BitXorAssign for Saturating<$t> {
#[inline]
fn bitxor_assign(&mut self, other: Saturating<$t>) {
@@ -408,7 +411,7 @@ macro_rules! saturating_impl {
}
forward_ref_op_assign! { impl BitXorAssign, bitxor_assign for Saturating<$t>, Saturating<$t> }
- #[unstable(feature = "saturating_int_assign_impl", issue = "92354")]
+ #[stable(feature = "saturating_int_assign_impl", since = "1.74.0")]
impl BitXorAssign<$t> for Saturating<$t> {
#[inline]
fn bitxor_assign(&mut self, other: $t) {
@@ -417,7 +420,7 @@ macro_rules! saturating_impl {
}
forward_ref_op_assign! { impl BitXorAssign, bitxor_assign for Saturating<$t>, $t }
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
impl BitOr for Saturating<$t> {
type Output = Saturating<$t>;
@@ -427,9 +430,9 @@ macro_rules! saturating_impl {
}
}
forward_ref_binop! { impl BitOr, bitor for Saturating<$t>, Saturating<$t>,
- #[unstable(feature = "saturating_int_impl", issue = "87920")] }
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")] }
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
impl BitOrAssign for Saturating<$t> {
#[inline]
fn bitor_assign(&mut self, other: Saturating<$t>) {
@@ -438,7 +441,7 @@ macro_rules! saturating_impl {
}
forward_ref_op_assign! { impl BitOrAssign, bitor_assign for Saturating<$t>, Saturating<$t> }
- #[unstable(feature = "saturating_int_assign_impl", issue = "92354")]
+ #[stable(feature = "saturating_int_assign_impl", since = "1.74.0")]
impl BitOrAssign<$t> for Saturating<$t> {
#[inline]
fn bitor_assign(&mut self, other: $t) {
@@ -447,7 +450,7 @@ macro_rules! saturating_impl {
}
forward_ref_op_assign! { impl BitOrAssign, bitor_assign for Saturating<$t>, $t }
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
impl BitAnd for Saturating<$t> {
type Output = Saturating<$t>;
@@ -457,9 +460,9 @@ macro_rules! saturating_impl {
}
}
forward_ref_binop! { impl BitAnd, bitand for Saturating<$t>, Saturating<$t>,
- #[unstable(feature = "saturating_int_impl", issue = "87920")] }
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")] }
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
impl BitAndAssign for Saturating<$t> {
#[inline]
fn bitand_assign(&mut self, other: Saturating<$t>) {
@@ -468,7 +471,7 @@ macro_rules! saturating_impl {
}
forward_ref_op_assign! { impl BitAndAssign, bitand_assign for Saturating<$t>, Saturating<$t> }
- #[unstable(feature = "saturating_int_assign_impl", issue = "92354")]
+ #[stable(feature = "saturating_int_assign_impl", since = "1.74.0")]
impl BitAndAssign<$t> for Saturating<$t> {
#[inline]
fn bitand_assign(&mut self, other: $t) {
@@ -492,12 +495,11 @@ macro_rules! saturating_int_impl {
/// Basic usage:
///
/// ```
- /// #![feature(saturating_int_impl)]
/// use std::num::Saturating;
///
#[doc = concat!("assert_eq!(<Saturating<", stringify!($t), ">>::MIN, Saturating(", stringify!($t), "::MIN));")]
/// ```
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
pub const MIN: Self = Self(<$t>::MIN);
/// Returns the largest value that can be represented by this integer type.
@@ -507,12 +509,11 @@ macro_rules! saturating_int_impl {
/// Basic usage:
///
/// ```
- /// #![feature(saturating_int_impl)]
/// use std::num::Saturating;
///
#[doc = concat!("assert_eq!(<Saturating<", stringify!($t), ">>::MAX, Saturating(", stringify!($t), "::MAX));")]
/// ```
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
pub const MAX: Self = Self(<$t>::MAX);
/// Returns the size of this integer type in bits.
@@ -522,12 +523,11 @@ macro_rules! saturating_int_impl {
/// Basic usage:
///
/// ```
- /// #![feature(saturating_int_impl)]
/// use std::num::Saturating;
///
#[doc = concat!("assert_eq!(<Saturating<", stringify!($t), ">>::BITS, ", stringify!($t), "::BITS);")]
/// ```
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
pub const BITS: u32 = <$t>::BITS;
/// Returns the number of ones in the binary representation of `self`.
@@ -537,7 +537,6 @@ macro_rules! saturating_int_impl {
/// Basic usage:
///
/// ```
- /// #![feature(saturating_int_impl)]
/// use std::num::Saturating;
///
#[doc = concat!("let n = Saturating(0b01001100", stringify!($t), ");")]
@@ -549,7 +548,8 @@ macro_rules! saturating_int_impl {
#[doc(alias = "popcnt")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[rustc_const_stable(feature = "saturating_int_impl", since = "1.74.0")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
pub const fn count_ones(self) -> u32 {
self.0.count_ones()
}
@@ -561,7 +561,6 @@ macro_rules! saturating_int_impl {
/// Basic usage:
///
/// ```
- /// #![feature(saturating_int_impl)]
/// use std::num::Saturating;
///
#[doc = concat!("assert_eq!(Saturating(!0", stringify!($t), ").count_zeros(), 0);")]
@@ -569,7 +568,8 @@ macro_rules! saturating_int_impl {
#[inline]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[rustc_const_stable(feature = "saturating_int_impl", since = "1.74.0")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
pub const fn count_zeros(self) -> u32 {
self.0.count_zeros()
}
@@ -581,7 +581,6 @@ macro_rules! saturating_int_impl {
/// Basic usage:
///
/// ```
- /// #![feature(saturating_int_impl)]
/// use std::num::Saturating;
///
#[doc = concat!("let n = Saturating(0b0101000", stringify!($t), ");")]
@@ -591,7 +590,8 @@ macro_rules! saturating_int_impl {
#[inline]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[rustc_const_stable(feature = "saturating_int_impl", since = "1.74.0")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
pub const fn trailing_zeros(self) -> u32 {
self.0.trailing_zeros()
}
@@ -608,7 +608,6 @@ macro_rules! saturating_int_impl {
/// Basic usage:
///
/// ```
- /// #![feature(saturating_int_impl)]
/// use std::num::Saturating;
///
/// let n: Saturating<i64> = Saturating(0x0123456789ABCDEF);
@@ -619,7 +618,8 @@ macro_rules! saturating_int_impl {
#[inline]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[rustc_const_stable(feature = "saturating_int_impl", since = "1.74.0")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
pub const fn rotate_left(self, n: u32) -> Self {
Saturating(self.0.rotate_left(n))
}
@@ -636,7 +636,6 @@ macro_rules! saturating_int_impl {
/// Basic usage:
///
/// ```
- /// #![feature(saturating_int_impl)]
/// use std::num::Saturating;
///
/// let n: Saturating<i64> = Saturating(0x0123456789ABCDEF);
@@ -647,7 +646,8 @@ macro_rules! saturating_int_impl {
#[inline]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[rustc_const_stable(feature = "saturating_int_impl", since = "1.74.0")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
pub const fn rotate_right(self, n: u32) -> Self {
Saturating(self.0.rotate_right(n))
}
@@ -659,7 +659,6 @@ macro_rules! saturating_int_impl {
/// Basic usage:
///
/// ```
- /// #![feature(saturating_int_impl)]
/// use std::num::Saturating;
///
/// let n: Saturating<i16> = Saturating(0b0000000_01010101);
@@ -673,7 +672,8 @@ macro_rules! saturating_int_impl {
#[inline]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[rustc_const_stable(feature = "saturating_int_impl", since = "1.74.0")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
pub const fn swap_bytes(self) -> Self {
Saturating(self.0.swap_bytes())
}
@@ -688,7 +688,6 @@ macro_rules! saturating_int_impl {
/// Basic usage:
///
/// ```
- /// #![feature(saturating_int_impl)]
/// use std::num::Saturating;
///
/// let n = Saturating(0b0000000_01010101i16);
@@ -700,8 +699,8 @@ macro_rules! saturating_int_impl {
/// assert_eq!(m, Saturating(-22016));
/// ```
#[inline]
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
- #[rustc_const_unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[rustc_const_stable(feature = "saturating_int_impl", since = "1.74.0")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
pub const fn reverse_bits(self) -> Self {
@@ -718,7 +717,6 @@ macro_rules! saturating_int_impl {
/// Basic usage:
///
/// ```
- /// #![feature(saturating_int_impl)]
/// use std::num::Saturating;
///
#[doc = concat!("let n = Saturating(0x1A", stringify!($t), ");")]
@@ -731,7 +729,8 @@ macro_rules! saturating_int_impl {
/// ```
#[inline]
#[must_use]
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[rustc_const_stable(feature = "saturating_int_impl", since = "1.74.0")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
pub const fn from_be(x: Self) -> Self {
Saturating(<$t>::from_be(x.0))
}
@@ -746,7 +745,6 @@ macro_rules! saturating_int_impl {
/// Basic usage:
///
/// ```
- /// #![feature(saturating_int_impl)]
/// use std::num::Saturating;
///
#[doc = concat!("let n = Saturating(0x1A", stringify!($t), ");")]
@@ -759,7 +757,8 @@ macro_rules! saturating_int_impl {
/// ```
#[inline]
#[must_use]
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[rustc_const_stable(feature = "saturating_int_impl", since = "1.74.0")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
pub const fn from_le(x: Self) -> Self {
Saturating(<$t>::from_le(x.0))
}
@@ -774,7 +773,6 @@ macro_rules! saturating_int_impl {
/// Basic usage:
///
/// ```
- /// #![feature(saturating_int_impl)]
/// use std::num::Saturating;
///
#[doc = concat!("let n = Saturating(0x1A", stringify!($t), ");")]
@@ -786,7 +784,8 @@ macro_rules! saturating_int_impl {
/// }
/// ```
#[inline]
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[rustc_const_stable(feature = "saturating_int_impl", since = "1.74.0")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
pub const fn to_be(self) -> Self {
@@ -803,7 +802,6 @@ macro_rules! saturating_int_impl {
/// Basic usage:
///
/// ```
- /// #![feature(saturating_int_impl)]
/// use std::num::Saturating;
///
#[doc = concat!("let n = Saturating(0x1A", stringify!($t), ");")]
@@ -815,7 +813,8 @@ macro_rules! saturating_int_impl {
/// }
/// ```
#[inline]
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[rustc_const_stable(feature = "saturating_int_impl", since = "1.74.0")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
pub const fn to_le(self) -> Self {
@@ -829,7 +828,6 @@ macro_rules! saturating_int_impl {
/// Basic usage:
///
/// ```
- /// #![feature(saturating_int_impl)]
/// use std::num::Saturating;
///
#[doc = concat!("assert_eq!(Saturating(3", stringify!($t), ").pow(4), Saturating(81));")]
@@ -838,17 +836,17 @@ macro_rules! saturating_int_impl {
/// Results that are too large are saturated:
///
/// ```
- /// #![feature(saturating_int_impl)]
/// use std::num::Saturating;
///
/// assert_eq!(Saturating(3i8).pow(5), Saturating(127));
/// assert_eq!(Saturating(3i8).pow(6), Saturating(127));
/// ```
#[inline]
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[rustc_const_stable(feature = "saturating_int_impl", since = "1.74.0")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
- pub fn pow(self, exp: u32) -> Self {
+ pub const fn pow(self, exp: u32) -> Self {
Saturating(self.0.saturating_pow(exp))
}
}
@@ -867,7 +865,6 @@ macro_rules! saturating_int_impl_signed {
/// Basic usage:
///
/// ```
- /// #![feature(saturating_int_impl)]
/// use std::num::Saturating;
///
#[doc = concat!("let n = Saturating(", stringify!($t), "::MAX >> 2);")]
@@ -875,7 +872,8 @@ macro_rules! saturating_int_impl_signed {
/// assert_eq!(n.leading_zeros(), 3);
/// ```
#[inline]
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[rustc_const_stable(feature = "saturating_int_impl", since = "1.74.0")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
pub const fn leading_zeros(self) -> u32 {
@@ -890,7 +888,6 @@ macro_rules! saturating_int_impl_signed {
/// Basic usage:
///
/// ```
- /// #![feature(saturating_int_impl)]
/// use std::num::Saturating;
///
#[doc = concat!("assert_eq!(Saturating(100", stringify!($t), ").abs(), Saturating(100));")]
@@ -900,10 +897,11 @@ macro_rules! saturating_int_impl_signed {
#[doc = concat!("assert_eq!(Saturating(", stringify!($t), "::MIN).abs(), Saturating(", stringify!($t), "::MAX));")]
/// ```
#[inline]
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[rustc_const_stable(feature = "saturating_int_impl", since = "1.74.0")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
- pub fn abs(self) -> Saturating<$t> {
+ pub const fn abs(self) -> Saturating<$t> {
Saturating(self.0.saturating_abs())
}
@@ -918,7 +916,6 @@ macro_rules! saturating_int_impl_signed {
/// Basic usage:
///
/// ```
- /// #![feature(saturating_int_impl)]
/// use std::num::Saturating;
///
#[doc = concat!("assert_eq!(Saturating(10", stringify!($t), ").signum(), Saturating(1));")]
@@ -926,10 +923,11 @@ macro_rules! saturating_int_impl_signed {
#[doc = concat!("assert_eq!(Saturating(-10", stringify!($t), ").signum(), Saturating(-1));")]
/// ```
#[inline]
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[rustc_const_stable(feature = "saturating_int_impl", since = "1.74.0")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
- pub fn signum(self) -> Saturating<$t> {
+ pub const fn signum(self) -> Saturating<$t> {
Saturating(self.0.signum())
}
@@ -941,7 +939,6 @@ macro_rules! saturating_int_impl_signed {
/// Basic usage:
///
/// ```
- /// #![feature(saturating_int_impl)]
/// use std::num::Saturating;
///
#[doc = concat!("assert!(Saturating(10", stringify!($t), ").is_positive());")]
@@ -949,7 +946,8 @@ macro_rules! saturating_int_impl_signed {
/// ```
#[must_use]
#[inline]
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[rustc_const_stable(feature = "saturating_int_impl", since = "1.74.0")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
pub const fn is_positive(self) -> bool {
self.0.is_positive()
}
@@ -962,7 +960,6 @@ macro_rules! saturating_int_impl_signed {
/// Basic usage:
///
/// ```
- /// #![feature(saturating_int_impl)]
/// use std::num::Saturating;
///
#[doc = concat!("assert!(Saturating(-10", stringify!($t), ").is_negative());")]
@@ -970,13 +967,14 @@ macro_rules! saturating_int_impl_signed {
/// ```
#[must_use]
#[inline]
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[rustc_const_stable(feature = "saturating_int_impl", since = "1.74.0")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
pub const fn is_negative(self) -> bool {
self.0.is_negative()
}
}
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
impl Neg for Saturating<$t> {
type Output = Self;
#[inline]
@@ -985,7 +983,7 @@ macro_rules! saturating_int_impl_signed {
}
}
forward_ref_unop! { impl Neg, neg for Saturating<$t>,
- #[unstable(feature = "saturating_int_impl", issue = "87920")] }
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")] }
)*)
}
@@ -1001,7 +999,6 @@ macro_rules! saturating_int_impl_unsigned {
/// Basic usage:
///
/// ```
- /// #![feature(saturating_int_impl)]
/// use std::num::Saturating;
///
#[doc = concat!("let n = Saturating(", stringify!($t), "::MAX >> 2);")]
@@ -1009,7 +1006,8 @@ macro_rules! saturating_int_impl_unsigned {
/// assert_eq!(n.leading_zeros(), 2);
/// ```
#[inline]
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[rustc_const_stable(feature = "saturating_int_impl", since = "1.74.0")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
pub const fn leading_zeros(self) -> u32 {
@@ -1023,7 +1021,6 @@ macro_rules! saturating_int_impl_unsigned {
/// Basic usage:
///
/// ```
- /// #![feature(saturating_int_impl)]
/// use std::num::Saturating;
///
#[doc = concat!("assert!(Saturating(16", stringify!($t), ").is_power_of_two());")]
@@ -1031,8 +1028,9 @@ macro_rules! saturating_int_impl_unsigned {
/// ```
#[must_use]
#[inline]
- #[unstable(feature = "saturating_int_impl", issue = "87920")]
- pub fn is_power_of_two(self) -> bool {
+ #[rustc_const_stable(feature = "saturating_int_impl", since = "1.74.0")]
+ #[stable(feature = "saturating_int_impl", since = "1.74.0")]
+ pub const fn is_power_of_two(self) -> bool {
self.0.is_power_of_two()
}
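Note: alongside the stabilization, several inherent methods in this file (`pow`, `abs`, `signum`, `is_power_of_two`) become `const fn`. A minimal sketch of evaluating them in constant contexts, assuming the stabilization shown here has shipped:
```rust
// Sketch only: `pow` and `abs` are now `const fn`, so they can be used in consts.
use std::num::Saturating;

const P: Saturating<i8> = Saturating(3).pow(6);      // 729 saturates to i8::MAX
const A: Saturating<i8> = Saturating(i8::MIN).abs(); // saturates to i8::MAX

fn main() {
    assert_eq!(P, Saturating(127));
    assert_eq!(A, Saturating(127));
    assert!(Saturating(16_u32).is_power_of_two());
}
```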
diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs
index 23ca37817..a9c5312a1 100644
--- a/library/core/src/num/uint_macros.rs
+++ b/library/core/src/num/uint_macros.rs
@@ -1259,6 +1259,10 @@ macro_rules! uint_impl {
/// This function exists, so that all operations
/// are accounted for in the wrapping operations.
///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is 0.
+ ///
/// # Examples
///
/// Basic usage:
@@ -1284,6 +1288,10 @@ macro_rules! uint_impl {
/// definitions of division are equal, this
/// is exactly equal to `self.wrapping_div(rhs)`.
///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is 0.
+ ///
/// # Examples
///
/// Basic usage:
@@ -1307,6 +1315,10 @@ macro_rules! uint_impl {
/// This function exists, so that all operations
/// are accounted for in the wrapping operations.
///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is 0.
+ ///
/// # Examples
///
/// Basic usage:
@@ -1333,6 +1345,10 @@ macro_rules! uint_impl {
/// definitions of division are equal, this
/// is exactly equal to `self.wrapping_rem(rhs)`.
///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is 0.
+ ///
/// # Examples
///
/// Basic usage:
@@ -1979,6 +1995,54 @@ macro_rules! uint_impl {
acc * base
}
+ /// Returns the square root of the number, rounded down.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ /// ```
+ /// #![feature(isqrt)]
+ #[doc = concat!("assert_eq!(10", stringify!($SelfT), ".isqrt(), 3);")]
+ /// ```
+ #[unstable(feature = "isqrt", issue = "116226")]
+ #[rustc_const_unstable(feature = "isqrt", issue = "116226")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn isqrt(self) -> Self {
+ if self < 2 {
+ return self;
+ }
+
+ // The algorithm is based on the one presented in
+ // <https://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Binary_numeral_system_(base_2)>
+ // which cites as source the following C code:
+ // <https://web.archive.org/web/20120306040058/http://medialab.freaknet.org/martin/src/sqrt/sqrt.c>.
+
+ let mut op = self;
+ let mut res = 0;
+ let mut one = 1 << (self.ilog2() & !1);
+
+ while one != 0 {
+ if op >= res + one {
+ op -= res + one;
+ res = (res >> 1) + one;
+ } else {
+ res >>= 1;
+ }
+ one >>= 2;
+ }
+
+ // SAFETY: the result is positive and fits in an integer with half as many bits.
+ // Inform the optimizer about it.
+ unsafe {
+ intrinsics::assume(0 < res);
+ intrinsics::assume(res < 1 << (Self::BITS / 2));
+ }
+
+ res
+ }
+
/// Performs Euclidean division.
///
/// Since, for the positive integers, all common
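Note: the unsigned `isqrt` above uses the classic binary digit-by-digit square root cited in its comments. A standalone sketch of the same algorithm for `u32`, without the `intrinsics::assume` optimizer hints, to make the control flow easier to follow:
```rust
// Sketch only: mirrors the digit-by-digit algorithm in the diff above for `u32`.
fn isqrt_u32(n: u32) -> u32 {
    if n < 2 {
        return n;
    }
    let mut op = n;
    let mut res = 0u32;
    // Largest power of four not exceeding `n`.
    let mut one = 1u32 << (n.ilog2() & !1);

    while one != 0 {
        if op >= res + one {
            op -= res + one;
            res = (res >> 1) + one;
        } else {
            res >>= 1;
        }
        one >>= 2;
    }
    res
}

fn main() {
    assert_eq!(isqrt_u32(0), 0);
    assert_eq!(isqrt_u32(10), 3);
    assert_eq!(isqrt_u32(u32::MAX), 65535);
}
```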
diff --git a/library/core/src/num/wrapping.rs b/library/core/src/num/wrapping.rs
index ed354a2e5..16f0b6d91 100644
--- a/library/core/src/num/wrapping.rs
+++ b/library/core/src/num/wrapping.rs
@@ -39,6 +39,7 @@ use crate::ops::{Shl, ShlAssign, Shr, ShrAssign, Sub, SubAssign};
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Default, Hash)]
#[repr(transparent)]
+#[rustc_diagnostic_item = "Wrapping"]
pub struct Wrapping<T>(#[stable(feature = "rust1", since = "1.0.0")] pub T);
#[stable(feature = "rust1", since = "1.0.0")]
diff --git a/library/core/src/ops/deref.rs b/library/core/src/ops/deref.rs
index 08c35b6da..911761c6e 100644
--- a/library/core/src/ops/deref.rs
+++ b/library/core/src/ops/deref.rs
@@ -14,6 +14,11 @@
/// For similar reasons, **this trait should never fail**. Failure during
/// dereferencing can be extremely confusing when `Deref` is invoked implicitly.
///
+/// Violating these requirements is a logic error. The behavior resulting from a logic error is not
+/// specified, but users of the trait must ensure that such logic errors do *not* result in
+/// undefined behavior. This means that `unsafe` code **must not** rely on the correctness of this
+/// method.
+///
/// # More on `Deref` coercion
///
/// If `T` implements `Deref<Target = U>`, and `x` is a value of type `T`, then:
@@ -114,6 +119,11 @@ impl<T: ?Sized> Deref for &mut T {
/// dereferencing can be extremely confusing when `DerefMut` is invoked
/// implicitly.
///
+/// Violating these requirements is a logic error. The behavior resulting from a logic error is not
+/// specified, but users of the trait must ensure that such logic errors do *not* result in
+/// undefined behavior. This means that `unsafe` code **must not** rely on the correctness of this
+/// method.
+///
/// # More on `Deref` coercion
///
/// If `T` implements `DerefMut<Target = U>`, and `x` is a value of type `T`,
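Note: the new `Deref`/`DerefMut` wording says an incorrect implementation is a logic error that unsafe code must not turn into UB. A contrived sketch (the `Flaky` type is invented for illustration) of why unsafe code cannot assume two derefs observe the same referent:
```rust
// Sketch only: a safe but "incorrect" Deref impl. It is a logic error, yet it
// must remain memory-safe, so unsafe code cannot rely on Deref being consistent.
use std::cell::Cell;
use std::ops::Deref;

struct Flaky {
    toggle: Cell<bool>,
    a: i32,
    b: i32,
}

impl Deref for Flaky {
    type Target = i32;
    fn deref(&self) -> &i32 {
        let t = self.toggle.get();
        self.toggle.set(!t);
        if t { &self.a } else { &self.b }
    }
}

fn main() {
    let f = Flaky { toggle: Cell::new(false), a: 1, b: 2 };
    // Each deref may yield a different value; surprising, but not UB.
    assert_eq!(*f, 2);
    assert_eq!(*f, 1);
}
```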
diff --git a/library/core/src/ops/drop.rs b/library/core/src/ops/drop.rs
index 9ebf426be..34dfa9e4c 100644
--- a/library/core/src/ops/drop.rs
+++ b/library/core/src/ops/drop.rs
@@ -202,7 +202,7 @@
/// [nomicon]: ../../nomicon/phantom-data.html#an-exception-the-special-case-of-the-standard-library-and-its-unstable-may_dangle
#[lang = "drop"]
#[stable(feature = "rust1", since = "1.0.0")]
-#[const_trait]
+// FIXME(effects) #[const_trait]
pub trait Drop {
/// Executes the destructor for this type.
///
@@ -217,8 +217,13 @@ pub trait Drop {
///
/// # Panics
///
- /// Given that a [`panic!`] will call `drop` as it unwinds, any [`panic!`]
- /// in a `drop` implementation will likely abort.
+ /// Implementations should generally avoid [`panic!`]ing, because `drop()` may itself be called
+ /// during unwinding due to a panic, and if the `drop()` panics in that situation (a “double
+ /// panic”), this will likely abort the program. It is possible to check [`panicking()`] first,
+ /// which may be desirable for a `Drop` implementation that is reporting a bug of the kind
+ /// “you didn't finish using this before it was dropped”; but most types should simply clean up
+ /// their owned allocations or other resources and return normally from `drop()`, regardless of
+ /// what state they are in.
///
/// Note that even if this panics, the value is considered to be dropped;
/// you must not cause `drop` to be called again. This is normally automatically
@@ -227,6 +232,7 @@ pub trait Drop {
///
/// [E0040]: ../../error_codes/E0040.html
/// [`panic!`]: crate::panic!
+ /// [`panicking()`]: ../../std/thread/fn.panicking.html
/// [`mem::drop`]: drop
/// [`ptr::drop_in_place`]: crate::ptr::drop_in_place
#[stable(feature = "rust1", since = "1.0.0")]
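Note: the rewritten `Drop::drop` panic docs mention checking `panicking()` for "you didn't finish using this" style bug reports. A minimal sketch of that pattern (the `MustFinish` type is invented for illustration):
```rust
// Sketch only: report misuse from `drop`, but skip the panic while already
// unwinding so a double panic (and the resulting abort) is avoided.
struct MustFinish {
    finished: bool,
}

impl Drop for MustFinish {
    fn drop(&mut self) {
        if !self.finished && !std::thread::panicking() {
            panic!("dropped a MustFinish that was never finished");
        }
    }
}

fn main() {
    let mut m = MustFinish { finished: false };
    m.finished = true; // forgetting this line would panic in `drop`
}
```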
diff --git a/library/core/src/ops/function.rs b/library/core/src/ops/function.rs
index 67c8245f0..20f0bba4c 100644
--- a/library/core/src/ops/function.rs
+++ b/library/core/src/ops/function.rs
@@ -72,7 +72,7 @@ use crate::marker::Tuple;
)]
#[fundamental] // so that regex can rely that `&str: !FnMut`
#[must_use = "closures are lazy and do nothing unless called"]
-#[const_trait]
+// FIXME(effects) #[const_trait]
pub trait Fn<Args: Tuple>: FnMut<Args> {
/// Performs the call operation.
#[unstable(feature = "fn_traits", issue = "29625")]
@@ -159,7 +159,7 @@ pub trait Fn<Args: Tuple>: FnMut<Args> {
)]
#[fundamental] // so that regex can rely that `&str: !FnMut`
#[must_use = "closures are lazy and do nothing unless called"]
-#[const_trait]
+// FIXME(effects) #[const_trait]
pub trait FnMut<Args: Tuple>: FnOnce<Args> {
/// Performs the call operation.
#[unstable(feature = "fn_traits", issue = "29625")]
@@ -238,7 +238,7 @@ pub trait FnMut<Args: Tuple>: FnOnce<Args> {
)]
#[fundamental] // so that regex can rely that `&str: !FnMut`
#[must_use = "closures are lazy and do nothing unless called"]
-#[const_trait]
+// FIXME(effects) #[const_trait]
pub trait FnOnce<Args: Tuple> {
/// The returned type after the call operator is used.
#[lang = "fn_once_output"]
diff --git a/library/core/src/ops/range.rs b/library/core/src/ops/range.rs
index ba5e6ddc7..cc596293c 100644
--- a/library/core/src/ops/range.rs
+++ b/library/core/src/ops/range.rs
@@ -11,7 +11,7 @@ use crate::hash::Hash;
/// The `..` syntax is a `RangeFull`:
///
/// ```
-/// assert_eq!((..), std::ops::RangeFull);
+/// assert_eq!(.., std::ops::RangeFull);
/// ```
///
/// It does not have an [`IntoIterator`] implementation, so you can't use it in
diff --git a/library/core/src/option.rs b/library/core/src/option.rs
index becb63309..f2909a81d 100644
--- a/library/core/src/option.rs
+++ b/library/core/src/option.rs
@@ -119,7 +119,7 @@
//! # Representation
//!
//! Rust guarantees to optimize the following types `T` such that
-//! [`Option<T>`] has the same size as `T`:
+//! [`Option<T>`] has the same size and alignment as `T`:
//!
//! * [`Box<U>`]
//! * `&U`
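Note: the doc change above tightens the guarantee from "same size" to "same size and alignment". A quick check for two of the listed types:
```rust
// Sketch only: the guarantee now explicitly covers alignment as well as size.
use std::mem::{align_of, size_of};

fn main() {
    assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
    assert_eq!(align_of::<Option<&u8>>(), align_of::<&u8>());
    assert_eq!(size_of::<Option<Box<u32>>>(), size_of::<Box<u32>>());
    assert_eq!(align_of::<Option<Box<u32>>>(), align_of::<Box<u32>>());
}
```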
diff --git a/library/core/src/panic.rs b/library/core/src/panic.rs
index 20be60d35..386f5fcbd 100644
--- a/library/core/src/panic.rs
+++ b/library/core/src/panic.rs
@@ -99,7 +99,7 @@ pub macro unreachable_2021 {
/// use.
#[unstable(feature = "std_internals", issue = "none")]
#[doc(hidden)]
-pub unsafe trait BoxMeUp {
+pub unsafe trait PanicPayload {
/// Take full ownership of the contents.
/// The return type is actually `Box<dyn Any + Send>`, but we cannot use `Box` in core.
///
@@ -107,7 +107,7 @@ pub unsafe trait BoxMeUp {
/// Calling this method twice, or calling `get` after calling this method, is an error.
///
/// The argument is borrowed because the panic runtime (`__rust_start_panic`) only
- /// gets a borrowed `dyn BoxMeUp`.
+ /// gets a borrowed `dyn PanicPayload`.
fn take_box(&mut self) -> *mut (dyn Any + Send);
/// Just borrow the contents.
diff --git a/library/core/src/panic/panic_info.rs b/library/core/src/panic/panic_info.rs
index c7f04f11e..c77e9675a 100644
--- a/library/core/src/panic/panic_info.rs
+++ b/library/core/src/panic/panic_info.rs
@@ -28,6 +28,7 @@ pub struct PanicInfo<'a> {
message: Option<&'a fmt::Arguments<'a>>,
location: &'a Location<'a>,
can_unwind: bool,
+ force_no_backtrace: bool,
}
impl<'a> PanicInfo<'a> {
@@ -42,9 +43,10 @@ impl<'a> PanicInfo<'a> {
message: Option<&'a fmt::Arguments<'a>>,
location: &'a Location<'a>,
can_unwind: bool,
+ force_no_backtrace: bool,
) -> Self {
struct NoPayload;
- PanicInfo { location, message, payload: &NoPayload, can_unwind }
+ PanicInfo { location, message, payload: &NoPayload, can_unwind, force_no_backtrace }
}
#[unstable(
@@ -141,6 +143,17 @@ impl<'a> PanicInfo<'a> {
pub fn can_unwind(&self) -> bool {
self.can_unwind
}
+
+ #[unstable(
+ feature = "panic_internals",
+ reason = "internal details of the implementation of the `panic!` and related macros",
+ issue = "none"
+ )]
+ #[doc(hidden)]
+ #[inline]
+ pub fn force_no_backtrace(&self) -> bool {
+ self.force_no_backtrace
+ }
}
#[stable(feature = "panic_hook_display", since = "1.26.0")]
diff --git a/library/core/src/panicking.rs b/library/core/src/panicking.rs
index 7b6249207..e6cdffd96 100644
--- a/library/core/src/panicking.rs
+++ b/library/core/src/panicking.rs
@@ -61,7 +61,12 @@ pub const fn panic_fmt(fmt: fmt::Arguments<'_>) -> ! {
fn panic_impl(pi: &PanicInfo<'_>) -> !;
}
- let pi = PanicInfo::internal_constructor(Some(&fmt), Location::caller(), true);
+ let pi = PanicInfo::internal_constructor(
+ Some(&fmt),
+ Location::caller(),
+ /* can_unwind */ true,
+ /* force_no_backtrace */ false,
+ );
// SAFETY: `panic_impl` is defined in safe Rust code and thus is safe to call.
unsafe { panic_impl(&pi) }
@@ -77,7 +82,7 @@ pub const fn panic_fmt(fmt: fmt::Arguments<'_>) -> ! {
// and unwinds anyway, we will hit the "unwinding out of nounwind function" guard,
// which causes a "panic in a function that cannot unwind".
#[rustc_nounwind]
-pub fn panic_nounwind_fmt(fmt: fmt::Arguments<'_>) -> ! {
+pub fn panic_nounwind_fmt(fmt: fmt::Arguments<'_>, force_no_backtrace: bool) -> ! {
if cfg!(feature = "panic_immediate_abort") {
super::intrinsics::abort()
}
@@ -90,7 +95,12 @@ pub fn panic_nounwind_fmt(fmt: fmt::Arguments<'_>) -> ! {
}
// PanicInfo with the `can_unwind` flag set to false forces an abort.
- let pi = PanicInfo::internal_constructor(Some(&fmt), Location::caller(), false);
+ let pi = PanicInfo::internal_constructor(
+ Some(&fmt),
+ Location::caller(),
+ /* can_unwind */ false,
+ force_no_backtrace,
+ );
// SAFETY: `panic_impl` is defined in safe Rust code and thus is safe to call.
unsafe { panic_impl(&pi) }
@@ -123,7 +133,15 @@ pub const fn panic(expr: &'static str) -> ! {
#[lang = "panic_nounwind"] // needed by codegen for non-unwinding panics
#[rustc_nounwind]
pub fn panic_nounwind(expr: &'static str) -> ! {
- panic_nounwind_fmt(fmt::Arguments::new_const(&[expr]));
+ panic_nounwind_fmt(fmt::Arguments::new_const(&[expr]), /* force_no_backtrace */ false);
+}
+
+/// Like `panic_nounwind`, but also inhibits showing a backtrace.
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
+#[rustc_nounwind]
+pub fn panic_nounwind_nobacktrace(expr: &'static str) -> ! {
+ panic_nounwind_fmt(fmt::Arguments::new_const(&[expr]), /* force_no_backtrace */ true);
}
#[inline]
@@ -172,13 +190,18 @@ fn panic_misaligned_pointer_dereference(required: usize, found: usize) -> ! {
super::intrinsics::abort()
}
- panic_nounwind_fmt(format_args!(
- "misaligned pointer dereference: address must be a multiple of {required:#x} but is {found:#x}"
- ))
+ panic_nounwind_fmt(
+ format_args!(
+ "misaligned pointer dereference: address must be a multiple of {required:#x} but is {found:#x}"
+ ),
+ /* force_no_backtrace */ false,
+ )
}
/// Panic because we cannot unwind out of a function.
///
+/// This is a separate function to avoid the codesize impact of each crate containing the string to
+/// pass to `panic_nounwind`.
/// This function is called directly by the codegen backend, and must not have
/// any extra arguments (including those synthesized by track_caller).
#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
@@ -186,15 +209,33 @@ fn panic_misaligned_pointer_dereference(required: usize, found: usize) -> ! {
#[lang = "panic_cannot_unwind"] // needed by codegen for panic in nounwind function
#[rustc_nounwind]
fn panic_cannot_unwind() -> ! {
+ // Keep the text in sync with `UnwindTerminateReason::as_str` in `rustc_middle`.
panic_nounwind("panic in a function that cannot unwind")
}
+/// Panic because we are unwinding out of a destructor during cleanup.
+///
+/// This is a separate function to avoid the codesize impact of each crate containing the string to
+/// pass to `panic_nounwind`.
+/// This function is called directly by the codegen backend, and must not have
+/// any extra arguments (including those synthesized by track_caller).
+#[cfg(not(bootstrap))]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
+#[lang = "panic_in_cleanup"] // needed by codegen for panic in nounwind function
+#[rustc_nounwind]
+fn panic_in_cleanup() -> ! {
+ // Keep the text in sync with `UnwindTerminateReason::as_str` in `rustc_middle`.
+ panic_nounwind_nobacktrace("panic in a destructor during cleanup")
+}
+
/// This function is used instead of panic_fmt in const eval.
#[lang = "const_panic_fmt"]
#[rustc_const_unstable(feature = "core_panic", issue = "none")]
pub const fn const_panic_fmt(fmt: fmt::Arguments<'_>) -> ! {
if let Some(msg) = fmt.as_str() {
- panic_str(msg);
+ // The panic_display function is hooked by const eval.
+ panic_display(&msg);
} else {
// SAFETY: This is only evaluated at compile time, which reliably
// handles this UB (in case this branch turns out to be reachable
diff --git a/library/core/src/pin.rs b/library/core/src/pin.rs
index 6b319b435..94c682b61 100644
--- a/library/core/src/pin.rs
+++ b/library/core/src/pin.rs
@@ -572,7 +572,10 @@ impl<P: Deref> Pin<P> {
/// // though we have previously pinned it! We have violated the pinning API contract.
/// }
/// ```
- /// A value, once pinned, must remain pinned forever (unless its type implements `Unpin`).
+ /// A value, once pinned, must remain pinned until it is dropped (unless its type implements
+ /// `Unpin`). Because `Pin<&mut T>` does not own the value, dropping the `Pin` will not drop
+ /// the value and will not end the pinning contract. So moving the value after dropping the
+ /// `Pin<&mut T>` is still a violation of the API contract.
///
/// Similarly, calling `Pin::new_unchecked` on an `Rc<T>` is unsafe because there could be
/// aliases to the same data that are not subject to the pinning restrictions:
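Note: the clarified `Pin::new_unchecked` docs spell out that dropping a `Pin<&mut T>` does not end the pinning contract. A sketch of the shape of that violation (the `NotUnpin` type is invented for illustration; the code compiles, which is exactly why `new_unchecked` must be `unsafe`):
```rust
// Sketch only: dropping the `Pin<&mut T>` does not release the pinning promise,
// so the later move still violates the contract even though it compiles.
use std::marker::PhantomPinned;
use std::pin::Pin;

struct NotUnpin {
    data: u8,
    _pin: PhantomPinned, // opts out of `Unpin`
}

fn main() {
    let mut v = NotUnpin { data: 1, _pin: PhantomPinned };
    {
        // Caller promises `v` stays pinned until it is dropped.
        let p: Pin<&mut NotUnpin> = unsafe { Pin::new_unchecked(&mut v) };
        let _ = p;
    } // the `Pin<&mut _>` is gone, but the promise is not
    let moved = v; // still a violation of the contract made above
    let _ = moved.data;
}
```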
diff --git a/library/core/src/primitive_docs.rs b/library/core/src/primitive_docs.rs
index 80289ca08..fd5fe5a04 100644
--- a/library/core/src/primitive_docs.rs
+++ b/library/core/src/primitive_docs.rs
@@ -1,6 +1,3 @@
-// `library/{std,core}/src/primitive_docs.rs` should have the same contents.
-// These are different files so that relative links work properly without
-// having to have `CARGO_PKG_NAME` set, but conceptually they should always be the same.
#[rustc_doc_primitive = "bool"]
#[doc(alias = "true")]
#[doc(alias = "false")]
@@ -106,7 +103,7 @@ mod prim_bool {}
/// behaviour of the `!` type - expressions with type `!` will coerce into any other type.
///
/// [`u32`]: prim@u32
-#[doc = concat!("[`exit`]: ", include_str!("../primitive_docs/process_exit.md"))]
+/// [`exit`]: ../std/process/fn.exit.html
///
/// # `!` and generics
///
@@ -191,7 +188,7 @@ mod prim_bool {}
/// because `!` coerces to `Result<!, ConnectionError>` automatically.
///
/// [`String::from_str`]: str::FromStr::from_str
-#[doc = concat!("[`String`]: ", include_str!("../primitive_docs/string_string.md"))]
+/// [`String`]: ../std/string/struct.String.html
/// [`FromStr`]: str::FromStr
///
/// # `!` and traits
@@ -267,7 +264,7 @@ mod prim_bool {}
/// `impl` for this which simply panics, but the same is true for any type (we could `impl
/// Default` for (eg.) [`File`] by just making [`default()`] panic.)
///
-#[doc = concat!("[`File`]: ", include_str!("../primitive_docs/fs_file.md"))]
+/// [`File`]: ../std/fs/struct.File.html
/// [`Debug`]: fmt::Debug
/// [`default()`]: Default::default
///
@@ -355,7 +352,7 @@ mod prim_never {}
/// assert_eq!(5, s.len() * std::mem::size_of::<u8>());
/// ```
///
-#[doc = concat!("[`String`]: ", include_str!("../primitive_docs/string_string.md"))]
+/// [`String`]: ../std/string/struct.String.html
///
/// As always, remember that a human intuition for 'character' might not map to
/// Unicode's definitions. For example, despite looking similar, the 'é'
@@ -572,7 +569,7 @@ impl Copy for () {
/// [`null_mut`]: ptr::null_mut
/// [`is_null`]: pointer::is_null
/// [`offset`]: pointer::offset
-#[doc = concat!("[`into_raw`]: ", include_str!("../primitive_docs/box_into_raw.md"))]
+/// [`into_raw`]: ../std/boxed/struct.Box.html#method.into_raw
/// [`write`]: ptr::write
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_pointer {}
@@ -612,7 +609,7 @@ mod prim_pointer {}
/// statically generated up to size 32.
///
/// Arrays of sizes from 1 to 12 (inclusive) implement [`From<Tuple>`], where `Tuple`
-/// is a homogenous [prim@tuple] of appropriate length.
+/// is a homogeneous [prim@tuple] of appropriate length.
///
/// Arrays coerce to [slices (`[T]`)][slice], so a slice method may be called on
/// an array. Indeed, this provides most of the API for working with arrays.
@@ -676,7 +673,7 @@ mod prim_pointer {}
/// move_away(roa);
/// ```
///
-/// Arrays can be created from homogenous tuples of appropriate length:
+/// Arrays can be created from homogeneous tuples of appropriate length:
///
/// ```
/// let tuple: (u32, u32, u32) = (1, 2, 3);
@@ -1065,7 +1062,7 @@ mod prim_str {}
/// assert_eq!(y, 5);
/// ```
///
-/// Homogenous tuples can be created from arrays of appropriate length:
+/// Homogeneous tuples can be created from arrays of appropriate length:
///
/// ```
/// let array: [u32; 3] = [1, 2, 3];
@@ -1361,7 +1358,7 @@ mod prim_usize {}
///
/// [`std::fmt`]: fmt
/// [`Hash`]: hash::Hash
-#[doc = concat!("[`ToSocketAddrs`]: ", include_str!("../primitive_docs/net_tosocketaddrs.md"))]
+/// [`ToSocketAddrs`]: ../std/net/trait.ToSocketAddrs.html
///
/// `&mut T` references get all of the above except `ToSocketAddrs`, plus the following, if `T`
/// implements that trait:
@@ -1381,10 +1378,10 @@ mod prim_usize {}
///
/// [`FusedIterator`]: iter::FusedIterator
/// [`TrustedLen`]: iter::TrustedLen
-#[doc = concat!("[`Seek`]: ", include_str!("../primitive_docs/io_seek.md"))]
-#[doc = concat!("[`BufRead`]: ", include_str!("../primitive_docs/io_bufread.md"))]
-#[doc = concat!("[`Read`]: ", include_str!("../primitive_docs/io_read.md"))]
-#[doc = concat!("[`io::Write`]: ", include_str!("../primitive_docs/io_write.md"))]
+/// [`Seek`]: ../std/io/trait.Seek.html
+/// [`BufRead`]: ../std/io/trait.BufRead.html
+/// [`Read`]: ../std/io/trait.Read.html
+/// [`io::Write`]: ../std/io/trait.Write.html
///
/// Note that due to method call deref coercion, simply calling a trait method will act like they
/// work on references as well as they do on owned values! The implementations described here are
diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs
index ee69d89a4..9af8f1228 100644
--- a/library/core/src/ptr/const_ptr.rs
+++ b/library/core/src/ptr/const_ptr.rs
@@ -607,7 +607,16 @@ impl<T: ?Sized> *const T {
/// Calculates the distance between two pointers. The returned value is in
/// units of T: the distance in bytes divided by `mem::size_of::<T>()`.
///
- /// This function is the inverse of [`offset`].
+ /// This is equivalent to `(self as isize - origin as isize) / (mem::size_of::<T>() as isize)`,
+ /// except that it has a lot more opportunities for UB, in exchange for the compiler
+ /// better understanding what you are doing.
+ ///
+ /// The primary motivation of this method is for computing the `len` of an array/slice
+ /// of `T` that you are currently representing as a "start" and "end" pointer
+ /// (and "end" is "one past the end" of the array).
+ /// In that case, `end.offset_from(start)` gets you the length of the array.
+ ///
+ /// All of the following safety requirements are trivially satisfied for this use case.
///
/// [`offset`]: #method.offset
///
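As a quick illustration of the start/end-pointer use case described above (a sketch, not part of the patch):

```rust
fn main() {
    let a: [i32; 4] = [10, 20, 30, 40];
    let start: *const i32 = a.as_ptr();
    // One past the end of the same array.
    let end: *const i32 = unsafe { start.add(a.len()) };
    // SAFETY: both pointers are in bounds of (or one past the end of) the same
    // allocated object and derived from the same pointer, so the safety
    // requirements listed below hold trivially.
    let len = unsafe { end.offset_from(start) };
    assert_eq!(len, 4);
}
```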
@@ -616,7 +625,7 @@ impl<T: ?Sized> *const T {
/// If any of the following conditions are violated, the result is Undefined
/// Behavior:
///
- /// * Both the starting and other pointer must be either in bounds or one
+ /// * Both `self` and `origin` must be either in bounds or one
/// byte past the end of the same [allocated object].
///
/// * Both pointers must be *derived from* a pointer to the same object.
@@ -646,6 +655,14 @@ impl<T: ?Sized> *const T {
/// (Note that [`offset`] and [`add`] also have a similar limitation and hence cannot be used on
/// such large allocations either.)
///
+ /// The requirement for pointers to be derived from the same allocated object is primarily
+ /// needed for `const`-compatibility: the distance between pointers into *different* allocated
+ /// objects is not known at compile-time. However, the requirement also exists at
+ /// runtime and may be exploited by optimizations. If you wish to compute the difference between
+ /// pointers that are not guaranteed to be from the same allocation, use `(self as isize -
+ /// origin as isize) / mem::size_of::<T>()`.
+ // FIXME: recommend `addr()` instead of `as usize` once that is stable.
+ ///
/// [`add`]: #method.add
/// [allocated object]: crate::ptr#allocated-object
///
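A hedged sketch of the fallback suggested above for pointers that may come from different allocations; `distance_in_elements` is a made-up helper, and it deliberately uses plain integer arithmetic instead of `offset_from` because the same-allocation requirement cannot be guaranteed:

```rust
use std::mem;

// Made-up helper: always defined (no UB), but the result is only meaningful
// if the caller knows how the two allocations relate to each other.
fn distance_in_elements<T>(a: *const T, b: *const T) -> isize {
    (a as isize - b as isize) / mem::size_of::<T>() as isize
}

fn main() {
    let x = 1u32;
    let y = 2u32;
    // `&x` and `&y` are distinct allocated objects, so `offset_from` would be
    // undefined behavior here; the integer computation is not.
    let _d = distance_in_elements(&x as *const u32, &y as *const u32);
}
```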
@@ -703,7 +720,7 @@ impl<T: ?Sized> *const T {
/// units of **bytes**.
///
/// This is purely a convenience for casting to a `u8` pointer and
- /// using [offset_from][pointer::offset_from] on it. See that method for
+ /// using [`offset_from`][pointer::offset_from] on it. See that method for
/// documentation and safety requirements.
///
/// For non-`Sized` pointees this operation considers only the data pointers,
diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs
index 5f094ac4e..d1286a1de 100644
--- a/library/core/src/ptr/mod.rs
+++ b/library/core/src/ptr/mod.rs
@@ -698,6 +698,7 @@ where
#[inline(always)]
#[must_use]
#[unstable(feature = "ptr_from_ref", issue = "106116")]
+#[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
#[rustc_diagnostic_item = "ptr_from_ref"]
pub const fn from_ref<T: ?Sized>(r: &T) -> *const T {
r
@@ -710,7 +711,7 @@ pub const fn from_ref<T: ?Sized>(r: &T) -> *const T {
#[inline(always)]
#[must_use]
#[unstable(feature = "ptr_from_ref", issue = "106116")]
-#[rustc_diagnostic_item = "ptr_from_mut"]
+#[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
pub const fn from_mut<T: ?Sized>(r: &mut T) -> *mut T {
r
}
@@ -795,7 +796,9 @@ pub const fn slice_from_raw_parts_mut<T>(data: *mut T, len: usize) -> *mut [T] {
///
/// Behavior is undefined if any of the following conditions are violated:
///
-/// * Both `x` and `y` must be [valid] for both reads and writes.
+/// * Both `x` and `y` must be [valid] for both reads and writes. They must remain valid even when the
+/// other pointer is written. (This means if the memory ranges overlap, the two pointers must not
+/// be subject to aliasing restrictions relative to each other.)
///
/// * Both `x` and `y` must be properly aligned.
///
@@ -1357,6 +1360,7 @@ pub const unsafe fn read_unaligned<T>(src: *const T) -> T {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
+#[rustc_diagnostic_item = "ptr_write"]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn write<T>(dst: *mut T, src: T) {
// Semantically, it would be fine for this to be implemented as a
@@ -1459,6 +1463,7 @@ pub const unsafe fn write<T>(dst: *mut T, src: T) {
#[inline]
#[stable(feature = "ptr_unaligned", since = "1.17.0")]
#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
+#[rustc_diagnostic_item = "ptr_write_unaligned"]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
// SAFETY: the caller must guarantee that `dst` is valid for writes.
@@ -1607,6 +1612,7 @@ pub unsafe fn read_volatile<T>(src: *const T) -> T {
/// ```
#[inline]
#[stable(feature = "volatile", since = "1.9.0")]
+#[rustc_diagnostic_item = "ptr_write_volatile"]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
// SAFETY: the caller must uphold the safety contract for `volatile_store`.
diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs
index 9dbb3f9d3..109c28692 100644
--- a/library/core/src/ptr/mut_ptr.rs
+++ b/library/core/src/ptr/mut_ptr.rs
@@ -109,7 +109,7 @@ impl<T: ?Sized> *mut T {
/// with [`cast_mut`] on `*const T` and may have documentation value if used instead of implicit
/// coercion.
///
- /// [`cast_mut`]: #method.cast_mut
+ /// [`cast_mut`]: pointer::cast_mut
#[stable(feature = "ptr_const_cast", since = "1.65.0")]
#[rustc_const_stable(feature = "ptr_const_cast", since = "1.65.0")]
#[rustc_diagnostic_item = "ptr_cast_const"]
@@ -121,7 +121,7 @@ impl<T: ?Sized> *mut T {
/// Casts a pointer to its raw bits.
///
/// This is equivalent to `as usize`, but is more specific to enhance readability.
- /// The inverse method is [`from_bits`](#method.from_bits-1).
+ /// The inverse method is [`from_bits`](pointer#method.from_bits-1).
///
/// In particular, `*p as usize` and `p as usize` will both compile for
/// pointers to numeric types but do very different things, so using this
@@ -157,7 +157,7 @@ impl<T: ?Sized> *mut T {
/// Creates a pointer from its raw bits.
///
/// This is equivalent to `as *mut T`, but is more specific to enhance readability.
- /// The inverse method is [`to_bits`](#method.to_bits-1).
+ /// The inverse method is [`to_bits`](pointer#method.to_bits-1).
///
/// # Examples
///
@@ -307,7 +307,7 @@ impl<T: ?Sized> *mut T {
///
/// For the mutable counterpart see [`as_mut`].
///
- /// [`as_uninit_ref`]: #method.as_uninit_ref-1
+ /// [`as_uninit_ref`]: pointer#method.as_uninit_ref-1
/// [`as_mut`]: #method.as_mut
///
/// # Safety
@@ -373,7 +373,7 @@ impl<T: ?Sized> *mut T {
///
/// For the mutable counterpart see [`as_uninit_mut`].
///
- /// [`as_ref`]: #method.as_ref-1
+ /// [`as_ref`]: pointer#method.as_ref-1
/// [`as_uninit_mut`]: #method.as_uninit_mut
///
/// # Safety
@@ -628,7 +628,7 @@ impl<T: ?Sized> *mut T {
/// For the shared counterpart see [`as_ref`].
///
/// [`as_uninit_mut`]: #method.as_uninit_mut
- /// [`as_ref`]: #method.as_ref-1
+ /// [`as_ref`]: pointer#method.as_ref-1
///
/// # Safety
///
@@ -693,7 +693,7 @@ impl<T: ?Sized> *mut T {
/// For the shared counterpart see [`as_uninit_ref`].
///
/// [`as_mut`]: #method.as_mut
- /// [`as_uninit_ref`]: #method.as_uninit_ref-1
+ /// [`as_uninit_ref`]: pointer#method.as_uninit_ref-1
///
/// # Safety
///
@@ -781,16 +781,25 @@ impl<T: ?Sized> *mut T {
/// Calculates the distance between two pointers. The returned value is in
/// units of T: the distance in bytes divided by `mem::size_of::<T>()`.
///
- /// This function is the inverse of [`offset`].
+ /// This is equivalent to `(self as isize - origin as isize) / (mem::size_of::<T>() as isize)`,
+ /// except that it has a lot more opportunities for UB, in exchange for the compiler
+ /// better understanding what you are doing.
///
- /// [`offset`]: #method.offset-1
+ /// The primary motivation of this method is for computing the `len` of an array/slice
+ /// of `T` that you are currently representing as a "start" and "end" pointer
+ /// (and "end" is "one past the end" of the array).
+ /// In that case, `end.offset_from(start)` gets you the length of the array.
+ ///
+ /// All of the following safety requirements are trivially satisfied for this use case.
+ ///
+ /// [`offset`]: pointer#method.offset-1
///
/// # Safety
///
/// If any of the following conditions are violated, the result is Undefined
/// Behavior:
///
- /// * Both the starting and other pointer must be either in bounds or one
+ /// * Both `self` and `origin` must be either in bounds or one
/// byte past the end of the same [allocated object].
///
/// * Both pointers must be *derived from* a pointer to the same object.
@@ -820,6 +829,14 @@ impl<T: ?Sized> *mut T {
/// (Note that [`offset`] and [`add`] also have a similar limitation and hence cannot be used on
/// such large allocations either.)
///
+ /// The requirement for pointers to be derived from the same allocated object is primarily
+ /// needed for `const`-compatibility: the distance between pointers into *different* allocated
+ /// objects is not known at compile-time. However, the requirement also exists at
+ /// runtime and may be exploited by optimizations. If you wish to compute the difference between
+ /// pointers that are not guaranteed to be from the same allocation, use `(self as isize -
+ /// origin as isize) / mem::size_of::<T>()`.
+ // FIXME: recommend `addr()` instead of `as usize` once that is stable.
+ ///
/// [`add`]: #method.add
/// [allocated object]: crate::ptr#allocated-object
///
@@ -875,7 +892,7 @@ impl<T: ?Sized> *mut T {
/// units of **bytes**.
///
/// This is purely a convenience for casting to a `u8` pointer and
- /// using [offset_from][pointer::offset_from] on it. See that method for
+ /// using [`offset_from`][pointer::offset_from] on it. See that method for
/// documentation and safety requirements.
///
/// For non-`Sized` pointees this operation considers only the data pointers,
@@ -2064,7 +2081,7 @@ impl<T> *mut [T] {
///
/// For the mutable counterpart see [`as_uninit_slice_mut`].
///
- /// [`as_ref`]: #method.as_ref-1
+ /// [`as_ref`]: pointer#method.as_ref-1
/// [`as_uninit_slice_mut`]: #method.as_uninit_slice_mut
///
/// # Safety
diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs
index e0fd347a0..d5bd54fd5 100644
--- a/library/core/src/ptr/non_null.rs
+++ b/library/core/src/ptr/non_null.rs
@@ -43,9 +43,27 @@ use crate::slice::{self, SliceIndex};
/// it is your responsibility to ensure that `as_mut` is never called, and `as_ptr`
/// is never used for mutation.
///
+/// # Representation
+///
+/// Thanks to the [null pointer optimization],
+/// `NonNull<T>` and `Option<NonNull<T>>`
+/// are guaranteed to have the same size and alignment:
+///
+/// ```
+/// # use std::mem::{size_of, align_of};
+/// use std::ptr::NonNull;
+///
+/// assert_eq!(size_of::<NonNull<i16>>(), size_of::<Option<NonNull<i16>>>());
+/// assert_eq!(align_of::<NonNull<i16>>(), align_of::<Option<NonNull<i16>>>());
+///
+/// assert_eq!(size_of::<NonNull<str>>(), size_of::<Option<NonNull<str>>>());
+/// assert_eq!(align_of::<NonNull<str>>(), align_of::<Option<NonNull<str>>>());
+/// ```
+///
/// [covariant]: https://doc.rust-lang.org/reference/subtyping.html
/// [`PhantomData`]: crate::marker::PhantomData
/// [`UnsafeCell<T>`]: crate::cell::UnsafeCell
+/// [null pointer optimization]: crate::option#representation
#[stable(feature = "nonnull", since = "1.25.0")]
#[repr(transparent)]
#[rustc_layout_scalar_valid_range_start(1)]
@@ -320,6 +338,7 @@ impl<T: ?Sized> NonNull<T> {
/// ```
#[stable(feature = "nonnull", since = "1.25.0")]
#[rustc_const_stable(feature = "const_nonnull_as_ptr", since = "1.32.0")]
+ #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
#[must_use]
#[inline(always)]
pub const fn as_ptr(self) -> *mut T {
@@ -579,6 +598,7 @@ impl<T> NonNull<[T]> {
#[must_use]
#[unstable(feature = "slice_ptr_get", issue = "74265")]
#[rustc_const_unstable(feature = "slice_ptr_get", issue = "74265")]
+ #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
pub const fn as_mut_ptr(self) -> *mut T {
self.as_non_null_ptr().as_ptr()
}
diff --git a/library/core/src/slice/ascii.rs b/library/core/src/slice/ascii.rs
index f3311f76a..4cfccd2e3 100644
--- a/library/core/src/slice/ascii.rs
+++ b/library/core/src/slice/ascii.rs
@@ -10,7 +10,7 @@ use crate::ops;
impl [u8] {
/// Checks if all bytes in this slice are within the ASCII range.
#[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
- #[rustc_const_unstable(feature = "const_slice_is_ascii", issue = "111090")]
+ #[rustc_const_stable(feature = "const_slice_is_ascii", since = "1.74.0")]
#[must_use]
#[inline]
pub const fn is_ascii(&self) -> bool {
diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs
index d95662afd..a19fcf93c 100644
--- a/library/core/src/slice/mod.rs
+++ b/library/core/src/slice/mod.rs
@@ -730,6 +730,7 @@ impl<T> [T] {
/// [`as_mut_ptr`]: slice::as_mut_ptr
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_as_ptr", since = "1.32.0")]
+ #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
#[inline(always)]
#[must_use]
pub const fn as_ptr(&self) -> *const T {
@@ -760,6 +761,7 @@ impl<T> [T] {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
#[rustc_allow_const_fn_unstable(const_mut_refs)]
+ #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
#[inline(always)]
#[must_use]
pub const fn as_mut_ptr(&mut self) -> *mut T {
@@ -3408,7 +3410,7 @@ impl<T> [T] {
/// assert_eq!(a, ['e', 'f', 'a', 'b', 'c', 'd']);
/// ```
///
- /// Rotate a subslice:
+ /// Rotating a subslice:
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
diff --git a/library/core/src/str/mod.rs b/library/core/src/str/mod.rs
index e5f34952c..dfa2d4fd5 100644
--- a/library/core/src/str/mod.rs
+++ b/library/core/src/str/mod.rs
@@ -386,6 +386,7 @@ impl str {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "rustc_str_as_ptr", since = "1.32.0")]
+ #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
#[must_use]
#[inline(always)]
pub const fn as_ptr(&self) -> *const u8 {
@@ -401,6 +402,7 @@ impl str {
/// It is your responsibility to make sure that the string slice only gets
/// modified in a way that it remains valid UTF-8.
#[stable(feature = "str_as_mut_ptr", since = "1.36.0")]
+ #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
#[must_use]
#[inline(always)]
pub fn as_mut_ptr(&mut self) -> *mut u8 {
@@ -997,7 +999,7 @@ impl str {
/// An iterator over the lines of a string.
#[stable(feature = "rust1", since = "1.0.0")]
- #[deprecated(since = "1.4.0", note = "use lines() instead now")]
+ #[deprecated(since = "1.4.0", note = "use lines() instead now", suggestion = "lines")]
#[inline]
#[allow(deprecated)]
pub fn lines_any(&self) -> LinesAny<'_> {
@@ -2322,7 +2324,7 @@ impl str {
/// assert!(!non_ascii.is_ascii());
/// ```
#[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
- #[rustc_const_unstable(feature = "const_slice_is_ascii", issue = "111090")]
+ #[rustc_const_stable(feature = "const_slice_is_ascii", since = "1.74.0")]
#[must_use]
#[inline]
pub const fn is_ascii(&self) -> bool {
diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
index 22a1c0978..cf1fbe2d3 100644
--- a/library/core/src/sync/atomic.rs
+++ b/library/core/src/sync/atomic.rs
@@ -1018,6 +1018,7 @@ impl AtomicBool {
#[inline]
#[stable(feature = "atomic_as_ptr", since = "1.70.0")]
#[rustc_const_stable(feature = "atomic_as_ptr", since = "1.70.0")]
+ #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
pub const fn as_ptr(&self) -> *mut bool {
self.v.get().cast()
}
@@ -1953,6 +1954,7 @@ impl<T> AtomicPtr<T> {
#[inline]
#[stable(feature = "atomic_as_ptr", since = "1.70.0")]
#[rustc_const_stable(feature = "atomic_as_ptr", since = "1.70.0")]
+ #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
pub const fn as_ptr(&self) -> *mut *mut T {
self.p.get()
}
@@ -2891,6 +2893,7 @@ macro_rules! atomic_int {
#[inline]
#[stable(feature = "atomic_as_ptr", since = "1.70.0")]
#[rustc_const_stable(feature = "atomic_as_ptr", since = "1.70.0")]
+ #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
pub const fn as_ptr(&self) -> *mut $int_type {
self.v.get()
}
diff --git a/library/core/src/sync/exclusive.rs b/library/core/src/sync/exclusive.rs
index 3f3e19c55..ff538d55c 100644
--- a/library/core/src/sync/exclusive.rs
+++ b/library/core/src/sync/exclusive.rs
@@ -2,6 +2,8 @@
use core::fmt;
use core::future::Future;
+use core::marker::Tuple;
+use core::ops::{Generator, GeneratorState};
use core::pin::Pin;
use core::task::{Context, Poll};
@@ -168,10 +170,52 @@ impl<T> From<T> for Exclusive<T> {
}
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
-impl<T: Future + ?Sized> Future for Exclusive<T> {
+impl<F, Args> FnOnce<Args> for Exclusive<F>
+where
+ F: FnOnce<Args>,
+ Args: Tuple,
+{
+ type Output = F::Output;
+
+ extern "rust-call" fn call_once(self, args: Args) -> Self::Output {
+ self.into_inner().call_once(args)
+ }
+}
+
+#[unstable(feature = "exclusive_wrapper", issue = "98407")]
+impl<F, Args> FnMut<Args> for Exclusive<F>
+where
+ F: FnMut<Args>,
+ Args: Tuple,
+{
+ extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output {
+ self.get_mut().call_mut(args)
+ }
+}
+
+#[unstable(feature = "exclusive_wrapper", issue = "98407")]
+impl<T> Future for Exclusive<T>
+where
+ T: Future + ?Sized,
+{
type Output = T::Output;
+
#[inline]
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.get_pin_mut().poll(cx)
}
}
+
+#[unstable(feature = "generator_trait", issue = "43122")] // also #98407
+impl<R, G> Generator<R> for Exclusive<G>
+where
+ G: Generator<R> + ?Sized,
+{
+ type Yield = G::Yield;
+ type Return = G::Return;
+
+ #[inline]
+ fn resume(self: Pin<&mut Self>, arg: R) -> GeneratorState<Self::Yield, Self::Return> {
+ G::resume(self.get_pin_mut(), arg)
+ }
+}
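A sketch of what the new forwarding impls enable (editorial illustration, assuming a nightly toolchain that includes this change and the unstable `exclusive_wrapper` feature): an `Exclusive<F>` can be handed to any API that expects an `FnMut`, while staying unconditionally `Sync`.

```rust
#![feature(exclusive_wrapper)]
use std::cell::Cell;
use std::sync::Exclusive;

fn call_twice(mut f: impl FnMut() -> i32) -> i32 {
    f() + f()
}

fn main() {
    // The closure captures a `Cell`, so it is `Send` but not `Sync` on its own;
    // wrapping it in `Exclusive` makes the wrapper unconditionally `Sync`.
    let counter = Cell::new(0);
    let wrapped = Exclusive::new(move || {
        counter.set(counter.get() + 1);
        counter.get()
    });
    // The forwarding `FnMut`/`FnOnce` impls added in this hunk let the wrapper
    // be passed wherever a closure is expected.
    assert_eq!(call_twice(wrapped), 3);
}
```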
diff --git a/library/core/src/time.rs b/library/core/src/time.rs
index b08d5782a..1e8d28979 100644
--- a/library/core/src/time.rs
+++ b/library/core/src/time.rs
@@ -656,10 +656,10 @@ impl Duration {
#[rustc_const_stable(feature = "duration_consts_2", since = "1.58.0")]
pub const fn checked_div(self, rhs: u32) -> Option<Duration> {
if rhs != 0 {
- let secs = self.secs / (rhs as u64);
- let carry = self.secs - secs * (rhs as u64);
- let extra_nanos = carry * (NANOS_PER_SEC as u64) / (rhs as u64);
- let nanos = self.nanos.0 / rhs + (extra_nanos as u32);
+ let (secs, extra_secs) = (self.secs / (rhs as u64), self.secs % (rhs as u64));
+ let (mut nanos, extra_nanos) = (self.nanos.0 / rhs, self.nanos.0 % rhs);
+ nanos +=
+ ((extra_secs * (NANOS_PER_SEC as u64) + extra_nanos as u64) / (rhs as u64)) as u32;
debug_assert!(nanos < NANOS_PER_SEC);
Some(Duration::new(secs, nanos))
} else {
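A standalone sketch (editorial, mirroring the two formulas in the hunk above) of why the rewrite matters: the old code divided the seconds remainder and the nanoseconds separately, discarding up to one nanosecond, while the new code combines both remainders before dividing.

```rust
const NANOS_PER_SEC: u64 = 1_000_000_000;

fn old_div(secs: u64, nanos: u32, rhs: u32) -> (u64, u32) {
    let s = secs / rhs as u64;
    let carry = secs - s * rhs as u64;
    let extra_nanos = carry * NANOS_PER_SEC / rhs as u64;
    (s, nanos / rhs + extra_nanos as u32)
}

fn new_div(secs: u64, nanos: u32, rhs: u32) -> (u64, u32) {
    let (s, extra_secs) = (secs / rhs as u64, secs % rhs as u64);
    let (mut n, extra_nanos) = (nanos / rhs, nanos % rhs);
    n += ((extra_secs * NANOS_PER_SEC + extra_nanos as u64) / rhs as u64) as u32;
    (s, n)
}

fn main() {
    // 1.000000001 s / 7 is exactly 0.142857143 s.
    assert_eq!(old_div(1, 1, 7), (0, 142_857_142)); // one nanosecond lost
    assert_eq!(new_div(1, 1, 7), (0, 142_857_143)); // matches the new test below
}
```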
diff --git a/library/core/src/tuple.rs b/library/core/src/tuple.rs
index 7782ace69..ff292ff2d 100644
--- a/library/core/src/tuple.rs
+++ b/library/core/src/tuple.rs
@@ -1,4 +1,4 @@
-// See src/libstd/primitive_docs.rs for documentation.
+// See core/src/primitive_docs.rs for documentation.
use crate::cmp::Ordering::{self, *};
use crate::marker::ConstParamTy;
diff --git a/library/core/tests/iter/range.rs b/library/core/tests/iter/range.rs
index 0a77ecddb..5b87d6c1f 100644
--- a/library/core/tests/iter/range.rs
+++ b/library/core/tests/iter/range.rs
@@ -1,5 +1,6 @@
-use core::num::NonZeroUsize;
use super::*;
+use core::ascii::Char as AsciiChar;
+use core::num::NonZeroUsize;
#[test]
fn test_range() {
@@ -40,6 +41,21 @@ fn test_char_range() {
}
#[test]
+fn test_ascii_char_range() {
+ let from = AsciiChar::Null;
+ let to = AsciiChar::Delete;
+ assert!((from..=to).eq((from as u8..=to as u8).filter_map(AsciiChar::from_u8)));
+ assert!((from..=to).rev().eq((from as u8..=to as u8).filter_map(AsciiChar::from_u8).rev()));
+
+ assert_eq!((AsciiChar::CapitalA..=AsciiChar::CapitalZ).count(), 26);
+ assert_eq!((AsciiChar::CapitalA..=AsciiChar::CapitalZ).size_hint(), (26, Some(26)));
+ assert_eq!((AsciiChar::SmallA..=AsciiChar::SmallZ).count(), 26);
+ assert_eq!((AsciiChar::SmallA..=AsciiChar::SmallZ).size_hint(), (26, Some(26)));
+ assert_eq!((AsciiChar::Digit0..=AsciiChar::Digit9).count(), 10);
+ assert_eq!((AsciiChar::Digit0..=AsciiChar::Digit9).size_hint(), (10, Some(10)));
+}
+
+#[test]
fn test_range_exhaustion() {
let mut r = 10..10;
assert!(r.is_empty());
diff --git a/library/core/tests/lib.rs b/library/core/tests/lib.rs
index 7a6def37a..e4003a208 100644
--- a/library/core/tests/lib.rs
+++ b/library/core/tests/lib.rs
@@ -2,6 +2,8 @@
#![feature(array_chunks)]
#![feature(array_methods)]
#![feature(array_windows)]
+#![feature(ascii_char)]
+#![feature(ascii_char_variants)]
#![feature(bigint_helper_methods)]
#![feature(cell_update)]
#![feature(const_align_offset)]
@@ -54,6 +56,7 @@
#![feature(min_specialization)]
#![feature(numfmt)]
#![feature(num_midpoint)]
+#![feature(isqrt)]
#![feature(step_trait)]
#![feature(str_internals)]
#![feature(std_internals)]
@@ -94,6 +97,7 @@
#![feature(const_option_ext)]
#![feature(const_result)]
#![cfg_attr(target_has_atomic = "128", feature(integer_atomics))]
+#![cfg_attr(test, feature(cfg_match))]
#![feature(int_roundings)]
#![feature(slice_group_by)]
#![feature(split_array)]
@@ -137,6 +141,7 @@ mod hash;
mod intrinsics;
mod iter;
mod lazy;
+#[cfg(test)]
mod macros;
mod manually_drop;
mod mem;
diff --git a/library/core/tests/macros.rs b/library/core/tests/macros.rs
index ff3632e35..eb886def1 100644
--- a/library/core/tests/macros.rs
+++ b/library/core/tests/macros.rs
@@ -1,3 +1,25 @@
+trait Trait {
+ fn blah(&self);
+}
+
+#[allow(dead_code)]
+struct Struct;
+
+impl Trait for Struct {
+ cfg_match! {
+ cfg(feature = "blah") => {
+ fn blah(&self) {
+ unimplemented!();
+ }
+ }
+ _ => {
+ fn blah(&self) {
+ unimplemented!();
+ }
+ }
+ }
+}
+
#[test]
fn assert_eq_trailing_comma() {
assert_eq!(1, 1,);
@@ -18,3 +40,135 @@ fn assert_ne_trailing_comma() {
fn matches_leading_pipe() {
matches!(1, | 1 | 2 | 3);
}
+
+#[test]
+fn cfg_match_basic() {
+ cfg_match! {
+ cfg(target_pointer_width = "64") => { fn f0_() -> bool { true }}
+ }
+
+ cfg_match! {
+ cfg(unix) => { fn f1_() -> bool { true }}
+ cfg(any(target_os = "macos", target_os = "linux")) => { fn f1_() -> bool { false }}
+ }
+
+ cfg_match! {
+ cfg(target_pointer_width = "32") => { fn f2_() -> bool { false }}
+ cfg(target_pointer_width = "64") => { fn f2_() -> bool { true }}
+ }
+
+ cfg_match! {
+ cfg(target_pointer_width = "16") => { fn f3_() -> i32 { 1 }}
+ _ => { fn f3_() -> i32 { 2 }}
+ }
+
+ #[cfg(target_pointer_width = "64")]
+ assert!(f0_());
+
+ #[cfg(unix)]
+ assert!(f1_());
+
+ #[cfg(target_pointer_width = "32")]
+ assert!(!f2_());
+ #[cfg(target_pointer_width = "64")]
+ assert!(f2_());
+
+ #[cfg(not(target_pointer_width = "16"))]
+ assert_eq!(f3_(), 2);
+}
+
+#[test]
+fn cfg_match_debug_assertions() {
+ cfg_match! {
+ cfg(debug_assertions) => {
+ assert!(cfg!(debug_assertions));
+ assert_eq!(4, 2+2);
+ }
+ _ => {
+ assert!(cfg!(not(debug_assertions)));
+ assert_eq!(10, 5+5);
+ }
+ }
+}
+
+#[cfg(target_pointer_width = "64")]
+#[test]
+fn cfg_match_no_duplication_on_64() {
+ cfg_match! {
+ cfg(windows) => {
+ fn foo() {}
+ }
+ cfg(unix) => {
+ fn foo() {}
+ }
+ cfg(target_pointer_width = "64") => {
+ fn foo() {}
+ }
+ }
+ foo();
+}
+
+#[test]
+fn cfg_match_options() {
+ cfg_match! {
+ cfg(test) => {
+ use core::option::Option as Option2;
+ fn works1() -> Option2<u32> { Some(1) }
+ }
+ _ => { fn works1() -> Option<u32> { None } }
+ }
+
+ cfg_match! {
+ cfg(feature = "foo") => { fn works2() -> bool { false } }
+ cfg(test) => { fn works2() -> bool { true } }
+ _ => { fn works2() -> bool { false } }
+ }
+
+ cfg_match! {
+ cfg(feature = "foo") => { fn works3() -> bool { false } }
+ _ => { fn works3() -> bool { true } }
+ }
+
+ cfg_match! {
+ cfg(test) => {
+ use core::option::Option as Option3;
+ fn works4() -> Option3<u32> { Some(1) }
+ }
+ }
+
+ cfg_match! {
+ cfg(feature = "foo") => { fn works5() -> bool { false } }
+ cfg(test) => { fn works5() -> bool { true } }
+ }
+
+ assert!(works1().is_some());
+ assert!(works2());
+ assert!(works3());
+ assert!(works4().is_some());
+ assert!(works5());
+}
+
+#[test]
+fn cfg_match_two_functions() {
+ cfg_match! {
+ cfg(target_pointer_width = "64") => {
+ fn foo1() {}
+ fn bar1() {}
+ }
+ _ => {
+ fn foo2() {}
+ fn bar2() {}
+ }
+ }
+
+ #[cfg(target_pointer_width = "64")]
+ {
+ foo1();
+ bar1();
+ }
+ #[cfg(not(target_pointer_width = "64"))]
+ {
+ foo2();
+ bar2();
+ }
+}
diff --git a/library/core/tests/num/int_macros.rs b/library/core/tests/num/int_macros.rs
index 439bbe669..165d9a296 100644
--- a/library/core/tests/num/int_macros.rs
+++ b/library/core/tests/num/int_macros.rs
@@ -291,6 +291,38 @@ macro_rules! int_module {
}
#[test]
+ fn test_isqrt() {
+ assert_eq!($T::MIN.checked_isqrt(), None);
+ assert_eq!((-1 as $T).checked_isqrt(), None);
+ assert_eq!((0 as $T).isqrt(), 0 as $T);
+ assert_eq!((1 as $T).isqrt(), 1 as $T);
+ assert_eq!((2 as $T).isqrt(), 1 as $T);
+ assert_eq!((99 as $T).isqrt(), 9 as $T);
+ assert_eq!((100 as $T).isqrt(), 10 as $T);
+ }
+
+ #[cfg(not(miri))] // Miri is too slow
+ #[test]
+ fn test_lots_of_isqrt() {
+ let n_max: $T = (1024 * 1024).min($T::MAX as u128) as $T;
+ for n in 0..=n_max {
+ let isqrt: $T = n.isqrt();
+
+ assert!(isqrt.pow(2) <= n);
+ let (square, overflow) = (isqrt + 1).overflowing_pow(2);
+ assert!(overflow || square > n);
+ }
+
+ for n in ($T::MAX - 127)..=$T::MAX {
+ let isqrt: $T = n.isqrt();
+
+ assert!(isqrt.pow(2) <= n);
+ let (square, overflow) = (isqrt + 1).overflowing_pow(2);
+ assert!(overflow || square > n);
+ }
+ }
+
+ #[test]
fn test_div_floor() {
let a: $T = 8;
let b = 3;
diff --git a/library/core/tests/num/uint_macros.rs b/library/core/tests/num/uint_macros.rs
index 7d6203db0..955440647 100644
--- a/library/core/tests/num/uint_macros.rs
+++ b/library/core/tests/num/uint_macros.rs
@@ -207,6 +207,35 @@ macro_rules! uint_module {
}
#[test]
+ fn test_isqrt() {
+ assert_eq!((0 as $T).isqrt(), 0 as $T);
+ assert_eq!((1 as $T).isqrt(), 1 as $T);
+ assert_eq!((2 as $T).isqrt(), 1 as $T);
+ assert_eq!((99 as $T).isqrt(), 9 as $T);
+ assert_eq!((100 as $T).isqrt(), 10 as $T);
+ assert_eq!($T::MAX.isqrt(), (1 << ($T::BITS / 2)) - 1);
+ }
+
+ #[cfg(not(miri))] // Miri is too slow
+ #[test]
+ fn test_lots_of_isqrt() {
+ let n_max: $T = (1024 * 1024).min($T::MAX as u128) as $T;
+ for n in 0..=n_max {
+ let isqrt: $T = n.isqrt();
+
+ assert!(isqrt.pow(2) <= n);
+ assert!(isqrt + 1 == (1 as $T) << ($T::BITS / 2) || (isqrt + 1).pow(2) > n);
+ }
+
+ for n in ($T::MAX - 255)..=$T::MAX {
+ let isqrt: $T = n.isqrt();
+
+ assert!(isqrt.pow(2) <= n);
+ assert!(isqrt + 1 == (1 as $T) << ($T::BITS / 2) || (isqrt + 1).pow(2) > n);
+ }
+ }
+
+ #[test]
fn test_div_floor() {
assert_eq!((8 as $T).div_floor(3), 2);
}
diff --git a/library/core/tests/time.rs b/library/core/tests/time.rs
index 872611937..bd6e63edb 100644
--- a/library/core/tests/time.rs
+++ b/library/core/tests/time.rs
@@ -170,6 +170,7 @@ fn saturating_mul() {
fn div() {
assert_eq!(Duration::new(0, 1) / 2, Duration::new(0, 0));
assert_eq!(Duration::new(1, 1) / 3, Duration::new(0, 333_333_333));
+ assert_eq!(Duration::new(1, 1) / 7, Duration::new(0, 142_857_143));
assert_eq!(Duration::new(99, 999_999_000) / 100, Duration::new(0, 999_999_990));
}
diff --git a/library/panic_abort/src/android.rs b/library/panic_abort/src/android.rs
index 20b5b6b51..47c228345 100644
--- a/library/panic_abort/src/android.rs
+++ b/library/panic_abort/src/android.rs
@@ -1,6 +1,6 @@
use alloc::string::String;
use core::mem::transmute;
-use core::panic::BoxMeUp;
+use core::panic::PanicPayload;
use core::ptr::copy_nonoverlapping;
const ANDROID_SET_ABORT_MESSAGE: &[u8] = b"android_set_abort_message\0";
@@ -15,7 +15,7 @@ type SetAbortMessageType = unsafe extern "C" fn(*const libc::c_char) -> ();
//
// Weakly resolve the symbol for android_set_abort_message. This function is only available
// for API >= 21.
-pub(crate) unsafe fn android_set_abort_message(payload: &mut dyn BoxMeUp) {
+pub(crate) unsafe fn android_set_abort_message(payload: &mut dyn PanicPayload) {
let func_addr =
libc::dlsym(libc::RTLD_DEFAULT, ANDROID_SET_ABORT_MESSAGE.as_ptr() as *const libc::c_char)
as usize;
diff --git a/library/panic_abort/src/lib.rs b/library/panic_abort/src/lib.rs
index 76b359196..6e097e2ca 100644
--- a/library/panic_abort/src/lib.rs
+++ b/library/panic_abort/src/lib.rs
@@ -14,13 +14,13 @@
#![feature(staged_api)]
#![feature(rustc_attrs)]
#![feature(c_unwind)]
-#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![allow(internal_features)]
#[cfg(target_os = "android")]
mod android;
use core::any::Any;
-use core::panic::BoxMeUp;
+use core::panic::PanicPayload;
#[rustc_std_internal_symbol]
#[allow(improper_ctypes_definitions)]
@@ -30,7 +30,7 @@ pub unsafe extern "C" fn __rust_panic_cleanup(_: *mut u8) -> *mut (dyn Any + Sen
// "Leak" the payload and shim to the relevant abort on the platform in question.
#[rustc_std_internal_symbol]
-pub unsafe fn __rust_start_panic(_payload: &mut dyn BoxMeUp) -> u32 {
+pub unsafe fn __rust_start_panic(_payload: &mut dyn PanicPayload) -> u32 {
// Android has the ability to attach a message as part of the abort.
#[cfg(target_os = "android")]
android::android_set_abort_message(_payload);
@@ -43,7 +43,9 @@ pub unsafe fn __rust_start_panic(_payload: &mut dyn BoxMeUp) -> u32 {
libc::abort();
}
} else if #[cfg(any(target_os = "hermit",
- all(target_vendor = "fortanix", target_env = "sgx")
+ all(target_vendor = "fortanix", target_env = "sgx"),
+ target_os = "xous",
+ target_os = "uefi",
))] {
unsafe fn abort() -> ! {
// call std::sys::abort_internal
diff --git a/library/panic_unwind/src/lib.rs b/library/panic_unwind/src/lib.rs
index 009014de5..9363fde5d 100644
--- a/library/panic_unwind/src/lib.rs
+++ b/library/panic_unwind/src/lib.rs
@@ -19,18 +19,17 @@
#![feature(panic_unwind)]
#![feature(staged_api)]
#![feature(std_internals)]
-#![cfg_attr(bootstrap, feature(abi_thiscall))]
#![feature(rustc_attrs)]
#![panic_runtime]
#![feature(panic_runtime)]
#![feature(c_unwind)]
// `real_imp` is unused with Miri, so silence warnings.
#![cfg_attr(miri, allow(dead_code))]
-#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![allow(internal_features)]
use alloc::boxed::Box;
use core::any::Any;
-use core::panic::BoxMeUp;
+use core::panic::PanicPayload;
cfg_if::cfg_if! {
if #[cfg(target_os = "emscripten")] {
@@ -100,7 +99,7 @@ pub unsafe extern "C" fn __rust_panic_cleanup(payload: *mut u8) -> *mut (dyn Any
// Entry point for raising an exception, just delegates to the platform-specific
// implementation.
#[rustc_std_internal_symbol]
-pub unsafe fn __rust_start_panic(payload: &mut dyn BoxMeUp) -> u32 {
+pub unsafe fn __rust_start_panic(payload: &mut dyn PanicPayload) -> u32 {
let payload = Box::from_raw(payload.take_box());
imp::panic(payload)
diff --git a/library/proc_macro/src/lib.rs b/library/proc_macro/src/lib.rs
index 83d637b68..0a70c488a 100644
--- a/library/proc_macro/src/lib.rs
+++ b/library/proc_macro/src/lib.rs
@@ -32,7 +32,7 @@
#![feature(min_specialization)]
#![feature(strict_provenance)]
#![recursion_limit = "256"]
-#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![allow(internal_features)]
#[unstable(feature = "proc_macro_internals", issue = "27812")]
#[doc(hidden)]
@@ -1337,6 +1337,13 @@ impl Literal {
Literal::new(bridge::LitKind::Char, symbol, None)
}
+ /// Byte character literal.
+ #[unstable(feature = "proc_macro_byte_character", issue = "115268")]
+ pub fn byte_character(byte: u8) -> Literal {
+ let string = [byte].escape_ascii().to_string();
+ Literal::new(bridge::LitKind::Byte, &string, None)
+ }
+
/// Byte string literal.
#[stable(feature = "proc_macro_lib2", since = "1.29.0")]
pub fn byte_string(bytes: &[u8]) -> Literal {
@@ -1411,7 +1418,15 @@ impl Literal {
let hashes = get_hashes_str(n);
f(&["br", hashes, "\"", symbol, "\"", hashes, suffix])
}
- _ => f(&[symbol, suffix]),
+ bridge::LitKind::CStr => f(&["c\"", symbol, "\"", suffix]),
+ bridge::LitKind::CStrRaw(n) => {
+ let hashes = get_hashes_str(n);
+ f(&["cr", hashes, "\"", symbol, "\"", hashes, suffix])
+ }
+
+ bridge::LitKind::Integer | bridge::LitKind::Float | bridge::LitKind::Err => {
+ f(&[symbol, suffix])
+ }
})
}
}
diff --git a/library/profiler_builtins/src/lib.rs b/library/profiler_builtins/src/lib.rs
index a81d0a635..ac685b18c 100644
--- a/library/profiler_builtins/src/lib.rs
+++ b/library/profiler_builtins/src/lib.rs
@@ -7,5 +7,5 @@
issue = "none"
)]
#![allow(unused_features)]
-#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![allow(internal_features)]
#![feature(staged_api)]
diff --git a/library/std/Cargo.toml b/library/std/Cargo.toml
index 33c9c6e63..965132bde 100644
--- a/library/std/Cargo.toml
+++ b/library/std/Cargo.toml
@@ -17,7 +17,7 @@ cfg-if = { version = "1.0", features = ['rustc-dep-of-std'] }
panic_unwind = { path = "../panic_unwind", optional = true }
panic_abort = { path = "../panic_abort" }
core = { path = "../core", public = true }
-libc = { version = "0.2.146", default-features = false, features = ['rustc-dep-of-std'], public = true }
+libc = { version = "0.2.148", default-features = false, features = ['rustc-dep-of-std'], public = true }
compiler_builtins = { version = "0.1.100" }
profiler_builtins = { path = "../profiler_builtins", optional = true }
unwind = { path = "../unwind" }
@@ -36,8 +36,8 @@ object = { version = "0.32.0", default-features = false, optional = true, featur
rand = { version = "0.8.5", default-features = false, features = ["alloc"] }
rand_xorshift = "0.3.0"
-[target.'cfg(any(all(target_family = "wasm", target_os = "unknown"), all(target_vendor = "fortanix", target_env = "sgx")))'.dependencies]
-dlmalloc = { version = "0.2.3", features = ['rustc-dep-of-std'] }
+[target.'cfg(any(all(target_family = "wasm", target_os = "unknown"), target_os = "xous", all(target_vendor = "fortanix", target_env = "sgx")))'.dependencies]
+dlmalloc = { version = "0.2.4", features = ['rustc-dep-of-std'] }
[target.x86_64-fortanix-unknown-sgx.dependencies]
fortanix-sgx-abi = { version = "0.5.0", features = ['rustc-dep-of-std'], public = true }
@@ -48,6 +48,10 @@ hermit-abi = { version = "0.3.2", features = ['rustc-dep-of-std'], public = true
[target.'cfg(target_os = "wasi")'.dependencies]
wasi = { version = "0.11.0", features = ['rustc-dep-of-std'], default-features = false }
+[target.'cfg(target_os = "uefi")'.dependencies]
+r-efi = { version = "4.2.0", features = ['rustc-dep-of-std']}
+r-efi-alloc = { version = "1.0.0", features = ['rustc-dep-of-std']}
+
[features]
backtrace = [
"gimli-symbolize",
diff --git a/library/std/build.rs b/library/std/build.rs
index ddf6e84d8..36516978b 100644
--- a/library/std/build.rs
+++ b/library/std/build.rs
@@ -37,6 +37,9 @@ fn main() {
|| target.contains("nintendo-3ds")
|| target.contains("vita")
|| target.contains("nto")
+ || target.contains("xous")
+ || target.contains("hurd")
+ || target.contains("uefi")
// See src/bootstrap/synthetic_targets.rs
|| env::var("RUSTC_BOOTSTRAP_SYNTHETIC_TARGET").is_ok()
{
@@ -49,7 +52,6 @@ fn main() {
// - mipsel-sony-psp
// - nvptx64-nvidia-cuda
// - arch=avr
- // - uefi (x86_64-unknown-uefi, i686-unknown-uefi)
// - JSON targets
// - Any new targets that have not been explicitly added above.
println!("cargo:rustc-cfg=feature=\"restricted-std\"");
diff --git a/library/std/primitive_docs/box_into_raw.md b/library/std/primitive_docs/box_into_raw.md
deleted file mode 100644
index 307b9c85b..000000000
--- a/library/std/primitive_docs/box_into_raw.md
+++ /dev/null
@@ -1 +0,0 @@
-Box::into_raw
diff --git a/library/std/primitive_docs/fs_file.md b/library/std/primitive_docs/fs_file.md
deleted file mode 100644
index 13e454083..000000000
--- a/library/std/primitive_docs/fs_file.md
+++ /dev/null
@@ -1 +0,0 @@
-fs::File
diff --git a/library/std/primitive_docs/io_bufread.md b/library/std/primitive_docs/io_bufread.md
deleted file mode 100644
index bb688e3a5..000000000
--- a/library/std/primitive_docs/io_bufread.md
+++ /dev/null
@@ -1 +0,0 @@
-io::BufRead
diff --git a/library/std/primitive_docs/io_read.md b/library/std/primitive_docs/io_read.md
deleted file mode 100644
index 5118d7c48..000000000
--- a/library/std/primitive_docs/io_read.md
+++ /dev/null
@@ -1 +0,0 @@
-io::Read
diff --git a/library/std/primitive_docs/io_seek.md b/library/std/primitive_docs/io_seek.md
deleted file mode 100644
index 122e6df77..000000000
--- a/library/std/primitive_docs/io_seek.md
+++ /dev/null
@@ -1 +0,0 @@
-io::Seek
diff --git a/library/std/primitive_docs/io_write.md b/library/std/primitive_docs/io_write.md
deleted file mode 100644
index 15dfc907a..000000000
--- a/library/std/primitive_docs/io_write.md
+++ /dev/null
@@ -1 +0,0 @@
-io::Write
diff --git a/library/std/primitive_docs/net_tosocketaddrs.md b/library/std/primitive_docs/net_tosocketaddrs.md
deleted file mode 100644
index a01f318e8..000000000
--- a/library/std/primitive_docs/net_tosocketaddrs.md
+++ /dev/null
@@ -1 +0,0 @@
-net::ToSocketAddrs
diff --git a/library/std/primitive_docs/process_exit.md b/library/std/primitive_docs/process_exit.md
deleted file mode 100644
index 565a71375..000000000
--- a/library/std/primitive_docs/process_exit.md
+++ /dev/null
@@ -1 +0,0 @@
-process::exit
diff --git a/library/std/primitive_docs/string_string.md b/library/std/primitive_docs/string_string.md
deleted file mode 100644
index ce7815ff9..000000000
--- a/library/std/primitive_docs/string_string.md
+++ /dev/null
@@ -1 +0,0 @@
-string::String
diff --git a/library/std/src/alloc.rs b/library/std/src/alloc.rs
index 1eae7fa6a..bb786bd59 100644
--- a/library/std/src/alloc.rs
+++ b/library/std/src/alloc.rs
@@ -290,15 +290,29 @@ static HOOK: AtomicPtr<()> = AtomicPtr::new(ptr::null_mut());
/// Registers a custom allocation error hook, replacing any that was previously registered.
///
-/// The allocation error hook is invoked when an infallible memory allocation fails, before
-/// the runtime aborts. The default hook prints a message to standard error,
-/// but this behavior can be customized with the [`set_alloc_error_hook`] and
-/// [`take_alloc_error_hook`] functions.
+/// The allocation error hook is invoked when an infallible memory allocation fails — that is,
+/// as a consequence of calling [`handle_alloc_error`] — before the runtime aborts.
///
-/// The hook is provided with a `Layout` struct which contains information
+/// The allocation error hook is a global resource. [`take_alloc_error_hook`] may be used to
+/// retrieve a previously registered hook and wrap or discard it.
+///
+/// # What the provided `hook` function should expect
+///
+/// The hook function is provided with a [`Layout`] struct which contains information
/// about the allocation that failed.
///
-/// The allocation error hook is a global resource.
+/// The hook function may choose to panic or abort; in the event that it returns normally, this
+/// will cause an immediate abort.
+///
+/// Since [`take_alloc_error_hook`] is a safe function that allows retrieving the hook, the hook
+/// function must be _sound_ to call even if no memory allocations were attempted.
+///
+/// # The default hook
+///
+/// The default hook, used if [`set_alloc_error_hook`] is never called, prints a message to
+/// standard error (and then returns, causing the runtime to abort the process).
+/// Compiler options may cause it to panic instead, and the default behavior may be changed
+/// to panicking in future versions of Rust.
///
/// # Examples
///
diff --git a/library/std/src/f32.rs b/library/std/src/f32.rs
index a659b552f..c35061757 100644
--- a/library/std/src/f32.rs
+++ b/library/std/src/f32.rs
@@ -822,6 +822,7 @@ impl f32 {
///
/// assert!(abs_difference < 1e-10);
/// ```
+ #[doc(alias = "log1p")]
#[rustc_allow_incoherent_impl]
#[must_use = "method returns a new number and does not mutate the original value"]
#[stable(feature = "rust1", since = "1.0.0")]
@@ -988,7 +989,9 @@ impl f32 {
unsafe { cmath::tgammaf(self) }
}
- /// Returns the natural logarithm of the gamma function.
+ /// Natural logarithm of the absolute value of the gamma function
+ ///
+ /// The integer part of the tuple indicates the sign of the gamma function.
///
/// # Examples
///
diff --git a/library/std/src/f64.rs b/library/std/src/f64.rs
index 721e1fb75..e4b7bfeeb 100644
--- a/library/std/src/f64.rs
+++ b/library/std/src/f64.rs
@@ -822,6 +822,7 @@ impl f64 {
///
/// assert!(abs_difference < 1e-20);
/// ```
+ #[doc(alias = "log1p")]
#[rustc_allow_incoherent_impl]
#[must_use = "method returns a new number and does not mutate the original value"]
#[stable(feature = "rust1", since = "1.0.0")]
@@ -988,7 +989,9 @@ impl f64 {
unsafe { cmath::tgamma(self) }
}
- /// Returns the natural logarithm of the gamma function.
+ /// Natural logarithm of the absolute value of the gamma function
+ ///
+ /// The integer part of the tuple indicates the sign of the gamma function.
///
/// # Examples
///
diff --git a/library/std/src/ffi/mod.rs b/library/std/src/ffi/mod.rs
index ee9f6ed08..97e78d177 100644
--- a/library/std/src/ffi/mod.rs
+++ b/library/std/src/ffi/mod.rs
@@ -132,8 +132,8 @@
//! On all platforms, [`OsStr`] consists of a sequence of bytes that is encoded as a superset of
//! UTF-8; see [`OsString`] for more details on its encoding on different platforms.
//!
-//! For limited, inexpensive conversions from and to bytes, see [`OsStr::as_os_str_bytes`] and
-//! [`OsStr::from_os_str_bytes_unchecked`].
+//! For limited, inexpensive conversions from and to bytes, see [`OsStr::as_encoded_bytes`] and
+//! [`OsStr::from_encoded_bytes_unchecked`].
//!
//! [Unicode scalar value]: https://www.unicode.org/glossary/#unicode_scalar_value
//! [Unicode code point]: https://www.unicode.org/glossary/#code_point
diff --git a/library/std/src/ffi/os_str.rs b/library/std/src/ffi/os_str.rs
index 43cecb19b..fa9d48771 100644
--- a/library/std/src/ffi/os_str.rs
+++ b/library/std/src/ffi/os_str.rs
@@ -154,36 +154,34 @@ impl OsString {
/// # Safety
///
/// As the encoding is unspecified, callers must pass in bytes that originated as a mixture of
- /// validated UTF-8 and bytes from [`OsStr::as_os_str_bytes`] from within the same rust version
+ /// validated UTF-8 and bytes from [`OsStr::as_encoded_bytes`] from within the same rust version
/// built for the same target platform. For example, reconstructing an `OsString` from bytes sent
/// over the network or stored in a file will likely violate these safety rules.
///
- /// Due to the encoding being self-synchronizing, the bytes from [`OsStr::as_os_str_bytes`] can be
+ /// Due to the encoding being self-synchronizing, the bytes from [`OsStr::as_encoded_bytes`] can be
/// split either immediately before or immediately after any valid non-empty UTF-8 substring.
///
/// # Example
///
/// ```
- /// #![feature(os_str_bytes)]
- ///
/// use std::ffi::OsStr;
///
/// let os_str = OsStr::new("Mary had a little lamb");
- /// let bytes = os_str.as_os_str_bytes();
+ /// let bytes = os_str.as_encoded_bytes();
/// let words = bytes.split(|b| *b == b' ');
/// let words: Vec<&OsStr> = words.map(|word| {
/// // SAFETY:
- /// // - Each `word` only contains content that originated from `OsStr::as_os_str_bytes`
+ /// // - Each `word` only contains content that originated from `OsStr::as_encoded_bytes`
/// // - Only split with ASCII whitespace which is a non-empty UTF-8 substring
- /// unsafe { OsStr::from_os_str_bytes_unchecked(word) }
+ /// unsafe { OsStr::from_encoded_bytes_unchecked(word) }
/// }).collect();
/// ```
///
/// [conversions]: super#conversions
#[inline]
- #[unstable(feature = "os_str_bytes", issue = "111544")]
- pub unsafe fn from_os_str_bytes_unchecked(bytes: Vec<u8>) -> Self {
- OsString { inner: Buf::from_os_str_bytes_unchecked(bytes) }
+ #[stable(feature = "os_str_bytes", since = "1.74.0")]
+ pub unsafe fn from_encoded_bytes_unchecked(bytes: Vec<u8>) -> Self {
+ OsString { inner: Buf::from_encoded_bytes_unchecked(bytes) }
}
/// Converts to an [`OsStr`] slice.
@@ -205,7 +203,7 @@ impl OsString {
}
/// Converts the `OsString` into a byte slice. To convert the byte slice back into an
- /// `OsString`, use the [`OsStr::from_os_str_bytes_unchecked`] function.
+ /// `OsString`, use the [`OsStr::from_encoded_bytes_unchecked`] function.
///
/// The byte encoding is an unspecified, platform-specific, self-synchronizing superset of UTF-8.
/// By being a self-synchronizing superset of UTF-8, this encoding is also a superset of 7-bit
@@ -219,9 +217,9 @@ impl OsString {
///
/// [`std::ffi`]: crate::ffi
#[inline]
- #[unstable(feature = "os_str_bytes", issue = "111544")]
- pub fn into_os_str_bytes(self) -> Vec<u8> {
- self.inner.into_os_str_bytes()
+ #[stable(feature = "os_str_bytes", since = "1.74.0")]
+ pub fn into_encoded_bytes(self) -> Vec<u8> {
+ self.inner.into_encoded_bytes()
}
/// Converts the `OsString` into a [`String`] if it contains valid Unicode data.
@@ -745,36 +743,34 @@ impl OsStr {
/// # Safety
///
/// As the encoding is unspecified, callers must pass in bytes that originated as a mixture of
- /// validated UTF-8 and bytes from [`OsStr::as_os_str_bytes`] from within the same rust version
+ /// validated UTF-8 and bytes from [`OsStr::as_encoded_bytes`] from within the same rust version
/// built for the same target platform. For example, reconstructing an `OsStr` from bytes sent
/// over the network or stored in a file will likely violate these safety rules.
///
- /// Due to the encoding being self-synchronizing, the bytes from [`OsStr::as_os_str_bytes`] can be
+ /// Due to the encoding being self-synchronizing, the bytes from [`OsStr::as_encoded_bytes`] can be
/// split either immediately before or immediately after any valid non-empty UTF-8 substring.
///
/// # Example
///
/// ```
- /// #![feature(os_str_bytes)]
- ///
/// use std::ffi::OsStr;
///
/// let os_str = OsStr::new("Mary had a little lamb");
- /// let bytes = os_str.as_os_str_bytes();
+ /// let bytes = os_str.as_encoded_bytes();
/// let words = bytes.split(|b| *b == b' ');
/// let words: Vec<&OsStr> = words.map(|word| {
/// // SAFETY:
- /// // - Each `word` only contains content that originated from `OsStr::as_os_str_bytes`
+ /// // - Each `word` only contains content that originated from `OsStr::as_encoded_bytes`
/// // - Only split with ASCII whitespace which is a non-empty UTF-8 substring
- /// unsafe { OsStr::from_os_str_bytes_unchecked(word) }
+ /// unsafe { OsStr::from_encoded_bytes_unchecked(word) }
/// }).collect();
/// ```
///
/// [conversions]: super#conversions
#[inline]
- #[unstable(feature = "os_str_bytes", issue = "111544")]
- pub unsafe fn from_os_str_bytes_unchecked(bytes: &[u8]) -> &Self {
- Self::from_inner(Slice::from_os_str_bytes_unchecked(bytes))
+ #[stable(feature = "os_str_bytes", since = "1.74.0")]
+ pub unsafe fn from_encoded_bytes_unchecked(bytes: &[u8]) -> &Self {
+ Self::from_inner(Slice::from_encoded_bytes_unchecked(bytes))
}
#[inline]
@@ -948,7 +944,7 @@ impl OsStr {
}
/// Converts an OS string slice to a byte slice. To convert the byte slice back into an OS
- /// string slice, use the [`OsStr::from_os_str_bytes_unchecked`] function.
+ /// string slice, use the [`OsStr::from_encoded_bytes_unchecked`] function.
///
/// The byte encoding is an unspecified, platform-specific, self-synchronizing superset of UTF-8.
/// By being a self-synchronizing superset of UTF-8, this encoding is also a superset of 7-bit
@@ -962,9 +958,9 @@ impl OsStr {
///
/// [`std::ffi`]: crate::ffi
#[inline]
- #[unstable(feature = "os_str_bytes", issue = "111544")]
- pub fn as_os_str_bytes(&self) -> &[u8] {
- self.inner.as_os_str_bytes()
+ #[stable(feature = "os_str_bytes", since = "1.74.0")]
+ pub fn as_encoded_bytes(&self) -> &[u8] {
+ self.inner.as_encoded_bytes()
}
/// Converts this string to its ASCII lower case equivalent in-place.
@@ -1270,7 +1266,7 @@ impl Default for &OsStr {
impl PartialEq for OsStr {
#[inline]
fn eq(&self, other: &OsStr) -> bool {
- self.as_os_str_bytes().eq(other.as_os_str_bytes())
+ self.as_encoded_bytes().eq(other.as_encoded_bytes())
}
}
@@ -1297,23 +1293,23 @@ impl Eq for OsStr {}
impl PartialOrd for OsStr {
#[inline]
fn partial_cmp(&self, other: &OsStr) -> Option<cmp::Ordering> {
- self.as_os_str_bytes().partial_cmp(other.as_os_str_bytes())
+ self.as_encoded_bytes().partial_cmp(other.as_encoded_bytes())
}
#[inline]
fn lt(&self, other: &OsStr) -> bool {
- self.as_os_str_bytes().lt(other.as_os_str_bytes())
+ self.as_encoded_bytes().lt(other.as_encoded_bytes())
}
#[inline]
fn le(&self, other: &OsStr) -> bool {
- self.as_os_str_bytes().le(other.as_os_str_bytes())
+ self.as_encoded_bytes().le(other.as_encoded_bytes())
}
#[inline]
fn gt(&self, other: &OsStr) -> bool {
- self.as_os_str_bytes().gt(other.as_os_str_bytes())
+ self.as_encoded_bytes().gt(other.as_encoded_bytes())
}
#[inline]
fn ge(&self, other: &OsStr) -> bool {
- self.as_os_str_bytes().ge(other.as_os_str_bytes())
+ self.as_encoded_bytes().ge(other.as_encoded_bytes())
}
}
@@ -1332,7 +1328,7 @@ impl PartialOrd<str> for OsStr {
impl Ord for OsStr {
#[inline]
fn cmp(&self, other: &OsStr) -> cmp::Ordering {
- self.as_os_str_bytes().cmp(other.as_os_str_bytes())
+ self.as_encoded_bytes().cmp(other.as_encoded_bytes())
}
}
@@ -1382,7 +1378,7 @@ impl_cmp!(Cow<'a, OsStr>, OsString);
impl Hash for OsStr {
#[inline]
fn hash<H: Hasher>(&self, state: &mut H) {
- self.as_os_str_bytes().hash(state)
+ self.as_encoded_bytes().hash(state)
}
}
diff --git a/library/std/src/fs.rs b/library/std/src/fs.rs
index 4094e3780..73cce35ac 100644
--- a/library/std/src/fs.rs
+++ b/library/std/src/fs.rs
@@ -8,7 +8,7 @@
#![stable(feature = "rust1", since = "1.0.0")]
#![deny(unsafe_op_in_unsafe_fn)]
-#[cfg(all(test, not(any(target_os = "emscripten", target_env = "sgx"))))]
+#[cfg(all(test, not(any(target_os = "emscripten", target_env = "sgx", target_os = "xous"))))]
mod tests;
use crate::ffi::OsString;
@@ -233,8 +233,8 @@ pub struct DirBuilder {
/// This function will return an error if `path` does not already exist.
/// Other errors may also be returned according to [`OpenOptions::open`].
///
-/// It will also return an error if it encounters while reading an error
-/// of a kind other than [`io::ErrorKind::Interrupted`].
+/// While reading from the file, this function handles [`io::ErrorKind::Interrupted`]
+/// with automatic retries. See [io::Read] documentation for details.
///
/// # Examples
///
@@ -271,9 +271,11 @@ pub fn read<P: AsRef<Path>>(path: P) -> io::Result<Vec<u8>> {
/// This function will return an error if `path` does not already exist.
/// Other errors may also be returned according to [`OpenOptions::open`].
///
-/// It will also return an error if it encounters while reading an error
-/// of a kind other than [`io::ErrorKind::Interrupted`],
-/// or if the contents of the file are not valid UTF-8.
+/// If the contents of the file are not valid UTF-8, then an error will also be
+/// returned.
+///
+/// While reading from the file, this function handles [`io::ErrorKind::Interrupted`]
+/// with automatic retries. See [io::Read] documentation for details.
///
/// # Examples
///
@@ -745,14 +747,17 @@ fn buffer_capacity_required(mut file: &File) -> Option<usize> {
#[stable(feature = "rust1", since = "1.0.0")]
impl Read for &File {
+ #[inline]
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.inner.read(buf)
}
+ #[inline]
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
self.inner.read_vectored(bufs)
}
+ #[inline]
fn read_buf(&mut self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
self.inner.read_buf(cursor)
}
diff --git a/library/std/src/io/buffered/bufwriter.rs b/library/std/src/io/buffered/bufwriter.rs
index 0f04f2911..95ba82e1e 100644
--- a/library/std/src/io/buffered/bufwriter.rs
+++ b/library/std/src/io/buffered/bufwriter.rs
@@ -237,7 +237,7 @@ impl<W: ?Sized + Write> BufWriter<W> {
));
}
Ok(n) => guard.consume(n),
- Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
+ Err(ref e) if e.is_interrupted() => {}
Err(e) => return Err(e),
}
}
diff --git a/library/std/src/io/copy.rs b/library/std/src/io/copy.rs
index 3322940d2..eafd078a7 100644
--- a/library/std/src/io/copy.rs
+++ b/library/std/src/io/copy.rs
@@ -1,6 +1,5 @@
-use super::{BorrowedBuf, BufReader, BufWriter, ErrorKind, Read, Result, Write, DEFAULT_BUF_SIZE};
+use super::{BorrowedBuf, BufReader, BufWriter, Read, Result, Write, DEFAULT_BUF_SIZE};
use crate::alloc::Allocator;
-use crate::cmp;
use crate::collections::VecDeque;
use crate::io::IoSlice;
use crate::mem::MaybeUninit;
@@ -30,6 +29,7 @@ mod tests;
///
/// [`read`]: Read::read
/// [`write`]: Write::write
+/// [`ErrorKind::Interrupted`]: crate::io::ErrorKind::Interrupted
///
/// # Examples
///
@@ -163,7 +163,7 @@ where
// from adding I: Read
match self.read(&mut []) {
Ok(_) => {}
- Err(e) if e.kind() == ErrorKind::Interrupted => continue,
+ Err(e) if e.is_interrupted() => continue,
Err(e) => return Err(e),
}
let buf = self.buffer();
@@ -243,7 +243,7 @@ impl<I: Write + ?Sized> BufferedWriterSpec for BufWriter<I> {
// Read again if the buffer still has enough capacity, as BufWriter itself would do
// This will occur if the reader returns short reads
}
- Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
+ Err(ref e) if e.is_interrupted() => {}
Err(e) => return Err(e),
}
} else {
@@ -254,47 +254,6 @@ impl<I: Write + ?Sized> BufferedWriterSpec for BufWriter<I> {
}
}
-impl<A: Allocator> BufferedWriterSpec for Vec<u8, A> {
- fn buffer_size(&self) -> usize {
- cmp::max(DEFAULT_BUF_SIZE, self.capacity() - self.len())
- }
-
- fn copy_from<R: Read + ?Sized>(&mut self, reader: &mut R) -> Result<u64> {
- let mut bytes = 0;
-
- // avoid allocating before we have determined that there's anything to read
- if self.capacity() == 0 {
- bytes = stack_buffer_copy(&mut reader.take(DEFAULT_BUF_SIZE as u64), self)?;
- if bytes == 0 {
- return Ok(0);
- }
- }
-
- loop {
- self.reserve(DEFAULT_BUF_SIZE);
- let mut buf: BorrowedBuf<'_> = self.spare_capacity_mut().into();
- match reader.read_buf(buf.unfilled()) {
- Ok(()) => {}
- Err(e) if e.kind() == ErrorKind::Interrupted => continue,
- Err(e) => return Err(e),
- };
-
- let read = buf.filled().len();
- if read == 0 {
- break;
- }
-
- // SAFETY: BorrowedBuf guarantees all of its filled bytes are init
- // and the number of read bytes can't exceed the spare capacity since
- // that's what the buffer is borrowing from.
- unsafe { self.set_len(self.len() + read) };
- bytes += read as u64;
- }
-
- Ok(bytes)
- }
-}
-
fn stack_buffer_copy<R: Read + ?Sized, W: Write + ?Sized>(
reader: &mut R,
writer: &mut W,
@@ -307,7 +266,7 @@ fn stack_buffer_copy<R: Read + ?Sized, W: Write + ?Sized>(
loop {
match reader.read_buf(buf.unfilled()) {
Ok(()) => {}
- Err(e) if e.kind() == ErrorKind::Interrupted => continue,
+ Err(e) if e.is_interrupted() => continue,
Err(e) => return Err(e),
};
diff --git a/library/std/src/io/copy/tests.rs b/library/std/src/io/copy/tests.rs
index af137eaf8..d9998e87c 100644
--- a/library/std/src/io/copy/tests.rs
+++ b/library/std/src/io/copy/tests.rs
@@ -81,18 +81,6 @@ fn copy_specializes_bufreader() {
}
#[test]
-fn copy_specializes_to_vec() {
- let cap = 123456;
- let mut source = ShortReader { cap, observed_buffer: 0, read_size: 1337 };
- let mut sink = Vec::new();
- assert_eq!(cap as u64, io::copy(&mut source, &mut sink).unwrap());
- assert!(
- source.observed_buffer > DEFAULT_BUF_SIZE,
- "expected a large buffer to be provided to the reader"
- );
-}
-
-#[test]
fn copy_specializes_from_vecdeque() {
let mut source = VecDeque::with_capacity(100 * 1024);
for _ in 0..20 * 1024 {
diff --git a/library/std/src/io/error.rs b/library/std/src/io/error.rs
index 34c0ce9dc..b63091dea 100644
--- a/library/std/src/io/error.rs
+++ b/library/std/src/io/error.rs
@@ -1,14 +1,14 @@
#[cfg(test)]
mod tests;
-#[cfg(target_pointer_width = "64")]
+#[cfg(all(target_pointer_width = "64", not(target_os = "uefi")))]
mod repr_bitpacked;
-#[cfg(target_pointer_width = "64")]
+#[cfg(all(target_pointer_width = "64", not(target_os = "uefi")))]
use repr_bitpacked::Repr;
-#[cfg(not(target_pointer_width = "64"))]
+#[cfg(any(not(target_pointer_width = "64"), target_os = "uefi"))]
mod repr_unpacked;
-#[cfg(not(target_pointer_width = "64"))]
+#[cfg(any(not(target_pointer_width = "64"), target_os = "uefi"))]
use repr_unpacked::Repr;
use crate::error;
@@ -102,7 +102,7 @@ enum ErrorData<C> {
///
/// [`into`]: Into::into
#[unstable(feature = "raw_os_error_ty", issue = "107792")]
-pub type RawOsError = i32;
+pub type RawOsError = sys::RawOsError;
// `#[repr(align(4))]` is probably redundant, it should have that value or
// higher already. We include it just because repr_bitpacked.rs's encoding
@@ -511,6 +511,7 @@ impl Error {
/// let eof_error = Error::from(ErrorKind::UnexpectedEof);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
+ #[inline(never)]
pub fn new<E>(kind: ErrorKind, error: E) -> Error
where
E: Into<Box<dyn error::Error + Send + Sync>>,
@@ -527,8 +528,6 @@ impl Error {
/// # Examples
///
/// ```
- /// #![feature(io_error_other)]
- ///
/// use std::io::Error;
///
/// // errors can be created from strings
@@ -537,7 +536,7 @@ impl Error {
/// // errors can also be created from other errors
/// let custom_error2 = Error::other(custom_error);
/// ```
- #[unstable(feature = "io_error_other", issue = "91946")]
+ #[stable(feature = "io_error_other", since = "1.74.0")]
pub fn other<E>(error: E) -> Error
where
E: Into<Box<dyn error::Error + Send + Sync>>,
@@ -916,6 +915,16 @@ impl Error {
ErrorData::SimpleMessage(m) => m.kind,
}
}
+
+ #[inline]
+ pub(crate) fn is_interrupted(&self) -> bool {
+ match self.repr.data() {
+ ErrorData::Os(code) => sys::is_interrupted(code),
+ ErrorData::Custom(c) => c.kind == ErrorKind::Interrupted,
+ ErrorData::Simple(kind) => kind == ErrorKind::Interrupted,
+ ErrorData::SimpleMessage(m) => m.kind == ErrorKind::Interrupted,
+ }
+ }
}
impl fmt::Debug for Repr {
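The new `Error::is_interrupted` helper is crate-private, so code outside `std` keeps matching on `ErrorKind::Interrupted`; that is the retry pattern the loops throughout this patch switch away from internally. A minimal sketch of that pattern, for any generic reader:

```rust
use std::io::{self, ErrorKind, Read};

// Retry a read until it either succeeds or fails with something other than
// an interruption, mirroring the loops replaced by `is_interrupted` above.
fn read_retrying<R: Read>(r: &mut R, buf: &mut [u8]) -> io::Result<usize> {
    loop {
        match r.read(buf) {
            Ok(n) => return Ok(n),
            Err(ref e) if e.kind() == ErrorKind::Interrupted => continue,
            Err(e) => return Err(e),
        }
    }
}
```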
diff --git a/library/std/src/io/error/repr_bitpacked.rs b/library/std/src/io/error/repr_bitpacked.rs
index f94f88bac..6e7366b36 100644
--- a/library/std/src/io/error/repr_bitpacked.rs
+++ b/library/std/src/io/error/repr_bitpacked.rs
@@ -374,9 +374,6 @@ static_assert!((TAG_MASK + 1).is_power_of_two());
static_assert!(align_of::<SimpleMessage>() >= TAG_MASK + 1);
static_assert!(align_of::<Custom>() >= TAG_MASK + 1);
-// `RawOsError` must be an alias for `i32`.
-const _: fn(RawOsError) -> i32 = |os| os;
-
static_assert!(@usize_eq: TAG_MASK & TAG_SIMPLE_MESSAGE, TAG_SIMPLE_MESSAGE);
static_assert!(@usize_eq: TAG_MASK & TAG_CUSTOM, TAG_CUSTOM);
static_assert!(@usize_eq: TAG_MASK & TAG_OS, TAG_OS);
diff --git a/library/std/src/io/mod.rs b/library/std/src/io/mod.rs
index 71d91f213..604b795cd 100644
--- a/library/std/src/io/mod.rs
+++ b/library/std/src/io/mod.rs
@@ -5,7 +5,7 @@
//! the [`Read`] and [`Write`] traits, which provide the
//! most general interface for reading and writing input and output.
//!
-//! # Read and Write
+//! ## Read and Write
//!
//! Because they are traits, [`Read`] and [`Write`] are implemented by a number
//! of other types, and you can implement them for your types too. As such,
@@ -238,6 +238,47 @@
//! contract. The implementation of many of these functions are subject to change over
//! time and may call fewer or more syscalls/library functions.
//!
+//! ## I/O Safety
+//!
+//! Rust follows an I/O safety discipline that is comparable to its memory safety discipline. This
+//! means that file descriptors can be *exclusively owned*. (Here, "file descriptor" is meant to
+//! subsume similar concepts that exist across a wide range of operating systems even if they might
+//! use a different name, such as "handle".) An exclusively owned file descriptor is one that no
+//! other code is allowed to access in any way, but the owner is allowed to access and even close
+//! it any time. A type that owns its file descriptor should usually close it in its `drop`
+//! function. Types like [`File`] own their file descriptor. Similarly, file descriptors
+//! can be *borrowed*, granting the temporary right to perform operations on this file descriptor.
+//! This indicates that the file descriptor will not be closed for the lifetime of the borrow, but
+//! it does *not* imply any right to close this file descriptor, since it will likely be owned by
+//! someone else.
+//!
+//! The platform-specific parts of the Rust standard library expose types that reflect these
+//! concepts, see [`os::unix`] and [`os::windows`].
+//!
+//! To uphold I/O safety, it is crucial that no code acts on file descriptors it does not own or
+//! borrow, and no code closes file descriptors it does not own. In other words, a safe function
+//! that takes a regular integer, treats it as a file descriptor, and acts on it, is *unsound*.
+//!
+//! Not upholding I/O safety and acting on a file descriptor without proof of ownership can lead to
+//! misbehavior and even Undefined Behavior in code that relies on ownership of its file
+//! descriptors: a closed file descriptor could be re-allocated, so the original owner of that file
+//! descriptor is now working on the wrong file. Some code might even rely on fully encapsulating
+//! its file descriptors with no operations being performed by any other part of the program.
+//!
+//! Note that exclusive ownership of a file descriptor does *not* imply exclusive ownership of the
+//! underlying kernel object that the file descriptor references (also called "file description" on
+//! some operating systems). File descriptors basically work like [`Arc`]: when you receive an owned
+//! file descriptor, you cannot know whether there are any other file descriptors that reference the
+//! same kernel object. However, when you create a new kernel object, you know that you are holding
+//! the only reference to it. Just be careful not to lend it to anyone, since they can obtain a
+//! clone and then you can no longer know what the reference count is! In that sense, [`OwnedFd`] is
+//! like `Arc` and [`BorrowedFd<'a>`] is like `&'a Arc` (and similar for the Windows types). In
+//! particular, given a `BorrowedFd<'a>`, you are not allowed to close the file descriptor -- just
+//! like how, given a `&'a Arc`, you are not allowed to decrement the reference count and
+//! potentially free the underlying object. There is no equivalent to `Box` for file descriptors in
+//! the standard library (that would be a type that guarantees that the reference count is `1`),
+//! however, it would be possible for a crate to define a type with those semantics.
+//!
//! [`File`]: crate::fs::File
//! [`TcpStream`]: crate::net::TcpStream
//! [`io::stdout`]: stdout
@@ -245,6 +286,11 @@
//! [`?` operator]: ../../book/appendix-02-operators.html
//! [`Result`]: crate::result::Result
//! [`.unwrap()`]: crate::result::Result::unwrap
+//! [`os::unix`]: ../os/unix/io/index.html
+//! [`os::windows`]: ../os/windows/io/index.html
+//! [`OwnedFd`]: ../os/fd/struct.OwnedFd.html
+//! [`BorrowedFd<'a>`]: ../os/fd/struct.BorrowedFd.html
+//! [`Arc`]: crate::sync::Arc
#![stable(feature = "rust1", since = "1.0.0")]
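To make the `Arc`-style ownership analogy above concrete: a `BorrowedFd` obtained from an owner may be used for the duration of the borrow but carries no right to close the descriptor, while the owner closes it exactly once on drop. A minimal sketch, assuming a Unix target:

```rust
#[cfg(unix)]
fn demo() -> std::io::Result<()> {
    use std::fs::File;
    use std::os::fd::{AsFd, BorrowedFd};

    // `File` owns its file descriptor and closes it when dropped.
    let file = File::open("/dev/null")?;

    // Borrowing grants temporary access tied to `file`'s lifetime;
    // it never closes the descriptor.
    let borrowed: BorrowedFd<'_> = file.as_fd();
    let _ = borrowed;

    Ok(())
    // `file` is dropped here and the descriptor is closed exactly once.
}
```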
@@ -390,7 +436,7 @@ pub(crate) fn default_read_to_end<R: Read + ?Sized>(
let mut cursor = read_buf.unfilled();
match r.read_buf(cursor.reborrow()) {
Ok(()) => {}
- Err(e) if e.kind() == ErrorKind::Interrupted => continue,
+ Err(e) if e.is_interrupted() => continue,
Err(e) => return Err(e),
}
@@ -421,7 +467,7 @@ pub(crate) fn default_read_to_end<R: Read + ?Sized>(
buf.extend_from_slice(&probe[..n]);
break;
}
- Err(ref e) if e.kind() == ErrorKind::Interrupted => continue,
+ Err(ref e) if e.is_interrupted() => continue,
Err(e) => return Err(e),
}
}
@@ -470,7 +516,7 @@ pub(crate) fn default_read_exact<R: Read + ?Sized>(this: &mut R, mut buf: &mut [
let tmp = buf;
buf = &mut tmp[n..];
}
- Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
+ Err(ref e) if e.is_interrupted() => {}
Err(e) => return Err(e),
}
}
@@ -860,7 +906,7 @@ pub trait Read {
let prev_written = cursor.written();
match self.read_buf(cursor.reborrow()) {
Ok(()) => {}
- Err(e) if e.kind() == ErrorKind::Interrupted => continue,
+ Err(e) if e.is_interrupted() => continue,
Err(e) => return Err(e),
}
@@ -1190,22 +1236,22 @@ impl<'a> IoSliceMut<'a> {
pub fn advance_slices(bufs: &mut &mut [IoSliceMut<'a>], n: usize) {
// Number of buffers to remove.
let mut remove = 0;
- // Total length of all the to be removed buffers.
- let mut accumulated_len = 0;
+ // Remaining length before reaching n.
+ let mut left = n;
for buf in bufs.iter() {
- if accumulated_len + buf.len() > n {
- break;
- } else {
- accumulated_len += buf.len();
+ if let Some(remainder) = left.checked_sub(buf.len()) {
+ left = remainder;
remove += 1;
+ } else {
+ break;
}
}
*bufs = &mut take(bufs)[remove..];
if bufs.is_empty() {
- assert!(n == accumulated_len, "advancing io slices beyond their length");
+ assert!(left == 0, "advancing io slices beyond their length");
} else {
- bufs[0].advance(n - accumulated_len)
+ bufs[0].advance(left);
}
}
}
@@ -1333,22 +1379,25 @@ impl<'a> IoSlice<'a> {
pub fn advance_slices(bufs: &mut &mut [IoSlice<'a>], n: usize) {
// Number of buffers to remove.
let mut remove = 0;
- // Total length of all the to be removed buffers.
- let mut accumulated_len = 0;
+ // Remaining length before reaching n. This prevents overflow
+ // that could happen if the length of slices in `bufs` were instead
+ // accumulated. Those slices may be aliased and, if they are large
+ // enough, their added length may overflow a `usize`.
+ let mut left = n;
for buf in bufs.iter() {
- if accumulated_len + buf.len() > n {
- break;
- } else {
- accumulated_len += buf.len();
+ if let Some(remainder) = left.checked_sub(buf.len()) {
+ left = remainder;
remove += 1;
+ } else {
+ break;
}
}
*bufs = &mut take(bufs)[remove..];
if bufs.is_empty() {
- assert!(n == accumulated_len, "advancing io slices beyond their length");
+ assert!(left == 0, "advancing io slices beyond their length");
} else {
- bufs[0].advance(n - accumulated_len)
+ bufs[0].advance(left);
}
}
}
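The rewritten loops count down from `n` with `checked_sub` instead of summing buffer lengths, so aliased slices whose combined length exceeds `usize::MAX` can no longer overflow the accumulator. A standalone sketch of the same counting scheme, using a hypothetical `split_advance` helper over plain lengths:

```rust
// Given buffer lengths and an advance count `n`, return how many whole
// buffers are consumed and the byte offset into the next one. Counting
// down with `checked_sub` never adds lengths together, so it cannot
// overflow even for pathological inputs.
fn split_advance(lens: &[usize], n: usize) -> (usize, usize) {
    let mut remove = 0;
    let mut left = n;
    for &len in lens {
        if let Some(rem) = left.checked_sub(len) {
            left = rem;
            remove += 1;
        } else {
            break;
        }
    }
    (remove, left)
}

fn main() {
    // Advancing by 7 over buffers of 3, 4, and 5 bytes consumes the first
    // two buffers entirely and starts at offset 0 of the third.
    assert_eq!(split_advance(&[3, 4, 5], 7), (2, 0));
    // Advancing by 5 consumes the first buffer and 2 bytes of the second.
    assert_eq!(split_advance(&[3, 4, 5], 5), (1, 2));
}
```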
@@ -1579,7 +1628,7 @@ pub trait Write {
));
}
Ok(n) => buf = &buf[n..],
- Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
+ Err(ref e) if e.is_interrupted() => {}
Err(e) => return Err(e),
}
}
@@ -1647,7 +1696,7 @@ pub trait Write {
));
}
Ok(n) => IoSlice::advance_slices(&mut bufs, n),
- Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
+ Err(ref e) if e.is_interrupted() => {}
Err(e) => return Err(e),
}
}
@@ -1943,7 +1992,7 @@ fn read_until<R: BufRead + ?Sized>(r: &mut R, delim: u8, buf: &mut Vec<u8>) -> R
let (done, used) = {
let available = match r.fill_buf() {
Ok(n) => n,
- Err(ref e) if e.kind() == ErrorKind::Interrupted => continue,
+ Err(ref e) if e.is_interrupted() => continue,
Err(e) => return Err(e),
};
match memchr::memchr(delim, available) {
@@ -2734,7 +2783,7 @@ impl<R: Read> Iterator for Bytes<R> {
return match self.inner.read(slice::from_mut(&mut byte)) {
Ok(0) => None,
Ok(..) => Some(Ok(byte)),
- Err(ref e) if e.kind() == ErrorKind::Interrupted => continue,
+ Err(ref e) if e.is_interrupted() => continue,
Err(e) => Some(Err(e)),
};
}
diff --git a/library/std/src/keyword_docs.rs b/library/std/src/keyword_docs.rs
index eb46f4e54..873bfb621 100644
--- a/library/std/src/keyword_docs.rs
+++ b/library/std/src/keyword_docs.rs
@@ -1820,7 +1820,7 @@ mod true_keyword {}
#[doc(keyword = "type")]
//
-/// Define an alias for an existing type.
+/// Define an [alias] for an existing type.
///
/// The syntax is `type Name = ExistingType;`.
///
@@ -1838,6 +1838,13 @@ mod true_keyword {}
/// assert_eq!(m, k);
/// ```
///
+/// A type can be generic:
+///
+/// ```rust
+/// # use std::sync::{Arc, Mutex};
+/// type ArcMutex<T> = Arc<Mutex<T>>;
+/// ```
+///
/// In traits, `type` is used to declare an [associated type]:
///
/// ```rust
@@ -1860,6 +1867,7 @@ mod true_keyword {}
///
/// [`trait`]: keyword.trait.html
/// [associated type]: ../reference/items/associated-items.html#associated-types
+/// [alias]: ../reference/items/type-aliases.html
mod type_keyword {}
#[doc(keyword = "unsafe")]
diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs
index 0ccbb16b1..f1f0f8b16 100644
--- a/library/std/src/lib.rs
+++ b/library/std/src/lib.rs
@@ -152,6 +152,31 @@
//! contains further primitive shared memory types, including [`atomic`] and
//! [`mpsc`], which contains the channel types for message passing.
//!
+//! # Use before and after `main()`
+//!
+//! Many parts of the standard library are expected to work before and after `main()`,
+//! but this is not guaranteed or ensured by tests. It is recommended that you write your own tests
+//! and run them on each platform you wish to support.
+//! This means that use of `std` before/after main, especially of features that interact with the
+//! OS or global state, is exempted from stability and portability guarantees and instead only
+//! provided on a best-effort basis. Nevertheless, bug reports are appreciated.
+//!
+//! On the other hand, `core` and `alloc` are most likely to work in such environments with
+//! the caveat that any hookable behavior such as panics, oom handling or allocators will also
+//! depend on the compatibility of the hooks.
+//!
+//! Some features may also behave differently outside `main()`; for example, stdio could become
+//! unbuffered, some panics might turn into aborts, or backtraces might not get symbolicated.
+//!
+//! Non-exhaustive list of known limitations:
+//!
+//! - after-main use of thread-locals, which also affects additional features:
+//! - [`thread::current()`]
+//! - [`thread::scope()`]
+//! - [`sync::mpsc`]
+//! - before-main stdio file descriptors are not guaranteed to be open on unix platforms
+//!
+//!
//! [I/O]: io
//! [`MIN`]: i32::MIN
//! [`MAX`]: i32::MAX
@@ -187,7 +212,6 @@
//! [rust-discord]: https://discord.gg/rust-lang
//! [array]: prim@array
//! [slice]: prim@slice
-
// To run std tests without x.py without ending up with two copies of std, Miri needs to be
// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
// rustc itself never sets the feature, so this line has no effect there.
@@ -220,10 +244,10 @@
#![warn(missing_debug_implementations)]
#![allow(explicit_outlives_requirements)]
#![allow(unused_lifetimes)]
-#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![allow(internal_features)]
#![deny(rustc::existing_doc_keyword)]
#![deny(fuzzy_provenance_casts)]
-#![cfg_attr(not(bootstrap), allow(rustdoc::redundant_explicit_links))]
+#![allow(rustdoc::redundant_explicit_links)]
// Ensure that std can be linked against panic_abort despite compiled with `-C panic=unwind`
#![deny(ffi_unwind_calls)]
// std may use features in a platform-specific way
@@ -236,6 +260,7 @@
feature(slice_index_methods, coerce_unsized, sgx_platform)
)]
#![cfg_attr(windows, feature(round_char_boundary))]
+#![cfg_attr(target_os = "xous", feature(slice_ptr_len))]
//
// Language features:
// tidy-alphabetical-start
@@ -351,7 +376,6 @@
#![feature(get_many_mut)]
#![feature(lazy_cell)]
#![feature(log_syntax)]
-#![feature(saturating_int_impl)]
#![feature(stdsimd)]
#![feature(test)]
#![feature(trace_macros)]
@@ -641,13 +665,16 @@ pub use core::{
)]
pub use core::concat_bytes;
+#[unstable(feature = "cfg_match", issue = "115585")]
+pub use core::cfg_match;
+
#[stable(feature = "core_primitive", since = "1.43.0")]
pub use core::primitive;
// Include a number of private modules that exist solely to provide
// the rustdoc documentation for primitive types. Using `include!`
// because rustdoc only looks for these modules at the crate level.
-include!("primitive_docs.rs");
+include!("../../core/src/primitive_docs.rs");
// Include a number of private modules that exist solely to provide
// the rustdoc documentation for the existing keywords. Using `include!`
diff --git a/library/std/src/macros.rs b/library/std/src/macros.rs
index ba1b8cbfa..34b8b6b97 100644
--- a/library/std/src/macros.rs
+++ b/library/std/src/macros.rs
@@ -41,6 +41,9 @@ macro_rules! panic {
/// Use `print!` only for the primary output of your program. Use
/// [`eprint!`] instead to print error and progress messages.
///
+/// See [the formatting documentation in `std::fmt`](../std/fmt/index.html)
+/// for details of the macro argument syntax.
+///
/// [flush]: crate::io::Write::flush
/// [`println!`]: crate::println
/// [`eprint!`]: crate::eprint
@@ -103,6 +106,9 @@ macro_rules! print {
/// Use `println!` only for the primary output of your program. Use
/// [`eprintln!`] instead to print error and progress messages.
///
+/// See [the formatting documentation in `std::fmt`](../std/fmt/index.html)
+/// for details of the macro argument syntax.
+///
/// [`std::fmt`]: crate::fmt
/// [`eprintln!`]: crate::eprintln
/// [lock]: crate::io::Stdout
@@ -150,6 +156,9 @@ macro_rules! println {
/// [`io::stderr`]: crate::io::stderr
/// [`io::stdout`]: crate::io::stdout
///
+/// See [the formatting documentation in `std::fmt`](../std/fmt/index.html)
+/// for details of the macro argument syntax.
+///
/// # Panics
///
/// Panics if writing to `io::stderr` fails.
@@ -181,6 +190,9 @@ macro_rules! eprint {
/// Use `eprintln!` only for error and progress messages. Use `println!`
/// instead for the primary output of your program.
///
+/// See [the formatting documentation in `std::fmt`](../std/fmt/index.html)
+/// for details of the macro argument syntax.
+///
/// [`io::stderr`]: crate::io::stderr
/// [`io::stdout`]: crate::io::stdout
/// [`println!`]: crate::println
diff --git a/library/std/src/net/tcp.rs b/library/std/src/net/tcp.rs
index 32fd54c8e..9667d5f92 100644
--- a/library/std/src/net/tcp.rs
+++ b/library/std/src/net/tcp.rs
@@ -1,6 +1,6 @@
#![deny(unsafe_op_in_unsafe_fn)]
-#[cfg(all(test, not(target_os = "emscripten")))]
+#[cfg(all(test, not(any(target_os = "emscripten", target_os = "xous"))))]
mod tests;
use crate::io::prelude::*;
diff --git a/library/std/src/net/udp.rs b/library/std/src/net/udp.rs
index 5ca4ed832..227e418b7 100644
--- a/library/std/src/net/udp.rs
+++ b/library/std/src/net/udp.rs
@@ -1,4 +1,4 @@
-#[cfg(all(test, not(any(target_os = "emscripten", target_env = "sgx"))))]
+#[cfg(all(test, not(any(target_os = "emscripten", target_env = "sgx", target_os = "xous"))))]
mod tests;
use crate::fmt;
diff --git a/library/std/src/num.rs b/library/std/src/num.rs
index 46064bd28..3cd5fa458 100644
--- a/library/std/src/num.rs
+++ b/library/std/src/num.rs
@@ -12,7 +12,7 @@ mod tests;
#[cfg(test)]
mod benches;
-#[unstable(feature = "saturating_int_impl", issue = "87920")]
+#[stable(feature = "saturating_int_impl", since = "1.74.0")]
pub use core::num::Saturating;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::num::Wrapping;
diff --git a/library/std/src/os/fd/owned.rs b/library/std/src/os/fd/owned.rs
index 2180d2974..81106d6c6 100644
--- a/library/std/src/os/fd/owned.rs
+++ b/library/std/src/os/fd/owned.rs
@@ -15,8 +15,9 @@ use crate::sys_common::{AsInner, FromInner, IntoInner};
/// A borrowed file descriptor.
///
-/// This has a lifetime parameter to tie it to the lifetime of something that
-/// owns the file descriptor.
+/// This has a lifetime parameter to tie it to the lifetime of something that owns the file
+/// descriptor. For the duration of that lifetime, it is guaranteed that nobody will close the file
+/// descriptor.
///
/// This uses `repr(transparent)` and has the representation of a host file
/// descriptor, so it can be used in FFI in places where a file descriptor is
@@ -42,7 +43,8 @@ pub struct BorrowedFd<'fd> {
/// An owned file descriptor.
///
-/// This closes the file descriptor on drop.
+/// This closes the file descriptor on drop. It is guaranteed that nobody else will close the file
+/// descriptor.
///
/// This uses `repr(transparent)` and has the representation of a host file
/// descriptor, so it can be used in FFI in places where a file descriptor is
@@ -155,7 +157,9 @@ impl FromRawFd for OwnedFd {
/// # Safety
///
/// The resource pointed to by `fd` must be open and suitable for assuming
- /// ownership. The resource must not require any cleanup other than `close`.
+ /// [ownership][io-safety]. The resource must not require any cleanup other than `close`.
+ ///
+ /// [io-safety]: io#io-safety
#[inline]
unsafe fn from_raw_fd(fd: RawFd) -> Self {
assert_ne!(fd, u32::MAX as RawFd);
diff --git a/library/std/src/os/fd/raw.rs b/library/std/src/os/fd/raw.rs
index 592e072ad..ef896ea95 100644
--- a/library/std/src/os/fd/raw.rs
+++ b/library/std/src/os/fd/raw.rs
@@ -84,7 +84,10 @@ pub trait FromRawFd {
///
/// # Safety
///
- /// The `fd` passed in must be a valid and open file descriptor.
+ /// The `fd` passed in must be an [owned file descriptor][io-safety];
+ /// in particular, it must be open.
+ ///
+ /// [io-safety]: io#io-safety
///
/// # Example
///
diff --git a/library/std/src/os/fortanix_sgx/io.rs b/library/std/src/os/fortanix_sgx/io.rs
index 7223ade68..7e57435b6 100644
--- a/library/std/src/os/fortanix_sgx/io.rs
+++ b/library/std/src/os/fortanix_sgx/io.rs
@@ -31,15 +31,22 @@ pub trait FromRawFd {
/// Constructs a new instance of `Self` from the given raw file
/// descriptor and metadata.
///
- /// This function **consumes ownership** of the specified file
- /// descriptor. The returned object will take responsibility for closing
- /// it when the object goes out of scope.
+ /// This function is typically used to **consume ownership** of the
+ /// specified file descriptor. When used in this way, the returned object
+ /// will take responsibility for closing it when the object goes out of
+ /// scope.
///
- /// This function is also unsafe as the primitives currently returned
- /// have the contract that they are the sole owner of the file
- /// descriptor they are wrapping. Usage of this function could
- /// accidentally allow violating this contract which can cause memory
- /// unsafety in code that relies on it being true.
+ /// However, consuming ownership is not strictly required. Use a
+ /// [`From<OwnedFd>::from`] implementation for an API which strictly
+ /// consumes ownership.
+ ///
+ /// # Safety
+ ///
+ /// The `fd` passed in must be an [owned file descriptor][io-safety];
+ /// in particular, it must be open.
+ // FIXME: say something about `metadata`.
+ ///
+ /// [io-safety]: io#io-safety
#[unstable(feature = "sgx_platform", issue = "56975")]
unsafe fn from_raw_fd(fd: RawFd, metadata: Self::Metadata) -> Self;
}
diff --git a/library/std/src/os/hurd/fs.rs b/library/std/src/os/hurd/fs.rs
new file mode 100644
index 000000000..00ff1560f
--- /dev/null
+++ b/library/std/src/os/hurd/fs.rs
@@ -0,0 +1,348 @@
+//! Hurd-specific extensions to primitives in the [`std::fs`] module.
+//!
+//! [`std::fs`]: crate::fs
+
+#![stable(feature = "metadata_ext", since = "1.1.0")]
+
+use crate::fs::Metadata;
+use crate::sys_common::AsInner;
+
+/// OS-specific extensions to [`fs::Metadata`].
+///
+/// [`fs::Metadata`]: crate::fs::Metadata
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+pub trait MetadataExt {
+ /// Returns the device ID on which this file resides.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::hurd::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_dev());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_dev(&self) -> u64;
+ /// Returns the inode number.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::hurd::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_ino());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ino(&self) -> u64;
+ /// Returns the file type and mode.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::hurd::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_mode());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mode(&self) -> u32;
+ /// Returns the number of hard links to file.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::hurd::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_nlink());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_nlink(&self) -> u64;
+ /// Returns the user ID of the file owner.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::hurd::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_uid());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_uid(&self) -> u32;
+ /// Returns the group ID of the file owner.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::hurd::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_gid());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gid(&self) -> u32;
+ /// Returns the device ID that this file represents. Only relevant for special files.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::hurd::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_rdev());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_rdev(&self) -> u64;
+ /// Returns the size of the file (if it is a regular file or a symbolic link) in bytes.
+ ///
+ /// The size of a symbolic link is the length of the pathname it contains,
+ /// without a terminating null byte.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::hurd::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_size());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_size(&self) -> u64;
+ /// Returns the last access time of the file, in seconds since Unix Epoch.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::hurd::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_atime());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime(&self) -> i64;
+ /// Returns the last access time of the file, in nanoseconds since [`st_atime`].
+ ///
+ /// [`st_atime`]: Self::st_atime
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::hurd::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_atime_nsec());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime_nsec(&self) -> i64;
+ /// Returns the last modification time of the file, in seconds since Unix Epoch.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::hurd::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_mtime());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime(&self) -> i64;
+ /// Returns the last modification time of the file, in nanoseconds since [`st_mtime`].
+ ///
+ /// [`st_mtime`]: Self::st_mtime
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::hurd::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_mtime_nsec());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime_nsec(&self) -> i64;
+ /// Returns the last status change time of the file, in seconds since Unix Epoch.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::hurd::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_ctime());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime(&self) -> i64;
+ /// Returns the last status change time of the file, in nanoseconds since [`st_ctime`].
+ ///
+ /// [`st_ctime`]: Self::st_ctime
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::hurd::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_ctime_nsec());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime_nsec(&self) -> i64;
+ /// Returns the "preferred" block size for efficient filesystem I/O.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::hurd::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_blksize());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blksize(&self) -> u64;
+ /// Returns the number of blocks allocated to the file, in 512-byte units.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::hurd::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_blocks());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blocks(&self) -> u64;
+}
+
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+impl MetadataExt for Metadata {
+ fn st_dev(&self) -> u64 {
+ self.as_inner().as_inner().st_fsid as u64
+ }
+ fn st_ino(&self) -> u64 {
+ self.as_inner().as_inner().st_ino as u64
+ }
+ fn st_mode(&self) -> u32 {
+ self.as_inner().as_inner().st_mode as u32
+ }
+ fn st_nlink(&self) -> u64 {
+ self.as_inner().as_inner().st_nlink as u64
+ }
+ fn st_uid(&self) -> u32 {
+ self.as_inner().as_inner().st_uid as u32
+ }
+ fn st_gid(&self) -> u32 {
+ self.as_inner().as_inner().st_gid as u32
+ }
+ fn st_rdev(&self) -> u64 {
+ self.as_inner().as_inner().st_rdev as u64
+ }
+ fn st_size(&self) -> u64 {
+ self.as_inner().as_inner().st_size as u64
+ }
+ fn st_atime(&self) -> i64 {
+ self.as_inner().as_inner().st_atim.tv_sec as i64
+ }
+ fn st_atime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_atim.tv_nsec as i64
+ }
+ fn st_mtime(&self) -> i64 {
+ self.as_inner().as_inner().st_mtim.tv_sec as i64
+ }
+ fn st_mtime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_mtim.tv_nsec as i64
+ }
+ fn st_ctime(&self) -> i64 {
+ self.as_inner().as_inner().st_ctim.tv_sec as i64
+ }
+ fn st_ctime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_ctim.tv_nsec as i64
+ }
+ fn st_blksize(&self) -> u64 {
+ self.as_inner().as_inner().st_blksize as u64
+ }
+ fn st_blocks(&self) -> u64 {
+ self.as_inner().as_inner().st_blocks as u64
+ }
+}
diff --git a/library/std/src/os/hurd/mod.rs b/library/std/src/os/hurd/mod.rs
new file mode 100644
index 000000000..aee86c7f6
--- /dev/null
+++ b/library/std/src/os/hurd/mod.rs
@@ -0,0 +1,6 @@
+//! Hurd-specific definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+
+pub mod fs;
+pub mod raw;
diff --git a/library/std/src/os/hurd/raw.rs b/library/std/src/os/hurd/raw.rs
new file mode 100644
index 000000000..fa2666635
--- /dev/null
+++ b/library/std/src/os/hurd/raw.rs
@@ -0,0 +1,33 @@
+//! Hurd-specific raw type definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+#![deprecated(
+ since = "1.8.0",
+ note = "these type aliases are no longer supported by \
+ the standard library, the `libc` crate on \
+ crates.io should be used instead for the correct \
+ definitions"
+)]
+#![allow(deprecated)]
+
+use crate::os::raw::{c_long, c_uint, c_ulong};
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blkcnt_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blksize_t = c_long;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type dev_t = c_ulong;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type ino_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type mode_t = c_uint;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type nlink_t = c_ulong;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type off_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type time_t = c_long;
+
+#[stable(feature = "pthread_t", since = "1.8.0")]
+pub type pthread_t = c_long;
diff --git a/library/std/src/os/mod.rs b/library/std/src/os/mod.rs
index 634c3cc4a..11ad21515 100644
--- a/library/std/src/os/mod.rs
+++ b/library/std/src/os/mod.rs
@@ -117,6 +117,8 @@ pub mod haiku;
pub mod hermit;
#[cfg(target_os = "horizon")]
pub mod horizon;
+#[cfg(target_os = "hurd")]
+pub mod hurd;
#[cfg(target_os = "illumos")]
pub mod illumos;
#[cfg(target_os = "ios")]
@@ -140,12 +142,16 @@ pub mod solid;
#[cfg(target_os = "tvos")]
#[path = "ios/mod.rs"]
pub(crate) mod tvos;
+#[cfg(target_os = "uefi")]
+pub mod uefi;
#[cfg(target_os = "vita")]
pub mod vita;
#[cfg(target_os = "vxworks")]
pub mod vxworks;
#[cfg(target_os = "watchos")]
pub(crate) mod watchos;
+#[cfg(target_os = "xous")]
+pub mod xous;
#[cfg(any(unix, target_os = "wasi", doc))]
pub mod fd;
diff --git a/library/std/src/os/solid/io.rs b/library/std/src/os/solid/io.rs
index 33cc5a015..f82034663 100644
--- a/library/std/src/os/solid/io.rs
+++ b/library/std/src/os/solid/io.rs
@@ -27,15 +27,21 @@ pub trait FromRawFd {
/// Constructs a new instance of `Self` from the given raw file
/// descriptor.
///
- /// This function **consumes ownership** of the specified file
- /// descriptor. The returned object will take responsibility for closing
- /// it when the object goes out of scope.
+ /// This function is typically used to **consume ownership** of the
+ /// specified file descriptor. When used in this way, the returned object
+ /// will take responsibility for closing it when the object goes out of
+ /// scope.
///
- /// This function is also unsafe as the primitives currently returned
- /// have the contract that they are the sole owner of the file
- /// descriptor they are wrapping. Usage of this function could
- /// accidentally allow violating this contract which can cause memory
- /// unsafety in code that relies on it being true.
+ /// However, consuming ownership is not strictly required. Use a
+ /// [`From<OwnedFd>::from`] implementation for an API which strictly
+ /// consumes ownership.
+ ///
+ /// # Safety
+ ///
+ /// The `fd` passed in must be an [owned file descriptor][io-safety];
+ /// in particular, it must be open.
+ ///
+ /// [io-safety]: io#io-safety
unsafe fn from_raw_fd(fd: RawFd) -> Self;
}
diff --git a/library/std/src/os/uefi/env.rs b/library/std/src/os/uefi/env.rs
new file mode 100644
index 000000000..5d082e7c1
--- /dev/null
+++ b/library/std/src/os/uefi/env.rs
@@ -0,0 +1,92 @@
+//! UEFI-specific extensions to the primitives in the `std::env` module.
+
+#![unstable(feature = "uefi_std", issue = "100499")]
+
+use crate::sync::atomic::{AtomicBool, AtomicPtr, Ordering};
+use crate::{ffi::c_void, ptr::NonNull};
+
+static SYSTEM_TABLE: AtomicPtr<c_void> = AtomicPtr::new(crate::ptr::null_mut());
+static IMAGE_HANDLE: AtomicPtr<c_void> = AtomicPtr::new(crate::ptr::null_mut());
+// Flag to check if BootServices are still valid.
+// Start with assuming that they are not available
+static BOOT_SERVICES_FLAG: AtomicBool = AtomicBool::new(false);
+
+/// Initializes the global System Table and Image Handle pointers.
+///
+/// The standard library requires access to the UEFI System Table and the Application Image Handle
+/// to operate. Those are provided to UEFI Applications via their application entry point. By
+/// calling `init_globals()`, those pointers are retained by the standard library for future use.
+/// Thus this function must be called before any of the standard library services are used.
+///
+/// The pointers are never exposed to any entity outside of this application and it is guaranteed
+/// that, once the application has exited, these pointers are never dereferenced again.
+///
+/// Callers are required to ensure the pointers are valid for the entire lifetime of this
+/// application. In particular, UEFI Boot Services must not be exited while an application with the
+/// standard library is loaded.
+///
+/// # SAFETY
+///
+/// Calling this function more than once will panic.
+pub(crate) unsafe fn init_globals(handle: NonNull<c_void>, system_table: NonNull<c_void>) {
+ IMAGE_HANDLE
+ .compare_exchange(
+ crate::ptr::null_mut(),
+ handle.as_ptr(),
+ Ordering::Release,
+ Ordering::Acquire,
+ )
+ .unwrap();
+ SYSTEM_TABLE
+ .compare_exchange(
+ crate::ptr::null_mut(),
+ system_table.as_ptr(),
+ Ordering::Release,
+ Ordering::Acquire,
+ )
+ .unwrap();
+ BOOT_SERVICES_FLAG.store(true, Ordering::Release)
+}
+
+/// Get the SystemTable Pointer.
+/// If you want to use `BootServices` then please use [`boot_services`] as it performs some
+/// additional checks.
+///
+/// Note: This function panics if the System Table or Image Handle is not initialized
+pub fn system_table() -> NonNull<c_void> {
+ try_system_table().unwrap()
+}
+
+/// Get the ImageHandle Pointer.
+///
+/// Note: This function panics if the System Table or Image Handle is not initialized
+pub fn image_handle() -> NonNull<c_void> {
+ try_image_handle().unwrap()
+}
+
+/// Get the BootServices Pointer.
+/// This function also checks if `ExitBootServices` has already been called.
+pub fn boot_services() -> Option<NonNull<c_void>> {
+ if BOOT_SERVICES_FLAG.load(Ordering::Acquire) {
+ let system_table: NonNull<r_efi::efi::SystemTable> = try_system_table()?.cast();
+ let boot_services = unsafe { (*system_table.as_ptr()).boot_services };
+ NonNull::new(boot_services).map(|x| x.cast())
+ } else {
+ None
+ }
+}
+
+/// Get the SystemTable Pointer.
+/// This function is mostly intended for places where panicking is not an option
+pub(crate) fn try_system_table() -> Option<NonNull<c_void>> {
+ NonNull::new(SYSTEM_TABLE.load(Ordering::Acquire))
+}
+
+/// Get the ImageHandle Pointer.
+/// This function is mostly intended for places where panicking is not an option
+pub(crate) fn try_image_handle() -> Option<NonNull<c_void>> {
+ NonNull::new(IMAGE_HANDLE.load(Ordering::Acquire))
+}
+
+pub(crate) fn disable_boot_services() {
+ BOOT_SERVICES_FLAG.store(false, Ordering::Release)
+}
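From the application side, only the query functions above are public; `init_globals` is `pub(crate)` and is presumably invoked by the std startup code before `main`. A rough usage sketch, assuming a `target_os = "uefi"` build with the unstable `uefi_std` feature:

```rust
// Sketch only: requires a UEFI target and the unstable `uefi_std` feature.
#![feature(uefi_std)]

fn main() {
    use std::os::uefi::env;

    // These panic if the globals were never initialized.
    let _system_table = env::system_table();
    let _image_handle = env::image_handle();

    // Boot services may already be unavailable after ExitBootServices.
    match env::boot_services() {
        Some(bs) => println!("boot services table at {:p}", bs.as_ptr()),
        None => println!("boot services are no longer available"),
    }
}
```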
diff --git a/library/std/src/os/uefi/mod.rs b/library/std/src/os/uefi/mod.rs
new file mode 100644
index 000000000..8ef05eee1
--- /dev/null
+++ b/library/std/src/os/uefi/mod.rs
@@ -0,0 +1,8 @@
+//! Platform-specific extensions to `std` for UEFI.
+
+#![unstable(feature = "uefi_std", issue = "100499")]
+#![doc(cfg(target_os = "uefi"))]
+
+pub mod env;
+#[path = "../windows/ffi.rs"]
+pub mod ffi;
diff --git a/library/std/src/os/unix/fs.rs b/library/std/src/os/unix/fs.rs
index 029de8fbf..0eb4e88cf 100644
--- a/library/std/src/os/unix/fs.rs
+++ b/library/std/src/os/unix/fs.rs
@@ -123,7 +123,7 @@ pub trait FileExt {
buf = &mut tmp[n..];
offset += n as u64;
}
- Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
+ Err(ref e) if e.is_interrupted() => {}
Err(e) => return Err(e),
}
}
@@ -155,7 +155,7 @@ pub trait FileExt {
/// flag fail to respect the offset parameter, always appending to the end
/// of the file instead.
///
- /// It is possible to inadvertantly set this flag, like in the example below.
+ /// It is possible to inadvertently set this flag, like in the example below.
/// Therefore, it is important to be vigilant while changing options to mitigate
/// unexpected behaviour.
///
@@ -258,7 +258,7 @@ pub trait FileExt {
buf = &buf[n..];
offset += n as u64
}
- Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
+ Err(ref e) if e.is_interrupted() => {}
Err(e) => return Err(e),
}
}
diff --git a/library/std/src/os/unix/io/mod.rs b/library/std/src/os/unix/io/mod.rs
index 25b5dbff1..827278f8b 100644
--- a/library/std/src/os/unix/io/mod.rs
+++ b/library/std/src/os/unix/io/mod.rs
@@ -6,7 +6,8 @@
//!
//! This module provides three types for representing file descriptors,
//! with different ownership properties: raw, borrowed, and owned, which are
-//! analogous to types used for representing pointers:
+//! analogous to types used for representing pointers. These types reflect concepts of [I/O
+//! safety][io-safety] on Unix.
//!
//! | Type | Analogous to |
//! | ------------------ | ------------ |
@@ -17,8 +18,8 @@
//! Like raw pointers, `RawFd` values are primitive values. And in new code,
//! they should be considered unsafe to do I/O on (analogous to dereferencing
//! them). Rust did not always provide this guidance, so existing code in the
-//! Rust ecosystem often doesn't mark `RawFd` usage as unsafe. Once the
-//! `io_safety` feature is stable, libraries will be encouraged to migrate,
+//! Rust ecosystem often doesn't mark `RawFd` usage as unsafe.
+//! Libraries are encouraged to migrate,
//! either by adding `unsafe` to APIs that dereference `RawFd` values, or by
//! using to `BorrowedFd` or `OwnedFd` instead.
//!
@@ -54,6 +55,8 @@
//! Like boxes, `OwnedFd` values conceptually own the resource they point to,
//! and free (close) it when they are dropped.
//!
+//! See the [`io` module docs][io-safety] for a general explanation of I/O safety.
+//!
//! ## `/proc/self/mem` and similar OS features
//!
//! Some platforms have special files, such as `/proc/self/mem`, which
@@ -65,15 +68,16 @@
//! to be opened and read from or written must be `unsafe`. Rust's safety guarantees
//! only cover what the program itself can do, and not what entities outside
//! the program can do to it. `/proc/self/mem` is considered to be such an
-//! external entity, along with debugging interfaces, and people with physical access to
-//! the hardware. This is true even in cases where the program is controlling
-//! the external entity.
+//! external entity, along with `/proc/self/fd/*`, debugging interfaces, and people with physical
+//! access to the hardware. This is true even in cases where the program is controlling the external
+//! entity.
//!
//! If you desire to comprehensively prevent programs from reaching out and
//! causing external entities to reach back in and violate memory safety, it's
//! necessary to use *sandboxing*, which is outside the scope of `std`.
//!
//! [`BorrowedFd<'a>`]: crate::os::unix::io::BorrowedFd
+//! [io-safety]: crate::io#io-safety
#![stable(feature = "rust1", since = "1.0.0")]
diff --git a/library/std/src/os/unix/mod.rs b/library/std/src/os/unix/mod.rs
index 401ec1e7a..3724e90af 100644
--- a/library/std/src/os/unix/mod.rs
+++ b/library/std/src/os/unix/mod.rs
@@ -53,6 +53,8 @@ mod platform {
pub use crate::os::haiku::*;
#[cfg(target_os = "horizon")]
pub use crate::os::horizon::*;
+ #[cfg(target_os = "hurd")]
+ pub use crate::os::hurd::*;
#[cfg(target_os = "illumos")]
pub use crate::os::illumos::*;
#[cfg(target_os = "ios")]
diff --git a/library/std/src/os/unix/net/tests.rs b/library/std/src/os/unix/net/tests.rs
index 3d4302e66..6a6af9efd 100644
--- a/library/std/src/os/unix/net/tests.rs
+++ b/library/std/src/os/unix/net/tests.rs
@@ -662,7 +662,7 @@ fn test_send_vectored_fds_unix_stream() {
}
}
-#[cfg(any(target_os = "android", target_os = "linux", target_os = "freebsd"))]
+#[cfg(any(target_os = "android", target_os = "linux"))]
#[test]
#[cfg_attr(target_os = "android", ignore)] // Android SELinux rules prevent creating Unix sockets
fn test_send_vectored_with_ancillary_to_unix_datagram() {
diff --git a/library/std/src/os/unix/process.rs b/library/std/src/os/unix/process.rs
index 2b40b672d..ac5510304 100644
--- a/library/std/src/os/unix/process.rs
+++ b/library/std/src/os/unix/process.rs
@@ -434,6 +434,20 @@ impl From<crate::process::ChildStdin> for OwnedFd {
}
}
+/// Create a `ChildStdin` from the provided `OwnedFd`.
+///
+/// The provided file descriptor must point to a pipe
+/// with the `CLOEXEC` flag set.
+#[stable(feature = "child_stream_from_fd", since = "1.74.0")]
+impl From<OwnedFd> for process::ChildStdin {
+ #[inline]
+ fn from(fd: OwnedFd) -> process::ChildStdin {
+ let fd = sys::fd::FileDesc::from_inner(fd);
+ let pipe = sys::pipe::AnonPipe::from_inner(fd);
+ process::ChildStdin::from_inner(pipe)
+ }
+}
+
#[stable(feature = "io_safety", since = "1.63.0")]
impl AsFd for crate::process::ChildStdout {
#[inline]
@@ -450,6 +464,20 @@ impl From<crate::process::ChildStdout> for OwnedFd {
}
}
+/// Create a `ChildStdout` from the provided `OwnedFd`.
+///
+/// The provided file descriptor must point to a pipe
+/// with the `CLOEXEC` flag set.
+#[stable(feature = "child_stream_from_fd", since = "1.74.0")]
+impl From<OwnedFd> for process::ChildStdout {
+ #[inline]
+ fn from(fd: OwnedFd) -> process::ChildStdout {
+ let fd = sys::fd::FileDesc::from_inner(fd);
+ let pipe = sys::pipe::AnonPipe::from_inner(fd);
+ process::ChildStdout::from_inner(pipe)
+ }
+}
+
#[stable(feature = "io_safety", since = "1.63.0")]
impl AsFd for crate::process::ChildStderr {
#[inline]
@@ -466,6 +494,20 @@ impl From<crate::process::ChildStderr> for OwnedFd {
}
}
+/// Create a `ChildStderr` from the provided `OwnedFd`.
+///
+/// The provided file descriptor must point to a pipe
+/// with the `CLOEXEC` flag set.
+#[stable(feature = "child_stream_from_fd", since = "1.74.0")]
+impl From<OwnedFd> for process::ChildStderr {
+ #[inline]
+ fn from(fd: OwnedFd) -> process::ChildStderr {
+ let fd = sys::fd::FileDesc::from_inner(fd);
+ let pipe = sys::pipe::AnonPipe::from_inner(fd);
+ process::ChildStderr::from_inner(pipe)
+ }
+}
+
/// Returns the OS-assigned process identifier associated with this process's parent.
#[must_use]
#[stable(feature = "unix_ppid", since = "1.27.0")]
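A short usage sketch of the new `From<OwnedFd>` impls, round-tripping a piped stdin through `OwnedFd` and back (Unix only; assumes a `cat` binary is on `PATH`):

```rust
#[cfg(unix)]
fn demo() -> std::io::Result<()> {
    use std::io::Write;
    use std::os::fd::OwnedFd;
    use std::process::{ChildStdin, Command, Stdio};

    // Spawn a child whose stdin is a CLOEXEC pipe created by std.
    let mut child = Command::new("cat")
        .stdin(Stdio::piped())
        .stdout(Stdio::null())
        .spawn()?;
    let stdin = child.stdin.take().expect("stdin was piped");

    // Existing impl: ChildStdin -> OwnedFd. New impl: OwnedFd -> ChildStdin.
    let fd: OwnedFd = stdin.into();
    let mut stdin: ChildStdin = fd.into();

    stdin.write_all(b"hello\n")?;
    drop(stdin); // close the pipe so the child sees EOF
    child.wait()?;
    Ok(())
}
```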
diff --git a/library/std/src/os/wasi/fs.rs b/library/std/src/os/wasi/fs.rs
index 160c8f1ec..3da8c8355 100644
--- a/library/std/src/os/wasi/fs.rs
+++ b/library/std/src/os/wasi/fs.rs
@@ -82,7 +82,7 @@ pub trait FileExt {
buf = &mut tmp[n..];
offset += n as u64;
}
- Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
+ Err(ref e) if e.is_interrupted() => {}
Err(e) => return Err(e),
}
}
@@ -162,7 +162,7 @@ pub trait FileExt {
buf = &buf[n..];
offset += n as u64
}
- Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
+ Err(ref e) if e.is_interrupted() => {}
Err(e) => return Err(e),
}
}
diff --git a/library/std/src/os/windows/io/mod.rs b/library/std/src/os/windows/io/mod.rs
index e2a401fb6..3d3ae3878 100644
--- a/library/std/src/os/windows/io/mod.rs
+++ b/library/std/src/os/windows/io/mod.rs
@@ -6,7 +6,8 @@
//!
//! This module provides three types for representing raw handles and sockets
//! with different ownership properties: raw, borrowed, and owned, which are
-//! analogous to types used for representing pointers:
+//! analogous to types used for representing pointers. These types reflect concepts of [I/O
+//! safety][io-safety] on Windows.
//!
//! | Type | Analogous to |
//! | ---------------------- | ------------ |
@@ -23,8 +24,8 @@
//! And in new code, they should be considered unsafe to do I/O on (analogous
//! to dereferencing them). Rust did not always provide this guidance, so
//! existing code in the Rust ecosystem often doesn't mark `RawHandle` and
-//! `RawSocket` usage as unsafe. Once the `io_safety` feature is stable,
-//! libraries will be encouraged to migrate, either by adding `unsafe` to APIs
+//! `RawSocket` usage as unsafe.
+//! Libraries are encouraged to migrate, either by adding `unsafe` to APIs
//! that dereference `RawHandle` and `RawSocket` values, or by using to
//! `BorrowedHandle`, `BorrowedSocket`, `OwnedHandle`, or `OwnedSocket`.
//!
@@ -45,8 +46,11 @@
//! Like boxes, `OwnedHandle` and `OwnedSocket` values conceptually own the
//! resource they point to, and free (close) it when they are dropped.
//!
+//! See the [`io` module docs][io-safety] for a general explanation of I/O safety.
+//!
//! [`BorrowedHandle<'a>`]: crate::os::windows::io::BorrowedHandle
//! [`BorrowedSocket<'a>`]: crate::os::windows::io::BorrowedSocket
+//! [io-safety]: crate::io#io-safety
#![stable(feature = "rust1", since = "1.0.0")]
diff --git a/library/std/src/os/windows/io/raw.rs b/library/std/src/os/windows/io/raw.rs
index 1759e2e7f..770583a9c 100644
--- a/library/std/src/os/windows/io/raw.rs
+++ b/library/std/src/os/windows/io/raw.rs
@@ -62,7 +62,7 @@ pub trait FromRawHandle {
/// # Safety
///
/// The `handle` passed in must:
- /// - be a valid an open handle,
+ /// - be an [owned handle][io-safety]; in particular, it must be open.
/// - be a handle for a resource that may be freed via [`CloseHandle`]
/// (as opposed to `RegCloseKey` or other close functions).
///
@@ -71,6 +71,7 @@ pub trait FromRawHandle {
///
/// [`CloseHandle`]: https://docs.microsoft.com/en-us/windows/win32/api/handleapi/nf-handleapi-closehandle
/// [here]: https://devblogs.microsoft.com/oldnewthing/20040302-00/?p=40443
+ /// [io-safety]: io#io-safety
#[stable(feature = "from_raw_os", since = "1.1.0")]
unsafe fn from_raw_handle(handle: RawHandle) -> Self;
}
@@ -207,10 +208,11 @@ pub trait FromRawSocket {
/// # Safety
///
/// The `socket` passed in must:
- /// - be a valid an open socket,
+ /// - be an [owned socket][io-safety]; in particular, it must be open.
/// - be a socket that may be freed via [`closesocket`].
///
/// [`closesocket`]: https://docs.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-closesocket
+ /// [io-safety]: io#io-safety
#[stable(feature = "from_raw_os", since = "1.1.0")]
unsafe fn from_raw_socket(sock: RawSocket) -> Self;
}
diff --git a/library/std/src/os/windows/io/socket.rs b/library/std/src/os/windows/io/socket.rs
index 6359835ca..c80b9e284 100644
--- a/library/std/src/os/windows/io/socket.rs
+++ b/library/std/src/os/windows/io/socket.rs
@@ -116,7 +116,7 @@ impl BorrowedSocket<'_> {
let mut info = unsafe { mem::zeroed::<sys::c::WSAPROTOCOL_INFOW>() };
let result = unsafe {
sys::c::WSADuplicateSocketW(
- self.as_raw_socket(),
+ self.as_raw_socket() as sys::c::SOCKET,
sys::c::GetCurrentProcessId(),
&mut info,
)
@@ -134,7 +134,7 @@ impl BorrowedSocket<'_> {
};
if socket != sys::c::INVALID_SOCKET {
- unsafe { Ok(OwnedSocket::from_raw_socket(socket)) }
+ unsafe { Ok(OwnedSocket::from_raw_socket(socket as RawSocket)) }
} else {
let error = unsafe { sys::c::WSAGetLastError() };
@@ -158,7 +158,7 @@ impl BorrowedSocket<'_> {
}
unsafe {
- let socket = OwnedSocket::from_raw_socket(socket);
+ let socket = OwnedSocket::from_raw_socket(socket as RawSocket);
socket.set_no_inherit()?;
Ok(socket)
}
@@ -211,7 +211,7 @@ impl Drop for OwnedSocket {
#[inline]
fn drop(&mut self) {
unsafe {
- let _ = sys::c::closesocket(self.socket);
+ let _ = sys::c::closesocket(self.socket as sys::c::SOCKET);
}
}
}
diff --git a/library/std/src/os/windows/process.rs b/library/std/src/os/windows/process.rs
index 073168cf2..d00e79476 100644
--- a/library/std/src/os/windows/process.rs
+++ b/library/std/src/os/windows/process.rs
@@ -106,6 +106,45 @@ impl IntoRawHandle for process::ChildStderr {
}
}
+/// Create a `ChildStdin` from the provided `OwnedHandle`.
+///
+/// The provided handle must be asynchronous, as reading and
+/// writing from and to it is implemented using asynchronous APIs.
+#[stable(feature = "child_stream_from_fd", since = "1.74.0")]
+impl From<OwnedHandle> for process::ChildStdin {
+ fn from(handle: OwnedHandle) -> process::ChildStdin {
+ let handle = sys::handle::Handle::from_inner(handle);
+ let pipe = sys::pipe::AnonPipe::from_inner(handle);
+ process::ChildStdin::from_inner(pipe)
+ }
+}
+
+/// Create a `ChildStdout` from the provided `OwnedHandle`.
+///
+/// The provided handle must be asynchronous, as reading and
+/// writing from and to it is implemented using asynchronous APIs.
+#[stable(feature = "child_stream_from_fd", since = "1.74.0")]
+impl From<OwnedHandle> for process::ChildStdout {
+ fn from(handle: OwnedHandle) -> process::ChildStdout {
+ let handle = sys::handle::Handle::from_inner(handle);
+ let pipe = sys::pipe::AnonPipe::from_inner(handle);
+ process::ChildStdout::from_inner(pipe)
+ }
+}
+
+/// Create a `ChildStderr` from the provided `OwnedHandle`.
+///
+/// The provided handle must be asynchronous, as reading and
+/// writing from and to it is implemented using asynchronous APIs.
+#[stable(feature = "child_stream_from_fd", since = "1.74.0")]
+impl From<OwnedHandle> for process::ChildStderr {
+ fn from(handle: OwnedHandle) -> process::ChildStderr {
+ let handle = sys::handle::Handle::from_inner(handle);
+ let pipe = sys::pipe::AnonPipe::from_inner(handle);
+ process::ChildStderr::from_inner(pipe)
+ }
+}
+
/// Windows-specific extensions to [`process::ExitStatus`].
///
/// This trait is sealed: it cannot be implemented outside the standard library.
@@ -192,6 +231,66 @@ pub trait CommandExt: Sealed {
/// ```
#[unstable(feature = "windows_process_extensions_async_pipes", issue = "98289")]
fn async_pipes(&mut self, always_async: bool) -> &mut process::Command;
+
+ /// Sets a raw attribute on the command, providing extended configuration options for Windows processes.
+ ///
+ /// This method allows you to specify custom attributes for a child process on Windows systems using raw attribute values.
+ /// Raw attributes provide extended configurability for process creation, but their usage can be complex and potentially unsafe.
+ ///
+ /// The `attribute` parameter specifies the raw attribute to be set, while the `value` parameter holds the value associated with that attribute.
+ /// Please refer to the [`windows-rs`](https://microsoft.github.io/windows-docs-rs/doc/windows/) documentation or the [`Win32 API documentation`](https://learn.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-updateprocthreadattribute) for detailed information about available attributes and their meanings.
+ ///
+ /// # Note
+ ///
+ /// The maximum number of raw attributes is the value of [`u32::MAX`].
+ /// If this limit is exceeded, the call to [`process::Command::spawn`] will return an `Error` indicating that the maximum number of attributes has been exceeded.
+ /// # Safety
+ ///
+ /// The usage of raw attributes is potentially unsafe and should be done with caution. Incorrect attribute values or improper configuration can lead to unexpected behavior or errors.
+ ///
+ /// # Example
+ ///
+ /// The following example demonstrates how to create a child process with a specific parent process ID using a raw attribute.
+ ///
+ /// ```rust
+ /// #![feature(windows_process_extensions_raw_attribute)]
+ /// use std::os::windows::{process::CommandExt, io::AsRawHandle};
+ /// use std::process::Command;
+ ///
+ /// # struct ProcessDropGuard(std::process::Child);
+ /// # impl Drop for ProcessDropGuard {
+ /// # fn drop(&mut self) {
+ /// # let _ = self.0.kill();
+ /// # }
+ /// # }
+ ///
+ /// let parent = Command::new("cmd").spawn()?;
+ ///
+ /// let mut child_cmd = Command::new("cmd");
+ ///
+ /// const PROC_THREAD_ATTRIBUTE_PARENT_PROCESS: usize = 0x00020000;
+ ///
+ /// unsafe {
+ /// child_cmd.raw_attribute(PROC_THREAD_ATTRIBUTE_PARENT_PROCESS, parent.as_raw_handle() as isize);
+ /// }
+ /// #
+ /// # let parent = ProcessDropGuard(parent);
+ ///
+ /// let mut child = child_cmd.spawn()?;
+ ///
+ /// # child.kill()?;
+ /// # Ok::<(), std::io::Error>(())
+ /// ```
+ ///
+ /// # Safety Note
+ ///
+ /// Remember that improper use of raw attributes can lead to undefined behavior or security vulnerabilities. Always consult the documentation and ensure proper attribute values are used.
+ #[unstable(feature = "windows_process_extensions_raw_attribute", issue = "114854")]
+ unsafe fn raw_attribute<T: Copy + Send + Sync + 'static>(
+ &mut self,
+ attribute: usize,
+ value: T,
+ ) -> &mut process::Command;
}
#[stable(feature = "windows_process_extensions", since = "1.16.0")]
@@ -219,6 +318,15 @@ impl CommandExt for process::Command {
let _ = always_async;
self
}
+
+ unsafe fn raw_attribute<T: Copy + Send + Sync + 'static>(
+ &mut self,
+ attribute: usize,
+ value: T,
+ ) -> &mut process::Command {
+ self.as_inner_mut().raw_attribute(attribute, value);
+ self
+ }
}
#[unstable(feature = "windows_process_extensions_main_thread_handle", issue = "96723")]
diff --git a/library/std/src/os/xous/ffi.rs b/library/std/src/os/xous/ffi.rs
new file mode 100644
index 000000000..8be7fbb10
--- /dev/null
+++ b/library/std/src/os/xous/ffi.rs
@@ -0,0 +1,647 @@
+#![allow(dead_code)]
+#![allow(unused_variables)]
+#![stable(feature = "rust1", since = "1.0.0")]
+
+#[path = "../unix/ffi/os_str.rs"]
+mod os_str;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::os_str::{OsStrExt, OsStringExt};
+
+mod definitions;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use definitions::*;
+
+fn lend_mut_impl(
+ connection: Connection,
+ opcode: usize,
+ data: &mut [u8],
+ arg1: usize,
+ arg2: usize,
+ blocking: bool,
+) -> Result<(usize, usize), Error> {
+ let mut a0 = if blocking { Syscall::SendMessage } else { Syscall::TrySendMessage } as usize;
+ let mut a1: usize = connection.try_into().unwrap();
+ let mut a2 = InvokeType::LendMut as usize;
+ let a3 = opcode;
+ let a4 = data.as_mut_ptr() as usize;
+ let a5 = data.len();
+ let a6 = arg1;
+ let a7 = arg2;
+
+ unsafe {
+ core::arch::asm!(
+ "ecall",
+ inlateout("a0") a0,
+ inlateout("a1") a1,
+ inlateout("a2") a2,
+ inlateout("a3") a3 => _,
+ inlateout("a4") a4 => _,
+ inlateout("a5") a5 => _,
+ inlateout("a6") a6 => _,
+ inlateout("a7") a7 => _,
+ )
+ };
+
+ let result = a0;
+
+ if result == SyscallResult::MemoryReturned as usize {
+ Ok((a1, a2))
+ } else if result == SyscallResult::Error as usize {
+ Err(a1.into())
+ } else {
+ Err(Error::InternalError)
+ }
+}
+
+pub(crate) fn lend_mut(
+ connection: Connection,
+ opcode: usize,
+ data: &mut [u8],
+ arg1: usize,
+ arg2: usize,
+) -> Result<(usize, usize), Error> {
+ lend_mut_impl(connection, opcode, data, arg1, arg2, true)
+}
+
+pub(crate) fn try_lend_mut(
+ connection: Connection,
+ opcode: usize,
+ data: &mut [u8],
+ arg1: usize,
+ arg2: usize,
+) -> Result<(usize, usize), Error> {
+ lend_mut_impl(connection, opcode, data, arg1, arg2, false)
+}
+
+fn lend_impl(
+ connection: Connection,
+ opcode: usize,
+ data: &[u8],
+ arg1: usize,
+ arg2: usize,
+ blocking: bool,
+) -> Result<(usize, usize), Error> {
+ let mut a0 = if blocking { Syscall::SendMessage } else { Syscall::TrySendMessage } as usize;
+ let a1: usize = connection.try_into().unwrap();
+ let a2 = InvokeType::Lend as usize;
+ let a3 = opcode;
+ let a4 = data.as_ptr() as usize;
+ let a5 = data.len();
+ let mut a6 = arg1;
+ let mut a7 = arg2;
+
+ unsafe {
+ core::arch::asm!(
+ "ecall",
+ inlateout("a0") a0,
+ inlateout("a1") a1 => _,
+ inlateout("a2") a2 => _,
+ inlateout("a3") a3 => _,
+ inlateout("a4") a4 => _,
+ inlateout("a5") a5 => _,
+ inlateout("a6") a6,
+ inlateout("a7") a7,
+ )
+ };
+
+ let result = a0;
+
+ if result == SyscallResult::MemoryReturned as usize {
+ Ok((a6, a7))
+ } else if result == SyscallResult::Error as usize {
+ Err(a1.into())
+ } else {
+ Err(Error::InternalError)
+ }
+}
+
+pub(crate) fn lend(
+ connection: Connection,
+ opcode: usize,
+ data: &[u8],
+ arg1: usize,
+ arg2: usize,
+) -> Result<(usize, usize), Error> {
+ lend_impl(connection, opcode, data, arg1, arg2, true)
+}
+
+pub(crate) fn try_lend(
+ connection: Connection,
+ opcode: usize,
+ data: &[u8],
+ arg1: usize,
+ arg2: usize,
+) -> Result<(usize, usize), Error> {
+ lend_impl(connection, opcode, data, arg1, arg2, false)
+}
+
+fn scalar_impl(connection: Connection, args: [usize; 5], blocking: bool) -> Result<(), Error> {
+ let mut a0 = if blocking { Syscall::SendMessage } else { Syscall::TrySendMessage } as usize;
+ let mut a1: usize = connection.try_into().unwrap();
+ let a2 = InvokeType::Scalar as usize;
+ let a3 = args[0];
+ let a4 = args[1];
+ let a5 = args[2];
+ let a6 = args[3];
+ let a7 = args[4];
+
+ unsafe {
+ core::arch::asm!(
+ "ecall",
+ inlateout("a0") a0,
+ inlateout("a1") a1,
+ inlateout("a2") a2 => _,
+ inlateout("a3") a3 => _,
+ inlateout("a4") a4 => _,
+ inlateout("a5") a5 => _,
+ inlateout("a6") a6 => _,
+ inlateout("a7") a7 => _,
+ )
+ };
+
+ let result = a0;
+
+ if result == SyscallResult::Ok as usize {
+ Ok(())
+ } else if result == SyscallResult::Error as usize {
+ Err(a1.into())
+ } else {
+ Err(Error::InternalError)
+ }
+}
+
+pub(crate) fn scalar(connection: Connection, args: [usize; 5]) -> Result<(), Error> {
+ scalar_impl(connection, args, true)
+}
+
+pub(crate) fn try_scalar(connection: Connection, args: [usize; 5]) -> Result<(), Error> {
+ scalar_impl(connection, args, false)
+}
+
+fn blocking_scalar_impl(
+ connection: Connection,
+ args: [usize; 5],
+ blocking: bool,
+) -> Result<[usize; 5], Error> {
+ let mut a0 = if blocking { Syscall::SendMessage } else { Syscall::TrySendMessage } as usize;
+ let mut a1: usize = connection.try_into().unwrap();
+ let mut a2 = InvokeType::BlockingScalar as usize;
+ let mut a3 = args[0];
+ let mut a4 = args[1];
+ let mut a5 = args[2];
+ let a6 = args[3];
+ let a7 = args[4];
+
+ unsafe {
+ core::arch::asm!(
+ "ecall",
+ inlateout("a0") a0,
+ inlateout("a1") a1,
+ inlateout("a2") a2,
+ inlateout("a3") a3,
+ inlateout("a4") a4,
+ inlateout("a5") a5,
+ inlateout("a6") a6 => _,
+ inlateout("a7") a7 => _,
+ )
+ };
+
+ let result = a0;
+
+ if result == SyscallResult::Scalar1 as usize {
+ Ok([a1, 0, 0, 0, 0])
+ } else if result == SyscallResult::Scalar2 as usize {
+ Ok([a1, a2, 0, 0, 0])
+ } else if result == SyscallResult::Scalar5 as usize {
+ Ok([a1, a2, a3, a4, a5])
+ } else if result == SyscallResult::Error as usize {
+ Err(a1.into())
+ } else {
+ Err(Error::InternalError)
+ }
+}
+
+pub(crate) fn blocking_scalar(
+ connection: Connection,
+ args: [usize; 5],
+) -> Result<[usize; 5], Error> {
+ blocking_scalar_impl(connection, args, true)
+}
+
+pub(crate) fn try_blocking_scalar(
+ connection: Connection,
+ args: [usize; 5],
+) -> Result<[usize; 5], Error> {
+ blocking_scalar_impl(connection, args, false)
+}
+
+fn connect_impl(address: ServerAddress, blocking: bool) -> Result<Connection, Error> {
+ let a0 = if blocking { Syscall::Connect } else { Syscall::TryConnect } as usize;
+ let address: [u32; 4] = address.into();
+ let a1: usize = address[0].try_into().unwrap();
+ let a2: usize = address[1].try_into().unwrap();
+ let a3: usize = address[2].try_into().unwrap();
+ let a4: usize = address[3].try_into().unwrap();
+ let a5 = 0;
+ let a6 = 0;
+ let a7 = 0;
+
+ let mut result: usize;
+ let mut value: usize;
+
+ unsafe {
+ core::arch::asm!(
+ "ecall",
+ inlateout("a0") a0 => result,
+ inlateout("a1") a1 => value,
+ inlateout("a2") a2 => _,
+ inlateout("a3") a3 => _,
+ inlateout("a4") a4 => _,
+ inlateout("a5") a5 => _,
+ inlateout("a6") a6 => _,
+ inlateout("a7") a7 => _,
+ )
+ };
+ if result == SyscallResult::ConnectionId as usize {
+ Ok(value.try_into().unwrap())
+ } else if result == SyscallResult::Error as usize {
+ Err(value.into())
+ } else {
+ Err(Error::InternalError)
+ }
+}
+
+/// Connect to a Xous server represented by the specified `address`.
+///
+/// The current thread will block until the server is available. Returns
+/// an error if the server cannot accept any more connections.
+pub(crate) fn connect(address: ServerAddress) -> Result<Connection, Error> {
+ connect_impl(address, true)
+}
+
+/// Attempt to connect to a Xous server represented by the specified `address`.
+///
+/// If the server does not exist then None is returned.
+pub(crate) fn try_connect(address: ServerAddress) -> Result<Option<Connection>, Error> {
+ match connect_impl(address, false) {
+ Ok(conn) => Ok(Some(conn)),
+ Err(Error::ServerNotFound) => Ok(None),
+ Err(e) => Err(e),
+ }
+}
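A hedged usage fragment (not part of the patch, written as if from elsewhere inside `std`) for the `connect` wrapper above; the 16-byte server name is only an example:

    // A server address is exactly 16 bytes, so a 16-byte string converts directly.
    let addr: ServerAddress = "xous-log-server ".try_into().expect("invalid server name");
    // Blocks until the server exists, then yields a `Connection` for later syscalls.
    let conn: Connection = connect(addr).expect("connection refused");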
+
+/// Terminate the current process and return the specified code to the parent process.
+pub(crate) fn exit(return_code: u32) -> ! {
+ let a0 = Syscall::TerminateProcess as usize;
+ let a1 = return_code as usize;
+ let a2 = 0;
+ let a3 = 0;
+ let a4 = 0;
+ let a5 = 0;
+ let a6 = 0;
+ let a7 = 0;
+
+ unsafe {
+ core::arch::asm!(
+ "ecall",
+ in("a0") a0,
+ in("a1") a1,
+ in("a2") a2,
+ in("a3") a3,
+ in("a4") a4,
+ in("a5") a5,
+ in("a6") a6,
+ in("a7") a7,
+ )
+ };
+ unreachable!();
+}
+
+/// Suspend the current thread and allow another thread to run. This thread may
+/// continue executing again immediately if there are no other threads available
+/// to run on the system.
+pub(crate) fn do_yield() {
+ let a0 = Syscall::Yield as usize;
+ let a1 = 0;
+ let a2 = 0;
+ let a3 = 0;
+ let a4 = 0;
+ let a5 = 0;
+ let a6 = 0;
+ let a7 = 0;
+
+ unsafe {
+ core::arch::asm!(
+ "ecall",
+ inlateout("a0") a0 => _,
+ inlateout("a1") a1 => _,
+ inlateout("a2") a2 => _,
+ inlateout("a3") a3 => _,
+ inlateout("a4") a4 => _,
+ inlateout("a5") a5 => _,
+ inlateout("a6") a6 => _,
+ inlateout("a7") a7 => _,
+ )
+ };
+}
+
+/// Allocate memory from the system. An optional physical and/or virtual address
+/// may be specified in order to ensure memory is allocated at specific offsets,
+/// otherwise the kernel will select an address.
+///
+/// # Safety
+///
+/// This function is safe unless a virtual address is specified. In that case,
+/// the kernel will return an alias to the existing range. This violates Rust's
+/// pointer uniqueness guarantee.
+pub(crate) unsafe fn map_memory<T>(
+ phys: Option<core::ptr::NonNull<T>>,
+ virt: Option<core::ptr::NonNull<T>>,
+ count: usize,
+ flags: MemoryFlags,
+) -> Result<&'static mut [T], Error> {
+ let mut a0 = Syscall::MapMemory as usize;
+ let mut a1 = phys.map(|p| p.as_ptr() as usize).unwrap_or_default();
+ let mut a2 = virt.map(|p| p.as_ptr() as usize).unwrap_or_default();
+ let a3 = count * core::mem::size_of::<T>();
+ let a4 = flags.bits();
+ let a5 = 0;
+ let a6 = 0;
+ let a7 = 0;
+
+ unsafe {
+ core::arch::asm!(
+ "ecall",
+ inlateout("a0") a0,
+ inlateout("a1") a1,
+ inlateout("a2") a2,
+ inlateout("a3") a3 => _,
+ inlateout("a4") a4 => _,
+ inlateout("a5") a5 => _,
+ inlateout("a6") a6 => _,
+ inlateout("a7") a7 => _,
+ )
+ };
+
+ let result = a0;
+
+ if result == SyscallResult::MemoryRange as usize {
+ let start = core::ptr::from_exposed_addr_mut::<T>(a1);
+ let len = a2 / core::mem::size_of::<T>();
+ let end = unsafe { start.add(len) };
+ Ok(unsafe { core::slice::from_raw_parts_mut(start, len) })
+ } else if result == SyscallResult::Error as usize {
+ Err(a1.into())
+ } else {
+ Err(Error::InternalError)
+ }
+}
+
+/// Destroy the given memory, returning it to the kernel.
+///
+/// Safety: The memory pointed to by `range` should not be used after this
+/// function returns, even if this function returns Err().
+pub(crate) unsafe fn unmap_memory<T>(range: *mut [T]) -> Result<(), Error> {
+ let mut a0 = Syscall::UnmapMemory as usize;
+ let mut a1 = range.as_mut_ptr() as usize;
+ let a2 = range.len();
+ let a3 = 0;
+ let a4 = 0;
+ let a5 = 0;
+ let a6 = 0;
+ let a7 = 0;
+
+ unsafe {
+ core::arch::asm!(
+ "ecall",
+ inlateout("a0") a0,
+ inlateout("a1") a1,
+ inlateout("a2") a2 => _,
+ inlateout("a3") a3 => _,
+ inlateout("a4") a4 => _,
+ inlateout("a5") a5 => _,
+ inlateout("a6") a6 => _,
+ inlateout("a7") a7 => _,
+ )
+ };
+
+ let result = a0;
+
+ if result == SyscallResult::Ok as usize {
+ Ok(())
+ } else if result == SyscallResult::Error as usize {
+ Err(a1.into())
+ } else {
+ Err(Error::InternalError)
+ }
+}
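A hedged sketch (not part of the patch) pairing `map_memory` and `unmap_memory`, letting the kernel choose both the physical and virtual addresses:

    // Allocate one page (4096 u8 elements) of readable, writable memory.
    let page: &'static mut [u8] =
        unsafe { map_memory::<u8>(None, None, 4096, MemoryFlags::R | MemoryFlags::W) }
            .expect("out of memory");
    page[0] = 0xEE;

    // Hand the whole range back; the slice must not be touched afterwards.
    let range: *mut [u8] = page;
    unsafe { unmap_memory(range).expect("unmap failed") };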
+
+/// Adjust the memory flags for the given range. This can be used to remove flags
+/// from a given region in order to harden memory access. Note that flags may
+/// only be removed and may never be added.
+///
+/// Safety: The memory pointed to by `range` may become inaccessible or have its
+/// mutability removed. It is up to the caller to ensure that the flags specified
+/// by `new_flags` are upheld, otherwise the program will crash.
+pub(crate) unsafe fn update_memory_flags<T>(
+ range: *mut [T],
+ new_flags: MemoryFlags,
+) -> Result<(), Error> {
+ let mut a0 = Syscall::UpdateMemoryFlags as usize;
+ let mut a1 = range.as_mut_ptr() as usize;
+ let a2 = range.len();
+ let a3 = new_flags.bits();
+ let a4 = 0; // Process ID is currently None
+ let a5 = 0;
+ let a6 = 0;
+ let a7 = 0;
+
+ unsafe {
+ core::arch::asm!(
+ "ecall",
+ inlateout("a0") a0,
+ inlateout("a1") a1,
+ inlateout("a2") a2 => _,
+ inlateout("a3") a3 => _,
+ inlateout("a4") a4 => _,
+ inlateout("a5") a5 => _,
+ inlateout("a6") a6 => _,
+ inlateout("a7") a7 => _,
+ )
+ };
+
+ let result = a0;
+
+ if result == SyscallResult::Ok as usize {
+ Ok(())
+ } else if result == SyscallResult::Error as usize {
+ Err(a1.into())
+ } else {
+ Err(Error::InternalError)
+ }
+}
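Continuing that sketch, a mapping can also be hardened after the fact; `range: *mut [u8]` is assumed to refer to a still-mapped region:

    // Flags may only be removed, so passing just `R` drops write access.
    unsafe { update_memory_flags(range, MemoryFlags::R) }.expect("failed to harden mapping");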
+
+/// Create a thread with a given stack and up to four arguments
+pub(crate) fn create_thread(
+ start: *mut usize,
+ stack: *mut [u8],
+ arg0: usize,
+ arg1: usize,
+ arg2: usize,
+ arg3: usize,
+) -> Result<ThreadId, Error> {
+ let mut a0 = Syscall::CreateThread as usize;
+ let mut a1 = start as usize;
+ let a2 = stack.as_mut_ptr() as usize;
+ let a3 = stack.len();
+ let a4 = arg0;
+ let a5 = arg1;
+ let a6 = arg2;
+ let a7 = arg3;
+
+ unsafe {
+ core::arch::asm!(
+ "ecall",
+ inlateout("a0") a0,
+ inlateout("a1") a1,
+ inlateout("a2") a2 => _,
+ inlateout("a3") a3 => _,
+ inlateout("a4") a4 => _,
+ inlateout("a5") a5 => _,
+ inlateout("a6") a6 => _,
+ inlateout("a7") a7 => _,
+ )
+ };
+
+ let result = a0;
+
+ if result == SyscallResult::ThreadId as usize {
+ Ok(a1.into())
+ } else if result == SyscallResult::Error as usize {
+ Err(a1.into())
+ } else {
+ Err(Error::InternalError)
+ }
+}
+
+/// Wait for the given thread to terminate and return the exit code from that thread.
+pub(crate) fn join_thread(thread_id: ThreadId) -> Result<usize, Error> {
+ let mut a0 = Syscall::JoinThread as usize;
+ let mut a1 = thread_id.into();
+ let a2 = 0;
+ let a3 = 0;
+ let a4 = 0;
+ let a5 = 0;
+ let a6 = 0;
+ let a7 = 0;
+
+ unsafe {
+ core::arch::asm!(
+ "ecall",
+ inlateout("a0") a0,
+ inlateout("a1") a1,
+ inlateout("a2") a2 => _,
+ inlateout("a3") a3 => _,
+ inlateout("a4") a4 => _,
+ inlateout("a5") a5 => _,
+ inlateout("a6") a6 => _,
+ inlateout("a7") a7 => _,
+ )
+ };
+
+ let result = a0;
+
+ if result == SyscallResult::Scalar1 as usize {
+ Ok(a1)
+ } else if result == SyscallResult::Scalar2 as usize {
+ Ok(a1)
+ } else if result == SyscallResult::Scalar5 as usize {
+ Ok(a1)
+ } else if result == SyscallResult::Error as usize {
+ Err(a1.into())
+ } else {
+ Err(Error::InternalError)
+ }
+}
+
+/// Get the current thread's ID
+pub(crate) fn thread_id() -> Result<ThreadId, Error> {
+ let mut a0 = Syscall::GetThreadId as usize;
+ let mut a1 = 0;
+ let a2 = 0;
+ let a3 = 0;
+ let a4 = 0;
+ let a5 = 0;
+ let a6 = 0;
+ let a7 = 0;
+
+ unsafe {
+ core::arch::asm!(
+ "ecall",
+ inlateout("a0") a0,
+ inlateout("a1") a1,
+ inlateout("a2") a2 => _,
+ inlateout("a3") a3 => _,
+ inlateout("a4") a4 => _,
+ inlateout("a5") a5 => _,
+ inlateout("a6") a6 => _,
+ inlateout("a7") a7 => _,
+ )
+ };
+
+ let result = a0;
+
+ if result == SyscallResult::ThreadId as usize {
+ Ok(a1.into())
+ } else if result == SyscallResult::Error as usize {
+ Err(a1.into())
+ } else {
+ Err(Error::InternalError)
+ }
+}
+
+/// Adjust the given `knob` limit to match the new value `new`. The current value must
+/// match the `current` in order for this to take effect.
+///
+/// On success the new value is returned; if the update does not take effect because the
+/// kernel's current value did not match `current`, the previously set value is returned instead.
+///
+/// An error is generated if the `knob` is not a valid limit, or if the call
+/// would not succeed.
+pub(crate) fn adjust_limit(knob: Limits, current: usize, new: usize) -> Result<usize, Error> {
+ let mut a0 = Syscall::AdjustProcessLimit as usize;
+ let mut a1 = knob as usize;
+ let a2 = current;
+ let a3 = new;
+ let a4 = 0;
+ let a5 = 0;
+ let a6 = 0;
+ let a7 = 0;
+
+ unsafe {
+ core::arch::asm!(
+ "ecall",
+ inlateout("a0") a0,
+ inlateout("a1") a1,
+ inlateout("a2") a2 => _,
+ inlateout("a3") a3 => _,
+ inlateout("a4") a4 => _,
+ inlateout("a5") a5 => _,
+ inlateout("a6") a6 => _,
+ inlateout("a7") a7 => _,
+ )
+ };
+
+ let result = a0;
+
+ if result == SyscallResult::Scalar2 as usize && a1 == knob as usize {
+ Ok(a2)
+ } else if result == SyscallResult::Scalar5 as usize && a1 == knob as usize {
+ Ok(a1)
+ } else if result == SyscallResult::Error as usize {
+ Err(a1.into())
+ } else {
+ Err(Error::InternalError)
+ }
+}
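A hedged fragment for `adjust_limit` above; `known_heap_max` and `desired_heap_max` are hypothetical values tracked by the caller:

    // The update only takes effect if the kernel's current value equals `known_heap_max`.
    let limit = adjust_limit(Limits::HeapMaximum, known_heap_max, desired_heap_max)
        .expect("invalid limit");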
diff --git a/library/std/src/os/xous/ffi/definitions.rs b/library/std/src/os/xous/ffi/definitions.rs
new file mode 100644
index 000000000..345005bcc
--- /dev/null
+++ b/library/std/src/os/xous/ffi/definitions.rs
@@ -0,0 +1,283 @@
+mod memoryflags;
+pub(crate) use memoryflags::*;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+/// Indicates a particular syscall number as used by the Xous kernel.
+#[derive(Copy, Clone)]
+#[repr(usize)]
+pub enum Syscall {
+ MapMemory = 2,
+ Yield = 3,
+ UpdateMemoryFlags = 12,
+ ReceiveMessage = 15,
+ SendMessage = 16,
+ Connect = 17,
+ CreateThread = 18,
+ UnmapMemory = 19,
+ ReturnMemory = 20,
+ TerminateProcess = 22,
+ TrySendMessage = 24,
+ TryConnect = 25,
+ GetThreadId = 32,
+ JoinThread = 36,
+ AdjustProcessLimit = 38,
+ ReturnScalar = 40,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+/// Copies of the kernel's syscall result codes, defined here for when we're running
+/// in environments without libxous.
+#[derive(Copy, Clone)]
+#[repr(usize)]
+pub enum SyscallResult {
+ Ok = 0,
+ Error = 1,
+ MemoryRange = 3,
+ ConnectionId = 7,
+ Message = 9,
+ ThreadId = 10,
+ Scalar1 = 14,
+ Scalar2 = 15,
+ MemoryReturned = 18,
+ Scalar5 = 20,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Copy, Clone)]
+/// A list of all known errors that may be returned by the Xous kernel.
+#[repr(usize)]
+pub enum Error {
+ NoError = 0,
+ BadAlignment = 1,
+ BadAddress = 2,
+ OutOfMemory = 3,
+ MemoryInUse = 4,
+ InterruptNotFound = 5,
+ InterruptInUse = 6,
+ InvalidString = 7,
+ ServerExists = 8,
+ ServerNotFound = 9,
+ ProcessNotFound = 10,
+ ProcessNotChild = 11,
+ ProcessTerminated = 12,
+ Timeout = 13,
+ InternalError = 14,
+ ServerQueueFull = 15,
+ ThreadNotAvailable = 16,
+ UnhandledSyscall = 17,
+ InvalidSyscall = 18,
+ ShareViolation = 19,
+ InvalidThread = 20,
+ InvalidPid = 21,
+ UnknownError = 22,
+ AccessDenied = 23,
+ UseBeforeInit = 24,
+ DoubleFree = 25,
+ DebugInProgress = 26,
+ InvalidLimit = 27,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl From<usize> for Error {
+ fn from(src: usize) -> Self {
+ match src {
+ 0 => Self::NoError,
+ 1 => Self::BadAlignment,
+ 2 => Self::BadAddress,
+ 3 => Self::OutOfMemory,
+ 4 => Self::MemoryInUse,
+ 5 => Self::InterruptNotFound,
+ 6 => Self::InterruptInUse,
+ 7 => Self::InvalidString,
+ 8 => Self::ServerExists,
+ 9 => Self::ServerNotFound,
+ 10 => Self::ProcessNotFound,
+ 11 => Self::ProcessNotChild,
+ 12 => Self::ProcessTerminated,
+ 13 => Self::Timeout,
+ 14 => Self::InternalError,
+ 15 => Self::ServerQueueFull,
+ 16 => Self::ThreadNotAvailable,
+ 17 => Self::UnhandledSyscall,
+ 18 => Self::InvalidSyscall,
+ 19 => Self::ShareViolation,
+ 20 => Self::InvalidThread,
+ 21 => Self::InvalidPid,
+ 23 => Self::AccessDenied,
+ 24 => Self::UseBeforeInit,
+ 25 => Self::DoubleFree,
+ 26 => Self::DebugInProgress,
+ 27 => Self::InvalidLimit,
+ 22 | _ => Self::UnknownError,
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl From<i32> for Error {
+ fn from(src: i32) -> Self {
+ let Ok(src) = core::convert::TryInto::<usize>::try_into(src) else {
+ return Self::UnknownError;
+ };
+ src.into()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl core::fmt::Display for Error {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ write!(
+ f,
+ "{}",
+ match self {
+ Error::NoError => "no error occurred",
+ Error::BadAlignment => "memory was not properly aligned",
+ Error::BadAddress => "an invalid address was supplied",
+ Error::OutOfMemory => "the process or service has run out of memory",
+ Error::MemoryInUse => "the requested address is in use",
+ Error::InterruptNotFound =>
+ "the requested interrupt does not exist on this platform",
+ Error::InterruptInUse => "the requested interrupt is currently in use",
+ Error::InvalidString => "the specified string was not formatted correctly",
+ Error::ServerExists => "a server with that address already exists",
+ Error::ServerNotFound => "the requested server could not be found",
+ Error::ProcessNotFound => "the target process does not exist",
+ Error::ProcessNotChild =>
+ "the requested operation can only be done on child processes",
+ Error::ProcessTerminated => "the target process has crashed",
+ Error::Timeout => "the requested operation timed out",
+ Error::InternalError => "an internal error occurred",
+ Error::ServerQueueFull => "the server has too many pending messages",
+ Error::ThreadNotAvailable => "the specified thread does not exist",
+ Error::UnhandledSyscall => "the kernel did not recognize that syscall",
+ Error::InvalidSyscall => "the syscall had incorrect parameters",
+ Error::ShareViolation => "an attempt was made to share memory twice",
+ Error::InvalidThread => "tried to resume a thread that was not ready",
+ Error::InvalidPid => "kernel attempted to use a pid that was not valid",
+ Error::AccessDenied => "no permission to perform the requested operation",
+ Error::UseBeforeInit => "attempt to use a service before initialization finished",
+ Error::DoubleFree => "the requested resource was freed twice",
+ Error::DebugInProgress => "kernel attempted to activate a thread being debugged",
+ Error::InvalidLimit => "process attempted to adjust an invalid limit",
+ Error::UnknownError => "an unknown error occurred",
+ }
+ )
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl core::fmt::Debug for Error {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ write!(f, "{}", self)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl crate::error::Error for Error {}
+
+/// Indicates the type of Message that is sent when making a `SendMessage` syscall.
+#[derive(Copy, Clone)]
+#[repr(usize)]
+pub(crate) enum InvokeType {
+ LendMut = 1,
+ Lend = 2,
+ Move = 3,
+ Scalar = 4,
+ BlockingScalar = 5,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Debug, Copy, Clone)]
+/// A representation of a connection to a Xous service.
+pub struct Connection(u32);
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl From<u32> for Connection {
+ fn from(src: u32) -> Connection {
+ Connection(src)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl TryFrom<usize> for Connection {
+ type Error = core::num::TryFromIntError;
+ fn try_from(src: usize) -> Result<Self, Self::Error> {
+ Ok(Connection(src.try_into()?))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Into<u32> for Connection {
+ fn into(self) -> u32 {
+ self.0
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl TryInto<usize> for Connection {
+ type Error = core::num::TryFromIntError;
+ fn try_into(self) -> Result<usize, Self::Error> {
+ self.0.try_into()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Debug)]
+pub enum ServerAddressError {
+ InvalidLength,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct ServerAddress([u32; 4]);
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl TryFrom<&str> for ServerAddress {
+ type Error = ServerAddressError;
+ fn try_from(value: &str) -> Result<Self, Self::Error> {
+ let b = value.as_bytes();
+ if b.len() == 0 || b.len() > 16 {
+ return Err(Self::Error::InvalidLength);
+ }
+
+ let mut this_temp = [0u8; 16];
+ for (dest, src) in this_temp.iter_mut().zip(b.iter()) {
+ *dest = *src;
+ }
+
+ let mut this = [0u32; 4];
+ for (dest, src) in this.iter_mut().zip(this_temp.chunks_exact(4)) {
+ *dest = u32::from_le_bytes(src.try_into().unwrap());
+ }
+ Ok(ServerAddress(this))
+ }
+}
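A small worked example of the conversion above: the name is zero-padded to 16 bytes and packed into four little-endian `u32` words:

    let addr: ServerAddress = "log!".try_into().unwrap();
    let words: [u32; 4] = addr.into();
    // The 4-byte name fills the first word; the padding words stay zero.
    assert_eq!(words, [u32::from_le_bytes(*b"log!"), 0, 0, 0]);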
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Into<[u32; 4]> for ServerAddress {
+ fn into(self) -> [u32; 4] {
+ self.0
+ }
+}
+
+#[derive(Debug, Copy, Clone)]
+pub(crate) struct ThreadId(usize);
+
+impl From<usize> for ThreadId {
+ fn from(src: usize) -> ThreadId {
+ ThreadId(src)
+ }
+}
+
+impl Into<usize> for ThreadId {
+ fn into(self) -> usize {
+ self.0
+ }
+}
+
+#[derive(Copy, Clone)]
+#[repr(usize)]
+/// Limits that can be passed to `AdjustLimit`
+pub(crate) enum Limits {
+ HeapMaximum = 1,
+ HeapSize = 2,
+}
diff --git a/library/std/src/os/xous/ffi/definitions/memoryflags.rs b/library/std/src/os/xous/ffi/definitions/memoryflags.rs
new file mode 100644
index 000000000..af9de3cbf
--- /dev/null
+++ b/library/std/src/os/xous/ffi/definitions/memoryflags.rs
@@ -0,0 +1,176 @@
+/// Flags to be passed to the `MapMemory` syscall.
+/// Note that it is an error to have memory be
+/// writable and not readable.
+#[derive(Copy, PartialEq, Eq, Clone, PartialOrd, Ord, Hash, Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct MemoryFlags {
+ bits: usize,
+}
+
+impl MemoryFlags {
+ /// Free this memory
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const FREE: Self = Self { bits: 0b0000_0000 };
+
+ /// Immediately allocate this memory. Otherwise it will
+ /// be demand-paged. This is implicitly set when `phys`
+ /// is not 0.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const RESERVE: Self = Self { bits: 0b0000_0001 };
+
+ /// Allow the CPU to read from this page.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const R: Self = Self { bits: 0b0000_0010 };
+
+ /// Allow the CPU to write to this page.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const W: Self = Self { bits: 0b0000_0100 };
+
+ /// Allow the CPU to execute from this page.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const X: Self = Self { bits: 0b0000_1000 };
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn bits(&self) -> usize {
+ self.bits
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn from_bits(raw: usize) -> Option<MemoryFlags> {
+ if raw > 16 { None } else { Some(MemoryFlags { bits: raw }) }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn is_empty(&self) -> bool {
+ self.bits == 0
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn empty() -> MemoryFlags {
+ MemoryFlags { bits: 0 }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn all() -> MemoryFlags {
+ MemoryFlags { bits: 15 }
+ }
+}
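An illustrative fragment showing the bit layout of the constants above (`R` is bit 1, `W` is bit 2):

    let rw = MemoryFlags::R | MemoryFlags::W;
    assert_eq!(rw.bits(), 0b0000_0110);
    assert_eq!(MemoryFlags::from_bits(rw.bits()), Some(rw));
    assert!(MemoryFlags::empty().is_empty());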
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl core::fmt::Binary for MemoryFlags {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ core::fmt::Binary::fmt(&self.bits, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl core::fmt::Octal for MemoryFlags {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ core::fmt::Octal::fmt(&self.bits, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl core::fmt::LowerHex for MemoryFlags {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ core::fmt::LowerHex::fmt(&self.bits, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl core::fmt::UpperHex for MemoryFlags {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ core::fmt::UpperHex::fmt(&self.bits, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl core::ops::BitOr for MemoryFlags {
+ type Output = Self;
+
+ /// Returns the union of the two sets of flags.
+ #[inline]
+ fn bitor(self, other: MemoryFlags) -> Self {
+ Self { bits: self.bits | other.bits }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl core::ops::BitOrAssign for MemoryFlags {
+ /// Adds the set of flags.
+ #[inline]
+ fn bitor_assign(&mut self, other: Self) {
+ self.bits |= other.bits;
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl core::ops::BitXor for MemoryFlags {
+ type Output = Self;
+
+ /// Returns the left flags, but with all the right flags toggled.
+ #[inline]
+ fn bitxor(self, other: Self) -> Self {
+ Self { bits: self.bits ^ other.bits }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl core::ops::BitXorAssign for MemoryFlags {
+ /// Toggles the set of flags.
+ #[inline]
+ fn bitxor_assign(&mut self, other: Self) {
+ self.bits ^= other.bits;
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl core::ops::BitAnd for MemoryFlags {
+ type Output = Self;
+
+ /// Returns the intersection between the two sets of flags.
+ #[inline]
+ fn bitand(self, other: Self) -> Self {
+ Self { bits: self.bits & other.bits }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl core::ops::BitAndAssign for MemoryFlags {
+ /// Disables all flags disabled in the set.
+ #[inline]
+ fn bitand_assign(&mut self, other: Self) {
+ self.bits &= other.bits;
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl core::ops::Sub for MemoryFlags {
+ type Output = Self;
+
+ /// Returns the set difference of the two sets of flags.
+ #[inline]
+ fn sub(self, other: Self) -> Self {
+ Self { bits: self.bits & !other.bits }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl core::ops::SubAssign for MemoryFlags {
+ /// Disables all flags enabled in the set.
+ #[inline]
+ fn sub_assign(&mut self, other: Self) {
+ self.bits &= !other.bits;
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl core::ops::Not for MemoryFlags {
+ type Output = Self;
+
+ /// Returns the complement of this set of flags.
+ #[inline]
+ fn not(self) -> Self {
+ Self { bits: !self.bits } & MemoryFlags { bits: 15 }
+ }
+}
diff --git a/library/std/src/os/xous/mod.rs b/library/std/src/os/xous/mod.rs
new file mode 100644
index 000000000..153694a89
--- /dev/null
+++ b/library/std/src/os/xous/mod.rs
@@ -0,0 +1,17 @@
+#![stable(feature = "rust1", since = "1.0.0")]
+#![doc(cfg(target_os = "xous"))]
+
+pub mod ffi;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub mod services;
+
+/// A prelude for conveniently writing platform-specific code.
+///
+/// Includes all extension traits, and some important type definitions.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub mod prelude {
+ #[doc(no_inline)]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub use super::ffi::{OsStrExt, OsStringExt};
+}
diff --git a/library/std/src/os/xous/services.rs b/library/std/src/os/xous/services.rs
new file mode 100644
index 000000000..5c219f1fb
--- /dev/null
+++ b/library/std/src/os/xous/services.rs
@@ -0,0 +1,132 @@
+use crate::os::xous::ffi::Connection;
+use core::sync::atomic::{AtomicU32, Ordering};
+
+mod log;
+pub(crate) use log::*;
+
+mod systime;
+pub(crate) use systime::*;
+
+mod ticktimer;
+pub(crate) use ticktimer::*;
+
+mod ns {
+ const NAME_MAX_LENGTH: usize = 64;
+ use crate::os::xous::ffi::{lend_mut, Connection};
+ // By making this repr(C), the layout of this struct becomes well-defined
+ // and no longer shifts around.
+ // By marking it as `align(4096)` we define that it will be page-aligned,
+ // meaning it can be sent between processes. We make sure to pad out the
+ // entire struct so that memory isn't leaked to the name server.
+ #[repr(C, align(4096))]
+ struct ConnectRequest {
+ data: [u8; 4096],
+ }
+
+ impl ConnectRequest {
+ pub fn new(name: &str) -> Self {
+ let mut cr = ConnectRequest { data: [0u8; 4096] };
+ let name_bytes = name.as_bytes();
+
+ // Copy the string into our backing store.
+ for (&src_byte, dest_byte) in name_bytes.iter().zip(&mut cr.data[0..NAME_MAX_LENGTH]) {
+ *dest_byte = src_byte;
+ }
+
+ // Set the string length to the length of the passed-in String,
+ // or the maximum possible length, whichever is smaller.
+ for (&src_byte, dest_byte) in (name.len().min(NAME_MAX_LENGTH) as u32)
+ .to_le_bytes()
+ .iter()
+ .zip(&mut cr.data[NAME_MAX_LENGTH..])
+ {
+ *dest_byte = src_byte;
+ }
+ cr
+ }
+ }
+
+ pub fn connect_with_name_impl(name: &str, blocking: bool) -> Option<Connection> {
+ let mut request = ConnectRequest::new(name);
+ let opcode = if blocking {
+ 6 /* BlockingConnect */
+ } else {
+ 7 /* TryConnect */
+ };
+ let cid = if blocking { super::name_server() } else { super::try_name_server()? };
+
+ lend_mut(cid, opcode, &mut request.data, 0, name.len().min(NAME_MAX_LENGTH))
+ .expect("unable to perform lookup");
+
+ // Read the result code back from the nameserver
+ let result = u32::from_le_bytes(request.data[0..4].try_into().unwrap());
+ if result == 0 {
+ // If the result was successful, then the CID is stored in the next 4 bytes
+ Some(u32::from_le_bytes(request.data[4..8].try_into().unwrap()).into())
+ } else {
+ None
+ }
+ }
+
+ pub fn connect_with_name(name: &str) -> Option<Connection> {
+ connect_with_name_impl(name, true)
+ }
+
+ pub fn try_connect_with_name(name: &str) -> Option<Connection> {
+ connect_with_name_impl(name, false)
+ }
+}
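A hedged sketch of the lookup-page layout described above, written as if inside the `ns` module: the name occupies the first `NAME_MAX_LENGTH` bytes and its length follows as a little-endian `u32`:

    let req = ConnectRequest::new("ticktimer-server");
    assert_eq!(&req.data[..16], &b"ticktimer-server"[..]);
    // The length field sits immediately after the 64-byte name area.
    assert_eq!(&req.data[NAME_MAX_LENGTH..NAME_MAX_LENGTH + 4], &16u32.to_le_bytes()[..]);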
+
+/// Attempt to connect to a server by name. If the server does not exist, this will
+/// block until the server is created.
+///
+/// Note that this is different from connecting to a server by address. Server
+/// addresses are always 16 bytes long, whereas server names are arbitrary-length
+/// strings up to 64 bytes in length.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn connect(name: &str) -> Option<Connection> {
+ ns::connect_with_name(name)
+}
+
+/// Attempt to connect to a server by name. If the server does not exist, this will
+/// immediately return `None`.
+///
+/// Note that this is different from connecting to a server by address. Server
+/// addresses are always 16 bytes long, whereas server names are arbitrary-length
+/// strings of up to 64 bytes in length.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn try_connect(name: &str) -> Option<Connection> {
+ ns::try_connect_with_name(name)
+}
+
+static NAME_SERVER_CONNECTION: AtomicU32 = AtomicU32::new(0);
+
+/// Return a `Connection` to the name server. If the name server has not been started,
+/// then this call will block until the name server has been started. The `Connection`
+/// will be shared among all threads in a process, so it is safe to call this
+/// multiple times.
+pub(crate) fn name_server() -> Connection {
+ let cid = NAME_SERVER_CONNECTION.load(Ordering::Relaxed);
+ if cid != 0 {
+ return cid.into();
+ }
+
+ let cid = crate::os::xous::ffi::connect("xous-name-server".try_into().unwrap()).unwrap();
+ NAME_SERVER_CONNECTION.store(cid.into(), Ordering::Relaxed);
+ cid
+}
+
+fn try_name_server() -> Option<Connection> {
+ let cid = NAME_SERVER_CONNECTION.load(Ordering::Relaxed);
+ if cid != 0 {
+ return Some(cid.into());
+ }
+
+ if let Ok(Some(cid)) = crate::os::xous::ffi::try_connect("xous-name-server".try_into().unwrap())
+ {
+ NAME_SERVER_CONNECTION.store(cid.into(), Ordering::Relaxed);
+ Some(cid)
+ } else {
+ None
+ }
+}
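`name_server`, `try_name_server`, and the per-service helpers in the following files all repeat one caching idiom; a generalized sketch of it (illustrative only, a zero id meaning "not yet connected"):

    fn cached_connection(slot: &AtomicU32, name: &str) -> Connection {
        let cid = slot.load(Ordering::Relaxed);
        if cid != 0 {
            return cid.into();
        }
        // A race between threads simply performs the connect twice; the last
        // stored id wins, matching the behaviour of the functions above.
        let cid = crate::os::xous::ffi::connect(name.try_into().unwrap()).unwrap();
        slot.store(cid.into(), Ordering::Relaxed);
        cid
    }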
diff --git a/library/std/src/os/xous/services/log.rs b/library/std/src/os/xous/services/log.rs
new file mode 100644
index 000000000..e6bae929e
--- /dev/null
+++ b/library/std/src/os/xous/services/log.rs
@@ -0,0 +1,63 @@
+use crate::os::xous::ffi::Connection;
+use core::sync::atomic::{AtomicU32, Ordering};
+
+/// Group `size_of::<usize>()` bytes into a `usize` and return it, beginning
+/// `offset` * `size_of::<usize>()` bytes from the start. For example,
+/// `group_or_null([1,2,3,4,5,6,7,8], 1)` on a 32-bit system will
+/// return a `usize` containing the bytes `[5, 6, 7, 8]` in little-endian order.
+fn group_or_null(data: &[u8], offset: usize) -> usize {
+ let start = offset * core::mem::size_of::<usize>();
+ let mut out_array = [0u8; core::mem::size_of::<usize>()];
+ if start < data.len() {
+ for (dest, src) in out_array.iter_mut().zip(&data[start..]) {
+ *dest = *src;
+ }
+ }
+ usize::from_le_bytes(out_array)
+}
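A worked example of `group_or_null`, meaningful only on a 32-bit target where `usize` is four bytes:

    #[cfg(target_pointer_width = "32")]
    {
        // Offset 1 selects bytes 4..8, i.e. [5, 6, 7, 8], read little-endian.
        let packed = group_or_null(&[1, 2, 3, 4, 5, 6, 7, 8], 1);
        assert_eq!(packed, usize::from_le_bytes([5, 6, 7, 8]));
    }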
+
+pub(crate) enum LogScalar<'a> {
+ /// A panic occurred, and a panic log is forthcoming
+ BeginPanic,
+
+ /// Some number of bytes will be appended to the log message
+ AppendPanicMessage(&'a [u8]),
+}
+
+impl<'a> Into<[usize; 5]> for LogScalar<'a> {
+ fn into(self) -> [usize; 5] {
+ match self {
+ LogScalar::BeginPanic => [1000, 0, 0, 0, 0],
+ LogScalar::AppendPanicMessage(c) =>
+ // Text is grouped into 4x `usize` words. The id is 1100 plus
+ // the number of bytes in this message.
+ // Ignore errors since we're already panicking.
+ {
+ [
+ 1100 + c.len(),
+ group_or_null(&c, 0),
+ group_or_null(&c, 1),
+ group_or_null(&c, 2),
+ group_or_null(&c, 3),
+ ]
+ }
+ }
+ }
+}
+
+/// Return a `Connection` to the log server, which is used for printing messages to
+/// the console and reporting panics. If the log server has not yet started, this
+/// will block until the server is running. It is safe to call this multiple times,
+/// because the connection is shared among all threads in a process.
+pub(crate) fn log_server() -> Connection {
+ static LOG_SERVER_CONNECTION: AtomicU32 = AtomicU32::new(0);
+
+ let cid = LOG_SERVER_CONNECTION.load(Ordering::Relaxed);
+ if cid != 0 {
+ return cid.into();
+ }
+
+ let cid = crate::os::xous::ffi::connect("xous-log-server ".try_into().unwrap()).unwrap();
+ LOG_SERVER_CONNECTION.store(cid.into(), Ordering::Relaxed);
+ cid
+}
diff --git a/library/std/src/os/xous/services/systime.rs b/library/std/src/os/xous/services/systime.rs
new file mode 100644
index 000000000..bbb875c69
--- /dev/null
+++ b/library/std/src/os/xous/services/systime.rs
@@ -0,0 +1,28 @@
+use crate::os::xous::ffi::{connect, Connection};
+use core::sync::atomic::{AtomicU32, Ordering};
+
+pub(crate) enum SystimeScalar {
+ GetUtcTimeMs,
+}
+
+impl Into<[usize; 5]> for SystimeScalar {
+ fn into(self) -> [usize; 5] {
+ match self {
+ SystimeScalar::GetUtcTimeMs => [3, 0, 0, 0, 0],
+ }
+ }
+}
+
+/// Return a `Connection` to the systime server. This server is used for querying the
+/// real-time clock.
+pub(crate) fn systime_server() -> Connection {
+ static SYSTIME_SERVER_CONNECTION: AtomicU32 = AtomicU32::new(0);
+ let cid = SYSTIME_SERVER_CONNECTION.load(Ordering::Relaxed);
+ if cid != 0 {
+ return cid.into();
+ }
+
+ let cid = connect("timeserverpublic".try_into().unwrap()).unwrap();
+ SYSTIME_SERVER_CONNECTION.store(cid.into(), Ordering::Relaxed);
+ cid
+}
diff --git a/library/std/src/os/xous/services/ticktimer.rs b/library/std/src/os/xous/services/ticktimer.rs
new file mode 100644
index 000000000..7759303fd
--- /dev/null
+++ b/library/std/src/os/xous/services/ticktimer.rs
@@ -0,0 +1,42 @@
+use crate::os::xous::ffi::Connection;
+use core::sync::atomic::{AtomicU32, Ordering};
+
+pub(crate) enum TicktimerScalar {
+ ElapsedMs,
+ SleepMs(usize),
+ LockMutex(usize /* cookie */),
+ UnlockMutex(usize /* cookie */),
+ WaitForCondition(usize /* cookie */, usize /* timeout (ms) */),
+ NotifyCondition(usize /* cookie */, usize /* count */),
+ FreeMutex(usize /* cookie */),
+ FreeCondition(usize /* cookie */),
+}
+
+impl Into<[usize; 5]> for TicktimerScalar {
+ fn into(self) -> [usize; 5] {
+ match self {
+ TicktimerScalar::ElapsedMs => [0, 0, 0, 0, 0],
+ TicktimerScalar::SleepMs(msecs) => [1, msecs, 0, 0, 0],
+ TicktimerScalar::LockMutex(cookie) => [6, cookie, 0, 0, 0],
+ TicktimerScalar::UnlockMutex(cookie) => [7, cookie, 0, 0, 0],
+ TicktimerScalar::WaitForCondition(cookie, timeout_ms) => [8, cookie, timeout_ms, 0, 0],
+ TicktimerScalar::NotifyCondition(cookie, count) => [9, cookie, count, 0, 0],
+ TicktimerScalar::FreeMutex(cookie) => [10, cookie, 0, 0, 0],
+ TicktimerScalar::FreeCondition(cookie) => [11, cookie, 0, 0, 0],
+ }
+ }
+}
+
+/// Return a `Connection` to the ticktimer server. This server is used for synchronization
+/// primitives such as sleep, Mutex, and Condvar.
+pub(crate) fn ticktimer_server() -> Connection {
+ static TICKTIMER_SERVER_CONNECTION: AtomicU32 = AtomicU32::new(0);
+ let cid = TICKTIMER_SERVER_CONNECTION.load(Ordering::Relaxed);
+ if cid != 0 {
+ return cid.into();
+ }
+
+ let cid = crate::os::xous::ffi::connect("ticktimer-server".try_into().unwrap()).unwrap();
+ TICKTIMER_SERVER_CONNECTION.store(cid.into(), Ordering::Relaxed);
+ cid
+}
diff --git a/library/std/src/panicking.rs b/library/std/src/panicking.rs
index a0c21f704..d7a2baa1f 100644
--- a/library/std/src/panicking.rs
+++ b/library/std/src/panicking.rs
@@ -10,7 +10,7 @@
#![deny(unsafe_op_in_unsafe_fn)]
use crate::panic::BacktraceStyle;
-use core::panic::{BoxMeUp, Location, PanicInfo};
+use core::panic::{Location, PanicInfo, PanicPayload};
use crate::any::Any;
use crate::fmt;
@@ -47,9 +47,9 @@ extern "C" {
}
extern "Rust" {
- /// `BoxMeUp` lazily performs allocation only when needed (this avoids
+ /// `PanicPayload` lazily performs allocation only when needed (this avoids
/// allocations when using the "abort" panic runtime).
- fn __rust_start_panic(payload: &mut dyn BoxMeUp) -> u32;
+ fn __rust_start_panic(payload: &mut dyn PanicPayload) -> u32;
}
/// This function is called by the panic runtime if FFI code catches a Rust
@@ -238,7 +238,9 @@ where
fn default_hook(info: &PanicInfo<'_>) {
// If this is a double panic, make sure that we print a backtrace
// for this panic. Otherwise only print it if logging is enabled.
- let backtrace = if panic_count::get_count() >= 2 {
+ let backtrace = if info.force_no_backtrace() {
+ None
+ } else if panic_count::get_count() >= 2 {
BacktraceStyle::full()
} else {
crate::panic::get_backtrace_style()
@@ -278,7 +280,7 @@ fn default_hook(info: &PanicInfo<'_>) {
);
}
}
- // If backtraces aren't supported, do nothing.
+ // If backtraces aren't supported or are forced-off, do nothing.
None => {}
}
};
@@ -541,14 +543,14 @@ pub fn panicking() -> bool {
#[cfg(not(test))]
#[panic_handler]
pub fn begin_panic_handler(info: &PanicInfo<'_>) -> ! {
- struct PanicPayload<'a> {
+ struct FormatStringPayload<'a> {
inner: &'a fmt::Arguments<'a>,
string: Option<String>,
}
- impl<'a> PanicPayload<'a> {
- fn new(inner: &'a fmt::Arguments<'a>) -> PanicPayload<'a> {
- PanicPayload { inner, string: None }
+ impl<'a> FormatStringPayload<'a> {
+ fn new(inner: &'a fmt::Arguments<'a>) -> Self {
+ Self { inner, string: None }
}
fn fill(&mut self) -> &mut String {
@@ -564,7 +566,7 @@ pub fn begin_panic_handler(info: &PanicInfo<'_>) -> ! {
}
}
- unsafe impl<'a> BoxMeUp for PanicPayload<'a> {
+ unsafe impl<'a> PanicPayload for FormatStringPayload<'a> {
fn take_box(&mut self) -> *mut (dyn Any + Send) {
// We do two allocations here, unfortunately. But (a) they're required with the current
// scheme, and (b) we don't handle panic + OOM properly anyway (see comment in
@@ -578,9 +580,9 @@ pub fn begin_panic_handler(info: &PanicInfo<'_>) -> ! {
}
}
- struct StrPanicPayload(&'static str);
+ struct StaticStrPayload(&'static str);
- unsafe impl BoxMeUp for StrPanicPayload {
+ unsafe impl PanicPayload for StaticStrPayload {
fn take_box(&mut self) -> *mut (dyn Any + Send) {
Box::into_raw(Box::new(self.0))
}
@@ -593,14 +595,23 @@ pub fn begin_panic_handler(info: &PanicInfo<'_>) -> ! {
let loc = info.location().unwrap(); // The current implementation always returns Some
let msg = info.message().unwrap(); // The current implementation always returns Some
crate::sys_common::backtrace::__rust_end_short_backtrace(move || {
+ // FIXME: can we just pass `info` along rather than taking it apart here, only to have
+ // `rust_panic_with_hook` construct a new `PanicInfo`?
if let Some(msg) = msg.as_str() {
- rust_panic_with_hook(&mut StrPanicPayload(msg), info.message(), loc, info.can_unwind());
+ rust_panic_with_hook(
+ &mut StaticStrPayload(msg),
+ info.message(),
+ loc,
+ info.can_unwind(),
+ info.force_no_backtrace(),
+ );
} else {
rust_panic_with_hook(
- &mut PanicPayload::new(msg),
+ &mut FormatStringPayload::new(msg),
info.message(),
loc,
info.can_unwind(),
+ info.force_no_backtrace(),
);
}
})
@@ -625,20 +636,26 @@ pub const fn begin_panic<M: Any + Send>(msg: M) -> ! {
let loc = Location::caller();
return crate::sys_common::backtrace::__rust_end_short_backtrace(move || {
- rust_panic_with_hook(&mut PanicPayload::new(msg), None, loc, true)
+ rust_panic_with_hook(
+ &mut Payload::new(msg),
+ None,
+ loc,
+ /* can_unwind */ true,
+ /* force_no_backtrace */ false,
+ )
});
- struct PanicPayload<A> {
+ struct Payload<A> {
inner: Option<A>,
}
- impl<A: Send + 'static> PanicPayload<A> {
- fn new(inner: A) -> PanicPayload<A> {
- PanicPayload { inner: Some(inner) }
+ impl<A: Send + 'static> Payload<A> {
+ fn new(inner: A) -> Payload<A> {
+ Payload { inner: Some(inner) }
}
}
- unsafe impl<A: Send + 'static> BoxMeUp for PanicPayload<A> {
+ unsafe impl<A: Send + 'static> PanicPayload for Payload<A> {
fn take_box(&mut self) -> *mut (dyn Any + Send) {
// Note that this should be the only allocation performed in this code path. Currently
// this means that panic!() on OOM will invoke this code path, but then again we're not
@@ -667,10 +684,11 @@ pub const fn begin_panic<M: Any + Send>(msg: M) -> ! {
/// panics, panic hooks, and finally dispatching to the panic runtime to either
/// abort or unwind.
fn rust_panic_with_hook(
- payload: &mut dyn BoxMeUp,
+ payload: &mut dyn PanicPayload,
message: Option<&fmt::Arguments<'_>>,
location: &Location<'_>,
can_unwind: bool,
+ force_no_backtrace: bool,
) -> ! {
let must_abort = panic_count::increase(true);
@@ -685,14 +703,20 @@ fn rust_panic_with_hook(
panic_count::MustAbort::AlwaysAbort => {
// Unfortunately, this does not print a backtrace, because creating
// a `Backtrace` will allocate, which we must avoid here.
- let panicinfo = PanicInfo::internal_constructor(message, location, can_unwind);
+ let panicinfo = PanicInfo::internal_constructor(
+ message,
+ location,
+ can_unwind,
+ force_no_backtrace,
+ );
rtprintpanic!("{panicinfo}\npanicked after panic::always_abort(), aborting.\n");
}
}
crate::sys::abort_internal();
}
- let mut info = PanicInfo::internal_constructor(message, location, can_unwind);
+ let mut info =
+ PanicInfo::internal_constructor(message, location, can_unwind, force_no_backtrace);
let hook = HOOK.read().unwrap_or_else(PoisonError::into_inner);
match *hook {
// Some platforms (like wasm) know that printing to stderr won't ever actually
@@ -736,7 +760,7 @@ pub fn rust_panic_without_hook(payload: Box<dyn Any + Send>) -> ! {
struct RewrapBox(Box<dyn Any + Send>);
- unsafe impl BoxMeUp for RewrapBox {
+ unsafe impl PanicPayload for RewrapBox {
fn take_box(&mut self) -> *mut (dyn Any + Send) {
Box::into_raw(mem::replace(&mut self.0, Box::new(())))
}
@@ -753,7 +777,7 @@ pub fn rust_panic_without_hook(payload: Box<dyn Any + Send>) -> ! {
/// yer breakpoints.
#[inline(never)]
#[cfg_attr(not(test), rustc_std_internal_symbol)]
-fn rust_panic(msg: &mut dyn BoxMeUp) -> ! {
+fn rust_panic(msg: &mut dyn PanicPayload) -> ! {
let code = unsafe { __rust_start_panic(msg) };
rtabort!("failed to initiate panic, error {code}")
}
diff --git a/library/std/src/path.rs b/library/std/src/path.rs
index 5842c096f..60562f64c 100644
--- a/library/std/src/path.rs
+++ b/library/std/src/path.rs
@@ -193,7 +193,7 @@ impl<'a> Prefix<'a> {
fn len(&self) -> usize {
use self::Prefix::*;
fn os_str_len(s: &OsStr) -> usize {
- s.as_os_str_bytes().len()
+ s.as_encoded_bytes().len()
}
match *self {
Verbatim(x) => 4 + os_str_len(x),
@@ -316,7 +316,7 @@ fn has_physical_root(s: &[u8], prefix: Option<Prefix<'_>>) -> bool {
// basic workhorse for splitting stem and extension
fn rsplit_file_at_dot(file: &OsStr) -> (Option<&OsStr>, Option<&OsStr>) {
- if file.as_os_str_bytes() == b".." {
+ if file.as_encoded_bytes() == b".." {
return (Some(file), None);
}
@@ -324,7 +324,7 @@ fn rsplit_file_at_dot(file: &OsStr) -> (Option<&OsStr>, Option<&OsStr>) {
// and back. This is safe to do because (1) we only look at ASCII
// contents of the encoding and (2) new &OsStr values are produced
// only from ASCII-bounded slices of existing &OsStr values.
- let mut iter = file.as_os_str_bytes().rsplitn(2, |b| *b == b'.');
+ let mut iter = file.as_encoded_bytes().rsplitn(2, |b| *b == b'.');
let after = iter.next();
let before = iter.next();
if before == Some(b"") {
@@ -332,15 +332,15 @@ fn rsplit_file_at_dot(file: &OsStr) -> (Option<&OsStr>, Option<&OsStr>) {
} else {
unsafe {
(
- before.map(|s| OsStr::from_os_str_bytes_unchecked(s)),
- after.map(|s| OsStr::from_os_str_bytes_unchecked(s)),
+ before.map(|s| OsStr::from_encoded_bytes_unchecked(s)),
+ after.map(|s| OsStr::from_encoded_bytes_unchecked(s)),
)
}
}
}
fn split_file_at_dot(file: &OsStr) -> (&OsStr, Option<&OsStr>) {
- let slice = file.as_os_str_bytes();
+ let slice = file.as_encoded_bytes();
if slice == b".." {
return (file, None);
}
@@ -357,8 +357,8 @@ fn split_file_at_dot(file: &OsStr) -> (&OsStr, Option<&OsStr>) {
let after = &slice[i + 1..];
unsafe {
(
- OsStr::from_os_str_bytes_unchecked(before),
- Some(OsStr::from_os_str_bytes_unchecked(after)),
+ OsStr::from_encoded_bytes_unchecked(before),
+ Some(OsStr::from_encoded_bytes_unchecked(after)),
)
}
}
@@ -739,7 +739,7 @@ impl<'a> Components<'a> {
// separately via `include_cur_dir`
b".." => Some(Component::ParentDir),
b"" => None,
- _ => Some(Component::Normal(unsafe { OsStr::from_os_str_bytes_unchecked(comp) })),
+ _ => Some(Component::Normal(unsafe { OsStr::from_encoded_bytes_unchecked(comp) })),
}
}
@@ -896,7 +896,7 @@ impl<'a> Iterator for Components<'a> {
let raw = &self.path[..self.prefix_len()];
self.path = &self.path[self.prefix_len()..];
return Some(Component::Prefix(PrefixComponent {
- raw: unsafe { OsStr::from_os_str_bytes_unchecked(raw) },
+ raw: unsafe { OsStr::from_encoded_bytes_unchecked(raw) },
parsed: self.prefix.unwrap(),
}));
}
@@ -968,7 +968,7 @@ impl<'a> DoubleEndedIterator for Components<'a> {
State::Prefix if self.prefix_len() > 0 => {
self.back = State::Done;
return Some(Component::Prefix(PrefixComponent {
- raw: unsafe { OsStr::from_os_str_bytes_unchecked(self.path) },
+ raw: unsafe { OsStr::from_encoded_bytes_unchecked(self.path) },
parsed: self.prefix.unwrap(),
}));
}
@@ -1477,17 +1477,17 @@ impl PathBuf {
fn _set_extension(&mut self, extension: &OsStr) -> bool {
let file_stem = match self.file_stem() {
None => return false,
- Some(f) => f.as_os_str_bytes(),
+ Some(f) => f.as_encoded_bytes(),
};
// truncate until right after the file stem
let end_file_stem = file_stem[file_stem.len()..].as_ptr().addr();
- let start = self.inner.as_os_str_bytes().as_ptr().addr();
+ let start = self.inner.as_encoded_bytes().as_ptr().addr();
let v = self.as_mut_vec();
v.truncate(end_file_stem.wrapping_sub(start));
// add the new extension, if any
- let new = extension.as_os_str_bytes();
+ let new = extension.as_encoded_bytes();
if !new.is_empty() {
v.reserve_exact(new.len() + 1);
v.push(b'.');
@@ -2007,11 +2007,11 @@ impl Path {
// The following (private!) function allows construction of a path from a u8
// slice, which is only safe when it is known to follow the OsStr encoding.
unsafe fn from_u8_slice(s: &[u8]) -> &Path {
- unsafe { Path::new(OsStr::from_os_str_bytes_unchecked(s)) }
+ unsafe { Path::new(OsStr::from_encoded_bytes_unchecked(s)) }
}
// The following (private!) function reveals the byte encoding used for OsStr.
fn as_u8_slice(&self) -> &[u8] {
- self.inner.as_os_str_bytes()
+ self.inner.as_encoded_bytes()
}
/// Directly wraps a string slice as a `Path` slice.
@@ -2609,7 +2609,7 @@ impl Path {
fn _with_extension(&self, extension: &OsStr) -> PathBuf {
let self_len = self.as_os_str().len();
- let self_bytes = self.as_os_str().as_os_str_bytes();
+ let self_bytes = self.as_os_str().as_encoded_bytes();
let (new_capacity, slice_to_copy) = match self.extension() {
None => {
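The path changes above are a mechanical rename to `OsStr::as_encoded_bytes` and `OsStr::from_encoded_bytes_unchecked`; a brief illustrative use of the renamed API:

    use std::ffi::OsStr;

    let name = OsStr::new("archive.tar");
    let bytes = name.as_encoded_bytes();
    // Splitting at an ASCII byte keeps the encoding's safety invariant intact.
    let dot = bytes.iter().rposition(|&b| b == b'.').unwrap();
    let ext = unsafe { OsStr::from_encoded_bytes_unchecked(&bytes[dot + 1..]) };
    assert_eq!(ext, OsStr::new("tar"));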
diff --git a/library/std/src/primitive_docs.rs b/library/std/src/primitive_docs.rs
deleted file mode 100644
index 80289ca08..000000000
--- a/library/std/src/primitive_docs.rs
+++ /dev/null
@@ -1,1593 +0,0 @@
-// `library/{std,core}/src/primitive_docs.rs` should have the same contents.
-// These are different files so that relative links work properly without
-// having to have `CARGO_PKG_NAME` set, but conceptually they should always be the same.
-#[rustc_doc_primitive = "bool"]
-#[doc(alias = "true")]
-#[doc(alias = "false")]
-/// The boolean type.
-///
-/// The `bool` represents a value, which could only be either [`true`] or [`false`]. If you cast
-/// a `bool` into an integer, [`true`] will be 1 and [`false`] will be 0.
-///
-/// # Basic usage
-///
-/// `bool` implements various traits, such as [`BitAnd`], [`BitOr`], [`Not`], etc.,
-/// which allow us to perform boolean operations using `&`, `|` and `!`.
-///
-/// [`if`] requires a `bool` value as its conditional. [`assert!`], which is an
-/// important macro in testing, checks whether an expression is [`true`] and panics
-/// if it isn't.
-///
-/// ```
-/// let bool_val = true & false | false;
-/// assert!(!bool_val);
-/// ```
-///
-/// [`true`]: ../std/keyword.true.html
-/// [`false`]: ../std/keyword.false.html
-/// [`BitAnd`]: ops::BitAnd
-/// [`BitOr`]: ops::BitOr
-/// [`Not`]: ops::Not
-/// [`if`]: ../std/keyword.if.html
-///
-/// # Examples
-///
-/// A trivial example of the usage of `bool`:
-///
-/// ```
-/// let praise_the_borrow_checker = true;
-///
-/// // using the `if` conditional
-/// if praise_the_borrow_checker {
-/// println!("oh, yeah!");
-/// } else {
-/// println!("what?!!");
-/// }
-///
-/// // ... or, a match pattern
-/// match praise_the_borrow_checker {
-/// true => println!("keep praising!"),
-/// false => println!("you should praise!"),
-/// }
-/// ```
-///
-/// Also, since `bool` implements the [`Copy`] trait, we don't
-/// have to worry about the move semantics (just like the integer and float primitives).
-///
-/// Now an example of `bool` cast to integer type:
-///
-/// ```
-/// assert_eq!(true as i32, 1);
-/// assert_eq!(false as i32, 0);
-/// ```
-#[stable(feature = "rust1", since = "1.0.0")]
-mod prim_bool {}
-
-#[rustc_doc_primitive = "never"]
-#[doc(alias = "!")]
-//
-/// The `!` type, also called "never".
-///
-/// `!` represents the type of computations which never resolve to any value at all. For example,
-/// the [`exit`] function `fn exit(code: i32) -> !` exits the process without ever returning, and
-/// so returns `!`.
-///
-/// `break`, `continue` and `return` expressions also have type `!`. For example we are allowed to
-/// write:
-///
-/// ```
-/// #![feature(never_type)]
-/// # fn foo() -> u32 {
-/// let x: ! = {
-/// return 123
-/// };
-/// # }
-/// ```
-///
-/// Although the `let` is pointless here, it illustrates the meaning of `!`. Since `x` is never
-/// assigned a value (because `return` returns from the entire function), `x` can be given type
-/// `!`. We could also replace `return 123` with a `panic!` or a never-ending `loop` and this code
-/// would still be valid.
-///
-/// A more realistic usage of `!` is in this code:
-///
-/// ```
-/// # fn get_a_number() -> Option<u32> { None }
-/// # loop {
-/// let num: u32 = match get_a_number() {
-/// Some(num) => num,
-/// None => break,
-/// };
-/// # }
-/// ```
-///
-/// Both match arms must produce values of type [`u32`], but since `break` never produces a value
-/// at all we know it can never produce a value which isn't a [`u32`]. This illustrates another
-/// behaviour of the `!` type - expressions with type `!` will coerce into any other type.
-///
-/// [`u32`]: prim@u32
-#[doc = concat!("[`exit`]: ", include_str!("../primitive_docs/process_exit.md"))]
-///
-/// # `!` and generics
-///
-/// ## Infallible errors
-///
-/// The main place you'll see `!` used explicitly is in generic code. Consider the [`FromStr`]
-/// trait:
-///
-/// ```
-/// trait FromStr: Sized {
-/// type Err;
-/// fn from_str(s: &str) -> Result<Self, Self::Err>;
-/// }
-/// ```
-///
-/// When implementing this trait for [`String`] we need to pick a type for [`Err`]. And since
-/// converting a string into a string will never result in an error, the appropriate type is `!`.
-/// (Currently the type actually used is an enum with no variants, though this is only because `!`
-/// was added to Rust at a later date and it may change in the future.) With an [`Err`] type of
-/// `!`, if we have to call [`String::from_str`] for some reason the result will be a
-/// [`Result<String, !>`] which we can unpack like this:
-///
-/// ```
-/// #![feature(exhaustive_patterns)]
-/// use std::str::FromStr;
-/// let Ok(s) = String::from_str("hello");
-/// ```
-///
-/// Since the [`Err`] variant contains a `!`, it can never occur. If the `exhaustive_patterns`
-/// feature is present this means we can exhaustively match on [`Result<T, !>`] by just taking the
-/// [`Ok`] variant. This illustrates another behaviour of `!` - it can be used to "delete" certain
-/// enum variants from generic types like `Result`.
-///
-/// ## Infinite loops
-///
-/// While [`Result<T, !>`] is very useful for removing errors, `!` can also be used to remove
-/// successes as well. If we think of [`Result<T, !>`] as "if this function returns, it has not
-/// errored," we get a very intuitive idea of [`Result<!, E>`] as well: if the function returns, it
-/// *has* errored.
-///
-/// For example, consider the case of a simple web server, which can be simplified to:
-///
-/// ```ignore (hypothetical-example)
-/// loop {
-/// let (client, request) = get_request().expect("disconnected");
-/// let response = request.process();
-/// response.send(client);
-/// }
-/// ```
-///
-/// Currently, this isn't ideal, because we simply panic whenever we fail to get a new connection.
-/// Instead, we'd like to keep track of this error, like this:
-///
-/// ```ignore (hypothetical-example)
-/// loop {
-/// match get_request() {
-/// Err(err) => break err,
-/// Ok((client, request)) => {
-/// let response = request.process();
-/// response.send(client);
-/// },
-/// }
-/// }
-/// ```
-///
-/// Now, when the server disconnects, we exit the loop with an error instead of panicking. While it
-/// might be intuitive to simply return the error, we might want to wrap it in a [`Result<!, E>`]
-/// instead:
-///
-/// ```ignore (hypothetical-example)
-/// fn server_loop() -> Result<!, ConnectionError> {
-/// loop {
-/// let (client, request) = get_request()?;
-/// let response = request.process();
-/// response.send(client);
-/// }
-/// }
-/// ```
-///
-/// Now, we can use `?` instead of `match`, and the return type makes a lot more sense: if the loop
-/// ever stops, it means that an error occurred. We don't even have to wrap the loop in an `Ok`
-/// because `!` coerces to `Result<!, ConnectionError>` automatically.
-///
-/// [`String::from_str`]: str::FromStr::from_str
-#[doc = concat!("[`String`]: ", include_str!("../primitive_docs/string_string.md"))]
-/// [`FromStr`]: str::FromStr
-///
-/// # `!` and traits
-///
-/// When writing your own traits, `!` should have an `impl` whenever there is an obvious `impl`
-/// which doesn't `panic!`. The reason is that functions returning an `impl Trait` where `!`
-/// does not have an `impl` of `Trait` cannot diverge as their only possible code path. In other
-/// words, they can't return `!` from every code path. As an example, this code doesn't compile:
-///
-/// ```compile_fail
-/// use std::ops::Add;
-///
-/// fn foo() -> impl Add<u32> {
-/// unimplemented!()
-/// }
-/// ```
-///
-/// But this code does:
-///
-/// ```
-/// use std::ops::Add;
-///
-/// fn foo() -> impl Add<u32> {
-/// if true {
-/// unimplemented!()
-/// } else {
-/// 0
-/// }
-/// }
-/// ```
-///
-/// The reason is that, in the first example, there are many possible types that `!` could coerce
-/// to, because many types implement `Add<u32>`. However, in the second example,
-/// the `else` branch returns a `0`, which the compiler infers from the return type to be of type
-/// `u32`. Since `u32` is a concrete type, `!` can and will be coerced to it. See issue [#36375]
-/// for more information on this quirk of `!`.
-///
-/// [#36375]: https://github.com/rust-lang/rust/issues/36375
-///
-/// As it turns out, though, most traits can have an `impl` for `!`. Take [`Debug`]
-/// for example:
-///
-/// ```
-/// #![feature(never_type)]
-/// # use std::fmt;
-/// # trait Debug {
-/// # fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result;
-/// # }
-/// impl Debug for ! {
-/// fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
-/// *self
-/// }
-/// }
-/// ```
-///
-/// Once again we're using `!`'s ability to coerce into any other type, in this case
-/// [`fmt::Result`]. Since this method takes a `&!` as an argument we know that it can never be
-/// called (because there is no value of type `!` for it to be called with). Writing `*self`
-/// essentially tells the compiler "We know that this code can never be run, so just treat the
-/// entire function body as having type [`fmt::Result`]". This pattern can be used a lot when
-/// implementing traits for `!`. Generally, any trait which only has methods which take a `self`
-/// parameter should have such an impl.
-///
-/// On the other hand, one trait which would not be appropriate to implement is [`Default`]:
-///
-/// ```
-/// trait Default {
-/// fn default() -> Self;
-/// }
-/// ```
-///
-/// Since `!` has no values, it has no default value either. It's true that we could write an
-/// `impl` for this which simply panics, but the same is true for any type (we could `impl
-/// Default` for (e.g.) [`File`] by just making [`default()`] panic.)
-///
-#[doc = concat!("[`File`]: ", include_str!("../primitive_docs/fs_file.md"))]
-/// [`Debug`]: fmt::Debug
-/// [`default()`]: Default::default
-///
-#[unstable(feature = "never_type", issue = "35121")]
-mod prim_never {}
-
-#[rustc_doc_primitive = "char"]
-#[allow(rustdoc::invalid_rust_codeblocks)]
-/// A character type.
-///
-/// The `char` type represents a single character. More specifically, since
-/// 'character' isn't a well-defined concept in Unicode, `char` is a '[Unicode
-/// scalar value]'.
-///
-/// This documentation describes a number of methods and trait implementations on the
-/// `char` type. For technical reasons, there is additional, separate
-/// documentation in [the `std::char` module](char/index.html) as well.
-///
-/// # Validity
-///
-/// A `char` is a '[Unicode scalar value]', which is any '[Unicode code point]'
-/// other than a [surrogate code point]. This has a fixed numerical definition:
-/// code points are in the range 0 to 0x10FFFF, inclusive.
-/// Surrogate code points, used by UTF-16, are in the range 0xD800 to 0xDFFF.
-///
-/// No `char` may be constructed, whether as a literal or at runtime, that is not a
-/// Unicode scalar value:
-///
-/// ```compile_fail
-/// // Each of these is a compiler error
-/// ['\u{D800}', '\u{DFFF}', '\u{110000}'];
-/// ```
-///
-/// ```should_panic
-/// // Panics; from_u32 returns None.
-/// char::from_u32(0xDE01).unwrap();
-/// ```
-///
-/// ```no_run
-/// // Undefined behaviour
-/// let _ = unsafe { char::from_u32_unchecked(0x110000) };
-/// ```
-///
-/// USVs are also the exact set of values that may be encoded in UTF-8. Because
-/// `char` values are USVs and `str` values are valid UTF-8, it is safe to store
-/// any `char` in a `str` or read any character from a `str` as a `char`.
-///
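-/// For instance, a quick sketch: any single `char` can be encoded into a small byte
-/// buffer with `encode_utf8` and read back out of the resulting `&str`:
-///
-/// ```
-/// let mut buf = [0u8; 4]; // four bytes always fit any single `char`
-/// let s: &str = 'é'.encode_utf8(&mut buf);
-///
-/// assert_eq!(s, "é");
-/// assert_eq!(s.chars().next(), Some('é'));
-/// ```
-///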
-/// The gap in valid `char` values is understood by the compiler, so in the
-/// below example the two ranges are understood to cover the whole range of
-/// possible `char` values and there is no error for a [non-exhaustive match].
-///
-/// ```
-/// let c: char = 'a';
-/// match c {
-/// '\0' ..= '\u{D7FF}' => false,
-/// '\u{E000}' ..= '\u{10FFFF}' => true,
-/// };
-/// ```
-///
-/// All USVs are valid `char` values, but not all of them represent a real
-/// character. Many USVs are not currently assigned to a character, but may be
-/// in the future ("reserved"); some will never be a character
-/// ("noncharacters"); and some may be given different meanings by different
-/// users ("private use").
-///
-/// [Unicode code point]: https://www.unicode.org/glossary/#code_point
-/// [Unicode scalar value]: https://www.unicode.org/glossary/#unicode_scalar_value
-/// [non-exhaustive match]: ../book/ch06-02-match.html#matches-are-exhaustive
-/// [surrogate code point]: https://www.unicode.org/glossary/#surrogate_code_point
-///
-/// # Representation
-///
-/// `char` is always four bytes in size. This is a different representation than
-/// a given character would have as part of a [`String`]. For example:
-///
-/// ```
-/// let v = vec!['h', 'e', 'l', 'l', 'o'];
-///
-/// // five elements times four bytes for each element
-/// assert_eq!(20, v.len() * std::mem::size_of::<char>());
-///
-/// let s = String::from("hello");
-///
-/// // five elements times one byte per element
-/// assert_eq!(5, s.len() * std::mem::size_of::<u8>());
-/// ```
-///
-#[doc = concat!("[`String`]: ", include_str!("../primitive_docs/string_string.md"))]
-///
-/// As always, remember that a human intuition for 'character' might not map to
-/// Unicode's definitions. For example, despite looking similar, the 'é'
-/// character is one Unicode code point while 'é' is two Unicode code points:
-///
-/// ```
-/// let mut chars = "é".chars();
-/// // U+00e9: 'latin small letter e with acute'
-/// assert_eq!(Some('\u{00e9}'), chars.next());
-/// assert_eq!(None, chars.next());
-///
-/// let mut chars = "é".chars();
-/// // U+0065: 'latin small letter e'
-/// assert_eq!(Some('\u{0065}'), chars.next());
-/// // U+0301: 'combining acute accent'
-/// assert_eq!(Some('\u{0301}'), chars.next());
-/// assert_eq!(None, chars.next());
-/// ```
-///
-/// This means that the contents of the first string above _will_ fit into a
-/// `char` while the contents of the second string _will not_. Trying to create
-/// a `char` literal with the contents of the second string gives an error:
-///
-/// ```text
-/// error: character literal may only contain one codepoint: 'é'
-/// let c = 'é';
-/// ^^^
-/// ```
-///
-/// Another implication of the 4-byte fixed size of a `char` is that
-/// per-`char` processing can end up using a lot more memory:
-///
-/// ```
-/// let s = String::from("love: ❤️");
-/// let v: Vec<char> = s.chars().collect();
-///
-/// assert_eq!(12, std::mem::size_of_val(&s[..]));
-/// assert_eq!(32, std::mem::size_of_val(&v[..]));
-/// ```
-#[stable(feature = "rust1", since = "1.0.0")]
-mod prim_char {}
-
-#[rustc_doc_primitive = "unit"]
-#[doc(alias = "(")]
-#[doc(alias = ")")]
-#[doc(alias = "()")]
-//
-/// The `()` type, also called "unit".
-///
-/// The `()` type has exactly one value `()`, and is used when there
-/// is no other meaningful value that could be returned. `()` is most
-/// commonly seen implicitly: functions without a `-> ...` implicitly
-/// have return type `()`, that is, these are equivalent:
-///
-/// ```rust
-/// fn long() -> () {}
-///
-/// fn short() {}
-/// ```
-///
-/// The semicolon `;` can be used to discard the result of an
-/// expression at the end of a block, making the expression (and thus
-/// the block) evaluate to `()`. For example,
-///
-/// ```rust
-/// fn returns_i64() -> i64 {
-/// 1i64
-/// }
-/// fn returns_unit() {
-/// 1i64;
-/// }
-///
-/// let is_i64 = {
-/// returns_i64()
-/// };
-/// let is_unit = {
-/// returns_i64();
-/// };
-/// ```
-///
-#[stable(feature = "rust1", since = "1.0.0")]
-mod prim_unit {}
-
-// Required to make auto trait impls render.
-// See src/librustdoc/passes/collect_trait_impls.rs:collect_trait_impls
-#[doc(hidden)]
-impl () {}
-
-// Fake impl that's only really used for docs.
-#[cfg(doc)]
-#[stable(feature = "rust1", since = "1.0.0")]
-impl Clone for () {
- fn clone(&self) -> Self {
- loop {}
- }
-}
-
-// Fake impl that's only really used for docs.
-#[cfg(doc)]
-#[stable(feature = "rust1", since = "1.0.0")]
-impl Copy for () {
- // empty
-}
-
-#[rustc_doc_primitive = "pointer"]
-#[doc(alias = "ptr")]
-#[doc(alias = "*")]
-#[doc(alias = "*const")]
-#[doc(alias = "*mut")]
-//
-/// Raw, unsafe pointers, `*const T`, and `*mut T`.
-///
-/// *[See also the `std::ptr` module](ptr).*
-///
-/// Working with raw pointers in Rust is uncommon, typically limited to a few patterns.
-/// Raw pointers can be unaligned or [`null`]. However, when a raw pointer is
-/// dereferenced (using the `*` operator), it must be non-null and aligned.
-///
-/// Storing through a raw pointer using `*ptr = data` calls `drop` on the old value, so
-/// [`write`] must be used if the type has drop glue and memory is not already
-/// initialized - otherwise `drop` would be called on the uninitialized memory.
-///
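-/// A quick sketch of that rule, initializing uninitialized memory with [`write`]
-/// instead of a plain assignment (which would drop a garbage "old" value first):
-///
-/// ```
-/// use std::mem::MaybeUninit;
-/// use std::ptr;
-///
-/// let mut slot: MaybeUninit<String> = MaybeUninit::uninit();
-/// let p: *mut String = slot.as_mut_ptr();
-/// unsafe {
-///     // `*p = String::from("hi")` would first drop an uninitialized "old" String.
-///     ptr::write(p, String::from("hi"));
-///     assert_eq!(slot.assume_init(), "hi");
-/// }
-/// ```
-///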
-/// Use the [`null`] and [`null_mut`] functions to create null pointers, and the
-/// [`is_null`] method of the `*const T` and `*mut T` types to check for null.
-/// The `*const T` and `*mut T` types also define the [`offset`] method, for
-/// pointer math.
-///
-/// # Common ways to create raw pointers
-///
-/// ## 1. Coerce a reference (`&T`) or mutable reference (`&mut T`).
-///
-/// ```
-/// let my_num: i32 = 10;
-/// let my_num_ptr: *const i32 = &my_num;
-/// let mut my_speed: i32 = 88;
-/// let my_speed_ptr: *mut i32 = &mut my_speed;
-/// ```
-///
-/// To get a pointer to a boxed value, dereference the box:
-///
-/// ```
-/// let my_num: Box<i32> = Box::new(10);
-/// let my_num_ptr: *const i32 = &*my_num;
-/// let mut my_speed: Box<i32> = Box::new(88);
-/// let my_speed_ptr: *mut i32 = &mut *my_speed;
-/// ```
-///
-/// This does not take ownership of the original allocation
-/// and requires no resource management later,
-/// but you must not use the pointer after its lifetime.
-///
-/// ## 2. Consume a box (`Box<T>`).
-///
-/// The [`into_raw`] function consumes a box and returns
-/// the raw pointer. It doesn't destroy `T` or deallocate any memory.
-///
-/// ```
-/// let my_speed: Box<i32> = Box::new(88);
-/// let my_speed: *mut i32 = Box::into_raw(my_speed);
-///
-/// // By taking ownership of the original `Box<T>`, though, we are
-/// // obligated to reconstruct a `Box` later so the value can be destroyed.
-/// unsafe {
-/// drop(Box::from_raw(my_speed));
-/// }
-/// ```
-///
-/// Note that here the call to [`drop`] is for clarity - it indicates
-/// that we are done with the given value and it should be destroyed.
-///
-/// ## 3. Create it using `ptr::addr_of!`
-///
-/// Instead of coercing a reference to a raw pointer, you can use the macros
-/// [`ptr::addr_of!`] (for `*const T`) and [`ptr::addr_of_mut!`] (for `*mut T`).
-/// These macros allow you to create raw pointers to fields to which you cannot
-/// create a reference (without causing undefined behaviour), such as an
-/// unaligned field. This might be necessary if packed structs or uninitialized
-/// memory is involved.
-///
-/// ```
-/// #[derive(Debug, Default, Copy, Clone)]
-/// #[repr(C, packed)]
-/// struct S {
-/// aligned: u8,
-/// unaligned: u32,
-/// }
-/// let s = S::default();
-/// let p = std::ptr::addr_of!(s.unaligned); // not allowed with coercion
-/// ```
-///
-/// ## 4. Get it from C.
-///
-/// ```
-/// # #![feature(rustc_private)]
-/// #[allow(unused_extern_crates)]
-/// extern crate libc;
-///
-/// use std::mem;
-///
-/// unsafe {
-/// let my_num: *mut i32 = libc::malloc(mem::size_of::<i32>()) as *mut i32;
-/// if my_num.is_null() {
-/// panic!("failed to allocate memory");
-/// }
-/// libc::free(my_num as *mut libc::c_void);
-/// }
-/// ```
-///
-/// Usually you wouldn't literally use `malloc` and `free` from Rust,
-/// but C APIs hand out a lot of pointers, so they are a common source
-/// of raw pointers in Rust.
-///
-/// [`null`]: ptr::null
-/// [`null_mut`]: ptr::null_mut
-/// [`is_null`]: pointer::is_null
-/// [`offset`]: pointer::offset
-#[doc = concat!("[`into_raw`]: ", include_str!("../primitive_docs/box_into_raw.md"))]
-/// [`write`]: ptr::write
-#[stable(feature = "rust1", since = "1.0.0")]
-mod prim_pointer {}
-
-#[rustc_doc_primitive = "array"]
-#[doc(alias = "[]")]
-#[doc(alias = "[T;N]")] // unfortunately, rustdoc doesn't have fuzzy search for aliases
-#[doc(alias = "[T; N]")]
-/// A fixed-size array, denoted `[T; N]`, for the element type, `T`, and the
-/// non-negative compile-time constant size, `N`.
-///
-/// There are two syntactic forms for creating an array:
-///
-/// * A list with each element, i.e., `[x, y, z]`.
-/// * A repeat expression `[expr; N]` where `N` is how many times to repeat `expr` in the array. `expr` must either be:
-///
-/// * A value of a type implementing the [`Copy`] trait
-/// * A `const` value
-///
-/// Note that `[expr; 0]` is allowed, and produces an empty array.
-/// This will still evaluate `expr`, however, and immediately drop the resulting value, so
-/// be mindful of side effects.
-///
-/// Arrays of *any* size implement the following traits if the element type allows it:
-///
-/// - [`Copy`]
-/// - [`Clone`]
-/// - [`Debug`]
-/// - [`IntoIterator`] (implemented for `[T; N]`, `&[T; N]` and `&mut [T; N]`)
-/// - [`PartialEq`], [`PartialOrd`], [`Eq`], [`Ord`]
-/// - [`Hash`]
-/// - [`AsRef`], [`AsMut`]
-/// - [`Borrow`], [`BorrowMut`]
-///
-/// Arrays of sizes from 0 to 32 (inclusive) implement the [`Default`] trait
-/// if the element type allows it. As a stopgap, trait implementations are
-/// statically generated up to size 32.
-///
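-/// For example, a quick sketch: a small array of a `Default` element type can itself
-/// be created with `Default::default()`:
-///
-/// ```
-/// let zeros: [i32; 3] = Default::default();
-/// assert_eq!(zeros, [0, 0, 0]);
-/// ```
-///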
-/// Arrays of sizes from 1 to 12 (inclusive) implement [`From<Tuple>`], where `Tuple`
-/// is a homogeneous [prim@tuple] of appropriate length.
-///
-/// Arrays coerce to [slices (`[T]`)][slice], so a slice method may be called on
-/// an array. Indeed, this provides most of the API for working with arrays.
-///
-/// Slices have a dynamic size and do not coerce to arrays. Instead, use
-/// `slice.try_into().unwrap()` or `<ArrayType>::try_from(slice).unwrap()`.
-///
-/// Array's `try_from(slice)` implementations (and the corresponding `slice.try_into()`
-/// array implementations) succeed if the input slice length is the same as the result
-/// array length. They optimize especially well when the optimizer can easily determine
-/// the slice length, e.g. `<[u8; 4]>::try_from(&slice[4..8]).unwrap()`. Array implements
-/// [TryFrom](crate::convert::TryFrom) returning:
-///
-/// - `[T; N]` copies from the slice's elements
-/// - `&[T; N]` references the original slice's elements
-/// - `&mut [T; N]` references the original slice's elements
-///
-/// You can move elements out of an array with a [slice pattern]. If you want
-/// one element, see [`mem::replace`].
-///
-/// # Examples
-///
-/// ```
-/// let mut array: [i32; 3] = [0; 3];
-///
-/// array[1] = 1;
-/// array[2] = 2;
-///
-/// assert_eq!([1, 2], &array[1..]);
-///
-/// // This loop prints: 0 1 2
-/// for x in array {
-/// print!("{x} ");
-/// }
-/// ```
-///
-/// You can also iterate over references to the array's elements:
-///
-/// ```
-/// let array: [i32; 3] = [0; 3];
-///
-/// for x in &array { }
-/// ```
-///
-/// You can use `<ArrayType>::try_from(slice)` or `slice.try_into()` to get an array from
-/// a slice:
-///
-/// ```
-/// let bytes: [u8; 3] = [1, 0, 2];
-/// assert_eq!(1, u16::from_le_bytes(<[u8; 2]>::try_from(&bytes[0..2]).unwrap()));
-/// assert_eq!(512, u16::from_le_bytes(bytes[1..3].try_into().unwrap()));
-/// ```
-///
-/// You can use a [slice pattern] to move elements out of an array:
-///
-/// ```
-/// fn move_away(_: String) { /* Do interesting things. */ }
-///
-/// let [john, roa] = ["John".to_string(), "Roa".to_string()];
-/// move_away(john);
-/// move_away(roa);
-/// ```
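-///
-/// If you only need to move a single element out and can leave a placeholder in its
-/// place, [`mem::replace`] works too (a quick sketch):
-///
-/// ```
-/// use std::mem;
-///
-/// let mut names = ["John".to_string(), "Roa".to_string()];
-/// let john = mem::replace(&mut names[0], String::new());
-///
-/// assert_eq!(john, "John");
-/// assert_eq!(names[0], "");
-/// ```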
-///
-/// Arrays can be created from homogeneous tuples of appropriate length:
-///
-/// ```
-/// let tuple: (u32, u32, u32) = (1, 2, 3);
-/// let array: [u32; 3] = tuple.into();
-/// ```
-///
-/// # Editions
-///
-/// Prior to Rust 1.53, arrays did not implement [`IntoIterator`] by value, so the method call
-/// `array.into_iter()` auto-referenced into a [slice iterator](slice::iter). Right now, the old
-/// behavior is preserved in the 2015 and 2018 editions of Rust for compatibility, ignoring
-/// [`IntoIterator`] by value. In the future, the behavior on the 2015 and 2018 editions
-/// might be made consistent with the behavior of later editions.
-///
-/// ```rust,edition2018
-/// // Rust 2015 and 2018:
-///
-/// # #![allow(array_into_iter)] // override our `deny(warnings)`
-/// let array: [i32; 3] = [0; 3];
-///
-/// // This creates a slice iterator, producing references to each value.
-/// for item in array.into_iter().enumerate() {
-/// let (i, x): (usize, &i32) = item;
-/// println!("array[{i}] = {x}");
-/// }
-///
-/// // The `array_into_iter` lint suggests this change for future compatibility:
-/// for item in array.iter().enumerate() {
-/// let (i, x): (usize, &i32) = item;
-/// println!("array[{i}] = {x}");
-/// }
-///
-/// // You can explicitly iterate an array by value using `IntoIterator::into_iter`
-/// for item in IntoIterator::into_iter(array).enumerate() {
-/// let (i, x): (usize, i32) = item;
-/// println!("array[{i}] = {x}");
-/// }
-/// ```
-///
-/// Starting in the 2021 edition, `array.into_iter()` uses `IntoIterator` normally to iterate
-/// by value, and `iter()` should be used to iterate by reference like previous editions.
-///
-/// ```rust,edition2021
-/// // Rust 2021:
-///
-/// let array: [i32; 3] = [0; 3];
-///
-/// // This iterates by reference:
-/// for item in array.iter().enumerate() {
-/// let (i, x): (usize, &i32) = item;
-/// println!("array[{i}] = {x}");
-/// }
-///
-/// // This iterates by value:
-/// for item in array.into_iter().enumerate() {
-/// let (i, x): (usize, i32) = item;
-/// println!("array[{i}] = {x}");
-/// }
-/// ```
-///
-/// Future language versions might start treating the `array.into_iter()`
-/// syntax on editions 2015 and 2018 the same as on edition 2021. So code using
-/// those older editions should still be written with this change in mind, to
-/// prevent breakage in the future. The safest way to accomplish this is to
-/// avoid the `into_iter` syntax on those editions. If an edition update is not
-/// viable/desired, there are multiple alternatives:
-/// * use `iter`, equivalent to the old behavior, creating references
-/// * use [`IntoIterator::into_iter`], equivalent to the post-2021 behavior (Rust 1.53+)
-/// * replace `for ... in array.into_iter() {` with `for ... in array {`,
-/// equivalent to the post-2021 behavior (Rust 1.53+)
-///
-/// ```rust,edition2018
-/// // Rust 2015 and 2018:
-///
-/// let array: [i32; 3] = [0; 3];
-///
-/// // This iterates by reference:
-/// for item in array.iter() {
-/// let x: &i32 = item;
-/// println!("{x}");
-/// }
-///
-/// // This iterates by value:
-/// for item in IntoIterator::into_iter(array) {
-/// let x: i32 = item;
-/// println!("{x}");
-/// }
-///
-/// // This iterates by value:
-/// for item in array {
-/// let x: i32 = item;
-/// println!("{x}");
-/// }
-///
-/// // IntoIter can also start a chain.
-/// // This iterates by value:
-/// for item in IntoIterator::into_iter(array).enumerate() {
-/// let (i, x): (usize, i32) = item;
-/// println!("array[{i}] = {x}");
-/// }
-/// ```
-///
-/// [slice]: prim@slice
-/// [`Debug`]: fmt::Debug
-/// [`Hash`]: hash::Hash
-/// [`Borrow`]: borrow::Borrow
-/// [`BorrowMut`]: borrow::BorrowMut
-/// [slice pattern]: ../reference/patterns.html#slice-patterns
-/// [`From<Tuple>`]: convert::From
-#[stable(feature = "rust1", since = "1.0.0")]
-mod prim_array {}
-
-#[rustc_doc_primitive = "slice"]
-#[doc(alias = "[")]
-#[doc(alias = "]")]
-#[doc(alias = "[]")]
-/// A dynamically-sized view into a contiguous sequence, `[T]`. Contiguous here
-/// means that elements are laid out so that every element is the same
-/// distance from its neighbors.
-///
-/// *[See also the `std::slice` module](crate::slice).*
-///
-/// Slices are a view into a block of memory represented as a pointer and a
-/// length.
-///
-/// ```
-/// // slicing a Vec
-/// let vec = vec![1, 2, 3];
-/// let int_slice = &vec[..];
-/// // coercing an array to a slice
-/// let str_slice: &[&str] = &["one", "two", "three"];
-/// ```
-///
-/// Slices are either mutable or shared. The shared slice type is `&[T]`,
-/// while the mutable slice type is `&mut [T]`, where `T` represents the element
-/// type. For example, you can mutate the block of memory that a mutable slice
-/// points to:
-///
-/// ```
-/// let mut x = [1, 2, 3];
-/// let x = &mut x[..]; // Take a full slice of `x`.
-/// x[1] = 7;
-/// assert_eq!(x, &[1, 7, 3]);
-/// ```
-///
-/// As slices store the length of the sequence they refer to, they have twice
-/// the size of pointers to [`Sized`](marker/trait.Sized.html) types.
-/// Also see the reference on
-/// [dynamically sized types](../reference/dynamically-sized-types.html).
-///
-/// ```
-/// # use std::rc::Rc;
-/// let pointer_size = std::mem::size_of::<&u8>();
-/// assert_eq!(2 * pointer_size, std::mem::size_of::<&[u8]>());
-/// assert_eq!(2 * pointer_size, std::mem::size_of::<*const [u8]>());
-/// assert_eq!(2 * pointer_size, std::mem::size_of::<Box<[u8]>>());
-/// assert_eq!(2 * pointer_size, std::mem::size_of::<Rc<[u8]>>());
-/// ```
-///
-/// ## Trait Implementations
-///
-/// Some traits are implemented for slices if the element type implements
-/// that trait. This includes [`Eq`], [`Hash`] and [`Ord`].
-///
-/// ## Iteration
-///
-/// Slices implement `IntoIterator`. The iterator yields references to the
-/// slice's elements.
-///
-/// ```
-/// let numbers: &[i32] = &[0, 1, 2];
-/// for n in numbers {
-/// println!("{n} is a number!");
-/// }
-/// ```
-///
-/// Mutable slices yield mutable references to the elements:
-///
-/// ```
-/// let mut scores: &mut [i32] = &mut [7, 8, 9];
-/// for score in scores {
-/// *score += 1;
-/// }
-/// ```
-///
-/// This iterator yields mutable references to the slice's elements, so while
-/// the element type of the slice is `i32`, the element type of the iterator is
-/// `&mut i32`.
-///
-/// * [`.iter`] and [`.iter_mut`] are the explicit methods to return the default
-/// iterators.
-/// * Further methods that return iterators are [`.split`], [`.splitn`],
-/// [`.chunks`], [`.windows`] and more.
-///
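-/// For example, a quick sketch of [`.windows`] and [`.chunks`], which yield
-/// overlapping and non-overlapping sub-slices respectively:
-///
-/// ```
-/// let numbers = [1, 2, 3, 4];
-///
-/// let mut windows = numbers.windows(2);
-/// assert_eq!(windows.next(), Some(&[1, 2][..]));
-/// assert_eq!(windows.next(), Some(&[2, 3][..]));
-///
-/// let mut chunks = numbers.chunks(3);
-/// assert_eq!(chunks.next(), Some(&[1, 2, 3][..]));
-/// assert_eq!(chunks.next(), Some(&[4][..]));
-/// assert_eq!(chunks.next(), None);
-/// ```
-///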
-/// [`Hash`]: core::hash::Hash
-/// [`.iter`]: slice::iter
-/// [`.iter_mut`]: slice::iter_mut
-/// [`.split`]: slice::split
-/// [`.splitn`]: slice::splitn
-/// [`.chunks`]: slice::chunks
-/// [`.windows`]: slice::windows
-#[stable(feature = "rust1", since = "1.0.0")]
-mod prim_slice {}
-
-#[rustc_doc_primitive = "str"]
-/// String slices.
-///
-/// *[See also the `std::str` module](crate::str).*
-///
-/// The `str` type, also called a 'string slice', is the most primitive string
-/// type. It is usually seen in its borrowed form, `&str`. It is also the type
-/// of string literals, `&'static str`.
-///
-/// String slices are always valid UTF-8.
-///
-/// # Basic Usage
-///
-/// String literals are string slices:
-///
-/// ```
-/// let hello_world = "Hello, World!";
-/// ```
-///
-/// Here we have declared a string slice initialized with a string literal.
-/// String literals have a static lifetime, which means the string `hello_world`
-/// is guaranteed to be valid for the duration of the entire program.
-/// We can explicitly specify `hello_world`'s lifetime as well:
-///
-/// ```
-/// let hello_world: &'static str = "Hello, world!";
-/// ```
-///
-/// # Representation
-///
-/// A `&str` is made up of two components: a pointer to some bytes, and a
-/// length. You can look at these with the [`as_ptr`] and [`len`] methods:
-///
-/// ```
-/// use std::slice;
-/// use std::str;
-///
-/// let story = "Once upon a time...";
-///
-/// let ptr = story.as_ptr();
-/// let len = story.len();
-///
-/// // story has nineteen bytes
-/// assert_eq!(19, len);
-///
-/// // We can re-build a str out of ptr and len. This is all unsafe because
-/// // we are responsible for making sure the two components are valid:
-/// let s = unsafe {
-/// // First, we build a &[u8]...
-/// let slice = slice::from_raw_parts(ptr, len);
-///
-/// // ... and then convert that slice into a string slice
-/// str::from_utf8(slice)
-/// };
-///
-/// assert_eq!(s, Ok(story));
-/// ```
-///
-/// [`as_ptr`]: str::as_ptr
-/// [`len`]: str::len
-///
-/// Note: This example shows the internals of `&str`. `unsafe` should not be
-/// used to get a string slice under normal circumstances. Use `as_str`
-/// instead.
-#[stable(feature = "rust1", since = "1.0.0")]
-mod prim_str {}
-
-#[rustc_doc_primitive = "tuple"]
-#[doc(alias = "(")]
-#[doc(alias = ")")]
-#[doc(alias = "()")]
-//
-/// A finite heterogeneous sequence, `(T, U, ..)`.
-///
-/// Let's cover each of those in turn:
-///
-/// Tuples are *finite*. In other words, a tuple has a length. Here's a tuple
-/// of length `3`:
-///
-/// ```
-/// ("hello", 5, 'c');
-/// ```
-///
-/// 'Length' is also sometimes called 'arity' here; each tuple of a different
-/// length is a different, distinct type.
-///
-/// Tuples are *heterogeneous*. This means that each element of the tuple can
-/// have a different type. The tuple above, for instance, has the type:
-///
-/// ```
-/// # let _:
-/// (&'static str, i32, char)
-/// # = ("hello", 5, 'c');
-/// ```
-///
-/// Tuples are a *sequence*. This means that they can be accessed by position;
-/// this is called 'tuple indexing', and it looks like this:
-///
-/// ```rust
-/// let tuple = ("hello", 5, 'c');
-///
-/// assert_eq!(tuple.0, "hello");
-/// assert_eq!(tuple.1, 5);
-/// assert_eq!(tuple.2, 'c');
-/// ```
-///
-/// The sequential nature of the tuple applies to its implementations of various
-/// traits. For example, in [`PartialOrd`] and [`Ord`], the elements are compared
-/// sequentially until the first non-equal pair of elements is found.
-///
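-/// For instance, a quick sketch: the first elements decide the ordering unless they
-/// are equal, in which case the comparison moves on to the second elements, and so on:
-///
-/// ```
-/// assert!((1, "b") < (2, "a")); // decided by the first elements
-/// assert!((1, "a") < (1, "b")); // first elements tie, so the second ones decide
-/// ```
-///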
-/// For more about tuples, see [the book](../book/ch03-02-data-types.html#the-tuple-type).
-///
-// Hardcoded anchor in src/librustdoc/html/format.rs
-// linked to as `#trait-implementations-1`
-/// # Trait implementations
-///
-/// In this documentation the shorthand `(T₁, T₂, …, Tₙ)` is used to represent tuples of varying
-/// length. When that is used, any trait bound expressed on `T` applies to each element of the
-/// tuple independently. Note that this is a convenience notation to avoid repetitive
-/// documentation, not valid Rust syntax.
-///
-/// Due to a temporary restriction in Rust’s type system, the following traits are only
-/// implemented on tuples of arity 12 or less. In the future, this may change:
-///
-/// * [`PartialEq`]
-/// * [`Eq`]
-/// * [`PartialOrd`]
-/// * [`Ord`]
-/// * [`Debug`]
-/// * [`Default`]
-/// * [`Hash`]
-/// * [`From<[T; N]>`][from]
-///
-/// [from]: convert::From
-/// [`Debug`]: fmt::Debug
-/// [`Hash`]: hash::Hash
-///
-/// The following traits are implemented for tuples of any length. These traits have
-/// implementations that are automatically generated by the compiler, so are not limited by
-/// missing language features.
-///
-/// * [`Clone`]
-/// * [`Copy`]
-/// * [`Send`]
-/// * [`Sync`]
-/// * [`Unpin`]
-/// * [`UnwindSafe`]
-/// * [`RefUnwindSafe`]
-///
-/// [`UnwindSafe`]: panic::UnwindSafe
-/// [`RefUnwindSafe`]: panic::RefUnwindSafe
-///
-/// # Examples
-///
-/// Basic usage:
-///
-/// ```
-/// let tuple = ("hello", 5, 'c');
-///
-/// assert_eq!(tuple.0, "hello");
-/// ```
-///
-/// Tuples are often used as a return type when you want to return more than
-/// one value:
-///
-/// ```
-/// fn calculate_point() -> (i32, i32) {
-/// // Don't do a calculation, that's not the point of the example
-/// (4, 5)
-/// }
-///
-/// let point = calculate_point();
-///
-/// assert_eq!(point.0, 4);
-/// assert_eq!(point.1, 5);
-///
-/// // Combining this with patterns can be nicer.
-///
-/// let (x, y) = calculate_point();
-///
-/// assert_eq!(x, 4);
-/// assert_eq!(y, 5);
-/// ```
-///
-/// Homogeneous tuples can be created from arrays of appropriate length:
-///
-/// ```
-/// let array: [u32; 3] = [1, 2, 3];
-/// let tuple: (u32, u32, u32) = array.into();
-/// ```
-///
-#[stable(feature = "rust1", since = "1.0.0")]
-mod prim_tuple {}
-
-// Required to make auto trait impls render.
-// See src/librustdoc/passes/collect_trait_impls.rs:collect_trait_impls
-#[doc(hidden)]
-impl<T> (T,) {}
-
-// Fake impl that's only really used for docs.
-#[cfg(doc)]
-#[stable(feature = "rust1", since = "1.0.0")]
-#[doc(fake_variadic)]
-/// This trait is implemented on arbitrary-length tuples.
-impl<T: Clone> Clone for (T,) {
- fn clone(&self) -> Self {
- loop {}
- }
-}
-
-// Fake impl that's only really used for docs.
-#[cfg(doc)]
-#[stable(feature = "rust1", since = "1.0.0")]
-#[doc(fake_variadic)]
-/// This trait is implemented on arbitrary-length tuples.
-impl<T: Copy> Copy for (T,) {
- // empty
-}
-
-#[rustc_doc_primitive = "f32"]
-/// A 32-bit floating point type (specifically, the "binary32" type defined in IEEE 754-2008).
-///
-/// This type can represent a wide range of decimal numbers, like `3.5`, `27`,
-/// `-113.75`, `0.0078125`, `34359738368`, `0`, `-1`. So unlike integer types
-/// (such as `i32`), floating point types can represent non-integer numbers,
-/// too.
-///
-/// However, being able to represent this wide range of numbers comes at the
-/// cost of precision: floats can only represent some of the real numbers and
-/// calculations with floats round to a nearby representable number. For example,
-/// `5.0` and `1.0` can be exactly represented as `f32`, but `1.0 / 5.0` results
-/// in `0.20000000298023223876953125` since `0.2` cannot be exactly represented
-/// as `f32`. Note, however, that printing floats with `println` and friends will
-/// often discard insignificant digits: `println!("{}", 1.0f32 / 5.0f32)` will
-/// print `0.2`.
-///
-/// Additionally, `f32` can represent some special values:
-///
-/// - −0.0: IEEE 754 floating point numbers have a bit that indicates their sign, so −0.0 is a
-/// possible value. For comparison −0.0 = +0.0, but floating point operations can carry
-/// the sign bit through arithmetic operations. This means −0.0 × +0.0 produces −0.0 and
-/// a negative number rounded to a value smaller than a float can represent also produces −0.0.
-/// - [∞](#associatedconstant.INFINITY) and
-/// [−∞](#associatedconstant.NEG_INFINITY): these result from calculations
-/// like `1.0 / 0.0`.
-/// - [NaN (not a number)](#associatedconstant.NAN): this value results from
-/// calculations like `(-1.0).sqrt()`. NaN has some potentially unexpected
-/// behavior:
-/// - It is not equal to any float, including itself! This is the reason `f32`
-/// doesn't implement the `Eq` trait.
-/// - It is also neither smaller nor greater than any float, making it
-/// impossible to sort by the default comparison operation, which is the
-/// reason `f32` doesn't implement the `Ord` trait.
-/// - It is also considered *infectious* as almost all calculations where one
-/// of the operands is NaN will also result in NaN. The explanations on this
-/// page only explicitly document behavior on NaN operands when it deviates
-/// from this default.
-/// - Lastly, there are multiple bit patterns that are considered NaN.
-/// Rust does not currently guarantee that the bit patterns of NaN are
-/// preserved over arithmetic operations, and they are not guaranteed to be
-/// portable or even fully deterministic! This means that there may be some
-/// surprising results upon inspecting the bit patterns,
-/// as the same calculations might produce NaNs with different bit patterns.
-///
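-/// A quick sketch of the NaN comparisons described above:
-///
-/// ```
-/// let nan = f32::NAN;
-///
-/// assert!(nan != nan);                     // not equal to anything, including itself
-/// assert_eq!(nan.partial_cmp(&1.0), None); // neither smaller nor greater than 1.0
-/// assert!((nan + 1.0).is_nan());           // NaN is infectious
-/// ```
-///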
-/// When the number resulting from a primitive operation (addition,
-/// subtraction, multiplication, or division) on this type is not exactly
-/// representable as `f32`, it is rounded according to the roundTiesToEven
-/// direction defined in IEEE 754-2008. That means:
-///
-/// - The result is the representable value closest to the true value, if there
-/// is a unique closest representable value.
-/// - If the true value is exactly half-way between two representable values,
-/// the result is the one with an even least-significant binary digit.
-/// - If the true value's magnitude is ≥ `f32::MAX` + 2<sup>(`f32::MAX_EXP` −
-/// `f32::MANTISSA_DIGITS` − 1)</sup>, the result is ∞ or −∞ (preserving the
-/// true value's sign).
-///
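-/// A quick sketch of this rounding: the quotient below is rounded to the nearest
-/// representable `f32`, which is the same value the literal `0.2` rounds to, but is
-/// not exactly one fifth:
-///
-/// ```
-/// let fifth = 1.0f32 / 5.0;
-///
-/// assert_eq!(fifth, 0.2);           // both sides round to the same nearest `f32`
-/// assert_ne!(fifth as f64, 0.2f64); // ... which is not exactly 0.2
-/// ```
-///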
-/// For more information on floating point numbers, see [Wikipedia][wikipedia].
-///
-/// *[See also the `std::f32::consts` module](crate::f32::consts).*
-///
-/// [wikipedia]: https://en.wikipedia.org/wiki/Single-precision_floating-point_format
-#[stable(feature = "rust1", since = "1.0.0")]
-mod prim_f32 {}
-
-#[rustc_doc_primitive = "f64"]
-/// A 64-bit floating point type (specifically, the "binary64" type defined in IEEE 754-2008).
-///
-/// This type is very similar to [`f32`], but has increased
-/// precision by using twice as many bits. Please see [the documentation for
-/// `f32`][`f32`] or [Wikipedia on double precision
-/// values][wikipedia] for more information.
-///
-/// *[See also the `std::f64::consts` module](crate::f64::consts).*
-///
-/// [`f32`]: prim@f32
-/// [wikipedia]: https://en.wikipedia.org/wiki/Double-precision_floating-point_format
-#[stable(feature = "rust1", since = "1.0.0")]
-mod prim_f64 {}
-
-#[rustc_doc_primitive = "i8"]
-//
-/// The 8-bit signed integer type.
-#[stable(feature = "rust1", since = "1.0.0")]
-mod prim_i8 {}
-
-#[rustc_doc_primitive = "i16"]
-//
-/// The 16-bit signed integer type.
-#[stable(feature = "rust1", since = "1.0.0")]
-mod prim_i16 {}
-
-#[rustc_doc_primitive = "i32"]
-//
-/// The 32-bit signed integer type.
-#[stable(feature = "rust1", since = "1.0.0")]
-mod prim_i32 {}
-
-#[rustc_doc_primitive = "i64"]
-//
-/// The 64-bit signed integer type.
-#[stable(feature = "rust1", since = "1.0.0")]
-mod prim_i64 {}
-
-#[rustc_doc_primitive = "i128"]
-//
-/// The 128-bit signed integer type.
-#[stable(feature = "i128", since = "1.26.0")]
-mod prim_i128 {}
-
-#[rustc_doc_primitive = "u8"]
-//
-/// The 8-bit unsigned integer type.
-#[stable(feature = "rust1", since = "1.0.0")]
-mod prim_u8 {}
-
-#[rustc_doc_primitive = "u16"]
-//
-/// The 16-bit unsigned integer type.
-#[stable(feature = "rust1", since = "1.0.0")]
-mod prim_u16 {}
-
-#[rustc_doc_primitive = "u32"]
-//
-/// The 32-bit unsigned integer type.
-#[stable(feature = "rust1", since = "1.0.0")]
-mod prim_u32 {}
-
-#[rustc_doc_primitive = "u64"]
-//
-/// The 64-bit unsigned integer type.
-#[stable(feature = "rust1", since = "1.0.0")]
-mod prim_u64 {}
-
-#[rustc_doc_primitive = "u128"]
-//
-/// The 128-bit unsigned integer type.
-#[stable(feature = "i128", since = "1.26.0")]
-mod prim_u128 {}
-
-#[rustc_doc_primitive = "isize"]
-//
-/// The pointer-sized signed integer type.
-///
-/// The size of this primitive is how many bytes it takes to reference any
-/// location in memory. For example, on a 32-bit target, this is 4 bytes
-/// and on a 64-bit target, this is 8 bytes.
-#[stable(feature = "rust1", since = "1.0.0")]
-mod prim_isize {}
-
-#[rustc_doc_primitive = "usize"]
-//
-/// The pointer-sized unsigned integer type.
-///
-/// The size of this primitive is how many bytes it takes to reference any
-/// location in memory. For example, on a 32-bit target, this is 4 bytes
-/// and on a 64-bit target, this is 8 bytes.
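-///
-/// For example, a quick sketch: `usize` has the same size as a data pointer on the
-/// target.
-///
-/// ```
-/// assert_eq!(std::mem::size_of::<usize>(), std::mem::size_of::<*const u8>());
-/// ```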
-#[stable(feature = "rust1", since = "1.0.0")]
-mod prim_usize {}
-
-#[rustc_doc_primitive = "reference"]
-#[doc(alias = "&")]
-#[doc(alias = "&mut")]
-//
-/// References, `&T` and `&mut T`.
-///
-/// A reference represents a borrow of some owned value. You can get one by using the `&` or `&mut`
-/// operators on a value, or by using a [`ref`](../std/keyword.ref.html) or
-/// <code>[ref](../std/keyword.ref.html) [mut](../std/keyword.mut.html)</code> pattern.
-///
-/// For those familiar with pointers, a reference is just a pointer that is assumed to be
-/// aligned, not null, and pointing to memory containing a valid value of `T` - for example,
-/// <code>&[bool]</code> can only point to an allocation containing the integer values `1`
-/// ([`true`](../std/keyword.true.html)) or `0` ([`false`](../std/keyword.false.html)), but
-/// creating a <code>&[bool]</code> that points to an allocation containing
-/// the value `3` causes undefined behaviour.
-/// In fact, <code>[Option]\<&T></code> has the same memory representation as a
-/// nullable but aligned pointer, and can be passed across FFI boundaries as such.
-///
-/// In most cases, references can be used much like the original value. Field access, method
-/// calling, and indexing work the same (save for mutability rules, of course). In addition, the
-/// comparison operators transparently defer to the referent's implementation, allowing references
-/// to be compared the same as owned values.
-///
-/// References have a lifetime attached to them, which represents the scope for which the borrow is
-/// valid. A lifetime is said to "outlive" another one if its representative scope is as long or
-/// longer than the other. The `'static` lifetime is the longest lifetime, which represents the
-/// total life of the program. For example, string literals have a `'static` lifetime because the
-/// text data is embedded into the binary of the program, rather than in an allocation that needs
-/// to be dynamically managed.
-///
-/// `&mut T` references can be freely coerced into `&T` references with the same referent type, and
-/// references with longer lifetimes can be freely coerced into references with shorter ones.
-///
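-/// For example, a quick sketch: a `&mut T` can be passed where a `&T` is expected:
-///
-/// ```
-/// fn read(x: &i32) -> i32 {
-///     *x
-/// }
-///
-/// let mut n = 5;
-/// let m: &mut i32 = &mut n;
-///
-/// assert_eq!(read(m), 5); // `&mut i32` coerces to `&i32` here
-/// ```
-///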
-/// Reference equality by address, instead of comparing the values pointed to, is accomplished via
-/// implicit reference-pointer coercion and raw pointer equality via [`ptr::eq`], while
-/// [`PartialEq`] compares values.
-///
-/// ```
-/// use std::ptr;
-///
-/// let five = 5;
-/// let other_five = 5;
-/// let five_ref = &five;
-/// let same_five_ref = &five;
-/// let other_five_ref = &other_five;
-///
-/// assert!(five_ref == same_five_ref);
-/// assert!(five_ref == other_five_ref);
-///
-/// assert!(ptr::eq(five_ref, same_five_ref));
-/// assert!(!ptr::eq(five_ref, other_five_ref));
-/// ```
-///
-/// For more information on how to use references, see [the book's section on "References and
-/// Borrowing"][book-refs].
-///
-/// [book-refs]: ../book/ch04-02-references-and-borrowing.html
-///
-/// # Trait implementations
-///
-/// The following traits are implemented for all `&T`, regardless of the type of its referent:
-///
-/// * [`Copy`]
-/// * [`Clone`] \(Note that this will not defer to `T`'s `Clone` implementation if it exists!)
-/// * [`Deref`]
-/// * [`Borrow`]
-/// * [`fmt::Pointer`]
-///
-/// [`Deref`]: ops::Deref
-/// [`Borrow`]: borrow::Borrow
-///
-/// `&mut T` references get all of the above except `Copy` and `Clone` (to prevent creating
-/// multiple simultaneous mutable borrows), plus the following, regardless of the type of its
-/// referent:
-///
-/// * [`DerefMut`]
-/// * [`BorrowMut`]
-///
-/// [`DerefMut`]: ops::DerefMut
-/// [`BorrowMut`]: borrow::BorrowMut
-/// [bool]: prim@bool
-///
-/// The following traits are implemented on `&T` references if the underlying `T` also implements
-/// that trait:
-///
-/// * All the traits in [`std::fmt`] except [`fmt::Pointer`] (which is implemented regardless of the type of its referent) and [`fmt::Write`]
-/// * [`PartialOrd`]
-/// * [`Ord`]
-/// * [`PartialEq`]
-/// * [`Eq`]
-/// * [`AsRef`]
-/// * [`Fn`] \(in addition, `&T` references get [`FnMut`] and [`FnOnce`] if `T: Fn`)
-/// * [`Hash`]
-/// * [`ToSocketAddrs`]
-/// * [`Send`] \(`&T` references also require <code>T: [Sync]</code>)
-/// * [`Sync`]
-///
-/// [`std::fmt`]: fmt
-/// [`Hash`]: hash::Hash
-#[doc = concat!("[`ToSocketAddrs`]: ", include_str!("../primitive_docs/net_tosocketaddrs.md"))]
-///
-/// `&mut T` references get all of the above except `ToSocketAddrs`, plus the following, if `T`
-/// implements that trait:
-///
-/// * [`AsMut`]
-/// * [`FnMut`] \(in addition, `&mut T` references get [`FnOnce`] if `T: FnMut`)
-/// * [`fmt::Write`]
-/// * [`Iterator`]
-/// * [`DoubleEndedIterator`]
-/// * [`ExactSizeIterator`]
-/// * [`FusedIterator`]
-/// * [`TrustedLen`]
-/// * [`io::Write`]
-/// * [`Read`]
-/// * [`Seek`]
-/// * [`BufRead`]
-///
-/// [`FusedIterator`]: iter::FusedIterator
-/// [`TrustedLen`]: iter::TrustedLen
-#[doc = concat!("[`Seek`]: ", include_str!("../primitive_docs/io_seek.md"))]
-#[doc = concat!("[`BufRead`]: ", include_str!("../primitive_docs/io_bufread.md"))]
-#[doc = concat!("[`Read`]: ", include_str!("../primitive_docs/io_read.md"))]
-#[doc = concat!("[`io::Write`]: ", include_str!("../primitive_docs/io_write.md"))]
-///
-/// Note that due to method call deref coercion, simply calling a trait method on a reference
-/// works just as it does on the owned value! The implementations described here are
-/// meant for generic contexts, where the final type `T` is a type parameter or otherwise not
-/// locally known.
-#[stable(feature = "rust1", since = "1.0.0")]
-mod prim_ref {}
-
-#[rustc_doc_primitive = "fn"]
-//
-/// Function pointers, like `fn(usize) -> bool`.
-///
-/// *See also the traits [`Fn`], [`FnMut`], and [`FnOnce`].*
-///
-/// Function pointers are pointers that point to *code*, not data. They can be called
-/// just like functions. Like references, function pointers are, among other things, assumed to
-/// not be null, so if you want to pass a function pointer over FFI and be able to accommodate null
-/// pointers, make your type [`Option<fn()>`](core::option#options-and-pointers-nullable-pointers)
-/// with your required signature.
-///
-/// ### Safety
-///
-/// Plain function pointers are obtained by casting either plain functions, or closures that don't
-/// capture an environment:
-///
-/// ```
-/// fn add_one(x: usize) -> usize {
-/// x + 1
-/// }
-///
-/// let ptr: fn(usize) -> usize = add_one;
-/// assert_eq!(ptr(5), 6);
-///
-/// let clos: fn(usize) -> usize = |x| x + 5;
-/// assert_eq!(clos(5), 10);
-/// ```
-///
-/// In addition to varying based on their signature, function pointers come in two flavors: safe
-/// and unsafe. Plain `fn()` function pointers can only point to safe functions,
-/// while `unsafe fn()` function pointers can point to safe or unsafe functions.
-///
-/// ```
-/// fn add_one(x: usize) -> usize {
-/// x + 1
-/// }
-///
-/// unsafe fn add_one_unsafely(x: usize) -> usize {
-/// x + 1
-/// }
-///
-/// let safe_ptr: fn(usize) -> usize = add_one;
-///
-/// //ERROR: mismatched types: expected normal fn, found unsafe fn
-/// //let bad_ptr: fn(usize) -> usize = add_one_unsafely;
-///
-/// let unsafe_ptr: unsafe fn(usize) -> usize = add_one_unsafely;
-/// let really_safe_ptr: unsafe fn(usize) -> usize = add_one;
-/// ```
-///
-/// ### ABI
-///
-/// On top of that, function pointers can vary based on what ABI they use. This
-/// is achieved by adding the `extern` keyword before the type, followed by the
-/// ABI in question. The default ABI is "Rust", i.e., `fn()` is the exact same
-/// type as `extern "Rust" fn()`. A pointer to a function with C ABI would have
-/// type `extern "C" fn()`.
-///
-/// `extern "ABI" { ... }` blocks declare functions with ABI "ABI". The default
-/// here is "C", i.e., functions declared in an `extern {...}` block have "C"
-/// ABI.
-///
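-/// For example, a quick sketch: a function defined with the C ABI coerces only to a
-/// function pointer type that spells out the same ABI:
-///
-/// ```
-/// extern "C" fn on_event(code: i32) -> i32 {
-///     code + 1
-/// }
-///
-/// // `let callback: fn(i32) -> i32 = on_event;` would be an ABI mismatch.
-/// let callback: extern "C" fn(i32) -> i32 = on_event;
-/// assert_eq!(callback(1), 2);
-/// ```
-///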
-/// For more information and a list of supported ABIs, see [the nomicon's
-/// section on foreign calling conventions][nomicon-abi].
-///
-/// [nomicon-abi]: ../nomicon/ffi.html#foreign-calling-conventions
-///
-/// ### Variadic functions
-///
-/// Extern function declarations with the "C" or "cdecl" ABIs can also be *variadic*, allowing them
-/// to be called with a variable number of arguments. Normal Rust functions, even those with an
-/// `extern "ABI"`, cannot be variadic. For more information, see [the nomicon's section on
-/// variadic functions][nomicon-variadic].
-///
-/// [nomicon-variadic]: ../nomicon/ffi.html#variadic-functions
-///
-/// ### Creating function pointers
-///
-/// When `bar` is the name of a function, then the expression `bar` is *not* a
-/// function pointer. Rather, it denotes a value of an unnameable type that
-/// uniquely identifies the function `bar`. The value is zero-sized because the
-/// type already identifies the function. This has the advantage that "calling"
-/// the value (it implements the `Fn*` traits) does not require dynamic
-/// dispatch.
-///
-/// This zero-sized type *coerces* to a regular function pointer. For example:
-///
-/// ```rust
-/// use std::mem;
-///
-/// fn bar(x: i32) {}
-///
-/// let not_bar_ptr = bar; // `not_bar_ptr` is zero-sized, uniquely identifying `bar`
-/// assert_eq!(mem::size_of_val(&not_bar_ptr), 0);
-///
-/// let bar_ptr: fn(i32) = not_bar_ptr; // force coercion to function pointer
-/// assert_eq!(mem::size_of_val(&bar_ptr), mem::size_of::<usize>());
-///
-/// let footgun = &bar; // this is a shared reference to the zero-sized type identifying `bar`
-/// ```
-///
-/// The last line shows that `&bar` is not a function pointer either. Rather, it
-/// is a reference to the function-specific ZST. `&bar` is basically never what you
-/// want when `bar` is a function.
-///
-/// ### Casting to and from integers
-///
-/// You can cast function pointers directly to integers:
-///
-/// ```rust
-/// let fnptr: fn(i32) -> i32 = |x| x+2;
-/// let fnptr_addr = fnptr as usize;
-/// ```
-///
-/// However, a direct cast back is not possible. You need to use `transmute`:
-///
-/// ```rust
-/// # #[cfg(not(miri))] { // FIXME: use strict provenance APIs once they are stable, then remove this `cfg`
-/// # let fnptr: fn(i32) -> i32 = |x| x+2;
-/// # let fnptr_addr = fnptr as usize;
-/// let fnptr = fnptr_addr as *const ();
-/// let fnptr: fn(i32) -> i32 = unsafe { std::mem::transmute(fnptr) };
-/// assert_eq!(fnptr(40), 42);
-/// # }
-/// ```
-///
-/// Crucially, we `as`-cast to a raw pointer before `transmute`ing to a function pointer.
-/// This avoids an integer-to-pointer `transmute`, which can be problematic.
-/// Transmuting between raw pointers and function pointers (i.e., two pointer types) is fine.
-///
-/// Note that all of this is not portable to platforms where function pointers and data pointers
-/// have different sizes.
-///
-/// ### Trait implementations
-///
-/// In this documentation the shorthand `fn (T₁, T₂, …, Tₙ)` is used to represent non-variadic
-/// function pointers of varying length. Note that this is a convenience notation to avoid
-/// repetitive documentation, not valid Rust syntax.
-///
-/// Due to a temporary restriction in Rust's type system, these traits are only implemented on
-/// functions that take 12 arguments or less, with the `"Rust"` and `"C"` ABIs. In the future, this
-/// may change:
-///
-/// * [`PartialEq`]
-/// * [`Eq`]
-/// * [`PartialOrd`]
-/// * [`Ord`]
-/// * [`Hash`]
-/// * [`Pointer`]
-/// * [`Debug`]
-///
-/// The following traits are implemented for function pointers with any number of arguments and
-/// any ABI. These traits have implementations that are automatically generated by the compiler,
-/// so are not limited by missing language features:
-///
-/// * [`Clone`]
-/// * [`Copy`]
-/// * [`Send`]
-/// * [`Sync`]
-/// * [`Unpin`]
-/// * [`UnwindSafe`]
-/// * [`RefUnwindSafe`]
-///
-/// [`Hash`]: hash::Hash
-/// [`Pointer`]: fmt::Pointer
-/// [`UnwindSafe`]: panic::UnwindSafe
-/// [`RefUnwindSafe`]: panic::RefUnwindSafe
-///
-/// In addition, all *safe* function pointers implement [`Fn`], [`FnMut`], and [`FnOnce`], because
-/// these traits are specially known to the compiler.
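-///
-/// For example, a quick sketch: a safe `fn` pointer can be passed to anything that
-/// expects an [`Fn`] bound:
-///
-/// ```
-/// fn double(x: i32) -> i32 {
-///     x * 2
-/// }
-///
-/// fn apply_twice(f: impl Fn(i32) -> i32, x: i32) -> i32 {
-///     f(f(x))
-/// }
-///
-/// let ptr: fn(i32) -> i32 = double;
-/// assert_eq!(apply_twice(ptr, 3), 12);
-/// ```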
-#[stable(feature = "rust1", since = "1.0.0")]
-mod prim_fn {}
-
-// Required to make auto trait impls render.
-// See src/librustdoc/passes/collect_trait_impls.rs:collect_trait_impls
-#[doc(hidden)]
-impl<Ret, T> fn(T) -> Ret {}
-
-// Fake impl that's only really used for docs.
-#[cfg(doc)]
-#[stable(feature = "rust1", since = "1.0.0")]
-#[doc(fake_variadic)]
-/// This trait is implemented on function pointers with any number of arguments.
-impl<Ret, T> Clone for fn(T) -> Ret {
- fn clone(&self) -> Self {
- loop {}
- }
-}
-
-// Fake impl that's only really used for docs.
-#[cfg(doc)]
-#[stable(feature = "rust1", since = "1.0.0")]
-#[doc(fake_variadic)]
-/// This trait is implemented on function pointers with any number of arguments.
-impl<Ret, T> Copy for fn(T) -> Ret {
- // empty
-}
diff --git a/library/std/src/process.rs b/library/std/src/process.rs
index 7380b45b0..8c1497613 100644
--- a/library/std/src/process.rs
+++ b/library/std/src/process.rs
@@ -12,9 +12,9 @@
//! use std::process::Command;
//!
//! let output = Command::new("echo")
-//! .arg("Hello world")
-//! .output()
-//! .expect("Failed to execute command");
+//! .arg("Hello world")
+//! .output()
+//! .expect("Failed to execute command");
//!
//! assert_eq!(b"Hello world\n", output.stdout.as_slice());
//! ```
@@ -101,7 +101,7 @@
#![stable(feature = "process", since = "1.0.0")]
#![deny(unsafe_op_in_unsafe_fn)]
-#[cfg(all(test, not(any(target_os = "emscripten", target_env = "sgx"))))]
+#[cfg(all(test, not(any(target_os = "emscripten", target_env = "sgx", target_os = "xous"))))]
mod tests;
use crate::io::prelude::*;
@@ -154,12 +154,11 @@ use crate::sys_common::{AsInner, AsInnerMut, FromInner, IntoInner};
/// use std::process::Command;
///
/// let mut child = Command::new("/bin/cat")
-/// .arg("file.txt")
-/// .spawn()
-/// .expect("failed to execute child");
+/// .arg("file.txt")
+/// .spawn()
+/// .expect("failed to execute child");
///
-/// let ecode = child.wait()
-/// .expect("failed to wait on child");
+/// let ecode = child.wait().expect("failed to wait on child");
///
/// assert!(ecode.success());
/// ```
@@ -481,15 +480,15 @@ impl fmt::Debug for ChildStderr {
///
/// let output = if cfg!(target_os = "windows") {
/// Command::new("cmd")
-/// .args(["/C", "echo hello"])
-/// .output()
-/// .expect("failed to execute process")
+/// .args(["/C", "echo hello"])
+/// .output()
+/// .expect("failed to execute process")
/// } else {
/// Command::new("sh")
-/// .arg("-c")
-/// .arg("echo hello")
-/// .output()
-/// .expect("failed to execute process")
+/// .arg("-c")
+/// .arg("echo hello")
+/// .output()
+/// .expect("failed to execute process")
/// };
///
/// let hello = output.stdout;
@@ -502,8 +501,7 @@ impl fmt::Debug for ChildStderr {
/// use std::process::Command;
///
/// let mut echo_hello = Command::new("sh");
-/// echo_hello.arg("-c")
-/// .arg("echo hello");
+/// echo_hello.arg("-c").arg("echo hello");
/// let hello_1 = echo_hello.output().expect("failed to execute process");
/// let hello_2 = echo_hello.output().expect("failed to execute process");
/// ```
@@ -576,8 +574,8 @@ impl Command {
/// use std::process::Command;
///
/// Command::new("sh")
- /// .spawn()
- /// .expect("sh command failed to start");
+ /// .spawn()
+ /// .expect("sh command failed to start");
/// ```
#[stable(feature = "process", since = "1.0.0")]
pub fn new<S: AsRef<OsStr>>(program: S) -> Command {
@@ -620,10 +618,10 @@ impl Command {
/// use std::process::Command;
///
/// Command::new("ls")
- /// .arg("-l")
- /// .arg("-a")
- /// .spawn()
- /// .expect("ls command failed to start");
+ /// .arg("-l")
+ /// .arg("-a")
+ /// .spawn()
+ /// .expect("ls command failed to start");
/// ```
#[stable(feature = "process", since = "1.0.0")]
pub fn arg<S: AsRef<OsStr>>(&mut self, arg: S) -> &mut Command {
@@ -650,9 +648,9 @@ impl Command {
/// use std::process::Command;
///
/// Command::new("ls")
- /// .args(["-l", "-a"])
- /// .spawn()
- /// .expect("ls command failed to start");
+ /// .args(["-l", "-a"])
+ /// .spawn()
+ /// .expect("ls command failed to start");
/// ```
#[stable(feature = "process", since = "1.0.0")]
pub fn args<I, S>(&mut self, args: I) -> &mut Command
@@ -688,9 +686,9 @@ impl Command {
/// use std::process::Command;
///
/// Command::new("ls")
- /// .env("PATH", "/bin")
- /// .spawn()
- /// .expect("ls command failed to start");
+ /// .env("PATH", "/bin")
+ /// .spawn()
+ /// .expect("ls command failed to start");
/// ```
#[stable(feature = "process", since = "1.0.0")]
pub fn env<K, V>(&mut self, key: K, val: V) -> &mut Command
@@ -731,12 +729,12 @@ impl Command {
/// ).collect();
///
/// Command::new("printenv")
- /// .stdin(Stdio::null())
- /// .stdout(Stdio::inherit())
- /// .env_clear()
- /// .envs(&filtered_env)
- /// .spawn()
- /// .expect("printenv failed to start");
+ /// .stdin(Stdio::null())
+ /// .stdout(Stdio::inherit())
+ /// .env_clear()
+ /// .envs(&filtered_env)
+ /// .spawn()
+ /// .expect("printenv failed to start");
/// ```
#[stable(feature = "command_envs", since = "1.19.0")]
pub fn envs<I, K, V>(&mut self, vars: I) -> &mut Command
@@ -772,9 +770,9 @@ impl Command {
/// use std::process::Command;
///
/// Command::new("ls")
- /// .env_remove("PATH")
- /// .spawn()
- /// .expect("ls command failed to start");
+ /// .env_remove("PATH")
+ /// .spawn()
+ /// .expect("ls command failed to start");
/// ```
#[stable(feature = "process", since = "1.0.0")]
pub fn env_remove<K: AsRef<OsStr>>(&mut self, key: K) -> &mut Command {
@@ -789,7 +787,7 @@ impl Command {
/// or [`Command::envs`]. In addition, it will prevent the spawned child process from inheriting
/// any environment variable from its parent process.
///
- /// After calling [`Command::env_remove`], the iterator from [`Command::get_envs`] will be
+ /// After calling [`Command::env_clear`], the iterator from [`Command::get_envs`] will be
/// empty.
///
/// You can use [`Command::env_remove`] to clear a single mapping.
@@ -802,9 +800,9 @@ impl Command {
/// use std::process::Command;
///
/// Command::new("ls")
- /// .env_clear()
- /// .spawn()
- /// .expect("ls command failed to start");
+ /// .env_clear()
+ /// .spawn()
+ /// .expect("ls command failed to start");
/// ```
#[stable(feature = "process", since = "1.0.0")]
pub fn env_clear(&mut self) -> &mut Command {
@@ -830,9 +828,9 @@ impl Command {
/// use std::process::Command;
///
/// Command::new("ls")
- /// .current_dir("/bin")
- /// .spawn()
- /// .expect("ls command failed to start");
+ /// .current_dir("/bin")
+ /// .spawn()
+ /// .expect("ls command failed to start");
/// ```
///
/// [`canonicalize`]: crate::fs::canonicalize
@@ -861,9 +859,9 @@ impl Command {
/// use std::process::{Command, Stdio};
///
/// Command::new("ls")
- /// .stdin(Stdio::null())
- /// .spawn()
- /// .expect("ls command failed to start");
+ /// .stdin(Stdio::null())
+ /// .spawn()
+ /// .expect("ls command failed to start");
/// ```
#[stable(feature = "process", since = "1.0.0")]
pub fn stdin<T: Into<Stdio>>(&mut self, cfg: T) -> &mut Command {
@@ -890,9 +888,9 @@ impl Command {
/// use std::process::{Command, Stdio};
///
/// Command::new("ls")
- /// .stdout(Stdio::null())
- /// .spawn()
- /// .expect("ls command failed to start");
+ /// .stdout(Stdio::null())
+ /// .spawn()
+ /// .expect("ls command failed to start");
/// ```
#[stable(feature = "process", since = "1.0.0")]
pub fn stdout<T: Into<Stdio>>(&mut self, cfg: T) -> &mut Command {
@@ -919,9 +917,9 @@ impl Command {
/// use std::process::{Command, Stdio};
///
/// Command::new("ls")
- /// .stderr(Stdio::null())
- /// .spawn()
- /// .expect("ls command failed to start");
+ /// .stderr(Stdio::null())
+ /// .spawn()
+ /// .expect("ls command failed to start");
/// ```
#[stable(feature = "process", since = "1.0.0")]
pub fn stderr<T: Into<Stdio>>(&mut self, cfg: T) -> &mut Command {
@@ -941,8 +939,8 @@ impl Command {
/// use std::process::Command;
///
/// Command::new("ls")
- /// .spawn()
- /// .expect("ls command failed to start");
+ /// .spawn()
+ /// .expect("ls command failed to start");
/// ```
#[stable(feature = "process", since = "1.0.0")]
pub fn spawn(&mut self) -> io::Result<Child> {
@@ -963,9 +961,9 @@ impl Command {
/// use std::process::Command;
/// use std::io::{self, Write};
/// let output = Command::new("/bin/cat")
- /// .arg("file.txt")
- /// .output()
- /// .expect("failed to execute process");
+ /// .arg("file.txt")
+ /// .output()
+ /// .expect("failed to execute process");
///
/// println!("status: {}", output.status);
/// io::stdout().write_all(&output.stdout).unwrap();
@@ -990,9 +988,9 @@ impl Command {
/// use std::process::Command;
///
/// let status = Command::new("/bin/cat")
- /// .arg("file.txt")
- /// .status()
- /// .expect("failed to execute process");
+ /// .arg("file.txt")
+ /// .status()
+ /// .expect("failed to execute process");
///
/// println!("process finished with: {status}");
///
@@ -1501,6 +1499,66 @@ impl From<fs::File> for Stdio {
}
}
+#[stable(feature = "stdio_from_stdio", since = "1.74.0")]
+impl From<io::Stdout> for Stdio {
+ /// Redirect command stdout/stderr to our stdout
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// #![feature(exit_status_error)]
+ /// use std::io;
+ /// use std::process::Command;
+ ///
+ /// # fn test() -> Result<(), Box<dyn std::error::Error>> {
+ /// let output = Command::new("whoami")
+ // "whoami" is a command which exists on both Unix and Windows,
+ // and which succeeds, producing some stdout output but no stderr.
+ /// .stdout(io::stdout())
+ /// .output()?;
+ /// output.status.exit_ok()?;
+ /// assert!(output.stdout.is_empty());
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # if cfg!(unix) {
+ /// # test().unwrap();
+ /// # }
+ /// ```
+ fn from(inherit: io::Stdout) -> Stdio {
+ Stdio::from_inner(inherit.into())
+ }
+}
+
+#[stable(feature = "stdio_from_stdio", since = "1.74.0")]
+impl From<io::Stderr> for Stdio {
+ /// Redirect command stdout/stderr to our stderr
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// #![feature(exit_status_error)]
+ /// use std::io;
+ /// use std::process::Command;
+ ///
+ /// # fn test() -> Result<(), Box<dyn std::error::Error>> {
+ /// let output = Command::new("whoami")
+ /// .stdout(io::stderr())
+ /// .output()?;
+ /// output.status.exit_ok()?;
+ /// assert!(output.stdout.is_empty());
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # if cfg!(unix) {
+ /// # test().unwrap();
+ /// # }
+ /// ```
+ fn from(inherit: io::Stderr) -> Stdio {
+ Stdio::from_inner(inherit.into())
+ }
+}
+
/// Describes the result of a process after it has terminated.
///
/// This `struct` is used to represent the exit status or other termination of a child process.
@@ -1558,9 +1616,9 @@ impl ExitStatus {
/// use std::process::Command;
///
/// let status = Command::new("ls")
- /// .arg("/dev/nonexistent")
- /// .status()
- /// .expect("ls could not be executed");
+ /// .arg("/dev/nonexistent")
+ /// .status()
+ /// .expect("ls could not be executed");
///
/// println!("ls: {status}");
/// status.exit_ok().expect_err("/dev/nonexistent could be listed!");
@@ -1580,9 +1638,9 @@ impl ExitStatus {
/// use std::process::Command;
///
/// let status = Command::new("mkdir")
- /// .arg("projects")
- /// .status()
- /// .expect("failed to execute mkdir");
+ /// .arg("projects")
+ /// .status()
+ /// .expect("failed to execute mkdir");
///
/// if status.success() {
/// println!("'projects/' directory created");
@@ -1613,13 +1671,13 @@ impl ExitStatus {
/// use std::process::Command;
///
/// let status = Command::new("mkdir")
- /// .arg("projects")
- /// .status()
- /// .expect("failed to execute mkdir");
+ /// .arg("projects")
+ /// .status()
+ /// .expect("failed to execute mkdir");
///
/// match status.code() {
/// Some(code) => println!("Exited with status code: {code}"),
- /// None => println!("Process terminated by signal")
+ /// None => println!("Process terminated by signal")
/// }
/// ```
#[must_use]
@@ -1749,9 +1807,9 @@ impl ExitStatusError {
}
#[unstable(feature = "exit_status_error", issue = "84908")]
-impl Into<ExitStatus> for ExitStatusError {
- fn into(self) -> ExitStatus {
- ExitStatus(self.0.into())
+impl From<ExitStatusError> for ExitStatus {
+ fn from(error: ExitStatusError) -> Self {
+ Self(error.0.into())
}
}
diff --git a/library/std/src/process/tests.rs b/library/std/src/process/tests.rs
index 366b59146..07d4de5c1 100644
--- a/library/std/src/process/tests.rs
+++ b/library/std/src/process/tests.rs
@@ -434,6 +434,91 @@ fn test_creation_flags() {
assert!(events > 0);
}
+/// Tests proc thread attributes by spawning a process with a custom parent process,
+/// then comparing the parent process ID with the expected parent process ID.
+#[test]
+#[cfg(windows)]
+fn test_proc_thread_attributes() {
+ use crate::mem;
+ use crate::os::windows::io::AsRawHandle;
+ use crate::os::windows::process::CommandExt;
+ use crate::sys::c::{CloseHandle, BOOL, HANDLE};
+ use crate::sys::cvt;
+
+ #[repr(C)]
+ #[allow(non_snake_case)]
+ struct PROCESSENTRY32W {
+ dwSize: u32,
+ cntUsage: u32,
+ th32ProcessID: u32,
+ th32DefaultHeapID: usize,
+ th32ModuleID: u32,
+ cntThreads: u32,
+ th32ParentProcessID: u32,
+ pcPriClassBase: i32,
+ dwFlags: u32,
+ szExeFile: [u16; 260],
+ }
+
+ extern "system" {
+ fn CreateToolhelp32Snapshot(dwflags: u32, th32processid: u32) -> HANDLE;
+ fn Process32First(hsnapshot: HANDLE, lppe: *mut PROCESSENTRY32W) -> BOOL;
+ fn Process32Next(hsnapshot: HANDLE, lppe: *mut PROCESSENTRY32W) -> BOOL;
+ }
+
+ const PROC_THREAD_ATTRIBUTE_PARENT_PROCESS: usize = 0x00020000;
+ const TH32CS_SNAPPROCESS: u32 = 0x00000002;
+
+ struct ProcessDropGuard(crate::process::Child);
+
+ impl Drop for ProcessDropGuard {
+ fn drop(&mut self) {
+ let _ = self.0.kill();
+ }
+ }
+
+ let parent = ProcessDropGuard(Command::new("cmd").spawn().unwrap());
+
+ let mut child_cmd = Command::new("cmd");
+
+ unsafe {
+ child_cmd
+ .raw_attribute(PROC_THREAD_ATTRIBUTE_PARENT_PROCESS, parent.0.as_raw_handle() as isize);
+ }
+
+ let child = ProcessDropGuard(child_cmd.spawn().unwrap());
+
+ let h_snapshot = unsafe { CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0) };
+
+ let mut process_entry = PROCESSENTRY32W {
+ dwSize: mem::size_of::<PROCESSENTRY32W>() as u32,
+ cntUsage: 0,
+ th32ProcessID: 0,
+ th32DefaultHeapID: 0,
+ th32ModuleID: 0,
+ cntThreads: 0,
+ th32ParentProcessID: 0,
+ pcPriClassBase: 0,
+ dwFlags: 0,
+ szExeFile: [0; 260],
+ };
+
+ unsafe { cvt(Process32First(h_snapshot, &mut process_entry as *mut _)) }.unwrap();
+
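+ // Walk the snapshot entries until the spawned child is found, so its recorded parent PID can be checked.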
+ loop {
+ if child.0.id() == process_entry.th32ProcessID {
+ break;
+ }
+ unsafe { cvt(Process32Next(h_snapshot, &mut process_entry as *mut _)) }.unwrap();
+ }
+
+ unsafe { cvt(CloseHandle(h_snapshot)) }.unwrap();
+
+ assert_eq!(parent.0.id(), process_entry.th32ParentProcessID);
+
+ drop(child)
+}
+
#[test]
fn test_command_implements_send_sync() {
fn take_send_sync_type<T: Send + Sync>(_: T) {}
@@ -452,7 +537,7 @@ fn env_empty() {
#[test]
#[cfg(not(windows))]
#[cfg_attr(any(target_os = "emscripten", target_env = "sgx"), ignore)]
-fn main() {
+fn debug_print() {
const PIDFD: &'static str =
if cfg!(target_os = "linux") { " create_pidfd: false,\n" } else { "" };
@@ -541,6 +626,51 @@ fn main() {
{PIDFD}}}"#
)
);
+
+ let mut command_with_removed_env = Command::new("boring-name");
+ command_with_removed_env.env_remove("FOO").env_remove("BAR");
+ assert_eq!(format!("{command_with_removed_env:?}"), r#"env -u BAR -u FOO "boring-name""#);
+ assert_eq!(
+ format!("{command_with_removed_env:#?}"),
+ format!(
+ r#"Command {{
+ program: "boring-name",
+ args: [
+ "boring-name",
+ ],
+ env: CommandEnv {{
+ clear: false,
+ vars: {{
+ "BAR": None,
+ "FOO": None,
+ }},
+ }},
+{PIDFD}}}"#
+ )
+ );
+
+ let mut command_with_cleared_env = Command::new("boring-name");
+ command_with_cleared_env.env_clear().env("BAR", "val").env_remove("FOO");
+ assert_eq!(format!("{command_with_cleared_env:?}"), r#"env -i BAR="val" "boring-name""#);
+ assert_eq!(
+ format!("{command_with_cleared_env:#?}"),
+ format!(
+ r#"Command {{
+ program: "boring-name",
+ args: [
+ "boring-name",
+ ],
+ env: CommandEnv {{
+ clear: true,
+ vars: {{
+ "BAR": Some(
+ "val",
+ ),
+ }},
+ }},
+{PIDFD}}}"#
+ )
+ );
}
// See issue #91991
diff --git a/library/std/src/sync/mpsc/mod.rs b/library/std/src/sync/mpsc/mod.rs
index f92bb1a4b..d353c7bd5 100644
--- a/library/std/src/sync/mpsc/mod.rs
+++ b/library/std/src/sync/mpsc/mod.rs
@@ -626,11 +626,6 @@ impl<T> Clone for Sender<T> {
}
}
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> Drop for Sender<T> {
- fn drop(&mut self) {}
-}
-
#[stable(feature = "mpsc_debug", since = "1.8.0")]
impl<T> fmt::Debug for Sender<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@@ -755,11 +750,6 @@ impl<T> Clone for SyncSender<T> {
}
}
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> Drop for SyncSender<T> {
- fn drop(&mut self) {}
-}
-
#[stable(feature = "mpsc_debug", since = "1.8.0")]
impl<T> fmt::Debug for SyncSender<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@@ -1096,11 +1086,6 @@ impl<T> IntoIterator for Receiver<T> {
}
}
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> Drop for Receiver<T> {
- fn drop(&mut self) {}
-}
-
#[stable(feature = "mpsc_debug", since = "1.8.0")]
impl<T> fmt::Debug for Receiver<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
diff --git a/library/std/src/sys/common/small_c_string.rs b/library/std/src/sys/common/small_c_string.rs
index 963d17a47..af9b18e37 100644
--- a/library/std/src/sys/common/small_c_string.rs
+++ b/library/std/src/sys/common/small_c_string.rs
@@ -19,7 +19,7 @@ pub fn run_path_with_cstr<T, F>(path: &Path, f: F) -> io::Result<T>
where
F: FnOnce(&CStr) -> io::Result<T>,
{
- run_with_cstr(path.as_os_str().as_os_str_bytes(), f)
+ run_with_cstr(path.as_os_str().as_encoded_bytes(), f)
}
#[inline]
diff --git a/library/std/src/sys/common/tests.rs b/library/std/src/sys/common/tests.rs
index 0a1cbcbe8..32dc18ee1 100644
--- a/library/std/src/sys/common/tests.rs
+++ b/library/std/src/sys/common/tests.rs
@@ -8,7 +8,7 @@ use core::iter::repeat;
fn stack_allocation_works() {
let path = Path::new("abc");
let result = run_path_with_cstr(path, |p| {
- assert_eq!(p, &*CString::new(path.as_os_str().as_os_str_bytes()).unwrap());
+ assert_eq!(p, &*CString::new(path.as_os_str().as_encoded_bytes()).unwrap());
Ok(42)
});
assert_eq!(result.unwrap(), 42);
@@ -25,7 +25,7 @@ fn heap_allocation_works() {
let path = repeat("a").take(384).collect::<String>();
let path = Path::new(&path);
let result = run_path_with_cstr(path, |p| {
- assert_eq!(p, &*CString::new(path.as_os_str().as_os_str_bytes()).unwrap());
+ assert_eq!(p, &*CString::new(path.as_os_str().as_encoded_bytes()).unwrap());
Ok(42)
});
assert_eq!(result.unwrap(), 42);
diff --git a/library/std/src/sys/common/thread_local/mod.rs b/library/std/src/sys/common/thread_local/mod.rs
index 975509bd4..8b2c839f8 100644
--- a/library/std/src/sys/common/thread_local/mod.rs
+++ b/library/std/src/sys/common/thread_local/mod.rs
@@ -6,7 +6,7 @@
// "static" is for single-threaded platforms where a global static is sufficient.
cfg_if::cfg_if! {
- if #[cfg(all(target_family = "wasm", not(target_feature = "atomics")))] {
+ if #[cfg(any(all(target_family = "wasm", not(target_feature = "atomics")), target_os = "uefi"))] {
#[doc(hidden)]
mod static_local;
#[doc(hidden)]
diff --git a/library/std/src/sys/hermit/mod.rs b/library/std/src/sys/hermit/mod.rs
index c7cb84667..abd7eb353 100644
--- a/library/std/src/sys/hermit/mod.rs
+++ b/library/std/src/sys/hermit/mod.rs
@@ -101,7 +101,6 @@ pub extern "C" fn __rust_abort() {
// SAFETY: must be called only once during runtime initialization.
// NOTE: this is not guaranteed to run, for example when Rust code is called externally.
pub unsafe fn init(argc: isize, argv: *const *const u8, _sigpipe: u8) {
- let _ = net::init();
args::init(argc, argv);
}
@@ -130,6 +129,11 @@ pub unsafe extern "C" fn runtime_entry(
abi::exit(result);
}
+#[inline]
+pub(crate) fn is_interrupted(errno: i32) -> bool {
+ errno == abi::errno::EINTR
+}
+
pub fn decode_error_kind(errno: i32) -> ErrorKind {
match errno {
abi::errno::EACCES => ErrorKind::PermissionDenied,
@@ -196,7 +200,7 @@ where
{
loop {
match cvt(f()) {
- Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
+ Err(ref e) if e.is_interrupted() => {}
other => return other,
}
}
diff --git a/library/std/src/sys/hermit/net.rs b/library/std/src/sys/hermit/net.rs
index 8c2d489d6..a564f1698 100644
--- a/library/std/src/sys/hermit/net.rs
+++ b/library/std/src/sys/hermit/net.rs
@@ -33,13 +33,7 @@ pub fn cvt_gai(err: i32) -> io::Result<()> {
))
}
-/// Checks whether the HermitCore's socket interface has been started already, and
-/// if not, starts it.
-pub fn init() {
- if unsafe { netc::network_init() } < 0 {
- panic!("Unable to initialize network interface");
- }
-}
+pub fn init() {}
#[derive(Debug)]
pub struct Socket(FileDesc);
@@ -108,7 +102,7 @@ impl Socket {
match unsafe { netc::poll(&mut pollfd, 1, timeout) } {
-1 => {
let err = io::Error::last_os_error();
- if err.kind() != io::ErrorKind::Interrupted {
+ if !err.is_interrupted() {
return Err(err);
}
}
diff --git a/library/std/src/sys/itron/error.rs b/library/std/src/sys/itron/error.rs
index 830c60d32..fbc822d4e 100644
--- a/library/std/src/sys/itron/error.rs
+++ b/library/std/src/sys/itron/error.rs
@@ -79,6 +79,11 @@ pub fn error_name(er: abi::ER) -> Option<&'static str> {
}
}
+#[inline]
+pub fn is_interrupted(er: abi::ER) -> bool {
+ er == abi::E_RLWAI
+}
+
pub fn decode_error_kind(er: abi::ER) -> ErrorKind {
match er {
// Success
diff --git a/library/std/src/sys/mod.rs b/library/std/src/sys/mod.rs
index beea3f23c..159ffe7ac 100644
--- a/library/std/src/sys/mod.rs
+++ b/library/std/src/sys/mod.rs
@@ -44,6 +44,12 @@ cfg_if::cfg_if! {
} else if #[cfg(target_family = "wasm")] {
mod wasm;
pub use self::wasm::*;
+ } else if #[cfg(target_os = "xous")] {
+ mod xous;
+ pub use self::xous::*;
+ } else if #[cfg(target_os = "uefi")] {
+ mod uefi;
+ pub use self::uefi::*;
} else if #[cfg(all(target_vendor = "fortanix", target_env = "sgx"))] {
mod sgx;
pub use self::sgx::*;
@@ -110,3 +116,6 @@ pub fn log_wrapper<F: Fn(f64) -> f64>(n: f64, log_fn: F) -> f64 {
pub fn log_wrapper<F: Fn(f64) -> f64>(n: f64, log_fn: F) -> f64 {
log_fn(n)
}
+
+#[cfg(not(target_os = "uefi"))]
+pub type RawOsError = i32;
diff --git a/library/std/src/sys/sgx/mod.rs b/library/std/src/sys/sgx/mod.rs
index 9865a945b..09d3f7638 100644
--- a/library/std/src/sys/sgx/mod.rs
+++ b/library/std/src/sys/sgx/mod.rs
@@ -86,6 +86,12 @@ pub fn sgx_ineffective<T>(v: T) -> crate::io::Result<T> {
}
}
+#[inline]
+pub fn is_interrupted(code: i32) -> bool {
+ use fortanix_sgx_abi::Error;
+ code == Error::Interrupted as _
+}
+
pub fn decode_error_kind(code: i32) -> ErrorKind {
use fortanix_sgx_abi::Error;
diff --git a/library/std/src/sys/solid/mod.rs b/library/std/src/sys/solid/mod.rs
index 923d27fd9..5af83653c 100644
--- a/library/std/src/sys/solid/mod.rs
+++ b/library/std/src/sys/solid/mod.rs
@@ -72,6 +72,11 @@ pub fn unsupported_err() -> crate::io::Error {
)
}
+#[inline]
+pub fn is_interrupted(code: i32) -> bool {
+ net::is_interrupted(code)
+}
+
pub fn decode_error_kind(code: i32) -> crate::io::ErrorKind {
error::decode_error_kind(code)
}
diff --git a/library/std/src/sys/solid/net.rs b/library/std/src/sys/solid/net.rs
index 0bd2bc3b9..6adced787 100644
--- a/library/std/src/sys/solid/net.rs
+++ b/library/std/src/sys/solid/net.rs
@@ -181,6 +181,11 @@ pub(super) fn error_name(er: abi::ER) -> Option<&'static str> {
unsafe { CStr::from_ptr(netc::strerror(er)) }.to_str().ok()
}
+#[inline]
+pub fn is_interrupted(er: abi::ER) -> bool {
+ er == netc::SOLID_NET_ERR_BASE - libc::EINTR
+}
+
pub(super) fn decode_error_kind(er: abi::ER) -> ErrorKind {
let errno = netc::SOLID_NET_ERR_BASE - er;
match errno as libc::c_int {
diff --git a/library/std/src/sys/solid/os.rs b/library/std/src/sys/solid/os.rs
index 9f4e66d62..ff81544ba 100644
--- a/library/std/src/sys/solid/os.rs
+++ b/library/std/src/sys/solid/os.rs
@@ -8,7 +8,7 @@ use crate::os::{
solid::ffi::{OsStrExt, OsStringExt},
};
use crate::path::{self, PathBuf};
-use crate::sync::RwLock;
+use crate::sync::{PoisonError, RwLock};
use crate::sys::common::small_c_string::run_with_cstr;
use crate::vec;
diff --git a/library/std/src/sys/uefi/alloc.rs b/library/std/src/sys/uefi/alloc.rs
new file mode 100644
index 000000000..789e3cbd8
--- /dev/null
+++ b/library/std/src/sys/uefi/alloc.rs
@@ -0,0 +1,33 @@
+//! Global Allocator for UEFI.
+//! Uses [r-efi-alloc](https://crates.io/crates/r-efi-alloc)
+
+use crate::alloc::{GlobalAlloc, Layout, System};
+
+const MEMORY_TYPE: u32 = r_efi::efi::LOADER_DATA;
+
+#[stable(feature = "alloc_system_type", since = "1.28.0")]
+unsafe impl GlobalAlloc for System {
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ // Return null pointer if boot services are not available
+ if crate::os::uefi::env::boot_services().is_none() {
+ return crate::ptr::null_mut();
+ }
+
+ // If boot services are available, then the SystemTable is not null.
+ let system_table = crate::os::uefi::env::system_table().as_ptr().cast();
+ // The caller must ensure non-0 layout
+ unsafe { r_efi_alloc::raw::alloc(system_table, layout, MEMORY_TYPE) }
+ }
+
+ unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+ // Do nothing if boot services are not available
+ if crate::os::uefi::env::boot_services().is_none() {
+ return;
+ }
+
+ // If boot services are available, then the SystemTable is not null.
+ let system_table = crate::os::uefi::env::system_table().as_ptr().cast();
+ // The caller must ensure non-0 layout
+ unsafe { r_efi_alloc::raw::dealloc(system_table, ptr, layout) }
+ }
+}
diff --git a/library/std/src/sys/uefi/env.rs b/library/std/src/sys/uefi/env.rs
new file mode 100644
index 000000000..c106d5fed
--- /dev/null
+++ b/library/std/src/sys/uefi/env.rs
@@ -0,0 +1,9 @@
+pub mod os {
+ pub const FAMILY: &str = "";
+ pub const OS: &str = "uefi";
+ pub const DLL_PREFIX: &str = "";
+ pub const DLL_SUFFIX: &str = "";
+ pub const DLL_EXTENSION: &str = "";
+ pub const EXE_SUFFIX: &str = ".efi";
+ pub const EXE_EXTENSION: &str = "efi";
+}
diff --git a/library/std/src/sys/uefi/helpers.rs b/library/std/src/sys/uefi/helpers.rs
new file mode 100644
index 000000000..126661bfc
--- /dev/null
+++ b/library/std/src/sys/uefi/helpers.rs
@@ -0,0 +1,141 @@
+//! Contains most of the shared UEFI-specific code. Some of this might be moved to `std::os::uefi`
+//! if needed, but there is no point in adding extra public API while there is no std support for
+//! UEFI in the first place.
+//!
+//! Some Nomenclature
+//! * Protocol:
+//! - Protocols serve to enable communication between separately built modules, including drivers.
+//! - Every protocol has a GUID associated with it. The GUID serves as the name for the protocol.
+//! - Protocols are produced and consumed.
+//! - More information about protocols can be found [here](https://edk2-docs.gitbook.io/edk-ii-uefi-driver-writer-s-guide/3_foundation/36_protocols_and_handles)
+
+use r_efi::efi::{self, Guid};
+
+use crate::mem::{size_of, MaybeUninit};
+use crate::os::uefi;
+use crate::ptr::NonNull;
+use crate::{
+ io::{self, const_io_error},
+ os::uefi::env::boot_services,
+};
+
+const BOOT_SERVICES_UNAVAILABLE: io::Error =
+ const_io_error!(io::ErrorKind::Other, "Boot Services are no longer available");
+
+/// Locate handles that support a particular protocol GUID.
+/// Implemented using `EFI_BOOT_SERVICES.LocateHandle()`.
+///
+/// Returns an array of [Handles](r_efi::efi::Handle) that support a specified protocol.
+pub(crate) fn locate_handles(mut guid: Guid) -> io::Result<Vec<NonNull<crate::ffi::c_void>>> {
+ fn inner(
+ guid: &mut Guid,
+ boot_services: NonNull<r_efi::efi::BootServices>,
+ buf_size: &mut usize,
+ buf: *mut r_efi::efi::Handle,
+ ) -> io::Result<()> {
+ let r = unsafe {
+ ((*boot_services.as_ptr()).locate_handle)(
+ r_efi::efi::BY_PROTOCOL,
+ guid,
+ crate::ptr::null_mut(),
+ buf_size,
+ buf,
+ )
+ };
+
+ if r.is_error() { Err(crate::io::Error::from_raw_os_error(r.as_usize())) } else { Ok(()) }
+ }
+
+ let boot_services = boot_services().ok_or(BOOT_SERVICES_UNAVAILABLE)?.cast();
+ let mut buf_len = 0usize;
+
+ // This call should always fail since the buffer size is 0. It should update the `buf_len`
+ // variable with the required buffer length.
+ match inner(&mut guid, boot_services, &mut buf_len, crate::ptr::null_mut()) {
+ Ok(()) => unreachable!(),
+ Err(e) => match e.kind() {
+ io::ErrorKind::FileTooLarge => {}
+ _ => return Err(e),
+ },
+ }
+
+ // The returned buf_len is in bytes
+ assert_eq!(buf_len % size_of::<r_efi::efi::Handle>(), 0);
+ let num_of_handles = buf_len / size_of::<r_efi::efi::Handle>();
+ let mut buf: Vec<r_efi::efi::Handle> = Vec::with_capacity(num_of_handles);
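+ // Call again with a buffer large enough for all handles to actually retrieve them.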
+ match inner(&mut guid, boot_services, &mut buf_len, buf.as_mut_ptr()) {
+ Ok(()) => {
+ // This is safe because the call will succeed only if buf_len >= the required length.
+ // Also, on success, `buf_len` is updated with the size of the buffer (in bytes) that was written.
+ unsafe { buf.set_len(num_of_handles) };
+ Ok(buf.into_iter().filter_map(|x| NonNull::new(x)).collect())
+ }
+ Err(e) => Err(e),
+ }
+}
+
+/// Open Protocol on a handle.
+/// Internally just a call to `EFI_BOOT_SERVICES.OpenProtocol()`.
+///
+/// Queries a handle to determine if it supports a specified protocol. If the protocol is
+/// supported by the handle, it opens the protocol on behalf of the calling agent.
+pub(crate) fn open_protocol<T>(
+ handle: NonNull<crate::ffi::c_void>,
+ mut protocol_guid: Guid,
+) -> io::Result<NonNull<T>> {
+ let boot_services: NonNull<efi::BootServices> =
+ boot_services().ok_or(BOOT_SERVICES_UNAVAILABLE)?.cast();
+ let system_handle = uefi::env::image_handle();
+ let mut protocol: MaybeUninit<*mut T> = MaybeUninit::uninit();
+
+ let r = unsafe {
+ ((*boot_services.as_ptr()).open_protocol)(
+ handle.as_ptr(),
+ &mut protocol_guid,
+ protocol.as_mut_ptr().cast(),
+ system_handle.as_ptr(),
+ crate::ptr::null_mut(),
+ r_efi::system::OPEN_PROTOCOL_GET_PROTOCOL,
+ )
+ };
+
+ if r.is_error() {
+ Err(crate::io::Error::from_raw_os_error(r.as_usize()))
+ } else {
+ NonNull::new(unsafe { protocol.assume_init() })
+ .ok_or(const_io_error!(io::ErrorKind::Other, "null protocol"))
+ }
+}
+
+pub(crate) fn create_event(
+ signal: u32,
+ tpl: efi::Tpl,
+ handler: Option<efi::EventNotify>,
+ context: *mut crate::ffi::c_void,
+) -> io::Result<NonNull<crate::ffi::c_void>> {
+ let boot_services: NonNull<efi::BootServices> =
+ boot_services().ok_or(BOOT_SERVICES_UNAVAILABLE)?.cast();
+ let mut event: r_efi::efi::Event = crate::ptr::null_mut();
+ let r = unsafe {
+ let create_event = (*boot_services.as_ptr()).create_event;
+ (create_event)(signal, tpl, handler, context, &mut event)
+ };
+ if r.is_error() {
+ Err(crate::io::Error::from_raw_os_error(r.as_usize()))
+ } else {
+ NonNull::new(event).ok_or(const_io_error!(io::ErrorKind::Other, "null protocol"))
+ }
+}
+
+/// # SAFETY
+/// - The supplied event must be valid
+pub(crate) unsafe fn close_event(evt: NonNull<crate::ffi::c_void>) -> io::Result<()> {
+ let boot_services: NonNull<efi::BootServices> =
+ boot_services().ok_or(BOOT_SERVICES_UNAVAILABLE)?.cast();
+ let r = unsafe {
+ let close_event = (*boot_services.as_ptr()).close_event;
+ (close_event)(evt.as_ptr())
+ };
+
+ if r.is_error() { Err(crate::io::Error::from_raw_os_error(r.as_usize())) } else { Ok(()) }
+}
diff --git a/library/std/src/sys/uefi/mod.rs b/library/std/src/sys/uefi/mod.rs
new file mode 100644
index 000000000..9a10395af
--- /dev/null
+++ b/library/std/src/sys/uefi/mod.rs
@@ -0,0 +1,244 @@
+//! Platform-specific extensions to `std` for UEFI platforms.
+//!
+//! Provides access to platform-level information on UEFI platforms, and
+//! exposes UEFI-specific functions that would otherwise be inappropriate as
+//! part of the core `std` library.
+//!
+//! It exposes more ways to deal with platform-specific strings ([`OsStr`],
+//! [`OsString`]), allows setting permissions more granularly, extracting
+//! low-level file descriptors from files and sockets, and has platform-specific
+//! helpers for spawning processes.
+//!
+//! [`OsStr`]: crate::ffi::OsStr
+//! [`OsString`]: crate::ffi::OsString
+
+pub mod alloc;
+#[path = "../unsupported/args.rs"]
+pub mod args;
+#[path = "../unix/cmath.rs"]
+pub mod cmath;
+pub mod env;
+#[path = "../unsupported/fs.rs"]
+pub mod fs;
+#[path = "../unsupported/io.rs"]
+pub mod io;
+#[path = "../unsupported/locks/mod.rs"]
+pub mod locks;
+#[path = "../unsupported/net.rs"]
+pub mod net;
+#[path = "../unsupported/once.rs"]
+pub mod once;
+pub mod os;
+#[path = "../windows/os_str.rs"]
+pub mod os_str;
+pub mod path;
+#[path = "../unsupported/pipe.rs"]
+pub mod pipe;
+#[path = "../unsupported/process.rs"]
+pub mod process;
+#[path = "../unsupported/stdio.rs"]
+pub mod stdio;
+#[path = "../unsupported/thread.rs"]
+pub mod thread;
+#[path = "../unsupported/thread_local_key.rs"]
+pub mod thread_local_key;
+#[path = "../unsupported/thread_parking.rs"]
+pub mod thread_parking;
+#[path = "../unsupported/time.rs"]
+pub mod time;
+
+mod helpers;
+
+#[cfg(test)]
+mod tests;
+
+pub type RawOsError = usize;
+
+use crate::io as std_io;
+use crate::os::uefi;
+use crate::ptr::NonNull;
+use crate::sync::atomic::{AtomicPtr, Ordering};
+
+pub mod memchr {
+ pub use core::slice::memchr::{memchr, memrchr};
+}
+
+static EXIT_BOOT_SERVICE_EVENT: AtomicPtr<crate::ffi::c_void> =
+ AtomicPtr::new(crate::ptr::null_mut());
+
+/// # SAFETY
+/// - must be called only once during runtime initialization.
+/// - argc must be 2.
+/// - argv must be &[Handle, *mut SystemTable].
+pub(crate) unsafe fn init(argc: isize, argv: *const *const u8, _sigpipe: u8) {
+ assert_eq!(argc, 2);
+ let image_handle = unsafe { NonNull::new(*argv as *mut crate::ffi::c_void).unwrap() };
+ let system_table = unsafe { NonNull::new(*argv.add(1) as *mut crate::ffi::c_void).unwrap() };
+ unsafe { uefi::env::init_globals(image_handle, system_table) };
+
+ // Register exit boot services handler
+ match helpers::create_event(
+ r_efi::efi::EVT_SIGNAL_EXIT_BOOT_SERVICES,
+ r_efi::efi::TPL_NOTIFY,
+ Some(exit_boot_service_handler),
+ crate::ptr::null_mut(),
+ ) {
+ Ok(x) => {
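+ // Record the event handle; if one is already registered, init() ran twice, so abort.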
+ if EXIT_BOOT_SERVICE_EVENT
+ .compare_exchange(
+ crate::ptr::null_mut(),
+ x.as_ptr(),
+ Ordering::Release,
+ Ordering::Acquire,
+ )
+ .is_err()
+ {
+ abort_internal();
+ };
+ }
+ Err(_) => abort_internal(),
+ }
+}
+
+/// # SAFETY
+/// this is not guaranteed to run, for example when the program aborts.
+/// - must be called only once during runtime cleanup.
+pub unsafe fn cleanup() {
+ if let Some(exit_boot_service_event) =
+ NonNull::new(EXIT_BOOT_SERVICE_EVENT.swap(crate::ptr::null_mut(), Ordering::Acquire))
+ {
+ let _ = unsafe { helpers::close_event(exit_boot_service_event) };
+ }
+}
+
+#[inline]
+pub const fn unsupported<T>() -> std_io::Result<T> {
+ Err(unsupported_err())
+}
+
+#[inline]
+pub const fn unsupported_err() -> std_io::Error {
+ std_io::const_io_error!(std_io::ErrorKind::Unsupported, "operation not supported on UEFI",)
+}
+
+pub fn decode_error_kind(code: RawOsError) -> crate::io::ErrorKind {
+ use crate::io::ErrorKind;
+ use r_efi::efi::Status;
+
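+ // UEFI reports errors as EFI status codes; map each status to the closest ErrorKind.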
+ match r_efi::efi::Status::from_usize(code) {
+ Status::ALREADY_STARTED
+ | Status::COMPROMISED_DATA
+ | Status::CONNECTION_FIN
+ | Status::CRC_ERROR
+ | Status::DEVICE_ERROR
+ | Status::END_OF_MEDIA
+ | Status::HTTP_ERROR
+ | Status::ICMP_ERROR
+ | Status::INCOMPATIBLE_VERSION
+ | Status::LOAD_ERROR
+ | Status::MEDIA_CHANGED
+ | Status::NO_MAPPING
+ | Status::NO_MEDIA
+ | Status::NOT_STARTED
+ | Status::PROTOCOL_ERROR
+ | Status::PROTOCOL_UNREACHABLE
+ | Status::TFTP_ERROR
+ | Status::VOLUME_CORRUPTED => ErrorKind::Other,
+ Status::BAD_BUFFER_SIZE | Status::INVALID_LANGUAGE => ErrorKind::InvalidData,
+ Status::ABORTED => ErrorKind::ConnectionAborted,
+ Status::ACCESS_DENIED => ErrorKind::PermissionDenied,
+ Status::BUFFER_TOO_SMALL => ErrorKind::FileTooLarge,
+ Status::CONNECTION_REFUSED => ErrorKind::ConnectionRefused,
+ Status::CONNECTION_RESET => ErrorKind::ConnectionReset,
+ Status::END_OF_FILE => ErrorKind::UnexpectedEof,
+ Status::HOST_UNREACHABLE => ErrorKind::HostUnreachable,
+ Status::INVALID_PARAMETER => ErrorKind::InvalidInput,
+ Status::IP_ADDRESS_CONFLICT => ErrorKind::AddrInUse,
+ Status::NETWORK_UNREACHABLE => ErrorKind::NetworkUnreachable,
+ Status::NO_RESPONSE => ErrorKind::HostUnreachable,
+ Status::NOT_FOUND => ErrorKind::NotFound,
+ Status::NOT_READY => ErrorKind::ResourceBusy,
+ Status::OUT_OF_RESOURCES => ErrorKind::OutOfMemory,
+ Status::SECURITY_VIOLATION => ErrorKind::PermissionDenied,
+ Status::TIMEOUT => ErrorKind::TimedOut,
+ Status::UNSUPPORTED => ErrorKind::Unsupported,
+ Status::VOLUME_FULL => ErrorKind::StorageFull,
+ Status::WRITE_PROTECTED => ErrorKind::ReadOnlyFilesystem,
+ _ => ErrorKind::Uncategorized,
+ }
+}
+
+pub fn abort_internal() -> ! {
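+ // Close the registered exit-boot-services event, if any, before exiting.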
+ if let Some(exit_boot_service_event) =
+ NonNull::new(EXIT_BOOT_SERVICE_EVENT.load(Ordering::Acquire))
+ {
+ let _ = unsafe { helpers::close_event(exit_boot_service_event) };
+ }
+
+ if let (Some(boot_services), Some(handle)) =
+ (uefi::env::boot_services(), uefi::env::try_image_handle())
+ {
+ let boot_services: NonNull<r_efi::efi::BootServices> = boot_services.cast();
+ let _ = unsafe {
+ ((*boot_services.as_ptr()).exit)(
+ handle.as_ptr(),
+ r_efi::efi::Status::ABORTED,
+ 0,
+ crate::ptr::null_mut(),
+ )
+ };
+ }
+
+ // In case SystemTable and ImageHandle cannot be reached, use `core::intrinsics::abort`
+ core::intrinsics::abort();
+}
+
+// This function is needed by the panic runtime. The symbol is named in
+// pre-link args for the target specification, so keep that in sync.
+#[cfg(not(test))]
+#[no_mangle]
+pub extern "C" fn __rust_abort() {
+ abort_internal();
+}
+
+#[inline]
+pub fn hashmap_random_keys() -> (u64, u64) {
+ get_random().unwrap()
+}
+
+fn get_random() -> Option<(u64, u64)> {
+ use r_efi::protocols::rng;
+
+ let mut buf = [0u8; 16];
+ let handles = helpers::locate_handles(rng::PROTOCOL_GUID).ok()?;
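+ // Try each handle that supports the RNG protocol until one fills the buffer with 16 random bytes.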
+ for handle in handles {
+ if let Ok(protocol) = helpers::open_protocol::<rng::Protocol>(handle, rng::PROTOCOL_GUID) {
+ let r = unsafe {
+ ((*protocol.as_ptr()).get_rng)(
+ protocol.as_ptr(),
+ crate::ptr::null_mut(),
+ buf.len(),
+ buf.as_mut_ptr(),
+ )
+ };
+ if r.is_error() {
+ continue;
+ } else {
+ return Some((
+ u64::from_le_bytes(buf[..8].try_into().ok()?),
+ u64::from_le_bytes(buf[8..].try_into().ok()?),
+ ));
+ }
+ }
+ }
+ None
+}
+
+/// Disable access to BootServices if `EVT_SIGNAL_EXIT_BOOT_SERVICES` is signaled
+extern "efiapi" fn exit_boot_service_handler(_e: r_efi::efi::Event, _ctx: *mut crate::ffi::c_void) {
+ uefi::env::disable_boot_services();
+}
+
+pub fn is_interrupted(_code: RawOsError) -> bool {
+ false
+}
diff --git a/library/std/src/sys/uefi/os.rs b/library/std/src/sys/uefi/os.rs
new file mode 100644
index 000000000..e6693db68
--- /dev/null
+++ b/library/std/src/sys/uefi/os.rs
@@ -0,0 +1,237 @@
+use super::{unsupported, RawOsError};
+use crate::error::Error as StdError;
+use crate::ffi::{OsStr, OsString};
+use crate::fmt;
+use crate::io;
+use crate::marker::PhantomData;
+use crate::os::uefi;
+use crate::path::{self, PathBuf};
+use crate::ptr::NonNull;
+use r_efi::efi::Status;
+
+pub fn errno() -> RawOsError {
+ 0
+}
+
+pub fn error_string(errno: RawOsError) -> String {
+ // Keep the List in Alphabetical Order
+ // The Messages are taken from UEFI Specification Appendix D - Status Codes
+ match r_efi::efi::Status::from_usize(errno) {
+ Status::ABORTED => "The operation was aborted.".to_owned(),
+ Status::ACCESS_DENIED => "Access was denied.".to_owned(),
+ Status::ALREADY_STARTED => "The protocol has already been started.".to_owned(),
+ Status::BAD_BUFFER_SIZE => "The buffer was not the proper size for the request.".to_owned(),
+ Status::BUFFER_TOO_SMALL => {
+ "The buffer is not large enough to hold the requested data. The required buffer size is returned in the appropriate parameter when this error occurs.".to_owned()
+ }
+ Status::COMPROMISED_DATA => {
+ "The security status of the data is unknown or compromised and the data must be updated or replaced to restore a valid security status.".to_owned()
+ }
+ Status::CONNECTION_FIN => {
+ "The receiving operation fails because the communication peer has closed the connection and there is no more data in the receive buffer of the instance.".to_owned()
+ }
+ Status::CONNECTION_REFUSED => {
+ "The receiving or transmission operation fails because this connection is refused.".to_owned()
+ }
+ Status::CONNECTION_RESET => {
+ "The connect fails because the connection is reset either by instance itself or the communication peer.".to_owned()
+ }
+ Status::CRC_ERROR => "A CRC error was detected.".to_owned(),
+ Status::DEVICE_ERROR => "The physical device reported an error while attempting the operation.".to_owned()
+ ,
+ Status::END_OF_FILE => {
+ "The end of the file was reached.".to_owned()
+ }
+ Status::END_OF_MEDIA => {
+ "Beginning or end of media was reached".to_owned()
+ }
+ Status::HOST_UNREACHABLE => {
+ "The remote host is not reachable.".to_owned()
+ }
+ Status::HTTP_ERROR => {
+ "A HTTP error occurred during the network operation.".to_owned()
+ }
+ Status::ICMP_ERROR => {
+ "An ICMP error occurred during the network operation.".to_owned()
+ }
+ Status::INCOMPATIBLE_VERSION => {
+ "The function encountered an internal version that was incompatible with a version requested by the caller.".to_owned()
+ }
+ Status::INVALID_LANGUAGE => {
+ "The language specified was invalid.".to_owned()
+ }
+ Status::INVALID_PARAMETER => {
+ "A parameter was incorrect.".to_owned()
+ }
+ Status::IP_ADDRESS_CONFLICT => {
+ "There is an address conflict address allocation".to_owned()
+ }
+ Status::LOAD_ERROR => {
+ "The image failed to load.".to_owned()
+ }
+ Status::MEDIA_CHANGED => {
+ "The medium in the device has changed since the last access.".to_owned()
+ }
+ Status::NETWORK_UNREACHABLE => {
+ "The network containing the remote host is not reachable.".to_owned()
+ }
+ Status::NO_MAPPING => {
+ "A mapping to a device does not exist.".to_owned()
+ }
+ Status::NO_MEDIA => {
+ "The device does not contain any medium to perform the operation.".to_owned()
+ }
+ Status::NO_RESPONSE => {
+ "The server was not found or did not respond to the request.".to_owned()
+ }
+ Status::NOT_FOUND => "The item was not found.".to_owned(),
+ Status::NOT_READY => {
+ "There is no data pending upon return.".to_owned()
+ }
+ Status::NOT_STARTED => {
+ "The protocol has not been started.".to_owned()
+ }
+ Status::OUT_OF_RESOURCES => {
+ "A resource has run out.".to_owned()
+ }
+ Status::PROTOCOL_ERROR => {
+ "A protocol error occurred during the network operation.".to_owned()
+ }
+ Status::PROTOCOL_UNREACHABLE => {
+ "An ICMP protocol unreachable error is received.".to_owned()
+ }
+ Status::SECURITY_VIOLATION => {
+ "The function was not performed due to a security violation.".to_owned()
+ }
+ Status::TFTP_ERROR => {
+ "A TFTP error occurred during the network operation.".to_owned()
+ }
+ Status::TIMEOUT => "The timeout time expired.".to_owned(),
+ Status::UNSUPPORTED => {
+ "The operation is not supported.".to_owned()
+ }
+ Status::VOLUME_FULL => {
+ "There is no more space on the file system.".to_owned()
+ }
+ Status::VOLUME_CORRUPTED => {
+ "An inconstancy was detected on the file system causing the operating to fail.".to_owned()
+ }
+ Status::WRITE_PROTECTED => {
+ "The device cannot be written to.".to_owned()
+ }
+ _ => format!("Status: {}", errno),
+ }
+}
+
+pub fn getcwd() -> io::Result<PathBuf> {
+ unsupported()
+}
+
+pub fn chdir(_: &path::Path) -> io::Result<()> {
+ unsupported()
+}
+
+pub struct SplitPaths<'a>(!, PhantomData<&'a ()>);
+
+pub fn split_paths(_unparsed: &OsStr) -> SplitPaths<'_> {
+ panic!("unsupported")
+}
+
+impl<'a> Iterator for SplitPaths<'a> {
+ type Item = PathBuf;
+ fn next(&mut self) -> Option<PathBuf> {
+ self.0
+ }
+}
+
+#[derive(Debug)]
+pub struct JoinPathsError;
+
+pub fn join_paths<I, T>(_paths: I) -> Result<OsString, JoinPathsError>
+where
+ I: Iterator<Item = T>,
+ T: AsRef<OsStr>,
+{
+ Err(JoinPathsError)
+}
+
+impl fmt::Display for JoinPathsError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ "not supported on this platform yet".fmt(f)
+ }
+}
+
+impl StdError for JoinPathsError {}
+
+pub fn current_exe() -> io::Result<PathBuf> {
+ unsupported()
+}
+
+pub struct Env(!);
+
+impl Env {
+ // FIXME(https://github.com/rust-lang/rust/issues/114583): Remove this when <OsStr as Debug>::fmt matches <str as Debug>::fmt.
+ pub fn str_debug(&self) -> impl fmt::Debug + '_ {
+ let Self(inner) = self;
+ match *inner {}
+ }
+}
+
+impl Iterator for Env {
+ type Item = (OsString, OsString);
+ fn next(&mut self) -> Option<(OsString, OsString)> {
+ self.0
+ }
+}
+
+impl fmt::Debug for Env {
+ fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let Self(inner) = self;
+ match *inner {}
+ }
+}
+
+pub fn env() -> Env {
+ panic!("not supported on this platform")
+}
+
+pub fn getenv(_: &OsStr) -> Option<OsString> {
+ None
+}
+
+pub fn setenv(_: &OsStr, _: &OsStr) -> io::Result<()> {
+ Err(io::const_io_error!(io::ErrorKind::Unsupported, "cannot set env vars on this platform"))
+}
+
+pub fn unsetenv(_: &OsStr) -> io::Result<()> {
+ Err(io::const_io_error!(io::ErrorKind::Unsupported, "cannot unset env vars on this platform"))
+}
+
+pub fn temp_dir() -> PathBuf {
+ panic!("no filesystem on this platform")
+}
+
+pub fn home_dir() -> Option<PathBuf> {
+ None
+}
+
+pub fn exit(code: i32) -> ! {
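+ // Exit via EFI_BOOT_SERVICES.Exit() while boot services are still available; otherwise just abort.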
+ if let (Some(boot_services), Some(handle)) =
+ (uefi::env::boot_services(), uefi::env::try_image_handle())
+ {
+ let boot_services: NonNull<r_efi::efi::BootServices> = boot_services.cast();
+ let _ = unsafe {
+ ((*boot_services.as_ptr()).exit)(
+ handle.as_ptr(),
+ Status::from_usize(code as usize),
+ 0,
+ crate::ptr::null_mut(),
+ )
+ };
+ }
+ crate::intrinsics::abort()
+}
+
+pub fn getpid() -> u32 {
+ panic!("no pids on this platform")
+}
diff --git a/library/std/src/sys/uefi/path.rs b/library/std/src/sys/uefi/path.rs
new file mode 100644
index 000000000..106682eee
--- /dev/null
+++ b/library/std/src/sys/uefi/path.rs
@@ -0,0 +1,25 @@
+use super::unsupported;
+use crate::ffi::OsStr;
+use crate::io;
+use crate::path::{Path, PathBuf, Prefix};
+
+pub const MAIN_SEP_STR: &str = "\\";
+pub const MAIN_SEP: char = '\\';
+
+#[inline]
+pub fn is_sep_byte(b: u8) -> bool {
+ b == b'\\'
+}
+
+#[inline]
+pub fn is_verbatim_sep(b: u8) -> bool {
+ b == b'\\'
+}
+
+pub fn parse_prefix(_p: &OsStr) -> Option<Prefix<'_>> {
+ None
+}
+
+pub(crate) fn absolute(_path: &Path) -> io::Result<PathBuf> {
+ unsupported()
+}
diff --git a/library/std/src/sys/uefi/tests.rs b/library/std/src/sys/uefi/tests.rs
new file mode 100644
index 000000000..8806eda3a
--- /dev/null
+++ b/library/std/src/sys/uefi/tests.rs
@@ -0,0 +1,21 @@
+use super::alloc::*;
+
+#[test]
+fn align() {
+ // UEFI ABI specifies that allocation alignment minimum is always 8. So this can be
+ // statically verified.
+ assert_eq!(POOL_ALIGNMENT, 8);
+
+ // Loop over allocation-request sizes from 0-256 and alignments from 1-128, and verify
+ // that in case of overalignment there is at least space for one additional pointer to
+ // store in the allocation.
+ for i in 0..256 {
+ for j in &[1, 2, 4, 8, 16, 32, 64, 128] {
+ if *j <= 8 {
+ assert_eq!(align_size(i, *j), i);
+ } else {
+ assert!(align_size(i, *j) > i + std::mem::size_of::<*mut ()>());
+ }
+ }
+ }
+}
diff --git a/library/std/src/sys/unix/alloc.rs b/library/std/src/sys/unix/alloc.rs
index 8604b5398..af0089978 100644
--- a/library/std/src/sys/unix/alloc.rs
+++ b/library/std/src/sys/unix/alloc.rs
@@ -86,7 +86,11 @@ cfg_if::cfg_if! {
} else if #[cfg(target_os = "wasi")] {
#[inline]
unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
- libc::aligned_alloc(layout.align(), layout.size()) as *mut u8
+ // C11 aligned_alloc requires that the size be a multiple of the alignment.
+ // Layout already checks that the size rounded up doesn't overflow isize::MAX.
+ let align = layout.align();
+ let size = layout.size().next_multiple_of(align);
+ libc::aligned_alloc(align, size) as *mut u8
}
} else {
#[inline]
diff --git a/library/std/src/sys/unix/args.rs b/library/std/src/sys/unix/args.rs
index eafd6821f..19334e2af 100644
--- a/library/std/src/sys/unix/args.rs
+++ b/library/std/src/sys/unix/args.rs
@@ -71,6 +71,7 @@ impl DoubleEndedIterator for Args {
target_os = "vxworks",
target_os = "horizon",
target_os = "nto",
+ target_os = "hurd",
))]
mod imp {
use super::Args;
diff --git a/library/std/src/sys/unix/env.rs b/library/std/src/sys/unix/env.rs
index 929e9dae7..c6d8578a6 100644
--- a/library/std/src/sys/unix/env.rs
+++ b/library/std/src/sys/unix/env.rs
@@ -152,6 +152,17 @@ pub mod os {
pub const EXE_EXTENSION: &str = "elf";
}
+#[cfg(target_os = "hurd")]
+pub mod os {
+ pub const FAMILY: &str = "unix";
+ pub const OS: &str = "hurd";
+ pub const DLL_PREFIX: &str = "lib";
+ pub const DLL_SUFFIX: &str = ".so";
+ pub const DLL_EXTENSION: &str = "so";
+ pub const EXE_SUFFIX: &str = "";
+ pub const EXE_EXTENSION: &str = "";
+}
+
#[cfg(target_os = "vita")]
pub mod os {
pub const FAMILY: &str = "unix";
diff --git a/library/std/src/sys/unix/fd.rs b/library/std/src/sys/unix/fd.rs
index 85e020ae4..6c4f40842 100644
--- a/library/std/src/sys/unix/fd.rs
+++ b/library/std/src/sys/unix/fd.rs
@@ -13,14 +13,16 @@ use crate::sys_common::{AsInner, FromInner, IntoInner};
target_os = "android",
target_os = "linux",
target_os = "emscripten",
- target_os = "l4re"
+ target_os = "l4re",
+ target_os = "hurd",
))]
use libc::off64_t;
#[cfg(not(any(
target_os = "linux",
target_os = "emscripten",
target_os = "l4re",
- target_os = "android"
+ target_os = "android",
+ target_os = "hurd",
)))]
use libc::off_t as off64_t;
@@ -124,9 +126,9 @@ impl FileDesc {
}
pub fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<usize> {
- #[cfg(not(any(target_os = "linux", target_os = "android")))]
+ #[cfg(not(any(target_os = "linux", target_os = "android", target_os = "hurd")))]
use libc::pread as pread64;
- #[cfg(any(target_os = "linux", target_os = "android"))]
+ #[cfg(any(target_os = "linux", target_os = "android", target_os = "hurd"))]
use libc::pread64;
unsafe {
@@ -160,6 +162,7 @@ impl FileDesc {
target_os = "emscripten",
target_os = "freebsd",
target_os = "fuchsia",
+ target_os = "hurd",
target_os = "illumos",
target_os = "linux",
target_os = "netbsd",
@@ -181,6 +184,7 @@ impl FileDesc {
target_os = "emscripten",
target_os = "freebsd",
target_os = "fuchsia",
+ target_os = "hurd",
target_os = "illumos",
target_os = "ios",
target_os = "tvos",
@@ -281,9 +285,9 @@ impl FileDesc {
}
pub fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> {
- #[cfg(not(any(target_os = "linux", target_os = "android")))]
+ #[cfg(not(any(target_os = "linux", target_os = "android", target_os = "hurd")))]
use libc::pwrite as pwrite64;
- #[cfg(any(target_os = "linux", target_os = "android"))]
+ #[cfg(any(target_os = "linux", target_os = "android", target_os = "hurd"))]
use libc::pwrite64;
unsafe {
@@ -301,6 +305,7 @@ impl FileDesc {
target_os = "emscripten",
target_os = "freebsd",
target_os = "fuchsia",
+ target_os = "hurd",
target_os = "illumos",
target_os = "linux",
target_os = "netbsd",
@@ -322,6 +327,7 @@ impl FileDesc {
target_os = "emscripten",
target_os = "freebsd",
target_os = "fuchsia",
+ target_os = "hurd",
target_os = "illumos",
target_os = "ios",
target_os = "tvos",
diff --git a/library/std/src/sys/unix/fs.rs b/library/std/src/sys/unix/fs.rs
index a5604c92a..764e1f257 100644
--- a/library/std/src/sys/unix/fs.rs
+++ b/library/std/src/sys/unix/fs.rs
@@ -39,9 +39,14 @@ use libc::{c_int, mode_t};
all(target_os = "linux", target_env = "gnu")
))]
use libc::c_char;
-#[cfg(any(target_os = "linux", target_os = "emscripten", target_os = "android"))]
+#[cfg(any(
+ target_os = "linux",
+ target_os = "emscripten",
+ target_os = "android",
+ target_os = "hurd",
+))]
use libc::dirfd;
-#[cfg(any(target_os = "linux", target_os = "emscripten"))]
+#[cfg(any(target_os = "linux", target_os = "emscripten", target_os = "hurd"))]
use libc::fstatat64;
#[cfg(any(
target_os = "android",
@@ -53,7 +58,7 @@ use libc::fstatat64;
target_os = "vita",
))]
use libc::readdir as readdir64;
-#[cfg(target_os = "linux")]
+#[cfg(any(target_os = "linux", target_os = "hurd"))]
use libc::readdir64;
#[cfg(any(target_os = "emscripten", target_os = "l4re"))]
use libc::readdir64_r;
@@ -68,6 +73,7 @@ use libc::readdir64_r;
target_os = "redox",
target_os = "nto",
target_os = "vita",
+ target_os = "hurd",
)))]
use libc::readdir_r as readdir64_r;
#[cfg(target_os = "android")]
@@ -79,13 +85,19 @@ use libc::{
target_os = "linux",
target_os = "emscripten",
target_os = "l4re",
- target_os = "android"
+ target_os = "android",
+ target_os = "hurd",
)))]
use libc::{
dirent as dirent64, fstat as fstat64, ftruncate as ftruncate64, lseek as lseek64,
lstat as lstat64, off_t as off64_t, open as open64, stat as stat64,
};
-#[cfg(any(target_os = "linux", target_os = "emscripten", target_os = "l4re"))]
+#[cfg(any(
+ target_os = "linux",
+ target_os = "emscripten",
+ target_os = "l4re",
+ target_os = "hurd"
+))]
use libc::{dirent64, fstat64, ftruncate64, lseek64, lstat64, off64_t, open64, stat64};
pub use crate::sys_common::fs::try_exists;
@@ -277,7 +289,8 @@ unsafe impl Sync for Dir {}
target_os = "fuchsia",
target_os = "redox",
target_os = "nto",
- target_os = "vita"
+ target_os = "vita",
+ target_os = "hurd",
))]
pub struct DirEntry {
dir: Arc<InnerReadDir>,
@@ -300,6 +313,7 @@ pub struct DirEntry {
target_os = "redox",
target_os = "nto",
target_os = "vita",
+ target_os = "hurd",
))]
struct dirent64_min {
d_ino: u64,
@@ -321,6 +335,7 @@ struct dirent64_min {
target_os = "redox",
target_os = "nto",
target_os = "vita",
+ target_os = "hurd",
)))]
pub struct DirEntry {
dir: Arc<InnerReadDir>,
@@ -455,7 +470,8 @@ impl FileAttr {
target_os = "vxworks",
target_os = "espidf",
target_os = "horizon",
- target_os = "vita"
+ target_os = "vita",
+ target_os = "hurd",
)))]
pub fn modified(&self) -> io::Result<SystemTime> {
#[cfg(target_pointer_width = "32")]
@@ -473,7 +489,7 @@ impl FileAttr {
Ok(SystemTime::new(self.stat.st_mtime as i64, 0))
}
- #[cfg(target_os = "horizon")]
+ #[cfg(any(target_os = "horizon", target_os = "hurd"))]
pub fn modified(&self) -> io::Result<SystemTime> {
Ok(SystemTime::from(self.stat.st_mtim))
}
@@ -482,7 +498,8 @@ impl FileAttr {
target_os = "vxworks",
target_os = "espidf",
target_os = "horizon",
- target_os = "vita"
+ target_os = "vita",
+ target_os = "hurd",
)))]
pub fn accessed(&self) -> io::Result<SystemTime> {
#[cfg(target_pointer_width = "32")]
@@ -500,7 +517,7 @@ impl FileAttr {
Ok(SystemTime::new(self.stat.st_atime as i64, 0))
}
- #[cfg(target_os = "horizon")]
+ #[cfg(any(target_os = "horizon", target_os = "hurd"))]
pub fn accessed(&self) -> io::Result<SystemTime> {
Ok(SystemTime::from(self.stat.st_atim))
}
@@ -656,6 +673,7 @@ impl Iterator for ReadDir {
target_os = "illumos",
target_os = "nto",
target_os = "vita",
+ target_os = "hurd",
))]
fn next(&mut self) -> Option<io::Result<DirEntry>> {
if self.end_of_stream {
@@ -756,6 +774,7 @@ impl Iterator for ReadDir {
target_os = "illumos",
target_os = "nto",
target_os = "vita",
+ target_os = "hurd",
)))]
fn next(&mut self) -> Option<io::Result<DirEntry>> {
if self.end_of_stream {
@@ -792,7 +811,7 @@ impl Drop for Dir {
fn drop(&mut self) {
let r = unsafe { libc::closedir(self.0) };
assert!(
- r == 0 || crate::io::Error::last_os_error().kind() == crate::io::ErrorKind::Interrupted,
+ r == 0 || crate::io::Error::last_os_error().is_interrupted(),
"unexpected error during closedir: {:?}",
crate::io::Error::last_os_error()
);
@@ -809,7 +828,12 @@ impl DirEntry {
}
#[cfg(all(
- any(target_os = "linux", target_os = "emscripten", target_os = "android"),
+ any(
+ target_os = "linux",
+ target_os = "emscripten",
+ target_os = "android",
+ target_os = "hurd",
+ ),
not(miri)
))]
pub fn metadata(&self) -> io::Result<FileAttr> {
@@ -833,7 +857,12 @@ impl DirEntry {
}
#[cfg(any(
- not(any(target_os = "linux", target_os = "emscripten", target_os = "android")),
+ not(any(
+ target_os = "linux",
+ target_os = "emscripten",
+ target_os = "android",
+ target_os = "hurd",
+ )),
miri
))]
pub fn metadata(&self) -> io::Result<FileAttr> {
@@ -892,6 +921,7 @@ impl DirEntry {
target_os = "horizon",
target_os = "vita",
target_os = "nto",
+ target_os = "hurd",
))]
pub fn ino(&self) -> u64 {
self.entry.d_ino as u64
@@ -949,6 +979,7 @@ impl DirEntry {
target_os = "redox",
target_os = "nto",
target_os = "vita",
+ target_os = "hurd",
)))]
fn name_cstr(&self) -> &CStr {
unsafe { CStr::from_ptr(self.entry.d_name.as_ptr()) }
@@ -962,6 +993,7 @@ impl DirEntry {
target_os = "redox",
target_os = "nto",
target_os = "vita",
+ target_os = "hurd",
))]
fn name_cstr(&self) -> &CStr {
&self.name
@@ -1131,6 +1163,7 @@ impl File {
target_os = "netbsd",
target_os = "openbsd",
target_os = "nto",
+ target_os = "hurd",
))]
unsafe fn os_datasync(fd: c_int) -> c_int {
libc::fdatasync(fd)
@@ -1146,6 +1179,7 @@ impl File {
target_os = "openbsd",
target_os = "watchos",
target_os = "nto",
+ target_os = "hurd",
)))]
unsafe fn os_datasync(fd: c_int) -> c_int {
libc::fsync(fd)
@@ -1456,6 +1490,7 @@ impl fmt::Debug for File {
target_os = "linux",
target_os = "macos",
target_os = "freebsd",
+ target_os = "hurd",
target_os = "netbsd",
target_os = "openbsd",
target_os = "vxworks"
@@ -1477,6 +1512,7 @@ impl fmt::Debug for File {
target_os = "linux",
target_os = "macos",
target_os = "freebsd",
+ target_os = "hurd",
target_os = "netbsd",
target_os = "openbsd",
target_os = "vxworks"
diff --git a/library/std/src/sys/unix/kernel_copy.rs b/library/std/src/sys/unix/kernel_copy.rs
index 4d17a1b00..18acd5ecc 100644
--- a/library/std/src/sys/unix/kernel_copy.rs
+++ b/library/std/src/sys/unix/kernel_copy.rs
@@ -59,9 +59,9 @@ use crate::ptr;
use crate::sync::atomic::{AtomicBool, AtomicU8, Ordering};
use crate::sys::cvt;
use crate::sys::weak::syscall;
-#[cfg(not(all(target_os = "linux", target_env = "gnu")))]
+#[cfg(not(any(all(target_os = "linux", target_env = "gnu"), target_os = "hurd")))]
use libc::sendfile as sendfile64;
-#[cfg(all(target_os = "linux", target_env = "gnu"))]
+#[cfg(any(all(target_os = "linux", target_env = "gnu"), target_os = "hurd"))]
use libc::sendfile64;
use libc::{EBADF, EINVAL, ENOSYS, EOPNOTSUPP, EOVERFLOW, EPERM, EXDEV};
diff --git a/library/std/src/sys/unix/mod.rs b/library/std/src/sys/unix/mod.rs
index 77ef086f2..3edafde71 100644
--- a/library/std/src/sys/unix/mod.rs
+++ b/library/std/src/sys/unix/mod.rs
@@ -204,6 +204,10 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) {
}
if let Some(handler) = handler {
rtassert!(signal(libc::SIGPIPE, handler) != libc::SIG_ERR);
+ #[cfg(target_os = "hurd")]
+ {
+ rtassert!(signal(libc::SIGLOST, handler) != libc::SIG_ERR);
+ }
}
}
}
@@ -240,6 +244,11 @@ pub use crate::sys::android::signal;
#[cfg(not(target_os = "android"))]
pub use libc::signal;
+#[inline]
+pub(crate) fn is_interrupted(errno: i32) -> bool {
+ errno == libc::EINTR
+}
+
pub fn decode_error_kind(errno: i32) -> ErrorKind {
use ErrorKind::*;
match errno as libc::c_int {
@@ -315,7 +324,7 @@ where
{
loop {
match cvt(f()) {
- Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
+ Err(ref e) if e.is_interrupted() => {}
other => return other,
}
}
diff --git a/library/std/src/sys/unix/net.rs b/library/std/src/sys/unix/net.rs
index 7258c222a..f450d708d 100644
--- a/library/std/src/sys/unix/net.rs
+++ b/library/std/src/sys/unix/net.rs
@@ -75,6 +75,7 @@ impl Socket {
target_os = "dragonfly",
target_os = "freebsd",
target_os = "illumos",
+ target_os = "hurd",
target_os = "linux",
target_os = "netbsd",
target_os = "openbsd",
@@ -102,7 +103,7 @@ impl Socket {
}
}
- #[cfg(not(target_os = "vxworks"))]
+ #[cfg(not(any(target_os = "vxworks", target_os = "vita")))]
pub fn new_pair(fam: c_int, ty: c_int) -> io::Result<(Socket, Socket)> {
unsafe {
let mut fds = [0, 0];
@@ -114,6 +115,7 @@ impl Socket {
target_os = "freebsd",
target_os = "illumos",
target_os = "linux",
+ target_os = "hurd",
target_os = "netbsd",
target_os = "openbsd",
target_os = "nto",
@@ -133,7 +135,7 @@ impl Socket {
}
}
- #[cfg(target_os = "vxworks")]
+ #[cfg(any(target_os = "vxworks", target_os = "vita"))]
pub fn new_pair(_fam: c_int, _ty: c_int) -> io::Result<(Socket, Socket)> {
unimplemented!()
}
@@ -184,7 +186,7 @@ impl Socket {
match unsafe { libc::poll(&mut pollfd, 1, timeout) } {
-1 => {
let err = io::Error::last_os_error();
- if err.kind() != io::ErrorKind::Interrupted {
+ if !err.is_interrupted() {
return Err(err);
}
}
@@ -220,6 +222,7 @@ impl Socket {
target_os = "freebsd",
target_os = "illumos",
target_os = "linux",
+ target_os = "hurd",
target_os = "netbsd",
target_os = "openbsd",
))] {
diff --git a/library/std/src/sys/unix/os.rs b/library/std/src/sys/unix/os.rs
index 57e1a36da..01ff375d2 100644
--- a/library/std/src/sys/unix/os.rs
+++ b/library/std/src/sys/unix/os.rs
@@ -46,7 +46,8 @@ extern "C" {
target_os = "linux",
target_os = "emscripten",
target_os = "fuchsia",
- target_os = "l4re"
+ target_os = "l4re",
+ target_os = "hurd",
),
link_name = "__errno_location"
)]
@@ -121,7 +122,10 @@ pub fn set_errno(e: i32) {
pub fn error_string(errno: i32) -> String {
extern "C" {
#[cfg_attr(
- all(any(target_os = "linux", target_env = "newlib"), not(target_env = "ohos")),
+ all(
+ any(target_os = "linux", target_os = "hurd", target_env = "newlib"),
+ not(target_env = "ohos")
+ ),
link_name = "__xpg_strerror_r"
)]
fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: libc::size_t) -> c_int;
@@ -359,7 +363,12 @@ pub fn current_exe() -> io::Result<PathBuf> {
}
}
-#[cfg(any(target_os = "linux", target_os = "android", target_os = "emscripten"))]
+#[cfg(any(
+ target_os = "linux",
+ target_os = "hurd",
+ target_os = "android",
+ target_os = "emscripten"
+))]
pub fn current_exe() -> io::Result<PathBuf> {
match crate::fs::read_link("/proc/self/exe") {
Err(ref e) if e.kind() == io::ErrorKind::NotFound => Err(io::const_io_error!(
diff --git a/library/std/src/sys/unix/os_str.rs b/library/std/src/sys/unix/os_str.rs
index 463b0a275..7bd2f656a 100644
--- a/library/std/src/sys/unix/os_str.rs
+++ b/library/std/src/sys/unix/os_str.rs
@@ -97,12 +97,12 @@ impl AsInner<[u8]> for Buf {
impl Buf {
#[inline]
- pub fn into_os_str_bytes(self) -> Vec<u8> {
+ pub fn into_encoded_bytes(self) -> Vec<u8> {
self.inner
}
#[inline]
- pub unsafe fn from_os_str_bytes_unchecked(s: Vec<u8>) -> Self {
+ pub unsafe fn from_encoded_bytes_unchecked(s: Vec<u8>) -> Self {
Self { inner: s }
}
@@ -203,18 +203,18 @@ impl Buf {
impl Slice {
#[inline]
- pub fn as_os_str_bytes(&self) -> &[u8] {
+ pub fn as_encoded_bytes(&self) -> &[u8] {
&self.inner
}
#[inline]
- pub unsafe fn from_os_str_bytes_unchecked(s: &[u8]) -> &Slice {
+ pub unsafe fn from_encoded_bytes_unchecked(s: &[u8]) -> &Slice {
unsafe { mem::transmute(s) }
}
#[inline]
pub fn from_str(s: &str) -> &Slice {
- unsafe { Slice::from_os_str_bytes_unchecked(s.as_bytes()) }
+ unsafe { Slice::from_encoded_bytes_unchecked(s.as_bytes()) }
}
pub fn to_str(&self) -> Result<&str, crate::str::Utf8Error> {
diff --git a/library/std/src/sys/unix/os_str/tests.rs b/library/std/src/sys/unix/os_str/tests.rs
index 91bc0e61a..e2a99045e 100644
--- a/library/std/src/sys/unix/os_str/tests.rs
+++ b/library/std/src/sys/unix/os_str/tests.rs
@@ -2,7 +2,7 @@ use super::*;
#[test]
fn slice_debug_output() {
- let input = unsafe { Slice::from_os_str_bytes_unchecked(b"\xF0hello,\tworld") };
+ let input = unsafe { Slice::from_encoded_bytes_unchecked(b"\xF0hello,\tworld") };
let expected = r#""\xF0hello,\tworld""#;
let output = format!("{input:?}");
@@ -12,6 +12,6 @@ fn slice_debug_output() {
#[test]
fn display() {
assert_eq!("Hello\u{FFFD}\u{FFFD} There\u{FFFD} Goodbye", unsafe {
- Slice::from_os_str_bytes_unchecked(b"Hello\xC0\x80 There\xE6\x83 Goodbye").to_string()
+ Slice::from_encoded_bytes_unchecked(b"Hello\xC0\x80 There\xE6\x83 Goodbye").to_string()
},);
}
diff --git a/library/std/src/sys/unix/path.rs b/library/std/src/sys/unix/path.rs
index 935245f63..837f68d3e 100644
--- a/library/std/src/sys/unix/path.rs
+++ b/library/std/src/sys/unix/path.rs
@@ -30,7 +30,7 @@ pub(crate) fn absolute(path: &Path) -> io::Result<PathBuf> {
// Get the components, skipping the redundant leading "." component if it exists.
let mut components = path.strip_prefix(".").unwrap_or(path).components();
- let path_os = path.as_os_str().as_os_str_bytes();
+ let path_os = path.as_os_str().as_encoded_bytes();
let mut normalized = if path.is_absolute() {
// "If a pathname begins with two successive <slash> characters, the
diff --git a/library/std/src/sys/unix/pipe.rs b/library/std/src/sys/unix/pipe.rs
index 938a46bfd..33db24e77 100644
--- a/library/std/src/sys/unix/pipe.rs
+++ b/library/std/src/sys/unix/pipe.rs
@@ -3,7 +3,7 @@ use crate::mem;
use crate::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, RawFd};
use crate::sys::fd::FileDesc;
use crate::sys::{cvt, cvt_r};
-use crate::sys_common::IntoInner;
+use crate::sys_common::{FromInner, IntoInner};
////////////////////////////////////////////////////////////////////////////////
// Anonymous pipes
@@ -21,6 +21,7 @@ pub fn anon_pipe() -> io::Result<(AnonPipe, AnonPipe)> {
if #[cfg(any(
target_os = "dragonfly",
target_os = "freebsd",
+ target_os = "hurd",
target_os = "linux",
target_os = "netbsd",
target_os = "openbsd",
@@ -158,3 +159,9 @@ impl FromRawFd for AnonPipe {
Self(FromRawFd::from_raw_fd(raw_fd))
}
}
+
+impl FromInner<FileDesc> for AnonPipe {
+ fn from_inner(fd: FileDesc) -> Self {
+ Self(fd)
+ }
+}
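For context, `FromInner`/`IntoInner` are std-internal conversion traits from `sys_common`; the new impl above lets platform-neutral code construct an `AnonPipe` directly from a `FileDesc`. A self-contained sketch of the pattern with simplified stand-ins (the real `FileDesc` wraps an `OwnedFd`, but an `i32` is enough to show the shape):

```rust
// Simplified stand-ins for the sys_common traits and sys types.
trait FromInner<T> {
    fn from_inner(inner: T) -> Self;
}
trait IntoInner<T> {
    fn into_inner(self) -> T;
}

struct FileDesc(i32);
struct AnonPipe(FileDesc);

impl FromInner<FileDesc> for AnonPipe {
    fn from_inner(fd: FileDesc) -> Self {
        Self(fd)
    }
}
impl IntoInner<FileDesc> for AnonPipe {
    fn into_inner(self) -> FileDesc {
        self.0
    }
}

fn main() {
    let pipe = AnonPipe::from_inner(FileDesc(3));
    let FileDesc(raw) = pipe.into_inner();
    assert_eq!(raw, 3);
}
```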
diff --git a/library/std/src/sys/unix/process/process_common.rs b/library/std/src/sys/unix/process/process_common.rs
index 640648e87..1ca11a7f9 100644
--- a/library/std/src/sys/unix/process/process_common.rs
+++ b/library/std/src/sys/unix/process/process_common.rs
@@ -13,7 +13,7 @@ use crate::sys::fd::FileDesc;
use crate::sys::fs::File;
use crate::sys::pipe::{self, AnonPipe};
use crate::sys_common::process::{CommandEnv, CommandEnvs};
-use crate::sys_common::IntoInner;
+use crate::sys_common::{FromInner, IntoInner};
#[cfg(not(target_os = "fuchsia"))]
use crate::sys::fs::OpenOptions;
@@ -150,6 +150,7 @@ pub enum Stdio {
Null,
MakePipe,
Fd(FileDesc),
+ StaticFd(BorrowedFd<'static>),
}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
@@ -164,9 +165,9 @@ pub enum ProgramKind {
impl ProgramKind {
fn new(program: &OsStr) -> Self {
- if program.as_os_str_bytes().starts_with(b"/") {
+ if program.as_encoded_bytes().starts_with(b"/") {
Self::Absolute
- } else if program.as_os_str_bytes().contains(&b'/') {
+ } else if program.as_encoded_bytes().contains(&b'/') {
// If the program has more than one component in it, it is a relative path.
Self::Relative
} else {
@@ -463,6 +464,11 @@ impl Stdio {
}
}
+ Stdio::StaticFd(fd) => {
+ let fd = FileDesc::from_inner(fd.try_clone_to_owned()?);
+ Ok((ChildStdio::Owned(fd), None))
+ }
+
Stdio::MakePipe => {
let (reader, writer) = pipe::anon_pipe()?;
let (ours, theirs) = if readable { (writer, reader) } else { (reader, writer) };
@@ -497,6 +503,28 @@ impl From<File> for Stdio {
}
}
+impl From<io::Stdout> for Stdio {
+ fn from(_: io::Stdout) -> Stdio {
+ // This ought really to be Stdio::StaticFd(input_argument.as_fd()).
+ // But AsFd::as_fd takes its argument by reference, and yields
+ // a bounded lifetime, so it's no use here. There is no AsStaticFd.
+ //
+ // Additionally AsFd is only implemented for the *locked* versions.
+ // We don't want to lock them here. (The implications of not locking
+ // are the same as those for process::Stdio::inherit().)
+ //
+ // Arguably the hypothetical AsStaticFd and AsFd<'static>
+ // should be implemented for io::Stdout, not just for StdoutLock.
+ Stdio::StaticFd(unsafe { BorrowedFd::borrow_raw(libc::STDOUT_FILENO) })
+ }
+}
+
+impl From<io::Stderr> for Stdio {
+ fn from(_: io::Stderr) -> Stdio {
+ Stdio::StaticFd(unsafe { BorrowedFd::borrow_raw(libc::STDERR_FILENO) })
+ }
+}
+
impl ChildStdio {
pub fn fd(&self) -> Option<c_int> {
match *self {
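At the public API level these new impls surface as `From<io::Stdout>`/`From<io::Stderr>` conversions for `std::process::Stdio` (added elsewhere in this patch), so child output can be redirected to the parent's own stdout or stderr without raw fd juggling. A hedged usage sketch, assuming those public impls are available:

```rust
use std::io;
use std::process::Command;

fn main() -> io::Result<()> {
    // Send the child's stderr to the parent's stdout; on Unix this goes
    // through the Stdio::StaticFd(STDOUT_FILENO) path added above.
    let status = Command::new("ls")
        .arg("does-not-exist")
        .stderr(io::stdout())
        .status()?;
    println!("child exited with {status}");
    Ok(())
}
```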
@@ -558,6 +586,23 @@ impl fmt::Debug for Command {
if let Some(ref cwd) = self.cwd {
write!(f, "cd {cwd:?} && ")?;
}
+ if self.env.does_clear() {
+ write!(f, "env -i ")?;
+ // Altered env vars will be printed next; that should work exactly as expected.
+ } else {
+ // Removed env vars need the command to be wrapped in `env`.
+ let mut any_removed = false;
+ for (key, value_opt) in self.get_envs() {
+ if value_opt.is_none() {
+ if !any_removed {
+ write!(f, "env ")?;
+ any_removed = true;
+ }
+ write!(f, "-u {} ", key.to_string_lossy())?;
+ }
+ }
+ }
+ // Altered env vars can just be added in front of the program.
for (key, value_opt) in self.get_envs() {
if let Some(value) = value_opt {
write!(f, "{}={value:?} ", key.to_string_lossy())?;
diff --git a/library/std/src/sys/unix/process/process_unix.rs b/library/std/src/sys/unix/process/process_unix.rs
index 3963e7f52..564f8c482 100644
--- a/library/std/src/sys/unix/process/process_unix.rs
+++ b/library/std/src/sys/unix/process/process_unix.rs
@@ -165,7 +165,7 @@ impl Command {
assert!(p.wait().is_ok(), "wait() should either return Ok or panic");
return Err(Error::from_raw_os_error(errno));
}
- Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
+ Err(ref e) if e.is_interrupted() => {}
Err(e) => {
assert!(p.wait().is_ok(), "wait() should either return Ok or panic");
panic!("the CLOEXEC pipe failed: {e:?}")
@@ -374,6 +374,13 @@ impl Command {
return Err(io::Error::last_os_error());
}
}
+ #[cfg(target_os = "hurd")]
+ {
+ let ret = sys::signal(libc::SIGLOST, libc::SIG_DFL);
+ if ret == libc::SIG_ERR {
+ return Err(io::Error::last_os_error());
+ }
+ }
}
}
@@ -620,6 +627,10 @@ impl Command {
let mut default_set = MaybeUninit::<libc::sigset_t>::uninit();
cvt(sigemptyset(default_set.as_mut_ptr()))?;
cvt(sigaddset(default_set.as_mut_ptr(), libc::SIGPIPE))?;
+ #[cfg(target_os = "hurd")]
+ {
+ cvt(sigaddset(default_set.as_mut_ptr(), libc::SIGLOST))?;
+ }
cvt_nz(libc::posix_spawnattr_setsigdefault(
attrs.0.as_mut_ptr(),
default_set.as_ptr(),
@@ -993,6 +1004,8 @@ fn signal_string(signal: i32) -> &'static str {
target_os = "dragonfly"
))]
libc::SIGINFO => " (SIGINFO)",
+ #[cfg(target_os = "hurd")]
+ libc::SIGLOST => " (SIGLOST)",
_ => "",
}
}
diff --git a/library/std/src/sys/unix/stack_overflow.rs b/library/std/src/sys/unix/stack_overflow.rs
index b59d4ba26..73c530786 100644
--- a/library/std/src/sys/unix/stack_overflow.rs
+++ b/library/std/src/sys/unix/stack_overflow.rs
@@ -32,6 +32,7 @@ impl Drop for Handler {
target_os = "macos",
target_os = "dragonfly",
target_os = "freebsd",
+ target_os = "hurd",
target_os = "solaris",
target_os = "illumos",
target_os = "netbsd",
@@ -193,6 +194,7 @@ mod imp {
target_os = "macos",
target_os = "dragonfly",
target_os = "freebsd",
+ target_os = "hurd",
target_os = "solaris",
target_os = "illumos",
target_os = "netbsd",
diff --git a/library/std/src/sys/unix/thread.rs b/library/std/src/sys/unix/thread.rs
index 4f2d9cf36..311ed9502 100644
--- a/library/std/src/sys/unix/thread.rs
+++ b/library/std/src/sys/unix/thread.rs
@@ -182,6 +182,9 @@ impl Thread {
}
if let Some(f) = pthread_setname_np.get() {
+ #[cfg(target_os = "nto")]
+ let name = truncate_cstr::<{ libc::_NTO_THREAD_NAME_MAX as usize }>(name);
+
let res = unsafe { f(libc::pthread_self(), name.as_ptr()) };
debug_assert_eq!(res, 0);
}
@@ -213,7 +216,8 @@ impl Thread {
target_os = "l4re",
target_os = "emscripten",
target_os = "redox",
- target_os = "vxworks"
+ target_os = "vxworks",
+ target_os = "hurd",
))]
pub fn set_name(_name: &CStr) {
// Newlib, Emscripten, and VxWorks have no way to set a thread name.
@@ -290,6 +294,7 @@ impl Drop for Thread {
target_os = "ios",
target_os = "tvos",
target_os = "watchos",
+ target_os = "nto",
))]
fn truncate_cstr<const MAX_WITH_NUL: usize>(cstr: &CStr) -> [libc::c_char; MAX_WITH_NUL] {
let mut result = [0; MAX_WITH_NUL];
@@ -305,6 +310,7 @@ pub fn available_parallelism() -> io::Result<NonZeroUsize> {
target_os = "android",
target_os = "emscripten",
target_os = "fuchsia",
+ target_os = "hurd",
target_os = "ios",
target_os = "tvos",
target_os = "linux",
@@ -312,23 +318,38 @@ pub fn available_parallelism() -> io::Result<NonZeroUsize> {
target_os = "solaris",
target_os = "illumos",
))] {
+ #[allow(unused_assignments)]
+ #[allow(unused_mut)]
+ let mut quota = usize::MAX;
+
#[cfg(any(target_os = "android", target_os = "linux"))]
{
- let quota = cgroups::quota().max(1);
+ quota = cgroups::quota().max(1);
let mut set: libc::cpu_set_t = unsafe { mem::zeroed() };
unsafe {
if libc::sched_getaffinity(0, mem::size_of::<libc::cpu_set_t>(), &mut set) == 0 {
let count = libc::CPU_COUNT(&set) as usize;
let count = count.min(quota);
- // SAFETY: affinity mask can't be empty and the quota gets clamped to a minimum of 1
- return Ok(NonZeroUsize::new_unchecked(count));
+
+ // According to sched_getaffinity's API it should always be non-zero, but
+ // some old MIPS kernels were buggy and zero-initialized the mask if
+ // none was explicitly set.
+ // In that case we use the sysconf fallback.
+ if let Some(count) = NonZeroUsize::new(count) {
+ return Ok(count)
+ }
}
}
}
match unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) } {
-1 => Err(io::Error::last_os_error()),
0 => Err(io::const_io_error!(io::ErrorKind::NotFound, "The number of hardware threads is not known for the target platform")),
- cpus => Ok(unsafe { NonZeroUsize::new_unchecked(cpus as usize) }),
+ cpus => {
+ let count = cpus as usize;
+ // Cover the unusual situation where we were able to get the quota but not the affinity mask
+ let count = count.min(quota);
+ Ok(unsafe { NonZeroUsize::new_unchecked(count) })
+ }
}
} else if #[cfg(any(target_os = "freebsd", target_os = "dragonfly", target_os = "netbsd"))] {
use crate::ptr;
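The restructured logic above establishes a clamping order: the cgroup quota (where available) bounds both the affinity-mask count and, when the mask is unusable, the `sysconf` CPU count. A standalone sketch of that decision flow (function name and inputs are illustrative, not std internals):

```rust
// Prefer the affinity-mask count when the kernel reports a non-empty mask,
// otherwise fall back to the online-CPU count, and clamp either result by
// the cgroup quota.
fn effective_parallelism(
    cgroup_quota: Option<usize>, // e.g. derived from cpu.max, if present
    affinity_count: usize,       // CPU_COUNT of sched_getaffinity (0 on buggy kernels)
    online_cpus: usize,          // _SC_NPROCESSORS_ONLN
) -> usize {
    let quota = cgroup_quota.unwrap_or(usize::MAX).max(1);
    if affinity_count > 0 {
        affinity_count.min(quota)
    } else {
        online_cpus.min(quota)
    }
}

fn main() {
    assert_eq!(effective_parallelism(Some(2), 8, 16), 2);
    assert_eq!(effective_parallelism(None, 0, 16), 16);
}
```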
@@ -686,6 +707,7 @@ mod cgroups {
#[cfg(all(
not(target_os = "linux"),
not(target_os = "freebsd"),
+ not(target_os = "hurd"),
not(target_os = "macos"),
not(target_os = "netbsd"),
not(target_os = "openbsd"),
@@ -706,6 +728,7 @@ pub mod guard {
#[cfg(any(
target_os = "linux",
target_os = "freebsd",
+ target_os = "hurd",
target_os = "macos",
target_os = "netbsd",
target_os = "openbsd",
@@ -762,6 +785,7 @@ pub mod guard {
#[cfg(any(
target_os = "android",
target_os = "freebsd",
+ target_os = "hurd",
target_os = "linux",
target_os = "netbsd",
target_os = "l4re"
@@ -899,6 +923,7 @@ pub mod guard {
#[cfg(any(
target_os = "android",
target_os = "freebsd",
+ target_os = "hurd",
target_os = "linux",
target_os = "netbsd",
target_os = "l4re"
@@ -930,7 +955,7 @@ pub mod guard {
assert_eq!(libc::pthread_attr_getstack(&attr, &mut stackptr, &mut size), 0);
let stackaddr = stackptr.addr();
- ret = if cfg!(any(target_os = "freebsd", target_os = "netbsd")) {
+ ret = if cfg!(any(target_os = "freebsd", target_os = "netbsd", target_os = "hurd")) {
Some(stackaddr - guardsize..stackaddr)
} else if cfg!(all(target_os = "linux", target_env = "musl")) {
Some(stackaddr - guardsize..stackaddr)
diff --git a/library/std/src/sys/unix/thread_local_dtor.rs b/library/std/src/sys/unix/thread_local_dtor.rs
index 236d2f2ee..fba2a676f 100644
--- a/library/std/src/sys/unix/thread_local_dtor.rs
+++ b/library/std/src/sys/unix/thread_local_dtor.rs
@@ -11,7 +11,7 @@
// Note, however, that we run on lots of older linuxes, as well as cross
// compiling from a newer linux to an older linux, so we also have a
// fallback implementation to use as well.
-#[cfg(any(target_os = "linux", target_os = "fuchsia", target_os = "redox"))]
+#[cfg(any(target_os = "linux", target_os = "fuchsia", target_os = "redox", target_os = "hurd"))]
pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
use crate::mem;
use crate::sys_common::thread_local_dtor::register_dtor_fallback;
@@ -48,7 +48,7 @@ pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
// workaround below is to register, via _tlv_atexit, a custom DTOR list once per
// thread. thread_local dtors are pushed to the DTOR list without calling
// _tlv_atexit.
-#[cfg(target_os = "macos")]
+#[cfg(any(target_os = "macos", target_os = "ios", target_os = "watchos"))]
pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
use crate::cell::Cell;
use crate::mem;
diff --git a/library/std/src/sys/unix/thread_parking/darwin.rs b/library/std/src/sys/unix/thread_parking/darwin.rs
index b709fada3..8231f3cba 100644
--- a/library/std/src/sys/unix/thread_parking/darwin.rs
+++ b/library/std/src/sys/unix/thread_parking/darwin.rs
@@ -2,8 +2,7 @@
//!
//! Darwin actually has futex syscalls (`__ulock_wait`/`__ulock_wake`), but they
//! cannot be used in `std` because they are non-public (their use will lead to
-//! rejection from the App Store) and because they are only available starting
-//! with macOS version 10.12, even though the minimum target version is 10.7.
+//! rejection from the App Store).
//!
//! Therefore, we need to look for other synchronization primitives. Luckily, Darwin
//! supports semaphores, which allow us to implement the behaviour we need with
diff --git a/library/std/src/sys/unix/time.rs b/library/std/src/sys/unix/time.rs
index 17b4130c2..4fe61b284 100644
--- a/library/std/src/sys/unix/time.rs
+++ b/library/std/src/sys/unix/time.rs
@@ -35,7 +35,7 @@ pub(in crate::sys::unix) struct Timespec {
}
impl SystemTime {
- #[cfg_attr(target_os = "horizon", allow(unused))]
+ #[cfg_attr(any(target_os = "horizon", target_os = "hurd"), allow(unused))]
pub fn new(tv_sec: i64, tv_nsec: i64) -> SystemTime {
SystemTime { t: Timespec::new(tv_sec, tv_nsec) }
}
diff --git a/library/std/src/sys/unsupported/common.rs b/library/std/src/sys/unsupported/common.rs
index 5cd9e57de..5c379992b 100644
--- a/library/std/src/sys/unsupported/common.rs
+++ b/library/std/src/sys/unsupported/common.rs
@@ -23,6 +23,10 @@ pub fn unsupported_err() -> std_io::Error {
)
}
+pub fn is_interrupted(_code: i32) -> bool {
+ false
+}
+
pub fn decode_error_kind(_code: i32) -> crate::io::ErrorKind {
crate::io::ErrorKind::Uncategorized
}
diff --git a/library/std/src/sys/unsupported/process.rs b/library/std/src/sys/unsupported/process.rs
index 77b675aaa..a639afcc6 100644
--- a/library/std/src/sys/unsupported/process.rs
+++ b/library/std/src/sys/unsupported/process.rs
@@ -27,6 +27,8 @@ pub struct StdioPipes {
pub stderr: Option<AnonPipe>,
}
+// FIXME: This should be a unit struct, so we can always construct it.
+// The value here should never be used, since we cannot spawn processes.
pub enum Stdio {
Inherit,
Null,
@@ -87,8 +89,26 @@ impl From<AnonPipe> for Stdio {
}
}
+impl From<io::Stdout> for Stdio {
+ fn from(_: io::Stdout) -> Stdio {
+ // FIXME: This is wrong.
+ // Instead, the Stdio we have here should be a unit struct.
+ panic!("unsupported")
+ }
+}
+
+impl From<io::Stderr> for Stdio {
+ fn from(_: io::Stderr) -> Stdio {
+ // FIXME: This is wrong.
+ // Instead, the Stdio we have here should be a unit struct.
+ panic!("unsupported")
+ }
+}
+
impl From<File> for Stdio {
fn from(_file: File) -> Stdio {
+ // FIXME: This is wrong.
+ // Instead, the Stdio we have here should be a unit struct.
panic!("unsupported")
}
}
diff --git a/library/std/src/sys/wasi/mod.rs b/library/std/src/sys/wasi/mod.rs
index 98517da1d..5cbb5cb65 100644
--- a/library/std/src/sys/wasi/mod.rs
+++ b/library/std/src/sys/wasi/mod.rs
@@ -76,6 +76,11 @@ cfg_if::cfg_if! {
mod common;
pub use common::*;
+#[inline]
+pub fn is_interrupted(errno: i32) -> bool {
+ errno == wasi::ERRNO_INTR.raw().into()
+}
+
pub fn decode_error_kind(errno: i32) -> std_io::ErrorKind {
use std_io::ErrorKind::*;
if errno > u16::MAX as i32 || errno < 0 {
diff --git a/library/std/src/sys/windows/args.rs b/library/std/src/sys/windows/args.rs
index 6b597f499..ee7dba6e5 100644
--- a/library/std/src/sys/windows/args.rs
+++ b/library/std/src/sys/windows/args.rs
@@ -226,7 +226,7 @@ pub(crate) fn append_arg(cmd: &mut Vec<u16>, arg: &Arg, force_quotes: bool) -> i
// that it actually gets passed through on the command line or otherwise
// it will be dropped entirely when parsed on the other end.
ensure_no_nuls(arg)?;
- let arg_bytes = arg.as_os_str_bytes();
+ let arg_bytes = arg.as_encoded_bytes();
let (quote, escape) = match quote {
Quote::Always => (true, true),
Quote::Auto => {
@@ -298,7 +298,7 @@ pub(crate) fn make_bat_command_line(
const SPECIAL: &[u8] = b"\t &()[]{}^=;!'+,`~%|<>";
let force_quotes = match arg {
Arg::Regular(arg) if !force_quotes => {
- arg.as_os_str_bytes().iter().any(|c| SPECIAL.contains(c))
+ arg.as_encoded_bytes().iter().any(|c| SPECIAL.contains(c))
}
_ => force_quotes,
};
diff --git a/library/std/src/sys/windows/c.rs b/library/std/src/sys/windows/c.rs
index d9ccba0e9..f3637cbb9 100644
--- a/library/std/src/sys/windows/c.rs
+++ b/library/std/src/sys/windows/c.rs
@@ -46,10 +46,6 @@ pub use FD_SET as fd_set;
pub use LINGER as linger;
pub use TIMEVAL as timeval;
-pub type CONDITION_VARIABLE = RTL_CONDITION_VARIABLE;
-pub type SRWLOCK = RTL_SRWLOCK;
-pub type INIT_ONCE = RTL_RUN_ONCE;
-
pub const CONDITION_VARIABLE_INIT: CONDITION_VARIABLE = CONDITION_VARIABLE { Ptr: ptr::null_mut() };
pub const SRWLOCK_INIT: SRWLOCK = SRWLOCK { Ptr: ptr::null_mut() };
pub const INIT_ONCE_STATIC_INIT: INIT_ONCE = INIT_ONCE { Ptr: ptr::null_mut() };
@@ -224,7 +220,7 @@ pub unsafe extern "system" fn ReadFileEx(
) -> BOOL {
windows_sys::ReadFileEx(
hFile.as_raw_handle(),
- lpBuffer,
+ lpBuffer.cast::<u8>(),
nNumberOfBytesToRead,
lpOverlapped,
lpCompletionRoutine,
diff --git a/library/std/src/sys/windows/c/windows_sys.lst b/library/std/src/sys/windows/c/windows_sys.lst
index 631aedd26..0aca37e2d 100644
--- a/library/std/src/sys/windows/c/windows_sys.lst
+++ b/library/std/src/sys/windows/c/windows_sys.lst
@@ -1,3 +1,6 @@
+--out windows_sys.rs
+--config flatten std
+--filter
// tidy-alphabetical-start
Windows.Wdk.Storage.FileSystem.FILE_COMPLETE_IF_OPLOCKED
Windows.Wdk.Storage.FileSystem.FILE_CONTAINS_EXTENDED_CREATE_INFORMATION
@@ -2108,7 +2111,6 @@ Windows.Win32.Networking.WinSock.WSABASEERR
Windows.Win32.Networking.WinSock.WSABUF
Windows.Win32.Networking.WinSock.WSACleanup
Windows.Win32.Networking.WinSock.WSADATA
-Windows.Win32.Networking.WinSock.WSADATA
Windows.Win32.Networking.WinSock.WSADuplicateSocketW
Windows.Win32.Networking.WinSock.WSAEACCES
Windows.Win32.Networking.WinSock.WSAEADDRINUSE
@@ -2328,7 +2330,6 @@ Windows.Win32.Storage.FileSystem.FileStandardInfo
Windows.Win32.Storage.FileSystem.FileStorageInfo
Windows.Win32.Storage.FileSystem.FileStreamInfo
Windows.Win32.Storage.FileSystem.FindClose
-Windows.Win32.Storage.FileSystem.FindFileHandle
Windows.Win32.Storage.FileSystem.FindFirstFileW
Windows.Win32.Storage.FileSystem.FindNextFileW
Windows.Win32.Storage.FileSystem.FlushFileBuffers
@@ -2420,8 +2421,6 @@ Windows.Win32.System.Console.STD_OUTPUT_HANDLE
Windows.Win32.System.Console.WriteConsoleW
Windows.Win32.System.Diagnostics.Debug.ARM64_NT_NEON128
Windows.Win32.System.Diagnostics.Debug.CONTEXT
-Windows.Win32.System.Diagnostics.Debug.CONTEXT
-Windows.Win32.System.Diagnostics.Debug.CONTEXT
Windows.Win32.System.Diagnostics.Debug.EXCEPTION_RECORD
Windows.Win32.System.Diagnostics.Debug.FACILITY_CODE
Windows.Win32.System.Diagnostics.Debug.FACILITY_NT_BIT
@@ -2435,7 +2434,6 @@ Windows.Win32.System.Diagnostics.Debug.FORMAT_MESSAGE_OPTIONS
Windows.Win32.System.Diagnostics.Debug.FormatMessageW
Windows.Win32.System.Diagnostics.Debug.M128A
Windows.Win32.System.Diagnostics.Debug.XSAVE_FORMAT
-Windows.Win32.System.Diagnostics.Debug.XSAVE_FORMAT
Windows.Win32.System.Environment.FreeEnvironmentStringsW
Windows.Win32.System.Environment.GetCommandLineW
Windows.Win32.System.Environment.GetCurrentDirectoryW
@@ -2456,7 +2454,6 @@ Windows.Win32.System.Kernel.ExceptionContinueExecution
Windows.Win32.System.Kernel.ExceptionContinueSearch
Windows.Win32.System.Kernel.ExceptionNestedException
Windows.Win32.System.Kernel.FLOATING_SAVE_AREA
-Windows.Win32.System.Kernel.FLOATING_SAVE_AREA
Windows.Win32.System.Kernel.OBJ_DONT_REPARSE
Windows.Win32.System.LibraryLoader.GetModuleFileNameW
Windows.Win32.System.LibraryLoader.GetModuleHandleA
@@ -2482,6 +2479,7 @@ Windows.Win32.System.SystemInformation.GetSystemTimeAsFileTime
Windows.Win32.System.SystemInformation.GetWindowsDirectoryW
Windows.Win32.System.SystemInformation.PROCESSOR_ARCHITECTURE
Windows.Win32.System.SystemInformation.SYSTEM_INFO
+Windows.Win32.System.SystemServices.ALL_PROCESSOR_GROUPS
Windows.Win32.System.SystemServices.DLL_PROCESS_DETACH
Windows.Win32.System.SystemServices.DLL_THREAD_DETACH
Windows.Win32.System.SystemServices.EXCEPTION_MAXIMUM_PARAMETERS
@@ -2510,9 +2508,11 @@ Windows.Win32.System.Threading.CreateProcessW
Windows.Win32.System.Threading.CreateThread
Windows.Win32.System.Threading.DEBUG_ONLY_THIS_PROCESS
Windows.Win32.System.Threading.DEBUG_PROCESS
+Windows.Win32.System.Threading.DeleteProcThreadAttributeList
Windows.Win32.System.Threading.DETACHED_PROCESS
Windows.Win32.System.Threading.ExitProcess
Windows.Win32.System.Threading.EXTENDED_STARTUPINFO_PRESENT
+Windows.Win32.System.Threading.GetActiveProcessorCount
Windows.Win32.System.Threading.GetCurrentProcess
Windows.Win32.System.Threading.GetCurrentProcessId
Windows.Win32.System.Threading.GetCurrentThread
@@ -2524,8 +2524,10 @@ Windows.Win32.System.Threading.INFINITE
Windows.Win32.System.Threading.INHERIT_CALLER_PRIORITY
Windows.Win32.System.Threading.INHERIT_PARENT_AFFINITY
Windows.Win32.System.Threading.INIT_ONCE_INIT_FAILED
+Windows.Win32.System.Threading.InitializeProcThreadAttributeList
Windows.Win32.System.Threading.InitOnceBeginInitialize
Windows.Win32.System.Threading.InitOnceComplete
+Windows.Win32.System.Threading.LPPROC_THREAD_ATTRIBUTE_LIST
Windows.Win32.System.Threading.LPTHREAD_START_ROUTINE
Windows.Win32.System.Threading.NORMAL_PRIORITY_CLASS
Windows.Win32.System.Threading.OpenProcessToken
@@ -2539,9 +2541,6 @@ Windows.Win32.System.Threading.PROFILE_USER
Windows.Win32.System.Threading.REALTIME_PRIORITY_CLASS
Windows.Win32.System.Threading.ReleaseSRWLockExclusive
Windows.Win32.System.Threading.ReleaseSRWLockShared
-Windows.Win32.System.Threading.RTL_CONDITION_VARIABLE
-Windows.Win32.System.Threading.RTL_RUN_ONCE
-Windows.Win32.System.Threading.RTL_SRWLOCK
Windows.Win32.System.Threading.SetThreadStackGuarantee
Windows.Win32.System.Threading.Sleep
Windows.Win32.System.Threading.SleepConditionVariableSRW
@@ -2561,6 +2560,7 @@ Windows.Win32.System.Threading.STARTF_USEPOSITION
Windows.Win32.System.Threading.STARTF_USESHOWWINDOW
Windows.Win32.System.Threading.STARTF_USESIZE
Windows.Win32.System.Threading.STARTF_USESTDHANDLES
+Windows.Win32.System.Threading.STARTUPINFOEXW
Windows.Win32.System.Threading.STARTUPINFOW
Windows.Win32.System.Threading.STARTUPINFOW_FLAGS
Windows.Win32.System.Threading.SwitchToThread
@@ -2575,12 +2575,11 @@ Windows.Win32.System.Threading.TlsGetValue
Windows.Win32.System.Threading.TlsSetValue
Windows.Win32.System.Threading.TryAcquireSRWLockExclusive
Windows.Win32.System.Threading.TryAcquireSRWLockShared
+Windows.Win32.System.Threading.UpdateProcThreadAttribute
Windows.Win32.System.Threading.WaitForMultipleObjects
Windows.Win32.System.Threading.WaitForSingleObject
Windows.Win32.System.Threading.WakeAllConditionVariable
Windows.Win32.System.Threading.WakeConditionVariable
-Windows.Win32.System.WindowsProgramming.IO_STATUS_BLOCK
-Windows.Win32.System.WindowsProgramming.OBJECT_ATTRIBUTES
Windows.Win32.System.WindowsProgramming.PROGRESS_CONTINUE
Windows.Win32.UI.Shell.GetUserProfileDirectoryW
// tidy-alphabetical-end
diff --git a/library/std/src/sys/windows/c/windows_sys.rs b/library/std/src/sys/windows/c/windows_sys.rs
index 023770871..851d15915 100644
--- a/library/std/src/sys/windows/c/windows_sys.rs
+++ b/library/std/src/sys/windows/c/windows_sys.rs
@@ -4,7 +4,7 @@
// regenerate the bindings.
//
// ignore-tidy-filelength
-// Bindings generated by `windows-bindgen` 0.49.0
+// Bindings generated by `windows-bindgen` 0.51.1
#![allow(non_snake_case, non_upper_case_globals, non_camel_case_types, dead_code, clippy::all)]
#[link(name = "advapi32")]
@@ -32,11 +32,11 @@ extern "system" {
}
#[link(name = "kernel32")]
extern "system" {
- pub fn AcquireSRWLockExclusive(srwlock: *mut RTL_SRWLOCK) -> ();
+ pub fn AcquireSRWLockExclusive(srwlock: *mut SRWLOCK) -> ();
}
#[link(name = "kernel32")]
extern "system" {
- pub fn AcquireSRWLockShared(srwlock: *mut RTL_SRWLOCK) -> ();
+ pub fn AcquireSRWLockShared(srwlock: *mut SRWLOCK) -> ();
}
#[link(name = "kernel32")]
extern "system" {
@@ -156,6 +156,10 @@ extern "system" {
}
#[link(name = "kernel32")]
extern "system" {
+ pub fn DeleteProcThreadAttributeList(lpattributelist: LPPROC_THREAD_ATTRIBUTE_LIST) -> ();
+}
+#[link(name = "kernel32")]
+extern "system" {
pub fn DeviceIoControl(
hdevice: HANDLE,
dwiocontrolcode: u32,
@@ -185,18 +189,15 @@ extern "system" {
}
#[link(name = "kernel32")]
extern "system" {
- pub fn FindClose(hfindfile: FindFileHandle) -> BOOL;
+ pub fn FindClose(hfindfile: HANDLE) -> BOOL;
}
#[link(name = "kernel32")]
extern "system" {
- pub fn FindFirstFileW(
- lpfilename: PCWSTR,
- lpfindfiledata: *mut WIN32_FIND_DATAW,
- ) -> FindFileHandle;
+ pub fn FindFirstFileW(lpfilename: PCWSTR, lpfindfiledata: *mut WIN32_FIND_DATAW) -> HANDLE;
}
#[link(name = "kernel32")]
extern "system" {
- pub fn FindNextFileW(hfindfile: FindFileHandle, lpfindfiledata: *mut WIN32_FIND_DATAW) -> BOOL;
+ pub fn FindNextFileW(hfindfile: HANDLE, lpfindfiledata: *mut WIN32_FIND_DATAW) -> BOOL;
}
#[link(name = "kernel32")]
extern "system" {
@@ -220,6 +221,10 @@ extern "system" {
}
#[link(name = "kernel32")]
extern "system" {
+ pub fn GetActiveProcessorCount(groupnumber: u16) -> u32;
+}
+#[link(name = "kernel32")]
+extern "system" {
pub fn GetCommandLineW() -> PCWSTR;
}
#[link(name = "kernel32")]
@@ -356,7 +361,7 @@ extern "system" {
#[link(name = "kernel32")]
extern "system" {
pub fn InitOnceBeginInitialize(
- lpinitonce: *mut RTL_RUN_ONCE,
+ lpinitonce: *mut INIT_ONCE,
dwflags: u32,
fpending: *mut BOOL,
lpcontext: *mut *mut ::core::ffi::c_void,
@@ -365,13 +370,22 @@ extern "system" {
#[link(name = "kernel32")]
extern "system" {
pub fn InitOnceComplete(
- lpinitonce: *mut RTL_RUN_ONCE,
+ lpinitonce: *mut INIT_ONCE,
dwflags: u32,
lpcontext: *const ::core::ffi::c_void,
) -> BOOL;
}
#[link(name = "kernel32")]
extern "system" {
+ pub fn InitializeProcThreadAttributeList(
+ lpattributelist: LPPROC_THREAD_ATTRIBUTE_LIST,
+ dwattributecount: u32,
+ dwflags: u32,
+ lpsize: *mut usize,
+ ) -> BOOL;
+}
+#[link(name = "kernel32")]
+extern "system" {
pub fn MoveFileExW(
lpexistingfilename: PCWSTR,
lpnewfilename: PCWSTR,
@@ -411,7 +425,7 @@ extern "system" {
extern "system" {
pub fn ReadFile(
hfile: HANDLE,
- lpbuffer: *mut ::core::ffi::c_void,
+ lpbuffer: *mut u8,
nnumberofbytestoread: u32,
lpnumberofbytesread: *mut u32,
lpoverlapped: *mut OVERLAPPED,
@@ -421,7 +435,7 @@ extern "system" {
extern "system" {
pub fn ReadFileEx(
hfile: HANDLE,
- lpbuffer: *mut ::core::ffi::c_void,
+ lpbuffer: *mut u8,
nnumberofbytestoread: u32,
lpoverlapped: *mut OVERLAPPED,
lpcompletionroutine: LPOVERLAPPED_COMPLETION_ROUTINE,
@@ -429,11 +443,11 @@ extern "system" {
}
#[link(name = "kernel32")]
extern "system" {
- pub fn ReleaseSRWLockExclusive(srwlock: *mut RTL_SRWLOCK) -> ();
+ pub fn ReleaseSRWLockExclusive(srwlock: *mut SRWLOCK) -> ();
}
#[link(name = "kernel32")]
extern "system" {
- pub fn ReleaseSRWLockShared(srwlock: *mut RTL_SRWLOCK) -> ();
+ pub fn ReleaseSRWLockShared(srwlock: *mut SRWLOCK) -> ();
}
#[link(name = "kernel32")]
extern "system" {
@@ -500,8 +514,8 @@ extern "system" {
#[link(name = "kernel32")]
extern "system" {
pub fn SleepConditionVariableSRW(
- conditionvariable: *mut RTL_CONDITION_VARIABLE,
- srwlock: *mut RTL_SRWLOCK,
+ conditionvariable: *mut CONDITION_VARIABLE,
+ srwlock: *mut SRWLOCK,
dwmilliseconds: u32,
flags: u32,
) -> BOOL;
@@ -536,11 +550,23 @@ extern "system" {
}
#[link(name = "kernel32")]
extern "system" {
- pub fn TryAcquireSRWLockExclusive(srwlock: *mut RTL_SRWLOCK) -> BOOLEAN;
+ pub fn TryAcquireSRWLockExclusive(srwlock: *mut SRWLOCK) -> BOOLEAN;
+}
+#[link(name = "kernel32")]
+extern "system" {
+ pub fn TryAcquireSRWLockShared(srwlock: *mut SRWLOCK) -> BOOLEAN;
}
#[link(name = "kernel32")]
extern "system" {
- pub fn TryAcquireSRWLockShared(srwlock: *mut RTL_SRWLOCK) -> BOOLEAN;
+ pub fn UpdateProcThreadAttribute(
+ lpattributelist: LPPROC_THREAD_ATTRIBUTE_LIST,
+ dwflags: u32,
+ attribute: usize,
+ lpvalue: *const ::core::ffi::c_void,
+ cbsize: usize,
+ lppreviousvalue: *mut ::core::ffi::c_void,
+ lpreturnsize: *const usize,
+ ) -> BOOL;
}
#[link(name = "kernel32")]
extern "system" {
@@ -549,19 +575,19 @@ extern "system" {
lphandles: *const HANDLE,
bwaitall: BOOL,
dwmilliseconds: u32,
- ) -> WIN32_ERROR;
+ ) -> WAIT_EVENT;
}
#[link(name = "kernel32")]
extern "system" {
- pub fn WaitForSingleObject(hhandle: HANDLE, dwmilliseconds: u32) -> WIN32_ERROR;
+ pub fn WaitForSingleObject(hhandle: HANDLE, dwmilliseconds: u32) -> WAIT_EVENT;
}
#[link(name = "kernel32")]
extern "system" {
- pub fn WakeAllConditionVariable(conditionvariable: *mut RTL_CONDITION_VARIABLE) -> ();
+ pub fn WakeAllConditionVariable(conditionvariable: *mut CONDITION_VARIABLE) -> ();
}
#[link(name = "kernel32")]
extern "system" {
- pub fn WakeConditionVariable(conditionvariable: *mut RTL_CONDITION_VARIABLE) -> ();
+ pub fn WakeConditionVariable(conditionvariable: *mut CONDITION_VARIABLE) -> ();
}
#[link(name = "kernel32")]
extern "system" {
@@ -822,6 +848,7 @@ impl ::core::clone::Clone for ADDRINFOA {
pub const AF_INET: ADDRESS_FAMILY = 2u16;
pub const AF_INET6: ADDRESS_FAMILY = 23u16;
pub const AF_UNSPEC: ADDRESS_FAMILY = 0u16;
+pub const ALL_PROCESSOR_GROUPS: u32 = 65535u32;
#[repr(C)]
pub union ARM64_NT_NEON128 {
pub Anonymous: ARM64_NT_NEON128_0,
@@ -874,7 +901,17 @@ impl ::core::clone::Clone for BY_HANDLE_FILE_INFORMATION {
}
pub const CALLBACK_CHUNK_FINISHED: LPPROGRESS_ROUTINE_CALLBACK_REASON = 0u32;
pub const CALLBACK_STREAM_SWITCH: LPPROGRESS_ROUTINE_CALLBACK_REASON = 1u32;
-pub type COMPARESTRING_RESULT = u32;
+pub type COMPARESTRING_RESULT = i32;
+#[repr(C)]
+pub struct CONDITION_VARIABLE {
+ pub Ptr: *mut ::core::ffi::c_void,
+}
+impl ::core::marker::Copy for CONDITION_VARIABLE {}
+impl ::core::clone::Clone for CONDITION_VARIABLE {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
pub type CONSOLE_MODE = u32;
#[repr(C)]
pub struct CONSOLE_READCONSOLE_CONTROL {
@@ -892,7 +929,7 @@ impl ::core::clone::Clone for CONSOLE_READCONSOLE_CONTROL {
#[repr(C)]
#[cfg(target_arch = "aarch64")]
pub struct CONTEXT {
- pub ContextFlags: u32,
+ pub ContextFlags: CONTEXT_FLAGS,
pub Cpsr: u32,
pub Anonymous: CONTEXT_0,
pub Sp: u64,
@@ -979,7 +1016,7 @@ pub struct CONTEXT {
pub P4Home: u64,
pub P5Home: u64,
pub P6Home: u64,
- pub ContextFlags: u32,
+ pub ContextFlags: CONTEXT_FLAGS,
pub MxCsr: u32,
pub SegCs: u16,
pub SegDs: u16,
@@ -1075,7 +1112,7 @@ impl ::core::clone::Clone for CONTEXT_0_0 {
#[repr(C)]
#[cfg(target_arch = "x86")]
pub struct CONTEXT {
- pub ContextFlags: u32,
+ pub ContextFlags: CONTEXT_FLAGS,
pub Dr0: u32,
pub Dr1: u32,
pub Dr2: u32,
@@ -1109,6 +1146,7 @@ impl ::core::clone::Clone for CONTEXT {
*self
}
}
+pub type CONTEXT_FLAGS = u32;
pub const CP_UTF8: u32 = 65001u32;
pub const CREATE_ALWAYS: FILE_CREATION_DISPOSITION = 2u32;
pub const CREATE_BREAKAWAY_FROM_JOB: PROCESS_CREATION_FLAGS = 16777216u32;
@@ -1126,9 +1164,9 @@ pub const CREATE_SEPARATE_WOW_VDM: PROCESS_CREATION_FLAGS = 2048u32;
pub const CREATE_SHARED_WOW_VDM: PROCESS_CREATION_FLAGS = 4096u32;
pub const CREATE_SUSPENDED: PROCESS_CREATION_FLAGS = 4u32;
pub const CREATE_UNICODE_ENVIRONMENT: PROCESS_CREATION_FLAGS = 1024u32;
-pub const CSTR_EQUAL: COMPARESTRING_RESULT = 2u32;
-pub const CSTR_GREATER_THAN: COMPARESTRING_RESULT = 3u32;
-pub const CSTR_LESS_THAN: COMPARESTRING_RESULT = 1u32;
+pub const CSTR_EQUAL: COMPARESTRING_RESULT = 2i32;
+pub const CSTR_GREATER_THAN: COMPARESTRING_RESULT = 3i32;
+pub const CSTR_LESS_THAN: COMPARESTRING_RESULT = 1i32;
pub const DEBUG_ONLY_THIS_PROCESS: PROCESS_CREATION_FLAGS = 2u32;
pub const DEBUG_PROCESS: PROCESS_CREATION_FLAGS = 1u32;
pub const DELETE: FILE_ACCESS_RIGHTS = 65536u32;
@@ -3344,7 +3382,6 @@ pub const FileRenameInfoEx: FILE_INFO_BY_HANDLE_CLASS = 22i32;
pub const FileStandardInfo: FILE_INFO_BY_HANDLE_CLASS = 1i32;
pub const FileStorageInfo: FILE_INFO_BY_HANDLE_CLASS = 16i32;
pub const FileStreamInfo: FILE_INFO_BY_HANDLE_CLASS = 7i32;
-pub type FindFileHandle = *mut ::core::ffi::c_void;
pub type GENERIC_ACCESS_RIGHTS = u32;
pub const GENERIC_ALL: GENERIC_ACCESS_RIGHTS = 268435456u32;
pub const GENERIC_EXECUTE: GENERIC_ACCESS_RIGHTS = 536870912u32;
@@ -3358,6 +3395,12 @@ pub struct GUID {
pub data3: u16,
pub data4: [u8; 8],
}
+impl ::core::marker::Copy for GUID {}
+impl ::core::clone::Clone for GUID {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
impl GUID {
pub const fn from_u128(uuid: u128) -> Self {
Self {
@@ -3368,12 +3411,6 @@ impl GUID {
}
}
}
-impl ::core::marker::Copy for GUID {}
-impl ::core::clone::Clone for GUID {
- fn clone(&self) -> Self {
- *self
- }
-}
pub type HANDLE = *mut ::core::ffi::c_void;
pub type HANDLE_FLAGS = u32;
pub const HANDLE_FLAG_INHERIT: HANDLE_FLAGS = 1u32;
@@ -3406,6 +3443,16 @@ impl ::core::clone::Clone for IN6_ADDR_0 {
pub const INFINITE: u32 = 4294967295u32;
pub const INHERIT_CALLER_PRIORITY: PROCESS_CREATION_FLAGS = 131072u32;
pub const INHERIT_PARENT_AFFINITY: PROCESS_CREATION_FLAGS = 65536u32;
+#[repr(C)]
+pub union INIT_ONCE {
+ pub Ptr: *mut ::core::ffi::c_void,
+}
+impl ::core::marker::Copy for INIT_ONCE {}
+impl ::core::clone::Clone for INIT_ONCE {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
pub const INIT_ONCE_INIT_FAILED: u32 = 4u32;
pub const INVALID_FILE_ATTRIBUTES: u32 = 4294967295u32;
pub const INVALID_HANDLE_VALUE: HANDLE = ::core::ptr::invalid_mut(-1i32 as _);
@@ -3567,6 +3614,7 @@ pub type LPOVERLAPPED_COMPLETION_ROUTINE = ::core::option::Option<
lpoverlapped: *mut OVERLAPPED,
) -> (),
>;
+pub type LPPROC_THREAD_ATTRIBUTE_LIST = *mut ::core::ffi::c_void;
pub type LPPROGRESS_ROUTINE = ::core::option::Option<
unsafe extern "system" fn(
totalfilesize: i64,
@@ -3633,10 +3681,10 @@ pub type NTSTATUS = i32;
pub struct OBJECT_ATTRIBUTES {
pub Length: u32,
pub RootDirectory: HANDLE,
- pub ObjectName: *mut UNICODE_STRING,
+ pub ObjectName: *const UNICODE_STRING,
pub Attributes: u32,
- pub SecurityDescriptor: *mut ::core::ffi::c_void,
- pub SecurityQualityOfService: *mut ::core::ffi::c_void,
+ pub SecurityDescriptor: *const ::core::ffi::c_void,
+ pub SecurityQualityOfService: *const ::core::ffi::c_void,
}
impl ::core::marker::Copy for OBJECT_ATTRIBUTES {}
impl ::core::clone::Clone for OBJECT_ATTRIBUTES {
@@ -3686,8 +3734,8 @@ pub type PCSTR = *const u8;
pub type PCWSTR = *const u16;
pub type PIO_APC_ROUTINE = ::core::option::Option<
unsafe extern "system" fn(
- apccontext: *const ::core::ffi::c_void,
- iostatusblock: *const IO_STATUS_BLOCK,
+ apccontext: *mut ::core::ffi::c_void,
+ iostatusblock: *mut IO_STATUS_BLOCK,
reserved: u32,
) -> (),
>;
@@ -3729,36 +3777,6 @@ pub type PSTR = *mut u8;
pub type PWSTR = *mut u16;
pub const READ_CONTROL: FILE_ACCESS_RIGHTS = 131072u32;
pub const REALTIME_PRIORITY_CLASS: PROCESS_CREATION_FLAGS = 256u32;
-#[repr(C)]
-pub struct RTL_CONDITION_VARIABLE {
- pub Ptr: *mut ::core::ffi::c_void,
-}
-impl ::core::marker::Copy for RTL_CONDITION_VARIABLE {}
-impl ::core::clone::Clone for RTL_CONDITION_VARIABLE {
- fn clone(&self) -> Self {
- *self
- }
-}
-#[repr(C)]
-pub union RTL_RUN_ONCE {
- pub Ptr: *mut ::core::ffi::c_void,
-}
-impl ::core::marker::Copy for RTL_RUN_ONCE {}
-impl ::core::clone::Clone for RTL_RUN_ONCE {
- fn clone(&self) -> Self {
- *self
- }
-}
-#[repr(C)]
-pub struct RTL_SRWLOCK {
- pub Ptr: *mut ::core::ffi::c_void,
-}
-impl ::core::marker::Copy for RTL_SRWLOCK {}
-impl ::core::clone::Clone for RTL_SRWLOCK {
- fn clone(&self) -> Self {
- *self
- }
-}
pub const SD_BOTH: WINSOCK_SHUTDOWN_HOW = 2i32;
pub const SD_RECEIVE: WINSOCK_SHUTDOWN_HOW = 0i32;
pub const SD_SEND: WINSOCK_SHUTDOWN_HOW = 1i32;
@@ -3795,10 +3813,7 @@ impl ::core::clone::Clone for SOCKADDR {
*self
}
}
-#[cfg(target_pointer_width = "32")]
-pub type SOCKET = u32;
-#[cfg(target_pointer_width = "64")]
-pub type SOCKET = u64;
+pub type SOCKET = usize;
pub const SOCKET_ERROR: i32 = -1i32;
pub const SOCK_DGRAM: WINSOCK_SOCKET_TYPE = 2i32;
pub const SOCK_RAW: WINSOCK_SOCKET_TYPE = 3i32;
@@ -3812,6 +3827,16 @@ pub const SO_LINGER: i32 = 128i32;
pub const SO_RCVTIMEO: i32 = 4102i32;
pub const SO_SNDTIMEO: i32 = 4101i32;
pub const SPECIFIC_RIGHTS_ALL: FILE_ACCESS_RIGHTS = 65535u32;
+#[repr(C)]
+pub struct SRWLOCK {
+ pub Ptr: *mut ::core::ffi::c_void,
+}
+impl ::core::marker::Copy for SRWLOCK {}
+impl ::core::clone::Clone for SRWLOCK {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
pub const STACK_SIZE_PARAM_IS_A_RESERVATION: THREAD_CREATION_FLAGS = 65536u32;
pub const STANDARD_RIGHTS_ALL: FILE_ACCESS_RIGHTS = 2031616u32;
pub const STANDARD_RIGHTS_EXECUTE: FILE_ACCESS_RIGHTS = 131072u32;
@@ -3833,6 +3858,17 @@ pub const STARTF_USESHOWWINDOW: STARTUPINFOW_FLAGS = 1u32;
pub const STARTF_USESIZE: STARTUPINFOW_FLAGS = 2u32;
pub const STARTF_USESTDHANDLES: STARTUPINFOW_FLAGS = 256u32;
#[repr(C)]
+pub struct STARTUPINFOEXW {
+ pub StartupInfo: STARTUPINFOW,
+ pub lpAttributeList: LPPROC_THREAD_ATTRIBUTE_LIST,
+}
+impl ::core::marker::Copy for STARTUPINFOEXW {}
+impl ::core::clone::Clone for STARTUPINFOEXW {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+#[repr(C)]
pub struct STARTUPINFOW {
pub cb: u32,
pub lpReserved: PWSTR,
@@ -3971,12 +4007,13 @@ impl ::core::clone::Clone for UNICODE_STRING {
pub const VOLUME_NAME_DOS: GETFINALPATHNAMEBYHANDLE_FLAGS = 0u32;
pub const VOLUME_NAME_GUID: GETFINALPATHNAMEBYHANDLE_FLAGS = 1u32;
pub const VOLUME_NAME_NONE: GETFINALPATHNAMEBYHANDLE_FLAGS = 4u32;
-pub const WAIT_ABANDONED: WIN32_ERROR = 128u32;
-pub const WAIT_ABANDONED_0: WIN32_ERROR = 128u32;
-pub const WAIT_FAILED: WIN32_ERROR = 4294967295u32;
-pub const WAIT_IO_COMPLETION: WIN32_ERROR = 192u32;
-pub const WAIT_OBJECT_0: WIN32_ERROR = 0u32;
-pub const WAIT_TIMEOUT: WIN32_ERROR = 258u32;
+pub const WAIT_ABANDONED: WAIT_EVENT = 128u32;
+pub const WAIT_ABANDONED_0: WAIT_EVENT = 128u32;
+pub type WAIT_EVENT = u32;
+pub const WAIT_FAILED: WAIT_EVENT = 4294967295u32;
+pub const WAIT_IO_COMPLETION: WAIT_EVENT = 192u32;
+pub const WAIT_OBJECT_0: WAIT_EVENT = 0u32;
+pub const WAIT_TIMEOUT: WAIT_EVENT = 258u32;
pub const WC_ERR_INVALID_CHARS: u32 = 128u32;
pub type WIN32_ERROR = u32;
#[repr(C)]
diff --git a/library/std/src/sys/windows/handle.rs b/library/std/src/sys/windows/handle.rs
index 84c1fbde3..56d0d6c08 100644
--- a/library/std/src/sys/windows/handle.rs
+++ b/library/std/src/sys/windows/handle.rs
@@ -143,13 +143,8 @@ impl Handle {
) -> io::Result<Option<usize>> {
let len = cmp::min(buf.len(), <c::DWORD>::MAX as usize) as c::DWORD;
let mut amt = 0;
- let res = cvt(c::ReadFile(
- self.as_raw_handle(),
- buf.as_ptr() as c::LPVOID,
- len,
- &mut amt,
- overlapped,
- ));
+ let res =
+ cvt(c::ReadFile(self.as_raw_handle(), buf.as_mut_ptr(), len, &mut amt, overlapped));
match res {
Ok(_) => Ok(Some(amt as usize)),
Err(e) => {
diff --git a/library/std/src/sys/windows/mod.rs b/library/std/src/sys/windows/mod.rs
index bcc172b0f..b609ad247 100644
--- a/library/std/src/sys/windows/mod.rs
+++ b/library/std/src/sys/windows/mod.rs
@@ -60,6 +60,11 @@ pub unsafe fn cleanup() {
net::cleanup();
}
+#[inline]
+pub fn is_interrupted(_errno: i32) -> bool {
+ false
+}
+
pub fn decode_error_kind(errno: i32) -> ErrorKind {
use ErrorKind::*;
diff --git a/library/std/src/sys/windows/net.rs b/library/std/src/sys/windows/net.rs
index 1ae42cb7e..abdcab424 100644
--- a/library/std/src/sys/windows/net.rs
+++ b/library/std/src/sys/windows/net.rs
@@ -117,7 +117,7 @@ impl Socket {
};
if socket != c::INVALID_SOCKET {
- unsafe { Ok(Self::from_raw_socket(socket)) }
+ unsafe { Ok(Self::from_raw(socket)) }
} else {
let error = unsafe { c::WSAGetLastError() };
@@ -133,7 +133,7 @@ impl Socket {
}
unsafe {
- let socket = Self::from_raw_socket(socket);
+ let socket = Self::from_raw(socket);
socket.0.set_no_inherit()?;
Ok(socket)
}
@@ -144,7 +144,7 @@ impl Socket {
self.set_nonblocking(true)?;
let result = {
let (addr, len) = addr.into_inner();
- let result = unsafe { c::connect(self.as_raw_socket(), addr.as_ptr(), len) };
+ let result = unsafe { c::connect(self.as_raw(), addr.as_ptr(), len) };
cvt(result).map(drop)
};
self.set_nonblocking(false)?;
@@ -170,7 +170,7 @@ impl Socket {
let fds = {
let mut fds = unsafe { mem::zeroed::<c::fd_set>() };
fds.fd_count = 1;
- fds.fd_array[0] = self.as_raw_socket();
+ fds.fd_array[0] = self.as_raw();
fds
};
@@ -202,11 +202,11 @@ impl Socket {
}
pub fn accept(&self, storage: *mut c::SOCKADDR, len: *mut c_int) -> io::Result<Socket> {
- let socket = unsafe { c::accept(self.as_raw_socket(), storage, len) };
+ let socket = unsafe { c::accept(self.as_raw(), storage, len) };
match socket {
c::INVALID_SOCKET => Err(last_error()),
- _ => unsafe { Ok(Self::from_raw_socket(socket)) },
+ _ => unsafe { Ok(Self::from_raw(socket)) },
}
}
@@ -218,9 +218,8 @@ impl Socket {
// On unix when a socket is shut down all further reads return 0, so we
// do the same on windows to map a shut down socket to returning EOF.
let length = cmp::min(buf.capacity(), i32::MAX as usize) as i32;
- let result = unsafe {
- c::recv(self.as_raw_socket(), buf.as_mut().as_mut_ptr() as *mut _, length, flags)
- };
+ let result =
+ unsafe { c::recv(self.as_raw(), buf.as_mut().as_mut_ptr() as *mut _, length, flags) };
match result {
c::SOCKET_ERROR => {
@@ -257,7 +256,7 @@ impl Socket {
let mut flags = 0;
let result = unsafe {
c::WSARecv(
- self.as_raw_socket(),
+ self.as_raw(),
bufs.as_mut_ptr() as *mut c::WSABUF,
length,
&mut nread,
@@ -305,7 +304,7 @@ impl Socket {
// do the same on windows to map a shut down socket to returning EOF.
let result = unsafe {
c::recvfrom(
- self.as_raw_socket(),
+ self.as_raw(),
buf.as_mut_ptr() as *mut _,
length,
flags,
@@ -341,7 +340,7 @@ impl Socket {
let mut nwritten = 0;
let result = unsafe {
c::WSASend(
- self.as_raw_socket(),
+ self.as_raw(),
bufs.as_ptr() as *const c::WSABUF as *mut _,
length,
&mut nwritten,
@@ -392,14 +391,14 @@ impl Socket {
Shutdown::Read => c::SD_RECEIVE,
Shutdown::Both => c::SD_BOTH,
};
- let result = unsafe { c::shutdown(self.as_raw_socket(), how) };
+ let result = unsafe { c::shutdown(self.as_raw(), how) };
cvt(result).map(drop)
}
pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
let mut nonblocking = nonblocking as c_ulong;
let result =
- unsafe { c::ioctlsocket(self.as_raw_socket(), c::FIONBIO as c_int, &mut nonblocking) };
+ unsafe { c::ioctlsocket(self.as_raw(), c::FIONBIO as c_int, &mut nonblocking) };
cvt(result).map(drop)
}
@@ -433,8 +432,15 @@ impl Socket {
}
// This is used by sys_common code to abstract over Windows and Unix.
- pub fn as_raw(&self) -> RawSocket {
- self.as_inner().as_raw_socket()
+ pub fn as_raw(&self) -> c::SOCKET {
+ debug_assert_eq!(mem::size_of::<c::SOCKET>(), mem::size_of::<RawSocket>());
+ debug_assert_eq!(mem::align_of::<c::SOCKET>(), mem::align_of::<RawSocket>());
+ self.as_inner().as_raw_socket() as c::SOCKET
+ }
+ pub unsafe fn from_raw(raw: c::SOCKET) -> Self {
+ debug_assert_eq!(mem::size_of::<c::SOCKET>(), mem::size_of::<RawSocket>());
+ debug_assert_eq!(mem::align_of::<c::SOCKET>(), mem::align_of::<RawSocket>());
+ Self::from_raw_socket(raw as RawSocket)
}
}
diff --git a/library/std/src/sys/windows/os_str.rs b/library/std/src/sys/windows/os_str.rs
index 4708657a9..237854fac 100644
--- a/library/std/src/sys/windows/os_str.rs
+++ b/library/std/src/sys/windows/os_str.rs
@@ -64,12 +64,12 @@ impl fmt::Display for Slice {
impl Buf {
#[inline]
- pub fn into_os_str_bytes(self) -> Vec<u8> {
+ pub fn into_encoded_bytes(self) -> Vec<u8> {
self.inner.into_bytes()
}
#[inline]
- pub unsafe fn from_os_str_bytes_unchecked(s: Vec<u8>) -> Self {
+ pub unsafe fn from_encoded_bytes_unchecked(s: Vec<u8>) -> Self {
Self { inner: Wtf8Buf::from_bytes_unchecked(s) }
}
@@ -162,12 +162,12 @@ impl Buf {
impl Slice {
#[inline]
- pub fn as_os_str_bytes(&self) -> &[u8] {
+ pub fn as_encoded_bytes(&self) -> &[u8] {
self.inner.as_bytes()
}
#[inline]
- pub unsafe fn from_os_str_bytes_unchecked(s: &[u8]) -> &Slice {
+ pub unsafe fn from_encoded_bytes_unchecked(s: &[u8]) -> &Slice {
mem::transmute(Wtf8::from_bytes_unchecked(s))
}
diff --git a/library/std/src/sys/windows/path.rs b/library/std/src/sys/windows/path.rs
index c9c2d10e6..8c0e07b35 100644
--- a/library/std/src/sys/windows/path.rs
+++ b/library/std/src/sys/windows/path.rs
@@ -22,12 +22,12 @@ pub fn is_verbatim_sep(b: u8) -> bool {
/// Returns true if `path` looks like a lone filename.
pub(crate) fn is_file_name(path: &OsStr) -> bool {
- !path.as_os_str_bytes().iter().copied().any(is_sep_byte)
+ !path.as_encoded_bytes().iter().copied().any(is_sep_byte)
}
pub(crate) fn has_trailing_slash(path: &OsStr) -> bool {
- let is_verbatim = path.as_os_str_bytes().starts_with(br"\\?\");
+ let is_verbatim = path.as_encoded_bytes().starts_with(br"\\?\");
let is_separator = if is_verbatim { is_verbatim_sep } else { is_sep_byte };
- if let Some(&c) = path.as_os_str_bytes().last() { is_separator(c) } else { false }
+ if let Some(&c) = path.as_encoded_bytes().last() { is_separator(c) } else { false }
}
/// Appends a suffix to a path.
@@ -49,7 +49,7 @@ impl<'a, const LEN: usize> PrefixParser<'a, LEN> {
fn get_prefix(path: &OsStr) -> [u8; LEN] {
let mut prefix = [0; LEN];
// SAFETY: Only ASCII characters are modified.
- for (i, &ch) in path.as_os_str_bytes().iter().take(LEN).enumerate() {
+ for (i, &ch) in path.as_encoded_bytes().iter().take(LEN).enumerate() {
prefix[i] = if ch == b'/' { b'\\' } else { ch };
}
prefix
@@ -82,7 +82,7 @@ impl<'a> PrefixParserSlice<'a, '_> {
}
fn prefix_bytes(&self) -> &'a [u8] {
- &self.path.as_os_str_bytes()[..self.index]
+ &self.path.as_encoded_bytes()[..self.index]
}
fn finish(self) -> &'a OsStr {
@@ -90,7 +90,7 @@ impl<'a> PrefixParserSlice<'a, '_> {
// &[u8] and back. This is safe to do because (1) we only look at ASCII
// contents of the encoding and (2) new &OsStr values are produced only
// from ASCII-bounded slices of existing &OsStr values.
- unsafe { OsStr::from_os_str_bytes_unchecked(&self.path.as_os_str_bytes()[self.index..]) }
+ unsafe { OsStr::from_encoded_bytes_unchecked(&self.path.as_encoded_bytes()[self.index..]) }
}
}
@@ -162,7 +162,7 @@ fn parse_drive(path: &OsStr) -> Option<u8> {
drive.is_ascii_alphabetic()
}
- match path.as_os_str_bytes() {
+ match path.as_encoded_bytes() {
[drive, b':', ..] if is_valid_drive_letter(drive) => Some(drive.to_ascii_uppercase()),
_ => None,
}
@@ -171,7 +171,7 @@ fn parse_drive(path: &OsStr) -> Option<u8> {
// Parses a drive prefix exactly, e.g. "C:"
fn parse_drive_exact(path: &OsStr) -> Option<u8> {
// only parse two bytes: the drive letter and the drive separator
- if path.as_os_str_bytes().get(2).map(|&x| is_sep_byte(x)).unwrap_or(true) {
+ if path.as_encoded_bytes().get(2).map(|&x| is_sep_byte(x)).unwrap_or(true) {
parse_drive(path)
} else {
None
@@ -185,15 +185,15 @@ fn parse_drive_exact(path: &OsStr) -> Option<u8> {
fn parse_next_component(path: &OsStr, verbatim: bool) -> (&OsStr, &OsStr) {
let separator = if verbatim { is_verbatim_sep } else { is_sep_byte };
- match path.as_os_str_bytes().iter().position(|&x| separator(x)) {
+ match path.as_encoded_bytes().iter().position(|&x| separator(x)) {
Some(separator_start) => {
let separator_end = separator_start + 1;
- let component = &path.as_os_str_bytes()[..separator_start];
+ let component = &path.as_encoded_bytes()[..separator_start];
// Panic safe
// The max `separator_end` is `bytes.len()` and `bytes[bytes.len()..]` is a valid index.
- let path = &path.as_os_str_bytes()[separator_end..];
+ let path = &path.as_encoded_bytes()[separator_end..];
// SAFETY: `path` is a valid wtf8 encoded slice and each of the separators ('/', '\')
// is encoded in a single byte, therefore `bytes[separator_start]` and
@@ -201,8 +201,8 @@ fn parse_next_component(path: &OsStr, verbatim: bool) -> (&OsStr, &OsStr) {
// `bytes[..separator_start]` and `bytes[separator_end..]` are valid wtf8 slices.
unsafe {
(
- OsStr::from_os_str_bytes_unchecked(component),
- OsStr::from_os_str_bytes_unchecked(path),
+ OsStr::from_encoded_bytes_unchecked(component),
+ OsStr::from_encoded_bytes_unchecked(path),
)
}
}
@@ -323,7 +323,7 @@ pub(crate) fn absolute(path: &Path) -> io::Result<PathBuf> {
// Verbatim paths should not be modified.
if prefix.map(|x| x.is_verbatim()).unwrap_or(false) {
// NULs in verbatim paths are rejected for consistency.
- if path.as_os_str_bytes().contains(&0) {
+ if path.as_encoded_bytes().contains(&0) {
return Err(io::const_io_error!(
io::ErrorKind::InvalidInput,
"strings passed to WinAPI cannot contain NULs",
diff --git a/library/std/src/sys/windows/pipe.rs b/library/std/src/sys/windows/pipe.rs
index d07147ecc..7624e746f 100644
--- a/library/std/src/sys/windows/pipe.rs
+++ b/library/std/src/sys/windows/pipe.rs
@@ -12,7 +12,7 @@ use crate::sys::c;
use crate::sys::fs::{File, OpenOptions};
use crate::sys::handle::Handle;
use crate::sys::hashmap_random_keys;
-use crate::sys_common::IntoInner;
+use crate::sys_common::{FromInner, IntoInner};
////////////////////////////////////////////////////////////////////////////////
// Anonymous pipes
@@ -28,6 +28,12 @@ impl IntoInner<Handle> for AnonPipe {
}
}
+impl FromInner<Handle> for AnonPipe {
+ fn from_inner(inner: Handle) -> AnonPipe {
+ Self { inner }
+ }
+}
+
pub struct Pipes {
pub ours: AnonPipe,
pub theirs: AnonPipe,
diff --git a/library/std/src/sys/windows/process.rs b/library/std/src/sys/windows/process.rs
index 2dd0c67ac..cd5bf7f15 100644
--- a/library/std/src/sys/windows/process.rs
+++ b/library/std/src/sys/windows/process.rs
@@ -11,6 +11,7 @@ use crate::ffi::{OsStr, OsString};
use crate::fmt;
use crate::io::{self, Error, ErrorKind};
use crate::mem;
+use crate::mem::MaybeUninit;
use crate::num::NonZeroI32;
use crate::os::windows::ffi::{OsStrExt, OsStringExt};
use crate::os::windows::io::{AsHandle, AsRawHandle, BorrowedHandle, FromRawHandle, IntoRawHandle};
@@ -166,10 +167,12 @@ pub struct Command {
stdout: Option<Stdio>,
stderr: Option<Stdio>,
force_quotes_enabled: bool,
+ proc_thread_attributes: BTreeMap<usize, ProcThreadAttributeValue>,
}
pub enum Stdio {
Inherit,
+ InheritSpecific { from_stdio_id: c::DWORD },
Null,
MakePipe,
Pipe(AnonPipe),
@@ -195,6 +198,7 @@ impl Command {
stdout: None,
stderr: None,
force_quotes_enabled: false,
+ proc_thread_attributes: Default::default(),
}
}
@@ -245,6 +249,17 @@ impl Command {
self.cwd.as_ref().map(|cwd| Path::new(cwd))
}
+ pub unsafe fn raw_attribute<T: Copy + Send + Sync + 'static>(
+ &mut self,
+ attribute: usize,
+ value: T,
+ ) {
+ self.proc_thread_attributes.insert(
+ attribute,
+ ProcThreadAttributeValue { size: mem::size_of::<T>(), data: Box::new(value) },
+ );
+ }
+
pub fn spawn(
&mut self,
default: Stdio,
@@ -308,7 +323,6 @@ impl Command {
let stderr = stderr.to_handle(c::STD_ERROR_HANDLE, &mut pipes.stderr)?;
let mut si = zeroed_startupinfo();
- si.cb = mem::size_of::<c::STARTUPINFOW>() as c::DWORD;
// If at least one of stdin, stdout or stderr are set (i.e. are non null)
// then set the `hStd` fields in `STARTUPINFO`.
@@ -322,6 +336,27 @@ impl Command {
si.hStdError = stderr.as_raw_handle();
}
+ let si_ptr: *mut c::STARTUPINFOW;
+
+ let mut proc_thread_attribute_list;
+ let mut si_ex;
+
+ if !self.proc_thread_attributes.is_empty() {
+ si.cb = mem::size_of::<c::STARTUPINFOEXW>() as u32;
+ flags |= c::EXTENDED_STARTUPINFO_PRESENT;
+
+ proc_thread_attribute_list =
+ make_proc_thread_attribute_list(&self.proc_thread_attributes)?;
+ si_ex = c::STARTUPINFOEXW {
+ StartupInfo: si,
+ lpAttributeList: proc_thread_attribute_list.0.as_mut_ptr() as _,
+ };
+ si_ptr = &mut si_ex as *mut _ as _;
+ } else {
+ si.cb = mem::size_of::<c::STARTUPINFOW>() as c::DWORD;
+ si_ptr = &mut si as *mut _ as _;
+ }
+
unsafe {
cvt(c::CreateProcessW(
program.as_ptr(),
@@ -332,7 +367,7 @@ impl Command {
flags,
envp,
dirp,
- &si,
+ si_ptr,
&mut pi,
))
}?;
@@ -395,7 +430,7 @@ fn resolve_exe<'a>(
// Test if the file name has the `exe` extension.
// This does a case-insensitive `ends_with`.
let has_exe_suffix = if exe_path.len() >= EXE_SUFFIX.len() {
- exe_path.as_os_str_bytes()[exe_path.len() - EXE_SUFFIX.len()..]
+ exe_path.as_encoded_bytes()[exe_path.len() - EXE_SUFFIX.len()..]
.eq_ignore_ascii_case(EXE_SUFFIX.as_bytes())
} else {
false
@@ -425,7 +460,7 @@ fn resolve_exe<'a>(
// From the `CreateProcessW` docs:
// > If the file name does not contain an extension, .exe is appended.
// Note that this rule only applies when searching paths.
- let has_extension = exe_path.as_os_str_bytes().contains(&b'.');
+ let has_extension = exe_path.as_encoded_bytes().contains(&b'.');
// Search the directories given by `search_paths`.
let result = search_paths(parent_paths, child_paths, |mut path| {
@@ -521,17 +556,19 @@ fn program_exists(path: &Path) -> Option<Vec<u16>> {
impl Stdio {
fn to_handle(&self, stdio_id: c::DWORD, pipe: &mut Option<AnonPipe>) -> io::Result<Handle> {
- match *self {
- Stdio::Inherit => match stdio::get_handle(stdio_id) {
- Ok(io) => unsafe {
- let io = Handle::from_raw_handle(io);
- let ret = io.duplicate(0, true, c::DUPLICATE_SAME_ACCESS);
- io.into_raw_handle();
- ret
- },
- // If no stdio handle is available, then propagate the null value.
- Err(..) => unsafe { Ok(Handle::from_raw_handle(ptr::null_mut())) },
+ let use_stdio_id = |stdio_id| match stdio::get_handle(stdio_id) {
+ Ok(io) => unsafe {
+ let io = Handle::from_raw_handle(io);
+ let ret = io.duplicate(0, true, c::DUPLICATE_SAME_ACCESS);
+ io.into_raw_handle();
+ ret
},
+ // If no stdio handle is available, then propagate the null value.
+ Err(..) => unsafe { Ok(Handle::from_raw_handle(ptr::null_mut())) },
+ };
+ match *self {
+ Stdio::Inherit => use_stdio_id(stdio_id),
+ Stdio::InheritSpecific { from_stdio_id } => use_stdio_id(from_stdio_id),
Stdio::MakePipe => {
let ours_readable = stdio_id != c::STD_INPUT_HANDLE;
@@ -579,6 +616,18 @@ impl From<File> for Stdio {
}
}
+impl From<io::Stdout> for Stdio {
+ fn from(_: io::Stdout) -> Stdio {
+ Stdio::InheritSpecific { from_stdio_id: c::STD_OUTPUT_HANDLE }
+ }
+}
+
+impl From<io::Stderr> for Stdio {
+ fn from(_: io::Stderr) -> Stdio {
+ Stdio::InheritSpecific { from_stdio_id: c::STD_ERROR_HANDLE }
+ }
+}
+
////////////////////////////////////////////////////////////////////////////////
// Processes
////////////////////////////////////////////////////////////////////////////////
@@ -831,6 +880,80 @@ fn make_dirp(d: Option<&OsString>) -> io::Result<(*const u16, Vec<u16>)> {
}
}
+struct ProcThreadAttributeList(Box<[MaybeUninit<u8>]>);
+
+impl Drop for ProcThreadAttributeList {
+ fn drop(&mut self) {
+ let lp_attribute_list = self.0.as_mut_ptr() as _;
+ unsafe { c::DeleteProcThreadAttributeList(lp_attribute_list) }
+ }
+}
+
+/// Wrapper around the value data to be used as a Process Thread Attribute.
+struct ProcThreadAttributeValue {
+ data: Box<dyn Send + Sync>,
+ size: usize,
+}
+
+fn make_proc_thread_attribute_list(
+ attributes: &BTreeMap<usize, ProcThreadAttributeValue>,
+) -> io::Result<ProcThreadAttributeList> {
+ // To initialize our ProcThreadAttributeList, we need to determine
+ // how many bytes to allocate for it. The Windows API simplifies this process
+ // by allowing us to call `InitializeProcThreadAttributeList` with
+ // a null pointer to retrieve the required size.
+ let mut required_size = 0;
+ let Ok(attribute_count) = attributes.len().try_into() else {
+ return Err(io::const_io_error!(
+ ErrorKind::InvalidInput,
+ "maximum number of ProcThreadAttributes exceeded",
+ ));
+ };
+ unsafe {
+ c::InitializeProcThreadAttributeList(
+ ptr::null_mut(),
+ attribute_count,
+ 0,
+ &mut required_size,
+ )
+ };
+
+ let mut proc_thread_attribute_list = ProcThreadAttributeList(
+ vec![MaybeUninit::uninit(); required_size as usize].into_boxed_slice(),
+ );
+
+ // Once we've allocated the necessary memory, it's safe to invoke
+ // `InitializeProcThreadAttributeList` to properly initialize the list.
+ cvt(unsafe {
+ c::InitializeProcThreadAttributeList(
+ proc_thread_attribute_list.0.as_mut_ptr() as *mut _,
+ attribute_count,
+ 0,
+ &mut required_size,
+ )
+ })?;
+
+    // Add our attributes to the buffer.
+ // It's theoretically possible for the attribute count to exceed a u32 value.
+ // Therefore, we ensure that we don't add more attributes than the buffer was initialized for.
+ for (&attribute, value) in attributes.iter().take(attribute_count as usize) {
+ let value_ptr = &*value.data as *const (dyn Send + Sync) as _;
+ cvt(unsafe {
+ c::UpdateProcThreadAttribute(
+ proc_thread_attribute_list.0.as_mut_ptr() as _,
+ 0,
+ attribute,
+ value_ptr,
+ value.size,
+ ptr::null_mut(),
+ ptr::null_mut(),
+ )
+ })?;
+ }
+
+ Ok(proc_thread_attribute_list)
+}
+
pub struct CommandArgs<'a> {
iter: crate::slice::Iter<'a, Arg>,
}
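
The sys-level `raw_attribute` and attribute-list plumbing above back an extension method on the Windows `CommandExt` trait. A minimal usage sketch, assuming Windows, a nightly toolchain with the `windows_process_extensions_raw_attribute` feature, and that `0x0002_0000` is the value of `PROC_THREAD_ATTRIBUTE_PARENT_PROCESS`; the constant and helper below are illustrative, not part of this diff:

#![feature(windows_process_extensions_raw_attribute)]
use std::os::windows::io::AsRawHandle;
use std::os::windows::process::CommandExt;
use std::process::{Child, Command};

fn spawn_with_parent(parent: &Child) -> std::io::Result<Child> {
    // Assumed value of PROC_THREAD_ATTRIBUTE_PARENT_PROCESS from the Win32 headers.
    const PROC_THREAD_ATTRIBUTE_PARENT_PROCESS: usize = 0x0002_0000;

    // Pass the handle as an isize so it satisfies the Copy + Send + Sync bound;
    // the value is copied verbatim into the attribute list built above.
    let parent_handle = parent.as_raw_handle() as isize;

    let mut cmd = Command::new("cmd.exe");
    cmd.args(["/c", "echo spawned"]);
    unsafe {
        cmd.raw_attribute(PROC_THREAD_ATTRIBUTE_PARENT_PROCESS, parent_handle);
    }
    cmd.spawn()
}
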
diff --git a/library/std/src/sys/xous/alloc.rs b/library/std/src/sys/xous/alloc.rs
new file mode 100644
index 000000000..b3a3e691e
--- /dev/null
+++ b/library/std/src/sys/xous/alloc.rs
@@ -0,0 +1,62 @@
+use crate::alloc::{GlobalAlloc, Layout, System};
+
+static mut DLMALLOC: dlmalloc::Dlmalloc = dlmalloc::Dlmalloc::new();
+
+#[stable(feature = "alloc_system_type", since = "1.28.0")]
+unsafe impl GlobalAlloc for System {
+ #[inline]
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ // SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access.
+ // Calling malloc() is safe because preconditions on this function match the trait method preconditions.
+ let _lock = lock::lock();
+ unsafe { DLMALLOC.malloc(layout.size(), layout.align()) }
+ }
+
+ #[inline]
+ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+ // SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access.
+ // Calling calloc() is safe because preconditions on this function match the trait method preconditions.
+ let _lock = lock::lock();
+ unsafe { DLMALLOC.calloc(layout.size(), layout.align()) }
+ }
+
+ #[inline]
+ unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+ // SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access.
+ // Calling free() is safe because preconditions on this function match the trait method preconditions.
+ let _lock = lock::lock();
+ unsafe { DLMALLOC.free(ptr, layout.size(), layout.align()) }
+ }
+
+ #[inline]
+ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ // SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access.
+ // Calling realloc() is safe because preconditions on this function match the trait method preconditions.
+ let _lock = lock::lock();
+ unsafe { DLMALLOC.realloc(ptr, layout.size(), layout.align(), new_size) }
+ }
+}
+
+mod lock {
+ use crate::sync::atomic::{AtomicI32, Ordering::SeqCst};
+
+ static LOCKED: AtomicI32 = AtomicI32::new(0);
+
+ pub struct DropLock;
+
+ pub fn lock() -> DropLock {
+ loop {
+ if LOCKED.swap(1, SeqCst) == 0 {
+ return DropLock;
+ }
+ crate::os::xous::ffi::do_yield();
+ }
+ }
+
+ impl Drop for DropLock {
+ fn drop(&mut self) {
+ let r = LOCKED.swap(0, SeqCst);
+ debug_assert_eq!(r, 1);
+ }
+ }
+}
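
The `lock` module above is the whole synchronization story for this allocator: a spin lock whose guard releases on drop, so the dlmalloc call can never be left locked on an early return. A standalone sketch of the same guard pattern (illustrative names, not part of std):

use std::sync::atomic::{AtomicBool, Ordering::SeqCst};
use std::thread;

static LOCKED: AtomicBool = AtomicBool::new(false);

struct DropLock;

fn lock() -> DropLock {
    // Spin until we observe the lock as free; `swap` both reads and sets it.
    while LOCKED.swap(true, SeqCst) {
        thread::yield_now();
    }
    DropLock
}

impl Drop for DropLock {
    fn drop(&mut self) {
        let was_locked = LOCKED.swap(false, SeqCst);
        debug_assert!(was_locked);
    }
}

fn main() {
    let handles: Vec<_> = (0..4)
        .map(|_| {
            thread::spawn(|| {
                let _guard = lock();
                // Critical section: the guard guarantees release even on early return.
            })
        })
        .collect();
    for h in handles {
        h.join().unwrap();
    }
}
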
diff --git a/library/std/src/sys/xous/locks/condvar.rs b/library/std/src/sys/xous/locks/condvar.rs
new file mode 100644
index 000000000..1bb38dfa3
--- /dev/null
+++ b/library/std/src/sys/xous/locks/condvar.rs
@@ -0,0 +1,111 @@
+use super::mutex::Mutex;
+use crate::os::xous::ffi::{blocking_scalar, scalar};
+use crate::os::xous::services::ticktimer_server;
+use crate::sync::Mutex as StdMutex;
+use crate::time::Duration;
+
+// The implementation is inspired by Andrew D. Birrell's paper
+// "Implementing Condition Variables with Semaphores"
+
+pub struct Condvar {
+ counter: StdMutex<usize>,
+}
+
+unsafe impl Send for Condvar {}
+unsafe impl Sync for Condvar {}
+
+impl Condvar {
+ #[inline]
+ #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
+ pub const fn new() -> Condvar {
+ Condvar { counter: StdMutex::new(0) }
+ }
+
+ pub fn notify_one(&self) {
+ let mut counter = self.counter.lock().unwrap();
+ if *counter <= 0 {
+ return;
+ } else {
+ *counter -= 1;
+ }
+ let result = blocking_scalar(
+ ticktimer_server(),
+ crate::os::xous::services::TicktimerScalar::NotifyCondition(self.index(), 1).into(),
+ );
+ drop(counter);
+ result.expect("failure to send NotifyCondition command");
+ }
+
+ pub fn notify_all(&self) {
+ let mut counter = self.counter.lock().unwrap();
+ if *counter <= 0 {
+ return;
+ }
+ let result = blocking_scalar(
+ ticktimer_server(),
+ crate::os::xous::services::TicktimerScalar::NotifyCondition(self.index(), *counter)
+ .into(),
+ );
+ *counter = 0;
+ drop(counter);
+
+ result.expect("failure to send NotifyCondition command");
+ }
+
+ fn index(&self) -> usize {
+ self as *const Condvar as usize
+ }
+
+ pub unsafe fn wait(&self, mutex: &Mutex) {
+ let mut counter = self.counter.lock().unwrap();
+ *counter += 1;
+ unsafe { mutex.unlock() };
+ drop(counter);
+
+ let result = blocking_scalar(
+ ticktimer_server(),
+ crate::os::xous::services::TicktimerScalar::WaitForCondition(self.index(), 0).into(),
+ );
+ unsafe { mutex.lock() };
+
+ result.expect("Ticktimer: failure to send WaitForCondition command");
+ }
+
+ pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
+ let mut counter = self.counter.lock().unwrap();
+ *counter += 1;
+ unsafe { mutex.unlock() };
+ drop(counter);
+
+ let mut millis = dur.as_millis() as usize;
+ if millis == 0 {
+ millis = 1;
+ }
+
+ let result = blocking_scalar(
+ ticktimer_server(),
+ crate::os::xous::services::TicktimerScalar::WaitForCondition(self.index(), millis)
+ .into(),
+ );
+ unsafe { mutex.lock() };
+
+ let result = result.expect("Ticktimer: failure to send WaitForCondition command")[0] == 0;
+
+ // If we awoke due to a timeout, decrement the wake count, as that would not have
+ // been done in the `notify()` call.
+ if !result {
+ *self.counter.lock().unwrap() -= 1;
+ }
+ result
+ }
+}
+
+impl Drop for Condvar {
+ fn drop(&mut self) {
+ scalar(
+ ticktimer_server(),
+ crate::os::xous::services::TicktimerScalar::FreeCondition(self.index()).into(),
+ )
+ .ok();
+ }
+}
diff --git a/library/std/src/sys/xous/locks/mod.rs b/library/std/src/sys/xous/locks/mod.rs
new file mode 100644
index 000000000..f3c5c5d9f
--- /dev/null
+++ b/library/std/src/sys/xous/locks/mod.rs
@@ -0,0 +1,7 @@
+mod condvar;
+mod mutex;
+mod rwlock;
+
+pub use condvar::*;
+pub use mutex::*;
+pub use rwlock::*;
diff --git a/library/std/src/sys/xous/locks/mutex.rs b/library/std/src/sys/xous/locks/mutex.rs
new file mode 100644
index 000000000..ea51776d5
--- /dev/null
+++ b/library/std/src/sys/xous/locks/mutex.rs
@@ -0,0 +1,116 @@
+use crate::os::xous::ffi::{blocking_scalar, do_yield, scalar};
+use crate::os::xous::services::ticktimer_server;
+use crate::sync::atomic::{AtomicBool, AtomicUsize, Ordering::Relaxed, Ordering::SeqCst};
+
+pub struct Mutex {
+ /// The "locked" value indicates how many threads are waiting on this
+ /// Mutex. Possible values are:
+ /// 0: The lock is unlocked
+ /// 1: The lock is locked and uncontended
+ /// >=2: The lock is locked and contended
+ ///
+ /// A lock is "contended" when there is more than one thread waiting
+ /// for a lock, or it is locked for long periods of time. Rather than
+ /// spinning, these locks send a Message to the ticktimer server
+ /// requesting that they be woken up when a lock is unlocked.
+ locked: AtomicUsize,
+
+ /// Whether this Mutex ever was contended, and therefore made a trip
+ /// to the ticktimer server. If this was never set, then we were never
+ /// on the slow path and can skip deregistering the mutex.
+ contended: AtomicBool,
+}
+
+impl Mutex {
+ #[inline]
+ #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
+ pub const fn new() -> Mutex {
+ Mutex { locked: AtomicUsize::new(0), contended: AtomicBool::new(false) }
+ }
+
+ fn index(&self) -> usize {
+ self as *const Mutex as usize
+ }
+
+ #[inline]
+ pub unsafe fn lock(&self) {
+ // Try multiple times to acquire the lock without resorting to the ticktimer
+ // server. For locks that are held for a short amount of time, this will
+ // result in the ticktimer server never getting invoked. The `locked` value
+ // will be either 0 or 1.
+ for _attempts in 0..3 {
+ if unsafe { self.try_lock() } {
+ return;
+ }
+ do_yield();
+ }
+
+ // Try one more time to lock. If the lock is released between the previous code and
+        // here, then the inner `locked` value will be 1 at the end of this. If it was still
+        // locked, then the value will be more than 1, for example if there are multiple other
+ // threads waiting on this lock.
+ if unsafe { self.try_lock_or_poison() } {
+ return;
+ }
+
+ // When this mutex is dropped, we will need to deregister it with the server.
+ self.contended.store(true, Relaxed);
+
+ // The lock is now "contended". When the lock is released, a Message will get sent to the
+ // ticktimer server to wake it up. Note that this may already have happened, so the actual
+ // value of `lock` may be anything (0, 1, 2, ...).
+ blocking_scalar(
+ ticktimer_server(),
+ crate::os::xous::services::TicktimerScalar::LockMutex(self.index()).into(),
+ )
+ .expect("failure to send LockMutex command");
+ }
+
+ #[inline]
+ pub unsafe fn unlock(&self) {
+ let prev = self.locked.fetch_sub(1, SeqCst);
+
+ // If the previous value was 1, then this was a "fast path" unlock, so no
+ // need to involve the Ticktimer server
+ if prev == 1 {
+ return;
+ }
+
+ // If it was 0, then something has gone seriously wrong and the counter
+ // has just wrapped around.
+ if prev == 0 {
+ panic!("mutex lock count underflowed");
+ }
+
+ // Unblock one thread that is waiting on this message.
+ scalar(
+ ticktimer_server(),
+ crate::os::xous::services::TicktimerScalar::UnlockMutex(self.index()).into(),
+ )
+ .expect("failure to send UnlockMutex command");
+ }
+
+ #[inline]
+ pub unsafe fn try_lock(&self) -> bool {
+ self.locked.compare_exchange(0, 1, SeqCst, SeqCst).is_ok()
+ }
+
+ #[inline]
+ pub unsafe fn try_lock_or_poison(&self) -> bool {
+ self.locked.fetch_add(1, SeqCst) == 0
+ }
+}
+
+impl Drop for Mutex {
+ fn drop(&mut self) {
+ // If there was Mutex contention, then we involved the ticktimer. Free
+ // the resources associated with this Mutex as it is deallocated.
+ if self.contended.load(Relaxed) {
+ scalar(
+ ticktimer_server(),
+ crate::os::xous::services::TicktimerScalar::FreeMutex(self.index()).into(),
+ )
+ .ok();
+ }
+ }
+}
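
The fast and slow paths above are driven entirely by the values the atomic operations return. A single-threaded sketch of the counter protocol, with the ticktimer calls replaced by comments (illustrative only):

use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};

fn main() {
    let locked = AtomicUsize::new(0);

    // Fast-path lock: 0 -> 1 means we took an uncontended lock.
    assert!(locked.compare_exchange(0, 1, SeqCst, SeqCst).is_ok());

    // A second locker fails the fast path and falls back to fetch_add,
    // leaving the counter at 2 ("contended"); it would then block on the
    // ticktimer server instead of spinning.
    assert!(locked.compare_exchange(0, 1, SeqCst, SeqCst).is_err());
    assert_eq!(locked.fetch_add(1, SeqCst), 1);

    // First unlock: the previous value 2 means someone is waiting, so a real
    // implementation would send UnlockMutex to wake one waiter.
    assert_eq!(locked.fetch_sub(1, SeqCst), 2);

    // Second unlock: the previous value 1 is the uncontended fast path.
    assert_eq!(locked.fetch_sub(1, SeqCst), 1);
    assert_eq!(locked.load(SeqCst), 0);
}
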
diff --git a/library/std/src/sys/xous/locks/rwlock.rs b/library/std/src/sys/xous/locks/rwlock.rs
new file mode 100644
index 000000000..618da758a
--- /dev/null
+++ b/library/std/src/sys/xous/locks/rwlock.rs
@@ -0,0 +1,72 @@
+use crate::os::xous::ffi::do_yield;
+use crate::sync::atomic::{AtomicIsize, Ordering::SeqCst};
+
+pub struct RwLock {
+    /// The "mode" value indicates the current state of this RwLock.
+    /// Possible values are:
+ /// -1: The lock is locked for writing
+ /// 0: The lock is unlocked
+ /// >=1: The lock is locked for reading
+ ///
+ /// This currently spins waiting for the lock to be freed. An
+ /// optimization would be to involve the ticktimer server to
+ /// coordinate unlocks.
+ mode: AtomicIsize,
+}
+
+unsafe impl Send for RwLock {}
+unsafe impl Sync for RwLock {}
+
+impl RwLock {
+ #[inline]
+ #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
+ pub const fn new() -> RwLock {
+ RwLock { mode: AtomicIsize::new(0) }
+ }
+
+ #[inline]
+ pub unsafe fn read(&self) {
+ while !unsafe { self.try_read() } {
+ do_yield();
+ }
+ }
+
+ #[inline]
+ pub unsafe fn try_read(&self) -> bool {
+        // Read the current value. The load itself is atomic, but the
+        // check-and-update sequence below is not.
+ let current = self.mode.load(SeqCst);
+
+ // If it's currently locked for writing, then we cannot read.
+ if current < 0 {
+ return false;
+ }
+
+        // Attempt to lock. If the `current` value has changed since the load above, the
+        // compare-exchange fails and we do not obtain the lock, even though it might
+        // still have been possible to acquire it.
+ let new = current + 1;
+ self.mode.compare_exchange(current, new, SeqCst, SeqCst).is_ok()
+ }
+
+ #[inline]
+ pub unsafe fn write(&self) {
+ while !unsafe { self.try_write() } {
+ do_yield();
+ }
+ }
+
+ #[inline]
+ pub unsafe fn try_write(&self) -> bool {
+ self.mode.compare_exchange(0, -1, SeqCst, SeqCst).is_ok()
+ }
+
+ #[inline]
+ pub unsafe fn read_unlock(&self) {
+ self.mode.fetch_sub(1, SeqCst);
+ }
+
+ #[inline]
+ pub unsafe fn write_unlock(&self) {
+ assert_eq!(self.mode.compare_exchange(-1, 0, SeqCst, SeqCst), Ok(-1));
+ }
+}
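
A compact sketch of the `mode` encoding used above (-1 writer, 0 free, >=1 readers), again single-threaded and illustrative:

use std::sync::atomic::{AtomicIsize, Ordering::SeqCst};

fn main() {
    let mode = AtomicIsize::new(0);

    // Two readers: each CAS bumps the count by one from the value it observed.
    assert!(mode.compare_exchange(0, 1, SeqCst, SeqCst).is_ok());
    assert!(mode.compare_exchange(1, 2, SeqCst, SeqCst).is_ok());

    // A writer cannot acquire while readers are present (try_write is 0 -> -1).
    assert!(mode.compare_exchange(0, -1, SeqCst, SeqCst).is_err());

    // Readers release by decrementing; once the count hits 0 the writer succeeds.
    mode.fetch_sub(1, SeqCst);
    mode.fetch_sub(1, SeqCst);
    assert!(mode.compare_exchange(0, -1, SeqCst, SeqCst).is_ok());

    // write_unlock asserts the -1 -> 0 transition.
    assert_eq!(mode.compare_exchange(-1, 0, SeqCst, SeqCst), Ok(-1));
}
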
diff --git a/library/std/src/sys/xous/mod.rs b/library/std/src/sys/xous/mod.rs
new file mode 100644
index 000000000..6d5c218d1
--- /dev/null
+++ b/library/std/src/sys/xous/mod.rs
@@ -0,0 +1,37 @@
+#![deny(unsafe_op_in_unsafe_fn)]
+
+pub mod alloc;
+#[path = "../unsupported/args.rs"]
+pub mod args;
+#[path = "../unix/cmath.rs"]
+pub mod cmath;
+#[path = "../unsupported/env.rs"]
+pub mod env;
+#[path = "../unsupported/fs.rs"]
+pub mod fs;
+#[path = "../unsupported/io.rs"]
+pub mod io;
+pub mod locks;
+#[path = "../unsupported/net.rs"]
+pub mod net;
+#[path = "../unsupported/once.rs"]
+pub mod once;
+pub mod os;
+#[path = "../unix/os_str.rs"]
+pub mod os_str;
+#[path = "../unix/path.rs"]
+pub mod path;
+#[path = "../unsupported/pipe.rs"]
+pub mod pipe;
+#[path = "../unsupported/process.rs"]
+pub mod process;
+pub mod stdio;
+pub mod thread;
+pub mod thread_local_key;
+#[path = "../unsupported/thread_parking.rs"]
+pub mod thread_parking;
+pub mod time;
+
+#[path = "../unsupported/common.rs"]
+mod common;
+pub use common::*;
diff --git a/library/std/src/sys/xous/os.rs b/library/std/src/sys/xous/os.rs
new file mode 100644
index 000000000..3d19fa4b3
--- /dev/null
+++ b/library/std/src/sys/xous/os.rs
@@ -0,0 +1,147 @@
+use super::unsupported;
+use crate::error::Error as StdError;
+use crate::ffi::{OsStr, OsString};
+use crate::fmt;
+use crate::io;
+use crate::marker::PhantomData;
+use crate::os::xous::ffi::Error as XousError;
+use crate::path::{self, PathBuf};
+
+#[cfg(not(test))]
+mod c_compat {
+ use crate::os::xous::ffi::exit;
+ extern "C" {
+ fn main() -> u32;
+ }
+
+ #[no_mangle]
+ pub extern "C" fn abort() {
+ exit(1);
+ }
+
+ #[no_mangle]
+ pub extern "C" fn _start() {
+ exit(unsafe { main() });
+ }
+
+ // This function is needed by the panic runtime. The symbol is named in
+ // pre-link args for the target specification, so keep that in sync.
+ #[no_mangle]
+ // NB. used by both libunwind and libpanic_abort
+ pub extern "C" fn __rust_abort() -> ! {
+ exit(101);
+ }
+}
+
+pub fn errno() -> i32 {
+ 0
+}
+
+pub fn error_string(errno: i32) -> String {
+ Into::<XousError>::into(errno).to_string()
+}
+
+pub fn getcwd() -> io::Result<PathBuf> {
+ unsupported()
+}
+
+pub fn chdir(_: &path::Path) -> io::Result<()> {
+ unsupported()
+}
+
+pub struct SplitPaths<'a>(!, PhantomData<&'a ()>);
+
+pub fn split_paths(_unparsed: &OsStr) -> SplitPaths<'_> {
+ panic!("unsupported")
+}
+
+impl<'a> Iterator for SplitPaths<'a> {
+ type Item = PathBuf;
+ fn next(&mut self) -> Option<PathBuf> {
+ self.0
+ }
+}
+
+#[derive(Debug)]
+pub struct JoinPathsError;
+
+pub fn join_paths<I, T>(_paths: I) -> Result<OsString, JoinPathsError>
+where
+ I: Iterator<Item = T>,
+ T: AsRef<OsStr>,
+{
+ Err(JoinPathsError)
+}
+
+impl fmt::Display for JoinPathsError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ "not supported on this platform yet".fmt(f)
+ }
+}
+
+impl StdError for JoinPathsError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "not supported on this platform yet"
+ }
+}
+
+pub fn current_exe() -> io::Result<PathBuf> {
+ unsupported()
+}
+
+pub struct Env(!);
+
+impl Env {
+ // FIXME(https://github.com/rust-lang/rust/issues/114583): Remove this when <OsStr as Debug>::fmt matches <str as Debug>::fmt.
+ pub fn str_debug(&self) -> impl fmt::Debug + '_ {
+ let Self(inner) = self;
+ match *inner {}
+ }
+}
+
+impl fmt::Debug for Env {
+ fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let Self(inner) = self;
+ match *inner {}
+ }
+}
+
+impl Iterator for Env {
+ type Item = (OsString, OsString);
+ fn next(&mut self) -> Option<(OsString, OsString)> {
+ self.0
+ }
+}
+
+pub fn env() -> Env {
+ panic!("not supported on this platform")
+}
+
+pub fn getenv(_: &OsStr) -> Option<OsString> {
+ None
+}
+
+pub fn setenv(_: &OsStr, _: &OsStr) -> io::Result<()> {
+ Err(io::const_io_error!(io::ErrorKind::Unsupported, "cannot set env vars on this platform"))
+}
+
+pub fn unsetenv(_: &OsStr) -> io::Result<()> {
+ Err(io::const_io_error!(io::ErrorKind::Unsupported, "cannot unset env vars on this platform"))
+}
+
+pub fn temp_dir() -> PathBuf {
+ panic!("no filesystem on this platform")
+}
+
+pub fn home_dir() -> Option<PathBuf> {
+ None
+}
+
+pub fn exit(code: i32) -> ! {
+ crate::os::xous::ffi::exit(code as u32);
+}
+
+pub fn getpid() -> u32 {
+ panic!("no pids on this platform")
+}
diff --git a/library/std/src/sys/xous/stdio.rs b/library/std/src/sys/xous/stdio.rs
new file mode 100644
index 000000000..2ac694641
--- /dev/null
+++ b/library/std/src/sys/xous/stdio.rs
@@ -0,0 +1,131 @@
+use crate::io;
+
+pub struct Stdin;
+pub struct Stdout {}
+pub struct Stderr;
+
+use crate::os::xous::ffi::{lend, try_lend, try_scalar, Connection};
+use crate::os::xous::services::{log_server, try_connect, LogScalar};
+
+impl Stdin {
+ pub const fn new() -> Stdin {
+ Stdin
+ }
+}
+
+impl io::Read for Stdin {
+ fn read(&mut self, _buf: &mut [u8]) -> io::Result<usize> {
+ Ok(0)
+ }
+}
+
+impl Stdout {
+ pub const fn new() -> Stdout {
+ Stdout {}
+ }
+}
+
+impl io::Write for Stdout {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ #[repr(align(4096))]
+ struct LendBuffer([u8; 4096]);
+ let mut lend_buffer = LendBuffer([0u8; 4096]);
+ let connection = log_server();
+ for chunk in buf.chunks(lend_buffer.0.len()) {
+ for (dest, src) in lend_buffer.0.iter_mut().zip(chunk) {
+ *dest = *src;
+ }
+ lend(connection, 1, &lend_buffer.0, 0, chunk.len()).unwrap();
+ }
+ Ok(buf.len())
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+impl Stderr {
+ pub const fn new() -> Stderr {
+ Stderr
+ }
+}
+
+impl io::Write for Stderr {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ #[repr(align(4096))]
+ struct LendBuffer([u8; 4096]);
+ let mut lend_buffer = LendBuffer([0u8; 4096]);
+ let connection = log_server();
+ for chunk in buf.chunks(lend_buffer.0.len()) {
+ for (dest, src) in lend_buffer.0.iter_mut().zip(chunk) {
+ *dest = *src;
+ }
+ lend(connection, 1, &lend_buffer.0, 0, chunk.len()).unwrap();
+ }
+ Ok(buf.len())
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+pub const STDIN_BUF_SIZE: usize = 0;
+
+pub fn is_ebadf(_err: &io::Error) -> bool {
+ true
+}
+
+#[derive(Copy, Clone)]
+pub struct PanicWriter {
+ log: Connection,
+ gfx: Option<Connection>,
+}
+
+impl io::Write for PanicWriter {
+ fn write(&mut self, s: &[u8]) -> core::result::Result<usize, io::Error> {
+ for c in s.chunks(core::mem::size_of::<usize>() * 4) {
+ // Text is grouped into 4x `usize` words. The id is 1100 plus
+ // the number of characters in this message.
+ // Ignore errors since we're already panicking.
+ try_scalar(self.log, LogScalar::AppendPanicMessage(&c).into()).ok();
+ }
+
+ // Serialize the text to the graphics panic handler, only if we were able
+ // to acquire a connection to it. Text length is encoded in the `valid` field,
+        // the data itself in the buffer. Typically several messages are required to
+ // fully transmit the entire panic message.
+ if let Some(gfx) = self.gfx {
+ #[repr(C, align(4096))]
+ struct Request([u8; 4096]);
+ let mut request = Request([0u8; 4096]);
+ for (&s, d) in s.iter().zip(request.0.iter_mut()) {
+ *d = s;
+ }
+ try_lend(gfx, 0 /* AppendPanicText */, &request.0, 0, s.len()).ok();
+ }
+ Ok(s.len())
+ }
+
+    // Tests show that this does not seem to be reliably called at the end of a panic
+    // print, so we can't rely on it to, e.g., trigger a graphics update.
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+pub fn panic_output() -> Option<impl io::Write> {
+ // Generally this won't fail because every server has already connected, so
+ // this is likely to succeed.
+ let log = log_server();
+
+ // Send the "We're panicking" message (1000).
+ try_scalar(log, LogScalar::BeginPanic.into()).ok();
+
+    // This will fail if the connection table is full or if the graphics server is
+    // not running. Most servers do not already have this connection.
+ let gfx = try_connect("panic-to-screen!");
+
+ Some(PanicWriter { log, gfx })
+}
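
Both `write` implementations above copy the payload into a page-aligned scratch buffer one chunk at a time before lending it to the log server. The chunking itself is ordinary slice arithmetic, sketched here without the Xous calls (the `lend` call mentioned in the comment refers to the code above):

fn main() {
    // Stand-in for the 4096-byte, page-aligned LendBuffer used above.
    let mut scratch = [0u8; 4096];
    let payload = vec![b'x'; 10_000];

    let mut sent = 0;
    for chunk in payload.chunks(scratch.len()) {
        scratch[..chunk.len()].copy_from_slice(chunk);
        // Here the real code would call lend(connection, 1, &scratch, 0, chunk.len()).
        sent += chunk.len();
    }
    assert_eq!(sent, payload.len());
}
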
diff --git a/library/std/src/sys/xous/thread.rs b/library/std/src/sys/xous/thread.rs
new file mode 100644
index 000000000..78c68de7b
--- /dev/null
+++ b/library/std/src/sys/xous/thread.rs
@@ -0,0 +1,144 @@
+use crate::ffi::CStr;
+use crate::io;
+use crate::num::NonZeroUsize;
+use crate::os::xous::ffi::{
+ blocking_scalar, create_thread, do_yield, join_thread, map_memory, update_memory_flags,
+ MemoryFlags, Syscall, ThreadId,
+};
+use crate::os::xous::services::{ticktimer_server, TicktimerScalar};
+use crate::time::Duration;
+use core::arch::asm;
+
+pub struct Thread {
+ tid: ThreadId,
+}
+
+pub const DEFAULT_MIN_STACK_SIZE: usize = 131072;
+const MIN_STACK_SIZE: usize = 4096;
+pub const GUARD_PAGE_SIZE: usize = 4096;
+
+impl Thread {
+ // unsafe: see thread::Builder::spawn_unchecked for safety requirements
+ pub unsafe fn new(stack: usize, p: Box<dyn FnOnce()>) -> io::Result<Thread> {
+ let p = Box::into_raw(Box::new(p));
+ let mut stack_size = crate::cmp::max(stack, MIN_STACK_SIZE);
+
+ if (stack_size & 4095) != 0 {
+ stack_size = (stack_size + 4095) & !4095;
+ }
+
+ // Allocate the whole thing, then divide it up after the fact. This ensures that
+ // even if there's a context switch during this function, the whole stack plus
+ // guard pages will remain contiguous.
+ let stack_plus_guard_pages: &mut [u8] = unsafe {
+ map_memory(
+ None,
+ None,
+ GUARD_PAGE_SIZE + stack_size + GUARD_PAGE_SIZE,
+ MemoryFlags::R | MemoryFlags::W | MemoryFlags::X,
+ )
+ }
+ .map_err(|code| io::Error::from_raw_os_error(code as i32))?;
+
+ // No access to this page. Note: Write-only pages are illegal, and will
+ // cause an access violation.
+ unsafe {
+ update_memory_flags(&mut stack_plus_guard_pages[0..GUARD_PAGE_SIZE], MemoryFlags::W)
+ .map_err(|code| io::Error::from_raw_os_error(code as i32))?
+ };
+
+ // No access to this page. Note: Write-only pages are illegal, and will
+ // cause an access violation.
+ unsafe {
+ update_memory_flags(
+ &mut stack_plus_guard_pages[(GUARD_PAGE_SIZE + stack_size)..],
+ MemoryFlags::W,
+ )
+ .map_err(|code| io::Error::from_raw_os_error(code as i32))?
+ };
+
+ let guard_page_pre = stack_plus_guard_pages.as_ptr() as usize;
+ let tid = create_thread(
+ thread_start as *mut usize,
+ &mut stack_plus_guard_pages[GUARD_PAGE_SIZE..(stack_size + GUARD_PAGE_SIZE)],
+ p as usize,
+ guard_page_pre,
+ stack_size,
+ 0,
+ )
+ .map_err(|code| io::Error::from_raw_os_error(code as i32))?;
+
+ extern "C" fn thread_start(main: *mut usize, guard_page_pre: usize, stack_size: usize) {
+ unsafe {
+ // Finally, let's run some code.
+ Box::from_raw(main as *mut Box<dyn FnOnce()>)();
+ }
+
+ // Destroy TLS, which will free the TLS page and call the destructor for
+ // any thread local storage.
+ unsafe {
+ crate::sys::thread_local_key::destroy_tls();
+ }
+
+ // Deallocate the stack memory, along with the guard pages. Afterwards,
+ // exit the thread by returning to the magic address 0xff80_3000usize,
+ // which tells the kernel to deallocate this thread.
+ let mapped_memory_base = guard_page_pre;
+ let mapped_memory_length = GUARD_PAGE_SIZE + stack_size + GUARD_PAGE_SIZE;
+ unsafe {
+ asm!(
+ "ecall",
+ "ret",
+ in("a0") Syscall::UnmapMemory as usize,
+ in("a1") mapped_memory_base,
+ in("a2") mapped_memory_length,
+ in("ra") 0xff80_3000usize,
+ options(nomem, nostack, noreturn)
+ );
+ }
+ }
+
+ Ok(Thread { tid })
+ }
+
+ pub fn yield_now() {
+ do_yield();
+ }
+
+ pub fn set_name(_name: &CStr) {
+ // nope
+ }
+
+ pub fn sleep(dur: Duration) {
+        // Because the sleep server works in units of `usize` milliseconds, split the
+        // sleep into chunks of at most `usize::MAX` milliseconds. On a 32-bit system
+        // this only matters for sleeps longer than about 49 days.
+ let mut millis = dur.as_millis();
+ while millis > 0 {
+ let sleep_duration =
+ if millis > (usize::MAX as _) { usize::MAX } else { millis as usize };
+ blocking_scalar(ticktimer_server(), TicktimerScalar::SleepMs(sleep_duration).into())
+ .expect("failed to send message to ticktimer server");
+ millis -= sleep_duration as u128;
+ }
+ }
+
+ pub fn join(self) {
+ join_thread(self.tid).unwrap();
+ }
+}
+
+pub fn available_parallelism() -> io::Result<NonZeroUsize> {
+ // We're unicore right now.
+ Ok(unsafe { NonZeroUsize::new_unchecked(1) })
+}
+
+pub mod guard {
+ pub type Guard = !;
+ pub unsafe fn current() -> Option<Guard> {
+ None
+ }
+ pub unsafe fn init() -> Option<Guard> {
+ None
+ }
+}
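
The thread constructor above maps one contiguous region of `GUARD_PAGE_SIZE + stack_size + GUARD_PAGE_SIZE` bytes and then re-protects both ends. A sketch of just the layout arithmetic, including the page-rounding of the requested stack size (pure arithmetic, no syscalls):

const PAGE_SIZE: usize = 4096;
const GUARD_PAGE_SIZE: usize = 4096;

fn main() {
    // Round the requested stack size up to a whole number of pages, exactly as
    // the `(stack_size + 4095) & !4095` expression above does.
    let requested = 100_000usize;
    let stack_size = (requested + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
    assert_eq!(stack_size % PAGE_SIZE, 0);

    // One contiguous allocation: [guard page][stack][guard page].
    let total = GUARD_PAGE_SIZE + stack_size + GUARD_PAGE_SIZE;
    let lower_guard = 0..GUARD_PAGE_SIZE;
    let stack = GUARD_PAGE_SIZE..GUARD_PAGE_SIZE + stack_size;
    let upper_guard = GUARD_PAGE_SIZE + stack_size..total;

    assert_eq!(lower_guard.len() + stack.len() + upper_guard.len(), total);
}
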
diff --git a/library/std/src/sys/xous/thread_local_key.rs b/library/std/src/sys/xous/thread_local_key.rs
new file mode 100644
index 000000000..3771ea657
--- /dev/null
+++ b/library/std/src/sys/xous/thread_local_key.rs
@@ -0,0 +1,190 @@
+use crate::mem::ManuallyDrop;
+use crate::ptr;
+use crate::sync::atomic::AtomicPtr;
+use crate::sync::atomic::AtomicUsize;
+use crate::sync::atomic::Ordering::SeqCst;
+use core::arch::asm;
+
+use crate::os::xous::ffi::{map_memory, unmap_memory, MemoryFlags};
+
+/// Thread Local Storage
+///
+/// Currently, we are limited to 1023 TLS entries. The entries
+/// live in a page of memory that's unique per-process, and is
+/// stored in the `$tp` register. If this register is 0, then
+/// TLS has not been initialized and thread cleanup can be skipped.
+///
+/// A `key` is an index into the page this register points to. The same
+/// key is used by all threads, but since every thread has its own page,
+/// it resolves to a different location for each thread.
+pub type Key = usize;
+
+pub type Dtor = unsafe extern "C" fn(*mut u8);
+
+const TLS_MEMORY_SIZE: usize = 4096;
+
+/// TLS keys start at `1` to mimic POSIX.
+static TLS_KEY_INDEX: AtomicUsize = AtomicUsize::new(1);
+
+fn tls_ptr_addr() -> *mut usize {
+ let mut tp: usize;
+ unsafe {
+ asm!(
+ "mv {}, tp",
+ out(reg) tp,
+ );
+ }
+ core::ptr::from_exposed_addr_mut::<usize>(tp)
+}
+
+/// Create an area of memory that's unique per thread. This area will
+/// contain all thread local pointers.
+fn tls_ptr() -> *mut usize {
+ let mut tp = tls_ptr_addr();
+
+ // If the TP register is `0`, then this thread hasn't initialized
+ // its TLS yet. Allocate a new page to store this memory.
+ if tp.is_null() {
+ tp = unsafe {
+ map_memory(
+ None,
+ None,
+ TLS_MEMORY_SIZE / core::mem::size_of::<usize>(),
+ MemoryFlags::R | MemoryFlags::W,
+ )
+ }
+ .expect("Unable to allocate memory for thread local storage")
+ .as_mut_ptr();
+
+ unsafe {
+ // Key #0 is currently unused.
+ (tp).write_volatile(0);
+
+ // Set the thread's `$tp` register
+ asm!(
+ "mv tp, {}",
+ in(reg) tp as usize,
+ );
+ }
+ }
+ tp
+}
+
+/// Allocate a new TLS key. These keys are shared among all threads.
+fn tls_alloc() -> usize {
+ TLS_KEY_INDEX.fetch_add(1, SeqCst)
+}
+
+#[inline]
+pub unsafe fn create(dtor: Option<Dtor>) -> Key {
+ let key = tls_alloc();
+ if let Some(f) = dtor {
+ unsafe { register_dtor(key, f) };
+ }
+ key
+}
+
+#[inline]
+pub unsafe fn set(key: Key, value: *mut u8) {
+ assert!((key < 1022) && (key >= 1));
+ unsafe { tls_ptr().add(key).write_volatile(value as usize) };
+}
+
+#[inline]
+pub unsafe fn get(key: Key) -> *mut u8 {
+ assert!((key < 1022) && (key >= 1));
+ core::ptr::from_exposed_addr_mut::<u8>(unsafe { tls_ptr().add(key).read_volatile() })
+}
+
+#[inline]
+pub unsafe fn destroy(_key: Key) {
+ panic!("can't destroy keys on Xous");
+}
+
+// -------------------------------------------------------------------------
+// Dtor registration (stolen from Windows)
+//
+// Xous has no native support for running destructors so we manage our own
+// list of destructors to keep track of how to destroy keys. We then install a
+// callback later to get invoked whenever a thread exits, running all
+// appropriate destructors.
+//
+// Currently unregistration from this list is not supported. A destructor can be
+// registered but cannot be unregistered. There are various simplifying reasons
+// for doing this, the big ones being:
+//
+// 1. Currently we don't even support deallocating TLS keys, so normal operation
+// doesn't need to deallocate a destructor.
+// 2. There is no point in time where we know we can unregister a destructor
+// because it could always be getting run by some remote thread.
+//
+// Typically processes have a statically known set of TLS keys which is pretty
+// small, and we'd want to keep this memory alive for the whole process anyway
+// really.
+//
+// Perhaps one day we can fold the `Box` here into a static allocation,
+// expanding the `StaticKey` structure to contain not only a slot for the TLS
+// key but also a slot for the destructor queue on windows. An optimization for
+// another day!
+
+static DTORS: AtomicPtr<Node> = AtomicPtr::new(ptr::null_mut());
+
+struct Node {
+ dtor: Dtor,
+ key: Key,
+ next: *mut Node,
+}
+
+unsafe fn register_dtor(key: Key, dtor: Dtor) {
+ let mut node = ManuallyDrop::new(Box::new(Node { key, dtor, next: ptr::null_mut() }));
+
+ let mut head = DTORS.load(SeqCst);
+ loop {
+ node.next = head;
+ match DTORS.compare_exchange(head, &mut **node, SeqCst, SeqCst) {
+ Ok(_) => return, // nothing to drop, we successfully added the node to the list
+ Err(cur) => head = cur,
+ }
+ }
+}
+
+pub unsafe fn destroy_tls() {
+ let tp = tls_ptr_addr();
+
+ // If the pointer address is 0, then this thread has no TLS.
+ if tp.is_null() {
+ return;
+ }
+ unsafe { run_dtors() };
+
+ // Finally, free the TLS array
+ unsafe {
+ unmap_memory(core::slice::from_raw_parts_mut(
+ tp,
+ TLS_MEMORY_SIZE / core::mem::size_of::<usize>(),
+ ))
+ .unwrap()
+ };
+}
+
+unsafe fn run_dtors() {
+ let mut any_run = true;
+ for _ in 0..5 {
+ if !any_run {
+ break;
+ }
+ any_run = false;
+ let mut cur = DTORS.load(SeqCst);
+ while !cur.is_null() {
+ let ptr = unsafe { get((*cur).key) };
+
+ if !ptr.is_null() {
+ unsafe { set((*cur).key, ptr::null_mut()) };
+ unsafe { ((*cur).dtor)(ptr as *mut _) };
+ any_run = true;
+ }
+
+ unsafe { cur = (*cur).next };
+ }
+ }
+}
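
The key/offset model described at the top of this file can be shown without the `tp` register: every thread owns one page of `usize` slots, and the process-global key is just an index that each thread resolves against its own page (a toy model, not the real implementation):

use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};

// Keys are process-global and start at 1, mirroring TLS_KEY_INDEX above.
static NEXT_KEY: AtomicUsize = AtomicUsize::new(1);

fn key_alloc() -> usize {
    NEXT_KEY.fetch_add(1, SeqCst)
}

fn main() {
    let key = key_alloc();

    // Each thread would have its own page; two plain arrays stand in for them here.
    let mut page_a = [0usize; 1024];
    let mut page_b = [0usize; 1024];

    // The same key indexes a different per-thread page, so the values are independent.
    page_a[key] = 0xAAAA;
    page_b[key] = 0xBBBB;
    assert_ne!(page_a[key], page_b[key]);
}
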
diff --git a/library/std/src/sys/xous/time.rs b/library/std/src/sys/xous/time.rs
new file mode 100644
index 000000000..4e4ae67ef
--- /dev/null
+++ b/library/std/src/sys/xous/time.rs
@@ -0,0 +1,57 @@
+use crate::os::xous::ffi::blocking_scalar;
+use crate::os::xous::services::{
+ systime_server, ticktimer_server, SystimeScalar::GetUtcTimeMs, TicktimerScalar::ElapsedMs,
+};
+use crate::time::Duration;
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
+pub struct Instant(Duration);
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
+pub struct SystemTime(Duration);
+
+pub const UNIX_EPOCH: SystemTime = SystemTime(Duration::from_secs(0));
+
+impl Instant {
+ pub fn now() -> Instant {
+ let result = blocking_scalar(ticktimer_server(), ElapsedMs.into())
+ .expect("failed to request elapsed_ms");
+ let lower = result[0];
+ let upper = result[1];
+        Instant(Duration::from_millis(lower as u64 | (upper as u64) << 32))
+ }
+
+ pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> {
+ self.0.checked_sub(other.0)
+ }
+
+ pub fn checked_add_duration(&self, other: &Duration) -> Option<Instant> {
+ self.0.checked_add(*other).map(Instant)
+ }
+
+ pub fn checked_sub_duration(&self, other: &Duration) -> Option<Instant> {
+ self.0.checked_sub(*other).map(Instant)
+ }
+}
+
+impl SystemTime {
+ pub fn now() -> SystemTime {
+ let result = blocking_scalar(systime_server(), GetUtcTimeMs.into())
+ .expect("failed to request utc time in ms");
+ let lower = result[0];
+ let upper = result[1];
+        SystemTime(Duration::from_millis((upper as u64) << 32 | lower as u64))
+ }
+
+ pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
+ self.0.checked_sub(other.0).ok_or_else(|| other.0 - self.0)
+ }
+
+ pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> {
+ Some(SystemTime(self.0.checked_add(*other)?))
+ }
+
+ pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> {
+ Some(SystemTime(self.0.checked_sub(*other)?))
+ }
+}
diff --git a/library/std/src/sys_common/mod.rs b/library/std/src/sys_common/mod.rs
index e9c727cbb..e18638f2a 100644
--- a/library/std/src/sys_common/mod.rs
+++ b/library/std/src/sys_common/mod.rs
@@ -44,8 +44,10 @@ cfg_if::cfg_if! {
cfg_if::cfg_if! {
if #[cfg(any(target_os = "l4re",
+ target_os = "uefi",
feature = "restricted-std",
all(target_family = "wasm", not(target_os = "emscripten")),
+ target_os = "xous",
all(target_vendor = "fortanix", target_env = "sgx")))] {
pub use crate::sys::net;
} else {
diff --git a/library/std/src/sys_common/net.rs b/library/std/src/sys_common/net.rs
index 2976a9f57..4f5b17dea 100644
--- a/library/std/src/sys_common/net.rs
+++ b/library/std/src/sys_common/net.rs
@@ -32,6 +32,7 @@ cfg_if::cfg_if! {
cfg_if::cfg_if! {
if #[cfg(any(
target_os = "linux", target_os = "android",
+ target_os = "hurd",
target_os = "dragonfly", target_os = "freebsd",
target_os = "openbsd", target_os = "netbsd",
target_os = "haiku", target_os = "nto"))] {
diff --git a/library/std/src/sys_common/process.rs b/library/std/src/sys_common/process.rs
index 18883048d..4d295cf0f 100644
--- a/library/std/src/sys_common/process.rs
+++ b/library/std/src/sys_common/process.rs
@@ -80,6 +80,10 @@ impl CommandEnv {
self.vars.clear();
}
+ pub fn does_clear(&self) -> bool {
+ self.clear
+ }
+
pub fn have_changed_path(&self) -> bool {
self.saw_path || self.clear
}
diff --git a/library/std/src/sys_common/thread_info.rs b/library/std/src/sys_common/thread_info.rs
index 88d937a7d..8d51732e0 100644
--- a/library/std/src/sys_common/thread_info.rs
+++ b/library/std/src/sys_common/thread_info.rs
@@ -1,46 +1,51 @@
#![allow(dead_code)] // stack_guard isn't used right now on all platforms
-use crate::cell::RefCell;
+use crate::cell::OnceCell;
use crate::sys::thread::guard::Guard;
use crate::thread::Thread;
struct ThreadInfo {
- stack_guard: Option<Guard>,
- thread: Thread,
+ stack_guard: OnceCell<Guard>,
+ thread: OnceCell<Thread>,
}
-thread_local! { static THREAD_INFO: RefCell<Option<ThreadInfo>> = const { RefCell::new(None) } }
+thread_local! {
+ static THREAD_INFO: ThreadInfo = const { ThreadInfo {
+ stack_guard: OnceCell::new(),
+ thread: OnceCell::new()
+ } };
+}
impl ThreadInfo {
fn with<R, F>(f: F) -> Option<R>
where
- F: FnOnce(&mut ThreadInfo) -> R,
+ F: FnOnce(&Thread, &OnceCell<Guard>) -> R,
{
THREAD_INFO
.try_with(move |thread_info| {
- let mut thread_info = thread_info.borrow_mut();
- let thread_info = thread_info.get_or_insert_with(|| ThreadInfo {
- stack_guard: None,
- thread: Thread::new(None),
- });
- f(thread_info)
+ let thread = thread_info.thread.get_or_init(|| Thread::new(None));
+ f(thread, &thread_info.stack_guard)
})
.ok()
}
}
pub fn current_thread() -> Option<Thread> {
- ThreadInfo::with(|info| info.thread.clone())
+ ThreadInfo::with(|thread, _| thread.clone())
}
pub fn stack_guard() -> Option<Guard> {
- ThreadInfo::with(|info| info.stack_guard.clone()).and_then(|o| o)
+ ThreadInfo::with(|_, guard| guard.get().cloned()).flatten()
}
+/// Set new thread info, panicking if it has already been initialized
+#[allow(unreachable_code, unreachable_patterns)] // some platforms don't use stack_guard
pub fn set(stack_guard: Option<Guard>, thread: Thread) {
THREAD_INFO.with(move |thread_info| {
- let mut thread_info = thread_info.borrow_mut();
- rtassert!(thread_info.is_none());
- *thread_info = Some(ThreadInfo { stack_guard, thread });
+ rtassert!(thread_info.stack_guard.get().is_none() && thread_info.thread.get().is_none());
+ if let Some(guard) = stack_guard {
+ thread_info.stack_guard.set(guard).unwrap();
+ }
+ thread_info.thread.set(thread).unwrap();
});
}
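
The reworked `ThreadInfo` above relies on two `OnceCell` behaviors: `get_or_init` creates the value lazily exactly once, and `set` fails if the cell is already initialized, which is what the `rtassert!`/`unwrap` pair in `set()` leans on. A small sketch of both, using `std::cell::OnceCell` with illustrative names:

use std::cell::OnceCell;

fn main() {
    let thread_name: OnceCell<String> = OnceCell::new();

    // First access initializes the cell; later accesses reuse the same value.
    let first = thread_name.get_or_init(|| "main".to_string()).clone();
    let second = thread_name.get_or_init(|| "other".to_string()).clone();
    assert_eq!(first, second);

    // `set` fails once the cell is initialized, so double initialization is caught.
    assert!(thread_name.set("again".to_string()).is_err());
}
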
diff --git a/library/std/src/thread/mod.rs b/library/std/src/thread/mod.rs
index e4581c2de..7b26068c2 100644
--- a/library/std/src/thread/mod.rs
+++ b/library/std/src/thread/mod.rs
@@ -178,7 +178,7 @@ use crate::sys_common::thread;
use crate::sys_common::thread_info;
use crate::sys_common::thread_parking::Parker;
use crate::sys_common::{AsInner, IntoInner};
-use crate::time::Duration;
+use crate::time::{Duration, Instant};
#[stable(feature = "scoped_threads", since = "1.63.0")]
mod scoped;
@@ -872,6 +872,86 @@ pub fn sleep(dur: Duration) {
imp::Thread::sleep(dur)
}
+/// Puts the current thread to sleep until the specified deadline has passed.
+///
+/// The thread may still be asleep after the specified deadline due to
+/// scheduling specifics or platform-dependent functionality. It will never
+/// wake before the deadline.
+///
+/// This function is blocking, and should not be used in `async` functions.
+///
+/// # Platform-specific behavior
+///
+/// This function uses [`sleep`] internally; see its platform-specific behavior.
+///
+/// # Examples
+///
+/// A simple game loop that limits the game to 60 frames per second.
+///
+/// ```no_run
+/// #![feature(thread_sleep_until)]
+/// # use std::time::{Duration, Instant};
+/// # use std::thread;
+/// #
+/// # fn update() {}
+/// # fn render() {}
+/// #
+/// let max_fps = 60.0;
+/// let frame_time = Duration::from_secs_f32(1.0/max_fps);
+/// let mut next_frame = Instant::now();
+/// loop {
+/// thread::sleep_until(next_frame);
+/// next_frame += frame_time;
+/// update();
+/// render();
+/// }
+/// ```
+///
+/// A slow API that we must not call too often and that takes a few
+/// tries before succeeding. By using `sleep_until`, the time the
+/// API call takes does not influence when we retry or when we give up.
+///
+/// ```no_run
+/// #![feature(thread_sleep_until)]
+/// # use std::time::{Duration, Instant};
+/// # use std::thread;
+/// #
+/// # enum Status {
+/// # Ready(usize),
+/// # Waiting,
+/// # }
+/// # fn slow_web_api_call() -> Status { Status::Ready(42) }
+/// #
+/// # const MAX_DURATION: Duration = Duration::from_secs(10);
+/// #
+/// # fn try_api_call() -> Result<usize, ()> {
+/// let deadline = Instant::now() + MAX_DURATION;
+/// let delay = Duration::from_millis(250);
+/// let mut next_attempt = Instant::now();
+/// loop {
+/// if Instant::now() > deadline {
+/// break Err(());
+/// }
+/// if let Status::Ready(data) = slow_web_api_call() {
+/// break Ok(data);
+/// }
+///
+/// next_attempt = deadline.min(next_attempt + delay);
+/// thread::sleep_until(next_attempt);
+/// }
+/// # }
+/// # let _data = try_api_call();
+/// ```
+#[unstable(feature = "thread_sleep_until", issue = "113752")]
+pub fn sleep_until(deadline: Instant) {
+ let now = Instant::now();
+
+ if let Some(delay) = deadline.checked_duration_since(now) {
+ sleep(delay);
+ }
+}
+
/// Used to ensure that `park` and `park_timeout` do not unwind, as that can
/// cause undefined behaviour if not handled correctly (see #102398 for context).
struct PanicGuard;
diff --git a/library/std/src/time.rs b/library/std/src/time.rs
index 00e2857a1..005d8c767 100644
--- a/library/std/src/time.rs
+++ b/library/std/src/time.rs
@@ -58,6 +58,8 @@ pub use core::time::TryFromFloatSecsError;
/// some seconds may be longer than others). An instant may jump forwards or
/// experience time dilation (slow down or speed up), but it will never go
/// backwards.
+/// As part of this non-guarantee, it is also not specified whether system suspends count as
+/// elapsed time or not. The behavior varies across platforms and Rust versions.
///
/// Instants are opaque types that can only be compared to one another. There is
/// no method to get "the number of seconds" from an instant. Instead, it only
@@ -176,6 +178,14 @@ pub struct Instant(time::Instant);
/// The size of a `SystemTime` struct may vary depending on the target operating
/// system.
///
+/// A `SystemTime` does not count leap seconds.
+/// `SystemTime::now()`'s behaviour around a leap second
+/// is the same as the operating system's wall clock.
+/// The precise behaviour near a leap second
+/// (e.g. whether the clock appears to run slow or fast, or stop, or jump)
+/// depends on platform and configuration,
+/// so should not be relied on.
+///
/// Example:
///
/// ```no_run
@@ -461,6 +471,9 @@ impl fmt::Debug for Instant {
impl SystemTime {
/// An anchor in time which can be used to create new `SystemTime` instances or
/// learn about where in time a `SystemTime` lies.
+ //
+ // NOTE! this documentation is duplicated, here and in std::time::UNIX_EPOCH.
+ // The two copies are not quite identical, because of the difference in naming.
///
/// This constant is defined to be "1970-01-01 00:00:00 UTC" on all systems with
/// respect to the system clock. Using `duration_since` on an existing
@@ -468,6 +481,11 @@ impl SystemTime {
/// measurement lies, and using `UNIX_EPOCH + duration` can be used to create a
/// `SystemTime` instance to represent another fixed point in time.
///
+ /// `duration_since(UNIX_EPOCH).unwrap().as_secs()` returns
+ /// the number of non-leap seconds since the start of 1970 UTC.
+ /// This is a POSIX `time_t` (as a `u64`),
+ /// and is the same time representation as used in many Internet protocols.
+ ///
/// # Examples
///
/// ```no_run
@@ -617,6 +635,9 @@ impl fmt::Debug for SystemTime {
/// An anchor in time which can be used to create new `SystemTime` instances or
/// learn about where in time a `SystemTime` lies.
+//
+// NOTE! this documentation is duplicated, here and in SystemTime::UNIX_EPOCH.
+// The two copies are not quite identical, because of the difference in naming.
///
/// This constant is defined to be "1970-01-01 00:00:00 UTC" on all systems with
/// respect to the system clock. Using `duration_since` on an existing
@@ -624,6 +645,11 @@ impl fmt::Debug for SystemTime {
/// measurement lies, and using `UNIX_EPOCH + duration` can be used to create a
/// [`SystemTime`] instance to represent another fixed point in time.
///
+/// `duration_since(UNIX_EPOCH).unwrap().as_secs()` returns
+/// the number of non-leap seconds since the start of 1970 UTC.
+/// This is a POSIX `time_t` (as a `u64`),
+/// and is the same time representation as used in many Internet protocols.
+///
/// # Examples
///
/// ```no_run
diff --git a/library/std/tests/env.rs b/library/std/tests/env.rs
index 96b4f372b..a1ca85c21 100644
--- a/library/std/tests/env.rs
+++ b/library/std/tests/env.rs
@@ -5,6 +5,7 @@ use rand::distributions::{Alphanumeric, DistString};
mod common;
use common::test_rng;
+use std::thread;
#[track_caller]
fn make_rand_name() -> OsString {
@@ -140,3 +141,22 @@ fn env_home_dir() {
}
}
}
+
+#[test] // miri shouldn't detect any data race in this fn
+#[cfg_attr(any(not(miri), target_os = "emscripten"), ignore)]
+fn test_env_get_set_multithreaded() {
+ let getter = thread::spawn(|| {
+ for _ in 0..100 {
+ let _ = var_os("foo");
+ }
+ });
+
+ let setter = thread::spawn(|| {
+ for _ in 0..100 {
+ set_var("foo", "bar");
+ }
+ });
+
+ let _ = getter.join();
+ let _ = setter.join();
+}
diff --git a/library/stdarch/.github/workflows/main.yml b/library/stdarch/.github/workflows/main.yml
index 2de81e5a2..014a9aca0 100644
--- a/library/stdarch/.github/workflows/main.yml
+++ b/library/stdarch/.github/workflows/main.yml
@@ -74,13 +74,15 @@ jobs:
- aarch64-unknown-linux-gnu
- riscv64gc-unknown-linux-gnu
- powerpc64le-unknown-linux-gnu
- - mips-unknown-linux-gnu
- - mips64-unknown-linux-gnuabi64
- - mips64el-unknown-linux-gnuabi64
+ # MIPS targets disabled since they are dropped to tier 3.
+ # See https://github.com/rust-lang/compiler-team/issues/648
+ #- mips-unknown-linux-gnu
+ #- mips64-unknown-linux-gnuabi64
+ #- mips64el-unknown-linux-gnuabi64
+ #- mipsel-unknown-linux-musl
- s390x-unknown-linux-gnu
- wasm32-wasi
- i586-unknown-linux-gnu
- - mipsel-unknown-linux-musl
- nvptx64-nvidia-cuda
- thumbv6m-none-eabi
- thumbv7m-none-eabi
@@ -114,15 +116,20 @@ jobs:
os: ubuntu-latest
- target: armv7-unknown-linux-gnueabihf
os: ubuntu-latest
- - target: mips-unknown-linux-gnu
- os: ubuntu-latest
- norun: true
- - target: mips64-unknown-linux-gnuabi64
- os: ubuntu-latest
- norun: true
- - target: mips64el-unknown-linux-gnuabi64
- os: ubuntu-latest
- norun: true
+ # MIPS targets disabled since they are dropped to tier 3.
+ # See https://github.com/rust-lang/compiler-team/issues/648
+ #- target: mips-unknown-linux-gnu
+ # os: ubuntu-latest
+ # norun: true
+ #- target: mips64-unknown-linux-gnuabi64
+ # os: ubuntu-latest
+ # norun: true
+ #- target: mips64el-unknown-linux-gnuabi64
+ # os: ubuntu-latest
+ # norun: true
+ #- target: mipsel-unknown-linux-musl
+ # os: ubuntu-latest
+ # norun: 1
- target: powerpc64le-unknown-linux-gnu
os: ubuntu-latest
disable_assert_instr: true
@@ -143,9 +150,6 @@ jobs:
os: windows-latest
- target: i586-unknown-linux-gnu
os: ubuntu-latest
- - target: mipsel-unknown-linux-musl
- os: ubuntu-latest
- norun: 1
- target: nvptx64-nvidia-cuda
os: ubuntu-latest
- target: thumbv6m-none-eabi
diff --git a/library/stdarch/ci/docker/riscv64gc-unknown-linux-gnu/Dockerfile b/library/stdarch/ci/docker/riscv64gc-unknown-linux-gnu/Dockerfile
index b9b3c682e..7ea795cac 100644
--- a/library/stdarch/ci/docker/riscv64gc-unknown-linux-gnu/Dockerfile
+++ b/library/stdarch/ci/docker/riscv64gc-unknown-linux-gnu/Dockerfile
@@ -1,10 +1,9 @@
-FROM ubuntu:22.04
+FROM ubuntu:23.04
RUN apt-get update && apt-get install -y --no-install-recommends \
gcc libc6-dev qemu-user ca-certificates \
- gcc-riscv64-linux-gnu libc6-dev-riscv64-cross \
- qemu-user
+ gcc-riscv64-linux-gnu libc6-dev-riscv64-cross
ENV CARGO_TARGET_RISCV64GC_UNKNOWN_LINUX_GNU_LINKER=riscv64-linux-gnu-gcc \
- CARGO_TARGET_RISCV64GC_UNKNOWN_LINUX_GNU_RUNNER="qemu-riscv64 -L /usr/riscv64-linux-gnu" \
+ CARGO_TARGET_RISCV64GC_UNKNOWN_LINUX_GNU_RUNNER="qemu-riscv64 -L /usr/riscv64-linux-gnu -cpu rv64,zk=true,zbb=true,zbc=true" \
OBJDUMP=riscv64-linux-gnu-objdump
diff --git a/library/stdarch/ci/dox.sh b/library/stdarch/ci/dox.sh
index 3e507b456..cc207cb35 100755
--- a/library/stdarch/ci/dox.sh
+++ b/library/stdarch/ci/dox.sh
@@ -45,6 +45,8 @@ dox arm armv7-unknown-linux-gnueabihf
dox aarch64 aarch64-unknown-linux-gnu
dox powerpc powerpc-unknown-linux-gnu
dox powerpc64le powerpc64le-unknown-linux-gnu
-dox mips mips-unknown-linux-gnu
-dox mips64 mips64-unknown-linux-gnuabi64
+# MIPS targets disabled since they are dropped to tier 3.
+# See https://github.com/rust-lang/compiler-team/issues/648
+#dox mips mips-unknown-linux-gnu
+#dox mips64 mips64-unknown-linux-gnuabi64
dox wasm32 wasm32-unknown-unknown
diff --git a/library/stdarch/ci/run-docker.sh b/library/stdarch/ci/run-docker.sh
index 32209d96c..59170439c 100755
--- a/library/stdarch/ci/run-docker.sh
+++ b/library/stdarch/ci/run-docker.sh
@@ -5,6 +5,11 @@
set -ex
+if [ $# -lt 1 ]; then
+ >&2 echo "Usage: $0 <TARGET>"
+ exit 1
+fi
+
run() {
target=$(echo "${1}" | sed 's/-emulated//')
echo "Building docker container for TARGET=${1}"
diff --git a/library/stdarch/ci/run.sh b/library/stdarch/ci/run.sh
index 1c8e219e6..7b2416fda 100755
--- a/library/stdarch/ci/run.sh
+++ b/library/stdarch/ci/run.sh
@@ -47,6 +47,7 @@ case ${TARGET} in
# Some of our test dependencies use the deprecated `gcc` crates which
# doesn't detect RISC-V compilers automatically, so do it manually here.
riscv64*)
+ export RUSTFLAGS="${RUSTFLAGS} -Ctarget-feature=+zk,+zbb,+zbc"
export TARGET_CC="riscv64-linux-gnu-gcc"
;;
esac
@@ -76,6 +77,11 @@ cargo_test() {
# qemu has an erratic behavior on those tests
powerpc64*)
cmd="$cmd --skip test_vec_lde_u16 --skip test_vec_lde_u32 --skip test_vec_expte"
+ ;;
+ # Miscompilation: https://github.com/rust-lang/rust/issues/112460
+ arm*)
+ cmd="$cmd --skip vld2q_dup_f32"
+ ;;
esac
if [ "$SKIP_TESTS" != "" ]; then
diff --git a/library/stdarch/crates/core_arch/src/aarch64/armclang.rs b/library/stdarch/crates/core_arch/src/aarch64/armclang.rs
deleted file mode 100644
index 9a608702a..000000000
--- a/library/stdarch/crates/core_arch/src/aarch64/armclang.rs
+++ /dev/null
@@ -1,23 +0,0 @@
-//! ARM compiler specific intrinsics
-//!
-//! # References
-//!
-//! - [ARM Compiler v 6.10 - armclang Reference Guide][arm_comp_ref]
-//!
-//! [arm_comp_ref]: https://developer.arm.com/docs/100067/0610
-
-#[cfg(test)]
-use stdarch_test::assert_instr;
-
-/// Inserts a breakpoint instruction.
-///
-/// `VAL` is a compile-time constant integer in range `[0, 65535]`.
-///
-/// The breakpoint instruction inserted is `BRK` on A64.
-#[cfg_attr(test, assert_instr(brk, VAL = 0))]
-#[inline(always)]
-#[rustc_legacy_const_generics(0)]
-pub unsafe fn __breakpoint<const VAL: i32>() {
- static_assert_uimm_bits!(VAL, 16);
- crate::arch::asm!("brk {}", const VAL);
-}
diff --git a/library/stdarch/crates/core_arch/src/aarch64/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/mod.rs
index 0411fc106..c31989dd3 100644
--- a/library/stdarch/crates/core_arch/src/aarch64/mod.rs
+++ b/library/stdarch/crates/core_arch/src/aarch64/mod.rs
@@ -6,9 +6,6 @@
//! [arm_ref]: http://infocenter.arm.com/help/topic/com.arm.doc.ihi0073a/IHI0073A_arm_neon_intrinsics_ref.pdf
//! [arm_dat]: https://developer.arm.com/technologies/neon/intrinsics
-mod v8;
-pub use self::v8::*;
-
mod neon;
pub use self::neon::*;
@@ -23,19 +20,8 @@ pub use self::prefetch::*;
pub use super::arm_shared::*;
-mod armclang;
-
-pub use self::armclang::*;
-
#[cfg(test)]
use stdarch_test::assert_instr;
-/// Generates the trap instruction `BRK 1`
-#[cfg_attr(test, assert_instr(brk))]
-#[inline]
-pub unsafe fn brk() -> ! {
- crate::intrinsics::abort()
-}
-
#[cfg(test)]
pub(crate) mod test_support;
diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs
index da7fdf8b1..20dec6d80 100644
--- a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs
+++ b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs
@@ -926,7 +926,7 @@ pub unsafe fn vcgtq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
simd_gt(a, b)
}
-/// Compare unsigned highe
+/// Compare unsigned greater than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u64)
#[inline]
@@ -937,7 +937,7 @@ pub unsafe fn vcgt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
simd_gt(a, b)
}
-/// Compare unsigned highe
+/// Compare unsigned greater than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u64)
#[inline]
@@ -8353,6 +8353,62 @@ pub unsafe fn vst4q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x4_t) {
vst4q_lane_f64_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
+/// Dot product index form with unsigned and signed integers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_laneq_s32)
+#[inline]
+#[target_feature(enable = "neon,i8mm")]
+#[cfg_attr(test, assert_instr(usdot, LANE = 3))]
+#[rustc_legacy_const_generics(3)]
+pub unsafe fn vusdot_laneq_s32<const LANE: i32>(a: int32x2_t, b: uint8x8_t, c: int8x16_t) -> int32x2_t {
+ static_assert_uimm_bits!(LANE, 2);
+ let c: int32x4_t = transmute(c);
+ let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
+ vusdot_s32(a, b, transmute(c))
+}
+
+/// Dot product index form with unsigned and signed integers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_laneq_s32)
+#[inline]
+#[target_feature(enable = "neon,i8mm")]
+#[cfg_attr(test, assert_instr(usdot, LANE = 3))]
+#[rustc_legacy_const_generics(3)]
+pub unsafe fn vusdotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t {
+ static_assert_uimm_bits!(LANE, 2);
+ let c: int32x4_t = transmute(c);
+ let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
+ vusdotq_s32(a, b, transmute(c))
+}
+
+/// Dot product index form with signed and unsigned integers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_laneq_s32)
+#[inline]
+#[target_feature(enable = "neon,i8mm")]
+#[cfg_attr(test, assert_instr(sudot, LANE = 3))]
+#[rustc_legacy_const_generics(3)]
+pub unsafe fn vsudot_laneq_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: uint8x16_t) -> int32x2_t {
+ static_assert_uimm_bits!(LANE, 2);
+ let c: uint32x4_t = transmute(c);
+ let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
+ vusdot_s32(a, transmute(c), b)
+}
+
+/// Dot product index form with signed and unsigned integers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_laneq_s32)
+#[inline]
+#[target_feature(enable = "neon,i8mm")]
+#[cfg_attr(test, assert_instr(sudot, LANE = 3))]
+#[rustc_legacy_const_generics(3)]
+pub unsafe fn vsudotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: uint8x16_t) -> int32x4_t {
+ static_assert_uimm_bits!(LANE, 2);
+ let c: uint32x4_t = transmute(c);
+ let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
+ vusdotq_s32(a, transmute(c), b)
+}
+
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f64)
@@ -10501,80 +10557,7 @@ pub unsafe fn vcmlaq_rot270_laneq_f32<const LANE: i32>(a: float32x4_t, b: float3
vcmlaq_rot270_f32(a, b, c)
}
-/// Dot product arithmetic
-///
-/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_s32)
-#[inline]
-#[target_feature(enable = "neon,dotprod")]
-#[cfg_attr(test, assert_instr(sdot))]
-pub unsafe fn vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t {
- #[allow(improper_ctypes)]
- extern "unadjusted" {
- #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sdot.v2i32.v8i8")]
- fn vdot_s32_(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t;
- }
- vdot_s32_(a, b, c)
-}
-
-/// Dot product arithmetic
-///
-/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_s32)
-#[inline]
-#[target_feature(enable = "neon,dotprod")]
-#[cfg_attr(test, assert_instr(sdot))]
-pub unsafe fn vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t {
- #[allow(improper_ctypes)]
- extern "unadjusted" {
- #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sdot.v4i32.v16i8")]
- fn vdotq_s32_(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t;
- }
- vdotq_s32_(a, b, c)
-}
-
-/// Dot product arithmetic
-///
-/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_u32)
-#[inline]
-#[target_feature(enable = "neon,dotprod")]
-#[cfg_attr(test, assert_instr(udot))]
-pub unsafe fn vdot_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t {
- #[allow(improper_ctypes)]
- extern "unadjusted" {
- #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.udot.v2i32.v8i8")]
- fn vdot_u32_(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t;
- }
- vdot_u32_(a, b, c)
-}
-
-/// Dot product arithmetic
-///
-/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_u32)
-#[inline]
-#[target_feature(enable = "neon,dotprod")]
-#[cfg_attr(test, assert_instr(udot))]
-pub unsafe fn vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t {
- #[allow(improper_ctypes)]
- extern "unadjusted" {
- #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.udot.v4i32.v16i8")]
- fn vdotq_u32_(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t;
- }
- vdotq_u32_(a, b, c)
-}
-
-/// Dot product arithmetic
-///
-/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_s32)
-#[inline]
-#[target_feature(enable = "neon,dotprod")]
-#[cfg_attr(test, assert_instr(sdot, LANE = 0))]
-#[rustc_legacy_const_generics(3)]
-pub unsafe fn vdot_lane_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t {
- static_assert_uimm_bits!(LANE, 1);
- let c: int8x8_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
- vdot_s32(a, b, c)
-}
-
-/// Dot product arithmetic
+/// Dot product arithmetic (indexed)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_s32)
#[inline]
@@ -10583,24 +10566,12 @@ pub unsafe fn vdot_lane_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: int8x
#[rustc_legacy_const_generics(3)]
pub unsafe fn vdot_laneq_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: int8x16_t) -> int32x2_t {
static_assert_uimm_bits!(LANE, 2);
- let c: int8x8_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
- vdot_s32(a, b, c)
-}
-
-/// Dot product arithmetic
-///
-/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_s32)
-#[inline]
-#[target_feature(enable = "neon,dotprod")]
-#[cfg_attr(test, assert_instr(sdot, LANE = 0))]
-#[rustc_legacy_const_generics(3)]
-pub unsafe fn vdotq_lane_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: int8x8_t) -> int32x4_t {
- static_assert_uimm_bits!(LANE, 1);
- let c: int8x16_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
- vdotq_s32(a, b, c)
+ let c: int32x4_t = transmute(c);
+ let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
+ vdot_s32(a, b, transmute(c))
}
-/// Dot product arithmetic
+/// Dot product arithmetic (indexed)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_s32)
#[inline]
@@ -10609,24 +10580,12 @@ pub unsafe fn vdotq_lane_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: int
#[rustc_legacy_const_generics(3)]
pub unsafe fn vdotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t {
static_assert_uimm_bits!(LANE, 2);
- let c: int8x16_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
- vdotq_s32(a, b, c)
-}
-
-/// Dot product arithmetic
-///
-/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_u32)
-#[inline]
-#[target_feature(enable = "neon,dotprod")]
-#[cfg_attr(test, assert_instr(udot, LANE = 0))]
-#[rustc_legacy_const_generics(3)]
-pub unsafe fn vdot_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t {
- static_assert_uimm_bits!(LANE, 1);
- let c: uint8x8_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
- vdot_u32(a, b, c)
+ let c: int32x4_t = transmute(c);
+ let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
+ vdotq_s32(a, b, transmute(c))
}
-/// Dot product arithmetic
+/// Dot product arithmetic (indexed)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_u32)
#[inline]
@@ -10635,24 +10594,12 @@ pub unsafe fn vdot_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint8x8_t, c: uin
#[rustc_legacy_const_generics(3)]
pub unsafe fn vdot_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint8x8_t, c: uint8x16_t) -> uint32x2_t {
static_assert_uimm_bits!(LANE, 2);
- let c: uint8x8_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
- vdot_u32(a, b, c)
-}
-
-/// Dot product arithmetic
-///
-/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_u32)
-#[inline]
-#[target_feature(enable = "neon,dotprod")]
-#[cfg_attr(test, assert_instr(udot, LANE = 0))]
-#[rustc_legacy_const_generics(3)]
-pub unsafe fn vdotq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint8x16_t, c: uint8x8_t) -> uint32x4_t {
- static_assert_uimm_bits!(LANE, 1);
- let c: uint8x16_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
- vdotq_u32(a, b, c)
+ let c: uint32x4_t = transmute(c);
+ let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
+ vdot_u32(a, b, transmute(c))
}
-/// Dot product arithmetic
+/// Dot product arithmetic (indexed)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_u32)
#[inline]
@@ -10661,8 +10608,9 @@ pub unsafe fn vdotq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint8x16_t, c: u
#[rustc_legacy_const_generics(3)]
pub unsafe fn vdotq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t {
static_assert_uimm_bits!(LANE, 2);
- let c: uint8x16_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
- vdotq_u32(a, b, c)
+ let c: uint32x4_t = transmute(c);
+ let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
+ vdotq_u32(a, b, transmute(c))
}
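The rewritten `*_laneq_*` bodies reinterpret `c` as 32-bit lanes, broadcast the selected lane with a two- or four-entry shuffle, and reinterpret back to bytes, instead of spelling out the old 8- or 16-entry byte shuffle. A small sketch of why the two lowerings pick the same bytes (hypothetical helper, shown for the 8-byte destination case):

```rust
// Broadcasting 32-bit lane `lane` and viewing the result as bytes yields the
// byte pattern [4*lane, 4*lane+1, 4*lane+2, 4*lane+3] repeated twice — the
// same indices the previous byte-level simd_shuffle! listed explicitly.
fn broadcast_lane_bytes(c: [u8; 16], lane: usize) -> [u8; 8] {
    let mut out = [0u8; 8];
    for chunk in out.chunks_mut(4) {
        chunk.copy_from_slice(&c[4 * lane..4 * lane + 4]);
    }
    out
}
```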
/// Maximum (vector)
@@ -14864,7 +14812,7 @@ pub unsafe fn vrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> u
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_s64)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(srsra, N = 2))]
+#[cfg_attr(test, assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsrad_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
@@ -14873,12 +14821,12 @@ pub unsafe fn vrsrad_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
a.wrapping_add(b)
}
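The asserted instruction for these scalar wrappers changes from the fused accumulate form (`srsra`/`ursra`) to the plain rounding shift (`srshr`/`urshr`), presumably because the final `wrapping_add` above is now emitted as a separate add. A scalar sketch of the semantics being tested, assuming `1 <= N <= 64` (the widening to `i128` only avoids overflow in the rounding addend); this is not the library code:

```rust
// Rounding shift right by N, then accumulate (illustrative sketch only).
fn vrsrad_n_s64_model<const N: i32>(a: i64, b: i64) -> i64 {
    let rounded = ((b as i128 + (1i128 << (N - 1))) >> N) as i64;
    a.wrapping_add(rounded)
}
```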
-/// Ungisned rounding shift right and accumulate.
+/// Unsigned rounding shift right and accumulate.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_u64)
#[inline]
#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(ursra, N = 2))]
+#[cfg_attr(test, assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsrad_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
@@ -15349,6 +15297,36 @@ pub unsafe fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t {
vrnd32xq_f32_(a)
}
+/// Floating-point round to 32-bit integer, using current rounding mode
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f64)
+#[inline]
+#[target_feature(enable = "neon,frintts")]
+#[cfg_attr(test, assert_instr(frint32x))]
+pub unsafe fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t {
+ #[allow(improper_ctypes)]
+ extern "unadjusted" {
+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frint32x.v2f64")]
+ fn vrnd32xq_f64_(a: float64x2_t) -> float64x2_t;
+ }
+ vrnd32xq_f64_(a)
+}
+
+/// Floating-point round to 32-bit integer, using current rounding mode
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f64)
+#[inline]
+#[target_feature(enable = "neon,frintts")]
+#[cfg_attr(test, assert_instr(frint32x))]
+pub unsafe fn vrnd32x_f64(a: float64x1_t) -> float64x1_t {
+ #[allow(improper_ctypes)]
+ extern "unadjusted" {
+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.frint32x.f64")]
+ fn vrnd32x_f64_(a: f64) -> f64;
+ }
+ transmute(vrnd32x_f64_(simd_extract(a, 0)))
+}
+
/// Floating-point round to 32-bit integer toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f32)
@@ -15379,6 +15357,36 @@ pub unsafe fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t {
vrnd32zq_f32_(a)
}
+/// Floating-point round to 32-bit integer toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f64)
+#[inline]
+#[target_feature(enable = "neon,frintts")]
+#[cfg_attr(test, assert_instr(frint32z))]
+pub unsafe fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t {
+ #[allow(improper_ctypes)]
+ extern "unadjusted" {
+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frint32z.v2f64")]
+ fn vrnd32zq_f64_(a: float64x2_t) -> float64x2_t;
+ }
+ vrnd32zq_f64_(a)
+}
+
+/// Floating-point round to 32-bit integer toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f64)
+#[inline]
+#[target_feature(enable = "neon,frintts")]
+#[cfg_attr(test, assert_instr(frint32z))]
+pub unsafe fn vrnd32z_f64(a: float64x1_t) -> float64x1_t {
+ #[allow(improper_ctypes)]
+ extern "unadjusted" {
+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.frint32z.f64")]
+ fn vrnd32z_f64_(a: f64) -> f64;
+ }
+ transmute(vrnd32z_f64_(simd_extract(a, 0)))
+}
+
/// Floating-point round to 64-bit integer, using current rounding mode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f32)
@@ -15409,6 +15417,36 @@ pub unsafe fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t {
vrnd64xq_f32_(a)
}
+/// Floating-point round to 64-bit integer, using current rounding mode
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f64)
+#[inline]
+#[target_feature(enable = "neon,frintts")]
+#[cfg_attr(test, assert_instr(frint64x))]
+pub unsafe fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t {
+ #[allow(improper_ctypes)]
+ extern "unadjusted" {
+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frint64x.v2f64")]
+ fn vrnd64xq_f64_(a: float64x2_t) -> float64x2_t;
+ }
+ vrnd64xq_f64_(a)
+}
+
+/// Floating-point round to 64-bit integer, using current rounding mode
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f64)
+#[inline]
+#[target_feature(enable = "neon,frintts")]
+#[cfg_attr(test, assert_instr(frint64x))]
+pub unsafe fn vrnd64x_f64(a: float64x1_t) -> float64x1_t {
+ #[allow(improper_ctypes)]
+ extern "unadjusted" {
+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.frint64x.f64")]
+ fn vrnd64x_f64_(a: f64) -> f64;
+ }
+ transmute(vrnd64x_f64_(simd_extract(a, 0)))
+}
+
/// Floating-point round to 64-bit integer toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f32)
@@ -15439,6 +15477,36 @@ pub unsafe fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t {
vrnd64zq_f32_(a)
}
+/// Floating-point round to 64-bit integer toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f64)
+#[inline]
+#[target_feature(enable = "neon,frintts")]
+#[cfg_attr(test, assert_instr(frint64z))]
+pub unsafe fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t {
+ #[allow(improper_ctypes)]
+ extern "unadjusted" {
+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frint64z.v2f64")]
+ fn vrnd64zq_f64_(a: float64x2_t) -> float64x2_t;
+ }
+ vrnd64zq_f64_(a)
+}
+
+/// Floating-point round to 64-bit integer toward zero
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f64)
+#[inline]
+#[target_feature(enable = "neon,frintts")]
+#[cfg_attr(test, assert_instr(frint64z))]
+pub unsafe fn vrnd64z_f64(a: float64x1_t) -> float64x1_t {
+ #[allow(improper_ctypes)]
+ extern "unadjusted" {
+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.frint64z.f64")]
+ fn vrnd64z_f64_(a: f64) -> f64;
+ }
+ transmute(vrnd64z_f64_(simd_extract(a, 0)))
+}
+
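The new `frint32z`/`frint64z` variants round toward zero and, per Arm's description of these instructions, saturate out-of-range (and NaN) inputs to the most negative representable 32- or 64-bit integer. A minimal scalar sketch of that behaviour, matching the test values added below (these helpers are illustrative, not the intrinsic implementation):

```rust
// Round toward zero, saturating to `min` outside [min, max_exclusive).
fn frint_toward_zero(x: f64, min: f64, max_exclusive: f64) -> f64 {
    let t = x.trunc();
    if t >= min && t < max_exclusive { t } else { min }
}

fn frint32z_model(x: f64) -> f64 {
    frint_toward_zero(x, -2147483648.0, 2147483648.0)
}

fn frint64z_model(x: f64) -> f64 {
    frint_toward_zero(x, -9223372036854775808.0, 9223372036854775808.0)
}
```

For example, `frint32z_model(2147483648.0)` is `-2147483648.0`, which is what `test_vrnd32z_f64` below expects.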
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s8)
@@ -22184,6 +22252,46 @@ mod test {
assert_eq!(r, e);
}
+ #[simd_test(enable = "neon,i8mm")]
+ unsafe fn test_vusdot_laneq_s32() {
+ let a: i32x2 = i32x2::new(1000, -4200);
+ let b: u8x8 = u8x8::new(100, 110, 120, 130, 140, 150, 160, 170);
+ let c: i8x16 = i8x16::new(4, 3, 2, 1, 0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11);
+ let e: i32x2 = i32x2::new(-3420, -10140);
+ let r: i32x2 = transmute(vusdot_laneq_s32::<3>(transmute(a), transmute(b), transmute(c)));
+ assert_eq!(r, e);
+ }
+
+ #[simd_test(enable = "neon,i8mm")]
+ unsafe fn test_vusdotq_laneq_s32() {
+ let a: i32x4 = i32x4::new(1000, -4200, -1000, 2000);
+ let b: u8x16 = u8x16::new(100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250);
+ let c: i8x16 = i8x16::new(4, 3, 2, 1, 0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11);
+ let e: i32x4 = i32x4::new(-3420, -10140, -8460, -6980);
+ let r: i32x4 = transmute(vusdotq_laneq_s32::<3>(transmute(a), transmute(b), transmute(c)));
+ assert_eq!(r, e);
+ }
+
+ #[simd_test(enable = "neon,i8mm")]
+ unsafe fn test_vsudot_laneq_s32() {
+ let a: i32x2 = i32x2::new(-2000, 4200);
+ let b: i8x8 = i8x8::new(4, 3, 2, 1, 0, -1, -2, -3);
+ let c: u8x16 = u8x16::new(100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250);
+ let e: i32x2 = i32x2::new(300, 2740);
+ let r: i32x2 = transmute(vsudot_laneq_s32::<3>(transmute(a), transmute(b), transmute(c)));
+ assert_eq!(r, e);
+ }
+
+ #[simd_test(enable = "neon,i8mm")]
+ unsafe fn test_vsudotq_laneq_s32() {
+ let a: i32x4 = i32x4::new(-2000, 4200, -1000, 2000);
+ let b: i8x16 = i8x16::new(4, 3, 2, 1, 0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11);
+ let c: u8x16 = u8x16::new(100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250);
+ let e: i32x4 = i32x4::new(300, 2740, -6220, -6980);
+ let r: i32x4 = transmute(vsudotq_laneq_s32::<3>(transmute(a), transmute(b), transmute(c)));
+ assert_eq!(r, e);
+ }
+
#[simd_test(enable = "neon")]
unsafe fn test_vmul_f64() {
let a: f64 = 1.0;
@@ -23664,121 +23772,41 @@ mod test {
}
#[simd_test(enable = "neon,dotprod")]
- unsafe fn test_vdot_s32() {
- let a: i32x2 = i32x2::new(1, 2);
- let b: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
- let c: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
- let e: i32x2 = i32x2::new(31, 176);
- let r: i32x2 = transmute(vdot_s32(transmute(a), transmute(b), transmute(c)));
- assert_eq!(r, e);
- }
-
- #[simd_test(enable = "neon,dotprod")]
- unsafe fn test_vdotq_s32() {
- let a: i32x4 = i32x4::new(1, 2, 1, 2);
- let b: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
- let c: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
- let e: i32x4 = i32x4::new(31, 176, 31, 176);
- let r: i32x4 = transmute(vdotq_s32(transmute(a), transmute(b), transmute(c)));
- assert_eq!(r, e);
- }
-
- #[simd_test(enable = "neon,dotprod")]
- unsafe fn test_vdot_u32() {
- let a: u32x2 = u32x2::new(1, 2);
- let b: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
- let c: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
- let e: u32x2 = u32x2::new(31, 176);
- let r: u32x2 = transmute(vdot_u32(transmute(a), transmute(b), transmute(c)));
- assert_eq!(r, e);
- }
-
- #[simd_test(enable = "neon,dotprod")]
- unsafe fn test_vdotq_u32() {
- let a: u32x4 = u32x4::new(1, 2, 1, 2);
- let b: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
- let c: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
- let e: u32x4 = u32x4::new(31, 176, 31, 176);
- let r: u32x4 = transmute(vdotq_u32(transmute(a), transmute(b), transmute(c)));
- assert_eq!(r, e);
- }
-
- #[simd_test(enable = "neon,dotprod")]
- unsafe fn test_vdot_lane_s32() {
- let a: i32x2 = i32x2::new(1, 2);
- let b: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
- let c: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
- let e: i32x2 = i32x2::new(31, 72);
- let r: i32x2 = transmute(vdot_lane_s32::<0>(transmute(a), transmute(b), transmute(c)));
- assert_eq!(r, e);
- }
-
- #[simd_test(enable = "neon,dotprod")]
unsafe fn test_vdot_laneq_s32() {
let a: i32x2 = i32x2::new(1, 2);
- let b: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
+ let b: i8x8 = i8x8::new(-1, 2, 3, 4, 5, 6, 7, 8);
let c: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
- let e: i32x2 = i32x2::new(31, 72);
+ let e: i32x2 = i32x2::new(29, 72);
let r: i32x2 = transmute(vdot_laneq_s32::<0>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon,dotprod")]
- unsafe fn test_vdotq_lane_s32() {
- let a: i32x4 = i32x4::new(1, 2, 1, 2);
- let b: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
- let c: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
- let e: i32x4 = i32x4::new(31, 72, 31, 72);
- let r: i32x4 = transmute(vdotq_lane_s32::<0>(transmute(a), transmute(b), transmute(c)));
- assert_eq!(r, e);
- }
-
- #[simd_test(enable = "neon,dotprod")]
unsafe fn test_vdotq_laneq_s32() {
let a: i32x4 = i32x4::new(1, 2, 1, 2);
- let b: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+ let b: i8x16 = i8x16::new(-1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
let c: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
- let e: i32x4 = i32x4::new(31, 72, 31, 72);
+ let e: i32x4 = i32x4::new(29, 72, 31, 72);
let r: i32x4 = transmute(vdotq_laneq_s32::<0>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon,dotprod")]
- unsafe fn test_vdot_lane_u32() {
- let a: u32x2 = u32x2::new(1, 2);
- let b: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
- let c: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
- let e: u32x2 = u32x2::new(31, 72);
- let r: u32x2 = transmute(vdot_lane_u32::<0>(transmute(a), transmute(b), transmute(c)));
- assert_eq!(r, e);
- }
-
- #[simd_test(enable = "neon,dotprod")]
unsafe fn test_vdot_laneq_u32() {
let a: u32x2 = u32x2::new(1, 2);
- let b: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
+ let b: u8x8 = u8x8::new(255, 2, 3, 4, 5, 6, 7, 8);
let c: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
- let e: u32x2 = u32x2::new(31, 72);
+ let e: u32x2 = u32x2::new(285, 72);
let r: u32x2 = transmute(vdot_laneq_u32::<0>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon,dotprod")]
- unsafe fn test_vdotq_lane_u32() {
- let a: u32x4 = u32x4::new(1, 2, 1, 2);
- let b: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
- let c: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
- let e: u32x4 = u32x4::new(31, 72, 31, 72);
- let r: u32x4 = transmute(vdotq_lane_u32::<0>(transmute(a), transmute(b), transmute(c)));
- assert_eq!(r, e);
- }
-
- #[simd_test(enable = "neon,dotprod")]
unsafe fn test_vdotq_laneq_u32() {
let a: u32x4 = u32x4::new(1, 2, 1, 2);
- let b: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+ let b: u8x16 = u8x16::new(255, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
let c: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
- let e: u32x4 = u32x4::new(31, 72, 31, 72);
+ let e: u32x4 = u32x4::new(285, 72, 31, 72);
let r: u32x4 = transmute(vdotq_laneq_u32::<0>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
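The updated dot-product tests put a negative byte (255 in the unsigned forms) into `b[0]` so that sign and zero extension actually show up in lane 0. A quick check of the new expected values, using lane 0 of `c` = (1, 2, 3, 4) as in the tests above:

```rust
fn main() {
    // test_vdot_laneq_s32, lane 0: 1 + (-1)*1 + 2*2 + 3*3 + 4*4
    assert_eq!(1 + (-1) * 1 + 2 * 2 + 3 * 3 + 4 * 4, 29);
    // test_vdot_laneq_u32, lane 0: 1 + 255*1 + 2*2 + 3*3 + 4*4
    assert_eq!(1u32 + 255 * 1 + 2 * 2 + 3 * 3 + 4 * 4, 285);
}
```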
@@ -26888,68 +26916,332 @@ mod test {
#[simd_test(enable = "neon,frintts")]
unsafe fn test_vrnd32x_f32() {
- let a: f32x2 = f32x2::new(1.1, 1.9);
- let e: f32x2 = f32x2::new(1.0, 2.0);
+ let a: f32x2 = f32x2::new(-1.5, 2.9);
+ let e: f32x2 = f32x2::new(-2.0, 3.0);
let r: f32x2 = transmute(vrnd32x_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon,frintts")]
unsafe fn test_vrnd32xq_f32() {
- let a: f32x4 = f32x4::new(1.1, 1.9, -1.7, -2.3);
- let e: f32x4 = f32x4::new(1.0, 2.0, -2.0, -2.0);
+ let a: f32x4 = f32x4::new(-1.5, 2.9, 1.5, -2.5);
+ let e: f32x4 = f32x4::new(-2.0, 3.0, 2.0, -2.0);
let r: f32x4 = transmute(vrnd32xq_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon,frintts")]
+ unsafe fn test_vrnd32xq_f64() {
+ let a: f64x2 = f64x2::new(-1.5, 2.9);
+ let e: f64x2 = f64x2::new(-2.0, 3.0);
+ let r: f64x2 = transmute(vrnd32xq_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64x2 = f64x2::new(1.5, -2.5);
+ let e: f64x2 = f64x2::new(2.0, -2.0);
+ let r: f64x2 = transmute(vrnd32xq_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64x2 = f64x2::new(2147483647.499999762, 2147483647.5);
+ let e: f64x2 = f64x2::new(2147483647.0, -2147483648.0);
+ let r: f64x2 = transmute(vrnd32xq_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64x2 = f64x2::new(-2147483647.499999762, -2147483648.500000477);
+ let e: f64x2 = f64x2::new(-2147483647.0, -2147483648.0);
+ let r: f64x2 = transmute(vrnd32xq_f64(transmute(a)));
+ assert_eq!(r, e);
+ }
+
+ #[simd_test(enable = "neon,frintts")]
+ unsafe fn test_vrnd32x_f64() {
+ let a: f64 = -1.5;
+ let e: f64 = -2.0;
+ let r: f64 = transmute(vrnd32x_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64 = 1.5;
+ let e: f64 = 2.0;
+ let r: f64 = transmute(vrnd32x_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64 = 2147483647.499999762;
+ let e: f64 = 2147483647.0;
+ let r: f64 = transmute(vrnd32x_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64 = -2147483647.499999762;
+ let e: f64 = -2147483647.0;
+ let r: f64 = transmute(vrnd32x_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64 = 2.9;
+ let e: f64 = 3.0;
+ let r: f64 = transmute(vrnd32x_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64 = -2.5;
+ let e: f64 = -2.0;
+ let r: f64 = transmute(vrnd32x_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64 = 2147483647.5;
+ let e: f64 = -2147483648.0;
+ let r: f64 = transmute(vrnd32x_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64 = -2147483648.500000477;
+ let e: f64 = -2147483648.0;
+ let r: f64 = transmute(vrnd32x_f64(transmute(a)));
+ assert_eq!(r, e);
+ }
+
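The `vrnd32x_f64` cases above follow the current rounding mode; a scalar sketch assuming the default round-to-nearest, ties-to-even mode (using std's `round_ties_even`), with the same saturation to -2^31 as the toward-zero sketch earlier:

```rust
fn frint32x_model(x: f64) -> f64 {
    // Assumes the FPCR is in its default round-to-nearest, ties-to-even mode.
    let r = x.round_ties_even();
    if r >= -2147483648.0 && r <= 2147483647.0 { r } else { -2147483648.0 }
}
```

This reproduces every pair in `test_vrnd32x_f64`, including `2147483647.5` mapping to `-2147483648.0`.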
+ #[simd_test(enable = "neon,frintts")]
unsafe fn test_vrnd32z_f32() {
- let a: f32x2 = f32x2::new(1.1, 1.9);
- let e: f32x2 = f32x2::new(1.0, 1.0);
+ let a: f32x2 = f32x2::new(-1.5, 2.9);
+ let e: f32x2 = f32x2::new(-1.0, 2.0);
let r: f32x2 = transmute(vrnd32z_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon,frintts")]
unsafe fn test_vrnd32zq_f32() {
- let a: f32x4 = f32x4::new(1.1, 1.9, -1.7, -2.3);
- let e: f32x4 = f32x4::new(1.0, 1.0, -1.0, -2.0);
+ let a: f32x4 = f32x4::new(-1.5, 2.9, 1.5, -2.5);
+ let e: f32x4 = f32x4::new(-1.0, 2.0, 1.0, -2.0);
let r: f32x4 = transmute(vrnd32zq_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon,frintts")]
+ unsafe fn test_vrnd32zq_f64() {
+ let a: f64x2 = f64x2::new(-1.5, 2.9);
+ let e: f64x2 = f64x2::new(-1.0, 2.0);
+ let r: f64x2 = transmute(vrnd32zq_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64x2 = f64x2::new(1.5, -2.5);
+ let e: f64x2 = f64x2::new(1.0, -2.0);
+ let r: f64x2 = transmute(vrnd32zq_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64x2 = f64x2::new(2147483647.999999762, 2147483648.0);
+ let e: f64x2 = f64x2::new(2147483647.0, -2147483648.0);
+ let r: f64x2 = transmute(vrnd32zq_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64x2 = f64x2::new(-2147483647.999999762, -2147483649.0);
+ let e: f64x2 = f64x2::new(-2147483647.0, -2147483648.0);
+ let r: f64x2 = transmute(vrnd32zq_f64(transmute(a)));
+ assert_eq!(r, e);
+ }
+
+ #[simd_test(enable = "neon,frintts")]
+ unsafe fn test_vrnd32z_f64() {
+ let a: f64 = -1.5;
+ let e: f64 = -1.0;
+ let r: f64 = transmute(vrnd32z_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64 = 1.5;
+ let e: f64 = 1.0;
+ let r: f64 = transmute(vrnd32z_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64 = 2147483647.999999762;
+ let e: f64 = 2147483647.0;
+ let r: f64 = transmute(vrnd32z_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64 = -2147483647.999999762;
+ let e: f64 = -2147483647.0;
+ let r: f64 = transmute(vrnd32z_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64 = 2.9;
+ let e: f64 = 2.0;
+ let r: f64 = transmute(vrnd32z_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64 = -2.5;
+ let e: f64 = -2.0;
+ let r: f64 = transmute(vrnd32z_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64 = 2147483648.0;
+ let e: f64 = -2147483648.0;
+ let r: f64 = transmute(vrnd32z_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64 = -2147483649.0;
+ let e: f64 = -2147483648.0;
+ let r: f64 = transmute(vrnd32z_f64(transmute(a)));
+ assert_eq!(r, e);
+ }
+
+ #[simd_test(enable = "neon,frintts")]
unsafe fn test_vrnd64x_f32() {
- let a: f32x2 = f32x2::new(1.1, 1.9);
- let e: f32x2 = f32x2::new(1.0, 2.0);
+ let a: f32x2 = f32x2::new(-1.5, 2.9);
+ let e: f32x2 = f32x2::new(-2.0, 3.0);
let r: f32x2 = transmute(vrnd64x_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon,frintts")]
unsafe fn test_vrnd64xq_f32() {
- let a: f32x4 = f32x4::new(1.1, 1.9, -1.7, -2.3);
- let e: f32x4 = f32x4::new(1.0, 2.0, -2.0, -2.0);
+ let a: f32x4 = f32x4::new(-1.5, 2.9, 1.5, -2.5);
+ let e: f32x4 = f32x4::new(-2.0, 3.0, 2.0, -2.0);
let r: f32x4 = transmute(vrnd64xq_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon,frintts")]
+ unsafe fn test_vrnd64xq_f64() {
+ let a: f64x2 = f64x2::new(-1.5, 2.9);
+ let e: f64x2 = f64x2::new(-2.0, 3.0);
+ let r: f64x2 = transmute(vrnd64xq_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64x2 = f64x2::new(1.5, -2.5);
+ let e: f64x2 = f64x2::new(2.0, -2.0);
+ let r: f64x2 = transmute(vrnd64xq_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64x2 = f64x2::new(9223372036854774784.0, 9223372036854775808.0);
+ let e: f64x2 = f64x2::new(9223372036854774784.0, -9223372036854775808.0);
+ let r: f64x2 = transmute(vrnd64xq_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64x2 = f64x2::new(-9223372036854775808.0, -9223372036854777856.0);
+ let e: f64x2 = f64x2::new(-9223372036854775808.0, -9223372036854775808.0);
+ let r: f64x2 = transmute(vrnd64xq_f64(transmute(a)));
+ assert_eq!(r, e);
+ }
+
+ #[simd_test(enable = "neon,frintts")]
+ unsafe fn test_vrnd64x_f64() {
+ let a: f64 = -1.5;
+ let e: f64 = -2.0;
+ let r: f64 = transmute(vrnd64x_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64 = 1.5;
+ let e: f64 = 2.0;
+ let r: f64 = transmute(vrnd64x_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64 = 9223372036854774784.0;
+ let e: f64 = 9223372036854774784.0;
+ let r: f64 = transmute(vrnd64x_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64 = -9223372036854775808.0;
+ let e: f64 = -9223372036854775808.0;
+ let r: f64 = transmute(vrnd64x_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64 = 2.9;
+ let e: f64 = 3.0;
+ let r: f64 = transmute(vrnd64x_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64 = -2.5;
+ let e: f64 = -2.0;
+ let r: f64 = transmute(vrnd64x_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64 = 9223372036854775808.0;
+ let e: f64 = -9223372036854775808.0;
+ let r: f64 = transmute(vrnd64x_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64 = -9223372036854777856.0;
+ let e: f64 = -9223372036854775808.0;
+ let r: f64 = transmute(vrnd64x_f64(transmute(a)));
+ assert_eq!(r, e);
+ }
+
+ #[simd_test(enable = "neon,frintts")]
unsafe fn test_vrnd64z_f32() {
- let a: f32x2 = f32x2::new(1.1, 1.9);
- let e: f32x2 = f32x2::new(1.0, 1.0);
+ let a: f32x2 = f32x2::new(-1.5, 2.9);
+ let e: f32x2 = f32x2::new(-1.0, 2.0);
let r: f32x2 = transmute(vrnd64z_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon,frintts")]
unsafe fn test_vrnd64zq_f32() {
- let a: f32x4 = f32x4::new(1.1, 1.9, -1.7, -2.3);
- let e: f32x4 = f32x4::new(1.0, 1.0, -1.0, -2.0);
+ let a: f32x4 = f32x4::new(-1.5, 2.9, 1.5, -2.5);
+ let e: f32x4 = f32x4::new(-1.0, 2.0, 1.0, -2.0);
let r: f32x4 = transmute(vrnd64zq_f32(transmute(a)));
assert_eq!(r, e);
}
+ #[simd_test(enable = "neon,frintts")]
+ unsafe fn test_vrnd64zq_f64() {
+ let a: f64x2 = f64x2::new(-1.5, 2.9);
+ let e: f64x2 = f64x2::new(-1.0, 2.0);
+ let r: f64x2 = transmute(vrnd64zq_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64x2 = f64x2::new(1.5, -2.5);
+ let e: f64x2 = f64x2::new(1.0, -2.0);
+ let r: f64x2 = transmute(vrnd64zq_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64x2 = f64x2::new(9223372036854774784.0, 9223372036854775808.0);
+ let e: f64x2 = f64x2::new(9223372036854774784.0, -9223372036854775808.0);
+ let r: f64x2 = transmute(vrnd64zq_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64x2 = f64x2::new(-9223372036854775808.0, -9223372036854777856.0);
+ let e: f64x2 = f64x2::new(-9223372036854775808.0, -9223372036854775808.0);
+ let r: f64x2 = transmute(vrnd64zq_f64(transmute(a)));
+ assert_eq!(r, e);
+ }
+
+ #[simd_test(enable = "neon,frintts")]
+ unsafe fn test_vrnd64z_f64() {
+ let a: f64 = -1.5;
+ let e: f64 = -1.0;
+ let r: f64 = transmute(vrnd64z_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64 = 1.5;
+ let e: f64 = 1.0;
+ let r: f64 = transmute(vrnd64z_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64 = 9223372036854774784.0;
+ let e: f64 = 9223372036854774784.0;
+ let r: f64 = transmute(vrnd64z_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64 = -9223372036854775808.0;
+ let e: f64 = -9223372036854775808.0;
+ let r: f64 = transmute(vrnd64z_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64 = 2.9;
+ let e: f64 = 2.0;
+ let r: f64 = transmute(vrnd64z_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64 = -2.5;
+ let e: f64 = -2.0;
+ let r: f64 = transmute(vrnd64z_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64 = 9223372036854775808.0;
+ let e: f64 = -9223372036854775808.0;
+ let r: f64 = transmute(vrnd64z_f64(transmute(a)));
+ assert_eq!(r, e);
+
+ let a: f64 = -9223372036854777856.0;
+ let e: f64 = -9223372036854775808.0;
+ let r: f64 = transmute(vrnd64z_f64(transmute(a)));
+ assert_eq!(r, e);
+ }
+
#[simd_test(enable = "neon")]
unsafe fn test_vtrn1_s8() {
let a: i8x8 = i8x8::new(0, 2, 4, 6, 8, 10, 12, 14);
diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs
index 850657033..30fa21dd8 100644
--- a/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs
+++ b/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs
@@ -4127,11 +4127,11 @@ mod tests {
#[simd_test(enable = "neon")]
unsafe fn test_vpminq_s8() {
- #[cfg_attr(rustfmt, skip)]
+ #[rustfmt::skip]
let a = i8x16::new(1, -2, 3, -4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
- #[cfg_attr(rustfmt, skip)]
+ #[rustfmt::skip]
let b = i8x16::new(0, 3, 2, 5, 4, 7, 6, 9, 0, 3, 2, 5, 4, 7, 6, 9);
- #[cfg_attr(rustfmt, skip)]
+ #[rustfmt::skip]
let e = i8x16::new(-2, -4, 5, 7, 1, 3, 5, 7, 0, 2, 4, 6, 0, 2, 4, 6);
let r: i8x16 = transmute(vpminq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
@@ -4157,11 +4157,11 @@ mod tests {
#[simd_test(enable = "neon")]
unsafe fn test_vpminq_u8() {
- #[cfg_attr(rustfmt, skip)]
+ #[rustfmt::skip]
let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
- #[cfg_attr(rustfmt, skip)]
+ #[rustfmt::skip]
let b = u8x16::new(0, 3, 2, 5, 4, 7, 6, 9, 0, 3, 2, 5, 4, 7, 6, 9);
- #[cfg_attr(rustfmt, skip)]
+ #[rustfmt::skip]
let e = u8x16::new(1, 3, 5, 7, 1, 3, 5, 7, 0, 2, 4, 6, 0, 2, 4, 6);
let r: u8x16 = transmute(vpminq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
@@ -4205,11 +4205,11 @@ mod tests {
#[simd_test(enable = "neon")]
unsafe fn test_vpmaxq_s8() {
- #[cfg_attr(rustfmt, skip)]
+ #[rustfmt::skip]
let a = i8x16::new(1, -2, 3, -4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
- #[cfg_attr(rustfmt, skip)]
+ #[rustfmt::skip]
let b = i8x16::new(0, 3, 2, 5, 4, 7, 6, 9, 0, 3, 2, 5, 4, 7, 6, 9);
- #[cfg_attr(rustfmt, skip)]
+ #[rustfmt::skip]
let e = i8x16::new(1, 3, 6, 8, 2, 4, 6, 8, 3, 5, 7, 9, 3, 5, 7, 9);
let r: i8x16 = transmute(vpmaxq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
@@ -4235,11 +4235,11 @@ mod tests {
#[simd_test(enable = "neon")]
unsafe fn test_vpmaxq_u8() {
- #[cfg_attr(rustfmt, skip)]
+ #[rustfmt::skip]
let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
- #[cfg_attr(rustfmt, skip)]
+ #[rustfmt::skip]
let b = u8x16::new(0, 3, 2, 5, 4, 7, 6, 9, 0, 3, 2, 5, 4, 7, 6, 9);
- #[cfg_attr(rustfmt, skip)]
+ #[rustfmt::skip]
let e = u8x16::new(2, 4, 6, 8, 2, 4, 6, 8, 3, 5, 7, 9, 3, 5, 7, 9);
let r: u8x16 = transmute(vpmaxq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
diff --git a/library/stdarch/crates/core_arch/src/aarch64/tme.rs b/library/stdarch/crates/core_arch/src/aarch64/tme.rs
index 05df313e4..15f1b877d 100644
--- a/library/stdarch/crates/core_arch/src/aarch64/tme.rs
+++ b/library/stdarch/crates/core_arch/src/aarch64/tme.rs
@@ -21,9 +21,9 @@ extern "unadjusted" {
#[link_name = "llvm.aarch64.tstart"]
fn aarch64_tstart() -> u64;
#[link_name = "llvm.aarch64.tcommit"]
- fn aarch64_tcommit() -> ();
+ fn aarch64_tcommit();
#[link_name = "llvm.aarch64.tcancel"]
- fn aarch64_tcancel(imm0: u64) -> ();
+ fn aarch64_tcancel(imm0: u64);
#[link_name = "llvm.aarch64.ttest"]
fn aarch64_ttest() -> u64;
}
diff --git a/library/stdarch/crates/core_arch/src/aarch64/v8.rs b/library/stdarch/crates/core_arch/src/aarch64/v8.rs
deleted file mode 100644
index 778721c68..000000000
--- a/library/stdarch/crates/core_arch/src/aarch64/v8.rs
+++ /dev/null
@@ -1,104 +0,0 @@
-//! ARMv8 intrinsics.
-//!
-//! The reference is [ARMv8-A Reference Manual][armv8].
-//!
-//! [armv8]: http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.
-//! ddi0487a.k_10775/index.html
-
-#[cfg(test)]
-use stdarch_test::assert_instr;
-
-/// Reverse the order of the bytes.
-#[inline]
-#[cfg_attr(test, assert_instr(rev))]
-pub unsafe fn _rev_u64(x: u64) -> u64 {
- x.swap_bytes() as u64
-}
-
-/// Count Leading Zeros.
-#[inline]
-#[cfg_attr(test, assert_instr(clz))]
-pub unsafe fn _clz_u64(x: u64) -> u64 {
- x.leading_zeros() as u64
-}
-
-/// Reverse the bit order.
-#[inline]
-#[cfg_attr(test, assert_instr(rbit))]
-pub unsafe fn _rbit_u64(x: u64) -> u64 {
- crate::intrinsics::bitreverse(x)
-}
-
-/// Counts the leading most significant bits set.
-///
-/// When all bits of the operand are set it returns the size of the operand in
-/// bits.
-#[inline]
-#[cfg_attr(test, assert_instr(cls))]
-pub unsafe fn _cls_u32(x: u32) -> u32 {
- u32::leading_zeros((((((x as i32) >> 31) as u32) ^ x) << 1) | 1) as u32
-}
-
-/// Counts the leading most significant bits set.
-///
-/// When all bits of the operand are set it returns the size of the operand in
-/// bits.
-#[inline]
-#[cfg_attr(test, assert_instr(cls))]
-pub unsafe fn _cls_u64(x: u64) -> u64 {
- u64::leading_zeros((((((x as i64) >> 63) as u64) ^ x) << 1) | 1) as u64
-}
-
-#[cfg(test)]
-mod tests {
- use crate::core_arch::aarch64::v8;
-
- #[test]
- fn _rev_u64() {
- unsafe {
- assert_eq!(
- v8::_rev_u64(0b0000_0000_1111_1111_0000_0000_1111_1111_u64),
- 0b1111_1111_0000_0000_1111_1111_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_u64
- );
- }
- }
-
- #[test]
- fn _clz_u64() {
- unsafe {
- assert_eq!(v8::_clz_u64(0b0000_1010u64), 60u64);
- }
- }
-
- #[test]
- fn _rbit_u64() {
- unsafe {
- assert_eq!(
- v8::_rbit_u64(0b0000_0000_1111_1101_0000_0000_1111_1111_u64),
- 0b1111_1111_0000_0000_1011_1111_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_u64
- );
- }
- }
-
- #[test]
- fn _cls_u32() {
- unsafe {
- assert_eq!(
- v8::_cls_u32(0b1111_1111_1111_1111_0000_0000_1111_1111_u32),
- 15_u32
- );
- }
- }
-
- #[test]
- fn _cls_u64() {
- unsafe {
- assert_eq!(
- v8::_cls_u64(
- 0b1111_1111_1111_1111_0000_0000_1111_1111_0000_0000_0000_0000_0000_0000_0000_0000_u64
- ),
- 15_u64
- );
- }
- }
-}
diff --git a/library/stdarch/crates/core_arch/src/arm/armclang.rs b/library/stdarch/crates/core_arch/src/arm/armclang.rs
deleted file mode 100644
index e44ee2f4a..000000000
--- a/library/stdarch/crates/core_arch/src/arm/armclang.rs
+++ /dev/null
@@ -1,35 +0,0 @@
-//! ARM compiler specific intrinsics
-//!
-//! # References
-//!
-//! - [ARM Compiler v 6.10 - armclang Reference Guide][arm_comp_ref]
-//!
-//! [arm_comp_ref]: https://developer.arm.com/docs/100067/0610
-
-#[cfg(test)]
-use stdarch_test::assert_instr;
-
-/// Inserts a breakpoint instruction.
-///
-/// `VAL` is a compile-time constant integer in range `[0, 255]`.
-///
-/// The breakpoint instruction inserted is `BKPT` on A32/T32.
-///
-/// # Note
-///
-/// [ARM's documentation][arm_docs] defines that `__breakpoint` accepts the
-/// following values for `VAL`:
-///
-/// - `0...65535` when compiling as A32,
-/// - `0...255` when compiling as T32.
-///
-/// The current implementation only accepts values in range `[0, 255]`.
-///
-/// [arm_docs]: https://developer.arm.com/docs/100067/latest/compiler-specific-intrinsics/__breakpoint-intrinsic
-#[cfg_attr(test, assert_instr(bkpt, VAL = 0))]
-#[inline(always)]
-#[rustc_legacy_const_generics(0)]
-pub unsafe fn __breakpoint<const VAL: i32>() {
- static_assert_uimm_bits!(VAL, 8);
- crate::arch::asm!("bkpt #{}", const VAL);
-}
diff --git a/library/stdarch/crates/core_arch/src/arm/ex.rs b/library/stdarch/crates/core_arch/src/arm/ex.rs
deleted file mode 100644
index 75f378642..000000000
--- a/library/stdarch/crates/core_arch/src/arm/ex.rs
+++ /dev/null
@@ -1,125 +0,0 @@
-// Reference: Section 5.4.4 "LDREX / STREX" of ACLE
-
-/// Removes the exclusive lock created by LDREX
-// Supported: v6, v6K, v7-M, v7-A, v7-R
-// Not supported: v5, v6-M
-// NOTE: there's no dedicated CLREX instruction in v6 (<v6k); to clear the exclusive monitor users
-// have to do a dummy STREX operation
-#[cfg(any(
- all(target_feature = "v6k", not(target_feature = "mclass")), // excludes v6-M
- all(target_feature = "v7", target_feature = "mclass"), // v7-M
- doc
-))]
-pub unsafe fn __clrex() {
- extern "unadjusted" {
- #[link_name = "llvm.arm.clrex"]
- fn clrex();
- }
-
- clrex()
-}
-
-/// Executes an exclusive LDR instruction for 8 bit value.
-// Supported: v6K, v7-M, v7-A, v7-R
-// Not supported: v5, v6, v6-M
-#[cfg(any(
- target_feature = "v6k", // includes v7-M but excludes v6-M
- doc
-))]
-pub unsafe fn __ldrexb(p: *const u8) -> u8 {
- extern "unadjusted" {
- #[link_name = "llvm.arm.ldrex.p0i8"]
- fn ldrex8(p: *const u8) -> u32;
- }
-
- ldrex8(p) as u8
-}
-
-/// Executes an exclusive LDR instruction for 16 bit value.
-// Supported: v6K, v7-M, v7-A, v7-R, v8
-// Not supported: v5, v6, v6-M
-#[cfg(any(
- target_feature = "v6k", // includes v7-M but excludes v6-M
- doc
-))]
-pub unsafe fn __ldrexh(p: *const u16) -> u16 {
- extern "unadjusted" {
- #[link_name = "llvm.arm.ldrex.p0i16"]
- fn ldrex16(p: *const u16) -> u32;
- }
-
- ldrex16(p) as u16
-}
-
-/// Executes an exclusive LDR instruction for 32 bit value.
-// Supported: v6, v7-M, v6K, v7-A, v7-R, v8
-// Not supported: v5, v6-M
-#[cfg(any(
- all(target_feature = "v6", not(target_feature = "mclass")), // excludes v6-M
- all(target_feature = "v7", target_feature = "mclass"), // v7-M
- doc
-))]
-pub unsafe fn __ldrex(p: *const u32) -> u32 {
- extern "unadjusted" {
- #[link_name = "llvm.arm.ldrex.p0i32"]
- fn ldrex32(p: *const u32) -> u32;
- }
-
- ldrex32(p)
-}
-
-/// Executes an exclusive STR instruction for 8 bit values
-///
-/// Returns `0` if the operation succeeded, or `1` if it failed
-// supported: v6K, v7-M, v7-A, v7-R
-// Not supported: v5, v6, v6-M
-#[cfg(any(
- target_feature = "v6k", // includes v7-M but excludes v6-M
- doc
-))]
-pub unsafe fn __strexb(value: u32, addr: *mut u8) -> u32 {
- extern "unadjusted" {
- #[link_name = "llvm.arm.strex.p0i8"]
- fn strex8(value: u32, addr: *mut u8) -> u32;
- }
-
- strex8(value, addr)
-}
-
-/// Executes an exclusive STR instruction for 16 bit values
-///
-/// Returns `0` if the operation succeeded, or `1` if it failed
-// Supported: v6K, v7-M, v7-A, v7-R, v8
-// Not supported: v5, v6, v6-M
-#[cfg(target_feature = "aarch64")]
-#[cfg(any(
- target_feature = "v6k", // includes v7-M but excludes v6-M
- doc
-))]
-pub unsafe fn __strexh(value: u16, addr: *mut u16) -> u32 {
- extern "unadjusted" {
- #[link_name = "llvm.arm.strex.p0i16"]
- fn strex16(value: u32, addr: *mut u16) -> u32;
- }
-
- strex16(value as u32, addr)
-}
-
-/// Executes an exclusive STR instruction for 32 bit values
-///
-/// Returns `0` if the operation succeeded, or `1` if it failed
-// Supported: v6, v7-M, v6K, v7-A, v7-R, v8
-// Not supported: v5, v6-M
-#[cfg(any(
- all(target_feature = "v6", not(target_feature = "mclass")), // excludes v6-M
- all(target_feature = "v7", target_feature = "mclass"), // v7-M
- doc
-))]
-pub unsafe fn __strex(value: u32, addr: *mut u32) -> u32 {
- extern "unadjusted" {
- #[link_name = "llvm.arm.strex.p0i32"]
- fn strex32(value: u32, addr: *mut u32) -> u32;
- }
-
- strex32(value, addr)
-}
diff --git a/library/stdarch/crates/core_arch/src/arm/mod.rs b/library/stdarch/crates/core_arch/src/arm/mod.rs
index ec91e5de5..9cc75a3cc 100644
--- a/library/stdarch/crates/core_arch/src/arm/mod.rs
+++ b/library/stdarch/crates/core_arch/src/arm/mod.rs
@@ -6,12 +6,6 @@
//! [arm_ref]: http://infocenter.arm.com/help/topic/com.arm.doc.ihi0073a/IHI0073A_arm_neon_intrinsics_ref.pdf
//! [arm_dat]: https://developer.arm.com/technologies/neon/intrinsics
-mod armclang;
-pub use self::armclang::*;
-
-mod v6;
-pub use self::v6::*;
-
// Supported arches: 6, 7-M. See Section 10.1 of ACLE (e.g. SSAT)
#[cfg(any(target_feature = "v6", doc))]
mod sat;
@@ -62,14 +56,6 @@ mod simd32;
))]
pub use self::simd32::*;
-#[cfg(any(target_feature = "v7", doc))]
-mod v7;
-#[cfg(any(target_feature = "v7", doc))]
-pub use self::v7::*;
-
-mod ex;
-pub use self::ex::*;
-
pub use crate::core_arch::arm_shared::*;
#[cfg(test)]
diff --git a/library/stdarch/crates/core_arch/src/arm/neon.rs b/library/stdarch/crates/core_arch/src/arm/neon.rs
index e1de48538..75d3f19e8 100644
--- a/library/stdarch/crates/core_arch/src/arm/neon.rs
+++ b/library/stdarch/crates/core_arch/src/arm/neon.rs
@@ -1,16 +1,9 @@
use crate::core_arch::arm_shared::neon::*;
-use crate::core_arch::simd::{f32x4, i32x4, u32x4};
-use crate::core_arch::simd_llvm::*;
use crate::mem::{align_of, transmute};
#[cfg(test)]
use stdarch_test::assert_instr;
-#[allow(non_camel_case_types)]
-pub(crate) type p8 = u8;
-#[allow(non_camel_case_types)]
-pub(crate) type p16 = u16;
-
#[allow(improper_ctypes)]
extern "unadjusted" {
#[link_name = "llvm.arm.neon.vbsl.v8i8"]
@@ -794,27 +787,6 @@ pub unsafe fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t
))
}
-// These float-to-int implementations have undefined behaviour when `a` overflows
-// the destination type. Clang has the same problem: https://llvm.org/PR47510
-
-/// Floating-point Convert to Signed fixed-point, rounding toward Zero (vector)
-#[inline]
-#[target_feature(enable = "neon")]
-#[target_feature(enable = "v7")]
-#[cfg_attr(test, assert_instr("vcvt.s32.f32"))]
-pub unsafe fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t {
- transmute(simd_cast::<_, i32x4>(transmute::<_, f32x4>(a)))
-}
-
-/// Floating-point Convert to Unsigned fixed-point, rounding toward Zero (vector)
-#[inline]
-#[target_feature(enable = "neon")]
-#[target_feature(enable = "v7")]
-#[cfg_attr(test, assert_instr("vcvt.u32.f32"))]
-pub unsafe fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t {
- transmute(simd_cast::<_, u32x4>(transmute::<_, f32x4>(a)))
-}
-
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon,v7")]
diff --git a/library/stdarch/crates/core_arch/src/arm/v6.rs b/library/stdarch/crates/core_arch/src/arm/v6.rs
deleted file mode 100644
index 5df30cd62..000000000
--- a/library/stdarch/crates/core_arch/src/arm/v6.rs
+++ /dev/null
@@ -1,49 +0,0 @@
-//! ARMv6 intrinsics.
-//!
-//! The reference is [ARMv6-M Architecture Reference Manual][armv6m].
-//!
-//! [armv6m]:
-//! http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0419c/index.
-//! html
-
-#[cfg(test)]
-use stdarch_test::assert_instr;
-
-/// Reverse the order of the bytes.
-#[inline]
-#[cfg_attr(test, assert_instr(rev))]
-pub unsafe fn _rev_u16(x: u16) -> u16 {
- x.swap_bytes() as u16
-}
-
-/// Reverse the order of the bytes.
-#[inline]
-#[cfg_attr(test, assert_instr(rev))]
-pub unsafe fn _rev_u32(x: u32) -> u32 {
- x.swap_bytes() as u32
-}
-
-#[cfg(test)]
-mod tests {
- use crate::core_arch::arm::v6;
-
- #[test]
- fn _rev_u16() {
- unsafe {
- assert_eq!(
- v6::_rev_u16(0b0000_0000_1111_1111_u16),
- 0b1111_1111_0000_0000_u16
- );
- }
- }
-
- #[test]
- fn _rev_u32() {
- unsafe {
- assert_eq!(
- v6::_rev_u32(0b0000_0000_1111_1111_0000_0000_1111_1111_u32),
- 0b1111_1111_0000_0000_1111_1111_0000_0000_u32
- );
- }
- }
-}
diff --git a/library/stdarch/crates/core_arch/src/arm/v7.rs b/library/stdarch/crates/core_arch/src/arm/v7.rs
deleted file mode 100644
index 59beaf722..000000000
--- a/library/stdarch/crates/core_arch/src/arm/v7.rs
+++ /dev/null
@@ -1,87 +0,0 @@
-//! ARMv7 intrinsics.
-//!
-//! The reference is [ARMv7-M Architecture Reference Manual (Issue
-//! E.b)][armv7m].
-//!
-//! [armv7m]:
-//! http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0403e.
-//! b/index.html
-
-pub use super::v6::*;
-
-#[cfg(test)]
-use stdarch_test::assert_instr;
-
-/// Count Leading Zeros.
-#[inline]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))]
-// FIXME: https://github.com/rust-lang/stdarch/issues/382
-// #[cfg_attr(all(test, target_arch = "arm"), assert_instr(clz))]
-pub unsafe fn _clz_u8(x: u8) -> u8 {
- x.leading_zeros() as u8
-}
-
-/// Count Leading Zeros.
-#[inline]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))]
-// FIXME: https://github.com/rust-lang/stdarch/issues/382
-// #[cfg_attr(all(test, target_arch = "arm"), assert_instr(clz))]
-pub unsafe fn _clz_u16(x: u16) -> u16 {
- x.leading_zeros() as u16
-}
-
-/// Count Leading Zeros.
-#[inline]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))]
-// FIXME: https://github.com/rust-lang/stdarch/issues/382
-// #[cfg_attr(all(test, target_arch = "arm"), assert_instr(clz))]
-pub unsafe fn _clz_u32(x: u32) -> u32 {
- x.leading_zeros() as u32
-}
-
-/// Reverse the bit order.
-#[inline]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(test, assert_instr(rbit))]
-pub unsafe fn _rbit_u32(x: u32) -> u32 {
- crate::intrinsics::bitreverse(x)
-}
-
-#[cfg(test)]
-mod tests {
- use crate::core_arch::arm::v7;
-
- #[test]
- fn _clz_u8() {
- unsafe {
- assert_eq!(v7::_clz_u8(0b0000_1010u8), 4u8);
- }
- }
-
- #[test]
- fn _clz_u16() {
- unsafe {
- assert_eq!(v7::_clz_u16(0b0000_1010u16), 12u16);
- }
- }
-
- #[test]
- fn _clz_u32() {
- unsafe {
- assert_eq!(v7::_clz_u32(0b0000_1010u32), 28u32);
- }
- }
-
- #[test]
- fn _rbit_u32() {
- unsafe {
- assert_eq!(
- v7::_rbit_u32(0b0000_1010u32),
- 0b0101_0000_0000_0000_0000_0000_0000_0000u32
- );
- }
- }
-}
diff --git a/library/stdarch/crates/core_arch/src/arm_shared/mod.rs b/library/stdarch/crates/core_arch/src/arm_shared/mod.rs
index 4c8d19854..fc6617f5a 100644
--- a/library/stdarch/crates/core_arch/src/arm_shared/mod.rs
+++ b/library/stdarch/crates/core_arch/src/arm_shared/mod.rs
@@ -59,9 +59,6 @@ pub use self::barrier::*;
mod hints;
pub use self::hints::*;
-mod registers;
-pub use self::registers::*;
-
#[cfg(any(target_arch = "aarch64", target_feature = "v7", doc))]
mod crc;
#[cfg(any(target_arch = "aarch64", target_feature = "v7", doc))]
diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs
index 775811e65..34dc3a334 100644
--- a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs
+++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs
@@ -1532,7 +1532,7 @@ pub unsafe fn vcgtq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t {
simd_gt(a, b)
}
-/// Compare unsigned highe
+/// Compare unsigned greater than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u8)
#[inline]
@@ -1545,7 +1545,7 @@ pub unsafe fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
simd_gt(a, b)
}
-/// Compare unsigned highe
+/// Compare unsigned greater than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u8)
#[inline]
@@ -1558,7 +1558,7 @@ pub unsafe fn vcgtq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
simd_gt(a, b)
}
-/// Compare unsigned highe
+/// Compare unsigned greater than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u16)
#[inline]
@@ -1571,7 +1571,7 @@ pub unsafe fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
simd_gt(a, b)
}
-/// Compare unsigned highe
+/// Compare unsigned greater than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u16)
#[inline]
@@ -1584,7 +1584,7 @@ pub unsafe fn vcgtq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
simd_gt(a, b)
}
-/// Compare unsigned highe
+/// Compare unsigned greater than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u32)
#[inline]
@@ -1597,7 +1597,7 @@ pub unsafe fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
simd_gt(a, b)
}
-/// Compare unsigned highe
+/// Compare unsigned greater than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u32)
#[inline]
@@ -2888,7 +2888,7 @@ vcvt_n_f32_s32_(a, N)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -2925,7 +2925,7 @@ vcvtq_n_f32_s32_(a, N)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -2962,7 +2962,7 @@ vcvt_n_f32_u32_(a, N)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_u32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -2999,7 +2999,7 @@ vcvtq_n_f32_u32_(a, N)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_u32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -3036,7 +3036,7 @@ vcvt_n_s32_f32_(a, N)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s32_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -3073,7 +3073,7 @@ vcvtq_n_s32_f32_(a, N)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s32_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -3110,7 +3110,7 @@ vcvt_n_u32_f32_(a, N)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u32_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -3147,7 +3147,7 @@ vcvtq_n_u32_f32_(a, N)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u32_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -8548,7 +8548,7 @@ vld2_s8_(a as *const i8, 1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s8)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -8581,7 +8581,7 @@ vld2_s16_(a as *const i8, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -8614,7 +8614,7 @@ vld2_s32_(a as *const i8, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -8647,7 +8647,7 @@ vld2q_s8_(a as *const i8, 1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s8)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -8680,7 +8680,7 @@ vld2q_s16_(a as *const i8, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -8713,7 +8713,7 @@ vld2q_s32_(a as *const i8, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -8746,7 +8746,7 @@ vld2_s64_(a as *const i8, 8)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s64)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -8935,7 +8935,7 @@ vld2_f32_(a as *const i8, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -8968,7 +8968,7 @@ vld2q_f32_(a as *const i8, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -9001,7 +9001,7 @@ vld2_dup_s8_(a as *const i8, 1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s8)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -9034,7 +9034,7 @@ vld2_dup_s16_(a as *const i8, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -9067,7 +9067,7 @@ vld2_dup_s32_(a as *const i8, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -9100,7 +9100,7 @@ vld2q_dup_s8_(a as *const i8, 1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s8)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -9133,7 +9133,7 @@ vld2q_dup_s16_(a as *const i8, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -9166,7 +9166,7 @@ vld2q_dup_s32_(a as *const i8, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -9199,7 +9199,7 @@ vld2_dup_s64_(a as *const i8, 8)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s64)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -9388,7 +9388,7 @@ vld2_dup_f32_(a as *const i8, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -9421,7 +9421,7 @@ vld2q_dup_f32_(a as *const i8, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -9456,7 +9456,7 @@ vld2_lane_s8_(a as _, b.0, b.1, LANE, 1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s8)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -9493,7 +9493,7 @@ vld2_lane_s16_(a as _, b.0, b.1, LANE, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -9530,7 +9530,7 @@ vld2_lane_s32_(a as _, b.0, b.1, LANE, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -9567,7 +9567,7 @@ vld2q_lane_s16_(a as _, b.0, b.1, LANE, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -9604,7 +9604,7 @@ vld2q_lane_s32_(a as _, b.0, b.1, LANE, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -9761,7 +9761,7 @@ vld2_lane_f32_(a as _, b.0, b.1, LANE, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
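For the _lane variants, the lane index is a const generic (hence the rustc_legacy_const_generics attribute above). A small sketch with a hypothetical wrapper name and an assumed signature:

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn reload_lane_one(
    src: *const f32,
    acc: core::arch::aarch64::float32x2x2_t,
) -> core::arch::aarch64::float32x2x2_t {
    // Reads two f32 values from `src` and overwrites lane 1 of both
    // vectors in `acc`; the other lanes are passed through unchanged.
    core::arch::aarch64::vld2_lane_f32::<1>(src, acc)
}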
@@ -9798,7 +9798,7 @@ vld2q_lane_f32_(a as _, b.0, b.1, LANE, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -9833,7 +9833,7 @@ vld3_s8_(a as *const i8, 1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -9866,7 +9866,7 @@ vld3_s16_(a as *const i8, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -9899,7 +9899,7 @@ vld3_s32_(a as *const i8, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -9932,7 +9932,7 @@ vld3q_s8_(a as *const i8, 1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -9965,7 +9965,7 @@ vld3q_s16_(a as *const i8, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -9998,7 +9998,7 @@ vld3q_s32_(a as *const i8, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -10031,7 +10031,7 @@ vld3_s64_(a as *const i8, 8)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s64)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -10220,7 +10220,7 @@ vld3_f32_(a as *const i8, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -10253,7 +10253,7 @@ vld3q_f32_(a as *const i8, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -10286,7 +10286,7 @@ vld3_dup_s8_(a as *const i8, 1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s8)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -10319,7 +10319,7 @@ vld3_dup_s16_(a as *const i8, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -10352,7 +10352,7 @@ vld3_dup_s32_(a as *const i8, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -10385,7 +10385,7 @@ vld3q_dup_s8_(a as *const i8, 1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s8)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -10418,7 +10418,7 @@ vld3q_dup_s16_(a as *const i8, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -10451,7 +10451,7 @@ vld3q_dup_s32_(a as *const i8, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -10484,7 +10484,7 @@ vld3_dup_s64_(a as *const i8, 8)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s64)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -10673,7 +10673,7 @@ vld3_dup_f32_(a as *const i8, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -10706,7 +10706,7 @@ vld3q_dup_f32_(a as *const i8, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -10741,7 +10741,7 @@ vld3_lane_s8_(a as _, b.0, b.1, b.2, LANE, 1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -10778,7 +10778,7 @@ vld3_lane_s16_(a as _, b.0, b.1, b.2, LANE, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -10815,7 +10815,7 @@ vld3_lane_s32_(a as _, b.0, b.1, b.2, LANE, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -10852,7 +10852,7 @@ vld3q_lane_s16_(a as _, b.0, b.1, b.2, LANE, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -10889,7 +10889,7 @@ vld3q_lane_s32_(a as _, b.0, b.1, b.2, LANE, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -11046,7 +11046,7 @@ vld3_lane_f32_(a as _, b.0, b.1, b.2, LANE, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -11083,7 +11083,7 @@ vld3q_lane_f32_(a as _, b.0, b.1, b.2, LANE, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -11118,7 +11118,7 @@ vld4_s8_(a as *const i8, 1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -11151,7 +11151,7 @@ vld4_s16_(a as *const i8, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -11184,7 +11184,7 @@ vld4_s32_(a as *const i8, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -11217,7 +11217,7 @@ vld4q_s8_(a as *const i8, 1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -11250,7 +11250,7 @@ vld4q_s16_(a as *const i8, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -11283,7 +11283,7 @@ vld4q_s32_(a as *const i8, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -11316,7 +11316,7 @@ vld4_s64_(a as *const i8, 8)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -11505,7 +11505,7 @@ vld4_f32_(a as *const i8, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -11538,7 +11538,7 @@ vld4q_f32_(a as *const i8, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -11571,7 +11571,7 @@ vld4_dup_s8_(a as *const i8, 1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -11604,7 +11604,7 @@ vld4_dup_s16_(a as *const i8, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -11637,7 +11637,7 @@ vld4_dup_s32_(a as *const i8, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -11670,7 +11670,7 @@ vld4q_dup_s8_(a as *const i8, 1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -11703,7 +11703,7 @@ vld4q_dup_s16_(a as *const i8, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -11736,7 +11736,7 @@ vld4q_dup_s32_(a as *const i8, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -11769,7 +11769,7 @@ vld4_dup_s64_(a as *const i8, 8)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s64)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -11958,7 +11958,7 @@ vld4_dup_f32_(a as *const i8, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -11991,7 +11991,7 @@ vld4q_dup_f32_(a as *const i8, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -12026,7 +12026,7 @@ vld4_lane_s8_(a as _, b.0, b.1, b.2, b.3, LANE, 1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -12063,7 +12063,7 @@ vld4_lane_s16_(a as _, b.0, b.1, b.2, b.3, LANE, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -12100,7 +12100,7 @@ vld4_lane_s32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -12137,7 +12137,7 @@ vld4q_lane_s16_(a as _, b.0, b.1, b.2, b.3, LANE, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -12174,7 +12174,7 @@ vld4q_lane_s32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -12331,7 +12331,7 @@ vld4_lane_f32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -12368,7 +12368,7 @@ vld4q_lane_f32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -12763,7 +12763,7 @@ vst1_s8_x2_(a, b.0, b.1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x2)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -12796,7 +12796,7 @@ vst1_s16_x2_(a, b.0, b.1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x2)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -12829,7 +12829,7 @@ vst1_s32_x2_(a, b.0, b.1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x2)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -12862,7 +12862,7 @@ vst1_s64_x2_(a, b.0, b.1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x2)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -12895,7 +12895,7 @@ vst1q_s8_x2_(a, b.0, b.1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x2)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -12928,7 +12928,7 @@ vst1q_s16_x2_(a, b.0, b.1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x2)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -12961,7 +12961,7 @@ vst1q_s32_x2_(a, b.0, b.1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x2)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -12994,7 +12994,7 @@ vst1q_s64_x2_(a, b.0, b.1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x2)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -13027,7 +13027,7 @@ vst1_s8_x3_(a, b.0, b.1, b.2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x3)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -13060,7 +13060,7 @@ vst1_s16_x3_(a, b.0, b.1, b.2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x3)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -13093,7 +13093,7 @@ vst1_s32_x3_(a, b.0, b.1, b.2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x3)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -13126,7 +13126,7 @@ vst1_s64_x3_(a, b.0, b.1, b.2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x3)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -13159,7 +13159,7 @@ vst1q_s8_x3_(a, b.0, b.1, b.2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x3)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -13192,7 +13192,7 @@ vst1q_s16_x3_(a, b.0, b.1, b.2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x3)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -13225,7 +13225,7 @@ vst1q_s32_x3_(a, b.0, b.1, b.2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x3)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -13258,7 +13258,7 @@ vst1q_s64_x3_(a, b.0, b.1, b.2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x3)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -13291,7 +13291,7 @@ vst1_s8_x4_(a, b.0, b.1, b.2, b.3)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x4)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -13324,7 +13324,7 @@ vst1_s16_x4_(a, b.0, b.1, b.2, b.3)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x4)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -13357,7 +13357,7 @@ vst1_s32_x4_(a, b.0, b.1, b.2, b.3)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x4)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -13390,7 +13390,7 @@ vst1_s64_x4_(a, b.0, b.1, b.2, b.3)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x4)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -13423,7 +13423,7 @@ vst1q_s8_x4_(a, b.0, b.1, b.2, b.3)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x4)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -13456,7 +13456,7 @@ vst1q_s16_x4_(a, b.0, b.1, b.2, b.3)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x4)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -13489,7 +13489,7 @@ vst1q_s32_x4_(a, b.0, b.1, b.2, b.3)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x4)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -13522,7 +13522,7 @@ vst1q_s64_x4_(a, b.0, b.1, b.2, b.3)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x4)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -14101,7 +14101,7 @@ vst1_f32_x2_(a, b.0, b.1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x2)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -14134,7 +14134,7 @@ vst1q_f32_x2_(a, b.0, b.1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x2)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -14167,7 +14167,7 @@ vst1_f32_x3_(a, b.0, b.1, b.2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x3)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -14200,7 +14200,7 @@ vst1q_f32_x3_(a, b.0, b.1, b.2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x3)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -14233,7 +14233,7 @@ vst1_f32_x4_(a, b.0, b.1, b.2, b.3)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x4)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -14266,7 +14266,7 @@ vst1q_f32_x4_(a, b.0, b.1, b.2, b.3)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x4)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -14299,7 +14299,7 @@ vst2_s8_(a as _, b.0, b.1, 1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s8)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -14332,7 +14332,7 @@ vst2_s16_(a as _, b.0, b.1, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -14365,7 +14365,7 @@ vst2_s32_(a as _, b.0, b.1, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -14398,7 +14398,7 @@ vst2q_s8_(a as _, b.0, b.1, 1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s8)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -14431,7 +14431,7 @@ vst2q_s16_(a as _, b.0, b.1, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -14464,7 +14464,7 @@ vst2q_s32_(a as _, b.0, b.1, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -14497,7 +14497,7 @@ vst2_s64_(a as _, b.0, b.1, 8)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s64)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -14686,7 +14686,7 @@ vst2_f32_(a as _, b.0, b.1, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
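The vst2 stores are the inverse of the vld2 loads: they interleave two registers back into memory. A minimal sketch (hypothetical wrapper, assumed signature):

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn interleave_pairs(
    dst: *mut f32,
    even: core::arch::aarch64::float32x2_t,
    odd: core::arch::aarch64::float32x2_t,
) {
    use core::arch::aarch64::{float32x2x2_t, vst2_f32};
    // Writes four f32 values: dst[0] = even[0], dst[1] = odd[0],
    // dst[2] = even[1], dst[3] = odd[1].
    vst2_f32(dst, float32x2x2_t(even, odd));
}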
@@ -14719,7 +14719,7 @@ vst2q_f32_(a as _, b.0, b.1, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -14754,7 +14754,7 @@ vst2_lane_s8_(a as _, b.0, b.1, LANE, 1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s8)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -14791,7 +14791,7 @@ vst2_lane_s16_(a as _, b.0, b.1, LANE, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -14828,7 +14828,7 @@ vst2_lane_s32_(a as _, b.0, b.1, LANE, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -14865,7 +14865,7 @@ vst2q_lane_s16_(a as _, b.0, b.1, LANE, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -14902,7 +14902,7 @@ vst2q_lane_s32_(a as _, b.0, b.1, LANE, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -15059,7 +15059,7 @@ vst2_lane_f32_(a as _, b.0, b.1, LANE, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -15096,7 +15096,7 @@ vst2q_lane_f32_(a as _, b.0, b.1, LANE, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -15131,7 +15131,7 @@ vst3_s8_(a as _, b.0, b.1, b.2, 1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s8)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -15164,7 +15164,7 @@ vst3_s16_(a as _, b.0, b.1, b.2, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -15197,7 +15197,7 @@ vst3_s32_(a as _, b.0, b.1, b.2, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -15230,7 +15230,7 @@ vst3q_s8_(a as _, b.0, b.1, b.2, 1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s8)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -15263,7 +15263,7 @@ vst3q_s16_(a as _, b.0, b.1, b.2, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -15296,7 +15296,7 @@ vst3q_s32_(a as _, b.0, b.1, b.2, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -15329,7 +15329,7 @@ vst3_s64_(a as _, b.0, b.1, b.2, 8)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s64)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -15518,7 +15518,7 @@ vst3_f32_(a as _, b.0, b.1, b.2, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -15551,7 +15551,7 @@ vst3q_f32_(a as _, b.0, b.1, b.2, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -15586,7 +15586,7 @@ vst3_lane_s8_(a as _, b.0, b.1, b.2, LANE, 1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s8)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -15623,7 +15623,7 @@ vst3_lane_s16_(a as _, b.0, b.1, b.2, LANE, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -15660,7 +15660,7 @@ vst3_lane_s32_(a as _, b.0, b.1, b.2, LANE, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -15697,7 +15697,7 @@ vst3q_lane_s16_(a as _, b.0, b.1, b.2, LANE, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -15734,7 +15734,7 @@ vst3q_lane_s32_(a as _, b.0, b.1, b.2, LANE, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -15891,7 +15891,7 @@ vst3_lane_f32_(a as _, b.0, b.1, b.2, LANE, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -15928,7 +15928,7 @@ vst3q_lane_f32_(a as _, b.0, b.1, b.2, LANE, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -15963,7 +15963,7 @@ vst4_s8_(a as _, b.0, b.1, b.2, b.3, 1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s8)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -15996,7 +15996,7 @@ vst4_s16_(a as _, b.0, b.1, b.2, b.3, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -16029,7 +16029,7 @@ vst4_s32_(a as _, b.0, b.1, b.2, b.3, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -16062,7 +16062,7 @@ vst4q_s8_(a as _, b.0, b.1, b.2, b.3, 1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s8)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -16095,7 +16095,7 @@ vst4q_s16_(a as _, b.0, b.1, b.2, b.3, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -16128,7 +16128,7 @@ vst4q_s32_(a as _, b.0, b.1, b.2, b.3, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -16161,7 +16161,7 @@ vst4_s64_(a as _, b.0, b.1, b.2, b.3, 8)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s64)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -16350,7 +16350,7 @@ vst4_f32_(a as _, b.0, b.1, b.2, b.3, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -16383,7 +16383,7 @@ vst4q_f32_(a as _, b.0, b.1, b.2, b.3, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -16418,7 +16418,7 @@ vst4_lane_s8_(a as _, b.0, b.1, b.2, b.3, LANE, 1)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s8)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -16455,7 +16455,7 @@ vst4_lane_s16_(a as _, b.0, b.1, b.2, b.3, LANE, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -16492,7 +16492,7 @@ vst4_lane_s32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -16529,7 +16529,7 @@ vst4q_lane_s16_(a as _, b.0, b.1, b.2, b.3, LANE, 2)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -16566,7 +16566,7 @@ vst4q_lane_s32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -16723,7 +16723,7 @@ vst4_lane_f32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -16760,7 +16760,7 @@ vst4q_lane_f32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
@@ -16775,6 +16775,106 @@ pub unsafe fn vst4q_lane_f32<const LANE: i32>(a: *mut f32, b: float32x4x4_t) {
vst4q_lane_f32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
+/// Dot product vector form with unsigned and signed integers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_s32)
+#[inline]
+#[target_feature(enable = "neon,i8mm")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usdot))]
+pub unsafe fn vusdot_s32(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t {
+ #[allow(improper_ctypes)]
+ extern "unadjusted" {
+ #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usdot.v2i32.v8i8")]
+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.usdot.v2i32.v8i8")]
+ fn vusdot_s32_(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t;
+ }
+vusdot_s32_(a, b, c)
+}
+
+/// Dot product vector form with unsigned and signed integers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_s32)
+#[inline]
+#[target_feature(enable = "neon,i8mm")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usdot))]
+pub unsafe fn vusdotq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t {
+ #[allow(improper_ctypes)]
+ extern "unadjusted" {
+ #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usdot.v4i32.v16i8")]
+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.usdot.v4i32.v16i8")]
+ fn vusdotq_s32_(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t;
+ }
+vusdotq_s32_(a, b, c)
+}
+
+/// Dot product index form with unsigned and signed integers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_lane_s32)
+#[inline]
+#[target_feature(enable = "neon,i8mm")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot, LANE = 0))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usdot, LANE = 0))]
+#[rustc_legacy_const_generics(3)]
+pub unsafe fn vusdot_lane_s32<const LANE: i32>(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t {
+ static_assert_uimm_bits!(LANE, 1);
+ let c: int32x2_t = transmute(c);
+ let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
+ vusdot_s32(a, b, transmute(c))
+}
+
+/// Dot product index form with unsigned and signed integers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_lane_s32)
+#[inline]
+#[target_feature(enable = "neon,i8mm")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot, LANE = 0))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usdot, LANE = 0))]
+#[rustc_legacy_const_generics(3)]
+pub unsafe fn vusdotq_lane_s32<const LANE: i32>(a: int32x4_t, b: uint8x16_t, c: int8x8_t) -> int32x4_t {
+ static_assert_uimm_bits!(LANE, 1);
+ let c: int32x2_t = transmute(c);
+ let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
+ vusdotq_s32(a, b, transmute(c))
+}
+
+/// Dot product index form with signed and unsigned integers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_lane_s32)
+#[inline]
+#[target_feature(enable = "neon,i8mm")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsudot, LANE = 0))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sudot, LANE = 0))]
+#[rustc_legacy_const_generics(3)]
+pub unsafe fn vsudot_lane_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: uint8x8_t) -> int32x2_t {
+ static_assert_uimm_bits!(LANE, 1);
+ let c: uint32x2_t = transmute(c);
+ let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
+ vusdot_s32(a, transmute(c), b)
+}
+
+/// Dot product index form with signed and unsigned integers
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_lane_s32)
+#[inline]
+#[target_feature(enable = "neon,i8mm")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsudot, LANE = 0))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sudot, LANE = 0))]
+#[rustc_legacy_const_generics(3)]
+pub unsafe fn vsudotq_lane_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: uint8x8_t) -> int32x4_t {
+ static_assert_uimm_bits!(LANE, 1);
+ let c: uint32x2_t = transmute(c);
+ let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
+ vusdotq_s32(a, transmute(c), b)
+}
+
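// A minimal scalar sketch of the arithmetic the mixed-sign dot-product
// intrinsics above perform (`usdot_scalar` is a hypothetical helper, not part
// of this API): each 32-bit accumulator lane adds the dot product of four
// unsigned bytes of `b` with four signed bytes of `c`; the `_lane` forms
// first broadcast one selected 4-byte group of `c` to every position.
fn usdot_scalar(acc: [i32; 2], b: [u8; 8], c: [i8; 8]) -> [i32; 2] {
    let mut out = acc;
    for lane in 0..2 {
        for k in 0..4 {
            out[lane] += b[4 * lane + k] as i32 * c[4 * lane + k] as i32;
        }
    }
    out
}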
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s8)
@@ -18737,6 +18837,142 @@ pub unsafe fn vsubl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t {
simd_sub(c, d)
}
+/// Dot product arithmetic (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_s32)
+#[inline]
+#[target_feature(enable = "neon,dotprod")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sdot))]
+pub unsafe fn vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t {
+ #[allow(improper_ctypes)]
+ extern "unadjusted" {
+ #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sdot.v2i32.v8i8")]
+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sdot.v2i32.v8i8")]
+ fn vdot_s32_(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t;
+ }
+vdot_s32_(a, b, c)
+}
+
+/// Dot product arithmetic (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_s32)
+#[inline]
+#[target_feature(enable = "neon,dotprod")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sdot))]
+pub unsafe fn vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t {
+ #[allow(improper_ctypes)]
+ extern "unadjusted" {
+ #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sdot.v4i32.v16i8")]
+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sdot.v4i32.v16i8")]
+ fn vdotq_s32_(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t;
+ }
+vdotq_s32_(a, b, c)
+}
+
+/// Dot product arithmetic (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_u32)
+#[inline]
+#[target_feature(enable = "neon,dotprod")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(udot))]
+pub unsafe fn vdot_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t {
+ #[allow(improper_ctypes)]
+ extern "unadjusted" {
+ #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.udot.v2i32.v8i8")]
+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.udot.v2i32.v8i8")]
+ fn vdot_u32_(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t;
+ }
+vdot_u32_(a, b, c)
+}
+
+/// Dot product arithmetic (vector)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_u32)
+#[inline]
+#[target_feature(enable = "neon,dotprod")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(udot))]
+pub unsafe fn vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t {
+ #[allow(improper_ctypes)]
+ extern "unadjusted" {
+ #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.udot.v4i32.v16i8")]
+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.udot.v4i32.v16i8")]
+ fn vdotq_u32_(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t;
+ }
+vdotq_u32_(a, b, c)
+}
+
+/// Dot product arithmetic (indexed)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_s32)
+#[inline]
+#[target_feature(enable = "neon,dotprod")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot, LANE = 0))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sdot, LANE = 0))]
+#[rustc_legacy_const_generics(3)]
+pub unsafe fn vdot_lane_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t {
+ static_assert_uimm_bits!(LANE, 1);
+ let c: int32x2_t = transmute(c);
+ let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
+ vdot_s32(a, b, transmute(c))
+}
+
+/// Dot product arithmetic (indexed)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_s32)
+#[inline]
+#[target_feature(enable = "neon,dotprod")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot, LANE = 0))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sdot, LANE = 0))]
+#[rustc_legacy_const_generics(3)]
+pub unsafe fn vdotq_lane_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: int8x8_t) -> int32x4_t {
+ static_assert_uimm_bits!(LANE, 1);
+ let c: int32x2_t = transmute(c);
+ let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
+ vdotq_s32(a, b, transmute(c))
+}
+
+/// Dot product arithmetic (indexed)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_u32)
+#[inline]
+#[target_feature(enable = "neon,dotprod")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot, LANE = 0))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(udot, LANE = 0))]
+#[rustc_legacy_const_generics(3)]
+pub unsafe fn vdot_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t {
+ static_assert_uimm_bits!(LANE, 1);
+ let c: uint32x2_t = transmute(c);
+ let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
+ vdot_u32(a, b, transmute(c))
+}
+
+/// Dot product arithmetic (indexed)
+///
+/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_u32)
+#[inline]
+#[target_feature(enable = "neon,dotprod")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot, LANE = 0))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(udot, LANE = 0))]
+#[rustc_legacy_const_generics(3)]
+pub unsafe fn vdotq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint8x16_t, c: uint8x8_t) -> uint32x4_t {
+ static_assert_uimm_bits!(LANE, 1);
+ let c: uint32x2_t = transmute(c);
+ let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
+ vdotq_u32(a, b, transmute(c))
+}
+
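// A sketch of the `c` handling in the `_lane` variants above: the chosen
// 32-bit group (four bytes) of `c` is replicated across the vector before the
// plain dot product runs, which is what the transmute + simd_shuffle! pair
// implements. `broadcast_group` is a hypothetical helper covering only the
// two-lane (64-bit accumulator) case.
fn broadcast_group(c: [i8; 8], lane: usize) -> [i8; 8] {
    let group = [c[4 * lane], c[4 * lane + 1], c[4 * lane + 2], c[4 * lane + 3]];
    let mut out = [0i8; 8];
    for half in 0..2 {
        out[4 * half..4 * half + 4].copy_from_slice(&group);
    }
    out
}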
/// Maximum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s8)
@@ -20569,7 +20805,7 @@ vqrshrn_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i1
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -20606,7 +20842,7 @@ vqrshrn_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32))
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -20643,7 +20879,7 @@ vqrshrn_n_s64_(a, int64x2_t(-N as i64, -N as i64))
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -20680,7 +20916,7 @@ vqrshrn_n_u16_(a, uint16x8_t(-N as u16, -N as u16, -N as u16, -N as u16, -N as u
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -20717,7 +20953,7 @@ vqrshrn_n_u32_(a, uint32x4_t(-N as u32, -N as u32, -N as u32, -N as u32))
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -20754,7 +20990,7 @@ vqrshrn_n_u64_(a, uint64x2_t(-N as u64, -N as u64))
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u64)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -20791,7 +21027,7 @@ vqrshrun_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -20828,7 +21064,7 @@ vqrshrun_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32))
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -20865,7 +21101,7 @@ vqrshrun_n_s64_(a, int64x2_t(-N as i64, -N as i64))
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -21446,7 +21682,7 @@ vqshlu_n_s8_(a, int8x8_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -21483,7 +21719,7 @@ vqshlu_n_s16_(a, int16x4_t(N as i16, N as i16, N as i16, N as i16))
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -21520,7 +21756,7 @@ vqshlu_n_s32_(a, int32x2_t(N as i32, N as i32))
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -21557,7 +21793,7 @@ vqshlu_n_s64_(a, int64x1_t(N as i64))
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s64)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -21594,7 +21830,7 @@ vqshluq_n_s8_(a, int8x16_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8,
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -21631,7 +21867,7 @@ vqshluq_n_s16_(a, int16x8_t(N as i16, N as i16, N as i16, N as i16, N as i16, N
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -21668,7 +21904,7 @@ vqshluq_n_s32_(a, int32x4_t(N as i32, N as i32, N as i32, N as i32))
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -21705,7 +21941,7 @@ vqshluq_n_s64_(a, int64x2_t(N as i64, N as i64))
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -21742,7 +21978,7 @@ vqshrn_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i16
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -21779,7 +22015,7 @@ vqshrn_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32))
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -21816,7 +22052,7 @@ vqshrn_n_s64_(a, int64x2_t(-N as i64, -N as i64))
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -21853,7 +22089,7 @@ vqshrn_n_u16_(a, uint16x8_t(-N as u16, -N as u16, -N as u16, -N as u16, -N as u1
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -21890,7 +22126,7 @@ vqshrn_n_u32_(a, uint32x4_t(-N as u32, -N as u32, -N as u32, -N as u32))
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -21927,7 +22163,7 @@ vqshrn_n_u64_(a, uint64x2_t(-N as u64, -N as u64))
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -21964,7 +22200,7 @@ vqshrun_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i1
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -22001,7 +22237,7 @@ vqshrun_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32))
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -22038,7 +22274,7 @@ vqshrun_n_s64_(a, int64x2_t(-N as i64, -N as i64))
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -26185,7 +26421,7 @@ vrshlq_u64_(a, b)
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshr_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
static_assert!(N >= 1 && N <= 8);
- vrshl_s8(a, vdup_n_s8((-N) as _))
+ vrshl_s8(a, vdup_n_s8(-N as _))
}
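// What "rounding shift right by N" means here, as a scalar sketch: add half
// of the discarded unit, then arithmetic-shift right. The intrinsics above
// get the same effect from a rounding *left* shift with a negated count;
// widening to i16 keeps the i8 case from overflowing.
fn rshr_n_i8(x: i8, n: u32) -> i8 {
    debug_assert!(n >= 1 && n <= 8);
    (((x as i16) + (1 << (n - 1))) >> n) as i8
}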
/// Signed rounding shift right
@@ -26200,7 +26436,7 @@ pub unsafe fn vrshr_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
static_assert!(N >= 1 && N <= 8);
- vrshlq_s8(a, vdupq_n_s8((-N) as _))
+ vrshlq_s8(a, vdupq_n_s8(-N as _))
}
/// Signed rounding shift right
@@ -26215,7 +26451,7 @@ pub unsafe fn vrshrq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshr_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
static_assert!(N >= 1 && N <= 16);
- vrshl_s16(a, vdup_n_s16((-N) as _))
+ vrshl_s16(a, vdup_n_s16(-N as _))
}
/// Signed rounding shift right
@@ -26230,7 +26466,7 @@ pub unsafe fn vrshr_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
static_assert!(N >= 1 && N <= 16);
- vrshlq_s16(a, vdupq_n_s16((-N) as _))
+ vrshlq_s16(a, vdupq_n_s16(-N as _))
}
/// Signed rounding shift right
@@ -26245,7 +26481,7 @@ pub unsafe fn vrshrq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshr_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
static_assert!(N >= 1 && N <= 32);
- vrshl_s32(a, vdup_n_s32((-N) as _))
+ vrshl_s32(a, vdup_n_s32(-N as _))
}
/// Signed rounding shift right
@@ -26260,7 +26496,7 @@ pub unsafe fn vrshr_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
static_assert!(N >= 1 && N <= 32);
- vrshlq_s32(a, vdupq_n_s32((-N) as _))
+ vrshlq_s32(a, vdupq_n_s32(-N as _))
}
/// Signed rounding shift right
@@ -26275,7 +26511,7 @@ pub unsafe fn vrshrq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshr_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
static_assert!(N >= 1 && N <= 64);
- vrshl_s64(a, vdup_n_s64((-N) as _))
+ vrshl_s64(a, vdup_n_s64(-N as _))
}
/// Signed rounding shift right
@@ -26290,7 +26526,7 @@ pub unsafe fn vrshr_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
static_assert!(N >= 1 && N <= 64);
- vrshlq_s64(a, vdupq_n_s64((-N) as _))
+ vrshlq_s64(a, vdupq_n_s64(-N as _))
}
/// Unsigned rounding shift right
@@ -26305,7 +26541,7 @@ pub unsafe fn vrshrq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshr_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
static_assert!(N >= 1 && N <= 8);
- vrshl_u8(a, vdup_n_s8((-N) as _))
+ vrshl_u8(a, vdup_n_s8(-N as _))
}
/// Unsigned rounding shift right
@@ -26320,7 +26556,7 @@ pub unsafe fn vrshr_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
static_assert!(N >= 1 && N <= 8);
- vrshlq_u8(a, vdupq_n_s8((-N) as _))
+ vrshlq_u8(a, vdupq_n_s8(-N as _))
}
/// Unsigned rounding shift right
@@ -26335,7 +26571,7 @@ pub unsafe fn vrshrq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshr_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
static_assert!(N >= 1 && N <= 16);
- vrshl_u16(a, vdup_n_s16((-N) as _))
+ vrshl_u16(a, vdup_n_s16(-N as _))
}
/// Unsigned rounding shift right
@@ -26350,7 +26586,7 @@ pub unsafe fn vrshr_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
static_assert!(N >= 1 && N <= 16);
- vrshlq_u16(a, vdupq_n_s16((-N) as _))
+ vrshlq_u16(a, vdupq_n_s16(-N as _))
}
/// Unsigned rounding shift right
@@ -26365,7 +26601,7 @@ pub unsafe fn vrshrq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshr_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
static_assert!(N >= 1 && N <= 32);
- vrshl_u32(a, vdup_n_s32((-N) as _))
+ vrshl_u32(a, vdup_n_s32(-N as _))
}
/// Unsigned rounding shift right
@@ -26380,7 +26616,7 @@ pub unsafe fn vrshr_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
static_assert!(N >= 1 && N <= 32);
- vrshlq_u32(a, vdupq_n_s32((-N) as _))
+ vrshlq_u32(a, vdupq_n_s32(-N as _))
}
/// Unsigned rounding shift right
@@ -26395,7 +26631,7 @@ pub unsafe fn vrshrq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshr_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
static_assert!(N >= 1 && N <= 64);
- vrshl_u64(a, vdup_n_s64((-N) as _))
+ vrshl_u64(a, vdup_n_s64(-N as _))
}
/// Unsigned rounding shift right
@@ -26410,7 +26646,7 @@ pub unsafe fn vrshr_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
static_assert!(N >= 1 && N <= 64);
- vrshlq_u64(a, vdupq_n_s64((-N) as _))
+ vrshlq_u64(a, vdupq_n_s64(-N as _))
}
/// Rounding shift right narrow
@@ -26435,7 +26671,7 @@ vrshrn_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i16
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -26472,7 +26708,7 @@ vrshrn_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32))
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -26509,7 +26745,7 @@ vrshrn_n_s64_(a, int64x2_t(-N as i64, -N as i64))
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)
#[inline]
-#[cfg(target_arch = "aarch64")]
+#[cfg(not(target_arch = "arm"))]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
@@ -28882,7 +29118,7 @@ pub unsafe fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t {
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t {
let a0: int8x16_t = simd_shuffle!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]);
@@ -28897,7 +29133,7 @@ pub unsafe fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t {
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t {
let a0: int16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]);
@@ -28912,7 +29148,7 @@ pub unsafe fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t {
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t {
let a0: int32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]);
@@ -28927,7 +29163,7 @@ pub unsafe fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t {
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t {
let a0: uint8x16_t = simd_shuffle!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]);
@@ -28942,7 +29178,7 @@ pub unsafe fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t {
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t {
let a0: uint16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]);
@@ -28957,7 +29193,7 @@ pub unsafe fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t {
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t {
let a0: uint32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]);
@@ -28972,7 +29208,7 @@ pub unsafe fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t {
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t {
let a0: poly8x16_t = simd_shuffle!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]);
@@ -28987,7 +29223,7 @@ pub unsafe fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t {
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzipq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t {
let a0: poly16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]);
@@ -29017,7 +29253,7 @@ pub unsafe fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t {
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t {
let a0: float32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]);
@@ -37823,6 +38059,94 @@ mod test {
assert_eq!(r, e);
}
+ #[simd_test(enable = "neon,i8mm")]
+ unsafe fn test_vusdot_s32() {
+ let a: i32x2 = i32x2::new(1000, -4200);
+ let b: u8x8 = u8x8::new(100, 205, 110, 195, 120, 185, 130, 175);
+ let c: i8x8 = i8x8::new(0, 1, 2, 3, -1, -2, -3, -4);
+ let e: i32x2 = i32x2::new(2010, -5780);
+ let r: i32x2 = transmute(vusdot_s32(transmute(a), transmute(b), transmute(c)));
+ assert_eq!(r, e);
+ }
+
+ #[simd_test(enable = "neon,i8mm")]
+ unsafe fn test_vusdotq_s32() {
+ let a: i32x4 = i32x4::new(1000, -4200, -1000, 2000);
+ let b: u8x16 = u8x16::new(100, 205, 110, 195, 120, 185, 130, 175, 140, 165, 150, 155, 160, 145, 170, 135);
+ let c: i8x16 = i8x16::new(0, 1, 2, 3, -1, -2, -3, -4, 4, 5, 6, 7, -5, -6, -7, -8);
+ let e: i32x4 = i32x4::new(2010, -5780, 2370, -1940);
+ let r: i32x4 = transmute(vusdotq_s32(transmute(a), transmute(b), transmute(c)));
+ assert_eq!(r, e);
+ }
+
+ #[simd_test(enable = "neon,i8mm")]
+ unsafe fn test_vusdot_lane_s32() {
+ let a: i32x2 = i32x2::new(1000, -4200);
+ let b: u8x8 = u8x8::new(100, 110, 120, 130, 140, 150, 160, 170);
+ let c: i8x8 = i8x8::new(4, 3, 2, 1, 0, -1, -2, -3);
+ let e: i32x2 = i32x2::new(2100, -2700);
+ let r: i32x2 = transmute(vusdot_lane_s32::<0>(transmute(a), transmute(b), transmute(c)));
+ assert_eq!(r, e);
+
+ let a: i32x2 = i32x2::new(1000, -4200);
+ let b: u8x8 = u8x8::new(100, 110, 120, 130, 140, 150, 160, 170);
+ let c: i8x8 = i8x8::new(4, 3, 2, 1, 0, -1, -2, -3);
+ let e: i32x2 = i32x2::new(260, -5180);
+ let r: i32x2 = transmute(vusdot_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
+ assert_eq!(r, e);
+ }
+
+ #[simd_test(enable = "neon,i8mm")]
+ unsafe fn test_vusdotq_lane_s32() {
+ let a: i32x4 = i32x4::new(1000, -4200, -1000, 2000);
+ let b: u8x16 = u8x16::new(100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250);
+ let c: i8x8 = i8x8::new(4, 3, 2, 1, 0, -1, -2, -3);
+ let e: i32x4 = i32x4::new(2100, -2700, 900, 4300);
+ let r: i32x4 = transmute(vusdotq_lane_s32::<0>(transmute(a), transmute(b), transmute(c)));
+ assert_eq!(r, e);
+
+ let a: i32x4 = i32x4::new(1000, -4200, -1000, 2000);
+ let b: u8x16 = u8x16::new(100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250);
+ let c: i8x8 = i8x8::new(4, 3, 2, 1, 0, -1, -2, -3);
+ let e: i32x4 = i32x4::new(260, -5180, -2220, 540);
+ let r: i32x4 = transmute(vusdotq_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
+ assert_eq!(r, e);
+ }
+
+ #[simd_test(enable = "neon,i8mm")]
+ unsafe fn test_vsudot_lane_s32() {
+ let a: i32x2 = i32x2::new(-2000, 4200);
+ let b: i8x8 = i8x8::new(4, 3, 2, 1, 0, -1, -2, -3);
+ let c: u8x8 = u8x8::new(100, 110, 120, 130, 140, 150, 160, 170);
+ let e: i32x2 = i32x2::new(-900, 3460);
+ let r: i32x2 = transmute(vsudot_lane_s32::<0>(transmute(a), transmute(b), transmute(c)));
+ assert_eq!(r, e);
+
+ let a: i32x2 = i32x2::new(-2000, 4200);
+ let b: i8x8 = i8x8::new(4, 3, 2, 1, 0, -1, -2, -3);
+ let c: u8x8 = u8x8::new(100, 110, 120, 130, 140, 150, 160, 170);
+ let e: i32x2 = i32x2::new(-500, 3220);
+ let r: i32x2 = transmute(vsudot_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
+ assert_eq!(r, e);
+ }
+
+ #[simd_test(enable = "neon,i8mm")]
+ unsafe fn test_vsudotq_lane_s32() {
+ let a: i32x4 = i32x4::new(-2000, 4200, -1000, 2000);
+ let b: i8x16 = i8x16::new(4, 3, 2, 1, 0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11);
+ let c: u8x8 = u8x8::new(100, 110, 120, 130, 140, 150, 160, 170);
+ let e: i32x4 = i32x4::new(-900, 3460, -3580, -2420);
+ let r: i32x4 = transmute(vsudotq_lane_s32::<0>(transmute(a), transmute(b), transmute(c)));
+ assert_eq!(r, e);
+
+ let a: i32x4 = i32x4::new(-2000, 4200, -1000, 2000);
+ let b: i8x16 = i8x16::new(4, 3, 2, 1, 0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11);
+ let c: u8x8 = u8x8::new(100, 110, 120, 130, 140, 150, 160, 170);
+ let e: i32x4 = i32x4::new(-500, 3220, -4460, -3940);
+ let r: i32x4 = transmute(vsudotq_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
+ assert_eq!(r, e);
+ }
+
#[simd_test(enable = "neon")]
unsafe fn test_vmul_s8() {
let a: i8x8 = i8x8::new(1, 2, 1, 2, 1, 2, 1, 2);
@@ -39051,6 +39375,86 @@ mod test {
assert_eq!(r, e);
}
+ #[simd_test(enable = "neon,dotprod")]
+ unsafe fn test_vdot_s32() {
+ let a: i32x2 = i32x2::new(1, 2);
+ let b: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
+ let c: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
+ let e: i32x2 = i32x2::new(31, 176);
+ let r: i32x2 = transmute(vdot_s32(transmute(a), transmute(b), transmute(c)));
+ assert_eq!(r, e);
+ }
+
+ #[simd_test(enable = "neon,dotprod")]
+ unsafe fn test_vdotq_s32() {
+ let a: i32x4 = i32x4::new(1, 2, 1, 2);
+ let b: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+ let c: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+ let e: i32x4 = i32x4::new(31, 176, 31, 176);
+ let r: i32x4 = transmute(vdotq_s32(transmute(a), transmute(b), transmute(c)));
+ assert_eq!(r, e);
+ }
+
+ #[simd_test(enable = "neon,dotprod")]
+ unsafe fn test_vdot_u32() {
+ let a: u32x2 = u32x2::new(1, 2);
+ let b: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
+ let c: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
+ let e: u32x2 = u32x2::new(31, 176);
+ let r: u32x2 = transmute(vdot_u32(transmute(a), transmute(b), transmute(c)));
+ assert_eq!(r, e);
+ }
+
+ #[simd_test(enable = "neon,dotprod")]
+ unsafe fn test_vdotq_u32() {
+ let a: u32x4 = u32x4::new(1, 2, 1, 2);
+ let b: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+ let c: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+ let e: u32x4 = u32x4::new(31, 176, 31, 176);
+ let r: u32x4 = transmute(vdotq_u32(transmute(a), transmute(b), transmute(c)));
+ assert_eq!(r, e);
+ }
+
+ #[simd_test(enable = "neon,dotprod")]
+ unsafe fn test_vdot_lane_s32() {
+ let a: i32x2 = i32x2::new(1, 2);
+ let b: i8x8 = i8x8::new(-1, 2, 3, 4, 5, 6, 7, 8);
+ let c: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
+ let e: i32x2 = i32x2::new(29, 72);
+ let r: i32x2 = transmute(vdot_lane_s32::<0>(transmute(a), transmute(b), transmute(c)));
+ assert_eq!(r, e);
+ }
+
+ #[simd_test(enable = "neon,dotprod")]
+ unsafe fn test_vdotq_lane_s32() {
+ let a: i32x4 = i32x4::new(1, 2, 1, 2);
+ let b: i8x16 = i8x16::new(-1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+ let c: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
+ let e: i32x4 = i32x4::new(29, 72, 31, 72);
+ let r: i32x4 = transmute(vdotq_lane_s32::<0>(transmute(a), transmute(b), transmute(c)));
+ assert_eq!(r, e);
+ }
+
+ #[simd_test(enable = "neon,dotprod")]
+ unsafe fn test_vdot_lane_u32() {
+ let a: u32x2 = u32x2::new(1, 2);
+ let b: u8x8 = u8x8::new(255, 2, 3, 4, 5, 6, 7, 8);
+ let c: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
+ let e: u32x2 = u32x2::new(285, 72);
+ let r: u32x2 = transmute(vdot_lane_u32::<0>(transmute(a), transmute(b), transmute(c)));
+ assert_eq!(r, e);
+ }
+
+ #[simd_test(enable = "neon,dotprod")]
+ unsafe fn test_vdotq_lane_u32() {
+ let a: u32x4 = u32x4::new(1, 2, 1, 2);
+ let b: u8x16 = u8x16::new(255, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+ let c: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
+ let e: u32x4 = u32x4::new(285, 72, 31, 72);
+ let r: u32x4 = transmute(vdotq_lane_u32::<0>(transmute(a), transmute(b), transmute(c)));
+ assert_eq!(r, e);
+ }
+
#[simd_test(enable = "neon")]
unsafe fn test_vmax_s8() {
let a: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
diff --git a/library/stdarch/crates/core_arch/src/arm_shared/registers/aarch32.rs b/library/stdarch/crates/core_arch/src/arm_shared/registers/aarch32.rs
deleted file mode 100644
index e0b71218a..000000000
--- a/library/stdarch/crates/core_arch/src/arm_shared/registers/aarch32.rs
+++ /dev/null
@@ -1,9 +0,0 @@
-/// Application Program Status Register
-pub struct APSR;
-
-// Note (@Lokathor): Because this breaks the use of Rust on the Game Boy
-// Advance, this change must be reverted until Rust learns to handle cpu state
-// properly. See also: https://github.com/rust-lang/stdarch/issues/702
-
-//#[cfg(any(not(target_feature = "thumb-state"), target_feature = "v6t2"))]
-//rsr!(APSR);
diff --git a/library/stdarch/crates/core_arch/src/arm_shared/registers/mod.rs b/library/stdarch/crates/core_arch/src/arm_shared/registers/mod.rs
deleted file mode 100644
index 621efe2f5..000000000
--- a/library/stdarch/crates/core_arch/src/arm_shared/registers/mod.rs
+++ /dev/null
@@ -1,121 +0,0 @@
-#[allow(unused_macros)]
-macro_rules! rsr {
- ($R:ident) => {
- impl super::super::sealed::Rsr for $R {
- unsafe fn __rsr(&self) -> u32 {
- let r: u32;
- crate::arch::asm!(concat!("mrs {},", stringify!($R)), out(reg) r, options(nomem, nostack));
- r
- }
- }
- };
-}
-
-#[allow(unused_macros)]
-macro_rules! rsrp {
- ($R:ident) => {
- impl super::super::sealed::Rsrp for $R {
- unsafe fn __rsrp(&self) -> *const u8 {
- let r: *const u8;
- crate::arch::asm!(concat!("mrs {},", stringify!($R)), out(reg) r, options(nomem, nostack));
- r
- }
- }
- };
-}
-
-#[allow(unused_macros)]
-macro_rules! wsr {
- ($R:ident) => {
- impl super::super::sealed::Wsr for $R {
- unsafe fn __wsr(&self, value: u32) {
- crate::arch::asm!(concat!("msr ", stringify!($R), ", {}"), in(reg) value, options(nomem, nostack));
- }
- }
- };
-}
-
-#[allow(unused_macros)]
-macro_rules! wsrp {
- ($R:ident) => {
- impl super::super::sealed::Wsrp for $R {
- unsafe fn __wsrp(&self, value: *const u8) {
- crate::arch::asm!(concat!("msr ", stringify!($R), ", {}"), in(reg) value, options(nomem, nostack));
- }
- }
- };
-}
-
-#[cfg(target_feature = "mclass")]
-mod v6m;
-
-#[cfg(target_feature = "mclass")]
-pub use self::v6m::*;
-
-#[cfg(all(target_feature = "v7", target_feature = "mclass"))]
-mod v7m;
-
-#[cfg(all(target_feature = "v7", target_feature = "mclass"))]
-pub use self::v7m::*;
-
-#[cfg(not(target_arch = "aarch64"))]
-mod aarch32;
-
-#[cfg(not(target_arch = "aarch64"))]
-pub use self::aarch32::*;
-
-/// Reads a 32-bit system register
-#[inline(always)]
-pub unsafe fn __rsr<R>(reg: R) -> u32
-where
- R: super::sealed::Rsr,
-{
- reg.__rsr()
-}
-
-/// Reads a 64-bit system register
-#[cfg(target_arch = "aarch64")]
-#[inline(always)]
-pub unsafe fn __rsr64<R>(reg: R) -> u64
-where
- R: super::sealed::Rsr64,
-{
- reg.__rsr64()
-}
-
-/// Reads a system register containing an address
-#[inline(always)]
-pub unsafe fn __rsrp<R>(reg: R) -> *const u8
-where
- R: super::sealed::Rsrp,
-{
- reg.__rsrp()
-}
-
-/// Writes a 32-bit system register
-#[inline(always)]
-pub unsafe fn __wsr<R>(reg: R, value: u32)
-where
- R: super::sealed::Wsr,
-{
- reg.__wsr(value)
-}
-
-/// Writes a 64-bit system register
-#[cfg(target_arch = "aarch64")]
-#[inline(always)]
-pub unsafe fn __wsr64<R>(reg: R, value: u64)
-where
- R: super::sealed::Wsr64,
-{
- reg.__wsr64(value)
-}
-
-/// Writes a system register containing an address
-#[inline(always)]
-pub unsafe fn __wsrp<R>(reg: R, value: *const u8)
-where
- R: super::sealed::Wsrp,
-{
- reg.__wsrp(value)
-}
diff --git a/library/stdarch/crates/core_arch/src/arm_shared/registers/v6m.rs b/library/stdarch/crates/core_arch/src/arm_shared/registers/v6m.rs
deleted file mode 100644
index 7acc63b6d..000000000
--- a/library/stdarch/crates/core_arch/src/arm_shared/registers/v6m.rs
+++ /dev/null
@@ -1,39 +0,0 @@
-/// CONTROL register
-pub struct CONTROL;
-
-rsr!(CONTROL);
-wsr!(CONTROL);
-
-/// Execution Program Status Register
-pub struct EPSR;
-
-rsr!(EPSR);
-
-/// Interrupt Program Status Register
-pub struct IPSR;
-
-rsr!(IPSR);
-
-/// Main Stack Pointer
-pub struct MSP;
-
-rsrp!(MSP);
-wsrp!(MSP);
-
-/// Priority Mask Register
-pub struct PRIMASK;
-
-rsr!(PRIMASK);
-wsr!(PRIMASK);
-
-/// Process Stack Pointer
-pub struct PSP;
-
-rsrp!(PSP);
-wsrp!(PSP);
-
-/// Program Status Register
-#[allow(non_camel_case_types)]
-pub struct xPSR;
-
-rsr!(xPSR);
diff --git a/library/stdarch/crates/core_arch/src/arm_shared/registers/v7m.rs b/library/stdarch/crates/core_arch/src/arm_shared/registers/v7m.rs
deleted file mode 100644
index d1b1d474f..000000000
--- a/library/stdarch/crates/core_arch/src/arm_shared/registers/v7m.rs
+++ /dev/null
@@ -1,17 +0,0 @@
-/// Base Priority Mask Register
-pub struct BASEPRI;
-
-rsr!(BASEPRI);
-wsr!(BASEPRI);
-
-/// Base Priority Mask Register (conditional write)
-#[allow(non_camel_case_types)]
-pub struct BASEPRI_MAX;
-
-wsr!(BASEPRI_MAX);
-
-/// Fault Mask Register
-pub struct FAULTMASK;
-
-rsr!(FAULTMASK);
-wsr!(FAULTMASK);
diff --git a/library/stdarch/crates/core_arch/src/lib.rs b/library/stdarch/crates/core_arch/src/lib.rs
index 023947b83..27dad8e24 100644
--- a/library/stdarch/crates/core_arch/src/lib.rs
+++ b/library/stdarch/crates/core_arch/src/lib.rs
@@ -2,6 +2,7 @@
#![allow(improper_ctypes_definitions)]
#![allow(dead_code)]
#![allow(unused_features)]
+#![allow(internal_features)]
#![deny(rust_2018_idioms)]
#![feature(
custom_inner_attributes,
@@ -12,6 +13,7 @@
proc_macro_hygiene,
stmt_expr_attributes,
core_intrinsics,
+ intrinsics,
no_core,
rustc_attrs,
stdsimd,
diff --git a/library/stdarch/crates/core_arch/src/mod.rs b/library/stdarch/crates/core_arch/src/mod.rs
index 12a5b086c..ad3ec863d 100644
--- a/library/stdarch/crates/core_arch/src/mod.rs
+++ b/library/stdarch/crates/core_arch/src/mod.rs
@@ -64,8 +64,9 @@ pub mod arch {
/// See the [module documentation](../index.html) for more details.
#[cfg(any(target_arch = "riscv32", doc))]
#[doc(cfg(any(target_arch = "riscv32")))]
- #[unstable(feature = "stdsimd", issue = "27731")]
+ #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")]
pub mod riscv32 {
+ pub use crate::core_arch::riscv32::*;
pub use crate::core_arch::riscv_shared::*;
}
@@ -74,7 +75,7 @@ pub mod arch {
/// See the [module documentation](../index.html) for more details.
#[cfg(any(target_arch = "riscv64", doc))]
#[doc(cfg(any(target_arch = "riscv64")))]
- #[unstable(feature = "stdsimd", issue = "27731")]
+ #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")]
pub mod riscv64 {
pub use crate::core_arch::riscv64::*;
// RISC-V RV64 supports all RV32 instructions as well in current specifications (2022-01-05).
@@ -279,6 +280,10 @@ mod aarch64;
#[doc(cfg(any(target_arch = "arm")))]
mod arm;
+#[cfg(any(target_arch = "riscv32", doc))]
+#[doc(cfg(any(target_arch = "riscv32")))]
+mod riscv32;
+
#[cfg(any(target_arch = "riscv64", doc))]
#[doc(cfg(any(target_arch = "riscv64")))]
mod riscv64;
diff --git a/library/stdarch/crates/core_arch/src/powerpc/altivec.rs b/library/stdarch/crates/core_arch/src/powerpc/altivec.rs
index ae10377ce..e94afa77d 100644
--- a/library/stdarch/crates/core_arch/src/powerpc/altivec.rs
+++ b/library/stdarch/crates/core_arch/src/powerpc/altivec.rs
@@ -15,6 +15,7 @@
use crate::{
core_arch::{simd::*, simd_llvm::*},
+ mem,
mem::transmute,
};
@@ -318,6 +319,12 @@ extern "C" {
fn vupkhsh(a: vector_signed_short) -> vector_signed_int;
#[link_name = "llvm.ppc.altivec.vupklsh"]
fn vupklsh(a: vector_signed_short) -> vector_signed_int;
+
+ #[link_name = "llvm.ppc.altivec.mfvscr"]
+ fn mfvscr() -> vector_unsigned_short;
+
+ #[link_name = "llvm.ppc.altivec.vlogefp"]
+ fn vlogefp(a: vector_float) -> vector_float;
}
macro_rules! s_t_l {
@@ -528,6 +535,60 @@ mod sealed {
impl_vec_lde! { vec_lde_f32 lvewx f32 }
+ pub trait VectorXl {
+ type Result;
+ unsafe fn vec_xl(self, a: isize) -> Self::Result;
+ }
+
+ macro_rules! impl_vec_xl {
+ ($fun:ident $notpwr9:ident / $pwr9:ident $ty:ident) => {
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ #[cfg_attr(
+ all(test, not(target_feature = "power9-altivec")),
+ assert_instr($notpwr9)
+ )]
+ #[cfg_attr(all(test, target_feature = "power9-altivec"), assert_instr($pwr9))]
+ pub unsafe fn $fun(a: isize, b: *const $ty) -> t_t_l!($ty) {
+ let addr = (b as *const u8).offset(a);
+
+ // Workaround ptr::copy_nonoverlapping not being inlined
+ extern "rust-intrinsic" {
+ #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
+ #[rustc_nounwind]
+ pub fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
+ }
+
+ let mut r = mem::MaybeUninit::uninit();
+
+ copy_nonoverlapping(
+ addr,
+ r.as_mut_ptr() as *mut u8,
+ mem::size_of::<t_t_l!($ty)>(),
+ );
+
+ r.assume_init()
+ }
+
+ impl VectorXl for *const $ty {
+ type Result = t_t_l!($ty);
+ #[inline]
+ #[target_feature(enable = "altivec")]
+ unsafe fn vec_xl(self, a: isize) -> Self::Result {
+ $fun(a, self)
+ }
+ }
+ };
+ }
+
+ impl_vec_xl! { vec_xl_i8 lxvd2x / lxv i8 }
+ impl_vec_xl! { vec_xl_u8 lxvd2x / lxv u8 }
+ impl_vec_xl! { vec_xl_i16 lxvd2x / lxv i16 }
+ impl_vec_xl! { vec_xl_u16 lxvd2x / lxv u16 }
+ impl_vec_xl! { vec_xl_i32 lxvd2x / lxv i32 }
+ impl_vec_xl! { vec_xl_u32 lxvd2x / lxv u32 }
+ impl_vec_xl! { vec_xl_f32 lxvd2x / lxv f32 }
+
test_impl! { vec_floor(a: vector_float) -> vector_float [ vfloor, vrfim / xvrspim ] }
test_impl! { vec_vexptefp(a: vector_float) -> vector_float [ vexptefp, vexptefp ] }
@@ -2501,6 +2562,24 @@ where
p.vec_lde(off)
}
+/// VSX Unaligned Load
+#[inline]
+#[target_feature(enable = "altivec")]
+pub unsafe fn vec_xl<T>(off: isize, p: T) -> <T as sealed::VectorXl>::Result
+where
+ T: sealed::VectorXl,
+{
+ p.vec_xl(off)
+}
+
+/// Vector Base-2 Logarithm Estimate
+#[inline]
+#[target_feature(enable = "altivec")]
+#[cfg_attr(test, assert_instr(vlogefp))]
+pub unsafe fn vec_loge(a: vector_float) -> vector_float {
+ vlogefp(a)
+}
+
/// Vector floor.
#[inline]
#[target_feature(enable = "altivec")]
@@ -2566,7 +2645,7 @@ pub unsafe fn vec_cmpb(a: vector_float, b: vector_float) -> vector_signed_int {
sealed::vec_vcmpbfp(a, b)
}
-/// Vector cmpb.
+/// Vector ceil.
#[inline]
#[target_feature(enable = "altivec")]
pub unsafe fn vec_ceil(a: vector_float) -> vector_float {
@@ -2737,6 +2816,14 @@ where
a.vec_max(b)
}
+/// Move From Vector Status and Control Register.
+#[inline]
+#[target_feature(enable = "altivec")]
+#[cfg_attr(test, assert_instr(mfvscr))]
+pub unsafe fn vec_mfvscr() -> vector_unsigned_short {
+ mfvscr()
+}
+
/// Vector add.
#[inline]
#[target_feature(enable = "altivec")]
@@ -3281,6 +3368,24 @@ mod tests {
}
#[simd_test(enable = "altivec")]
+ unsafe fn test_vec_xl() {
+ let pat = [
+ u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15),
+ u8x16::new(
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ ),
+ ];
+
+ for off in 0..16 {
+ let val: u8x16 = transmute(vec_xl(0, (pat.as_ptr() as *const u8).offset(off)));
+ for i in 0..16 {
+ let v = val.extract(i);
+ assert_eq!(off as usize + i, v as usize);
+ }
+ }
+ }
+
+ #[simd_test(enable = "altivec")]
unsafe fn test_vec_ldl() {
let pat = [
u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15),
diff --git a/library/stdarch/crates/core_arch/src/riscv32/mod.rs b/library/stdarch/crates/core_arch/src/riscv32/mod.rs
new file mode 100644
index 000000000..0a8634c85
--- /dev/null
+++ b/library/stdarch/crates/core_arch/src/riscv32/mod.rs
@@ -0,0 +1,5 @@
+//! RISC-V RV32 specific intrinsics
+
+mod zk;
+
+pub use zk::*;
diff --git a/library/stdarch/crates/core_arch/src/riscv32/zk.rs b/library/stdarch/crates/core_arch/src/riscv32/zk.rs
new file mode 100644
index 000000000..376757772
--- /dev/null
+++ b/library/stdarch/crates/core_arch/src/riscv32/zk.rs
@@ -0,0 +1,367 @@
+#[cfg(test)]
+use stdarch_test::assert_instr;
+
+extern "unadjusted" {
+ #[link_name = "llvm.riscv.aes32esi"]
+ fn _aes32esi(rs1: i32, rs2: i32, bs: i32) -> i32;
+
+ #[link_name = "llvm.riscv.aes32esmi"]
+ fn _aes32esmi(rs1: i32, rs2: i32, bs: i32) -> i32;
+
+ #[link_name = "llvm.riscv.aes32dsi"]
+ fn _aes32dsi(rs1: i32, rs2: i32, bs: i32) -> i32;
+
+ #[link_name = "llvm.riscv.aes32dsmi"]
+ fn _aes32dsmi(rs1: i32, rs2: i32, bs: i32) -> i32;
+
+ #[link_name = "llvm.riscv.zip.i32"]
+ fn _zip(rs1: i32) -> i32;
+
+ #[link_name = "llvm.riscv.unzip.i32"]
+ fn _unzip(rs1: i32) -> i32;
+
+ #[link_name = "llvm.riscv.sha512sig0h"]
+ fn _sha512sig0h(rs1: i32, rs2: i32) -> i32;
+
+ #[link_name = "llvm.riscv.sha512sig0l"]
+ fn _sha512sig0l(rs1: i32, rs2: i32) -> i32;
+
+ #[link_name = "llvm.riscv.sha512sig1h"]
+ fn _sha512sig1h(rs1: i32, rs2: i32) -> i32;
+
+ #[link_name = "llvm.riscv.sha512sig1l"]
+ fn _sha512sig1l(rs1: i32, rs2: i32) -> i32;
+
+ #[link_name = "llvm.riscv.sha512sum0r"]
+ fn _sha512sum0r(rs1: i32, rs2: i32) -> i32;
+
+ #[link_name = "llvm.riscv.sha512sum1r"]
+ fn _sha512sum1r(rs1: i32, rs2: i32) -> i32;
+}
+
+/// AES final round encryption instruction for RV32.
+///
+/// This instruction sources a single byte from rs2 according to bs. To this it applies the
+/// forward AES SBox operation, before XOR’ing the result with rs1. This instruction must
+/// always be implemented such that its execution latency does not depend on the data being
+/// operated on.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.3
+///
+/// # Note
+///
+/// The `BS` parameter is expected to be a constant value and only the bottom 2 bits of `bs` are
+/// used.
+///
+/// # Safety
+///
+/// This function is safe to use if the `zkne` target feature is present.
+#[target_feature(enable = "zkne")]
+#[rustc_legacy_const_generics(2)]
+// See #1464
+// #[cfg_attr(test, assert_instr(aes32esi, BS = 0))]
+#[inline]
+pub unsafe fn aes32esi<const BS: u8>(rs1: u32, rs2: u32) -> u32 {
+ static_assert!(BS < 4);
+
+ _aes32esi(rs1 as i32, rs2 as i32, BS as i32) as u32
+}
+
+/// AES middle round encryption instruction for RV32.
+///
+/// This instruction sources a single byte from rs2 according to bs. To this it applies the
+/// forward AES SBox operation, and a partial forward MixColumn, before XOR’ing the result with
+/// rs1. This instruction must always be implemented such that its execution latency does not
+/// depend on the data being operated on.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.4
+///
+/// # Note
+///
+/// The `BS` parameter is expected to be a constant value and only the bottom 2 bits of `bs` are
+/// used.
+///
+/// # Safety
+///
+/// This function is safe to use if the `zkne` target feature is present.
+#[target_feature(enable = "zkne")]
+#[rustc_legacy_const_generics(2)]
+// See #1464
+// #[cfg_attr(test, assert_instr(aes32esmi, BS = 0))]
+#[inline]
+pub unsafe fn aes32esmi<const BS: u8>(rs1: u32, rs2: u32) -> u32 {
+ static_assert!(BS < 4);
+
+ _aes32esmi(rs1 as i32, rs2 as i32, BS as i32) as u32
+}
+
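// Sketch of the intended byte-sliced usage (assuming these intrinsics are
// reached through the nightly core::arch::riscv32 module this patch wires
// up): XOR-accumulating aes32esmi over bs = 0..4 applies SubBytes and
// MixColumns to the whole 32-bit column `col`, starting from a round-key
// word `rk`; ShiftRows is handled by which state column the caller passes.
#[cfg(all(target_arch = "riscv32", target_feature = "zkne"))]
unsafe fn aes_enc_column(rk: u32, col: u32) -> u32 {
    use core::arch::riscv32::aes32esmi;
    let mut acc = rk;
    acc = aes32esmi::<0>(acc, col);
    acc = aes32esmi::<1>(acc, col);
    acc = aes32esmi::<2>(acc, col);
    acc = aes32esmi::<3>(acc, col);
    acc
}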
+/// AES final round decryption instruction for RV32.
+///
+/// This instruction sources a single byte from rs2 according to bs. To this it applies the
+/// inverse AES SBox operation, and XOR’s the result with rs1. This instruction must always be
+/// implemented such that its execution latency does not depend on the data being operated on.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.1
+///
+/// # Note
+///
+/// The `BS` parameter is expected to be a constant value and only the bottom 2 bits of `bs` are
+/// used.
+///
+/// # Safety
+///
+/// This function is safe to use if the `zknd` target feature is present.
+#[target_feature(enable = "zknd")]
+#[rustc_legacy_const_generics(2)]
+// See #1464
+// #[cfg_attr(test, assert_instr(aes32dsi, BS = 0))]
+#[inline]
+pub unsafe fn aes32dsi<const BS: u8>(rs1: u32, rs2: u32) -> u32 {
+ static_assert!(BS < 4);
+
+ _aes32dsi(rs1 as i32, rs2 as i32, BS as i32) as u32
+}
+
+/// AES middle round decryption instruction for RV32.
+///
+/// This instruction sources a single byte from rs2 according to bs. To this it applies the
+/// inverse AES SBox operation, and a partial inverse MixColumn, before XOR’ing the result with
+/// rs1. This instruction must always be implemented such that its execution latency does not
+/// depend on the data being operated on.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.2
+///
+/// # Note
+///
+/// The `BS` parameter is expected to be a constant value and only the bottom 2 bits of `bs` are
+/// used.
+///
+/// # Safety
+///
+/// This function is safe to use if the `zknd` target feature is present.
+#[target_feature(enable = "zknd")]
+#[rustc_legacy_const_generics(2)]
+// See #1464
+// #[cfg_attr(test, assert_instr(aes32dsmi, BS = 0))]
+#[inline]
+pub unsafe fn aes32dsmi<const BS: u8>(rs1: u32, rs2: u32) -> u32 {
+ static_assert!(BS < 4);
+
+ _aes32dsmi(rs1 as i32, rs2 as i32, BS as i32) as u32
+}
+
+/// Place upper/lower halves of the source register into odd/even bits of the destination
+/// respectively.
+///
+/// This instruction places bits in the low half of the source register into the even bit
+/// positions of the destination, and bits in the high half of the source register into the odd
+/// bit positions of the destination. It is the inverse of the unzip instruction. This
+/// instruction is available only on RV32.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.49
+///
+/// # Safety
+///
+/// This function is safe to use if the `zbkb` target feature is present.
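+///
+/// # Details
+///
+/// A minimal sketch (the value is illustrative only) showing that `unzip` reverses `zip`:
+///
+/// ```no_run
+/// # #[cfg(target_arch = "riscv32")]
+/// # fn example() {
+/// # use core::arch::riscv32::{unzip, zip};
+/// let x = 0x1234_5678_u32;
+/// // Interleave the two halves bit by bit, then separate them again.
+/// let interleaved = unsafe { zip(x) };
+/// assert_eq!(unsafe { unzip(interleaved) }, x);
+/// # }
+/// ```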
+#[target_feature(enable = "zbkb")]
+// See #1464
+// #[cfg_attr(test, assert_instr(zip))]
+#[inline]
+pub unsafe fn zip(rs: u32) -> u32 {
+ _zip(rs as i32) as u32
+}
+
+/// Place odd and even bits of the source word into upper/lower halves of the destination.
+///
+/// This instruction places the even bits of the source register into the low half of the
+/// destination, and the odd bits of the source into the high half of the destination. It is
+/// the inverse of the zip instruction. This instruction is available only on RV32.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.45
+///
+/// # Safety
+///
+/// This function is safe to use if the `zbkb` target feature is present.
+#[target_feature(enable = "zbkb")]
+#[cfg_attr(test, assert_instr(unzip))]
+#[inline]
+pub unsafe fn unzip(rs: u32) -> u32 {
+ _unzip(rs as i32) as u32
+}
+
+/// Implements the high half of the Sigma0 transformation, as used in the SHA2-512 hash
+/// function \[49\] (Section 4.1.3).
+///
+/// This instruction is implemented on RV32 only. Used to compute the Sigma0 transform of the
+/// SHA2-512 hash function in conjunction with the sha512sig0l instruction. The transform is a
+/// 64-bit to 64-bit function, so the input and output are each represented by two 32-bit
+/// registers. This instruction must always be implemented such that its execution latency does
+/// not depend on the data being operated on.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.31
+///
+/// # Safety
+///
+/// This function is safe to use if the `zknh` target feature is present.
+#[target_feature(enable = "zknh")]
+// See #1464
+// #[cfg_attr(test, assert_instr(sha512sig0h))]
+#[inline]
+pub unsafe fn sha512sig0h(rs1: u32, rs2: u32) -> u32 {
+ _sha512sig0h(rs1 as i32, rs2 as i32) as u32
+}
+
+/// Implements the low half of the Sigma0 transformation, as used in the SHA2-512 hash function
+/// \[49\] (Section 4.1.3).
+///
+/// This instruction is implemented on RV32 only. Used to compute the Sigma0 transform of the
+/// SHA2-512 hash function in conjunction with the sha512sig0h instruction. The transform is a
+/// 64-bit to 64-bit function, so the input and output are each represented by two 32-bit
+/// registers. This instruction must always be implemented such that its execution latency does
+/// not depend on the data being operated on.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.32
+///
+/// # Safety
+///
+/// This function is safe to use if the `zknh` target feature is present.
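+///
+/// # Details
+///
+/// The full 64-bit Sigma0 result is assembled from this instruction and `sha512sig0h`. The
+/// operand pairing below is an assumption modelled on the specification's example usage;
+/// verify it against the specification before relying on it:
+///
+/// ```no_run
+/// # #[cfg(target_arch = "riscv32")]
+/// # fn sigma0(x_hi: u32, x_lo: u32) -> (u32, u32) {
+/// # use core::arch::riscv32::{sha512sig0h, sha512sig0l};
+/// // Each half of the 64-bit result needs both halves of the 64-bit input, because the
+/// // rotations in Sigma0 cross the 32-bit boundary.
+/// let lo = unsafe { sha512sig0l(x_lo, x_hi) };
+/// let hi = unsafe { sha512sig0h(x_hi, x_lo) };
+/// (hi, lo)
+/// # }
+/// ```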
+#[target_feature(enable = "zknh")]
+// See #1464
+// #[cfg_attr(test, assert_instr(sha512sig0l))]
+#[inline]
+pub unsafe fn sha512sig0l(rs1: u32, rs2: u32) -> u32 {
+ _sha512sig0l(rs1 as i32, rs2 as i32) as u32
+}
+
+/// Implements the high half of the Sigma1 transformation, as used in the SHA2-512 hash
+/// function \[49\] (Section 4.1.3).
+///
+/// This instruction is implemented on RV32 only. Used to compute the Sigma1 transform of the
+/// SHA2-512 hash function in conjunction with the sha512sig1l instruction. The transform is a
+/// 64-bit to 64-bit function, so the input and output are each represented by two 32-bit
+/// registers. This instruction must always be implemented such that its execution latency does
+/// not depend on the data being operated on.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.33
+///
+/// # Safety
+///
+/// This function is safe to use if the `zknh` target feature is present.
+#[target_feature(enable = "zknh")]
+// See #1464
+// #[cfg_attr(test, assert_instr(sha512sig1h))]
+#[inline]
+pub unsafe fn sha512sig1h(rs1: u32, rs2: u32) -> u32 {
+ _sha512sig1h(rs1 as i32, rs2 as i32) as u32
+}
+
+/// Implements the low half of the Sigma1 transformation, as used in the SHA2-512 hash function
+/// \[49\] (Section 4.1.3).
+///
+/// This instruction is implemented on RV32 only. Used to compute the Sigma1 transform of the
+/// SHA2-512 hash function in conjunction with the sha512sig1h instruction. The transform is a
+/// 64-bit to 64-bit function, so the input and output are each represented by two 32-bit
+/// registers. This instruction must always be implemented such that its execution latency does
+/// not depend on the data being operated on.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.34
+///
+/// # Safety
+///
+/// This function is safe to use if the `zknh` target feature is present.
+#[target_feature(enable = "zknh")]
+#[cfg_attr(test, assert_instr(sha512sig1l))]
+#[inline]
+pub unsafe fn sha512sig1l(rs1: u32, rs2: u32) -> u32 {
+ _sha512sig1l(rs1 as i32, rs2 as i32) as u32
+}
+
+/// Implements the Sum0 transformation, as used in the SHA2-512 hash function \[49\] (Section
+/// 4.1.3).
+///
+/// This instruction is implemented on RV32 only. Used to compute the Sum0 transform of the
+/// SHA2-512 hash function. The transform is a 64-bit to 64-bit function, so the input and
+/// output is represented by two 32-bit registers. This instruction must always be implemented
+/// such that its execution latency does not depend on the data being operated on.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.35
+///
+/// # Safety
+///
+/// This function is safe to use if the `zknh` target feature is present.
+#[target_feature(enable = "zknh")]
+// See #1464
+// #[cfg_attr(test, assert_instr(sha512sum0r))]
+#[inline]
+pub unsafe fn sha512sum0r(rs1: u32, rs2: u32) -> u32 {
+ _sha512sum0r(rs1 as i32, rs2 as i32) as u32
+}
+
+/// Implements the Sum1 transformation, as used in the SHA2-512 hash function \[49\] (Section
+/// 4.1.3).
+///
+/// This instruction is implemented on RV32 only. Used to compute the Sum1 transform of the
+/// SHA2-512 hash function. The transform is a 64-bit to 64-bit function, so the input and
+/// output is represented by two 32-bit registers. This instruction must always be implemented
+/// such that its execution latency does not depend on the data being operated on.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.36
+///
+/// # Safety
+///
+/// This function is safe to use if the `zknh` target feature is present.
+#[target_feature(enable = "zknh")]
+// See #1464
+// #[cfg_attr(test, assert_instr(sha512sum1r))]
+#[inline]
+pub unsafe fn sha512sum1r(rs1: u32, rs2: u32) -> u32 {
+ _sha512sum1r(rs1 as i32, rs2 as i32) as u32
+}
diff --git a/library/stdarch/crates/core_arch/src/riscv64/mod.rs b/library/stdarch/crates/core_arch/src/riscv64/mod.rs
index 751b9a860..ad16d6c23 100644
--- a/library/stdarch/crates/core_arch/src/riscv64/mod.rs
+++ b/library/stdarch/crates/core_arch/src/riscv64/mod.rs
@@ -1,6 +1,10 @@
//! RISC-V RV64 specific intrinsics
use crate::arch::asm;
+mod zk;
+
+pub use zk::*;
+
/// Loads virtual machine memory by unsigned word integer
///
/// This instruction performs an explicit memory access as though `V=1`;
diff --git a/library/stdarch/crates/core_arch/src/riscv64/zk.rs b/library/stdarch/crates/core_arch/src/riscv64/zk.rs
new file mode 100644
index 000000000..3dbe3705d
--- /dev/null
+++ b/library/stdarch/crates/core_arch/src/riscv64/zk.rs
@@ -0,0 +1,281 @@
+#[cfg(test)]
+use stdarch_test::assert_instr;
+
+extern "unadjusted" {
+ #[link_name = "llvm.riscv.aes64es"]
+ fn _aes64es(rs1: i64, rs2: i64) -> i64;
+
+ #[link_name = "llvm.riscv.aes64esm"]
+ fn _aes64esm(rs1: i64, rs2: i64) -> i64;
+
+ #[link_name = "llvm.riscv.aes64ds"]
+ fn _aes64ds(rs1: i64, rs2: i64) -> i64;
+
+ #[link_name = "llvm.riscv.aes64dsm"]
+ fn _aes64dsm(rs1: i64, rs2: i64) -> i64;
+
+ #[link_name = "llvm.riscv.aes64ks1i"]
+ fn _aes64ks1i(rs1: i64, rnum: i32) -> i64;
+
+ #[link_name = "llvm.riscv.aes64ks2"]
+ fn _aes64ks2(rs1: i64, rs2: i64) -> i64;
+
+ #[link_name = "llvm.riscv.sha512sig0"]
+ fn _sha512sig0(rs1: i64) -> i64;
+
+ #[link_name = "llvm.riscv.sha512sig1"]
+ fn _sha512sig1(rs1: i64) -> i64;
+
+ #[link_name = "llvm.riscv.sha512sum0"]
+ fn _sha512sum0(rs1: i64) -> i64;
+
+ #[link_name = "llvm.riscv.sha512sum1"]
+ fn _sha512sum1(rs1: i64) -> i64;
+}
+
+/// AES final round encryption instruction for RV64.
+///
+/// Uses the two 64-bit source registers to represent the entire AES state, and produces half
+/// of the next round output, applying the ShiftRows and SubBytes steps. This instruction must
+/// always be implemented such that its execution latency does not depend on the data being
+/// operated on.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.7
+///
+/// # Safety
+///
+/// This function is safe to use if the `zkne` target feature is present.
+#[target_feature(enable = "zkne")]
+// See #1464
+// #[cfg_attr(test, assert_instr(aes64es))]
+#[inline]
+pub unsafe fn aes64es(rs1: u64, rs2: u64) -> u64 {
+ _aes64es(rs1 as i64, rs2 as i64) as u64
+}
+
+/// AES middle round encryption instruction for RV64.
+///
+/// Uses the two 64-bit source registers to represent the entire AES state, and produces half
+/// of the next round output, applying the ShiftRows, SubBytes and MixColumns steps. This
+/// instruction must always be implemented such that its execution latency does not depend on
+/// the data being operated on.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.8
+///
+/// # Safety
+///
+/// This function is safe to use if the `zkne` target feature is present.
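+///
+/// # Details
+///
+/// One middle round over the full 128-bit state takes two calls with swapped operands,
+/// followed by the AddRoundKey XOR. The pairing and key placement below are assumptions based
+/// on the specification's example code; verify them against the specification before relying
+/// on them:
+///
+/// ```no_run
+/// # #[cfg(target_arch = "riscv64")]
+/// # fn middle_round(s0: u64, s1: u64, rk0: u64, rk1: u64) -> (u64, u64) {
+/// # use core::arch::riscv64::aes64esm;
+/// // Each call consumes the whole state `(s0, s1)` and produces one 64-bit half of the
+/// // ShiftRows/SubBytes/MixColumns output; AddRoundKey is a plain XOR afterwards.
+/// let t0 = unsafe { aes64esm(s0, s1) };
+/// let t1 = unsafe { aes64esm(s1, s0) };
+/// (t0 ^ rk0, t1 ^ rk1)
+/// # }
+/// ```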
+#[target_feature(enable = "zkne")]
+// See #1464
+// #[cfg_attr(test, assert_instr(aes64esm))]
+#[inline]
+pub unsafe fn aes64esm(rs1: u64, rs2: u64) -> u64 {
+ _aes64esm(rs1 as i64, rs2 as i64) as u64
+}
+
+/// AES final round decryption instruction for RV64.
+///
+/// Uses the two 64-bit source registers to represent the entire AES state, and produces half
+/// of the next round output, applying the Inverse ShiftRows and SubBytes steps. This
+/// instruction must always be implemented such that its execution latency does not depend on
+/// the data being operated on.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.5
+///
+/// # Safety
+///
+/// This function is safe to use if the `zknd` target feature is present.
+#[target_feature(enable = "zknd")]
+// See #1464
+// #[cfg_attr(test, assert_instr(aes64ds))]
+#[inline]
+pub unsafe fn aes64ds(rs1: u64, rs2: u64) -> u64 {
+ _aes64ds(rs1 as i64, rs2 as i64) as u64
+}
+
+/// AES middle round decryption instruction for RV64.
+///
+/// Uses the two 64-bit source registers to represent the entire AES state, and produces half
+/// of the next round output, applying the Inverse ShiftRows, SubBytes and MixColumns steps.
+/// This instruction must always be implemented such that its execution latency does not depend
+/// on the data being operated on.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.6
+///
+/// # Safety
+///
+/// This function is safe to use if the `zknd` target feature is present.
+#[target_feature(enable = "zknd")]
+// See #1464
+// #[cfg_attr(test, assert_instr(aes64dsm))]
+#[inline]
+pub unsafe fn aes64dsm(rs1: u64, rs2: u64) -> u64 {
+ _aes64dsm(rs1 as i64, rs2 as i64) as u64
+}
+
+/// This instruction implements part of the KeySchedule operation for the AES Block cipher
+/// involving the SBox operation.
+///
+/// This instruction implements the rotation, SubBytes and Round Constant addition steps of the
+/// AES block cipher Key Schedule. This instruction must always be implemented such that its
+/// execution latency does not depend on the data being operated on. Note that rnum must be in
+/// the range 0x0..0xA. The values 0xB..0xF are reserved.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.10
+///
+/// # Note
+///
+/// The `RNUM` parameter is expected to be a constant value inside the range of `0..=10`.
+///
+/// # Safety
+///
+/// This function is safe to use if the `zkne` or `zknd` target feature is present.
+#[target_feature(enable = "zkne", enable = "zknd")]
+#[rustc_legacy_const_generics(1)]
+// See #1464
+// #[cfg_attr(test, assert_instr(aes64ks1i, RNUM = 0))]
+#[inline]
+pub unsafe fn aes64ks1i<const RNUM: u8>(rs1: u64) -> u64 {
+ static_assert!(RNUM <= 10);
+
+ _aes64ks1i(rs1 as i64, RNUM as i32) as u64
+}
+
+/// This instruction implements part of the KeySchedule operation for the AES Block cipher.
+///
+/// This instruction implements the additional XOR’ing of key words as part of the AES block
+/// cipher Key Schedule. This instruction must always be implemented such that its execution
+/// latency does not depend on the data being operated on.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.11
+///
+/// # Safety
+///
+/// This function is safe to use if the `zkne` or `zknd` target feature is present.
+#[target_feature(enable = "zkne", enable = "zknd")]
+// See #1464
+// #[cfg_attr(test, assert_instr(aes64ks2))]
+#[inline]
+pub unsafe fn aes64ks2(rs1: u64, rs2: u64) -> u64 {
+ _aes64ks2(rs1 as i64, rs2 as i64) as u64
+}
+
+/// Implements the Sigma0 transformation function as used in the SHA2-512 hash function \[49\]
+/// (Section 4.1.3).
+///
+/// This instruction is supported for the RV64 base architecture. It implements the Sigma0
+/// transform of the SHA2-512 hash function \[49\]. This instruction must always be
+/// implemented such that its execution latency does not depend on the data being operated on.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.37
+///
+/// # Safety
+///
+/// This function is safe to use if the `zknh` target feature is present.
+#[target_feature(enable = "zknh")]
+// See #1464
+// #[cfg_attr(test, assert_instr(sha512sig0))]
+#[inline]
+pub unsafe fn sha512sig0(rs1: u64) -> u64 {
+ _sha512sig0(rs1 as i64) as u64
+}
+
+/// Implements the Sigma1 transformation function as used in the SHA2-512 hash function \[49\]
+/// (Section 4.1.3).
+///
+/// This instruction is supported for the RV64 base architecture. It implements the Sigma1
+/// transform of the SHA2-512 hash function \[49\]. This instruction must always be
+/// implemented such that its execution latency does not depend on the data being operated on.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.38
+///
+/// # Safety
+///
+/// This function is safe to use if the `zknh` target feature is present.
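+///
+/// # Details
+///
+/// A sketch of the SHA2-512 message schedule from FIPS 180-4,
+/// `W[t] = σ1(W[t-2]) + W[t-7] + σ0(W[t-15]) + W[t-16]`, using this intrinsic together with
+/// `sha512sig0`; the helper shown here is illustrative only:
+///
+/// ```no_run
+/// # #[cfg(target_arch = "riscv64")]
+/// # fn expand(w: &mut [u64; 80]) {
+/// # use core::arch::riscv64::{sha512sig0, sha512sig1};
+/// for t in 16..80 {
+///     // Wrapping adds model the modulo 2^64 additions of the hash function.
+///     w[t] = unsafe { sha512sig1(w[t - 2]) }
+///         .wrapping_add(w[t - 7])
+///         .wrapping_add(unsafe { sha512sig0(w[t - 15]) })
+///         .wrapping_add(w[t - 16]);
+/// }
+/// # }
+/// ```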
+#[target_feature(enable = "zknh")]
+// See #1464
+// #[cfg_attr(test, assert_instr(sha512sig1))]
+#[inline]
+pub unsafe fn sha512sig1(rs1: u64) -> u64 {
+ _sha512sig1(rs1 as i64) as u64
+}
+
+/// Implements the Sum0 transformation function as used in the SHA2-512 hash function \[49\]
+/// (Section 4.1.3).
+///
+/// This instruction is supported for the RV64 base architecture. It implements the Sum0
+/// transform of the SHA2-512 hash function \[49\]. This instruction must always be
+/// implemented such that its execution latency does not depend on the data being operated on.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.39
+///
+/// # Safety
+///
+/// This function is safe to use if the `zknh` target feature is present.
+#[target_feature(enable = "zknh")]
+// See #1464
+// #[cfg_attr(test, assert_instr(sha512sum0))]
+#[inline]
+pub unsafe fn sha512sum0(rs1: u64) -> u64 {
+ _sha512sum0(rs1 as i64) as u64
+}
+
+/// Implements the Sum1 transformation function as used in the SHA2-512 hash function \[49\]
+/// (Section 4.1.3).
+///
+/// This instruction is supported for the RV64 base architecture. It implements the Sum1
+/// transform of the SHA2-512 hash function \[49\]. This instruction must always be
+/// implemented such that its execution latency does not depend on the data being operated on.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.40
+///
+/// # Safety
+///
+/// This function is safe to use if the `zknh` target feature is present.
+#[target_feature(enable = "zknh")]
+// See #1464
+// #[cfg_attr(test, assert_instr(sha512sum1))]
+#[inline]
+pub unsafe fn sha512sum1(rs1: u64) -> u64 {
+ _sha512sum1(rs1 as i64) as u64
+}
diff --git a/library/stdarch/crates/core_arch/src/riscv_shared/mod.rs b/library/stdarch/crates/core_arch/src/riscv_shared/mod.rs
index ed021df5a..14f6989d2 100644
--- a/library/stdarch/crates/core_arch/src/riscv_shared/mod.rs
+++ b/library/stdarch/crates/core_arch/src/riscv_shared/mod.rs
@@ -1,7 +1,13 @@
//! Shared RISC-V intrinsics
+
mod p;
+mod zb;
+mod zk;
+#[unstable(feature = "stdsimd", issue = "27731")]
pub use p::*;
+pub use zb::*;
+pub use zk::*;
use crate::arch::asm;
@@ -10,6 +16,7 @@ use crate::arch::asm;
/// The PAUSE instruction is a HINT that indicates the current hart's rate of instruction retirement
/// should be temporarily reduced or paused. The duration of its effect must be bounded and may be zero.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub fn pause() {
unsafe { asm!(".insn i 0x0F, 0, x0, x0, 0x010", options(nomem, nostack)) }
}
@@ -19,6 +26,7 @@ pub fn pause() {
/// The NOP instruction does not change any architecturally visible state, except for
/// advancing the `pc` and incrementing any applicable performance counters.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub fn nop() {
unsafe { asm!("nop", options(nomem, nostack)) }
}
@@ -29,6 +37,7 @@ pub fn nop() {
/// until an interrupt might need servicing. This instruction is a hint,
/// and a legal implementation is to simply implement WFI as a NOP.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn wfi() {
asm!("wfi", options(nomem, nostack))
}
@@ -41,6 +50,7 @@ pub unsafe fn wfi() {
/// FENCE.I does not ensure that other RISC-V harts' instruction fetches will observe the
/// local hart's stores in a multiprocessor system.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn fence_i() {
asm!("fence.i", options(nostack))
}
@@ -54,6 +64,7 @@ pub unsafe fn fence_i() {
/// virtual address in parameter `vaddr` and that match the address space identified by integer
/// parameter `asid`, except for entries containing global mappings.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn sfence_vma(vaddr: usize, asid: usize) {
asm!("sfence.vma {}, {}", in(reg) vaddr, in(reg) asid, options(nostack))
}
@@ -65,6 +76,7 @@ pub unsafe fn sfence_vma(vaddr: usize, asid: usize) {
/// The fence also invalidates all address-translation cache entries that contain leaf page
/// table entries corresponding to the virtual address in parameter `vaddr`, for all address spaces.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn sfence_vma_vaddr(vaddr: usize) {
asm!("sfence.vma {}, x0", in(reg) vaddr, options(nostack))
}
@@ -78,6 +90,7 @@ pub unsafe fn sfence_vma_vaddr(vaddr: usize) {
/// address-translation cache entries matching the address space identified by integer
/// parameter `asid`, except for entries containing global mappings.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn sfence_vma_asid(asid: usize) {
asm!("sfence.vma x0, {}", in(reg) asid, options(nostack))
}
@@ -88,6 +101,7 @@ pub unsafe fn sfence_vma_asid(asid: usize) {
/// tables, for all address spaces. The fence also invalidates all address-translation cache entries,
/// for all address spaces.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn sfence_vma_all() {
asm!("sfence.vma", options(nostack))
}
@@ -97,6 +111,7 @@ pub unsafe fn sfence_vma_all() {
/// This instruction invalidates any address-translation cache entries that an
/// `SFENCE.VMA` instruction with the same values of `vaddr` and `asid` would invalidate.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn sinval_vma(vaddr: usize, asid: usize) {
// asm!("sinval.vma {}, {}", in(reg) vaddr, in(reg) asid, options(nostack))
asm!(".insn r 0x73, 0, 0x0B, x0, {}, {}", in(reg) vaddr, in(reg) asid, options(nostack))
@@ -107,6 +122,7 @@ pub unsafe fn sinval_vma(vaddr: usize, asid: usize) {
/// This instruction invalidates any address-translation cache entries that an
/// `SFENCE.VMA` instruction with the same values of `vaddr` and `asid` would invalidate.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn sinval_vma_vaddr(vaddr: usize) {
asm!(".insn r 0x73, 0, 0x0B, x0, {}, x0", in(reg) vaddr, options(nostack))
}
@@ -116,6 +132,7 @@ pub unsafe fn sinval_vma_vaddr(vaddr: usize) {
/// This instruction invalidates any address-translation cache entries that an
/// `SFENCE.VMA` instruction with the same values of `vaddr` and `asid` would invalidate.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn sinval_vma_asid(asid: usize) {
asm!(".insn r 0x73, 0, 0x0B, x0, x0, {}", in(reg) asid, options(nostack))
}
@@ -125,6 +142,7 @@ pub unsafe fn sinval_vma_asid(asid: usize) {
/// This instruction invalidates any address-translation cache entries that an
/// `SFENCE.VMA` instruction with the same values of `vaddr` and `asid` would invalidate.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn sinval_vma_all() {
asm!(".insn r 0x73, 0, 0x0B, x0, x0, x0", options(nostack))
}
@@ -134,6 +152,7 @@ pub unsafe fn sinval_vma_all() {
/// This instruction guarantees that any previous stores already visible to the current RISC-V hart
/// are ordered before subsequent `SINVAL.VMA` instructions executed by the same hart.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn sfence_w_inval() {
// asm!("sfence.w.inval", options(nostack))
asm!(".insn i 0x73, 0, x0, x0, 0x180", options(nostack))
@@ -144,6 +163,7 @@ pub unsafe fn sfence_w_inval() {
/// This instruction guarantees that any previous SINVAL.VMA instructions executed by the current hart
/// are ordered before subsequent implicit references by that hart to the memory-management data structures.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn sfence_inval_ir() {
// asm!("sfence.inval.ir", options(nostack))
asm!(".insn i 0x73, 0, x0, x0, 0x181", options(nostack))
@@ -158,6 +178,7 @@ pub unsafe fn sfence_inval_ir() {
/// This function is unsafe for it accesses the virtual supervisor or user via a `HLV.B`
/// instruction which is effectively a dereference to any memory address.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn hlv_b(src: *const i8) -> i8 {
let value: i8;
asm!(".insn i 0x73, 0x4, {}, {}, 0x600", out(reg) value, in(reg) src, options(readonly, nostack));
@@ -173,6 +194,7 @@ pub unsafe fn hlv_b(src: *const i8) -> i8 {
/// This function is unsafe for it accesses the virtual supervisor or user via a `HLV.BU`
/// instruction which is effectively a dereference to any memory address.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn hlv_bu(src: *const u8) -> u8 {
let value: u8;
asm!(".insn i 0x73, 0x4, {}, {}, 0x601", out(reg) value, in(reg) src, options(readonly, nostack));
@@ -188,6 +210,7 @@ pub unsafe fn hlv_bu(src: *const u8) -> u8 {
/// This function is unsafe for it accesses the virtual supervisor or user via a `HLV.H`
/// instruction which is effectively a dereference to any memory address.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn hlv_h(src: *const i16) -> i16 {
let value: i16;
asm!(".insn i 0x73, 0x4, {}, {}, 0x640", out(reg) value, in(reg) src, options(readonly, nostack));
@@ -203,6 +226,7 @@ pub unsafe fn hlv_h(src: *const i16) -> i16 {
/// This function is unsafe for it accesses the virtual supervisor or user via a `HLV.HU`
/// instruction which is effectively a dereference to any memory address.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn hlv_hu(src: *const u16) -> u16 {
let value: u16;
asm!(".insn i 0x73, 0x4, {}, {}, 0x641", out(reg) value, in(reg) src, options(readonly, nostack));
@@ -218,6 +242,7 @@ pub unsafe fn hlv_hu(src: *const u16) -> u16 {
/// This function is unsafe for it accesses the virtual supervisor or user via a `HLVX.HU`
/// instruction which is effectively a dereference to any memory address.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn hlvx_hu(src: *const u16) -> u16 {
let insn: u16;
asm!(".insn i 0x73, 0x4, {}, {}, 0x643", out(reg) insn, in(reg) src, options(readonly, nostack));
@@ -233,6 +258,7 @@ pub unsafe fn hlvx_hu(src: *const u16) -> u16 {
/// This function is unsafe for it accesses the virtual supervisor or user via a `HLV.W`
/// instruction which is effectively a dereference to any memory address.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn hlv_w(src: *const i32) -> i32 {
let value: i32;
asm!(".insn i 0x73, 0x4, {}, {}, 0x680", out(reg) value, in(reg) src, options(readonly, nostack));
@@ -248,6 +274,7 @@ pub unsafe fn hlv_w(src: *const i32) -> i32 {
/// This function is unsafe for it accesses the virtual supervisor or user via a `HLVX.WU`
/// instruction which is effectively a dereference to any memory address.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn hlvx_wu(src: *const u32) -> u32 {
let insn: u32;
asm!(".insn i 0x73, 0x4, {}, {}, 0x683", out(reg) insn, in(reg) src, options(readonly, nostack));
@@ -263,6 +290,7 @@ pub unsafe fn hlvx_wu(src: *const u32) -> u32 {
/// This function is unsafe for it accesses the virtual supervisor or user via a `HSV.B`
/// instruction which is effectively a dereference to any memory address.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn hsv_b(dst: *mut i8, src: i8) {
asm!(".insn r 0x73, 0x4, 0x31, x0, {}, {}", in(reg) dst, in(reg) src, options(nostack));
}
@@ -276,6 +304,7 @@ pub unsafe fn hsv_b(dst: *mut i8, src: i8) {
/// This function is unsafe for it accesses the virtual supervisor or user via a `HSV.H`
/// instruction which is effectively a dereference to any memory address.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn hsv_h(dst: *mut i16, src: i16) {
asm!(".insn r 0x73, 0x4, 0x33, x0, {}, {}", in(reg) dst, in(reg) src, options(nostack));
}
@@ -289,6 +318,7 @@ pub unsafe fn hsv_h(dst: *mut i16, src: i16) {
/// This function is unsafe for it accesses the virtual supervisor or user via a `HSV.W`
/// instruction which is effectively a dereference to any memory address.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn hsv_w(dst: *mut i32, src: i32) {
asm!(".insn r 0x73, 0x4, 0x35, x0, {}, {}", in(reg) dst, in(reg) src, options(nostack));
}
@@ -302,6 +332,7 @@ pub unsafe fn hsv_w(dst: *mut i32, src: i32) {
///
/// This fence specifies a single guest virtual address, and a single guest address-space identifier.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn hfence_vvma(vaddr: usize, asid: usize) {
// asm!("hfence.vvma {}, {}", in(reg) vaddr, in(reg) asid)
asm!(".insn r 0x73, 0, 0x11, x0, {}, {}", in(reg) vaddr, in(reg) asid, options(nostack))
@@ -316,6 +347,7 @@ pub unsafe fn hfence_vvma(vaddr: usize, asid: usize) {
///
/// This fence specifies a single guest virtual address.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn hfence_vvma_vaddr(vaddr: usize) {
asm!(".insn r 0x73, 0, 0x11, x0, {}, x0", in(reg) vaddr, options(nostack))
}
@@ -329,6 +361,7 @@ pub unsafe fn hfence_vvma_vaddr(vaddr: usize) {
///
/// This fence specifies a single guest address-space identifier.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn hfence_vvma_asid(asid: usize) {
asm!(".insn r 0x73, 0, 0x11, x0, x0, {}", in(reg) asid, options(nostack))
}
@@ -342,6 +375,7 @@ pub unsafe fn hfence_vvma_asid(asid: usize) {
///
/// This fence applies to any guest address spaces and guest virtual addresses.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn hfence_vvma_all() {
asm!(".insn r 0x73, 0, 0x11, x0, x0, x0", options(nostack))
}
@@ -354,6 +388,7 @@ pub unsafe fn hfence_vvma_all() {
/// This fence specifies a single guest physical address, **shifted right by 2 bits**, and a single virtual machine
/// by virtual machine identifier (VMID).
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn hfence_gvma(gaddr: usize, vmid: usize) {
// asm!("hfence.gvma {}, {}", in(reg) gaddr, in(reg) vmid, options(nostack))
asm!(".insn r 0x73, 0, 0x31, x0, {}, {}", in(reg) gaddr, in(reg) vmid, options(nostack))
@@ -366,6 +401,7 @@ pub unsafe fn hfence_gvma(gaddr: usize, vmid: usize) {
///
/// This fence specifies a single guest physical address; **the physical address should be shifted right by 2 bits**.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn hfence_gvma_gaddr(gaddr: usize) {
asm!(".insn r 0x73, 0, 0x31, x0, {}, x0", in(reg) gaddr, options(nostack))
}
@@ -377,6 +413,7 @@ pub unsafe fn hfence_gvma_gaddr(gaddr: usize) {
///
/// This fence specifies a single virtual machine by virtual machine identifier (VMID).
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn hfence_gvma_vmid(vmid: usize) {
asm!(".insn r 0x73, 0, 0x31, x0, x0, {}", in(reg) vmid, options(nostack))
}
@@ -388,6 +425,7 @@ pub unsafe fn hfence_gvma_vmid(vmid: usize) {
///
/// This fence specifies all guest physical addresses and all virtual machines.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn hfence_gvma_all() {
asm!(".insn r 0x73, 0, 0x31, x0, x0, x0", options(nostack))
}
@@ -399,6 +437,7 @@ pub unsafe fn hfence_gvma_all() {
///
/// This fence specifies a single guest virtual address, and a single guest address-space identifier.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn hinval_vvma(vaddr: usize, asid: usize) {
// asm!("hinval.vvma {}, {}", in(reg) vaddr, in(reg) asid, options(nostack))
asm!(".insn r 0x73, 0, 0x13, x0, {}, {}", in(reg) vaddr, in(reg) asid, options(nostack))
@@ -411,6 +450,7 @@ pub unsafe fn hinval_vvma(vaddr: usize, asid: usize) {
///
/// This fence specifies a single guest virtual address.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn hinval_vvma_vaddr(vaddr: usize) {
asm!(".insn r 0x73, 0, 0x13, x0, {}, x0", in(reg) vaddr, options(nostack))
}
@@ -422,6 +462,7 @@ pub unsafe fn hinval_vvma_vaddr(vaddr: usize) {
///
/// This fence specifies a single guest address-space identifier.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn hinval_vvma_asid(asid: usize) {
asm!(".insn r 0x73, 0, 0x13, x0, x0, {}", in(reg) asid, options(nostack))
}
@@ -433,6 +474,7 @@ pub unsafe fn hinval_vvma_asid(asid: usize) {
///
/// This fence applies to any guest address spaces and guest virtual addresses.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn hinval_vvma_all() {
asm!(".insn r 0x73, 0, 0x13, x0, x0, x0", options(nostack))
}
@@ -445,6 +487,7 @@ pub unsafe fn hinval_vvma_all() {
/// This fence specifies a single guest physical address, **shifted right by 2 bits**, and a single virtual machine
/// by virtual machine identifier (VMID).
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn hinval_gvma(gaddr: usize, vmid: usize) {
// asm!("hinval.gvma {}, {}", in(reg) gaddr, in(reg) vmid, options(nostack))
asm!(".insn r 0x73, 0, 0x33, x0, {}, {}", in(reg) gaddr, in(reg) vmid, options(nostack))
@@ -457,6 +500,7 @@ pub unsafe fn hinval_gvma(gaddr: usize, vmid: usize) {
///
/// This fence specifies a single guest physical address; **the physical address should be shifted right by 2 bits**.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn hinval_gvma_gaddr(gaddr: usize) {
asm!(".insn r 0x73, 0, 0x33, x0, {}, x0", in(reg) gaddr, options(nostack))
}
@@ -468,6 +512,7 @@ pub unsafe fn hinval_gvma_gaddr(gaddr: usize) {
///
/// This fence specifies a single virtual machine by virtual machine identifier (VMID).
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn hinval_gvma_vmid(vmid: usize) {
asm!(".insn r 0x73, 0, 0x33, x0, x0, {}", in(reg) vmid, options(nostack))
}
@@ -479,6 +524,7 @@ pub unsafe fn hinval_gvma_vmid(vmid: usize) {
///
/// This fence specifies all guest physical addresses and all virtual machines.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub unsafe fn hinval_gvma_all() {
asm!(".insn r 0x73, 0, 0x33, x0, x0, x0", options(nostack))
}
@@ -502,6 +548,7 @@ pub unsafe fn hinval_gvma_all() {
/// [`frrm`]: fn.frrm.html
/// [`frflags`]: fn.frflags.html
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub fn frcsr() -> u32 {
let value: u32;
unsafe { asm!("frcsr {}", out(reg) value, options(nomem, nostack)) };
@@ -513,6 +560,7 @@ pub fn frcsr() -> u32 {
/// This function swaps the value in `fcsr` by copying the original value to be returned,
/// and then writing a new value obtained from input variable `value` into `fcsr`.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub fn fscsr(value: u32) -> u32 {
let original: u32;
unsafe { asm!("fscsr {}, {}", out(reg) original, in(reg) value, options(nomem, nostack)) }
@@ -535,6 +583,7 @@ pub fn fscsr(value: u32) -> u32 {
/// | 110 | | _Reserved for future use._ |
/// | 111 | DYN | In Rounding Mode register, _reserved_. |
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub fn frrm() -> u32 {
let value: u32;
unsafe { asm!("frrm {}", out(reg) value, options(nomem, nostack)) };
@@ -547,6 +596,7 @@ pub fn frrm() -> u32 {
/// and then writing a new value obtained from the three least-significant bits of
/// input variable `value` into `frm`.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub fn fsrm(value: u32) -> u32 {
let original: u32;
unsafe { asm!("fsrm {}, {}", out(reg) original, in(reg) value, options(nomem, nostack)) }
@@ -570,6 +620,7 @@ pub fn fsrm(value: u32) -> u32 {
/// | 1 | UF | Underflow |
/// | 0 | NX | Inexact |
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub fn frflags() -> u32 {
let value: u32;
unsafe { asm!("frflags {}", out(reg) value, options(nomem, nostack)) };
@@ -582,179 +633,9 @@ pub fn frflags() -> u32 {
/// and then writing a new value obtained from the five least-significant bits of
/// input variable `value` into `fflags`.
#[inline]
+#[unstable(feature = "stdsimd", issue = "27731")]
pub fn fsflags(value: u32) -> u32 {
let original: u32;
unsafe { asm!("fsflags {}, {}", out(reg) original, in(reg) value, options(nomem, nostack)) }
original
}
-
-/// `P0` transformation function as is used in the SM3 hash algorithm
-///
-/// This function is included in `Zksh` extension. It's defined as:
-///
-/// ```text
-/// P0(X) = X ⊕ (X ≪ 9) ⊕ (X ≪ 17)
-/// ```
-///
-/// where `⊕` represents 32-bit xor, and `≪ k` represents rotate left by `k` bits.
-///
-/// In the SM3 algorithm, the `P0` transformation is used as `E ← P0(TT2)` when the
-/// compression function `CF` uses the intermediate value `TT2` to calculate
-/// the variable `E` in one iteration for subsequent processes.
-///
-/// According to RISC-V Cryptography Extensions, Volume I, the execution latency of
-/// this instruction must always be independent from the data it operates on.
-#[inline]
-#[target_feature(enable = "zksh")]
-pub fn sm3p0(x: u32) -> u32 {
- let ans: u32;
- unsafe { asm!("sm3p0 {}, {}", lateout(reg) ans, in(reg) x, options(pure, nomem, nostack)) };
- ans
-}
-
-/// `P1` transformation function as is used in the SM3 hash algorithm
-///
-/// This function is included in `Zksh` extension. It's defined as:
-///
-/// ```text
-/// P1(X) = X ⊕ (X ≪ 15) ⊕ (X ≪ 23)
-/// ```
-///
-/// where `⊕` represents 32-bit xor, and `≪ k` represents rotate left by `k` bits.
-///
-/// In the SM3 algorithm, the `P1` transformation is used to expand message,
-/// where expanded word `Wj` can be generated from the previous words.
-/// The whole process can be described as the following pseudocode:
-///
-/// ```text
-/// FOR j=16 TO 67
-/// Wj ← P1(Wj−16 ⊕ Wj−9 ⊕ (Wj−3 ≪ 15)) ⊕ (Wj−13 ≪ 7) ⊕ Wj−6
-/// ENDFOR
-/// ```
-///
-/// According to RISC-V Cryptography Extensions, Volume I, the execution latency of
-/// this instruction must always be independent from the data it operates on.
-#[inline]
-#[target_feature(enable = "zksh")]
-pub fn sm3p1(x: u32) -> u32 {
- let ans: u32;
- unsafe { asm!("sm3p1 {}, {}", lateout(reg) ans, in(reg) x, options(pure, nomem, nostack)) };
- ans
-}
-
-/// Accelerates the round function `F` in the SM4 block cipher algorithm
-///
-/// This instruction is included in extension `Zksed`. It's defined as:
-///
-/// ```text
-/// SM4ED(x, a, BS) = x ⊕ T(ai)
-/// ... where
-/// ai = a.bytes[BS]
-/// T(ai) = L(τ(ai))
-/// bi = τ(ai) = SM4-S-Box(ai)
-/// ci = L(bi) = bi ⊕ (bi ≪ 2) ⊕ (bi ≪ 10) ⊕ (bi ≪ 18) ⊕ (bi ≪ 24)
-/// SM4ED = (ci ≪ (BS * 8)) ⊕ x
-/// ```
-///
-/// where `⊕` represents 32-bit xor, and `≪ k` represents rotate left by `k` bits.
-/// As is defined above, `T` is a combined transformation of non linear S-Box transform `τ`
-/// and linear layer transform `L`.
-///
-/// In the SM4 algorithm, the round function `F` is defined as:
-///
-/// ```text
-/// F(x0, x1, x2, x3, rk) = x0 ⊕ T(x1 ⊕ x2 ⊕ x3 ⊕ rk)
-/// ... where
-/// T(A) = L(τ(A))
-/// B = τ(A) = (SM4-S-Box(a0), SM4-S-Box(a1), SM4-S-Box(a2), SM4-S-Box(a3))
-/// C = L(B) = B ⊕ (B ≪ 2) ⊕ (B ≪ 10) ⊕ (B ≪ 18) ⊕ (B ≪ 24)
-/// ```
-///
-/// It can be implemented by `sm4ed` instruction like:
-///
-/// ```no_run
-/// # #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
-/// # fn round_function(x0: u32, x1: u32, x2: u32, x3: u32, rk: u32) -> u32 {
-/// # #[cfg(target_arch = "riscv32")] use core::arch::riscv32::sm4ed;
-/// # #[cfg(target_arch = "riscv64")] use core::arch::riscv64::sm4ed;
-/// let a = x1 ^ x2 ^ x3 ^ rk;
-/// let c0 = sm4ed::<0>(x0, a);
-/// let c1 = sm4ed::<1>(c0, a); // c1 represents c[0..=1], etc.
-/// let c2 = sm4ed::<2>(c1, a);
-/// let c3 = sm4ed::<3>(c2, a);
-/// return c3; // c3 represents c[0..=3]
-/// # }
-/// ```
-///
-/// According to RISC-V Cryptography Extensions, Volume I, the execution latency of
-/// this instruction must always be independent from the data it operates on.
-#[inline]
-#[target_feature(enable = "zksed")]
-pub fn sm4ed<const BS: u8>(x: u32, a: u32) -> u32 {
- static_assert!(BS <= 3);
- let ans: u32;
- unsafe {
- asm!("sm4ed {}, {}, {}, {}", lateout(reg) ans, in(reg) x, in(reg) a, const BS, options(pure, nomem, nostack))
- };
- ans
-}
-
-/// Accelerates the key schedule operation in the SM4 block cipher algorithm
-///
-/// This instruction is included in extension `Zksed`. It's defined as:
-///
-/// ```text
-/// SM4KS(x, k, BS) = x ⊕ T'(ki)
-/// ... where
-/// ki = k.bytes[BS]
-/// T'(ki) = L'(τ(ki))
-/// bi = τ(ki) = SM4-S-Box(ki)
-/// ci = L'(bi) = bi ⊕ (bi ≪ 13) ⊕ (bi ≪ 23)
-/// SM4KS = (ci ≪ (BS * 8)) ⊕ x
-/// ```
-///
-/// where `⊕` represents 32-bit xor, and `≪ k` represents rotate left by `k` bits.
-/// As is defined above, `T'` is a combined transformation of non linear S-Box transform `τ`
-/// and the replaced linear layer transform `L'`.
-///
-/// In the SM4 algorithm, the key schedule is defined as:
-///
-/// ```text
-/// rk[i] = K[i+4] = K[i] ⊕ T'(K[i+1] ⊕ K[i+2] ⊕ K[i+3] ⊕ CK[i])
-/// ... where
-/// K[0..=3] = MK[0..=3] ⊕ FK[0..=3]
-/// T'(K) = L'(τ(K))
-/// B = τ(K) = (SM4-S-Box(k0), SM4-S-Box(k1), SM4-S-Box(k2), SM4-S-Box(k3))
-/// C = L'(B) = B ⊕ (B ≪ 13) ⊕ (B ≪ 23)
-/// ```
-///
-/// where `MK` represents the input 128-bit encryption key,
-/// constants `FK` and `CK` are fixed system configuration constant values defined by the SM4 algorithm.
-/// Hence, the key schedule operation can be implemented by `sm4ks` instruction like:
-///
-/// ```no_run
-/// # #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
-/// # fn key_schedule(k0: u32, k1: u32, k2: u32, k3: u32, ck_i: u32) -> u32 {
-/// # #[cfg(target_arch = "riscv32")] use core::arch::riscv32::sm4ks;
-/// # #[cfg(target_arch = "riscv64")] use core::arch::riscv64::sm4ks;
-/// let k = k1 ^ k2 ^ k3 ^ ck_i;
-/// let c0 = sm4ks::<0>(k0, k);
-/// let c1 = sm4ks::<1>(c0, k); // c1 represents c[0..=1], etc.
-/// let c2 = sm4ks::<2>(c1, k);
-/// let c3 = sm4ks::<3>(c2, k);
-/// return c3; // c3 represents c[0..=3]
-/// # }
-/// ```
-///
-/// According to RISC-V Cryptography Extensions, Volume I, the execution latency of
-/// this instruction must always be independent from the data it operates on.
-#[inline]
-#[target_feature(enable = "zksed")]
-pub fn sm4ks<const BS: u8>(x: u32, k: u32) -> u32 {
- static_assert!(BS <= 3);
- let ans: u32;
- unsafe {
- asm!("sm4ks {}, {}, {}, {}", lateout(reg) ans, in(reg) x, in(reg) k, const BS, options(pure, nomem, nostack))
- };
- ans
-}
diff --git a/library/stdarch/crates/core_arch/src/riscv_shared/zb.rs b/library/stdarch/crates/core_arch/src/riscv_shared/zb.rs
new file mode 100644
index 000000000..cfae6caa5
--- /dev/null
+++ b/library/stdarch/crates/core_arch/src/riscv_shared/zb.rs
@@ -0,0 +1,150 @@
+#[cfg(test)]
+use stdarch_test::assert_instr;
+
+#[cfg(target_arch = "riscv32")]
+extern "unadjusted" {
+ #[link_name = "llvm.riscv.orc.b.i32"]
+ fn _orc_b_32(rs: i32) -> i32;
+
+ #[link_name = "llvm.riscv.clmul.i32"]
+ fn _clmul_32(rs1: i32, rs2: i32) -> i32;
+
+ #[link_name = "llvm.riscv.clmulh.i32"]
+ fn _clmulh_32(rs1: i32, rs2: i32) -> i32;
+
+ #[link_name = "llvm.riscv.clmulr.i32"]
+ fn _clmulr_32(rs1: i32, rs2: i32) -> i32;
+}
+
+#[cfg(target_arch = "riscv64")]
+extern "unadjusted" {
+ #[link_name = "llvm.riscv.orc.b.i64"]
+ fn _orc_b_64(rs1: i64) -> i64;
+
+ #[link_name = "llvm.riscv.clmul.i64"]
+ fn _clmul_64(rs1: i64, rs2: i64) -> i64;
+
+ #[link_name = "llvm.riscv.clmulh.i64"]
+ fn _clmulh_64(rs1: i64, rs2: i64) -> i64;
+
+ #[link_name = "llvm.riscv.clmulr.i64"]
+ fn _clmulr_64(rs1: i64, rs2: i64) -> i64;
+}
+
+/// Bitwise OR-Combine, byte granule
+///
+/// Combines the bits within every byte through a reciprocal bitwise logical OR. This sets the bits of each byte in
+/// the result rd to all zeros if no bit within the respective byte of rs is set, or to all ones if any bit within the
+/// respective byte of rs is set.
+///
+/// Source: RISC-V Bit-Manipulation ISA-extensions
+///
+/// Version: v1.0.0
+///
+/// Section: 2.24
+///
+/// # Safety
+///
+/// This function is safe to use if the `zbb` target feature is present.
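+///
+/// # Details
+///
+/// Because every byte of the result is either `0x00` or `0xff`, `orc_b` can be used to test
+/// whether a word contains a zero byte, a common building block for string scanning; the
+/// helper below is an illustrative sketch:
+///
+/// ```no_run
+/// # #[cfg(target_arch = "riscv64")]
+/// # fn has_zero_byte(x: usize) -> bool {
+/// # use core::arch::riscv64::orc_b;
+/// // A result byte is 0x00 exactly when the corresponding input byte was zero.
+/// unsafe { orc_b(x) != usize::MAX }
+/// # }
+/// ```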
+#[target_feature(enable = "zbb")]
+// See #1464
+// #[cfg_attr(test, assert_instr(orc.b))]
+#[inline]
+pub unsafe fn orc_b(rs: usize) -> usize {
+ #[cfg(target_arch = "riscv32")]
+ {
+ _orc_b_32(rs as i32) as usize
+ }
+
+ #[cfg(target_arch = "riscv64")]
+ {
+ _orc_b_64(rs as i64) as usize
+ }
+}
+
+/// Carry-less multiply (low-part)
+///
+/// clmul produces the lower half of the 2·XLEN carry-less product.
+///
+/// Source: RISC-V Bit-Manipulation ISA-extensions
+///
+/// Version: v1.0.0
+///
+/// Section: 2.11
+///
+/// # Safety
+///
+/// This function is safe to use if the `zbc` target feature is present.
+#[target_feature(enable = "zbc")]
+// See #1464
+// #[cfg_attr(test, assert_instr(clmul))]
+#[inline]
+pub unsafe fn clmul(rs1: usize, rs2: usize) -> usize {
+ #[cfg(target_arch = "riscv32")]
+ {
+ _clmul_32(rs1 as i32, rs2 as i32) as usize
+ }
+
+ #[cfg(target_arch = "riscv64")]
+ {
+ _clmul_64(rs1 as i64, rs2 as i64) as usize
+ }
+}
+
+/// Carry-less multiply (high-part)
+///
+/// clmulh produces the upper half of the 2·XLEN carry-less product.
+///
+/// Source: RISC-V Bit-Manipulation ISA-extensions
+///
+/// Version: v1.0.0
+///
+/// Section: 2.12
+///
+/// # Safety
+///
+/// This function is safe to use if the `zbc` target feature is present.
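+///
+/// # Details
+///
+/// Together with `clmul`, the full 2·XLEN-bit carry-less product can be assembled, analogous
+/// to a widening multiply; the helper below is an illustrative sketch:
+///
+/// ```no_run
+/// # #[cfg(target_arch = "riscv64")]
+/// # fn widening_clmul(a: usize, b: usize) -> (usize, usize) {
+/// # use core::arch::riscv64::{clmul, clmulh};
+/// // `clmul` yields the low XLEN bits and `clmulh` the high XLEN bits of the same product.
+/// let lo = unsafe { clmul(a, b) };
+/// let hi = unsafe { clmulh(a, b) };
+/// (hi, lo)
+/// # }
+/// ```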
+#[target_feature(enable = "zbc")]
+// See #1464
+// #[cfg_attr(test, assert_instr(clmulh))]
+#[inline]
+pub unsafe fn clmulh(rs1: usize, rs2: usize) -> usize {
+ #[cfg(target_arch = "riscv32")]
+ {
+ _clmulh_32(rs1 as i32, rs2 as i32) as usize
+ }
+
+ #[cfg(target_arch = "riscv64")]
+ {
+ _clmulh_64(rs1 as i64, rs2 as i64) as usize
+ }
+}
+
+/// Carry-less multiply (reversed)
+///
+/// clmulr produces bits 2·XLEN−2:XLEN−1 of the 2·XLEN carry-less product.
+///
+/// Source: RISC-V Bit-Manipulation ISA-extensions
+///
+/// Version: v1.0.0
+///
+/// Section: 2.13
+///
+/// # Safety
+///
+/// This function is safe to use if the `zbc` target feature is present.
+#[target_feature(enable = "zbc")]
+// See #1464
+// #[cfg_attr(test, assert_instr(clmulr))]
+#[inline]
+pub unsafe fn clmulr(rs1: usize, rs2: usize) -> usize {
+ #[cfg(target_arch = "riscv32")]
+ {
+ _clmulr_32(rs1 as i32, rs2 as i32) as usize
+ }
+
+ #[cfg(target_arch = "riscv64")]
+ {
+ _clmulr_64(rs1 as i64, rs2 as i64) as usize
+ }
+}
diff --git a/library/stdarch/crates/core_arch/src/riscv_shared/zk.rs b/library/stdarch/crates/core_arch/src/riscv_shared/zk.rs
new file mode 100644
index 000000000..db97f72bc
--- /dev/null
+++ b/library/stdarch/crates/core_arch/src/riscv_shared/zk.rs
@@ -0,0 +1,462 @@
+#[cfg(test)]
+use stdarch_test::assert_instr;
+
+extern "unadjusted" {
+ #[link_name = "llvm.riscv.sm4ed"]
+ fn _sm4ed(rs1: i32, rs2: i32, bs: i32) -> i32;
+
+ #[link_name = "llvm.riscv.sm4ks"]
+ fn _sm4ks(rs1: i32, rs2: i32, bs: i32) -> i32;
+
+ #[link_name = "llvm.riscv.sm3p0"]
+ fn _sm3p0(rs1: i32) -> i32;
+
+ #[link_name = "llvm.riscv.sm3p1"]
+ fn _sm3p1(rs1: i32) -> i32;
+
+ #[link_name = "llvm.riscv.sha256sig0"]
+ fn _sha256sig0(rs1: i32) -> i32;
+
+ #[link_name = "llvm.riscv.sha256sig1"]
+ fn _sha256sig1(rs1: i32) -> i32;
+
+ #[link_name = "llvm.riscv.sha256sum0"]
+ fn _sha256sum0(rs1: i32) -> i32;
+
+ #[link_name = "llvm.riscv.sha256sum1"]
+ fn _sha256sum1(rs1: i32) -> i32;
+}
+
+#[cfg(target_arch = "riscv32")]
+extern "unadjusted" {
+ #[link_name = "llvm.riscv.xperm8.i32"]
+ fn _xperm8_32(rs1: i32, rs2: i32) -> i32;
+
+ #[link_name = "llvm.riscv.xperm4.i32"]
+ fn _xperm4_32(rs1: i32, rs2: i32) -> i32;
+}
+
+#[cfg(target_arch = "riscv64")]
+extern "unadjusted" {
+ #[link_name = "llvm.riscv.xperm8.i64"]
+ fn _xperm8_64(rs1: i64, rs2: i64) -> i64;
+
+ #[link_name = "llvm.riscv.xperm4.i64"]
+ fn _xperm4_64(rs1: i64, rs2: i64) -> i64;
+}
+
+/// Byte-wise lookup of indices into a vector in registers.
+///
+/// The xperm8 instruction operates on bytes. The rs1 register contains a vector of XLEN/8
+/// 8-bit elements. The rs2 register contains a vector of XLEN/8 8-bit indexes. The result is
+/// each element in rs2 replaced by the indexed element in rs1, or zero if the index is out of
+/// bounds.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.47
+///
+/// # Safety
+///
+/// This function is safe to use if the `zbkx` target feature is present.
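+///
+/// # Details
+///
+/// Since each result byte is the `rs1` byte selected by the corresponding `rs2` index, a
+/// constant index vector expresses a byte shuffle. The byte-reversal below is an illustrative
+/// sketch for RV64; the index constant is derived from the description above, not taken from
+/// the specification's examples:
+///
+/// ```no_run
+/// # #[cfg(target_arch = "riscv64")]
+/// # fn reverse_bytes(x: usize) -> usize {
+/// # use core::arch::riscv64::xperm8;
+/// // Index bytes 7, 6, ..., 0 (least-significant index byte first) pick the input bytes in
+/// // reverse order, which matches `usize::swap_bytes` on RV64.
+/// unsafe { xperm8(x, 0x0001_0203_0405_0607) }
+/// # }
+/// ```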
+#[target_feature(enable = "zbkx")]
+// See #1464
+// #[cfg_attr(test, assert_instr(xperm8))]
+#[inline]
+pub unsafe fn xperm8(rs1: usize, rs2: usize) -> usize {
+ #[cfg(target_arch = "riscv32")]
+ {
+ _xperm8_32(rs1 as i32, rs2 as i32) as usize
+ }
+
+ #[cfg(target_arch = "riscv64")]
+ {
+ _xperm8_64(rs1 as i64, rs2 as i64) as usize
+ }
+}
+
+/// Nibble-wise lookup of indices into a vector.
+///
+/// The xperm4 instruction operates on nibbles. The rs1 register contains a vector of XLEN/4
+/// 4-bit elements. The rs2 register contains a vector of XLEN/4 4-bit indexes. The result is
+/// each element in rs2 replaced by the indexed element in rs1, or zero if the index is out of
+/// bounds.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.48
+///
+/// # Safety
+///
+/// This function is safe to use if the `zbkx` target feature is present.
+#[target_feature(enable = "zbkx")]
+// See #1464
+// #[cfg_attr(test, assert_instr(xperm4))]
+#[inline]
+pub unsafe fn xperm4(rs1: usize, rs2: usize) -> usize {
+ #[cfg(target_arch = "riscv32")]
+ {
+ _xperm4_32(rs1 as i32, rs2 as i32) as usize
+ }
+
+ #[cfg(target_arch = "riscv64")]
+ {
+ _xperm4_64(rs1 as i64, rs2 as i64) as usize
+ }
+}
+
+/// Implements the Sigma0 transformation function as used in the SHA2-256 hash function \[49\]
+/// (Section 4.1.2).
+///
+/// This instruction is supported for both RV32 and RV64 base architectures. For RV32, the
+/// entire XLEN source register is operated on. For RV64, the low 32 bits of the source
+/// register are operated on, and the result sign extended to XLEN bits. Though named for
+/// SHA2-256, the instruction works for both the SHA2-224 and SHA2-256 parameterisations as
+/// described in \[49\]. This instruction must always be implemented such that its execution
+/// latency does not depend on the data being operated on.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.27
+///
+/// # Safety
+///
+/// This function is safe to use if the `zknh` target feature is present.
+#[target_feature(enable = "zknh")]
+// See #1464
+// #[cfg_attr(test, assert_instr(sha256sig0))]
+#[inline]
+pub unsafe fn sha256sig0(rs1: u32) -> u32 {
+ _sha256sig0(rs1 as i32) as u32
+}
+
+/// Implements the Sigma1 transformation function as used in the SHA2-256 hash function \[49\]
+/// (Section 4.1.2).
+///
+/// This instruction is supported for both RV32 and RV64 base architectures. For RV32, the
+/// entire XLEN source register is operated on. For RV64, the low 32 bits of the source
+/// register are operated on, and the result sign extended to XLEN bits. Though named for
+/// SHA2-256, the instruction works for both the SHA2-224 and SHA2-256 parameterisations as
+/// described in \[49\]. This instruction must always be implemented such that its execution
+/// latency does not depend on the data being operated on.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.28
+///
+/// # Safety
+///
+/// This function is safe to use if the `zknh` target feature is present.
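+///
+/// # Details
+///
+/// A sketch of the SHA2-256 message schedule from FIPS 180-4,
+/// `W[t] = σ1(W[t-2]) + W[t-7] + σ0(W[t-15]) + W[t-16]`, using this intrinsic together with
+/// `sha256sig0`; the helper shown here is illustrative only:
+///
+/// ```no_run
+/// # #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
+/// # fn expand(w: &mut [u32; 64]) {
+/// # #[cfg(target_arch = "riscv32")] use core::arch::riscv32::{sha256sig0, sha256sig1};
+/// # #[cfg(target_arch = "riscv64")] use core::arch::riscv64::{sha256sig0, sha256sig1};
+/// for t in 16..64 {
+///     // Wrapping adds model the modulo 2^32 additions of the hash function.
+///     w[t] = unsafe { sha256sig1(w[t - 2]) }
+///         .wrapping_add(w[t - 7])
+///         .wrapping_add(unsafe { sha256sig0(w[t - 15]) })
+///         .wrapping_add(w[t - 16]);
+/// }
+/// # }
+/// ```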
+#[target_feature(enable = "zknh")]
+// See #1464
+// #[cfg_attr(test, assert_instr(sha256sig1))]
+#[inline]
+pub unsafe fn sha256sig1(rs1: u32) -> u32 {
+ _sha256sig1(rs1 as i32) as u32
+}
+
+/// Implements the Sum0 transformation function as used in the SHA2-256 hash function \[49\]
+/// (Section 4.1.2).
+///
+/// This instruction is supported for both RV32 and RV64 base architectures. For RV32, the
+/// entire XLEN source register is operated on. For RV64, the low 32 bits of the source
+/// register are operated on, and the result sign extended to XLEN bits. Though named for
+/// SHA2-256, the instruction works for both the SHA2-224 and SHA2-256 parameterisations as
+/// described in \[49\]. This instruction must always be implemented such that its execution
+/// latency does not depend on the data being operated on.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.29
+///
+/// # Safety
+///
+/// This function is safe to use if the `zknh` target feature is present.
+#[target_feature(enable = "zknh")]
+// See #1464
+// #[cfg_attr(test, assert_instr(sha256sum0))]
+#[inline]
+pub unsafe fn sha256sum0(rs1: u32) -> u32 {
+ _sha256sum0(rs1 as i32) as u32
+}
+
+/// Implements the Sum1 transformation function as used in the SHA2-256 hash function \[49\]
+/// (Section 4.1.2).
+///
+/// This instruction is supported for both RV32 and RV64 base architectures. For RV32, the
+/// entire XLEN source register is operated on. For RV64, the low 32 bits of the source
+/// register are operated on, and the result sign extended to XLEN bits. Though named for
+/// SHA2-256, the instruction works for both the SHA2-224 and SHA2-256 parameterisations as
+/// described in \[49\]. This instruction must always be implemented such that its execution
+/// latency does not depend on the data being operated on.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.30
+///
+/// # Safety
+///
+/// This function is safe to use if the `zknh` target feature is present.
+#[target_feature(enable = "zknh")]
+// See #1464
+// #[cfg_attr(test, assert_instr(sha256sum1))]
+#[inline]
+pub unsafe fn sha256sum1(rs1: u32) -> u32 {
+ _sha256sum1(rs1 as i32) as u32
+}
+
+/// Accelerates the block encrypt/decrypt operation of the SM4 block cipher \[5, 31\].
+///
+/// Implements a T-tables in hardware style approach to accelerating the SM4 round function. A
+/// byte is extracted from rs2 based on bs, to which the SBox and linear layer transforms are
+/// applied, before the result is XOR’d with rs1 and written back to rd. This instruction
+/// exists on RV32 and RV64 base architectures. On RV64, the 32-bit result is sign extended to
+/// XLEN bits. This instruction must always be implemented such that its execution latency does
+/// not depend on the data being operated on.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.43
+///
+/// # Note
+///
+/// The `BS` parameter is expected to be a constant value and only the bottom 2 bits of `bs` are
+/// used.
+///
+/// # Safety
+///
+/// This function is safe to use if the `zksed` target feature is present.
+///
+/// # Details
+///
+/// Accelerates the round function `F` in the SM4 block cipher algorithm
+///
+/// This instruction is included in extension `Zksed`. It's defined as:
+///
+/// ```text
+/// SM4ED(x, a, BS) = x ⊕ T(ai)
+/// ... where
+/// ai = a.bytes[BS]
+/// T(ai) = L(τ(ai))
+/// bi = τ(ai) = SM4-S-Box(ai)
+/// ci = L(bi) = bi ⊕ (bi ≪ 2) ⊕ (bi ≪ 10) ⊕ (bi ≪ 18) ⊕ (bi ≪ 24)
+/// SM4ED = (ci ≪ (BS * 8)) ⊕ x
+/// ```
+///
+/// where `⊕` represents 32-bit xor, and `≪ k` represents rotate left by `k` bits.
+/// As is defined above, `T` is a combined transformation of non linear S-Box transform `τ`
+/// and linear layer transform `L`.
+///
+/// In the SM4 algorithm, the round function `F` is defined as:
+///
+/// ```text
+/// F(x0, x1, x2, x3, rk) = x0 ⊕ T(x1 ⊕ x2 ⊕ x3 ⊕ rk)
+/// ... where
+/// T(A) = L(τ(A))
+/// B = τ(A) = (SM4-S-Box(a0), SM4-S-Box(a1), SM4-S-Box(a2), SM4-S-Box(a3))
+/// C = L(B) = B ⊕ (B ≪ 2) ⊕ (B ≪ 10) ⊕ (B ≪ 18) ⊕ (B ≪ 24)
+/// ```
+///
+/// It can be implemented by `sm4ed` instruction like:
+///
+/// ```no_run
+/// # #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
+/// # unsafe fn round_function(x0: u32, x1: u32, x2: u32, x3: u32, rk: u32) -> u32 {
+/// # #[cfg(target_arch = "riscv32")] use core::arch::riscv32::sm4ed;
+/// # #[cfg(target_arch = "riscv64")] use core::arch::riscv64::sm4ed;
+/// let a = x1 ^ x2 ^ x3 ^ rk;
+/// let c0 = sm4ed(x0, a, 0);
+/// let c1 = sm4ed(c0, a, 1); // c1 represents c[0..=1], etc.
+/// let c2 = sm4ed(c1, a, 2);
+/// let c3 = sm4ed(c2, a, 3);
+/// return c3; // c3 represents c[0..=3]
+/// # }
+/// ```
+#[target_feature(enable = "zksed")]
+#[rustc_legacy_const_generics(2)]
+// See #1464
+// #[cfg_attr(test, assert_instr(sm4ed, BS = 0))]
+#[inline]
+pub unsafe fn sm4ed<const BS: u8>(rs1: u32, rs2: u32) -> u32 {
+ static_assert!(BS < 4);
+
+ _sm4ed(rs1 as i32, rs2 as i32, BS as i32) as u32
+}
+
+/// Accelerates the Key Schedule operation of the SM4 block cipher \[5, 31\].
+///
+/// Implements a T-tables in hardware style approach to accelerating the SM4 Key Schedule. A
+/// byte is extracted from rs2 based on bs, to which the SBox and linear layer transforms are
+/// applied, before the result is XOR’d with rs1 and written back to rd. This instruction
+/// exists on RV32 and RV64 base architectures. On RV64, the 32-bit result is sign extended to
+/// XLEN bits. This instruction must always be implemented such that its execution latency does
+/// not depend on the data being operated on.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.44
+///
+/// # Note
+///
+/// The `BS` parameter is expected to be a constant value; only the bottom 2 bits of `BS` are
+/// used.
+///
+/// # Safety
+///
+/// This function is safe to use if the `zksed` target feature is present.
+///
+/// # Details
+///
+/// Accelerates the key schedule operation in the SM4 block cipher algorithm
+///
+/// This instruction is included in extension `Zksed`. It's defined as:
+///
+/// ```text
+/// SM4KS(x, k, BS) = x ⊕ T'(ki)
+/// ... where
+/// ki = k.bytes[BS]
+/// T'(ki) = L'(τ(ki))
+/// bi = τ(ki) = SM4-S-Box(ki)
+/// ci = L'(bi) = bi ⊕ (bi ≪ 13) ⊕ (bi ≪ 23)
+/// SM4KS = (ci ≪ (BS * 8)) ⊕ x
+/// ```
+///
+/// where `⊕` represents 32-bit xor, and `≪ k` represents rotate left by `k` bits.
+/// As defined above, `T'` is a combined transformation of the non-linear S-Box transform `τ`
+/// and the key-schedule linear layer transform `L'`.
+///
+/// In the SM4 algorithm, the key schedule is defined as:
+///
+/// ```text
+/// rk[i] = K[i+4] = K[i] ⊕ T'(K[i+1] ⊕ K[i+2] ⊕ K[i+3] ⊕ CK[i])
+/// ... where
+/// K[0..=3] = MK[0..=3] ⊕ FK[0..=3]
+/// T'(A) = L'(τ(A))
+/// B = τ(A) = (SM4-S-Box(a0), SM4-S-Box(a1), SM4-S-Box(a2), SM4-S-Box(a3))
+/// C = L'(B) = B ⊕ (B ≪ 13) ⊕ (B ≪ 23)
+/// ```
+///
+/// It can be implemented with the `sm4ks` instruction as follows:
+///
+/// ```no_run
+/// # #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
+/// # fn key_schedule(k0: u32, k1: u32, k2: u32, k3: u32, ck: u32) -> u32 {
+/// # #[cfg(target_arch = "riscv32")] use core::arch::riscv32::sm4ks;
+/// # #[cfg(target_arch = "riscv64")] use core::arch::riscv64::sm4ks;
+/// let k = k1 ^ k2 ^ k3 ^ ck;
+/// let c0 = sm4ks(k0, k, 0);
+/// let c1 = sm4ks(c0, k, 1); // c1 represents c[0..=1], etc.
+/// let c2 = sm4ks(c1, k, 2);
+/// let c3 = sm4ks(c2, k, 3);
+/// return c3; // c3 represents c[0..=3], i.e. the next round key rk[i]
+/// # }
+/// ```
+#[target_feature(enable = "zksed")]
+#[rustc_legacy_const_generics(2)]
+// See #1464
+// #[cfg_attr(test, assert_instr(sm4ks, BS = 0))]
+#[inline]
+pub unsafe fn sm4ks<const BS: u8>(rs1: u32, rs2: u32) -> u32 {
+ static_assert!(BS < 4);
+
+ _sm4ks(rs1 as i32, rs2 as i32, BS as i32) as u32
+}
+
+/// Implements the P0 transformation function as used in the SM3 hash function \[4, 30\].
+///
+/// This instruction is supported for the RV32 and RV64 base architectures. It implements the
+/// P0 transform of the SM3 hash function \[4, 30\]. This instruction must always be implemented
+/// such that its execution latency does not depend on the data being operated on.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.41
+///
+/// # Safety
+///
+/// This function is safe to use if the `zksh` target feature is present.
+///
+/// # Details
+///
+/// `P0` transformation function as used in the SM3 hash algorithm
+///
+/// This function is included in the `Zksh` extension. It's defined as:
+///
+/// ```text
+/// P0(X) = X ⊕ (X ≪ 9) ⊕ (X ≪ 17)
+/// ```
+///
+/// where `⊕` represents 32-bit xor, and `≪ k` represents rotate left by `k` bits.
+///
+/// In the SM3 algorithm, the `P0` transformation appears as `E ← P0(TT2)`: in each
+/// iteration the compression function `CF` applies it to the intermediate value `TT2`
+/// to produce the variable `E` used by the subsequent steps.
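+///
+/// For illustration only, the same bit operations can be written in plain Rust
+/// (a software sketch of the formula above, not the `sm3p0` intrinsic itself):
+///
+/// ```
+/// fn p0(x: u32) -> u32 {
+///     // X ⊕ (X ≪ 9) ⊕ (X ≪ 17)
+///     x ^ x.rotate_left(9) ^ x.rotate_left(17)
+/// }
+/// assert_eq!(p0(0x0000_0001), 0x0002_0201);
+/// ```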
+#[target_feature(enable = "zksh")]
+// See #1464
+// #[cfg_attr(test, assert_instr(sm3p0))]
+#[inline]
+pub unsafe fn sm3p0(rs1: u32) -> u32 {
+ _sm3p0(rs1 as i32) as u32
+}
+
+/// Implements the P1 transformation function as used in the SM3 hash function \[4, 30\].
+///
+/// This instruction is supported for the RV32 and RV64 base architectures. It implements the
+/// P1 transform of the SM3 hash function \[4, 30\]. This instruction must always be implemented
+/// such that its execution latency does not depend on the data being operated on.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.42
+///
+/// # Safety
+///
+/// This function is safe to use if the `zksh` target feature is present.
+///
+/// # Details
+///
+/// `P1` transformation function as used in the SM3 hash algorithm
+///
+/// This function is included in the `Zksh` extension. It's defined as:
+///
+/// ```text
+/// P1(X) = X ⊕ (X ≪ 15) ⊕ (X ≪ 23)
+/// ```
+///
+/// where `⊕` represents 32-bit xor, and `≪ k` represents rotate left by `k` bits.
+///
+/// In the SM3 algorithm, the `P1` transformation is used in message expansion,
+/// where each expanded word `Wj` is generated from the previous words.
+/// The whole process can be described by the following pseudocode:
+///
+/// ```text
+/// FOR j=16 TO 67
+/// Wj ← P1(Wj−16 ⊕ Wj−9 ⊕ (Wj−3 ≪ 15)) ⊕ (Wj−13 ≪ 7) ⊕ Wj−6
+/// ENDFOR
+/// ```
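+///
+/// For illustration only, the transform itself can be written in plain Rust
+/// (a software sketch of the formula above, not the `sm3p1` intrinsic itself):
+///
+/// ```
+/// fn p1(x: u32) -> u32 {
+///     // X ⊕ (X ≪ 15) ⊕ (X ≪ 23)
+///     x ^ x.rotate_left(15) ^ x.rotate_left(23)
+/// }
+/// assert_eq!(p1(0x0000_0001), 0x0080_8001);
+/// ```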
+#[target_feature(enable = "zksh")]
+// See #1464
+// #[cfg_attr(test, assert_instr(sm3p1))]
+#[inline]
+pub unsafe fn sm3p1(rs1: u32) -> u32 {
+ _sm3p1(rs1 as i32) as u32
+}
diff --git a/library/stdarch/crates/core_arch/src/wasm32/relaxed_simd.rs b/library/stdarch/crates/core_arch/src/wasm32/relaxed_simd.rs
index 8fe935d1f..403fc79d0 100644
--- a/library/stdarch/crates/core_arch/src/wasm32/relaxed_simd.rs
+++ b/library/stdarch/crates/core_arch/src/wasm32/relaxed_simd.rs
@@ -303,11 +303,11 @@ pub fn i32x4_relaxed_dot_i8x16_i7x16_add(a: v128, b: v128, c: v128) -> v128 {
}
#[cfg(test)]
-pub mod tests {
+mod tests {
use super::super::simd128::*;
use super::*;
use core::ops::{Add, Div, Mul, Neg, Sub};
- use std;
+
use std::fmt::Debug;
use std::mem::transmute;
use std::num::Wrapping;
diff --git a/library/stdarch/crates/core_arch/src/wasm32/simd128.rs b/library/stdarch/crates/core_arch/src/wasm32/simd128.rs
index e974d9e56..4819195dc 100644
--- a/library/stdarch/crates/core_arch/src/wasm32/simd128.rs
+++ b/library/stdarch/crates/core_arch/src/wasm32/simd128.rs
@@ -672,7 +672,6 @@ pub unsafe fn v128_store64_lane<const L: usize>(v: v128, m: *mut u64) {
/// If possible this will generate a `v128.const` instruction, otherwise it may
/// be lowered to a sequence of instructions to materialize the vector value.
#[inline]
-#[target_feature(enable = "simd128")]
#[cfg_attr(
test,
assert_instr(
@@ -727,7 +726,6 @@ pub const fn i8x16(
/// If possible this will generate a `v128.const` instruction, otherwise it may
/// be lowered to a sequence of instructions to materialize the vector value.
#[inline]
-#[target_feature(enable = "simd128")]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
@@ -760,7 +758,6 @@ pub const fn u8x16(
/// If possible this will generate a `v128.const` instruction, otherwise it may
/// be lowered to a sequence of instructions to materialize the vector value.
#[inline]
-#[target_feature(enable = "simd128")]
#[cfg_attr(
test,
assert_instr(
@@ -787,7 +784,6 @@ pub const fn i16x8(a0: i16, a1: i16, a2: i16, a3: i16, a4: i16, a5: i16, a6: i16
/// If possible this will generate a `v128.const` instruction, otherwise it may
/// be lowered to a sequence of instructions to materialize the vector value.
#[inline]
-#[target_feature(enable = "simd128")]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
@@ -800,7 +796,6 @@ pub const fn u16x8(a0: u16, a1: u16, a2: u16, a3: u16, a4: u16, a5: u16, a6: u16
/// If possible this will generate a `v128.const` instruction, otherwise it may
/// be lowered to a sequence of instructions to materialize the vector value.
#[inline]
-#[target_feature(enable = "simd128")]
#[cfg_attr(test, assert_instr(v128.const, a0 = 0, a1 = 1, a2 = 2, a3 = 3))]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
@@ -814,7 +809,6 @@ pub const fn i32x4(a0: i32, a1: i32, a2: i32, a3: i32) -> v128 {
/// If possible this will generate a `v128.const` instruction, otherwise it may
/// be lowered to a sequence of instructions to materialize the vector value.
#[inline]
-#[target_feature(enable = "simd128")]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
@@ -827,7 +821,6 @@ pub const fn u32x4(a0: u32, a1: u32, a2: u32, a3: u32) -> v128 {
/// If possible this will generate a `v128.const` instruction, otherwise it may
/// be lowered to a sequence of instructions to materialize the vector value.
#[inline]
-#[target_feature(enable = "simd128")]
#[cfg_attr(test, assert_instr(v128.const, a0 = 1, a1 = 2))]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
@@ -841,7 +834,6 @@ pub const fn i64x2(a0: i64, a1: i64) -> v128 {
/// If possible this will generate a `v128.const` instruction, otherwise it may
/// be lowered to a sequence of instructions to materialize the vector value.
#[inline]
-#[target_feature(enable = "simd128")]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
@@ -854,7 +846,6 @@ pub const fn u64x2(a0: u64, a1: u64) -> v128 {
/// If possible this will generate a `v128.const` instruction, otherwise it may
/// be lowered to a sequence of instructions to materialize the vector value.
#[inline]
-#[target_feature(enable = "simd128")]
#[cfg_attr(test, assert_instr(v128.const, a0 = 0.0, a1 = 1.0, a2 = 2.0, a3 = 3.0))]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
@@ -868,7 +859,6 @@ pub const fn f32x4(a0: f32, a1: f32, a2: f32, a3: f32) -> v128 {
/// If possible this will generate a `v128.const` instruction, otherwise it may
/// be lowered to a sequence of instructions to materialize the vector value.
#[inline]
-#[target_feature(enable = "simd128")]
#[cfg_attr(test, assert_instr(v128.const, a0 = 0.0, a1 = 1.0))]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
@@ -3212,7 +3202,7 @@ pub fn i32x4_shr(a: v128, amt: u32) -> v128 {
#[doc(alias("i32x4.shr_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_shr(a: v128, amt: u32) -> v128 {
- unsafe { simd_shr(a.as_u32x4(), simd::u32x4::splat(amt as u32)).v128() }
+ unsafe { simd_shr(a.as_u32x4(), simd::u32x4::splat(amt)).v128() }
}
/// Adds two 128-bit vectors as if they were two packed four 32-bit integers.
@@ -4236,10 +4226,10 @@ pub fn f64x2_promote_low_f32x4(a: v128) -> v128 {
}
#[cfg(test)]
-pub mod tests {
+mod tests {
use super::*;
use core::ops::{Add, Div, Mul, Neg, Sub};
- use std;
+
use std::fmt::Debug;
use std::mem::transmute;
use std::num::Wrapping;
@@ -4587,8 +4577,8 @@ pub mod tests {
u8::MAX.into(),
),
i16x8(
- i16::MIN.into(),
- i16::MAX.into(),
+ i16::MIN,
+ i16::MAX,
u16::MIN as i16,
u16::MAX as i16,
0,
@@ -4613,8 +4603,8 @@ pub mod tests {
u8::MAX.into(),
),
i16x8(
- i16::MIN.into(),
- i16::MAX.into(),
+ i16::MIN,
+ i16::MAX,
u16::MIN as i16,
u16::MAX as i16,
0,
@@ -4634,12 +4624,7 @@ pub mod tests {
compare_bytes(
i16x8_narrow_i32x4(
i32x4(0, -1, i16::MIN.into(), i16::MAX.into()),
- i32x4(
- i32::MIN.into(),
- i32::MAX.into(),
- u32::MIN as i32,
- u32::MAX as i32,
- ),
+ i32x4(i32::MIN, i32::MAX, u32::MIN as i32, u32::MAX as i32),
),
i16x8(0, -1, i16::MIN, i16::MAX, i16::MIN, i16::MAX, 0, -1),
);
@@ -4647,12 +4632,7 @@ pub mod tests {
compare_bytes(
u16x8_narrow_i32x4(
i32x4(u16::MAX.into(), -1, i16::MIN.into(), i16::MAX.into()),
- i32x4(
- i32::MIN.into(),
- i32::MAX.into(),
- u32::MIN as i32,
- u32::MAX as i32,
- ),
+ i32x4(i32::MIN, i32::MAX, u32::MIN as i32, u32::MAX as i32),
),
i16x8(-1, 0, 0, i16::MAX, 0, -1, 0, 0),
);
diff --git a/library/stdarch/crates/core_arch/src/x86/avx.rs b/library/stdarch/crates/core_arch/src/x86/avx.rs
index fafee5c0b..00bcc1fa1 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx.rs
@@ -738,7 +738,7 @@ pub const _CMP_TRUE_US: i32 = 0x1f;
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cmp_pd<const IMM5: i32>(a: __m128d, b: __m128d) -> __m128d {
static_assert_uimm_bits!(IMM5, 5);
- vcmppd(a, b, IMM5 as i8)
+ vcmppd(a, b, const { IMM5 as i8 })
}
/// Compares packed double-precision (64-bit) floating-point
@@ -768,7 +768,7 @@ pub unsafe fn _mm256_cmp_pd<const IMM5: i32>(a: __m256d, b: __m256d) -> __m256d
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cmp_ps<const IMM5: i32>(a: __m128, b: __m128) -> __m128 {
static_assert_uimm_bits!(IMM5, 5);
- vcmpps(a, b, IMM5 as i8)
+ vcmpps(a, b, const { IMM5 as i8 })
}
/// Compares packed single-precision (32-bit) floating-point
@@ -783,7 +783,7 @@ pub unsafe fn _mm_cmp_ps<const IMM5: i32>(a: __m128, b: __m128) -> __m128 {
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_cmp_ps<const IMM5: i32>(a: __m256, b: __m256) -> __m256 {
static_assert_uimm_bits!(IMM5, 5);
- vcmpps256(a, b, IMM5 as u8)
+ vcmpps256(a, b, const { IMM5 as u8 })
}
/// Compares the lower double-precision (64-bit) floating-point element in
@@ -1028,7 +1028,7 @@ pub unsafe fn _mm_permutevar_ps(a: __m128, b: __m128i) -> __m128 {
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permute_ps)
#[inline]
#[target_feature(enable = "avx")]
-#[cfg_attr(test, assert_instr(vpermilps, IMM8 = 9))]
+#[cfg_attr(test, assert_instr(vshufps, IMM8 = 9))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_permute_ps<const IMM8: i32>(a: __m256) -> __m256 {
@@ -1055,7 +1055,7 @@ pub unsafe fn _mm256_permute_ps<const IMM8: i32>(a: __m256) -> __m256 {
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_permute_ps)
#[inline]
#[target_feature(enable = "avx,sse")]
-#[cfg_attr(test, assert_instr(vpermilps, IMM8 = 9))]
+#[cfg_attr(test, assert_instr(vshufps, IMM8 = 9))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_permute_ps<const IMM8: i32>(a: __m128) -> __m128 {
@@ -1102,7 +1102,7 @@ pub unsafe fn _mm_permutevar_pd(a: __m128d, b: __m128i) -> __m128d {
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permute_pd)
#[inline]
#[target_feature(enable = "avx")]
-#[cfg_attr(test, assert_instr(vpermilpd, IMM4 = 0x1))]
+#[cfg_attr(test, assert_instr(vshufpd, IMM4 = 0x1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_permute_pd<const IMM4: i32>(a: __m256d) -> __m256d {
@@ -1125,7 +1125,7 @@ pub unsafe fn _mm256_permute_pd<const IMM4: i32>(a: __m256d) -> __m256d {
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_permute_pd)
#[inline]
#[target_feature(enable = "avx,sse2")]
-#[cfg_attr(test, assert_instr(vpermilpd, IMM2 = 0x1))]
+#[cfg_attr(test, assert_instr(vshufpd, IMM2 = 0x1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_permute_pd<const IMM2: i32>(a: __m128d) -> __m128d {
@@ -1439,7 +1439,7 @@ pub unsafe fn _mm256_loadu_pd(mem_addr: *const f64) -> __m256d {
#[cfg_attr(test, assert_instr(vmovups))] // FIXME vmovupd expected
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_storeu_pd(mem_addr: *mut f64, a: __m256d) {
- storeupd256(mem_addr, a);
+ mem_addr.cast::<__m256d>().write_unaligned(a);
}
/// Loads 256-bits (composed of 8 packed single-precision (32-bit)
@@ -1471,7 +1471,7 @@ pub unsafe fn _mm256_loadu_ps(mem_addr: *const f32) -> __m256 {
#[cfg_attr(test, assert_instr(vmovups))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_storeu_ps(mem_addr: *mut f32, a: __m256) {
- storeups256(mem_addr, a);
+ mem_addr.cast::<__m256>().write_unaligned(a);
}
/// Loads 256-bits of integer data from memory into result.
@@ -1519,7 +1519,7 @@ pub unsafe fn _mm256_loadu_si256(mem_addr: *const __m256i) -> __m256i {
}
/// Stores 256-bits of integer data from `a` into memory.
-/// `mem_addr` does not need to be aligned on any particular boundary.
+/// `mem_addr` does not need to be aligned on any particular boundary.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_storeu_si256)
#[inline]
@@ -1527,7 +1527,7 @@ pub unsafe fn _mm256_loadu_si256(mem_addr: *const __m256i) -> __m256i {
#[cfg_attr(test, assert_instr(vmovups))] // FIXME vmovdqu expected
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_storeu_si256(mem_addr: *mut __m256i, a: __m256i) {
- storeudq256(mem_addr as *mut i8, a.as_i8x32());
+ mem_addr.write_unaligned(a);
}
/// Loads packed double-precision (64-bit) floating-point elements from memory
@@ -2974,12 +2974,6 @@ extern "C" {
fn vbroadcastf128ps256(a: &__m128) -> __m256;
#[link_name = "llvm.x86.avx.vbroadcastf128.pd.256"]
fn vbroadcastf128pd256(a: &__m128d) -> __m256d;
- #[link_name = "llvm.x86.avx.storeu.pd.256"]
- fn storeupd256(mem_addr: *mut f64, a: __m256d);
- #[link_name = "llvm.x86.avx.storeu.ps.256"]
- fn storeups256(mem_addr: *mut f32, a: __m256);
- #[link_name = "llvm.x86.avx.storeu.dq.256"]
- fn storeudq256(mem_addr: *mut i8, a: i8x32);
#[link_name = "llvm.x86.avx.maskload.pd.256"]
fn maskloadpd256(mem_addr: *const i8, mask: i64x4) -> __m256d;
#[link_name = "llvm.x86.avx.maskstore.pd.256"]
diff --git a/library/stdarch/crates/core_arch/src/x86/avx2.rs b/library/stdarch/crates/core_arch/src/x86/avx2.rs
index cdf84b382..e23c795ee 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx2.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx2.rs
@@ -2373,7 +2373,7 @@ pub unsafe fn _mm256_shuffle_epi8(a: __m256i, b: __m256i) -> __m256i {
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shuffle_epi32)
#[inline]
#[target_feature(enable = "avx2")]
-#[cfg_attr(test, assert_instr(vpermilps, MASK = 9))]
+#[cfg_attr(test, assert_instr(vshufps, MASK = 9))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_shuffle_epi32<const MASK: i32>(a: __m256i) -> __m256i {
@@ -2557,7 +2557,11 @@ pub unsafe fn _mm256_sll_epi64(a: __m256i, count: __m128i) -> __m256i {
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_slli_epi16<const IMM8: i32>(a: __m256i) -> __m256i {
static_assert_uimm_bits!(IMM8, 8);
- transmute(pslliw(a.as_i16x16(), IMM8))
+ if IMM8 >= 16 {
+ _mm256_setzero_si256()
+ } else {
+ transmute(simd_shl(a.as_u16x16(), u16x16::splat(IMM8 as u16)))
+ }
}
/// Shifts packed 32-bit integers in `a` left by `IMM8` while
@@ -2571,7 +2575,11 @@ pub unsafe fn _mm256_slli_epi16<const IMM8: i32>(a: __m256i) -> __m256i {
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_slli_epi32<const IMM8: i32>(a: __m256i) -> __m256i {
static_assert_uimm_bits!(IMM8, 8);
- transmute(psllid(a.as_i32x8(), IMM8))
+ if IMM8 >= 32 {
+ _mm256_setzero_si256()
+ } else {
+ transmute(simd_shl(a.as_u32x8(), u32x8::splat(IMM8 as u32)))
+ }
}
/// Shifts packed 64-bit integers in `a` left by `IMM8` while
@@ -2585,7 +2593,11 @@ pub unsafe fn _mm256_slli_epi32<const IMM8: i32>(a: __m256i) -> __m256i {
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_slli_epi64<const IMM8: i32>(a: __m256i) -> __m256i {
static_assert_uimm_bits!(IMM8, 8);
- transmute(pslliq(a.as_i64x4(), IMM8))
+ if IMM8 >= 64 {
+ _mm256_setzero_si256()
+ } else {
+ transmute(simd_shl(a.as_u64x4(), u64x4::splat(IMM8 as u64)))
+ }
}
/// Shifts 128-bit lanes in `a` left by `imm8` bytes while shifting in zeros.
@@ -2749,7 +2761,7 @@ pub unsafe fn _mm256_sra_epi32(a: __m256i, count: __m128i) -> __m256i {
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_srai_epi16<const IMM8: i32>(a: __m256i) -> __m256i {
static_assert_uimm_bits!(IMM8, 8);
- transmute(psraiw(a.as_i16x16(), IMM8))
+ transmute(simd_shr(a.as_i16x16(), i16x16::splat(IMM8.min(15) as i16)))
}
/// Shifts packed 32-bit integers in `a` right by `IMM8` while
@@ -2763,7 +2775,7 @@ pub unsafe fn _mm256_srai_epi16<const IMM8: i32>(a: __m256i) -> __m256i {
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_srai_epi32<const IMM8: i32>(a: __m256i) -> __m256i {
static_assert_uimm_bits!(IMM8, 8);
- transmute(psraid(a.as_i32x8(), IMM8))
+ transmute(simd_shr(a.as_i32x8(), i32x8::splat(IMM8.min(31))))
}
/// Shifts packed 32-bit integers in `a` right by the amount specified by the
@@ -2996,7 +3008,11 @@ pub unsafe fn _mm256_srl_epi64(a: __m256i, count: __m128i) -> __m256i {
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_srli_epi16<const IMM8: i32>(a: __m256i) -> __m256i {
static_assert_uimm_bits!(IMM8, 8);
- transmute(psrliw(a.as_i16x16(), IMM8))
+ if IMM8 >= 16 {
+ _mm256_setzero_si256()
+ } else {
+ transmute(simd_shr(a.as_u16x16(), u16x16::splat(IMM8 as u16)))
+ }
}
/// Shifts packed 32-bit integers in `a` right by `IMM8` while shifting in
@@ -3010,7 +3026,11 @@ pub unsafe fn _mm256_srli_epi16<const IMM8: i32>(a: __m256i) -> __m256i {
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_srli_epi32<const IMM8: i32>(a: __m256i) -> __m256i {
static_assert_uimm_bits!(IMM8, 8);
- transmute(psrlid(a.as_i32x8(), IMM8))
+ if IMM8 >= 32 {
+ _mm256_setzero_si256()
+ } else {
+ transmute(simd_shr(a.as_u32x8(), u32x8::splat(IMM8 as u32)))
+ }
}
/// Shifts packed 64-bit integers in `a` right by `IMM8` while shifting in
@@ -3024,7 +3044,11 @@ pub unsafe fn _mm256_srli_epi32<const IMM8: i32>(a: __m256i) -> __m256i {
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_srli_epi64<const IMM8: i32>(a: __m256i) -> __m256i {
static_assert_uimm_bits!(IMM8, 8);
- transmute(psrliq(a.as_i64x4(), IMM8))
+ if IMM8 >= 64 {
+ _mm256_setzero_si256()
+ } else {
+ transmute(simd_shr(a.as_u64x4(), u64x4::splat(IMM8 as u64)))
+ }
}
/// Shifts packed 32-bit integers in `a` right by the amount specified by
@@ -3677,12 +3701,6 @@ extern "C" {
fn pslld(a: i32x8, count: i32x4) -> i32x8;
#[link_name = "llvm.x86.avx2.psll.q"]
fn psllq(a: i64x4, count: i64x2) -> i64x4;
- #[link_name = "llvm.x86.avx2.pslli.w"]
- fn pslliw(a: i16x16, imm8: i32) -> i16x16;
- #[link_name = "llvm.x86.avx2.pslli.d"]
- fn psllid(a: i32x8, imm8: i32) -> i32x8;
- #[link_name = "llvm.x86.avx2.pslli.q"]
- fn pslliq(a: i64x4, imm8: i32) -> i64x4;
#[link_name = "llvm.x86.avx2.psllv.d"]
fn psllvd(a: i32x4, count: i32x4) -> i32x4;
#[link_name = "llvm.x86.avx2.psllv.d.256"]
@@ -3695,10 +3713,6 @@ extern "C" {
fn psraw(a: i16x16, count: i16x8) -> i16x16;
#[link_name = "llvm.x86.avx2.psra.d"]
fn psrad(a: i32x8, count: i32x4) -> i32x8;
- #[link_name = "llvm.x86.avx2.psrai.w"]
- fn psraiw(a: i16x16, imm8: i32) -> i16x16;
- #[link_name = "llvm.x86.avx2.psrai.d"]
- fn psraid(a: i32x8, imm8: i32) -> i32x8;
#[link_name = "llvm.x86.avx2.psrav.d"]
fn psravd(a: i32x4, count: i32x4) -> i32x4;
#[link_name = "llvm.x86.avx2.psrav.d.256"]
@@ -3709,12 +3723,6 @@ extern "C" {
fn psrld(a: i32x8, count: i32x4) -> i32x8;
#[link_name = "llvm.x86.avx2.psrl.q"]
fn psrlq(a: i64x4, count: i64x2) -> i64x4;
- #[link_name = "llvm.x86.avx2.psrli.w"]
- fn psrliw(a: i16x16, imm8: i32) -> i16x16;
- #[link_name = "llvm.x86.avx2.psrli.d"]
- fn psrlid(a: i32x8, imm8: i32) -> i32x8;
- #[link_name = "llvm.x86.avx2.psrli.q"]
- fn psrliq(a: i64x4, imm8: i32) -> i64x4;
#[link_name = "llvm.x86.avx2.psrlv.d"]
fn psrlvd(a: i32x4, count: i32x4) -> i32x4;
#[link_name = "llvm.x86.avx2.psrlv.d.256"]
diff --git a/library/stdarch/crates/core_arch/src/x86/avx512bw.rs b/library/stdarch/crates/core_arch/src/x86/avx512bw.rs
index bc1e7ddfb..364023539 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx512bw.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx512bw.rs
@@ -5339,9 +5339,11 @@ pub unsafe fn _mm_maskz_sll_epi16(k: __mmask8, a: __m128i, count: __m128i) -> __
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_slli_epi16<const IMM8: u32>(a: __m512i) -> __m512i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i16x32();
- let r = vpslliw(a, IMM8);
- transmute(r)
+ if IMM8 >= 16 {
+ _mm512_setzero_si512()
+ } else {
+ transmute(simd_shl(a.as_u16x32(), u16x32::splat(IMM8 as u16)))
+ }
}
/// Shift packed 16-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
@@ -5357,9 +5359,12 @@ pub unsafe fn _mm512_mask_slli_epi16<const IMM8: u32>(
a: __m512i,
) -> __m512i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i16x32();
- let shf = vpslliw(a, IMM8);
- transmute(simd_select_bitmask(k, shf, src.as_i16x32()))
+ let shf = if IMM8 >= 16 {
+ u16x32::splat(0)
+ } else {
+ simd_shl(a.as_u16x32(), u16x32::splat(IMM8 as u16))
+ };
+ transmute(simd_select_bitmask(k, shf, src.as_u16x32()))
}
/// Shift packed 16-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
@@ -5371,10 +5376,13 @@ pub unsafe fn _mm512_mask_slli_epi16<const IMM8: u32>(
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_slli_epi16<const IMM8: u32>(k: __mmask32, a: __m512i) -> __m512i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i16x32();
- let shf = vpslliw(a, IMM8);
- let zero = _mm512_setzero_si512().as_i16x32();
- transmute(simd_select_bitmask(k, shf, zero))
+ if IMM8 >= 16 {
+ _mm512_setzero_si512()
+ } else {
+ let shf = simd_shl(a.as_u16x32(), u16x32::splat(IMM8 as u16));
+ let zero = u16x32::splat(0);
+ transmute(simd_select_bitmask(k, shf, zero))
+ }
}
/// Shift packed 16-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
@@ -5390,9 +5398,12 @@ pub unsafe fn _mm256_mask_slli_epi16<const IMM8: u32>(
a: __m256i,
) -> __m256i {
static_assert_uimm_bits!(IMM8, 8);
- let imm8 = IMM8 as i32;
- let r = pslliw256(a.as_i16x16(), imm8);
- transmute(simd_select_bitmask(k, r, src.as_i16x16()))
+ let shf = if IMM8 >= 16 {
+ u16x16::splat(0)
+ } else {
+ simd_shl(a.as_u16x16(), u16x16::splat(IMM8 as u16))
+ };
+ transmute(simd_select_bitmask(k, shf, src.as_u16x16()))
}
/// Shift packed 16-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
@@ -5404,10 +5415,13 @@ pub unsafe fn _mm256_mask_slli_epi16<const IMM8: u32>(
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_slli_epi16<const IMM8: u32>(k: __mmask16, a: __m256i) -> __m256i {
static_assert_uimm_bits!(IMM8, 8);
- let imm8 = IMM8 as i32;
- let r = pslliw256(a.as_i16x16(), imm8);
- let zero = _mm256_setzero_si256().as_i16x16();
- transmute(simd_select_bitmask(k, r, zero))
+ if IMM8 >= 16 {
+ _mm256_setzero_si256()
+ } else {
+ let shf = simd_shl(a.as_u16x16(), u16x16::splat(IMM8 as u16));
+ let zero = u16x16::splat(0);
+ transmute(simd_select_bitmask(k, shf, zero))
+ }
}
/// Shift packed 16-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
@@ -5423,9 +5437,12 @@ pub unsafe fn _mm_mask_slli_epi16<const IMM8: u32>(
a: __m128i,
) -> __m128i {
static_assert_uimm_bits!(IMM8, 8);
- let imm8 = IMM8 as i32;
- let r = pslliw128(a.as_i16x8(), imm8);
- transmute(simd_select_bitmask(k, r, src.as_i16x8()))
+ let shf = if IMM8 >= 16 {
+ u16x8::splat(0)
+ } else {
+ simd_shl(a.as_u16x8(), u16x8::splat(IMM8 as u16))
+ };
+ transmute(simd_select_bitmask(k, shf, src.as_u16x8()))
}
/// Shift packed 16-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
@@ -5437,10 +5454,13 @@ pub unsafe fn _mm_mask_slli_epi16<const IMM8: u32>(
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_maskz_slli_epi16<const IMM8: u32>(k: __mmask8, a: __m128i) -> __m128i {
static_assert_uimm_bits!(IMM8, 8);
- let imm8 = IMM8 as i32;
- let r = pslliw128(a.as_i16x8(), imm8);
- let zero = _mm_setzero_si128().as_i16x8();
- transmute(simd_select_bitmask(k, r, zero))
+ if IMM8 >= 16 {
+ _mm_setzero_si128()
+ } else {
+ let shf = simd_shl(a.as_u16x8(), u16x8::splat(IMM8 as u16));
+ let zero = u16x8::splat(0);
+ transmute(simd_select_bitmask(k, shf, zero))
+ }
}
/// Shift packed 16-bit integers in a left by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst.
@@ -5655,9 +5675,11 @@ pub unsafe fn _mm_maskz_srl_epi16(k: __mmask8, a: __m128i, count: __m128i) -> __
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_srli_epi16<const IMM8: u32>(a: __m512i) -> __m512i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i16x32();
- let r = vpsrliw(a, IMM8);
- transmute(r)
+ if IMM8 >= 16 {
+ _mm512_setzero_si512()
+ } else {
+ transmute(simd_shr(a.as_u16x32(), u16x32::splat(IMM8 as u16)))
+ }
}
/// Shift packed 16-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
@@ -5673,9 +5695,12 @@ pub unsafe fn _mm512_mask_srli_epi16<const IMM8: u32>(
a: __m512i,
) -> __m512i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i16x32();
- let shf = vpsrliw(a, IMM8);
- transmute(simd_select_bitmask(k, shf, src.as_i16x32()))
+ let shf = if IMM8 >= 16 {
+ u16x32::splat(0)
+ } else {
+ simd_shr(a.as_u16x32(), u16x32::splat(IMM8 as u16))
+ };
+ transmute(simd_select_bitmask(k, shf, src.as_u16x32()))
}
/// Shift packed 16-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
@@ -5688,10 +5713,13 @@ pub unsafe fn _mm512_mask_srli_epi16<const IMM8: u32>(
pub unsafe fn _mm512_maskz_srli_epi16<const IMM8: i32>(k: __mmask32, a: __m512i) -> __m512i {
static_assert_uimm_bits!(IMM8, 8);
    // imm8 should be u32; the reference documentation appears to be incorrect
- let a = a.as_i16x32();
- let shf = vpsrliw(a, IMM8 as u32);
- let zero = _mm512_setzero_si512().as_i16x32();
- transmute(simd_select_bitmask(k, shf, zero))
+ if IMM8 >= 16 {
+ _mm512_setzero_si512()
+ } else {
+ let shf = simd_shr(a.as_u16x32(), u16x32::splat(IMM8 as u16));
+ let zero = u16x32::splat(0);
+ transmute(simd_select_bitmask(k, shf, zero))
+ }
}
/// Shift packed 16-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
@@ -5968,9 +5996,7 @@ pub unsafe fn _mm_maskz_sra_epi16(k: __mmask8, a: __m128i, count: __m128i) -> __
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_srai_epi16<const IMM8: u32>(a: __m512i) -> __m512i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i16x32();
- let r = vpsraiw(a, IMM8);
- transmute(r)
+ transmute(simd_shr(a.as_i16x32(), i16x32::splat(IMM8.min(15) as i16)))
}
/// Shift packed 16-bit integers in a right by imm8 while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
@@ -5986,8 +6012,7 @@ pub unsafe fn _mm512_mask_srai_epi16<const IMM8: u32>(
a: __m512i,
) -> __m512i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i16x32();
- let shf = vpsraiw(a, IMM8);
+ let shf = simd_shr(a.as_i16x32(), i16x32::splat(IMM8.min(15) as i16));
transmute(simd_select_bitmask(k, shf, src.as_i16x32()))
}
@@ -6000,9 +6025,8 @@ pub unsafe fn _mm512_mask_srai_epi16<const IMM8: u32>(
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_srai_epi16<const IMM8: u32>(k: __mmask32, a: __m512i) -> __m512i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i16x32();
- let shf = vpsraiw(a, IMM8);
- let zero = _mm512_setzero_si512().as_i16x32();
+ let shf = simd_shr(a.as_i16x32(), i16x32::splat(IMM8.min(15) as i16));
+ let zero = i16x32::splat(0);
transmute(simd_select_bitmask(k, shf, zero))
}
@@ -6019,8 +6043,7 @@ pub unsafe fn _mm256_mask_srai_epi16<const IMM8: u32>(
a: __m256i,
) -> __m256i {
static_assert_uimm_bits!(IMM8, 8);
- let imm8 = IMM8 as i32;
- let r = psraiw256(a.as_i16x16(), imm8);
+ let r = simd_shr(a.as_i16x16(), i16x16::splat(IMM8.min(15) as i16));
transmute(simd_select_bitmask(k, r, src.as_i16x16()))
}
@@ -6033,9 +6056,8 @@ pub unsafe fn _mm256_mask_srai_epi16<const IMM8: u32>(
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_srai_epi16<const IMM8: u32>(k: __mmask16, a: __m256i) -> __m256i {
static_assert_uimm_bits!(IMM8, 8);
- let imm8 = IMM8 as i32;
- let r = psraiw256(a.as_i16x16(), imm8);
- let zero = _mm256_setzero_si256().as_i16x16();
+ let r = simd_shr(a.as_i16x16(), i16x16::splat(IMM8.min(15) as i16));
+ let zero = i16x16::splat(0);
transmute(simd_select_bitmask(k, r, zero))
}
@@ -6052,8 +6074,7 @@ pub unsafe fn _mm_mask_srai_epi16<const IMM8: u32>(
a: __m128i,
) -> __m128i {
static_assert_uimm_bits!(IMM8, 8);
- let imm8 = IMM8 as i32;
- let r = psraiw128(a.as_i16x8(), imm8);
+ let r = simd_shr(a.as_i16x8(), i16x8::splat(IMM8.min(15) as i16));
transmute(simd_select_bitmask(k, r, src.as_i16x8()))
}
@@ -6066,9 +6087,8 @@ pub unsafe fn _mm_mask_srai_epi16<const IMM8: u32>(
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_maskz_srai_epi16<const IMM8: u32>(k: __mmask8, a: __m128i) -> __m128i {
static_assert_uimm_bits!(IMM8, 8);
- let imm8 = IMM8 as i32;
- let r = psraiw128(a.as_i16x8(), imm8);
- let zero = _mm_setzero_si128().as_i16x8();
+ let r = simd_shr(a.as_i16x8(), i16x8::splat(IMM8.min(15) as i16));
+ let zero = i16x8::splat(0);
transmute(simd_select_bitmask(k, r, zero))
}
@@ -9750,7 +9770,7 @@ pub unsafe fn _mm_maskz_alignr_epi8<const IMM8: i32>(
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmovswb))]
pub unsafe fn _mm512_mask_cvtsepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask32, a: __m512i) {
- vpmovswbmem(mem_addr as *mut i8, a.as_i16x32(), k);
+ vpmovswbmem(mem_addr, a.as_i16x32(), k);
}
/// Convert packed signed 16-bit integers in a to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -9760,7 +9780,7 @@ pub unsafe fn _mm512_mask_cvtsepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask32,
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovswb))]
pub unsafe fn _mm256_mask_cvtsepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask16, a: __m256i) {
- vpmovswbmem256(mem_addr as *mut i8, a.as_i16x16(), k);
+ vpmovswbmem256(mem_addr, a.as_i16x16(), k);
}
/// Convert packed signed 16-bit integers in a to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -9770,7 +9790,7 @@ pub unsafe fn _mm256_mask_cvtsepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask16,
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovswb))]
pub unsafe fn _mm_mask_cvtsepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m128i) {
- vpmovswbmem128(mem_addr as *mut i8, a.as_i16x8(), k);
+ vpmovswbmem128(mem_addr, a.as_i16x8(), k);
}
/// Convert packed 16-bit integers in a to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -9780,7 +9800,7 @@ pub unsafe fn _mm_mask_cvtsepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a:
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmovwb))]
pub unsafe fn _mm512_mask_cvtepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask32, a: __m512i) {
- vpmovwbmem(mem_addr as *mut i8, a.as_i16x32(), k);
+ vpmovwbmem(mem_addr, a.as_i16x32(), k);
}
/// Convert packed 16-bit integers in a to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -9790,7 +9810,7 @@ pub unsafe fn _mm512_mask_cvtepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask32,
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovwb))]
pub unsafe fn _mm256_mask_cvtepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask16, a: __m256i) {
- vpmovwbmem256(mem_addr as *mut i8, a.as_i16x16(), k);
+ vpmovwbmem256(mem_addr, a.as_i16x16(), k);
}
/// Convert packed 16-bit integers in a to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -9800,7 +9820,7 @@ pub unsafe fn _mm256_mask_cvtepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask16,
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovwb))]
pub unsafe fn _mm_mask_cvtepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m128i) {
- vpmovwbmem128(mem_addr as *mut i8, a.as_i16x8(), k);
+ vpmovwbmem128(mem_addr, a.as_i16x8(), k);
}
/// Convert packed unsigned 16-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -9810,7 +9830,7 @@ pub unsafe fn _mm_mask_cvtepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: _
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmovuswb))]
pub unsafe fn _mm512_mask_cvtusepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask32, a: __m512i) {
- vpmovuswbmem(mem_addr as *mut i8, a.as_i16x32(), k);
+ vpmovuswbmem(mem_addr, a.as_i16x32(), k);
}
/// Convert packed unsigned 16-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -9820,7 +9840,7 @@ pub unsafe fn _mm512_mask_cvtusepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask32
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovuswb))]
pub unsafe fn _mm256_mask_cvtusepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask16, a: __m256i) {
- vpmovuswbmem256(mem_addr as *mut i8, a.as_i16x16(), k);
+ vpmovuswbmem256(mem_addr, a.as_i16x16(), k);
}
/// Convert packed unsigned 16-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -9830,7 +9850,7 @@ pub unsafe fn _mm256_mask_cvtusepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask16
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovuswb))]
pub unsafe fn _mm_mask_cvtusepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m128i) {
- vpmovuswbmem128(mem_addr as *mut i8, a.as_i16x8(), k);
+ vpmovuswbmem128(mem_addr, a.as_i16x8(), k);
}
#[allow(improper_ctypes)]
@@ -9965,13 +9985,6 @@ extern "C" {
#[link_name = "llvm.x86.avx512.psll.w.512"]
fn vpsllw(a: i16x32, count: i16x8) -> i16x32;
- #[link_name = "llvm.x86.avx512.pslli.w.512"]
- fn vpslliw(a: i16x32, imm8: u32) -> i16x32;
-
- #[link_name = "llvm.x86.avx2.pslli.w"]
- fn pslliw256(a: i16x16, imm8: i32) -> i16x16;
- #[link_name = "llvm.x86.sse2.pslli.w"]
- fn pslliw128(a: i16x8, imm8: i32) -> i16x8;
#[link_name = "llvm.x86.avx512.psllv.w.512"]
fn vpsllvw(a: i16x32, b: i16x32) -> i16x32;
@@ -9982,8 +9995,6 @@ extern "C" {
#[link_name = "llvm.x86.avx512.psrl.w.512"]
fn vpsrlw(a: i16x32, count: i16x8) -> i16x32;
- #[link_name = "llvm.x86.avx512.psrli.w.512"]
- fn vpsrliw(a: i16x32, imm8: u32) -> i16x32;
#[link_name = "llvm.x86.avx512.psrlv.w.512"]
fn vpsrlvw(a: i16x32, b: i16x32) -> i16x32;
@@ -9994,13 +10005,6 @@ extern "C" {
#[link_name = "llvm.x86.avx512.psra.w.512"]
fn vpsraw(a: i16x32, count: i16x8) -> i16x32;
- #[link_name = "llvm.x86.avx512.psrai.w.512"]
- fn vpsraiw(a: i16x32, imm8: u32) -> i16x32;
-
- #[link_name = "llvm.x86.avx2.psrai.w"]
- fn psraiw256(a: i16x16, imm8: i32) -> i16x16;
- #[link_name = "llvm.x86.sse2.psrai.w"]
- fn psraiw128(a: i16x8, imm8: i32) -> i16x8;
#[link_name = "llvm.x86.avx512.psrav.w.512"]
fn vpsravw(a: i16x32, count: i16x32) -> i16x32;
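Throughout these AVX-512 hunks, `simd_select_bitmask(k, shf, fallback)` is what gives the `_mask_`/`_maskz_` variants their semantics: lane `i` takes the shifted value when bit `i` of `k` is set, and otherwise either the corresponding lane of `src` (writemask) or zero (zeromask). A per-lane sketch under assumed names, not library code:

```rust
// Per-lane sketch of writemask vs. zeromask selection (illustrative only).
fn select_lane(k: u32, lane: u32, shifted: u16, src: u16, zero_masked: bool) -> u16 {
    if (k >> lane) & 1 == 1 {
        shifted // mask bit set: take the computed result
    } else if zero_masked {
        0 // _maskz_ variant: zero the lane
    } else {
        src // _mask_ variant: keep the source lane
    }
}

fn main() {
    assert_eq!(select_lane(0b10, 1, 7, 3, false), 7);
    assert_eq!(select_lane(0b10, 0, 7, 3, false), 3);
    assert_eq!(select_lane(0b10, 0, 7, 3, true), 0);
}
```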
diff --git a/library/stdarch/crates/core_arch/src/x86/avx512f.rs b/library/stdarch/crates/core_arch/src/x86/avx512f.rs
index 9baa7eeca..5412237ca 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx512f.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx512f.rs
@@ -17141,9 +17141,11 @@ pub unsafe fn _mm_maskz_ror_epi64<const IMM8: i32>(k: __mmask8, a: __m128i) -> _
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_slli_epi32<const IMM8: u32>(a: __m512i) -> __m512i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i32x16();
- let r = vpsllid(a, IMM8);
- transmute(r)
+ if IMM8 >= 32 {
+ _mm512_setzero_si512()
+ } else {
+ transmute(simd_shl(a.as_u32x16(), u32x16::splat(IMM8 as u32)))
+ }
}
/// Shift packed 32-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
@@ -17159,9 +17161,12 @@ pub unsafe fn _mm512_mask_slli_epi32<const IMM8: u32>(
a: __m512i,
) -> __m512i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i32x16();
- let shf = vpsllid(a, IMM8);
- transmute(simd_select_bitmask(k, shf, src.as_i32x16()))
+ let shf = if IMM8 >= 32 {
+ u32x16::splat(0)
+ } else {
+ simd_shl(a.as_u32x16(), u32x16::splat(IMM8))
+ };
+ transmute(simd_select_bitmask(k, shf, src.as_u32x16()))
}
/// Shift packed 32-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
@@ -17173,10 +17178,13 @@ pub unsafe fn _mm512_mask_slli_epi32<const IMM8: u32>(
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_slli_epi32<const IMM8: u32>(k: __mmask16, a: __m512i) -> __m512i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i32x16();
- let shf = vpsllid(a, IMM8);
- let zero = _mm512_setzero_si512().as_i32x16();
- transmute(simd_select_bitmask(k, shf, zero))
+ if IMM8 >= 32 {
+ _mm512_setzero_si512()
+ } else {
+ let shf = simd_shl(a.as_u32x16(), u32x16::splat(IMM8));
+ let zero = u32x16::splat(0);
+ transmute(simd_select_bitmask(k, shf, zero))
+ }
}
/// Shift packed 32-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
@@ -17192,9 +17200,12 @@ pub unsafe fn _mm256_mask_slli_epi32<const IMM8: u32>(
a: __m256i,
) -> __m256i {
static_assert_uimm_bits!(IMM8, 8);
- let imm8 = IMM8 as i32;
- let r = psllid256(a.as_i32x8(), imm8);
- transmute(simd_select_bitmask(k, r, src.as_i32x8()))
+ let r = if IMM8 >= 32 {
+ u32x8::splat(0)
+ } else {
+ simd_shl(a.as_u32x8(), u32x8::splat(IMM8))
+ };
+ transmute(simd_select_bitmask(k, r, src.as_u32x8()))
}
/// Shift packed 32-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
@@ -17206,10 +17217,13 @@ pub unsafe fn _mm256_mask_slli_epi32<const IMM8: u32>(
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_slli_epi32<const IMM8: u32>(k: __mmask8, a: __m256i) -> __m256i {
static_assert_uimm_bits!(IMM8, 8);
- let imm8 = IMM8 as i32;
- let r = psllid256(a.as_i32x8(), imm8);
- let zero = _mm256_setzero_si256().as_i32x8();
- transmute(simd_select_bitmask(k, r, zero))
+ if IMM8 >= 32 {
+ _mm256_setzero_si256()
+ } else {
+ let r = simd_shl(a.as_u32x8(), u32x8::splat(IMM8));
+ let zero = u32x8::splat(0);
+ transmute(simd_select_bitmask(k, r, zero))
+ }
}
/// Shift packed 32-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
@@ -17225,9 +17239,12 @@ pub unsafe fn _mm_mask_slli_epi32<const IMM8: u32>(
a: __m128i,
) -> __m128i {
static_assert_uimm_bits!(IMM8, 8);
- let imm8 = IMM8 as i32;
- let r = psllid128(a.as_i32x4(), imm8);
- transmute(simd_select_bitmask(k, r, src.as_i32x4()))
+ let r = if IMM8 >= 32 {
+ u32x4::splat(0)
+ } else {
+ simd_shl(a.as_u32x4(), u32x4::splat(IMM8))
+ };
+ transmute(simd_select_bitmask(k, r, src.as_u32x4()))
}
/// Shift packed 32-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
@@ -17239,10 +17256,13 @@ pub unsafe fn _mm_mask_slli_epi32<const IMM8: u32>(
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_maskz_slli_epi32<const IMM8: u32>(k: __mmask8, a: __m128i) -> __m128i {
static_assert_uimm_bits!(IMM8, 8);
- let imm8 = IMM8 as i32;
- let r = psllid128(a.as_i32x4(), imm8);
- let zero = _mm_setzero_si128().as_i32x4();
- transmute(simd_select_bitmask(k, r, zero))
+ if IMM8 >= 32 {
+ _mm_setzero_si128()
+ } else {
+ let r = simd_shl(a.as_u32x4(), u32x4::splat(IMM8));
+ let zero = u32x4::splat(0);
+ transmute(simd_select_bitmask(k, r, zero))
+ }
}
/// Shift packed 32-bit integers in a right by imm8 while shifting in zeros, and store the results in dst.
@@ -17254,9 +17274,11 @@ pub unsafe fn _mm_maskz_slli_epi32<const IMM8: u32>(k: __mmask8, a: __m128i) ->
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_srli_epi32<const IMM8: u32>(a: __m512i) -> __m512i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i32x16();
- let r = vpsrlid(a, IMM8);
- transmute(r)
+ if IMM8 >= 32 {
+ _mm512_setzero_si512()
+ } else {
+ transmute(simd_shr(a.as_u32x16(), u32x16::splat(IMM8)))
+ }
}
/// Shift packed 32-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
@@ -17272,9 +17294,12 @@ pub unsafe fn _mm512_mask_srli_epi32<const IMM8: u32>(
a: __m512i,
) -> __m512i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i32x16();
- let shf = vpsrlid(a, IMM8);
- transmute(simd_select_bitmask(k, shf, src.as_i32x16()))
+ let shf = if IMM8 >= 32 {
+ u32x16::splat(0)
+ } else {
+ simd_shr(a.as_u32x16(), u32x16::splat(IMM8))
+ };
+ transmute(simd_select_bitmask(k, shf, src.as_u32x16()))
}
/// Shift packed 32-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
@@ -17286,10 +17311,13 @@ pub unsafe fn _mm512_mask_srli_epi32<const IMM8: u32>(
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_srli_epi32<const IMM8: u32>(k: __mmask16, a: __m512i) -> __m512i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i32x16();
- let shf = vpsrlid(a, IMM8);
- let zero = _mm512_setzero_si512().as_i32x16();
- transmute(simd_select_bitmask(k, shf, zero))
+ if IMM8 >= 32 {
+ _mm512_setzero_si512()
+ } else {
+ let shf = simd_shr(a.as_u32x16(), u32x16::splat(IMM8));
+ let zero = u32x16::splat(0);
+ transmute(simd_select_bitmask(k, shf, zero))
+ }
}
/// Shift packed 32-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
@@ -17305,9 +17333,12 @@ pub unsafe fn _mm256_mask_srli_epi32<const IMM8: u32>(
a: __m256i,
) -> __m256i {
static_assert_uimm_bits!(IMM8, 8);
- let imm8 = IMM8 as i32;
- let r = psrlid256(a.as_i32x8(), imm8);
- transmute(simd_select_bitmask(k, r, src.as_i32x8()))
+ let r = if IMM8 >= 32 {
+ u32x8::splat(0)
+ } else {
+ simd_shr(a.as_u32x8(), u32x8::splat(IMM8))
+ };
+ transmute(simd_select_bitmask(k, r, src.as_u32x8()))
}
/// Shift packed 32-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
@@ -17319,10 +17350,13 @@ pub unsafe fn _mm256_mask_srli_epi32<const IMM8: u32>(
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_srli_epi32<const IMM8: u32>(k: __mmask8, a: __m256i) -> __m256i {
static_assert_uimm_bits!(IMM8, 8);
- let imm8 = IMM8 as i32;
- let r = psrlid256(a.as_i32x8(), imm8);
- let zero = _mm256_setzero_si256().as_i32x8();
- transmute(simd_select_bitmask(k, r, zero))
+ if IMM8 >= 32 {
+ _mm256_setzero_si256()
+ } else {
+ let r = simd_shr(a.as_u32x8(), u32x8::splat(IMM8));
+ let zero = u32x8::splat(0);
+ transmute(simd_select_bitmask(k, r, zero))
+ }
}
/// Shift packed 32-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
@@ -17338,9 +17372,12 @@ pub unsafe fn _mm_mask_srli_epi32<const IMM8: u32>(
a: __m128i,
) -> __m128i {
static_assert_uimm_bits!(IMM8, 8);
- let imm8 = IMM8 as i32;
- let r = psrlid128(a.as_i32x4(), imm8);
- transmute(simd_select_bitmask(k, r, src.as_i32x4()))
+ let r = if IMM8 >= 32 {
+ u32x4::splat(0)
+ } else {
+ simd_shr(a.as_u32x4(), u32x4::splat(IMM8))
+ };
+ transmute(simd_select_bitmask(k, r, src.as_u32x4()))
}
/// Shift packed 32-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
@@ -17352,10 +17389,13 @@ pub unsafe fn _mm_mask_srli_epi32<const IMM8: u32>(
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_maskz_srli_epi32<const IMM8: u32>(k: __mmask8, a: __m128i) -> __m128i {
static_assert_uimm_bits!(IMM8, 8);
- let imm8 = IMM8 as i32;
- let r = psrlid128(a.as_i32x4(), imm8);
- let zero = _mm_setzero_si128().as_i32x4();
- transmute(simd_select_bitmask(k, r, zero))
+ if IMM8 >= 32 {
+ _mm_setzero_si128()
+ } else {
+ let r = simd_shr(a.as_u32x4(), u32x4::splat(IMM8));
+ let zero = u32x4::splat(0);
+ transmute(simd_select_bitmask(k, r, zero))
+ }
}
/// Shift packed 64-bit integers in a left by imm8 while shifting in zeros, and store the results in dst.
@@ -17367,9 +17407,11 @@ pub unsafe fn _mm_maskz_srli_epi32<const IMM8: u32>(k: __mmask8, a: __m128i) ->
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_slli_epi64<const IMM8: u32>(a: __m512i) -> __m512i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i64x8();
- let r = vpslliq(a, IMM8);
- transmute(r)
+ if IMM8 >= 64 {
+ _mm512_setzero_si512()
+ } else {
+ transmute(simd_shl(a.as_u64x8(), u64x8::splat(IMM8 as u64)))
+ }
}
/// Shift packed 64-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
@@ -17385,9 +17427,12 @@ pub unsafe fn _mm512_mask_slli_epi64<const IMM8: u32>(
a: __m512i,
) -> __m512i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i64x8();
- let shf = vpslliq(a, IMM8);
- transmute(simd_select_bitmask(k, shf, src.as_i64x8()))
+ let shf = if IMM8 >= 64 {
+ u64x8::splat(0)
+ } else {
+ simd_shl(a.as_u64x8(), u64x8::splat(IMM8 as u64))
+ };
+ transmute(simd_select_bitmask(k, shf, src.as_u64x8()))
}
/// Shift packed 64-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
@@ -17399,10 +17444,13 @@ pub unsafe fn _mm512_mask_slli_epi64<const IMM8: u32>(
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_slli_epi64<const IMM8: u32>(k: __mmask8, a: __m512i) -> __m512i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i64x8();
- let shf = vpslliq(a, IMM8);
- let zero = _mm512_setzero_si512().as_i64x8();
- transmute(simd_select_bitmask(k, shf, zero))
+ if IMM8 >= 64 {
+ _mm512_setzero_si512()
+ } else {
+ let shf = simd_shl(a.as_u64x8(), u64x8::splat(IMM8 as u64));
+ let zero = u64x8::splat(0);
+ transmute(simd_select_bitmask(k, shf, zero))
+ }
}
/// Shift packed 64-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
@@ -17418,9 +17466,12 @@ pub unsafe fn _mm256_mask_slli_epi64<const IMM8: u32>(
a: __m256i,
) -> __m256i {
static_assert_uimm_bits!(IMM8, 8);
- let imm8 = IMM8 as i32;
- let r = pslliq256(a.as_i64x4(), imm8);
- transmute(simd_select_bitmask(k, r, src.as_i64x4()))
+ let r = if IMM8 >= 64 {
+ u64x4::splat(0)
+ } else {
+ simd_shl(a.as_u64x4(), u64x4::splat(IMM8 as u64))
+ };
+ transmute(simd_select_bitmask(k, r, src.as_u64x4()))
}
/// Shift packed 64-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
@@ -17432,10 +17483,13 @@ pub unsafe fn _mm256_mask_slli_epi64<const IMM8: u32>(
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_slli_epi64<const IMM8: u32>(k: __mmask8, a: __m256i) -> __m256i {
static_assert_uimm_bits!(IMM8, 8);
- let imm8 = IMM8 as i32;
- let r = pslliq256(a.as_i64x4(), imm8);
- let zero = _mm256_setzero_si256().as_i64x4();
- transmute(simd_select_bitmask(k, r, zero))
+ if IMM8 >= 64 {
+ _mm256_setzero_si256()
+ } else {
+ let r = simd_shl(a.as_u64x4(), u64x4::splat(IMM8 as u64));
+ let zero = u64x4::splat(0);
+ transmute(simd_select_bitmask(k, r, zero))
+ }
}
/// Shift packed 64-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
@@ -17451,9 +17505,12 @@ pub unsafe fn _mm_mask_slli_epi64<const IMM8: u32>(
a: __m128i,
) -> __m128i {
static_assert_uimm_bits!(IMM8, 8);
- let imm8 = IMM8 as i32;
- let r = pslliq128(a.as_i64x2(), imm8);
- transmute(simd_select_bitmask(k, r, src.as_i64x2()))
+ let r = if IMM8 >= 64 {
+ u64x2::splat(0)
+ } else {
+ simd_shl(a.as_u64x2(), u64x2::splat(IMM8 as u64))
+ };
+ transmute(simd_select_bitmask(k, r, src.as_u64x2()))
}
/// Shift packed 64-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
@@ -17465,10 +17522,13 @@ pub unsafe fn _mm_mask_slli_epi64<const IMM8: u32>(
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_maskz_slli_epi64<const IMM8: u32>(k: __mmask8, a: __m128i) -> __m128i {
static_assert_uimm_bits!(IMM8, 8);
- let imm8 = IMM8 as i32;
- let r = pslliq128(a.as_i64x2(), imm8);
- let zero = _mm_setzero_si128().as_i64x2();
- transmute(simd_select_bitmask(k, r, zero))
+ if IMM8 >= 64 {
+ _mm_setzero_si128()
+ } else {
+ let r = simd_shl(a.as_u64x2(), u64x2::splat(IMM8 as u64));
+ let zero = u64x2::splat(0);
+ transmute(simd_select_bitmask(k, r, zero))
+ }
}
/// Shift packed 64-bit integers in a right by imm8 while shifting in zeros, and store the results in dst.
@@ -17480,9 +17540,11 @@ pub unsafe fn _mm_maskz_slli_epi64<const IMM8: u32>(k: __mmask8, a: __m128i) ->
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_srli_epi64<const IMM8: u32>(a: __m512i) -> __m512i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i64x8();
- let r = vpsrliq(a, IMM8);
- transmute(r)
+ if IMM8 >= 64 {
+ _mm512_setzero_si512()
+ } else {
+ transmute(simd_shr(a.as_u64x8(), u64x8::splat(IMM8 as u64)))
+ }
}
/// Shift packed 64-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
@@ -17498,9 +17560,12 @@ pub unsafe fn _mm512_mask_srli_epi64<const IMM8: u32>(
a: __m512i,
) -> __m512i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i64x8();
- let shf = vpsrliq(a, IMM8);
- transmute(simd_select_bitmask(k, shf, src.as_i64x8()))
+ let shf = if IMM8 >= 64 {
+ u64x8::splat(0)
+ } else {
+ simd_shr(a.as_u64x8(), u64x8::splat(IMM8 as u64))
+ };
+ transmute(simd_select_bitmask(k, shf, src.as_u64x8()))
}
/// Shift packed 64-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
@@ -17512,10 +17577,13 @@ pub unsafe fn _mm512_mask_srli_epi64<const IMM8: u32>(
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_srli_epi64<const IMM8: u32>(k: __mmask8, a: __m512i) -> __m512i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i64x8();
- let shf = vpsrliq(a, IMM8);
- let zero = _mm512_setzero_si512().as_i64x8();
- transmute(simd_select_bitmask(k, shf, zero))
+ if IMM8 >= 64 {
+ _mm512_setzero_si512()
+ } else {
+ let shf = simd_shr(a.as_u64x8(), u64x8::splat(IMM8 as u64));
+ let zero = u64x8::splat(0);
+ transmute(simd_select_bitmask(k, shf, zero))
+ }
}
/// Shift packed 64-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
@@ -17531,9 +17599,12 @@ pub unsafe fn _mm256_mask_srli_epi64<const IMM8: u32>(
a: __m256i,
) -> __m256i {
static_assert_uimm_bits!(IMM8, 8);
- let imm8 = IMM8 as i32;
- let r = psrliq256(a.as_i64x4(), imm8);
- transmute(simd_select_bitmask(k, r, src.as_i64x4()))
+ let r = if IMM8 >= 64 {
+ u64x4::splat(0)
+ } else {
+ simd_shr(a.as_u64x4(), u64x4::splat(IMM8 as u64))
+ };
+ transmute(simd_select_bitmask(k, r, src.as_u64x4()))
}
/// Shift packed 64-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
@@ -17545,10 +17616,13 @@ pub unsafe fn _mm256_mask_srli_epi64<const IMM8: u32>(
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_srli_epi64<const IMM8: u32>(k: __mmask8, a: __m256i) -> __m256i {
static_assert_uimm_bits!(IMM8, 8);
- let imm8 = IMM8 as i32;
- let r = psrliq256(a.as_i64x4(), imm8);
- let zero = _mm256_setzero_si256().as_i64x4();
- transmute(simd_select_bitmask(k, r, zero))
+ if IMM8 >= 64 {
+ _mm256_setzero_si256()
+ } else {
+ let r = simd_shr(a.as_u64x4(), u64x4::splat(IMM8 as u64));
+ let zero = u64x4::splat(0);
+ transmute(simd_select_bitmask(k, r, zero))
+ }
}
/// Shift packed 64-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
@@ -17564,9 +17638,12 @@ pub unsafe fn _mm_mask_srli_epi64<const IMM8: u32>(
a: __m128i,
) -> __m128i {
static_assert_uimm_bits!(IMM8, 8);
- let imm8 = IMM8 as i32;
- let r = psrliq128(a.as_i64x2(), imm8);
- transmute(simd_select_bitmask(k, r, src.as_i64x2()))
+ let r = if IMM8 >= 64 {
+ u64x2::splat(0)
+ } else {
+ simd_shr(a.as_u64x2(), u64x2::splat(IMM8 as u64))
+ };
+ transmute(simd_select_bitmask(k, r, src.as_u64x2()))
}
/// Shift packed 64-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
@@ -17578,10 +17655,13 @@ pub unsafe fn _mm_mask_srli_epi64<const IMM8: u32>(
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_maskz_srli_epi64<const IMM8: u32>(k: __mmask8, a: __m128i) -> __m128i {
static_assert_uimm_bits!(IMM8, 8);
- let imm8 = IMM8 as i32;
- let r = psrliq128(a.as_i64x2(), imm8);
- let zero = _mm_setzero_si128().as_i64x2();
- transmute(simd_select_bitmask(k, r, zero))
+ if IMM8 >= 64 {
+ _mm_setzero_si128()
+ } else {
+ let r = simd_shr(a.as_u64x2(), u64x2::splat(IMM8 as u64));
+ let zero = u64x2::splat(0);
+ transmute(simd_select_bitmask(k, r, zero))
+ }
}
/// Shift packed 32-bit integers in a left by count while shifting in zeros, and store the results in dst.
@@ -18147,9 +18227,7 @@ pub unsafe fn _mm_maskz_sra_epi64(k: __mmask8, a: __m128i, count: __m128i) -> __
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_srai_epi32<const IMM8: u32>(a: __m512i) -> __m512i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i32x16();
- let r = vpsraid512(a, IMM8);
- transmute(r)
+ transmute(simd_shr(a.as_i32x16(), i32x16::splat(IMM8.min(31) as i32)))
}
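For the arithmetic shifts there is no zero case: the count is clamped to the lane width minus one, since shifting by 31 already replicates the sign bit into every position. A scalar sketch of the clamp (illustrative):

fn srai_i32_model(x: i32, imm8: u32) -> i32 {
    // Any count above 31 behaves the same as 31: the result is all sign bits.
    x >> imm8.min(31)
}
// e.g. srai_i32_model(-8, 200) == -1 and srai_i32_model(8, 200) == 0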
/// Shift packed 32-bit integers in a right by imm8 while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
@@ -18165,8 +18243,7 @@ pub unsafe fn _mm512_mask_srai_epi32<const IMM8: u32>(
a: __m512i,
) -> __m512i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i32x16();
- let r = vpsraid512(a, IMM8);
+ let r = simd_shr(a.as_i32x16(), i32x16::splat(IMM8.min(31) as i32));
transmute(simd_select_bitmask(k, r, src.as_i32x16()))
}
@@ -18179,9 +18256,8 @@ pub unsafe fn _mm512_mask_srai_epi32<const IMM8: u32>(
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_srai_epi32<const IMM8: u32>(k: __mmask16, a: __m512i) -> __m512i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i32x16();
- let r = vpsraid512(a, IMM8);
- let zero = _mm512_setzero_si512().as_i32x16();
+ let r = simd_shr(a.as_i32x16(), i32x16::splat(IMM8.min(31) as i32));
+ let zero = i32x16::splat(0);
transmute(simd_select_bitmask(k, r, zero))
}
@@ -18197,8 +18273,7 @@ pub unsafe fn _mm256_mask_srai_epi32<const IMM8: u32>(
k: __mmask8,
a: __m256i,
) -> __m256i {
- let imm8 = IMM8 as i32;
- let r = psraid256(a.as_i32x8(), imm8);
+ let r = simd_shr(a.as_i32x8(), i32x8::splat(IMM8.min(31) as i32));
transmute(simd_select_bitmask(k, r, src.as_i32x8()))
}
@@ -18210,9 +18285,8 @@ pub unsafe fn _mm256_mask_srai_epi32<const IMM8: u32>(
#[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_srai_epi32<const IMM8: u32>(k: __mmask8, a: __m256i) -> __m256i {
- let imm8 = IMM8 as i32;
- let r = psraid256(a.as_i32x8(), imm8);
- let zero = _mm256_setzero_si256().as_i32x8();
+ let r = simd_shr(a.as_i32x8(), i32x8::splat(IMM8.min(31) as i32));
+ let zero = i32x8::splat(0);
transmute(simd_select_bitmask(k, r, zero))
}
@@ -18228,8 +18302,7 @@ pub unsafe fn _mm_mask_srai_epi32<const IMM8: u32>(
k: __mmask8,
a: __m128i,
) -> __m128i {
- let imm8 = IMM8 as i32;
- let r = psraid128(a.as_i32x4(), imm8);
+ let r = simd_shr(a.as_i32x4(), i32x4::splat(IMM8.min(31) as i32));
transmute(simd_select_bitmask(k, r, src.as_i32x4()))
}
@@ -18241,9 +18314,8 @@ pub unsafe fn _mm_mask_srai_epi32<const IMM8: u32>(
#[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_maskz_srai_epi32<const IMM8: u32>(k: __mmask8, a: __m128i) -> __m128i {
- let imm8 = IMM8 as i32;
- let r = psraid128(a.as_i32x4(), imm8);
- let zero = _mm_setzero_si128().as_i32x4();
+ let r = simd_shr(a.as_i32x4(), i32x4::splat(IMM8.min(31) as i32));
+ let zero = i32x4::splat(0);
transmute(simd_select_bitmask(k, r, zero))
}
@@ -18256,9 +18328,7 @@ pub unsafe fn _mm_maskz_srai_epi32<const IMM8: u32>(k: __mmask8, a: __m128i) ->
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_srai_epi64<const IMM8: u32>(a: __m512i) -> __m512i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i64x8();
- let r = vpsraiq(a, IMM8);
- transmute(r)
+ transmute(simd_shr(a.as_i64x8(), i64x8::splat(IMM8.min(63) as i64)))
}
/// Shift packed 64-bit integers in a right by imm8 while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
@@ -18274,8 +18344,7 @@ pub unsafe fn _mm512_mask_srai_epi64<const IMM8: u32>(
a: __m512i,
) -> __m512i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i64x8();
- let shf = vpsraiq(a, IMM8);
+ let shf = simd_shr(a.as_i64x8(), i64x8::splat(IMM8.min(63) as i64));
transmute(simd_select_bitmask(k, shf, src.as_i64x8()))
}
@@ -18288,9 +18357,8 @@ pub unsafe fn _mm512_mask_srai_epi64<const IMM8: u32>(
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_srai_epi64<const IMM8: u32>(k: __mmask8, a: __m512i) -> __m512i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i64x8();
- let shf = vpsraiq(a, IMM8);
- let zero = _mm512_setzero_si512().as_i64x8();
+ let shf = simd_shr(a.as_i64x8(), i64x8::splat(IMM8.min(63) as i64));
+ let zero = i64x8::splat(0);
transmute(simd_select_bitmask(k, shf, zero))
}
@@ -18303,9 +18371,7 @@ pub unsafe fn _mm512_maskz_srai_epi64<const IMM8: u32>(k: __mmask8, a: __m512i)
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm256_srai_epi64<const IMM8: u32>(a: __m256i) -> __m256i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i64x4();
- let r = vpsraiq256(a, IMM8);
- transmute(r)
+ transmute(simd_shr(a.as_i64x4(), i64x4::splat(IMM8.min(63) as i64)))
}
/// Shift packed 64-bit integers in a right by imm8 while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
@@ -18321,8 +18387,7 @@ pub unsafe fn _mm256_mask_srai_epi64<const IMM8: u32>(
a: __m256i,
) -> __m256i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i64x4();
- let shf = vpsraiq256(a, IMM8);
+ let shf = simd_shr(a.as_i64x4(), i64x4::splat(IMM8.min(63) as i64));
transmute(simd_select_bitmask(k, shf, src.as_i64x4()))
}
@@ -18335,9 +18400,8 @@ pub unsafe fn _mm256_mask_srai_epi64<const IMM8: u32>(
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_srai_epi64<const IMM8: u32>(k: __mmask8, a: __m256i) -> __m256i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i64x4();
- let shf = vpsraiq256(a, IMM8);
- let zero = _mm256_setzero_si256().as_i64x4();
+ let shf = simd_shr(a.as_i64x4(), i64x4::splat(IMM8.min(63) as i64));
+ let zero = i64x4::splat(0);
transmute(simd_select_bitmask(k, shf, zero))
}
@@ -18350,9 +18414,7 @@ pub unsafe fn _mm256_maskz_srai_epi64<const IMM8: u32>(k: __mmask8, a: __m256i)
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm_srai_epi64<const IMM8: u32>(a: __m128i) -> __m128i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i64x2();
- let r = vpsraiq128(a, IMM8);
- transmute(r)
+ transmute(simd_shr(a.as_i64x2(), i64x2::splat(IMM8.min(63) as i64)))
}
/// Shift packed 64-bit integers in a right by imm8 while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
@@ -18368,8 +18430,7 @@ pub unsafe fn _mm_mask_srai_epi64<const IMM8: u32>(
a: __m128i,
) -> __m128i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i64x2();
- let shf = vpsraiq128(a, IMM8);
+ let shf = simd_shr(a.as_i64x2(), i64x2::splat(IMM8.min(63) as i64));
transmute(simd_select_bitmask(k, shf, src.as_i64x2()))
}
@@ -18382,9 +18443,8 @@ pub unsafe fn _mm_mask_srai_epi64<const IMM8: u32>(
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_maskz_srai_epi64<const IMM8: u32>(k: __mmask8, a: __m128i) -> __m128i {
static_assert_uimm_bits!(IMM8, 8);
- let a = a.as_i64x2();
- let shf = vpsraiq128(a, IMM8);
- let zero = _mm_setzero_si128().as_i64x2();
+ let shf = simd_shr(a.as_i64x2(), i64x2::splat(IMM8.min(63) as i64));
+ let zero = i64x2::splat(0);
transmute(simd_select_bitmask(k, shf, zero))
}
@@ -19383,7 +19443,7 @@ pub unsafe fn _mm_maskz_srlv_epi64(k: __mmask8, a: __m128i, count: __m128i) -> _
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permute_ps&expand=4170)
#[inline]
#[target_feature(enable = "avx512f")]
-#[cfg_attr(test, assert_instr(vpermilps, MASK = 0b11_00_01_11))]
+#[cfg_attr(test, assert_instr(vshufps, MASK = 0b11_00_01_11))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_permute_ps<const MASK: i32>(a: __m512) -> __m512 {
static_assert_uimm_bits!(MASK, 8);
@@ -19416,7 +19476,7 @@ pub unsafe fn _mm512_permute_ps<const MASK: i32>(a: __m512) -> __m512 {
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permute_ps&expand=4168)
#[inline]
#[target_feature(enable = "avx512f")]
-#[cfg_attr(test, assert_instr(vpermilps, MASK = 0b11_00_01_11))]
+#[cfg_attr(test, assert_instr(vshufps, MASK = 0b11_00_01_11))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn _mm512_mask_permute_ps<const MASK: i32>(
src: __m512,
@@ -19433,7 +19493,7 @@ pub unsafe fn _mm512_mask_permute_ps<const MASK: i32>(
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permute_ps&expand=4169)
#[inline]
#[target_feature(enable = "avx512f")]
-#[cfg_attr(test, assert_instr(vpermilps, MASK = 0b11_00_01_11))]
+#[cfg_attr(test, assert_instr(vshufps, MASK = 0b11_00_01_11))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_permute_ps<const MASK: i32>(k: __mmask16, a: __m512) -> __m512 {
static_assert_uimm_bits!(MASK, 8);
@@ -19447,7 +19507,7 @@ pub unsafe fn _mm512_maskz_permute_ps<const MASK: i32>(k: __mmask16, a: __m512)
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permute_ps&expand=4165)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
-#[cfg_attr(test, assert_instr(vpermilps, MASK = 0b11_00_01_11))]
+#[cfg_attr(test, assert_instr(vshufps, MASK = 0b11_00_01_11))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn _mm256_mask_permute_ps<const MASK: i32>(
src: __m256,
@@ -19463,7 +19523,7 @@ pub unsafe fn _mm256_mask_permute_ps<const MASK: i32>(
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permute_ps&expand=4166)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
-#[cfg_attr(test, assert_instr(vpermilps, MASK = 0b11_00_01_11))]
+#[cfg_attr(test, assert_instr(vshufps, MASK = 0b11_00_01_11))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_permute_ps<const MASK: i32>(k: __mmask8, a: __m256) -> __m256 {
let r = _mm256_permute_ps::<MASK>(a);
@@ -19476,7 +19536,7 @@ pub unsafe fn _mm256_maskz_permute_ps<const MASK: i32>(k: __mmask8, a: __m256) -
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_permute_ps&expand=4162)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
-#[cfg_attr(test, assert_instr(vpermilps, MASK = 0b11_00_01_11))]
+#[cfg_attr(test, assert_instr(vshufps, MASK = 0b11_00_01_11))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn _mm_mask_permute_ps<const MASK: i32>(src: __m128, k: __mmask8, a: __m128) -> __m128 {
let r = _mm_permute_ps::<MASK>(a);
@@ -19488,7 +19548,7 @@ pub unsafe fn _mm_mask_permute_ps<const MASK: i32>(src: __m128, k: __mmask8, a:
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_permute_ps&expand=4163)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
-#[cfg_attr(test, assert_instr(vpermilps, MASK = 0b11_00_01_11))]
+#[cfg_attr(test, assert_instr(vshufps, MASK = 0b11_00_01_11))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_maskz_permute_ps<const MASK: i32>(k: __mmask8, a: __m128) -> __m128 {
let r = _mm_permute_ps::<MASK>(a);
@@ -19501,7 +19561,7 @@ pub unsafe fn _mm_maskz_permute_ps<const MASK: i32>(k: __mmask8, a: __m128) -> _
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permute_pd&expand=4161)
#[inline]
#[target_feature(enable = "avx512f")]
-#[cfg_attr(test, assert_instr(vpermilpd, MASK = 0b11_01_10_01))]
+#[cfg_attr(test, assert_instr(vshufpd, MASK = 0b11_01_10_01))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_permute_pd<const MASK: i32>(a: __m512d) -> __m512d {
static_assert_uimm_bits!(MASK, 8);
@@ -19526,7 +19586,7 @@ pub unsafe fn _mm512_permute_pd<const MASK: i32>(a: __m512d) -> __m512d {
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permute_pd&expand=4159)
#[inline]
#[target_feature(enable = "avx512f")]
-#[cfg_attr(test, assert_instr(vpermilpd, MASK = 0b11_01_10_01))]
+#[cfg_attr(test, assert_instr(vshufpd, MASK = 0b11_01_10_01))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn _mm512_mask_permute_pd<const MASK: i32>(
src: __m512d,
@@ -19543,7 +19603,7 @@ pub unsafe fn _mm512_mask_permute_pd<const MASK: i32>(
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permute_pd&expand=4160)
#[inline]
#[target_feature(enable = "avx512f")]
-#[cfg_attr(test, assert_instr(vpermilpd, MASK = 0b11_01_10_01))]
+#[cfg_attr(test, assert_instr(vshufpd, MASK = 0b11_01_10_01))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_permute_pd<const MASK: i32>(k: __mmask8, a: __m512d) -> __m512d {
static_assert_uimm_bits!(MASK, 8);
@@ -19557,7 +19617,7 @@ pub unsafe fn _mm512_maskz_permute_pd<const MASK: i32>(k: __mmask8, a: __m512d)
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permute_pd&expand=4156)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
-#[cfg_attr(test, assert_instr(vpermilpd, MASK = 0b11_01))]
+#[cfg_attr(test, assert_instr(vshufpd, MASK = 0b11_01))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn _mm256_mask_permute_pd<const MASK: i32>(
src: __m256d,
@@ -19574,7 +19634,7 @@ pub unsafe fn _mm256_mask_permute_pd<const MASK: i32>(
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permute_pd&expand=4157)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
-#[cfg_attr(test, assert_instr(vpermilpd, MASK = 0b11_01))]
+#[cfg_attr(test, assert_instr(vshufpd, MASK = 0b11_01))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_permute_pd<const MASK: i32>(k: __mmask8, a: __m256d) -> __m256d {
static_assert_uimm_bits!(MASK, 4);
@@ -19588,7 +19648,7 @@ pub unsafe fn _mm256_maskz_permute_pd<const MASK: i32>(k: __mmask8, a: __m256d)
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_permute_pd&expand=4153)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
-#[cfg_attr(test, assert_instr(vpermilpd, IMM2 = 0b01))]
+#[cfg_attr(test, assert_instr(vshufpd, IMM2 = 0b01))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn _mm_mask_permute_pd<const IMM2: i32>(
src: __m128d,
@@ -19605,7 +19665,7 @@ pub unsafe fn _mm_mask_permute_pd<const IMM2: i32>(
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_permute_pd&expand=4154)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
-#[cfg_attr(test, assert_instr(vpermilpd, IMM2 = 0b01))]
+#[cfg_attr(test, assert_instr(vshufpd, IMM2 = 0b01))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_maskz_permute_pd<const IMM2: i32>(k: __mmask8, a: __m128d) -> __m128d {
static_assert_uimm_bits!(IMM2, 2);
@@ -21035,7 +21095,7 @@ pub unsafe fn _mm_mask2_permutex2var_pd(
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_shuffle_epi32&expand=5150)
#[inline]
#[target_feature(enable = "avx512f")]
-#[cfg_attr(test, assert_instr(vpermilps, MASK = 9))] //should be vpshufd
+#[cfg_attr(test, assert_instr(vshufps, MASK = 9))] // should be vpshufd
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_shuffle_epi32<const MASK: _MM_PERM_ENUM>(a: __m512i) -> __m512i {
static_assert_uimm_bits!(MASK, 8);
@@ -29721,7 +29781,7 @@ pub unsafe fn _mm_loadu_epi32(mem_addr: *const i32) -> __m128i {
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovdw))]
pub unsafe fn _mm512_mask_cvtepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask16, a: __m512i) {
- vpmovdwmem(mem_addr as *mut i8, a.as_i32x16(), k);
+ vpmovdwmem(mem_addr, a.as_i32x16(), k);
}
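These masked truncating stores already take a byte pointer, so the dropped `as *mut i8` casts were no-ops. A hedged usage sketch of the 512-bit variant (helper name and mask value are illustrative, not from the patch):

use core::arch::x86_64::*;

// Requires AVX-512F at runtime. Mask 0x00ff keeps lanes 0..8 active, so only the
// first eight i16 slots of `out` are written; the rest are left untouched.
unsafe fn store_low_halves(out: &mut [i16; 16], a: __m512i) {
    _mm512_mask_cvtepi32_storeu_epi16(out.as_mut_ptr().cast::<i8>(), 0x00ff, a);
}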
/// Convert packed 32-bit integers in a to packed 16-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -29731,7 +29791,7 @@ pub unsafe fn _mm512_mask_cvtepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask16,
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovdw))]
pub unsafe fn _mm256_mask_cvtepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: __m256i) {
- vpmovdwmem256(mem_addr as *mut i8, a.as_i32x8(), k);
+ vpmovdwmem256(mem_addr, a.as_i32x8(), k);
}
/// Convert packed 32-bit integers in a to packed 16-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -29741,7 +29801,7 @@ pub unsafe fn _mm256_mask_cvtepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8,
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovdw))]
pub unsafe fn _mm_mask_cvtepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: __m128i) {
- vpmovdwmem128(mem_addr as *mut i8, a.as_i32x4(), k);
+ vpmovdwmem128(mem_addr, a.as_i32x4(), k);
}
/// Convert packed signed 32-bit integers in a to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -29751,7 +29811,7 @@ pub unsafe fn _mm_mask_cvtepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a:
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsdw))]
pub unsafe fn _mm512_mask_cvtsepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask16, a: __m512i) {
- vpmovsdwmem(mem_addr as *mut i8, a.as_i32x16(), k);
+ vpmovsdwmem(mem_addr, a.as_i32x16(), k);
}
/// Convert packed signed 32-bit integers in a to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -29761,7 +29821,7 @@ pub unsafe fn _mm512_mask_cvtsepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask16
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsdw))]
pub unsafe fn _mm256_mask_cvtsepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: __m256i) {
- vpmovsdwmem256(mem_addr as *mut i8, a.as_i32x8(), k);
+ vpmovsdwmem256(mem_addr, a.as_i32x8(), k);
}
/// Convert packed signed 32-bit integers in a to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -29771,7 +29831,7 @@ pub unsafe fn _mm256_mask_cvtsepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8,
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsdw))]
pub unsafe fn _mm_mask_cvtsepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: __m128i) {
- vpmovsdwmem128(mem_addr as *mut i8, a.as_i32x4(), k);
+ vpmovsdwmem128(mem_addr, a.as_i32x4(), k);
}
/// Convert packed unsigned 32-bit integers in a to packed 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -29781,7 +29841,7 @@ pub unsafe fn _mm_mask_cvtsepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a:
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovusdw))]
pub unsafe fn _mm512_mask_cvtusepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask16, a: __m512i) {
- vpmovusdwmem(mem_addr as *mut i8, a.as_i32x16(), k);
+ vpmovusdwmem(mem_addr, a.as_i32x16(), k);
}
/// Convert packed unsigned 32-bit integers in a to packed unsigned 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -29791,7 +29851,7 @@ pub unsafe fn _mm512_mask_cvtusepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask1
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusdw))]
pub unsafe fn _mm256_mask_cvtusepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: __m256i) {
- vpmovusdwmem256(mem_addr as *mut i8, a.as_i32x8(), k);
+ vpmovusdwmem256(mem_addr, a.as_i32x8(), k);
}
/// Convert packed unsigned 32-bit integers in a to packed unsigned 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -29801,7 +29861,7 @@ pub unsafe fn _mm256_mask_cvtusepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusdw))]
pub unsafe fn _mm_mask_cvtusepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: __m128i) {
- vpmovusdwmem128(mem_addr as *mut i8, a.as_i32x4(), k);
+ vpmovusdwmem128(mem_addr, a.as_i32x4(), k);
}
/// Convert packed 32-bit integers in a to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -29811,7 +29871,7 @@ pub unsafe fn _mm_mask_cvtusepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovdb))]
pub unsafe fn _mm512_mask_cvtepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask16, a: __m512i) {
- vpmovdbmem(mem_addr as *mut i8, a.as_i32x16(), k);
+ vpmovdbmem(mem_addr, a.as_i32x16(), k);
}
/// Convert packed 32-bit integers in a to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -29821,7 +29881,7 @@ pub unsafe fn _mm512_mask_cvtepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask16,
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovdb))]
pub unsafe fn _mm256_mask_cvtepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m256i) {
- vpmovdbmem256(mem_addr as *mut i8, a.as_i32x8(), k);
+ vpmovdbmem256(mem_addr, a.as_i32x8(), k);
}
/// Convert packed 32-bit integers in a to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -29831,7 +29891,7 @@ pub unsafe fn _mm256_mask_cvtepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovdb))]
pub unsafe fn _mm_mask_cvtepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m128i) {
- vpmovdbmem128(mem_addr as *mut i8, a.as_i32x4(), k);
+ vpmovdbmem128(mem_addr, a.as_i32x4(), k);
}
/// Convert packed signed 32-bit integers in a to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -29841,7 +29901,7 @@ pub unsafe fn _mm_mask_cvtepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: _
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsdb))]
pub unsafe fn _mm512_mask_cvtsepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask16, a: __m512i) {
- vpmovsdbmem(mem_addr as *mut i8, a.as_i32x16(), k);
+ vpmovsdbmem(mem_addr, a.as_i32x16(), k);
}
/// Convert packed signed 32-bit integers in a to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -29851,7 +29911,7 @@ pub unsafe fn _mm512_mask_cvtsepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask16,
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsdb))]
pub unsafe fn _mm256_mask_cvtsepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m256i) {
- vpmovsdbmem256(mem_addr as *mut i8, a.as_i32x8(), k);
+ vpmovsdbmem256(mem_addr, a.as_i32x8(), k);
}
/// Convert packed signed 32-bit integers in a to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -29861,7 +29921,7 @@ pub unsafe fn _mm256_mask_cvtsepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8,
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsdb))]
pub unsafe fn _mm_mask_cvtsepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m128i) {
- vpmovsdbmem128(mem_addr as *mut i8, a.as_i32x4(), k);
+ vpmovsdbmem128(mem_addr, a.as_i32x4(), k);
}
/// Convert packed unsigned 32-bit integers in a to packed 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -29871,7 +29931,7 @@ pub unsafe fn _mm_mask_cvtsepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a:
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovusdb))]
pub unsafe fn _mm512_mask_cvtusepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask16, a: __m512i) {
- vpmovusdbmem(mem_addr as *mut i8, a.as_i32x16(), k);
+ vpmovusdbmem(mem_addr, a.as_i32x16(), k);
}
/// Convert packed unsigned 32-bit integers in a to packed 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -29881,7 +29941,7 @@ pub unsafe fn _mm512_mask_cvtusepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask16
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusdb))]
pub unsafe fn _mm256_mask_cvtusepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m256i) {
- vpmovusdbmem256(mem_addr as *mut i8, a.as_i32x8(), k);
+ vpmovusdbmem256(mem_addr, a.as_i32x8(), k);
}
/// Convert packed unsigned 32-bit integers in a to packed 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -29891,7 +29951,7 @@ pub unsafe fn _mm256_mask_cvtusepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8,
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusdb))]
pub unsafe fn _mm_mask_cvtusepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m128i) {
- vpmovusdbmem128(mem_addr as *mut i8, a.as_i32x4(), k);
+ vpmovusdbmem128(mem_addr, a.as_i32x4(), k);
}
/// Convert packed 64-bit integers in a to packed 16-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -29901,7 +29961,7 @@ pub unsafe fn _mm_mask_cvtusepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a:
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovqw))]
pub unsafe fn _mm512_mask_cvtepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: __m512i) {
- vpmovqwmem(mem_addr as *mut i8, a.as_i64x8(), k);
+ vpmovqwmem(mem_addr, a.as_i64x8(), k);
}
/// Convert packed 64-bit integers in a to packed 16-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -29911,7 +29971,7 @@ pub unsafe fn _mm512_mask_cvtepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8,
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqw))]
pub unsafe fn _mm256_mask_cvtepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: __m256i) {
- vpmovqwmem256(mem_addr as *mut i8, a.as_i64x4(), k);
+ vpmovqwmem256(mem_addr, a.as_i64x4(), k);
}
/// Convert packed 64-bit integers in a to packed 16-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -29921,7 +29981,7 @@ pub unsafe fn _mm256_mask_cvtepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8,
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqw))]
pub unsafe fn _mm_mask_cvtepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: __m128i) {
- vpmovqwmem128(mem_addr as *mut i8, a.as_i64x2(), k);
+ vpmovqwmem128(mem_addr, a.as_i64x2(), k);
}
/// Convert packed signed 64-bit integers in a to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -29931,7 +29991,7 @@ pub unsafe fn _mm_mask_cvtepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a:
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsqw))]
pub unsafe fn _mm512_mask_cvtsepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: __m512i) {
- vpmovsqwmem(mem_addr as *mut i8, a.as_i64x8(), k);
+ vpmovsqwmem(mem_addr, a.as_i64x8(), k);
}
/// Convert packed signed 64-bit integers in a to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -29941,7 +30001,7 @@ pub unsafe fn _mm512_mask_cvtsepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8,
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqw))]
pub unsafe fn _mm256_mask_cvtsepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: __m256i) {
- vpmovsqwmem256(mem_addr as *mut i8, a.as_i64x4(), k);
+ vpmovsqwmem256(mem_addr, a.as_i64x4(), k);
}
/// Convert packed signed 64-bit integers in a to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -29951,7 +30011,7 @@ pub unsafe fn _mm256_mask_cvtsepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8,
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqw))]
pub unsafe fn _mm_mask_cvtsepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: __m128i) {
- vpmovsqwmem128(mem_addr as *mut i8, a.as_i64x2(), k);
+ vpmovsqwmem128(mem_addr, a.as_i64x2(), k);
}
/// Convert packed unsigned 64-bit integers in a to packed 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -29961,7 +30021,7 @@ pub unsafe fn _mm_mask_cvtsepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a:
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovusqw))]
pub unsafe fn _mm512_mask_cvtusepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: __m512i) {
- vpmovusqwmem(mem_addr as *mut i8, a.as_i64x8(), k);
+ vpmovusqwmem(mem_addr, a.as_i64x8(), k);
}
/// Convert packed unsigned 64-bit integers in a to packed 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -29971,7 +30031,7 @@ pub unsafe fn _mm512_mask_cvtusepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqw))]
pub unsafe fn _mm256_mask_cvtusepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: __m256i) {
- vpmovusqwmem256(mem_addr as *mut i8, a.as_i64x4(), k);
+ vpmovusqwmem256(mem_addr, a.as_i64x4(), k);
}
/// Convert packed unsigned 64-bit integers in a to packed 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -29981,7 +30041,7 @@ pub unsafe fn _mm256_mask_cvtusepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqw))]
pub unsafe fn _mm_mask_cvtusepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: __m128i) {
- vpmovusqwmem128(mem_addr as *mut i8, a.as_i64x2(), k);
+ vpmovusqwmem128(mem_addr, a.as_i64x2(), k);
}
/// Convert packed 64-bit integers in a to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -29991,7 +30051,7 @@ pub unsafe fn _mm_mask_cvtusepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovqb))]
pub unsafe fn _mm512_mask_cvtepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m512i) {
- vpmovqbmem(mem_addr as *mut i8, a.as_i64x8(), k);
+ vpmovqbmem(mem_addr, a.as_i64x8(), k);
}
/// Convert packed 64-bit integers in a to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -30001,7 +30061,7 @@ pub unsafe fn _mm512_mask_cvtepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqb))]
pub unsafe fn _mm256_mask_cvtepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m256i) {
- vpmovqbmem256(mem_addr as *mut i8, a.as_i64x4(), k);
+ vpmovqbmem256(mem_addr, a.as_i64x4(), k);
}
/// Convert packed 64-bit integers in a to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -30011,7 +30071,7 @@ pub unsafe fn _mm256_mask_cvtepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqb))]
pub unsafe fn _mm_mask_cvtepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m128i) {
- vpmovqbmem128(mem_addr as *mut i8, a.as_i64x2(), k);
+ vpmovqbmem128(mem_addr, a.as_i64x2(), k);
}
/// Convert packed signed 64-bit integers in a to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -30021,7 +30081,7 @@ pub unsafe fn _mm_mask_cvtepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: _
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsqb))]
pub unsafe fn _mm512_mask_cvtsepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m512i) {
- vpmovsqbmem(mem_addr as *mut i8, a.as_i64x8(), k);
+ vpmovsqbmem(mem_addr, a.as_i64x8(), k);
}
/// Convert packed signed 64-bit integers in a to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -30031,7 +30091,7 @@ pub unsafe fn _mm512_mask_cvtsepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8,
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqb))]
pub unsafe fn _mm256_mask_cvtsepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m256i) {
- vpmovsqbmem256(mem_addr as *mut i8, a.as_i64x4(), k);
+ vpmovsqbmem256(mem_addr, a.as_i64x4(), k);
}
/// Convert packed signed 64-bit integers in a to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -30041,7 +30101,7 @@ pub unsafe fn _mm256_mask_cvtsepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8,
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqb))]
pub unsafe fn _mm_mask_cvtsepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m128i) {
- vpmovsqbmem128(mem_addr as *mut i8, a.as_i64x2(), k);
+ vpmovsqbmem128(mem_addr, a.as_i64x2(), k);
}
/// Convert packed unsigned 64-bit integers in a to packed 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -30051,7 +30111,7 @@ pub unsafe fn _mm_mask_cvtsepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a:
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovusqb))]
pub unsafe fn _mm512_mask_cvtusepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m512i) {
- vpmovusqbmem(mem_addr as *mut i8, a.as_i64x8(), k);
+ vpmovusqbmem(mem_addr, a.as_i64x8(), k);
}
/// Convert packed unsigned 64-bit integers in a to packed 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -30061,7 +30121,7 @@ pub unsafe fn _mm512_mask_cvtusepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8,
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqb))]
pub unsafe fn _mm256_mask_cvtusepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m256i) {
- vpmovusqbmem256(mem_addr as *mut i8, a.as_i64x4(), k);
+ vpmovusqbmem256(mem_addr, a.as_i64x4(), k);
}
/// Convert packed unsigned 64-bit integers in a to packed 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -30071,7 +30131,7 @@ pub unsafe fn _mm256_mask_cvtusepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8,
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqb))]
pub unsafe fn _mm_mask_cvtusepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m128i) {
- vpmovusqbmem128(mem_addr as *mut i8, a.as_i64x2(), k);
+ vpmovusqbmem128(mem_addr, a.as_i64x2(), k);
}
/// Convert packed 64-bit integers in a to packed 32-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -30081,7 +30141,7 @@ pub unsafe fn _mm_mask_cvtusepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a:
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovqd))]
pub unsafe fn _mm512_mask_cvtepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8, a: __m512i) {
- vpmovqdmem(mem_addr as *mut i8, a.as_i64x8(), k);
+ vpmovqdmem(mem_addr, a.as_i64x8(), k);
}
/// Convert packed 64-bit integers in a to packed 32-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -30091,7 +30151,7 @@ pub unsafe fn _mm512_mask_cvtepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8,
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqd))]
pub unsafe fn _mm256_mask_cvtepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8, a: __m256i) {
- vpmovqdmem256(mem_addr as *mut i8, a.as_i64x4(), k);
+ vpmovqdmem256(mem_addr, a.as_i64x4(), k);
}
/// Convert packed 64-bit integers in a to packed 32-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -30101,7 +30161,7 @@ pub unsafe fn _mm256_mask_cvtepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8,
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqd))]
pub unsafe fn _mm_mask_cvtepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8, a: __m128i) {
- vpmovqdmem128(mem_addr as *mut i8, a.as_i64x2(), k);
+ vpmovqdmem128(mem_addr, a.as_i64x2(), k);
}
/// Convert packed signed 64-bit integers in a to packed 32-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -30111,7 +30171,7 @@ pub unsafe fn _mm_mask_cvtepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8, a:
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsqd))]
pub unsafe fn _mm512_mask_cvtsepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8, a: __m512i) {
- vpmovsqdmem(mem_addr as *mut i8, a.as_i64x8(), k);
+ vpmovsqdmem(mem_addr, a.as_i64x8(), k);
}
/// Convert packed signed 64-bit integers in a to packed 32-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -30121,7 +30181,7 @@ pub unsafe fn _mm512_mask_cvtsepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8,
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqd))]
pub unsafe fn _mm256_mask_cvtsepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8, a: __m256i) {
- vpmovsqdmem256(mem_addr as *mut i8, a.as_i64x4(), k);
+ vpmovsqdmem256(mem_addr, a.as_i64x4(), k);
}
/// Convert packed signed 64-bit integers in a to packed 32-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -30131,7 +30191,7 @@ pub unsafe fn _mm256_mask_cvtsepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8,
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqd))]
pub unsafe fn _mm_mask_cvtsepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8, a: __m128i) {
- vpmovsqdmem128(mem_addr as *mut i8, a.as_i64x2(), k);
+ vpmovsqdmem128(mem_addr, a.as_i64x2(), k);
}
/// Convert packed unsigned 64-bit integers in a to packed 32-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -30141,7 +30201,7 @@ pub unsafe fn _mm_mask_cvtsepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8, a:
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovusqd))]
pub unsafe fn _mm512_mask_cvtusepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8, a: __m512i) {
- vpmovusqdmem(mem_addr as *mut i8, a.as_i64x8(), k);
+ vpmovusqdmem(mem_addr, a.as_i64x8(), k);
}
/// Convert packed unsigned 64-bit integers in a to packed 32-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -30151,7 +30211,7 @@ pub unsafe fn _mm512_mask_cvtusepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqd))]
pub unsafe fn _mm256_mask_cvtusepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8, a: __m256i) {
- vpmovusqdmem256(mem_addr as *mut i8, a.as_i64x4(), k);
+ vpmovusqdmem256(mem_addr, a.as_i64x4(), k);
}
/// Convert packed unsigned 64-bit integers in a to packed 32-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -30161,7 +30221,7 @@ pub unsafe fn _mm256_mask_cvtusepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqd))]
pub unsafe fn _mm_mask_cvtusepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8, a: __m128i) {
- vpmovusqdmem128(mem_addr as *mut i8, a.as_i64x2(), k);
+ vpmovusqdmem128(mem_addr, a.as_i64x2(), k);
}
/// Store 512-bits (composed of 16 packed 32-bit integers) from a into memory. mem_addr does not need to be aligned on any particular boundary.
@@ -38449,38 +38509,6 @@ extern "C" {
#[link_name = "llvm.x86.avx512.psrlv.q.512"]
fn vpsrlvq(a: i64x8, b: i64x8) -> i64x8;
- #[link_name = "llvm.x86.avx512.pslli.d.512"]
- fn vpsllid(a: i32x16, imm8: u32) -> i32x16;
-
- #[link_name = "llvm.x86.avx2.pslli.d"]
- fn psllid256(a: i32x8, imm8: i32) -> i32x8;
- #[link_name = "llvm.x86.sse2.pslli.d"]
- fn psllid128(a: i32x4, imm8: i32) -> i32x4;
-
- #[link_name = "llvm.x86.avx512.psrli.d.512"]
- fn vpsrlid(a: i32x16, imm8: u32) -> i32x16;
-
- #[link_name = "llvm.x86.avx2.psrli.d"]
- fn psrlid256(a: i32x8, imm8: i32) -> i32x8;
- #[link_name = "llvm.x86.sse2.psrli.d"]
- fn psrlid128(a: i32x4, imm8: i32) -> i32x4;
-
- #[link_name = "llvm.x86.avx512.pslli.q.512"]
- fn vpslliq(a: i64x8, imm8: u32) -> i64x8;
-
- #[link_name = "llvm.x86.avx2.pslli.q"]
- fn pslliq256(a: i64x4, imm8: i32) -> i64x4;
- #[link_name = "llvm.x86.sse2.pslli.q"]
- fn pslliq128(a: i64x2, imm8: i32) -> i64x2;
-
- #[link_name = "llvm.x86.avx512.psrli.q.512"]
- fn vpsrliq(a: i64x8, imm8: u32) -> i64x8;
-
- #[link_name = "llvm.x86.avx2.psrli.q"]
- fn psrliq256(a: i64x4, imm8: i32) -> i64x4;
- #[link_name = "llvm.x86.sse2.psrli.q"]
- fn psrliq128(a: i64x2, imm8: i32) -> i64x2;
-
#[link_name = "llvm.x86.avx512.psll.d.512"]
fn vpslld(a: i32x16, count: i32x4) -> i32x16;
#[link_name = "llvm.x86.avx512.psrl.d.512"]
@@ -38500,20 +38528,6 @@ extern "C" {
#[link_name = "llvm.x86.avx512.psra.q.128"]
fn vpsraq128(a: i64x2, count: i64x2) -> i64x2;
- #[link_name = "llvm.x86.avx512.psrai.d.512"]
- fn vpsraid512(a: i32x16, imm8: u32) -> i32x16;
- #[link_name = "llvm.x86.avx2.psrai.d"]
- fn psraid256(a: i32x8, imm8: i32) -> i32x8;
- #[link_name = "llvm.x86.sse2.psrai.d"]
- fn psraid128(a: i32x4, imm8: i32) -> i32x4;
-
- #[link_name = "llvm.x86.avx512.psrai.q.512"]
- fn vpsraiq(a: i64x8, imm8: u32) -> i64x8;
- #[link_name = "llvm.x86.avx512.psrai.q.256"]
- fn vpsraiq256(a: i64x4, imm8: u32) -> i64x4;
- #[link_name = "llvm.x86.avx512.psrai.q.128"]
- fn vpsraiq128(a: i64x2, imm8: u32) -> i64x2;
-
#[link_name = "llvm.x86.avx512.psrav.d.512"]
fn vpsravd(a: i32x16, count: i32x16) -> i32x16;
diff --git a/library/stdarch/crates/core_arch/src/x86/mod.rs b/library/stdarch/crates/core_arch/src/x86/mod.rs
index ee8b7e75d..c5e457ae7 100644
--- a/library/stdarch/crates/core_arch/src/x86/mod.rs
+++ b/library/stdarch/crates/core_arch/src/x86/mod.rs
@@ -300,14 +300,14 @@ types! {
#[stable(feature = "simd_avx512_types", since = "CURRENT_RUSTC_VERSION")]
pub struct __m512d(f64, f64, f64, f64, f64, f64, f64, f64);
- /// 128-bit wide set of eight 'u16' types, x86-specific
+ /// 128-bit wide set of eight `u16` types, x86-specific
///
/// This type represents a 128-bit SIMD register which internally consists of
/// eight packed `u16` instances. It is used for bf16-related intrinsic
/// implementations.
pub struct __m128bh(u16, u16, u16, u16, u16, u16, u16, u16);
- /// 256-bit wide set of 16 'u16' types, x86-specific
+ /// 256-bit wide set of 16 `u16` types, x86-specific
///
/// This type is the same as the `__m256bh` type defined by Intel,
/// representing a 256-bit SIMD register which internally consists of
@@ -318,7 +318,7 @@ types! {
u16, u16, u16, u16, u16, u16, u16, u16
);
- /// 512-bit wide set of 32 'u16' types, x86-specific
+ /// 512-bit wide set of 32 `u16` types, x86-specific
///
/// This type is the same as the `__m512bh` type defined by Intel,
/// representing a 512-bit SIMD register which internally consists of
diff --git a/library/stdarch/crates/core_arch/src/x86/sse2.rs b/library/stdarch/crates/core_arch/src/x86/sse2.rs
index f4fdb5046..3d572a1f5 100644
--- a/library/stdarch/crates/core_arch/src/x86/sse2.rs
+++ b/library/stdarch/crates/core_arch/src/x86/sse2.rs
@@ -501,7 +501,11 @@ pub unsafe fn _mm_bsrli_si128<const IMM8: i32>(a: __m128i) -> __m128i {
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_slli_epi16<const IMM8: i32>(a: __m128i) -> __m128i {
static_assert_uimm_bits!(IMM8, 8);
- transmute(pslliw(a.as_i16x8(), IMM8))
+ if IMM8 >= 16 {
+ _mm_setzero_si128()
+ } else {
+ transmute(simd_shl(a.as_u16x8(), u16x8::splat(IMM8 as u16)))
+ }
}
/// Shifts packed 16-bit integers in `a` left by `count` while shifting in
@@ -526,7 +530,11 @@ pub unsafe fn _mm_sll_epi16(a: __m128i, count: __m128i) -> __m128i {
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_slli_epi32<const IMM8: i32>(a: __m128i) -> __m128i {
static_assert_uimm_bits!(IMM8, 8);
- transmute(psllid(a.as_i32x4(), IMM8))
+ if IMM8 >= 32 {
+ _mm_setzero_si128()
+ } else {
+ transmute(simd_shl(a.as_u32x4(), u32x4::splat(IMM8 as u32)))
+ }
}
/// Shifts packed 32-bit integers in `a` left by `count` while shifting in
@@ -551,7 +559,11 @@ pub unsafe fn _mm_sll_epi32(a: __m128i, count: __m128i) -> __m128i {
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_slli_epi64<const IMM8: i32>(a: __m128i) -> __m128i {
static_assert_uimm_bits!(IMM8, 8);
- transmute(pslliq(a.as_i64x2(), IMM8))
+ if IMM8 >= 64 {
+ _mm_setzero_si128()
+ } else {
+ transmute(simd_shl(a.as_u64x2(), u64x2::splat(IMM8 as u64)))
+ }
}
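Same approach as the AVX-512 immediates above, now applied to the SSE2 shifts. A short usage sketch of the edge case (illustrative, not part of the patch):

use core::arch::x86_64::*;

// Doubling each 64-bit lane versus shifting every lane out entirely.
unsafe fn slli_demo(a: __m128i) -> (__m128i, __m128i) {
    let doubled = _mm_slli_epi64::<1>(a); // each lane shifted left by one bit
    let cleared = _mm_slli_epi64::<64>(a); // count >= 64 now yields all-zero lanes
    (doubled, cleared)
}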
/// Shifts packed 64-bit integers in `a` left by `count` while shifting in
@@ -577,7 +589,7 @@ pub unsafe fn _mm_sll_epi64(a: __m128i, count: __m128i) -> __m128i {
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_srai_epi16<const IMM8: i32>(a: __m128i) -> __m128i {
static_assert_uimm_bits!(IMM8, 8);
- transmute(psraiw(a.as_i16x8(), IMM8))
+ transmute(simd_shr(a.as_i16x8(), i16x8::splat(IMM8.min(15) as i16)))
}
/// Shifts packed 16-bit integers in `a` right by `count` while shifting in sign
@@ -603,7 +615,7 @@ pub unsafe fn _mm_sra_epi16(a: __m128i, count: __m128i) -> __m128i {
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_srai_epi32<const IMM8: i32>(a: __m128i) -> __m128i {
static_assert_uimm_bits!(IMM8, 8);
- transmute(psraid(a.as_i32x4(), IMM8))
+ transmute(simd_shr(a.as_i32x4(), i32x4::splat(IMM8.min(31))))
}
/// Shifts packed 32-bit integers in `a` right by `count` while shifting in sign
@@ -680,7 +692,11 @@ unsafe fn _mm_srli_si128_impl<const IMM8: i32>(a: __m128i) -> __m128i {
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_srli_epi16<const IMM8: i32>(a: __m128i) -> __m128i {
static_assert_uimm_bits!(IMM8, 8);
- transmute(psrliw(a.as_i16x8(), IMM8))
+ if IMM8 >= 16 {
+ _mm_setzero_si128()
+ } else {
+ transmute(simd_shr(a.as_u16x8(), u16x8::splat(IMM8 as u16)))
+ }
}
/// Shifts packed 16-bit integers in `a` right by `count` while shifting in
@@ -706,7 +722,11 @@ pub unsafe fn _mm_srl_epi16(a: __m128i, count: __m128i) -> __m128i {
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_srli_epi32<const IMM8: i32>(a: __m128i) -> __m128i {
static_assert_uimm_bits!(IMM8, 8);
- transmute(psrlid(a.as_i32x4(), IMM8))
+ if IMM8 >= 32 {
+ _mm_setzero_si128()
+ } else {
+ transmute(simd_shr(a.as_u32x4(), u32x4::splat(IMM8 as u32)))
+ }
}
/// Shifts packed 32-bit integers in `a` right by `count` while shifting in
@@ -732,7 +752,11 @@ pub unsafe fn _mm_srl_epi32(a: __m128i, count: __m128i) -> __m128i {
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_srli_epi64<const IMM8: i32>(a: __m128i) -> __m128i {
static_assert_uimm_bits!(IMM8, 8);
- transmute(psrliq(a.as_i64x2(), IMM8))
+ if IMM8 >= 64 {
+ _mm_setzero_si128()
+ } else {
+ transmute(simd_shr(a.as_u64x2(), u64x2::splat(IMM8 as u64)))
+ }
}
/// Shifts packed 64-bit integers in `a` right by `count` while shifting in
@@ -1248,7 +1272,7 @@ pub unsafe fn _mm_store_si128(mem_addr: *mut __m128i, a: __m128i) {
#[cfg_attr(test, assert_instr(movups))] // FIXME movdqu expected
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_storeu_si128(mem_addr: *mut __m128i, a: __m128i) {
- storeudq(mem_addr as *mut i8, a);
+ mem_addr.write_unaligned(a);
}
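The removed storeudq shim was just an unaligned 128-bit store, which `write_unaligned` on the raw pointer expresses directly. A minimal sketch of why the unaligned form matters (helper name is illustrative):

use core::arch::x86_64::*;

// The destination only has to be valid for 16 bytes, not 16-byte aligned; the
// caller keeps `offset <= 16` so the store stays inside `buf`.
unsafe fn store_at(buf: &mut [u8; 32], offset: usize, a: __m128i) {
    buf.as_mut_ptr().add(offset).cast::<__m128i>().write_unaligned(a);
}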
/// Stores the lower 64-bit integer `a` to a memory location.
@@ -2515,7 +2539,7 @@ pub unsafe fn _mm_store_pd(mem_addr: *mut f64, a: __m128d) {
#[cfg_attr(test, assert_instr(movups))] // FIXME movupd expected
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_storeu_pd(mem_addr: *mut f64, a: __m128d) {
- storeupd(mem_addr as *mut i8, a);
+ mem_addr.cast::<__m128d>().write_unaligned(a);
}
/// Stores the lower double-precision (64-bit) floating-point element from `a`
@@ -2816,36 +2840,20 @@ extern "C" {
fn pmuludq(a: u32x4, b: u32x4) -> u64x2;
#[link_name = "llvm.x86.sse2.psad.bw"]
fn psadbw(a: u8x16, b: u8x16) -> u64x2;
- #[link_name = "llvm.x86.sse2.pslli.w"]
- fn pslliw(a: i16x8, imm8: i32) -> i16x8;
#[link_name = "llvm.x86.sse2.psll.w"]
fn psllw(a: i16x8, count: i16x8) -> i16x8;
- #[link_name = "llvm.x86.sse2.pslli.d"]
- fn psllid(a: i32x4, imm8: i32) -> i32x4;
#[link_name = "llvm.x86.sse2.psll.d"]
fn pslld(a: i32x4, count: i32x4) -> i32x4;
- #[link_name = "llvm.x86.sse2.pslli.q"]
- fn pslliq(a: i64x2, imm8: i32) -> i64x2;
#[link_name = "llvm.x86.sse2.psll.q"]
fn psllq(a: i64x2, count: i64x2) -> i64x2;
- #[link_name = "llvm.x86.sse2.psrai.w"]
- fn psraiw(a: i16x8, imm8: i32) -> i16x8;
#[link_name = "llvm.x86.sse2.psra.w"]
fn psraw(a: i16x8, count: i16x8) -> i16x8;
- #[link_name = "llvm.x86.sse2.psrai.d"]
- fn psraid(a: i32x4, imm8: i32) -> i32x4;
#[link_name = "llvm.x86.sse2.psra.d"]
fn psrad(a: i32x4, count: i32x4) -> i32x4;
- #[link_name = "llvm.x86.sse2.psrli.w"]
- fn psrliw(a: i16x8, imm8: i32) -> i16x8;
#[link_name = "llvm.x86.sse2.psrl.w"]
fn psrlw(a: i16x8, count: i16x8) -> i16x8;
- #[link_name = "llvm.x86.sse2.psrli.d"]
- fn psrlid(a: i32x4, imm8: i32) -> i32x4;
#[link_name = "llvm.x86.sse2.psrl.d"]
fn psrld(a: i32x4, count: i32x4) -> i32x4;
- #[link_name = "llvm.x86.sse2.psrli.q"]
- fn psrliq(a: i64x2, imm8: i32) -> i64x2;
#[link_name = "llvm.x86.sse2.psrl.q"]
fn psrlq(a: i64x2, count: i64x2) -> i64x2;
#[link_name = "llvm.x86.sse2.cvtdq2ps"]
@@ -2920,10 +2928,6 @@ extern "C" {
fn cvttsd2si(a: __m128d) -> i32;
#[link_name = "llvm.x86.sse2.cvttps2dq"]
fn cvttps2dq(a: __m128) -> i32x4;
- #[link_name = "llvm.x86.sse2.storeu.dq"]
- fn storeudq(mem_addr: *mut i8, a: __m128i);
- #[link_name = "llvm.x86.sse2.storeu.pd"]
- fn storeupd(mem_addr: *mut i8, a: __m128d);
}
#[cfg(test)]
diff --git a/library/stdarch/crates/intrinsic-test/missing_aarch64.txt b/library/stdarch/crates/intrinsic-test/missing_aarch64.txt
index b09d677af..33b7425d7 100644
--- a/library/stdarch/crates/intrinsic-test/missing_aarch64.txt
+++ b/library/stdarch/crates/intrinsic-test/missing_aarch64.txt
@@ -12,27 +12,18 @@ vbfmlaltq_f32
vbfmlaltq_lane_f32
vbfmlaltq_laneq_f32
vbfmmlaq_f32
-vsudot_laneq_s32
-vsudot_lane_s32
-vsudotq_laneq_s32
-vsudotq_lane_s32
-vusdot_laneq_s32
-vusdot_lane_s32
-vusdotq_laneq_s32
-vusdotq_lane_s32
-vusdotq_s32
-vusdot_s32
-# Missing from both Clang and stdarch
-vrnd32x_f64
+# Implemented in stdarch, but missing in Clang.
vrnd32xq_f64
-vrnd32z_f64
vrnd32zq_f64
-vrnd64x_f64
vrnd64xq_f64
-vrnd64z_f64
vrnd64zq_f64
+# LLVM select error, and missing in Clang.
+vrnd32x_f64
+vrnd32z_f64
+vrnd64x_f64
+vrnd64z_f64
# LLVM select error in debug builds
#vqshlu_n_s16
diff --git a/library/stdarch/crates/intrinsic-test/missing_arm.txt b/library/stdarch/crates/intrinsic-test/missing_arm.txt
index 3acc61678..7439cd6e6 100644
--- a/library/stdarch/crates/intrinsic-test/missing_arm.txt
+++ b/library/stdarch/crates/intrinsic-test/missing_arm.txt
@@ -12,16 +12,6 @@ vbfmlaltq_f32
vbfmlaltq_lane_f32
vbfmlaltq_laneq_f32
vbfmmlaq_f32
-vsudot_laneq_s32
-vsudot_lane_s32
-vsudotq_laneq_s32
-vsudotq_lane_s32
-vusdot_laneq_s32
-vusdot_lane_s32
-vusdotq_laneq_s32
-vusdotq_lane_s32
-vusdotq_s32
-vusdot_s32
# Implemented in Clang and stdarch for A64 only even though CSV claims A32 support
__crc32d
@@ -170,14 +160,6 @@ vcvtpq_s32_f32
vcvtpq_u32_f32
vcvtp_s32_f32
vcvtp_u32_f32
-vdot_lane_s32
-vdot_lane_u32
-vdotq_lane_s32
-vdotq_lane_u32
-vdotq_s32
-vdotq_u32
-vdot_s32
-vdot_u32
vqdmulh_lane_s16
vqdmulh_lane_s32
vqdmulhq_lane_s16
diff --git a/library/stdarch/crates/intrinsic-test/src/argument.rs b/library/stdarch/crates/intrinsic-test/src/argument.rs
index c2f9f9450..dd930115b 100644
--- a/library/stdarch/crates/intrinsic-test/src/argument.rs
+++ b/library/stdarch/crates/intrinsic-test/src/argument.rs
@@ -173,8 +173,8 @@ impl ArgumentList {
.join("\n")
}
- /// Creates a line for each argument that initalizes the argument from an array [arg]_vals at
- /// an offset i using a load intrinsic, in C.
+ /// Creates a line for each argument that initializes the argument from an array `[arg]_vals` at
+ /// an offset `i` using a load intrinsic, in C.
/// e.g `uint8x8_t a = vld1_u8(&a_vals[i]);`
pub fn load_values_c(&self, p64_armv7_workaround: bool) -> String {
self.iter()
@@ -214,8 +214,8 @@ impl ArgumentList {
.join("\n ")
}
- /// Creates a line for each argument that initalizes the argument from array [ARG]_VALS at
- /// an offset i using a load intrinsic, in Rust.
+ /// Creates a line for each argument that initializes the argument from array `[ARG]_VALS` at
+ /// an offset `i` using a load intrinsic, in Rust.
/// e.g `let a = vld1_u8(A_VALS.as_ptr().offset(i));`
pub fn load_values_rust(&self) -> String {
self.iter()
diff --git a/library/stdarch/crates/std_detect/src/detect/arch/arm.rs b/library/stdarch/crates/std_detect/src/detect/arch/arm.rs
index a7dea27fb..fd332e0b2 100644
--- a/library/stdarch/crates/std_detect/src/detect/arch/arm.rs
+++ b/library/stdarch/crates/std_detect/src/detect/arch/arm.rs
@@ -22,5 +22,7 @@ features! {
@FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] sha2: "sha2";
/// FEAT_SHA1 & FEAT_SHA256 (SHA1 & SHA2-256 instructions)
@FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] i8mm: "i8mm";
- /// FEAT_I8MM
+ /// FEAT_I8MM (integer matrix multiplication, plus ASIMD support)
+ @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] dotprod: "dotprod";
+ /// FEAT_DotProd (Vector Dot-Product - ASIMDDP)
}
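
With `dotprod` registered in the `features!` table, runtime checks follow the usual detection-macro pattern. A hedged usage sketch — the arm detection macro is unstable, so this is a nightly-only snippet for a 32-bit Arm target, not code from the patch itself:

```rust
// Nightly, 32-bit Arm only: the arm detection macro is behind `stdsimd`.
#![feature(stdsimd)]

fn main() {
    // After this change, "dotprod" and "i8mm" are accepted feature names here.
    if std::arch::is_arm_feature_detected!("dotprod") {
        println!("FEAT_DotProd (asimddp) available: vdot/vudot intrinsics usable");
    }
    if std::arch::is_arm_feature_detected!("i8mm") {
        println!("FEAT_I8MM available: vusdot/vsudot intrinsics usable");
    }
}
```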
diff --git a/library/stdarch/crates/std_detect/src/detect/os/linux/arm.rs b/library/stdarch/crates/std_detect/src/detect/os/linux/arm.rs
index 7601cf0a8..4dc9590e1 100644
--- a/library/stdarch/crates/std_detect/src/detect/os/linux/arm.rs
+++ b/library/stdarch/crates/std_detect/src/detect/os/linux/arm.rs
@@ -17,6 +17,8 @@ pub(crate) fn detect_features() -> cache::Initializer {
//
// [hwcap]: https://github.com/torvalds/linux/blob/master/arch/arm/include/uapi/asm/hwcap.h
if let Ok(auxv) = auxvec::auxv() {
+ enable_feature(&mut value, Feature::i8mm, bit::test(auxv.hwcap, 27));
+ enable_feature(&mut value, Feature::dotprod, bit::test(auxv.hwcap, 24));
enable_feature(&mut value, Feature::neon, bit::test(auxv.hwcap, 12));
enable_feature(&mut value, Feature::pmull, bit::test(auxv.hwcap2, 1));
enable_feature(&mut value, Feature::crc, bit::test(auxv.hwcap2, 4));
@@ -37,6 +39,12 @@ pub(crate) fn detect_features() -> cache::Initializer {
Feature::neon,
c.field("Features").has("neon") && !has_broken_neon(&c),
);
+ enable_feature(&mut value, Feature::i8mm, c.field("Features").has("i8mm"));
+ enable_feature(
+ &mut value,
+ Feature::dotprod,
+ c.field("Features").has("asimddp"),
+ );
enable_feature(&mut value, Feature::pmull, c.field("Features").has("pmull"));
enable_feature(&mut value, Feature::crc, c.field("Features").has("crc32"));
enable_feature(&mut value, Feature::aes, c.field("Features").has("aes"));
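
The two new `enable_feature` calls read hwcap bits 27 (i8mm) and 24 (dotprod) from the auxiliary vector, per the kernel header referenced in the comment above, with `/proc/cpuinfo`'s `Features` field (`i8mm` / `asimddp`) as the fallback. A minimal sketch of the same bit test, assuming a raw `hwcap` word has already been obtained elsewhere (e.g. via `libc::getauxval(libc::AT_HWCAP)`):

```rust
// Sketch of the HWCAP bit test used above; bit positions follow the diff.
// `hwcap` is assumed to have been read from auxv elsewhere.
const HWCAP_DOTPROD_BIT: u32 = 24; // FEAT_DotProd
const HWCAP_I8MM_BIT: u32 = 27; // FEAT_I8MM

fn bit_test(hwcap: usize, bit: u32) -> bool {
    hwcap & (1 << bit) != 0
}

fn main() {
    // Hypothetical value with bits 24 and 27 set, standing in for the real auxv word.
    let hwcap: usize = (1 << HWCAP_DOTPROD_BIT) | (1 << HWCAP_I8MM_BIT);
    println!("dotprod: {}", bit_test(hwcap, HWCAP_DOTPROD_BIT));
    println!("i8mm:    {}", bit_test(hwcap, HWCAP_I8MM_BIT));
}
```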
diff --git a/library/stdarch/crates/std_detect/src/lib.rs b/library/stdarch/crates/std_detect/src/lib.rs
index c0819218c..7fdfb872e 100644
--- a/library/stdarch/crates/std_detect/src/lib.rs
+++ b/library/stdarch/crates/std_detect/src/lib.rs
@@ -19,8 +19,6 @@
#![deny(clippy::missing_inline_in_public_items)]
#![cfg_attr(test, allow(unused_imports))]
#![no_std]
-// FIXME(Nilstrieb): Remove this once the compiler in stdarch CI has the internal_features lint.
-#![allow(unknown_lints)]
#![allow(internal_features)]
#[cfg(test)]
diff --git a/library/stdarch/crates/std_detect/tests/cpu-detection.rs b/library/stdarch/crates/std_detect/tests/cpu-detection.rs
index 38bdb5bbd..f93212d24 100644
--- a/library/stdarch/crates/std_detect/tests/cpu-detection.rs
+++ b/library/stdarch/crates/std_detect/tests/cpu-detection.rs
@@ -20,16 +20,25 @@ fn all() {
}
#[test]
-#[cfg(all(
- target_arch = "arm",
- any(target_os = "linux", target_os = "android", target_os = "freebsd"),
-))]
-fn arm_linux_or_freebsd() {
+#[cfg(all(target_arch = "arm", any(target_os = "freebsd"),))]
+fn arm_freebsd() {
+ println!("neon: {}", is_arm_feature_detected!("neon"));
+ println!("pmull: {}", is_arm_feature_detected!("pmull"));
+ println!("crc: {}", is_arm_feature_detected!("crc"));
+ println!("aes: {}", is_arm_feature_detected!("aes"));
+ println!("sha2: {}", is_arm_feature_detected!("sha2"));
+}
+
+#[test]
+#[cfg(all(target_arch = "arm", any(target_os = "linux", target_os = "android"),))]
+fn arm_linux() {
println!("neon: {}", is_arm_feature_detected!("neon"));
println!("pmull: {}", is_arm_feature_detected!("pmull"));
println!("crc: {}", is_arm_feature_detected!("crc"));
println!("aes: {}", is_arm_feature_detected!("aes"));
println!("sha2: {}", is_arm_feature_detected!("sha2"));
+ println!("dotprod: {}", is_arm_feature_detected!("dotprod"));
+ println!("i8mm: {}", is_arm_feature_detected!("i8mm"));
}
#[test]
diff --git a/library/stdarch/crates/stdarch-gen/neon.spec b/library/stdarch/crates/stdarch-gen/neon.spec
index 06090e669..760fa2204 100644
--- a/library/stdarch/crates/stdarch-gen/neon.spec
+++ b/library/stdarch/crates/stdarch-gen/neon.spec
@@ -3478,27 +3478,138 @@ link-arm = vst4lane._EXTpi8r_
const-arm = LANE
generate *mut f32:float32x2x4_t:void, *mut f32:float32x4x4_t:void
+/// Dot product vector form with unsigned and signed integers
+name = vusdot
+out-suffix
+a = 1000, -4200, -1000, 2000
+b = 100, 205, 110, 195, 120, 185, 130, 175, 140, 165, 150, 155, 160, 145, 170, 135
+c = 0, 1, 2, 3, -1, -2, -3, -4, 4, 5, 6, 7, -5, -6, -7, -8
+aarch64 = usdot
+arm = vusdot
+target = i8mm
+
+// 1000 + (100, 205, 110, 195) . ( 0, 1, 2, 3)
+// -4200 + (120, 185, 130, 175) . (-1, -2, -3, -4)
+// ...
+validate 2010, -5780, 2370, -1940
+
+link-arm = usdot._EXT2_._EXT4_:int32x2_t:uint8x8_t:int8x8_t:int32x2_t
+link-aarch64 = usdot._EXT2_._EXT4_:int32x2_t:uint8x8_t:int8x8_t:int32x2_t
+generate int32x2_t:uint8x8_t:int8x8_t:int32x2_t
+
+link-arm = usdot._EXT2_._EXT4_:int32x4_t:uint8x16_t:int8x16_t:int32x4_t
+link-aarch64 = usdot._EXT2_._EXT4_:int32x4_t:uint8x16_t:int8x16_t:int32x4_t
+generate int32x4_t:uint8x16_t:int8x16_t:int32x4_t
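
The `validate` line for the q-form follows directly from the usdot definition: each 32-bit accumulator lane adds the dot product of four unsigned bytes of `b` with four signed bytes of `c`. A plain-Rust check of that arithmetic (the spec's expected values, not the intrinsic itself):

```rust
// Scalar re-computation of the validate values for the vusdotq_s32 entry
// above: out[i] = a[i] + dot(b[4i..4i+4] as u8, c[4i..4i+4] as i8).
fn usdot4(acc: i32, b: &[u8], c: &[i8]) -> i32 {
    acc + b.iter().zip(c).map(|(&u, &s)| u as i32 * s as i32).sum::<i32>()
}

fn main() {
    let a = [1000, -4200, -1000, 2000];
    let b: [u8; 16] = [100, 205, 110, 195, 120, 185, 130, 175,
                       140, 165, 150, 155, 160, 145, 170, 135];
    let c: [i8; 16] = [0, 1, 2, 3, -1, -2, -3, -4,
                       4, 5, 6, 7, -5, -6, -7, -8];
    let out: Vec<i32> = (0..4)
        .map(|i| usdot4(a[i], &b[4 * i..4 * i + 4], &c[4 * i..4 * i + 4]))
        .collect();
    assert_eq!(out, [2010, -5780, 2370, -1940]); // matches the validate line
    println!("{out:?}");
}
```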
+
+/// Dot product index form with unsigned and signed integers
+name = vusdot
+out-lane-suffixes
+constn = LANE
+aarch64 = usdot
+arm = vusdot
+target = i8mm
+multi_fn = static_assert_imm-in2_dot-LANE
+multi_fn = transmute, c:merge4_t2, c
+multi_fn = simd_shuffle!, c:out_signed, c, c, {dup-out_len-LANE as u32}
+multi_fn = vusdot-out-noext, a, b, {transmute, c}
+a = 1000, -4200, -1000, 2000
+b = 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250
+c = 4, 3, 2, 1, 0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11
+
+// 1000 + (100, 110, 120, 130) . (4, 3, 2, 1)
+// -4200 + (140, 150, 160, 170) . (4, 3, 2, 1)
+// ...
+n = 0
+validate 2100, -2700, 900, 4300
+
+// 1000 + (100, 110, 120, 130) . (0, -1, -2, -3)
+// -4200 + (140, 150, 160, 170) . (0, -1, -2, -3)
+// ...
+n = 1
+validate 260, -5180, -2220, 540
+
+generate int32x2_t:uint8x8_t:int8x8_t:int32x2_t
+generate int32x4_t:uint8x16_t:int8x8_t:int32x4_t
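
The index form is the same accumulate step, but `c` is first viewed as 32-bit groups of four bytes (the `merge4_t2` transmute) and group `LANE` is broadcast to every output lane (the `dup`/`simd_shuffle!` step) before the usdot. A scalar sketch of that lane selection, checked against the `n = 0` and `n = 1` validate lines above:

```rust
// Sketch of the lane-indexed form: pick 4-byte group `lane` of `c`, reuse it
// for every 4-byte group of `b`, then accumulate. Plain Rust, not the intrinsic.
fn usdot_lane(a: &[i32; 4], b: &[u8; 16], c: &[i8; 8], lane: usize) -> [i32; 4] {
    let sel = &c[4 * lane..4 * lane + 4]; // broadcast group
    let mut out = *a;
    for i in 0..4 {
        out[i] += b[4 * i..4 * i + 4]
            .iter()
            .zip(sel)
            .map(|(&u, &s)| u as i32 * s as i32)
            .sum::<i32>();
    }
    out
}

fn main() {
    let a = [1000, -4200, -1000, 2000];
    let b: [u8; 16] = [100, 110, 120, 130, 140, 150, 160, 170,
                       180, 190, 200, 210, 220, 230, 240, 250];
    let c: [i8; 8] = [4, 3, 2, 1, 0, -1, -2, -3];
    assert_eq!(usdot_lane(&a, &b, &c, 0), [2100, -2700, 900, 4300]); // n = 0
    assert_eq!(usdot_lane(&a, &b, &c, 1), [260, -5180, -2220, 540]); // n = 1
}
```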
+
+/// Dot product index form with unsigned and signed integers
+name = vusdot
+out-lane-suffixes
+constn = LANE
+// Only AArch64 has the laneq forms.
+aarch64 = usdot
+target = i8mm
+multi_fn = static_assert_imm-in2_dot-LANE
+multi_fn = transmute, c:merge4_t2, c
+multi_fn = simd_shuffle!, c:out_signed, c, c, {dup-out_len-LANE as u32}
+multi_fn = vusdot-out-noext, a, b, {transmute, c}
+a = 1000, -4200, -1000, 2000
+b = 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250
+c = 4, 3, 2, 1, 0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11
+
+// 1000 + (100, 110, 120, 130) . (-4, -5, -6, -7)
+// -4200 + (140, 150, 160, 170) . (-4, -5, -6, -7)
+// ...
+n = 3
+validate -3420, -10140, -8460, -6980
+
+generate int32x2_t:uint8x8_t:int8x16_t:int32x2_t
+generate int32x4_t:uint8x16_t:int8x16_t:int32x4_t
+
/// Dot product index form with signed and unsigned integers
name = vsudot
out-lane-suffixes
constn = LANE
+aarch64 = sudot
+arm = vsudot
+target = i8mm
+
multi_fn = static_assert_imm-in2_dot-LANE
-multi_fn = simd_shuffle!, c:unsigned, c, c, {base-4-LANE}
-multi_fn = vsudot-outlane-_, a, b, c
-a = 1, 2, 1, 2
-b = 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8
-c = 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8
+multi_fn = transmute, c:merge4_t2, c
+multi_fn = simd_shuffle!, c:out_unsigned, c, c, {dup-out_len-LANE as u32}
+multi_fn = vusdot-out-noext, a, {transmute, c}, b
+a = -2000, 4200, -1000, 2000
+b = 4, 3, 2, 1, 0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11
+c = 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250
+
+// -2000 + (4, 3, 2, 1) . (100, 110, 120, 130)
+// 4200 + (0, -1, -2, -3) . (100, 110, 120, 130)
+// ...
n = 0
-validate 31, 72, 31, 72
-target = dotprod
+validate -900, 3460, -3580, -2420
+
+// -2000 + (4, 3, 2, 1) . (140, 150, 160, 170)
+// 4200 + (0, -1, -2, -3) . (140, 150, 160, 170)
+// ...
+n = 1
+validate -500, 3220, -4460, -3940
+generate int32x2_t:int8x8_t:uint8x8_t:int32x2_t
+generate int32x4_t:int8x16_t:uint8x8_t:int32x4_t
+
+/// Dot product index form with signed and unsigned integers
+name = vsudot
+out-lane-suffixes
+constn = LANE
+// Only AArch64 has the laneq forms.
aarch64 = sudot
-link-aarch64 = usdot._EXT2_._EXT4_:int32x2_t:int8x8_t:uint8x8_t:int32x2_t
-// LLVM ERROR: Cannot select: intrinsic %llvm.aarch64.neon.usdot
-//generate int32x2_t:int8x8_t:uint8x8_t:int32x2_t, int32x2_t:int8x8_t:uint8x16_t:int32x2_t
-link-aarch64 = usdot._EXT2_._EXT4_:int32x4_t:int8x16_t:uint8x16_t:int32x4_t
-// LLVM ERROR: Cannot select: intrinsic %llvm.aarch64.neon.usdot
-//generate int32x4_t:int8x16_t:uint8x8_t:int32x4_t, int32x4_t:int8x16_t:uint8x16_t:int32x4_t
+target = i8mm
+
+multi_fn = static_assert_imm-in2_dot-LANE
+multi_fn = transmute, c:merge4_t2, c
+multi_fn = simd_shuffle!, c:out_unsigned, c, c, {dup-out_len-LANE as u32}
+multi_fn = vusdot-out-noext, a, {transmute, c}, b
+a = -2000, 4200, -1000, 2000
+b = 4, 3, 2, 1, 0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11
+c = 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250
+
+// -2000 + (4, 3, 2, 1) . (220, 230, 240, 250)
+// 4200 + (0, -1, -2, -3) . (220, 230, 240, 250)
+// ...
+n = 3
+validate 300, 2740, -6220, -6980
+
+generate int32x2_t:int8x8_t:uint8x16_t:int32x2_t
+generate int32x4_t:int8x16_t:uint8x16_t:int32x4_t
/// Multiply
name = vmul
@@ -4612,7 +4723,7 @@ aarch64 = fcmla
generate float32x2_t, float32x2_t:float32x2_t:float32x4_t:float32x2_t
generate float32x4_t:float32x4_t:float32x2_t:float32x4_t, float32x4_t
-/// Dot product arithmetic
+/// Dot product arithmetic (vector)
name = vdot
out-suffix
a = 1, 2, 1, 2
@@ -4621,35 +4732,65 @@ c = 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8
validate 31, 176, 31, 176
target = dotprod
+arm = vsdot
aarch64 = sdot
+link-arm = sdot._EXT_._EXT3_
link-aarch64 = sdot._EXT_._EXT3_
generate int32x2_t:int8x8_t:int8x8_t:int32x2_t, int32x4_t:int8x16_t:int8x16_t:int32x4_t
+arm = vudot
aarch64 = udot
+link-arm = udot._EXT_._EXT3_
link-aarch64 = udot._EXT_._EXT3_
generate uint32x2_t:uint8x8_t:uint8x8_t:uint32x2_t, uint32x4_t:uint8x16_t:uint8x16_t:uint32x4_t
-/// Dot product arithmetic
+/// Dot product arithmetic (indexed)
name = vdot
out-lane-suffixes
constn = LANE
multi_fn = static_assert_imm-in2_dot-LANE
-multi_fn = simd_shuffle!, c:in_t, c, c, {base-4-LANE}
-multi_fn = vdot-out-noext, a, b, c
+multi_fn = transmute, c:merge4_t2, c
+multi_fn = simd_shuffle!, c:out_t, c, c, {dup-out_len-LANE as u32}
+multi_fn = vdot-out-noext, a, b, {transmute, c}
a = 1, 2, 1, 2
-b = 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8
+b = -1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8
c = 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8
n = 0
-validate 31, 72, 31, 72
+validate 29, 72, 31, 72
target = dotprod
+// Only AArch64 has the laneq forms.
aarch64 = sdot
-generate int32x2_t:int8x8_t:int8x8_t:int32x2_t, int32x2_t:int8x8_t:int8x16_t:int32x2_t
-generate int32x4_t:int8x16_t:int8x8_t:int32x4_t, int32x4_t:int8x16_t:int8x16_t:int32x4_t
+generate int32x2_t:int8x8_t:int8x16_t:int32x2_t
+generate int32x4_t:int8x16_t:int8x16_t:int32x4_t
+
+arm = vsdot
+generate int32x2_t:int8x8_t:int8x8_t:int32x2_t
+generate int32x4_t:int8x16_t:int8x8_t:int32x4_t
+
+/// Dot product arithmetic (indexed)
+name = vdot
+out-lane-suffixes
+constn = LANE
+multi_fn = static_assert_imm-in2_dot-LANE
+multi_fn = transmute, c:merge4_t2, c
+multi_fn = simd_shuffle!, c:out_t, c, c, {dup-out_len-LANE as u32}
+multi_fn = vdot-out-noext, a, b, {transmute, c}
+a = 1, 2, 1, 2
+b = 255, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8
+c = 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8
+n = 0
+validate 285, 72, 31, 72
+target = dotprod
+// Only AArch64 has the laneq forms.
aarch64 = udot
-generate uint32x2_t:uint8x8_t:uint8x8_t:uint32x2_t, uint32x2_t:uint8x8_t:uint8x16_t:uint32x2_t
-generate uint32x4_t:uint8x16_t:uint8x8_t:uint32x4_t, uint32x4_t:uint8x16_t:uint8x16_t:uint32x4_t
+generate uint32x2_t:uint8x8_t:uint8x16_t:uint32x2_t
+generate uint32x4_t:uint8x16_t:uint8x16_t:uint32x4_t
+
+arm = vudot
+generate uint32x2_t:uint8x8_t:uint8x8_t:uint32x2_t
+generate uint32x4_t:uint8x16_t:uint8x8_t:uint32x4_t
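
The test inputs for the indexed `vdot` forms now start with `b[0] = -1` (signed) and `b[0] = 255` (unsigned), so a sign mix-up in the generated intrinsic would change the expected lane: the same byte pattern yields 29 through sdot but 285 through udot. A quick scalar check of those two validate values:

```rust
// Scalar check of the first output lane for the indexed vdot tests above.
// Byte 0xFF is -1 when treated as signed, 255 when treated as unsigned.
fn main() {
    let acc = 1i32;
    let lane0_c = [1i32, 2, 3, 4]; // c lane 0, i.e. n = 0

    // Signed form: b starts with -1, 2, 3, 4, ...
    let b_s = [-1i32, 2, 3, 4];
    let sdot: i32 = acc + b_s.iter().zip(&lane0_c).map(|(x, y)| x * y).sum::<i32>();
    assert_eq!(sdot, 29); // matches `validate 29, 72, ...`

    // Unsigned form: b starts with 255, 2, 3, 4, ...
    let b_u = [255i32, 2, 3, 4];
    let udot: i32 = acc + b_u.iter().zip(&lane0_c).map(|(x, y)| x * y).sum::<i32>();
    assert_eq!(udot, 285); // matches `validate 285, 72, ...`
}
```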
/// Maximum (vector)
name = vmax
@@ -6511,7 +6652,7 @@ name = vrshr
n-suffix
constn = N
multi_fn = static_assert-N-1-bits
-multi_fn = vrshl-self-noext, a, {vdup-nself-noext, (-N) as _}
+multi_fn = vrshl-self-noext, a, {vdup-nself-noext, -N as _}
a = 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64
n = 2
validate 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
@@ -6538,7 +6679,7 @@ name = vrshr
n-suffix
constn = N
multi_fn = static_assert-N-1-bits
-multi_fn = vrshl-self-noext, a, {vdup-nsigned-noext, (-N) as _}
+multi_fn = vrshl-self-noext, a, {vdup-nsigned-noext, -N as _}
a = 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64
n = 2
validate 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
@@ -6650,10 +6791,10 @@ b = 4
n = 2
validate 2
-aarch64 = srsra
+aarch64 = srshr
generate i64
-/// Ungisned rounding shift right and accumulate.
+/// Unsigned rounding shift right and accumulate.
name = vrsra
n-suffix
constn = N
@@ -6665,7 +6806,7 @@ b = 4
n = 2
validate 2
-aarch64 = ursra
+aarch64 = urshr
generate u64
/// Rounding subtract returning high narrow
@@ -7071,44 +7212,170 @@ generate uint64x2_t
/// Floating-point round to 32-bit integer, using current rounding mode
name = vrnd32x
-a = 1.1, 1.9, -1.7, -2.3
-validate 1.0, 2.0, -2.0, -2.0
target = frintts
+// For validation, the rounding mode should be the default: round-to-nearest (ties-to-even).
+a = -1.5, 2.9, 1.5, -2.5
+validate -2.0, 3.0, 2.0, -2.0
+
aarch64 = frint32x
link-aarch64 = frint32x._EXT_
generate float32x2_t, float32x4_t
+// The float64x1_t form uses a different LLVM link and isn't supported by Clang
+// (and so has no intrinsic-test), so perform extra validation to make sure
+// that it matches the float64x2_t form.
+
+a = 1.5, -2.5
+validate 2.0, -2.0
+// - The biggest f64 that rounds to i32::MAX.
+// - The smallest positive f64 that rounds out of range.
+a = 2147483647.499999762, 2147483647.5
+validate 2147483647.0, -2147483648.0
+// - The smallest f64 that rounds to i32::MIN + 1.
+// - The largest negative f64 that rounds out of range.
+a = -2147483647.499999762, -2147483648.500000477
+validate -2147483647.0, -2147483648.0
+generate float64x2_t
+
+// Odd-numbered tests for float64x1_t coverage.
+a = 2.9
+validate 3.0
+a = -2.5
+validate -2.0
+a = 2147483647.5
+validate -2147483648.0
+a = -2147483648.500000477
+validate -2147483648.0
+
+multi_fn = transmute, {self-out-_, {simd_extract, a, 0}}
+link-aarch64 = llvm.aarch64.frint32x.f64:f64:::f64
+generate float64x1_t
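
The boundary inputs for the `float64x2_t` coverage come from f64's spacing near 2^31: the step between adjacent doubles there is 2^-22, so the largest value that still rounds to i32::MAX is the double immediately below 2147483647.5, and anything at or above the tie point rounds out of range (which frint32x then saturates to -2147483648.0, per the validate lines). A small sketch deriving the 2147483647.499999762 constant used above:

```rust
// Derive the largest f64 that still rounds to i32::MAX, i.e. the double
// immediately below 2147483647.5. Plain Rust; the saturation behaviour
// itself belongs to frint32x and is only asserted by the spec above.
fn main() {
    let tie = 2147483647.5f64;
    // Next representable f64 below the tie point (ULP near 2^31 is 2^-22).
    let below = f64::from_bits(tie.to_bits() - 1);
    println!("{below:.9}"); // 2147483647.499999762
    // f64::round (half away from zero) agrees with ties-to-even for these two values.
    assert_eq!(below.round(), 2147483647.0); // still representable as i32
    assert_eq!(tie.round(), 2147483648.0); // out of i32 range
}
```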
+
/// Floating-point round to 32-bit integer toward zero
name = vrnd32z
-a = 1.1, 1.9, -1.7, -2.3
-validate 1.0, 1.0, -1.0, -2.0
target = frintts
+a = -1.5, 2.9, 1.5, -2.5
+validate -1.0, 2.0, 1.0, -2.0
+
aarch64 = frint32z
link-aarch64 = frint32z._EXT_
generate float32x2_t, float32x4_t
+// The float64x1_t form uses a different LLVM link and isn't supported by Clang
+// (and so has no intrinsic-test), so perform extra validation to make sure
+// that it matches the float64x2_t form.
+
+a = 1.5, -2.5
+validate 1.0, -2.0
+// - The biggest f64 that rounds to i32::MAX.
+// - The smallest positive f64 that rounds out of range.
+a = 2147483647.999999762, 2147483648.0
+validate 2147483647.0, -2147483648.0
+// - The smallest f64 that rounds to i32::MIN + 1.
+// - The largest negative f64 that rounds out of range.
+a = -2147483647.999999762, -2147483649.0
+validate -2147483647.0, -2147483648.0
+generate float64x2_t
+
+// Odd-numbered tests for float64x1_t coverage.
+a = 2.9
+validate 2.0
+a = -2.5
+validate -2.0
+a = 2147483648.0
+validate -2147483648.0
+a = -2147483649.0
+validate -2147483648.0
+
+multi_fn = transmute, {self-out-_, {simd_extract, a, 0}}
+link-aarch64 = llvm.aarch64.frint32z.f64:f64:::f64
+generate float64x1_t
+
/// Floating-point round to 64-bit integer, using current rounding mode
name = vrnd64x
-a = 1.1, 1.9, -1.7, -2.3
-validate 1.0, 2.0, -2.0, -2.0
target = frintts
+// For validation, the rounding mode should be the default: round-to-nearest (ties-to-even).
+a = -1.5, 2.9, 1.5, -2.5
+validate -2.0, 3.0, 2.0, -2.0
+
aarch64 = frint64x
link-aarch64 = frint64x._EXT_
generate float32x2_t, float32x4_t
+// The float64x1_t form uses a different LLVM link and isn't supported by Clang
+// (and so has no intrinsic-test), so perform extra validation to make sure
+// that it matches the float64x2_t form.
+
+a = 1.5, -2.5
+validate 2.0, -2.0
+// - The biggest f64 representable as an i64 (0x7ffffffffffffc00).
+// - The smallest positive f64 that is out of range (2^63).
+a = 9223372036854774784.0, 9223372036854775808.0
+validate 9223372036854774784.0, -9223372036854775808.0
+// - The smallest f64 representable as an i64 (i64::MIN).
+// - The biggest negative f64 that is out of range.
+a = -9223372036854775808.0, -9223372036854777856.0
+validate -9223372036854775808.0, -9223372036854775808.0
+generate float64x2_t
+
+// Odd-numbered tests for float64x1_t coverage.
+a = 2.9
+validate 3.0
+a = -2.5
+validate -2.0
+a = 9223372036854775808.0
+validate -9223372036854775808.0
+a = -9223372036854777856.0
+validate -9223372036854775808.0
+
+multi_fn = transmute, {self-out-_, {simd_extract, a, 0}}
+link-aarch64 = llvm.aarch64.frint64x.f64:f64:::f64
+generate float64x1_t
+
/// Floating-point round to 64-bit integer toward zero
name = vrnd64z
-a = 1.1, 1.9, -1.7, -2.3
-validate 1.0, 1.0, -1.0, -2.0
target = frintts
+a = -1.5, 2.9, 1.5, -2.5
+validate -1.0, 2.0, 1.0, -2.0
+
aarch64 = frint64z
link-aarch64 = frint64z._EXT_
generate float32x2_t, float32x4_t
+// The float64x1_t form uses a different LLVM link and isn't supported by Clang
+// (and so has no intrinsic-test), so perform extra validation to make sure
+// that it matches the float64x2_t form.
+
+a = 1.5, -2.5
+validate 1.0, -2.0
+// - The biggest f64 representable as an i64 (0x7ffffffffffffc00).
+// - The smallest positive f64 that is out of range (2^63).
+a = 9223372036854774784.0, 9223372036854775808.0
+validate 9223372036854774784.0, -9223372036854775808.0
+// - The smallest f64 representable as an i64 (i64::MIN).
+// - The biggest negative f64 that is out of range.
+a = -9223372036854775808.0, -9223372036854777856.0
+validate -9223372036854775808.0, -9223372036854775808.0
+generate float64x2_t
+
+// Odd-numbered tests for float64x1_t coverage.
+a = 2.9
+validate 2.0
+a = -2.5
+validate -2.0
+a = 9223372036854775808.0
+validate -9223372036854775808.0
+a = -9223372036854777856.0
+validate -9223372036854775808.0
+
+multi_fn = transmute, {self-out-_, {simd_extract, a, 0}}
+link-aarch64 = llvm.aarch64.frint64z.f64:f64:::f64
+generate float64x1_t
+
/// Transpose elements
name = vtrn
multi_fn = simd_shuffle!, a1:in_t, a, b, {transpose-1-in_len}
@@ -7209,7 +7476,7 @@ generate uint8x8_t:uint8x8_t:uint8x8x2_t, uint16x4_t:uint16x4_t:uint16x4x2_t
generate poly8x8_t:poly8x8_t:poly8x8x2_t, poly16x4_t:poly16x4_t:poly16x4x2_t
arm = vtrn
generate int32x2_t:int32x2_t:int32x2x2_t, uint32x2_t:uint32x2_t:uint32x2x2_t
-aarch64 = ext
+aarch64 = zip
arm = vorr
generate int8x16_t:int8x16_t:int8x16x2_t, int16x8_t:int16x8_t:int16x8x2_t, int32x4_t:int32x4_t:int32x4x2_t
generate uint8x16_t:uint8x16_t:uint8x16x2_t, uint16x8_t:uint16x8_t:uint16x8x2_t, uint32x4_t:uint32x4_t:uint32x4x2_t
@@ -7227,7 +7494,7 @@ validate 1., 5., 2., 6., 3., 7., 4., 8.
aarch64 = zip
arm = vtrn
generate float32x2_t:float32x2_t:float32x2x2_t
-aarch64 = ext
+aarch64 = zip
arm = vorr
generate float32x4_t:float32x4_t:float32x4x2_t
diff --git a/library/stdarch/crates/stdarch-gen/src/main.rs b/library/stdarch/crates/stdarch-gen/src/main.rs
index 652aee88c..8e2bea0e2 100644
--- a/library/stdarch/crates/stdarch-gen/src/main.rs
+++ b/library/stdarch/crates/stdarch-gen/src/main.rs
@@ -799,6 +799,19 @@ fn type_to_half(t: &str) -> &str {
}
}
+fn type_with_merged_lanes(t: &str, elements_per_lane: usize) -> String {
+ assert_eq!(type_len(t) % elements_per_lane, 0);
+ let prefix_len = t
+ .find(|c: char| c.is_ascii_digit())
+ .unwrap_or_else(|| t.len());
+ format!(
+ "{prefix}{bits}x{len}_t",
+ prefix = &t[0..prefix_len],
+ bits = type_bits(t) * elements_per_lane,
+ len = type_len(t) / elements_per_lane
+ )
+}
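
The new `type_with_merged_lanes` helper backs the `merge4_t2` substitution added below: it rewrites a vector type name so that `elements_per_lane` elements collapse into one wider lane, which is how the 8-bit `c` operand is reinterpreted as 32-bit groups for the lane-indexed dot products. A standalone sketch of the same name rewriting, parsing the bit width and lane count out of the type string instead of using the generator's `type_bits`/`type_len` helpers (an assumption made only for this sketch):

```rust
// Standalone re-implementation of the merged-lanes name rewriting.
fn merged_lanes(t: &str, elements_per_lane: usize) -> String {
    // e.g. "int8x16_t" -> prefix "int", bits 8, len 16.
    let prefix_len = t.find(|c: char| c.is_ascii_digit()).unwrap_or(t.len());
    let rest = &t[prefix_len..t.len() - 2]; // strip the trailing "_t"
    let (bits, len) = rest.split_once('x').expect("expected <bits>x<len>");
    let (bits, len): (usize, usize) = (bits.parse().unwrap(), len.parse().unwrap());
    assert_eq!(len % elements_per_lane, 0);
    format!(
        "{}{}x{}_t",
        &t[..prefix_len],
        bits * elements_per_lane,
        len / elements_per_lane
    )
}

fn main() {
    // Four 8-bit elements merged into one lane: int8x16_t becomes int32x4_t.
    assert_eq!(merged_lanes("int8x16_t", 4), "int32x4_t");
    assert_eq!(merged_lanes("uint8x8_t", 4), "uint32x2_t");
}
```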
+
fn asc(start: i32, len: usize) -> String {
let mut s = String::from("[");
for i in 0..len {
@@ -2515,7 +2528,7 @@ fn gen_arm(
{function_doc}
#[inline]
-#[cfg(target_arch = "aarch64")]{target_feature_aarch64}
+#[cfg(not(target_arch = "arm"))]{target_feature_aarch64}
#[cfg_attr(test, assert_instr({assert_aarch64}{const_assert}))]{const_legacy}{stable_aarch64}
{call_aarch64}
"#,
@@ -2993,6 +3006,12 @@ fn get_call(
re = Some((re_params[0].clone(), in_t[1].to_string()));
} else if re_params[1] == "out_t" {
re = Some((re_params[0].clone(), out_t.to_string()));
+ } else if re_params[1] == "out_unsigned" {
+ re = Some((re_params[0].clone(), type_to_unsigned(out_t).to_string()));
+ } else if re_params[1] == "out_signed" {
+ re = Some((re_params[0].clone(), type_to_signed(out_t).to_string()));
+ } else if re_params[1] == "merge4_t2" {
+ re = Some((re_params[0].clone(), type_with_merged_lanes(in_t[2], 4)));
} else if re_params[1] == "half" {
re = Some((re_params[0].clone(), type_to_half(in_t[1]).to_string()));
} else if re_params[1] == "in_ntt" {
diff --git a/library/stdarch/crates/stdarch-test/Cargo.toml b/library/stdarch/crates/stdarch-test/Cargo.toml
index ce5705c6e..3a2130d4e 100644
--- a/library/stdarch/crates/stdarch-test/Cargo.toml
+++ b/library/stdarch/crates/stdarch-test/Cargo.toml
@@ -7,11 +7,13 @@ edition = "2021"
[dependencies]
assert-instr-macro = { path = "../assert-instr-macro" }
simd-test-macro = { path = "../simd-test-macro" }
-cc = "1.0"
lazy_static = "1.0"
rustc-demangle = "0.1.8"
cfg-if = "1.0"
+[target.'cfg(windows)'.dependencies]
+cc = "1.0"
+
# We use a crates.io dependency to disassemble wasm binaries to look for
# instructions for `#[assert_instr]`. Note that we use an `=` dependency here
# instead of a floating dependency because the text format for wasm changes over
diff --git a/library/stdarch/crates/stdarch-test/src/disassembly.rs b/library/stdarch/crates/stdarch-test/src/disassembly.rs
index 5d7a27e8a..54df7261e 100644
--- a/library/stdarch/crates/stdarch-test/src/disassembly.rs
+++ b/library/stdarch/crates/stdarch-test/src/disassembly.rs
@@ -1,7 +1,7 @@
//! Disassembly calling function for most targets.
use crate::Function;
-use std::{collections::HashSet, env, process::Command, str};
+use std::{collections::HashSet, env, str};
// Extracts the "shim" name from the `symbol`.
fn normalize(mut symbol: &str) -> String {
@@ -39,10 +39,11 @@ fn normalize(mut symbol: &str) -> String {
symbol
}
+#[cfg(windows)]
pub(crate) fn disassemble_myself() -> HashSet<Function> {
let me = env::current_exe().expect("failed to get current exe");
- let disassembly = if cfg!(target_os = "windows") && cfg!(target_env = "msvc") {
+ let disassembly = if cfg!(target_env = "msvc") {
let target = if cfg!(target_arch = "x86_64") {
"x86_64-pc-windows-msvc"
} else if cfg!(target_arch = "x86") {
@@ -65,32 +66,39 @@ pub(crate) fn disassemble_myself() -> HashSet<Function> {
assert!(output.status.success());
// Windows does not return valid UTF-8 output:
String::from_utf8_lossy(Vec::leak(output.stdout))
- } else if cfg!(target_os = "windows") {
- panic!("disassembly unimplemented")
} else {
- let objdump = env::var("OBJDUMP").unwrap_or_else(|_| "objdump".to_string());
- let add_args = if cfg!(target_os = "macos") && cfg!(target_arch = "aarch64") {
- // Target features need to be enabled for LLVM objdump on Macos ARM64
- vec!["--mattr=+v8.6a,+crypto,+tme"]
- } else {
- vec![]
- };
- let output = Command::new(objdump.clone())
- .arg("--disassemble")
- .arg("--no-show-raw-insn")
- .args(add_args)
- .arg(&me)
- .output()
- .unwrap_or_else(|_| panic!("failed to execute objdump. OBJDUMP={objdump}"));
- println!(
- "{}\n{}",
- output.status,
- String::from_utf8_lossy(&output.stderr)
- );
- assert!(output.status.success());
+ panic!("disassembly unimplemented")
+ };
- String::from_utf8_lossy(Vec::leak(output.stdout))
+ parse(&disassembly)
+}
+
+#[cfg(not(windows))]
+pub(crate) fn disassemble_myself() -> HashSet<Function> {
+ let me = env::current_exe().expect("failed to get current exe");
+
+ let objdump = env::var("OBJDUMP").unwrap_or_else(|_| "objdump".to_string());
+ let add_args = if cfg!(target_os = "macos") && cfg!(target_arch = "aarch64") {
+ // Target features need to be enabled for LLVM objdump on Macos ARM64
+ vec!["--mattr=+v8.6a,+crypto,+tme"]
+ } else {
+ vec![]
};
+ let output = std::process::Command::new(objdump.clone())
+ .arg("--disassemble")
+ .arg("--no-show-raw-insn")
+ .args(add_args)
+ .arg(&me)
+ .output()
+ .unwrap_or_else(|_| panic!("failed to execute objdump. OBJDUMP={objdump}"));
+ println!(
+ "{}\n{}",
+ output.status,
+ String::from_utf8_lossy(&output.stderr)
+ );
+ assert!(output.status.success());
+
+ let disassembly = String::from_utf8_lossy(Vec::leak(output.stdout));
parse(&disassembly)
}
diff --git a/library/stdarch/crates/stdarch-test/src/lib.rs b/library/stdarch/crates/stdarch-test/src/lib.rs
index 232e47ec1..7ea189ff5 100644
--- a/library/stdarch/crates/stdarch-test/src/lib.rs
+++ b/library/stdarch/crates/stdarch-test/src/lib.rs
@@ -129,17 +129,20 @@ pub fn assert(shim_addr: usize, fnname: &str, expected: &str) {
"usad8" | "vfma" | "vfms" => 27,
"qadd8" | "qsub8" | "sadd8" | "sel" | "shadd8" | "shsub8" | "usub8" | "ssub8" => 29,
// core_arch/src/arm_shared/simd32
- // vst1q_s64_x4_vst1 : #instructions = 22 >= 22 (limit)
- "vld3" => 23,
+ // vst1q_s64_x4_vst1 : #instructions = 27 >= 22 (limit)
+ "vld3" => 28,
// core_arch/src/arm_shared/simd32
- // vld4q_lane_u32_vld4 : #instructions = 31 >= 22 (limit)
- "vld4" => 32,
+ // vld4q_lane_u32_vld4 : #instructions = 36 >= 22 (limit)
+ "vld4" => 37,
// core_arch/src/arm_shared/simd32
// vst1q_s64_x4_vst1 : #instructions = 40 >= 22 (limit)
"vst1" => 41,
// core_arch/src/arm_shared/simd32
- // vst4q_u32_vst4 : #instructions = 26 >= 22 (limit)
- "vst4" => 27,
+ // vst3q_u32_vst3 : #instructions = 25 >= 22 (limit)
+ "vst3" => 26,
+ // core_arch/src/arm_shared/simd32
+ // vst4q_u32_vst4 : #instructions = 33 >= 22 (limit)
+ "vst4" => 34,
// core_arch/src/arm_shared/simd32
// vst1q_p64_x4_nop : #instructions = 33 >= 22 (limit)
diff --git a/library/stdarch/examples/connect5.rs b/library/stdarch/examples/connect5.rs
index 09e7e48a7..805108c24 100644
--- a/library/stdarch/examples/connect5.rs
+++ b/library/stdarch/examples/connect5.rs
@@ -851,7 +851,7 @@ fn check_patterndead4(pos: &Pos, sd: Side) -> i32 {
n
}
-/// Check <b>-OOO-, -OO-O-, -O-OO-</br>
+/// Check <b>-OOO-, -OO-O-, -O-OO-</b>
fn check_patternlive3(pos: &Pos, sd: Side) -> i32 {
let mut n: i32 = 0;
diff --git a/library/test/src/lib.rs b/library/test/src/lib.rs
index 64d10dd57..413f0fba3 100644
--- a/library/test/src/lib.rs
+++ b/library/test/src/lib.rs
@@ -21,7 +21,7 @@
#![feature(process_exitcode_internals)]
#![feature(panic_can_unwind)]
#![feature(test)]
-#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![allow(internal_features)]
// Public reexports
pub use self::bench::{black_box, Bencher};
diff --git a/library/unwind/src/lib.rs b/library/unwind/src/lib.rs
index 0b4daeafe..df4f286a5 100644
--- a/library/unwind/src/lib.rs
+++ b/library/unwind/src/lib.rs
@@ -5,7 +5,7 @@
#![feature(c_unwind)]
#![feature(cfg_target_abi)]
#![cfg_attr(not(target_env = "msvc"), feature(libc))]
-#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![allow(internal_features)]
cfg_if::cfg_if! {
if #[cfg(target_env = "msvc")] {
@@ -148,3 +148,7 @@ extern "C" {}
#[cfg(target_os = "nto")]
#[link(name = "gcc_s")]
extern "C" {}
+
+#[cfg(target_os = "hurd")]
+#[link(name = "gcc_s")]
+extern "C" {}