summaryrefslogtreecommitdiffstats
path: root/library
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-06-07 05:48:42 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-06-07 05:48:42 +0000
commitcec1877e180393eba0f6ddb0cf97bf3a791631c7 (patch)
tree47b4dac2a9dd9a40c30c251b4d4a72d7ccf77e9f /library
parentAdding debian version 1.74.1+dfsg1-1. (diff)
downloadrustc-cec1877e180393eba0f6ddb0cf97bf3a791631c7.tar.xz
rustc-cec1877e180393eba0f6ddb0cf97bf3a791631c7.zip
Merging upstream version 1.75.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'library')
-rw-r--r--library/alloc/Cargo.toml2
-rw-r--r--library/alloc/src/alloc.rs9
-rw-r--r--library/alloc/src/borrow.rs1
-rw-r--r--library/alloc/src/boxed.rs20
-rw-r--r--library/alloc/src/collections/binary_heap/mod.rs6
-rw-r--r--library/alloc/src/collections/btree/dedup_sorted_iter.rs2
-rw-r--r--library/alloc/src/collections/btree/map.rs2
-rw-r--r--library/alloc/src/collections/btree/set.rs2
-rw-r--r--library/alloc/src/fmt.rs2
-rw-r--r--library/alloc/src/lib.rs7
-rw-r--r--library/alloc/src/raw_vec.rs19
-rw-r--r--library/alloc/src/rc.rs9
-rw-r--r--library/alloc/src/string.rs151
-rw-r--r--library/alloc/src/sync.rs9
-rw-r--r--library/alloc/src/vec/mod.rs16
-rw-r--r--library/alloc/tests/autotraits.rs4
-rw-r--r--library/backtrace/.github/actions/build-with-patched-std/action.yml48
-rw-r--r--library/backtrace/.github/actions/report-code-size-changes/action.yml111
-rw-r--r--library/backtrace/.github/workflows/check-binary-size.yml172
-rw-r--r--library/backtrace/Cargo.lock221
-rw-r--r--library/backtrace/Cargo.toml9
-rw-r--r--library/backtrace/build.rs31
-rw-r--r--library/backtrace/crates/as-if-std/Cargo.toml4
-rw-r--r--library/backtrace/crates/debuglink/Cargo.toml2
-rw-r--r--library/backtrace/crates/dylib-dep/Cargo.toml2
-rw-r--r--library/backtrace/crates/line-tables-only/Cargo.toml2
-rw-r--r--library/backtrace/crates/line-tables-only/src/lib.rs15
-rw-r--r--library/backtrace/crates/macos_frames_test/Cargo.toml2
-rw-r--r--library/backtrace/crates/without_debuginfo/Cargo.toml2
-rw-r--r--library/backtrace/src/android-api.c4
-rw-r--r--library/backtrace/src/backtrace/dbghelp.rs327
-rw-r--r--library/backtrace/src/backtrace/libunwind.rs13
-rw-r--r--library/backtrace/src/backtrace/mod.rs35
-rw-r--r--library/backtrace/src/dbghelp.rs52
-rw-r--r--library/backtrace/src/lib.rs15
-rw-r--r--library/backtrace/src/print.rs11
-rw-r--r--library/backtrace/src/symbolize/dbghelp.rs115
-rw-r--r--library/backtrace/src/symbolize/gimli.rs45
-rw-r--r--library/backtrace/src/symbolize/gimli/elf.rs2
-rw-r--r--library/backtrace/src/symbolize/gimli/libs_aix.rs74
-rw-r--r--library/backtrace/src/symbolize/gimli/libs_dl_iterate_phdr.rs20
-rw-r--r--library/backtrace/src/symbolize/gimli/xcoff.rs186
-rw-r--r--library/backtrace/src/windows.rs52
-rw-r--r--library/backtrace/tests/accuracy/main.rs4
-rw-r--r--library/backtrace/tests/sgx-image-base.rs56
-rw-r--r--library/backtrace/tests/smoke.rs51
-rw-r--r--library/core/src/alloc/layout.rs2
-rw-r--r--library/core/src/arch.rs1
-rw-r--r--library/core/src/array/iter.rs28
-rw-r--r--library/core/src/cell.rs14
-rw-r--r--library/core/src/cell/once.rs46
-rw-r--r--library/core/src/char/decode.rs4
-rw-r--r--library/core/src/char/methods.rs9
-rw-r--r--library/core/src/cmp.rs76
-rw-r--r--library/core/src/convert/mod.rs59
-rw-r--r--library/core/src/default.rs1
-rw-r--r--library/core/src/error.rs8
-rw-r--r--library/core/src/ffi/c_str.rs2
-rw-r--r--library/core/src/ffi/mod.rs2
-rw-r--r--library/core/src/fmt/builders.rs145
-rw-r--r--library/core/src/fmt/mod.rs10
-rw-r--r--library/core/src/fmt/rt.rs4
-rw-r--r--library/core/src/future/mod.rs2
-rw-r--r--library/core/src/hint.rs2
-rw-r--r--library/core/src/intrinsics.rs11
-rw-r--r--library/core/src/intrinsics/mir.rs10
-rw-r--r--library/core/src/io/borrowed_buf.rs (renamed from library/std/src/io/readbuf.rs)18
-rw-r--r--library/core/src/io/mod.rs6
-rw-r--r--library/core/src/iter/adapters/peekable.rs1
-rw-r--r--library/core/src/iter/adapters/zip.rs90
-rw-r--r--library/core/src/iter/mod.rs6
-rw-r--r--library/core/src/iter/sources.rs8
-rw-r--r--library/core/src/iter/sources/empty.rs1
-rw-r--r--library/core/src/iter/sources/from_coroutine.rs59
-rw-r--r--library/core/src/iter/sources/from_generator.rs58
-rw-r--r--library/core/src/iter/sources/once.rs1
-rw-r--r--library/core/src/iter/sources/once_with.rs2
-rw-r--r--library/core/src/iter/sources/successors.rs2
-rw-r--r--library/core/src/iter/traits/collect.rs1
-rw-r--r--library/core/src/iter/traits/iterator.rs11
-rw-r--r--library/core/src/lib.rs19
-rw-r--r--library/core/src/macros/mod.rs37
-rw-r--r--library/core/src/marker.rs60
-rw-r--r--library/core/src/mem/manually_drop.rs12
-rw-r--r--library/core/src/mem/maybe_uninit.rs24
-rw-r--r--library/core/src/mem/mod.rs97
-rw-r--r--library/core/src/net/ip_addr.rs154
-rw-r--r--library/core/src/net/socket_addr.rs51
-rw-r--r--library/core/src/num/f32.rs44
-rw-r--r--library/core/src/num/f64.rs42
-rw-r--r--library/core/src/num/int_macros.rs43
-rw-r--r--library/core/src/num/mod.rs11
-rw-r--r--library/core/src/num/saturating.rs4
-rw-r--r--library/core/src/num/uint_macros.rs18
-rw-r--r--library/core/src/ops/coroutine.rs139
-rw-r--r--library/core/src/ops/deref.rs163
-rw-r--r--library/core/src/ops/function.rs24
-rw-r--r--library/core/src/ops/generator.rs135
-rw-r--r--library/core/src/ops/index.rs2
-rw-r--r--library/core/src/ops/mod.rs10
-rw-r--r--library/core/src/ops/range.rs8
-rw-r--r--library/core/src/ops/try_trait.rs45
-rw-r--r--library/core/src/option.rs47
-rw-r--r--library/core/src/panic.rs45
-rw-r--r--library/core/src/panic/unwind_safe.rs1
-rw-r--r--library/core/src/panicking.rs13
-rw-r--r--library/core/src/pin.rs28
-rw-r--r--library/core/src/primitive_docs.rs67
-rw-r--r--library/core/src/ptr/const_ptr.rs39
-rw-r--r--library/core/src/ptr/mod.rs78
-rw-r--r--library/core/src/ptr/mut_ptr.rs37
-rw-r--r--library/core/src/ptr/non_null.rs5
-rw-r--r--library/core/src/result.rs1
-rw-r--r--library/core/src/slice/index.rs5
-rw-r--r--library/core/src/slice/iter.rs1
-rw-r--r--library/core/src/slice/mod.rs70
-rw-r--r--library/core/src/slice/raw.rs2
-rw-r--r--library/core/src/slice/sort.rs13
-rw-r--r--library/core/src/str/iter.rs2
-rw-r--r--library/core/src/str/mod.rs12
-rw-r--r--library/core/src/str/pattern.rs4
-rw-r--r--library/core/src/str/traits.rs1
-rw-r--r--library/core/src/sync/atomic.rs167
-rw-r--r--library/core/src/sync/exclusive.rs10
-rw-r--r--library/core/src/task/wake.rs15
-rw-r--r--library/core/src/time.rs9
-rw-r--r--library/core/tests/array.rs4
-rw-r--r--library/core/tests/io/borrowed_buf.rs (renamed from library/std/src/io/readbuf/tests.rs)4
-rw-r--r--library/core/tests/io/mod.rs1
-rw-r--r--library/core/tests/iter/adapters/zip.rs6
-rw-r--r--library/core/tests/lib.rs8
-rw-r--r--library/core/tests/mem.rs21
-rw-r--r--library/core/tests/net/socket_addr.rs11
-rw-r--r--library/core/tests/num/flt2dec/mod.rs2
-rw-r--r--library/core/tests/slice.rs20
-rw-r--r--library/panic_unwind/src/gcc.rs2
-rw-r--r--library/portable-simd/crates/core_simd/src/mod.rs1
-rw-r--r--library/proc_macro/src/lib.rs2
-rw-r--r--library/std/Cargo.toml6
-rw-r--r--library/std/build.rs11
-rw-r--r--library/std/src/collections/hash/map.rs2
-rw-r--r--library/std/src/collections/hash/set.rs2
-rw-r--r--library/std/src/fs.rs19
-rw-r--r--library/std/src/fs/tests.rs86
-rw-r--r--library/std/src/io/buffered/bufreader.rs25
-rw-r--r--library/std/src/io/copy.rs74
-rw-r--r--library/std/src/io/copy/tests.rs12
-rw-r--r--library/std/src/io/impls.rs32
-rw-r--r--library/std/src/io/mod.rs67
-rw-r--r--library/std/src/io/stdio.rs2
-rw-r--r--library/std/src/lib.rs7
-rw-r--r--library/std/src/net/udp.rs21
-rw-r--r--library/std/src/os/aix/fs.rs348
-rw-r--r--library/std/src/os/aix/mod.rs6
-rw-r--r--library/std/src/os/aix/raw.rs9
-rw-r--r--library/std/src/os/fd/owned.rs6
-rw-r--r--library/std/src/os/freebsd/fs.rs10
-rw-r--r--library/std/src/os/ios/fs.rs6
-rw-r--r--library/std/src/os/linux/fs.rs9
-rw-r--r--library/std/src/os/macos/fs.rs6
-rw-r--r--library/std/src/os/mod.rs2
-rw-r--r--library/std/src/os/unix/mod.rs2
-rw-r--r--library/std/src/os/watchos/fs.rs6
-rw-r--r--library/std/src/os/windows/fs.rs6
-rw-r--r--library/std/src/panicking.rs60
-rw-r--r--library/std/src/process.rs16
-rw-r--r--library/std/src/rt.rs1
-rw-r--r--library/std/src/sync/once.rs2
-rw-r--r--library/std/src/sync/once_lock.rs43
-rw-r--r--library/std/src/sync/rwlock.rs4
-rw-r--r--library/std/src/sys/common/mod.rs1
-rw-r--r--library/std/src/sys/hermit/net.rs6
-rw-r--r--library/std/src/sys/hermit/thread_local_dtor.rs14
-rw-r--r--library/std/src/sys/personality/dwarf/eh.rs131
-rw-r--r--library/std/src/sys/personality/gcc.rs11
-rw-r--r--library/std/src/sys/sgx/abi/usercalls/alloc.rs207
-rw-r--r--library/std/src/sys/sgx/waitqueue/mod.rs26
-rw-r--r--library/std/src/sys/solid/net.rs11
-rw-r--r--library/std/src/sys/solid/thread_local_dtor.rs15
-rw-r--r--library/std/src/sys/uefi/alloc.rs22
-rw-r--r--library/std/src/sys/uefi/args.rs158
-rw-r--r--library/std/src/sys/uefi/helpers.rs7
-rw-r--r--library/std/src/sys/uefi/mod.rs2
-rw-r--r--library/std/src/sys/uefi/stdio.rs162
-rw-r--r--library/std/src/sys/unix/args.rs1
-rw-r--r--library/std/src/sys/unix/env.rs11
-rw-r--r--library/std/src/sys/unix/fd.rs24
-rw-r--r--library/std/src/sys/unix/fs.rs59
-rw-r--r--library/std/src/sys/unix/mod.rs3
-rw-r--r--library/std/src/sys/unix/net.rs21
-rw-r--r--library/std/src/sys/unix/os.rs36
-rw-r--r--library/std/src/sys/unix/process/mod.rs9
-rw-r--r--library/std/src/sys/unix/process/process_common.rs1
-rw-r--r--library/std/src/sys/unix/process/process_common/tests.rs33
-rw-r--r--library/std/src/sys/unix/process/process_unix.rs5
-rw-r--r--library/std/src/sys/unix/process/process_unsupported.rs56
-rw-r--r--library/std/src/sys/unix/process/process_unsupported/wait_status.rs84
-rw-r--r--library/std/src/sys/unix/process/process_unsupported/wait_status/tests.rs36
-rw-r--r--library/std/src/sys/unix/rand.rs125
-rw-r--r--library/std/src/sys/unix/stack_overflow.rs14
-rw-r--r--library/std/src/sys/unix/thread.rs6
-rw-r--r--library/std/src/sys/unix/thread_local_dtor.rs67
-rw-r--r--library/std/src/sys/unix/time.rs298
-rw-r--r--library/std/src/sys/wasi/mod.rs115
-rw-r--r--library/std/src/sys/windows/api.rs157
-rw-r--r--library/std/src/sys/windows/c.rs4
-rw-r--r--library/std/src/sys/windows/c/windows_sys.lst10
-rw-r--r--library/std/src/sys/windows/c/windows_sys.rs65
-rw-r--r--library/std/src/sys/windows/cmath.rs6
-rw-r--r--library/std/src/sys/windows/fs.rs79
-rw-r--r--library/std/src/sys/windows/io.rs4
-rw-r--r--library/std/src/sys/windows/mod.rs16
-rw-r--r--library/std/src/sys/windows/net.rs14
-rw-r--r--library/std/src/sys/windows/os.rs6
-rw-r--r--library/std/src/sys/windows/process.rs5
-rw-r--r--library/std/src/sys/windows/stack_overflow.rs4
-rw-r--r--library/std/src/sys/windows/stdio.rs3
-rw-r--r--library/std/src/sys/windows/thread.rs15
-rw-r--r--library/std/src/sys/windows/thread_local_key.rs19
-rw-r--r--library/std/src/sys/windows/time.rs38
-rw-r--r--library/std/src/sys_common/net.rs4
-rw-r--r--library/std/src/sys_common/thread_local_dtor.rs17
-rw-r--r--library/std/src/thread/local.rs6
-rw-r--r--library/std/src/thread/mod.rs18
-rw-r--r--library/std/src/time.rs7
-rw-r--r--library/std/tests/switch-stdout.rs24
-rw-r--r--library/stdarch/ci/docker/riscv64gc-unknown-linux-gnu/Dockerfile10
-rwxr-xr-xlibrary/stdarch/ci/run.sh7
-rw-r--r--library/stdarch/crates/assert-instr-macro/Cargo.toml2
-rw-r--r--library/stdarch/crates/assert-instr-macro/src/lib.rs13
-rw-r--r--library/stdarch/crates/core_arch/src/arm_shared/barrier/cp15.rs9
-rw-r--r--library/stdarch/crates/core_arch/src/arm_shared/neon/shift_and_insert_tests.rs4
-rw-r--r--library/stdarch/crates/core_arch/src/riscv64/zk.rs57
-rw-r--r--library/stdarch/crates/core_arch/src/riscv_shared/zb.rs12
-rw-r--r--library/stdarch/crates/core_arch/src/riscv_shared/zk.rs30
-rw-r--r--library/stdarch/crates/core_arch/src/x86/avx.rs40
-rw-r--r--library/stdarch/crates/core_arch/src/x86/avx2.rs46
-rw-r--r--library/stdarch/crates/core_arch/src/x86/avx512bitalg.rs12
-rw-r--r--library/stdarch/crates/core_arch/src/x86/avx512bw.rs104
-rw-r--r--library/stdarch/crates/core_arch/src/x86/avx512f.rs333
-rw-r--r--library/stdarch/crates/core_arch/src/x86/sse.rs186
-rw-r--r--library/stdarch/crates/core_arch/src/x86/sse2.rs309
-rw-r--r--library/stdarch/crates/core_arch/src/x86/sse3.rs18
-rw-r--r--library/stdarch/crates/core_arch/src/x86/sse41.rs72
-rw-r--r--library/stdarch/crates/core_arch/src/x86/test.rs9
-rw-r--r--library/stdarch/crates/core_arch/src/x86_64/avx512f.rs60
-rw-r--r--library/stdarch/crates/core_arch/src/x86_64/sse2.rs3
-rw-r--r--library/stdarch/crates/intrinsic-test/Cargo.toml6
-rw-r--r--library/stdarch/crates/intrinsic-test/README.md4
-rw-r--r--library/stdarch/crates/intrinsic-test/src/json_parser.rs3
-rw-r--r--library/stdarch/crates/intrinsic-test/src/main.rs93
-rw-r--r--library/stdarch/crates/simd-test-macro/Cargo.toml1
-rw-r--r--library/stdarch/crates/simd-test-macro/src/lib.rs39
-rw-r--r--library/stdarch/crates/std_detect/Cargo.toml1
-rw-r--r--library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs59
-rw-r--r--library/stdarch/crates/std_detect/src/detect/os/x86.rs6
-rw-r--r--library/stdarch/crates/stdarch-test/Cargo.toml2
-rw-r--r--library/stdarch/crates/stdarch-test/src/disassembly.rs2
-rw-r--r--library/stdarch/crates/stdarch-verify/Cargo.toml4
-rw-r--r--library/stdarch/crates/stdarch-verify/src/lib.rs82
-rw-r--r--library/stdarch/examples/Cargo.toml4
-rw-r--r--library/test/src/lib.rs2
-rw-r--r--library/unwind/Cargo.toml3
-rw-r--r--library/unwind/build.rs25
-rw-r--r--library/unwind/src/lib.rs11
-rw-r--r--library/unwind/src/libunwind.rs14
266 files changed, 6748 insertions, 2695 deletions
diff --git a/library/alloc/Cargo.toml b/library/alloc/Cargo.toml
index e5f828c4c..63aec14f4 100644
--- a/library/alloc/Cargo.toml
+++ b/library/alloc/Cargo.toml
@@ -36,3 +36,5 @@ compiler-builtins-c = ["compiler_builtins/c"]
compiler-builtins-no-asm = ["compiler_builtins/no-asm"]
compiler-builtins-mangled-names = ["compiler_builtins/mangled-names"]
compiler-builtins-weak-intrinsics = ["compiler_builtins/weak-intrinsics"]
+# Make panics and failed asserts immediately abort without formatting any message
+panic_immediate_abort = []
diff --git a/library/alloc/src/alloc.rs b/library/alloc/src/alloc.rs
index a548de814..2499f1053 100644
--- a/library/alloc/src/alloc.rs
+++ b/library/alloc/src/alloc.rs
@@ -377,13 +377,20 @@ pub const fn handle_alloc_error(layout: Layout) -> ! {
panic!("allocation failed");
}
+ #[inline]
fn rt_error(layout: Layout) -> ! {
unsafe {
__rust_alloc_error_handler(layout.size(), layout.align());
}
}
- unsafe { core::intrinsics::const_eval_select((layout,), ct_error, rt_error) }
+ #[cfg(not(feature = "panic_immediate_abort"))]
+ unsafe {
+ core::intrinsics::const_eval_select((layout,), ct_error, rt_error)
+ }
+
+ #[cfg(feature = "panic_immediate_abort")]
+ ct_error(layout)
}
// For alloc test `std::alloc::handle_alloc_error` can be used directly.
diff --git a/library/alloc/src/borrow.rs b/library/alloc/src/borrow.rs
index 84331eba2..b6b6246ba 100644
--- a/library/alloc/src/borrow.rs
+++ b/library/alloc/src/borrow.rs
@@ -55,6 +55,7 @@ pub trait ToOwned {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[must_use = "cloning is often expensive and is not expected to have side effects"]
+ #[cfg_attr(not(test), rustc_diagnostic_item = "to_owned_method")]
fn to_owned(&self) -> Self::Owned;
/// Uses borrowed data to replace owned data, usually by cloning.
diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs
index 96b93830f..25c63b425 100644
--- a/library/alloc/src/boxed.rs
+++ b/library/alloc/src/boxed.rs
@@ -159,7 +159,7 @@ use core::marker::Tuple;
use core::marker::Unsize;
use core::mem::{self, SizedTypeProperties};
use core::ops::{
- CoerceUnsized, Deref, DerefMut, DispatchFromDyn, Generator, GeneratorState, Receiver,
+ CoerceUnsized, Coroutine, CoroutineState, Deref, DerefMut, DispatchFromDyn, Receiver,
};
use core::pin::Pin;
use core::ptr::{self, NonNull, Unique};
@@ -207,7 +207,7 @@ impl<T> Box<T> {
/// ```
/// let five = Box::new(5);
/// ```
- #[cfg(all(not(no_global_oom_handling)))]
+ #[cfg(not(no_global_oom_handling))]
#[inline(always)]
#[stable(feature = "rust1", since = "1.0.0")]
#[must_use]
@@ -2106,28 +2106,28 @@ impl<T: ?Sized, A: Allocator> AsMut<T> for Box<T, A> {
#[stable(feature = "pin", since = "1.33.0")]
impl<T: ?Sized, A: Allocator> Unpin for Box<T, A> where A: 'static {}
-#[unstable(feature = "generator_trait", issue = "43122")]
-impl<G: ?Sized + Generator<R> + Unpin, R, A: Allocator> Generator<R> for Box<G, A>
+#[unstable(feature = "coroutine_trait", issue = "43122")]
+impl<G: ?Sized + Coroutine<R> + Unpin, R, A: Allocator> Coroutine<R> for Box<G, A>
where
A: 'static,
{
type Yield = G::Yield;
type Return = G::Return;
- fn resume(mut self: Pin<&mut Self>, arg: R) -> GeneratorState<Self::Yield, Self::Return> {
+ fn resume(mut self: Pin<&mut Self>, arg: R) -> CoroutineState<Self::Yield, Self::Return> {
G::resume(Pin::new(&mut *self), arg)
}
}
-#[unstable(feature = "generator_trait", issue = "43122")]
-impl<G: ?Sized + Generator<R>, R, A: Allocator> Generator<R> for Pin<Box<G, A>>
+#[unstable(feature = "coroutine_trait", issue = "43122")]
+impl<G: ?Sized + Coroutine<R>, R, A: Allocator> Coroutine<R> for Pin<Box<G, A>>
where
A: 'static,
{
type Yield = G::Yield;
type Return = G::Return;
- fn resume(mut self: Pin<&mut Self>, arg: R) -> GeneratorState<Self::Yield, Self::Return> {
+ fn resume(mut self: Pin<&mut Self>, arg: R) -> CoroutineState<Self::Yield, Self::Return> {
G::resume((*self).as_mut(), arg)
}
}
@@ -2444,4 +2444,8 @@ impl<T: core::error::Error> core::error::Error for Box<T> {
fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
core::error::Error::source(&**self)
}
+
+ fn provide<'b>(&'b self, request: &mut core::error::Request<'b>) {
+ core::error::Error::provide(&**self, request);
+ }
}
diff --git a/library/alloc/src/collections/binary_heap/mod.rs b/library/alloc/src/collections/binary_heap/mod.rs
index 66573b90d..61c5950b0 100644
--- a/library/alloc/src/collections/binary_heap/mod.rs
+++ b/library/alloc/src/collections/binary_heap/mod.rs
@@ -434,8 +434,9 @@ impl<T: Ord> BinaryHeap<T> {
/// heap.push(4);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_binary_heap_constructor", issue = "112353")]
#[must_use]
- pub fn new() -> BinaryHeap<T> {
+ pub const fn new() -> BinaryHeap<T> {
BinaryHeap { data: vec![] }
}
@@ -477,8 +478,9 @@ impl<T: Ord, A: Allocator> BinaryHeap<T, A> {
/// heap.push(4);
/// ```
#[unstable(feature = "allocator_api", issue = "32838")]
+ #[rustc_const_unstable(feature = "const_binary_heap_constructor", issue = "112353")]
#[must_use]
- pub fn new_in(alloc: A) -> BinaryHeap<T, A> {
+ pub const fn new_in(alloc: A) -> BinaryHeap<T, A> {
BinaryHeap { data: Vec::new_in(alloc) }
}
diff --git a/library/alloc/src/collections/btree/dedup_sorted_iter.rs b/library/alloc/src/collections/btree/dedup_sorted_iter.rs
index 17ee78045..cd6a88f32 100644
--- a/library/alloc/src/collections/btree/dedup_sorted_iter.rs
+++ b/library/alloc/src/collections/btree/dedup_sorted_iter.rs
@@ -1,6 +1,6 @@
use core::iter::Peekable;
-/// A iterator for deduping the key of a sorted iterator.
+/// An iterator for deduping the key of a sorted iterator.
/// When encountering the duplicated key, only the last key-value pair is yielded.
///
/// Used by [`BTreeMap::bulk_build_from_sorted_iter`][1].
diff --git a/library/alloc/src/collections/btree/map.rs b/library/alloc/src/collections/btree/map.rs
index 5481b327d..4bdd96395 100644
--- a/library/alloc/src/collections/btree/map.rs
+++ b/library/alloc/src/collections/btree/map.rs
@@ -669,7 +669,7 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// map.insert(1, "a");
/// ```
#[unstable(feature = "btreemap_alloc", issue = "32838")]
- pub fn new_in(alloc: A) -> BTreeMap<K, V, A> {
+ pub const fn new_in(alloc: A) -> BTreeMap<K, V, A> {
BTreeMap { root: None, length: 0, alloc: ManuallyDrop::new(alloc), _marker: PhantomData }
}
}
diff --git a/library/alloc/src/collections/btree/set.rs b/library/alloc/src/collections/btree/set.rs
index 9da230915..0e0355128 100644
--- a/library/alloc/src/collections/btree/set.rs
+++ b/library/alloc/src/collections/btree/set.rs
@@ -358,7 +358,7 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// let mut set: BTreeSet<i32> = BTreeSet::new_in(Global);
/// ```
#[unstable(feature = "btreemap_alloc", issue = "32838")]
- pub fn new_in(alloc: A) -> BTreeSet<T, A> {
+ pub const fn new_in(alloc: A) -> BTreeSet<T, A> {
BTreeSet { map: BTreeMap::new_in(alloc) }
}
diff --git a/library/alloc/src/fmt.rs b/library/alloc/src/fmt.rs
index 1e2c35bf7..5b50ef7bf 100644
--- a/library/alloc/src/fmt.rs
+++ b/library/alloc/src/fmt.rs
@@ -555,6 +555,8 @@
pub use core::fmt::Alignment;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::Error;
+#[unstable(feature = "debug_closure_helpers", issue = "117729")]
+pub use core::fmt::FormatterFn;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{write, Arguments};
#[stable(feature = "rust1", since = "1.0.0")]
diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs
index f435f503f..d33c4418e 100644
--- a/library/alloc/src/lib.rs
+++ b/library/alloc/src/lib.rs
@@ -78,6 +78,8 @@
not(no_sync),
target_has_atomic = "ptr"
))]
+#![cfg_attr(not(bootstrap), doc(rust_logo))]
+#![cfg_attr(not(bootstrap), feature(rustdoc_internals))]
#![no_std]
#![needs_allocator]
// Lints:
@@ -113,7 +115,6 @@
#![feature(const_eval_select)]
#![feature(const_maybe_uninit_as_mut_ptr)]
#![feature(const_maybe_uninit_write)]
-#![feature(const_maybe_uninit_zeroed)]
#![feature(const_pin)]
#![feature(const_refs_to_cell)]
#![feature(const_size_of_val)]
@@ -139,7 +140,7 @@
#![feature(maybe_uninit_uninit_array)]
#![feature(maybe_uninit_uninit_array_transpose)]
#![feature(pattern)]
-#![feature(pointer_byte_offsets)]
+#![feature(ptr_addr_eq)]
#![feature(ptr_internals)]
#![feature(ptr_metadata)]
#![feature(ptr_sub_ptr)]
@@ -166,7 +167,7 @@
//
// Language features:
// tidy-alphabetical-start
-#![cfg_attr(not(test), feature(generator_trait))]
+#![cfg_attr(not(test), feature(coroutine_trait))]
#![cfg_attr(test, feature(panic_update_hook))]
#![cfg_attr(test, feature(test))]
#![feature(allocator_internals)]
diff --git a/library/alloc/src/raw_vec.rs b/library/alloc/src/raw_vec.rs
index 01b03de6a..817b93720 100644
--- a/library/alloc/src/raw_vec.rs
+++ b/library/alloc/src/raw_vec.rs
@@ -305,10 +305,13 @@ impl<T, A: Allocator> RawVec<T, A> {
/// The same as `reserve`, but returns on errors instead of panicking or aborting.
pub fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
if self.needs_to_grow(len, additional) {
- self.grow_amortized(len, additional)
- } else {
- Ok(())
+ self.grow_amortized(len, additional)?;
+ }
+ unsafe {
+ // Inform the optimizer that the reservation has succeeded or wasn't needed
+ core::intrinsics::assume(!self.needs_to_grow(len, additional));
}
+ Ok(())
}
/// Ensures that the buffer contains at least enough space to hold `len +
@@ -339,7 +342,14 @@ impl<T, A: Allocator> RawVec<T, A> {
len: usize,
additional: usize,
) -> Result<(), TryReserveError> {
- if self.needs_to_grow(len, additional) { self.grow_exact(len, additional) } else { Ok(()) }
+ if self.needs_to_grow(len, additional) {
+ self.grow_exact(len, additional)?;
+ }
+ unsafe {
+ // Inform the optimizer that the reservation has succeeded or wasn't needed
+ core::intrinsics::assume(!self.needs_to_grow(len, additional));
+ }
+ Ok(())
}
/// Shrinks the buffer down to the specified capacity. If the given amount
@@ -530,6 +540,7 @@ fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> {
// ensure that the code generation related to these panics is minimal as there's
// only one location which panics rather than a bunch throughout the module.
#[cfg(not(no_global_oom_handling))]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
fn capacity_overflow() -> ! {
panic!("capacity overflow");
}
diff --git a/library/alloc/src/rc.rs b/library/alloc/src/rc.rs
index 38339117c..dd7876bed 100644
--- a/library/alloc/src/rc.rs
+++ b/library/alloc/src/rc.rs
@@ -1304,7 +1304,7 @@ impl<T: ?Sized, A: Allocator> Rc<T, A> {
/// assert_eq!(unsafe { &*x_ptr }, "hello");
/// ```
#[stable(feature = "rc_raw", since = "1.17.0")]
- #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
+ #[rustc_never_returns_null_ptr]
pub fn into_raw(this: Self) -> *const T {
let ptr = Self::as_ptr(&this);
mem::forget(this);
@@ -1328,7 +1328,7 @@ impl<T: ?Sized, A: Allocator> Rc<T, A> {
/// assert_eq!(unsafe { &*x_ptr }, "hello");
/// ```
#[stable(feature = "weak_into_raw", since = "1.45.0")]
- #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
+ #[rustc_never_returns_null_ptr]
pub fn as_ptr(this: &Self) -> *const T {
let ptr: *mut RcBox<T> = NonNull::as_ptr(this.ptr);
@@ -1649,7 +1649,7 @@ impl<T: ?Sized, A: Allocator> Rc<T, A> {
/// assert!(!Rc::ptr_eq(&five, &other_five));
/// ```
pub fn ptr_eq(this: &Self, other: &Self) -> bool {
- this.ptr.as_ptr() as *const () == other.ptr.as_ptr() as *const ()
+ ptr::addr_eq(this.ptr.as_ptr(), other.ptr.as_ptr())
}
}
@@ -2701,6 +2701,7 @@ impl<T, I: iter::TrustedLen<Item = T>> ToRcSlice<T> for I {
///
/// [`upgrade`]: Weak::upgrade
#[stable(feature = "rc_weak", since = "1.4.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "RcWeak")]
pub struct Weak<
T: ?Sized,
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
@@ -3146,7 +3147,7 @@ impl<T: ?Sized, A: Allocator> Weak<T, A> {
#[must_use]
#[stable(feature = "weak_ptr_eq", since = "1.39.0")]
pub fn ptr_eq(&self, other: &Self) -> bool {
- ptr::eq(self.ptr.as_ptr() as *const (), other.ptr.as_ptr() as *const ())
+ ptr::addr_eq(self.ptr.as_ptr(), other.ptr.as_ptr())
}
}
diff --git a/library/alloc/src/string.rs b/library/alloc/src/string.rs
index ed43244eb..4d6968157 100644
--- a/library/alloc/src/string.rs
+++ b/library/alloc/src/string.rs
@@ -714,6 +714,156 @@ impl String {
.collect()
}
+ /// Decode a UTF-16LE–encoded vector `v` into a `String`, returning [`Err`]
+ /// if `v` contains any invalid data.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(str_from_utf16_endian)]
+ /// // 𝄞music
+ /// let v = &[0x34, 0xD8, 0x1E, 0xDD, 0x6d, 0x00, 0x75, 0x00,
+ /// 0x73, 0x00, 0x69, 0x00, 0x63, 0x00];
+ /// assert_eq!(String::from("𝄞music"),
+ /// String::from_utf16le(v).unwrap());
+ ///
+ /// // 𝄞mu<invalid>ic
+ /// let v = &[0x34, 0xD8, 0x1E, 0xDD, 0x6d, 0x00, 0x75, 0x00,
+ /// 0x00, 0xD8, 0x69, 0x00, 0x63, 0x00];
+ /// assert!(String::from_utf16le(v).is_err());
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "str_from_utf16_endian", issue = "116258")]
+ pub fn from_utf16le(v: &[u8]) -> Result<String, FromUtf16Error> {
+ if v.len() % 2 != 0 {
+ return Err(FromUtf16Error(()));
+ }
+ match (cfg!(target_endian = "little"), unsafe { v.align_to::<u16>() }) {
+ (true, ([], v, [])) => Self::from_utf16(v),
+ _ => char::decode_utf16(v.array_chunks::<2>().copied().map(u16::from_le_bytes))
+ .collect::<Result<_, _>>()
+ .map_err(|_| FromUtf16Error(())),
+ }
+ }
+
+ /// Decode a UTF-16LE–encoded slice `v` into a `String`, replacing
+ /// invalid data with [the replacement character (`U+FFFD`)][U+FFFD].
+ ///
+ /// Unlike [`from_utf8_lossy`] which returns a [`Cow<'a, str>`],
+ /// `from_utf16le_lossy` returns a `String` since the UTF-16 to UTF-8
+ /// conversion requires a memory allocation.
+ ///
+ /// [`from_utf8_lossy`]: String::from_utf8_lossy
+ /// [`Cow<'a, str>`]: crate::borrow::Cow "borrow::Cow"
+ /// [U+FFFD]: core::char::REPLACEMENT_CHARACTER
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(str_from_utf16_endian)]
+ /// // 𝄞mus<invalid>ic<invalid>
+ /// let v = &[0x34, 0xD8, 0x1E, 0xDD, 0x6d, 0x00, 0x75, 0x00,
+ /// 0x73, 0x00, 0x1E, 0xDD, 0x69, 0x00, 0x63, 0x00,
+ /// 0x34, 0xD8];
+ ///
+ /// assert_eq!(String::from("𝄞mus\u{FFFD}ic\u{FFFD}"),
+ /// String::from_utf16le_lossy(v));
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "str_from_utf16_endian", issue = "116258")]
+ pub fn from_utf16le_lossy(v: &[u8]) -> String {
+ match (cfg!(target_endian = "little"), unsafe { v.align_to::<u16>() }) {
+ (true, ([], v, [])) => Self::from_utf16_lossy(v),
+ (true, ([], v, [_remainder])) => Self::from_utf16_lossy(v) + "\u{FFFD}",
+ _ => {
+ let mut iter = v.array_chunks::<2>();
+ let string = char::decode_utf16(iter.by_ref().copied().map(u16::from_le_bytes))
+ .map(|r| r.unwrap_or(char::REPLACEMENT_CHARACTER))
+ .collect();
+ if iter.remainder().is_empty() { string } else { string + "\u{FFFD}" }
+ }
+ }
+ }
+
+ /// Decode a UTF-16BE–encoded vector `v` into a `String`, returning [`Err`]
+ /// if `v` contains any invalid data.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(str_from_utf16_endian)]
+ /// // 𝄞music
+ /// let v = &[0xD8, 0x34, 0xDD, 0x1E, 0x00, 0x6d, 0x00, 0x75,
+ /// 0x00, 0x73, 0x00, 0x69, 0x00, 0x63];
+ /// assert_eq!(String::from("𝄞music"),
+ /// String::from_utf16be(v).unwrap());
+ ///
+ /// // 𝄞mu<invalid>ic
+ /// let v = &[0xD8, 0x34, 0xDD, 0x1E, 0x00, 0x6d, 0x00, 0x75,
+ /// 0xD8, 0x00, 0x00, 0x69, 0x00, 0x63];
+ /// assert!(String::from_utf16be(v).is_err());
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "str_from_utf16_endian", issue = "116258")]
+ pub fn from_utf16be(v: &[u8]) -> Result<String, FromUtf16Error> {
+ if v.len() % 2 != 0 {
+ return Err(FromUtf16Error(()));
+ }
+ match (cfg!(target_endian = "big"), unsafe { v.align_to::<u16>() }) {
+ (true, ([], v, [])) => Self::from_utf16(v),
+ _ => char::decode_utf16(v.array_chunks::<2>().copied().map(u16::from_be_bytes))
+ .collect::<Result<_, _>>()
+ .map_err(|_| FromUtf16Error(())),
+ }
+ }
+
+ /// Decode a UTF-16BE–encoded slice `v` into a `String`, replacing
+ /// invalid data with [the replacement character (`U+FFFD`)][U+FFFD].
+ ///
+ /// Unlike [`from_utf8_lossy`] which returns a [`Cow<'a, str>`],
+ /// `from_utf16le_lossy` returns a `String` since the UTF-16 to UTF-8
+ /// conversion requires a memory allocation.
+ ///
+ /// [`from_utf8_lossy`]: String::from_utf8_lossy
+ /// [`Cow<'a, str>`]: crate::borrow::Cow "borrow::Cow"
+ /// [U+FFFD]: core::char::REPLACEMENT_CHARACTER
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(str_from_utf16_endian)]
+ /// // 𝄞mus<invalid>ic<invalid>
+ /// let v = &[0xD8, 0x34, 0xDD, 0x1E, 0x00, 0x6d, 0x00, 0x75,
+ /// 0x00, 0x73, 0xDD, 0x1E, 0x00, 0x69, 0x00, 0x63,
+ /// 0xD8, 0x34];
+ ///
+ /// assert_eq!(String::from("𝄞mus\u{FFFD}ic\u{FFFD}"),
+ /// String::from_utf16be_lossy(v));
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "str_from_utf16_endian", issue = "116258")]
+ pub fn from_utf16be_lossy(v: &[u8]) -> String {
+ match (cfg!(target_endian = "big"), unsafe { v.align_to::<u16>() }) {
+ (true, ([], v, [])) => Self::from_utf16_lossy(v),
+ (true, ([], v, [_remainder])) => Self::from_utf16_lossy(v) + "\u{FFFD}",
+ _ => {
+ let mut iter = v.array_chunks::<2>();
+ let string = char::decode_utf16(iter.by_ref().copied().map(u16::from_be_bytes))
+ .map(|r| r.unwrap_or(char::REPLACEMENT_CHARACTER))
+ .collect();
+ if iter.remainder().is_empty() { string } else { string + "\u{FFFD}" }
+ }
+ }
+ }
+
/// Decomposes a `String` into its raw components.
///
/// Returns the raw pointer to the underlying data, the length of
@@ -2435,6 +2585,7 @@ pub trait ToString {
/// ```
#[rustc_conversion_suggestion]
#[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg_attr(not(test), rustc_diagnostic_item = "to_string_method")]
fn to_string(&self) -> String;
}
diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs
index 838987f67..351e6c1a4 100644
--- a/library/alloc/src/sync.rs
+++ b/library/alloc/src/sync.rs
@@ -311,6 +311,7 @@ impl<T: ?Sized, A: Allocator> Arc<T, A> {
///
/// [`upgrade`]: Weak::upgrade
#[stable(feature = "arc_weak", since = "1.4.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "ArcWeak")]
pub struct Weak<
T: ?Sized,
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
@@ -1454,7 +1455,7 @@ impl<T: ?Sized, A: Allocator> Arc<T, A> {
/// ```
#[must_use = "losing the pointer will leak memory"]
#[stable(feature = "rc_raw", since = "1.17.0")]
- #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
+ #[rustc_never_returns_null_ptr]
pub fn into_raw(this: Self) -> *const T {
let ptr = Self::as_ptr(&this);
mem::forget(this);
@@ -1479,7 +1480,7 @@ impl<T: ?Sized, A: Allocator> Arc<T, A> {
/// ```
#[must_use]
#[stable(feature = "rc_as_ptr", since = "1.45.0")]
- #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
+ #[rustc_never_returns_null_ptr]
pub fn as_ptr(this: &Self) -> *const T {
let ptr: *mut ArcInner<T> = NonNull::as_ptr(this.ptr);
@@ -1778,7 +1779,7 @@ impl<T: ?Sized, A: Allocator> Arc<T, A> {
#[must_use]
#[stable(feature = "ptr_eq", since = "1.17.0")]
pub fn ptr_eq(this: &Self, other: &Self) -> bool {
- this.ptr.as_ptr() as *const () == other.ptr.as_ptr() as *const ()
+ ptr::addr_eq(this.ptr.as_ptr(), other.ptr.as_ptr())
}
}
@@ -2900,7 +2901,7 @@ impl<T: ?Sized, A: Allocator> Weak<T, A> {
#[must_use]
#[stable(feature = "weak_ptr_eq", since = "1.39.0")]
pub fn ptr_eq(&self, other: &Self) -> bool {
- ptr::eq(self.ptr.as_ptr() as *const (), other.ptr.as_ptr() as *const ())
+ ptr::addr_eq(self.ptr.as_ptr(), other.ptr.as_ptr())
}
}
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs
index 56fc6bc40..6c78d65f1 100644
--- a/library/alloc/src/vec/mod.rs
+++ b/library/alloc/src/vec/mod.rs
@@ -1258,7 +1258,7 @@ impl<T, A: Allocator> Vec<T, A> {
/// [`as_mut_ptr`]: Vec::as_mut_ptr
/// [`as_ptr`]: Vec::as_ptr
#[stable(feature = "vec_as_ptr", since = "1.37.0")]
- #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
+ #[rustc_never_returns_null_ptr]
#[inline]
pub fn as_ptr(&self) -> *const T {
// We shadow the slice method of the same name to avoid going through
@@ -1318,7 +1318,7 @@ impl<T, A: Allocator> Vec<T, A> {
/// [`as_mut_ptr`]: Vec::as_mut_ptr
/// [`as_ptr`]: Vec::as_ptr
#[stable(feature = "vec_as_ptr", since = "1.37.0")]
- #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
+ #[rustc_never_returns_null_ptr]
#[inline]
pub fn as_mut_ptr(&mut self) -> *mut T {
// We shadow the slice method of the same name to avoid going through
@@ -1447,7 +1447,8 @@ impl<T, A: Allocator> Vec<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
pub fn swap_remove(&mut self, index: usize) -> T {
#[cold]
- #[inline(never)]
+ #[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+ #[track_caller]
fn assert_failed(index: usize, len: usize) -> ! {
panic!("swap_remove index (is {index}) should be < len (is {len})");
}
@@ -1488,7 +1489,8 @@ impl<T, A: Allocator> Vec<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
pub fn insert(&mut self, index: usize, element: T) {
#[cold]
- #[inline(never)]
+ #[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+ #[track_caller]
fn assert_failed(index: usize, len: usize) -> ! {
panic!("insertion index (is {index}) should be <= len (is {len})");
}
@@ -1549,7 +1551,7 @@ impl<T, A: Allocator> Vec<T, A> {
#[track_caller]
pub fn remove(&mut self, index: usize) -> T {
#[cold]
- #[inline(never)]
+ #[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
#[track_caller]
fn assert_failed(index: usize, len: usize) -> ! {
panic!("removal index (is {index}) should be < len (is {len})");
@@ -1956,6 +1958,7 @@ impl<T, A: Allocator> Vec<T, A> {
} else {
unsafe {
self.len -= 1;
+ core::intrinsics::assume(self.len < self.capacity());
Some(ptr::read(self.as_ptr().add(self.len())))
}
}
@@ -2147,7 +2150,8 @@ impl<T, A: Allocator> Vec<T, A> {
A: Clone,
{
#[cold]
- #[inline(never)]
+ #[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+ #[track_caller]
fn assert_failed(at: usize, len: usize) -> ! {
panic!("`at` split index (is {at}) should be <= len (is {len})");
}
diff --git a/library/alloc/tests/autotraits.rs b/library/alloc/tests/autotraits.rs
index 6a8e55bff..b41e45761 100644
--- a/library/alloc/tests/autotraits.rs
+++ b/library/alloc/tests/autotraits.rs
@@ -14,8 +14,8 @@ fn test_btree_map() {
//
// We test autotraits in this convoluted way, instead of a straightforward
// `require_send_sync::<TypeIWantToTest>()`, because the interaction with
- // generators exposes some current limitations in rustc's ability to prove a
- // lifetime bound on the erased generator witness types. See the above link.
+ // coroutines exposes some current limitations in rustc's ability to prove a
+ // lifetime bound on the erased coroutine witness types. See the above link.
//
// A typical way this would surface in real code is:
//
diff --git a/library/backtrace/.github/actions/build-with-patched-std/action.yml b/library/backtrace/.github/actions/build-with-patched-std/action.yml
new file mode 100644
index 000000000..5466a2289
--- /dev/null
+++ b/library/backtrace/.github/actions/build-with-patched-std/action.yml
@@ -0,0 +1,48 @@
+# Github composite action to build a single-source-file test binary with an
+# already-checked-out version of Rust's stdlib, that will be patched with a
+# given revision of the backtrace crate.
+
+name: Build with patched std
+description: >
+ Build a binary with a version of std that's had a specific revision of
+ backtrace patched in.
+inputs:
+ backtrace-commit:
+ description: The git commit of backtrace to patch in to std
+ required: true
+ main-rs:
+ description: The (single) source code file to compile
+ required: true
+ rustc-dir:
+ description: The root directory of the rustc repo
+ required: true
+outputs:
+ test-binary-size:
+ description: The size in bytes of the built test binary
+ value: ${{ steps.measure.outputs.test-binary-size }}
+runs:
+ using: composite
+ steps:
+ - shell: bash
+ id: measure
+ env:
+ RUSTC_FLAGS: -Copt-level=3 -Cstrip=symbols
+ # This symlink is made by Build::new() in the bootstrap crate, using a
+ # symlink on Linux and a junction on Windows, so it will exist on both
+ # platforms.
+ RUSTC_BUILD_DIR: build/host
+ working-directory: ${{ inputs.rustc-dir }}
+ run: |
+ rm -rf "$RUSTC_BUILD_DIR/stage0-std"
+
+ (cd library/backtrace && git checkout ${{ inputs.backtrace-commit }})
+ git add library/backtrace
+
+ python3 x.py build library --stage 0
+
+ TEMP_BUILD_OUTPUT=$(mktemp test-binary-XXXXXXXX)
+ "$RUSTC_BUILD_DIR/stage0-sysroot/bin/rustc" $RUSTC_FLAGS "${{ inputs.main-rs }}" -o "$TEMP_BUILD_OUTPUT"
+ BINARY_SIZE=$(stat -c '%s' "$TEMP_BUILD_OUTPUT")
+ rm "$TEMP_BUILD_OUTPUT"
+
+ echo "test-binary-size=$BINARY_SIZE" >> "$GITHUB_OUTPUT"
diff --git a/library/backtrace/.github/actions/report-code-size-changes/action.yml b/library/backtrace/.github/actions/report-code-size-changes/action.yml
new file mode 100644
index 000000000..ede26975a
--- /dev/null
+++ b/library/backtrace/.github/actions/report-code-size-changes/action.yml
@@ -0,0 +1,111 @@
+# Github composite action to report on code size changes across different
+# platforms.
+
+name: Report binary size changes on PR
+description: |
+ Report on code size changes across different platforms resulting from a PR.
+ The only input argument is the path to a directory containing a set of
+ "*.json" files (extension required), each file containing the keys:
+
+ - platform: the platform that the code size change was measured on
+ - reference: the size in bytes of the reference binary (base of PR)
+ - updated: the size in bytes of the updated binary (head of PR)
+
+ The size is reported as a comment on the PR (accessed via context).
+inputs:
+ data-directory:
+ description: >
+ Path to directory containing size data as a set of "*.json" files.
+ required: true
+runs:
+ using: composite
+ steps:
+ - name: Post a PR comment if the size has changed
+ uses: actions/github-script@v6
+ env:
+ DATA_DIRECTORY: ${{ inputs.data-directory }}
+ with:
+ script: |
+ const fs = require("fs");
+
+ const size_dir = process.env.DATA_DIRECTORY;
+
+ // Map the set of all the *.json files into an array of objects.
+ const globber = await glob.create(`${size_dir}/*.json`);
+ const files = await globber.glob();
+ const sizes = files.map(path => {
+ const contents = fs.readFileSync(path);
+ return JSON.parse(contents);
+ });
+
+ // Map each object into some text, but only if it shows any difference
+ // to report.
+ const size_reports = sizes.flatMap(size_data => {
+ const platform = size_data["platform"];
+ const reference = size_data["reference"];
+ const updated = size_data["updated"];
+
+ if (!(reference > 0)) {
+ core.setFailed(`Reference size invalid: ${reference}`);
+ return;
+ }
+
+ if (!(updated > 0)) {
+ core.setFailed(`Updated size invalid: ${updated}`);
+ return;
+ }
+
+ const formatter = Intl.NumberFormat("en", {
+ useGrouping: "always"
+ });
+
+ const updated_str = formatter.format(updated);
+ const reference_str = formatter.format(reference);
+
+ const diff = updated - reference;
+ const diff_pct = (updated / reference) - 1;
+
+ const diff_str = Intl.NumberFormat("en", {
+ useGrouping: "always",
+            signDisplay: "exceptZero"
+ }).format(diff);
+
+ const diff_pct_str = Intl.NumberFormat("en", {
+ style: "percent",
+ useGrouping: "always",
+            signDisplay: "exceptZero",
+ maximumFractionDigits: 2
+ }).format(diff_pct);
+
+ if (diff !== 0) {
+ // The body is created here and wrapped so "weirdly" to avoid whitespace at the start of the lines,
+ // which is interpreted as a code block by Markdown.
+ const report = `On platform \`${platform}\`:
+
+ - Original binary size: **${reference_str} B**
+ - Updated binary size: **${updated_str} B**
+ - Difference: **${diff_str} B** (${diff_pct_str})
+
+ `;
+
+ return [report];
+ } else {
+ return [];
+ }
+ });
+
+ // If there are any size changes to report, format a comment and post
+ // it.
+ if (size_reports.length > 0) {
+ const comment_sizes = size_reports.join("");
+ const body = `Code size changes for a hello-world Rust program linked with libstd with backtrace:
+
+ ${comment_sizes}`;
+
+ github.rest.issues.createComment({
+ issue_number: context.issue.number,
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ body
+ });
+ }
diff --git a/library/backtrace/.github/workflows/check-binary-size.yml b/library/backtrace/.github/workflows/check-binary-size.yml
index 0beae1da9..d045fb7b3 100644
--- a/library/backtrace/.github/workflows/check-binary-size.yml
+++ b/library/backtrace/.github/workflows/check-binary-size.yml
@@ -9,75 +9,143 @@ on:
branches:
- master
+# Both the "measure" and "report" jobs need to know this.
+env:
+ SIZE_DATA_DIR: sizes
+
+# Responsibility is divided between two jobs "measure" and "report", so that the
+# job that builds (and potentially runs) untrusted code does not have PR write
+# permission, and vice-versa.
jobs:
- test:
+ measure:
name: Check binary size
- runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ platform: [ubuntu-latest, windows-latest]
+ runs-on: ${{ matrix.platform }}
permissions:
- pull-requests: write
+ contents: read
+ env:
+ # This cannot be used as a context variable in the 'uses' key later. If it
+ # changes, update those steps too.
+ BACKTRACE_DIR: backtrace
+ RUSTC_DIR: rustc
+ TEST_MAIN_RS: foo.rs
+ BASE_COMMIT: ${{ github.event.pull_request.base.sha }}
+ HEAD_COMMIT: ${{ github.event.pull_request.head.sha }}
+ SIZE_DATA_FILE: size-${{ strategy.job-index }}.json
steps:
- name: Print info
+ shell: bash
run: |
- echo "Current SHA: ${{ github.event.pull_request.head.sha }}"
- echo "Base SHA: ${{ github.event.pull_request.base.sha }}"
+ echo "Current SHA: $HEAD_COMMIT"
+ echo "Base SHA: $BASE_COMMIT"
+ # Note: the backtrace source that's cloned here is NOT the version to be
+ # patched in to std. It's cloned here to access the Github action for
+ # building and measuring the test binary.
+ - name: Clone backtrace to access Github action
+ uses: actions/checkout@v3
+ with:
+ path: ${{ env.BACKTRACE_DIR }}
- name: Clone Rustc
uses: actions/checkout@v3
with:
repository: rust-lang/rust
- fetch-depth: 1
- - name: Fetch backtrace
- run: git submodule update --init library/backtrace
- - name: Create hello world program that uses backtrace
- run: printf "fn main() { panic!(); }" > foo.rs
- - name: Build binary with base version of backtrace
+ path: ${{ env.RUSTC_DIR }}
+ - name: Set up std repository and backtrace submodule for size test
+ shell: bash
+ working-directory: ${{ env.RUSTC_DIR }}
+ env:
+ PR_SOURCE_REPO: ${{ github.event.pull_request.head.repo.full_name }}
run: |
- printf "[llvm]\ndownload-ci-llvm = true\n\n[rust]\nincremental = false\n" > config.toml
+ # Bootstrap config
+ cat <<EOF > config.toml
+ [llvm]
+ download-ci-llvm = true
+ [rust]
+ incremental = false
+ EOF
+
+ # Test program source
+ cat <<EOF > $TEST_MAIN_RS
+ fn main() {
+ panic!();
+ }
+ EOF
+
+ git submodule update --init library/backtrace
+
cd library/backtrace
- git remote add head-pr https://github.com/${{ github.event.pull_request.head.repo.full_name }}
+ git remote add head-pr "https://github.com/$PR_SOURCE_REPO"
git fetch --all
- git checkout ${{ github.event.pull_request.base.sha }}
- cd ../..
- git add library/backtrace
- python3 x.py build library --stage 0
- ./build/x86_64-unknown-linux-gnu/stage0-sysroot/bin/rustc -O foo.rs -o binary-reference
+ - name: Build binary with base version of backtrace
+ uses: ./backtrace/.github/actions/build-with-patched-std
+ with:
+ backtrace-commit: ${{ env.BASE_COMMIT }}
+ main-rs: ${{ env.TEST_MAIN_RS }}
+ rustc-dir: ${{ env.RUSTC_DIR }}
+ id: size-reference
- name: Build binary with PR version of backtrace
- run: |
- cd library/backtrace
- git checkout ${{ github.event.pull_request.head.sha }}
- cd ../..
- git add library/backtrace
- rm -rf build/x86_64-unknown-linux-gnu/stage0-std
- python3 x.py build library --stage 0
- ./build/x86_64-unknown-linux-gnu/stage0-sysroot/bin/rustc -O foo.rs -o binary-updated
- - name: Display binary size
- run: |
- ls -la binary-*
- echo "SIZE_REFERENCE=$(stat -c '%s' binary-reference)" >> "$GITHUB_ENV"
- echo "SIZE_UPDATED=$(stat -c '%s' binary-updated)" >> "$GITHUB_ENV"
- - name: Post a PR comment if the size has changed
+ uses: ./backtrace/.github/actions/build-with-patched-std
+ with:
+ backtrace-commit: ${{ env.HEAD_COMMIT }}
+ main-rs: ${{ env.TEST_MAIN_RS }}
+ rustc-dir: ${{ env.RUSTC_DIR }}
+ id: size-updated
+ # There is no built-in way to "collect" all the outputs of a set of jobs
+ # run with a matrix strategy. Subsequent jobs that have a "needs"
+ # dependency on this one will be run once, when the last matrix job is
+ # run. Appending data to a single file within a matrix is subject to race
+ # conditions. So we write the size data to files with distinct names
+ # generated from the job index.
+ - name: Write sizes to file
uses: actions/github-script@v6
+ env:
+ SIZE_REFERENCE: ${{ steps.size-reference.outputs.test-binary-size }}
+ SIZE_UPDATED: ${{ steps.size-updated.outputs.test-binary-size }}
+ PLATFORM: ${{ matrix.platform }}
with:
script: |
- const reference = process.env.SIZE_REFERENCE;
- const updated = process.env.SIZE_UPDATED;
- const diff = updated - reference;
- const plus = diff > 0 ? "+" : "";
- const diff_str = `${plus}${diff}B`;
+ const fs = require("fs");
+ const path = require("path");
- if (diff !== 0) {
- const percent = (((updated / reference) - 1) * 100).toFixed(2);
- // The body is created here and wrapped so "weirdly" to avoid whitespace at the start of the lines,
- // which is interpreted as a code block by Markdown.
- const body = `Below is the size of a hello-world Rust program linked with libstd with backtrace.
+ fs.mkdirSync(process.env.SIZE_DATA_DIR, {recursive: true});
- Original binary size: **${reference}B**
- Updated binary size: **${updated}B**
- Difference: **${diff_str}** (${percent}%)`;
+ const output_data = JSON.stringify({
+ platform: process.env.PLATFORM,
+ reference: process.env.SIZE_REFERENCE,
+ updated: process.env.SIZE_UPDATED,
+ });
- github.rest.issues.createComment({
- issue_number: context.issue.number,
- owner: context.repo.owner,
- repo: context.repo.repo,
- body
- })
- }
+ // The "wx" flag makes this fail if the file exists, which we want,
+ // because there should be no collisions.
+ fs.writeFileSync(
+ path.join(process.env.SIZE_DATA_DIR, process.env.SIZE_DATA_FILE),
+ output_data,
+ { flag: "wx" },
+ );
+ - name: Upload size data
+ uses: actions/upload-artifact@v3
+ with:
+ name: size-files
+ path: ${{ env.SIZE_DATA_DIR }}/${{ env.SIZE_DATA_FILE }}
+ retention-days: 1
+ if-no-files-found: error
+ report:
+ name: Report binary size changes
+ runs-on: ubuntu-latest
+ needs: measure
+ permissions:
+ pull-requests: write
+ steps:
+ # Clone backtrace to access Github composite actions to report size.
+ - uses: actions/checkout@v3
+ - name: Download size data
+ uses: actions/download-artifact@v3
+ with:
+ name: size-files
+ path: ${{ env.SIZE_DATA_DIR }}
+ - name: Analyze and report size changes
+ uses: ./.github/actions/report-code-size-changes
+ with:
+ data-directory: ${{ env.SIZE_DATA_DIR }}
diff --git a/library/backtrace/Cargo.lock b/library/backtrace/Cargo.lock
new file mode 100644
index 000000000..619a1392f
--- /dev/null
+++ b/library/backtrace/Cargo.lock
@@ -0,0 +1,221 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "addr2line"
+version = "0.21.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb"
+dependencies = [
+ "gimli",
+]
+
+[[package]]
+name = "adler"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
+
+[[package]]
+name = "as-if-std"
+version = "0.1.0"
+dependencies = [
+ "addr2line",
+ "cc",
+ "cfg-if",
+ "libc",
+ "miniz_oxide",
+ "object",
+ "rustc-demangle",
+]
+
+[[package]]
+name = "backtrace"
+version = "0.3.69"
+dependencies = [
+ "addr2line",
+ "cc",
+ "cfg-if",
+ "cpp_demangle",
+ "dylib-dep",
+ "libc",
+ "libloading",
+ "miniz_oxide",
+ "object",
+ "rustc-demangle",
+ "rustc-serialize",
+ "serde",
+ "winapi",
+]
+
+[[package]]
+name = "cc"
+version = "1.0.83"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "cpp_demangle"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7e8227005286ec39567949b33df9896bcadfa6051bccca2488129f108ca23119"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "cpp_smoke_test"
+version = "0.1.0"
+dependencies = [
+ "backtrace",
+ "cc",
+]
+
+[[package]]
+name = "dylib-dep"
+version = "0.1.0"
+
+[[package]]
+name = "gimli"
+version = "0.28.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0"
+
+[[package]]
+name = "libc"
+version = "0.2.147"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3"
+
+[[package]]
+name = "libloading"
+version = "0.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f"
+dependencies = [
+ "cfg-if",
+ "winapi",
+]
+
+[[package]]
+name = "memchr"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
+
+[[package]]
+name = "miniz_oxide"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7"
+dependencies = [
+ "adler",
+]
+
+[[package]]
+name = "object"
+version = "0.32.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "77ac5bbd07aea88c60a577a1ce218075ffd59208b2d7ca97adf9bfc5aeb21ebe"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.66"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.33"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "rustc-demangle"
+version = "0.1.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76"
+
+[[package]]
+name = "rustc-serialize"
+version = "0.3.24"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda"
+
+[[package]]
+name = "serde"
+version = "1.0.188"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e"
+dependencies = [
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.188"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "syn"
+version = "2.0.29"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c324c494eba9d92503e6f1ef2e6df781e78f6a7705a0202d9801b198807d518a"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c"
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
diff --git a/library/backtrace/Cargo.toml b/library/backtrace/Cargo.toml
index 6714b3b7d..932310bcf 100644
--- a/library/backtrace/Cargo.toml
+++ b/library/backtrace/Cargo.toml
@@ -13,8 +13,9 @@ A library to acquire a stack trace (backtrace) at runtime in a Rust program.
"""
autoexamples = true
autotests = true
-edition = "2018"
+edition = "2021"
exclude = ["/ci/"]
+rust-version = "1.65.0"
[workspace]
members = ['crates/cpp_smoke_test', 'crates/as-if-std']
@@ -45,7 +46,7 @@ libc = { version = "0.2.146", default-features = false }
[target.'cfg(not(all(windows, target_env = "msvc", not(target_vendor = "uwp"))))'.dependencies.object]
version = "0.32.0"
default-features = false
-features = ['read_core', 'elf', 'macho', 'pe', 'unaligned', 'archive']
+features = ['read_core', 'elf', 'macho', 'pe', 'xcoff', 'unaligned', 'archive']
[target.'cfg(windows)'.dependencies]
winapi = { version = "0.3.9", optional = true }
@@ -118,12 +119,12 @@ required-features = ["std"]
[[test]]
name = "smoke"
required-features = ["std"]
-edition = '2018'
+edition = '2021'
[[test]]
name = "accuracy"
required-features = ["std"]
-edition = '2018'
+edition = '2021'
[[test]]
name = "concurrent-panics"
diff --git a/library/backtrace/build.rs b/library/backtrace/build.rs
index 9bd3abd16..ed4e07a85 100644
--- a/library/backtrace/build.rs
+++ b/library/backtrace/build.rs
@@ -11,17 +11,27 @@ pub fn main() {
}
}
+// Used to detect the value of the `__ANDROID_API__`
+// builtin #define
+const MARKER: &str = "BACKTRACE_RS_ANDROID_APIVERSION";
+const ANDROID_API_C: &str = "
+BACKTRACE_RS_ANDROID_APIVERSION __ANDROID_API__
+";
+
fn build_android() {
- // Resolve `src/android-api.c` relative to this file.
+ // Create `android-api.c` on demand.
// Required to support calling this from the `std` build script.
- let android_api_c = Path::new(file!())
- .parent()
- .unwrap()
- .join("src/android-api.c");
- let expansion = match cc::Build::new().file(android_api_c).try_expand() {
+ let out_dir = env::var_os("OUT_DIR").unwrap();
+ let android_api_c = Path::new(&out_dir).join("android-api.c");
+ std::fs::write(&android_api_c, ANDROID_API_C).unwrap();
+
+ let expansion = match cc::Build::new().file(&android_api_c).try_expand() {
Ok(result) => result,
Err(e) => {
- println!("failed to run C compiler: {}", e);
+ eprintln!(
+ "warning: android version detection failed while running C compiler: {}",
+ e
+ );
return;
}
};
@@ -29,13 +39,12 @@ fn build_android() {
Ok(s) => s,
Err(_) => return,
};
- println!("expanded android version detection:\n{}", expansion);
- let marker = "APIVERSION";
- let i = match expansion.find(marker) {
+ eprintln!("expanded android version detection:\n{}", expansion);
+ let i = match expansion.find(MARKER) {
Some(i) => i,
None => return,
};
- let version = match expansion[i + marker.len() + 1..].split_whitespace().next() {
+ let version = match expansion[i + MARKER.len() + 1..].split_whitespace().next() {
Some(s) => s,
None => return,
};
diff --git a/library/backtrace/crates/as-if-std/Cargo.toml b/library/backtrace/crates/as-if-std/Cargo.toml
index bcbcfe159..7f12cfb56 100644
--- a/library/backtrace/crates/as-if-std/Cargo.toml
+++ b/library/backtrace/crates/as-if-std/Cargo.toml
@@ -2,7 +2,7 @@
name = "as-if-std"
version = "0.1.0"
authors = ["Alex Crichton <alex@alexcrichton.com>"]
-edition = "2018"
+edition = "2021"
publish = false
[lib]
@@ -24,7 +24,7 @@ addr2line = { version = "0.21.0", optional = true, default-features = false }
version = "0.32.0"
default-features = false
optional = true
-features = ['read_core', 'elf', 'macho', 'pe', 'unaligned', 'archive']
+features = ['read_core', 'elf', 'macho', 'pe', 'xcoff', 'unaligned', 'archive']
[build-dependencies]
# Dependency of the `backtrace` crate
diff --git a/library/backtrace/crates/debuglink/Cargo.toml b/library/backtrace/crates/debuglink/Cargo.toml
index 6b55b1394..5e62abd37 100644
--- a/library/backtrace/crates/debuglink/Cargo.toml
+++ b/library/backtrace/crates/debuglink/Cargo.toml
@@ -1,7 +1,7 @@
[package]
name = "debuglink"
version = "0.1.0"
-edition = "2018"
+edition = "2021"
[dependencies]
backtrace = { path = "../.." }
diff --git a/library/backtrace/crates/dylib-dep/Cargo.toml b/library/backtrace/crates/dylib-dep/Cargo.toml
index c3d4a8c2f..e6cc9c23b 100644
--- a/library/backtrace/crates/dylib-dep/Cargo.toml
+++ b/library/backtrace/crates/dylib-dep/Cargo.toml
@@ -1,7 +1,7 @@
[package]
name = "dylib-dep"
version = "0.1.0"
-edition = "2018"
+edition = "2021"
authors = []
publish = false
diff --git a/library/backtrace/crates/line-tables-only/Cargo.toml b/library/backtrace/crates/line-tables-only/Cargo.toml
index e2967d3d3..8d17db58c 100644
--- a/library/backtrace/crates/line-tables-only/Cargo.toml
+++ b/library/backtrace/crates/line-tables-only/Cargo.toml
@@ -1,7 +1,7 @@
[package]
name = "line-tables-only"
version = "0.1.0"
-edition = "2018"
+edition = "2021"
[build-dependencies]
cc = "1.0"
diff --git a/library/backtrace/crates/line-tables-only/src/lib.rs b/library/backtrace/crates/line-tables-only/src/lib.rs
index bd5afcb3a..b292b8441 100644
--- a/library/backtrace/crates/line-tables-only/src/lib.rs
+++ b/library/backtrace/crates/line-tables-only/src/lib.rs
@@ -1,8 +1,8 @@
#[cfg(test)]
mod tests {
- use std::path::Path;
use backtrace::Backtrace;
use libc::c_void;
+ use std::path::Path;
pub type Callback = extern "C" fn(data: *mut c_void);
@@ -15,11 +15,12 @@ mod tests {
unsafe { *(data as *mut Option<Backtrace>) = Some(bt) };
}
- fn assert_contains(backtrace: &Backtrace,
- expected_name: &str,
- expected_file: &str,
- expected_line: u32) {
-
+ fn assert_contains(
+ backtrace: &Backtrace,
+ expected_name: &str,
+ expected_file: &str,
+ expected_line: u32,
+ ) {
let expected_file = Path::new(expected_file);
for frame in backtrace.frames() {
@@ -34,7 +35,7 @@ mod tests {
}
}
- panic!("symbol {:?} not found in backtrace: {:?}", expected_name, backtrace);
+ panic!("symbol {expected_name:?} not found in backtrace: {backtrace:?}");
}
/// Verifies that when debug info includes only lines tables the generated
diff --git a/library/backtrace/crates/macos_frames_test/Cargo.toml b/library/backtrace/crates/macos_frames_test/Cargo.toml
index 278d51e79..849e76414 100644
--- a/library/backtrace/crates/macos_frames_test/Cargo.toml
+++ b/library/backtrace/crates/macos_frames_test/Cargo.toml
@@ -2,7 +2,7 @@
name = "macos_frames_test"
version = "0.1.0"
authors = ["Aaron Hill <aa1ronham@gmail.com>"]
-edition = "2018"
+edition = "2021"
[dependencies.backtrace]
path = "../.."
diff --git a/library/backtrace/crates/without_debuginfo/Cargo.toml b/library/backtrace/crates/without_debuginfo/Cargo.toml
index 19d76cbec..38e559971 100644
--- a/library/backtrace/crates/without_debuginfo/Cargo.toml
+++ b/library/backtrace/crates/without_debuginfo/Cargo.toml
@@ -2,7 +2,7 @@
name = "without_debuginfo"
version = "0.1.0"
authors = ["Alex Crichton <alex@alexcrichton.com>"]
-edition = "2018"
+edition = "2021"
[dependencies.backtrace]
path = "../.."
diff --git a/library/backtrace/src/android-api.c b/library/backtrace/src/android-api.c
deleted file mode 100644
index 1bfeadf5b..000000000
--- a/library/backtrace/src/android-api.c
+++ /dev/null
@@ -1,4 +0,0 @@
-// Used from the build script to detect the value of the `__ANDROID_API__`
-// builtin #define
-
-APIVERSION __ANDROID_API__
diff --git a/library/backtrace/src/backtrace/dbghelp.rs b/library/backtrace/src/backtrace/dbghelp.rs
index ba0f05f3b..d1b76e281 100644
--- a/library/backtrace/src/backtrace/dbghelp.rs
+++ b/library/backtrace/src/backtrace/dbghelp.rs
@@ -1,30 +1,32 @@
//! Backtrace strategy for MSVC platforms.
//!
-//! This module contains the ability to generate a backtrace on MSVC using one
-//! of two possible methods. The `StackWalkEx` function is primarily used if
-//! possible, but not all systems have that. Failing that the `StackWalk64`
-//! function is used instead. Note that `StackWalkEx` is favored because it
-//! handles debuginfo internally and returns inline frame information.
+//! This module contains the ability to capture a backtrace on MSVC using one
+//! of three possible methods. For `x86_64` and `aarch64`, we use `RtlVirtualUnwind`
+//! to walk the stack one frame at a time. This function is much faster than using
+//! `dbghelp!StackWalk*` because it does not load debug info to report inlined frames.
+//! We still report inlined frames during symbolization by consulting the appropriate
+//! `dbghelp` functions.
+//!
+//! For all other platforms, primarily `i686`, the `StackWalkEx` function is used if
+//! possible, but not all systems have that. Failing that the `StackWalk64` function
+//! is used instead. Note that `StackWalkEx` is favored because it handles debuginfo
+//! internally and returns inline frame information.
//!
//! Note that all dbghelp support is loaded dynamically, see `src/dbghelp.rs`
//! for more information about that.
#![allow(bad_style)]
-use super::super::{dbghelp, windows::*};
+use super::super::windows::*;
use core::ffi::c_void;
-use core::mem;
-
-#[derive(Clone, Copy)]
-pub enum StackFrame {
- New(STACKFRAME_EX),
- Old(STACKFRAME64),
-}
#[derive(Clone, Copy)]
pub struct Frame {
- pub(crate) stack_frame: StackFrame,
base_address: *mut c_void,
+ ip: *mut c_void,
+ sp: *mut c_void,
+ #[cfg(not(target_env = "gnu"))]
+ inline_context: Option<DWORD>,
}
// we're just sending around raw pointers and reading them, never interpreting
@@ -34,62 +36,144 @@ unsafe impl Sync for Frame {}
impl Frame {
pub fn ip(&self) -> *mut c_void {
- self.addr_pc().Offset as *mut _
+ self.ip
}
pub fn sp(&self) -> *mut c_void {
- self.addr_stack().Offset as *mut _
+ self.sp
}
pub fn symbol_address(&self) -> *mut c_void {
- self.ip()
+ self.ip
}
pub fn module_base_address(&self) -> Option<*mut c_void> {
Some(self.base_address)
}
- fn addr_pc(&self) -> &ADDRESS64 {
- match self.stack_frame {
- StackFrame::New(ref new) => &new.AddrPC,
- StackFrame::Old(ref old) => &old.AddrPC,
- }
+ #[cfg(not(target_env = "gnu"))]
+ pub fn inline_context(&self) -> Option<DWORD> {
+ self.inline_context
}
+}
- fn addr_pc_mut(&mut self) -> &mut ADDRESS64 {
- match self.stack_frame {
- StackFrame::New(ref mut new) => &mut new.AddrPC,
- StackFrame::Old(ref mut old) => &mut old.AddrPC,
- }
+#[repr(C, align(16))] // required by `CONTEXT`, is a FIXME in winapi right now
+struct MyContext(CONTEXT);
+
+#[cfg(target_arch = "x86_64")]
+impl MyContext {
+ #[inline(always)]
+ fn ip(&self) -> DWORD64 {
+ self.0.Rip
}
- fn addr_frame_mut(&mut self) -> &mut ADDRESS64 {
- match self.stack_frame {
- StackFrame::New(ref mut new) => &mut new.AddrFrame,
- StackFrame::Old(ref mut old) => &mut old.AddrFrame,
- }
+ #[inline(always)]
+ fn sp(&self) -> DWORD64 {
+ self.0.Rsp
}
+}
- fn addr_stack(&self) -> &ADDRESS64 {
- match self.stack_frame {
- StackFrame::New(ref new) => &new.AddrStack,
- StackFrame::Old(ref old) => &old.AddrStack,
- }
+#[cfg(target_arch = "aarch64")]
+impl MyContext {
+ #[inline(always)]
+ fn ip(&self) -> DWORD64 {
+ self.0.Pc
}
- fn addr_stack_mut(&mut self) -> &mut ADDRESS64 {
- match self.stack_frame {
- StackFrame::New(ref mut new) => &mut new.AddrStack,
- StackFrame::Old(ref mut old) => &mut old.AddrStack,
- }
+ #[inline(always)]
+ fn sp(&self) -> DWORD64 {
+ self.0.Sp
}
}
-#[repr(C, align(16))] // required by `CONTEXT`, is a FIXME in winapi right now
-struct MyContext(CONTEXT);
+#[cfg(target_arch = "x86")]
+impl MyContext {
+ #[inline(always)]
+ fn ip(&self) -> DWORD {
+ self.0.Eip
+ }
+
+ #[inline(always)]
+ fn sp(&self) -> DWORD {
+ self.0.Esp
+ }
+ #[inline(always)]
+ fn fp(&self) -> DWORD {
+ self.0.Ebp
+ }
+}
+
+#[cfg(target_arch = "arm")]
+impl MyContext {
+ #[inline(always)]
+ fn ip(&self) -> DWORD {
+ self.0.Pc
+ }
+
+ #[inline(always)]
+ fn sp(&self) -> DWORD {
+ self.0.Sp
+ }
+
+ #[inline(always)]
+ fn fp(&self) -> DWORD {
+ self.0.R11
+ }
+}
+
+#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
#[inline(always)]
pub unsafe fn trace(cb: &mut dyn FnMut(&super::Frame) -> bool) {
+ use core::ptr;
+
+ let mut context = core::mem::zeroed::<MyContext>();
+ RtlCaptureContext(&mut context.0);
+
+ // Call `RtlVirtualUnwind` to find the previous stack frame, walking until we hit ip = 0.
+ while context.ip() != 0 {
+ let mut base = 0;
+
+ let fn_entry = RtlLookupFunctionEntry(context.ip(), &mut base, ptr::null_mut());
+ if fn_entry.is_null() {
+ break;
+ }
+
+ let frame = super::Frame {
+ inner: Frame {
+ base_address: fn_entry as *mut c_void,
+ ip: context.ip() as *mut c_void,
+ sp: context.sp() as *mut c_void,
+ #[cfg(not(target_env = "gnu"))]
+ inline_context: None,
+ },
+ };
+
+ if !cb(&frame) {
+ break;
+ }
+
+ let mut handler_data = 0usize;
+ let mut establisher_frame = 0;
+
+ RtlVirtualUnwind(
+ 0,
+ base,
+ context.ip(),
+ fn_entry,
+ &mut context.0,
+ &mut handler_data as *mut usize as *mut PVOID,
+ &mut establisher_frame,
+ ptr::null_mut(),
+ );
+ }
+}
+
+#[cfg(any(target_arch = "x86", target_arch = "arm"))]
+#[inline(always)]
+pub unsafe fn trace(cb: &mut dyn FnMut(&super::Frame) -> bool) {
+ use core::mem;
+
// Allocate necessary structures for doing the stack walk
let process = GetCurrentProcess();
let thread = GetCurrentThread();
@@ -98,65 +182,40 @@ pub unsafe fn trace(cb: &mut dyn FnMut(&super::Frame) -> bool) {
RtlCaptureContext(&mut context.0);
// Ensure this process's symbols are initialized
- let dbghelp = match dbghelp::init() {
+ let dbghelp = match super::super::dbghelp::init() {
Ok(dbghelp) => dbghelp,
Err(()) => return, // oh well...
};
- // On x86_64 and ARM64 we opt to not use the default `Sym*` functions from
- // dbghelp for getting the function table and module base. Instead we use
- // the `RtlLookupFunctionEntry` function in kernel32 which will account for
- // JIT compiler frames as well. These should be equivalent, but using
- // `Rtl*` allows us to backtrace through JIT frames.
- //
- // Note that `RtlLookupFunctionEntry` only works for in-process backtraces,
- // but that's all we support anyway, so it all lines up well.
- cfg_if::cfg_if! {
- if #[cfg(target_pointer_width = "64")] {
- use core::ptr;
-
- unsafe extern "system" fn function_table_access(_process: HANDLE, addr: DWORD64) -> PVOID {
- let mut base = 0;
- RtlLookupFunctionEntry(addr, &mut base, ptr::null_mut()).cast()
- }
-
- unsafe extern "system" fn get_module_base(_process: HANDLE, addr: DWORD64) -> DWORD64 {
- let mut base = 0;
- RtlLookupFunctionEntry(addr, &mut base, ptr::null_mut());
- base
- }
- } else {
- let function_table_access = dbghelp.SymFunctionTableAccess64();
- let get_module_base = dbghelp.SymGetModuleBase64();
- }
- }
+ let function_table_access = dbghelp.SymFunctionTableAccess64();
+ let get_module_base = dbghelp.SymGetModuleBase64();
let process_handle = GetCurrentProcess();
+ #[cfg(target_arch = "x86")]
+ let image = IMAGE_FILE_MACHINE_I386;
+ #[cfg(target_arch = "arm")]
+ let image = IMAGE_FILE_MACHINE_ARMNT;
+
// Attempt to use `StackWalkEx` if we can, but fall back to `StackWalk64`
// since it's in theory supported on more systems.
match (*dbghelp.dbghelp()).StackWalkEx() {
Some(StackWalkEx) => {
- let mut inner: STACKFRAME_EX = mem::zeroed();
- inner.StackFrameSize = mem::size_of::<STACKFRAME_EX>() as DWORD;
- let mut frame = super::Frame {
- inner: Frame {
- stack_frame: StackFrame::New(inner),
- base_address: 0 as _,
- },
- };
- let image = init_frame(&mut frame.inner, &context.0);
- let frame_ptr = match &mut frame.inner.stack_frame {
- StackFrame::New(ptr) => ptr as *mut STACKFRAME_EX,
- _ => unreachable!(),
- };
+ let mut stack_frame_ex: STACKFRAME_EX = mem::zeroed();
+ stack_frame_ex.StackFrameSize = mem::size_of::<STACKFRAME_EX>() as DWORD;
+ stack_frame_ex.AddrPC.Offset = context.ip() as u64;
+ stack_frame_ex.AddrPC.Mode = AddrModeFlat;
+ stack_frame_ex.AddrStack.Offset = context.sp() as u64;
+ stack_frame_ex.AddrStack.Mode = AddrModeFlat;
+ stack_frame_ex.AddrFrame.Offset = context.fp() as u64;
+ stack_frame_ex.AddrFrame.Mode = AddrModeFlat;
while StackWalkEx(
image as DWORD,
process,
thread,
- frame_ptr,
- &mut context.0 as *mut CONTEXT as *mut _,
+ &mut stack_frame_ex,
+ &mut context.0 as *mut CONTEXT as PVOID,
None,
Some(function_table_access),
Some(get_module_base),
@@ -164,7 +223,16 @@ pub unsafe fn trace(cb: &mut dyn FnMut(&super::Frame) -> bool) {
0,
) == TRUE
{
- frame.inner.base_address = get_module_base(process_handle, frame.ip() as _) as _;
+ let frame = super::Frame {
+ inner: Frame {
+ base_address: get_module_base(process_handle, stack_frame_ex.AddrPC.Offset)
+ as *mut c_void,
+ ip: stack_frame_ex.AddrPC.Offset as *mut c_void,
+ sp: stack_frame_ex.AddrStack.Offset as *mut c_void,
+ #[cfg(not(target_env = "gnu"))]
+ inline_context: Some(stack_frame_ex.InlineFrameContext),
+ },
+ };
if !cb(&frame) {
break;
@@ -172,31 +240,36 @@ pub unsafe fn trace(cb: &mut dyn FnMut(&super::Frame) -> bool) {
}
}
None => {
- let mut frame = super::Frame {
- inner: Frame {
- stack_frame: StackFrame::Old(mem::zeroed()),
- base_address: 0 as _,
- },
- };
- let image = init_frame(&mut frame.inner, &context.0);
- let frame_ptr = match &mut frame.inner.stack_frame {
- StackFrame::Old(ptr) => ptr as *mut STACKFRAME64,
- _ => unreachable!(),
- };
+ let mut stack_frame64: STACKFRAME64 = mem::zeroed();
+ stack_frame64.AddrPC.Offset = context.ip() as u64;
+ stack_frame64.AddrPC.Mode = AddrModeFlat;
+ stack_frame64.AddrStack.Offset = context.sp() as u64;
+ stack_frame64.AddrStack.Mode = AddrModeFlat;
+ stack_frame64.AddrFrame.Offset = context.fp() as u64;
+ stack_frame64.AddrFrame.Mode = AddrModeFlat;
while dbghelp.StackWalk64()(
image as DWORD,
process,
thread,
- frame_ptr,
- &mut context.0 as *mut CONTEXT as *mut _,
+ &mut stack_frame64,
+ &mut context.0 as *mut CONTEXT as PVOID,
None,
Some(function_table_access),
Some(get_module_base),
None,
) == TRUE
{
- frame.inner.base_address = get_module_base(process_handle, frame.ip() as _) as _;
+ let frame = super::Frame {
+ inner: Frame {
+ base_address: get_module_base(process_handle, stack_frame64.AddrPC.Offset)
+ as *mut c_void,
+ ip: stack_frame64.AddrPC.Offset as *mut c_void,
+ sp: stack_frame64.AddrStack.Offset as *mut c_void,
+ #[cfg(not(target_env = "gnu"))]
+ inline_context: None,
+ },
+ };
if !cb(&frame) {
break;
@@ -205,53 +278,3 @@ pub unsafe fn trace(cb: &mut dyn FnMut(&super::Frame) -> bool) {
}
}
}
-
-#[cfg(target_arch = "x86_64")]
-fn init_frame(frame: &mut Frame, ctx: &CONTEXT) -> WORD {
- frame.addr_pc_mut().Offset = ctx.Rip as u64;
- frame.addr_pc_mut().Mode = AddrModeFlat;
- frame.addr_stack_mut().Offset = ctx.Rsp as u64;
- frame.addr_stack_mut().Mode = AddrModeFlat;
- frame.addr_frame_mut().Offset = ctx.Rbp as u64;
- frame.addr_frame_mut().Mode = AddrModeFlat;
-
- IMAGE_FILE_MACHINE_AMD64
-}
-
-#[cfg(target_arch = "x86")]
-fn init_frame(frame: &mut Frame, ctx: &CONTEXT) -> WORD {
- frame.addr_pc_mut().Offset = ctx.Eip as u64;
- frame.addr_pc_mut().Mode = AddrModeFlat;
- frame.addr_stack_mut().Offset = ctx.Esp as u64;
- frame.addr_stack_mut().Mode = AddrModeFlat;
- frame.addr_frame_mut().Offset = ctx.Ebp as u64;
- frame.addr_frame_mut().Mode = AddrModeFlat;
-
- IMAGE_FILE_MACHINE_I386
-}
-
-#[cfg(target_arch = "aarch64")]
-fn init_frame(frame: &mut Frame, ctx: &CONTEXT) -> WORD {
- frame.addr_pc_mut().Offset = ctx.Pc as u64;
- frame.addr_pc_mut().Mode = AddrModeFlat;
- frame.addr_stack_mut().Offset = ctx.Sp as u64;
- frame.addr_stack_mut().Mode = AddrModeFlat;
- unsafe {
- frame.addr_frame_mut().Offset = ctx.u.s().Fp as u64;
- }
- frame.addr_frame_mut().Mode = AddrModeFlat;
- IMAGE_FILE_MACHINE_ARM64
-}
-
-#[cfg(target_arch = "arm")]
-fn init_frame(frame: &mut Frame, ctx: &CONTEXT) -> WORD {
- frame.addr_pc_mut().Offset = ctx.Pc as u64;
- frame.addr_pc_mut().Mode = AddrModeFlat;
- frame.addr_stack_mut().Offset = ctx.Sp as u64;
- frame.addr_stack_mut().Mode = AddrModeFlat;
- unsafe {
- frame.addr_frame_mut().Offset = ctx.R11 as u64;
- }
- frame.addr_frame_mut().Mode = AddrModeFlat;
- IMAGE_FILE_MACHINE_ARMNT
-}
diff --git a/library/backtrace/src/backtrace/libunwind.rs b/library/backtrace/src/backtrace/libunwind.rs
index aefa8b094..0cf6365f7 100644
--- a/library/backtrace/src/backtrace/libunwind.rs
+++ b/library/backtrace/src/backtrace/libunwind.rs
@@ -40,7 +40,18 @@ impl Frame {
Frame::Raw(ctx) => ctx,
Frame::Cloned { ip, .. } => return ip,
};
- unsafe { uw::_Unwind_GetIP(ctx) as *mut c_void }
+ #[allow(unused_mut)]
+ let mut ip = unsafe { uw::_Unwind_GetIP(ctx) as *mut c_void };
+
+ // To reduce TCB size in SGX enclaves, we do not want to implement
+ // symbol resolution functionality. Rather, we can print the offset of
+ // the address here, which could be later mapped to correct function.
+ #[cfg(all(target_env = "sgx", target_vendor = "fortanix"))]
+ {
+ let image_base = super::get_image_base();
+ ip = usize::wrapping_sub(ip as usize, image_base as _) as _;
+ }
+ ip
}
pub fn sp(&self) -> *mut c_void {
diff --git a/library/backtrace/src/backtrace/mod.rs b/library/backtrace/src/backtrace/mod.rs
index 6ca1080c4..1b812d84e 100644
--- a/library/backtrace/src/backtrace/mod.rs
+++ b/library/backtrace/src/backtrace/mod.rs
@@ -125,6 +125,39 @@ impl fmt::Debug for Frame {
}
}
+#[cfg(all(target_env = "sgx", target_vendor = "fortanix", not(feature = "std")))]
+mod sgx_no_std_image_base {
+ use core::ffi::c_void;
+ use core::sync::atomic::{AtomicUsize, Ordering::SeqCst};
+
+ static IMAGE_BASE: AtomicUsize = AtomicUsize::new(0);
+
+ /// Set the image base address. This is only available for Fortanix SGX
+ /// target when the `std` feature is not enabled. This can be used in the
+ /// standard library to set the correct base address.
+ #[doc(hidden)]
+ pub fn set_image_base(base_addr: *mut c_void) {
+ IMAGE_BASE.store(base_addr as _, SeqCst);
+ }
+
+ pub(crate) fn get_image_base() -> *mut c_void {
+ IMAGE_BASE.load(SeqCst) as _
+ }
+}
+
+#[cfg(all(target_env = "sgx", target_vendor = "fortanix", not(feature = "std")))]
+pub use self::sgx_no_std_image_base::set_image_base;
+
+#[cfg(all(target_env = "sgx", target_vendor = "fortanix", not(feature = "std")))]
+#[deny(unused)]
+pub(crate) use self::sgx_no_std_image_base::get_image_base;
+
+#[cfg(all(target_env = "sgx", target_vendor = "fortanix", feature = "std"))]
+#[deny(unused)]
+pub(crate) fn get_image_base() -> *mut c_void {
+ std::os::fortanix_sgx::mem::image_base() as _
+}
+
cfg_if::cfg_if! {
// This needs to come first, to ensure that
// Miri takes priority over the host platform
@@ -153,8 +186,6 @@ cfg_if::cfg_if! {
mod dbghelp;
use self::dbghelp::trace as trace_imp;
pub(crate) use self::dbghelp::Frame as FrameImp;
- #[cfg(target_env = "msvc")] // only used in dbghelp symbolize
- pub(crate) use self::dbghelp::StackFrame;
} else {
mod noop;
use self::noop::trace as trace_imp;
diff --git a/library/backtrace/src/dbghelp.rs b/library/backtrace/src/dbghelp.rs
index c81766bae..e456dd45d 100644
--- a/library/backtrace/src/dbghelp.rs
+++ b/library/backtrace/src/dbghelp.rs
@@ -34,8 +34,8 @@ use core::ptr;
mod dbghelp {
use crate::windows::*;
pub use winapi::um::dbghelp::{
- StackWalk64, StackWalkEx, SymCleanup, SymFromAddrW, SymFunctionTableAccess64,
- SymGetLineFromAddrW64, SymGetModuleBase64, SymGetOptions, SymInitializeW, SymSetOptions,
+ StackWalk64, StackWalkEx, SymFromAddrW, SymFunctionTableAccess64, SymGetLineFromAddrW64,
+ SymGetModuleBase64, SymGetOptions, SymInitializeW, SymSetOptions,
};
extern "system" {
@@ -55,6 +55,16 @@ mod dbghelp {
pdwDisplacement: PDWORD,
Line: PIMAGEHLP_LINEW64,
) -> BOOL;
+ pub fn SymAddrIncludeInlineTrace(hProcess: HANDLE, Address: DWORD64) -> DWORD;
+ pub fn SymQueryInlineTrace(
+ hProcess: HANDLE,
+ StartAddress: DWORD64,
+ StartContext: DWORD,
+ StartRetAddress: DWORD64,
+ CurAddress: DWORD64,
+ CurContext: LPDWORD,
+ CurFrameIndex: LPDWORD,
+ ) -> BOOL;
}
pub fn assert_equal_types<T>(a: T, _b: T) -> T {
@@ -164,7 +174,6 @@ dbghelp! {
path: PCWSTR,
invade: BOOL
) -> BOOL;
- fn SymCleanup(handle: HANDLE) -> BOOL;
fn StackWalk64(
MachineType: DWORD,
hProcess: HANDLE,
@@ -184,18 +193,6 @@ dbghelp! {
hProcess: HANDLE,
AddrBase: DWORD64
) -> DWORD64;
- fn SymFromAddrW(
- hProcess: HANDLE,
- Address: DWORD64,
- Displacement: PDWORD64,
- Symbol: PSYMBOL_INFOW
- ) -> BOOL;
- fn SymGetLineFromAddrW64(
- hProcess: HANDLE,
- dwAddr: DWORD64,
- pdwDisplacement: PDWORD,
- Line: PIMAGEHLP_LINEW64
- ) -> BOOL;
fn StackWalkEx(
MachineType: DWORD,
hProcess: HANDLE,
@@ -223,6 +220,31 @@ dbghelp! {
pdwDisplacement: PDWORD,
Line: PIMAGEHLP_LINEW64
) -> BOOL;
+ fn SymAddrIncludeInlineTrace(
+ hProcess: HANDLE,
+ Address: DWORD64
+ ) -> DWORD;
+ fn SymQueryInlineTrace(
+ hProcess: HANDLE,
+ StartAddress: DWORD64,
+ StartContext: DWORD,
+ StartRetAddress: DWORD64,
+ CurAddress: DWORD64,
+ CurContext: LPDWORD,
+ CurFrameIndex: LPDWORD
+ ) -> BOOL;
+ fn SymFromAddrW(
+ hProcess: HANDLE,
+ Address: DWORD64,
+ Displacement: PDWORD64,
+ Symbol: PSYMBOL_INFOW
+ ) -> BOOL;
+ fn SymGetLineFromAddrW64(
+ hProcess: HANDLE,
+ dwAddr: DWORD64,
+ pdwDisplacement: PDWORD,
+ Line: PIMAGEHLP_LINEW64
+ ) -> BOOL;
}
}
diff --git a/library/backtrace/src/lib.rs b/library/backtrace/src/lib.rs
index 4615e1f96..44a0bc64e 100644
--- a/library/backtrace/src/lib.rs
+++ b/library/backtrace/src/lib.rs
@@ -134,6 +134,12 @@ cfg_if::cfg_if! {
}
}
+cfg_if::cfg_if! {
+ if #[cfg(all(target_env = "sgx", target_vendor = "fortanix", not(feature = "std")))] {
+ pub use self::backtrace::set_image_base;
+ }
+}
+
#[allow(dead_code)]
struct Bomb {
enabled: bool,
@@ -186,7 +192,14 @@ mod lock {
}
}
-#[cfg(all(windows, not(target_vendor = "uwp")))]
+#[cfg(all(
+ windows,
+ any(
+ target_env = "msvc",
+ all(target_env = "gnu", any(target_arch = "x86", target_arch = "arm"))
+ ),
+ not(target_vendor = "uwp")
+))]
mod dbghelp;
#[cfg(windows)]
mod windows;
diff --git a/library/backtrace/src/print.rs b/library/backtrace/src/print.rs
index 395328a0a..de8569182 100644
--- a/library/backtrace/src/print.rs
+++ b/library/backtrace/src/print.rs
@@ -219,7 +219,7 @@ impl BacktraceFrameFmt<'_, '_, '_> {
#[allow(unused_mut)]
fn print_raw_generic(
&mut self,
- mut frame_ip: *mut c_void,
+ frame_ip: *mut c_void,
symbol_name: Option<SymbolName<'_>>,
filename: Option<BytesOrWideString<'_>>,
lineno: Option<u32>,
@@ -233,15 +233,6 @@ impl BacktraceFrameFmt<'_, '_, '_> {
}
}
- // To reduce TCB size in Sgx enclave, we do not want to implement symbol
- // resolution functionality. Rather, we can print the offset of the
- // address here, which could be later mapped to correct function.
- #[cfg(all(feature = "std", target_env = "sgx", target_vendor = "fortanix"))]
- {
- let image_base = std::os::fortanix_sgx::mem::image_base();
- frame_ip = usize::wrapping_sub(frame_ip as usize, image_base as _) as _;
- }
-
// Print the index of the frame as well as the optional instruction
// pointer of the frame. If we're beyond the first symbol of this frame
// though we just print appropriate whitespace.
diff --git a/library/backtrace/src/symbolize/dbghelp.rs b/library/backtrace/src/symbolize/dbghelp.rs
index 181dba731..8c47d58e8 100644
--- a/library/backtrace/src/symbolize/dbghelp.rs
+++ b/library/backtrace/src/symbolize/dbghelp.rs
@@ -17,7 +17,7 @@
#![allow(bad_style)]
-use super::super::{backtrace::StackFrame, dbghelp, windows::*};
+use super::super::{dbghelp, windows::*};
use super::{BytesOrWideString, ResolveWhat, SymbolName};
use core::char;
use core::ffi::c_void;
@@ -78,54 +78,103 @@ pub unsafe fn resolve(what: ResolveWhat<'_>, cb: &mut dyn FnMut(&super::Symbol))
Err(()) => return, // oh well...
};
+ let resolve_inner = if (*dbghelp.dbghelp()).SymAddrIncludeInlineTrace().is_some() {
+ // We are on a version of dbghelp 6.2+, which contains the more modern
+ // Inline APIs.
+ resolve_with_inline
+ } else {
+ // We are on an older version of dbghelp which doesn't contain the Inline
+ // APIs.
+ resolve_legacy
+ };
match what {
- ResolveWhat::Address(_) => resolve_without_inline(&dbghelp, what.address_or_ip(), cb),
- ResolveWhat::Frame(frame) => match &frame.inner.stack_frame {
- StackFrame::New(frame) => resolve_with_inline(&dbghelp, frame, cb),
- StackFrame::Old(_) => resolve_without_inline(&dbghelp, frame.ip(), cb),
- },
+ ResolveWhat::Address(_) => resolve_inner(&dbghelp, what.address_or_ip(), None, cb),
+ ResolveWhat::Frame(frame) => {
+ resolve_inner(&dbghelp, frame.ip(), frame.inner.inline_context(), cb)
+ }
}
}
-unsafe fn resolve_with_inline(
+/// Resolve the address using the legacy dbghelp API.
+///
+/// This should work all the way down to Windows XP. The inline context is
+/// ignored, since this concept was only introduced in dbghelp 6.2+.
+unsafe fn resolve_legacy(
dbghelp: &dbghelp::Init,
- frame: &STACKFRAME_EX,
+ addr: *mut c_void,
+ _inline_context: Option<DWORD>,
cb: &mut dyn FnMut(&super::Symbol),
) {
+ let addr = super::adjust_ip(addr) as DWORD64;
do_resolve(
- |info| {
- dbghelp.SymFromInlineContextW()(
- GetCurrentProcess(),
- super::adjust_ip(frame.AddrPC.Offset as *mut _) as u64,
- frame.InlineFrameContext,
- &mut 0,
- info,
- )
- },
- |line| {
- dbghelp.SymGetLineFromInlineContextW()(
- GetCurrentProcess(),
- super::adjust_ip(frame.AddrPC.Offset as *mut _) as u64,
- frame.InlineFrameContext,
- 0,
- &mut 0,
- line,
- )
- },
+ |info| dbghelp.SymFromAddrW()(GetCurrentProcess(), addr, &mut 0, info),
+ |line| dbghelp.SymGetLineFromAddrW64()(GetCurrentProcess(), addr, &mut 0, line),
cb,
)
}
-unsafe fn resolve_without_inline(
+/// Resolve the address using the modern dbghelp APIs.
+///
+/// Note that calling this function requires having dbghelp 6.2+ loaded - and
+/// will panic otherwise.
+unsafe fn resolve_with_inline(
dbghelp: &dbghelp::Init,
addr: *mut c_void,
+ inline_context: Option<DWORD>,
cb: &mut dyn FnMut(&super::Symbol),
) {
- do_resolve(
- |info| dbghelp.SymFromAddrW()(GetCurrentProcess(), addr as DWORD64, &mut 0, info),
- |line| dbghelp.SymGetLineFromAddrW64()(GetCurrentProcess(), addr as DWORD64, &mut 0, line),
- cb,
- )
+ let current_process = GetCurrentProcess();
+
+ let addr = super::adjust_ip(addr) as DWORD64;
+
+ let (inlined_frame_count, inline_context) = if let Some(ic) = inline_context {
+ (0, ic)
+ } else {
+ let mut inlined_frame_count = dbghelp.SymAddrIncludeInlineTrace()(current_process, addr);
+
+ let mut inline_context = 0;
+
+ // If there are inlined frames but we can't load them for some reason OR if there are no
+ // inlined frames, then we disregard inlined_frame_count and inline_context.
+ if (inlined_frame_count > 0
+ && dbghelp.SymQueryInlineTrace()(
+ current_process,
+ addr,
+ 0,
+ addr,
+ addr,
+ &mut inline_context,
+ &mut 0,
+ ) != TRUE)
+ || inlined_frame_count == 0
+ {
+ inlined_frame_count = 0;
+ inline_context = 0;
+ }
+
+ (inlined_frame_count, inline_context)
+ };
+
+ let last_inline_context = inline_context + 1 + inlined_frame_count;
+
+ for inline_context in inline_context..last_inline_context {
+ do_resolve(
+ |info| {
+ dbghelp.SymFromInlineContextW()(current_process, addr, inline_context, &mut 0, info)
+ },
+ |line| {
+ dbghelp.SymGetLineFromInlineContextW()(
+ current_process,
+ addr,
+ inline_context,
+ 0,
+ &mut 0,
+ line,
+ )
+ },
+ cb,
+ );
+ }
}
unsafe fn do_resolve(
diff --git a/library/backtrace/src/symbolize/gimli.rs b/library/backtrace/src/symbolize/gimli.rs
index 7f1c6a528..3b28bf741 100644
--- a/library/backtrace/src/symbolize/gimli.rs
+++ b/library/backtrace/src/symbolize/gimli.rs
@@ -35,12 +35,14 @@ cfg_if::cfg_if! {
target_os = "freebsd",
target_os = "fuchsia",
target_os = "haiku",
+ target_os = "hurd",
target_os = "ios",
target_os = "linux",
target_os = "macos",
target_os = "openbsd",
target_os = "solaris",
target_os = "illumos",
+ target_os = "aix",
))] {
#[path = "gimli/mmap_unix.rs"]
mod mmap;
@@ -116,8 +118,17 @@ impl<'data> Context<'data> {
dwp: Option<Object<'data>>,
) -> Option<Context<'data>> {
let mut sections = gimli::Dwarf::load(|id| -> Result<_, ()> {
- let data = object.section(stash, id.name()).unwrap_or(&[]);
- Ok(EndianSlice::new(data, Endian))
+ if cfg!(not(target_os = "aix")) {
+ let data = object.section(stash, id.name()).unwrap_or(&[]);
+ Ok(EndianSlice::new(data, Endian))
+ } else {
+ if let Some(name) = id.xcoff_name() {
+ let data = object.section(stash, name).unwrap_or(&[]);
+ Ok(EndianSlice::new(data, Endian))
+ } else {
+ Ok(EndianSlice::new(&[], Endian))
+ }
+ }
})
.ok()?;
@@ -192,6 +203,9 @@ cfg_if::cfg_if! {
))] {
mod macho;
use self::macho::{handle_split_dwarf, Object};
+ } else if #[cfg(target_os = "aix")] {
+ mod xcoff;
+ use self::xcoff::{handle_split_dwarf, Object};
} else {
mod elf;
use self::elf::{handle_split_dwarf, Object};
@@ -218,6 +232,7 @@ cfg_if::cfg_if! {
target_os = "linux",
target_os = "fuchsia",
target_os = "freebsd",
+ target_os = "hurd",
target_os = "openbsd",
target_os = "netbsd",
all(target_os = "android", feature = "dl_iterate_phdr"),
@@ -234,6 +249,9 @@ cfg_if::cfg_if! {
} else if #[cfg(target_os = "haiku")] {
mod libs_haiku;
use libs_haiku::native_libraries;
+ } else if #[cfg(target_os = "aix")] {
+ mod libs_aix;
+ use libs_aix::native_libraries;
} else {
// Everything else should doesn't know how to load native libraries.
fn native_libraries() -> Vec<Library> {
@@ -261,6 +279,13 @@ struct Cache {
struct Library {
name: OsString,
+ #[cfg(target_os = "aix")]
+ /// On AIX, the library mmapped can be a member of a big-archive file.
+ /// For example, with a big-archive named libfoo.a containing libbar.so,
+ /// one can use `dlopen("libfoo.a(libbar.so)", RTLD_MEMBER | RTLD_LAZY)`
+ /// to use the `libbar.so` library. In this case, only `libbar.so` is
+ /// mmapped, not the whole `libfoo.a`.
+ member_name: OsString,
/// Segments of this library loaded into memory, and where they're loaded.
segments: Vec<LibrarySegment>,
/// The "bias" of this library, typically where it's loaded into memory.
@@ -280,6 +305,19 @@ struct LibrarySegment {
len: usize,
}
+#[cfg(target_os = "aix")]
+fn create_mapping(lib: &Library) -> Option<Mapping> {
+ let name = &lib.name;
+ let member_name = &lib.member_name;
+ Mapping::new(name.as_ref(), member_name)
+}
+
+#[cfg(not(target_os = "aix"))]
+fn create_mapping(lib: &Library) -> Option<Mapping> {
+ let name = &lib.name;
+ Mapping::new(name.as_ref())
+}
+
// unsafe because this is required to be externally synchronized
pub unsafe fn clear_symbol_cache() {
Cache::with_global(|cache| cache.mappings.clear());
@@ -360,8 +398,7 @@ impl Cache {
// When the mapping is not in the cache, create a new mapping,
// insert it into the front of the cache, and evict the oldest cache
// entry if necessary.
- let name = &self.libraries[lib].name;
- let mapping = Mapping::new(name.as_ref())?;
+ let mapping = create_mapping(&self.libraries[lib])?;
if self.mappings.len() == MAPPINGS_CACHE_SIZE {
self.mappings.pop();
diff --git a/library/backtrace/src/symbolize/gimli/elf.rs b/library/backtrace/src/symbolize/gimli/elf.rs
index b0eec0762..906a30054 100644
--- a/library/backtrace/src/symbolize/gimli/elf.rs
+++ b/library/backtrace/src/symbolize/gimli/elf.rs
@@ -308,7 +308,7 @@ const DEBUG_PATH: &[u8] = b"/usr/lib/debug";
fn debug_path_exists() -> bool {
cfg_if::cfg_if! {
- if #[cfg(any(target_os = "freebsd", target_os = "linux"))] {
+ if #[cfg(any(target_os = "freebsd", target_os = "hurd", target_os = "linux"))] {
use core::sync::atomic::{AtomicU8, Ordering};
static DEBUG_PATH_EXISTS: AtomicU8 = AtomicU8::new(0);
diff --git a/library/backtrace/src/symbolize/gimli/libs_aix.rs b/library/backtrace/src/symbolize/gimli/libs_aix.rs
new file mode 100644
index 000000000..8cac11d4d
--- /dev/null
+++ b/library/backtrace/src/symbolize/gimli/libs_aix.rs
@@ -0,0 +1,74 @@
+use super::mystd::borrow::ToOwned;
+use super::mystd::env;
+use super::mystd::ffi::{CStr, OsStr};
+use super::mystd::io::Error;
+use super::mystd::os::unix::prelude::*;
+use super::xcoff;
+use super::{Library, LibrarySegment, Vec};
+use alloc::vec;
+use core::mem;
+
+const EXE_IMAGE_BASE: u64 = 0x100000000;
+
+/// On AIX, we use `loadquery` with `L_GETINFO` flag to query libraries mmapped.
+/// See https://www.ibm.com/docs/en/aix/7.2?topic=l-loadquery-subroutine for
+/// detailed information about `loadquery`.
+pub(super) fn native_libraries() -> Vec<Library> {
+ let mut ret = Vec::new();
+ unsafe {
+ let mut buffer = vec![mem::zeroed::<libc::ld_info>(); 64];
+ loop {
+ if libc::loadquery(
+ libc::L_GETINFO,
+ buffer.as_mut_ptr() as *mut libc::c_char,
+ (mem::size_of::<libc::ld_info>() * buffer.len()) as u32,
+ ) != -1
+ {
+ break;
+ } else {
+ match Error::last_os_error().raw_os_error() {
+ Some(libc::ENOMEM) => {
+ buffer.resize(buffer.len() * 2, mem::zeroed::<libc::ld_info>());
+ }
+ Some(_) => {
+ // If other error occurs, return empty libraries.
+ return Vec::new();
+ }
+ _ => unreachable!(),
+ }
+ }
+ }
+ let mut current = buffer.as_mut_ptr();
+ loop {
+ let text_base = (*current).ldinfo_textorg as usize;
+ let filename_ptr: *const libc::c_char = &(*current).ldinfo_filename[0];
+ let bytes = CStr::from_ptr(filename_ptr).to_bytes();
+ let member_name_ptr = filename_ptr.offset((bytes.len() + 1) as isize);
+ let mut filename = OsStr::from_bytes(bytes).to_owned();
+ if text_base == EXE_IMAGE_BASE as usize {
+ if let Ok(exe) = env::current_exe() {
+ filename = exe.into_os_string();
+ }
+ }
+ let bytes = CStr::from_ptr(member_name_ptr).to_bytes();
+ let member_name = OsStr::from_bytes(bytes).to_owned();
+ if let Some(image) = xcoff::parse_image(filename.as_ref(), &member_name) {
+ ret.push(Library {
+ name: filename,
+ member_name,
+ segments: vec![LibrarySegment {
+ stated_virtual_memory_address: image.base as usize,
+ len: image.size,
+ }],
+ bias: (text_base + image.offset).wrapping_sub(image.base as usize),
+ });
+ }
+ if (*current).ldinfo_next == 0 {
+ break;
+ }
+ current = (current as *mut libc::c_char).offset((*current).ldinfo_next as isize)
+ as *mut libc::ld_info;
+ }
+ }
+ return ret;
+}
diff --git a/library/backtrace/src/symbolize/gimli/libs_dl_iterate_phdr.rs b/library/backtrace/src/symbolize/gimli/libs_dl_iterate_phdr.rs
index 9f0304ce8..518512fff 100644
--- a/library/backtrace/src/symbolize/gimli/libs_dl_iterate_phdr.rs
+++ b/library/backtrace/src/symbolize/gimli/libs_dl_iterate_phdr.rs
@@ -18,14 +18,18 @@ pub(super) fn native_libraries() -> Vec<Library> {
}
fn infer_current_exe(base_addr: usize) -> OsString {
- if let Ok(entries) = super::parse_running_mmaps::parse_maps() {
- let opt_path = entries
- .iter()
- .find(|e| e.ip_matches(base_addr) && e.pathname().len() > 0)
- .map(|e| e.pathname())
- .cloned();
- if let Some(path) = opt_path {
- return path;
+ cfg_if::cfg_if! {
+ if #[cfg(not(target_os = "hurd"))] {
+ if let Ok(entries) = super::parse_running_mmaps::parse_maps() {
+ let opt_path = entries
+ .iter()
+ .find(|e| e.ip_matches(base_addr) && e.pathname().len() > 0)
+ .map(|e| e.pathname())
+ .cloned();
+ if let Some(path) = opt_path {
+ return path;
+ }
+ }
}
}
env::current_exe().map(|e| e.into()).unwrap_or_default()
diff --git a/library/backtrace/src/symbolize/gimli/xcoff.rs b/library/backtrace/src/symbolize/gimli/xcoff.rs
new file mode 100644
index 000000000..dd308840f
--- /dev/null
+++ b/library/backtrace/src/symbolize/gimli/xcoff.rs
@@ -0,0 +1,186 @@
+use super::mystd::ffi::{OsStr, OsString};
+use super::mystd::os::unix::ffi::OsStrExt;
+use super::mystd::str;
+use super::{gimli, Context, Endian, EndianSlice, Mapping, Path, Stash, Vec};
+use alloc::sync::Arc;
+use core::ops::Deref;
+use object::read::archive::ArchiveFile;
+use object::read::xcoff::{FileHeader, SectionHeader, XcoffFile, XcoffSymbol};
+use object::Object as _;
+use object::ObjectSection as _;
+use object::ObjectSymbol as _;
+use object::SymbolFlags;
+
+#[cfg(target_pointer_width = "32")]
+type Xcoff = object::xcoff::FileHeader32;
+#[cfg(target_pointer_width = "64")]
+type Xcoff = object::xcoff::FileHeader64;
+
+impl Mapping {
+ pub fn new(path: &Path, member_name: &OsString) -> Option<Mapping> {
+ let map = super::mmap(path)?;
+ Mapping::mk(map, |data, stash| {
+ if member_name.is_empty() {
+ Context::new(stash, Object::parse(data)?, None, None)
+ } else {
+ let archive = ArchiveFile::parse(data).ok()?;
+ for member in archive
+ .members()
+ .filter_map(|m| m.ok())
+ .filter(|m| OsStr::from_bytes(m.name()) == member_name)
+ {
+ let member_data = member.data(data).ok()?;
+ if let Some(obj) = Object::parse(member_data) {
+ return Context::new(stash, obj, None, None);
+ }
+ }
+ None
+ }
+ })
+ }
+}
+
+struct ParsedSym<'a> {
+ address: u64,
+ size: u64,
+ name: &'a str,
+}
+
+pub struct Object<'a> {
+ syms: Vec<ParsedSym<'a>>,
+ file: XcoffFile<'a, Xcoff>,
+}
+
+pub struct Image {
+ pub offset: usize,
+ pub base: u64,
+ pub size: usize,
+}
+
+pub fn parse_xcoff(data: &[u8]) -> Option<Image> {
+ let mut offset = 0;
+ let header = Xcoff::parse(data, &mut offset).ok()?;
+ let _ = header.aux_header(data, &mut offset).ok()?;
+ let sections = header.sections(data, &mut offset).ok()?;
+ if let Some(section) = sections.iter().find(|s| {
+ if let Ok(name) = str::from_utf8(&s.s_name()[0..5]) {
+ name == ".text"
+ } else {
+ false
+ }
+ }) {
+ Some(Image {
+ offset: section.s_scnptr() as usize,
+ base: section.s_paddr() as u64,
+ size: section.s_size() as usize,
+ })
+ } else {
+ None
+ }
+}
+
+pub fn parse_image(path: &Path, member_name: &OsString) -> Option<Image> {
+ let map = super::mmap(path)?;
+ let data = map.deref();
+ if member_name.is_empty() {
+ return parse_xcoff(data);
+ } else {
+ let archive = ArchiveFile::parse(data).ok()?;
+ for member in archive
+ .members()
+ .filter_map(|m| m.ok())
+ .filter(|m| OsStr::from_bytes(m.name()) == member_name)
+ {
+ let member_data = member.data(data).ok()?;
+ if let Some(image) = parse_xcoff(member_data) {
+ return Some(image);
+ }
+ }
+ None
+ }
+}
+
+impl<'a> Object<'a> {
+ fn get_concrete_size(file: &XcoffFile<'a, Xcoff>, sym: &XcoffSymbol<'a, '_, Xcoff>) -> u64 {
+ match sym.flags() {
+ SymbolFlags::Xcoff {
+ n_sclass: _,
+ x_smtyp: _,
+ x_smclas: _,
+ containing_csect: Some(index),
+ } => {
+ if let Ok(tgt_sym) = file.symbol_by_index(index) {
+ Self::get_concrete_size(file, &tgt_sym)
+ } else {
+ 0
+ }
+ }
+ _ => sym.size(),
+ }
+ }
+
+ fn parse(data: &'a [u8]) -> Option<Object<'a>> {
+ let file = XcoffFile::parse(data).ok()?;
+ let mut syms = file
+ .symbols()
+ .filter_map(|sym| {
+ let name = sym.name().map_or("", |v| v);
+ let address = sym.address();
+ let size = Self::get_concrete_size(&file, &sym);
+ if name == ".text" || name == ".data" {
+ // We don't want to include ".text" and ".data" symbols.
+ // If they are included, since their ranges cover other
+ // symbols, when searching a symbol for a given address,
+ // ".text" or ".data" is returned. That's not what we expect.
+ None
+ } else {
+ Some(ParsedSym {
+ address,
+ size,
+ name,
+ })
+ }
+ })
+ .collect::<Vec<_>>();
+ syms.sort_by_key(|s| s.address);
+ Some(Object { syms, file })
+ }
+
+ pub fn section(&self, _: &Stash, name: &str) -> Option<&'a [u8]> {
+ Some(self.file.section_by_name(name)?.data().ok()?)
+ }
+
+ pub fn search_symtab<'b>(&'b self, addr: u64) -> Option<&'b [u8]> {
+ // Symbols, except ".text" and ".data", are sorted and are not overlapped each other,
+ // so we can just perform a binary search here.
+ let i = match self.syms.binary_search_by_key(&addr, |sym| sym.address) {
+ Ok(i) => i,
+ Err(i) => i.checked_sub(1)?,
+ };
+ let sym = self.syms.get(i)?;
+ if (sym.address..sym.address + sym.size).contains(&addr) {
+ // On AIX, for a function call, for example, `foo()`, we have
+ // two symbols `foo` and `.foo`. `foo` references the function
+ // descriptor and `.foo` references the function entry.
+ // See https://www.ibm.com/docs/en/xl-fortran-aix/16.1.0?topic=calls-linkage-convention-function
+ // for more information.
+ // We trim the prefix `.` here, so that the rust demangler can work
+ // properly.
+ Some(sym.name.trim_start_matches(".").as_bytes())
+ } else {
+ None
+ }
+ }
+
+ pub(super) fn search_object_map(&self, _addr: u64) -> Option<(&Context<'_>, u64)> {
+ None
+ }
+}
+
+pub(super) fn handle_split_dwarf<'data>(
+ _package: Option<&gimli::DwarfPackage<EndianSlice<'data, Endian>>>,
+ _stash: &'data Stash,
+ _load: addr2line::SplitDwarfLoad<EndianSlice<'data, Endian>>,
+) -> Option<Arc<gimli::Dwarf<EndianSlice<'data, Endian>>>> {
+ None
+}
diff --git a/library/backtrace/src/windows.rs b/library/backtrace/src/windows.rs
index 92c2b2e66..13287f7c3 100644
--- a/library/backtrace/src/windows.rs
+++ b/library/backtrace/src/windows.rs
@@ -19,6 +19,9 @@ cfg_if::cfg_if! {
pub use self::winapi::PUNWIND_HISTORY_TABLE;
#[cfg(target_pointer_width = "64")]
pub use self::winapi::PRUNTIME_FUNCTION;
+ pub use self::winapi::PEXCEPTION_ROUTINE;
+ #[cfg(target_pointer_width = "64")]
+ pub use self::winapi::PKNONVOLATILE_CONTEXT_POINTERS;
mod winapi {
pub use winapi::ctypes::*;
@@ -35,6 +38,22 @@ cfg_if::cfg_if! {
pub use winapi::um::tlhelp32::*;
pub use winapi::um::winbase::*;
pub use winapi::um::winnt::*;
+
+ // Work around winapi not having this function on aarch64.
+ #[cfg(target_arch = "aarch64")]
+ #[link(name = "kernel32")]
+ extern "system" {
+ pub fn RtlVirtualUnwind(
+ HandlerType: ULONG,
+ ImageBase: ULONG64,
+ ControlPc: ULONG64,
+ FunctionEntry: PRUNTIME_FUNCTION,
+ ContextRecord: PCONTEXT,
+ HandlerData: *mut PVOID,
+ EstablisherFrame: PULONG64,
+ ContextPointers: PKNONVOLATILE_CONTEXT_POINTERS
+ ) -> PEXCEPTION_ROUTINE;
+ }
}
} else {
pub use core::ffi::c_void;
@@ -45,6 +64,9 @@ cfg_if::cfg_if! {
pub type PRUNTIME_FUNCTION = *mut c_void;
#[cfg(target_pointer_width = "64")]
pub type PUNWIND_HISTORY_TABLE = *mut c_void;
+ pub type PEXCEPTION_ROUTINE = *mut c_void;
+ #[cfg(target_pointer_width = "64")]
+ pub type PKNONVOLATILE_CONTEXT_POINTERS = *mut c_void;
}
}
@@ -359,6 +381,7 @@ ffi! {
pub type LPCSTR = *const i8;
pub type PWSTR = *mut u16;
pub type WORD = u16;
+ pub type USHORT = u16;
pub type ULONG = u32;
pub type ULONG64 = u64;
pub type WCHAR = u16;
@@ -370,6 +393,8 @@ ffi! {
pub type LPVOID = *mut c_void;
pub type LPCVOID = *const c_void;
pub type LPMODULEENTRY32W = *mut MODULEENTRY32W;
+ pub type PULONG = *mut ULONG;
+ pub type PULONG64 = *mut ULONG64;
#[link(name = "kernel32")]
extern "system" {
@@ -435,6 +460,33 @@ ffi! {
lpme: LPMODULEENTRY32W,
) -> BOOL;
}
+
+ #[link(name = "ntdll")]
+ extern "system" {
+ pub fn RtlCaptureStackBackTrace(
+ FramesToSkip: ULONG,
+ FramesToCapture: ULONG,
+ BackTrace: *mut PVOID,
+ BackTraceHash: PULONG,
+ ) -> USHORT;
+ }
+}
+
+#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
+ffi! {
+ #[link(name = "kernel32")]
+ extern "system" {
+ pub fn RtlVirtualUnwind(
+ HandlerType: ULONG,
+ ImageBase: ULONG64,
+ ControlPc: ULONG64,
+ FunctionEntry: PRUNTIME_FUNCTION,
+ ContextRecord: PCONTEXT,
+ HandlerData: *mut PVOID,
+ EstablisherFrame: PULONG64,
+ ContextPointers: PKNONVOLATILE_CONTEXT_POINTERS
+ ) -> PEXCEPTION_ROUTINE;
+ }
}
#[cfg(target_pointer_width = "64")]
diff --git a/library/backtrace/tests/accuracy/main.rs b/library/backtrace/tests/accuracy/main.rs
index 149203a1b..79b2d3797 100644
--- a/library/backtrace/tests/accuracy/main.rs
+++ b/library/backtrace/tests/accuracy/main.rs
@@ -31,6 +31,8 @@ fn doit() {
dir.push("dylib_dep.dll");
} else if cfg!(target_os = "macos") {
dir.push("libdylib_dep.dylib");
+ } else if cfg!(target_os = "aix") {
+ dir.push("libdylib_dep.a");
} else {
dir.push("libdylib_dep.so");
}
@@ -103,7 +105,7 @@ fn verify(filelines: &[Pos]) {
loop {
let sym = match symbols.next() {
Some(sym) => sym,
- None => panic!("failed to find {}:{}", file, line),
+ None => panic!("failed to find {file}:{line}"),
};
if let Some(filename) = sym.filename() {
if let Some(lineno) = sym.lineno() {
diff --git a/library/backtrace/tests/sgx-image-base.rs b/library/backtrace/tests/sgx-image-base.rs
new file mode 100644
index 000000000..c29a8b67a
--- /dev/null
+++ b/library/backtrace/tests/sgx-image-base.rs
@@ -0,0 +1,56 @@
+#![cfg(all(target_env = "sgx", target_vendor = "fortanix"))]
+#![feature(sgx_platform)]
+
+#[cfg(feature = "std")]
+#[test]
+fn sgx_image_base_with_std() {
+ use backtrace::trace;
+
+ let image_base = std::os::fortanix_sgx::mem::image_base();
+
+ let mut frame_ips = Vec::new();
+ trace(|frame| {
+ frame_ips.push(frame.ip());
+ true
+ });
+
+ assert!(frame_ips.len() > 0);
+ for ip in frame_ips {
+ let ip: u64 = ip as _;
+ assert!(ip < image_base);
+ }
+}
+
+#[cfg(not(feature = "std"))]
+#[test]
+fn sgx_image_base_no_std() {
+ use backtrace::trace_unsynchronized;
+
+ fn guess_image_base() -> u64 {
+ let mut top_frame_ip = None;
+ unsafe {
+ trace_unsynchronized(|frame| {
+ top_frame_ip = Some(frame.ip());
+ false
+ });
+ }
+ top_frame_ip.unwrap() as u64 & 0xFFFFFF000000
+ }
+
+ let image_base = guess_image_base();
+ backtrace::set_image_base(image_base as _);
+
+ let mut frame_ips = Vec::new();
+ unsafe {
+ trace_unsynchronized(|frame| {
+ frame_ips.push(frame.ip());
+ true
+ });
+ }
+
+ assert!(frame_ips.len() > 0);
+ for ip in frame_ips {
+ let ip: u64 = ip as _;
+ assert!(ip < image_base);
+ }
+}
diff --git a/library/backtrace/tests/smoke.rs b/library/backtrace/tests/smoke.rs
index 683a6f0db..715f567f3 100644
--- a/library/backtrace/tests/smoke.rs
+++ b/library/backtrace/tests/smoke.rs
@@ -1,6 +1,27 @@
use backtrace::Frame;
use std::thread;
+fn get_actual_fn_pointer(fp: usize) -> usize {
+ // On AIX, the function name references a function descriptor.
+ // A function descriptor consists of (See https://reviews.llvm.org/D62532)
+ // * The address of the entry point of the function.
+ // * The TOC base address for the function.
+ // * The environment pointer.
+ // Deref `fp` directly so that we can get the address of `fp`'s
+ // entry point in text section.
+ //
+ // For TOC, one can find more information in
+ // https://www.ibm.com/docs/en/aix/7.2?topic=program-understanding-programming-toc
+ if cfg!(target_os = "aix") {
+ unsafe {
+ let actual_fn_entry = *(fp as *const usize);
+ actual_fn_entry
+ }
+ } else {
+ fp
+ }
+}
+
#[test]
// FIXME: shouldn't ignore this test on i686-msvc, unsure why it's failing
#[cfg_attr(all(target_arch = "x86", target_env = "msvc"), ignore)]
@@ -20,7 +41,7 @@ fn smoke_test_frames() {
// Various platforms have various bits of weirdness about their
// backtraces. To find a good starting spot let's search through the
// frames
- let target = frame_4 as usize;
+ let target = get_actual_fn_pointer(frame_4 as usize);
let offset = v
.iter()
.map(|frame| frame.symbol_address() as usize)
@@ -39,7 +60,7 @@ fn smoke_test_frames() {
assert_frame(
frames.next().unwrap(),
- frame_4 as usize,
+ get_actual_fn_pointer(frame_4 as usize),
"frame_4",
"tests/smoke.rs",
start_line + 6,
@@ -47,7 +68,7 @@ fn smoke_test_frames() {
);
assert_frame(
frames.next().unwrap(),
- frame_3 as usize,
+ get_actual_fn_pointer(frame_3 as usize),
"frame_3",
"tests/smoke.rs",
start_line + 3,
@@ -55,7 +76,7 @@ fn smoke_test_frames() {
);
assert_frame(
frames.next().unwrap(),
- frame_2 as usize,
+ get_actual_fn_pointer(frame_2 as usize),
"frame_2",
"tests/smoke.rs",
start_line + 2,
@@ -63,7 +84,7 @@ fn smoke_test_frames() {
);
assert_frame(
frames.next().unwrap(),
- frame_1 as usize,
+ get_actual_fn_pointer(frame_1 as usize),
"frame_1",
"tests/smoke.rs",
start_line + 1,
@@ -71,7 +92,7 @@ fn smoke_test_frames() {
);
assert_frame(
frames.next().unwrap(),
- smoke_test_frames as usize,
+ get_actual_fn_pointer(smoke_test_frames as usize),
"smoke_test_frames",
"",
0,
@@ -150,9 +171,7 @@ fn smoke_test_frames() {
if cfg!(debug_assertions) {
assert!(
name.contains(expected_name),
- "didn't find `{}` in `{}`",
- expected_name,
- name
+ "didn't find `{expected_name}` in `{name}`"
);
}
@@ -164,18 +183,13 @@ fn smoke_test_frames() {
if !expected_file.is_empty() {
assert!(
file.ends_with(expected_file),
- "{:?} didn't end with {:?}",
- file,
- expected_file
+ "{file:?} didn't end with {expected_file:?}"
);
}
if expected_line != 0 {
assert!(
line == expected_line,
- "bad line number on frame for `{}`: {} != {}",
- expected_name,
- line,
- expected_line
+ "bad line number on frame for `{expected_name}`: {line} != {expected_line}"
);
}
@@ -185,10 +199,7 @@ fn smoke_test_frames() {
if expected_col != 0 {
assert!(
col == expected_col,
- "bad column number on frame for `{}`: {} != {}",
- expected_name,
- col,
- expected_col
+ "bad column number on frame for `{expected_name}`: {col} != {expected_col}",
);
}
}
diff --git a/library/core/src/alloc/layout.rs b/library/core/src/alloc/layout.rs
index 597303037..65946e09f 100644
--- a/library/core/src/alloc/layout.rs
+++ b/library/core/src/alloc/layout.rs
@@ -130,6 +130,8 @@ impl Layout {
}
/// The minimum byte alignment for a memory block of this layout.
+ ///
+ /// The returned alignment is guaranteed to be a power of two.
#[stable(feature = "alloc_layout", since = "1.28.0")]
#[rustc_const_stable(feature = "const_alloc_layout_size_align", since = "1.50.0")]
#[must_use = "this returns the minimum alignment, \
diff --git a/library/core/src/arch.rs b/library/core/src/arch.rs
index fc2a5b89c..8817ec077 100644
--- a/library/core/src/arch.rs
+++ b/library/core/src/arch.rs
@@ -1,5 +1,6 @@
#![doc = include_str!("../../stdarch/crates/core_arch/src/core_arch_docs.md")]
+#[allow(unused_imports)]
#[stable(feature = "simd_arch", since = "1.27.0")]
pub use crate::core_arch::arch::*;
diff --git a/library/core/src/array/iter.rs b/library/core/src/array/iter.rs
index 587877dff..321357a15 100644
--- a/library/core/src/array/iter.rs
+++ b/library/core/src/array/iter.rs
@@ -4,7 +4,7 @@ use crate::num::NonZeroUsize;
use crate::{
fmt,
intrinsics::transmute_unchecked,
- iter::{self, ExactSizeIterator, FusedIterator, TrustedLen},
+ iter::{self, ExactSizeIterator, FusedIterator, TrustedLen, TrustedRandomAccessNoCoerce},
mem::MaybeUninit,
ops::{IndexRange, Range},
ptr,
@@ -13,6 +13,7 @@ use crate::{
/// A by-value [array] iterator.
#[stable(feature = "array_value_iter", since = "1.51.0")]
#[rustc_insignificant_dtor]
+#[rustc_diagnostic_item = "ArrayIntoIter"]
pub struct IntoIter<T, const N: usize> {
/// This is the array we are iterating over.
///
@@ -293,6 +294,12 @@ impl<T, const N: usize> Iterator for IntoIter<T, N> {
NonZeroUsize::new(remaining).map_or(Ok(()), Err)
}
+
+ #[inline]
+ unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
+ // SAFETY: The caller must provide an idx that is in bound of the remainder.
+ unsafe { self.data.as_ptr().add(self.alive.start()).add(idx).cast::<T>().read() }
+ }
}
#[stable(feature = "array_value_iter_impls", since = "1.40.0")]
@@ -374,6 +381,25 @@ impl<T, const N: usize> FusedIterator for IntoIter<T, N> {}
#[stable(feature = "array_value_iter_impls", since = "1.40.0")]
unsafe impl<T, const N: usize> TrustedLen for IntoIter<T, N> {}
+#[doc(hidden)]
+#[unstable(issue = "none", feature = "std_internals")]
+#[rustc_unsafe_specialization_marker]
+pub trait NonDrop {}
+
+// T: Copy as approximation for !Drop since get_unchecked does not advance self.alive
+// and thus we can't implement drop-handling
+#[unstable(issue = "none", feature = "std_internals")]
+impl<T: Copy> NonDrop for T {}
+
+#[doc(hidden)]
+#[unstable(issue = "none", feature = "std_internals")]
+unsafe impl<T, const N: usize> TrustedRandomAccessNoCoerce for IntoIter<T, N>
+where
+ T: NonDrop,
+{
+ const MAY_HAVE_SIDE_EFFECT: bool = false;
+}
+
#[stable(feature = "array_value_iter_impls", since = "1.40.0")]
impl<T: Clone, const N: usize> Clone for IntoIter<T, N> {
fn clone(&self) -> Self {
diff --git a/library/core/src/cell.rs b/library/core/src/cell.rs
index 3b4d99221..0978b3c92 100644
--- a/library/core/src/cell.rs
+++ b/library/core/src/cell.rs
@@ -556,7 +556,7 @@ impl<T: ?Sized> Cell<T> {
#[inline]
#[stable(feature = "cell_as_ptr", since = "1.12.0")]
#[rustc_const_stable(feature = "const_cell_as_ptr", since = "1.32.0")]
- #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
+ #[rustc_never_returns_null_ptr]
pub const fn as_ptr(&self) -> *mut T {
self.value.get()
}
@@ -755,7 +755,7 @@ impl Display for BorrowMutError {
}
// This ensures the panicking code is outlined from `borrow_mut` for `RefCell`.
-#[inline(never)]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
#[track_caller]
#[cold]
fn panic_already_borrowed(err: BorrowMutError) -> ! {
@@ -763,7 +763,7 @@ fn panic_already_borrowed(err: BorrowMutError) -> ! {
}
// This ensures the panicking code is outlined from `borrow` for `RefCell`.
-#[inline(never)]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
#[track_caller]
#[cold]
fn panic_already_mutably_borrowed(err: BorrowError) -> ! {
@@ -1112,7 +1112,7 @@ impl<T: ?Sized> RefCell<T> {
/// ```
#[inline]
#[stable(feature = "cell_as_ptr", since = "1.12.0")]
- #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
+ #[rustc_never_returns_null_ptr]
pub fn as_ptr(&self) -> *mut T {
self.value.get()
}
@@ -1423,6 +1423,7 @@ impl Clone for BorrowRef<'_> {
/// See the [module-level documentation](self) for more.
#[stable(feature = "rust1", since = "1.0.0")]
#[must_not_suspend = "holding a Ref across suspend points can cause BorrowErrors"]
+#[rustc_diagnostic_item = "RefCellRef"]
pub struct Ref<'b, T: ?Sized + 'b> {
// NB: we use a pointer instead of `&'b T` to avoid `noalias` violations, because a
// `Ref` argument doesn't hold immutability for its whole scope, only until it drops.
@@ -1804,6 +1805,7 @@ impl<'b> BorrowRefMut<'b> {
/// See the [module-level documentation](self) for more.
#[stable(feature = "rust1", since = "1.0.0")]
#[must_not_suspend = "holding a RefMut across suspend points can cause BorrowErrors"]
+#[rustc_diagnostic_item = "RefCellRefMut"]
pub struct RefMut<'b, T: ?Sized + 'b> {
// NB: we use a pointer instead of `&'b mut T` to avoid `noalias` violations, because a
// `RefMut` argument doesn't hold exclusivity for its whole scope, only until it drops.
@@ -2107,7 +2109,7 @@ impl<T: ?Sized> UnsafeCell<T> {
#[inline(always)]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_unsafecell_get", since = "1.32.0")]
- #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
+ #[rustc_never_returns_null_ptr]
pub const fn get(&self) -> *mut T {
// We can just cast the pointer from `UnsafeCell<T>` to `T` because of
// #[repr(transparent)]. This exploits std's special status, there is
@@ -2251,7 +2253,7 @@ impl<T: ?Sized> SyncUnsafeCell<T> {
/// when casting to `&mut T`, and ensure that there are no mutations
/// or mutable aliases going on when casting to `&T`
#[inline]
- #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
+ #[rustc_never_returns_null_ptr]
pub const fn get(&self) -> *mut T {
self.value.get()
}
diff --git a/library/core/src/cell/once.rs b/library/core/src/cell/once.rs
index 2e8534f65..3877a0c48 100644
--- a/library/core/src/cell/once.rs
+++ b/library/core/src/cell/once.rs
@@ -87,10 +87,40 @@ impl<T> OnceCell<T> {
#[inline]
#[stable(feature = "once_cell", since = "1.70.0")]
pub fn set(&self, value: T) -> Result<(), T> {
- // SAFETY: Safe because we cannot have overlapping mutable borrows
- let slot = unsafe { &*self.inner.get() };
- if slot.is_some() {
- return Err(value);
+ match self.try_insert(value) {
+ Ok(_) => Ok(()),
+ Err((_, value)) => Err(value),
+ }
+ }
+
+ /// Sets the contents of the cell to `value` if the cell was empty, then
+ /// returns a reference to it.
+ ///
+ /// # Errors
+ ///
+ /// This method returns `Ok(&value)` if the cell was empty and
+ /// `Err(&current_value, value)` if it was full.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(once_cell_try_insert)]
+ ///
+ /// use std::cell::OnceCell;
+ ///
+ /// let cell = OnceCell::new();
+ /// assert!(cell.get().is_none());
+ ///
+ /// assert_eq!(cell.try_insert(92), Ok(&92));
+ /// assert_eq!(cell.try_insert(62), Err((&92, 62)));
+ ///
+ /// assert!(cell.get().is_some());
+ /// ```
+ #[inline]
+ #[unstable(feature = "once_cell_try_insert", issue = "116693")]
+ pub fn try_insert(&self, value: T) -> Result<&T, (&T, T)> {
+ if let Some(old) = self.get() {
+ return Err((old, value));
}
// SAFETY: This is the only place where we set the slot, no races
@@ -98,8 +128,7 @@ impl<T> OnceCell<T> {
// checked that slot is currently `None`, so this write
// maintains the `inner`'s invariant.
let slot = unsafe { &mut *self.inner.get() };
- *slot = Some(value);
- Ok(())
+ Ok(slot.insert(value))
}
/// Gets the contents of the cell, initializing it with `f`
@@ -183,10 +212,9 @@ impl<T> OnceCell<T> {
let val = outlined_call(f)?;
// Note that *some* forms of reentrant initialization might lead to
// UB (see `reentrant_init` test). I believe that just removing this
- // `assert`, while keeping `set/get` would be sound, but it seems
+ // `panic`, while keeping `try_insert` would be sound, but it seems
// better to panic, rather than to silently use an old value.
- assert!(self.set(val).is_ok(), "reentrant init");
- Ok(self.get().unwrap())
+ if let Ok(val) = self.try_insert(val) { Ok(val) } else { panic!("reentrant init") }
}
/// Consumes the cell, returning the wrapped value.
diff --git a/library/core/src/char/decode.rs b/library/core/src/char/decode.rs
index dbfe251f2..23319fbe5 100644
--- a/library/core/src/char/decode.rs
+++ b/library/core/src/char/decode.rs
@@ -2,6 +2,7 @@
use crate::error::Error;
use crate::fmt;
+use crate::iter::FusedIterator;
/// An iterator that decodes UTF-16 encoded code points from an iterator of `u16`s.
///
@@ -105,6 +106,9 @@ impl<I: Iterator<Item = u16>> Iterator for DecodeUtf16<I> {
}
}
+#[stable(feature = "decode_utf16_fused_iterator", since = "1.75.0")]
+impl<I: Iterator<Item = u16> + FusedIterator> FusedIterator for DecodeUtf16<I> {}
+
impl DecodeUtf16Error {
/// Returns the unpaired surrogate which caused this error.
#[must_use]
diff --git a/library/core/src/char/methods.rs b/library/core/src/char/methods.rs
index 4ac956e7b..7ce33bdd4 100644
--- a/library/core/src/char/methods.rs
+++ b/library/core/src/char/methods.rs
@@ -1450,7 +1450,7 @@ impl char {
#[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
#[inline]
pub const fn is_ascii_alphanumeric(&self) -> bool {
- matches!(*self, '0'..='9' | 'A'..='Z' | 'a'..='z')
+ matches!(*self, '0'..='9') | matches!(*self, 'A'..='Z') | matches!(*self, 'a'..='z')
}
/// Checks if the value is an ASCII decimal digit:
@@ -1553,7 +1553,7 @@ impl char {
#[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
#[inline]
pub const fn is_ascii_hexdigit(&self) -> bool {
- matches!(*self, '0'..='9' | 'A'..='F' | 'a'..='f')
+ matches!(*self, '0'..='9') | matches!(*self, 'A'..='F') | matches!(*self, 'a'..='f')
}
/// Checks if the value is an ASCII punctuation character:
@@ -1591,7 +1591,10 @@ impl char {
#[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
#[inline]
pub const fn is_ascii_punctuation(&self) -> bool {
- matches!(*self, '!'..='/' | ':'..='@' | '['..='`' | '{'..='~')
+ matches!(*self, '!'..='/')
+ | matches!(*self, ':'..='@')
+ | matches!(*self, '['..='`')
+ | matches!(*self, '{'..='~')
}
/// Checks if the value is an ASCII graphic character:
diff --git a/library/core/src/cmp.rs b/library/core/src/cmp.rs
index 360806167..fadf2fcc9 100644
--- a/library/core/src/cmp.rs
+++ b/library/core/src/cmp.rs
@@ -3,14 +3,17 @@
//! This module contains various tools for comparing and ordering values. In
//! summary:
//!
-//! * [`Eq`] and [`PartialEq`] are traits that allow you to define total and
-//! partial equality between values, respectively. Implementing them overloads
-//! the `==` and `!=` operators.
+//! * [`PartialEq<Rhs>`] overloads the `==` and `!=` operators. In cases where
+//! `Rhs` (the right hand side's type) is `Self`, this trait corresponds to a
+//! partial equivalence relation.
+//! * [`Eq`] indicates that the overloaded `==` operator corresponds to an
+//! equivalence relation.
//! * [`Ord`] and [`PartialOrd`] are traits that allow you to define total and
//! partial orderings between values, respectively. Implementing them overloads
//! the `<`, `<=`, `>`, and `>=` operators.
//! * [`Ordering`] is an enum returned by the main functions of [`Ord`] and
-//! [`PartialOrd`], and describes an ordering.
+//! [`PartialOrd`], and describes an ordering of two values (less, equal, or
+//! greater).
//! * [`Reverse`] is a struct that allows you to easily reverse an ordering.
//! * [`max`] and [`min`] are functions that build off of [`Ord`] and allow you
//! to find the maximum or minimum of two values.
@@ -27,16 +30,21 @@ pub(crate) use bytewise::BytewiseEq;
use self::Ordering::*;
-/// Trait for equality comparisons.
+/// Trait for comparisons using the equality operator.
+///
+/// Implementing this trait for types provides the `==` and `!=` operators for
+/// those types.
///
/// `x.eq(y)` can also be written `x == y`, and `x.ne(y)` can be written `x != y`.
/// We use the easier-to-read infix notation in the remainder of this documentation.
///
-/// This trait allows for partial equality, for types that do not have a full
-/// equivalence relation. For example, in floating point numbers `NaN != NaN`,
-/// so floating point types implement `PartialEq` but not [`trait@Eq`].
-/// Formally speaking, when `Rhs == Self`, this trait corresponds to a [partial equivalence
-/// relation](https://en.wikipedia.org/wiki/Partial_equivalence_relation).
+/// This trait allows for comparisons using the equality operator, for types
+/// that do not have a full equivalence relation. For example, in floating point
+/// numbers `NaN != NaN`, so floating point types implement `PartialEq` but not
+/// [`trait@Eq`]. Formally speaking, when `Rhs == Self`, this trait corresponds
+/// to a [partial equivalence relation].
+///
+/// [partial equivalence relation]: https://en.wikipedia.org/wiki/Partial_equivalence_relation
///
/// Implementations must ensure that `eq` and `ne` are consistent with each other:
///
@@ -242,15 +250,15 @@ pub macro PartialEq($item:item) {
/* compiler built-in */
}
-/// Trait for equality comparisons which are [equivalence relations](
+/// Trait for comparisons corresponding to [equivalence relations](
/// https://en.wikipedia.org/wiki/Equivalence_relation).
///
-/// This means, that in addition to `a == b` and `a != b` being strict inverses, the equality must
-/// be (for all `a`, `b` and `c`):
+/// This means, that in addition to `a == b` and `a != b` being strict inverses,
+/// the relation must be (for all `a`, `b` and `c`):
///
/// - reflexive: `a == a`;
-/// - symmetric: `a == b` implies `b == a`; and
-/// - transitive: `a == b` and `b == c` implies `a == c`.
+/// - symmetric: `a == b` implies `b == a` (required by `PartialEq` as well); and
+/// - transitive: `a == b` and `b == c` implies `a == c` (required by `PartialEq` as well).
///
/// This property cannot be checked by the compiler, and therefore `Eq` implies
/// [`PartialEq`], and has no extra methods.
@@ -260,6 +268,10 @@ pub macro PartialEq($item:item) {
/// undefined behavior. This means that `unsafe` code **must not** rely on the correctness of these
/// methods.
///
+/// Implement `Eq` in addition to `PartialEq` if it's guaranteed that
+/// `PartialEq::eq(a, a)` always returns `true` (reflexivity), in addition to
+/// the symmetric and transitive properties already required by `PartialEq`.
+///
/// ## Derivable
///
/// This trait can be used with `#[derive]`. When `derive`d, because `Eq` has
@@ -299,8 +311,7 @@ pub trait Eq: PartialEq<Self> {
//
// This should never be implemented by hand.
#[doc(hidden)]
- #[cfg_attr(bootstrap, no_coverage)] // rust-lang/rust#84605
- #[cfg_attr(not(bootstrap), coverage(off))] //
+ #[coverage(off)]
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn assert_receiver_is_total_eq(&self) {}
@@ -310,8 +321,7 @@ pub trait Eq: PartialEq<Self> {
#[rustc_builtin_macro]
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
#[allow_internal_unstable(core_intrinsics, derive_eq, structural_match)]
-#[cfg_attr(bootstrap, allow_internal_unstable(no_coverage))]
-#[cfg_attr(not(bootstrap), allow_internal_unstable(coverage_attribute))]
+#[allow_internal_unstable(coverage_attribute)]
pub macro Eq($item:item) {
/* compiler built-in */
}
@@ -676,12 +686,19 @@ impl<T: Clone> Clone for Reverse<T> {
///
/// ## Corollaries
///
-/// From the above and the requirements of `PartialOrd`, it follows that `<` defines a strict total order.
-/// This means that for all `a`, `b` and `c`:
+/// From the above and the requirements of `PartialOrd`, it follows that for
+/// all `a`, `b` and `c`:
///
/// - exactly one of `a < b`, `a == b` or `a > b` is true; and
/// - `<` is transitive: `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
///
+/// Mathematically speaking, the `<` operator defines a strict [weak order]. In
+/// cases where `==` conforms to mathematical equality, it also defines a
+/// strict [total order].
+///
+/// [weak order]: https://en.wikipedia.org/wiki/Weak_ordering
+/// [total order]: https://en.wikipedia.org/wiki/Total_order
+///
/// ## Derivable
///
/// This trait can be used with `#[derive]`.
@@ -723,7 +740,7 @@ impl<T: Clone> Clone for Reverse<T> {
/// - Two sequences are compared element by element.
/// - The first mismatching element defines which sequence is lexicographically less or greater than the other.
/// - If one sequence is a prefix of another, the shorter sequence is lexicographically less than the other.
-/// - If two sequence have equivalent elements and are of the same length, then the sequences are lexicographically equal.
+/// - If two sequences have equivalent elements and are of the same length, then the sequences are lexicographically equal.
/// - An empty sequence is lexicographically less than any non-empty sequence.
/// - Two empty sequences are lexicographically equal.
///
@@ -790,6 +807,7 @@ pub trait Ord: Eq + PartialOrd<Self> {
/// ```
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_diagnostic_item = "ord_cmp_method"]
fn cmp(&self, other: &Self) -> Ordering;
/// Compares and returns the maximum of two values.
@@ -920,6 +938,20 @@ pub macro Ord($item:item) {
/// - transitivity of `>`: if `a > b` and `b > c` then `a > c`
/// - duality of `partial_cmp`: `partial_cmp(a, b) == partial_cmp(b, a).map(Ordering::reverse)`
///
+/// ## Strict and non-strict partial orders
+///
+/// The `<` and `>` operators behave according to a *strict* partial order.
+/// However, `<=` and `>=` do **not** behave according to a *non-strict*
+/// partial order.
+/// That is because mathematically, a non-strict partial order would require
+/// reflexivity, i.e. `a <= a` would need to be true for every `a`. This isn't
+/// always the case for types that implement `PartialOrd`, for example:
+///
+/// ```
+/// let a = f64::sqrt(-1.0);
+/// assert_eq!(a <= a, false);
+/// ```
+///
/// ## Derivable
///
/// This trait can be used with `#[derive]`.
diff --git a/library/core/src/convert/mod.rs b/library/core/src/convert/mod.rs
index ff5a4c913..8c01b0973 100644
--- a/library/core/src/convert/mod.rs
+++ b/library/core/src/convert/mod.rs
@@ -100,6 +100,7 @@ pub use num::FloatToInt;
#[stable(feature = "convert_id", since = "1.33.0")]
#[rustc_const_stable(feature = "const_identity", since = "1.33.0")]
#[inline(always)]
+#[rustc_diagnostic_item = "convert_identity"]
pub const fn identity<T>(x: T) -> T {
x
}
@@ -137,7 +138,7 @@ pub const fn identity<T>(x: T) -> T {
///
/// [dereferenceable types]: core::ops::Deref
/// [pointed-to value]: core::ops::Deref::Target
-/// ['`Deref` coercion']: core::ops::Deref#more-on-deref-coercion
+/// ['`Deref` coercion']: core::ops::Deref#deref-coercion
///
/// ```
/// let x = Box::new(5i32);
@@ -243,7 +244,7 @@ pub trait AsRef<T: ?Sized> {
///
/// [mutably dereferenceable types]: core::ops::DerefMut
/// [pointed-to value]: core::ops::Deref::Target
-/// ['`Deref` coercion']: core::ops::DerefMut#more-on-deref-coercion
+/// ['`Deref` coercion']: core::ops::DerefMut#mutable-deref-coercion
///
/// ```
/// let mut x = Box::new(5i32);
@@ -478,6 +479,46 @@ pub trait Into<T>: Sized {
/// - `From<T> for U` implies [`Into`]`<U> for T`
/// - `From` is reflexive, which means that `From<T> for T` is implemented
///
+/// # When to implement `From`
+///
+/// While there's no technical restrictions on which conversions can be done using
+/// a `From` implementation, the general expectation is that the conversions
+/// should typically be restricted as follows:
+///
+/// * The conversion is *infallible*: if the conversion can fail, use [`TryFrom`]
+/// instead; don't provide a `From` impl that panics.
+///
+/// * The conversion is *lossless*: semantically, it should not lose or discard
+/// information. For example, `i32: From<u16>` exists, where the original
+/// value can be recovered using `u16: TryFrom<i32>`. And `String: From<&str>`
+/// exists, where you can get something equivalent to the original value via
+/// `Deref`. But `From` cannot be used to convert from `u32` to `u16`, since
+/// that cannot succeed in a lossless way. (There's some wiggle room here for
+/// information not considered semantically relevant. For example,
+/// `Box<[T]>: From<Vec<T>>` exists even though it might not preserve capacity,
+/// like how two vectors can be equal despite differing capacities.)
+///
+/// * The conversion is *value-preserving*: the conceptual kind and meaning of
+/// the resulting value is the same, even though the Rust type and technical
+/// representation might be different. For example `-1_i8 as u8` is *lossless*,
+/// since `as` casting back can recover the original value, but that conversion
+/// is *not* available via `From` because `-1` and `255` are different conceptual
+/// values (despite being identical bit patterns technically). But
+/// `f32: From<i16>` *is* available because `1_i16` and `1.0_f32` are conceptually
+/// the same real number (despite having very different bit patterns technically).
+/// `String: From<char>` is available because they're both *text*, but
+/// `String: From<u32>` is *not* available, since `1` (a number) and `"1"`
+/// (text) are too different. (Converting values to text is instead covered
+/// by the [`Display`](crate::fmt::Display) trait.)
+///
+/// * The conversion is *obvious*: it's the only reasonable conversion between
+/// the two types. Otherwise it's better to have it be a named method or
+/// constructor, like how [`str::as_bytes`] is a method and how integers have
+/// methods like [`u32::from_ne_bytes`], [`u32::from_le_bytes`], and
+/// [`u32::from_be_bytes`], none of which are `From` implementations. Whereas
+/// there's only one reasonable way to wrap an [`Ipv6Addr`](crate::net::Ipv6Addr)
+/// into an [`IpAddr`](crate::net::IpAddr), thus `IpAddr: From<Ipv6Addr>` exists.
+///
/// # Examples
///
/// [`String`] implements `From<&str>`:
@@ -532,7 +573,7 @@ pub trait Into<T>: Sized {
#[rustc_diagnostic_item = "From"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_on_unimplemented(on(
- all(_Self = "&str", any(T = "alloc::string::String", T = "std::string::String")),
+ all(_Self = "&str", T = "alloc::string::String"),
note = "to coerce a `{T}` into a `{Self}`, use `&*` as a prefix",
))]
pub trait From<T>: Sized {
@@ -577,12 +618,11 @@ pub trait TryInto<T>: Sized {
/// For example, there is no way to convert an [`i64`] into an [`i32`]
/// using the [`From`] trait, because an [`i64`] may contain a value
/// that an [`i32`] cannot represent and so the conversion would lose data.
-/// This might be handled by truncating the [`i64`] to an [`i32`] (essentially
-/// giving the [`i64`]'s value modulo [`i32::MAX`]) or by simply returning
-/// [`i32::MAX`], or by some other method. The [`From`] trait is intended
-/// for perfect conversions, so the `TryFrom` trait informs the
-/// programmer when a type conversion could go bad and lets them
-/// decide how to handle it.
+/// This might be handled by truncating the [`i64`] to an [`i32`] or by
+/// simply returning [`i32::MAX`], or by some other method. The [`From`]
+/// trait is intended for perfect conversions, so the `TryFrom` trait
+/// informs the programmer when a type conversion could go bad and lets
+/// them decide how to handle it.
///
/// # Generic Implementations
///
@@ -642,6 +682,7 @@ pub trait TryFrom<T>: Sized {
/// Performs the conversion.
#[stable(feature = "try_from", since = "1.34.0")]
+ #[rustc_diagnostic_item = "try_from_fn"]
fn try_from(value: T) -> Result<Self, Self::Error>;
}
diff --git a/library/core/src/default.rs b/library/core/src/default.rs
index 5242e97eb..16618b387 100644
--- a/library/core/src/default.rs
+++ b/library/core/src/default.rs
@@ -130,6 +130,7 @@ pub trait Default: Sized {
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_diagnostic_item = "default_fn"]
fn default() -> Self;
}
diff --git a/library/core/src/error.rs b/library/core/src/error.rs
index 1170221c1..f1a7ad935 100644
--- a/library/core/src/error.rs
+++ b/library/core/src/error.rs
@@ -439,10 +439,10 @@ where
/// * A Producer initializes the value of one of its fields of a specific type. (or is otherwise
/// prepared to generate a value requested). eg, `backtrace::Backtrace` or
/// `std::backtrace::Backtrace`
-/// * A Consumer requests an object of a specific type (say `std::backtrace::Backtrace). In the case
-/// of a `dyn Error` trait object (the Producer), there are methods called `request_ref` and
-/// `request_value` are available to simplify obtaining an ``Option<T>`` for a given type. * The
-/// Producer, when requested, populates the given Request object which is given as a mutable
+/// * A Consumer requests an object of a specific type (say `std::backtrace::Backtrace`). In the
+/// case of a `dyn Error` trait object (the Producer), there are functions called `request_ref` and
+/// `request_value` to simplify obtaining an `Option<T>` for a given type.
+/// * The Producer, when requested, populates the given Request object which is given as a mutable
/// reference.
/// * The Consumer extracts a value or reference to the requested type from the `Request` object
/// wrapped in an `Option<T>`; in the case of `dyn Error` the aforementioned `request_ref` and `
diff --git a/library/core/src/ffi/c_str.rs b/library/core/src/ffi/c_str.rs
index 93a6716d7..e7ec1fb73 100644
--- a/library/core/src/ffi/c_str.rs
+++ b/library/core/src/ffi/c_str.rs
@@ -487,7 +487,7 @@ impl CStr {
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_str_as_ptr", since = "1.32.0")]
- #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
+ #[rustc_never_returns_null_ptr]
pub const fn as_ptr(&self) -> *const c_char {
self.inner.as_ptr()
}
diff --git a/library/core/src/ffi/mod.rs b/library/core/src/ffi/mod.rs
index b2c9a0800..6908c824f 100644
--- a/library/core/src/ffi/mod.rs
+++ b/library/core/src/ffi/mod.rs
@@ -6,7 +6,7 @@
//! match those defined by C, so that code that interacts with C will
//! refer to the correct types.
-#![stable(feature = "", since = "1.30.0")]
+#![stable(feature = "core_ffi", since = "1.30.0")]
#![allow(non_camel_case_types)]
use crate::fmt;
diff --git a/library/core/src/fmt/builders.rs b/library/core/src/fmt/builders.rs
index 922724804..4ccb58586 100644
--- a/library/core/src/fmt/builders.rs
+++ b/library/core/src/fmt/builders.rs
@@ -84,6 +84,7 @@ impl fmt::Write for PadAdapter<'_, '_> {
#[must_use = "must eventually call `finish()` on Debug builders"]
#[allow(missing_debug_implementations)]
#[stable(feature = "debug_builders", since = "1.2.0")]
+#[rustc_diagnostic_item = "DebugStruct"]
pub struct DebugStruct<'a, 'b: 'a> {
fmt: &'a mut fmt::Formatter<'b>,
result: fmt::Result,
@@ -129,6 +130,18 @@ impl<'a, 'b: 'a> DebugStruct<'a, 'b> {
/// ```
#[stable(feature = "debug_builders", since = "1.2.0")]
pub fn field(&mut self, name: &str, value: &dyn fmt::Debug) -> &mut Self {
+ self.field_with(name, |f| value.fmt(f))
+ }
+
+ /// Adds a new field to the generated struct output.
+ ///
+ /// This method is equivalent to [`DebugStruct::field`], but formats the
+ /// value using a provided closure rather than by calling [`Debug::fmt`].
+ #[unstable(feature = "debug_closure_helpers", issue = "117729")]
+ pub fn field_with<F>(&mut self, name: &str, value_fmt: F) -> &mut Self
+ where
+ F: FnOnce(&mut fmt::Formatter<'_>) -> fmt::Result,
+ {
self.result = self.result.and_then(|_| {
if self.is_pretty() {
if !self.has_fields {
@@ -139,14 +152,14 @@ impl<'a, 'b: 'a> DebugStruct<'a, 'b> {
let mut writer = PadAdapter::wrap(self.fmt, &mut slot, &mut state);
writer.write_str(name)?;
writer.write_str(": ")?;
- value.fmt(&mut writer)?;
+ value_fmt(&mut writer)?;
writer.write_str(",\n")
} else {
let prefix = if self.has_fields { ", " } else { " { " };
self.fmt.write_str(prefix)?;
self.fmt.write_str(name)?;
self.fmt.write_str(": ")?;
- value.fmt(self.fmt)
+ value_fmt(self.fmt)
}
});
@@ -314,6 +327,18 @@ impl<'a, 'b: 'a> DebugTuple<'a, 'b> {
/// ```
#[stable(feature = "debug_builders", since = "1.2.0")]
pub fn field(&mut self, value: &dyn fmt::Debug) -> &mut Self {
+ self.field_with(|f| value.fmt(f))
+ }
+
+ /// Adds a new field to the generated tuple struct output.
+ ///
+ /// This method is equivalent to [`DebugTuple::field`], but formats the
+ /// value using a provided closure rather than by calling [`Debug::fmt`].
+ #[unstable(feature = "debug_closure_helpers", issue = "117729")]
+ pub fn field_with<F>(&mut self, value_fmt: F) -> &mut Self
+ where
+ F: FnOnce(&mut fmt::Formatter<'_>) -> fmt::Result,
+ {
self.result = self.result.and_then(|_| {
if self.is_pretty() {
if self.fields == 0 {
@@ -322,12 +347,12 @@ impl<'a, 'b: 'a> DebugTuple<'a, 'b> {
let mut slot = None;
let mut state = Default::default();
let mut writer = PadAdapter::wrap(self.fmt, &mut slot, &mut state);
- value.fmt(&mut writer)?;
+ value_fmt(&mut writer)?;
writer.write_str(",\n")
} else {
let prefix = if self.fields == 0 { "(" } else { ", " };
self.fmt.write_str(prefix)?;
- value.fmt(self.fmt)
+ value_fmt(self.fmt)
}
});
@@ -384,7 +409,10 @@ struct DebugInner<'a, 'b: 'a> {
}
impl<'a, 'b: 'a> DebugInner<'a, 'b> {
- fn entry(&mut self, entry: &dyn fmt::Debug) {
+ fn entry_with<F>(&mut self, entry_fmt: F)
+ where
+ F: FnOnce(&mut fmt::Formatter<'_>) -> fmt::Result,
+ {
self.result = self.result.and_then(|_| {
if self.is_pretty() {
if !self.has_fields {
@@ -393,13 +421,13 @@ impl<'a, 'b: 'a> DebugInner<'a, 'b> {
let mut slot = None;
let mut state = Default::default();
let mut writer = PadAdapter::wrap(self.fmt, &mut slot, &mut state);
- entry.fmt(&mut writer)?;
+ entry_fmt(&mut writer)?;
writer.write_str(",\n")
} else {
if self.has_fields {
self.fmt.write_str(", ")?
}
- entry.fmt(self.fmt)
+ entry_fmt(self.fmt)
}
});
@@ -474,7 +502,20 @@ impl<'a, 'b: 'a> DebugSet<'a, 'b> {
/// ```
#[stable(feature = "debug_builders", since = "1.2.0")]
pub fn entry(&mut self, entry: &dyn fmt::Debug) -> &mut Self {
- self.inner.entry(entry);
+ self.inner.entry_with(|f| entry.fmt(f));
+ self
+ }
+
+ /// Adds a new entry to the set output.
+ ///
+ /// This method is equivalent to [`DebugSet::entry`], but formats the
+ /// entry using a provided closure rather than by calling [`Debug::fmt`].
+ #[unstable(feature = "debug_closure_helpers", issue = "117729")]
+ pub fn entry_with<F>(&mut self, entry_fmt: F) -> &mut Self
+ where
+ F: FnOnce(&mut fmt::Formatter<'_>) -> fmt::Result,
+ {
+ self.inner.entry_with(entry_fmt);
self
}
@@ -604,7 +645,20 @@ impl<'a, 'b: 'a> DebugList<'a, 'b> {
/// ```
#[stable(feature = "debug_builders", since = "1.2.0")]
pub fn entry(&mut self, entry: &dyn fmt::Debug) -> &mut Self {
- self.inner.entry(entry);
+ self.inner.entry_with(|f| entry.fmt(f));
+ self
+ }
+
+ /// Adds a new entry to the list output.
+ ///
+ /// This method is equivalent to [`DebugList::entry`], but formats the
+ /// entry using a provided closure rather than by calling [`Debug::fmt`].
+ #[unstable(feature = "debug_closure_helpers", issue = "117729")]
+ pub fn entry_with<F>(&mut self, entry_fmt: F) -> &mut Self
+ where
+ F: FnOnce(&mut fmt::Formatter<'_>) -> fmt::Result,
+ {
+ self.inner.entry_with(entry_fmt);
self
}
@@ -774,6 +828,18 @@ impl<'a, 'b: 'a> DebugMap<'a, 'b> {
/// ```
#[stable(feature = "debug_map_key_value", since = "1.42.0")]
pub fn key(&mut self, key: &dyn fmt::Debug) -> &mut Self {
+ self.key_with(|f| key.fmt(f))
+ }
+
+ /// Adds the key part of a new entry to the map output.
+ ///
+ /// This method is equivalent to [`DebugMap::key`], but formats the
+ /// key using a provided closure rather than by calling [`Debug::fmt`].
+ #[unstable(feature = "debug_closure_helpers", issue = "117729")]
+ pub fn key_with<F>(&mut self, key_fmt: F) -> &mut Self
+ where
+ F: FnOnce(&mut fmt::Formatter<'_>) -> fmt::Result,
+ {
self.result = self.result.and_then(|_| {
assert!(
!self.has_key,
@@ -788,13 +854,13 @@ impl<'a, 'b: 'a> DebugMap<'a, 'b> {
let mut slot = None;
self.state = Default::default();
let mut writer = PadAdapter::wrap(self.fmt, &mut slot, &mut self.state);
- key.fmt(&mut writer)?;
+ key_fmt(&mut writer)?;
writer.write_str(": ")?;
} else {
if self.has_fields {
self.fmt.write_str(", ")?
}
- key.fmt(self.fmt)?;
+ key_fmt(self.fmt)?;
self.fmt.write_str(": ")?;
}
@@ -838,16 +904,28 @@ impl<'a, 'b: 'a> DebugMap<'a, 'b> {
/// ```
#[stable(feature = "debug_map_key_value", since = "1.42.0")]
pub fn value(&mut self, value: &dyn fmt::Debug) -> &mut Self {
+ self.value_with(|f| value.fmt(f))
+ }
+
+ /// Adds the value part of a new entry to the map output.
+ ///
+ /// This method is equivalent to [`DebugMap::value`], but formats the
+ /// value using a provided closure rather than by calling [`Debug::fmt`].
+ #[unstable(feature = "debug_closure_helpers", issue = "117729")]
+ pub fn value_with<F>(&mut self, value_fmt: F) -> &mut Self
+ where
+ F: FnOnce(&mut fmt::Formatter<'_>) -> fmt::Result,
+ {
self.result = self.result.and_then(|_| {
assert!(self.has_key, "attempted to format a map value before its key");
if self.is_pretty() {
let mut slot = None;
let mut writer = PadAdapter::wrap(self.fmt, &mut slot, &mut self.state);
- value.fmt(&mut writer)?;
+ value_fmt(&mut writer)?;
writer.write_str(",\n")?;
} else {
- value.fmt(self.fmt)?;
+ value_fmt(self.fmt)?;
}
self.has_key = false;
@@ -935,3 +1013,44 @@ impl<'a, 'b: 'a> DebugMap<'a, 'b> {
self.fmt.alternate()
}
}
+
+/// Implements [`fmt::Debug`] and [`fmt::Display`] using a function.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(debug_closure_helpers)]
+/// use std::fmt;
+///
+/// let value = 'a';
+/// assert_eq!(format!("{}", value), "a");
+/// assert_eq!(format!("{:?}", value), "'a'");
+///
+/// let wrapped = fmt::FormatterFn(|f| write!(f, "{:?}", &value));
+/// assert_eq!(format!("{}", wrapped), "'a'");
+/// assert_eq!(format!("{:?}", wrapped), "'a'");
+/// ```
+#[unstable(feature = "debug_closure_helpers", issue = "117729")]
+pub struct FormatterFn<F>(pub F)
+where
+ F: Fn(&mut fmt::Formatter<'_>) -> fmt::Result;
+
+#[unstable(feature = "debug_closure_helpers", issue = "117729")]
+impl<F> fmt::Debug for FormatterFn<F>
+where
+ F: Fn(&mut fmt::Formatter<'_>) -> fmt::Result,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (self.0)(f)
+ }
+}
+
+#[unstable(feature = "debug_closure_helpers", issue = "117729")]
+impl<F> fmt::Display for FormatterFn<F>
+where
+ F: Fn(&mut fmt::Formatter<'_>) -> fmt::Result,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (self.0)(f)
+ }
+}
diff --git a/library/core/src/fmt/mod.rs b/library/core/src/fmt/mod.rs
index fc91d1afc..e1b7b46a1 100644
--- a/library/core/src/fmt/mod.rs
+++ b/library/core/src/fmt/mod.rs
@@ -39,6 +39,9 @@ pub enum Alignment {
#[stable(feature = "debug_builders", since = "1.2.0")]
pub use self::builders::{DebugList, DebugMap, DebugSet, DebugStruct, DebugTuple};
+#[unstable(feature = "debug_closure_helpers", issue = "117729")]
+pub use self::builders::FormatterFn;
+
/// The type returned by formatter methods.
///
/// # Examples
@@ -239,6 +242,7 @@ impl<W: Write + ?Sized> Write for &mut W {
/// documentation of the methods defined on `Formatter` below.
#[allow(missing_debug_implementations)]
#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_diagnostic_item = "Formatter"]
pub struct Formatter<'a> {
flags: u32,
fill: char,
@@ -791,8 +795,10 @@ pub trait Octal {
/// assert_eq!(format!("l as binary is: {l:b}"), "l as binary is: 1101011");
///
/// assert_eq!(
-/// format!("l as binary is: {l:#032b}"),
-/// "l as binary is: 0b000000000000000000000001101011"
+/// // Note that the `0b` prefix added by `#` is included in the total width, so we
+/// // need to add two to correctly display all 32 bits.
+/// format!("l as binary is: {l:#034b}"),
+/// "l as binary is: 0b00000000000000000000000001101011"
/// );
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
diff --git a/library/core/src/fmt/rt.rs b/library/core/src/fmt/rt.rs
index d37888c27..5bf221b42 100644
--- a/library/core/src/fmt/rt.rs
+++ b/library/core/src/fmt/rt.rs
@@ -133,6 +133,10 @@ impl<'a> Argument<'a> {
Self::new(x, USIZE_MARKER)
}
+ // FIXME: Transmuting formatter in new and indirectly branching to/calling
+ // it here is an explicit CFI violation.
+ #[allow(inline_no_sanitize)]
+ #[no_sanitize(cfi, kcfi)]
#[inline(always)]
pub(super) fn fmt(&self, f: &mut Formatter<'_>) -> Result {
(self.formatter)(self.value, f)
diff --git a/library/core/src/future/mod.rs b/library/core/src/future/mod.rs
index 089493d37..0f77a2d83 100644
--- a/library/core/src/future/mod.rs
+++ b/library/core/src/future/mod.rs
@@ -38,7 +38,7 @@ pub use poll_fn::{poll_fn, PollFn};
/// This type is needed because:
///
-/// a) Generators cannot implement `for<'a, 'b> Generator<&'a mut Context<'b>>`, so we need to pass
+/// a) Coroutines cannot implement `for<'a, 'b> Coroutine<&'a mut Context<'b>>`, so we need to pass
/// a raw pointer (see <https://github.com/rust-lang/rust/issues/68923>).
/// b) Raw pointers and `NonNull` aren't `Send` or `Sync`, so that would make every single future
/// non-Send/Sync as well, and we don't want that.
diff --git a/library/core/src/hint.rs b/library/core/src/hint.rs
index 4bf3da073..ff177c70d 100644
--- a/library/core/src/hint.rs
+++ b/library/core/src/hint.rs
@@ -277,7 +277,7 @@ pub fn spin_loop() {
/// - Treats the call to `contains` and its result as volatile: the body of `benchmark` cannot
/// optimize this away
///
-/// This makes our benchmark much more realistic to how the function would be used in situ, where
+/// This makes our benchmark much more realistic to how the function would actually be used, where
/// arguments are usually not known at compile time and the result is used in some way.
#[inline]
#[stable(feature = "bench_black_box", since = "1.66.0")]
diff --git a/library/core/src/intrinsics.rs b/library/core/src/intrinsics.rs
index 4c76662ac..c5aef67b5 100644
--- a/library/core/src/intrinsics.rs
+++ b/library/core/src/intrinsics.rs
@@ -1072,7 +1072,7 @@ extern "rust-intrinsic" {
/// zero-initialization: This will statically either panic, or do nothing.
///
/// This intrinsic does not have a stable counterpart.
- #[rustc_const_unstable(feature = "const_assert_type2", issue = "none")]
+ #[rustc_const_stable(feature = "const_assert_type2", since = "1.75.0")]
#[rustc_safe_intrinsic]
#[rustc_nounwind]
pub fn assert_zero_valid<T>();
@@ -1080,7 +1080,7 @@ extern "rust-intrinsic" {
/// A guard for `std::mem::uninitialized`. This will statically either panic, or do nothing.
///
/// This intrinsic does not have a stable counterpart.
- #[rustc_const_unstable(feature = "const_assert_type2", issue = "none")]
+ #[rustc_const_stable(feature = "const_assert_type2", since = "1.75.0")]
#[rustc_safe_intrinsic]
#[rustc_nounwind]
pub fn assert_mem_uninitialized_valid<T>();
@@ -1509,12 +1509,14 @@ extern "rust-intrinsic" {
///
/// This intrinsic does not have a stable counterpart.
#[rustc_nounwind]
+ #[rustc_diagnostic_item = "intrinsics_unaligned_volatile_load"]
pub fn unaligned_volatile_load<T>(src: *const T) -> T;
/// Performs a volatile store to the `dst` pointer.
/// The pointer is not required to be aligned.
///
/// This intrinsic does not have a stable counterpart.
#[rustc_nounwind]
+ #[rustc_diagnostic_item = "intrinsics_unaligned_volatile_store"]
pub fn unaligned_volatile_store<T>(dst: *mut T, val: T);
/// Returns the square root of an `f32`
@@ -2277,7 +2279,7 @@ extern "rust-intrinsic" {
/// any safety invariants.
///
/// The stabilized version of this intrinsic is [`core::mem::discriminant`].
- #[rustc_const_unstable(feature = "const_discriminant", issue = "69821")]
+ #[rustc_const_stable(feature = "const_discriminant", since = "1.75.0")]
#[rustc_safe_intrinsic]
#[rustc_nounwind]
pub fn discriminant_value<T>(v: &T) -> <T as DiscriminantKind>::Discriminant;
@@ -2666,6 +2668,7 @@ pub(crate) fn is_nonoverlapping<T>(src: *const T, dst: *const T, count: usize) -
#[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+#[rustc_diagnostic_item = "ptr_copy_nonoverlapping"]
pub const unsafe fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize) {
extern "rust-intrinsic" {
#[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
@@ -2761,6 +2764,7 @@ pub const unsafe fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: us
#[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+#[rustc_diagnostic_item = "ptr_copy"]
pub const unsafe fn copy<T>(src: *const T, dst: *mut T, count: usize) {
extern "rust-intrinsic" {
#[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
@@ -2834,6 +2838,7 @@ pub const unsafe fn copy<T>(src: *const T, dst: *mut T, count: usize) {
#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+#[rustc_diagnostic_item = "ptr_write_bytes"]
pub const unsafe fn write_bytes<T>(dst: *mut T, val: u8, count: usize) {
extern "rust-intrinsic" {
#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
diff --git a/library/core/src/intrinsics/mir.rs b/library/core/src/intrinsics/mir.rs
index cab195dad..b26a17ec3 100644
--- a/library/core/src/intrinsics/mir.rs
+++ b/library/core/src/intrinsics/mir.rs
@@ -12,8 +12,7 @@
//!
//! Typical usage will look like this:
//!
-#![cfg_attr(bootstrap, doc = "```rust,ignore")]
-#![cfg_attr(not(bootstrap), doc = "```rust")]
+//! ```rust
//! #![feature(core_intrinsics, custom_mir)]
//! #![allow(internal_features)]
//!
@@ -63,8 +62,7 @@
//!
//! # Examples
//!
-#![cfg_attr(bootstrap, doc = "```rust,ignore")]
-#![cfg_attr(not(bootstrap), doc = "```rust")]
+//! ```rust
//! #![feature(core_intrinsics, custom_mir)]
//! #![allow(internal_features)]
//!
@@ -106,7 +104,6 @@
//! }
//!
//! #[custom_mir(dialect = "runtime", phase = "optimized")]
-#![cfg_attr(bootstrap, doc = "#[cfg(any())]")] // disable the following function in doctests when `bootstrap` is set
//! fn push_and_pop<T>(v: &mut Vec<T>, value: T) {
//! mir!(
//! let _unused;
@@ -319,8 +316,7 @@ define!(
///
/// # Examples
///
- #[cfg_attr(bootstrap, doc = "```rust,ignore")]
- #[cfg_attr(not(bootstrap), doc = "```rust")]
+ /// ```rust
/// #![allow(internal_features)]
/// #![feature(custom_mir, core_intrinsics)]
///
diff --git a/library/std/src/io/readbuf.rs b/library/core/src/io/borrowed_buf.rs
index 034ddd8df..fe25cac28 100644
--- a/library/std/src/io/readbuf.rs
+++ b/library/core/src/io/borrowed_buf.rs
@@ -1,10 +1,6 @@
-#![unstable(feature = "read_buf", issue = "78485")]
-
-#[cfg(test)]
-mod tests;
+#![unstable(feature = "core_io_borrowed_buf", issue = "117693")]
use crate::fmt::{self, Debug, Formatter};
-use crate::io::{Result, Write};
use crate::mem::{self, MaybeUninit};
use crate::{cmp, ptr};
@@ -303,15 +299,3 @@ impl<'a> BorrowedCursor<'a> {
self.buf.filled += buf.len();
}
}
-
-impl<'a> Write for BorrowedCursor<'a> {
- fn write(&mut self, buf: &[u8]) -> Result<usize> {
- self.append(buf);
- Ok(buf.len())
- }
-
- #[inline]
- fn flush(&mut self) -> Result<()> {
- Ok(())
- }
-}
diff --git a/library/core/src/io/mod.rs b/library/core/src/io/mod.rs
new file mode 100644
index 000000000..2f20180cd
--- /dev/null
+++ b/library/core/src/io/mod.rs
@@ -0,0 +1,6 @@
+//! Traits, helpers, and type definitions for core I/O functionality.
+
+mod borrowed_buf;
+
+#[unstable(feature = "core_io_borrowed_buf", issue = "117693")]
+pub use self::borrowed_buf::{BorrowedBuf, BorrowedCursor};
diff --git a/library/core/src/iter/adapters/peekable.rs b/library/core/src/iter/adapters/peekable.rs
index 20aca323b..65ba42920 100644
--- a/library/core/src/iter/adapters/peekable.rs
+++ b/library/core/src/iter/adapters/peekable.rs
@@ -12,6 +12,7 @@ use crate::ops::{ControlFlow, Try};
#[derive(Clone, Debug)]
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_diagnostic_item = "IterPeekable"]
pub struct Peekable<I: Iterator> {
iter: I,
/// Remember a peeked value, even if it was None.
diff --git a/library/core/src/iter/adapters/zip.rs b/library/core/src/iter/adapters/zip.rs
index b6b0c90cb..77ccf5085 100644
--- a/library/core/src/iter/adapters/zip.rs
+++ b/library/core/src/iter/adapters/zip.rs
@@ -95,6 +95,14 @@ where
}
#[inline]
+ fn fold<Acc, F>(self, init: Acc, f: F) -> Acc
+ where
+ F: FnMut(Acc, Self::Item) -> Acc,
+ {
+ ZipImpl::fold(self, init, f)
+ }
+
+ #[inline]
unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item
where
Self: TrustedRandomAccessNoCoerce,
@@ -129,6 +137,9 @@ trait ZipImpl<A, B> {
where
A: DoubleEndedIterator + ExactSizeIterator,
B: DoubleEndedIterator + ExactSizeIterator;
+ fn fold<Acc, F>(self, init: Acc, f: F) -> Acc
+ where
+ F: FnMut(Acc, Self::Item) -> Acc;
// This has the same safety requirements as `Iterator::__iterator_get_unchecked`
unsafe fn get_unchecked(&mut self, idx: usize) -> <Self as Iterator>::Item
where
@@ -228,6 +239,14 @@ where
{
unreachable!("Always specialized");
}
+
+ #[inline]
+ default fn fold<Acc, F>(self, init: Acc, f: F) -> Acc
+ where
+ F: FnMut(Acc, Self::Item) -> Acc,
+ {
+ SpecFold::spec_fold(self, init, f)
+ }
}
#[doc(hidden)]
@@ -251,6 +270,24 @@ where
// `Iterator::__iterator_get_unchecked`.
unsafe { (self.a.__iterator_get_unchecked(idx), self.b.__iterator_get_unchecked(idx)) }
}
+
+ #[inline]
+ fn fold<Acc, F>(mut self, init: Acc, mut f: F) -> Acc
+ where
+ F: FnMut(Acc, Self::Item) -> Acc,
+ {
+ let mut accum = init;
+ let len = ZipImpl::size_hint(&self).0;
+ for i in 0..len {
+ // SAFETY: since Self: TrustedRandomAccessNoCoerce we can trust the size-hint to
+ // calculate the length and then use that to do unchecked iteration.
+ // fold consumes the iterator so we don't need to fixup any state.
+ unsafe {
+ accum = f(accum, self.get_unchecked(i));
+ }
+ }
+ accum
+ }
}
#[doc(hidden)]
@@ -590,3 +627,56 @@ unsafe impl<I: Iterator + TrustedRandomAccessNoCoerce> SpecTrustedRandomAccess f
unsafe { self.__iterator_get_unchecked(index) }
}
}
+
+trait SpecFold: Iterator {
+ fn spec_fold<B, F>(self, init: B, f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B;
+}
+
+impl<A: Iterator, B: Iterator> SpecFold for Zip<A, B> {
+ // Adapted from default impl from the Iterator trait
+ #[inline]
+ default fn spec_fold<Acc, F>(mut self, init: Acc, mut f: F) -> Acc
+ where
+ F: FnMut(Acc, Self::Item) -> Acc,
+ {
+ let mut accum = init;
+ while let Some(x) = ZipImpl::next(&mut self) {
+ accum = f(accum, x);
+ }
+ accum
+ }
+}
+
+impl<A: TrustedLen, B: TrustedLen> SpecFold for Zip<A, B> {
+ #[inline]
+ fn spec_fold<Acc, F>(mut self, init: Acc, mut f: F) -> Acc
+ where
+ F: FnMut(Acc, Self::Item) -> Acc,
+ {
+ let mut accum = init;
+ loop {
+ let (upper, more) = if let Some(upper) = ZipImpl::size_hint(&self).1 {
+ (upper, false)
+ } else {
+ // Per TrustedLen contract a None upper bound means more than usize::MAX items
+ (usize::MAX, true)
+ };
+
+ for _ in 0..upper {
+ let pair =
+ // SAFETY: TrustedLen guarantees that at least `upper` many items are available
+ // therefore we know they can't be None
+ unsafe { (self.a.next().unwrap_unchecked(), self.b.next().unwrap_unchecked()) };
+ accum = f(accum, pair);
+ }
+
+ if !more {
+ break;
+ }
+ }
+ accum
+ }
+}
diff --git a/library/core/src/iter/mod.rs b/library/core/src/iter/mod.rs
index ca977d1ef..937a149ac 100644
--- a/library/core/src/iter/mod.rs
+++ b/library/core/src/iter/mod.rs
@@ -391,11 +391,11 @@ pub use self::traits::Iterator;
pub use self::range::Step;
#[unstable(
- feature = "iter_from_generator",
+ feature = "iter_from_coroutine",
issue = "43122",
- reason = "generators are unstable"
+ reason = "coroutines are unstable"
)]
-pub use self::sources::from_generator;
+pub use self::sources::from_coroutine;
#[stable(feature = "iter_empty", since = "1.2.0")]
pub use self::sources::{empty, Empty};
#[stable(feature = "iter_from_fn", since = "1.34.0")]
diff --git a/library/core/src/iter/sources.rs b/library/core/src/iter/sources.rs
index 3ec426a3a..56c1f8607 100644
--- a/library/core/src/iter/sources.rs
+++ b/library/core/src/iter/sources.rs
@@ -1,6 +1,6 @@
mod empty;
+mod from_coroutine;
mod from_fn;
-mod from_generator;
mod once;
mod once_with;
mod repeat;
@@ -27,11 +27,11 @@ pub use self::repeat_with::{repeat_with, RepeatWith};
pub use self::from_fn::{from_fn, FromFn};
#[unstable(
- feature = "iter_from_generator",
+ feature = "iter_from_coroutine",
issue = "43122",
- reason = "generators are unstable"
+ reason = "coroutines are unstable"
)]
-pub use self::from_generator::from_generator;
+pub use self::from_coroutine::from_coroutine;
#[stable(feature = "iter_successors", since = "1.34.0")]
pub use self::successors::{successors, Successors};
diff --git a/library/core/src/iter/sources/empty.rs b/library/core/src/iter/sources/empty.rs
index 243df015f..438e046a4 100644
--- a/library/core/src/iter/sources/empty.rs
+++ b/library/core/src/iter/sources/empty.rs
@@ -27,6 +27,7 @@ pub const fn empty<T>() -> Empty<T> {
/// This `struct` is created by the [`empty()`] function. See its documentation for more.
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "iter_empty", since = "1.2.0")]
+#[rustc_diagnostic_item = "IterEmpty"]
pub struct Empty<T>(marker::PhantomData<fn() -> T>);
#[stable(feature = "core_impl_debug", since = "1.9.0")]
diff --git a/library/core/src/iter/sources/from_coroutine.rs b/library/core/src/iter/sources/from_coroutine.rs
new file mode 100644
index 000000000..16fbca9b6
--- /dev/null
+++ b/library/core/src/iter/sources/from_coroutine.rs
@@ -0,0 +1,59 @@
+use crate::fmt;
+use crate::ops::{Coroutine, CoroutineState};
+use crate::pin::Pin;
+
+/// Creates a new iterator where each iteration calls the provided coroutine.
+///
+/// Similar to [`iter::from_fn`].
+///
+/// [`iter::from_fn`]: crate::iter::from_fn
+///
+/// # Examples
+///
+/// ```
+/// #![cfg_attr(bootstrap, feature(generators))]
+/// #![cfg_attr(not(bootstrap), feature(coroutines))]
+/// #![feature(iter_from_coroutine)]
+///
+/// let it = std::iter::from_coroutine(|| {
+/// yield 1;
+/// yield 2;
+/// yield 3;
+/// });
+/// let v: Vec<_> = it.collect();
+/// assert_eq!(v, [1, 2, 3]);
+/// ```
+#[inline]
+#[unstable(feature = "iter_from_coroutine", issue = "43122", reason = "coroutines are unstable")]
+pub fn from_coroutine<G: Coroutine<Return = ()> + Unpin>(coroutine: G) -> FromCoroutine<G> {
+ FromCoroutine(coroutine)
+}
+
+/// An iterator over the values yielded by an underlying coroutine.
+///
+/// This `struct` is created by the [`iter::from_coroutine()`] function. See its documentation for
+/// more.
+///
+/// [`iter::from_coroutine()`]: from_coroutine
+#[unstable(feature = "iter_from_coroutine", issue = "43122", reason = "coroutines are unstable")]
+#[derive(Clone)]
+pub struct FromCoroutine<G>(G);
+
+#[unstable(feature = "iter_from_coroutine", issue = "43122", reason = "coroutines are unstable")]
+impl<G: Coroutine<Return = ()> + Unpin> Iterator for FromCoroutine<G> {
+ type Item = G::Yield;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ match Pin::new(&mut self.0).resume(()) {
+ CoroutineState::Yielded(n) => Some(n),
+ CoroutineState::Complete(()) => None,
+ }
+ }
+}
+
+#[unstable(feature = "iter_from_coroutine", issue = "43122", reason = "coroutines are unstable")]
+impl<G> fmt::Debug for FromCoroutine<G> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("FromCoroutine").finish()
+ }
+}
diff --git a/library/core/src/iter/sources/from_generator.rs b/library/core/src/iter/sources/from_generator.rs
deleted file mode 100644
index 4cbe731b2..000000000
--- a/library/core/src/iter/sources/from_generator.rs
+++ /dev/null
@@ -1,58 +0,0 @@
-use crate::fmt;
-use crate::ops::{Generator, GeneratorState};
-use crate::pin::Pin;
-
-/// Creates a new iterator where each iteration calls the provided generator.
-///
-/// Similar to [`iter::from_fn`].
-///
-/// [`iter::from_fn`]: crate::iter::from_fn
-///
-/// # Examples
-///
-/// ```
-/// #![feature(generators)]
-/// #![feature(iter_from_generator)]
-///
-/// let it = std::iter::from_generator(|| {
-/// yield 1;
-/// yield 2;
-/// yield 3;
-/// });
-/// let v: Vec<_> = it.collect();
-/// assert_eq!(v, [1, 2, 3]);
-/// ```
-#[inline]
-#[unstable(feature = "iter_from_generator", issue = "43122", reason = "generators are unstable")]
-pub fn from_generator<G: Generator<Return = ()> + Unpin>(generator: G) -> FromGenerator<G> {
- FromGenerator(generator)
-}
-
-/// An iterator over the values yielded by an underlying generator.
-///
-/// This `struct` is created by the [`iter::from_generator()`] function. See its documentation for
-/// more.
-///
-/// [`iter::from_generator()`]: from_generator
-#[unstable(feature = "iter_from_generator", issue = "43122", reason = "generators are unstable")]
-#[derive(Clone)]
-pub struct FromGenerator<G>(G);
-
-#[unstable(feature = "iter_from_generator", issue = "43122", reason = "generators are unstable")]
-impl<G: Generator<Return = ()> + Unpin> Iterator for FromGenerator<G> {
- type Item = G::Yield;
-
- fn next(&mut self) -> Option<Self::Item> {
- match Pin::new(&mut self.0).resume(()) {
- GeneratorState::Yielded(n) => Some(n),
- GeneratorState::Complete(()) => None,
- }
- }
-}
-
-#[unstable(feature = "iter_from_generator", issue = "43122", reason = "generators are unstable")]
-impl<G> fmt::Debug for FromGenerator<G> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("FromGenerator").finish()
- }
-}
diff --git a/library/core/src/iter/sources/once.rs b/library/core/src/iter/sources/once.rs
index 6e9ed0d3c..21be4377d 100644
--- a/library/core/src/iter/sources/once.rs
+++ b/library/core/src/iter/sources/once.rs
@@ -61,6 +61,7 @@ pub fn once<T>(value: T) -> Once<T> {
/// This `struct` is created by the [`once()`] function. See its documentation for more.
#[derive(Clone, Debug)]
#[stable(feature = "iter_once", since = "1.2.0")]
+#[rustc_diagnostic_item = "IterOnce"]
pub struct Once<T> {
inner: crate::option::IntoIter<T>,
}
diff --git a/library/core/src/iter/sources/once_with.rs b/library/core/src/iter/sources/once_with.rs
index 9309a06c8..8b31ab2ff 100644
--- a/library/core/src/iter/sources/once_with.rs
+++ b/library/core/src/iter/sources/once_with.rs
@@ -4,7 +4,7 @@ use crate::iter::{FusedIterator, TrustedLen};
/// Creates an iterator that lazily generates a value exactly once by invoking
/// the provided closure.
///
-/// This is commonly used to adapt a single value generator into a [`chain()`] of
+/// This is commonly used to adapt a single value into a [`chain()`] of
/// other kinds of iteration. Maybe you have an iterator that covers almost
/// everything, but you need an extra special case. Maybe you have a function
/// which works on iterators, but you only need to process one value.
diff --git a/library/core/src/iter/sources/successors.rs b/library/core/src/iter/sources/successors.rs
index 6a6cbe905..7f7b2c775 100644
--- a/library/core/src/iter/sources/successors.rs
+++ b/library/core/src/iter/sources/successors.rs
@@ -17,7 +17,7 @@ where
F: FnMut(&T) -> Option<T>,
{
// If this function returned `impl Iterator<Item=T>`
- // it could be based on `unfold` and not need a dedicated type.
+ // it could be based on `from_fn` and not need a dedicated type.
// However having a named `Successors<T, F>` type allows it to be `Clone` when `T` and `F` are.
Successors { next: first, succ }
}
diff --git a/library/core/src/iter/traits/collect.rs b/library/core/src/iter/traits/collect.rs
index e0ef5071c..0d1cf7941 100644
--- a/library/core/src/iter/traits/collect.rs
+++ b/library/core/src/iter/traits/collect.rs
@@ -146,6 +146,7 @@ pub trait FromIterator<A>: Sized {
/// assert_eq!(v, vec![5, 5, 5, 5, 5]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_diagnostic_item = "from_iter_fn"]
fn from_iter<T: IntoIterator<Item = A>>(iter: T) -> Self;
}
diff --git a/library/core/src/iter/traits/iterator.rs b/library/core/src/iter/traits/iterator.rs
index ac1fc26a1..6adea4442 100644
--- a/library/core/src/iter/traits/iterator.rs
+++ b/library/core/src/iter/traits/iterator.rs
@@ -27,13 +27,13 @@ fn _assert_is_object_safe(_: &dyn Iterator<Item = ()>) {}
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_on_unimplemented(
on(
- any(_Self = "core::ops::RangeTo<Idx>", _Self = "std::ops::RangeTo<Idx>"),
+ _Self = "core::ops::range::RangeTo<Idx>",
label = "if you meant to iterate until a value, add a starting value",
note = "`..end` is a `RangeTo`, which cannot be iterated on; you might have meant to have a \
bounded `Range`: `0..end`"
),
on(
- any(_Self = "core::ops::RangeToInclusive<Idx>", _Self = "std::ops::RangeToInclusive<Idx>"),
+ _Self = "core::ops::range::RangeToInclusive<Idx>",
label = "if you meant to iterate until a value (including it), add a starting value",
note = "`..=end` is a `RangeToInclusive`, which cannot be iterated on; you might have meant \
to have a bounded `RangeInclusive`: `0..=end`"
@@ -44,7 +44,7 @@ fn _assert_is_object_safe(_: &dyn Iterator<Item = ()>) {}
),
on(_Self = "&[]", label = "`{Self}` is not an iterator; try calling `.iter()`"),
on(
- any(_Self = "alloc::vec::Vec<T, A>", _Self = "std::vec::Vec<T, A>"),
+ _Self = "alloc::vec::Vec<T, A>",
label = "`{Self}` is not an iterator; try calling `.into_iter()` or `.iter()`"
),
on(
@@ -52,7 +52,7 @@ fn _assert_is_object_safe(_: &dyn Iterator<Item = ()>) {}
label = "`{Self}` is not an iterator; try calling `.chars()` or `.bytes()`"
),
on(
- any(_Self = "alloc::string::String", _Self = "std::string::String"),
+ _Self = "alloc::string::String",
label = "`{Self}` is not an iterator; try calling `.chars()` or `.bytes()`"
),
on(
@@ -69,6 +69,7 @@ fn _assert_is_object_safe(_: &dyn Iterator<Item = ()>) {}
message = "`{Self}` is not an iterator"
)]
#[doc(notable_trait)]
+#[cfg_attr(not(bootstrap), lang = "iterator")]
#[rustc_diagnostic_item = "Iterator"]
#[must_use = "iterators are lazy and do nothing unless consumed"]
pub trait Iterator {
@@ -2141,7 +2142,7 @@ pub trait Iterator {
/// passed collection. The collection is then returned, so the call chain
/// can be continued.
///
- /// This is useful when you already have a collection and wants to add
+ /// This is useful when you already have a collection and want to add
/// the iterator items to it.
///
/// This method is a convenience method to call [Extend::extend](trait.Extend.html),
diff --git a/library/core/src/lib.rs b/library/core/src/lib.rs
index 8b15e8269..5a6d242a7 100644
--- a/library/core/src/lib.rs
+++ b/library/core/src/lib.rs
@@ -26,7 +26,8 @@
//! assumptions about their semantics: For `memcpy`, `memmove`, `memset`, `memcmp`, and `bcmp`, if
//! the `n` parameter is 0, the function is assumed to not be UB. Furthermore, for `memcpy`, if
//! source and target pointer are equal, the function is assumed to not be UB.
-//! (Note that these are [standard assumptions](https://reviews.llvm.org/D86993) among compilers.)
+//! (Note that these are standard assumptions among compilers:
+//! [clang](https://reviews.llvm.org/D86993) and [GCC](https://gcc.gnu.org/bugzilla/show_bug.cgi?id=32667) do the same.)
//! These functions are often provided by the system libc, but can also be provided by the
//! [compiler-builtins crate](https://crates.io/crates/compiler_builtins).
//! Note that the library does not guarantee that it will always make these assumptions, so Rust
@@ -68,6 +69,7 @@
test(no_crate_inject, attr(deny(warnings))),
test(attr(allow(dead_code, deprecated, unused_variables, unused_mut)))
)]
+#![cfg_attr(not(bootstrap), doc(rust_logo))]
#![doc(cfg_hide(
not(test),
any(not(feature = "miri-test-libstd"), test, doctest),
@@ -110,8 +112,6 @@
//
// Library features:
// tidy-alphabetical-start
-#![cfg_attr(bootstrap, feature(no_coverage))] // rust-lang/rust#84605
-#![cfg_attr(not(bootstrap), feature(coverage_attribute))] // rust-lang/rust#84605
#![feature(char_indices_offset)]
#![feature(const_align_of_val)]
#![feature(const_align_of_val_raw)]
@@ -126,7 +126,6 @@
#![feature(const_caller_location)]
#![feature(const_cell_into_inner)]
#![feature(const_char_from_u32_unchecked)]
-#![feature(const_discriminant)]
#![feature(const_eval_select)]
#![feature(const_exact_div)]
#![feature(const_float_bits_conv)]
@@ -135,7 +134,6 @@
#![feature(const_hash)]
#![feature(const_heap)]
#![feature(const_index_range_slice_index)]
-#![feature(const_inherent_unchecked_arith)]
#![feature(const_int_unchecked_arith)]
#![feature(const_intrinsic_forget)]
#![feature(const_ipv4)]
@@ -149,7 +147,6 @@
#![feature(const_option)]
#![feature(const_option_ext)]
#![feature(const_pin)]
-#![feature(const_pointer_byte_offsets)]
#![feature(const_pointer_is_aligned)]
#![feature(const_ptr_as_ref)]
#![feature(const_ptr_is_null)]
@@ -173,6 +170,7 @@
#![feature(const_unsafecell_get_mut)]
#![feature(const_waker)]
#![feature(core_panic)]
+#![feature(coverage_attribute)]
#![feature(duration_consts_float)]
#![feature(internal_impls_macro)]
#![feature(ip)]
@@ -189,6 +187,8 @@
#![feature(str_split_inclusive_remainder)]
#![feature(str_split_remainder)]
#![feature(strict_provenance)]
+#![feature(unchecked_math)]
+#![feature(unchecked_shifts)]
#![feature(utf16_extra)]
#![feature(utf16_extra_const)]
#![feature(variant_count)]
@@ -237,6 +237,7 @@
#![feature(negative_impls)]
#![feature(never_type)]
#![feature(no_core)]
+#![feature(no_sanitize)]
#![feature(platform_intrinsics)]
#![feature(prelude_import)]
#![feature(repr_simd)]
@@ -252,6 +253,7 @@
#![feature(try_blocks)]
#![feature(unboxed_closures)]
#![feature(unsized_fn_params)]
+#![feature(with_negative_coherence)]
// tidy-alphabetical-end
//
// Target features:
@@ -367,6 +369,8 @@ pub mod async_iter;
pub mod cell;
pub mod char;
pub mod ffi;
+#[unstable(feature = "core_io_borrowed_buf", issue = "117693")]
+pub mod io;
pub mod iter;
pub mod net;
pub mod option;
@@ -414,7 +418,8 @@ pub mod primitive;
dead_code,
unused_imports,
unsafe_op_in_unsafe_fn,
- ambiguous_glob_reexports
+ ambiguous_glob_reexports,
+ deprecated_in_future
)]
#[allow(rustdoc::bare_urls)]
// FIXME: This annotation should be moved into rust-lang/stdarch after clashing_extern_declarations is
diff --git a/library/core/src/macros/mod.rs b/library/core/src/macros/mod.rs
index 123661b35..7f5908e47 100644
--- a/library/core/src/macros/mod.rs
+++ b/library/core/src/macros/mod.rs
@@ -718,7 +718,8 @@ macro_rules! unreachable {
/// The difference between `unimplemented!` and [`todo!`] is that while `todo!`
/// conveys an intent of implementing the functionality later and the message is "not yet
/// implemented", `unimplemented!` makes no such claims. Its message is "not implemented".
-/// Also some IDEs will mark `todo!`s.
+///
+/// Also, some IDEs will mark `todo!`s.
///
/// # Panics
///
@@ -804,11 +805,15 @@ macro_rules! unimplemented {
/// The difference between [`unimplemented!`] and `todo!` is that while `todo!` conveys
/// an intent of implementing the functionality later and the message is "not yet
/// implemented", `unimplemented!` makes no such claims. Its message is "not implemented".
-/// Also some IDEs will mark `todo!`s.
+///
+/// Also, some IDEs will mark `todo!`s.
///
/// # Panics
///
-/// This will always [`panic!`].
+/// This will always [`panic!`] because `todo!` is just a shorthand for `panic!` with a
+/// fixed, specific message.
+///
+/// Like `panic!`, this macro has a second form for displaying custom values.
///
/// # Examples
///
@@ -816,38 +821,47 @@ macro_rules! unimplemented {
///
/// ```
/// trait Foo {
-/// fn bar(&self);
+/// fn bar(&self) -> u8;
/// fn baz(&self);
+/// fn qux(&self) -> Result<u64, ()>;
/// }
/// ```
///
/// We want to implement `Foo` on one of our types, but we also want to work on
/// just `bar()` first. In order for our code to compile, we need to implement
-/// `baz()`, so we can use `todo!`:
+/// `baz()` and `qux()`, so we can use `todo!`:
///
/// ```
/// # trait Foo {
-/// # fn bar(&self);
+/// # fn bar(&self) -> u8;
/// # fn baz(&self);
+/// # fn qux(&self) -> Result<u64, ()>;
/// # }
/// struct MyStruct;
///
/// impl Foo for MyStruct {
-/// fn bar(&self) {
-/// // implementation goes here
+/// fn bar(&self) -> u8 {
+/// 1 + 1
/// }
///
/// fn baz(&self) {
-/// // let's not worry about implementing baz() for now
+/// // Let's not worry about implementing baz() for now
/// todo!();
/// }
+///
+/// fn qux(&self) -> Result<u64, ()> {
+/// // We can add a message to todo! to display our omission.
+/// // This will display:
+/// // "thread 'main' panicked at 'not yet implemented: MyStruct is not yet quxable'".
+/// todo!("MyStruct is not yet quxable");
+/// }
/// }
///
/// fn main() {
/// let s = MyStruct;
/// s.bar();
///
-/// // we aren't even using baz(), so this is fine.
+/// // We aren't even using baz() or qux(), so this is fine.
/// }
/// ```
#[macro_export]
@@ -1030,6 +1044,7 @@ pub(crate) mod builtin {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_builtin_macro]
#[macro_export]
+ #[rustc_diagnostic_item = "env_macro"] // useful for external lints
macro_rules! env {
($name:expr $(,)?) => {{ /* compiler built-in */ }};
($name:expr, $error_msg:expr $(,)?) => {{ /* compiler built-in */ }};
@@ -1060,6 +1075,7 @@ pub(crate) mod builtin {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_builtin_macro]
#[macro_export]
+ #[rustc_diagnostic_item = "option_env_macro"] // useful for external lints
macro_rules! option_env {
($name:expr $(,)?) => {{ /* compiler built-in */ }};
}
@@ -1465,6 +1481,7 @@ pub(crate) mod builtin {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_builtin_macro]
#[macro_export]
+ #[rustc_diagnostic_item = "include_macro"] // useful for external lints
macro_rules! include {
($file:expr $(,)?) => {{ /* compiler built-in */ }};
}
diff --git a/library/core/src/marker.rs b/library/core/src/marker.rs
index 5ed82e26a..99762bccd 100644
--- a/library/core/src/marker.rs
+++ b/library/core/src/marker.rs
@@ -155,12 +155,18 @@ pub trait Sized {
/// Those implementations are:
///
/// - Arrays `[T; N]` implement `Unsize<[T]>`.
-/// - Types implementing a trait `Trait` also implement `Unsize<dyn Trait>`.
-/// - Structs `Foo<..., T, ...>` implement `Unsize<Foo<..., U, ...>>` if all of these conditions
-/// are met:
-/// - `T: Unsize<U>`.
-/// - Only the last field of `Foo` has a type involving `T`.
-/// - `Bar<T>: Unsize<Bar<U>>`, where `Bar<T>` stands for the actual type of that last field.
+/// - A type implements `Unsize<dyn Trait + 'a>` if all of these conditions are met:
+/// - The type implements `Trait`.
+/// - `Trait` is object safe.
+/// - The type is sized.
+/// - The type outlives `'a`.
+/// - Structs `Foo<..., T1, ..., Tn, ...>` implement `Unsize<Foo<..., U1, ..., Un, ...>>`
+/// where any number of (type and const) parameters may be changed if all of these conditions
+/// are met:
+/// - Only the last field of `Foo` has a type involving the parameters `T1`, ..., `Tn`.
+/// - All other parameters of the struct are equal.
+/// - `Field<T1, ..., Tn>: Unsize<Field<U1, ..., Un>>`, where `Field<...>` stands for the actual
+/// type of the struct's last field.
///
/// `Unsize` is used along with [`ops::CoerceUnsized`] to allow
/// "user-defined" containers such as [`Rc`] to contain dynamically-sized
@@ -247,6 +253,7 @@ marker_impls! {
///
/// const CFN: Wrap<fn(&())> = Wrap(higher_order);
///
+/// #[allow(pointer_structural_match)]
/// fn main() {
/// match CFN {
/// CFN => {}
@@ -573,59 +580,72 @@ impl<T: ?Sized> Copy for &T {}
#[lang = "sync"]
#[rustc_on_unimplemented(
on(
- any(_Self = "core::cell:OnceCell<T>", _Self = "std::cell::OnceCell<T>"),
+ _Self = "core::cell::once::OnceCell<T>",
note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::OnceLock` instead"
),
on(
- any(_Self = "core::cell::Cell<u8>", _Self = "std::cell::Cell<u8>"),
+ _Self = "core::cell::Cell<u8>",
note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicU8` instead",
),
on(
- any(_Self = "core::cell::Cell<u16>", _Self = "std::cell::Cell<u16>"),
+ _Self = "core::cell::Cell<u16>",
note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicU16` instead",
),
on(
- any(_Self = "core::cell::Cell<u32>", _Self = "std::cell::Cell<u32>"),
+ _Self = "core::cell::Cell<u32>",
note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicU32` instead",
),
on(
- any(_Self = "core::cell::Cell<u64>", _Self = "std::cell::Cell<u64>"),
+ _Self = "core::cell::Cell<u64>",
note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicU64` instead",
),
on(
- any(_Self = "core::cell::Cell<usize>", _Self = "std::cell::Cell<usize>"),
+ _Self = "core::cell::Cell<usize>",
note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicUsize` instead",
),
on(
- any(_Self = "core::cell::Cell<i8>", _Self = "std::cell::Cell<i8>"),
+ _Self = "core::cell::Cell<i8>",
note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicI8` instead",
),
on(
- any(_Self = "core::cell::Cell<i16>", _Self = "std::cell::Cell<i16>"),
+ _Self = "core::cell::Cell<i16>",
note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicI16` instead",
),
on(
- any(_Self = "core::cell::Cell<i32>", _Self = "std::cell::Cell<i32>"),
+ _Self = "core::cell::Cell<i32>",
note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicI32` instead",
),
on(
- any(_Self = "core::cell::Cell<i64>", _Self = "std::cell::Cell<i64>"),
+ _Self = "core::cell::Cell<i64>",
note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicI64` instead",
),
on(
- any(_Self = "core::cell::Cell<isize>", _Self = "std::cell::Cell<isize>"),
+ _Self = "core::cell::Cell<isize>",
note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicIsize` instead",
),
on(
- any(_Self = "core::cell::Cell<bool>", _Self = "std::cell::Cell<bool>"),
+ _Self = "core::cell::Cell<bool>",
note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicBool` instead",
),
on(
- any(_Self = "core::cell::Cell<T>", _Self = "std::cell::Cell<T>"),
+ all(
+ _Self = "core::cell::Cell<T>",
+ not(_Self = "core::cell::Cell<u8>"),
+ not(_Self = "core::cell::Cell<u16>"),
+ not(_Self = "core::cell::Cell<u32>"),
+ not(_Self = "core::cell::Cell<u64>"),
+ not(_Self = "core::cell::Cell<usize>"),
+ not(_Self = "core::cell::Cell<i8>"),
+ not(_Self = "core::cell::Cell<i16>"),
+ not(_Self = "core::cell::Cell<i32>"),
+ not(_Self = "core::cell::Cell<i64>"),
+ not(_Self = "core::cell::Cell<isize>"),
+ not(_Self = "core::cell::Cell<bool>")
+ ),
note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock`",
),
on(
- any(_Self = "core::cell::RefCell<T>", _Self = "std::cell::RefCell<T>"),
+ _Self = "core::cell::RefCell<T>",
note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` instead",
),
message = "`{Self}` cannot be shared between threads safely",
diff --git a/library/core/src/mem/manually_drop.rs b/library/core/src/mem/manually_drop.rs
index 5f3d66e37..98cff3493 100644
--- a/library/core/src/mem/manually_drop.rs
+++ b/library/core/src/mem/manually_drop.rs
@@ -4,12 +4,12 @@ use crate::ptr;
/// A wrapper to inhibit compiler from automatically calling `T`’s destructor.
/// This wrapper is 0-cost.
///
-/// `ManuallyDrop<T>` is guaranteed to have the same layout as `T`, and is subject
-/// to the same layout optimizations as `T`. As a consequence, it has *no effect*
-/// on the assumptions that the compiler makes about its contents. For example,
-/// initializing a `ManuallyDrop<&mut T>` with [`mem::zeroed`] is undefined
-/// behavior. If you need to handle uninitialized data, use [`MaybeUninit<T>`]
-/// instead.
+/// `ManuallyDrop<T>` is guaranteed to have the same layout and bit validity as
+/// `T`, and is subject to the same layout optimizations as `T`. As a consequence,
+/// it has *no effect* on the assumptions that the compiler makes about its
+/// contents. For example, initializing a `ManuallyDrop<&mut T>` with [`mem::zeroed`]
+/// is undefined behavior. If you need to handle uninitialized data, use
+/// [`MaybeUninit<T>`] instead.
///
/// Note that accessing the value inside a `ManuallyDrop<T>` is safe.
/// This means that a `ManuallyDrop<T>` whose content has been dropped must not
diff --git a/library/core/src/mem/maybe_uninit.rs b/library/core/src/mem/maybe_uninit.rs
index d09a24b4b..8a210c195 100644
--- a/library/core/src/mem/maybe_uninit.rs
+++ b/library/core/src/mem/maybe_uninit.rs
@@ -242,7 +242,7 @@ use crate::slice;
/// the same size, alignment, and ABI as `T`; it's just that the way `MaybeUninit` implements that
/// guarantee may evolve.
#[stable(feature = "maybe_uninit", since = "1.36.0")]
-// Lang item so we can wrap other types in it. This is useful for generators.
+// Lang item so we can wrap other types in it. This is useful for coroutines.
#[lang = "maybe_uninit"]
#[derive(Copy)]
#[repr(transparent)]
@@ -374,6 +374,9 @@ impl<T> MaybeUninit<T> {
/// assert_eq!(x, (0, false));
/// ```
///
+ /// This can be used in const contexts, such as to indicate the end of static arrays for
+ /// plugin registration.
+ ///
/// *Incorrect* usage of this function: calling `x.zeroed().assume_init()`
/// when `0` is not a valid bit-pattern for the type:
///
@@ -387,17 +390,19 @@ impl<T> MaybeUninit<T> {
/// // Inside a pair, we create a `NotZero` that does not have a valid discriminant.
/// // This is undefined behavior. ⚠️
/// ```
- #[stable(feature = "maybe_uninit", since = "1.36.0")]
- #[rustc_const_unstable(feature = "const_maybe_uninit_zeroed", issue = "91850")]
- #[must_use]
#[inline]
+ #[must_use]
#[rustc_diagnostic_item = "maybe_uninit_zeroed"]
+ #[stable(feature = "maybe_uninit", since = "1.36.0")]
+ // These are OK to allow since we do not leak &mut to user-visible API
+ #[rustc_allow_const_fn_unstable(const_mut_refs)]
+ #[rustc_allow_const_fn_unstable(const_ptr_write)]
+ #[rustc_allow_const_fn_unstable(const_maybe_uninit_as_mut_ptr)]
+ #[rustc_const_stable(feature = "const_maybe_uninit_zeroed", since = "1.75.0")]
pub const fn zeroed() -> MaybeUninit<T> {
let mut u = MaybeUninit::<T>::uninit();
// SAFETY: `u.as_mut_ptr()` points to allocated memory.
- unsafe {
- u.as_mut_ptr().write_bytes(0u8, 1);
- }
+ unsafe { u.as_mut_ptr().write_bytes(0u8, 1) };
u
}
@@ -686,7 +691,10 @@ impl<T> MaybeUninit<T> {
/// // they both get dropped!
/// ```
#[stable(feature = "maybe_uninit_extra", since = "1.60.0")]
- #[rustc_const_unstable(feature = "const_maybe_uninit_assume_init_read", issue = "63567")]
+ #[rustc_const_stable(
+ feature = "const_maybe_uninit_assume_init_read",
+ since = "1.75.0"
+ )]
#[inline(always)]
#[track_caller]
pub const unsafe fn assume_init_read(&self) -> T {
diff --git a/library/core/src/mem/mod.rs b/library/core/src/mem/mod.rs
index e478b217f..eef214528 100644
--- a/library/core/src/mem/mod.rs
+++ b/library/core/src/mem/mod.rs
@@ -647,7 +647,8 @@ pub const fn needs_drop<T: ?Sized>() -> bool {
#[allow(deprecated)]
#[rustc_diagnostic_item = "mem_zeroed"]
#[track_caller]
-pub unsafe fn zeroed<T>() -> T {
+#[rustc_const_stable(feature = "const_mem_zeroed", since = "1.75.0")]
+pub const unsafe fn zeroed<T>() -> T {
// SAFETY: the caller must guarantee that an all-zero value is valid for `T`.
unsafe {
intrinsics::assert_zero_valid::<T>();
@@ -723,15 +724,12 @@ pub unsafe fn uninitialized<T>() -> T {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+#[rustc_diagnostic_item = "mem_swap"]
pub const fn swap<T>(x: &mut T, y: &mut T) {
// NOTE(eddyb) SPIR-V's Logical addressing model doesn't allow for arbitrary
// reinterpretation of values as (chunkable) byte arrays, and the loop in the
// block optimization in `swap_slice` is hard to rewrite back
// into the (unoptimized) direct swapping implementation, so we disable it.
- // FIXME(eddyb) the block optimization also prevents MIR optimizations from
- // understanding `mem::replace`, `Option::take`, etc. - a better overall
- // solution might be to make `ptr::swap_nonoverlapping` into an intrinsic, which
- // a backend can choose to implement using the block optimization, or not.
#[cfg(not(any(target_arch = "spirv")))]
{
// For types that are larger multiples of their alignment, the simple way
@@ -768,11 +766,14 @@ pub(crate) const fn swap_simple<T>(x: &mut T, y: &mut T) {
// And LLVM actually optimizes it to 3×memcpy if called with
// a type larger than it's willing to keep in a register.
// Having typed reads and writes in MIR here is also good as
- // it lets MIRI and CTFE understand them better, including things
+ // it lets Miri and CTFE understand them better, including things
// like enforcing type validity for them.
// Importantly, read+copy_nonoverlapping+write introduces confusing
// asymmetry to the behaviour where one value went through read+write
// whereas the other was copied over by the intrinsic (see #94371).
+ // Furthermore, using only read+write here benefits limited backends
+ // such as SPIR-V that work on an underlying *typed* view of memory,
+ // and thus have trouble with Rust's untyped memory operations.
// SAFETY: exclusive references are always valid to read/write,
// including being aligned, and nothing here panics so it's drop-safe.
@@ -909,6 +910,10 @@ pub fn take<T: Default>(dest: &mut T) -> T {
#[rustc_const_unstable(feature = "const_replace", issue = "83164")]
#[cfg_attr(not(test), rustc_diagnostic_item = "mem_replace")]
pub const fn replace<T>(dest: &mut T, src: T) -> T {
+ // It may be tempting to use `swap` to avoid `unsafe` here. Don't!
+ // The compiler optimizes the implementation below to two `memcpy`s
+ // while `swap` would require at least three. See PR#83022 for details.
+
// SAFETY: We read from `dest` but directly write `src` into it afterwards,
// such that the old value is not duplicated. Nothing is dropped and
// nothing here can panic.
@@ -930,7 +935,7 @@ pub const fn replace<T>(dest: &mut T, src: T) -> T {
/// This function is not magic; it is literally defined as
///
/// ```
-/// pub fn drop<T>(_x: T) { }
+/// pub fn drop<T>(_x: T) {}
/// ```
///
/// Because `_x` is moved into the function, it is automatically dropped before
@@ -1050,6 +1055,7 @@ pub const fn copy<T: Copy>(x: &T) -> T {
/// ```
#[inline]
#[must_use]
+#[track_caller]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_transmute_copy", since = "1.74.0")]
pub const unsafe fn transmute_copy<Src, Dst>(src: &Src) -> Dst {
@@ -1204,7 +1210,7 @@ impl<T> fmt::Debug for Discriminant<T> {
/// // assert_eq!(0, unsafe { std::mem::transmute::<_, u8>(std::mem::discriminant(&unit_like)) });
/// ```
#[stable(feature = "discriminant_value", since = "1.21.0")]
-#[rustc_const_unstable(feature = "const_discriminant", issue = "69821")]
+#[rustc_const_stable(feature = "const_discriminant", since = "1.75.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "mem_discriminant")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const fn discriminant<T>(v: &T) -> Discriminant<T> {
@@ -1290,16 +1296,71 @@ impl<T> SizedTypeProperties for T {}
/// Expands to the offset in bytes of a field from the beginning of the given type.
///
-/// Only structs, unions and tuples are supported.
+/// Structs, enums, unions and tuples are supported.
+///
+/// Nested field accesses may be used, but not array indexes.
+///
+/// Enum variants may be traversed as if they were fields. Variants themselves do
+/// not have an offset.
+///
+/// Visibility is respected - all types and fields must be visible to the call site:
+///
+/// ```
+/// #![feature(offset_of)]
+///
+/// mod nested {
+/// #[repr(C)]
+/// pub struct Struct {
+/// private: u8,
+/// }
+/// }
+///
+/// // assert_eq!(mem::offset_of!(nested::Struct, private), 0);
+/// // ^^^ error[E0616]: field `private` of struct `Struct` is private
+/// ```
+///
+/// Note that type layout is, in general, [subject to change and
+/// platform-specific](https://doc.rust-lang.org/reference/type-layout.html). If
+/// layout stability is required, consider using an [explicit `repr` attribute].
+///
+/// Rust guarantees that the offset of a given field within a given type will not
+/// change over the lifetime of the program. However, two different compilations of
+/// the same program may result in different layouts. Also, even within a single
+/// program execution, no guarantees are made about types which are *similar* but
+/// not *identical*, e.g.:
+///
+/// ```
+/// #![feature(offset_of)]
+///
+/// struct Wrapper<T, U>(T, U);
+///
+/// type A = Wrapper<u8, u8>;
+/// type B = Wrapper<u8, i8>;
+///
+/// // Not necessarily identical even though `u8` and `i8` have the same layout!
+/// // assert_eq!(mem::offset_of!(A, 1), mem::offset_of!(B, 1));
+///
+/// #[repr(transparent)]
+/// struct U8(u8);
+///
+/// type C = Wrapper<u8, U8>;
+///
+/// // Not necessarily identical even though `u8` and `U8` have the same layout!
+/// // assert_eq!(mem::offset_of!(A, 1), mem::offset_of!(C, 1));
///
-/// Nested field accesses may be used, but not array indexes like in `C`'s `offsetof`.
+/// struct Empty<T>(core::marker::PhantomData<T>);
///
-/// Note that the output of this macro is not stable, except for `#[repr(C)]` types.
+/// // Not necessarily identical even though `PhantomData` always has the same layout!
+/// // assert!(mem::offset_of!(Empty<u8>, 0), mem::offset_of!(Empty<i8>, 0));
+/// ```
+///
+/// [explicit `repr` attribute]: https://doc.rust-lang.org/reference/type-layout.html#representations
///
/// # Examples
///
/// ```
/// #![feature(offset_of)]
+/// # #![cfg_attr(not(bootstrap), feature(offset_of_enum))]
///
/// use std::mem;
/// #[repr(C)]
@@ -1322,6 +1383,20 @@ impl<T> SizedTypeProperties for T {}
/// struct NestedB(u8);
///
/// assert_eq!(mem::offset_of!(NestedA, b.0), 0);
+///
+/// #[repr(u8)]
+/// enum Enum {
+/// A(u8, u16),
+/// B { one: u8, two: u16 },
+/// }
+///
+/// # #[cfg(not(bootstrap))]
+/// assert_eq!(mem::offset_of!(Enum, A.0), 1);
+/// # #[cfg(not(bootstrap))]
+/// assert_eq!(mem::offset_of!(Enum, B.two), 2);
+///
+/// # #[cfg(not(bootstrap))]
+/// assert_eq!(mem::offset_of!(Option<&u8>, Some.0), 0);
/// ```
#[unstable(feature = "offset_of", issue = "106655")]
#[allow_internal_unstable(builtin_syntax, hint_must_use)]
diff --git a/library/core/src/net/ip_addr.rs b/library/core/src/net/ip_addr.rs
index 6a36dfec0..77f85215d 100644
--- a/library/core/src/net/ip_addr.rs
+++ b/library/core/src/net/ip_addr.rs
@@ -1,6 +1,8 @@
use crate::cmp::Ordering;
use crate::fmt::{self, Write};
+use crate::iter;
use crate::mem::transmute;
+use crate::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, Not};
use super::display_buffer::DisplayBuffer;
@@ -410,9 +412,12 @@ impl IpAddr {
/// # Examples
///
/// ```
- /// #![feature(ip)]
/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
///
+ /// let localhost_v4 = Ipv4Addr::new(127, 0, 0, 1);
+ ///
+ /// assert_eq!(IpAddr::V4(localhost_v4).to_canonical(), localhost_v4);
+ /// assert_eq!(IpAddr::V6(localhost_v4.to_ipv6_mapped()).to_canonical(), localhost_v4);
/// assert_eq!(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)).to_canonical().is_loopback(), true);
/// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1)).is_loopback(), false);
/// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1)).to_canonical().is_loopback(), true);
@@ -420,11 +425,11 @@ impl IpAddr {
#[inline]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
- #[rustc_const_unstable(feature = "const_ip", issue = "76205")]
- #[unstable(feature = "ip", issue = "27709")]
+ #[stable(feature = "ip_to_canonical", since = "1.75.0")]
+ #[rustc_const_stable(feature = "ip_to_canonical", since = "1.75.0")]
pub const fn to_canonical(&self) -> IpAddr {
match self {
- &v4 @ IpAddr::V4(_) => v4,
+ IpAddr::V4(_) => *self,
IpAddr::V6(v6) => v6.to_canonical(),
}
}
@@ -1748,11 +1753,11 @@ impl Ipv6Addr {
/// Some(Ipv4Addr::new(192, 10, 2, 255)));
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).to_ipv4_mapped(), None);
/// ```
- #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
- #[stable(feature = "ipv6_to_ipv4_mapped", since = "1.63.0")]
+ #[inline]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
- #[inline]
+ #[stable(feature = "ipv6_to_ipv4_mapped", since = "1.63.0")]
+ #[rustc_const_stable(feature = "const_ipv6_to_ipv4_mapped", since = "1.75.0")]
pub const fn to_ipv4_mapped(&self) -> Option<Ipv4Addr> {
match self.octets() {
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, a, b, c, d] => {
@@ -1817,11 +1822,11 @@ impl Ipv6Addr {
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1).is_loopback(), false);
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1).to_canonical().is_loopback(), true);
/// ```
- #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
- #[unstable(feature = "ip", issue = "27709")]
+ #[inline]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
- #[inline]
+ #[stable(feature = "ip_to_canonical", since = "1.75.0")]
+ #[rustc_const_stable(feature = "ip_to_canonical", since = "1.75.0")]
pub const fn to_canonical(&self) -> IpAddr {
if let Some(mapped) = self.to_ipv4_mapped() {
return IpAddr::V4(mapped);
@@ -2122,3 +2127,132 @@ impl From<[u16; 8]> for IpAddr {
IpAddr::V6(Ipv6Addr::from(segments))
}
}
+
+#[stable(feature = "ip_bitops", since = "1.75.0")]
+impl Not for Ipv4Addr {
+ type Output = Ipv4Addr;
+
+ #[inline]
+ fn not(mut self) -> Ipv4Addr {
+ for octet in &mut self.octets {
+ *octet = !*octet;
+ }
+ self
+ }
+}
+
+#[stable(feature = "ip_bitops", since = "1.75.0")]
+impl Not for &'_ Ipv4Addr {
+ type Output = Ipv4Addr;
+
+ #[inline]
+ fn not(self) -> Ipv4Addr {
+ !*self
+ }
+}
+
+#[stable(feature = "ip_bitops", since = "1.75.0")]
+impl Not for Ipv6Addr {
+ type Output = Ipv6Addr;
+
+ #[inline]
+ fn not(mut self) -> Ipv6Addr {
+ for octet in &mut self.octets {
+ *octet = !*octet;
+ }
+ self
+ }
+}
+
+#[stable(feature = "ip_bitops", since = "1.75.0")]
+impl Not for &'_ Ipv6Addr {
+ type Output = Ipv6Addr;
+
+ #[inline]
+ fn not(self) -> Ipv6Addr {
+ !*self
+ }
+}
+
+macro_rules! bitop_impls {
+ ($(
+ $(#[$attr:meta])*
+ impl ($BitOp:ident, $BitOpAssign:ident) for $ty:ty = ($bitop:ident, $bitop_assign:ident);
+ )*) => {
+ $(
+ $(#[$attr])*
+ impl $BitOpAssign for $ty {
+ fn $bitop_assign(&mut self, rhs: $ty) {
+ for (lhs, rhs) in iter::zip(&mut self.octets, rhs.octets) {
+ lhs.$bitop_assign(rhs);
+ }
+ }
+ }
+
+ $(#[$attr])*
+ impl $BitOpAssign<&'_ $ty> for $ty {
+ fn $bitop_assign(&mut self, rhs: &'_ $ty) {
+ self.$bitop_assign(*rhs);
+ }
+ }
+
+ $(#[$attr])*
+ impl $BitOp for $ty {
+ type Output = $ty;
+
+ #[inline]
+ fn $bitop(mut self, rhs: $ty) -> $ty {
+ self.$bitop_assign(rhs);
+ self
+ }
+ }
+
+ $(#[$attr])*
+ impl $BitOp<&'_ $ty> for $ty {
+ type Output = $ty;
+
+ #[inline]
+ fn $bitop(mut self, rhs: &'_ $ty) -> $ty {
+ self.$bitop_assign(*rhs);
+ self
+ }
+ }
+
+ $(#[$attr])*
+ impl $BitOp<$ty> for &'_ $ty {
+ type Output = $ty;
+
+ #[inline]
+ fn $bitop(self, rhs: $ty) -> $ty {
+ let mut lhs = *self;
+ lhs.$bitop_assign(rhs);
+ lhs
+ }
+ }
+
+ $(#[$attr])*
+ impl $BitOp<&'_ $ty> for &'_ $ty {
+ type Output = $ty;
+
+ #[inline]
+ fn $bitop(self, rhs: &'_ $ty) -> $ty {
+ let mut lhs = *self;
+ lhs.$bitop_assign(*rhs);
+ lhs
+ }
+ }
+ )*
+ };
+}
+
+bitop_impls! {
+ #[stable(feature = "ip_bitops", since = "1.75.0")]
+ impl (BitAnd, BitAndAssign) for Ipv4Addr = (bitand, bitand_assign);
+ #[stable(feature = "ip_bitops", since = "1.75.0")]
+ impl (BitOr, BitOrAssign) for Ipv4Addr = (bitor, bitor_assign);
+
+ #[stable(feature = "ip_bitops", since = "1.75.0")]
+ impl (BitAnd, BitAndAssign) for Ipv6Addr = (bitand, bitand_assign);
+ #[stable(feature = "ip_bitops", since = "1.75.0")]
+ impl (BitOr, BitOrAssign) for Ipv6Addr = (bitor, bitor_assign);
+}
diff --git a/library/core/src/net/socket_addr.rs b/library/core/src/net/socket_addr.rs
index 8396aecf9..551162858 100644
--- a/library/core/src/net/socket_addr.rs
+++ b/library/core/src/net/socket_addr.rs
@@ -1,6 +1,4 @@
-use crate::cmp::Ordering;
use crate::fmt::{self, Write};
-use crate::hash;
use crate::net::{IpAddr, Ipv4Addr, Ipv6Addr};
use super::display_buffer::DisplayBuffer;
@@ -63,7 +61,7 @@ pub enum SocketAddr {
/// assert_eq!(socket.ip(), &Ipv4Addr::new(127, 0, 0, 1));
/// assert_eq!(socket.port(), 8080);
/// ```
-#[derive(Copy, Clone, Eq, PartialEq)]
+#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct SocketAddrV4 {
ip: Ipv4Addr,
@@ -96,7 +94,7 @@ pub struct SocketAddrV4 {
/// assert_eq!(socket.ip(), &Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1));
/// assert_eq!(socket.port(), 8080);
/// ```
-#[derive(Copy, Clone, Eq, PartialEq)]
+#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct SocketAddrV6 {
ip: Ipv6Addr,
@@ -644,48 +642,3 @@ impl fmt::Debug for SocketAddrV6 {
fmt::Display::fmt(self, fmt)
}
}
-
-#[stable(feature = "socketaddr_ordering", since = "1.45.0")]
-impl PartialOrd for SocketAddrV4 {
- #[inline]
- fn partial_cmp(&self, other: &SocketAddrV4) -> Option<Ordering> {
- Some(self.cmp(other))
- }
-}
-
-#[stable(feature = "socketaddr_ordering", since = "1.45.0")]
-impl PartialOrd for SocketAddrV6 {
- #[inline]
- fn partial_cmp(&self, other: &SocketAddrV6) -> Option<Ordering> {
- Some(self.cmp(other))
- }
-}
-
-#[stable(feature = "socketaddr_ordering", since = "1.45.0")]
-impl Ord for SocketAddrV4 {
- #[inline]
- fn cmp(&self, other: &SocketAddrV4) -> Ordering {
- self.ip().cmp(other.ip()).then(self.port().cmp(&other.port()))
- }
-}
-
-#[stable(feature = "socketaddr_ordering", since = "1.45.0")]
-impl Ord for SocketAddrV6 {
- #[inline]
- fn cmp(&self, other: &SocketAddrV6) -> Ordering {
- self.ip().cmp(other.ip()).then(self.port().cmp(&other.port()))
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl hash::Hash for SocketAddrV4 {
- fn hash<H: hash::Hasher>(&self, s: &mut H) {
- (self.port, self.ip).hash(s)
- }
-}
-#[stable(feature = "rust1", since = "1.0.0")]
-impl hash::Hash for SocketAddrV6 {
- fn hash<H: hash::Hasher>(&self, s: &mut H) {
- (self.port, &self.ip, self.flowinfo, self.scope_id).hash(s)
- }
-}
diff --git a/library/core/src/num/f32.rs b/library/core/src/num/f32.rs
index 290f649f9..f60626b00 100644
--- a/library/core/src/num/f32.rs
+++ b/library/core/src/num/f32.rs
@@ -377,6 +377,13 @@ impl f32 {
pub const MANTISSA_DIGITS: u32 = 24;
/// Approximate number of significant digits in base 10.
+ ///
+ /// This is the maximum <i>x</i> such that any decimal number with <i>x</i>
+ /// significant digits can be converted to `f32` and back without loss.
+ ///
+ /// Equal to floor(log<sub>10</sub>&nbsp;2<sup>[`MANTISSA_DIGITS`]&nbsp;&minus;&nbsp;1</sup>).
+ ///
+ /// [`MANTISSA_DIGITS`]: f32::MANTISSA_DIGITS
#[stable(feature = "assoc_int_consts", since = "1.43.0")]
pub const DIGITS: u32 = 6;
@@ -384,31 +391,62 @@ impl f32 {
///
/// This is the difference between `1.0` and the next larger representable number.
///
+ /// Equal to 2<sup>1&nbsp;&minus;&nbsp;[`MANTISSA_DIGITS`]</sup>.
+ ///
/// [Machine epsilon]: https://en.wikipedia.org/wiki/Machine_epsilon
+ /// [`MANTISSA_DIGITS`]: f32::MANTISSA_DIGITS
#[stable(feature = "assoc_int_consts", since = "1.43.0")]
pub const EPSILON: f32 = 1.19209290e-07_f32;
/// Smallest finite `f32` value.
+ ///
+ /// Equal to &minus;[`MAX`].
+ ///
+ /// [`MAX`]: f32::MAX
#[stable(feature = "assoc_int_consts", since = "1.43.0")]
pub const MIN: f32 = -3.40282347e+38_f32;
/// Smallest positive normal `f32` value.
+ ///
+ /// Equal to 2<sup>[`MIN_EXP`]&nbsp;&minus;&nbsp;1</sup>.
+ ///
+ /// [`MIN_EXP`]: f32::MIN_EXP
#[stable(feature = "assoc_int_consts", since = "1.43.0")]
pub const MIN_POSITIVE: f32 = 1.17549435e-38_f32;
/// Largest finite `f32` value.
+ ///
+ /// Equal to
+ /// (1&nbsp;&minus;&nbsp;2<sup>&minus;[`MANTISSA_DIGITS`]</sup>)&nbsp;2<sup>[`MAX_EXP`]</sup>.
+ ///
+ /// [`MANTISSA_DIGITS`]: f32::MANTISSA_DIGITS
+ /// [`MAX_EXP`]: f32::MAX_EXP
#[stable(feature = "assoc_int_consts", since = "1.43.0")]
pub const MAX: f32 = 3.40282347e+38_f32;
/// One greater than the minimum possible normal power of 2 exponent.
+ ///
+ /// If <i>x</i>&nbsp;=&nbsp;`MIN_EXP`, then normal numbers
+ /// ≥&nbsp;0.5&nbsp;×&nbsp;2<sup><i>x</i></sup>.
#[stable(feature = "assoc_int_consts", since = "1.43.0")]
pub const MIN_EXP: i32 = -125;
/// Maximum possible power of 2 exponent.
+ ///
+ /// If <i>x</i>&nbsp;=&nbsp;`MAX_EXP`, then normal numbers
+ /// &lt;&nbsp;1&nbsp;×&nbsp;2<sup><i>x</i></sup>.
#[stable(feature = "assoc_int_consts", since = "1.43.0")]
pub const MAX_EXP: i32 = 128;
- /// Minimum possible normal power of 10 exponent.
+ /// Minimum <i>x</i> for which 10<sup><i>x</i></sup> is normal.
+ ///
+ /// Equal to ceil(log<sub>10</sub>&nbsp;[`MIN_POSITIVE`]).
+ ///
+ /// [`MIN_POSITIVE`]: f32::MIN_POSITIVE
#[stable(feature = "assoc_int_consts", since = "1.43.0")]
pub const MIN_10_EXP: i32 = -37;
- /// Maximum possible power of 10 exponent.
+ /// Maximum <i>x</i> for which 10<sup><i>x</i></sup> is normal.
+ ///
+ /// Equal to floor(log<sub>10</sub>&nbsp;[`MAX`]).
+ ///
+ /// [`MAX`]: f32::MAX
#[stable(feature = "assoc_int_consts", since = "1.43.0")]
pub const MAX_10_EXP: i32 = 38;
@@ -820,7 +858,7 @@ impl f32 {
/// let angle = std::f32::consts::PI;
///
/// let abs_difference = (angle.to_degrees() - 180.0).abs();
- ///
+ /// # #[cfg(any(not(target_arch = "x86"), target_feature = "sse2"))]
/// assert!(abs_difference <= f32::EPSILON);
/// ```
#[must_use = "this returns the result of the operation, \
diff --git a/library/core/src/num/f64.rs b/library/core/src/num/f64.rs
index 7569d2cd6..0a87021d8 100644
--- a/library/core/src/num/f64.rs
+++ b/library/core/src/num/f64.rs
@@ -376,6 +376,13 @@ impl f64 {
#[stable(feature = "assoc_int_consts", since = "1.43.0")]
pub const MANTISSA_DIGITS: u32 = 53;
/// Approximate number of significant digits in base 10.
+ ///
+ /// This is the maximum <i>x</i> such that any decimal number with <i>x</i>
+ /// significant digits can be converted to `f64` and back without loss.
+ ///
+ /// Equal to floor(log<sub>10</sub>&nbsp;2<sup>[`MANTISSA_DIGITS`]&nbsp;&minus;&nbsp;1</sup>).
+ ///
+ /// [`MANTISSA_DIGITS`]: f64::MANTISSA_DIGITS
#[stable(feature = "assoc_int_consts", since = "1.43.0")]
pub const DIGITS: u32 = 15;
@@ -383,31 +390,62 @@ impl f64 {
///
/// This is the difference between `1.0` and the next larger representable number.
///
+ /// Equal to 2<sup>1&nbsp;&minus;&nbsp;[`MANTISSA_DIGITS`]</sup>.
+ ///
/// [Machine epsilon]: https://en.wikipedia.org/wiki/Machine_epsilon
+ /// [`MANTISSA_DIGITS`]: f64::MANTISSA_DIGITS
#[stable(feature = "assoc_int_consts", since = "1.43.0")]
pub const EPSILON: f64 = 2.2204460492503131e-16_f64;
/// Smallest finite `f64` value.
+ ///
+ /// Equal to &minus;[`MAX`].
+ ///
+ /// [`MAX`]: f64::MAX
#[stable(feature = "assoc_int_consts", since = "1.43.0")]
pub const MIN: f64 = -1.7976931348623157e+308_f64;
/// Smallest positive normal `f64` value.
+ ///
+ /// Equal to 2<sup>[`MIN_EXP`]&nbsp;&minus;&nbsp;1</sup>.
+ ///
+ /// [`MIN_EXP`]: f64::MIN_EXP
#[stable(feature = "assoc_int_consts", since = "1.43.0")]
pub const MIN_POSITIVE: f64 = 2.2250738585072014e-308_f64;
/// Largest finite `f64` value.
+ ///
+ /// Equal to
+ /// (1&nbsp;&minus;&nbsp;2<sup>&minus;[`MANTISSA_DIGITS`]</sup>)&nbsp;2<sup>[`MAX_EXP`]</sup>.
+ ///
+ /// [`MANTISSA_DIGITS`]: f64::MANTISSA_DIGITS
+ /// [`MAX_EXP`]: f64::MAX_EXP
#[stable(feature = "assoc_int_consts", since = "1.43.0")]
pub const MAX: f64 = 1.7976931348623157e+308_f64;
/// One greater than the minimum possible normal power of 2 exponent.
+ ///
+ /// If <i>x</i>&nbsp;=&nbsp;`MIN_EXP`, then normal numbers
+ /// ≥&nbsp;0.5&nbsp;×&nbsp;2<sup><i>x</i></sup>.
#[stable(feature = "assoc_int_consts", since = "1.43.0")]
pub const MIN_EXP: i32 = -1021;
/// Maximum possible power of 2 exponent.
+ ///
+ /// If <i>x</i>&nbsp;=&nbsp;`MAX_EXP`, then normal numbers
+ /// &lt;&nbsp;1&nbsp;×&nbsp;2<sup><i>x</i></sup>.
#[stable(feature = "assoc_int_consts", since = "1.43.0")]
pub const MAX_EXP: i32 = 1024;
- /// Minimum possible normal power of 10 exponent.
+ /// Minimum <i>x</i> for which 10<sup><i>x</i></sup> is normal.
+ ///
+ /// Equal to ceil(log<sub>10</sub>&nbsp;[`MIN_POSITIVE`]).
+ ///
+ /// [`MIN_POSITIVE`]: f64::MIN_POSITIVE
#[stable(feature = "assoc_int_consts", since = "1.43.0")]
pub const MIN_10_EXP: i32 = -307;
- /// Maximum possible power of 10 exponent.
+ /// Maximum <i>x</i> for which 10<sup><i>x</i></sup> is normal.
+ ///
+ /// Equal to floor(log<sub>10</sub>&nbsp;[`MAX`]).
+ ///
+ /// [`MAX`]: f64::MAX
#[stable(feature = "assoc_int_consts", since = "1.43.0")]
pub const MAX_10_EXP: i32 = 308;
diff --git a/library/core/src/num/int_macros.rs b/library/core/src/num/int_macros.rs
index 3cbb55af3..fd01f1b26 100644
--- a/library/core/src/num/int_macros.rs
+++ b/library/core/src/num/int_macros.rs
@@ -471,7 +471,7 @@ macro_rules! int_impl {
)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
- #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
+ #[rustc_const_unstable(feature = "unchecked_math", issue = "85122")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn unchecked_add(self, rhs: Self) -> Self {
@@ -539,7 +539,7 @@ macro_rules! int_impl {
)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
- #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
+ #[rustc_const_unstable(feature = "unchecked_math", issue = "85122")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn unchecked_sub(self, rhs: Self) -> Self {
@@ -607,7 +607,7 @@ macro_rules! int_impl {
)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
- #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
+ #[rustc_const_unstable(feature = "unchecked_math", issue = "85122")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn unchecked_mul(self, rhs: Self) -> Self {
@@ -740,6 +740,31 @@ macro_rules! int_impl {
if unlikely!(b) {None} else {Some(a)}
}
+ /// Unchecked negation. Computes `-self`, assuming overflow cannot occur.
+ ///
+ /// # Safety
+ ///
+ /// This results in undefined behavior when
+ #[doc = concat!("`self == ", stringify!($SelfT), "::MIN`,")]
+ /// i.e. when [`checked_neg`] would return `None`.
+ ///
+ #[doc = concat!("[`checked_neg`]: ", stringify!($SelfT), "::checked_neg")]
+ #[unstable(
+ feature = "unchecked_neg",
+ reason = "niche optimization path",
+ issue = "85122",
+ )]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[rustc_const_unstable(feature = "unchecked_neg", issue = "85122")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn unchecked_neg(self) -> Self {
+ // SAFETY: the caller must uphold the safety contract for
+ // `unchecked_neg`.
+ unsafe { intrinsics::unchecked_sub(0, self) }
+ }
+
/// Checked shift left. Computes `self << rhs`, returning `None` if `rhs` is larger
/// than or equal to the number of bits in `self`.
///
@@ -772,13 +797,13 @@ macro_rules! int_impl {
///
#[doc = concat!("[`checked_shl`]: ", stringify!($SelfT), "::checked_shl")]
#[unstable(
- feature = "unchecked_math",
+ feature = "unchecked_shifts",
reason = "niche optimization path",
issue = "85122",
)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
- #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
+ #[rustc_const_unstable(feature = "unchecked_shifts", issue = "85122")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self {
@@ -820,13 +845,13 @@ macro_rules! int_impl {
///
#[doc = concat!("[`checked_shr`]: ", stringify!($SelfT), "::checked_shr")]
#[unstable(
- feature = "unchecked_math",
+ feature = "unchecked_shifts",
reason = "niche optimization path",
issue = "85122",
)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
- #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
+ #[rustc_const_unstable(feature = "unchecked_shifts", issue = "85122")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self {
@@ -1404,7 +1429,7 @@ macro_rules! int_impl {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline(always)]
- #[rustc_allow_const_fn_unstable(const_inherent_unchecked_arith)]
+ #[rustc_allow_const_fn_unstable(unchecked_shifts)]
pub const fn wrapping_shl(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
@@ -1434,7 +1459,7 @@ macro_rules! int_impl {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline(always)]
- #[rustc_allow_const_fn_unstable(const_inherent_unchecked_arith)]
+ #[rustc_allow_const_fn_unstable(unchecked_shifts)]
pub const fn wrapping_shr(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
diff --git a/library/core/src/num/mod.rs b/library/core/src/num/mod.rs
index 8b127132c..2a0b31404 100644
--- a/library/core/src/num/mod.rs
+++ b/library/core/src/num/mod.rs
@@ -114,7 +114,7 @@ macro_rules! midpoint_impl {
without modifying the original"]
#[inline]
pub const fn midpoint(self, rhs: $SelfT) -> $SelfT {
- // Use the well known branchless algorthim from Hacker's Delight to compute
+ // Use the well known branchless algorithm from Hacker's Delight to compute
// `(a + b) / 2` without overflowing: `((a ^ b) >> 1) + (a & b)`.
((self ^ rhs) >> 1) + (self & rhs)
}
@@ -791,7 +791,7 @@ impl u8 {
#[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
#[inline]
pub const fn is_ascii_alphanumeric(&self) -> bool {
- matches!(*self, b'0'..=b'9' | b'A'..=b'Z' | b'a'..=b'z')
+ matches!(*self, b'0'..=b'9') | matches!(*self, b'A'..=b'Z') | matches!(*self, b'a'..=b'z')
}
/// Checks if the value is an ASCII decimal digit:
@@ -894,7 +894,7 @@ impl u8 {
#[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
#[inline]
pub const fn is_ascii_hexdigit(&self) -> bool {
- matches!(*self, b'0'..=b'9' | b'A'..=b'F' | b'a'..=b'f')
+ matches!(*self, b'0'..=b'9') | matches!(*self, b'A'..=b'F') | matches!(*self, b'a'..=b'f')
}
/// Checks if the value is an ASCII punctuation character:
@@ -932,7 +932,10 @@ impl u8 {
#[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
#[inline]
pub const fn is_ascii_punctuation(&self) -> bool {
- matches!(*self, b'!'..=b'/' | b':'..=b'@' | b'['..=b'`' | b'{'..=b'~')
+ matches!(*self, b'!'..=b'/')
+ | matches!(*self, b':'..=b'@')
+ | matches!(*self, b'['..=b'`')
+ | matches!(*self, b'{'..=b'~')
}
/// Checks if the value is an ASCII graphic character:
diff --git a/library/core/src/num/saturating.rs b/library/core/src/num/saturating.rs
index d9ccc73c4..d040539eb 100644
--- a/library/core/src/num/saturating.rs
+++ b/library/core/src/num/saturating.rs
@@ -35,9 +35,7 @@ use crate::ops::{Sub, SubAssign};
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Default, Hash)]
#[repr(transparent)]
#[rustc_diagnostic_item = "Saturating"]
-pub struct Saturating<T>(
- #[stable(feature = "saturating_int_impl", since = "1.74.0")] pub T,
-);
+pub struct Saturating<T>(#[stable(feature = "saturating_int_impl", since = "1.74.0")] pub T);
#[stable(feature = "saturating_int_impl", since = "1.74.0")]
impl<T: fmt::Debug> fmt::Debug for Saturating<T> {
diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs
index a9c5312a1..11a53aaf1 100644
--- a/library/core/src/num/uint_macros.rs
+++ b/library/core/src/num/uint_macros.rs
@@ -479,7 +479,7 @@ macro_rules! uint_impl {
)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
- #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
+ #[rustc_const_unstable(feature = "unchecked_math", issue = "85122")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn unchecked_add(self, rhs: Self) -> Self {
@@ -548,7 +548,7 @@ macro_rules! uint_impl {
)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
- #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
+ #[rustc_const_unstable(feature = "unchecked_math", issue = "85122")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn unchecked_sub(self, rhs: Self) -> Self {
@@ -595,7 +595,7 @@ macro_rules! uint_impl {
)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
- #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
+ #[rustc_const_unstable(feature = "unchecked_math", issue = "85122")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn unchecked_mul(self, rhs: Self) -> Self {
@@ -926,13 +926,13 @@ macro_rules! uint_impl {
///
#[doc = concat!("[`checked_shl`]: ", stringify!($SelfT), "::checked_shl")]
#[unstable(
- feature = "unchecked_math",
+ feature = "unchecked_shifts",
reason = "niche optimization path",
issue = "85122",
)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
- #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
+ #[rustc_const_unstable(feature = "unchecked_shifts", issue = "85122")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self {
@@ -974,13 +974,13 @@ macro_rules! uint_impl {
///
#[doc = concat!("[`checked_shr`]: ", stringify!($SelfT), "::checked_shr")]
#[unstable(
- feature = "unchecked_math",
+ feature = "unchecked_shifts",
reason = "niche optimization path",
issue = "85122",
)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
- #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
+ #[rustc_const_unstable(feature = "unchecked_shifts", issue = "85122")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self {
@@ -1418,7 +1418,7 @@ macro_rules! uint_impl {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline(always)]
- #[rustc_allow_const_fn_unstable(const_inherent_unchecked_arith)]
+ #[rustc_allow_const_fn_unstable(unchecked_shifts)]
pub const fn wrapping_shl(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
@@ -1451,7 +1451,7 @@ macro_rules! uint_impl {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline(always)]
- #[rustc_allow_const_fn_unstable(const_inherent_unchecked_arith)]
+ #[rustc_allow_const_fn_unstable(unchecked_shifts)]
pub const fn wrapping_shr(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
diff --git a/library/core/src/ops/coroutine.rs b/library/core/src/ops/coroutine.rs
new file mode 100644
index 000000000..cd5ca988f
--- /dev/null
+++ b/library/core/src/ops/coroutine.rs
@@ -0,0 +1,139 @@
+use crate::marker::Unpin;
+use crate::pin::Pin;
+
+/// The result of a coroutine resumption.
+///
+/// This enum is returned from the `Coroutine::resume` method and indicates the
+/// possible return values of a coroutine. Currently this corresponds to either
+/// a suspension point (`Yielded`) or a termination point (`Complete`).
+#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
+#[cfg_attr(bootstrap, lang = "generator_state")]
+#[cfg_attr(not(bootstrap), lang = "coroutine_state")]
+#[unstable(feature = "coroutine_trait", issue = "43122")]
+pub enum CoroutineState<Y, R> {
+ /// The coroutine suspended with a value.
+ ///
+ /// This state indicates that a coroutine has been suspended, and typically
+ /// corresponds to a `yield` statement. The value provided in this variant
+ /// corresponds to the expression passed to `yield` and allows coroutines to
+ /// provide a value each time they yield.
+ Yielded(Y),
+
+ /// The coroutine completed with a return value.
+ ///
+ /// This state indicates that a coroutine has finished execution with the
+ /// provided value. Once a coroutine has returned `Complete` it is
+ /// considered a programmer error to call `resume` again.
+ Complete(R),
+}
+
+/// The trait implemented by builtin coroutine types.
+///
+/// Coroutines are currently an
+/// experimental language feature in Rust. Added in [RFC 2033] coroutines are
+/// currently intended to primarily provide a building block for async/await
+/// syntax but will likely extend to also providing an ergonomic definition for
+/// iterators and other primitives.
+///
+/// The syntax and semantics for coroutines is unstable and will require a
+/// further RFC for stabilization. At this time, though, the syntax is
+/// closure-like:
+///
+/// ```rust
+/// #![cfg_attr(bootstrap, feature(generators))]
+/// #![cfg_attr(not(bootstrap), feature(coroutines))]
+/// #![feature(coroutine_trait)]
+///
+/// use std::ops::{Coroutine, CoroutineState};
+/// use std::pin::Pin;
+///
+/// fn main() {
+/// let mut coroutine = || {
+/// yield 1;
+/// "foo"
+/// };
+///
+/// match Pin::new(&mut coroutine).resume(()) {
+/// CoroutineState::Yielded(1) => {}
+/// _ => panic!("unexpected return from resume"),
+/// }
+/// match Pin::new(&mut coroutine).resume(()) {
+/// CoroutineState::Complete("foo") => {}
+/// _ => panic!("unexpected return from resume"),
+/// }
+/// }
+/// ```
+///
+/// More documentation of coroutines can be found in the [unstable book].
+///
+/// [RFC 2033]: https://github.com/rust-lang/rfcs/pull/2033
+/// [unstable book]: ../../unstable-book/language-features/coroutines.html
+#[cfg_attr(bootstrap, lang = "generator")]
+#[cfg_attr(not(bootstrap), lang = "coroutine")]
+#[unstable(feature = "coroutine_trait", issue = "43122")]
+#[fundamental]
+pub trait Coroutine<R = ()> {
+ /// The type of value this coroutine yields.
+ ///
+ /// This associated type corresponds to the `yield` expression and the
+ /// values which are allowed to be returned each time a coroutine yields.
+ /// For example an iterator-as-a-coroutine would likely have this type as
+ /// `T`, the type being iterated over.
+ type Yield;
+
+ /// The type of value this coroutine returns.
+ ///
+ /// This corresponds to the type returned from a coroutine either with a
+ /// `return` statement or implicitly as the last expression of a coroutine
+ /// literal. For example futures would use this as `Result<T, E>` as it
+ /// represents a completed future.
+ type Return;
+
+ /// Resumes the execution of this coroutine.
+ ///
+ /// This function will resume execution of the coroutine or start execution
+ /// if it hasn't already. This call will return back into the coroutine's
+ /// last suspension point, resuming execution from the latest `yield`. The
+ /// coroutine will continue executing until it either yields or returns, at
+ /// which point this function will return.
+ ///
+ /// # Return value
+ ///
+ /// The `CoroutineState` enum returned from this function indicates what
+ /// state the coroutine is in upon returning. If the `Yielded` variant is
+ /// returned then the coroutine has reached a suspension point and a value
+ /// has been yielded out. Coroutines in this state are available for
+ /// resumption at a later point.
+ ///
+ /// If `Complete` is returned then the coroutine has completely finished
+ /// with the value provided. It is invalid for the coroutine to be resumed
+ /// again.
+ ///
+ /// # Panics
+ ///
+ /// This function may panic if it is called after the `Complete` variant has
+ /// been returned previously. While coroutine literals in the language are
+ /// guaranteed to panic on resuming after `Complete`, this is not guaranteed
+ /// for all implementations of the `Coroutine` trait.
+ fn resume(self: Pin<&mut Self>, arg: R) -> CoroutineState<Self::Yield, Self::Return>;
+}
+
+#[unstable(feature = "coroutine_trait", issue = "43122")]
+impl<G: ?Sized + Coroutine<R>, R> Coroutine<R> for Pin<&mut G> {
+ type Yield = G::Yield;
+ type Return = G::Return;
+
+ fn resume(mut self: Pin<&mut Self>, arg: R) -> CoroutineState<Self::Yield, Self::Return> {
+ G::resume((*self).as_mut(), arg)
+ }
+}
+
+#[unstable(feature = "coroutine_trait", issue = "43122")]
+impl<G: ?Sized + Coroutine<R> + Unpin, R> Coroutine<R> for &mut G {
+ type Yield = G::Yield;
+ type Return = G::Return;
+
+ fn resume(mut self: Pin<&mut Self>, arg: R) -> CoroutineState<Self::Yield, Self::Return> {
+ G::resume(Pin::new(&mut *self), arg)
+ }
+}
diff --git a/library/core/src/ops/deref.rs b/library/core/src/ops/deref.rs
index 911761c6e..99adbb915 100644
--- a/library/core/src/ops/deref.rs
+++ b/library/core/src/ops/deref.rs
@@ -3,40 +3,107 @@
/// In addition to being used for explicit dereferencing operations with the
/// (unary) `*` operator in immutable contexts, `Deref` is also used implicitly
/// by the compiler in many circumstances. This mechanism is called
-/// ['`Deref` coercion'][more]. In mutable contexts, [`DerefMut`] is used.
+/// ["`Deref` coercion"][coercion]. In mutable contexts, [`DerefMut`] is used and
+/// mutable deref coercion similarly occurs.
///
-/// Implementing `Deref` for smart pointers makes accessing the data behind them
-/// convenient, which is why they implement `Deref`. On the other hand, the
-/// rules regarding `Deref` and [`DerefMut`] were designed specifically to
-/// accommodate smart pointers. Because of this, **`Deref` should only be
-/// implemented for smart pointers** to avoid confusion.
+/// **Warning:** Deref coercion is a powerful language feature which has
+/// far-reaching implications for every type that implements `Deref`. The
+/// compiler will silently insert calls to `Deref::deref`. For this reason, one
+/// should be careful about implementing `Deref` and only do so when deref
+/// coercion is desirable. See [below][implementing] for advice on when this is
+/// typically desirable or undesirable.
///
-/// For similar reasons, **this trait should never fail**. Failure during
-/// dereferencing can be extremely confusing when `Deref` is invoked implicitly.
+/// Types that implement `Deref` or `DerefMut` are often called "smart
+/// pointers" and the mechanism of deref coercion has been specifically designed
+/// to facilitate the pointer-like behaviour that the name suggests. Often, the
+/// purpose of a "smart pointer" type is to change the ownership semantics
+/// of a contained value (for example, [`Rc`][rc] or [`Cow`][cow]) or the
+/// storage semantics of a contained value (for example, [`Box`][box]).
///
-/// Violating these requirements is a logic error. The behavior resulting from a logic error is not
-/// specified, but users of the trait must ensure that such logic errors do *not* result in
-/// undefined behavior. This means that `unsafe` code **must not** rely on the correctness of this
-/// method.
+/// # Deref coercion
///
-/// # More on `Deref` coercion
+/// If `T` implements `Deref<Target = U>`, and `v` is a value of type `T`, then:
///
-/// If `T` implements `Deref<Target = U>`, and `x` is a value of type `T`, then:
-///
-/// * In immutable contexts, `*x` (where `T` is neither a reference nor a raw pointer)
-/// is equivalent to `*Deref::deref(&x)`.
+/// * In immutable contexts, `*v` (where `T` is neither a reference nor a raw
+/// pointer) is equivalent to `*Deref::deref(&v)`.
/// * Values of type `&T` are coerced to values of type `&U`
-/// * `T` implicitly implements all the (immutable) methods of the type `U`.
+/// * `T` implicitly implements all the methods of the type `U` which take the
+/// `&self` receiver.
///
/// For more details, visit [the chapter in *The Rust Programming Language*][book]
/// as well as the reference sections on [the dereference operator][ref-deref-op],
-/// [method resolution] and [type coercions].
+/// [method resolution], and [type coercions].
+///
+/// # When to implement `Deref` or `DerefMut`
+///
+/// The same advice applies to both deref traits. In general, deref traits
+/// **should** be implemented if:
+///
+/// 1. a value of the type transparently behaves like a value of the target
+/// type;
+/// 1. the implementation of the deref function is cheap; and
+/// 1. users of the type will not be surprised by any deref coercion behaviour.
+///
+/// In general, deref traits **should not** be implemented if:
+///
+/// 1. the deref implementations could fail unexpectedly; or
+/// 1. the type has methods that are likely to collide with methods on the
+/// target type; or
+/// 1. committing to deref coercion as part of the public API is not desirable.
+///
+/// Note that there's a large difference between implementing deref traits
+/// generically over many target types, and doing so only for specific target
+/// types.
+///
+/// Generic implementations, such as for [`Box<T>`][box] (which is generic over
+/// every type and dereferences to `T`) should be careful to provide few or no
+/// methods, since the target type is unknown and therefore every method could
+/// collide with one on the target type, causing confusion for users.
+/// `impl<T> Box<T>` has no methods (though several associated functions),
+/// partly for this reason.
+///
+/// Specific implementations, such as for [`String`][string] (whose `Deref`
+/// implementation has `Target = str`) can have many methods, since avoiding
+/// collision is much easier. `String` and `str` both have many methods, and
+/// `String` additionally behaves as if it has every method of `str` because of
+/// deref coercion. The implementing type may also be generic while the
+/// implementation is still specific in this sense; for example, [`Vec<T>`][vec]
+/// dereferences to `[T]`, so methods of `T` are not applicable.
+///
+/// Consider also that deref coercion means that deref traits are a much larger
+/// part of a type's public API than any other trait as it is implicitly called
+/// by the compiler. Therefore, it is advisable to consider whether this is
+/// something you are comfortable supporting as a public API.
+///
+/// The [`AsRef`] and [`Borrow`][core::borrow::Borrow] traits have very similar
+/// signatures to `Deref`. It may be desirable to implement either or both of
+/// these, whether in addition to or rather than deref traits. See their
+/// documentation for details.
+///
+/// # Fallibility
+///
+/// **This trait's method should never unexpectedly fail**. Deref coercion means
+/// the compiler will often insert calls to `Deref::deref` implicitly. Failure
+/// during dereferencing can be extremely confusing when `Deref` is invoked
+/// implicitly. In the majority of uses it should be infallible, though it may
+/// be acceptable to panic if the type is misused through programmer error, for
+/// example.
+///
+/// However, infallibility is not enforced and therefore not guaranteed.
+/// As such, `unsafe` code should not rely on infallibility in general for
+/// soundness.
///
/// [book]: ../../book/ch15-02-deref.html
-/// [more]: #more-on-deref-coercion
+/// [coercion]: #deref-coercion
+/// [implementing]: #when-to-implement-deref-or-derefmut
/// [ref-deref-op]: ../../reference/expressions/operator-expr.html#the-dereference-operator
/// [method resolution]: ../../reference/expressions/method-call-expr.html
/// [type coercions]: ../../reference/type-coercions.html
+/// [box]: ../../alloc/boxed/struct.Box.html
+/// [string]: ../../alloc/string/struct.String.html
+/// [vec]: ../../alloc/vec/struct.Vec.html
+/// [rc]: ../../alloc/rc/struct.Rc.html
+/// [cow]: ../../alloc/borrow/enum.Cow.html
///
/// # Examples
///
@@ -107,30 +174,29 @@ impl<T: ?Sized> Deref for &mut T {
/// In addition to being used for explicit dereferencing operations with the
/// (unary) `*` operator in mutable contexts, `DerefMut` is also used implicitly
/// by the compiler in many circumstances. This mechanism is called
-/// ['`Deref` coercion'][more]. In immutable contexts, [`Deref`] is used.
-///
-/// Implementing `DerefMut` for smart pointers makes mutating the data behind
-/// them convenient, which is why they implement `DerefMut`. On the other hand,
-/// the rules regarding [`Deref`] and `DerefMut` were designed specifically to
-/// accommodate smart pointers. Because of this, **`DerefMut` should only be
-/// implemented for smart pointers** to avoid confusion.
+/// ["mutable deref coercion"][coercion]. In immutable contexts, [`Deref`] is used.
///
-/// For similar reasons, **this trait should never fail**. Failure during
-/// dereferencing can be extremely confusing when `DerefMut` is invoked
-/// implicitly.
+/// **Warning:** Deref coercion is a powerful language feature which has
+/// far-reaching implications for every type that implements `DerefMut`. The
+/// compiler will silently insert calls to `DerefMut::deref_mut`. For this
+/// reason, one should be careful about implementing `DerefMut` and only do so
+/// when mutable deref coercion is desirable. See [the `Deref` docs][implementing]
+/// for advice on when this is typically desirable or undesirable.
///
-/// Violating these requirements is a logic error. The behavior resulting from a logic error is not
-/// specified, but users of the trait must ensure that such logic errors do *not* result in
-/// undefined behavior. This means that `unsafe` code **must not** rely on the correctness of this
-/// method.
+/// Types that implement `DerefMut` or `Deref` are often called "smart
+/// pointers" and the mechanism of deref coercion has been specifically designed
+/// to facilitate the pointer-like behaviour that the name suggests. Often, the
+/// purpose of a "smart pointer" type is to change the ownership semantics
+/// of a contained value (for example, [`Rc`][rc] or [`Cow`][cow]) or the
+/// storage semantics of a contained value (for example, [`Box`][box]).
///
-/// # More on `Deref` coercion
+/// # Mutable deref coercion
///
-/// If `T` implements `DerefMut<Target = U>`, and `x` is a value of type `T`,
+/// If `T` implements `DerefMut<Target = U>`, and `v` is a value of type `T`,
/// then:
///
-/// * In mutable contexts, `*x` (where `T` is neither a reference nor a raw pointer)
-/// is equivalent to `*DerefMut::deref_mut(&mut x)`.
+/// * In mutable contexts, `*v` (where `T` is neither a reference nor a raw pointer)
+/// is equivalent to `*DerefMut::deref_mut(&mut v)`.
/// * Values of type `&mut T` are coerced to values of type `&mut U`
/// * `T` implicitly implements all the (mutable) methods of the type `U`.
///
@@ -138,11 +204,29 @@ impl<T: ?Sized> Deref for &mut T {
/// as well as the reference sections on [the dereference operator][ref-deref-op],
/// [method resolution] and [type coercions].
///
+/// # Fallibility
+///
+/// **This trait's method should never unexpectedly fail**. Deref coercion means
+/// the compiler will often insert calls to `DerefMut::deref_mut` implicitly.
+/// Failure during dereferencing can be extremely confusing when `DerefMut` is
+/// invoked implicitly. In the majority of uses it should be infallible, though
+/// it may be acceptable to panic if the type is misused through programmer
+/// error, for example.
+///
+/// However, infallibility is not enforced and therefore not guaranteed.
+/// As such, `unsafe` code should not rely on infallibility in general for
+/// soundness.
+///
/// [book]: ../../book/ch15-02-deref.html
-/// [more]: #more-on-deref-coercion
+/// [coercion]: #mutable-deref-coercion
+/// [implementing]: Deref#when-to-implement-deref-or-derefmut
/// [ref-deref-op]: ../../reference/expressions/operator-expr.html#the-dereference-operator
/// [method resolution]: ../../reference/expressions/method-call-expr.html
/// [type coercions]: ../../reference/type-coercions.html
+/// [box]: ../../alloc/boxed/struct.Box.html
+/// [string]: ../../alloc/string/struct.String.html
+/// [rc]: ../../alloc/rc/struct.Rc.html
+/// [cow]: ../../alloc/borrow/enum.Cow.html
///
/// # Examples
///
@@ -180,6 +264,7 @@ impl<T: ?Sized> Deref for &mut T {
pub trait DerefMut: Deref {
/// Mutably dereferences the value.
#[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_diagnostic_item = "deref_mut_method"]
fn deref_mut(&mut self) -> &mut Self::Target;
}
diff --git a/library/core/src/ops/function.rs b/library/core/src/ops/function.rs
index 20f0bba4c..51e304dd7 100644
--- a/library/core/src/ops/function.rs
+++ b/library/core/src/ops/function.rs
@@ -56,7 +56,7 @@ use crate::marker::Tuple;
#[lang = "fn"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_paren_sugar]
-#[rustc_on_unimplemented(
+#[cfg_attr(not(bootstrap), rustc_on_unimplemented(
on(
Args = "()",
note = "wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}`"
@@ -67,9 +67,9 @@ use crate::marker::Tuple;
// SAFETY: tidy is not smart enough to tell that the below unsafe block is a string
label = "call the function in a closure: `|| unsafe {{ /* code */ }}`"
),
- message = "expected a `{Fn}<{Args}>` closure, found `{Self}`",
- label = "expected an `Fn<{Args}>` closure, found `{Self}`"
-)]
+ message = "expected a `{Trait}` closure, found `{Self}`",
+ label = "expected an `{Trait}` closure, found `{Self}`"
+))]
#[fundamental] // so that regex can rely that `&str: !FnMut`
#[must_use = "closures are lazy and do nothing unless called"]
// FIXME(effects) #[const_trait]
@@ -143,7 +143,7 @@ pub trait Fn<Args: Tuple>: FnMut<Args> {
#[lang = "fn_mut"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_paren_sugar]
-#[rustc_on_unimplemented(
+#[cfg_attr(not(bootstrap), rustc_on_unimplemented(
on(
Args = "()",
note = "wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}`"
@@ -154,9 +154,9 @@ pub trait Fn<Args: Tuple>: FnMut<Args> {
// SAFETY: tidy is not smart enough to tell that the below unsafe block is a string
label = "call the function in a closure: `|| unsafe {{ /* code */ }}`"
),
- message = "expected a `{FnMut}<{Args}>` closure, found `{Self}`",
- label = "expected an `FnMut<{Args}>` closure, found `{Self}`"
-)]
+ message = "expected a `{Trait}` closure, found `{Self}`",
+ label = "expected an `{Trait}` closure, found `{Self}`"
+))]
#[fundamental] // so that regex can rely that `&str: !FnMut`
#[must_use = "closures are lazy and do nothing unless called"]
// FIXME(effects) #[const_trait]
@@ -222,7 +222,7 @@ pub trait FnMut<Args: Tuple>: FnOnce<Args> {
#[lang = "fn_once"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_paren_sugar]
-#[rustc_on_unimplemented(
+#[cfg_attr(not(bootstrap), rustc_on_unimplemented(
on(
Args = "()",
note = "wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}`"
@@ -233,9 +233,9 @@ pub trait FnMut<Args: Tuple>: FnOnce<Args> {
// SAFETY: tidy is not smart enough to tell that the below unsafe block is a string
label = "call the function in a closure: `|| unsafe {{ /* code */ }}`"
),
- message = "expected a `{FnOnce}<{Args}>` closure, found `{Self}`",
- label = "expected an `FnOnce<{Args}>` closure, found `{Self}`"
-)]
+ message = "expected a `{Trait}` closure, found `{Self}`",
+ label = "expected an `{Trait}` closure, found `{Self}`"
+))]
#[fundamental] // so that regex can rely that `&str: !FnMut`
#[must_use = "closures are lazy and do nothing unless called"]
// FIXME(effects) #[const_trait]
diff --git a/library/core/src/ops/generator.rs b/library/core/src/ops/generator.rs
deleted file mode 100644
index fee4beb1e..000000000
--- a/library/core/src/ops/generator.rs
+++ /dev/null
@@ -1,135 +0,0 @@
-use crate::marker::Unpin;
-use crate::pin::Pin;
-
-/// The result of a generator resumption.
-///
-/// This enum is returned from the `Generator::resume` method and indicates the
-/// possible return values of a generator. Currently this corresponds to either
-/// a suspension point (`Yielded`) or a termination point (`Complete`).
-#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
-#[lang = "generator_state"]
-#[unstable(feature = "generator_trait", issue = "43122")]
-pub enum GeneratorState<Y, R> {
- /// The generator suspended with a value.
- ///
- /// This state indicates that a generator has been suspended, and typically
- /// corresponds to a `yield` statement. The value provided in this variant
- /// corresponds to the expression passed to `yield` and allows generators to
- /// provide a value each time they yield.
- Yielded(Y),
-
- /// The generator completed with a return value.
- ///
- /// This state indicates that a generator has finished execution with the
- /// provided value. Once a generator has returned `Complete` it is
- /// considered a programmer error to call `resume` again.
- Complete(R),
-}
-
-/// The trait implemented by builtin generator types.
-///
-/// Generators, also commonly referred to as coroutines, are currently an
-/// experimental language feature in Rust. Added in [RFC 2033] generators are
-/// currently intended to primarily provide a building block for async/await
-/// syntax but will likely extend to also providing an ergonomic definition for
-/// iterators and other primitives.
-///
-/// The syntax and semantics for generators is unstable and will require a
-/// further RFC for stabilization. At this time, though, the syntax is
-/// closure-like:
-///
-/// ```rust
-/// #![feature(generators, generator_trait)]
-///
-/// use std::ops::{Generator, GeneratorState};
-/// use std::pin::Pin;
-///
-/// fn main() {
-/// let mut generator = || {
-/// yield 1;
-/// "foo"
-/// };
-///
-/// match Pin::new(&mut generator).resume(()) {
-/// GeneratorState::Yielded(1) => {}
-/// _ => panic!("unexpected return from resume"),
-/// }
-/// match Pin::new(&mut generator).resume(()) {
-/// GeneratorState::Complete("foo") => {}
-/// _ => panic!("unexpected return from resume"),
-/// }
-/// }
-/// ```
-///
-/// More documentation of generators can be found in the [unstable book].
-///
-/// [RFC 2033]: https://github.com/rust-lang/rfcs/pull/2033
-/// [unstable book]: ../../unstable-book/language-features/generators.html
-#[lang = "generator"]
-#[unstable(feature = "generator_trait", issue = "43122")]
-#[fundamental]
-pub trait Generator<R = ()> {
- /// The type of value this generator yields.
- ///
- /// This associated type corresponds to the `yield` expression and the
- /// values which are allowed to be returned each time a generator yields.
- /// For example an iterator-as-a-generator would likely have this type as
- /// `T`, the type being iterated over.
- type Yield;
-
- /// The type of value this generator returns.
- ///
- /// This corresponds to the type returned from a generator either with a
- /// `return` statement or implicitly as the last expression of a generator
- /// literal. For example futures would use this as `Result<T, E>` as it
- /// represents a completed future.
- type Return;
-
- /// Resumes the execution of this generator.
- ///
- /// This function will resume execution of the generator or start execution
- /// if it hasn't already. This call will return back into the generator's
- /// last suspension point, resuming execution from the latest `yield`. The
- /// generator will continue executing until it either yields or returns, at
- /// which point this function will return.
- ///
- /// # Return value
- ///
- /// The `GeneratorState` enum returned from this function indicates what
- /// state the generator is in upon returning. If the `Yielded` variant is
- /// returned then the generator has reached a suspension point and a value
- /// has been yielded out. Generators in this state are available for
- /// resumption at a later point.
- ///
- /// If `Complete` is returned then the generator has completely finished
- /// with the value provided. It is invalid for the generator to be resumed
- /// again.
- ///
- /// # Panics
- ///
- /// This function may panic if it is called after the `Complete` variant has
- /// been returned previously. While generator literals in the language are
- /// guaranteed to panic on resuming after `Complete`, this is not guaranteed
- /// for all implementations of the `Generator` trait.
- fn resume(self: Pin<&mut Self>, arg: R) -> GeneratorState<Self::Yield, Self::Return>;
-}
-
-#[unstable(feature = "generator_trait", issue = "43122")]
-impl<G: ?Sized + Generator<R>, R> Generator<R> for Pin<&mut G> {
- type Yield = G::Yield;
- type Return = G::Return;
-
- fn resume(mut self: Pin<&mut Self>, arg: R) -> GeneratorState<Self::Yield, Self::Return> {
- G::resume((*self).as_mut(), arg)
- }
-}
-
-#[unstable(feature = "generator_trait", issue = "43122")]
-impl<G: ?Sized + Generator<R> + Unpin, R> Generator<R> for &mut G {
- type Yield = G::Yield;
- type Return = G::Return;
-
- fn resume(mut self: Pin<&mut Self>, arg: R) -> GeneratorState<Self::Yield, Self::Return> {
- G::resume(Pin::new(&mut *self), arg)
- }
-}
diff --git a/library/core/src/ops/index.rs b/library/core/src/ops/index.rs
index f4649be54..6ceee4637 100644
--- a/library/core/src/ops/index.rs
+++ b/library/core/src/ops/index.rs
@@ -153,7 +153,7 @@ see chapter in The Book <https://doc.rust-lang.org/book/ch08-02-strings.html#ind
see chapter in The Book <https://doc.rust-lang.org/book/ch08-02-strings.html#indexing-into-strings>"
),
on(
- any(_Self = "alloc::string::String", _Self = "std::string::String"),
+ _Self = "alloc::string::String",
note = "you can use `.chars().nth()` or `.bytes().nth()`
see chapter in The Book <https://doc.rust-lang.org/book/ch08-02-strings.html#indexing-into-strings>"
),
diff --git a/library/core/src/ops/mod.rs b/library/core/src/ops/mod.rs
index 97d9b750d..35654d0b8 100644
--- a/library/core/src/ops/mod.rs
+++ b/library/core/src/ops/mod.rs
@@ -8,8 +8,8 @@
//! trait, but since the assignment operator (`=`) has no backing trait, there
//! is no way of overloading its semantics. Additionally, this module does not
//! provide any mechanism to create new operators. If traitless overloading or
-//! custom operators are required, you should look toward macros or compiler
-//! plugins to extend Rust's syntax.
+//! custom operators are required, you should look toward macros to extend
+//! Rust's syntax.
//!
//! Implementations of operator traits should be unsurprising in their
//! respective contexts, keeping in mind their usual meanings and
@@ -141,10 +141,10 @@
mod arith;
mod bit;
mod control_flow;
+mod coroutine;
mod deref;
mod drop;
mod function;
-mod generator;
mod index;
mod index_range;
mod range;
@@ -198,8 +198,8 @@ pub use self::try_trait::Residual;
pub(crate) use self::try_trait::{ChangeOutputType, NeverShortCircuit};
-#[unstable(feature = "generator_trait", issue = "43122")]
-pub use self::generator::{Generator, GeneratorState};
+#[unstable(feature = "coroutine_trait", issue = "43122")]
+pub use self::coroutine::{Coroutine, CoroutineState};
#[unstable(feature = "coerce_unsized", issue = "18598")]
pub use self::unsize::CoerceUnsized;
diff --git a/library/core/src/ops/range.rs b/library/core/src/ops/range.rs
index cc596293c..b419a738f 100644
--- a/library/core/src/ops/range.rs
+++ b/library/core/src/ops/range.rs
@@ -115,6 +115,7 @@ impl<Idx: PartialOrd<Idx>> Range<Idx> {
/// assert!(!(0.0..f32::NAN).contains(&0.5));
/// assert!(!(f32::NAN..1.0).contains(&0.5));
/// ```
+ #[inline]
#[stable(feature = "range_contains", since = "1.35.0")]
pub fn contains<U>(&self, item: &U) -> bool
where
@@ -141,6 +142,7 @@ impl<Idx: PartialOrd<Idx>> Range<Idx> {
/// assert!( (3.0..f32::NAN).is_empty());
/// assert!( (f32::NAN..5.0).is_empty());
/// ```
+ #[inline]
#[stable(feature = "range_is_empty", since = "1.47.0")]
pub fn is_empty(&self) -> bool {
!(self.start < self.end)
@@ -213,6 +215,7 @@ impl<Idx: PartialOrd<Idx>> RangeFrom<Idx> {
/// assert!(!(0.0..).contains(&f32::NAN));
/// assert!(!(f32::NAN..).contains(&0.5));
/// ```
+ #[inline]
#[stable(feature = "range_contains", since = "1.35.0")]
pub fn contains<U>(&self, item: &U) -> bool
where
@@ -294,6 +297,7 @@ impl<Idx: PartialOrd<Idx>> RangeTo<Idx> {
/// assert!(!(..1.0).contains(&f32::NAN));
/// assert!(!(..f32::NAN).contains(&0.5));
/// ```
+ #[inline]
#[stable(feature = "range_contains", since = "1.35.0")]
pub fn contains<U>(&self, item: &U) -> bool
where
@@ -500,6 +504,7 @@ impl<Idx: PartialOrd<Idx>> RangeInclusive<Idx> {
/// // Precise field values are unspecified here
/// assert!(!r.contains(&3) && !r.contains(&5));
/// ```
+ #[inline]
#[stable(feature = "range_contains", since = "1.35.0")]
pub fn contains<U>(&self, item: &U) -> bool
where
@@ -613,6 +618,7 @@ impl<Idx: PartialOrd<Idx>> RangeToInclusive<Idx> {
/// assert!(!(..=1.0).contains(&f32::NAN));
/// assert!(!(..=f32::NAN).contains(&0.5));
/// ```
+ #[inline]
#[stable(feature = "range_contains", since = "1.35.0")]
pub fn contains<U>(&self, item: &U) -> bool
where
@@ -758,6 +764,7 @@ impl<T: Clone> Bound<&T> {
/// `RangeBounds` is implemented by Rust's built-in range types, produced
/// by range syntax like `..`, `a..`, `..b`, `..=c`, `d..e`, or `f..=g`.
#[stable(feature = "collections_range", since = "1.28.0")]
+#[rustc_diagnostic_item = "RangeBounds"]
pub trait RangeBounds<T: ?Sized> {
/// Start index bound.
///
@@ -807,6 +814,7 @@ pub trait RangeBounds<T: ?Sized> {
/// assert!(!(0.0..1.0).contains(&f32::NAN));
/// assert!(!(0.0..f32::NAN).contains(&0.5));
/// assert!(!(f32::NAN..1.0).contains(&0.5));
+ #[inline]
#[stable(feature = "range_contains", since = "1.35.0")]
fn contains<U>(&self, item: &U) -> bool
where
diff --git a/library/core/src/ops/try_trait.rs b/library/core/src/ops/try_trait.rs
index 17625dacc..3f8c8efd4 100644
--- a/library/core/src/ops/try_trait.rs
+++ b/library/core/src/ops/try_trait.rs
@@ -226,14 +226,8 @@ pub trait Try: FromResidual {
on(
all(
from_desugaring = "QuestionMark",
- any(
- _Self = "core::result::Result<T, E>",
- _Self = "std::result::Result<T, E>",
- ),
- any(
- R = "core::option::Option<core::convert::Infallible>",
- R = "std::option::Option<std::convert::Infallible>",
- )
+ _Self = "core::result::Result<T, E>",
+ R = "core::option::Option<core::convert::Infallible>",
),
message = "the `?` operator can only be used on `Result`s, not `Option`s, \
in {ItemContext} that returns `Result`",
@@ -243,10 +237,7 @@ pub trait Try: FromResidual {
on(
all(
from_desugaring = "QuestionMark",
- any(
- _Self = "core::result::Result<T, E>",
- _Self = "std::result::Result<T, E>",
- )
+ _Self = "core::result::Result<T, E>",
),
// There's a special error message in the trait selection code for
// `From` in `?`, so this is not shown for result-in-result errors,
@@ -259,14 +250,8 @@ pub trait Try: FromResidual {
on(
all(
from_desugaring = "QuestionMark",
- any(
- _Self = "core::option::Option<T>",
- _Self = "std::option::Option<T>",
- ),
- any(
- R = "core::result::Result<T, E>",
- R = "std::result::Result<T, E>",
- )
+ _Self = "core::option::Option<T>",
+ R = "core::result::Result<T, E>",
),
message = "the `?` operator can only be used on `Option`s, not `Result`s, \
in {ItemContext} that returns `Option`",
@@ -276,10 +261,7 @@ pub trait Try: FromResidual {
on(
all(
from_desugaring = "QuestionMark",
- any(
- _Self = "core::option::Option<T>",
- _Self = "std::option::Option<T>",
- )
+ _Self = "core::option::Option<T>",
),
// `Option`-in-`Option` always works, as there's only one possible
// residual, so this can also be phrased strongly.
@@ -291,14 +273,8 @@ pub trait Try: FromResidual {
on(
all(
from_desugaring = "QuestionMark",
- any(
- _Self = "core::ops::ControlFlow<B, C>",
- _Self = "std::ops::ControlFlow<B, C>",
- ),
- any(
- R = "core::ops::ControlFlow<B, C>",
- R = "std::ops::ControlFlow<B, C>",
- )
+ _Self = "core::ops::control_flow::ControlFlow<B, C>",
+ R = "core::ops::control_flow::ControlFlow<B, C>",
),
message = "the `?` operator in {ItemContext} that returns `ControlFlow<B, _>` \
can only be used on other `ControlFlow<B, _>`s (with the same Break type)",
@@ -309,10 +285,7 @@ pub trait Try: FromResidual {
on(
all(
from_desugaring = "QuestionMark",
- any(
- _Self = "core::ops::ControlFlow<B, C>",
- _Self = "std::ops::ControlFlow<B, C>",
- )
+ _Self = "core::ops::control_flow::ControlFlow<B, C>",
// `R` is not a `ControlFlow`, as that case was matched previously
),
message = "the `?` operator can only be used on `ControlFlow`s \
diff --git a/library/core/src/option.rs b/library/core/src/option.rs
index f2909a81d..89d4532de 100644
--- a/library/core/src/option.rs
+++ b/library/core/src/option.rs
@@ -119,15 +119,21 @@
//! # Representation
//!
//! Rust guarantees to optimize the following types `T` such that
-//! [`Option<T>`] has the same size and alignment as `T`:
-//!
-//! * [`Box<U>`]
-//! * `&U`
-//! * `&mut U`
-//! * `fn`, `extern "C" fn`[^extern_fn]
-//! * [`num::NonZero*`]
-//! * [`ptr::NonNull<U>`]
-//! * `#[repr(transparent)]` struct around one of the types in this list.
+//! [`Option<T>`] has the same size and alignment as `T`. In some
+//! of these cases, Rust further guarantees that
+//! `transmute::<_, Option<T>>([0u8; size_of::<T>()])` is sound and
+//! produces `Option::<T>::None`. These cases are identified by the
+//! second column:
+//!
+//! | `T` | `transmute::<_, Option<T>>([0u8; size_of::<T>()])` sound? |
+//! |---------------------------------------------------------------------|----------------------------------------------------------------------|
+//! | [`Box<U>`] | when `U: Sized` |
+//! | `&U` | when `U: Sized` |
+//! | `&mut U` | when `U: Sized` |
+//! | `fn`, `extern "C" fn`[^extern_fn] | always |
+//! | [`num::NonZero*`] | always |
+//! | [`ptr::NonNull<U>`] | when `U: Sized` |
+//! | `#[repr(transparent)]` struct around one of the types in this list. | when it holds for the inner type |
//!
//! [^extern_fn]: this remains true for any other ABI: `extern "abi" fn` (_e.g._, `extern "system" fn`)
//!
@@ -743,8 +749,6 @@ impl<T> Option<T> {
/// # Examples
///
/// ```rust
- /// #![feature(option_as_slice)]
- ///
/// assert_eq!(
/// [Some(1234).as_slice(), None.as_slice()],
/// [&[1234][..], &[][..]],
@@ -755,15 +759,13 @@ impl<T> Option<T> {
/// borrowing) [`[_]::first`](slice::first):
///
/// ```rust
- /// #![feature(option_as_slice)]
- ///
/// for i in [Some(1234_u16), None] {
/// assert_eq!(i.as_ref(), i.as_slice().first());
/// }
/// ```
#[inline]
#[must_use]
- #[unstable(feature = "option_as_slice", issue = "108545")]
+ #[stable(feature = "option_as_slice", since = "1.75.0")]
pub fn as_slice(&self) -> &[T] {
// SAFETY: When the `Option` is `Some`, we're using the actual pointer
// to the payload, with a length of 1, so this is equivalent to
@@ -794,8 +796,6 @@ impl<T> Option<T> {
/// # Examples
///
/// ```rust
- /// #![feature(option_as_slice)]
- ///
/// assert_eq!(
/// [Some(1234).as_mut_slice(), None.as_mut_slice()],
/// [&mut [1234][..], &mut [][..]],
@@ -806,8 +806,6 @@ impl<T> Option<T> {
/// our original `Option`:
///
/// ```rust
- /// #![feature(option_as_slice)]
- ///
/// let mut x = Some(1234);
/// x.as_mut_slice()[0] += 1;
/// assert_eq!(x, Some(1235));
@@ -817,13 +815,11 @@ impl<T> Option<T> {
/// is [`[_]::first_mut`](slice::first_mut):
///
/// ```rust
- /// #![feature(option_as_slice)]
- ///
/// assert_eq!(Some(123).as_mut_slice().first_mut(), Some(&mut 123))
/// ```
#[inline]
#[must_use]
- #[unstable(feature = "option_as_slice", issue = "108545")]
+ #[stable(feature = "option_as_slice", since = "1.75.0")]
pub fn as_mut_slice(&mut self) -> &mut [T] {
// SAFETY: When the `Option` is `Some`, we're using the actual pointer
// to the payload, with a length of 1, so this is equivalent to
@@ -969,6 +965,7 @@ impl<T> Option<T> {
/// assert_eq!(None.unwrap_or_else(|| 2 * k), 20);
/// ```
#[inline]
+ #[track_caller]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn unwrap_or_else<F>(self, f: F) -> T
where
@@ -1485,7 +1482,7 @@ impl<T> Option<T> {
#[stable(feature = "rust1", since = "1.0.0")]
pub fn or(self, optb: Option<T>) -> Option<T> {
match self {
- Some(x) => Some(x),
+ x @ Some(_) => x,
None => optb,
}
}
@@ -1510,7 +1507,7 @@ impl<T> Option<T> {
F: FnOnce() -> Option<T>,
{
match self {
- Some(x) => Some(x),
+ x @ Some(_) => x,
None => f(),
}
}
@@ -1540,8 +1537,8 @@ impl<T> Option<T> {
#[stable(feature = "option_xor", since = "1.37.0")]
pub fn xor(self, optb: Option<T>) -> Option<T> {
match (self, optb) {
- (Some(a), None) => Some(a),
- (None, Some(b)) => Some(b),
+ (a @ Some(_), None) => a,
+ (None, b @ Some(_)) => b,
_ => None,
}
}
diff --git a/library/core/src/panic.rs b/library/core/src/panic.rs
index 386f5fcbd..a00fd322b 100644
--- a/library/core/src/panic.rs
+++ b/library/core/src/panic.rs
@@ -47,6 +47,7 @@ pub macro panic_2015 {
#[allow_internal_unstable(core_panic, const_format_args)]
#[rustc_diagnostic_item = "core_panic_2021_macro"]
#[rustc_macro_transparency = "semitransparent"]
+#[cfg(any(bootstrap, feature = "panic_immediate_abort"))]
pub macro panic_2021 {
() => (
$crate::panicking::panic("explicit panic")
@@ -63,6 +64,50 @@ pub macro panic_2021 {
}
#[doc(hidden)]
+#[unstable(feature = "edition_panic", issue = "none", reason = "use panic!() instead")]
+#[allow_internal_unstable(
+ core_panic,
+ core_intrinsics,
+ const_dispatch,
+ const_eval_select,
+ const_format_args,
+ rustc_attrs
+)]
+#[rustc_diagnostic_item = "core_panic_2021_macro"]
+#[rustc_macro_transparency = "semitransparent"]
+#[cfg(not(any(bootstrap, feature = "panic_immediate_abort")))]
+pub macro panic_2021 {
+ () => ({
+ // Create a function so that the argument for `track_caller`
+ // can be moved inside if possible.
+ #[cold]
+ #[track_caller]
+ #[inline(never)]
+ const fn panic_cold_explicit() -> ! {
+ $crate::panicking::panic_explicit()
+ }
+ panic_cold_explicit();
+ }),
+ // Special-case the single-argument case for const_panic.
+ ("{}", $arg:expr $(,)?) => ({
+ #[cold]
+ #[track_caller]
+ #[inline(never)]
+ #[rustc_const_panic_str] // enforce a &&str argument in const-check and hook this by const-eval
+ #[rustc_do_not_const_check] // hooked by const-eval
+ const fn panic_cold_display<T: $crate::fmt::Display>(arg: &T) -> ! {
+ $crate::panicking::panic_display(arg)
+ }
+ panic_cold_display(&$arg);
+ }),
+ ($($t:tt)+) => ({
+ // Semicolon to prevent temporaries inside the formatting machinery from
+ // being considered alive in the caller after the panic_fmt call.
+ $crate::panicking::panic_fmt($crate::const_format_args!($($t)+));
+ }),
+}
+
+#[doc(hidden)]
#[unstable(feature = "edition_panic", issue = "none", reason = "use unreachable!() instead")]
#[allow_internal_unstable(core_panic)]
#[rustc_diagnostic_item = "unreachable_2015_macro"]
diff --git a/library/core/src/panic/unwind_safe.rs b/library/core/src/panic/unwind_safe.rs
index 7e7b6b4db..6a53909a8 100644
--- a/library/core/src/panic/unwind_safe.rs
+++ b/library/core/src/panic/unwind_safe.rs
@@ -267,6 +267,7 @@ impl<T> DerefMut for AssertUnwindSafe<T> {
impl<R, F: FnOnce() -> R> FnOnce<()> for AssertUnwindSafe<F> {
type Output = R;
+ #[inline]
extern "rust-call" fn call_once(self, _args: ()) -> R {
(self.0)()
}
diff --git a/library/core/src/panicking.rs b/library/core/src/panicking.rs
index e6cdffd96..39a5e8d9f 100644
--- a/library/core/src/panicking.rs
+++ b/library/core/src/panicking.rs
@@ -152,6 +152,14 @@ pub const fn panic_str(expr: &str) -> ! {
panic_display(&expr);
}
+#[track_caller]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
+#[rustc_const_unstable(feature = "core_panic", issue = "none")]
+pub const fn panic_explicit() -> ! {
+ panic_display(&"explicit panic");
+}
+
#[inline]
#[track_caller]
#[rustc_diagnostic_item = "unreachable_display"] // needed for `non-fmt-panics` lint
@@ -161,8 +169,10 @@ pub fn unreachable_display<T: fmt::Display>(x: &T) -> ! {
#[inline]
#[track_caller]
-#[lang = "panic_display"] // needed for const-evaluated panics
#[rustc_do_not_const_check] // hooked by const-eval
+#[cfg_attr(bootstrap, lang = "panic_display")]
+// enforce a &&str argument in const-check and hook this by const-eval
+#[cfg_attr(not(bootstrap), rustc_const_panic_str)]
#[rustc_const_unstable(feature = "core_panic", issue = "none")]
pub const fn panic_display<T: fmt::Display>(x: &T) -> ! {
panic_fmt(format_args!("{}", *x));
@@ -219,7 +229,6 @@ fn panic_cannot_unwind() -> ! {
/// pass to `panic_nounwind`.
/// This function is called directly by the codegen backend, and must not have
/// any extra arguments (including those synthesized by track_caller).
-#[cfg(not(bootstrap))]
#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
#[cfg_attr(feature = "panic_immediate_abort", inline)]
#[lang = "panic_in_cleanup"] // needed by codegen for panic in nounwind function
diff --git a/library/core/src/pin.rs b/library/core/src/pin.rs
index 94c682b61..bca97d4ee 100644
--- a/library/core/src/pin.rs
+++ b/library/core/src/pin.rs
@@ -1085,17 +1085,19 @@ impl<P, U> DispatchFromDyn<Pin<U>> for Pin<P> where P: DispatchFromDyn<U> {}
/// # assert_eq!(42, block_on(async { 42 }));
/// ```
///
-/// ### With `Generator`s
+/// ### With `Coroutine`s
///
/// ```rust
-/// #![feature(generators, generator_trait)]
+/// #![cfg_attr(bootstrap, feature(generators))]
+/// #![cfg_attr(not(bootstrap), feature(coroutines))]
+/// #![feature(coroutine_trait)]
/// use core::{
-/// ops::{Generator, GeneratorState},
+/// ops::{Coroutine, CoroutineState},
/// pin::pin,
/// };
///
-/// fn generator_fn() -> impl Generator<Yield = usize, Return = ()> /* not Unpin */ {
-/// // Allow generator to be self-referential (not `Unpin`)
+/// fn coroutine_fn() -> impl Coroutine<Yield = usize, Return = ()> /* not Unpin */ {
+/// // Allow coroutine to be self-referential (not `Unpin`)
/// // vvvvvv so that locals can cross yield points.
/// static || {
/// let foo = String::from("foo");
@@ -1107,18 +1109,18 @@ impl<P, U> DispatchFromDyn<Pin<U>> for Pin<P> where P: DispatchFromDyn<U> {}
/// }
///
/// fn main() {
-/// let mut generator = pin!(generator_fn());
-/// match generator.as_mut().resume(()) {
-/// GeneratorState::Yielded(0) => {},
+/// let mut coroutine = pin!(coroutine_fn());
+/// match coroutine.as_mut().resume(()) {
+/// CoroutineState::Yielded(0) => {},
/// _ => unreachable!(),
/// }
-/// match generator.as_mut().resume(()) {
-/// GeneratorState::Yielded(3) => {},
+/// match coroutine.as_mut().resume(()) {
+/// CoroutineState::Yielded(3) => {},
/// _ => unreachable!(),
/// }
-/// match generator.resume(()) {
-/// GeneratorState::Yielded(_) => unreachable!(),
-/// GeneratorState::Complete(()) => {},
+/// match coroutine.resume(()) {
+/// CoroutineState::Yielded(_) => unreachable!(),
+/// CoroutineState::Complete(()) => {},
/// }
/// }
/// ```
diff --git a/library/core/src/primitive_docs.rs b/library/core/src/primitive_docs.rs
index fd5fe5a04..87e492108 100644
--- a/library/core/src/primitive_docs.rs
+++ b/library/core/src/primitive_docs.rs
@@ -283,7 +283,7 @@ mod prim_never {}
/// `char` type. For technical reasons, there is additional, separate
/// documentation in [the `std::char` module](char/index.html) as well.
///
-/// # Validity
+/// # Validity and Layout
///
/// A `char` is a '[Unicode scalar value]', which is any '[Unicode code point]'
/// other than a [surrogate code point]. This has a fixed numerical definition:
@@ -291,7 +291,7 @@ mod prim_never {}
/// Surrogate code points, used by UTF-16, are in the range 0xD800 to 0xDFFF.
///
/// No `char` may be constructed, whether as a literal or at runtime, that is not a
-/// Unicode scalar value:
+/// Unicode scalar value. Violating this rule causes undefined behavior.
///
/// ```compile_fail
/// // Each of these is a compiler error
@@ -308,9 +308,10 @@ mod prim_never {}
/// let _ = unsafe { char::from_u32_unchecked(0x110000) };
/// ```
///
-/// USVs are also the exact set of values that may be encoded in UTF-8. Because
-/// `char` values are USVs and `str` values are valid UTF-8, it is safe to store
-/// any `char` in a `str` or read any character from a `str` as a `char`.
+/// Unicode scalar values are also the exact set of values that may be encoded in UTF-8. Because
+/// `char` values are Unicode scalar values and functions may assume [incoming `str` values are
+/// valid UTF-8](primitive.str.html#invariant), it is safe to store any `char` in a `str` or read
+/// any character from a `str` as a `char`.
///
/// The gap in valid `char` values is understood by the compiler, so in the
/// below example the two ranges are understood to cover the whole range of
@@ -324,11 +325,17 @@ mod prim_never {}
/// };
/// ```
///
-/// All USVs are valid `char` values, but not all of them represent a real
-/// character. Many USVs are not currently assigned to a character, but may be
-/// in the future ("reserved"); some will never be a character
-/// ("noncharacters"); and some may be given different meanings by different
-/// users ("private use").
+/// All Unicode scalar values are valid `char` values, but not all of them represent a real
+/// character. Many Unicode scalar values are not currently assigned to a character, but may be in
+/// the future ("reserved"); some will never be a character ("noncharacters"); and some may be given
+/// different meanings by different users ("private use").
+///
+/// `char` is guaranteed to have the same size and alignment as `u32` on all
+/// platforms.
+/// ```
+/// use std::alloc::Layout;
+/// assert_eq!(Layout::new::<char>(), Layout::new::<u32>());
+/// ```
///
/// [Unicode code point]: https://www.unicode.org/glossary/#code_point
/// [Unicode scalar value]: https://www.unicode.org/glossary/#unicode_scalar_value
@@ -887,8 +894,6 @@ mod prim_slice {}
/// type. It is usually seen in its borrowed form, `&str`. It is also the type
/// of string literals, `&'static str`.
///
-/// String slices are always valid UTF-8.
-///
/// # Basic Usage
///
/// String literals are string slices:
@@ -942,6 +947,14 @@ mod prim_slice {}
/// Note: This example shows the internals of `&str`. `unsafe` should not be
/// used to get a string slice under normal circumstances. Use `as_str`
/// instead.
+///
+/// # Invariant
+///
+/// Rust libraries may assume that string slices are always valid UTF-8.
+///
+/// Constructing a non-UTF-8 string slice is not immediate undefined behavior, but any function
+/// called on a string slice may assume that it is valid UTF-8, which means that a non-UTF-8 string
+/// slice can lead to undefined behavior down the road.
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_str {}
@@ -1077,26 +1090,6 @@ mod prim_tuple {}
#[doc(hidden)]
impl<T> (T,) {}
-// Fake impl that's only really used for docs.
-#[cfg(doc)]
-#[stable(feature = "rust1", since = "1.0.0")]
-#[doc(fake_variadic)]
-/// This trait is implemented on arbitrary-length tuples.
-impl<T: Clone> Clone for (T,) {
- fn clone(&self) -> Self {
- loop {}
- }
-}
-
-// Fake impl that's only really used for docs.
-#[cfg(doc)]
-#[stable(feature = "rust1", since = "1.0.0")]
-#[doc(fake_variadic)]
-/// This trait is implemented on arbitrary-length tuples.
-impl<T: Copy> Copy for (T,) {
- // empty
-}
-
#[rustc_doc_primitive = "f32"]
/// A 32-bit floating point type (specifically, the "binary32" type defined in IEEE 754-2008).
///
@@ -1142,10 +1135,9 @@ impl<T: Copy> Copy for (T,) {
/// surprising results upon inspecting the bit patterns,
/// as the same calculations might produce NaNs with different bit patterns.
///
-/// When the number resulting from a primitive operation (addition,
-/// subtraction, multiplication, or division) on this type is not exactly
-/// representable as `f32`, it is rounded according to the roundTiesToEven
-/// direction defined in IEEE 754-2008. That means:
+/// When a primitive operation (addition, subtraction, multiplication, or
+/// division) is performed on this type, the result is rounded according to the
+/// roundTiesToEven direction defined in IEEE 754-2008. That means:
///
/// - The result is the representable value closest to the true value, if there
/// is a unique closest representable value.
@@ -1154,6 +1146,9 @@ impl<T: Copy> Copy for (T,) {
/// - If the true value's magnitude is ≥ `f32::MAX` + 2<sup>(`f32::MAX_EXP` −
/// `f32::MANTISSA_DIGITS` − 1)</sup>, the result is ∞ or −∞ (preserving the
/// true value's sign).
+/// - If the result of a sum exactly equals zero, the outcome is +0.0 unless
+/// both arguments were negative, then it is -0.0. Subtraction `a - b` is
+/// regarded as a sum `a + (-b)`.
///
/// For more information on floating point numbers, see [Wikipedia][wikipedia].
///
diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs
index 9af8f1228..36685f756 100644
--- a/library/core/src/ptr/const_ptr.rs
+++ b/library/core/src/ptr/const_ptr.rs
@@ -480,8 +480,9 @@ impl<T: ?Sized> *const T {
/// leaving the metadata untouched.
#[must_use]
#[inline(always)]
- #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
- #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
+ #[stable(feature = "pointer_byte_offsets", since = "1.75.0")]
+ #[rustc_const_stable(feature = "const_pointer_byte_offsets", since = "1.75.0")]
+ #[rustc_allow_const_fn_unstable(set_ptr_value)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn byte_offset(self, count: isize) -> Self {
// SAFETY: the caller must uphold the safety contract for `offset`.
@@ -560,8 +561,9 @@ impl<T: ?Sized> *const T {
/// leaving the metadata untouched.
#[must_use]
#[inline(always)]
- #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
- #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
+ #[stable(feature = "pointer_byte_offsets", since = "1.75.0")]
+ #[rustc_const_stable(feature = "const_pointer_byte_offsets", since = "1.75.0")]
+ #[rustc_allow_const_fn_unstable(set_ptr_value)]
pub const fn wrapping_byte_offset(self, count: isize) -> Self {
self.cast::<u8>().wrapping_offset(count).with_metadata_of(self)
}
@@ -726,8 +728,9 @@ impl<T: ?Sized> *const T {
/// For non-`Sized` pointees this operation considers only the data pointers,
/// ignoring the metadata.
#[inline(always)]
- #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
- #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
+ #[stable(feature = "pointer_byte_offsets", since = "1.75.0")]
+ #[rustc_const_stable(feature = "const_pointer_byte_offsets", since = "1.75.0")]
+ #[rustc_allow_const_fn_unstable(set_ptr_value)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn byte_offset_from<U: ?Sized>(self, origin: *const U) -> isize {
// SAFETY: the caller must uphold the safety contract for `offset_from`.
@@ -842,7 +845,7 @@ impl<T: ?Sized> *const T {
where
T: Sized,
{
- match intrinsics::ptr_guaranteed_cmp(self as _, other as _) {
+ match intrinsics::ptr_guaranteed_cmp(self, other) {
2 => None,
other => Some(other == 1),
}
@@ -952,8 +955,9 @@ impl<T: ?Sized> *const T {
/// leaving the metadata untouched.
#[must_use]
#[inline(always)]
- #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
- #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
+ #[stable(feature = "pointer_byte_offsets", since = "1.75.0")]
+ #[rustc_const_stable(feature = "const_pointer_byte_offsets", since = "1.75.0")]
+ #[rustc_allow_const_fn_unstable(set_ptr_value)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn byte_add(self, count: usize) -> Self {
// SAFETY: the caller must uphold the safety contract for `add`.
@@ -1045,8 +1049,9 @@ impl<T: ?Sized> *const T {
/// leaving the metadata untouched.
#[must_use]
#[inline(always)]
- #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
- #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
+ #[stable(feature = "pointer_byte_offsets", since = "1.75.0")]
+ #[rustc_const_stable(feature = "const_pointer_byte_offsets", since = "1.75.0")]
+ #[rustc_allow_const_fn_unstable(set_ptr_value)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn byte_sub(self, count: usize) -> Self {
// SAFETY: the caller must uphold the safety contract for `sub`.
@@ -1125,8 +1130,9 @@ impl<T: ?Sized> *const T {
/// leaving the metadata untouched.
#[must_use]
#[inline(always)]
- #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
- #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
+ #[stable(feature = "pointer_byte_offsets", since = "1.75.0")]
+ #[rustc_const_stable(feature = "const_pointer_byte_offsets", since = "1.75.0")]
+ #[rustc_allow_const_fn_unstable(set_ptr_value)]
pub const fn wrapping_byte_add(self, count: usize) -> Self {
self.cast::<u8>().wrapping_add(count).with_metadata_of(self)
}
@@ -1203,8 +1209,9 @@ impl<T: ?Sized> *const T {
/// leaving the metadata untouched.
#[must_use]
#[inline(always)]
- #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
- #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
+ #[stable(feature = "pointer_byte_offsets", since = "1.75.0")]
+ #[rustc_const_stable(feature = "const_pointer_byte_offsets", since = "1.75.0")]
+ #[rustc_allow_const_fn_unstable(set_ptr_value)]
pub const fn wrapping_byte_sub(self, count: usize) -> Self {
self.cast::<u8>().wrapping_sub(count).with_metadata_of(self)
}
@@ -1372,7 +1379,6 @@ impl<T: ?Sized> *const T {
///
/// ```
/// #![feature(pointer_is_aligned)]
- /// #![feature(pointer_byte_offsets)]
///
/// // On some platforms, the alignment of i32 is less than 4.
/// #[repr(align(4))]
@@ -1494,7 +1500,6 @@ impl<T: ?Sized> *const T {
///
/// ```
/// #![feature(pointer_is_aligned)]
- /// #![feature(pointer_byte_offsets)]
///
/// // On some platforms, the alignment of i32 is less than 4.
/// #[repr(align(4))]
diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs
index d1286a1de..d71079dd0 100644
--- a/library/core/src/ptr/mod.rs
+++ b/library/core/src/ptr/mod.rs
@@ -494,6 +494,7 @@ mod mut_ptr;
#[stable(feature = "drop_in_place", since = "1.8.0")]
#[lang = "drop_in_place"]
#[allow(unconditional_recursion)]
+#[rustc_diagnostic_item = "ptr_drop_in_place"]
pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
// Code here does not matter - this is replaced by the
// real drop glue by the compiler.
@@ -504,6 +505,10 @@ pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
/// Creates a null raw pointer.
///
+/// This function is equivalent to zero-initializing the pointer:
+/// `MaybeUninit::<*const T>::zeroed().assume_init()`.
+/// The resulting pointer has the address 0.
+///
/// # Examples
///
/// ```
@@ -511,6 +516,7 @@ pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
///
/// let p: *const i32 = ptr::null();
/// assert!(p.is_null());
+/// assert_eq!(p as usize, 0); // this pointer has the address 0
/// ```
#[inline(always)]
#[must_use]
@@ -525,6 +531,10 @@ pub const fn null<T: ?Sized + Thin>() -> *const T {
/// Creates a null mutable raw pointer.
///
+/// This function is equivalent to zero-initializing the pointer:
+/// `MaybeUninit::<*mut T>::zeroed().assume_init()`.
+/// The resulting pointer has the address 0.
+///
/// # Examples
///
/// ```
@@ -532,6 +542,7 @@ pub const fn null<T: ?Sized + Thin>() -> *const T {
///
/// let p: *mut i32 = ptr::null_mut();
/// assert!(p.is_null());
+/// assert_eq!(p as usize, 0); // this pointer has the address 0
/// ```
#[inline(always)]
#[must_use]
@@ -698,7 +709,7 @@ where
#[inline(always)]
#[must_use]
#[unstable(feature = "ptr_from_ref", issue = "106116")]
-#[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
+#[rustc_never_returns_null_ptr]
#[rustc_diagnostic_item = "ptr_from_ref"]
pub const fn from_ref<T: ?Sized>(r: &T) -> *const T {
r
@@ -711,7 +722,7 @@ pub const fn from_ref<T: ?Sized>(r: &T) -> *const T {
#[inline(always)]
#[must_use]
#[unstable(feature = "ptr_from_ref", issue = "106116")]
-#[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
+#[rustc_never_returns_null_ptr]
pub const fn from_mut<T: ?Sized>(r: &mut T) -> *mut T {
r
}
@@ -740,6 +751,7 @@ pub const fn from_mut<T: ?Sized>(r: &mut T) -> *mut T {
#[stable(feature = "slice_from_raw_parts", since = "1.42.0")]
#[rustc_const_stable(feature = "const_slice_from_raw_parts", since = "1.64.0")]
#[rustc_allow_const_fn_unstable(ptr_metadata)]
+#[rustc_diagnostic_item = "ptr_slice_from_raw_parts"]
pub const fn slice_from_raw_parts<T>(data: *const T, len: usize) -> *const [T] {
from_raw_parts(data.cast(), len)
}
@@ -772,6 +784,7 @@ pub const fn slice_from_raw_parts<T>(data: *const T, len: usize) -> *const [T] {
#[inline]
#[stable(feature = "slice_from_raw_parts", since = "1.42.0")]
#[rustc_const_unstable(feature = "const_slice_from_raw_parts_mut", issue = "67456")]
+#[rustc_diagnostic_item = "ptr_slice_from_raw_parts_mut"]
pub const fn slice_from_raw_parts_mut<T>(data: *mut T, len: usize) -> *mut [T] {
from_raw_parts_mut(data.cast(), len)
}
@@ -850,6 +863,7 @@ pub const fn slice_from_raw_parts_mut<T>(data: *mut T, len: usize) -> *mut [T] {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+#[rustc_diagnostic_item = "ptr_swap"]
pub const unsafe fn swap<T>(x: *mut T, y: *mut T) {
// Give ourselves some scratch space to work with.
// We do not have to worry about drops: `MaybeUninit` does nothing when dropped.
@@ -911,6 +925,7 @@ pub const unsafe fn swap<T>(x: *mut T, y: *mut T) {
#[inline]
#[stable(feature = "swap_nonoverlapping", since = "1.27.0")]
#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+#[rustc_diagnostic_item = "ptr_swap_nonoverlapping"]
pub const unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
#[allow(unused)]
macro_rules! attempt_swap_as_chunks {
@@ -1022,6 +1037,7 @@ const unsafe fn swap_nonoverlapping_simple_untyped<T>(x: *mut T, y: *mut T, coun
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_replace", issue = "83164")]
+#[rustc_diagnostic_item = "ptr_replace"]
pub const unsafe fn replace<T>(dst: *mut T, mut src: T) -> T {
// SAFETY: the caller must guarantee that `dst` is valid to be
// cast to a mutable reference (valid for writes, aligned, initialized),
@@ -1147,6 +1163,7 @@ pub const unsafe fn replace<T>(dst: *mut T, mut src: T) -> T {
#[rustc_const_stable(feature = "const_ptr_read", since = "1.71.0")]
#[rustc_allow_const_fn_unstable(const_mut_refs, const_maybe_uninit_as_mut_ptr)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+#[rustc_diagnostic_item = "ptr_read"]
pub const unsafe fn read<T>(src: *const T) -> T {
// It would be semantically correct to implement this via `copy_nonoverlapping`
// and `MaybeUninit`, as was done before PR #109035. Calling `assume_init`
@@ -1264,6 +1281,7 @@ pub const unsafe fn read<T>(src: *const T) -> T {
#[rustc_const_stable(feature = "const_ptr_read", since = "1.71.0")]
#[rustc_allow_const_fn_unstable(const_mut_refs, const_maybe_uninit_as_mut_ptr)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+#[rustc_diagnostic_item = "ptr_read_unaligned"]
pub const unsafe fn read_unaligned<T>(src: *const T) -> T {
let mut tmp = MaybeUninit::<T>::uninit();
// SAFETY: the caller must guarantee that `src` is valid for reads.
@@ -1539,6 +1557,7 @@ pub const unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
#[inline]
#[stable(feature = "volatile", since = "1.9.0")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+#[rustc_diagnostic_item = "ptr_read_volatile"]
pub unsafe fn read_volatile<T>(src: *const T) -> T {
// SAFETY: the caller must uphold the safety contract for `volatile_load`.
unsafe {
@@ -1864,10 +1883,35 @@ pub(crate) const unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usiz
/// ```
#[stable(feature = "ptr_eq", since = "1.17.0")]
#[inline(always)]
+#[must_use = "pointer comparison produces a value"]
+#[rustc_diagnostic_item = "ptr_eq"]
pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
a == b
}
+/// Compares the *addresses* of the two pointers for equality,
+/// ignoring any metadata in fat pointers.
+///
+/// If the arguments are thin pointers of the same type,
+/// then this is the same as [`eq`].
+///
+/// # Examples
+///
+/// ```
+/// #![feature(ptr_addr_eq)]
+///
+/// let whole: &[i32; 3] = &[1, 2, 3];
+/// let first: &i32 = &whole[0];
+/// assert!(std::ptr::addr_eq(whole, first));
+/// assert!(!std::ptr::eq::<dyn std::fmt::Debug>(whole, first));
+/// ```
+#[unstable(feature = "ptr_addr_eq", issue = "116324")]
+#[inline(always)]
+#[must_use = "pointer comparison produces a value"]
+pub fn addr_eq<T: ?Sized, U: ?Sized>(p: *const T, q: *const U) -> bool {
+ (p as *const ()) == (q as *const ())
+}
+
/// Hash a raw pointer.
///
/// This can be used to hash a `&T` reference (which coerces to `*const T` implicitly)
@@ -1955,9 +1999,18 @@ impl<F: FnPtr> fmt::Debug for F {
/// as all other references. This macro can create a raw pointer *without* creating
/// a reference first.
///
-/// Note, however, that the `expr` in `addr_of!(expr)` is still subject to all
-/// the usual rules. In particular, `addr_of!(*ptr::null())` is Undefined
-/// Behavior because it dereferences a null pointer.
+/// The `expr` in `addr_of!(expr)` is evaluated as a place expression, but never loads
+/// from the place or requires the place to be dereferenceable. This means that
+/// `addr_of!(*ptr)` is defined behavior even if `ptr` is null, dangling, or misaligned.
+/// Note however that `addr_of!((*ptr).field)` still requires the projection to
+/// `field` to be in-bounds, using the same rules as [`offset`].
+///
+/// Note that `Deref`/`Index` coercions (and their mutable counterparts) are applied inside
+/// `addr_of!` like everywhere else, in which case a reference is created to call `Deref::deref` or
+/// `Index::index`, respectively. The statements above only apply when no such coercions are
+/// applied.
+///
+/// [`offset`]: pointer::offset
///
/// # Example
///
@@ -1995,9 +2048,18 @@ pub macro addr_of($place:expr) {
/// as all other references. This macro can create a raw pointer *without* creating
/// a reference first.
///
-/// Note, however, that the `expr` in `addr_of_mut!(expr)` is still subject to all
-/// the usual rules. In particular, `addr_of_mut!(*ptr::null_mut())` is Undefined
-/// Behavior because it dereferences a null pointer.
+/// The `expr` in `addr_of_mut!(expr)` is evaluated as a place expression, but never loads
+/// from the place or requires the place to be dereferenceable. This means that
+/// `addr_of_mut!(*ptr)` is defined behavior even if `ptr` is null, dangling, or misaligned.
+/// Note however that `addr_of_mut!((*ptr).field)` still requires the projection to
+/// `field` to be in-bounds, using the same rules as [`offset`].
+///
+/// Note that `Deref`/`Index` coercions (and their mutable counterparts) are applied inside
+/// `addr_of_mut!` like everywhere else, in which case a reference is created to call `Deref::deref`
+/// or `Index::index`, respectively. The statements above only apply when no such coercions are
+/// applied.
+///
+/// [`offset`]: pointer::offset
///
/// # Examples
///
diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs
index 109c28692..bc362fb62 100644
--- a/library/core/src/ptr/mut_ptr.rs
+++ b/library/core/src/ptr/mut_ptr.rs
@@ -495,8 +495,9 @@ impl<T: ?Sized> *mut T {
/// leaving the metadata untouched.
#[must_use]
#[inline(always)]
- #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
- #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
+ #[stable(feature = "pointer_byte_offsets", since = "1.75.0")]
+ #[rustc_const_stable(feature = "const_pointer_byte_offsets", since = "1.75.0")]
+ #[rustc_allow_const_fn_unstable(set_ptr_value)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn byte_offset(self, count: isize) -> Self {
// SAFETY: the caller must uphold the safety contract for `offset`.
@@ -574,8 +575,9 @@ impl<T: ?Sized> *mut T {
/// leaving the metadata untouched.
#[must_use]
#[inline(always)]
- #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
- #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
+ #[stable(feature = "pointer_byte_offsets", since = "1.75.0")]
+ #[rustc_const_stable(feature = "const_pointer_byte_offsets", since = "1.75.0")]
+ #[rustc_allow_const_fn_unstable(set_ptr_value)]
pub const fn wrapping_byte_offset(self, count: isize) -> Self {
self.cast::<u8>().wrapping_offset(count).with_metadata_of(self)
}
@@ -898,8 +900,9 @@ impl<T: ?Sized> *mut T {
/// For non-`Sized` pointees this operation considers only the data pointers,
/// ignoring the metadata.
#[inline(always)]
- #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
- #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
+ #[stable(feature = "pointer_byte_offsets", since = "1.75.0")]
+ #[rustc_const_stable(feature = "const_pointer_byte_offsets", since = "1.75.0")]
+ #[rustc_allow_const_fn_unstable(set_ptr_value)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn byte_offset_from<U: ?Sized>(self, origin: *const U) -> isize {
// SAFETY: the caller must uphold the safety contract for `offset_from`.
@@ -1053,8 +1056,9 @@ impl<T: ?Sized> *mut T {
/// leaving the metadata untouched.
#[must_use]
#[inline(always)]
- #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
- #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
+ #[stable(feature = "pointer_byte_offsets", since = "1.75.0")]
+ #[rustc_const_stable(feature = "const_pointer_byte_offsets", since = "1.75.0")]
+ #[rustc_allow_const_fn_unstable(set_ptr_value)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn byte_add(self, count: usize) -> Self {
// SAFETY: the caller must uphold the safety contract for `add`.
@@ -1146,8 +1150,9 @@ impl<T: ?Sized> *mut T {
/// leaving the metadata untouched.
#[must_use]
#[inline(always)]
- #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
- #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
+ #[stable(feature = "pointer_byte_offsets", since = "1.75.0")]
+ #[rustc_const_stable(feature = "const_pointer_byte_offsets", since = "1.75.0")]
+ #[rustc_allow_const_fn_unstable(set_ptr_value)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn byte_sub(self, count: usize) -> Self {
// SAFETY: the caller must uphold the safety contract for `sub`.
@@ -1226,8 +1231,9 @@ impl<T: ?Sized> *mut T {
/// leaving the metadata untouched.
#[must_use]
#[inline(always)]
- #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
- #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
+ #[stable(feature = "pointer_byte_offsets", since = "1.75.0")]
+ #[rustc_const_stable(feature = "const_pointer_byte_offsets", since = "1.75.0")]
+ #[rustc_allow_const_fn_unstable(set_ptr_value)]
pub const fn wrapping_byte_add(self, count: usize) -> Self {
self.cast::<u8>().wrapping_add(count).with_metadata_of(self)
}
@@ -1304,8 +1310,9 @@ impl<T: ?Sized> *mut T {
/// leaving the metadata untouched.
#[must_use]
#[inline(always)]
- #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
- #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
+ #[stable(feature = "pointer_byte_offsets", since = "1.75.0")]
+ #[rustc_const_stable(feature = "const_pointer_byte_offsets", since = "1.75.0")]
+ #[rustc_allow_const_fn_unstable(set_ptr_value)]
pub const fn wrapping_byte_sub(self, count: usize) -> Self {
self.cast::<u8>().wrapping_sub(count).with_metadata_of(self)
}
@@ -1639,7 +1646,6 @@ impl<T: ?Sized> *mut T {
///
/// ```
/// #![feature(pointer_is_aligned)]
- /// #![feature(pointer_byte_offsets)]
///
/// // On some platforms, the alignment of i32 is less than 4.
/// #[repr(align(4))]
@@ -1763,7 +1769,6 @@ impl<T: ?Sized> *mut T {
///
/// ```
/// #![feature(pointer_is_aligned)]
- /// #![feature(pointer_byte_offsets)]
///
/// // On some platforms, the alignment of i32 is less than 4.
/// #[repr(align(4))]
diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs
index d5bd54fd5..ae673b779 100644
--- a/library/core/src/ptr/non_null.rs
+++ b/library/core/src/ptr/non_null.rs
@@ -68,6 +68,7 @@ use crate::slice::{self, SliceIndex};
#[repr(transparent)]
#[rustc_layout_scalar_valid_range_start(1)]
#[rustc_nonnull_optimization_guaranteed]
+#[rustc_diagnostic_item = "NonNull"]
pub struct NonNull<T: ?Sized> {
pointer: *const T,
}
@@ -338,7 +339,7 @@ impl<T: ?Sized> NonNull<T> {
/// ```
#[stable(feature = "nonnull", since = "1.25.0")]
#[rustc_const_stable(feature = "const_nonnull_as_ptr", since = "1.32.0")]
- #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
+ #[rustc_never_returns_null_ptr]
#[must_use]
#[inline(always)]
pub const fn as_ptr(self) -> *mut T {
@@ -598,7 +599,7 @@ impl<T> NonNull<[T]> {
#[must_use]
#[unstable(feature = "slice_ptr_get", issue = "74265")]
#[rustc_const_unstable(feature = "slice_ptr_get", issue = "74265")]
- #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
+ #[rustc_never_returns_null_ptr]
pub const fn as_mut_ptr(self) -> *mut T {
self.as_non_null_ptr().as_ptr()
}
diff --git a/library/core/src/result.rs b/library/core/src/result.rs
index 6981abc9b..50127b27f 100644
--- a/library/core/src/result.rs
+++ b/library/core/src/result.rs
@@ -1422,6 +1422,7 @@ impl<T, E> Result<T, E> {
/// assert_eq!(Err("foo").unwrap_or_else(count), 3);
/// ```
#[inline]
+ #[track_caller]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn unwrap_or_else<F: FnOnce(E) -> T>(self, op: F) -> T {
match self {
diff --git a/library/core/src/slice/index.rs b/library/core/src/slice/index.rs
index d313e8e01..1da3a87e1 100644
--- a/library/core/src/slice/index.rs
+++ b/library/core/src/slice/index.rs
@@ -152,10 +152,7 @@ mod private_slice_index {
#[rustc_on_unimplemented(
on(T = "str", label = "string indices are ranges of `usize`",),
on(
- all(
- any(T = "str", T = "&str", T = "alloc::string::String", T = "std::string::String"),
- _Self = "{integer}"
- ),
+ all(any(T = "str", T = "&str", T = "alloc::string::String"), _Self = "{integer}"),
note = "you can use `.chars().nth()` or `.bytes().nth()`\n\
for more information, see chapter 8 in The Book: \
<https://doc.rust-lang.org/book/ch08-02-strings.html#indexing-into-strings>"
diff --git a/library/core/src/slice/iter.rs b/library/core/src/slice/iter.rs
index cc9313553..5e229bf52 100644
--- a/library/core/src/slice/iter.rs
+++ b/library/core/src/slice/iter.rs
@@ -59,6 +59,7 @@ impl<'a, T> IntoIterator for &'a mut [T] {
/// [slices]: slice
#[stable(feature = "rust1", since = "1.0.0")]
#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[rustc_diagnostic_item = "SliceIter"]
pub struct Iter<'a, T: 'a> {
/// The pointer to the next element to return, or the past-the-end location
/// if the iterator is empty.
diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs
index a19fcf93c..6cf5d48a1 100644
--- a/library/core/src/slice/mod.rs
+++ b/library/core/src/slice/mod.rs
@@ -640,6 +640,11 @@ impl<T> [T] {
/// Calling this method with an out-of-bounds index is *[undefined behavior]*
/// even if the resulting reference is not used.
///
+ /// You can think of this like `.get(index).unwrap_unchecked()`. It's UB
+ /// to call `.get_unchecked(len)`, even if you immediately convert to a
+ /// pointer. And it's UB to call `.get_unchecked(..len + 1)`,
+ /// `.get_unchecked(..=len)`, or similar.
+ ///
/// [`get`]: slice::get
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
@@ -675,6 +680,11 @@ impl<T> [T] {
/// Calling this method with an out-of-bounds index is *[undefined behavior]*
/// even if the resulting reference is not used.
///
+ /// You can think of this like `.get_mut(index).unwrap_unchecked()`. It's
+ /// UB to call `.get_unchecked_mut(len)`, even if you immediately convert
+ /// to a pointer. And it's UB to call `.get_unchecked_mut(..len + 1)`,
+ /// `.get_unchecked_mut(..=len)`, or similar.
+ ///
/// [`get_mut`]: slice::get_mut
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
@@ -730,7 +740,7 @@ impl<T> [T] {
/// [`as_mut_ptr`]: slice::as_mut_ptr
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_as_ptr", since = "1.32.0")]
- #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
+ #[rustc_never_returns_null_ptr]
#[inline(always)]
#[must_use]
pub const fn as_ptr(&self) -> *const T {
@@ -761,7 +771,7 @@ impl<T> [T] {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
#[rustc_allow_const_fn_unstable(const_mut_refs)]
- #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
+ #[rustc_never_returns_null_ptr]
#[inline(always)]
#[must_use]
pub const fn as_mut_ptr(&mut self) -> *mut T {
@@ -2482,6 +2492,62 @@ impl<T> [T] {
RSplitNMut::new(self.rsplit_mut(pred), n)
}
+ /// Splits the slice on the first element that matches the specified
+ /// predicate.
+ ///
+    /// If any matching elements are present in the slice, returns the prefix
+ /// before the match and suffix after. The matching element itself is not
+ /// included. If no elements match, returns `None`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_split_once)]
+ /// let s = [1, 2, 3, 2, 4];
+ /// assert_eq!(s.split_once(|&x| x == 2), Some((
+ /// &[1][..],
+ /// &[3, 2, 4][..]
+ /// )));
+ /// assert_eq!(s.split_once(|&x| x == 0), None);
+ /// ```
+ #[unstable(feature = "slice_split_once", reason = "newly added", issue = "112811")]
+ #[inline]
+ pub fn split_once<F>(&self, pred: F) -> Option<(&[T], &[T])>
+ where
+ F: FnMut(&T) -> bool,
+ {
+ let index = self.iter().position(pred)?;
+ Some((&self[..index], &self[index + 1..]))
+ }
+
+ /// Splits the slice on the last element that matches the specified
+ /// predicate.
+ ///
+    /// If any matching elements are present in the slice, returns the prefix
+ /// before the match and suffix after. The matching element itself is not
+ /// included. If no elements match, returns `None`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_split_once)]
+ /// let s = [1, 2, 3, 2, 4];
+ /// assert_eq!(s.rsplit_once(|&x| x == 2), Some((
+ /// &[1, 2, 3][..],
+ /// &[4][..]
+ /// )));
+ /// assert_eq!(s.rsplit_once(|&x| x == 0), None);
+ /// ```
+ #[unstable(feature = "slice_split_once", reason = "newly added", issue = "112811")]
+ #[inline]
+ pub fn rsplit_once<F>(&self, pred: F) -> Option<(&[T], &[T])>
+ where
+ F: FnMut(&T) -> bool,
+ {
+ let index = self.iter().rposition(pred)?;
+ Some((&self[..index], &self[index + 1..]))
+ }
+
/// Returns `true` if the slice contains an element with the given value.
///
/// This operation is *O*(*n*).
diff --git a/library/core/src/slice/raw.rs b/library/core/src/slice/raw.rs
index 48a6eb03b..9cdf9b68a 100644
--- a/library/core/src/slice/raw.rs
+++ b/library/core/src/slice/raw.rs
@@ -90,6 +90,7 @@ use crate::ptr;
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_from_raw_parts", since = "1.64.0")]
#[must_use]
+#[rustc_diagnostic_item = "slice_from_raw_parts"]
pub const unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T] {
// SAFETY: the caller must uphold the safety contract for `from_raw_parts`.
unsafe {
@@ -136,6 +137,7 @@ pub const unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_slice_from_raw_parts_mut", issue = "67456")]
#[must_use]
+#[rustc_diagnostic_item = "slice_from_raw_parts_mut"]
pub const unsafe fn from_raw_parts_mut<'a, T>(data: *mut T, len: usize) -> &'a mut [T] {
// SAFETY: the caller must uphold the safety contract for `from_raw_parts_mut`.
unsafe {
diff --git a/library/core/src/slice/sort.rs b/library/core/src/slice/sort.rs
index db76d2625..993a608f4 100644
--- a/library/core/src/slice/sort.rs
+++ b/library/core/src/slice/sort.rs
@@ -628,9 +628,14 @@ where
let _pivot_guard = InsertionHole { src: &*tmp, dest: pivot };
let pivot = &*tmp;
+ let len = v.len();
+ if len == 0 {
+ return 0;
+ }
+
// Now partition the slice.
let mut l = 0;
- let mut r = v.len();
+ let mut r = len;
loop {
// SAFETY: The unsafety below involves indexing an array.
// For the first one: We already do the bounds checking here with `l < r`.
@@ -643,8 +648,11 @@ where
}
// Find the last element equal to the pivot.
- while l < r && is_less(pivot, v.get_unchecked(r - 1)) {
+ loop {
r -= 1;
+ if l >= r || !is_less(pivot, v.get_unchecked(r)) {
+ break;
+ }
}
// Are we done?
@@ -653,7 +661,6 @@ where
}
// Swap the found pair of out-of-order elements.
- r -= 1;
let ptr = v.as_mut_ptr();
ptr::swap(ptr.add(l), ptr.add(r));
l += 1;
diff --git a/library/core/src/str/iter.rs b/library/core/src/str/iter.rs
index cd16810c4..c30f01b3c 100644
--- a/library/core/src/str/iter.rs
+++ b/library/core/src/str/iter.rs
@@ -1360,7 +1360,7 @@ impl<'a, P: Pattern<'a, Searcher: Clone>> Clone for SplitInclusive<'a, P> {
}
#[stable(feature = "split_inclusive", since = "1.51.0")]
-impl<'a, P: Pattern<'a, Searcher: ReverseSearcher<'a>>> DoubleEndedIterator
+impl<'a, P: Pattern<'a, Searcher: DoubleEndedSearcher<'a>>> DoubleEndedIterator
for SplitInclusive<'a, P>
{
#[inline]
diff --git a/library/core/src/str/mod.rs b/library/core/src/str/mod.rs
index dfa2d4fd5..27178328b 100644
--- a/library/core/src/str/mod.rs
+++ b/library/core/src/str/mod.rs
@@ -81,6 +81,7 @@ use iter::{MatchesInternal, SplitNInternal};
#[cold]
#[track_caller]
#[rustc_allow_const_fn_unstable(const_eval_select)]
+#[cfg(not(feature = "panic_immediate_abort"))]
const fn slice_error_fail(s: &str, begin: usize, end: usize) -> ! {
// SAFETY: panics for both branches
unsafe {
@@ -92,6 +93,11 @@ const fn slice_error_fail(s: &str, begin: usize, end: usize) -> ! {
}
}
+#[cfg(feature = "panic_immediate_abort")]
+const fn slice_error_fail(s: &str, begin: usize, end: usize) -> ! {
+ slice_error_fail_ct(s, begin, end)
+}
+
#[track_caller]
const fn slice_error_fail_ct(_: &str, _: usize, _: usize) -> ! {
panic!("failed to slice string");
@@ -386,7 +392,7 @@ impl str {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "rustc_str_as_ptr", since = "1.32.0")]
- #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
+ #[rustc_never_returns_null_ptr]
#[must_use]
#[inline(always)]
pub const fn as_ptr(&self) -> *const u8 {
@@ -402,7 +408,7 @@ impl str {
/// It is your responsibility to make sure that the string slice only gets
/// modified in a way that it remains valid UTF-8.
#[stable(feature = "str_as_mut_ptr", since = "1.36.0")]
- #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
+ #[rustc_never_returns_null_ptr]
#[must_use]
#[inline(always)]
pub fn as_mut_ptr(&mut self) -> *mut u8 {
@@ -808,7 +814,7 @@ impl str {
/// assert_eq!(Some((0, 'y')), char_indices.next()); // not (0, 'y̆')
/// assert_eq!(Some((1, '\u{0306}')), char_indices.next());
///
- /// // note the 3 here - the last character took up two bytes
+ /// // note the 3 here - the previous character took up two bytes
/// assert_eq!(Some((3, 'e')), char_indices.next());
/// assert_eq!(Some((4, 's')), char_indices.next());
///
diff --git a/library/core/src/str/pattern.rs b/library/core/src/str/pattern.rs
index d5d6d60ac..701e61e66 100644
--- a/library/core/src/str/pattern.rs
+++ b/library/core/src/str/pattern.rs
@@ -806,6 +806,8 @@ unsafe impl<'a, const N: usize> ReverseSearcher<'a> for CharArraySearcher<'a, N>
searcher_methods!(reverse);
}
+impl<'a, const N: usize> DoubleEndedSearcher<'a> for CharArraySearcher<'a, N> {}
+
/// Searches for chars that are equal to any of the [`char`]s in the array.
///
/// # Examples
@@ -826,6 +828,8 @@ unsafe impl<'a, 'b, const N: usize> ReverseSearcher<'a> for CharArrayRefSearcher
searcher_methods!(reverse);
}
+impl<'a, 'b, const N: usize> DoubleEndedSearcher<'a> for CharArrayRefSearcher<'a, 'b, N> {}
+
/////////////////////////////////////////////////////////////////////////////
// Impl for &[char]
/////////////////////////////////////////////////////////////////////////////
diff --git a/library/core/src/str/traits.rs b/library/core/src/str/traits.rs
index 2b37af66b..16fb1dad7 100644
--- a/library/core/src/str/traits.rs
+++ b/library/core/src/str/traits.rs
@@ -624,6 +624,7 @@ pub trait FromStr: Sized {
/// assert_eq!(5, x);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_diagnostic_item = "from_str_method"]
fn from_str(s: &str) -> Result<Self, Self::Err>;
}
diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
index cf1fbe2d3..5f1f41e68 100644
--- a/library/core/src/sync/atomic.rs
+++ b/library/core/src/sync/atomic.rs
@@ -4,26 +4,12 @@
//! threads, and are the building blocks of other concurrent
//! types.
//!
-//! Rust atomics currently follow the same rules as [C++20 atomics][cpp], specifically `atomic_ref`.
-//! Basically, creating a *shared reference* to one of the Rust atomic types corresponds to creating
-//! an `atomic_ref` in C++; the `atomic_ref` is destroyed when the lifetime of the shared reference
-//! ends. (A Rust atomic type that is exclusively owned or behind a mutable reference does *not*
-//! correspond to an "atomic object" in C++, since it can be accessed via non-atomic operations.)
-//!
//! This module defines atomic versions of a select number of primitive
//! types, including [`AtomicBool`], [`AtomicIsize`], [`AtomicUsize`],
//! [`AtomicI8`], [`AtomicU16`], etc.
//! Atomic types present operations that, when used correctly, synchronize
//! updates between threads.
//!
-//! Each method takes an [`Ordering`] which represents the strength of
-//! the memory barrier for that operation. These orderings are the
-//! same as the [C++20 atomic orderings][1]. For more information see the [nomicon][2].
-//!
-//! [cpp]: https://en.cppreference.com/w/cpp/atomic
-//! [1]: https://en.cppreference.com/w/cpp/atomic/memory_order
-//! [2]: ../../../nomicon/atomics.html
-//!
//! Atomic variables are safe to share between threads (they implement [`Sync`])
//! but they do not themselves provide the mechanism for sharing and follow the
//! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
@@ -36,6 +22,75 @@
//! the constant initializers like [`AtomicBool::new`]. Atomic statics
//! are often used for lazy global initialization.
//!
+//! ## Memory model for atomic accesses
+//!
+//! Rust atomics currently follow the same rules as [C++20 atomics][cpp], specifically `atomic_ref`.
+//! Basically, creating a *shared reference* to one of the Rust atomic types corresponds to creating
+//! an `atomic_ref` in C++; the `atomic_ref` is destroyed when the lifetime of the shared reference
+//! ends. (A Rust atomic type that is exclusively owned or behind a mutable reference does *not*
+//! correspond to an "atomic object" in C++, since it can be accessed via non-atomic operations.)
+//!
+//! [cpp]: https://en.cppreference.com/w/cpp/atomic
+//!
+//! Each method takes an [`Ordering`] which represents the strength of
+//! the memory barrier for that operation. These orderings are the
+//! same as the [C++20 atomic orderings][1]. For more information see the [nomicon][2].
+//!
+//! [1]: https://en.cppreference.com/w/cpp/atomic/memory_order
+//! [2]: ../../../nomicon/atomics.html
+//!
+//! Since C++ does not support mixing atomic and non-atomic accesses, or non-synchronized
+//! different-sized accesses to the same data, Rust does not support those operations either.
+//! Note that both of those restrictions only apply if the accesses are non-synchronized.
+//!
+//! ```rust,no_run undefined_behavior
+//! use std::sync::atomic::{AtomicU16, AtomicU8, Ordering};
+//! use std::mem::transmute;
+//! use std::thread;
+//!
+//! let atomic = AtomicU16::new(0);
+//!
+//! thread::scope(|s| {
+//! // This is UB: mixing atomic and non-atomic accesses
+//! s.spawn(|| atomic.store(1, Ordering::Relaxed));
+//! s.spawn(|| unsafe { atomic.as_ptr().write(2) });
+//! });
+//!
+//! thread::scope(|s| {
+//! // This is UB: even reads are not allowed to be mixed
+//! s.spawn(|| atomic.load(Ordering::Relaxed));
+//! s.spawn(|| unsafe { atomic.as_ptr().read() });
+//! });
+//!
+//! thread::scope(|s| {
+//! // This is fine, `join` synchronizes the code in a way such that atomic
+//! // and non-atomic accesses can't happen "at the same time"
+//! let handle = s.spawn(|| atomic.store(1, Ordering::Relaxed));
+//! handle.join().unwrap();
+//! s.spawn(|| unsafe { atomic.as_ptr().write(2) });
+//! });
+//!
+//! thread::scope(|s| {
+//! // This is UB: using different-sized atomic accesses to the same data
+//! s.spawn(|| atomic.store(1, Ordering::Relaxed));
+//! s.spawn(|| unsafe {
+//! let differently_sized = transmute::<&AtomicU16, &AtomicU8>(&atomic);
+//! differently_sized.store(2, Ordering::Relaxed);
+//! });
+//! });
+//!
+//! thread::scope(|s| {
+//! // This is fine, `join` synchronizes the code in a way such that
+//! // differently-sized accesses can't happen "at the same time"
+//! let handle = s.spawn(|| atomic.store(1, Ordering::Relaxed));
+//! handle.join().unwrap();
+//! s.spawn(|| unsafe {
+//! let differently_sized = transmute::<&AtomicU16, &AtomicU8>(&atomic);
+//! differently_sized.store(2, Ordering::Relaxed);
+//! });
+//! });
+//! ```
+//!
//! # Portability
//!
//! All atomic types in this module are guaranteed to be [lock-free] if they're
@@ -79,6 +134,40 @@
//!
//! [lock-free]: https://en.wikipedia.org/wiki/Non-blocking_algorithm
//!
+//! # Atomic accesses to read-only memory
+//!
+//! In general, *all* atomic accesses on read-only memory are Undefined Behavior. For instance, attempting
+//! to do a `compare_exchange` that will definitely fail (making it conceptually a read-only
+//! operation) can still cause a page fault if the underlying memory page is mapped read-only. Since
+//! atomic `load`s might be implemented using compare-exchange operations, even a `load` can fault
+//! on read-only memory.
+//!
+//! For the purpose of this section, "read-only memory" is defined as memory that is read-only in
+//! the underlying target, i.e., the pages are mapped with a read-only flag and any attempt to write
+//! will cause a page fault. In particular, an `&u128` reference that points to memory that is
+//! read-write mapped is *not* considered to point to "read-only memory". In Rust, almost all memory
+//! is read-write; the only exceptions are memory created by `const` items or `static` items without
+//! interior mutability, and memory that was specifically marked as read-only by the operating
+//! system via platform-specific APIs.
+//!
+//! As an exception from the general rule stated above, "sufficiently small" atomic loads with
+//! `Ordering::Relaxed` are implemented in a way that works on read-only memory, and are hence not
+//! Undefined Behavior. The exact size limit for what makes a load "sufficiently small" varies
+//! depending on the target:
+//!
+//! | `target_arch` | Size limit |
+//! |---------------|---------|
+//! | `x86`, `arm`, `mips`, `mips32r6`, `powerpc`, `riscv32`, `sparc`, `hexagon` | 4 bytes |
+//! | `x86_64`, `aarch64`, `loongarch64`, `mips64`, `mips64r6`, `powerpc64`, `riscv64`, `sparc64`, `s390x` | 8 bytes |
+//!
+//! Atomic loads that are larger than this limit as well as atomic loads with ordering other
+//! than `Relaxed`, as well as *all* atomic loads on targets not listed in the table, might still be
+//! read-only under certain conditions, but that is not a stable guarantee and should not be relied
+//! upon.
+//!
+//! If you need to do an acquire load on read-only memory, you can do a relaxed load followed by an
+//! acquire fence instead.
+//!
//! # Examples
//!
//! A simple spinlock:
@@ -319,7 +408,7 @@ impl AtomicBool {
/// # Examples
///
/// ```
- /// #![feature(atomic_from_ptr, pointer_is_aligned)]
+ /// #![feature(pointer_is_aligned)]
/// use std::sync::atomic::{self, AtomicBool};
/// use std::mem::align_of;
///
@@ -346,13 +435,17 @@ impl AtomicBool {
///
/// # Safety
///
- /// * `ptr` must be aligned to `align_of::<AtomicBool>()` (note that on some platforms this can be bigger than `align_of::<bool>()`).
+ /// * `ptr` must be aligned to `align_of::<AtomicBool>()` (note that on some platforms this can
+ /// be bigger than `align_of::<bool>()`).
/// * `ptr` must be [valid] for both reads and writes for the whole lifetime `'a`.
- /// * The value behind `ptr` must not be accessed through non-atomic operations for the whole lifetime `'a`.
+ /// * You must adhere to the [Memory model for atomic accesses]. In particular, it is not
+ /// allowed to mix atomic and non-atomic accesses, or atomic accesses of different sizes,
+ /// without synchronization.
///
/// [valid]: crate::ptr#safety
- #[unstable(feature = "atomic_from_ptr", issue = "108652")]
- #[rustc_const_unstable(feature = "atomic_from_ptr", issue = "108652")]
+ /// [Memory model for atomic accesses]: self#memory-model-for-atomic-accesses
+ #[stable(feature = "atomic_from_ptr", since = "1.75.0")]
+ #[rustc_const_unstable(feature = "const_atomic_from_ptr", issue = "108652")]
pub const unsafe fn from_ptr<'a>(ptr: *mut bool) -> &'a AtomicBool {
// SAFETY: guaranteed by the caller
unsafe { &*ptr.cast() }
@@ -1018,7 +1111,7 @@ impl AtomicBool {
#[inline]
#[stable(feature = "atomic_as_ptr", since = "1.70.0")]
#[rustc_const_stable(feature = "atomic_as_ptr", since = "1.70.0")]
- #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
+ #[rustc_never_returns_null_ptr]
pub const fn as_ptr(&self) -> *mut bool {
self.v.get().cast()
}
@@ -1113,7 +1206,7 @@ impl<T> AtomicPtr<T> {
/// # Examples
///
/// ```
- /// #![feature(atomic_from_ptr, pointer_is_aligned)]
+ /// #![feature(pointer_is_aligned)]
/// use std::sync::atomic::{self, AtomicPtr};
/// use std::mem::align_of;
///
@@ -1140,13 +1233,17 @@ impl<T> AtomicPtr<T> {
///
/// # Safety
///
- /// * `ptr` must be aligned to `align_of::<AtomicPtr<T>>()` (note that on some platforms this can be bigger than `align_of::<*mut T>()`).
+ /// * `ptr` must be aligned to `align_of::<AtomicPtr<T>>()` (note that on some platforms this
+ /// can be bigger than `align_of::<*mut T>()`).
/// * `ptr` must be [valid] for both reads and writes for the whole lifetime `'a`.
- /// * The value behind `ptr` must not be accessed through non-atomic operations for the whole lifetime `'a`.
+ /// * You must adhere to the [Memory model for atomic accesses]. In particular, it is not
+ /// allowed to mix atomic and non-atomic accesses, or atomic accesses of different sizes,
+ /// without synchronization.
///
/// [valid]: crate::ptr#safety
- #[unstable(feature = "atomic_from_ptr", issue = "108652")]
- #[rustc_const_unstable(feature = "atomic_from_ptr", issue = "108652")]
+ /// [Memory model for atomic accesses]: self#memory-model-for-atomic-accesses
+ #[stable(feature = "atomic_from_ptr", since = "1.75.0")]
+ #[rustc_const_unstable(feature = "const_atomic_from_ptr", issue = "108652")]
pub const unsafe fn from_ptr<'a>(ptr: *mut *mut T) -> &'a AtomicPtr<T> {
// SAFETY: guaranteed by the caller
unsafe { &*ptr.cast() }
@@ -1954,7 +2051,7 @@ impl<T> AtomicPtr<T> {
#[inline]
#[stable(feature = "atomic_as_ptr", since = "1.70.0")]
#[rustc_const_stable(feature = "atomic_as_ptr", since = "1.70.0")]
- #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
+ #[rustc_never_returns_null_ptr]
pub const fn as_ptr(&self) -> *mut *mut T {
self.p.get()
}
@@ -2083,7 +2180,7 @@ macro_rules! atomic_int {
/// # Examples
///
/// ```
- /// #![feature(atomic_from_ptr, pointer_is_aligned)]
+ /// #![feature(pointer_is_aligned)]
#[doc = concat!($extra_feature, "use std::sync::atomic::{self, ", stringify!($atomic_type), "};")]
/// use std::mem::align_of;
///
@@ -2111,14 +2208,18 @@ macro_rules! atomic_int {
///
/// # Safety
///
- /// * `ptr` must be aligned to `align_of::<AtomicBool>()` (note that on some platforms this can be bigger than `align_of::<bool>()`).
- #[doc = concat!(" * `ptr` must be aligned to `align_of::<", stringify!($atomic_type), ">()` (note that on some platforms this can be bigger than `align_of::<", stringify!($int_type), ">()`).")]
+ #[doc = concat!(" * `ptr` must be aligned to \
+ `align_of::<", stringify!($atomic_type), ">()` (note that on some platforms this \
+ can be bigger than `align_of::<", stringify!($int_type), ">()`).")]
/// * `ptr` must be [valid] for both reads and writes for the whole lifetime `'a`.
- /// * The value behind `ptr` must not be accessed through non-atomic operations for the whole lifetime `'a`.
+ /// * You must adhere to the [Memory model for atomic accesses]. In particular, it is not
+ /// allowed to mix atomic and non-atomic accesses, or atomic accesses of different sizes,
+ /// without synchronization.
///
/// [valid]: crate::ptr#safety
- #[unstable(feature = "atomic_from_ptr", issue = "108652")]
- #[rustc_const_unstable(feature = "atomic_from_ptr", issue = "108652")]
+ /// [Memory model for atomic accesses]: self#memory-model-for-atomic-accesses
+ #[stable(feature = "atomic_from_ptr", since = "1.75.0")]
+ #[rustc_const_unstable(feature = "const_atomic_from_ptr", issue = "108652")]
pub const unsafe fn from_ptr<'a>(ptr: *mut $int_type) -> &'a $atomic_type {
// SAFETY: guaranteed by the caller
unsafe { &*ptr.cast() }
@@ -2893,7 +2994,7 @@ macro_rules! atomic_int {
#[inline]
#[stable(feature = "atomic_as_ptr", since = "1.70.0")]
#[rustc_const_stable(feature = "atomic_as_ptr", since = "1.70.0")]
- #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
+ #[rustc_never_returns_null_ptr]
pub const fn as_ptr(&self) -> *mut $int_type {
self.v.get()
}
diff --git a/library/core/src/sync/exclusive.rs b/library/core/src/sync/exclusive.rs
index ff538d55c..fa02dd52e 100644
--- a/library/core/src/sync/exclusive.rs
+++ b/library/core/src/sync/exclusive.rs
@@ -3,7 +3,7 @@
use core::fmt;
use core::future::Future;
use core::marker::Tuple;
-use core::ops::{Generator, GeneratorState};
+use core::ops::{Coroutine, CoroutineState};
use core::pin::Pin;
use core::task::{Context, Poll};
@@ -206,16 +206,16 @@ where
}
}
-#[unstable(feature = "generator_trait", issue = "43122")] // also #98407
-impl<R, G> Generator<R> for Exclusive<G>
+#[unstable(feature = "coroutine_trait", issue = "43122")] // also #98407
+impl<R, G> Coroutine<R> for Exclusive<G>
where
- G: Generator<R> + ?Sized,
+ G: Coroutine<R> + ?Sized,
{
type Yield = G::Yield;
type Return = G::Return;
#[inline]
- fn resume(self: Pin<&mut Self>, arg: R) -> GeneratorState<Self::Yield, Self::Return> {
+ fn resume(self: Pin<&mut Self>, arg: R) -> CoroutineState<Self::Yield, Self::Return> {
G::resume(self.get_pin_mut(), arg)
}
}
diff --git a/library/core/src/task/wake.rs b/library/core/src/task/wake.rs
index b63fd5c90..817e39942 100644
--- a/library/core/src/task/wake.rs
+++ b/library/core/src/task/wake.rs
@@ -231,6 +231,10 @@ impl fmt::Debug for Context<'_> {
/// this might be done to wake a future when a blocking function call completes on another
/// thread.
///
+/// Note that it is preferable to use `waker.clone_from(&new_waker)` instead
+/// of `*waker = new_waker.clone()`, as the former will avoid cloning the waker
+/// unnecessarily if the two wakers [wake the same task](Self::will_wake).
+///
/// [`Future::poll()`]: core::future::Future::poll
/// [`Poll::Pending`]: core::task::Poll::Pending
#[cfg_attr(not(doc), repr(transparent))] // work around https://github.com/rust-lang/rust/issues/66401
@@ -302,7 +306,9 @@ impl Waker {
/// when the `Waker`s would awaken the same task. However, if this function
/// returns `true`, it is guaranteed that the `Waker`s will awaken the same task.
///
- /// This function is primarily used for optimization purposes.
+ /// This function is primarily used for optimization purposes — for example,
+ /// this type's [`clone_from`](Self::clone_from) implementation uses it to
+ /// avoid cloning the waker when they would wake the same task anyway.
#[inline]
#[must_use]
#[stable(feature = "futures_api", since = "1.36.0")]
@@ -382,6 +388,13 @@ impl Clone for Waker {
waker: unsafe { (self.waker.vtable.clone)(self.waker.data) },
}
}
+
+ #[inline]
+ fn clone_from(&mut self, source: &Self) {
+ if !self.will_wake(source) {
+ *self = source.clone();
+ }
+ }
}
#[stable(feature = "futures_api", since = "1.36.0")]
diff --git a/library/core/src/time.rs b/library/core/src/time.rs
index 1e8d28979..6ef35d841 100644
--- a/library/core/src/time.rs
+++ b/library/core/src/time.rs
@@ -910,6 +910,7 @@ impl Duration {
impl Add for Duration {
type Output = Duration;
+ #[inline]
fn add(self, rhs: Duration) -> Duration {
self.checked_add(rhs).expect("overflow when adding durations")
}
@@ -917,6 +918,7 @@ impl Add for Duration {
#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
impl AddAssign for Duration {
+ #[inline]
fn add_assign(&mut self, rhs: Duration) {
*self = *self + rhs;
}
@@ -926,6 +928,7 @@ impl AddAssign for Duration {
impl Sub for Duration {
type Output = Duration;
+ #[inline]
fn sub(self, rhs: Duration) -> Duration {
self.checked_sub(rhs).expect("overflow when subtracting durations")
}
@@ -933,6 +936,7 @@ impl Sub for Duration {
#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
impl SubAssign for Duration {
+ #[inline]
fn sub_assign(&mut self, rhs: Duration) {
*self = *self - rhs;
}
@@ -942,6 +946,7 @@ impl SubAssign for Duration {
impl Mul<u32> for Duration {
type Output = Duration;
+ #[inline]
fn mul(self, rhs: u32) -> Duration {
self.checked_mul(rhs).expect("overflow when multiplying duration by scalar")
}
@@ -951,6 +956,7 @@ impl Mul<u32> for Duration {
impl Mul<Duration> for u32 {
type Output = Duration;
+ #[inline]
fn mul(self, rhs: Duration) -> Duration {
rhs * self
}
@@ -958,6 +964,7 @@ impl Mul<Duration> for u32 {
#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
impl MulAssign<u32> for Duration {
+ #[inline]
fn mul_assign(&mut self, rhs: u32) {
*self = *self * rhs;
}
@@ -967,6 +974,7 @@ impl MulAssign<u32> for Duration {
impl Div<u32> for Duration {
type Output = Duration;
+ #[inline]
fn div(self, rhs: u32) -> Duration {
self.checked_div(rhs).expect("divide by zero error when dividing duration by scalar")
}
@@ -974,6 +982,7 @@ impl Div<u32> for Duration {
#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
impl DivAssign<u32> for Duration {
+ #[inline]
fn div_assign(&mut self, rhs: u32) {
*self = *self / rhs;
}
diff --git a/library/core/tests/array.rs b/library/core/tests/array.rs
index 982d7853f..81da75d32 100644
--- a/library/core/tests/array.rs
+++ b/library/core/tests/array.rs
@@ -663,7 +663,7 @@ fn array_mixed_equality_nans() {
#[test]
fn array_into_iter_fold() {
- // Strings to help MIRI catch if we double-free or something
+ // Strings to help Miri catch if we double-free or something
let a = ["Aa".to_string(), "Bb".to_string(), "Cc".to_string()];
let mut s = "s".to_string();
a.into_iter().for_each(|b| s += &b);
@@ -679,7 +679,7 @@ fn array_into_iter_fold() {
#[test]
fn array_into_iter_rfold() {
- // Strings to help MIRI catch if we double-free or something
+ // Strings to help Miri catch if we double-free or something
let a = ["Aa".to_string(), "Bb".to_string(), "Cc".to_string()];
let mut s = "s".to_string();
a.into_iter().rev().for_each(|b| s += &b);
diff --git a/library/std/src/io/readbuf/tests.rs b/library/core/tests/io/borrowed_buf.rs
index 89a2f6b22..69511e49a 100644
--- a/library/std/src/io/readbuf/tests.rs
+++ b/library/core/tests/io/borrowed_buf.rs
@@ -1,5 +1,5 @@
-use super::BorrowedBuf;
-use crate::mem::MaybeUninit;
+use core::io::BorrowedBuf;
+use core::mem::MaybeUninit;
/// Test that BorrowedBuf has the correct numbers when created with new
#[test]
diff --git a/library/core/tests/io/mod.rs b/library/core/tests/io/mod.rs
new file mode 100644
index 000000000..a24893a52
--- /dev/null
+++ b/library/core/tests/io/mod.rs
@@ -0,0 +1 @@
+mod borrowed_buf;
diff --git a/library/core/tests/iter/adapters/zip.rs b/library/core/tests/iter/adapters/zip.rs
index 585cfbb90..c3508be85 100644
--- a/library/core/tests/iter/adapters/zip.rs
+++ b/library/core/tests/iter/adapters/zip.rs
@@ -184,7 +184,11 @@ fn test_zip_nested_sideffectful() {
let it = xs.iter_mut().map(|x| *x = 1).enumerate().zip(&ys);
it.count();
}
- assert_eq!(&xs, &[1, 1, 1, 1, 1, 0]);
+ let length_aware = &xs == &[1, 1, 1, 1, 0, 0];
+ let probe_first = &xs == &[1, 1, 1, 1, 1, 0];
+
+ // either implementation is valid according to zip documentation
+ assert!(length_aware || probe_first);
}
#[test]
diff --git a/library/core/tests/lib.rs b/library/core/tests/lib.rs
index e4003a208..168b47dc9 100644
--- a/library/core/tests/lib.rs
+++ b/library/core/tests/lib.rs
@@ -15,9 +15,7 @@
#![feature(const_hash)]
#![feature(const_heap)]
#![feature(const_maybe_uninit_as_mut_ptr)]
-#![feature(const_maybe_uninit_assume_init_read)]
#![feature(const_nonnull_new)]
-#![feature(const_pointer_byte_offsets)]
#![feature(const_pointer_is_aligned)]
#![feature(const_ptr_as_ref)]
#![feature(const_ptr_write)]
@@ -25,6 +23,7 @@
#![feature(const_likely)]
#![feature(const_location_fields)]
#![feature(core_intrinsics)]
+#![feature(core_io_borrowed_buf)]
#![feature(core_private_bignum)]
#![feature(core_private_diy_float)]
#![feature(dec2flt)]
@@ -49,6 +48,7 @@
#![feature(sort_internals)]
#![feature(slice_take)]
#![feature(slice_from_ptr_range)]
+#![feature(slice_split_once)]
#![feature(split_as_slice)]
#![feature(maybe_uninit_uninit_array)]
#![feature(maybe_uninit_write_slice)]
@@ -87,7 +87,6 @@
#![feature(const_waker)]
#![feature(never_type)]
#![feature(unwrap_infallible)]
-#![feature(pointer_byte_offsets)]
#![feature(pointer_is_aligned)]
#![feature(portable_simd)]
#![feature(ptr_metadata)]
@@ -120,8 +119,6 @@
#![deny(unsafe_op_in_unsafe_fn)]
#![deny(fuzzy_provenance_casts)]
-extern crate test;
-
mod alloc;
mod any;
mod array;
@@ -139,6 +136,7 @@ mod fmt;
mod future;
mod hash;
mod intrinsics;
+mod io;
mod iter;
mod lazy;
#[cfg(test)]
diff --git a/library/core/tests/mem.rs b/library/core/tests/mem.rs
index 5c2e18745..20498b16c 100644
--- a/library/core/tests/mem.rs
+++ b/library/core/tests/mem.rs
@@ -565,3 +565,24 @@ fn offset_of_addr() {
assert_eq!(ptr::addr_of!(base).addr() + offset_of!(Foo, z.0), ptr::addr_of!(base.z.0).addr());
assert_eq!(ptr::addr_of!(base).addr() + offset_of!(Foo, z.1), ptr::addr_of!(base.z.1).addr());
}
+
+#[test]
+fn const_maybe_uninit_zeroed() {
+ // Sanity check for `MaybeUninit::zeroed` in a realistic const situation (plugin array term)
+ #[repr(C)]
+ struct Foo {
+ a: Option<&'static str>,
+ b: Bar,
+ c: f32,
+ d: *const u8,
+ }
+ #[repr(C)]
+ struct Bar(usize);
+ struct FooPtr(*const Foo);
+ unsafe impl Sync for FooPtr {}
+
+ static UNINIT: FooPtr = FooPtr([unsafe { MaybeUninit::zeroed().assume_init() }].as_ptr());
+ const SIZE: usize = size_of::<Foo>();
+
+ assert_eq!(unsafe { (*UNINIT.0.cast::<[[u8; SIZE]; 1]>())[0] }, [0u8; SIZE]);
+}
diff --git a/library/core/tests/net/socket_addr.rs b/library/core/tests/net/socket_addr.rs
index 35a69cead..3d013d37e 100644
--- a/library/core/tests/net/socket_addr.rs
+++ b/library/core/tests/net/socket_addr.rs
@@ -199,6 +199,9 @@ fn compare() {
let v6_1 = "[2001:db8:f00::1002]:23456".parse::<SocketAddrV6>().unwrap();
let v6_2 = "[2001:db8:f00::2001]:12345".parse::<SocketAddrV6>().unwrap();
let v6_3 = "[2001:db8:f00::2001]:23456".parse::<SocketAddrV6>().unwrap();
+ let v6_4 = "[2001:db8:f00::2001%42]:23456".parse::<SocketAddrV6>().unwrap();
+ let mut v6_5 = "[2001:db8:f00::2001]:23456".parse::<SocketAddrV6>().unwrap();
+ v6_5.set_flowinfo(17);
// equality
assert_eq!(v4_1, v4_1);
@@ -207,6 +210,8 @@ fn compare() {
assert_eq!(SocketAddr::V6(v6_1), SocketAddr::V6(v6_1));
assert!(v4_1 != v4_2);
assert!(v6_1 != v6_2);
+ assert!(v6_3 != v6_4);
+ assert!(v6_3 != v6_5);
// compare different addresses
assert!(v4_1 < v4_2);
@@ -226,6 +231,12 @@ fn compare() {
assert!(v4_3 > v4_1);
assert!(v6_3 > v6_1);
+ // compare the same address with different scope_id
+ assert!(v6_3 < v6_4);
+
+ // compare the same address with different flowinfo
+ assert!(v6_3 < v6_5);
+
// compare with an inferred right-hand side
assert_eq!(v4_1, "224.120.45.1:23456".parse().unwrap());
assert_eq!(v6_1, "[2001:db8:f00::1002]:23456".parse().unwrap());
diff --git a/library/core/tests/num/flt2dec/mod.rs b/library/core/tests/num/flt2dec/mod.rs
index 30843cc3d..83e2707b5 100644
--- a/library/core/tests/num/flt2dec/mod.rs
+++ b/library/core/tests/num/flt2dec/mod.rs
@@ -8,8 +8,6 @@ use core::num::flt2dec::{
};
use core::num::fmt::{Formatted, Part};
-pub use test::Bencher;
-
mod estimator;
mod strategy {
mod dragon;
diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs
index 865e702b5..666452ead 100644
--- a/library/core/tests/slice.rs
+++ b/library/core/tests/slice.rs
@@ -2476,6 +2476,26 @@ fn slice_rsplit_array_mut_out_of_bounds() {
let _ = v.rsplit_array_mut::<7>();
}
+#[test]
+fn slice_split_once() {
+ let v = &[1, 2, 3, 2, 4][..];
+
+ assert_eq!(v.split_once(|&x| x == 2), Some((&[1][..], &[3, 2, 4][..])));
+ assert_eq!(v.split_once(|&x| x == 1), Some((&[][..], &[2, 3, 2, 4][..])));
+ assert_eq!(v.split_once(|&x| x == 4), Some((&[1, 2, 3, 2][..], &[][..])));
+ assert_eq!(v.split_once(|&x| x == 0), None);
+}
+
+#[test]
+fn slice_rsplit_once() {
+ let v = &[1, 2, 3, 2, 4][..];
+
+ assert_eq!(v.rsplit_once(|&x| x == 2), Some((&[1, 2, 3][..], &[4][..])));
+ assert_eq!(v.rsplit_once(|&x| x == 1), Some((&[][..], &[2, 3, 2, 4][..])));
+ assert_eq!(v.rsplit_once(|&x| x == 4), Some((&[1, 2, 3, 2][..], &[][..])));
+ assert_eq!(v.rsplit_once(|&x| x == 0), None);
+}
+
macro_rules! take_tests {
(slice: &[], $($tts:tt)*) => {
take_tests!(ty: &[()], slice: &[], $($tts)*);
diff --git a/library/panic_unwind/src/gcc.rs b/library/panic_unwind/src/gcc.rs
index 08858dd92..54eb6627c 100644
--- a/library/panic_unwind/src/gcc.rs
+++ b/library/panic_unwind/src/gcc.rs
@@ -63,7 +63,7 @@ pub unsafe fn panic(data: Box<dyn Any + Send>) -> u32 {
_uwe: uw::_Unwind_Exception {
exception_class: rust_exception_class(),
exception_cleanup,
- private: [0; uw::unwinder_private_data_size],
+ private: [core::ptr::null(); uw::unwinder_private_data_size],
},
canary: &CANARY,
cause: data,
diff --git a/library/portable-simd/crates/core_simd/src/mod.rs b/library/portable-simd/crates/core_simd/src/mod.rs
index f9891a3b7..194267698 100644
--- a/library/portable-simd/crates/core_simd/src/mod.rs
+++ b/library/portable-simd/crates/core_simd/src/mod.rs
@@ -35,6 +35,5 @@ pub mod simd {
pub use crate::core_simd::masks::*;
pub use crate::core_simd::ord::*;
pub use crate::core_simd::swizzle::*;
- pub use crate::core_simd::swizzle_dyn::*;
pub use crate::core_simd::vector::*;
}
diff --git a/library/proc_macro/src/lib.rs b/library/proc_macro/src/lib.rs
index 0a70c488a..991fdb125 100644
--- a/library/proc_macro/src/lib.rs
+++ b/library/proc_macro/src/lib.rs
@@ -17,6 +17,8 @@
test(no_crate_inject, attr(deny(warnings))),
test(attr(allow(dead_code, deprecated, unused_variables, unused_mut)))
)]
+#![cfg_attr(not(bootstrap), doc(rust_logo))]
+#![cfg_attr(not(bootstrap), feature(rustdoc_internals))]
// This library is copied into rust-analyzer to allow loading rustc compiled proc macros.
// Please avoid unstable features where possible to minimize the amount of changes necessary
// to make it compile with rust-analyzer on stable.
diff --git a/library/std/Cargo.toml b/library/std/Cargo.toml
index 965132bde..f666b1888 100644
--- a/library/std/Cargo.toml
+++ b/library/std/Cargo.toml
@@ -17,8 +17,8 @@ cfg-if = { version = "1.0", features = ['rustc-dep-of-std'] }
panic_unwind = { path = "../panic_unwind", optional = true }
panic_abort = { path = "../panic_abort" }
core = { path = "../core", public = true }
-libc = { version = "0.2.148", default-features = false, features = ['rustc-dep-of-std'], public = true }
-compiler_builtins = { version = "0.1.100" }
+libc = { version = "0.2.150", default-features = false, features = ['rustc-dep-of-std'], public = true }
+compiler_builtins = { version = "0.1.103" }
profiler_builtins = { path = "../profiler_builtins", optional = true }
unwind = { path = "../unwind" }
hashbrown = { version = "0.14", default-features = false, features = ['rustc-dep-of-std'] }
@@ -72,7 +72,7 @@ llvm-libunwind = ["unwind/llvm-libunwind"]
system-llvm-libunwind = ["unwind/system-llvm-libunwind"]
# Make panics and failed asserts immediately abort without formatting any message
-panic_immediate_abort = ["core/panic_immediate_abort"]
+panic_immediate_abort = ["core/panic_immediate_abort", "alloc/panic_immediate_abort"]
# Enable std_detect default features for stdarch/crates/std_detect:
# https://github.com/rust-lang/stdarch/blob/master/crates/std_detect/Cargo.toml
diff --git a/library/std/build.rs b/library/std/build.rs
index 36516978b..ad0a82eab 100644
--- a/library/std/build.rs
+++ b/library/std/build.rs
@@ -3,17 +3,11 @@ use std::env;
fn main() {
println!("cargo:rerun-if-changed=build.rs");
let target = env::var("TARGET").expect("TARGET was not set");
- if target.contains("freebsd") {
- if env::var("RUST_STD_FREEBSD_12_ABI").is_ok() {
- println!("cargo:rustc-cfg=freebsd12");
- } else if env::var("RUST_STD_FREEBSD_13_ABI").is_ok() {
- println!("cargo:rustc-cfg=freebsd12");
- println!("cargo:rustc-cfg=freebsd13");
- }
- } else if target.contains("linux")
+ if target.contains("linux")
|| target.contains("netbsd")
|| target.contains("dragonfly")
|| target.contains("openbsd")
+ || target.contains("freebsd")
|| target.contains("solaris")
|| target.contains("illumos")
|| target.contains("apple-darwin")
@@ -36,6 +30,7 @@ fn main() {
|| target.contains("solid")
|| target.contains("nintendo-3ds")
|| target.contains("vita")
+ || target.contains("aix")
|| target.contains("nto")
|| target.contains("xous")
|| target.contains("hurd")
diff --git a/library/std/src/collections/hash/map.rs b/library/std/src/collections/hash/map.rs
index be173a7ac..4d109285d 100644
--- a/library/std/src/collections/hash/map.rs
+++ b/library/std/src/collections/hash/map.rs
@@ -24,7 +24,7 @@ use crate::sys;
/// reasonable best-effort is made to generate this seed from a high quality,
/// secure source of randomness provided by the host without blocking the
/// program. Because of this, the randomness of the seed depends on the output
-/// quality of the system's random number generator when the seed is created.
+/// quality of the system's random number generator when the seed is created.
/// In particular, seeds generated when the system's entropy pool is abnormally
/// low such as during system boot may be of a lower quality.
///
diff --git a/library/std/src/collections/hash/set.rs b/library/std/src/collections/hash/set.rs
index 6d85b26af..6a87f6e5f 100644
--- a/library/std/src/collections/hash/set.rs
+++ b/library/std/src/collections/hash/set.rs
@@ -144,7 +144,7 @@ impl<T> HashSet<T, RandomState> {
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn with_capacity(capacity: usize) -> HashSet<T, RandomState> {
- HashSet { base: base::HashSet::with_capacity_and_hasher(capacity, Default::default()) }
+ HashSet::with_capacity_and_hasher(capacity, Default::default())
}
}
diff --git a/library/std/src/fs.rs b/library/std/src/fs.rs
index 73cce35ac..4310e1083 100644
--- a/library/std/src/fs.rs
+++ b/library/std/src/fs.rs
@@ -184,11 +184,12 @@ pub struct DirEntry(fs_imp::DirEntry);
/// ```
#[derive(Clone, Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "FsOpenOptions")]
pub struct OpenOptions(fs_imp::OpenOptions);
/// Representation of the various timestamps on a file.
#[derive(Copy, Clone, Debug, Default)]
-#[unstable(feature = "file_set_times", issue = "98245")]
+#[stable(feature = "file_set_times", since = "1.75.0")]
pub struct FileTimes(fs_imp::FileTimes);
/// Representation of the various permissions on a file.
@@ -201,6 +202,7 @@ pub struct FileTimes(fs_imp::FileTimes);
/// [`PermissionsExt`]: crate::os::unix::fs::PermissionsExt
#[derive(Clone, PartialEq, Eq, Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "FsPermissions")]
pub struct Permissions(fs_imp::FilePermissions);
/// A structure representing a type of file with accessors for each file type.
@@ -674,8 +676,6 @@ impl File {
/// # Examples
///
/// ```no_run
- /// #![feature(file_set_times)]
- ///
/// fn main() -> std::io::Result<()> {
/// use std::fs::{self, File, FileTimes};
///
@@ -688,7 +688,7 @@ impl File {
/// Ok(())
/// }
/// ```
- #[unstable(feature = "file_set_times", issue = "98245")]
+ #[stable(feature = "file_set_times", since = "1.75.0")]
#[doc(alias = "futimens")]
#[doc(alias = "futimes")]
#[doc(alias = "SetFileTime")]
@@ -699,7 +699,7 @@ impl File {
/// Changes the modification time of the underlying file.
///
/// This is an alias for `set_times(FileTimes::new().set_modified(time))`.
- #[unstable(feature = "file_set_times", issue = "98245")]
+ #[stable(feature = "file_set_times", since = "1.75.0")]
#[inline]
pub fn set_modified(&self, time: SystemTime) -> io::Result<()> {
self.set_times(FileTimes::new().set_modified(time))
@@ -1413,20 +1413,20 @@ impl FileTimes {
/// Create a new `FileTimes` with no times set.
///
/// Using the resulting `FileTimes` in [`File::set_times`] will not modify any timestamps.
- #[unstable(feature = "file_set_times", issue = "98245")]
+ #[stable(feature = "file_set_times", since = "1.75.0")]
pub fn new() -> Self {
Self::default()
}
/// Set the last access time of a file.
- #[unstable(feature = "file_set_times", issue = "98245")]
+ #[stable(feature = "file_set_times", since = "1.75.0")]
pub fn set_accessed(mut self, t: SystemTime) -> Self {
self.0.set_accessed(t.into_inner());
self
}
/// Set the last modified time of a file.
- #[unstable(feature = "file_set_times", issue = "98245")]
+ #[stable(feature = "file_set_times", since = "1.75.0")]
pub fn set_modified(mut self, t: SystemTime) -> Self {
self.0.set_modified(t.into_inner());
self
@@ -1440,7 +1440,7 @@ impl AsInnerMut<fs_imp::FileTimes> for FileTimes {
}
// For implementing OS extension traits in `std::os`
-#[unstable(feature = "file_set_times", issue = "98245")]
+#[stable(feature = "file_set_times", since = "1.75.0")]
impl Sealed for FileTimes {}
impl Permissions {
@@ -2241,6 +2241,7 @@ pub fn canonicalize<P: AsRef<Path>>(path: P) -> io::Result<PathBuf> {
/// ```
#[doc(alias = "mkdir")]
#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "fs_create_dir")]
pub fn create_dir<P: AsRef<Path>>(path: P) -> io::Result<()> {
DirBuilder::new().create(path.as_ref())
}
diff --git a/library/std/src/fs/tests.rs b/library/std/src/fs/tests.rs
index d74f0f00e..547a7b705 100644
--- a/library/std/src/fs/tests.rs
+++ b/library/std/src/fs/tests.rs
@@ -1707,3 +1707,89 @@ fn test_file_times() {
assert_eq!(metadata.created().unwrap(), created);
}
}
+
+#[test]
+#[cfg(any(target_os = "macos", target_os = "ios", target_os = "tvos", target_os = "watchos"))]
+fn test_file_times_pre_epoch_with_nanos() {
+ #[cfg(target_os = "ios")]
+ use crate::os::ios::fs::FileTimesExt;
+ #[cfg(target_os = "macos")]
+ use crate::os::macos::fs::FileTimesExt;
+ #[cfg(target_os = "tvos")]
+ use crate::os::tvos::fs::FileTimesExt;
+ #[cfg(target_os = "watchos")]
+ use crate::os::watchos::fs::FileTimesExt;
+
+ let tmp = tmpdir();
+ let file = File::create(tmp.join("foo")).unwrap();
+
+ for (accessed, modified, created) in [
+ // The first round is to set filetimes to something we know works, but this time
+ // it's validated with nanoseconds as well which probe the numeric boundary.
+ (
+ SystemTime::UNIX_EPOCH + Duration::new(12345, 1),
+ SystemTime::UNIX_EPOCH + Duration::new(54321, 100_000_000),
+ SystemTime::UNIX_EPOCH + Duration::new(32123, 999_999_999),
+ ),
+ // The second round uses pre-epoch dates along with nanoseconds that probe
+ // the numeric boundary.
+ (
+ SystemTime::UNIX_EPOCH - Duration::new(1, 1),
+ SystemTime::UNIX_EPOCH - Duration::new(60, 100_000_000),
+ SystemTime::UNIX_EPOCH - Duration::new(3600, 999_999_999),
+ ),
+ ] {
+ let mut times = FileTimes::new();
+ times = times.set_accessed(accessed).set_modified(modified).set_created(created);
+ file.set_times(times).unwrap();
+
+ let metadata = file.metadata().unwrap();
+ assert_eq!(metadata.accessed().unwrap(), accessed);
+ assert_eq!(metadata.modified().unwrap(), modified);
+ assert_eq!(metadata.created().unwrap(), created);
+ }
+}
+
+#[test]
+#[cfg(windows)]
+fn windows_unix_socket_exists() {
+ use crate::sys::{c, net};
+ use crate::{mem, ptr};
+
+ let tmp = tmpdir();
+ let socket_path = tmp.join("socket");
+
+ // std doesn't currently support Unix sockets on Windows so manually create one here.
+ net::init();
+ unsafe {
+ let socket = c::WSASocketW(
+ c::AF_UNIX as i32,
+ c::SOCK_STREAM,
+ 0,
+ ptr::null_mut(),
+ 0,
+ c::WSA_FLAG_OVERLAPPED | c::WSA_FLAG_NO_HANDLE_INHERIT,
+ );
+ // AF_UNIX is not supported on earlier versions of Windows,
+ // so skip this test if it's unsupported and we're not in CI.
+ if socket == c::INVALID_SOCKET {
+ let error = c::WSAGetLastError();
+ if env::var_os("CI").is_none() && error == c::WSAEAFNOSUPPORT {
+ return;
+ } else {
+ panic!("Creating AF_UNIX socket failed (OS error {error})");
+ }
+ }
+ let mut addr = c::SOCKADDR_UN { sun_family: c::AF_UNIX, sun_path: mem::zeroed() };
+ let bytes = socket_path.as_os_str().as_encoded_bytes();
+ addr.sun_path[..bytes.len()].copy_from_slice(bytes);
+ let len = mem::size_of_val(&addr) as i32;
+ let result = c::bind(socket, ptr::addr_of!(addr).cast::<c::SOCKADDR>(), len);
+ c::closesocket(socket);
+ assert_eq!(result, 0);
+ }
+ // Make sure all ways of testing a file exist work for a Unix socket.
+ assert_eq!(socket_path.exists(), true);
+ assert_eq!(socket_path.try_exists().unwrap(), true);
+ assert_eq!(socket_path.metadata().is_ok(), true);
+}
diff --git a/library/std/src/io/buffered/bufreader.rs b/library/std/src/io/buffered/bufreader.rs
index 7097dfef8..55aafc3db 100644
--- a/library/std/src/io/buffered/bufreader.rs
+++ b/library/std/src/io/buffered/bufreader.rs
@@ -2,7 +2,8 @@ mod buffer;
use crate::fmt;
use crate::io::{
- self, BorrowedCursor, BufRead, IoSliceMut, Read, Seek, SeekFrom, SizeHint, DEFAULT_BUF_SIZE,
+ self, uninlined_slow_read_byte, BorrowedCursor, BufRead, IoSliceMut, Read, Seek, SeekFrom,
+ SizeHint, SpecReadByte, DEFAULT_BUF_SIZE,
};
use buffer::Buffer;
@@ -259,6 +260,22 @@ impl<R: ?Sized + Seek> BufReader<R> {
}
}
+impl<R> SpecReadByte for BufReader<R>
+where
+ Self: Read,
+{
+ #[inline]
+ fn spec_read_byte(&mut self) -> Option<io::Result<u8>> {
+ let mut byte = 0;
+ if self.buf.consume_with(1, |claimed| byte = claimed[0]) {
+ return Some(Ok(byte));
+ }
+
+ // Fallback case, only reached once per buffer refill.
+ uninlined_slow_read_byte(self)
+ }
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<R: ?Sized + Read> Read for BufReader<R> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
@@ -269,10 +286,8 @@ impl<R: ?Sized + Read> Read for BufReader<R> {
self.discard_buffer();
return self.inner.read(buf);
}
- let nread = {
- let mut rem = self.fill_buf()?;
- rem.read(buf)?
- };
+ let mut rem = self.fill_buf()?;
+ let nread = rem.read(buf)?;
self.consume(nread);
Ok(nread)
}
diff --git a/library/std/src/io/copy.rs b/library/std/src/io/copy.rs
index eafd078a7..4d51a719f 100644
--- a/library/std/src/io/copy.rs
+++ b/library/std/src/io/copy.rs
@@ -1,5 +1,7 @@
use super::{BorrowedBuf, BufReader, BufWriter, Read, Result, Write, DEFAULT_BUF_SIZE};
use crate::alloc::Allocator;
+use crate::cmp;
+use crate::cmp::min;
use crate::collections::VecDeque;
use crate::io::IoSlice;
use crate::mem::MaybeUninit;
@@ -254,6 +256,78 @@ impl<I: Write + ?Sized> BufferedWriterSpec for BufWriter<I> {
}
}
+impl<A: Allocator> BufferedWriterSpec for Vec<u8, A> {
+ fn buffer_size(&self) -> usize {
+ cmp::max(DEFAULT_BUF_SIZE, self.capacity() - self.len())
+ }
+
+ fn copy_from<R: Read + ?Sized>(&mut self, reader: &mut R) -> Result<u64> {
+ let mut bytes = 0;
+
+ // avoid inflating empty/small vecs before we have determined that there's anything to read
+ if self.capacity() < DEFAULT_BUF_SIZE {
+ let stack_read_limit = DEFAULT_BUF_SIZE as u64;
+ bytes = stack_buffer_copy(&mut reader.take(stack_read_limit), self)?;
+ // fewer bytes than requested -> EOF reached
+ if bytes < stack_read_limit {
+ return Ok(bytes);
+ }
+ }
+
+ // don't immediately offer the vec's whole spare capacity, otherwise
+ // we might have to fully initialize it if the reader doesn't have a custom read_buf() impl
+ let mut max_read_size = DEFAULT_BUF_SIZE;
+
+ loop {
+ self.reserve(DEFAULT_BUF_SIZE);
+ let mut initialized_spare_capacity = 0;
+
+ loop {
+ let buf = self.spare_capacity_mut();
+ let read_size = min(max_read_size, buf.len());
+ let mut buf = BorrowedBuf::from(&mut buf[..read_size]);
+ // SAFETY: init is either 0 or the init_len from the previous iteration.
+ unsafe {
+ buf.set_init(initialized_spare_capacity);
+ }
+ match reader.read_buf(buf.unfilled()) {
+ Ok(()) => {
+ let bytes_read = buf.len();
+
+ // EOF
+ if bytes_read == 0 {
+ return Ok(bytes);
+ }
+
+ // the reader is returning short reads but it doesn't call ensure_init()
+ if buf.init_len() < buf.capacity() {
+ max_read_size = usize::MAX;
+ }
+ // the reader hasn't returned short reads so far
+ if bytes_read == buf.capacity() {
+ max_read_size *= 2;
+ }
+
+ initialized_spare_capacity = buf.init_len() - bytes_read;
+ bytes += bytes_read as u64;
+ // SAFETY: BorrowedBuf guarantees all of its filled bytes are init
+ // and the number of read bytes can't exceed the spare capacity since
+ // that's what the buffer is borrowing from.
+ unsafe { self.set_len(self.len() + bytes_read) };
+
+ // spare capacity full, reserve more
+ if self.len() == self.capacity() {
+ break;
+ }
+ }
+ Err(e) if e.is_interrupted() => continue,
+ Err(e) => return Err(e),
+ }
+ }
+ }
+ }
+}
+
fn stack_buffer_copy<R: Read + ?Sized, W: Write + ?Sized>(
reader: &mut R,
writer: &mut W,
diff --git a/library/std/src/io/copy/tests.rs b/library/std/src/io/copy/tests.rs
index d9998e87c..af137eaf8 100644
--- a/library/std/src/io/copy/tests.rs
+++ b/library/std/src/io/copy/tests.rs
@@ -81,6 +81,18 @@ fn copy_specializes_bufreader() {
}
#[test]
+fn copy_specializes_to_vec() {
+ let cap = 123456;
+ let mut source = ShortReader { cap, observed_buffer: 0, read_size: 1337 };
+ let mut sink = Vec::new();
+ assert_eq!(cap as u64, io::copy(&mut source, &mut sink).unwrap());
+ assert!(
+ source.observed_buffer > DEFAULT_BUF_SIZE,
+ "expected a large buffer to be provided to the reader"
+ );
+}
+
+#[test]
fn copy_specializes_from_vecdeque() {
let mut source = VecDeque::with_capacity(100 * 1024);
for _ in 0..20 * 1024 {
diff --git a/library/std/src/io/impls.rs b/library/std/src/io/impls.rs
index a7428776d..d8c8d933e 100644
--- a/library/std/src/io/impls.rs
+++ b/library/std/src/io/impls.rs
@@ -475,6 +475,24 @@ impl<A: Allocator> Read for VecDeque<u8, A> {
}
}
+/// BufRead is implemented for `VecDeque<u8>` by reading bytes from the front of the `VecDeque`.
+#[stable(feature = "vecdeque_buf_read", since = "1.75.0")]
+impl<A: Allocator> BufRead for VecDeque<u8, A> {
+ /// Returns the contents of the "front" slice as returned by
+ /// [`as_slices`][`VecDeque::as_slices`]. If the contained byte slices of the `VecDeque` are
+ /// discontiguous, multiple calls to `fill_buf` will be needed to read the entire content.
+ #[inline]
+ fn fill_buf(&mut self) -> io::Result<&[u8]> {
+ let (front, _) = self.as_slices();
+ Ok(front)
+ }
+
+ #[inline]
+ fn consume(&mut self, amt: usize) {
+ self.drain(..amt);
+ }
+}
+
/// Write is implemented for `VecDeque<u8>` by appending to the `VecDeque`, growing it as needed.
#[stable(feature = "vecdeque_read_write", since = "1.63.0")]
impl<A: Allocator> Write for VecDeque<u8, A> {
@@ -510,3 +528,17 @@ impl<A: Allocator> Write for VecDeque<u8, A> {
Ok(())
}
}
+
+#[unstable(feature = "read_buf", issue = "78485")]
+impl<'a> io::Write for core::io::BorrowedCursor<'a> {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ let amt = cmp::min(buf.len(), self.capacity());
+ self.append(&buf[..amt]);
+ Ok(amt)
+ }
+
+ #[inline]
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
diff --git a/library/std/src/io/mod.rs b/library/std/src/io/mod.rs
index 604b795cd..7d70a0bac 100644
--- a/library/std/src/io/mod.rs
+++ b/library/std/src/io/mod.rs
@@ -317,6 +317,7 @@ pub use self::stdio::set_output_capture;
#[stable(feature = "is_terminal", since = "1.70.0")]
pub use self::stdio::IsTerminal;
#[unstable(feature = "print_internals", issue = "none")]
+#[doc(hidden)]
pub use self::stdio::{_eprint, _print};
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::{
@@ -329,7 +330,7 @@ pub use self::{
};
#[unstable(feature = "read_buf", issue = "78485")]
-pub use self::readbuf::{BorrowedBuf, BorrowedCursor};
+pub use core::io::{BorrowedBuf, BorrowedCursor};
pub(crate) use error::const_io_error;
mod buffered;
@@ -338,7 +339,6 @@ mod cursor;
mod error;
mod impls;
pub mod prelude;
-mod readbuf;
mod stdio;
mod util;
@@ -513,8 +513,7 @@ pub(crate) fn default_read_exact<R: Read + ?Sized>(this: &mut R, mut buf: &mut [
match this.read(buf) {
Ok(0) => break,
Ok(n) => {
- let tmp = buf;
- buf = &mut tmp[n..];
+ buf = &mut buf[n..];
}
Err(ref e) if e.is_interrupted() => {}
Err(e) => return Err(e),
@@ -1141,10 +1140,10 @@ pub fn read_to_string<R: Read>(mut reader: R) -> Result<String> {
#[repr(transparent)]
pub struct IoSliceMut<'a>(sys::io::IoSliceMut<'a>);
-#[stable(feature = "iovec-send-sync", since = "1.44.0")]
+#[stable(feature = "iovec_send_sync", since = "1.44.0")]
unsafe impl<'a> Send for IoSliceMut<'a> {}
-#[stable(feature = "iovec-send-sync", since = "1.44.0")]
+#[stable(feature = "iovec_send_sync", since = "1.44.0")]
unsafe impl<'a> Sync for IoSliceMut<'a> {}
#[stable(feature = "iovec", since = "1.36.0")]
@@ -1284,10 +1283,10 @@ impl<'a> DerefMut for IoSliceMut<'a> {
#[repr(transparent)]
pub struct IoSlice<'a>(sys::io::IoSlice<'a>);
-#[stable(feature = "iovec-send-sync", since = "1.44.0")]
+#[stable(feature = "iovec_send_sync", since = "1.44.0")]
unsafe impl<'a> Send for IoSlice<'a> {}
-#[stable(feature = "iovec-send-sync", since = "1.44.0")]
+#[stable(feature = "iovec_send_sync", since = "1.44.0")]
unsafe impl<'a> Sync for IoSlice<'a> {}
#[stable(feature = "iovec", since = "1.36.0")]
@@ -1830,6 +1829,7 @@ pub trait Write {
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "IoSeek")]
pub trait Seek {
/// Seek to an offset, in bytes, in a stream.
///
@@ -2777,23 +2777,55 @@ pub struct Bytes<R> {
impl<R: Read> Iterator for Bytes<R> {
type Item = Result<u8>;
+ // Not `#[inline]`. This function gets inlined even without it, but having
+ // the inline annotation can result in worse code generation. See #116785.
fn next(&mut self) -> Option<Result<u8>> {
- let mut byte = 0;
- loop {
- return match self.inner.read(slice::from_mut(&mut byte)) {
- Ok(0) => None,
- Ok(..) => Some(Ok(byte)),
- Err(ref e) if e.is_interrupted() => continue,
- Err(e) => Some(Err(e)),
- };
- }
+ SpecReadByte::spec_read_byte(&mut self.inner)
}
+ #[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
SizeHint::size_hint(&self.inner)
}
}
+/// For the specialization of `Bytes::next`.
+trait SpecReadByte {
+ fn spec_read_byte(&mut self) -> Option<Result<u8>>;
+}
+
+impl<R> SpecReadByte for R
+where
+ Self: Read,
+{
+ #[inline]
+ default fn spec_read_byte(&mut self) -> Option<Result<u8>> {
+ inlined_slow_read_byte(self)
+ }
+}
+
+/// Read a single byte in a slow, generic way. This is used by the default
+/// `spec_read_byte`.
+#[inline]
+fn inlined_slow_read_byte<R: Read>(reader: &mut R) -> Option<Result<u8>> {
+ let mut byte = 0;
+ loop {
+ return match reader.read(slice::from_mut(&mut byte)) {
+ Ok(0) => None,
+ Ok(..) => Some(Ok(byte)),
+ Err(ref e) if e.is_interrupted() => continue,
+ Err(e) => Some(Err(e)),
+ };
+ }
+}
+
+// Used by `BufReader::spec_read_byte`, for which the `inline(never)` is
+// important.
+#[inline(never)]
+fn uninlined_slow_read_byte<R: Read>(reader: &mut R) -> Option<Result<u8>> {
+ inlined_slow_read_byte(reader)
+}
+
trait SizeHint {
fn lower_bound(&self) -> usize;
@@ -2893,6 +2925,7 @@ impl<B: BufRead> Iterator for Split<B> {
/// [`lines`]: BufRead::lines
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug)]
+#[cfg_attr(not(test), rustc_diagnostic_item = "IoLines")]
pub struct Lines<B> {
buf: B,
}
diff --git a/library/std/src/io/stdio.rs b/library/std/src/io/stdio.rs
index 9098d36ee..05b21eeb4 100644
--- a/library/std/src/io/stdio.rs
+++ b/library/std/src/io/stdio.rs
@@ -611,6 +611,7 @@ static STDOUT: OnceLock<ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>> = OnceLo
/// ```
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "io_stdout")]
pub fn stdout() -> Stdout {
Stdout {
inner: STDOUT
@@ -847,6 +848,7 @@ pub struct StderrLock<'a> {
/// ```
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "io_stderr")]
pub fn stderr() -> Stderr {
// Note that unlike `stdout()` we don't use `at_exit` here to register a
// destructor. Stderr is not buffered, so there's no need to run a
diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs
index f1f0f8b16..425890122 100644
--- a/library/std/src/lib.rs
+++ b/library/std/src/lib.rs
@@ -227,6 +227,7 @@
test(no_crate_inject, attr(deny(warnings))),
test(attr(allow(dead_code, deprecated, unused_variables, unused_mut)))
)]
+#![cfg_attr(not(bootstrap), doc(rust_logo))]
#![doc(cfg_hide(
not(test),
not(any(test, bootstrap)),
@@ -259,7 +260,7 @@
all(target_vendor = "fortanix", target_env = "sgx"),
feature(slice_index_methods, coerce_unsized, sgx_platform)
)]
-#![cfg_attr(windows, feature(round_char_boundary))]
+#![cfg_attr(any(windows, target_os = "uefi"), feature(round_char_boundary))]
#![cfg_attr(target_os = "xous", feature(slice_ptr_len))]
//
// Language features:
@@ -270,6 +271,7 @@
#![feature(allow_internal_unstable)]
#![feature(c_unwind)]
#![feature(cfg_target_thread_local)]
+#![feature(cfi_encoding)]
#![feature(concat_idents)]
#![feature(const_mut_refs)]
#![feature(const_trait_impl)]
@@ -292,6 +294,7 @@
#![feature(needs_panic_runtime)]
#![feature(negative_impls)]
#![feature(never_type)]
+#![feature(no_sanitize)]
#![feature(platform_intrinsics)]
#![feature(prelude_import)]
#![feature(rustc_attrs)]
@@ -307,6 +310,7 @@
// tidy-alphabetical-start
#![feature(char_internals)]
#![feature(core_intrinsics)]
+#![feature(core_io_borrowed_buf)]
#![feature(duration_constants)]
#![feature(error_generic_member_access)]
#![feature(error_in_core)]
@@ -328,7 +332,6 @@
#![feature(panic_can_unwind)]
#![feature(panic_info_message)]
#![feature(panic_internals)]
-#![feature(pointer_byte_offsets)]
#![feature(pointer_is_aligned)]
#![feature(portable_simd)]
#![feature(prelude_2024)]
diff --git a/library/std/src/net/udp.rs b/library/std/src/net/udp.rs
index 227e418b7..60347a11d 100644
--- a/library/std/src/net/udp.rs
+++ b/library/std/src/net/udp.rs
@@ -99,6 +99,16 @@ impl UdpSocket {
///
/// let socket = UdpSocket::bind("127.0.0.1:0").unwrap();
/// ```
+ ///
+ /// Note that `bind` declares the scope of your network connection.
+ /// You can only receive datagrams from and send datagrams to
+ /// participants in that view of the network.
+ /// For instance, binding to a loopback address as in the example
+ /// above will prevent you from sending datagrams to another device
+ /// in your local network.
+ ///
+ /// In order to limit your view of the network the least, `bind` to
+ /// [`Ipv4Addr::UNSPECIFIED`] or [`Ipv6Addr::UNSPECIFIED`].
#[stable(feature = "rust1", since = "1.0.0")]
pub fn bind<A: ToSocketAddrs>(addr: A) -> io::Result<UdpSocket> {
super::each_addr(addr, net_imp::UdpSocket::bind).map(UdpSocket)
@@ -157,7 +167,9 @@ impl UdpSocket {
}
/// Sends data on the socket to the given address. On success, returns the
- /// number of bytes written.
+ /// number of bytes written. Note that the operating system may refuse
+ /// buffers larger than 65507. However, partial writes are not possible
+ /// until buffer sizes above `i32::MAX`.
///
/// Address type can be any implementor of [`ToSocketAddrs`] trait. See its
/// documentation for concrete examples.
@@ -652,12 +664,19 @@ impl UdpSocket {
/// function of a UDP socket is not a useful thing to do: The OS will be
/// unable to determine whether something is listening on the remote
/// address without the application sending data.
+ ///
+ /// If your first `connect` is to a loopback address, subsequent
+ /// `connect`s to non-loopback addresses might fail, depending
+ /// on the platform.
#[stable(feature = "net2_mutators", since = "1.9.0")]
pub fn connect<A: ToSocketAddrs>(&self, addr: A) -> io::Result<()> {
super::each_addr(addr, |addr| self.0.connect(addr))
}
/// Sends data on the socket to the remote address to which it is connected.
+ /// On success, returns the number of bytes written. Note that the operating
+ /// system may refuse buffers larger than 65507. However, partial writes are
+ /// not possible for buffer sizes up to `i32::MAX`.
///
/// [`UdpSocket::connect`] will connect this socket to a remote address. This
/// method will fail if the socket is not connected.
diff --git a/library/std/src/os/aix/fs.rs b/library/std/src/os/aix/fs.rs
new file mode 100644
index 000000000..ac9dd45f0
--- /dev/null
+++ b/library/std/src/os/aix/fs.rs
@@ -0,0 +1,348 @@
+//! AIX specific extensions to primitives in the [`std::fs`] module.
+//!
+//! [`std::fs`]: crate::fs
+
+#![stable(feature = "metadata_ext", since = "1.1.0")]
+
+use crate::fs::Metadata;
+use crate::sys_common::AsInner;
+
+/// OS-specific extensions to [`fs::Metadata`].
+///
+/// [`fs::Metadata`]: crate::fs::Metadata
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+pub trait MetadataExt {
+ /// Returns the device ID on which this file resides.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::aix::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_dev());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_dev(&self) -> u64;
+ /// Returns the inode number.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::aix::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_ino());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ino(&self) -> u64;
+ /// Returns the file type and mode.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::aix::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_mode());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mode(&self) -> u32;
+ /// Returns the number of hard links to file.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::aix::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_nlink());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_nlink(&self) -> u64;
+ /// Returns the user ID of the file owner.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::aix::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_uid());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_uid(&self) -> u32;
+ /// Returns the group ID of the file owner.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::aix::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_gid());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gid(&self) -> u32;
+ /// Returns the device ID that this file represents. Only relevant for special files.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::aix::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_rdev());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_rdev(&self) -> u64;
+ /// Returns the size of the file (if it is a regular file or a symbolic link) in bytes.
+ ///
+ /// The size of a symbolic link is the length of the pathname it contains,
+ /// without a terminating null byte.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::aix::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_size());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_size(&self) -> u64;
+ /// Returns the last access time of the file, in seconds since Unix Epoch.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::aix::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_atime());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime(&self) -> i64;
+ /// Returns the last access time of the file, in nanoseconds since [`st_atime`].
+ ///
+ /// [`st_atime`]: Self::st_atime
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::aix::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_atime_nsec());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime_nsec(&self) -> i64;
+ /// Returns the last modification time of the file, in seconds since Unix Epoch.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::aix::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_mtime());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime(&self) -> i64;
+ /// Returns the last modification time of the file, in nanoseconds since [`st_mtime`].
+ ///
+ /// [`st_mtime`]: Self::st_mtime
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::aix::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_mtime_nsec());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime_nsec(&self) -> i64;
+ /// Returns the last status change time of the file, in seconds since Unix Epoch.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::aix::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_ctime());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime(&self) -> i64;
+ /// Returns the last status change time of the file, in nanoseconds since [`st_ctime`].
+ ///
+ /// [`st_ctime`]: Self::st_ctime
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::aix::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_ctime_nsec());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime_nsec(&self) -> i64;
+ /// Returns the "preferred" block size for efficient filesystem I/O.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::aix::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_blksize());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blksize(&self) -> u64;
+ /// Returns the number of blocks allocated to the file, 512-byte units.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::aix::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_blocks());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blocks(&self) -> u64;
+}
+
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+impl MetadataExt for Metadata {
+ fn st_dev(&self) -> u64 {
+ self.as_inner().as_inner().st_dev as u64
+ }
+ fn st_ino(&self) -> u64 {
+ self.as_inner().as_inner().st_ino as u64
+ }
+ fn st_mode(&self) -> u32 {
+ self.as_inner().as_inner().st_mode as u32
+ }
+ fn st_nlink(&self) -> u64 {
+ self.as_inner().as_inner().st_nlink as u64
+ }
+ fn st_uid(&self) -> u32 {
+ self.as_inner().as_inner().st_uid as u32
+ }
+ fn st_gid(&self) -> u32 {
+ self.as_inner().as_inner().st_gid as u32
+ }
+ fn st_rdev(&self) -> u64 {
+ self.as_inner().as_inner().st_rdev as u64
+ }
+ fn st_size(&self) -> u64 {
+ self.as_inner().as_inner().st_size as u64
+ }
+ fn st_atime(&self) -> i64 {
+ self.as_inner().as_inner().st_atime.tv_sec as i64
+ }
+ fn st_atime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_atime.tv_nsec as i64
+ }
+ fn st_mtime(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime.tv_sec as i64
+ }
+ fn st_mtime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime.tv_nsec as i64
+ }
+ fn st_ctime(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime.tv_sec as i64
+ }
+ fn st_ctime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime.tv_nsec as i64
+ }
+ fn st_blksize(&self) -> u64 {
+ self.as_inner().as_inner().st_blksize as u64
+ }
+ fn st_blocks(&self) -> u64 {
+ self.as_inner().as_inner().st_blocks as u64
+ }
+}
diff --git a/library/std/src/os/aix/mod.rs b/library/std/src/os/aix/mod.rs
new file mode 100644
index 000000000..7f86a3c77
--- /dev/null
+++ b/library/std/src/os/aix/mod.rs
@@ -0,0 +1,6 @@
+//! AIX specific definitions.
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+
+pub mod fs;
+pub mod raw;
diff --git a/library/std/src/os/aix/raw.rs b/library/std/src/os/aix/raw.rs
new file mode 100644
index 000000000..b4c8dc72c
--- /dev/null
+++ b/library/std/src/os/aix/raw.rs
@@ -0,0 +1,9 @@
+//! AIX specific raw type definitions.
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+
+#[stable(feature = "pthread_t", since = "1.8.0")]
+pub use libc::pthread_t;
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub use libc::{blkcnt_t, blksize_t, dev_t, ino_t, mode_t, nlink_t, off_t, stat, time_t};
diff --git a/library/std/src/os/fd/owned.rs b/library/std/src/os/fd/owned.rs
index 81106d6c6..24f2bdcf4 100644
--- a/library/std/src/os/fd/owned.rs
+++ b/library/std/src/os/fd/owned.rs
@@ -97,14 +97,14 @@ impl BorrowedFd<'_> {
// We want to atomically duplicate this file descriptor and set the
// CLOEXEC flag, and currently that's done via F_DUPFD_CLOEXEC. This
// is a POSIX flag that was added to Linux in 2.6.24.
- #[cfg(not(target_os = "espidf"))]
+ #[cfg(not(any(target_os = "espidf", target_os = "vita")))]
let cmd = libc::F_DUPFD_CLOEXEC;
// For ESP-IDF, F_DUPFD is used instead, because the CLOEXEC semantics
// will never be supported, as this is a bare metal framework with
// no capabilities for multi-process execution. While F_DUPFD is also
// not supported yet, it might be (currently it returns ENOSYS).
- #[cfg(target_os = "espidf")]
+ #[cfg(any(target_os = "espidf", target_os = "vita"))]
let cmd = libc::F_DUPFD;
// Avoid using file descriptors below 3 as they are used for stdio
@@ -119,7 +119,7 @@ impl BorrowedFd<'_> {
pub fn try_clone_to_owned(&self) -> crate::io::Result<OwnedFd> {
Err(crate::io::const_io_error!(
crate::io::ErrorKind::Unsupported,
- "operation not supported on WASI yet",
+ "operation not supported on this platform",
))
}
}
diff --git a/library/std/src/os/freebsd/fs.rs b/library/std/src/os/freebsd/fs.rs
index 8db3a950c..5689a82e0 100644
--- a/library/std/src/os/freebsd/fs.rs
+++ b/library/std/src/os/freebsd/fs.rs
@@ -76,12 +76,7 @@ impl MetadataExt for Metadata {
fn as_raw_stat(&self) -> &raw::stat {
// The methods below use libc::stat, so they work fine when libc is built with FreeBSD 12 ABI.
// This method would just return nonsense.
- #[cfg(freebsd12)]
panic!("as_raw_stat not supported with FreeBSD 12 ABI");
- #[cfg(not(freebsd12))]
- unsafe {
- &*(self.as_inner().as_inner() as *const libc::stat as *const raw::stat)
- }
}
fn st_dev(&self) -> u64 {
self.as_inner().as_inner().st_dev as u64
@@ -143,12 +138,7 @@ impl MetadataExt for Metadata {
fn st_flags(&self) -> u32 {
self.as_inner().as_inner().st_flags as u32
}
- #[cfg(freebsd12)]
fn st_lspare(&self) -> u32 {
panic!("st_lspare not supported with FreeBSD 12 ABI");
}
- #[cfg(not(freebsd12))]
- fn st_lspare(&self) -> u32 {
- self.as_inner().as_inner().st_lspare as u32
- }
}
diff --git a/library/std/src/os/ios/fs.rs b/library/std/src/os/ios/fs.rs
index b319527a5..e5df4de0b 100644
--- a/library/std/src/os/ios/fs.rs
+++ b/library/std/src/os/ios/fs.rs
@@ -144,14 +144,14 @@ impl MetadataExt for Metadata {
}
/// OS-specific extensions to [`fs::FileTimes`].
-#[unstable(feature = "file_set_times", issue = "98245")]
+#[stable(feature = "file_set_times", since = "1.75.0")]
pub trait FileTimesExt: Sealed {
/// Set the creation time of a file.
- #[unstable(feature = "file_set_times", issue = "98245")]
+ #[stable(feature = "file_set_times", since = "1.75.0")]
fn set_created(self, t: SystemTime) -> Self;
}
-#[unstable(feature = "file_set_times", issue = "98245")]
+#[stable(feature = "file_set_times", since = "1.75.0")]
impl FileTimesExt for fs::FileTimes {
fn set_created(mut self, t: SystemTime) -> Self {
self.as_inner_mut().set_created(t.into_inner());
diff --git a/library/std/src/os/linux/fs.rs b/library/std/src/os/linux/fs.rs
index 479bbcc17..ab0b2a3ed 100644
--- a/library/std/src/os/linux/fs.rs
+++ b/library/std/src/os/linux/fs.rs
@@ -329,7 +329,14 @@ pub trait MetadataExt {
impl MetadataExt for Metadata {
#[allow(deprecated)]
fn as_raw_stat(&self) -> &raw::stat {
- unsafe { &*(self.as_inner().as_inner() as *const libc::stat64 as *const raw::stat) }
+ #[cfg(target_env = "musl")]
+ unsafe {
+ &*(self.as_inner().as_inner() as *const libc::stat as *const raw::stat)
+ }
+ #[cfg(not(target_env = "musl"))]
+ unsafe {
+ &*(self.as_inner().as_inner() as *const libc::stat64 as *const raw::stat)
+ }
}
fn st_dev(&self) -> u64 {
self.as_inner().as_inner().st_dev as u64
diff --git a/library/std/src/os/macos/fs.rs b/library/std/src/os/macos/fs.rs
index fe82d03d8..573426d1a 100644
--- a/library/std/src/os/macos/fs.rs
+++ b/library/std/src/os/macos/fs.rs
@@ -150,14 +150,14 @@ impl MetadataExt for Metadata {
}
/// OS-specific extensions to [`fs::FileTimes`].
-#[unstable(feature = "file_set_times", issue = "98245")]
+#[stable(feature = "file_set_times", since = "1.75.0")]
pub trait FileTimesExt: Sealed {
/// Set the creation time of a file.
- #[unstable(feature = "file_set_times", issue = "98245")]
+ #[stable(feature = "file_set_times", since = "1.75.0")]
fn set_created(self, t: SystemTime) -> Self;
}
-#[unstable(feature = "file_set_times", issue = "98245")]
+#[stable(feature = "file_set_times", since = "1.75.0")]
impl FileTimesExt for fs::FileTimes {
fn set_created(mut self, t: SystemTime) -> Self {
self.as_inner_mut().set_created(t.into_inner());
diff --git a/library/std/src/os/mod.rs b/library/std/src/os/mod.rs
index 11ad21515..6e11b92b6 100644
--- a/library/std/src/os/mod.rs
+++ b/library/std/src/os/mod.rs
@@ -97,6 +97,8 @@ pub mod wasi;
pub mod windows;
// Others.
+#[cfg(target_os = "aix")]
+pub mod aix;
#[cfg(target_os = "android")]
pub mod android;
#[cfg(target_os = "dragonfly")]
diff --git a/library/std/src/os/unix/mod.rs b/library/std/src/os/unix/mod.rs
index 3724e90af..5ba8719e6 100644
--- a/library/std/src/os/unix/mod.rs
+++ b/library/std/src/os/unix/mod.rs
@@ -37,6 +37,8 @@ use crate::os::linux as platform;
#[cfg(not(doc))]
mod platform {
+ #[cfg(target_os = "aix")]
+ pub use crate::os::aix::*;
#[cfg(target_os = "android")]
pub use crate::os::android::*;
#[cfg(target_os = "dragonfly")]
diff --git a/library/std/src/os/watchos/fs.rs b/library/std/src/os/watchos/fs.rs
index 2ecc4c68a..ee215dd59 100644
--- a/library/std/src/os/watchos/fs.rs
+++ b/library/std/src/os/watchos/fs.rs
@@ -144,14 +144,14 @@ impl MetadataExt for Metadata {
}
/// OS-specific extensions to [`fs::FileTimes`].
-#[unstable(feature = "file_set_times", issue = "98245")]
+#[stable(feature = "file_set_times", since = "1.75.0")]
pub trait FileTimesExt: Sealed {
/// Set the creation time of a file.
- #[unstable(feature = "file_set_times", issue = "98245")]
+ #[stable(feature = "file_set_times", since = "1.75.0")]
fn set_created(self, t: SystemTime) -> Self;
}
-#[unstable(feature = "file_set_times", issue = "98245")]
+#[stable(feature = "file_set_times", since = "1.75.0")]
impl FileTimesExt for fs::FileTimes {
fn set_created(mut self, t: SystemTime) -> Self {
self.as_inner_mut().set_created(t.into_inner());
diff --git a/library/std/src/os/windows/fs.rs b/library/std/src/os/windows/fs.rs
index 94509e547..1b013d1c1 100644
--- a/library/std/src/os/windows/fs.rs
+++ b/library/std/src/os/windows/fs.rs
@@ -528,14 +528,14 @@ impl FileTypeExt for fs::FileType {
}
/// Windows-specific extensions to [`fs::FileTimes`].
-#[unstable(feature = "file_set_times", issue = "98245")]
+#[stable(feature = "file_set_times", since = "1.75.0")]
pub trait FileTimesExt: Sealed {
/// Set the creation time of a file.
- #[unstable(feature = "file_set_times", issue = "98245")]
+ #[stable(feature = "file_set_times", since = "1.75.0")]
fn set_created(self, t: SystemTime) -> Self;
}
-#[unstable(feature = "file_set_times", issue = "98245")]
+#[stable(feature = "file_set_times", since = "1.75.0")]
impl FileTimesExt for fs::FileTimes {
fn set_created(mut self, t: SystemTime) -> Self {
self.as_inner_mut().set_created(t.into_inner());
diff --git a/library/std/src/panicking.rs b/library/std/src/panicking.rs
index d7a2baa1f..55f4917a9 100644
--- a/library/std/src/panicking.rs
+++ b/library/std/src/panicking.rs
@@ -295,12 +295,53 @@ fn default_hook(info: &PanicInfo<'_>) {
#[cfg(not(test))]
#[doc(hidden)]
+#[cfg(feature = "panic_immediate_abort")]
+#[unstable(feature = "update_panic_count", issue = "none")]
+pub mod panic_count {
+ /// A reason for forcing an immediate abort on panic.
+ #[derive(Debug)]
+ pub enum MustAbort {
+ AlwaysAbort,
+ PanicInHook,
+ }
+
+ #[inline]
+ pub fn increase(run_panic_hook: bool) -> Option<MustAbort> {
+ None
+ }
+
+ #[inline]
+ pub fn finished_panic_hook() {}
+
+ #[inline]
+ pub fn decrease() {}
+
+ #[inline]
+ pub fn set_always_abort() {}
+
+ // Disregards ALWAYS_ABORT_FLAG
+ #[inline]
+ #[must_use]
+ pub fn get_count() -> usize {
+ 0
+ }
+
+ #[must_use]
+ #[inline]
+ pub fn count_is_zero() -> bool {
+ true
+ }
+}
+
+#[cfg(not(test))]
+#[doc(hidden)]
+#[cfg(not(feature = "panic_immediate_abort"))]
#[unstable(feature = "update_panic_count", issue = "none")]
pub mod panic_count {
use crate::cell::Cell;
use crate::sync::atomic::{AtomicUsize, Ordering};
- pub const ALWAYS_ABORT_FLAG: usize = 1 << (usize::BITS - 1);
+ const ALWAYS_ABORT_FLAG: usize = 1 << (usize::BITS - 1);
/// A reason for forcing an immediate abort on panic.
#[derive(Debug)]
@@ -421,6 +462,13 @@ pub mod panic_count {
pub use realstd::rt::panic_count;
/// Invoke a closure, capturing the cause of an unwinding panic if one occurs.
+#[cfg(feature = "panic_immediate_abort")]
+pub unsafe fn r#try<R, F: FnOnce() -> R>(f: F) -> Result<R, Box<dyn Any + Send>> {
+ Ok(f())
+}
+
+/// Invoke a closure, capturing the cause of an unwinding panic if one occurs.
+#[cfg(not(feature = "panic_immediate_abort"))]
pub unsafe fn r#try<R, F: FnOnce() -> R>(f: F) -> Result<R, Box<dyn Any + Send>> {
union Data<F, R> {
f: ManuallyDrop<F>,
@@ -755,6 +803,7 @@ fn rust_panic_with_hook(
/// This is the entry point for `resume_unwind`.
/// It just forwards the payload to the panic runtime.
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
pub fn rust_panic_without_hook(payload: Box<dyn Any + Send>) -> ! {
panic_count::increase(false);
@@ -777,7 +826,16 @@ pub fn rust_panic_without_hook(payload: Box<dyn Any + Send>) -> ! {
/// yer breakpoints.
#[inline(never)]
#[cfg_attr(not(test), rustc_std_internal_symbol)]
+#[cfg(not(feature = "panic_immediate_abort"))]
fn rust_panic(msg: &mut dyn PanicPayload) -> ! {
let code = unsafe { __rust_start_panic(msg) };
rtabort!("failed to initiate panic, error {code}")
}
+
+#[cfg_attr(not(test), rustc_std_internal_symbol)]
+#[cfg(feature = "panic_immediate_abort")]
+fn rust_panic(_: &mut dyn PanicPayload) -> ! {
+ unsafe {
+ crate::intrinsics::abort();
+ }
+}
diff --git a/library/std/src/process.rs b/library/std/src/process.rs
index 8c1497613..af6bef1a7 100644
--- a/library/std/src/process.rs
+++ b/library/std/src/process.rs
@@ -526,6 +526,7 @@ impl fmt::Debug for ChildStderr {
/// list_dir.status().expect("process failed to execute");
/// ```
#[stable(feature = "process", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "Command")]
pub struct Command {
inner: imp::Command,
}
@@ -607,7 +608,7 @@ impl Command {
///
/// Note that the argument is not passed through a shell, but given
/// literally to the program. This means that shell syntax like quotes,
- /// escaped characters, word splitting, glob patterns, substitution, etc.
+ /// escaped characters, word splitting, glob patterns, variable substitution, etc.
/// have no effect.
///
/// # Examples
@@ -637,7 +638,7 @@ impl Command {
///
/// Note that the arguments are not passed through a shell, but given
/// literally to the program. This means that shell syntax like quotes,
- /// escaped characters, word splitting, glob patterns, substitution, etc.
+ /// escaped characters, word splitting, glob patterns, variable substitution, etc.
/// have no effect.
///
/// # Examples
@@ -1593,7 +1594,7 @@ impl From<io::Stderr> for Stdio {
pub struct ExitStatus(imp::ExitStatus);
/// The default value is one which indicates successful completion.
-#[stable(feature = "process-exitcode-default", since = "1.73.0")]
+#[stable(feature = "process_exitstatus_default", since = "1.73.0")]
impl Default for ExitStatus {
fn default() -> Self {
// Ideally this would be done by ExitCode::default().into() but that is complicated.
@@ -1959,6 +1960,14 @@ impl ExitCode {
}
}
+/// The default value is [`ExitCode::SUCCESS`]
+#[stable(feature = "process_exitcode_default", since = "1.75.0")]
+impl Default for ExitCode {
+ fn default() -> Self {
+ ExitCode::SUCCESS
+ }
+}
+
#[stable(feature = "process_exitcode", since = "1.61.0")]
impl From<u8> for ExitCode {
/// Construct an `ExitCode` from an arbitrary u8 value.
@@ -2196,6 +2205,7 @@ impl Child {
/// process::exit(0x0100);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "process_exit")]
pub fn exit(code: i32) -> ! {
crate::rt::cleanup();
crate::sys::os::exit(code)
diff --git a/library/std/src/rt.rs b/library/std/src/rt.rs
index f1eeb75be..5c83f72f3 100644
--- a/library/std/src/rt.rs
+++ b/library/std/src/rt.rs
@@ -155,6 +155,7 @@ fn lang_start_internal(
}
#[cfg(not(test))]
+#[inline(never)]
#[lang = "start"]
fn lang_start<T: crate::process::Termination + 'static>(
main: fn() -> T,
diff --git a/library/std/src/sync/once.rs b/library/std/src/sync/once.rs
index 8c46080e4..2bb4f3f9e 100644
--- a/library/std/src/sync/once.rs
+++ b/library/std/src/sync/once.rs
@@ -125,7 +125,7 @@ impl Once {
///
/// # Panics
///
- /// The closure `f` will only be executed once if this is called
+ /// The closure `f` will only be executed once even if this is called
/// concurrently amongst many threads. If that closure panics, however, then
/// it will *poison* this [`Once`] instance, causing all future invocations of
/// `call_once` to also panic.
diff --git a/library/std/src/sync/once_lock.rs b/library/std/src/sync/once_lock.rs
index e2b7b893c..f49630907 100644
--- a/library/std/src/sync/once_lock.rs
+++ b/library/std/src/sync/once_lock.rs
@@ -126,11 +126,48 @@ impl<T> OnceLock<T> {
#[inline]
#[stable(feature = "once_cell", since = "1.70.0")]
pub fn set(&self, value: T) -> Result<(), T> {
+ match self.try_insert(value) {
+ Ok(_) => Ok(()),
+ Err((_, value)) => Err(value),
+ }
+ }
+
+ /// Sets the contents of this cell to `value` if the cell was empty, then
+ /// returns a reference to it.
+ ///
+ /// May block if another thread is currently attempting to initialize the cell. The cell is
+ /// guaranteed to contain a value when `try_insert` returns, though not necessarily the one provided.
+ ///
+ /// Returns `Ok(&value)` if the cell was empty and `Err((&current_value, value))` if it was full.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(once_cell_try_insert)]
+ ///
+ /// use std::sync::OnceLock;
+ ///
+ /// static CELL: OnceLock<i32> = OnceLock::new();
+ ///
+ /// fn main() {
+ /// assert!(CELL.get().is_none());
+ ///
+ /// std::thread::spawn(|| {
+ /// assert_eq!(CELL.try_insert(92), Ok(&92));
+ /// }).join().unwrap();
+ ///
+ /// assert_eq!(CELL.try_insert(62), Err((&92, 62)));
+ /// assert_eq!(CELL.get(), Some(&92));
+ /// }
+ /// ```
+ #[inline]
+ #[unstable(feature = "once_cell_try_insert", issue = "116693")]
+ pub fn try_insert(&self, value: T) -> Result<&T, (&T, T)> {
let mut value = Some(value);
- self.get_or_init(|| value.take().unwrap());
+ let res = self.get_or_init(|| value.take().unwrap());
match value {
- None => Ok(()),
- Some(value) => Err(value),
+ None => Ok(res),
+ Some(value) => Err((res, value)),
}
}
diff --git a/library/std/src/sync/rwlock.rs b/library/std/src/sync/rwlock.rs
index 26aaa2414..ac7c800ff 100644
--- a/library/std/src/sync/rwlock.rs
+++ b/library/std/src/sync/rwlock.rs
@@ -380,7 +380,7 @@ impl<T: ?Sized> RwLock<T> {
///
/// If the lock is poisoned, it will remain poisoned until this function is called. This allows
/// recovering from a poisoned state and marking that it has recovered. For example, if the
- /// value is overwritten by a known-good value, then the mutex can be marked as un-poisoned. Or
+ /// value is overwritten by a known-good value, then the lock can be marked as un-poisoned. Or
/// possibly, the value could be inspected to determine if it is in a consistent state, and if
/// so the poison is removed.
///
@@ -397,7 +397,7 @@ impl<T: ?Sized> RwLock<T> {
///
/// let _ = thread::spawn(move || {
/// let _lock = c_lock.write().unwrap();
- /// panic!(); // the mutex gets poisoned
+ /// panic!(); // the lock gets poisoned
/// }).join();
///
/// assert_eq!(lock.is_poisoned(), true);
diff --git a/library/std/src/sys/common/mod.rs b/library/std/src/sys/common/mod.rs
index 2b8782ddf..b35c5d30b 100644
--- a/library/std/src/sys/common/mod.rs
+++ b/library/std/src/sys/common/mod.rs
@@ -12,6 +12,7 @@
pub mod alloc;
pub mod small_c_string;
+#[allow(unused_imports)]
pub mod thread_local;
#[cfg(test)]
diff --git a/library/std/src/sys/hermit/net.rs b/library/std/src/sys/hermit/net.rs
index a564f1698..bd8b493d6 100644
--- a/library/std/src/sys/hermit/net.rs
+++ b/library/std/src/sys/hermit/net.rs
@@ -56,6 +56,12 @@ impl Socket {
unimplemented!()
}
+ pub fn connect(&self, addr: &SocketAddr) -> io::Result<()> {
+ let (addr, len) = addr.into_inner();
+ cvt_r(|| unsafe { netc::connect(self.as_raw_fd(), addr.as_ptr(), len) })?;
+ Ok(())
+ }
+
pub fn connect_timeout(&self, addr: &SocketAddr, timeout: Duration) -> io::Result<()> {
self.set_nonblocking(true)?;
let r = unsafe {
diff --git a/library/std/src/sys/hermit/thread_local_dtor.rs b/library/std/src/sys/hermit/thread_local_dtor.rs
index 613266b95..98adaf4bf 100644
--- a/library/std/src/sys/hermit/thread_local_dtor.rs
+++ b/library/std/src/sys/hermit/thread_local_dtor.rs
@@ -5,23 +5,25 @@
// The this solution works like the implementation of macOS and
// doesn't additional OS support
-use crate::mem;
+use crate::cell::RefCell;
#[thread_local]
-static mut DTORS: Vec<(*mut u8, unsafe extern "C" fn(*mut u8))> = Vec::new();
+static DTORS: RefCell<Vec<(*mut u8, unsafe extern "C" fn(*mut u8))>> = RefCell::new(Vec::new());
pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
- let list = &mut DTORS;
- list.push((t, dtor));
+ match DTORS.try_borrow_mut() {
+ Ok(mut dtors) => dtors.push((t, dtor)),
+ Err(_) => rtabort!("global allocator may not use TLS"),
+ }
}
// every thread call this function to run through all possible destructors
pub unsafe fn run_dtors() {
- let mut list = mem::take(&mut DTORS);
+ let mut list = DTORS.take();
while !list.is_empty() {
for (ptr, dtor) in list {
dtor(ptr);
}
- list = mem::take(&mut DTORS);
+ list = DTORS.take();
}
}
diff --git a/library/std/src/sys/personality/dwarf/eh.rs b/library/std/src/sys/personality/dwarf/eh.rs
index 79624703a..a78084de0 100644
--- a/library/std/src/sys/personality/dwarf/eh.rs
+++ b/library/std/src/sys/personality/dwarf/eh.rs
@@ -1,6 +1,7 @@
//! Parsing of GCC-style Language-Specific Data Area (LSDA)
//! For details see:
//! * <https://refspecs.linuxfoundation.org/LSB_3.0.0/LSB-PDA/LSB-PDA/ehframechpt.html>
+//! * <https://refspecs.linuxfoundation.org/LSB_5.0.0/LSB-Core-generic/LSB-Core-generic/dwarfext.html>
//! * <https://itanium-cxx-abi.github.io/cxx-abi/exceptions.pdf>
//! * <https://www.airs.com/blog/archives/460>
//! * <https://www.airs.com/blog/archives/464>
@@ -37,17 +38,19 @@ pub const DW_EH_PE_indirect: u8 = 0x80;
#[derive(Copy, Clone)]
pub struct EHContext<'a> {
- pub ip: usize, // Current instruction pointer
- pub func_start: usize, // Address of the current function
- pub get_text_start: &'a dyn Fn() -> usize, // Get address of the code section
- pub get_data_start: &'a dyn Fn() -> usize, // Get address of the data section
+ pub ip: *const u8, // Current instruction pointer
+ pub func_start: *const u8, // Pointer to the current function
+ pub get_text_start: &'a dyn Fn() -> *const u8, // Get pointer to the code section
+ pub get_data_start: &'a dyn Fn() -> *const u8, // Get pointer to the data section
}
+/// Landing pad.
+type LPad = *const u8;
pub enum EHAction {
None,
- Cleanup(usize),
- Catch(usize),
- Filter(usize),
+ Cleanup(LPad),
+ Catch(LPad),
+ Filter(LPad),
Terminate,
}
@@ -81,22 +84,24 @@ pub unsafe fn find_eh_action(lsda: *const u8, context: &EHContext<'_>) -> Result
let ip = context.ip;
if !USING_SJLJ_EXCEPTIONS {
+ // read the callsite table
while reader.ptr < action_table {
- let cs_start = read_encoded_pointer(&mut reader, context, call_site_encoding)?;
- let cs_len = read_encoded_pointer(&mut reader, context, call_site_encoding)?;
- let cs_lpad = read_encoded_pointer(&mut reader, context, call_site_encoding)?;
+ // these are offsets rather than pointers;
+ let cs_start = read_encoded_offset(&mut reader, call_site_encoding)?;
+ let cs_len = read_encoded_offset(&mut reader, call_site_encoding)?;
+ let cs_lpad = read_encoded_offset(&mut reader, call_site_encoding)?;
let cs_action_entry = reader.read_uleb128();
// Callsite table is sorted by cs_start, so if we've passed the ip, we
// may stop searching.
- if ip < func_start + cs_start {
+ if ip < func_start.wrapping_add(cs_start) {
break;
}
- if ip < func_start + cs_start + cs_len {
+ if ip < func_start.wrapping_add(cs_start + cs_len) {
if cs_lpad == 0 {
return Ok(EHAction::None);
} else {
- let lpad = lpad_base + cs_lpad;
- return Ok(interpret_cs_action(action_table as *mut u8, cs_action_entry, lpad));
+ let lpad = lpad_base.wrapping_add(cs_lpad);
+ return Ok(interpret_cs_action(action_table, cs_action_entry, lpad));
}
}
}
@@ -106,12 +111,12 @@ pub unsafe fn find_eh_action(lsda: *const u8, context: &EHContext<'_>) -> Result
// SjLj version:
// The "IP" is an index into the call-site table, with two exceptions:
// -1 means 'no-action', and 0 means 'terminate'.
- match ip as isize {
+ match ip.addr() as isize {
-1 => return Ok(EHAction::None),
0 => return Ok(EHAction::Terminate),
_ => (),
}
- let mut idx = ip;
+ let mut idx = ip.addr();
loop {
let cs_lpad = reader.read_uleb128();
let cs_action_entry = reader.read_uleb128();
@@ -119,17 +124,18 @@ pub unsafe fn find_eh_action(lsda: *const u8, context: &EHContext<'_>) -> Result
if idx == 0 {
// Can never have null landing pad for sjlj -- that would have
// been indicated by a -1 call site index.
- let lpad = (cs_lpad + 1) as usize;
- return Ok(interpret_cs_action(action_table as *mut u8, cs_action_entry, lpad));
+ // FIXME(strict provenance)
+ let lpad = ptr::from_exposed_addr((cs_lpad + 1) as usize);
+ return Ok(interpret_cs_action(action_table, cs_action_entry, lpad));
}
}
}
}
unsafe fn interpret_cs_action(
- action_table: *mut u8,
+ action_table: *const u8,
cs_action_entry: u64,
- lpad: usize,
+ lpad: LPad,
) -> EHAction {
if cs_action_entry == 0 {
// If cs_action_entry is 0 then this is a cleanup (Drop::drop). We run these
@@ -138,7 +144,7 @@ unsafe fn interpret_cs_action(
} else {
// If lpad != 0 and cs_action_entry != 0, we have to check ttype_index.
// If ttype_index == 0 under the condition, we take cleanup action.
- let action_record = (action_table as *mut u8).offset(cs_action_entry as isize - 1);
+ let action_record = action_table.offset(cs_action_entry as isize - 1);
let mut action_reader = DwarfReader::new(action_record);
let ttype_index = action_reader.read_sleb128();
if ttype_index == 0 {
@@ -157,22 +163,24 @@ fn round_up(unrounded: usize, align: usize) -> Result<usize, ()> {
if align.is_power_of_two() { Ok((unrounded + align - 1) & !(align - 1)) } else { Err(()) }
}
-unsafe fn read_encoded_pointer(
- reader: &mut DwarfReader,
- context: &EHContext<'_>,
- encoding: u8,
-) -> Result<usize, ()> {
- if encoding == DW_EH_PE_omit {
+/// Read a offset (`usize`) from `reader` whose encoding is described by `encoding`.
+///
+/// `encoding` must be a [DWARF Exception Header Encoding as described by the LSB spec][LSB-dwarf-ext].
+/// In addition the upper ("application") part must be zero.
+///
+/// # Errors
+/// Returns `Err` if `encoding`
+/// * is not a valid DWARF Exception Header Encoding,
+/// * is `DW_EH_PE_omit`, or
+/// * has a non-zero application part.
+///
+/// [LSB-dwarf-ext]: https://refspecs.linuxfoundation.org/LSB_5.0.0/LSB-Core-generic/LSB-Core-generic/dwarfext.html
+unsafe fn read_encoded_offset(reader: &mut DwarfReader, encoding: u8) -> Result<usize, ()> {
+ if encoding == DW_EH_PE_omit || encoding & 0xF0 != 0 {
return Err(());
}
-
- // DW_EH_PE_aligned implies it's an absolute pointer value
- if encoding == DW_EH_PE_aligned {
- reader.ptr = reader.ptr.with_addr(round_up(reader.ptr.addr(), mem::size_of::<usize>())?);
- return Ok(reader.read::<usize>());
- }
-
- let mut result = match encoding & 0x0F {
+ let result = match encoding & 0x0F {
+ // despite the name, LLVM also uses absptr for offsets instead of pointers
DW_EH_PE_absptr => reader.read::<usize>(),
DW_EH_PE_uleb128 => reader.read_uleb128() as usize,
DW_EH_PE_udata2 => reader.read::<u16>() as usize,
@@ -184,25 +192,66 @@ unsafe fn read_encoded_pointer(
DW_EH_PE_sdata8 => reader.read::<i64>() as usize,
_ => return Err(()),
};
+ Ok(result)
+}
+
+/// Read a pointer from `reader` whose encoding is described by `encoding`.
+///
+/// `encoding` must be a [DWARF Exception Header Encoding as described by the LSB spec][LSB-dwarf-ext].
+///
+/// # Errors
+/// Returns `Err` if `encoding`
+/// * is not a valid DWARF Exception Header Encoding,
+/// * is `DW_EH_PE_omit`, or
+/// * combines `DW_EH_PE_absptr` or `DW_EH_PE_aligned` application part with an integer encoding
+/// (not `DW_EH_PE_absptr`) in the value format part.
+///
+/// [LSB-dwarf-ext]: https://refspecs.linuxfoundation.org/LSB_5.0.0/LSB-Core-generic/LSB-Core-generic/dwarfext.html
+unsafe fn read_encoded_pointer(
+ reader: &mut DwarfReader,
+ context: &EHContext<'_>,
+ encoding: u8,
+) -> Result<*const u8, ()> {
+ if encoding == DW_EH_PE_omit {
+ return Err(());
+ }
- result += match encoding & 0x70 {
- DW_EH_PE_absptr => 0,
+ let base_ptr = match encoding & 0x70 {
+ DW_EH_PE_absptr => core::ptr::null(),
// relative to address of the encoded value, despite the name
- DW_EH_PE_pcrel => reader.ptr.expose_addr(),
+ DW_EH_PE_pcrel => reader.ptr,
DW_EH_PE_funcrel => {
- if context.func_start == 0 {
+ if context.func_start.is_null() {
return Err(());
}
context.func_start
}
DW_EH_PE_textrel => (*context.get_text_start)(),
DW_EH_PE_datarel => (*context.get_data_start)(),
+ // aligned means the value is aligned to the size of a pointer
+ DW_EH_PE_aligned => {
+ reader.ptr =
+ reader.ptr.with_addr(round_up(reader.ptr.addr(), mem::size_of::<*const u8>())?);
+ core::ptr::null()
+ }
_ => return Err(()),
};
+ let mut ptr = if base_ptr.is_null() {
+ // any value encoding other than absptr would be nonsensical here;
+ // there would be no source of pointer provenance
+ if encoding & 0x0F != DW_EH_PE_absptr {
+ return Err(());
+ }
+ reader.read::<*const u8>()
+ } else {
+ let offset = read_encoded_offset(reader, encoding & 0x0F)?;
+ base_ptr.wrapping_add(offset)
+ };
+
if encoding & DW_EH_PE_indirect != 0 {
- result = *ptr::from_exposed_addr::<usize>(result);
+ ptr = *(ptr.cast::<*const u8>());
}
- Ok(result)
+ Ok(ptr)
}
diff --git a/library/std/src/sys/personality/gcc.rs b/library/std/src/sys/personality/gcc.rs
index e477a0cd7..6f3171311 100644
--- a/library/std/src/sys/personality/gcc.rs
+++ b/library/std/src/sys/personality/gcc.rs
@@ -38,7 +38,6 @@
use super::dwarf::eh::{self, EHAction, EHContext};
use crate::ffi::c_int;
-use libc::uintptr_t;
use unwind as uw;
// Register ids were lifted from LLVM's TargetLowering::getExceptionPointerRegister()
@@ -95,7 +94,7 @@ const UNWIND_DATA_REG: (i32, i32) = (4, 5); // a0, a1
cfg_if::cfg_if! {
if #[cfg(all(target_arch = "arm", not(target_os = "ios"), not(target_os = "tvos"), not(target_os = "watchos"), not(target_os = "netbsd")))] {
// ARM EHABI personality routine.
- // https://infocenter.arm.com/help/topic/com.arm.doc.ihi0038b/IHI0038B_ehabi.pdf
+ // https://web.archive.org/web/20190728160938/https://infocenter.arm.com/help/topic/com.arm.doc.ihi0038b/IHI0038B_ehabi.pdf
//
// iOS uses the default routine instead since it uses SjLj unwinding.
#[lang = "eh_personality"]
@@ -160,9 +159,9 @@ cfg_if::cfg_if! {
uw::_Unwind_SetGR(
context,
UNWIND_DATA_REG.0,
- exception_object as uintptr_t,
+ exception_object as uw::_Unwind_Ptr,
);
- uw::_Unwind_SetGR(context, UNWIND_DATA_REG.1, 0);
+ uw::_Unwind_SetGR(context, UNWIND_DATA_REG.1, core::ptr::null());
uw::_Unwind_SetIP(context, lpad);
return uw::_URC_INSTALL_CONTEXT;
}
@@ -222,9 +221,9 @@ cfg_if::cfg_if! {
uw::_Unwind_SetGR(
context,
UNWIND_DATA_REG.0,
- exception_object as uintptr_t,
+ exception_object.cast(),
);
- uw::_Unwind_SetGR(context, UNWIND_DATA_REG.1, 0);
+ uw::_Unwind_SetGR(context, UNWIND_DATA_REG.1, core::ptr::null());
uw::_Unwind_SetIP(context, lpad);
uw::_URC_INSTALL_CONTEXT
}
diff --git a/library/std/src/sys/sgx/abi/usercalls/alloc.rs b/library/std/src/sys/sgx/abi/usercalls/alloc.rs
index 01505e944..817c33b66 100644
--- a/library/std/src/sys/sgx/abi/usercalls/alloc.rs
+++ b/library/std/src/sys/sgx/abi/usercalls/alloc.rs
@@ -3,6 +3,8 @@
use crate::arch::asm;
use crate::cell::UnsafeCell;
use crate::cmp;
+use crate::convert::TryInto;
+use crate::intrinsics;
use crate::mem;
use crate::ops::{CoerceUnsized, Deref, DerefMut, Index, IndexMut};
use crate::ptr::{self, NonNull};
@@ -306,20 +308,35 @@ where
}
}
-// Split a memory region ptr..ptr + len into three parts:
-// +--------+
-// | small0 | Chunk smaller than 8 bytes
-// +--------+
-// | big | Chunk 8-byte aligned, and size a multiple of 8 bytes
-// +--------+
-// | small1 | Chunk smaller than 8 bytes
-// +--------+
-fn region_as_aligned_chunks(ptr: *const u8, len: usize) -> (usize, usize, usize) {
- let small0_size = if ptr.is_aligned_to(8) { 0 } else { 8 - ptr.addr() % 8 };
- let small1_size = (len - small0_size) % 8;
- let big_size = len - small0_size - small1_size;
-
- (small0_size, big_size, small1_size)
+/// Divide the slice `(ptr, len)` into three parts, where the middle part is
+/// aligned to `u64`.
+///
+/// The return values `(prefix_len, mid_len, suffix_len)` add back up to `len`.
+/// The return values are such that the memory region `(ptr + prefix_len,
+/// mid_len)` is the largest possible region where `ptr + prefix_len` is aligned
+/// to `u64` and `mid_len` is a multiple of the byte size of `u64`. This means
+/// that `prefix_len` and `suffix_len` are guaranteed to be less than the byte
+/// size of `u64`, and that `(ptr, prefix_len)` and `(ptr + prefix_len +
+/// mid_len, suffix_len)` don't straddle an alignment boundary.
+// Standard Rust functions such as `<[u8]>::align_to::<u64>` and
+// `<*const u8>::align_offset` aren't _guaranteed_ to compute the largest
+// possible middle region, and as such can't be used.
+fn u64_align_to_guaranteed(ptr: *const u8, mut len: usize) -> (usize, usize, usize) {
+ const QWORD_SIZE: usize = mem::size_of::<u64>();
+
+ let offset = ptr as usize % QWORD_SIZE;
+
+ let prefix_len = if intrinsics::unlikely(offset > 0) { QWORD_SIZE - offset } else { 0 };
+
+ len = match len.checked_sub(prefix_len) {
+ Some(remaining_len) => remaining_len,
+ None => return (len, 0, 0),
+ };
+
+ let suffix_len = len % QWORD_SIZE;
+ len -= suffix_len;
+
+ (prefix_len, len, suffix_len)
}
unsafe fn copy_quadwords(src: *const u8, dst: *mut u8, len: usize) {
@@ -352,7 +369,13 @@ unsafe fn copy_quadwords(src: *const u8, dst: *mut u8, len: usize) {
/// - https://www.intel.com/content/www/us/en/security-center/advisory/intel-sa-00615.html
/// - https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/processor-mmio-stale-data-vulnerabilities.html#inpage-nav-3-2-2
pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize) {
- unsafe fn copy_bytewise_to_userspace(src: *const u8, dst: *mut u8, len: usize) {
+ /// Like `ptr::copy(src, dst, len)`, except it uses the Intel-recommended
+ /// instruction sequence for unaligned writes.
+ unsafe fn write_bytewise_to_userspace(src: *const u8, dst: *mut u8, len: usize) {
+ if intrinsics::likely(len == 0) {
+ return;
+ }
+
unsafe {
let mut seg_sel: u16 = 0;
for off in 0..len {
@@ -380,41 +403,15 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize)
assert!(!src.addr().overflowing_add(len).1);
assert!(!dst.addr().overflowing_add(len).1);
- if len < 8 {
- // Can't align on 8 byte boundary: copy safely byte per byte
- unsafe {
- copy_bytewise_to_userspace(src, dst, len);
- }
- } else if len % 8 == 0 && dst.is_aligned_to(8) {
- // Copying 8-byte aligned quadwords: copy quad word per quad word
- unsafe {
- copy_quadwords(src, dst, len);
- }
- } else {
- // Split copies into three parts:
- // +--------+
- // | small0 | Chunk smaller than 8 bytes
- // +--------+
- // | big | Chunk 8-byte aligned, and size a multiple of 8 bytes
- // +--------+
- // | small1 | Chunk smaller than 8 bytes
- // +--------+
- let (small0_size, big_size, small1_size) = region_as_aligned_chunks(dst, len);
+ unsafe {
+ let (len1, len2, len3) = u64_align_to_guaranteed(dst, len);
+ let (src1, dst1) = (src, dst);
+ let (src2, dst2) = (src1.add(len1), dst1.add(len1));
+ let (src3, dst3) = (src2.add(len2), dst2.add(len2));
- unsafe {
- // Copy small0
- copy_bytewise_to_userspace(src, dst, small0_size);
-
- // Copy big
- let big_src = src.add(small0_size);
- let big_dst = dst.add(small0_size);
- copy_quadwords(big_src, big_dst, big_size);
-
- // Copy small1
- let small1_src = src.add(big_size + small0_size);
- let small1_dst = dst.add(big_size + small0_size);
- copy_bytewise_to_userspace(small1_src, small1_dst, small1_size);
- }
+ write_bytewise_to_userspace(src1, dst1, len1);
+ copy_quadwords(src2, dst2, len2);
+ write_bytewise_to_userspace(src3, dst3, len3);
}
}
@@ -434,45 +431,33 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize)
/// - https://www.intel.com/content/www/us/en/security-center/advisory/intel-sa-00657.html
/// - https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/stale-data-read-from-xapic.html
pub(crate) unsafe fn copy_from_userspace(src: *const u8, dst: *mut u8, len: usize) {
- // Copies memory region `src..src + len` to the enclave at `dst`. The source memory region
- // is:
- // - strictly less than 8 bytes in size and may be
- // - located at a misaligned memory location
- fn copy_misaligned_chunk_to_enclave(src: *const u8, dst: *mut u8, len: usize) {
- let mut tmp_buff = [0u8; 16];
+ /// Like `ptr::copy(src, dst, len)`, except it uses only u64-aligned reads.
+ ///
+ /// # Safety
+ /// The source memory region must not straddle an alignment boundary.
+ unsafe fn read_misaligned_from_userspace(src: *const u8, dst: *mut u8, len: usize) {
+ if intrinsics::likely(len == 0) {
+ return;
+ }
unsafe {
- // Compute an aligned memory region to read from
- // +--------+ <-- aligned_src + aligned_len (8B-aligned)
- // | pad1 |
- // +--------+ <-- src + len (misaligned)
- // | |
- // | |
- // | |
- // +--------+ <-- src (misaligned)
- // | pad0 |
- // +--------+ <-- aligned_src (8B-aligned)
- let pad0_size = src as usize % 8;
- let aligned_src = src.sub(pad0_size);
-
- let pad1_size = 8 - (src.add(len) as usize % 8);
- let aligned_len = pad0_size + len + pad1_size;
-
- debug_assert!(len < 8);
- debug_assert_eq!(aligned_src as usize % 8, 0);
- debug_assert_eq!(aligned_len % 8, 0);
- debug_assert!(aligned_len <= 16);
-
- // Copy the aligned buffer to a temporary buffer
- // Note: copying from a slightly different memory location is a bit odd. In this case it
- // can't lead to page faults or inadvertent copying from the enclave as we only ensured
- // that the `src` pointer is aligned at an 8 byte boundary. As pages are 4096 bytes
- // aligned, `aligned_src` must be on the same page as `src`. A similar argument can be made
- // for `src + len`
- copy_quadwords(aligned_src as _, tmp_buff.as_mut_ptr(), aligned_len);
-
- // Copy the correct parts of the temporary buffer to the destination
- ptr::copy(tmp_buff.as_ptr().add(pad0_size), dst, len);
+ let offset: usize;
+ let data: u64;
+ // doing a memory read that's potentially out of bounds for `src`,
+ // this isn't supported by Rust, so have to use assembly
+ asm!("
+ movl {src:e}, {offset:e}
+ andl $7, {offset:e}
+ andq $-8, {src}
+ movq ({src}), {dst}
+ ",
+ src = inout(reg) src => _,
+ offset = out(reg) offset,
+ dst = out(reg) data,
+ options(nostack, att_syntax, readonly, pure)
+ );
+ let data = data.to_le_bytes();
+ ptr::copy_nonoverlapping(data.as_ptr().add(offset), dst, len);
}
}
@@ -480,41 +465,19 @@ pub(crate) unsafe fn copy_from_userspace(src: *const u8, dst: *mut u8, len: usiz
assert!(!dst.is_null());
assert!(is_user_range(src, len));
assert!(is_enclave_range(dst, len));
- assert!(!(src as usize).overflowing_add(len + 8).1);
- assert!(!(dst as usize).overflowing_add(len + 8).1);
+ assert!(len < isize::MAX as usize);
+ assert!(!(src as usize).overflowing_add(len).1);
+ assert!(!(dst as usize).overflowing_add(len).1);
- if len < 8 {
- copy_misaligned_chunk_to_enclave(src, dst, len);
- } else if len % 8 == 0 && src as usize % 8 == 0 {
- // Copying 8-byte aligned quadwords: copy quad word per quad word
- unsafe {
- copy_quadwords(src, dst, len);
- }
- } else {
- // Split copies into three parts:
- // +--------+
- // | small0 | Chunk smaller than 8 bytes
- // +--------+
- // | big | Chunk 8-byte aligned, and size a multiple of 8 bytes
- // +--------+
- // | small1 | Chunk smaller than 8 bytes
- // +--------+
- let (small0_size, big_size, small1_size) = region_as_aligned_chunks(dst, len);
+ unsafe {
+ let (len1, len2, len3) = u64_align_to_guaranteed(src, len);
+ let (src1, dst1) = (src, dst);
+ let (src2, dst2) = (src1.add(len1), dst1.add(len1));
+ let (src3, dst3) = (src2.add(len2), dst2.add(len2));
- unsafe {
- // Copy small0
- copy_misaligned_chunk_to_enclave(src, dst, small0_size);
-
- // Copy big
- let big_src = src.add(small0_size);
- let big_dst = dst.add(small0_size);
- copy_quadwords(big_src, big_dst, big_size);
-
- // Copy small1
- let small1_src = src.add(big_size + small0_size);
- let small1_dst = dst.add(big_size + small0_size);
- copy_misaligned_chunk_to_enclave(small1_src, small1_dst, small1_size);
- }
+ read_misaligned_from_userspace(src1, dst1, len1);
+ copy_quadwords(src2, dst2, len2);
+ read_misaligned_from_userspace(src3, dst3, len3);
}
}
@@ -609,9 +572,9 @@ where
/// Copies the value from user memory into enclave memory.
pub fn to_enclave(&self) -> T {
unsafe {
- let mut data: T = mem::MaybeUninit::uninit().assume_init();
- copy_from_userspace(self.0.get() as _, &mut data as *mut T as _, mem::size_of::<T>());
- data
+ let mut data = mem::MaybeUninit::uninit();
+ copy_from_userspace(self.0.get() as _, data.as_mut_ptr() as _, mem::size_of::<T>());
+ data.assume_init()
}
}
}
diff --git a/library/std/src/sys/sgx/waitqueue/mod.rs b/library/std/src/sys/sgx/waitqueue/mod.rs
index 5e1d859ee..25eca61d6 100644
--- a/library/std/src/sys/sgx/waitqueue/mod.rs
+++ b/library/std/src/sys/sgx/waitqueue/mod.rs
@@ -18,6 +18,7 @@ mod unsafe_list;
use crate::num::NonZeroUsize;
use crate::ops::{Deref, DerefMut};
+use crate::panic::{self, AssertUnwindSafe};
use crate::time::Duration;
use super::abi::thread;
@@ -147,7 +148,8 @@ impl WaitQueue {
/// Adds the calling thread to the `WaitVariable`'s wait queue, then wait
/// until a wakeup event.
///
- /// This function does not return until this thread has been awoken.
+ /// This function does not return until this thread has been awoken. When `before_wait` panics,
+ /// this function will abort.
pub fn wait<T, F: FnOnce()>(mut guard: SpinMutexGuard<'_, WaitVariable<T>>, before_wait: F) {
// very unsafe: check requirements of UnsafeList::push
unsafe {
@@ -157,8 +159,13 @@ impl WaitQueue {
}));
let entry = guard.queue.inner.push(&mut entry);
drop(guard);
- before_wait();
+ if let Err(_e) = panic::catch_unwind(AssertUnwindSafe(|| before_wait())) {
+ rtabort!("Panic before wait on wakeup event")
+ }
while !entry.lock().wake {
+ // `entry.wake` is only set in `notify_one` and `notify_all` functions. Both ensure
+ // the entry is removed from the queue _before_ setting this bool. There are no
+ // other references to `entry`.
// don't panic, this would invalidate `entry` during unwinding
let eventset = rtunwrap!(Ok, usercalls::wait(EV_UNPARK, WAIT_INDEFINITE));
rtassert!(eventset & EV_UNPARK == EV_UNPARK);
@@ -169,6 +176,7 @@ impl WaitQueue {
/// Adds the calling thread to the `WaitVariable`'s wait queue, then wait
/// until a wakeup event or timeout. If event was observed, returns true.
/// If not, it will remove the calling thread from the wait queue.
+ /// When `before_wait` panics, this function will abort.
pub fn wait_timeout<T, F: FnOnce()>(
lock: &SpinMutex<WaitVariable<T>>,
timeout: Duration,
@@ -181,9 +189,13 @@ impl WaitQueue {
wake: false,
}));
let entry_lock = lock.lock().queue.inner.push(&mut entry);
- before_wait();
+ if let Err(_e) = panic::catch_unwind(AssertUnwindSafe(|| before_wait())) {
+ rtabort!("Panic before wait on wakeup event or timeout")
+ }
usercalls::wait_timeout(EV_UNPARK, timeout, || entry_lock.lock().wake);
- // acquire the wait queue's lock first to avoid deadlock.
+ // acquire the wait queue's lock first to avoid deadlock
+ // and ensure no other function can simultaneously access the list
+ // (e.g., `notify_one` or `notify_all`)
let mut guard = lock.lock();
let success = entry_lock.lock().wake;
if !success {
@@ -204,8 +216,8 @@ impl WaitQueue {
) -> Result<WaitGuard<'_, T>, SpinMutexGuard<'_, WaitVariable<T>>> {
// SAFETY: lifetime of the pop() return value is limited to the map
// closure (The closure return value is 'static). The underlying
- // stack frame won't be freed until after the WaitGuard created below
- // is dropped.
+ // stack frame won't be freed until after the lock on the queue is released
+ // (i.e., `guard` is dropped).
unsafe {
let tcs = guard.queue.inner.pop().map(|entry| -> Tcs {
let mut entry_guard = entry.lock();
@@ -231,7 +243,7 @@ impl WaitQueue {
) -> Result<WaitGuard<'_, T>, SpinMutexGuard<'_, WaitVariable<T>>> {
// SAFETY: lifetime of the pop() return values are limited to the
// while loop body. The underlying stack frames won't be freed until
- // after the WaitGuard created below is dropped.
+ // after the lock on the queue is released (i.e., `guard` is dropped).
unsafe {
let mut count = 0;
while let Some(entry) = guard.queue.inner.pop() {
diff --git a/library/std/src/sys/solid/net.rs b/library/std/src/sys/solid/net.rs
index 6adced787..1eae0fc06 100644
--- a/library/std/src/sys/solid/net.rs
+++ b/library/std/src/sys/solid/net.rs
@@ -233,12 +233,15 @@ impl Socket {
}
}
+ pub fn connect(&self, addr: &SocketAddr) -> io::Result<()> {
+ let (addr, len) = addr.into_inner();
+ cvt(unsafe { netc::connect(self.0.raw(), addr.as_ptr(), len) })?;
+ Ok(())
+ }
+
pub fn connect_timeout(&self, addr: &SocketAddr, timeout: Duration) -> io::Result<()> {
self.set_nonblocking(true)?;
- let r = unsafe {
- let (addr, len) = addr.into_inner();
- cvt(netc::connect(self.0.raw(), addr.as_ptr(), len))
- };
+ let r = self.connect(addr);
self.set_nonblocking(false)?;
match r {
diff --git a/library/std/src/sys/solid/thread_local_dtor.rs b/library/std/src/sys/solid/thread_local_dtor.rs
index bad14bb37..26918a4fc 100644
--- a/library/std/src/sys/solid/thread_local_dtor.rs
+++ b/library/std/src/sys/solid/thread_local_dtor.rs
@@ -4,14 +4,13 @@
// Simplify dtor registration by using a list of destructors.
use super::{abi, itron::task};
-use crate::cell::Cell;
-use crate::mem;
+use crate::cell::{Cell, RefCell};
#[thread_local]
static REGISTERED: Cell<bool> = Cell::new(false);
#[thread_local]
-static mut DTORS: Vec<(*mut u8, unsafe extern "C" fn(*mut u8))> = Vec::new();
+static DTORS: RefCell<Vec<(*mut u8, unsafe extern "C" fn(*mut u8))>> = RefCell::new(Vec::new());
pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
if !REGISTERED.get() {
@@ -22,18 +21,20 @@ pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
REGISTERED.set(true);
}
- let list = unsafe { &mut DTORS };
- list.push((t, dtor));
+ match DTORS.try_borrow_mut() {
+ Ok(mut dtors) => dtors.push((t, dtor)),
+ Err(_) => rtabort!("global allocator may not use TLS"),
+ }
}
pub unsafe fn run_dtors() {
- let mut list = mem::take(unsafe { &mut DTORS });
+ let mut list = DTORS.take();
while !list.is_empty() {
for (ptr, dtor) in list {
unsafe { dtor(ptr) };
}
- list = mem::take(unsafe { &mut DTORS });
+ list = DTORS.take();
}
}
diff --git a/library/std/src/sys/uefi/alloc.rs b/library/std/src/sys/uefi/alloc.rs
index 789e3cbd8..ad3904d82 100644
--- a/library/std/src/sys/uefi/alloc.rs
+++ b/library/std/src/sys/uefi/alloc.rs
@@ -1,13 +1,17 @@
//! Global Allocator for UEFI.
//! Uses [r-efi-alloc](https://crates.io/crates/r-efi-alloc)
-use crate::alloc::{GlobalAlloc, Layout, System};
+use r_efi::protocols::loaded_image;
-const MEMORY_TYPE: u32 = r_efi::efi::LOADER_DATA;
+use crate::alloc::{GlobalAlloc, Layout, System};
+use crate::sync::OnceLock;
+use crate::sys::uefi::helpers;
#[stable(feature = "alloc_system_type", since = "1.28.0")]
unsafe impl GlobalAlloc for System {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ static EFI_MEMORY_TYPE: OnceLock<u32> = OnceLock::new();
+
// Return null pointer if boot services are not available
if crate::os::uefi::env::boot_services().is_none() {
return crate::ptr::null_mut();
@@ -15,8 +19,20 @@ unsafe impl GlobalAlloc for System {
// If boot services is valid then SystemTable is not null.
let system_table = crate::os::uefi::env::system_table().as_ptr().cast();
+
+ // Each loaded image has an image handle that supports `EFI_LOADED_IMAGE_PROTOCOL`. Thus, this
+ // will never fail.
+ let mem_type = EFI_MEMORY_TYPE.get_or_init(|| {
+ let protocol = helpers::image_handle_protocol::<loaded_image::Protocol>(
+ loaded_image::PROTOCOL_GUID,
+ )
+ .unwrap();
+ // Gives allocations the memory type that the data sections were loaded as.
+ unsafe { (*protocol.as_ptr()).image_data_type }
+ });
+
// The caller must ensure non-0 layout
- unsafe { r_efi_alloc::raw::alloc(system_table, layout, MEMORY_TYPE) }
+ unsafe { r_efi_alloc::raw::alloc(system_table, layout, *mem_type) }
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
diff --git a/library/std/src/sys/uefi/args.rs b/library/std/src/sys/uefi/args.rs
new file mode 100644
index 000000000..4ff7be748
--- /dev/null
+++ b/library/std/src/sys/uefi/args.rs
@@ -0,0 +1,158 @@
+use r_efi::protocols::loaded_image;
+
+use crate::env::current_exe;
+use crate::ffi::OsString;
+use crate::fmt;
+use crate::iter::Iterator;
+use crate::mem::size_of;
+use crate::sys::uefi::helpers;
+use crate::vec;
+
+pub struct Args {
+ parsed_args_list: vec::IntoIter<OsString>,
+}
+
+pub fn args() -> Args {
+ let lazy_current_exe = || Vec::from([current_exe().map(Into::into).unwrap_or_default()]);
+
+ // Each loaded image has an image handle that supports `EFI_LOADED_IMAGE_PROTOCOL`. Thus, this
+ // will never fail.
+ let protocol =
+ helpers::image_handle_protocol::<loaded_image::Protocol>(loaded_image::PROTOCOL_GUID)
+ .unwrap();
+
+ let lp_size = unsafe { (*protocol.as_ptr()).load_options_size } as usize;
+ // Break if we are sure that it cannot be UTF-16
+ if lp_size < size_of::<u16>() || lp_size % size_of::<u16>() != 0 {
+ return Args { parsed_args_list: lazy_current_exe().into_iter() };
+ }
+ let lp_size = lp_size / size_of::<u16>();
+
+ let lp_cmd_line = unsafe { (*protocol.as_ptr()).load_options as *const u16 };
+ if !lp_cmd_line.is_aligned() {
+ return Args { parsed_args_list: lazy_current_exe().into_iter() };
+ }
+ let lp_cmd_line = unsafe { crate::slice::from_raw_parts(lp_cmd_line, lp_size) };
+
+ Args {
+ parsed_args_list: parse_lp_cmd_line(lp_cmd_line)
+ .unwrap_or_else(lazy_current_exe)
+ .into_iter(),
+ }
+}
+
+impl fmt::Debug for Args {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.parsed_args_list.as_slice().fmt(f)
+ }
+}
+
+impl Iterator for Args {
+ type Item = OsString;
+
+ fn next(&mut self) -> Option<OsString> {
+ self.parsed_args_list.next()
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.parsed_args_list.size_hint()
+ }
+}
+
+impl ExactSizeIterator for Args {
+ fn len(&self) -> usize {
+ self.parsed_args_list.len()
+ }
+}
+
+impl DoubleEndedIterator for Args {
+ fn next_back(&mut self) -> Option<OsString> {
+ self.parsed_args_list.next_back()
+ }
+}
+
+/// Implements the UEFI command-line argument parsing algorithm.
+///
+/// This implementation is based on what is defined in Section 3.4 of
+/// [UEFI Shell Specification](https://uefi.org/sites/default/files/resources/UEFI_Shell_Spec_2_0.pdf)
+///
+/// Return None in the following cases:
+/// - Invalid UTF-16 (unpaired surrogate)
+/// - Empty/improper arguments
+fn parse_lp_cmd_line(code_units: &[u16]) -> Option<Vec<OsString>> {
+ const QUOTE: char = '"';
+ const SPACE: char = ' ';
+ const CARET: char = '^';
+ const NULL: char = '\0';
+
+ let mut ret_val = Vec::new();
+ let mut code_units_iter = char::decode_utf16(code_units.iter().cloned()).peekable();
+
+ // The executable name at the beginning is special.
+ let mut in_quotes = false;
+ let mut cur = String::new();
+ while let Some(w) = code_units_iter.next() {
+ let w = w.ok()?;
+ match w {
+ // break on NULL
+ NULL => break,
+ // A quote mark always toggles `in_quotes` no matter what because
+ // there are no escape characters when parsing the executable name.
+ QUOTE => in_quotes = !in_quotes,
+ // If not `in_quotes` then whitespace ends argv[0].
+ SPACE if !in_quotes => break,
+ // In all other cases the code unit is taken literally.
+ _ => cur.push(w),
+ }
+ }
+
+ // If exe name is missing, the cli args are invalid
+ if cur.is_empty() {
+ return None;
+ }
+
+ ret_val.push(OsString::from(cur));
+ // Skip whitespace.
+ while code_units_iter.next_if_eq(&Ok(SPACE)).is_some() {}
+
+ // Parse the arguments according to these rules:
+ // * All code units are taken literally except space, quote and caret.
+ // * When not `in_quotes`, space separate arguments. Consecutive spaces are
+ // treated as a single separator.
+ // * A space `in_quotes` is taken literally.
+ // * A quote toggles `in_quotes` mode unless it's escaped. An escaped quote is taken literally.
+ // * A quote can be escaped if preceded by caret.
+ // * A caret can be escaped if preceded by caret.
+ let mut cur = String::new();
+ let mut in_quotes = false;
+ while let Some(w) = code_units_iter.next() {
+ let w = w.ok()?;
+ match w {
+ // break on NULL
+ NULL => break,
+ // If not `in_quotes`, a space or tab ends the argument.
+ SPACE if !in_quotes => {
+ ret_val.push(OsString::from(&cur[..]));
+ cur.truncate(0);
+
+ // Skip whitespace.
+ while code_units_iter.next_if_eq(&Ok(SPACE)).is_some() {}
+ }
+ // Caret can escape quotes or carets
+ CARET if in_quotes => {
+ if let Some(x) = code_units_iter.next() {
+ cur.push(x.ok()?);
+ }
+ }
+ // If quote then flip `in_quotes`
+ QUOTE => in_quotes = !in_quotes,
+ // Everything else is always taken literally.
+ _ => cur.push(w),
+ }
+ }
+ // Push the final argument, if any.
+ if !cur.is_empty() || in_quotes {
+ ret_val.push(OsString::from(cur));
+ }
+ Some(ret_val)
+}
diff --git a/library/std/src/sys/uefi/helpers.rs b/library/std/src/sys/uefi/helpers.rs
index 126661bfc..9837cc89f 100644
--- a/library/std/src/sys/uefi/helpers.rs
+++ b/library/std/src/sys/uefi/helpers.rs
@@ -139,3 +139,10 @@ pub(crate) unsafe fn close_event(evt: NonNull<crate::ffi::c_void>) -> io::Result
if r.is_error() { Err(crate::io::Error::from_raw_os_error(r.as_usize())) } else { Ok(()) }
}
+
+/// Get the Protocol for current system handle.
+/// Note: Some protocols need to be manually freed. It is the callers responsibility to do so.
+pub(crate) fn image_handle_protocol<T>(protocol_guid: Guid) -> Option<NonNull<T>> {
+ let system_handle = uefi::env::try_image_handle()?;
+ open_protocol(system_handle, protocol_guid).ok()
+}
diff --git a/library/std/src/sys/uefi/mod.rs b/library/std/src/sys/uefi/mod.rs
index 9a10395af..4edc00e3e 100644
--- a/library/std/src/sys/uefi/mod.rs
+++ b/library/std/src/sys/uefi/mod.rs
@@ -13,7 +13,6 @@
//! [`OsString`]: crate::ffi::OsString
pub mod alloc;
-#[path = "../unsupported/args.rs"]
pub mod args;
#[path = "../unix/cmath.rs"]
pub mod cmath;
@@ -36,7 +35,6 @@ pub mod path;
pub mod pipe;
#[path = "../unsupported/process.rs"]
pub mod process;
-#[path = "../unsupported/stdio.rs"]
pub mod stdio;
#[path = "../unsupported/thread.rs"]
pub mod thread;
diff --git a/library/std/src/sys/uefi/stdio.rs b/library/std/src/sys/uefi/stdio.rs
new file mode 100644
index 000000000..a533d8a05
--- /dev/null
+++ b/library/std/src/sys/uefi/stdio.rs
@@ -0,0 +1,162 @@
+use crate::io;
+use crate::iter::Iterator;
+use crate::mem::MaybeUninit;
+use crate::os::uefi;
+use crate::ptr::NonNull;
+
+const MAX_BUFFER_SIZE: usize = 8192;
+
+pub struct Stdin;
+pub struct Stdout;
+pub struct Stderr;
+
+impl Stdin {
+ pub const fn new() -> Stdin {
+ Stdin
+ }
+}
+
+impl io::Read for Stdin {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ let st: NonNull<r_efi::efi::SystemTable> = uefi::env::system_table().cast();
+ let stdin = unsafe { (*st.as_ptr()).con_in };
+
+ // Try reading any pending data
+ let inp = match read_key_stroke(stdin) {
+ Ok(x) => x,
+ Err(e) if e == r_efi::efi::Status::NOT_READY => {
+ // Wait for keypress for new data
+ wait_stdin(stdin)?;
+ read_key_stroke(stdin).map_err(|x| io::Error::from_raw_os_error(x.as_usize()))?
+ }
+ Err(e) => {
+ return Err(io::Error::from_raw_os_error(e.as_usize()));
+ }
+ };
+
+        // Check if the key is a printable character
+ if inp.scan_code != 0x00 {
+ return Err(io::const_io_error!(io::ErrorKind::Interrupted, "Special Key Press"));
+ }
+
+ // SAFETY: Iterator will have only 1 character since we are reading only 1 Key
+ // SAFETY: This character will always be UCS-2 and thus no surrogates.
+ let ch: char = char::decode_utf16([inp.unicode_char]).next().unwrap().unwrap();
+ if ch.len_utf8() > buf.len() {
+ return Ok(0);
+ }
+
+ ch.encode_utf8(buf);
+
+ Ok(ch.len_utf8())
+ }
+}
+
+impl Stdout {
+ pub const fn new() -> Stdout {
+ Stdout
+ }
+}
+
+impl io::Write for Stdout {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ let st: NonNull<r_efi::efi::SystemTable> = uefi::env::system_table().cast();
+ let stdout = unsafe { (*st.as_ptr()).con_out };
+
+ write(stdout, buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+impl Stderr {
+ pub const fn new() -> Stderr {
+ Stderr
+ }
+}
+
+impl io::Write for Stderr {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ let st: NonNull<r_efi::efi::SystemTable> = uefi::env::system_table().cast();
+ let stderr = unsafe { (*st.as_ptr()).std_err };
+
+ write(stderr, buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+// UCS-2 character should occupy 3 bytes at most in UTF-8
+pub const STDIN_BUF_SIZE: usize = 3;
+
+pub fn is_ebadf(_err: &io::Error) -> bool {
+ true
+}
+
+pub fn panic_output() -> Option<impl io::Write> {
+ uefi::env::try_system_table().map(|_| Stderr::new())
+}
+
+fn write(
+ protocol: *mut r_efi::protocols::simple_text_output::Protocol,
+ buf: &[u8],
+) -> io::Result<usize> {
+ let mut utf16 = [0; MAX_BUFFER_SIZE / 2];
+
+ // Get valid UTF-8 buffer
+ let utf8 = match crate::str::from_utf8(buf) {
+ Ok(x) => x,
+ Err(e) => unsafe { crate::str::from_utf8_unchecked(&buf[..e.valid_up_to()]) },
+ };
+ // Clip UTF-8 buffer to max UTF-16 buffer we support
+ let utf8 = &utf8[..utf8.floor_char_boundary(utf16.len() - 1)];
+
+ for (i, ch) in utf8.encode_utf16().enumerate() {
+ utf16[i] = ch;
+ }
+
+ unsafe { simple_text_output(protocol, &mut utf16) }?;
+
+ Ok(utf8.len())
+}
+
+unsafe fn simple_text_output(
+ protocol: *mut r_efi::protocols::simple_text_output::Protocol,
+ buf: &mut [u16],
+) -> io::Result<()> {
+ let res = unsafe { ((*protocol).output_string)(protocol, buf.as_mut_ptr()) };
+ if res.is_error() { Err(io::Error::from_raw_os_error(res.as_usize())) } else { Ok(()) }
+}
+
+fn wait_stdin(stdin: *mut r_efi::protocols::simple_text_input::Protocol) -> io::Result<()> {
+ let boot_services: NonNull<r_efi::efi::BootServices> =
+ uefi::env::boot_services().unwrap().cast();
+ let wait_for_event = unsafe { (*boot_services.as_ptr()).wait_for_event };
+ let wait_for_key_event = unsafe { (*stdin).wait_for_key };
+
+ let r = {
+ let mut x: usize = 0;
+ (wait_for_event)(1, [wait_for_key_event].as_mut_ptr(), &mut x)
+ };
+ if r.is_error() { Err(io::Error::from_raw_os_error(r.as_usize())) } else { Ok(()) }
+}
+
+fn read_key_stroke(
+ stdin: *mut r_efi::protocols::simple_text_input::Protocol,
+) -> Result<r_efi::protocols::simple_text_input::InputKey, r_efi::efi::Status> {
+ let mut input_key: MaybeUninit<r_efi::protocols::simple_text_input::InputKey> =
+ MaybeUninit::uninit();
+
+ let r = unsafe { ((*stdin).read_key_stroke)(stdin, input_key.as_mut_ptr()) };
+
+ if r.is_error() {
+ Err(r)
+ } else {
+ let input_key = unsafe { input_key.assume_init() };
+ Ok(input_key)
+ }
+}
diff --git a/library/std/src/sys/unix/args.rs b/library/std/src/sys/unix/args.rs
index 19334e2af..2da17fabc 100644
--- a/library/std/src/sys/unix/args.rs
+++ b/library/std/src/sys/unix/args.rs
@@ -70,6 +70,7 @@ impl DoubleEndedIterator for Args {
target_os = "redox",
target_os = "vxworks",
target_os = "horizon",
+ target_os = "aix",
target_os = "nto",
target_os = "hurd",
))]
diff --git a/library/std/src/sys/unix/env.rs b/library/std/src/sys/unix/env.rs
index c6d8578a6..3bb492fa9 100644
--- a/library/std/src/sys/unix/env.rs
+++ b/library/std/src/sys/unix/env.rs
@@ -261,3 +261,14 @@ pub mod os {
pub const EXE_SUFFIX: &str = "";
pub const EXE_EXTENSION: &str = "";
}
+
+#[cfg(target_os = "aix")]
+pub mod os {
+ pub const FAMILY: &str = "unix";
+ pub const OS: &str = "aix";
+ pub const DLL_PREFIX: &str = "lib";
+ pub const DLL_SUFFIX: &str = ".a";
+ pub const DLL_EXTENSION: &str = "a";
+ pub const EXE_SUFFIX: &str = "";
+ pub const EXE_EXTENSION: &str = "";
+}
diff --git a/library/std/src/sys/unix/fd.rs b/library/std/src/sys/unix/fd.rs
index 6c4f40842..bf1fb3123 100644
--- a/library/std/src/sys/unix/fd.rs
+++ b/library/std/src/sys/unix/fd.rs
@@ -126,9 +126,17 @@ impl FileDesc {
}
pub fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<usize> {
- #[cfg(not(any(target_os = "linux", target_os = "android", target_os = "hurd")))]
+ #[cfg(not(any(
+ all(target_os = "linux", not(target_env = "musl")),
+ target_os = "android",
+ target_os = "hurd"
+ )))]
use libc::pread as pread64;
- #[cfg(any(target_os = "linux", target_os = "android", target_os = "hurd"))]
+ #[cfg(any(
+ all(target_os = "linux", not(target_env = "musl")),
+ target_os = "android",
+ target_os = "hurd"
+ ))]
use libc::pread64;
unsafe {
@@ -285,9 +293,17 @@ impl FileDesc {
}
pub fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> {
- #[cfg(not(any(target_os = "linux", target_os = "android", target_os = "hurd")))]
+ #[cfg(not(any(
+ all(target_os = "linux", not(target_env = "musl")),
+ target_os = "android",
+ target_os = "hurd"
+ )))]
use libc::pwrite as pwrite64;
- #[cfg(any(target_os = "linux", target_os = "android", target_os = "hurd"))]
+ #[cfg(any(
+ all(target_os = "linux", not(target_env = "musl")),
+ target_os = "android",
+ target_os = "hurd"
+ ))]
use libc::pwrite64;
unsafe {
diff --git a/library/std/src/sys/unix/fs.rs b/library/std/src/sys/unix/fs.rs
index 764e1f257..40eb910fd 100644
--- a/library/std/src/sys/unix/fs.rs
+++ b/library/std/src/sys/unix/fs.rs
@@ -40,13 +40,17 @@ use libc::{c_int, mode_t};
))]
use libc::c_char;
#[cfg(any(
- target_os = "linux",
+ all(target_os = "linux", not(target_env = "musl")),
target_os = "emscripten",
target_os = "android",
- target_os = "hurd",
+ target_os = "hurd"
))]
use libc::dirfd;
-#[cfg(any(target_os = "linux", target_os = "emscripten", target_os = "hurd"))]
+#[cfg(any(
+ all(target_os = "linux", not(target_env = "musl")),
+ target_os = "emscripten",
+ target_os = "hurd"
+))]
use libc::fstatat64;
#[cfg(any(
target_os = "android",
@@ -54,11 +58,13 @@ use libc::fstatat64;
target_os = "fuchsia",
target_os = "redox",
target_os = "illumos",
+ target_os = "aix",
target_os = "nto",
target_os = "vita",
+ all(target_os = "linux", target_env = "musl"),
))]
use libc::readdir as readdir64;
-#[cfg(any(target_os = "linux", target_os = "hurd"))]
+#[cfg(any(all(target_os = "linux", not(target_env = "musl")), target_os = "hurd"))]
use libc::readdir64;
#[cfg(any(target_os = "emscripten", target_os = "l4re"))]
use libc::readdir64_r;
@@ -71,6 +77,7 @@ use libc::readdir64_r;
target_os = "l4re",
target_os = "fuchsia",
target_os = "redox",
+ target_os = "aix",
target_os = "nto",
target_os = "vita",
target_os = "hurd",
@@ -82,7 +89,7 @@ use libc::{
lstat as lstat64, off64_t, open as open64, stat as stat64,
};
#[cfg(not(any(
- target_os = "linux",
+ all(target_os = "linux", not(target_env = "musl")),
target_os = "emscripten",
target_os = "l4re",
target_os = "android",
@@ -93,7 +100,7 @@ use libc::{
lstat as lstat64, off_t as off64_t, open as open64, stat as stat64,
};
#[cfg(any(
- target_os = "linux",
+ all(target_os = "linux", not(target_env = "musl")),
target_os = "emscripten",
target_os = "l4re",
target_os = "hurd"
@@ -288,6 +295,7 @@ unsafe impl Sync for Dir {}
target_os = "illumos",
target_os = "fuchsia",
target_os = "redox",
+ target_os = "aix",
target_os = "nto",
target_os = "vita",
target_os = "hurd",
@@ -311,6 +319,7 @@ pub struct DirEntry {
target_os = "illumos",
target_os = "fuchsia",
target_os = "redox",
+ target_os = "aix",
target_os = "nto",
target_os = "vita",
target_os = "hurd",
@@ -320,8 +329,9 @@ struct dirent64_min {
#[cfg(not(any(
target_os = "solaris",
target_os = "illumos",
+ target_os = "aix",
target_os = "nto",
- target_os = "vita"
+ target_os = "vita",
)))]
d_type: u8,
}
@@ -333,6 +343,7 @@ struct dirent64_min {
target_os = "illumos",
target_os = "fuchsia",
target_os = "redox",
+ target_os = "aix",
target_os = "nto",
target_os = "vita",
target_os = "hurd",
@@ -464,7 +475,22 @@ impl FileAttr {
}
}
-#[cfg(not(any(target_os = "netbsd", target_os = "nto")))]
+#[cfg(target_os = "aix")]
+impl FileAttr {
+ pub fn modified(&self) -> io::Result<SystemTime> {
+ Ok(SystemTime::new(self.stat.st_mtime.tv_sec as i64, self.stat.st_mtime.tv_nsec as i64))
+ }
+
+ pub fn accessed(&self) -> io::Result<SystemTime> {
+ Ok(SystemTime::new(self.stat.st_atime.tv_sec as i64, self.stat.st_atime.tv_nsec as i64))
+ }
+
+ pub fn created(&self) -> io::Result<SystemTime> {
+ Ok(SystemTime::new(self.stat.st_ctime.tv_sec as i64, self.stat.st_ctime.tv_nsec as i64))
+ }
+}
+
+#[cfg(not(any(target_os = "netbsd", target_os = "nto", target_os = "aix")))]
impl FileAttr {
#[cfg(not(any(
target_os = "vxworks",
@@ -671,6 +697,7 @@ impl Iterator for ReadDir {
target_os = "fuchsia",
target_os = "redox",
target_os = "illumos",
+ target_os = "aix",
target_os = "nto",
target_os = "vita",
target_os = "hurd",
@@ -748,6 +775,7 @@ impl Iterator for ReadDir {
#[cfg(not(any(
target_os = "solaris",
target_os = "illumos",
+ target_os = "aix",
target_os = "nto",
)))]
d_type: *offset_ptr!(entry_ptr, d_type) as u8,
@@ -772,6 +800,7 @@ impl Iterator for ReadDir {
target_os = "fuchsia",
target_os = "redox",
target_os = "illumos",
+ target_os = "aix",
target_os = "nto",
target_os = "vita",
target_os = "hurd",
@@ -829,10 +858,10 @@ impl DirEntry {
#[cfg(all(
any(
- target_os = "linux",
+ all(target_os = "linux", not(target_env = "musl")),
target_os = "emscripten",
target_os = "android",
- target_os = "hurd",
+ target_os = "hurd"
),
not(miri)
))]
@@ -858,7 +887,7 @@ impl DirEntry {
#[cfg(any(
not(any(
- target_os = "linux",
+ all(target_os = "linux", not(target_env = "musl")),
target_os = "emscripten",
target_os = "android",
target_os = "hurd",
@@ -874,6 +903,7 @@ impl DirEntry {
target_os = "illumos",
target_os = "haiku",
target_os = "vxworks",
+ target_os = "aix",
target_os = "nto",
target_os = "vita",
))]
@@ -886,6 +916,7 @@ impl DirEntry {
target_os = "illumos",
target_os = "haiku",
target_os = "vxworks",
+ target_os = "aix",
target_os = "nto",
target_os = "vita",
)))]
@@ -920,6 +951,7 @@ impl DirEntry {
target_os = "espidf",
target_os = "horizon",
target_os = "vita",
+ target_os = "aix",
target_os = "nto",
target_os = "hurd",
))]
@@ -977,6 +1009,7 @@ impl DirEntry {
target_os = "illumos",
target_os = "fuchsia",
target_os = "redox",
+ target_os = "aix",
target_os = "nto",
target_os = "vita",
target_os = "hurd",
@@ -991,6 +1024,7 @@ impl DirEntry {
target_os = "illumos",
target_os = "fuchsia",
target_os = "redox",
+ target_os = "aix",
target_os = "nto",
target_os = "vita",
target_os = "hurd",
@@ -1391,6 +1425,7 @@ impl FromInner<FileDesc> for File {
}
impl AsFd for File {
+ #[inline]
fn as_fd(&self) -> BorrowedFd<'_> {
self.0.as_fd()
}
@@ -2025,6 +2060,7 @@ mod remove_dir_impl {
target_os = "illumos",
target_os = "haiku",
target_os = "vxworks",
+ target_os = "aix",
))]
fn is_dir(_ent: &DirEntry) -> Option<bool> {
None
@@ -2035,6 +2071,7 @@ mod remove_dir_impl {
target_os = "illumos",
target_os = "haiku",
target_os = "vxworks",
+ target_os = "aix",
)))]
fn is_dir(ent: &DirEntry) -> Option<bool> {
match ent.entry.d_type {
diff --git a/library/std/src/sys/unix/mod.rs b/library/std/src/sys/unix/mod.rs
index 3edafde71..4b28f6feb 100644
--- a/library/std/src/sys/unix/mod.rs
+++ b/library/std/src/sys/unix/mod.rs
@@ -241,6 +241,7 @@ pub unsafe fn cleanup() {
#[cfg(target_os = "android")]
pub use crate::sys::android::signal;
+#[allow(unused_imports)]
#[cfg(not(target_os = "android"))]
pub use libc::signal;
@@ -278,6 +279,7 @@ pub fn decode_error_kind(errno: i32) -> ErrorKind {
libc::ENETUNREACH => NetworkUnreachable,
libc::ENOTCONN => NotConnected,
libc::ENOTDIR => NotADirectory,
+ #[cfg(not(target_os = "aix"))]
libc::ENOTEMPTY => DirectoryNotEmpty,
libc::EPIPE => BrokenPipe,
libc::EROFS => ReadOnlyFilesystem,
@@ -413,7 +415,6 @@ cfg_if::cfg_if! {
} else if #[cfg(any(target_os = "ios", target_os = "tvos", target_os = "watchos"))] {
#[link(name = "System")]
#[link(name = "objc")]
- #[link(name = "Security", kind = "framework")]
#[link(name = "Foundation", kind = "framework")]
extern "C" {}
} else if #[cfg(target_os = "fuchsia")] {
diff --git a/library/std/src/sys/unix/net.rs b/library/std/src/sys/unix/net.rs
index f450d708d..ec861f9cb 100644
--- a/library/std/src/sys/unix/net.rs
+++ b/library/std/src/sys/unix/net.rs
@@ -6,6 +6,7 @@ use crate::net::{Shutdown, SocketAddr};
use crate::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, RawFd};
use crate::str;
use crate::sys::fd::FileDesc;
+use crate::sys::unix::IsMinusOne;
use crate::sys_common::net::{getsockopt, setsockopt, sockaddr_to_addr};
use crate::sys_common::{AsInner, FromInner, IntoInner};
use crate::time::{Duration, Instant};
@@ -103,7 +104,7 @@ impl Socket {
}
}
- #[cfg(not(any(target_os = "vxworks", target_os = "vita")))]
+ #[cfg(not(target_os = "vxworks"))]
pub fn new_pair(fam: c_int, ty: c_int) -> io::Result<(Socket, Socket)> {
unsafe {
let mut fds = [0, 0];
@@ -135,11 +136,27 @@ impl Socket {
}
}
- #[cfg(any(target_os = "vxworks", target_os = "vita"))]
+ #[cfg(target_os = "vxworks")]
pub fn new_pair(_fam: c_int, _ty: c_int) -> io::Result<(Socket, Socket)> {
unimplemented!()
}
+ pub fn connect(&self, addr: &SocketAddr) -> io::Result<()> {
+ let (addr, len) = addr.into_inner();
+ loop {
+ let result = unsafe { libc::connect(self.as_raw_fd(), addr.as_ptr(), len) };
+ if result.is_minus_one() {
+ let err = crate::sys::os::errno();
+ match err {
+ libc::EINTR => continue,
+ libc::EISCONN => return Ok(()),
+ _ => return Err(io::Error::from_raw_os_error(err)),
+ }
+ }
+ return Ok(());
+ }
+ }
+
pub fn connect_timeout(&self, addr: &SocketAddr, timeout: Duration) -> io::Result<()> {
self.set_nonblocking(true)?;
let r = unsafe {
diff --git a/library/std/src/sys/unix/os.rs b/library/std/src/sys/unix/os.rs
index 01ff375d2..dc3c037c0 100644
--- a/library/std/src/sys/unix/os.rs
+++ b/library/std/src/sys/unix/os.rs
@@ -74,6 +74,7 @@ extern "C" {
link_name = "__error"
)]
#[cfg_attr(target_os = "haiku", link_name = "_errnop")]
+ #[cfg_attr(target_os = "aix", link_name = "_Errno")]
fn errno_location() -> *mut c_int;
}
@@ -254,6 +255,41 @@ impl StdError for JoinPathsError {
}
}
+#[cfg(target_os = "aix")]
+pub fn current_exe() -> io::Result<PathBuf> {
+ use crate::io::ErrorKind;
+
+ #[cfg(test)]
+ use realstd::env;
+
+ #[cfg(not(test))]
+ use crate::env;
+
+ let exe_path = env::args().next().ok_or(io::const_io_error!(
+ ErrorKind::NotFound,
+ "an executable path was not found because no arguments were provided through argv"
+ ))?;
+ let path = PathBuf::from(exe_path);
+ if path.is_absolute() {
+ return path.canonicalize();
+ }
+ // Search PWD to infer current_exe.
+ if let Some(pstr) = path.to_str() && pstr.contains("/") {
+ return getcwd().map(|cwd| cwd.join(path))?.canonicalize();
+ }
+ // Search PATH to infer current_exe.
+ if let Some(p) = getenv(OsStr::from_bytes("PATH".as_bytes())) {
+ for search_path in split_paths(&p) {
+ let pb = search_path.join(&path);
+ if pb.is_file() && let Ok(metadata) = crate::fs::metadata(&pb) &&
+ metadata.permissions().mode() & 0o111 != 0 {
+ return pb.canonicalize();
+ }
+ }
+ }
+ Err(io::const_io_error!(ErrorKind::NotFound, "an executable path was not found"))
+}
+
#[cfg(any(target_os = "freebsd", target_os = "dragonfly"))]
pub fn current_exe() -> io::Result<PathBuf> {
unsafe {
diff --git a/library/std/src/sys/unix/process/mod.rs b/library/std/src/sys/unix/process/mod.rs
index 0cf163d9f..074f0a105 100644
--- a/library/std/src/sys/unix/process/mod.rs
+++ b/library/std/src/sys/unix/process/mod.rs
@@ -1,11 +1,13 @@
pub use self::process_common::{Command, CommandArgs, ExitCode, Stdio, StdioPipes};
pub use self::process_inner::{ExitStatus, ExitStatusError, Process};
pub use crate::ffi::OsString as EnvKey;
-pub use crate::sys_common::process::CommandEnvs;
#[cfg_attr(any(target_os = "espidf", target_os = "horizon"), allow(unused))]
mod process_common;
+#[cfg(any(target_os = "espidf", target_os = "horizon", target_os = "vita"))]
+mod process_unsupported;
+
cfg_if::cfg_if! {
if #[cfg(target_os = "fuchsia")] {
#[path = "process_fuchsia.rs"]
@@ -15,8 +17,9 @@ cfg_if::cfg_if! {
#[path = "process_vxworks.rs"]
mod process_inner;
} else if #[cfg(any(target_os = "espidf", target_os = "horizon", target_os = "vita"))] {
- #[path = "process_unsupported.rs"]
- mod process_inner;
+ mod process_inner {
+ pub use super::process_unsupported::*;
+ }
} else {
#[path = "process_unix.rs"]
mod process_inner;
diff --git a/library/std/src/sys/unix/process/process_common.rs b/library/std/src/sys/unix/process/process_common.rs
index 1ca11a7f9..bac32d9e6 100644
--- a/library/std/src/sys/unix/process/process_common.rs
+++ b/library/std/src/sys/unix/process/process_common.rs
@@ -75,6 +75,7 @@ cfg_if::cfg_if! {
return 0;
}
} else {
+ #[allow(unused_imports)]
pub use libc::{sigemptyset, sigaddset};
}
}
diff --git a/library/std/src/sys/unix/process/process_common/tests.rs b/library/std/src/sys/unix/process/process_common/tests.rs
index 03631e4e3..4e41efc90 100644
--- a/library/std/src/sys/unix/process/process_common/tests.rs
+++ b/library/std/src/sys/unix/process/process_common/tests.rs
@@ -159,3 +159,36 @@ fn test_program_kind() {
);
}
}
+
+// Test that Rust std handles wait status values (`ExitStatus`) the way that Unix does,
+// at least for the values which represent a Unix exit status (`ExitCode`).
+// Should work on every #[cfg(unix)] platform. However:
+#[cfg(not(any(
+ // Fuchsia is not Unix and has totally broken std::os::unix.
+ // https://github.com/rust-lang/rust/issues/58590#issuecomment-836535609
+ target_os = "fuchsia",
+)))]
+#[test]
+fn unix_exit_statuses() {
+ use crate::num::NonZeroI32;
+ use crate::os::unix::process::ExitStatusExt;
+ use crate::process::*;
+
+ for exit_code in 0..=0xff {
+ // FIXME impl From<ExitCode> for ExitStatus and then test that here too;
+ // the two ExitStatus values should be the same
+ let raw_wait_status = exit_code << 8;
+ let exit_status = ExitStatus::from_raw(raw_wait_status);
+
+ assert_eq!(exit_status.code(), Some(exit_code));
+
+ if let Ok(nz) = NonZeroI32::try_from(exit_code) {
+ assert!(!exit_status.success());
+ let es_error = exit_status.exit_ok().unwrap_err();
+ assert_eq!(es_error.code().unwrap(), i32::from(nz));
+ } else {
+ assert!(exit_status.success());
+ assert_eq!(exit_status.exit_ok(), Ok(()));
+ }
+ }
+}
diff --git a/library/std/src/sys/unix/process/process_unix.rs b/library/std/src/sys/unix/process/process_unix.rs
index 564f8c482..72aca4e66 100644
--- a/library/std/src/sys/unix/process/process_unix.rs
+++ b/library/std/src/sys/unix/process/process_unix.rs
@@ -1074,3 +1074,8 @@ impl crate::os::linux::process::ChildExt for crate::process::Child {
#[cfg(test)]
#[path = "process_unix/tests.rs"]
mod tests;
+
+// See [`process_unsupported_wait_status::compare_with_linux`];
+#[cfg(all(test, target_os = "linux"))]
+#[path = "process_unsupported/wait_status.rs"]
+mod process_unsupported_wait_status;
diff --git a/library/std/src/sys/unix/process/process_unsupported.rs b/library/std/src/sys/unix/process/process_unsupported.rs
index 8e0b971af..2fbb31922 100644
--- a/library/std/src/sys/unix/process/process_unsupported.rs
+++ b/library/std/src/sys/unix/process/process_unsupported.rs
@@ -55,68 +55,20 @@ impl Process {
}
}
-#[derive(PartialEq, Eq, Clone, Copy, Debug, Default)]
-pub struct ExitStatus(c_int);
-
-impl ExitStatus {
- #[cfg_attr(target_os = "horizon", allow(unused))]
- pub fn success(&self) -> bool {
- self.code() == Some(0)
- }
-
- pub fn exit_ok(&self) -> Result<(), ExitStatusError> {
- Err(ExitStatusError(1.try_into().unwrap()))
- }
-
- pub fn code(&self) -> Option<i32> {
- None
- }
-
- pub fn signal(&self) -> Option<i32> {
- None
- }
-
- pub fn core_dumped(&self) -> bool {
- false
- }
-
- pub fn stopped_signal(&self) -> Option<i32> {
- None
- }
-
- pub fn continued(&self) -> bool {
- false
- }
-
- pub fn into_raw(&self) -> c_int {
- 0
- }
-}
-
-/// Converts a raw `c_int` to a type-safe `ExitStatus` by wrapping it without copying.
-impl From<c_int> for ExitStatus {
- fn from(a: c_int) -> ExitStatus {
- ExitStatus(a as i32)
- }
-}
-
-impl fmt::Display for ExitStatus {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(f, "exit code: {}", self.0)
- }
-}
+mod wait_status;
+pub use wait_status::ExitStatus;
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub struct ExitStatusError(NonZero_c_int);
impl Into<ExitStatus> for ExitStatusError {
fn into(self) -> ExitStatus {
- ExitStatus(self.0.into())
+ ExitStatus::from(c_int::from(self.0))
}
}
impl ExitStatusError {
pub fn code(self) -> Option<NonZeroI32> {
- ExitStatus(self.0.into()).code().map(|st| st.try_into().unwrap())
+ ExitStatus::from(c_int::from(self.0)).code().map(|st| st.try_into().unwrap())
}
}
diff --git a/library/std/src/sys/unix/process/process_unsupported/wait_status.rs b/library/std/src/sys/unix/process/process_unsupported/wait_status.rs
new file mode 100644
index 000000000..72b7ae18c
--- /dev/null
+++ b/library/std/src/sys/unix/process/process_unsupported/wait_status.rs
@@ -0,0 +1,84 @@
+//! Emulated wait status for non-Unix #[cfg(unix)] platforms
+//!
+//! Separate module to facilitate testing against a real Unix implementation.
+use core::ffi::NonZero_c_int;
+
+use crate::ffi::c_int;
+use crate::fmt;
+
+use super::ExitStatusError;
+
+/// Emulated wait status for use by `process_unsupported.rs`
+///
+/// Uses the "traditional unix" encoding. For use on platforms which are `#[cfg(unix)]`
+/// but do not actually support subprocesses at all.
+///
+/// These platforms aren't Unix, but are simply pretending to be for porting convenience.
+/// So, we provide a faithful pretence here.
+#[derive(PartialEq, Eq, Clone, Copy, Debug, Default)]
+pub struct ExitStatus {
+ wait_status: c_int,
+}
+
+/// Converts a raw `c_int` to a type-safe `ExitStatus` by wrapping it
+impl From<c_int> for ExitStatus {
+ fn from(wait_status: c_int) -> ExitStatus {
+ ExitStatus { wait_status }
+ }
+}
+
+impl fmt::Display for ExitStatus {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "emulated wait status: {}", self.wait_status)
+ }
+}
+
+impl ExitStatus {
+ pub fn code(&self) -> Option<i32> {
+        // Linux and FreeBSD both agree that values like 0x80
+ // count as "WIFEXITED" even though this is quite mad.
+ // Likewise the macros disregard all the high bits, so are happy to declare
+ // out-of-range values to be WIFEXITED, WIFSTOPPED, etc.
+ let w = self.wait_status;
+ if (w & 0x7f) == 0 { Some((w & 0xff00) >> 8) } else { None }
+ }
+
+ #[allow(unused)]
+ pub fn exit_ok(&self) -> Result<(), ExitStatusError> {
+ // This assumes that WIFEXITED(status) && WEXITSTATUS==0 corresponds to status==0. This is
+ // true on all actual versions of Unix, is widely assumed, and is specified in SuS
+ // https://pubs.opengroup.org/onlinepubs/9699919799/functions/wait.html. If it is not
+ // true for a platform pretending to be Unix, the tests (our doctests, and also
+ // process_unix/tests.rs) will spot it. `ExitStatusError::code` assumes this too.
+ match NonZero_c_int::try_from(self.wait_status) {
+ /* was nonzero */ Ok(failure) => Err(ExitStatusError(failure)),
+ /* was zero, couldn't convert */ Err(_) => Ok(()),
+ }
+ }
+
+ pub fn signal(&self) -> Option<i32> {
+ let signal = self.wait_status & 0x007f;
+ if signal > 0 && signal < 0x7f { Some(signal) } else { None }
+ }
+
+ pub fn core_dumped(&self) -> bool {
+ self.signal().is_some() && (self.wait_status & 0x80) != 0
+ }
+
+ pub fn stopped_signal(&self) -> Option<i32> {
+ let w = self.wait_status;
+ if (w & 0xff) == 0x7f { Some((w & 0xff00) >> 8) } else { None }
+ }
+
+ pub fn continued(&self) -> bool {
+ self.wait_status == 0xffff
+ }
+
+ pub fn into_raw(&self) -> c_int {
+ self.wait_status
+ }
+}
+
+#[cfg(test)]
+#[path = "wait_status/tests.rs"] // needed because of strange layout of process_unsupported
+mod tests;
diff --git a/library/std/src/sys/unix/process/process_unsupported/wait_status/tests.rs b/library/std/src/sys/unix/process/process_unsupported/wait_status/tests.rs
new file mode 100644
index 000000000..5132eab10
--- /dev/null
+++ b/library/std/src/sys/unix/process/process_unsupported/wait_status/tests.rs
@@ -0,0 +1,36 @@
+// Note that tests in this file are run on Linux as well as on platforms using process_unsupported
+
+// Test that our emulation exactly matches Linux
+//
+// This test runs *on Linux* but it tests
+// the implementation used on non-Unix `#[cfg(unix)]` platforms.
+//
+// I.e. we're using Linux as a proxy for "trad unix".
+#[cfg(target_os = "linux")]
+#[test]
+fn compare_with_linux() {
+ use super::ExitStatus as Emulated;
+ use crate::os::unix::process::ExitStatusExt as _;
+ use crate::process::ExitStatus as Real;
+
+ // Check that we handle out-of-range values similarly, too.
+ for wstatus in -0xf_ffff..0xf_ffff {
+ let emulated = Emulated::from(wstatus);
+ let real = Real::from_raw(wstatus);
+
+ macro_rules! compare { { $method:ident } => {
+ assert_eq!(
+ emulated.$method(),
+ real.$method(),
+ "{wstatus:#x}.{}()",
+ stringify!($method),
+ );
+ } }
+ compare!(code);
+ compare!(signal);
+ compare!(core_dumped);
+ compare!(stopped_signal);
+ compare!(continued);
+ compare!(into_raw);
+ }
+}
diff --git a/library/std/src/sys/unix/rand.rs b/library/std/src/sys/unix/rand.rs
index fbf158f56..2825d1677 100644
--- a/library/std/src/sys/unix/rand.rs
+++ b/library/std/src/sys/unix/rand.rs
@@ -62,18 +62,15 @@ mod imp {
unsafe { getrandom(buf.as_mut_ptr().cast(), buf.len(), libc::GRND_NONBLOCK) }
}
- #[cfg(any(target_os = "espidf", target_os = "horizon"))]
+ #[cfg(any(target_os = "espidf", target_os = "horizon", target_os = "freebsd"))]
fn getrandom(buf: &mut [u8]) -> libc::ssize_t {
- unsafe { libc::getrandom(buf.as_mut_ptr().cast(), buf.len(), 0) }
- }
-
- #[cfg(target_os = "freebsd")]
- fn getrandom(buf: &mut [u8]) -> libc::ssize_t {
- // FIXME: using the above when libary std's libc is updated
+ #[cfg(not(target_os = "freebsd"))]
+ use libc::getrandom;
+ #[cfg(target_os = "freebsd")]
extern "C" {
fn getrandom(
- buffer: *mut libc::c_void,
- length: libc::size_t,
+ buf: *mut libc::c_void,
+ buflen: libc::size_t,
flags: libc::c_uint,
) -> libc::ssize_t;
}
@@ -154,40 +151,65 @@ mod imp {
}
}
-#[cfg(target_os = "macos")]
+#[cfg(target_vendor = "apple")]
mod imp {
- use crate::fs::File;
- use crate::io::Read;
- use crate::sys::os::errno;
- use crate::sys::weak::weak;
+ use crate::io;
use libc::{c_int, c_void, size_t};
- fn getentropy_fill_bytes(v: &mut [u8]) -> bool {
- weak!(fn getentropy(*mut c_void, size_t) -> c_int);
-
- getentropy
- .get()
- .map(|f| {
- // getentropy(2) permits a maximum buffer size of 256 bytes
- for s in v.chunks_mut(256) {
- let ret = unsafe { f(s.as_mut_ptr() as *mut c_void, s.len()) };
- if ret == -1 {
- panic!("unexpected getentropy error: {}", errno());
- }
- }
- true
- })
- .unwrap_or(false)
+ #[inline(always)]
+ fn random_failure() -> ! {
+ panic!("unexpected random generation error: {}", io::Error::last_os_error());
}
- pub fn fill_bytes(v: &mut [u8]) {
- if getentropy_fill_bytes(v) {
- return;
+ #[cfg(target_os = "macos")]
+ fn getentropy_fill_bytes(v: &mut [u8]) {
+ extern "C" {
+ fn getentropy(bytes: *mut c_void, count: size_t) -> c_int;
}
- // for older macos which doesn't support getentropy
- let mut file = File::open("/dev/urandom").expect("failed to open /dev/urandom");
- file.read_exact(v).expect("failed to read /dev/urandom")
+ // getentropy(2) permits a maximum buffer size of 256 bytes
+ for s in v.chunks_mut(256) {
+ let ret = unsafe { getentropy(s.as_mut_ptr().cast(), s.len()) };
+ if ret == -1 {
+ random_failure()
+ }
+ }
+ }
+
+ #[cfg(not(target_os = "macos"))]
+ fn ccrandom_fill_bytes(v: &mut [u8]) {
+ extern "C" {
+ fn CCRandomGenerateBytes(bytes: *mut c_void, count: size_t) -> c_int;
+ }
+
+ let ret = unsafe { CCRandomGenerateBytes(v.as_mut_ptr().cast(), v.len()) };
+ if ret == -1 {
+ random_failure()
+ }
+ }
+
+ pub fn fill_bytes(v: &mut [u8]) {
+ // All supported versions of macOS (10.12+) support getentropy.
+ //
+        // `getentropy` is measurably faster (via Divan) than the other alternatives, so it's preferred
+ // when usable.
+ #[cfg(target_os = "macos")]
+ getentropy_fill_bytes(v);
+
+ // On Apple platforms, `CCRandomGenerateBytes` and `SecRandomCopyBytes` simply
+ // call into `CCRandomCopyBytes` with `kCCRandomDefault`. `CCRandomCopyBytes`
+ // manages a CSPRNG which is seeded from the kernel's CSPRNG and which runs on
+ // its own thread accessed via GCD. This seems needlessly heavyweight for our purposes
+ // so we only use it on non-Mac OSes where the better entrypoints are blocked.
+ //
+ // `CCRandomGenerateBytes` is used instead of `SecRandomCopyBytes` because the former is accessible
+ // via `libSystem` (libc) while the other needs to link to `Security.framework`.
+ //
+        // Note that while `getentropy` has an availability attribute in the macOS headers, the lack
+        // of a header in the iOS (and others) SDK means that it can cause app store rejections.
+ // Just use `CCRandomGenerateBytes` instead.
+ #[cfg(not(target_os = "macos"))]
+ ccrandom_fill_bytes(v);
}
}
@@ -206,36 +228,7 @@ mod imp {
}
}
-// On iOS and MacOS `SecRandomCopyBytes` calls `CCRandomCopyBytes` with
-// `kCCRandomDefault`. `CCRandomCopyBytes` manages a CSPRNG which is seeded
-// from `/dev/random` and which runs on its own thread accessed via GCD.
-// This seems needlessly heavyweight for the purposes of generating two u64s
-// once per thread in `hashmap_random_keys`. Therefore `SecRandomCopyBytes` is
-// only used on iOS where direct access to `/dev/urandom` is blocked by the
-// sandbox.
-#[cfg(any(target_os = "ios", target_os = "tvos", target_os = "watchos"))]
-mod imp {
- use crate::io;
- use crate::ptr;
- use libc::{c_int, size_t};
-
- enum SecRandom {}
-
- #[allow(non_upper_case_globals)]
- const kSecRandomDefault: *const SecRandom = ptr::null();
-
- extern "C" {
- fn SecRandomCopyBytes(rnd: *const SecRandom, count: size_t, bytes: *mut u8) -> c_int;
- }
-
- pub fn fill_bytes(v: &mut [u8]) {
- let ret = unsafe { SecRandomCopyBytes(kSecRandomDefault, v.len(), v.as_mut_ptr()) };
- if ret == -1 {
- panic!("couldn't generate random bytes: {}", io::Error::last_os_error());
- }
- }
-}
-
+// FIXME: once the 10.x release becomes the minimum, this can be dropped for simplification.
#[cfg(target_os = "netbsd")]
mod imp {
use crate::ptr;
diff --git a/library/std/src/sys/unix/stack_overflow.rs b/library/std/src/sys/unix/stack_overflow.rs
index 73c530786..3dbab4cc4 100644
--- a/library/std/src/sys/unix/stack_overflow.rs
+++ b/library/std/src/sys/unix/stack_overflow.rs
@@ -134,9 +134,19 @@ mod imp {
// OpenBSD requires this flag for stack mapping
// otherwise the said mapping will fail as a no-op on most systems
// and has a different meaning on FreeBSD
- #[cfg(any(target_os = "openbsd", target_os = "netbsd", target_os = "linux",))]
+ #[cfg(any(
+ target_os = "openbsd",
+ target_os = "netbsd",
+ target_os = "linux",
+ target_os = "dragonfly",
+ ))]
let flags = MAP_PRIVATE | MAP_ANON | libc::MAP_STACK;
- #[cfg(not(any(target_os = "openbsd", target_os = "netbsd", target_os = "linux",)))]
+ #[cfg(not(any(
+ target_os = "openbsd",
+ target_os = "netbsd",
+ target_os = "linux",
+ target_os = "dragonfly",
+ )))]
let flags = MAP_PRIVATE | MAP_ANON;
let stackp =
mmap64(ptr::null_mut(), SIGSTKSZ + page_size(), PROT_READ | PROT_WRITE, flags, -1, 0);
diff --git a/library/std/src/sys/unix/thread.rs b/library/std/src/sys/unix/thread.rs
index 311ed9502..29db9468e 100644
--- a/library/std/src/sys/unix/thread.rs
+++ b/library/std/src/sys/unix/thread.rs
@@ -207,7 +207,9 @@ impl Thread {
pub fn set_name(name: &CStr) {
unsafe {
let thread_self = libc::find_thread(ptr::null_mut());
- libc::rename_thread(thread_self, name.as_ptr());
+ let res = libc::rename_thread(thread_self, name.as_ptr());
+ // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked.
+ debug_assert_eq!(res, libc::B_OK);
}
}
@@ -218,6 +220,7 @@ impl Thread {
target_os = "redox",
target_os = "vxworks",
target_os = "hurd",
+ target_os = "aix",
))]
pub fn set_name(_name: &CStr) {
// Newlib, Emscripten, and VxWorks have no way to set a thread name.
@@ -317,6 +320,7 @@ pub fn available_parallelism() -> io::Result<NonZeroUsize> {
target_os = "macos",
target_os = "solaris",
target_os = "illumos",
+ target_os = "aix",
))] {
#[allow(unused_assignments)]
#[allow(unused_mut)]
diff --git a/library/std/src/sys/unix/thread_local_dtor.rs b/library/std/src/sys/unix/thread_local_dtor.rs
index fba2a676f..06399e8a2 100644
--- a/library/std/src/sys/unix/thread_local_dtor.rs
+++ b/library/std/src/sys/unix/thread_local_dtor.rs
@@ -11,28 +11,47 @@
// Note, however, that we run on lots older linuxes, as well as cross
// compiling from a newer linux to an older linux, so we also have a
// fallback implementation to use as well.
+#[allow(unexpected_cfgs)]
#[cfg(any(target_os = "linux", target_os = "fuchsia", target_os = "redox", target_os = "hurd"))]
+// FIXME: The Rust compiler currently omits weak function definitions (i.e.,
+// __cxa_thread_atexit_impl) and their metadata from LLVM IR.
+#[no_sanitize(cfi, kcfi)]
pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
use crate::mem;
use crate::sys_common::thread_local_dtor::register_dtor_fallback;
+ /// This is necessary because the __cxa_thread_atexit_impl implementation
+ /// std links to by default may be a C or C++ implementation that was not
+ /// compiled using the Clang integer normalization option.
+ #[cfg(not(sanitizer_cfi_normalize_integers))]
+ #[cfi_encoding = "i"]
+ #[repr(transparent)]
+ pub struct c_int(pub libc::c_int);
+
extern "C" {
#[linkage = "extern_weak"]
static __dso_handle: *mut u8;
#[linkage = "extern_weak"]
- static __cxa_thread_atexit_impl: *const libc::c_void;
+ static __cxa_thread_atexit_impl: Option<
+ extern "C" fn(
+ unsafe extern "C" fn(*mut libc::c_void),
+ *mut libc::c_void,
+ *mut libc::c_void,
+ ) -> c_int,
+ >;
}
- if !__cxa_thread_atexit_impl.is_null() {
- type F = unsafe extern "C" fn(
- dtor: unsafe extern "C" fn(*mut u8),
- arg: *mut u8,
- dso_handle: *mut u8,
- ) -> libc::c_int;
- mem::transmute::<*const libc::c_void, F>(__cxa_thread_atexit_impl)(
- dtor,
- t,
- &__dso_handle as *const _ as *mut _,
- );
+
+ if let Some(f) = __cxa_thread_atexit_impl {
+ unsafe {
+ f(
+ mem::transmute::<
+ unsafe extern "C" fn(*mut u8),
+ unsafe extern "C" fn(*mut libc::c_void),
+ >(dtor),
+ t.cast(),
+ &__dso_handle as *const _ as *mut _,
+ );
+ }
return;
}
register_dtor_fallback(t, dtor);
@@ -48,17 +67,16 @@ pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
// workaround below is to register, via _tlv_atexit, a custom DTOR list once per
// thread. thread_local dtors are pushed to the DTOR list without calling
// _tlv_atexit.
-#[cfg(any(target_os = "macos", target_os = "ios", target_os = "watchos"))]
+#[cfg(any(target_os = "macos", target_os = "ios", target_os = "watchos", target_os = "tvos"))]
pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
- use crate::cell::Cell;
- use crate::mem;
+ use crate::cell::{Cell, RefCell};
use crate::ptr;
#[thread_local]
static REGISTERED: Cell<bool> = Cell::new(false);
#[thread_local]
- static mut DTORS: Vec<(*mut u8, unsafe extern "C" fn(*mut u8))> = Vec::new();
+ static DTORS: RefCell<Vec<(*mut u8, unsafe extern "C" fn(*mut u8))>> = RefCell::new(Vec::new());
if !REGISTERED.get() {
_tlv_atexit(run_dtors, ptr::null_mut());
@@ -69,21 +87,28 @@ pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
fn _tlv_atexit(dtor: unsafe extern "C" fn(*mut u8), arg: *mut u8);
}
- let list = &mut DTORS;
- list.push((t, dtor));
+ match DTORS.try_borrow_mut() {
+ Ok(mut dtors) => dtors.push((t, dtor)),
+ Err(_) => rtabort!("global allocator may not use TLS"),
+ }
unsafe extern "C" fn run_dtors(_: *mut u8) {
- let mut list = mem::take(&mut DTORS);
+ let mut list = DTORS.take();
while !list.is_empty() {
for (ptr, dtor) in list {
dtor(ptr);
}
- list = mem::take(&mut DTORS);
+ list = DTORS.take();
}
}
}
-#[cfg(any(target_os = "vxworks", target_os = "horizon", target_os = "emscripten"))]
+#[cfg(any(
+ target_os = "vxworks",
+ target_os = "horizon",
+ target_os = "emscripten",
+ target_os = "aix"
+))]
#[cfg_attr(target_family = "wasm", allow(unused))] // might remain unused depending on target details (e.g. wasm32-unknown-emscripten)
pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
use crate::sys_common::thread_local_dtor::register_dtor_fallback;
diff --git a/library/std/src/sys/unix/time.rs b/library/std/src/sys/unix/time.rs
index 4fe61b284..f2e86a4fb 100644
--- a/library/std/src/sys/unix/time.rs
+++ b/library/std/src/sys/unix/time.rs
@@ -1,8 +1,6 @@
use crate::fmt;
use crate::time::Duration;
-pub use self::inner::Instant;
-
const NSEC_PER_SEC: u64 = 1_000_000_000;
pub const UNIX_EPOCH: SystemTime = SystemTime { t: Timespec::zero() };
#[allow(dead_code)] // Used for pthread condvar timeouts
@@ -40,6 +38,10 @@ impl SystemTime {
SystemTime { t: Timespec::new(tv_sec, tv_nsec) }
}
+ pub fn now() -> SystemTime {
+ SystemTime { t: Timespec::now(libc::CLOCK_REALTIME) }
+ }
+
pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
self.t.sub_timespec(&other.t)
}
@@ -74,11 +76,65 @@ impl Timespec {
}
const fn new(tv_sec: i64, tv_nsec: i64) -> Timespec {
+ // On Apple OS, dates before epoch are represented differently than on other
+ // Unix platforms: e.g. 1/10th of a second before epoch is represented as `seconds=-1`
+ // and `nanoseconds=100_000_000` on other platforms, but is `seconds=0` and
+ // `nanoseconds=-900_000_000` on Apple OS.
+ //
+ // To compensate, we first detect this special case by checking if both
+ // seconds and nanoseconds are in range, and then correct the value for seconds
+ // and nanoseconds to match the common unix representation.
+ //
+ // Please note that Apple OS nonetheless accepts the standard unix format when
+ // setting file times, which makes this compensation round-trippable and generally
+ // transparent.
+ #[cfg(any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "tvos",
+ target_os = "watchos"
+ ))]
+ let (tv_sec, tv_nsec) =
+ if (tv_sec <= 0 && tv_sec > i64::MIN) && (tv_nsec < 0 && tv_nsec > -1_000_000_000) {
+ (tv_sec - 1, tv_nsec + 1_000_000_000)
+ } else {
+ (tv_sec, tv_nsec)
+ };
assert!(tv_nsec >= 0 && tv_nsec < NSEC_PER_SEC as i64);
// SAFETY: The assert above checks tv_nsec is within the valid range
Timespec { tv_sec, tv_nsec: unsafe { Nanoseconds(tv_nsec as u32) } }
}
+ pub fn now(clock: libc::clockid_t) -> Timespec {
+ use crate::mem::MaybeUninit;
+ use crate::sys::cvt;
+
+ // Try to use 64-bit time in preparation for Y2038.
+ #[cfg(all(
+ target_os = "linux",
+ target_env = "gnu",
+ target_pointer_width = "32",
+ not(target_arch = "riscv32")
+ ))]
+ {
+ use crate::sys::weak::weak;
+
+ // __clock_gettime64 was added to 32-bit arches in glibc 2.34,
+ // and it handles both vDSO calls and ENOSYS fallbacks itself.
+ weak!(fn __clock_gettime64(libc::clockid_t, *mut __timespec64) -> libc::c_int);
+
+ if let Some(clock_gettime64) = __clock_gettime64.get() {
+ let mut t = MaybeUninit::uninit();
+ cvt(unsafe { clock_gettime64(clock, t.as_mut_ptr()) }).unwrap();
+ return Timespec::from(unsafe { t.assume_init() });
+ }
+ }
+
+ let mut t = MaybeUninit::uninit();
+ cvt(unsafe { libc::clock_gettime(clock, t.as_mut_ptr()) }).unwrap();
+ Timespec::from(unsafe { t.assume_init() })
+ }
+
pub fn sub_timespec(&self, other: &Timespec) -> Result<Duration, Duration> {
if self >= other {
// NOTE(eddyb) two aspects of this `if`-`else` are required for LLVM
@@ -216,209 +272,59 @@ impl From<__timespec64> for Timespec {
}
}
-#[cfg(any(
- all(target_os = "macos", any(not(target_arch = "aarch64"))),
- target_os = "ios",
- target_os = "watchos",
- target_os = "tvos"
-))]
-mod inner {
- use crate::sync::atomic::{AtomicU64, Ordering};
- use crate::sys::cvt;
- use crate::sys_common::mul_div_u64;
- use crate::time::Duration;
-
- use super::{SystemTime, Timespec, NSEC_PER_SEC};
-
- #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
- pub struct Instant {
- t: u64,
- }
-
- #[repr(C)]
- #[derive(Copy, Clone)]
- struct mach_timebase_info {
- numer: u32,
- denom: u32,
- }
- type mach_timebase_info_t = *mut mach_timebase_info;
- type kern_return_t = libc::c_int;
-
- impl Instant {
- pub fn now() -> Instant {
- extern "C" {
- fn mach_absolute_time() -> u64;
- }
- Instant { t: unsafe { mach_absolute_time() } }
- }
-
- pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> {
- let diff = self.t.checked_sub(other.t)?;
- let info = info();
- let nanos = mul_div_u64(diff, info.numer as u64, info.denom as u64);
- Some(Duration::new(nanos / NSEC_PER_SEC, (nanos % NSEC_PER_SEC) as u32))
- }
-
- pub fn checked_add_duration(&self, other: &Duration) -> Option<Instant> {
- Some(Instant { t: self.t.checked_add(checked_dur2intervals(other)?)? })
- }
-
- pub fn checked_sub_duration(&self, other: &Duration) -> Option<Instant> {
- Some(Instant { t: self.t.checked_sub(checked_dur2intervals(other)?)? })
- }
- }
-
- impl SystemTime {
- pub fn now() -> SystemTime {
- use crate::ptr;
-
- let mut s = libc::timeval { tv_sec: 0, tv_usec: 0 };
- cvt(unsafe { libc::gettimeofday(&mut s, ptr::null_mut()) }).unwrap();
- return SystemTime::from(s);
- }
- }
-
- impl From<libc::timeval> for Timespec {
- fn from(t: libc::timeval) -> Timespec {
- Timespec::new(t.tv_sec as i64, 1000 * t.tv_usec as i64)
- }
- }
-
- impl From<libc::timeval> for SystemTime {
- fn from(t: libc::timeval) -> SystemTime {
- SystemTime { t: Timespec::from(t) }
- }
- }
-
- fn checked_dur2intervals(dur: &Duration) -> Option<u64> {
- let nanos =
- dur.as_secs().checked_mul(NSEC_PER_SEC)?.checked_add(dur.subsec_nanos() as u64)?;
- let info = info();
- Some(mul_div_u64(nanos, info.denom as u64, info.numer as u64))
- }
-
- fn info() -> mach_timebase_info {
- // INFO_BITS conceptually is an `Option<mach_timebase_info>`. We can do
- // this in 64 bits because we know 0 is never a valid value for the
- // `denom` field.
- //
- // Encoding this as a single `AtomicU64` allows us to use `Relaxed`
- // operations, as we are only interested in the effects on a single
- // memory location.
- static INFO_BITS: AtomicU64 = AtomicU64::new(0);
-
- // If a previous thread has initialized `INFO_BITS`, use it.
- let info_bits = INFO_BITS.load(Ordering::Relaxed);
- if info_bits != 0 {
- return info_from_bits(info_bits);
- }
-
- // ... otherwise learn for ourselves ...
- extern "C" {
- fn mach_timebase_info(info: mach_timebase_info_t) -> kern_return_t;
- }
-
- let mut info = info_from_bits(0);
- unsafe {
- mach_timebase_info(&mut info);
- }
- INFO_BITS.store(info_to_bits(info), Ordering::Relaxed);
- info
- }
-
- #[inline]
- fn info_to_bits(info: mach_timebase_info) -> u64 {
- ((info.denom as u64) << 32) | (info.numer as u64)
- }
-
- #[inline]
- fn info_from_bits(bits: u64) -> mach_timebase_info {
- mach_timebase_info { numer: bits as u32, denom: (bits >> 32) as u32 }
- }
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct Instant {
+ t: Timespec,
}
-#[cfg(not(any(
- all(target_os = "macos", any(not(target_arch = "aarch64"))),
- target_os = "ios",
- target_os = "watchos",
- target_os = "tvos"
-)))]
-mod inner {
- use crate::fmt;
- use crate::mem::MaybeUninit;
- use crate::sys::cvt;
- use crate::time::Duration;
-
- use super::{SystemTime, Timespec};
-
- #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
- pub struct Instant {
- t: Timespec,
+impl Instant {
+ pub fn now() -> Instant {
+ // https://www.manpagez.com/man/3/clock_gettime/
+ //
+ // CLOCK_UPTIME_RAW clock that increments monotonically, in the same man-
+ // ner as CLOCK_MONOTONIC_RAW, but that does not incre-
+ // ment while the system is asleep. The returned value
+ // is identical to the result of mach_absolute_time()
+ // after the appropriate mach_timebase conversion is
+ // applied.
+ //
+ // Instant on macos was historically implemented using mach_absolute_time;
+ // we preserve this value domain out of an abundance of caution.
+ #[cfg(any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+ target_os = "tvos"
+ ))]
+ const clock_id: libc::clockid_t = libc::CLOCK_UPTIME_RAW;
+ #[cfg(not(any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+ target_os = "tvos"
+ )))]
+ const clock_id: libc::clockid_t = libc::CLOCK_MONOTONIC;
+ Instant { t: Timespec::now(clock_id) }
}
- impl Instant {
- pub fn now() -> Instant {
- #[cfg(target_os = "macos")]
- const clock_id: libc::clockid_t = libc::CLOCK_UPTIME_RAW;
- #[cfg(not(target_os = "macos"))]
- const clock_id: libc::clockid_t = libc::CLOCK_MONOTONIC;
- Instant { t: Timespec::now(clock_id) }
- }
-
- pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> {
- self.t.sub_timespec(&other.t).ok()
- }
-
- pub fn checked_add_duration(&self, other: &Duration) -> Option<Instant> {
- Some(Instant { t: self.t.checked_add_duration(other)? })
- }
-
- pub fn checked_sub_duration(&self, other: &Duration) -> Option<Instant> {
- Some(Instant { t: self.t.checked_sub_duration(other)? })
- }
+ pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> {
+ self.t.sub_timespec(&other.t).ok()
}
- impl fmt::Debug for Instant {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Instant")
- .field("tv_sec", &self.t.tv_sec)
- .field("tv_nsec", &self.t.tv_nsec.0)
- .finish()
- }
+ pub fn checked_add_duration(&self, other: &Duration) -> Option<Instant> {
+ Some(Instant { t: self.t.checked_add_duration(other)? })
}
- impl SystemTime {
- pub fn now() -> SystemTime {
- SystemTime { t: Timespec::now(libc::CLOCK_REALTIME) }
- }
+ pub fn checked_sub_duration(&self, other: &Duration) -> Option<Instant> {
+ Some(Instant { t: self.t.checked_sub_duration(other)? })
}
+}
- impl Timespec {
- pub fn now(clock: libc::clockid_t) -> Timespec {
- // Try to use 64-bit time in preparation for Y2038.
- #[cfg(all(
- target_os = "linux",
- target_env = "gnu",
- target_pointer_width = "32",
- not(target_arch = "riscv32")
- ))]
- {
- use crate::sys::weak::weak;
-
- // __clock_gettime64 was added to 32-bit arches in glibc 2.34,
- // and it handles both vDSO calls and ENOSYS fallbacks itself.
- weak!(fn __clock_gettime64(libc::clockid_t, *mut super::__timespec64) -> libc::c_int);
-
- if let Some(clock_gettime64) = __clock_gettime64.get() {
- let mut t = MaybeUninit::uninit();
- cvt(unsafe { clock_gettime64(clock, t.as_mut_ptr()) }).unwrap();
- return Timespec::from(unsafe { t.assume_init() });
- }
- }
-
- let mut t = MaybeUninit::uninit();
- cvt(unsafe { libc::clock_gettime(clock, t.as_mut_ptr()) }).unwrap();
- Timespec::from(unsafe { t.assume_init() })
- }
+impl fmt::Debug for Instant {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Instant")
+ .field("tv_sec", &self.t.tv_sec)
+ .field("tv_nsec", &self.t.tv_nsec.0)
+ .finish()
}
}
diff --git a/library/std/src/sys/wasi/mod.rs b/library/std/src/sys/wasi/mod.rs
index 5cbb5cb65..5919cc506 100644
--- a/library/std/src/sys/wasi/mod.rs
+++ b/library/std/src/sys/wasi/mod.rs
@@ -82,31 +82,99 @@ pub fn is_interrupted(errno: i32) -> bool {
}
pub fn decode_error_kind(errno: i32) -> std_io::ErrorKind {
- use std_io::ErrorKind::*;
- if errno > u16::MAX as i32 || errno < 0 {
- return Uncategorized;
+ use std_io::ErrorKind;
+
+ let Ok(errno) = u16::try_from(errno) else {
+ return ErrorKind::Uncategorized;
+ };
+
+ macro_rules! match_errno {
+ ($($($errno:ident)|+ => $errkind:ident),*, _ => $wildcard:ident $(,)?) => {
+ match errno {
+ $(e if $(e == ::wasi::$errno.raw())||+ => ErrorKind::$errkind),*,
+ _ => ErrorKind::$wildcard,
+ }
+ };
}
- match errno {
- e if e == wasi::ERRNO_CONNREFUSED.raw().into() => ConnectionRefused,
- e if e == wasi::ERRNO_CONNRESET.raw().into() => ConnectionReset,
- e if e == wasi::ERRNO_PERM.raw().into() || e == wasi::ERRNO_ACCES.raw().into() => {
- PermissionDenied
- }
- e if e == wasi::ERRNO_PIPE.raw().into() => BrokenPipe,
- e if e == wasi::ERRNO_NOTCONN.raw().into() => NotConnected,
- e if e == wasi::ERRNO_CONNABORTED.raw().into() => ConnectionAborted,
- e if e == wasi::ERRNO_ADDRNOTAVAIL.raw().into() => AddrNotAvailable,
- e if e == wasi::ERRNO_ADDRINUSE.raw().into() => AddrInUse,
- e if e == wasi::ERRNO_NOENT.raw().into() => NotFound,
- e if e == wasi::ERRNO_INTR.raw().into() => Interrupted,
- e if e == wasi::ERRNO_INVAL.raw().into() => InvalidInput,
- e if e == wasi::ERRNO_TIMEDOUT.raw().into() => TimedOut,
- e if e == wasi::ERRNO_EXIST.raw().into() => AlreadyExists,
- e if e == wasi::ERRNO_AGAIN.raw().into() => WouldBlock,
- e if e == wasi::ERRNO_NOSYS.raw().into() => Unsupported,
- e if e == wasi::ERRNO_NOMEM.raw().into() => OutOfMemory,
- _ => Uncategorized,
+ match_errno! {
+ ERRNO_2BIG => ArgumentListTooLong,
+ ERRNO_ACCES => PermissionDenied,
+ ERRNO_ADDRINUSE => AddrInUse,
+ ERRNO_ADDRNOTAVAIL => AddrNotAvailable,
+ ERRNO_AFNOSUPPORT => Unsupported,
+ ERRNO_AGAIN => WouldBlock,
+ // ALREADY => "connection already in progress",
+ // BADF => "bad file descriptor",
+ // BADMSG => "bad message",
+ ERRNO_BUSY => ResourceBusy,
+ // CANCELED => "operation canceled",
+ // CHILD => "no child processes",
+ ERRNO_CONNABORTED => ConnectionAborted,
+ ERRNO_CONNREFUSED => ConnectionRefused,
+ ERRNO_CONNRESET => ConnectionReset,
+ ERRNO_DEADLK => Deadlock,
+ // DESTADDRREQ => "destination address required",
+ ERRNO_DOM => InvalidInput,
+ // DQUOT => /* reserved */,
+ ERRNO_EXIST => AlreadyExists,
+ // FAULT => "bad address",
+ ERRNO_FBIG => FileTooLarge,
+ ERRNO_HOSTUNREACH => HostUnreachable,
+ // IDRM => "identifier removed",
+ // ILSEQ => "illegal byte sequence",
+ // INPROGRESS => "operation in progress",
+ ERRNO_INTR => Interrupted,
+ ERRNO_INVAL => InvalidInput,
+ ERRNO_IO => Uncategorized,
+ // ISCONN => "socket is connected",
+ ERRNO_ISDIR => IsADirectory,
+ ERRNO_LOOP => FilesystemLoop,
+ // MFILE => "file descriptor value too large",
+ ERRNO_MLINK => TooManyLinks,
+ // MSGSIZE => "message too large",
+ // MULTIHOP => /* reserved */,
+ ERRNO_NAMETOOLONG => InvalidFilename,
+ ERRNO_NETDOWN => NetworkDown,
+ // NETRESET => "connection aborted by network",
+ ERRNO_NETUNREACH => NetworkUnreachable,
+ // NFILE => "too many files open in system",
+ // NOBUFS => "no buffer space available",
+ ERRNO_NODEV => NotFound,
+ ERRNO_NOENT => NotFound,
+ // NOEXEC => "executable file format error",
+ // NOLCK => "no locks available",
+ // NOLINK => /* reserved */,
+ ERRNO_NOMEM => OutOfMemory,
+ // NOMSG => "no message of the desired type",
+ // NOPROTOOPT => "protocol not available",
+ ERRNO_NOSPC => StorageFull,
+ ERRNO_NOSYS => Unsupported,
+ ERRNO_NOTCONN => NotConnected,
+ ERRNO_NOTDIR => NotADirectory,
+ ERRNO_NOTEMPTY => DirectoryNotEmpty,
+ // NOTRECOVERABLE => "state not recoverable",
+ // NOTSOCK => "not a socket",
+ ERRNO_NOTSUP => Unsupported,
+ // NOTTY => "inappropriate I/O control operation",
+ ERRNO_NXIO => NotFound,
+ // OVERFLOW => "value too large to be stored in data type",
+ // OWNERDEAD => "previous owner died",
+ ERRNO_PERM => PermissionDenied,
+ ERRNO_PIPE => BrokenPipe,
+ // PROTO => "protocol error",
+ ERRNO_PROTONOSUPPORT => Unsupported,
+ // PROTOTYPE => "protocol wrong type for socket",
+ // RANGE => "result too large",
+ ERRNO_ROFS => ReadOnlyFilesystem,
+ ERRNO_SPIPE => NotSeekable,
+ ERRNO_SRCH => NotFound,
+ // STALE => /* reserved */,
+ ERRNO_TIMEDOUT => TimedOut,
+ ERRNO_TXTBSY => ResourceBusy,
+ ERRNO_XDEV => CrossesDevices,
+ ERRNO_NOTCAPABLE => PermissionDenied,
+ _ => Uncategorized,
}
}
@@ -124,6 +192,7 @@ pub fn hashmap_random_keys() -> (u64, u64) {
return ret;
}
+#[inline]
fn err2io(err: wasi::Errno) -> std_io::Error {
std_io::Error::from_raw_os_error(err.raw().into())
}
diff --git a/library/std/src/sys/windows/api.rs b/library/std/src/sys/windows/api.rs
new file mode 100644
index 000000000..e9f0bbfbe
--- /dev/null
+++ b/library/std/src/sys/windows/api.rs
@@ -0,0 +1,157 @@
+//! # Safe(r) wrappers around Windows API functions.
+//!
+//! This module contains fairly thin wrappers around Windows API functions,
+//! aimed at centralising safety instead of having unsafe blocks spread
+//! throughout higher level code. This makes it much easier to audit FFI safety.
+//!
+//! Not all functions can be made completely safe without more context but in
+//! such cases we should still endeavour to reduce the caller's burden of safety
+//! as much as possible.
+//!
+//! ## Guidelines for wrappers
+//!
+//! Items here should be named similarly to their raw Windows API name, except
+//! that they follow Rust's case conventions. E.g. function names are
+//! lower_snake_case. The idea here is that it should be easy for a Windows
+//! C/C++ programmer to identify the underlying function that's being wrapped
+//! while not looking too out of place in Rust code.
+//!
+//! Every use of an `unsafe` block must have a related SAFETY comment, even if
+//! it's trivially safe (for example, see `get_last_error`). Public unsafe
+//! functions must document what the caller has to do to call them safely.
+//!
+//! Avoid unchecked `as` casts. For integers, either assert that the integer
+//! is in range or use `try_into` instead. For pointers, prefer to use
+//! `ptr.cast::<Type>()` when possible.
+//!
+//! This module must only depend on core and not on std types as the eventual
+//! hope is to have std depend on sys and not the other way around.
+//! However, some amount of glue code may currently be necessary so such code
+//! should go in sys/windows/mod.rs rather than here. See `IoResult` as an example.
+
+use core::ffi::c_void;
+use core::ptr::addr_of;
+
+use super::c;
+
+/// Helper method for getting the size of `T` as a u32.
+/// Errors at compile time if the size would overflow.
+///
+/// While a type larger than u32::MAX is unlikely, it is possible if only because of a bug.
+/// However, one key motivation for this function is to avoid the temptation to
+/// use frequent `as` casts. This is risky because they are too powerful.
+/// For example, the following will compile today:
+///
+/// `std::mem::size_of::<u64> as u32`
+///
+/// Note that `size_of` is never actually called, instead a function pointer is
+/// converted to a `u32`. Clippy would warn about this but, alas, it's not run
+/// on the standard library.
+const fn win32_size_of<T: Sized>() -> u32 {
+ // Const assert that the size is less than u32::MAX.
+ // Uses a trait to workaround restriction on using generic types in inner items.
+ trait Win32SizeOf: Sized {
+ const WIN32_SIZE_OF: u32 = {
+ let size = core::mem::size_of::<Self>();
+ assert!(size <= u32::MAX as usize);
+ size as u32
+ };
+ }
+ impl<T: Sized> Win32SizeOf for T {}
+
+ T::WIN32_SIZE_OF
+}
+
+/// The `SetFileInformationByHandle` function takes a generic parameter by
+/// making the user specify the type (class), a pointer to the data and its
+/// size. This trait allows attaching that information to a Rust type so that
+/// [`set_file_information_by_handle`] can be called safely.
+///
+/// This trait is designed so that it can support variable sized types.
+/// However, currently Rust's std only uses fixed sized structures.
+///
+/// # Safety
+///
+/// * `as_ptr` must return a pointer to memory that is readable up to `size` bytes.
+/// * `CLASS` must accurately reflect the type pointed to by `as_ptr`. E.g.
+/// the `FILE_BASIC_INFO` structure has the class `FileBasicInfo`.
+pub unsafe trait SetFileInformation {
+ /// The type of information to set.
+ const CLASS: i32;
+ /// A pointer to the file information to set.
+ fn as_ptr(&self) -> *const c_void;
+ /// The size of the type pointed to by `as_ptr`.
+ fn size(&self) -> u32;
+}
+/// Helper trait for implementing `SetFileInformation` for statically sized types.
+unsafe trait SizedSetFileInformation: Sized {
+ const CLASS: i32;
+}
+unsafe impl<T: SizedSetFileInformation> SetFileInformation for T {
+ const CLASS: i32 = T::CLASS;
+ fn as_ptr(&self) -> *const c_void {
+ addr_of!(*self).cast::<c_void>()
+ }
+ fn size(&self) -> u32 {
+ win32_size_of::<Self>()
+ }
+}
+
+// SAFETY: FILE_BASIC_INFO, FILE_END_OF_FILE_INFO, FILE_ALLOCATION_INFO,
+// FILE_DISPOSITION_INFO, FILE_DISPOSITION_INFO_EX and FILE_IO_PRIORITY_HINT_INFO
+// are all plain `repr(C)` structs that only contain primitive types.
+// The given information classes correctly match with the struct.
+unsafe impl SizedSetFileInformation for c::FILE_BASIC_INFO {
+ const CLASS: i32 = c::FileBasicInfo;
+}
+unsafe impl SizedSetFileInformation for c::FILE_END_OF_FILE_INFO {
+ const CLASS: i32 = c::FileEndOfFileInfo;
+}
+unsafe impl SizedSetFileInformation for c::FILE_ALLOCATION_INFO {
+ const CLASS: i32 = c::FileAllocationInfo;
+}
+unsafe impl SizedSetFileInformation for c::FILE_DISPOSITION_INFO {
+ const CLASS: i32 = c::FileDispositionInfo;
+}
+unsafe impl SizedSetFileInformation for c::FILE_DISPOSITION_INFO_EX {
+ const CLASS: i32 = c::FileDispositionInfoEx;
+}
+unsafe impl SizedSetFileInformation for c::FILE_IO_PRIORITY_HINT_INFO {
+ const CLASS: i32 = c::FileIoPriorityHintInfo;
+}
+
+#[inline]
+pub fn set_file_information_by_handle<T: SetFileInformation>(
+ handle: c::HANDLE,
+ info: &T,
+) -> Result<(), WinError> {
+ unsafe fn set_info(
+ handle: c::HANDLE,
+ class: i32,
+ info: *const c_void,
+ size: u32,
+ ) -> Result<(), WinError> {
+ let result = c::SetFileInformationByHandle(handle, class, info, size);
+ (result != 0).then_some(()).ok_or_else(|| get_last_error())
+ }
+ // SAFETY: The `SetFileInformation` trait ensures that this is safe.
+ unsafe { set_info(handle, T::CLASS, info.as_ptr(), info.size()) }
+}
+
+/// Gets the error from the last function.
+/// This must be called immediately after the function that sets the error to
+/// avoid the risk of another function overwriting it.
+pub fn get_last_error() -> WinError {
+ // SAFETY: This just returns a thread-local u32 and has no other effects.
+ unsafe { WinError { code: c::GetLastError() } }
+}
+
+/// An error code as returned by [`get_last_error`].
+///
+/// This is usually a 16-bit Win32 error code but may be a 32-bit HRESULT or NTSTATUS.
+/// Check the documentation of the Windows API function being called for expected errors.
+#[derive(Clone, Copy, PartialEq, Eq)]
+#[repr(transparent)]
+pub struct WinError {
+ pub code: u32,
+}
diff --git a/library/std/src/sys/windows/c.rs b/library/std/src/sys/windows/c.rs
index f3637cbb9..a349e24b0 100644
--- a/library/std/src/sys/windows/c.rs
+++ b/library/std/src/sys/windows/c.rs
@@ -46,6 +46,10 @@ pub use FD_SET as fd_set;
pub use LINGER as linger;
pub use TIMEVAL as timeval;
+// https://learn.microsoft.com/en-us/cpp/c-runtime-library/exit-success-exit-failure?view=msvc-170
+pub const EXIT_SUCCESS: u32 = 0;
+pub const EXIT_FAILURE: u32 = 1;
+
pub const CONDITION_VARIABLE_INIT: CONDITION_VARIABLE = CONDITION_VARIABLE { Ptr: ptr::null_mut() };
pub const SRWLOCK_INIT: SRWLOCK = SRWLOCK { Ptr: ptr::null_mut() };
pub const INIT_ONCE_STATIC_INIT: INIT_ONCE = INIT_ONCE { Ptr: ptr::null_mut() };
diff --git a/library/std/src/sys/windows/c/windows_sys.lst b/library/std/src/sys/windows/c/windows_sys.lst
index 0aca37e2d..38bf15b7c 100644
--- a/library/std/src/sys/windows/c/windows_sys.lst
+++ b/library/std/src/sys/windows/c/windows_sys.lst
@@ -1964,6 +1964,7 @@ Windows.Win32.Networking.WinSock.ADDRESS_FAMILY
Windows.Win32.Networking.WinSock.ADDRINFOA
Windows.Win32.Networking.WinSock.AF_INET
Windows.Win32.Networking.WinSock.AF_INET6
+Windows.Win32.Networking.WinSock.AF_UNIX
Windows.Win32.Networking.WinSock.AF_UNSPEC
Windows.Win32.Networking.WinSock.bind
Windows.Win32.Networking.WinSock.closesocket
@@ -2058,6 +2059,7 @@ Windows.Win32.Networking.WinSock.SOCK_RDM
Windows.Win32.Networking.WinSock.SOCK_SEQPACKET
Windows.Win32.Networking.WinSock.SOCK_STREAM
Windows.Win32.Networking.WinSock.SOCKADDR
+Windows.Win32.Networking.WinSock.SOCKADDR_UN
Windows.Win32.Networking.WinSock.SOCKET
Windows.Win32.Networking.WinSock.SOCKET_ERROR
Windows.Win32.Networking.WinSock.SOL_SOCKET
@@ -2222,6 +2224,7 @@ Windows.Win32.Storage.FileSystem.FILE_ACCESS_RIGHTS
Windows.Win32.Storage.FileSystem.FILE_ADD_FILE
Windows.Win32.Storage.FileSystem.FILE_ADD_SUBDIRECTORY
Windows.Win32.Storage.FileSystem.FILE_ALL_ACCESS
+Windows.Win32.Storage.FileSystem.FILE_ALLOCATION_INFO
Windows.Win32.Storage.FileSystem.FILE_APPEND_DATA
Windows.Win32.Storage.FileSystem.FILE_ATTRIBUTE_ARCHIVE
Windows.Win32.Storage.FileSystem.FILE_ATTRIBUTE_COMPRESSED
@@ -2282,6 +2285,7 @@ Windows.Win32.Storage.FileSystem.FILE_GENERIC_READ
Windows.Win32.Storage.FileSystem.FILE_GENERIC_WRITE
Windows.Win32.Storage.FileSystem.FILE_ID_BOTH_DIR_INFO
Windows.Win32.Storage.FileSystem.FILE_INFO_BY_HANDLE_CLASS
+Windows.Win32.Storage.FileSystem.FILE_IO_PRIORITY_HINT_INFO
Windows.Win32.Storage.FileSystem.FILE_LIST_DIRECTORY
Windows.Win32.Storage.FileSystem.FILE_NAME_NORMALIZED
Windows.Win32.Storage.FileSystem.FILE_NAME_OPENED
@@ -2503,9 +2507,12 @@ Windows.Win32.System.Threading.CREATE_SEPARATE_WOW_VDM
Windows.Win32.System.Threading.CREATE_SHARED_WOW_VDM
Windows.Win32.System.Threading.CREATE_SUSPENDED
Windows.Win32.System.Threading.CREATE_UNICODE_ENVIRONMENT
+Windows.Win32.System.Threading.CREATE_WAITABLE_TIMER_HIGH_RESOLUTION
+Windows.Win32.System.Threading.CREATE_WAITABLE_TIMER_MANUAL_RESET
Windows.Win32.System.Threading.CreateEventW
Windows.Win32.System.Threading.CreateProcessW
Windows.Win32.System.Threading.CreateThread
+Windows.Win32.System.Threading.CreateWaitableTimerExW
Windows.Win32.System.Threading.DEBUG_ONLY_THIS_PROCESS
Windows.Win32.System.Threading.DEBUG_PROCESS
Windows.Win32.System.Threading.DeleteProcThreadAttributeList
@@ -2542,6 +2549,7 @@ Windows.Win32.System.Threading.REALTIME_PRIORITY_CLASS
Windows.Win32.System.Threading.ReleaseSRWLockExclusive
Windows.Win32.System.Threading.ReleaseSRWLockShared
Windows.Win32.System.Threading.SetThreadStackGuarantee
+Windows.Win32.System.Threading.SetWaitableTimer
Windows.Win32.System.Threading.Sleep
Windows.Win32.System.Threading.SleepConditionVariableSRW
Windows.Win32.System.Threading.SleepEx
@@ -2568,6 +2576,8 @@ Windows.Win32.System.Threading.TerminateProcess
Windows.Win32.System.Threading.THREAD_CREATE_RUN_IMMEDIATELY
Windows.Win32.System.Threading.THREAD_CREATE_SUSPENDED
Windows.Win32.System.Threading.THREAD_CREATION_FLAGS
+Windows.Win32.System.Threading.TIMER_ALL_ACCESS
+Windows.Win32.System.Threading.TIMER_MODIFY_STATE
Windows.Win32.System.Threading.TLS_OUT_OF_INDEXES
Windows.Win32.System.Threading.TlsAlloc
Windows.Win32.System.Threading.TlsFree
diff --git a/library/std/src/sys/windows/c/windows_sys.rs b/library/std/src/sys/windows/c/windows_sys.rs
index 851d15915..e0509e6a5 100644
--- a/library/std/src/sys/windows/c/windows_sys.rs
+++ b/library/std/src/sys/windows/c/windows_sys.rs
@@ -152,6 +152,15 @@ extern "system" {
}
#[link(name = "kernel32")]
extern "system" {
+ pub fn CreateWaitableTimerExW(
+ lptimerattributes: *const SECURITY_ATTRIBUTES,
+ lptimername: PCWSTR,
+ dwflags: u32,
+ dwdesiredaccess: u32,
+ ) -> HANDLE;
+}
+#[link(name = "kernel32")]
+extern "system" {
pub fn DeleteFileW(lpfilename: PCWSTR) -> BOOL;
}
#[link(name = "kernel32")]
@@ -509,6 +518,17 @@ extern "system" {
}
#[link(name = "kernel32")]
extern "system" {
+ pub fn SetWaitableTimer(
+ htimer: HANDLE,
+ lpduetime: *const i64,
+ lperiod: i32,
+ pfncompletionroutine: PTIMERAPCROUTINE,
+ lpargtocompletionroutine: *const ::core::ffi::c_void,
+ fresume: BOOL,
+ ) -> BOOL;
+}
+#[link(name = "kernel32")]
+extern "system" {
pub fn Sleep(dwmilliseconds: u32) -> ();
}
#[link(name = "kernel32")]
@@ -847,6 +867,7 @@ impl ::core::clone::Clone for ADDRINFOA {
}
pub const AF_INET: ADDRESS_FAMILY = 2u16;
pub const AF_INET6: ADDRESS_FAMILY = 23u16;
+pub const AF_UNIX: u16 = 1u16;
pub const AF_UNSPEC: ADDRESS_FAMILY = 0u16;
pub const ALL_PROCESSOR_GROUPS: u32 = 65535u32;
#[repr(C)]
@@ -1164,6 +1185,8 @@ pub const CREATE_SEPARATE_WOW_VDM: PROCESS_CREATION_FLAGS = 2048u32;
pub const CREATE_SHARED_WOW_VDM: PROCESS_CREATION_FLAGS = 4096u32;
pub const CREATE_SUSPENDED: PROCESS_CREATION_FLAGS = 4u32;
pub const CREATE_UNICODE_ENVIRONMENT: PROCESS_CREATION_FLAGS = 1024u32;
+pub const CREATE_WAITABLE_TIMER_HIGH_RESOLUTION: u32 = 2u32;
+pub const CREATE_WAITABLE_TIMER_MANUAL_RESET: u32 = 1u32;
pub const CSTR_EQUAL: COMPARESTRING_RESULT = 2i32;
pub const CSTR_GREATER_THAN: COMPARESTRING_RESULT = 3i32;
pub const CSTR_LESS_THAN: COMPARESTRING_RESULT = 1i32;
@@ -3106,6 +3129,16 @@ impl ::core::clone::Clone for FILETIME {
pub type FILE_ACCESS_RIGHTS = u32;
pub const FILE_ADD_FILE: FILE_ACCESS_RIGHTS = 2u32;
pub const FILE_ADD_SUBDIRECTORY: FILE_ACCESS_RIGHTS = 4u32;
+#[repr(C)]
+pub struct FILE_ALLOCATION_INFO {
+ pub AllocationSize: i64,
+}
+impl ::core::marker::Copy for FILE_ALLOCATION_INFO {}
+impl ::core::clone::Clone for FILE_ALLOCATION_INFO {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
pub const FILE_ALL_ACCESS: FILE_ACCESS_RIGHTS = 2032127u32;
pub const FILE_APPEND_DATA: FILE_ACCESS_RIGHTS = 4u32;
pub const FILE_ATTRIBUTE_ARCHIVE: FILE_FLAGS_AND_ATTRIBUTES = 32u32;
@@ -3247,6 +3280,16 @@ impl ::core::clone::Clone for FILE_ID_BOTH_DIR_INFO {
}
}
pub type FILE_INFO_BY_HANDLE_CLASS = i32;
+#[repr(C)]
+pub struct FILE_IO_PRIORITY_HINT_INFO {
+ pub PriorityHint: PRIORITY_HINT,
+}
+impl ::core::marker::Copy for FILE_IO_PRIORITY_HINT_INFO {}
+impl ::core::clone::Clone for FILE_IO_PRIORITY_HINT_INFO {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
pub const FILE_LIST_DIRECTORY: FILE_ACCESS_RIGHTS = 1u32;
pub const FILE_NAME_NORMALIZED: GETFINALPATHNAMEBYHANDLE_FLAGS = 0u32;
pub const FILE_NAME_OPENED: GETFINALPATHNAMEBYHANDLE_FLAGS = 8u32;
@@ -3752,6 +3795,7 @@ pub const PIPE_SERVER_END: NAMED_PIPE_MODE = 1u32;
pub const PIPE_TYPE_BYTE: NAMED_PIPE_MODE = 0u32;
pub const PIPE_TYPE_MESSAGE: NAMED_PIPE_MODE = 4u32;
pub const PIPE_WAIT: NAMED_PIPE_MODE = 0u32;
+pub type PRIORITY_HINT = i32;
pub type PROCESSOR_ARCHITECTURE = u16;
pub type PROCESS_CREATION_FLAGS = u32;
#[repr(C)]
@@ -3774,6 +3818,13 @@ pub const PROFILE_SERVER: PROCESS_CREATION_FLAGS = 1073741824u32;
pub const PROFILE_USER: PROCESS_CREATION_FLAGS = 268435456u32;
pub const PROGRESS_CONTINUE: u32 = 0u32;
pub type PSTR = *mut u8;
+pub type PTIMERAPCROUTINE = ::core::option::Option<
+ unsafe extern "system" fn(
+ lpargtocompletionroutine: *const ::core::ffi::c_void,
+ dwtimerlowvalue: u32,
+ dwtimerhighvalue: u32,
+ ) -> (),
+>;
pub type PWSTR = *mut u16;
pub const READ_CONTROL: FILE_ACCESS_RIGHTS = 131072u32;
pub const REALTIME_PRIORITY_CLASS: PROCESS_CREATION_FLAGS = 256u32;
@@ -3813,6 +3864,17 @@ impl ::core::clone::Clone for SOCKADDR {
*self
}
}
+#[repr(C)]
+pub struct SOCKADDR_UN {
+ pub sun_family: ADDRESS_FAMILY,
+ pub sun_path: [u8; 108],
+}
+impl ::core::marker::Copy for SOCKADDR_UN {}
+impl ::core::clone::Clone for SOCKADDR_UN {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
pub type SOCKET = usize;
pub const SOCKET_ERROR: i32 = -1i32;
pub const SOCK_DGRAM: WINSOCK_SOCKET_TYPE = 2i32;
@@ -3910,6 +3972,7 @@ pub type SYMBOLIC_LINK_FLAGS = u32;
pub const SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE: SYMBOLIC_LINK_FLAGS = 2u32;
pub const SYMBOLIC_LINK_FLAG_DIRECTORY: SYMBOLIC_LINK_FLAGS = 1u32;
pub const SYMLINK_FLAG_RELATIVE: u32 = 1u32;
+pub type SYNCHRONIZATION_ACCESS_RIGHTS = u32;
pub const SYNCHRONIZE: FILE_ACCESS_RIGHTS = 1048576u32;
#[repr(C)]
pub struct SYSTEM_INFO {
@@ -3956,6 +4019,8 @@ pub const TCP_NODELAY: i32 = 1i32;
pub const THREAD_CREATE_RUN_IMMEDIATELY: THREAD_CREATION_FLAGS = 0u32;
pub const THREAD_CREATE_SUSPENDED: THREAD_CREATION_FLAGS = 4u32;
pub type THREAD_CREATION_FLAGS = u32;
+pub const TIMER_ALL_ACCESS: SYNCHRONIZATION_ACCESS_RIGHTS = 2031619u32;
+pub const TIMER_MODIFY_STATE: SYNCHRONIZATION_ACCESS_RIGHTS = 2u32;
#[repr(C)]
pub struct TIMEVAL {
pub tv_sec: i32,
diff --git a/library/std/src/sys/windows/cmath.rs b/library/std/src/sys/windows/cmath.rs
index 1b2a86f3c..36578d5a3 100644
--- a/library/std/src/sys/windows/cmath.rs
+++ b/library/std/src/sys/windows/cmath.rs
@@ -1,6 +1,6 @@
#![cfg(not(test))]
-use libc::{c_double, c_float, c_int};
+use core::ffi::{c_double, c_float, c_int};
extern "C" {
pub fn acos(n: c_double) -> c_double;
@@ -33,7 +33,7 @@ pub use self::shims::*;
#[cfg(not(all(target_env = "msvc", target_arch = "x86")))]
mod shims {
- use libc::c_float;
+ use core::ffi::c_float;
extern "C" {
pub fn acosf(n: c_float) -> c_float;
@@ -52,7 +52,7 @@ mod shims {
// back to f32. While not precisely correct should be "correct enough" for now.
#[cfg(all(target_env = "msvc", target_arch = "x86"))]
mod shims {
- use libc::c_float;
+ use core::ffi::c_float;
#[inline]
pub unsafe fn acosf(n: c_float) -> c_float {
diff --git a/library/std/src/sys/windows/fs.rs b/library/std/src/sys/windows/fs.rs
index 21a65bc25..d7e36b9a3 100644
--- a/library/std/src/sys/windows/fs.rs
+++ b/library/std/src/sys/windows/fs.rs
@@ -16,8 +16,10 @@ use crate::sys::{c, cvt, Align8};
use crate::sys_common::{AsInner, FromInner, IntoInner};
use crate::thread;
+use core::ffi::c_void;
+
use super::path::maybe_verbatim;
-use super::to_u16s;
+use super::{api, to_u16s, IoResult};
pub struct File {
handle: Handle,
@@ -121,7 +123,7 @@ impl Iterator for ReadDir {
let mut wfd = mem::zeroed();
loop {
if c::FindNextFileW(self.handle.0, &mut wfd) == 0 {
- if c::GetLastError() == c::ERROR_NO_MORE_FILES {
+ if api::get_last_error().code == c::ERROR_NO_MORE_FILES {
return None;
} else {
return Some(Err(Error::last_os_error()));
@@ -316,17 +318,8 @@ impl File {
}
pub fn truncate(&self, size: u64) -> io::Result<()> {
- let mut info = c::FILE_END_OF_FILE_INFO { EndOfFile: size as c::LARGE_INTEGER };
- let size = mem::size_of_val(&info);
- cvt(unsafe {
- c::SetFileInformationByHandle(
- self.handle.as_raw_handle(),
- c::FileEndOfFileInfo,
- &mut info as *mut _ as *mut _,
- size as c::DWORD,
- )
- })?;
- Ok(())
+ let info = c::FILE_END_OF_FILE_INFO { EndOfFile: size as i64 };
+ api::set_file_information_by_handle(self.handle.as_raw_handle(), &info).io_result()
}
#[cfg(not(target_vendor = "uwp"))]
@@ -371,7 +364,7 @@ impl File {
cvt(c::GetFileInformationByHandleEx(
self.handle.as_raw_handle(),
c::FileBasicInfo,
- &mut info as *mut _ as *mut libc::c_void,
+ &mut info as *mut _ as *mut c_void,
size as c::DWORD,
))?;
let mut attr = FileAttr {
@@ -399,7 +392,7 @@ impl File {
cvt(c::GetFileInformationByHandleEx(
self.handle.as_raw_handle(),
c::FileStandardInfo,
- &mut info as *mut _ as *mut libc::c_void,
+ &mut info as *mut _ as *mut c_void,
size as c::DWORD,
))?;
attr.file_size = info.AllocationSize as u64;
@@ -563,23 +556,14 @@ impl File {
}
pub fn set_permissions(&self, perm: FilePermissions) -> io::Result<()> {
- let mut info = c::FILE_BASIC_INFO {
+ let info = c::FILE_BASIC_INFO {
CreationTime: 0,
LastAccessTime: 0,
LastWriteTime: 0,
ChangeTime: 0,
FileAttributes: perm.attrs,
};
- let size = mem::size_of_val(&info);
- cvt(unsafe {
- c::SetFileInformationByHandle(
- self.handle.as_raw_handle(),
- c::FileBasicInfo,
- &mut info as *mut _ as *mut _,
- size as c::DWORD,
- )
- })?;
- Ok(())
+ api::set_file_information_by_handle(self.handle.as_raw_handle(), &info).io_result()
}
pub fn set_times(&self, times: FileTimes) -> io::Result<()> {
@@ -624,7 +608,7 @@ impl File {
cvt(c::GetFileInformationByHandleEx(
self.handle.as_raw_handle(),
c::FileBasicInfo,
- &mut info as *mut _ as *mut libc::c_void,
+ &mut info as *mut _ as *mut c_void,
size as c::DWORD,
))?;
Ok(info)
@@ -639,38 +623,20 @@ impl File {
/// If the operation is not supported for this filesystem or OS version
/// then errors will be `ERROR_NOT_SUPPORTED` or `ERROR_INVALID_PARAMETER`.
fn posix_delete(&self) -> io::Result<()> {
- let mut info = c::FILE_DISPOSITION_INFO_EX {
+ let info = c::FILE_DISPOSITION_INFO_EX {
Flags: c::FILE_DISPOSITION_FLAG_DELETE
| c::FILE_DISPOSITION_FLAG_POSIX_SEMANTICS
| c::FILE_DISPOSITION_FLAG_IGNORE_READONLY_ATTRIBUTE,
};
- let size = mem::size_of_val(&info);
- cvt(unsafe {
- c::SetFileInformationByHandle(
- self.handle.as_raw_handle(),
- c::FileDispositionInfoEx,
- &mut info as *mut _ as *mut _,
- size as c::DWORD,
- )
- })?;
- Ok(())
+ api::set_file_information_by_handle(self.handle.as_raw_handle(), &info).io_result()
}
/// Delete a file using win32 semantics. The file won't actually be deleted
/// until all file handles are closed. However, marking a file for deletion
/// will prevent anyone from opening a new handle to the file.
fn win32_delete(&self) -> io::Result<()> {
- let mut info = c::FILE_DISPOSITION_INFO { DeleteFile: c::TRUE as _ };
- let size = mem::size_of_val(&info);
- cvt(unsafe {
- c::SetFileInformationByHandle(
- self.handle.as_raw_handle(),
- c::FileDispositionInfo,
- &mut info as *mut _ as *mut _,
- size as c::DWORD,
- )
- })?;
- Ok(())
+ let info = c::FILE_DISPOSITION_INFO { DeleteFile: c::TRUE as _ };
+ api::set_file_information_by_handle(self.handle.as_raw_handle(), &info).io_result()
}
/// Fill the given buffer with as many directory entries as will fit.
@@ -1064,6 +1030,14 @@ impl DirBuilder {
}
pub fn readdir(p: &Path) -> io::Result<ReadDir> {
+ // We push a `*` to the end of the path which cause the empty path to be
+ // treated as the current directory. So, for consistency with other platforms,
+ // we explicitly error on the empty path.
+ if p.as_os_str().is_empty() {
+ // Return an error code consistent with other ways of opening files.
+ // E.g. fs::metadata or File::open.
+ return Err(io::Error::from_raw_os_error(c::ERROR_PATH_NOT_FOUND as i32));
+ }
let root = p.to_path_buf();
let star = p.join("*");
let path = maybe_verbatim(&star)?;
@@ -1513,6 +1487,13 @@ pub fn try_exists(path: &Path) -> io::Result<bool> {
// as the file existing.
_ if e.raw_os_error() == Some(c::ERROR_SHARING_VIOLATION as i32) => Ok(true),
+ // `ERROR_CANT_ACCESS_FILE` means that a file exists but that the
+ // reparse point could not be handled by `CreateFile`.
+ // This can happen for special files such as:
+ // * Unix domain sockets which you need to `connect` to
+ // * App exec links which require using `CreateProcess`
+ _ if e.raw_os_error() == Some(c::ERROR_CANT_ACCESS_FILE as i32) => Ok(true),
+
// Other errors such as `ERROR_ACCESS_DENIED` may indicate that the
// file exists. However, these types of errors are usually more
// permanent so we report them here.
diff --git a/library/std/src/sys/windows/io.rs b/library/std/src/sys/windows/io.rs
index fc9856cae..9b540ee07 100644
--- a/library/std/src/sys/windows/io.rs
+++ b/library/std/src/sys/windows/io.rs
@@ -3,7 +3,7 @@ use crate::mem::size_of;
use crate::os::windows::io::{AsHandle, AsRawHandle, BorrowedHandle};
use crate::slice;
use crate::sys::c;
-use libc;
+use core::ffi::c_void;
#[derive(Copy, Clone)]
#[repr(transparent)]
@@ -136,7 +136,7 @@ unsafe fn msys_tty_on(handle: c::HANDLE) -> bool {
let res = c::GetFileInformationByHandleEx(
handle,
c::FileNameInfo,
- &mut name_info as *mut _ as *mut libc::c_void,
+ &mut name_info as *mut _ as *mut c_void,
size_of::<FILE_NAME_INFO>() as u32,
);
if res == 0 {
diff --git a/library/std/src/sys/windows/mod.rs b/library/std/src/sys/windows/mod.rs
index b609ad247..c4e56e13b 100644
--- a/library/std/src/sys/windows/mod.rs
+++ b/library/std/src/sys/windows/mod.rs
@@ -44,6 +44,18 @@ cfg_if::cfg_if! {
}
}
+mod api;
+
+/// Map a Result<T, WinError> to io::Result<T>.
+trait IoResult<T> {
+ fn io_result(self) -> crate::io::Result<T>;
+}
+impl<T> IoResult<T> for Result<T, api::WinError> {
+ fn io_result(self) -> crate::io::Result<T> {
+ self.map_err(|e| crate::io::Error::from_raw_os_error(e.code as i32))
+ }
+}
+
// SAFETY: must be called only once during runtime initialization.
// NOTE: this is not guaranteed to run, for example when Rust code is called externally.
pub unsafe fn init(_argc: isize, _argv: *const *const u8, _sigpipe: u8) {
@@ -241,11 +253,11 @@ where
// not an actual error.
c::SetLastError(0);
let k = match f1(buf.as_mut_ptr().cast::<u16>(), n as c::DWORD) {
- 0 if c::GetLastError() == 0 => 0,
+ 0 if api::get_last_error().code == 0 => 0,
0 => return Err(crate::io::Error::last_os_error()),
n => n,
} as usize;
- if k == n && c::GetLastError() == c::ERROR_INSUFFICIENT_BUFFER {
+ if k == n && api::get_last_error().code == c::ERROR_INSUFFICIENT_BUFFER {
n = n.saturating_mul(2).min(c::DWORD::MAX as usize);
} else if k > n {
n = k;
diff --git a/library/std/src/sys/windows/net.rs b/library/std/src/sys/windows/net.rs
index abdcab424..c29b86366 100644
--- a/library/std/src/sys/windows/net.rs
+++ b/library/std/src/sys/windows/net.rs
@@ -15,7 +15,7 @@ use crate::sys_common::net;
use crate::sys_common::{AsInner, FromInner, IntoInner};
use crate::time::Duration;
-use libc::{c_int, c_long, c_ulong, c_ushort};
+use core::ffi::{c_int, c_long, c_ulong, c_ushort};
pub type wrlen_t = i32;
@@ -140,13 +140,15 @@ impl Socket {
}
}
+ pub fn connect(&self, addr: &SocketAddr) -> io::Result<()> {
+ let (addr, len) = addr.into_inner();
+ let result = unsafe { c::connect(self.as_raw(), addr.as_ptr(), len) };
+ cvt(result).map(drop)
+ }
+
pub fn connect_timeout(&self, addr: &SocketAddr, timeout: Duration) -> io::Result<()> {
self.set_nonblocking(true)?;
- let result = {
- let (addr, len) = addr.into_inner();
- let result = unsafe { c::connect(self.as_raw(), addr.as_ptr(), len) };
- cvt(result).map(drop)
- };
+ let result = self.connect(addr);
self.set_nonblocking(false)?;
match result {
diff --git a/library/std/src/sys/windows/os.rs b/library/std/src/sys/windows/os.rs
index 58afca088..8cc905101 100644
--- a/library/std/src/sys/windows/os.rs
+++ b/library/std/src/sys/windows/os.rs
@@ -17,10 +17,10 @@ use crate::ptr;
use crate::slice;
use crate::sys::{c, cvt};
-use super::to_u16s;
+use super::{api, to_u16s};
pub fn errno() -> i32 {
- unsafe { c::GetLastError() as i32 }
+ api::get_last_error().code as i32
}
/// Gets a detailed string description for the given error number.
@@ -336,7 +336,7 @@ fn home_dir_crt() -> Option<PathBuf> {
super::fill_utf16_buf(
|buf, mut sz| {
match c::GetUserProfileDirectoryW(token, buf, &mut sz) {
- 0 if c::GetLastError() != c::ERROR_INSUFFICIENT_BUFFER => 0,
+ 0 if api::get_last_error().code != c::ERROR_INSUFFICIENT_BUFFER => 0,
0 => sz,
_ => sz - 1, // sz includes the null terminator
}
diff --git a/library/std/src/sys/windows/process.rs b/library/std/src/sys/windows/process.rs
index cd5bf7f15..f4078d359 100644
--- a/library/std/src/sys/windows/process.rs
+++ b/library/std/src/sys/windows/process.rs
@@ -19,8 +19,7 @@ use crate::path::{Path, PathBuf};
use crate::ptr;
use crate::sync::Mutex;
use crate::sys::args::{self, Arg};
-use crate::sys::c;
-use crate::sys::c::NonZeroDWORD;
+use crate::sys::c::{self, NonZeroDWORD, EXIT_FAILURE, EXIT_SUCCESS};
use crate::sys::cvt;
use crate::sys::fs::{File, OpenOptions};
use crate::sys::handle::Handle;
@@ -30,7 +29,7 @@ use crate::sys::stdio;
use crate::sys_common::process::{CommandEnv, CommandEnvs};
use crate::sys_common::IntoInner;
-use libc::{c_void, EXIT_FAILURE, EXIT_SUCCESS};
+use core::ffi::c_void;
////////////////////////////////////////////////////////////////////////////////
// Command
diff --git a/library/std/src/sys/windows/stack_overflow.rs b/library/std/src/sys/windows/stack_overflow.rs
index 0caf0a317..627763da8 100644
--- a/library/std/src/sys/windows/stack_overflow.rs
+++ b/library/std/src/sys/windows/stack_overflow.rs
@@ -3,6 +3,8 @@
use crate::sys::c;
use crate::thread;
+use super::api;
+
pub struct Handler;
impl Handler {
@@ -10,7 +12,7 @@ impl Handler {
// This API isn't available on XP, so don't panic in that case and just
// pray it works out ok.
if c::SetThreadStackGuarantee(&mut 0x5000) == 0
- && c::GetLastError() as u32 != c::ERROR_CALL_NOT_IMPLEMENTED as u32
+ && api::get_last_error().code != c::ERROR_CALL_NOT_IMPLEMENTED
{
panic!("failed to reserve stack space for exception handling");
}
diff --git a/library/std/src/sys/windows/stdio.rs b/library/std/src/sys/windows/stdio.rs
index 3fcaaa508..a9ff909aa 100644
--- a/library/std/src/sys/windows/stdio.rs
+++ b/library/std/src/sys/windows/stdio.rs
@@ -9,6 +9,7 @@ use crate::str;
use crate::sys::c;
use crate::sys::cvt;
use crate::sys::handle::Handle;
+use crate::sys::windows::api;
use core::str::utf8_char_width;
#[cfg(test)]
@@ -369,7 +370,7 @@ fn read_u16s(handle: c::HANDLE, buf: &mut [MaybeUninit<u16>]) -> io::Result<usiz
// ReadConsoleW returns success with ERROR_OPERATION_ABORTED for Ctrl-C or Ctrl-Break.
// Explicitly check for that case here and try again.
- if amount == 0 && unsafe { c::GetLastError() } == c::ERROR_OPERATION_ABORTED {
+ if amount == 0 && api::get_last_error().code == c::ERROR_OPERATION_ABORTED {
continue;
}
break;
diff --git a/library/std/src/sys/windows/thread.rs b/library/std/src/sys/windows/thread.rs
index 18cecb656..1fe744935 100644
--- a/library/std/src/sys/windows/thread.rs
+++ b/library/std/src/sys/windows/thread.rs
@@ -10,8 +10,9 @@ use crate::sys::stack_overflow;
use crate::sys_common::FromInner;
use crate::time::Duration;
-use libc::c_void;
+use core::ffi::c_void;
+use super::time::WaitableTimer;
use super::to_u16s;
pub const DEFAULT_MIN_STACK_SIZE: usize = 2 * 1024 * 1024;
@@ -87,7 +88,17 @@ impl Thread {
}
pub fn sleep(dur: Duration) {
- unsafe { c::Sleep(super::dur2timeout(dur)) }
+ fn high_precision_sleep(dur: Duration) -> Result<(), ()> {
+ let timer = WaitableTimer::high_resolution()?;
+ timer.set(dur)?;
+ timer.wait()
+ }
+ // Attempt to use high-precision sleep (Windows 10, version 1803+).
+ // On error fallback to the standard `Sleep` function.
+ // Also preserves the zero duration behaviour of `Sleep`.
+ if dur.is_zero() || high_precision_sleep(dur).is_err() {
+ unsafe { c::Sleep(super::dur2timeout(dur)) }
+ }
}
pub fn handle(&self) -> &Handle {
diff --git a/library/std/src/sys/windows/thread_local_key.rs b/library/std/src/sys/windows/thread_local_key.rs
index 036d96596..5eee4a966 100644
--- a/library/std/src/sys/windows/thread_local_key.rs
+++ b/library/std/src/sys/windows/thread_local_key.rs
@@ -16,14 +16,19 @@ static HAS_DTORS: AtomicBool = AtomicBool::new(false);
// Using a per-thread list avoids the problems in synchronizing global state.
#[thread_local]
#[cfg(target_thread_local)]
-static mut DESTRUCTORS: Vec<(*mut u8, unsafe extern "C" fn(*mut u8))> = Vec::new();
+static DESTRUCTORS: crate::cell::RefCell<Vec<(*mut u8, unsafe extern "C" fn(*mut u8))>> =
+ crate::cell::RefCell::new(Vec::new());
// Ensure this can never be inlined because otherwise this may break in dylibs.
// See #44391.
#[inline(never)]
#[cfg(target_thread_local)]
pub unsafe fn register_keyless_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
- DESTRUCTORS.push((t, dtor));
+ match DESTRUCTORS.try_borrow_mut() {
+ Ok(mut dtors) => dtors.push((t, dtor)),
+ Err(_) => rtabort!("global allocator may not use TLS"),
+ }
+
HAS_DTORS.store(true, Relaxed);
}
@@ -37,11 +42,17 @@ unsafe fn run_keyless_dtors() {
// the case that this loop always terminates because we provide the
// guarantee that a TLS key cannot be set after it is flagged for
// destruction.
- while let Some((ptr, dtor)) = DESTRUCTORS.pop() {
+ loop {
+ // Use a let-else binding to ensure the `RefCell` guard is dropped
+ // immediately. Otherwise, a panic would occur if a TLS destructor
+ // tries to access the list.
+ let Some((ptr, dtor)) = DESTRUCTORS.borrow_mut().pop() else {
+ break;
+ };
(dtor)(ptr);
}
// We're done so free the memory.
- DESTRUCTORS = Vec::new();
+ DESTRUCTORS.replace(Vec::new());
}
type Key = c::DWORD;
diff --git a/library/std/src/sys/windows/time.rs b/library/std/src/sys/windows/time.rs
index b8209a854..bece48e79 100644
--- a/library/std/src/sys/windows/time.rs
+++ b/library/std/src/sys/windows/time.rs
@@ -1,11 +1,13 @@
use crate::cmp::Ordering;
use crate::fmt;
use crate::mem;
+use crate::ptr::{null, null_mut};
use crate::sys::c;
use crate::sys_common::IntoInner;
use crate::time::Duration;
use core::hash::{Hash, Hasher};
+use core::ops::Neg;
const NANOS_PER_SEC: u64 = 1_000_000_000;
const INTERVALS_PER_SEC: u64 = NANOS_PER_SEC / 100;
@@ -222,3 +224,39 @@ mod perf_counter {
qpc_value
}
}
+
+/// A timer you can wait on.
+pub(super) struct WaitableTimer {
+ handle: c::HANDLE,
+}
+impl WaitableTimer {
+ /// Create a high-resolution timer. Will fail before Windows 10, version 1803.
+ pub fn high_resolution() -> Result<Self, ()> {
+ let handle = unsafe {
+ c::CreateWaitableTimerExW(
+ null(),
+ null(),
+ c::CREATE_WAITABLE_TIMER_HIGH_RESOLUTION,
+ c::TIMER_ALL_ACCESS,
+ )
+ };
+ if handle != null_mut() { Ok(Self { handle }) } else { Err(()) }
+ }
+ pub fn set(&self, duration: Duration) -> Result<(), ()> {
+ // Convert the Duration to a format similar to FILETIME.
+ // Negative values are relative times whereas positive values are absolute.
+ // Therefore we negate the relative duration.
+ let time = checked_dur2intervals(&duration).ok_or(())?.neg();
+ let result = unsafe { c::SetWaitableTimer(self.handle, &time, 0, None, null(), c::FALSE) };
+ if result != 0 { Ok(()) } else { Err(()) }
+ }
+ pub fn wait(&self) -> Result<(), ()> {
+ let result = unsafe { c::WaitForSingleObject(self.handle, c::INFINITE) };
+ if result != c::WAIT_FAILED { Ok(()) } else { Err(()) }
+ }
+}
+impl Drop for WaitableTimer {
+ fn drop(&mut self) {
+ unsafe { c::CloseHandle(self.handle) };
+ }
+}
diff --git a/library/std/src/sys_common/net.rs b/library/std/src/sys_common/net.rs
index 4f5b17dea..8712bd2ec 100644
--- a/library/std/src/sys_common/net.rs
+++ b/library/std/src/sys_common/net.rs
@@ -226,9 +226,7 @@ impl TcpStream {
init();
let sock = Socket::new(addr, c::SOCK_STREAM)?;
-
- let (addr, len) = addr.into_inner();
- cvt_r(|| unsafe { c::connect(sock.as_raw(), addr.as_ptr(), len) })?;
+ sock.connect(addr)?;
Ok(TcpStream { inner: sock })
}
diff --git a/library/std/src/sys_common/thread_local_dtor.rs b/library/std/src/sys_common/thread_local_dtor.rs
index 844946eda..98382fc6a 100644
--- a/library/std/src/sys_common/thread_local_dtor.rs
+++ b/library/std/src/sys_common/thread_local_dtor.rs
@@ -13,6 +13,7 @@
#![unstable(feature = "thread_local_internals", issue = "none")]
#![allow(dead_code)]
+use crate::cell::RefCell;
use crate::ptr;
use crate::sys_common::thread_local_key::StaticKey;
@@ -28,17 +29,23 @@ pub unsafe fn register_dtor_fallback(t: *mut u8, dtor: unsafe extern "C" fn(*mut
// flagged for destruction.
static DTORS: StaticKey = StaticKey::new(Some(run_dtors));
- type List = Vec<(*mut u8, unsafe extern "C" fn(*mut u8))>;
+ // FIXME(joboet): integrate RefCell into pointer to avoid infinite recursion
+ // when the global allocator tries to register a destructor and just panic
+ // instead.
+ type List = RefCell<Vec<(*mut u8, unsafe extern "C" fn(*mut u8))>>;
if DTORS.get().is_null() {
- let v: Box<List> = Box::new(Vec::new());
+ let v: Box<List> = Box::new(RefCell::new(Vec::new()));
DTORS.set(Box::into_raw(v) as *mut u8);
}
- let list: &mut List = &mut *(DTORS.get() as *mut List);
- list.push((t, dtor));
+ let list = &*(DTORS.get() as *const List);
+ match list.try_borrow_mut() {
+ Ok(mut dtors) => dtors.push((t, dtor)),
+ Err(_) => rtabort!("global allocator may not use TLS"),
+ }
unsafe extern "C" fn run_dtors(mut ptr: *mut u8) {
while !ptr.is_null() {
- let list: Box<List> = Box::from_raw(ptr as *mut List);
+ let list = Box::from_raw(ptr as *mut List).into_inner();
for (ptr, dtor) in list.into_iter() {
dtor(ptr);
}
diff --git a/library/std/src/thread/local.rs b/library/std/src/thread/local.rs
index 09994e47f..def94acd4 100644
--- a/library/std/src/thread/local.rs
+++ b/library/std/src/thread/local.rs
@@ -29,9 +29,9 @@ use crate::fmt;
/// within a thread, and values that implement [`Drop`] get destructed when a
/// thread exits. Some caveats apply, which are explained below.
///
-/// A `LocalKey`'s initializer cannot recursively depend on itself, and using
-/// a `LocalKey` in this way will cause the initializer to infinitely recurse
-/// on the first call to `with`.
+/// A `LocalKey`'s initializer cannot recursively depend on itself. Using a
+/// `LocalKey` in this way may cause panics, aborts or infinite recursion on
+/// the first call to `with`.
///
/// # Examples
///
diff --git a/library/std/src/thread/mod.rs b/library/std/src/thread/mod.rs
index 7b26068c2..4097eb554 100644
--- a/library/std/src/thread/mod.rs
+++ b/library/std/src/thread/mod.rs
@@ -545,6 +545,15 @@ impl Builder {
scope_data.increment_num_running_threads();
}
+ let main = Box::new(main);
+ // SAFETY: dynamic size and alignment of the Box remain the same. See below for why the
+ // lifetime change is justified.
+ #[cfg(bootstrap)]
+ let main =
+ unsafe { mem::transmute::<Box<dyn FnOnce() + 'a>, Box<dyn FnOnce() + 'static>>(main) };
+ #[cfg(not(bootstrap))]
+ let main = unsafe { Box::from_raw(Box::into_raw(main) as *mut (dyn FnOnce() + 'static)) };
+
Ok(JoinInner {
// SAFETY:
//
@@ -559,14 +568,7 @@ impl Builder {
// Similarly, the `sys` implementation must guarantee that no references to the closure
// exist after the thread has terminated, which is signaled by `Thread::join`
// returning.
- native: unsafe {
- imp::Thread::new(
- stack_size,
- mem::transmute::<Box<dyn FnOnce() + 'a>, Box<dyn FnOnce() + 'static>>(
- Box::new(main),
- ),
- )?
- },
+ native: unsafe { imp::Thread::new(stack_size, main)? },
thread: my_thread,
packet: my_packet,
})
diff --git a/library/std/src/time.rs b/library/std/src/time.rs
index 005d8c767..91c010ef2 100644
--- a/library/std/src/time.rs
+++ b/library/std/src/time.rs
@@ -111,7 +111,7 @@ pub use core::time::TryFromFloatSecsError;
/// |-----------|----------------------------------------------------------------------|
/// | SGX | [`insecure_time` usercall]. More information on [timekeeping in SGX] |
/// | UNIX | [clock_gettime (Monotonic Clock)] |
-/// | Darwin | [mach_absolute_time] |
+/// | Darwin | [clock_gettime (Monotonic Clock)] |
/// | VXWorks | [clock_gettime (Monotonic Clock)] |
/// | SOLID | `get_tim` |
/// | WASI | [__wasi_clock_time_get (Monotonic Clock)] |
@@ -123,7 +123,6 @@ pub use core::time::TryFromFloatSecsError;
/// [timekeeping in SGX]: https://edp.fortanix.com/docs/concepts/rust-std/#codestdtimecode
/// [__wasi_clock_time_get (Monotonic Clock)]: https://github.com/WebAssembly/WASI/blob/main/legacy/preview1/docs.md#clock_time_get
/// [clock_gettime (Monotonic Clock)]: https://linux.die.net/man/3/clock_gettime
-/// [mach_absolute_time]: https://developer.apple.com/library/archive/documentation/Darwin/Conceptual/KernelProgramming/services/services.html
///
/// **Disclaimer:** These system calls might change over time.
///
@@ -153,6 +152,7 @@ pub use core::time::TryFromFloatSecsError;
///
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[stable(feature = "time2", since = "1.8.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "Instant")]
pub struct Instant(time::Instant);
/// A measurement of the system clock, useful for talking to
@@ -223,7 +223,7 @@ pub struct Instant(time::Instant);
/// |-----------|----------------------------------------------------------------------|
/// | SGX | [`insecure_time` usercall]. More information on [timekeeping in SGX] |
/// | UNIX | [clock_gettime (Realtime Clock)] |
-/// | Darwin | [gettimeofday] |
+/// | Darwin | [clock_gettime (Realtime Clock)] |
/// | VXWorks | [clock_gettime (Realtime Clock)] |
/// | SOLID | `SOLID_RTC_ReadTime` |
/// | WASI | [__wasi_clock_time_get (Realtime Clock)] |
@@ -232,7 +232,6 @@ pub struct Instant(time::Instant);
/// [currently]: crate::io#platform-specific-behavior
/// [`insecure_time` usercall]: https://edp.fortanix.com/docs/api/fortanix_sgx_abi/struct.Usercalls.html#method.insecure_time
/// [timekeeping in SGX]: https://edp.fortanix.com/docs/concepts/rust-std/#codestdtimecode
-/// [gettimeofday]: https://man7.org/linux/man-pages/man2/gettimeofday.2.html
/// [clock_gettime (Realtime Clock)]: https://linux.die.net/man/3/clock_gettime
/// [__wasi_clock_time_get (Realtime Clock)]: https://github.com/WebAssembly/WASI/blob/main/legacy/preview1/docs.md#clock_time_get
/// [GetSystemTimePreciseAsFileTime]: https://docs.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-getsystemtimepreciseasfiletime
diff --git a/library/std/tests/switch-stdout.rs b/library/std/tests/switch-stdout.rs
index 2605664d2..27f3e8a9b 100644
--- a/library/std/tests/switch-stdout.rs
+++ b/library/std/tests/switch-stdout.rs
@@ -5,32 +5,48 @@ use std::io::{Read, Write};
mod common;
+#[cfg(windows)]
+use std::os::windows::io::OwnedHandle;
+
#[cfg(unix)]
-fn switch_stdout_to(file: File) {
+use std::os::fd::OwnedFd;
+
+#[cfg(unix)]
+fn switch_stdout_to(file: OwnedFd) -> OwnedFd {
use std::os::unix::prelude::*;
extern "C" {
+ fn dup(old: i32) -> i32;
fn dup2(old: i32, new: i32) -> i32;
}
unsafe {
+ let orig_fd = dup(1);
+ assert_ne!(orig_fd, -1);
+ let res = OwnedFd::from_raw_fd(orig_fd);
assert_eq!(dup2(file.as_raw_fd(), 1), 1);
+ res
}
}
#[cfg(windows)]
-fn switch_stdout_to(file: File) {
+fn switch_stdout_to(file: OwnedHandle) -> OwnedHandle {
use std::os::windows::prelude::*;
extern "system" {
+ fn GetStdHandle(nStdHandle: u32) -> *mut u8;
fn SetStdHandle(nStdHandle: u32, handle: *mut u8) -> i32;
}
const STD_OUTPUT_HANDLE: u32 = (-11i32) as u32;
+ const INVALID_HANDLE_VALUE: *mut u8 = !0 as *mut u8;
unsafe {
+ let orig_hdl = GetStdHandle(STD_OUTPUT_HANDLE);
+ assert!(!orig_hdl.is_null() && orig_hdl != INVALID_HANDLE_VALUE);
let rc = SetStdHandle(STD_OUTPUT_HANDLE, file.into_raw_handle() as *mut _);
assert!(rc != 0);
+ OwnedHandle::from_raw_handle(orig_hdl as _)
}
}
@@ -43,10 +59,12 @@ fn switch_stdout() {
let mut stdout = std::io::stdout();
stdout.write(b"foo\n").unwrap();
stdout.flush().unwrap();
- switch_stdout_to(f);
+ let orig_hdl = switch_stdout_to(f.into());
stdout.write(b"bar\n").unwrap();
stdout.flush().unwrap();
+ switch_stdout_to(orig_hdl);
+
let mut contents = String::new();
File::open(&path).unwrap().read_to_string(&mut contents).unwrap();
assert_eq!(contents, "bar\n");
diff --git a/library/stdarch/ci/docker/riscv64gc-unknown-linux-gnu/Dockerfile b/library/stdarch/ci/docker/riscv64gc-unknown-linux-gnu/Dockerfile
index 7ea795cac..af26b2682 100644
--- a/library/stdarch/ci/docker/riscv64gc-unknown-linux-gnu/Dockerfile
+++ b/library/stdarch/ci/docker/riscv64gc-unknown-linux-gnu/Dockerfile
@@ -2,8 +2,12 @@ FROM ubuntu:23.04
RUN apt-get update && apt-get install -y --no-install-recommends \
gcc libc6-dev qemu-user ca-certificates \
- gcc-riscv64-linux-gnu libc6-dev-riscv64-cross
+ gcc-riscv64-linux-gnu libc6-dev-riscv64-cross \
+ llvm
ENV CARGO_TARGET_RISCV64GC_UNKNOWN_LINUX_GNU_LINKER=riscv64-linux-gnu-gcc \
- CARGO_TARGET_RISCV64GC_UNKNOWN_LINUX_GNU_RUNNER="qemu-riscv64 -L /usr/riscv64-linux-gnu -cpu rv64,zk=true,zbb=true,zbc=true" \
- OBJDUMP=riscv64-linux-gnu-objdump
+ CARGO_TARGET_RISCV64GC_UNKNOWN_LINUX_GNU_RUNNER="qemu-riscv64 \
+ -L /usr/riscv64-linux-gnu \
+ -cpu rv64,zk=true,zks=true,zbb=true,zbc=true \
+ " \
+ OBJDUMP=llvm-objdump
diff --git a/library/stdarch/ci/run.sh b/library/stdarch/ci/run.sh
index 7b2416fda..20cd4c564 100755
--- a/library/stdarch/ci/run.sh
+++ b/library/stdarch/ci/run.sh
@@ -33,6 +33,11 @@ case ${TARGET} in
i686-* | i586-*)
export RUSTFLAGS="${RUSTFLAGS} -C relocation-model=static -Z plt=yes"
;;
+ # Some x86_64 targets enable by default more features beyond SSE2,
+ # which cause some instruction assertion checks to fail.
+ x86_64-*)
+ export RUSTFLAGS="${RUSTFLAGS} -C target-feature=-sse3"
+ ;;
#Unoptimized build uses fast-isel which breaks with msa
mips-* | mipsel-*)
export RUSTFLAGS="${RUSTFLAGS} -C llvm-args=-fast-isel=false"
@@ -47,7 +52,7 @@ case ${TARGET} in
# Some of our test dependencies use the deprecated `gcc` crates which
# doesn't detect RISC-V compilers automatically, so do it manually here.
riscv64*)
- export RUSTFLAGS="${RUSTFLAGS} -Ctarget-feature=+zk,+zbb,+zbc"
+ export RUSTFLAGS="${RUSTFLAGS} -Ctarget-feature=+zk,+zks,+zbb,+zbc"
export TARGET_CC="riscv64-linux-gnu-gcc"
;;
esac
diff --git a/library/stdarch/crates/assert-instr-macro/Cargo.toml b/library/stdarch/crates/assert-instr-macro/Cargo.toml
index 4ad654e69..881c8109c 100644
--- a/library/stdarch/crates/assert-instr-macro/Cargo.toml
+++ b/library/stdarch/crates/assert-instr-macro/Cargo.toml
@@ -11,4 +11,4 @@ test = false
[dependencies]
proc-macro2 = "1.0"
quote = "1.0"
-syn = { version = "1.0", features = ["full"] }
+syn = { version = "2.0", features = ["full"] }
diff --git a/library/stdarch/crates/assert-instr-macro/src/lib.rs b/library/stdarch/crates/assert-instr-macro/src/lib.rs
index 99e37c910..c9de43943 100644
--- a/library/stdarch/crates/assert-instr-macro/src/lib.rs
+++ b/library/stdarch/crates/assert-instr-macro/src/lib.rs
@@ -35,6 +35,15 @@ pub fn assert_instr(
let instr = &invoc.instr;
let name = &func.sig.ident;
+ let maybe_allow_deprecated = if func
+ .attrs
+ .iter()
+ .any(|attr| attr.path().is_ident("deprecated"))
+ {
+ quote! { #[allow(deprecated)] }
+ } else {
+ quote! {}
+ };
// Disable assert_instr for x86 targets compiled with avx enabled, which
// causes LLVM to generate different intrinsics that the ones we are
@@ -108,7 +117,7 @@ pub fn assert_instr(
.attrs
.iter()
.filter(|attr| {
- attr.path
+ attr.path()
.segments
.first()
.expect("attr.path.segments.first() failed")
@@ -135,6 +144,7 @@ pub fn assert_instr(
let to_test = if disable_dedup_guard {
quote! {
#attrs
+ #maybe_allow_deprecated
#[no_mangle]
#[inline(never)]
pub unsafe extern #abi fn #shim_name(#(#inputs),*) #ret {
@@ -147,6 +157,7 @@ pub fn assert_instr(
const #shim_name_ptr : *const u8 = #shim_name_str.as_ptr();
#attrs
+ #maybe_allow_deprecated
#[no_mangle]
#[inline(never)]
pub unsafe extern #abi fn #shim_name(#(#inputs),*) #ret {
diff --git a/library/stdarch/crates/core_arch/src/arm_shared/barrier/cp15.rs b/library/stdarch/crates/core_arch/src/arm_shared/barrier/cp15.rs
index 6faae0fee..fe540a7d8 100644
--- a/library/stdarch/crates/core_arch/src/arm_shared/barrier/cp15.rs
+++ b/library/stdarch/crates/core_arch/src/arm_shared/barrier/cp15.rs
@@ -11,7 +11,8 @@ impl super::super::sealed::Dmb for SY {
#[inline(always)]
unsafe fn __dmb(&self) {
asm!(
- "mcr p15, 0, r0, c7, c10, 5",
+ "mcr p15, 0, {}, c7, c10, 5",
+ in(reg) 0_u32,
options(preserves_flags, nostack)
)
}
@@ -21,7 +22,8 @@ impl super::super::sealed::Dsb for SY {
#[inline(always)]
unsafe fn __dsb(&self) {
asm!(
- "mcr p15, 0, r0, c7, c10, 4",
+ "mcr p15, 0, {}, c7, c10, 4",
+ in(reg) 0_u32,
options(preserves_flags, nostack)
)
}
@@ -31,7 +33,8 @@ impl super::super::sealed::Isb for SY {
#[inline(always)]
unsafe fn __isb(&self) {
asm!(
- "mcr p15, 0, r0, c7, c5, 4",
+ "mcr p15, 0, {}, c7, c5, 4",
+ in(reg) 0_u32,
options(preserves_flags, nostack)
)
}
diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/shift_and_insert_tests.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/shift_and_insert_tests.rs
index ebb8b7b9e..54bffa450 100644
--- a/library/stdarch/crates/core_arch/src/arm_shared/neon/shift_and_insert_tests.rs
+++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/shift_and_insert_tests.rs
@@ -22,7 +22,7 @@ macro_rules! test_vsli {
let a = [$($a as $t),*];
let b = [$($b as $t),*];
let n_bit_mask: $t = (1 << $n) - 1;
- let e = [$(($a as $t & n_bit_mask) | ($b as $t << $n)),*];
+ let e = [$(($a as $t & n_bit_mask) | (($b as $t) << $n)),*];
let r = $fn_id::<$n>(transmute(a), transmute(b));
let mut d = e;
d = transmute(r);
@@ -60,7 +60,7 @@ macro_rules! test_vsri {
unsafe fn $test_id() {
let a = [$($a as $t),*];
let b = [$($b as $t),*];
- let n_bit_mask = ((1 as $t << $n) - 1).rotate_right($n);
+ let n_bit_mask = (((1 as $t) << $n) - 1).rotate_right($n);
let e = [$(($a as $t & n_bit_mask) | (($b as $t >> $n) & !n_bit_mask)),*];
let r = $fn_id::<$n>(transmute(a), transmute(b));
let mut d = e;
diff --git a/library/stdarch/crates/core_arch/src/riscv64/zk.rs b/library/stdarch/crates/core_arch/src/riscv64/zk.rs
index 3dbe3705d..9b403fc95 100644
--- a/library/stdarch/crates/core_arch/src/riscv64/zk.rs
+++ b/library/stdarch/crates/core_arch/src/riscv64/zk.rs
@@ -20,6 +20,9 @@ extern "unadjusted" {
#[link_name = "llvm.riscv.aes64ks2"]
fn _aes64ks2(rs1: i64, rs2: i64) -> i64;
+ #[link_name = "llvm.riscv.aes64im"]
+ fn _aes64im(rs1: i64) -> i64;
+
#[link_name = "llvm.riscv.sha512sig0"]
fn _sha512sig0(rs1: i64) -> i64;
@@ -50,8 +53,7 @@ extern "unadjusted" {
///
/// This function is safe to use if the `zkne` target feature is present.
#[target_feature(enable = "zkne")]
-// See #1464
-// #[cfg_attr(test, assert_instr(aes64es))]
+#[cfg_attr(test, assert_instr(aes64es))]
#[inline]
pub unsafe fn aes64es(rs1: u64, rs2: u64) -> u64 {
_aes64es(rs1 as i64, rs2 as i64) as u64
@@ -74,8 +76,7 @@ pub unsafe fn aes64es(rs1: u64, rs2: u64) -> u64 {
///
/// This function is safe to use if the `zkne` target feature is present.
#[target_feature(enable = "zkne")]
-// See #1464
-// #[cfg_attr(test, assert_instr(aes64esm))]
+#[cfg_attr(test, assert_instr(aes64esm))]
#[inline]
pub unsafe fn aes64esm(rs1: u64, rs2: u64) -> u64 {
_aes64esm(rs1 as i64, rs2 as i64) as u64
@@ -98,8 +99,7 @@ pub unsafe fn aes64esm(rs1: u64, rs2: u64) -> u64 {
///
/// This function is safe to use if the `zknd` target feature is present.
#[target_feature(enable = "zknd")]
-// See #1464
-// #[cfg_attr(test, assert_instr(aes64ds))]
+#[cfg_attr(test, assert_instr(aes64ds))]
#[inline]
pub unsafe fn aes64ds(rs1: u64, rs2: u64) -> u64 {
_aes64ds(rs1 as i64, rs2 as i64) as u64
@@ -122,8 +122,7 @@ pub unsafe fn aes64ds(rs1: u64, rs2: u64) -> u64 {
///
/// This function is safe to use if the `zknd` target feature is present.
#[target_feature(enable = "zknd")]
-// See #1464
-// #[cfg_attr(test, assert_instr(aes64dsm))]
+#[cfg_attr(test, assert_instr(aes64dsm))]
#[inline]
pub unsafe fn aes64dsm(rs1: u64, rs2: u64) -> u64 {
_aes64dsm(rs1 as i64, rs2 as i64) as u64
@@ -152,8 +151,7 @@ pub unsafe fn aes64dsm(rs1: u64, rs2: u64) -> u64 {
/// This function is safe to use if the `zkne` or `zknd` target feature is present.
#[target_feature(enable = "zkne", enable = "zknd")]
#[rustc_legacy_const_generics(1)]
-// See #1464
-// #[cfg_attr(test, assert_instr(aes64ks1i, RNUM = 0))]
+#[cfg_attr(test, assert_instr(aes64ks1i, RNUM = 0))]
#[inline]
pub unsafe fn aes64ks1i<const RNUM: u8>(rs1: u64) -> u64 {
static_assert!(RNUM <= 10);
@@ -177,13 +175,36 @@ pub unsafe fn aes64ks1i<const RNUM: u8>(rs1: u64) -> u64 {
///
/// This function is safe to use if the `zkne` or `zknd` target feature is present.
#[target_feature(enable = "zkne", enable = "zknd")]
-// See #1464
-// #[cfg_attr(test, assert_instr(aes64ks2))]
+#[cfg_attr(test, assert_instr(aes64ks2))]
#[inline]
pub unsafe fn aes64ks2(rs1: u64, rs2: u64) -> u64 {
_aes64ks2(rs1 as i64, rs2 as i64) as u64
}
+/// This instruction accelerates the inverse MixColumns step of the AES Block Cipher, and is used to aid creation of
+/// the decryption KeySchedule.
+///
+/// The instruction applies the inverse MixColumns transformation to two columns of the state array, packed
+/// into a single 64-bit register. It is used to create the inverse cipher KeySchedule, according to the equivalent
+/// inverse cipher construction in (Page 23, Section 5.3.5). This instruction must always be implemented
+/// such that its execution latency does not depend on the data being operated on.
+///
+/// Source: RISC-V Cryptography Extensions Volume I: Scalar & Entropy Source Instructions
+///
+/// Version: v1.0.1
+///
+/// Section: 3.9
+///
+/// # Safety
+///
+/// This function is safe to use if the `zkne` or `zknd` target feature is present.
+#[target_feature(enable = "zkne", enable = "zknd")]
+#[cfg_attr(test, assert_instr(aes64im))]
+#[inline]
+pub unsafe fn aes64im(rs1: u64) -> u64 {
+ _aes64im(rs1 as i64) as u64
+}
+
/// Implements the Sigma0 transformation function as used in the SHA2-512 hash function \[49\]
/// (Section 4.1.3).
///
@@ -201,8 +222,7 @@ pub unsafe fn aes64ks2(rs1: u64, rs2: u64) -> u64 {
///
/// This function is safe to use if the `zknh` target feature is present.
#[target_feature(enable = "zknh")]
-// See #1464
-// #[cfg_attr(test, assert_instr(sha512sig0))]
+#[cfg_attr(test, assert_instr(sha512sig0))]
#[inline]
pub unsafe fn sha512sig0(rs1: u64) -> u64 {
_sha512sig0(rs1 as i64) as u64
@@ -225,8 +245,7 @@ pub unsafe fn sha512sig0(rs1: u64) -> u64 {
///
/// This function is safe to use if the `zknh` target feature is present.
#[target_feature(enable = "zknh")]
-// See #1464
-// #[cfg_attr(test, assert_instr(sha512sig1))]
+#[cfg_attr(test, assert_instr(sha512sig1))]
#[inline]
pub unsafe fn sha512sig1(rs1: u64) -> u64 {
_sha512sig1(rs1 as i64) as u64
@@ -249,8 +268,7 @@ pub unsafe fn sha512sig1(rs1: u64) -> u64 {
///
/// This function is safe to use if the `zknh` target feature is present.
#[target_feature(enable = "zknh")]
-// See #1464
-// #[cfg_attr(test, assert_instr(sha512sum0))]
+#[cfg_attr(test, assert_instr(sha512sum0))]
#[inline]
pub unsafe fn sha512sum0(rs1: u64) -> u64 {
_sha512sum0(rs1 as i64) as u64
@@ -273,8 +291,7 @@ pub unsafe fn sha512sum0(rs1: u64) -> u64 {
///
/// This function is safe to use if the `zknh` target feature is present.
#[target_feature(enable = "zknh")]
-// See #1464
-// #[cfg_attr(test, assert_instr(sha512sum1))]
+#[cfg_attr(test, assert_instr(sha512sum1))]
#[inline]
pub unsafe fn sha512sum1(rs1: u64) -> u64 {
_sha512sum1(rs1 as i64) as u64
diff --git a/library/stdarch/crates/core_arch/src/riscv_shared/zb.rs b/library/stdarch/crates/core_arch/src/riscv_shared/zb.rs
index cfae6caa5..6785c04fd 100644
--- a/library/stdarch/crates/core_arch/src/riscv_shared/zb.rs
+++ b/library/stdarch/crates/core_arch/src/riscv_shared/zb.rs
@@ -47,8 +47,7 @@ extern "unadjusted" {
///
/// This function is safe to use if the `zbb` target feature is present.
#[target_feature(enable = "zbb")]
-// See #1464
-// #[cfg_attr(test, assert_instr(orc.b))]
+#[cfg_attr(test, assert_instr(orc.b))]
#[inline]
pub unsafe fn orc_b(rs: usize) -> usize {
#[cfg(target_arch = "riscv32")]
@@ -76,8 +75,7 @@ pub unsafe fn orc_b(rs: usize) -> usize {
///
/// This function is safe to use if the `zbc` target feature is present.
#[target_feature(enable = "zbc")]
-// See #1464
-// #[cfg_attr(test, assert_instr(clmul))]
+#[cfg_attr(test, assert_instr(clmul))]
#[inline]
pub unsafe fn clmul(rs1: usize, rs2: usize) -> usize {
#[cfg(target_arch = "riscv32")]
@@ -105,8 +103,7 @@ pub unsafe fn clmul(rs1: usize, rs2: usize) -> usize {
///
/// This function is safe to use if the `zbc` target feature is present.
#[target_feature(enable = "zbc")]
-// See #1464
-// #[cfg_attr(test, assert_instr(clmulh))]
+#[cfg_attr(test, assert_instr(clmulh))]
#[inline]
pub unsafe fn clmulh(rs1: usize, rs2: usize) -> usize {
#[cfg(target_arch = "riscv32")]
@@ -134,8 +131,7 @@ pub unsafe fn clmulh(rs1: usize, rs2: usize) -> usize {
///
/// This function is safe to use if the `zbc` target feature is present.
#[target_feature(enable = "zbc")]
-// See #1464
-// #[cfg_attr(test, assert_instr(clmulr))]
+#[cfg_attr(test, assert_instr(clmulr))]
#[inline]
pub unsafe fn clmulr(rs1: usize, rs2: usize) -> usize {
#[cfg(target_arch = "riscv32")]
diff --git a/library/stdarch/crates/core_arch/src/riscv_shared/zk.rs b/library/stdarch/crates/core_arch/src/riscv_shared/zk.rs
index db97f72bc..5fc5b4cda 100644
--- a/library/stdarch/crates/core_arch/src/riscv_shared/zk.rs
+++ b/library/stdarch/crates/core_arch/src/riscv_shared/zk.rs
@@ -62,8 +62,7 @@ extern "unadjusted" {
///
/// This function is safe to use if the `zbkx` target feature is present.
#[target_feature(enable = "zbkx")]
-// See #1464
-// #[cfg_attr(test, assert_instr(xperm8))]
+#[cfg_attr(test, assert_instr(xperm8))]
#[inline]
pub unsafe fn xperm8(rs1: usize, rs2: usize) -> usize {
#[cfg(target_arch = "riscv32")]
@@ -94,8 +93,7 @@ pub unsafe fn xperm8(rs1: usize, rs2: usize) -> usize {
///
/// This function is safe to use if the `zbkx` target feature is present.
#[target_feature(enable = "zbkx")]
-// See #1464
-// #[cfg_attr(test, assert_instr(xperm4))]
+#[cfg_attr(test, assert_instr(xperm4))]
#[inline]
pub unsafe fn xperm4(rs1: usize, rs2: usize) -> usize {
#[cfg(target_arch = "riscv32")]
@@ -129,8 +127,7 @@ pub unsafe fn xperm4(rs1: usize, rs2: usize) -> usize {
///
/// This function is safe to use if the `zknh` target feature is present.
#[target_feature(enable = "zknh")]
-// See #1464
-// #[cfg_attr(test, assert_instr(sha256sig0))]
+#[cfg_attr(test, assert_instr(sha256sig0))]
#[inline]
pub unsafe fn sha256sig0(rs1: u32) -> u32 {
_sha256sig0(rs1 as i32) as u32
@@ -156,8 +153,7 @@ pub unsafe fn sha256sig0(rs1: u32) -> u32 {
///
/// This function is safe to use if the `zknh` target feature is present.
#[target_feature(enable = "zknh")]
-// See #1464
-// #[cfg_attr(test, assert_instr(sha256sig1))]
+#[cfg_attr(test, assert_instr(sha256sig1))]
#[inline]
pub unsafe fn sha256sig1(rs1: u32) -> u32 {
_sha256sig1(rs1 as i32) as u32
@@ -183,8 +179,7 @@ pub unsafe fn sha256sig1(rs1: u32) -> u32 {
///
/// This function is safe to use if the `zknh` target feature is present.
#[target_feature(enable = "zknh")]
-// See #1464
-// #[cfg_attr(test, assert_instr(sha256sum0))]
+#[cfg_attr(test, assert_instr(sha256sum0))]
#[inline]
pub unsafe fn sha256sum0(rs1: u32) -> u32 {
_sha256sum0(rs1 as i32) as u32
@@ -210,8 +205,7 @@ pub unsafe fn sha256sum0(rs1: u32) -> u32 {
///
/// This function is safe to use if the `zknh` target feature is present.
#[target_feature(enable = "zknh")]
-// See #1464
-// #[cfg_attr(test, assert_instr(sha256sum1))]
+#[cfg_attr(test, assert_instr(sha256sum1))]
#[inline]
pub unsafe fn sha256sum1(rs1: u32) -> u32 {
_sha256sum1(rs1 as i32) as u32
@@ -288,8 +282,7 @@ pub unsafe fn sha256sum1(rs1: u32) -> u32 {
/// ```
#[target_feature(enable = "zksed")]
#[rustc_legacy_const_generics(2)]
-// See #1464
-// #[cfg_attr(test, assert_instr(sm4ed, BS = 0))]
+#[cfg_attr(test, assert_instr(sm4ed, BS = 0))]
#[inline]
pub unsafe fn sm4ed<const BS: u8>(rs1: u32, rs2: u32) -> u32 {
static_assert!(BS < 4);
@@ -368,8 +361,7 @@ pub unsafe fn sm4ed<const BS: u8>(rs1: u32, rs2: u32) -> u32 {
/// ```
#[target_feature(enable = "zksed")]
#[rustc_legacy_const_generics(2)]
-// See #1464
-// #[cfg_attr(test, assert_instr(sm4ks, BS = 0))]
+#[cfg_attr(test, assert_instr(sm4ks, BS = 0))]
#[inline]
pub unsafe fn sm4ks<const BS: u8>(rs1: u32, rs2: u32) -> u32 {
static_assert!(BS < 4);
@@ -409,8 +401,7 @@ pub unsafe fn sm4ks<const BS: u8>(rs1: u32, rs2: u32) -> u32 {
/// compression function `CF` uses the intermediate value `TT2` to calculate
/// the variable `E` in one iteration for subsequent processes.
#[target_feature(enable = "zksh")]
-// See #1464
-// #[cfg_attr(test, assert_instr(sm3p0))]
+#[cfg_attr(test, assert_instr(sm3p0))]
#[inline]
pub unsafe fn sm3p0(rs1: u32) -> u32 {
_sm3p0(rs1 as i32) as u32
@@ -454,8 +445,7 @@ pub unsafe fn sm3p0(rs1: u32) -> u32 {
/// ENDFOR
/// ```
#[target_feature(enable = "zksh")]
-// See #1464
-// #[cfg_attr(test, assert_instr(sm3p1))]
+#[cfg_attr(test, assert_instr(sm3p1))]
#[inline]
pub unsafe fn sm3p1(rs1: u32) -> u32 {
_sm3p1(rs1 as i32) as u32
diff --git a/library/stdarch/crates/core_arch/src/x86/avx.rs b/library/stdarch/crates/core_arch/src/x86/avx.rs
index 00bcc1fa1..de5dc05b8 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx.rs
@@ -268,7 +268,11 @@ pub unsafe fn _mm256_mul_ps(a: __m256, b: __m256) -> __m256 {
#[cfg_attr(test, assert_instr(vaddsubpd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_addsub_pd(a: __m256d, b: __m256d) -> __m256d {
- addsubpd256(a, b)
+ let a = a.as_f64x4();
+ let b = b.as_f64x4();
+ let add = simd_add(a, b);
+ let sub = simd_sub(a, b);
+ simd_shuffle!(add, sub, [4, 1, 6, 3])
}
/// Alternatively adds and subtracts packed single-precision (32-bit)
@@ -280,7 +284,11 @@ pub unsafe fn _mm256_addsub_pd(a: __m256d, b: __m256d) -> __m256d {
#[cfg_attr(test, assert_instr(vaddsubps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_addsub_ps(a: __m256, b: __m256) -> __m256 {
- addsubps256(a, b)
+ let a = a.as_f32x8();
+ let b = b.as_f32x8();
+ let add = simd_add(a, b);
+ let sub = simd_sub(a, b);
+ simd_shuffle!(add, sub, [8, 1, 10, 3, 12, 5, 14, 7])
}
/// Subtracts packed double-precision (64-bit) floating-point elements in `b`
@@ -511,7 +519,8 @@ pub unsafe fn _mm256_blend_ps<const IMM8: i32>(a: __m256, b: __m256) -> __m256 {
#[cfg_attr(test, assert_instr(vblendvpd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_blendv_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d {
- vblendvpd(a, b, c)
+ let mask: i64x4 = simd_lt(transmute::<_, i64x4>(c), i64x4::splat(0));
+ transmute(simd_select(mask, b.as_f64x4(), a.as_f64x4()))
}
/// Blends packed single-precision (32-bit) floating-point elements from
@@ -523,7 +532,8 @@ pub unsafe fn _mm256_blendv_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d {
#[cfg_attr(test, assert_instr(vblendvps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_blendv_ps(a: __m256, b: __m256, c: __m256) -> __m256 {
- vblendvps(a, b, c)
+ let mask: i32x8 = simd_lt(transmute::<_, i32x8>(c), i32x8::splat(0));
+ transmute(simd_select(mask, b.as_f32x8(), a.as_f32x8()))
}
/// Conditionally multiplies the packed single-precision (32-bit) floating-point
@@ -2056,7 +2066,10 @@ pub unsafe fn _mm_testnzc_ps(a: __m128, b: __m128) -> i32 {
#[cfg_attr(test, assert_instr(vmovmskpd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_movemask_pd(a: __m256d) -> i32 {
- movmskpd256(a)
+ // Propagate the highest bit to the rest, because simd_bitmask
+ // requires all-1 or all-0.
+ let mask: i64x4 = simd_lt(transmute(a), i64x4::splat(0));
+ simd_bitmask::<i64x4, u8>(mask).into()
}
/// Sets each bit of the returned mask based on the most significant bit of the
@@ -2069,7 +2082,10 @@ pub unsafe fn _mm256_movemask_pd(a: __m256d) -> i32 {
#[cfg_attr(test, assert_instr(vmovmskps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_movemask_ps(a: __m256) -> i32 {
- movmskps256(a)
+ // Propagate the highest bit to the rest, because simd_bitmask
+ // requires all-1 or all-0.
+ let mask: i32x8 = simd_lt(transmute(a), i32x8::splat(0));
+ simd_bitmask::<i32x8, u8>(mask).into()
}
/// Returns vector of type __m256d with all elements set to zero.
@@ -2904,20 +2920,12 @@ pub unsafe fn _mm256_cvtss_f32(a: __m256) -> f32 {
// LLVM intrinsics used in the above functions
#[allow(improper_ctypes)]
extern "C" {
- #[link_name = "llvm.x86.avx.addsub.pd.256"]
- fn addsubpd256(a: __m256d, b: __m256d) -> __m256d;
- #[link_name = "llvm.x86.avx.addsub.ps.256"]
- fn addsubps256(a: __m256, b: __m256) -> __m256;
#[link_name = "llvm.x86.avx.round.pd.256"]
fn roundpd256(a: __m256d, b: i32) -> __m256d;
#[link_name = "llvm.x86.avx.round.ps.256"]
fn roundps256(a: __m256, b: i32) -> __m256;
#[link_name = "llvm.x86.avx.sqrt.ps.256"]
fn sqrtps256(a: __m256) -> __m256;
- #[link_name = "llvm.x86.avx.blendv.pd.256"]
- fn vblendvpd(a: __m256d, b: __m256d, c: __m256d) -> __m256d;
- #[link_name = "llvm.x86.avx.blendv.ps.256"]
- fn vblendvps(a: __m256, b: __m256, c: __m256) -> __m256;
#[link_name = "llvm.x86.avx.dp.ps.256"]
fn vdpps(a: __m256, b: __m256, imm8: i32) -> __m256;
#[link_name = "llvm.x86.avx.hadd.pd.256"]
@@ -3026,10 +3034,6 @@ extern "C" {
fn vtestcps(a: __m128, b: __m128) -> i32;
#[link_name = "llvm.x86.avx.vtestnzc.ps"]
fn vtestnzcps(a: __m128, b: __m128) -> i32;
- #[link_name = "llvm.x86.avx.movmsk.pd.256"]
- fn movmskpd256(a: __m256d) -> i32;
- #[link_name = "llvm.x86.avx.movmsk.ps.256"]
- fn movmskps256(a: __m256) -> i32;
#[link_name = "llvm.x86.avx.min.ps.256"]
fn vminps(a: __m256, b: __m256) -> __m256;
#[link_name = "llvm.x86.avx.max.ps.256"]
diff --git a/library/stdarch/crates/core_arch/src/x86/avx2.rs b/library/stdarch/crates/core_arch/src/x86/avx2.rs
index e23c795ee..243a4cdab 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx2.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx2.rs
@@ -344,7 +344,10 @@ pub unsafe fn _mm256_andnot_si256(a: __m256i, b: __m256i) -> __m256i {
#[cfg_attr(test, assert_instr(vpavgw))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_avg_epu16(a: __m256i, b: __m256i) -> __m256i {
- transmute(pavgw(a.as_u16x16(), b.as_u16x16()))
+ let a = simd_cast::<_, u32x16>(a.as_u16x16());
+ let b = simd_cast::<_, u32x16>(b.as_u16x16());
+ let r = simd_shr(simd_add(simd_add(a, b), u32x16::splat(1)), u32x16::splat(1));
+ transmute(simd_cast::<_, u16x16>(r))
}
/// Averages packed unsigned 8-bit integers in `a` and `b`.
@@ -355,7 +358,10 @@ pub unsafe fn _mm256_avg_epu16(a: __m256i, b: __m256i) -> __m256i {
#[cfg_attr(test, assert_instr(vpavgb))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_avg_epu8(a: __m256i, b: __m256i) -> __m256i {
- transmute(pavgb(a.as_u8x32(), b.as_u8x32()))
+ let a = simd_cast::<_, u16x32>(a.as_u8x32());
+ let b = simd_cast::<_, u16x32>(b.as_u8x32());
+ let r = simd_shr(simd_add(simd_add(a, b), u16x32::splat(1)), u16x32::splat(1));
+ transmute(simd_cast::<_, u8x32>(r))
}
/// Blends packed 32-bit integers from `a` and `b` using control mask `IMM4`.
@@ -458,7 +464,8 @@ pub unsafe fn _mm256_blend_epi16<const IMM8: i32>(a: __m256i, b: __m256i) -> __m
#[cfg_attr(test, assert_instr(vpblendvb))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_blendv_epi8(a: __m256i, b: __m256i, mask: __m256i) -> __m256i {
- transmute(pblendvb(a.as_i8x32(), b.as_i8x32(), mask.as_i8x32()))
+ let mask: i8x32 = simd_lt(mask.as_i8x32(), i8x32::splat(0));
+ transmute(simd_select(mask, b.as_i8x32(), a.as_i8x32()))
}
/// Broadcasts the low packed 8-bit integer from `a` to all elements of
@@ -2060,7 +2067,9 @@ pub unsafe fn _mm256_mpsadbw_epu8<const IMM8: i32>(a: __m256i, b: __m256i) -> __
#[cfg_attr(test, assert_instr(vpmuldq))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_mul_epi32(a: __m256i, b: __m256i) -> __m256i {
- transmute(pmuldq(a.as_i32x8(), b.as_i32x8()))
+ let a = simd_cast::<_, i64x4>(simd_cast::<_, i32x4>(a.as_i64x4()));
+ let b = simd_cast::<_, i64x4>(simd_cast::<_, i32x4>(b.as_i64x4()));
+ transmute(simd_mul(a, b))
}
/// Multiplies the low unsigned 32-bit integers from each packed 64-bit
@@ -2074,7 +2083,10 @@ pub unsafe fn _mm256_mul_epi32(a: __m256i, b: __m256i) -> __m256i {
#[cfg_attr(test, assert_instr(vpmuludq))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_mul_epu32(a: __m256i, b: __m256i) -> __m256i {
- transmute(pmuludq(a.as_u32x8(), b.as_u32x8()))
+ let a = a.as_u64x4();
+ let b = b.as_u64x4();
+ let mask = u64x4::splat(u32::MAX.into());
+ transmute(simd_mul(simd_and(a, mask), simd_and(b, mask)))
}
/// Multiplies the packed 16-bit integers in `a` and `b`, producing
@@ -2087,7 +2099,10 @@ pub unsafe fn _mm256_mul_epu32(a: __m256i, b: __m256i) -> __m256i {
#[cfg_attr(test, assert_instr(vpmulhw))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_mulhi_epi16(a: __m256i, b: __m256i) -> __m256i {
- transmute(pmulhw(a.as_i16x16(), b.as_i16x16()))
+ let a = simd_cast::<_, i32x16>(a.as_i16x16());
+ let b = simd_cast::<_, i32x16>(b.as_i16x16());
+ let r = simd_shr(simd_mul(a, b), i32x16::splat(16));
+ transmute(simd_cast::<i32x16, i16x16>(r))
}
/// Multiplies the packed unsigned 16-bit integers in `a` and `b`, producing
@@ -2100,7 +2115,10 @@ pub unsafe fn _mm256_mulhi_epi16(a: __m256i, b: __m256i) -> __m256i {
#[cfg_attr(test, assert_instr(vpmulhuw))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_mulhi_epu16(a: __m256i, b: __m256i) -> __m256i {
- transmute(pmulhuw(a.as_u16x16(), b.as_u16x16()))
+ let a = simd_cast::<_, u32x16>(a.as_u16x16());
+ let b = simd_cast::<_, u32x16>(b.as_u16x16());
+ let r = simd_shr(simd_mul(a, b), u32x16::splat(16));
+ transmute(simd_cast::<u32x16, u16x16>(r))
}
/// Multiplies the packed 16-bit integers in `a` and `b`, producing
@@ -3629,12 +3647,6 @@ extern "C" {
fn pabsw(a: i16x16) -> u16x16;
#[link_name = "llvm.x86.avx2.pabs.d"]
fn pabsd(a: i32x8) -> u32x8;
- #[link_name = "llvm.x86.avx2.pavg.b"]
- fn pavgb(a: u8x32, b: u8x32) -> u8x32;
- #[link_name = "llvm.x86.avx2.pavg.w"]
- fn pavgw(a: u16x16, b: u16x16) -> u16x16;
- #[link_name = "llvm.x86.avx2.pblendvb"]
- fn pblendvb(a: i8x32, b: i8x32, mask: i8x32) -> i8x32;
#[link_name = "llvm.x86.avx2.phadd.w"]
fn phaddw(a: i16x16, b: i16x16) -> i16x16;
#[link_name = "llvm.x86.avx2.phadd.d"]
@@ -3669,14 +3681,6 @@ extern "C" {
fn maskstoreq256(mem_addr: *mut i8, mask: i64x4, a: i64x4);
#[link_name = "llvm.x86.avx2.mpsadbw"]
fn mpsadbw(a: u8x32, b: u8x32, imm8: i32) -> u16x16;
- #[link_name = "llvm.x86.avx2.pmulhu.w"]
- fn pmulhuw(a: u16x16, b: u16x16) -> u16x16;
- #[link_name = "llvm.x86.avx2.pmulh.w"]
- fn pmulhw(a: i16x16, b: i16x16) -> i16x16;
- #[link_name = "llvm.x86.avx2.pmul.dq"]
- fn pmuldq(a: i32x8, b: i32x8) -> i64x4;
- #[link_name = "llvm.x86.avx2.pmulu.dq"]
- fn pmuludq(a: u32x8, b: u32x8) -> u64x4;
#[link_name = "llvm.x86.avx2.pmul.hr.sw"]
fn pmulhrsw(a: i16x16, b: i16x16) -> i16x16;
#[link_name = "llvm.x86.avx2.packsswb"]
diff --git a/library/stdarch/crates/core_arch/src/x86/avx512bitalg.rs b/library/stdarch/crates/core_arch/src/x86/avx512bitalg.rs
index 92e572eb1..ce4e402a8 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx512bitalg.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx512bitalg.rs
@@ -311,7 +311,7 @@ pub unsafe fn _mm_mask_popcnt_epi8(src: __m128i, k: __mmask16, a: __m128i) -> __
#[target_feature(enable = "avx512bitalg")]
#[cfg_attr(test, assert_instr(vpshufbitqmb))]
pub unsafe fn _mm512_bitshuffle_epi64_mask(b: __m512i, c: __m512i) -> __mmask64 {
- transmute(bitshuffle_512(b.as_i8x64(), c.as_i8x64(), !0))
+ bitshuffle_512(b.as_i8x64(), c.as_i8x64(), !0)
}
/// Considers the input `b` as packed 64-bit integers and `c` as packed 8-bit integers.
@@ -326,7 +326,7 @@ pub unsafe fn _mm512_bitshuffle_epi64_mask(b: __m512i, c: __m512i) -> __mmask64
#[target_feature(enable = "avx512bitalg")]
#[cfg_attr(test, assert_instr(vpshufbitqmb))]
pub unsafe fn _mm512_mask_bitshuffle_epi64_mask(k: __mmask64, b: __m512i, c: __m512i) -> __mmask64 {
- transmute(bitshuffle_512(b.as_i8x64(), c.as_i8x64(), k))
+ bitshuffle_512(b.as_i8x64(), c.as_i8x64(), k)
}
/// Considers the input `b` as packed 64-bit integers and `c` as packed 8-bit integers.
@@ -338,7 +338,7 @@ pub unsafe fn _mm512_mask_bitshuffle_epi64_mask(k: __mmask64, b: __m512i, c: __m
#[target_feature(enable = "avx512bitalg,avx512vl")]
#[cfg_attr(test, assert_instr(vpshufbitqmb))]
pub unsafe fn _mm256_bitshuffle_epi64_mask(b: __m256i, c: __m256i) -> __mmask32 {
- transmute(bitshuffle_256(b.as_i8x32(), c.as_i8x32(), !0))
+ bitshuffle_256(b.as_i8x32(), c.as_i8x32(), !0)
}
/// Considers the input `b` as packed 64-bit integers and `c` as packed 8-bit integers.
@@ -353,7 +353,7 @@ pub unsafe fn _mm256_bitshuffle_epi64_mask(b: __m256i, c: __m256i) -> __mmask32
#[target_feature(enable = "avx512bitalg,avx512vl")]
#[cfg_attr(test, assert_instr(vpshufbitqmb))]
pub unsafe fn _mm256_mask_bitshuffle_epi64_mask(k: __mmask32, b: __m256i, c: __m256i) -> __mmask32 {
- transmute(bitshuffle_256(b.as_i8x32(), c.as_i8x32(), k))
+ bitshuffle_256(b.as_i8x32(), c.as_i8x32(), k)
}
/// Considers the input `b` as packed 64-bit integers and `c` as packed 8-bit integers.
@@ -365,7 +365,7 @@ pub unsafe fn _mm256_mask_bitshuffle_epi64_mask(k: __mmask32, b: __m256i, c: __m
#[target_feature(enable = "avx512bitalg,avx512vl")]
#[cfg_attr(test, assert_instr(vpshufbitqmb))]
pub unsafe fn _mm_bitshuffle_epi64_mask(b: __m128i, c: __m128i) -> __mmask16 {
- transmute(bitshuffle_128(b.as_i8x16(), c.as_i8x16(), !0))
+ bitshuffle_128(b.as_i8x16(), c.as_i8x16(), !0)
}
/// Considers the input `b` as packed 64-bit integers and `c` as packed 8-bit integers.
@@ -380,7 +380,7 @@ pub unsafe fn _mm_bitshuffle_epi64_mask(b: __m128i, c: __m128i) -> __mmask16 {
#[target_feature(enable = "avx512bitalg,avx512vl")]
#[cfg_attr(test, assert_instr(vpshufbitqmb))]
pub unsafe fn _mm_mask_bitshuffle_epi64_mask(k: __mmask16, b: __m128i, c: __m128i) -> __mmask16 {
- transmute(bitshuffle_128(b.as_i8x16(), c.as_i8x16(), k))
+ bitshuffle_128(b.as_i8x16(), c.as_i8x16(), k)
}
#[cfg(test)]
diff --git a/library/stdarch/crates/core_arch/src/x86/avx512bw.rs b/library/stdarch/crates/core_arch/src/x86/avx512bw.rs
index 364023539..0b4a56d36 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx512bw.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx512bw.rs
@@ -3703,8 +3703,7 @@ pub unsafe fn _mm512_cmp_epu16_mask<const IMM8: i32>(a: __m512i, b: __m512i) ->
static_assert_uimm_bits!(IMM8, 3);
let a = a.as_u16x32();
let b = b.as_u16x32();
- let r = vpcmpuw(a, b, IMM8, 0b11111111_11111111_11111111_11111111);
- transmute(r)
+ vpcmpuw(a, b, IMM8, 0b11111111_11111111_11111111_11111111)
}
/// Compare packed unsigned 16-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
@@ -3722,8 +3721,7 @@ pub unsafe fn _mm512_mask_cmp_epu16_mask<const IMM8: i32>(
static_assert_uimm_bits!(IMM8, 3);
let a = a.as_u16x32();
let b = b.as_u16x32();
- let r = vpcmpuw(a, b, IMM8, k1);
- transmute(r)
+ vpcmpuw(a, b, IMM8, k1)
}
/// Compare packed unsigned 16-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
@@ -3737,8 +3735,7 @@ pub unsafe fn _mm256_cmp_epu16_mask<const IMM8: i32>(a: __m256i, b: __m256i) ->
static_assert_uimm_bits!(IMM8, 3);
let a = a.as_u16x16();
let b = b.as_u16x16();
- let r = vpcmpuw256(a, b, IMM8, 0b11111111_11111111);
- transmute(r)
+ vpcmpuw256(a, b, IMM8, 0b11111111_11111111)
}
/// Compare packed unsigned 16-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
@@ -3756,8 +3753,7 @@ pub unsafe fn _mm256_mask_cmp_epu16_mask<const IMM8: i32>(
static_assert_uimm_bits!(IMM8, 3);
let a = a.as_u16x16();
let b = b.as_u16x16();
- let r = vpcmpuw256(a, b, IMM8, k1);
- transmute(r)
+ vpcmpuw256(a, b, IMM8, k1)
}
/// Compare packed unsigned 16-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
@@ -3771,8 +3767,7 @@ pub unsafe fn _mm_cmp_epu16_mask<const IMM8: i32>(a: __m128i, b: __m128i) -> __m
static_assert_uimm_bits!(IMM8, 3);
let a = a.as_u16x8();
let b = b.as_u16x8();
- let r = vpcmpuw128(a, b, IMM8, 0b11111111);
- transmute(r)
+ vpcmpuw128(a, b, IMM8, 0b11111111)
}
/// Compare packed unsigned 16-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
@@ -3790,8 +3785,7 @@ pub unsafe fn _mm_mask_cmp_epu16_mask<const IMM8: i32>(
static_assert_uimm_bits!(IMM8, 3);
let a = a.as_u16x8();
let b = b.as_u16x8();
- let r = vpcmpuw128(a, b, IMM8, k1);
- transmute(r)
+ vpcmpuw128(a, b, IMM8, k1)
}
/// Compare packed unsigned 8-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
@@ -3805,13 +3799,12 @@ pub unsafe fn _mm512_cmp_epu8_mask<const IMM8: i32>(a: __m512i, b: __m512i) -> _
static_assert_uimm_bits!(IMM8, 3);
let a = a.as_u8x64();
let b = b.as_u8x64();
- let r = vpcmpub(
+ vpcmpub(
a,
b,
IMM8,
0b11111111_11111111_11111111_11111111_11111111_11111111_11111111_11111111,
- );
- transmute(r)
+ )
}
/// Compare packed unsigned 8-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
@@ -3829,8 +3822,7 @@ pub unsafe fn _mm512_mask_cmp_epu8_mask<const IMM8: i32>(
static_assert_uimm_bits!(IMM8, 3);
let a = a.as_u8x64();
let b = b.as_u8x64();
- let r = vpcmpub(a, b, IMM8, k1);
- transmute(r)
+ vpcmpub(a, b, IMM8, k1)
}
/// Compare packed unsigned 8-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
@@ -3844,8 +3836,7 @@ pub unsafe fn _mm256_cmp_epu8_mask<const IMM8: i32>(a: __m256i, b: __m256i) -> _
static_assert_uimm_bits!(IMM8, 3);
let a = a.as_u8x32();
let b = b.as_u8x32();
- let r = vpcmpub256(a, b, IMM8, 0b11111111_11111111_11111111_11111111);
- transmute(r)
+ vpcmpub256(a, b, IMM8, 0b11111111_11111111_11111111_11111111)
}
/// Compare packed unsigned 8-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
@@ -3863,8 +3854,7 @@ pub unsafe fn _mm256_mask_cmp_epu8_mask<const IMM8: i32>(
static_assert_uimm_bits!(IMM8, 3);
let a = a.as_u8x32();
let b = b.as_u8x32();
- let r = vpcmpub256(a, b, IMM8, k1);
- transmute(r)
+ vpcmpub256(a, b, IMM8, k1)
}
/// Compare packed unsigned 8-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
@@ -3878,8 +3868,7 @@ pub unsafe fn _mm_cmp_epu8_mask<const IMM8: i32>(a: __m128i, b: __m128i) -> __mm
static_assert_uimm_bits!(IMM8, 3);
let a = a.as_u8x16();
let b = b.as_u8x16();
- let r = vpcmpub128(a, b, IMM8, 0b11111111_11111111);
- transmute(r)
+ vpcmpub128(a, b, IMM8, 0b11111111_11111111)
}
/// Compare packed unsigned 8-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
@@ -3897,8 +3886,7 @@ pub unsafe fn _mm_mask_cmp_epu8_mask<const IMM8: i32>(
static_assert_uimm_bits!(IMM8, 3);
let a = a.as_u8x16();
let b = b.as_u8x16();
- let r = vpcmpub128(a, b, IMM8, k1);
- transmute(r)
+ vpcmpub128(a, b, IMM8, k1)
}
/// Compare packed signed 16-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
@@ -3912,8 +3900,7 @@ pub unsafe fn _mm512_cmp_epi16_mask<const IMM8: i32>(a: __m512i, b: __m512i) ->
static_assert_uimm_bits!(IMM8, 3);
let a = a.as_i16x32();
let b = b.as_i16x32();
- let r = vpcmpw(a, b, IMM8, 0b11111111_11111111_11111111_11111111);
- transmute(r)
+ vpcmpw(a, b, IMM8, 0b11111111_11111111_11111111_11111111)
}
/// Compare packed signed 16-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
@@ -3931,8 +3918,7 @@ pub unsafe fn _mm512_mask_cmp_epi16_mask<const IMM8: i32>(
static_assert_uimm_bits!(IMM8, 3);
let a = a.as_i16x32();
let b = b.as_i16x32();
- let r = vpcmpw(a, b, IMM8, k1);
- transmute(r)
+ vpcmpw(a, b, IMM8, k1)
}
/// Compare packed signed 16-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
@@ -3946,8 +3932,7 @@ pub unsafe fn _mm256_cmp_epi16_mask<const IMM8: i32>(a: __m256i, b: __m256i) ->
static_assert_uimm_bits!(IMM8, 3);
let a = a.as_i16x16();
let b = b.as_i16x16();
- let r = vpcmpw256(a, b, IMM8, 0b11111111_11111111);
- transmute(r)
+ vpcmpw256(a, b, IMM8, 0b11111111_11111111)
}
/// Compare packed signed 16-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
@@ -3965,8 +3950,7 @@ pub unsafe fn _mm256_mask_cmp_epi16_mask<const IMM8: i32>(
static_assert_uimm_bits!(IMM8, 3);
let a = a.as_i16x16();
let b = b.as_i16x16();
- let r = vpcmpw256(a, b, IMM8, k1);
- transmute(r)
+ vpcmpw256(a, b, IMM8, k1)
}
/// Compare packed signed 16-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
@@ -3980,8 +3964,7 @@ pub unsafe fn _mm_cmp_epi16_mask<const IMM8: i32>(a: __m128i, b: __m128i) -> __m
static_assert_uimm_bits!(IMM8, 3);
let a = a.as_i16x8();
let b = b.as_i16x8();
- let r = vpcmpw128(a, b, IMM8, 0b11111111);
- transmute(r)
+ vpcmpw128(a, b, IMM8, 0b11111111)
}
/// Compare packed signed 16-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
@@ -3999,8 +3982,7 @@ pub unsafe fn _mm_mask_cmp_epi16_mask<const IMM8: i32>(
static_assert_uimm_bits!(IMM8, 3);
let a = a.as_i16x8();
let b = b.as_i16x8();
- let r = vpcmpw128(a, b, IMM8, k1);
- transmute(r)
+ vpcmpw128(a, b, IMM8, k1)
}
/// Compare packed signed 8-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
@@ -4014,13 +3996,12 @@ pub unsafe fn _mm512_cmp_epi8_mask<const IMM8: i32>(a: __m512i, b: __m512i) -> _
static_assert_uimm_bits!(IMM8, 3);
let a = a.as_i8x64();
let b = b.as_i8x64();
- let r = vpcmpb(
+ vpcmpb(
a,
b,
IMM8,
0b11111111_11111111_11111111_11111111_11111111_11111111_11111111_11111111,
- );
- transmute(r)
+ )
}
/// Compare packed signed 8-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
@@ -4038,8 +4019,7 @@ pub unsafe fn _mm512_mask_cmp_epi8_mask<const IMM8: i32>(
static_assert_uimm_bits!(IMM8, 3);
let a = a.as_i8x64();
let b = b.as_i8x64();
- let r = vpcmpb(a, b, IMM8, k1);
- transmute(r)
+ vpcmpb(a, b, IMM8, k1)
}
/// Compare packed signed 8-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
@@ -4053,8 +4033,7 @@ pub unsafe fn _mm256_cmp_epi8_mask<const IMM8: i32>(a: __m256i, b: __m256i) -> _
static_assert_uimm_bits!(IMM8, 3);
let a = a.as_i8x32();
let b = b.as_i8x32();
- let r = vpcmpb256(a, b, IMM8, 0b11111111_11111111_11111111_11111111);
- transmute(r)
+ vpcmpb256(a, b, IMM8, 0b11111111_11111111_11111111_11111111)
}
/// Compare packed signed 8-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
@@ -4072,8 +4051,7 @@ pub unsafe fn _mm256_mask_cmp_epi8_mask<const IMM8: i32>(
static_assert_uimm_bits!(IMM8, 3);
let a = a.as_i8x32();
let b = b.as_i8x32();
- let r = vpcmpb256(a, b, IMM8, k1);
- transmute(r)
+ vpcmpb256(a, b, IMM8, k1)
}
/// Compare packed signed 8-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
@@ -4087,8 +4065,7 @@ pub unsafe fn _mm_cmp_epi8_mask<const IMM8: i32>(a: __m128i, b: __m128i) -> __mm
static_assert_uimm_bits!(IMM8, 3);
let a = a.as_i8x16();
let b = b.as_i8x16();
- let r = vpcmpb128(a, b, IMM8, 0b11111111_11111111);
- transmute(r)
+ vpcmpb128(a, b, IMM8, 0b11111111_11111111)
}
/// Compare packed signed 8-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
@@ -4106,8 +4083,7 @@ pub unsafe fn _mm_mask_cmp_epi8_mask<const IMM8: i32>(
static_assert_uimm_bits!(IMM8, 3);
let a = a.as_i8x16();
let b = b.as_i8x16();
- let r = vpcmpb128(a, b, IMM8, k1);
- transmute(r)
+ vpcmpb128(a, b, IMM8, k1)
}
/// Load 512-bits (composed of 32 packed 16-bit integers) from memory into dst. mem_addr does not need to be aligned on any particular boundary.
@@ -8566,7 +8542,7 @@ pub unsafe fn _mm_movm_epi8(k: __mmask16) -> __m128i {
#[inline]
#[target_feature(enable = "avx512bw")]
pub unsafe fn _kadd_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
- transmute(a + b)
+ a + b
}
/// Add 64-bit masks in a and b, and store the result in k.
@@ -8575,7 +8551,7 @@ pub unsafe fn _kadd_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
#[inline]
#[target_feature(enable = "avx512bw")]
pub unsafe fn _kadd_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
- transmute(a + b)
+ a + b
}
/// Compute the bitwise AND of 32-bit masks a and b, and store the result in k.
@@ -8584,7 +8560,7 @@ pub unsafe fn _kadd_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
#[inline]
#[target_feature(enable = "avx512bw")]
pub unsafe fn _kand_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
- transmute(a & b)
+ a & b
}
/// Compute the bitwise AND of 64-bit masks a and b, and store the result in k.
@@ -8593,7 +8569,7 @@ pub unsafe fn _kand_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
#[inline]
#[target_feature(enable = "avx512bw")]
pub unsafe fn _kand_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
- transmute(a & b)
+ a & b
}
/// Compute the bitwise NOT of 32-bit mask a, and store the result in k.
@@ -8602,7 +8578,7 @@ pub unsafe fn _kand_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
#[inline]
#[target_feature(enable = "avx512bw")]
pub unsafe fn _knot_mask32(a: __mmask32) -> __mmask32 {
- transmute(a ^ 0b11111111_11111111_11111111_11111111)
+ a ^ 0b11111111_11111111_11111111_11111111
}
/// Compute the bitwise NOT of 64-bit mask a, and store the result in k.
@@ -8611,7 +8587,7 @@ pub unsafe fn _knot_mask32(a: __mmask32) -> __mmask32 {
#[inline]
#[target_feature(enable = "avx512bw")]
pub unsafe fn _knot_mask64(a: __mmask64) -> __mmask64 {
- transmute(a ^ 0b11111111_11111111_11111111_11111111_11111111_11111111_11111111_11111111)
+ a ^ 0b11111111_11111111_11111111_11111111_11111111_11111111_11111111_11111111
}
/// Compute the bitwise NOT of 32-bit masks a and then AND with b, and store the result in k.
@@ -8620,7 +8596,7 @@ pub unsafe fn _knot_mask64(a: __mmask64) -> __mmask64 {
#[inline]
#[target_feature(enable = "avx512bw")]
pub unsafe fn _kandn_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
- transmute(_knot_mask32(a) & b)
+ _knot_mask32(a) & b
}
/// Compute the bitwise NOT of 64-bit masks a and then AND with b, and store the result in k.
@@ -8629,7 +8605,7 @@ pub unsafe fn _kandn_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
#[inline]
#[target_feature(enable = "avx512bw")]
pub unsafe fn _kandn_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
- transmute(_knot_mask64(a) & b)
+ _knot_mask64(a) & b
}
/// Compute the bitwise OR of 32-bit masks a and b, and store the result in k.
@@ -8638,7 +8614,7 @@ pub unsafe fn _kandn_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
#[inline]
#[target_feature(enable = "avx512bw")]
pub unsafe fn _kor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
- transmute(a | b)
+ a | b
}
/// Compute the bitwise OR of 64-bit masks a and b, and store the result in k.
@@ -8647,7 +8623,7 @@ pub unsafe fn _kor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
#[inline]
#[target_feature(enable = "avx512bw")]
pub unsafe fn _kor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
- transmute(a | b)
+ a | b
}
/// Compute the bitwise XOR of 32-bit masks a and b, and store the result in k.
@@ -8656,7 +8632,7 @@ pub unsafe fn _kor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
#[inline]
#[target_feature(enable = "avx512bw")]
pub unsafe fn _kxor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
- transmute(a ^ b)
+ a ^ b
}
/// Compute the bitwise XOR of 64-bit masks a and b, and store the result in k.
@@ -8665,7 +8641,7 @@ pub unsafe fn _kxor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
#[inline]
#[target_feature(enable = "avx512bw")]
pub unsafe fn _kxor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
- transmute(a ^ b)
+ a ^ b
}
/// Compute the bitwise XNOR of 32-bit masks a and b, and store the result in k.
@@ -8674,7 +8650,7 @@ pub unsafe fn _kxor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
#[inline]
#[target_feature(enable = "avx512bw")]
pub unsafe fn _kxnor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
- transmute(_knot_mask32(a ^ b))
+ _knot_mask32(a ^ b)
}
/// Compute the bitwise XNOR of 64-bit masks a and b, and store the result in k.
@@ -8683,7 +8659,7 @@ pub unsafe fn _kxnor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
#[inline]
#[target_feature(enable = "avx512bw")]
pub unsafe fn _kxnor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
- transmute(_knot_mask64(a ^ b))
+ _knot_mask64(a ^ b)
}
/// Convert packed 16-bit integers in a to packed 8-bit integers with truncation, and store the results in dst.
diff --git a/library/stdarch/crates/core_arch/src/x86/avx512f.rs b/library/stdarch/crates/core_arch/src/x86/avx512f.rs
index 5412237ca..280135292 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx512f.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx512f.rs
@@ -17144,7 +17144,7 @@ pub unsafe fn _mm512_slli_epi32<const IMM8: u32>(a: __m512i) -> __m512i {
if IMM8 >= 32 {
_mm512_setzero_si512()
} else {
- transmute(simd_shl(a.as_u32x16(), u32x16::splat(IMM8 as u32)))
+ transmute(simd_shl(a.as_u32x16(), u32x16::splat(IMM8)))
}
}
@@ -20132,7 +20132,7 @@ pub unsafe fn _mm512_maskz_permutexvar_epi32(k: __mmask16, idx: __m512i, a: __m5
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //should be vpermd
pub unsafe fn _mm256_permutexvar_epi32(idx: __m256i, a: __m256i) -> __m256i {
- transmute(_mm256_permutevar8x32_epi32(a, idx)) // llvm use llvm.x86.avx2.permd
+ _mm256_permutevar8x32_epi32(a, idx) // llvm use llvm.x86.avx2.permd
}
/// Shuffle 32-bit integers in a across lanes using the corresponding index in idx, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
@@ -20284,7 +20284,7 @@ pub unsafe fn _mm512_maskz_permutexvar_ps(k: __mmask16, idx: __m512i, a: __m512)
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermps))]
pub unsafe fn _mm256_permutexvar_ps(idx: __m256i, a: __m256) -> __m256 {
- transmute(_mm256_permutevar8x32_ps(a, idx)) //llvm.x86.avx2.permps
+ _mm256_permutevar8x32_ps(a, idx) //llvm.x86.avx2.permps
}
/// Shuffle single-precision (32-bit) floating-point elements in a across lanes using the corresponding index in idx, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
@@ -23943,7 +23943,7 @@ pub unsafe fn _mm512_castsi512_pd(a: __m512i) -> __m512d {
#[cfg_attr(all(test, not(target_os = "windows")), assert_instr(vmovd))]
pub unsafe fn _mm512_cvtsi512_si32(a: __m512i) -> i32 {
let extract: i32 = simd_extract(a.as_i32x16(), 0);
- transmute(extract)
+ extract
}
/// Broadcast the low packed 32-bit integer from a to all elements of dst.
@@ -25744,7 +25744,7 @@ pub unsafe fn _mm512_andnot_si512(a: __m512i, b: __m512i) -> __m512i {
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(and))] // generate normal and code instead of kandw
pub unsafe fn _kand_mask16(a: __mmask16, b: __mmask16) -> __mmask16 {
- transmute(a & b)
+ a & b
}
/// Compute the bitwise AND of 16-bit masks a and b, and store the result in k.
@@ -25754,7 +25754,7 @@ pub unsafe fn _kand_mask16(a: __mmask16, b: __mmask16) -> __mmask16 {
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(and))] // generate normal and code instead of kandw
pub unsafe fn _mm512_kand(a: __mmask16, b: __mmask16) -> __mmask16 {
- transmute(a & b)
+ a & b
}
/// Compute the bitwise OR of 16-bit masks a and b, and store the result in k.
@@ -25764,7 +25764,7 @@ pub unsafe fn _mm512_kand(a: __mmask16, b: __mmask16) -> __mmask16 {
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(or))] // generate normal or code instead of korw
pub unsafe fn _kor_mask16(a: __mmask16, b: __mmask16) -> __mmask16 {
- transmute(a | b)
+ a | b
}
/// Compute the bitwise OR of 16-bit masks a and b, and store the result in k.
@@ -25774,7 +25774,7 @@ pub unsafe fn _kor_mask16(a: __mmask16, b: __mmask16) -> __mmask16 {
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(or))] // generate normal or code instead of korw
pub unsafe fn _mm512_kor(a: __mmask16, b: __mmask16) -> __mmask16 {
- transmute(a | b)
+ a | b
}
/// Compute the bitwise XOR of 16-bit masks a and b, and store the result in k.
@@ -25784,7 +25784,7 @@ pub unsafe fn _mm512_kor(a: __mmask16, b: __mmask16) -> __mmask16 {
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(xor))] // generate normal xor code instead of kxorw
pub unsafe fn _kxor_mask16(a: __mmask16, b: __mmask16) -> __mmask16 {
- transmute(a ^ b)
+ a ^ b
}
/// Compute the bitwise XOR of 16-bit masks a and b, and store the result in k.
@@ -25794,7 +25794,7 @@ pub unsafe fn _kxor_mask16(a: __mmask16, b: __mmask16) -> __mmask16 {
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(xor))] // generate normal xor code instead of kxorw
pub unsafe fn _mm512_kxor(a: __mmask16, b: __mmask16) -> __mmask16 {
- transmute(a ^ b)
+ a ^ b
}
/// Compute the bitwise NOT of 16-bit mask a, and store the result in k.
@@ -25803,7 +25803,7 @@ pub unsafe fn _mm512_kxor(a: __mmask16, b: __mmask16) -> __mmask16 {
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _knot_mask16(a: __mmask16) -> __mmask16 {
- transmute(a ^ 0b11111111_11111111)
+ a ^ 0b11111111_11111111
}
/// Compute the bitwise NOT of 16-bit mask a, and store the result in k.
@@ -25812,7 +25812,7 @@ pub unsafe fn _knot_mask16(a: __mmask16) -> __mmask16 {
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_knot(a: __mmask16) -> __mmask16 {
- transmute(a ^ 0b11111111_11111111)
+ a ^ 0b11111111_11111111
}
/// Compute the bitwise NOT of 16-bit masks a and then AND with b, and store the result in k.
@@ -25862,8 +25862,7 @@ pub unsafe fn _mm512_kxnor(a: __mmask16, b: __mmask16) -> __mmask16 {
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(mov))] // generate normal and code instead of kmovw
pub unsafe fn _mm512_kmov(a: __mmask16) -> __mmask16 {
- let r: u16 = a;
- transmute(r)
+ a
}
/// Converts integer mask into bitmask, storing the result in dst.
@@ -25872,8 +25871,7 @@ pub unsafe fn _mm512_kmov(a: __mmask16) -> __mmask16 {
#[inline]
#[target_feature(enable = "avx512f")] // generate normal and code instead of kmovw
pub unsafe fn _mm512_int2mask(mask: i32) -> __mmask16 {
- let r: u16 = mask as u16;
- transmute(r)
+ mask as u16
}
/// Converts bit mask k1 into an integer value, storing the results in dst.
@@ -25883,8 +25881,7 @@ pub unsafe fn _mm512_int2mask(mask: i32) -> __mmask16 {
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(mov))] // generate normal and code instead of kmovw
pub unsafe fn _mm512_mask2int(k1: __mmask16) -> i32 {
- let r: i32 = k1 as i32;
- transmute(r)
+ k1 as i32
}
/// Unpack and interleave 8 bits from masks a and b, and store the 16-bit result in k.
@@ -25896,7 +25893,7 @@ pub unsafe fn _mm512_mask2int(k1: __mmask16) -> i32 {
pub unsafe fn _mm512_kunpackb(a: __mmask16, b: __mmask16) -> __mmask16 {
let a = a & 0b00000000_11111111;
let b = b & 0b11111111_00000000;
- transmute(a | b)
+ a | b
}
/// Performs bitwise OR between k1 and k2, storing the result in dst. CF flag is set if dst consists of all 1's.
@@ -32352,8 +32349,7 @@ pub unsafe fn _mm_mask_move_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -
if (k & 0b00000001) != 0 {
mov = simd_extract(b, 0);
}
- let r = simd_insert(a, 0, mov);
- transmute(r)
+ simd_insert(a, 0, mov)
}
/// Move the lower single-precision (32-bit) floating-point element from b to the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
@@ -32367,8 +32363,7 @@ pub unsafe fn _mm_maskz_move_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 {
if (k & 0b00000001) != 0 {
mov = simd_extract(b, 0);
}
- let r = simd_insert(a, 0, mov);
- transmute(r)
+ simd_insert(a, 0, mov)
}
/// Move the lower double-precision (64-bit) floating-point element from b to the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
@@ -32383,8 +32378,7 @@ pub unsafe fn _mm_mask_move_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d
if (k & 0b00000001) != 0 {
mov = simd_extract(b, 0);
}
- let r = simd_insert(a, 0, mov);
- transmute(r)
+ simd_insert(a, 0, mov)
}
/// Move the lower double-precision (64-bit) floating-point element from b to the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
@@ -32398,8 +32392,7 @@ pub unsafe fn _mm_maskz_move_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d
if (k & 0b00000001) != 0 {
mov = simd_extract(b, 0);
}
- let r = simd_insert(a, 0, mov);
- transmute(r)
+ simd_insert(a, 0, mov)
}
/// Add the lower single-precision (32-bit) floating-point element in a and b, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
@@ -32416,8 +32409,7 @@ pub unsafe fn _mm_mask_add_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) ->
let extractb: f32 = simd_extract(b, 0);
add = extracta + extractb;
}
- let r = simd_insert(a, 0, add);
- transmute(r)
+ simd_insert(a, 0, add)
}
/// Add the lower single-precision (32-bit) floating-point element in a and b, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
@@ -32433,8 +32425,7 @@ pub unsafe fn _mm_maskz_add_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 {
let extractb: f32 = simd_extract(b, 0);
add = extracta + extractb;
}
- let r = simd_insert(a, 0, add);
- transmute(r)
+ simd_insert(a, 0, add)
}
/// Add the lower double-precision (64-bit) floating-point element in a and b, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
@@ -32451,8 +32442,7 @@ pub unsafe fn _mm_mask_add_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d)
let extractb: f64 = simd_extract(b, 0);
add = extracta + extractb;
}
- let r = simd_insert(a, 0, add);
- transmute(r)
+ simd_insert(a, 0, add)
}
/// Add the lower double-precision (64-bit) floating-point element in a and b, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
@@ -32468,8 +32458,7 @@ pub unsafe fn _mm_maskz_add_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d {
let extractb: f64 = simd_extract(b, 0);
add = extracta + extractb;
}
- let r = simd_insert(a, 0, add);
- transmute(r)
+ simd_insert(a, 0, add)
}
/// Subtract the lower single-precision (32-bit) floating-point element in b from the lower single-precision (32-bit) floating-point element in a, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
@@ -32486,8 +32475,7 @@ pub unsafe fn _mm_mask_sub_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) ->
let extractb: f32 = simd_extract(b, 0);
add = extracta - extractb;
}
- let r = simd_insert(a, 0, add);
- transmute(r)
+ simd_insert(a, 0, add)
}
/// Subtract the lower single-precision (32-bit) floating-point element in b from the lower single-precision (32-bit) floating-point element in a, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
@@ -32503,8 +32491,7 @@ pub unsafe fn _mm_maskz_sub_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 {
let extractb: f32 = simd_extract(b, 0);
add = extracta - extractb;
}
- let r = simd_insert(a, 0, add);
- transmute(r)
+ simd_insert(a, 0, add)
}
/// Subtract the lower double-precision (64-bit) floating-point element in b from the lower double-precision (64-bit) floating-point element in a, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
@@ -32521,8 +32508,7 @@ pub unsafe fn _mm_mask_sub_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d)
let extractb: f64 = simd_extract(b, 0);
add = extracta - extractb;
}
- let r = simd_insert(a, 0, add);
- transmute(r)
+ simd_insert(a, 0, add)
}
/// Subtract the lower double-precision (64-bit) floating-point element in b from the lower double-precision (64-bit) floating-point element in a, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
@@ -32538,8 +32524,7 @@ pub unsafe fn _mm_maskz_sub_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d {
let extractb: f64 = simd_extract(b, 0);
add = extracta - extractb;
}
- let r = simd_insert(a, 0, add);
- transmute(r)
+ simd_insert(a, 0, add)
}
/// Multiply the lower single-precision (32-bit) floating-point element in a and b, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
@@ -32556,8 +32541,7 @@ pub unsafe fn _mm_mask_mul_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) ->
let extractb: f32 = simd_extract(b, 0);
add = extracta * extractb;
}
- let r = simd_insert(a, 0, add);
- transmute(r)
+ simd_insert(a, 0, add)
}
/// Multiply the lower single-precision (32-bit) floating-point element in a and b, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
@@ -32573,8 +32557,7 @@ pub unsafe fn _mm_maskz_mul_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 {
let extractb: f32 = simd_extract(b, 0);
add = extracta * extractb;
}
- let r = simd_insert(a, 0, add);
- transmute(r)
+ simd_insert(a, 0, add)
}
/// Multiply the lower double-precision (64-bit) floating-point element in a and b, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
@@ -32591,8 +32574,7 @@ pub unsafe fn _mm_mask_mul_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d)
let extractb: f64 = simd_extract(b, 0);
add = extracta * extractb;
}
- let r = simd_insert(a, 0, add);
- transmute(r)
+ simd_insert(a, 0, add)
}
/// Multiply the lower double-precision (64-bit) floating-point element in a and b, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
@@ -32608,8 +32590,7 @@ pub unsafe fn _mm_maskz_mul_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d {
let extractb: f64 = simd_extract(b, 0);
add = extracta * extractb;
}
- let r = simd_insert(a, 0, add);
- transmute(r)
+ simd_insert(a, 0, add)
}
/// Divide the lower single-precision (32-bit) floating-point element in a by the lower single-precision (32-bit) floating-point element in b, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
@@ -32626,8 +32607,7 @@ pub unsafe fn _mm_mask_div_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) ->
let extractb: f32 = simd_extract(b, 0);
add = extracta / extractb;
}
- let r = simd_insert(a, 0, add);
- transmute(r)
+ simd_insert(a, 0, add)
}
/// Divide the lower single-precision (32-bit) floating-point element in a by the lower single-precision (32-bit) floating-point element in b, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
@@ -32643,8 +32623,7 @@ pub unsafe fn _mm_maskz_div_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 {
let extractb: f32 = simd_extract(b, 0);
add = extracta / extractb;
}
- let r = simd_insert(a, 0, add);
- transmute(r)
+ simd_insert(a, 0, add)
}
/// Divide the lower double-precision (64-bit) floating-point element in a by the lower double-precision (64-bit) floating-point element in b, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
@@ -32661,8 +32640,7 @@ pub unsafe fn _mm_mask_div_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d)
let extractb: f64 = simd_extract(b, 0);
add = extracta / extractb;
}
- let r = simd_insert(a, 0, add);
- transmute(r)
+ simd_insert(a, 0, add)
}
/// Divide the lower double-precision (64-bit) floating-point element in a by the lower double-precision (64-bit) floating-point element in b, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
@@ -32678,8 +32656,7 @@ pub unsafe fn _mm_maskz_div_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d {
let extractb: f64 = simd_extract(b, 0);
add = extracta / extractb;
}
- let r = simd_insert(a, 0, add);
- transmute(r)
+ simd_insert(a, 0, add)
}
/// Compare the lower single-precision (32-bit) floating-point elements in a and b, store the maximum value in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
@@ -33587,8 +33564,7 @@ pub unsafe fn _mm_mask_fmadd_ss(a: __m128, k: __mmask8, b: __m128, c: __m128) ->
let extractc: f32 = simd_extract(c, 0);
fmadd = vfmadd132ss(fmadd, extractb, extractc, _MM_FROUND_CUR_DIRECTION);
}
- let r = simd_insert(a, 0, fmadd);
- transmute(r)
+ simd_insert(a, 0, fmadd)
}
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and add the intermediate result to the lower element in c. Store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
@@ -33605,8 +33581,7 @@ pub unsafe fn _mm_maskz_fmadd_ss(k: __mmask8, a: __m128, b: __m128, c: __m128) -
let extractc: f32 = simd_extract(c, 0);
fmadd = vfmadd132ss(extracta, extractb, extractc, _MM_FROUND_CUR_DIRECTION);
}
- let r = simd_insert(a, 0, fmadd);
- transmute(r)
+ simd_insert(a, 0, fmadd)
}
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and add the intermediate result to the lower element in c. Store the result in the lower element of dst using writemask k (the element is copied from c when mask bit 0 is not set), and copy the upper 3 packed elements from c to the upper elements of dst.
@@ -33622,8 +33597,7 @@ pub unsafe fn _mm_mask3_fmadd_ss(a: __m128, b: __m128, c: __m128, k: __mmask8) -
let extractb: f32 = simd_extract(b, 0);
fmadd = vfmadd132ss(extracta, extractb, fmadd, _MM_FROUND_CUR_DIRECTION);
}
- let r = simd_insert(c, 0, fmadd);
- transmute(r)
+ simd_insert(c, 0, fmadd)
}
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and add the intermediate result to the lower element in c. Store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
@@ -33639,8 +33613,7 @@ pub unsafe fn _mm_mask_fmadd_sd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d)
let extractc: f64 = simd_extract(c, 0);
fmadd = vfmadd132sd(fmadd, extractb, extractc, _MM_FROUND_CUR_DIRECTION);
}
- let r = simd_insert(a, 0, fmadd);
- transmute(r)
+ simd_insert(a, 0, fmadd)
}
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and add the intermediate result to the lower element in c. Store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
@@ -33657,8 +33630,7 @@ pub unsafe fn _mm_maskz_fmadd_sd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d
let extractc: f64 = simd_extract(c, 0);
fmadd = vfmadd132sd(extracta, extractb, extractc, _MM_FROUND_CUR_DIRECTION);
}
- let r = simd_insert(a, 0, fmadd);
- transmute(r)
+ simd_insert(a, 0, fmadd)
}
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and add the intermediate result to the lower element in c. Store the result in the lower element of dst using writemask k (the element is copied from c when mask bit 0 is not set), and copy the upper element from c to the upper element of dst.
@@ -33674,8 +33646,7 @@ pub unsafe fn _mm_mask3_fmadd_sd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8
let extractb: f64 = simd_extract(b, 0);
fmadd = vfmadd132sd(extracta, extractb, fmadd, _MM_FROUND_CUR_DIRECTION);
}
- let r = simd_insert(c, 0, fmadd);
- transmute(r)
+ simd_insert(c, 0, fmadd)
}
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and subtract the lower element in c from the intermediate result. Store the result in the lower element of dst, and copy the upper 3 packed elements from a to the upper elements of dst.
@@ -33692,8 +33663,7 @@ pub unsafe fn _mm_mask_fmsub_ss(a: __m128, k: __mmask8, b: __m128, c: __m128) ->
let extractc = -extractc;
fmsub = vfmadd132ss(fmsub, extractb, extractc, _MM_FROUND_CUR_DIRECTION);
}
- let r = simd_insert(a, 0, fmsub);
- transmute(r)
+ simd_insert(a, 0, fmsub)
}
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and subtract the lower element in c from the intermediate result. Store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
@@ -33711,8 +33681,7 @@ pub unsafe fn _mm_maskz_fmsub_ss(k: __mmask8, a: __m128, b: __m128, c: __m128) -
let extractc = -extractc;
fmsub = vfmadd132ss(extracta, extractb, extractc, _MM_FROUND_CUR_DIRECTION);
}
- let r = simd_insert(a, 0, fmsub);
- transmute(r)
+ simd_insert(a, 0, fmsub)
}
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and subtract the lower element in c from the intermediate result. Store the result in the lower element of dst using writemask k (the element is copied from c when mask bit 0 is not set), and copy the upper 3 packed elements from c to the upper elements of dst.
@@ -33729,8 +33698,7 @@ pub unsafe fn _mm_mask3_fmsub_ss(a: __m128, b: __m128, c: __m128, k: __mmask8) -
let extractc = -fmsub;
fmsub = vfmadd132ss(extracta, extractb, extractc, _MM_FROUND_CUR_DIRECTION);
}
- let r = simd_insert(c, 0, fmsub);
- transmute(r)
+ simd_insert(c, 0, fmsub)
}
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and subtract the lower element in c from the intermediate result. Store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
@@ -33747,8 +33715,7 @@ pub unsafe fn _mm_mask_fmsub_sd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d)
let extractc = -extractc;
fmsub = vfmadd132sd(fmsub, extractb, extractc, _MM_FROUND_CUR_DIRECTION);
}
- let r = simd_insert(a, 0, fmsub);
- transmute(r)
+ simd_insert(a, 0, fmsub)
}
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and subtract the lower element in c from the intermediate result. Store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
@@ -33766,8 +33733,7 @@ pub unsafe fn _mm_maskz_fmsub_sd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d
let extractc = -extractc;
fmsub = vfmadd132sd(extracta, extractb, extractc, _MM_FROUND_CUR_DIRECTION);
}
- let r = simd_insert(a, 0, fmsub);
- transmute(r)
+ simd_insert(a, 0, fmsub)
}
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and subtract the lower element in c from the intermediate result. Store the result in the lower element of dst using writemask k (the element is copied from c when mask bit 0 is not set), and copy the upper element from c to the upper element of dst.
@@ -33784,8 +33750,7 @@ pub unsafe fn _mm_mask3_fmsub_sd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8
let extractc = -fmsub;
fmsub = vfmadd132sd(extracta, extractb, extractc, _MM_FROUND_CUR_DIRECTION);
}
- let r = simd_insert(c, 0, fmsub);
- transmute(r)
+ simd_insert(c, 0, fmsub)
}
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and add the negated intermediate result to the lower element in c. Store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
@@ -33802,8 +33767,7 @@ pub unsafe fn _mm_mask_fnmadd_ss(a: __m128, k: __mmask8, b: __m128, c: __m128) -
let extractc: f32 = simd_extract(c, 0);
fnmadd = vfmadd132ss(extracta, extractb, extractc, _MM_FROUND_CUR_DIRECTION);
}
- let r = simd_insert(a, 0, fnmadd);
- transmute(r)
+ simd_insert(a, 0, fnmadd)
}
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and add the negated intermediate result to the lower element in c. Store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
@@ -33821,8 +33785,7 @@ pub unsafe fn _mm_maskz_fnmadd_ss(k: __mmask8, a: __m128, b: __m128, c: __m128)
let extractc: f32 = simd_extract(c, 0);
fnmadd = vfmadd132ss(extracta, extractb, extractc, _MM_FROUND_CUR_DIRECTION);
}
- let r = simd_insert(a, 0, fnmadd);
- transmute(r)
+ simd_insert(a, 0, fnmadd)
}
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and add the negated intermediate result to the lower element in c. Store the result in the lower element of dst using writemask k (the element is copied from c when mask bit 0 is not set), and copy the upper 3 packed elements from c to the upper elements of dst.
@@ -33839,8 +33802,7 @@ pub unsafe fn _mm_mask3_fnmadd_ss(a: __m128, b: __m128, c: __m128, k: __mmask8)
let extractb: f32 = simd_extract(b, 0);
fnmadd = vfmadd132ss(extracta, extractb, fnmadd, _MM_FROUND_CUR_DIRECTION);
}
- let r = simd_insert(c, 0, fnmadd);
- transmute(r)
+ simd_insert(c, 0, fnmadd)
}
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and add the negated intermediate result to the lower element in c. Store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
@@ -33857,8 +33819,7 @@ pub unsafe fn _mm_mask_fnmadd_sd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d
let extractc: f64 = simd_extract(c, 0);
fnmadd = vfmadd132sd(extracta, extractb, extractc, _MM_FROUND_CUR_DIRECTION);
}
- let r = simd_insert(a, 0, fnmadd);
- transmute(r)
+ simd_insert(a, 0, fnmadd)
}
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and add the negated intermediate result to the lower element in c. Store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
@@ -33876,8 +33837,7 @@ pub unsafe fn _mm_maskz_fnmadd_sd(k: __mmask8, a: __m128d, b: __m128d, c: __m128
let extractc: f64 = simd_extract(c, 0);
fnmadd = vfmadd132sd(extracta, extractb, extractc, _MM_FROUND_CUR_DIRECTION);
}
- let r = simd_insert(a, 0, fnmadd);
- transmute(r)
+ simd_insert(a, 0, fnmadd)
}
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and add the negated intermediate result to the lower element in c. Store the result in the lower element of dst using writemask k (the element is copied from c when mask bit 0 is not set), and copy the upper element from c to the upper element of dst.
@@ -33894,8 +33854,7 @@ pub unsafe fn _mm_mask3_fnmadd_sd(a: __m128d, b: __m128d, c: __m128d, k: __mmask
let extractb: f64 = simd_extract(b, 0);
fnmadd = vfmadd132sd(extracta, extractb, fnmadd, _MM_FROUND_CUR_DIRECTION);
}
- let r = simd_insert(c, 0, fnmadd);
- transmute(r)
+ simd_insert(c, 0, fnmadd)
}
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and subtract the lower element in c from the negated intermediate result. Store the result in the lower element of dst using writemask k (the element is copied from c when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
@@ -33913,8 +33872,7 @@ pub unsafe fn _mm_mask_fnmsub_ss(a: __m128, k: __mmask8, b: __m128, c: __m128) -
let extractc = -extractc;
fnmsub = vfmadd132ss(extracta, extractb, extractc, _MM_FROUND_CUR_DIRECTION);
}
- let r = simd_insert(a, 0, fnmsub);
- transmute(r)
+ simd_insert(a, 0, fnmsub)
}
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and subtract the lower element in c from the negated intermediate result. Store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
@@ -33933,8 +33891,7 @@ pub unsafe fn _mm_maskz_fnmsub_ss(k: __mmask8, a: __m128, b: __m128, c: __m128)
let extractc = -extractc;
fnmsub = vfmadd132ss(extracta, extractb, extractc, _MM_FROUND_CUR_DIRECTION);
}
- let r = simd_insert(a, 0, fnmsub);
- transmute(r)
+ simd_insert(a, 0, fnmsub)
}
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and subtract the lower element in c from the negated intermediate result. Store the result in the lower element of dst using writemask k (the element is copied from c when mask bit 0 is not set), and copy the upper 3 packed elements from c to the upper elements of dst.
@@ -33952,8 +33909,7 @@ pub unsafe fn _mm_mask3_fnmsub_ss(a: __m128, b: __m128, c: __m128, k: __mmask8)
let extractc = -fnmsub;
fnmsub = vfmadd132ss(extracta, extractb, extractc, _MM_FROUND_CUR_DIRECTION);
}
- let r = simd_insert(c, 0, fnmsub);
- transmute(r)
+ simd_insert(c, 0, fnmsub)
}
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and subtract the lower element in c from the negated intermediate result. Store the result in the lower element of dst using writemask k (the element is copied from c when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
@@ -33971,8 +33927,7 @@ pub unsafe fn _mm_mask_fnmsub_sd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d
let extractc = -extractc;
fnmsub = vfmadd132sd(extracta, extractb, extractc, _MM_FROUND_CUR_DIRECTION);
}
- let r = simd_insert(a, 0, fnmsub);
- transmute(r)
+ simd_insert(a, 0, fnmsub)
}
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and subtract the lower element in c from the negated intermediate result. Store the result in dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
@@ -33991,8 +33946,7 @@ pub unsafe fn _mm_maskz_fnmsub_sd(k: __mmask8, a: __m128d, b: __m128d, c: __m128
let extractc = -extractc;
fnmsub = vfmadd132sd(extracta, extractb, extractc, _MM_FROUND_CUR_DIRECTION);
}
- let r = simd_insert(a, 0, fnmsub);
- transmute(r)
+ simd_insert(a, 0, fnmsub)
}
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and subtract the lower element in c from the negated intermediate result. Store the result in the lower element of dst using writemask k (the element is copied from c when mask bit 0 is not set), and copy the upper element from c to the upper element of dst.
@@ -34010,8 +33964,7 @@ pub unsafe fn _mm_mask3_fnmsub_sd(a: __m128d, b: __m128d, c: __m128d, k: __mmask
let extractc = -fnmsub;
fnmsub = vfmadd132sd(extracta, extractb, extractc, _MM_FROUND_CUR_DIRECTION);
}
- let r = simd_insert(c, 0, fnmsub);
- transmute(r)
+ simd_insert(c, 0, fnmsub)
}
/// Add the lower single-precision (32-bit) floating-point element in a and b, store the result in the lower element of dst, and copy the upper 3 packed elements from a to the upper elements of dst.\
@@ -35705,8 +35658,7 @@ pub unsafe fn _mm_fmadd_round_ss<const ROUNDING: i32>(a: __m128, b: __m128, c: _
let extractb: f32 = simd_extract(b, 0);
let extractc: f32 = simd_extract(c, 0);
let r = vfmadd132ss(extracta, extractb, extractc, ROUNDING);
- let r = simd_insert(a, 0, r);
- transmute(r)
+ simd_insert(a, 0, r)
}
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and add the intermediate result to the lower element in c. Store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.\
@@ -35736,8 +35688,7 @@ pub unsafe fn _mm_mask_fmadd_round_ss<const ROUNDING: i32>(
let extractc: f32 = simd_extract(c, 0);
fmadd = vfmadd132ss(fmadd, extractb, extractc, ROUNDING);
}
- let r = simd_insert(a, 0, fmadd);
- transmute(r)
+ simd_insert(a, 0, fmadd)
}
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and add the intermediate result to the lower element in c. Store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.\
@@ -35768,8 +35719,7 @@ pub unsafe fn _mm_maskz_fmadd_round_ss<const ROUNDING: i32>(
let extractc: f32 = simd_extract(c, 0);
fmadd = vfmadd132ss(extracta, extractb, extractc, ROUNDING);
}
- let r = simd_insert(a, 0, fmadd);
- transmute(r)
+ simd_insert(a, 0, fmadd)
}
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and add the intermediate result to the lower element in c. Store the result in the lower element of dst using writemask k (the element is copied from c when mask bit 0 is not set), and copy the upper 3 packed elements from c to the upper elements of dst.\
@@ -35799,8 +35749,7 @@ pub unsafe fn _mm_mask3_fmadd_round_ss<const ROUNDING: i32>(
let extractb: f32 = simd_extract(b, 0);
fmadd = vfmadd132ss(extracta, extractb, fmadd, ROUNDING);
}
- let r = simd_insert(c, 0, fmadd);
- transmute(r)
+ simd_insert(c, 0, fmadd)
}
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and add the intermediate result to the lower element in c. Store the result in the lower element of dst, and copy the upper element from a to the upper element of dst.\
@@ -35827,8 +35776,7 @@ pub unsafe fn _mm_fmadd_round_sd<const ROUNDING: i32>(
let extractb: f64 = simd_extract(b, 0);
let extractc: f64 = simd_extract(c, 0);
let fmadd = vfmadd132sd(extracta, extractb, extractc, ROUNDING);
- let r = simd_insert(a, 0, fmadd);
- transmute(r)
+ simd_insert(a, 0, fmadd)
}
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and add the intermediate result to the lower element in c. Store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.\
@@ -35858,8 +35806,7 @@ pub unsafe fn _mm_mask_fmadd_round_sd<const ROUNDING: i32>(
let extractc: f64 = simd_extract(c, 0);
fmadd = vfmadd132sd(fmadd, extractb, extractc, ROUNDING);
}
- let r = simd_insert(a, 0, fmadd);
- transmute(r)
+ simd_insert(a, 0, fmadd)
}
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and add the intermediate result to the lower element in c. Store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.\
@@ -35890,8 +35837,7 @@ pub unsafe fn _mm_maskz_fmadd_round_sd<const ROUNDING: i32>(
let extractc: f64 = simd_extract(c, 0);
fmadd = vfmadd132sd(extracta, extractb, extractc, ROUNDING);
}
- let r = simd_insert(a, 0, fmadd);
- transmute(r)
+ simd_insert(a, 0, fmadd)
}
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and add the intermediate result to the lower element in c. Store the result in the lower element of dst using writemask k (the element is copied from c when mask bit 0 is not set), and copy the upper element from c to the upper element of dst.\
@@ -35921,8 +35867,7 @@ pub unsafe fn _mm_mask3_fmadd_round_sd<const ROUNDING: i32>(
let extractb: f64 = simd_extract(b, 0);
fmadd = vfmadd132sd(extracta, extractb, fmadd, ROUNDING);
}
- let r = simd_insert(c, 0, fmadd);
- transmute(r)
+ simd_insert(c, 0, fmadd)
}
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and subtract the lower element in c from the intermediate result. Store the result in the lower element of dst, and copy the upper 3 packed elements from a to the upper elements of dst.\
@@ -35946,8 +35891,7 @@ pub unsafe fn _mm_fmsub_round_ss<const ROUNDING: i32>(a: __m128, b: __m128, c: _
let extractc: f32 = simd_extract(c, 0);
let extractc = -extractc;
let fmsub = vfmadd132ss(extracta, extractb, extractc, ROUNDING);
- let r = simd_insert(a, 0, fmsub);
- transmute(r)
+ simd_insert(a, 0, fmsub)
}
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and subtract the lower element in c from the intermediate result. Store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.\
@@ -35978,8 +35922,7 @@ pub unsafe fn _mm_mask_fmsub_round_ss<const ROUNDING: i32>(
let extractc = -extractc;
fmsub = vfmadd132ss(fmsub, extractb, extractc, ROUNDING);
}
- let r = simd_insert(a, 0, fmsub);
- transmute(r)
+ simd_insert(a, 0, fmsub)
}
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and subtract the lower element in c from the intermediate result. Store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.\
@@ -36011,8 +35954,7 @@ pub unsafe fn _mm_maskz_fmsub_round_ss<const ROUNDING: i32>(
let extractc = -extractc;
fmsub = vfmadd132ss(extracta, extractb, extractc, ROUNDING);
}
- let r = simd_insert(a, 0, fmsub);
- transmute(r)
+ simd_insert(a, 0, fmsub)
}
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and subtract the lower element in c from the intermediate result. Store the result in the lower element of dst using writemask k (the element is copied from c when mask bit 0 is not set), and copy the upper 3 packed elements from c to the upper elements of dst.\
@@ -36043,8 +35985,7 @@ pub unsafe fn _mm_mask3_fmsub_round_ss<const ROUNDING: i32>(
let extractc = -fmsub;
fmsub = vfmadd132ss(extracta, extractb, extractc, ROUNDING);
}
- let r = simd_insert(c, 0, fmsub);
- transmute(r)
+ simd_insert(c, 0, fmsub)
}
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and subtract the lower element in c from the intermediate result. Store the result in the lower element of dst, and copy the upper element from a to the upper element of dst.\
@@ -36072,8 +36013,7 @@ pub unsafe fn _mm_fmsub_round_sd<const ROUNDING: i32>(
let extractc: f64 = simd_extract(c, 0);
let extractc = -extractc;
let fmsub = vfmadd132sd(extracta, extractb, extractc, ROUNDING);
- let r = simd_insert(a, 0, fmsub);
- transmute(r)
+ simd_insert(a, 0, fmsub)
}
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and subtract the lower element in c from the intermediate result. Store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.\
@@ -36104,8 +36044,7 @@ pub unsafe fn _mm_mask_fmsub_round_sd<const ROUNDING: i32>(
let extractc = -extractc;
fmsub = vfmadd132sd(fmsub, extractb, extractc, ROUNDING);
}
- let r = simd_insert(a, 0, fmsub);
- transmute(r)
+ simd_insert(a, 0, fmsub)
}
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and subtract the lower element in c from the intermediate result. Store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.\
@@ -36137,8 +36076,7 @@ pub unsafe fn _mm_maskz_fmsub_round_sd<const ROUNDING: i32>(
let extractc = -extractc;
fmsub = vfmadd132sd(extracta, extractb, extractc, ROUNDING);
}
- let r = simd_insert(a, 0, fmsub);
- transmute(r)
+ simd_insert(a, 0, fmsub)
}
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and subtract the lower element in c from the intermediate result. Store the result in the lower element of dst using writemask k (the element is copied from c when mask bit 0 is not set), and copy the upper element from c to the upper element of dst.\
@@ -36169,8 +36107,7 @@ pub unsafe fn _mm_mask3_fmsub_round_sd<const ROUNDING: i32>(
let extractc = -fmsub;
fmsub = vfmadd132sd(extracta, extractb, extractc, ROUNDING);
}
- let r = simd_insert(c, 0, fmsub);
- transmute(r)
+ simd_insert(c, 0, fmsub)
}
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and add the negated intermediate result to the lower element in c. Store the result in the lower element of dst, and copy the upper 3 packed elements from a to the upper elements of dst.\
@@ -36194,8 +36131,7 @@ pub unsafe fn _mm_fnmadd_round_ss<const ROUNDING: i32>(a: __m128, b: __m128, c:
let extractb: f32 = simd_extract(b, 0);
let extractc: f32 = simd_extract(c, 0);
let fnmadd = vfmadd132ss(extracta, extractb, extractc, ROUNDING);
- let r = simd_insert(a, 0, fnmadd);
- transmute(r)
+ simd_insert(a, 0, fnmadd)
}
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and add the negated intermediate result to the lower element in c. Store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.\
@@ -36226,8 +36162,7 @@ pub unsafe fn _mm_mask_fnmadd_round_ss<const ROUNDING: i32>(
let extractc: f32 = simd_extract(c, 0);
fnmadd = vfmadd132ss(extracta, extractb, extractc, ROUNDING);
}
- let r = simd_insert(a, 0, fnmadd);
- transmute(r)
+ simd_insert(a, 0, fnmadd)
}
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and add the negated intermediate result to the lower element in c. Store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.\
@@ -36259,8 +36194,7 @@ pub unsafe fn _mm_maskz_fnmadd_round_ss<const ROUNDING: i32>(
let extractc: f32 = simd_extract(c, 0);
fnmadd = vfmadd132ss(extracta, extractb, extractc, ROUNDING);
}
- let r = simd_insert(a, 0, fnmadd);
- transmute(r)
+ simd_insert(a, 0, fnmadd)
}
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and add the negated intermediate result to the lower element in c. Store the result in the lower element of dst using writemask k (the element is copied from c when mask bit 0 is not set), and copy the upper 3 packed elements from c to the upper elements of dst.\
@@ -36291,8 +36225,7 @@ pub unsafe fn _mm_mask3_fnmadd_round_ss<const ROUNDING: i32>(
let extractb: f32 = simd_extract(b, 0);
fnmadd = vfmadd132ss(extracta, extractb, fnmadd, ROUNDING);
}
- let r = simd_insert(c, 0, fnmadd);
- transmute(r)
+ simd_insert(c, 0, fnmadd)
}
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and add the negated intermediate result to the lower element in c. Store the result in the lower element of dst, and copy the upper element from a to the upper element of dst.\
@@ -36320,8 +36253,7 @@ pub unsafe fn _mm_fnmadd_round_sd<const ROUNDING: i32>(
let extractb: f64 = simd_extract(b, 0);
let extractc: f64 = simd_extract(c, 0);
let fnmadd = vfmadd132sd(extracta, extractb, extractc, ROUNDING);
- let r = simd_insert(a, 0, fnmadd);
- transmute(r)
+ simd_insert(a, 0, fnmadd)
}
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and add the negated intermediate result to the lower element in c. Store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.\
@@ -36352,8 +36284,7 @@ pub unsafe fn _mm_mask_fnmadd_round_sd<const ROUNDING: i32>(
let extractc: f64 = simd_extract(c, 0);
fnmadd = vfmadd132sd(extracta, extractb, extractc, ROUNDING);
}
- let r = simd_insert(a, 0, fnmadd);
- transmute(r)
+ simd_insert(a, 0, fnmadd)
}
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and add the negated intermediate result to the lower element in c. Store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.\
@@ -36385,8 +36316,7 @@ pub unsafe fn _mm_maskz_fnmadd_round_sd<const ROUNDING: i32>(
let extractc: f64 = simd_extract(c, 0);
fnmadd = vfmadd132sd(extracta, extractb, extractc, ROUNDING);
}
- let r = simd_insert(a, 0, fnmadd);
- transmute(r)
+ simd_insert(a, 0, fnmadd)
}
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and add the negated intermediate result to the lower element in c. Store the result in the lower element of dst using writemask k (the element is copied from c when mask bit 0 is not set), and copy the upper element from c to the upper element of dst.\
@@ -36417,8 +36347,7 @@ pub unsafe fn _mm_mask3_fnmadd_round_sd<const ROUNDING: i32>(
let extractb: f64 = simd_extract(b, 0);
fnmadd = vfmadd132sd(extracta, extractb, fnmadd, ROUNDING);
}
- let r = simd_insert(c, 0, fnmadd);
- transmute(r)
+ simd_insert(c, 0, fnmadd)
}
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, subtract the lower element in c from the negated intermediate result, store the result in the lower element of dst, and copy the upper 3 packed elements from a to the upper elements of dst.\
@@ -36443,8 +36372,7 @@ pub unsafe fn _mm_fnmsub_round_ss<const ROUNDING: i32>(a: __m128, b: __m128, c:
let extractc: f32 = simd_extract(c, 0);
let extractc = -extractc;
let fnmsub = vfmadd132ss(extracta, extractb, extractc, ROUNDING);
- let r = simd_insert(a, 0, fnmsub);
- transmute(r)
+ simd_insert(a, 0, fnmsub)
}
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and subtract the lower element in c from the negated intermediate result. Store the result in the lower element of dst using writemask k (the element is copied from c when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.\
@@ -36476,8 +36404,7 @@ pub unsafe fn _mm_mask_fnmsub_round_ss<const ROUNDING: i32>(
let extractc = -extractc;
fnmsub = vfmadd132ss(extracta, extractb, extractc, ROUNDING);
}
- let r = simd_insert(a, 0, fnmsub);
- transmute(r)
+ simd_insert(a, 0, fnmsub)
}
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and subtract the lower element in c from the negated intermediate result. Store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.\
@@ -36510,8 +36437,7 @@ pub unsafe fn _mm_maskz_fnmsub_round_ss<const ROUNDING: i32>(
let extractc = -extractc;
fnmsub = vfmadd132ss(extracta, extractb, extractc, ROUNDING);
}
- let r = simd_insert(a, 0, fnmsub);
- transmute(r)
+ simd_insert(a, 0, fnmsub)
}
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, subtract the lower element in c from the negated intermediate result. Store the result in the lower element of dst using writemask k (the element is copied from c when mask bit 0 is not set), and copy the upper 3 packed elements from c to the upper elements of dst.\
@@ -36543,8 +36469,7 @@ pub unsafe fn _mm_mask3_fnmsub_round_ss<const ROUNDING: i32>(
let extractc = -fnmsub;
fnmsub = vfmadd132ss(extracta, extractb, extractc, ROUNDING);
}
- let r = simd_insert(c, 0, fnmsub);
- transmute(r)
+ simd_insert(c, 0, fnmsub)
}
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and subtract the lower element in c from the negated intermediate result. Store the result in the lower element of dst, and copy the upper element from a to the upper element of dst.\
@@ -36573,8 +36498,7 @@ pub unsafe fn _mm_fnmsub_round_sd<const ROUNDING: i32>(
let extractc: f64 = simd_extract(c, 0);
let extractc = -extractc;
let fnmsub = vfmadd132sd(extracta, extractb, extractc, ROUNDING);
- let r = simd_insert(a, 0, fnmsub);
- transmute(r)
+ simd_insert(a, 0, fnmsub)
}
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and subtract the lower element in c from the negated intermediate result. Store the result in the lower element of dst using writemask k (the element is copied from c when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.\
@@ -36606,8 +36530,7 @@ pub unsafe fn _mm_mask_fnmsub_round_sd<const ROUNDING: i32>(
let extractc = -extractc;
fnmsub = vfmadd132sd(extracta, extractb, extractc, ROUNDING);
}
- let r = simd_insert(a, 0, fnmsub);
- transmute(r)
+ simd_insert(a, 0, fnmsub)
}
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and subtract the lower element in c from the negated intermediate result. Store the result in dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.\
@@ -36640,8 +36563,7 @@ pub unsafe fn _mm_maskz_fnmsub_round_sd<const ROUNDING: i32>(
let extractc = -extractc;
fnmsub = vfmadd132sd(extracta, extractb, extractc, ROUNDING);
}
- let r = simd_insert(a, 0, fnmsub);
- transmute(r)
+ simd_insert(a, 0, fnmsub)
}
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and subtract the lower element in c from the negated intermediate result. Store the result in the lower element of dst using writemask k (the element is copied from c when mask bit 0 is not set), and copy the upper element from c to the upper element of dst.\
@@ -36673,8 +36595,7 @@ pub unsafe fn _mm_mask3_fnmsub_round_sd<const ROUNDING: i32>(
let extractc = -fnmsub;
fnmsub = vfmadd132sd(extracta, extractb, extractc, ROUNDING);
}
- let r = simd_insert(c, 0, fnmsub);
- transmute(r)
+ simd_insert(c, 0, fnmsub)
}
/// Fix up the lower single-precision (32-bit) floating-point elements in a and b using the lower 32-bit integer in c, store the result in the lower element of dst, and copy the upper 3 packed elements from a to the upper elements of dst. imm8 is used to set the required flags reporting.
@@ -37168,8 +37089,7 @@ pub unsafe fn _mm_maskz_cvt_roundsd_ss<const ROUNDING: i32>(
pub unsafe fn _mm_cvt_roundss_si32<const ROUNDING: i32>(a: __m128) -> i32 {
static_assert_rounding!(ROUNDING);
let a = a.as_f32x4();
- let r = vcvtss2si(a, ROUNDING);
- transmute(r)
+ vcvtss2si(a, ROUNDING)
}
/// Convert the lower single-precision (32-bit) floating-point element in a to a 32-bit integer, and store the result in dst.\
@@ -37188,8 +37108,7 @@ pub unsafe fn _mm_cvt_roundss_si32<const ROUNDING: i32>(a: __m128) -> i32 {
pub unsafe fn _mm_cvt_roundss_i32<const ROUNDING: i32>(a: __m128) -> i32 {
static_assert_rounding!(ROUNDING);
let a = a.as_f32x4();
- let r = vcvtss2si(a, ROUNDING);
- transmute(r)
+ vcvtss2si(a, ROUNDING)
}
/// Convert the lower single-precision (32-bit) floating-point element in a to an unsigned 32-bit integer, and store the result in dst.\
@@ -37208,8 +37127,7 @@ pub unsafe fn _mm_cvt_roundss_i32<const ROUNDING: i32>(a: __m128) -> i32 {
pub unsafe fn _mm_cvt_roundss_u32<const ROUNDING: i32>(a: __m128) -> u32 {
static_assert_rounding!(ROUNDING);
let a = a.as_f32x4();
- let r = vcvtss2usi(a, ROUNDING);
- transmute(r)
+ vcvtss2usi(a, ROUNDING)
}
/// Convert the lower single-precision (32-bit) floating-point element in a to a 32-bit integer, and store the result in dst.
@@ -37219,7 +37137,7 @@ pub unsafe fn _mm_cvt_roundss_u32<const ROUNDING: i32>(a: __m128) -> u32 {
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2si))]
pub unsafe fn _mm_cvtss_i32(a: __m128) -> i32 {
- transmute(vcvtss2si(a.as_f32x4(), _MM_FROUND_CUR_DIRECTION))
+ vcvtss2si(a.as_f32x4(), _MM_FROUND_CUR_DIRECTION)
}
/// Convert the lower single-precision (32-bit) floating-point element in a to an unsigned 32-bit integer, and store the result in dst.
@@ -37229,7 +37147,7 @@ pub unsafe fn _mm_cvtss_i32(a: __m128) -> i32 {
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2usi))]
pub unsafe fn _mm_cvtss_u32(a: __m128) -> u32 {
- transmute(vcvtss2usi(a.as_f32x4(), _MM_FROUND_CUR_DIRECTION))
+ vcvtss2usi(a.as_f32x4(), _MM_FROUND_CUR_DIRECTION)
}
/// Convert the lower double-precision (64-bit) floating-point element in a to a 32-bit integer, and store the result in dst.\
@@ -37248,8 +37166,7 @@ pub unsafe fn _mm_cvtss_u32(a: __m128) -> u32 {
pub unsafe fn _mm_cvt_roundsd_si32<const ROUNDING: i32>(a: __m128d) -> i32 {
static_assert_rounding!(ROUNDING);
let a = a.as_f64x2();
- let r = vcvtsd2si(a, ROUNDING);
- transmute(r)
+ vcvtsd2si(a, ROUNDING)
}
/// Convert the lower single-precision (32-bit) floating-point element in a to a 32-bit integer, and store the result in dst.\
@@ -37268,8 +37185,7 @@ pub unsafe fn _mm_cvt_roundsd_si32<const ROUNDING: i32>(a: __m128d) -> i32 {
pub unsafe fn _mm_cvt_roundsd_i32<const ROUNDING: i32>(a: __m128d) -> i32 {
static_assert_rounding!(ROUNDING);
let a = a.as_f64x2();
- let r = vcvtsd2si(a, ROUNDING);
- transmute(r)
+ vcvtsd2si(a, ROUNDING)
}
/// Convert the lower double-precision (64-bit) floating-point element in a to an unsigned 32-bit integer, and store the result in dst.\
@@ -37288,8 +37204,7 @@ pub unsafe fn _mm_cvt_roundsd_i32<const ROUNDING: i32>(a: __m128d) -> i32 {
pub unsafe fn _mm_cvt_roundsd_u32<const ROUNDING: i32>(a: __m128d) -> u32 {
static_assert_rounding!(ROUNDING);
let a = a.as_f64x2();
- let r = vcvtsd2usi(a, ROUNDING);
- transmute(r)
+ vcvtsd2usi(a, ROUNDING)
}
/// Convert the lower double-precision (64-bit) floating-point element in a to a 32-bit integer, and store the result in dst.
@@ -37299,7 +37214,7 @@ pub unsafe fn _mm_cvt_roundsd_u32<const ROUNDING: i32>(a: __m128d) -> u32 {
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2si))]
pub unsafe fn _mm_cvtsd_i32(a: __m128d) -> i32 {
- transmute(vcvtsd2si(a.as_f64x2(), _MM_FROUND_CUR_DIRECTION))
+ vcvtsd2si(a.as_f64x2(), _MM_FROUND_CUR_DIRECTION)
}
/// Convert the lower double-precision (64-bit) floating-point element in a to an unsigned 32-bit integer, and store the result in dst.
@@ -37309,7 +37224,7 @@ pub unsafe fn _mm_cvtsd_i32(a: __m128d) -> i32 {
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2usi))]
pub unsafe fn _mm_cvtsd_u32(a: __m128d) -> u32 {
- transmute(vcvtsd2usi(a.as_f64x2(), _MM_FROUND_CUR_DIRECTION))
+ vcvtsd2usi(a.as_f64x2(), _MM_FROUND_CUR_DIRECTION)
}
/// Convert the signed 32-bit integer b to a single-precision (32-bit) floating-point element, store the result in the lower element of dst, and copy the upper 3 packed elements from a to the upper elements of dst.\
@@ -37382,8 +37297,7 @@ pub unsafe fn _mm_cvt_roundu32_ss<const ROUNDING: i32>(a: __m128, b: u32) -> __m
#[cfg_attr(test, assert_instr(vcvtsi2ss))]
pub unsafe fn _mm_cvti32_ss(a: __m128, b: i32) -> __m128 {
let b = b as f32;
- let r = simd_insert(a, 0, b);
- transmute(r)
+ simd_insert(a, 0, b)
}
/// Convert the signed 32-bit integer b to a double-precision (64-bit) floating-point element, store the result in the lower element of dst, and copy the upper element from a to the upper element of dst.
@@ -37394,8 +37308,7 @@ pub unsafe fn _mm_cvti32_ss(a: __m128, b: i32) -> __m128 {
#[cfg_attr(test, assert_instr(vcvtsi2sd))]
pub unsafe fn _mm_cvti32_sd(a: __m128d, b: i32) -> __m128d {
let b = b as f64;
- let r = simd_insert(a, 0, b);
- transmute(r)
+ simd_insert(a, 0, b)
}
/// Convert the lower single-precision (32-bit) floating-point element in a to a 32-bit integer with truncation, and store the result in dst.\
@@ -37409,8 +37322,7 @@ pub unsafe fn _mm_cvti32_sd(a: __m128d, b: i32) -> __m128d {
pub unsafe fn _mm_cvtt_roundss_si32<const SAE: i32>(a: __m128) -> i32 {
static_assert_sae!(SAE);
let a = a.as_f32x4();
- let r = vcvtss2si(a, SAE);
- transmute(r)
+ vcvtss2si(a, SAE)
}
/// Convert the lower single-precision (32-bit) floating-point element in a to a 32-bit integer with truncation, and store the result in dst.\
@@ -37424,8 +37336,7 @@ pub unsafe fn _mm_cvtt_roundss_si32<const SAE: i32>(a: __m128) -> i32 {
pub unsafe fn _mm_cvtt_roundss_i32<const SAE: i32>(a: __m128) -> i32 {
static_assert_sae!(SAE);
let a = a.as_f32x4();
- let r = vcvtss2si(a, SAE);
- transmute(r)
+ vcvtss2si(a, SAE)
}
/// Convert the lower single-precision (32-bit) floating-point element in a to an unsigned 32-bit integer with truncation, and store the result in dst.\
@@ -37439,8 +37350,7 @@ pub unsafe fn _mm_cvtt_roundss_i32<const SAE: i32>(a: __m128) -> i32 {
pub unsafe fn _mm_cvtt_roundss_u32<const SAE: i32>(a: __m128) -> u32 {
static_assert_sae!(SAE);
let a = a.as_f32x4();
- let r = vcvtss2usi(a, SAE);
- transmute(r)
+ vcvtss2usi(a, SAE)
}
/// Convert the lower single-precision (32-bit) floating-point element in a to a 32-bit integer with truncation, and store the result in dst.
@@ -37450,7 +37360,7 @@ pub unsafe fn _mm_cvtt_roundss_u32<const SAE: i32>(a: __m128) -> u32 {
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2si))]
pub unsafe fn _mm_cvttss_i32(a: __m128) -> i32 {
- transmute(vcvtss2si(a.as_f32x4(), _MM_FROUND_CUR_DIRECTION))
+ vcvtss2si(a.as_f32x4(), _MM_FROUND_CUR_DIRECTION)
}
/// Convert the lower single-precision (32-bit) floating-point element in a to an unsigned 32-bit integer with truncation, and store the result in dst.
@@ -37460,7 +37370,7 @@ pub unsafe fn _mm_cvttss_i32(a: __m128) -> i32 {
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2usi))]
pub unsafe fn _mm_cvttss_u32(a: __m128) -> u32 {
- transmute(vcvtss2usi(a.as_f32x4(), _MM_FROUND_CUR_DIRECTION))
+ vcvtss2usi(a.as_f32x4(), _MM_FROUND_CUR_DIRECTION)
}
/// Convert the lower double-precision (64-bit) floating-point element in a to a 32-bit integer with truncation, and store the result in dst.\
@@ -37474,8 +37384,7 @@ pub unsafe fn _mm_cvttss_u32(a: __m128) -> u32 {
pub unsafe fn _mm_cvtt_roundsd_si32<const SAE: i32>(a: __m128d) -> i32 {
static_assert_sae!(SAE);
let a = a.as_f64x2();
- let r = vcvtsd2si(a, SAE);
- transmute(r)
+ vcvtsd2si(a, SAE)
}
/// Convert the lower double-precision (64-bit) floating-point element in a to a 32-bit integer with truncation, and store the result in dst.\
@@ -37489,8 +37398,7 @@ pub unsafe fn _mm_cvtt_roundsd_si32<const SAE: i32>(a: __m128d) -> i32 {
pub unsafe fn _mm_cvtt_roundsd_i32<const SAE: i32>(a: __m128d) -> i32 {
static_assert_sae!(SAE);
let a = a.as_f64x2();
- let r = vcvtsd2si(a, SAE);
- transmute(r)
+ vcvtsd2si(a, SAE)
}
/// Convert the lower double-precision (64-bit) floating-point element in a to an unsigned 32-bit integer with truncation, and store the result in dst.\
@@ -37504,8 +37412,7 @@ pub unsafe fn _mm_cvtt_roundsd_i32<const SAE: i32>(a: __m128d) -> i32 {
pub unsafe fn _mm_cvtt_roundsd_u32<const SAE: i32>(a: __m128d) -> u32 {
static_assert_sae!(SAE);
let a = a.as_f64x2();
- let r = vcvtsd2usi(a, SAE);
- transmute(r)
+ vcvtsd2usi(a, SAE)
}
/// Convert the lower double-precision (64-bit) floating-point element in a to a 32-bit integer with truncation, and store the result in dst.
@@ -37515,7 +37422,7 @@ pub unsafe fn _mm_cvtt_roundsd_u32<const SAE: i32>(a: __m128d) -> u32 {
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2si))]
pub unsafe fn _mm_cvttsd_i32(a: __m128d) -> i32 {
- transmute(vcvtsd2si(a.as_f64x2(), _MM_FROUND_CUR_DIRECTION))
+ vcvtsd2si(a.as_f64x2(), _MM_FROUND_CUR_DIRECTION)
}
/// Convert the lower double-precision (64-bit) floating-point element in a to an unsigned 32-bit integer with truncation, and store the result in dst.
@@ -37525,7 +37432,7 @@ pub unsafe fn _mm_cvttsd_i32(a: __m128d) -> i32 {
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2usi))]
pub unsafe fn _mm_cvttsd_u32(a: __m128d) -> u32 {
- transmute(vcvtsd2usi(a.as_f64x2(), _MM_FROUND_CUR_DIRECTION))
+ vcvtsd2usi(a.as_f64x2(), _MM_FROUND_CUR_DIRECTION)
}
/// Convert the unsigned 32-bit integer b to a single-precision (32-bit) floating-point element, store the result in the lower element of dst, and copy the upper 3 packed elements from a to the upper elements of dst.
@@ -37536,8 +37443,7 @@ pub unsafe fn _mm_cvttsd_u32(a: __m128d) -> u32 {
#[cfg_attr(test, assert_instr(vcvtusi2ss))]
pub unsafe fn _mm_cvtu32_ss(a: __m128, b: u32) -> __m128 {
let b = b as f32;
- let r = simd_insert(a, 0, b);
- transmute(r)
+ simd_insert(a, 0, b)
}
/// Convert the unsigned 32-bit integer b to a double-precision (64-bit) floating-point element, store the result in the lower element of dst, and copy the upper element from a to the upper element of dst.
@@ -37548,8 +37454,7 @@ pub unsafe fn _mm_cvtu32_ss(a: __m128, b: u32) -> __m128 {
#[cfg_attr(test, assert_instr(vcvtusi2sd))]
pub unsafe fn _mm_cvtu32_sd(a: __m128d, b: u32) -> __m128d {
let b = b as f64;
- let r = simd_insert(a, 0, b);
- transmute(r)
+ simd_insert(a, 0, b)
}
/// Compare the lower single-precision (32-bit) floating-point element in a and b based on the comparison operand specified by imm8, and return the boolean result (0 or 1).\
@@ -37565,8 +37470,7 @@ pub unsafe fn _mm_comi_round_ss<const IMM5: i32, const SAE: i32>(a: __m128, b: _
static_assert_mantissas_sae!(SAE);
let a = a.as_f32x4();
let b = b.as_f32x4();
- let r = vcomiss(a, b, IMM5, SAE);
- transmute(r)
+ vcomiss(a, b, IMM5, SAE)
}
/// Compare the lower double-precision (64-bit) floating-point element in a and b based on the comparison operand specified by imm8, and return the boolean result (0 or 1).\
@@ -37582,8 +37486,7 @@ pub unsafe fn _mm_comi_round_sd<const IMM5: i32, const SAE: i32>(a: __m128d, b:
static_assert_mantissas_sae!(SAE);
let a = a.as_f64x2();
let b = b.as_f64x2();
- let r = vcomisd(a, b, IMM5, SAE);
- transmute(r)
+ vcomisd(a, b, IMM5, SAE)
}
/// Equal
diff --git a/library/stdarch/crates/core_arch/src/x86/sse.rs b/library/stdarch/crates/core_arch/src/x86/sse.rs
index 3d4471ba3..6a2be0921 100644
--- a/library/stdarch/crates/core_arch/src/x86/sse.rs
+++ b/library/stdarch/crates/core_arch/src/x86/sse.rs
@@ -790,8 +790,7 @@ pub unsafe fn _mm_ucomineq_ss(a: __m128, b: __m128) -> i32 {
///
/// The result is rounded according to the current rounding mode. If the result
/// cannot be represented as a 32 bit integer the result will be `0x8000_0000`
-/// (`i32::MIN`) or an invalid operation floating point exception if
-/// unmasked (see [`_mm_setcsr`](fn._mm_setcsr.html)).
+/// (`i32::MIN`).
///
/// This corresponds to the `CVTSS2SI` instruction (with 32 bit output).
///
@@ -821,8 +820,7 @@ pub unsafe fn _mm_cvt_ss2si(a: __m128) -> i32 {
///
/// The result is rounded always using truncation (round towards zero). If the
/// result cannot be represented as a 32 bit integer the result will be
-/// `0x8000_0000` (`i32::MIN`) or an invalid operation floating point
-/// exception if unmasked (see [`_mm_setcsr`](fn._mm_setcsr.html)).
+/// `0x8000_0000` (`i32::MIN`).
///
/// This corresponds to the `CVTTSS2SI` instruction (with 32 bit output).
///
@@ -1083,7 +1081,10 @@ pub unsafe fn _mm_movelh_ps(a: __m128, b: __m128) -> __m128 {
#[cfg_attr(test, assert_instr(movmskps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_movemask_ps(a: __m128) -> i32 {
- movmskps(a)
+ // Propagate the highest bit to the rest, because simd_bitmask
+ // requires all-1 or all-0.
+ let mask: i32x4 = simd_lt(transmute(a), i32x4::splat(0));
+ simd_bitmask::<i32x4, u8>(mask).into()
}
/// Construct a `__m128` with the lowest element read from `p` and the other
@@ -1365,6 +1366,15 @@ pub unsafe fn _mm_sfence() {
/// Gets the unsigned 32-bit value of the MXCSR control and status register.
///
+/// Note that Rust makes no guarantees whatsoever about the contents of this register: Rust
+/// floating-point operations may or may not result in this register getting updated with exception
+/// state, and the register can change between two invocations of this function even when no
+/// floating-point operations appear in the source code (since floating-point operations appearing
+/// earlier or later can be reordered).
+///
+/// If you need to perform some floating-point operations and check whether they raised an
+/// exception, use an inline assembly block for the entire sequence of operations.
+///
/// For more info see [`_mm_setcsr`](fn._mm_setcsr.html)
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_getcsr)
@@ -1372,6 +1382,10 @@ pub unsafe fn _mm_sfence() {
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(stmxcsr))]
#[stable(feature = "simd_x86", since = "1.27.0")]
+#[deprecated(
+ since = "1.75.0",
+ note = "see `_mm_getcsr` documentation - use inline assembly instead"
+)]
pub unsafe fn _mm_getcsr() -> u32 {
let mut result = 0_i32;
stmxcsr(&mut result as *mut _ as *mut i8);
@@ -1401,6 +1415,16 @@ pub unsafe fn _mm_getcsr() -> u32 {
/// * The *denormals-are-zero mode flag* turns all numbers which would be
/// denormalized (exponent bits are all zeros) into zeros.
///
+/// Note that modifying the masking flags, rounding mode, or denormals-are-zero mode flags leads to
+/// **immediate Undefined Behavior**: Rust assumes that these are always in their default state and
+/// will optimize accordingly. This even applies when the register is altered and later reset to its
+/// original value without any floating-point operations appearing in the source code between those
+/// operations (since floating-point operations appearing earlier or later can be reordered).
+///
+/// If you need to perform some floating-point operations under a different masking flags, rounding
+/// mode, or denormals-are-zero mode, use an inline assembly block and make sure to restore the
+/// original MXCSR register state before the end of the block.
+///
/// ## Exception Flags
///
/// * `_MM_EXCEPT_INVALID`: An invalid operation was performed (e.g., dividing
@@ -1509,6 +1533,10 @@ pub unsafe fn _mm_getcsr() -> u32 {
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(ldmxcsr))]
#[stable(feature = "simd_x86", since = "1.27.0")]
+#[deprecated(
+ since = "1.75.0",
+ note = "see `_mm_setcsr` documentation - use inline assembly instead"
+)]
pub unsafe fn _mm_setcsr(val: u32) {
ldmxcsr(&val as *const _ as *const i8);
}
@@ -1588,9 +1616,14 @@ pub const _MM_FLUSH_ZERO_OFF: u32 = 0x0000;
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_GET_EXCEPTION_MASK)
#[inline]
+#[allow(deprecated)] // Deprecated function implemented on top of deprecated function
#[allow(non_snake_case)]
#[target_feature(enable = "sse")]
#[stable(feature = "simd_x86", since = "1.27.0")]
+#[deprecated(
+ since = "1.75.0",
+ note = "see `_mm_getcsr` documentation - use inline assembly instead"
+)]
pub unsafe fn _MM_GET_EXCEPTION_MASK() -> u32 {
_mm_getcsr() & _MM_MASK_MASK
}
@@ -1599,9 +1632,14 @@ pub unsafe fn _MM_GET_EXCEPTION_MASK() -> u32 {
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_GET_EXCEPTION_STATE)
#[inline]
+#[allow(deprecated)] // Deprecated function implemented on top of deprecated function
#[allow(non_snake_case)]
#[target_feature(enable = "sse")]
#[stable(feature = "simd_x86", since = "1.27.0")]
+#[deprecated(
+ since = "1.75.0",
+ note = "see `_mm_getcsr` documentation - use inline assembly instead"
+)]
pub unsafe fn _MM_GET_EXCEPTION_STATE() -> u32 {
_mm_getcsr() & _MM_EXCEPT_MASK
}
@@ -1610,9 +1648,14 @@ pub unsafe fn _MM_GET_EXCEPTION_STATE() -> u32 {
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_GET_FLUSH_ZERO_MODE)
#[inline]
+#[allow(deprecated)] // Deprecated function implemented on top of deprecated function
#[allow(non_snake_case)]
#[target_feature(enable = "sse")]
#[stable(feature = "simd_x86", since = "1.27.0")]
+#[deprecated(
+ since = "1.75.0",
+ note = "see `_mm_getcsr` documentation - use inline assembly instead"
+)]
pub unsafe fn _MM_GET_FLUSH_ZERO_MODE() -> u32 {
_mm_getcsr() & _MM_FLUSH_ZERO_MASK
}
@@ -1621,9 +1664,14 @@ pub unsafe fn _MM_GET_FLUSH_ZERO_MODE() -> u32 {
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_GET_ROUNDING_MODE)
#[inline]
+#[allow(deprecated)] // Deprecated function implemented on top of deprecated function
#[allow(non_snake_case)]
#[target_feature(enable = "sse")]
#[stable(feature = "simd_x86", since = "1.27.0")]
+#[deprecated(
+ since = "1.75.0",
+ note = "see `_mm_getcsr` documentation - use inline assembly instead"
+)]
pub unsafe fn _MM_GET_ROUNDING_MODE() -> u32 {
_mm_getcsr() & _MM_ROUND_MASK
}
@@ -1632,9 +1680,14 @@ pub unsafe fn _MM_GET_ROUNDING_MODE() -> u32 {
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_SET_EXCEPTION_MASK)
#[inline]
+#[allow(deprecated)] // Deprecated function implemented on top of deprecated function
#[allow(non_snake_case)]
#[target_feature(enable = "sse")]
#[stable(feature = "simd_x86", since = "1.27.0")]
+#[deprecated(
+ since = "1.75.0",
+ note = "see `_mm_setcsr` documentation - use inline assembly instead"
+)]
pub unsafe fn _MM_SET_EXCEPTION_MASK(x: u32) {
_mm_setcsr((_mm_getcsr() & !_MM_MASK_MASK) | x)
}
@@ -1643,9 +1696,14 @@ pub unsafe fn _MM_SET_EXCEPTION_MASK(x: u32) {
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_SET_EXCEPTION_STATE)
#[inline]
+#[allow(deprecated)] // Deprecated function implemented on top of deprecated function
#[allow(non_snake_case)]
#[target_feature(enable = "sse")]
#[stable(feature = "simd_x86", since = "1.27.0")]
+#[deprecated(
+ since = "1.75.0",
+ note = "see `_mm_setcsr` documentation - use inline assembly instead"
+)]
pub unsafe fn _MM_SET_EXCEPTION_STATE(x: u32) {
_mm_setcsr((_mm_getcsr() & !_MM_EXCEPT_MASK) | x)
}
@@ -1654,9 +1712,14 @@ pub unsafe fn _MM_SET_EXCEPTION_STATE(x: u32) {
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_SET_FLUSH_ZERO_MODE)
#[inline]
+#[allow(deprecated)] // Deprecated function implemented on top of deprecated function
#[allow(non_snake_case)]
#[target_feature(enable = "sse")]
#[stable(feature = "simd_x86", since = "1.27.0")]
+#[deprecated(
+ since = "1.75.0",
+ note = "see `_mm_setcsr` documentation - use inline assembly instead"
+)]
pub unsafe fn _MM_SET_FLUSH_ZERO_MODE(x: u32) {
let val = (_mm_getcsr() & !_MM_FLUSH_ZERO_MASK) | x;
// println!("setting csr={:x}", val);
@@ -1667,9 +1730,14 @@ pub unsafe fn _MM_SET_FLUSH_ZERO_MODE(x: u32) {
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_SET_ROUNDING_MODE)
#[inline]
+#[allow(deprecated)] // Deprecated function implemented on top of deprecated function
#[allow(non_snake_case)]
#[target_feature(enable = "sse")]
#[stable(feature = "simd_x86", since = "1.27.0")]
+#[deprecated(
+ since = "1.75.0",
+ note = "see `_mm_setcsr` documentation - use inline assembly instead"
+)]
pub unsafe fn _MM_SET_ROUNDING_MODE(x: u32) {
_mm_setcsr((_mm_getcsr() & !_MM_ROUND_MASK) | x)
}
@@ -1820,8 +1888,6 @@ extern "C" {
fn maxss(a: __m128, b: __m128) -> __m128;
#[link_name = "llvm.x86.sse.max.ps"]
fn maxps(a: __m128, b: __m128) -> __m128;
- #[link_name = "llvm.x86.sse.movmsk.ps"]
- fn movmskps(a: __m128) -> i32;
#[link_name = "llvm.x86.sse.cmp.ps"]
fn cmpps(a: __m128, b: __m128, imm8: i8) -> __m128;
#[link_name = "llvm.x86.sse.comieq.ss"]
@@ -1974,7 +2040,11 @@ mod tests {
let a = _mm_setr_ps(4.0, 13.0, 16.0, 100.0);
let r = _mm_rcp_ss(a);
let e = _mm_setr_ps(0.24993896, 13.0, 16.0, 100.0);
- assert_eq_m128(r, e);
+ let rel_err = 0.00048828125;
+ assert_approx_eq!(get_m128(r, 0), get_m128(e, 0), 2. * rel_err);
+ for i in 1..4 {
+ assert_eq!(get_m128(r, i), get_m128(e, i));
+ }
}
#[simd_test(enable = "sse")]
@@ -2055,6 +2125,17 @@ mod tests {
let b = _mm_setr_ps(-100.0, 20.0, 0.0, -5.0);
let r = _mm_max_ps(a, b);
assert_eq_m128(r, _mm_setr_ps(-1.0, 20.0, 0.0, -5.0));
+
+ // Check SSE-specific semantics for -0.0 handling.
+ let a = _mm_setr_ps(-0.0, 0.0, 0.0, 0.0);
+ let b = _mm_setr_ps(0.0, 0.0, 0.0, 0.0);
+ let r1: [u8; 16] = transmute(_mm_max_ps(a, b));
+ let r2: [u8; 16] = transmute(_mm_max_ps(b, a));
+ let a: [u8; 16] = transmute(a);
+ let b: [u8; 16] = transmute(b);
+ assert_eq!(r1, b);
+ assert_eq!(r2, a);
+ assert_ne!(a, b); // sanity check that -0.0 is actually present
}
#[simd_test(enable = "sse")]
@@ -2098,12 +2179,12 @@ mod tests {
let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0);
let b = _mm_setr_ps(-1.0, 5.0, 6.0, 7.0);
let r: u32x4 = transmute(_mm_cmpeq_ss(a, b));
- let e: u32x4 = transmute(_mm_setr_ps(transmute(0u32), 2.0, 3.0, 4.0));
+ let e: u32x4 = transmute(_mm_setr_ps(f32::from_bits(0), 2.0, 3.0, 4.0));
assert_eq!(r, e);
let b2 = _mm_setr_ps(1.0, 5.0, 6.0, 7.0);
let r2: u32x4 = transmute(_mm_cmpeq_ss(a, b2));
- let e2: u32x4 = transmute(_mm_setr_ps(transmute(0xffffffffu32), 2.0, 3.0, 4.0));
+ let e2: u32x4 = transmute(_mm_setr_ps(f32::from_bits(0xffffffff), 2.0, 3.0, 4.0));
assert_eq!(r2, e2);
}
@@ -2119,15 +2200,15 @@ mod tests {
let d1 = !0u32; // a.extract(0) < d.extract(0)
let rb: u32x4 = transmute(_mm_cmplt_ss(a, b));
- let eb: u32x4 = transmute(_mm_setr_ps(transmute(b1), 2.0, 3.0, 4.0));
+ let eb: u32x4 = transmute(_mm_setr_ps(f32::from_bits(b1), 2.0, 3.0, 4.0));
assert_eq!(rb, eb);
let rc: u32x4 = transmute(_mm_cmplt_ss(a, c));
- let ec: u32x4 = transmute(_mm_setr_ps(transmute(c1), 2.0, 3.0, 4.0));
+ let ec: u32x4 = transmute(_mm_setr_ps(f32::from_bits(c1), 2.0, 3.0, 4.0));
assert_eq!(rc, ec);
let rd: u32x4 = transmute(_mm_cmplt_ss(a, d));
- let ed: u32x4 = transmute(_mm_setr_ps(transmute(d1), 2.0, 3.0, 4.0));
+ let ed: u32x4 = transmute(_mm_setr_ps(f32::from_bits(d1), 2.0, 3.0, 4.0));
assert_eq!(rd, ed);
}
@@ -2143,15 +2224,15 @@ mod tests {
let d1 = !0u32; // a.extract(0) <= d.extract(0)
let rb: u32x4 = transmute(_mm_cmple_ss(a, b));
- let eb: u32x4 = transmute(_mm_setr_ps(transmute(b1), 2.0, 3.0, 4.0));
+ let eb: u32x4 = transmute(_mm_setr_ps(f32::from_bits(b1), 2.0, 3.0, 4.0));
assert_eq!(rb, eb);
let rc: u32x4 = transmute(_mm_cmple_ss(a, c));
- let ec: u32x4 = transmute(_mm_setr_ps(transmute(c1), 2.0, 3.0, 4.0));
+ let ec: u32x4 = transmute(_mm_setr_ps(f32::from_bits(c1), 2.0, 3.0, 4.0));
assert_eq!(rc, ec);
let rd: u32x4 = transmute(_mm_cmple_ss(a, d));
- let ed: u32x4 = transmute(_mm_setr_ps(transmute(d1), 2.0, 3.0, 4.0));
+ let ed: u32x4 = transmute(_mm_setr_ps(f32::from_bits(d1), 2.0, 3.0, 4.0));
assert_eq!(rd, ed);
}
@@ -2167,15 +2248,15 @@ mod tests {
let d1 = 0u32; // a.extract(0) > d.extract(0)
let rb: u32x4 = transmute(_mm_cmpgt_ss(a, b));
- let eb: u32x4 = transmute(_mm_setr_ps(transmute(b1), 2.0, 3.0, 4.0));
+ let eb: u32x4 = transmute(_mm_setr_ps(f32::from_bits(b1), 2.0, 3.0, 4.0));
assert_eq!(rb, eb);
let rc: u32x4 = transmute(_mm_cmpgt_ss(a, c));
- let ec: u32x4 = transmute(_mm_setr_ps(transmute(c1), 2.0, 3.0, 4.0));
+ let ec: u32x4 = transmute(_mm_setr_ps(f32::from_bits(c1), 2.0, 3.0, 4.0));
assert_eq!(rc, ec);
let rd: u32x4 = transmute(_mm_cmpgt_ss(a, d));
- let ed: u32x4 = transmute(_mm_setr_ps(transmute(d1), 2.0, 3.0, 4.0));
+ let ed: u32x4 = transmute(_mm_setr_ps(f32::from_bits(d1), 2.0, 3.0, 4.0));
assert_eq!(rd, ed);
}
@@ -2191,15 +2272,15 @@ mod tests {
let d1 = 0u32; // a.extract(0) >= d.extract(0)
let rb: u32x4 = transmute(_mm_cmpge_ss(a, b));
- let eb: u32x4 = transmute(_mm_setr_ps(transmute(b1), 2.0, 3.0, 4.0));
+ let eb: u32x4 = transmute(_mm_setr_ps(f32::from_bits(b1), 2.0, 3.0, 4.0));
assert_eq!(rb, eb);
let rc: u32x4 = transmute(_mm_cmpge_ss(a, c));
- let ec: u32x4 = transmute(_mm_setr_ps(transmute(c1), 2.0, 3.0, 4.0));
+ let ec: u32x4 = transmute(_mm_setr_ps(f32::from_bits(c1), 2.0, 3.0, 4.0));
assert_eq!(rc, ec);
let rd: u32x4 = transmute(_mm_cmpge_ss(a, d));
- let ed: u32x4 = transmute(_mm_setr_ps(transmute(d1), 2.0, 3.0, 4.0));
+ let ed: u32x4 = transmute(_mm_setr_ps(f32::from_bits(d1), 2.0, 3.0, 4.0));
assert_eq!(rd, ed);
}
@@ -2215,15 +2296,15 @@ mod tests {
let d1 = !0u32; // a.extract(0) != d.extract(0)
let rb: u32x4 = transmute(_mm_cmpneq_ss(a, b));
- let eb: u32x4 = transmute(_mm_setr_ps(transmute(b1), 2.0, 3.0, 4.0));
+ let eb: u32x4 = transmute(_mm_setr_ps(f32::from_bits(b1), 2.0, 3.0, 4.0));
assert_eq!(rb, eb);
let rc: u32x4 = transmute(_mm_cmpneq_ss(a, c));
- let ec: u32x4 = transmute(_mm_setr_ps(transmute(c1), 2.0, 3.0, 4.0));
+ let ec: u32x4 = transmute(_mm_setr_ps(f32::from_bits(c1), 2.0, 3.0, 4.0));
assert_eq!(rc, ec);
let rd: u32x4 = transmute(_mm_cmpneq_ss(a, d));
- let ed: u32x4 = transmute(_mm_setr_ps(transmute(d1), 2.0, 3.0, 4.0));
+ let ed: u32x4 = transmute(_mm_setr_ps(f32::from_bits(d1), 2.0, 3.0, 4.0));
assert_eq!(rd, ed);
}
@@ -2244,15 +2325,15 @@ mod tests {
let d1 = 0u32; // a.extract(0) >= d.extract(0)
let rb: u32x4 = transmute(_mm_cmpnlt_ss(a, b));
- let eb: u32x4 = transmute(_mm_setr_ps(transmute(b1), 2.0, 3.0, 4.0));
+ let eb: u32x4 = transmute(_mm_setr_ps(f32::from_bits(b1), 2.0, 3.0, 4.0));
assert_eq!(rb, eb);
let rc: u32x4 = transmute(_mm_cmpnlt_ss(a, c));
- let ec: u32x4 = transmute(_mm_setr_ps(transmute(c1), 2.0, 3.0, 4.0));
+ let ec: u32x4 = transmute(_mm_setr_ps(f32::from_bits(c1), 2.0, 3.0, 4.0));
assert_eq!(rc, ec);
let rd: u32x4 = transmute(_mm_cmpnlt_ss(a, d));
- let ed: u32x4 = transmute(_mm_setr_ps(transmute(d1), 2.0, 3.0, 4.0));
+ let ed: u32x4 = transmute(_mm_setr_ps(f32::from_bits(d1), 2.0, 3.0, 4.0));
assert_eq!(rd, ed);
}
@@ -2273,15 +2354,15 @@ mod tests {
let d1 = 0u32; // a.extract(0) > d.extract(0)
let rb: u32x4 = transmute(_mm_cmpnle_ss(a, b));
- let eb: u32x4 = transmute(_mm_setr_ps(transmute(b1), 2.0, 3.0, 4.0));
+ let eb: u32x4 = transmute(_mm_setr_ps(f32::from_bits(b1), 2.0, 3.0, 4.0));
assert_eq!(rb, eb);
let rc: u32x4 = transmute(_mm_cmpnle_ss(a, c));
- let ec: u32x4 = transmute(_mm_setr_ps(transmute(c1), 2.0, 3.0, 4.0));
+ let ec: u32x4 = transmute(_mm_setr_ps(f32::from_bits(c1), 2.0, 3.0, 4.0));
assert_eq!(rc, ec);
let rd: u32x4 = transmute(_mm_cmpnle_ss(a, d));
- let ed: u32x4 = transmute(_mm_setr_ps(transmute(d1), 2.0, 3.0, 4.0));
+ let ed: u32x4 = transmute(_mm_setr_ps(f32::from_bits(d1), 2.0, 3.0, 4.0));
assert_eq!(rd, ed);
}
@@ -2302,15 +2383,15 @@ mod tests {
let d1 = !0u32; // a.extract(0) <= d.extract(0)
let rb: u32x4 = transmute(_mm_cmpngt_ss(a, b));
- let eb: u32x4 = transmute(_mm_setr_ps(transmute(b1), 2.0, 3.0, 4.0));
+ let eb: u32x4 = transmute(_mm_setr_ps(f32::from_bits(b1), 2.0, 3.0, 4.0));
assert_eq!(rb, eb);
let rc: u32x4 = transmute(_mm_cmpngt_ss(a, c));
- let ec: u32x4 = transmute(_mm_setr_ps(transmute(c1), 2.0, 3.0, 4.0));
+ let ec: u32x4 = transmute(_mm_setr_ps(f32::from_bits(c1), 2.0, 3.0, 4.0));
assert_eq!(rc, ec);
let rd: u32x4 = transmute(_mm_cmpngt_ss(a, d));
- let ed: u32x4 = transmute(_mm_setr_ps(transmute(d1), 2.0, 3.0, 4.0));
+ let ed: u32x4 = transmute(_mm_setr_ps(f32::from_bits(d1), 2.0, 3.0, 4.0));
assert_eq!(rd, ed);
}
@@ -2331,15 +2412,15 @@ mod tests {
let d1 = !0u32; // a.extract(0) < d.extract(0)
let rb: u32x4 = transmute(_mm_cmpnge_ss(a, b));
- let eb: u32x4 = transmute(_mm_setr_ps(transmute(b1), 2.0, 3.0, 4.0));
+ let eb: u32x4 = transmute(_mm_setr_ps(f32::from_bits(b1), 2.0, 3.0, 4.0));
assert_eq!(rb, eb);
let rc: u32x4 = transmute(_mm_cmpnge_ss(a, c));
- let ec: u32x4 = transmute(_mm_setr_ps(transmute(c1), 2.0, 3.0, 4.0));
+ let ec: u32x4 = transmute(_mm_setr_ps(f32::from_bits(c1), 2.0, 3.0, 4.0));
assert_eq!(rc, ec);
let rd: u32x4 = transmute(_mm_cmpnge_ss(a, d));
- let ed: u32x4 = transmute(_mm_setr_ps(transmute(d1), 2.0, 3.0, 4.0));
+ let ed: u32x4 = transmute(_mm_setr_ps(f32::from_bits(d1), 2.0, 3.0, 4.0));
assert_eq!(rd, ed);
}
@@ -2355,15 +2436,15 @@ mod tests {
let d1 = !0u32; // a.extract(0) ord d.extract(0)
let rb: u32x4 = transmute(_mm_cmpord_ss(a, b));
- let eb: u32x4 = transmute(_mm_setr_ps(transmute(b1), 2.0, 3.0, 4.0));
+ let eb: u32x4 = transmute(_mm_setr_ps(f32::from_bits(b1), 2.0, 3.0, 4.0));
assert_eq!(rb, eb);
let rc: u32x4 = transmute(_mm_cmpord_ss(a, c));
- let ec: u32x4 = transmute(_mm_setr_ps(transmute(c1), 2.0, 3.0, 4.0));
+ let ec: u32x4 = transmute(_mm_setr_ps(f32::from_bits(c1), 2.0, 3.0, 4.0));
assert_eq!(rc, ec);
let rd: u32x4 = transmute(_mm_cmpord_ss(a, d));
- let ed: u32x4 = transmute(_mm_setr_ps(transmute(d1), 2.0, 3.0, 4.0));
+ let ed: u32x4 = transmute(_mm_setr_ps(f32::from_bits(d1), 2.0, 3.0, 4.0));
assert_eq!(rd, ed);
}
@@ -2379,15 +2460,15 @@ mod tests {
let d1 = 0u32; // a.extract(0) unord d.extract(0)
let rb: u32x4 = transmute(_mm_cmpunord_ss(a, b));
- let eb: u32x4 = transmute(_mm_setr_ps(transmute(b1), 2.0, 3.0, 4.0));
+ let eb: u32x4 = transmute(_mm_setr_ps(f32::from_bits(b1), 2.0, 3.0, 4.0));
assert_eq!(rb, eb);
let rc: u32x4 = transmute(_mm_cmpunord_ss(a, c));
- let ec: u32x4 = transmute(_mm_setr_ps(transmute(c1), 2.0, 3.0, 4.0));
+ let ec: u32x4 = transmute(_mm_setr_ps(f32::from_bits(c1), 2.0, 3.0, 4.0));
assert_eq!(rc, ec);
let rd: u32x4 = transmute(_mm_cmpunord_ss(a, d));
- let ed: u32x4 = transmute(_mm_setr_ps(transmute(d1), 2.0, 3.0, 4.0));
+ let ed: u32x4 = transmute(_mm_setr_ps(f32::from_bits(d1), 2.0, 3.0, 4.0));
assert_eq!(rd, ed);
}
@@ -2766,7 +2847,9 @@ mod tests {
}
}
+ #[allow(deprecated)] // FIXME: This test uses deprecated CSR access functions
#[simd_test(enable = "sse")]
+ #[cfg_attr(miri, ignore)] // Uses _mm_setcsr, which is not supported by Miri
unsafe fn test_mm_comieq_ss_vs_ucomieq_ss() {
// If one of the arguments is a quiet NaN `comieq_ss` should signal an
// Invalid Operation Exception while `ucomieq_ss` should not.
@@ -3072,7 +3155,7 @@ mod tests {
let mut p = vals.as_mut_ptr();
if (p as usize) & 0xf != 0 {
- ofs = ((16 - (p as usize)) & 0xf) >> 2;
+ ofs = (16 - ((p as usize) & 0xf)) >> 2;
p = p.add(ofs);
}
@@ -3098,7 +3181,7 @@ mod tests {
// Align p to 16-byte boundary
if (p as usize) & 0xf != 0 {
- ofs = ((16 - (p as usize)) & 0xf) >> 2;
+ ofs = (16 - ((p as usize) & 0xf)) >> 2;
p = p.add(ofs);
}
@@ -3124,7 +3207,7 @@ mod tests {
// Align p to 16-byte boundary
if (p as usize) & 0xf != 0 {
- ofs = ((16 - (p as usize)) & 0xf) >> 2;
+ ofs = (16 - ((p as usize) & 0xf)) >> 2;
p = p.add(ofs);
}
@@ -3186,11 +3269,15 @@ mod tests {
}
#[simd_test(enable = "sse")]
+ // Miri cannot support this until it is clear how it fits in the Rust memory model
+ #[cfg_attr(miri, ignore)]
unsafe fn test_mm_sfence() {
_mm_sfence();
}
+ #[allow(deprecated)] // FIXME: This tests functions that are immediate UB
#[simd_test(enable = "sse")]
+ #[cfg_attr(miri, ignore)] // Miri does not support accessing the CSR
unsafe fn test_mm_getcsr_setcsr_1() {
let saved_csr = _mm_getcsr();
@@ -3206,7 +3293,9 @@ mod tests {
assert_eq_m128(r, exp); // first component is a denormalized f32
}
+ #[allow(deprecated)] // FIXME: This tests functions that are immediate UB
#[simd_test(enable = "sse")]
+ #[cfg_attr(miri, ignore)] // Miri does not support accessing the CSR
unsafe fn test_mm_getcsr_setcsr_2() {
// Same as _mm_setcsr_1 test, but with opposite flag value.
@@ -3224,7 +3313,9 @@ mod tests {
assert_eq_m128(r, exp); // first component is a denormalized f32
}
+ #[allow(deprecated)] // FIXME: This tests functions that are immediate UB
#[simd_test(enable = "sse")]
+ #[cfg_attr(miri, ignore)] // Miri does not support accessing the CSR
unsafe fn test_mm_getcsr_setcsr_underflow() {
_MM_SET_EXCEPTION_STATE(0);
@@ -3263,6 +3354,9 @@ mod tests {
}
#[simd_test(enable = "sse")]
+ // Miri cannot support this until it is clear how it fits in the Rust memory model
+ // (non-temporal store)
+ #[cfg_attr(miri, ignore)]
unsafe fn test_mm_stream_ps() {
let a = _mm_set1_ps(7.0);
let mut mem = Memory { data: [-1.0; 4] };
diff --git a/library/stdarch/crates/core_arch/src/x86/sse2.rs b/library/stdarch/crates/core_arch/src/x86/sse2.rs
index 3d572a1f5..7831ea743 100644
--- a/library/stdarch/crates/core_arch/src/x86/sse2.rs
+++ b/library/stdarch/crates/core_arch/src/x86/sse2.rs
@@ -165,7 +165,10 @@ pub unsafe fn _mm_adds_epu16(a: __m128i, b: __m128i) -> __m128i {
#[cfg_attr(test, assert_instr(pavgb))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_avg_epu8(a: __m128i, b: __m128i) -> __m128i {
- transmute(pavgb(a.as_u8x16(), b.as_u8x16()))
+ let a = simd_cast::<_, u16x16>(a.as_u8x16());
+ let b = simd_cast::<_, u16x16>(b.as_u8x16());
+ let r = simd_shr(simd_add(simd_add(a, b), u16x16::splat(1)), u16x16::splat(1));
+ transmute(simd_cast::<_, u8x16>(r))
}
/// Averages packed unsigned 16-bit integers in `a` and `b`.
@@ -176,7 +179,10 @@ pub unsafe fn _mm_avg_epu8(a: __m128i, b: __m128i) -> __m128i {
#[cfg_attr(test, assert_instr(pavgw))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_avg_epu16(a: __m128i, b: __m128i) -> __m128i {
- transmute(pavgw(a.as_u16x8(), b.as_u16x8()))
+ let a = simd_cast::<_, u32x8>(a.as_u16x8());
+ let b = simd_cast::<_, u32x8>(b.as_u16x8());
+ let r = simd_shr(simd_add(simd_add(a, b), u32x8::splat(1)), u32x8::splat(1));
+ transmute(simd_cast::<_, u16x8>(r))
}
/// Multiplies and then horizontally add signed 16 bit integers in `a` and `b`.
@@ -261,7 +267,10 @@ pub unsafe fn _mm_min_epu8(a: __m128i, b: __m128i) -> __m128i {
#[cfg_attr(test, assert_instr(pmulhw))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_mulhi_epi16(a: __m128i, b: __m128i) -> __m128i {
- transmute(pmulhw(a.as_i16x8(), b.as_i16x8()))
+ let a = simd_cast::<_, i32x8>(a.as_i16x8());
+ let b = simd_cast::<_, i32x8>(b.as_i16x8());
+ let r = simd_shr(simd_mul(a, b), i32x8::splat(16));
+ transmute(simd_cast::<i32x8, i16x8>(r))
}
/// Multiplies the packed unsigned 16-bit integers in `a` and `b`.
@@ -275,7 +284,10 @@ pub unsafe fn _mm_mulhi_epi16(a: __m128i, b: __m128i) -> __m128i {
#[cfg_attr(test, assert_instr(pmulhuw))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_mulhi_epu16(a: __m128i, b: __m128i) -> __m128i {
- transmute(pmulhuw(a.as_u16x8(), b.as_u16x8()))
+ let a = simd_cast::<_, u32x8>(a.as_u16x8());
+ let b = simd_cast::<_, u32x8>(b.as_u16x8());
+ let r = simd_shr(simd_mul(a, b), u32x8::splat(16));
+ transmute(simd_cast::<u32x8, u16x8>(r))
}
/// Multiplies the packed 16-bit integers in `a` and `b`.
@@ -303,7 +315,10 @@ pub unsafe fn _mm_mullo_epi16(a: __m128i, b: __m128i) -> __m128i {
#[cfg_attr(test, assert_instr(pmuludq))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_mul_epu32(a: __m128i, b: __m128i) -> __m128i {
- transmute(pmuludq(a.as_u32x4(), b.as_u32x4()))
+ let a = a.as_u64x2();
+ let b = b.as_u64x2();
+ let mask = u64x2::splat(u32::MAX.into());
+ transmute(simd_mul(simd_and(a, mask), simd_and(b, mask)))
}
/// Sum the absolute differences of packed unsigned 8-bit integers.
@@ -952,7 +967,7 @@ pub unsafe fn _mm_cvtsi32_sd(a: __m128d, b: i32) -> __m128d {
#[cfg_attr(test, assert_instr(cvtdq2ps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cvtepi32_ps(a: __m128i) -> __m128 {
- cvtdq2ps(a.as_i32x4())
+ transmute(simd_cast::<_, f32x4>(a.as_i32x4()))
}
/// Converts packed single-precision (32-bit) floating-point elements in `a`
@@ -2240,7 +2255,9 @@ pub unsafe fn _mm_ucomineq_sd(a: __m128d, b: __m128d) -> i32 {
#[cfg_attr(test, assert_instr(cvtpd2ps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cvtpd_ps(a: __m128d) -> __m128 {
- cvtpd2ps(a)
+ let r = simd_cast::<_, f32x2>(a.as_f64x2());
+ let zero = f32x2::new(0.0, 0.0);
+ transmute::<f32x4, _>(simd_shuffle!(r, zero, [0, 1, 2, 3]))
}
/// Converts packed single-precision (32-bit) floating-point elements in `a` to
@@ -2253,7 +2270,8 @@ pub unsafe fn _mm_cvtpd_ps(a: __m128d) -> __m128 {
#[cfg_attr(test, assert_instr(cvtps2pd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cvtps_pd(a: __m128) -> __m128d {
- cvtps2pd(a)
+ let a = a.as_f32x4();
+ transmute(simd_cast::<f32x2, f64x2>(simd_shuffle!(a, a, [0, 1])))
}
/// Converts packed double-precision (64-bit) floating-point elements in `a` to
@@ -2432,7 +2450,10 @@ pub unsafe fn _mm_setzero_pd() -> __m128d {
#[cfg_attr(test, assert_instr(movmskpd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_movemask_pd(a: __m128d) -> i32 {
- movmskpd(a)
+ // Propagate the highest bit to the rest, because simd_bitmask
+ // requires all-1 or all-0.
+ let mask: i64x2 = simd_lt(transmute(a), i64x2::splat(0));
+ simd_bitmask::<i64x2, u8>(mask).into()
}
/// Loads 128-bits (composed of 2 packed double-precision (64-bit)
@@ -2826,18 +2847,8 @@ extern "C" {
fn lfence();
#[link_name = "llvm.x86.sse2.mfence"]
fn mfence();
- #[link_name = "llvm.x86.sse2.pavg.b"]
- fn pavgb(a: u8x16, b: u8x16) -> u8x16;
- #[link_name = "llvm.x86.sse2.pavg.w"]
- fn pavgw(a: u16x8, b: u16x8) -> u16x8;
#[link_name = "llvm.x86.sse2.pmadd.wd"]
fn pmaddwd(a: i16x8, b: i16x8) -> i32x4;
- #[link_name = "llvm.x86.sse2.pmulh.w"]
- fn pmulhw(a: i16x8, b: i16x8) -> i16x8;
- #[link_name = "llvm.x86.sse2.pmulhu.w"]
- fn pmulhuw(a: u16x8, b: u16x8) -> u16x8;
- #[link_name = "llvm.x86.sse2.pmulu.dq"]
- fn pmuludq(a: u32x4, b: u32x4) -> u64x2;
#[link_name = "llvm.x86.sse2.psad.bw"]
fn psadbw(a: u8x16, b: u8x16) -> u64x2;
#[link_name = "llvm.x86.sse2.psll.w"]
@@ -2856,8 +2867,6 @@ extern "C" {
fn psrld(a: i32x4, count: i32x4) -> i32x4;
#[link_name = "llvm.x86.sse2.psrl.q"]
fn psrlq(a: i64x2, count: i64x2) -> i64x2;
- #[link_name = "llvm.x86.sse2.cvtdq2ps"]
- fn cvtdq2ps(a: i32x4) -> __m128;
#[link_name = "llvm.x86.sse2.cvtps2dq"]
fn cvtps2dq(a: __m128) -> i32x4;
#[link_name = "llvm.x86.sse2.maskmov.dqu"]
@@ -2908,12 +2917,6 @@ extern "C" {
fn ucomigesd(a: __m128d, b: __m128d) -> i32;
#[link_name = "llvm.x86.sse2.ucomineq.sd"]
fn ucomineqsd(a: __m128d, b: __m128d) -> i32;
- #[link_name = "llvm.x86.sse2.movmsk.pd"]
- fn movmskpd(a: __m128d) -> i32;
- #[link_name = "llvm.x86.sse2.cvtpd2ps"]
- fn cvtpd2ps(a: __m128d) -> __m128;
- #[link_name = "llvm.x86.sse2.cvtps2pd"]
- fn cvtps2pd(a: __m128) -> __m128d;
#[link_name = "llvm.x86.sse2.cvtpd2dq"]
fn cvtpd2dq(a: __m128d) -> i32x4;
#[link_name = "llvm.x86.sse2.cvtsd2si"]
@@ -2956,11 +2959,15 @@ mod tests {
}
#[simd_test(enable = "sse2")]
+ // Miri cannot support this until it is clear how it fits in the Rust memory model
+ #[cfg_attr(miri, ignore)]
unsafe fn test_mm_lfence() {
_mm_lfence();
}
#[simd_test(enable = "sse2")]
+ // Miri cannot support this until it is clear how it fits in the Rust memory model
+ #[cfg_attr(miri, ignore)]
unsafe fn test_mm_mfence() {
_mm_mfence();
}
@@ -3343,83 +3350,124 @@ mod tests {
#[simd_test(enable = "sse2")]
unsafe fn test_mm_slli_epi16() {
- #[rustfmt::skip]
- let a = _mm_setr_epi16(
- 0xFFFF as u16 as i16, 0x0FFF, 0x00FF, 0x000F, 0, 0, 0, 0,
- );
+ let a = _mm_setr_epi16(0xCC, -0xCC, 0xDD, -0xDD, 0xEE, -0xEE, 0xFF, -0xFF);
let r = _mm_slli_epi16::<4>(a);
-
- #[rustfmt::skip]
- let e = _mm_setr_epi16(
- 0xFFF0 as u16 as i16, 0xFFF0 as u16 as i16, 0x0FF0, 0x00F0,
- 0, 0, 0, 0,
+ assert_eq_m128i(
+ r,
+ _mm_setr_epi16(0xCC0, -0xCC0, 0xDD0, -0xDD0, 0xEE0, -0xEE0, 0xFF0, -0xFF0),
);
- assert_eq_m128i(r, e);
+ let r = _mm_slli_epi16::<16>(a);
+ assert_eq_m128i(r, _mm_set1_epi16(0));
}
#[simd_test(enable = "sse2")]
unsafe fn test_mm_sll_epi16() {
- let a = _mm_setr_epi16(0xFF, 0, 0, 0, 0, 0, 0, 0);
- let r = _mm_sll_epi16(a, _mm_setr_epi16(4, 0, 0, 0, 0, 0, 0, 0));
- assert_eq_m128i(r, _mm_setr_epi16(0xFF0, 0, 0, 0, 0, 0, 0, 0));
- let r = _mm_sll_epi16(a, _mm_setr_epi16(0, 0, 0, 0, 4, 0, 0, 0));
- assert_eq_m128i(r, _mm_setr_epi16(0xFF, 0, 0, 0, 0, 0, 0, 0));
+ let a = _mm_setr_epi16(0xCC, -0xCC, 0xDD, -0xDD, 0xEE, -0xEE, 0xFF, -0xFF);
+ let r = _mm_sll_epi16(a, _mm_set_epi64x(0, 4));
+ assert_eq_m128i(
+ r,
+ _mm_setr_epi16(0xCC0, -0xCC0, 0xDD0, -0xDD0, 0xEE0, -0xEE0, 0xFF0, -0xFF0),
+ );
+ let r = _mm_sll_epi16(a, _mm_set_epi64x(4, 0));
+ assert_eq_m128i(r, a);
+ let r = _mm_sll_epi16(a, _mm_set_epi64x(0, 16));
+ assert_eq_m128i(r, _mm_set1_epi16(0));
+ let r = _mm_sll_epi16(a, _mm_set_epi64x(0, i64::MAX));
+ assert_eq_m128i(r, _mm_set1_epi16(0));
}
#[simd_test(enable = "sse2")]
unsafe fn test_mm_slli_epi32() {
- let r = _mm_slli_epi32::<4>(_mm_set1_epi32(0xFFFF));
- assert_eq_m128i(r, _mm_set1_epi32(0xFFFF0));
+ let a = _mm_setr_epi32(0xEEEE, -0xEEEE, 0xFFFF, -0xFFFF);
+ let r = _mm_slli_epi32::<4>(a);
+ assert_eq_m128i(r, _mm_setr_epi32(0xEEEE0, -0xEEEE0, 0xFFFF0, -0xFFFF0));
+ let r = _mm_slli_epi32::<32>(a);
+ assert_eq_m128i(r, _mm_set1_epi32(0));
}
#[simd_test(enable = "sse2")]
unsafe fn test_mm_sll_epi32() {
- let a = _mm_set1_epi32(0xFFFF);
- let b = _mm_setr_epi32(4, 0, 0, 0);
- let r = _mm_sll_epi32(a, b);
- assert_eq_m128i(r, _mm_set1_epi32(0xFFFF0));
+ let a = _mm_setr_epi32(0xEEEE, -0xEEEE, 0xFFFF, -0xFFFF);
+ let r = _mm_sll_epi32(a, _mm_set_epi64x(0, 4));
+ assert_eq_m128i(r, _mm_setr_epi32(0xEEEE0, -0xEEEE0, 0xFFFF0, -0xFFFF0));
+ let r = _mm_sll_epi32(a, _mm_set_epi64x(4, 0));
+ assert_eq_m128i(r, a);
+ let r = _mm_sll_epi32(a, _mm_set_epi64x(0, 32));
+ assert_eq_m128i(r, _mm_set1_epi32(0));
+ let r = _mm_sll_epi32(a, _mm_set_epi64x(0, i64::MAX));
+ assert_eq_m128i(r, _mm_set1_epi32(0));
}
#[simd_test(enable = "sse2")]
unsafe fn test_mm_slli_epi64() {
- let r = _mm_slli_epi64::<4>(_mm_set1_epi64x(0xFFFFFFFF));
- assert_eq_m128i(r, _mm_set1_epi64x(0xFFFFFFFF0));
+ let a = _mm_set_epi64x(0xFFFFFFFF, -0xFFFFFFFF);
+ let r = _mm_slli_epi64::<4>(a);
+ assert_eq_m128i(r, _mm_set_epi64x(0xFFFFFFFF0, -0xFFFFFFFF0));
+ let r = _mm_slli_epi64::<64>(a);
+ assert_eq_m128i(r, _mm_set1_epi64x(0));
}
#[simd_test(enable = "sse2")]
unsafe fn test_mm_sll_epi64() {
- let a = _mm_set1_epi64x(0xFFFFFFFF);
- let b = _mm_setr_epi64x(4, 0);
- let r = _mm_sll_epi64(a, b);
- assert_eq_m128i(r, _mm_set1_epi64x(0xFFFFFFFF0));
+ let a = _mm_set_epi64x(0xFFFFFFFF, -0xFFFFFFFF);
+ let r = _mm_sll_epi64(a, _mm_set_epi64x(0, 4));
+ assert_eq_m128i(r, _mm_set_epi64x(0xFFFFFFFF0, -0xFFFFFFFF0));
+ let r = _mm_sll_epi64(a, _mm_set_epi64x(4, 0));
+ assert_eq_m128i(r, a);
+ let r = _mm_sll_epi64(a, _mm_set_epi64x(0, 64));
+ assert_eq_m128i(r, _mm_set1_epi64x(0));
+ let r = _mm_sll_epi64(a, _mm_set_epi64x(0, i64::MAX));
+ assert_eq_m128i(r, _mm_set1_epi64x(0));
}
#[simd_test(enable = "sse2")]
unsafe fn test_mm_srai_epi16() {
- let r = _mm_srai_epi16::<1>(_mm_set1_epi16(-1));
- assert_eq_m128i(r, _mm_set1_epi16(-1));
+ let a = _mm_setr_epi16(0xCC, -0xCC, 0xDD, -0xDD, 0xEE, -0xEE, 0xFF, -0xFF);
+ let r = _mm_srai_epi16::<4>(a);
+ assert_eq_m128i(
+ r,
+ _mm_setr_epi16(0xC, -0xD, 0xD, -0xE, 0xE, -0xF, 0xF, -0x10),
+ );
+ let r = _mm_srai_epi16::<16>(a);
+ assert_eq_m128i(r, _mm_setr_epi16(0, -1, 0, -1, 0, -1, 0, -1));
}
#[simd_test(enable = "sse2")]
unsafe fn test_mm_sra_epi16() {
- let a = _mm_set1_epi16(-1);
- let b = _mm_setr_epi16(1, 0, 0, 0, 0, 0, 0, 0);
- let r = _mm_sra_epi16(a, b);
- assert_eq_m128i(r, _mm_set1_epi16(-1));
+ let a = _mm_setr_epi16(0xCC, -0xCC, 0xDD, -0xDD, 0xEE, -0xEE, 0xFF, -0xFF);
+ let r = _mm_sra_epi16(a, _mm_set_epi64x(0, 4));
+ assert_eq_m128i(
+ r,
+ _mm_setr_epi16(0xC, -0xD, 0xD, -0xE, 0xE, -0xF, 0xF, -0x10),
+ );
+ let r = _mm_sra_epi16(a, _mm_set_epi64x(4, 0));
+ assert_eq_m128i(r, a);
+ let r = _mm_sra_epi16(a, _mm_set_epi64x(0, 16));
+ assert_eq_m128i(r, _mm_setr_epi16(0, -1, 0, -1, 0, -1, 0, -1));
+ let r = _mm_sra_epi16(a, _mm_set_epi64x(0, i64::MAX));
+ assert_eq_m128i(r, _mm_setr_epi16(0, -1, 0, -1, 0, -1, 0, -1));
}
#[simd_test(enable = "sse2")]
unsafe fn test_mm_srai_epi32() {
- let r = _mm_srai_epi32::<1>(_mm_set1_epi32(-1));
- assert_eq_m128i(r, _mm_set1_epi32(-1));
+ let a = _mm_setr_epi32(0xEEEE, -0xEEEE, 0xFFFF, -0xFFFF);
+ let r = _mm_srai_epi32::<4>(a);
+ assert_eq_m128i(r, _mm_setr_epi32(0xEEE, -0xEEF, 0xFFF, -0x1000));
+ let r = _mm_srai_epi32::<32>(a);
+ assert_eq_m128i(r, _mm_setr_epi32(0, -1, 0, -1));
}
#[simd_test(enable = "sse2")]
unsafe fn test_mm_sra_epi32() {
- let a = _mm_set1_epi32(-1);
- let b = _mm_setr_epi32(1, 0, 0, 0);
- let r = _mm_sra_epi32(a, b);
- assert_eq_m128i(r, _mm_set1_epi32(-1));
+ let a = _mm_setr_epi32(0xEEEE, -0xEEEE, 0xFFFF, -0xFFFF);
+ let r = _mm_sra_epi32(a, _mm_set_epi64x(0, 4));
+ assert_eq_m128i(r, _mm_setr_epi32(0xEEE, -0xEEF, 0xFFF, -0x1000));
+ let r = _mm_sra_epi32(a, _mm_set_epi64x(4, 0));
+ assert_eq_m128i(r, a);
+ let r = _mm_sra_epi32(a, _mm_set_epi64x(0, 32));
+ assert_eq_m128i(r, _mm_setr_epi32(0, -1, 0, -1));
+ let r = _mm_sra_epi32(a, _mm_set_epi64x(0, i64::MAX));
+ assert_eq_m128i(r, _mm_setr_epi32(0, -1, 0, -1));
}
#[simd_test(enable = "sse2")]
@@ -3453,53 +3501,74 @@ mod tests {
#[simd_test(enable = "sse2")]
unsafe fn test_mm_srli_epi16() {
- #[rustfmt::skip]
- let a = _mm_setr_epi16(
- 0xFFFF as u16 as i16, 0x0FFF, 0x00FF, 0x000F, 0, 0, 0, 0,
- );
+ let a = _mm_setr_epi16(0xCC, -0xCC, 0xDD, -0xDD, 0xEE, -0xEE, 0xFF, -0xFF);
let r = _mm_srli_epi16::<4>(a);
- #[rustfmt::skip]
- let e = _mm_setr_epi16(
- 0xFFF as u16 as i16, 0xFF as u16 as i16, 0xF, 0, 0, 0, 0, 0,
+ assert_eq_m128i(
+ r,
+ _mm_setr_epi16(0xC, 0xFF3, 0xD, 0xFF2, 0xE, 0xFF1, 0xF, 0xFF0),
);
- assert_eq_m128i(r, e);
+ let r = _mm_srli_epi16::<16>(a);
+ assert_eq_m128i(r, _mm_set1_epi16(0));
}
#[simd_test(enable = "sse2")]
unsafe fn test_mm_srl_epi16() {
- let a = _mm_setr_epi16(0xFF, 0, 0, 0, 0, 0, 0, 0);
- let r = _mm_srl_epi16(a, _mm_setr_epi16(4, 0, 0, 0, 0, 0, 0, 0));
- assert_eq_m128i(r, _mm_setr_epi16(0xF, 0, 0, 0, 0, 0, 0, 0));
- let r = _mm_srl_epi16(a, _mm_setr_epi16(0, 0, 0, 0, 4, 0, 0, 0));
- assert_eq_m128i(r, _mm_setr_epi16(0xFF, 0, 0, 0, 0, 0, 0, 0));
+ let a = _mm_setr_epi16(0xCC, -0xCC, 0xDD, -0xDD, 0xEE, -0xEE, 0xFF, -0xFF);
+ let r = _mm_srl_epi16(a, _mm_set_epi64x(0, 4));
+ assert_eq_m128i(
+ r,
+ _mm_setr_epi16(0xC, 0xFF3, 0xD, 0xFF2, 0xE, 0xFF1, 0xF, 0xFF0),
+ );
+ let r = _mm_srl_epi16(a, _mm_set_epi64x(4, 0));
+ assert_eq_m128i(r, a);
+ let r = _mm_srl_epi16(a, _mm_set_epi64x(0, 16));
+ assert_eq_m128i(r, _mm_set1_epi16(0));
+ let r = _mm_srl_epi16(a, _mm_set_epi64x(0, i64::MAX));
+ assert_eq_m128i(r, _mm_set1_epi16(0));
}
#[simd_test(enable = "sse2")]
unsafe fn test_mm_srli_epi32() {
- let r = _mm_srli_epi32::<4>(_mm_set1_epi32(0xFFFF));
- assert_eq_m128i(r, _mm_set1_epi32(0xFFF));
+ let a = _mm_setr_epi32(0xEEEE, -0xEEEE, 0xFFFF, -0xFFFF);
+ let r = _mm_srli_epi32::<4>(a);
+ assert_eq_m128i(r, _mm_setr_epi32(0xEEE, 0xFFFF111, 0xFFF, 0xFFFF000));
+ let r = _mm_srli_epi32::<32>(a);
+ assert_eq_m128i(r, _mm_set1_epi32(0));
}
#[simd_test(enable = "sse2")]
unsafe fn test_mm_srl_epi32() {
- let a = _mm_set1_epi32(0xFFFF);
- let b = _mm_setr_epi32(4, 0, 0, 0);
- let r = _mm_srl_epi32(a, b);
- assert_eq_m128i(r, _mm_set1_epi32(0xFFF));
+ let a = _mm_setr_epi32(0xEEEE, -0xEEEE, 0xFFFF, -0xFFFF);
+ let r = _mm_srl_epi32(a, _mm_set_epi64x(0, 4));
+ assert_eq_m128i(r, _mm_setr_epi32(0xEEE, 0xFFFF111, 0xFFF, 0xFFFF000));
+ let r = _mm_srl_epi32(a, _mm_set_epi64x(4, 0));
+ assert_eq_m128i(r, a);
+ let r = _mm_srl_epi32(a, _mm_set_epi64x(0, 32));
+ assert_eq_m128i(r, _mm_set1_epi32(0));
+ let r = _mm_srl_epi32(a, _mm_set_epi64x(0, i64::MAX));
+ assert_eq_m128i(r, _mm_set1_epi32(0));
}
#[simd_test(enable = "sse2")]
unsafe fn test_mm_srli_epi64() {
- let r = _mm_srli_epi64::<4>(_mm_set1_epi64x(0xFFFFFFFF));
- assert_eq_m128i(r, _mm_set1_epi64x(0xFFFFFFF));
+ let a = _mm_set_epi64x(0xFFFFFFFF, -0xFFFFFFFF);
+ let r = _mm_srli_epi64::<4>(a);
+ assert_eq_m128i(r, _mm_set_epi64x(0xFFFFFFF, 0xFFFFFFFF0000000));
+ let r = _mm_srli_epi64::<64>(a);
+ assert_eq_m128i(r, _mm_set1_epi64x(0));
}
#[simd_test(enable = "sse2")]
unsafe fn test_mm_srl_epi64() {
- let a = _mm_set1_epi64x(0xFFFFFFFF);
- let b = _mm_setr_epi64x(4, 0);
- let r = _mm_srl_epi64(a, b);
- assert_eq_m128i(r, _mm_set1_epi64x(0xFFFFFFF));
+ let a = _mm_set_epi64x(0xFFFFFFFF, -0xFFFFFFFF);
+ let r = _mm_srl_epi64(a, _mm_set_epi64x(0, 4));
+ assert_eq_m128i(r, _mm_set_epi64x(0xFFFFFFF, 0xFFFFFFFF0000000));
+ let r = _mm_srl_epi64(a, _mm_set_epi64x(4, 0));
+ assert_eq_m128i(r, a);
+ let r = _mm_srl_epi64(a, _mm_set_epi64x(0, 64));
+ assert_eq_m128i(r, _mm_set1_epi64x(0));
+ let r = _mm_srl_epi64(a, _mm_set_epi64x(0, i64::MAX));
+ assert_eq_m128i(r, _mm_set1_epi64x(0));
}
#[simd_test(enable = "sse2")]
@@ -3766,6 +3835,9 @@ mod tests {
}
#[simd_test(enable = "sse2")]
+ // Miri cannot support this until it is clear how it fits in the Rust memory model
+ // (non-temporal store)
+ #[cfg_attr(miri, ignore)]
unsafe fn test_mm_maskmoveu_si128() {
let a = _mm_set1_epi8(9);
#[rustfmt::skip]
@@ -3804,6 +3876,9 @@ mod tests {
}
#[simd_test(enable = "sse2")]
+ // Miri cannot support this until it is clear how it fits in the Rust memory model
+ // (non-temporal store)
+ #[cfg_attr(miri, ignore)]
unsafe fn test_mm_stream_si128() {
let a = _mm_setr_epi32(1, 2, 3, 4);
let mut r = _mm_undefined_si128();
@@ -3812,6 +3887,9 @@ mod tests {
}
#[simd_test(enable = "sse2")]
+ // Miri cannot support this until it is clear how it fits in the Rust memory model
+ // (non-temporal store)
+ #[cfg_attr(miri, ignore)]
unsafe fn test_mm_stream_si32() {
let a: i32 = 7;
let mut mem = boxed::Box::<i32>::new(-1);
@@ -4055,6 +4133,17 @@ mod tests {
let b = _mm_setr_pd(5.0, 10.0);
let r = _mm_max_pd(a, b);
assert_eq_m128d(r, _mm_setr_pd(5.0, 10.0));
+
+ // Check SSE(2)-specific semantics for -0.0 handling.
+ let a = _mm_setr_pd(-0.0, 0.0);
+ let b = _mm_setr_pd(0.0, 0.0);
+ let r1: [u8; 16] = transmute(_mm_max_pd(a, b));
+ let r2: [u8; 16] = transmute(_mm_max_pd(b, a));
+ let a: [u8; 16] = transmute(a);
+ let b: [u8; 16] = transmute(b);
+ assert_eq!(r1, b);
+ assert_eq!(r2, a);
+ assert_ne!(a, b); // sanity check that -0.0 is actually present
}
#[simd_test(enable = "sse2")]
@@ -4071,6 +4160,17 @@ mod tests {
let b = _mm_setr_pd(5.0, 10.0);
let r = _mm_min_pd(a, b);
assert_eq_m128d(r, _mm_setr_pd(1.0, 2.0));
+
+ // Check SSE(2)-specific semantics for -0.0 handling.
+ let a = _mm_setr_pd(-0.0, 0.0);
+ let b = _mm_setr_pd(0.0, 0.0);
+ let r1: [u8; 16] = transmute(_mm_min_pd(a, b));
+ let r2: [u8; 16] = transmute(_mm_min_pd(b, a));
+ let a: [u8; 16] = transmute(a);
+ let b: [u8; 16] = transmute(b);
+ assert_eq!(r1, b);
+ assert_eq!(r2, a);
+ assert_ne!(a, b); // sanity check that -0.0 is actually present
}
#[simd_test(enable = "sse2")]
@@ -4158,7 +4258,7 @@ mod tests {
#[simd_test(enable = "sse2")]
unsafe fn test_mm_cmpeq_sd() {
let (a, b) = (_mm_setr_pd(1.0, 2.0), _mm_setr_pd(1.0, 3.0));
- let e = _mm_setr_epi64x(!0, transmute(2.0f64));
+ let e = _mm_setr_epi64x(!0, 2.0f64.to_bits() as i64);
let r = transmute::<_, __m128i>(_mm_cmpeq_sd(a, b));
assert_eq_m128i(r, e);
}
@@ -4166,7 +4266,7 @@ mod tests {
#[simd_test(enable = "sse2")]
unsafe fn test_mm_cmplt_sd() {
let (a, b) = (_mm_setr_pd(1.0, 2.0), _mm_setr_pd(5.0, 3.0));
- let e = _mm_setr_epi64x(!0, transmute(2.0f64));
+ let e = _mm_setr_epi64x(!0, 2.0f64.to_bits() as i64);
let r = transmute::<_, __m128i>(_mm_cmplt_sd(a, b));
assert_eq_m128i(r, e);
}
@@ -4174,7 +4274,7 @@ mod tests {
#[simd_test(enable = "sse2")]
unsafe fn test_mm_cmple_sd() {
let (a, b) = (_mm_setr_pd(1.0, 2.0), _mm_setr_pd(1.0, 3.0));
- let e = _mm_setr_epi64x(!0, transmute(2.0f64));
+ let e = _mm_setr_epi64x(!0, 2.0f64.to_bits() as i64);
let r = transmute::<_, __m128i>(_mm_cmple_sd(a, b));
assert_eq_m128i(r, e);
}
@@ -4182,7 +4282,7 @@ mod tests {
#[simd_test(enable = "sse2")]
unsafe fn test_mm_cmpgt_sd() {
let (a, b) = (_mm_setr_pd(5.0, 2.0), _mm_setr_pd(1.0, 3.0));
- let e = _mm_setr_epi64x(!0, transmute(2.0f64));
+ let e = _mm_setr_epi64x(!0, 2.0f64.to_bits() as i64);
let r = transmute::<_, __m128i>(_mm_cmpgt_sd(a, b));
assert_eq_m128i(r, e);
}
@@ -4190,7 +4290,7 @@ mod tests {
#[simd_test(enable = "sse2")]
unsafe fn test_mm_cmpge_sd() {
let (a, b) = (_mm_setr_pd(1.0, 2.0), _mm_setr_pd(1.0, 3.0));
- let e = _mm_setr_epi64x(!0, transmute(2.0f64));
+ let e = _mm_setr_epi64x(!0, 2.0f64.to_bits() as i64);
let r = transmute::<_, __m128i>(_mm_cmpge_sd(a, b));
assert_eq_m128i(r, e);
}
@@ -4198,7 +4298,7 @@ mod tests {
#[simd_test(enable = "sse2")]
unsafe fn test_mm_cmpord_sd() {
let (a, b) = (_mm_setr_pd(NAN, 2.0), _mm_setr_pd(5.0, 3.0));
- let e = _mm_setr_epi64x(0, transmute(2.0f64));
+ let e = _mm_setr_epi64x(0, 2.0f64.to_bits() as i64);
let r = transmute::<_, __m128i>(_mm_cmpord_sd(a, b));
assert_eq_m128i(r, e);
}
@@ -4206,7 +4306,7 @@ mod tests {
#[simd_test(enable = "sse2")]
unsafe fn test_mm_cmpunord_sd() {
let (a, b) = (_mm_setr_pd(NAN, 2.0), _mm_setr_pd(5.0, 3.0));
- let e = _mm_setr_epi64x(!0, transmute(2.0f64));
+ let e = _mm_setr_epi64x(!0, 2.0f64.to_bits() as i64);
let r = transmute::<_, __m128i>(_mm_cmpunord_sd(a, b));
assert_eq_m128i(r, e);
}
@@ -4214,7 +4314,7 @@ mod tests {
#[simd_test(enable = "sse2")]
unsafe fn test_mm_cmpneq_sd() {
let (a, b) = (_mm_setr_pd(1.0, 2.0), _mm_setr_pd(5.0, 3.0));
- let e = _mm_setr_epi64x(!0, transmute(2.0f64));
+ let e = _mm_setr_epi64x(!0, 2.0f64.to_bits() as i64);
let r = transmute::<_, __m128i>(_mm_cmpneq_sd(a, b));
assert_eq_m128i(r, e);
}
@@ -4222,7 +4322,7 @@ mod tests {
#[simd_test(enable = "sse2")]
unsafe fn test_mm_cmpnlt_sd() {
let (a, b) = (_mm_setr_pd(1.0, 2.0), _mm_setr_pd(5.0, 3.0));
- let e = _mm_setr_epi64x(0, transmute(2.0f64));
+ let e = _mm_setr_epi64x(0, 2.0f64.to_bits() as i64);
let r = transmute::<_, __m128i>(_mm_cmpnlt_sd(a, b));
assert_eq_m128i(r, e);
}
@@ -4230,7 +4330,7 @@ mod tests {
#[simd_test(enable = "sse2")]
unsafe fn test_mm_cmpnle_sd() {
let (a, b) = (_mm_setr_pd(1.0, 2.0), _mm_setr_pd(1.0, 3.0));
- let e = _mm_setr_epi64x(0, transmute(2.0f64));
+ let e = _mm_setr_epi64x(0, 2.0f64.to_bits() as i64);
let r = transmute::<_, __m128i>(_mm_cmpnle_sd(a, b));
assert_eq_m128i(r, e);
}
@@ -4238,7 +4338,7 @@ mod tests {
#[simd_test(enable = "sse2")]
unsafe fn test_mm_cmpngt_sd() {
let (a, b) = (_mm_setr_pd(5.0, 2.0), _mm_setr_pd(1.0, 3.0));
- let e = _mm_setr_epi64x(0, transmute(2.0f64));
+ let e = _mm_setr_epi64x(0, 2.0f64.to_bits() as i64);
let r = transmute::<_, __m128i>(_mm_cmpngt_sd(a, b));
assert_eq_m128i(r, e);
}
@@ -4246,7 +4346,7 @@ mod tests {
#[simd_test(enable = "sse2")]
unsafe fn test_mm_cmpnge_sd() {
let (a, b) = (_mm_setr_pd(1.0, 2.0), _mm_setr_pd(1.0, 3.0));
- let e = _mm_setr_epi64x(0, transmute(2.0f64));
+ let e = _mm_setr_epi64x(0, 2.0f64.to_bits() as i64);
let r = transmute::<_, __m128i>(_mm_cmpnge_sd(a, b));
assert_eq_m128i(r, e);
}
@@ -4478,6 +4578,9 @@ mod tests {
}
#[simd_test(enable = "sse2")]
+ // Miri cannot support this until it is clear how it fits in the Rust memory model
+ // (non-temporal store)
+ #[cfg_attr(miri, ignore)]
unsafe fn test_mm_stream_pd() {
#[repr(align(128))]
struct Memory {
diff --git a/library/stdarch/crates/core_arch/src/x86/sse3.rs b/library/stdarch/crates/core_arch/src/x86/sse3.rs
index 092a8d9cd..df0d78e5b 100644
--- a/library/stdarch/crates/core_arch/src/x86/sse3.rs
+++ b/library/stdarch/crates/core_arch/src/x86/sse3.rs
@@ -1,7 +1,7 @@
//! Streaming SIMD Extensions 3 (SSE3)
use crate::{
- core_arch::{simd::*, simd_llvm::simd_shuffle, x86::*},
+ core_arch::{simd::*, simd_llvm::*, x86::*},
mem::transmute,
};
@@ -17,7 +17,11 @@ use stdarch_test::assert_instr;
#[cfg_attr(test, assert_instr(addsubps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_addsub_ps(a: __m128, b: __m128) -> __m128 {
- addsubps(a, b)
+ let a = a.as_f32x4();
+ let b = b.as_f32x4();
+ let add = simd_add(a, b);
+ let sub = simd_sub(a, b);
+ simd_shuffle!(add, sub, [4, 1, 6, 3])
}
/// Alternatively add and subtract packed double-precision (64-bit)
@@ -29,7 +33,11 @@ pub unsafe fn _mm_addsub_ps(a: __m128, b: __m128) -> __m128 {
#[cfg_attr(test, assert_instr(addsubpd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_addsub_pd(a: __m128d, b: __m128d) -> __m128d {
- addsubpd(a, b)
+ let a = a.as_f64x2();
+ let b = b.as_f64x2();
+ let add = simd_add(a, b);
+ let sub = simd_sub(a, b);
+ simd_shuffle!(add, sub, [2, 1])
}
/// Horizontally adds adjacent pairs of double-precision (64-bit)
@@ -143,10 +151,6 @@ pub unsafe fn _mm_moveldup_ps(a: __m128) -> __m128 {
#[allow(improper_ctypes)]
extern "C" {
- #[link_name = "llvm.x86.sse3.addsub.ps"]
- fn addsubps(a: __m128, b: __m128) -> __m128;
- #[link_name = "llvm.x86.sse3.addsub.pd"]
- fn addsubpd(a: __m128d, b: __m128d) -> __m128d;
#[link_name = "llvm.x86.sse3.hadd.pd"]
fn haddpd(a: __m128d, b: __m128d) -> __m128d;
#[link_name = "llvm.x86.sse3.hadd.ps"]
diff --git a/library/stdarch/crates/core_arch/src/x86/sse41.rs b/library/stdarch/crates/core_arch/src/x86/sse41.rs
index 7ba86e5f7..6d33238b0 100644
--- a/library/stdarch/crates/core_arch/src/x86/sse41.rs
+++ b/library/stdarch/crates/core_arch/src/x86/sse41.rs
@@ -62,7 +62,8 @@ pub const _MM_FROUND_NEARBYINT: i32 = _MM_FROUND_NO_EXC | _MM_FROUND_CUR_DIRECTI
#[cfg_attr(test, assert_instr(pblendvb))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_blendv_epi8(a: __m128i, b: __m128i, mask: __m128i) -> __m128i {
- transmute(pblendvb(a.as_i8x16(), b.as_i8x16(), mask.as_i8x16()))
+ let mask: i8x16 = simd_lt(mask.as_i8x16(), i8x16::splat(0));
+ transmute(simd_select(mask, b.as_i8x16(), a.as_i8x16()))
}
/// Blend packed 16-bit integers from `a` and `b` using the mask `IMM8`.
@@ -74,15 +75,25 @@ pub unsafe fn _mm_blendv_epi8(a: __m128i, b: __m128i, mask: __m128i) -> __m128i
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blend_epi16)
#[inline]
#[target_feature(enable = "sse4.1")]
-// Note: LLVM7 prefers the single-precision floating-point domain when possible
-// see https://bugs.llvm.org/show_bug.cgi?id=38195
-// #[cfg_attr(test, assert_instr(pblendw, IMM8 = 0xF0))]
-#[cfg_attr(test, assert_instr(blendps, IMM8 = 0xF0))]
+#[cfg_attr(test, assert_instr(pblendw, IMM8 = 0xB1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_blend_epi16<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i {
static_assert_uimm_bits!(IMM8, 8);
- transmute(pblendw(a.as_i16x8(), b.as_i16x8(), IMM8 as u8))
+ transmute::<i16x8, _>(simd_shuffle!(
+ a.as_i16x8(),
+ b.as_i16x8(),
+ [
+ [0, 8][IMM8 as usize & 1],
+ [1, 9][(IMM8 >> 1) as usize & 1],
+ [2, 10][(IMM8 >> 2) as usize & 1],
+ [3, 11][(IMM8 >> 3) as usize & 1],
+ [4, 12][(IMM8 >> 4) as usize & 1],
+ [5, 13][(IMM8 >> 5) as usize & 1],
+ [6, 14][(IMM8 >> 6) as usize & 1],
+ [7, 15][(IMM8 >> 7) as usize & 1],
+ ]
+ ))
}
/// Blend packed double-precision (64-bit) floating-point elements from `a`
@@ -94,7 +105,8 @@ pub unsafe fn _mm_blend_epi16<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128
#[cfg_attr(test, assert_instr(blendvpd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_blendv_pd(a: __m128d, b: __m128d, mask: __m128d) -> __m128d {
- blendvpd(a, b, mask)
+ let mask: i64x2 = simd_lt(transmute::<_, i64x2>(mask), i64x2::splat(0));
+ transmute(simd_select(mask, b.as_f64x2(), a.as_f64x2()))
}
/// Blend packed single-precision (32-bit) floating-point elements from `a`
@@ -106,7 +118,8 @@ pub unsafe fn _mm_blendv_pd(a: __m128d, b: __m128d, mask: __m128d) -> __m128d {
#[cfg_attr(test, assert_instr(blendvps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_blendv_ps(a: __m128, b: __m128, mask: __m128) -> __m128 {
- blendvps(a, b, mask)
+ let mask: i32x4 = simd_lt(transmute::<_, i32x4>(mask), i32x4::splat(0));
+ transmute(simd_select(mask, b.as_f32x4(), a.as_f32x4()))
}
/// Blend packed double-precision (64-bit) floating-point elements from `a`
@@ -123,7 +136,11 @@ pub unsafe fn _mm_blendv_ps(a: __m128, b: __m128, mask: __m128) -> __m128 {
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_blend_pd<const IMM2: i32>(a: __m128d, b: __m128d) -> __m128d {
static_assert_uimm_bits!(IMM2, 2);
- blendpd(a, b, IMM2 as u8)
+ transmute::<f64x2, _>(simd_shuffle!(
+ a.as_f64x2(),
+ b.as_f64x2(),
+ [[0, 2][IMM2 as usize & 1], [1, 3][(IMM2 >> 1) as usize & 1]]
+ ))
}
/// Blend packed single-precision (32-bit) floating-point elements from `a`
@@ -137,7 +154,16 @@ pub unsafe fn _mm_blend_pd<const IMM2: i32>(a: __m128d, b: __m128d) -> __m128d {
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_blend_ps<const IMM4: i32>(a: __m128, b: __m128) -> __m128 {
static_assert_uimm_bits!(IMM4, 4);
- blendps(a, b, IMM4 as u8)
+ transmute::<f32x4, _>(simd_shuffle!(
+ a.as_f32x4(),
+ b.as_f32x4(),
+ [
+ [0, 4][IMM4 as usize & 1],
+ [1, 5][(IMM4 >> 1) as usize & 1],
+ [2, 6][(IMM4 >> 2) as usize & 1],
+ [3, 7][(IMM4 >> 3) as usize & 1],
+ ]
+ ))
}
/// Extracts a single-precision (32-bit) floating-point element from `a`,
@@ -175,7 +201,7 @@ pub unsafe fn _mm_blend_ps<const IMM4: i32>(a: __m128, b: __m128) -> __m128 {
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_extract_ps<const IMM8: i32>(a: __m128) -> i32 {
static_assert_uimm_bits!(IMM8, 2);
- transmute(simd_extract::<_, f32>(a, IMM8 as u32))
+ simd_extract::<_, f32>(a, IMM8 as u32).to_bits() as i32
}
/// Extracts an 8-bit integer from `a`, selected with `IMM8`. Returns a 32-bit
@@ -923,7 +949,9 @@ pub unsafe fn _mm_minpos_epu16(a: __m128i) -> __m128i {
#[cfg_attr(test, assert_instr(pmuldq))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_mul_epi32(a: __m128i, b: __m128i) -> __m128i {
- transmute(pmuldq(a.as_i32x4(), b.as_i32x4()))
+ let a = simd_cast::<_, i64x2>(simd_cast::<_, i32x2>(a.as_i64x2()));
+ let b = simd_cast::<_, i64x2>(simd_cast::<_, i32x2>(b.as_i64x2()));
+ transmute(simd_mul(a, b))
}
/// Multiplies the packed 32-bit integers in `a` and `b`, producing intermediate
@@ -1124,18 +1152,6 @@ pub unsafe fn _mm_test_mix_ones_zeros(a: __m128i, mask: __m128i) -> i32 {
#[allow(improper_ctypes)]
extern "C" {
- #[link_name = "llvm.x86.sse41.pblendvb"]
- fn pblendvb(a: i8x16, b: i8x16, mask: i8x16) -> i8x16;
- #[link_name = "llvm.x86.sse41.blendvpd"]
- fn blendvpd(a: __m128d, b: __m128d, mask: __m128d) -> __m128d;
- #[link_name = "llvm.x86.sse41.blendvps"]
- fn blendvps(a: __m128, b: __m128, mask: __m128) -> __m128;
- #[link_name = "llvm.x86.sse41.blendpd"]
- fn blendpd(a: __m128d, b: __m128d, imm2: u8) -> __m128d;
- #[link_name = "llvm.x86.sse41.blendps"]
- fn blendps(a: __m128, b: __m128, imm4: u8) -> __m128;
- #[link_name = "llvm.x86.sse41.pblendw"]
- fn pblendw(a: i16x8, b: i16x8, imm8: u8) -> i16x8;
#[link_name = "llvm.x86.sse41.insertps"]
fn insertps(a: __m128, b: __m128, imm8: u8) -> __m128;
#[link_name = "llvm.x86.sse41.packusdw"]
@@ -1154,8 +1170,6 @@ extern "C" {
fn roundss(a: __m128, b: __m128, rounding: i32) -> __m128;
#[link_name = "llvm.x86.sse41.phminposuw"]
fn phminposuw(a: u16x8) -> u16x8;
- #[link_name = "llvm.x86.sse41.pmuldq"]
- fn pmuldq(a: i32x4, b: i32x4) -> i64x2;
#[link_name = "llvm.x86.sse41.mpsadbw"]
fn mpsadbw(a: u8x16, b: u8x16, imm8: u8) -> u16x8;
#[link_name = "llvm.x86.sse41.ptestz"]
@@ -1245,9 +1259,9 @@ mod tests {
#[simd_test(enable = "sse4.1")]
unsafe fn test_mm_extract_ps() {
let a = _mm_setr_ps(0.0, 1.0, 2.0, 3.0);
- let r: f32 = transmute(_mm_extract_ps::<1>(a));
+ let r: f32 = f32::from_bits(_mm_extract_ps::<1>(a) as u32);
assert_eq!(r, 1.0);
- let r: f32 = transmute(_mm_extract_ps::<3>(a));
+ let r: f32 = f32::from_bits(_mm_extract_ps::<3>(a) as u32);
assert_eq!(r, 3.0);
}
@@ -1668,6 +1682,7 @@ mod tests {
assert_eq_m128(r, e);
}
+ #[allow(deprecated)] // FIXME: This test uses deprecated CSR access functions
#[simd_test(enable = "sse4.1")]
unsafe fn test_mm_round_sd() {
let a = _mm_setr_pd(1.5, 3.5);
@@ -1680,6 +1695,7 @@ mod tests {
assert_eq_m128d(r, e);
}
+ #[allow(deprecated)] // FIXME: This test uses deprecated CSR access functions
#[simd_test(enable = "sse4.1")]
unsafe fn test_mm_round_ss() {
let a = _mm_setr_ps(1.5, 3.5, 7.5, 15.5);
diff --git a/library/stdarch/crates/core_arch/src/x86/test.rs b/library/stdarch/crates/core_arch/src/x86/test.rs
index ec4298033..50b2d93be 100644
--- a/library/stdarch/crates/core_arch/src/x86/test.rs
+++ b/library/stdarch/crates/core_arch/src/x86/test.rs
@@ -3,11 +3,13 @@
use crate::core_arch::x86::*;
use std::mem::transmute;
+#[track_caller]
#[target_feature(enable = "sse2")]
pub unsafe fn assert_eq_m128i(a: __m128i, b: __m128i) {
assert_eq!(transmute::<_, [u64; 2]>(a), transmute::<_, [u64; 2]>(b))
}
+#[track_caller]
#[target_feature(enable = "sse2")]
pub unsafe fn assert_eq_m128d(a: __m128d, b: __m128d) {
if _mm_movemask_pd(_mm_cmpeq_pd(a, b)) != 0b11 {
@@ -20,6 +22,7 @@ pub unsafe fn get_m128d(a: __m128d, idx: usize) -> f64 {
transmute::<_, [f64; 2]>(a)[idx]
}
+#[track_caller]
#[target_feature(enable = "sse")]
pub unsafe fn assert_eq_m128(a: __m128, b: __m128) {
let r = _mm_cmpeq_ps(a, b);
@@ -40,11 +43,13 @@ pub unsafe fn _mm_setr_epi64x(a: i64, b: i64) -> __m128i {
_mm_set_epi64x(b, a)
}
+#[track_caller]
#[target_feature(enable = "avx")]
pub unsafe fn assert_eq_m256i(a: __m256i, b: __m256i) {
assert_eq!(transmute::<_, [u64; 4]>(a), transmute::<_, [u64; 4]>(b))
}
+#[track_caller]
#[target_feature(enable = "avx")]
pub unsafe fn assert_eq_m256d(a: __m256d, b: __m256d) {
let cmp = _mm256_cmp_pd::<_CMP_EQ_OQ>(a, b);
@@ -58,6 +63,7 @@ pub unsafe fn get_m256d(a: __m256d, idx: usize) -> f64 {
transmute::<_, [f64; 4]>(a)[idx]
}
+#[track_caller]
#[target_feature(enable = "avx")]
pub unsafe fn assert_eq_m256(a: __m256, b: __m256) {
let cmp = _mm256_cmp_ps::<_CMP_EQ_OQ>(a, b);
@@ -125,10 +131,12 @@ mod x86_polyfill {
}
pub use self::x86_polyfill::*;
+#[track_caller]
pub unsafe fn assert_eq_m512i(a: __m512i, b: __m512i) {
assert_eq!(transmute::<_, [i32; 16]>(a), transmute::<_, [i32; 16]>(b))
}
+#[track_caller]
pub unsafe fn assert_eq_m512(a: __m512, b: __m512) {
let cmp = _mm512_cmp_ps_mask::<_CMP_EQ_OQ>(a, b);
if cmp != 0b11111111_11111111 {
@@ -136,6 +144,7 @@ pub unsafe fn assert_eq_m512(a: __m512, b: __m512) {
}
}
+#[track_caller]
pub unsafe fn assert_eq_m512d(a: __m512d, b: __m512d) {
let cmp = _mm512_cmp_pd_mask::<_CMP_EQ_OQ>(a, b);
if cmp != 0b11111111 {
diff --git a/library/stdarch/crates/core_arch/src/x86_64/avx512f.rs b/library/stdarch/crates/core_arch/src/x86_64/avx512f.rs
index 68f332767..bace11d13 100644
--- a/library/stdarch/crates/core_arch/src/x86_64/avx512f.rs
+++ b/library/stdarch/crates/core_arch/src/x86_64/avx512f.rs
@@ -33,7 +33,7 @@ pub unsafe fn _mm_cvtss_i64(a: __m128) -> i64 {
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2usi))]
pub unsafe fn _mm_cvtss_u64(a: __m128) -> u64 {
- transmute(vcvtss2usi64(a.as_f32x4(), _MM_FROUND_CUR_DIRECTION))
+ vcvtss2usi64(a.as_f32x4(), _MM_FROUND_CUR_DIRECTION)
}
/// Convert the lower double-precision (64-bit) floating-point element in a to an unsigned 64-bit integer, and store the result in dst.
@@ -43,7 +43,7 @@ pub unsafe fn _mm_cvtss_u64(a: __m128) -> u64 {
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2usi))]
pub unsafe fn _mm_cvtsd_u64(a: __m128d) -> u64 {
- transmute(vcvtsd2usi64(a.as_f64x2(), _MM_FROUND_CUR_DIRECTION))
+ vcvtsd2usi64(a.as_f64x2(), _MM_FROUND_CUR_DIRECTION)
}
/// Convert the signed 64-bit integer b to a single-precision (32-bit) floating-point element, store the result in the lower element of dst, and copy the upper 3 packed elements from a to the upper elements of dst.
@@ -54,8 +54,7 @@ pub unsafe fn _mm_cvtsd_u64(a: __m128d) -> u64 {
#[cfg_attr(test, assert_instr(vcvtsi2ss))]
pub unsafe fn _mm_cvti64_ss(a: __m128, b: i64) -> __m128 {
let b = b as f32;
- let r = simd_insert(a, 0, b);
- transmute(r)
+ simd_insert(a, 0, b)
}
/// Convert the signed 64-bit integer b to a double-precision (64-bit) floating-point element, store the result in the lower element of dst, and copy the upper element from a to the upper element of dst.
@@ -66,8 +65,7 @@ pub unsafe fn _mm_cvti64_ss(a: __m128, b: i64) -> __m128 {
#[cfg_attr(test, assert_instr(vcvtsi2sd))]
pub unsafe fn _mm_cvti64_sd(a: __m128d, b: i64) -> __m128d {
let b = b as f64;
- let r = simd_insert(a, 0, b);
- transmute(r)
+ simd_insert(a, 0, b)
}
/// Convert the unsigned 64-bit integer b to a single-precision (32-bit) floating-point element, store the result in the lower element of dst, and copy the upper 3 packed elements from a to the upper elements of dst.
@@ -78,8 +76,7 @@ pub unsafe fn _mm_cvti64_sd(a: __m128d, b: i64) -> __m128d {
#[cfg_attr(test, assert_instr(vcvtusi2ss))]
pub unsafe fn _mm_cvtu64_ss(a: __m128, b: u64) -> __m128 {
let b = b as f32;
- let r = simd_insert(a, 0, b);
- transmute(r)
+ simd_insert(a, 0, b)
}
/// Convert the unsigned 64-bit integer b to a double-precision (64-bit) floating-point element, store the result in the lower element of dst, and copy the upper element from a to the upper element of dst.
@@ -90,8 +87,7 @@ pub unsafe fn _mm_cvtu64_ss(a: __m128, b: u64) -> __m128 {
#[cfg_attr(test, assert_instr(vcvtusi2sd))]
pub unsafe fn _mm_cvtu64_sd(a: __m128d, b: u64) -> __m128d {
let b = b as f64;
- let r = simd_insert(a, 0, b);
- transmute(r)
+ simd_insert(a, 0, b)
}
/// Convert the lower double-precision (64-bit) floating-point element in a to a 64-bit integer with truncation, and store the result in dst.
@@ -101,7 +97,7 @@ pub unsafe fn _mm_cvtu64_sd(a: __m128d, b: u64) -> __m128d {
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2si))]
pub unsafe fn _mm_cvttsd_i64(a: __m128d) -> i64 {
- transmute(vcvtsd2si64(a.as_f64x2(), _MM_FROUND_CUR_DIRECTION))
+ vcvtsd2si64(a.as_f64x2(), _MM_FROUND_CUR_DIRECTION)
}
/// Convert the lower double-precision (64-bit) floating-point element in a to an unsigned 64-bit integer with truncation, and store the result in dst.
@@ -111,7 +107,7 @@ pub unsafe fn _mm_cvttsd_i64(a: __m128d) -> i64 {
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2usi))]
pub unsafe fn _mm_cvttsd_u64(a: __m128d) -> u64 {
- transmute(vcvtsd2usi64(a.as_f64x2(), _MM_FROUND_CUR_DIRECTION))
+ vcvtsd2usi64(a.as_f64x2(), _MM_FROUND_CUR_DIRECTION)
}
/// Convert the lower single-precision (32-bit) floating-point element in a to a 64-bit integer with truncation, and store the result in dst.
@@ -121,7 +117,7 @@ pub unsafe fn _mm_cvttsd_u64(a: __m128d) -> u64 {
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2si))]
pub unsafe fn _mm_cvttss_i64(a: __m128) -> i64 {
- transmute(vcvtss2si64(a.as_f32x4(), _MM_FROUND_CUR_DIRECTION))
+ vcvtss2si64(a.as_f32x4(), _MM_FROUND_CUR_DIRECTION)
}
/// Convert the lower single-precision (32-bit) floating-point element in a to an unsigned 64-bit integer with truncation, and store the result in dst.
@@ -131,7 +127,7 @@ pub unsafe fn _mm_cvttss_i64(a: __m128) -> i64 {
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2usi))]
pub unsafe fn _mm_cvttss_u64(a: __m128) -> u64 {
- transmute(vcvtss2usi64(a.as_f32x4(), _MM_FROUND_CUR_DIRECTION))
+ vcvtss2usi64(a.as_f32x4(), _MM_FROUND_CUR_DIRECTION)
}
/// Convert the signed 64-bit integer b to a double-precision (64-bit) floating-point element, store the result in the lower element of dst, and copy the upper element from a to the upper element of dst.
@@ -270,8 +266,7 @@ pub unsafe fn _mm_cvt_roundu64_ss<const ROUNDING: i32>(a: __m128, b: u64) -> __m
pub unsafe fn _mm_cvt_roundsd_si64<const ROUNDING: i32>(a: __m128d) -> i64 {
static_assert_rounding!(ROUNDING);
let a = a.as_f64x2();
- let r = vcvtsd2si64(a, ROUNDING);
- transmute(r)
+ vcvtsd2si64(a, ROUNDING)
}
/// Convert the lower double-precision (64-bit) floating-point element in a to a 64-bit integer, and store the result in dst.\
@@ -290,8 +285,7 @@ pub unsafe fn _mm_cvt_roundsd_si64<const ROUNDING: i32>(a: __m128d) -> i64 {
pub unsafe fn _mm_cvt_roundsd_i64<const ROUNDING: i32>(a: __m128d) -> i64 {
static_assert_rounding!(ROUNDING);
let a = a.as_f64x2();
- let r = vcvtsd2si64(a, ROUNDING);
- transmute(r)
+ vcvtsd2si64(a, ROUNDING)
}
/// Convert the lower double-precision (64-bit) floating-point element in a to an unsigned 64-bit integer, and store the result in dst.\
@@ -310,8 +304,7 @@ pub unsafe fn _mm_cvt_roundsd_i64<const ROUNDING: i32>(a: __m128d) -> i64 {
pub unsafe fn _mm_cvt_roundsd_u64<const ROUNDING: i32>(a: __m128d) -> u64 {
static_assert_rounding!(ROUNDING);
let a = a.as_f64x2();
- let r = vcvtsd2usi64(a, ROUNDING);
- transmute(r)
+ vcvtsd2usi64(a, ROUNDING)
}
/// Convert the lower single-precision (32-bit) floating-point element in a to a 64-bit integer, and store the result in dst.\
@@ -330,8 +323,7 @@ pub unsafe fn _mm_cvt_roundsd_u64<const ROUNDING: i32>(a: __m128d) -> u64 {
pub unsafe fn _mm_cvt_roundss_si64<const ROUNDING: i32>(a: __m128) -> i64 {
static_assert_rounding!(ROUNDING);
let a = a.as_f32x4();
- let r = vcvtss2si64(a, ROUNDING);
- transmute(r)
+ vcvtss2si64(a, ROUNDING)
}
/// Convert the lower single-precision (32-bit) floating-point element in a to a 64-bit integer, and store the result in dst.\
@@ -350,8 +342,7 @@ pub unsafe fn _mm_cvt_roundss_si64<const ROUNDING: i32>(a: __m128) -> i64 {
pub unsafe fn _mm_cvt_roundss_i64<const ROUNDING: i32>(a: __m128) -> i64 {
static_assert_rounding!(ROUNDING);
let a = a.as_f32x4();
- let r = vcvtss2si64(a, ROUNDING);
- transmute(r)
+ vcvtss2si64(a, ROUNDING)
}
/// Convert the lower single-precision (32-bit) floating-point element in a to an unsigned 64-bit integer, and store the result in dst.\
@@ -370,8 +361,7 @@ pub unsafe fn _mm_cvt_roundss_i64<const ROUNDING: i32>(a: __m128) -> i64 {
pub unsafe fn _mm_cvt_roundss_u64<const ROUNDING: i32>(a: __m128) -> u64 {
static_assert_rounding!(ROUNDING);
let a = a.as_f32x4();
- let r = vcvtss2usi64(a, ROUNDING);
- transmute(r)
+ vcvtss2usi64(a, ROUNDING)
}
/// Convert the lower double-precision (64-bit) floating-point element in a to a 64-bit integer with truncation, and store the result in dst.\
@@ -385,8 +375,7 @@ pub unsafe fn _mm_cvt_roundss_u64<const ROUNDING: i32>(a: __m128) -> u64 {
pub unsafe fn _mm_cvtt_roundsd_si64<const SAE: i32>(a: __m128d) -> i64 {
static_assert_sae!(SAE);
let a = a.as_f64x2();
- let r = vcvtsd2si64(a, SAE);
- transmute(r)
+ vcvtsd2si64(a, SAE)
}
/// Convert the lower double-precision (64-bit) floating-point element in a to a 64-bit integer with truncation, and store the result in dst.\
@@ -400,8 +389,7 @@ pub unsafe fn _mm_cvtt_roundsd_si64<const SAE: i32>(a: __m128d) -> i64 {
pub unsafe fn _mm_cvtt_roundsd_i64<const SAE: i32>(a: __m128d) -> i64 {
static_assert_sae!(SAE);
let a = a.as_f64x2();
- let r = vcvtsd2si64(a, SAE);
- transmute(r)
+ vcvtsd2si64(a, SAE)
}
/// Convert the lower double-precision (64-bit) floating-point element in a to an unsigned 64-bit integer with truncation, and store the result in dst.\
@@ -415,8 +403,7 @@ pub unsafe fn _mm_cvtt_roundsd_i64<const SAE: i32>(a: __m128d) -> i64 {
pub unsafe fn _mm_cvtt_roundsd_u64<const SAE: i32>(a: __m128d) -> u64 {
static_assert_sae!(SAE);
let a = a.as_f64x2();
- let r = vcvtsd2usi64(a, SAE);
- transmute(r)
+ vcvtsd2usi64(a, SAE)
}
/// Convert the lower single-precision (32-bit) floating-point element in a to a 64-bit integer with truncation, and store the result in dst.\
@@ -430,8 +417,7 @@ pub unsafe fn _mm_cvtt_roundsd_u64<const SAE: i32>(a: __m128d) -> u64 {
pub unsafe fn _mm_cvtt_roundss_i64<const SAE: i32>(a: __m128) -> i64 {
static_assert_sae!(SAE);
let a = a.as_f32x4();
- let r = vcvtss2si64(a, SAE);
- transmute(r)
+ vcvtss2si64(a, SAE)
}
/// Convert the lower single-precision (32-bit) floating-point element in a to a 64-bit integer with truncation, and store the result in dst.\
@@ -445,8 +431,7 @@ pub unsafe fn _mm_cvtt_roundss_i64<const SAE: i32>(a: __m128) -> i64 {
pub unsafe fn _mm_cvtt_roundss_si64<const SAE: i32>(a: __m128) -> i64 {
static_assert_sae!(SAE);
let a = a.as_f32x4();
- let r = vcvtss2si64(a, SAE);
- transmute(r)
+ vcvtss2si64(a, SAE)
}
/// Convert the lower single-precision (32-bit) floating-point element in a to an unsigned 64-bit integer with truncation, and store the result in dst.\
@@ -460,8 +445,7 @@ pub unsafe fn _mm_cvtt_roundss_si64<const SAE: i32>(a: __m128) -> i64 {
pub unsafe fn _mm_cvtt_roundss_u64<const SAE: i32>(a: __m128) -> u64 {
static_assert_sae!(SAE);
let a = a.as_f32x4();
- let r = vcvtss2usi64(a, SAE);
- transmute(r)
+ vcvtss2usi64(a, SAE)
}
#[allow(improper_ctypes)]
diff --git a/library/stdarch/crates/core_arch/src/x86_64/sse2.rs b/library/stdarch/crates/core_arch/src/x86_64/sse2.rs
index bf2394eba..9619cb748 100644
--- a/library/stdarch/crates/core_arch/src/x86_64/sse2.rs
+++ b/library/stdarch/crates/core_arch/src/x86_64/sse2.rs
@@ -181,6 +181,9 @@ mod tests {
}
#[simd_test(enable = "sse2")]
+ // Miri cannot support this until it is clear how it fits in the Rust memory model
+ // (non-temporal store)
+ #[cfg_attr(miri, ignore)]
unsafe fn test_mm_stream_si64() {
let a: i64 = 7;
let mut mem = boxed::Box::<i64>::new(-1);
diff --git a/library/stdarch/crates/intrinsic-test/Cargo.toml b/library/stdarch/crates/intrinsic-test/Cargo.toml
index d977dd659..c7a18f77f 100644
--- a/library/stdarch/crates/intrinsic-test/Cargo.toml
+++ b/library/stdarch/crates/intrinsic-test/Cargo.toml
@@ -12,10 +12,10 @@ lazy_static = "1.4.0"
serde = { version = "1", features = ["derive"] }
serde_json = "1.0"
csv = "1.1"
-clap = "2.33.3"
+clap = { version = "4.4", features = ["derive"] }
regex = "1.4.2"
log = "0.4.11"
-pretty_env_logger = "0.4.0"
+pretty_env_logger = "0.5.0"
rayon = "1.5.0"
diff = "0.1.12"
-itertools = "0.10.1"
+itertools = "0.11.0"
diff --git a/library/stdarch/crates/intrinsic-test/README.md b/library/stdarch/crates/intrinsic-test/README.md
index 8a8ddab40..2b3f0c75a 100644
--- a/library/stdarch/crates/intrinsic-test/README.md
+++ b/library/stdarch/crates/intrinsic-test/README.md
@@ -4,15 +4,17 @@ each produces the same result from random inputs.
# Usage
```
USAGE:
- intrinsic-test [OPTIONS] <INPUT>
+ intrinsic-test [FLAGS] [OPTIONS] <INPUT>
FLAGS:
+ --a32 Run tests for A32 instrinsics instead of A64
-h, --help Prints help information
-V, --version Prints version information
OPTIONS:
--cppcompiler <CPPCOMPILER> The C++ compiler to use for compiling the c++ code [default: clang++]
--runner <RUNNER> Run the C programs under emulation with this command
+ --skip <SKIP> Filename for a list of intrinsics to skip (one per line)
--toolchain <TOOLCHAIN> The rust toolchain to use for building the rust code
ARGS:
diff --git a/library/stdarch/crates/intrinsic-test/src/json_parser.rs b/library/stdarch/crates/intrinsic-test/src/json_parser.rs
index bc6fa4a9e..8b3c7869c 100644
--- a/library/stdarch/crates/intrinsic-test/src/json_parser.rs
+++ b/library/stdarch/crates/intrinsic-test/src/json_parser.rs
@@ -1,4 +1,5 @@
use std::collections::HashMap;
+use std::path::Path;
use serde::Deserialize;
@@ -41,7 +42,7 @@ struct JsonIntrinsic {
architectures: Vec<String>,
}
-pub fn get_neon_intrinsics(filename: &str) -> Result<Vec<Intrinsic>, Box<dyn std::error::Error>> {
+pub fn get_neon_intrinsics(filename: &Path) -> Result<Vec<Intrinsic>, Box<dyn std::error::Error>> {
let file = std::fs::File::open(filename)?;
let reader = std::io::BufReader::new(file);
let json: Vec<JsonIntrinsic> = serde_json::from_reader(reader).expect("Couldn't parse JSON");
diff --git a/library/stdarch/crates/intrinsic-test/src/main.rs b/library/stdarch/crates/intrinsic-test/src/main.rs
index 76d2da3ab..15bc021c7 100644
--- a/library/stdarch/crates/intrinsic-test/src/main.rs
+++ b/library/stdarch/crates/intrinsic-test/src/main.rs
@@ -4,9 +4,9 @@ extern crate log;
use std::fs::File;
use std::io::Write;
+use std::path::PathBuf;
use std::process::Command;
-use clap::{App, Arg};
use intrinsic::Intrinsic;
use itertools::Itertools;
use rayon::prelude::*;
@@ -320,58 +320,47 @@ path = "{intrinsic}/main.rs""#,
}
}
+/// Intrinsic test tool
+#[derive(clap::Parser)]
+#[command(
+ name = "Intrinsic test tool",
+ about = "Generates Rust and C programs for intrinsics and compares the output"
+)]
+struct Cli {
+ /// The input file containing the intrinsics
+ input: PathBuf,
+
+ /// The rust toolchain to use for building the rust code
+ #[arg(long)]
+ toolchain: Option<String>,
+
+ /// The C++ compiler to use for compiling the c++ code
+ #[arg(long, default_value_t = String::from("clang++"))]
+ cppcompiler: String,
+
+ /// Run the C programs under emulation with this command
+ #[arg(long)]
+ runner: Option<String>,
+
+ /// Filename for a list of intrinsics to skip (one per line)
+ #[arg(long)]
+ skip: Option<PathBuf>,
+
+ /// Run tests for A32 instrinsics instead of A64
+ #[arg(long)]
+ a32: bool,
+}
+
fn main() {
pretty_env_logger::init();
- let matches = App::new("Intrinsic test tool")
- .about("Generates Rust and C programs for intrinsics and compares the output")
- .arg(
- Arg::with_name("INPUT")
- .help("The input file containing the intrinsics")
- .required(true)
- .index(1),
- )
- .arg(
- Arg::with_name("TOOLCHAIN")
- .takes_value(true)
- .long("toolchain")
- .help("The rust toolchain to use for building the rust code"),
- )
- .arg(
- Arg::with_name("CPPCOMPILER")
- .takes_value(true)
- .default_value("clang++")
- .long("cppcompiler")
- .help("The C++ compiler to use for compiling the c++ code"),
- )
- .arg(
- Arg::with_name("RUNNER")
- .takes_value(true)
- .long("runner")
- .help("Run the C programs under emulation with this command"),
- )
- .arg(
- Arg::with_name("SKIP")
- .takes_value(true)
- .long("skip")
- .help("Filename for a list of intrinsics to skip (one per line)"),
- )
- .arg(
- Arg::with_name("A32")
- .takes_value(false)
- .long("a32")
- .help("Run tests for A32 instrinsics instead of A64"),
- )
- .get_matches();
-
- let filename = matches.value_of("INPUT").unwrap();
- let toolchain = matches
- .value_of("TOOLCHAIN")
- .map_or("".into(), |t| format!("+{t}"));
+ let args: Cli = clap::Parser::parse();
- let cpp_compiler = matches.value_of("CPPCOMPILER").unwrap();
- let c_runner = matches.value_of("RUNNER").unwrap_or("");
- let skip = if let Some(filename) = matches.value_of("SKIP") {
+ let filename = args.input;
+ let toolchain = args.toolchain.map_or_else(String::new, |t| format!("+{t}"));
+ let cpp_compiler = args.cppcompiler;
+ let c_runner = args.runner.unwrap_or_else(String::new);
+ let skip = if let Some(filename) = args.skip {
let data = std::fs::read_to_string(&filename).expect("Failed to open file");
data.lines()
.map(str::trim)
@@ -381,8 +370,8 @@ fn main() {
} else {
Default::default()
};
- let a32 = matches.is_present("A32");
- let mut intrinsics = get_neon_intrinsics(filename).expect("Error parsing input file");
+ let a32 = args.a32;
+ let mut intrinsics = get_neon_intrinsics(&filename).expect("Error parsing input file");
intrinsics.sort_by(|a, b| a.name.cmp(&b.name));
@@ -409,7 +398,7 @@ fn main() {
let notices = build_notices("// ");
- if !build_c(&notices, &intrinsics, cpp_compiler, a32) {
+ if !build_c(&notices, &intrinsics, &cpp_compiler, a32) {
std::process::exit(2);
}
diff --git a/library/stdarch/crates/simd-test-macro/Cargo.toml b/library/stdarch/crates/simd-test-macro/Cargo.toml
index cd110c1d3..c9e692d8e 100644
--- a/library/stdarch/crates/simd-test-macro/Cargo.toml
+++ b/library/stdarch/crates/simd-test-macro/Cargo.toml
@@ -11,3 +11,4 @@ test = false
[dependencies]
proc-macro2 = "1.0"
quote = "1.0"
+syn = { version = "2.0", features = ["full"] }
diff --git a/library/stdarch/crates/simd-test-macro/src/lib.rs b/library/stdarch/crates/simd-test-macro/src/lib.rs
index 2a31dd745..9e089f86b 100644
--- a/library/stdarch/crates/simd-test-macro/src/lib.rs
+++ b/library/stdarch/crates/simd-test-macro/src/lib.rs
@@ -7,7 +7,7 @@
#[macro_use]
extern crate quote;
-use proc_macro2::{Delimiter, Ident, Literal, Span, TokenStream, TokenTree};
+use proc_macro2::{Ident, Literal, Span, TokenStream, TokenTree};
use quote::ToTokens;
use std::env;
@@ -44,13 +44,9 @@ pub fn simd_test(
.collect();
let enable_feature = string(enable_feature);
- let item = TokenStream::from(item);
- let name = find_name(item.clone());
-
- let name: TokenStream = name
- .to_string()
- .parse()
- .unwrap_or_else(|_| panic!("failed to parse name: {}", name.to_string()));
+ let mut item = syn::parse_macro_input!(item as syn::ItemFn);
+ let item_attrs = std::mem::take(&mut item.attrs);
+ let name = &item.sig.ident;
let target = env::var("TARGET").expect(
"TARGET environment variable should be set for rustc (e.g. TARGET=x86_64-apple-darwin cargo test)"
@@ -109,6 +105,7 @@ pub fn simd_test(
#[allow(non_snake_case)]
#[test]
#maybe_ignore
+ #(#item_attrs)*
fn #name() {
if #force_test | (#cfg_target_features) {
let v = unsafe { #name() };
@@ -123,29 +120,3 @@ pub fn simd_test(
};
ret.into()
}
-
-fn find_name(item: TokenStream) -> Ident {
- let mut tokens = item.into_iter();
- while let Some(tok) = tokens.next() {
- if let TokenTree::Ident(word) = tok {
- if word == "fn" {
- break;
- }
- }
- }
-
- fn get_ident(tt: TokenTree) -> Option<Ident> {
- match tt {
- TokenTree::Ident(i) => Some(i),
- TokenTree::Group(g) if g.delimiter() == Delimiter::None => {
- get_ident(g.stream().into_iter().next()?)
- }
- _ => None,
- }
- }
-
- tokens
- .next()
- .and_then(get_ident)
- .expect("failed to find function name")
-}
diff --git a/library/stdarch/crates/std_detect/Cargo.toml b/library/stdarch/crates/std_detect/Cargo.toml
index 589a3900a..12d4a658c 100644
--- a/library/stdarch/crates/std_detect/Cargo.toml
+++ b/library/stdarch/crates/std_detect/Cargo.toml
@@ -30,7 +30,6 @@ compiler_builtins = { version = "0.1.2", optional = true }
alloc = { version = "1.0.0", optional = true, package = "rustc-std-workspace-alloc" }
[dev-dependencies]
-auxv = "0.3.3"
cupid = "0.6.0"
[features]
diff --git a/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs b/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs
index 8bc0b30c3..ee46aa1ac 100644
--- a/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs
+++ b/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs
@@ -19,6 +19,7 @@ pub(crate) const AT_HWCAP2: usize = 26;
/// If an entry cannot be read all the bits in the bitfield are set to zero.
/// This should be interpreted as all the features being disabled.
#[derive(Debug, Copy, Clone)]
+#[cfg_attr(test, derive(PartialEq))]
pub(crate) struct AuxVec {
pub hwcap: usize,
#[cfg(any(
@@ -174,9 +175,12 @@ pub(crate) fn auxv() -> Result<AuxVec, ()> {
/// Tries to read the `key` from the auxiliary vector by calling the
/// dynamically-linked `getauxval` function. If the function is not linked,
/// this function return `Err`.
-#[cfg(all(
- feature = "std_detect_dlsym_getauxval",
- not(all(target_os = "linux", target_env = "gnu"))
+#[cfg(any(
+ test,
+ all(
+ feature = "std_detect_dlsym_getauxval",
+ not(all(target_os = "linux", target_env = "gnu"))
+ )
))]
fn getauxval(key: usize) -> Result<usize, ()> {
use libc;
@@ -262,35 +266,8 @@ fn auxv_from_buf(buf: &[usize]) -> Result<AuxVec, ()> {
#[cfg(test)]
mod tests {
- extern crate auxv as auxv_crate;
use super::*;
- // Reads the Auxiliary Vector key from /proc/self/auxv
- // using the auxv crate.
- #[cfg(feature = "std_detect_file_io")]
- fn auxv_crate_getprocfs(key: usize) -> Option<usize> {
- use self::auxv_crate::procfs::search_procfs_auxv;
- use self::auxv_crate::AuxvType;
- let k = key as AuxvType;
- match search_procfs_auxv(&[k]) {
- Ok(v) => Some(v[&k] as usize),
- Err(_) => None,
- }
- }
-
- // Reads the Auxiliary Vector key from getauxval()
- // using the auxv crate.
- #[cfg(not(any(target_arch = "mips", target_arch = "mips64")))]
- fn auxv_crate_getauxval(key: usize) -> Option<usize> {
- use self::auxv_crate::getauxval::Getauxval;
- use self::auxv_crate::AuxvType;
- let q = auxv_crate::getauxval::NativeGetauxval {};
- match q.getauxval(key as AuxvType) {
- Ok(v) => Some(v as usize),
- Err(_) => None,
- }
- }
-
// FIXME: on mips/mips64 getauxval returns 0, and /proc/self/auxv
// does not always contain the AT_HWCAP key under qemu.
#[cfg(any(
@@ -301,7 +278,7 @@ mod tests {
#[test]
fn auxv_crate() {
let v = auxv();
- if let Some(hwcap) = auxv_crate_getauxval(AT_HWCAP) {
+ if let Ok(hwcap) = getauxval(AT_HWCAP) {
let rt_hwcap = v.expect("failed to find hwcap key").hwcap;
assert_eq!(rt_hwcap, hwcap);
}
@@ -314,7 +291,7 @@ mod tests {
target_arch = "powerpc64"
))]
{
- if let Some(hwcap2) = auxv_crate_getauxval(AT_HWCAP2) {
+ if let Ok(hwcap2) = getauxval(AT_HWCAP2) {
let rt_hwcap2 = v.expect("failed to find hwcap2 key").hwcap2;
assert_eq!(rt_hwcap2, hwcap2);
}
@@ -391,22 +368,8 @@ mod tests {
#[test]
#[cfg(feature = "std_detect_file_io")]
fn auxv_crate_procfs() {
- let v = auxv();
- if let Some(hwcap) = auxv_crate_getprocfs(AT_HWCAP) {
- assert_eq!(v.unwrap().hwcap, hwcap);
- }
-
- // Targets with AT_HWCAP and AT_HWCAP2:
- #[cfg(any(
- target_arch = "aarch64",
- target_arch = "arm",
- target_arch = "powerpc",
- target_arch = "powerpc64"
- ))]
- {
- if let Some(hwcap2) = auxv_crate_getprocfs(AT_HWCAP2) {
- assert_eq!(v.unwrap().hwcap2, hwcap2);
- }
+ if let Ok(procfs_auxv) = auxv_from_file("/proc/self/auxv") {
+ assert_eq!(auxv().unwrap(), procfs_auxv);
}
}
}
diff --git a/library/stdarch/crates/std_detect/src/detect/os/x86.rs b/library/stdarch/crates/std_detect/src/detect/os/x86.rs
index d8afc1aca..d8dd84db4 100644
--- a/library/stdarch/crates/std_detect/src/detect/os/x86.rs
+++ b/library/stdarch/crates/std_detect/src/detect/os/x86.rs
@@ -49,11 +49,7 @@ pub(crate) fn detect_features() -> cache::Initializer {
ecx,
edx,
} = __cpuid(0);
- let vendor_id: [[u8; 4]; 3] = [
- mem::transmute(ebx),
- mem::transmute(edx),
- mem::transmute(ecx),
- ];
+ let vendor_id: [[u8; 4]; 3] = [ebx.to_ne_bytes(), edx.to_ne_bytes(), ecx.to_ne_bytes()];
let vendor_id: [u8; 12] = mem::transmute(vendor_id);
(max_basic_leaf, vendor_id)
};
diff --git a/library/stdarch/crates/stdarch-test/Cargo.toml b/library/stdarch/crates/stdarch-test/Cargo.toml
index 3a2130d4e..3682fcd7e 100644
--- a/library/stdarch/crates/stdarch-test/Cargo.toml
+++ b/library/stdarch/crates/stdarch-test/Cargo.toml
@@ -20,7 +20,7 @@ cc = "1.0"
# time, and we want to make updates to this explicit rather than automatically
# picking up updates which might break CI with new instruction names.
[target.'cfg(target_arch = "wasm32")'.dependencies]
-wasmprinter = "=0.2.53"
+wasmprinter = "=0.2.67"
[features]
default = []
diff --git a/library/stdarch/crates/stdarch-test/src/disassembly.rs b/library/stdarch/crates/stdarch-test/src/disassembly.rs
index 54df7261e..087fc46d4 100644
--- a/library/stdarch/crates/stdarch-test/src/disassembly.rs
+++ b/library/stdarch/crates/stdarch-test/src/disassembly.rs
@@ -81,6 +81,8 @@ pub(crate) fn disassemble_myself() -> HashSet<Function> {
let add_args = if cfg!(target_os = "macos") && cfg!(target_arch = "aarch64") {
// Target features need to be enabled for LLVM objdump on Macos ARM64
vec!["--mattr=+v8.6a,+crypto,+tme"]
+ } else if cfg!(target_arch = "riscv64") {
+ vec!["--mattr=+zk,+zks,+zbc,+zbb"]
} else {
vec![]
};
diff --git a/library/stdarch/crates/stdarch-verify/Cargo.toml b/library/stdarch/crates/stdarch-verify/Cargo.toml
index 10ae90074..515f05138 100644
--- a/library/stdarch/crates/stdarch-verify/Cargo.toml
+++ b/library/stdarch/crates/stdarch-verify/Cargo.toml
@@ -7,7 +7,7 @@ edition = "2021"
[dependencies]
proc-macro2 = "1.0"
quote = "1.0"
-syn = { version = "1.0", features = ["full"] }
+syn = { version = "2.0", features = ["full"] }
[lib]
proc-macro = true
@@ -15,5 +15,5 @@ test = false
[dev-dependencies]
serde = { version = "1.0", features = ['derive'] }
-serde-xml-rs = "0.3"
+serde-xml-rs = "0.6"
serde_json = "1.0.96"
diff --git a/library/stdarch/crates/stdarch-verify/src/lib.rs b/library/stdarch/crates/stdarch-verify/src/lib.rs
index a9bf89f70..3f9eb3bf9 100644
--- a/library/stdarch/crates/stdarch-verify/src/lib.rs
+++ b/library/stdarch/crates/stdarch-verify/src/lib.rs
@@ -7,6 +7,7 @@ extern crate syn;
use proc_macro::TokenStream;
use std::{fs::File, io::Read, path::Path};
use syn::ext::IdentExt;
+use syn::parse::Parser as _;
#[proc_macro]
pub fn x86_functions(input: TokenStream) -> TokenStream {
@@ -416,7 +417,7 @@ fn walk(root: &Path, files: &mut Vec<(syn::File, String)>) {
fn find_instrs(attrs: &[syn::Attribute]) -> Vec<String> {
struct AssertInstr {
- instr: String,
+ instr: Option<String>,
}
// A small custom parser to parse out the instruction in `assert_instr`.
@@ -424,15 +425,21 @@ fn find_instrs(attrs: &[syn::Attribute]) -> Vec<String> {
// TODO: should probably just reuse `Invoc` from the `assert-instr-macro`
// crate.
impl syn::parse::Parse for AssertInstr {
- fn parse(content: syn::parse::ParseStream<'_>) -> syn::Result<Self> {
- let input;
- parenthesized!(input in content);
- let _ = input.parse::<syn::Meta>()?;
- let _ = input.parse::<Token![,]>()?;
- let ident = input.parse::<syn::Ident>()?;
- if ident != "assert_instr" {
- return Err(input.error("expected `assert_instr`"));
+ fn parse(input: syn::parse::ParseStream<'_>) -> syn::Result<Self> {
+ let _ = input.parse::<syn::Meta>().unwrap();
+ let _ = input.parse::<Token![,]>().unwrap();
+
+ match input.parse::<syn::Ident>() {
+ Ok(ident) if ident == "assert_instr" => {}
+ _ => {
+ while !input.is_empty() {
+ // consume everything
+ drop(input.parse::<proc_macro2::TokenStream>());
+ }
+ return Ok(Self { instr: None });
+ }
}
+
let instrs;
parenthesized!(instrs in input);
@@ -452,18 +459,24 @@ fn find_instrs(attrs: &[syn::Attribute]) -> Vec<String> {
return Err(input.error("failed to parse instruction"));
}
}
- Ok(Self { instr })
+ Ok(Self { instr: Some(instr) })
}
}
attrs
.iter()
- .filter(|a| a.path.is_ident("cfg_attr"))
.filter_map(|a| {
- syn::parse2::<AssertInstr>(a.tokens.clone())
- .ok()
- .map(|a| a.instr)
+ if let syn::Meta::List(ref l) = a.meta {
+ if l.path.is_ident("cfg_attr") {
+ Some(l)
+ } else {
+ None
+ }
+ } else {
+ None
+ }
})
+ .filter_map(|l| syn::parse2::<AssertInstr>(l.tokens.clone()).unwrap().instr)
.collect()
}
@@ -471,19 +484,26 @@ fn find_target_feature(attrs: &[syn::Attribute]) -> Option<syn::Lit> {
attrs
.iter()
.flat_map(|a| {
- if let Ok(syn::Meta::List(i)) = a.parse_meta() {
- if i.path.is_ident("target_feature") {
- return i.nested;
+ if let syn::Meta::List(ref l) = a.meta {
+ if l.path.is_ident("target_feature") {
+ if let Ok(l) =
+ syn::punctuated::Punctuated::<syn::Meta, Token![,]>::parse_terminated
+ .parse2(l.tokens.clone())
+ {
+ return l;
+ }
}
}
syn::punctuated::Punctuated::new()
})
- .filter_map(|nested| match nested {
- syn::NestedMeta::Meta(m) => Some(m),
- syn::NestedMeta::Lit(_) => None,
- })
.find_map(|m| match m {
- syn::Meta::NameValue(ref i) if i.path.is_ident("enable") => Some(i.clone().lit),
+ syn::Meta::NameValue(i) if i.path.is_ident("enable") => {
+ if let syn::Expr::Lit(lit) = i.value {
+ Some(lit.lit)
+ } else {
+ None
+ }
+ }
_ => None,
})
}
@@ -491,9 +511,16 @@ fn find_target_feature(attrs: &[syn::Attribute]) -> Option<syn::Lit> {
fn find_required_const(name: &str, attrs: &[syn::Attribute]) -> Vec<usize> {
attrs
.iter()
- .flat_map(|a| {
- if a.path.segments[0].ident == name {
- syn::parse::<RustcArgsRequiredConst>(a.tokens.clone().into())
+ .filter_map(|a| {
+ if let syn::Meta::List(ref l) = a.meta {
+ Some(l)
+ } else {
+ None
+ }
+ })
+ .flat_map(|l| {
+ if l.path.segments[0].ident == name {
+ syn::parse2::<RustcArgsRequiredConst>(l.tokens.clone())
.unwrap()
.args
} else {
@@ -509,10 +536,7 @@ struct RustcArgsRequiredConst {
impl syn::parse::Parse for RustcArgsRequiredConst {
fn parse(input: syn::parse::ParseStream<'_>) -> syn::Result<Self> {
- let content;
- parenthesized!(content in input);
- let list =
- syn::punctuated::Punctuated::<syn::LitInt, Token![,]>::parse_terminated(&content)?;
+ let list = syn::punctuated::Punctuated::<syn::LitInt, Token![,]>::parse_terminated(&input)?;
Ok(Self {
args: list
.into_iter()
diff --git a/library/stdarch/examples/Cargo.toml b/library/stdarch/examples/Cargo.toml
index 38f497fa6..d9034dd80 100644
--- a/library/stdarch/examples/Cargo.toml
+++ b/library/stdarch/examples/Cargo.toml
@@ -13,8 +13,8 @@ default-run = "hex"
[dependencies]
core_arch = { path = "../crates/core_arch" }
std_detect = { path = "../crates/std_detect" }
-quickcheck = "0.9"
-rand = "0.7"
+quickcheck = "1.0"
+rand = "0.8"
[[bin]]
name = "hex"
diff --git a/library/test/src/lib.rs b/library/test/src/lib.rs
index 413f0fba3..bddf75dff 100644
--- a/library/test/src/lib.rs
+++ b/library/test/src/lib.rs
@@ -16,6 +16,8 @@
#![unstable(feature = "test", issue = "50297")]
#![doc(test(attr(deny(warnings))))]
+#![cfg_attr(not(bootstrap), doc(rust_logo))]
+#![cfg_attr(not(bootstrap), feature(rustdoc_internals))]
#![feature(internal_output_capture)]
#![feature(staged_api)]
#![feature(process_exitcode_internals)]
diff --git a/library/unwind/Cargo.toml b/library/unwind/Cargo.toml
index eab2717c4..9aa552ed8 100644
--- a/library/unwind/Cargo.toml
+++ b/library/unwind/Cargo.toml
@@ -19,9 +19,6 @@ libc = { version = "0.2.79", features = ['rustc-dep-of-std'], default-features =
compiler_builtins = "0.1.0"
cfg-if = "1.0"
-[build-dependencies]
-cc = "1.0.76"
-
[features]
# Only applies for Linux and Fuchsia targets
diff --git a/library/unwind/build.rs b/library/unwind/build.rs
deleted file mode 100644
index 5c3c02fb6..000000000
--- a/library/unwind/build.rs
+++ /dev/null
@@ -1,25 +0,0 @@
-use std::env;
-
-fn main() {
- println!("cargo:rerun-if-changed=build.rs");
- println!("cargo:rerun-if-env-changed=CARGO_CFG_MIRI");
-
- if env::var_os("CARGO_CFG_MIRI").is_some() {
- // Miri doesn't need the linker flags or a libunwind build.
- return;
- }
-
- let target = env::var("TARGET").expect("TARGET was not set");
- if target.contains("android") {
- let build = cc::Build::new();
-
- // Since ndk r23 beta 3 `libgcc` was replaced with `libunwind` thus
- // check if we have `libunwind` available and if so use it. Otherwise
- // fall back to `libgcc` to support older ndk versions.
- let has_unwind = build.is_flag_supported("-lunwind").expect("Unable to invoke compiler");
-
- if has_unwind {
- println!("cargo:rustc-cfg=feature=\"system-llvm-libunwind\"");
- }
- }
-}
diff --git a/library/unwind/src/lib.rs b/library/unwind/src/lib.rs
index df4f286a5..335bded71 100644
--- a/library/unwind/src/lib.rs
+++ b/library/unwind/src/lib.rs
@@ -4,6 +4,7 @@
#![feature(staged_api)]
#![feature(c_unwind)]
#![feature(cfg_target_abi)]
+#![feature(strict_provenance)]
#![cfg_attr(not(target_env = "msvc"), feature(libc))]
#![allow(internal_features)]
@@ -75,14 +76,10 @@ cfg_if::cfg_if! {
cfg_if::cfg_if! {
if #[cfg(feature = "llvm-libunwind")] {
compile_error!("`llvm-libunwind` is not supported for Android targets");
- } else if #[cfg(feature = "system-llvm-libunwind")] {
+ } else {
#[link(name = "unwind", kind = "static", modifiers = "-bundle", cfg(target_feature = "crt-static"))]
#[link(name = "unwind", cfg(not(target_feature = "crt-static")))]
extern "C" {}
- } else {
- #[link(name = "gcc", kind = "static", modifiers = "-bundle", cfg(target_feature = "crt-static"))]
- #[link(name = "gcc", cfg(not(target_feature = "crt-static")))]
- extern "C" {}
}
}
// Android's unwinding library depends on dl_iterate_phdr in `libdl`.
@@ -145,6 +142,10 @@ extern "C" {}
#[link(name = "gcc_s")]
extern "C" {}
+#[cfg(target_os = "aix")]
+#[link(name = "unwind")]
+extern "C" {}
+
#[cfg(target_os = "nto")]
#[link(name = "gcc_s")]
extern "C" {}
diff --git a/library/unwind/src/libunwind.rs b/library/unwind/src/libunwind.rs
index a2bfa8e96..dba64aa74 100644
--- a/library/unwind/src/libunwind.rs
+++ b/library/unwind/src/libunwind.rs
@@ -1,6 +1,6 @@
#![allow(nonstandard_style)]
-use libc::{c_int, c_void, uintptr_t};
+use libc::{c_int, c_void};
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq)]
@@ -19,8 +19,8 @@ pub enum _Unwind_Reason_Code {
pub use _Unwind_Reason_Code::*;
pub type _Unwind_Exception_Class = u64;
-pub type _Unwind_Word = uintptr_t;
-pub type _Unwind_Ptr = uintptr_t;
+pub type _Unwind_Word = *const u8;
+pub type _Unwind_Ptr = *const u8;
pub type _Unwind_Trace_Fn =
extern "C" fn(ctx: *mut _Unwind_Context, arg: *mut c_void) -> _Unwind_Reason_Code;
@@ -214,7 +214,7 @@ if #[cfg(any(target_os = "ios", target_os = "tvos", target_os = "watchos", targe
// On Android or ARM/Linux, these are implemented as macros:
pub unsafe fn _Unwind_GetGR(ctx: *mut _Unwind_Context, reg_index: c_int) -> _Unwind_Word {
- let mut val: _Unwind_Word = 0;
+ let mut val: _Unwind_Word = core::ptr::null();
_Unwind_VRS_Get(ctx, _UVRSC_CORE, reg_index as _Unwind_Word, _UVRSD_UINT32,
&mut val as *mut _ as *mut c_void);
val
@@ -229,14 +229,14 @@ if #[cfg(any(target_os = "ios", target_os = "tvos", target_os = "watchos", targe
pub unsafe fn _Unwind_GetIP(ctx: *mut _Unwind_Context)
-> _Unwind_Word {
let val = _Unwind_GetGR(ctx, UNWIND_IP_REG);
- (val & !1) as _Unwind_Word
+ val.map_addr(|v| v & !1)
}
pub unsafe fn _Unwind_SetIP(ctx: *mut _Unwind_Context,
value: _Unwind_Word) {
// Propagate thumb bit to instruction pointer
- let thumb_state = _Unwind_GetGR(ctx, UNWIND_IP_REG) & 1;
- let value = value | thumb_state;
+ let thumb_state = _Unwind_GetGR(ctx, UNWIND_IP_REG).addr() & 1;
+ let value = value.map_addr(|v| v | thumb_state);
_Unwind_SetGR(ctx, UNWIND_IP_REG, value);
}