Diffstat (limited to 'library')
-rw-r--r-- library/alloc/Cargo.toml | 4
-rw-r--r-- library/alloc/benches/btree/map.rs | 1
-rw-r--r-- library/alloc/benches/str.rs | 1
-rw-r--r-- library/alloc/benches/vec.rs | 124
-rw-r--r-- library/alloc/benches/vec_deque.rs | 1
-rw-r--r-- library/alloc/src/alloc.rs | 3
-rw-r--r-- library/alloc/src/boxed.rs | 14
-rw-r--r-- library/alloc/src/boxed/thin.rs | 1
-rw-r--r-- library/alloc/src/collections/binary_heap/mod.rs | 11
-rw-r--r-- library/alloc/src/collections/binary_heap/tests.rs | 2
-rw-r--r-- library/alloc/src/collections/btree/map/tests.rs | 7
-rw-r--r-- library/alloc/src/collections/btree/set/tests.rs | 3
-rw-r--r-- library/alloc/src/collections/linked_list.rs | 93
-rw-r--r-- library/alloc/src/collections/mod.rs | 1
-rw-r--r-- library/alloc/src/ffi/c_str.rs | 6
-rw-r--r-- library/alloc/src/ffi/c_str/tests.rs | 2
-rw-r--r-- library/alloc/src/lib.rs | 8
-rw-r--r-- library/alloc/src/raw_vec.rs | 56
-rw-r--r-- library/alloc/src/raw_vec/tests.rs | 9
-rw-r--r-- library/alloc/src/rc.rs | 4
-rw-r--r-- library/alloc/src/rc/tests.rs | 5
-rw-r--r-- library/alloc/src/sync.rs | 12
-rw-r--r-- library/alloc/src/sync/tests.rs | 13
-rw-r--r-- library/alloc/src/tests.rs | 2
-rw-r--r-- library/alloc/src/vec/in_place_collect.rs | 164
-rw-r--r-- library/alloc/src/vec/in_place_drop.rs | 26
-rw-r--r-- library/alloc/src/vec/into_iter.rs | 16
-rw-r--r-- library/alloc/src/vec/mod.rs | 66
-rw-r--r-- library/alloc/src/vec/spec_from_elem.rs | 23
-rw-r--r-- library/alloc/src/vec/spec_from_iter.rs | 14
-rw-r--r-- library/alloc/tests/arc.rs | 1
-rw-r--r-- library/alloc/tests/borrow.rs | 2
-rw-r--r-- library/alloc/tests/lib.rs | 5
-rw-r--r-- library/alloc/tests/rc.rs | 1
-rw-r--r-- library/alloc/tests/str.rs | 11
-rw-r--r-- library/alloc/tests/vec.rs | 53
-rw-r--r-- library/core/benches/lib.rs | 1
-rw-r--r-- library/core/benches/num/flt2dec/mod.rs | 1
-rw-r--r-- library/core/benches/num/flt2dec/strategy/dragon.rs | 1
-rw-r--r-- library/core/benches/num/flt2dec/strategy/grisu.rs | 1
-rw-r--r-- library/core/benches/str.rs | 1
-rw-r--r-- library/core/benches/str/iter.rs | 17
-rw-r--r-- library/core/src/alloc/layout.rs | 6
-rw-r--r-- library/core/src/any.rs | 44
-rw-r--r-- library/core/src/array/iter.rs | 2
-rw-r--r-- library/core/src/array/mod.rs | 4
-rw-r--r-- library/core/src/async_iter/async_iter.rs | 25
-rw-r--r-- library/core/src/cell.rs | 11
-rw-r--r-- library/core/src/char/methods.rs | 1
-rw-r--r-- library/core/src/clone.rs | 2
-rw-r--r-- library/core/src/cmp.rs | 14
-rw-r--r-- library/core/src/convert/num.rs | 2
-rw-r--r-- library/core/src/escape.rs | 16
-rw-r--r-- library/core/src/ffi/c_str.rs | 4
-rw-r--r-- library/core/src/ffi/mod.rs | 6
-rw-r--r-- library/core/src/fmt/num.rs | 17
-rw-r--r-- library/core/src/future/future.rs | 1
-rw-r--r-- library/core/src/hash/mod.rs | 29
-rw-r--r-- library/core/src/hash/sip.rs | 26
-rw-r--r-- library/core/src/internal_macros.rs | 4
-rw-r--r-- library/core/src/intrinsics.rs | 49
-rw-r--r-- library/core/src/intrinsics/mir.rs | 90
-rw-r--r-- library/core/src/intrinsics/simd.rs | 473
-rw-r--r-- library/core/src/iter/adapters/array_chunks.rs | 34
-rw-r--r-- library/core/src/iter/adapters/chain.rs | 2
-rw-r--r-- library/core/src/iter/adapters/copied.rs | 2
-rw-r--r-- library/core/src/iter/adapters/enumerate.rs | 10
-rw-r--r-- library/core/src/iter/adapters/filter.rs | 11
-rw-r--r-- library/core/src/iter/adapters/filter_map.rs | 18
-rw-r--r-- library/core/src/iter/adapters/flatten.rs | 125
-rw-r--r-- library/core/src/iter/adapters/fuse.rs | 24
-rw-r--r-- library/core/src/iter/adapters/inspect.rs | 11
-rw-r--r-- library/core/src/iter/adapters/map.rs | 11
-rw-r--r-- library/core/src/iter/adapters/map_while.rs | 7
-rw-r--r-- library/core/src/iter/adapters/map_windows.rs | 2
-rw-r--r-- library/core/src/iter/adapters/mod.rs | 13
-rw-r--r-- library/core/src/iter/adapters/scan.rs | 7
-rw-r--r-- library/core/src/iter/adapters/skip.rs | 9
-rw-r--r-- library/core/src/iter/adapters/skip_while.rs | 12
-rw-r--r-- library/core/src/iter/adapters/step_by.rs | 6
-rw-r--r-- library/core/src/iter/adapters/take.rs | 11
-rw-r--r-- library/core/src/iter/adapters/take_while.rs | 12
-rw-r--r-- library/core/src/iter/adapters/zip.rs | 16
-rw-r--r-- library/core/src/iter/mod.rs | 2
-rw-r--r-- library/core/src/iter/sources/from_coroutine.rs | 3
-rw-r--r-- library/core/src/iter/traits/iterator.rs | 2
-rw-r--r-- library/core/src/iter/traits/marker.rs | 28
-rw-r--r-- library/core/src/iter/traits/mod.rs | 2
-rw-r--r-- library/core/src/lib.rs | 16
-rw-r--r-- library/core/src/mem/maybe_uninit.rs | 5
-rw-r--r-- library/core/src/mem/mod.rs | 7
-rw-r--r-- library/core/src/net/ip_addr.rs | 54
-rw-r--r-- library/core/src/num/f32.rs | 14
-rw-r--r-- library/core/src/num/f64.rs | 14
-rw-r--r-- library/core/src/num/mod.rs | 6
-rw-r--r-- library/core/src/num/nonzero.rs | 66
-rw-r--r-- library/core/src/ops/arith.rs | 10
-rw-r--r-- library/core/src/ops/coroutine.rs | 10
-rw-r--r-- library/core/src/ops/function.rs | 12
-rw-r--r-- library/core/src/ops/index_range.rs | 13
-rw-r--r-- library/core/src/option.rs | 21
-rw-r--r-- library/core/src/panic.rs | 30
-rw-r--r-- library/core/src/panicking.rs | 66
-rw-r--r-- library/core/src/pin.rs | 6
-rw-r--r-- library/core/src/primitive_docs.rs | 113
-rw-r--r-- library/core/src/ptr/alignment.rs | 57
-rw-r--r-- library/core/src/ptr/const_ptr.rs | 59
-rw-r--r-- library/core/src/ptr/mod.rs | 110
-rw-r--r-- library/core/src/ptr/mut_ptr.rs | 50
-rw-r--r-- library/core/src/ptr/non_null.rs | 1049
-rw-r--r-- library/core/src/ptr/unique.rs | 1
-rw-r--r-- library/core/src/result.rs | 8
-rw-r--r-- library/core/src/slice/ascii.rs | 3
-rw-r--r-- library/core/src/slice/index.rs | 72
-rw-r--r-- library/core/src/slice/iter.rs | 2
-rw-r--r-- library/core/src/slice/memchr.rs | 18
-rw-r--r-- library/core/src/slice/mod.rs | 116
-rw-r--r-- library/core/src/str/iter.rs | 50
-rw-r--r-- library/core/src/str/mod.rs | 79
-rw-r--r-- library/core/src/str/pattern.rs | 20
-rw-r--r-- library/core/src/str/traits.rs | 46
-rw-r--r-- library/core/src/task/poll.rs | 1
-rw-r--r-- library/core/src/task/wake.rs | 8
-rw-r--r-- library/core/src/time.rs | 21
-rw-r--r-- library/core/src/tuple.rs | 158
-rw-r--r-- library/core/tests/array.rs | 1
-rw-r--r-- library/core/tests/cell.rs | 2
-rw-r--r-- library/core/tests/char.rs | 1
-rw-r--r-- library/core/tests/fmt/num.rs | 23
-rw-r--r-- library/core/tests/hash/mod.rs | 3
-rw-r--r-- library/core/tests/iter/adapters/array_chunks.rs | 3
-rw-r--r-- library/core/tests/iter/mod.rs | 1
-rw-r--r-- library/core/tests/lib.rs | 4
-rw-r--r-- library/core/tests/nonzero.rs | 3
-rw-r--r-- library/core/tests/num/mod.rs | 5
-rw-r--r-- library/core/tests/option.rs | 9
-rw-r--r-- library/core/tests/simd.rs | 3
-rw-r--r-- library/core/tests/slice.rs | 1
-rw-r--r-- library/core/tests/time.rs | 13
-rw-r--r-- library/panic_abort/src/lib.rs | 10
-rw-r--r-- library/panic_unwind/src/lib.rs | 1
-rw-r--r-- library/portable-simd/.github/workflows/ci.yml | 71
-rw-r--r-- library/portable-simd/Cargo.lock | 304
-rw-r--r-- library/portable-simd/crates/core_simd/Cargo.toml | 1
-rw-r--r-- library/portable-simd/crates/core_simd/examples/dot_product.rs | 2
-rw-r--r-- library/portable-simd/crates/core_simd/examples/matrix_inversion.rs | 49
-rw-r--r-- library/portable-simd/crates/core_simd/examples/nbody.rs | 3
-rw-r--r-- library/portable-simd/crates/core_simd/examples/spectral_norm.rs | 2
-rw-r--r-- library/portable-simd/crates/core_simd/src/core_simd_docs.md | 35
-rw-r--r-- library/portable-simd/crates/core_simd/src/fmt.rs | 4
-rw-r--r-- library/portable-simd/crates/core_simd/src/intrinsics.rs | 6
-rw-r--r-- library/portable-simd/crates/core_simd/src/iter.rs | 16
-rw-r--r-- library/portable-simd/crates/core_simd/src/lane_count.rs | 8
-rw-r--r-- library/portable-simd/crates/core_simd/src/lib.rs | 4
-rw-r--r-- library/portable-simd/crates/core_simd/src/masks.rs | 379
-rw-r--r-- library/portable-simd/crates/core_simd/src/masks/bitmask.rs | 118
-rw-r--r-- library/portable-simd/crates/core_simd/src/masks/full_masks.rs | 185
-rw-r--r-- library/portable-simd/crates/core_simd/src/masks/to_bitmask.rs | 97
-rw-r--r-- library/portable-simd/crates/core_simd/src/mod.rs | 20
-rw-r--r-- library/portable-simd/crates/core_simd/src/ops.rs | 11
-rw-r--r-- library/portable-simd/crates/core_simd/src/ops/assign.rs | 26
-rw-r--r-- library/portable-simd/crates/core_simd/src/ops/deref.rs | 46
-rw-r--r-- library/portable-simd/crates/core_simd/src/ops/shift_scalar.rs | 62
-rw-r--r-- library/portable-simd/crates/core_simd/src/ops/unary.rs | 46
-rw-r--r-- library/portable-simd/crates/core_simd/src/select.rs | 22
-rw-r--r-- library/portable-simd/crates/core_simd/src/simd/cmp.rs | 7
-rw-r--r-- library/portable-simd/crates/core_simd/src/simd/cmp/eq.rs (renamed from library/portable-simd/crates/core_simd/src/eq.rs) | 30
-rw-r--r-- library/portable-simd/crates/core_simd/src/simd/cmp/ord.rs (renamed from library/portable-simd/crates/core_simd/src/ord.rs) | 67
-rw-r--r-- library/portable-simd/crates/core_simd/src/simd/num.rs (renamed from library/portable-simd/crates/core_simd/src/elements.rs) | 6
-rw-r--r-- library/portable-simd/crates/core_simd/src/simd/num/float.rs (renamed from library/portable-simd/crates/core_simd/src/elements/float.rs) | 97
-rw-r--r-- library/portable-simd/crates/core_simd/src/simd/num/int.rs (renamed from library/portable-simd/crates/core_simd/src/elements/int.rs) | 114
-rw-r--r-- library/portable-simd/crates/core_simd/src/simd/num/uint.rs (renamed from library/portable-simd/crates/core_simd/src/elements/uint.rs) | 97
-rw-r--r-- library/portable-simd/crates/core_simd/src/simd/prelude.rs | 6
-rw-r--r-- library/portable-simd/crates/core_simd/src/simd/ptr.rs | 11
-rw-r--r-- library/portable-simd/crates/core_simd/src/simd/ptr/const_ptr.rs (renamed from library/portable-simd/crates/core_simd/src/elements/const_ptr.rs) | 47
-rw-r--r-- library/portable-simd/crates/core_simd/src/simd/ptr/mut_ptr.rs (renamed from library/portable-simd/crates/core_simd/src/elements/mut_ptr.rs) | 45
-rw-r--r-- library/portable-simd/crates/core_simd/src/swizzle.rs | 408
-rw-r--r-- library/portable-simd/crates/core_simd/src/swizzle_dyn.rs | 6
-rw-r--r-- library/portable-simd/crates/core_simd/src/to_bytes.rs | 132
-rw-r--r-- library/portable-simd/crates/core_simd/src/vector.rs | 43
-rw-r--r-- library/portable-simd/crates/core_simd/src/vendor.rs | 2
-rw-r--r-- library/portable-simd/crates/core_simd/src/vendor/x86.rs | 2
-rw-r--r-- library/portable-simd/crates/core_simd/tests/cast.rs | 2
-rw-r--r-- library/portable-simd/crates/core_simd/tests/masks.rs | 14
-rw-r--r-- library/portable-simd/crates/core_simd/tests/ops_macros.rs | 135
-rw-r--r-- library/portable-simd/crates/core_simd/tests/pointers.rs | 7
-rw-r--r-- library/portable-simd/crates/core_simd/tests/round.rs | 4
-rw-r--r-- library/portable-simd/crates/core_simd/tests/swizzle.rs | 28
-rw-r--r-- library/portable-simd/crates/core_simd/tests/swizzle_dyn.rs | 1
-rw-r--r-- library/portable-simd/crates/core_simd/tests/to_bytes.rs | 22
-rw-r--r-- library/portable-simd/crates/std_float/src/lib.rs | 9
-rw-r--r-- library/portable-simd/crates/test_helpers/Cargo.toml | 6
-rw-r--r-- library/portable-simd/crates/test_helpers/src/biteq.rs | 32
-rw-r--r-- library/portable-simd/crates/test_helpers/src/lib.rs | 128
-rw-r--r-- library/portable-simd/crates/test_helpers/src/subnormals.rs | 91
-rw-r--r-- library/proc_macro/src/bridge/mod.rs | 1
-rw-r--r-- library/proc_macro/src/lib.rs | 33
-rw-r--r-- library/std/Cargo.toml | 5
-rw-r--r-- library/std/build.rs | 2
-rw-r--r-- library/std/src/backtrace.rs | 4
-rw-r--r-- library/std/src/backtrace/tests.rs | 2
-rw-r--r-- library/std/src/collections/hash/map.rs | 157
-rw-r--r-- library/std/src/collections/hash/map/tests.rs | 2
-rw-r--r-- library/std/src/collections/hash/set.rs | 10
-rw-r--r-- library/std/src/collections/hash/set/tests.rs | 2
-rw-r--r-- library/std/src/collections/mod.rs | 5
-rw-r--r-- library/std/src/env.rs | 50
-rw-r--r-- library/std/src/env/tests.rs | 2
-rw-r--r-- library/std/src/error.rs | 8
-rw-r--r-- library/std/src/ffi/os_str.rs | 82
-rw-r--r-- library/std/src/ffi/os_str/tests.rs | 54
-rw-r--r-- library/std/src/fs/tests.rs | 29
-rw-r--r-- library/std/src/hash/mod.rs | 91
-rw-r--r-- library/std/src/hash/random.rs | 161
-rw-r--r-- library/std/src/io/buffered/bufreader.rs | 10
-rw-r--r-- library/std/src/io/copy.rs | 69
-rw-r--r-- library/std/src/io/copy/tests.rs | 11
-rw-r--r-- library/std/src/io/error/repr_bitpacked.rs | 1
-rw-r--r-- library/std/src/io/error/repr_unpacked.rs | 1
-rw-r--r-- library/std/src/io/mod.rs | 236
-rw-r--r-- library/std/src/io/tests.rs | 30
-rw-r--r-- library/std/src/lib.rs | 13
-rw-r--r-- library/std/src/macros.rs | 6
-rw-r--r-- library/std/src/net/tcp/tests.rs | 2
-rw-r--r-- library/std/src/net/udp/tests.rs | 1
-rw-r--r-- library/std/src/os/l4re/raw.rs | 1
-rw-r--r-- library/std/src/os/linux/process.rs | 6
-rw-r--r-- library/std/src/os/linux/raw.rs | 1
-rw-r--r-- library/std/src/os/solid/io.rs | 300
-rw-r--r-- library/std/src/os/solid/mod.rs | 2
-rw-r--r-- library/std/src/os/windows/io/handle.rs | 2
-rw-r--r-- library/std/src/os/windows/io/socket.rs | 4
-rw-r--r-- library/std/src/os/windows/process.rs | 6
-rw-r--r-- library/std/src/panicking.rs | 4
-rw-r--r-- library/std/src/path/tests.rs | 8
-rw-r--r-- library/std/src/process.rs | 4
-rw-r--r-- library/std/src/rt.rs | 3
-rw-r--r-- library/std/src/sync/mpsc/sync_tests.rs | 1
-rw-r--r-- library/std/src/sync/mpsc/tests.rs | 1
-rw-r--r-- library/std/src/sync/mutex.rs | 2
-rw-r--r-- library/std/src/sync/once_lock.rs | 61
-rw-r--r-- library/std/src/sync/rwlock.rs | 4
-rw-r--r-- library/std/src/sys/common/alloc.rs | 1
-rw-r--r-- library/std/src/sys/mod.rs | 3
-rw-r--r-- library/std/src/sys/personality/mod.rs | 3
-rw-r--r-- library/std/src/sys/solid/net.rs | 201
-rw-r--r-- library/std/src/sys/teeos/alloc.rs | 57
-rw-r--r-- library/std/src/sys/teeos/locks/condvar.rs | 100
-rw-r--r-- library/std/src/sys/teeos/locks/mod.rs | 8
-rw-r--r-- library/std/src/sys/teeos/locks/rwlock.rs | 44
-rw-r--r-- library/std/src/sys/teeos/mod.rs | 167
-rw-r--r-- library/std/src/sys/teeos/net.rs | 372
-rw-r--r-- library/std/src/sys/teeos/os.rs | 134
-rw-r--r-- library/std/src/sys/teeos/rand.rs | 21
-rw-r--r-- library/std/src/sys/teeos/stdio.rs | 88
-rw-r--r-- library/std/src/sys/teeos/thread.rs | 164
-rw-r--r-- library/std/src/sys/teeos/thread_local_dtor.rs | 4
-rw-r--r-- library/std/src/sys/unix/args.rs | 16
-rw-r--r-- library/std/src/sys/unix/env.rs | 11
-rw-r--r-- library/std/src/sys/unix/fs.rs | 20
-rw-r--r-- library/std/src/sys/unix/mod.rs | 7
-rw-r--r-- library/std/src/sys/unix/os.rs | 12
-rw-r--r-- library/std/src/sys/unix/process/process_common.rs | 9
-rw-r--r-- library/std/src/sys/unix/process/process_unix.rs | 76
-rw-r--r-- library/std/src/sys/unix/process/process_unix/tests.rs | 19
-rw-r--r-- library/std/src/sys/unix/thread.rs | 3
-rw-r--r-- library/std/src/sys/unix/thread_local_dtor.rs | 10
-rw-r--r-- library/std/src/sys/unix/time.rs | 12
-rw-r--r-- library/std/src/sys/wasi/fs.rs | 13
-rw-r--r-- library/std/src/sys/windows/api.rs | 4
-rw-r--r-- library/std/src/sys/windows/c.rs | 9
-rw-r--r-- library/std/src/sys/windows/c/windows_sys.lst | 4
-rw-r--r-- library/std/src/sys/windows/c/windows_sys.rs | 9
-rw-r--r-- library/std/src/sys/windows/compat.rs | 6
-rw-r--r-- library/std/src/sys/windows/fs.rs | 49
-rw-r--r-- library/std/src/sys/windows/handle.rs | 10
-rw-r--r-- library/std/src/sys/windows/io.rs | 6
-rw-r--r-- library/std/src/sys/windows/mod.rs | 6
-rw-r--r-- library/std/src/sys/windows/net.rs | 2
-rw-r--r-- library/std/src/sys/windows/os.rs | 6
-rw-r--r-- library/std/src/sys/windows/path.rs | 16
-rw-r--r-- library/std/src/sys/windows/process.rs | 13
-rw-r--r-- library/std/src/sys/windows/stdio.rs | 8
-rw-r--r-- library/std/src/sys/windows/time.rs | 4
-rw-r--r-- library/std/src/sys/xous/mod.rs | 1
-rw-r--r-- library/std/src/sys/xous/os.rs | 29
-rw-r--r-- library/std/src/sys/xous/thread_parking.rs | 94
-rw-r--r-- library/std/src/sys_common/backtrace.rs | 12
-rw-r--r-- library/std/src/sys_common/mod.rs | 16
-rw-r--r-- library/std/src/sys_common/once/futex.rs | 3
-rw-r--r-- library/std/src/sys_common/thread.rs | 2
-rw-r--r-- library/std/src/sys_common/wtf8/tests.rs | 1
-rw-r--r-- library/std/src/thread/mod.rs | 5
-rw-r--r-- library/std/tests/common/mod.rs | 4
-rw-r--r-- library/test/src/helpers/shuffle.rs | 3
-rw-r--r-- library/test/src/lib.rs | 11
-rw-r--r-- library/test/src/term/terminfo/parm/tests.rs | 2
-rw-r--r-- library/test/src/tests.rs | 17
-rw-r--r-- library/unwind/Cargo.toml | 5
-rw-r--r-- library/unwind/src/lib.rs | 3
-rw-r--r-- library/unwind/src/libunwind.rs | 13
-rw-r--r-- library/unwind/src/unwinding.rs | 105
302 files changed, 8684 insertions, 2442 deletions
diff --git a/library/alloc/Cargo.toml b/library/alloc/Cargo.toml
index 63aec14f4..e8afed6b3 100644
--- a/library/alloc/Cargo.toml
+++ b/library/alloc/Cargo.toml
@@ -17,11 +17,11 @@ rand = { version = "0.8.5", default-features = false, features = ["alloc"] }
rand_xorshift = "0.3.0"
[[test]]
-name = "collectionstests"
+name = "alloctests"
path = "tests/lib.rs"
[[bench]]
-name = "collectionsbenches"
+name = "allocbenches"
path = "benches/lib.rs"
test = true
diff --git a/library/alloc/benches/btree/map.rs b/library/alloc/benches/btree/map.rs
index 7d2366477..4fe07eb02 100644
--- a/library/alloc/benches/btree/map.rs
+++ b/library/alloc/benches/btree/map.rs
@@ -1,6 +1,5 @@
use std::collections::BTreeMap;
use std::ops::RangeBounds;
-use std::vec::Vec;
use rand::{seq::SliceRandom, Rng};
use test::{black_box, Bencher};
diff --git a/library/alloc/benches/str.rs b/library/alloc/benches/str.rs
index 54af389de..c148ab6b2 100644
--- a/library/alloc/benches/str.rs
+++ b/library/alloc/benches/str.rs
@@ -1,4 +1,3 @@
-use core::iter::Iterator;
use test::{black_box, Bencher};
#[bench]
diff --git a/library/alloc/benches/vec.rs b/library/alloc/benches/vec.rs
index c1d3e1bdf..8ebfe313d 100644
--- a/library/alloc/benches/vec.rs
+++ b/library/alloc/benches/vec.rs
@@ -658,13 +658,17 @@ fn random_sorted_fill(mut seed: u32, buf: &mut [u32]) {
buf.sort();
}
-fn bench_vec_dedup_old(b: &mut Bencher, sz: usize) {
+// Measures the performance of the slice dedup impl.
+// This was used to justify a separate dedup implementation for Vec.
+// This algorithm was used for Vecs prior to Rust 1.52.
+fn bench_dedup_slice_truncate(b: &mut Bencher, sz: usize) {
let mut template = vec![0u32; sz];
b.bytes = std::mem::size_of_val(template.as_slice()) as u64;
random_sorted_fill(0x43, &mut template);
let mut vec = template.clone();
b.iter(|| {
+ let vec = black_box(&mut vec);
let len = {
let (dedup, _) = vec.partition_dedup();
dedup.len()
@@ -672,59 +676,143 @@ fn bench_vec_dedup_old(b: &mut Bencher, sz: usize) {
vec.truncate(len);
black_box(vec.first());
+ let vec = black_box(vec);
vec.clear();
vec.extend_from_slice(&template);
});
}
-fn bench_vec_dedup_new(b: &mut Bencher, sz: usize) {
+// Measures performance of Vec::dedup on random data.
+fn bench_vec_dedup_random(b: &mut Bencher, sz: usize) {
let mut template = vec![0u32; sz];
b.bytes = std::mem::size_of_val(template.as_slice()) as u64;
random_sorted_fill(0x43, &mut template);
let mut vec = template.clone();
b.iter(|| {
+ let vec = black_box(&mut vec);
vec.dedup();
black_box(vec.first());
+ let vec = black_box(vec);
+ vec.clear();
+ vec.extend_from_slice(&template);
+ });
+}
+
+// Measures performance of Vec::dedup when no items are removed.
+fn bench_vec_dedup_none(b: &mut Bencher, sz: usize) {
+ let mut template = vec![0u32; sz];
+ b.bytes = std::mem::size_of_val(template.as_slice()) as u64;
+ template.chunks_exact_mut(2).for_each(|w| {
+ w[0] = black_box(0);
+ w[1] = black_box(5);
+ });
+
+ let mut vec = template.clone();
+ b.iter(|| {
+ let vec = black_box(&mut vec);
+ vec.dedup();
+ black_box(vec.first());
+        // Unlike the other `dedup` benches,
+        // this doesn't reinitialize the vec,
+        // because we measure how efficient dedup is
+        // when no memory is written.
+ });
+}
+
+// Measures performance of Vec::dedup when all items are removed.
+fn bench_vec_dedup_all(b: &mut Bencher, sz: usize) {
+ let mut template = vec![0u32; sz];
+ b.bytes = std::mem::size_of_val(template.as_slice()) as u64;
+ template.iter_mut().for_each(|w| {
+ *w = black_box(0);
+ });
+
+ let mut vec = template.clone();
+ b.iter(|| {
+ let vec = black_box(&mut vec);
+ vec.dedup();
+ black_box(vec.first());
+ let vec = black_box(vec);
vec.clear();
vec.extend_from_slice(&template);
});
}
#[bench]
-fn bench_dedup_old_100(b: &mut Bencher) {
- bench_vec_dedup_old(b, 100);
+fn bench_dedup_slice_truncate_100(b: &mut Bencher) {
+ bench_dedup_slice_truncate(b, 100);
}
#[bench]
-fn bench_dedup_new_100(b: &mut Bencher) {
- bench_vec_dedup_new(b, 100);
+fn bench_dedup_random_100(b: &mut Bencher) {
+ bench_vec_dedup_random(b, 100);
}
#[bench]
-fn bench_dedup_old_1000(b: &mut Bencher) {
- bench_vec_dedup_old(b, 1000);
+fn bench_dedup_none_100(b: &mut Bencher) {
+ bench_vec_dedup_none(b, 100);
}
+
+#[bench]
+fn bench_dedup_all_100(b: &mut Bencher) {
+ bench_vec_dedup_all(b, 100);
+}
+
+#[bench]
+fn bench_dedup_slice_truncate_1000(b: &mut Bencher) {
+ bench_dedup_slice_truncate(b, 1000);
+}
+#[bench]
+fn bench_dedup_random_1000(b: &mut Bencher) {
+ bench_vec_dedup_random(b, 1000);
+}
+
+#[bench]
+fn bench_dedup_none_1000(b: &mut Bencher) {
+ bench_vec_dedup_none(b, 1000);
+}
+
#[bench]
-fn bench_dedup_new_1000(b: &mut Bencher) {
- bench_vec_dedup_new(b, 1000);
+fn bench_dedup_all_1000(b: &mut Bencher) {
+ bench_vec_dedup_all(b, 1000);
}
#[bench]
-fn bench_dedup_old_10000(b: &mut Bencher) {
- bench_vec_dedup_old(b, 10000);
+fn bench_dedup_slice_truncate_10000(b: &mut Bencher) {
+ bench_dedup_slice_truncate(b, 10000);
}
#[bench]
-fn bench_dedup_new_10000(b: &mut Bencher) {
- bench_vec_dedup_new(b, 10000);
+fn bench_dedup_random_10000(b: &mut Bencher) {
+ bench_vec_dedup_random(b, 10000);
}
#[bench]
-fn bench_dedup_old_100000(b: &mut Bencher) {
- bench_vec_dedup_old(b, 100000);
+fn bench_dedup_none_10000(b: &mut Bencher) {
+ bench_vec_dedup_none(b, 10000);
}
+
+#[bench]
+fn bench_dedup_all_10000(b: &mut Bencher) {
+ bench_vec_dedup_all(b, 10000);
+}
+
+#[bench]
+fn bench_dedup_slice_truncate_100000(b: &mut Bencher) {
+ bench_dedup_slice_truncate(b, 100000);
+}
+#[bench]
+fn bench_dedup_random_100000(b: &mut Bencher) {
+ bench_vec_dedup_random(b, 100000);
+}
+
+#[bench]
+fn bench_dedup_none_100000(b: &mut Bencher) {
+ bench_vec_dedup_none(b, 100000);
+}
+
#[bench]
-fn bench_dedup_new_100000(b: &mut Bencher) {
- bench_vec_dedup_new(b, 100000);
+fn bench_dedup_all_100000(b: &mut Bencher) {
+ bench_vec_dedup_all(b, 100000);
}
#[bench]
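
Editorial sketch: the `black_box(&mut vec)` lines threaded through these benches are the standard guard against the optimizer specializing on a known input. A minimal self-contained example of the pattern, not part of the patch (nightly-only, since `test::Bencher` is unstable; `sum_opaque` is a made-up name):

    #![feature(test)]
    extern crate test;
    use test::{black_box, Bencher};

    #[bench]
    fn sum_opaque(b: &mut Bencher) {
        let data = vec![1u32; 1000];
        b.iter(|| {
            // Hiding the input behind black_box forces a real traversal
            // instead of letting the optimizer fold the known result.
            let data = black_box(&data);
            black_box(data.iter().sum::<u32>())
        });
    }
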
diff --git a/library/alloc/benches/vec_deque.rs b/library/alloc/benches/vec_deque.rs
index 313a97ed1..35939f489 100644
--- a/library/alloc/benches/vec_deque.rs
+++ b/library/alloc/benches/vec_deque.rs
@@ -1,4 +1,3 @@
-use core::iter::Iterator;
use std::{
collections::{vec_deque, VecDeque},
mem,
diff --git a/library/alloc/src/alloc.rs b/library/alloc/src/alloc.rs
index 2499f1053..1663aa849 100644
--- a/library/alloc/src/alloc.rs
+++ b/library/alloc/src/alloc.rs
@@ -423,12 +423,14 @@ pub mod __alloc_error_handler {
}
}
+#[cfg(not(no_global_oom_handling))]
/// Specialize clones into pre-allocated, uninitialized memory.
/// Used by `Box::clone` and `Rc`/`Arc::make_mut`.
pub(crate) trait WriteCloneIntoRaw: Sized {
unsafe fn write_clone_into_raw(&self, target: *mut Self);
}
+#[cfg(not(no_global_oom_handling))]
impl<T: Clone> WriteCloneIntoRaw for T {
#[inline]
default unsafe fn write_clone_into_raw(&self, target: *mut Self) {
@@ -438,6 +440,7 @@ impl<T: Clone> WriteCloneIntoRaw for T {
}
}
+#[cfg(not(no_global_oom_handling))]
impl<T: Copy> WriteCloneIntoRaw for T {
#[inline]
unsafe fn write_clone_into_raw(&self, target: *mut Self) {
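
Editorial sketch of what `WriteCloneIntoRaw` buys: cloning directly into pre-allocated memory skips the move out of a temporary. A safe standalone illustration of the idea, assuming nothing about the library internals (`clone_into_uninit` is a hypothetical name, not the library's API):

    use core::mem::MaybeUninit;

    // Clone `src` into a destination slot that already exists, so the
    // result never has to be moved a second time.
    fn clone_into_uninit<T: Clone>(src: &T, dst: &mut MaybeUninit<T>) -> &mut T {
        dst.write(src.clone())
    }

    fn main() {
        let s = String::from("hello");
        let mut slot = MaybeUninit::<String>::uninit();
        let cloned = clone_into_uninit(&s, &mut slot);
        assert_eq!(cloned.as_str(), "hello");
        // MaybeUninit never runs destructors on its own; drop explicitly.
        unsafe { slot.assume_init_drop() };
    }
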
diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs
index 25c63b425..fdf5e134f 100644
--- a/library/alloc/src/boxed.rs
+++ b/library/alloc/src/boxed.rs
@@ -1038,10 +1038,18 @@ impl<T: ?Sized, A: Allocator> Box<T, A> {
/// use std::ptr;
///
/// let x = Box::new(String::from("Hello"));
- /// let p = Box::into_raw(x);
+ /// let ptr = Box::into_raw(x);
+ /// unsafe {
+ /// ptr::drop_in_place(ptr);
+ /// dealloc(ptr as *mut u8, Layout::new::<String>());
+ /// }
+ /// ```
+ /// Note: This is equivalent to the following:
+ /// ```
+ /// let x = Box::new(String::from("Hello"));
+ /// let ptr = Box::into_raw(x);
/// unsafe {
- /// ptr::drop_in_place(p);
- /// dealloc(p as *mut u8, Layout::new::<String>());
+ /// drop(Box::from_raw(ptr));
/// }
/// ```
///
diff --git a/library/alloc/src/boxed/thin.rs b/library/alloc/src/boxed/thin.rs
index f83c8f83c..a8005b706 100644
--- a/library/alloc/src/boxed/thin.rs
+++ b/library/alloc/src/boxed/thin.rs
@@ -171,6 +171,7 @@ struct WithHeader<H>(NonNull<u8>, PhantomData<H>);
/// An opaque representation of `WithHeader<H>` to avoid the
/// projection invariance of `<T as Pointee>::Metadata`.
#[repr(transparent)]
+#[allow(unused_tuple_struct_fields)] // Field only used through `WithHeader` type above.
struct WithOpaqueHeader(NonNull<u8>);
impl WithOpaqueHeader {
diff --git a/library/alloc/src/collections/binary_heap/mod.rs b/library/alloc/src/collections/binary_heap/mod.rs
index 61c5950b0..00a101541 100644
--- a/library/alloc/src/collections/binary_heap/mod.rs
+++ b/library/alloc/src/collections/binary_heap/mod.rs
@@ -145,7 +145,7 @@
use core::alloc::Allocator;
use core::fmt;
-use core::iter::{FusedIterator, InPlaceIterable, SourceIter, TrustedLen};
+use core::iter::{FusedIterator, InPlaceIterable, SourceIter, TrustedFused, TrustedLen};
use core::mem::{self, swap, ManuallyDrop};
use core::num::NonZeroUsize;
use core::ops::{Deref, DerefMut};
@@ -1542,6 +1542,10 @@ impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> {
#[stable(feature = "fused", since = "1.26.0")]
impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}
+#[doc(hidden)]
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<T, A: Allocator> TrustedFused for IntoIter<T, A> {}
+
#[stable(feature = "default_iters", since = "1.70.0")]
impl<T> Default for IntoIter<T> {
/// Creates an empty `binary_heap::IntoIter`.
@@ -1571,7 +1575,10 @@ unsafe impl<T, A: Allocator> SourceIter for IntoIter<T, A> {
#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
-unsafe impl<I, A: Allocator> InPlaceIterable for IntoIter<I, A> {}
+unsafe impl<I, A: Allocator> InPlaceIterable for IntoIter<I, A> {
+ const EXPAND_BY: Option<NonZeroUsize> = NonZeroUsize::new(1);
+ const MERGE_BY: Option<NonZeroUsize> = NonZeroUsize::new(1);
+}
unsafe impl<I> AsVecIntoIter for IntoIter<I> {
type Item = I;
diff --git a/library/alloc/src/collections/binary_heap/tests.rs b/library/alloc/src/collections/binary_heap/tests.rs
index 565a7b797..d4bc6226a 100644
--- a/library/alloc/src/collections/binary_heap/tests.rs
+++ b/library/alloc/src/collections/binary_heap/tests.rs
@@ -1,8 +1,6 @@
use super::*;
use crate::boxed::Box;
use crate::testing::crash_test::{CrashTestDummy, Panic};
-use core::mem;
-use std::iter::TrustedLen;
use std::panic::{catch_unwind, AssertUnwindSafe};
#[test]
diff --git a/library/alloc/src/collections/btree/map/tests.rs b/library/alloc/src/collections/btree/map/tests.rs
index 8681cfcd6..a1b7cfe6b 100644
--- a/library/alloc/src/collections/btree/map/tests.rs
+++ b/library/alloc/src/collections/btree/map/tests.rs
@@ -1,4 +1,3 @@
-use super::Entry::{Occupied, Vacant};
use super::*;
use crate::boxed::Box;
use crate::fmt::Debug;
@@ -7,13 +6,9 @@ use crate::string::{String, ToString};
use crate::testing::crash_test::{CrashTestDummy, Panic};
use crate::testing::ord_chaos::{Cyclic3, Governed, Governor};
use crate::testing::rng::DeterministicRng;
-use crate::vec::Vec;
use core::assert_matches::assert_matches;
-use std::cmp::Ordering;
use std::iter;
-use std::mem;
-use std::ops::Bound::{self, Excluded, Included, Unbounded};
-use std::ops::RangeBounds;
+use std::ops::Bound::{Excluded, Included, Unbounded};
use std::panic::{catch_unwind, AssertUnwindSafe};
use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
diff --git a/library/alloc/src/collections/btree/set/tests.rs b/library/alloc/src/collections/btree/set/tests.rs
index e05bf0e20..8726c5bfe 100644
--- a/library/alloc/src/collections/btree/set/tests.rs
+++ b/library/alloc/src/collections/btree/set/tests.rs
@@ -1,9 +1,6 @@
use super::*;
use crate::testing::crash_test::{CrashTestDummy, Panic};
use crate::testing::rng::DeterministicRng;
-use crate::vec::Vec;
-use std::cmp::Ordering;
-use std::hash::{Hash, Hasher};
use std::ops::Bound::{Excluded, Included};
use std::panic::{catch_unwind, AssertUnwindSafe};
diff --git a/library/alloc/src/collections/linked_list.rs b/library/alloc/src/collections/linked_list.rs
index 2c26f9e03..9e109feb3 100644
--- a/library/alloc/src/collections/linked_list.rs
+++ b/library/alloc/src/collections/linked_list.rs
@@ -1026,6 +1026,99 @@ impl<T, A: Allocator> LinkedList<T, A> {
}
}
+ /// Retains only the elements specified by the predicate.
+ ///
+ /// In other words, remove all elements `e` for which `f(&e)` returns false.
+ /// This method operates in place, visiting each element exactly once in the
+ /// original order, and preserves the order of the retained elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(linked_list_retain)]
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut d = LinkedList::new();
+ ///
+ /// d.push_front(1);
+ /// d.push_front(2);
+ /// d.push_front(3);
+ ///
+ /// d.retain(|&x| x % 2 == 0);
+ ///
+ /// assert_eq!(d.pop_front(), Some(2));
+ /// assert_eq!(d.pop_front(), None);
+ /// ```
+ ///
+ /// Because the elements are visited exactly once in the original order,
+ /// external state may be used to decide which elements to keep.
+ ///
+ /// ```
+ /// #![feature(linked_list_retain)]
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut d = LinkedList::new();
+ ///
+ /// d.push_front(1);
+ /// d.push_front(2);
+ /// d.push_front(3);
+ ///
+ /// let keep = [false, true, false];
+ /// let mut iter = keep.iter();
+ /// d.retain(|_| *iter.next().unwrap());
+ /// assert_eq!(d.pop_front(), Some(2));
+ /// assert_eq!(d.pop_front(), None);
+ /// ```
+ #[unstable(feature = "linked_list_retain", issue = "114135")]
+ pub fn retain<F>(&mut self, mut f: F)
+ where
+ F: FnMut(&T) -> bool,
+ {
+ self.retain_mut(|elem| f(elem));
+ }
+
+ /// Retains only the elements specified by the predicate.
+ ///
+ /// In other words, remove all elements `e` for which `f(&e)` returns false.
+ /// This method operates in place, visiting each element exactly once in the
+ /// original order, and preserves the order of the retained elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(linked_list_retain)]
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut d = LinkedList::new();
+ ///
+ /// d.push_front(1);
+ /// d.push_front(2);
+ /// d.push_front(3);
+ ///
+ /// d.retain_mut(|x| if *x % 2 == 0 {
+ /// *x += 1;
+ /// true
+ /// } else {
+ /// false
+ /// });
+ /// assert_eq!(d.pop_front(), Some(3));
+ /// assert_eq!(d.pop_front(), None);
+ /// ```
+ #[unstable(feature = "linked_list_retain", issue = "114135")]
+ pub fn retain_mut<F>(&mut self, mut f: F)
+ where
+ F: FnMut(&mut T) -> bool,
+ {
+ let mut cursor = self.cursor_front_mut();
+ while let Some(node) = cursor.current() {
+ if !f(node) {
+ cursor.remove_current().unwrap();
+ } else {
+ cursor.move_next();
+ }
+ }
+ }
+
/// Creates an iterator which uses a closure to determine if an element should be removed.
///
/// If the closure returns true, then the element is removed and yielded.
diff --git a/library/alloc/src/collections/mod.rs b/library/alloc/src/collections/mod.rs
index 3e0b0f735..705b81535 100644
--- a/library/alloc/src/collections/mod.rs
+++ b/library/alloc/src/collections/mod.rs
@@ -148,6 +148,7 @@ impl Display for TryReserveError {
/// An intermediate trait for specialization of `Extend`.
#[doc(hidden)]
+#[cfg(not(no_global_oom_handling))]
trait SpecExtend<I: IntoIterator> {
/// Extends `self` with the contents of the given iterator.
fn spec_extend(&mut self, iter: I);
diff --git a/library/alloc/src/ffi/c_str.rs b/library/alloc/src/ffi/c_str.rs
index 62856fc9a..4a4a3abd4 100644
--- a/library/alloc/src/ffi/c_str.rs
+++ b/library/alloc/src/ffi/c_str.rs
@@ -421,7 +421,7 @@ impl CString {
/// Failure to call [`CString::from_raw`] will lead to a memory leak.
///
/// The C side must **not** modify the length of the string (by writing a
- /// `null` somewhere inside the string or removing the final one) before
+ /// nul byte somewhere inside the string or removing the final one) before
/// it makes it back into Rust using [`CString::from_raw`]. See the safety section
/// in [`CString::from_raw`].
///
@@ -797,7 +797,7 @@ impl From<Box<CStr>> for CString {
#[stable(feature = "cstring_from_vec_of_nonzerou8", since = "1.43.0")]
impl From<Vec<NonZeroU8>> for CString {
/// Converts a <code>[Vec]<[NonZeroU8]></code> into a [`CString`] without
- /// copying nor checking for inner null bytes.
+ /// copying nor checking for inner nul bytes.
#[inline]
fn from(v: Vec<NonZeroU8>) -> CString {
unsafe {
@@ -809,7 +809,7 @@ impl From<Vec<NonZeroU8>> for CString {
let (ptr, len, cap): (*mut NonZeroU8, _, _) = Vec::into_raw_parts(v);
Vec::from_raw_parts(ptr.cast::<u8>(), len, cap)
};
- // SAFETY: `v` cannot contain null bytes, given the type-level
+ // SAFETY: `v` cannot contain nul bytes, given the type-level
// invariant of `NonZeroU8`.
Self::_from_vec_unchecked(v)
}
diff --git a/library/alloc/src/ffi/c_str/tests.rs b/library/alloc/src/ffi/c_str/tests.rs
index 0b7476d5c..9f51e17a4 100644
--- a/library/alloc/src/ffi/c_str/tests.rs
+++ b/library/alloc/src/ffi/c_str/tests.rs
@@ -1,6 +1,4 @@
use super::*;
-use crate::rc::Rc;
-use crate::sync::Arc;
use core::assert_matches::assert_matches;
use core::ffi::FromBytesUntilNulError;
use core::hash::{Hash, Hasher};
diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs
index d33c4418e..0af3ac38e 100644
--- a/library/alloc/src/lib.rs
+++ b/library/alloc/src/lib.rs
@@ -78,8 +78,8 @@
not(no_sync),
target_has_atomic = "ptr"
))]
-#![cfg_attr(not(bootstrap), doc(rust_logo))]
-#![cfg_attr(not(bootstrap), feature(rustdoc_internals))]
+#![doc(rust_logo)]
+#![feature(rustdoc_internals)]
#![no_std]
#![needs_allocator]
// Lints:
@@ -140,7 +140,6 @@
#![feature(maybe_uninit_uninit_array)]
#![feature(maybe_uninit_uninit_array_transpose)]
#![feature(pattern)]
-#![feature(ptr_addr_eq)]
#![feature(ptr_internals)]
#![feature(ptr_metadata)]
#![feature(ptr_sub_ptr)]
@@ -155,6 +154,7 @@
#![feature(std_internals)]
#![feature(str_internals)]
#![feature(strict_provenance)]
+#![feature(trusted_fused)]
#![feature(trusted_len)]
#![feature(trusted_random_access)]
#![feature(try_trait_v2)]
@@ -270,7 +270,7 @@ pub(crate) mod test_helpers {
/// seed not being the same for every RNG invocation too.
pub(crate) fn test_rng() -> rand_xorshift::XorShiftRng {
use std::hash::{BuildHasher, Hash, Hasher};
- let mut hasher = std::collections::hash_map::RandomState::new().build_hasher();
+ let mut hasher = std::hash::RandomState::new().build_hasher();
std::panic::Location::caller().hash(&mut hasher);
let hc64 = hasher.finish();
let seed_vec =
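
Editorial sketch of the seeding trick visible in this hunk: `RandomState` varies per process and `Location::caller()` varies per call site, so each test site gets its own seed. A standalone version under a made-up name (`per_callsite_seed`), not the library's code; `std::hash::RandomState` is the re-export this patch switches to:

    use std::hash::{BuildHasher, Hash, Hasher, RandomState};

    #[track_caller]
    fn per_callsite_seed() -> u64 {
        // Two call sites in one test run hash different Locations and
        // therefore get distinct seeds.
        let mut hasher = RandomState::new().build_hasher();
        std::panic::Location::caller().hash(&mut hasher);
        hasher.finish()
    }

    fn main() {
        println!("seed here: {}", per_callsite_seed());
    }
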
diff --git a/library/alloc/src/raw_vec.rs b/library/alloc/src/raw_vec.rs
index 817b93720..99ec68f5a 100644
--- a/library/alloc/src/raw_vec.rs
+++ b/library/alloc/src/raw_vec.rs
@@ -25,6 +25,16 @@ enum AllocInit {
Zeroed,
}
+#[repr(transparent)]
+#[cfg_attr(target_pointer_width = "16", rustc_layout_scalar_valid_range_end(0x7fff))]
+#[cfg_attr(target_pointer_width = "32", rustc_layout_scalar_valid_range_end(0x7fff_ffff))]
+#[cfg_attr(target_pointer_width = "64", rustc_layout_scalar_valid_range_end(0x7fff_ffff_ffff_ffff))]
+struct Cap(usize);
+
+impl Cap {
+ const ZERO: Cap = unsafe { Cap(0) };
+}
+
/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
/// a buffer of memory on the heap without having to worry about all the corner cases
/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
@@ -50,7 +60,12 @@ enum AllocInit {
#[allow(missing_debug_implementations)]
pub(crate) struct RawVec<T, A: Allocator = Global> {
ptr: Unique<T>,
- cap: usize,
+ /// Never used for ZSTs; it's `capacity()`'s responsibility to return usize::MAX in that case.
+ ///
+ /// # Safety
+ ///
+ /// `cap` must be in the `0..=isize::MAX` range.
+ cap: Cap,
alloc: A,
}
@@ -119,7 +134,7 @@ impl<T, A: Allocator> RawVec<T, A> {
/// the returned `RawVec`.
pub const fn new_in(alloc: A) -> Self {
// `cap: 0` means "unallocated". zero-sized types are ignored.
- Self { ptr: Unique::dangling(), cap: 0, alloc }
+ Self { ptr: Unique::dangling(), cap: Cap::ZERO, alloc }
}
/// Like `with_capacity`, but parameterized over the choice of
@@ -194,7 +209,7 @@ impl<T, A: Allocator> RawVec<T, A> {
// here should change to `ptr.len() / mem::size_of::<T>()`.
Self {
ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) },
- cap: capacity,
+ cap: unsafe { Cap(capacity) },
alloc,
}
}
@@ -207,12 +222,13 @@ impl<T, A: Allocator> RawVec<T, A> {
/// The `ptr` must be allocated (via the given allocator `alloc`), and with the given
/// `capacity`.
/// The `capacity` cannot exceed `isize::MAX` for sized types. (only a concern on 32-bit
- /// systems). ZST vectors may have a capacity up to `usize::MAX`.
+ /// systems). For ZSTs capacity is ignored.
/// If the `ptr` and `capacity` come from a `RawVec` created via `alloc`, then this is
/// guaranteed.
#[inline]
pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, alloc: A) -> Self {
- Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap: capacity, alloc }
+ let cap = if T::IS_ZST { Cap::ZERO } else { unsafe { Cap(capacity) } };
+ Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap, alloc }
}
/// Gets a raw pointer to the start of the allocation. Note that this is
@@ -228,7 +244,7 @@ impl<T, A: Allocator> RawVec<T, A> {
/// This will always be `usize::MAX` if `T` is zero-sized.
#[inline(always)]
pub fn capacity(&self) -> usize {
- if T::IS_ZST { usize::MAX } else { self.cap }
+ if T::IS_ZST { usize::MAX } else { self.cap.0 }
}
/// Returns a shared reference to the allocator backing this `RawVec`.
@@ -237,7 +253,7 @@ impl<T, A: Allocator> RawVec<T, A> {
}
fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
- if T::IS_ZST || self.cap == 0 {
+ if T::IS_ZST || self.cap.0 == 0 {
None
} else {
// We could use Layout::array here which ensures the absence of isize and usize overflows
@@ -247,7 +263,7 @@ impl<T, A: Allocator> RawVec<T, A> {
let _: () = const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) };
unsafe {
let align = mem::align_of::<T>();
- let size = mem::size_of::<T>().unchecked_mul(self.cap);
+ let size = mem::size_of::<T>().unchecked_mul(self.cap.0);
let layout = Layout::from_size_align_unchecked(size, align);
Some((self.ptr.cast().into(), layout))
}
@@ -375,12 +391,15 @@ impl<T, A: Allocator> RawVec<T, A> {
additional > self.capacity().wrapping_sub(len)
}
- fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
+    /// # Safety
+ ///
+ /// `cap` must not exceed `isize::MAX`.
+ unsafe fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
// Allocators currently return a `NonNull<[u8]>` whose length matches
// the size requested. If that ever changes, the capacity here should
// change to `ptr.len() / mem::size_of::<T>()`.
self.ptr = unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) };
- self.cap = cap;
+ self.cap = unsafe { Cap(cap) };
}
// This method is usually instantiated many times. So we want it to be as
@@ -405,14 +424,15 @@ impl<T, A: Allocator> RawVec<T, A> {
// This guarantees exponential growth. The doubling cannot overflow
// because `cap <= isize::MAX` and the type of `cap` is `usize`.
- let cap = cmp::max(self.cap * 2, required_cap);
+ let cap = cmp::max(self.cap.0 * 2, required_cap);
let cap = cmp::max(Self::MIN_NON_ZERO_CAP, cap);
let new_layout = Layout::array::<T>(cap);
// `finish_grow` is non-generic over `T`.
let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
- self.set_ptr_and_cap(ptr, cap);
+ // SAFETY: finish_grow would have resulted in a capacity overflow if we tried to allocate more than isize::MAX items
+ unsafe { self.set_ptr_and_cap(ptr, cap) };
Ok(())
}
@@ -431,7 +451,10 @@ impl<T, A: Allocator> RawVec<T, A> {
// `finish_grow` is non-generic over `T`.
let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
- self.set_ptr_and_cap(ptr, cap);
+ // SAFETY: finish_grow would have resulted in a capacity overflow if we tried to allocate more than isize::MAX items
+ unsafe {
+ self.set_ptr_and_cap(ptr, cap);
+ }
Ok(())
}
@@ -449,7 +472,7 @@ impl<T, A: Allocator> RawVec<T, A> {
if cap == 0 {
unsafe { self.alloc.deallocate(ptr, layout) };
self.ptr = Unique::dangling();
- self.cap = 0;
+ self.cap = Cap::ZERO;
} else {
let ptr = unsafe {
// `Layout::array` cannot overflow here because it would have
@@ -460,7 +483,10 @@ impl<T, A: Allocator> RawVec<T, A> {
.shrink(ptr, layout, new_layout)
.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
};
- self.set_ptr_and_cap(ptr, cap);
+ // SAFETY: if the allocation is valid, then the capacity is too
+ unsafe {
+ self.set_ptr_and_cap(ptr, cap);
+ }
}
Ok(())
}
diff --git a/library/alloc/src/raw_vec/tests.rs b/library/alloc/src/raw_vec/tests.rs
index ff322f0da..f8cada01c 100644
--- a/library/alloc/src/raw_vec/tests.rs
+++ b/library/alloc/src/raw_vec/tests.rs
@@ -1,4 +1,5 @@
use super::*;
+use core::mem::size_of;
use std::cell::Cell;
#[test]
@@ -161,3 +162,11 @@ fn zst_reserve_exact_panic() {
v.reserve_exact(101, usize::MAX - 100);
}
+
+#[test]
+fn niches() {
+ let baseline = size_of::<RawVec<u8>>();
+ assert_eq!(size_of::<Option<RawVec<u8>>>(), baseline);
+ assert_eq!(size_of::<Option<Option<RawVec<u8>>>>(), baseline);
+ assert_eq!(size_of::<Option<Option<Option<RawVec<u8>>>>>(), baseline);
+}
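
Editorial sketch: the new `niches` test passes because `rustc_layout_scalar_valid_range_end` carves the capacities above `isize::MAX` out of `Cap`, and enum layout reuses the excluded bit patterns as tags. The same effect is visible on stable via `NonZeroUsize` (a standalone illustration, not part of the patch):

    use core::mem::size_of;
    use core::num::NonZeroUsize;

    fn main() {
        // NonZeroUsize excludes exactly one value (0), which Option
        // reuses as its None tag, so no extra space is needed.
        assert_eq!(size_of::<Option<NonZeroUsize>>(), size_of::<usize>());
        // Cap excludes every value above isize::MAX, leaving ~2^63 spare
        // bit patterns, which is why even the triply-nested Options of
        // RawVec in the test above stay the same size.
    }
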
diff --git a/library/alloc/src/rc.rs b/library/alloc/src/rc.rs
index dd7876bed..59f3a50dd 100644
--- a/library/alloc/src/rc.rs
+++ b/library/alloc/src/rc.rs
@@ -1748,7 +1748,6 @@ impl<T: Clone, A: Allocator + Clone> Rc<T, A> {
/// # Examples
///
/// ```
- /// #![feature(arc_unwrap_or_clone)]
/// # use std::{ptr, rc::Rc};
/// let inner = String::from("test");
/// let ptr = inner.as_ptr();
@@ -1769,7 +1768,7 @@ impl<T: Clone, A: Allocator + Clone> Rc<T, A> {
/// assert!(ptr::eq(ptr, inner.as_ptr()));
/// ```
#[inline]
- #[unstable(feature = "arc_unwrap_or_clone", issue = "93610")]
+ #[stable(feature = "arc_unwrap_or_clone", since = "1.76.0")]
pub fn unwrap_or_clone(this: Self) -> T {
Rc::try_unwrap(this).unwrap_or_else(|rc| (*rc).clone())
}
@@ -2023,6 +2022,7 @@ impl<T, A: Allocator> Rc<[T], A> {
}
}
+#[cfg(not(no_global_oom_handling))]
/// Specialization trait used for `From<&[T]>`.
trait RcFromSlice<T> {
fn from_slice(slice: &[T]) -> Self;
diff --git a/library/alloc/src/rc/tests.rs b/library/alloc/src/rc/tests.rs
index 1f221b86f..c8a40603d 100644
--- a/library/alloc/src/rc/tests.rs
+++ b/library/alloc/src/rc/tests.rs
@@ -1,12 +1,7 @@
use super::*;
-use std::boxed::Box;
use std::cell::RefCell;
use std::clone::Clone;
-use std::convert::{From, TryInto};
-use std::mem::drop;
-use std::option::Option::{self, None, Some};
-use std::result::Result::{Err, Ok};
#[test]
fn test_clone() {
diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs
index 351e6c1a4..85df49163 100644
--- a/library/alloc/src/sync.rs
+++ b/library/alloc/src/sync.rs
@@ -2174,7 +2174,6 @@ impl<T: Clone, A: Allocator + Clone> Arc<T, A> {
/// # Examples
///
/// ```
- /// #![feature(arc_unwrap_or_clone)]
/// # use std::{ptr, sync::Arc};
/// let inner = String::from("test");
/// let ptr = inner.as_ptr();
@@ -2195,7 +2194,7 @@ impl<T: Clone, A: Allocator + Clone> Arc<T, A> {
/// assert!(ptr::eq(ptr, inner.as_ptr()));
/// ```
#[inline]
- #[unstable(feature = "arc_unwrap_or_clone", issue = "93610")]
+ #[stable(feature = "arc_unwrap_or_clone", since = "1.76.0")]
pub fn unwrap_or_clone(this: Self) -> T {
Arc::try_unwrap(this).unwrap_or_else(|arc| (*arc).clone())
}
@@ -2843,16 +2842,14 @@ impl<T: ?Sized, A: Allocator> Weak<T, A> {
/// (i.e., when this `Weak` was created by `Weak::new`).
#[inline]
fn inner(&self) -> Option<WeakInner<'_>> {
- if is_dangling(self.ptr.as_ptr()) {
+ let ptr = self.ptr.as_ptr();
+ if is_dangling(ptr) {
None
} else {
// We are careful to *not* create a reference covering the "data" field, as
// the field may be mutated concurrently (for example, if the last `Arc`
// is dropped, the data field will be dropped in-place).
- Some(unsafe {
- let ptr = self.ptr.as_ptr();
- WeakInner { strong: &(*ptr).strong, weak: &(*ptr).weak }
- })
+ Some(unsafe { WeakInner { strong: &(*ptr).strong, weak: &(*ptr).weak } })
}
}
@@ -3503,6 +3500,7 @@ impl<T> FromIterator<T> for Arc<[T]> {
}
}
+#[cfg(not(no_global_oom_handling))]
/// Specialization trait used for collecting into `Arc<[T]>`.
trait ToArcSlice<T>: Iterator<Item = T> + Sized {
fn to_arc_slice(self) -> Arc<[T]>;
diff --git a/library/alloc/src/sync/tests.rs b/library/alloc/src/sync/tests.rs
index 863d58bdf..d37e45569 100644
--- a/library/alloc/src/sync/tests.rs
+++ b/library/alloc/src/sync/tests.rs
@@ -1,21 +1,12 @@
use super::*;
-use std::boxed::Box;
use std::clone::Clone;
-use std::convert::{From, TryInto};
-use std::mem::drop;
-use std::ops::Drop;
-use std::option::Option::{self, None, Some};
-use std::sync::atomic::{
- self,
- Ordering::{Acquire, SeqCst},
-};
+use std::option::Option::None;
+use std::sync::atomic::Ordering::SeqCst;
use std::sync::mpsc::channel;
use std::sync::Mutex;
use std::thread;
-use crate::vec::Vec;
-
struct Canary(*mut atomic::AtomicUsize);
impl Drop for Canary {
diff --git a/library/alloc/src/tests.rs b/library/alloc/src/tests.rs
index b1d3a9fa8..ab256ceae 100644
--- a/library/alloc/src/tests.rs
+++ b/library/alloc/src/tests.rs
@@ -1,8 +1,6 @@
//! Test for `boxed` mod.
use core::any::Any;
-use core::clone::Clone;
-use core::convert::TryInto;
use core::ops::Deref;
use std::boxed::Box;
diff --git a/library/alloc/src/vec/in_place_collect.rs b/library/alloc/src/vec/in_place_collect.rs
index 5ecd04799..e68cce04c 100644
--- a/library/alloc/src/vec/in_place_collect.rs
+++ b/library/alloc/src/vec/in_place_collect.rs
@@ -6,11 +6,11 @@
//! The specialization in this module applies to iterators in the shape of
//! `source.adapter().adapter().adapter().collect::<Vec<U>>()`
//! where `source` is an owning iterator obtained from [`Vec<T>`], [`Box<[T]>`][box] (by conversion to `Vec`)
-//! or [`BinaryHeap<T>`], the adapters each consume one or more items per step
-//! (represented by [`InPlaceIterable`]), provide transitive access to `source` (via [`SourceIter`])
-//! and thus the underlying allocation. And finally the layouts of `T` and `U` must
-//! have the same size and alignment, this is currently ensured via const eval instead of trait bounds
-//! in the specialized [`SpecFromIter`] implementation.
+//! or [`BinaryHeap<T>`], the adapters guarantee to consume enough items per step to make room
+//! for the results (represented by [`InPlaceIterable`]), provide transitive access to `source`
+//! (via [`SourceIter`]) and thus the underlying allocation.
+//! And finally there are alignment and size constraints to consider; these are currently ensured via
+//! const eval instead of trait bounds in the specialized [`SpecFromIter`] implementation.
//!
//! [`BinaryHeap<T>`]: crate::collections::BinaryHeap
//! [box]: crate::boxed::Box
@@ -35,11 +35,28 @@
//! the step of reading a value and getting a reference to write to. Instead raw pointers must be
//! used on the reader and writer side.
//!
-//! That writes never clobber a yet-to-be-read item is ensured by the [`InPlaceIterable`] requirements.
+//! That writes never clobber yet-to-be-read items is ensured by the [`InPlaceIterable`] requirements.
//!
//! # Layout constraints
//!
-//! [`Allocator`] requires that `allocate()` and `deallocate()` have matching alignment and size.
+//! When recycling an allocation between different types we must uphold the [`Allocator`] contract
+//! which means that the input and output Layouts have to "fit".
+//!
+//! To complicate things further, `InPlaceIterable` supports splitting or merging items into smaller/
+//! larger ones to enable (de)aggregation of arrays.
+//!
+//! Ultimately each step of the iterator must free up enough *bytes* in the source to make room
+//! for the next output item.
+//! If `T` and `U` have the same size no fixup is needed.
+//! If `T`'s size is a multiple of `U`'s we can compensate by multiplying the capacity accordingly.
+//! Otherwise the input capacity (and thus layout) in bytes may not be representable by the output
+//! `Vec<U>`. In that case `alloc.shrink()` is used to update the allocation's layout.
+//!
+//! The alignment of `T` must be the same as or larger than that of `U`. Since alignments are
+//! always a power of two, _larger_ implies _is a multiple of_.
+//!
+//! See `in_place_collectible()` for the current conditions.
+//!
//! Additionally this specialization doesn't make sense for ZSTs as there is no reallocation to
//! avoid and it would make pointer arithmetic more difficult.
//!
@@ -55,7 +72,7 @@
//! This is handled by the [`InPlaceDrop`] guard for sink items (`U`) and by
//! [`vec::IntoIter::forget_allocation_drop_remaining()`] for remaining source items (`T`).
//!
-//! If dropping any remaining source item (`T`) panics then [`InPlaceDstBufDrop`] will handle dropping
+//! If dropping any remaining source item (`T`) panics then [`InPlaceDstDataSrcBufDrop`] will handle dropping
//! the already collected sink items (`U`) and freeing the allocation.
//!
//! [`vec::IntoIter::forget_allocation_drop_remaining()`]: super::IntoIter::forget_allocation_drop_remaining()
@@ -137,44 +154,98 @@
//! }
//! vec.truncate(write_idx);
//! ```
+use crate::alloc::{handle_alloc_error, Global};
+use core::alloc::Allocator;
+use core::alloc::Layout;
use core::iter::{InPlaceIterable, SourceIter, TrustedRandomAccessNoCoerce};
+use core::marker::PhantomData;
use core::mem::{self, ManuallyDrop, SizedTypeProperties};
-use core::ptr::{self};
+use core::num::NonZeroUsize;
+use core::ptr::{self, NonNull};
+
+use super::{InPlaceDrop, InPlaceDstDataSrcBufDrop, SpecFromIter, SpecFromIterNested, Vec};
+
+const fn in_place_collectible<DEST, SRC>(
+ step_merge: Option<NonZeroUsize>,
+ step_expand: Option<NonZeroUsize>,
+) -> bool {
+ // Require matching alignments because an alignment-changing realloc is inefficient on many
+ // system allocators and better implementations would require the unstable Allocator trait.
+ if const { SRC::IS_ZST || DEST::IS_ZST || mem::align_of::<SRC>() != mem::align_of::<DEST>() } {
+ return false;
+ }
-use super::{InPlaceDrop, InPlaceDstBufDrop, SpecFromIter, SpecFromIterNested, Vec};
+ match (step_merge, step_expand) {
+ (Some(step_merge), Some(step_expand)) => {
+ // At least N merged source items -> at most M expanded destination items
+ // e.g.
+ // - 1 x [u8; 4] -> 4x u8, via flatten
+ // - 4 x u8 -> 1x [u8; 4], via array_chunks
+ mem::size_of::<SRC>() * step_merge.get() >= mem::size_of::<DEST>() * step_expand.get()
+ }
+ // Fall back to other from_iter impls if an overflow occurred in the step merge/expansion
+ // tracking.
+ _ => false,
+ }
+}
+
+const fn needs_realloc<SRC, DEST>(src_cap: usize, dst_cap: usize) -> bool {
+ if const { mem::align_of::<SRC>() != mem::align_of::<DEST>() } {
+ // FIXME: use unreachable! once that works in const
+ panic!("in_place_collectible() prevents this");
+ }
-/// Specialization marker for collecting an iterator pipeline into a Vec while reusing the
-/// source allocation, i.e. executing the pipeline in place.
-#[rustc_unsafe_specialization_marker]
-pub(super) trait InPlaceIterableMarker {}
+ // If src type size is an integer multiple of the destination type size then
+ // the caller will have calculated a `dst_cap` that is an integer multiple of
+ // `src_cap` without remainder.
+ if const {
+ let src_sz = mem::size_of::<SRC>();
+ let dest_sz = mem::size_of::<DEST>();
+ dest_sz != 0 && src_sz % dest_sz == 0
+ } {
+ return false;
+ }
-impl<T> InPlaceIterableMarker for T where T: InPlaceIterable {}
+ // type layouts don't guarantee a fit, so do a runtime check to see if
+ // the allocations happen to match
+ return src_cap > 0 && src_cap * mem::size_of::<SRC>() != dst_cap * mem::size_of::<DEST>();
+}
+
+/// This provides a shorthand for the source type since local type aliases aren't a thing.
+#[rustc_specialization_trait]
+trait InPlaceCollect: SourceIter<Source: AsVecIntoIter> + InPlaceIterable {
+ type Src;
+}
+
+impl<T> InPlaceCollect for T
+where
+ T: SourceIter<Source: AsVecIntoIter> + InPlaceIterable,
+{
+ type Src = <<T as SourceIter>::Source as AsVecIntoIter>::Item;
+}
impl<T, I> SpecFromIter<T, I> for Vec<T>
where
- I: Iterator<Item = T> + SourceIter<Source: AsVecIntoIter> + InPlaceIterableMarker,
+ I: Iterator<Item = T> + InPlaceCollect,
+ <I as SourceIter>::Source: AsVecIntoIter,
{
default fn from_iter(mut iterator: I) -> Self {
// See "Layout constraints" section in the module documentation. We rely on const
// optimization here since these conditions currently cannot be expressed as trait bounds
- if T::IS_ZST
- || mem::size_of::<T>()
- != mem::size_of::<<<I as SourceIter>::Source as AsVecIntoIter>::Item>()
- || mem::align_of::<T>()
- != mem::align_of::<<<I as SourceIter>::Source as AsVecIntoIter>::Item>()
- {
+ if const { !in_place_collectible::<T, I::Src>(I::MERGE_BY, I::EXPAND_BY) } {
// fallback to more generic implementations
return SpecFromIterNested::from_iter(iterator);
}
- let (src_buf, src_ptr, dst_buf, dst_end, cap) = unsafe {
+ let (src_buf, src_ptr, src_cap, mut dst_buf, dst_end, dst_cap) = unsafe {
let inner = iterator.as_inner().as_into_iter();
(
inner.buf.as_ptr(),
inner.ptr,
+ inner.cap,
inner.buf.as_ptr() as *mut T,
inner.end as *const T,
- inner.cap,
+ inner.cap * mem::size_of::<I::Src>() / mem::size_of::<T>(),
)
};
@@ -195,19 +266,56 @@ where
);
}
- // The ownership of the allocation and the new `T` values is temporarily moved into `dst_guard`.
- // This is safe because `forget_allocation_drop_remaining` immediately forgets the allocation
+ // The ownership of the source allocation and the new `T` values is temporarily moved into `dst_guard`.
+ // This is safe because
+ // * `forget_allocation_drop_remaining` immediately forgets the allocation
// before any panic can occur in order to avoid any double free, and then proceeds to drop
// any remaining values at the tail of the source.
+ // * the shrink either panics without invalidating the allocation, aborts or
+ // succeeds. In the last case we disarm the guard.
//
// Note: This access to the source wouldn't be allowed by the TrustedRandomIteratorNoCoerce
// contract (used by SpecInPlaceCollect below). But see the "O(1) collect" section in the
// module documentation why this is ok anyway.
- let dst_guard = InPlaceDstBufDrop { ptr: dst_buf, len, cap };
+ let dst_guard =
+ InPlaceDstDataSrcBufDrop { ptr: dst_buf, len, src_cap, src: PhantomData::<I::Src> };
src.forget_allocation_drop_remaining();
+
+ // Adjust the allocation if the source had a capacity in bytes that wasn't a multiple
+ // of the destination type size.
+ // Since the discrepancy should generally be small this should only result in some
+ // bookkeeping updates and no memmove.
+ if needs_realloc::<I::Src, T>(src_cap, dst_cap) {
+ let alloc = Global;
+ debug_assert_ne!(src_cap, 0);
+ debug_assert_ne!(dst_cap, 0);
+ unsafe {
+ // The old allocation exists, therefore it must have a valid layout.
+ let src_align = mem::align_of::<I::Src>();
+ let src_size = mem::size_of::<I::Src>().unchecked_mul(src_cap);
+ let old_layout = Layout::from_size_align_unchecked(src_size, src_align);
+
+ // The allocation must be equal or smaller for in-place iteration to be possible
+ // therefore the new layout must be ≤ the old one and therefore valid.
+ let dst_align = mem::align_of::<T>();
+ let dst_size = mem::size_of::<T>().unchecked_mul(dst_cap);
+ let new_layout = Layout::from_size_align_unchecked(dst_size, dst_align);
+
+ let result = alloc.shrink(
+ NonNull::new_unchecked(dst_buf as *mut u8),
+ old_layout,
+ new_layout,
+ );
+ let Ok(reallocated) = result else { handle_alloc_error(new_layout) };
+ dst_buf = reallocated.as_ptr() as *mut T;
+ }
+ } else {
+ debug_assert_eq!(src_cap * mem::size_of::<I::Src>(), dst_cap * mem::size_of::<T>());
+ }
+
mem::forget(dst_guard);
- let vec = unsafe { Vec::from_raw_parts(dst_buf, len, cap) };
+ let vec = unsafe { Vec::from_raw_parts(dst_buf, len, dst_cap) };
vec
}
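
Editorial sketch: from the user's side, the byte-based `MERGE_BY`/`EXPAND_BY` accounting above is what lets pipelines that change the item size still collect in place. The two iterator shapes involved look like this (not from the patch; whether the allocation is actually reused is an internal specialization, and `Iterator::array_chunks` is nightly-only behind `iter_array_chunks`):

    #![feature(iter_array_chunks)]

    fn main() {
        let bytes: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8];
        // 4 x u8 -> 1 x [u8; 4]: each step merges four source items (MERGE_BY).
        let words: Vec<[u8; 4]> = bytes.into_iter().array_chunks::<4>().collect();
        // 1 x [u8; 4] -> 4 x u8: each step expands one source item (EXPAND_BY).
        let flat: Vec<u8> = words.into_iter().flatten().collect();
        assert_eq!(flat, [1, 2, 3, 4, 5, 6, 7, 8]);
    }
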
diff --git a/library/alloc/src/vec/in_place_drop.rs b/library/alloc/src/vec/in_place_drop.rs
index 25ca33c6a..40a540b57 100644
--- a/library/alloc/src/vec/in_place_drop.rs
+++ b/library/alloc/src/vec/in_place_drop.rs
@@ -1,6 +1,10 @@
-use core::ptr::{self};
+use core::marker::PhantomData;
+use core::ptr::{self, drop_in_place};
use core::slice::{self};
+use crate::alloc::Global;
+use crate::raw_vec::RawVec;
+
// A helper struct for in-place iteration that drops the destination slice of iteration,
// i.e. the head. The source slice (the tail) is dropped by IntoIter.
pub(super) struct InPlaceDrop<T> {
@@ -23,17 +27,23 @@ impl<T> Drop for InPlaceDrop<T> {
}
}
-// A helper struct for in-place collection that drops the destination allocation and elements,
-// to avoid leaking them if some other destructor panics.
-pub(super) struct InPlaceDstBufDrop<T> {
- pub(super) ptr: *mut T,
+// A helper struct for in-place collection that drops the destination items together with
+// the source allocation - i.e. before the reallocation happened - to avoid leaking them
+// if some other destructor panics.
+pub(super) struct InPlaceDstDataSrcBufDrop<Src, Dest> {
+ pub(super) ptr: *mut Dest,
pub(super) len: usize,
- pub(super) cap: usize,
+ pub(super) src_cap: usize,
+ pub(super) src: PhantomData<Src>,
}
-impl<T> Drop for InPlaceDstBufDrop<T> {
+impl<Src, Dest> Drop for InPlaceDstDataSrcBufDrop<Src, Dest> {
#[inline]
fn drop(&mut self) {
- unsafe { super::Vec::from_raw_parts(self.ptr, self.len, self.cap) };
+ unsafe {
+ let _drop_allocation =
+ RawVec::<Src>::from_raw_parts_in(self.ptr.cast::<Src>(), self.src_cap, Global);
+ drop_in_place(core::ptr::slice_from_raw_parts_mut::<Dest>(self.ptr, self.len));
+ };
}
}
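The guard above follows the arm-then-disarm drop-guard idiom: it is constructed before the fallible steps and `mem::forget` is called on success, so its `Drop` only runs if a panic unwinds past it. A minimal self-contained sketch of the idiom (not this file's actual types):

use std::mem;

// The guard's Drop performs the panic-path cleanup.
struct Guard<F: FnMut()>(F);

impl<F: FnMut()> Drop for Guard<F> {
    fn drop(&mut self) {
        (self.0)();
    }
}

fn run(fallible_step: impl FnOnce()) {
    let guard = Guard(|| eprintln!("unwinding: drop data using the old layout"));
    fallible_step(); // if this panics, `guard` cleans up
    mem::forget(guard); // success: disarm so the cleanup never runs
}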
diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs
index b2db2fdfd..b03e04b7c 100644
--- a/library/alloc/src/vec/into_iter.rs
+++ b/library/alloc/src/vec/into_iter.rs
@@ -7,7 +7,8 @@ use crate::raw_vec::RawVec;
use core::array;
use core::fmt;
use core::iter::{
- FusedIterator, InPlaceIterable, SourceIter, TrustedLen, TrustedRandomAccessNoCoerce,
+ FusedIterator, InPlaceIterable, SourceIter, TrustedFused, TrustedLen,
+ TrustedRandomAccessNoCoerce,
};
use core::marker::PhantomData;
use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
@@ -285,9 +286,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
// Also note the implementation of `Self: TrustedRandomAccess` requires
// that `T: Copy` so reading elements from the buffer doesn't invalidate
// them for `Drop`.
- unsafe {
- if T::IS_ZST { mem::zeroed() } else { ptr::read(self.ptr.add(i)) }
- }
+ unsafe { if T::IS_ZST { mem::zeroed() } else { ptr::read(self.ptr.add(i)) } }
}
}
@@ -339,6 +338,10 @@ impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> {
#[stable(feature = "fused", since = "1.26.0")]
impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}
+#[doc(hidden)]
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<T, A: Allocator> TrustedFused for IntoIter<T, A> {}
+
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T, A: Allocator> TrustedLen for IntoIter<T, A> {}
@@ -423,7 +426,10 @@ unsafe impl<#[may_dangle] T, A: Allocator> Drop for IntoIter<T, A> {
// also refer to the vec::in_place_collect module documentation to get an overview
#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
-unsafe impl<T, A: Allocator> InPlaceIterable for IntoIter<T, A> {}
+unsafe impl<T, A: Allocator> InPlaceIterable for IntoIter<T, A> {
+ const EXPAND_BY: Option<NonZeroUsize> = NonZeroUsize::new(1);
+ const MERGE_BY: Option<NonZeroUsize> = NonZeroUsize::new(1);
+}
#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
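`EXPAND_BY` and `MERGE_BY` describe, roughly, the factor by which an adapter expands or merges items relative to its source (1:1 for a plain `IntoIter`); adapters such as `flatten` or `array_chunks` scale one side of the ratio. A hedged illustration of what matching ratios enable, mirroring the tests added later in this diff:

// Collecting a Vec's IntoIter back into a Vec of the same element type is
// expected to reuse the allocation in place (same pointer, no copy).
fn roundtrip(v: Vec<u32>) -> Vec<u32> {
    let src = v.as_ptr();
    let out: Vec<u32> = v.into_iter().collect();
    debug_assert_eq!(src, out.as_ptr());
    out
}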
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs
index 6c78d65f1..b2e920397 100644
--- a/library/alloc/src/vec/mod.rs
+++ b/library/alloc/src/vec/mod.rs
@@ -58,6 +58,7 @@ use core::cmp;
use core::cmp::Ordering;
use core::fmt;
use core::hash::{Hash, Hasher};
+#[cfg(not(no_global_oom_handling))]
use core::iter;
use core::marker::PhantomData;
use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
@@ -101,6 +102,7 @@ mod into_iter;
#[cfg(not(no_global_oom_handling))]
use self::is_zero::IsZero;
+#[cfg(not(no_global_oom_handling))]
mod is_zero;
#[cfg(not(no_global_oom_handling))]
@@ -121,7 +123,7 @@ use self::set_len_on_drop::SetLenOnDrop;
mod set_len_on_drop;
#[cfg(not(no_global_oom_handling))]
-use self::in_place_drop::{InPlaceDrop, InPlaceDstBufDrop};
+use self::in_place_drop::{InPlaceDrop, InPlaceDstDataSrcBufDrop};
#[cfg(not(no_global_oom_handling))]
mod in_place_drop;
@@ -1775,7 +1777,32 @@ impl<T, A: Allocator> Vec<T, A> {
return;
}
- /* INVARIANT: vec.len() > read >= write > write-1 >= 0 */
+ // Check if we ever want to remove anything.
+        // This allows us to use copy_nonoverlapping in the loop below
+        // and avoids any memory writes if there is nothing to remove.
+ let mut first_duplicate_idx: usize = 1;
+ let start = self.as_mut_ptr();
+ while first_duplicate_idx != len {
+ let found_duplicate = unsafe {
+                // SAFETY: first_duplicate_idx is always in range [1..len)
+ // Note that we start iteration from 1 so we never overflow.
+ let prev = start.add(first_duplicate_idx.wrapping_sub(1));
+ let current = start.add(first_duplicate_idx);
+                // The docs explicitly state that the references are passed in reverse order.
+ same_bucket(&mut *current, &mut *prev)
+ };
+ if found_duplicate {
+ break;
+ }
+ first_duplicate_idx += 1;
+ }
+        // Nothing needs to be removed.
+        // Note that first_duplicate_idx cannot exceed len.
+ if first_duplicate_idx == len {
+ return;
+ }
+
+ /* INVARIANT: vec.len() > read > write > write-1 >= 0 */
struct FillGapOnDrop<'a, T, A: core::alloc::Allocator> {
/* Offset of the element we want to check if it is duplicate */
read: usize,
@@ -1821,31 +1848,39 @@ impl<T, A: Allocator> Vec<T, A> {
}
}
- let mut gap = FillGapOnDrop { read: 1, write: 1, vec: self };
- let ptr = gap.vec.as_mut_ptr();
-
/* Drop items while going through Vec, it should be more efficient than
* doing slice partition_dedup + truncate */
+        // Construct the gap first and then drop the item to avoid memory corruption if `T::drop` panics.
+ let mut gap =
+ FillGapOnDrop { read: first_duplicate_idx + 1, write: first_duplicate_idx, vec: self };
+ unsafe {
+            // SAFETY: we checked above that first_duplicate_idx is in bounds.
+            // If the drop panics, `gap` will remove this item without dropping it again.
+ ptr::drop_in_place(start.add(first_duplicate_idx));
+ }
+
/* SAFETY: Because of the invariant, read_ptr, prev_ptr and write_ptr
* are always in-bounds and read_ptr never aliases prev_ptr */
unsafe {
while gap.read < len {
- let read_ptr = ptr.add(gap.read);
- let prev_ptr = ptr.add(gap.write.wrapping_sub(1));
+ let read_ptr = start.add(gap.read);
+ let prev_ptr = start.add(gap.write.wrapping_sub(1));
- if same_bucket(&mut *read_ptr, &mut *prev_ptr) {
+                // The docs explicitly state that the references are passed in reverse order.
+ let found_duplicate = same_bucket(&mut *read_ptr, &mut *prev_ptr);
+ if found_duplicate {
// Increase `gap.read` now since the drop may panic.
gap.read += 1;
/* We have found duplicate, drop it in-place */
ptr::drop_in_place(read_ptr);
} else {
- let write_ptr = ptr.add(gap.write);
+ let write_ptr = start.add(gap.write);
- /* Because `read_ptr` can be equal to `write_ptr`, we either
- * have to use `copy` or conditional `copy_nonoverlapping`.
- * Looks like the first option is faster. */
- ptr::copy(read_ptr, write_ptr, 1);
+                    /* read_ptr cannot be equal to write_ptr because at this point
+                     * we are guaranteed to have skipped at least one element (before the loop starts).
+                     */
+ ptr::copy_nonoverlapping(read_ptr, write_ptr, 1);
/* We have filled that place, so go further */
gap.write += 1;
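In safe-Rust terms, ignoring the panic-safety machinery, the new two-phase strategy can be modelled roughly as follows (a hedged sketch using swaps instead of raw copies, not the actual implementation):

fn dedup_model<T: PartialEq>(v: &mut Vec<T>) {
    let len = v.len();
    if len <= 1 {
        return;
    }
    // Phase 1: scan for the first duplicate; until one is found, nothing is written.
    let mut first_dup = 1;
    while first_dup < len && v[first_dup] != v[first_dup - 1] {
        first_dup += 1;
    }
    if first_dup == len {
        return; // nothing to remove
    }
    // Phase 2: compact. `read` stays strictly ahead of `write`, which is what
    // lets the real code use copy_nonoverlapping instead of copy.
    let mut write = first_dup;
    for read in first_dup + 1..len {
        if v[read] != v[write - 1] {
            v.swap(read, write);
            write += 1;
        }
    }
    v.truncate(write);
}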
@@ -2599,6 +2634,7 @@ pub fn from_elem_in<T: Clone, A: Allocator>(elem: T, n: usize, alloc: A) -> Vec<
<T as SpecFromElem>::from_elem(elem, n, alloc)
}
+#[cfg(not(no_global_oom_handling))]
trait ExtendFromWithinSpec {
/// # Safety
///
@@ -2607,6 +2643,7 @@ trait ExtendFromWithinSpec {
unsafe fn spec_extend_from_within(&mut self, src: Range<usize>);
}
+#[cfg(not(no_global_oom_handling))]
impl<T: Clone, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
default unsafe fn spec_extend_from_within(&mut self, src: Range<usize>) {
// SAFETY:
@@ -2626,6 +2663,7 @@ impl<T: Clone, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
}
}
+#[cfg(not(no_global_oom_handling))]
impl<T: Copy, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
unsafe fn spec_extend_from_within(&mut self, src: Range<usize>) {
let count = src.len();
@@ -2706,7 +2744,7 @@ impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
/// ```
/// use std::hash::BuildHasher;
///
-/// let b = std::collections::hash_map::RandomState::new();
+/// let b = std::hash::RandomState::new();
/// let v: Vec<u8> = vec![0xa8, 0x3c, 0x09];
/// let s: &[u8] = &[0xa8, 0x3c, 0x09];
/// assert_eq!(b.hash_one(v), b.hash_one(s));
diff --git a/library/alloc/src/vec/spec_from_elem.rs b/library/alloc/src/vec/spec_from_elem.rs
index da43d17bf..01a6db144 100644
--- a/library/alloc/src/vec/spec_from_elem.rs
+++ b/library/alloc/src/vec/spec_from_elem.rs
@@ -36,12 +36,12 @@ impl SpecFromElem for i8 {
if elem == 0 {
return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n };
}
+ let mut v = Vec::with_capacity_in(n, alloc);
unsafe {
- let mut v = Vec::with_capacity_in(n, alloc);
ptr::write_bytes(v.as_mut_ptr(), elem as u8, n);
v.set_len(n);
- v
}
+ v
}
}
@@ -51,11 +51,26 @@ impl SpecFromElem for u8 {
if elem == 0 {
return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n };
}
+ let mut v = Vec::with_capacity_in(n, alloc);
unsafe {
- let mut v = Vec::with_capacity_in(n, alloc);
ptr::write_bytes(v.as_mut_ptr(), elem, n);
v.set_len(n);
- v
}
+ v
+ }
+}
+
+// A better way would be to implement this for all ZSTs which are `Copy` and have trivial `Clone`,
+// but the latter cannot currently be detected.
+impl SpecFromElem for () {
+ #[inline]
+ fn from_elem<A: Allocator>(_elem: (), n: usize, alloc: A) -> Vec<(), A> {
+ let mut v = Vec::with_capacity_in(n, alloc);
+ // SAFETY: the capacity has just been set to `n`
+ // and `()` is a ZST with trivial `Clone` implementation
+ unsafe {
+ v.set_len(n);
+ }
+ v
}
}
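With this specialization, the occasionally-used `vec![(); n]` pattern becomes pure length bookkeeping; a short usage sketch:

fn main() {
    // No element is ever written; only the length is recorded.
    let v: Vec<()> = vec![(); 1_000_000];
    assert_eq!(v.len(), 1_000_000);
}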
diff --git a/library/alloc/src/vec/spec_from_iter.rs b/library/alloc/src/vec/spec_from_iter.rs
index efa686847..2ddfdde59 100644
--- a/library/alloc/src/vec/spec_from_iter.rs
+++ b/library/alloc/src/vec/spec_from_iter.rs
@@ -13,13 +13,13 @@ use super::{IntoIter, SpecExtend, SpecFromIterNested, Vec};
/// +-+-----------+
/// |
/// v
-/// +-+-------------------------------+ +---------------------+
-/// |SpecFromIter +---->+SpecFromIterNested |
-/// |where I: | | |where I: |
-/// | Iterator (default)----------+ | | Iterator (default) |
-/// | vec::IntoIter | | | TrustedLen |
-/// | SourceIterMarker---fallback-+ | +---------------------+
-/// +---------------------------------+
+/// +-+---------------------------------+ +---------------------+
+/// |SpecFromIter +---->+SpecFromIterNested |
+/// |where I: | | |where I: |
+/// | Iterator (default)------------+ | | Iterator (default) |
+/// | vec::IntoIter | | | TrustedLen |
+/// | InPlaceCollect--(fallback to)-+ | +---------------------+
+/// +-----------------------------------+
/// ```
pub(super) trait SpecFromIter<T, I> {
fn from_iter(iter: I) -> Self;
diff --git a/library/alloc/tests/arc.rs b/library/alloc/tests/arc.rs
index ce40b5c9b..d564a30b1 100644
--- a/library/alloc/tests/arc.rs
+++ b/library/alloc/tests/arc.rs
@@ -1,6 +1,5 @@
use std::any::Any;
use std::cell::RefCell;
-use std::cmp::PartialEq;
use std::iter::TrustedLen;
use std::mem;
use std::sync::{Arc, Weak};
diff --git a/library/alloc/tests/borrow.rs b/library/alloc/tests/borrow.rs
index 57976aa6c..af7efb7d7 100644
--- a/library/alloc/tests/borrow.rs
+++ b/library/alloc/tests/borrow.rs
@@ -1,4 +1,4 @@
-use std::borrow::{Cow, ToOwned};
+use std::borrow::Cow;
use std::ffi::{CStr, OsStr};
use std::path::Path;
use std::rc::Rc;
diff --git a/library/alloc/tests/lib.rs b/library/alloc/tests/lib.rs
index aa7a331b3..2dcfc6b4a 100644
--- a/library/alloc/tests/lib.rs
+++ b/library/alloc/tests/lib.rs
@@ -1,5 +1,6 @@
#![feature(allocator_api)]
#![feature(alloc_layout_extra)]
+#![feature(iter_array_chunks)]
#![feature(assert_matches)]
#![feature(btree_extract_if)]
#![feature(cow_is_borrowed)]
@@ -40,11 +41,11 @@
#![feature(thin_box)]
#![feature(strict_provenance)]
#![feature(drain_keep_rest)]
+#![allow(internal_features)]
#![deny(fuzzy_provenance_casts)]
#![deny(unsafe_op_in_unsafe_fn)]
-use std::collections::hash_map::DefaultHasher;
-use std::hash::{Hash, Hasher};
+use std::hash::{DefaultHasher, Hash, Hasher};
mod arc;
mod autotraits;
diff --git a/library/alloc/tests/rc.rs b/library/alloc/tests/rc.rs
index efb39a609..499740e73 100644
--- a/library/alloc/tests/rc.rs
+++ b/library/alloc/tests/rc.rs
@@ -1,6 +1,5 @@
use std::any::Any;
use std::cell::RefCell;
-use std::cmp::PartialEq;
use std::iter::TrustedLen;
use std::mem;
use std::rc::{Rc, Weak};
diff --git a/library/alloc/tests/str.rs b/library/alloc/tests/str.rs
index cb59a9d4a..df8a26062 100644
--- a/library/alloc/tests/str.rs
+++ b/library/alloc/tests/str.rs
@@ -1171,6 +1171,17 @@ fn test_iterator() {
}
#[test]
+fn test_iterator_advance() {
+ let s = "「赤錆」と呼ばれる鉄錆は、水の存在下での鉄の自然酸化によって生じる、オキシ水酸化鉄(III) 等の(含水)酸化物粒子の疎な凝集膜であるとみなせる。";
+ let chars: Vec<char> = s.chars().collect();
+ let mut it = s.chars();
+ it.advance_by(1).unwrap();
+ assert_eq!(it.next(), Some(chars[1]));
+ it.advance_by(33).unwrap();
+ assert_eq!(it.next(), Some(chars[35]));
+}
+
+#[test]
fn test_rev_iterator() {
let s = "ศไทย中华Việt Nam";
let v = ['m', 'a', 'N', ' ', 't', 'ệ', 'i', 'V', '华', '中', 'ย', 'ท', 'ไ', 'ศ'];
diff --git a/library/alloc/tests/vec.rs b/library/alloc/tests/vec.rs
index d44dcfbf6..364dc9201 100644
--- a/library/alloc/tests/vec.rs
+++ b/library/alloc/tests/vec.rs
@@ -1,6 +1,5 @@
use core::alloc::{Allocator, Layout};
-use core::assert_eq;
-use core::iter::IntoIterator;
+use core::{assert_eq, assert_ne};
use core::num::NonZeroUsize;
use core::ptr::NonNull;
use std::alloc::System;
@@ -1185,6 +1184,54 @@ fn test_from_iter_specialization_with_iterator_adapters() {
}
#[test]
+fn test_in_place_specialization_step_up_down() {
+ fn assert_in_place_trait<T: InPlaceIterable>(_: &T) {}
+ let src = vec![[0u8; 4]; 256];
+ let srcptr = src.as_ptr();
+ let src_cap = src.capacity();
+ let iter = src.into_iter().flatten();
+ assert_in_place_trait(&iter);
+ let sink = iter.collect::<Vec<_>>();
+ let sinkptr = sink.as_ptr();
+ assert_eq!(srcptr as *const u8, sinkptr);
+ assert_eq!(src_cap * 4, sink.capacity());
+
+ let iter = sink.into_iter().array_chunks::<4>();
+ assert_in_place_trait(&iter);
+ let sink = iter.collect::<Vec<_>>();
+ let sinkptr = sink.as_ptr();
+ assert_eq!(srcptr, sinkptr);
+ assert_eq!(src_cap, sink.capacity());
+
+ let mut src: Vec<u8> = Vec::with_capacity(17);
+ let src_bytes = src.capacity();
+ src.resize(8, 0u8);
+ let sink: Vec<[u8; 4]> = src.into_iter().array_chunks::<4>().collect();
+ let sink_bytes = sink.capacity() * 4;
+ assert_ne!(src_bytes, sink_bytes);
+ assert_eq!(sink.len(), 2);
+
+ let mut src: Vec<[u8; 3]> = Vec::with_capacity(17);
+    src.resize(8, [0; 3]);
+ let iter = src.into_iter().map(|[a, b, _]| [a, b]);
+ assert_in_place_trait(&iter);
+ let sink: Vec<[u8; 2]> = iter.collect();
+ assert_eq!(sink.len(), 8);
+ assert!(sink.capacity() <= 25);
+
+ let src = vec![[0u8; 4]; 256];
+ let srcptr = src.as_ptr();
+    let iter = src.into_iter().flat_map(|a| a.into_iter().map(|b| b.wrapping_add(1)));
+ assert_in_place_trait(&iter);
+ let sink = iter.collect::<Vec<_>>();
+ assert_eq!(srcptr as *const u8, sink.as_ptr());
+}
+
+#[test]
fn test_from_iter_specialization_head_tail_drop() {
let drop_count: Vec<_> = (0..=2).map(|_| Rc::new(())).collect();
let src: Vec<_> = drop_count.iter().cloned().collect();
@@ -1933,7 +1980,7 @@ fn vec_macro_repeating_null_raw_fat_pointer() {
let vec = vec![null_raw_dyn; 1];
dbg!(ptr_metadata(vec[0]));
- assert!(vec[0] == null_raw_dyn);
+ assert!(std::ptr::eq(vec[0], null_raw_dyn));
// Polyfill for https://github.com/rust-lang/rfcs/pull/2580
diff --git a/library/core/benches/lib.rs b/library/core/benches/lib.rs
index 74ef0949b..fdefc9a71 100644
--- a/library/core/benches/lib.rs
+++ b/library/core/benches/lib.rs
@@ -5,6 +5,7 @@
#![feature(trusted_random_access)]
#![feature(iter_array_chunks)]
#![feature(iter_next_chunk)]
+#![feature(iter_advance_by)]
extern crate test;
diff --git a/library/core/benches/num/flt2dec/mod.rs b/library/core/benches/num/flt2dec/mod.rs
index 1a330ef5f..b1a9fc56b 100644
--- a/library/core/benches/num/flt2dec/mod.rs
+++ b/library/core/benches/num/flt2dec/mod.rs
@@ -6,7 +6,6 @@ mod strategy {
use core::num::flt2dec::MAX_SIG_DIGITS;
use core::num::flt2dec::{decode, DecodableFloat, Decoded, FullDecoded};
use std::io::Write;
-use std::vec::Vec;
use test::{black_box, Bencher};
pub fn decode_finite<T: DecodableFloat>(v: T) -> Decoded {
diff --git a/library/core/benches/num/flt2dec/strategy/dragon.rs b/library/core/benches/num/flt2dec/strategy/dragon.rs
index 377c99eff..babedc6c0 100644
--- a/library/core/benches/num/flt2dec/strategy/dragon.rs
+++ b/library/core/benches/num/flt2dec/strategy/dragon.rs
@@ -1,7 +1,6 @@
use super::super::*;
use core::num::flt2dec::strategy::dragon::*;
use std::mem::MaybeUninit;
-use test::{black_box, Bencher};
#[bench]
fn bench_small_shortest(b: &mut Bencher) {
diff --git a/library/core/benches/num/flt2dec/strategy/grisu.rs b/library/core/benches/num/flt2dec/strategy/grisu.rs
index 17d6b474a..b5bddb2c7 100644
--- a/library/core/benches/num/flt2dec/strategy/grisu.rs
+++ b/library/core/benches/num/flt2dec/strategy/grisu.rs
@@ -1,7 +1,6 @@
use super::super::*;
use core::num::flt2dec::strategy::grisu::*;
use std::mem::MaybeUninit;
-use test::{black_box, Bencher};
pub fn decode_finite<T: DecodableFloat>(v: T) -> Decoded {
match decode(v).1 {
diff --git a/library/core/benches/str.rs b/library/core/benches/str.rs
index 78865d81f..7d36eff3d 100644
--- a/library/core/benches/str.rs
+++ b/library/core/benches/str.rs
@@ -3,6 +3,7 @@ use test::{black_box, Bencher};
mod char_count;
mod corpora;
+mod iter;
#[bench]
fn str_validate_emoji(b: &mut Bencher) {
diff --git a/library/core/benches/str/iter.rs b/library/core/benches/str/iter.rs
new file mode 100644
index 000000000..58ae71fc1
--- /dev/null
+++ b/library/core/benches/str/iter.rs
@@ -0,0 +1,17 @@
+use super::corpora;
+use test::{black_box, Bencher};
+
+#[bench]
+fn chars_advance_by_1000(b: &mut Bencher) {
+ b.iter(|| black_box(corpora::ru::LARGE).chars().advance_by(1000));
+}
+
+#[bench]
+fn chars_advance_by_0010(b: &mut Bencher) {
+ b.iter(|| black_box(corpora::ru::LARGE).chars().advance_by(10));
+}
+
+#[bench]
+fn chars_advance_by_0001(b: &mut Bencher) {
+ b.iter(|| black_box(corpora::ru::LARGE).chars().advance_by(1));
+}
diff --git a/library/core/src/alloc/layout.rs b/library/core/src/alloc/layout.rs
index 65946e09f..9ef0a7d76 100644
--- a/library/core/src/alloc/layout.rs
+++ b/library/core/src/alloc/layout.rs
@@ -450,7 +450,11 @@ impl Layout {
return Err(LayoutError);
}
- let array_size = element_size * n;
+ // SAFETY: We just checked that we won't overflow `usize` when we multiply.
+ // This is a useless hint inside this function, but after inlining this helps
+ // deduplicate checks for whether the overall capacity is zero (e.g., in RawVec's
+ // allocation path) before/after this multiplication.
+ let array_size = unsafe { element_size.unchecked_mul(n) };
// SAFETY: We just checked above that the `array_size` will not
// exceed `isize::MAX` even when rounded up to the alignment.
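A hedged sketch of the surrounding logic: the bound is proven first, so the multiplication can be unchecked, which after inlining lets LLVM deduplicate the size checks repeated in callers such as RawVec (`align` is assumed to be a valid, nonzero power-of-two alignment):

fn array_size(element_size: usize, align: usize, n: usize) -> Option<usize> {
    // The same shape of bound Layout::array enforces: the size rounded up to
    // `align` must not exceed isize::MAX.
    let max = (isize::MAX as usize) - (align - 1);
    if element_size != 0 && n > max / element_size {
        return None;
    }
    // The real code uses `unsafe { element_size.unchecked_mul(n) }` here; the
    // check above guarantees the product cannot overflow.
    Some(element_size * n)
}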
diff --git a/library/core/src/any.rs b/library/core/src/any.rs
index 8f5404d97..e8f00e876 100644
--- a/library/core/src/any.rs
+++ b/library/core/src/any.rs
@@ -115,6 +115,11 @@ use crate::intrinsics;
pub trait Any: 'static {
/// Gets the `TypeId` of `self`.
///
+ /// If called on a `dyn Any` trait object
+ /// (or a trait object of a subtrait of `Any`),
+ /// this returns the `TypeId` of the underlying
+ /// concrete type, not that of `dyn Any` itself.
+ ///
/// # Examples
///
/// ```
@@ -690,44 +695,41 @@ pub const fn type_name<T: ?Sized>() -> &'static str {
intrinsics::type_name::<T>()
}
-/// Returns the name of the type of the pointed-to value as a string slice.
+/// Returns the type name of the pointed-to value as a string slice.
+///
/// This is the same as `type_name::<T>()`, but can be used where the type of a
/// variable is not easily available.
///
/// # Note
///
-/// This is intended for diagnostic use. The exact contents and format of the
-/// string are not specified, other than being a best-effort description of the
-/// type. For example, `type_name_of_val::<Option<String>>(None)` could return
-/// `"Option<String>"` or `"std::option::Option<std::string::String>"`, but not
-/// `"foobar"`. In addition, the output may change between versions of the
-/// compiler.
+/// Like [`type_name`], this is intended for diagnostic use and the exact output is not
+/// guaranteed. It provides a best-effort description, but the output may change between
+/// versions of the compiler.
///
-/// This function does not resolve trait objects,
-/// meaning that `type_name_of_val(&7u32 as &dyn Debug)`
-/// may return `"dyn Debug"`, but not `"u32"`.
+/// In short: use this for debugging, avoid using the output to affect program behavior. More
+/// information is available at [`type_name`].
///
-/// The type name should not be considered a unique identifier of a type;
-/// multiple types may share the same type name.
-///
-/// The current implementation uses the same infrastructure as compiler
-/// diagnostics and debuginfo, but this is not guaranteed.
+/// Additionally, this function does not resolve trait objects. This means that
+/// `type_name_of_val(&7u32 as &dyn Debug)` may return `"dyn Debug"`, but will not return `"u32"`
+/// at this time.
///
/// # Examples
///
/// Prints the default integer and float types.
///
/// ```rust
-/// #![feature(type_name_of_val)]
/// use std::any::type_name_of_val;
///
-/// let x = 1;
-/// println!("{}", type_name_of_val(&x));
-/// let y = 1.0;
-/// println!("{}", type_name_of_val(&y));
+/// let s = "foo";
+/// let x: i32 = 1;
+/// let y: f32 = 1.0;
+///
+/// assert!(type_name_of_val(&s).contains("str"));
+/// assert!(type_name_of_val(&x).contains("i32"));
+/// assert!(type_name_of_val(&y).contains("f32"));
/// ```
#[must_use]
-#[unstable(feature = "type_name_of_val", issue = "66359")]
+#[stable(feature = "type_name_of_val", since = "1.76.0")]
#[rustc_const_unstable(feature = "const_type_name", issue = "63084")]
pub const fn type_name_of_val<T: ?Sized>(_val: &T) -> &'static str {
type_name::<T>()
diff --git a/library/core/src/array/iter.rs b/library/core/src/array/iter.rs
index 321357a15..2b22488b8 100644
--- a/library/core/src/array/iter.rs
+++ b/library/core/src/array/iter.rs
@@ -4,7 +4,7 @@ use crate::num::NonZeroUsize;
use crate::{
fmt,
intrinsics::transmute_unchecked,
- iter::{self, ExactSizeIterator, FusedIterator, TrustedLen, TrustedRandomAccessNoCoerce},
+ iter::{self, FusedIterator, TrustedLen, TrustedRandomAccessNoCoerce},
mem::MaybeUninit,
ops::{IndexRange, Range},
ptr,
diff --git a/library/core/src/array/mod.rs b/library/core/src/array/mod.rs
index ebd4a8c05..34213637a 100644
--- a/library/core/src/array/mod.rs
+++ b/library/core/src/array/mod.rs
@@ -206,7 +206,7 @@ where
#[inline]
fn try_from(slice: &[T]) -> Result<[T; N], TryFromSliceError> {
- <&Self>::try_from(slice).map(|r| *r)
+ <&Self>::try_from(slice).copied()
}
}
@@ -297,7 +297,7 @@ impl<'a, T, const N: usize> TryFrom<&'a mut [T]> for &'a mut [T; N] {
/// ```
/// use std::hash::BuildHasher;
///
-/// let b = std::collections::hash_map::RandomState::new();
+/// let b = std::hash::RandomState::new();
/// let a: [u8; 3] = [0xa8, 0x3c, 0x09];
/// let s: &[u8] = &[0xa8, 0x3c, 0x09];
/// assert_eq!(b.hash_one(a), b.hash_one(s));
diff --git a/library/core/src/async_iter/async_iter.rs b/library/core/src/async_iter/async_iter.rs
index 12a47f9fc..8a45bd36f 100644
--- a/library/core/src/async_iter/async_iter.rs
+++ b/library/core/src/async_iter/async_iter.rs
@@ -13,6 +13,7 @@ use crate::task::{Context, Poll};
#[unstable(feature = "async_iterator", issue = "79024")]
#[must_use = "async iterators do nothing unless polled"]
#[doc(alias = "Stream")]
+#[cfg_attr(not(bootstrap), lang = "async_iterator")]
pub trait AsyncIterator {
/// The type of items yielded by the async iterator.
type Item;
@@ -109,3 +110,27 @@ where
(**self).size_hint()
}
}
+
+#[unstable(feature = "async_gen_internals", issue = "none")]
+impl<T> Poll<Option<T>> {
+ /// A helper function for internal desugaring -- produces `Ready(Some(t))`,
+ /// which corresponds to the async iterator yielding a value.
+ #[unstable(feature = "async_gen_internals", issue = "none")]
+ #[cfg_attr(not(bootstrap), lang = "AsyncGenReady")]
+ pub fn async_gen_ready(t: T) -> Self {
+ Poll::Ready(Some(t))
+ }
+
+ /// A helper constant for internal desugaring -- produces `Pending`,
+ /// which corresponds to the async iterator pending on an `.await`.
+ #[unstable(feature = "async_gen_internals", issue = "none")]
+ #[cfg_attr(not(bootstrap), lang = "AsyncGenPending")]
+ // FIXME(gen_blocks): This probably could be deduplicated.
+ pub const PENDING: Self = Poll::Pending;
+
+ /// A helper constant for internal desugaring -- produces `Ready(None)`,
+ /// which corresponds to the async iterator finishing its iteration.
+ #[unstable(feature = "async_gen_internals", issue = "none")]
+ #[cfg_attr(not(bootstrap), lang = "AsyncGenFinished")]
+ pub const FINISHED: Self = Poll::Ready(None);
+}
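Read together, these three helpers enumerate every shape an `async gen` state machine's poll result can take; a hedged classification sketch:

use core::task::Poll;

fn classify<T>(p: Poll<Option<T>>) -> &'static str {
    match p {
        Poll::Ready(Some(_)) => "yielded a value (AsyncGenReady)",
        Poll::Pending => "suspended at an .await (AsyncGenPending)",
        Poll::Ready(None) => "iteration finished (AsyncGenFinished)",
    }
}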
diff --git a/library/core/src/cell.rs b/library/core/src/cell.rs
index 0978b3c92..030040ba0 100644
--- a/library/core/src/cell.rs
+++ b/library/core/src/cell.rs
@@ -143,17 +143,17 @@
//!
//! ```
//! # #![allow(dead_code)]
-//! use std::cell::RefCell;
+//! use std::cell::OnceCell;
//!
//! struct Graph {
//! edges: Vec<(i32, i32)>,
-//! span_tree_cache: RefCell<Option<Vec<(i32, i32)>>>
+//! span_tree_cache: OnceCell<Vec<(i32, i32)>>
//! }
//!
//! impl Graph {
//! fn minimum_spanning_tree(&self) -> Vec<(i32, i32)> {
-//! self.span_tree_cache.borrow_mut()
-//! .get_or_insert_with(|| self.calc_span_tree())
+//! self.span_tree_cache
+//! .get_or_init(|| self.calc_span_tree())
//! .clone()
//! }
//!
@@ -409,8 +409,7 @@ impl<T> Cell<T> {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn set(&self, val: T) {
- let old = self.replace(val);
- drop(old);
+ self.replace(val);
}
/// Swaps the values of two `Cell`s.
diff --git a/library/core/src/char/methods.rs b/library/core/src/char/methods.rs
index 7ce33bdd4..a93b94867 100644
--- a/library/core/src/char/methods.rs
+++ b/library/core/src/char/methods.rs
@@ -1,6 +1,5 @@
//! impl char {}
-use crate::ascii;
use crate::slice;
use crate::str::from_utf8_unchecked_mut;
use crate::unicode::printable::is_printable;
diff --git a/library/core/src/clone.rs b/library/core/src/clone.rs
index d7ca9c22d..ba86334f9 100644
--- a/library/core/src/clone.rs
+++ b/library/core/src/clone.rs
@@ -210,8 +210,6 @@ pub struct AssertParamIsCopy<T: Copy + ?Sized> {
/// are implemented in `traits::SelectionContext::copy_clone_conditions()`
/// in `rustc_trait_selection`.
mod impls {
- use super::Clone;
-
macro_rules! impl_clone {
($($t:ty)*) => {
$(
diff --git a/library/core/src/cmp.rs b/library/core/src/cmp.rs
index fadf2fcc9..d7c41ac5c 100644
--- a/library/core/src/cmp.rs
+++ b/library/core/src/cmp.rs
@@ -224,11 +224,13 @@ use self::Ordering::*;
append_const_msg
)]
#[rustc_diagnostic_item = "PartialEq"]
+#[cfg_attr(not(bootstrap), const_trait)]
pub trait PartialEq<Rhs: ?Sized = Self> {
/// This method tests for `self` and `other` values to be equal, and is used
/// by `==`.
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg_attr(not(bootstrap), rustc_diagnostic_item = "cmp_partialeq_eq")]
fn eq(&self, other: &Rhs) -> bool;
/// This method tests for `!=`. The default implementation is almost always
@@ -236,6 +238,7 @@ pub trait PartialEq<Rhs: ?Sized = Self> {
#[inline]
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg_attr(not(bootstrap), rustc_diagnostic_item = "cmp_partialeq_ne")]
fn ne(&self, other: &Rhs) -> bool {
!self.eq(other)
}
@@ -1414,12 +1417,23 @@ mod impls {
macro_rules! partial_eq_impl {
($($t:ty)*) => ($(
#[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg(bootstrap)]
impl PartialEq for $t {
#[inline]
fn eq(&self, other: &$t) -> bool { (*self) == (*other) }
#[inline]
fn ne(&self, other: &$t) -> bool { (*self) != (*other) }
}
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+ #[cfg(not(bootstrap))]
+ impl const PartialEq for $t {
+ #[inline]
+ fn eq(&self, other: &$t) -> bool { (*self) == (*other) }
+ #[inline]
+ fn ne(&self, other: &$t) -> bool { (*self) != (*other) }
+ }
)*)
}
diff --git a/library/core/src/convert/num.rs b/library/core/src/convert/num.rs
index b048b5135..08dc8f48d 100644
--- a/library/core/src/convert/num.rs
+++ b/library/core/src/convert/num.rs
@@ -1,4 +1,4 @@
-use super::{From, TryFrom};
+use super::TryFrom;
use crate::num::TryFromIntError;
mod private {
diff --git a/library/core/src/escape.rs b/library/core/src/escape.rs
index 24bb9ad1a..60b5df752 100644
--- a/library/core/src/escape.rs
+++ b/library/core/src/escape.rs
@@ -21,12 +21,16 @@ pub(crate) fn escape_ascii_into(output: &mut [ascii::Char; 4], byte: u8) -> Rang
b'\\' => backslash(ascii::Char::ReverseSolidus),
b'\'' => backslash(ascii::Char::Apostrophe),
b'\"' => backslash(ascii::Char::QuotationMark),
- _ => if let Some(a) = byte.as_ascii() && !byte.is_ascii_control() {
- ([a, ascii::Char::Null, ascii::Char::Null, ascii::Char::Null], 1)
- } else {
- let hi = HEX_DIGITS[usize::from(byte >> 4)];
- let lo = HEX_DIGITS[usize::from(byte & 0xf)];
- ([ascii::Char::ReverseSolidus, ascii::Char::SmallX, hi, lo], 4)
+ _ => {
+ if let Some(a) = byte.as_ascii()
+ && !byte.is_ascii_control()
+ {
+ ([a, ascii::Char::Null, ascii::Char::Null, ascii::Char::Null], 1)
+ } else {
+ let hi = HEX_DIGITS[usize::from(byte >> 4)];
+ let lo = HEX_DIGITS[usize::from(byte & 0xf)];
+ ([ascii::Char::ReverseSolidus, ascii::Char::SmallX, hi, lo], 4)
+ }
}
};
*output = data;
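The reworked branch keeps the same three-way outcome; a hedged model of the resulting escape lengths (not the actual std helper):

fn escaped_len(byte: u8) -> usize {
    match byte {
        b'\t' | b'\r' | b'\n' | b'\\' | b'\'' | b'"' => 2, // backslash escape
        b if b.is_ascii() && !b.is_ascii_control() => 1,   // printable ASCII, kept as-is
        _ => 4,                                            // \xNN hex escape
    }
}

// e.g. escaped_len(b'a') == 1, escaped_len(b'\n') == 2, escaped_len(0x07) == 4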
diff --git a/library/core/src/ffi/c_str.rs b/library/core/src/ffi/c_str.rs
index e7ec1fb73..bb839a71e 100644
--- a/library/core/src/ffi/c_str.rs
+++ b/library/core/src/ffi/c_str.rs
@@ -205,7 +205,7 @@ impl CStr {
/// * The memory pointed to by `ptr` must contain a valid nul terminator at the
/// end of the string.
///
- /// * `ptr` must be [valid] for reads of bytes up to and including the null terminator.
+ /// * `ptr` must be [valid] for reads of bytes up to and including the nul terminator.
/// This means in particular:
///
/// * The entire memory range of this `CStr` must be contained within a single allocated object!
@@ -415,7 +415,7 @@ impl CStr {
let mut i = bytes.len().saturating_sub(1);
assert!(!bytes.is_empty() && bytes[i] == 0, "input was not nul-terminated");
- // Ending null byte exists, skip to the rest.
+ // Ending nul byte exists, skip to the rest.
while i != 0 {
i -= 1;
let byte = bytes[i];
diff --git a/library/core/src/ffi/mod.rs b/library/core/src/ffi/mod.rs
index 6908c824f..7340ad90d 100644
--- a/library/core/src/ffi/mod.rs
+++ b/library/core/src/ffi/mod.rs
@@ -241,7 +241,6 @@ impl fmt::Debug for c_void {
),
all(target_arch = "aarch64", any(target_os = "macos", target_os = "ios", target_os = "tvos")),
target_family = "wasm",
- target_arch = "asmjs",
target_os = "uefi",
windows,
))]
@@ -270,7 +269,6 @@ pub struct VaListImpl<'f> {
),
all(target_arch = "aarch64", any(target_os = "macos", target_os = "ios", target_os = "tvos")),
target_family = "wasm",
- target_arch = "asmjs",
target_os = "uefi",
windows,
))]
@@ -395,7 +393,6 @@ pub struct VaList<'a, 'f: 'a> {
any(target_os = "macos", target_os = "ios", target_os = "tvos")
),
target_family = "wasm",
- target_arch = "asmjs",
target_os = "uefi",
windows,
))]
@@ -413,7 +410,6 @@ pub struct VaList<'a, 'f: 'a> {
not(any(target_os = "macos", target_os = "ios", target_os = "tvos"))
),
not(target_family = "wasm"),
- not(target_arch = "asmjs"),
not(target_os = "uefi"),
not(windows),
))]
@@ -431,7 +427,6 @@ pub struct VaList<'a, 'f: 'a> {
),
all(target_arch = "aarch64", any(target_os = "macos", target_os = "ios", target_os = "tvos")),
target_family = "wasm",
- target_arch = "asmjs",
target_os = "uefi",
windows,
))]
@@ -461,7 +456,6 @@ impl<'f> VaListImpl<'f> {
not(any(target_os = "macos", target_os = "ios", target_os = "tvos"))
),
not(target_family = "wasm"),
- not(target_arch = "asmjs"),
not(target_os = "uefi"),
not(windows),
))]
diff --git a/library/core/src/fmt/num.rs b/library/core/src/fmt/num.rs
index 4f42f73eb..ab2158394 100644
--- a/library/core/src/fmt/num.rs
+++ b/library/core/src/fmt/num.rs
@@ -15,7 +15,7 @@ trait DisplayInt:
fn zero() -> Self;
fn from_u8(u: u8) -> Self;
fn to_u8(&self) -> u8;
- fn to_u16(&self) -> u16;
+ #[cfg(not(any(target_pointer_width = "64", target_arch = "wasm32")))]
fn to_u32(&self) -> u32;
fn to_u64(&self) -> u64;
fn to_u128(&self) -> u128;
@@ -27,7 +27,7 @@ macro_rules! impl_int {
fn zero() -> Self { 0 }
fn from_u8(u: u8) -> Self { u as Self }
fn to_u8(&self) -> u8 { *self as u8 }
- fn to_u16(&self) -> u16 { *self as u16 }
+ #[cfg(not(any(target_pointer_width = "64", target_arch = "wasm32")))]
fn to_u32(&self) -> u32 { *self as u32 }
fn to_u64(&self) -> u64 { *self as u64 }
fn to_u128(&self) -> u128 { *self as u128 }
@@ -40,7 +40,7 @@ macro_rules! impl_uint {
fn zero() -> Self { 0 }
fn from_u8(u: u8) -> Self { u as Self }
fn to_u8(&self) -> u8 { *self as u8 }
- fn to_u16(&self) -> u16 { *self as u16 }
+ #[cfg(not(any(target_pointer_width = "64", target_arch = "wasm32")))]
fn to_u32(&self) -> u32 { *self as u32 }
fn to_u64(&self) -> u64 { *self as u64 }
fn to_u128(&self) -> u128 { *self as u128 }
@@ -309,7 +309,6 @@ macro_rules! impl_Exp {
n /= 10;
exponent += 1;
}
-
let (added_precision, subtracted_precision) = match f.precision() {
Some(fmt_prec) => {
// number of decimal digits minus 1
@@ -331,9 +330,15 @@ macro_rules! impl_Exp {
let rem = n % 10;
n /= 10;
exponent += 1;
- // round up last digit
- if rem >= 5 {
+                    // round up the last digit, rounding to even on a tie
+                    if rem > 5 || (rem == 5 && (n % 2 != 0 || subtracted_precision > 1)) {
                        n += 1;
+                        // if rounding carries the digit into the next power of ten,
+                        // drop the extra digit and adjust the exponent instead
+ if n.ilog10() > (n - 1).ilog10() {
+ n /= 10;
+ exponent += 1;
+ }
}
}
(n, exponent, exponent, added_precision)
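Hedged examples of the resulting behavior for integer exponential formatting (exact ties round to an even digit; a carry bumps the exponent):

fn main() {
    assert_eq!(format!("{:.1e}", 125), "1.2e2"); // tie, 2 is even: stays
    assert_eq!(format!("{:.1e}", 135), "1.4e2"); // tie, 3 is odd: rounds up
    assert_eq!(format!("{:.0e}", 95), "1e2");    // 9.5e1 carries to 1e2
}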
diff --git a/library/core/src/future/future.rs b/library/core/src/future/future.rs
index 8c7111cb3..71b9464ef 100644
--- a/library/core/src/future/future.rs
+++ b/library/core/src/future/future.rs
@@ -1,6 +1,5 @@
#![stable(feature = "futures_api", since = "1.36.0")]
-use crate::marker::Unpin;
use crate::ops;
use crate::pin::Pin;
use crate::task::{Context, Poll};
diff --git a/library/core/src/hash/mod.rs b/library/core/src/hash/mod.rs
index 35b757dc1..153971a59 100644
--- a/library/core/src/hash/mod.rs
+++ b/library/core/src/hash/mod.rs
@@ -12,8 +12,7 @@
//! # Examples
//!
//! ```rust
-//! use std::collections::hash_map::DefaultHasher;
-//! use std::hash::{Hash, Hasher};
+//! use std::hash::{DefaultHasher, Hash, Hasher};
//!
//! #[derive(Hash)]
//! struct Person {
@@ -46,8 +45,7 @@
//! the [`Hash`] trait:
//!
//! ```rust
-//! use std::collections::hash_map::DefaultHasher;
-//! use std::hash::{Hash, Hasher};
+//! use std::hash::{DefaultHasher, Hash, Hasher};
//!
//! struct Person {
//! id: u32,
@@ -194,8 +192,7 @@ pub trait Hash {
/// # Examples
///
/// ```
- /// use std::collections::hash_map::DefaultHasher;
- /// use std::hash::{Hash, Hasher};
+ /// use std::hash::{DefaultHasher, Hash, Hasher};
///
/// let mut hasher = DefaultHasher::new();
/// 7920.hash(&mut hasher);
@@ -224,8 +221,7 @@ pub trait Hash {
/// # Examples
///
/// ```
- /// use std::collections::hash_map::DefaultHasher;
- /// use std::hash::{Hash, Hasher};
+ /// use std::hash::{DefaultHasher, Hash, Hasher};
///
/// let mut hasher = DefaultHasher::new();
/// let numbers = [6, 28, 496, 8128];
@@ -300,8 +296,7 @@ pub use macros::Hash;
/// # Examples
///
/// ```
-/// use std::collections::hash_map::DefaultHasher;
-/// use std::hash::Hasher;
+/// use std::hash::{DefaultHasher, Hasher};
///
/// let mut hasher = DefaultHasher::new();
///
@@ -329,8 +324,7 @@ pub trait Hasher {
/// # Examples
///
/// ```
- /// use std::collections::hash_map::DefaultHasher;
- /// use std::hash::Hasher;
+ /// use std::hash::{DefaultHasher, Hasher};
///
/// let mut hasher = DefaultHasher::new();
/// hasher.write(b"Cool!");
@@ -347,8 +341,7 @@ pub trait Hasher {
/// # Examples
///
/// ```
- /// use std::collections::hash_map::DefaultHasher;
- /// use std::hash::Hasher;
+ /// use std::hash::{DefaultHasher, Hasher};
///
/// let mut hasher = DefaultHasher::new();
/// let data = [0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef];
@@ -627,8 +620,7 @@ impl<H: Hasher + ?Sized> Hasher for &mut H {
/// # Examples
///
/// ```
-/// use std::collections::hash_map::RandomState;
-/// use std::hash::{BuildHasher, Hasher};
+/// use std::hash::{BuildHasher, Hasher, RandomState};
///
/// let s = RandomState::new();
/// let mut hasher_1 = s.build_hasher();
@@ -656,8 +648,7 @@ pub trait BuildHasher {
/// # Examples
///
/// ```
- /// use std::collections::hash_map::RandomState;
- /// use std::hash::BuildHasher;
+ /// use std::hash::{BuildHasher, RandomState};
///
/// let s = RandomState::new();
/// let new_s = s.build_hasher();
@@ -690,7 +681,7 @@ pub trait BuildHasher {
/// }
///
/// // Then later, in a `#[test]` for the type...
- /// let bh = std::collections::hash_map::RandomState::new();
+ /// let bh = std::hash::RandomState::new();
/// assert_eq!(
/// bh.hash_one(OrderAmbivalentPair(1, 2)),
/// bh.hash_one(OrderAmbivalentPair(2, 1))
diff --git a/library/core/src/hash/sip.rs b/library/core/src/hash/sip.rs
index 6b9f2e842..78a232faa 100644
--- a/library/core/src/hash/sip.rs
+++ b/library/core/src/hash/sip.rs
@@ -14,7 +14,7 @@ use crate::ptr;
///
/// See: <https://131002.net/siphash>
#[unstable(feature = "hashmap_internals", issue = "none")]
-#[deprecated(since = "1.13.0", note = "use `std::collections::hash_map::DefaultHasher` instead")]
+#[deprecated(since = "1.13.0", note = "use `std::hash::DefaultHasher` instead")]
#[derive(Debug, Clone, Default)]
#[doc(hidden)]
pub struct SipHasher13 {
@@ -25,7 +25,7 @@ pub struct SipHasher13 {
///
/// See: <https://131002.net/siphash/>
#[unstable(feature = "hashmap_internals", issue = "none")]
-#[deprecated(since = "1.13.0", note = "use `std::collections::hash_map::DefaultHasher` instead")]
+#[deprecated(since = "1.13.0", note = "use `std::hash::DefaultHasher` instead")]
#[derive(Debug, Clone, Default)]
struct SipHasher24 {
hasher: Hasher<Sip24Rounds>,
@@ -44,7 +44,7 @@ struct SipHasher24 {
/// it is not intended for cryptographic purposes. As such, all
/// cryptographic uses of this implementation are _strongly discouraged_.
#[stable(feature = "rust1", since = "1.0.0")]
-#[deprecated(since = "1.13.0", note = "use `std::collections::hash_map::DefaultHasher` instead")]
+#[deprecated(since = "1.13.0", note = "use `std::hash::DefaultHasher` instead")]
#[derive(Debug, Clone, Default)]
pub struct SipHasher(SipHasher24);
@@ -147,10 +147,7 @@ impl SipHasher {
/// Creates a new `SipHasher` with the two initial keys set to 0.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
- #[deprecated(
- since = "1.13.0",
- note = "use `std::collections::hash_map::DefaultHasher` instead"
- )]
+ #[deprecated(since = "1.13.0", note = "use `std::hash::DefaultHasher` instead")]
#[rustc_const_unstable(feature = "const_hash", issue = "104061")]
#[must_use]
pub const fn new() -> SipHasher {
@@ -160,10 +157,7 @@ impl SipHasher {
/// Creates a `SipHasher` that is keyed off the provided keys.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
- #[deprecated(
- since = "1.13.0",
- note = "use `std::collections::hash_map::DefaultHasher` instead"
- )]
+ #[deprecated(since = "1.13.0", note = "use `std::hash::DefaultHasher` instead")]
#[rustc_const_unstable(feature = "const_hash", issue = "104061")]
#[must_use]
pub const fn new_with_keys(key0: u64, key1: u64) -> SipHasher {
@@ -175,10 +169,7 @@ impl SipHasher13 {
/// Creates a new `SipHasher13` with the two initial keys set to 0.
#[inline]
#[unstable(feature = "hashmap_internals", issue = "none")]
- #[deprecated(
- since = "1.13.0",
- note = "use `std::collections::hash_map::DefaultHasher` instead"
- )]
+ #[deprecated(since = "1.13.0", note = "use `std::hash::DefaultHasher` instead")]
#[rustc_const_unstable(feature = "const_hash", issue = "104061")]
pub const fn new() -> SipHasher13 {
SipHasher13::new_with_keys(0, 0)
@@ -187,10 +178,7 @@ impl SipHasher13 {
/// Creates a `SipHasher13` that is keyed off the provided keys.
#[inline]
#[unstable(feature = "hashmap_internals", issue = "none")]
- #[deprecated(
- since = "1.13.0",
- note = "use `std::collections::hash_map::DefaultHasher` instead"
- )]
+ #[deprecated(since = "1.13.0", note = "use `std::hash::DefaultHasher` instead")]
#[rustc_const_unstable(feature = "const_hash", issue = "104061")]
pub const fn new_with_keys(key0: u64, key1: u64) -> SipHasher13 {
SipHasher13 { hasher: Hasher::new_with_keys(key0, key1) }
diff --git a/library/core/src/internal_macros.rs b/library/core/src/internal_macros.rs
index 5774107f5..bf53b2245 100644
--- a/library/core/src/internal_macros.rs
+++ b/library/core/src/internal_macros.rs
@@ -31,6 +31,7 @@ macro_rules! forward_ref_binop {
type Output = <$t as $imp<$u>>::Output;
#[inline]
+ #[track_caller]
fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
@@ -41,6 +42,7 @@ macro_rules! forward_ref_binop {
type Output = <$t as $imp<$u>>::Output;
#[inline]
+ #[track_caller]
fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
@@ -51,6 +53,7 @@ macro_rules! forward_ref_binop {
type Output = <$t as $imp<$u>>::Output;
#[inline]
+ #[track_caller]
fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
@@ -69,6 +72,7 @@ macro_rules! forward_ref_op_assign {
#[$attr]
impl $imp<&$u> for $t {
#[inline]
+ #[track_caller]
fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
diff --git a/library/core/src/intrinsics.rs b/library/core/src/intrinsics.rs
index c5aef67b5..5107ba1a9 100644
--- a/library/core/src/intrinsics.rs
+++ b/library/core/src/intrinsics.rs
@@ -59,6 +59,7 @@ use crate::marker::Tuple;
use crate::mem;
pub mod mir;
+pub mod simd;
// These imports are used for simplifying intra-doc links
#[allow(unused_imports)]
@@ -341,6 +342,9 @@ extern "rust-intrinsic" {
/// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::load`].
#[rustc_nounwind]
pub fn atomic_load_relaxed<T: Copy>(src: *const T) -> T;
+ /// Do NOT use this intrinsic; "unordered" operations do not exist in our memory model!
+ /// In terms of the Rust Abstract Machine, this operation is equivalent to `src.read()`,
+ /// i.e., it performs a non-atomic read.
#[rustc_nounwind]
pub fn atomic_load_unordered<T: Copy>(src: *const T) -> T;
@@ -365,6 +369,9 @@ extern "rust-intrinsic" {
/// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::store`].
#[rustc_nounwind]
pub fn atomic_store_relaxed<T: Copy>(dst: *mut T, val: T);
+ /// Do NOT use this intrinsic; "unordered" operations do not exist in our memory model!
+ /// In terms of the Rust Abstract Machine, this operation is equivalent to `dst.write(val)`,
+ /// i.e., it performs a non-atomic write.
#[rustc_nounwind]
pub fn atomic_store_unordered<T: Copy>(dst: *mut T, val: T);
@@ -1900,6 +1907,7 @@ extern "rust-intrinsic" {
///
/// ```
/// #![feature(core_intrinsics)]
+ /// # #![allow(internal_features)]
///
/// use std::intrinsics::ctlz;
///
@@ -1912,6 +1920,7 @@ extern "rust-intrinsic" {
///
/// ```
/// #![feature(core_intrinsics)]
+ /// # #![allow(internal_features)]
///
/// use std::intrinsics::ctlz;
///
@@ -1933,6 +1942,7 @@ extern "rust-intrinsic" {
///
/// ```
/// #![feature(core_intrinsics)]
+ /// # #![allow(internal_features)]
///
/// use std::intrinsics::ctlz_nonzero;
///
@@ -1959,6 +1969,7 @@ extern "rust-intrinsic" {
///
/// ```
/// #![feature(core_intrinsics)]
+ /// # #![allow(internal_features)]
///
/// use std::intrinsics::cttz;
///
@@ -1971,6 +1982,7 @@ extern "rust-intrinsic" {
///
/// ```
/// #![feature(core_intrinsics)]
+ /// # #![allow(internal_features)]
///
/// use std::intrinsics::cttz;
///
@@ -1992,6 +2004,7 @@ extern "rust-intrinsic" {
///
/// ```
/// #![feature(core_intrinsics)]
+ /// # #![allow(internal_features)]
///
/// use std::intrinsics::cttz_nonzero;
///
@@ -2312,6 +2325,10 @@ extern "rust-intrinsic" {
/// Emits a `!nontemporal` store according to LLVM (see their docs).
/// Probably will never become stable.
+ ///
+ /// Do NOT use this intrinsic; "nontemporal" operations do not exist in our memory model!
+ /// It exists to support current stdarch, but the plan is to change stdarch and remove this intrinsic.
+ /// See <https://github.com/rust-lang/rust/issues/114582> for some more discussion.
#[rustc_nounwind]
pub fn nontemporal_store<T>(ptr: *mut T, val: T);
@@ -2453,6 +2470,7 @@ extern "rust-intrinsic" {
/// ```no_run
/// #![feature(const_eval_select)]
/// #![feature(core_intrinsics)]
+ /// # #![allow(internal_features)]
/// use std::hint::unreachable_unchecked;
/// use std::intrinsics::const_eval_select;
///
@@ -2487,12 +2505,6 @@ extern "rust-intrinsic" {
where
G: FnOnce<ARG, Output = RET>,
F: FnOnce<ARG, Output = RET>;
-
- /// This method creates a pointer to any `Some` value. If the argument is
- /// `None`, an invalid within-bounds pointer (that is still acceptable for
- /// constructing an empty slice) is returned.
- #[rustc_nounwind]
- pub fn option_payload_ptr<T>(arg: *const Option<T>) -> *const T;
}
// Some functions are defined here because they accidentally got made
@@ -2855,3 +2867,28 @@ pub const unsafe fn write_bytes<T>(dst: *mut T, val: u8, count: usize) {
write_bytes(dst, val, count)
}
}
+
+/// Inform Miri that a given pointer definitely has a certain alignment.
+#[cfg(miri)]
+pub(crate) const fn miri_promise_symbolic_alignment(ptr: *const (), align: usize) {
+ extern "Rust" {
+ /// Miri-provided extern function to promise that a given pointer is properly aligned for
+ /// "symbolic" alignment checks. Will fail if the pointer is not actually aligned or `align` is
+ /// not a power of two. Has no effect when alignment checks are concrete (which is the default).
+ fn miri_promise_symbolic_alignment(ptr: *const (), align: usize);
+ }
+
+ fn runtime(ptr: *const (), align: usize) {
+ // SAFETY: this call is always safe.
+ unsafe {
+ miri_promise_symbolic_alignment(ptr, align);
+ }
+ }
+
+ const fn compiletime(_ptr: *const (), _align: usize) {}
+
+ // SAFETY: the extra behavior at runtime is for UB checks only.
+ unsafe {
+ const_eval_select((ptr, align), compiletime, runtime);
+ }
+}
diff --git a/library/core/src/intrinsics/mir.rs b/library/core/src/intrinsics/mir.rs
index b26a17ec3..34a61e76f 100644
--- a/library/core/src/intrinsics/mir.rs
+++ b/library/core/src/intrinsics/mir.rs
@@ -110,15 +110,15 @@
//! let popped;
//!
//! {
-//! Call(_unused = Vec::push(v, value), pop)
+//! Call(_unused = Vec::push(v, value), pop, UnwindContinue())
//! }
//!
//! pop = {
-//! Call(popped = Vec::pop(v), drop)
+//! Call(popped = Vec::pop(v), drop, UnwindContinue())
//! }
//!
//! drop = {
-//! Drop(popped, ret)
+//! Drop(popped, ret, UnwindContinue())
//! }
//!
//! ret = {
@@ -193,7 +193,7 @@
//! 27 | | )
//! | |_____- binding declared here but left uninitialized
//!
-//! error: aborting due to previous error
+//! error: aborting due to 1 previous error
//!
//! For more information about this error, try `rustc --explain E0381`.
//! ```
@@ -238,10 +238,6 @@
//!
//! #### Terminators
//!
-//! Custom MIR does not currently support cleanup blocks or non-trivial unwind paths. As such, there
-//! are no resume and abort terminators, and terminators that might unwind do not have any way to
-//! indicate the unwind block.
-//!
//! - [`Goto`], [`Return`], [`Unreachable`] and [`Drop`](Drop()) have associated functions.
//! - `match some_int_operand` becomes a `SwitchInt`. Each arm should be `literal => basic_block`
//! - The exception is the last arm, which must be `_ => basic_block` and corresponds to the
@@ -260,7 +256,26 @@
/// Type representing basic blocks.
///
/// All terminators will have this type as a return type. It helps achieve some type safety.
-pub struct BasicBlock;
+#[rustc_diagnostic_item = "mir_basic_block"]
+pub enum BasicBlock {
+ /// A non-cleanup basic block.
+ Normal,
+ /// A basic block that lies on an unwind path.
+ Cleanup,
+}
+
+/// The reason we are terminating the process during unwinding.
+#[rustc_diagnostic_item = "mir_unwind_terminate_reason"]
+pub enum UnwindTerminateReason {
+ /// Unwinding is just not possible given the ABI of this function.
+ Abi,
+ /// We were already cleaning up for an ongoing unwind, and a *second*, *nested* unwind was
+ /// triggered by the drop glue.
+ InCleanup,
+}
+
+pub use UnwindTerminateReason::Abi as ReasonAbi;
+pub use UnwindTerminateReason::InCleanup as ReasonInCleanup;
macro_rules! define {
($name:literal, $( #[ $meta:meta ] )* fn $($sig:tt)*) => {
@@ -271,11 +286,41 @@ macro_rules! define {
}
}
+// Unwind actions
+define!(
+ "mir_unwind_continue",
+ /// An unwind action that continues unwinding.
+ fn UnwindContinue()
+);
+define!(
+ "mir_unwind_unreachable",
+ /// An unwind action that triggers undefined behaviour.
+ fn UnwindUnreachable() -> BasicBlock
+);
+define!(
+ "mir_unwind_terminate",
+ /// An unwind action that terminates the execution.
+ ///
+ /// `UnwindTerminate` can also be used as a terminator.
+ fn UnwindTerminate(reason: UnwindTerminateReason)
+);
+define!(
+ "mir_unwind_cleanup",
+    /// An unwind action that continues execution in a given basic block.
+ fn UnwindCleanup(goto: BasicBlock)
+);
+
+// Terminators
define!("mir_return", fn Return() -> BasicBlock);
define!("mir_goto", fn Goto(destination: BasicBlock) -> BasicBlock);
define!("mir_unreachable", fn Unreachable() -> BasicBlock);
-define!("mir_drop", fn Drop<T>(place: T, goto: BasicBlock));
-define!("mir_call", fn Call(call: (), goto: BasicBlock));
+define!("mir_drop", fn Drop<T, U>(place: T, goto: BasicBlock, unwind_action: U));
+define!("mir_call", fn Call<U>(call: (), goto: BasicBlock, unwind_action: U));
+define!("mir_unwind_resume",
+ /// A terminator that resumes the unwinding.
+ fn UnwindResume()
+);
+
define!("mir_storage_live", fn StorageLive<T>(local: T));
define!("mir_storage_dead", fn StorageDead<T>(local: T));
define!("mir_deinit", fn Deinit<T>(place: T));
@@ -382,16 +427,15 @@ pub macro mir {
}
$(
- $block_name:ident = {
+ $block_name:ident $(($block_cleanup:ident))? = {
$($block:tt)*
}
)*
) => {{
// First, we declare all basic blocks.
- $(
- let $block_name: ::core::intrinsics::mir::BasicBlock;
- )*
-
+ __internal_declare_basic_blocks!($(
+ $block_name $(($block_cleanup))?
+ )*);
{
// Now all locals
#[allow(non_snake_case)]
@@ -585,3 +629,17 @@ pub macro __internal_remove_let {
}
},
}
+
+/// Helper macro that declares the basic blocks.
+#[doc(hidden)]
+pub macro __internal_declare_basic_blocks {
+ () => {},
+ ($name:ident (cleanup) $($rest:tt)*) => {
+ let $name = ::core::intrinsics::mir::BasicBlock::Cleanup;
+ __internal_declare_basic_blocks!($($rest)*)
+ },
+ ($name:ident $($rest:tt)*) => {
+ let $name = ::core::intrinsics::mir::BasicBlock::Normal;
+ __internal_declare_basic_blocks!($($rest)*)
+ },
+}
diff --git a/library/core/src/intrinsics/simd.rs b/library/core/src/intrinsics/simd.rs
new file mode 100644
index 000000000..68c8a335b
--- /dev/null
+++ b/library/core/src/intrinsics/simd.rs
@@ -0,0 +1,473 @@
+//! SIMD compiler intrinsics.
+//!
+//! In this module, a "vector" is any `repr(simd)` type.
+
+extern "platform-intrinsic" {
+ /// Add two simd vectors elementwise.
+ ///
+ /// `T` must be a vector of integer or floating point primitive types.
+ pub fn simd_add<T>(x: T, y: T) -> T;
+
+ /// Subtract `rhs` from `lhs` elementwise.
+ ///
+ /// `T` must be a vector of integer or floating point primitive types.
+ pub fn simd_sub<T>(lhs: T, rhs: T) -> T;
+
+ /// Multiply two simd vectors elementwise.
+ ///
+ /// `T` must be a vector of integer or floating point primitive types.
+ pub fn simd_mul<T>(x: T, y: T) -> T;
+
+ /// Divide `lhs` by `rhs` elementwise.
+ ///
+ /// `T` must be a vector of integer or floating point primitive types.
+ ///
+ /// # Safety
+ /// For integers, `rhs` must not contain any zero elements.
+ /// Additionally for signed integers, `<int>::MIN / -1` is undefined behavior.
+ pub fn simd_div<T>(lhs: T, rhs: T) -> T;
+
+    /// Remainder of two vectors, elementwise.
+ ///
+ /// `T` must be a vector of integer or floating point primitive types.
+ ///
+ /// # Safety
+ /// For integers, `rhs` must not contain any zero elements.
+    /// Additionally for signed integers, `<int>::MIN % -1` is undefined behavior.
+ pub fn simd_rem<T>(lhs: T, rhs: T) -> T;
+
+ /// Elementwise vector left shift, with UB on overflow.
+ ///
+    /// Shift `lhs` left by `rhs`, shifting in zeros.
+ ///
+ /// `T` must be a vector of integer primitive types.
+ ///
+ /// # Safety
+ ///
+ /// Each element of `rhs` must be less than `<int>::BITS`.
+ pub fn simd_shl<T>(lhs: T, rhs: T) -> T;
+
+ /// Elementwise vector right shift, with UB on overflow.
+ ///
+ /// `T` must be a vector of integer primitive types.
+ ///
+ /// Shift `lhs` right by `rhs`, shifting in sign bits for signed types.
+ ///
+ /// # Safety
+ ///
+ /// Each element of `rhs` must be less than `<int>::BITS`.
+ pub fn simd_shr<T>(lhs: T, rhs: T) -> T;
+
+ /// Elementwise vector "and".
+ ///
+ /// `T` must be a vector of integer primitive types.
+ pub fn simd_and<T>(x: T, y: T) -> T;
+
+ /// Elementwise vector "or".
+ ///
+ /// `T` must be a vector of integer primitive types.
+ pub fn simd_or<T>(x: T, y: T) -> T;
+
+ /// Elementwise vector "exclusive or".
+ ///
+ /// `T` must be a vector of integer primitive types.
+ pub fn simd_xor<T>(x: T, y: T) -> T;
+
+ /// Numerically cast a vector, elementwise.
+ ///
+ /// `T` and `U` must be vectors of integer or floating point primitive types, and must have the
+ /// same length.
+ ///
+    /// When casting floats to integers, the result is truncated. Out-of-bounds results lead to UB.
+ /// When casting integers to floats, the result is rounded.
+ /// Otherwise, truncates or extends the value, maintaining the sign for signed integers.
+ ///
+ /// # Safety
+ /// Casting from integer types is always safe.
+ /// Casting between two float types is also always safe.
+ ///
+ /// Casting floats to integers truncates, following the same rules as `to_int_unchecked`.
+ /// Specifically, each element must:
+ /// * Not be `NaN`
+ /// * Not be infinite
+ /// * Be representable in the return type, after truncating off its fractional part
+ pub fn simd_cast<T, U>(x: T) -> U;
+
+ /// Numerically cast a vector, elementwise.
+ ///
+    /// `T` and `U` must be vectors of integer or floating point primitive types, and must have the
+ /// same length.
+ ///
+ /// Like `simd_cast`, but saturates float-to-integer conversions (NaN becomes 0).
+ /// This matches regular `as` and is always safe.
+ ///
+ /// When casting floats to integers, the result is truncated.
+ /// When casting integers to floats, the result is rounded.
+ /// Otherwise, truncates or extends the value, maintaining the sign for signed integers.
+ pub fn simd_as<T, U>(x: T) -> U;
+
+ /// Elementwise negation of a vector.
+ ///
+ /// `T` must be a vector of integer or floating-point primitive types.
+ ///
+    /// Rust panics for `-<int>::MIN` due to overflow, but it is not UB with this intrinsic.
+ pub fn simd_neg<T>(x: T) -> T;
+
+ /// Elementwise absolute value of a vector.
+ ///
+ /// `T` must be a vector of floating-point primitive types.
+ pub fn simd_fabs<T>(x: T) -> T;
+
+    /// Elementwise minimum of two vectors.
+ ///
+ /// `T` must be a vector of floating-point primitive types.
+ ///
+ /// Follows IEEE-754 `minNum` semantics.
+ pub fn simd_fmin<T>(x: T, y: T) -> T;
+
+    /// Elementwise maximum of two vectors.
+ ///
+ /// `T` must be a vector of floating-point primitive types.
+ ///
+ /// Follows IEEE-754 `maxNum` semantics.
+ pub fn simd_fmax<T>(x: T, y: T) -> T;
+
+ /// Tests elementwise equality of two vectors.
+ ///
+    /// `T` must be a vector of integer or floating point primitive types.
+ ///
+ /// `U` must be a vector of integers with the same number of elements and element size as `T`.
+ ///
+ /// Returns `0` for false and `!0` for true.
+ pub fn simd_eq<T, U>(x: T, y: T) -> U;
+
+    /// Tests elementwise inequality of two vectors.
+ ///
+    /// `T` must be a vector of integer or floating point primitive types.
+ ///
+ /// `U` must be a vector of integers with the same number of elements and element size as `T`.
+ ///
+ /// Returns `0` for false and `!0` for true.
+ pub fn simd_ne<T, U>(x: T, y: T) -> U;
+
+ /// Tests if `x` is less than `y`, elementwise.
+ ///
+    /// `T` must be a vector of integer or floating point primitive types.
+ ///
+ /// `U` must be a vector of integers with the same number of elements and element size as `T`.
+ ///
+ /// Returns `0` for false and `!0` for true.
+ pub fn simd_lt<T, U>(x: T, y: T) -> U;
+
+ /// Tests if `x` is less than or equal to `y`, elementwise.
+ ///
+    /// `T` must be a vector of integer or floating point primitive types.
+ ///
+ /// `U` must be a vector of integers with the same number of elements and element size as `T`.
+ ///
+ /// Returns `0` for false and `!0` for true.
+ pub fn simd_le<T, U>(x: T, y: T) -> U;
+
+ /// Tests if `x` is greater than `y`, elementwise.
+ ///
+    /// `T` must be a vector of integer or floating point primitive types.
+ ///
+ /// `U` must be a vector of integers with the same number of elements and element size as `T`.
+ ///
+ /// Returns `0` for false and `!0` for true.
+ pub fn simd_gt<T, U>(x: T, y: T) -> U;
+
+ /// Tests if `x` is greater than or equal to `y`, elementwise.
+ ///
+ /// `T` must be a vector of integer or floating-point primitive types.
+ ///
+ /// `U` must be a vector of integers with the same number of elements and element size as `T`.
+ ///
+ /// Returns `0` for false and `!0` for true.
+ pub fn simd_ge<T, U>(x: T, y: T) -> U;
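The mask encoding shared by all the comparison intrinsics can be modeled in scalar code (a sketch over hypothetical four-lane `i32` vectors, not the intrinsic itself):

```rust
fn simd_lt_model(x: [i32; 4], y: [i32; 4]) -> [i32; 4] {
    let mut out = [0i32; 4];
    for i in 0..4 {
        // True lanes become all-ones (`!0`), false lanes become zero.
        out[i] = if x[i] < y[i] { !0 } else { 0 };
    }
    out
}

fn main() {
    assert_eq!(simd_lt_model([1, 5, 3, 4], [2, 2, 3, 9]), [!0, 0, 0, !0]);
}
```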
+
+ /// Shuffle two vectors by const indices.
+ ///
+ /// `T` must be a vector.
+ ///
+ /// `U` must be a const array of `i32`s, i.e. it must either name a constant item or be given
+ /// as an inline `const {}` expression.
+ ///
+ /// `V` must be a vector with the same element type as `T` and the same length as `U`.
+ ///
+ /// Concatenates `x` and `y`, then returns a new vector such that each element is selected from
+ /// the concatenation by the matching index in `idx`.
+ pub fn simd_shuffle<T, U, V>(x: T, y: T, idx: U) -> V;
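A scalar model of the concatenate-then-index rule (using `usize` indices for brevity; the real `idx` is a const array of `i32`s):

```rust
fn simd_shuffle_model(x: [u32; 4], y: [u32; 4], idx: [usize; 4]) -> [u32; 4] {
    // Indices 0..4 select from `x`, 4..8 select from `y`.
    let concat: Vec<u32> = x.iter().chain(y.iter()).copied().collect();
    idx.map(|i| concat[i])
}

fn main() {
    assert_eq!(simd_shuffle_model([0, 1, 2, 3], [4, 5, 6, 7], [0, 4, 1, 5]), [0, 4, 1, 5]);
}
```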
+
+ /// Read a vector of pointers.
+ ///
+ /// `T` must be a vector.
+ ///
+ /// `U` must be a vector of pointers to the element type of `T`, with the same length as `T`.
+ ///
+ /// `V` must be a vector of integers with the same length as `T` (but any element size).
+ ///
+ /// For each pointer in `ptr`, if the corresponding value in `mask` is `!0`, read the pointer.
+ /// Otherwise if the corresponding value in `mask` is `0`, return the corresponding value from
+ /// `val`.
+ ///
+ /// # Safety
+ /// The pointers at unmasked positions of `ptr` must be readable as if by `<ptr>::read` (e.g.
+ /// aligned to the element type).
+ ///
+ /// `mask` must only contain `0` or `!0` values.
+ pub fn simd_gather<T, U, V>(val: T, ptr: U, mask: V) -> T;
+
+ /// Write to a vector of pointers.
+ ///
+ /// `T` must be a vector.
+ ///
+ /// `U` must be a vector of pointers to the element type of `T`, with the same length as `T`.
+ ///
+ /// `V` must be a vector of integers with the same length as `T` (but any element size).
+ ///
+ /// For each pointer in `ptr`, if the corresponding value in `mask` is `!0`, write the
+ /// corresponding value in `val` to the pointer.
+ /// Otherwise if the corresponding value in `mask` is `0`, do nothing.
+ ///
+ /// # Safety
+ /// The pointers at unmasked positions of `ptr` must be writeable as if by `<ptr>::write`
+ /// (e.g. aligned to the element type).
+ ///
+ /// `mask` must only contain `0` or `!0` values.
+ pub fn simd_scatter<T, U, V>(val: T, ptr: U, mask: V);
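A safe scalar model of the gather mask rule, with slice indices standing in for the pointer vector (illustrative only):

```rust
fn simd_gather_model(val: [i32; 4], src: &[i32], idx: [usize; 4], mask: [bool; 4]) -> [i32; 4] {
    let mut out = val;
    for lane in 0..4 {
        // `!0` lanes load through their pointer; `0` lanes keep the fallback from `val`.
        if mask[lane] {
            out[lane] = src[idx[lane]];
        }
    }
    out
}

fn main() {
    let src = [10, 20, 30, 40];
    let got = simd_gather_model([0; 4], &src, [3, 2, 1, 0], [true, false, true, false]);
    assert_eq!(got, [40, 0, 20, 0]);
}
```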
+
+ /// Read a vector of elements from a pointer, selected by a mask.
+ ///
+ /// `T` must be a vector.
+ ///
+ /// `U` must be a pointer to the element type of `T`.
+ ///
+ /// `V` must be a vector of integers with the same length as `T` (but any element size).
+ ///
+ /// For each element, if the corresponding value in `mask` is `!0`, read the element at the
+ /// corresponding offset from `ptr` (the first element from `ptr`, the second from
+ /// `ptr.wrapping_offset(1)`, and so on).
+ /// Otherwise if the corresponding value in `mask` is `0`, return the corresponding value from
+ /// `val`.
+ ///
+ /// # Safety
+ /// Unmasked elements must be readable at their offset from `ptr` as if by `<ptr>::read` (e.g.
+ /// aligned to the element type).
+ ///
+ /// `mask` must only contain `0` or `!0` values.
+ #[cfg(not(bootstrap))]
+ pub fn simd_masked_load<V, U, T>(mask: V, ptr: U, val: T) -> T;
+
+ /// Write a vector of elements to a pointer, selected by a mask.
+ ///
+ /// `T` must be a vector.
+ ///
+ /// `U` must be a pointer to the element type of `T`.
+ ///
+ /// `V` must be a vector of integers with the same length as `T` (but any element size).
+ ///
+ /// For each element, if the corresponding value in `mask` is `!0`, write the corresponding
+ /// value in `val` to the corresponding offset from `ptr`.
+ /// Otherwise if the corresponding value in `mask` is `0`, do nothing.
+ ///
+ /// # Safety
+ /// Unmasked elements must be writeable at their offset from `ptr` as if by `<ptr>::write`
+ /// (e.g. aligned to the element type).
+ ///
+ /// `mask` must only contain `0` or `!0` values.
+ #[cfg(not(bootstrap))]
+ pub fn simd_masked_store<V, U, T>(mask: V, ptr: U, val: T);
+
+ /// Add two simd vectors elementwise, with saturation.
+ ///
+ /// `T` must be a vector of integer primitive types.
+ pub fn simd_saturating_add<T>(x: T, y: T) -> T;
+
+ /// Subtract two simd vectors elementwise, with saturation.
+ ///
+ /// `T` must be a vector of integer primitive types.
+ ///
+ /// Subtract `rhs` from `lhs`.
+ pub fn simd_saturating_sub<T>(lhs: T, rhs: T) -> T;
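A scalar model of both saturating operations on a hypothetical `u8x4`, represented as plain arrays:

```rust
fn saturating_model(lhs: [u8; 4], rhs: [u8; 4]) -> ([u8; 4], [u8; 4]) {
    let mut add = [0u8; 4];
    let mut sub = [0u8; 4];
    for i in 0..4 {
        // Each lane clamps at the type's bounds instead of wrapping.
        add[i] = lhs[i].saturating_add(rhs[i]);
        sub[i] = lhs[i].saturating_sub(rhs[i]);
    }
    (add, sub)
}

fn main() {
    let (add, sub) = saturating_model([250, 5, 0, 7], [9, 9, 1, 7]);
    assert_eq!(add, [255, 14, 1, 14]);
    assert_eq!(sub, [241, 0, 0, 0]);
}
```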
+
+ /// Add elements within a vector from left to right.
+ ///
+ /// `T` must be a vector of integer or floating-point primitive types.
+ ///
+ /// `U` must be the element type of `T`.
+ ///
+ /// Starting with the value `y`, add the elements of `x` and accumulate.
+ pub fn simd_reduce_add_ordered<T, U>(x: T, y: U) -> U;
+
+ /// Multiply elements within a vector from left to right.
+ ///
+ /// `T` must be a vector of integer or floating-point primitive types.
+ ///
+ /// `U` must be the element type of `T`.
+ ///
+ /// Starting with the value `y`, multiply the elements of `x` and accumulate.
+ pub fn simd_reduce_mul_ordered<T, U>(x: T, y: U) -> U;
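The "ordered" reductions are left folds seeded with `y`; a scalar sketch:

```rust
fn reduce_add_ordered_model(x: [f32; 4], y: f32) -> f32 {
    // ((((y + x0) + x1) + x2) + x3): the strict left-to-right order matters for floats.
    x.iter().fold(y, |acc, &e| acc + e)
}

fn main() {
    assert_eq!(reduce_add_ordered_model([1.0, 2.0, 3.0, 4.0], 10.0), 20.0);
}
```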
+
+ /// Check if all mask values are true.
+ ///
+ /// `T` must be a vector of integer primitive types.
+ ///
+ /// # Safety
+ /// `x` must contain only `0` or `!0`.
+ pub fn simd_reduce_all<T>(x: T) -> bool;
+
+ /// Check if any mask value is true.
+ ///
+ /// `T` must be a vector of integer primitive types.
+ ///
+ /// # Safety
+ /// `x` must contain only `0` or `!0`.
+ pub fn simd_reduce_any<T>(x: T) -> bool;
+
+ /// Return the maximum element of a vector.
+ ///
+ /// `T` must be a vector of integer or floating-point primitive types.
+ ///
+ /// `U` must be the element type of `T`.
+ ///
+ /// For floating-point values, uses IEEE-754 `maxNum`.
+ pub fn simd_reduce_max<T, U>(x: T) -> U;
+
+ /// Return the minimum element of a vector.
+ ///
+ /// `T` must be a vector of integer or floating-point primitive types.
+ ///
+ /// `U` must be the element type of `T`.
+ ///
+ /// For floating-point values, uses IEEE-754 `minNum`.
+ pub fn simd_reduce_min<T, U>(x: T) -> U;
+
+ /// Logical "and" all elements together.
+ ///
+ /// `T` must be a vector of integer or floating-point primitive types.
+ ///
+ /// `U` must be the element type of `T`.
+ pub fn simd_reduce_and<T, U>(x: T) -> U;
+
+ /// Logical "or" all elements together.
+ ///
+ /// `T` must be a vector of integer or floating-point primitive types.
+ ///
+ /// `U` must be the element type of `T`.
+ pub fn simd_reduce_or<T, U>(x: T) -> U;
+
+ /// Logical "exclusive or" all elements together.
+ ///
+ /// `T` must be a vector of integer or floating-point primitive types.
+ ///
+ /// `U` must be the element type of `T`.
+ pub fn simd_reduce_xor<T, U>(x: T) -> U;
+
+ /// Truncate an integer vector to a bitmask.
+ ///
+ /// `T` must be an integer vector.
+ ///
+ /// `U` must be either the smallest unsigned integer with at least as many bits as the length
+ /// of `T`, or the smallest array of `u8` with as many bits as the length of `T`.
+ ///
+ /// Each element is truncated to a single bit and packed into the result.
+ ///
+ /// No matter whether the output is an array or an unsigned integer, it is treated as a single
+ /// contiguous list of bits. The bitmask is always packed on the least-significant side of the
+ /// output, and padded with 0s in the most-significant bits. The order of the bits depends on
+ /// endianness:
+ ///
+ /// * On little endian, the least significant bit corresponds to the first vector element.
+ /// * On big endian, the least significant bit corresponds to the last vector element.
+ ///
+ /// For example, `[!0, 0, !0, !0]` packs to `0b1101` on little endian and `0b1011` on big
+ /// endian.
+ ///
+ /// To consider a larger example, `[!0, 0, 0, 0, 0, 0, 0, 0, !0, !0, 0, 0, 0, 0, !0, 0]` packs
+ /// to `[0b00000001, 0b01000011]` or `0b0100001100000001` on little endian, and `[0b10000000,
+ /// 0b11000010]` or `0b1000000011000010` on big endian.
+ ///
+ /// # Safety
+ /// `x` must contain only `0` and `!0`.
+ pub fn simd_bitmask<T, U>(x: T) -> U;
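A scalar model of the little-endian packing rule, reproducing the doc's first example (not the intrinsic itself):

```rust
fn simd_bitmask_model_le(mask: [i8; 4]) -> u8 {
    let mut bits = 0u8;
    for (i, &lane) in mask.iter().enumerate() {
        // On little endian, lane i maps to bit i; `!0` lanes contribute a 1 bit.
        if lane == !0 {
            bits |= 1 << i;
        }
    }
    bits
}

fn main() {
    assert_eq!(simd_bitmask_model_le([!0, 0, !0, !0]), 0b1101);
}
```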
+
+ /// Select elements from a mask.
+ ///
+ /// `M` must be an integer vector.
+ ///
+ /// `T` must be a vector with the same number of elements as `M`.
+ ///
+ /// For each element, if the corresponding value in `mask` is `!0`, select the element from
+ /// `if_true`. If the corresponding value in `mask` is `0`, select the element from
+ /// `if_false`.
+ ///
+ /// # Safety
+ /// `mask` must only contain `0` and `!0`.
+ pub fn simd_select<M, T>(mask: M, if_true: T, if_false: T) -> T;
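A scalar model of the lane-wise ternary that `simd_select` performs (booleans stand in for the `0`/`!0` mask lanes):

```rust
fn simd_select_model(mask: [bool; 4], if_true: [u32; 4], if_false: [u32; 4]) -> [u32; 4] {
    let mut out = [0u32; 4];
    for i in 0..4 {
        out[i] = if mask[i] { if_true[i] } else { if_false[i] };
    }
    out
}

fn main() {
    let picked = simd_select_model([true, false, false, true], [1, 2, 3, 4], [9, 9, 9, 9]);
    assert_eq!(picked, [1, 9, 9, 4]);
}
```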
+
+ /// Select elements from a bitmask.
+ ///
+ /// `M` must be an unsigned integer or array of `u8`, matching `simd_bitmask`.
+ ///
+ /// `T` must be a vector.
+ ///
+ /// For each element, if the bit in `mask` is `1`, select the element from
+ /// `if_true`. If the corresponding bit in `mask` is `0`, select the element from
+ /// `if_false`.
+ ///
+ /// The bitmask bit order matches `simd_bitmask`.
+ ///
+ /// # Safety
+ /// Padding bits must be all zero.
+ pub fn simd_select_bitmask<M, T>(m: M, yes: T, no: T) -> T;
+
+ /// Elementwise calculates the offset from a pointer vector, potentially wrapping.
+ ///
+ /// `T` must be a vector of pointers.
+ ///
+ /// `U` must be a vector of `isize` or `usize` with the same number of elements as `T`.
+ ///
+ /// Operates as if by `<ptr>::wrapping_offset`.
+ pub fn simd_arith_offset<T, U>(ptr: T, offset: U) -> T;
+
+ /// Cast a vector of pointers.
+ ///
+ /// `T` and `U` must be vectors of pointers with the same number of elements.
+ pub fn simd_cast_ptr<T, U>(ptr: T) -> U;
+
+ /// Expose a vector of pointers as a vector of addresses.
+ ///
+ /// `T` must be a vector of pointers.
+ ///
+ /// `U` must be a vector of `usize` with the same length as `T`.
+ pub fn simd_expose_addr<T, U>(ptr: T) -> U;
+
+ /// Create a vector of pointers from a vector of addresses.
+ ///
+ /// `T` must be a vector of `usize`.
+ ///
+ /// `U` must be a vector of pointers, with the same length as `T`.
+ pub fn simd_from_exposed_addr<T, U>(addr: T) -> U;
+
+ /// Swap bytes of each element.
+ ///
+ /// `T` must be a vector of integers.
+ pub fn simd_bswap<T>(x: T) -> T;
+
+ /// Reverse bits of each element.
+ ///
+ /// `T` must be a vector of integers.
+ pub fn simd_bitreverse<T>(x: T) -> T;
+
+ /// Count the leading zeros of each element.
+ ///
+ /// `T` must be a vector of integers.
+ pub fn simd_ctlz<T>(x: T) -> T;
+
+ /// Count the trailing zeros of each element.
+ ///
+ /// `T` must be a vector of integers.
+ pub fn simd_cttz<T>(x: T) -> T;
+}
diff --git a/library/core/src/iter/adapters/array_chunks.rs b/library/core/src/iter/adapters/array_chunks.rs
index 13719c727..946d0051c 100644
--- a/library/core/src/iter/adapters/array_chunks.rs
+++ b/library/core/src/iter/adapters/array_chunks.rs
@@ -1,5 +1,9 @@
use crate::array;
-use crate::iter::{ByRefSized, FusedIterator, Iterator, TrustedRandomAccessNoCoerce};
+use crate::iter::adapters::SourceIter;
+use crate::iter::{
+ ByRefSized, FusedIterator, InPlaceIterable, TrustedFused, TrustedRandomAccessNoCoerce,
+};
+use crate::num::NonZeroUsize;
use crate::ops::{ControlFlow, NeverShortCircuit, Try};
/// An iterator over `N` elements of the iterator at a time.
@@ -159,6 +163,9 @@ where
#[unstable(feature = "iter_array_chunks", reason = "recently added", issue = "100450")]
impl<I, const N: usize> FusedIterator for ArrayChunks<I, N> where I: FusedIterator {}
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<I, const N: usize> TrustedFused for ArrayChunks<I, N> where I: TrustedFused + Iterator {}
+
#[unstable(feature = "iter_array_chunks", reason = "recently added", issue = "100450")]
impl<I, const N: usize> ExactSizeIterator for ArrayChunks<I, N>
where
@@ -229,3 +236,28 @@ where
accum
}
}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I, const N: usize> SourceIter for ArrayChunks<I, N>
+where
+ I: SourceIter + Iterator,
+{
+ type Source = I::Source;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut I::Source {
+ // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+ unsafe { SourceIter::as_inner(&mut self.iter) }
+ }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I: InPlaceIterable + Iterator, const N: usize> InPlaceIterable for ArrayChunks<I, N> {
+ const EXPAND_BY: Option<NonZeroUsize> = I::EXPAND_BY;
+ const MERGE_BY: Option<NonZeroUsize> = const {
+ match (I::MERGE_BY, NonZeroUsize::new(N)) {
+ (Some(m), Some(n)) => m.checked_mul(n),
+ _ => None,
+ }
+ };
+}
diff --git a/library/core/src/iter/adapters/chain.rs b/library/core/src/iter/adapters/chain.rs
index 26aa959e6..c748336cd 100644
--- a/library/core/src/iter/adapters/chain.rs
+++ b/library/core/src/iter/adapters/chain.rs
@@ -1,4 +1,4 @@
-use crate::iter::{DoubleEndedIterator, FusedIterator, Iterator, TrustedLen};
+use crate::iter::{FusedIterator, TrustedLen};
use crate::num::NonZeroUsize;
use crate::ops::Try;
diff --git a/library/core/src/iter/adapters/copied.rs b/library/core/src/iter/adapters/copied.rs
index 8f6b2904e..7a2c9d839 100644
--- a/library/core/src/iter/adapters/copied.rs
+++ b/library/core/src/iter/adapters/copied.rs
@@ -193,7 +193,7 @@ where
T: Copy,
{
default fn spec_next_chunk(&mut self) -> Result<[T; N], array::IntoIter<T, N>> {
- array::iter_next_chunk(&mut self.map(|e| *e))
+ array::iter_next_chunk(&mut self.copied())
}
}
diff --git a/library/core/src/iter/adapters/enumerate.rs b/library/core/src/iter/adapters/enumerate.rs
index 00c1c377b..92f465ccd 100644
--- a/library/core/src/iter/adapters/enumerate.rs
+++ b/library/core/src/iter/adapters/enumerate.rs
@@ -1,7 +1,7 @@
use crate::iter::adapters::{
zip::try_get_unchecked, SourceIter, TrustedRandomAccess, TrustedRandomAccessNoCoerce,
};
-use crate::iter::{FusedIterator, InPlaceIterable, TrustedLen};
+use crate::iter::{FusedIterator, InPlaceIterable, TrustedFused, TrustedLen};
use crate::num::NonZeroUsize;
use crate::ops::Try;
@@ -243,6 +243,9 @@ where
#[stable(feature = "fused", since = "1.26.0")]
impl<I> FusedIterator for Enumerate<I> where I: FusedIterator {}
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<I: TrustedFused> TrustedFused for Enumerate<I> {}
+
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<I> TrustedLen for Enumerate<I> where I: TrustedLen {}
@@ -261,7 +264,10 @@ where
}
#[unstable(issue = "none", feature = "inplace_iteration")]
-unsafe impl<I: InPlaceIterable> InPlaceIterable for Enumerate<I> {}
+unsafe impl<I: InPlaceIterable> InPlaceIterable for Enumerate<I> {
+ const EXPAND_BY: Option<NonZeroUsize> = I::EXPAND_BY;
+ const MERGE_BY: Option<NonZeroUsize> = I::MERGE_BY;
+}
#[stable(feature = "default_iters", since = "1.70.0")]
impl<I: Default> Default for Enumerate<I> {
diff --git a/library/core/src/iter/adapters/filter.rs b/library/core/src/iter/adapters/filter.rs
index 723657b9e..882f3e3bc 100644
--- a/library/core/src/iter/adapters/filter.rs
+++ b/library/core/src/iter/adapters/filter.rs
@@ -1,5 +1,6 @@
use crate::fmt;
-use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable};
+use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable, TrustedFused};
+use crate::num::NonZeroUsize;
use crate::ops::Try;
use core::array;
use core::mem::{ManuallyDrop, MaybeUninit};
@@ -189,6 +190,9 @@ where
#[stable(feature = "fused", since = "1.26.0")]
impl<I: FusedIterator, P> FusedIterator for Filter<I, P> where P: FnMut(&I::Item) -> bool {}
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<I: TrustedFused, F> TrustedFused for Filter<I, F> {}
+
#[unstable(issue = "none", feature = "inplace_iteration")]
unsafe impl<P, I> SourceIter for Filter<I, P>
where
@@ -204,4 +208,7 @@ where
}
#[unstable(issue = "none", feature = "inplace_iteration")]
-unsafe impl<I: InPlaceIterable, P> InPlaceIterable for Filter<I, P> where P: FnMut(&I::Item) -> bool {}
+unsafe impl<I: InPlaceIterable, P> InPlaceIterable for Filter<I, P> {
+ const EXPAND_BY: Option<NonZeroUsize> = I::EXPAND_BY;
+ const MERGE_BY: Option<NonZeroUsize> = I::MERGE_BY;
+}
diff --git a/library/core/src/iter/adapters/filter_map.rs b/library/core/src/iter/adapters/filter_map.rs
index 693479977..81ac0eaa6 100644
--- a/library/core/src/iter/adapters/filter_map.rs
+++ b/library/core/src/iter/adapters/filter_map.rs
@@ -1,5 +1,6 @@
-use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable};
+use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable, TrustedFused};
use crate::mem::{ManuallyDrop, MaybeUninit};
+use crate::num::NonZeroUsize;
use crate::ops::{ControlFlow, Try};
use crate::{array, fmt};
@@ -97,9 +98,11 @@ where
// SAFETY: Loop conditions ensure the index is in bounds.
unsafe {
- let opt_payload_at = core::intrinsics::option_payload_ptr(&val);
+ let opt_payload_at: *const MaybeUninit<B> = (&val as *const Option<B>)
+ .byte_add(core::mem::offset_of!(Option<B>, Some.0))
+ .cast();
let dst = guard.array.as_mut_ptr().add(idx);
- crate::ptr::copy_nonoverlapping(opt_payload_at.cast(), dst, 1);
+ crate::ptr::copy_nonoverlapping(opt_payload_at, dst, 1);
crate::mem::forget(val);
};
@@ -188,6 +191,9 @@ where
#[stable(feature = "fused", since = "1.26.0")]
impl<B, I: FusedIterator, F> FusedIterator for FilterMap<I, F> where F: FnMut(I::Item) -> Option<B> {}
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<I: TrustedFused, F> TrustedFused for FilterMap<I, F> {}
+
#[unstable(issue = "none", feature = "inplace_iteration")]
unsafe impl<I, F> SourceIter for FilterMap<I, F>
where
@@ -203,7 +209,7 @@ where
}
#[unstable(issue = "none", feature = "inplace_iteration")]
-unsafe impl<B, I: InPlaceIterable, F> InPlaceIterable for FilterMap<I, F> where
- F: FnMut(I::Item) -> Option<B>
-{
+unsafe impl<I: InPlaceIterable, F> InPlaceIterable for FilterMap<I, F> {
+ const EXPAND_BY: Option<NonZeroUsize> = I::EXPAND_BY;
+ const MERGE_BY: Option<NonZeroUsize> = I::MERGE_BY;
}
diff --git a/library/core/src/iter/adapters/flatten.rs b/library/core/src/iter/adapters/flatten.rs
index eee6e5bcc..6122332da 100644
--- a/library/core/src/iter/adapters/flatten.rs
+++ b/library/core/src/iter/adapters/flatten.rs
@@ -1,7 +1,13 @@
-use crate::fmt;
-use crate::iter::{DoubleEndedIterator, Fuse, FusedIterator, Iterator, Map, TrustedLen};
+use crate::iter::adapters::SourceIter;
+use crate::iter::{
+ Cloned, Copied, Filter, FilterMap, Fuse, FusedIterator, InPlaceIterable, Map, TrustedFused,
+ TrustedLen,
+};
+use crate::iter::{Once, OnceWith};
use crate::num::NonZeroUsize;
use crate::ops::{ControlFlow, Try};
+use crate::result;
+use crate::{array, fmt, option};
/// An iterator that maps each element to an iterator, and yields the elements
/// of the produced iterators.
@@ -145,6 +151,91 @@ where
{
}
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I, U, F> InPlaceIterable for FlatMap<I, U, F>
+where
+ I: InPlaceIterable,
+ U: BoundedSize + IntoIterator,
+{
+ const EXPAND_BY: Option<NonZeroUsize> = const {
+ match (I::EXPAND_BY, U::UPPER_BOUND) {
+ (Some(m), Some(n)) => m.checked_mul(n),
+ _ => None,
+ }
+ };
+ const MERGE_BY: Option<NonZeroUsize> = I::MERGE_BY;
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I, U, F> SourceIter for FlatMap<I, U, F>
+where
+ I: SourceIter + TrustedFused,
+ U: IntoIterator,
+{
+ type Source = I::Source;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut I::Source {
+ // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+ unsafe { SourceIter::as_inner(&mut self.inner.iter) }
+ }
+}
+
+/// Marker trait for iterators/iterables which have a statically known upper
+/// bound of the number of items they can produce.
+///
+/// # Safety
+///
+/// Implementations must not yield more elements than indicated by UPPER_BOUND if it is `Some`.
+/// Used in specializations. Implementations must not be conditional on lifetimes or
+/// user-implementable traits.
+#[rustc_specialization_trait]
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe trait BoundedSize {
+ const UPPER_BOUND: Option<NonZeroUsize> = NonZeroUsize::new(1);
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<T> BoundedSize for Option<T> {}
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<T> BoundedSize for option::IntoIter<T> {}
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<T, U> BoundedSize for Result<T, U> {}
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<T> BoundedSize for result::IntoIter<T> {}
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<T> BoundedSize for Once<T> {}
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<T> BoundedSize for OnceWith<T> {}
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<T, const N: usize> BoundedSize for [T; N] {
+ const UPPER_BOUND: Option<NonZeroUsize> = NonZeroUsize::new(N);
+}
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<T, const N: usize> BoundedSize for array::IntoIter<T, N> {
+ const UPPER_BOUND: Option<NonZeroUsize> = NonZeroUsize::new(N);
+}
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I: BoundedSize, P> BoundedSize for Filter<I, P> {
+ const UPPER_BOUND: Option<NonZeroUsize> = I::UPPER_BOUND;
+}
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I: BoundedSize, P> BoundedSize for FilterMap<I, P> {
+ const UPPER_BOUND: Option<NonZeroUsize> = I::UPPER_BOUND;
+}
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I: BoundedSize, F> BoundedSize for Map<I, F> {
+ const UPPER_BOUND: Option<NonZeroUsize> = I::UPPER_BOUND;
+}
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I: BoundedSize> BoundedSize for Copied<I> {
+ const UPPER_BOUND: Option<NonZeroUsize> = I::UPPER_BOUND;
+}
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I: BoundedSize> BoundedSize for Cloned<I> {
+ const UPPER_BOUND: Option<NonZeroUsize> = I::UPPER_BOUND;
+}
+
/// An iterator that flattens one level of nesting in an iterator of things
/// that can be turned into iterators.
///
@@ -289,6 +380,36 @@ where
{
}
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I> InPlaceIterable for Flatten<I>
+where
+ I: InPlaceIterable + Iterator,
+ <I as Iterator>::Item: IntoIterator + BoundedSize,
+{
+ const EXPAND_BY: Option<NonZeroUsize> = const {
+ match (I::EXPAND_BY, I::Item::UPPER_BOUND) {
+ (Some(m), Some(n)) => m.checked_mul(n),
+ _ => None,
+ }
+ };
+ const MERGE_BY: Option<NonZeroUsize> = I::MERGE_BY;
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I> SourceIter for Flatten<I>
+where
+ I: SourceIter + TrustedFused + Iterator,
+ <I as Iterator>::Item: IntoIterator,
+{
+ type Source = I::Source;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut I::Source {
+ // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+ unsafe { SourceIter::as_inner(&mut self.inner.iter) }
+ }
+}
+
#[stable(feature = "default_iters", since = "1.70.0")]
impl<I> Default for Flatten<I>
where
diff --git a/library/core/src/iter/adapters/fuse.rs b/library/core/src/iter/adapters/fuse.rs
index b1fa4f921..462a7e877 100644
--- a/library/core/src/iter/adapters/fuse.rs
+++ b/library/core/src/iter/adapters/fuse.rs
@@ -1,8 +1,8 @@
use crate::intrinsics;
use crate::iter::adapters::zip::try_get_unchecked;
+use crate::iter::adapters::SourceIter;
use crate::iter::{
- DoubleEndedIterator, ExactSizeIterator, FusedIterator, TrustedLen, TrustedRandomAccess,
- TrustedRandomAccessNoCoerce,
+ FusedIterator, TrustedFused, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce,
};
use crate::ops::Try;
@@ -29,6 +29,9 @@ impl<I> Fuse<I> {
#[stable(feature = "fused", since = "1.26.0")]
impl<I> FusedIterator for Fuse<I> where I: Iterator {}
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<I> TrustedFused for Fuse<I> where I: TrustedFused {}
+
// Any specialized implementation here is made internal
// to avoid exposing default fns outside this trait.
#[stable(feature = "rust1", since = "1.0.0")]
@@ -418,6 +421,23 @@ where
}
}
+// This is used by Flatten's SourceIter impl
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I> SourceIter for Fuse<I>
+where
+ I: SourceIter + TrustedFused,
+{
+ type Source = I::Source;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut I::Source {
+ // SAFETY: unsafe function forwarding to unsafe function with the same requirements.
+ // TrustedFused guarantees that we'll never encounter a case where `self.iter` would
+ // be set to None.
+ unsafe { SourceIter::as_inner(self.iter.as_mut().unwrap_unchecked()) }
+ }
+}
+
#[inline]
fn and_then_or_clear<T, U>(opt: &mut Option<T>, f: impl FnOnce(&mut T) -> Option<U>) -> Option<U> {
let x = f(opt.as_mut()?);
diff --git a/library/core/src/iter/adapters/inspect.rs b/library/core/src/iter/adapters/inspect.rs
index 19839fdfe..fd2d830b6 100644
--- a/library/core/src/iter/adapters/inspect.rs
+++ b/library/core/src/iter/adapters/inspect.rs
@@ -1,5 +1,6 @@
use crate::fmt;
-use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable};
+use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable, TrustedFused};
+use crate::num::NonZeroUsize;
use crate::ops::Try;
/// An iterator that calls a function with a reference to each element before
@@ -148,6 +149,9 @@ where
#[stable(feature = "fused", since = "1.26.0")]
impl<I: FusedIterator, F> FusedIterator for Inspect<I, F> where F: FnMut(&I::Item) {}
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<I: TrustedFused, F> TrustedFused for Inspect<I, F> {}
+
#[unstable(issue = "none", feature = "inplace_iteration")]
unsafe impl<I, F> SourceIter for Inspect<I, F>
where
@@ -163,4 +167,7 @@ where
}
#[unstable(issue = "none", feature = "inplace_iteration")]
-unsafe impl<I: InPlaceIterable, F> InPlaceIterable for Inspect<I, F> where F: FnMut(&I::Item) {}
+unsafe impl<I: InPlaceIterable, F> InPlaceIterable for Inspect<I, F> {
+ const EXPAND_BY: Option<NonZeroUsize> = I::EXPAND_BY;
+ const MERGE_BY: Option<NonZeroUsize> = I::MERGE_BY;
+}
diff --git a/library/core/src/iter/adapters/map.rs b/library/core/src/iter/adapters/map.rs
index 31d02a4da..e27fc7257 100644
--- a/library/core/src/iter/adapters/map.rs
+++ b/library/core/src/iter/adapters/map.rs
@@ -2,7 +2,8 @@ use crate::fmt;
use crate::iter::adapters::{
zip::try_get_unchecked, SourceIter, TrustedRandomAccess, TrustedRandomAccessNoCoerce,
};
-use crate::iter::{FusedIterator, InPlaceIterable, TrustedLen, UncheckedIterator};
+use crate::iter::{FusedIterator, InPlaceIterable, TrustedFused, TrustedLen, UncheckedIterator};
+use crate::num::NonZeroUsize;
use crate::ops::Try;
/// An iterator that maps the values of `iter` with `f`.
@@ -179,6 +180,9 @@ where
#[stable(feature = "fused", since = "1.26.0")]
impl<B, I: FusedIterator, F> FusedIterator for Map<I, F> where F: FnMut(I::Item) -> B {}
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<I: TrustedFused, F> TrustedFused for Map<I, F> {}
+
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<B, I, F> TrustedLen for Map<I, F>
where
@@ -228,4 +232,7 @@ where
}
#[unstable(issue = "none", feature = "inplace_iteration")]
-unsafe impl<B, I: InPlaceIterable, F> InPlaceIterable for Map<I, F> where F: FnMut(I::Item) -> B {}
+unsafe impl<I: InPlaceIterable, F> InPlaceIterable for Map<I, F> {
+ const EXPAND_BY: Option<NonZeroUsize> = I::EXPAND_BY;
+ const MERGE_BY: Option<NonZeroUsize> = I::MERGE_BY;
+}
diff --git a/library/core/src/iter/adapters/map_while.rs b/library/core/src/iter/adapters/map_while.rs
index fbdeca4d4..bcae73cbe 100644
--- a/library/core/src/iter/adapters/map_while.rs
+++ b/library/core/src/iter/adapters/map_while.rs
@@ -1,5 +1,6 @@
use crate::fmt;
use crate::iter::{adapters::SourceIter, InPlaceIterable};
+use crate::num::NonZeroUsize;
use crate::ops::{ControlFlow, Try};
/// An iterator that only accepts elements while `predicate` returns `Some(_)`.
@@ -82,7 +83,7 @@ where
}
#[unstable(issue = "none", feature = "inplace_iteration")]
-unsafe impl<B, I: InPlaceIterable, P> InPlaceIterable for MapWhile<I, P> where
- P: FnMut(I::Item) -> Option<B>
-{
+unsafe impl<I: InPlaceIterable, P> InPlaceIterable for MapWhile<I, P> {
+ const EXPAND_BY: Option<NonZeroUsize> = I::EXPAND_BY;
+ const MERGE_BY: Option<NonZeroUsize> = I::MERGE_BY;
}
diff --git a/library/core/src/iter/adapters/map_windows.rs b/library/core/src/iter/adapters/map_windows.rs
index 3c0e80b25..5f39b2458 100644
--- a/library/core/src/iter/adapters/map_windows.rs
+++ b/library/core/src/iter/adapters/map_windows.rs
@@ -1,6 +1,6 @@
use crate::{
fmt,
- iter::{ExactSizeIterator, FusedIterator},
+ iter::FusedIterator,
mem::{self, MaybeUninit},
ptr,
};
diff --git a/library/core/src/iter/adapters/mod.rs b/library/core/src/iter/adapters/mod.rs
index 6f4fa7010..4037e2e28 100644
--- a/library/core/src/iter/adapters/mod.rs
+++ b/library/core/src/iter/adapters/mod.rs
@@ -1,4 +1,5 @@
-use crate::iter::{InPlaceIterable, Iterator};
+use crate::iter::InPlaceIterable;
+use crate::num::NonZeroUsize;
use crate::ops::{ChangeOutputType, ControlFlow, FromResidual, Residual, Try};
mod array_chunks;
@@ -119,8 +120,9 @@ pub unsafe trait SourceIter {
///
/// # Safety
///
- /// Implementations of must return the same mutable reference for their lifetime, unless
+ /// Implementations must return the same mutable reference for their lifetime, unless
/// replaced by a caller.
+ ///
/// Callers may only replace the reference when they stopped iteration and drop the
/// iterator pipeline after extracting the source.
///
@@ -228,7 +230,10 @@ where
// in order to return `Some(_)`. Since `iter` has type `I: InPlaceIterable` it's
// guaranteed that at least one item will be moved out from the underlying source.
#[unstable(issue = "none", feature = "inplace_iteration")]
-unsafe impl<I, T, R> InPlaceIterable for GenericShunt<'_, I, R> where
- I: Iterator<Item: Try<Output = T, Residual = R>> + InPlaceIterable
+unsafe impl<I, R> InPlaceIterable for GenericShunt<'_, I, R>
+where
+ I: InPlaceIterable,
{
+ const EXPAND_BY: Option<NonZeroUsize> = I::EXPAND_BY;
+ const MERGE_BY: Option<NonZeroUsize> = I::MERGE_BY;
}
diff --git a/library/core/src/iter/adapters/scan.rs b/library/core/src/iter/adapters/scan.rs
index 62470512c..635bad199 100644
--- a/library/core/src/iter/adapters/scan.rs
+++ b/library/core/src/iter/adapters/scan.rs
@@ -1,5 +1,6 @@
use crate::fmt;
use crate::iter::{adapters::SourceIter, InPlaceIterable};
+use crate::num::NonZeroUsize;
use crate::ops::{ControlFlow, Try};
/// An iterator to maintain state while iterating another iterator.
@@ -92,7 +93,7 @@ where
}
#[unstable(issue = "none", feature = "inplace_iteration")]
-unsafe impl<St, F, B, I: InPlaceIterable> InPlaceIterable for Scan<I, St, F> where
- F: FnMut(&mut St, I::Item) -> Option<B>
-{
+unsafe impl<St, F, I: InPlaceIterable> InPlaceIterable for Scan<I, St, F> {
+ const EXPAND_BY: Option<NonZeroUsize> = I::EXPAND_BY;
+ const MERGE_BY: Option<NonZeroUsize> = I::MERGE_BY;
}
diff --git a/library/core/src/iter/adapters/skip.rs b/library/core/src/iter/adapters/skip.rs
index 306338bc7..e6c946e7f 100644
--- a/library/core/src/iter/adapters/skip.rs
+++ b/library/core/src/iter/adapters/skip.rs
@@ -1,4 +1,5 @@
use crate::intrinsics::unlikely;
+use crate::iter::TrustedFused;
use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable};
use crate::num::NonZeroUsize;
use crate::ops::{ControlFlow, Try};
@@ -214,6 +215,9 @@ where
#[stable(feature = "fused", since = "1.26.0")]
impl<I> FusedIterator for Skip<I> where I: FusedIterator {}
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<I: TrustedFused> TrustedFused for Skip<I> {}
+
#[unstable(issue = "none", feature = "inplace_iteration")]
unsafe impl<I> SourceIter for Skip<I>
where
@@ -229,4 +233,7 @@ where
}
#[unstable(issue = "none", feature = "inplace_iteration")]
-unsafe impl<I: InPlaceIterable> InPlaceIterable for Skip<I> {}
+unsafe impl<I: InPlaceIterable> InPlaceIterable for Skip<I> {
+ const EXPAND_BY: Option<NonZeroUsize> = I::EXPAND_BY;
+ const MERGE_BY: Option<NonZeroUsize> = I::MERGE_BY;
+}
diff --git a/library/core/src/iter/adapters/skip_while.rs b/library/core/src/iter/adapters/skip_while.rs
index f29661779..3a661973e 100644
--- a/library/core/src/iter/adapters/skip_while.rs
+++ b/library/core/src/iter/adapters/skip_while.rs
@@ -1,5 +1,6 @@
use crate::fmt;
-use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable};
+use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable, TrustedFused};
+use crate::num::NonZeroUsize;
use crate::ops::Try;
/// An iterator that rejects elements while `predicate` returns `true`.
@@ -104,6 +105,9 @@ where
{
}
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<I: TrustedFused, P> TrustedFused for SkipWhile<I, P> {}
+
#[unstable(issue = "none", feature = "inplace_iteration")]
unsafe impl<P, I> SourceIter for SkipWhile<I, P>
where
@@ -119,7 +123,7 @@ where
}
#[unstable(issue = "none", feature = "inplace_iteration")]
-unsafe impl<I: InPlaceIterable, F> InPlaceIterable for SkipWhile<I, F> where
- F: FnMut(&I::Item) -> bool
-{
+unsafe impl<I: InPlaceIterable, F> InPlaceIterable for SkipWhile<I, F> {
+ const EXPAND_BY: Option<NonZeroUsize> = I::EXPAND_BY;
+ const MERGE_BY: Option<NonZeroUsize> = I::MERGE_BY;
}
diff --git a/library/core/src/iter/adapters/step_by.rs b/library/core/src/iter/adapters/step_by.rs
index 7f58f7d17..9e83584e3 100644
--- a/library/core/src/iter/adapters/step_by.rs
+++ b/library/core/src/iter/adapters/step_by.rs
@@ -83,11 +83,7 @@ where
// last element. Used in the `DoubleEndedIterator` implementation.
fn next_back_index(&self) -> usize {
let rem = self.iter.len() % (self.step + 1);
- if self.first_take {
- if rem == 0 { self.step } else { rem - 1 }
- } else {
- rem
- }
+ if self.first_take { if rem == 0 { self.step } else { rem - 1 } } else { rem }
}
}
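For intuition, a standalone model of the remainder math above (assuming, as in this adapter, that `step` stores `n - 1` for `step_by(n)`):

```rust
fn next_back_index(len: usize, step: usize, first_take: bool) -> usize {
    let rem = len % (step + 1);
    if first_take { if rem == 0 { step } else { rem - 1 } } else { rem }
}

fn main() {
    // step_by(3) over 7 items yields indices 0, 3, 6; the last item is nth_back(0).
    assert_eq!(next_back_index(7, 2, true), 0);
    // After the first item is taken, 6 remain and the back index is still 0 (index 6).
    assert_eq!(next_back_index(6, 2, false), 0);
}
```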
diff --git a/library/core/src/iter/adapters/take.rs b/library/core/src/iter/adapters/take.rs
index c1d8cc4ff..80e06066d 100644
--- a/library/core/src/iter/adapters/take.rs
+++ b/library/core/src/iter/adapters/take.rs
@@ -1,6 +1,7 @@
use crate::cmp;
use crate::iter::{
- adapters::SourceIter, FusedIterator, InPlaceIterable, TrustedLen, TrustedRandomAccess,
+ adapters::SourceIter, FusedIterator, InPlaceIterable, TrustedFused, TrustedLen,
+ TrustedRandomAccess,
};
use crate::num::NonZeroUsize;
use crate::ops::{ControlFlow, Try};
@@ -143,7 +144,10 @@ where
}
#[unstable(issue = "none", feature = "inplace_iteration")]
-unsafe impl<I: InPlaceIterable> InPlaceIterable for Take<I> {}
+unsafe impl<I: InPlaceIterable> InPlaceIterable for Take<I> {
+ const EXPAND_BY: Option<NonZeroUsize> = I::EXPAND_BY;
+ const MERGE_BY: Option<NonZeroUsize> = I::MERGE_BY;
+}
#[stable(feature = "double_ended_take_iterator", since = "1.38.0")]
impl<I> DoubleEndedIterator for Take<I>
@@ -241,6 +245,9 @@ impl<I> ExactSizeIterator for Take<I> where I: ExactSizeIterator {}
#[stable(feature = "fused", since = "1.26.0")]
impl<I> FusedIterator for Take<I> where I: FusedIterator {}
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<I: TrustedFused> TrustedFused for Take<I> {}
+
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<I: TrustedLen> TrustedLen for Take<I> {}
diff --git a/library/core/src/iter/adapters/take_while.rs b/library/core/src/iter/adapters/take_while.rs
index ec66dc3ae..e55d55a6d 100644
--- a/library/core/src/iter/adapters/take_while.rs
+++ b/library/core/src/iter/adapters/take_while.rs
@@ -1,5 +1,6 @@
use crate::fmt;
-use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable};
+use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable, TrustedFused};
+use crate::num::NonZeroUsize;
use crate::ops::{ControlFlow, Try};
/// An iterator that only accepts elements while `predicate` returns `true`.
@@ -105,6 +106,9 @@ where
{
}
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<I: TrustedFused, P> TrustedFused for TakeWhile<I, P> {}
+
#[unstable(issue = "none", feature = "inplace_iteration")]
unsafe impl<P, I> SourceIter for TakeWhile<I, P>
where
@@ -120,7 +124,7 @@ where
}
#[unstable(issue = "none", feature = "inplace_iteration")]
-unsafe impl<I: InPlaceIterable, F> InPlaceIterable for TakeWhile<I, F> where
- F: FnMut(&I::Item) -> bool
-{
+unsafe impl<I: InPlaceIterable, F> InPlaceIterable for TakeWhile<I, F> {
+ const EXPAND_BY: Option<NonZeroUsize> = I::EXPAND_BY;
+ const MERGE_BY: Option<NonZeroUsize> = I::MERGE_BY;
}
diff --git a/library/core/src/iter/adapters/zip.rs b/library/core/src/iter/adapters/zip.rs
index 77ccf5085..b33400fab 100644
--- a/library/core/src/iter/adapters/zip.rs
+++ b/library/core/src/iter/adapters/zip.rs
@@ -1,7 +1,8 @@
use crate::cmp;
use crate::fmt::{self, Debug};
-use crate::iter::{DoubleEndedIterator, ExactSizeIterator, FusedIterator, Iterator};
+use crate::iter::{FusedIterator, TrustedFused};
use crate::iter::{InPlaceIterable, SourceIter, TrustedLen, UncheckedIterator};
+use crate::num::NonZeroUsize;
/// An iterator that iterates two other iterators simultaneously.
///
@@ -446,6 +447,14 @@ where
{
}
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<A, B> TrustedFused for Zip<A, B>
+where
+ A: TrustedFused,
+ B: TrustedFused,
+{
+}
+
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<A, B> TrustedLen for Zip<A, B>
where
@@ -479,7 +488,10 @@ where
// Since SourceIter forwards the left hand side we do the same here
#[unstable(issue = "none", feature = "inplace_iteration")]
-unsafe impl<A: InPlaceIterable, B: Iterator> InPlaceIterable for Zip<A, B> {}
+unsafe impl<A: InPlaceIterable, B> InPlaceIterable for Zip<A, B> {
+ const EXPAND_BY: Option<NonZeroUsize> = A::EXPAND_BY;
+ const MERGE_BY: Option<NonZeroUsize> = A::MERGE_BY;
+}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Debug, B: Debug> Debug for Zip<A, B> {
diff --git a/library/core/src/iter/mod.rs b/library/core/src/iter/mod.rs
index 937a149ac..44fef3e14 100644
--- a/library/core/src/iter/mod.rs
+++ b/library/core/src/iter/mod.rs
@@ -417,6 +417,8 @@ pub use self::sources::{successors, Successors};
pub use self::traits::FusedIterator;
#[unstable(issue = "none", feature = "inplace_iteration")]
pub use self::traits::InPlaceIterable;
+#[unstable(issue = "none", feature = "trusted_fused")]
+pub use self::traits::TrustedFused;
#[unstable(feature = "trusted_len", issue = "37572")]
pub use self::traits::TrustedLen;
#[unstable(feature = "trusted_step", issue = "85731")]
diff --git a/library/core/src/iter/sources/from_coroutine.rs b/library/core/src/iter/sources/from_coroutine.rs
index 16fbca9b6..bf413b24d 100644
--- a/library/core/src/iter/sources/from_coroutine.rs
+++ b/library/core/src/iter/sources/from_coroutine.rs
@@ -11,8 +11,7 @@ use crate::pin::Pin;
/// # Examples
///
/// ```
-/// #![cfg_attr(bootstrap, feature(generators))]
-/// #![cfg_attr(not(bootstrap), feature(coroutines))]
+/// #![feature(coroutines)]
/// #![feature(iter_from_coroutine)]
///
/// let it = std::iter::from_coroutine(|| {
diff --git a/library/core/src/iter/traits/iterator.rs b/library/core/src/iter/traits/iterator.rs
index 6adea4442..8e2c887a1 100644
--- a/library/core/src/iter/traits/iterator.rs
+++ b/library/core/src/iter/traits/iterator.rs
@@ -69,7 +69,7 @@ fn _assert_is_object_safe(_: &dyn Iterator<Item = ()>) {}
message = "`{Self}` is not an iterator"
)]
#[doc(notable_trait)]
-#[cfg_attr(not(bootstrap), lang = "iterator")]
+#[lang = "iterator"]
#[rustc_diagnostic_item = "Iterator"]
#[must_use = "iterators are lazy and do nothing unless consumed"]
pub trait Iterator {
diff --git a/library/core/src/iter/traits/marker.rs b/library/core/src/iter/traits/marker.rs
index c21a2aac1..e7c1f195a 100644
--- a/library/core/src/iter/traits/marker.rs
+++ b/library/core/src/iter/traits/marker.rs
@@ -1,4 +1,16 @@
use crate::iter::Step;
+use crate::num::NonZeroUsize;
+
+/// Same as [`FusedIterator`].
+///
+/// # Safety
+///
+/// This is used for specialization. Therefore implementations must not
+/// be lifetime-dependent.
+#[unstable(issue = "none", feature = "trusted_fused")]
+#[doc(hidden)]
+#[rustc_specialization_trait]
+pub unsafe trait TrustedFused {}
/// An iterator that always continues to yield `None` when exhausted.
///
@@ -14,6 +26,8 @@ use crate::iter::Step;
/// [`Fuse`]: crate::iter::Fuse
#[stable(feature = "fused", since = "1.26.0")]
#[rustc_unsafe_specialization_marker]
+// FIXME: this should be a #[marker] and have another blanket impl for T: TrustedFused
+// but that ICEs iter::Fuse specializations.
pub trait FusedIterator: Iterator {}
#[stable(feature = "fused", since = "1.26.0")]
@@ -71,7 +85,19 @@ unsafe impl<I: TrustedLen + ?Sized> TrustedLen for &mut I {}
/// [`try_fold()`]: Iterator::try_fold
#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
-pub unsafe trait InPlaceIterable: Iterator {}
+#[rustc_specialization_trait]
+pub unsafe trait InPlaceIterable {
+ /// The product of one-to-many item expansions that happen throughout the iterator pipeline.
+ /// E.g. two nested `flatten()` calls over items of type `[[u8; 4]; 4]` would have an
+ /// `EXPAND_BY` of 16.
+ /// This is an upper bound, i.e. the transformations will produce at most this many items per
+ /// input. It's meant for layout calculations.
+ const EXPAND_BY: Option<NonZeroUsize>;
+ /// The product of many-to-one item reductions that happen throughout the iterator pipeline.
+ /// E.g. two nested `array_chunks::<4>()` calls over an iterator of `u8` would have a
+ /// `MERGE_BY` of 16.
+ /// This is a lower bound, i.e. the transformations will consume at least this many items per
+ /// output.
+ const MERGE_BY: Option<NonZeroUsize>;
+}
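For intuition, a stable-Rust sketch of the kind of expansion these constants track (here each `[u8; 2]` item expands into two outputs, so a pipeline like this would have an `EXPAND_BY` of 2):

```rust
fn main() {
    let nested = vec![[1u8, 2], [3, 4], [5, 6]];
    // One-to-many: `flatten` can yield at most 2 items per input item.
    let flat: Vec<u8> = nested.into_iter().flatten().collect();
    assert_eq!(flat, [1, 2, 3, 4, 5, 6]);
}
```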
/// A type that upholds all invariants of [`Step`].
///
diff --git a/library/core/src/iter/traits/mod.rs b/library/core/src/iter/traits/mod.rs
index 41ea29e6a..d4c9cc4b1 100644
--- a/library/core/src/iter/traits/mod.rs
+++ b/library/core/src/iter/traits/mod.rs
@@ -18,6 +18,8 @@ pub use self::{
#[unstable(issue = "none", feature = "inplace_iteration")]
pub use self::marker::InPlaceIterable;
+#[unstable(issue = "none", feature = "trusted_fused")]
+pub use self::marker::TrustedFused;
#[unstable(feature = "trusted_step", issue = "85731")]
pub use self::marker::TrustedStep;
diff --git a/library/core/src/lib.rs b/library/core/src/lib.rs
index 5a6d242a7..07720f235 100644
--- a/library/core/src/lib.rs
+++ b/library/core/src/lib.rs
@@ -24,10 +24,9 @@
//! which are generated by Rust codegen backends. Additionally, this library can make explicit
//! calls to `strlen`. Their signatures are the same as found in C, but there are extra
//! assumptions about their semantics: For `memcpy`, `memmove`, `memset`, `memcmp`, and `bcmp`, if
-//! the `n` parameter is 0, the function is assumed to not be UB. Furthermore, for `memcpy`, if
-//! source and target pointer are equal, the function is assumed to not be UB.
-//! (Note that these are standard assumptions among compilers:
-//! [clang](https://reviews.llvm.org/D86993) and [GCC](https://gcc.gnu.org/bugzilla/show_bug.cgi?id=32667) do the same.)
+//! the `n` parameter is 0, the function is assumed to not be UB, even if the pointers are NULL or
+//! dangling. (Note that making extra assumptions about these functions is common among compilers:
+//! [clang](https://reviews.llvm.org/D86993) and [GCC](https://gcc.gnu.org/onlinedocs/gcc/Standards.html#C-Language) do the same.)
//! These functions are often provided by the system libc, but can also be provided by the
//! [compiler-builtins crate](https://crates.io/crates/compiler_builtins).
//! Note that the library does not guarantee that it will always make these assumptions, so Rust
@@ -69,7 +68,7 @@
test(no_crate_inject, attr(deny(warnings))),
test(attr(allow(dead_code, deprecated, unused_variables, unused_mut)))
)]
-#![cfg_attr(not(bootstrap), doc(rust_logo))]
+#![doc(rust_logo)]
#![doc(cfg_hide(
not(test),
any(not(feature = "miri-test-libstd"), test, doctest),
@@ -178,6 +177,9 @@
#![feature(is_ascii_octdigit)]
#![feature(isqrt)]
#![feature(maybe_uninit_uninit_array)]
+#![feature(non_null_convenience)]
+#![feature(offset_of)]
+#![feature(offset_of_enum)]
#![feature(ptr_alignment_type)]
#![feature(ptr_metadata)]
#![feature(set_ptr_value)]
@@ -219,6 +221,7 @@
#![feature(doc_cfg)]
#![feature(doc_cfg_hide)]
#![feature(doc_notable_trait)]
+#![feature(effects)]
#![feature(exhaustive_patterns)]
#![feature(extern_types)]
#![feature(fundamental)]
@@ -443,9 +446,10 @@ pub mod arch;
#[unstable(feature = "portable_simd", issue = "86656")]
mod core_simd;
-#[doc = include_str!("../../portable-simd/crates/core_simd/src/core_simd_docs.md")]
#[unstable(feature = "portable_simd", issue = "86656")]
pub mod simd {
+ #![doc = include_str!("../../portable-simd/crates/core_simd/src/core_simd_docs.md")]
+
#[unstable(feature = "portable_simd", issue = "86656")]
pub use crate::core_simd::simd::*;
}
diff --git a/library/core/src/mem/maybe_uninit.rs b/library/core/src/mem/maybe_uninit.rs
index 8a210c195..53e9a32e3 100644
--- a/library/core/src/mem/maybe_uninit.rs
+++ b/library/core/src/mem/maybe_uninit.rs
@@ -691,10 +691,7 @@ impl<T> MaybeUninit<T> {
/// // they both get dropped!
/// ```
#[stable(feature = "maybe_uninit_extra", since = "1.60.0")]
- #[rustc_const_stable(
- feature = "const_maybe_uninit_assume_init_read",
- since = "1.75.0"
- )]
+ #[rustc_const_stable(feature = "const_maybe_uninit_assume_init_read", since = "1.75.0")]
#[inline(always)]
#[track_caller]
pub const unsafe fn assume_init_read(&self) -> T {
diff --git a/library/core/src/mem/mod.rs b/library/core/src/mem/mod.rs
index eef214528..c1687abb7 100644
--- a/library/core/src/mem/mod.rs
+++ b/library/core/src/mem/mod.rs
@@ -10,7 +10,7 @@ use crate::cmp;
use crate::fmt;
use crate::hash;
use crate::intrinsics;
-use crate::marker::{Copy, DiscriminantKind, Sized};
+use crate::marker::DiscriminantKind;
use crate::ptr;
mod manually_drop;
@@ -1360,7 +1360,7 @@ impl<T> SizedTypeProperties for T {}
///
/// ```
/// #![feature(offset_of)]
-/// # #![cfg_attr(not(bootstrap), feature(offset_of_enum))]
+/// # #![feature(offset_of_enum)]
///
/// use std::mem;
/// #[repr(C)]
@@ -1390,12 +1390,9 @@ impl<T> SizedTypeProperties for T {}
/// B { one: u8, two: u16 },
/// }
///
-/// # #[cfg(not(bootstrap))]
/// assert_eq!(mem::offset_of!(Enum, A.0), 1);
-/// # #[cfg(not(bootstrap))]
/// assert_eq!(mem::offset_of!(Enum, B.two), 2);
///
-/// # #[cfg(not(bootstrap))]
/// assert_eq!(mem::offset_of!(Option<&u8>, Some.0), 0);
/// ```
#[unstable(feature = "offset_of", issue = "106655")]
diff --git a/library/core/src/net/ip_addr.rs b/library/core/src/net/ip_addr.rs
index 77f85215d..8bf15c736 100644
--- a/library/core/src/net/ip_addr.rs
+++ b/library/core/src/net/ip_addr.rs
@@ -468,7 +468,13 @@ impl Ipv4Addr {
#[unstable(feature = "ip_bits", issue = "113744")]
pub const BITS: u32 = 32;
- /// Converts an IPv4 address into host byte order `u32`.
+ /// Converts an IPv4 address into a `u32` representation using native byte order.
+ ///
+ /// Although IPv4 addresses are big-endian, the `u32` value will use the target platform's
+ /// native byte order. That is, the `u32` value is an integer representation of the IPv4
+ /// address and not an integer interpretation of the IPv4 address's big-endian bitstring. This
+ /// means that the `u32` value masked with `0xffffff00` will set the last octet in the address
+ /// to 0, regardless of the target platform's endianness.
///
/// # Examples
///
@@ -479,6 +485,16 @@ impl Ipv4Addr {
/// let addr = Ipv4Addr::new(0x12, 0x34, 0x56, 0x78);
/// assert_eq!(0x12345678, addr.to_bits());
/// ```
+ ///
+ /// ```
+ /// #![feature(ip_bits)]
+ /// use std::net::Ipv4Addr;
+ ///
+ /// let addr = Ipv4Addr::new(0x12, 0x34, 0x56, 0x78);
+ /// let addr_bits = addr.to_bits() & 0xffffff00;
+ /// assert_eq!(Ipv4Addr::new(0x12, 0x34, 0x56, 0x00), Ipv4Addr::from_bits(addr_bits));
+ ///
+ /// ```
#[rustc_const_unstable(feature = "ip_bits", issue = "113744")]
#[unstable(feature = "ip_bits", issue = "113744")]
#[must_use]
@@ -487,7 +503,9 @@ impl Ipv4Addr {
u32::from_be_bytes(self.octets)
}
- /// Converts a host byte order `u32` into an IPv4 address.
+ /// Converts a native byte order `u32` into an IPv4 address.
+ ///
+ /// See [`Ipv4Addr::to_bits`] for an explanation on endianness.
///
/// # Examples
///
@@ -1224,7 +1242,13 @@ impl Ipv6Addr {
#[unstable(feature = "ip_bits", issue = "113744")]
pub const BITS: u32 = 128;
- /// Converts an IPv6 address into host byte order `u128`.
+ /// Converts an IPv6 address into a `u128` representation using native byte order.
+ ///
+ /// Although IPv6 addresses are big-endian, the `u128` value will use the target platform's
+ /// native byte order. That is, the `u128` value is an integer representation of the IPv6
+ /// address and not an integer interpretation of the IPv6 address's big-endian bitstring. This
+ /// means that the `u128` value masked with `0xffffffffffffffffffffffffffff0000_u128` will set
+ /// the last segment in the address to 0, regardless of the target platform's endianness.
///
/// # Examples
///
@@ -1238,6 +1262,24 @@ impl Ipv6Addr {
/// );
/// assert_eq!(0x102030405060708090A0B0C0D0E0F00D_u128, u128::from(addr));
/// ```
+ ///
+ /// ```
+ /// #![feature(ip_bits)]
+ /// use std::net::Ipv6Addr;
+ ///
+ /// let addr = Ipv6Addr::new(
+ /// 0x1020, 0x3040, 0x5060, 0x7080,
+ /// 0x90A0, 0xB0C0, 0xD0E0, 0xF00D,
+ /// );
+ /// let addr_bits = addr.to_bits() & 0xffffffffffffffffffffffffffff0000_u128;
+ /// assert_eq!(
+ /// Ipv6Addr::new(
+ /// 0x1020, 0x3040, 0x5060, 0x7080,
+ /// 0x90A0, 0xB0C0, 0xD0E0, 0x0000,
+ /// ),
+ /// Ipv6Addr::from_bits(addr_bits));
+ ///
+ /// ```
#[rustc_const_unstable(feature = "ip_bits", issue = "113744")]
#[unstable(feature = "ip_bits", issue = "113744")]
#[must_use]
@@ -1246,7 +1288,9 @@ impl Ipv6Addr {
u128::from_be_bytes(self.octets)
}
- /// Converts a host byte order `u128` into an IPv6 address.
+ /// Converts a native byte order `u128` into an IPv6 address.
+ ///
+ /// See [`Ipv6Addr::to_bits`] for an explanation on endianness.
///
/// # Examples
///
@@ -1393,7 +1437,7 @@ impl Ipv6Addr {
/// - The [unspecified address] ([`is_unspecified`](Ipv6Addr::is_unspecified))
/// - The [loopback address] ([`is_loopback`](Ipv6Addr::is_loopback))
/// - IPv4-mapped addresses
- /// - Addresses reserved for benchmarking
+ /// - Addresses reserved for benchmarking ([`is_benchmarking`](Ipv6Addr::is_benchmarking))
/// - Addresses reserved for documentation ([`is_documentation`](Ipv6Addr::is_documentation))
/// - Unique local addresses ([`is_unique_local`](Ipv6Addr::is_unique_local))
/// - Unicast addresses with link-local scope ([`is_unicast_link_local`](Ipv6Addr::is_unicast_link_local))
diff --git a/library/core/src/num/f32.rs b/library/core/src/num/f32.rs
index f60626b00..709eba2ff 100644
--- a/library/core/src/num/f32.rs
+++ b/library/core/src/num/f32.rs
@@ -1424,9 +1424,17 @@ impl f32 {
/// ];
///
/// bois.sort_by(|a, b| a.weight.total_cmp(&b.weight));
- /// # assert!(bois.into_iter().map(|b| b.weight)
- /// # .zip([-5.0, 0.1, 10.0, 99.0, f32::INFINITY, f32::NAN].iter())
- /// # .all(|(a, b)| a.to_bits() == b.to_bits()))
+ ///
+ /// // `f32::NAN` could be positive or negative, which will affect the sort order.
+ /// if f32::NAN.is_sign_negative() {
+ /// assert!(bois.into_iter().map(|b| b.weight)
+ /// .zip([f32::NAN, -5.0, 0.1, 10.0, 99.0, f32::INFINITY].iter())
+ /// .all(|(a, b)| a.to_bits() == b.to_bits()))
+ /// } else {
+ /// assert!(bois.into_iter().map(|b| b.weight)
+ /// .zip([-5.0, 0.1, 10.0, 99.0, f32::INFINITY, f32::NAN].iter())
+ /// .all(|(a, b)| a.to_bits() == b.to_bits()))
+ /// }
/// ```
#[stable(feature = "total_cmp", since = "1.62.0")]
#[must_use]
diff --git a/library/core/src/num/f64.rs b/library/core/src/num/f64.rs
index 0a87021d8..73fa61574 100644
--- a/library/core/src/num/f64.rs
+++ b/library/core/src/num/f64.rs
@@ -1422,9 +1422,17 @@ impl f64 {
/// ];
///
/// bois.sort_by(|a, b| a.weight.total_cmp(&b.weight));
- /// # assert!(bois.into_iter().map(|b| b.weight)
- /// # .zip([-5.0, 0.1, 10.0, 99.0, f64::INFINITY, f64::NAN].iter())
- /// # .all(|(a, b)| a.to_bits() == b.to_bits()))
+ ///
+ /// // `f64::NAN` could be positive or negative, which will affect the sort order.
+ /// if f64::NAN.is_sign_negative() {
+ /// assert!(bois.into_iter().map(|b| b.weight)
+ /// .zip([f64::NAN, -5.0, 0.1, 10.0, 99.0, f64::INFINITY].iter())
+ /// .all(|(a, b)| a.to_bits() == b.to_bits()))
+ /// } else {
+ /// assert!(bois.into_iter().map(|b| b.weight)
+ /// .zip([-5.0, 0.1, 10.0, 99.0, f64::INFINITY, f64::NAN].iter())
+ /// .all(|(a, b)| a.to_bits() == b.to_bits()))
+ /// }
/// ```
#[stable(feature = "total_cmp", since = "1.62.0")]
#[must_use]
diff --git a/library/core/src/num/mod.rs b/library/core/src/num/mod.rs
index 2a0b31404..695e87aaa 100644
--- a/library/core/src/num/mod.rs
+++ b/library/core/src/num/mod.rs
@@ -474,7 +474,7 @@ impl isize {
}
}
-/// If 6th bit is set ascii is lower case.
+/// If the 6th bit is set ascii is lower case.
const ASCII_CASE_MASK: u8 = 0b0010_0000;
impl u8 {
@@ -549,7 +549,7 @@ impl u8 {
#[rustc_const_stable(feature = "const_ascii_methods_on_intrinsics", since = "1.52.0")]
#[inline]
pub const fn to_ascii_uppercase(&self) -> u8 {
- // Toggle the fifth bit if this is a lowercase letter
+ // Toggle the 6th bit if this is a lowercase letter
*self ^ ((self.is_ascii_lowercase() as u8) * ASCII_CASE_MASK)
}
@@ -574,7 +574,7 @@ impl u8 {
#[rustc_const_stable(feature = "const_ascii_methods_on_intrinsics", since = "1.52.0")]
#[inline]
pub const fn to_ascii_lowercase(&self) -> u8 {
- // Set the fifth bit if this is an uppercase letter
+ // Set the 6th bit if this is an uppercase letter
*self | (self.is_ascii_uppercase() as u8 * ASCII_CASE_MASK)
}
diff --git a/library/core/src/num/nonzero.rs b/library/core/src/num/nonzero.rs
index 7f8d673c1..f5ecf501c 100644
--- a/library/core/src/num/nonzero.rs
+++ b/library/core/src/num/nonzero.rs
@@ -75,12 +75,12 @@ macro_rules! nonzero_integers {
#[must_use]
#[inline]
pub const unsafe fn new_unchecked(n: $Int) -> Self {
+ crate::panic::debug_assert_nounwind!(
+ n != 0,
+ concat!(stringify!($Ty), "::new_unchecked requires a non-zero argument")
+ );
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
- core::intrinsics::assert_unsafe_precondition!(
- concat!(stringify!($Ty), "::new_unchecked requires a non-zero argument"),
- (n: $Int) => n != 0
- );
Self(n)
}
}
@@ -353,8 +353,13 @@ macro_rules! nonzero_unsigned_operations {
#[inline]
pub const fn checked_add(self, other: $Int) -> Option<$Ty> {
if let Some(result) = self.get().checked_add(other) {
- // SAFETY: $Int::checked_add returns None on overflow
- // so the result cannot be zero.
+ // SAFETY:
+ // - `checked_add` returns `None` on overflow
+ // - `self` is non-zero
+ // - the only way to get zero from an addition without overflow is for both
+ // sides to be zero
+ //
+ // So the result cannot be zero.
Some(unsafe { $Ty::new_unchecked(result) })
} else {
None
@@ -386,8 +391,13 @@ macro_rules! nonzero_unsigned_operations {
without modifying the original"]
#[inline]
pub const fn saturating_add(self, other: $Int) -> $Ty {
- // SAFETY: $Int::saturating_add returns $Int::MAX on overflow
- // so the result cannot be zero.
+ // SAFETY:
+ // - `saturating_add` returns `u*::MAX` on overflow, which is non-zero
+ // - `self` is non-zero
+ // - the only way to get zero from an addition without overflow is for both
+ // sides to be zero
+ //
+ // So the result cannot be zero.
unsafe { $Ty::new_unchecked(self.get().saturating_add(other)) }
}
@@ -1000,9 +1010,13 @@ macro_rules! nonzero_unsigned_signed_operations {
#[inline]
pub const fn checked_mul(self, other: $Ty) -> Option<$Ty> {
if let Some(result) = self.get().checked_mul(other.get()) {
- // SAFETY: checked_mul returns None on overflow
- // and `other` is also non-null
- // so the result cannot be zero.
+ // SAFETY:
+ // - `checked_mul` returns `None` on overflow
+ // - `self` and `other` are non-zero
+ // - the only way to get zero from a multiplication without overflow is for one
+ // of the sides to be zero
+ //
+ // So the result cannot be zero.
Some(unsafe { $Ty::new_unchecked(result) })
} else {
None
@@ -1034,9 +1048,14 @@ macro_rules! nonzero_unsigned_signed_operations {
without modifying the original"]
#[inline]
pub const fn saturating_mul(self, other: $Ty) -> $Ty {
- // SAFETY: saturating_mul returns u*::MAX on overflow
- // and `other` is also non-null
- // so the result cannot be zero.
+ // SAFETY:
+ // - `saturating_mul` returns `u*::MAX`/`i*::MAX`/`i*::MIN` on overflow/underflow,
+ // all of which are non-zero
+ // - `self` and `other` are non-zero
+ // - the only way to get zero from a multiplication without overflow is for one
+ // of the sides to be zero
+ //
+ // So the result cannot be zero.
unsafe { $Ty::new_unchecked(self.get().saturating_mul(other.get())) }
}
@@ -1107,8 +1126,13 @@ macro_rules! nonzero_unsigned_signed_operations {
#[inline]
pub const fn checked_pow(self, other: u32) -> Option<$Ty> {
if let Some(result) = self.get().checked_pow(other) {
- // SAFETY: checked_pow returns None on overflow
- // so the result cannot be zero.
+ // SAFETY:
+ // - `checked_pow` returns `None` on overflow/underflow
+ // - `self` is non-zero
+ // - the only way to get zero from an exponentiation without overflow is
+ //   for the base to be zero
+ //
+ // So the result cannot be zero.
Some(unsafe { $Ty::new_unchecked(result) })
} else {
None
@@ -1149,8 +1173,14 @@ macro_rules! nonzero_unsigned_signed_operations {
without modifying the original"]
#[inline]
pub const fn saturating_pow(self, other: u32) -> $Ty {
- // SAFETY: saturating_pow returns u*::MAX on overflow
- // so the result cannot be zero.
+ // SAFETY:
+ // - `saturating_pow` returns `u*::MAX`/`i*::MAX`/`i*::MIN` on overflow/underflow,
+ // all of which are non-zero
+ // - `self` is non-zero
+ // - the only way to get zero from an exponentiation without overflow is
+ //   for the base to be zero
+ //
+ // So the result cannot be zero.
unsafe { $Ty::new_unchecked(self.get().saturating_pow(other)) }
}
}
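
As a usage sketch of the operations whose SAFETY comments were expanded above (all stable on `NonZeroU32`; the assertions mirror the non-zero reasoning in the comments):

```rust
use std::num::NonZeroU32;

fn main() {
    let two = NonZeroU32::new(2).unwrap();
    let three = NonZeroU32::new(3).unwrap();

    // A non-overflowing add involving a non-zero value cannot be zero.
    assert_eq!(two.checked_add(3).map(NonZeroU32::get), Some(5));
    // On overflow, saturating_add clamps to u32::MAX, which is non-zero.
    assert_eq!(two.saturating_add(u32::MAX).get(), u32::MAX);

    // A non-overflowing product of two non-zero values is non-zero.
    assert_eq!(two.checked_mul(three).map(NonZeroU32::get), Some(6));
    // checked_pow returns None when the result would overflow.
    assert_eq!(two.checked_pow(32), None);
}
```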
diff --git a/library/core/src/ops/arith.rs b/library/core/src/ops/arith.rs
index 840c8cd2f..1773fdbf3 100644
--- a/library/core/src/ops/arith.rs
+++ b/library/core/src/ops/arith.rs
@@ -98,6 +98,7 @@ macro_rules! add_impl {
type Output = $t;
#[inline]
+ #[track_caller]
#[rustc_inherit_overflow_checks]
fn add(self, other: $t) -> $t { self + other }
}
@@ -206,6 +207,7 @@ macro_rules! sub_impl {
type Output = $t;
#[inline]
+ #[track_caller]
#[rustc_inherit_overflow_checks]
fn sub(self, other: $t) -> $t { self - other }
}
@@ -335,6 +337,7 @@ macro_rules! mul_impl {
type Output = $t;
#[inline]
+ #[track_caller]
#[rustc_inherit_overflow_checks]
fn mul(self, other: $t) -> $t { self * other }
}
@@ -474,6 +477,7 @@ macro_rules! div_impl_integer {
type Output = $t;
#[inline]
+ #[track_caller]
fn div(self, other: $t) -> $t { self / other }
}
@@ -575,6 +579,7 @@ macro_rules! rem_impl_integer {
type Output = $t;
#[inline]
+ #[track_caller]
fn rem(self, other: $t) -> $t { self % other }
}
@@ -749,6 +754,7 @@ macro_rules! add_assign_impl {
#[stable(feature = "op_assign_traits", since = "1.8.0")]
impl AddAssign for $t {
#[inline]
+ #[track_caller]
#[rustc_inherit_overflow_checks]
fn add_assign(&mut self, other: $t) { *self += other }
}
@@ -815,6 +821,7 @@ macro_rules! sub_assign_impl {
#[stable(feature = "op_assign_traits", since = "1.8.0")]
impl SubAssign for $t {
#[inline]
+ #[track_caller]
#[rustc_inherit_overflow_checks]
fn sub_assign(&mut self, other: $t) { *self -= other }
}
@@ -872,6 +879,7 @@ macro_rules! mul_assign_impl {
#[stable(feature = "op_assign_traits", since = "1.8.0")]
impl MulAssign for $t {
#[inline]
+ #[track_caller]
#[rustc_inherit_overflow_checks]
fn mul_assign(&mut self, other: $t) { *self *= other }
}
@@ -929,6 +937,7 @@ macro_rules! div_assign_impl {
#[stable(feature = "op_assign_traits", since = "1.8.0")]
impl DivAssign for $t {
#[inline]
+ #[track_caller]
fn div_assign(&mut self, other: $t) { *self /= other }
}
@@ -989,6 +998,7 @@ macro_rules! rem_assign_impl {
#[stable(feature = "op_assign_traits", since = "1.8.0")]
impl RemAssign for $t {
#[inline]
+ #[track_caller]
fn rem_assign(&mut self, other: $t) { *self %= other }
}
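
A sketch of what `#[track_caller]` changes here: when overflow checks are enabled, the panic for an overflowing `+` is attributed to the code invoking the operator rather than to the `impl Add` inside core. A hypothetical way to observe this (output and panic behavior vary with build settings):

```rust
use std::ops::Add;

fn add_generic<T: Add<Output = T>>(a: T, b: T) -> T {
    // Because core's Add impls now carry #[track_caller], an overflow
    // panic in a debug build reports this line, not a location in core.
    a.add(b)
}

fn main() {
    let caught = std::panic::catch_unwind(|| add_generic(255u8, 1u8));
    // Err in builds with overflow checks; Ok(0) where the add wraps.
    println!("overflowed: {}", caught.is_err());
}
```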
diff --git a/library/core/src/ops/coroutine.rs b/library/core/src/ops/coroutine.rs
index cd5ca988f..e58c9068a 100644
--- a/library/core/src/ops/coroutine.rs
+++ b/library/core/src/ops/coroutine.rs
@@ -1,4 +1,3 @@
-use crate::marker::Unpin;
use crate::pin::Pin;
/// The result of a coroutine resumption.
@@ -7,8 +6,7 @@ use crate::pin::Pin;
/// possible return values of a coroutine. Currently this corresponds to either
/// a suspension point (`Yielded`) or a termination point (`Complete`).
#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
-#[cfg_attr(bootstrap, lang = "generator_state")]
-#[cfg_attr(not(bootstrap), lang = "coroutine_state")]
+#[lang = "coroutine_state"]
#[unstable(feature = "coroutine_trait", issue = "43122")]
pub enum CoroutineState<Y, R> {
/// The coroutine suspended with a value.
@@ -40,8 +38,7 @@ pub enum CoroutineState<Y, R> {
/// closure-like:
///
/// ```rust
-/// #![cfg_attr(bootstrap, feature(generators))]
-/// #![cfg_attr(not(bootstrap), feature(coroutines))]
+/// #![feature(coroutines)]
/// #![feature(coroutine_trait)]
///
/// use std::ops::{Coroutine, CoroutineState};
@@ -68,8 +65,7 @@ pub enum CoroutineState<Y, R> {
///
/// [RFC 2033]: https://github.com/rust-lang/rfcs/pull/2033
/// [unstable book]: ../../unstable-book/language-features/coroutines.html
-#[cfg_attr(bootstrap, lang = "generator")]
-#[cfg_attr(not(bootstrap), lang = "coroutine")]
+#[lang = "coroutine"]
#[unstable(feature = "coroutine_trait", issue = "43122")]
#[fundamental]
pub trait Coroutine<R = ()> {
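
The rustdoc example referenced above is elided in this hunk; for context, the trait's documented example looks like this under the renamed feature gates (nightly only, as of this change):

```rust
#![feature(coroutines)]
#![feature(coroutine_trait)]

use std::ops::{Coroutine, CoroutineState};
use std::pin::Pin;

fn main() {
    let mut coroutine = || {
        yield 1;
        "foo"
    };

    match Pin::new(&mut coroutine).resume(()) {
        CoroutineState::Yielded(1) => {}
        _ => panic!("unexpected value from resume"),
    }
    match Pin::new(&mut coroutine).resume(()) {
        CoroutineState::Complete("foo") => {}
        _ => panic!("unexpected value from resume"),
    }
}
```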
diff --git a/library/core/src/ops/function.rs b/library/core/src/ops/function.rs
index 51e304dd7..3a3d3fcf1 100644
--- a/library/core/src/ops/function.rs
+++ b/library/core/src/ops/function.rs
@@ -56,7 +56,7 @@ use crate::marker::Tuple;
#[lang = "fn"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_paren_sugar]
-#[cfg_attr(not(bootstrap), rustc_on_unimplemented(
+#[rustc_on_unimplemented(
on(
Args = "()",
note = "wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}`"
@@ -69,7 +69,7 @@ use crate::marker::Tuple;
),
message = "expected a `{Trait}` closure, found `{Self}`",
label = "expected an `{Trait}` closure, found `{Self}`"
-))]
+)]
#[fundamental] // so that regex can rely that `&str: !FnMut`
#[must_use = "closures are lazy and do nothing unless called"]
// FIXME(effects) #[const_trait]
@@ -143,7 +143,7 @@ pub trait Fn<Args: Tuple>: FnMut<Args> {
#[lang = "fn_mut"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_paren_sugar]
-#[cfg_attr(not(bootstrap), rustc_on_unimplemented(
+#[rustc_on_unimplemented(
on(
Args = "()",
note = "wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}`"
@@ -156,7 +156,7 @@ pub trait Fn<Args: Tuple>: FnMut<Args> {
),
message = "expected a `{Trait}` closure, found `{Self}`",
label = "expected an `{Trait}` closure, found `{Self}`"
-))]
+)]
#[fundamental] // so that regex can rely that `&str: !FnMut`
#[must_use = "closures are lazy and do nothing unless called"]
// FIXME(effects) #[const_trait]
@@ -222,7 +222,7 @@ pub trait FnMut<Args: Tuple>: FnOnce<Args> {
#[lang = "fn_once"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_paren_sugar]
-#[cfg_attr(not(bootstrap), rustc_on_unimplemented(
+#[rustc_on_unimplemented(
on(
Args = "()",
note = "wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}`"
@@ -235,7 +235,7 @@ pub trait FnMut<Args: Tuple>: FnOnce<Args> {
),
message = "expected a `{Trait}` closure, found `{Self}`",
label = "expected an `{Trait}` closure, found `{Self}`"
-))]
+)]
#[fundamental] // so that regex can rely that `&str: !FnMut`
#[must_use = "closures are lazy and do nothing unless called"]
// FIXME(effects) #[const_trait]
diff --git a/library/core/src/ops/index_range.rs b/library/core/src/ops/index_range.rs
index 265022a39..743799c4b 100644
--- a/library/core/src/ops/index_range.rs
+++ b/library/core/src/ops/index_range.rs
@@ -1,4 +1,4 @@
-use crate::intrinsics::{assert_unsafe_precondition, unchecked_add, unchecked_sub};
+use crate::intrinsics::{unchecked_add, unchecked_sub};
use crate::iter::{FusedIterator, TrustedLen};
use crate::num::NonZeroUsize;
@@ -19,13 +19,10 @@ impl IndexRange {
/// - `start <= end`
#[inline]
pub const unsafe fn new_unchecked(start: usize, end: usize) -> Self {
- // SAFETY: comparisons on usize are pure
- unsafe {
- assert_unsafe_precondition!(
- "IndexRange::new_unchecked requires `start <= end`",
- (start: usize, end: usize) => start <= end
- )
- };
+ crate::panic::debug_assert_nounwind!(
+ start <= end,
+ "IndexRange::new_unchecked requires `start <= end`"
+ );
IndexRange { start, end }
}
diff --git a/library/core/src/option.rs b/library/core/src/option.rs
index 89d4532de..ff4353492 100644
--- a/library/core/src/option.rs
+++ b/library/core/src/option.rs
@@ -119,7 +119,7 @@
//! # Representation
//!
//! Rust guarantees to optimize the following types `T` such that
-//! [`Option<T>`] has the same size and alignment as `T`. In some
+//! [`Option<T>`] has the same size, alignment, and [function call ABI] as `T`. In some
//! of these cases, Rust further guarantees that
//! `transmute::<_, Option<T>>([0u8; size_of::<T>()])` is sound and
//! produces `Option::<T>::None`. These cases are identified by the
@@ -127,7 +127,7 @@
//!
//! | `T` | `transmute::<_, Option<T>>([0u8; size_of::<T>()])` sound? |
//! |---------------------------------------------------------------------|----------------------------------------------------------------------|
-//! | [`Box<U>`] | when `U: Sized` |
+//! | [`Box<U>`] (specifically, only `Box<U, Global>`) | when `U: Sized` |
//! | `&U` | when `U: Sized` |
//! | `&mut U` | when `U: Sized` |
//! | `fn`, `extern "C" fn`[^extern_fn] | always |
@@ -135,11 +135,12 @@
//! | [`ptr::NonNull<U>`] | when `U: Sized` |
//! | `#[repr(transparent)]` struct around one of the types in this list. | when it holds for the inner type |
//!
-//! [^extern_fn]: this remains true for any other ABI: `extern "abi" fn` (_e.g._, `extern "system" fn`)
+//! [^extern_fn]: this remains true for any argument/return types and any other ABI: `extern "abi" fn` (_e.g._, `extern "system" fn`)
//!
//! [`Box<U>`]: ../../std/boxed/struct.Box.html
//! [`num::NonZero*`]: crate::num
//! [`ptr::NonNull<U>`]: crate::ptr::NonNull
+//! [function call ABI]: ../primitive.fn.html#abi-compatibility
//!
//! This is called the "null pointer optimization" or NPO.
//!
@@ -779,7 +780,7 @@ impl<T> Option<T> {
// `None` case it's just padding).
unsafe {
slice::from_raw_parts(
- crate::intrinsics::option_payload_ptr(crate::ptr::from_ref(self)),
+ (self as *const Self).byte_add(core::mem::offset_of!(Self, Some.0)).cast(),
usize::from(self.is_some()),
)
}
@@ -835,8 +836,7 @@ impl<T> Option<T> {
// the `None` case it's just padding).
unsafe {
slice::from_raw_parts_mut(
- crate::intrinsics::option_payload_ptr(crate::ptr::from_mut(self).cast_const())
- .cast_mut(),
+ (self as *mut Self).byte_add(core::mem::offset_of!(Self, Some.0)).cast(),
usize::from(self.is_some()),
)
}
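
The replacement above derives the payload address from the enum layout via `offset_of!` instead of a dedicated intrinsic. A nightly sketch of the same idea on a hypothetical option-like enum (`offset_of!` on enum variants is gated behind `offset_of_enum`; `MaybeU32` and `payload_ptr` are illustrative names, not library APIs):

```rust
#![feature(offset_of_enum)]

use std::mem::offset_of;

#[allow(dead_code)]
enum MaybeU32 {
    Nothing,
    Just(u32),
}

fn payload_ptr(v: &MaybeU32) -> *const u32 {
    // Offset of the `Just` payload within the enum's layout; this is a
    // fixed offset regardless of which variant is currently active.
    let off = offset_of!(MaybeU32, Just.0);
    // This only computes an address; reading through it is only valid
    // when `v` actually holds a `Just`.
    unsafe { (v as *const MaybeU32).byte_add(off).cast() }
}

fn main() {
    let v = MaybeU32::Just(7);
    if matches!(v, MaybeU32::Just(_)) {
        unsafe { assert_eq!(*payload_ptr(&v), 7) };
    }
}
```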
@@ -1079,8 +1079,6 @@ impl<T> Option<T> {
/// # Examples
///
/// ```
- /// #![feature(result_option_inspect)]
- ///
/// let v = vec![1, 2, 3, 4, 5];
///
/// // prints "got: 4"
@@ -1090,11 +1088,8 @@ impl<T> Option<T> {
/// let x: Option<&usize> = v.get(5).inspect(|x| println!("got: {x}"));
/// ```
#[inline]
- #[unstable(feature = "result_option_inspect", issue = "91345")]
- pub fn inspect<F>(self, f: F) -> Self
- where
- F: FnOnce(&T),
- {
+ #[stable(feature = "result_option_inspect", since = "1.76.0")]
+ pub fn inspect<F: FnOnce(&T)>(self, f: F) -> Self {
if let Some(ref x) = self {
f(x);
}
diff --git a/library/core/src/panic.rs b/library/core/src/panic.rs
index a00fd322b..4ca5af1ea 100644
--- a/library/core/src/panic.rs
+++ b/library/core/src/panic.rs
@@ -47,7 +47,7 @@ pub macro panic_2015 {
#[allow_internal_unstable(core_panic, const_format_args)]
#[rustc_diagnostic_item = "core_panic_2021_macro"]
#[rustc_macro_transparency = "semitransparent"]
-#[cfg(any(bootstrap, feature = "panic_immediate_abort"))]
+#[cfg(feature = "panic_immediate_abort")]
pub macro panic_2021 {
() => (
$crate::panicking::panic("explicit panic")
@@ -75,7 +75,7 @@ pub macro panic_2021 {
)]
#[rustc_diagnostic_item = "core_panic_2021_macro"]
#[rustc_macro_transparency = "semitransparent"]
-#[cfg(not(any(bootstrap, feature = "panic_immediate_abort")))]
+#[cfg(not(feature = "panic_immediate_abort"))]
pub macro panic_2021 {
() => ({
// Create a function so that the argument for `track_caller`
@@ -139,6 +139,32 @@ pub macro unreachable_2021 {
),
}
+/// Asserts that a boolean expression is `true`, and performs a non-unwinding panic otherwise.
+///
+/// This macro is similar to `debug_assert!`, but is intended to be used in code that should not
+/// unwind. For example, checks in `_unchecked` functions that are intended for debugging but should
+/// not compromise unwind safety.
+#[doc(hidden)]
+#[unstable(feature = "core_panic", issue = "none")]
+#[allow_internal_unstable(core_panic, const_format_args)]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro debug_assert_nounwind {
+ ($cond:expr $(,)?) => {
+ if $crate::cfg!(debug_assertions) {
+ if !$cond {
+ $crate::panicking::panic_nounwind($crate::concat!("assertion failed: ", $crate::stringify!($cond)));
+ }
+ }
+ },
+ ($cond:expr, $($arg:tt)+) => {
+ if $crate::cfg!(debug_assertions) {
+ if !$cond {
+ $crate::panicking::panic_nounwind_fmt($crate::const_format_args!($($arg)+), false);
+ }
+ }
+ },
+}
+
/// An internal trait used by std to pass data from std to `panic_unwind` and
/// other panic runtimes. Not intended to be stabilized any time soon, do not
/// use.
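
Since this macro is internal to core (gated behind `core_panic`), its behavior can only be approximated outside the standard library. A rough user-land sketch, assuming an abort is an acceptable stand-in for a non-unwinding panic:

```rust
// A rough analogue of `debug_assert_nounwind!`: check only in debug
// builds, and escalate to an abort instead of an unwind.
macro_rules! my_debug_assert_nounwind {
    ($cond:expr, $msg:expr) => {
        if cfg!(debug_assertions) && !$cond {
            // Printing then aborting approximates a non-unwinding panic.
            eprintln!("{}", $msg);
            std::process::abort();
        }
    };
}

fn main() {
    let n = 5u32;
    my_debug_assert_nounwind!(n != 0, "n must be non-zero");
    println!("check passed");
}
```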
diff --git a/library/core/src/panicking.rs b/library/core/src/panicking.rs
index 39a5e8d9f..1b6e77b96 100644
--- a/library/core/src/panicking.rs
+++ b/library/core/src/panicking.rs
@@ -82,28 +82,45 @@ pub const fn panic_fmt(fmt: fmt::Arguments<'_>) -> ! {
// and unwinds anyway, we will hit the "unwinding out of nounwind function" guard,
// which causes a "panic in a function that cannot unwind".
#[rustc_nounwind]
-pub fn panic_nounwind_fmt(fmt: fmt::Arguments<'_>, force_no_backtrace: bool) -> ! {
- if cfg!(feature = "panic_immediate_abort") {
- super::intrinsics::abort()
- }
+#[rustc_const_unstable(feature = "core_panic", issue = "none")]
+pub const fn panic_nounwind_fmt(fmt: fmt::Arguments<'_>, force_no_backtrace: bool) -> ! {
+ #[inline] // this should always be inlined into `panic_nounwind_fmt`
+ #[track_caller]
+ fn runtime(fmt: fmt::Arguments<'_>, force_no_backtrace: bool) -> ! {
+ if cfg!(feature = "panic_immediate_abort") {
+ super::intrinsics::abort()
+ }
- // NOTE This function never crosses the FFI boundary; it's a Rust-to-Rust call
- // that gets resolved to the `#[panic_handler]` function.
- extern "Rust" {
- #[lang = "panic_impl"]
- fn panic_impl(pi: &PanicInfo<'_>) -> !;
+ // NOTE This function never crosses the FFI boundary; it's a Rust-to-Rust call
+ // that gets resolved to the `#[panic_handler]` function.
+ extern "Rust" {
+ #[lang = "panic_impl"]
+ fn panic_impl(pi: &PanicInfo<'_>) -> !;
+ }
+
+ // PanicInfo with the `can_unwind` flag set to false forces an abort.
+ let pi = PanicInfo::internal_constructor(
+ Some(&fmt),
+ Location::caller(),
+ /* can_unwind */ false,
+ force_no_backtrace,
+ );
+
+ // SAFETY: `panic_impl` is defined in safe Rust code and thus is safe to call.
+ unsafe { panic_impl(&pi) }
}
- // PanicInfo with the `can_unwind` flag set to false forces an abort.
- let pi = PanicInfo::internal_constructor(
- Some(&fmt),
- Location::caller(),
- /* can_unwind */ false,
- force_no_backtrace,
- );
+ #[inline]
+ #[track_caller]
+ const fn comptime(fmt: fmt::Arguments<'_>, _force_no_backtrace: bool) -> ! {
+ // We don't unwind anyway at compile-time so we can call the regular `panic_fmt`.
+ panic_fmt(fmt);
+ }
- // SAFETY: `panic_impl` is defined in safe Rust code and thus is safe to call.
- unsafe { panic_impl(&pi) }
+ // SAFETY: const panic does not care about unwinding
+ unsafe {
+ super::intrinsics::const_eval_select((fmt, force_no_backtrace), comptime, runtime);
+ }
}
// Next we define a bunch of higher-level wrappers that all bottom out in the two core functions
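
The `const_eval_select` split above is a recurring pattern in core: one body for compile-time evaluation, one for runtime. A nightly sketch of the same pattern outside of core (the intrinsic is unstable; the exact feature gates vary by nightly, and `describe` is an illustrative name):

```rust
#![feature(core_intrinsics, const_eval_select)]

use std::intrinsics::const_eval_select;

const fn describe(n: u32) -> u32 {
    const fn comptime(n: u32) -> u32 {
        // At compile time we cannot print; just compute.
        n * 2
    }
    fn runtime(n: u32) -> u32 {
        // At runtime the full standard library is available.
        println!("doubling {n} at runtime");
        n * 2
    }
    // SAFETY: both branches compute the same result, which is what
    // const_eval_select requires for soundness.
    unsafe { const_eval_select((n,), comptime, runtime) }
}

const AT_COMPILE_TIME: u32 = describe(21);

fn main() {
    assert_eq!(AT_COMPILE_TIME, 42);
    assert_eq!(describe(21), 42);
}
```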
@@ -127,12 +144,14 @@ pub const fn panic(expr: &'static str) -> ! {
panic_fmt(fmt::Arguments::new_const(&[expr]));
}
-/// Like `panic`, but without unwinding and track_caller to reduce the impact on codesize.
+/// Like `panic`, but without unwinding and track_caller to reduce the impact on the caller's codesize.
+/// If you want `#[track_caller]` for nicer errors, call `panic_nounwind_fmt` directly.
#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
#[cfg_attr(feature = "panic_immediate_abort", inline)]
#[lang = "panic_nounwind"] // needed by codegen for non-unwinding panics
#[rustc_nounwind]
-pub fn panic_nounwind(expr: &'static str) -> ! {
+#[rustc_const_unstable(feature = "core_panic", issue = "none")]
+pub const fn panic_nounwind(expr: &'static str) -> ! {
panic_nounwind_fmt(fmt::Arguments::new_const(&[expr]), /* force_no_backtrace */ false);
}
@@ -170,9 +189,8 @@ pub fn unreachable_display<T: fmt::Display>(x: &T) -> ! {
#[inline]
#[track_caller]
#[rustc_do_not_const_check] // hooked by const-eval
-#[cfg_attr(bootstrap, lang = "panic_display")]
// enforce a &&str argument in const-check and hook this by const-eval
-#[cfg_attr(not(bootstrap), rustc_const_panic_str)]
+#[rustc_const_panic_str]
#[rustc_const_unstable(feature = "core_panic", issue = "none")]
pub const fn panic_display<T: fmt::Display>(x: &T) -> ! {
panic_fmt(format_args!("{}", *x));
@@ -190,8 +208,8 @@ fn panic_bounds_check(index: usize, len: usize) -> ! {
panic!("index out of bounds: the len is {len} but the index is {index}")
}
-#[cold]
-#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
#[track_caller]
#[lang = "panic_misaligned_pointer_dereference"] // needed by codegen for panic on misaligned pointer deref
#[rustc_nounwind] // `CheckAlignment` MIR pass requires this function to never unwind
diff --git a/library/core/src/pin.rs b/library/core/src/pin.rs
index bca97d4ee..7d8c881ea 100644
--- a/library/core/src/pin.rs
+++ b/library/core/src/pin.rs
@@ -381,10 +381,9 @@
#![stable(feature = "pin", since = "1.33.0")]
-use crate::cmp::{self, PartialEq, PartialOrd};
+use crate::cmp;
use crate::fmt;
use crate::hash::{Hash, Hasher};
-use crate::marker::{Sized, Unpin};
use crate::ops::{CoerceUnsized, Deref, DerefMut, DispatchFromDyn, Receiver};
/// A pinned pointer.
@@ -1088,8 +1087,7 @@ impl<P, U> DispatchFromDyn<Pin<U>> for Pin<P> where P: DispatchFromDyn<U> {}
/// ### With `Coroutine`s
///
/// ```rust
-/// #![cfg_attr(bootstrap, feature(generators))]
-/// #![cfg_attr(not(bootstrap), feature(coroutines))]
+/// #![feature(coroutines)]
/// #![feature(coroutine_trait)]
/// use core::{
/// ops::{Coroutine, CoroutineState},
diff --git a/library/core/src/primitive_docs.rs b/library/core/src/primitive_docs.rs
index 87e492108..99208fba6 100644
--- a/library/core/src/primitive_docs.rs
+++ b/library/core/src/primitive_docs.rs
@@ -330,7 +330,7 @@ mod prim_never {}
/// the future ("reserved"); some will never be a character ("noncharacters"); and some may be given
/// different meanings by different users ("private use").
///
-/// `char` is guaranteed to have the same size and alignment as `u32` on all
+/// `char` is guaranteed to have the same size, alignment, and function call ABI as `u32` on all
/// platforms.
/// ```
/// use std::alloc::Layout;
@@ -1493,7 +1493,7 @@ mod prim_ref {}
///
/// ### Casting to and from integers
///
-/// You cast function pointers directly to integers:
+/// You can cast function pointers directly to integers:
///
/// ```rust
/// let fnptr: fn(i32) -> i32 = |x| x+2;
@@ -1519,9 +1519,116 @@ mod prim_ref {}
/// Note that all of this is not portable to platforms where function pointers and data pointers
/// have different sizes.
///
+/// ### ABI compatibility
+///
+/// Generally, when a function is declared with one signature and called via a function pointer with
+/// a different signature, the two signatures must be *ABI-compatible* or else calling the function
+/// via that function pointer is Undefined Behavior. ABI compatibility is a lot stricter than merely
+/// having the same memory layout; for example, even if `i32` and `f32` have the same size and
+/// alignment, they might be passed in different registers and hence not be ABI-compatible.
+///
+/// ABI compatibility as a concern only arises in code that alters the type of function pointers,
+/// code that imports functions via `extern` blocks, and in code that combines `#[target_feature]`
+/// with `extern fn`. Altering the type of function pointers is wildly unsafe (as in, a lot more
+/// unsafe than even [`transmute_copy`][mem::transmute_copy]), and should only occur in the most
+/// exceptional circumstances. Most Rust code just imports functions via `use`. `#[target_feature]`
+/// is also used rarely. So, most likely you do not have to worry about ABI compatibility.
+///
+/// But assuming such circumstances, what are the rules? For this section, we are only considering
+/// the ABI of direct Rust-to-Rust calls, not linking in general -- once functions are imported via
+/// `extern` blocks, there are more things to consider that we do not go into here.
+///
+/// For two signatures to be considered *ABI-compatible*, they must use a compatible ABI string,
+/// must take the same number of arguments, the individual argument types and the return types must
+/// be ABI-compatible, and the target feature requirements must be met (see the subsection below for
+/// the last point). The ABI string is declared via `extern "ABI" fn(...) -> ...`; note that
+/// `fn name(...) -> ...` implicitly uses the `"Rust"` ABI string and `extern fn name(...) -> ...`
+/// implicitly uses the `"C"` ABI string.
+///
+/// The ABI strings are guaranteed to be compatible if they are the same, or if the caller ABI
+/// string is `$X-unwind` and the callee ABI string is `$X`, where `$X` is one of the following:
+/// "C", "aapcs", "fastcall", "stdcall", "system", "sysv64", "thiscall", "vectorcall", "win64".
+///
+/// The following types are guaranteed to be ABI-compatible:
+///
+/// - `*const T`, `*mut T`, `&T`, `&mut T`, `Box<T>` (specifically, only `Box<T, Global>`), and
+/// `NonNull<T>` are all ABI-compatible with each other for all `T`. They are also ABI-compatible
+/// with each other for _different_ `T` if they have the same metadata type (`<T as
+/// Pointee>::Metadata`).
+/// - `usize` is ABI-compatible with the `uN` integer type of the same size, and likewise `isize` is
+/// ABI-compatible with the `iN` integer type of the same size.
+/// - `char` is ABI-compatible with `u32`.
+/// - Any two `fn` (function pointer) types are ABI-compatible with each other if they have the same
+/// ABI string or the ABI string only differs in a trailing `-unwind`, independent of the rest of
+/// their signature. (This means you can pass `fn()` to a function expecting `fn(i32)`, and the
+/// call will be valid ABI-wise. The callee receives the result of transmuting the function pointer
+/// from `fn()` to `fn(i32)`; that transmutation is itself a well-defined operation, it's just
+/// almost certainly UB to later call that function pointer.)
+/// - Any two types with size 0 and alignment 1 are ABI-compatible.
+/// - A `repr(transparent)` type `T` is ABI-compatible with its unique non-trivial field, i.e., the
+/// unique field that doesn't have size 0 and alignment 1 (if there is such a field).
+/// - `i32` is ABI-compatible with `NonZeroI32`, and similar for all other integer types with their
+/// matching `NonZero*` type.
+/// - If `T` is guaranteed to be subject to the [null pointer
+/// optimization](option/index.html#representation), then `T` and `Option<T>` are ABI-compatible.
+///
+/// Furthermore, ABI compatibility satisfies the following general properties:
+///
+/// - Every type is ABI-compatible with itself.
+/// - If `T1` and `T2` are ABI-compatible and `T2` and `T3` are ABI-compatible, then so are `T1` and
+/// `T3` (i.e., ABI-compatibility is transitive).
+/// - If `T1` and `T2` are ABI-compatible, then so are `T2` and `T1` (i.e., ABI-compatibility is
+/// symmetric).
+///
+/// More signatures can be ABI-compatible on specific targets, but that should not be relied upon
+/// since it is not portable and not a stable guarantee.
+///
+/// Noteworthy cases of types *not* being ABI-compatible in general are:
+/// * `bool` vs `u8`, `i32` vs `u32`, `char` vs `i32`: on some targets, the calling conventions for
+/// these types differ in terms of what they guarantee for the remaining bits in the register that
+/// are not used by the value.
+/// * `i32` vs `f32` are not compatible either, as has already been mentioned above.
+/// * `struct Foo(u32)` and `u32` are not compatible (without `repr(transparent)`) since structs are
+/// aggregate types and often passed in a different way than primitives like `i32`.
+///
+/// Note that these rules describe when two completely known types are ABI-compatible. When
+/// considering ABI compatibility of a type declared in another crate (including the standard
+/// library), consider that any type that has a private field or the `#[non_exhaustive]` attribute
+/// may change its layout as a non-breaking update unless documented otherwise -- so for instance,
+/// even if such a type is a 1-ZST or `repr(transparent)` right now, this might change with any
+/// library version bump.
+///
+/// If the declared signature and the signature of the function pointer are ABI-compatible, then the
+/// function call behaves as if every argument was [`transmute`d][mem::transmute] from the
+/// type in the function pointer to the type at the function declaration, and the return value is
+/// [`transmute`d][mem::transmute] from the type in the declaration to the type in the
+/// pointer. All the usual caveats and concerns around transmutation apply; for instance, if the
+/// function expects a `NonZeroI32` and the function pointer uses the ABI-compatible type
+/// `Option<NonZeroI32>`, and the value used for the argument is `None`, then this call is Undefined
+/// Behavior since transmuting `None::<NonZeroI32>` to `NonZeroI32` violates the non-zero
+/// requirement.
+///
+/// #### Requirements concerning target features
+///
+/// Under some conditions, the signature used by the caller and the callee can be ABI-incompatible
+/// even if the exact same ABI string and types are being used. As an example, the
+/// `std::arch::x86_64::__m256` type has a different `extern "C"` ABI when the `avx` feature is
+/// enabled vs when it is not enabled.
+///
+/// Therefore, to ensure ABI compatibility when code using different target features is combined
+/// (such as via `#[target_feature]`), we further require that one of the following conditions is
+/// met:
+///
+/// - The function uses the `"Rust"` ABI string (which is the default without `extern`).
+/// - Caller and callee are using the exact same set of target features. For the callee we consider
+/// the features enabled (via `#[target_feature]` and `-C target-feature`/`-C target-cpu`) at the
+/// declaration site; for the caller we consider the features enabled at the call site.
+/// - Neither any argument nor the return value involves a SIMD type (`#[repr(simd)]`) that is not
+/// behind a pointer indirection (i.e., `*mut __m256` is fine, but `(i32, __m256)` is not).
+///
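
Pulling these rules together, a sketch of a call through a transmuted-but-ABI-compatible function pointer, using the documented `u32`/`NonZeroU32` compatibility (transmuting function pointer types is exactly the "wildly unsafe" operation described above, shown here only under conditions the text declares valid):

```rust
use std::num::NonZeroU32;

fn add_one(x: u32) -> u32 {
    x + 1
}

fn main() {
    // u32 and NonZeroU32 are ABI-compatible per the list above, so this
    // call is ABI-valid. Each argument behaves as if transmuted from the
    // pointer's type (NonZeroU32) to the declared type (u32), which is
    // always a valid value transmute in this direction.
    let fnptr: fn(NonZeroU32) -> u32 =
        unsafe { std::mem::transmute(add_one as fn(u32) -> u32) };
    let arg = NonZeroU32::new(41).unwrap();
    assert_eq!(fnptr(arg), 42);
    // The reverse direction (a fn declared with NonZeroU32, called via a
    // fn(u32) pointer with 0) would be UB: that transmute violates non-zero.
}
```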
/// ### Trait implementations
///
-/// In this documentation the shorthand `fn (T₁, T₂, …, Tₙ)` is used to represent non-variadic
+/// In this documentation the shorthand `fn(T₁, T₂, …, Tₙ)` is used to represent non-variadic
/// function pointers of varying length. Note that this is a convenience notation to avoid
/// repetitive documentation, not valid Rust syntax.
///
diff --git a/library/core/src/ptr/alignment.rs b/library/core/src/ptr/alignment.rs
index bbf7199ff..ce176e6fc 100644
--- a/library/core/src/ptr/alignment.rs
+++ b/library/core/src/ptr/alignment.rs
@@ -1,5 +1,4 @@
use crate::convert::{TryFrom, TryInto};
-use crate::intrinsics::assert_unsafe_precondition;
use crate::num::NonZeroUsize;
use crate::{cmp, fmt, hash, mem, num};
@@ -42,6 +41,7 @@ impl Alignment {
/// This provides the same numerical value as [`mem::align_of`],
/// but in an `Alignment` instead of a `usize`.
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[rustc_const_unstable(feature = "ptr_alignment_type", issue = "102070")]
#[inline]
pub const fn of<T>() -> Self {
// SAFETY: rustc ensures that type alignment is always a power of two.
@@ -53,6 +53,7 @@ impl Alignment {
///
/// Note that `0` is not a power of two, nor a valid alignment.
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[rustc_const_unstable(feature = "ptr_alignment_type", issue = "102070")]
#[inline]
pub const fn new(align: usize) -> Option<Self> {
if align.is_power_of_two() {
@@ -75,13 +76,10 @@ impl Alignment {
#[rustc_const_unstable(feature = "ptr_alignment_type", issue = "102070")]
#[inline]
pub const unsafe fn new_unchecked(align: usize) -> Self {
- // SAFETY: Precondition passed to the caller.
- unsafe {
- assert_unsafe_precondition!(
- "Alignment::new_unchecked requires a power of two",
- (align: usize) => align.is_power_of_two()
- )
- };
+ crate::panic::debug_assert_nounwind!(
+ align.is_power_of_two(),
+ "Alignment::new_unchecked requires a power of two"
+ );
// SAFETY: By precondition, this must be a power of two, and
// our variants encompass all possible powers of two.
@@ -98,6 +96,7 @@ impl Alignment {
/// Returns the alignment as a [`NonZeroUsize`]
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[rustc_const_unstable(feature = "ptr_alignment_type", issue = "102070")]
#[inline]
pub const fn as_nonzero(self) -> NonZeroUsize {
// SAFETY: All the discriminants are non-zero.
@@ -118,10 +117,42 @@ impl Alignment {
/// assert_eq!(Alignment::new(1024).unwrap().log2(), 10);
/// ```
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[rustc_const_unstable(feature = "ptr_alignment_type", issue = "102070")]
#[inline]
- pub fn log2(self) -> u32 {
+ pub const fn log2(self) -> u32 {
self.as_nonzero().trailing_zeros()
}
+
+ /// Returns a bit mask that can be used to match this alignment.
+ ///
+ /// This is equivalent to `!(self.as_usize() - 1)`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ptr_alignment_type)]
+ /// #![feature(ptr_mask)]
+ /// use std::ptr::{Alignment, NonNull};
+ ///
+ /// #[repr(align(1))] struct Align1(u8);
+ /// #[repr(align(2))] struct Align2(u16);
+ /// #[repr(align(4))] struct Align4(u32);
+ /// let one = <NonNull<Align1>>::dangling().as_ptr();
+ /// let two = <NonNull<Align2>>::dangling().as_ptr();
+ /// let four = <NonNull<Align4>>::dangling().as_ptr();
+ ///
+ /// assert_eq!(four.mask(Alignment::of::<Align1>().mask()), four);
+ /// assert_eq!(four.mask(Alignment::of::<Align2>().mask()), four);
+ /// assert_eq!(four.mask(Alignment::of::<Align4>().mask()), four);
+ /// assert_ne!(one.mask(Alignment::of::<Align4>().mask()), one);
+ /// ```
+ #[unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[rustc_const_unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[inline]
+ pub const fn mask(self) -> usize {
+ // SAFETY: The alignment is always nonzero, and therefore decrementing won't overflow.
+ !(unsafe { self.as_usize().unchecked_sub(1) })
+ }
}
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
@@ -193,6 +224,14 @@ impl hash::Hash for Alignment {
}
}
+/// Returns [`Alignment::MIN`], which is valid for any type.
+#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+impl Default for Alignment {
+ fn default() -> Alignment {
+ Alignment::MIN
+ }
+}
+
#[cfg(target_pointer_width = "16")]
type AlignmentEnum = AlignmentEnum16;
#[cfg(target_pointer_width = "32")]
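
A nightly sketch exercising the new `Default` impl together with the now-`const` constructors from this file (everything here is gated behind `ptr_alignment_type`):

```rust
#![feature(ptr_alignment_type)]

use std::ptr::Alignment;

fn main() {
    // Default is Alignment::MIN, i.e. 1, which is valid for any type.
    assert_eq!(Alignment::default(), Alignment::MIN);
    assert_eq!(Alignment::default().as_usize(), 1);

    // `of` matches the type's alignment; `new` rejects non-powers-of-two.
    assert_eq!(Alignment::of::<u8>(), Alignment::MIN);
    assert_eq!(Alignment::new(3), None);
    assert_eq!(Alignment::new(8).unwrap().log2(), 3);
}
```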
diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs
index 36685f756..a444c30c7 100644
--- a/library/core/src/ptr/const_ptr.rs
+++ b/library/core/src/ptr/const_ptr.rs
@@ -1,7 +1,7 @@
use super::*;
-use crate::cmp::Ordering::{self, Equal, Greater, Less};
-use crate::intrinsics::{self, const_eval_select};
-use crate::mem::{self, SizedTypeProperties};
+use crate::cmp::Ordering::{Equal, Greater, Less};
+use crate::intrinsics::const_eval_select;
+use crate::mem::SizedTypeProperties;
use crate::slice::{self, SliceIndex};
impl<T: ?Sized> *const T {
@@ -186,10 +186,10 @@ impl<T: ?Sized> *const T {
/// [`with_addr`][pointer::with_addr] or [`map_addr`][pointer::map_addr].
///
/// If using those APIs is not possible because there is no way to preserve a pointer with the
- /// required provenance, use [`expose_addr`][pointer::expose_addr] and
- /// [`from_exposed_addr`][from_exposed_addr] instead. However, note that this makes
- /// your code less portable and less amenable to tools that check for compliance with the Rust
- /// memory model.
+ /// required provenance, then Strict Provenance might not be for you. Use pointer-integer casts
+ /// or [`expose_addr`][pointer::expose_addr] and [`from_exposed_addr`][from_exposed_addr]
+ /// instead. However, note that this makes your code less portable and less amenable to tools
+ /// that check for compliance with the Rust memory model.
///
/// On most platforms this will produce a value with the same bytes as the original
/// pointer, because all the bytes are dedicated to describing the address.
@@ -219,7 +219,8 @@ impl<T: ?Sized> *const T {
/// later call [`from_exposed_addr`][] to reconstitute the original pointer including its
/// provenance. (Reconstructing address space information, if required, is your responsibility.)
///
- /// Using this method means that code is *not* following Strict Provenance rules. Supporting
+ /// Using this method means that code is *not* following [Strict
+ /// Provenance][../index.html#strict-provenance] rules. Supporting
/// [`from_exposed_addr`][] complicates specification and reasoning and may not be supported by
/// tools that help you to stay conformant with the Rust memory model, so it is recommended to
/// use [`addr`][pointer::addr] wherever possible.
@@ -230,13 +231,13 @@ impl<T: ?Sized> *const T {
/// side-effect which is required for [`from_exposed_addr`][] to work is typically not
/// available.
///
- /// This API and its claimed semantics are part of the Strict Provenance experiment, see the
- /// [module documentation][crate::ptr] for details.
+ /// It is unclear whether this method can be given a satisfying unambiguous specification. This
+ /// API and its claimed semantics are part of [Exposed Provenance][../index.html#exposed-provenance].
///
/// [`from_exposed_addr`]: from_exposed_addr
#[must_use]
#[inline(always)]
- #[unstable(feature = "strict_provenance", issue = "95228")]
+ #[unstable(feature = "exposed_provenance", issue = "95228")]
pub fn expose_addr(self) -> usize {
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
self.cast::<()>() as usize
@@ -1367,10 +1368,16 @@ impl<T: ?Sized> *const T {
panic!("align_offset: align is not a power-of-two");
}
- {
- // SAFETY: `align` has been checked to be a power of 2 above
- unsafe { align_offset(self, align) }
+ // SAFETY: `align` has been checked to be a power of 2 above
+ let ret = unsafe { align_offset(self, align) };
+
+ // Inform Miri that we want to consider the resulting pointer to be suitably aligned.
+ #[cfg(miri)]
+ if ret != usize::MAX {
+ intrinsics::miri_promise_symbolic_alignment(self.wrapping_add(ret).cast(), align);
}
+
+ ret
}
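
For reference, a small sketch of `align_offset` in use; the Miri hint added above promises the tool that the pointer at the returned offset really is suitably aligned. (The docs permit `align_offset` to return `usize::MAX`, but for `u8` at runtime the offset is computed exactly.)

```rust
fn main() {
    let buf = [0u8; 16];
    let p = buf.as_ptr();

    // Number of u8 elements to advance so the pointer is 4-aligned.
    let off = p.align_offset(4);
    assert!(off < 4); // for u8 the offset is always below the alignment
    let aligned = p.wrapping_add(off);
    assert_eq!(aligned as usize % 4, 0);
}
```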
/// Returns whether the pointer is properly aligned for `T`.
@@ -1644,6 +1651,24 @@ impl<T> *const [T] {
metadata(self)
}
+ /// Returns `true` if the raw slice has a length of 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_ptr_len)]
+ /// use std::ptr;
+ ///
+ /// let slice: *const [i8] = ptr::slice_from_raw_parts(ptr::null(), 3);
+ /// assert!(!slice.is_empty());
+ /// ```
+ #[inline(always)]
+ #[unstable(feature = "slice_ptr_len", issue = "71146")]
+ #[rustc_const_unstable(feature = "const_slice_ptr_len", issue = "71146")]
+ pub const fn is_empty(self) -> bool {
+ self.len() == 0
+ }
+
/// Returns a raw pointer to the slice's buffer.
///
/// This is equivalent to casting `self` to `*const T`, but more type-safe.
@@ -1747,6 +1772,7 @@ impl<T> *const [T] {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialEq for *const T {
#[inline]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn eq(&self, other: &*const T) -> bool {
*self == *other
}
@@ -1759,6 +1785,7 @@ impl<T: ?Sized> Eq for *const T {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Ord for *const T {
#[inline]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn cmp(&self, other: &*const T) -> Ordering {
if self < other {
Less
@@ -1778,21 +1805,25 @@ impl<T: ?Sized> PartialOrd for *const T {
}
#[inline]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn lt(&self, other: &*const T) -> bool {
*self < *other
}
#[inline]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn le(&self, other: &*const T) -> bool {
*self <= *other
}
#[inline]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn gt(&self, other: &*const T) -> bool {
*self > *other
}
#[inline]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn ge(&self, other: &*const T) -> bool {
*self >= *other
}
diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs
index d71079dd0..390e07371 100644
--- a/library/core/src/ptr/mod.rs
+++ b/library/core/src/ptr/mod.rs
@@ -312,22 +312,30 @@
//! For instance, ARM explicitly supports high-bit tagging, and so CHERI on ARM inherits
//! that and should support it.
//!
-//! ## Pointer-usize-pointer roundtrips and 'exposed' provenance
+//! ## Exposed Provenance
//!
-//! **This section is *non-normative* and is part of the [Strict Provenance] experiment.**
+//! **This section is *non-normative* and is an extension to the [Strict Provenance] experiment.**
//!
//! As discussed above, pointer-usize-pointer roundtrips are not possible under [Strict Provenance].
-//! However, there exists legacy Rust code that is full of such roundtrips, and legacy platform APIs
-//! regularly assume that `usize` can capture all the information that makes up a pointer. There
-//! also might be code that cannot be ported to Strict Provenance (which is something we would [like
-//! to hear about][Strict Provenance]).
-//!
-//! For situations like this, there is a fallback plan, a way to 'opt out' of Strict Provenance.
-//! However, note that this makes your code a lot harder to specify, and the code will not work
-//! (well) with tools like [Miri] and [CHERI].
-//!
-//! This fallback plan is provided by the [`expose_addr`] and [`from_exposed_addr`] methods (which
-//! are equivalent to `as` casts between pointers and integers). [`expose_addr`] is a lot like
+//! This is by design: the goal of Strict Provenance is to provide a clear specification that we are
+//! confident can be formalized unambiguously and can be subject to precise formal reasoning.
+//!
+//! However, there exist situations where pointer-usize-pointer roundtrips cannot be avoided, or
+//! where avoiding them would require major refactoring. Legacy platform APIs also regularly assume
+//! that `usize` can capture all the information that makes up a pointer. The goal of Strict
+//! Provenance is not to rule out such code; the goal is to put all the *other* pointer-manipulating
+//! code onto a more solid foundation. Strict Provenance is about improving the situation where
+//! possible (all the code that can be written with Strict Provenance) without making things worse
+//! for situations where Strict Provenance is insufficient.
+//!
+//! For these situations, there is a highly experimental extension to Strict Provenance called
+//! *Exposed Provenance*. This extension permits pointer-usize-pointer roundtrips. However, its
+//! semantics are on much less solid footing than Strict Provenance, and at this point it is not yet
+//! clear whether a satisfying unambiguous semantics can be defined for Exposed Provenance.
+//! Furthermore, Exposed Provenance will not work (well) with tools like [Miri] and [CHERI].
+//!
+//! Exposed Provenance is provided by the [`expose_addr`] and [`from_exposed_addr`] methods, which
+//! are meant to replace `as` casts between pointers and integers. [`expose_addr`] is a lot like
//! [`addr`], but additionally adds the provenance of the pointer to a global list of 'exposed'
//! provenances. (This list is purely conceptual, it exists for the purpose of specifying Rust but
//! is not materialized in actual executions, except in tools like [Miri].) [`from_exposed_addr`]
@@ -341,10 +349,11 @@
//! there is *no* previously 'exposed' provenance that justifies the way the returned pointer will
//! be used, the program has undefined behavior.
//!
-//! Using [`expose_addr`] or [`from_exposed_addr`] (or the equivalent `as` casts) means that code is
+//! Using [`expose_addr`] or [`from_exposed_addr`] (or the `as` casts) means that code is
//! *not* following Strict Provenance rules. The goal of the Strict Provenance experiment is to
-//! determine whether it is possible to use Rust without [`expose_addr`] and [`from_exposed_addr`].
-//! If this is successful, it would be a major win for avoiding specification complexity and to
+//! determine how far one can get in Rust without the use of [`expose_addr`] and
+//! [`from_exposed_addr`], and to encourage code to be written with Strict Provenance APIs only.
+//! Maximizing the amount of such code is a major win for avoiding specification complexity and for
+//! facilitating adoption of tools like [CHERI] and [Miri] that can be a big help in increasing the
//! confidence in (unsafe) Rust code.
//!
@@ -619,12 +628,12 @@ pub const fn invalid_mut<T>(addr: usize) -> *mut T {
/// Convert an address back to a pointer, picking up a previously 'exposed' provenance.
///
-/// This is equivalent to `addr as *const T`. The provenance of the returned pointer is that of *any*
-/// pointer that was previously exposed by passing it to [`expose_addr`][pointer::expose_addr],
-/// or a `ptr as usize` cast. In addition, memory which is outside the control of the Rust abstract
-/// machine (MMIO registers, for example) is always considered to be exposed, so long as this memory
-/// is disjoint from memory that will be used by the abstract machine such as the stack, heap,
-/// and statics.
+/// This is a more rigorously specified alternative to `addr as *const T`. The provenance of the
+/// returned pointer is that of *any* pointer that was previously exposed by passing it to
+/// [`expose_addr`][pointer::expose_addr], or a `ptr as usize` cast. In addition, memory which is
+/// outside the control of the Rust abstract machine (MMIO registers, for example) is always
+/// considered to be exposed, so long as this memory is disjoint from memory that will be used by
+/// the abstract machine such as the stack, heap, and statics.
///
/// If there is no 'exposed' provenance that justifies the way this pointer will be used,
/// the program has undefined behavior. In particular, the aliasing rules still apply: pointers
@@ -639,7 +648,8 @@ pub const fn invalid_mut<T>(addr: usize) -> *mut T {
/// On platforms with multiple address spaces, it is your responsibility to ensure that the
/// address makes sense in the address space that this pointer will be used with.
///
-/// Using this method means that code is *not* following strict provenance rules. "Guessing" a
+/// Using this function means that code is *not* following [Strict
+/// Provenance][../index.html#strict-provenance] rules. "Guessing" a
/// suitable provenance complicates specification and reasoning and may not be supported by
/// tools that help you to stay conformant with the Rust memory model, so it is recommended to
/// use [`with_addr`][pointer::with_addr] wherever possible.
@@ -649,13 +659,13 @@ pub const fn invalid_mut<T>(addr: usize) -> *mut T {
/// since it is generally not possible to actually *compute* which provenance the returned
/// pointer has to pick up.
///
-/// This API and its claimed semantics are part of the Strict Provenance experiment, see the
-/// [module documentation][crate::ptr] for details.
+/// It is unclear whether this function can be given a satisfying unambiguous specification. This
+/// API and its claimed semantics are part of [Exposed Provenance][../index.html#exposed-provenance].
#[must_use]
#[inline(always)]
-#[unstable(feature = "strict_provenance", issue = "95228")]
+#[unstable(feature = "exposed_provenance", issue = "95228")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-#[allow(fuzzy_provenance_casts)] // this *is* the strict provenance API one should use instead
+#[allow(fuzzy_provenance_casts)] // this *is* the explicit provenance API one should use instead
pub fn from_exposed_addr<T>(addr: usize) -> *const T
where
T: Sized,
@@ -666,18 +676,20 @@ where
/// Convert an address back to a mutable pointer, picking up a previously 'exposed' provenance.
///
-/// This is equivalent to `addr as *mut T`. The provenance of the returned pointer is that of *any*
-/// pointer that was previously passed to [`expose_addr`][pointer::expose_addr] or a `ptr as usize`
-/// cast. If there is no previously 'exposed' provenance that justifies the way this pointer will be
-/// used, the program has undefined behavior. Note that there is no algorithm that decides which
-/// provenance will be used. You can think of this as "guessing" the right provenance, and the guess
-/// will be "maximally in your favor", in the sense that if there is any way to avoid undefined
-/// behavior, then that is the guess that will be taken.
+/// This is a more rigorously specified alternative to `addr as *mut T`. The provenance of the
+/// returned pointer is that of *any* pointer that was previously passed to
+/// [`expose_addr`][pointer::expose_addr] or a `ptr as usize` cast. If there is no previously
+/// 'exposed' provenance that justifies the way this pointer will be used, the program has undefined
+/// behavior. Note that there is no algorithm that decides which provenance will be used. You can
+/// think of this as "guessing" the right provenance, and the guess will be "maximally in your
+/// favor", in the sense that if there is any way to avoid undefined behavior, then that is the
+/// guess that will be taken.
///
/// On platforms with multiple address spaces, it is your responsibility to ensure that the
/// address makes sense in the address space that this pointer will be used with.
///
-/// Using this method means that code is *not* following strict provenance rules. "Guessing" a
+/// Using this function means that code is *not* following [Strict
+/// Provenance][../index.html#strict-provenance] rules. "Guessing" a
/// suitable provenance complicates specification and reasoning and may not be supported by
/// tools that help you to stay conformant with the Rust memory model, so it is recommended to
/// use [`with_addr`][pointer::with_addr] wherever possible.
@@ -687,13 +699,13 @@ where
/// since it is generally not possible to actually *compute* which provenance the returned
/// pointer has to pick up.
///
-/// This API and its claimed semantics are part of the Strict Provenance experiment, see the
-/// [module documentation][crate::ptr] for details.
+/// It is unclear whether this function can be given a satisfying unambiguous specification. This
+/// API and its claimed semantics are part of [Exposed Provenance][../index.html#exposed-provenance].
#[must_use]
#[inline(always)]
-#[unstable(feature = "strict_provenance", issue = "95228")]
+#[unstable(feature = "exposed_provenance", issue = "95228")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-#[allow(fuzzy_provenance_casts)] // this *is* the strict provenance API one should use instead
+#[allow(fuzzy_provenance_casts)] // this *is* the explicit provenance API one should use instead
pub fn from_exposed_addr_mut<T>(addr: usize) -> *mut T
where
T: Sized,
@@ -708,7 +720,8 @@ where
/// type or mutability, in particular if the code is refactored.
#[inline(always)]
#[must_use]
-#[unstable(feature = "ptr_from_ref", issue = "106116")]
+#[stable(feature = "ptr_from_ref", since = "1.76.0")]
+#[rustc_const_stable(feature = "ptr_from_ref", since = "1.76.0")]
#[rustc_never_returns_null_ptr]
#[rustc_diagnostic_item = "ptr_from_ref"]
pub const fn from_ref<T: ?Sized>(r: &T) -> *const T {
@@ -721,7 +734,9 @@ pub const fn from_ref<T: ?Sized>(r: &T) -> *const T {
/// type or mutability, in particular if the code is refactored.
#[inline(always)]
#[must_use]
-#[unstable(feature = "ptr_from_ref", issue = "106116")]
+#[stable(feature = "ptr_from_ref", since = "1.76.0")]
+#[rustc_const_stable(feature = "ptr_from_ref", since = "1.76.0")]
+#[rustc_allow_const_fn_unstable(const_mut_refs)]
#[rustc_never_returns_null_ptr]
pub const fn from_mut<T: ?Sized>(r: &mut T) -> *mut T {
r
@@ -1885,6 +1900,7 @@ pub(crate) const unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usiz
#[inline(always)]
#[must_use = "pointer comparison produces a value"]
#[rustc_diagnostic_item = "ptr_eq"]
+#[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))] // it's actually clear here
pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
a == b
}
@@ -1898,14 +1914,15 @@ pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
/// # Examples
///
/// ```
-/// #![feature(ptr_addr_eq)]
+/// use std::ptr;
///
/// let whole: &[i32; 3] = &[1, 2, 3];
/// let first: &i32 = &whole[0];
-/// assert!(std::ptr::addr_eq(whole, first));
-/// assert!(!std::ptr::eq::<dyn std::fmt::Debug>(whole, first));
+///
+/// assert!(ptr::addr_eq(whole, first));
+/// assert!(!ptr::eq::<dyn std::fmt::Debug>(whole, first));
/// ```
-#[unstable(feature = "ptr_addr_eq", issue = "116324")]
+#[stable(feature = "ptr_addr_eq", since = "1.76.0")]
#[inline(always)]
#[must_use = "pointer comparison produces a value"]
pub fn addr_eq<T: ?Sized, U: ?Sized>(p: *const T, q: *const U) -> bool {
@@ -1921,8 +1938,7 @@ pub fn addr_eq<T: ?Sized, U: ?Sized>(p: *const T, q: *const U) -> bool {
/// # Examples
///
/// ```
-/// use std::collections::hash_map::DefaultHasher;
-/// use std::hash::{Hash, Hasher};
+/// use std::hash::{DefaultHasher, Hash, Hasher};
/// use std::ptr;
///
/// let five = 5;
diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs
index bc362fb62..9e7b8ec64 100644
--- a/library/core/src/ptr/mut_ptr.rs
+++ b/library/core/src/ptr/mut_ptr.rs
@@ -1,6 +1,6 @@
use super::*;
-use crate::cmp::Ordering::{self, Equal, Greater, Less};
-use crate::intrinsics::{self, const_eval_select};
+use crate::cmp::Ordering::{Equal, Greater, Less};
+use crate::intrinsics::const_eval_select;
use crate::mem::SizedTypeProperties;
use crate::slice::{self, SliceIndex};
@@ -193,10 +193,10 @@ impl<T: ?Sized> *mut T {
/// [`with_addr`][pointer::with_addr] or [`map_addr`][pointer::map_addr].
///
/// If using those APIs is not possible because there is no way to preserve a pointer with the
- /// required provenance, use [`expose_addr`][pointer::expose_addr] and
- /// [`from_exposed_addr_mut`][from_exposed_addr_mut] instead. However, note that this makes
- /// your code less portable and less amenable to tools that check for compliance with the Rust
- /// memory model.
+ /// required provenance, then Strict Provenance might not be for you. Use pointer-integer casts
+ /// or [`expose_addr`][pointer::expose_addr] and [`from_exposed_addr_mut`][from_exposed_addr_mut]
+ /// instead. However, note that this makes your code less portable and less amenable to tools
+ /// that check for compliance with the Rust memory model.
///
/// On most platforms this will produce a value with the same bytes as the original
/// pointer, because all the bytes are dedicated to describing the address.
@@ -226,7 +226,8 @@ impl<T: ?Sized> *mut T {
/// later call [`from_exposed_addr_mut`][] to reconstitute the original pointer including its
/// provenance. (Reconstructing address space information, if required, is your responsibility.)
///
- /// Using this method means that code is *not* following Strict Provenance rules. Supporting
+ /// Using this method means that code is *not* following [Strict
+ /// Provenance][../index.html#strict-provenance] rules. Supporting
/// [`from_exposed_addr_mut`][] complicates specification and reasoning and may not be supported
/// by tools that help you to stay conformant with the Rust memory model, so it is recommended
/// to use [`addr`][pointer::addr] wherever possible.
@@ -237,13 +238,13 @@ impl<T: ?Sized> *mut T {
/// side-effect which is required for [`from_exposed_addr_mut`][] to work is typically not
/// available.
///
- /// This API and its claimed semantics are part of the Strict Provenance experiment, see the
- /// [module documentation][crate::ptr] for details.
+ /// It is unclear whether this method can be given a satisfying unambiguous specification. This
+ /// API and its claimed semantics are part of [Exposed Provenance][../index.html#exposed-provenance].
///
/// [`from_exposed_addr_mut`]: from_exposed_addr_mut
#[must_use]
#[inline(always)]
- #[unstable(feature = "strict_provenance", issue = "95228")]
+ #[unstable(feature = "exposed_provenance", issue = "95228")]
pub fn expose_addr(self) -> usize {
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
self.cast::<()>() as usize
@@ -259,7 +260,7 @@ impl<T: ?Sized> *mut T {
/// This is equivalent to using [`wrapping_offset`][pointer::wrapping_offset] to offset
/// `self` to the given address, and therefore has all the same capabilities and restrictions.
///
- /// This API and its claimed semantics are part of the Strict Provenance experiment,
+ /// This API and its claimed semantics are an extension to the Strict Provenance experiment,
/// see the [module documentation][crate::ptr] for details.
#[must_use]
#[inline]
@@ -1634,10 +1635,19 @@ impl<T: ?Sized> *mut T {
panic!("align_offset: align is not a power-of-two");
}
- {
- // SAFETY: `align` has been checked to be a power of 2 above
- unsafe { align_offset(self, align) }
+ // SAFETY: `align` has been checked to be a power of 2 above
+ let ret = unsafe { align_offset(self, align) };
+
+ // Inform Miri that we want to consider the resulting pointer to be suitably aligned.
+ #[cfg(miri)]
+ if ret != usize::MAX {
+ intrinsics::miri_promise_symbolic_alignment(
+ self.wrapping_add(ret).cast_const().cast(),
+ align,
+ );
}
+
+ ret
}
/// Returns whether the pointer is properly aligned for `T`.
@@ -1920,10 +1930,10 @@ impl<T> *mut [T] {
///
/// ```
/// #![feature(slice_ptr_len)]
+ /// use std::ptr;
///
- /// let mut a = [1, 2, 3];
- /// let ptr = &mut a as *mut [_];
- /// assert!(!ptr.is_empty());
+ /// let slice: *mut [i8] = ptr::slice_from_raw_parts_mut(ptr::null_mut(), 3);
+ /// assert!(!slice.is_empty());
/// ```
#[inline(always)]
#[unstable(feature = "slice_ptr_len", issue = "71146")]
@@ -2189,6 +2199,7 @@ impl<T> *mut [T] {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialEq for *mut T {
#[inline(always)]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn eq(&self, other: &*mut T) -> bool {
*self == *other
}
@@ -2200,6 +2211,7 @@ impl<T: ?Sized> Eq for *mut T {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Ord for *mut T {
#[inline]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn cmp(&self, other: &*mut T) -> Ordering {
if self < other {
Less
@@ -2219,21 +2231,25 @@ impl<T: ?Sized> PartialOrd for *mut T {
}
#[inline(always)]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn lt(&self, other: &*mut T) -> bool {
*self < *other
}
#[inline(always)]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn le(&self, other: &*mut T) -> bool {
*self <= *other
}
#[inline(always)]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn gt(&self, other: &*mut T) -> bool {
*self > *other
}
#[inline(always)]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn ge(&self, other: &*mut T) -> bool {
*self >= *other
}
diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs
index ae673b779..77961506e 100644
--- a/library/core/src/ptr/non_null.rs
+++ b/library/core/src/ptr/non_null.rs
@@ -1,12 +1,14 @@
use crate::cmp::Ordering;
-use crate::convert::From;
use crate::fmt;
use crate::hash;
+use crate::intrinsics;
use crate::intrinsics::assert_unsafe_precondition;
use crate::marker::Unsize;
+use crate::mem::SizedTypeProperties;
use crate::mem::{self, MaybeUninit};
use crate::num::NonZeroUsize;
use crate::ops::{CoerceUnsized, DispatchFromDyn};
+use crate::ptr;
use crate::ptr::Unique;
use crate::slice::{self, SliceIndex};
@@ -471,41 +473,1047 @@ impl<T: ?Sized> NonNull<T> {
unsafe { NonNull::new_unchecked(self.as_ptr() as *mut U) }
}
- /// See [`pointer::add`] for semantics and safety requirements.
+ /// Calculates the offset from a pointer.
+ ///
+ /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is Undefined
+ /// Behavior:
+ ///
+ /// * Both the starting and resulting pointer must be either in bounds or one
+ /// byte past the end of the same [allocated object].
+ ///
+ /// * The computed offset, **in bytes**, cannot overflow an `isize`.
+ ///
+ /// * The offset being in bounds cannot rely on "wrapping around" the address
+ /// space. That is, the infinite-precision sum, **in bytes**, must fit in a `usize`.
+ ///
+ /// The compiler and standard library generally try to ensure allocations
+ /// never reach a size where an offset is a concern. For instance, `Vec`
+ /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
+ /// `vec.as_ptr().add(vec.len())` is always safe.
+ ///
+ /// Most platforms fundamentally can't even construct such an allocation.
+ /// For instance, no known 64-bit platform can ever serve a request
+ /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
+ /// However, some 32-bit and 16-bit platforms may successfully serve a request for
+ /// more than `isize::MAX` bytes with things like Physical Address
+ /// Extension. As such, memory acquired directly from allocators or memory
+ /// mapped files *may* be too large to handle with this function.
+ ///
+ /// [allocated object]: crate::ptr#allocated-object
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(non_null_convenience)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let mut s = [1, 2, 3];
+ /// let ptr: NonNull<u32> = NonNull::new(s.as_mut_ptr()).unwrap();
+ ///
+ /// unsafe {
+ /// println!("{}", ptr.offset(1).read());
+ /// println!("{}", ptr.offset(2).read());
+ /// }
+ /// ```
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn offset(self, count: isize) -> NonNull<T>
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `offset`.
+ // Additionally, the safety contract of `offset` guarantees that the resulting pointer
+ // points to an allocation; there can't be an allocation at null, so it's safe to
+ // construct `NonNull`.
+ unsafe { NonNull { pointer: intrinsics::offset(self.pointer, count) } }
+ }
+
+ /// Calculates the offset from a pointer in bytes.
+ ///
+ /// `count` is in units of **bytes**.
+ ///
+ /// This is purely a convenience for casting to a `u8` pointer and
+ /// using [offset][pointer::offset] on it. See that method for documentation
+ /// and safety requirements.
+ ///
+ /// For non-`Sized` pointees this operation changes only the data pointer,
+ /// leaving the metadata untouched.
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[must_use]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn byte_offset(self, count: isize) -> Self {
+ // SAFETY: the caller must uphold the safety contract for `offset` and `byte_offset` has
+ // the same safety contract.
+ // Additionally, the safety contract of `offset` guarantees that the resulting pointer
+ // points to an allocation; there can't be an allocation at null, so it's safe to
+ // construct `NonNull`.
+ unsafe { NonNull { pointer: self.pointer.byte_offset(count) } }
+ }
+
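A nightly-only sketch of `byte_offset` in action (under the `non_null_convenience` gate named in the attributes above); stepping by `size_of::<u16>()` bytes is the same as `.add(1)`:

```rust
#![feature(non_null_convenience)]
use std::ptr::NonNull;

fn main() {
    let mut pair = [10u16, 20u16];
    let ptr: NonNull<u16> = NonNull::new(pair.as_mut_ptr()).unwrap();
    // A 2-byte step lands on the second element, like `ptr.add(1)`.
    unsafe { assert_eq!(ptr.byte_offset(2).read(), 20) };
}
```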
+ /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
+ ///
+ /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is Undefined
+ /// Behavior:
+ ///
+ /// * Both the starting and resulting pointer must be either in bounds or one
+ /// byte past the end of the same [allocated object].
+ ///
+ /// * The computed offset, **in bytes**, cannot overflow an `isize`.
+ ///
+ /// * The offset being in bounds cannot rely on "wrapping around" the address
+ /// space. That is, the infinite-precision sum must fit in a `usize`.
+ ///
+ /// The compiler and standard library generally try to ensure allocations
+ /// never reach a size where an offset is a concern. For instance, `Vec`
+ /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
+ /// `vec.as_ptr().add(vec.len())` is always safe.
+ ///
+ /// Most platforms fundamentally can't even construct such an allocation.
+ /// For instance, no known 64-bit platform can ever serve a request
+ /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
+ /// However, some 32-bit and 16-bit platforms may successfully serve a request for
+ /// more than `isize::MAX` bytes with things like Physical Address
+ /// Extension. As such, memory acquired directly from allocators or memory
+ /// mapped files *may* be too large to handle with this function.
+ ///
+ /// [allocated object]: crate::ptr#allocated-object
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(non_null_convenience)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let s: &str = "123";
+ /// let ptr: NonNull<u8> = NonNull::new(s.as_ptr().cast_mut()).unwrap();
+ ///
+ /// unsafe {
+ /// println!("{}", ptr.add(1).read() as char);
+ /// println!("{}", ptr.add(2).read() as char);
+ /// }
+ /// ```
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn add(self, count: usize) -> Self
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `offset`.
+ // Additionally, the safety contract of `offset` guarantees that the resulting pointer
+ // points to an allocation; there can't be an allocation at null, so it's safe to
+ // construct `NonNull`.
+ unsafe { NonNull { pointer: intrinsics::offset(self.pointer, count) } }
+ }
+
+ /// Calculates the offset from a pointer in bytes (convenience for `.byte_offset(count as isize)`).
+ ///
+ /// `count` is in units of bytes.
+ ///
+ /// This is purely a convenience for casting to a `u8` pointer and
+ /// using [`add`][NonNull::add] on it. See that method for documentation
+ /// and safety requirements.
+ ///
+ /// For non-`Sized` pointees this operation changes only the data pointer,
+ /// leaving the metadata untouched.
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[must_use]
+ #[inline(always)]
+ #[rustc_allow_const_fn_unstable(set_ptr_value)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn byte_add(self, count: usize) -> Self {
+ // SAFETY: the caller must uphold the safety contract for `add` and `byte_add` has the same
+ // safety contract.
+ // Additionally, the safety contract of `add` guarantees that the resulting pointer
+ // points to an allocation; there can't be an allocation at null, so it's safe to
+ // construct `NonNull`.
+ unsafe { NonNull { pointer: self.pointer.byte_add(count) } }
+ }
+
+ /// Calculates the offset from a pointer (convenience for
+ /// `.offset((count as isize).wrapping_neg())`).
+ ///
+ /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is Undefined
+ /// Behavior:
+ ///
+ /// * Both the starting and resulting pointer must be either in bounds or one
+ /// byte past the end of the same [allocated object].
+ ///
+ /// * The computed offset cannot exceed `isize::MAX` **bytes**.
+ ///
+ /// * The offset being in bounds cannot rely on "wrapping around" the address
+ /// space. That is, the infinite-precision sum must fit in a `usize`.
+ ///
+ /// The compiler and standard library generally try to ensure allocations
+ /// never reach a size where an offset is a concern. For instance, `Vec`
+ /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
+ /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
+ ///
+ /// Most platforms fundamentally can't even construct such an allocation.
+ /// For instance, no known 64-bit platform can ever serve a request
+ /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
+ /// However, some 32-bit and 16-bit platforms may successfully serve a request for
+ /// more than `isize::MAX` bytes with things like Physical Address
+ /// Extension. As such, memory acquired directly from allocators or memory
+ /// mapped files *may* be too large to handle with this function.
+ ///
+ /// [allocated object]: crate::ptr#allocated-object
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(non_null_convenience)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let s: &str = "123";
+ ///
+ /// unsafe {
+ /// let end: NonNull<u8> = NonNull::new(s.as_ptr().cast_mut()).unwrap().add(3);
+ /// println!("{}", end.sub(1).read() as char);
+ /// println!("{}", end.sub(2).read() as char);
+ /// }
+ /// ```
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ // We could always go back to wrapping if unchecked becomes unacceptable
+ #[rustc_allow_const_fn_unstable(const_int_unchecked_arith)]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn sub(self, count: usize) -> Self
+ where
+ T: Sized,
+ {
+ if T::IS_ZST {
+ // Pointer arithmetic does nothing when the pointee is a ZST.
+ self
+ } else {
+ // SAFETY: the caller must uphold the safety contract for `offset`.
+ // Because the pointee is *not* a ZST, that means that `count` is
+ // at most `isize::MAX`, and thus the negation cannot overflow.
+ unsafe { self.offset(intrinsics::unchecked_sub(0, count as isize)) }
+ }
+ }
+
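The ZST branch above is what keeps the unchecked negation sound: for a zero-sized pointee any `count` is a no-op, including counts whose `isize` negation would overflow. A sketch (nightly, same feature gate; the `usize::MAX` count is deliberately extreme):

```rust
#![feature(non_null_convenience)]
use std::ptr::NonNull;

fn main() {
    let mut unit = ();
    let ptr: NonNull<()> = NonNull::new(&mut unit).unwrap();
    // Zero-sized steps: even `usize::MAX` never reaches the
    // `unchecked_sub` negation, so `sub` just returns `self`.
    assert_eq!(unsafe { ptr.sub(usize::MAX) }, ptr);
}
```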
+ /// Calculates the offset from a pointer in bytes (convenience for
+ /// `.byte_offset((count as isize).wrapping_neg())`).
+ ///
+ /// `count` is in units of bytes.
+ ///
+ /// This is purely a convenience for casting to a `u8` pointer and
+ /// using [`sub`][NonNull::sub] on it. See that method for documentation
+ /// and safety requirements.
+ ///
+ /// For non-`Sized` pointees this operation changes only the data pointer,
+ /// leaving the metadata untouched.
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[must_use]
+ #[inline(always)]
+ #[rustc_allow_const_fn_unstable(set_ptr_value)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn byte_sub(self, count: usize) -> Self {
+ // SAFETY: the caller must uphold the safety contract for `sub` and `byte_sub` has the same
+ // safety contract.
+ // Additionally, the safety contract of `sub` guarantees that the resulting pointer
+ // points to an allocation; there can't be an allocation at null, so it's safe to
+ // construct `NonNull`.
+ unsafe { NonNull { pointer: self.pointer.byte_sub(count) } }
+ }
+
+ /// Calculates the distance between two pointers. The returned value is in
+ /// units of T: the distance in bytes divided by `mem::size_of::<T>()`.
+ ///
+ /// This is equivalent to `(self as isize - origin as isize) / (mem::size_of::<T>() as isize)`,
+ /// except that it has a lot more opportunities for UB, in exchange for the compiler
+ /// better understanding what you are doing.
+ ///
+ /// The primary motivation of this method is for computing the `len` of an array/slice
+ /// of `T` that you are currently representing as a "start" and "end" pointer
+ /// (and "end" is "one past the end" of the array).
+ /// In that case, `end.offset_from(start)` gets you the length of the array.
+ ///
+ /// All of the following safety requirements are trivially satisfied for this use case.
+ ///
+ /// [`offset`]: #method.offset
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is Undefined
+ /// Behavior:
+ ///
+ /// * Both `self` and `origin` must be either in bounds or one
+ /// byte past the end of the same [allocated object].
+ ///
+ /// * Both pointers must be *derived from* a pointer to the same object.
+ /// (See below for an example.)
+ ///
+ /// * The distance between the pointers, in bytes, must be an exact multiple
+ /// of the size of `T`.
+ ///
+ /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
+ ///
+ /// * The distance being in bounds cannot rely on "wrapping around" the address space.
+ ///
+ /// Rust types are never larger than `isize::MAX` and Rust allocations never wrap around the
+ /// address space, so two pointers within some value of any Rust type `T` will always satisfy
+ /// the last two conditions. The standard library also generally ensures that allocations
+ /// never reach a size where an offset is a concern. For instance, `Vec` and `Box` ensure they
+ /// never allocate more than `isize::MAX` bytes, so `ptr_into_vec.offset_from(vec.as_ptr())`
+ /// always satisfies the last two conditions.
+ ///
+ /// Most platforms fundamentally can't even construct such a large allocation.
+ /// For instance, no known 64-bit platform can ever serve a request
+ /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
+ /// However, some 32-bit and 16-bit platforms may successfully serve a request for
+ /// more than `isize::MAX` bytes with things like Physical Address
+ /// Extension. As such, memory acquired directly from allocators or memory
+ /// mapped files *may* be too large to handle with this function.
+ /// (Note that [`offset`] and [`add`] also have a similar limitation and hence cannot be used on
+ /// such large allocations either.)
+ ///
+ /// The requirement for pointers to be derived from the same allocated object is primarily
+ /// needed for `const`-compatibility: the distance between pointers into *different* allocated
+ /// objects is not known at compile-time. However, the requirement also exists at
+ /// runtime and may be exploited by optimizations. If you wish to compute the difference between
+ /// pointers that are not guaranteed to be from the same allocation, use `(self as isize -
+ /// origin as isize) / mem::size_of::<T>()`.
+ // FIXME: recommend `addr()` instead of `as usize` once that is stable.
+ ///
+ /// [`add`]: #method.add
+ /// [allocated object]: crate::ptr#allocated-object
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `T` is a Zero-Sized Type ("ZST").
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(non_null_convenience)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let a = [0; 5];
+ /// let ptr1: NonNull<u32> = NonNull::from(&a[1]);
+ /// let ptr2: NonNull<u32> = NonNull::from(&a[3]);
+ /// unsafe {
+ /// assert_eq!(ptr2.offset_from(ptr1), 2);
+ /// assert_eq!(ptr1.offset_from(ptr2), -2);
+ /// assert_eq!(ptr1.offset(2), ptr2);
+ /// assert_eq!(ptr2.offset(-2), ptr1);
+ /// }
+ /// ```
+ ///
+ /// *Incorrect* usage:
+ ///
+ /// ```rust,no_run
+ /// #![feature(non_null_convenience, strict_provenance)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let ptr1 = NonNull::new(Box::into_raw(Box::new(0u8))).unwrap();
+ /// let ptr2 = NonNull::new(Box::into_raw(Box::new(1u8))).unwrap();
+ /// let diff = (ptr2.addr().get() as isize).wrapping_sub(ptr1.addr().get() as isize);
+ /// // Make ptr2_other an "alias" of ptr2, but derived from ptr1.
+ /// let ptr2_other = NonNull::new(ptr1.as_ptr().wrapping_byte_offset(diff)).unwrap();
+ /// assert_eq!(ptr2.addr(), ptr2_other.addr());
+ /// // Since ptr2_other and ptr2 are derived from pointers to different objects,
+ /// // computing their offset is undefined behavior, even though
+ /// // they point to the same address!
+ /// unsafe {
+ /// let zero = ptr2_other.offset_from(ptr2); // Undefined Behavior
+ /// }
+ /// ```
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn offset_from(self, origin: NonNull<T>) -> isize
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `offset_from`.
+ unsafe { self.pointer.offset_from(origin.pointer) }
+ }
+
+ /// Calculates the distance between two pointers. The returned value is in
+ /// units of **bytes**.
+ ///
+ /// This is purely a convenience for casting to a `u8` pointer and
+ /// using [`offset_from`][NonNull::offset_from] on it. See that method for
+ /// documentation and safety requirements.
+ ///
+ /// For non-`Sized` pointees this operation considers only the data pointers,
+ /// ignoring the metadata.
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn byte_offset_from<U: ?Sized>(self, origin: NonNull<U>) -> isize {
+ // SAFETY: the caller must uphold the safety contract for `byte_offset_from`.
+ unsafe { self.pointer.byte_offset_from(origin.pointer) }
+ }
+
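For `byte_offset_from`, both pointers just need shared provenance; a small sketch measuring a buffer in bytes (nightly, same gate):

```rust
#![feature(non_null_convenience)]
use std::ptr::NonNull;

fn main() {
    let mut buf = [0u8; 8];
    let start: NonNull<u8> = NonNull::new(buf.as_mut_ptr()).unwrap();
    // `end` is derived from `start`, so the distance is well-defined.
    let end = unsafe { start.add(buf.len()) };
    assert_eq!(unsafe { end.byte_offset_from(start) }, 8);
}
```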
+ // N.B. `wrapping_offset`, `wrapping_add`, etc. are not implemented because they can wrap to null.
+
+ /// Calculates the distance between two pointers, *where it's known that
+ /// `self` is equal to or greater than `origin`*. The returned value is in
+ /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
+ ///
+ /// This computes the same value that [`offset_from`](#method.offset_from)
+ /// would compute, but with the added precondition that the offset is
+ /// guaranteed to be non-negative. This method is equivalent to
+ /// `usize::try_from(self.offset_from(origin)).unwrap_unchecked()`,
+ /// but it provides slightly more information to the optimizer, which can
+ /// sometimes allow it to optimize slightly better with some backends.
+ ///
+ /// This method can be thought of as recovering the `count` that was passed
+ /// to [`add`](#method.add) (or, with the parameters in the other order,
+ /// to [`sub`](#method.sub)). The following are all equivalent, assuming
+ /// that their safety preconditions are met:
+ /// ```rust
+ /// # #![feature(non_null_convenience)]
+ /// # unsafe fn blah(ptr: std::ptr::NonNull<u32>, origin: std::ptr::NonNull<u32>, count: usize) -> bool {
+ /// ptr.sub_ptr(origin) == count
+ /// # &&
+ /// origin.add(count) == ptr
+ /// # &&
+ /// ptr.sub(count) == origin
+ /// # }
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// - The distance between the pointers must be non-negative (`self >= origin`).
+ ///
+ /// - *All* the safety conditions of [`offset_from`](#method.offset_from)
+ /// apply to this method as well; see it for the full details.
+ ///
+ /// Importantly, despite the return type of this method being able to represent
+ /// a larger offset, it's still *not permitted* to pass pointers which differ
+ /// by more than `isize::MAX` *bytes*. As such, the result of this method will
+ /// always be less than or equal to `isize::MAX as usize`.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `T` is a Zero-Sized Type ("ZST").
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(non_null_convenience)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let a = [0; 5];
+ /// let ptr1: NonNull<u32> = NonNull::from(&a[1]);
+ /// let ptr2: NonNull<u32> = NonNull::from(&a[3]);
+ /// unsafe {
+ /// assert_eq!(ptr2.sub_ptr(ptr1), 2);
+ /// assert_eq!(ptr1.add(2), ptr2);
+ /// assert_eq!(ptr2.sub(2), ptr1);
+ /// assert_eq!(ptr2.sub_ptr(ptr2), 0);
+ /// }
+ ///
+ /// // This would be incorrect, as the pointers are not correctly ordered:
+ /// // ptr1.sub_ptr(ptr2)
+ /// ```
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ // #[unstable(feature = "ptr_sub_ptr", issue = "95892")]
+ // #[rustc_const_unstable(feature = "const_ptr_sub_ptr", issue = "95892")]
+ #[inline]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn sub_ptr(self, subtracted: NonNull<T>) -> usize
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `sub_ptr`.
+ unsafe { self.pointer.sub_ptr(subtracted.pointer) }
+ }
+
+ /// Reads the value from `self` without moving it. This leaves the
+ /// memory in `self` unchanged.
+ ///
+ /// See [`ptr::read`] for safety concerns and examples.
+ ///
+ /// [`ptr::read`]: crate::ptr::read()
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn read(self) -> T
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `read`.
+ unsafe { ptr::read(self.pointer) }
+ }
+
+ /// Performs a volatile read of the value from `self` without moving it. This
+ /// leaves the memory in `self` unchanged.
+ ///
+ /// Volatile operations are intended to act on I/O memory, and are guaranteed
+ /// to not be elided or reordered by the compiler across other volatile
+ /// operations.
+ ///
+ /// See [`ptr::read_volatile`] for safety concerns and examples.
+ ///
+ /// [`ptr::read_volatile`]: crate::ptr::read_volatile()
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
#[inline]
- pub(crate) const unsafe fn add(self, delta: usize) -> Self
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub unsafe fn read_volatile(self) -> T
where
T: Sized,
{
- // SAFETY: We require that the delta stays in-bounds of the object, and
- // thus it cannot become null, as that would require wrapping the
- // address space, which no legal objects are allowed to do.
- // And the caller promised the `delta` is sound to add.
- unsafe { NonNull { pointer: self.pointer.add(delta) } }
+ // SAFETY: the caller must uphold the safety contract for `read_volatile`.
+ unsafe { ptr::read_volatile(self.pointer) }
}
- /// See [`pointer::sub`] for semantics and safety requirements.
+ /// Reads the value from `self` without moving it. This leaves the
+ /// memory in `self` unchanged.
+ ///
+ /// Unlike `read`, the pointer may be unaligned.
+ ///
+ /// See [`ptr::read_unaligned`] for safety concerns and examples.
+ ///
+ /// [`ptr::read_unaligned`]: crate::ptr::read_unaligned()
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
#[inline]
- pub(crate) const unsafe fn sub(self, delta: usize) -> Self
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn read_unaligned(self) -> T
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `read_unaligned`.
+ unsafe { ptr::read_unaligned(self.pointer) }
+ }
+
+ /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
+ /// and destination may overlap.
+ ///
+ /// NOTE: this has the *same* argument order as [`ptr::copy`].
+ ///
+ /// See [`ptr::copy`] for safety concerns and examples.
+ ///
+ /// [`ptr::copy`]: crate::ptr::copy()
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn copy_to(self, dest: NonNull<T>, count: usize)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `copy`.
+ unsafe { ptr::copy(self.pointer, dest.as_ptr(), count) }
+ }
+
+ /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
+ /// and destination may *not* overlap.
+ ///
+ /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`].
+ ///
+ /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples.
+ ///
+ /// [`ptr::copy_nonoverlapping`]: crate::ptr::copy_nonoverlapping()
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn copy_to_nonoverlapping(self, dest: NonNull<T>, count: usize)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `copy_nonoverlapping`.
+ unsafe { ptr::copy_nonoverlapping(self.pointer, dest.as_ptr(), count) }
+ }
+
+ /// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
+ /// and destination may overlap.
+ ///
+ /// NOTE: this has the *opposite* argument order of [`ptr::copy`].
+ ///
+ /// See [`ptr::copy`] for safety concerns and examples.
+ ///
+ /// [`ptr::copy`]: crate::ptr::copy()
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn copy_from(self, src: NonNull<T>, count: usize)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `copy`.
+ unsafe { ptr::copy(src.pointer, self.as_ptr(), count) }
+ }
+
+ /// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
+ /// and destination may *not* overlap.
+ ///
+ /// NOTE: this has the *opposite* argument order of [`ptr::copy_nonoverlapping`].
+ ///
+ /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples.
+ ///
+ /// [`ptr::copy_nonoverlapping`]: crate::ptr::copy_nonoverlapping()
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn copy_from_nonoverlapping(self, src: NonNull<T>, count: usize)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `copy_nonoverlapping`.
+ unsafe { ptr::copy_nonoverlapping(src.pointer, self.as_ptr(), count) }
+ }
+
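The NOTEs above are the whole point of having both directions: `copy_to*` reads "self -> dest", `copy_from*` flips that to "src -> self". A sketch showing they agree (nightly, same gate; array names are illustrative):

```rust
#![feature(non_null_convenience)]
use std::ptr::NonNull;

fn main() {
    let mut src = [1u8, 2, 3];
    let mut dst = [0u8; 3];
    let s = NonNull::new(src.as_mut_ptr()).unwrap();
    let d = NonNull::new(dst.as_mut_ptr()).unwrap();
    unsafe {
        s.copy_to_nonoverlapping(d, 3);   // dst <- src, "self -> dest"
        d.copy_from_nonoverlapping(s, 3); // dst <- src, "src -> self"
    }
    assert_eq!(src, dst);
}
```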
+ /// Executes the destructor (if any) of the pointed-to value.
+ ///
+ /// See [`ptr::drop_in_place`] for safety concerns and examples.
+ ///
+ /// [`ptr::drop_in_place`]: crate::ptr::drop_in_place()
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline(always)]
+ pub unsafe fn drop_in_place(self) {
+ // SAFETY: the caller must uphold the safety contract for `drop_in_place`.
+ unsafe { ptr::drop_in_place(self.as_ptr()) }
+ }
+
+ /// Overwrites a memory location with the given value without reading or
+ /// dropping the old value.
+ ///
+ /// See [`ptr::write`] for safety concerns and examples.
+ ///
+ /// [`ptr::write`]: crate::ptr::write()
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ //#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn write(self, val: T)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `write`.
+ unsafe { ptr::write(self.as_ptr(), val) }
+ }
+
+ /// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
+ /// bytes of memory starting at `self` to `val`.
+ ///
+ /// See [`ptr::write_bytes`] for safety concerns and examples.
+ ///
+ /// [`ptr::write_bytes`]: crate::ptr::write_bytes()
+ #[doc(alias = "memset")]
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ //#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn write_bytes(self, val: u8, count: usize)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `write_bytes`.
+ unsafe { ptr::write_bytes(self.as_ptr(), val, count) }
+ }
+
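Since `count` is in elements rather than bytes, a `write_bytes` call on a `u32` buffer touches `count * size_of::<u32>()` bytes. A nightly sketch:

```rust
#![feature(non_null_convenience)]
use std::ptr::NonNull;

fn main() {
    let mut buf = [0xFFu32; 4];
    let ptr = NonNull::new(buf.as_mut_ptr()).unwrap();
    // memset semantics: 4 elements * 4 bytes = 16 zero bytes written.
    unsafe { ptr.write_bytes(0, buf.len()) };
    assert_eq!(buf, [0u32; 4]);
}
```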
+ /// Performs a volatile write of a memory location with the given value without
+ /// reading or dropping the old value.
+ ///
+ /// Volatile operations are intended to act on I/O memory, and are guaranteed
+ /// to not be elided or reordered by the compiler across other volatile
+ /// operations.
+ ///
+ /// See [`ptr::write_volatile`] for safety concerns and examples.
+ ///
+ /// [`ptr::write_volatile`]: crate::ptr::write_volatile()
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub unsafe fn write_volatile(self, val: T)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `write_volatile`.
+ unsafe { ptr::write_volatile(self.as_ptr(), val) }
+ }
+
+ /// Overwrites a memory location with the given value without reading or
+ /// dropping the old value.
+ ///
+ /// Unlike `write`, the pointer may be unaligned.
+ ///
+ /// See [`ptr::write_unaligned`] for safety concerns and examples.
+ ///
+ /// [`ptr::write_unaligned`]: crate::ptr::write_unaligned()
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ //#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn write_unaligned(self, val: T)
where
T: Sized,
{
- // SAFETY: We require that the delta stays in-bounds of the object, and
- // thus it cannot become null, as no legal objects can be allocated
- // in such a way that the null address is part of them.
- // And the caller promised the `delta` is sound to subtract.
+ // SAFETY: the caller must uphold the safety contract for `write_unaligned`.
+ unsafe { ptr::write_unaligned(self.as_ptr(), val) }
}
- /// See [`pointer::sub_ptr`] for semantics and safety requirements.
+ /// Replaces the value at `self` with `src`, returning the old
+ /// value, without dropping either.
+ ///
+ /// See [`ptr::replace`] for safety concerns and examples.
+ ///
+ /// [`ptr::replace`]: crate::ptr::replace()
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline(always)]
+ pub unsafe fn replace(self, src: T) -> T
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `replace`.
+ unsafe { ptr::replace(self.as_ptr(), src) }
+ }
+
+ /// Swaps the values at two mutable locations of the same type, without
+ /// deinitializing either. They may overlap, unlike `mem::swap` which is
+ /// otherwise equivalent.
+ ///
+ /// See [`ptr::swap`] for safety concerns and examples.
+ ///
+ /// [`ptr::swap`]: crate::ptr::swap()
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ //#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+ #[inline(always)]
+ pub const unsafe fn swap(self, with: NonNull<T>)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `swap`.
+ unsafe { ptr::swap(self.as_ptr(), with.as_ptr()) }
+ }
+
+ /// Computes the offset that needs to be applied to the pointer in order to make it aligned to
+ /// `align`.
+ ///
+ /// If it is not possible to align the pointer, the implementation returns
+ /// `usize::MAX`. It is permissible for the implementation to *always*
+ /// return `usize::MAX`. Only your algorithm's performance can depend
+ /// on getting a usable offset here, not its correctness.
+ ///
+ /// The offset is expressed in number of `T` elements, and not bytes.
+ ///
+ /// There are no guarantees whatsoever that offsetting the pointer will not overflow or go
+ /// beyond the allocation that the pointer points into. It is up to the caller to ensure that
+ /// the returned offset is correct in all terms other than alignment.
+ ///
+ /// # Panics
+ ///
+ /// The function panics if `align` is not a power-of-two.
+ ///
+ /// # Examples
+ ///
+ /// Accessing adjacent `u8` as `u16`
+ ///
+ /// ```
+ /// #![feature(non_null_convenience)]
+ /// use std::mem::align_of;
+ /// use std::ptr::NonNull;
+ ///
+ /// # unsafe {
+ /// let x = [5_u8, 6, 7, 8, 9];
+ /// let ptr = NonNull::new(x.as_ptr() as *mut u8).unwrap();
+ /// let offset = ptr.align_offset(align_of::<u16>());
+ ///
+ /// if offset < x.len() - 1 {
+ /// let u16_ptr = ptr.add(offset).cast::<u16>();
+ /// assert!(u16_ptr.read() == u16::from_ne_bytes([5, 6]) || u16_ptr.read() == u16::from_ne_bytes([6, 7]));
+ /// } else {
+ /// // while the pointer can be aligned via `offset`, it would point
+ /// // outside the allocation
+ /// }
+ /// # }
+ /// ```
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ //#[rustc_const_unstable(feature = "const_align_offset", issue = "90962")]
+ #[must_use]
#[inline]
- pub(crate) const unsafe fn sub_ptr(self, subtrahend: Self) -> usize
+ pub const fn align_offset(self, align: usize) -> usize
where
T: Sized,
{
- // SAFETY: The caller promised that this is safe to do, and
- // the non-nullness is irrelevant to the operation.
- unsafe { self.pointer.sub_ptr(subtrahend.pointer) }
+ if !align.is_power_of_two() {
+ panic!("align_offset: align is not a power-of-two");
+ }
+
+ {
+ // SAFETY: `align` has been checked to be a power of 2 above.
+ unsafe { ptr::align_offset(self.pointer, align) }
+ }
+ }
+
+ /// Returns whether the pointer is properly aligned for `T`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// use std::ptr::NonNull;
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// let data = AlignedI32(42);
+ /// let ptr = NonNull::<AlignedI32>::from(&data);
+ ///
+ /// assert!(ptr.is_aligned());
+ /// assert!(!NonNull::new(ptr.as_ptr().wrapping_byte_add(1)).unwrap().is_aligned());
+ /// ```
+ ///
+ /// # At compiletime
+ /// **Note: Alignment at compiletime is experimental and subject to change. See the
+ /// [tracking issue] for details.**
+ ///
+ /// At compiletime, the compiler may not know where a value will end up in memory.
+ /// Calling this function on a pointer created from a reference at compiletime will only
+ /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
+ /// is never aligned if cast to a type with a stricter alignment than the reference's
+ /// underlying allocation.
+ ///
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ /// #![feature(non_null_convenience)]
+ /// #![feature(const_option)]
+ /// #![feature(const_nonnull_new)]
+ /// use std::ptr::NonNull;
+ ///
+ /// // On some platforms, the alignment of primitives is less than their size.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ /// #[repr(align(8))]
+ /// struct AlignedI64(i64);
+ ///
+ /// const _: () = {
+ /// let data = [AlignedI32(42), AlignedI32(42)];
+ /// let ptr = NonNull::<AlignedI32>::new(&data[0] as *const _ as *mut _).unwrap();
+ /// assert!(ptr.is_aligned());
+ ///
+ /// // At runtime either `ptr1` or `ptr2` would be aligned, but at compiletime neither is aligned.
+ /// let ptr1 = ptr.cast::<AlignedI64>();
+ /// let ptr2 = unsafe { ptr.add(1).cast::<AlignedI64>() };
+ /// assert!(!ptr1.is_aligned());
+ /// assert!(!ptr2.is_aligned());
+ /// };
+ /// ```
+ ///
+ /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
+ /// pointer is aligned, even if the compiletime pointer wasn't aligned.
+ ///
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of primitives is less than their size.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ /// #[repr(align(8))]
+ /// struct AlignedI64(i64);
+ ///
+ /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
+ /// const COMPTIME_PTR: *const AlignedI32 = &AlignedI32(42);
+ /// const _: () = assert!(!COMPTIME_PTR.cast::<AlignedI64>().is_aligned());
+ /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).cast::<AlignedI64>().is_aligned());
+ ///
+ /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
+ /// let runtime_ptr = COMPTIME_PTR;
+ /// assert_ne!(
+ /// runtime_ptr.cast::<AlignedI64>().is_aligned(),
+ /// runtime_ptr.wrapping_add(1).cast::<AlignedI64>().is_aligned(),
+ /// );
+ /// ```
+ ///
+ /// If a pointer is created from a fixed address, this function behaves the same during
+ /// runtime and compiletime.
+ ///
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ /// #![feature(const_option)]
+ /// #![feature(const_nonnull_new)]
+ /// use std::ptr::NonNull;
+ ///
+ /// // On some platforms, the alignment of primitives is less than their size.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ /// #[repr(align(8))]
+ /// struct AlignedI64(i64);
+ ///
+ /// const _: () = {
+ /// let ptr = NonNull::new(40 as *mut AlignedI32).unwrap();
+ /// assert!(ptr.is_aligned());
+ ///
+ /// // For pointers with a known address, runtime and compiletime behavior are identical.
+ /// let ptr1 = ptr.cast::<AlignedI64>();
+ /// let ptr2 = NonNull::new(ptr.as_ptr().wrapping_add(1)).unwrap().cast::<AlignedI64>();
+ /// assert!(ptr1.is_aligned());
+ /// assert!(!ptr2.is_aligned());
+ /// };
+ /// ```
+ ///
+ /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
+ #[unstable(feature = "pointer_is_aligned", issue = "96284")]
+ #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
+ #[must_use]
+ #[inline]
+ pub const fn is_aligned(self) -> bool
+ where
+ T: Sized,
+ {
+ self.pointer.is_aligned()
+ }
+
+ /// Returns whether the pointer is aligned to `align`.
+ ///
+ /// For non-`Sized` pointees this operation considers only the data pointer,
+ /// ignoring the metadata.
+ ///
+ /// # Panics
+ ///
+ /// The function panics if `align` is not a power-of-two (this includes 0).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// let data = AlignedI32(42);
+ /// let ptr = &data as *const AlignedI32;
+ ///
+ /// assert!(ptr.is_aligned_to(1));
+ /// assert!(ptr.is_aligned_to(2));
+ /// assert!(ptr.is_aligned_to(4));
+ ///
+ /// assert!(ptr.wrapping_byte_add(2).is_aligned_to(2));
+ /// assert!(!ptr.wrapping_byte_add(2).is_aligned_to(4));
+ ///
+ /// assert_ne!(ptr.is_aligned_to(8), ptr.wrapping_add(1).is_aligned_to(8));
+ /// ```
+ ///
+ /// # At compiletime
+ /// **Note: Alignment at compiletime is experimental and subject to change. See the
+ /// [tracking issue] for details.**
+ ///
+ /// At compiletime, the compiler may not know where a value will end up in memory.
+ /// Calling this function on a pointer created from a reference at compiletime will only
+ /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
+ /// cannot be more strictly aligned than the reference's underlying allocation.
+ ///
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// const _: () = {
+ /// let data = AlignedI32(42);
+ /// let ptr = &data as *const AlignedI32;
+ ///
+ /// assert!(ptr.is_aligned_to(1));
+ /// assert!(ptr.is_aligned_to(2));
+ /// assert!(ptr.is_aligned_to(4));
+ ///
+ /// // At compiletime, we know for sure that the pointer isn't aligned to 8.
+ /// assert!(!ptr.is_aligned_to(8));
+ /// assert!(!ptr.wrapping_add(1).is_aligned_to(8));
+ /// };
+ /// ```
+ ///
+ /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
+ /// pointer is aligned, even if the compiletime pointer wasn't aligned.
+ ///
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
+ /// const COMPTIME_PTR: *const AlignedI32 = &AlignedI32(42);
+ /// const _: () = assert!(!COMPTIME_PTR.is_aligned_to(8));
+ /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).is_aligned_to(8));
+ ///
+ /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
+ /// let runtime_ptr = COMPTIME_PTR;
+ /// assert_ne!(
+ /// runtime_ptr.is_aligned_to(8),
+ /// runtime_ptr.wrapping_add(1).is_aligned_to(8),
+ /// );
+ /// ```
+ ///
+ /// If a pointer is created from a fixed address, this function behaves the same during
+ /// runtime and compiletime.
+ ///
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// const _: () = {
+ /// let ptr = 40 as *const u8;
+ /// assert!(ptr.is_aligned_to(1));
+ /// assert!(ptr.is_aligned_to(2));
+ /// assert!(ptr.is_aligned_to(4));
+ /// assert!(ptr.is_aligned_to(8));
+ /// assert!(!ptr.is_aligned_to(16));
+ /// };
+ /// ```
+ ///
+ /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
+ #[unstable(feature = "pointer_is_aligned", issue = "96284")]
+ #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
+ #[must_use]
+ #[inline]
+ pub const fn is_aligned_to(self, align: usize) -> bool {
+ self.pointer.is_aligned_to(align)
}
}
@@ -783,6 +1791,7 @@ impl<T: ?Sized> Eq for NonNull<T> {}
#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> PartialEq for NonNull<T> {
#[inline]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn eq(&self, other: &Self) -> bool {
self.as_ptr() == other.as_ptr()
}
diff --git a/library/core/src/ptr/unique.rs b/library/core/src/ptr/unique.rs
index bf8b86677..067f1541e 100644
--- a/library/core/src/ptr/unique.rs
+++ b/library/core/src/ptr/unique.rs
@@ -1,4 +1,3 @@
-use crate::convert::From;
use crate::fmt;
use crate::marker::{PhantomData, Unsize};
use crate::ops::{CoerceUnsized, DispatchFromDyn};
diff --git a/library/core/src/result.rs b/library/core/src/result.rs
index 50127b27f..eff1b9b59 100644
--- a/library/core/src/result.rs
+++ b/library/core/src/result.rs
@@ -835,8 +835,6 @@ impl<T, E> Result<T, E> {
/// # Examples
///
/// ```
- /// #![feature(result_option_inspect)]
- ///
/// let x: u8 = "4"
/// .parse::<u8>()
/// .inspect(|x| println!("original: {x}"))
@@ -844,7 +842,7 @@ impl<T, E> Result<T, E> {
/// .expect("failed to parse number");
/// ```
#[inline]
- #[unstable(feature = "result_option_inspect", issue = "91345")]
+ #[stable(feature = "result_option_inspect", since = "1.76.0")]
pub fn inspect<F: FnOnce(&T)>(self, f: F) -> Self {
if let Ok(ref t) = self {
f(t);
@@ -858,8 +856,6 @@ impl<T, E> Result<T, E> {
/// # Examples
///
/// ```
- /// #![feature(result_option_inspect)]
- ///
/// use std::{fs, io};
///
/// fn read() -> io::Result<String> {
@@ -868,7 +864,7 @@ impl<T, E> Result<T, E> {
/// }
/// ```
#[inline]
- #[unstable(feature = "result_option_inspect", issue = "91345")]
+ #[stable(feature = "result_option_inspect", since = "1.76.0")]
pub fn inspect_err<F: FnOnce(&E)>(self, f: F) -> Self {
if let Err(ref e) = self {
f(e);
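Once stabilized, the pair reads naturally in a chain: observe the `Ok` or `Err` value without consuming the `Result`. A small usage sketch (requires a toolchain where this stabilization has landed):

```rust
fn main() {
    let n: Result<i32, std::num::ParseIntError> = "42"
        .parse::<i32>()
        .inspect(|v| println!("parsed: {v}"))
        .inspect_err(|e| eprintln!("parse failed: {e}"));
    assert_eq!(n.unwrap(), 42);
}
```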
diff --git a/library/core/src/slice/ascii.rs b/library/core/src/slice/ascii.rs
index 4cfccd2e3..ce04a9f40 100644
--- a/library/core/src/slice/ascii.rs
+++ b/library/core/src/slice/ascii.rs
@@ -125,6 +125,7 @@ impl [u8] {
/// assert_eq!(b"".trim_ascii_start(), b"");
/// ```
#[unstable(feature = "byte_slice_trim_ascii", issue = "94035")]
+ #[inline]
pub const fn trim_ascii_start(&self) -> &[u8] {
let mut bytes = self;
// Note: A pattern matching based approach (instead of indexing) allows
@@ -154,6 +155,7 @@ impl [u8] {
/// assert_eq!(b"".trim_ascii_end(), b"");
/// ```
#[unstable(feature = "byte_slice_trim_ascii", issue = "94035")]
+ #[inline]
pub const fn trim_ascii_end(&self) -> &[u8] {
let mut bytes = self;
// Note: A pattern matching based approach (instead of indexing) allows
@@ -184,6 +186,7 @@ impl [u8] {
/// assert_eq!(b"".trim_ascii(), b"");
/// ```
#[unstable(feature = "byte_slice_trim_ascii", issue = "94035")]
+ #[inline]
pub const fn trim_ascii(&self) -> &[u8] {
self.trim_ascii_start().trim_ascii_end()
}
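With `#[inline]` added, these remain `const fn`s under the `byte_slice_trim_ascii` gate; a nightly sketch of using them in a constant:

```rust
#![feature(byte_slice_trim_ascii)]

fn main() {
    // Usable in const contexts: ASCII whitespace stripped from both ends.
    const TRIMMED: &[u8] = b"\r\n hello world \t".trim_ascii();
    assert_eq!(TRIMMED, b"hello world");
}
```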
diff --git a/library/core/src/slice/index.rs b/library/core/src/slice/index.rs
index 1da3a87e1..373b4aee4 100644
--- a/library/core/src/slice/index.rs
+++ b/library/core/src/slice/index.rs
@@ -1,9 +1,9 @@
//! Indexing implementations for `[T]`.
-use crate::intrinsics::assert_unsafe_precondition;
use crate::intrinsics::const_eval_select;
use crate::intrinsics::unchecked_sub;
use crate::ops;
+use crate::panic::debug_assert_nounwind;
use crate::ptr;
#[stable(feature = "rust1", since = "1.0.0")]
@@ -225,31 +225,28 @@ unsafe impl<T> SliceIndex<[T]> for usize {
#[inline]
unsafe fn get_unchecked(self, slice: *const [T]) -> *const T {
- let this = self;
+ debug_assert_nounwind!(
+ self < slice.len(),
+ "slice::get_unchecked requires that the index is within the slice",
+ );
// SAFETY: the caller guarantees that `slice` is not dangling, so it
// cannot be longer than `isize::MAX`. They also guarantee that
// `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
// so the call to `add` is safe.
unsafe {
- assert_unsafe_precondition!(
- "slice::get_unchecked requires that the index is within the slice",
- [T](this: usize, slice: *const [T]) => this < slice.len()
- );
+ crate::intrinsics::assume(self < slice.len());
slice.as_ptr().add(self)
}
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut T {
- let this = self;
+ debug_assert_nounwind!(
+ self < slice.len(),
+ "slice::get_unchecked_mut requires that the index is within the slice",
+ );
// SAFETY: see comments for `get_unchecked` above.
- unsafe {
- assert_unsafe_precondition!(
- "slice::get_unchecked_mut requires that the index is within the slice",
- [T](this: usize, slice: *mut [T]) => this < slice.len()
- );
- slice.as_mut_ptr().add(self)
- }
+ unsafe { slice.as_mut_ptr().add(self) }
}
#[inline]
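This file's hunks all follow the same migration: the old `assert_unsafe_precondition!` block becomes a `debug_assert_nounwind!` up front plus a plain `unsafe` body. A rough, stand-alone model of what such a macro provides, not the real core-internal definition: the check runs only under `debug_assertions`, and it aborts rather than unwinds so the enclosing function keeps its nounwind guarantee.

```rust
// Illustrative stand-in for core's `debug_assert_nounwind!`; the real
// macro lives in `core::panic` and is not spelled like this.
macro_rules! debug_assert_nounwind_model {
    ($cond:expr, $msg:literal $(,)?) => {
        if cfg!(debug_assertions) && !$cond {
            // Abort instead of panicking: aborting cannot unwind, so the
            // enclosing function stays nounwind.
            eprintln!($msg);
            std::process::abort();
        }
    };
}

fn main() {
    debug_assert_nounwind_model!(2 + 2 == 4, "arithmetic is broken");
    println!("precondition held");
}
```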
@@ -293,32 +290,25 @@ unsafe impl<T> SliceIndex<[T]> for ops::IndexRange {
#[inline]
unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
- let end = self.end();
+ debug_assert_nounwind!(
+ self.end() <= slice.len(),
+ "slice::get_unchecked requires that the index is within the slice"
+ );
// SAFETY: the caller guarantees that `slice` is not dangling, so it
// cannot be longer than `isize::MAX`. They also guarantee that
// `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
// so the call to `add` is safe.
-
- unsafe {
- assert_unsafe_precondition!(
- "slice::get_unchecked requires that the index is within the slice",
- [T](end: usize, slice: *const [T]) => end <= slice.len()
- );
- ptr::slice_from_raw_parts(slice.as_ptr().add(self.start()), self.len())
- }
+ unsafe { ptr::slice_from_raw_parts(slice.as_ptr().add(self.start()), self.len()) }
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
- let end = self.end();
+ debug_assert_nounwind!(
+ self.end() <= slice.len(),
+ "slice::get_unchecked_mut requires that the index is within the slice",
+ );
// SAFETY: see comments for `get_unchecked` above.
- unsafe {
- assert_unsafe_precondition!(
- "slice::get_unchecked_mut requires that the index is within the slice",
- [T](end: usize, slice: *mut [T]) => end <= slice.len()
- );
- ptr::slice_from_raw_parts_mut(slice.as_mut_ptr().add(self.start()), self.len())
- }
+ unsafe { ptr::slice_from_raw_parts_mut(slice.as_mut_ptr().add(self.start()), self.len()) }
}
#[inline]
@@ -369,17 +359,15 @@ unsafe impl<T> SliceIndex<[T]> for ops::Range<usize> {
#[inline]
unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
- let this = ops::Range { ..self };
+ debug_assert_nounwind!(
+ self.end >= self.start && self.end <= slice.len(),
+ "slice::get_unchecked requires that the range is within the slice",
+ );
// SAFETY: the caller guarantees that `slice` is not dangling, so it
// cannot be longer than `isize::MAX`. They also guarantee that
// `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
// so the call to `add` is safe and the length calculation cannot overflow.
unsafe {
- assert_unsafe_precondition!(
- "slice::get_unchecked requires that the range is within the slice",
- [T](this: ops::Range<usize>, slice: *const [T]) =>
- this.end >= this.start && this.end <= slice.len()
- );
let new_len = unchecked_sub(self.end, self.start);
ptr::slice_from_raw_parts(slice.as_ptr().add(self.start), new_len)
}
@@ -387,14 +375,12 @@ unsafe impl<T> SliceIndex<[T]> for ops::Range<usize> {
#[inline]
unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
- let this = ops::Range { ..self };
+ debug_assert_nounwind!(
+ self.end >= self.start && self.end <= slice.len(),
+ "slice::get_unchecked_mut requires that the range is within the slice",
+ );
// SAFETY: see comments for `get_unchecked` above.
unsafe {
- assert_unsafe_precondition!(
- "slice::get_unchecked_mut requires that the range is within the slice",
- [T](this: ops::Range<usize>, slice: *mut [T]) =>
- this.end >= this.start && this.end <= slice.len()
- );
let new_len = unchecked_sub(self.end, self.start);
ptr::slice_from_raw_parts_mut(slice.as_mut_ptr().add(self.start), new_len)
}
diff --git a/library/core/src/slice/iter.rs b/library/core/src/slice/iter.rs
index 5e229bf52..fc54ea237 100644
--- a/library/core/src/slice/iter.rs
+++ b/library/core/src/slice/iter.rs
@@ -10,7 +10,7 @@ use crate::intrinsics::assume;
use crate::iter::{
FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce, UncheckedIterator,
};
-use crate::marker::{PhantomData, Send, Sized, Sync};
+use crate::marker::PhantomData;
use crate::mem::{self, SizedTypeProperties};
use crate::num::NonZeroUsize;
use crate::ptr::{self, invalid, invalid_mut, NonNull};
diff --git a/library/core/src/slice/memchr.rs b/library/core/src/slice/memchr.rs
index 3a8b59d72..da7ceb2dd 100644
--- a/library/core/src/slice/memchr.rs
+++ b/library/core/src/slice/memchr.rs
@@ -20,20 +20,6 @@ const fn contains_zero_byte(x: usize) -> bool {
x.wrapping_sub(LO_USIZE) & !x & HI_USIZE != 0
}
-#[inline]
-#[cfg(target_pointer_width = "16")]
-#[rustc_const_stable(feature = "const_memchr", since = "1.65.0")]
-const fn repeat_byte(b: u8) -> usize {
- (b as usize) << 8 | b as usize
-}
-
-#[inline]
-#[cfg(not(target_pointer_width = "16"))]
-#[rustc_const_stable(feature = "const_memchr", since = "1.65.0")]
-const fn repeat_byte(b: u8) -> usize {
- (b as usize) * (usize::MAX / 255)
-}
-
/// Returns the first index matching the byte `x` in `text`.
#[inline]
#[must_use]
@@ -93,7 +79,7 @@ const fn memchr_aligned(x: u8, text: &[u8]) -> Option<usize> {
}
// search the body of the text
- let repeated_x = repeat_byte(x);
+ let repeated_x = usize::repeat_u8(x);
while offset <= len - 2 * USIZE_BYTES {
// SAFETY: the while's predicate guarantees a distance of at least 2 * usize_bytes
// between the offset and the end of the slice.
@@ -149,7 +135,7 @@ pub fn memrchr(x: u8, text: &[u8]) -> Option<usize> {
// Search the body of the text, make sure we don't cross min_aligned_offset.
// offset is always aligned, so just testing `>` is sufficient and avoids possible
// overflow.
- let repeated_x = repeat_byte(x);
+ let repeated_x = usize::repeat_u8(x);
let chunk_bytes = mem::size_of::<Chunk>();
while offset > min_aligned_offset {
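The `repeat_byte` helpers are dropped in favor of a shared `usize::repeat_u8`, but the word-at-a-time search itself is unchanged: XOR the word with the repeated needle byte, then use `contains_zero_byte` to test whether any byte lane went to zero. A stand-alone sketch of the trick (`repeat` models the core-internal `usize::repeat_u8`; the packed literal assumes a 64-bit target):

```rust
const LO: usize = usize::MAX / 255; // 0x0101…01 on any pointer width
const HI: usize = LO * 0x80;        // 0x8080…80

// Models core-internal `usize::repeat_u8`: broadcast one byte to all lanes.
fn repeat(b: u8) -> usize {
    (b as usize) * LO
}

// True iff some byte of `x` is zero: only a zero byte both borrows out of
// the subtraction and keeps its high bit clear in `!x`.
fn contains_zero_byte(x: usize) -> bool {
    x.wrapping_sub(LO) & !x & HI != 0
}

fn main() {
    // Pack one word of haystack (8 bytes, 64-bit only).
    let word = usize::from_ne_bytes(*b"findme!?");
    assert!(contains_zero_byte(word ^ repeat(b'm')));  // 'm' is present
    assert!(!contains_zero_byte(word ^ repeat(b'z'))); // 'z' is not
}
```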
diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs
index 6cf5d48a1..b14d97127 100644
--- a/library/core/src/slice/mod.rs
+++ b/library/core/src/slice/mod.rs
@@ -6,18 +6,14 @@
#![stable(feature = "rust1", since = "1.0.0")]
-use crate::cmp::Ordering::{self, Greater, Less};
+use crate::cmp::Ordering::{self, Equal, Greater, Less};
use crate::fmt;
-use crate::intrinsics::{assert_unsafe_precondition, exact_div};
-use crate::marker::Copy;
+use crate::intrinsics::exact_div;
use crate::mem::{self, SizedTypeProperties};
use crate::num::NonZeroUsize;
-use crate::ops::{Bound, FnMut, OneSidedRange, Range, RangeBounds};
-use crate::option::Option;
-use crate::option::Option::{None, Some};
+use crate::ops::{Bound, OneSidedRange, Range, RangeBounds};
+use crate::panic::debug_assert_nounwind;
use crate::ptr;
-use crate::result::Result;
-use crate::result::Result::{Err, Ok};
use crate::simd::{self, Simd};
use crate::slice;
@@ -929,14 +925,14 @@ impl<T> [T] {
#[unstable(feature = "slice_swap_unchecked", issue = "88539")]
#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
pub const unsafe fn swap_unchecked(&mut self, a: usize, b: usize) {
- let this = self;
- let ptr = this.as_mut_ptr();
+ debug_assert_nounwind!(
+ a < self.len() && b < self.len(),
+ "slice::swap_unchecked requires that the indices are within the slice",
+ );
+
+ let ptr = self.as_mut_ptr();
// SAFETY: caller has to guarantee that `a < self.len()` and `b < self.len()`
unsafe {
- assert_unsafe_precondition!(
- "slice::swap_unchecked requires that the indices are within the slice",
- [T](a: usize, b: usize, this: &mut [T]) => a < this.len() && b < this.len()
- );
ptr::swap(ptr.add(a), ptr.add(b));
}
}
@@ -1044,11 +1040,11 @@ impl<T> [T] {
/// # Examples
///
/// ```
- /// let slice = ['r', 'u', 's', 't'];
- /// let mut iter = slice.windows(2);
- /// assert_eq!(iter.next().unwrap(), &['r', 'u']);
- /// assert_eq!(iter.next().unwrap(), &['u', 's']);
- /// assert_eq!(iter.next().unwrap(), &['s', 't']);
+ /// let slice = ['l', 'o', 'r', 'e', 'm'];
+ /// let mut iter = slice.windows(3);
+ /// assert_eq!(iter.next().unwrap(), &['l', 'o', 'r']);
+ /// assert_eq!(iter.next().unwrap(), &['o', 'r', 'e']);
+ /// assert_eq!(iter.next().unwrap(), &['r', 'e', 'm']);
/// assert!(iter.next().is_none());
/// ```
///
@@ -1269,15 +1265,12 @@ impl<T> [T] {
#[inline]
#[must_use]
pub const unsafe fn as_chunks_unchecked<const N: usize>(&self) -> &[[T; N]] {
- let this = self;
+ debug_assert_nounwind!(
+ N != 0 && self.len() % N == 0,
+ "slice::as_chunks_unchecked requires `N != 0` and the slice to split exactly into `N`-element chunks",
+ );
// SAFETY: Caller must guarantee that `N` is nonzero and exactly divides the slice length
- let new_len = unsafe {
- assert_unsafe_precondition!(
- "slice::as_chunks_unchecked requires `N != 0` and the slice to split exactly into `N`-element chunks",
- [T](this: &[T], N: usize) => N != 0 && this.len() % N == 0
- );
- exact_div(self.len(), N)
- };
+ let new_len = unsafe { exact_div(self.len(), N) };
// SAFETY: We cast a slice of `new_len * N` elements into
// a slice of `new_len` many `N` elements chunks.
unsafe { from_raw_parts(self.as_ptr().cast(), new_len) }
@@ -1426,15 +1419,12 @@ impl<T> [T] {
#[inline]
#[must_use]
pub const unsafe fn as_chunks_unchecked_mut<const N: usize>(&mut self) -> &mut [[T; N]] {
- let this = &*self;
+ debug_assert_nounwind!(
+ N != 0 && self.len() % N == 0,
+ "slice::as_chunks_unchecked requires `N != 0` and the slice to split exactly into `N`-element chunks",
+ );
// SAFETY: Caller must guarantee that `N` is nonzero and exactly divides the slice length
- let new_len = unsafe {
- assert_unsafe_precondition!(
- "slice::as_chunks_unchecked_mut requires `N != 0` and the slice to split exactly into `N`-element chunks",
- [T](this: &[T], N: usize) => N != 0 && this.len() % N == 0
- );
- exact_div(this.len(), N)
- };
+ let new_len = unsafe { exact_div(self.len(), N) };
// SAFETY: We cast a slice of `new_len * N` elements into
// a slice of `new_len` many `N` elements chunks.
unsafe { from_raw_parts_mut(self.as_mut_ptr().cast(), new_len) }
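
Both chunking methods now assert the same precondition before the `exact_div` intrinsic runs. A short usage sketch (nightly, `slice_as_chunks`) of a call that satisfies it:

```rust
#![feature(slice_as_chunks)]

let data = [1u8, 2, 3, 4, 5, 6];
// SAFETY: N = 2 is nonzero and divides data.len() = 6 exactly, so
// `exact_div(6, 2)` yields the chunk count 3.
let chunks: &[[u8; 2]] = unsafe { data.as_chunks_unchecked::<2>() };
assert_eq!(chunks, &[[1, 2], [3, 4], [5, 6]]);
```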
@@ -1967,14 +1957,13 @@ impl<T> [T] {
let len = self.len();
let ptr = self.as_ptr();
+ debug_assert_nounwind!(
+ mid <= len,
+ "slice::split_at_unchecked requires the index to be within the slice",
+ );
+
// SAFETY: Caller has to check that `0 <= mid <= self.len()`
- unsafe {
- assert_unsafe_precondition!(
- "slice::split_at_unchecked requires the index to be within the slice",
- (mid: usize, len: usize) => mid <= len
- );
- (from_raw_parts(ptr, mid), from_raw_parts(ptr.add(mid), len - mid))
- }
+ unsafe { (from_raw_parts(ptr, mid), from_raw_parts(ptr.add(mid), len - mid)) }
}
/// Divides one mutable slice into two at an index, without doing bounds checking.
@@ -2018,17 +2007,16 @@ impl<T> [T] {
let len = self.len();
let ptr = self.as_mut_ptr();
+ debug_assert_nounwind!(
+ mid <= len,
+ "slice::split_at_mut_unchecked requires the index to be within the slice",
+ );
+
// SAFETY: Caller has to check that `0 <= mid <= self.len()`.
//
// `[ptr; mid]` and `[mid; len]` are not overlapping, so returning a mutable reference
// is fine.
- unsafe {
- assert_unsafe_precondition!(
- "slice::split_at_mut_unchecked requires the index to be within the slice",
- (mid: usize, len: usize) => mid <= len
- );
- (from_raw_parts_mut(ptr, mid), from_raw_parts_mut(ptr.add(mid), len - mid))
- }
+ unsafe { (from_raw_parts_mut(ptr, mid), from_raw_parts_mut(ptr.add(mid), len - mid)) }
}
/// Divides one slice into an array and a remainder slice at an index.
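
For both `split_at_unchecked` variants the relocated check guards the same arithmetic: the second half is built from `ptr.add(mid)` with length `len - mid`, which would wrap if `mid > len`. A usage sketch under the then-unstable `slice_split_at_unchecked` feature:

```rust
#![feature(slice_split_at_unchecked)]

let v = [10, 20, 30, 40];
// SAFETY: `1 <= v.len()`, so `ptr.add(1)` and the length `4 - 1` stay in bounds.
let (head, tail) = unsafe { v.split_at_unchecked(1) };
assert_eq!(head, &[10]);
assert_eq!(tail, &[20, 30, 40]);
```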
@@ -2854,14 +2842,13 @@ impl<T> [T] {
// we have `left + size/2 < self.len()`, and this is in-bounds.
let cmp = f(unsafe { self.get_unchecked(mid) });
- // The reason why we use if/else control flow rather than match
- // is because match reorders comparison operations, which is perf sensitive.
- // This is x86 asm for u8: https://rust.godbolt.org/z/8Y8Pra.
- if cmp == Less {
- left = mid + 1;
- } else if cmp == Greater {
- right = mid;
- } else {
+        // This control flow produces conditional moves, resulting in
+ // fewer branches and instructions than if/else or matching on
+ // cmp::Ordering.
+ // This is x86 asm for u8: https://rust.godbolt.org/z/698eYffTx.
+ left = if cmp == Less { mid + 1 } else { left };
+ right = if cmp == Greater { mid } else { right };
+ if cmp == Equal {
// SAFETY: same as the `get_unchecked` above
unsafe { crate::intrinsics::assume(mid < self.len()) };
return Ok(mid);
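
As a standalone illustration of the shape the new code compiles to, here is a sketch of the same cmov-friendly loop over a sorted `&[i32]` (not the library's generic implementation):

```rust
use core::cmp::Ordering::{Equal, Greater, Less};

fn binary_search_cmov(s: &[i32], x: i32) -> Result<usize, usize> {
    let (mut left, mut right) = (0, s.len());
    while left < right {
        let mid = left + (right - left) / 2;
        let cmp = s[mid].cmp(&x);
        // Both bounds are reassigned unconditionally, so the compiler can
        // select the new values with conditional moves instead of branches.
        left = if cmp == Less { mid + 1 } else { left };
        right = if cmp == Greater { mid } else { right };
        if cmp == Equal {
            return Ok(mid);
        }
    }
    Err(left)
}

assert_eq!(binary_search_cmov(&[1, 3, 5, 7], 5), Ok(2));
assert_eq!(binary_search_cmov(&[1, 3, 5, 7], 4), Err(2));
```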
@@ -3876,6 +3863,12 @@ impl<T> [T] {
} else {
let (left, rest) = self.split_at(offset);
let (us_len, ts_len) = rest.align_to_offsets::<U>();
+ // Inform Miri that we want to consider the "middle" pointer to be suitably aligned.
+ #[cfg(miri)]
+ crate::intrinsics::miri_promise_symbolic_alignment(
+ rest.as_ptr().cast(),
+ mem::align_of::<U>(),
+ );
// SAFETY: now `rest` is definitely aligned, so `from_raw_parts` below is okay,
// since the caller guarantees that we can transmute `T` to `U` safely.
unsafe {
@@ -3946,6 +3939,12 @@ impl<T> [T] {
let (us_len, ts_len) = rest.align_to_offsets::<U>();
let rest_len = rest.len();
let mut_ptr = rest.as_mut_ptr();
+ // Inform Miri that we want to consider the "middle" pointer to be suitably aligned.
+ #[cfg(miri)]
+ crate::intrinsics::miri_promise_symbolic_alignment(
+ mut_ptr.cast() as *const (),
+ mem::align_of::<U>(),
+ );
// We can't use `rest` again after this, that would invalidate its alias `mut_ptr`!
// SAFETY: see comments for `align_to`.
unsafe {
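
`miri_promise_symbolic_alignment` is an internal Miri hook: under Miri's symbolic alignment checking it records that the `rest` pointer really is aligned for `U`, a fact `align_to_offsets` computed but Miri cannot see. A usage sketch of the public API whose middle slice this concerns:

```rust
let bytes = [0u8; 9];
// SAFETY: reinterpreting `u8` bytes as `u16` is valid for any bit pattern.
let (prefix, middle, suffix) = unsafe { bytes.align_to::<u16>() };
// Only `middle` is guaranteed aligned; the prefix/suffix split is
// target-dependent, but the byte counts always add back up.
assert_eq!(prefix.len() + 2 * middle.len() + suffix.len(), bytes.len());
```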
@@ -3987,7 +3986,7 @@ impl<T> [T] {
///
/// ```
/// #![feature(portable_simd)]
- /// use core::simd::SimdFloat;
+ /// use core::simd::prelude::*;
///
/// let short = &[1, 2, 3];
/// let (prefix, middle, suffix) = short.as_simd::<4>();
@@ -3999,7 +3998,6 @@ impl<T> [T] {
///
/// fn basic_simd_sum(x: &[f32]) -> f32 {
/// use std::ops::Add;
- /// use std::simd::f32x4;
/// let (prefix, middle, suffix) = x.as_simd();
/// let sums = f32x4::from_array([
/// prefix.iter().copied().sum(),
diff --git a/library/core/src/str/iter.rs b/library/core/src/str/iter.rs
index c30f01b3c..dd2efb005 100644
--- a/library/core/src/str/iter.rs
+++ b/library/core/src/str/iter.rs
@@ -8,6 +8,7 @@ use crate::iter::{TrustedRandomAccess, TrustedRandomAccessNoCoerce};
use crate::ops::Try;
use crate::option;
use crate::slice::{self, Split as SliceSplit};
+use core::num::NonZeroUsize;
use super::from_utf8_unchecked;
use super::pattern::Pattern;
@@ -50,6 +51,55 @@ impl<'a> Iterator for Chars<'a> {
}
#[inline]
+ fn advance_by(&mut self, mut remainder: usize) -> Result<(), NonZeroUsize> {
+ const CHUNK_SIZE: usize = 32;
+
+ if remainder >= CHUNK_SIZE {
+ let mut chunks = self.iter.as_slice().array_chunks::<CHUNK_SIZE>();
+ let mut bytes_skipped: usize = 0;
+
+ while remainder > CHUNK_SIZE
+ && let Some(chunk) = chunks.next()
+ {
+ bytes_skipped += CHUNK_SIZE;
+
+ let mut start_bytes = [false; CHUNK_SIZE];
+
+ for i in 0..CHUNK_SIZE {
+ start_bytes[i] = !super::validations::utf8_is_cont_byte(chunk[i]);
+ }
+
+ remainder -= start_bytes.into_iter().map(|i| i as u8).sum::<u8>() as usize;
+ }
+
+            // SAFETY: These bytes exist, since we just iterated over them,
+            // so `advance_by` will succeed.
+ unsafe { self.iter.advance_by(bytes_skipped).unwrap_unchecked() };
+
+ // skip trailing continuation bytes
+ while self.iter.len() > 0 {
+ let b = self.iter.as_slice()[0];
+ if !super::validations::utf8_is_cont_byte(b) {
+ break;
+ }
+ // SAFETY: We just peeked at the byte, therefore it exists
+ unsafe { self.iter.advance_by(1).unwrap_unchecked() };
+ }
+ }
+
+ while (remainder > 0) && (self.iter.len() > 0) {
+ remainder -= 1;
+ let b = self.iter.as_slice()[0];
+ let slurp = super::validations::utf8_char_width(b);
+ // SAFETY: utf8 validity requires that the string must contain
+ // the continuation bytes (if any)
+ unsafe { self.iter.advance_by(slurp).unwrap_unchecked() };
+ }
+
+ NonZeroUsize::new(remainder).map_or(Ok(()), Err)
+ }
+
+ #[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.iter.len();
// `(len + 3)` can't overflow, because we know that the `slice::Iter`
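
The fast path works because the number of `char`s in a UTF-8 chunk equals its number of non-continuation bytes (bytes not of the form `0b10xx_xxxx`), so whole 32-byte chunks can be skipped without decoding. A scalar sketch of that counting trick:

```rust
// Counts chars in a UTF-8 chunk by counting non-continuation bytes;
// `(b as i8) >= -0x40` is the complement of the `utf8_is_cont_byte` test.
fn chars_in_chunk(chunk: &[u8]) -> usize {
    chunk.iter().filter(|&&b| (b as i8) >= -0x40).count()
}

assert_eq!(chars_in_chunk("héllo".as_bytes()), "héllo".chars().count());
```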
diff --git a/library/core/src/str/mod.rs b/library/core/src/str/mod.rs
index 27178328b..a22c46edc 100644
--- a/library/core/src/str/mod.rs
+++ b/library/core/src/str/mod.rs
@@ -2423,6 +2423,85 @@ impl str {
me.make_ascii_lowercase()
}
+ /// Returns a string slice with leading ASCII whitespace removed.
+ ///
+ /// 'Whitespace' refers to the definition used by
+ /// [`u8::is_ascii_whitespace`].
+ ///
+ /// [`u8::is_ascii_whitespace`]: u8::is_ascii_whitespace
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(byte_slice_trim_ascii)]
+ ///
+ /// assert_eq!(" \t \u{3000}hello world\n".trim_ascii_start(), "\u{3000}hello world\n");
+ /// assert_eq!(" ".trim_ascii_start(), "");
+ /// assert_eq!("".trim_ascii_start(), "");
+ /// ```
+ #[unstable(feature = "byte_slice_trim_ascii", issue = "94035")]
+ #[must_use = "this returns the trimmed string as a new slice, \
+ without modifying the original"]
+ #[inline]
+ pub const fn trim_ascii_start(&self) -> &str {
+ // SAFETY: Removing ASCII characters from a `&str` does not invalidate
+ // UTF-8.
+ unsafe { core::str::from_utf8_unchecked(self.as_bytes().trim_ascii_start()) }
+ }
+
+ /// Returns a string slice with trailing ASCII whitespace removed.
+ ///
+ /// 'Whitespace' refers to the definition used by
+ /// [`u8::is_ascii_whitespace`].
+ ///
+ /// [`u8::is_ascii_whitespace`]: u8::is_ascii_whitespace
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(byte_slice_trim_ascii)]
+ ///
+ /// assert_eq!("\r hello world\u{3000}\n ".trim_ascii_end(), "\r hello world\u{3000}");
+ /// assert_eq!(" ".trim_ascii_end(), "");
+ /// assert_eq!("".trim_ascii_end(), "");
+ /// ```
+ #[unstable(feature = "byte_slice_trim_ascii", issue = "94035")]
+ #[must_use = "this returns the trimmed string as a new slice, \
+ without modifying the original"]
+ #[inline]
+ pub const fn trim_ascii_end(&self) -> &str {
+ // SAFETY: Removing ASCII characters from a `&str` does not invalidate
+ // UTF-8.
+ unsafe { core::str::from_utf8_unchecked(self.as_bytes().trim_ascii_end()) }
+ }
+
+ /// Returns a string slice with leading and trailing ASCII whitespace
+ /// removed.
+ ///
+ /// 'Whitespace' refers to the definition used by
+ /// [`u8::is_ascii_whitespace`].
+ ///
+ /// [`u8::is_ascii_whitespace`]: u8::is_ascii_whitespace
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(byte_slice_trim_ascii)]
+ ///
+ /// assert_eq!("\r hello world\n ".trim_ascii(), "hello world");
+ /// assert_eq!(" ".trim_ascii(), "");
+ /// assert_eq!("".trim_ascii(), "");
+ /// ```
+ #[unstable(feature = "byte_slice_trim_ascii", issue = "94035")]
+ #[must_use = "this returns the trimmed string as a new slice, \
+ without modifying the original"]
+ #[inline]
+ pub const fn trim_ascii(&self) -> &str {
+ // SAFETY: Removing ASCII characters from a `&str` does not invalidate
+ // UTF-8.
+ unsafe { core::str::from_utf8_unchecked(self.as_bytes().trim_ascii()) }
+ }
+
/// Return an iterator that escapes each char in `self` with [`char::escape_debug`].
///
/// Note: only extended grapheme codepoints that begin the string will be
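
All three methods are thin `const` wrappers over the corresponding `[u8]` methods from the same `byte_slice_trim_ascii` feature; stripping whole ASCII bytes can never split a UTF-8 sequence, which is what makes the `from_utf8_unchecked` round-trip sound. A simplified sketch of the byte-level loop they delegate to (the real code lives on `[u8]`):

```rust
// Simplified sketch: a `while let` loop keeps this usable in `const fn`,
// mirroring the shape of `[u8]::trim_ascii_start`.
const fn trim_ascii_start_sketch(mut bytes: &[u8]) -> &[u8] {
    while let [first, rest @ ..] = bytes {
        if first.is_ascii_whitespace() {
            bytes = rest;
        } else {
            break;
        }
    }
    bytes
}

assert_eq!(trim_ascii_start_sketch(b" \t hi "), b"hi ");
```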
diff --git a/library/core/src/str/pattern.rs b/library/core/src/str/pattern.rs
index 701e61e66..caa54e00f 100644
--- a/library/core/src/str/pattern.rs
+++ b/library/core/src/str/pattern.rs
@@ -1740,9 +1740,9 @@ fn simd_contains(needle: &str, haystack: &str) -> Option<bool> {
debug_assert!(needle.len() > 1);
use crate::ops::BitAnd;
+ use crate::simd::cmp::SimdPartialEq;
use crate::simd::mask8x16 as Mask;
use crate::simd::u8x16 as Block;
- use crate::simd::{SimdPartialEq, ToBitMask};
let first_probe = needle[0];
let last_byte_offset = needle.len() - 1;
@@ -1765,7 +1765,7 @@ fn simd_contains(needle: &str, haystack: &str) -> Option<bool> {
};
// do a naive search if the haystack is too small to fit
- if haystack.len() < Block::LANES + last_byte_offset {
+ if haystack.len() < Block::LEN + last_byte_offset {
return Some(haystack.windows(needle.len()).any(|c| c == needle));
}
@@ -1812,7 +1812,7 @@ fn simd_contains(needle: &str, haystack: &str) -> Option<bool> {
let eq_first: Mask = a.simd_eq(first_probe);
let eq_last: Mask = b.simd_eq(second_probe);
let both = eq_first.bitand(eq_last);
- let mask = both.to_bitmask();
+ let mask = both.to_bitmask() as u16;
return mask;
};
@@ -1822,32 +1822,32 @@ fn simd_contains(needle: &str, haystack: &str) -> Option<bool> {
// The loop condition must ensure that there's enough headroom to read LANE bytes,
// and not only at the current index but also at the index shifted by block_offset
const UNROLL: usize = 4;
- while i + last_byte_offset + UNROLL * Block::LANES < haystack.len() && !result {
+ while i + last_byte_offset + UNROLL * Block::LEN < haystack.len() && !result {
let mut masks = [0u16; UNROLL];
for j in 0..UNROLL {
- masks[j] = test_chunk(i + j * Block::LANES);
+ masks[j] = test_chunk(i + j * Block::LEN);
}
for j in 0..UNROLL {
let mask = masks[j];
if mask != 0 {
- result |= check_mask(i + j * Block::LANES, mask, result);
+ result |= check_mask(i + j * Block::LEN, mask, result);
}
}
- i += UNROLL * Block::LANES;
+ i += UNROLL * Block::LEN;
}
- while i + last_byte_offset + Block::LANES < haystack.len() && !result {
+ while i + last_byte_offset + Block::LEN < haystack.len() && !result {
let mask = test_chunk(i);
if mask != 0 {
result |= check_mask(i, mask, result);
}
- i += Block::LANES;
+ i += Block::LEN;
}
// Process the tail that didn't fit into LANES-sized steps.
// This simply repeats the same procedure but as right-aligned chunk instead
// of a left-aligned one. The last byte must be exactly flush with the string end so
// we don't miss a single byte or read out of bounds.
- let i = haystack.len() - last_byte_offset - Block::LANES;
+ let i = haystack.len() - last_byte_offset - Block::LEN;
let mask = test_chunk(i);
if mask != 0 {
result |= check_mask(i, mask, result);
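
The `LANES`-to-`LEN` renames track the portable-simd API change; the algorithm itself is unchanged. For reference, a scalar sketch of the two-probe filter the SIMD code vectorizes (a window is compared in full only when its first and last bytes both match):

```rust
fn contains_two_probe(haystack: &[u8], needle: &[u8]) -> bool {
    assert!(needle.len() > 1 && haystack.len() >= needle.len());
    let last = needle.len() - 1;
    haystack
        .windows(needle.len())
        // The cheap two-byte probe rejects most windows before the full compare.
        .any(|w| w[0] == needle[0] && w[last] == needle[last] && w == needle)
}

assert!(contains_two_probe(b"can you find a needle in a haystack", b"needle"));
```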
diff --git a/library/core/src/str/traits.rs b/library/core/src/str/traits.rs
index 16fb1dad7..777ad0d81 100644
--- a/library/core/src/str/traits.rs
+++ b/library/core/src/str/traits.rs
@@ -1,8 +1,8 @@
//! Trait implementations for `str`.
use crate::cmp::Ordering;
-use crate::intrinsics::assert_unsafe_precondition;
use crate::ops;
+use crate::panic::debug_assert_nounwind;
use crate::ptr;
use crate::slice::SliceIndex;
@@ -191,39 +191,35 @@ unsafe impl SliceIndex<str> for ops::Range<usize> {
#[inline]
unsafe fn get_unchecked(self, slice: *const str) -> *const Self::Output {
let slice = slice as *const [u8];
+
+ debug_assert_nounwind!(
+ // We'd like to check that the bounds are on char boundaries,
+ // but there's not really a way to do so without reading
+ // behind the pointer, which has aliasing implications.
+ // It's also not possible to move this check up to
+ // `str::get_unchecked` without adding a special function
+ // to `SliceIndex` just for this.
+ self.end >= self.start && self.end <= slice.len(),
+ "str::get_unchecked requires that the range is within the string slice",
+ );
+
// SAFETY: the caller guarantees that `self` is in bounds of `slice`
// which satisfies all the conditions for `add`.
- let ptr = unsafe {
- let this = ops::Range { ..self };
- assert_unsafe_precondition!(
- "str::get_unchecked requires that the range is within the string slice",
- (this: ops::Range<usize>, slice: *const [u8]) =>
- // We'd like to check that the bounds are on char boundaries,
- // but there's not really a way to do so without reading
- // behind the pointer, which has aliasing implications.
- // It's also not possible to move this check up to
- // `str::get_unchecked` without adding a special function
- // to `SliceIndex` just for this.
- this.end >= this.start && this.end <= slice.len()
- );
- slice.as_ptr().add(self.start)
- };
+ let ptr = unsafe { slice.as_ptr().add(self.start) };
let len = self.end - self.start;
ptr::slice_from_raw_parts(ptr, len) as *const str
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: *mut str) -> *mut Self::Output {
let slice = slice as *mut [u8];
+
+ debug_assert_nounwind!(
+ self.end >= self.start && self.end <= slice.len(),
+ "str::get_unchecked_mut requires that the range is within the string slice",
+ );
+
// SAFETY: see comments for `get_unchecked`.
- let ptr = unsafe {
- let this = ops::Range { ..self };
- assert_unsafe_precondition!(
- "str::get_unchecked_mut requires that the range is within the string slice",
- (this: ops::Range<usize>, slice: *mut [u8]) =>
- this.end >= this.start && this.end <= slice.len()
- );
- slice.as_mut_ptr().add(self.start)
- };
+ let ptr = unsafe { slice.as_mut_ptr().add(self.start) };
let len = self.end - self.start;
ptr::slice_from_raw_parts_mut(ptr, len) as *mut str
}
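
The relocated check also documents what it cannot verify: `self.end >= self.start` keeps `self.end - self.start` from wrapping, but char-boundary validity remains entirely the caller's obligation. A usage sketch of that obligation on the stable API:

```rust
let s = "slice index";
// SAFETY: `0..5` is within the string and both ends fall on char boundaries.
assert_eq!(unsafe { s.get_unchecked(0..5) }, "slice");
```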
diff --git a/library/core/src/task/poll.rs b/library/core/src/task/poll.rs
index 0a0f702f6..bfa1cf096 100644
--- a/library/core/src/task/poll.rs
+++ b/library/core/src/task/poll.rs
@@ -2,7 +2,6 @@
use crate::convert;
use crate::ops::{self, ControlFlow};
-use crate::result::Result;
/// Indicates whether a value is available or if the current task has been
/// scheduled to receive a wakeup instead.
diff --git a/library/core/src/task/wake.rs b/library/core/src/task/wake.rs
index 817e39942..9c41b8b4f 100644
--- a/library/core/src/task/wake.rs
+++ b/library/core/src/task/wake.rs
@@ -1,7 +1,7 @@
#![stable(feature = "futures_api", since = "1.36.0")]
use crate::fmt;
-use crate::marker::{PhantomData, Unpin};
+use crate::marker::PhantomData;
use crate::ptr;
/// A `RawWaker` allows the implementor of a task executor to create a [`Waker`]
@@ -48,7 +48,7 @@ impl RawWaker {
/// Get the `data` pointer used to create this `RawWaker`.
#[inline]
#[must_use]
- #[unstable(feature = "waker_getters", issue = "87021")]
+ #[unstable(feature = "waker_getters", issue = "96992")]
pub fn data(&self) -> *const () {
self.data
}
@@ -56,7 +56,7 @@ impl RawWaker {
/// Get the `vtable` pointer used to create this `RawWaker`.
#[inline]
#[must_use]
- #[unstable(feature = "waker_getters", issue = "87021")]
+ #[unstable(feature = "waker_getters", issue = "96992")]
pub fn vtable(&self) -> &'static RawWakerVTable {
self.vtable
}
@@ -371,7 +371,7 @@ impl Waker {
/// Get a reference to the underlying [`RawWaker`].
#[inline]
#[must_use]
- #[unstable(feature = "waker_getters", issue = "87021")]
+ #[unstable(feature = "waker_getters", issue = "96992")]
pub fn as_raw(&self) -> &RawWaker {
&self.waker
}
diff --git a/library/core/src/time.rs b/library/core/src/time.rs
index 6ef35d841..b67777644 100644
--- a/library/core/src/time.rs
+++ b/library/core/src/time.rs
@@ -461,6 +461,27 @@ impl Duration {
self.secs as u128 * NANOS_PER_SEC as u128 + self.nanos.0 as u128
}
+ /// Computes the absolute difference between `self` and `other`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(duration_abs_diff)]
+ /// use std::time::Duration;
+ ///
+ /// assert_eq!(Duration::new(100, 0).abs_diff(Duration::new(80, 0)), Duration::new(20, 0));
+ /// assert_eq!(Duration::new(100, 400_000_000).abs_diff(Duration::new(110, 0)), Duration::new(9, 600_000_000));
+ /// ```
+ #[unstable(feature = "duration_abs_diff", issue = "117618")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn abs_diff(self, other: Duration) -> Duration {
+ if let Some(res) = self.checked_sub(other) { res } else { other.checked_sub(self).unwrap() }
+ }
+
/// Checked `Duration` addition. Computes `self + other`, returning [`None`]
/// if overflow occurred.
///
diff --git a/library/core/src/tuple.rs b/library/core/src/tuple.rs
index ff292ff2d..3689312e6 100644
--- a/library/core/src/tuple.rs
+++ b/library/core/src/tuple.rs
@@ -8,6 +8,7 @@ use crate::marker::{StructuralEq, StructuralPartialEq};
//
// Also provides implementations for tuples with lesser arity. For example, tuple_impls!(A B C)
// will implement everything for (A, B, C), (A, B) and (A,).
+#[cfg(bootstrap)]
macro_rules! tuple_impls {
// Stopping criteria (1-ary tuple)
($T:ident) => {
@@ -50,22 +51,19 @@ macro_rules! tuple_impls {
maybe_tuple_doc! {
$($T)+ @
#[unstable(feature = "structural_match", issue = "31434")]
- impl<$($T: ConstParamTy),+> ConstParamTy for ($($T,)+)
- {}
+ impl<$($T: ConstParamTy),+> ConstParamTy for ($($T,)+) {}
}
maybe_tuple_doc! {
$($T)+ @
#[unstable(feature = "structural_match", issue = "31434")]
- impl<$($T),+> StructuralPartialEq for ($($T,)+)
- {}
+ impl<$($T),+> StructuralPartialEq for ($($T,)+) {}
}
maybe_tuple_doc! {
$($T)+ @
#[unstable(feature = "structural_match", issue = "31434")]
- impl<$($T),+> StructuralEq for ($($T,)+)
- {}
+ impl<$($T),+> StructuralEq for ($($T,)+) {}
}
maybe_tuple_doc! {
@@ -118,7 +116,7 @@ macro_rules! tuple_impls {
impl<$($T: Default),+> Default for ($($T,)+) {
#[inline]
fn default() -> ($($T,)+) {
- ($({ let x: $T = Default::default(); x},)+)
+ ($($T::default(),)+)
}
}
}
@@ -145,6 +143,148 @@ macro_rules! tuple_impls {
}
}
+// Recursive macro for implementing n-ary tuple functions and operations
+//
+// Also provides implementations for tuples with lesser arity. For example, tuple_impls!(A B C)
+// will implement everything for (A, B, C), (A, B) and (A,).
+#[cfg(not(bootstrap))]
+macro_rules! tuple_impls {
+ // Stopping criteria (1-ary tuple)
+ ($T:ident) => {
+ tuple_impls!(@impl $T);
+ };
+ // Running criteria (n-ary tuple, with n >= 2)
+ ($T:ident $( $U:ident )+) => {
+ tuple_impls!($( $U )+);
+ tuple_impls!(@impl $T $( $U )+);
+ };
+ // "Private" internal implementation
+ (@impl $( $T:ident )+) => {
+ maybe_tuple_doc! {
+ $($T)+ @
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<$($T: PartialEq),+> PartialEq for ($($T,)+)
+ where
+ last_type!($($T,)+): ?Sized
+ {
+ #[inline]
+ fn eq(&self, other: &($($T,)+)) -> bool {
+ $( ${ignore($T)} self.${index()} == other.${index()} )&&+
+ }
+ #[inline]
+ fn ne(&self, other: &($($T,)+)) -> bool {
+ $( ${ignore($T)} self.${index()} != other.${index()} )||+
+ }
+ }
+ }
+
+ maybe_tuple_doc! {
+ $($T)+ @
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<$($T: Eq),+> Eq for ($($T,)+)
+ where
+ last_type!($($T,)+): ?Sized
+ {}
+ }
+
+ maybe_tuple_doc! {
+ $($T)+ @
+ #[unstable(feature = "structural_match", issue = "31434")]
+ impl<$($T: ConstParamTy),+> ConstParamTy for ($($T,)+)
+ {}
+ }
+
+ maybe_tuple_doc! {
+ $($T)+ @
+ #[unstable(feature = "structural_match", issue = "31434")]
+ impl<$($T),+> StructuralPartialEq for ($($T,)+)
+ {}
+ }
+
+ maybe_tuple_doc! {
+ $($T)+ @
+ #[unstable(feature = "structural_match", issue = "31434")]
+ impl<$($T),+> StructuralEq for ($($T,)+)
+ {}
+ }
+
+ maybe_tuple_doc! {
+ $($T)+ @
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<$($T: PartialOrd),+> PartialOrd for ($($T,)+)
+ where
+ last_type!($($T,)+): ?Sized
+ {
+ #[inline]
+ fn partial_cmp(&self, other: &($($T,)+)) -> Option<Ordering> {
+ lexical_partial_cmp!($( ${ignore($T)} self.${index()}, other.${index()} ),+)
+ }
+ #[inline]
+ fn lt(&self, other: &($($T,)+)) -> bool {
+ lexical_ord!(lt, Less, $( ${ignore($T)} self.${index()}, other.${index()} ),+)
+ }
+ #[inline]
+ fn le(&self, other: &($($T,)+)) -> bool {
+ lexical_ord!(le, Less, $( ${ignore($T)} self.${index()}, other.${index()} ),+)
+ }
+ #[inline]
+ fn ge(&self, other: &($($T,)+)) -> bool {
+ lexical_ord!(ge, Greater, $( ${ignore($T)} self.${index()}, other.${index()} ),+)
+ }
+ #[inline]
+ fn gt(&self, other: &($($T,)+)) -> bool {
+ lexical_ord!(gt, Greater, $( ${ignore($T)} self.${index()}, other.${index()} ),+)
+ }
+ }
+ }
+
+ maybe_tuple_doc! {
+ $($T)+ @
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<$($T: Ord),+> Ord for ($($T,)+)
+ where
+ last_type!($($T,)+): ?Sized
+ {
+ #[inline]
+ fn cmp(&self, other: &($($T,)+)) -> Ordering {
+ lexical_cmp!($( ${ignore($T)} self.${index()}, other.${index()} ),+)
+ }
+ }
+ }
+
+ maybe_tuple_doc! {
+ $($T)+ @
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<$($T: Default),+> Default for ($($T,)+) {
+ #[inline]
+ fn default() -> ($($T,)+) {
+                    ($($T::default(),)+)
+ }
+ }
+ }
+
+ #[stable(feature = "array_tuple_conv", since = "1.71.0")]
+ impl<T> From<[T; ${count($T)}]> for ($(${ignore($T)} T,)+) {
+ #[inline]
+ #[allow(non_snake_case)]
+ fn from(array: [T; ${count($T)}]) -> Self {
+ let [$($T,)+] = array;
+ ($($T,)+)
+ }
+ }
+
+ #[stable(feature = "array_tuple_conv", since = "1.71.0")]
+ impl<T> From<($(${ignore($T)} T,)+)> for [T; ${count($T)}] {
+ #[inline]
+ #[allow(non_snake_case)]
+ fn from(tuple: ($(${ignore($T)} T,)+)) -> Self {
+ let ($($T,)+) = tuple;
+ [$($T,)+]
+ }
+ }
+ }
+}
+
// If this is a unary tuple, it adds a doc comment.
// Otherwise, it hides the docs entirely.
macro_rules! maybe_tuple_doc {
@@ -196,7 +336,7 @@ macro_rules! lexical_partial_cmp {
($a:expr, $b:expr, $($rest_a:expr, $rest_b:expr),+) => {
match ($a).partial_cmp(&$b) {
Some(Equal) => lexical_partial_cmp!($($rest_a, $rest_b),+),
- ordering => ordering
+ ordering => ordering
}
};
($a:expr, $b:expr) => { ($a).partial_cmp(&$b) };
@@ -206,7 +346,7 @@ macro_rules! lexical_cmp {
($a:expr, $b:expr, $($rest_a:expr, $rest_b:expr),+) => {
match ($a).cmp(&$b) {
Equal => lexical_cmp!($($rest_a, $rest_b),+),
- ordering => ordering
+ ordering => ordering
}
};
($a:expr, $b:expr) => { ($a).cmp(&$b) };
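
The duplicated macro appears to exist only to gate the newer `${ignore($T)}`/`${count($T)}` metavariable-expression syntax behind `cfg(not(bootstrap))`; per the stability attributes above, the array/tuple `From` impls it generates have been stable since 1.71. A usage sketch (all elements must share a single type `T`):

```rust
let triple: (u8, u8, u8) = <(u8, u8, u8)>::from([1, 2, 3]);
let array: [u8; 3] = <[u8; 3]>::from((4, 5, 6));
assert_eq!(triple, (1, 2, 3));
assert_eq!(array, [4, 5, 6]);
```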
diff --git a/library/core/tests/array.rs b/library/core/tests/array.rs
index 81da75d32..3656eecca 100644
--- a/library/core/tests/array.rs
+++ b/library/core/tests/array.rs
@@ -1,5 +1,4 @@
use core::{array, assert_eq};
-use core::convert::TryFrom;
use core::num::NonZeroUsize;
use core::sync::atomic::{AtomicUsize, Ordering};
diff --git a/library/core/tests/cell.rs b/library/core/tests/cell.rs
index e084f8679..71b8eb296 100644
--- a/library/core/tests/cell.rs
+++ b/library/core/tests/cell.rs
@@ -1,6 +1,4 @@
use core::cell::*;
-use core::default::Default;
-use std::mem::drop;
#[test]
fn smoketest_unsafe_cell() {
diff --git a/library/core/tests/char.rs b/library/core/tests/char.rs
index 85ba51c92..6422387e9 100644
--- a/library/core/tests/char.rs
+++ b/library/core/tests/char.rs
@@ -1,4 +1,3 @@
-use std::convert::TryFrom;
use std::str::FromStr;
use std::{char, str};
diff --git a/library/core/tests/fmt/num.rs b/library/core/tests/fmt/num.rs
index b9ede65c9..bc387a46e 100644
--- a/library/core/tests/fmt/num.rs
+++ b/library/core/tests/fmt/num.rs
@@ -128,28 +128,43 @@ fn test_format_int_exp_precision() {
let big_int: u32 = 314_159_265;
assert_eq!(format!("{big_int:.1e}"), format!("{:.1e}", f64::from(big_int)));
- //test adding precision
+ // test adding precision
assert_eq!(format!("{:.10e}", i8::MIN), "-1.2800000000e2");
assert_eq!(format!("{:.10e}", i16::MIN), "-3.2768000000e4");
assert_eq!(format!("{:.10e}", i32::MIN), "-2.1474836480e9");
assert_eq!(format!("{:.20e}", i64::MIN), "-9.22337203685477580800e18");
assert_eq!(format!("{:.40e}", i128::MIN), "-1.7014118346046923173168730371588410572800e38");
- //test rounding
+ // test rounding
assert_eq!(format!("{:.1e}", i8::MIN), "-1.3e2");
assert_eq!(format!("{:.1e}", i16::MIN), "-3.3e4");
assert_eq!(format!("{:.1e}", i32::MIN), "-2.1e9");
assert_eq!(format!("{:.1e}", i64::MIN), "-9.2e18");
assert_eq!(format!("{:.1e}", i128::MIN), "-1.7e38");
- //test huge precision
+ // test huge precision
assert_eq!(format!("{:.1000e}", 1), format!("1.{}e0", "0".repeat(1000)));
//test zero precision
assert_eq!(format!("{:.0e}", 1), format!("1e0",));
assert_eq!(format!("{:.0e}", 35), format!("4e1",));
- //test padding with precision (and sign)
+ // test padding with precision (and sign)
assert_eq!(format!("{:+10.3e}", 1), " +1.000e0");
+
+ // test precision remains correct when rounding to next power
+ #[cfg(miri)] // can't cover all of `i16` in Miri
+ let range = [i16::MIN, -1, 1, i16::MAX];
+ #[cfg(not(miri))]
+ let range = i16::MIN..=i16::MAX;
+ for i in range {
+ for p in 0..=5 {
+ assert_eq!(
+ format!("{i:.p$e}"),
+ format!("{:.p$e}", f32::from(i)),
+ "integer {i} at precision {p}"
+ );
+ }
+ }
}
#[test]
diff --git a/library/core/tests/hash/mod.rs b/library/core/tests/hash/mod.rs
index 033bd1ed6..addc255de 100644
--- a/library/core/tests/hash/mod.rs
+++ b/library/core/tests/hash/mod.rs
@@ -1,6 +1,5 @@
mod sip;
-use std::default::Default;
use std::hash::{BuildHasher, Hash, Hasher};
use std::ptr;
use std::rc::Rc;
@@ -167,7 +166,7 @@ fn test_indirect_hasher() {
#[test]
fn test_build_hasher_object_safe() {
- use std::collections::hash_map::{DefaultHasher, RandomState};
+ use std::hash::{DefaultHasher, RandomState};
let _: &dyn BuildHasher<Hasher = DefaultHasher> = &RandomState::new();
}
diff --git a/library/core/tests/iter/adapters/array_chunks.rs b/library/core/tests/iter/adapters/array_chunks.rs
index ef4a7e53b..fb19a519f 100644
--- a/library/core/tests/iter/adapters/array_chunks.rs
+++ b/library/core/tests/iter/adapters/array_chunks.rs
@@ -1,5 +1,4 @@
-use core::cell::Cell;
-use core::iter::{self, Iterator};
+use core::iter;
use super::*;
diff --git a/library/core/tests/iter/mod.rs b/library/core/tests/iter/mod.rs
index 770b6f760..5b2769d04 100644
--- a/library/core/tests/iter/mod.rs
+++ b/library/core/tests/iter/mod.rs
@@ -21,7 +21,6 @@ mod sources;
mod traits;
use core::cell::Cell;
-use core::convert::TryFrom;
use core::iter::*;
pub fn is_trusted_len<I: TrustedLen>(_: I) {}
diff --git a/library/core/tests/lib.rs b/library/core/tests/lib.rs
index 168b47dc9..ac8b6f6b5 100644
--- a/library/core/tests/lib.rs
+++ b/library/core/tests/lib.rs
@@ -28,6 +28,7 @@
#![feature(core_private_diy_float)]
#![feature(dec2flt)]
#![feature(div_duration)]
+#![feature(duration_abs_diff)]
#![feature(duration_consts_float)]
#![feature(duration_constants)]
#![feature(exact_size_is_empty)]
@@ -116,6 +117,7 @@
#![feature(get_many_mut)]
#![feature(offset_of)]
#![feature(iter_map_windows)]
+#![allow(internal_features)]
#![deny(unsafe_op_in_unsafe_fn)]
#![deny(fuzzy_provenance_casts)]
@@ -169,7 +171,7 @@ mod waker;
#[allow(dead_code)] // Not used in all configurations.
pub(crate) fn test_rng() -> rand_xorshift::XorShiftRng {
use core::hash::{BuildHasher, Hash, Hasher};
- let mut hasher = std::collections::hash_map::RandomState::new().build_hasher();
+ let mut hasher = std::hash::RandomState::new().build_hasher();
core::panic::Location::caller().hash(&mut hasher);
let hc64 = hasher.finish();
let seed_vec = hc64.to_le_bytes().into_iter().chain(0u8..8).collect::<Vec<u8>>();
diff --git a/library/core/tests/nonzero.rs b/library/core/tests/nonzero.rs
index 007f84425..8873d2688 100644
--- a/library/core/tests/nonzero.rs
+++ b/library/core/tests/nonzero.rs
@@ -1,9 +1,8 @@
-use core::convert::TryFrom;
use core::num::{
IntErrorKind, NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize,
NonZeroU128, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize,
};
-use core::option::Option::{self, None, Some};
+use core::option::Option::None;
use std::mem::size_of;
#[test]
diff --git a/library/core/tests/num/mod.rs b/library/core/tests/num/mod.rs
index 3f3659ba8..863da9b18 100644
--- a/library/core/tests/num/mod.rs
+++ b/library/core/tests/num/mod.rs
@@ -1,11 +1,6 @@
-use core::cmp::PartialEq;
-use core::convert::{TryFrom, TryInto};
use core::fmt::Debug;
-use core::marker::Copy;
use core::num::{can_not_overflow, IntErrorKind, ParseIntError, TryFromIntError};
use core::ops::{Add, Div, Mul, Rem, Sub};
-use core::option::Option;
-use core::option::Option::None;
use core::str::FromStr;
#[macro_use]
diff --git a/library/core/tests/option.rs b/library/core/tests/option.rs
index 5defeb50d..00a308b29 100644
--- a/library/core/tests/option.rs
+++ b/library/core/tests/option.rs
@@ -1,5 +1,4 @@
use core::cell::Cell;
-use core::clone::Clone;
use core::mem;
use core::ops::DerefMut;
use core::option::*;
@@ -568,3 +567,11 @@ fn zip_unzip_roundtrip() {
let a = z.unzip();
assert_eq!(a, (x, y));
}
+
+#[test]
+fn as_slice() {
+ assert_eq!(Some(42).as_slice(), &[42]);
+ assert_eq!(Some(43).as_mut_slice(), &[43]);
+ assert_eq!(None::<i32>.as_slice(), &[]);
+ assert_eq!(None::<i32>.as_mut_slice(), &[]);
+}
diff --git a/library/core/tests/simd.rs b/library/core/tests/simd.rs
index 565c8975e..b8b5f26ca 100644
--- a/library/core/tests/simd.rs
+++ b/library/core/tests/simd.rs
@@ -1,5 +1,4 @@
-use core::simd::f32x4;
-use core::simd::SimdFloat;
+use core::simd::prelude::*;
#[test]
fn testing() {
diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs
index 666452ead..33a303398 100644
--- a/library/core/tests/slice.rs
+++ b/library/core/tests/slice.rs
@@ -2,7 +2,6 @@ use core::cell::Cell;
use core::cmp::Ordering;
use core::mem::MaybeUninit;
use core::num::NonZeroUsize;
-use core::result::Result::{Err, Ok};
use core::slice;
#[test]
diff --git a/library/core/tests/time.rs b/library/core/tests/time.rs
index bd6e63edb..24ab4be9d 100644
--- a/library/core/tests/time.rs
+++ b/library/core/tests/time.rs
@@ -74,6 +74,19 @@ fn nanos() {
}
#[test]
+fn abs_diff() {
+ assert_eq!(Duration::new(2, 0).abs_diff(Duration::new(1, 0)), Duration::new(1, 0));
+ assert_eq!(Duration::new(1, 0).abs_diff(Duration::new(2, 0)), Duration::new(1, 0));
+ assert_eq!(Duration::new(1, 0).abs_diff(Duration::new(1, 0)), Duration::new(0, 0));
+ assert_eq!(Duration::new(1, 1).abs_diff(Duration::new(0, 2)), Duration::new(0, 999_999_999));
+ assert_eq!(Duration::new(1, 1).abs_diff(Duration::new(2, 1)), Duration::new(1, 0));
+ assert_eq!(Duration::MAX.abs_diff(Duration::MAX), Duration::ZERO);
+ assert_eq!(Duration::ZERO.abs_diff(Duration::ZERO), Duration::ZERO);
+ assert_eq!(Duration::MAX.abs_diff(Duration::ZERO), Duration::MAX);
+ assert_eq!(Duration::ZERO.abs_diff(Duration::MAX), Duration::MAX);
+}
+
+#[test]
fn add() {
assert_eq!(Duration::new(0, 0) + Duration::new(0, 1), Duration::new(0, 1));
assert_eq!(Duration::new(0, 500_000_000) + Duration::new(0, 500_000_001), Duration::new(1, 1));
diff --git a/library/panic_abort/src/lib.rs b/library/panic_abort/src/lib.rs
index 6e097e2ca..8fd64279a 100644
--- a/library/panic_abort/src/lib.rs
+++ b/library/panic_abort/src/lib.rs
@@ -81,6 +81,16 @@ pub unsafe fn __rust_start_panic(_payload: &mut dyn PanicPayload) -> u32 {
}
core::intrinsics::unreachable();
}
+ } else if #[cfg(target_os = "teeos")] {
+ mod teeos {
+ extern "C" {
+ pub fn TEE_Panic(code: u32) -> !;
+ }
+ }
+
+ unsafe fn abort() -> ! {
+ teeos::TEE_Panic(1);
+ }
} else {
unsafe fn abort() -> ! {
core::intrinsics::abort();
diff --git a/library/panic_unwind/src/lib.rs b/library/panic_unwind/src/lib.rs
index 9363fde5d..7a0bae346 100644
--- a/library/panic_unwind/src/lib.rs
+++ b/library/panic_unwind/src/lib.rs
@@ -49,6 +49,7 @@ cfg_if::cfg_if! {
} else if #[cfg(any(
all(target_family = "windows", target_env = "gnu"),
target_os = "psp",
+ target_os = "xous",
target_os = "solid_asp3",
all(target_family = "unix", not(target_os = "espidf")),
all(target_vendor = "fortanix", target_env = "sgx"),
diff --git a/library/portable-simd/.github/workflows/ci.yml b/library/portable-simd/.github/workflows/ci.yml
index 1ff377fce..90543044e 100644
--- a/library/portable-simd/.github/workflows/ci.yml
+++ b/library/portable-simd/.github/workflows/ci.yml
@@ -167,40 +167,33 @@ jobs:
RUSTFLAGS: ${{ matrix.rustflags }}
cross-tests:
- name: "${{ matrix.target }} (via cross)"
+ name: "${{ matrix.target_feature }} on ${{ matrix.target }} (via cross)"
runs-on: ubuntu-latest
strategy:
fail-fast: false
- # TODO: Sadly, we cant configure target-feature in a meaningful way
- # because `cross` doesn't tell qemu to enable any non-default cpu
- # features, nor does it give us a way to do so.
- #
- # Ultimately, we'd like to do something like [rust-lang/stdarch][stdarch].
- # This is a lot more complex... but in practice it's likely that we can just
- # snarf the docker config from around [here][1000-dockerfiles].
- #
- # [stdarch]: https://github.com/rust-lang/stdarch/blob/a5db4eaf/.github/workflows/main.yml#L67
- # [1000-dockerfiles]: https://github.com/rust-lang/stdarch/tree/a5db4eaf/ci/docker
matrix:
target:
- - i586-unknown-linux-gnu
- # 32-bit arm has a few idiosyncracies like having subnormal flushing
- # to zero on by default. Ideally we'd set
- armv7-unknown-linux-gnueabihf
- - aarch64-unknown-linux-gnu
- # Note: The issue above means neither of these mips targets will use
- # MSA (mips simd) but MIPS uses a nonstandard binary representation
- # for NaNs which makes it worth testing on despite that.
+ - thumbv7neon-unknown-linux-gnueabihf # includes neon by default
+ - aarch64-unknown-linux-gnu # includes neon by default
+ - powerpc-unknown-linux-gnu
+ - powerpc64le-unknown-linux-gnu # includes altivec by default
+ - riscv64gc-unknown-linux-gnu
+ # MIPS uses a nonstandard binary representation for NaNs which makes it worth testing
+ # non-nightly since https://github.com/rust-lang/rust/pull/113274
# - mips-unknown-linux-gnu
# - mips64-unknown-linux-gnuabi64
- - riscv64gc-unknown-linux-gnu
- # TODO this test works, but it appears to time out
- # - powerpc-unknown-linux-gnu
- # TODO this test is broken, but it appears to be a problem with QEMU, not us.
- # - powerpc64le-unknown-linux-gnu
- # TODO enable this once a new version of cross is released
+ # Lots of errors in QEMU and no real hardware to test on. Not clear if it's QEMU or bad codegen.
# - powerpc64-unknown-linux-gnu
+ target_feature: [default]
+ include:
+ - { target: powerpc64le-unknown-linux-gnu, target_feature: "+vsx" }
+ # Fails due to QEMU floating point errors, probably handling subnormals incorrectly.
+ # This target is somewhat redundant, since ppc64le has altivec as well.
+ # - { target: powerpc-unknown-linux-gnu, target_feature: "+altivec" }
+ # We should test this, but cross currently can't run it
+ # - { target: riscv64gc-unknown-linux-gnu, target_feature: "+v,+zvl128b" }
steps:
- uses: actions/checkout@v2
@@ -217,11 +210,27 @@ jobs:
# being part of the tarball means we can't just use the download/latest
# URL :(
run: |
- CROSS_URL=https://github.com/rust-embedded/cross/releases/download/v0.2.1/cross-v0.2.1-x86_64-unknown-linux-gnu.tar.gz
+ CROSS_URL=https://github.com/cross-rs/cross/releases/download/v0.2.5/cross-x86_64-unknown-linux-gnu.tar.gz
mkdir -p "$HOME/.bin"
curl -sfSL --retry-delay 10 --retry 5 "${CROSS_URL}" | tar zxf - -C "$HOME/.bin"
echo "$HOME/.bin" >> $GITHUB_PATH
+ - name: Configure Emulated CPUs
+ run: |
+ echo "CARGO_TARGET_POWERPC_UNKNOWN_LINUX_GNU_RUNNER=qemu-ppc -cpu e600" >> $GITHUB_ENV
+ # echo "CARGO_TARGET_RISCV64GC_UNKNOWN_LINUX_GNU_RUNNER=qemu-riscv64 -cpu rv64,zba=true,zbb=true,v=true,vlen=256,vext_spec=v1.0" >> $GITHUB_ENV
+
+ - name: Configure RUSTFLAGS
+ shell: bash
+ run: |
+ case "${{ matrix.target_feature }}" in
+ default)
+ echo "RUSTFLAGS=" >> $GITHUB_ENV;;
+ *)
+ echo "RUSTFLAGS=-Ctarget-feature=${{ matrix.target_feature }}" >> $GITHUB_ENV
+ ;;
+ esac
+
- name: Test (debug)
run: cross test --verbose --target=${{ matrix.target }}
@@ -229,7 +238,7 @@ jobs:
run: cross test --verbose --target=${{ matrix.target }} --release
features:
- name: "Check cargo features (${{ matrix.simd }} × ${{ matrix.features }})"
+ name: "Test cargo features (${{ matrix.simd }} × ${{ matrix.features }})"
runs-on: ubuntu-latest
strategy:
fail-fast: false
@@ -240,12 +249,8 @@ jobs:
features:
- ""
- "--features std"
- - "--features generic_const_exprs"
- - "--features std --features generic_const_exprs"
- "--features all_lane_counts"
- - "--features all_lane_counts --features std"
- - "--features all_lane_counts --features generic_const_exprs"
- - "--features all_lane_counts --features std --features generic_const_exprs"
+ - "--all-features"
steps:
- uses: actions/checkout@v2
@@ -257,9 +262,9 @@ jobs:
run: echo "CPU_FEATURE=$(lscpu | grep -o avx512[a-z]* | sed s/avx/+avx/ | tr '\n' ',' )" >> $GITHUB_ENV
- name: Check build
if: ${{ matrix.simd == '' }}
- run: RUSTFLAGS="-Dwarnings" cargo check --all-targets --no-default-features ${{ matrix.features }}
+ run: RUSTFLAGS="-Dwarnings" cargo test --all-targets --no-default-features ${{ matrix.features }}
- name: Check AVX
if: ${{ matrix.simd == 'avx512' && contains(env.CPU_FEATURE, 'avx512') }}
run: |
echo "Found AVX features: $CPU_FEATURE"
- RUSTFLAGS="-Dwarnings -Ctarget-feature=$CPU_FEATURE" cargo check --all-targets --no-default-features ${{ matrix.features }}
+ RUSTFLAGS="-Dwarnings -Ctarget-feature=$CPU_FEATURE" cargo test --all-targets --no-default-features ${{ matrix.features }}
diff --git a/library/portable-simd/Cargo.lock b/library/portable-simd/Cargo.lock
new file mode 100644
index 000000000..46312c096
--- /dev/null
+++ b/library/portable-simd/Cargo.lock
@@ -0,0 +1,304 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "autocfg"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
+
+[[package]]
+name = "bitflags"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
+[[package]]
+name = "bumpalo"
+version = "3.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
+
+[[package]]
+name = "byteorder"
+version = "1.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "console_error_panic_hook"
+version = "0.1.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a06aeb73f470f66dcdbf7223caeebb85984942f22f1adb2a088cf9668146bbbc"
+dependencies = [
+ "cfg-if",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "core_simd"
+version = "0.1.0"
+dependencies = [
+ "proptest",
+ "std_float",
+ "test_helpers",
+ "wasm-bindgen",
+ "wasm-bindgen-test",
+]
+
+[[package]]
+name = "js-sys"
+version = "0.3.64"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a"
+dependencies = [
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "log"
+version = "0.4.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
+
+[[package]]
+name = "num-traits"
+version = "0.2.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "once_cell"
+version = "1.18.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
+
+[[package]]
+name = "ppv-lite86"
+version = "0.2.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.66"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "proptest"
+version = "0.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "12e6c80c1139113c28ee4670dc50cc42915228b51f56a9e407f0ec60f966646f"
+dependencies = [
+ "bitflags",
+ "byteorder",
+ "num-traits",
+ "rand",
+ "rand_chacha",
+ "rand_xorshift",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.33"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "rand"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
+dependencies = [
+ "rand_chacha",
+ "rand_core",
+ "rand_hc",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
+dependencies = [
+ "ppv-lite86",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
+
+[[package]]
+name = "rand_hc"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
+dependencies = [
+ "rand_core",
+]
+
+[[package]]
+name = "rand_xorshift"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "77d416b86801d23dde1aa643023b775c3a462efc0ed96443add11546cdf1dca8"
+dependencies = [
+ "rand_core",
+]
+
+[[package]]
+name = "scoped-tls"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294"
+
+[[package]]
+name = "std_float"
+version = "0.1.0"
+dependencies = [
+ "core_simd",
+]
+
+[[package]]
+name = "syn"
+version = "2.0.29"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c324c494eba9d92503e6f1ef2e6df781e78f6a7705a0202d9801b198807d518a"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "test_helpers"
+version = "0.1.0"
+dependencies = [
+ "proptest",
+]
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c"
+
+[[package]]
+name = "wasm-bindgen"
+version = "0.2.87"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342"
+dependencies = [
+ "cfg-if",
+ "wasm-bindgen-macro",
+]
+
+[[package]]
+name = "wasm-bindgen-backend"
+version = "0.2.87"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd"
+dependencies = [
+ "bumpalo",
+ "log",
+ "once_cell",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-futures"
+version = "0.4.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03"
+dependencies = [
+ "cfg-if",
+ "js-sys",
+ "wasm-bindgen",
+ "web-sys",
+]
+
+[[package]]
+name = "wasm-bindgen-macro"
+version = "0.2.87"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d"
+dependencies = [
+ "quote",
+ "wasm-bindgen-macro-support",
+]
+
+[[package]]
+name = "wasm-bindgen-macro-support"
+version = "0.2.87"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-backend",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-shared"
+version = "0.2.87"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1"
+
+[[package]]
+name = "wasm-bindgen-test"
+version = "0.3.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6e6e302a7ea94f83a6d09e78e7dc7d9ca7b186bc2829c24a22d0753efd680671"
+dependencies = [
+ "console_error_panic_hook",
+ "js-sys",
+ "scoped-tls",
+ "wasm-bindgen",
+ "wasm-bindgen-futures",
+ "wasm-bindgen-test-macro",
+]
+
+[[package]]
+name = "wasm-bindgen-test-macro"
+version = "0.3.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ecb993dd8c836930ed130e020e77d9b2e65dd0fbab1b67c790b0f5d80b11a575"
+dependencies = [
+ "proc-macro2",
+ "quote",
+]
+
+[[package]]
+name = "web-sys"
+version = "0.3.64"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b"
+dependencies = [
+ "js-sys",
+ "wasm-bindgen",
+]
diff --git a/library/portable-simd/crates/core_simd/Cargo.toml b/library/portable-simd/crates/core_simd/Cargo.toml
index d1a3a515a..b4a8fd70f 100644
--- a/library/portable-simd/crates/core_simd/Cargo.toml
+++ b/library/portable-simd/crates/core_simd/Cargo.toml
@@ -12,7 +12,6 @@ license = "MIT OR Apache-2.0"
default = ["as_crate"]
as_crate = []
std = []
-generic_const_exprs = []
all_lane_counts = []
[target.'cfg(target_arch = "wasm32")'.dev-dependencies]
diff --git a/library/portable-simd/crates/core_simd/examples/dot_product.rs b/library/portable-simd/crates/core_simd/examples/dot_product.rs
index a7973ec74..f047010a6 100644
--- a/library/portable-simd/crates/core_simd/examples/dot_product.rs
+++ b/library/portable-simd/crates/core_simd/examples/dot_product.rs
@@ -6,7 +6,7 @@
#![feature(slice_as_chunks)]
// Add these imports to use the stdsimd library
#![feature(portable_simd)]
-use core_simd::simd::*;
+use core_simd::simd::prelude::*;
// This is your barebones dot product implementation:
// Take 2 vectors, multiply them element wise and *then*
diff --git a/library/portable-simd/crates/core_simd/examples/matrix_inversion.rs b/library/portable-simd/crates/core_simd/examples/matrix_inversion.rs
index 39f530f68..bad864144 100644
--- a/library/portable-simd/crates/core_simd/examples/matrix_inversion.rs
+++ b/library/portable-simd/crates/core_simd/examples/matrix_inversion.rs
@@ -2,8 +2,7 @@
// Code ported from the `packed_simd` crate
// Run this code with `cargo test --example matrix_inversion`
#![feature(array_chunks, portable_simd)]
-use core_simd::simd::*;
-use Which::*;
+use core_simd::simd::prelude::*;
// Gotta define our own 4x4 matrix since Rust doesn't ship multidim arrays yet :^)
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd)]
@@ -164,10 +163,10 @@ pub fn simd_inv4x4(m: Matrix4x4) -> Option<Matrix4x4> {
let m_2 = f32x4::from_array(m[2]);
let m_3 = f32x4::from_array(m[3]);
- const SHUFFLE01: [Which; 4] = [First(0), First(1), Second(0), Second(1)];
- const SHUFFLE02: [Which; 4] = [First(0), First(2), Second(0), Second(2)];
- const SHUFFLE13: [Which; 4] = [First(1), First(3), Second(1), Second(3)];
- const SHUFFLE23: [Which; 4] = [First(2), First(3), Second(2), Second(3)];
+ const SHUFFLE01: [usize; 4] = [0, 1, 4, 5];
+ const SHUFFLE02: [usize; 4] = [0, 2, 4, 6];
+ const SHUFFLE13: [usize; 4] = [1, 3, 5, 7];
+ const SHUFFLE23: [usize; 4] = [2, 3, 6, 7];
let tmp = simd_swizzle!(m_0, m_1, SHUFFLE01);
let row1 = simd_swizzle!(m_2, m_3, SHUFFLE01);
@@ -180,58 +179,58 @@ pub fn simd_inv4x4(m: Matrix4x4) -> Option<Matrix4x4> {
let row2 = simd_swizzle!(tmp, row3, SHUFFLE02);
let row3 = simd_swizzle!(row3, tmp, SHUFFLE13);
- let tmp = (row2 * row3).reverse().rotate_lanes_right::<2>();
+ let tmp = (row2 * row3).reverse().rotate_elements_right::<2>();
let minor0 = row1 * tmp;
let minor1 = row0 * tmp;
- let tmp = tmp.rotate_lanes_right::<2>();
+ let tmp = tmp.rotate_elements_right::<2>();
let minor0 = (row1 * tmp) - minor0;
let minor1 = (row0 * tmp) - minor1;
- let minor1 = minor1.rotate_lanes_right::<2>();
+ let minor1 = minor1.rotate_elements_right::<2>();
- let tmp = (row1 * row2).reverse().rotate_lanes_right::<2>();
+ let tmp = (row1 * row2).reverse().rotate_elements_right::<2>();
let minor0 = (row3 * tmp) + minor0;
let minor3 = row0 * tmp;
- let tmp = tmp.rotate_lanes_right::<2>();
+ let tmp = tmp.rotate_elements_right::<2>();
let minor0 = minor0 - row3 * tmp;
let minor3 = row0 * tmp - minor3;
- let minor3 = minor3.rotate_lanes_right::<2>();
+ let minor3 = minor3.rotate_elements_right::<2>();
- let tmp = (row3 * row1.rotate_lanes_right::<2>())
+ let tmp = (row3 * row1.rotate_elements_right::<2>())
.reverse()
- .rotate_lanes_right::<2>();
- let row2 = row2.rotate_lanes_right::<2>();
+ .rotate_elements_right::<2>();
+ let row2 = row2.rotate_elements_right::<2>();
let minor0 = row2 * tmp + minor0;
let minor2 = row0 * tmp;
- let tmp = tmp.rotate_lanes_right::<2>();
+ let tmp = tmp.rotate_elements_right::<2>();
let minor0 = minor0 - row2 * tmp;
let minor2 = row0 * tmp - minor2;
- let minor2 = minor2.rotate_lanes_right::<2>();
+ let minor2 = minor2.rotate_elements_right::<2>();
- let tmp = (row0 * row1).reverse().rotate_lanes_right::<2>();
+ let tmp = (row0 * row1).reverse().rotate_elements_right::<2>();
let minor2 = minor2 + row3 * tmp;
let minor3 = row2 * tmp - minor3;
- let tmp = tmp.rotate_lanes_right::<2>();
+ let tmp = tmp.rotate_elements_right::<2>();
let minor2 = row3 * tmp - minor2;
let minor3 = minor3 - row2 * tmp;
- let tmp = (row0 * row3).reverse().rotate_lanes_right::<2>();
+ let tmp = (row0 * row3).reverse().rotate_elements_right::<2>();
let minor1 = minor1 - row2 * tmp;
let minor2 = row1 * tmp + minor2;
- let tmp = tmp.rotate_lanes_right::<2>();
+ let tmp = tmp.rotate_elements_right::<2>();
let minor1 = row2 * tmp + minor1;
let minor2 = minor2 - row1 * tmp;
- let tmp = (row0 * row2).reverse().rotate_lanes_right::<2>();
+ let tmp = (row0 * row2).reverse().rotate_elements_right::<2>();
let minor1 = row3 * tmp + minor1;
let minor3 = minor3 - row1 * tmp;
- let tmp = tmp.rotate_lanes_right::<2>();
+ let tmp = tmp.rotate_elements_right::<2>();
let minor1 = minor1 - row3 * tmp;
let minor3 = row1 * tmp + minor3;
let det = row0 * minor0;
- let det = det.rotate_lanes_right::<2>() + det;
- let det = det.reverse().rotate_lanes_right::<2>() + det;
+ let det = det.rotate_elements_right::<2>() + det;
+ let det = det.reverse().rotate_elements_right::<2>() + det;
if det.reduce_sum() == 0. {
return None;
diff --git a/library/portable-simd/crates/core_simd/examples/nbody.rs b/library/portable-simd/crates/core_simd/examples/nbody.rs
index df38a0096..65820d134 100644
--- a/library/portable-simd/crates/core_simd/examples/nbody.rs
+++ b/library/portable-simd/crates/core_simd/examples/nbody.rs
@@ -1,11 +1,12 @@
#![feature(portable_simd)]
+#![allow(clippy::excessive_precision)]
extern crate std_float;
/// Benchmarks game nbody code
/// Taken from the `packed_simd` crate
/// Run this benchmark with `cargo test --example nbody`
mod nbody {
- use core_simd::simd::*;
+ use core_simd::simd::prelude::*;
#[allow(unused)] // False positive?
use std_float::StdFloat;
diff --git a/library/portable-simd/crates/core_simd/examples/spectral_norm.rs b/library/portable-simd/crates/core_simd/examples/spectral_norm.rs
index d576bd0cc..bc7934c25 100644
--- a/library/portable-simd/crates/core_simd/examples/spectral_norm.rs
+++ b/library/portable-simd/crates/core_simd/examples/spectral_norm.rs
@@ -1,6 +1,6 @@
#![feature(portable_simd)]
-use core_simd::simd::*;
+use core_simd::simd::prelude::*;
fn a(i: usize, j: usize) -> f64 {
((i + j) * (i + j + 1) / 2 + i + 1) as f64
diff --git a/library/portable-simd/crates/core_simd/src/core_simd_docs.md b/library/portable-simd/crates/core_simd/src/core_simd_docs.md
index 15e8ed025..bf412e035 100644
--- a/library/portable-simd/crates/core_simd/src/core_simd_docs.md
+++ b/library/portable-simd/crates/core_simd/src/core_simd_docs.md
@@ -2,3 +2,38 @@ Portable SIMD module.
This module offers a portable abstraction for SIMD operations
that is not bound to any particular hardware architecture.
+
+# What is "portable"?
+
+This module provides a SIMD implementation that is fast and predictable on any target.
+
+### Portable SIMD works on every target
+
+Unlike target-specific SIMD in `std::arch`, portable SIMD compiles for every target.
+In this regard, it is just like "regular" Rust.
+
+### Portable SIMD is consistent between targets
+
+A program using portable SIMD can expect identical behavior on any target.
+In most regards, [`Simd<T, N>`] can be thought of as a parallelized `[T; N]` and operates like a sequence of `T`.
+
+This has one notable exception: a handful of older architectures (e.g. `armv7` and `powerpc`) flush [subnormal](`f32::is_subnormal`) `f32` values to zero.
+On these architectures, subnormal `f32` input values are replaced with zeros, and any operation producing subnormal `f32` values produces zeros instead.
+This doesn't affect most architectures or programs.
+
+### Operations use the best instructions available
+
+Operations provided by this module compile to the best available SIMD instructions.
+
+Portable SIMD is not a low-level vendor library, and operations in portable SIMD _do not_ necessarily map to a single instruction.
+Instead, they map to a reasonable implementation of the operation for the target.
+
+Consistency between targets is not compromised to use faster or fewer instructions.
+In some cases, `std::arch` will provide a faster function that has slightly different behavior than the `std::simd` equivalent.
+For example, `_mm_min_ps`[^1] can be slightly faster than [`SimdFloat::simd_min`](`num::SimdFloat::simd_min`), but does not conform to the IEEE standard also used by [`f32::min`].
+When necessary, [`Simd<T, N>`] can be converted to the types provided by `std::arch` to make use of target-specific functions.
+
+Many targets simply don't have SIMD, or don't support SIMD for a particular element type.
+In those cases, regular scalar operations are generated instead.
+
+[^1]: `_mm_min_ps(x, y)` is equivalent to `x.simd_lt(y).select(x, y)`
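+
+A sketch of the footnote's equivalence and the behavioral gap it alludes to (nightly, `portable_simd`): `simd_min` follows `f32::min`'s IEEE rule of returning the non-NaN operand, while the `_mm_min_ps`-style select forwards its second operand whenever the comparison is false, including on NaN.
+
+```rust
+#![feature(portable_simd)]
+use core::simd::prelude::*;
+
+let x = f32x4::from_array([1.0, f32::NAN, 3.0, 4.0]);
+let y = f32x4::from_array([2.0, 2.0, f32::NAN, 0.0]);
+
+let ieee = x.simd_min(y);            // NaN lanes yield the other operand
+let sse = x.simd_lt(y).select(x, y); // the `_mm_min_ps(x, y)` equivalence
+
+assert_eq!(ieee.to_array()[2], 3.0);  // IEEE min ignores the NaN in `y`
+assert!(sse.to_array()[2].is_nan());  // `3.0 < NaN` is false, so `y` is taken
+```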
diff --git a/library/portable-simd/crates/core_simd/src/fmt.rs b/library/portable-simd/crates/core_simd/src/fmt.rs
index b7317969c..3a540f5a0 100644
--- a/library/portable-simd/crates/core_simd/src/fmt.rs
+++ b/library/portable-simd/crates/core_simd/src/fmt.rs
@@ -1,9 +1,9 @@
use crate::simd::{LaneCount, Simd, SimdElement, SupportedLaneCount};
use core::fmt;
-impl<T, const LANES: usize> fmt::Debug for Simd<T, LANES>
+impl<T, const N: usize> fmt::Debug for Simd<T, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
T: SimdElement + fmt::Debug,
{
/// A `Simd<T, N>` has a debug format like the one for `[T]`:
diff --git a/library/portable-simd/crates/core_simd/src/intrinsics.rs b/library/portable-simd/crates/core_simd/src/intrinsics.rs
index dd6698e2b..b27893bc7 100644
--- a/library/portable-simd/crates/core_simd/src/intrinsics.rs
+++ b/library/portable-simd/crates/core_simd/src/intrinsics.rs
@@ -160,4 +160,10 @@ extern "platform-intrinsic" {
/// convert an exposed address back to a pointer
pub(crate) fn simd_from_exposed_addr<T, U>(addr: T) -> U;
+
+ // Integer operations
+ pub(crate) fn simd_bswap<T>(x: T) -> T;
+ pub(crate) fn simd_bitreverse<T>(x: T) -> T;
+ pub(crate) fn simd_ctlz<T>(x: T) -> T;
+ pub(crate) fn simd_cttz<T>(x: T) -> T;
}
diff --git a/library/portable-simd/crates/core_simd/src/iter.rs b/library/portable-simd/crates/core_simd/src/iter.rs
index 328c995b8..b3732fd74 100644
--- a/library/portable-simd/crates/core_simd/src/iter.rs
+++ b/library/portable-simd/crates/core_simd/src/iter.rs
@@ -6,9 +6,9 @@ use core::{
macro_rules! impl_traits {
{ $type:ty } => {
- impl<const LANES: usize> Sum<Self> for Simd<$type, LANES>
+ impl<const N: usize> Sum<Self> for Simd<$type, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
@@ -16,9 +16,9 @@ macro_rules! impl_traits {
}
}
- impl<const LANES: usize> Product<Self> for Simd<$type, LANES>
+ impl<const N: usize> Product<Self> for Simd<$type, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn product<I: Iterator<Item = Self>>(iter: I) -> Self {
@@ -26,9 +26,9 @@ macro_rules! impl_traits {
}
}
- impl<'a, const LANES: usize> Sum<&'a Self> for Simd<$type, LANES>
+ impl<'a, const N: usize> Sum<&'a Self> for Simd<$type, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn sum<I: Iterator<Item = &'a Self>>(iter: I) -> Self {
@@ -36,9 +36,9 @@ macro_rules! impl_traits {
}
}
- impl<'a, const LANES: usize> Product<&'a Self> for Simd<$type, LANES>
+ impl<'a, const N: usize> Product<&'a Self> for Simd<$type, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn product<I: Iterator<Item = &'a Self>>(iter: I) -> Self {
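
This hunk only renames the const parameter; behavior is unchanged. For orientation, a small sketch of what these `Sum`/`Product` impls enable:

```rust
#![feature(portable_simd)]
use std::simd::f32x4;

fn main() {
    let chunks = [f32x4::splat(1.0), f32x4::splat(2.0), f32x4::splat(3.0)];
    // `Sum`/`Product` fold an iterator of vectors element-wise;
    // a final horizontal reduction is still up to the caller.
    let total: f32x4 = chunks.iter().sum();
    assert_eq!(total.to_array(), [6.0; 4]);
}
```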
diff --git a/library/portable-simd/crates/core_simd/src/lane_count.rs b/library/portable-simd/crates/core_simd/src/lane_count.rs
index 2b91eb9e8..4cd7265ed 100644
--- a/library/portable-simd/crates/core_simd/src/lane_count.rs
+++ b/library/portable-simd/crates/core_simd/src/lane_count.rs
@@ -4,11 +4,11 @@ mod sealed {
use sealed::Sealed;
/// Specifies the number of lanes in a SIMD vector as a type.
-pub struct LaneCount<const LANES: usize>;
+pub struct LaneCount<const N: usize>;
-impl<const LANES: usize> LaneCount<LANES> {
+impl<const N: usize> LaneCount<N> {
/// The number of bytes in a bitmask with this many lanes.
- pub const BITMASK_LEN: usize = (LANES + 7) / 8;
+ pub const BITMASK_LEN: usize = (N + 7) / 8;
}
/// Statically guarantees that a lane count is marked as supported.
@@ -21,7 +21,7 @@ pub trait SupportedLaneCount: Sealed {
type BitMask: Copy + Default + AsRef<[u8]> + AsMut<[u8]>;
}
-impl<const LANES: usize> Sealed for LaneCount<LANES> {}
+impl<const N: usize> Sealed for LaneCount<N> {}
macro_rules! supported_lane_count {
($($lanes:literal),+) => {
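
`BITMASK_LEN` is a ceiling division: the number of whole bytes needed to hold one bit per lane. A standalone sketch of the `(N + 7) / 8` arithmetic (the free function here mirrors the associated const for illustration only):

```rust
const fn bitmask_len(n: usize) -> usize {
    // Ceiling division: round n lanes up to whole bytes.
    (n + 7) / 8
}

fn main() {
    assert_eq!(bitmask_len(1), 1); // a single lane still needs a full byte
    assert_eq!(bitmask_len(8), 1);
    assert_eq!(bitmask_len(9), 2); // one extra lane spills into a second byte
    assert_eq!(bitmask_len(64), 8);
}
```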
diff --git a/library/portable-simd/crates/core_simd/src/lib.rs b/library/portable-simd/crates/core_simd/src/lib.rs
index fde406bda..64ba9705e 100644
--- a/library/portable-simd/crates/core_simd/src/lib.rs
+++ b/library/portable-simd/crates/core_simd/src/lib.rs
@@ -5,6 +5,7 @@
const_mut_refs,
convert_float_to_int,
decl_macro,
+ inline_const,
intra_doc_pointers,
platform_intrinsics,
repr_simd,
@@ -14,10 +15,9 @@
strict_provenance,
ptr_metadata
)]
-#![cfg_attr(feature = "generic_const_exprs", feature(generic_const_exprs))]
-#![cfg_attr(feature = "generic_const_exprs", allow(incomplete_features))]
#![warn(missing_docs, clippy::missing_inline_in_public_items)] // basically all items, really
#![deny(unsafe_op_in_unsafe_fn, clippy::undocumented_unsafe_blocks)]
+#![allow(internal_features)]
#![unstable(feature = "portable_simd", issue = "86656")]
//! Portable SIMD module.
diff --git a/library/portable-simd/crates/core_simd/src/masks.rs b/library/portable-simd/crates/core_simd/src/masks.rs
index fea687bdc..0623d2bf3 100644
--- a/library/portable-simd/crates/core_simd/src/masks.rs
+++ b/library/portable-simd/crates/core_simd/src/masks.rs
@@ -1,4 +1,4 @@
-//! Types and traits associated with masking lanes of vectors.
+//! Types and traits associated with masking elements of vectors.
//! Types representing
#![allow(non_camel_case_types)]
@@ -12,13 +12,9 @@
)]
mod mask_impl;
-mod to_bitmask;
-pub use to_bitmask::ToBitMask;
-
-#[cfg(feature = "generic_const_exprs")]
-pub use to_bitmask::{bitmask_len, ToBitMaskArray};
-
-use crate::simd::{intrinsics, LaneCount, Simd, SimdElement, SimdPartialEq, SupportedLaneCount};
+use crate::simd::{
+ cmp::SimdPartialEq, intrinsics, LaneCount, Simd, SimdCast, SimdElement, SupportedLaneCount,
+};
use core::cmp::Ordering;
use core::{fmt, mem};
@@ -32,13 +28,17 @@ mod sealed {
/// prevent us from ever removing that bound, or from implementing `MaskElement` on
/// non-`PartialEq` types in the future.
pub trait Sealed {
- fn valid<const LANES: usize>(values: Simd<Self, LANES>) -> bool
+ fn valid<const N: usize>(values: Simd<Self, N>) -> bool
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
Self: SimdElement;
fn eq(self, other: Self) -> bool;
+ fn as_usize(self) -> usize;
+
+ type Unsigned: SimdElement;
+
const TRUE: Self;
const FALSE: Self;
@@ -50,15 +50,15 @@ use sealed::Sealed;
///
/// # Safety
/// Type must be a signed integer.
-pub unsafe trait MaskElement: SimdElement + Sealed {}
+pub unsafe trait MaskElement: SimdElement<Mask = Self> + SimdCast + Sealed {}
macro_rules! impl_element {
- { $ty:ty } => {
+ { $ty:ty, $unsigned:ty } => {
impl Sealed for $ty {
#[inline]
- fn valid<const LANES: usize>(value: Simd<Self, LANES>) -> bool
+ fn valid<const N: usize>(value: Simd<Self, N>) -> bool
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
(value.simd_eq(Simd::splat(0 as _)) | value.simd_eq(Simd::splat(-1 as _))).all()
}
@@ -66,6 +66,13 @@ macro_rules! impl_element {
#[inline]
fn eq(self, other: Self) -> bool { self == other }
+ #[inline]
+ fn as_usize(self) -> usize {
+ self as usize
+ }
+
+ type Unsigned = $unsigned;
+
const TRUE: Self = -1;
const FALSE: Self = 0;
}
@@ -75,36 +82,36 @@ macro_rules! impl_element {
}
}
-impl_element! { i8 }
-impl_element! { i16 }
-impl_element! { i32 }
-impl_element! { i64 }
-impl_element! { isize }
+impl_element! { i8, u8 }
+impl_element! { i16, u16 }
+impl_element! { i32, u32 }
+impl_element! { i64, u64 }
+impl_element! { isize, usize }
-/// A SIMD vector mask for `LANES` elements of width specified by `Element`.
+/// A SIMD vector mask for `N` elements of width specified by `Element`.
///
-/// Masks represent boolean inclusion/exclusion on a per-lane basis.
+/// Masks represent boolean inclusion/exclusion on a per-element basis.
///
/// The layout of this type is unspecified, and may change between platforms
/// and/or Rust versions, and code should not assume that it is equivalent to
-/// `[T; LANES]`.
-#[cfg_attr(not(doc), repr(transparent))] // work around https://github.com/rust-lang/rust/issues/90435
-pub struct Mask<T, const LANES: usize>(mask_impl::Mask<T, LANES>)
+/// `[T; N]`.
+#[repr(transparent)]
+pub struct Mask<T, const N: usize>(mask_impl::Mask<T, N>)
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount;
+ LaneCount<N>: SupportedLaneCount;
-impl<T, const LANES: usize> Copy for Mask<T, LANES>
+impl<T, const N: usize> Copy for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
}
-impl<T, const LANES: usize> Clone for Mask<T, LANES>
+impl<T, const N: usize> Clone for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn clone(&self) -> Self {
@@ -112,12 +119,12 @@ where
}
}
-impl<T, const LANES: usize> Mask<T, LANES>
+impl<T, const N: usize> Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
- /// Construct a mask by setting all lanes to the given value.
+ /// Construct a mask by setting all elements to the given value.
#[inline]
pub fn splat(value: bool) -> Self {
Self(mask_impl::Mask::splat(value))
@@ -125,7 +132,7 @@ where
/// Converts an array of bools to a SIMD mask.
#[inline]
- pub fn from_array(array: [bool; LANES]) -> Self {
+ pub fn from_array(array: [bool; N]) -> Self {
// SAFETY: Rust's bool has a layout of 1 byte (u8) with a value of
// true: 0b_0000_0001
// false: 0b_0000_0000
@@ -133,16 +140,15 @@ where
// This would be hypothetically valid as an "in-place" transmute,
// but these are "dependently-sized" types, so copy elision it is!
unsafe {
- let bytes: [u8; LANES] = mem::transmute_copy(&array);
- let bools: Simd<i8, LANES> =
- intrinsics::simd_ne(Simd::from_array(bytes), Simd::splat(0u8));
+ let bytes: [u8; N] = mem::transmute_copy(&array);
+ let bools: Simd<i8, N> = intrinsics::simd_ne(Simd::from_array(bytes), Simd::splat(0u8));
Mask::from_int_unchecked(intrinsics::simd_cast(bools))
}
}
/// Converts a SIMD mask to an array of bools.
#[inline]
- pub fn to_array(self) -> [bool; LANES] {
+ pub fn to_array(self) -> [bool; N] {
// This follows mostly the same logic as from_array.
// SAFETY: Rust's bool has a layout of 1 byte (u8) with a value of
// true: 0b_0000_0001
@@ -154,7 +160,7 @@ where
// This would be hypothetically valid as an "in-place" transmute,
// but these are "dependently-sized" types, so copy elision it is!
unsafe {
- let mut bytes: Simd<i8, LANES> = intrinsics::simd_cast(self.to_int());
+ let mut bytes: Simd<i8, N> = intrinsics::simd_cast(self.to_int());
bytes &= Simd::splat(1i8);
mem::transmute_copy(&bytes)
}
@@ -164,10 +170,10 @@ where
/// represents `true`.
///
/// # Safety
- /// All lanes must be either 0 or -1.
+ /// All elements must be either 0 or -1.
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
- pub unsafe fn from_int_unchecked(value: Simd<T, LANES>) -> Self {
+ pub unsafe fn from_int_unchecked(value: Simd<T, N>) -> Self {
// Safety: the caller must confirm this invariant
unsafe { Self(mask_impl::Mask::from_int_unchecked(value)) }
}
@@ -176,11 +182,11 @@ where
/// represents `true`.
///
/// # Panics
- /// Panics if any lane is not 0 or -1.
+ /// Panics if any element is not 0 or -1.
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
#[track_caller]
- pub fn from_int(value: Simd<T, LANES>) -> Self {
+ pub fn from_int(value: Simd<T, N>) -> Self {
assert!(T::valid(value), "all values must be either 0 or -1",);
// Safety: the validity has been checked
unsafe { Self::from_int_unchecked(value) }
@@ -190,121 +196,244 @@ where
/// represents `true`.
#[inline]
#[must_use = "method returns a new vector and does not mutate the original value"]
- pub fn to_int(self) -> Simd<T, LANES> {
+ pub fn to_int(self) -> Simd<T, N> {
self.0.to_int()
}
- /// Converts the mask to a mask of any other lane size.
+ /// Converts the mask to a mask of any other element size.
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
- pub fn cast<U: MaskElement>(self) -> Mask<U, LANES> {
+ pub fn cast<U: MaskElement>(self) -> Mask<U, N> {
Mask(self.0.convert())
}
- /// Tests the value of the specified lane.
+ /// Tests the value of the specified element.
///
/// # Safety
- /// `lane` must be less than `LANES`.
+ /// `index` must be less than `self.len()`.
#[inline]
#[must_use = "method returns a new bool and does not mutate the original value"]
- pub unsafe fn test_unchecked(&self, lane: usize) -> bool {
+ pub unsafe fn test_unchecked(&self, index: usize) -> bool {
// Safety: the caller must confirm this invariant
- unsafe { self.0.test_unchecked(lane) }
+ unsafe { self.0.test_unchecked(index) }
}
- /// Tests the value of the specified lane.
+ /// Tests the value of the specified element.
///
/// # Panics
- /// Panics if `lane` is greater than or equal to the number of lanes in the vector.
+ /// Panics if `index` is greater than or equal to the number of elements in the vector.
#[inline]
#[must_use = "method returns a new bool and does not mutate the original value"]
#[track_caller]
- pub fn test(&self, lane: usize) -> bool {
- assert!(lane < LANES, "lane index out of range");
- // Safety: the lane index has been checked
- unsafe { self.test_unchecked(lane) }
+ pub fn test(&self, index: usize) -> bool {
+ assert!(index < N, "element index out of range");
+ // Safety: the element index has been checked
+ unsafe { self.test_unchecked(index) }
}
- /// Sets the value of the specified lane.
+ /// Sets the value of the specified element.
///
/// # Safety
- /// `lane` must be less than `LANES`.
+ /// `index` must be less than `self.len()`.
#[inline]
- pub unsafe fn set_unchecked(&mut self, lane: usize, value: bool) {
+ pub unsafe fn set_unchecked(&mut self, index: usize, value: bool) {
// Safety: the caller must confirm this invariant
unsafe {
- self.0.set_unchecked(lane, value);
+ self.0.set_unchecked(index, value);
}
}
- /// Sets the value of the specified lane.
+ /// Sets the value of the specified element.
///
/// # Panics
- /// Panics if `lane` is greater than or equal to the number of lanes in the vector.
+ /// Panics if `index` is greater than or equal to the number of elements in the vector.
#[inline]
#[track_caller]
- pub fn set(&mut self, lane: usize, value: bool) {
- assert!(lane < LANES, "lane index out of range");
- // Safety: the lane index has been checked
+ pub fn set(&mut self, index: usize, value: bool) {
+ assert!(index < N, "element index out of range");
+ // Safety: the element index has been checked
unsafe {
- self.set_unchecked(lane, value);
+ self.set_unchecked(index, value);
}
}
- /// Returns true if any lane is set, or false otherwise.
+ /// Returns true if any element is set, or false otherwise.
#[inline]
#[must_use = "method returns a new bool and does not mutate the original value"]
pub fn any(self) -> bool {
self.0.any()
}
- /// Returns true if all lanes are set, or false otherwise.
+ /// Returns true if all elements are set, or false otherwise.
#[inline]
#[must_use = "method returns a new bool and does not mutate the original value"]
pub fn all(self) -> bool {
self.0.all()
}
+
+ /// Create a bitmask from a mask.
+ ///
+ /// Each bit is set if the corresponding element in the mask is `true`.
+ /// If the mask contains more than 64 elements, the bitmask is truncated to the first 64.
+ #[inline]
+ #[must_use = "method returns a new integer and does not mutate the original value"]
+ pub fn to_bitmask(self) -> u64 {
+ self.0.to_bitmask_integer()
+ }
+
+ /// Create a mask from a bitmask.
+ ///
+ /// For each bit, if it is set, the corresponding element in the mask is set to `true`.
+ /// If the mask contains more than 64 elements, the remainder are set to `false`.
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ pub fn from_bitmask(bitmask: u64) -> Self {
+ Self(mask_impl::Mask::from_bitmask_integer(bitmask))
+ }
+
+ /// Create a bitmask vector from a mask.
+ ///
+ /// Each bit is set if the corresponding element in the mask is `true`.
+ /// The remaining bits are unset.
+ ///
+ /// The bits are packed into the first N bits of the vector:
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd;
+ /// # use simd::mask32x8;
+ /// let mask = mask32x8::from_array([true, false, true, false, false, false, true, false]);
+ /// assert_eq!(mask.to_bitmask_vector()[0], 0b01000101);
+ /// ```
+ #[inline]
+ #[must_use = "method returns a new integer and does not mutate the original value"]
+ pub fn to_bitmask_vector(self) -> Simd<u8, N> {
+ self.0.to_bitmask_vector()
+ }
+
+ /// Create a mask from a bitmask vector.
+ ///
+ /// For each bit, if it is set, the corresponding element in the mask is set to `true`.
+ ///
+ /// The bits are packed into the first N bits of the vector:
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd;
+ /// # use simd::{mask32x8, u8x8};
+ /// let bitmask = u8x8::from_array([0b01000101, 0, 0, 0, 0, 0, 0, 0]);
+ /// assert_eq!(
+ /// mask32x8::from_bitmask_vector(bitmask),
+ /// mask32x8::from_array([true, false, true, false, false, false, true, false]),
+ /// );
+ /// ```
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original value"]
+ pub fn from_bitmask_vector(bitmask: Simd<u8, N>) -> Self {
+ Self(mask_impl::Mask::from_bitmask_vector(bitmask))
+ }
+
+ /// Find the index of the first set element.
+ ///
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd;
+ /// # use simd::mask32x8;
+ /// assert_eq!(mask32x8::splat(false).first_set(), None);
+ /// assert_eq!(mask32x8::splat(true).first_set(), Some(0));
+ ///
+ /// let mask = mask32x8::from_array([false, true, false, false, true, false, false, true]);
+ /// assert_eq!(mask.first_set(), Some(1));
+ /// ```
+ #[inline]
+ #[must_use = "method returns the index and does not mutate the original value"]
+ pub fn first_set(self) -> Option<usize> {
+ // If bitmasks are efficient, using them is better
+ if cfg!(target_feature = "sse") && N <= 64 {
+ let tz = self.to_bitmask().trailing_zeros();
+ return if tz == 64 { None } else { Some(tz as usize) };
+ }
+
+ // To find the first set index:
+ // * create a vector 0..N
+ // * replace unset mask elements in that vector with -1
+ // * perform _unsigned_ reduce-min
+ // * check if the result is -1 or an index
+
+ let index = Simd::from_array(
+ const {
+ let mut index = [0; N];
+ let mut i = 0;
+ while i < N {
+ index[i] = i;
+ i += 1;
+ }
+ index
+ },
+ );
+
+ // Safety: the input and output are integer vectors
+ let index: Simd<T, N> = unsafe { intrinsics::simd_cast(index) };
+
+ let masked_index = self.select(index, Self::splat(true).to_int());
+
+ // Safety: the input and output are integer vectors
+ let masked_index: Simd<T::Unsigned, N> = unsafe { intrinsics::simd_cast(masked_index) };
+
+ // Safety: the input is an integer vector
+ let min_index: T::Unsigned = unsafe { intrinsics::simd_reduce_min(masked_index) };
+
+ // Safety: the return value is the unsigned version of T
+ let min_index: T = unsafe { core::mem::transmute_copy(&min_index) };
+
+ if min_index.eq(T::TRUE) {
+ None
+ } else {
+ Some(min_index.as_usize())
+ }
+ }
}
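
A scalar model of the reduce-min trick used in `first_set` above, for illustration: unset elements become the maximum unsigned value, so the minimum is either a real index or "all ones".

```rust
fn first_set_scalar(mask: &[bool]) -> Option<usize> {
    // Replace unset entries with usize::MAX, then take the minimum:
    // any real index beats usize::MAX.
    let min = mask
        .iter()
        .enumerate()
        .map(|(i, &set)| if set { i } else { usize::MAX })
        .min()
        .unwrap_or(usize::MAX);
    if min == usize::MAX { None } else { Some(min) }
}

fn main() {
    assert_eq!(first_set_scalar(&[false, true, false, true]), Some(1));
    assert_eq!(first_set_scalar(&[false; 4]), None);
}
```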
// vector/array conversion
-impl<T, const LANES: usize> From<[bool; LANES]> for Mask<T, LANES>
+impl<T, const N: usize> From<[bool; N]> for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
- fn from(array: [bool; LANES]) -> Self {
+ fn from(array: [bool; N]) -> Self {
Self::from_array(array)
}
}
-impl<T, const LANES: usize> From<Mask<T, LANES>> for [bool; LANES]
+impl<T, const N: usize> From<Mask<T, N>> for [bool; N]
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
- fn from(vector: Mask<T, LANES>) -> Self {
+ fn from(vector: Mask<T, N>) -> Self {
vector.to_array()
}
}
-impl<T, const LANES: usize> Default for Mask<T, LANES>
+impl<T, const N: usize> Default for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
- #[must_use = "method returns a defaulted mask with all lanes set to false (0)"]
+ #[must_use = "method returns a defaulted mask with all elements set to false (0)"]
fn default() -> Self {
Self::splat(false)
}
}
-impl<T, const LANES: usize> PartialEq for Mask<T, LANES>
+impl<T, const N: usize> PartialEq for Mask<T, N>
where
T: MaskElement + PartialEq,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
#[must_use = "method returns a new bool and does not mutate the original value"]
@@ -313,10 +442,10 @@ where
}
}
-impl<T, const LANES: usize> PartialOrd for Mask<T, LANES>
+impl<T, const N: usize> PartialOrd for Mask<T, N>
where
T: MaskElement + PartialOrd,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
#[must_use = "method returns a new Ordering and does not mutate the original value"]
@@ -325,23 +454,23 @@ where
}
}
-impl<T, const LANES: usize> fmt::Debug for Mask<T, LANES>
+impl<T, const N: usize> fmt::Debug for Mask<T, N>
where
T: MaskElement + fmt::Debug,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list()
- .entries((0..LANES).map(|lane| self.test(lane)))
+ .entries((0..N).map(|i| self.test(i)))
.finish()
}
}
-impl<T, const LANES: usize> core::ops::BitAnd for Mask<T, LANES>
+impl<T, const N: usize> core::ops::BitAnd for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
type Output = Self;
#[inline]
@@ -351,10 +480,10 @@ where
}
}
-impl<T, const LANES: usize> core::ops::BitAnd<bool> for Mask<T, LANES>
+impl<T, const N: usize> core::ops::BitAnd<bool> for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
type Output = Self;
#[inline]
@@ -364,23 +493,23 @@ where
}
}
-impl<T, const LANES: usize> core::ops::BitAnd<Mask<T, LANES>> for bool
+impl<T, const N: usize> core::ops::BitAnd<Mask<T, N>> for bool
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
- type Output = Mask<T, LANES>;
+ type Output = Mask<T, N>;
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
- fn bitand(self, rhs: Mask<T, LANES>) -> Mask<T, LANES> {
+ fn bitand(self, rhs: Mask<T, N>) -> Mask<T, N> {
Mask::splat(self) & rhs
}
}
-impl<T, const LANES: usize> core::ops::BitOr for Mask<T, LANES>
+impl<T, const N: usize> core::ops::BitOr for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
type Output = Self;
#[inline]
@@ -390,10 +519,10 @@ where
}
}
-impl<T, const LANES: usize> core::ops::BitOr<bool> for Mask<T, LANES>
+impl<T, const N: usize> core::ops::BitOr<bool> for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
type Output = Self;
#[inline]
@@ -403,23 +532,23 @@ where
}
}
-impl<T, const LANES: usize> core::ops::BitOr<Mask<T, LANES>> for bool
+impl<T, const N: usize> core::ops::BitOr<Mask<T, N>> for bool
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
- type Output = Mask<T, LANES>;
+ type Output = Mask<T, N>;
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
- fn bitor(self, rhs: Mask<T, LANES>) -> Mask<T, LANES> {
+ fn bitor(self, rhs: Mask<T, N>) -> Mask<T, N> {
Mask::splat(self) | rhs
}
}
-impl<T, const LANES: usize> core::ops::BitXor for Mask<T, LANES>
+impl<T, const N: usize> core::ops::BitXor for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
type Output = Self;
#[inline]
@@ -429,10 +558,10 @@ where
}
}
-impl<T, const LANES: usize> core::ops::BitXor<bool> for Mask<T, LANES>
+impl<T, const N: usize> core::ops::BitXor<bool> for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
type Output = Self;
#[inline]
@@ -442,25 +571,25 @@ where
}
}
-impl<T, const LANES: usize> core::ops::BitXor<Mask<T, LANES>> for bool
+impl<T, const N: usize> core::ops::BitXor<Mask<T, N>> for bool
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
- type Output = Mask<T, LANES>;
+ type Output = Mask<T, N>;
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
- fn bitxor(self, rhs: Mask<T, LANES>) -> Self::Output {
+ fn bitxor(self, rhs: Mask<T, N>) -> Self::Output {
Mask::splat(self) ^ rhs
}
}
-impl<T, const LANES: usize> core::ops::Not for Mask<T, LANES>
+impl<T, const N: usize> core::ops::Not for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
- type Output = Mask<T, LANES>;
+ type Output = Mask<T, N>;
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
fn not(self) -> Self::Output {
@@ -468,10 +597,10 @@ where
}
}
-impl<T, const LANES: usize> core::ops::BitAndAssign for Mask<T, LANES>
+impl<T, const N: usize> core::ops::BitAndAssign for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn bitand_assign(&mut self, rhs: Self) {
@@ -479,10 +608,10 @@ where
}
}
-impl<T, const LANES: usize> core::ops::BitAndAssign<bool> for Mask<T, LANES>
+impl<T, const N: usize> core::ops::BitAndAssign<bool> for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn bitand_assign(&mut self, rhs: bool) {
@@ -490,10 +619,10 @@ where
}
}
-impl<T, const LANES: usize> core::ops::BitOrAssign for Mask<T, LANES>
+impl<T, const N: usize> core::ops::BitOrAssign for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn bitor_assign(&mut self, rhs: Self) {
@@ -501,10 +630,10 @@ where
}
}
-impl<T, const LANES: usize> core::ops::BitOrAssign<bool> for Mask<T, LANES>
+impl<T, const N: usize> core::ops::BitOrAssign<bool> for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn bitor_assign(&mut self, rhs: bool) {
@@ -512,10 +641,10 @@ where
}
}
-impl<T, const LANES: usize> core::ops::BitXorAssign for Mask<T, LANES>
+impl<T, const N: usize> core::ops::BitXorAssign for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn bitxor_assign(&mut self, rhs: Self) {
@@ -523,10 +652,10 @@ where
}
}
-impl<T, const LANES: usize> core::ops::BitXorAssign<bool> for Mask<T, LANES>
+impl<T, const N: usize> core::ops::BitXorAssign<bool> for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn bitxor_assign(&mut self, rhs: bool) {
@@ -537,12 +666,12 @@ where
macro_rules! impl_from {
{ $from:ty => $($to:ty),* } => {
$(
- impl<const LANES: usize> From<Mask<$from, LANES>> for Mask<$to, LANES>
+ impl<const N: usize> From<Mask<$from, N>> for Mask<$to, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
- fn from(value: Mask<$from, LANES>) -> Self {
+ fn from(value: Mask<$from, N>) -> Self {
value.cast()
}
}
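
Putting the new inherent mask API together — a sketch assuming the `std::simd` paths; the expected bit layout matches the doc examples above:

```rust
#![feature(portable_simd)]
use std::simd::mask32x8;

fn main() {
    let mask = mask32x8::from_array([true, false, true, false, false, false, true, false]);

    // Bit i of the integer corresponds to element i (LSB first).
    assert_eq!(mask.to_bitmask(), 0b0100_0101);
    assert_eq!(mask32x8::from_bitmask(0b0100_0101), mask);

    // New in this diff: index of the first set element.
    assert_eq!(mask.first_set(), Some(0));
}
```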
diff --git a/library/portable-simd/crates/core_simd/src/masks/bitmask.rs b/library/portable-simd/crates/core_simd/src/masks/bitmask.rs
index 20465ba9b..6ddff07fe 100644
--- a/library/portable-simd/crates/core_simd/src/masks/bitmask.rs
+++ b/library/portable-simd/crates/core_simd/src/masks/bitmask.rs
@@ -1,30 +1,30 @@
#![allow(unused_imports)]
use super::MaskElement;
use crate::simd::intrinsics;
-use crate::simd::{LaneCount, Simd, SupportedLaneCount, ToBitMask};
+use crate::simd::{LaneCount, Simd, SupportedLaneCount};
use core::marker::PhantomData;
/// A mask where each lane is represented by a single bit.
#[repr(transparent)]
-pub struct Mask<T, const LANES: usize>(
- <LaneCount<LANES> as SupportedLaneCount>::BitMask,
+pub struct Mask<T, const N: usize>(
+ <LaneCount<N> as SupportedLaneCount>::BitMask,
PhantomData<T>,
)
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount;
+ LaneCount<N>: SupportedLaneCount;
-impl<T, const LANES: usize> Copy for Mask<T, LANES>
+impl<T, const N: usize> Copy for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
}
-impl<T, const LANES: usize> Clone for Mask<T, LANES>
+impl<T, const N: usize> Clone for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn clone(&self) -> Self {
@@ -32,10 +32,10 @@ where
}
}
-impl<T, const LANES: usize> PartialEq for Mask<T, LANES>
+impl<T, const N: usize> PartialEq for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn eq(&self, other: &Self) -> bool {
@@ -43,10 +43,10 @@ where
}
}
-impl<T, const LANES: usize> PartialOrd for Mask<T, LANES>
+impl<T, const N: usize> PartialOrd for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
@@ -54,17 +54,17 @@ where
}
}
-impl<T, const LANES: usize> Eq for Mask<T, LANES>
+impl<T, const N: usize> Eq for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
}
-impl<T, const LANES: usize> Ord for Mask<T, LANES>
+impl<T, const N: usize> Ord for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
@@ -72,22 +72,22 @@ where
}
}
-impl<T, const LANES: usize> Mask<T, LANES>
+impl<T, const N: usize> Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
pub fn splat(value: bool) -> Self {
- let mut mask = <LaneCount<LANES> as SupportedLaneCount>::BitMask::default();
+ let mut mask = <LaneCount<N> as SupportedLaneCount>::BitMask::default();
if value {
mask.as_mut().fill(u8::MAX)
} else {
mask.as_mut().fill(u8::MIN)
}
- if LANES % 8 > 0 {
- *mask.as_mut().last_mut().unwrap() &= u8::MAX >> (8 - LANES % 8);
+ if N % 8 > 0 {
+ *mask.as_mut().last_mut().unwrap() &= u8::MAX >> (8 - N % 8);
}
Self(mask, PhantomData)
}
@@ -107,7 +107,7 @@ where
#[inline]
#[must_use = "method returns a new vector and does not mutate the original value"]
- pub fn to_int(self) -> Simd<T, LANES> {
+ pub fn to_int(self) -> Simd<T, N> {
unsafe {
intrinsics::simd_select_bitmask(self.0, Simd::splat(T::TRUE), Simd::splat(T::FALSE))
}
@@ -115,51 +115,47 @@ where
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
- pub unsafe fn from_int_unchecked(value: Simd<T, LANES>) -> Self {
+ pub unsafe fn from_int_unchecked(value: Simd<T, N>) -> Self {
unsafe { Self(intrinsics::simd_bitmask(value), PhantomData) }
}
- #[cfg(feature = "generic_const_exprs")]
#[inline]
- #[must_use = "method returns a new array and does not mutate the original value"]
- pub fn to_bitmask_array<const N: usize>(self) -> [u8; N] {
- assert!(core::mem::size_of::<Self>() == N);
-
- // Safety: converting an integer to an array of bytes of the same size is safe
- unsafe { core::mem::transmute_copy(&self.0) }
+ #[must_use = "method returns a new vector and does not mutate the original value"]
+ pub fn to_bitmask_vector(self) -> Simd<u8, N> {
+ let mut bitmask = Simd::splat(0);
+ bitmask.as_mut_array()[..self.0.as_ref().len()].copy_from_slice(self.0.as_ref());
+ bitmask
}
- #[cfg(feature = "generic_const_exprs")]
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
- pub fn from_bitmask_array<const N: usize>(bitmask: [u8; N]) -> Self {
- assert!(core::mem::size_of::<Self>() == N);
-
- // Safety: converting an array of bytes to an integer of the same size is safe
- Self(unsafe { core::mem::transmute_copy(&bitmask) }, PhantomData)
+ pub fn from_bitmask_vector(bitmask: Simd<u8, N>) -> Self {
+ let mut bytes = <LaneCount<N> as SupportedLaneCount>::BitMask::default();
+ let len = bytes.as_ref().len();
+ bytes.as_mut().copy_from_slice(&bitmask.as_array()[..len]);
+ Self(bytes, PhantomData)
}
#[inline]
- pub fn to_bitmask_integer<U>(self) -> U
- where
- super::Mask<T, LANES>: ToBitMask<BitMask = U>,
- {
- // Safety: these are the same types
- unsafe { core::mem::transmute_copy(&self.0) }
+ pub fn to_bitmask_integer(self) -> u64 {
+ let mut bitmask = [0u8; 8];
+ bitmask[..self.0.as_ref().len()].copy_from_slice(self.0.as_ref());
+ u64::from_ne_bytes(bitmask)
}
#[inline]
- pub fn from_bitmask_integer<U>(bitmask: U) -> Self
- where
- super::Mask<T, LANES>: ToBitMask<BitMask = U>,
- {
- // Safety: these are the same types
- unsafe { Self(core::mem::transmute_copy(&bitmask), PhantomData) }
+ pub fn from_bitmask_integer(bitmask: u64) -> Self {
+ let mut bytes = <LaneCount<N> as SupportedLaneCount>::BitMask::default();
+ let len = bytes.as_mut().len();
+ bytes
+ .as_mut()
+ .copy_from_slice(&bitmask.to_ne_bytes()[..len]);
+ Self(bytes, PhantomData)
}
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
- pub fn convert<U>(self) -> Mask<U, LANES>
+ pub fn convert<U>(self) -> Mask<U, N>
where
U: MaskElement,
{
@@ -180,11 +176,11 @@ where
}
}
-impl<T, const LANES: usize> core::ops::BitAnd for Mask<T, LANES>
+impl<T, const N: usize> core::ops::BitAnd for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
- <LaneCount<LANES> as SupportedLaneCount>::BitMask: AsRef<[u8]> + AsMut<[u8]>,
+ LaneCount<N>: SupportedLaneCount,
+ <LaneCount<N> as SupportedLaneCount>::BitMask: AsRef<[u8]> + AsMut<[u8]>,
{
type Output = Self;
#[inline]
@@ -197,11 +193,11 @@ where
}
}
-impl<T, const LANES: usize> core::ops::BitOr for Mask<T, LANES>
+impl<T, const N: usize> core::ops::BitOr for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
- <LaneCount<LANES> as SupportedLaneCount>::BitMask: AsRef<[u8]> + AsMut<[u8]>,
+ LaneCount<N>: SupportedLaneCount,
+ <LaneCount<N> as SupportedLaneCount>::BitMask: AsRef<[u8]> + AsMut<[u8]>,
{
type Output = Self;
#[inline]
@@ -214,10 +210,10 @@ where
}
}
-impl<T, const LANES: usize> core::ops::BitXor for Mask<T, LANES>
+impl<T, const N: usize> core::ops::BitXor for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
type Output = Self;
#[inline]
@@ -230,10 +226,10 @@ where
}
}
-impl<T, const LANES: usize> core::ops::Not for Mask<T, LANES>
+impl<T, const N: usize> core::ops::Not for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
type Output = Self;
#[inline]
@@ -242,8 +238,8 @@ where
for x in self.0.as_mut() {
*x = !*x;
}
- if LANES % 8 > 0 {
- *self.0.as_mut().last_mut().unwrap() &= u8::MAX >> (8 - LANES % 8);
+ if N % 8 > 0 {
+ *self.0.as_mut().last_mut().unwrap() &= u8::MAX >> (8 - N % 8);
}
self
}
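
The `N % 8` masking in `splat` and `Not` keeps the unused high bits of the bitmask's last byte zeroed. A worked sketch of the arithmetic for a 4-lane mask:

```rust
fn main() {
    let n = 4usize;
    // A 4-lane bitmask occupies the low 4 bits of a single byte;
    // the remaining high bits must stay zero.
    let keep = u8::MAX >> (8 - n % 8);
    assert_eq!(keep, 0b0000_1111);

    // So negating an all-false 4-lane bitmask yields 0x0f, not 0xff.
    assert_eq!(!0u8 & keep, 0b0000_1111);
}
```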
diff --git a/library/portable-simd/crates/core_simd/src/masks/full_masks.rs b/library/portable-simd/crates/core_simd/src/masks/full_masks.rs
index 1d13c45b8..63964f455 100644
--- a/library/portable-simd/crates/core_simd/src/masks/full_masks.rs
+++ b/library/portable-simd/crates/core_simd/src/masks/full_masks.rs
@@ -1,29 +1,25 @@
//! Masks that take up full SIMD vector registers.
-use super::MaskElement;
use crate::simd::intrinsics;
-use crate::simd::{LaneCount, Simd, SupportedLaneCount, ToBitMask};
-
-#[cfg(feature = "generic_const_exprs")]
-use crate::simd::ToBitMaskArray;
+use crate::simd::{LaneCount, MaskElement, Simd, SupportedLaneCount};
#[repr(transparent)]
-pub struct Mask<T, const LANES: usize>(Simd<T, LANES>)
+pub struct Mask<T, const N: usize>(Simd<T, N>)
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount;
+ LaneCount<N>: SupportedLaneCount;
-impl<T, const LANES: usize> Copy for Mask<T, LANES>
+impl<T, const N: usize> Copy for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
}
-impl<T, const LANES: usize> Clone for Mask<T, LANES>
+impl<T, const N: usize> Clone for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
@@ -32,10 +28,10 @@ where
}
}
-impl<T, const LANES: usize> PartialEq for Mask<T, LANES>
+impl<T, const N: usize> PartialEq for Mask<T, N>
where
T: MaskElement + PartialEq,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn eq(&self, other: &Self) -> bool {
@@ -43,10 +39,10 @@ where
}
}
-impl<T, const LANES: usize> PartialOrd for Mask<T, LANES>
+impl<T, const N: usize> PartialOrd for Mask<T, N>
where
T: MaskElement + PartialOrd,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
@@ -54,17 +50,17 @@ where
}
}
-impl<T, const LANES: usize> Eq for Mask<T, LANES>
+impl<T, const N: usize> Eq for Mask<T, N>
where
T: MaskElement + Eq,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
}
-impl<T, const LANES: usize> Ord for Mask<T, LANES>
+impl<T, const N: usize> Ord for Mask<T, N>
where
T: MaskElement + Ord,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
@@ -101,10 +97,10 @@ macro_rules! impl_reverse_bits {
impl_reverse_bits! { u8, u16, u32, u64 }
-impl<T, const LANES: usize> Mask<T, LANES>
+impl<T, const N: usize> Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
@@ -125,19 +121,19 @@ where
#[inline]
#[must_use = "method returns a new vector and does not mutate the original value"]
- pub fn to_int(self) -> Simd<T, LANES> {
+ pub fn to_int(self) -> Simd<T, N> {
self.0
}
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
- pub unsafe fn from_int_unchecked(value: Simd<T, LANES>) -> Self {
+ pub unsafe fn from_int_unchecked(value: Simd<T, N>) -> Self {
Self(value)
}
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
- pub fn convert<U>(self) -> Mask<U, LANES>
+ pub fn convert<U>(self) -> Mask<U, N>
where
U: MaskElement,
{
@@ -145,62 +141,50 @@ where
unsafe { Mask(intrinsics::simd_cast(self.0)) }
}
- #[cfg(feature = "generic_const_exprs")]
#[inline]
- #[must_use = "method returns a new array and does not mutate the original value"]
- pub fn to_bitmask_array<const N: usize>(self) -> [u8; N]
- where
- super::Mask<T, LANES>: ToBitMaskArray,
- [(); <super::Mask<T, LANES> as ToBitMaskArray>::BYTES]: Sized,
- {
- assert_eq!(<super::Mask<T, LANES> as ToBitMaskArray>::BYTES, N);
+ #[must_use = "method returns a new vector and does not mutate the original value"]
+ pub fn to_bitmask_vector(self) -> Simd<u8, N> {
+ let mut bitmask = Simd::splat(0);
- // Safety: N is the correct bitmask size
+ // Safety: `bytes` is a byte array of the correct size for this bitmask
unsafe {
// Compute the bitmask
- let bitmask: [u8; <super::Mask<T, LANES> as ToBitMaskArray>::BYTES] =
+ let mut bytes: <LaneCount<N> as SupportedLaneCount>::BitMask =
intrinsics::simd_bitmask(self.0);
- // Transmute to the return type, previously asserted to be the same size
- let mut bitmask: [u8; N] = core::mem::transmute_copy(&bitmask);
-
// LLVM assumes bit order should match endianness
if cfg!(target_endian = "big") {
- for x in bitmask.as_mut() {
- *x = x.reverse_bits();
+ for x in bytes.as_mut() {
+ *x = x.reverse_bits()
}
- };
+ }
- bitmask
+ bitmask.as_mut_array()[..bytes.as_ref().len()].copy_from_slice(bytes.as_ref());
}
+
+ bitmask
}
- #[cfg(feature = "generic_const_exprs")]
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
- pub fn from_bitmask_array<const N: usize>(mut bitmask: [u8; N]) -> Self
- where
- super::Mask<T, LANES>: ToBitMaskArray,
- [(); <super::Mask<T, LANES> as ToBitMaskArray>::BYTES]: Sized,
- {
- assert_eq!(<super::Mask<T, LANES> as ToBitMaskArray>::BYTES, N);
+ pub fn from_bitmask_vector(bitmask: Simd<u8, N>) -> Self {
+ let mut bytes = <LaneCount<N> as SupportedLaneCount>::BitMask::default();
- // Safety: N is the correct bitmask size
+ // Safety: `bytes` is a byte array of the correct size for this bitmask
unsafe {
+ let len = bytes.as_ref().len();
+ bytes.as_mut().copy_from_slice(&bitmask.as_array()[..len]);
+
// LLVM assumes bit order should match endianness
if cfg!(target_endian = "big") {
- for x in bitmask.as_mut() {
+ for x in bytes.as_mut() {
*x = x.reverse_bits();
}
}
- // Transmute to the bitmask type, previously asserted to be the same size
- let bitmask: [u8; <super::Mask<T, LANES> as ToBitMaskArray>::BYTES] =
- core::mem::transmute_copy(&bitmask);
-
// Compute the regular mask
Self::from_int_unchecked(intrinsics::simd_select_bitmask(
- bitmask,
+ bytes,
Self::splat(true).to_int(),
Self::splat(false).to_int(),
))
@@ -208,40 +192,81 @@ where
}
#[inline]
- pub(crate) fn to_bitmask_integer<U: ReverseBits>(self) -> U
+ unsafe fn to_bitmask_impl<U: ReverseBits, const M: usize>(self) -> U
where
- super::Mask<T, LANES>: ToBitMask<BitMask = U>,
+ LaneCount<M>: SupportedLaneCount,
{
- // Safety: U is required to be the appropriate bitmask type
- let bitmask: U = unsafe { intrinsics::simd_bitmask(self.0) };
+ let resized = self.to_int().resize::<M>(T::FALSE);
+
+ // Safety: `resized` is an integer vector of length M, matching the width of the bitmask type U
+ let bitmask: U = unsafe { intrinsics::simd_bitmask(resized) };
// LLVM assumes bit order should match endianness
if cfg!(target_endian = "big") {
- bitmask.reverse_bits(LANES)
+ bitmask.reverse_bits(M)
} else {
bitmask
}
}
#[inline]
- pub(crate) fn from_bitmask_integer<U: ReverseBits>(bitmask: U) -> Self
+ unsafe fn from_bitmask_impl<U: ReverseBits, const M: usize>(bitmask: U) -> Self
where
- super::Mask<T, LANES>: ToBitMask<BitMask = U>,
+ LaneCount<M>: SupportedLaneCount,
{
// LLVM assumes bit order should match endianness
let bitmask = if cfg!(target_endian = "big") {
- bitmask.reverse_bits(LANES)
+ bitmask.reverse_bits(M)
} else {
bitmask
};
- // Safety: U is required to be the appropriate bitmask type
- unsafe {
- Self::from_int_unchecked(intrinsics::simd_select_bitmask(
+ // SAFETY: `bitmask` has the correct width for a vector of M elements
+ let mask: Simd<T, M> = unsafe {
+ intrinsics::simd_select_bitmask(
bitmask,
- Self::splat(true).to_int(),
- Self::splat(false).to_int(),
- ))
+ Simd::<T, M>::splat(T::TRUE),
+ Simd::<T, M>::splat(T::FALSE),
+ )
+ };
+
+ // SAFETY: `mask` only contains `T::TRUE` or `T::FALSE`
+ unsafe { Self::from_int_unchecked(mask.resize::<N>(T::FALSE)) }
+ }
+
+ #[inline]
+ pub(crate) fn to_bitmask_integer(self) -> u64 {
+ // TODO modify simd_bitmask to zero-extend output, making this unnecessary
+ if N <= 8 {
+ // Safety: bitmask matches length
+ unsafe { self.to_bitmask_impl::<u8, 8>() as u64 }
+ } else if N <= 16 {
+ // Safety: bitmask matches length
+ unsafe { self.to_bitmask_impl::<u16, 16>() as u64 }
+ } else if N <= 32 {
+ // Safety: bitmask matches length
+ unsafe { self.to_bitmask_impl::<u32, 32>() as u64 }
+ } else {
+ // Safety: bitmask matches length
+ unsafe { self.to_bitmask_impl::<u64, 64>() }
+ }
+ }
+
+ #[inline]
+ pub(crate) fn from_bitmask_integer(bitmask: u64) -> Self {
+ // TODO modify simd_bitmask_select to truncate input, making this unnecessary
+ if N <= 8 {
+ // Safety: bitmask matches length
+ unsafe { Self::from_bitmask_impl::<u8, 8>(bitmask as u8) }
+ } else if N <= 16 {
+ // Safety: bitmask matches length
+ unsafe { Self::from_bitmask_impl::<u16, 16>(bitmask as u16) }
+ } else if N <= 32 {
+ // Safety: bitmask matches length
+ unsafe { Self::from_bitmask_impl::<u32, 32>(bitmask as u32) }
+ } else {
+ // Safety: bitmask matches length
+ unsafe { Self::from_bitmask_impl::<u64, 64>(bitmask) }
}
}
@@ -260,21 +285,21 @@ where
}
}
-impl<T, const LANES: usize> From<Mask<T, LANES>> for Simd<T, LANES>
+impl<T, const N: usize> From<Mask<T, N>> for Simd<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
- fn from(value: Mask<T, LANES>) -> Self {
+ fn from(value: Mask<T, N>) -> Self {
value.0
}
}
-impl<T, const LANES: usize> core::ops::BitAnd for Mask<T, LANES>
+impl<T, const N: usize> core::ops::BitAnd for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
type Output = Self;
#[inline]
@@ -285,10 +310,10 @@ where
}
}
-impl<T, const LANES: usize> core::ops::BitOr for Mask<T, LANES>
+impl<T, const N: usize> core::ops::BitOr for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
type Output = Self;
#[inline]
@@ -299,10 +324,10 @@ where
}
}
-impl<T, const LANES: usize> core::ops::BitXor for Mask<T, LANES>
+impl<T, const N: usize> core::ops::BitXor for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
type Output = Self;
#[inline]
@@ -313,10 +338,10 @@ where
}
}
-impl<T, const LANES: usize> core::ops::Not for Mask<T, LANES>
+impl<T, const N: usize> core::ops::Not for Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
type Output = Self;
#[inline]
diff --git a/library/portable-simd/crates/core_simd/src/masks/to_bitmask.rs b/library/portable-simd/crates/core_simd/src/masks/to_bitmask.rs
deleted file mode 100644
index fc7d6b781..000000000
--- a/library/portable-simd/crates/core_simd/src/masks/to_bitmask.rs
+++ /dev/null
@@ -1,97 +0,0 @@
-use super::{mask_impl, Mask, MaskElement};
-use crate::simd::{LaneCount, SupportedLaneCount};
-
-mod sealed {
- pub trait Sealed {}
-}
-pub use sealed::Sealed;
-
-impl<T, const LANES: usize> Sealed for Mask<T, LANES>
-where
- T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
-{
-}
-
-/// Converts masks to and from integer bitmasks.
-///
-/// Each bit of the bitmask corresponds to a mask lane, starting with the LSB.
-pub trait ToBitMask: Sealed {
- /// The integer bitmask type.
- type BitMask;
-
- /// Converts a mask to a bitmask.
- fn to_bitmask(self) -> Self::BitMask;
-
- /// Converts a bitmask to a mask.
- fn from_bitmask(bitmask: Self::BitMask) -> Self;
-}
-
-/// Converts masks to and from byte array bitmasks.
-///
-/// Each bit of the bitmask corresponds to a mask lane, starting with the LSB of the first byte.
-#[cfg(feature = "generic_const_exprs")]
-pub trait ToBitMaskArray: Sealed {
- /// The length of the bitmask array.
- const BYTES: usize;
-
- /// Converts a mask to a bitmask.
- fn to_bitmask_array(self) -> [u8; Self::BYTES];
-
- /// Converts a bitmask to a mask.
- fn from_bitmask_array(bitmask: [u8; Self::BYTES]) -> Self;
-}
-
-macro_rules! impl_integer_intrinsic {
- { $(impl ToBitMask<BitMask=$int:ty> for Mask<_, $lanes:literal>)* } => {
- $(
- impl<T: MaskElement> ToBitMask for Mask<T, $lanes> {
- type BitMask = $int;
-
- #[inline]
- fn to_bitmask(self) -> $int {
- self.0.to_bitmask_integer()
- }
-
- #[inline]
- fn from_bitmask(bitmask: $int) -> Self {
- Self(mask_impl::Mask::from_bitmask_integer(bitmask))
- }
- }
- )*
- }
-}
-
-impl_integer_intrinsic! {
- impl ToBitMask<BitMask=u8> for Mask<_, 1>
- impl ToBitMask<BitMask=u8> for Mask<_, 2>
- impl ToBitMask<BitMask=u8> for Mask<_, 4>
- impl ToBitMask<BitMask=u8> for Mask<_, 8>
- impl ToBitMask<BitMask=u16> for Mask<_, 16>
- impl ToBitMask<BitMask=u32> for Mask<_, 32>
- impl ToBitMask<BitMask=u64> for Mask<_, 64>
-}
-
-/// Returns the minimum number of bytes in a bitmask with `lanes` lanes.
-#[cfg(feature = "generic_const_exprs")]
-pub const fn bitmask_len(lanes: usize) -> usize {
- (lanes + 7) / 8
-}
-
-#[cfg(feature = "generic_const_exprs")]
-impl<T: MaskElement, const LANES: usize> ToBitMaskArray for Mask<T, LANES>
-where
- LaneCount<LANES>: SupportedLaneCount,
-{
- const BYTES: usize = bitmask_len(LANES);
-
- #[inline]
- fn to_bitmask_array(self) -> [u8; Self::BYTES] {
- self.0.to_bitmask_array()
- }
-
- #[inline]
- fn from_bitmask_array(bitmask: [u8; Self::BYTES]) -> Self {
- Mask(mask_impl::Mask::from_bitmask_array(bitmask))
- }
-}
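
With `ToBitMask` and `ToBitMaskArray` deleted, callers migrate from the trait (whose `BitMask` associated type varied with lane count) to the inherent `u64`-based methods added above. A hedged before/after sketch:

```rust
#![feature(portable_simd)]
use std::simd::mask8x16;

fn main() {
    let m = mask8x16::splat(true);

    // Before this diff (now removed):
    //   use std::simd::ToBitMask;
    //   let bits: u16 = m.to_bitmask();

    // After: one inherent method, always u64, for any N <= 64.
    let bits: u64 = m.to_bitmask();
    assert_eq!(bits, 0xffff);
}
```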
diff --git a/library/portable-simd/crates/core_simd/src/mod.rs b/library/portable-simd/crates/core_simd/src/mod.rs
index 194267698..fd016f1c6 100644
--- a/library/portable-simd/crates/core_simd/src/mod.rs
+++ b/library/portable-simd/crates/core_simd/src/mod.rs
@@ -3,37 +3,37 @@ mod swizzle;
pub(crate) mod intrinsics;
-#[cfg(feature = "generic_const_exprs")]
-mod to_bytes;
-
mod alias;
mod cast;
-mod elements;
-mod eq;
mod fmt;
mod iter;
mod lane_count;
mod masks;
mod ops;
-mod ord;
mod select;
mod swizzle_dyn;
+mod to_bytes;
mod vector;
mod vendor;
-#[doc = include_str!("core_simd_docs.md")]
pub mod simd {
+ #![doc = include_str!("core_simd_docs.md")]
+
pub mod prelude;
+ pub mod num;
+
+ pub mod ptr;
+
+ pub mod cmp;
+
pub(crate) use crate::core_simd::intrinsics;
pub use crate::core_simd::alias::*;
pub use crate::core_simd::cast::*;
- pub use crate::core_simd::elements::*;
- pub use crate::core_simd::eq::*;
pub use crate::core_simd::lane_count::{LaneCount, SupportedLaneCount};
pub use crate::core_simd::masks::*;
- pub use crate::core_simd::ord::*;
pub use crate::core_simd::swizzle::*;
+ pub use crate::core_simd::to_bytes::ToBytes;
pub use crate::core_simd::vector::*;
}
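
A sketch of how user imports shift under the new module layout. That `SimdPartialEq` lands in `cmp` is shown by other hunks in this PR; `SimdFloat` living in `num` is inferred from the removal of `elements` and is an assumption:

```rust
#![feature(portable_simd)]
// Traits formerly re-exported from the crate root now live in
// dedicated submodules:
use std::simd::cmp::SimdPartialEq;
use std::simd::num::SimdFloat;
use std::simd::{f32x4, u32x4};

fn main() {
    assert!(u32x4::splat(1).simd_eq(u32x4::splat(1)).all());
    assert_eq!(f32x4::splat(2.0).reduce_sum(), 8.0);
}
```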
diff --git a/library/portable-simd/crates/core_simd/src/ops.rs b/library/portable-simd/crates/core_simd/src/ops.rs
index b007456cf..8a1b083f0 100644
--- a/library/portable-simd/crates/core_simd/src/ops.rs
+++ b/library/portable-simd/crates/core_simd/src/ops.rs
@@ -1,4 +1,4 @@
-use crate::simd::{LaneCount, Simd, SimdElement, SimdPartialEq, SupportedLaneCount};
+use crate::simd::{cmp::SimdPartialEq, LaneCount, Simd, SimdElement, SupportedLaneCount};
use core::ops::{Add, Mul};
use core::ops::{BitAnd, BitOr, BitXor};
use core::ops::{Div, Rem, Sub};
@@ -6,12 +6,13 @@ use core::ops::{Shl, Shr};
mod assign;
mod deref;
+mod shift_scalar;
mod unary;
-impl<I, T, const LANES: usize> core::ops::Index<I> for Simd<T, LANES>
+impl<I, T, const N: usize> core::ops::Index<I> for Simd<T, N>
where
T: SimdElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
I: core::slice::SliceIndex<[T]>,
{
type Output = I::Output;
@@ -21,10 +22,10 @@ where
}
}
-impl<I, T, const LANES: usize> core::ops::IndexMut<I> for Simd<T, LANES>
+impl<I, T, const N: usize> core::ops::IndexMut<I> for Simd<T, N>
where
T: SimdElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
I: core::slice::SliceIndex<[T]>,
{
#[inline]
diff --git a/library/portable-simd/crates/core_simd/src/ops/assign.rs b/library/portable-simd/crates/core_simd/src/ops/assign.rs
index d2b48614f..0e8778502 100644
--- a/library/portable-simd/crates/core_simd/src/ops/assign.rs
+++ b/library/portable-simd/crates/core_simd/src/ops/assign.rs
@@ -8,7 +8,7 @@ use core::ops::{ShlAssign, ShrAssign}; // non-commutative bit binary op-assignme
// Arithmetic
macro_rules! assign_ops {
- ($(impl<T, U, const LANES: usize> $assignTrait:ident<U> for Simd<T, LANES>
+ ($(impl<T, U, const N: usize> $assignTrait:ident<U> for Simd<T, N>
where
Self: $trait:ident,
{
@@ -16,11 +16,11 @@ macro_rules! assign_ops {
$call:ident
}
})*) => {
- $(impl<T, U, const LANES: usize> $assignTrait<U> for Simd<T, LANES>
+ $(impl<T, U, const N: usize> $assignTrait<U> for Simd<T, N>
where
Self: $trait<U, Output = Self>,
T: SimdElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn $assign_call(&mut self, rhs: U) {
@@ -32,7 +32,7 @@ macro_rules! assign_ops {
assign_ops! {
// Arithmetic
- impl<T, U, const LANES: usize> AddAssign<U> for Simd<T, LANES>
+ impl<T, U, const N: usize> AddAssign<U> for Simd<T, N>
where
Self: Add,
{
@@ -41,7 +41,7 @@ assign_ops! {
}
}
- impl<T, U, const LANES: usize> MulAssign<U> for Simd<T, LANES>
+ impl<T, U, const N: usize> MulAssign<U> for Simd<T, N>
where
Self: Mul,
{
@@ -50,7 +50,7 @@ assign_ops! {
}
}
- impl<T, U, const LANES: usize> SubAssign<U> for Simd<T, LANES>
+ impl<T, U, const N: usize> SubAssign<U> for Simd<T, N>
where
Self: Sub,
{
@@ -59,7 +59,7 @@ assign_ops! {
}
}
- impl<T, U, const LANES: usize> DivAssign<U> for Simd<T, LANES>
+ impl<T, U, const N: usize> DivAssign<U> for Simd<T, N>
where
Self: Div,
{
@@ -67,7 +67,7 @@ assign_ops! {
div
}
}
- impl<T, U, const LANES: usize> RemAssign<U> for Simd<T, LANES>
+ impl<T, U, const N: usize> RemAssign<U> for Simd<T, N>
where
Self: Rem,
{
@@ -77,7 +77,7 @@ assign_ops! {
}
// Bitops
- impl<T, U, const LANES: usize> BitAndAssign<U> for Simd<T, LANES>
+ impl<T, U, const N: usize> BitAndAssign<U> for Simd<T, N>
where
Self: BitAnd,
{
@@ -86,7 +86,7 @@ assign_ops! {
}
}
- impl<T, U, const LANES: usize> BitOrAssign<U> for Simd<T, LANES>
+ impl<T, U, const N: usize> BitOrAssign<U> for Simd<T, N>
where
Self: BitOr,
{
@@ -95,7 +95,7 @@ assign_ops! {
}
}
- impl<T, U, const LANES: usize> BitXorAssign<U> for Simd<T, LANES>
+ impl<T, U, const N: usize> BitXorAssign<U> for Simd<T, N>
where
Self: BitXor,
{
@@ -104,7 +104,7 @@ assign_ops! {
}
}
- impl<T, U, const LANES: usize> ShlAssign<U> for Simd<T, LANES>
+ impl<T, U, const N: usize> ShlAssign<U> for Simd<T, N>
where
Self: Shl,
{
@@ -113,7 +113,7 @@ assign_ops! {
}
}
- impl<T, U, const LANES: usize> ShrAssign<U> for Simd<T, LANES>
+ impl<T, U, const N: usize> ShrAssign<U> for Simd<T, N>
where
Self: Shr,
{
diff --git a/library/portable-simd/crates/core_simd/src/ops/deref.rs b/library/portable-simd/crates/core_simd/src/ops/deref.rs
index 302bf148b..89a60ba11 100644
--- a/library/portable-simd/crates/core_simd/src/ops/deref.rs
+++ b/library/portable-simd/crates/core_simd/src/ops/deref.rs
@@ -5,16 +5,16 @@
use super::*;
macro_rules! deref_lhs {
- (impl<T, const LANES: usize> $trait:ident for $simd:ty {
+ (impl<T, const N: usize> $trait:ident for $simd:ty {
fn $call:ident
}) => {
- impl<T, const LANES: usize> $trait<$simd> for &$simd
+ impl<T, const N: usize> $trait<$simd> for &$simd
where
T: SimdElement,
$simd: $trait<$simd, Output = $simd>,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
- type Output = Simd<T, LANES>;
+ type Output = Simd<T, N>;
#[inline]
#[must_use = "operator returns a new vector without mutating the inputs"]
@@ -26,16 +26,16 @@ macro_rules! deref_lhs {
}
macro_rules! deref_rhs {
- (impl<T, const LANES: usize> $trait:ident for $simd:ty {
+ (impl<T, const N: usize> $trait:ident for $simd:ty {
fn $call:ident
}) => {
- impl<T, const LANES: usize> $trait<&$simd> for $simd
+ impl<T, const N: usize> $trait<&$simd> for $simd
where
T: SimdElement,
$simd: $trait<$simd, Output = $simd>,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
- type Output = Simd<T, LANES>;
+ type Output = Simd<T, N>;
#[inline]
#[must_use = "operator returns a new vector without mutating the inputs"]
@@ -47,25 +47,25 @@ macro_rules! deref_rhs {
}
macro_rules! deref_ops {
- ($(impl<T, const LANES: usize> $trait:ident for $simd:ty {
+ ($(impl<T, const N: usize> $trait:ident for $simd:ty {
fn $call:ident
})*) => {
$(
deref_rhs! {
- impl<T, const LANES: usize> $trait for $simd {
+ impl<T, const N: usize> $trait for $simd {
fn $call
}
}
deref_lhs! {
- impl<T, const LANES: usize> $trait for $simd {
+ impl<T, const N: usize> $trait for $simd {
fn $call
}
}
- impl<'lhs, 'rhs, T, const LANES: usize> $trait<&'rhs $simd> for &'lhs $simd
+ impl<'lhs, 'rhs, T, const N: usize> $trait<&'rhs $simd> for &'lhs $simd
where
T: SimdElement,
$simd: $trait<$simd, Output = $simd>,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
type Output = $simd;
@@ -81,44 +81,44 @@ macro_rules! deref_ops {
deref_ops! {
// Arithmetic
- impl<T, const LANES: usize> Add for Simd<T, LANES> {
+ impl<T, const N: usize> Add for Simd<T, N> {
fn add
}
- impl<T, const LANES: usize> Mul for Simd<T, LANES> {
+ impl<T, const N: usize> Mul for Simd<T, N> {
fn mul
}
- impl<T, const LANES: usize> Sub for Simd<T, LANES> {
+ impl<T, const N: usize> Sub for Simd<T, N> {
fn sub
}
- impl<T, const LANES: usize> Div for Simd<T, LANES> {
+ impl<T, const N: usize> Div for Simd<T, N> {
fn div
}
- impl<T, const LANES: usize> Rem for Simd<T, LANES> {
+ impl<T, const N: usize> Rem for Simd<T, N> {
fn rem
}
// Bitops
- impl<T, const LANES: usize> BitAnd for Simd<T, LANES> {
+ impl<T, const N: usize> BitAnd for Simd<T, N> {
fn bitand
}
- impl<T, const LANES: usize> BitOr for Simd<T, LANES> {
+ impl<T, const N: usize> BitOr for Simd<T, N> {
fn bitor
}
- impl<T, const LANES: usize> BitXor for Simd<T, LANES> {
+ impl<T, const N: usize> BitXor for Simd<T, N> {
fn bitxor
}
- impl<T, const LANES: usize> Shl for Simd<T, LANES> {
+ impl<T, const N: usize> Shl for Simd<T, N> {
fn shl
}
- impl<T, const LANES: usize> Shr for Simd<T, LANES> {
+ impl<T, const N: usize> Shr for Simd<T, N> {
fn shr
}
}
diff --git a/library/portable-simd/crates/core_simd/src/ops/shift_scalar.rs b/library/portable-simd/crates/core_simd/src/ops/shift_scalar.rs
new file mode 100644
index 000000000..f5115a5a5
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/ops/shift_scalar.rs
@@ -0,0 +1,62 @@
+// Shift operations are unusual among binary ops in that they typically take a scalar on the right-hand side.
+// Here, we implement shifts for scalar RHS arguments.
+
+use crate::simd::{LaneCount, Simd, SupportedLaneCount};
+
+macro_rules! impl_splatted_shifts {
+ { impl $trait:ident :: $trait_fn:ident for $ty:ty } => {
+ impl<const N: usize> core::ops::$trait<$ty> for Simd<$ty, N>
+ where
+ LaneCount<N>: SupportedLaneCount,
+ {
+ type Output = Self;
+ #[inline]
+ fn $trait_fn(self, rhs: $ty) -> Self::Output {
+ self.$trait_fn(Simd::splat(rhs))
+ }
+ }
+
+ impl<const N: usize> core::ops::$trait<&$ty> for Simd<$ty, N>
+ where
+ LaneCount<N>: SupportedLaneCount,
+ {
+ type Output = Self;
+ #[inline]
+ fn $trait_fn(self, rhs: &$ty) -> Self::Output {
+ self.$trait_fn(Simd::splat(*rhs))
+ }
+ }
+
+ impl<'lhs, const N: usize> core::ops::$trait<$ty> for &'lhs Simd<$ty, N>
+ where
+ LaneCount<N>: SupportedLaneCount,
+ {
+ type Output = Simd<$ty, N>;
+ #[inline]
+ fn $trait_fn(self, rhs: $ty) -> Self::Output {
+ self.$trait_fn(Simd::splat(rhs))
+ }
+ }
+
+ impl<'lhs, const N: usize> core::ops::$trait<&$ty> for &'lhs Simd<$ty, N>
+ where
+ LaneCount<N>: SupportedLaneCount,
+ {
+ type Output = Simd<$ty, N>;
+ #[inline]
+ fn $trait_fn(self, rhs: &$ty) -> Self::Output {
+ self.$trait_fn(Simd::splat(*rhs))
+ }
+ }
+ };
+ { $($ty:ty),* } => {
+ $(
+ impl_splatted_shifts! { impl Shl::shl for $ty }
+ impl_splatted_shifts! { impl Shr::shr for $ty }
+ )*
+ }
+}
+
+// In the past there were inference issues when generically splatting arguments.
+// Enumerate them instead.
+impl_splatted_shifts! { i8, i16, i32, i64, isize, u8, u16, u32, u64, usize }
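To illustrate the new scalar-RHS impls, a sketch (nightly toolchain, `#![feature(portable_simd)]`): the scalar is splatted to a full vector before the element-wise shift.

```rust
#![feature(portable_simd)]
use std::simd::u32x4;

fn main() {
    let v = u32x4::from_array([1, 2, 4, 8]);
    // Equivalent to `v << u32x4::splat(1)`, via the impls above.
    assert_eq!(v << 1u32, u32x4::from_array([2, 4, 8, 16]));
    // A borrowed scalar works too, through the `&$ty` impl.
    let amount = 2u32;
    assert_eq!(v >> &amount, u32x4::from_array([0, 0, 1, 2]));
}
```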
diff --git a/library/portable-simd/crates/core_simd/src/ops/unary.rs b/library/portable-simd/crates/core_simd/src/ops/unary.rs
index 4ad022150..a651aa73e 100644
--- a/library/portable-simd/crates/core_simd/src/ops/unary.rs
+++ b/library/portable-simd/crates/core_simd/src/ops/unary.rs
@@ -3,11 +3,11 @@ use crate::simd::{LaneCount, Simd, SimdElement, SupportedLaneCount};
use core::ops::{Neg, Not}; // unary ops
macro_rules! neg {
- ($(impl<const LANES: usize> Neg for Simd<$scalar:ty, LANES>)*) => {
- $(impl<const LANES: usize> Neg for Simd<$scalar, LANES>
+ ($(impl<const N: usize> Neg for Simd<$scalar:ty, N>)*) => {
+ $(impl<const N: usize> Neg for Simd<$scalar, N>
where
$scalar: SimdElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
type Output = Self;
@@ -22,27 +22,27 @@ macro_rules! neg {
}
neg! {
- impl<const LANES: usize> Neg for Simd<f32, LANES>
+ impl<const N: usize> Neg for Simd<f32, N>
- impl<const LANES: usize> Neg for Simd<f64, LANES>
+ impl<const N: usize> Neg for Simd<f64, N>
- impl<const LANES: usize> Neg for Simd<i8, LANES>
+ impl<const N: usize> Neg for Simd<i8, N>
- impl<const LANES: usize> Neg for Simd<i16, LANES>
+ impl<const N: usize> Neg for Simd<i16, N>
- impl<const LANES: usize> Neg for Simd<i32, LANES>
+ impl<const N: usize> Neg for Simd<i32, N>
- impl<const LANES: usize> Neg for Simd<i64, LANES>
+ impl<const N: usize> Neg for Simd<i64, N>
- impl<const LANES: usize> Neg for Simd<isize, LANES>
+ impl<const N: usize> Neg for Simd<isize, N>
}
macro_rules! not {
- ($(impl<const LANES: usize> Not for Simd<$scalar:ty, LANES>)*) => {
- $(impl<const LANES: usize> Not for Simd<$scalar, LANES>
+ ($(impl<const N: usize> Not for Simd<$scalar:ty, N>)*) => {
+ $(impl<const N: usize> Not for Simd<$scalar, N>
where
$scalar: SimdElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
type Output = Self;
@@ -56,23 +56,23 @@ macro_rules! not {
}
not! {
- impl<const LANES: usize> Not for Simd<i8, LANES>
+ impl<const N: usize> Not for Simd<i8, N>
- impl<const LANES: usize> Not for Simd<i16, LANES>
+ impl<const N: usize> Not for Simd<i16, N>
- impl<const LANES: usize> Not for Simd<i32, LANES>
+ impl<const N: usize> Not for Simd<i32, N>
- impl<const LANES: usize> Not for Simd<i64, LANES>
+ impl<const N: usize> Not for Simd<i64, N>
- impl<const LANES: usize> Not for Simd<isize, LANES>
+ impl<const N: usize> Not for Simd<isize, N>
- impl<const LANES: usize> Not for Simd<u8, LANES>
+ impl<const N: usize> Not for Simd<u8, N>
- impl<const LANES: usize> Not for Simd<u16, LANES>
+ impl<const N: usize> Not for Simd<u16, N>
- impl<const LANES: usize> Not for Simd<u32, LANES>
+ impl<const N: usize> Not for Simd<u32, N>
- impl<const LANES: usize> Not for Simd<u64, LANES>
+ impl<const N: usize> Not for Simd<u64, N>
- impl<const LANES: usize> Not for Simd<usize, LANES>
+ impl<const N: usize> Not for Simd<usize, N>
}
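The unary impls above in a sketch (nightly, `#![feature(portable_simd)]`): `Neg` covers the signed-integer and float vectors, `Not` the integer vectors.

```rust
#![feature(portable_simd)]
use std::simd::{f32x4, i32x4};

fn main() {
    assert_eq!(-i32x4::from_array([1, -2, 3, -4]), i32x4::from_array([-1, 2, -3, 4]));
    assert_eq!(!i32x4::splat(0), i32x4::splat(-1)); // bitwise complement per element
    assert_eq!(-f32x4::splat(1.5), f32x4::splat(-1.5));
}
```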
diff --git a/library/portable-simd/crates/core_simd/src/select.rs b/library/portable-simd/crates/core_simd/src/select.rs
index 065c5987d..cdcf8eeec 100644
--- a/library/portable-simd/crates/core_simd/src/select.rs
+++ b/library/portable-simd/crates/core_simd/src/select.rs
@@ -1,15 +1,15 @@
use crate::simd::intrinsics;
use crate::simd::{LaneCount, Mask, MaskElement, Simd, SimdElement, SupportedLaneCount};
-impl<T, const LANES: usize> Mask<T, LANES>
+impl<T, const N: usize> Mask<T, N>
where
T: MaskElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
- /// Choose lanes from two vectors.
+ /// Choose elements from two vectors.
///
- /// For each lane in the mask, choose the corresponding lane from `true_values` if
- /// that lane mask is true, and `false_values` if that lane mask is false.
+ /// For each element in the mask, choose the corresponding element from `true_values` if
+ /// the mask element is true, and from `false_values` if it is false.
///
/// # Examples
/// ```
@@ -23,11 +23,7 @@ where
/// ```
#[inline]
#[must_use = "method returns a new vector and does not mutate the original inputs"]
- pub fn select<U>(
- self,
- true_values: Simd<U, LANES>,
- false_values: Simd<U, LANES>,
- ) -> Simd<U, LANES>
+ pub fn select<U>(self, true_values: Simd<U, N>, false_values: Simd<U, N>) -> Simd<U, N>
where
U: SimdElement<Mask = T>,
{
@@ -36,10 +32,10 @@ where
unsafe { intrinsics::simd_select(self.to_int(), true_values, false_values) }
}
- /// Choose lanes from two masks.
+ /// Choose elements from two masks.
///
- /// For each lane in the mask, choose the corresponding lane from `true_values` if
- /// that lane mask is true, and `false_values` if that lane mask is false.
+ /// For each element in the mask, choose the corresponding element from `true_values` if
+ /// the mask element is true, and from `false_values` if it is false.
///
/// # Examples
/// ```
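A minimal usage sketch of the selection described above (nightly compiler, `#![feature(portable_simd)]`; illustrative, not from the patch):

```rust
#![feature(portable_simd)]
use std::simd::{i32x4, mask32x4};

fn main() {
    let mask = mask32x4::from_array([true, false, true, false]);
    let a = i32x4::from_array([1, 2, 3, 4]);
    let b = i32x4::from_array([10, 20, 30, 40]);
    // Picks from `a` where the mask element is true, from `b` where it is false.
    assert_eq!(mask.select(a, b), i32x4::from_array([1, 20, 3, 40]));
}
```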
diff --git a/library/portable-simd/crates/core_simd/src/simd/cmp.rs b/library/portable-simd/crates/core_simd/src/simd/cmp.rs
new file mode 100644
index 000000000..a8d81dbf2
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/simd/cmp.rs
@@ -0,0 +1,7 @@
+//! Traits for comparing and ordering vectors.
+
+mod eq;
+mod ord;
+
+pub use eq::*;
+pub use ord::*;
diff --git a/library/portable-simd/crates/core_simd/src/eq.rs b/library/portable-simd/crates/core_simd/src/simd/cmp/eq.rs
index 80763c072..f132fa2cc 100644
--- a/library/portable-simd/crates/core_simd/src/eq.rs
+++ b/library/portable-simd/crates/core_simd/src/simd/cmp/eq.rs
@@ -1,5 +1,7 @@
use crate::simd::{
- intrinsics, LaneCount, Mask, Simd, SimdConstPtr, SimdElement, SimdMutPtr, SupportedLaneCount,
+ intrinsics,
+ ptr::{SimdConstPtr, SimdMutPtr},
+ LaneCount, Mask, Simd, SimdElement, SupportedLaneCount,
};
/// Parallel `PartialEq`.
@@ -7,11 +9,11 @@ pub trait SimdPartialEq {
/// The mask type returned by each comparison.
type Mask;
- /// Test if each lane is equal to the corresponding lane in `other`.
+ /// Test if each element is equal to the corresponding element in `other`.
#[must_use = "method returns a new mask and does not mutate the original value"]
fn simd_eq(self, other: Self) -> Self::Mask;
- /// Test if each lane is equal to the corresponding lane in `other`.
+ /// Test if each element is equal to the corresponding element in `other`.
#[must_use = "method returns a new mask and does not mutate the original value"]
fn simd_ne(self, other: Self) -> Self::Mask;
}
@@ -19,11 +21,11 @@ pub trait SimdPartialEq {
macro_rules! impl_number {
{ $($number:ty),* } => {
$(
- impl<const LANES: usize> SimdPartialEq for Simd<$number, LANES>
+ impl<const N: usize> SimdPartialEq for Simd<$number, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
- type Mask = Mask<<$number as SimdElement>::Mask, LANES>;
+ type Mask = Mask<<$number as SimdElement>::Mask, N>;
#[inline]
fn simd_eq(self, other: Self) -> Self::Mask {
@@ -48,9 +50,9 @@ impl_number! { f32, f64, u8, u16, u32, u64, usize, i8, i16, i32, i64, isize }
macro_rules! impl_mask {
{ $($integer:ty),* } => {
$(
- impl<const LANES: usize> SimdPartialEq for Mask<$integer, LANES>
+ impl<const N: usize> SimdPartialEq for Mask<$integer, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
type Mask = Self;
@@ -74,11 +76,11 @@ macro_rules! impl_mask {
impl_mask! { i8, i16, i32, i64, isize }
-impl<T, const LANES: usize> SimdPartialEq for Simd<*const T, LANES>
+impl<T, const N: usize> SimdPartialEq for Simd<*const T, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
- type Mask = Mask<isize, LANES>;
+ type Mask = Mask<isize, N>;
#[inline]
fn simd_eq(self, other: Self) -> Self::Mask {
@@ -91,11 +93,11 @@ where
}
}
-impl<T, const LANES: usize> SimdPartialEq for Simd<*mut T, LANES>
+impl<T, const N: usize> SimdPartialEq for Simd<*mut T, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
- type Mask = Mask<isize, LANES>;
+ type Mask = Mask<isize, N>;
#[inline]
fn simd_eq(self, other: Self) -> Self::Mask {
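The `simd::cmp` traits compare element-wise and return masks rather than a single `bool`. An illustrative sketch (nightly, `#![feature(portable_simd)]`, using the `prelude` re-exports introduced in this series):

```rust
#![feature(portable_simd)]
use std::simd::prelude::*;

fn main() {
    let a = u32x4::from_array([1, 2, 3, 4]);
    let b = u32x4::from_array([1, 0, 3, 0]);
    // `simd_eq` tests each element pair and collects the results in a mask.
    assert_eq!(a.simd_eq(b), mask32x4::from_array([true, false, true, false]));
    assert!(a.simd_ne(b).any());
}
```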
diff --git a/library/portable-simd/crates/core_simd/src/ord.rs b/library/portable-simd/crates/core_simd/src/simd/cmp/ord.rs
index b2455190e..4e9d49ea2 100644
--- a/library/portable-simd/crates/core_simd/src/ord.rs
+++ b/library/portable-simd/crates/core_simd/src/simd/cmp/ord.rs
@@ -1,44 +1,47 @@
use crate::simd::{
- intrinsics, LaneCount, Mask, Simd, SimdConstPtr, SimdMutPtr, SimdPartialEq, SupportedLaneCount,
+ cmp::SimdPartialEq,
+ intrinsics,
+ ptr::{SimdConstPtr, SimdMutPtr},
+ LaneCount, Mask, Simd, SupportedLaneCount,
};
/// Parallel `PartialOrd`.
pub trait SimdPartialOrd: SimdPartialEq {
- /// Test if each lane is less than the corresponding lane in `other`.
+ /// Test if each element is less than the corresponding element in `other`.
#[must_use = "method returns a new mask and does not mutate the original value"]
fn simd_lt(self, other: Self) -> Self::Mask;
- /// Test if each lane is less than or equal to the corresponding lane in `other`.
+ /// Test if each element is less than or equal to the corresponding element in `other`.
#[must_use = "method returns a new mask and does not mutate the original value"]
fn simd_le(self, other: Self) -> Self::Mask;
- /// Test if each lane is greater than the corresponding lane in `other`.
+ /// Test if each element is greater than the corresponding element in `other`.
#[must_use = "method returns a new mask and does not mutate the original value"]
fn simd_gt(self, other: Self) -> Self::Mask;
- /// Test if each lane is greater than or equal to the corresponding lane in `other`.
+ /// Test if each element is greater than or equal to the corresponding element in `other`.
#[must_use = "method returns a new mask and does not mutate the original value"]
fn simd_ge(self, other: Self) -> Self::Mask;
}
/// Parallel `Ord`.
pub trait SimdOrd: SimdPartialOrd {
- /// Returns the lane-wise maximum with `other`.
+ /// Returns the element-wise maximum with `other`.
#[must_use = "method returns a new vector and does not mutate the original value"]
fn simd_max(self, other: Self) -> Self;
- /// Returns the lane-wise minimum with `other`.
+ /// Returns the element-wise minimum with `other`.
#[must_use = "method returns a new vector and does not mutate the original value"]
fn simd_min(self, other: Self) -> Self;
- /// Restrict each lane to a certain interval.
+ /// Restrict each element to a certain interval.
///
- /// For each lane, returns `max` if `self` is greater than `max`, and `min` if `self` is
+ /// For each element, returns `max` if `self` is greater than `max`, and `min` if `self` is
/// less than `min`. Otherwise returns `self`.
///
/// # Panics
///
- /// Panics if `min > max` on any lane.
+ /// Panics if `min > max` on any element.
#[must_use = "method returns a new vector and does not mutate the original value"]
fn simd_clamp(self, min: Self, max: Self) -> Self;
}
@@ -46,9 +49,9 @@ pub trait SimdOrd: SimdPartialOrd {
macro_rules! impl_integer {
{ $($integer:ty),* } => {
$(
- impl<const LANES: usize> SimdPartialOrd for Simd<$integer, LANES>
+ impl<const N: usize> SimdPartialOrd for Simd<$integer, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn simd_lt(self, other: Self) -> Self::Mask {
@@ -79,9 +82,9 @@ macro_rules! impl_integer {
}
}
- impl<const LANES: usize> SimdOrd for Simd<$integer, LANES>
+ impl<const N: usize> SimdOrd for Simd<$integer, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn simd_max(self, other: Self) -> Self {
@@ -98,7 +101,7 @@ macro_rules! impl_integer {
fn simd_clamp(self, min: Self, max: Self) -> Self {
assert!(
min.simd_le(max).all(),
- "each lane in `min` must be less than or equal to the corresponding lane in `max`",
+ "each element in `min` must be less than or equal to the corresponding element in `max`",
);
self.simd_max(min).simd_min(max)
}
@@ -112,9 +115,9 @@ impl_integer! { u8, u16, u32, u64, usize, i8, i16, i32, i64, isize }
macro_rules! impl_float {
{ $($float:ty),* } => {
$(
- impl<const LANES: usize> SimdPartialOrd for Simd<$float, LANES>
+ impl<const N: usize> SimdPartialOrd for Simd<$float, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn simd_lt(self, other: Self) -> Self::Mask {
@@ -153,9 +156,9 @@ impl_float! { f32, f64 }
macro_rules! impl_mask {
{ $($integer:ty),* } => {
$(
- impl<const LANES: usize> SimdPartialOrd for Mask<$integer, LANES>
+ impl<const N: usize> SimdPartialOrd for Mask<$integer, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn simd_lt(self, other: Self) -> Self::Mask {
@@ -186,9 +189,9 @@ macro_rules! impl_mask {
}
}
- impl<const LANES: usize> SimdOrd for Mask<$integer, LANES>
+ impl<const N: usize> SimdOrd for Mask<$integer, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn simd_max(self, other: Self) -> Self {
@@ -205,7 +208,7 @@ macro_rules! impl_mask {
fn simd_clamp(self, min: Self, max: Self) -> Self {
assert!(
min.simd_le(max).all(),
- "each lane in `min` must be less than or equal to the corresponding lane in `max`",
+ "each element in `min` must be less than or equal to the corresponding element in `max`",
);
self.simd_max(min).simd_min(max)
}
@@ -216,9 +219,9 @@ macro_rules! impl_mask {
impl_mask! { i8, i16, i32, i64, isize }
-impl<T, const LANES: usize> SimdPartialOrd for Simd<*const T, LANES>
+impl<T, const N: usize> SimdPartialOrd for Simd<*const T, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn simd_lt(self, other: Self) -> Self::Mask {
@@ -241,9 +244,9 @@ where
}
}
-impl<T, const LANES: usize> SimdOrd for Simd<*const T, LANES>
+impl<T, const N: usize> SimdOrd for Simd<*const T, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn simd_max(self, other: Self) -> Self {
@@ -260,15 +263,15 @@ where
fn simd_clamp(self, min: Self, max: Self) -> Self {
assert!(
min.simd_le(max).all(),
- "each lane in `min` must be less than or equal to the corresponding lane in `max`",
+ "each element in `min` must be less than or equal to the corresponding element in `max`",
);
self.simd_max(min).simd_min(max)
}
}
-impl<T, const LANES: usize> SimdPartialOrd for Simd<*mut T, LANES>
+impl<T, const N: usize> SimdPartialOrd for Simd<*mut T, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn simd_lt(self, other: Self) -> Self::Mask {
@@ -291,9 +294,9 @@ where
}
}
-impl<T, const LANES: usize> SimdOrd for Simd<*mut T, LANES>
+impl<T, const N: usize> SimdOrd for Simd<*mut T, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn simd_max(self, other: Self) -> Self {
@@ -310,7 +313,7 @@ where
fn simd_clamp(self, min: Self, max: Self) -> Self {
assert!(
min.simd_le(max).all(),
- "each lane in `min` must be less than or equal to the corresponding lane in `max`",
+ "each element in `min` must be less than or equal to the corresponding element in `max`",
);
self.simd_max(min).simd_min(max)
}
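Element-wise ordering in a sketch (nightly, `#![feature(portable_simd)]`); note that `simd_clamp` asserts `min <= max` for every element, per the panic message above:

```rust
#![feature(portable_simd)]
use std::simd::prelude::*;

fn main() {
    let x = i32x4::from_array([-5, 0, 5, 10]);
    let (lo, hi) = (i32x4::splat(0), i32x4::splat(6));
    // `simd_clamp` is max-then-min, exactly as implemented above.
    assert_eq!(x.simd_clamp(lo, hi), i32x4::from_array([0, 0, 5, 6]));
    assert_eq!(x.simd_max(lo).simd_min(hi), x.simd_clamp(lo, hi));
}
```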
diff --git a/library/portable-simd/crates/core_simd/src/elements.rs b/library/portable-simd/crates/core_simd/src/simd/num.rs
index dc7f52a4d..22a4802ec 100644
--- a/library/portable-simd/crates/core_simd/src/elements.rs
+++ b/library/portable-simd/crates/core_simd/src/simd/num.rs
@@ -1,15 +1,13 @@
-mod const_ptr;
+//! Traits for vectors with numeric elements.
+
mod float;
mod int;
-mod mut_ptr;
mod uint;
mod sealed {
pub trait Sealed {}
}
-pub use const_ptr::*;
pub use float::*;
pub use int::*;
-pub use mut_ptr::*;
pub use uint::*;
diff --git a/library/portable-simd/crates/core_simd/src/elements/float.rs b/library/portable-simd/crates/core_simd/src/simd/num/float.rs
index 501c1c5dd..fc0b99e87 100644
--- a/library/portable-simd/crates/core_simd/src/elements/float.rs
+++ b/library/portable-simd/crates/core_simd/src/simd/num/float.rs
@@ -1,7 +1,7 @@
use super::sealed::Sealed;
use crate::simd::{
- intrinsics, LaneCount, Mask, Simd, SimdCast, SimdElement, SimdPartialEq, SimdPartialOrd,
- SupportedLaneCount,
+ cmp::{SimdPartialEq, SimdPartialOrd},
+ intrinsics, LaneCount, Mask, Simd, SimdCast, SimdElement, SupportedLaneCount,
};
/// Operations on SIMD vectors of floats.
@@ -28,7 +28,7 @@ pub trait SimdFloat: Copy + Sealed {
/// # #![feature(portable_simd)]
/// # #[cfg(feature = "as_crate")] use core_simd::simd;
/// # #[cfg(not(feature = "as_crate"))] use core::simd;
- /// # use simd::{SimdFloat, SimdInt, Simd};
+ /// # use simd::prelude::*;
/// let floats: Simd<f32, 4> = Simd::from_array([1.9, -4.5, f32::INFINITY, f32::NAN]);
/// let ints = floats.cast::<i32>();
/// assert_eq!(ints, Simd::from_array([1, -4, i32::MAX, 0]));
@@ -63,64 +63,64 @@ pub trait SimdFloat: Copy + Sealed {
Self::Scalar: core::convert::FloatToInt<I>;
/// Raw transmutation to an unsigned integer vector type with the
- /// same size and number of lanes.
+ /// same size and number of elements.
#[must_use = "method returns a new vector and does not mutate the original value"]
fn to_bits(self) -> Self::Bits;
/// Raw transmutation from an unsigned integer vector type with the
- /// same size and number of lanes.
+ /// same size and number of elements.
#[must_use = "method returns a new vector and does not mutate the original value"]
fn from_bits(bits: Self::Bits) -> Self;
- /// Produces a vector where every lane has the absolute value of the
- /// equivalently-indexed lane in `self`.
+ /// Produces a vector where every element has the absolute value of the
+ /// equivalently-indexed element in `self`.
#[must_use = "method returns a new vector and does not mutate the original value"]
fn abs(self) -> Self;
- /// Takes the reciprocal (inverse) of each lane, `1/x`.
+ /// Takes the reciprocal (inverse) of each element, `1/x`.
#[must_use = "method returns a new vector and does not mutate the original value"]
fn recip(self) -> Self;
- /// Converts each lane from radians to degrees.
+ /// Converts each element from radians to degrees.
#[must_use = "method returns a new vector and does not mutate the original value"]
fn to_degrees(self) -> Self;
- /// Converts each lane from degrees to radians.
+ /// Converts each element from degrees to radians.
#[must_use = "method returns a new vector and does not mutate the original value"]
fn to_radians(self) -> Self;
- /// Returns true for each lane if it has a positive sign, including
+ /// Returns true for each element if it has a positive sign, including
/// `+0.0`, `NaN`s with positive sign bit and positive infinity.
#[must_use = "method returns a new mask and does not mutate the original value"]
fn is_sign_positive(self) -> Self::Mask;
- /// Returns true for each lane if it has a negative sign, including
+ /// Returns true for each element if it has a negative sign, including
/// `-0.0`, `NaN`s with negative sign bit and negative infinity.
#[must_use = "method returns a new mask and does not mutate the original value"]
fn is_sign_negative(self) -> Self::Mask;
- /// Returns true for each lane if its value is `NaN`.
+ /// Returns true for each element if its value is `NaN`.
#[must_use = "method returns a new mask and does not mutate the original value"]
fn is_nan(self) -> Self::Mask;
- /// Returns true for each lane if its value is positive infinity or negative infinity.
+ /// Returns true for each element if its value is positive infinity or negative infinity.
#[must_use = "method returns a new mask and does not mutate the original value"]
fn is_infinite(self) -> Self::Mask;
- /// Returns true for each lane if its value is neither infinite nor `NaN`.
+ /// Returns true for each element if its value is neither infinite nor `NaN`.
#[must_use = "method returns a new mask and does not mutate the original value"]
fn is_finite(self) -> Self::Mask;
- /// Returns true for each lane if its value is subnormal.
+ /// Returns true for each element if its value is subnormal.
#[must_use = "method returns a new mask and does not mutate the original value"]
fn is_subnormal(self) -> Self::Mask;
- /// Returns true for each lane if its value is neither zero, infinite,
+ /// Returns true for each element if its value is neither zero, infinite,
/// subnormal, nor `NaN`.
#[must_use = "method returns a new mask and does not mutate the original value"]
fn is_normal(self) -> Self::Mask;
- /// Replaces each lane with a number that represents its sign.
+ /// Replaces each element with a number that represents its sign.
///
/// * `1.0` if the number is positive, `+0.0`, or `INFINITY`
/// * `-1.0` if the number is negative, `-0.0`, or `NEG_INFINITY`
@@ -128,33 +128,33 @@ pub trait SimdFloat: Copy + Sealed {
#[must_use = "method returns a new vector and does not mutate the original value"]
fn signum(self) -> Self;
- /// Returns each lane with the magnitude of `self` and the sign of `sign`.
+ /// Returns each element with the magnitude of `self` and the sign of `sign`.
///
- /// For any lane containing a `NAN`, a `NAN` with the sign of `sign` is returned.
+ /// For any element containing a `NAN`, a `NAN` with the sign of `sign` is returned.
#[must_use = "method returns a new vector and does not mutate the original value"]
fn copysign(self, sign: Self) -> Self;
- /// Returns the minimum of each lane.
+ /// Returns the minimum of each element.
///
/// If one of the values is `NAN`, then the other value is returned.
#[must_use = "method returns a new vector and does not mutate the original value"]
fn simd_min(self, other: Self) -> Self;
- /// Returns the maximum of each lane.
+ /// Returns the maximum of each element.
///
/// If one of the values is `NAN`, then the other value is returned.
#[must_use = "method returns a new vector and does not mutate the original value"]
fn simd_max(self, other: Self) -> Self;
- /// Restrict each lane to a certain interval unless it is NaN.
+ /// Restrict each element to a certain interval unless it is NaN.
///
- /// For each lane in `self`, returns the corresponding lane in `max` if the lane is
- /// greater than `max`, and the corresponding lane in `min` if the lane is less
- /// than `min`. Otherwise returns the lane in `self`.
+ /// For each element in `self`, returns the corresponding element in `max` if the element is
+ /// greater than `max`, and the corresponding element in `min` if the element is less
+ /// than `min`. Otherwise returns the element in `self`.
#[must_use = "method returns a new vector and does not mutate the original value"]
fn simd_clamp(self, min: Self, max: Self) -> Self;
- /// Returns the sum of the lanes of the vector.
+ /// Returns the sum of the elements of the vector.
///
/// # Examples
///
@@ -162,13 +162,13 @@ pub trait SimdFloat: Copy + Sealed {
/// # #![feature(portable_simd)]
/// # #[cfg(feature = "as_crate")] use core_simd::simd;
/// # #[cfg(not(feature = "as_crate"))] use core::simd;
- /// # use simd::{f32x2, SimdFloat};
+ /// # use simd::prelude::*;
/// let v = f32x2::from_array([1., 2.]);
/// assert_eq!(v.reduce_sum(), 3.);
/// ```
fn reduce_sum(self) -> Self::Scalar;
- /// Reducing multiply. Returns the product of the lanes of the vector.
+ /// Reducing multiply. Returns the product of the elements of the vector.
///
/// # Examples
///
@@ -176,18 +176,18 @@ pub trait SimdFloat: Copy + Sealed {
/// # #![feature(portable_simd)]
/// # #[cfg(feature = "as_crate")] use core_simd::simd;
/// # #[cfg(not(feature = "as_crate"))] use core::simd;
- /// # use simd::{f32x2, SimdFloat};
+ /// # use simd::prelude::*;
/// let v = f32x2::from_array([3., 4.]);
/// assert_eq!(v.reduce_product(), 12.);
/// ```
fn reduce_product(self) -> Self::Scalar;
- /// Returns the maximum lane in the vector.
+ /// Returns the maximum element in the vector.
///
/// Returns values based on equality, so a vector containing both `0.` and `-0.` may
/// return either.
///
- /// This function will not return `NaN` unless all lanes are `NaN`.
+ /// This function will not return `NaN` unless all elements are `NaN`.
///
/// # Examples
///
@@ -195,7 +195,7 @@ pub trait SimdFloat: Copy + Sealed {
/// # #![feature(portable_simd)]
/// # #[cfg(feature = "as_crate")] use core_simd::simd;
/// # #[cfg(not(feature = "as_crate"))] use core::simd;
- /// # use simd::{f32x2, SimdFloat};
+ /// # use simd::prelude::*;
/// let v = f32x2::from_array([1., 2.]);
/// assert_eq!(v.reduce_max(), 2.);
///
@@ -209,12 +209,12 @@ pub trait SimdFloat: Copy + Sealed {
/// ```
fn reduce_max(self) -> Self::Scalar;
- /// Returns the minimum lane in the vector.
+ /// Returns the minimum element in the vector.
///
/// Returns values based on equality, so a vector containing both `0.` and `-0.` may
/// return either.
///
- /// This function will not return `NaN` unless all lanes are `NaN`.
+ /// This function will not return `NaN` unless all elements are `NaN`.
///
/// # Examples
///
@@ -222,7 +222,7 @@ pub trait SimdFloat: Copy + Sealed {
/// # #![feature(portable_simd)]
/// # #[cfg(feature = "as_crate")] use core_simd::simd;
/// # #[cfg(not(feature = "as_crate"))] use core::simd;
- /// # use simd::{f32x2, SimdFloat};
+ /// # use simd::prelude::*;
/// let v = f32x2::from_array([3., 7.]);
/// assert_eq!(v.reduce_min(), 3.);
///
@@ -240,20 +240,20 @@ pub trait SimdFloat: Copy + Sealed {
macro_rules! impl_trait {
{ $($ty:ty { bits: $bits_ty:ty, mask: $mask_ty:ty }),* } => {
$(
- impl<const LANES: usize> Sealed for Simd<$ty, LANES>
+ impl<const N: usize> Sealed for Simd<$ty, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
}
- impl<const LANES: usize> SimdFloat for Simd<$ty, LANES>
+ impl<const N: usize> SimdFloat for Simd<$ty, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
- type Mask = Mask<<$mask_ty as SimdElement>::Mask, LANES>;
+ type Mask = Mask<<$mask_ty as SimdElement>::Mask, N>;
type Scalar = $ty;
- type Bits = Simd<$bits_ty, LANES>;
- type Cast<T: SimdElement> = Simd<T, LANES>;
+ type Bits = Simd<$bits_ty, N>;
+ type Cast<T: SimdElement> = Simd<T, N>;
#[inline]
fn cast<T: SimdCast>(self) -> Self::Cast<T>
@@ -273,14 +273,14 @@ macro_rules! impl_trait {
}
#[inline]
- fn to_bits(self) -> Simd<$bits_ty, LANES> {
+ fn to_bits(self) -> Simd<$bits_ty, N> {
assert_eq!(core::mem::size_of::<Self>(), core::mem::size_of::<Self::Bits>());
// Safety: transmuting between vector types is safe
unsafe { core::mem::transmute_copy(&self) }
}
#[inline]
- fn from_bits(bits: Simd<$bits_ty, LANES>) -> Self {
+ fn from_bits(bits: Simd<$bits_ty, N>) -> Self {
assert_eq!(core::mem::size_of::<Self>(), core::mem::size_of::<Self::Bits>());
// Safety: transmuting between vector types is safe
unsafe { core::mem::transmute_copy(&bits) }
@@ -336,7 +336,10 @@ macro_rules! impl_trait {
#[inline]
fn is_subnormal(self) -> Self::Mask {
- self.abs().simd_ne(Self::splat(0.0)) & (self.to_bits() & Self::splat(Self::Scalar::INFINITY).to_bits()).simd_eq(Simd::splat(0))
+ // On some architectures (e.g. armv7 and some ppc) subnormals are flushed to zero,
+ // so this comparison must be done with integers.
+ let not_zero = self.abs().to_bits().simd_ne(Self::splat(0.0).to_bits());
+ not_zero & (self.to_bits() & Self::splat(Self::Scalar::INFINITY).to_bits()).simd_eq(Simd::splat(0))
}
#[inline]
@@ -373,7 +376,7 @@ macro_rules! impl_trait {
fn simd_clamp(self, min: Self, max: Self) -> Self {
assert!(
min.simd_le(max).all(),
- "each lane in `min` must be less than or equal to the corresponding lane in `max`",
+ "each element in `min` must be less than or equal to the corresponding element in `max`",
);
let mut x = self;
x = x.simd_lt(min).select(min, x);
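The integer-based `is_subnormal` above can be exercised like this (a sketch on nightly with `#![feature(portable_simd)]`; `1.0e-40_f32` is subnormal, since the smallest positive normal `f32` is about `1.18e-38`):

```rust
#![feature(portable_simd)]
use std::simd::prelude::*;

fn main() {
    let v = f32x4::from_array([0.0, 1.0e-40, 1.0, f32::NAN]);
    // Classified via bit patterns, so correct even where the FPU
    // flushes subnormals to zero.
    assert_eq!(v.is_subnormal(), mask32x4::from_array([false, true, false, false]));
    assert_eq!(v.is_nan(), mask32x4::from_array([false, false, false, true]));
}
```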
diff --git a/library/portable-simd/crates/core_simd/src/elements/int.rs b/library/portable-simd/crates/core_simd/src/simd/num/int.rs
index 6db89ff9a..1f1aa2727 100644
--- a/library/portable-simd/crates/core_simd/src/elements/int.rs
+++ b/library/portable-simd/crates/core_simd/src/simd/num/int.rs
@@ -1,6 +1,7 @@
use super::sealed::Sealed;
use crate::simd::{
- intrinsics, LaneCount, Mask, Simd, SimdCast, SimdElement, SimdPartialOrd, SupportedLaneCount,
+ cmp::SimdPartialOrd, intrinsics, num::SimdUint, LaneCount, Mask, Simd, SimdCast, SimdElement,
+ SupportedLaneCount,
};
/// Operations on SIMD vectors of signed integers.
@@ -11,6 +12,9 @@ pub trait SimdInt: Copy + Sealed {
/// Scalar type contained by this SIMD vector type.
type Scalar;
+ /// A SIMD vector of unsigned integers with the same element size.
+ type Unsigned;
+
/// A SIMD vector with a different element type.
type Cast<T: SimdElement>;
@@ -28,7 +32,7 @@ pub trait SimdInt: Copy + Sealed {
/// # #![feature(portable_simd)]
/// # #[cfg(feature = "as_crate")] use core_simd::simd;
/// # #[cfg(not(feature = "as_crate"))] use core::simd;
- /// # use simd::{Simd, SimdInt};
+ /// # use simd::prelude::*;
/// use core::i32::{MIN, MAX};
/// let x = Simd::from_array([MIN, 0, 1, MAX]);
/// let max = Simd::splat(MAX);
@@ -46,7 +50,7 @@ pub trait SimdInt: Copy + Sealed {
/// # #![feature(portable_simd)]
/// # #[cfg(feature = "as_crate")] use core_simd::simd;
/// # #[cfg(not(feature = "as_crate"))] use core::simd;
- /// # use simd::{Simd, SimdInt};
+ /// # use simd::prelude::*;
/// use core::i32::{MIN, MAX};
/// let x = Simd::from_array([MIN, -2, -1, MAX]);
/// let max = Simd::splat(MAX);
@@ -57,14 +61,14 @@ pub trait SimdInt: Copy + Sealed {
fn saturating_sub(self, second: Self) -> Self;
/// Lanewise absolute value, implemented in Rust.
- /// Every lane becomes its absolute value.
+ /// Every element becomes its absolute value.
///
/// # Examples
/// ```
/// # #![feature(portable_simd)]
/// # #[cfg(feature = "as_crate")] use core_simd::simd;
/// # #[cfg(not(feature = "as_crate"))] use core::simd;
- /// # use simd::{Simd, SimdInt};
+ /// # use simd::prelude::*;
/// use core::i32::{MIN, MAX};
/// let xs = Simd::from_array([MIN, MIN +1, -5, 0]);
/// assert_eq!(xs.abs(), Simd::from_array([MIN, MAX, 5, 0]));
@@ -79,7 +83,7 @@ pub trait SimdInt: Copy + Sealed {
/// # #![feature(portable_simd)]
/// # #[cfg(feature = "as_crate")] use core_simd::simd;
/// # #[cfg(not(feature = "as_crate"))] use core::simd;
- /// # use simd::{Simd, SimdInt};
+ /// # use simd::prelude::*;
/// use core::i32::{MIN, MAX};
/// let xs = Simd::from_array([MIN, -2, 0, 3]);
/// let unsat = xs.abs();
@@ -97,7 +101,7 @@ pub trait SimdInt: Copy + Sealed {
/// # #![feature(portable_simd)]
/// # #[cfg(feature = "as_crate")] use core_simd::simd;
/// # #[cfg(not(feature = "as_crate"))] use core::simd;
- /// # use simd::{Simd, SimdInt};
+ /// # use simd::prelude::*;
/// use core::i32::{MIN, MAX};
/// let x = Simd::from_array([MIN, -2, 3, MAX]);
/// let unsat = -x;
@@ -107,19 +111,19 @@ pub trait SimdInt: Copy + Sealed {
/// ```
fn saturating_neg(self) -> Self;
- /// Returns true for each positive lane and false if it is zero or negative.
+ /// Returns true for each positive element and false if it is zero or negative.
fn is_positive(self) -> Self::Mask;
- /// Returns true for each negative lane and false if it is zero or positive.
+ /// Returns true for each negative element and false if it is zero or positive.
fn is_negative(self) -> Self::Mask;
- /// Returns numbers representing the sign of each lane.
+ /// Returns numbers representing the sign of each element.
/// * `0` if the number is zero
/// * `1` if the number is positive
/// * `-1` if the number is negative
fn signum(self) -> Self;
- /// Returns the sum of the lanes of the vector, with wrapping addition.
+ /// Returns the sum of the elements of the vector, with wrapping addition.
///
/// # Examples
///
@@ -127,7 +131,7 @@ pub trait SimdInt: Copy + Sealed {
/// # #![feature(portable_simd)]
/// # #[cfg(feature = "as_crate")] use core_simd::simd;
/// # #[cfg(not(feature = "as_crate"))] use core::simd;
- /// # use simd::{i32x4, SimdInt};
+ /// # use simd::prelude::*;
/// let v = i32x4::from_array([1, 2, 3, 4]);
/// assert_eq!(v.reduce_sum(), 10);
///
@@ -137,7 +141,7 @@ pub trait SimdInt: Copy + Sealed {
/// ```
fn reduce_sum(self) -> Self::Scalar;
- /// Returns the product of the lanes of the vector, with wrapping multiplication.
+ /// Returns the product of the elements of the vector, with wrapping multiplication.
///
/// # Examples
///
@@ -145,7 +149,7 @@ pub trait SimdInt: Copy + Sealed {
/// # #![feature(portable_simd)]
/// # #[cfg(feature = "as_crate")] use core_simd::simd;
/// # #[cfg(not(feature = "as_crate"))] use core::simd;
- /// # use simd::{i32x4, SimdInt};
+ /// # use simd::prelude::*;
/// let v = i32x4::from_array([1, 2, 3, 4]);
/// assert_eq!(v.reduce_product(), 24);
///
@@ -155,7 +159,7 @@ pub trait SimdInt: Copy + Sealed {
/// ```
fn reduce_product(self) -> Self::Scalar;
- /// Returns the maximum lane in the vector.
+ /// Returns the maximum element in the vector.
///
/// # Examples
///
@@ -163,13 +167,13 @@ pub trait SimdInt: Copy + Sealed {
/// # #![feature(portable_simd)]
/// # #[cfg(feature = "as_crate")] use core_simd::simd;
/// # #[cfg(not(feature = "as_crate"))] use core::simd;
- /// # use simd::{i32x4, SimdInt};
+ /// # use simd::prelude::*;
/// let v = i32x4::from_array([1, 2, 3, 4]);
/// assert_eq!(v.reduce_max(), 4);
/// ```
fn reduce_max(self) -> Self::Scalar;
- /// Returns the minimum lane in the vector.
+ /// Returns the minimum element in the vector.
///
/// # Examples
///
@@ -177,38 +181,58 @@ pub trait SimdInt: Copy + Sealed {
/// # #![feature(portable_simd)]
/// # #[cfg(feature = "as_crate")] use core_simd::simd;
/// # #[cfg(not(feature = "as_crate"))] use core::simd;
- /// # use simd::{i32x4, SimdInt};
+ /// # use simd::prelude::*;
/// let v = i32x4::from_array([1, 2, 3, 4]);
/// assert_eq!(v.reduce_min(), 1);
/// ```
fn reduce_min(self) -> Self::Scalar;
- /// Returns the cumulative bitwise "and" across the lanes of the vector.
+ /// Returns the cumulative bitwise "and" across the elements of the vector.
fn reduce_and(self) -> Self::Scalar;
- /// Returns the cumulative bitwise "or" across the lanes of the vector.
+ /// Returns the cumulative bitwise "or" across the elements of the vector.
fn reduce_or(self) -> Self::Scalar;
- /// Returns the cumulative bitwise "xor" across the lanes of the vector.
+ /// Returns the cumulative bitwise "xor" across the elements of the vector.
fn reduce_xor(self) -> Self::Scalar;
+
+ /// Reverses the byte order of each element.
+ fn swap_bytes(self) -> Self;
+
+ /// Reverses the order of bits in each element.
+ /// The least-significant bit becomes the most-significant bit, the second least-significant becomes the second most-significant, and so on.
+ fn reverse_bits(self) -> Self;
+
+ /// Returns the number of leading zeros in the binary representation of each element.
+ fn leading_zeros(self) -> Self::Unsigned;
+
+ /// Returns the number of trailing zeros in the binary representation of each element.
+ fn trailing_zeros(self) -> Self::Unsigned;
+
+ /// Returns the number of leading ones in the binary representation of each element.
+ fn leading_ones(self) -> Self::Unsigned;
+
+ /// Returns the number of trailing ones in the binary representation of each element.
+ fn trailing_ones(self) -> Self::Unsigned;
}
macro_rules! impl_trait {
- { $($ty:ty),* } => {
+ { $($ty:ident ($unsigned:ident)),* } => {
$(
- impl<const LANES: usize> Sealed for Simd<$ty, LANES>
+ impl<const N: usize> Sealed for Simd<$ty, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
}
- impl<const LANES: usize> SimdInt for Simd<$ty, LANES>
+ impl<const N: usize> SimdInt for Simd<$ty, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
- type Mask = Mask<<$ty as SimdElement>::Mask, LANES>;
+ type Mask = Mask<<$ty as SimdElement>::Mask, N>;
type Scalar = $ty;
- type Cast<T: SimdElement> = Simd<T, LANES>;
+ type Unsigned = Simd<$unsigned, N>;
+ type Cast<T: SimdElement> = Simd<T, N>;
#[inline]
fn cast<T: SimdCast>(self) -> Self::Cast<T> {
@@ -307,9 +331,41 @@ macro_rules! impl_trait {
// Safety: `self` is an integer vector
unsafe { intrinsics::simd_reduce_xor(self) }
}
+
+ #[inline]
+ fn swap_bytes(self) -> Self {
+ // Safety: `self` is an integer vector
+ unsafe { intrinsics::simd_bswap(self) }
+ }
+
+ #[inline]
+ fn reverse_bits(self) -> Self {
+ // Safety: `self` is an integer vector
+ unsafe { intrinsics::simd_bitreverse(self) }
+ }
+
+ #[inline]
+ fn leading_zeros(self) -> Self::Unsigned {
+ self.cast::<$unsigned>().leading_zeros()
+ }
+
+ #[inline]
+ fn trailing_zeros(self) -> Self::Unsigned {
+ self.cast::<$unsigned>().trailing_zeros()
+ }
+
+ #[inline]
+ fn leading_ones(self) -> Self::Unsigned {
+ self.cast::<$unsigned>().leading_ones()
+ }
+
+ #[inline]
+ fn trailing_ones(self) -> Self::Unsigned {
+ self.cast::<$unsigned>().trailing_ones()
+ }
}
)*
}
}
-impl_trait! { i8, i16, i32, i64, isize }
+impl_trait! { i8 (u8), i16 (u16), i32 (u32), i64 (u64), isize (usize) }
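A sketch of the new `SimdInt` bit-manipulation methods (nightly, `#![feature(portable_simd)]`); the zero/one counts come back as the associated `Unsigned` vector:

```rust
#![feature(portable_simd)]
use std::simd::prelude::*;

fn main() {
    let v = i32x4::from_array([1, 2, -1, i32::MIN]);
    assert_eq!(v.leading_zeros(), u32x4::from_array([31, 30, 0, 0]));
    assert_eq!(v.trailing_zeros(), u32x4::from_array([0, 1, 0, 31]));
    assert_eq!(i32x4::splat(0x1234_5678).swap_bytes(), i32x4::splat(0x7856_3412));
}
```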
diff --git a/library/portable-simd/crates/core_simd/src/elements/uint.rs b/library/portable-simd/crates/core_simd/src/simd/num/uint.rs
index 3926c395e..c955ee8fe 100644
--- a/library/portable-simd/crates/core_simd/src/elements/uint.rs
+++ b/library/portable-simd/crates/core_simd/src/simd/num/uint.rs
@@ -16,6 +16,12 @@ pub trait SimdUint: Copy + Sealed {
#[must_use]
fn cast<T: SimdCast>(self) -> Self::Cast<T>;
+ /// Wrapping negation.
+ ///
+ /// Like [`u32::wrapping_neg`], the result wraps around for every element; negating
+ /// zero yields zero.
+ fn wrapping_neg(self) -> Self;
+
/// Lanewise saturating add.
///
/// # Examples
@@ -23,7 +29,7 @@ pub trait SimdUint: Copy + Sealed {
/// # #![feature(portable_simd)]
/// # #[cfg(feature = "as_crate")] use core_simd::simd;
/// # #[cfg(not(feature = "as_crate"))] use core::simd;
- /// # use simd::{Simd, SimdUint};
+ /// # use simd::prelude::*;
/// use core::u32::MAX;
/// let x = Simd::from_array([2, 1, 0, MAX]);
/// let max = Simd::splat(MAX);
@@ -41,7 +47,7 @@ pub trait SimdUint: Copy + Sealed {
/// # #![feature(portable_simd)]
/// # #[cfg(feature = "as_crate")] use core_simd::simd;
/// # #[cfg(not(feature = "as_crate"))] use core::simd;
- /// # use simd::{Simd, SimdUint};
+ /// # use simd::prelude::*;
/// use core::u32::MAX;
/// let x = Simd::from_array([2, 1, 0, MAX]);
/// let max = Simd::splat(MAX);
@@ -51,43 +57,62 @@ pub trait SimdUint: Copy + Sealed {
/// assert_eq!(sat, Simd::splat(0));
fn saturating_sub(self, second: Self) -> Self;
- /// Returns the sum of the lanes of the vector, with wrapping addition.
+ /// Returns the sum of the elements of the vector, with wrapping addition.
fn reduce_sum(self) -> Self::Scalar;
- /// Returns the product of the lanes of the vector, with wrapping multiplication.
+ /// Returns the product of the elements of the vector, with wrapping multiplication.
fn reduce_product(self) -> Self::Scalar;
- /// Returns the maximum lane in the vector.
+ /// Returns the maximum element in the vector.
fn reduce_max(self) -> Self::Scalar;
- /// Returns the minimum lane in the vector.
+ /// Returns the minimum element in the vector.
fn reduce_min(self) -> Self::Scalar;
- /// Returns the cumulative bitwise "and" across the lanes of the vector.
+ /// Returns the cumulative bitwise "and" across the elements of the vector.
fn reduce_and(self) -> Self::Scalar;
- /// Returns the cumulative bitwise "or" across the lanes of the vector.
+ /// Returns the cumulative bitwise "or" across the elements of the vector.
fn reduce_or(self) -> Self::Scalar;
- /// Returns the cumulative bitwise "xor" across the lanes of the vector.
+ /// Returns the cumulative bitwise "xor" across the elements of the vector.
fn reduce_xor(self) -> Self::Scalar;
+
+ /// Reverses the byte order of each element.
+ fn swap_bytes(self) -> Self;
+
+ /// Reverses the order of bits in each element.
+ /// The least-significant bit becomes the most-significant bit, the second least-significant becomes the second most-significant, and so on.
+ fn reverse_bits(self) -> Self;
+
+ /// Returns the number of leading zeros in the binary representation of each element.
+ fn leading_zeros(self) -> Self;
+
+ /// Returns the number of trailing zeros in the binary representation of each element.
+ fn trailing_zeros(self) -> Self;
+
+ /// Returns the number of leading ones in the binary representation of each element.
+ fn leading_ones(self) -> Self;
+
+ /// Returns the number of trailing ones in the binary representation of each element.
+ fn trailing_ones(self) -> Self;
}
macro_rules! impl_trait {
- { $($ty:ty),* } => {
+ { $($ty:ident ($signed:ident)),* } => {
$(
- impl<const LANES: usize> Sealed for Simd<$ty, LANES>
+ impl<const N: usize> Sealed for Simd<$ty, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
}
- impl<const LANES: usize> SimdUint for Simd<$ty, LANES>
+ impl<const N: usize> SimdUint for Simd<$ty, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
type Scalar = $ty;
- type Cast<T: SimdElement> = Simd<T, LANES>;
+ type Cast<T: SimdElement> = Simd<T, N>;
#[inline]
fn cast<T: SimdCast>(self) -> Self::Cast<T> {
@@ -96,6 +121,12 @@ macro_rules! impl_trait {
}
#[inline]
+ fn wrapping_neg(self) -> Self {
+ use crate::simd::num::SimdInt;
+ (-self.cast::<$signed>()).cast()
+ }
+
+ #[inline]
fn saturating_add(self, second: Self) -> Self {
// Safety: `self` is a vector
unsafe { intrinsics::simd_saturating_add(self, second) }
@@ -148,9 +179,43 @@ macro_rules! impl_trait {
// Safety: `self` is an integer vector
unsafe { intrinsics::simd_reduce_xor(self) }
}
+
+ #[inline]
+ fn swap_bytes(self) -> Self {
+ // Safety: `self` is an integer vector
+ unsafe { intrinsics::simd_bswap(self) }
+ }
+
+ #[inline]
+ fn reverse_bits(self) -> Self {
+ // Safety: `self` is an integer vector
+ unsafe { intrinsics::simd_bitreverse(self) }
+ }
+
+ #[inline]
+ fn leading_zeros(self) -> Self {
+ // Safety: `self` is an integer vector
+ unsafe { intrinsics::simd_ctlz(self) }
+ }
+
+ #[inline]
+ fn trailing_zeros(self) -> Self {
+ // Safety: `self` is an integer vector
+ unsafe { intrinsics::simd_cttz(self) }
+ }
+
+ #[inline]
+ fn leading_ones(self) -> Self {
+ (!self).leading_zeros()
+ }
+
+ #[inline]
+ fn trailing_ones(self) -> Self {
+ (!self).trailing_zeros()
+ }
}
)*
}
}
-impl_trait! { u8, u16, u32, u64, usize }
+impl_trait! { u8 (i8), u16 (i16), u32 (i32), u64 (i64), usize (isize) }
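`wrapping_neg` is defined above as negation through the signed cast; a sketch of the observable behaviour (nightly, `#![feature(portable_simd)]`):

```rust
#![feature(portable_simd)]
use std::simd::prelude::*;

fn main() {
    let v = u32x4::from_array([0, 1, 2, u32::MAX]);
    assert_eq!(v.wrapping_neg(), u32x4::from_array([0, u32::MAX, u32::MAX - 1, 1]));
    // Matches the scalar method element-wise.
    assert_eq!(1u32.wrapping_neg(), u32::MAX);
}
```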
diff --git a/library/portable-simd/crates/core_simd/src/simd/prelude.rs b/library/portable-simd/crates/core_simd/src/simd/prelude.rs
index e8fdc932d..4b7c744c0 100644
--- a/library/portable-simd/crates/core_simd/src/simd/prelude.rs
+++ b/library/portable-simd/crates/core_simd/src/simd/prelude.rs
@@ -7,8 +7,10 @@
#[doc(no_inline)]
pub use super::{
- simd_swizzle, Mask, Simd, SimdConstPtr, SimdFloat, SimdInt, SimdMutPtr, SimdOrd, SimdPartialEq,
- SimdPartialOrd, SimdUint,
+ cmp::{SimdOrd, SimdPartialEq, SimdPartialOrd},
+ num::{SimdFloat, SimdInt, SimdUint},
+ ptr::{SimdConstPtr, SimdMutPtr},
+ simd_swizzle, Mask, Simd,
};
#[rustfmt::skip]
diff --git a/library/portable-simd/crates/core_simd/src/simd/ptr.rs b/library/portable-simd/crates/core_simd/src/simd/ptr.rs
new file mode 100644
index 000000000..3f8e66691
--- /dev/null
+++ b/library/portable-simd/crates/core_simd/src/simd/ptr.rs
@@ -0,0 +1,11 @@
+//! Traits for vectors of pointers.
+
+mod const_ptr;
+mod mut_ptr;
+
+mod sealed {
+ pub trait Sealed {}
+}
+
+pub use const_ptr::*;
+pub use mut_ptr::*;
diff --git a/library/portable-simd/crates/core_simd/src/elements/const_ptr.rs b/library/portable-simd/crates/core_simd/src/simd/ptr/const_ptr.rs
index f215f9a61..97fe3fb60 100644
--- a/library/portable-simd/crates/core_simd/src/elements/const_ptr.rs
+++ b/library/portable-simd/crates/core_simd/src/simd/ptr/const_ptr.rs
@@ -1,15 +1,17 @@
use super::sealed::Sealed;
-use crate::simd::{intrinsics, LaneCount, Mask, Simd, SimdPartialEq, SimdUint, SupportedLaneCount};
+use crate::simd::{
+ cmp::SimdPartialEq, intrinsics, num::SimdUint, LaneCount, Mask, Simd, SupportedLaneCount,
+};
/// Operations on SIMD vectors of constant pointers.
pub trait SimdConstPtr: Copy + Sealed {
- /// Vector of `usize` with the same number of lanes.
+ /// Vector of `usize` with the same number of elements.
type Usize;
- /// Vector of `isize` with the same number of lanes.
+ /// Vector of `isize` with the same number of elements.
type Isize;
- /// Vector of const pointers with the same number of lanes.
+ /// Vector of const pointers with the same number of elements.
type CastPtr<T>;
/// Vector of mutable pointers to the same type.
@@ -18,17 +20,17 @@ pub trait SimdConstPtr: Copy + Sealed {
/// Mask type used for manipulating this SIMD vector type.
type Mask;
- /// Returns `true` for each lane that is null.
+ /// Returns `true` for each element that is null.
fn is_null(self) -> Self::Mask;
/// Casts to a pointer of another type.
///
- /// Equivalent to calling [`pointer::cast`] on each lane.
+ /// Equivalent to calling [`pointer::cast`] on each element.
fn cast<T>(self) -> Self::CastPtr<T>;
/// Changes constness without changing the type.
///
- /// Equivalent to calling [`pointer::cast_mut`] on each lane.
+ /// Equivalent to calling [`pointer::cast_mut`] on each element.
fn cast_mut(self) -> Self::MutPtr;
/// Gets the "address" portion of the pointer.
@@ -39,7 +41,7 @@ pub trait SimdConstPtr: Copy + Sealed {
/// This method semantically discards *provenance* and
/// *address-space* information. To properly restore that information, use [`Self::with_addr`].
///
- /// Equivalent to calling [`pointer::addr`] on each lane.
+ /// Equivalent to calling [`pointer::addr`] on each element.
fn addr(self) -> Self::Usize;
/// Creates a new pointer with the given address.
@@ -47,7 +49,7 @@ pub trait SimdConstPtr: Copy + Sealed {
/// This performs the same operation as a cast, but copies the *address-space* and
/// *provenance* of `self` to the new pointer.
///
- /// Equivalent to calling [`pointer::with_addr`] on each lane.
+ /// Equivalent to calling [`pointer::with_addr`] on each element.
fn with_addr(self, addr: Self::Usize) -> Self;
/// Gets the "address" portion of the pointer, and "exposes" the provenance part for future use
@@ -56,39 +58,36 @@ pub trait SimdConstPtr: Copy + Sealed {
/// Convert an address back to a pointer, picking up a previously "exposed" provenance.
///
- /// Equivalent to calling [`core::ptr::from_exposed_addr`] on each lane.
+ /// Equivalent to calling [`core::ptr::from_exposed_addr`] on each element.
fn from_exposed_addr(addr: Self::Usize) -> Self;
/// Calculates the offset from a pointer using wrapping arithmetic.
///
- /// Equivalent to calling [`pointer::wrapping_offset`] on each lane.
+ /// Equivalent to calling [`pointer::wrapping_offset`] on each element.
fn wrapping_offset(self, offset: Self::Isize) -> Self;
/// Calculates the offset from a pointer using wrapping arithmetic.
///
- /// Equivalent to calling [`pointer::wrapping_add`] on each lane.
+ /// Equivalent to calling [`pointer::wrapping_add`] on each element.
fn wrapping_add(self, count: Self::Usize) -> Self;
/// Calculates the offset from a pointer using wrapping arithmetic.
///
- /// Equivalent to calling [`pointer::wrapping_sub`] on each lane.
+ /// Equivalent to calling [`pointer::wrapping_sub`] on each element.
fn wrapping_sub(self, count: Self::Usize) -> Self;
}
-impl<T, const LANES: usize> Sealed for Simd<*const T, LANES> where
- LaneCount<LANES>: SupportedLaneCount
-{
-}
+impl<T, const N: usize> Sealed for Simd<*const T, N> where LaneCount<N>: SupportedLaneCount {}
-impl<T, const LANES: usize> SimdConstPtr for Simd<*const T, LANES>
+impl<T, const N: usize> SimdConstPtr for Simd<*const T, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
- type Usize = Simd<usize, LANES>;
- type Isize = Simd<isize, LANES>;
- type CastPtr<U> = Simd<*const U, LANES>;
- type MutPtr = Simd<*mut T, LANES>;
- type Mask = Mask<isize, LANES>;
+ type Usize = Simd<usize, N>;
+ type Isize = Simd<isize, N>;
+ type CastPtr<U> = Simd<*const U, N>;
+ type MutPtr = Simd<*mut T, N>;
+ type Mask = Mask<isize, N>;
#[inline]
fn is_null(self) -> Self::Mask {
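A sketch of element-wise pointer arithmetic with `SimdConstPtr` (nightly, `#![feature(portable_simd)]`), staying within one allocation:

```rust
#![feature(portable_simd)]
use std::simd::prelude::*;

fn main() {
    let xs = [10u32, 11, 12, 13];
    let base = Simd::splat(xs.as_ptr());
    // Like calling `ptr.wrapping_add(i)` on each element.
    let ptrs = base.wrapping_add(Simd::from_array([0, 1, 2, 3]));
    assert!(!ptrs.is_null().any());
    // Each step advances by `size_of::<u32>()` bytes.
    assert_eq!(ptrs.addr() - base.addr(), Simd::from_array([0, 4, 8, 12]));
}
```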
diff --git a/library/portable-simd/crates/core_simd/src/elements/mut_ptr.rs b/library/portable-simd/crates/core_simd/src/simd/ptr/mut_ptr.rs
index 4bdc6a14c..e35633d04 100644
--- a/library/portable-simd/crates/core_simd/src/elements/mut_ptr.rs
+++ b/library/portable-simd/crates/core_simd/src/simd/ptr/mut_ptr.rs
@@ -1,15 +1,17 @@
use super::sealed::Sealed;
-use crate::simd::{intrinsics, LaneCount, Mask, Simd, SimdPartialEq, SimdUint, SupportedLaneCount};
+use crate::simd::{
+ cmp::SimdPartialEq, intrinsics, num::SimdUint, LaneCount, Mask, Simd, SupportedLaneCount,
+};
/// Operations on SIMD vectors of mutable pointers.
pub trait SimdMutPtr: Copy + Sealed {
- /// Vector of `usize` with the same number of lanes.
+ /// Vector of `usize` with the same number of elements.
type Usize;
- /// Vector of `isize` with the same number of lanes.
+ /// Vector of `isize` with the same number of elements.
type Isize;
- /// Vector of const pointers with the same number of lanes.
+ /// Vector of const pointers with the same number of elements.
type CastPtr<T>;
/// Vector of constant pointers to the same type.
@@ -18,17 +20,17 @@ pub trait SimdMutPtr: Copy + Sealed {
/// Mask type used for manipulating this SIMD vector type.
type Mask;
- /// Returns `true` for each lane that is null.
+ /// Returns `true` for each element that is null.
fn is_null(self) -> Self::Mask;
/// Casts to a pointer of another type.
///
- /// Equivalent to calling [`pointer::cast`] on each lane.
+ /// Equivalent to calling [`pointer::cast`] on each element.
fn cast<T>(self) -> Self::CastPtr<T>;
/// Changes constness without changing the type.
///
- /// Equivalent to calling [`pointer::cast_const`] on each lane.
+ /// Equivalent to calling [`pointer::cast_const`] on each element.
fn cast_const(self) -> Self::ConstPtr;
/// Gets the "address" portion of the pointer.
@@ -36,7 +38,7 @@ pub trait SimdMutPtr: Copy + Sealed {
/// This method discards pointer semantic metadata, so the result cannot be
/// directly cast into a valid pointer.
///
- /// Equivalent to calling [`pointer::addr`] on each lane.
+ /// Equivalent to calling [`pointer::addr`] on each element.
fn addr(self) -> Self::Usize;
/// Creates a new pointer with the given address.
@@ -44,7 +46,7 @@ pub trait SimdMutPtr: Copy + Sealed {
/// This performs the same operation as a cast, but copies the *address-space* and
/// *provenance* of `self` to the new pointer.
///
- /// Equivalent to calling [`pointer::with_addr`] on each lane.
+ /// Equivalent to calling [`pointer::with_addr`] on each element.
fn with_addr(self, addr: Self::Usize) -> Self;
/// Gets the "address" portion of the pointer, and "exposes" the provenance part for future use
@@ -53,37 +55,36 @@ pub trait SimdMutPtr: Copy + Sealed {
/// Convert an address back to a pointer, picking up a previously "exposed" provenance.
///
- /// Equivalent to calling [`core::ptr::from_exposed_addr_mut`] on each lane.
+ /// Equivalent to calling [`core::ptr::from_exposed_addr_mut`] on each element.
fn from_exposed_addr(addr: Self::Usize) -> Self;
/// Calculates the offset from a pointer using wrapping arithmetic.
///
- /// Equivalent to calling [`pointer::wrapping_offset`] on each lane.
+ /// Equivalent to calling [`pointer::wrapping_offset`] on each element.
fn wrapping_offset(self, offset: Self::Isize) -> Self;
/// Calculates the offset from a pointer using wrapping arithmetic.
///
- /// Equivalent to calling [`pointer::wrapping_add`] on each lane.
+ /// Equivalent to calling [`pointer::wrapping_add`] on each element.
fn wrapping_add(self, count: Self::Usize) -> Self;
/// Calculates the offset from a pointer using wrapping arithmetic.
///
- /// Equivalent to calling [`pointer::wrapping_sub`] on each lane.
+ /// Equivalent to calling [`pointer::wrapping_sub`] on each element.
fn wrapping_sub(self, count: Self::Usize) -> Self;
}
-impl<T, const LANES: usize> Sealed for Simd<*mut T, LANES> where LaneCount<LANES>: SupportedLaneCount
-{}
+impl<T, const N: usize> Sealed for Simd<*mut T, N> where LaneCount<N>: SupportedLaneCount {}
-impl<T, const LANES: usize> SimdMutPtr for Simd<*mut T, LANES>
+impl<T, const N: usize> SimdMutPtr for Simd<*mut T, N>
where
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
- type Usize = Simd<usize, LANES>;
- type Isize = Simd<isize, LANES>;
- type CastPtr<U> = Simd<*mut U, LANES>;
- type ConstPtr = Simd<*const T, LANES>;
- type Mask = Mask<isize, LANES>;
+ type Usize = Simd<usize, N>;
+ type Isize = Simd<isize, N>;
+ type CastPtr<U> = Simd<*mut U, N>;
+ type ConstPtr = Simd<*const T, N>;
+ type Mask = Mask<isize, N>;
#[inline]
fn is_null(self) -> Self::Mask {
diff --git a/library/portable-simd/crates/core_simd/src/swizzle.rs b/library/portable-simd/crates/core_simd/src/swizzle.rs
index 68f20516c..ec8548d55 100644
--- a/library/portable-simd/crates/core_simd/src/swizzle.rs
+++ b/library/portable-simd/crates/core_simd/src/swizzle.rs
@@ -1,17 +1,15 @@
use crate::simd::intrinsics;
-use crate::simd::{LaneCount, Simd, SimdElement, SupportedLaneCount};
+use crate::simd::{LaneCount, Mask, MaskElement, Simd, SimdElement, SupportedLaneCount};
-/// Constructs a new SIMD vector by copying elements from selected lanes in other vectors.
+/// Constructs a new SIMD vector by copying selected elements from other vectors.
///
-/// When swizzling one vector, lanes are selected by a `const` array of `usize`,
-/// like [`Swizzle`].
+/// When swizzling one vector, elements are selected like [`Swizzle::swizzle`].
///
-/// When swizzling two vectors, lanes are selected by a `const` array of [`Which`],
-/// like [`Swizzle2`].
+/// When swizzling two vectors, elements are selected like [`Swizzle::concat_swizzle`].
///
/// # Examples
///
-/// With a single SIMD vector, the const array specifies lane indices in that vector:
+/// With a single SIMD vector, the const array specifies element indices in that vector:
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::{u32x2, u32x4, simd_swizzle};
@@ -21,25 +19,27 @@ use crate::simd::{LaneCount, Simd, SimdElement, SupportedLaneCount};
/// let r: u32x4 = simd_swizzle!(v, [3, 0, 1, 2]);
/// assert_eq!(r.to_array(), [13, 10, 11, 12]);
///
-/// // Changing the number of lanes
+/// // Changing the number of elements
/// let r: u32x2 = simd_swizzle!(v, [3, 1]);
/// assert_eq!(r.to_array(), [13, 11]);
/// ```
///
-/// With two input SIMD vectors, the const array uses `Which` to specify the source of each index:
+/// With two input SIMD vectors, the const array specifies element indices in the concatenation of
+/// those vectors:
/// ```
/// # #![feature(portable_simd)]
-/// # use core::simd::{u32x2, u32x4, simd_swizzle, Which};
-/// use Which::{First, Second};
+/// # #[cfg(feature = "as_crate")] use core_simd::simd;
+/// # #[cfg(not(feature = "as_crate"))] use core::simd;
+/// # use simd::{u32x2, u32x4, simd_swizzle};
/// let a = u32x4::from_array([0, 1, 2, 3]);
/// let b = u32x4::from_array([4, 5, 6, 7]);
///
/// // Keeping the same size
-/// let r: u32x4 = simd_swizzle!(a, b, [First(0), First(1), Second(2), Second(3)]);
+/// let r: u32x4 = simd_swizzle!(a, b, [0, 1, 6, 7]);
/// assert_eq!(r.to_array(), [0, 1, 6, 7]);
///
-/// // Changing the number of lanes
-/// let r: u32x2 = simd_swizzle!(a, b, [First(0), Second(0)]);
+/// // Changing the number of elements
+/// let r: u32x2 = simd_swizzle!(a, b, [0, 4]);
/// assert_eq!(r.to_array(), [0, 4]);
/// ```
#[allow(unused_macros)]
@@ -50,7 +50,7 @@ pub macro simd_swizzle {
{
use $crate::simd::Swizzle;
struct Impl;
- impl<const LANES: usize> Swizzle<LANES, {$index.len()}> for Impl {
+ impl Swizzle<{$index.len()}> for Impl {
const INDEX: [usize; {$index.len()}] = $index;
}
Impl::swizzle($vector)
@@ -60,204 +60,194 @@ pub macro simd_swizzle {
$first:expr, $second:expr, $index:expr $(,)?
) => {
{
- use $crate::simd::{Which, Swizzle2};
+ use $crate::simd::Swizzle;
struct Impl;
- impl<const LANES: usize> Swizzle2<LANES, {$index.len()}> for Impl {
- const INDEX: [Which; {$index.len()}] = $index;
+ impl Swizzle<{$index.len()}> for Impl {
+ const INDEX: [usize; {$index.len()}] = $index;
}
- Impl::swizzle2($first, $second)
+ Impl::concat_swizzle($first, $second)
}
}
}
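
For illustration, the two-vector arm above now expands to an ordinary `Swizzle` impl whose `usize` indices address the concatenation of both inputs, replacing the removed `Which` enum. A minimal hand-expanded sketch, assuming a nightly toolchain with the `portable_simd` feature:

```rust
#![feature(portable_simd)]
use core::simd::{u32x4, Swizzle};

fn concat_swizzle_by_hand(a: u32x4, b: u32x4) -> u32x4 {
    struct Impl;
    impl Swizzle<4> for Impl {
        // Indices 0..4 select from `a`; indices 4..8 select from `b`.
        const INDEX: [usize; 4] = [0, 1, 6, 7];
    }
    Impl::concat_swizzle(a, b)
}
```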
-/// Specifies a lane index into one of two SIMD vectors.
-///
-/// This is an input type for [Swizzle2] and helper macros like [simd_swizzle].
-#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-pub enum Which {
- /// Index of a lane in the first input SIMD vector.
- First(usize),
- /// Index of a lane in the second input SIMD vector.
- Second(usize),
-}
-
/// Create a vector from the elements of another vector.
-pub trait Swizzle<const INPUT_LANES: usize, const OUTPUT_LANES: usize> {
- /// Map from the lanes of the input vector to the output vector.
- const INDEX: [usize; OUTPUT_LANES];
+pub trait Swizzle<const N: usize> {
+ /// Map from the elements of the input vector to the output vector.
+ const INDEX: [usize; N];
- /// Create a new vector from the lanes of `vector`.
+ /// Create a new vector from the elements of `vector`.
///
/// Lane `i` of the output is `vector[Self::INDEX[i]]`.
#[inline]
#[must_use = "method returns a new vector and does not mutate the original inputs"]
- fn swizzle<T>(vector: Simd<T, INPUT_LANES>) -> Simd<T, OUTPUT_LANES>
+ fn swizzle<T, const M: usize>(vector: Simd<T, M>) -> Simd<T, N>
where
T: SimdElement,
- LaneCount<INPUT_LANES>: SupportedLaneCount,
- LaneCount<OUTPUT_LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
+ LaneCount<M>: SupportedLaneCount,
{
- // Safety: `vector` is a vector, and `INDEX_IMPL` is a const array of u32.
- unsafe { intrinsics::simd_shuffle(vector, vector, Self::INDEX_IMPL) }
+ // Safety: `vector` is a vector, and the index is a const array of u32.
+ unsafe {
+ intrinsics::simd_shuffle(
+ vector,
+ vector,
+ const {
+ let mut output = [0; N];
+ let mut i = 0;
+ while i < N {
+ let index = Self::INDEX[i];
+ assert!(index as u32 as usize == index);
+ assert!(
+ index < M,
+ "source element index exceeds input vector length"
+ );
+ output[i] = index as u32;
+ i += 1;
+ }
+ output
+ },
+ )
+ }
}
-}
-
-/// Create a vector from the elements of two other vectors.
-pub trait Swizzle2<const INPUT_LANES: usize, const OUTPUT_LANES: usize> {
- /// Map from the lanes of the input vectors to the output vector
- const INDEX: [Which; OUTPUT_LANES];
- /// Create a new vector from the lanes of `first` and `second`.
+ /// Create a new vector from the elements of `first` and `second`.
///
- /// Lane `i` is `first[j]` when `Self::INDEX[i]` is `First(j)`, or `second[j]` when it is
- /// `Second(j)`.
+ /// Element `i` of the output is `concat[Self::INDEX[i]]`, where `concat` is the concatenation of
+ /// `first` and `second`.
#[inline]
#[must_use = "method returns a new vector and does not mutate the original inputs"]
- fn swizzle2<T>(
- first: Simd<T, INPUT_LANES>,
- second: Simd<T, INPUT_LANES>,
- ) -> Simd<T, OUTPUT_LANES>
+ fn concat_swizzle<T, const M: usize>(first: Simd<T, M>, second: Simd<T, M>) -> Simd<T, N>
where
T: SimdElement,
- LaneCount<INPUT_LANES>: SupportedLaneCount,
- LaneCount<OUTPUT_LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
+ LaneCount<M>: SupportedLaneCount,
{
- // Safety: `first` and `second` are vectors, and `INDEX_IMPL` is a const array of u32.
- unsafe { intrinsics::simd_shuffle(first, second, Self::INDEX_IMPL) }
- }
-}
-
-/// The `simd_shuffle` intrinsic expects `u32`, so do error checking and conversion here.
-/// This trait hides `INDEX_IMPL` from the public API.
-trait SwizzleImpl<const INPUT_LANES: usize, const OUTPUT_LANES: usize> {
- const INDEX_IMPL: [u32; OUTPUT_LANES];
-}
-
-impl<T, const INPUT_LANES: usize, const OUTPUT_LANES: usize> SwizzleImpl<INPUT_LANES, OUTPUT_LANES>
- for T
-where
- T: Swizzle<INPUT_LANES, OUTPUT_LANES> + ?Sized,
-{
- const INDEX_IMPL: [u32; OUTPUT_LANES] = {
- let mut output = [0; OUTPUT_LANES];
- let mut i = 0;
- while i < OUTPUT_LANES {
- let index = Self::INDEX[i];
- assert!(index as u32 as usize == index);
- assert!(index < INPUT_LANES, "source lane exceeds input lane count",);
- output[i] = index as u32;
- i += 1;
+ // Safety: `first` and `second` are vectors, and the index is a const array of u32.
+ unsafe {
+ intrinsics::simd_shuffle(
+ first,
+ second,
+ const {
+ let mut output = [0; N];
+ let mut i = 0;
+ while i < N {
+ let index = Self::INDEX[i];
+ assert!(index as u32 as usize == index);
+ assert!(
+ index < 2 * M,
+ "source element index exceeds input vector length"
+ );
+ output[i] = index as u32;
+ i += 1;
+ }
+ output
+ },
+ )
}
- output
- };
-}
-
-/// The `simd_shuffle` intrinsic expects `u32`, so do error checking and conversion here.
-/// This trait hides `INDEX_IMPL` from the public API.
-trait Swizzle2Impl<const INPUT_LANES: usize, const OUTPUT_LANES: usize> {
- const INDEX_IMPL: [u32; OUTPUT_LANES];
-}
+ }
-impl<T, const INPUT_LANES: usize, const OUTPUT_LANES: usize> Swizzle2Impl<INPUT_LANES, OUTPUT_LANES>
- for T
-where
- T: Swizzle2<INPUT_LANES, OUTPUT_LANES> + ?Sized,
-{
- const INDEX_IMPL: [u32; OUTPUT_LANES] = {
- let mut output = [0; OUTPUT_LANES];
- let mut i = 0;
- while i < OUTPUT_LANES {
- let (offset, index) = match Self::INDEX[i] {
- Which::First(index) => (false, index),
- Which::Second(index) => (true, index),
- };
- assert!(index < INPUT_LANES, "source lane exceeds input lane count",);
+ /// Create a new mask from the elements of `mask`.
+ ///
+ /// Element `i` of the output is `mask[Self::INDEX[i]]`.
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original inputs"]
+ fn swizzle_mask<T, const M: usize>(mask: Mask<T, M>) -> Mask<T, N>
+ where
+ T: MaskElement,
+ LaneCount<N>: SupportedLaneCount,
+ LaneCount<M>: SupportedLaneCount,
+ {
+ // SAFETY: all elements of this mask come from another mask
+ unsafe { Mask::from_int_unchecked(Self::swizzle(mask.to_int())) }
+ }
- // lanes are indexed by the first vector, then second vector
- let index = if offset { index + INPUT_LANES } else { index };
- assert!(index as u32 as usize == index);
- output[i] = index as u32;
- i += 1;
- }
- output
- };
+ /// Create a new mask from the elements of `first` and `second`.
+ ///
+ /// Element `i` of the output is `concat[Self::INDEX[i]]`, where `concat` is the concatenation of
+ /// `first` and `second`.
+ #[inline]
+ #[must_use = "method returns a new mask and does not mutate the original inputs"]
+ fn concat_swizzle_mask<T, const M: usize>(first: Mask<T, M>, second: Mask<T, M>) -> Mask<T, N>
+ where
+ T: MaskElement,
+ LaneCount<N>: SupportedLaneCount,
+ LaneCount<M>: SupportedLaneCount,
+ {
+ // SAFETY: all elements of this mask come from another mask
+ unsafe { Mask::from_int_unchecked(Self::concat_swizzle(first.to_int(), second.to_int())) }
+ }
}
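
Both new mask helpers reuse the vector swizzles by round-tripping through the mask's integer representation. A small usage sketch, assuming nightly `portable_simd` (`mask32x4` is `Mask<i32, 4>`):

```rust
#![feature(portable_simd)]
use core::simd::{mask32x4, Swizzle};

struct Reverse4;
impl Swizzle<4> for Reverse4 {
    const INDEX: [usize; 4] = [3, 2, 1, 0];
}

fn reverse_mask(m: mask32x4) -> mask32x4 {
    // Swizzles the mask via its integer representation, then converts back.
    Reverse4::swizzle_mask(m)
}
```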
-impl<T, const LANES: usize> Simd<T, LANES>
+impl<T, const N: usize> Simd<T, N>
where
T: SimdElement,
- LaneCount<LANES>: SupportedLaneCount,
+ LaneCount<N>: SupportedLaneCount,
{
- /// Reverse the order of the lanes in the vector.
+ /// Reverse the order of the elements in the vector.
#[inline]
#[must_use = "method returns a new vector and does not mutate the original inputs"]
pub fn reverse(self) -> Self {
- const fn reverse_index<const LANES: usize>() -> [usize; LANES] {
- let mut index = [0; LANES];
- let mut i = 0;
- while i < LANES {
- index[i] = LANES - i - 1;
- i += 1;
- }
- index
- }
-
struct Reverse;
- impl<const LANES: usize> Swizzle<LANES, LANES> for Reverse {
- const INDEX: [usize; LANES] = reverse_index::<LANES>();
+ impl<const N: usize> Swizzle<N> for Reverse {
+ const INDEX: [usize; N] = const {
+ let mut index = [0; N];
+ let mut i = 0;
+ while i < N {
+ index[i] = N - i - 1;
+ i += 1;
+ }
+ index
+ };
}
Reverse::swizzle(self)
}
/// Rotates the vector such that the first `OFFSET` elements of the slice move to the end
- /// while the last `LANES - OFFSET` elements move to the front. After calling `rotate_lanes_left`,
- /// the element previously in lane `OFFSET` will become the first element in the slice.
+ /// while the last `self.len() - OFFSET` elements move to the front. After calling `rotate_elements_left`,
+ /// the element previously at index `OFFSET` will become the first element in the vector.
#[inline]
#[must_use = "method returns a new vector and does not mutate the original inputs"]
- pub fn rotate_lanes_left<const OFFSET: usize>(self) -> Self {
- const fn rotate_index<const OFFSET: usize, const LANES: usize>() -> [usize; LANES] {
- let offset = OFFSET % LANES;
- let mut index = [0; LANES];
- let mut i = 0;
- while i < LANES {
- index[i] = (i + offset) % LANES;
- i += 1;
- }
- index
- }
-
+ pub fn rotate_elements_left<const OFFSET: usize>(self) -> Self {
struct Rotate<const OFFSET: usize>;
- impl<const OFFSET: usize, const LANES: usize> Swizzle<LANES, LANES> for Rotate<OFFSET> {
- const INDEX: [usize; LANES] = rotate_index::<OFFSET, LANES>();
+ impl<const OFFSET: usize, const N: usize> Swizzle<N> for Rotate<OFFSET> {
+ const INDEX: [usize; N] = const {
+ let offset = OFFSET % N;
+ let mut index = [0; N];
+ let mut i = 0;
+ while i < N {
+ index[i] = (i + offset) % N;
+ i += 1;
+ }
+ index
+ };
}
Rotate::<OFFSET>::swizzle(self)
}
- /// Rotates the vector such that the first `LANES - OFFSET` elements of the vector move to
- /// the end while the last `OFFSET` elements move to the front. After calling `rotate_lanes_right`,
- /// the element previously at index `LANES - OFFSET` will become the first element in the slice.
+ /// Rotates the vector such that the first `self.len() - OFFSET` elements of the vector move to
+ /// the end while the last `OFFSET` elements move to the front. After calling `rotate_elements_right`,
+ /// the element previously at index `self.len() - OFFSET` will become the first element in the vector.
#[inline]
#[must_use = "method returns a new vector and does not mutate the original inputs"]
- pub fn rotate_lanes_right<const OFFSET: usize>(self) -> Self {
- const fn rotate_index<const OFFSET: usize, const LANES: usize>() -> [usize; LANES] {
- let offset = LANES - OFFSET % LANES;
- let mut index = [0; LANES];
- let mut i = 0;
- while i < LANES {
- index[i] = (i + offset) % LANES;
- i += 1;
- }
- index
- }
-
+ pub fn rotate_elements_right<const OFFSET: usize>(self) -> Self {
struct Rotate<const OFFSET: usize>;
- impl<const OFFSET: usize, const LANES: usize> Swizzle<LANES, LANES> for Rotate<OFFSET> {
- const INDEX: [usize; LANES] = rotate_index::<OFFSET, LANES>();
+ impl<const OFFSET: usize, const N: usize> Swizzle<N> for Rotate<OFFSET> {
+ const INDEX: [usize; N] = const {
+ let offset = N - OFFSET % N;
+ let mut index = [0; N];
+ let mut i = 0;
+ while i < N {
+ index[i] = (i + offset) % N;
+ i += 1;
+ }
+ index
+ };
}
Rotate::<OFFSET>::swizzle(self)
@@ -265,7 +255,7 @@ where
/// Interleave two vectors.
///
- /// The resulting vectors contain lanes taken alternatively from `self` and `other`, first
+ /// The resulting vectors contain elements taken alternately from `self` and `other`, first
/// filling the first result, and then the second.
///
/// The reverse of this operation is [`Simd::deinterleave`].
@@ -282,18 +272,13 @@ where
#[inline]
#[must_use = "method returns a new vector and does not mutate the original inputs"]
pub fn interleave(self, other: Self) -> (Self, Self) {
- const fn interleave<const LANES: usize>(high: bool) -> [Which; LANES] {
- let mut idx = [Which::First(0); LANES];
+ const fn interleave<const N: usize>(high: bool) -> [usize; N] {
+ let mut idx = [0; N];
let mut i = 0;
- while i < LANES {
- // Treat the source as a concatenated vector
- let dst_index = if high { i + LANES } else { i };
- let src_index = dst_index / 2 + (dst_index % 2) * LANES;
- idx[i] = if src_index < LANES {
- Which::First(src_index)
- } else {
- Which::Second(src_index % LANES)
- };
+ while i < N {
+ let dst_index = if high { i + N } else { i };
+ let src_index = dst_index / 2 + (dst_index % 2) * N;
+ idx[i] = src_index;
i += 1;
}
idx
@@ -302,24 +287,27 @@ where
struct Lo;
struct Hi;
- impl<const LANES: usize> Swizzle2<LANES, LANES> for Lo {
- const INDEX: [Which; LANES] = interleave::<LANES>(false);
+ impl<const N: usize> Swizzle<N> for Lo {
+ const INDEX: [usize; N] = interleave::<N>(false);
}
- impl<const LANES: usize> Swizzle2<LANES, LANES> for Hi {
- const INDEX: [Which; LANES] = interleave::<LANES>(true);
+ impl<const N: usize> Swizzle<N> for Hi {
+ const INDEX: [usize; N] = interleave::<N>(true);
}
- (Lo::swizzle2(self, other), Hi::swizzle2(self, other))
+ (
+ Lo::concat_swizzle(self, other),
+ Hi::concat_swizzle(self, other),
+ )
}
/// Deinterleave two vectors.
///
- /// The first result takes every other lane of `self` and then `other`, starting with
- /// the first lane.
+ /// The first result takes every other element of `self` and then `other`, starting with
+ /// the first element.
///
- /// The second result takes every other lane of `self` and then `other`, starting with
- /// the second lane.
+ /// The second result takes every other element of `self` and then `other`, starting with
+ /// the second element.
///
/// The reverse of this operation is [`Simd::interleave`].
///
@@ -335,17 +323,11 @@ where
#[inline]
#[must_use = "method returns a new vector and does not mutate the original inputs"]
pub fn deinterleave(self, other: Self) -> (Self, Self) {
- const fn deinterleave<const LANES: usize>(second: bool) -> [Which; LANES] {
- let mut idx = [Which::First(0); LANES];
+ const fn deinterleave<const N: usize>(second: bool) -> [usize; N] {
+ let mut idx = [0; N];
let mut i = 0;
- while i < LANES {
- // Treat the source as a concatenated vector
- let src_index = i * 2 + second as usize;
- idx[i] = if src_index < LANES {
- Which::First(src_index)
- } else {
- Which::Second(src_index % LANES)
- };
+ while i < N {
+ idx[i] = i * 2 + second as usize;
i += 1;
}
idx
@@ -354,14 +336,52 @@ where
struct Even;
struct Odd;
- impl<const LANES: usize> Swizzle2<LANES, LANES> for Even {
- const INDEX: [Which; LANES] = deinterleave::<LANES>(false);
+ impl<const N: usize> Swizzle<N> for Even {
+ const INDEX: [usize; N] = deinterleave::<N>(false);
}
- impl<const LANES: usize> Swizzle2<LANES, LANES> for Odd {
- const INDEX: [Which; LANES] = deinterleave::<LANES>(true);
+ impl<const N: usize> Swizzle<N> for Odd {
+ const INDEX: [usize; N] = deinterleave::<N>(true);
}
- (Even::swizzle2(self, other), Odd::swizzle2(self, other))
+ (
+ Even::concat_swizzle(self, other),
+ Odd::concat_swizzle(self, other),
+ )
+ }
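
The two operations are inverses of each other; a brief round-trip sketch, assuming nightly `portable_simd`:

```rust
#![feature(portable_simd)]
use core::simd::u32x4;

fn roundtrip() {
    let a = u32x4::from_array([0, 1, 2, 3]);
    let b = u32x4::from_array([4, 5, 6, 7]);
    // Interleave alternates elements from `a` and `b`.
    let (lo, hi) = a.interleave(b);
    assert_eq!(lo.to_array(), [0, 4, 1, 5]);
    assert_eq!(hi.to_array(), [2, 6, 3, 7]);
    // Deinterleave undoes it.
    let (x, y) = lo.deinterleave(hi);
    assert_eq!(x, a);
    assert_eq!(y, b);
}
```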
+
+ /// Resize a vector.
+ ///
+ /// If `M` > `N`, extends the length of the vector, setting the new elements to `value`.
+ /// If `M` < `N`, truncates the vector to the first `M` elements.
+ ///
+ /// ```
+ /// # #![feature(portable_simd)]
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd;
+ /// # use simd::u32x4;
+ /// let x = u32x4::from_array([0, 1, 2, 3]);
+ /// assert_eq!(x.resize::<8>(9).to_array(), [0, 1, 2, 3, 9, 9, 9, 9]);
+ /// assert_eq!(x.resize::<2>(9).to_array(), [0, 1]);
+ /// ```
+ #[inline]
+ #[must_use = "method returns a new vector and does not mutate the original inputs"]
+ pub fn resize<const M: usize>(self, value: T) -> Simd<T, M>
+ where
+ LaneCount<M>: SupportedLaneCount,
+ {
+ struct Resize<const N: usize>;
+ impl<const N: usize, const M: usize> Swizzle<M> for Resize<N> {
+ const INDEX: [usize; M] = const {
+ let mut index = [0; M];
+ let mut i = 0;
+ while i < M {
+ index[i] = if i < N { i } else { N };
+ i += 1;
+ }
+ index
+ };
+ }
+ Resize::<N>::concat_swizzle(self, Simd::splat(value))
}
}
diff --git a/library/portable-simd/crates/core_simd/src/swizzle_dyn.rs b/library/portable-simd/crates/core_simd/src/swizzle_dyn.rs
index ce6217925..dac013cc9 100644
--- a/library/portable-simd/crates/core_simd/src/swizzle_dyn.rs
+++ b/library/portable-simd/crates/core_simd/src/swizzle_dyn.rs
@@ -55,7 +55,7 @@ where
16 => transize(vqtbl1q_u8, self, idxs),
#[cfg(all(target_feature = "avx2", not(target_feature = "avx512vbmi")))]
32 => transize_raw(avx2_pshufb, self, idxs),
- #[cfg(target_feature = "avx512vl,avx512vbmi")]
+ #[cfg(all(target_feature = "avx512vl", target_feature = "avx512vbmi"))]
32 => transize(x86::_mm256_permutexvar_epi8, self, idxs),
// Notable absence: avx512bw shuffle
// If avx512bw is available, odds of avx512vbmi are good
@@ -86,7 +86,7 @@ where
#[inline]
#[allow(clippy::let_and_return)]
unsafe fn avx2_pshufb(bytes: Simd<u8, 32>, idxs: Simd<u8, 32>) -> Simd<u8, 32> {
- use crate::simd::SimdPartialOrd;
+ use crate::simd::cmp::SimdPartialOrd;
#[cfg(target_arch = "x86")]
use core::arch::x86;
#[cfg(target_arch = "x86_64")]
@@ -149,7 +149,7 @@ where
// On x86, make sure the top bit is set.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
let idxs = {
- use crate::simd::SimdPartialOrd;
+ use crate::simd::cmp::SimdPartialOrd;
idxs.simd_lt(Simd::splat(N as u8))
.select(idxs, Simd::splat(u8::MAX))
};
diff --git a/library/portable-simd/crates/core_simd/src/to_bytes.rs b/library/portable-simd/crates/core_simd/src/to_bytes.rs
index b36b1a347..222526c4a 100644
--- a/library/portable-simd/crates/core_simd/src/to_bytes.rs
+++ b/library/portable-simd/crates/core_simd/src/to_bytes.rs
@@ -1,24 +1,125 @@
+use crate::simd::{
+ num::{SimdFloat, SimdInt, SimdUint},
+ LaneCount, Simd, SimdElement, SupportedLaneCount,
+};
+
+mod sealed {
+ use super::*;
+ pub trait Sealed {}
+ impl<T: SimdElement, const N: usize> Sealed for Simd<T, N> where LaneCount<N>: SupportedLaneCount {}
+}
+use sealed::Sealed;
+
+/// Converts SIMD vectors to and from vectors of bytes.
+pub trait ToBytes: Sealed {
+ /// This type, reinterpreted as bytes.
+ type Bytes: Copy
+ + Unpin
+ + Send
+ + Sync
+ + AsRef<[u8]>
+ + AsMut<[u8]>
+ + SimdUint<Scalar = u8>
+ + 'static;
+
+ /// Return the memory representation of this vector as a byte array in native byte
+ /// order.
+ fn to_ne_bytes(self) -> Self::Bytes;
+
+ /// Return the memory representation of this vector as a byte array in big-endian
+ /// (network) byte order.
+ fn to_be_bytes(self) -> Self::Bytes;
+
+ /// Return the memory representation of this vector as a byte array in little-endian
+ /// byte order.
+ fn to_le_bytes(self) -> Self::Bytes;
+
+ /// Create a native endian vector from its memory representation as a byte array
+ /// in native endianness.
+ fn from_ne_bytes(bytes: Self::Bytes) -> Self;
+
+ /// Create a vector from its representation as a byte array in big endian.
+ fn from_be_bytes(bytes: Self::Bytes) -> Self;
+
+ /// Create a vector from its representation as a byte array in little endian.
+ fn from_le_bytes(bytes: Self::Bytes) -> Self;
+}
+
+macro_rules! swap_bytes {
+ { f32, $x:expr } => { Simd::from_bits($x.to_bits().swap_bytes()) };
+ { f64, $x:expr } => { Simd::from_bits($x.to_bits().swap_bytes()) };
+ { $ty:ty, $x:expr } => { $x.swap_bytes() }
+}
+
macro_rules! impl_to_bytes {
- { $ty:ty, $size:literal } => {
- impl<const LANES: usize> crate::simd::Simd<$ty, LANES>
- where
- crate::simd::LaneCount<LANES>: crate::simd::SupportedLaneCount,
- crate::simd::LaneCount<{{ $size * LANES }}>: crate::simd::SupportedLaneCount,
- {
- /// Return the memory representation of this integer as a byte array in native byte
- /// order.
- pub fn to_ne_bytes(self) -> crate::simd::Simd<u8, {{ $size * LANES }}> {
+ { $ty:tt, 1 } => { impl_to_bytes! { $ty, 1 * [1, 2, 4, 8, 16, 32, 64] } };
+ { $ty:tt, 2 } => { impl_to_bytes! { $ty, 2 * [1, 2, 4, 8, 16, 32] } };
+ { $ty:tt, 4 } => { impl_to_bytes! { $ty, 4 * [1, 2, 4, 8, 16] } };
+ { $ty:tt, 8 } => { impl_to_bytes! { $ty, 8 * [1, 2, 4, 8] } };
+ { $ty:tt, 16 } => { impl_to_bytes! { $ty, 16 * [1, 2, 4] } };
+ { $ty:tt, 32 } => { impl_to_bytes! { $ty, 32 * [1, 2] } };
+ { $ty:tt, 64 } => { impl_to_bytes! { $ty, 64 * [1] } };
+
+ { $ty:tt, $size:literal * [$($elems:literal),*] } => {
+ $(
+ impl ToBytes for Simd<$ty, $elems> {
+ type Bytes = Simd<u8, { $size * $elems }>;
+
+ #[inline]
+ fn to_ne_bytes(self) -> Self::Bytes {
// Safety: transmuting between vectors is safe
- unsafe { core::mem::transmute_copy(&self) }
+ unsafe {
+ #![allow(clippy::useless_transmute)]
+ core::mem::transmute(self)
+ }
+ }
+
+ #[inline]
+ fn to_be_bytes(mut self) -> Self::Bytes {
+ if !cfg!(target_endian = "big") {
+ self = swap_bytes!($ty, self);
+ }
+ self.to_ne_bytes()
+ }
+
+ #[inline]
+ fn to_le_bytes(mut self) -> Self::Bytes {
+ if !cfg!(target_endian = "little") {
+ self = swap_bytes!($ty, self);
+ }
+ self.to_ne_bytes()
}
- /// Create a native endian integer value from its memory representation as a byte array
- /// in native endianness.
- pub fn from_ne_bytes(bytes: crate::simd::Simd<u8, {{ $size * LANES }}>) -> Self {
+ #[inline]
+ fn from_ne_bytes(bytes: Self::Bytes) -> Self {
// Safety: transmuting between vectors is safe
- unsafe { core::mem::transmute_copy(&bytes) }
+ unsafe {
+ #![allow(clippy::useless_transmute)]
+ core::mem::transmute(bytes)
+ }
+ }
+
+ #[inline]
+ fn from_be_bytes(bytes: Self::Bytes) -> Self {
+ let ret = Self::from_ne_bytes(bytes);
+ if cfg!(target_endian = "big") {
+ ret
+ } else {
+ swap_bytes!($ty, ret)
+ }
+ }
+
+ #[inline]
+ fn from_le_bytes(bytes: Self::Bytes) -> Self {
+ let ret = Self::from_ne_bytes(bytes);
+ if cfg!(target_endian = "little") {
+ ret
+ } else {
+ swap_bytes!($ty, ret)
+ }
}
}
+ )*
}
}
@@ -39,3 +140,6 @@ impl_to_bytes! { i64, 8 }
impl_to_bytes! { isize, 4 }
#[cfg(target_pointer_width = "64")]
impl_to_bytes! { isize, 8 }
+
+impl_to_bytes! { f32, 4 }
+impl_to_bytes! { f64, 8 }
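
A short usage sketch of the new trait, assuming nightly `portable_simd` with `ToBytes` exported from the `simd` module as in the updated tests below:

```rust
#![feature(portable_simd)]
use core::simd::{Simd, ToBytes};

fn endianness_roundtrip() {
    let x = Simd::<u16, 4>::from_array([0x0102, 0x0304, 0x0506, 0x0708]);
    // Big-endian output byte-swaps each element first on little-endian targets.
    assert_eq!(
        x.to_be_bytes().to_array(),
        [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08]
    );
    assert_eq!(Simd::<u16, 4>::from_be_bytes(x.to_be_bytes()), x);
}
```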
diff --git a/library/portable-simd/crates/core_simd/src/vector.rs b/library/portable-simd/crates/core_simd/src/vector.rs
index 9aa7bacfc..105c06741 100644
--- a/library/portable-simd/crates/core_simd/src/vector.rs
+++ b/library/portable-simd/crates/core_simd/src/vector.rs
@@ -1,6 +1,8 @@
use crate::simd::{
- intrinsics, LaneCount, Mask, MaskElement, SimdConstPtr, SimdMutPtr, SimdPartialOrd,
- SupportedLaneCount, Swizzle,
+ cmp::SimdPartialOrd,
+ intrinsics,
+ ptr::{SimdConstPtr, SimdMutPtr},
+ LaneCount, Mask, MaskElement, SupportedLaneCount, Swizzle,
};
use core::convert::{TryFrom, TryInto};
@@ -110,7 +112,7 @@ where
T: SimdElement,
{
/// Number of elements in this vector.
- pub const LANES: usize = N;
+ pub const LEN: usize = N;
/// Returns the number of elements in this SIMD vector.
///
@@ -118,13 +120,16 @@ where
///
/// ```
/// # #![feature(portable_simd)]
- /// # use core::simd::u32x4;
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd;
+ /// # use simd::u32x4;
/// let v = u32x4::splat(0);
- /// assert_eq!(v.lanes(), 4);
+ /// assert_eq!(v.len(), 4);
/// ```
#[inline]
- pub const fn lanes(&self) -> usize {
- Self::LANES
+ #[allow(clippy::len_without_is_empty)]
+ pub const fn len(&self) -> usize {
+ Self::LEN
}
/// Constructs a new SIMD vector with all elements set to the given value.
@@ -133,7 +138,9 @@ where
///
/// ```
/// # #![feature(portable_simd)]
- /// # use core::simd::u32x4;
+ /// # #[cfg(feature = "as_crate")] use core_simd::simd;
+ /// # #[cfg(not(feature = "as_crate"))] use core::simd;
+ /// # use simd::u32x4;
/// let v = u32x4::splat(8);
/// assert_eq!(v.as_array(), &[8, 8, 8, 8]);
/// ```
@@ -142,10 +149,10 @@ where
// This is preferred over `[value; N]`, since it's explicitly a splat:
// https://github.com/rust-lang/rust/issues/97804
struct Splat;
- impl<const N: usize> Swizzle<1, N> for Splat {
+ impl<const N: usize> Swizzle<N> for Splat {
const INDEX: [usize; N] = [0; N];
}
- Splat::swizzle(Simd::<T, 1>::from([value]))
+ Splat::swizzle::<T, 1>(Simd::<T, 1>::from([value]))
}
/// Returns an array reference containing the entire SIMD vector.
@@ -271,7 +278,7 @@ where
#[track_caller]
pub const fn from_slice(slice: &[T]) -> Self {
assert!(
- slice.len() >= Self::LANES,
+ slice.len() >= Self::LEN,
"slice length must be at least the number of elements"
);
// SAFETY: We just checked that the slice contains
@@ -301,7 +308,7 @@ where
#[track_caller]
pub fn copy_to_slice(self, slice: &mut [T]) {
assert!(
- slice.len() >= Self::LANES,
+ slice.len() >= Self::LEN,
"slice length must be at least the number of elements"
);
// SAFETY: We just checked that the slice contains
@@ -394,7 +401,7 @@ where
/// # #![feature(portable_simd)]
/// # #[cfg(feature = "as_crate")] use core_simd::simd;
/// # #[cfg(not(feature = "as_crate"))] use core::simd;
- /// # use simd::{Simd, SimdPartialOrd, Mask};
+ /// # use simd::{Simd, cmp::SimdPartialOrd, Mask};
/// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
/// let idxs = Simd::from_array([9, 3, 0, 5]); // Includes an out-of-bounds index
/// let alt = Simd::from_array([-5, -4, -3, -2]);
@@ -434,7 +441,7 @@ where
/// # #![feature(portable_simd)]
/// # #[cfg(feature = "as_crate")] use core_simd::simd;
/// # #[cfg(not(feature = "as_crate"))] use core::simd;
- /// # use simd::{Simd, SimdConstPtr};
+ /// # use simd::prelude::*;
/// let values = [6, 2, 4, 9];
/// let offsets = Simd::from_array([1, 0, 0, 3]);
/// let source = Simd::splat(values.as_ptr()).wrapping_add(offsets);
@@ -467,7 +474,7 @@ where
/// # #![feature(portable_simd)]
/// # #[cfg(feature = "as_crate")] use core_simd::simd;
/// # #[cfg(not(feature = "as_crate"))] use core::simd;
- /// # use simd::{Mask, Simd, SimdConstPtr};
+ /// # use simd::prelude::*;
/// let values = [6, 2, 4, 9];
/// let enable = Mask::from_array([true, true, false, true]);
/// let offsets = Simd::from_array([1, 0, 0, 3]);
@@ -550,7 +557,7 @@ where
/// # #![feature(portable_simd)]
/// # #[cfg(feature = "as_crate")] use core_simd::simd;
/// # #[cfg(not(feature = "as_crate"))] use core::simd;
- /// # use simd::{Simd, SimdPartialOrd, Mask};
+ /// # use simd::{Simd, cmp::SimdPartialOrd, Mask};
/// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
/// let idxs = Simd::from_array([9, 3, 0, 0]);
/// let vals = Simd::from_array([-27, 82, -41, 124]);
@@ -604,7 +611,7 @@ where
/// # #![feature(portable_simd)]
/// # #[cfg(feature = "as_crate")] use core_simd::simd;
/// # #[cfg(not(feature = "as_crate"))] use core::simd;
- /// # use simd::{Simd, SimdMutPtr};
+ /// # use simd::{Simd, ptr::SimdMutPtr};
/// let mut values = [0; 4];
/// let offset = Simd::from_array([3, 2, 1, 0]);
/// let ptrs = Simd::splat(values.as_mut_ptr()).wrapping_add(offset);
@@ -631,7 +638,7 @@ where
/// # #![feature(portable_simd)]
/// # #[cfg(feature = "as_crate")] use core_simd::simd;
/// # #[cfg(not(feature = "as_crate"))] use core::simd;
- /// # use simd::{Mask, Simd, SimdMutPtr};
+ /// # use simd::{Mask, Simd, ptr::SimdMutPtr};
/// let mut values = [0; 4];
/// let offset = Simd::from_array([3, 2, 1, 0]);
/// let ptrs = Simd::splat(values.as_mut_ptr()).wrapping_add(offset);
diff --git a/library/portable-simd/crates/core_simd/src/vendor.rs b/library/portable-simd/crates/core_simd/src/vendor.rs
index 9fb70218c..6223bedb4 100644
--- a/library/portable-simd/crates/core_simd/src/vendor.rs
+++ b/library/portable-simd/crates/core_simd/src/vendor.rs
@@ -21,7 +21,7 @@ macro_rules! from_transmute {
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
mod x86;
-#[cfg(any(target_arch = "wasm32"))]
+#[cfg(target_arch = "wasm32")]
mod wasm32;
#[cfg(any(target_arch = "aarch64", target_arch = "arm",))]
diff --git a/library/portable-simd/crates/core_simd/src/vendor/x86.rs b/library/portable-simd/crates/core_simd/src/vendor/x86.rs
index 0dd47015e..66aaf90ee 100644
--- a/library/portable-simd/crates/core_simd/src/vendor/x86.rs
+++ b/library/portable-simd/crates/core_simd/src/vendor/x86.rs
@@ -1,6 +1,6 @@
use crate::simd::*;
-#[cfg(any(target_arch = "x86"))]
+#[cfg(target_arch = "x86")]
use core::arch::x86::*;
#[cfg(target_arch = "x86_64")]
diff --git a/library/portable-simd/crates/core_simd/tests/cast.rs b/library/portable-simd/crates/core_simd/tests/cast.rs
index 00545936e..185e1945f 100644
--- a/library/portable-simd/crates/core_simd/tests/cast.rs
+++ b/library/portable-simd/crates/core_simd/tests/cast.rs
@@ -3,7 +3,7 @@ macro_rules! cast_types {
($start:ident, $($target:ident),*) => {
mod $start {
#[allow(unused)]
- use core_simd::simd::{Simd, SimdInt, SimdUint, SimdFloat};
+ use core_simd::simd::prelude::*;
type Vector<const N: usize> = Simd<$start, N>;
$(
mod $target {
diff --git a/library/portable-simd/crates/core_simd/tests/masks.rs b/library/portable-simd/crates/core_simd/tests/masks.rs
index 9f8bad1c3..00fc2a24e 100644
--- a/library/portable-simd/crates/core_simd/tests/masks.rs
+++ b/library/portable-simd/crates/core_simd/tests/masks.rs
@@ -72,7 +72,6 @@ macro_rules! test_mask_api {
#[test]
fn roundtrip_bitmask_conversion() {
- use core_simd::simd::ToBitMask;
let values = [
true, false, false, true, false, false, true, false,
true, true, false, false, false, false, false, true,
@@ -85,8 +84,6 @@ macro_rules! test_mask_api {
#[test]
fn roundtrip_bitmask_conversion_short() {
- use core_simd::simd::ToBitMask;
-
let values = [
false, false, false, true,
];
@@ -125,18 +122,17 @@ macro_rules! test_mask_api {
cast_impl::<isize>();
}
- #[cfg(feature = "generic_const_exprs")]
#[test]
- fn roundtrip_bitmask_array_conversion() {
- use core_simd::simd::ToBitMaskArray;
+ fn roundtrip_bitmask_vector_conversion() {
+ use core_simd::simd::ToBytes;
let values = [
true, false, false, true, false, false, true, false,
true, true, false, false, false, false, false, true,
];
let mask = Mask::<$type, 16>::from_array(values);
- let bitmask = mask.to_bitmask_array();
- assert_eq!(bitmask, [0b01001001, 0b10000011]);
- assert_eq!(Mask::<$type, 16>::from_bitmask_array(bitmask), mask);
+ let bitmask = mask.to_bitmask_vector();
+ assert_eq!(bitmask.resize::<2>(0).to_ne_bytes()[..2], [0b01001001, 0b10000011]);
+ assert_eq!(Mask::<$type, 16>::from_bitmask_vector(bitmask), mask);
}
}
}
diff --git a/library/portable-simd/crates/core_simd/tests/ops_macros.rs b/library/portable-simd/crates/core_simd/tests/ops_macros.rs
index 3a02f3f01..aa565a137 100644
--- a/library/portable-simd/crates/core_simd/tests/ops_macros.rs
+++ b/library/portable-simd/crates/core_simd/tests/ops_macros.rs
@@ -6,7 +6,7 @@ macro_rules! impl_unary_op_test {
{ $scalar:ty, $trait:ident :: $fn:ident, $scalar_fn:expr } => {
test_helpers::test_lanes! {
fn $fn<const LANES: usize>() {
- test_helpers::test_unary_elementwise(
+ test_helpers::test_unary_elementwise_flush_subnormals(
&<core_simd::simd::Simd<$scalar, LANES> as core::ops::$trait>::$fn,
&$scalar_fn,
&|_| true,
@@ -31,7 +31,7 @@ macro_rules! impl_binary_op_test {
test_helpers::test_lanes! {
fn normal<const LANES: usize>() {
- test_helpers::test_binary_elementwise(
+ test_helpers::test_binary_elementwise_flush_subnormals(
&<Simd<$scalar, LANES> as core::ops::$trait>::$fn,
&$scalar_fn,
&|_, _| true,
@@ -39,7 +39,7 @@ macro_rules! impl_binary_op_test {
}
fn assign<const LANES: usize>() {
- test_helpers::test_binary_elementwise(
+ test_helpers::test_binary_elementwise_flush_subnormals(
&|mut a, b| { <Simd<$scalar, LANES> as core::ops::$trait_assign>::$fn_assign(&mut a, b); a },
&$scalar_fn,
&|_, _| true,
@@ -68,6 +68,7 @@ macro_rules! impl_binary_checked_op_test {
test_helpers::test_lanes! {
fn normal<const LANES: usize>() {
+ #![allow(clippy::redundant_closure_call)]
test_helpers::test_binary_elementwise(
&<Simd<$scalar, LANES> as core::ops::$trait>::$fn,
&$scalar_fn,
@@ -76,6 +77,7 @@ macro_rules! impl_binary_checked_op_test {
}
fn assign<const LANES: usize>() {
+ #![allow(clippy::redundant_closure_call)]
test_helpers::test_binary_elementwise(
&|mut a, b| { <Simd<$scalar, LANES> as core::ops::$trait_assign>::$fn_assign(&mut a, b); a },
&$scalar_fn,
@@ -94,11 +96,43 @@ macro_rules! impl_binary_checked_op_test {
macro_rules! impl_common_integer_tests {
{ $vector:ident, $scalar:ident } => {
test_helpers::test_lanes! {
+ fn shr<const LANES: usize>() {
+ use core::ops::Shr;
+ let shr = |x: $scalar, y: $scalar| x.wrapping_shr(y as _);
+ test_helpers::test_binary_elementwise(
+ &<$vector::<LANES> as Shr<$vector::<LANES>>>::shr,
+ &shr,
+ &|_, _| true,
+ );
+ test_helpers::test_binary_scalar_rhs_elementwise(
+ &<$vector::<LANES> as Shr<$scalar>>::shr,
+ &shr,
+ &|_, _| true,
+ );
+ }
+
+ fn shl<const LANES: usize>() {
+ use core::ops::Shl;
+ let shl = |x: $scalar, y: $scalar| x.wrapping_shl(y as _);
+ test_helpers::test_binary_elementwise(
+ &<$vector::<LANES> as Shl<$vector::<LANES>>>::shl,
+ &shl,
+ &|_, _| true,
+ );
+ test_helpers::test_binary_scalar_rhs_elementwise(
+ &<$vector::<LANES> as Shl<$scalar>>::shl,
+ &shl,
+ &|_, _| true,
+ );
+ }
+
fn reduce_sum<const LANES: usize>() {
test_helpers::test_1(&|x| {
+ use test_helpers::subnormals::{flush, flush_in};
test_helpers::prop_assert_biteq! (
$vector::<LANES>::from_array(x).reduce_sum(),
x.iter().copied().fold(0 as $scalar, $scalar::wrapping_add),
+ flush(x.iter().copied().map(flush_in).fold(0 as $scalar, $scalar::wrapping_add)),
);
Ok(())
});
@@ -106,9 +140,11 @@ macro_rules! impl_common_integer_tests {
fn reduce_product<const LANES: usize>() {
test_helpers::test_1(&|x| {
+ use test_helpers::subnormals::{flush, flush_in};
test_helpers::prop_assert_biteq! (
$vector::<LANES>::from_array(x).reduce_product(),
x.iter().copied().fold(1 as $scalar, $scalar::wrapping_mul),
+ flush(x.iter().copied().map(flush_in).fold(1 as $scalar, $scalar::wrapping_mul)),
);
Ok(())
});
@@ -163,6 +199,54 @@ macro_rules! impl_common_integer_tests {
Ok(())
});
}
+
+ fn swap_bytes<const LANES: usize>() {
+ test_helpers::test_unary_elementwise(
+ &$vector::<LANES>::swap_bytes,
+ &$scalar::swap_bytes,
+ &|_| true,
+ )
+ }
+
+ fn reverse_bits<const LANES: usize>() {
+ test_helpers::test_unary_elementwise(
+ &$vector::<LANES>::reverse_bits,
+ &$scalar::reverse_bits,
+ &|_| true,
+ )
+ }
+
+ fn leading_zeros<const LANES: usize>() {
+ test_helpers::test_unary_elementwise(
+ &$vector::<LANES>::leading_zeros,
+ &|x| x.leading_zeros() as _,
+ &|_| true,
+ )
+ }
+
+ fn trailing_zeros<const LANES: usize>() {
+ test_helpers::test_unary_elementwise(
+ &$vector::<LANES>::trailing_zeros,
+ &|x| x.trailing_zeros() as _,
+ &|_| true,
+ )
+ }
+
+ fn leading_ones<const LANES: usize>() {
+ test_helpers::test_unary_elementwise(
+ &$vector::<LANES>::leading_ones,
+ &|x| x.leading_ones() as _,
+ &|_| true,
+ )
+ }
+
+ fn trailing_ones<const LANES: usize>() {
+ test_helpers::test_unary_elementwise(
+ &$vector::<LANES>::trailing_ones,
+ &|x| x.trailing_ones() as _,
+ &|_| true,
+ )
+ }
}
}
}
@@ -172,7 +256,7 @@ macro_rules! impl_common_integer_tests {
macro_rules! impl_signed_tests {
{ $scalar:tt } => {
mod $scalar {
- use core_simd::simd::SimdInt;
+ use core_simd::simd::num::SimdInt;
type Vector<const LANES: usize> = core_simd::simd::Simd<Scalar, LANES>;
type Scalar = $scalar;
@@ -224,7 +308,7 @@ macro_rules! impl_signed_tests {
}
fn simd_min<const LANES: usize>() {
- use core_simd::simd::SimdOrd;
+ use core_simd::simd::cmp::SimdOrd;
let a = Vector::<LANES>::splat(Scalar::MIN);
let b = Vector::<LANES>::splat(0);
assert_eq!(a.simd_min(b), a);
@@ -234,7 +318,7 @@ macro_rules! impl_signed_tests {
}
fn simd_max<const LANES: usize>() {
- use core_simd::simd::SimdOrd;
+ use core_simd::simd::cmp::SimdOrd;
let a = Vector::<LANES>::splat(Scalar::MIN);
let b = Vector::<LANES>::splat(0);
assert_eq!(a.simd_max(b), b);
@@ -244,7 +328,7 @@ macro_rules! impl_signed_tests {
}
fn simd_clamp<const LANES: usize>() {
- use core_simd::simd::SimdOrd;
+ use core_simd::simd::cmp::SimdOrd;
let min = Vector::<LANES>::splat(Scalar::MIN);
let max = Vector::<LANES>::splat(Scalar::MAX);
let zero = Vector::<LANES>::splat(0);
@@ -313,7 +397,7 @@ macro_rules! impl_signed_tests {
macro_rules! impl_unsigned_tests {
{ $scalar:tt } => {
mod $scalar {
- use core_simd::simd::SimdUint;
+ use core_simd::simd::num::SimdUint;
type Vector<const LANES: usize> = core_simd::simd::Simd<Scalar, LANES>;
type Scalar = $scalar;
@@ -327,6 +411,16 @@ macro_rules! impl_unsigned_tests {
}
}
+ test_helpers::test_lanes! {
+ fn wrapping_neg<const LANES: usize>() {
+ test_helpers::test_unary_elementwise(
+ &Vector::<LANES>::wrapping_neg,
+ &Scalar::wrapping_neg,
+ &|_| true,
+ );
+ }
+ }
+
impl_binary_op_test!(Scalar, Add::add, AddAssign::add_assign, Scalar::wrapping_add);
impl_binary_op_test!(Scalar, Sub::sub, SubAssign::sub_assign, Scalar::wrapping_sub);
impl_binary_op_test!(Scalar, Mul::mul, MulAssign::mul_assign, Scalar::wrapping_mul);
@@ -348,7 +442,7 @@ macro_rules! impl_unsigned_tests {
macro_rules! impl_float_tests {
{ $scalar:tt, $int_scalar:tt } => {
mod $scalar {
- use core_simd::simd::SimdFloat;
+ use core_simd::simd::num::SimdFloat;
type Vector<const LANES: usize> = core_simd::simd::Simd<Scalar, LANES>;
type Scalar = $scalar;
@@ -433,7 +527,7 @@ macro_rules! impl_float_tests {
}
fn to_degrees<const LANES: usize>() {
- test_helpers::test_unary_elementwise(
+ test_helpers::test_unary_elementwise_flush_subnormals(
&Vector::<LANES>::to_degrees,
&Scalar::to_degrees,
&|_| true,
@@ -441,7 +535,7 @@ macro_rules! impl_float_tests {
}
fn to_radians<const LANES: usize>() {
- test_helpers::test_unary_elementwise(
+ test_helpers::test_unary_elementwise_flush_subnormals(
&Vector::<LANES>::to_radians,
&Scalar::to_radians,
&|_| true,
@@ -511,7 +605,12 @@ macro_rules! impl_float_tests {
}
fn simd_clamp<const LANES: usize>() {
+ if cfg!(all(target_arch = "powerpc64", target_feature = "vsx")) {
+ // https://gitlab.com/qemu-project/qemu/-/issues/1780
+ return;
+ }
test_helpers::test_3(&|value: [Scalar; LANES], mut min: [Scalar; LANES], mut max: [Scalar; LANES]| {
+ use test_helpers::subnormals::flush_in;
for (min, max) in min.iter_mut().zip(max.iter_mut()) {
if max < min {
core::mem::swap(min, max);
@@ -528,8 +627,20 @@ macro_rules! impl_float_tests {
for i in 0..LANES {
result_scalar[i] = value[i].clamp(min[i], max[i]);
}
+ let mut result_scalar_flush = [Scalar::default(); LANES];
+ for i in 0..LANES {
+ // Comparisons flush-to-zero, but return value selection is _not_ flushed.
+ let mut value = value[i];
+ if flush_in(value) < flush_in(min[i]) {
+ value = min[i];
+ }
+ if flush_in(value) > flush_in(max[i]) {
+ value = max[i];
+ }
+ result_scalar_flush[i] = value;
+ }
let result_vector = Vector::from_array(value).simd_clamp(min.into(), max.into()).to_array();
- test_helpers::prop_assert_biteq!(result_scalar, result_vector);
+ test_helpers::prop_assert_biteq!(result_vector, result_scalar, result_scalar_flush);
Ok(())
})
}
diff --git a/library/portable-simd/crates/core_simd/tests/pointers.rs b/library/portable-simd/crates/core_simd/tests/pointers.rs
index 0ae8f83b8..b9f32d16e 100644
--- a/library/portable-simd/crates/core_simd/tests/pointers.rs
+++ b/library/portable-simd/crates/core_simd/tests/pointers.rs
@@ -1,6 +1,9 @@
-#![feature(portable_simd, strict_provenance)]
+#![feature(portable_simd, strict_provenance, exposed_provenance)]
-use core_simd::simd::{Simd, SimdConstPtr, SimdMutPtr};
+use core_simd::simd::{
+ ptr::{SimdConstPtr, SimdMutPtr},
+ Simd,
+};
macro_rules! common_tests {
{ $constness:ident } => {
diff --git a/library/portable-simd/crates/core_simd/tests/round.rs b/library/portable-simd/crates/core_simd/tests/round.rs
index aacf7bd3b..847766ec4 100644
--- a/library/portable-simd/crates/core_simd/tests/round.rs
+++ b/library/portable-simd/crates/core_simd/tests/round.rs
@@ -43,7 +43,7 @@ macro_rules! float_rounding_test {
}
fn fract<const LANES: usize>() {
- test_helpers::test_unary_elementwise(
+ test_helpers::test_unary_elementwise_flush_subnormals(
&Vector::<LANES>::fract,
&Scalar::fract,
&|_| true,
@@ -53,7 +53,7 @@ macro_rules! float_rounding_test {
test_helpers::test_lanes! {
fn to_int_unchecked<const LANES: usize>() {
- use core_simd::simd::SimdFloat;
+ use core_simd::simd::num::SimdFloat;
// The maximum integer that can be represented by the equivalently sized float has
// all of the mantissa digits set to 1, pushed up to the MSB.
const ALL_MANTISSA_BITS: IntScalar = ((1 << <Scalar>::MANTISSA_DIGITS) - 1);
diff --git a/library/portable-simd/crates/core_simd/tests/swizzle.rs b/library/portable-simd/crates/core_simd/tests/swizzle.rs
index 8cd7c33e8..522d71439 100644
--- a/library/portable-simd/crates/core_simd/tests/swizzle.rs
+++ b/library/portable-simd/crates/core_simd/tests/swizzle.rs
@@ -11,10 +11,10 @@ wasm_bindgen_test_configure!(run_in_browser);
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn swizzle() {
struct Index;
- impl Swizzle<4, 4> for Index {
+ impl Swizzle<4> for Index {
const INDEX: [usize; 4] = [2, 1, 3, 0];
}
- impl Swizzle<4, 2> for Index {
+ impl Swizzle<2> for Index {
const INDEX: [usize; 2] = [1, 1];
}
@@ -34,18 +34,18 @@ fn reverse() {
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn rotate() {
let a = Simd::from_array([1, 2, 3, 4]);
- assert_eq!(a.rotate_lanes_left::<0>().to_array(), [1, 2, 3, 4]);
- assert_eq!(a.rotate_lanes_left::<1>().to_array(), [2, 3, 4, 1]);
- assert_eq!(a.rotate_lanes_left::<2>().to_array(), [3, 4, 1, 2]);
- assert_eq!(a.rotate_lanes_left::<3>().to_array(), [4, 1, 2, 3]);
- assert_eq!(a.rotate_lanes_left::<4>().to_array(), [1, 2, 3, 4]);
- assert_eq!(a.rotate_lanes_left::<5>().to_array(), [2, 3, 4, 1]);
- assert_eq!(a.rotate_lanes_right::<0>().to_array(), [1, 2, 3, 4]);
- assert_eq!(a.rotate_lanes_right::<1>().to_array(), [4, 1, 2, 3]);
- assert_eq!(a.rotate_lanes_right::<2>().to_array(), [3, 4, 1, 2]);
- assert_eq!(a.rotate_lanes_right::<3>().to_array(), [2, 3, 4, 1]);
- assert_eq!(a.rotate_lanes_right::<4>().to_array(), [1, 2, 3, 4]);
- assert_eq!(a.rotate_lanes_right::<5>().to_array(), [4, 1, 2, 3]);
+ assert_eq!(a.rotate_elements_left::<0>().to_array(), [1, 2, 3, 4]);
+ assert_eq!(a.rotate_elements_left::<1>().to_array(), [2, 3, 4, 1]);
+ assert_eq!(a.rotate_elements_left::<2>().to_array(), [3, 4, 1, 2]);
+ assert_eq!(a.rotate_elements_left::<3>().to_array(), [4, 1, 2, 3]);
+ assert_eq!(a.rotate_elements_left::<4>().to_array(), [1, 2, 3, 4]);
+ assert_eq!(a.rotate_elements_left::<5>().to_array(), [2, 3, 4, 1]);
+ assert_eq!(a.rotate_elements_right::<0>().to_array(), [1, 2, 3, 4]);
+ assert_eq!(a.rotate_elements_right::<1>().to_array(), [4, 1, 2, 3]);
+ assert_eq!(a.rotate_elements_right::<2>().to_array(), [3, 4, 1, 2]);
+ assert_eq!(a.rotate_elements_right::<3>().to_array(), [2, 3, 4, 1]);
+ assert_eq!(a.rotate_elements_right::<4>().to_array(), [1, 2, 3, 4]);
+ assert_eq!(a.rotate_elements_right::<5>().to_array(), [4, 1, 2, 3]);
}
#[test]
diff --git a/library/portable-simd/crates/core_simd/tests/swizzle_dyn.rs b/library/portable-simd/crates/core_simd/tests/swizzle_dyn.rs
index 646cd5f33..f21a937f0 100644
--- a/library/portable-simd/crates/core_simd/tests/swizzle_dyn.rs
+++ b/library/portable-simd/crates/core_simd/tests/swizzle_dyn.rs
@@ -1,6 +1,5 @@
#![feature(portable_simd)]
use core::{fmt, ops::RangeInclusive};
-use proptest;
use test_helpers::{self, biteq, make_runner, prop_assert_biteq};
fn swizzle_dyn_scalar_ver<const N: usize>(values: [u8; N], idxs: [u8; N]) -> [u8; N] {
diff --git a/library/portable-simd/crates/core_simd/tests/to_bytes.rs b/library/portable-simd/crates/core_simd/tests/to_bytes.rs
index be0ee4349..66a7981cd 100644
--- a/library/portable-simd/crates/core_simd/tests/to_bytes.rs
+++ b/library/portable-simd/crates/core_simd/tests/to_bytes.rs
@@ -1,14 +1,20 @@
-#![feature(portable_simd, generic_const_exprs, adt_const_params)]
-#![allow(incomplete_features)]
-#![cfg(feature = "generic_const_exprs")]
+#![feature(portable_simd)]
-use core_simd::simd::Simd;
+use core_simd::simd::{Simd, ToBytes};
#[test]
fn byte_convert() {
let int = Simd::<u32, 2>::from_array([0xdeadbeef, 0x8badf00d]);
- let bytes = int.to_ne_bytes();
- assert_eq!(int[0].to_ne_bytes(), bytes[..4]);
- assert_eq!(int[1].to_ne_bytes(), bytes[4..]);
- assert_eq!(Simd::<u32, 2>::from_ne_bytes(bytes), int);
+ let ne_bytes = int.to_ne_bytes();
+ let be_bytes = int.to_be_bytes();
+ let le_bytes = int.to_le_bytes();
+ assert_eq!(int[0].to_ne_bytes(), ne_bytes[..4]);
+ assert_eq!(int[1].to_ne_bytes(), ne_bytes[4..]);
+ assert_eq!(int[0].to_be_bytes(), be_bytes[..4]);
+ assert_eq!(int[1].to_be_bytes(), be_bytes[4..]);
+ assert_eq!(int[0].to_le_bytes(), le_bytes[..4]);
+ assert_eq!(int[1].to_le_bytes(), le_bytes[4..]);
+ assert_eq!(Simd::<u32, 2>::from_ne_bytes(ne_bytes), int);
+ assert_eq!(Simd::<u32, 2>::from_be_bytes(be_bytes), int);
+ assert_eq!(Simd::<u32, 2>::from_le_bytes(le_bytes), int);
}
diff --git a/library/portable-simd/crates/std_float/src/lib.rs b/library/portable-simd/crates/std_float/src/lib.rs
index 4ac60b10c..1fef17242 100644
--- a/library/portable-simd/crates/std_float/src/lib.rs
+++ b/library/portable-simd/crates/std_float/src/lib.rs
@@ -1,5 +1,10 @@
#![cfg_attr(feature = "as_crate", no_std)] // We are std!
-#![cfg_attr(feature = "as_crate", feature(platform_intrinsics), feature(portable_simd))]
+#![cfg_attr(
+ feature = "as_crate",
+ feature(platform_intrinsics),
+ feature(portable_simd),
+ allow(internal_features)
+)]
#[cfg(not(feature = "as_crate"))]
use core::simd;
#[cfg(feature = "as_crate")]
@@ -144,7 +149,7 @@ where
#[cfg(test)]
mod tests {
use super::*;
- use simd::*;
+ use simd::prelude::*;
#[test]
fn everything_works() {
diff --git a/library/portable-simd/crates/test_helpers/Cargo.toml b/library/portable-simd/crates/test_helpers/Cargo.toml
index 1d2bc8b51..23dae7c93 100644
--- a/library/portable-simd/crates/test_helpers/Cargo.toml
+++ b/library/portable-simd/crates/test_helpers/Cargo.toml
@@ -4,10 +4,8 @@ version = "0.1.0"
edition = "2021"
publish = false
-[dependencies.proptest]
-version = "0.10"
-default-features = false
-features = ["alloc"]
+[dependencies]
+proptest = { version = "0.10", default-features = false, features = ["alloc"] }
[features]
all_lane_counts = []
diff --git a/library/portable-simd/crates/test_helpers/src/biteq.rs b/library/portable-simd/crates/test_helpers/src/biteq.rs
index 7d91260d8..cbc20cda0 100644
--- a/library/portable-simd/crates/test_helpers/src/biteq.rs
+++ b/library/portable-simd/crates/test_helpers/src/biteq.rs
@@ -113,6 +113,27 @@ impl<T: BitEq> core::fmt::Debug for BitEqWrapper<'_, T> {
}
}
+#[doc(hidden)]
+pub struct BitEqEitherWrapper<'a, T>(pub &'a T, pub &'a T);
+
+impl<T: BitEq> PartialEq<BitEqEitherWrapper<'_, T>> for BitEqWrapper<'_, T> {
+ fn eq(&self, other: &BitEqEitherWrapper<'_, T>) -> bool {
+ self.0.biteq(other.0) || self.0.biteq(other.1)
+ }
+}
+
+impl<T: BitEq> core::fmt::Debug for BitEqEitherWrapper<'_, T> {
+ fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+ if self.0.biteq(self.1) {
+ self.0.fmt(f)
+ } else {
+ self.0.fmt(f)?;
+ write!(f, " or ")?;
+ self.1.fmt(f)
+ }
+ }
+}
+
#[macro_export]
macro_rules! prop_assert_biteq {
{ $a:expr, $b:expr $(,)? } => {
@@ -122,5 +143,14 @@ macro_rules! prop_assert_biteq {
let b = $b;
proptest::prop_assert_eq!(BitEqWrapper(&a), BitEqWrapper(&b));
}
- }
+ };
+ { $a:expr, $b:expr, $c:expr $(,)? } => {
+ {
+ use $crate::biteq::{BitEqWrapper, BitEqEitherWrapper};
+ let a = $a;
+ let b = $b;
+ let c = $c;
+ proptest::prop_assert_eq!(BitEqWrapper(&a), BitEqEitherWrapper(&b, &c));
+ }
+ };
}
diff --git a/library/portable-simd/crates/test_helpers/src/lib.rs b/library/portable-simd/crates/test_helpers/src/lib.rs
index b26cdc311..b80c745aa 100644
--- a/library/portable-simd/crates/test_helpers/src/lib.rs
+++ b/library/portable-simd/crates/test_helpers/src/lib.rs
@@ -1,3 +1,5 @@
+#![feature(stdsimd, powerpc_target_feature)]
+
pub mod array;
#[cfg(target_arch = "wasm32")]
@@ -6,6 +8,9 @@ pub mod wasm;
#[macro_use]
pub mod biteq;
+pub mod subnormals;
+use subnormals::FlushSubnormals;
+
/// Specifies the default strategy for testing a type.
///
/// This strategy should be what "makes sense" to test.
@@ -151,7 +156,6 @@ pub fn test_3<
}
/// Test a unary vector function against a unary scalar function, applied elementwise.
-#[inline(never)]
pub fn test_unary_elementwise<Scalar, ScalarResult, Vector, VectorResult, const LANES: usize>(
fv: &dyn Fn(Vector) -> VectorResult,
fs: &dyn Fn(Scalar) -> ScalarResult,
@@ -178,6 +182,48 @@ pub fn test_unary_elementwise<Scalar, ScalarResult, Vector, VectorResult, const
}
/// Test a unary vector function against a unary scalar function, applied elementwise.
+///
+/// Where subnormals may be flushed to zero, accept either the exact result or the flushed result.
+pub fn test_unary_elementwise_flush_subnormals<
+ Scalar,
+ ScalarResult,
+ Vector,
+ VectorResult,
+ const LANES: usize,
+>(
+ fv: &dyn Fn(Vector) -> VectorResult,
+ fs: &dyn Fn(Scalar) -> ScalarResult,
+ check: &dyn Fn([Scalar; LANES]) -> bool,
+) where
+ Scalar: Copy + core::fmt::Debug + DefaultStrategy + FlushSubnormals,
+ ScalarResult: Copy + biteq::BitEq + core::fmt::Debug + DefaultStrategy + FlushSubnormals,
+ Vector: Into<[Scalar; LANES]> + From<[Scalar; LANES]> + Copy,
+ VectorResult: Into<[ScalarResult; LANES]> + From<[ScalarResult; LANES]> + Copy,
+{
+ let flush = |x: Scalar| subnormals::flush(fs(subnormals::flush_in(x)));
+ test_1(&|x: [Scalar; LANES]| {
+ proptest::prop_assume!(check(x));
+ let result_v: [ScalarResult; LANES] = fv(x.into()).into();
+ let result_s: [ScalarResult; LANES] = x
+ .iter()
+ .copied()
+ .map(fs)
+ .collect::<Vec<_>>()
+ .try_into()
+ .unwrap();
+ let result_sf: [ScalarResult; LANES] = x
+ .iter()
+ .copied()
+ .map(flush)
+ .collect::<Vec<_>>()
+ .try_into()
+ .unwrap();
+ crate::prop_assert_biteq!(result_v, result_s, result_sf);
+ Ok(())
+ });
+}
+
+/// Test a unary vector function returning a mask against a unary scalar function, applied elementwise.
#[inline(never)]
pub fn test_unary_mask_elementwise<Scalar, Vector, Mask, const LANES: usize>(
fv: &dyn Fn(Vector) -> Mask,
@@ -204,7 +250,6 @@ pub fn test_unary_mask_elementwise<Scalar, Vector, Mask, const LANES: usize>(
}
/// Test a binary vector function against a binary scalar function, applied elementwise.
-#[inline(never)]
pub fn test_binary_elementwise<
Scalar1,
Scalar2,
@@ -241,6 +286,85 @@ pub fn test_binary_elementwise<
});
}
+/// Test a binary vector function against a binary scalar function, applied elementwise.
+///
+/// Where subnormals may be flushed to zero, accept either the exact result or the flushed result.
+pub fn test_binary_elementwise_flush_subnormals<
+ Scalar1,
+ Scalar2,
+ ScalarResult,
+ Vector1,
+ Vector2,
+ VectorResult,
+ const LANES: usize,
+>(
+ fv: &dyn Fn(Vector1, Vector2) -> VectorResult,
+ fs: &dyn Fn(Scalar1, Scalar2) -> ScalarResult,
+ check: &dyn Fn([Scalar1; LANES], [Scalar2; LANES]) -> bool,
+) where
+ Scalar1: Copy + core::fmt::Debug + DefaultStrategy + FlushSubnormals,
+ Scalar2: Copy + core::fmt::Debug + DefaultStrategy + FlushSubnormals,
+ ScalarResult: Copy + biteq::BitEq + core::fmt::Debug + DefaultStrategy + FlushSubnormals,
+ Vector1: Into<[Scalar1; LANES]> + From<[Scalar1; LANES]> + Copy,
+ Vector2: Into<[Scalar2; LANES]> + From<[Scalar2; LANES]> + Copy,
+ VectorResult: Into<[ScalarResult; LANES]> + From<[ScalarResult; LANES]> + Copy,
+{
+ let flush = |x: Scalar1, y: Scalar2| {
+ subnormals::flush(fs(subnormals::flush_in(x), subnormals::flush_in(y)))
+ };
+ test_2(&|x: [Scalar1; LANES], y: [Scalar2; LANES]| {
+ proptest::prop_assume!(check(x, y));
+ let result_v: [ScalarResult; LANES] = fv(x.into(), y.into()).into();
+ let result_s: [ScalarResult; LANES] = x
+ .iter()
+ .copied()
+ .zip(y.iter().copied())
+ .map(|(x, y)| fs(x, y))
+ .collect::<Vec<_>>()
+ .try_into()
+ .unwrap();
+ let result_sf: [ScalarResult; LANES] = x
+ .iter()
+ .copied()
+ .zip(y.iter().copied())
+ .map(|(x, y)| flush(x, y))
+ .collect::<Vec<_>>()
+ .try_into()
+ .unwrap();
+ crate::prop_assert_biteq!(result_v, result_s, result_sf);
+ Ok(())
+ });
+}
+
+/// Test a binary vector function returning a mask against a binary scalar function, applied elementwise.
+#[inline(never)]
+pub fn test_binary_mask_elementwise<Scalar1, Scalar2, Vector1, Vector2, Mask, const LANES: usize>(
+ fv: &dyn Fn(Vector1, Vector2) -> Mask,
+ fs: &dyn Fn(Scalar1, Scalar2) -> bool,
+ check: &dyn Fn([Scalar1; LANES], [Scalar2; LANES]) -> bool,
+) where
+ Scalar1: Copy + core::fmt::Debug + DefaultStrategy,
+ Scalar2: Copy + core::fmt::Debug + DefaultStrategy,
+ Vector1: Into<[Scalar1; LANES]> + From<[Scalar1; LANES]> + Copy,
+ Vector2: Into<[Scalar2; LANES]> + From<[Scalar2; LANES]> + Copy,
+ Mask: Into<[bool; LANES]> + From<[bool; LANES]> + Copy,
+{
+ test_2(&|x: [Scalar1; LANES], y: [Scalar2; LANES]| {
+ proptest::prop_assume!(check(x, y));
+ let result_v: [bool; LANES] = fv(x.into(), y.into()).into();
+ let result_s: [bool; LANES] = x
+ .iter()
+ .copied()
+ .zip(y.iter().copied())
+ .map(|(x, y)| fs(x, y))
+ .collect::<Vec<_>>()
+ .try_into()
+ .unwrap();
+ crate::prop_assert_biteq!(result_v, result_s);
+ Ok(())
+ });
+}
+
/// Test a binary vector-scalar function against a binary scalar function, applied elementwise.
#[inline(never)]
pub fn test_binary_scalar_rhs_elementwise<
diff --git a/library/portable-simd/crates/test_helpers/src/subnormals.rs b/library/portable-simd/crates/test_helpers/src/subnormals.rs
new file mode 100644
index 000000000..ec0f1fb24
--- /dev/null
+++ b/library/portable-simd/crates/test_helpers/src/subnormals.rs
@@ -0,0 +1,91 @@
+pub trait FlushSubnormals: Sized {
+ fn flush(self) -> Self {
+ self
+ }
+}
+
+impl<T> FlushSubnormals for *const T {}
+impl<T> FlushSubnormals for *mut T {}
+
+macro_rules! impl_float {
+ { $($ty:ty),* } => {
+ $(
+ impl FlushSubnormals for $ty {
+ fn flush(self) -> Self {
+ let is_f32 = core::mem::size_of::<Self>() == 4;
+ let ppc_flush = is_f32 && cfg!(all(
+ any(target_arch = "powerpc", all(target_arch = "powerpc64", target_endian = "big")),
+ target_feature = "altivec",
+ not(target_feature = "vsx"),
+ ));
+ let arm_flush = is_f32 && cfg!(all(target_arch = "arm", target_feature = "neon"));
+ let flush = ppc_flush || arm_flush;
+ if flush && self.is_subnormal() {
+ <$ty>::copysign(0., self)
+ } else {
+ self
+ }
+ }
+ }
+ )*
+ }
+}
+
+macro_rules! impl_else {
+ { $($ty:ty),* } => {
+ $(
+ impl FlushSubnormals for $ty {}
+ )*
+ }
+}
+
+impl_float! { f32, f64 }
+impl_else! { i8, i16, i32, i64, isize, u8, u16, u32, u64, usize }
+
+/// AltiVec should flush subnormal inputs to zero, but QEMU seems to only flush outputs.
+/// <https://gitlab.com/qemu-project/qemu/-/issues/1779>
+#[cfg(all(
+ any(target_arch = "powerpc", target_arch = "powerpc64"),
+ target_feature = "altivec"
+))]
+fn in_buggy_qemu() -> bool {
+ use std::sync::OnceLock;
+ static BUGGY: OnceLock<bool> = OnceLock::new();
+
+ fn add(x: f32, y: f32) -> f32 {
+ #[cfg(target_arch = "powerpc")]
+ use core::arch::powerpc::*;
+ #[cfg(target_arch = "powerpc64")]
+ use core::arch::powerpc64::*;
+
+ let array: [f32; 4] =
+ unsafe { core::mem::transmute(vec_add(vec_splats(x), vec_splats(y))) };
+ array[0]
+ }
+
+ *BUGGY.get_or_init(|| add(-1.0857398e-38, 0.).is_sign_negative())
+}
+
+#[cfg(all(
+ any(target_arch = "powerpc", target_arch = "powerpc64"),
+ target_feature = "altivec"
+))]
+pub fn flush_in<T: FlushSubnormals>(x: T) -> T {
+ if in_buggy_qemu() {
+ x
+ } else {
+ x.flush()
+ }
+}
+
+#[cfg(not(all(
+ any(target_arch = "powerpc", target_arch = "powerpc64"),
+ target_feature = "altivec"
+)))]
+pub fn flush_in<T: FlushSubnormals>(x: T) -> T {
+ x.flush()
+}
+
+pub fn flush<T: FlushSubnormals>(x: T) -> T {
+ x.flush()
+}
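
These helpers are intended to compose around a scalar reference function: flush the inputs, apply the operation, then flush the result. A minimal sketch of the pattern the `*_flush_subnormals` helpers above rely on:

```rust
use test_helpers::subnormals::{flush, flush_in};

// Reference computation under flush-to-zero semantics: flush each input,
// apply the scalar operation, then flush the output.
fn flushed_add(x: f32, y: f32) -> f32 {
    flush(flush_in(x) + flush_in(y))
}
```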
diff --git a/library/proc_macro/src/bridge/mod.rs b/library/proc_macro/src/bridge/mod.rs
index 86ce5d9c6..75bf33297 100644
--- a/library/proc_macro/src/bridge/mod.rs
+++ b/library/proc_macro/src/bridge/mod.rs
@@ -55,6 +55,7 @@ macro_rules! with_api {
$m! {
FreeFunctions {
fn drop($self: $S::FreeFunctions);
+ fn injected_env_var(var: &str) -> Option<String>;
fn track_env_var(var: &str, value: Option<&str>);
fn track_path(path: &str);
fn literal_from_str(s: &str) -> Result<Literal<$S::Span, $S::Symbol>, ()>;
diff --git a/library/proc_macro/src/lib.rs b/library/proc_macro/src/lib.rs
index 991fdb125..6e664a162 100644
--- a/library/proc_macro/src/lib.rs
+++ b/library/proc_macro/src/lib.rs
@@ -17,8 +17,8 @@
test(no_crate_inject, attr(deny(warnings))),
test(attr(allow(dead_code, deprecated, unused_variables, unused_mut)))
)]
-#![cfg_attr(not(bootstrap), doc(rust_logo))]
-#![cfg_attr(not(bootstrap), feature(rustdoc_internals))]
+#![doc(rust_logo)]
+#![feature(rustdoc_internals)]
// This library is copied into rust-analyzer to allow loading rustc compiled proc macros.
// Please avoid unstable features where possible to minimize the amount of changes necessary
// to make it compile with rust-analyzer on stable.
@@ -925,13 +925,12 @@ impl !Sync for Punct {}
pub enum Spacing {
/// A `Punct` token can join with the following token to form a multi-character operator.
///
- /// In token streams constructed using proc macro interfaces `Joint` punctuation tokens can be
- /// followed by any other tokens. \
- /// However, in token streams parsed from source code compiler will only set spacing to `Joint`
- /// in the following cases:
- /// - A `Punct` is immediately followed by another `Punct` without a whitespace. \
- /// E.g. `+` is `Joint` in `+=` and `++`.
- /// - A single quote `'` is immediately followed by an identifier without a whitespace. \
+ /// In token streams constructed using proc macro interfaces, `Joint` punctuation tokens can be
+ /// followed by any other tokens. However, in token streams parsed from source code, the
+ /// compiler will only set spacing to `Joint` in the following cases.
+ /// - When a `Punct` is immediately followed by another `Punct` without a whitespace. E.g. `+`
+ /// is `Joint` in `+=` and `++`.
+ /// - When a single quote `'` is immediately followed by an identifier without a whitespace.
/// E.g. `'` is `Joint` in `'lifetime`.
///
/// This list may be extended in the future to enable more token combinations.
@@ -939,11 +938,10 @@ pub enum Spacing {
Joint,
/// A `Punct` token cannot join with the following token to form a multi-character operator.
///
- /// `Alone` punctuation tokens can be followed by any other tokens. \
- /// In token streams parsed from source code compiler will set spacing to `Alone` in all cases
- /// not covered by the conditions for `Joint` above. \
- /// E.g. `+` is `Alone` in `+ =`, `+ident` and `+()`.
- /// In particular, token not followed by anything will also be marked as `Alone`.
+ /// `Alone` punctuation tokens can be followed by any other tokens. In token streams parsed
+ /// from source code, the compiler will set spacing to `Alone` in all cases not covered by the
+ /// conditions for `Joint` above. E.g. `+` is `Alone` in `+ =`, `+ident` and `+()`. In
+ /// particular, tokens not followed by anything will be marked as `Alone`.
#[stable(feature = "proc_macro_lib2", since = "1.29.0")]
Alone,
}
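To make the `Joint`/`Alone` distinction concrete: inside a procedural macro, a two-character operator is assembled from a `Joint` punct followed by a final `Alone` one. A sketch (the `proc_macro` crate is only usable from within a proc-macro crate):

```rust
use proc_macro::{Punct, Spacing, TokenStream, TokenTree};

// Emits `+=` as two Puncts: the first must be Joint so the pair is
// treated as one operator rather than as `+ =`.
fn plus_equals() -> TokenStream {
    let plus = Punct::new('+', Spacing::Joint);
    let eq = Punct::new('=', Spacing::Alone);
    [TokenTree::Punct(plus), TokenTree::Punct(eq)].into_iter().collect()
}
```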
@@ -978,8 +976,8 @@ impl Punct {
}
/// Returns the spacing of this punctuation character, indicating whether it can be potentially
- /// combined into a multi-character operator with the following token (`Joint`), or the operator
- /// has certainly ended (`Alone`).
+ /// combined into a multi-character operator with the following token (`Joint`), or whether the
+ /// operator has definitely ended (`Alone`).
#[stable(feature = "proc_macro_lib2", since = "1.29.0")]
pub fn spacing(&self) -> Spacing {
if self.0.joint { Spacing::Joint } else { Spacing::Alone }
@@ -1505,7 +1503,8 @@ pub mod tracked_env {
#[unstable(feature = "proc_macro_tracked_env", issue = "99515")]
pub fn var<K: AsRef<OsStr> + AsRef<str>>(key: K) -> Result<String, VarError> {
let key: &str = key.as_ref();
- let value = env::var(key);
+ let value = crate::bridge::client::FreeFunctions::injected_env_var(key)
+ .map_or_else(|| env::var(key), Ok);
crate::bridge::client::FreeFunctions::track_env_var(key, value.as_deref().ok());
value
}
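The fallback order here (injected value first, then the real process environment) is the usual `Option::map_or_else` pattern. A hedged, standalone sketch with a hypothetical `injected` lookup standing in for the bridge call:

```rust
use std::env::{self, VarError};

// Hypothetical stand-in for the bridge's injected_env_var call.
fn injected(key: &str) -> Option<String> {
    (key == "INJECTED_KEY").then(|| "from-compiler".to_string())
}

// Mirrors the patched tracked_env::var: prefer the injected value and
// fall back to the real environment only when none was injected.
fn lookup(key: &str) -> Result<String, VarError> {
    injected(key).map_or_else(|| env::var(key), Ok)
}

fn main() {
    assert_eq!(lookup("INJECTED_KEY").as_deref(), Ok("from-compiler"));
    let _ = lookup("PATH"); // falls through to env::var
}
```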
diff --git a/library/std/Cargo.toml b/library/std/Cargo.toml
index f666b1888..fe66788b5 100644
--- a/library/std/Cargo.toml
+++ b/library/std/Cargo.toml
@@ -30,8 +30,13 @@ rustc-demangle = { version = "0.1.21", features = ['rustc-dep-of-std'] }
[target.'cfg(not(all(windows, target_env = "msvc", not(target_vendor = "uwp"))))'.dependencies]
miniz_oxide = { version = "0.7.0", optional = true, default-features = false }
addr2line = { version = "0.21.0", optional = true, default-features = false }
+
+[target.'cfg(all(not(target_os = "aix"), not(all(windows, target_env = "msvc", not(target_vendor = "uwp")))))'.dependencies]
object = { version = "0.32.0", default-features = false, optional = true, features = ['read_core', 'elf', 'macho', 'pe', 'unaligned', 'archive'] }
+[target.'cfg(target_os = "aix")'.dependencies]
+object = { version = "0.32.0", default-features = false, optional = true, features = ['read_core', 'xcoff', 'unaligned', 'archive'] }
+
[dev-dependencies]
rand = { version = "0.8.5", default-features = false, features = ["alloc"] }
rand_xorshift = "0.3.0"
diff --git a/library/std/build.rs b/library/std/build.rs
index ad0a82eab..0f5068b59 100644
--- a/library/std/build.rs
+++ b/library/std/build.rs
@@ -25,7 +25,6 @@ fn main() {
|| target.contains("vxworks")
|| target.contains("wasm32")
|| target.contains("wasm64")
- || target.contains("asmjs")
|| target.contains("espidf")
|| target.contains("solid")
|| target.contains("nintendo-3ds")
@@ -35,6 +34,7 @@ fn main() {
|| target.contains("xous")
|| target.contains("hurd")
|| target.contains("uefi")
+ || target.contains("teeos")
// See src/bootstrap/synthetic_targets.rs
|| env::var("RUSTC_BOOTSTRAP_SYNTHETIC_TARGET").is_ok()
{
diff --git a/library/std/src/backtrace.rs b/library/std/src/backtrace.rs
index e7110aebd..9638f4919 100644
--- a/library/std/src/backtrace.rs
+++ b/library/std/src/backtrace.rs
@@ -95,8 +95,7 @@ use crate::fmt;
use crate::panic::UnwindSafe;
use crate::sync::atomic::{AtomicUsize, Ordering::Relaxed};
use crate::sync::LazyLock;
-use crate::sys_common::backtrace::{lock, output_filename};
-use crate::vec::Vec;
+use crate::sys_common::backtrace::{lock, output_filename, set_image_base};
/// A captured OS thread stack backtrace.
///
@@ -327,6 +326,7 @@ impl Backtrace {
let _lock = lock();
let mut frames = Vec::new();
let mut actual_start = None;
+ set_image_base();
unsafe {
backtrace_rs::trace_unsynchronized(|frame| {
frames.push(BacktraceFrame {
diff --git a/library/std/src/backtrace/tests.rs b/library/std/src/backtrace/tests.rs
index 73543a3af..174d62813 100644
--- a/library/std/src/backtrace/tests.rs
+++ b/library/std/src/backtrace/tests.rs
@@ -1,5 +1,5 @@
use super::*;
-use crate::panic::{RefUnwindSafe, UnwindSafe};
+use crate::panic::RefUnwindSafe;
fn generate_fake_frames() -> Vec<BacktraceFrame> {
vec![
diff --git a/library/std/src/collections/hash/map.rs b/library/std/src/collections/hash/map.rs
index 4d109285d..39e94902c 100644
--- a/library/std/src/collections/hash/map.rs
+++ b/library/std/src/collections/hash/map.rs
@@ -6,16 +6,13 @@ use self::Entry::*;
use hashbrown::hash_map as base;
use crate::borrow::Borrow;
-use crate::cell::Cell;
use crate::collections::TryReserveError;
use crate::collections::TryReserveErrorKind;
use crate::error::Error;
use crate::fmt::{self, Debug};
-#[allow(deprecated)]
-use crate::hash::{BuildHasher, Hash, Hasher, SipHasher13};
+use crate::hash::{BuildHasher, Hash, RandomState};
use crate::iter::FusedIterator;
use crate::ops::Index;
-use crate::sys;
/// A [hash map] implemented with quadratic probing and SIMD lookup.
///
@@ -274,7 +271,7 @@ impl<K, V, S> HashMap<K, V, S> {
///
/// ```
/// use std::collections::HashMap;
- /// use std::collections::hash_map::RandomState;
+ /// use std::hash::RandomState;
///
/// let s = RandomState::new();
/// let mut map = HashMap::with_hasher(s);
@@ -306,7 +303,7 @@ impl<K, V, S> HashMap<K, V, S> {
///
/// ```
/// use std::collections::HashMap;
- /// use std::collections::hash_map::RandomState;
+ /// use std::hash::RandomState;
///
/// let s = RandomState::new();
/// let mut map = HashMap::with_capacity_and_hasher(10, s);
@@ -717,7 +714,7 @@ impl<K, V, S> HashMap<K, V, S> {
///
/// ```
/// use std::collections::HashMap;
- /// use std::collections::hash_map::RandomState;
+ /// use std::hash::RandomState;
///
/// let hasher = RandomState::new();
/// let map: HashMap<i32, i32> = HashMap::with_hasher(hasher);
@@ -3072,152 +3069,6 @@ where
}
}
-/// `RandomState` is the default state for [`HashMap`] types.
-///
-/// A particular instance `RandomState` will create the same instances of
-/// [`Hasher`], but the hashers created by two different `RandomState`
-/// instances are unlikely to produce the same result for the same values.
-///
-/// # Examples
-///
-/// ```
-/// use std::collections::HashMap;
-/// use std::collections::hash_map::RandomState;
-///
-/// let s = RandomState::new();
-/// let mut map = HashMap::with_hasher(s);
-/// map.insert(1, 2);
-/// ```
-#[derive(Clone)]
-#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
-pub struct RandomState {
- k0: u64,
- k1: u64,
-}
-
-impl RandomState {
- /// Constructs a new `RandomState` that is initialized with random keys.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::collections::hash_map::RandomState;
- ///
- /// let s = RandomState::new();
- /// ```
- #[inline]
- #[allow(deprecated)]
- // rand
- #[must_use]
- #[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
- pub fn new() -> RandomState {
- // Historically this function did not cache keys from the OS and instead
- // simply always called `rand::thread_rng().gen()` twice. In #31356 it
- // was discovered, however, that because we re-seed the thread-local RNG
- // from the OS periodically that this can cause excessive slowdown when
- // many hash maps are created on a thread. To solve this performance
- // trap we cache the first set of randomly generated keys per-thread.
- //
- // Later in #36481 it was discovered that exposing a deterministic
- // iteration order allows a form of DOS attack. To counter that we
- // increment one of the seeds on every RandomState creation, giving
- // every corresponding HashMap a different iteration order.
- thread_local!(static KEYS: Cell<(u64, u64)> = {
- Cell::new(sys::hashmap_random_keys())
- });
-
- KEYS.with(|keys| {
- let (k0, k1) = keys.get();
- keys.set((k0.wrapping_add(1), k1));
- RandomState { k0, k1 }
- })
- }
-}
-
-#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
-impl BuildHasher for RandomState {
- type Hasher = DefaultHasher;
- #[inline]
- #[allow(deprecated)]
- fn build_hasher(&self) -> DefaultHasher {
- DefaultHasher(SipHasher13::new_with_keys(self.k0, self.k1))
- }
-}
-
-/// The default [`Hasher`] used by [`RandomState`].
-///
-/// The internal algorithm is not specified, and so it and its hashes should
-/// not be relied upon over releases.
-#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
-#[allow(deprecated)]
-#[derive(Clone, Debug)]
-pub struct DefaultHasher(SipHasher13);
-
-impl DefaultHasher {
- /// Creates a new `DefaultHasher`.
- ///
- /// This hasher is not guaranteed to be the same as all other
- /// `DefaultHasher` instances, but is the same as all other `DefaultHasher`
- /// instances created through `new` or `default`.
- #[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
- #[inline]
- #[allow(deprecated)]
- #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
- #[must_use]
- pub const fn new() -> DefaultHasher {
- DefaultHasher(SipHasher13::new_with_keys(0, 0))
- }
-}
-
-#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
-impl Default for DefaultHasher {
- /// Creates a new `DefaultHasher` using [`new`].
- /// See its documentation for more.
- ///
- /// [`new`]: DefaultHasher::new
- #[inline]
- fn default() -> DefaultHasher {
- DefaultHasher::new()
- }
-}
-
-#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
-impl Hasher for DefaultHasher {
- // The underlying `SipHasher13` doesn't override the other
- // `write_*` methods, so it's ok not to forward them here.
-
- #[inline]
- fn write(&mut self, msg: &[u8]) {
- self.0.write(msg)
- }
-
- #[inline]
- fn write_str(&mut self, s: &str) {
- self.0.write_str(s);
- }
-
- #[inline]
- fn finish(&self) -> u64 {
- self.0.finish()
- }
-}
-
-#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
-impl Default for RandomState {
- /// Constructs a new `RandomState`.
- #[inline]
- fn default() -> RandomState {
- RandomState::new()
- }
-}
-
-#[stable(feature = "std_debug", since = "1.16.0")]
-impl fmt::Debug for RandomState {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("RandomState").finish_non_exhaustive()
- }
-}
-
#[inline]
fn map_entry<'a, K: 'a, V: 'a>(raw: base::RustcEntry<'a, K, V>) -> Entry<'a, K, V> {
match raw {
diff --git a/library/std/src/collections/hash/map/tests.rs b/library/std/src/collections/hash/map/tests.rs
index 91a3776e7..8585376ab 100644
--- a/library/std/src/collections/hash/map/tests.rs
+++ b/library/std/src/collections/hash/map/tests.rs
@@ -1,8 +1,8 @@
use super::Entry::{Occupied, Vacant};
use super::HashMap;
-use super::RandomState;
use crate::assert_matches::assert_matches;
use crate::cell::RefCell;
+use crate::hash::RandomState;
use crate::test_helpers::test_rng;
use rand::Rng;
use realstd::collections::TryReserveErrorKind::*;
diff --git a/library/std/src/collections/hash/set.rs b/library/std/src/collections/hash/set.rs
index 6a87f6e5f..8bc596082 100644
--- a/library/std/src/collections/hash/set.rs
+++ b/library/std/src/collections/hash/set.rs
@@ -6,11 +6,11 @@ use hashbrown::hash_set as base;
use crate::borrow::Borrow;
use crate::collections::TryReserveError;
use crate::fmt;
-use crate::hash::{BuildHasher, Hash};
+use crate::hash::{BuildHasher, Hash, RandomState};
use crate::iter::{Chain, FusedIterator};
use crate::ops::{BitAnd, BitOr, BitXor, Sub};
-use super::map::{map_try_reserve_error, RandomState};
+use super::map::map_try_reserve_error;
/// A [hash set] implemented as a `HashMap` where the value is `()`.
///
@@ -361,7 +361,7 @@ impl<T, S> HashSet<T, S> {
///
/// ```
/// use std::collections::HashSet;
- /// use std::collections::hash_map::RandomState;
+ /// use std::hash::RandomState;
///
/// let s = RandomState::new();
/// let mut set = HashSet::with_hasher(s);
@@ -393,7 +393,7 @@ impl<T, S> HashSet<T, S> {
///
/// ```
/// use std::collections::HashSet;
- /// use std::collections::hash_map::RandomState;
+ /// use std::hash::RandomState;
///
/// let s = RandomState::new();
/// let mut set = HashSet::with_capacity_and_hasher(10, s);
@@ -411,7 +411,7 @@ impl<T, S> HashSet<T, S> {
///
/// ```
/// use std::collections::HashSet;
- /// use std::collections::hash_map::RandomState;
+ /// use std::hash::RandomState;
///
/// let hasher = RandomState::new();
/// let set: HashSet<i32> = HashSet::with_hasher(hasher);
diff --git a/library/std/src/collections/hash/set/tests.rs b/library/std/src/collections/hash/set/tests.rs
index e0cd80b44..208f61e75 100644
--- a/library/std/src/collections/hash/set/tests.rs
+++ b/library/std/src/collections/hash/set/tests.rs
@@ -1,6 +1,6 @@
-use super::super::map::RandomState;
use super::HashSet;
+use crate::hash::RandomState;
use crate::panic::{catch_unwind, AssertUnwindSafe};
use crate::sync::atomic::{AtomicU32, Ordering};
use crate::sync::Arc;
diff --git a/library/std/src/collections/mod.rs b/library/std/src/collections/mod.rs
index 42f738acb..1389d24a8 100644
--- a/library/std/src/collections/mod.rs
+++ b/library/std/src/collections/mod.rs
@@ -439,6 +439,11 @@ pub mod hash_map {
//! A hash map implemented with quadratic probing and SIMD lookup.
#[stable(feature = "rust1", since = "1.0.0")]
pub use super::hash::map::*;
+
+ #[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
+ pub use crate::hash::random::DefaultHasher;
+ #[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
+ pub use crate::hash::random::RandomState;
}
#[stable(feature = "rust1", since = "1.0.0")]
diff --git a/library/std/src/env.rs b/library/std/src/env.rs
index f67f6034d..30ac05123 100644
--- a/library/std/src/env.rs
+++ b/library/std/src/env.rs
@@ -313,17 +313,32 @@ impl Error for VarError {
/// Sets the environment variable `key` to the value `value` for the currently running
/// process.
///
-/// Note that while concurrent access to environment variables is safe in Rust,
-/// some platforms only expose inherently unsafe non-threadsafe APIs for
-/// inspecting the environment. As a result, extra care needs to be taken when
-/// auditing calls to unsafe external FFI functions to ensure that any external
-/// environment accesses are properly synchronized with accesses in Rust.
+/// # Safety
+///
+/// Even though this function is currently not marked as `unsafe`, it needs to
+/// be because invoking it can cause undefined behaviour. The function will be
+/// marked `unsafe` in a future version of Rust. This is tracked in
+/// [rust#27970](https://github.com/rust-lang/rust/issues/27970).
+///
+/// This function is safe to call in a single-threaded program.
+///
+/// In multi-threaded programs, you must ensure that there are no other threads
+/// concurrently writing or *reading*(!) from the environment through functions
+/// other than the ones in this module. You are responsible for figuring out
+/// how to achieve this, but we strongly suggest not using `set_var` or
+/// `remove_var` in multi-threaded programs at all.
+///
+/// Most C libraries, including libc itself, do not advertise which functions
+/// read from the environment. Even functions from the Rust standard library
+/// do so, e.g. for DNS lookups via [`std::net::ToSocketAddrs`].
///
/// Discussion of this unsafety on Unix may be found in:
///
/// - [Austin Group Bugzilla](https://austingroupbugs.net/view.php?id=188)
/// - [GNU C library Bugzilla](https://sourceware.org/bugzilla/show_bug.cgi?id=15607#c2)
///
+/// [`std::net::ToSocketAddrs`]: crate::net::ToSocketAddrs
+///
/// # Panics
///
/// This function may panic if `key` is empty, contains an ASCII equals sign `'='`
@@ -351,17 +366,32 @@ fn _set_var(key: &OsStr, value: &OsStr) {
/// Removes an environment variable from the environment of the currently running process.
///
-/// Note that while concurrent access to environment variables is safe in Rust,
-/// some platforms only expose inherently unsafe non-threadsafe APIs for
-/// inspecting the environment. As a result extra care needs to be taken when
-/// auditing calls to unsafe external FFI functions to ensure that any external
-/// environment accesses are properly synchronized with accesses in Rust.
+/// # Safety
+///
+/// Even though this function is currently not marked as `unsafe`, it needs to
+/// be because invoking it can cause undefined behaviour. The function will be
+/// marked `unsafe` in a future version of Rust. This is tracked in
+/// [rust#27970](https://github.com/rust-lang/rust/issues/27970).
+///
+/// This function is safe to call in a single-threaded program.
+///
+/// In multi-threaded programs, you must ensure that there are no other threads
+/// concurrently writing or *reading*(!) from the environment through functions
+/// other than the ones in this module. You are responsible for figuring out
+/// how to achieve this, but we strongly suggest not using `set_var` or
+/// `remove_var` in multi-threaded programs at all.
+///
+/// Most C libraries, including libc itself, do not advertise which functions
+/// read from the environment. Even functions from the Rust standard library
+/// do so, e.g. for DNS lookups via [`std::net::ToSocketAddrs`].
///
/// Discussion of this unsafety on Unix may be found in:
///
/// - [Austin Group Bugzilla](https://austingroupbugs.net/view.php?id=188)
/// - [GNU C library Bugzilla](https://sourceware.org/bugzilla/show_bug.cgi?id=15607#c2)
///
+/// [`std::net::ToSocketAddrs`]: crate::net::ToSocketAddrs
+///
/// # Panics
///
/// This function may panic if `key` is empty, contains an ASCII equals sign
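One way to honour the safety contract spelled out above is to confine all environment mutation in your own code behind a process-wide lock. A minimal sketch (an assumption of this edit, not an API the patch adds), which as the comment notes still cannot protect against foreign code:

```rust
use std::env;
use std::sync::Mutex;

// Process-wide lock serializing our own env mutations. This cannot
// protect against foreign code (C libraries, other crates) reading the
// environment directly, which is exactly why set_var will become unsafe.
static ENV_LOCK: Mutex<()> = Mutex::new(());

fn set_var_locked(key: &str, value: &str) {
    let _guard = ENV_LOCK.lock().unwrap();
    env::set_var(key, value);
}

fn main() {
    set_var_locked("MY_APP_MODE", "debug");
    assert_eq!(env::var("MY_APP_MODE").as_deref(), Ok("debug"));
}
```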
diff --git a/library/std/src/env/tests.rs b/library/std/src/env/tests.rs
index 558692295..fc7aee297 100644
--- a/library/std/src/env/tests.rs
+++ b/library/std/src/env/tests.rs
@@ -1,7 +1,5 @@
use super::*;
-use crate::path::Path;
-
#[test]
#[cfg_attr(any(target_os = "emscripten", target_env = "sgx"), ignore)]
fn test_self_exe_path() {
diff --git a/library/std/src/error.rs b/library/std/src/error.rs
index 375ff2d24..b240e4e2c 100644
--- a/library/std/src/error.rs
+++ b/library/std/src/error.rs
@@ -12,14 +12,6 @@ pub use core::error::Error;
#[unstable(feature = "error_generic_member_access", issue = "99301")]
pub use core::error::{request_ref, request_value, Request};
-mod private {
- // This is a hack to prevent `type_id` from being overridden by `Error`
- // implementations, since that can enable unsound downcasting.
- #[unstable(feature = "error_type_id", issue = "60784")]
- #[derive(Debug)]
- pub struct Internal;
-}
-
/// An error reporter that prints an error and its sources.
///
/// Report also exposes configuration options for formatting the error sources, either entirely on a
diff --git a/library/std/src/ffi/os_str.rs b/library/std/src/ffi/os_str.rs
index fa9d48771..819731821 100644
--- a/library/std/src/ffi/os_str.rs
+++ b/library/std/src/ffi/os_str.rs
@@ -6,9 +6,10 @@ use crate::cmp;
use crate::collections::TryReserveError;
use crate::fmt;
use crate::hash::{Hash, Hasher};
-use crate::ops;
+use crate::ops::{self, Range};
use crate::rc::Rc;
-use crate::str::FromStr;
+use crate::slice;
+use crate::str::{from_utf8 as str_from_utf8, FromStr};
use crate::sync::Arc;
use crate::sys::os_str::{Buf, Slice};
@@ -963,6 +964,83 @@ impl OsStr {
self.inner.as_encoded_bytes()
}
+ /// Takes a substring based on a range that corresponds to the return value of
+ /// [`OsStr::as_encoded_bytes`].
+ ///
+ /// The range's start and end must lie on valid `OsStr` boundaries.
+ /// A valid `OsStr` boundary is one of:
+ /// - The start of the string
+ /// - The end of the string
+ /// - Immediately before a valid non-empty UTF-8 substring
+ /// - Immediately after a valid non-empty UTF-8 substring
+ ///
+ /// # Panics
+ ///
+ /// Panics if `range` does not lie on valid `OsStr` boundaries or if it
+ /// exceeds the end of the string.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// #![feature(os_str_slice)]
+ ///
+ /// use std::ffi::OsStr;
+ ///
+ /// let os_str = OsStr::new("foo=bar");
+ /// let bytes = os_str.as_encoded_bytes();
+ /// if let Some(index) = bytes.iter().position(|b| *b == b'=') {
+ /// let key = os_str.slice_encoded_bytes(..index);
+ /// let value = os_str.slice_encoded_bytes(index + 1..);
+ /// assert_eq!(key, "foo");
+ /// assert_eq!(value, "bar");
+ /// }
+ /// ```
+ #[unstable(feature = "os_str_slice", issue = "118485")]
+ pub fn slice_encoded_bytes<R: ops::RangeBounds<usize>>(&self, range: R) -> &Self {
+ #[track_caller]
+ fn check_valid_boundary(bytes: &[u8], index: usize) {
+ if index == 0 || index == bytes.len() {
+ return;
+ }
+
+ // Fast path
+ if bytes[index - 1].is_ascii() || bytes[index].is_ascii() {
+ return;
+ }
+
+ let (before, after) = bytes.split_at(index);
+
+ // UTF-8 takes at most 4 bytes per codepoint, so we don't
+ // need to check more than that.
+ let after = after.get(..4).unwrap_or(after);
+ match str_from_utf8(after) {
+ Ok(_) => return,
+ Err(err) if err.valid_up_to() != 0 => return,
+ Err(_) => (),
+ }
+
+ for len in 2..=4.min(index) {
+ let before = &before[index - len..];
+ if str_from_utf8(before).is_ok() {
+ return;
+ }
+ }
+
+ panic!("byte index {index} is not an OsStr boundary");
+ }
+
+ let encoded_bytes = self.as_encoded_bytes();
+ let Range { start, end } = slice::range(range, ..encoded_bytes.len());
+ check_valid_boundary(encoded_bytes, start);
+ check_valid_boundary(encoded_bytes, end);
+
+ // SAFETY: `slice::range` ensures that `start` and `end` are valid
+ let slice = unsafe { encoded_bytes.get_unchecked(start..end) };
+
+ // SAFETY: `slice` comes from `self` and we validated the boundaries
+ unsafe { Self::from_encoded_bytes_unchecked(slice) }
+ }
+
/// Converts this string to its ASCII lower case equivalent in-place.
///
/// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
diff --git a/library/std/src/ffi/os_str/tests.rs b/library/std/src/ffi/os_str/tests.rs
index d7926749a..60cde376d 100644
--- a/library/std/src/ffi/os_str/tests.rs
+++ b/library/std/src/ffi/os_str/tests.rs
@@ -1,8 +1,4 @@
use super::*;
-use crate::sys_common::{AsInner, IntoInner};
-
-use crate::rc::Rc;
-use crate::sync::Arc;
#[test]
fn test_os_string_with_capacity() {
@@ -177,3 +173,53 @@ fn into_rc() {
assert_eq!(&*rc2, os_str);
assert_eq!(&*arc2, os_str);
}
+
+#[test]
+fn slice_encoded_bytes() {
+ let os_str = OsStr::new("123θგ🦀");
+ // ASCII
+ let digits = os_str.slice_encoded_bytes(..3);
+ assert_eq!(digits, "123");
+ let three = os_str.slice_encoded_bytes(2..3);
+ assert_eq!(three, "3");
+ // 2-byte UTF-8
+ let theta = os_str.slice_encoded_bytes(3..5);
+ assert_eq!(theta, "θ");
+ // 3-byte UTF-8
+ let gani = os_str.slice_encoded_bytes(5..8);
+ assert_eq!(gani, "გ");
+ // 4-byte UTF-8
+ let crab = os_str.slice_encoded_bytes(8..);
+ assert_eq!(crab, "🦀");
+}
+
+#[test]
+#[should_panic(expected = "byte index 2 is not an OsStr boundary")]
+fn slice_mid_char() {
+ let crab = OsStr::new("🦀");
+ let _ = crab.slice_encoded_bytes(..2);
+}
+
+#[cfg(windows)]
+#[test]
+#[should_panic(expected = "byte index 3 is not an OsStr boundary")]
+fn slice_between_surrogates() {
+ use crate::os::windows::ffi::OsStringExt;
+
+ let os_string = OsString::from_wide(&[0xD800, 0xD800]);
+ assert_eq!(os_string.as_encoded_bytes(), &[0xED, 0xA0, 0x80, 0xED, 0xA0, 0x80]);
+ let _ = os_string.slice_encoded_bytes(..3);
+}
+
+#[cfg(windows)]
+#[test]
+fn slice_surrogate_edge() {
+ use crate::os::windows::ffi::OsStringExt;
+
+ let os_string = OsString::from_wide(&[0xD800]);
+ let mut with_crab = os_string.clone();
+ with_crab.push("🦀");
+
+ assert_eq!(with_crab.slice_encoded_bytes(..3), os_string);
+ assert_eq!(with_crab.slice_encoded_bytes(3..), "🦀");
+}
diff --git a/library/std/src/fs/tests.rs b/library/std/src/fs/tests.rs
index 547a7b705..12afdef26 100644
--- a/library/std/src/fs/tests.rs
+++ b/library/std/src/fs/tests.rs
@@ -22,7 +22,7 @@ use crate::os::unix::fs::symlink as symlink_file;
#[cfg(unix)]
use crate::os::unix::fs::symlink as symlink_junction;
#[cfg(windows)]
-use crate::os::windows::fs::{symlink_dir, symlink_file};
+use crate::os::windows::fs::{symlink_dir, symlink_file, OpenOptionsExt};
#[cfg(windows)]
use crate::sys::fs::symlink_junction;
#[cfg(target_os = "macos")]
@@ -74,7 +74,7 @@ macro_rules! error_contains {
// tests most of the time, but at least we do if the user has the right
// permissions.
pub fn got_symlink_permission(tmpdir: &TempDir) -> bool {
- if cfg!(unix) {
+ if cfg!(not(windows)) || env::var_os("CI").is_some() {
return true;
}
let link = tmpdir.join("some_hopefully_unique_link_name");
@@ -1793,3 +1793,28 @@ fn windows_unix_socket_exists() {
assert_eq!(socket_path.try_exists().unwrap(), true);
assert_eq!(socket_path.metadata().is_ok(), true);
}
+
+#[cfg(windows)]
+#[test]
+fn test_hidden_file_truncation() {
+ // Make sure that File::create works on an existing hidden file. See #115745.
+ let tmpdir = tmpdir();
+ let path = tmpdir.join("hidden_file.txt");
+
+ // Create a hidden file.
+ const FILE_ATTRIBUTE_HIDDEN: u32 = 2;
+ let mut file = OpenOptions::new()
+ .write(true)
+ .create_new(true)
+ .attributes(FILE_ATTRIBUTE_HIDDEN)
+ .open(&path)
+ .unwrap();
+ file.write("hidden world!".as_bytes()).unwrap();
+ file.flush().unwrap();
+ drop(file);
+
+ // Create a new file by truncating the existing one.
+ let file = File::create(&path).unwrap();
+ let metadata = file.metadata().unwrap();
+ assert_eq!(metadata.len(), 0);
+}
diff --git a/library/std/src/hash/mod.rs b/library/std/src/hash/mod.rs
new file mode 100644
index 000000000..e5ef9e335
--- /dev/null
+++ b/library/std/src/hash/mod.rs
@@ -0,0 +1,91 @@
+//! Generic hashing support.
+//!
+//! This module provides a generic way to compute the [hash] of a value.
+//! Hashes are most commonly used with [`HashMap`] and [`HashSet`].
+//!
+//! [hash]: https://en.wikipedia.org/wiki/Hash_function
+//! [`HashMap`]: ../../std/collections/struct.HashMap.html
+//! [`HashSet`]: ../../std/collections/struct.HashSet.html
+//!
+//! The simplest way to make a type hashable is to use `#[derive(Hash)]`:
+//!
+//! # Examples
+//!
+//! ```rust
+//! use std::hash::{DefaultHasher, Hash, Hasher};
+//!
+//! #[derive(Hash)]
+//! struct Person {
+//! id: u32,
+//! name: String,
+//! phone: u64,
+//! }
+//!
+//! let person1 = Person {
+//! id: 5,
+//! name: "Janet".to_string(),
+//! phone: 555_666_7777,
+//! };
+//! let person2 = Person {
+//! id: 5,
+//! name: "Bob".to_string(),
+//! phone: 555_666_7777,
+//! };
+//!
+//! assert!(calculate_hash(&person1) != calculate_hash(&person2));
+//!
+//! fn calculate_hash<T: Hash>(t: &T) -> u64 {
+//! let mut s = DefaultHasher::new();
+//! t.hash(&mut s);
+//! s.finish()
+//! }
+//! ```
+//!
+//! If you need more control over how a value is hashed, you need to implement
+//! the [`Hash`] trait:
+//!
+//! ```rust
+//! use std::hash::{DefaultHasher, Hash, Hasher};
+//!
+//! struct Person {
+//! id: u32,
+//! # #[allow(dead_code)]
+//! name: String,
+//! phone: u64,
+//! }
+//!
+//! impl Hash for Person {
+//! fn hash<H: Hasher>(&self, state: &mut H) {
+//! self.id.hash(state);
+//! self.phone.hash(state);
+//! }
+//! }
+//!
+//! let person1 = Person {
+//! id: 5,
+//! name: "Janet".to_string(),
+//! phone: 555_666_7777,
+//! };
+//! let person2 = Person {
+//! id: 5,
+//! name: "Bob".to_string(),
+//! phone: 555_666_7777,
+//! };
+//!
+//! assert_eq!(calculate_hash(&person1), calculate_hash(&person2));
+//!
+//! fn calculate_hash<T: Hash>(t: &T) -> u64 {
+//! let mut s = DefaultHasher::new();
+//! t.hash(&mut s);
+//! s.finish()
+//! }
+//! ```
+#![stable(feature = "rust1", since = "1.0.0")]
+
+pub(crate) mod random;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::hash::*;
+
+#[stable(feature = "std_hash_exports", since = "1.76.0")]
+pub use self::random::{DefaultHasher, RandomState};
diff --git a/library/std/src/hash/random.rs b/library/std/src/hash/random.rs
new file mode 100644
index 000000000..a1ccbb253
--- /dev/null
+++ b/library/std/src/hash/random.rs
@@ -0,0 +1,161 @@
+//! This module exists to isolate [`RandomState`] and [`DefaultHasher`] outside of the
+//! [`collections`] module without actually publicly exporting them, so that parts of that
+//! implementation can more easily be moved to the [`alloc`] crate.
+//!
+//! Although its items are public and contain stability attributes, they can't actually be accessed
+//! outside this crate.
+//!
+//! [`collections`]: crate::collections
+#[allow(deprecated)]
+use super::{BuildHasher, Hasher, SipHasher13};
+use crate::cell::Cell;
+use crate::fmt;
+use crate::sys;
+
+/// `RandomState` is the default state for [`HashMap`] types.
+///
+/// A particular instance of `RandomState` will create the same instances of
+/// [`Hasher`], but the hashers created by two different `RandomState`
+/// instances are unlikely to produce the same result for the same values.
+///
+/// [`HashMap`]: crate::collections::HashMap
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::HashMap;
+/// use std::hash::RandomState;
+///
+/// let s = RandomState::new();
+/// let mut map = HashMap::with_hasher(s);
+/// map.insert(1, 2);
+/// ```
+#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
+#[derive(Clone)]
+pub struct RandomState {
+ k0: u64,
+ k1: u64,
+}
+
+impl RandomState {
+ /// Constructs a new `RandomState` that is initialized with random keys.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::hash::RandomState;
+ ///
+ /// let s = RandomState::new();
+ /// ```
+ #[inline]
+ #[allow(deprecated)]
+ // rand
+ #[must_use]
+ #[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
+ pub fn new() -> RandomState {
+ // Historically this function did not cache keys from the OS and instead
+ // simply always called `rand::thread_rng().gen()` twice. In #31356 it
+ // was discovered, however, that because we re-seed the thread-local RNG
+ // from the OS periodically that this can cause excessive slowdown when
+ // many hash maps are created on a thread. To solve this performance
+ // trap we cache the first set of randomly generated keys per-thread.
+ //
+ // Later in #36481 it was discovered that exposing a deterministic
+ // iteration order allows a form of DoS attack. To counter that we
+ // increment one of the seeds on every RandomState creation, giving
+ // every corresponding HashMap a different iteration order.
+ thread_local!(static KEYS: Cell<(u64, u64)> = {
+ Cell::new(sys::hashmap_random_keys())
+ });
+
+ KEYS.with(|keys| {
+ let (k0, k1) = keys.get();
+ keys.set((k0.wrapping_add(1), k1));
+ RandomState { k0, k1 }
+ })
+ }
+}
+
+#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
+impl BuildHasher for RandomState {
+ type Hasher = DefaultHasher;
+ #[inline]
+ #[allow(deprecated)]
+ fn build_hasher(&self) -> DefaultHasher {
+ DefaultHasher(SipHasher13::new_with_keys(self.k0, self.k1))
+ }
+}
+
+/// The default [`Hasher`] used by [`RandomState`].
+///
+/// The internal algorithm is not specified, and so it and its hashes should
+/// not be relied upon over releases.
+#[allow(deprecated)]
+#[derive(Clone, Debug)]
+#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
+pub struct DefaultHasher(SipHasher13);
+
+impl DefaultHasher {
+ /// Creates a new `DefaultHasher`.
+ ///
+ /// This hasher is not guaranteed to be the same as all other
+ /// `DefaultHasher` instances, but is the same as all other `DefaultHasher`
+ /// instances created through `new` or `default`.
+ #[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
+ #[inline]
+ #[allow(deprecated)]
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+ #[must_use]
+ pub const fn new() -> DefaultHasher {
+ DefaultHasher(SipHasher13::new_with_keys(0, 0))
+ }
+}
+
+#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
+impl Default for DefaultHasher {
+ /// Creates a new `DefaultHasher` using [`new`].
+ /// See its documentation for more.
+ ///
+ /// [`new`]: DefaultHasher::new
+ #[inline]
+ fn default() -> DefaultHasher {
+ DefaultHasher::new()
+ }
+}
+
+#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
+impl Hasher for DefaultHasher {
+ // The underlying `SipHasher13` doesn't override the other
+ // `write_*` methods, so it's ok not to forward them here.
+
+ #[inline]
+ fn write(&mut self, msg: &[u8]) {
+ self.0.write(msg)
+ }
+
+ #[inline]
+ fn write_str(&mut self, s: &str) {
+ self.0.write_str(s);
+ }
+
+ #[inline]
+ fn finish(&self) -> u64 {
+ self.0.finish()
+ }
+}
+
+#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
+impl Default for RandomState {
+ /// Constructs a new `RandomState`.
+ #[inline]
+ fn default() -> RandomState {
+ RandomState::new()
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for RandomState {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("RandomState").finish_non_exhaustive()
+ }
+}
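The property documented above, that one `RandomState` always builds identical hashers while two independently created states differ, can be observed through the stable `BuildHasher::hash_one` API (a sketch, not part of this patch):

```rust
use std::hash::{BuildHasher, RandomState};

fn main() {
    let s1 = RandomState::new();
    let s2 = RandomState::new();

    // The same state hashes equal values identically...
    assert_eq!(s1.hash_one("ferris"), s1.hash_one("ferris"));

    // ...while two separately created states almost certainly disagree,
    // because each creation perturbs the per-thread seed.
    let probably_different = s1.hash_one("ferris") != s2.hash_one("ferris");
    println!("states disagree: {probably_different}");
}
```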
diff --git a/library/std/src/io/buffered/bufreader.rs b/library/std/src/io/buffered/bufreader.rs
index 55aafc3db..6c7494a6a 100644
--- a/library/std/src/io/buffered/bufreader.rs
+++ b/library/std/src/io/buffered/bufreader.rs
@@ -507,6 +507,16 @@ impl<R: ?Sized + Seek> Seek for BufReader<R> {
)
})
}
+
+ /// Seeks relative to the current position.
+ ///
+ /// If the new position lies within the buffer, the buffer will not be
+ /// flushed, allowing for more efficient seeks. This method does not return
+ /// the location of the underlying reader, so the caller must track this
+ /// information themselves if it is required.
+ fn seek_relative(&mut self, offset: i64) -> io::Result<()> {
+ self.seek_relative(offset)
+ }
}
impl<T: ?Sized> SizeHint for BufReader<T> {
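The override simply forwards to `BufReader`'s long-standing inherent `seek_relative`, so small seeks that stay inside the buffer avoid discarding it. A usage sketch over an in-memory cursor:

```rust
use std::io::{BufReader, Cursor, Read, Seek};

fn main() -> std::io::Result<()> {
    let data: Vec<u8> = (0..=255).collect();
    let mut reader = BufReader::new(Cursor::new(data));

    let mut byte = [0u8; 1];
    reader.read_exact(&mut byte)?; // fills the internal buffer
    assert_eq!(byte[0], 0);

    // Skips forward within the already-buffered bytes: no buffer
    // discard, no seek on the underlying reader.
    reader.seek_relative(9)?;
    reader.read_exact(&mut byte)?;
    assert_eq!(byte[0], 10);
    Ok(())
}
```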
diff --git a/library/std/src/io/copy.rs b/library/std/src/io/copy.rs
index 4d51a719f..d49866345 100644
--- a/library/std/src/io/copy.rs
+++ b/library/std/src/io/copy.rs
@@ -1,7 +1,6 @@
use super::{BorrowedBuf, BufReader, BufWriter, Read, Result, Write, DEFAULT_BUF_SIZE};
use crate::alloc::Allocator;
use crate::cmp;
-use crate::cmp::min;
use crate::collections::VecDeque;
use crate::io::IoSlice;
use crate::mem::MaybeUninit;
@@ -256,79 +255,17 @@ impl<I: Write + ?Sized> BufferedWriterSpec for BufWriter<I> {
}
}
-impl<A: Allocator> BufferedWriterSpec for Vec<u8, A> {
+impl BufferedWriterSpec for Vec<u8> {
fn buffer_size(&self) -> usize {
cmp::max(DEFAULT_BUF_SIZE, self.capacity() - self.len())
}
fn copy_from<R: Read + ?Sized>(&mut self, reader: &mut R) -> Result<u64> {
- let mut bytes = 0;
-
- // avoid inflating empty/small vecs before we have determined that there's anything to read
- if self.capacity() < DEFAULT_BUF_SIZE {
- let stack_read_limit = DEFAULT_BUF_SIZE as u64;
- bytes = stack_buffer_copy(&mut reader.take(stack_read_limit), self)?;
- // fewer bytes than requested -> EOF reached
- if bytes < stack_read_limit {
- return Ok(bytes);
- }
- }
-
- // don't immediately offer the vec's whole spare capacity, otherwise
- // we might have to fully initialize it if the reader doesn't have a custom read_buf() impl
- let mut max_read_size = DEFAULT_BUF_SIZE;
-
- loop {
- self.reserve(DEFAULT_BUF_SIZE);
- let mut initialized_spare_capacity = 0;
-
- loop {
- let buf = self.spare_capacity_mut();
- let read_size = min(max_read_size, buf.len());
- let mut buf = BorrowedBuf::from(&mut buf[..read_size]);
- // SAFETY: init is either 0 or the init_len from the previous iteration.
- unsafe {
- buf.set_init(initialized_spare_capacity);
- }
- match reader.read_buf(buf.unfilled()) {
- Ok(()) => {
- let bytes_read = buf.len();
-
- // EOF
- if bytes_read == 0 {
- return Ok(bytes);
- }
-
- // the reader is returning short reads but it doesn't call ensure_init()
- if buf.init_len() < buf.capacity() {
- max_read_size = usize::MAX;
- }
- // the reader hasn't returned short reads so far
- if bytes_read == buf.capacity() {
- max_read_size *= 2;
- }
-
- initialized_spare_capacity = buf.init_len() - bytes_read;
- bytes += bytes_read as u64;
- // SAFETY: BorrowedBuf guarantees all of its filled bytes are init
- // and the number of read bytes can't exceed the spare capacity since
- // that's what the buffer is borrowing from.
- unsafe { self.set_len(self.len() + bytes_read) };
-
- // spare capacity full, reserve more
- if self.len() == self.capacity() {
- break;
- }
- }
- Err(e) if e.is_interrupted() => continue,
- Err(e) => return Err(e),
- }
- }
- }
+ reader.read_to_end(self).map(|bytes| u64::try_from(bytes).expect("usize overflowed u64"))
}
}
-fn stack_buffer_copy<R: Read + ?Sized, W: Write + ?Sized>(
+pub fn stack_buffer_copy<R: Read + ?Sized, W: Write + ?Sized>(
reader: &mut R,
writer: &mut W,
) -> Result<u64> {
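With this change, copying into a `Vec<u8>` defers entirely to `read_to_end`, which now carries the adaptive buffer-sizing logic; the observable behaviour stays the same, as in this sketch:

```rust
use std::io::{self, Read};

fn main() -> io::Result<()> {
    // A 64 KiB stream of zeros; `take` gives it a known end.
    let mut source = io::repeat(0u8).take(64 * 1024);
    let mut sink: Vec<u8> = Vec::new();

    // Hits the Vec<u8> writer specialization, which after this patch
    // is just `read_to_end` under the hood.
    let copied = io::copy(&mut source, &mut sink)?;
    assert_eq!(copied, 64 * 1024);
    assert_eq!(sink.len(), 64 * 1024);
    Ok(())
}
```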
diff --git a/library/std/src/io/copy/tests.rs b/library/std/src/io/copy/tests.rs
index af137eaf8..a1f909a3c 100644
--- a/library/std/src/io/copy/tests.rs
+++ b/library/std/src/io/copy/tests.rs
@@ -82,13 +82,16 @@ fn copy_specializes_bufreader() {
#[test]
fn copy_specializes_to_vec() {
- let cap = 123456;
- let mut source = ShortReader { cap, observed_buffer: 0, read_size: 1337 };
+ let cap = DEFAULT_BUF_SIZE * 10;
+ let mut source = ShortReader { cap, observed_buffer: 0, read_size: DEFAULT_BUF_SIZE };
let mut sink = Vec::new();
- assert_eq!(cap as u64, io::copy(&mut source, &mut sink).unwrap());
+ let copied = io::copy(&mut source, &mut sink).unwrap();
+ assert_eq!(cap as u64, copied);
+ assert_eq!(sink.len() as u64, copied);
assert!(
source.observed_buffer > DEFAULT_BUF_SIZE,
- "expected a large buffer to be provided to the reader"
+ "expected a large buffer to be provided to the reader, got {}",
+ source.observed_buffer
);
}
diff --git a/library/std/src/io/error/repr_bitpacked.rs b/library/std/src/io/error/repr_bitpacked.rs
index 6e7366b36..db1756597 100644
--- a/library/std/src/io/error/repr_bitpacked.rs
+++ b/library/std/src/io/error/repr_bitpacked.rs
@@ -103,7 +103,6 @@
//! the time.
use super::{Custom, ErrorData, ErrorKind, RawOsError, SimpleMessage};
-use alloc::boxed::Box;
use core::marker::PhantomData;
use core::mem::{align_of, size_of};
use core::ptr::{self, NonNull};
diff --git a/library/std/src/io/error/repr_unpacked.rs b/library/std/src/io/error/repr_unpacked.rs
index 093fde337..dc8a95577 100644
--- a/library/std/src/io/error/repr_unpacked.rs
+++ b/library/std/src/io/error/repr_unpacked.rs
@@ -3,7 +3,6 @@
//! would have no benefit.
use super::{Custom, ErrorData, ErrorKind, RawOsError, SimpleMessage};
-use alloc::boxed::Box;
type Inner = ErrorData<Box<Custom>>;
diff --git a/library/std/src/io/mod.rs b/library/std/src/io/mod.rs
index 7d70a0bac..e3aa97374 100644
--- a/library/std/src/io/mod.rs
+++ b/library/std/src/io/mod.rs
@@ -397,12 +397,16 @@ where
}
}
-// This uses an adaptive system to extend the vector when it fills. We want to
-// avoid paying to allocate and zero a huge chunk of memory if the reader only
-// has 4 bytes while still making large reads if the reader does have a ton
-// of data to return. Simply tacking on an extra DEFAULT_BUF_SIZE space every
-// time is 4,500 times (!) slower than a default reservation size of 32 if the
-// reader has a very small amount of data to return.
+// Here we must serve many masters with conflicting goals:
+//
+// - avoid allocating unless necessary
+// - avoid overallocating if we know the exact size (#89165)
+// - avoid passing large buffers to readers that always initialize the free capacity if they perform short reads (#23815, #23820)
+// - pass large buffers to readers that do not initialize the spare capacity. this can amortize per-call overheads
+// - and finally pass not-too-small and not-too-large buffers to Windows read APIs, because they manage to suffer from both problems
+// at the same time, i.e. small reads suffer from syscall overhead while all reads incur an initialization cost
+// proportional to buffer size (#110650)
+//
pub(crate) fn default_read_to_end<R: Read + ?Sized>(
r: &mut R,
buf: &mut Vec<u8>,
@@ -412,20 +416,58 @@ pub(crate) fn default_read_to_end<R: Read + ?Sized>(
let start_cap = buf.capacity();
// Optionally limit the maximum bytes read on each iteration.
// This adds an arbitrary fiddle factor to allow for more data than we expect.
- let max_read_size =
- size_hint.and_then(|s| s.checked_add(1024)?.checked_next_multiple_of(DEFAULT_BUF_SIZE));
+ let mut max_read_size = size_hint
+ .and_then(|s| s.checked_add(1024)?.checked_next_multiple_of(DEFAULT_BUF_SIZE))
+ .unwrap_or(DEFAULT_BUF_SIZE);
let mut initialized = 0; // Extra initialized bytes from previous loop iteration
+
+ const PROBE_SIZE: usize = 32;
+
+ fn small_probe_read<R: Read + ?Sized>(r: &mut R, buf: &mut Vec<u8>) -> Result<usize> {
+ let mut probe = [0u8; PROBE_SIZE];
+
+ loop {
+ match r.read(&mut probe) {
+ Ok(n) => {
+ buf.extend_from_slice(&probe[..n]);
+ return Ok(n);
+ }
+ Err(ref e) if e.is_interrupted() => continue,
+ Err(e) => return Err(e),
+ }
+ }
+ }
+
+ // avoid inflating empty/small vecs before we have determined that there's anything to read
+ if (size_hint.is_none() || size_hint == Some(0)) && buf.capacity() - buf.len() < PROBE_SIZE {
+ let read = small_probe_read(r, buf)?;
+
+ if read == 0 {
+ return Ok(0);
+ }
+ }
+
loop {
+ if buf.len() == buf.capacity() && buf.capacity() == start_cap {
+ // The buffer might be an exact fit. Let's read into a probe buffer
+ // and see if it returns `Ok(0)`. If so, we've avoided an
+ // unnecessary doubling of the capacity. But if not, append the
+ // probe buffer to the primary buffer and let its capacity grow.
+ let read = small_probe_read(r, buf)?;
+
+ if read == 0 {
+ return Ok(buf.len() - start_len);
+ }
+ }
+
if buf.len() == buf.capacity() {
- buf.reserve(32); // buf is full, need more space
+ buf.reserve(PROBE_SIZE); // buf is full, need more space
}
let mut spare = buf.spare_capacity_mut();
- if let Some(size) = max_read_size {
- let len = cmp::min(spare.len(), size);
- spare = &mut spare[..len]
- }
+ let buf_len = cmp::min(spare.len(), max_read_size);
+ spare = &mut spare[..buf_len];
let mut read_buf: BorrowedBuf<'_> = spare.into();
// SAFETY: These bytes were initialized but not filled in the previous loop
@@ -434,42 +476,44 @@ pub(crate) fn default_read_to_end<R: Read + ?Sized>(
}
let mut cursor = read_buf.unfilled();
- match r.read_buf(cursor.reborrow()) {
- Ok(()) => {}
- Err(e) if e.is_interrupted() => continue,
- Err(e) => return Err(e),
+ loop {
+ match r.read_buf(cursor.reborrow()) {
+ Ok(()) => break,
+ Err(e) if e.is_interrupted() => continue,
+ Err(e) => return Err(e),
+ }
}
- if cursor.written() == 0 {
+ let unfilled_but_initialized = cursor.init_ref().len();
+ let bytes_read = cursor.written();
+ let was_fully_initialized = read_buf.init_len() == buf_len;
+
+ if bytes_read == 0 {
return Ok(buf.len() - start_len);
}
// store how much was initialized but not filled
- initialized = cursor.init_ref().len();
+ initialized = unfilled_but_initialized;
// SAFETY: BorrowedBuf's invariants mean this much memory is initialized.
unsafe {
- let new_len = read_buf.filled().len() + buf.len();
+ let new_len = bytes_read + buf.len();
buf.set_len(new_len);
}
- if buf.len() == buf.capacity() && buf.capacity() == start_cap {
- // The buffer might be an exact fit. Let's read into a probe buffer
- // and see if it returns `Ok(0)`. If so, we've avoided an
- // unnecessary doubling of the capacity. But if not, append the
- // probe buffer to the primary buffer and let its capacity grow.
- let mut probe = [0u8; 32];
-
- loop {
- match r.read(&mut probe) {
- Ok(0) => return Ok(buf.len() - start_len),
- Ok(n) => {
- buf.extend_from_slice(&probe[..n]);
- break;
- }
- Err(ref e) if e.is_interrupted() => continue,
- Err(e) => return Err(e),
- }
+ // Use heuristics to determine the max read size if no initial size hint was provided
+ if size_hint.is_none() {
+ // The reader is returning short reads but it doesn't call ensure_init().
+ // In that case we no longer need to restrict read sizes to avoid
+ // initialization costs.
+ if !was_fully_initialized {
+ max_read_size = usize::MAX;
+ }
+
+ // we have passed a larger buffer than previously and the
+ // reader still hasn't returned a short read
+ if buf_len >= max_read_size && bytes_read == buf_len {
+ max_read_size = max_read_size.saturating_mul(2);
}
}
}
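The probe-read idea above, reading a handful of bytes into a stack buffer before touching the vector's capacity, can be illustrated in isolation. A simplified sketch of the heuristic (assumptions of this edit: no `BorrowedBuf`, no size-hint or interrupt handling):

```rust
use std::io::{self, Read};

const PROBE_SIZE: usize = 32;

// Simplified: read once through a small stack probe so an empty reader
// never forces the Vec to allocate. The real default_read_to_end layers
// size hints, BorrowedBuf reuse and growth heuristics on top of this.
fn read_probed<R: Read>(r: &mut R, buf: &mut Vec<u8>) -> io::Result<usize> {
    let mut probe = [0u8; PROBE_SIZE];
    let n = r.read(&mut probe)?;
    buf.extend_from_slice(&probe[..n]);
    if n == 0 {
        return Ok(0); // EOF before any allocation happened
    }
    // Only now fall back to the allocating path for the rest.
    r.read_to_end(buf).map(|rest| n + rest)
}

fn main() -> io::Result<()> {
    let mut empty: &[u8] = &[];
    let mut out = Vec::new();
    assert_eq!(read_probed(&mut empty, &mut out)?, 0);
    assert_eq!(out.capacity(), 0); // untouched for an empty reader

    let mut data: &[u8] = b"hello world";
    assert_eq!(read_probed(&mut data, &mut out)?, 11);
    assert_eq!(out, b"hello world");
    Ok(())
}
```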
@@ -556,6 +600,10 @@ where
/// therefore, using something that implements [`BufRead`], such as
/// [`BufReader`], will be more efficient.
///
+/// Repeated calls to the reader use the same cursor, so for example
+/// calling `read_to_end` twice on a [`File`] will only return the file's
+/// contents once. It's recommended to first call `rewind()` in that case.
+///
/// # Examples
///
/// [`File`]s implement `Read`:
@@ -1957,6 +2005,36 @@ pub trait Seek {
fn stream_position(&mut self) -> Result<u64> {
self.seek(SeekFrom::Current(0))
}
+
+ /// Seeks relative to the current position.
+ ///
+ /// This is equivalent to `self.seek(SeekFrom::Current(offset))` but
+ /// doesn't return the new position which can allow some implementations
+ /// such as [`BufReader`] to perform more efficient seeks.
+ ///
+ /// # Example
+ ///
+ /// ```no_run
+ /// #![feature(seek_seek_relative)]
+ /// use std::{
+ /// io::{self, Seek},
+ /// fs::File,
+ /// };
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let mut f = File::open("foo.txt")?;
+ /// f.seek_relative(10)?;
+ /// assert_eq!(f.stream_position()?, 10);
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// [`BufReader`]: crate::io::BufReader
+ #[unstable(feature = "seek_seek_relative", issue = "117374")]
+ fn seek_relative(&mut self, offset: i64) -> Result<()> {
+ self.seek(SeekFrom::Current(offset))?;
+ Ok(())
+ }
}
/// Enumeration of possible methods to seek within an I/O object.
@@ -2014,6 +2092,28 @@ fn read_until<R: BufRead + ?Sized>(r: &mut R, delim: u8, buf: &mut Vec<u8>) -> R
}
}
+fn skip_until<R: BufRead + ?Sized>(r: &mut R, delim: u8) -> Result<usize> {
+ let mut read = 0;
+ loop {
+ let (done, used) = {
+ let available = match r.fill_buf() {
+ Ok(n) => n,
+ Err(ref e) if e.kind() == ErrorKind::Interrupted => continue,
+ Err(e) => return Err(e),
+ };
+ match memchr::memchr(delim, available) {
+ Some(i) => (true, i + 1),
+ None => (false, available.len()),
+ }
+ };
+ r.consume(used);
+ read += used;
+ if done || used == 0 {
+ return Ok(read);
+ }
+ }
+}
+
/// A `BufRead` is a type of `Read`er which has an internal buffer, allowing it
/// to perform extra ways of reading.
///
@@ -2217,6 +2317,68 @@ pub trait BufRead: Read {
read_until(self, byte, buf)
}
+ /// Skip all bytes until the delimiter `byte` or EOF is reached.
+ ///
+ /// This function will read (and discard) bytes from the underlying stream until the
+ /// delimiter or EOF is found.
+ ///
+ /// If successful, this function will return the total number of bytes read,
+ /// including the delimiter byte.
+ ///
+ /// This is useful for efficiently skipping data such as NUL-terminated strings
+ /// in binary file formats without buffering.
+ ///
+ /// This function is blocking and should be used carefully: it is possible for
+ /// an attacker to continuously send bytes without ever sending the delimiter
+ /// or EOF.
+ ///
+ /// # Errors
+ ///
+ /// This function will ignore all instances of [`ErrorKind::Interrupted`] and
+ /// will otherwise return any errors returned by [`fill_buf`].
+ ///
+ /// If an I/O error is encountered then the bytes skipped so far will
+ /// already have been consumed from the underlying reader and cannot be recovered.
+ ///
+ /// [`fill_buf`]: BufRead::fill_buf
+ ///
+ /// # Examples
+ ///
+ /// [`std::io::Cursor`][`Cursor`] is a type that implements `BufRead`. In
+ /// this example, we use [`Cursor`] to read some NUL-terminated information
+ /// about Ferris from a binary string, skipping the fun fact:
+ ///
+ /// ```
+ /// #![feature(bufread_skip_until)]
+ ///
+ /// use std::io::{self, BufRead};
+ ///
+ /// let mut cursor = io::Cursor::new(b"Ferris\0Likes long walks on the beach\0Crustacean\0");
+ ///
+ /// // read name
+ /// let mut name = Vec::new();
+ /// let num_bytes = cursor.read_until(b'\0', &mut name)
+ /// .expect("reading from cursor won't fail");
+ /// assert_eq!(num_bytes, 7);
+ /// assert_eq!(name, b"Ferris\0");
+ ///
+ /// // skip fun fact
+ /// let num_bytes = cursor.skip_until(b'\0')
+ /// .expect("reading from cursor won't fail");
+ /// assert_eq!(num_bytes, 30);
+ ///
+ /// // read animal type
+ /// let mut animal = Vec::new();
+ /// let num_bytes = cursor.read_until(b'\0', &mut animal)
+ /// .expect("reading from cursor won't fail");
+ /// assert_eq!(num_bytes, 11);
+ /// assert_eq!(animal, b"Crustacean\0");
+ /// ```
+ #[unstable(feature = "bufread_skip_until", issue = "111735")]
+ fn skip_until(&mut self, byte: u8) -> Result<usize> {
+ skip_until(self, byte)
+ }
+
/// Read all bytes until a newline (the `0xA` byte) is reached, and append
/// them to the provided `String` buffer.
///
diff --git a/library/std/src/io/tests.rs b/library/std/src/io/tests.rs
index 6d30f5e6c..bda5b721a 100644
--- a/library/std/src/io/tests.rs
+++ b/library/std/src/io/tests.rs
@@ -26,6 +26,36 @@ fn read_until() {
}
#[test]
+fn skip_until() {
+ let bytes: &[u8] = b"read\0ignore\0read\0ignore\0read\0ignore\0";
+ let mut reader = BufReader::new(bytes);
+
+ // read from the bytes, alternating between
+ // consuming `read\0`s and skipping `ignore\0`s
+ loop {
+ // consume `read\0`
+ let mut out = Vec::new();
+ let read = reader.read_until(0, &mut out).unwrap();
+ if read == 0 {
+ // eof
+ break;
+ } else {
+ assert_eq!(out, b"read\0");
+ assert_eq!(read, b"read\0".len());
+ }
+
+ // skip past `ignore\0`
+ let skipped = reader.skip_until(0).unwrap();
+ assert_eq!(skipped, b"ignore\0".len());
+ }
+
+ // ensure we are at the end of the byte slice and that we can skip no further
+ // also ensure skip_until matches the behavior of read_until at EOF
+ let skipped = reader.skip_until(0).unwrap();
+ assert_eq!(skipped, 0);
+}
+
+#[test]
fn split() {
let buf = Cursor::new(&b"12"[..]);
let mut s = buf.split(b'3');
diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs
index 425890122..34b381b6c 100644
--- a/library/std/src/lib.rs
+++ b/library/std/src/lib.rs
@@ -227,7 +227,7 @@
test(no_crate_inject, attr(deny(warnings))),
test(attr(allow(dead_code, deprecated, unused_variables, unused_mut)))
)]
-#![cfg_attr(not(bootstrap), doc(rust_logo))]
+#![doc(rust_logo)]
#![doc(cfg_hide(
not(test),
not(any(test, bootstrap)),
@@ -308,6 +308,7 @@
//
// Library features (core):
// tidy-alphabetical-start
+#![feature(c_str_literals)]
#![feature(char_internals)]
#![feature(core_intrinsics)]
#![feature(core_io_borrowed_buf)]
@@ -317,6 +318,7 @@
#![feature(error_iter)]
#![feature(exact_size_is_empty)]
#![feature(exclusive_wrapper)]
+#![feature(exposed_provenance)]
#![feature(extend_one)]
#![feature(float_gamma)]
#![feature(float_minimum_maximum)]
@@ -340,6 +342,7 @@
#![feature(round_ties_even)]
#![feature(slice_internals)]
#![feature(slice_ptr_get)]
+#![feature(slice_range)]
#![feature(std_internals)]
#![feature(str_internals)]
#![feature(strict_provenance)]
@@ -494,8 +497,6 @@ pub use core::convert;
pub use core::default;
#[stable(feature = "futures_api", since = "1.36.0")]
pub use core::future;
-#[stable(feature = "rust1", since = "1.0.0")]
-pub use core::hash;
#[stable(feature = "core_hint", since = "1.27.0")]
pub use core::hint;
#[stable(feature = "i128", since = "1.26.0")]
@@ -565,6 +566,7 @@ pub mod env;
pub mod error;
pub mod ffi;
pub mod fs;
+pub mod hash;
pub mod io;
pub mod net;
pub mod num;
@@ -583,9 +585,10 @@ pub mod time;
#[unstable(feature = "portable_simd", issue = "86656")]
mod std_float;
-#[doc = include_str!("../../portable-simd/crates/core_simd/src/core_simd_docs.md")]
#[unstable(feature = "portable_simd", issue = "86656")]
pub mod simd {
+ #![doc = include_str!("../../portable-simd/crates/core_simd/src/core_simd_docs.md")]
+
#[doc(inline)]
pub use crate::std_float::StdFloat;
#[doc(inline)]
@@ -716,7 +719,7 @@ pub(crate) mod test_helpers {
#[track_caller]
pub(crate) fn test_rng() -> rand_xorshift::XorShiftRng {
use core::hash::{BuildHasher, Hash, Hasher};
- let mut hasher = crate::collections::hash_map::RandomState::new().build_hasher();
+ let mut hasher = crate::hash::RandomState::new().build_hasher();
core::panic::Location::caller().hash(&mut hasher);
let hc64 = hasher.finish();
let seed_vec = hc64.to_le_bytes().into_iter().chain(0u8..8).collect::<Vec<u8>>();
diff --git a/library/std/src/macros.rs b/library/std/src/macros.rs
index 34b8b6b97..58df83bd7 100644
--- a/library/std/src/macros.rs
+++ b/library/std/src/macros.rs
@@ -355,15 +355,15 @@ macro_rules! dbg {
// `$val` expression could be a block (`{ .. }`), in which case the `eprintln!`
// will be malformed.
() => {
- $crate::eprintln!("[{}:{}]", $crate::file!(), $crate::line!())
+ $crate::eprintln!("[{}:{}:{}]", $crate::file!(), $crate::line!(), $crate::column!())
};
($val:expr $(,)?) => {
// Use of `match` here is intentional because it affects the lifetimes
// of temporaries - https://stackoverflow.com/a/48732525/1063961
match $val {
tmp => {
- $crate::eprintln!("[{}:{}] {} = {:#?}",
- $crate::file!(), $crate::line!(), $crate::stringify!($val), &tmp);
+ $crate::eprintln!("[{}:{}:{}] {} = {:#?}",
+ $crate::file!(), $crate::line!(), $crate::column!(), $crate::stringify!($val), &tmp);
tmp
}
}
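With the column added, `dbg!` output gains a third position field; for example (the exact numbers depend on where the macro is invoked):

```rust
fn main() {
    let a = 2;
    // Before this change: [src/main.rs:4] a * 2 = 4
    // After this change:  [src/main.rs:4:13] a * 2 = 4
    let b = dbg!(a * 2);
    assert_eq!(b, 4);
}
```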
diff --git a/library/std/src/net/tcp/tests.rs b/library/std/src/net/tcp/tests.rs
index db367cfa0..b24b851a6 100644
--- a/library/std/src/net/tcp/tests.rs
+++ b/library/std/src/net/tcp/tests.rs
@@ -1,6 +1,6 @@
use crate::fmt;
use crate::io::prelude::*;
-use crate::io::{BorrowedBuf, ErrorKind, IoSlice, IoSliceMut};
+use crate::io::{BorrowedBuf, IoSlice, IoSliceMut};
use crate::mem::MaybeUninit;
use crate::net::test::{next_test_ip4, next_test_ip6};
use crate::net::*;
diff --git a/library/std/src/net/udp/tests.rs b/library/std/src/net/udp/tests.rs
index 892fe2ba8..0cf993664 100644
--- a/library/std/src/net/udp/tests.rs
+++ b/library/std/src/net/udp/tests.rs
@@ -1,4 +1,3 @@
-use crate::io::ErrorKind;
use crate::net::test::{next_test_ip4, next_test_ip6};
use crate::net::*;
use crate::sync::mpsc::channel;
diff --git a/library/std/src/os/l4re/raw.rs b/library/std/src/os/l4re/raw.rs
index 12c029328..8fb6e99ec 100644
--- a/library/std/src/os/l4re/raw.rs
+++ b/library/std/src/os/l4re/raw.rs
@@ -31,7 +31,6 @@ pub use self::arch::{blkcnt_t, blksize_t, ino_t, nlink_t, off_t, stat, time_t};
target_arch = "powerpc",
target_arch = "sparc",
target_arch = "arm",
- target_arch = "asmjs",
target_arch = "wasm32"
))]
mod arch {
diff --git a/library/std/src/os/linux/process.rs b/library/std/src/os/linux/process.rs
index 2b3ff76d7..51af432d0 100644
--- a/library/std/src/os/linux/process.rs
+++ b/library/std/src/os/linux/process.rs
@@ -152,6 +152,12 @@ pub trait CommandExt: Sealed {
/// in a guaranteed race-free manner (e.g. if the `clone3` system call
/// is supported). Otherwise, [`pidfd`] will return an error.
///
+ /// If a pidfd has been successfully created and not been taken from the `Child`
+ /// then calls to `kill()`, `wait()` and `try_wait()` will use the pidfd
+ /// instead of the pid. This can prevent pid recycling races, e.g.
+ /// those caused by rogue libraries in the same process prematurely reaping
+ /// zombie children via `waitpid(-1, ...)` calls.
+ ///
/// [`Command`]: process::Command
/// [`Child`]: process::Child
/// [`pidfd`]: fn@ChildExt::pidfd
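For context, the pidfd path described above is opted into via `create_pidfd` on `CommandExt` (behind the unstable `linux_pidfd` feature). A hedged sketch of how a caller would reach it, Linux-only and dependent on `clone3` support:

```rust
#![feature(linux_pidfd)]
use std::os::linux::process::{ChildExt, CommandExt};
use std::process::Command;

fn main() -> std::io::Result<()> {
    let mut child = Command::new("sleep").arg("1").create_pidfd(true).spawn()?;

    // While the pidfd stays attached to `child`, wait()/kill()/try_wait()
    // can use it internally, avoiding pid-reuse races.
    if child.pidfd().is_ok() {
        println!("pidfd acquired; wait() will use it");
    }
    child.wait()?;
    Ok(())
}
```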
diff --git a/library/std/src/os/linux/raw.rs b/library/std/src/os/linux/raw.rs
index a568f9b26..c29dd62bc 100644
--- a/library/std/src/os/linux/raw.rs
+++ b/library/std/src/os/linux/raw.rs
@@ -31,7 +31,6 @@ pub use self::arch::{blkcnt_t, blksize_t, ino_t, nlink_t, off_t, stat, time_t};
target_arch = "powerpc",
target_arch = "sparc",
target_arch = "arm",
- target_arch = "asmjs",
target_arch = "wasm32"
))]
mod arch {
diff --git a/library/std/src/os/solid/io.rs b/library/std/src/os/solid/io.rs
index f82034663..19b4fe220 100644
--- a/library/std/src/os/solid/io.rs
+++ b/library/std/src/os/solid/io.rs
@@ -1,8 +1,55 @@
//! SOLID-specific extensions to general I/O primitives
+//!
+//! Just like raw pointers, raw SOLID Sockets file descriptors point to
+//! resources with dynamic lifetimes, and they can dangle if they outlive their
+//! resources or be forged if they're created from invalid values.
+//!
+//! This module provides three types for representing raw file descriptors
+//! with different ownership properties: raw, borrowed, and owned, which are
+//! analogous to types used for representing pointers:
+//!
+//! | Type | Analogous to |
+//! | ------------------ | ------------ |
+//! | [`RawFd`] | `*const _` |
+//! | [`BorrowedFd<'a>`] | `&'a _` |
+//! | [`OwnedFd`] | `Box<_>` |
+//!
+//! Like raw pointers, `RawFd` values are primitive values. And in new code,
+//! they should be considered unsafe to do I/O on (analogous to dereferencing
+//! them). Rust did not always provide this guidance, so existing code in the
+//! Rust ecosystem often doesn't mark `RawFd` usage as unsafe. Once the
+//! `io_safety` feature is stable, libraries will be encouraged to migrate,
+//! either by adding `unsafe` to APIs that dereference `RawFd` values, or by
+//! using `BorrowedFd` or `OwnedFd` instead.
+//!
+//! Like references, `BorrowedFd` values are tied to a lifetime, to ensure
+//! that they don't outlive the resource they point to. These are safe to
+//! use. `BorrowedFd` values may be used in APIs which provide safe access to
+//! any system call except for:
+//!
+//! - `close`, because that would end the dynamic lifetime of the resource
+//! without ending the lifetime of the file descriptor.
+//!
+//! - `dup2`/`dup3`, in the second argument, because this argument is
+//! closed and assigned a new resource, which may break the assumptions of
+//! other code using that file descriptor.
+//!
+//! `BorrowedFd` values may be used in APIs which provide safe access to `dup`
+//! system calls, so types implementing `AsFd` or `From<OwnedFd>` should not
+//! assume they always have exclusive access to the underlying file
+//! description.
+//!
+//! Like boxes, `OwnedFd` values conceptually own the resource they point to,
+//! and free (close) it when they are dropped.
+//!
+//! [`BorrowedFd<'a>`]: crate::os::solid::io::BorrowedFd
#![deny(unsafe_op_in_unsafe_fn)]
#![unstable(feature = "solid_ext", issue = "none")]
+use crate::fmt;
+use crate::marker::PhantomData;
+use crate::mem::forget;
use crate::net;
use crate::sys;
use crate::sys_common::{self, AsInner, FromInner, IntoInner};
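
To make the table in the new module docs concrete, here is a minimal sketch (not part of the patch, and it only compiles for SOLID targets) of an API written against these types: it borrows a descriptor safely instead of trafficking in raw integers:

```rust
use std::os::solid::io::{AsFd, AsRawFd, BorrowedFd};

// Safe wrapper: the borrow ties the descriptor to the socket's lifetime,
// so it cannot dangle the way a bare `RawFd` could.
fn peer_fd_number(socket: &impl AsFd) -> i32 {
    let fd: BorrowedFd<'_> = socket.as_fd();
    fd.as_raw_fd() // extracting the raw value is safe; doing I/O on it is not
}
```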
@@ -10,6 +57,253 @@ use crate::sys_common::{self, AsInner, FromInner, IntoInner};
/// Raw file descriptors.
pub type RawFd = i32;
+/// A borrowed SOLID Sockets file descriptor.
+///
+/// This has a lifetime parameter to tie it to the lifetime of something that
+/// owns the socket.
+///
+/// This uses `repr(transparent)` and has the representation of a host file
+/// descriptor, so it can be used in FFI in places where a socket is passed
+/// as an argument, is not captured or consumed, and never has the value
+/// `SOLID_NET_INVALID_FD`.
+///
+/// This type's `.to_owned()` implementation returns another `BorrowedFd`
+/// rather than an `OwnedFd`. It just makes a trivial copy of the raw
+/// socket, which is then borrowed under the same lifetime.
+#[derive(Copy, Clone)]
+#[repr(transparent)]
+#[rustc_layout_scalar_valid_range_start(0)]
+// This is -2, in two's complement. -1 is `SOLID_NET_INVALID_FD`.
+#[rustc_layout_scalar_valid_range_end(0xFF_FF_FF_FE)]
+#[rustc_nonnull_optimization_guaranteed]
+pub struct BorrowedFd<'socket> {
+ fd: RawFd,
+ _phantom: PhantomData<&'socket OwnedFd>,
+}
+
+/// An owned SOLID Sockets file descriptor.
+///
+/// This closes the file descriptor on drop.
+///
+/// This uses `repr(transparent)` and has the representation of a host file
+/// descriptor, so it can be used in FFI in places where a socket is passed
+/// as an argument, is not captured or consumed, and never has the value
+/// `SOLID_NET_INVALID_FD`.
+#[repr(transparent)]
+#[rustc_layout_scalar_valid_range_start(0)]
+// This is -2, in two's complement. -1 is `SOLID_NET_INVALID_FD`.
+#[rustc_layout_scalar_valid_range_end(0xFF_FF_FF_FE)]
+#[rustc_nonnull_optimization_guaranteed]
+pub struct OwnedFd {
+ fd: RawFd,
+}
+
+impl BorrowedFd<'_> {
+ /// Return a `BorrowedFd` holding the given raw file descriptor.
+ ///
+ /// # Safety
+ ///
+ /// The resource pointed to by `fd` must remain open for the duration of
+ /// the returned `BorrowedFd`, and it must not have the value
+ /// `SOLID_NET_INVALID_FD`.
+ #[inline]
+ pub const unsafe fn borrow_raw(fd: RawFd) -> Self {
+ assert!(fd != -1 as RawFd);
+ // SAFETY: we just asserted that the value is in the valid range and
+ // isn't `-1` (the only value bigger than `0xFF_FF_FF_FE` unsigned)
+ unsafe { Self { fd, _phantom: PhantomData } }
+ }
+}
+
+impl OwnedFd {
+ /// Creates a new `OwnedFd` instance that shares the same underlying file
+ /// description as the existing `OwnedFd` instance.
+ pub fn try_clone(&self) -> crate::io::Result<Self> {
+ self.as_fd().try_clone_to_owned()
+ }
+}
+
+impl BorrowedFd<'_> {
+ /// Creates a new `OwnedFd` instance that shares the same underlying file
+ /// description as the existing `BorrowedFd` instance.
+ pub fn try_clone_to_owned(&self) -> crate::io::Result<OwnedFd> {
+ let fd = sys::net::cvt(unsafe { sys::net::netc::dup(self.as_raw_fd()) })?;
+ Ok(unsafe { OwnedFd::from_raw_fd(fd) })
+ }
+}
+
+impl AsRawFd for BorrowedFd<'_> {
+ #[inline]
+ fn as_raw_fd(&self) -> RawFd {
+ self.fd
+ }
+}
+
+impl AsRawFd for OwnedFd {
+ #[inline]
+ fn as_raw_fd(&self) -> RawFd {
+ self.fd
+ }
+}
+
+impl IntoRawFd for OwnedFd {
+ #[inline]
+ fn into_raw_fd(self) -> RawFd {
+ let fd = self.fd;
+ forget(self);
+ fd
+ }
+}
+
+impl FromRawFd for OwnedFd {
+ /// Constructs a new instance of `Self` from the given raw file descriptor.
+ ///
+ /// # Safety
+ ///
+ /// The resource pointed to by `fd` must be open and suitable for assuming
+ /// ownership. The resource must not require any cleanup other than `close`.
+ #[inline]
+ unsafe fn from_raw_fd(fd: RawFd) -> Self {
+ assert_ne!(fd, -1 as RawFd);
+ // SAFETY: we just asserted that the value is in the valid range and
+ // isn't `-1` (the only value bigger than `0xFF_FF_FF_FE` unsigned)
+ unsafe { Self { fd } }
+ }
+}
+
+impl Drop for OwnedFd {
+ #[inline]
+ fn drop(&mut self) {
+ unsafe { sys::net::netc::close(self.fd) };
+ }
+}
+
+impl fmt::Debug for BorrowedFd<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("BorrowedFd").field("fd", &self.fd).finish()
+ }
+}
+
+impl fmt::Debug for OwnedFd {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("OwnedFd").field("fd", &self.fd).finish()
+ }
+}
+
+macro_rules! impl_is_terminal {
+ ($($t:ty),*$(,)?) => {$(
+ #[unstable(feature = "sealed", issue = "none")]
+ impl crate::sealed::Sealed for $t {}
+
+ #[stable(feature = "is_terminal", since = "1.70.0")]
+ impl crate::io::IsTerminal for $t {
+ #[inline]
+ fn is_terminal(&self) -> bool {
+ crate::sys::io::is_terminal(self)
+ }
+ }
+ )*}
+}
+
+impl_is_terminal!(BorrowedFd<'_>, OwnedFd);
+
+/// A trait to borrow the SOLID Sockets file descriptor from an underlying
+/// object.
+pub trait AsFd {
+ /// Borrows the file descriptor.
+ fn as_fd(&self) -> BorrowedFd<'_>;
+}
+
+impl<T: AsFd> AsFd for &T {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ T::as_fd(self)
+ }
+}
+
+impl<T: AsFd> AsFd for &mut T {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ T::as_fd(self)
+ }
+}
+
+impl AsFd for BorrowedFd<'_> {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ *self
+ }
+}
+
+impl AsFd for OwnedFd {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ // Safety: `OwnedFd` and `BorrowedFd` have the same validity
+ // invariants, and the `BorrowedFd` is bounded by the lifetime
+ // of `&self`.
+ unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) }
+ }
+}
+
+macro_rules! impl_owned_fd_traits {
+ ($($t:ident)*) => {$(
+ impl AsFd for net::$t {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ self.as_inner().socket().as_fd()
+ }
+ }
+
+ impl From<net::$t> for OwnedFd {
+ #[inline]
+ fn from(socket: net::$t) -> OwnedFd {
+ socket.into_inner().into_socket().into_inner()
+ }
+ }
+
+ impl From<OwnedFd> for net::$t {
+ #[inline]
+ fn from(owned_fd: OwnedFd) -> Self {
+ Self::from_inner(FromInner::from_inner(FromInner::from_inner(owned_fd)))
+ }
+ }
+ )*};
+}
+impl_owned_fd_traits! { TcpStream TcpListener UdpSocket }
+
+/// This impl allows implementing traits that require `AsFd` on Arc.
+/// ```
+/// # #[cfg(target_os = "solid_asp3")] mod group_cfg {
+/// # use std::os::solid::io::AsFd;
+/// use std::net::UdpSocket;
+/// use std::sync::Arc;
+///
+/// trait MyTrait: AsFd {}
+/// impl MyTrait for Arc<UdpSocket> {}
+/// impl MyTrait for Box<UdpSocket> {}
+/// # }
+/// ```
+impl<T: AsFd> AsFd for crate::sync::Arc<T> {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ (**self).as_fd()
+ }
+}
+
+impl<T: AsFd> AsFd for crate::rc::Rc<T> {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ (**self).as_fd()
+ }
+}
+
+impl<T: AsFd> AsFd for Box<T> {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ (**self).as_fd()
+ }
+}
+
/// A trait to extract the raw SOLID Sockets file descriptor from an underlying
/// object.
pub trait AsRawFd {
@@ -84,7 +378,7 @@ macro_rules! impl_as_raw_fd {
impl AsRawFd for net::$t {
#[inline]
fn as_raw_fd(&self) -> RawFd {
- *self.as_inner().socket().as_inner()
+ self.as_inner().socket().as_raw_fd()
}
}
)*};
@@ -97,7 +391,7 @@ macro_rules! impl_from_raw_fd {
impl FromRawFd for net::$t {
#[inline]
unsafe fn from_raw_fd(fd: RawFd) -> net::$t {
- let socket = sys::net::Socket::from_inner(fd);
+ let socket = unsafe { sys::net::Socket::from_raw_fd(fd) };
net::$t::from_inner(sys_common::net::$t::from_inner(socket))
}
}
@@ -111,7 +405,7 @@ macro_rules! impl_into_raw_fd {
impl IntoRawFd for net::$t {
#[inline]
fn into_raw_fd(self) -> RawFd {
- self.into_inner().into_socket().into_inner()
+ self.into_inner().into_socket().into_raw_fd()
}
}
)*};
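
Taken together, the `From` impls generated by `impl_owned_fd_traits!` allow lossless round-trips between the socket types and `OwnedFd`; a hedged sketch (SOLID-only; the descriptor stays open across both conversions):

```rust
use std::net::TcpStream;
use std::os::solid::io::OwnedFd;

fn demote_and_restore(stream: TcpStream) -> TcpStream {
    let fd: OwnedFd = stream.into(); // ownership moves; nothing is closed
    TcpStream::from(fd)              // and moves back, still open
}
```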
diff --git a/library/std/src/os/solid/mod.rs b/library/std/src/os/solid/mod.rs
index 4328ba7c3..0bb83c73d 100644
--- a/library/std/src/os/solid/mod.rs
+++ b/library/std/src/os/solid/mod.rs
@@ -13,5 +13,5 @@ pub mod prelude {
pub use super::ffi::{OsStrExt, OsStringExt};
#[doc(no_inline)]
#[stable(feature = "rust1", since = "1.0.0")]
- pub use super::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
+ pub use super::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
}
diff --git a/library/std/src/os/windows/io/handle.rs b/library/std/src/os/windows/io/handle.rs
index 274af08a3..b0540872c 100644
--- a/library/std/src/os/windows/io/handle.rs
+++ b/library/std/src/os/windows/io/handle.rs
@@ -504,7 +504,7 @@ impl AsHandle for fs::File {
impl From<fs::File> for OwnedHandle {
#[inline]
fn from(file: fs::File) -> OwnedHandle {
- file.into_inner().into_inner().into_inner().into()
+ file.into_inner().into_inner().into_inner()
}
}
diff --git a/library/std/src/os/windows/io/socket.rs b/library/std/src/os/windows/io/socket.rs
index c80b9e284..65f161f32 100644
--- a/library/std/src/os/windows/io/socket.rs
+++ b/library/std/src/os/windows/io/socket.rs
@@ -127,7 +127,7 @@ impl BorrowedSocket<'_> {
info.iAddressFamily,
info.iSocketType,
info.iProtocol,
- &mut info,
+ &info,
0,
sys::c::WSA_FLAG_OVERLAPPED | sys::c::WSA_FLAG_NO_HANDLE_INHERIT,
)
@@ -147,7 +147,7 @@ impl BorrowedSocket<'_> {
info.iAddressFamily,
info.iSocketType,
info.iProtocol,
- &mut info,
+ &info,
0,
sys::c::WSA_FLAG_OVERLAPPED,
)
diff --git a/library/std/src/os/windows/process.rs b/library/std/src/os/windows/process.rs
index d00e79476..5bf0154ea 100644
--- a/library/std/src/os/windows/process.rs
+++ b/library/std/src/os/windows/process.rs
@@ -347,7 +347,7 @@ impl ChildExt for process::Child {
///
/// This trait is sealed: it cannot be implemented outside the standard library.
/// This is so that future additional methods are not breaking changes.
-#[unstable(feature = "windows_process_exit_code_from", issue = "none")]
+#[unstable(feature = "windows_process_exit_code_from", issue = "111688")]
pub trait ExitCodeExt: Sealed {
/// Creates a new `ExitCode` from the raw underlying `u32` return value of
/// a process.
@@ -355,11 +355,11 @@ pub trait ExitCodeExt: Sealed {
/// The exit code should not be 259, as this conflicts with the `STILL_ACTIVE`
/// macro returned from the `GetExitCodeProcess` function to signal that the
/// process has yet to run to completion.
- #[unstable(feature = "windows_process_exit_code_from", issue = "none")]
+ #[unstable(feature = "windows_process_exit_code_from", issue = "111688")]
fn from_raw(raw: u32) -> Self;
}
-#[unstable(feature = "windows_process_exit_code_from", issue = "none")]
+#[unstable(feature = "windows_process_exit_code_from", issue = "111688")]
impl ExitCodeExt for process::ExitCode {
fn from_raw(raw: u32) -> Self {
process::ExitCode::from_inner(From::from(raw))
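
A hedged sketch of the now-tracked API (unstable `windows_process_exit_code_from` feature, tracking issue 111688; the raw value is arbitrary):

```rust
#![feature(windows_process_exit_code_from)]
use std::os::windows::process::ExitCodeExt;
use std::process::ExitCode;

fn main() -> ExitCode {
    // Forward a raw Win32 exit code (must not be 259 / STILL_ACTIVE).
    ExitCode::from_raw(0x0000_0001)
}
```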
diff --git a/library/std/src/panicking.rs b/library/std/src/panicking.rs
index 55f4917a9..66b4ec37c 100644
--- a/library/std/src/panicking.rs
+++ b/library/std/src/panicking.rs
@@ -588,7 +588,7 @@ pub fn panicking() -> bool {
}
/// Entry point of panics from the core crate (`panic_impl` lang item).
-#[cfg(not(test))]
+#[cfg(not(any(test, doctest)))]
#[panic_handler]
pub fn begin_panic_handler(info: &PanicInfo<'_>) -> ! {
struct FormatStringPayload<'a> {
@@ -669,7 +669,7 @@ pub fn begin_panic_handler(info: &PanicInfo<'_>) -> ! {
/// panic!() and assert!(). In particular, this is the only entry point that supports
/// arbitrary payloads, not just format strings.
#[unstable(feature = "libstd_sys_internals", reason = "used by the panic! macro", issue = "none")]
-#[cfg_attr(not(test), lang = "begin_panic")]
+#[cfg_attr(not(any(test, doctest)), lang = "begin_panic")]
// lang item for CTFE panic support
// never inline unless panic_immediate_abort to avoid code
// bloat at the call sites as much as possible
diff --git a/library/std/src/path/tests.rs b/library/std/src/path/tests.rs
index f12ffbf2e..fde6ed4f0 100644
--- a/library/std/src/path/tests.rs
+++ b/library/std/src/path/tests.rs
@@ -1,10 +1,7 @@
use super::*;
-use crate::collections::hash_map::DefaultHasher;
use crate::collections::{BTreeSet, HashSet};
-use crate::hash::Hasher;
-use crate::rc::Rc;
-use crate::sync::Arc;
+use crate::hash::DefaultHasher;
use core::hint::black_box;
#[allow(unknown_lints, unused_macro_rules)]
@@ -1461,8 +1458,7 @@ fn test_eq_receivers() {
#[test]
pub fn test_compare() {
- use crate::collections::hash_map::DefaultHasher;
- use crate::hash::{Hash, Hasher};
+ use crate::hash::{DefaultHasher, Hash, Hasher};
fn hash<T: Hash>(t: T) -> u64 {
let mut s = DefaultHasher::new();
diff --git a/library/std/src/process.rs b/library/std/src/process.rs
index af6bef1a7..4a7f5d8e0 100644
--- a/library/std/src/process.rs
+++ b/library/std/src/process.rs
@@ -1108,7 +1108,7 @@ impl fmt::Debug for Command {
///
/// The default format approximates a shell invocation of the program along with its
/// arguments. It does not include most of the other command properties. The output is not guaranteed to work
- /// (e.g. due to lack of shell-escaping or differences in path resolution)
+ /// (e.g. due to lack of shell-escaping or differences in path resolution).
/// On some platforms you can use [the alternate syntax] to show more fields.
///
/// Note that the debug implementation is platform-specific.
@@ -2311,7 +2311,7 @@ pub fn id() -> u32 {
/// of the `main` function, this trait is likely to be available only on
/// standard library's runtime for convenience. Other runtimes are not required
/// to provide similar functionality.
-#[cfg_attr(not(test), lang = "termination")]
+#[cfg_attr(not(any(test, doctest)), lang = "termination")]
#[stable(feature = "termination_trait_lib", since = "1.61.0")]
#[rustc_on_unimplemented(on(
cause = "MainFunctionType",
diff --git a/library/std/src/rt.rs b/library/std/src/rt.rs
index 5c83f72f3..335944845 100644
--- a/library/std/src/rt.rs
+++ b/library/std/src/rt.rs
@@ -154,8 +154,7 @@ fn lang_start_internal(
ret_code
}
-#[cfg(not(test))]
-#[inline(never)]
+#[cfg(not(any(test, doctest)))]
#[lang = "start"]
fn lang_start<T: crate::process::Termination + 'static>(
main: fn() -> T,
diff --git a/library/std/src/sync/mpsc/sync_tests.rs b/library/std/src/sync/mpsc/sync_tests.rs
index 632709fd9..945de280f 100644
--- a/library/std/src/sync/mpsc/sync_tests.rs
+++ b/library/std/src/sync/mpsc/sync_tests.rs
@@ -3,7 +3,6 @@ use crate::env;
use crate::rc::Rc;
use crate::sync::mpmc::SendTimeoutError;
use crate::thread;
-use crate::time::Duration;
pub fn stress_factor() -> usize {
match env::var("RUST_TEST_STRESS") {
diff --git a/library/std/src/sync/mpsc/tests.rs b/library/std/src/sync/mpsc/tests.rs
index 1e52a4a70..ac1a804cf 100644
--- a/library/std/src/sync/mpsc/tests.rs
+++ b/library/std/src/sync/mpsc/tests.rs
@@ -1,7 +1,6 @@
use super::*;
use crate::env;
use crate::thread;
-use crate::time::{Duration, Instant};
pub fn stress_factor() -> usize {
match env::var("RUST_TEST_STRESS") {
diff --git a/library/std/src/sync/mutex.rs b/library/std/src/sync/mutex.rs
index b4ae6b7e0..0c001d7c2 100644
--- a/library/std/src/sync/mutex.rs
+++ b/library/std/src/sync/mutex.rs
@@ -146,7 +146,7 @@ use crate::sys::locks as sys;
/// let result = data.iter().fold(0, |acc, x| acc + x * 2);
/// data.push(result);
/// // We drop the `data` explicitly because it's not necessary anymore and the
-/// // thread still has work to do. This allow other threads to start working on
+/// // thread still has work to do. This allows other threads to start working on
/// // the data immediately, without waiting for the rest of the unrelated work
/// // to be done here.
/// //
diff --git a/library/std/src/sync/once_lock.rs b/library/std/src/sync/once_lock.rs
index f49630907..b8873a3b5 100644
--- a/library/std/src/sync/once_lock.rs
+++ b/library/std/src/sync/once_lock.rs
@@ -13,22 +13,65 @@ use crate::sync::Once;
///
/// # Examples
///
+/// Using `OnceCell` to store a function’s previously computed value (a.k.a.
+/// ‘lazy static’ or ‘memoizing’):
+///
/// ```
/// use std::sync::OnceLock;
///
-/// static CELL: OnceLock<String> = OnceLock::new();
+/// struct DeepThought {
+/// answer: String,
+/// }
+///
+/// impl DeepThought {
+/// # fn great_question() -> String {
+/// # "42".to_string()
+/// # }
+/// #
+/// fn new() -> Self {
+/// Self {
+/// // M3 Ultra takes about 16 million years in --release config
+/// answer: Self::great_question(),
+/// }
+/// }
+/// }
+///
+/// fn computation() -> &'static DeepThought {
+/// // n.b. static items do not call [`Drop`] on program termination, so if
+/// // [`DeepThought`] impls Drop, that will not be used for this instance.
+/// static COMPUTATION: OnceLock<DeepThought> = OnceLock::new();
+/// COMPUTATION.get_or_init(|| DeepThought::new())
+/// }
+///
+/// // The `DeepThought` is built, stored in the `OnceLock`, and returned.
+/// let _ = computation().answer;
+/// // The `DeepThought` is retrieved from the `OnceLock` and returned.
+/// let _ = computation().answer;
+/// ```
+///
+/// Writing to a `OnceLock` from a separate thread:
+///
+/// ```
+/// use std::sync::OnceLock;
+///
+/// static CELL: OnceLock<usize> = OnceLock::new();
+///
+/// // `OnceLock` has not been written to yet.
/// assert!(CELL.get().is_none());
///
+/// // Spawn a thread and write to `OnceLock`.
/// std::thread::spawn(|| {
-/// let value: &String = CELL.get_or_init(|| {
-/// "Hello, World!".to_string()
-/// });
-/// assert_eq!(value, "Hello, World!");
-/// }).join().unwrap();
+/// let value = CELL.get_or_init(|| 12345);
+/// assert_eq!(value, &12345);
+/// })
+/// .join()
+/// .unwrap();
///
-/// let value: Option<&String> = CELL.get();
-/// assert!(value.is_some());
-/// assert_eq!(value.unwrap().as_str(), "Hello, World!");
+/// // `OnceLock` now contains the value.
+/// assert_eq!(
+/// CELL.get(),
+/// Some(&12345),
+/// );
/// ```
#[stable(feature = "once_cell", since = "1.70.0")]
pub struct OnceLock<T> {
diff --git a/library/std/src/sync/rwlock.rs b/library/std/src/sync/rwlock.rs
index ac7c800ff..5d8967bfb 100644
--- a/library/std/src/sync/rwlock.rs
+++ b/library/std/src/sync/rwlock.rs
@@ -532,7 +532,7 @@ impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> {
}
#[stable(feature = "std_debug", since = "1.16.0")]
-impl<T: fmt::Debug> fmt::Debug for RwLockReadGuard<'_, T> {
+impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLockReadGuard<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
(**self).fmt(f)
}
@@ -546,7 +546,7 @@ impl<T: ?Sized + fmt::Display> fmt::Display for RwLockReadGuard<'_, T> {
}
#[stable(feature = "std_debug", since = "1.16.0")]
-impl<T: fmt::Debug> fmt::Debug for RwLockWriteGuard<'_, T> {
+impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLockWriteGuard<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
(**self).fmt(f)
}
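
What the relaxed `?Sized` bound enables, sketched: a guard over an unsized value can now be formatted with `{:?}` (the unsizing coercion to `RwLock<dyn Debug>` is the illustrative part, not something from the patch):

```rust
use std::fmt::Debug;
use std::sync::RwLock;

fn main() {
    let lock = RwLock::new(vec![1, 2, 3]);
    let unsized_lock: &RwLock<dyn Debug> = &lock; // unsizing coercion
    // Previously `RwLockReadGuard<'_, dyn Debug>` did not implement Debug.
    println!("{:?}", unsized_lock.read().unwrap());
}
```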
diff --git a/library/std/src/sys/common/alloc.rs b/library/std/src/sys/common/alloc.rs
index d58aa6c27..b7357460f 100644
--- a/library/std/src/sys/common/alloc.rs
+++ b/library/std/src/sys/common/alloc.rs
@@ -14,7 +14,6 @@ use crate::ptr;
target_arch = "powerpc",
target_arch = "powerpc64",
target_arch = "sparc",
- target_arch = "asmjs",
target_arch = "wasm32",
target_arch = "hexagon",
all(target_arch = "riscv32", not(target_os = "espidf")),
diff --git a/library/std/src/sys/mod.rs b/library/std/src/sys/mod.rs
index 159ffe7ac..88420bd36 100644
--- a/library/std/src/sys/mod.rs
+++ b/library/std/src/sys/mod.rs
@@ -53,6 +53,9 @@ cfg_if::cfg_if! {
} else if #[cfg(all(target_vendor = "fortanix", target_env = "sgx"))] {
mod sgx;
pub use self::sgx::*;
+ } else if #[cfg(target_os = "teeos")] {
+ mod teeos;
+ pub use self::teeos::*;
} else {
mod unsupported;
pub use self::unsupported::*;
diff --git a/library/std/src/sys/personality/mod.rs b/library/std/src/sys/personality/mod.rs
index 386a399f5..d37b8ce63 100644
--- a/library/std/src/sys/personality/mod.rs
+++ b/library/std/src/sys/personality/mod.rs
@@ -12,7 +12,7 @@
mod dwarf;
-#[cfg(not(test))]
+#[cfg(not(any(test, doctest)))]
cfg_if::cfg_if! {
if #[cfg(target_os = "emscripten")] {
mod emcc;
@@ -28,6 +28,7 @@ cfg_if::cfg_if! {
} else if #[cfg(any(
all(target_family = "windows", target_env = "gnu"),
target_os = "psp",
+ target_os = "xous",
target_os = "solid_asp3",
all(target_family = "unix", not(target_os = "espidf"), not(target_os = "l4re")),
all(target_vendor = "fortanix", target_env = "sgx"),
diff --git a/library/std/src/sys/solid/net.rs b/library/std/src/sys/solid/net.rs
index 1eae0fc06..a768e2406 100644
--- a/library/std/src/sys/solid/net.rs
+++ b/library/std/src/sys/solid/net.rs
@@ -5,9 +5,10 @@ use crate::{
io::{self, BorrowedBuf, BorrowedCursor, ErrorKind, IoSlice, IoSliceMut},
mem,
net::{Shutdown, SocketAddr},
+ os::solid::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, OwnedFd},
ptr, str,
sys_common::net::{getsockopt, setsockopt, sockaddr_to_addr},
- sys_common::{AsInner, FromInner, IntoInner},
+ sys_common::{FromInner, IntoInner},
time::Duration,
};
@@ -28,102 +29,6 @@ const fn max_iov() -> usize {
1024
}
-/// A file descriptor.
-#[rustc_layout_scalar_valid_range_start(0)]
-// libstd/os/raw/mod.rs assures me that every libstd-supported platform has a
-// 32-bit c_int. Below is -2, in two's complement, but that only works out
-// because c_int is 32 bits.
-#[rustc_layout_scalar_valid_range_end(0xFF_FF_FF_FE)]
-struct FileDesc {
- fd: c_int,
-}
-
-impl FileDesc {
- #[inline]
- fn new(fd: c_int) -> FileDesc {
- assert_ne!(fd, -1i32);
- // Safety: we just asserted that the value is in the valid range and
- // isn't `-1` (the only value bigger than `0xFF_FF_FF_FE` unsigned)
- unsafe { FileDesc { fd } }
- }
-
- #[inline]
- fn raw(&self) -> c_int {
- self.fd
- }
-
- /// Extracts the actual file descriptor without closing it.
- #[inline]
- fn into_raw(self) -> c_int {
- let fd = self.fd;
- mem::forget(self);
- fd
- }
-
- fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
- let ret = cvt(unsafe {
- netc::read(self.fd, buf.as_mut_ptr() as *mut c_void, cmp::min(buf.len(), READ_LIMIT))
- })?;
- Ok(ret as usize)
- }
-
- fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
- let ret = cvt(unsafe {
- netc::readv(
- self.fd,
- bufs.as_ptr() as *const netc::iovec,
- cmp::min(bufs.len(), max_iov()) as c_int,
- )
- })?;
- Ok(ret as usize)
- }
-
- #[inline]
- fn is_read_vectored(&self) -> bool {
- true
- }
-
- fn write(&self, buf: &[u8]) -> io::Result<usize> {
- let ret = cvt(unsafe {
- netc::write(self.fd, buf.as_ptr() as *const c_void, cmp::min(buf.len(), READ_LIMIT))
- })?;
- Ok(ret as usize)
- }
-
- fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
- let ret = cvt(unsafe {
- netc::writev(
- self.fd,
- bufs.as_ptr() as *const netc::iovec,
- cmp::min(bufs.len(), max_iov()) as c_int,
- )
- })?;
- Ok(ret as usize)
- }
-
- #[inline]
- fn is_write_vectored(&self) -> bool {
- true
- }
-
- fn duplicate(&self) -> io::Result<FileDesc> {
- cvt(unsafe { netc::dup(self.fd) }).map(Self::new)
- }
-}
-
-impl AsInner<c_int> for FileDesc {
- #[inline]
- fn as_inner(&self) -> &c_int {
- &self.fd
- }
-}
-
-impl Drop for FileDesc {
- fn drop(&mut self) {
- unsafe { netc::close(self.fd) };
- }
-}
-
#[doc(hidden)]
pub trait IsMinusOne {
fn is_minus_one(&self) -> bool;
@@ -212,7 +117,7 @@ pub(super) fn decode_error_kind(er: abi::ER) -> ErrorKind {
pub fn init() {}
-pub struct Socket(FileDesc);
+pub struct Socket(OwnedFd);
impl Socket {
pub fn new(addr: &SocketAddr, ty: c_int) -> io::Result<Socket> {
@@ -226,16 +131,13 @@ impl Socket {
pub fn new_raw(fam: c_int, ty: c_int) -> io::Result<Socket> {
unsafe {
let fd = cvt(netc::socket(fam, ty, 0))?;
- let fd = FileDesc::new(fd);
- let socket = Socket(fd);
-
- Ok(socket)
+ Ok(Self::from_raw_fd(fd))
}
}
pub fn connect(&self, addr: &SocketAddr) -> io::Result<()> {
let (addr, len) = addr.into_inner();
- cvt(unsafe { netc::connect(self.0.raw(), addr.as_ptr(), len) })?;
+ cvt(unsafe { netc::connect(self.as_raw_fd(), addr.as_ptr(), len) })?;
Ok(())
}
@@ -264,14 +166,14 @@ impl Socket {
timeout.tv_usec = 1;
}
- let fds = netc::fd_set { num_fds: 1, fds: [self.0.raw()] };
+ let fds = netc::fd_set { num_fds: 1, fds: [self.as_raw_fd()] };
let mut writefds = fds;
let mut errorfds = fds;
let n = unsafe {
cvt(netc::select(
- self.0.raw() + 1,
+ self.as_raw_fd() + 1,
ptr::null_mut(),
&mut writefds,
&mut errorfds,
@@ -294,18 +196,17 @@ impl Socket {
}
pub fn accept(&self, storage: *mut sockaddr, len: *mut socklen_t) -> io::Result<Socket> {
- let fd = cvt_r(|| unsafe { netc::accept(self.0.raw(), storage, len) })?;
- let fd = FileDesc::new(fd);
- Ok(Socket(fd))
+ let fd = cvt_r(|| unsafe { netc::accept(self.as_raw_fd(), storage, len) })?;
+ unsafe { Ok(Self::from_raw_fd(fd)) }
}
pub fn duplicate(&self) -> io::Result<Socket> {
- self.0.duplicate().map(Socket)
+ Ok(Self(self.0.try_clone()?))
}
fn recv_with_flags(&self, mut buf: BorrowedCursor<'_>, flags: c_int) -> io::Result<()> {
let ret = cvt(unsafe {
- netc::recv(self.0.raw(), buf.as_mut().as_mut_ptr().cast(), buf.capacity(), flags)
+ netc::recv(self.as_raw_fd(), buf.as_mut().as_mut_ptr().cast(), buf.capacity(), flags)
})?;
unsafe {
buf.advance(ret as usize);
@@ -330,12 +231,19 @@ impl Socket {
}
pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
- self.0.read_vectored(bufs)
+ let ret = cvt(unsafe {
+ netc::readv(
+ self.as_raw_fd(),
+ bufs.as_ptr() as *const netc::iovec,
+ cmp::min(bufs.len(), max_iov()) as c_int,
+ )
+ })?;
+ Ok(ret as usize)
}
#[inline]
pub fn is_read_vectored(&self) -> bool {
- self.0.is_read_vectored()
+ true
}
fn recv_from_with_flags(
@@ -348,7 +256,7 @@ impl Socket {
let n = cvt(unsafe {
netc::recvfrom(
- self.0.raw(),
+ self.as_raw_fd(),
buf.as_mut_ptr() as *mut c_void,
buf.len(),
flags,
@@ -368,16 +276,30 @@ impl Socket {
}
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
- self.0.write(buf)
+ let ret = cvt(unsafe {
+ netc::write(
+ self.as_raw_fd(),
+ buf.as_ptr() as *const c_void,
+ cmp::min(buf.len(), READ_LIMIT),
+ )
+ })?;
+ Ok(ret as usize)
}
pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
- self.0.write_vectored(bufs)
+ let ret = cvt(unsafe {
+ netc::writev(
+ self.as_raw_fd(),
+ bufs.as_ptr() as *const netc::iovec,
+ cmp::min(bufs.len(), max_iov()) as c_int,
+ )
+ })?;
+ Ok(ret as usize)
}
#[inline]
pub fn is_write_vectored(&self) -> bool {
- self.0.is_write_vectored()
+ true
}
pub fn set_timeout(&self, dur: Option<Duration>, kind: c_int) -> io::Result<()> {
@@ -423,7 +345,7 @@ impl Socket {
Shutdown::Read => netc::SHUT_RD,
Shutdown::Both => netc::SHUT_RDWR,
};
- cvt(unsafe { netc::shutdown(self.0.raw(), how) })?;
+ cvt(unsafe { netc::shutdown(self.as_raw_fd(), how) })?;
Ok(())
}
@@ -454,7 +376,7 @@ impl Socket {
pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
let mut nonblocking = nonblocking as c_int;
cvt(unsafe {
- netc::ioctl(*self.as_inner(), netc::FIONBIO, (&mut nonblocking) as *mut c_int as _)
+ netc::ioctl(self.as_raw_fd(), netc::FIONBIO, (&mut nonblocking) as *mut c_int as _)
})
.map(drop)
}
@@ -466,25 +388,48 @@ impl Socket {
// This method is used by sys_common code to abstract over targets.
pub fn as_raw(&self) -> c_int {
- *self.as_inner()
+ self.as_raw_fd()
}
}
-impl AsInner<c_int> for Socket {
+impl FromInner<OwnedFd> for Socket {
#[inline]
- fn as_inner(&self) -> &c_int {
- self.0.as_inner()
+ fn from_inner(sock: OwnedFd) -> Socket {
+ Socket(sock)
}
}
-impl FromInner<c_int> for Socket {
- fn from_inner(fd: c_int) -> Socket {
- Socket(FileDesc::new(fd))
+impl IntoInner<OwnedFd> for Socket {
+ #[inline]
+ fn into_inner(self) -> OwnedFd {
+ self.0
}
}
-impl IntoInner<c_int> for Socket {
- fn into_inner(self) -> c_int {
- self.0.into_raw()
+impl AsFd for Socket {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ self.0.as_fd()
+ }
+}
+
+impl AsRawFd for Socket {
+ #[inline]
+ fn as_raw_fd(&self) -> c_int {
+ self.0.as_raw_fd()
+ }
+}
+
+impl FromRawFd for Socket {
+ #[inline]
+ unsafe fn from_raw_fd(fd: c_int) -> Socket {
+ unsafe { Self(FromRawFd::from_raw_fd(fd)) }
+ }
+}
+
+impl IntoRawFd for Socket {
+ #[inline]
+ fn into_raw_fd(self) -> c_int {
+ self.0.into_raw_fd()
}
}
diff --git a/library/std/src/sys/teeos/alloc.rs b/library/std/src/sys/teeos/alloc.rs
new file mode 100644
index 000000000..e236819aa
--- /dev/null
+++ b/library/std/src/sys/teeos/alloc.rs
@@ -0,0 +1,57 @@
+use crate::alloc::{GlobalAlloc, Layout, System};
+use crate::ptr;
+use crate::sys::common::alloc::{realloc_fallback, MIN_ALIGN};
+
+#[stable(feature = "alloc_system_type", since = "1.28.0")]
+unsafe impl GlobalAlloc for System {
+ #[inline]
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ // jemalloc provides alignment less than MIN_ALIGN for small allocations.
+ // So only rely on MIN_ALIGN if size >= align.
+ // Also see <https://github.com/rust-lang/rust/issues/45955> and
+ // <https://github.com/rust-lang/rust/issues/62251#issuecomment-507580914>.
+ if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
+ libc::malloc(layout.size()) as *mut u8
+ } else {
+ aligned_malloc(&layout)
+ }
+ }
+
+ #[inline]
+ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+ // See the comment above in `alloc` for why this check looks the way it does.
+ if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
+ libc::calloc(layout.size(), 1) as *mut u8
+ } else {
+ let ptr = self.alloc(layout);
+ if !ptr.is_null() {
+ ptr::write_bytes(ptr, 0, layout.size());
+ }
+ ptr
+ }
+ }
+
+ #[inline]
+ unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
+ libc::free(ptr as *mut libc::c_void)
+ }
+
+ #[inline]
+ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ if layout.align() <= MIN_ALIGN && layout.align() <= new_size {
+ libc::realloc(ptr as *mut libc::c_void, new_size) as *mut u8
+ } else {
+ realloc_fallback(self, ptr, layout, new_size)
+ }
+ }
+}
+
+#[inline]
+unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
+ let mut out = ptr::null_mut();
+ // posix_memalign requires that the alignment be a multiple of `sizeof(void*)`.
+ // Since these are all powers of 2, we can just use max.
+ let align = layout.align().max(crate::mem::size_of::<usize>());
+ let ret = libc::posix_memalign(&mut out, align, layout.size());
+ if ret != 0 { ptr::null_mut() } else { out as *mut u8 }
+}
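
A small usage sketch of the allocator above through the stable `GlobalAlloc` interface; on typical targets an alignment of 32 exceeds `MIN_ALIGN` and so exercises the `aligned_malloc` branch:

```rust
use std::alloc::{GlobalAlloc, Layout, System};

fn main() {
    // align (32) > MIN_ALIGN forces the posix_memalign path sketched above.
    let layout = Layout::from_size_align(64, 32).unwrap();
    unsafe {
        let p = System.alloc(layout);
        assert!(!p.is_null());
        System.dealloc(p, layout);
    }
}
```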
diff --git a/library/std/src/sys/teeos/locks/condvar.rs b/library/std/src/sys/teeos/locks/condvar.rs
new file mode 100644
index 000000000..c08e8145b
--- /dev/null
+++ b/library/std/src/sys/teeos/locks/condvar.rs
@@ -0,0 +1,100 @@
+use crate::cell::UnsafeCell;
+use crate::ptr;
+use crate::sync::atomic::{AtomicPtr, Ordering::Relaxed};
+use crate::sys::locks::mutex::{self, Mutex};
+use crate::sys::time::TIMESPEC_MAX;
+use crate::sys_common::lazy_box::{LazyBox, LazyInit};
+use crate::time::Duration;
+
+extern "C" {
+ pub fn pthread_cond_timedwait(
+ cond: *mut libc::pthread_cond_t,
+ lock: *mut libc::pthread_mutex_t,
+ adstime: *const libc::timespec,
+ ) -> libc::c_int;
+}
+
+struct AllocatedCondvar(UnsafeCell<libc::pthread_cond_t>);
+
+pub struct Condvar {
+ inner: LazyBox<AllocatedCondvar>,
+ mutex: AtomicPtr<libc::pthread_mutex_t>,
+}
+
+#[inline]
+fn raw(c: &Condvar) -> *mut libc::pthread_cond_t {
+ c.inner.0.get()
+}
+
+unsafe impl Send for AllocatedCondvar {}
+unsafe impl Sync for AllocatedCondvar {}
+
+impl LazyInit for AllocatedCondvar {
+ fn init() -> Box<Self> {
+ let condvar = Box::new(AllocatedCondvar(UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER)));
+
+ let r = unsafe { libc::pthread_cond_init(condvar.0.get(), crate::ptr::null()) };
+ assert_eq!(r, 0);
+
+ condvar
+ }
+}
+
+impl Drop for AllocatedCondvar {
+ #[inline]
+ fn drop(&mut self) {
+ let r = unsafe { libc::pthread_cond_destroy(self.0.get()) };
+ debug_assert_eq!(r, 0);
+ }
+}
+
+impl Condvar {
+ pub const fn new() -> Condvar {
+ Condvar { inner: LazyBox::new(), mutex: AtomicPtr::new(ptr::null_mut()) }
+ }
+
+ #[inline]
+ fn verify(&self, mutex: *mut libc::pthread_mutex_t) {
+ match self.mutex.compare_exchange(ptr::null_mut(), mutex, Relaxed, Relaxed) {
+ Ok(_) => {} // Stored the address
+ Err(n) if n == mutex => {} // Lost a race to store the same address
+ _ => panic!("attempted to use a condition variable with two mutexes"),
+ }
+ }
+
+ #[inline]
+ pub fn notify_one(&self) {
+ let r = unsafe { libc::pthread_cond_signal(raw(self)) };
+ debug_assert_eq!(r, 0);
+ }
+
+ #[inline]
+ pub fn notify_all(&self) {
+ let r = unsafe { libc::pthread_cond_broadcast(raw(self)) };
+ debug_assert_eq!(r, 0);
+ }
+
+ #[inline]
+ pub unsafe fn wait(&self, mutex: &Mutex) {
+ let mutex = mutex::raw(mutex);
+ self.verify(mutex);
+ let r = libc::pthread_cond_wait(raw(self), mutex);
+ debug_assert_eq!(r, 0);
+ }
+
+ pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
+ use crate::sys::time::Timespec;
+
+ let mutex = mutex::raw(mutex);
+ self.verify(mutex);
+
+ let timeout = Timespec::now(libc::CLOCK_MONOTONIC)
+ .checked_add_duration(&dur)
+ .and_then(|t| t.to_timespec())
+ .unwrap_or(TIMESPEC_MAX);
+
+ let r = pthread_cond_timedwait(raw(self), mutex, &timeout);
+ assert!(r == libc::ETIMEDOUT || r == 0);
+ r == 0
+ }
+}
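
The `verify` step above pins the condvar to a single mutex with a relaxed compare-exchange; a standalone sketch of just that check:

```rust
use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering::Relaxed};

// First caller stores its mutex address; later callers must present the same
// address or the program aborts, mirroring `Condvar::verify` above.
fn verify(bound: &AtomicPtr<u8>, mutex: *mut u8) {
    match bound.compare_exchange(ptr::null_mut(), mutex, Relaxed, Relaxed) {
        Ok(_) => {}                      // stored the address
        Err(prev) if prev == mutex => {} // lost a race to store the same address
        _ => panic!("attempted to use a condition variable with two mutexes"),
    }
}
```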
diff --git a/library/std/src/sys/teeos/locks/mod.rs b/library/std/src/sys/teeos/locks/mod.rs
new file mode 100644
index 000000000..c58e9c7fd
--- /dev/null
+++ b/library/std/src/sys/teeos/locks/mod.rs
@@ -0,0 +1,8 @@
+pub mod condvar;
+#[path = "../../unix/locks/pthread_mutex.rs"]
+pub mod mutex;
+pub mod rwlock;
+
+pub(crate) use condvar::Condvar;
+pub(crate) use mutex::Mutex;
+pub(crate) use rwlock::RwLock;
diff --git a/library/std/src/sys/teeos/locks/rwlock.rs b/library/std/src/sys/teeos/locks/rwlock.rs
new file mode 100644
index 000000000..27cdb8878
--- /dev/null
+++ b/library/std/src/sys/teeos/locks/rwlock.rs
@@ -0,0 +1,44 @@
+use crate::sys::locks::mutex::Mutex;
+
+/// We do not support rwlock, so we use a mutex to simulate one.
+/// This is useful because a lot of code in std relies on rwlock.
+pub struct RwLock {
+ inner: Mutex,
+}
+
+impl RwLock {
+ #[inline]
+ pub const fn new() -> RwLock {
+ RwLock { inner: Mutex::new() }
+ }
+
+ #[inline]
+ pub fn read(&self) {
+ unsafe { self.inner.lock() };
+ }
+
+ #[inline]
+ pub fn try_read(&self) -> bool {
+ unsafe { self.inner.try_lock() }
+ }
+
+ #[inline]
+ pub fn write(&self) {
+ unsafe { self.inner.lock() };
+ }
+
+ #[inline]
+ pub unsafe fn try_write(&self) -> bool {
+ unsafe { self.inner.try_lock() }
+ }
+
+ #[inline]
+ pub unsafe fn read_unlock(&self) {
+ unsafe { self.inner.unlock() };
+ }
+
+ #[inline]
+ pub unsafe fn write_unlock(&self) {
+ unsafe { self.inner.unlock() };
+ }
+}
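
One behavioral consequence of the mutex-backed simulation above, sketched with stable std types (the `SimulatedRwLock` name is illustrative): two readers exclude each other, which a real rwlock would allow:

```rust
use std::sync::Mutex;

// Stand-in for the simulated rwlock: `read` and `write` take the same lock.
struct SimulatedRwLock(Mutex<()>);

impl SimulatedRwLock {
    fn read(&self) -> std::sync::MutexGuard<'_, ()> { self.0.lock().unwrap() }
    fn write(&self) -> std::sync::MutexGuard<'_, ()> { self.0.lock().unwrap() }
}

fn main() {
    let l = SimulatedRwLock(Mutex::new(()));
    let _r1 = l.read();
    // A second `l.read()` on this thread would deadlock: readers are serialized.
}
```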
diff --git a/library/std/src/sys/teeos/mod.rs b/library/std/src/sys/teeos/mod.rs
new file mode 100644
index 000000000..ed8c54b2c
--- /dev/null
+++ b/library/std/src/sys/teeos/mod.rs
@@ -0,0 +1,167 @@
+//! System bindings for the Teeos platform
+//!
+//! This module contains the facade (aka platform-specific) implementations of
+//! OS level functionality for Teeos.
+#![allow(unsafe_op_in_unsafe_fn)]
+#![allow(unused_variables)]
+#![allow(dead_code)]
+
+pub use self::rand::hashmap_random_keys;
+
+pub mod alloc;
+#[path = "../unsupported/args.rs"]
+pub mod args;
+#[path = "../unix/cmath.rs"]
+pub mod cmath;
+#[path = "../unsupported/env.rs"]
+pub mod env;
+pub mod locks;
+//pub mod fd;
+#[path = "../unsupported/fs.rs"]
+pub mod fs;
+#[path = "../unsupported/io.rs"]
+pub mod io;
+#[path = "../unix/memchr.rs"]
+pub mod memchr;
+pub mod net;
+#[path = "../unsupported/once.rs"]
+pub mod once;
+pub mod os;
+#[path = "../unix/os_str.rs"]
+pub mod os_str;
+#[path = "../unix/path.rs"]
+pub mod path;
+#[path = "../unsupported/pipe.rs"]
+pub mod pipe;
+#[path = "../unsupported/process.rs"]
+pub mod process;
+mod rand;
+pub mod stdio;
+pub mod thread;
+pub mod thread_local_dtor;
+#[path = "../unix/thread_local_key.rs"]
+pub mod thread_local_key;
+#[path = "../unsupported/thread_parking.rs"]
+pub mod thread_parking;
+#[allow(non_upper_case_globals)]
+#[path = "../unix/time.rs"]
+pub mod time;
+
+use crate::io::ErrorKind;
+
+pub fn abort_internal() -> ! {
+ unsafe { libc::abort() }
+}
+
+// Trusted Applications are loaded as dynamic libraries on Teeos,
+// so this should never be called.
+pub fn init(argc: isize, argv: *const *const u8, sigpipe: u8) {}
+
+// SAFETY: must be called only once during runtime cleanup.
+// This is not guaranteed to run, for example when the program aborts.
+pub unsafe fn cleanup() {
+ unimplemented!()
+ // We do NOT have a stack overflow handler, because TEE OS kills the TA
+ // when one happens, so the cleanup call is commented out:
+ // stack_overflow::cleanup();
+}
+
+#[inline]
+pub(crate) fn is_interrupted(errno: i32) -> bool {
+ errno == libc::EINTR
+}
+
+// Note: code below is 1:1 copied from unix/mod.rs
+pub fn decode_error_kind(errno: i32) -> ErrorKind {
+ use ErrorKind::*;
+ match errno as libc::c_int {
+ libc::E2BIG => ArgumentListTooLong,
+ libc::EADDRINUSE => AddrInUse,
+ libc::EADDRNOTAVAIL => AddrNotAvailable,
+ libc::EBUSY => ResourceBusy,
+ libc::ECONNABORTED => ConnectionAborted,
+ libc::ECONNREFUSED => ConnectionRefused,
+ libc::ECONNRESET => ConnectionReset,
+ libc::EDEADLK => Deadlock,
+ libc::EDQUOT => FilesystemQuotaExceeded,
+ libc::EEXIST => AlreadyExists,
+ libc::EFBIG => FileTooLarge,
+ libc::EHOSTUNREACH => HostUnreachable,
+ libc::EINTR => Interrupted,
+ libc::EINVAL => InvalidInput,
+ libc::EISDIR => IsADirectory,
+ libc::ELOOP => FilesystemLoop,
+ libc::ENOENT => NotFound,
+ libc::ENOMEM => OutOfMemory,
+ libc::ENOSPC => StorageFull,
+ libc::ENOSYS => Unsupported,
+ libc::EMLINK => TooManyLinks,
+ libc::ENAMETOOLONG => InvalidFilename,
+ libc::ENETDOWN => NetworkDown,
+ libc::ENETUNREACH => NetworkUnreachable,
+ libc::ENOTCONN => NotConnected,
+ libc::ENOTDIR => NotADirectory,
+ libc::ENOTEMPTY => DirectoryNotEmpty,
+ libc::EPIPE => BrokenPipe,
+ libc::EROFS => ReadOnlyFilesystem,
+ libc::ESPIPE => NotSeekable,
+ libc::ESTALE => StaleNetworkFileHandle,
+ libc::ETIMEDOUT => TimedOut,
+ libc::ETXTBSY => ExecutableFileBusy,
+ libc::EXDEV => CrossesDevices,
+
+ libc::EACCES | libc::EPERM => PermissionDenied,
+
+ // These two constants can have the same value on some systems,
+ // but different values on others, so we can't use a match
+ // clause
+ x if x == libc::EAGAIN || x == libc::EWOULDBLOCK => WouldBlock,
+
+ _ => Uncategorized,
+ }
+}
+
+#[doc(hidden)]
+pub trait IsMinusOne {
+ fn is_minus_one(&self) -> bool;
+}
+
+macro_rules! impl_is_minus_one {
+ ($($t:ident)*) => ($(impl IsMinusOne for $t {
+ fn is_minus_one(&self) -> bool {
+ *self == -1
+ }
+ })*)
+}
+
+impl_is_minus_one! { i8 i16 i32 i64 isize }
+
+pub fn cvt<T: IsMinusOne>(t: T) -> crate::io::Result<T> {
+ if t.is_minus_one() { Err(crate::io::Error::last_os_error()) } else { Ok(t) }
+}
+
+pub fn cvt_r<T, F>(mut f: F) -> crate::io::Result<T>
+where
+ T: IsMinusOne,
+ F: FnMut() -> T,
+{
+ loop {
+ match cvt(f()) {
+ Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
+ other => return other,
+ }
+ }
+}
+
+pub fn cvt_nz(error: libc::c_int) -> crate::io::Result<()> {
+ if error == 0 { Ok(()) } else { Err(crate::io::Error::from_raw_os_error(error)) }
+}
+
+use crate::io as std_io;
+pub fn unsupported<T>() -> std_io::Result<T> {
+ Err(unsupported_err())
+}
+
+pub fn unsupported_err() -> std_io::Error {
+ std_io::Error::new(std_io::ErrorKind::Unsupported, "operation not supported on this platform")
+}
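
A hypothetical caller of the `cvt`/`cvt_r` helpers above, assuming they are in scope and the `libc` crate is available (the `libc::read` call is illustrative; any function returning -1 on error works):

```rust
use std::io;

fn read_retrying(fd: i32, buf: &mut [u8]) -> io::Result<usize> {
    // `cvt_r` converts -1 into `Error::last_os_error()` and retries on EINTR.
    let n = cvt_r(|| unsafe { libc::read(fd, buf.as_mut_ptr().cast(), buf.len()) })?;
    Ok(n as usize)
}
```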
diff --git a/library/std/src/sys/teeos/net.rs b/library/std/src/sys/teeos/net.rs
new file mode 100644
index 000000000..0df681dbf
--- /dev/null
+++ b/library/std/src/sys/teeos/net.rs
@@ -0,0 +1,372 @@
+use crate::fmt;
+use crate::io::{self, BorrowedCursor, IoSlice, IoSliceMut};
+use crate::net::{Ipv4Addr, Ipv6Addr, Shutdown, SocketAddr};
+use crate::sys::unsupported;
+use crate::time::Duration;
+
+pub struct TcpStream(!);
+
+impl TcpStream {
+ pub fn connect(_: io::Result<&SocketAddr>) -> io::Result<TcpStream> {
+ unsupported()
+ }
+
+ pub fn connect_timeout(_: &SocketAddr, _: Duration) -> io::Result<TcpStream> {
+ unsupported()
+ }
+
+ pub fn set_read_timeout(&self, _: Option<Duration>) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn set_write_timeout(&self, _: Option<Duration>) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn read_timeout(&self) -> io::Result<Option<Duration>> {
+ self.0
+ }
+
+ pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
+ self.0
+ }
+
+ pub fn peek(&self, _: &mut [u8]) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn read(&self, _: &mut [u8]) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn read_buf(&self, _buf: BorrowedCursor<'_>) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn read_vectored(&self, _: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn is_read_vectored(&self) -> bool {
+ self.0
+ }
+
+ pub fn write(&self, _: &[u8]) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn write_vectored(&self, _: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn is_write_vectored(&self) -> bool {
+ self.0
+ }
+
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ self.0
+ }
+
+ pub fn socket_addr(&self) -> io::Result<SocketAddr> {
+ self.0
+ }
+
+ pub fn shutdown(&self, _: Shutdown) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn duplicate(&self) -> io::Result<TcpStream> {
+ self.0
+ }
+
+ pub fn set_linger(&self, _: Option<Duration>) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn linger(&self) -> io::Result<Option<Duration>> {
+ self.0
+ }
+
+ pub fn set_nodelay(&self, _: bool) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn nodelay(&self) -> io::Result<bool> {
+ self.0
+ }
+
+ pub fn set_ttl(&self, _: u32) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.0
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.0
+ }
+
+ pub fn set_nonblocking(&self, _: bool) -> io::Result<()> {
+ self.0
+ }
+}
+
+impl fmt::Debug for TcpStream {
+ fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0
+ }
+}
+
+pub struct TcpListener(!);
+
+impl TcpListener {
+ pub fn bind(_: io::Result<&SocketAddr>) -> io::Result<TcpListener> {
+ unsupported()
+ }
+
+ pub fn socket_addr(&self) -> io::Result<SocketAddr> {
+ self.0
+ }
+
+ pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> {
+ self.0
+ }
+
+ pub fn duplicate(&self) -> io::Result<TcpListener> {
+ self.0
+ }
+
+ pub fn set_ttl(&self, _: u32) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.0
+ }
+
+ pub fn set_only_v6(&self, _: bool) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn only_v6(&self) -> io::Result<bool> {
+ self.0
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.0
+ }
+
+ pub fn set_nonblocking(&self, _: bool) -> io::Result<()> {
+ self.0
+ }
+}
+
+impl fmt::Debug for TcpListener {
+ fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0
+ }
+}
+
+pub struct UdpSocket(!);
+
+impl UdpSocket {
+ pub fn bind(_: io::Result<&SocketAddr>) -> io::Result<UdpSocket> {
+ unsupported()
+ }
+
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ self.0
+ }
+
+ pub fn socket_addr(&self) -> io::Result<SocketAddr> {
+ self.0
+ }
+
+ pub fn recv_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.0
+ }
+
+ pub fn peek_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.0
+ }
+
+ pub fn send_to(&self, _: &[u8], _: &SocketAddr) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn duplicate(&self) -> io::Result<UdpSocket> {
+ self.0
+ }
+
+ pub fn set_read_timeout(&self, _: Option<Duration>) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn set_write_timeout(&self, _: Option<Duration>) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn read_timeout(&self) -> io::Result<Option<Duration>> {
+ self.0
+ }
+
+ pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
+ self.0
+ }
+
+ pub fn set_broadcast(&self, _: bool) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn broadcast(&self) -> io::Result<bool> {
+ self.0
+ }
+
+ pub fn set_multicast_loop_v4(&self, _: bool) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn multicast_loop_v4(&self) -> io::Result<bool> {
+ self.0
+ }
+
+ pub fn set_multicast_ttl_v4(&self, _: u32) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
+ self.0
+ }
+
+ pub fn set_multicast_loop_v6(&self, _: bool) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn multicast_loop_v6(&self) -> io::Result<bool> {
+ self.0
+ }
+
+ pub fn join_multicast_v4(&self, _: &Ipv4Addr, _: &Ipv4Addr) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn join_multicast_v6(&self, _: &Ipv6Addr, _: u32) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn leave_multicast_v4(&self, _: &Ipv4Addr, _: &Ipv4Addr) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn leave_multicast_v6(&self, _: &Ipv6Addr, _: u32) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn set_ttl(&self, _: u32) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.0
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.0
+ }
+
+ pub fn set_nonblocking(&self, _: bool) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn recv(&self, _: &mut [u8]) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn peek(&self, _: &mut [u8]) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn send(&self, _: &[u8]) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn connect(&self, _: io::Result<&SocketAddr>) -> io::Result<()> {
+ self.0
+ }
+}
+
+impl fmt::Debug for UdpSocket {
+ fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0
+ }
+}
+
+pub struct LookupHost(!);
+
+impl LookupHost {
+ pub fn port(&self) -> u16 {
+ self.0
+ }
+}
+
+impl Iterator for LookupHost {
+ type Item = SocketAddr;
+ fn next(&mut self) -> Option<SocketAddr> {
+ self.0
+ }
+}
+
+impl TryFrom<&str> for LookupHost {
+ type Error = io::Error;
+
+ fn try_from(_v: &str) -> io::Result<LookupHost> {
+ unsupported()
+ }
+}
+
+impl<'a> TryFrom<(&'a str, u16)> for LookupHost {
+ type Error = io::Error;
+
+ fn try_from(_v: (&'a str, u16)) -> io::Result<LookupHost> {
+ unsupported()
+ }
+}
+
+#[allow(nonstandard_style)]
+pub mod netc {
+ pub const AF_INET: u8 = 0;
+ pub const AF_INET6: u8 = 1;
+ pub type sa_family_t = u8;
+
+ #[derive(Copy, Clone)]
+ pub struct in_addr {
+ pub s_addr: u32,
+ }
+
+ #[derive(Copy, Clone)]
+ pub struct sockaddr_in {
+ pub sin_family: sa_family_t,
+ pub sin_port: u16,
+ pub sin_addr: in_addr,
+ }
+
+ #[derive(Copy, Clone)]
+ pub struct in6_addr {
+ pub s6_addr: [u8; 16],
+ }
+
+ #[derive(Copy, Clone)]
+ pub struct sockaddr_in6 {
+ pub sin6_family: sa_family_t,
+ pub sin6_port: u16,
+ pub sin6_addr: in6_addr,
+ pub sin6_flowinfo: u32,
+ pub sin6_scope_id: u32,
+ }
+
+ #[derive(Copy, Clone)]
+ pub struct sockaddr {}
+}
+
+pub type Socket = UdpSocket;
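
The stubs above use the unstable never type `!` so every method body is trivially unreachable; the same pattern can be sketched on stable with an empty enum:

```rust
// Stable sketch of the uninhabited-stub pattern used throughout teeos/net.rs.
enum Never {}

pub struct TcpStream(Never);

impl TcpStream {
    pub fn read(&self, _buf: &mut [u8]) -> std::io::Result<usize> {
        match self.0 {} // no value of `Never` exists, so this is unreachable
    }
}
```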
diff --git a/library/std/src/sys/teeos/os.rs b/library/std/src/sys/teeos/os.rs
new file mode 100644
index 000000000..e54a92f01
--- /dev/null
+++ b/library/std/src/sys/teeos/os.rs
@@ -0,0 +1,134 @@
+//! Implementation of `std::os` functionality for teeos
+
+use core::marker::PhantomData;
+
+use crate::error::Error as StdError;
+use crate::ffi::{OsStr, OsString};
+use crate::fmt;
+use crate::io;
+use crate::path;
+use crate::path::PathBuf;
+
+use super::unsupported;
+
+pub fn errno() -> i32 {
+ unsafe { (*libc::__errno_location()) as i32 }
+}
+
+// Hardcoded to return 4096, since `sysconf` is only implemented as a stub.
+pub fn page_size() -> usize {
+ // unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize };
+ 4096
+}
+
+// Everything below is a stub copied from unsupported.rs.
+
+pub fn error_string(_errno: i32) -> String {
+ "error string unimplemented".to_string()
+}
+
+pub fn getcwd() -> io::Result<PathBuf> {
+ unsupported()
+}
+
+pub fn chdir(_: &path::Path) -> io::Result<()> {
+ unsupported()
+}
+
+pub struct SplitPaths<'a>(!, PhantomData<&'a ()>);
+
+pub fn split_paths(_unparsed: &OsStr) -> SplitPaths<'_> {
+ panic!("unsupported")
+}
+
+impl<'a> Iterator for SplitPaths<'a> {
+ type Item = PathBuf;
+ fn next(&mut self) -> Option<PathBuf> {
+ self.0
+ }
+}
+
+#[derive(Debug)]
+pub struct JoinPathsError;
+
+pub fn join_paths<I, T>(_paths: I) -> Result<OsString, JoinPathsError>
+where
+ I: Iterator<Item = T>,
+ T: AsRef<OsStr>,
+{
+ Err(JoinPathsError)
+}
+
+impl fmt::Display for JoinPathsError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ "not supported on this platform yet".fmt(f)
+ }
+}
+
+impl StdError for JoinPathsError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "not supported on this platform yet"
+ }
+}
+
+pub fn current_exe() -> io::Result<PathBuf> {
+ unsupported()
+}
+
+pub struct Env(!);
+
+impl Env {
+ // FIXME(https://github.com/rust-lang/rust/issues/114583): Remove this when <OsStr as Debug>::fmt matches <str as Debug>::fmt.
+ pub fn str_debug(&self) -> impl fmt::Debug + '_ {
+ let Self(inner) = self;
+ match *inner {}
+ }
+}
+
+impl fmt::Debug for Env {
+ fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let Self(inner) = self;
+ match *inner {}
+ }
+}
+
+impl Iterator for Env {
+ type Item = (OsString, OsString);
+ fn next(&mut self) -> Option<(OsString, OsString)> {
+ let Self(inner) = self;
+ match *inner {}
+ }
+}
+
+pub fn env() -> Env {
+ panic!("not supported on this platform")
+}
+
+pub fn getenv(_: &OsStr) -> Option<OsString> {
+ None
+}
+
+pub fn setenv(_: &OsStr, _: &OsStr) -> io::Result<()> {
+ Err(io::Error::new(io::ErrorKind::Unsupported, "cannot set env vars on this platform"))
+}
+
+pub fn unsetenv(_: &OsStr) -> io::Result<()> {
+ Err(io::Error::new(io::ErrorKind::Unsupported, "cannot unset env vars on this platform"))
+}
+
+pub fn temp_dir() -> PathBuf {
+ panic!("no filesystem on this platform")
+}
+
+pub fn home_dir() -> Option<PathBuf> {
+ None
+}
+
+pub fn exit(_code: i32) -> ! {
+ panic!("TA should not call `exit`")
+}
+
+pub fn getpid() -> u32 {
+ panic!("no pids on this platform")
+}
diff --git a/library/std/src/sys/teeos/rand.rs b/library/std/src/sys/teeos/rand.rs
new file mode 100644
index 000000000..b45c3bb40
--- /dev/null
+++ b/library/std/src/sys/teeos/rand.rs
@@ -0,0 +1,21 @@
+pub fn hashmap_random_keys() -> (u64, u64) {
+ const KEY_LEN: usize = core::mem::size_of::<u64>();
+
+ let mut v = [0u8; KEY_LEN * 2];
+ imp::fill_bytes(&mut v);
+
+ let key1 = v[0..KEY_LEN].try_into().unwrap();
+ let key2 = v[KEY_LEN..].try_into().unwrap();
+
+ (u64::from_ne_bytes(key1), u64::from_ne_bytes(key2))
+}
+
+mod imp {
+ extern "C" {
+ fn TEE_GenerateRandom(randomBuffer: *mut core::ffi::c_void, randomBufferLen: libc::size_t);
+ }
+
+ pub fn fill_bytes(v: &mut [u8]) {
+ unsafe { TEE_GenerateRandom(v.as_mut_ptr() as _, v.len() * crate::mem::size_of::<u8>()) }
+ }
+}
diff --git a/library/std/src/sys/teeos/stdio.rs b/library/std/src/sys/teeos/stdio.rs
new file mode 100644
index 000000000..9ca04f292
--- /dev/null
+++ b/library/std/src/sys/teeos/stdio.rs
@@ -0,0 +1,88 @@
+#![deny(unsafe_op_in_unsafe_fn)]
+
+use crate::io;
+use core::arch::asm;
+
+pub struct Stdin;
+pub struct Stdout;
+pub struct Stderr;
+
+const KCALL_DEBUG_CMD_PUT_BYTES: i64 = 2;
+
+unsafe fn debug_call(cap_ref: u64, call_no: i64, arg1: u64, arg2: u64) -> i32 {
+ let ret: u64;
+ unsafe {
+ asm!(
+ "svc #99",
+ inout("x0") cap_ref => ret,
+ in("x1") call_no,
+ in("x2") arg1,
+ in("x3") arg2,
+ );
+ }
+
+ ret as i32
+}
+
+fn print_buf(s: &[u8]) -> io::Result<usize> {
+ // Corresponds to `HM_DEBUG_PUT_BYTES_LIMIT`.
+ const MAX_LEN: usize = 512;
+ let len = if s.len() > MAX_LEN { MAX_LEN } else { s.len() };
+ let result = unsafe { debug_call(0, KCALL_DEBUG_CMD_PUT_BYTES, s.as_ptr() as u64, len as u64) };
+
+ if result == 0 { Ok(len) } else { Err(io::Error::from(io::ErrorKind::InvalidInput)) }
+}
+
+impl Stdin {
+ pub const fn new() -> Stdin {
+ Stdin
+ }
+}
+
+impl io::Read for Stdin {
+ fn read(&mut self, _buf: &mut [u8]) -> io::Result<usize> {
+ Ok(0)
+ }
+}
+
+impl Stdout {
+ pub const fn new() -> Stdout {
+ Stdout
+ }
+}
+
+impl io::Write for Stdout {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ print_buf(buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+impl Stderr {
+ pub const fn new() -> Stderr {
+ Stderr
+ }
+}
+
+impl io::Write for Stderr {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ print_buf(buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+pub const STDIN_BUF_SIZE: usize = 0;
+
+pub fn is_ebadf(err: &io::Error) -> bool {
+ err.raw_os_error() == Some(libc::EBADF as i32)
+}
+
+pub fn panic_output() -> Option<impl io::Write> {
+ Some(Stderr::new())
+}
diff --git a/library/std/src/sys/teeos/thread.rs b/library/std/src/sys/teeos/thread.rs
new file mode 100644
index 000000000..155f333f9
--- /dev/null
+++ b/library/std/src/sys/teeos/thread.rs
@@ -0,0 +1,164 @@
+use core::convert::TryInto;
+
+use crate::cmp;
+use crate::ffi::CStr;
+use crate::io;
+use crate::mem;
+use crate::num::NonZeroUsize;
+use crate::ptr;
+use crate::sys::os;
+use crate::time::Duration;
+
+pub const DEFAULT_MIN_STACK_SIZE: usize = 8 * 1024;
+
+pub struct Thread {
+ id: libc::pthread_t,
+}
+
+// Some platforms may have pthread_t as a pointer, in which case we still
+// want a thread to be Send/Sync.
+unsafe impl Send for Thread {}
+unsafe impl Sync for Thread {}
+
+extern "C" {
+ pub fn TEE_Wait(timeout: u32) -> u32;
+}
+
+impl Thread {
+ // unsafe: see thread::Builder::spawn_unchecked for safety requirements
+ pub unsafe fn new(stack: usize, p: Box<dyn FnOnce()>) -> io::Result<Thread> {
+ let p = Box::into_raw(Box::new(p));
+ let mut native: libc::pthread_t = mem::zeroed();
+ let mut attr: libc::pthread_attr_t = mem::zeroed();
+ assert_eq!(libc::pthread_attr_init(&mut attr), 0);
+ assert_eq!(
+ libc::pthread_attr_settee(
+ &mut attr,
+ libc::TEESMP_THREAD_ATTR_CA_INHERIT,
+ libc::TEESMP_THREAD_ATTR_TASK_ID_INHERIT,
+ libc::TEESMP_THREAD_ATTR_HAS_SHADOW,
+ ),
+ 0,
+ );
+
+ let stack_size = cmp::max(stack, min_stack_size(&attr));
+
+ match libc::pthread_attr_setstacksize(&mut attr, stack_size) {
+ 0 => {}
+ n => {
+ assert_eq!(n, libc::EINVAL);
+ // EINVAL means |stack_size| is either too small or not a
+ // multiple of the system page size. Because it's definitely
+ // >= PTHREAD_STACK_MIN, it must be an alignment issue.
+ // Round up to the nearest page and try again.
+ let page_size = os::page_size();
+ let stack_size =
+ (stack_size + page_size - 1) & (-(page_size as isize - 1) as usize - 1);
+ assert_eq!(libc::pthread_attr_setstacksize(&mut attr, stack_size), 0);
+ }
+ };
+
+ let ret = libc::pthread_create(&mut native, &attr, thread_start, p as *mut _);
+ // Note: if the thread creation fails and this assert fails, then p will
+ // be leaked. However, an alternative design could cause a double-free,
+ // which is clearly worse.
+ assert_eq!(libc::pthread_attr_destroy(&mut attr), 0);
+
+ return if ret != 0 {
+ // The thread failed to start and as a result p was not consumed. Therefore, it is
+ // safe to reconstruct the box so that it gets deallocated.
+ drop(Box::from_raw(p));
+ Err(io::Error::from_raw_os_error(ret))
+ } else {
+ // The new thread will start running at the earliest after the next yield.
+ // We add a yield here so that the user does not have to.
+ Thread::yield_now();
+ Ok(Thread { id: native })
+ };
+
+ extern "C" fn thread_start(main: *mut libc::c_void) -> *mut libc::c_void {
+ unsafe {
+ // Next would come the stack overflow handler, which may get triggered
+ // if we run out of stack; this is not necessary in a TEE.
+ //let _handler = stack_overflow::Handler::new();
+ // Finally, let's run some code.
+ Box::from_raw(main as *mut Box<dyn FnOnce()>)();
+ }
+ ptr::null_mut()
+ }
+ }
+
+ pub fn yield_now() {
+ let ret = unsafe { libc::sched_yield() };
+ debug_assert_eq!(ret, 0);
+ }
+
+ /// This does nothing on teeos.
+ pub fn set_name(_name: &CStr) {
+ // Neither pthread_setname_np nor prctl is available to the TA,
+ // so we can't implement this currently. If the need arises, please
+ // contact the teeos rustzone team.
+ }
+
+ /// Only the main thread can sleep for some amount of time on teeos.
+ pub fn sleep(dur: Duration) {
+ let sleep_millis = dur.as_millis();
+ let final_sleep: u32 =
+ if sleep_millis >= u32::MAX as u128 { u32::MAX } else { sleep_millis as u32 };
+ unsafe {
+ let _ = TEE_Wait(final_sleep);
+ }
+ }
+
+ /// Must be joined, because pthread_detach is not supported.
+ pub fn join(self) {
+ unsafe {
+ let ret = libc::pthread_join(self.id, ptr::null_mut());
+ mem::forget(self);
+ assert!(ret == 0, "failed to join thread: {}", io::Error::from_raw_os_error(ret));
+ }
+ }
+
+ pub fn id(&self) -> libc::pthread_t {
+ self.id
+ }
+
+ pub fn into_id(self) -> libc::pthread_t {
+ let id = self.id;
+ mem::forget(self);
+ id
+ }
+}
+
+impl Drop for Thread {
+ fn drop(&mut self) {
+ // We cannot call pthread_detach, so just panic if a thread was spawned
+ // without being joined.
+ panic!("thread must join, detach is not supported!");
+ }
+}
+
+// Note: Both `sched_getaffinity` and `sysconf` are available but not functional on
+// teeos, so this function always returns an error.
+pub fn available_parallelism() -> io::Result<NonZeroUsize> {
+ Err(io::Error::new(
+ io::ErrorKind::NotFound,
+ "The number of hardware threads is not known for the target platform",
+ ))
+}
+
+// stub
+pub mod guard {
+ use crate::ops::Range;
+ pub type Guard = Range<usize>;
+ pub unsafe fn current() -> Option<Guard> {
+ None
+ }
+ pub unsafe fn init() -> Option<Guard> {
+ None
+ }
+}
+
+fn min_stack_size(_: *const libc::pthread_attr_t) -> usize {
+ libc::PTHREAD_STACK_MIN.try_into().expect("Infallible")
+}
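The EINVAL fallback above rounds the stack size up to a page boundary with a
bitmask; for a power-of-two `page_size`, `-(page_size as isize - 1) as usize - 1`
is equivalent to `!(page_size - 1)`. A small sketch of the rounding identity,
assuming a 4 KiB page size:

    fn round_up_to_page(stack_size: usize, page_size: usize) -> usize {
        debug_assert!(page_size.is_power_of_two());
        // Adding page_size - 1 and then clearing the low bits lands on the
        // next page boundary (or stays put if already aligned).
        (stack_size + page_size - 1) & !(page_size - 1)
    }

    fn main() {
        assert_eq!(round_up_to_page(8 * 1024 + 1, 4096), 12 * 1024);
        assert_eq!(round_up_to_page(8 * 1024, 4096), 8 * 1024);
    }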
diff --git a/library/std/src/sys/teeos/thread_local_dtor.rs b/library/std/src/sys/teeos/thread_local_dtor.rs
new file mode 100644
index 000000000..5c6bc4d67
--- /dev/null
+++ b/library/std/src/sys/teeos/thread_local_dtor.rs
@@ -0,0 +1,4 @@
+pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
+ use crate::sys_common::thread_local_dtor::register_dtor_fallback;
+ register_dtor_fallback(t, dtor);
+}
diff --git a/library/std/src/sys/unix/args.rs b/library/std/src/sys/unix/args.rs
index 2da17fabc..9f7dcc041 100644
--- a/library/std/src/sys/unix/args.rs
+++ b/library/std/src/sys/unix/args.rs
@@ -244,13 +244,15 @@ mod imp {
let mut res = Vec::new();
unsafe {
- let process_info_sel = sel_registerName("processInfo\0".as_ptr());
- let arguments_sel = sel_registerName("arguments\0".as_ptr());
- let utf8_sel = sel_registerName("UTF8String\0".as_ptr());
- let count_sel = sel_registerName("count\0".as_ptr());
- let object_at_sel = sel_registerName("objectAtIndex:\0".as_ptr());
-
- let klass = objc_getClass("NSProcessInfo\0".as_ptr());
+ let process_info_sel =
+ sel_registerName(c"processInfo".as_ptr() as *const libc::c_uchar);
+ let arguments_sel = sel_registerName(c"arguments".as_ptr() as *const libc::c_uchar);
+ let utf8_sel = sel_registerName(c"UTF8String".as_ptr() as *const libc::c_uchar);
+ let count_sel = sel_registerName(c"count".as_ptr() as *const libc::c_uchar);
+ let object_at_sel =
+ sel_registerName(c"objectAtIndex:".as_ptr() as *const libc::c_uchar);
+
+ let klass = objc_getClass(c"NSProcessInfo".as_ptr() as *const libc::c_uchar);
let info = objc_msgSend(klass, process_info_sel);
let args = objc_msgSend(info, arguments_sel);
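The `c"..."` literals introduced here replace manually NUL-terminated byte
strings with `&'static CStr` values whose terminator is checked at compile
time. A minimal sketch of the difference (requires a compiler with C-string
literals, Rust 1.77+):

    use std::ffi::CStr;

    fn main() {
        // Before: the trailing NUL is easy to forget and only checked at runtime.
        let old: &CStr = CStr::from_bytes_with_nul(b"processInfo\0").unwrap();
        // After: the literal is already a &CStr, NUL-terminated by construction.
        let new: &CStr = c"processInfo";
        assert_eq!(old, new);
    }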
diff --git a/library/std/src/sys/unix/env.rs b/library/std/src/sys/unix/env.rs
index 3bb492fa9..3d4ba5098 100644
--- a/library/std/src/sys/unix/env.rs
+++ b/library/std/src/sys/unix/env.rs
@@ -174,17 +174,6 @@ pub mod os {
pub const EXE_EXTENSION: &str = "elf";
}
-#[cfg(all(target_os = "emscripten", target_arch = "asmjs"))]
-pub mod os {
- pub const FAMILY: &str = "unix";
- pub const OS: &str = "emscripten";
- pub const DLL_PREFIX: &str = "lib";
- pub const DLL_SUFFIX: &str = ".so";
- pub const DLL_EXTENSION: &str = "so";
- pub const EXE_SUFFIX: &str = ".js";
- pub const EXE_EXTENSION: &str = "js";
-}
-
#[cfg(all(target_os = "emscripten", target_arch = "wasm32"))]
pub mod os {
pub const FAMILY: &str = "unix";
diff --git a/library/std/src/sys/unix/fs.rs b/library/std/src/sys/unix/fs.rs
index 40eb910fd..72e7b1b1f 100644
--- a/library/std/src/sys/unix/fs.rs
+++ b/library/std/src/sys/unix/fs.rs
@@ -1140,7 +1140,7 @@ impl File {
cfg_has_statx! {
if let Some(ret) = unsafe { try_statx(
fd,
- b"\0" as *const _ as *const c_char,
+ c"".as_ptr() as *const c_char,
libc::AT_EMPTY_PATH | libc::AT_STATX_SYNC_AS_STAT,
libc::STATX_ALL,
) } {
@@ -1300,13 +1300,17 @@ impl File {
pub fn set_times(&self, times: FileTimes) -> io::Result<()> {
#[cfg(not(any(target_os = "redox", target_os = "espidf", target_os = "horizon")))]
- let to_timespec = |time: Option<SystemTime>| {
- match time {
- Some(time) if let Some(ts) = time.t.to_timespec() => Ok(ts),
- Some(time) if time > crate::sys::time::UNIX_EPOCH => Err(io::const_io_error!(io::ErrorKind::InvalidInput, "timestamp is too large to set as a file time")),
- Some(_) => Err(io::const_io_error!(io::ErrorKind::InvalidInput, "timestamp is too small to set as a file time")),
- None => Ok(libc::timespec { tv_sec: 0, tv_nsec: libc::UTIME_OMIT as _ }),
- }
+ let to_timespec = |time: Option<SystemTime>| match time {
+ Some(time) if let Some(ts) = time.t.to_timespec() => Ok(ts),
+ Some(time) if time > crate::sys::time::UNIX_EPOCH => Err(io::const_io_error!(
+ io::ErrorKind::InvalidInput,
+ "timestamp is too large to set as a file time"
+ )),
+ Some(_) => Err(io::const_io_error!(
+ io::ErrorKind::InvalidInput,
+ "timestamp is too small to set as a file time"
+ )),
+ None => Ok(libc::timespec { tv_sec: 0, tv_nsec: libc::UTIME_OMIT as _ }),
};
cfg_if::cfg_if! {
if #[cfg(any(target_os = "redox", target_os = "espidf", target_os = "horizon"))] {
diff --git a/library/std/src/sys/unix/mod.rs b/library/std/src/sys/unix/mod.rs
index 4b28f6feb..b5da5f870 100644
--- a/library/std/src/sys/unix/mod.rs
+++ b/library/std/src/sys/unix/mod.rs
@@ -1,6 +1,5 @@
#![allow(missing_docs, nonstandard_style)]
-use crate::ffi::CStr;
use crate::io::ErrorKind;
pub use self::rand::hashmap_random_keys;
@@ -75,7 +74,7 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) {
// thread-id for the main thread and so renaming the main thread will rename the
// process and we only want to enable this on platforms we've tested.
if cfg!(target_os = "macos") {
- thread::Thread::set_name(&CStr::from_bytes_with_nul_unchecked(b"main\0"));
+ thread::Thread::set_name(&c"main");
}
unsafe fn sanitize_standard_fds() {
@@ -127,7 +126,7 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) {
if pfd.revents & libc::POLLNVAL == 0 {
continue;
}
- if open64("/dev/null\0".as_ptr().cast(), libc::O_RDWR, 0) == -1 {
+ if open64(c"/dev/null".as_ptr().cast(), libc::O_RDWR, 0) == -1 {
// If the stream is closed but we failed to reopen it, abort the
// process. Otherwise we wouldn't preserve the safety of
// operations on the corresponding Rust object Stdin, Stdout, or
@@ -157,7 +156,7 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) {
use libc::open64;
for fd in 0..3 {
if libc::fcntl(fd, libc::F_GETFD) == -1 && errno() == libc::EBADF {
- if open64("/dev/null\0".as_ptr().cast(), libc::O_RDWR, 0) == -1 {
+ if open64(c"/dev/null".as_ptr().cast(), libc::O_RDWR, 0) == -1 {
// If the stream is closed but we failed to reopen it, abort the
// process. Otherwise we wouldn't preserve the safety of
// operations on the corresponding Rust object Stdin, Stdout, or
diff --git a/library/std/src/sys/unix/os.rs b/library/std/src/sys/unix/os.rs
index dc3c037c0..881b3a25c 100644
--- a/library/std/src/sys/unix/os.rs
+++ b/library/std/src/sys/unix/os.rs
@@ -180,7 +180,7 @@ pub fn getcwd() -> io::Result<PathBuf> {
}
#[cfg(target_os = "espidf")]
-pub fn chdir(p: &path::Path) -> io::Result<()> {
+pub fn chdir(_p: &path::Path) -> io::Result<()> {
super::unsupported::unsupported()
}
@@ -274,15 +274,19 @@ pub fn current_exe() -> io::Result<PathBuf> {
return path.canonicalize();
}
// Search PWD to infer current_exe.
- if let Some(pstr) = path.to_str() && pstr.contains("/") {
+ if let Some(pstr) = path.to_str()
+ && pstr.contains("/")
+ {
return getcwd().map(|cwd| cwd.join(path))?.canonicalize();
}
// Search PATH to infer current_exe.
if let Some(p) = getenv(OsStr::from_bytes("PATH".as_bytes())) {
for search_path in split_paths(&p) {
let pb = search_path.join(&path);
- if pb.is_file() && let Ok(metadata) = crate::fs::metadata(&pb) &&
- metadata.permissions().mode() & 0o111 != 0 {
+ if pb.is_file()
+ && let Ok(metadata) = crate::fs::metadata(&pb)
+ && metadata.permissions().mode() & 0o111 != 0
+ {
return pb.canonicalize();
}
}
diff --git a/library/std/src/sys/unix/process/process_common.rs b/library/std/src/sys/unix/process/process_common.rs
index bac32d9e6..c5f04fb8b 100644
--- a/library/std/src/sys/unix/process/process_common.rs
+++ b/library/std/src/sys/unix/process/process_common.rs
@@ -24,11 +24,11 @@ cfg_if::cfg_if! {
if #[cfg(target_os = "fuchsia")] {
// fuchsia doesn't have /dev/null
} else if #[cfg(target_os = "redox")] {
- const DEV_NULL: &str = "null:\0";
+ const DEV_NULL: &CStr = c"null:";
} else if #[cfg(target_os = "vxworks")] {
- const DEV_NULL: &str = "/null\0";
+ const DEV_NULL: &CStr = c"/null";
} else {
- const DEV_NULL: &str = "/dev/null\0";
+ const DEV_NULL: &CStr = c"/dev/null";
}
}
@@ -481,8 +481,7 @@ impl Stdio {
let mut opts = OpenOptions::new();
opts.read(readable);
opts.write(!readable);
- let path = unsafe { CStr::from_ptr(DEV_NULL.as_ptr() as *const _) };
- let fd = File::open_c(&path, &opts)?;
+ let fd = File::open_c(DEV_NULL, &opts)?;
Ok((ChildStdio::Owned(fd.into_inner()), None))
}
diff --git a/library/std/src/sys/unix/process/process_unix.rs b/library/std/src/sys/unix/process/process_unix.rs
index 72aca4e66..ee86a5f88 100644
--- a/library/std/src/sys/unix/process/process_unix.rs
+++ b/library/std/src/sys/unix/process/process_unix.rs
@@ -9,6 +9,8 @@ use core::ffi::NonZero_c_int;
#[cfg(target_os = "linux")]
use crate::os::linux::process::PidFd;
+#[cfg(target_os = "linux")]
+use crate::os::unix::io::AsRawFd;
#[cfg(any(
target_os = "macos",
@@ -696,11 +698,12 @@ impl Command {
msg.msg_iov = &mut iov as *mut _ as *mut _;
msg.msg_iovlen = 1;
- msg.msg_controllen = mem::size_of_val(&cmsg.buf) as _;
- msg.msg_control = &mut cmsg.buf as *mut _ as *mut _;
// only attach cmsg if we successfully acquired the pidfd
if pidfd >= 0 {
+ msg.msg_controllen = mem::size_of_val(&cmsg.buf) as _;
+ msg.msg_control = &mut cmsg.buf as *mut _ as *mut _;
+
let hdr = CMSG_FIRSTHDR(&mut msg as *mut _ as *mut _);
(*hdr).cmsg_level = SOL_SOCKET;
(*hdr).cmsg_type = SCM_RIGHTS;
@@ -717,7 +720,7 @@ impl Command {
// so we get a consistent SEQPACKET order
match cvt_r(|| libc::sendmsg(sock.as_raw(), &msg, 0)) {
Ok(0) => {}
- _ => rtabort!("failed to communicate with parent process"),
+ other => rtabort!("failed to communicate with parent process. {:?}", other),
}
}
}
@@ -748,7 +751,7 @@ impl Command {
msg.msg_controllen = mem::size_of::<Cmsg>() as _;
msg.msg_control = &mut cmsg as *mut _ as *mut _;
- match cvt_r(|| libc::recvmsg(sock.as_raw(), &mut msg, 0)) {
+ match cvt_r(|| libc::recvmsg(sock.as_raw(), &mut msg, libc::MSG_CMSG_CLOEXEC)) {
Err(_) => return -1,
Ok(_) => {}
}
@@ -787,7 +790,7 @@ pub struct Process {
// On Linux, stores the pidfd created for this child.
// This is None if the user did not request pidfd creation,
// or if the pidfd could not be created for some reason
- // (e.g. the `clone3` syscall was not available).
+ // (e.g. the `pidfd_open` syscall was not available).
#[cfg(target_os = "linux")]
pidfd: Option<PidFd>,
}
@@ -816,10 +819,23 @@ impl Process {
// and used for another process, and we probably shouldn't be killing
// random processes, so return Ok because the process has exited already.
if self.status.is_some() {
- Ok(())
- } else {
- cvt(unsafe { libc::kill(self.pid, libc::SIGKILL) }).map(drop)
+ return Ok(());
+ }
+ #[cfg(target_os = "linux")]
+ if let Some(pid_fd) = self.pidfd.as_ref() {
+ // pidfd_send_signal predates pidfd_open, so if we were able to get an fd,
+ // then sending signals will work too.
+ return cvt(unsafe {
+ libc::syscall(
+ libc::SYS_pidfd_send_signal,
+ pid_fd.as_raw_fd(),
+ libc::SIGKILL,
+ crate::ptr::null::<()>(),
+ 0,
+ )
+ })
+ .map(drop);
}
+ cvt(unsafe { libc::kill(self.pid, libc::SIGKILL) }).map(drop)
}
pub fn wait(&mut self) -> io::Result<ExitStatus> {
@@ -827,6 +843,17 @@ impl Process {
if let Some(status) = self.status {
return Ok(status);
}
+ #[cfg(target_os = "linux")]
+ if let Some(pid_fd) = self.pidfd.as_ref() {
+ let mut siginfo: libc::siginfo_t = unsafe { crate::mem::zeroed() };
+
+ cvt_r(|| unsafe {
+ libc::waitid(libc::P_PIDFD, pid_fd.as_raw_fd() as u32, &mut siginfo, libc::WEXITED)
+ })?;
+ let status = ExitStatus::from_waitid_siginfo(siginfo);
+ self.status = Some(status);
+ return Ok(status);
+ }
let mut status = 0 as c_int;
cvt_r(|| unsafe { libc::waitpid(self.pid, &mut status, 0) })?;
self.status = Some(ExitStatus::new(status));
@@ -837,6 +864,25 @@ impl Process {
if let Some(status) = self.status {
return Ok(Some(status));
}
+ #[cfg(target_os = "linux")]
+ if let Some(pid_fd) = self.pidfd.as_ref() {
+ let mut siginfo: libc::siginfo_t = unsafe { crate::mem::zeroed() };
+
+ cvt(unsafe {
+ libc::waitid(
+ libc::P_PIDFD,
+ pid_fd.as_raw_fd() as u32,
+ &mut siginfo,
+ libc::WEXITED | libc::WNOHANG,
+ )
+ })?;
+ if unsafe { siginfo.si_pid() } == 0 {
+ return Ok(None);
+ }
+ let status = ExitStatus::from_waitid_siginfo(siginfo);
+ self.status = Some(status);
+ return Ok(Some(status));
+ }
let mut status = 0 as c_int;
let pid = cvt(unsafe { libc::waitpid(self.pid, &mut status, libc::WNOHANG) })?;
if pid == 0 {
@@ -866,6 +912,20 @@ impl ExitStatus {
ExitStatus(status)
}
+ #[cfg(target_os = "linux")]
+ pub fn from_waitid_siginfo(siginfo: libc::siginfo_t) -> ExitStatus {
+ let status = unsafe { siginfo.si_status() };
+
+ match siginfo.si_code {
+ libc::CLD_EXITED => ExitStatus((status & 0xff) << 8),
+ libc::CLD_KILLED => ExitStatus(status),
+ libc::CLD_DUMPED => ExitStatus(status | 0x80),
+ libc::CLD_CONTINUED => ExitStatus(0xffff),
+ libc::CLD_STOPPED | libc::CLD_TRAPPED => ExitStatus(((status & 0xff) << 8) | 0x7f),
+ _ => unreachable!("waitid() should only return the above codes"),
+ }
+ }
+
fn exited(&self) -> bool {
libc::WIFEXITED(self.0)
}
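`from_waitid_siginfo` re-encodes the `siginfo_t` fields into the traditional
`wait()` status layout, so the existing `WIFEXITED`/`WIFSIGNALED`-style
accessors keep working unchanged. A quick sanity check of that encoding,
assuming the `libc` crate is available:

    fn main() {
        // CLD_EXITED with exit code 1 becomes (1 & 0xff) << 8 = 0x0100:
        let exited = (1 & 0xff) << 8;
        assert!(libc::WIFEXITED(exited));
        assert_eq!(libc::WEXITSTATUS(exited), 1);

        // CLD_KILLED leaves the raw signal number, e.g. SIGKILL:
        let killed = libc::SIGKILL;
        assert!(libc::WIFSIGNALED(killed));
        assert_eq!(libc::WTERMSIG(killed), libc::SIGKILL);
    }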
diff --git a/library/std/src/sys/unix/process/process_unix/tests.rs b/library/std/src/sys/unix/process/process_unix/tests.rs
index 6aa79e7f9..6e952ed7c 100644
--- a/library/std/src/sys/unix/process/process_unix/tests.rs
+++ b/library/std/src/sys/unix/process/process_unix/tests.rs
@@ -64,7 +64,8 @@ fn test_command_fork_no_unwind() {
#[test]
#[cfg(target_os = "linux")]
fn test_command_pidfd() {
- use crate::os::fd::RawFd;
+ use crate::assert_matches::assert_matches;
+ use crate::os::fd::{AsRawFd, RawFd};
use crate::os::linux::process::{ChildExt, CommandExt};
use crate::process::Command;
@@ -78,10 +79,22 @@ fn test_command_pidfd() {
};
// always exercise creation attempts
- let child = Command::new("echo").create_pidfd(true).spawn().unwrap();
+ let mut child = Command::new("false").create_pidfd(true).spawn().unwrap();
// but only check if we know that the kernel supports pidfds
if pidfd_open_available {
- assert!(child.pidfd().is_ok())
+ assert!(child.pidfd().is_ok());
}
+ if let Ok(pidfd) = child.pidfd() {
+ let flags = super::cvt(unsafe { libc::fcntl(pidfd.as_raw_fd(), libc::F_GETFD) }).unwrap();
+ assert!(flags & libc::FD_CLOEXEC != 0);
+ }
+ let status = child.wait().expect("error waiting on pidfd");
+ assert_eq!(status.code(), Some(1));
+
+ let mut child = Command::new("sleep").arg("1000").create_pidfd(true).spawn().unwrap();
+ assert_matches!(child.try_wait(), Ok(None));
+ child.kill().expect("failed to kill child");
+ let status = child.wait().expect("error waiting on pidfd");
+ assert_eq!(status.signal(), Some(libc::SIGKILL));
}
diff --git a/library/std/src/sys/unix/thread.rs b/library/std/src/sys/unix/thread.rs
index 29db9468e..76b96bb37 100644
--- a/library/std/src/sys/unix/thread.rs
+++ b/library/std/src/sys/unix/thread.rs
@@ -163,10 +163,9 @@ impl Thread {
#[cfg(target_os = "netbsd")]
pub fn set_name(name: &CStr) {
unsafe {
- let cname = CStr::from_bytes_with_nul_unchecked(b"%s\0".as_slice());
let res = libc::pthread_setname_np(
libc::pthread_self(),
- cname.as_ptr(),
+ c"%s".as_ptr(),
name.as_ptr() as *mut libc::c_void,
);
debug_assert_eq!(res, 0);
diff --git a/library/std/src/sys/unix/thread_local_dtor.rs b/library/std/src/sys/unix/thread_local_dtor.rs
index 06399e8a2..ac85531c3 100644
--- a/library/std/src/sys/unix/thread_local_dtor.rs
+++ b/library/std/src/sys/unix/thread_local_dtor.rs
@@ -12,7 +12,13 @@
// compiling from a newer linux to an older linux, so we also have a
// fallback implementation to use as well.
#[allow(unexpected_cfgs)]
-#[cfg(any(target_os = "linux", target_os = "fuchsia", target_os = "redox", target_os = "hurd"))]
+#[cfg(any(
+ target_os = "linux",
+ target_os = "android",
+ target_os = "fuchsia",
+ target_os = "redox",
+ target_os = "hurd"
+))]
// FIXME: The Rust compiler currently omits weakly linked function definitions (i.e.,
// __cxa_thread_atexit_impl) and their metadata from LLVM IR.
#[no_sanitize(cfi, kcfi)]
@@ -23,6 +29,8 @@ pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
/// This is necessary because the __cxa_thread_atexit_impl implementation
/// std links to by default may be a C or C++ implementation that was not
/// compiled using the Clang integer normalization option.
+ #[cfg(sanitizer_cfi_normalize_integers)]
+ use core::ffi::c_int;
#[cfg(not(sanitizer_cfi_normalize_integers))]
#[cfi_encoding = "i"]
#[repr(transparent)]
diff --git a/library/std/src/sys/unix/time.rs b/library/std/src/sys/unix/time.rs
index f2e86a4fb..f62eb828e 100644
--- a/library/std/src/sys/unix/time.rs
+++ b/library/std/src/sys/unix/time.rs
@@ -23,11 +23,11 @@ struct Nanoseconds(u32);
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct SystemTime {
- pub(in crate::sys::unix) t: Timespec,
+ pub(crate) t: Timespec,
}
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
-pub(in crate::sys::unix) struct Timespec {
+pub(crate) struct Timespec {
tv_sec: i64,
tv_nsec: Nanoseconds,
}
@@ -239,11 +239,11 @@ impl From<libc::timespec> for Timespec {
not(target_arch = "riscv32")
))]
#[repr(C)]
-pub(in crate::sys::unix) struct __timespec64 {
- pub(in crate::sys::unix) tv_sec: i64,
+pub(crate) struct __timespec64 {
+ pub(crate) tv_sec: i64,
#[cfg(target_endian = "big")]
_padding: i32,
- pub(in crate::sys::unix) tv_nsec: i32,
+ pub(crate) tv_nsec: i32,
#[cfg(target_endian = "little")]
_padding: i32,
}
@@ -255,7 +255,7 @@ pub(in crate::sys::unix) struct __timespec64 {
not(target_arch = "riscv32")
))]
impl __timespec64 {
- pub(in crate::sys::unix) fn new(tv_sec: i64, tv_nsec: i32) -> Self {
+ pub(crate) fn new(tv_sec: i64, tv_nsec: i32) -> Self {
Self { tv_sec, tv_nsec, _padding: 0 }
}
}
diff --git a/library/std/src/sys/wasi/fs.rs b/library/std/src/sys/wasi/fs.rs
index 437aae3ae..e82386654 100644
--- a/library/std/src/sys/wasi/fs.rs
+++ b/library/std/src/sys/wasi/fs.rs
@@ -477,12 +477,13 @@ impl File {
}
pub fn set_times(&self, times: FileTimes) -> io::Result<()> {
- let to_timestamp = |time: Option<SystemTime>| {
- match time {
- Some(time) if let Some(ts) = time.to_wasi_timestamp() => Ok(ts),
- Some(_) => Err(io::const_io_error!(io::ErrorKind::InvalidInput, "timestamp is too large to set as a file time")),
- None => Ok(0),
- }
+ let to_timestamp = |time: Option<SystemTime>| match time {
+ Some(time) if let Some(ts) = time.to_wasi_timestamp() => Ok(ts),
+ Some(_) => Err(io::const_io_error!(
+ io::ErrorKind::InvalidInput,
+ "timestamp is too large to set as a file time"
+ )),
+ None => Ok(0),
};
self.fd.filestat_set_times(
to_timestamp(times.accessed)?,
diff --git a/library/std/src/sys/windows/api.rs b/library/std/src/sys/windows/api.rs
index e9f0bbfbe..a7ea59e85 100644
--- a/library/std/src/sys/windows/api.rs
+++ b/library/std/src/sys/windows/api.rs
@@ -48,7 +48,7 @@ use super::c;
/// converted to a `u32`. Clippy would warn about this but, alas, it's not run
/// on the standard library.
const fn win32_size_of<T: Sized>() -> u32 {
- // Const assert that the size is less than u32::MAX.
+ // Const assert that the size does not exceed u32::MAX.
// Uses a trait to work around the restriction on using generic types in inner items.
trait Win32SizeOf: Sized {
const WIN32_SIZE_OF: u32 = {
@@ -132,7 +132,7 @@ pub fn set_file_information_by_handle<T: SetFileInformation>(
size: u32,
) -> Result<(), WinError> {
let result = c::SetFileInformationByHandle(handle, class, info, size);
- (result != 0).then_some(()).ok_or_else(|| get_last_error())
+ (result != 0).then_some(()).ok_or_else(get_last_error)
}
// SAFETY: The `SetFileInformation` trait ensures that this is safe.
unsafe { set_info(handle, T::CLASS, info.as_ptr(), info.size()) }
diff --git a/library/std/src/sys/windows/c.rs b/library/std/src/sys/windows/c.rs
index a349e24b0..d55d9bace 100644
--- a/library/std/src/sys/windows/c.rs
+++ b/library/std/src/sys/windows/c.rs
@@ -3,6 +3,7 @@
#![allow(nonstandard_style)]
#![cfg_attr(test, allow(dead_code))]
#![unstable(issue = "none", feature = "windows_c")]
+#![allow(clippy::style)]
use crate::ffi::CStr;
use crate::mem;
@@ -46,6 +47,8 @@ pub use FD_SET as fd_set;
pub use LINGER as linger;
pub use TIMEVAL as timeval;
+pub const INVALID_HANDLE_VALUE: HANDLE = ::core::ptr::invalid_mut(-1i32 as _);
+
// https://learn.microsoft.com/en-us/cpp/c-runtime-library/exit-success-exit-failure?view=msvc-170
pub const EXIT_SUCCESS: u32 = 0;
pub const EXIT_FAILURE: u32 = 1;
@@ -81,7 +84,7 @@ pub fn nt_success(status: NTSTATUS) -> bool {
impl UNICODE_STRING {
pub fn from_ref(slice: &[u16]) -> Self {
- let len = slice.len() * mem::size_of::<u16>();
+ let len = mem::size_of_val(slice);
Self { Length: len as _, MaximumLength: len as _, Buffer: slice.as_ptr() as _ }
}
}
@@ -321,7 +324,7 @@ pub unsafe fn NtWriteFile(
// Functions that aren't available on every version of Windows that we support,
// but we still use them and just provide some form of a fallback implementation.
compat_fn_with_fallback! {
- pub static KERNEL32: &CStr = ansi_str!("kernel32");
+ pub static KERNEL32: &CStr = c"kernel32";
// >= Win10 1607
// https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-setthreaddescription
@@ -354,7 +357,7 @@ compat_fn_optional! {
}
compat_fn_with_fallback! {
- pub static NTDLL: &CStr = ansi_str!("ntdll");
+ pub static NTDLL: &CStr = c"ntdll";
pub fn NtCreateKeyedEvent(
KeyedEventHandle: LPHANDLE,
diff --git a/library/std/src/sys/windows/c/windows_sys.lst b/library/std/src/sys/windows/c/windows_sys.lst
index 38bf15b7c..f91e1054a 100644
--- a/library/std/src/sys/windows/c/windows_sys.lst
+++ b/library/std/src/sys/windows/c/windows_sys.lst
@@ -2,6 +2,7 @@
--config flatten std
--filter
// tidy-alphabetical-start
+!Windows.Win32.Foundation.INVALID_HANDLE_VALUE
Windows.Wdk.Storage.FileSystem.FILE_COMPLETE_IF_OPLOCKED
Windows.Wdk.Storage.FileSystem.FILE_CONTAINS_EXTENDED_CREATE_INFORMATION
Windows.Wdk.Storage.FileSystem.FILE_CREATE
@@ -1923,7 +1924,6 @@ Windows.Win32.Foundation.HANDLE_FLAG_INHERIT
Windows.Win32.Foundation.HANDLE_FLAG_PROTECT_FROM_CLOSE
Windows.Win32.Foundation.HANDLE_FLAGS
Windows.Win32.Foundation.HMODULE
-Windows.Win32.Foundation.INVALID_HANDLE_VALUE
Windows.Win32.Foundation.MAX_PATH
Windows.Win32.Foundation.NO_ERROR
Windows.Win32.Foundation.NTSTATUS
@@ -2483,7 +2483,6 @@ Windows.Win32.System.SystemInformation.GetSystemTimeAsFileTime
Windows.Win32.System.SystemInformation.GetWindowsDirectoryW
Windows.Win32.System.SystemInformation.PROCESSOR_ARCHITECTURE
Windows.Win32.System.SystemInformation.SYSTEM_INFO
-Windows.Win32.System.SystemServices.ALL_PROCESSOR_GROUPS
Windows.Win32.System.SystemServices.DLL_PROCESS_DETACH
Windows.Win32.System.SystemServices.DLL_THREAD_DETACH
Windows.Win32.System.SystemServices.EXCEPTION_MAXIMUM_PARAMETERS
@@ -2492,6 +2491,7 @@ Windows.Win32.System.SystemServices.IO_REPARSE_TAG_SYMLINK
Windows.Win32.System.Threading.ABOVE_NORMAL_PRIORITY_CLASS
Windows.Win32.System.Threading.AcquireSRWLockExclusive
Windows.Win32.System.Threading.AcquireSRWLockShared
+Windows.Win32.System.Threading.ALL_PROCESSOR_GROUPS
Windows.Win32.System.Threading.BELOW_NORMAL_PRIORITY_CLASS
Windows.Win32.System.Threading.CREATE_BREAKAWAY_FROM_JOB
Windows.Win32.System.Threading.CREATE_DEFAULT_ERROR_MODE
diff --git a/library/std/src/sys/windows/c/windows_sys.rs b/library/std/src/sys/windows/c/windows_sys.rs
index e0509e6a5..b38b70c89 100644
--- a/library/std/src/sys/windows/c/windows_sys.rs
+++ b/library/std/src/sys/windows/c/windows_sys.rs
@@ -4,7 +4,7 @@
// regenerate the bindings.
//
// ignore-tidy-filelength
-// Bindings generated by `windows-bindgen` 0.51.1
+// Bindings generated by `windows-bindgen` 0.52.0
#![allow(non_snake_case, non_upper_case_globals, non_camel_case_types, dead_code, clippy::all)]
#[link(name = "advapi32")]
@@ -63,7 +63,7 @@ extern "system" {
lpnewfilename: PCWSTR,
lpprogressroutine: LPPROGRESS_ROUTINE,
lpdata: *const ::core::ffi::c_void,
- pbcancel: *mut i32,
+ pbcancel: *mut BOOL,
dwcopyflags: u32,
) -> BOOL;
}
@@ -619,7 +619,7 @@ extern "system" {
lpmultibytestr: PSTR,
cbmultibyte: i32,
lpdefaultchar: PCSTR,
- lpuseddefaultchar: *mut i32,
+ lpuseddefaultchar: *mut BOOL,
) -> i32;
}
#[link(name = "kernel32")]
@@ -869,7 +869,7 @@ pub const AF_INET: ADDRESS_FAMILY = 2u16;
pub const AF_INET6: ADDRESS_FAMILY = 23u16;
pub const AF_UNIX: u16 = 1u16;
pub const AF_UNSPEC: ADDRESS_FAMILY = 0u16;
-pub const ALL_PROCESSOR_GROUPS: u32 = 65535u32;
+pub const ALL_PROCESSOR_GROUPS: u16 = 65535u16;
#[repr(C)]
pub union ARM64_NT_NEON128 {
pub Anonymous: ARM64_NT_NEON128_0,
@@ -3498,7 +3498,6 @@ impl ::core::clone::Clone for INIT_ONCE {
}
pub const INIT_ONCE_INIT_FAILED: u32 = 4u32;
pub const INVALID_FILE_ATTRIBUTES: u32 = 4294967295u32;
-pub const INVALID_HANDLE_VALUE: HANDLE = ::core::ptr::invalid_mut(-1i32 as _);
pub const INVALID_SOCKET: SOCKET = -1i32 as _;
#[repr(C)]
pub struct IN_ADDR {
diff --git a/library/std/src/sys/windows/compat.rs b/library/std/src/sys/windows/compat.rs
index e28dd4935..f60b3a2c7 100644
--- a/library/std/src/sys/windows/compat.rs
+++ b/library/std/src/sys/windows/compat.rs
@@ -225,9 +225,9 @@ macro_rules! compat_fn_optional {
/// Load all needed functions from "api-ms-win-core-synch-l1-2-0".
pub(super) fn load_synch_functions() {
fn try_load() -> Option<()> {
- const MODULE_NAME: &CStr = ansi_str!("api-ms-win-core-synch-l1-2-0");
- const WAIT_ON_ADDRESS: &CStr = ansi_str!("WaitOnAddress");
- const WAKE_BY_ADDRESS_SINGLE: &CStr = ansi_str!("WakeByAddressSingle");
+ const MODULE_NAME: &CStr = c"api-ms-win-core-synch-l1-2-0";
+ const WAIT_ON_ADDRESS: &CStr = c"WaitOnAddress";
+ const WAKE_BY_ADDRESS_SINGLE: &CStr = c"WakeByAddressSingle";
// Try loading the library and all the required functions.
// If any step fails, then they all fail.
diff --git a/library/std/src/sys/windows/fs.rs b/library/std/src/sys/windows/fs.rs
index d7e36b9a3..424845436 100644
--- a/library/std/src/sys/windows/fs.rs
+++ b/library/std/src/sys/windows/fs.rs
@@ -1,7 +1,7 @@
use crate::os::windows::prelude::*;
use crate::borrow::Cow;
-use crate::ffi::OsString;
+use crate::ffi::{c_void, OsString};
use crate::fmt;
use crate::io::{self, BorrowedCursor, Error, IoSlice, IoSliceMut, SeekFrom};
use crate::mem::{self, MaybeUninit};
@@ -16,8 +16,6 @@ use crate::sys::{c, cvt, Align8};
use crate::sys_common::{AsInner, FromInner, IntoInner};
use crate::thread;
-use core::ffi::c_void;
-
use super::path::maybe_verbatim;
use super::{api, to_u16s, IoResult};
@@ -156,7 +154,7 @@ impl DirEntry {
}
pub fn path(&self) -> PathBuf {
- self.root.join(&self.file_name())
+ self.root.join(self.file_name())
}
pub fn file_name(&self) -> OsString {
@@ -273,7 +271,9 @@ impl OpenOptions {
(false, false, false) => c::OPEN_EXISTING,
(true, false, false) => c::OPEN_ALWAYS,
(false, true, false) => c::TRUNCATE_EXISTING,
- (true, true, false) => c::CREATE_ALWAYS,
+ // `CREATE_ALWAYS` has weird semantics, so we emulate it using
+ // `OPEN_ALWAYS` and a manual truncation step. See #115745.
+ (true, true, false) => c::OPEN_ALWAYS,
(_, _, true) => c::CREATE_NEW,
})
}
@@ -289,19 +289,42 @@ impl OpenOptions {
impl File {
pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> {
let path = maybe_verbatim(path)?;
+ let creation = opts.get_creation_mode()?;
let handle = unsafe {
c::CreateFileW(
path.as_ptr(),
opts.get_access_mode()?,
opts.share_mode,
opts.security_attributes,
- opts.get_creation_mode()?,
+ creation,
opts.get_flags_and_attributes(),
ptr::null_mut(),
)
};
let handle = unsafe { HandleOrInvalid::from_raw_handle(handle) };
- if let Ok(handle) = handle.try_into() {
+ if let Ok(handle) = OwnedHandle::try_from(handle) {
+ // Manual truncation. See #115745.
+ if opts.truncate
+ && creation == c::OPEN_ALWAYS
+ && unsafe { c::GetLastError() } == c::ERROR_ALREADY_EXISTS
+ {
+ unsafe {
+ // This originally used `FileAllocationInfo` instead of
+ // `FileEndOfFileInfo` but that wasn't supported by WINE.
+ // It's arguable which fits the semantics of `OpenOptions`
+ // better, so let's just use the more widely supported method.
+ let eof = c::FILE_END_OF_FILE_INFO { EndOfFile: 0 };
+ let result = c::SetFileInformationByHandle(
+ handle.as_raw_handle(),
+ c::FileEndOfFileInfo,
+ ptr::addr_of!(eof).cast::<c_void>(),
+ mem::size_of::<c::FILE_END_OF_FILE_INFO>() as u32,
+ );
+ if result == 0 {
+ return Err(io::Error::last_os_error());
+ }
+ }
+ }
Ok(File { handle: Handle::from_inner(handle) })
} else {
Err(Error::last_os_error())
@@ -548,7 +571,7 @@ impl File {
let user = super::args::from_wide_to_user_path(
subst.iter().copied().chain([0]).collect(),
)?;
- Ok(PathBuf::from(OsString::from_wide(&user.strip_suffix(&[0]).unwrap_or(&user))))
+ Ok(PathBuf::from(OsString::from_wide(user.strip_suffix(&[0]).unwrap_or(&user))))
} else {
Ok(PathBuf::from(OsString::from_wide(subst)))
}
@@ -786,7 +809,7 @@ fn open_link_no_reparse(parent: &File, name: &[u16], access: u32) -> io::Result<
// tricked into following a symlink. However, it may not be available in
// earlier versions of Windows.
static ATTRIBUTES: AtomicU32 = AtomicU32::new(c::OBJ_DONT_REPARSE);
- let mut object = c::OBJECT_ATTRIBUTES {
+ let object = c::OBJECT_ATTRIBUTES {
ObjectName: &mut name_str,
RootDirectory: parent.as_raw_handle(),
Attributes: ATTRIBUTES.load(Ordering::Relaxed),
@@ -795,7 +818,7 @@ fn open_link_no_reparse(parent: &File, name: &[u16], access: u32) -> io::Result<
let status = c::NtCreateFile(
&mut handle,
access,
- &mut object,
+ &object,
&mut io_status,
crate::ptr::null_mut(),
0,
@@ -874,7 +897,7 @@ impl fmt::Debug for File {
// FIXME(#24570): add more info here (e.g., mode)
let mut b = f.debug_struct("File");
b.field("handle", &self.handle.as_raw_handle());
- if let Ok(path) = get_path(&self) {
+ if let Ok(path) = get_path(self) {
b.field("path", &path);
}
b.finish()
@@ -1193,7 +1216,7 @@ pub fn readlink(path: &Path) -> io::Result<PathBuf> {
let mut opts = OpenOptions::new();
opts.access_mode(0);
opts.custom_flags(c::FILE_FLAG_OPEN_REPARSE_POINT | c::FILE_FLAG_BACKUP_SEMANTICS);
- let file = File::open(&path, &opts)?;
+ let file = File::open(path, &opts)?;
file.readlink()
}
@@ -1407,7 +1430,7 @@ pub fn symlink_junction<P: AsRef<Path>, Q: AsRef<Path>>(
#[allow(dead_code)]
fn symlink_junction_inner(original: &Path, junction: &Path) -> io::Result<()> {
let d = DirBuilder::new();
- d.mkdir(&junction)?;
+ d.mkdir(junction)?;
let mut opts = OpenOptions::new();
opts.write(true);
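At the public API level, the combination affected by the `OPEN_ALWAYS`
emulation is `write(true) + truncate(true) + create(true)`, which is what
`File::create` requests. A sketch of the user-visible behavior the emulation
preserves (assumes a writable working directory; the file name is arbitrary):

    use std::fs::{File, OpenOptions};
    use std::io::Write;

    fn main() -> std::io::Result<()> {
        let mut f = File::create("demo.txt")?; // create-or-open, then truncate
        f.write_all(b"hello")?;
        drop(f);

        // Re-opening with truncation zeroes the length; with the OPEN_ALWAYS
        // emulation the existing file's metadata is no longer recreated.
        let f = OpenOptions::new().write(true).truncate(true).create(true).open("demo.txt")?;
        assert_eq!(f.metadata()?.len(), 0);
        Ok(())
    }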
diff --git a/library/std/src/sys/windows/handle.rs b/library/std/src/sys/windows/handle.rs
index 56d0d6c08..c4495f81a 100644
--- a/library/std/src/sys/windows/handle.rs
+++ b/library/std/src/sys/windows/handle.rs
@@ -81,7 +81,7 @@ impl Handle {
let res = unsafe { self.synchronous_read(buf.as_mut_ptr().cast(), buf.len(), None) };
match res {
- Ok(read) => Ok(read as usize),
+ Ok(read) => Ok(read),
// The special treatment of BrokenPipe is to deal with Windows
// pipe semantics, which yields this error when *reading* from
@@ -107,7 +107,7 @@ impl Handle {
unsafe { self.synchronous_read(buf.as_mut_ptr().cast(), buf.len(), Some(offset)) };
match res {
- Ok(read) => Ok(read as usize),
+ Ok(read) => Ok(read),
Err(ref e) if e.raw_os_error() == Some(c::ERROR_HANDLE_EOF as i32) => Ok(0),
Err(e) => Err(e),
}
@@ -121,7 +121,7 @@ impl Handle {
Ok(read) => {
// Safety: `read` bytes were written to the initialized portion of the buffer
unsafe {
- cursor.advance(read as usize);
+ cursor.advance(read);
}
Ok(())
}
@@ -189,7 +189,7 @@ impl Handle {
}
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
- self.synchronous_write(&buf, None)
+ self.synchronous_write(buf, None)
}
pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
@@ -202,7 +202,7 @@ impl Handle {
}
pub fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> {
- self.synchronous_write(&buf, Some(offset))
+ self.synchronous_write(buf, Some(offset))
}
pub fn try_clone(&self) -> io::Result<Self> {
diff --git a/library/std/src/sys/windows/io.rs b/library/std/src/sys/windows/io.rs
index 9b540ee07..649826d25 100644
--- a/library/std/src/sys/windows/io.rs
+++ b/library/std/src/sys/windows/io.rs
@@ -36,7 +36,7 @@ impl<'a> IoSlice<'a> {
#[inline]
pub fn as_slice(&self) -> &[u8] {
- unsafe { slice::from_raw_parts(self.vec.buf as *mut u8, self.vec.len as usize) }
+ unsafe { slice::from_raw_parts(self.vec.buf, self.vec.len as usize) }
}
}
@@ -70,12 +70,12 @@ impl<'a> IoSliceMut<'a> {
#[inline]
pub fn as_slice(&self) -> &[u8] {
- unsafe { slice::from_raw_parts(self.vec.buf as *mut u8, self.vec.len as usize) }
+ unsafe { slice::from_raw_parts(self.vec.buf, self.vec.len as usize) }
}
#[inline]
pub fn as_mut_slice(&mut self) -> &mut [u8] {
- unsafe { slice::from_raw_parts_mut(self.vec.buf as *mut u8, self.vec.len as usize) }
+ unsafe { slice::from_raw_parts_mut(self.vec.buf, self.vec.len as usize) }
}
}
diff --git a/library/std/src/sys/windows/mod.rs b/library/std/src/sys/windows/mod.rs
index c4e56e13b..8b722f01a 100644
--- a/library/std/src/sys/windows/mod.rs
+++ b/library/std/src/sys/windows/mod.rs
@@ -1,6 +1,6 @@
#![allow(missing_docs, nonstandard_style)]
-use crate::ffi::{CStr, OsStr, OsString};
+use crate::ffi::{OsStr, OsString};
use crate::io::ErrorKind;
use crate::mem::MaybeUninit;
use crate::os::windows::ffi::{OsStrExt, OsStringExt};
@@ -63,7 +63,7 @@ pub unsafe fn init(_argc: isize, _argv: *const *const u8, _sigpipe: u8) {
// Normally, `thread::spawn` will call `Thread::set_name` but since this thread already
// exists, we have to call it ourselves.
- thread::Thread::set_name(&CStr::from_bytes_with_nul_unchecked(b"main\0"));
+ thread::Thread::set_name(&c"main");
}
// SAFETY: must be called only once during runtime cleanup.
@@ -150,7 +150,7 @@ pub fn decode_error_kind(errno: i32) -> ErrorKind {
pub fn unrolled_find_u16s(needle: u16, haystack: &[u16]) -> Option<usize> {
let ptr = haystack.as_ptr();
- let mut start = &haystack[..];
+ let mut start = haystack;
// For performance reasons unroll the loop eight times.
while start.len() >= 8 {
diff --git a/library/std/src/sys/windows/net.rs b/library/std/src/sys/windows/net.rs
index c29b86366..6cd758ec5 100644
--- a/library/std/src/sys/windows/net.rs
+++ b/library/std/src/sys/windows/net.rs
@@ -162,7 +162,7 @@ impl Socket {
let mut timeout = c::timeval {
tv_sec: cmp::min(timeout.as_secs(), c_long::MAX as u64) as c_long,
- tv_usec: (timeout.subsec_nanos() / 1000) as c_long,
+ tv_usec: timeout.subsec_micros() as c_long,
};
if timeout.tv_sec == 0 && timeout.tv_usec == 0 {
diff --git a/library/std/src/sys/windows/os.rs b/library/std/src/sys/windows/os.rs
index 8cc905101..829dd5eb9 100644
--- a/library/std/src/sys/windows/os.rs
+++ b/library/std/src/sys/windows/os.rs
@@ -297,7 +297,7 @@ pub fn getenv(k: &OsStr) -> Option<OsString> {
let k = to_u16s(k).ok()?;
super::fill_utf16_buf(
|buf, sz| unsafe { c::GetEnvironmentVariableW(k.as_ptr(), buf, sz) },
- |buf| OsStringExt::from_wide(buf),
+ OsStringExt::from_wide,
)
.ok()
}
@@ -356,7 +356,7 @@ pub fn home_dir() -> Option<PathBuf> {
crate::env::var_os("HOME")
.or_else(|| crate::env::var_os("USERPROFILE"))
.map(PathBuf::from)
- .or_else(|| home_dir_crt())
+ .or_else(home_dir_crt)
}
pub fn exit(code: i32) -> ! {
@@ -364,5 +364,5 @@ pub fn exit(code: i32) -> ! {
}
pub fn getpid() -> u32 {
- unsafe { c::GetCurrentProcessId() as u32 }
+ unsafe { c::GetCurrentProcessId() }
}
diff --git a/library/std/src/sys/windows/path.rs b/library/std/src/sys/windows/path.rs
index 8c0e07b35..d9684f217 100644
--- a/library/std/src/sys/windows/path.rs
+++ b/library/std/src/sys/windows/path.rs
@@ -78,7 +78,7 @@ impl<'a> PrefixParserSlice<'a, '_> {
fn strip_prefix(&self, prefix: &str) -> Option<Self> {
self.prefix[self.index..]
.starts_with(prefix.as_bytes())
- .then(|| Self { index: self.index + prefix.len(), ..*self })
+ .then_some(Self { index: self.index + prefix.len(), ..*self })
}
fn prefix_bytes(&self) -> &'a [u8] {
@@ -104,7 +104,9 @@ pub fn parse_prefix(path: &OsStr) -> Option<Prefix<'_>> {
// The meaning of verbatim paths can change when they use a different
// separator.
- if let Some(parser) = parser.strip_prefix(r"?\") && !parser.prefix_bytes().iter().any(|&x| x == b'/') {
+ if let Some(parser) = parser.strip_prefix(r"?\")
+ && !parser.prefix_bytes().iter().any(|&x| x == b'/')
+ {
// \\?\
if let Some(parser) = parser.strip_prefix(r"UNC\") {
// \\?\UNC\server\share
@@ -145,12 +147,10 @@ pub fn parse_prefix(path: &OsStr) -> Option<Prefix<'_>> {
None
}
}
- } else if let Some(drive) = parse_drive(path) {
- // C:
- Some(Disk(drive))
} else {
- // no prefix
- None
+ // If it has a drive like `C:` then it's a disk.
+ // Otherwise there is no prefix.
+ parse_drive(path).map(Disk)
}
}
@@ -250,7 +250,7 @@ pub(crate) fn get_long_path(mut path: Vec<u16>, prefer_verbatim: bool) -> io::Re
// \\?\UNC\
const UNC_PREFIX: &[u16] = &[SEP, SEP, QUERY, SEP, U, N, C, SEP];
- if path.starts_with(VERBATIM_PREFIX) || path.starts_with(NT_PREFIX) || path == &[0] {
+ if path.starts_with(VERBATIM_PREFIX) || path.starts_with(NT_PREFIX) || path == [0] {
// Early return for paths that are already verbatim or empty.
return Ok(path);
} else if path.len() < LEGACY_MAX_PATH {
diff --git a/library/std/src/sys/windows/process.rs b/library/std/src/sys/windows/process.rs
index f4078d359..9ec775959 100644
--- a/library/std/src/sys/windows/process.rs
+++ b/library/std/src/sys/windows/process.rs
@@ -245,7 +245,7 @@ impl Command {
}
pub fn get_current_dir(&self) -> Option<&Path> {
- self.cwd.as_ref().map(|cwd| Path::new(cwd))
+ self.cwd.as_ref().map(Path::new)
}
pub unsafe fn raw_attribute<T: Copy + Send + Sync + 'static>(
@@ -463,7 +463,7 @@ fn resolve_exe<'a>(
// Search the directories given by `search_paths`.
let result = search_paths(parent_paths, child_paths, |mut path| {
- path.push(&exe_path);
+ path.push(exe_path);
if !has_extension {
path.set_extension(EXE_EXTENSION);
}
@@ -597,7 +597,7 @@ impl Stdio {
opts.read(stdio_id == c::STD_INPUT_HANDLE);
opts.write(stdio_id != c::STD_INPUT_HANDLE);
opts.security_attributes(&mut sa);
- File::open(Path::new("NUL"), &opts).map(|file| file.into_inner())
+ File::open(Path::new(r"\\.\NUL"), &opts).map(|file| file.into_inner())
}
}
}
@@ -657,7 +657,7 @@ impl Process {
}
pub fn id(&self) -> u32 {
- unsafe { c::GetProcessId(self.handle.as_raw_handle()) as u32 }
+ unsafe { c::GetProcessId(self.handle.as_raw_handle()) }
}
pub fn main_thread_handle(&self) -> BorrowedHandle<'_> {
@@ -917,9 +917,8 @@ fn make_proc_thread_attribute_list(
)
};
- let mut proc_thread_attribute_list = ProcThreadAttributeList(
- vec![MaybeUninit::uninit(); required_size as usize].into_boxed_slice(),
- );
+ let mut proc_thread_attribute_list =
+ ProcThreadAttributeList(vec![MaybeUninit::uninit(); required_size].into_boxed_slice());
// Once we've allocated the necessary memory, it's safe to invoke
// `InitializeProcThreadAttributeList` to properly initialize the list.
diff --git a/library/std/src/sys/windows/stdio.rs b/library/std/src/sys/windows/stdio.rs
index a9ff909aa..819a48266 100644
--- a/library/std/src/sys/windows/stdio.rs
+++ b/library/std/src/sys/windows/stdio.rs
@@ -195,7 +195,7 @@ fn write_valid_utf8_to_console(handle: c::HANDLE, utf8: &str) -> io::Result<usiz
MaybeUninit::slice_assume_init_ref(&utf16[..result as usize])
};
- let mut written = write_u16s(handle, &utf16)?;
+ let mut written = write_u16s(handle, utf16)?;
// Figure out how many bytes of UTF-8 were written away as UTF-16.
if written == utf16.len() {
@@ -207,7 +207,7 @@ fn write_valid_utf8_to_console(handle: c::HANDLE, utf8: &str) -> io::Result<usiz
// write the missing surrogate out now.
// Buffering it would mean we have to lie about the number of bytes written.
let first_code_unit_remaining = utf16[written];
- if first_code_unit_remaining >= 0xDC00 && first_code_unit_remaining <= 0xDFFF {
+ if matches!(first_code_unit_remaining, 0xDC00..=0xDFFF) {
// low surrogate
// We just hope this works, and give up otherwise
let _ = write_u16s(handle, &utf16[written..written + 1]);
@@ -266,7 +266,7 @@ impl io::Read for Stdin {
let mut bytes_copied = self.incomplete_utf8.read(buf);
if bytes_copied == buf.len() {
- return Ok(bytes_copied);
+ Ok(bytes_copied)
} else if buf.len() - bytes_copied < 4 {
// Not enough space to get a UTF-8 byte. We will use the incomplete UTF8.
let mut utf16_buf = [MaybeUninit::new(0); 1];
@@ -332,7 +332,7 @@ fn read_u16s_fixup_surrogates(
// and it is not 0, so we know that `buf[amount - 1]` have been
// initialized.
let last_char = unsafe { buf[amount - 1].assume_init() };
- if last_char >= 0xD800 && last_char <= 0xDBFF {
+ if matches!(last_char, 0xD800..=0xDBFF) {
// high surrogate
*surrogate = last_char;
amount -= 1;
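Both rewritten checks are range tests against the UTF-16 surrogate blocks:
0xD800..=0xDBFF for high (leading) surrogates and 0xDC00..=0xDFFF for low
(trailing) ones. A standalone sketch of the classification, away from the
console plumbing:

    fn is_high_surrogate(u: u16) -> bool {
        matches!(u, 0xD800..=0xDBFF)
    }

    fn is_low_surrogate(u: u16) -> bool {
        matches!(u, 0xDC00..=0xDFFF)
    }

    fn main() {
        // U+1D11E (musical G clef) encodes as a surrogate pair in UTF-16.
        let units: Vec<u16> = "\u{1D11E}".encode_utf16().collect();
        assert!(is_high_surrogate(units[0]));
        assert!(is_low_surrogate(units[1]));
    }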
diff --git a/library/std/src/sys/windows/time.rs b/library/std/src/sys/windows/time.rs
index bece48e79..09e78a293 100644
--- a/library/std/src/sys/windows/time.rs
+++ b/library/std/src/sys/windows/time.rs
@@ -1,7 +1,7 @@
use crate::cmp::Ordering;
use crate::fmt;
use crate::mem;
-use crate::ptr::{null, null_mut};
+use crate::ptr::null;
use crate::sys::c;
use crate::sys_common::IntoInner;
use crate::time::Duration;
@@ -240,7 +240,7 @@ impl WaitableTimer {
c::TIMER_ALL_ACCESS,
)
};
- if handle != null_mut() { Ok(Self { handle }) } else { Err(()) }
+ if !handle.is_null() { Ok(Self { handle }) } else { Err(()) }
}
pub fn set(&self, duration: Duration) -> Result<(), ()> {
// Convert the Duration to a format similar to FILETIME.
diff --git a/library/std/src/sys/xous/mod.rs b/library/std/src/sys/xous/mod.rs
index 6d5c218d1..c2550dcfd 100644
--- a/library/std/src/sys/xous/mod.rs
+++ b/library/std/src/sys/xous/mod.rs
@@ -28,7 +28,6 @@ pub mod process;
pub mod stdio;
pub mod thread;
pub mod thread_local_key;
-#[path = "../unsupported/thread_parking.rs"]
pub mod thread_parking;
pub mod time;
diff --git a/library/std/src/sys/xous/os.rs b/library/std/src/sys/xous/os.rs
index 3d19fa4b3..8d2eaee8a 100644
--- a/library/std/src/sys/xous/os.rs
+++ b/library/std/src/sys/xous/os.rs
@@ -8,6 +8,28 @@ use crate::os::xous::ffi::Error as XousError;
use crate::path::{self, PathBuf};
#[cfg(not(test))]
+#[cfg(feature = "panic_unwind")]
+mod eh_unwinding {
+ pub(crate) struct EhFrameFinder(usize /* eh_frame */);
+ pub(crate) static mut EH_FRAME_SETTINGS: EhFrameFinder = EhFrameFinder(0);
+ impl EhFrameFinder {
+ pub(crate) unsafe fn init(&mut self, eh_frame: usize) {
+ unsafe {
+ EH_FRAME_SETTINGS.0 = eh_frame;
+ }
+ }
+ }
+ unsafe impl unwind::EhFrameFinder for EhFrameFinder {
+ fn find(&self, _pc: usize) -> Option<unwind::FrameInfo> {
+ Some(unwind::FrameInfo {
+ text_base: None,
+ kind: unwind::FrameInfoKind::EhFrame(self.0),
+ })
+ }
+ }
+}
+
+#[cfg(not(test))]
mod c_compat {
use crate::os::xous::ffi::exit;
extern "C" {
@@ -20,7 +42,12 @@ mod c_compat {
}
#[no_mangle]
- pub extern "C" fn _start() {
+ pub extern "C" fn _start(eh_frame: usize) {
+ #[cfg(feature = "panic_unwind")]
+ unsafe {
+ super::eh_unwinding::EH_FRAME_SETTINGS.init(eh_frame);
+ unwind::set_custom_eh_frame_finder(&super::eh_unwinding::EH_FRAME_SETTINGS).ok();
+ }
exit(unsafe { main() });
}
diff --git a/library/std/src/sys/xous/thread_parking.rs b/library/std/src/sys/xous/thread_parking.rs
new file mode 100644
index 000000000..aa39c6d27
--- /dev/null
+++ b/library/std/src/sys/xous/thread_parking.rs
@@ -0,0 +1,94 @@
+use crate::os::xous::ffi::{blocking_scalar, scalar};
+use crate::os::xous::services::{ticktimer_server, TicktimerScalar};
+use crate::pin::Pin;
+use crate::ptr;
+use crate::sync::atomic::{
+ AtomicI8,
+ Ordering::{Acquire, Release},
+};
+use crate::time::Duration;
+
+const NOTIFIED: i8 = 1;
+const EMPTY: i8 = 0;
+const PARKED: i8 = -1;
+
+pub struct Parker {
+ state: AtomicI8,
+}
+
+impl Parker {
+ pub unsafe fn new_in_place(parker: *mut Parker) {
+ unsafe { parker.write(Parker { state: AtomicI8::new(EMPTY) }) }
+ }
+
+ fn index(&self) -> usize {
+ ptr::from_ref(self).addr()
+ }
+
+ pub unsafe fn park(self: Pin<&Self>) {
+ // Change NOTIFIED to EMPTY and EMPTY to PARKED.
+ let state = self.state.fetch_sub(1, Acquire);
+ if state == NOTIFIED {
+ return;
+ }
+
+ // The state was set to PARKED. Wait until the `unpark` wakes us up.
+ blocking_scalar(
+ ticktimer_server(),
+ TicktimerScalar::WaitForCondition(self.index(), 0).into(),
+ )
+ .expect("failed to send WaitForCondition command");
+
+ self.state.swap(EMPTY, Acquire);
+ }
+
+ pub unsafe fn park_timeout(self: Pin<&Self>, timeout: Duration) {
+ // Change NOTIFIED to EMPTY and EMPTY to PARKED.
+ let state = self.state.fetch_sub(1, Acquire);
+ if state == NOTIFIED {
+ return;
+ }
+
+ // A value of zero indicates an indefinite wait. Clamp the number of
+ // milliseconds to the allowed range.
+ let millis = usize::max(timeout.as_millis().try_into().unwrap_or(usize::MAX), 1);
+
+ let was_timeout = blocking_scalar(
+ ticktimer_server(),
+ TicktimerScalar::WaitForCondition(self.index(), millis).into(),
+ )
+ .expect("failed to send WaitForCondition command")[0]
+ != 0;
+
+ let state = self.state.swap(EMPTY, Acquire);
+ if was_timeout && state == NOTIFIED {
+ // The state was set to NOTIFIED after we returned from the wait
+ // but before we reset the state. Therefore, a wakeup is on its
+ // way, which we need to consume here.
+ // NOTICE: this is a priority hole.
+ blocking_scalar(
+ ticktimer_server(),
+ TicktimerScalar::WaitForCondition(self.index(), 0).into(),
+ )
+ .expect("failed to send WaitForCondition command");
+ }
+ }
+
+ pub fn unpark(self: Pin<&Self>) {
+ let state = self.state.swap(NOTIFIED, Release);
+ if state == PARKED {
+ // The thread is parked, wake it up.
+ blocking_scalar(
+ ticktimer_server(),
+ TicktimerScalar::NotifyCondition(self.index(), 1).into(),
+ )
+ .expect("failed to send NotifyCondition command");
+ }
+ }
+}
+
+impl Drop for Parker {
+ fn drop(&mut self) {
+ scalar(ticktimer_server(), TicktimerScalar::FreeCondition(self.index()).into()).ok();
+ }
+}
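The parker packs its whole state machine into one `AtomicI8`: a single
`fetch_sub(1)` in `park` moves NOTIFIED (1) to EMPTY (0), consuming a pending
token, or EMPTY (0) to PARKED (-1), registering the waiter, in one atomic
step. A standalone sketch of just that transition, outside the ticktimer
plumbing:

    use std::sync::atomic::{AtomicI8, Ordering::Acquire};

    const NOTIFIED: i8 = 1;
    const EMPTY: i8 = 0;
    const PARKED: i8 = -1;

    fn main() {
        // A pending notification is consumed: 1 -> 0, park returns at once.
        let state = AtomicI8::new(NOTIFIED);
        assert_eq!(state.fetch_sub(1, Acquire), NOTIFIED);
        assert_eq!(state.load(Acquire), EMPTY);

        // No notification pending: 0 -> -1, the thread must block.
        assert_eq!(state.fetch_sub(1, Acquire), EMPTY);
        assert_eq!(state.load(Acquire), PARKED);
    }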
diff --git a/library/std/src/sys_common/backtrace.rs b/library/std/src/sys_common/backtrace.rs
index 84e2c5d8d..adfe721cf 100644
--- a/library/std/src/sys_common/backtrace.rs
+++ b/library/std/src/sys_common/backtrace.rs
@@ -64,6 +64,7 @@ unsafe fn _print_fmt(fmt: &mut fmt::Formatter<'_>, print_fmt: PrintFmt) -> fmt::
let mut first_omit = true;
// Start immediately if we're not using a short backtrace.
let mut start = print_fmt != PrintFmt::Short;
+ set_image_base();
backtrace_rs::trace_unsynchronized(|frame| {
if print_fmt == PrintFmt::Short && idx > MAX_NB_FRAMES {
return false;
@@ -213,3 +214,14 @@ pub fn output_filename(
}
fmt::Display::fmt(&file.display(), fmt)
}
+
+#[cfg(all(target_vendor = "fortanix", target_env = "sgx"))]
+pub fn set_image_base() {
+ let image_base = crate::os::fortanix_sgx::mem::image_base();
+ backtrace_rs::set_image_base(crate::ptr::invalid_mut(image_base as _));
+}
+
+#[cfg(not(all(target_vendor = "fortanix", target_env = "sgx")))]
+pub fn set_image_base() {
+ // nothing to do for platforms other than SGX
+}
diff --git a/library/std/src/sys_common/mod.rs b/library/std/src/sys_common/mod.rs
index e18638f2a..851832a37 100644
--- a/library/std/src/sys_common/mod.rs
+++ b/library/std/src/sys_common/mod.rs
@@ -43,15 +43,15 @@ cfg_if::cfg_if! {
}
cfg_if::cfg_if! {
- if #[cfg(any(target_os = "l4re",
- target_os = "uefi",
- feature = "restricted-std",
- all(target_family = "wasm", not(target_os = "emscripten")),
- target_os = "xous",
- all(target_vendor = "fortanix", target_env = "sgx")))] {
- pub use crate::sys::net;
- } else {
+ if #[cfg(any(
+ all(unix, not(target_os = "l4re")),
+ windows,
+ target_os = "hermit",
+ target_os = "solid_asp3"
+ ))] {
pub mod net;
+ } else {
+ pub use crate::sys::net;
}
}
diff --git a/library/std/src/sys_common/once/futex.rs b/library/std/src/sys_common/once/futex.rs
index 42db5fad4..609085dcd 100644
--- a/library/std/src/sys_common/once/futex.rs
+++ b/library/std/src/sys_common/once/futex.rs
@@ -128,7 +128,8 @@ impl Once {
RUNNING | QUEUED => {
// Set the state to QUEUED if it is not already.
if state == RUNNING
- && let Err(new) = self.state.compare_exchange_weak(RUNNING, QUEUED, Relaxed, Acquire)
+ && let Err(new) =
+ self.state.compare_exchange_weak(RUNNING, QUEUED, Relaxed, Acquire)
{
state = new;
continue;
diff --git a/library/std/src/sys_common/thread.rs b/library/std/src/sys_common/thread.rs
index 76466b2b3..8f5624bbc 100644
--- a/library/std/src/sys_common/thread.rs
+++ b/library/std/src/sys_common/thread.rs
@@ -8,7 +8,7 @@ pub fn min_stack() -> usize {
0 => {}
n => return n - 1,
}
- let amt = env::var("RUST_MIN_STACK").ok().and_then(|s| s.parse().ok());
+ let amt = env::var_os("RUST_MIN_STACK").and_then(|s| s.to_str().and_then(|s| s.parse().ok()));
let amt = amt.unwrap_or(imp::DEFAULT_MIN_STACK_SIZE);
// 0 is our sentinel value, so ensure that we'll never see 0 after
diff --git a/library/std/src/sys_common/wtf8/tests.rs b/library/std/src/sys_common/wtf8/tests.rs
index a07bbe6d7..28a426648 100644
--- a/library/std/src/sys_common/wtf8/tests.rs
+++ b/library/std/src/sys_common/wtf8/tests.rs
@@ -1,5 +1,4 @@
use super::*;
-use crate::borrow::Cow;
#[test]
fn code_point_from_u32() {
diff --git a/library/std/src/thread/mod.rs b/library/std/src/thread/mod.rs
index 4097eb554..849893780 100644
--- a/library/std/src/thread/mod.rs
+++ b/library/std/src/thread/mod.rs
@@ -548,10 +548,6 @@ impl Builder {
let main = Box::new(main);
// SAFETY: dynamic size and alignment of the Box remain the same. See below for why the
// lifetime change is justified.
- #[cfg(bootstrap)]
- let main =
- unsafe { mem::transmute::<Box<dyn FnOnce() + 'a>, Box<dyn FnOnce() + 'static>>(main) };
- #[cfg(not(bootstrap))]
let main = unsafe { Box::from_raw(Box::into_raw(main) as *mut (dyn FnOnce() + 'static)) };
Ok(JoinInner {
@@ -1585,6 +1581,7 @@ impl<'scope, T> JoinInner<'scope, T> {
/// [`thread::Builder::spawn`]: Builder::spawn
/// [`thread::spawn`]: spawn
#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(target_os = "teeos", must_use)]
pub struct JoinHandle<T>(JoinInner<'static, T>);
#[stable(feature = "joinhandle_impl_send_sync", since = "1.29.0")]
diff --git a/library/std/tests/common/mod.rs b/library/std/tests/common/mod.rs
index 358c2c3f9..1aad6549e 100644
--- a/library/std/tests/common/mod.rs
+++ b/library/std/tests/common/mod.rs
@@ -1,17 +1,17 @@
#![allow(unused)]
+use rand::RngCore;
use std::env;
use std::fs;
use std::path::{Path, PathBuf};
use std::thread;
-use rand::RngCore;
/// Copied from `std::test_helpers::test_rng`, since these tests rely on the
/// seed not being the same for every RNG invocation too.
#[track_caller]
pub(crate) fn test_rng() -> rand_xorshift::XorShiftRng {
use core::hash::{BuildHasher, Hash, Hasher};
- let mut hasher = std::collections::hash_map::RandomState::new().build_hasher();
+ let mut hasher = std::hash::RandomState::new().build_hasher();
core::panic::Location::caller().hash(&mut hasher);
let hc64 = hasher.finish();
let seed_vec = hc64.to_le_bytes().into_iter().chain(0u8..8).collect::<Vec<u8>>();
diff --git a/library/test/src/helpers/shuffle.rs b/library/test/src/helpers/shuffle.rs
index ca503106c..2ac3bfbd4 100644
--- a/library/test/src/helpers/shuffle.rs
+++ b/library/test/src/helpers/shuffle.rs
@@ -1,7 +1,6 @@
use crate::cli::TestOpts;
use crate::types::{TestDescAndFn, TestId, TestName};
-use std::collections::hash_map::DefaultHasher;
-use std::hash::Hasher;
+use std::hash::{DefaultHasher, Hasher};
use std::time::{SystemTime, UNIX_EPOCH};
pub fn get_shuffle_seed(opts: &TestOpts) -> Option<u64> {
diff --git a/library/test/src/lib.rs b/library/test/src/lib.rs
index bddf75dff..2fa5a8e5e 100644
--- a/library/test/src/lib.rs
+++ b/library/test/src/lib.rs
@@ -16,8 +16,8 @@
#![unstable(feature = "test", issue = "50297")]
#![doc(test(attr(deny(warnings))))]
-#![cfg_attr(not(bootstrap), doc(rust_logo))]
-#![cfg_attr(not(bootstrap), feature(rustdoc_internals))]
+#![doc(rust_logo)]
+#![feature(rustdoc_internals)]
#![feature(internal_output_capture)]
#![feature(staged_api)]
#![feature(process_exitcode_internals)]
@@ -264,8 +264,8 @@ pub fn run_tests<F>(
where
F: FnMut(TestEvent) -> io::Result<()>,
{
- use std::collections::{self, HashMap};
- use std::hash::BuildHasherDefault;
+ use std::collections::HashMap;
+ use std::hash::{BuildHasherDefault, DefaultHasher};
use std::sync::mpsc::RecvTimeoutError;
struct RunningTest {
@@ -286,8 +286,7 @@ where
}
// Use a deterministic hasher
- type TestMap =
- HashMap<TestId, RunningTest, BuildHasherDefault<collections::hash_map::DefaultHasher>>;
+ type TestMap = HashMap<TestId, RunningTest, BuildHasherDefault<DefaultHasher>>;
struct TimeoutEntry {
id: TestId,
diff --git a/library/test/src/term/terminfo/parm/tests.rs b/library/test/src/term/terminfo/parm/tests.rs
index c738f3ba0..e785d84f3 100644
--- a/library/test/src/term/terminfo/parm/tests.rs
+++ b/library/test/src/term/terminfo/parm/tests.rs
@@ -1,7 +1,5 @@
use super::*;
-use std::result::Result::Ok;
-
#[test]
fn test_basic_setabf() {
let s = b"\\E[48;5;%p1%dm";
diff --git a/library/test/src/tests.rs b/library/test/src/tests.rs
index 4ef18b14f..43a906ad2 100644
--- a/library/test/src/tests.rs
+++ b/library/test/src/tests.rs
@@ -1,34 +1,17 @@
use super::*;
use crate::{
- bench::Bencher,
console::OutputLocation,
formatters::PrettyFormatter,
- options::OutputFormat,
test::{
- filter_tests,
parse_opts,
- run_test,
- DynTestFn,
- DynTestName,
MetricMap,
- RunIgnored,
- RunStrategy,
- ShouldPanic,
- StaticTestName,
- TestDesc,
- TestDescAndFn,
- TestOpts,
- TrIgnored,
- TrOk,
// FIXME (introduced by #65251)
// ShouldPanic, StaticTestName, TestDesc, TestDescAndFn, TestOpts, TestTimeOptions,
// TestType, TrFailedMsg, TrIgnored, TrOk,
},
time::{TestTimeOptions, TimeThreshold},
};
-use std::sync::mpsc::channel;
-use std::time::Duration;
impl TestOpts {
fn new() -> TestOpts {
diff --git a/library/unwind/Cargo.toml b/library/unwind/Cargo.toml
index 9aa552ed8..b7418d118 100644
--- a/library/unwind/Cargo.toml
+++ b/library/unwind/Cargo.toml
@@ -15,10 +15,13 @@ doc = false
[dependencies]
core = { path = "../core" }
-libc = { version = "0.2.79", features = ['rustc-dep-of-std'], default-features = false }
+libc = { version = "0.2.140", features = ['rustc-dep-of-std'], default-features = false }
compiler_builtins = "0.1.0"
cfg-if = "1.0"
+[target.'cfg(target_os = "xous")'.dependencies]
+unwinding = { version = "0.2.1", features = ['rustc-dep-of-std', 'unwinder', 'fde-custom'], default-features = false }
+
[features]
# Only applies for Linux and Fuchsia targets
diff --git a/library/unwind/src/lib.rs b/library/unwind/src/lib.rs
index 335bded71..eeee98f75 100644
--- a/library/unwind/src/lib.rs
+++ b/library/unwind/src/lib.rs
@@ -26,6 +26,9 @@ cfg_if::cfg_if! {
))] {
mod libunwind;
pub use libunwind::*;
+ } else if #[cfg(target_os = "xous")] {
+ mod unwinding;
+ pub use unwinding::*;
} else {
// no unwinder on the system!
// - wasm32 (not emscripten, which is "unix" family)
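The `cfg_if!` chain selects exactly one unwinder backend at compile time. A self-contained sketch of the same dispatch, assuming only the cfg-if crate (the function is illustrative, not part of the patch):

    cfg_if::cfg_if! {
        if #[cfg(target_os = "xous")] {
            // Pure-Rust unwinder supplied by the `unwinding` crate.
            pub fn unwinder_name() -> &'static str { "unwinding" }
        } else {
            // System libunwind, or no unwinder at all.
            pub fn unwinder_name() -> &'static str { "libunwind or none" }
        }
    }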
diff --git a/library/unwind/src/libunwind.rs b/library/unwind/src/libunwind.rs
index dba64aa74..1b5f6f9dd 100644
--- a/library/unwind/src/libunwind.rs
+++ b/library/unwind/src/libunwind.rs
@@ -103,7 +103,10 @@ pub type _Unwind_Exception_Cleanup_Fn =
// and RFC 2841
#[cfg_attr(
any(
- all(feature = "llvm-libunwind", any(target_os = "fuchsia", target_os = "linux")),
+ all(
+ feature = "llvm-libunwind",
+ any(target_os = "fuchsia", target_os = "linux", target_os = "xous")
+ ),
all(target_os = "windows", target_env = "gnu", target_abi = "llvm")
),
link(name = "unwind", kind = "static", modifiers = "-bundle")
@@ -134,7 +137,7 @@ if #[cfg(any(target_os = "ios", target_os = "tvos", target_os = "watchos", targe
pub use _Unwind_Action::*;
#[cfg_attr(
- all(feature = "llvm-libunwind", any(target_os = "fuchsia", target_os = "linux")),
+ all(feature = "llvm-libunwind", any(target_os = "fuchsia", target_os = "linux", target_os = "xous")),
link(name = "unwind", kind = "static", modifiers = "-bundle")
)]
extern "C" {
@@ -192,7 +195,7 @@ if #[cfg(any(target_os = "ios", target_os = "tvos", target_os = "watchos", targe
pub const UNWIND_IP_REG: c_int = 15;
#[cfg_attr(
- all(feature = "llvm-libunwind", any(target_os = "fuchsia", target_os = "linux")),
+ all(feature = "llvm-libunwind", any(target_os = "fuchsia", target_os = "linux", target_os = "xous")),
link(name = "unwind", kind = "static", modifiers = "-bundle")
)]
extern "C" {
@@ -258,14 +261,14 @@ cfg_if::cfg_if! {
if #[cfg(not(all(target_os = "ios", target_arch = "arm")))] {
// Not 32-bit iOS
#[cfg_attr(
- all(feature = "llvm-libunwind", any(target_os = "fuchsia", target_os = "linux")),
+ all(feature = "llvm-libunwind", any(target_os = "fuchsia", target_os = "linux", target_os = "xous")),
link(name = "unwind", kind = "static", modifiers = "-bundle")
)]
extern "C-unwind" {
pub fn _Unwind_RaiseException(exception: *mut _Unwind_Exception) -> _Unwind_Reason_Code;
}
#[cfg_attr(
- all(feature = "llvm-libunwind", any(target_os = "fuchsia", target_os = "linux")),
+ all(feature = "llvm-libunwind", any(target_os = "fuchsia", target_os = "linux", target_os = "xous")),
link(name = "unwind", kind = "static", modifiers = "-bundle")
)]
extern "C" {
diff --git a/library/unwind/src/unwinding.rs b/library/unwind/src/unwinding.rs
new file mode 100644
index 000000000..1a4187b22
--- /dev/null
+++ b/library/unwind/src/unwinding.rs
@@ -0,0 +1,105 @@
+#![allow(nonstandard_style)]
+
+use libc::{c_int, c_void};
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq)]
+pub enum _Unwind_Action {
+ _UA_SEARCH_PHASE = 1,
+ _UA_CLEANUP_PHASE = 2,
+ _UA_HANDLER_FRAME = 4,
+ _UA_FORCE_UNWIND = 8,
+ _UA_END_OF_STACK = 16,
+}
+pub use _Unwind_Action::*;
+
+#[repr(C)]
+#[derive(Debug, Copy, Clone, PartialEq)]
+pub enum _Unwind_Reason_Code {
+ _URC_NO_REASON = 0,
+ _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
+ _URC_FATAL_PHASE2_ERROR = 2,
+ _URC_FATAL_PHASE1_ERROR = 3,
+ _URC_NORMAL_STOP = 4,
+ _URC_END_OF_STACK = 5,
+ _URC_HANDLER_FOUND = 6,
+ _URC_INSTALL_CONTEXT = 7,
+ _URC_CONTINUE_UNWIND = 8,
+ _URC_FAILURE = 9, // used only by ARM EHABI
+}
+pub use _Unwind_Reason_Code::*;
+
+pub use unwinding::abi::UnwindContext;
+pub use unwinding::abi::UnwindException;
+pub enum _Unwind_Context {}
+
+pub use unwinding::custom_eh_frame_finder::{
+ set_custom_eh_frame_finder, EhFrameFinder, FrameInfo, FrameInfoKind,
+};
+
+pub type _Unwind_Exception_Class = u64;
+pub type _Unwind_Word = *const u8;
+pub type _Unwind_Ptr = *const u8;
+
+pub const unwinder_private_data_size: usize = core::mem::size_of::<UnwindException>()
+ - core::mem::size_of::<_Unwind_Exception_Class>()
+ - core::mem::size_of::<_Unwind_Exception_Cleanup_Fn>();
+
+pub type _Unwind_Exception_Cleanup_Fn =
+ extern "C" fn(unwind_code: _Unwind_Reason_Code, exception: *mut _Unwind_Exception);
+
+#[repr(C)]
+pub struct _Unwind_Exception {
+ pub exception_class: _Unwind_Exception_Class,
+ pub exception_cleanup: _Unwind_Exception_Cleanup_Fn,
+ pub private: [_Unwind_Word; unwinder_private_data_size],
+}
+
+pub unsafe fn _Unwind_GetDataRelBase(ctx: *mut _Unwind_Context) -> _Unwind_Ptr {
+ let ctx = unsafe { &mut *(ctx as *mut UnwindContext<'_>) };
+ unwinding::abi::_Unwind_GetDataRelBase(ctx) as _Unwind_Ptr
+}
+
+pub unsafe fn _Unwind_GetTextRelBase(ctx: *mut _Unwind_Context) -> _Unwind_Ptr {
+ let ctx = unsafe { &mut *(ctx as *mut UnwindContext<'_>) };
+ unwinding::abi::_Unwind_GetTextRelBase(ctx) as _Unwind_Ptr
+}
+
+pub unsafe fn _Unwind_GetRegionStart(ctx: *mut _Unwind_Context) -> _Unwind_Ptr {
+ let ctx = unsafe { &mut *(ctx as *mut UnwindContext<'_>) };
+ unwinding::abi::_Unwind_GetRegionStart(ctx) as _Unwind_Ptr
+}
+
+pub unsafe fn _Unwind_SetGR(ctx: *mut _Unwind_Context, reg_index: c_int, value: _Unwind_Word) {
+ let ctx = unsafe { &mut *(ctx as *mut UnwindContext<'_>) };
+ unwinding::abi::_Unwind_SetGR(ctx, reg_index, value as usize)
+}
+
+pub unsafe fn _Unwind_SetIP(ctx: *mut _Unwind_Context, value: _Unwind_Word) {
+ let ctx = unsafe { &mut *(ctx as *mut UnwindContext<'_>) };
+ unwinding::abi::_Unwind_SetIP(ctx, value as usize)
+}
+
+pub unsafe fn _Unwind_GetIPInfo(
+ ctx: *mut _Unwind_Context,
+ ip_before_insn: *mut c_int,
+) -> _Unwind_Word {
+    let ctx = unsafe { &mut *(ctx as *mut UnwindContext<'_>) };
+    let ip_before_insn = unsafe { &mut *ip_before_insn };
+    unwinding::abi::_Unwind_GetIPInfo(ctx, ip_before_insn) as _Unwind_Word
+}
+
+pub unsafe fn _Unwind_GetLanguageSpecificData(ctx: *mut _Unwind_Context) -> *mut c_void {
+ let ctx = unsafe { &mut *(ctx as *mut UnwindContext<'_>) };
+ unwinding::abi::_Unwind_GetLanguageSpecificData(ctx)
+}
+
+pub unsafe fn _Unwind_RaiseException(exception: *mut _Unwind_Exception) -> _Unwind_Reason_Code {
+ let exception = unsafe { &mut *(exception as *mut UnwindException) };
+ unsafe { core::mem::transmute(unwinding::abi::_Unwind_RaiseException(exception)) }
+}
+
+pub unsafe fn _Unwind_DeleteException(exception: *mut _Unwind_Exception) {
+ let exception = unsafe { &mut *(exception as *mut UnwindException) };
+ unsafe { unwinding::abi::_Unwind_DeleteException(exception) }
+}