author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
commit    698f8c2f01ea549d77d7dc3338a12e04c11057b9 (patch)
tree      173a775858bd501c378080a10dca74132f05bc50 /library/core
parent    Initial commit. (diff)
Adding upstream version 1.64.0+dfsg1. (tag: upstream/1.64.0+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'library/core')
-rw-r--r--  library/core/Cargo.toml | 35
-rw-r--r--  library/core/benches/any.rs | 12
-rw-r--r--  library/core/benches/ascii.rs | 353
-rw-r--r--  library/core/benches/ascii/is_ascii.rs | 82
-rw-r--r--  library/core/benches/char/methods.rs | 77
-rw-r--r--  library/core/benches/char/mod.rs | 1
-rw-r--r--  library/core/benches/fmt.rs | 150
-rw-r--r--  library/core/benches/hash/mod.rs | 1
-rw-r--r--  library/core/benches/hash/sip.rs | 123
-rw-r--r--  library/core/benches/iter.rs | 393
-rw-r--r--  library/core/benches/lib.rs | 28
-rw-r--r--  library/core/benches/num/dec2flt/mod.rs | 57
-rw-r--r--  library/core/benches/num/flt2dec/mod.rs | 37
-rw-r--r--  library/core/benches/num/flt2dec/strategy/dragon.rs | 76
-rw-r--r--  library/core/benches/num/flt2dec/strategy/grisu.rs | 83
-rw-r--r--  library/core/benches/num/int_log/mod.rs | 58
-rw-r--r--  library/core/benches/num/mod.rs | 108
-rw-r--r--  library/core/benches/ops.rs | 19
-rw-r--r--  library/core/benches/pattern.rs | 42
-rw-r--r--  library/core/benches/slice.rs | 164
-rw-r--r--  library/core/benches/str.rs | 10
-rw-r--r--  library/core/benches/str/char_count.rs | 107
-rw-r--r--  library/core/benches/str/corpora.rs | 88
-rw-r--r--  library/core/primitive_docs/box_into_raw.md | 1
-rw-r--r--  library/core/primitive_docs/fs_file.md | 1
-rw-r--r--  library/core/primitive_docs/io_bufread.md | 1
-rw-r--r--  library/core/primitive_docs/io_read.md | 1
-rw-r--r--  library/core/primitive_docs/io_seek.md | 1
-rw-r--r--  library/core/primitive_docs/io_write.md | 1
-rw-r--r--  library/core/primitive_docs/net_tosocketaddrs.md | 1
-rw-r--r--  library/core/primitive_docs/process_exit.md | 1
-rw-r--r--  library/core/primitive_docs/string_string.md | 1
-rw-r--r--  library/core/src/alloc/global.rs | 275
-rw-r--r--  library/core/src/alloc/layout.rs | 443
-rw-r--r--  library/core/src/alloc/mod.rs | 410
-rw-r--r--  library/core/src/any.rs | 1067
-rw-r--r--  library/core/src/array/equality.rs | 216
-rw-r--r--  library/core/src/array/iter.rs | 420
-rw-r--r--  library/core/src/array/mod.rs | 872
-rw-r--r--  library/core/src/ascii.rs | 151
-rw-r--r--  library/core/src/asserting.rs | 109
-rw-r--r--  library/core/src/async_iter/async_iter.rs | 111
-rw-r--r--  library/core/src/async_iter/from_iter.rs | 38
-rw-r--r--  library/core/src/async_iter/mod.rs | 128
-rw-r--r--  library/core/src/bool.rs | 44
-rw-r--r--  library/core/src/borrow.rs | 246
-rw-r--r--  library/core/src/cell.rs | 2122
-rw-r--r--  library/core/src/cell/lazy.rs | 104
-rw-r--r--  library/core/src/cell/once.rs | 283
-rw-r--r--  library/core/src/char/convert.rs | 258
-rw-r--r--  library/core/src/char/decode.rs | 123
-rw-r--r--  library/core/src/char/methods.rs | 1741
-rw-r--r--  library/core/src/char/mod.rs | 584
-rw-r--r--  library/core/src/clone.rs | 245
-rw-r--r--  library/core/src/cmp.rs | 1643
-rw-r--r--  library/core/src/convert/mod.rs | 755
-rw-r--r--  library/core/src/convert/num.rs | 546
-rw-r--r--  library/core/src/default.rs | 222
-rw-r--r--  library/core/src/ffi/c_char.md | 8
-rw-r--r--  library/core/src/ffi/c_double.md | 6
-rw-r--r--  library/core/src/ffi/c_float.md | 5
-rw-r--r--  library/core/src/ffi/c_int.md | 5
-rw-r--r--  library/core/src/ffi/c_long.md | 5
-rw-r--r--  library/core/src/ffi/c_longlong.md | 5
-rw-r--r--  library/core/src/ffi/c_schar.md | 5
-rw-r--r--  library/core/src/ffi/c_short.md | 5
-rw-r--r--  library/core/src/ffi/c_str.rs | 608
-rw-r--r--  library/core/src/ffi/c_uchar.md | 5
-rw-r--r--  library/core/src/ffi/c_uint.md | 5
-rw-r--r--  library/core/src/ffi/c_ulong.md | 5
-rw-r--r--  library/core/src/ffi/c_ulonglong.md | 5
-rw-r--r--  library/core/src/ffi/c_ushort.md | 5
-rw-r--r--  library/core/src/ffi/c_void.md | 16
-rw-r--r--  library/core/src/ffi/mod.rs | 580
-rw-r--r--  library/core/src/fmt/builders.rs | 939
-rw-r--r--  library/core/src/fmt/float.rs | 226
-rw-r--r--  library/core/src/fmt/mod.rs | 2664
-rw-r--r--  library/core/src/fmt/nofloat.rs | 15
-rw-r--r--  library/core/src/fmt/num.rs | 683
-rw-r--r--  library/core/src/fmt/rt/v1.rs | 45
-rw-r--r--  library/core/src/future/future.rs | 126
-rw-r--r--  library/core/src/future/into_future.rs | 139
-rw-r--r--  library/core/src/future/join.rs | 193
-rw-r--r--  library/core/src/future/mod.rs | 110
-rw-r--r--  library/core/src/future/pending.rs | 58
-rw-r--r--  library/core/src/future/poll_fn.rs | 63
-rw-r--r--  library/core/src/future/ready.rs | 46
-rw-r--r--  library/core/src/hash/mod.rs | 978
-rw-r--r--  library/core/src/hash/sip.rs | 401
-rw-r--r--  library/core/src/hint.rs | 350
-rw-r--r--  library/core/src/internal_macros.rs | 258
-rw-r--r--  library/core/src/intrinsics.rs | 2716
-rw-r--r--  library/core/src/iter/adapters/by_ref_sized.rs | 86
-rw-r--r--  library/core/src/iter/adapters/chain.rs | 292
-rw-r--r--  library/core/src/iter/adapters/cloned.rs | 142
-rw-r--r--  library/core/src/iter/adapters/copied.rs | 168
-rw-r--r--  library/core/src/iter/adapters/cycle.rs | 108
-rw-r--r--  library/core/src/iter/adapters/enumerate.rs | 266
-rw-r--r--  library/core/src/iter/adapters/filter.rs | 152
-rw-r--r--  library/core/src/iter/adapters/filter_map.rs | 149
-rw-r--r--  library/core/src/iter/adapters/flatten.rs | 599
-rw-r--r--  library/core/src/iter/adapters/fuse.rs | 413
-rw-r--r--  library/core/src/iter/adapters/inspect.rs | 166
-rw-r--r--  library/core/src/iter/adapters/intersperse.rs | 187
-rw-r--r--  library/core/src/iter/adapters/map.rs | 218
-rw-r--r--  library/core/src/iter/adapters/map_while.rs | 100
-rw-r--r--  library/core/src/iter/adapters/mod.rs | 232
-rw-r--r--  library/core/src/iter/adapters/peekable.rs | 335
-rw-r--r--  library/core/src/iter/adapters/rev.rs | 137
-rw-r--r--  library/core/src/iter/adapters/scan.rs | 110
-rw-r--r--  library/core/src/iter/adapters/skip.rs | 239
-rw-r--r--  library/core/src/iter/adapters/skip_while.rs | 125
-rw-r--r--  library/core/src/iter/adapters/step_by.rs | 235
-rw-r--r--  library/core/src/iter/adapters/take.rs | 244
-rw-r--r--  library/core/src/iter/adapters/take_while.rs | 138
-rw-r--r--  library/core/src/iter/adapters/zip.rs | 585
-rw-r--r--  library/core/src/iter/mod.rs | 432
-rw-r--r--  library/core/src/iter/range.rs | 1253
-rw-r--r--  library/core/src/iter/sources.rs | 36
-rw-r--r--  library/core/src/iter/sources/empty.rs | 94
-rw-r--r--  library/core/src/iter/sources/from_fn.rs | 78
-rw-r--r--  library/core/src/iter/sources/from_generator.rs | 43
-rw-r--r--  library/core/src/iter/sources/once.rs | 99
-rw-r--r--  library/core/src/iter/sources/once_with.rs | 109
-rw-r--r--  library/core/src/iter/sources/repeat.rs | 129
-rw-r--r--  library/core/src/iter/sources/repeat_with.rs | 98
-rw-r--r--  library/core/src/iter/sources/successors.rs | 66
-rw-r--r--  library/core/src/iter/traits/accum.rs | 231
-rw-r--r--  library/core/src/iter/traits/collect.rs | 450
-rw-r--r--  library/core/src/iter/traits/double_ended.rs | 374
-rw-r--r--  library/core/src/iter/traits/exact_size.rs | 151
-rw-r--r--  library/core/src/iter/traits/iterator.rs | 3836
-rw-r--r--  library/core/src/iter/traits/marker.rs | 78
-rw-r--r--  library/core/src/iter/traits/mod.rs | 21
-rw-r--r--  library/core/src/lazy.rs | 1
-rw-r--r--  library/core/src/lib.rs | 426
-rw-r--r--  library/core/src/macros/mod.rs | 1554
-rw-r--r--  library/core/src/macros/panic.md | 75
-rw-r--r--  library/core/src/marker.rs | 840
-rw-r--r--  library/core/src/mem/manually_drop.rs | 165
-rw-r--r--  library/core/src/mem/maybe_uninit.rs | 1292
-rw-r--r--  library/core/src/mem/mod.rs | 1180
-rw-r--r--  library/core/src/mem/transmutability.rs | 43
-rw-r--r--  library/core/src/mem/valid_align.rs | 247
-rw-r--r--  library/core/src/num/bignum.rs | 434
-rw-r--r--  library/core/src/num/dec2flt/common.rs | 198
-rw-r--r--  library/core/src/num/dec2flt/decimal.rs | 351
-rw-r--r--  library/core/src/num/dec2flt/float.rs | 207
-rw-r--r--  library/core/src/num/dec2flt/fpu.rs | 90
-rw-r--r--  library/core/src/num/dec2flt/lemire.rs | 166
-rw-r--r--  library/core/src/num/dec2flt/mod.rs | 269
-rw-r--r--  library/core/src/num/dec2flt/number.rs | 86
-rw-r--r--  library/core/src/num/dec2flt/parse.rs | 233
-rw-r--r--  library/core/src/num/dec2flt/slow.rs | 109
-rw-r--r--  library/core/src/num/dec2flt/table.rs | 670
-rw-r--r--  library/core/src/num/diy_float.rs | 81
-rw-r--r--  library/core/src/num/error.rs | 146
-rw-r--r--  library/core/src/num/f32.rs | 1296
-rw-r--r--  library/core/src/num/f64.rs | 1294
-rw-r--r--  library/core/src/num/flt2dec/decoder.rs | 100
-rw-r--r--  library/core/src/num/flt2dec/estimator.rs | 14
-rw-r--r--  library/core/src/num/flt2dec/mod.rs | 673
-rw-r--r--  library/core/src/num/flt2dec/strategy/dragon.rs | 388
-rw-r--r--  library/core/src/num/flt2dec/strategy/grisu.rs | 764
-rw-r--r--  library/core/src/num/fmt.rs | 108
-rw-r--r--  library/core/src/num/int_log10.rs | 140
-rw-r--r--  library/core/src/num/int_macros.rs | 2744
-rw-r--r--  library/core/src/num/mod.rs | 1124
-rw-r--r--  library/core/src/num/nonzero.rs | 1134
-rw-r--r--  library/core/src/num/saturating.rs | 1081
-rw-r--r--  library/core/src/num/shells/i128.rs | 13
-rw-r--r--  library/core/src/num/shells/i16.rs | 13
-rw-r--r--  library/core/src/num/shells/i32.rs | 13
-rw-r--r--  library/core/src/num/shells/i64.rs | 13
-rw-r--r--  library/core/src/num/shells/i8.rs | 13
-rw-r--r--  library/core/src/num/shells/int_macros.rs | 44
-rw-r--r--  library/core/src/num/shells/isize.rs | 13
-rw-r--r--  library/core/src/num/shells/u128.rs | 13
-rw-r--r--  library/core/src/num/shells/u16.rs | 13
-rw-r--r--  library/core/src/num/shells/u32.rs | 13
-rw-r--r--  library/core/src/num/shells/u64.rs | 13
-rw-r--r--  library/core/src/num/shells/u8.rs | 13
-rw-r--r--  library/core/src/num/shells/usize.rs | 13
-rw-r--r--  library/core/src/num/uint_macros.rs | 2454
-rw-r--r--  library/core/src/num/wrapping.rs | 1123
-rw-r--r--  library/core/src/ops/arith.rs | 1029
-rw-r--r--  library/core/src/ops/bit.rs | 1044
-rw-r--r--  library/core/src/ops/control_flow.rs | 299
-rw-r--r--  library/core/src/ops/deref.rs | 199
-rw-r--r--  library/core/src/ops/drop.rs | 165
-rw-r--r--  library/core/src/ops/function.rs | 304
-rw-r--r--  library/core/src/ops/generator.rs | 136
-rw-r--r--  library/core/src/ops/index.rs | 175
-rw-r--r--  library/core/src/ops/mod.rs | 208
-rw-r--r--  library/core/src/ops/range.rs | 991
-rw-r--r--  library/core/src/ops/try_trait.rs | 418
-rw-r--r--  library/core/src/ops/unsize.rs | 132
-rw-r--r--  library/core/src/option.rs | 2356
-rw-r--r--  library/core/src/panic.rs | 112
-rw-r--r--  library/core/src/panic/location.rs | 197
-rw-r--r--  library/core/src/panic/panic_info.rs | 166
-rw-r--r--  library/core/src/panic/unwind_safe.rs | 312
-rw-r--r--  library/core/src/panicking.rs | 231
-rw-r--r--  library/core/src/pin.rs | 1159
-rw-r--r--  library/core/src/prelude/mod.rs | 57
-rw-r--r--  library/core/src/prelude/v1.rs | 93
-rw-r--r--  library/core/src/primitive.rs | 67
-rw-r--r--  library/core/src/primitive_docs.rs | 1508
-rw-r--r--  library/core/src/ptr/const_ptr.rs | 1525
-rw-r--r--  library/core/src/ptr/metadata.rs | 290
-rw-r--r--  library/core/src/ptr/mod.rs | 2054
-rw-r--r--  library/core/src/ptr/mut_ptr.rs | 1973
-rw-r--r--  library/core/src/ptr/non_null.rs | 802
-rw-r--r--  library/core/src/ptr/unique.rs | 193
-rw-r--r--  library/core/src/result.rs | 2150
-rw-r--r--  library/core/src/slice/ascii.rs | 330
-rw-r--r--  library/core/src/slice/cmp.rs | 260
-rw-r--r--  library/core/src/slice/index.rs | 730
-rw-r--r--  library/core/src/slice/iter.rs | 3388
-rw-r--r--  library/core/src/slice/iter/macros.rs | 423
-rw-r--r--  library/core/src/slice/memchr.rs | 142
-rw-r--r--  library/core/src/slice/mod.rs | 4244
-rw-r--r--  library/core/src/slice/raw.rs | 271
-rw-r--r--  library/core/src/slice/rotate.rs | 234
-rw-r--r--  library/core/src/slice/sort.rs | 929
-rw-r--r--  library/core/src/slice/specialize.rs | 23
-rw-r--r--  library/core/src/str/converts.rs | 203
-rw-r--r--  library/core/src/str/count.rs | 136
-rw-r--r--  library/core/src/str/error.rs | 138
-rw-r--r--  library/core/src/str/iter.rs | 1499
-rw-r--r--  library/core/src/str/lossy.rs | 200
-rw-r--r--  library/core/src/str/mod.rs | 2640
-rw-r--r--  library/core/src/str/pattern.rs | 1686
-rw-r--r--  library/core/src/str/traits.rs | 604
-rw-r--r--  library/core/src/str/validations.rs | 274
-rw-r--r--  library/core/src/sync/atomic.rs | 3488
-rw-r--r--  library/core/src/sync/exclusive.rs | 173
-rw-r--r--  library/core/src/sync/mod.rs | 8
-rw-r--r--  library/core/src/task/mod.rs | 17
-rw-r--r--  library/core/src/task/poll.rs | 320
-rw-r--r--  library/core/src/task/ready.rs | 114
-rw-r--r--  library/core/src/task/wake.rs | 334
-rw-r--r--  library/core/src/time.rs | 1480
-rw-r--r--  library/core/src/tuple.rs | 159
-rw-r--r--  library/core/src/unicode/mod.rs | 31
-rwxr-xr-x  library/core/src/unicode/printable.py | 243
-rw-r--r--  library/core/src/unicode/printable.rs | 573
-rw-r--r--  library/core/src/unicode/unicode_data.rs | 2375
-rw-r--r--  library/core/src/unit.rs | 21
-rw-r--r--  library/core/tests/alloc.rs | 31
-rw-r--r--  library/core/tests/any.rs | 194
-rw-r--r--  library/core/tests/array.rs | 702
-rw-r--r--  library/core/tests/ascii.rs | 463
-rw-r--r--  library/core/tests/asserting.rs | 37
-rw-r--r--  library/core/tests/atomic.rs | 314
-rw-r--r--  library/core/tests/bool.rs | 105
-rw-r--r--  library/core/tests/cell.rs | 479
-rw-r--r--  library/core/tests/char.rs | 415
-rw-r--r--  library/core/tests/clone.rs | 15
-rw-r--r--  library/core/tests/cmp.rs | 250
-rw-r--r--  library/core/tests/const_ptr.rs | 101
-rw-r--r--  library/core/tests/convert.rs | 16
-rw-r--r--  library/core/tests/fmt/builders.rs | 726
-rw-r--r--  library/core/tests/fmt/float.rs | 55
-rw-r--r--  library/core/tests/fmt/mod.rs | 45
-rw-r--r--  library/core/tests/fmt/num.rs | 225
-rw-r--r--  library/core/tests/future.rs | 128
-rw-r--r--  library/core/tests/hash/mod.rs | 161
-rw-r--r--  library/core/tests/hash/sip.rs | 309
-rw-r--r--  library/core/tests/intrinsics.rs | 101
-rw-r--r--  library/core/tests/iter/adapters/chain.rs | 280
-rw-r--r--  library/core/tests/iter/adapters/cloned.rs | 52
-rw-r--r--  library/core/tests/iter/adapters/copied.rs | 18
-rw-r--r--  library/core/tests/iter/adapters/cycle.rs | 31
-rw-r--r--  library/core/tests/iter/adapters/enumerate.rs | 107
-rw-r--r--  library/core/tests/iter/adapters/filter.rs | 52
-rw-r--r--  library/core/tests/iter/adapters/filter_map.rs | 50
-rw-r--r--  library/core/tests/iter/adapters/flat_map.rs | 74
-rw-r--r--  library/core/tests/iter/adapters/flatten.rs | 170
-rw-r--r--  library/core/tests/iter/adapters/fuse.rs | 75
-rw-r--r--  library/core/tests/iter/adapters/inspect.rs | 38
-rw-r--r--  library/core/tests/iter/adapters/intersperse.rs | 154
-rw-r--r--  library/core/tests/iter/adapters/map.rs | 27
-rw-r--r--  library/core/tests/iter/adapters/mod.rs | 185
-rw-r--r--  library/core/tests/iter/adapters/peekable.rs | 272
-rw-r--r--  library/core/tests/iter/adapters/scan.rs | 20
-rw-r--r--  library/core/tests/iter/adapters/skip.rs | 203
-rw-r--r--  library/core/tests/iter/adapters/skip_while.rs | 50
-rw-r--r--  library/core/tests/iter/adapters/step_by.rs | 246
-rw-r--r--  library/core/tests/iter/adapters/take.rs | 148
-rw-r--r--  library/core/tests/iter/adapters/take_while.rs | 29
-rw-r--r--  library/core/tests/iter/adapters/zip.rs | 315
-rw-r--r--  library/core/tests/iter/mod.rs | 102
-rw-r--r--  library/core/tests/iter/range.rs | 472
-rw-r--r--  library/core/tests/iter/sources.rs | 108
-rw-r--r--  library/core/tests/iter/traits/accum.rs | 66
-rw-r--r--  library/core/tests/iter/traits/double_ended.rs | 91
-rw-r--r--  library/core/tests/iter/traits/iterator.rs | 593
-rw-r--r--  library/core/tests/iter/traits/mod.rs | 4
-rw-r--r--  library/core/tests/iter/traits/step.rs | 89
-rw-r--r--  library/core/tests/lazy.rs | 138
-rw-r--r--  library/core/tests/lib.rs | 142
-rw-r--r--  library/core/tests/macros.rs | 20
-rw-r--r--  library/core/tests/manually_drop.rs | 27
-rw-r--r--  library/core/tests/mem.rs | 343
-rw-r--r--  library/core/tests/nonzero.rs | 336
-rw-r--r--  library/core/tests/num/bignum.rs | 276
-rw-r--r--  library/core/tests/num/const_from.rs | 25
-rw-r--r--  library/core/tests/num/dec2flt/float.rs | 33
-rw-r--r--  library/core/tests/num/dec2flt/lemire.rs | 53
-rw-r--r--  library/core/tests/num/dec2flt/mod.rs | 140
-rw-r--r--  library/core/tests/num/dec2flt/parse.rs | 177
-rw-r--r--  library/core/tests/num/flt2dec/estimator.rs | 62
-rw-r--r--  library/core/tests/num/flt2dec/mod.rs | 1172
-rw-r--r--  library/core/tests/num/flt2dec/random.rs | 202
-rw-r--r--  library/core/tests/num/flt2dec/strategy/dragon.rs | 63
-rw-r--r--  library/core/tests/num/flt2dec/strategy/grisu.rs | 72
-rw-r--r--  library/core/tests/num/i128.rs | 1
-rw-r--r--  library/core/tests/num/i16.rs | 1
-rw-r--r--  library/core/tests/num/i32.rs | 30
-rw-r--r--  library/core/tests/num/i64.rs | 1
-rw-r--r--  library/core/tests/num/i8.rs | 1
-rw-r--r--  library/core/tests/num/ieee754.rs | 158
-rw-r--r--  library/core/tests/num/int_log.rs | 166
-rw-r--r--  library/core/tests/num/int_macros.rs | 343
-rw-r--r--  library/core/tests/num/mod.rs | 871
-rw-r--r--  library/core/tests/num/nan.rs | 7
-rw-r--r--  library/core/tests/num/ops.rs | 232
-rw-r--r--  library/core/tests/num/u128.rs | 1
-rw-r--r--  library/core/tests/num/u16.rs | 1
-rw-r--r--  library/core/tests/num/u32.rs | 1
-rw-r--r--  library/core/tests/num/u64.rs | 1
-rw-r--r--  library/core/tests/num/u8.rs | 1
-rw-r--r--  library/core/tests/num/uint_macros.rs | 235
-rw-r--r--  library/core/tests/num/wrapping.rs | 320
-rw-r--r--  library/core/tests/ops.rs | 240
-rw-r--r--  library/core/tests/ops/control_flow.rs | 18
-rw-r--r--  library/core/tests/option.rs | 555
-rw-r--r--  library/core/tests/pattern.rs | 503
-rw-r--r--  library/core/tests/pin.rs | 31
-rw-r--r--  library/core/tests/pin_macro.rs | 33
-rw-r--r--  library/core/tests/ptr.rs | 855
-rw-r--r--  library/core/tests/result.rs | 427
-rw-r--r--  library/core/tests/simd.rs | 14
-rw-r--r--  library/core/tests/slice.rs | 2597
-rw-r--r--  library/core/tests/str.rs | 1
-rw-r--r--  library/core/tests/str_lossy.rs | 85
-rw-r--r--  library/core/tests/task.rs | 14
-rw-r--r--  library/core/tests/time.rs | 447
-rw-r--r--  library/core/tests/tuple.rs | 61
-rw-r--r--  library/core/tests/unicode.rs | 5
-rw-r--r--  library/core/tests/waker.rs | 22
352 files changed, 131644 insertions, 0 deletions
diff --git a/library/core/Cargo.toml b/library/core/Cargo.toml
new file mode 100644
index 000000000..2a7df9556
--- /dev/null
+++ b/library/core/Cargo.toml
@@ -0,0 +1,35 @@
+[package]
+name = "core"
+version = "0.0.0"
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/rust-lang/rust.git"
+description = "The Rust Core Library"
+autotests = false
+autobenches = false
+# If you update this, be sure to update it in a bunch of other places too!
+# As of 2022, it was the ci/pgo.sh script and the core-no-fp-fmt-parse test.
+edition = "2021"
+
+[lib]
+test = false
+bench = false
+
+[[test]]
+name = "coretests"
+path = "tests/lib.rs"
+
+[[bench]]
+name = "corebenches"
+path = "benches/lib.rs"
+test = true
+
+[dev-dependencies]
+rand = "0.7"
+rand_xorshift = "0.2"
+
+[features]
+# Make panics and failed asserts immediately abort without formatting any message
+panic_immediate_abort = []
+# Make `RefCell` store additional debugging information, which is printed out when
+# a borrow error occurs
+debug_refcell = []
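+#
+# (Annotation, not part of the upstream file: these are opt-in features of
+# `core` itself, so they only take effect when the standard library is rebuilt
+# from source; on a nightly toolchain that is typically done through cargo's
+# build-std support, e.g. `-Zbuild-std-features=panic_immediate_abort`.)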
diff --git a/library/core/benches/any.rs b/library/core/benches/any.rs
new file mode 100644
index 000000000..53099b782
--- /dev/null
+++ b/library/core/benches/any.rs
@@ -0,0 +1,12 @@
+use core::any::*;
+use test::{black_box, Bencher};
+
+#[bench]
+fn bench_downcast_ref(b: &mut Bencher) {
+ b.iter(|| {
+ let mut x = 0;
+ let mut y = &mut x as &mut dyn Any;
+ black_box(&mut y);
+ black_box(y.downcast_ref::<isize>() == Some(&0));
+ });
+}
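+
+// Annotation (not part of the upstream file): `black_box(&mut y)` hides the
+// concrete type behind the `&mut dyn Any` from the optimizer, so
+// `downcast_ref::<isize>()` has to perform a real runtime `TypeId` comparison
+// instead of being constant-folded to `Some(&0)`.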
diff --git a/library/core/benches/ascii.rs b/library/core/benches/ascii.rs
new file mode 100644
index 000000000..64938745a
--- /dev/null
+++ b/library/core/benches/ascii.rs
@@ -0,0 +1,353 @@
+mod is_ascii;
+
+// Lower-case ASCII 'a' is the first byte that has its highest bit set
+// after wrap-adding 0x1F:
+//
+// b'a' + 0x1F == 0x80 == 0b1000_0000
+// b'z' + 0x1F == 0x98 == 0b1001_1000
+//
+// Lower-case ASCII 'z' is the last byte that has its highest bit unset
+// after wrap-adding 0x05:
+//
+// b'a' + 0x05 == 0x66 == 0b0110_0110
+// b'z' + 0x05 == 0x7F == 0b0111_1111
+//
+// … except for 0xFB to 0xFF, but those are in the range of bytes
+// that have the highest bit unset again after adding 0x1F.
+//
+// So `(byte + 0x1f) & !(byte + 5)` has its highest bit set
+// iff `byte` is a lower-case ASCII letter.
+//
+// Lower-case ASCII letters all have the 0x20 bit set.
+// (Two positions right of 0x80, the highest bit.)
+// Unsetting that bit produces the same letter, in upper-case.
+//
+// Therefore:
+fn branchless_to_ascii_upper_case(byte: u8) -> u8 {
+ byte & !((byte.wrapping_add(0x1f) & !byte.wrapping_add(0x05) & 0x80) >> 2)
+}
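+
+// Worked example (annotation, not part of the upstream file): for b'a' = 0x61,
+// 0x61 + 0x1f = 0x80 and !(0x61 + 0x05) = !0x66 = 0x99, so
+// 0x80 & 0x99 & 0x80 = 0x80; shifted right by 2 that is the case bit 0x20,
+// and 0x61 & !0x20 = 0x41 = b'A'. For b'A' = 0x41, 0x41 + 0x1f = 0x60 has the
+// high bit clear, the mask is 0, and the byte passes through unchanged.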
+
+macro_rules! benches {
+ ($( fn $name: ident($arg: ident: &mut [u8]) $body: block )+ @iter $( $is_: ident, )+) => {
+ benches! {@
+ $( fn $name($arg: &mut [u8]) $body )+
+ $( fn $is_(bytes: &mut [u8]) { bytes.iter().all(u8::$is_) } )+
+ }
+ };
+
+ (@$( fn $name: ident($arg: ident: &mut [u8]) $body: block )+) => {
+ benches!(mod short SHORT $($name $arg $body)+);
+ benches!(mod medium MEDIUM $($name $arg $body)+);
+ benches!(mod long LONG $($name $arg $body)+);
+ };
+
+ (mod $mod_name: ident $input: ident $($name: ident $arg: ident $body: block)+) => {
+ mod $mod_name {
+ use super::*;
+
+ $(
+ #[bench]
+ fn $name(bencher: &mut Bencher) {
+ bencher.bytes = $input.len() as u64;
+ bencher.iter(|| {
+ let mut vec = $input.as_bytes().to_vec();
+ {
+ let $arg = &mut vec[..];
+ black_box($body);
+ }
+ vec
+ })
+ }
+ )+
+ }
+ }
+}
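+
+// Annotation (not part of the upstream file): the macro above stamps out
+// `short`, `medium` and `long` modules, one per input string defined below,
+// each containing every case as a #[bench]. Each iteration copies the input
+// into a fresh Vec, so all cases pay the same constant allocation cost, which
+// `case00_alloc_only` measures as the baseline.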
+
+use test::black_box;
+use test::Bencher;
+
+const ASCII_CASE_MASK: u8 = 0b0010_0000;
+
+benches! {
+ fn case00_alloc_only(_bytes: &mut [u8]) {}
+
+ fn case01_black_box_read_each_byte(bytes: &mut [u8]) {
+ for byte in bytes {
+ black_box(*byte);
+ }
+ }
+
+ fn case02_lookup_table(bytes: &mut [u8]) {
+ for byte in bytes {
+ *byte = ASCII_UPPERCASE_MAP[*byte as usize]
+ }
+ }
+
+ fn case03_branch_and_subtract(bytes: &mut [u8]) {
+ for byte in bytes {
+ *byte = if b'a' <= *byte && *byte <= b'z' {
+ *byte - b'a' + b'A'
+ } else {
+ *byte
+ }
+ }
+ }
+
+ fn case04_branch_and_mask(bytes: &mut [u8]) {
+ for byte in bytes {
+ *byte = if b'a' <= *byte && *byte <= b'z' {
+ *byte & !0x20
+ } else {
+ *byte
+ }
+ }
+ }
+
+ fn case05_branchless(bytes: &mut [u8]) {
+ for byte in bytes {
+ *byte = branchless_to_ascii_upper_case(*byte)
+ }
+ }
+
+ fn case06_libcore(bytes: &mut [u8]) {
+ bytes.make_ascii_uppercase()
+ }
+
+ fn case07_fake_simd_u32(bytes: &mut [u8]) {
+ // SAFETY: transmuting a sequence of `u8` to `u32` is always fine
+ let (before, aligned, after) = unsafe {
+ bytes.align_to_mut::<u32>()
+ };
+ for byte in before {
+ *byte = branchless_to_ascii_upper_case(*byte)
+ }
+ for word in aligned {
+ // FIXME: this is incorrect for some byte values:
+ // addition within a byte can carry/overflow into the next byte.
+ // Test case: b"\xFFz "
+ *word &= !(
+ (
+ word.wrapping_add(0x1f1f1f1f) &
+ !word.wrapping_add(0x05050505) &
+ 0x80808080
+ ) >> 2
+ )
+ }
+ for byte in after {
+ *byte = branchless_to_ascii_upper_case(*byte)
+ }
+ }
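+
+ // Annotation (not part of the upstream file) on the FIXME above: packing
+ // b"\xFFz " into one u32, the lane sum 0xFF + 0x05 = 0x104 carries into the
+ // 'z' lane, making 0x7A + 0x05 + 1 = 0x80; that sets the high bit in the
+ // negated term, zeroing the case mask, so 'z' is wrongly left lowercase.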
+
+ fn case08_fake_simd_u64(bytes: &mut [u8]) {
+ // SAFETY: transmuting a sequence of `u8` to `u64` is always fine
+ let (before, aligned, after) = unsafe {
+ bytes.align_to_mut::<u64>()
+ };
+ for byte in before {
+ *byte = branchless_to_ascii_upper_case(*byte)
+ }
+ for word in aligned {
+ // FIXME: like above, this is incorrect for some byte values.
+ *word &= !(
+ (
+ word.wrapping_add(0x1f1f1f1f_1f1f1f1f) &
+ !word.wrapping_add(0x05050505_05050505) &
+ 0x80808080_80808080
+ ) >> 2
+ )
+ }
+ for byte in after {
+ *byte = branchless_to_ascii_upper_case(*byte)
+ }
+ }
+
+ fn case09_mask_mult_bool_branchy_lookup_table(bytes: &mut [u8]) {
+ fn is_ascii_lowercase(b: u8) -> bool {
+ if b >= 0x80 { return false }
+ match ASCII_CHARACTER_CLASS[b as usize] {
+ L | Lx => true,
+ _ => false,
+ }
+ }
+ for byte in bytes {
+ *byte &= !(0x20 * (is_ascii_lowercase(*byte) as u8))
+ }
+ }
+
+ fn case10_mask_mult_bool_lookup_table(bytes: &mut [u8]) {
+ fn is_ascii_lowercase(b: u8) -> bool {
+ match ASCII_CHARACTER_CLASS[b as usize] {
+ L | Lx => true,
+ _ => false
+ }
+ }
+ for byte in bytes {
+ *byte &= !(0x20 * (is_ascii_lowercase(*byte) as u8))
+ }
+ }
+
+ fn case11_mask_mult_bool_match_range(bytes: &mut [u8]) {
+ fn is_ascii_lowercase(b: u8) -> bool {
+ match b {
+ b'a'..=b'z' => true,
+ _ => false
+ }
+ }
+ for byte in bytes {
+ *byte &= !(0x20 * (is_ascii_lowercase(*byte) as u8))
+ }
+ }
+
+ fn case12_mask_shifted_bool_match_range(bytes: &mut [u8]) {
+ fn is_ascii_lowercase(b: u8) -> bool {
+ match b {
+ b'a'..=b'z' => true,
+ _ => false
+ }
+ }
+ for byte in bytes {
+ *byte &= !((is_ascii_lowercase(*byte) as u8) * ASCII_CASE_MASK)
+ }
+ }
+
+ fn case13_subtract_shifted_bool_match_range(bytes: &mut [u8]) {
+ fn is_ascii_lowercase(b: u8) -> bool {
+ match b {
+ b'a'..=b'z' => true,
+ _ => false
+ }
+ }
+ for byte in bytes {
+ *byte -= (is_ascii_lowercase(*byte) as u8) * ASCII_CASE_MASK
+ }
+ }
+
+ fn case14_subtract_multiplied_bool_match_range(bytes: &mut [u8]) {
+ fn is_ascii_lowercase(b: u8) -> bool {
+ match b {
+ b'a'..=b'z' => true,
+ _ => false
+ }
+ }
+ for byte in bytes {
+ *byte -= (b'a' - b'A') * is_ascii_lowercase(*byte) as u8
+ }
+ }
+
+ @iter
+
+ is_ascii,
+ is_ascii_alphabetic,
+ is_ascii_uppercase,
+ is_ascii_lowercase,
+ is_ascii_alphanumeric,
+ is_ascii_digit,
+ is_ascii_hexdigit,
+ is_ascii_punctuation,
+ is_ascii_graphic,
+ is_ascii_whitespace,
+ is_ascii_control,
+}
+
+macro_rules! repeat {
+ ($s: expr) => {
+ concat!($s, $s, $s, $s, $s, $s, $s, $s, $s, $s)
+ };
+}
+
+const SHORT: &str = "Alice's";
+const MEDIUM: &str = "Alice's Adventures in Wonderland";
+const LONG: &str = repeat!(
+ r#"
+ La Guida di Bragia, a Ballad Opera for the Marionette Theatre (around 1850)
+ Alice's Adventures in Wonderland (1865)
+ Phantasmagoria and Other Poems (1869)
+ Through the Looking-Glass, and What Alice Found There
+ (includes "Jabberwocky" and "The Walrus and the Carpenter") (1871)
+ The Hunting of the Snark (1876)
+ Rhyme? And Reason? (1883) – shares some contents with the 1869 collection,
+ including the long poem "Phantasmagoria"
+ A Tangled Tale (1885)
+ Sylvie and Bruno (1889)
+ Sylvie and Bruno Concluded (1893)
+ Pillow Problems (1893)
+ What the Tortoise Said to Achilles (1895)
+ Three Sunsets and Other Poems (1898)
+ The Manlet (1903)[106]
+"#
+);
+
+#[rustfmt::skip]
+const ASCII_UPPERCASE_MAP: [u8; 256] = [
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ b' ', b'!', b'"', b'#', b'$', b'%', b'&', b'\'',
+ b'(', b')', b'*', b'+', b',', b'-', b'.', b'/',
+ b'0', b'1', b'2', b'3', b'4', b'5', b'6', b'7',
+ b'8', b'9', b':', b';', b'<', b'=', b'>', b'?',
+ b'@', b'A', b'B', b'C', b'D', b'E', b'F', b'G',
+ b'H', b'I', b'J', b'K', b'L', b'M', b'N', b'O',
+ b'P', b'Q', b'R', b'S', b'T', b'U', b'V', b'W',
+ b'X', b'Y', b'Z', b'[', b'\\', b']', b'^', b'_',
+ b'`',
+
+ b'A', b'B', b'C', b'D', b'E', b'F', b'G',
+ b'H', b'I', b'J', b'K', b'L', b'M', b'N', b'O',
+ b'P', b'Q', b'R', b'S', b'T', b'U', b'V', b'W',
+ b'X', b'Y', b'Z',
+
+ b'{', b'|', b'}', b'~', 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
+ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
+ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
+ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
+ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+];
+
+enum AsciiCharacterClass {
+ C, // control
+ Cw, // control whitespace
+ W, // whitespace
+ D, // digit
+ L, // lowercase
+ Lx, // lowercase hex digit
+ U, // uppercase
+ Ux, // uppercase hex digit
+ P, // punctuation
+ N, // Non-ASCII
+}
+use self::AsciiCharacterClass::*;
+
+#[rustfmt::skip]
+static ASCII_CHARACTER_CLASS: [AsciiCharacterClass; 256] = [
+// _0 _1 _2 _3 _4 _5 _6 _7 _8 _9 _a _b _c _d _e _f
+ C, C, C, C, C, C, C, C, C, Cw,Cw,C, Cw,Cw,C, C, // 0_
+ C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, // 1_
+ W, P, P, P, P, P, P, P, P, P, P, P, P, P, P, P, // 2_
+ D, D, D, D, D, D, D, D, D, D, P, P, P, P, P, P, // 3_
+ P, Ux,Ux,Ux,Ux,Ux,Ux,U, U, U, U, U, U, U, U, U, // 4_
+ U, U, U, U, U, U, U, U, U, U, U, P, P, P, P, P, // 5_
+ P, Lx,Lx,Lx,Lx,Lx,Lx,L, L, L, L, L, L, L, L, L, // 6_
+ L, L, L, L, L, L, L, L, L, L, L, P, P, P, P, C, // 7_
+ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
+ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
+ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
+ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
+ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
+ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
+ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
+ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
+];
diff --git a/library/core/benches/ascii/is_ascii.rs b/library/core/benches/ascii/is_ascii.rs
new file mode 100644
index 000000000..a42a1dcfe
--- /dev/null
+++ b/library/core/benches/ascii/is_ascii.rs
@@ -0,0 +1,82 @@
+use super::{LONG, MEDIUM, SHORT};
+use test::black_box;
+use test::Bencher;
+
+macro_rules! benches {
+ ($( fn $name: ident($arg: ident: &[u8]) $body: block )+) => {
+ benches!(mod short SHORT[..] $($name $arg $body)+);
+ benches!(mod medium MEDIUM[..] $($name $arg $body)+);
+ benches!(mod long LONG[..] $($name $arg $body)+);
+ // Ensure we benchmark cases where the functions are called with strings
+ // that are not perfectly aligned or have a length which is not a
+ // multiple of size_of::<usize>() (or both)
+ benches!(mod unaligned_head MEDIUM[1..] $($name $arg $body)+);
+ benches!(mod unaligned_tail MEDIUM[..(MEDIUM.len() - 1)] $($name $arg $body)+);
+ benches!(mod unaligned_both MEDIUM[1..(MEDIUM.len() - 1)] $($name $arg $body)+);
+ };
+
+ (mod $mod_name: ident $input: ident [$range: expr] $($name: ident $arg: ident $body: block)+) => {
+ mod $mod_name {
+ use super::*;
+ $(
+ #[bench]
+ fn $name(bencher: &mut Bencher) {
+ bencher.bytes = $input[$range].len() as u64;
+ let mut vec = $input.as_bytes().to_vec();
+ bencher.iter(|| {
+ let $arg: &[u8] = &black_box(&mut vec)[$range];
+ black_box($body)
+ })
+ }
+ )+
+ }
+ };
+}
+
+benches! {
+ fn case00_libcore(bytes: &[u8]) {
+ bytes.is_ascii()
+ }
+
+ fn case01_iter_all(bytes: &[u8]) {
+ bytes.iter().all(|b| b.is_ascii())
+ }
+
+ fn case02_align_to(bytes: &[u8]) {
+ is_ascii_align_to(bytes)
+ }
+
+ fn case03_align_to_unrolled(bytes: &[u8]) {
+ is_ascii_align_to_unrolled(bytes)
+ }
+}
+
+// These are separate since it's easier to debug errors if they don't go through
+// macro expansion first.
+fn is_ascii_align_to(bytes: &[u8]) -> bool {
+ if bytes.len() < core::mem::size_of::<usize>() {
+ return bytes.iter().all(|b| b.is_ascii());
+ }
+ // SAFETY: transmuting a sequence of `u8` to `usize` is always fine
+ let (head, body, tail) = unsafe { bytes.align_to::<usize>() };
+ head.iter().all(|b| b.is_ascii())
+ && body.iter().all(|w| !contains_nonascii(*w))
+ && tail.iter().all(|b| b.is_ascii())
+}
+
+fn is_ascii_align_to_unrolled(bytes: &[u8]) -> bool {
+ if bytes.len() < core::mem::size_of::<usize>() {
+ return bytes.iter().all(|b| b.is_ascii());
+ }
+ // SAFETY: transmuting a sequence of `u8` to `[usize; 2]` is always fine
+ let (head, body, tail) = unsafe { bytes.align_to::<[usize; 2]>() };
+ head.iter().all(|b| b.is_ascii())
+ && body.iter().all(|w| !contains_nonascii(w[0] | w[1]))
+ && tail.iter().all(|b| b.is_ascii())
+}
+
+#[inline]
+fn contains_nonascii(v: usize) -> bool {
+ const NONASCII_MASK: usize = usize::from_ne_bytes([0x80; core::mem::size_of::<usize>()]);
+ (NONASCII_MASK & v) != 0
+}
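+
+// Annotation (not part of the upstream file): on a 64-bit target
+// NONASCII_MASK is 0x8080_8080_8080_8080. A word is all-ASCII exactly when no
+// byte in it has its top bit set, so one AND tests size_of::<usize>() bytes at
+// once; the unrolled variant ORs two words first, testing 16 bytes per AND.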
diff --git a/library/core/benches/char/methods.rs b/library/core/benches/char/methods.rs
new file mode 100644
index 000000000..9408f83c3
--- /dev/null
+++ b/library/core/benches/char/methods.rs
@@ -0,0 +1,77 @@
+use test::Bencher;
+
+const CHARS: [char; 9] = ['0', 'x', '2', '5', 'A', 'f', '7', '8', '9'];
+const RADIX: [u32; 5] = [2, 8, 10, 16, 32];
+
+#[bench]
+fn bench_to_digit_radix_2(b: &mut Bencher) {
+ b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_digit(2)).min())
+}
+
+#[bench]
+fn bench_to_digit_radix_10(b: &mut Bencher) {
+ b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_digit(10)).min())
+}
+
+#[bench]
+fn bench_to_digit_radix_16(b: &mut Bencher) {
+ b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_digit(16)).min())
+}
+
+#[bench]
+fn bench_to_digit_radix_36(b: &mut Bencher) {
+ b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_digit(36)).min())
+}
+
+#[bench]
+fn bench_to_digit_radix_var(b: &mut Bencher) {
+ b.iter(|| {
+ CHARS
+ .iter()
+ .cycle()
+ .zip(RADIX.iter().cycle())
+ .take(10_000)
+ .map(|(c, radix)| c.to_digit(*radix))
+ .min()
+ })
+}
+
+#[bench]
+fn bench_to_ascii_uppercase(b: &mut Bencher) {
+ b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_ascii_uppercase()).min())
+}
+
+#[bench]
+fn bench_to_ascii_lowercase(b: &mut Bencher) {
+ b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_ascii_lowercase()).min())
+}
+
+#[bench]
+fn bench_ascii_mix_to_uppercase(b: &mut Bencher) {
+ b.iter(|| (0..=255).cycle().take(10_000).map(|b| char::from(b).to_uppercase()).count())
+}
+
+#[bench]
+fn bench_ascii_mix_to_lowercase(b: &mut Bencher) {
+ b.iter(|| (0..=255).cycle().take(10_000).map(|b| char::from(b).to_lowercase()).count())
+}
+
+#[bench]
+fn bench_ascii_char_to_uppercase(b: &mut Bencher) {
+ b.iter(|| (0..=127).cycle().take(10_000).map(|b| char::from(b).to_uppercase()).count())
+}
+
+#[bench]
+fn bench_ascii_char_to_lowercase(b: &mut Bencher) {
+ b.iter(|| (0..=127).cycle().take(10_000).map(|b| char::from(b).to_lowercase()).count())
+}
+
+#[bench]
+fn bench_non_ascii_char_to_uppercase(b: &mut Bencher) {
+ b.iter(|| (128..=255).cycle().take(10_000).map(|b| char::from(b).to_uppercase()).count())
+}
+
+#[bench]
+fn bench_non_ascii_char_to_lowercase(b: &mut Bencher) {
+ b.iter(|| (128..=255).cycle().take(10_000).map(|b| char::from(b).to_lowercase()).count())
+}
diff --git a/library/core/benches/char/mod.rs b/library/core/benches/char/mod.rs
new file mode 100644
index 000000000..9ca51a768
--- /dev/null
+++ b/library/core/benches/char/mod.rs
@@ -0,0 +1 @@
+mod methods;
diff --git a/library/core/benches/fmt.rs b/library/core/benches/fmt.rs
new file mode 100644
index 000000000..ff726ff75
--- /dev/null
+++ b/library/core/benches/fmt.rs
@@ -0,0 +1,150 @@
+use std::fmt::{self, Write as FmtWrite};
+use std::io::{self, Write as IoWrite};
+use test::Bencher;
+
+#[bench]
+fn write_vec_value(bh: &mut Bencher) {
+ bh.iter(|| {
+ let mut mem = Vec::new();
+ for _ in 0..1000 {
+ mem.write_all("abc".as_bytes()).unwrap();
+ }
+ });
+}
+
+#[bench]
+fn write_vec_ref(bh: &mut Bencher) {
+ bh.iter(|| {
+ let mut mem = Vec::new();
+ let wr = &mut mem as &mut dyn io::Write;
+ for _ in 0..1000 {
+ wr.write_all("abc".as_bytes()).unwrap();
+ }
+ });
+}
+
+#[bench]
+fn write_vec_macro1(bh: &mut Bencher) {
+ bh.iter(|| {
+ let mut mem = Vec::new();
+ let wr = &mut mem as &mut dyn io::Write;
+ for _ in 0..1000 {
+ write!(wr, "abc").unwrap();
+ }
+ });
+}
+
+#[bench]
+fn write_vec_macro2(bh: &mut Bencher) {
+ bh.iter(|| {
+ let mut mem = Vec::new();
+ let wr = &mut mem as &mut dyn io::Write;
+ for _ in 0..1000 {
+ write!(wr, "{}", "abc").unwrap();
+ }
+ });
+}
+
+#[bench]
+fn write_vec_macro_debug(bh: &mut Bencher) {
+ bh.iter(|| {
+ let mut mem = Vec::new();
+ let wr = &mut mem as &mut dyn io::Write;
+ for _ in 0..1000 {
+ write!(wr, "{:?}", "☃").unwrap();
+ }
+ });
+}
+
+#[bench]
+fn write_str_value(bh: &mut Bencher) {
+ bh.iter(|| {
+ let mut mem = String::new();
+ for _ in 0..1000 {
+ mem.write_str("abc").unwrap();
+ }
+ });
+}
+
+#[bench]
+fn write_str_ref(bh: &mut Bencher) {
+ bh.iter(|| {
+ let mut mem = String::new();
+ let wr = &mut mem as &mut dyn fmt::Write;
+ for _ in 0..1000 {
+ wr.write_str("abc").unwrap();
+ }
+ });
+}
+
+#[bench]
+fn write_str_macro1(bh: &mut Bencher) {
+ bh.iter(|| {
+ let mut mem = String::new();
+ for _ in 0..1000 {
+ write!(mem, "abc").unwrap();
+ }
+ });
+}
+
+#[bench]
+fn write_str_macro2(bh: &mut Bencher) {
+ bh.iter(|| {
+ let mut mem = String::new();
+ let wr = &mut mem as &mut dyn fmt::Write;
+ for _ in 0..1000 {
+ write!(wr, "{}", "abc").unwrap();
+ }
+ });
+}
+
+#[bench]
+fn write_str_macro_debug(bh: &mut Bencher) {
+ bh.iter(|| {
+ let mut mem = String::new();
+ let wr = &mut mem as &mut dyn fmt::Write;
+ for _ in 0..1000 {
+ write!(wr, "{:?}", "☃").unwrap();
+ }
+ });
+}
+
+#[bench]
+fn write_str_macro_debug_ascii(bh: &mut Bencher) {
+ bh.iter(|| {
+ let mut mem = String::new();
+ let wr = &mut mem as &mut dyn fmt::Write;
+ for _ in 0..1000 {
+ write!(wr, "{:?}", "Hello, World!").unwrap();
+ }
+ });
+}
+
+#[bench]
+fn write_u128_max(bh: &mut Bencher) {
+ bh.iter(|| {
+ test::black_box(format!("{}", u128::MAX));
+ });
+}
+
+#[bench]
+fn write_u128_min(bh: &mut Bencher) {
+ bh.iter(|| {
+ let s = format!("{}", 0u128);
+ test::black_box(s);
+ });
+}
+
+#[bench]
+fn write_u64_max(bh: &mut Bencher) {
+ bh.iter(|| {
+ test::black_box(format!("{}", u64::MAX));
+ });
+}
+
+#[bench]
+fn write_u64_min(bh: &mut Bencher) {
+ bh.iter(|| {
+ test::black_box(format!("{}", 0u64));
+ });
+}
diff --git a/library/core/benches/hash/mod.rs b/library/core/benches/hash/mod.rs
new file mode 100644
index 000000000..4f2e152b6
--- /dev/null
+++ b/library/core/benches/hash/mod.rs
@@ -0,0 +1 @@
+mod sip;
diff --git a/library/core/benches/hash/sip.rs b/library/core/benches/hash/sip.rs
new file mode 100644
index 000000000..725c864dc
--- /dev/null
+++ b/library/core/benches/hash/sip.rs
@@ -0,0 +1,123 @@
+#![allow(deprecated)]
+
+use core::hash::*;
+use test::{black_box, Bencher};
+
+fn hash_bytes<H: Hasher>(mut s: H, x: &[u8]) -> u64 {
+ Hasher::write(&mut s, x);
+ s.finish()
+}
+
+fn hash_with<H: Hasher, T: Hash>(mut st: H, x: &T) -> u64 {
+ x.hash(&mut st);
+ st.finish()
+}
+
+fn hash<T: Hash>(x: &T) -> u64 {
+ hash_with(SipHasher::new(), x)
+}
+
+#[bench]
+fn bench_str_under_8_bytes(b: &mut Bencher) {
+ let s = "foo";
+ b.iter(|| {
+ assert_eq!(hash(&s), 16262950014981195938);
+ })
+}
+
+#[bench]
+fn bench_str_of_8_bytes(b: &mut Bencher) {
+ let s = "foobar78";
+ b.iter(|| {
+ assert_eq!(hash(&s), 4898293253460910787);
+ })
+}
+
+#[bench]
+fn bench_str_over_8_bytes(b: &mut Bencher) {
+ let s = "foobarbaz0";
+ b.iter(|| {
+ assert_eq!(hash(&s), 10581415515220175264);
+ })
+}
+
+#[bench]
+fn bench_long_str(b: &mut Bencher) {
+ let s = "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor \
+ incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud \
+ exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute \
+ irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla \
+ pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui \
+ officia deserunt mollit anim id est laborum.";
+ b.iter(|| {
+ assert_eq!(hash(&s), 17717065544121360093);
+ })
+}
+
+#[bench]
+fn bench_u32(b: &mut Bencher) {
+ let u = 162629500u32;
+ let u = black_box(u);
+ b.iter(|| hash(&u));
+ b.bytes = 8;
+}
+
+#[bench]
+fn bench_u32_keyed(b: &mut Bencher) {
+ let u = 162629500u32;
+ let u = black_box(u);
+ let k1 = black_box(0x1);
+ let k2 = black_box(0x2);
+ b.iter(|| hash_with(SipHasher::new_with_keys(k1, k2), &u));
+ b.bytes = 8;
+}
+
+#[bench]
+fn bench_u64(b: &mut Bencher) {
+ let u = 16262950014981195938u64;
+ let u = black_box(u);
+ b.iter(|| hash(&u));
+ b.bytes = 8;
+}
+
+#[bench]
+fn bench_bytes_4(b: &mut Bencher) {
+ let data = black_box([b' '; 4]);
+ b.iter(|| hash_bytes(SipHasher::default(), &data));
+ b.bytes = 4;
+}
+
+#[bench]
+fn bench_bytes_7(b: &mut Bencher) {
+ let data = black_box([b' '; 7]);
+ b.iter(|| hash_bytes(SipHasher::default(), &data));
+ b.bytes = 7;
+}
+
+#[bench]
+fn bench_bytes_8(b: &mut Bencher) {
+ let data = black_box([b' '; 8]);
+ b.iter(|| hash_bytes(SipHasher::default(), &data));
+ b.bytes = 8;
+}
+
+#[bench]
+fn bench_bytes_a_16(b: &mut Bencher) {
+ let data = black_box([b' '; 16]);
+ b.iter(|| hash_bytes(SipHasher::default(), &data));
+ b.bytes = 16;
+}
+
+#[bench]
+fn bench_bytes_b_32(b: &mut Bencher) {
+ let data = black_box([b' '; 32]);
+ b.iter(|| hash_bytes(SipHasher::default(), &data));
+ b.bytes = 32;
+}
+
+#[bench]
+fn bench_bytes_c_128(b: &mut Bencher) {
+ let data = black_box([b' '; 128]);
+ b.iter(|| hash_bytes(SipHasher::default(), &data));
+ b.bytes = 128;
+}
diff --git a/library/core/benches/iter.rs b/library/core/benches/iter.rs
new file mode 100644
index 000000000..0abe20e4c
--- /dev/null
+++ b/library/core/benches/iter.rs
@@ -0,0 +1,393 @@
+use core::iter::*;
+use test::{black_box, Bencher};
+
+#[bench]
+fn bench_rposition(b: &mut Bencher) {
+ let it: Vec<usize> = (0..300).collect();
+ b.iter(|| {
+ it.iter().rposition(|&x| x <= 150);
+ });
+}
+
+#[bench]
+fn bench_skip_while(b: &mut Bencher) {
+ b.iter(|| {
+ let it = 0..100;
+ let mut sum = 0;
+ it.skip_while(|&x| {
+ sum += x;
+ sum < 4000
+ })
+ .all(|_| true);
+ });
+}
+
+#[bench]
+fn bench_multiple_take(b: &mut Bencher) {
+ let mut it = (0..42).cycle();
+ b.iter(|| {
+ let n = it.next().unwrap();
+ for _ in 0..n {
+ it.clone().take(it.next().unwrap()).all(|_| true);
+ }
+ });
+}
+
+fn scatter(x: i32) -> i32 {
+ (x * 31) % 127
+}
+
+#[bench]
+fn bench_max_by_key(b: &mut Bencher) {
+ b.iter(|| {
+ let it = 0..100;
+ it.map(black_box).max_by_key(|&x| scatter(x))
+ })
+}
+
+// https://www.reddit.com/r/rust/comments/31syce/using_iterators_to_find_the_index_of_the_min_or/
+#[bench]
+fn bench_max_by_key2(b: &mut Bencher) {
+ fn max_index_iter(array: &[i32]) -> usize {
+ array.iter().enumerate().max_by_key(|&(_, item)| item).unwrap().0
+ }
+
+ let mut data = vec![0; 1638];
+ data[514] = 9999;
+
+ b.iter(|| max_index_iter(&data));
+}
+
+#[bench]
+fn bench_max(b: &mut Bencher) {
+ b.iter(|| {
+ let it = 0..100;
+ it.map(black_box).map(scatter).max()
+ })
+}
+
+pub fn copy_zip(xs: &[u8], ys: &mut [u8]) {
+ for (a, b) in ys.iter_mut().zip(xs) {
+ *a = *b;
+ }
+}
+
+pub fn add_zip(xs: &[f32], ys: &mut [f32]) {
+ for (a, b) in ys.iter_mut().zip(xs) {
+ *a += *b;
+ }
+}
+
+#[bench]
+fn bench_zip_copy(b: &mut Bencher) {
+ let source = vec![0u8; 16 * 1024];
+ let mut dst = black_box(vec![0u8; 16 * 1024]);
+ b.iter(|| copy_zip(&source, &mut dst))
+}
+
+#[bench]
+fn bench_zip_add(b: &mut Bencher) {
+ let source = vec![1.; 16 * 1024];
+ let mut dst = vec![0.; 16 * 1024];
+ b.iter(|| add_zip(&source, &mut dst));
+}
+
+/// `Iterator::for_each` implemented as a plain loop.
+fn for_each_loop<I, F>(iter: I, mut f: F)
+where
+ I: Iterator,
+ F: FnMut(I::Item),
+{
+ for item in iter {
+ f(item);
+ }
+}
+
+/// `Iterator::for_each` implemented with `fold` for internal iteration.
+/// (except when `by_ref()` effectively disables that optimization.)
+fn for_each_fold<I, F>(iter: I, mut f: F)
+where
+ I: Iterator,
+ F: FnMut(I::Item),
+{
+ iter.fold((), move |(), item| f(item));
+}
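+
+// Annotation (not part of the upstream file): `fold` matters here because
+// adapters such as `Chain` override it to run each half as a plain loop,
+// skipping the per-item state check that `next()` performs. `by_ref()` yields
+// `&mut I`, whose `Iterator` impl only forwards `next()`, so the specialized
+// `fold` is lost and internal iteration degenerates to the external loop
+// above.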
+
+#[bench]
+fn bench_for_each_chain_loop(b: &mut Bencher) {
+ b.iter(|| {
+ let mut acc = 0;
+ let iter = (0i64..1000000).chain(0..1000000).map(black_box);
+ for_each_loop(iter, |x| acc += x);
+ acc
+ });
+}
+
+#[bench]
+fn bench_for_each_chain_fold(b: &mut Bencher) {
+ b.iter(|| {
+ let mut acc = 0;
+ let iter = (0i64..1000000).chain(0..1000000).map(black_box);
+ for_each_fold(iter, |x| acc += x);
+ acc
+ });
+}
+
+#[bench]
+fn bench_for_each_chain_ref_fold(b: &mut Bencher) {
+ b.iter(|| {
+ let mut acc = 0;
+ let mut iter = (0i64..1000000).chain(0..1000000).map(black_box);
+ for_each_fold(iter.by_ref(), |x| acc += x);
+ acc
+ });
+}
+
+/// Helper to benchmark `sum` for iterators taken by value which
+/// can optimize `fold`, and by reference which cannot.
+macro_rules! bench_sums {
+ ($bench_sum:ident, $bench_ref_sum:ident, $iter:expr) => {
+ #[bench]
+ fn $bench_sum(b: &mut Bencher) {
+ b.iter(|| -> i64 { $iter.map(black_box).sum() });
+ }
+
+ #[bench]
+ fn $bench_ref_sum(b: &mut Bencher) {
+ b.iter(|| -> i64 { $iter.map(black_box).by_ref().sum() });
+ }
+ };
+}
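+
+// Annotation (not part of the upstream file): for example, the `bench_fuse_*`
+// invocation below expands to one bench summing `(0i64..1000000).fuse()` by
+// value, where `sum` can use the adapter's specialized `fold`, and one summing
+// it through `by_ref()`, which falls back to repeated `next()` calls.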
+
+bench_sums! {
+ bench_flat_map_sum,
+ bench_flat_map_ref_sum,
+ (0i64..1000).flat_map(|x| x..x+1000)
+}
+
+bench_sums! {
+ bench_flat_map_chain_sum,
+ bench_flat_map_chain_ref_sum,
+ (0i64..1000000).flat_map(|x| once(x).chain(once(x)))
+}
+
+bench_sums! {
+ bench_enumerate_sum,
+ bench_enumerate_ref_sum,
+ (0i64..1000000).enumerate().map(|(i, x)| x * i as i64)
+}
+
+bench_sums! {
+ bench_enumerate_chain_sum,
+ bench_enumerate_chain_ref_sum,
+ (0i64..1000000).chain(0..1000000).enumerate().map(|(i, x)| x * i as i64)
+}
+
+bench_sums! {
+ bench_filter_sum,
+ bench_filter_ref_sum,
+ (0i64..1000000).filter(|x| x % 3 == 0)
+}
+
+bench_sums! {
+ bench_filter_chain_sum,
+ bench_filter_chain_ref_sum,
+ (0i64..1000000).chain(0..1000000).filter(|x| x % 3 == 0)
+}
+
+bench_sums! {
+ bench_filter_map_sum,
+ bench_filter_map_ref_sum,
+ (0i64..1000000).filter_map(|x| x.checked_mul(x))
+}
+
+bench_sums! {
+ bench_filter_map_chain_sum,
+ bench_filter_map_chain_ref_sum,
+ (0i64..1000000).chain(0..1000000).filter_map(|x| x.checked_mul(x))
+}
+
+bench_sums! {
+ bench_fuse_sum,
+ bench_fuse_ref_sum,
+ (0i64..1000000).fuse()
+}
+
+bench_sums! {
+ bench_fuse_chain_sum,
+ bench_fuse_chain_ref_sum,
+ (0i64..1000000).chain(0..1000000).fuse()
+}
+
+bench_sums! {
+ bench_inspect_sum,
+ bench_inspect_ref_sum,
+ (0i64..1000000).inspect(|_| {})
+}
+
+bench_sums! {
+ bench_inspect_chain_sum,
+ bench_inspect_chain_ref_sum,
+ (0i64..1000000).chain(0..1000000).inspect(|_| {})
+}
+
+bench_sums! {
+ bench_peekable_sum,
+ bench_peekable_ref_sum,
+ (0i64..1000000).peekable()
+}
+
+bench_sums! {
+ bench_peekable_chain_sum,
+ bench_peekable_chain_ref_sum,
+ (0i64..1000000).chain(0..1000000).peekable()
+}
+
+bench_sums! {
+ bench_skip_sum,
+ bench_skip_ref_sum,
+ (0i64..1000000).skip(1000)
+}
+
+bench_sums! {
+ bench_skip_chain_sum,
+ bench_skip_chain_ref_sum,
+ (0i64..1000000).chain(0..1000000).skip(1000)
+}
+
+bench_sums! {
+ bench_skip_while_sum,
+ bench_skip_while_ref_sum,
+ (0i64..1000000).skip_while(|&x| x < 1000)
+}
+
+bench_sums! {
+ bench_skip_while_chain_sum,
+ bench_skip_while_chain_ref_sum,
+ (0i64..1000000).chain(0..1000000).skip_while(|&x| x < 1000)
+}
+
+bench_sums! {
+ bench_take_while_chain_sum,
+ bench_take_while_chain_ref_sum,
+ (0i64..1000000).chain(1000000..).take_while(|&x| x < 1111111)
+}
+
+bench_sums! {
+ bench_cycle_take_sum,
+ bench_cycle_take_ref_sum,
+ (0..10000).cycle().take(1000000)
+}
+
+bench_sums! {
+ bench_cycle_skip_take_sum,
+ bench_cycle_skip_take_ref_sum,
+ (0..100000).cycle().skip(1000000).take(1000000)
+}
+
+bench_sums! {
+ bench_cycle_take_skip_sum,
+ bench_cycle_take_skip_ref_sum,
+ (0..100000).cycle().take(1000000).skip(100000)
+}
+
+bench_sums! {
+ bench_skip_cycle_skip_zip_add_sum,
+ bench_skip_cycle_skip_zip_add_ref_sum,
+ (0..100000).skip(100).cycle().skip(100)
+ .zip((0..100000).cycle().skip(10))
+ .map(|(a,b)| a+b)
+ .skip(100000)
+ .take(1000000)
+}
+
+// Checks whether Skip<Zip<A,B>> is as fast as Zip<Skip<A>, Skip<B>>, from
+// https://users.rust-lang.org/t/performance-difference-between-iterator-zip-and-skip-order/15743
+#[bench]
+fn bench_zip_then_skip(b: &mut Bencher) {
+ let v: Vec<_> = (0..100_000).collect();
+ let t: Vec<_> = (0..100_000).collect();
+
+ b.iter(|| {
+ let s = v
+ .iter()
+ .zip(t.iter())
+ .skip(10000)
+ .take_while(|t| *t.0 < 10100)
+ .map(|(a, b)| *a + *b)
+ .sum::<u64>();
+ assert_eq!(s, 2009900);
+ });
+}
+#[bench]
+fn bench_skip_then_zip(b: &mut Bencher) {
+ let v: Vec<_> = (0..100_000).collect();
+ let t: Vec<_> = (0..100_000).collect();
+
+ b.iter(|| {
+ let s = v
+ .iter()
+ .skip(10000)
+ .zip(t.iter().skip(10000))
+ .take_while(|t| *t.0 < 10100)
+ .map(|(a, b)| *a + *b)
+ .sum::<u64>();
+ assert_eq!(s, 2009900);
+ });
+}
+
+#[bench]
+fn bench_filter_count(b: &mut Bencher) {
+ b.iter(|| (0i64..1000000).map(black_box).filter(|x| x % 3 == 0).count())
+}
+
+#[bench]
+fn bench_filter_ref_count(b: &mut Bencher) {
+ b.iter(|| (0i64..1000000).map(black_box).by_ref().filter(|x| x % 3 == 0).count())
+}
+
+#[bench]
+fn bench_filter_chain_count(b: &mut Bencher) {
+ b.iter(|| (0i64..1000000).chain(0..1000000).map(black_box).filter(|x| x % 3 == 0).count())
+}
+
+#[bench]
+fn bench_filter_chain_ref_count(b: &mut Bencher) {
+ b.iter(|| {
+ (0i64..1000000).chain(0..1000000).map(black_box).by_ref().filter(|x| x % 3 == 0).count()
+ })
+}
+
+#[bench]
+fn bench_partial_cmp(b: &mut Bencher) {
+ b.iter(|| (0..100000).map(black_box).partial_cmp((0..100000).map(black_box)))
+}
+
+#[bench]
+fn bench_lt(b: &mut Bencher) {
+ b.iter(|| (0..100000).map(black_box).lt((0..100000).map(black_box)))
+}
+
+#[bench]
+fn bench_trusted_random_access_adapters(b: &mut Bencher) {
+ let vec1: Vec<_> = (0usize..100000).collect();
+ let vec2 = black_box(vec1.clone());
+ b.iter(|| {
+ let mut iter = vec1
+ .iter()
+ .copied()
+ .enumerate()
+ .map(|(idx, e)| idx.wrapping_add(e))
+ .zip(vec2.iter().copied())
+ .map(|(a, b)| a.wrapping_add(b))
+ .fuse();
+ let mut acc: usize = 0;
+ let size = iter.size();
+ for i in 0..size {
+ // SAFETY: TRA requirements are satisfied by 0..size iteration and then dropping the
+ // iterator.
+ acc = acc.wrapping_add(unsafe { iter.__iterator_get_unchecked(i) });
+ }
+ acc
+ })
+}
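+
+// Annotation (not part of the upstream file): `size()` and
+// `__iterator_get_unchecked(i)` come from the unstable TrustedRandomAccess
+// machinery (see `#![feature(trusted_random_access)]` in benches/lib.rs); it
+// lets a zip/map pipeline be driven by index instead of by `next()`, which is
+// the code path this bench exercises.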
diff --git a/library/core/benches/lib.rs b/library/core/benches/lib.rs
new file mode 100644
index 000000000..a6c174d2f
--- /dev/null
+++ b/library/core/benches/lib.rs
@@ -0,0 +1,28 @@
+// wasm32 does not support benches (no time).
+#![cfg(not(target_arch = "wasm32"))]
+#![feature(flt2dec)]
+#![feature(int_log)]
+#![feature(test)]
+#![feature(trusted_random_access)]
+
+extern crate test;
+
+mod any;
+mod ascii;
+mod char;
+mod fmt;
+mod hash;
+mod iter;
+mod num;
+mod ops;
+mod pattern;
+mod slice;
+mod str;
+
+/// Returns a `rand::Rng` seeded with a consistent seed.
+///
+/// This is done to avoid introducing nondeterminism in benchmark results.
+fn bench_rng() -> rand_xorshift::XorShiftRng {
+ const SEED: [u8; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
+ rand::SeedableRng::from_seed(SEED)
+}
diff --git a/library/core/benches/num/dec2flt/mod.rs b/library/core/benches/num/dec2flt/mod.rs
new file mode 100644
index 000000000..305baa687
--- /dev/null
+++ b/library/core/benches/num/dec2flt/mod.rs
@@ -0,0 +1,57 @@
+use test::Bencher;
+
+#[bench]
+fn bench_0(b: &mut Bencher) {
+ b.iter(|| "0.0".parse::<f64>());
+}
+
+#[bench]
+fn bench_42(b: &mut Bencher) {
+ b.iter(|| "42".parse::<f64>());
+}
+
+#[bench]
+fn bench_huge_int(b: &mut Bencher) {
+ // 2^128 - 1
+ b.iter(|| "170141183460469231731687303715884105727".parse::<f64>());
+}
+
+#[bench]
+fn bench_short_decimal(b: &mut Bencher) {
+ b.iter(|| "1234.5678".parse::<f64>());
+}
+
+#[bench]
+fn bench_pi_long(b: &mut Bencher) {
+ b.iter(|| "3.14159265358979323846264338327950288".parse::<f64>());
+}
+
+#[bench]
+fn bench_pi_short(b: &mut Bencher) {
+ b.iter(|| "3.141592653589793".parse::<f64>())
+}
+
+#[bench]
+fn bench_1e150(b: &mut Bencher) {
+ b.iter(|| "1e150".parse::<f64>());
+}
+
+#[bench]
+fn bench_long_decimal_and_exp(b: &mut Bencher) {
+ b.iter(|| "727501488517303786137132964064381141071e-123".parse::<f64>());
+}
+
+#[bench]
+fn bench_min_subnormal(b: &mut Bencher) {
+ b.iter(|| "5e-324".parse::<f64>());
+}
+
+#[bench]
+fn bench_min_normal(b: &mut Bencher) {
+ b.iter(|| "2.2250738585072014e-308".parse::<f64>());
+}
+
+#[bench]
+fn bench_max(b: &mut Bencher) {
+ b.iter(|| "1.7976931348623157e308".parse::<f64>());
+}
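+
+// Annotation (not part of the upstream file): judging by the modules under
+// src/num/dec2flt (lemire.rs, slow.rs), these inputs appear to cover the
+// parser's different paths: short decimals for the fast path, the 39-digit
+// mantissas for the Eisel-Lemire algorithm, and boundary values such as
+// 5e-324 (smallest subnormal) and 1.7976931348623157e308 (f64::MAX) for
+// rounding at the edges of f64's range.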
diff --git a/library/core/benches/num/flt2dec/mod.rs b/library/core/benches/num/flt2dec/mod.rs
new file mode 100644
index 000000000..32fd5e626
--- /dev/null
+++ b/library/core/benches/num/flt2dec/mod.rs
@@ -0,0 +1,37 @@
+mod strategy {
+ mod dragon;
+ mod grisu;
+}
+
+use core::num::flt2dec::MAX_SIG_DIGITS;
+use core::num::flt2dec::{decode, DecodableFloat, Decoded, FullDecoded};
+use std::io::Write;
+use std::vec::Vec;
+use test::Bencher;
+
+pub fn decode_finite<T: DecodableFloat>(v: T) -> Decoded {
+ match decode(v).1 {
+ FullDecoded::Finite(decoded) => decoded,
+ full_decoded => panic!("expected finite, got {full_decoded:?} instead"),
+ }
+}
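+
+// Annotation (not part of the upstream file): MAX_SIG_DIGITS is 17 for f64,
+// the most significant decimal digits a shortest round-trip representation
+// can need, so the 20-byte buffer below has headroom for the sign, point and
+// exponent as well.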
+
+#[bench]
+fn bench_small_shortest(b: &mut Bencher) {
+ let mut buf = Vec::with_capacity(20);
+
+ b.iter(|| {
+ buf.clear();
+ write!(&mut buf, "{}", 3.1415926f64).unwrap()
+ });
+}
+
+#[bench]
+fn bench_big_shortest(b: &mut Bencher) {
+ let mut buf = Vec::with_capacity(300);
+
+ b.iter(|| {
+ buf.clear();
+ write!(&mut buf, "{}", f64::MAX).unwrap()
+ });
+}
diff --git a/library/core/benches/num/flt2dec/strategy/dragon.rs b/library/core/benches/num/flt2dec/strategy/dragon.rs
new file mode 100644
index 000000000..319b9773e
--- /dev/null
+++ b/library/core/benches/num/flt2dec/strategy/dragon.rs
@@ -0,0 +1,76 @@
+use super::super::*;
+use core::num::flt2dec::strategy::dragon::*;
+use std::mem::MaybeUninit;
+use test::Bencher;
+
+#[bench]
+fn bench_small_shortest(b: &mut Bencher) {
+ let decoded = decode_finite(3.141592f64);
+ let mut buf = [MaybeUninit::new(0); MAX_SIG_DIGITS];
+ b.iter(|| {
+ format_shortest(&decoded, &mut buf);
+ });
+}
+
+#[bench]
+fn bench_big_shortest(b: &mut Bencher) {
+ let decoded = decode_finite(f64::MAX);
+ let mut buf = [MaybeUninit::new(0); MAX_SIG_DIGITS];
+ b.iter(|| {
+ format_shortest(&decoded, &mut buf);
+ });
+}
+
+#[bench]
+fn bench_small_exact_3(b: &mut Bencher) {
+ let decoded = decode_finite(3.141592f64);
+ let mut buf = [MaybeUninit::new(0); 3];
+ b.iter(|| {
+ format_exact(&decoded, &mut buf, i16::MIN);
+ });
+}
+
+#[bench]
+fn bench_big_exact_3(b: &mut Bencher) {
+ let decoded = decode_finite(f64::MAX);
+ let mut buf = [MaybeUninit::new(0); 3];
+ b.iter(|| {
+ format_exact(&decoded, &mut buf, i16::MIN);
+ });
+}
+
+#[bench]
+fn bench_small_exact_12(b: &mut Bencher) {
+ let decoded = decode_finite(3.141592f64);
+ let mut buf = [MaybeUninit::new(0); 12];
+ b.iter(|| {
+ format_exact(&decoded, &mut buf, i16::MIN);
+ });
+}
+
+#[bench]
+fn bench_big_exact_12(b: &mut Bencher) {
+ let decoded = decode_finite(f64::MAX);
+ let mut buf = [MaybeUninit::new(0); 12];
+ b.iter(|| {
+ format_exact(&decoded, &mut buf, i16::MIN);
+ });
+}
+
+#[bench]
+fn bench_small_exact_inf(b: &mut Bencher) {
+ let decoded = decode_finite(3.141592f64);
+ let mut buf = [MaybeUninit::new(0); 1024];
+ b.iter(|| {
+ format_exact(&decoded, &mut buf, i16::MIN);
+ });
+}
+
+#[bench]
+fn bench_big_exact_inf(b: &mut Bencher) {
+ let decoded = decode_finite(f64::MAX);
+ let mut buf = [MaybeUninit::new(0); 1024];
+ b.iter(|| {
+ format_exact(&decoded, &mut buf, i16::MIN);
+ });
+}
diff --git a/library/core/benches/num/flt2dec/strategy/grisu.rs b/library/core/benches/num/flt2dec/strategy/grisu.rs
new file mode 100644
index 000000000..8e47a046c
--- /dev/null
+++ b/library/core/benches/num/flt2dec/strategy/grisu.rs
@@ -0,0 +1,83 @@
+use super::super::*;
+use core::num::flt2dec::strategy::grisu::*;
+use std::mem::MaybeUninit;
+use test::Bencher;
+
+pub fn decode_finite<T: DecodableFloat>(v: T) -> Decoded {
+ match decode(v).1 {
+ FullDecoded::Finite(decoded) => decoded,
+ full_decoded => panic!("expected finite, got {full_decoded:?} instead"),
+ }
+}
+
+#[bench]
+fn bench_small_shortest(b: &mut Bencher) {
+ let decoded = decode_finite(3.141592f64);
+ let mut buf = [MaybeUninit::new(0); MAX_SIG_DIGITS];
+ b.iter(|| {
+ format_shortest(&decoded, &mut buf);
+ });
+}
+
+#[bench]
+fn bench_big_shortest(b: &mut Bencher) {
+ let decoded = decode_finite(f64::MAX);
+ let mut buf = [MaybeUninit::new(0); MAX_SIG_DIGITS];
+ b.iter(|| {
+ format_shortest(&decoded, &mut buf);
+ });
+}
+
+#[bench]
+fn bench_small_exact_3(b: &mut Bencher) {
+ let decoded = decode_finite(3.141592f64);
+ let mut buf = [MaybeUninit::new(0); 3];
+ b.iter(|| {
+ format_exact(&decoded, &mut buf, i16::MIN);
+ });
+}
+
+#[bench]
+fn bench_big_exact_3(b: &mut Bencher) {
+ let decoded = decode_finite(f64::MAX);
+ let mut buf = [MaybeUninit::new(0); 3];
+ b.iter(|| {
+ format_exact(&decoded, &mut buf, i16::MIN);
+ });
+}
+
+#[bench]
+fn bench_small_exact_12(b: &mut Bencher) {
+ let decoded = decode_finite(3.141592f64);
+ let mut buf = [MaybeUninit::new(0); 12];
+ b.iter(|| {
+ format_exact(&decoded, &mut buf, i16::MIN);
+ });
+}
+
+#[bench]
+fn bench_big_exact_12(b: &mut Bencher) {
+ let decoded = decode_finite(f64::MAX);
+ let mut buf = [MaybeUninit::new(0); 12];
+ b.iter(|| {
+ format_exact(&decoded, &mut buf, i16::MIN);
+ });
+}
+
+#[bench]
+fn bench_small_exact_inf(b: &mut Bencher) {
+ let decoded = decode_finite(3.141592f64);
+ let mut buf = [MaybeUninit::new(0); 1024];
+ b.iter(|| {
+ format_exact(&decoded, &mut buf, i16::MIN);
+ });
+}
+
+#[bench]
+fn bench_big_exact_inf(b: &mut Bencher) {
+ let decoded = decode_finite(f64::MAX);
+ let mut buf = [MaybeUninit::new(0); 1024];
+ b.iter(|| {
+ format_exact(&decoded, &mut buf, i16::MIN);
+ });
+}
diff --git a/library/core/benches/num/int_log/mod.rs b/library/core/benches/num/int_log/mod.rs
new file mode 100644
index 000000000..19864d2d4
--- /dev/null
+++ b/library/core/benches/num/int_log/mod.rs
@@ -0,0 +1,58 @@
+use rand::Rng;
+use test::{black_box, Bencher};
+
+macro_rules! int_log_bench {
+ ($t:ty, $predictable:ident, $random:ident, $random_small:ident) => {
+ #[bench]
+ fn $predictable(bench: &mut Bencher) {
+ bench.iter(|| {
+ for n in 0..(<$t>::BITS / 8) {
+ for i in 1..=(100 as $t) {
+ let x = black_box(i << (n * 8));
+ black_box(x.log10());
+ }
+ }
+ });
+ }
+
+ #[bench]
+ fn $random(bench: &mut Bencher) {
+ let mut rng = crate::bench_rng();
+ /* Exponentially distributed random numbers from the whole range of the type. */
+ let numbers: Vec<$t> = (0..256)
+ .map(|_| {
+ let x = rng.gen::<$t>() >> rng.gen_range(0, <$t>::BITS);
+ if x != 0 { x } else { 1 }
+ })
+ .collect();
+ bench.iter(|| {
+ for x in &numbers {
+ black_box(black_box(x).log10());
+ }
+ });
+ }
+
+ #[bench]
+ fn $random_small(bench: &mut Bencher) {
+ let mut rng = crate::bench_rng();
+ /* Exponentially distributed random numbers from the range 0..256. */
+ let numbers: Vec<$t> = (0..256)
+ .map(|_| {
+ let x = (rng.gen::<u8>() >> rng.gen_range(0, u8::BITS)) as $t;
+ if x != 0 { x } else { 1 }
+ })
+ .collect();
+ bench.iter(|| {
+ for x in &numbers {
+ black_box(black_box(x).log10());
+ }
+ });
+ }
+ };
+}
+
+int_log_bench! {u8, u8_log10_predictable, u8_log10_random, u8_log10_random_small}
+int_log_bench! {u16, u16_log10_predictable, u16_log10_random, u16_log10_random_small}
+int_log_bench! {u32, u32_log10_predictable, u32_log10_random, u32_log10_random_small}
+int_log_bench! {u64, u64_log10_predictable, u64_log10_random, u64_log10_random_small}
+int_log_bench! {u128, u128_log10_predictable, u128_log10_random, u128_log10_random_small}
diff --git a/library/core/benches/num/mod.rs b/library/core/benches/num/mod.rs
new file mode 100644
index 000000000..2f9cad272
--- /dev/null
+++ b/library/core/benches/num/mod.rs
@@ -0,0 +1,108 @@
+mod dec2flt;
+mod flt2dec;
+mod int_log;
+
+use std::str::FromStr;
+use test::Bencher;
+
+const ASCII_NUMBERS: [&str; 19] = [
+ "0",
+ "1",
+ "2",
+ "43",
+ "765",
+ "76567",
+ "987245987",
+ "-4aa32",
+ "1786235",
+ "8723095",
+ "f##5s",
+ "83638730",
+ "-2345",
+ "562aa43",
+ "-1",
+ "-0",
+ "abc",
+ "xyz",
+ "c0ffee",
+];
+
+macro_rules! from_str_bench {
+ ($mac:ident, $t:ty) => {
+ #[bench]
+ fn $mac(b: &mut Bencher) {
+ b.iter(|| {
+ ASCII_NUMBERS
+ .iter()
+ .cycle()
+ .take(5_000)
+ .filter_map(|s| <$t>::from_str(s).ok())
+ .max()
+ })
+ }
+ };
+}
+
+macro_rules! from_str_radix_bench {
+ ($mac:ident, $t:ty, $radix:expr) => {
+ #[bench]
+ fn $mac(b: &mut Bencher) {
+ b.iter(|| {
+ ASCII_NUMBERS
+ .iter()
+ .cycle()
+ .take(5_000)
+ .filter_map(|s| <$t>::from_str_radix(s, $radix).ok())
+ .max()
+ })
+ }
+ };
+}
+
+from_str_bench!(bench_u8_from_str, u8);
+from_str_radix_bench!(bench_u8_from_str_radix_2, u8, 2);
+from_str_radix_bench!(bench_u8_from_str_radix_10, u8, 10);
+from_str_radix_bench!(bench_u8_from_str_radix_16, u8, 16);
+from_str_radix_bench!(bench_u8_from_str_radix_36, u8, 36);
+
+from_str_bench!(bench_u16_from_str, u16);
+from_str_radix_bench!(bench_u16_from_str_radix_2, u16, 2);
+from_str_radix_bench!(bench_u16_from_str_radix_10, u16, 10);
+from_str_radix_bench!(bench_u16_from_str_radix_16, u16, 16);
+from_str_radix_bench!(bench_u16_from_str_radix_36, u16, 36);
+
+from_str_bench!(bench_u32_from_str, u32);
+from_str_radix_bench!(bench_u32_from_str_radix_2, u32, 2);
+from_str_radix_bench!(bench_u32_from_str_radix_10, u32, 10);
+from_str_radix_bench!(bench_u32_from_str_radix_16, u32, 16);
+from_str_radix_bench!(bench_u32_from_str_radix_36, u32, 36);
+
+from_str_bench!(bench_u64_from_str, u64);
+from_str_radix_bench!(bench_u64_from_str_radix_2, u64, 2);
+from_str_radix_bench!(bench_u64_from_str_radix_10, u64, 10);
+from_str_radix_bench!(bench_u64_from_str_radix_16, u64, 16);
+from_str_radix_bench!(bench_u64_from_str_radix_36, u64, 36);
+
+from_str_bench!(bench_i8_from_str, i8);
+from_str_radix_bench!(bench_i8_from_str_radix_2, i8, 2);
+from_str_radix_bench!(bench_i8_from_str_radix_10, i8, 10);
+from_str_radix_bench!(bench_i8_from_str_radix_16, i8, 16);
+from_str_radix_bench!(bench_i8_from_str_radix_36, i8, 36);
+
+from_str_bench!(bench_i16_from_str, i16);
+from_str_radix_bench!(bench_i16_from_str_radix_2, i16, 2);
+from_str_radix_bench!(bench_i16_from_str_radix_10, i16, 10);
+from_str_radix_bench!(bench_i16_from_str_radix_16, i16, 16);
+from_str_radix_bench!(bench_i16_from_str_radix_36, i16, 36);
+
+from_str_bench!(bench_i32_from_str, i32);
+from_str_radix_bench!(bench_i32_from_str_radix_2, i32, 2);
+from_str_radix_bench!(bench_i32_from_str_radix_10, i32, 10);
+from_str_radix_bench!(bench_i32_from_str_radix_16, i32, 16);
+from_str_radix_bench!(bench_i32_from_str_radix_36, i32, 36);
+
+from_str_bench!(bench_i64_from_str, i64);
+from_str_radix_bench!(bench_i64_from_str_radix_2, i64, 2);
+from_str_radix_bench!(bench_i64_from_str_radix_10, i64, 10);
+from_str_radix_bench!(bench_i64_from_str_radix_16, i64, 16);
+from_str_radix_bench!(bench_i64_from_str_radix_36, i64, 36);
diff --git a/library/core/benches/ops.rs b/library/core/benches/ops.rs
new file mode 100644
index 000000000..0a2be8a28
--- /dev/null
+++ b/library/core/benches/ops.rs
@@ -0,0 +1,19 @@
+use core::ops::*;
+use test::Bencher;
+
+// Overhead of dtors
+
+struct HasDtor {
+ _x: isize,
+}
+
+impl Drop for HasDtor {
+ fn drop(&mut self) {}
+}
+
+#[bench]
+fn alloc_obj_with_dtor(b: &mut Bencher) {
+ b.iter(|| {
+ HasDtor { _x: 10 };
+ })
+}
diff --git a/library/core/benches/pattern.rs b/library/core/benches/pattern.rs
new file mode 100644
index 000000000..480ac6f36
--- /dev/null
+++ b/library/core/benches/pattern.rs
@@ -0,0 +1,42 @@
+use test::black_box;
+use test::Bencher;
+
+#[bench]
+fn starts_with_char(b: &mut Bencher) {
+ let text = black_box("kdjsfhlakfhlsghlkvcnljknfqiunvcijqenwodind");
+ b.iter(|| {
+ for _ in 0..1024 {
+ black_box(text.starts_with('k'));
+ }
+ })
+}
+
+#[bench]
+fn starts_with_str(b: &mut Bencher) {
+ let text = black_box("kdjsfhlakfhlsghlkvcnljknfqiunvcijqenwodind");
+ b.iter(|| {
+ for _ in 0..1024 {
+ black_box(text.starts_with("k"));
+ }
+ })
+}
+
+#[bench]
+fn ends_with_char(b: &mut Bencher) {
+ let text = black_box("kdjsfhlakfhlsghlkvcnljknfqiunvcijqenwodind");
+ b.iter(|| {
+ for _ in 0..1024 {
+ black_box(text.ends_with('k'));
+ }
+ })
+}
+
+#[bench]
+fn ends_with_str(b: &mut Bencher) {
+ let text = black_box("kdjsfhlakfhlsghlkvcnljknfqiunvcijqenwodind");
+ b.iter(|| {
+ for _ in 0..1024 {
+ black_box(text.ends_with("k"));
+ }
+ })
+}
diff --git a/library/core/benches/slice.rs b/library/core/benches/slice.rs
new file mode 100644
index 000000000..9b86a0ca9
--- /dev/null
+++ b/library/core/benches/slice.rs
@@ -0,0 +1,164 @@
+use test::black_box;
+use test::Bencher;
+
+enum Cache {
+ L1,
+ L2,
+ L3,
+}
+
+impl Cache {
+ fn size(&self) -> usize {
+ match self {
+ Cache::L1 => 1000, // 8 KB (1000 `usize` values at 8 bytes each on 64-bit)
+ Cache::L2 => 10_000, // 80 KB
+ Cache::L3 => 1_000_000, // 8 MB
+ }
+ }
+}
+
+fn binary_search<F>(b: &mut Bencher, cache: Cache, mapper: F)
+where
+ F: Fn(usize) -> usize,
+{
+ let size = cache.size();
+ let v = (0..size).map(&mapper).collect::<Vec<_>>();
+ let mut r = 0usize;
+ b.iter(move || {
+ // LCG constants from https://en.wikipedia.org/wiki/Numerical_Recipes.
+ r = r.wrapping_mul(1664525).wrapping_add(1013904223);
+ // Lookup the whole range to get 50% hits and 50% misses.
+ let i = mapper(r % size);
+ black_box(v.binary_search(&i).is_ok());
+ });
+}
+
+fn binary_search_worst_case(b: &mut Bencher, cache: Cache) {
+ let size = cache.size();
+
+ let mut v = vec![0; size];
+ let i = 1;
+ v[size - 1] = i;
+ b.iter(move || {
+ black_box(v.binary_search(&i).is_ok());
+ });
+}
+
+#[bench]
+fn binary_search_l1(b: &mut Bencher) {
+ binary_search(b, Cache::L1, |i| i * 2);
+}
+
+#[bench]
+fn binary_search_l2(b: &mut Bencher) {
+ binary_search(b, Cache::L2, |i| i * 2);
+}
+
+#[bench]
+fn binary_search_l3(b: &mut Bencher) {
+ binary_search(b, Cache::L3, |i| i * 2);
+}
+
+#[bench]
+fn binary_search_l1_with_dups(b: &mut Bencher) {
+ binary_search(b, Cache::L1, |i| i / 16 * 16);
+}
+
+#[bench]
+fn binary_search_l2_with_dups(b: &mut Bencher) {
+ binary_search(b, Cache::L2, |i| i / 16 * 16);
+}
+
+#[bench]
+fn binary_search_l3_with_dups(b: &mut Bencher) {
+ binary_search(b, Cache::L3, |i| i / 16 * 16);
+}
+
+#[bench]
+fn binary_search_l1_worst_case(b: &mut Bencher) {
+ binary_search_worst_case(b, Cache::L1);
+}
+
+#[bench]
+fn binary_search_l2_worst_case(b: &mut Bencher) {
+ binary_search_worst_case(b, Cache::L2);
+}
+
+#[bench]
+fn binary_search_l3_worst_case(b: &mut Bencher) {
+ binary_search_worst_case(b, Cache::L3);
+}
+
+#[derive(Clone)]
+struct Rgb(u8, u8, u8);
+
+impl Rgb {
+ fn gen(i: usize) -> Self {
+ Rgb(i as u8, (i as u8).wrapping_add(7), (i as u8).wrapping_add(42))
+ }
+}
+
+macro_rules! rotate {
+ ($fn:ident, $n:expr, $mapper:expr) => {
+ #[bench]
+ fn $fn(b: &mut Bencher) {
+ let mut x = (0usize..$n).map(&$mapper).collect::<Vec<_>>();
+ b.iter(|| {
+ for s in 0..x.len() {
+ x[..].rotate_right(s);
+ }
+ black_box(x[0].clone())
+ })
+ }
+ };
+}
+
+rotate!(rotate_u8, 32, |i| i as u8);
+rotate!(rotate_rgb, 32, Rgb::gen);
+rotate!(rotate_usize, 32, |i| i);
+rotate!(rotate_16_usize_4, 16, |i| [i; 4]);
+rotate!(rotate_16_usize_5, 16, |i| [i; 5]);
+rotate!(rotate_64_usize_4, 64, |i| [i; 4]);
+rotate!(rotate_64_usize_5, 64, |i| [i; 5]);
+
+macro_rules! swap_with_slice {
+ ($fn:ident, $n:expr, $mapper:expr) => {
+ #[bench]
+ fn $fn(b: &mut Bencher) {
+ let mut x = (0usize..$n).map(&$mapper).collect::<Vec<_>>();
+ let mut y = ($n..($n * 2)).map(&$mapper).collect::<Vec<_>>();
+ let mut skip = 0;
+ b.iter(|| {
+ for _ in 0..32 {
+ x[skip..].swap_with_slice(&mut y[..($n - skip)]);
+ skip = black_box(skip + 1) % 8;
+ }
+ black_box((x[$n / 3].clone(), y[$n * 2 / 3].clone()))
+ })
+ }
+ };
+}
+
+swap_with_slice!(swap_with_slice_u8_30, 30, |i| i as u8);
+swap_with_slice!(swap_with_slice_u8_3000, 3000, |i| i as u8);
+swap_with_slice!(swap_with_slice_rgb_30, 30, Rgb::gen);
+swap_with_slice!(swap_with_slice_rgb_3000, 3000, Rgb::gen);
+swap_with_slice!(swap_with_slice_usize_30, 30, |i| i);
+swap_with_slice!(swap_with_slice_usize_3000, 3000, |i| i);
+swap_with_slice!(swap_with_slice_4x_usize_30, 30, |i| [i; 4]);
+swap_with_slice!(swap_with_slice_4x_usize_3000, 3000, |i| [i; 4]);
+swap_with_slice!(swap_with_slice_5x_usize_30, 30, |i| [i; 5]);
+swap_with_slice!(swap_with_slice_5x_usize_3000, 3000, |i| [i; 5]);
+
+#[bench]
+fn fill_byte_sized(b: &mut Bencher) {
+ #[derive(Copy, Clone)]
+ struct NewType(u8);
+
+ let mut ary = [NewType(0); 1024];
+
+ b.iter(|| {
+ let slice = &mut ary[..];
+ black_box(slice.fill(black_box(NewType(42))));
+ });
+}
diff --git a/library/core/benches/str.rs b/library/core/benches/str.rs
new file mode 100644
index 000000000..78865d81f
--- /dev/null
+++ b/library/core/benches/str.rs
@@ -0,0 +1,10 @@
+use std::str;
+use test::{black_box, Bencher};
+
+mod char_count;
+mod corpora;
+
+#[bench]
+fn str_validate_emoji(b: &mut Bencher) {
+ b.iter(|| str::from_utf8(black_box(corpora::emoji::LARGE.as_bytes())));
+}
diff --git a/library/core/benches/str/char_count.rs b/library/core/benches/str/char_count.rs
new file mode 100644
index 000000000..25d9b2e29
--- /dev/null
+++ b/library/core/benches/str/char_count.rs
@@ -0,0 +1,107 @@
+use super::corpora::*;
+use test::{black_box, Bencher};
+
+macro_rules! define_benches {
+ ($( fn $name: ident($arg: ident: &str) $body: block )+) => {
+ define_benches!(mod en_tiny, en::TINY, $($name $arg $body)+);
+ define_benches!(mod en_small, en::SMALL, $($name $arg $body)+);
+ define_benches!(mod en_medium, en::MEDIUM, $($name $arg $body)+);
+ define_benches!(mod en_large, en::LARGE, $($name $arg $body)+);
+ define_benches!(mod en_huge, en::HUGE, $($name $arg $body)+);
+
+ define_benches!(mod zh_tiny, zh::TINY, $($name $arg $body)+);
+ define_benches!(mod zh_small, zh::SMALL, $($name $arg $body)+);
+ define_benches!(mod zh_medium, zh::MEDIUM, $($name $arg $body)+);
+ define_benches!(mod zh_large, zh::LARGE, $($name $arg $body)+);
+ define_benches!(mod zh_huge, zh::HUGE, $($name $arg $body)+);
+
+ define_benches!(mod ru_tiny, ru::TINY, $($name $arg $body)+);
+ define_benches!(mod ru_small, ru::SMALL, $($name $arg $body)+);
+ define_benches!(mod ru_medium, ru::MEDIUM, $($name $arg $body)+);
+ define_benches!(mod ru_large, ru::LARGE, $($name $arg $body)+);
+ define_benches!(mod ru_huge, ru::HUGE, $($name $arg $body)+);
+
+ define_benches!(mod emoji_tiny, emoji::TINY, $($name $arg $body)+);
+ define_benches!(mod emoji_small, emoji::SMALL, $($name $arg $body)+);
+ define_benches!(mod emoji_medium, emoji::MEDIUM, $($name $arg $body)+);
+ define_benches!(mod emoji_large, emoji::LARGE, $($name $arg $body)+);
+ define_benches!(mod emoji_huge, emoji::HUGE, $($name $arg $body)+);
+ };
+ (mod $mod_name: ident, $input: expr, $($name: ident $arg: ident $body: block)+) => {
+ mod $mod_name {
+ use super::*;
+ $(
+ #[bench]
+ fn $name(bencher: &mut Bencher) {
+ let input = $input;
+ bencher.bytes = input.len() as u64;
+ let mut input_s = input.to_string();
+ bencher.iter(|| {
+ let $arg: &str = &black_box(&mut input_s);
+ black_box($body)
+ })
+ }
+ )+
+ }
+ };
+}
+
+define_benches! {
+ fn case00_libcore(s: &str) {
+ libcore(s)
+ }
+
+ fn case01_filter_count_cont_bytes(s: &str) {
+ filter_count_cont_bytes(s)
+ }
+
+ fn case02_iter_increment(s: &str) {
+ iterator_increment(s)
+ }
+
+ fn case03_manual_char_len(s: &str) {
+ manual_char_len(s)
+ }
+}
+
+fn libcore(s: &str) -> usize {
+ s.chars().count()
+}
+
+#[inline]
+fn utf8_is_cont_byte(byte: u8) -> bool {
+ // UTF-8 continuation bytes have the form 0b10xx_xxxx (0x80..=0xBF);
+ // reinterpreted as `i8`, those are exactly the values below -64.
+ (byte as i8) < -64
+}
+
+fn filter_count_cont_bytes(s: &str) -> usize {
+ s.as_bytes().iter().filter(|&&byte| !utf8_is_cont_byte(byte)).count()
+}
+
+fn iterator_increment(s: &str) -> usize {
+ let mut c = 0;
+ for _ in s.chars() {
+ c += 1;
+ }
+ c
+}
+
+fn manual_char_len(s: &str) -> usize {
+ let s = s.as_bytes();
+ let mut c = 0;
+ let mut i = 0;
+ let l = s.len();
+ while i < l {
+ // `i` always sits on a char boundary, so `b` is a leading byte whose
+ // value range determines how many bytes the char occupies.
+ let b = s[i];
+ if b < 0x80 {
+ i += 1; // ASCII
+ } else if b < 0xe0 {
+ i += 2; // two-byte sequence
+ } else if b < 0xf0 {
+ i += 3; // three-byte sequence
+ } else {
+ i += 4; // four-byte sequence
+ }
+ c += 1;
+ }
+ c
+}
diff --git a/library/core/benches/str/corpora.rs b/library/core/benches/str/corpora.rs
new file mode 100644
index 000000000..b4ac62506
--- /dev/null
+++ b/library/core/benches/str/corpora.rs
@@ -0,0 +1,88 @@
+//! Exposes a number of modules with different kinds of strings.
+//!
+//! Each module contains `&str` constants named `TINY`, `SMALL`, `MEDIUM`,
+//! `LARGE`, and `HUGE`.
+//!
+//! - The `TINY` string is generally around 8 bytes.
+//! - The `SMALL` string is generally around 30-40 bytes.
+//! - The `MEDIUM` string is generally around 600-700 bytes.
+//! - The `LARGE` string is the `MEDIUM` string repeated 8x, and is around 5kb.
+//! - The `HUGE` string is the `LARGE` string repeated 8x (or the `MEDIUM`
+//! string repeated 64x), and is around 40kb.
+//!
+//! Except for `mod emoji` (which is just a bunch of emoji), the strings were
+//! pulled from (localizations of) rust-lang.org.
+
+macro_rules! repeat8 {
+ ($s:expr) => {
+ concat!($s, $s, $s, $s, $s, $s, $s, $s)
+ };
+}
+
+macro_rules! define_consts {
+ ($s:literal) => {
+ pub const MEDIUM: &str = $s;
+ pub const LARGE: &str = repeat8!($s);
+ pub const HUGE: &str = repeat8!(repeat8!($s));
+ };
+}
+
+pub mod en {
+ pub const TINY: &str = "Mary had";
+ pub const SMALL: &str = "Mary had a little lamb, Little lamb";
+ define_consts! {
+ "Rust is blazingly fast and memory-efficient: with no runtime or garbage
+ collector, it can power performance-critical services, run on embedded
+ devices, and easily integrate with other languages. Rust’s rich type system
+ and ownership model guarantee memory-safety and thread-safety — enabling you
+ to eliminate many classes of bugs at compile-time. Rust has great
+ documentation, a friendly compiler with useful error messages, and top-notch
+ tooling — an integrated package manager and build tool, smart multi-editor
+ support with auto-completion and type inspections, an auto-formatter, and
+ more."
+ }
+}
+
+pub mod zh {
+ pub const TINY: &str = "速度惊";
+ pub const SMALL: &str = "速度惊人且内存利用率极高";
+ define_consts! {
+ "Rust 速度惊人且内存利用率极高。由于\
+ 没有运行时和垃圾回收,它能够胜任对性能要\
+ 求特别高的服务,可以在嵌入式设备上运行,\
+ 还能轻松和其他语言集成。Rust 丰富的类型\
+ 系统和所有权模型保证了内存安全和线程安全,\
+ 让您在编译期就能够消除各种各样的错误。\
+ Rust 拥有出色的文档、友好的编译器和清晰\
+ 的错误提示信息, 还集成了一流的工具——\
+ 包管理器和构建工具, 智能地自动补全和类\
+ 型检验的多编辑器支持, 以及自动格式化代\
+ 码等等。"
+ }
+}
+
+pub mod ru {
+ pub const TINY: &str = "Сотни";
+ pub const SMALL: &str = "Сотни компаний по";
+ define_consts! {
+ "Сотни компаний по всему миру используют Rust в реальных\
+ проектах для быстрых кросс-платформенных решений с\
+ ограниченными ресурсами. Такие проекты, как Firefox,\
+ Dropbox и Cloudflare, используют Rust. Rust отлично\
+ подходит как для стартапов, так и для больших компаний,\
+ как для встраиваемых устройств, так и для масштабируемых\
+ web-сервисов. Мой самый большой комплимент Rust."
+ }
+}
+
+pub mod emoji {
+ pub const TINY: &str = "😀😃";
+ pub const SMALL: &str = "😀😃😄😁😆😅🤣😂🙂🙃😉😊😇🥰😍🤩😘";
+ define_consts! {
+ "😀😃😄😁😆😅🤣😂🙂🙃😉😊😇🥰😍🤩😘😗☺😚😙🥲😋😛😜🤪😝🤑🤗🤭🤫🤔🤐🤨😐😑😶😶‍🌫️😏😒\
+ 🙄😬😮‍💨🤥😌😔😪🤤😴😷🤒🤕🤢🤮🤧🥵🥶🥴😵😵‍💫🤯🤠🥳🥸😎🤓🧐😕😟🙁☹😮😯😲😳🥺😦😧😨\
+ 😰😥😢😭😱😖😣😞😓😩😫🥱😤😡😠🤬😈👿💀☠💩🤡👹👺👻👽👾🤖😺😸😹😻😼😽🙀😿😾🙈🙉🙊\
+ 💋💌💘💝💖💗💓💞💕💟❣💔❤️‍🔥❤️‍🩹❤🧡💛💚💙💜🤎🖤🤍💯💢💥💫💦💨🕳💬👁️‍🗨️🗨🗯💭💤👋\
+ 🤚🖐✋🖖👌🤌🤏✌"
+ }
+}
diff --git a/library/core/primitive_docs/box_into_raw.md b/library/core/primitive_docs/box_into_raw.md
new file mode 100644
index 000000000..9dd0344c7
--- /dev/null
+++ b/library/core/primitive_docs/box_into_raw.md
@@ -0,0 +1 @@
+../std/boxed/struct.Box.html#method.into_raw
diff --git a/library/core/primitive_docs/fs_file.md b/library/core/primitive_docs/fs_file.md
new file mode 100644
index 000000000..4023e340a
--- /dev/null
+++ b/library/core/primitive_docs/fs_file.md
@@ -0,0 +1 @@
+../std/fs/struct.File.html
diff --git a/library/core/primitive_docs/io_bufread.md b/library/core/primitive_docs/io_bufread.md
new file mode 100644
index 000000000..7beda2cd3
--- /dev/null
+++ b/library/core/primitive_docs/io_bufread.md
@@ -0,0 +1 @@
+../std/io/trait.BufRead.html
diff --git a/library/core/primitive_docs/io_read.md b/library/core/primitive_docs/io_read.md
new file mode 100644
index 000000000..b7ecf5e27
--- /dev/null
+++ b/library/core/primitive_docs/io_read.md
@@ -0,0 +1 @@
+../std/io/trait.Read.html
diff --git a/library/core/primitive_docs/io_seek.md b/library/core/primitive_docs/io_seek.md
new file mode 100644
index 000000000..db0274d29
--- /dev/null
+++ b/library/core/primitive_docs/io_seek.md
@@ -0,0 +1 @@
+../std/io/trait.Seek.html
diff --git a/library/core/primitive_docs/io_write.md b/library/core/primitive_docs/io_write.md
new file mode 100644
index 000000000..92a3b88a7
--- /dev/null
+++ b/library/core/primitive_docs/io_write.md
@@ -0,0 +1 @@
+../std/io/trait.Write.html
diff --git a/library/core/primitive_docs/net_tosocketaddrs.md b/library/core/primitive_docs/net_tosocketaddrs.md
new file mode 100644
index 000000000..4daa10ddb
--- /dev/null
+++ b/library/core/primitive_docs/net_tosocketaddrs.md
@@ -0,0 +1 @@
+../std/net/trait.ToSocketAddrs.html
diff --git a/library/core/primitive_docs/process_exit.md b/library/core/primitive_docs/process_exit.md
new file mode 100644
index 000000000..cae34d12d
--- /dev/null
+++ b/library/core/primitive_docs/process_exit.md
@@ -0,0 +1 @@
+../std/process/fn.exit.html
diff --git a/library/core/primitive_docs/string_string.md b/library/core/primitive_docs/string_string.md
new file mode 100644
index 000000000..303dc07b1
--- /dev/null
+++ b/library/core/primitive_docs/string_string.md
@@ -0,0 +1 @@
+../std/string/struct.String.html
diff --git a/library/core/src/alloc/global.rs b/library/core/src/alloc/global.rs
new file mode 100644
index 000000000..887246c60
--- /dev/null
+++ b/library/core/src/alloc/global.rs
@@ -0,0 +1,275 @@
+use crate::alloc::Layout;
+use crate::cmp;
+use crate::ptr;
+
+/// A memory allocator that can be registered as the standard library’s default
+/// through the `#[global_allocator]` attribute.
+///
+/// Some of the methods require that a memory block be *currently
+/// allocated* via an allocator. This means that:
+///
+/// * the starting address for that memory block was returned by a
+/// previous call to an allocation method such as `alloc`, and
+///
+/// * the memory block has not been subsequently deallocated, where
+/// blocks are deallocated either by being passed to a deallocation
+/// method such as `dealloc` or by being
+/// passed to a reallocation method that returns a non-null pointer.
+///
+/// # Examples
+///
+/// ```
+/// use std::alloc::{GlobalAlloc, Layout};
+/// use std::cell::UnsafeCell;
+/// use std::ptr::null_mut;
+/// use std::sync::atomic::{
+/// AtomicUsize,
+/// Ordering::{Acquire, SeqCst},
+/// };
+///
+/// const ARENA_SIZE: usize = 128 * 1024;
+/// const MAX_SUPPORTED_ALIGN: usize = 4096;
+/// #[repr(C, align(4096))] // 4096 == MAX_SUPPORTED_ALIGN
+/// struct SimpleAllocator {
+/// arena: UnsafeCell<[u8; ARENA_SIZE]>,
+/// remaining: AtomicUsize, // we allocate from the top, counting down
+/// }
+///
+/// #[global_allocator]
+/// static ALLOCATOR: SimpleAllocator = SimpleAllocator {
+/// arena: UnsafeCell::new([0x55; ARENA_SIZE]),
+/// remaining: AtomicUsize::new(ARENA_SIZE),
+/// };
+///
+/// unsafe impl Sync for SimpleAllocator {}
+///
+/// unsafe impl GlobalAlloc for SimpleAllocator {
+/// unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+/// let size = layout.size();
+/// let align = layout.align();
+///
+/// // `Layout` contract forbids making a `Layout` with align=0, or align not power of 2.
+/// // So we can safely use a mask to ensure alignment without worrying about UB.
+/// let align_mask_to_round_down = !(align - 1);
+///
+/// if align > MAX_SUPPORTED_ALIGN {
+/// return null_mut();
+/// }
+///
+/// let mut allocated = 0;
+/// if self
+/// .remaining
+/// .fetch_update(SeqCst, SeqCst, |mut remaining| {
+/// if size > remaining {
+/// return None;
+/// }
+/// remaining -= size;
+/// remaining &= align_mask_to_round_down;
+/// allocated = remaining;
+/// Some(remaining)
+/// })
+/// .is_err()
+/// {
+/// return null_mut();
+/// };
+/// (self.arena.get() as *mut u8).add(allocated)
+/// }
+/// unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {}
+/// }
+///
+/// fn main() {
+/// let _s = format!("allocating a string!");
+/// let currently = ALLOCATOR.remaining.load(Acquire);
+/// println!("allocated so far: {}", ARENA_SIZE - currently);
+/// }
+/// ```
+///
+/// # Safety
+///
+/// The `GlobalAlloc` trait is an `unsafe` trait for a number of reasons, and
+/// implementors must ensure that they adhere to these contracts:
+///
+/// * It's undefined behavior if global allocators unwind. This restriction may
+/// be lifted in the future, but currently a panic from any of these
+/// functions may lead to memory unsafety.
+///
+/// * `Layout` queries and calculations in general must be correct. Callers of
+/// this trait are allowed to rely on the contracts defined on each method,
+/// and implementors must ensure such contracts remain true.
+///
+/// * You must not rely on allocations actually happening, even if there are explicit
+/// heap allocations in the source. The optimizer may detect unused allocations that it can either
+/// eliminate entirely or move to the stack and thus never invoke the allocator. The
+/// optimizer may further assume that allocation is infallible, so code that used to fail due
+/// to allocator failures may now suddenly work because the optimizer worked around the
+/// need for an allocation. More concretely, the following code example is unsound, irrespective
+/// of whether your custom allocator allows counting how many allocations have happened.
+///
+/// ```rust,ignore (unsound and has placeholders)
+/// drop(Box::new(42));
+/// let number_of_heap_allocs = /* call private allocator API */;
+/// unsafe { std::intrinsics::assume(number_of_heap_allocs > 0); }
+/// ```
+///
+/// Note that the optimizations mentioned above are not the only
+/// ones that can be applied. You may generally not rely on heap allocations
+/// happening if they can be removed without changing program behavior.
+/// Whether allocations happen or not is not part of the program behavior, even if it
+/// could be detected via an allocator that tracks allocations by printing or otherwise
+/// having side effects.
+#[stable(feature = "global_alloc", since = "1.28.0")]
+pub unsafe trait GlobalAlloc {
+ /// Allocate memory as described by the given `layout`.
+ ///
+ /// Returns a pointer to newly-allocated memory,
+ /// or null to indicate allocation failure.
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe because undefined behavior can result
+ /// if the caller does not ensure that `layout` has non-zero size.
+ ///
+ /// (Extension subtraits might provide more specific bounds on
+ /// behavior, e.g., guarantee a sentinel address or a null pointer
+ /// in response to a zero-size allocation request.)
+ ///
+ /// The allocated block of memory may or may not be initialized.
+ ///
+ /// # Errors
+ ///
+ /// Returning a null pointer indicates that either memory is exhausted
+ /// or `layout` does not meet this allocator's size or alignment constraints.
+ ///
+ /// Implementations are encouraged to return null on memory
+ /// exhaustion rather than aborting, but this is not
+ /// a strict requirement. (Specifically: it is *legal* to
+ /// implement this trait atop an underlying native allocation
+ /// library that aborts on memory exhaustion.)
+ ///
+ /// Clients wishing to abort computation in response to an
+ /// allocation error are encouraged to call the [`handle_alloc_error`] function,
+ /// rather than directly invoking `panic!` or similar.
+ ///
+ /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
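+ ///
+ /// # Examples
+ ///
+ /// A minimal usage sketch, shown here against the std `System` allocator
+ /// (any implementation of this trait is driven the same way):
+ ///
+ /// ```
+ /// use std::alloc::{GlobalAlloc, Layout, System};
+ ///
+ /// unsafe {
+ /// let layout = Layout::new::<u64>();
+ /// let ptr = System.alloc(layout);
+ /// if !ptr.is_null() {
+ /// // The block is large enough and aligned for a `u64`.
+ /// ptr.cast::<u64>().write(42);
+ /// assert_eq!(ptr.cast::<u64>().read(), 42);
+ /// System.dealloc(ptr, layout);
+ /// }
+ /// }
+ /// ```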
+ #[stable(feature = "global_alloc", since = "1.28.0")]
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8;
+
+ /// Deallocate the block of memory at the given `ptr` pointer with the given `layout`.
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe because undefined behavior can result
+ /// if the caller does not ensure all of the following:
+ ///
+ /// * `ptr` must denote a block of memory currently allocated via
+ /// this allocator,
+ ///
+ /// * `layout` must be the same layout that was used
+ /// to allocate that block of memory.
+ #[stable(feature = "global_alloc", since = "1.28.0")]
+ unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout);
+
+ /// Behaves like `alloc`, but also ensures that the contents
+ /// are set to zero before being returned.
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe for the same reasons that `alloc` is.
+ /// However the allocated block of memory is guaranteed to be initialized.
+ ///
+ /// # Errors
+ ///
+ /// Returning a null pointer indicates that either memory is exhausted
+ /// or `layout` does not meet the allocator's size or alignment constraints,
+ /// just as in `alloc`.
+ ///
+ /// Clients wishing to abort computation in response to an
+ /// allocation error are encouraged to call the [`handle_alloc_error`] function,
+ /// rather than directly invoking `panic!` or similar.
+ ///
+ /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
+ #[stable(feature = "global_alloc", since = "1.28.0")]
+ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+ let size = layout.size();
+ // SAFETY: the safety contract for `alloc` must be upheld by the caller.
+ let ptr = unsafe { self.alloc(layout) };
+ if !ptr.is_null() {
+ // SAFETY: as allocation succeeded, the region from `ptr`
+ // of size `size` is guaranteed to be valid for writes.
+ unsafe { ptr::write_bytes(ptr, 0, size) };
+ }
+ ptr
+ }
+
+ /// Shrink or grow a block of memory to the given `new_size`.
+ /// The block is described by the given `ptr` pointer and `layout`.
+ ///
+ /// If this returns a non-null pointer, then ownership of the memory block
+ /// referenced by `ptr` has been transferred to this allocator.
+ /// The memory may or may not have been deallocated, and should be
+ /// considered unusable. The new memory block is allocated with `layout`,
+ /// but with the `size` updated to `new_size`. This new layout should be
+ /// used when deallocating the new memory block with `dealloc`. The range
+ /// `0..min(layout.size(), new_size)` of the new memory block is
+ /// guaranteed to have the same values as the original block.
+ ///
+ /// If this method returns null, then ownership of the memory
+ /// block has not been transferred to this allocator, and the
+ /// contents of the memory block are unaltered.
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe because undefined behavior can result
+ /// if the caller does not ensure all of the following:
+ ///
+ /// * `ptr` must be currently allocated via this allocator,
+ ///
+ /// * `layout` must be the same layout that was used
+ /// to allocate that block of memory,
+ ///
+ /// * `new_size` must be greater than zero.
+ ///
+ /// * `new_size`, when rounded up to the nearest multiple of `layout.align()`,
+ /// must not overflow (i.e., the rounded value must be less than or equal to `usize::MAX`).
+ ///
+ /// (Extension subtraits might provide more specific bounds on
+ /// behavior, e.g., guarantee a sentinel address or a null pointer
+ /// in response to a zero-size allocation request.)
+ ///
+ /// # Errors
+ ///
+ /// Returns null if the new layout does not meet the size
+ /// and alignment constraints of the allocator, or if reallocation
+ /// otherwise fails.
+ ///
+ /// Implementations are encouraged to return null on memory
+ /// exhaustion rather than panicking or aborting, but this is not
+ /// a strict requirement. (Specifically: it is *legal* to
+ /// implement this trait atop an underlying native allocation
+ /// library that aborts on memory exhaustion.)
+ ///
+ /// Clients wishing to abort computation in response to a
+ /// reallocation error are encouraged to call the [`handle_alloc_error`] function,
+ /// rather than directly invoking `panic!` or similar.
+ ///
+ /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
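+ ///
+ /// # Examples
+ ///
+ /// A rough sketch of the grow-or-fall-back pattern, using the std `System`
+ /// allocator for illustration:
+ ///
+ /// ```
+ /// use std::alloc::{GlobalAlloc, Layout, System};
+ ///
+ /// unsafe {
+ /// let layout = Layout::array::<u8>(16).unwrap();
+ /// let ptr = System.alloc(layout);
+ /// if !ptr.is_null() {
+ /// // Grow the block; the first 16 bytes are preserved on success.
+ /// let new_ptr = System.realloc(ptr, layout, 32);
+ /// if !new_ptr.is_null() {
+ /// // Deallocate using the layout with the size updated to `new_size`.
+ /// System.dealloc(new_ptr, Layout::array::<u8>(32).unwrap());
+ /// } else {
+ /// // On failure the original block is still valid.
+ /// System.dealloc(ptr, layout);
+ /// }
+ /// }
+ /// }
+ /// ```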
+ #[stable(feature = "global_alloc", since = "1.28.0")]
+ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ // SAFETY: the caller must ensure that the `new_size` does not overflow.
+ // `layout.align()` comes from a `Layout` and is thus guaranteed to be valid.
+ let new_layout = unsafe { Layout::from_size_align_unchecked(new_size, layout.align()) };
+ // SAFETY: the caller must ensure that `new_layout` is greater than zero.
+ let new_ptr = unsafe { self.alloc(new_layout) };
+ if !new_ptr.is_null() {
+ // SAFETY: the previously allocated block cannot overlap the newly allocated block.
+ // The safety contract for `dealloc` must be upheld by the caller.
+ unsafe {
+ ptr::copy_nonoverlapping(ptr, new_ptr, cmp::min(layout.size(), new_size));
+ self.dealloc(ptr, layout);
+ }
+ }
+ new_ptr
+ }
+}
diff --git a/library/core/src/alloc/layout.rs b/library/core/src/alloc/layout.rs
new file mode 100644
index 000000000..2f378836c
--- /dev/null
+++ b/library/core/src/alloc/layout.rs
@@ -0,0 +1,443 @@
+use crate::cmp;
+use crate::fmt;
+use crate::mem::{self, ValidAlign};
+use crate::ptr::NonNull;
+
+// While this function is used in one place and its implementation
+// could be inlined, the previous attempts to do so made rustc
+// slower:
+//
+// * https://github.com/rust-lang/rust/pull/72189
+// * https://github.com/rust-lang/rust/pull/79827
+const fn size_align<T>() -> (usize, usize) {
+ (mem::size_of::<T>(), mem::align_of::<T>())
+}
+
+/// Layout of a block of memory.
+///
+/// An instance of `Layout` describes a particular layout of memory.
+/// You build a `Layout` up as an input to give to an allocator.
+///
+/// All layouts have an associated size and a power-of-two alignment.
+///
+/// (Note that layouts are *not* required to have non-zero size,
+/// even though `GlobalAlloc` requires that all memory requests
+/// be non-zero in size. A caller must either ensure that conditions
+/// like this are met, use specific allocators with looser
+/// requirements, or use the more lenient `Allocator` interface.)
+#[stable(feature = "alloc_layout", since = "1.28.0")]
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+#[lang = "alloc_layout"]
+pub struct Layout {
+ // size of the requested block of memory, measured in bytes.
+ size: usize,
+
+ // alignment of the requested block of memory, measured in bytes.
+ // we ensure that this is always a power-of-two, because APIs
+ // like `posix_memalign` require it and it is a reasonable
+ // constraint to impose on Layout constructors.
+ //
+ // (However, we do not analogously require `align >= sizeof(void*)`,
+ // even though that is *also* a requirement of `posix_memalign`.)
+ align: ValidAlign,
+}
+
+impl Layout {
+ /// Constructs a `Layout` from a given `size` and `align`,
+ /// or returns `LayoutError` if any of the following conditions
+ /// are not met:
+ ///
+ /// * `align` must not be zero,
+ ///
+ /// * `align` must be a power of two,
+ ///
+ /// * `size`, when rounded up to the nearest multiple of `align`,
+ /// must not overflow (i.e., the rounded value must be less than
+ /// or equal to `usize::MAX`).
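+ ///
+ /// # Examples
+ ///
+ /// A quick sketch of the checks (the sizes and alignments are arbitrary):
+ ///
+ /// ```
+ /// use std::alloc::Layout;
+ ///
+ /// // A power-of-two alignment is accepted.
+ /// assert!(Layout::from_size_align(64, 8).is_ok());
+ /// // A non-power-of-two alignment is rejected.
+ /// assert!(Layout::from_size_align(64, 3).is_err());
+ /// // So is a size that would overflow when rounded up to the alignment.
+ /// assert!(Layout::from_size_align(usize::MAX, 2).is_err());
+ /// ```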
+ #[stable(feature = "alloc_layout", since = "1.28.0")]
+ #[rustc_const_stable(feature = "const_alloc_layout_size_align", since = "1.50.0")]
+ #[inline]
+ pub const fn from_size_align(size: usize, align: usize) -> Result<Self, LayoutError> {
+ if !align.is_power_of_two() {
+ return Err(LayoutError);
+ }
+
+ // (power-of-two implies align != 0.)
+
+ // Rounded up size is:
+ // size_rounded_up = (size + align - 1) & !(align - 1);
+ //
+ // We know from above that align != 0. If adding (align - 1)
+ // does not overflow, then rounding up will be fine.
+ //
+ // Conversely, &-masking with !(align - 1) will subtract off
+ // only low-order-bits. Thus if overflow occurs with the sum,
+ // the &-mask cannot subtract enough to undo that overflow.
+ //
+ // Above implies that checking for summation overflow is both
+ // necessary and sufficient.
+ if size > usize::MAX - (align - 1) {
+ return Err(LayoutError);
+ }
+
+ // SAFETY: the conditions for `from_size_align_unchecked` have been
+ // checked above.
+ unsafe { Ok(Layout::from_size_align_unchecked(size, align)) }
+ }
+
+ /// Creates a layout, bypassing all checks.
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe as it does not verify the preconditions from
+ /// [`Layout::from_size_align`].
+ #[stable(feature = "alloc_layout", since = "1.28.0")]
+ #[rustc_const_stable(feature = "const_alloc_layout_unchecked", since = "1.36.0")]
+ #[must_use]
+ #[inline]
+ pub const unsafe fn from_size_align_unchecked(size: usize, align: usize) -> Self {
+ // SAFETY: the caller must ensure that `align` is a power of two.
+ Layout { size, align: unsafe { ValidAlign::new_unchecked(align) } }
+ }
+
+ /// The minimum size in bytes for a memory block of this layout.
+ #[stable(feature = "alloc_layout", since = "1.28.0")]
+ #[rustc_const_stable(feature = "const_alloc_layout_size_align", since = "1.50.0")]
+ #[must_use]
+ #[inline]
+ pub const fn size(&self) -> usize {
+ self.size
+ }
+
+ /// The minimum byte alignment for a memory block of this layout.
+ #[stable(feature = "alloc_layout", since = "1.28.0")]
+ #[rustc_const_stable(feature = "const_alloc_layout_size_align", since = "1.50.0")]
+ #[must_use = "this returns the minimum alignment, \
+ without modifying the layout"]
+ #[inline]
+ pub const fn align(&self) -> usize {
+ self.align.as_nonzero().get()
+ }
+
+ /// Constructs a `Layout` suitable for holding a value of type `T`.
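+ ///
+ /// # Examples
+ ///
+ /// A small sketch of the equivalence with `mem::size_of`/`mem::align_of`:
+ ///
+ /// ```
+ /// use std::alloc::Layout;
+ /// use std::mem;
+ ///
+ /// let layout = Layout::new::<u32>();
+ /// assert_eq!(layout.size(), mem::size_of::<u32>());
+ /// assert_eq!(layout.align(), mem::align_of::<u32>());
+ /// ```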
+ #[stable(feature = "alloc_layout", since = "1.28.0")]
+ #[rustc_const_stable(feature = "alloc_layout_const_new", since = "1.42.0")]
+ #[must_use]
+ #[inline]
+ pub const fn new<T>() -> Self {
+ let (size, align) = size_align::<T>();
+ // SAFETY: the align is guaranteed by Rust to be a power of two and
+ // the size+align combo is guaranteed to fit in our address space. As a
+ // result use the unchecked constructor here to avoid inserting code
+ // that panics if it isn't optimized well enough.
+ unsafe { Layout::from_size_align_unchecked(size, align) }
+ }
+
+ /// Produces layout describing a record that could be used to
+ /// allocate backing structure for `T` (which could be a trait
+ /// or other unsized type like a slice).
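+ ///
+ /// # Examples
+ ///
+ /// A brief sketch with a slice, one of the unsized cases mentioned above:
+ ///
+ /// ```
+ /// use std::alloc::Layout;
+ ///
+ /// let xs: &[u8] = &[1, 2, 3];
+ /// assert_eq!(Layout::for_value(xs).size(), 3);
+ /// ```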
+ #[stable(feature = "alloc_layout", since = "1.28.0")]
+ #[must_use]
+ #[inline]
+ pub fn for_value<T: ?Sized>(t: &T) -> Self {
+ let (size, align) = (mem::size_of_val(t), mem::align_of_val(t));
+ debug_assert!(Layout::from_size_align(size, align).is_ok());
+ // SAFETY: see rationale in `new` for why this is using the unsafe variant
+ unsafe { Layout::from_size_align_unchecked(size, align) }
+ }
+
+ /// Produces layout describing a record that could be used to
+ /// allocate backing structure for `T` (which could be a trait
+ /// or other unsized type like a slice).
+ ///
+ /// # Safety
+ ///
+ /// This function is only safe to call if the following conditions hold:
+ ///
+ /// - If `T` is `Sized`, this function is always safe to call.
+ /// - If the unsized tail of `T` is:
+ /// - a [slice], then the length of the slice tail must be an initialized
+ /// integer, and the size of the *entire value*
+ /// (dynamic tail length + statically sized prefix) must fit in `isize`.
+ /// - a [trait object], then the vtable part of the pointer must point
+ /// to a valid vtable for the type `T` acquired by an unsizing coercion,
+ /// and the size of the *entire value*
+ /// (dynamic tail length + statically sized prefix) must fit in `isize`.
+ /// - an (unstable) [extern type], then this function is always safe to
+ /// call, but may panic or otherwise return the wrong value, as the
+ /// extern type's layout is not known. This is the same behavior as
+ /// [`Layout::for_value`] on a reference to an extern type tail.
+ /// - otherwise, it is conservatively not allowed to call this function.
+ ///
+ /// [trait object]: ../../book/ch17-02-trait-objects.html
+ /// [extern type]: ../../unstable-book/language-features/extern-types.html
+ #[unstable(feature = "layout_for_ptr", issue = "69835")]
+ #[must_use]
+ pub unsafe fn for_value_raw<T: ?Sized>(t: *const T) -> Self {
+ // SAFETY: we pass along the prerequisites of these functions to the caller
+ let (size, align) = unsafe { (mem::size_of_val_raw(t), mem::align_of_val_raw(t)) };
+ debug_assert!(Layout::from_size_align(size, align).is_ok());
+ // SAFETY: see rationale in `new` for why this is using the unsafe variant
+ unsafe { Layout::from_size_align_unchecked(size, align) }
+ }
+
+ /// Creates a `NonNull` that is dangling, but well-aligned for this Layout.
+ ///
+ /// Note that the pointer value may potentially represent a valid pointer,
+ /// which means this must not be used as a "not yet initialized"
+ /// sentinel value. Types that lazily allocate must track initialization by
+ /// some other means.
+ #[unstable(feature = "alloc_layout_extra", issue = "55724")]
+ #[rustc_const_unstable(feature = "alloc_layout_extra", issue = "55724")]
+ #[must_use]
+ #[inline]
+ pub const fn dangling(&self) -> NonNull<u8> {
+ // SAFETY: align is guaranteed to be non-zero
+ unsafe { NonNull::new_unchecked(crate::ptr::invalid_mut::<u8>(self.align())) }
+ }
+
+ /// Creates a layout describing the record that can hold a value
+ /// of the same layout as `self`, but that also is aligned to
+ /// alignment `align` (measured in bytes).
+ ///
+ /// If `self` already meets the prescribed alignment, then returns
+ /// `self`.
+ ///
+ /// Note that this method does not add any padding to the overall
+ /// size, regardless of whether the returned layout has a different
+ /// alignment. In other words, if `K` has size 16, `K.align_to(32)`
+ /// will *still* have size 16.
+ ///
+ /// Returns an error if the combination of `self.size()` and the given
+ /// `align` violates the conditions listed in [`Layout::from_size_align`].
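+ ///
+ /// # Examples
+ ///
+ /// A short sketch (sizes and alignments chosen arbitrarily):
+ ///
+ /// ```
+ /// use std::alloc::Layout;
+ ///
+ /// let layout = Layout::from_size_align(16, 4).unwrap();
+ /// // Raising the alignment leaves the size untouched.
+ /// let aligned = layout.align_to(32).unwrap();
+ /// assert_eq!(aligned.size(), 16);
+ /// assert_eq!(aligned.align(), 32);
+ /// ```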
+ #[stable(feature = "alloc_layout_manipulation", since = "1.44.0")]
+ #[inline]
+ pub fn align_to(&self, align: usize) -> Result<Self, LayoutError> {
+ Layout::from_size_align(self.size(), cmp::max(self.align(), align))
+ }
+
+ /// Returns the amount of padding we must insert after `self`
+ /// to ensure that the following address will satisfy `align`
+ /// (measured in bytes).
+ ///
+ /// e.g., if `self.size()` is 9, then `self.padding_needed_for(4)`
+ /// returns 3, because that is the minimum number of bytes of
+ /// padding required to get a 4-aligned address (assuming that the
+ /// corresponding memory block starts at a 4-aligned address).
+ ///
+ /// The return value of this function has no meaning if `align` is
+ /// not a power-of-two.
+ ///
+ /// Note that the utility of the returned value requires `align`
+ /// to be less than or equal to the alignment of the starting
+ /// address for the whole allocated block of memory. One way to
+ /// satisfy this constraint is to ensure `align <= self.align()`.
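+ ///
+ /// A sketch of the worked example above (nightly-only, since this method
+ /// is gated on the `alloc_layout_extra` feature):
+ ///
+ /// ```
+ /// #![feature(alloc_layout_extra)]
+ /// use std::alloc::Layout;
+ ///
+ /// // A 9-byte layout needs 3 bytes of padding to reach the next
+ /// // 4-aligned address (9 is rounded up to 12).
+ /// let layout = Layout::from_size_align(9, 4).unwrap();
+ /// assert_eq!(layout.padding_needed_for(4), 3);
+ /// ```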
+ #[unstable(feature = "alloc_layout_extra", issue = "55724")]
+ #[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
+ #[must_use = "this returns the padding needed, \
+ without modifying the `Layout`"]
+ #[inline]
+ pub const fn padding_needed_for(&self, align: usize) -> usize {
+ let len = self.size();
+
+ // Rounded up value is:
+ // len_rounded_up = (len + align - 1) & !(align - 1);
+ // and then we return the padding difference: `len_rounded_up - len`.
+ //
+ // We use modular arithmetic throughout:
+ //
+ // 1. align is guaranteed to be > 0, so align - 1 is always
+ // valid.
+ //
+ // 2. `len + align - 1` can overflow by at most `align - 1`,
+ // so the &-mask with `!(align - 1)` will ensure that in the
+ // case of overflow, `len_rounded_up` will itself be 0.
+ // Thus the returned padding, when added to `len`, yields 0,
+ // which trivially satisfies the alignment `align`.
+ //
+ // (Of course, attempts to allocate blocks of memory whose
+ // size and padding overflow in the above manner should cause
+ // the allocator to yield an error anyway.)
+
+ let len_rounded_up = len.wrapping_add(align).wrapping_sub(1) & !align.wrapping_sub(1);
+ len_rounded_up.wrapping_sub(len)
+ }
+
+ /// Creates a layout by rounding the size of this layout up to a multiple
+ /// of the layout's alignment.
+ ///
+ /// This is equivalent to adding the result of `padding_needed_for`
+ /// to the layout's current size.
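+ ///
+ /// # Examples
+ ///
+ /// A small sketch (the size and alignment are arbitrary):
+ ///
+ /// ```
+ /// use std::alloc::Layout;
+ ///
+ /// let layout = Layout::from_size_align(10, 4).unwrap();
+ /// // 10 rounded up to a multiple of 4 is 12.
+ /// let padded = layout.pad_to_align();
+ /// assert_eq!(padded.size(), 12);
+ /// assert_eq!(padded.align(), 4);
+ /// ```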
+ #[stable(feature = "alloc_layout_manipulation", since = "1.44.0")]
+ #[must_use = "this returns a new `Layout`, \
+ without modifying the original"]
+ #[inline]
+ pub fn pad_to_align(&self) -> Layout {
+ let pad = self.padding_needed_for(self.align());
+ // This cannot overflow. Quoting from the invariant of Layout:
+ // > `size`, when rounded up to the nearest multiple of `align`,
+ // > must not overflow (i.e., the rounded value must be less than
+ // > or equal to `usize::MAX`)
+ let new_size = self.size() + pad;
+
+ // SAFETY: self.align is already known to be valid and new_size has been
+ // padded already.
+ unsafe { Layout::from_size_align_unchecked(new_size, self.align()) }
+ }
+
+ /// Creates a layout describing the record for `n` instances of
+ /// `self`, with a suitable amount of padding between each to
+ /// ensure that each instance is given its requested size and
+ /// alignment. On success, returns `(k, offs)` where `k` is the
+ /// layout of the array and `offs` is the distance between the start
+ /// of each element in the array.
+ ///
+ /// On arithmetic overflow, returns `LayoutError`.
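+ ///
+ /// A brief sketch (nightly-only, via the `alloc_layout_extra` feature):
+ ///
+ /// ```
+ /// #![feature(alloc_layout_extra)]
+ /// use std::alloc::Layout;
+ ///
+ /// // Five `u16`s pack with no padding: the element stride equals the
+ /// // element size.
+ /// let (array, stride) = Layout::new::<u16>().repeat(5).unwrap();
+ /// assert_eq!(array.size(), 10);
+ /// assert_eq!(stride, 2);
+ /// ```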
+ #[unstable(feature = "alloc_layout_extra", issue = "55724")]
+ #[inline]
+ pub fn repeat(&self, n: usize) -> Result<(Self, usize), LayoutError> {
+ // This cannot overflow. Quoting from the invariant of Layout:
+ // > `size`, when rounded up to the nearest multiple of `align`,
+ // > must not overflow (i.e., the rounded value must be less than
+ // > or equal to `usize::MAX`)
+ let padded_size = self.size() + self.padding_needed_for(self.align());
+ let alloc_size = padded_size.checked_mul(n).ok_or(LayoutError)?;
+
+ // SAFETY: self.align is already known to be valid and alloc_size has been
+ // padded already.
+ unsafe { Ok((Layout::from_size_align_unchecked(alloc_size, self.align()), padded_size)) }
+ }
+
+ /// Creates a layout describing the record for `self` followed by
+ /// `next`, including any necessary padding to ensure that `next`
+ /// will be properly aligned, but *no trailing padding*.
+ ///
+ /// In order to match C representation layout `repr(C)`, you should
+ /// call `pad_to_align` after extending the layout with all fields.
+ /// (There is no way to match the default Rust representation
+ /// layout `repr(Rust)`, as it is unspecified.)
+ ///
+ /// Note that the alignment of the resulting layout will be the maximum of
+ /// those of `self` and `next`, in order to ensure alignment of both parts.
+ ///
+ /// Returns `Ok((k, offset))`, where `k` is layout of the concatenated
+ /// record and `offset` is the relative location, in bytes, of the
+ /// start of the `next` embedded within the concatenated record
+ /// (assuming that the record itself starts at offset 0).
+ ///
+ /// On arithmetic overflow, returns `LayoutError`.
+ ///
+ /// # Examples
+ ///
+ /// To calculate the layout of a `#[repr(C)]` structure and the offsets of
+ /// the fields from its fields' layouts:
+ ///
+ /// ```rust
+ /// # use std::alloc::{Layout, LayoutError};
+ /// pub fn repr_c(fields: &[Layout]) -> Result<(Layout, Vec<usize>), LayoutError> {
+ /// let mut offsets = Vec::new();
+ /// let mut layout = Layout::from_size_align(0, 1)?;
+ /// for &field in fields {
+ /// let (new_layout, offset) = layout.extend(field)?;
+ /// layout = new_layout;
+ /// offsets.push(offset);
+ /// }
+ /// // Remember to finalize with `pad_to_align`!
+ /// Ok((layout.pad_to_align(), offsets))
+ /// }
+ /// # // test that it works
+ /// # #[repr(C)] struct S { a: u64, b: u32, c: u16, d: u32 }
+ /// # let s = Layout::new::<S>();
+ /// # let u16 = Layout::new::<u16>();
+ /// # let u32 = Layout::new::<u32>();
+ /// # let u64 = Layout::new::<u64>();
+ /// # assert_eq!(repr_c(&[u64, u32, u16, u32]), Ok((s, vec![0, 8, 12, 16])));
+ /// ```
+ #[stable(feature = "alloc_layout_manipulation", since = "1.44.0")]
+ #[inline]
+ pub fn extend(&self, next: Self) -> Result<(Self, usize), LayoutError> {
+ let new_align = cmp::max(self.align(), next.align());
+ let pad = self.padding_needed_for(next.align());
+
+ let offset = self.size().checked_add(pad).ok_or(LayoutError)?;
+ let new_size = offset.checked_add(next.size()).ok_or(LayoutError)?;
+
+ let layout = Layout::from_size_align(new_size, new_align)?;
+ Ok((layout, offset))
+ }
+
+ /// Creates a layout describing the record for `n` instances of
+ /// `self`, with no padding between each instance.
+ ///
+ /// Note that, unlike `repeat`, `repeat_packed` does not guarantee
+ /// that the repeated instances of `self` will be properly
+ /// aligned, even if a given instance of `self` is properly
+ /// aligned. In other words, if the layout returned by
+ /// `repeat_packed` is used to allocate an array, it is not
+ /// guaranteed that all elements in the array will be properly
+ /// aligned.
+ ///
+ /// On arithmetic overflow, returns `LayoutError`.
+ #[unstable(feature = "alloc_layout_extra", issue = "55724")]
+ #[inline]
+ pub fn repeat_packed(&self, n: usize) -> Result<Self, LayoutError> {
+ let size = self.size().checked_mul(n).ok_or(LayoutError)?;
+ Layout::from_size_align(size, self.align())
+ }
+
+ /// Creates a layout describing the record for `self` followed by
+ /// `next` with no additional padding between the two. Since no
+ /// padding is inserted, the alignment of `next` is irrelevant,
+ /// and is not incorporated *at all* into the resulting layout.
+ ///
+ /// On arithmetic overflow, returns `LayoutError`.
+ #[unstable(feature = "alloc_layout_extra", issue = "55724")]
+ #[inline]
+ pub fn extend_packed(&self, next: Self) -> Result<Self, LayoutError> {
+ let new_size = self.size().checked_add(next.size()).ok_or(LayoutError)?;
+ Layout::from_size_align(new_size, self.align())
+ }
+
+ /// Creates a layout describing the record for a `[T; n]`.
+ ///
+ /// On arithmetic overflow, returns `LayoutError`.
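+ ///
+ /// # Examples
+ ///
+ /// A quick sketch of the equivalence with the layout of `[T; n]`:
+ ///
+ /// ```
+ /// use std::alloc::Layout;
+ ///
+ /// let layout = Layout::array::<u32>(4).unwrap();
+ /// assert_eq!(layout, Layout::new::<[u32; 4]>());
+ /// ```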
+ #[stable(feature = "alloc_layout_manipulation", since = "1.44.0")]
+ #[inline]
+ pub fn array<T>(n: usize) -> Result<Self, LayoutError> {
+ let array_size = mem::size_of::<T>().checked_mul(n).ok_or(LayoutError)?;
+
+ // SAFETY:
+ // - Size: `array_size` cannot be too big because `size_of::<T>()` must
+ // be a multiple of `align_of::<T>()`. Therefore, `array_size`
+ // rounded up to the nearest multiple of `align_of::<T>()` is just
+ // `array_size`. And `array_size` cannot be too big because it was
+ // just checked by the `checked_mul()`.
+ // - Alignment: `align_of::<T>()` will always give an acceptable
+ // (non-zero, power of two) alignment.
+ Ok(unsafe { Layout::from_size_align_unchecked(array_size, mem::align_of::<T>()) })
+ }
+}
+
+#[stable(feature = "alloc_layout", since = "1.28.0")]
+#[deprecated(
+ since = "1.52.0",
+ note = "Name does not follow std convention, use LayoutError",
+ suggestion = "LayoutError"
+)]
+pub type LayoutErr = LayoutError;
+
+/// The parameters given to `Layout::from_size_align`
+/// or some other `Layout` constructor
+/// do not satisfy its documented constraints.
+#[stable(feature = "alloc_layout_error", since = "1.50.0")]
+#[non_exhaustive]
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct LayoutError;
+
+// (we need this for downstream impl of trait Error)
+#[stable(feature = "alloc_layout", since = "1.28.0")]
+impl fmt::Display for LayoutError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("invalid parameters to Layout::from_size_align")
+ }
+}
diff --git a/library/core/src/alloc/mod.rs b/library/core/src/alloc/mod.rs
new file mode 100644
index 000000000..6cc6e359e
--- /dev/null
+++ b/library/core/src/alloc/mod.rs
@@ -0,0 +1,410 @@
+//! Memory allocation APIs
+
+#![stable(feature = "alloc_module", since = "1.28.0")]
+
+mod global;
+mod layout;
+
+#[stable(feature = "global_alloc", since = "1.28.0")]
+pub use self::global::GlobalAlloc;
+#[stable(feature = "alloc_layout", since = "1.28.0")]
+pub use self::layout::Layout;
+#[stable(feature = "alloc_layout", since = "1.28.0")]
+#[deprecated(
+ since = "1.52.0",
+ note = "Name does not follow std convention, use LayoutError",
+ suggestion = "LayoutError"
+)]
+#[allow(deprecated, deprecated_in_future)]
+pub use self::layout::LayoutErr;
+
+#[stable(feature = "alloc_layout_error", since = "1.50.0")]
+pub use self::layout::LayoutError;
+
+use crate::fmt;
+use crate::ptr::{self, NonNull};
+
+/// The `AllocError` error indicates an allocation failure
+/// that may be due to resource exhaustion or to the given
+/// input arguments not being valid for this allocator.
+#[unstable(feature = "allocator_api", issue = "32838")]
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub struct AllocError;
+
+// (we need this for downstream impl of trait Error)
+#[unstable(feature = "allocator_api", issue = "32838")]
+impl fmt::Display for AllocError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("memory allocation failed")
+ }
+}
+
+/// An implementation of `Allocator` can allocate, grow, shrink, and deallocate arbitrary blocks of
+/// data described via [`Layout`][].
+///
+/// `Allocator` is designed to be implemented on ZSTs, references, or smart pointers because an
+/// allocator like `MyAlloc([u8; N])` cannot be moved without updating the pointers to the
+/// allocated memory.
+///
+/// Unlike [`GlobalAlloc`][], zero-sized allocations are allowed in `Allocator`. If an underlying
+/// allocator does not support this (like jemalloc) or returns a null pointer (such as
+/// `libc::malloc`), this must be caught by the implementation.
+///
+/// ### Currently allocated memory
+///
+/// Some of the methods require that a memory block be *currently allocated* via an allocator. This
+/// means that:
+///
+/// * the starting address for that memory block was previously returned by [`allocate`], [`grow`], or
+/// [`shrink`], and
+///
+/// * the memory block has not been subsequently deallocated, where blocks are either deallocated
+/// directly by being passed to [`deallocate`] or were changed by being passed to [`grow`] or
+/// [`shrink`] that returns `Ok`. If `grow` or `shrink` have returned `Err`, the passed pointer
+/// remains valid.
+///
+/// [`allocate`]: Allocator::allocate
+/// [`grow`]: Allocator::grow
+/// [`shrink`]: Allocator::shrink
+/// [`deallocate`]: Allocator::deallocate
+///
+/// ### Memory fitting
+///
+/// Some of the methods require that a layout *fit* a memory block. What it means for a layout to
+/// "fit" a memory block means (or equivalently, for a memory block to "fit" a layout) is that the
+/// following conditions must hold:
+///
+/// * The block must be allocated with the same alignment as [`layout.align()`], and
+///
+/// * The provided [`layout.size()`] must fall in the range `min ..= max`, where:
+/// - `min` is the size of the layout most recently used to allocate the block, and
+/// - `max` is the latest actual size returned from [`allocate`], [`grow`], or [`shrink`].
+///
+/// [`layout.align()`]: Layout::align
+/// [`layout.size()`]: Layout::size
+///
+/// # Safety
+///
+/// * Memory blocks returned from an allocator must point to valid memory and retain their validity
+/// until the instance and all of its clones are dropped,
+///
+/// * cloning or moving the allocator must not invalidate memory blocks returned from this
+/// allocator. A cloned allocator must behave like the same allocator, and
+///
+/// * any pointer to a memory block which is [*currently allocated*] may be passed to any other
+/// method of the allocator.
+///
+/// [*currently allocated*]: #currently-allocated-memory
+#[unstable(feature = "allocator_api", issue = "32838")]
+pub unsafe trait Allocator {
+ /// Attempts to allocate a block of memory.
+ ///
+ /// On success, returns a [`NonNull<[u8]>`][NonNull] meeting the size and alignment guarantees of `layout`.
+ ///
+ /// The returned block may have a larger size than specified by `layout.size()`, and may or may
+ /// not have its contents initialized.
+ ///
+ /// # Errors
+ ///
+ /// Returning `Err` indicates that either memory is exhausted or `layout` does not meet
+ /// the allocator's size or alignment constraints.
+ ///
+ /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
+ /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
+ /// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
+ ///
+ /// Clients wishing to abort computation in response to an allocation error are encouraged to
+ /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
+ ///
+ /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
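+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch using the unstable `Global` handle (requires the
+ /// `allocator_api` feature):
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ /// use std::alloc::{Allocator, Global, Layout};
+ ///
+ /// let layout = Layout::new::<[u32; 8]>();
+ /// if let Ok(block) = Global.allocate(layout) {
+ /// // `block` covers at least `layout.size()` bytes; return it when done.
+ /// unsafe { Global.deallocate(block.cast(), layout) };
+ /// }
+ /// ```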
+ fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError>;
+
+ /// Behaves like `allocate`, but also ensures that the returned memory is zero-initialized.
+ ///
+ /// # Errors
+ ///
+ /// Returning `Err` indicates that either memory is exhausted or `layout` does not meet
+ /// the allocator's size or alignment constraints.
+ ///
+ /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
+ /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
+ /// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
+ ///
+ /// Clients wishing to abort computation in response to an allocation error are encouraged to
+ /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
+ ///
+ /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
+ fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ let ptr = self.allocate(layout)?;
+ // SAFETY: `allocate` returns a valid memory block
+ unsafe { ptr.as_non_null_ptr().as_ptr().write_bytes(0, ptr.len()) }
+ Ok(ptr)
+ }
+
+ /// Deallocates the memory referenced by `ptr`.
+ ///
+ /// # Safety
+ ///
+ /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator, and
+ /// * `layout` must [*fit*] that block of memory.
+ ///
+ /// [*currently allocated*]: #currently-allocated-memory
+ /// [*fit*]: #memory-fitting
+ unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout);
+
+ /// Attempts to extend the memory block.
+ ///
+ /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the allocated
+ /// memory. The pointer is suitable for holding data described by `new_layout`. To accomplish
+ /// this, the allocator may extend the allocation referenced by `ptr` to fit the new layout.
+ ///
+ /// If this returns `Ok`, then ownership of the memory block referenced by `ptr` has been
+ /// transferred to this allocator. The memory may or may not have been freed, and should be
+ /// considered unusable.
+ ///
+ /// If this method returns `Err`, then ownership of the memory block has not been transferred to
+ /// this allocator, and the contents of the memory block are unaltered.
+ ///
+ /// # Safety
+ ///
+ /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator.
+ /// * `old_layout` must [*fit*] that block of memory (the `new_layout` argument need not fit it).
+ /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
+ ///
+ /// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
+ ///
+ /// [*currently allocated*]: #currently-allocated-memory
+ /// [*fit*]: #memory-fitting
+ ///
+ /// # Errors
+ ///
+ /// Returns `Err` if the new layout does not meet the allocator's size and alignment
+ /// constraints, or if growing otherwise fails.
+ ///
+ /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
+ /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
+ /// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
+ ///
+ /// Clients wishing to abort computation in response to an allocation error are encouraged to
+ /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
+ ///
+ /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
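+ ///
+ /// # Examples
+ ///
+ /// A sketch of a typical call sequence, again assuming `std`'s `System` allocator:
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ /// use std::alloc::{Allocator, Layout, System};
+ ///
+ /// let old_layout = Layout::array::<u8>(4).unwrap();
+ /// let new_layout = Layout::array::<u8>(8).unwrap();
+ ///
+ /// let block = System.allocate(old_layout).expect("out of memory");
+ ///
+ /// // SAFETY: `block` is currently allocated via `System`, `old_layout` fits it,
+ /// // and `new_layout.size() >= old_layout.size()`.
+ /// let grown = unsafe { System.grow(block.cast(), old_layout, new_layout) }
+ ///     .expect("out of memory");
+ ///
+ /// // SAFETY: `grown` is now the live block; deallocate it with `new_layout`.
+ /// unsafe { System.deallocate(grown.cast(), new_layout) };
+ /// ```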
+ unsafe fn grow(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ debug_assert!(
+ new_layout.size() >= old_layout.size(),
+ "`new_layout.size()` must be greater than or equal to `old_layout.size()`"
+ );
+
+ let new_ptr = self.allocate(new_layout)?;
+
+ // SAFETY: because `new_layout.size()` must be greater than or equal to
+ // `old_layout.size()`, both the old and new memory allocation are valid for reads and
+ // writes for `old_layout.size()` bytes. Also, because the old allocation wasn't yet
+ // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
+ // safe. The safety contract for `dealloc` must be upheld by the caller.
+ unsafe {
+ ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), old_layout.size());
+ self.deallocate(ptr, old_layout);
+ }
+
+ Ok(new_ptr)
+ }
+
+ /// Behaves like `grow`, but also ensures that the new contents are set to zero before being
+ /// returned.
+ ///
+ /// The memory block will contain the following contents after a successful call to
+ /// `grow_zeroed`:
+ /// * Bytes `0..old_layout.size()` are preserved from the original allocation.
+ /// * Bytes `old_layout.size()..old_size` will either be preserved or zeroed, depending on
+ /// the allocator implementation. `old_size` refers to the size of the memory block prior
+ /// to the `grow_zeroed` call, which may be larger than the size that was originally
+ /// requested when it was allocated.
+ /// * Bytes `old_size..new_size` are zeroed. `new_size` refers to the size of the memory
+ /// block returned by the `grow_zeroed` call.
+ ///
+ /// # Safety
+ ///
+ /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator.
+ /// * `old_layout` must [*fit*] that block of memory (the `new_layout` argument need not fit it).
+ /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
+ ///
+ /// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
+ ///
+ /// [*currently allocated*]: #currently-allocated-memory
+ /// [*fit*]: #memory-fitting
+ ///
+ /// # Errors
+ ///
+ /// Returns `Err` if the new layout does not meet the allocator's size and alignment
+ /// constraints, or if growing otherwise fails.
+ ///
+ /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
+ /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
+ /// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
+ ///
+ /// Clients wishing to abort computation in response to an allocation error are encouraged to
+ /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
+ ///
+ /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
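+ ///
+ /// # Examples
+ ///
+ /// A sketch demonstrating the zeroing guarantee, assuming `std`'s `System` allocator:
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ /// use std::alloc::{Allocator, Layout, System};
+ ///
+ /// let old_layout = Layout::array::<u8>(4).unwrap();
+ /// let new_layout = Layout::array::<u8>(8).unwrap();
+ ///
+ /// let block = System.allocate_zeroed(old_layout).expect("out of memory");
+ ///
+ /// // SAFETY: `block` is currently allocated via `System`, `old_layout` fits it,
+ /// // and the new size is not smaller than the old one.
+ /// let grown = unsafe { System.grow_zeroed(block.cast(), old_layout, new_layout) }
+ ///     .expect("out of memory");
+ ///
+ /// // A byte past the old contents is guaranteed to read as zero.
+ /// // SAFETY: `grown` is live and at least `new_layout.size()` bytes long.
+ /// unsafe {
+ ///     assert_eq!(grown.cast::<u8>().as_ptr().add(4).read(), 0);
+ ///     System.deallocate(grown.cast(), new_layout);
+ /// }
+ /// ```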
+ unsafe fn grow_zeroed(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ debug_assert!(
+ new_layout.size() >= old_layout.size(),
+ "`new_layout.size()` must be greater than or equal to `old_layout.size()`"
+ );
+
+ let new_ptr = self.allocate_zeroed(new_layout)?;
+
+ // SAFETY: because `new_layout.size()` must be greater than or equal to
+ // `old_layout.size()`, both the old and new memory allocation are valid for reads and
+ // writes for `old_layout.size()` bytes. Also, because the old allocation wasn't yet
+ // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
+ // safe. The safety contract for `dealloc` must be upheld by the caller.
+ unsafe {
+ ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), old_layout.size());
+ self.deallocate(ptr, old_layout);
+ }
+
+ Ok(new_ptr)
+ }
+
+ /// Attempts to shrink the memory block.
+ ///
+ /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the allocated
+ /// memory. The pointer is suitable for holding data described by `new_layout`. To accomplish
+ /// this, the allocator may shrink the allocation referenced by `ptr` to fit the new layout.
+ ///
+ /// If this returns `Ok`, then ownership of the memory block referenced by `ptr` has been
+ /// transferred to this allocator. The memory may or may not have been freed, and should be
+ /// considered unusable.
+ ///
+ /// If this method returns `Err`, then ownership of the memory block has not been transferred to
+ /// this allocator, and the contents of the memory block are unaltered.
+ ///
+ /// # Safety
+ ///
+ /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator.
+ /// * `old_layout` must [*fit*] that block of memory (the `new_layout` argument need not fit it).
+ /// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`.
+ ///
+ /// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
+ ///
+ /// [*currently allocated*]: #currently-allocated-memory
+ /// [*fit*]: #memory-fitting
+ ///
+ /// # Errors
+ ///
+ /// Returns `Err` if the new layout does not meet the allocator's size and alignment
+ /// constraints, or if shrinking otherwise fails.
+ ///
+ /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
+ /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
+ /// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
+ ///
+ /// Clients wishing to abort computation in response to an allocation error are encouraged to
+ /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
+ ///
+ /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
+ unsafe fn shrink(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ debug_assert!(
+ new_layout.size() <= old_layout.size(),
+ "`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
+ );
+
+ let new_ptr = self.allocate(new_layout)?;
+
+ // SAFETY: because `new_layout.size()` must be smaller than or equal to
+ // `old_layout.size()`, both the old and new memory allocation are valid for reads and
+ // writes for `new_layout.size()` bytes. Also, because the old allocation wasn't yet
+ // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
+ // safe. The safety contract for `dealloc` must be upheld by the caller.
+ unsafe {
+ ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), new_layout.size());
+ self.deallocate(ptr, old_layout);
+ }
+
+ Ok(new_ptr)
+ }
+
+ /// Creates a "by reference" adapter for this instance of `Allocator`.
+ ///
+ /// The returned adapter also implements `Allocator` and will simply borrow this.
+ #[inline(always)]
+ fn by_ref(&self) -> &Self
+ where
+ Self: Sized,
+ {
+ self
+ }
+}
+
+#[unstable(feature = "allocator_api", issue = "32838")]
+unsafe impl<A> Allocator for &A
+where
+ A: Allocator + ?Sized,
+{
+ #[inline]
+ fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ (**self).allocate(layout)
+ }
+
+ #[inline]
+ fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ (**self).allocate_zeroed(layout)
+ }
+
+ #[inline]
+ unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+ // SAFETY: the safety contract must be upheld by the caller
+ unsafe { (**self).deallocate(ptr, layout) }
+ }
+
+ #[inline]
+ unsafe fn grow(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ // SAFETY: the safety contract must be upheld by the caller
+ unsafe { (**self).grow(ptr, old_layout, new_layout) }
+ }
+
+ #[inline]
+ unsafe fn grow_zeroed(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ // SAFETY: the safety contract must be upheld by the caller
+ unsafe { (**self).grow_zeroed(ptr, old_layout, new_layout) }
+ }
+
+ #[inline]
+ unsafe fn shrink(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ // SAFETY: the safety contract must be upheld by the caller
+ unsafe { (**self).shrink(ptr, old_layout, new_layout) }
+ }
+}
diff --git a/library/core/src/any.rs b/library/core/src/any.rs
new file mode 100644
index 000000000..f20c497a1
--- /dev/null
+++ b/library/core/src/any.rs
@@ -0,0 +1,1067 @@
+//! This module contains the `Any` trait, which enables dynamic typing
+//! of any `'static` type through runtime reflection. It also contains the
+//! `Provider` trait and accompanying API, which enable trait objects to provide
+//! data based on typed requests, an alternate form of runtime reflection.
+//!
+//! # `Any` and `TypeId`
+//!
+//! `Any` itself can be used to get a `TypeId`, and has more features when used
+//! as a trait object. As `&dyn Any` (a borrowed trait object), it has the `is`
+//! and `downcast_ref` methods, to test if the contained value is of a given type,
+//! and to get a reference to the inner value as a type. As `&mut dyn Any`, there
+//! is also the `downcast_mut` method, for getting a mutable reference to the
+//! inner value. `Box<dyn Any>` adds the `downcast` method, which attempts to
+//! convert to a `Box<T>`. See the [`Box`] documentation for the full details.
+//!
+//! Note that `&dyn Any` is limited to testing whether a value is of a specified
+//! concrete type, and cannot be used to test whether a type implements a trait.
+//!
+//! [`Box`]: ../../std/boxed/struct.Box.html
+//!
+//! # Smart pointers and `dyn Any`
+//!
+//! One piece of behavior to keep in mind when using `Any` as a trait object,
+//! especially with types like `Box<dyn Any>` or `Arc<dyn Any>`, is that simply
+//! calling `.type_id()` on the value will produce the `TypeId` of the
+//! *container*, not the underlying trait object. This can be avoided by
+//! converting the smart pointer into a `&dyn Any` instead, which will return
+//! the object's `TypeId`. For example:
+//!
+//! ```
+//! use std::any::{Any, TypeId};
+//!
+//! let boxed: Box<dyn Any> = Box::new(3_i32);
+//!
+//! // You're more likely to want this:
+//! let actual_id = (&*boxed).type_id();
+//! // ... than this:
+//! let boxed_id = boxed.type_id();
+//!
+//! assert_eq!(actual_id, TypeId::of::<i32>());
+//! assert_eq!(boxed_id, TypeId::of::<Box<dyn Any>>());
+//! ```
+//!
+//! ## Examples
+//!
+//! Consider a situation where we want to log out a value passed to a function.
+//! We know the value we're working on implements `Debug`, but we don't know its
+//! concrete type. We want to give special treatment to certain types: in this
+//! case printing out the length of `String` values prior to their value.
+//! We don't know the concrete type of our value at compile time, so we need to
+//! use runtime reflection instead.
+//!
+//! ```rust
+//! use std::fmt::Debug;
+//! use std::any::Any;
+//!
+//! // Logger function for any type that implements Debug.
+//! fn log<T: Any + Debug>(value: &T) {
+//! let value_any = value as &dyn Any;
+//!
+//! // Try to convert our value to a `String`. If successful, we want to
+//! // output the `String`'s length as well as its value. If not, it's a
+//! // different type: just print it out unadorned.
+//! match value_any.downcast_ref::<String>() {
+//! Some(as_string) => {
+//! println!("String ({}): {}", as_string.len(), as_string);
+//! }
+//! None => {
+//! println!("{value:?}");
+//! }
+//! }
+//! }
+//!
+//! // This function wants to log its parameter out prior to doing work with it.
+//! fn do_work<T: Any + Debug>(value: &T) {
+//! log(value);
+//! // ...do some other work
+//! }
+//!
+//! fn main() {
+//! let my_string = "Hello World".to_string();
+//! do_work(&my_string);
+//!
+//! let my_i8: i8 = 100;
+//! do_work(&my_i8);
+//! }
+//! ```
+//!
+//! # `Provider` and `Demand`
+//!
+//! `Provider` and the associated APIs support generic, type-driven access to data, and a mechanism
+//! for implementers to provide such data. The key parts of the interface are the `Provider`
+//! trait for objects which can provide data, and the [`request_value`] and [`request_ref`]
+//! functions for requesting data from an object which implements `Provider`. Generally, end users
+//! should not call `request_*` directly; they are helper functions for intermediate implementers
+//! to use to implement a user-facing interface. This is purely for the sake of ergonomics; there is
+//! no safety concern here. Intermediate implementers can typically support methods rather than
+//! free functions and use more specific names.
+//!
+//! Typically, a data provider is a trait object of a trait which extends `Provider`. A user will
+//! request data from a trait object by specifying the type of the data.
+//!
+//! ## Data flow
+//!
+//! * A user requests an object of a specific type, which is delegated to `request_value` or
+//! `request_ref`
+//! * `request_*` creates a `Demand` object and passes it to `Provider::provide`
+//! * The data provider's implementation of `Provider::provide` tries providing values of
+//! different types using `Demand::provide_*`. If the type matches the type requested by
+//! the user, the value will be stored in the `Demand` object.
+//! * `request_*` unpacks the `Demand` object and returns any stored value to the user.
+//!
+//! ## Examples
+//!
+//! ```
+//! # #![feature(provide_any)]
+//! use std::any::{Provider, Demand, request_ref};
+//!
+//! // Definition of MyTrait, a data provider.
+//! trait MyTrait: Provider {
+//! // ...
+//! }
+//!
+//! // Methods on `MyTrait` trait objects.
+//! impl dyn MyTrait + '_ {
+//! /// Get a reference to a field of the implementing struct.
+//! pub fn get_context_by_ref<T: ?Sized + 'static>(&self) -> Option<&T> {
+//! request_ref::<T>(self)
+//! }
+//! }
+//!
+//! // Downstream implementation of `MyTrait` and `Provider`.
+//! # struct SomeConcreteType { some_string: String }
+//! impl MyTrait for SomeConcreteType {
+//! // ...
+//! }
+//!
+//! impl Provider for SomeConcreteType {
+//! fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
+//! // Provide a string reference. We could provide multiple values with
+//! // different types here.
+//! demand.provide_ref::<String>(&self.some_string);
+//! }
+//! }
+//!
+//! // Downstream usage of `MyTrait`.
+//! fn use_my_trait(obj: &dyn MyTrait) {
+//! // Request a &String from obj.
+//! let _ = obj.get_context_by_ref::<String>().unwrap();
+//! }
+//! ```
+//!
+//! In this example, if the concrete type of `obj` in `use_my_trait` is `SomeConcreteType`, then
+//! the `get_context_by_ref` call will return a reference to `obj.some_string` with type `&String`.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::fmt;
+use crate::intrinsics;
+
+///////////////////////////////////////////////////////////////////////////////
+// Any trait
+///////////////////////////////////////////////////////////////////////////////
+
+/// A trait to emulate dynamic typing.
+///
+/// Most types implement `Any`. However, any type which contains a non-`'static` reference does not.
+/// See the [module-level documentation][mod] for more details.
+///
+/// [mod]: crate::any
+// This trait is not unsafe, though we rely on the specifics of its sole impl's
+// `type_id` function in unsafe code (e.g., `downcast`). Normally, that would be
+// a problem, but because the only impl of `Any` is a blanket implementation, no
+// other code can implement `Any`.
+//
+// We could plausibly make this trait unsafe -- it would not cause breakage,
+// since we control all the implementations -- but we choose not to as that's
+// both not really necessary and may confuse users about the distinction of
+// unsafe traits and unsafe methods (i.e., `type_id` would still be safe to call,
+// but we would likely want to indicate as such in documentation).
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "Any")]
+pub trait Any: 'static {
+ /// Gets the `TypeId` of `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::{Any, TypeId};
+ ///
+ /// fn is_string(s: &dyn Any) -> bool {
+ /// TypeId::of::<String>() == s.type_id()
+ /// }
+ ///
+ /// assert_eq!(is_string(&0), false);
+ /// assert_eq!(is_string(&"cookie monster".to_string()), true);
+ /// ```
+ #[stable(feature = "get_type_id", since = "1.34.0")]
+ fn type_id(&self) -> TypeId;
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: 'static + ?Sized> Any for T {
+ fn type_id(&self) -> TypeId {
+ TypeId::of::<T>()
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Extension methods for Any trait objects.
+///////////////////////////////////////////////////////////////////////////////
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for dyn Any {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Any").finish_non_exhaustive()
+ }
+}
+
+// Ensure that the result of e.g., joining a thread can be printed and
+// hence used with `unwrap`. May eventually no longer be needed if
+// dispatch works with upcasting.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for dyn Any + Send {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Any").finish_non_exhaustive()
+ }
+}
+
+#[stable(feature = "any_send_sync_methods", since = "1.28.0")]
+impl fmt::Debug for dyn Any + Send + Sync {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Any").finish_non_exhaustive()
+ }
+}
+
+impl dyn Any {
+ /// Returns `true` if the inner type is the same as `T`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn is_string(s: &dyn Any) {
+ /// if s.is::<String>() {
+ /// println!("It's a string!");
+ /// } else {
+ /// println!("Not a string...");
+ /// }
+ /// }
+ ///
+ /// is_string(&0);
+ /// is_string(&"cookie monster".to_string());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn is<T: Any>(&self) -> bool {
+ // Get `TypeId` of the type this function is instantiated with.
+ let t = TypeId::of::<T>();
+
+ // Get `TypeId` of the type in the trait object (`self`).
+ let concrete = self.type_id();
+
+ // Compare both `TypeId`s on equality.
+ t == concrete
+ }
+
+ /// Returns some reference to the inner value if it is of type `T`, or
+ /// `None` if it isn't.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn print_if_string(s: &dyn Any) {
+ /// if let Some(string) = s.downcast_ref::<String>() {
+ /// println!("It's a string({}): '{}'", string.len(), string);
+ /// } else {
+ /// println!("Not a string...");
+ /// }
+ /// }
+ ///
+ /// print_if_string(&0);
+ /// print_if_string(&"cookie monster".to_string());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn downcast_ref<T: Any>(&self) -> Option<&T> {
+ if self.is::<T>() {
+ // SAFETY: just checked whether we are pointing to the correct type, and we can rely on
+ // that check for memory safety because we have implemented Any for all types; no other
+ // impls can exist as they would conflict with our impl.
+ unsafe { Some(self.downcast_ref_unchecked()) }
+ } else {
+ None
+ }
+ }
+
+ /// Returns some mutable reference to the inner value if it is of type `T`, or
+ /// `None` if it isn't.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn modify_if_u32(s: &mut dyn Any) {
+ /// if let Some(num) = s.downcast_mut::<u32>() {
+ /// *num = 42;
+ /// }
+ /// }
+ ///
+ /// let mut x = 10u32;
+ /// let mut s = "starlord".to_string();
+ ///
+ /// modify_if_u32(&mut x);
+ /// modify_if_u32(&mut s);
+ ///
+ /// assert_eq!(x, 42);
+ /// assert_eq!(&s, "starlord");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn downcast_mut<T: Any>(&mut self) -> Option<&mut T> {
+ if self.is::<T>() {
+ // SAFETY: just checked whether we are pointing to the correct type, and we can rely on
+ // that check for memory safety because we have implemented Any for all types; no other
+ // impls can exist as they would conflict with our impl.
+ unsafe { Some(self.downcast_mut_unchecked()) }
+ } else {
+ None
+ }
+ }
+
+ /// Returns a reference to the inner value as type `T`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(downcast_unchecked)]
+ ///
+ /// use std::any::Any;
+ ///
+ /// let x: Box<dyn Any> = Box::new(1_usize);
+ ///
+ /// unsafe {
+ /// assert_eq!(*x.downcast_ref_unchecked::<usize>(), 1);
+ /// }
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// The contained value must be of type `T`. Calling this method
+ /// with the incorrect type is *undefined behavior*.
+ #[unstable(feature = "downcast_unchecked", issue = "90850")]
+ #[inline]
+ pub unsafe fn downcast_ref_unchecked<T: Any>(&self) -> &T {
+ debug_assert!(self.is::<T>());
+ // SAFETY: caller guarantees that T is the correct type
+ unsafe { &*(self as *const dyn Any as *const T) }
+ }
+
+ /// Returns a mutable reference to the inner value as type `T`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(downcast_unchecked)]
+ ///
+ /// use std::any::Any;
+ ///
+ /// let mut x: Box<dyn Any> = Box::new(1_usize);
+ ///
+ /// unsafe {
+ /// *x.downcast_mut_unchecked::<usize>() += 1;
+ /// }
+ ///
+ /// assert_eq!(*x.downcast_ref::<usize>().unwrap(), 2);
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// The contained value must be of type `T`. Calling this method
+ /// with the incorrect type is *undefined behavior*.
+ #[unstable(feature = "downcast_unchecked", issue = "90850")]
+ #[inline]
+ pub unsafe fn downcast_mut_unchecked<T: Any>(&mut self) -> &mut T {
+ debug_assert!(self.is::<T>());
+ // SAFETY: caller guarantees that T is the correct type
+ unsafe { &mut *(self as *mut dyn Any as *mut T) }
+ }
+}
+
+impl dyn Any + Send {
+ /// Forwards to the method defined on the type `dyn Any`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn is_string(s: &(dyn Any + Send)) {
+ /// if s.is::<String>() {
+ /// println!("It's a string!");
+ /// } else {
+ /// println!("Not a string...");
+ /// }
+ /// }
+ ///
+ /// is_string(&0);
+ /// is_string(&"cookie monster".to_string());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn is<T: Any>(&self) -> bool {
+ <dyn Any>::is::<T>(self)
+ }
+
+ /// Forwards to the method defined on the type `dyn Any`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn print_if_string(s: &(dyn Any + Send)) {
+ /// if let Some(string) = s.downcast_ref::<String>() {
+ /// println!("It's a string({}): '{}'", string.len(), string);
+ /// } else {
+ /// println!("Not a string...");
+ /// }
+ /// }
+ ///
+ /// print_if_string(&0);
+ /// print_if_string(&"cookie monster".to_string());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn downcast_ref<T: Any>(&self) -> Option<&T> {
+ <dyn Any>::downcast_ref::<T>(self)
+ }
+
+ /// Forwards to the method defined on the type `dyn Any`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn modify_if_u32(s: &mut (dyn Any + Send)) {
+ /// if let Some(num) = s.downcast_mut::<u32>() {
+ /// *num = 42;
+ /// }
+ /// }
+ ///
+ /// let mut x = 10u32;
+ /// let mut s = "starlord".to_string();
+ ///
+ /// modify_if_u32(&mut x);
+ /// modify_if_u32(&mut s);
+ ///
+ /// assert_eq!(x, 42);
+ /// assert_eq!(&s, "starlord");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn downcast_mut<T: Any>(&mut self) -> Option<&mut T> {
+ <dyn Any>::downcast_mut::<T>(self)
+ }
+
+ /// Forwards to the method defined on the type `dyn Any`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(downcast_unchecked)]
+ ///
+ /// use std::any::Any;
+ ///
+ /// let x: Box<dyn Any> = Box::new(1_usize);
+ ///
+ /// unsafe {
+ /// assert_eq!(*x.downcast_ref_unchecked::<usize>(), 1);
+ /// }
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// Same as the method on the type `dyn Any`.
+ #[unstable(feature = "downcast_unchecked", issue = "90850")]
+ #[inline]
+ pub unsafe fn downcast_ref_unchecked<T: Any>(&self) -> &T {
+ // SAFETY: guaranteed by caller
+ unsafe { <dyn Any>::downcast_ref_unchecked::<T>(self) }
+ }
+
+ /// Forwards to the method defined on the type `dyn Any`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(downcast_unchecked)]
+ ///
+ /// use std::any::Any;
+ ///
+ /// let mut x: Box<dyn Any> = Box::new(1_usize);
+ ///
+ /// unsafe {
+ /// *x.downcast_mut_unchecked::<usize>() += 1;
+ /// }
+ ///
+ /// assert_eq!(*x.downcast_ref::<usize>().unwrap(), 2);
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// Same as the method on the type `dyn Any`.
+ #[unstable(feature = "downcast_unchecked", issue = "90850")]
+ #[inline]
+ pub unsafe fn downcast_mut_unchecked<T: Any>(&mut self) -> &mut T {
+ // SAFETY: guaranteed by caller
+ unsafe { <dyn Any>::downcast_mut_unchecked::<T>(self) }
+ }
+}
+
+impl dyn Any + Send + Sync {
+ /// Forwards to the method defined on the type `dyn Any`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn is_string(s: &(dyn Any + Send + Sync)) {
+ /// if s.is::<String>() {
+ /// println!("It's a string!");
+ /// } else {
+ /// println!("Not a string...");
+ /// }
+ /// }
+ ///
+ /// is_string(&0);
+ /// is_string(&"cookie monster".to_string());
+ /// ```
+ #[stable(feature = "any_send_sync_methods", since = "1.28.0")]
+ #[inline]
+ pub fn is<T: Any>(&self) -> bool {
+ <dyn Any>::is::<T>(self)
+ }
+
+ /// Forwards to the method defined on the type `dyn Any`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn print_if_string(s: &(dyn Any + Send + Sync)) {
+ /// if let Some(string) = s.downcast_ref::<String>() {
+ /// println!("It's a string({}): '{}'", string.len(), string);
+ /// } else {
+ /// println!("Not a string...");
+ /// }
+ /// }
+ ///
+ /// print_if_string(&0);
+ /// print_if_string(&"cookie monster".to_string());
+ /// ```
+ #[stable(feature = "any_send_sync_methods", since = "1.28.0")]
+ #[inline]
+ pub fn downcast_ref<T: Any>(&self) -> Option<&T> {
+ <dyn Any>::downcast_ref::<T>(self)
+ }
+
+ /// Forwards to the method defined on the type `dyn Any`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn modify_if_u32(s: &mut (dyn Any + Send + Sync)) {
+ /// if let Some(num) = s.downcast_mut::<u32>() {
+ /// *num = 42;
+ /// }
+ /// }
+ ///
+ /// let mut x = 10u32;
+ /// let mut s = "starlord".to_string();
+ ///
+ /// modify_if_u32(&mut x);
+ /// modify_if_u32(&mut s);
+ ///
+ /// assert_eq!(x, 42);
+ /// assert_eq!(&s, "starlord");
+ /// ```
+ #[stable(feature = "any_send_sync_methods", since = "1.28.0")]
+ #[inline]
+ pub fn downcast_mut<T: Any>(&mut self) -> Option<&mut T> {
+ <dyn Any>::downcast_mut::<T>(self)
+ }
+
+ /// Forwards to the method defined on the type `dyn Any`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(downcast_unchecked)]
+ ///
+ /// use std::any::Any;
+ ///
+ /// let x: Box<dyn Any> = Box::new(1_usize);
+ ///
+ /// unsafe {
+ /// assert_eq!(*x.downcast_ref_unchecked::<usize>(), 1);
+ /// }
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// Same as the method on the type `dyn Any`.
+ #[unstable(feature = "downcast_unchecked", issue = "90850")]
+ #[inline]
+ pub unsafe fn downcast_ref_unchecked<T: Any>(&self) -> &T {
+ // SAFETY: guaranteed by caller
+ unsafe { <dyn Any>::downcast_ref_unchecked::<T>(self) }
+ }
+
+ /// Forwards to the method defined on the type `dyn Any`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(downcast_unchecked)]
+ ///
+ /// use std::any::Any;
+ ///
+ /// let mut x: Box<dyn Any> = Box::new(1_usize);
+ ///
+ /// unsafe {
+ /// *x.downcast_mut_unchecked::<usize>() += 1;
+ /// }
+ ///
+ /// assert_eq!(*x.downcast_ref::<usize>().unwrap(), 2);
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// Same as the method on the type `dyn Any`.
+ #[unstable(feature = "downcast_unchecked", issue = "90850")]
+ #[inline]
+ pub unsafe fn downcast_mut_unchecked<T: Any>(&mut self) -> &mut T {
+ // SAFETY: guaranteed by caller
+ unsafe { <dyn Any>::downcast_mut_unchecked::<T>(self) }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// TypeId and its methods
+///////////////////////////////////////////////////////////////////////////////
+
+/// A `TypeId` represents a globally unique identifier for a type.
+///
+/// Each `TypeId` is an opaque object which does not allow inspection of what's
+/// inside but does allow basic operations such as cloning, comparison,
+/// printing, and showing.
+///
+/// A `TypeId` is currently only available for types which satisfy a `'static`
+/// lifetime bound, but this limitation may be removed in the future.
+///
+/// While `TypeId` implements `Hash`, `PartialOrd`, and `Ord`, it is worth
+/// noting that the hashes and ordering will vary between Rust releases. Beware
+/// of relying on them in your code!
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct TypeId {
+ t: u64,
+}
+
+impl TypeId {
+ /// Returns the `TypeId` of the type this generic function has been
+ /// instantiated with.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::{Any, TypeId};
+ ///
+ /// fn is_string<T: ?Sized + Any>(_s: &T) -> bool {
+ /// TypeId::of::<String>() == TypeId::of::<T>()
+ /// }
+ ///
+ /// assert_eq!(is_string(&0), false);
+ /// assert_eq!(is_string(&"cookie monster".to_string()), true);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_type_id", issue = "77125")]
+ pub const fn of<T: ?Sized + 'static>() -> TypeId {
+ TypeId { t: intrinsics::type_id::<T>() }
+ }
+}
+
+/// Returns the name of a type as a string slice.
+///
+/// # Note
+///
+/// This is intended for diagnostic use. The exact contents and format of the
+/// string returned are not specified, other than being a best-effort
+/// description of the type. For example, amongst the strings
+/// that `type_name::<Option<String>>()` might return are `"Option<String>"` and
+/// `"std::option::Option<std::string::String>"`.
+///
+/// The returned string must not be considered to be a unique identifier of a
+/// type as multiple types may map to the same type name. Similarly, there is no
+/// guarantee that all parts of a type will appear in the returned string: for
+/// example, lifetime specifiers are currently not included. In addition, the
+/// output may change between versions of the compiler.
+///
+/// The current implementation uses the same infrastructure as compiler
+/// diagnostics and debuginfo, but this is not guaranteed.
+///
+/// # Examples
+///
+/// ```rust
+/// assert_eq!(
+/// std::any::type_name::<Option<String>>(),
+/// "core::option::Option<alloc::string::String>",
+/// );
+/// ```
+#[must_use]
+#[stable(feature = "type_name", since = "1.38.0")]
+#[rustc_const_unstable(feature = "const_type_name", issue = "63084")]
+pub const fn type_name<T: ?Sized>() -> &'static str {
+ intrinsics::type_name::<T>()
+}
+
+/// Returns the name of the type of the pointed-to value as a string slice.
+/// This is the same as `type_name::<T>()`, but can be used where the type of a
+/// variable is not easily available.
+///
+/// # Note
+///
+/// This is intended for diagnostic use. The exact contents and format of the
+/// string are not specified, other than being a best-effort description of the
+/// type. For example, `type_name_of_val(&None::<String>)` could return
+/// `"Option<String>"` or `"std::option::Option<std::string::String>"`, but not
+/// `"foobar"`. In addition, the output may change between versions of the
+/// compiler.
+///
+/// This function does not resolve trait objects,
+/// meaning that `type_name_of_val(&7u32 as &dyn Debug)`
+/// may return `"dyn Debug"`, but not `"u32"`.
+///
+/// The type name should not be considered a unique identifier of a type;
+/// multiple types may share the same type name.
+///
+/// The current implementation uses the same infrastructure as compiler
+/// diagnostics and debuginfo, but this is not guaranteed.
+///
+/// # Examples
+///
+/// Prints the default integer and float types.
+///
+/// ```rust
+/// #![feature(type_name_of_val)]
+/// use std::any::type_name_of_val;
+///
+/// let x = 1;
+/// println!("{}", type_name_of_val(&x));
+/// let y = 1.0;
+/// println!("{}", type_name_of_val(&y));
+/// ```
+#[must_use]
+#[unstable(feature = "type_name_of_val", issue = "66359")]
+#[rustc_const_unstable(feature = "const_type_name", issue = "63084")]
+pub const fn type_name_of_val<T: ?Sized>(_val: &T) -> &'static str {
+ type_name::<T>()
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Provider trait
+///////////////////////////////////////////////////////////////////////////////
+
+/// Trait implemented by a type which can dynamically provide values based on type.
+#[unstable(feature = "provide_any", issue = "96024")]
+pub trait Provider {
+ /// Data providers should implement this method to provide *all* values they are able to
+ /// provide by using `demand`.
+ ///
+ /// Note that the `provide_*` methods on `Demand` have short-circuit semantics: if an earlier
+ /// method has successfully provided a value, then later methods will not get an opportunity to
+ /// provide.
+ ///
+ /// # Examples
+ ///
+ /// Provides a reference to a field with type `String` as a `&str`, and a value of
+ /// type `i32`.
+ ///
+ /// ```rust
+ /// # #![feature(provide_any)]
+ /// use std::any::{Provider, Demand};
+ /// # struct SomeConcreteType { field: String, num_field: i32 }
+ ///
+ /// impl Provider for SomeConcreteType {
+ /// fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
+ /// demand.provide_ref::<str>(&self.field)
+ /// .provide_value::<i32>(|| self.num_field);
+ /// }
+ /// }
+ /// ```
+ #[unstable(feature = "provide_any", issue = "96024")]
+ fn provide<'a>(&'a self, demand: &mut Demand<'a>);
+}
+
+/// Request a value from the `Provider`.
+///
+/// # Examples
+///
+/// Get a string value from a provider.
+///
+/// ```rust
+/// # #![feature(provide_any)]
+/// use std::any::{Provider, request_value};
+///
+/// fn get_string(provider: &impl Provider) -> String {
+/// request_value::<String>(provider).unwrap()
+/// }
+/// ```
+#[unstable(feature = "provide_any", issue = "96024")]
+pub fn request_value<'a, T>(provider: &'a (impl Provider + ?Sized)) -> Option<T>
+where
+ T: 'static,
+{
+ request_by_type_tag::<'a, tags::Value<T>>(provider)
+}
+
+/// Request a reference from the `Provider`.
+///
+/// # Examples
+///
+/// Get a string reference from a provider.
+///
+/// ```rust
+/// # #![feature(provide_any)]
+/// use std::any::{Provider, request_ref};
+///
+/// fn get_str(provider: &impl Provider) -> &str {
+/// request_ref::<str>(provider).unwrap()
+/// }
+/// ```
+#[unstable(feature = "provide_any", issue = "96024")]
+pub fn request_ref<'a, T>(provider: &'a (impl Provider + ?Sized)) -> Option<&'a T>
+where
+ T: 'static + ?Sized,
+{
+ request_by_type_tag::<'a, tags::Ref<tags::MaybeSizedValue<T>>>(provider)
+}
+
+/// Request a specific value by tag from the `Provider`.
+fn request_by_type_tag<'a, I>(provider: &'a (impl Provider + ?Sized)) -> Option<I::Reified>
+where
+ I: tags::Type<'a>,
+{
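+ // Stack-allocate an empty tagged option, let the provider fill it through the
+ // type-erased `Demand` view, then return whatever value was stored.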
+ let mut tagged = TaggedOption::<'a, I>(None);
+ provider.provide(tagged.as_demand());
+ tagged.0
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Demand and its methods
+///////////////////////////////////////////////////////////////////////////////
+
+/// A helper object for providing data by type.
+///
+/// A data provider provides values by calling this type's provide methods.
+#[unstable(feature = "provide_any", issue = "96024")]
+#[repr(transparent)]
+pub struct Demand<'a>(dyn Erased<'a> + 'a);
+
+impl<'a> Demand<'a> {
+ /// Create a new `&mut Demand` from a `&mut dyn Erased` trait object.
+ fn new<'b>(erased: &'b mut (dyn Erased<'a> + 'a)) -> &'b mut Demand<'a> {
+ // SAFETY: transmuting `&mut (dyn Erased<'a> + 'a)` to `&mut Demand<'a>` is safe since
+ // `Demand` is repr(transparent).
+ unsafe { &mut *(erased as *mut dyn Erased<'a> as *mut Demand<'a>) }
+ }
+
+ /// Provide a value or other type with only static lifetimes.
+ ///
+ /// # Examples
+ ///
+ /// Provides a `String` by cloning.
+ ///
+ /// ```rust
+ /// # #![feature(provide_any)]
+ /// use std::any::{Provider, Demand};
+ /// # struct SomeConcreteType { field: String }
+ ///
+ /// impl Provider for SomeConcreteType {
+ /// fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
+ /// demand.provide_value::<String>(|| self.field.clone());
+ /// }
+ /// }
+ /// ```
+ #[unstable(feature = "provide_any", issue = "96024")]
+ pub fn provide_value<T>(&mut self, fulfil: impl FnOnce() -> T) -> &mut Self
+ where
+ T: 'static,
+ {
+ self.provide_with::<tags::Value<T>>(fulfil)
+ }
+
+ /// Provide a reference. Note that the referent type must be bounded by `'static`,
+ /// but may be unsized.
+ ///
+ /// # Examples
+ ///
+ /// Provides a reference to a field as a `&str`.
+ ///
+ /// ```rust
+ /// # #![feature(provide_any)]
+ /// use std::any::{Provider, Demand};
+ /// # struct SomeConcreteType { field: String }
+ ///
+ /// impl Provider for SomeConcreteType {
+ /// fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
+ /// demand.provide_ref::<str>(&self.field);
+ /// }
+ /// }
+ /// ```
+ #[unstable(feature = "provide_any", issue = "96024")]
+ pub fn provide_ref<T: ?Sized + 'static>(&mut self, value: &'a T) -> &mut Self {
+ self.provide::<tags::Ref<tags::MaybeSizedValue<T>>>(value)
+ }
+
+ /// Provide a value with the given `Type` tag.
+ fn provide<I>(&mut self, value: I::Reified) -> &mut Self
+ where
+ I: tags::Type<'a>,
+ {
+ if let Some(res @ TaggedOption(None)) = self.0.downcast_mut::<I>() {
+ res.0 = Some(value);
+ }
+ self
+ }
+
+ /// Provide a value with the given `Type` tag, using a closure to prevent unnecessary work.
+ fn provide_with<I>(&mut self, fulfil: impl FnOnce() -> I::Reified) -> &mut Self
+ where
+ I: tags::Type<'a>,
+ {
+ if let Some(res @ TaggedOption(None)) = self.0.downcast_mut::<I>() {
+ res.0 = Some(fulfil());
+ }
+ self
+ }
+}
+
+#[unstable(feature = "provide_any", issue = "96024")]
+impl<'a> fmt::Debug for Demand<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Demand").finish_non_exhaustive()
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Type tags
+///////////////////////////////////////////////////////////////////////////////
+
+mod tags {
+ //! Type tags are used to identify a type using a separate value. This module includes type tags
+ //! for some very common types.
+ //!
+ //! Currently type tags are not exposed to the user. But in the future, if you want to use the
+ //! Provider API with more complex types (typically those including lifetime parameters), you
+ //! will need to write your own tags.
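+ //!
+ //! As a sketch, a hand-written tag for borrowed byte slices could look like
+ //! the following (illustrative only; `SliceTag` is a made-up name):
+ //!
+ //! ```ignore (tags are internal to this module)
+ //! struct SliceTag;
+ //!
+ //! impl<'a> Type<'a> for SliceTag {
+ //!     // A request tagged with `SliceTag` reifies to a borrowed slice with
+ //!     // the requested lifetime.
+ //!     type Reified = &'a [u8];
+ //! }
+ //! ```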
+
+ use crate::marker::PhantomData;
+
+ /// This trait is implemented by specific tag types in order to allow
+ /// describing a type which can be requested for a given lifetime `'a`.
+ ///
+ /// A few example implementations for type-driven tags can be found in this
+ /// module, although crates may also implement their own tags for more
+ /// complex types with internal lifetimes.
+ pub trait Type<'a>: Sized + 'static {
+ /// The type of values which may be tagged by this tag for the given
+ /// lifetime.
+ type Reified: 'a;
+ }
+
+ /// Similar to the [`Type`] trait, but represents a type which may be unsized (i.e., has a
+ /// `?Sized` bound). E.g., `str`.
+ pub trait MaybeSizedType<'a>: Sized + 'static {
+ type Reified: 'a + ?Sized;
+ }
+
+ impl<'a, T: Type<'a>> MaybeSizedType<'a> for T {
+ type Reified = T::Reified;
+ }
+
+ /// Type-based tag for types bounded by `'static`, i.e., with no borrowed elements.
+ #[derive(Debug)]
+ pub struct Value<T: 'static>(PhantomData<T>);
+
+ impl<'a, T: 'static> Type<'a> for Value<T> {
+ type Reified = T;
+ }
+
+ /// Type-based tag similar to [`Value`] but which may be unsized (i.e., has a `?Sized` bound).
+ #[derive(Debug)]
+ pub struct MaybeSizedValue<T: ?Sized + 'static>(PhantomData<T>);
+
+ impl<'a, T: ?Sized + 'static> MaybeSizedType<'a> for MaybeSizedValue<T> {
+ type Reified = T;
+ }
+
+ /// Type-based tag for reference types (`&'a T`, where `T` is represented by
+ /// `<I as MaybeSizedType<'a>>::Reified`).
+ #[derive(Debug)]
+ pub struct Ref<I>(PhantomData<I>);
+
+ impl<'a, I: MaybeSizedType<'a>> Type<'a> for Ref<I> {
+ type Reified = &'a I::Reified;
+ }
+}
+
+/// An `Option` with a type tag `I`.
+///
+/// Since this struct implements `Erased`, the type can be erased to make a dynamically typed
+/// option. The type can be checked dynamically using `Erased::tag_id`, and since this is statically
+/// checked for the concrete type, there is some degree of type safety.
+#[repr(transparent)]
+struct TaggedOption<'a, I: tags::Type<'a>>(Option<I::Reified>);
+
+impl<'a, I: tags::Type<'a>> TaggedOption<'a, I> {
+ fn as_demand(&mut self) -> &mut Demand<'a> {
+ Demand::new(self as &mut (dyn Erased<'a> + 'a))
+ }
+}
+
+/// Represents a type-erased but identifiable object.
+///
+/// This trait is exclusively implemented by the `TaggedOption` type.
+unsafe trait Erased<'a>: 'a {
+ /// The `TypeId` of the erased type.
+ fn tag_id(&self) -> TypeId;
+}
+
+unsafe impl<'a, I: tags::Type<'a>> Erased<'a> for TaggedOption<'a, I> {
+ fn tag_id(&self) -> TypeId {
+ TypeId::of::<I>()
+ }
+}
+
+#[unstable(feature = "provide_any", issue = "96024")]
+impl<'a> dyn Erased<'a> + 'a {
+ /// Returns some reference to the dynamic value if it is tagged with `I`,
+ /// or `None` otherwise.
+ #[inline]
+ fn downcast_mut<I>(&mut self) -> Option<&mut TaggedOption<'a, I>>
+ where
+ I: tags::Type<'a>,
+ {
+ if self.tag_id() == TypeId::of::<I>() {
+ // SAFETY: Just checked whether we're pointing to an I.
+ Some(unsafe { &mut *(self as *mut Self).cast::<TaggedOption<'a, I>>() })
+ } else {
+ None
+ }
+ }
+}
diff --git a/library/core/src/array/equality.rs b/library/core/src/array/equality.rs
new file mode 100644
index 000000000..33f7f494e
--- /dev/null
+++ b/library/core/src/array/equality.rs
@@ -0,0 +1,216 @@
+use crate::convert::TryInto;
+use crate::num::{NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize};
+use crate::num::{NonZeroU128, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A, B, const N: usize> PartialEq<[B; N]> for [A; N]
+where
+ A: PartialEq<B>,
+{
+ #[inline]
+ fn eq(&self, other: &[B; N]) -> bool {
+ SpecArrayEq::spec_eq(self, other)
+ }
+ #[inline]
+ fn ne(&self, other: &[B; N]) -> bool {
+ SpecArrayEq::spec_ne(self, other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A, B, const N: usize> PartialEq<[B]> for [A; N]
+where
+ A: PartialEq<B>,
+{
+ #[inline]
+ fn eq(&self, other: &[B]) -> bool {
+ let b: Result<&[B; N], _> = other.try_into();
+ match b {
+ Ok(b) => *self == *b,
+ Err(_) => false,
+ }
+ }
+ #[inline]
+ fn ne(&self, other: &[B]) -> bool {
+ let b: Result<&[B; N], _> = other.try_into();
+ match b {
+ Ok(b) => *self != *b,
+ Err(_) => true,
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A, B, const N: usize> PartialEq<[A; N]> for [B]
+where
+ B: PartialEq<A>,
+{
+ #[inline]
+ fn eq(&self, other: &[A; N]) -> bool {
+ let b: Result<&[B; N], _> = self.try_into();
+ match b {
+ Ok(b) => *b == *other,
+ Err(_) => false,
+ }
+ }
+ #[inline]
+ fn ne(&self, other: &[A; N]) -> bool {
+ let b: Result<&[B; N], _> = self.try_into();
+ match b {
+ Ok(b) => *b != *other,
+ Err(_) => true,
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A, B, const N: usize> PartialEq<&[B]> for [A; N]
+where
+ A: PartialEq<B>,
+{
+ #[inline]
+ fn eq(&self, other: &&[B]) -> bool {
+ *self == **other
+ }
+ #[inline]
+ fn ne(&self, other: &&[B]) -> bool {
+ *self != **other
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A, B, const N: usize> PartialEq<[A; N]> for &[B]
+where
+ B: PartialEq<A>,
+{
+ #[inline]
+ fn eq(&self, other: &[A; N]) -> bool {
+ **self == *other
+ }
+ #[inline]
+ fn ne(&self, other: &[A; N]) -> bool {
+ **self != *other
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A, B, const N: usize> PartialEq<&mut [B]> for [A; N]
+where
+ A: PartialEq<B>,
+{
+ #[inline]
+ fn eq(&self, other: &&mut [B]) -> bool {
+ *self == **other
+ }
+ #[inline]
+ fn ne(&self, other: &&mut [B]) -> bool {
+ *self != **other
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A, B, const N: usize> PartialEq<[A; N]> for &mut [B]
+where
+ B: PartialEq<A>,
+{
+ #[inline]
+ fn eq(&self, other: &[A; N]) -> bool {
+ **self == *other
+ }
+ #[inline]
+ fn ne(&self, other: &[A; N]) -> bool {
+ **self != *other
+ }
+}
+
+// NOTE: some less important impls are omitted to reduce code bloat
+// __impl_slice_eq2! { [A; $N], &'b [B; $N] }
+// __impl_slice_eq2! { [A; $N], &'b mut [B; $N] }
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Eq, const N: usize> Eq for [T; N] {}
+
+trait SpecArrayEq<Other, const N: usize>: Sized {
+ fn spec_eq(a: &[Self; N], b: &[Other; N]) -> bool;
+ fn spec_ne(a: &[Self; N], b: &[Other; N]) -> bool;
+}
+
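+// Dispatch: the `default` methods below fall back to element-wise slice
+// comparison; the specialized impl further down, for `IsRawEqComparable`
+// types, compares the raw bytes of both arrays at once via `intrinsics::raw_eq`.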
+impl<T: PartialEq<Other>, Other, const N: usize> SpecArrayEq<Other, N> for T {
+ default fn spec_eq(a: &[Self; N], b: &[Other; N]) -> bool {
+ a[..] == b[..]
+ }
+ default fn spec_ne(a: &[Self; N], b: &[Other; N]) -> bool {
+ a[..] != b[..]
+ }
+}
+
+impl<T: IsRawEqComparable<U>, U, const N: usize> SpecArrayEq<U, N> for T {
+ fn spec_eq(a: &[T; N], b: &[U; N]) -> bool {
+ // SAFETY: This is why `IsRawEqComparable` is an `unsafe trait`.
+ unsafe {
+ let b = &*b.as_ptr().cast::<[T; N]>();
+ crate::intrinsics::raw_eq(a, b)
+ }
+ }
+ fn spec_ne(a: &[T; N], b: &[U; N]) -> bool {
+ !Self::spec_eq(a, b)
+ }
+}
+
+/// `U` exists here mostly because `min_specialization` didn't let me
+/// repeat the `T` type parameter in the above specialization, so instead
+/// the `T == U` constraint comes from the impls on this.
+/// # Safety
+/// - Neither `Self` nor `U` has any padding.
+/// - `Self` and `U` have the same layout.
+/// - `Self: PartialEq<U>` is byte-wise (this means no floats, among other things)
+#[rustc_specialization_trait]
+unsafe trait IsRawEqComparable<U>: PartialEq<U> {}
+
+macro_rules! is_raw_eq_comparable {
+ ($($t:ty),+ $(,)?) => {$(
+ unsafe impl IsRawEqComparable<$t> for $t {}
+ )+};
+}
+
+// SAFETY: All the ordinary integer types allow all bit patterns as distinct values
+is_raw_eq_comparable!(u8, u16, u32, u64, u128, usize, i8, i16, i32, i64, i128, isize);
+
+// SAFETY: bool and char have *niches*, but no *padding*, so this is sound
+is_raw_eq_comparable!(bool, char);
+
+// SAFETY: Similarly, the non-zero types have a niche, but no undef,
+// and they compare like their underlying numeric type.
+is_raw_eq_comparable!(
+ NonZeroU8,
+ NonZeroU16,
+ NonZeroU32,
+ NonZeroU64,
+ NonZeroU128,
+ NonZeroUsize,
+ NonZeroI8,
+ NonZeroI16,
+ NonZeroI32,
+ NonZeroI64,
+ NonZeroI128,
+ NonZeroIsize,
+);
+
+// SAFETY: The NonZero types have the "null" optimization guaranteed, and thus
+// are also safe to equality-compare bitwise inside an `Option`.
+// The way `PartialOrd` is defined for `Option` means that this wouldn't work
+// for `<` or `>` on the signed types, but since we only do `==` it's fine.
+is_raw_eq_comparable!(
+ Option<NonZeroU8>,
+ Option<NonZeroU16>,
+ Option<NonZeroU32>,
+ Option<NonZeroU64>,
+ Option<NonZeroU128>,
+ Option<NonZeroUsize>,
+ Option<NonZeroI8>,
+ Option<NonZeroI16>,
+ Option<NonZeroI32>,
+ Option<NonZeroI64>,
+ Option<NonZeroI128>,
+ Option<NonZeroIsize>,
+);
diff --git a/library/core/src/array/iter.rs b/library/core/src/array/iter.rs
new file mode 100644
index 000000000..f4885ed9f
--- /dev/null
+++ b/library/core/src/array/iter.rs
@@ -0,0 +1,420 @@
+//! Defines the `IntoIter` owned iterator for arrays.
+
+use crate::{
+ cmp, fmt,
+ iter::{self, ExactSizeIterator, FusedIterator, TrustedLen},
+ mem::{self, MaybeUninit},
+ ops::Range,
+ ptr,
+};
+
+/// A by-value [array] iterator.
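+///
+/// # Examples
+///
+/// Iterating moves each element out of the array, from either end:
+///
+/// ```
+/// let mut iter = ["a", "b", "c"].into_iter();
+/// assert_eq!(iter.next(), Some("a"));
+/// assert_eq!(iter.next_back(), Some("c"));
+/// assert_eq!(iter.next(), Some("b"));
+/// assert_eq!(iter.next(), None);
+/// ```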
+#[stable(feature = "array_value_iter", since = "1.51.0")]
+#[rustc_insignificant_dtor]
+pub struct IntoIter<T, const N: usize> {
+ /// This is the array we are iterating over.
+ ///
+ /// Elements with index `i` where `alive.start <= i < alive.end` have not
+ /// been yielded yet and are valid array entries. Elements with indices `i
+ /// < alive.start` or `i >= alive.end` have been yielded already and must
+ /// not be accessed anymore! Those dead elements might even be in a
+ /// completely uninitialized state!
+ ///
+ /// So the invariants are:
+ /// - `data[alive]` is alive (i.e. contains valid elements)
+ /// - `data[..alive.start]` and `data[alive.end..]` are dead (i.e. the
+ /// elements were already read and must not be touched anymore!)
+ data: [MaybeUninit<T>; N],
+
+ /// The elements in `data` that have not been yielded yet.
+ ///
+ /// Invariants:
+ /// - `alive.start <= alive.end`
+ /// - `alive.end <= N`
+ alive: Range<usize>,
+}
+
+// Note: the `#[rustc_skip_array_during_method_dispatch]` on `trait IntoIterator`
+// hides this implementation from explicit `.into_iter()` calls on editions < 2021,
+// so those calls will still resolve to the slice implementation, by reference.
+#[stable(feature = "array_into_iter_impl", since = "1.53.0")]
+impl<T, const N: usize> IntoIterator for [T; N] {
+ type Item = T;
+ type IntoIter = IntoIter<T, N>;
+
+ /// Creates a consuming iterator, that is, one that moves each value out of
+ /// the array (from start to end). The array cannot be used after calling
+ /// this unless `T` implements `Copy`, so the whole array is copied.
+ ///
+ /// Arrays have special behavior when calling `.into_iter()` prior to the
+ /// 2021 edition -- see the [array] Editions section for more information.
+ ///
+ /// [array]: prim@array
+ fn into_iter(self) -> Self::IntoIter {
+ // SAFETY: The transmute here is actually safe. The docs of `MaybeUninit`
+ // promise:
+ //
+ // > `MaybeUninit<T>` is guaranteed to have the same size and alignment
+ // > as `T`.
+ //
+ // The docs even show a transmute from an array of `MaybeUninit<T>` to
+ // an array of `T`.
+ //
+ // With that, this initialization satisfies the invariants.
+
+ // FIXME(LukasKalbertodt): actually use `mem::transmute` here, once it
+ // works with const generics:
+ // `mem::transmute::<[T; N], [MaybeUninit<T>; N]>(array)`
+ //
+ // Until then, we can use `mem::transmute_copy` to create a bitwise copy
+ // as a different type, then forget `array` so that it is not dropped.
+ unsafe {
+ let iter = IntoIter { data: mem::transmute_copy(&self), alive: 0..N };
+ mem::forget(self);
+ iter
+ }
+ }
+}
+
+impl<T, const N: usize> IntoIter<T, N> {
+ /// Creates a new iterator over the given `array`.
+ #[stable(feature = "array_value_iter", since = "1.51.0")]
+ #[deprecated(since = "1.59.0", note = "use `IntoIterator::into_iter` instead")]
+ pub fn new(array: [T; N]) -> Self {
+ IntoIterator::into_iter(array)
+ }
+
+ /// Creates an iterator over the elements in a partially-initialized buffer.
+ ///
+ /// If you have a fully-initialized array, then use [`IntoIterator`].
+ /// But this is useful for returning partial results from unsafe code.
+ ///
+ /// # Safety
+ ///
+ /// - The `buffer[initialized]` elements must all be initialized.
+ /// - The range must be canonical, with `initialized.start <= initialized.end`.
+ /// - The range must be in-bounds for the buffer, with `initialized.end <= N`.
+ /// (Like how indexing `[0][100..100]` fails despite the range being empty.)
+ ///
+ /// It's sound to have more elements initialized than mentioned, though that
+ /// will most likely result in them being leaked.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(array_into_iter_constructors)]
+ ///
+ /// #![feature(maybe_uninit_array_assume_init)]
+ /// #![feature(maybe_uninit_uninit_array)]
+ /// use std::array::IntoIter;
+ /// use std::mem::MaybeUninit;
+ ///
+ /// # // Hi! Thanks for reading the code. This is restricted to `Copy` because
+ /// # // otherwise it could leak. A fully-general version of this would need a drop
+ /// # // guard to handle panics from the iterator, but this works for an example.
+ /// fn next_chunk<T: Copy, const N: usize>(
+ /// it: &mut impl Iterator<Item = T>,
+ /// ) -> Result<[T; N], IntoIter<T, N>> {
+ /// let mut buffer = MaybeUninit::uninit_array();
+ /// let mut i = 0;
+ /// while i < N {
+ /// match it.next() {
+ /// Some(x) => {
+ /// buffer[i].write(x);
+ /// i += 1;
+ /// }
+ /// None => {
+ /// // SAFETY: We've initialized the first `i` items
+ /// unsafe {
+ /// return Err(IntoIter::new_unchecked(buffer, 0..i));
+ /// }
+ /// }
+ /// }
+ /// }
+ ///
+ /// // SAFETY: We've initialized all N items
+ /// unsafe { Ok(MaybeUninit::array_assume_init(buffer)) }
+ /// }
+ ///
+ /// let r: [_; 4] = next_chunk(&mut (10..16)).unwrap();
+ /// assert_eq!(r, [10, 11, 12, 13]);
+ /// let r: IntoIter<_, 40> = next_chunk(&mut (10..16)).unwrap_err();
+ /// assert_eq!(r.collect::<Vec<_>>(), vec![10, 11, 12, 13, 14, 15]);
+ /// ```
+ #[unstable(feature = "array_into_iter_constructors", issue = "91583")]
+ #[rustc_const_unstable(feature = "const_array_into_iter_constructors", issue = "91583")]
+ pub const unsafe fn new_unchecked(
+ buffer: [MaybeUninit<T>; N],
+ initialized: Range<usize>,
+ ) -> Self {
+ Self { data: buffer, alive: initialized }
+ }
+
+ /// Creates an iterator over `T` which returns no elements.
+ ///
+ /// If you just need an empty iterator, then use
+ /// [`iter::empty()`](crate::iter::empty) instead.
+ /// And if you need an empty array, use `[]`.
+ ///
+ /// But this is useful when you need an `array::IntoIter<T, N>` *specifically*.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(array_into_iter_constructors)]
+ /// use std::array::IntoIter;
+ ///
+ /// let empty = IntoIter::<i32, 3>::empty();
+ /// assert_eq!(empty.len(), 0);
+ /// assert_eq!(empty.as_slice(), &[]);
+ ///
+ /// let empty = IntoIter::<std::convert::Infallible, 200>::empty();
+ /// assert_eq!(empty.len(), 0);
+ /// ```
+ ///
+ /// `[1, 2].into_iter()` and `[].into_iter()` have different types
+ /// ```compile_fail,edition2021
+ /// #![feature(array_into_iter_constructors)]
+ /// use std::array::IntoIter;
+ ///
+ /// pub fn get_bytes(b: bool) -> IntoIter<i8, 4> {
+ /// if b {
+ /// [1, 2, 3, 4].into_iter()
+ /// } else {
+ /// [].into_iter() // error[E0308]: mismatched types
+ /// }
+ /// }
+ /// ```
+ ///
+ /// But using this method you can get an empty iterator of appropriate size:
+ /// ```edition2021
+ /// #![feature(array_into_iter_constructors)]
+ /// use std::array::IntoIter;
+ ///
+ /// pub fn get_bytes(b: bool) -> IntoIter<i8, 4> {
+ /// if b {
+ /// [1, 2, 3, 4].into_iter()
+ /// } else {
+ /// IntoIter::empty()
+ /// }
+ /// }
+ ///
+ /// assert_eq!(get_bytes(true).collect::<Vec<_>>(), vec![1, 2, 3, 4]);
+ /// assert_eq!(get_bytes(false).collect::<Vec<_>>(), vec![]);
+ /// ```
+ #[unstable(feature = "array_into_iter_constructors", issue = "91583")]
+ #[rustc_const_unstable(feature = "const_array_into_iter_constructors", issue = "91583")]
+ pub const fn empty() -> Self {
+ let buffer = MaybeUninit::uninit_array();
+ let initialized = 0..0;
+
+ // SAFETY: We're telling it that none of the elements are initialized,
+ // which is trivially true. And ∀N: usize, 0 <= N.
+ unsafe { Self::new_unchecked(buffer, initialized) }
+ }
+
+ /// Returns an immutable slice of all elements that have not been yielded
+ /// yet.
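+    ///
+    /// # Examples
+    ///
+    /// A quick illustration: elements already yielded no longer appear.
+    ///
+    /// ```
+    /// let mut iter = [1, 2, 3].into_iter();
+    /// assert_eq!(iter.as_slice(), &[1, 2, 3]);
+    /// iter.next();
+    /// assert_eq!(iter.as_slice(), &[2, 3]);
+    /// ```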
+ #[stable(feature = "array_value_iter", since = "1.51.0")]
+ pub fn as_slice(&self) -> &[T] {
+ // SAFETY: We know that all elements within `alive` are properly initialized.
+ unsafe {
+ let slice = self.data.get_unchecked(self.alive.clone());
+ MaybeUninit::slice_assume_init_ref(slice)
+ }
+ }
+
+ /// Returns a mutable slice of all elements that have not been yielded yet.
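+    ///
+    /// # Examples
+    ///
+    /// A quick illustration: the remaining elements can be modified in place.
+    ///
+    /// ```
+    /// let mut iter = [1, 2, 3].into_iter();
+    /// iter.next();
+    /// iter.as_mut_slice()[0] = 42;
+    /// assert_eq!(iter.next(), Some(42));
+    /// ```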
+ #[stable(feature = "array_value_iter", since = "1.51.0")]
+ pub fn as_mut_slice(&mut self) -> &mut [T] {
+ // SAFETY: We know that all elements within `alive` are properly initialized.
+ unsafe {
+ let slice = self.data.get_unchecked_mut(self.alive.clone());
+ MaybeUninit::slice_assume_init_mut(slice)
+ }
+ }
+}
+
+#[stable(feature = "array_value_iter_impls", since = "1.40.0")]
+impl<T, const N: usize> Iterator for IntoIter<T, N> {
+ type Item = T;
+ fn next(&mut self) -> Option<Self::Item> {
+ // Get the next index from the front.
+ //
+ // Increasing `alive.start` by 1 maintains the invariant regarding
+ // `alive`. However, due to this change, for a short time, the alive
+ // zone is not `data[alive]` anymore, but `data[idx..alive.end]`.
+ self.alive.next().map(|idx| {
+ // Read the element from the array.
+ // SAFETY: `idx` is an index into the former "alive" region of the
+ // array. Reading this element means that `data[idx]` is regarded as
+ // dead now (i.e. do not touch). As `idx` was the start of the
+ // alive-zone, the alive zone is now `data[alive]` again, restoring
+ // all invariants.
+ unsafe { self.data.get_unchecked(idx).assume_init_read() }
+ })
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.len();
+ (len, Some(len))
+ }
+
+ #[inline]
+ fn fold<Acc, Fold>(mut self, init: Acc, mut fold: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ let data = &mut self.data;
+ iter::ByRefSized(&mut self.alive).fold(init, |acc, idx| {
+ // SAFETY: idx is obtained by folding over the `alive` range, which implies the
+ // value is currently considered alive but as the range is being consumed each value
+ // we read here will only be read once and then considered dead.
+ fold(acc, unsafe { data.get_unchecked(idx).assume_init_read() })
+ })
+ }
+
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ fn last(mut self) -> Option<Self::Item> {
+ self.next_back()
+ }
+
+ fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ let len = self.len();
+
+ // The number of elements to drop. Always in-bounds by construction.
+ let delta = cmp::min(n, len);
+
+ let range_to_drop = self.alive.start..(self.alive.start + delta);
+
+ // Moving the start marks them as conceptually "dropped", so if anything
+ // goes bad then our drop impl won't double-free them.
+ self.alive.start += delta;
+
+ // SAFETY: These elements are currently initialized, so it's fine to drop them.
+ unsafe {
+ let slice = self.data.get_unchecked_mut(range_to_drop);
+ ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(slice));
+ }
+
+ if n > len { Err(len) } else { Ok(()) }
+ }
+}
+
+#[stable(feature = "array_value_iter_impls", since = "1.40.0")]
+impl<T, const N: usize> DoubleEndedIterator for IntoIter<T, N> {
+ fn next_back(&mut self) -> Option<Self::Item> {
+ // Get the next index from the back.
+ //
+ // Decreasing `alive.end` by 1 maintains the invariant regarding
+ // `alive`. However, due to this change, for a short time, the alive
+ // zone is not `data[alive]` anymore, but `data[alive.start..=idx]`.
+ self.alive.next_back().map(|idx| {
+ // Read the element from the array.
+ // SAFETY: `idx` is an index into the former "alive" region of the
+ // array. Reading this element means that `data[idx]` is regarded as
+ // dead now (i.e. do not touch). As `idx` was the end of the
+ // alive-zone, the alive zone is now `data[alive]` again, restoring
+ // all invariants.
+ unsafe { self.data.get_unchecked(idx).assume_init_read() }
+ })
+ }
+
+ #[inline]
+ fn rfold<Acc, Fold>(mut self, init: Acc, mut rfold: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ let data = &mut self.data;
+ iter::ByRefSized(&mut self.alive).rfold(init, |acc, idx| {
+ // SAFETY: idx is obtained by folding over the `alive` range, which implies the
+ // value is currently considered alive but as the range is being consumed each value
+ // we read here will only be read once and then considered dead.
+ rfold(acc, unsafe { data.get_unchecked(idx).assume_init_read() })
+ })
+ }
+
+ fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ let len = self.len();
+
+ // The number of elements to drop. Always in-bounds by construction.
+ let delta = cmp::min(n, len);
+
+ let range_to_drop = (self.alive.end - delta)..self.alive.end;
+
+ // Moving the end marks them as conceptually "dropped", so if anything
+ // goes bad then our drop impl won't double-free them.
+ self.alive.end -= delta;
+
+ // SAFETY: These elements are currently initialized, so it's fine to drop them.
+ unsafe {
+ let slice = self.data.get_unchecked_mut(range_to_drop);
+ ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(slice));
+ }
+
+ if n > len { Err(len) } else { Ok(()) }
+ }
+}
+
+#[stable(feature = "array_value_iter_impls", since = "1.40.0")]
+impl<T, const N: usize> Drop for IntoIter<T, N> {
+ fn drop(&mut self) {
+ // SAFETY: This is safe: `as_mut_slice` returns exactly the sub-slice
+ // of elements that have not been moved out yet and that remain
+ // to be dropped.
+ unsafe { ptr::drop_in_place(self.as_mut_slice()) }
+ }
+}
+
+#[stable(feature = "array_value_iter_impls", since = "1.40.0")]
+impl<T, const N: usize> ExactSizeIterator for IntoIter<T, N> {
+ fn len(&self) -> usize {
+ // Will never underflow due to the invariant `alive.start <=
+ // alive.end`.
+ self.alive.end - self.alive.start
+ }
+ fn is_empty(&self) -> bool {
+ self.alive.is_empty()
+ }
+}
+
+#[stable(feature = "array_value_iter_impls", since = "1.40.0")]
+impl<T, const N: usize> FusedIterator for IntoIter<T, N> {}
+
+// The iterator indeed reports the correct length. The number of "alive"
+// elements (that will still be yielded) is the length of the range `alive`.
+// This range is decremented in length in either `next` or `next_back`. It is
+// always decremented by 1 in those methods, but only if `Some(_)` is returned.
+#[stable(feature = "array_value_iter_impls", since = "1.40.0")]
+unsafe impl<T, const N: usize> TrustedLen for IntoIter<T, N> {}
+
+#[stable(feature = "array_value_iter_impls", since = "1.40.0")]
+impl<T: Clone, const N: usize> Clone for IntoIter<T, N> {
+ fn clone(&self) -> Self {
+ // Note, we don't really need to match the exact same alive range, so
+ // we can just clone into offset 0 regardless of where `self` is.
+ let mut new = Self { data: MaybeUninit::uninit_array(), alive: 0..0 };
+
+ // Clone all alive elements.
+ for (src, dst) in iter::zip(self.as_slice(), &mut new.data) {
+ // Write a clone into the new array, then update its alive range.
+ // If cloning panics, we'll correctly drop the previous items.
+ dst.write(src.clone());
+ new.alive.end += 1;
+ }
+
+ new
+ }
+}
+
+#[stable(feature = "array_value_iter_impls", since = "1.40.0")]
+impl<T: fmt::Debug, const N: usize> fmt::Debug for IntoIter<T, N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Only print the elements that were not yielded yet: we cannot
+ // access the yielded elements anymore.
+ f.debug_tuple("IntoIter").field(&self.as_slice()).finish()
+ }
+}
diff --git a/library/core/src/array/mod.rs b/library/core/src/array/mod.rs
new file mode 100644
index 000000000..c9823a136
--- /dev/null
+++ b/library/core/src/array/mod.rs
@@ -0,0 +1,872 @@
+//! Helper functions and types for fixed-length arrays.
+//!
+//! *[See also the array primitive type](array).*
+
+#![stable(feature = "core_array", since = "1.36.0")]
+
+use crate::borrow::{Borrow, BorrowMut};
+use crate::cmp::Ordering;
+use crate::convert::{Infallible, TryFrom};
+use crate::fmt;
+use crate::hash::{self, Hash};
+use crate::iter::TrustedLen;
+use crate::mem::{self, MaybeUninit};
+use crate::ops::{
+ ChangeOutputType, ControlFlow, FromResidual, Index, IndexMut, NeverShortCircuit, Residual, Try,
+};
+use crate::slice::{Iter, IterMut};
+
+mod equality;
+mod iter;
+
+#[stable(feature = "array_value_iter", since = "1.51.0")]
+pub use iter::IntoIter;
+
+/// Creates an array `[T; N]` where each array element `T` is returned by the `cb` call.
+///
+/// # Arguments
+///
+/// * `cb`: Callback where the passed argument is the current array index.
+///
+/// # Example
+///
+/// ```rust
+/// let array = core::array::from_fn(|i| i);
+/// assert_eq!(array, [0, 1, 2, 3, 4]);
+/// ```
+#[inline]
+#[stable(feature = "array_from_fn", since = "1.63.0")]
+pub fn from_fn<T, const N: usize, F>(mut cb: F) -> [T; N]
+where
+ F: FnMut(usize) -> T,
+{
+ let mut idx = 0;
+ [(); N].map(|_| {
+ let res = cb(idx);
+ idx += 1;
+ res
+ })
+}
+
+/// Creates an array `[T; N]` where each fallible array element `T` is returned by the `cb` call.
+/// Unlike [`from_fn`], where the element creation can't fail, this version will return an error
+/// if any element creation was unsuccessful.
+///
+/// The return type of this function depends on the return type of the closure.
+/// If you return `Result<T, E>` from the closure, you'll get a `Result<[T; N], E>`.
+/// If you return `Option<T>` from the closure, you'll get an `Option<[T; N]>`.
+///
+/// # Arguments
+///
+/// * `cb`: Callback where the passed argument is the current array index.
+///
+/// # Example
+///
+/// ```rust
+/// #![feature(array_try_from_fn)]
+///
+/// let array: Result<[u8; 5], _> = std::array::try_from_fn(|i| i.try_into());
+/// assert_eq!(array, Ok([0, 1, 2, 3, 4]));
+///
+/// let array: Result<[i8; 200], _> = std::array::try_from_fn(|i| i.try_into());
+/// assert!(array.is_err());
+///
+/// let array: Option<[_; 4]> = std::array::try_from_fn(|i| i.checked_add(100));
+/// assert_eq!(array, Some([100, 101, 102, 103]));
+///
+/// let array: Option<[_; 4]> = std::array::try_from_fn(|i| i.checked_sub(100));
+/// assert_eq!(array, None);
+/// ```
+#[inline]
+#[unstable(feature = "array_try_from_fn", issue = "89379")]
+pub fn try_from_fn<R, const N: usize, F>(cb: F) -> ChangeOutputType<R, [R::Output; N]>
+where
+ F: FnMut(usize) -> R,
+ R: Try,
+ R::Residual: Residual<[R::Output; N]>,
+{
+ // SAFETY: we know for certain that this iterator will yield exactly `N`
+ // items.
+ unsafe { try_collect_into_array_unchecked(&mut (0..N).map(cb)) }
+}
+
+/// Converts a reference to `T` into a reference to an array of length 1 (without copying).
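+///
+/// For example (a small illustrative use):
+///
+/// ```
+/// use std::array;
+///
+/// let x = 5;
+/// let arr: &[i32; 1] = array::from_ref(&x);
+/// assert_eq!(arr, &[5]);
+/// ```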
+#[stable(feature = "array_from_ref", since = "1.53.0")]
+#[rustc_const_stable(feature = "const_array_from_ref_shared", since = "1.63.0")]
+pub const fn from_ref<T>(s: &T) -> &[T; 1] {
+ // SAFETY: Converting `&T` to `&[T; 1]` is sound.
+ unsafe { &*(s as *const T).cast::<[T; 1]>() }
+}
+
+/// Converts a mutable reference to `T` into a mutable reference to an array of length 1 (without copying).
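+///
+/// For example (a small illustrative use):
+///
+/// ```
+/// use std::array;
+///
+/// let mut x = 5;
+/// let arr: &mut [i32; 1] = array::from_mut(&mut x);
+/// arr[0] += 1;
+/// assert_eq!(x, 6);
+/// ```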
+#[stable(feature = "array_from_ref", since = "1.53.0")]
+#[rustc_const_unstable(feature = "const_array_from_ref", issue = "90206")]
+pub const fn from_mut<T>(s: &mut T) -> &mut [T; 1] {
+ // SAFETY: Converting `&mut T` to `&mut [T; 1]` is sound.
+ unsafe { &mut *(s as *mut T).cast::<[T; 1]>() }
+}
+
+/// The error type returned when a conversion from a slice to an array fails.
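+///
+/// For example (illustrative), converting from a slice of the wrong length fails:
+///
+/// ```
+/// let slice: &[u8] = &[1, 2, 3];
+/// let result: Result<[u8; 4], std::array::TryFromSliceError> = slice.try_into();
+/// assert!(result.is_err());
+/// ```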
+#[stable(feature = "try_from", since = "1.34.0")]
+#[derive(Debug, Copy, Clone)]
+pub struct TryFromSliceError(());
+
+#[stable(feature = "core_array", since = "1.36.0")]
+impl fmt::Display for TryFromSliceError {
+ #[inline]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self.__description(), f)
+ }
+}
+
+impl TryFromSliceError {
+ #[unstable(
+ feature = "array_error_internals",
+ reason = "available through Error trait and this method should not \
+ be exposed publicly",
+ issue = "none"
+ )]
+ #[inline]
+ #[doc(hidden)]
+ pub fn __description(&self) -> &str {
+ "could not convert slice to array"
+ }
+}
+
+#[stable(feature = "try_from_slice_error", since = "1.36.0")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl const From<Infallible> for TryFromSliceError {
+ fn from(x: Infallible) -> TryFromSliceError {
+ match x {}
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, const N: usize> AsRef<[T]> for [T; N] {
+ #[inline]
+ fn as_ref(&self) -> &[T] {
+ &self[..]
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, const N: usize> AsMut<[T]> for [T; N] {
+ #[inline]
+ fn as_mut(&mut self) -> &mut [T] {
+ &mut self[..]
+ }
+}
+
+#[stable(feature = "array_borrow", since = "1.4.0")]
+#[rustc_const_unstable(feature = "const_borrow", issue = "91522")]
+impl<T, const N: usize> const Borrow<[T]> for [T; N] {
+ fn borrow(&self) -> &[T] {
+ self
+ }
+}
+
+#[stable(feature = "array_borrow", since = "1.4.0")]
+#[rustc_const_unstable(feature = "const_borrow", issue = "91522")]
+impl<T, const N: usize> const BorrowMut<[T]> for [T; N] {
+ fn borrow_mut(&mut self) -> &mut [T] {
+ self
+ }
+}
+
+#[stable(feature = "try_from", since = "1.34.0")]
+impl<T, const N: usize> TryFrom<&[T]> for [T; N]
+where
+ T: Copy,
+{
+ type Error = TryFromSliceError;
+
+ fn try_from(slice: &[T]) -> Result<[T; N], TryFromSliceError> {
+ <&Self>::try_from(slice).map(|r| *r)
+ }
+}
+
+#[stable(feature = "try_from_mut_slice_to_array", since = "1.59.0")]
+impl<T, const N: usize> TryFrom<&mut [T]> for [T; N]
+where
+ T: Copy,
+{
+ type Error = TryFromSliceError;
+
+ fn try_from(slice: &mut [T]) -> Result<[T; N], TryFromSliceError> {
+ <Self>::try_from(&*slice)
+ }
+}
+
+#[stable(feature = "try_from", since = "1.34.0")]
+impl<'a, T, const N: usize> TryFrom<&'a [T]> for &'a [T; N] {
+ type Error = TryFromSliceError;
+
+ fn try_from(slice: &[T]) -> Result<&[T; N], TryFromSliceError> {
+ if slice.len() == N {
+ let ptr = slice.as_ptr() as *const [T; N];
+ // SAFETY: ok because we just checked that the length fits
+ unsafe { Ok(&*ptr) }
+ } else {
+ Err(TryFromSliceError(()))
+ }
+ }
+}
+
+#[stable(feature = "try_from", since = "1.34.0")]
+impl<'a, T, const N: usize> TryFrom<&'a mut [T]> for &'a mut [T; N] {
+ type Error = TryFromSliceError;
+
+ fn try_from(slice: &mut [T]) -> Result<&mut [T; N], TryFromSliceError> {
+ if slice.len() == N {
+ let ptr = slice.as_mut_ptr() as *mut [T; N];
+ // SAFETY: ok because we just checked that the length fits
+ unsafe { Ok(&mut *ptr) }
+ } else {
+ Err(TryFromSliceError(()))
+ }
+ }
+}
+
+/// The hash of an array is the same as that of the corresponding slice,
+/// as required by the `Borrow` implementation.
+///
+/// ```
+/// #![feature(build_hasher_simple_hash_one)]
+/// use std::hash::BuildHasher;
+///
+/// let b = std::collections::hash_map::RandomState::new();
+/// let a: [u8; 3] = [0xa8, 0x3c, 0x09];
+/// let s: &[u8] = &[0xa8, 0x3c, 0x09];
+/// assert_eq!(b.hash_one(a), b.hash_one(s));
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Hash, const N: usize> Hash for [T; N] {
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ Hash::hash(&self[..], state)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: fmt::Debug, const N: usize> fmt::Debug for [T; N] {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&&self[..], f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T, const N: usize> IntoIterator for &'a [T; N] {
+ type Item = &'a T;
+ type IntoIter = Iter<'a, T>;
+
+ fn into_iter(self) -> Iter<'a, T> {
+ self.iter()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T, const N: usize> IntoIterator for &'a mut [T; N] {
+ type Item = &'a mut T;
+ type IntoIter = IterMut<'a, T>;
+
+ fn into_iter(self) -> IterMut<'a, T> {
+ self.iter_mut()
+ }
+}
+
+#[stable(feature = "index_trait_on_arrays", since = "1.50.0")]
+#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+impl<T, I, const N: usize> const Index<I> for [T; N]
+where
+ [T]: ~const Index<I>,
+{
+ type Output = <[T] as Index<I>>::Output;
+
+ #[inline]
+ fn index(&self, index: I) -> &Self::Output {
+ Index::index(self as &[T], index)
+ }
+}
+
+#[stable(feature = "index_trait_on_arrays", since = "1.50.0")]
+#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+impl<T, I, const N: usize> const IndexMut<I> for [T; N]
+where
+ [T]: ~const IndexMut<I>,
+{
+ #[inline]
+ fn index_mut(&mut self, index: I) -> &mut Self::Output {
+ IndexMut::index_mut(self as &mut [T], index)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: PartialOrd, const N: usize> PartialOrd for [T; N] {
+ #[inline]
+ fn partial_cmp(&self, other: &[T; N]) -> Option<Ordering> {
+ PartialOrd::partial_cmp(&&self[..], &&other[..])
+ }
+ #[inline]
+ fn lt(&self, other: &[T; N]) -> bool {
+ PartialOrd::lt(&&self[..], &&other[..])
+ }
+ #[inline]
+ fn le(&self, other: &[T; N]) -> bool {
+ PartialOrd::le(&&self[..], &&other[..])
+ }
+ #[inline]
+ fn ge(&self, other: &[T; N]) -> bool {
+ PartialOrd::ge(&&self[..], &&other[..])
+ }
+ #[inline]
+ fn gt(&self, other: &[T; N]) -> bool {
+ PartialOrd::gt(&&self[..], &&other[..])
+ }
+}
+
+/// Implements comparison of arrays [lexicographically](Ord#lexicographical-comparison).
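+///
+/// For example, the first unequal pair of elements decides the result:
+///
+/// ```
+/// assert!([1, 2, 3] < [1, 3, 0]);
+/// assert!([1, 2] <= [1, 2]);
+/// ```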
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord, const N: usize> Ord for [T; N] {
+ #[inline]
+ fn cmp(&self, other: &[T; N]) -> Ordering {
+ Ord::cmp(&&self[..], &&other[..])
+ }
+}
+
+#[stable(feature = "copy_clone_array_lib", since = "1.58.0")]
+impl<T: Copy, const N: usize> Copy for [T; N] {}
+
+#[stable(feature = "copy_clone_array_lib", since = "1.58.0")]
+impl<T: Clone, const N: usize> Clone for [T; N] {
+ #[inline]
+ fn clone(&self) -> Self {
+ SpecArrayClone::clone(self)
+ }
+
+ #[inline]
+ fn clone_from(&mut self, other: &Self) {
+ self.clone_from_slice(other);
+ }
+}
+
+trait SpecArrayClone: Clone {
+ fn clone<const N: usize>(array: &[Self; N]) -> [Self; N];
+}
+
+impl<T: Clone> SpecArrayClone for T {
+ #[inline]
+ default fn clone<const N: usize>(array: &[T; N]) -> [T; N] {
+ // SAFETY: we know for certain that this iterator will yield exactly `N`
+ // items.
+ unsafe { collect_into_array_unchecked(&mut array.iter().cloned()) }
+ }
+}
+
+impl<T: Copy> SpecArrayClone for T {
+ #[inline]
+ fn clone<const N: usize>(array: &[T; N]) -> [T; N] {
+ *array
+ }
+}
+
+// The Default impls cannot be done with const generics because `[T; 0]` doesn't
+// require Default to be implemented, and having different impl blocks for
+// different numbers isn't supported yet.
+
+macro_rules! array_impl_default {
+ {$n:expr, $t:ident $($ts:ident)*} => {
+ #[stable(since = "1.4.0", feature = "array_default")]
+ impl<T> Default for [T; $n] where T: Default {
+ fn default() -> [T; $n] {
+ [$t::default(), $($ts::default()),*]
+ }
+ }
+ array_impl_default!{($n - 1), $($ts)*}
+ };
+ {$n:expr,} => {
+ #[stable(since = "1.4.0", feature = "array_default")]
+ #[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
+ impl<T> const Default for [T; $n] {
+ fn default() -> [T; $n] { [] }
+ }
+ };
+}
+
+array_impl_default! {32, T T T T T T T T T T T T T T T T T T T T T T T T T T T T T T T T}
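+
+// For illustration: `<[i32; 3]>::default()` yields `[0, 0, 0]`, while the
+// zero-length arm above means the following compiles even though `NoDefault`
+// has no `Default` impl:
+//
+//     struct NoDefault;
+//     let empty: [NoDefault; 0] = Default::default();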
+
+impl<T, const N: usize> [T; N] {
+ /// Returns an array of the same size as `self`, with function `f` applied to each element
+ /// in order.
+ ///
+ /// If you don't necessarily need a new fixed-size array, consider using
+ /// [`Iterator::map`] instead.
+ ///
+ ///
+ /// # Note on performance and stack usage
+ ///
+ /// Unfortunately, usages of this method are currently not always optimized
+ /// as well as they could be. This mainly concerns large arrays, as mapping
+    /// over small arrays seems to be optimized just fine. Also note that in
+ /// debug mode (i.e. without any optimizations), this method can use a lot
+ /// of stack space (a few times the size of the array or more).
+ ///
+ /// Therefore, in performance-critical code, try to avoid using this method
+ /// on large arrays or check the emitted code. Also try to avoid chained
+ /// maps (e.g. `arr.map(...).map(...)`).
+ ///
+ /// In many cases, you can instead use [`Iterator::map`] by calling `.iter()`
+ /// or `.into_iter()` on your array. `[T; N]::map` is only necessary if you
+ /// really need a new array of the same size as the result. Rust's lazy
+ /// iterators tend to get optimized very well.
+ ///
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = [1, 2, 3];
+ /// let y = x.map(|v| v + 1);
+ /// assert_eq!(y, [2, 3, 4]);
+ ///
+ /// let x = [1, 2, 3];
+ /// let mut temp = 0;
+ /// let y = x.map(|v| { temp += 1; v * temp });
+ /// assert_eq!(y, [1, 4, 9]);
+ ///
+ /// let x = ["Ferris", "Bueller's", "Day", "Off"];
+ /// let y = x.map(|v| v.len());
+ /// assert_eq!(y, [6, 9, 3, 3]);
+ /// ```
+ #[stable(feature = "array_map", since = "1.55.0")]
+ pub fn map<F, U>(self, f: F) -> [U; N]
+ where
+ F: FnMut(T) -> U,
+ {
+ // SAFETY: we know for certain that this iterator will yield exactly `N`
+ // items.
+ unsafe { collect_into_array_unchecked(&mut IntoIterator::into_iter(self).map(f)) }
+ }
+
+    /// Applies a fallible function `f` to each element of the array `self` in
+    /// order, returning either an array of the same size as `self` or the
+    /// first error encountered.
+ ///
+ /// The return type of this function depends on the return type of the closure.
+    /// If you return `Result<T, E>` from the closure, you'll get a `Result<[T; N], E>`.
+ /// If you return `Option<T>` from the closure, you'll get an `Option<[T; N]>`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(array_try_map)]
+ /// let a = ["1", "2", "3"];
+ /// let b = a.try_map(|v| v.parse::<u32>()).unwrap().map(|v| v + 1);
+ /// assert_eq!(b, [2, 3, 4]);
+ ///
+ /// let a = ["1", "2a", "3"];
+ /// let b = a.try_map(|v| v.parse::<u32>());
+ /// assert!(b.is_err());
+ ///
+ /// use std::num::NonZeroU32;
+ /// let z = [1, 2, 0, 3, 4];
+ /// assert_eq!(z.try_map(NonZeroU32::new), None);
+ /// let a = [1, 2, 3];
+ /// let b = a.try_map(NonZeroU32::new);
+ /// let c = b.map(|x| x.map(NonZeroU32::get));
+ /// assert_eq!(c, Some(a));
+ /// ```
+ #[unstable(feature = "array_try_map", issue = "79711")]
+ pub fn try_map<F, R>(self, f: F) -> ChangeOutputType<R, [R::Output; N]>
+ where
+ F: FnMut(T) -> R,
+ R: Try,
+ R::Residual: Residual<[R::Output; N]>,
+ {
+ // SAFETY: we know for certain that this iterator will yield exactly `N`
+ // items.
+ unsafe { try_collect_into_array_unchecked(&mut IntoIterator::into_iter(self).map(f)) }
+ }
+
+ /// 'Zips up' two arrays into a single array of pairs.
+ ///
+ /// `zip()` returns a new array where every element is a tuple where the
+ /// first element comes from the first array, and the second element comes
+ /// from the second array. In other words, it zips two arrays together,
+ /// into a single one.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(array_zip)]
+ /// let x = [1, 2, 3];
+ /// let y = [4, 5, 6];
+ /// let z = x.zip(y);
+ /// assert_eq!(z, [(1, 4), (2, 5), (3, 6)]);
+ /// ```
+ #[unstable(feature = "array_zip", issue = "80094")]
+ pub fn zip<U>(self, rhs: [U; N]) -> [(T, U); N] {
+ let mut iter = IntoIterator::into_iter(self).zip(rhs);
+
+ // SAFETY: we know for certain that this iterator will yield exactly `N`
+ // items.
+ unsafe { collect_into_array_unchecked(&mut iter) }
+ }
+
+ /// Returns a slice containing the entire array. Equivalent to `&s[..]`.
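+    ///
+    /// For example:
+    ///
+    /// ```
+    /// let array = [1, 2, 3];
+    /// assert_eq!(array.as_slice(), &[1, 2, 3]);
+    /// ```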
+ #[stable(feature = "array_as_slice", since = "1.57.0")]
+ #[rustc_const_stable(feature = "array_as_slice", since = "1.57.0")]
+ pub const fn as_slice(&self) -> &[T] {
+ self
+ }
+
+ /// Returns a mutable slice containing the entire array. Equivalent to
+ /// `&mut s[..]`.
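+    ///
+    /// For example:
+    ///
+    /// ```
+    /// let mut array = [1, 2, 3];
+    /// array.as_mut_slice()[0] = 7;
+    /// assert_eq!(array, [7, 2, 3]);
+    /// ```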
+ #[stable(feature = "array_as_slice", since = "1.57.0")]
+ pub fn as_mut_slice(&mut self) -> &mut [T] {
+ self
+ }
+
+ /// Borrows each element and returns an array of references with the same
+ /// size as `self`.
+ ///
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// #![feature(array_methods)]
+ ///
+ /// let floats = [3.1, 2.7, -1.0];
+ /// let float_refs: [&f64; 3] = floats.each_ref();
+ /// assert_eq!(float_refs, [&3.1, &2.7, &-1.0]);
+ /// ```
+ ///
+ /// This method is particularly useful if combined with other methods, like
+ /// [`map`](#method.map). This way, you can avoid moving the original
+ /// array if its elements are not [`Copy`].
+ ///
+ /// ```
+ /// #![feature(array_methods)]
+ ///
+ /// let strings = ["Ferris".to_string(), "♥".to_string(), "Rust".to_string()];
+ /// let is_ascii = strings.each_ref().map(|s| s.is_ascii());
+ /// assert_eq!(is_ascii, [true, false, true]);
+ ///
+ /// // We can still access the original array: it has not been moved.
+ /// assert_eq!(strings.len(), 3);
+ /// ```
+ #[unstable(feature = "array_methods", issue = "76118")]
+ pub fn each_ref(&self) -> [&T; N] {
+ // SAFETY: we know for certain that this iterator will yield exactly `N`
+ // items.
+ unsafe { collect_into_array_unchecked(&mut self.iter()) }
+ }
+
+ /// Borrows each element mutably and returns an array of mutable references
+ /// with the same size as `self`.
+ ///
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// #![feature(array_methods)]
+ ///
+ /// let mut floats = [3.1, 2.7, -1.0];
+ /// let float_refs: [&mut f64; 3] = floats.each_mut();
+ /// *float_refs[0] = 0.0;
+ /// assert_eq!(float_refs, [&mut 0.0, &mut 2.7, &mut -1.0]);
+ /// assert_eq!(floats, [0.0, 2.7, -1.0]);
+ /// ```
+ #[unstable(feature = "array_methods", issue = "76118")]
+ pub fn each_mut(&mut self) -> [&mut T; N] {
+ // SAFETY: we know for certain that this iterator will yield exactly `N`
+ // items.
+ unsafe { collect_into_array_unchecked(&mut self.iter_mut()) }
+ }
+
+ /// Divides one array reference into two at an index.
+ ///
+ /// The first will contain all indices from `[0, M)` (excluding
+ /// the index `M` itself) and the second will contain all
+ /// indices from `[M, N)` (excluding the index `N` itself).
+ ///
+ /// # Panics
+ ///
+ /// Panics if `M > N`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(split_array)]
+ ///
+ /// let v = [1, 2, 3, 4, 5, 6];
+ ///
+ /// {
+ /// let (left, right) = v.split_array_ref::<0>();
+ /// assert_eq!(left, &[]);
+ /// assert_eq!(right, &[1, 2, 3, 4, 5, 6]);
+ /// }
+ ///
+ /// {
+ /// let (left, right) = v.split_array_ref::<2>();
+ /// assert_eq!(left, &[1, 2]);
+ /// assert_eq!(right, &[3, 4, 5, 6]);
+ /// }
+ ///
+ /// {
+ /// let (left, right) = v.split_array_ref::<6>();
+ /// assert_eq!(left, &[1, 2, 3, 4, 5, 6]);
+ /// assert_eq!(right, &[]);
+ /// }
+ /// ```
+ #[unstable(
+ feature = "split_array",
+ reason = "return type should have array as 2nd element",
+ issue = "90091"
+ )]
+ #[inline]
+ pub fn split_array_ref<const M: usize>(&self) -> (&[T; M], &[T]) {
+ (&self[..]).split_array_ref::<M>()
+ }
+
+ /// Divides one mutable array reference into two at an index.
+ ///
+ /// The first will contain all indices from `[0, M)` (excluding
+ /// the index `M` itself) and the second will contain all
+ /// indices from `[M, N)` (excluding the index `N` itself).
+ ///
+ /// # Panics
+ ///
+ /// Panics if `M > N`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(split_array)]
+ ///
+ /// let mut v = [1, 0, 3, 0, 5, 6];
+ /// let (left, right) = v.split_array_mut::<2>();
+ /// assert_eq!(left, &mut [1, 0][..]);
+ /// assert_eq!(right, &mut [3, 0, 5, 6]);
+ /// left[1] = 2;
+ /// right[1] = 4;
+ /// assert_eq!(v, [1, 2, 3, 4, 5, 6]);
+ /// ```
+ #[unstable(
+ feature = "split_array",
+ reason = "return type should have array as 2nd element",
+ issue = "90091"
+ )]
+ #[inline]
+ pub fn split_array_mut<const M: usize>(&mut self) -> (&mut [T; M], &mut [T]) {
+ (&mut self[..]).split_array_mut::<M>()
+ }
+
+ /// Divides one array reference into two at an index from the end.
+ ///
+ /// The first will contain all indices from `[0, N - M)` (excluding
+ /// the index `N - M` itself) and the second will contain all
+ /// indices from `[N - M, N)` (excluding the index `N` itself).
+ ///
+ /// # Panics
+ ///
+ /// Panics if `M > N`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(split_array)]
+ ///
+ /// let v = [1, 2, 3, 4, 5, 6];
+ ///
+ /// {
+ /// let (left, right) = v.rsplit_array_ref::<0>();
+ /// assert_eq!(left, &[1, 2, 3, 4, 5, 6]);
+ /// assert_eq!(right, &[]);
+ /// }
+ ///
+ /// {
+ /// let (left, right) = v.rsplit_array_ref::<2>();
+ /// assert_eq!(left, &[1, 2, 3, 4]);
+ /// assert_eq!(right, &[5, 6]);
+ /// }
+ ///
+ /// {
+ /// let (left, right) = v.rsplit_array_ref::<6>();
+ /// assert_eq!(left, &[]);
+ /// assert_eq!(right, &[1, 2, 3, 4, 5, 6]);
+ /// }
+ /// ```
+ #[unstable(
+ feature = "split_array",
+ reason = "return type should have array as 2nd element",
+ issue = "90091"
+ )]
+ #[inline]
+ pub fn rsplit_array_ref<const M: usize>(&self) -> (&[T], &[T; M]) {
+ (&self[..]).rsplit_array_ref::<M>()
+ }
+
+ /// Divides one mutable array reference into two at an index from the end.
+ ///
+ /// The first will contain all indices from `[0, N - M)` (excluding
+ /// the index `N - M` itself) and the second will contain all
+ /// indices from `[N - M, N)` (excluding the index `N` itself).
+ ///
+ /// # Panics
+ ///
+ /// Panics if `M > N`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(split_array)]
+ ///
+ /// let mut v = [1, 0, 3, 0, 5, 6];
+ /// let (left, right) = v.rsplit_array_mut::<4>();
+ /// assert_eq!(left, &mut [1, 0]);
+ /// assert_eq!(right, &mut [3, 0, 5, 6][..]);
+ /// left[1] = 2;
+ /// right[1] = 4;
+ /// assert_eq!(v, [1, 2, 3, 4, 5, 6]);
+ /// ```
+ #[unstable(
+ feature = "split_array",
+ reason = "return type should have array as 2nd element",
+ issue = "90091"
+ )]
+ #[inline]
+ pub fn rsplit_array_mut<const M: usize>(&mut self) -> (&mut [T], &mut [T; M]) {
+ (&mut self[..]).rsplit_array_mut::<M>()
+ }
+}
+
+/// Pulls `N` items from `iter` and returns them as an array. If the iterator
+/// yields fewer than `N` items, this function exhibits undefined behavior.
+///
+/// See [`try_collect_into_array`] for more information.
+///
+///
+/// # Safety
+///
+/// It is up to the caller to guarantee that `iter` yields at least `N` items.
+/// Violating this condition causes undefined behavior.
+unsafe fn try_collect_into_array_unchecked<I, T, R, const N: usize>(iter: &mut I) -> R::TryType
+where
+ // Note: `TrustedLen` here is somewhat of an experiment. This is just an
+ // internal function, so feel free to remove if this bound turns out to be a
+ // bad idea. In that case, remember to also remove the lower bound
+ // `debug_assert!` below!
+ I: Iterator + TrustedLen,
+ I::Item: Try<Output = T, Residual = R>,
+ R: Residual<[T; N]>,
+{
+ debug_assert!(N <= iter.size_hint().1.unwrap_or(usize::MAX));
+ debug_assert!(N <= iter.size_hint().0);
+
+ // SAFETY: covered by the function contract.
+ unsafe { try_collect_into_array(iter).unwrap_unchecked() }
+}
+
+// Infallible version of `try_collect_into_array_unchecked`.
+unsafe fn collect_into_array_unchecked<I, const N: usize>(iter: &mut I) -> [I::Item; N]
+where
+ I: Iterator + TrustedLen,
+{
+ let mut map = iter.map(NeverShortCircuit);
+
+ // SAFETY: The same safety considerations w.r.t. the iterator length
+ // apply for `try_collect_into_array_unchecked` as for
+ // `collect_into_array_unchecked`
+ match unsafe { try_collect_into_array_unchecked(&mut map) } {
+ NeverShortCircuit(array) => array,
+ }
+}
+
+/// Pulls `N` items from `iter` and returns them as an array. If the iterator
+/// yields fewer than `N` items, `Err` is returned containing an iterator over
+/// the already yielded items.
+///
+/// Since the iterator is passed as a mutable reference and this function calls
+/// `next` at most `N` times, the iterator can still be used afterwards to
+/// retrieve the remaining items.
+///
+/// If `iter.next()` panics, all items already yielded by the iterator are
+/// dropped.
+#[inline]
+fn try_collect_into_array<I, T, R, const N: usize>(
+ iter: &mut I,
+) -> Result<R::TryType, IntoIter<T, N>>
+where
+ I: Iterator,
+ I::Item: Try<Output = T, Residual = R>,
+ R: Residual<[T; N]>,
+{
+ if N == 0 {
+ // SAFETY: An empty array is always inhabited and has no validity invariants.
+ return Ok(Try::from_output(unsafe { mem::zeroed() }));
+ }
+
+ struct Guard<'a, T, const N: usize> {
+ array_mut: &'a mut [MaybeUninit<T>; N],
+ initialized: usize,
+ }
+
+ impl<T, const N: usize> Drop for Guard<'_, T, N> {
+ fn drop(&mut self) {
+ debug_assert!(self.initialized <= N);
+
+ // SAFETY: this slice will contain only initialized objects.
+ unsafe {
+ crate::ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(
+ &mut self.array_mut.get_unchecked_mut(..self.initialized),
+ ));
+ }
+ }
+ }
+
+ let mut array = MaybeUninit::uninit_array::<N>();
+ let mut guard = Guard { array_mut: &mut array, initialized: 0 };
+
+ for _ in 0..N {
+ match iter.next() {
+ Some(item_rslt) => {
+ let item = match item_rslt.branch() {
+ ControlFlow::Break(r) => {
+ return Ok(FromResidual::from_residual(r));
+ }
+ ControlFlow::Continue(elem) => elem,
+ };
+
+ // SAFETY: `guard.initialized` starts at 0, is increased by one in the
+ // loop and the loop is aborted once it reaches N (which is
+ // `array.len()`).
+ unsafe {
+ guard.array_mut.get_unchecked_mut(guard.initialized).write(item);
+ }
+ guard.initialized += 1;
+ }
+ None => {
+ let alive = 0..guard.initialized;
+ mem::forget(guard);
+ // SAFETY: `array` was initialized with exactly `initialized`
+ // number of elements.
+ return Err(unsafe { IntoIter::new_unchecked(array, alive) });
+ }
+ }
+ }
+
+ mem::forget(guard);
+ // SAFETY: All elements of the array were populated in the loop above.
+ let output = unsafe { MaybeUninit::array_assume_init(array) };
+ Ok(Try::from_output(output))
+}
+
+/// Returns the next chunk of `N` items from the iterator or errors with an
+/// iterator over the remainder. Used for `Iterator::next_chunk`.
+#[inline]
+pub(crate) fn iter_next_chunk<I, const N: usize>(
+ iter: &mut I,
+) -> Result<[I::Item; N], IntoIter<I::Item, N>>
+where
+ I: Iterator,
+{
+ let mut map = iter.map(NeverShortCircuit);
+ try_collect_into_array(&mut map).map(|NeverShortCircuit(arr)| arr)
+}
diff --git a/library/core/src/ascii.rs b/library/core/src/ascii.rs
new file mode 100644
index 000000000..8a4cb78cc
--- /dev/null
+++ b/library/core/src/ascii.rs
@@ -0,0 +1,151 @@
+//! Operations on ASCII strings and characters.
+//!
+//! Most string operations in Rust act on UTF-8 strings. However, at times it
+//! makes more sense to only consider the ASCII character set for a specific
+//! operation.
+//!
+//! The [`escape_default`] function provides an iterator over the bytes of an
+//! escaped version of the character given.
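+//!
+//! For example, a tab byte escapes to the two bytes `\` and `t`:
+//!
+//! ```
+//! let escaped: Vec<u8> = std::ascii::escape_default(b'\t').collect();
+//! assert_eq!(escaped, b"\\t");
+//! ```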
+
+#![stable(feature = "core_ascii", since = "1.26.0")]
+
+use crate::fmt;
+use crate::iter::FusedIterator;
+use crate::ops::Range;
+use crate::str::from_utf8_unchecked;
+
+/// An iterator over the escaped version of a byte.
+///
+/// This `struct` is created by the [`escape_default`] function. See its
+/// documentation for more.
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Clone)]
+pub struct EscapeDefault {
+ range: Range<u8>,
+ data: [u8; 4],
+}
+
+/// Returns an iterator that produces an escaped version of a `u8`.
+///
+/// The default is chosen with a bias toward producing literals that are
+/// legal in a variety of languages, including C++11 and similar C-family
+/// languages. The exact rules are:
+///
+/// * Tab is escaped as `\t`.
+/// * Carriage return is escaped as `\r`.
+/// * Line feed is escaped as `\n`.
+/// * Single quote is escaped as `\'`.
+/// * Double quote is escaped as `\"`.
+/// * Backslash is escaped as `\\`.
+/// * Any character in the 'printable ASCII' range `0x20` .. `0x7e`
+/// inclusive is not escaped.
+/// * Any other chars are given hex escapes of the form '\xNN'.
+/// * Unicode escapes are never generated by this function.
+///
+/// # Examples
+///
+/// ```
+/// use std::ascii;
+///
+/// let escaped = ascii::escape_default(b'0').next().unwrap();
+/// assert_eq!(b'0', escaped);
+///
+/// let mut escaped = ascii::escape_default(b'\t');
+///
+/// assert_eq!(b'\\', escaped.next().unwrap());
+/// assert_eq!(b't', escaped.next().unwrap());
+///
+/// let mut escaped = ascii::escape_default(b'\r');
+///
+/// assert_eq!(b'\\', escaped.next().unwrap());
+/// assert_eq!(b'r', escaped.next().unwrap());
+///
+/// let mut escaped = ascii::escape_default(b'\n');
+///
+/// assert_eq!(b'\\', escaped.next().unwrap());
+/// assert_eq!(b'n', escaped.next().unwrap());
+///
+/// let mut escaped = ascii::escape_default(b'\'');
+///
+/// assert_eq!(b'\\', escaped.next().unwrap());
+/// assert_eq!(b'\'', escaped.next().unwrap());
+///
+/// let mut escaped = ascii::escape_default(b'"');
+///
+/// assert_eq!(b'\\', escaped.next().unwrap());
+/// assert_eq!(b'"', escaped.next().unwrap());
+///
+/// let mut escaped = ascii::escape_default(b'\\');
+///
+/// assert_eq!(b'\\', escaped.next().unwrap());
+/// assert_eq!(b'\\', escaped.next().unwrap());
+///
+/// let mut escaped = ascii::escape_default(b'\x9d');
+///
+/// assert_eq!(b'\\', escaped.next().unwrap());
+/// assert_eq!(b'x', escaped.next().unwrap());
+/// assert_eq!(b'9', escaped.next().unwrap());
+/// assert_eq!(b'd', escaped.next().unwrap());
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn escape_default(c: u8) -> EscapeDefault {
+ let (data, len) = match c {
+ b'\t' => ([b'\\', b't', 0, 0], 2),
+ b'\r' => ([b'\\', b'r', 0, 0], 2),
+ b'\n' => ([b'\\', b'n', 0, 0], 2),
+ b'\\' => ([b'\\', b'\\', 0, 0], 2),
+ b'\'' => ([b'\\', b'\'', 0, 0], 2),
+ b'"' => ([b'\\', b'"', 0, 0], 2),
+ b'\x20'..=b'\x7e' => ([c, 0, 0, 0], 1),
+ _ => {
+ let hex_digits: &[u8; 16] = b"0123456789abcdef";
+ ([b'\\', b'x', hex_digits[(c >> 4) as usize], hex_digits[(c & 0xf) as usize]], 4)
+ }
+ };
+
+ return EscapeDefault { range: 0..len, data };
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Iterator for EscapeDefault {
+ type Item = u8;
+
+ #[inline]
+ fn next(&mut self) -> Option<u8> {
+ self.range.next().map(|i| self.data[i as usize])
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.range.size_hint()
+ }
+ fn last(mut self) -> Option<u8> {
+ self.next_back()
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl DoubleEndedIterator for EscapeDefault {
+ fn next_back(&mut self) -> Option<u8> {
+ self.range.next_back().map(|i| self.data[i as usize])
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ExactSizeIterator for EscapeDefault {}
+#[stable(feature = "fused", since = "1.26.0")]
+impl FusedIterator for EscapeDefault {}
+
+#[stable(feature = "ascii_escape_display", since = "1.39.0")]
+impl fmt::Display for EscapeDefault {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // SAFETY: ok because `escape_default` created only valid utf-8 data
+ f.write_str(unsafe {
+ from_utf8_unchecked(&self.data[(self.range.start as usize)..(self.range.end as usize)])
+ })
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for EscapeDefault {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("EscapeDefault").finish_non_exhaustive()
+ }
+}
diff --git a/library/core/src/asserting.rs b/library/core/src/asserting.rs
new file mode 100644
index 000000000..212b637d3
--- /dev/null
+++ b/library/core/src/asserting.rs
@@ -0,0 +1,109 @@
+// Contains the machinery necessary to print useful `assert!` messages. Not intended for public
+// usage, not even nightly use-cases.
+//
+// Based on https://github.com/dtolnay/case-studies/tree/master/autoref-specialization. When
+// 'specialization' is robust enough (5 years? 10 years? Never?), `Capture` can be specialized
+// to [Printable].
+
+#![allow(missing_debug_implementations)]
+#![doc(hidden)]
+#![unstable(feature = "generic_assert_internals", issue = "44838")]
+
+use crate::{
+ fmt::{Debug, Formatter},
+ marker::PhantomData,
+};
+
+// ***** TryCapture - Generic *****
+
+/// Marker used by [Capture]
+#[unstable(feature = "generic_assert_internals", issue = "44838")]
+pub struct TryCaptureWithoutDebug;
+
+/// Catches an arbitrary `E` and modifies `to` accordingly
+#[unstable(feature = "generic_assert_internals", issue = "44838")]
+pub trait TryCaptureGeneric<E, M> {
+ /// Similar to [TryCapturePrintable] but generic to any `E`.
+ fn try_capture(&self, to: &mut Capture<E, M>);
+}
+
+impl<E> TryCaptureGeneric<E, TryCaptureWithoutDebug> for &Wrapper<&E> {
+ #[inline]
+ fn try_capture(&self, _: &mut Capture<E, TryCaptureWithoutDebug>) {}
+}
+
+impl<E> Debug for Capture<E, TryCaptureWithoutDebug> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), core::fmt::Error> {
+ f.write_str("N/A")
+ }
+}
+
+// ***** TryCapture - Printable *****
+
+/// Marker used by [Capture]
+#[unstable(feature = "generic_assert_internals", issue = "44838")]
+pub struct TryCaptureWithDebug;
+
+/// Catches an arbitrary `E: Printable` and modifies `to` accordingly
+#[unstable(feature = "generic_assert_internals", issue = "44838")]
+pub trait TryCapturePrintable<E, M> {
+    /// Similar to [TryCaptureGeneric] but specialized to any `E: Printable`.
+ fn try_capture(&self, to: &mut Capture<E, M>);
+}
+
+impl<E> TryCapturePrintable<E, TryCaptureWithDebug> for Wrapper<&E>
+where
+ E: Printable,
+{
+ #[inline]
+ fn try_capture(&self, to: &mut Capture<E, TryCaptureWithDebug>) {
+ to.elem = Some(*self.0);
+ }
+}
+
+impl<E> Debug for Capture<E, TryCaptureWithDebug>
+where
+ E: Printable,
+{
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), core::fmt::Error> {
+ match self.elem {
+ None => f.write_str("N/A"),
+ Some(ref value) => Debug::fmt(value, f),
+ }
+ }
+}
+
+// ***** Others *****
+
+/// All possible captured `assert!` elements
+///
+/// # Types
+///
+/// * `E`: **E**lement that is going to be displayed.
+/// * `M`: **M**arker used to differentiate [Capture]s in regards to [Debug].
+#[unstable(feature = "generic_assert_internals", issue = "44838")]
+pub struct Capture<E, M> {
+    // If None, then `E` does not implement [Printable] or `E` wasn't evaluated (`assert!( ... )`
+ // short-circuited).
+ //
+ // If Some, then `E` implements [Printable] and was evaluated.
+ pub elem: Option<E>,
+ phantom: PhantomData<M>,
+}
+
+impl<M, T> Capture<M, T> {
+ #[inline]
+ pub const fn new() -> Self {
+ Self { elem: None, phantom: PhantomData }
+ }
+}
+
+/// Necessary for the implementations of `TryCapture*`
+#[unstable(feature = "generic_assert_internals", issue = "44838")]
+pub struct Wrapper<T>(pub T);
+
+/// Tells which elements can be copied and displayed
+#[unstable(feature = "generic_assert_internals", issue = "44838")]
+pub trait Printable: Copy + Debug {}
+
+impl<T> Printable for T where T: Copy + Debug {}
diff --git a/library/core/src/async_iter/async_iter.rs b/library/core/src/async_iter/async_iter.rs
new file mode 100644
index 000000000..016a3685e
--- /dev/null
+++ b/library/core/src/async_iter/async_iter.rs
@@ -0,0 +1,111 @@
+use crate::ops::DerefMut;
+use crate::pin::Pin;
+use crate::task::{Context, Poll};
+
+/// An interface for dealing with asynchronous iterators.
+///
+/// This is the main async iterator trait. For more about the concept of async iterators
+/// generally, please see the [module-level documentation]. In particular, you
+/// may want to know how to [implement `AsyncIterator`][impl].
+///
+/// [module-level documentation]: index.html
+/// [impl]: index.html#implementing-async-iterator
+#[unstable(feature = "async_iterator", issue = "79024")]
+#[must_use = "async iterators do nothing unless polled"]
+#[doc(alias = "Stream")]
+pub trait AsyncIterator {
+ /// The type of items yielded by the async iterator.
+ type Item;
+
+ /// Attempt to pull out the next value of this async iterator, registering the
+ /// current task for wakeup if the value is not yet available, and returning
+ /// `None` if the async iterator is exhausted.
+ ///
+ /// # Return value
+ ///
+ /// There are several possible return values, each indicating a distinct
+ /// async iterator state:
+ ///
+ /// - `Poll::Pending` means that this async iterator's next value is not ready
+ /// yet. Implementations will ensure that the current task will be notified
+ /// when the next value may be ready.
+ ///
+ /// - `Poll::Ready(Some(val))` means that the async iterator has successfully
+ /// produced a value, `val`, and may produce further values on subsequent
+ /// `poll_next` calls.
+ ///
+ /// - `Poll::Ready(None)` means that the async iterator has terminated, and
+ /// `poll_next` should not be invoked again.
+ ///
+ /// # Panics
+ ///
+ /// Once an async iterator has finished (returned `Ready(None)` from `poll_next`), calling its
+ /// `poll_next` method again may panic, block forever, or cause other kinds of
+ /// problems; the `AsyncIterator` trait places no requirements on the effects of
+ /// such a call. However, as the `poll_next` method is not marked `unsafe`,
+ /// Rust's usual rules apply: calls must never cause undefined behavior
+ /// (memory corruption, incorrect use of `unsafe` functions, or the like),
+ /// regardless of the async iterator's state.
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>>;
+
+ /// Returns the bounds on the remaining length of the async iterator.
+ ///
+ /// Specifically, `size_hint()` returns a tuple where the first element
+ /// is the lower bound, and the second element is the upper bound.
+ ///
+ /// The second half of the tuple that is returned is an <code>[Option]<[usize]></code>.
+ /// A [`None`] here means that either there is no known upper bound, or the
+ /// upper bound is larger than [`usize`].
+ ///
+ /// # Implementation notes
+ ///
+ /// It is not enforced that an async iterator implementation yields the declared
+    /// number of elements. A buggy async iterator may yield fewer elements
+    /// than the lower bound, or more than the upper bound.
+ ///
+ /// `size_hint()` is primarily intended to be used for optimizations such as
+ /// reserving space for the elements of the async iterator, but must not be
+ /// trusted to e.g., omit bounds checks in unsafe code. An incorrect
+ /// implementation of `size_hint()` should not lead to memory safety
+ /// violations.
+ ///
+ /// That said, the implementation should provide a correct estimation,
+ /// because otherwise it would be a violation of the trait's protocol.
+ ///
+ /// The default implementation returns <code>(0, [None])</code> which is correct for any
+ /// async iterator.
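+    ///
+    /// As a rough sketch, an async iterator built from an exact-size iterator
+    /// can simply forward that iterator's bounds (shown here with the
+    /// unstable `from_iter` adapter from this module):
+    ///
+    /// ```
+    /// #![feature(async_iterator)]
+    /// #![feature(async_iter_from_iter)]
+    /// use core::async_iter::{from_iter, AsyncIterator};
+    ///
+    /// let it = from_iter([1, 2, 3]);
+    /// assert_eq!(it.size_hint(), (3, Some(3)));
+    /// ```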
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, None)
+ }
+}
+
+#[unstable(feature = "async_iterator", issue = "79024")]
+impl<S: ?Sized + AsyncIterator + Unpin> AsyncIterator for &mut S {
+ type Item = S::Item;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ S::poll_next(Pin::new(&mut **self), cx)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (**self).size_hint()
+ }
+}
+
+#[unstable(feature = "async_iterator", issue = "79024")]
+impl<P> AsyncIterator for Pin<P>
+where
+ P: DerefMut,
+ P::Target: AsyncIterator,
+{
+ type Item = <P::Target as AsyncIterator>::Item;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ <P::Target as AsyncIterator>::poll_next(self.as_deref_mut(), cx)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (**self).size_hint()
+ }
+}
diff --git a/library/core/src/async_iter/from_iter.rs b/library/core/src/async_iter/from_iter.rs
new file mode 100644
index 000000000..3180187af
--- /dev/null
+++ b/library/core/src/async_iter/from_iter.rs
@@ -0,0 +1,38 @@
+use crate::pin::Pin;
+
+use crate::async_iter::AsyncIterator;
+use crate::task::{Context, Poll};
+
+/// An async iterator that was created from an iterator.
+///
+/// This async iterator is created by the [`from_iter`] function.
+/// See its documentation for more.
+///
+/// [`from_iter`]: fn.from_iter.html
+#[unstable(feature = "async_iter_from_iter", issue = "81798")]
+#[derive(Clone, Debug)]
+pub struct FromIter<I> {
+ iter: I,
+}
+
+#[unstable(feature = "async_iter_from_iter", issue = "81798")]
+impl<I> Unpin for FromIter<I> {}
+
+/// Converts an iterator into an async iterator.
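+///
+/// A minimal sketch of the intent (polling needs a task `Context`, which
+/// requires a waker from an executor, so this example is not run):
+///
+/// ```ignore (polling requires a waker/executor to build a `Context`)
+/// #![feature(async_iter_from_iter)]
+/// use core::async_iter::from_iter;
+///
+/// let it = from_iter([1, 2, 3]);
+/// // When pinned and polled, `it` yields `Poll::Ready(Some(1))`,
+/// // `Poll::Ready(Some(2))`, `Poll::Ready(Some(3))`, then `Poll::Ready(None)`.
+/// ```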
+#[unstable(feature = "async_iter_from_iter", issue = "81798")]
+pub fn from_iter<I: IntoIterator>(iter: I) -> FromIter<I::IntoIter> {
+ FromIter { iter: iter.into_iter() }
+}
+
+#[unstable(feature = "async_iter_from_iter", issue = "81798")]
+impl<I: Iterator> AsyncIterator for FromIter<I> {
+ type Item = I::Item;
+
+ fn poll_next(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ Poll::Ready(self.iter.next())
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
diff --git a/library/core/src/async_iter/mod.rs b/library/core/src/async_iter/mod.rs
new file mode 100644
index 000000000..0c6f63771
--- /dev/null
+++ b/library/core/src/async_iter/mod.rs
@@ -0,0 +1,128 @@
+//! Composable asynchronous iteration.
+//!
+//! If you've found yourself with an asynchronous collection of some kind,
+//! and needed to perform an operation on the elements of said collection,
+//! you'll quickly run into 'async iterators'. Async Iterators are heavily used in
+//! idiomatic asynchronous Rust code, so it's worth becoming familiar with them.
+//!
+//! Before explaining more, let's talk about how this module is structured:
+//!
+//! # Organization
+//!
+//! This module is largely organized by type:
+//!
+//! * [Traits] are the core portion: these traits define what kind of async iterators
+//! exist and what you can do with them. The methods of these traits are worth
+//! putting some extra study time into.
+//! * Functions provide some helpful ways to create some basic async iterators.
+//! * Structs are often the return types of the various methods on this
+//! module's traits. You'll usually want to look at the method that creates
+//! the `struct`, rather than the `struct` itself. For more detail about why,
+//! see '[Implementing Async Iterator](#implementing-async-iterator)'.
+//!
+//! [Traits]: #traits
+//!
+//! That's it! Let's dig into async iterators.
+//!
+//! # Async Iterators
+//!
+//! The heart and soul of this module is the [`AsyncIterator`] trait. The core of
+//! [`AsyncIterator`] looks like this:
+//!
+//! ```
+//! # use core::task::{Context, Poll};
+//! # use core::pin::Pin;
+//! trait AsyncIterator {
+//! type Item;
+//! fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>>;
+//! }
+//! ```
+//!
+//! Unlike `Iterator`, `AsyncIterator` makes a distinction between the [`poll_next`]
+//! method which is used when implementing an `AsyncIterator`, and a (to-be-implemented)
+//! `next` method which is used when consuming an async iterator. Consumers of `AsyncIterator`
+//! only need to consider `next`, which when called, returns a future which
+//! yields `Option<AsyncIterator::Item>`.
+//!
+//! The future returned by `next` will yield `Some(Item)` as long as there are
+//! elements, and once they've all been exhausted, will yield `None` to indicate
+//! that iteration is finished. If we're waiting on something asynchronous to
+//! resolve, the future will wait until the async iterator is ready to yield again.
+//!
+//! Individual async iterators may choose to resume iteration, and so calling `next`
+//! again may or may not eventually yield `Some(Item)` again at some point.
+//!
+//! [`AsyncIterator`]'s full definition includes a number of other methods as well,
+//! but they are default methods, built on top of [`poll_next`], and so you get
+//! them for free.
+//!
+//! [`Poll`]: super::task::Poll
+//! [`poll_next`]: AsyncIterator::poll_next
+//!
+//! # Implementing Async Iterator
+//!
+//! Creating an async iterator of your own involves two steps: creating a `struct` to
+//! hold the async iterator's state, and then implementing [`AsyncIterator`] for that
+//! `struct`.
+//!
+//! Let's make an async iterator named `Counter` which counts from `1` to `5`:
+//!
+//! ```no_run
+//! #![feature(async_iterator)]
+//! # use core::async_iter::AsyncIterator;
+//! # use core::task::{Context, Poll};
+//! # use core::pin::Pin;
+//!
+//! // First, the struct:
+//!
+//! /// An async iterator which counts from one to five
+//! struct Counter {
+//! count: usize,
+//! }
+//!
+//! // we want our count to start at one, so let's add a new() method to help.
+//! // This isn't strictly necessary, but is convenient. Note that we start
+//! // `count` at zero; we'll see why in `poll_next()`'s implementation below.
+//! impl Counter {
+//! fn new() -> Counter {
+//! Counter { count: 0 }
+//! }
+//! }
+//!
+//! // Then, we implement `AsyncIterator` for our `Counter`:
+//!
+//! impl AsyncIterator for Counter {
+//! // we will be counting with usize
+//! type Item = usize;
+//!
+//! // poll_next() is the only required method
+//! fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+//! // Increment our count. This is why we started at zero.
+//! self.count += 1;
+//!
+//! // Check to see if we've finished counting or not.
+//! if self.count < 6 {
+//! Poll::Ready(Some(self.count))
+//! } else {
+//! Poll::Ready(None)
+//! }
+//! }
+//! }
+//! ```
+//!
+//! # Laziness
+//!
+//! Async iterators are *lazy*. This means that just creating an async iterator doesn't
+//! _do_ a whole lot. Nothing really happens until you call `poll_next`. This is
+//! sometimes a source of confusion when creating an async iterator solely for its side
+//! effects. The compiler will warn us about this kind of behavior:
+//!
+//! ```text
+//! warning: unused result that must be used: async iterators do nothing unless polled
+//! ```
+
+mod async_iter;
+mod from_iter;
+
+pub use async_iter::AsyncIterator;
+pub use from_iter::{from_iter, FromIter};
diff --git a/library/core/src/bool.rs b/library/core/src/bool.rs
new file mode 100644
index 000000000..f7a8aa0d9
--- /dev/null
+++ b/library/core/src/bool.rs
@@ -0,0 +1,44 @@
+//! impl bool {}
+
+use crate::marker::Destruct;
+
+impl bool {
+ /// Returns `Some(t)` if the `bool` is [`true`](../std/keyword.true.html),
+ /// or `None` otherwise.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(false.then_some(0), None);
+ /// assert_eq!(true.then_some(0), Some(0));
+ /// ```
+ #[stable(feature = "bool_to_option", since = "1.62.0")]
+ #[rustc_const_unstable(feature = "const_bool_to_option", issue = "91917")]
+ #[inline]
+ pub const fn then_some<T>(self, t: T) -> Option<T>
+ where
+ T: ~const Destruct,
+ {
+ if self { Some(t) } else { None }
+ }
+
+ /// Returns `Some(f())` if the `bool` is [`true`](../std/keyword.true.html),
+ /// or `None` otherwise.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(false.then(|| 0), None);
+ /// assert_eq!(true.then(|| 0), Some(0));
+ /// ```
+ #[stable(feature = "lazy_bool_to_option", since = "1.50.0")]
+ #[rustc_const_unstable(feature = "const_bool_to_option", issue = "91917")]
+ #[inline]
+ pub const fn then<T, F>(self, f: F) -> Option<T>
+ where
+ F: ~const FnOnce() -> T,
+ F: ~const Destruct,
+ {
+ if self { Some(f()) } else { None }
+ }
+}
diff --git a/library/core/src/borrow.rs b/library/core/src/borrow.rs
new file mode 100644
index 000000000..58eabecf3
--- /dev/null
+++ b/library/core/src/borrow.rs
@@ -0,0 +1,246 @@
+//! A module for working with borrowed data.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+/// A trait for borrowing data.
+///
+/// In Rust, it is common to provide different representations of a type for
+/// different use cases. For instance, storage location and management for a
+/// value can be specifically chosen as appropriate for a particular use via
+/// pointer types such as [`Box<T>`] or [`Rc<T>`]. Beyond these generic
+/// wrappers that can be used with any type, some types provide optional
+/// facets providing potentially costly functionality. An example of such a
+/// type is [`String`], which adds the ability to extend a string to the basic
+/// [`str`]. This requires keeping additional information unnecessary for a
+/// simple, immutable string.
+///
+/// These types provide access to the underlying data through references
+/// to the type of that data. They are said to be ‘borrowed as’ that type.
+/// For instance, a [`Box<T>`] can be borrowed as `T` while a [`String`]
+/// can be borrowed as `str`.
+///
+/// Types express that they can be borrowed as some type `T` by implementing
+/// `Borrow<T>`, providing a reference to a `T` in the trait’s
+/// [`borrow`] method. A type is free to borrow as several different types.
+/// If it wishes to mutably borrow as the type, allowing the underlying data
+/// to be modified, it can additionally implement [`BorrowMut<T>`].
+///
+/// Further, when providing implementations for additional traits, it needs
+/// to be considered whether they should behave identical to those of the
+/// underlying type as a consequence of acting as a representation of that
+/// underlying type. Generic code typically uses `Borrow<T>` when it relies
+/// on the identical behavior of these additional trait implementations.
+/// These traits will likely appear as additional trait bounds.
+///
+/// In particular `Eq`, `Ord` and `Hash` must be equivalent for
+/// borrowed and owned values: `x.borrow() == y.borrow()` should give the
+/// same result as `x == y`.
+///
+/// If generic code merely needs to work for all types that can
+/// provide a reference to a related type `T`, it is often better to use
+/// [`AsRef<T>`] as more types can safely implement it.
+///
+/// [`Box<T>`]: ../../std/boxed/struct.Box.html
+/// [`Mutex<T>`]: ../../std/sync/struct.Mutex.html
+/// [`Rc<T>`]: ../../std/rc/struct.Rc.html
+/// [`String`]: ../../std/string/struct.String.html
+/// [`borrow`]: Borrow::borrow
+///
+/// # Examples
+///
+/// As a data collection, [`HashMap<K, V>`] owns both keys and values. If
+/// the key’s actual data is wrapped in a managing type of some kind, it
+/// should, however, still be possible to search for a value using a
+/// reference to the key’s data. For instance, if the key is a string, then
+/// it is likely stored with the hash map as a [`String`], while it should
+/// be possible to search using a [`&str`][`str`]. Thus, `insert` needs to
+/// operate on a `String` while `get` needs to be able to use a `&str`.
+///
+/// Slightly simplified, the relevant parts of `HashMap<K, V>` look like
+/// this:
+///
+/// ```
+/// use std::borrow::Borrow;
+/// use std::hash::Hash;
+///
+/// pub struct HashMap<K, V> {
+/// # marker: ::std::marker::PhantomData<(K, V)>,
+/// // fields omitted
+/// }
+///
+/// impl<K, V> HashMap<K, V> {
+///     pub fn insert(&mut self, key: K, value: V) -> Option<V>
+/// where K: Hash + Eq
+/// {
+/// # unimplemented!()
+/// // ...
+/// }
+///
+/// pub fn get<Q>(&self, k: &Q) -> Option<&V>
+/// where
+/// K: Borrow<Q>,
+/// Q: Hash + Eq + ?Sized
+/// {
+/// # unimplemented!()
+/// // ...
+/// }
+/// }
+/// ```
+///
+/// The entire hash map is generic over a key type `K`. Because these keys
+/// are stored with the hash map, this type has to own the key’s data.
+/// When inserting a key-value pair, the map is given such a `K` and needs
+/// to find the correct hash bucket and check if the key is already present
+/// based on that `K`. It therefore requires `K: Hash + Eq`.
+///
+/// When searching for a value in the map, however, having to provide a
+/// reference to a `K` as the key to search for would always require creating
+/// such an owned value. For string keys, this would mean creating a `String`
+/// just to perform the search in cases where only a `str` is available.
+///
+/// Instead, the `get` method is generic over the type of the underlying key
+/// data, called `Q` in the method signature above. It states that `K`
+/// borrows as a `Q` by requiring that `K: Borrow<Q>`. By additionally
+/// requiring `Q: Hash + Eq`, it signals the requirement that `K` and `Q`
+/// have implementations of the `Hash` and `Eq` traits that produce identical
+/// results.
+///
+/// In particular, the implementation of `get` relies on identical
+/// implementations of `Hash`: it determines the key’s hash bucket by calling
+/// `Hash::hash` on the `Q` value, even though the key was inserted based on
+/// the hash value calculated from the `K` value.
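+///
+/// In practice, this is what lets a map keyed by `String` be queried with a
+/// plain `&str`:
+///
+/// ```
+/// use std::collections::HashMap;
+///
+/// let mut map = HashMap::new();
+/// map.insert("Foo".to_string(), 42);
+/// // The key is stored as a `String`, but can be looked up via a `&str`.
+/// assert_eq!(map.get("Foo"), Some(&42));
+/// ```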
+///
+/// As a consequence, the hash map breaks if a `K` wrapping a `Q` value
+/// produces a different hash than `Q`. For instance, imagine you have a
+/// type that wraps a string but compares ASCII letters ignoring their case:
+///
+/// ```
+/// pub struct CaseInsensitiveString(String);
+///
+/// impl PartialEq for CaseInsensitiveString {
+/// fn eq(&self, other: &Self) -> bool {
+/// self.0.eq_ignore_ascii_case(&other.0)
+/// }
+/// }
+///
+/// impl Eq for CaseInsensitiveString { }
+/// ```
+///
+/// Because two equal values need to produce the same hash value, the
+/// implementation of `Hash` needs to ignore ASCII case, too:
+///
+/// ```
+/// # use std::hash::{Hash, Hasher};
+/// # pub struct CaseInsensitiveString(String);
+/// impl Hash for CaseInsensitiveString {
+/// fn hash<H: Hasher>(&self, state: &mut H) {
+/// for c in self.0.as_bytes() {
+/// c.to_ascii_lowercase().hash(state)
+/// }
+/// }
+/// }
+/// ```
+///
+/// Can `CaseInsensitiveString` implement `Borrow<str>`? It certainly can
+/// provide a reference to a string slice via its contained owned string.
+/// But because its `Hash` implementation differs, it behaves differently
+/// from `str` and therefore must not, in fact, implement `Borrow<str>`.
+/// If it wants to allow others access to the underlying `str`, it can do
+/// that via `AsRef<str>` which doesn’t carry any extra requirements.
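+///
+/// Such an `AsRef` implementation could look like this:
+///
+/// ```
+/// # pub struct CaseInsensitiveString(String);
+/// impl AsRef<str> for CaseInsensitiveString {
+///     fn as_ref(&self) -> &str {
+///         &self.0
+///     }
+/// }
+/// ```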
+///
+/// [`Hash`]: crate::hash::Hash
+/// [`HashMap<K, V>`]: ../../std/collections/struct.HashMap.html
+/// [`String`]: ../../std/string/struct.String.html
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_diagnostic_item = "Borrow"]
+pub trait Borrow<Borrowed: ?Sized> {
+ /// Immutably borrows from an owned value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::borrow::Borrow;
+ ///
+ /// fn check<T: Borrow<str>>(s: T) {
+ /// assert_eq!("Hello", s.borrow());
+ /// }
+ ///
+ /// let s = "Hello".to_string();
+ ///
+ /// check(s);
+ ///
+ /// let s = "Hello";
+ ///
+ /// check(s);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn borrow(&self) -> &Borrowed;
+}
+
+/// A trait for mutably borrowing data.
+///
+/// As a companion to [`Borrow<T>`] this trait allows a type to borrow as
+/// an underlying type by providing a mutable reference. See [`Borrow<T>`]
+/// for more information on borrowing as another type.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait BorrowMut<Borrowed: ?Sized>: Borrow<Borrowed> {
+ /// Mutably borrows from an owned value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::borrow::BorrowMut;
+ ///
+ /// fn check<T: BorrowMut<[i32]>>(mut v: T) {
+ /// assert_eq!(&mut [1, 2, 3], v.borrow_mut());
+ /// }
+ ///
+ /// let v = vec![1, 2, 3];
+ ///
+ /// check(v);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn borrow_mut(&mut self) -> &mut Borrowed;
+}
+
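+// Blanket implementations: every type can be borrowed as itself, and shared
+// and mutable references can be borrowed as the type they point to.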
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_borrow", issue = "91522")]
+impl<T: ?Sized> const Borrow<T> for T {
+ #[rustc_diagnostic_item = "noop_method_borrow"]
+ fn borrow(&self) -> &T {
+ self
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_borrow", issue = "91522")]
+impl<T: ?Sized> const BorrowMut<T> for T {
+ fn borrow_mut(&mut self) -> &mut T {
+ self
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_borrow", issue = "91522")]
+impl<T: ?Sized> const Borrow<T> for &T {
+ fn borrow(&self) -> &T {
+ &**self
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_borrow", issue = "91522")]
+impl<T: ?Sized> const Borrow<T> for &mut T {
+ fn borrow(&self) -> &T {
+ &**self
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_borrow", issue = "91522")]
+impl<T: ?Sized> const BorrowMut<T> for &mut T {
+ fn borrow_mut(&mut self) -> &mut T {
+ &mut **self
+ }
+}
diff --git a/library/core/src/cell.rs b/library/core/src/cell.rs
new file mode 100644
index 000000000..fb4454c94
--- /dev/null
+++ b/library/core/src/cell.rs
@@ -0,0 +1,2122 @@
+//! Shareable mutable containers.
+//!
+//! Rust memory safety is based on this rule: Given an object `T`, it is only possible to
+//! have one of the following:
+//!
+//! - Having several immutable references (`&T`) to the object (also known as **aliasing**).
+//! - Having one mutable reference (`&mut T`) to the object (also known as **mutability**).
+//!
+//! This is enforced by the Rust compiler. However, there are situations where this rule is not
+//! flexible enough. Sometimes it is required to have multiple references to an object and yet
+//! mutate it.
+//!
+//! Shareable mutable containers exist to permit mutability in a controlled manner, even in the
+//! presence of aliasing. Both [`Cell<T>`] and [`RefCell<T>`] allow doing this in a single-threaded
+//! way. However, neither `Cell<T>` nor `RefCell<T>` is thread safe (they do not implement
+//! [`Sync`]). If you need aliasing and mutation between multiple threads, it is possible to
+//! use [`Mutex<T>`], [`RwLock<T>`] or [`atomic`] types.
+//!
+//! Values of the `Cell<T>` and `RefCell<T>` types may be mutated through shared references (i.e.
+//! the common `&T` type), whereas most Rust types can only be mutated through unique (`&mut T`)
+//! references. We say that `Cell<T>` and `RefCell<T>` provide 'interior mutability', in contrast
+//! with typical Rust types that exhibit 'inherited mutability'.
+//!
+//! Cell types come in two flavors: `Cell<T>` and `RefCell<T>`. `Cell<T>` implements interior
+//! mutability by moving values in and out of the `Cell<T>`. To use references instead of values,
+//! one must use the `RefCell<T>` type, acquiring a write lock before mutating. `Cell<T>` provides
+//! methods to retrieve and change the current interior value:
+//!
+//! - For types that implement [`Copy`], the [`get`](Cell::get) method retrieves the current
+//! interior value.
+//! - For types that implement [`Default`], the [`take`](Cell::take) method replaces the current
+//! interior value with [`Default::default()`] and returns the replaced value.
+//! - For all types, the [`replace`](Cell::replace) method replaces the current interior value and
+//! returns the replaced value and the [`into_inner`](Cell::into_inner) method consumes the
+//! `Cell<T>` and returns the interior value. Additionally, the [`set`](Cell::set) method
+//! replaces the interior value, dropping the replaced value.
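+//!
+//! A brief example exercising these methods:
+//!
+//! ```
+//! use std::cell::Cell;
+//!
+//! let c = Cell::new(5);
+//! c.set(10); // replaces the value, dropping the old one
+//! assert_eq!(c.get(), 10); // copies the current value out
+//! assert_eq!(c.replace(1), 10); // swaps in a new value, returning the old one
+//! assert_eq!(c.take(), 1); // leaves `i32::default()`, i.e. 0, behind
+//! assert_eq!(c.into_inner(), 0); // consumes the cell
+//! ```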
+//!
+//! `RefCell<T>` uses Rust's lifetimes to implement 'dynamic borrowing', a process whereby one can
+//! claim temporary, exclusive, mutable access to the inner value. Borrows for `RefCell<T>`s are
+//! tracked 'at runtime', unlike Rust's native reference types which are entirely tracked
+//! statically, at compile time. Because `RefCell<T>` borrows are dynamic, it is possible to attempt
+//! to borrow a value that is already mutably borrowed; when this happens, it results in a thread
+//! panic.
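+//!
+//! For example, the runtime checks can be observed through the non-panicking
+//! [`try_borrow_mut`](RefCell::try_borrow_mut):
+//!
+//! ```
+//! use std::cell::RefCell;
+//!
+//! let cell = RefCell::new(5);
+//! let borrow = cell.borrow(); // shared borrow, tracked at runtime
+//! assert!(cell.try_borrow_mut().is_err()); // an exclusive borrow is refused
+//! drop(borrow);
+//! assert!(cell.try_borrow_mut().is_ok()); // allowed again once the `Ref` is gone
+//! ```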
+//!
+//! # When to choose interior mutability
+//!
+//! The more common inherited mutability, where one must have unique access to mutate a value, is
+//! one of the key language elements that enables Rust to reason strongly about pointer aliasing,
+//! statically preventing crash bugs. Because of that, inherited mutability is preferred, and
+//! interior mutability is something of a last resort. Since cell types enable mutation where it
+//! would otherwise be disallowed, though, there are occasions when interior mutability might be
+//! appropriate, or even *must* be used, e.g.
+//!
+//! * Introducing mutability 'inside' of something immutable.
+//! * Implementation details of logically-immutable methods.
+//! * Mutating implementations of [`Clone`].
+//!
+//! ## Introducing mutability 'inside' of something immutable
+//!
+//! Many shared smart pointer types, including [`Rc<T>`] and [`Arc<T>`], provide containers that can
+//! be cloned and shared between multiple parties. Because the contained values may be
+//! multiply-aliased, they can only be borrowed with `&`, not `&mut`. Without cells it would be
+//! impossible to mutate data inside of these smart pointers at all.
+//!
+//! It's very common then to put a `RefCell<T>` inside shared pointer types to reintroduce
+//! mutability:
+//!
+//! ```
+//! use std::cell::{RefCell, RefMut};
+//! use std::collections::HashMap;
+//! use std::rc::Rc;
+//!
+//! fn main() {
+//! let shared_map: Rc<RefCell<_>> = Rc::new(RefCell::new(HashMap::new()));
+//! // Create a new block to limit the scope of the dynamic borrow
+//! {
+//! let mut map: RefMut<_> = shared_map.borrow_mut();
+//! map.insert("africa", 92388);
+//! map.insert("kyoto", 11837);
+//! map.insert("piccadilly", 11826);
+//! map.insert("marbles", 38);
+//! }
+//!
+//! // Note that if we had not let the previous borrow of the cache fall out
+//! // of scope then the subsequent borrow would cause a dynamic thread panic.
+//! // This is the major hazard of using `RefCell`.
+//! let total: i32 = shared_map.borrow().values().sum();
+//! println!("{total}");
+//! }
+//! ```
+//!
+//! Note that this example uses `Rc<T>` and not `Arc<T>`. `RefCell<T>`s are for single-threaded
+//! scenarios. Consider using [`RwLock<T>`] or [`Mutex<T>`] if you need shared mutability in a
+//! multi-threaded situation.
+//!
+//! ## Implementation details of logically-immutable methods
+//!
+//! Occasionally it may be desirable not to expose in an API that there is mutation happening
+//! "under the hood". This may be because logically the operation is immutable, but e.g., caching
+//! forces the implementation to perform mutation; or because you must employ mutation to implement
+//! a trait method that was originally defined to take `&self`.
+//!
+//! ```
+//! # #![allow(dead_code)]
+//! use std::cell::RefCell;
+//!
+//! struct Graph {
+//! edges: Vec<(i32, i32)>,
+//! span_tree_cache: RefCell<Option<Vec<(i32, i32)>>>
+//! }
+//!
+//! impl Graph {
+//! fn minimum_spanning_tree(&self) -> Vec<(i32, i32)> {
+//! self.span_tree_cache.borrow_mut()
+//! .get_or_insert_with(|| self.calc_span_tree())
+//! .clone()
+//! }
+//!
+//! fn calc_span_tree(&self) -> Vec<(i32, i32)> {
+//! // Expensive computation goes here
+//! vec![]
+//! }
+//! }
+//! ```
+//!
+//! ## Mutating implementations of `Clone`
+//!
+//! This is simply a special (but common) case of the previous: hiding mutability for operations
+//! that appear to be immutable. The [`clone`](Clone::clone) method is expected to not change the
+//! source value, and is declared to take `&self`, not `&mut self`. Therefore, any mutation that
+//! happens in the `clone` method must use cell types. For example, [`Rc<T>`] maintains its
+//! reference counts within a `Cell<T>`.
+//!
+//! ```
+//! use std::cell::Cell;
+//! use std::ptr::NonNull;
+//! use std::process::abort;
+//! use std::marker::PhantomData;
+//!
+//! struct Rc<T: ?Sized> {
+//! ptr: NonNull<RcBox<T>>,
+//! phantom: PhantomData<RcBox<T>>,
+//! }
+//!
+//! struct RcBox<T: ?Sized> {
+//! strong: Cell<usize>,
+//! refcount: Cell<usize>,
+//! value: T,
+//! }
+//!
+//! impl<T: ?Sized> Clone for Rc<T> {
+//! fn clone(&self) -> Rc<T> {
+//! self.inc_strong();
+//! Rc {
+//! ptr: self.ptr,
+//! phantom: PhantomData,
+//! }
+//! }
+//! }
+//!
+//! trait RcBoxPtr<T: ?Sized> {
+//!
+//! fn inner(&self) -> &RcBox<T>;
+//!
+//! fn strong(&self) -> usize {
+//! self.inner().strong.get()
+//! }
+//!
+//! fn inc_strong(&self) {
+//! self.inner()
+//! .strong
+//! .set(self.strong()
+//! .checked_add(1)
+//! .unwrap_or_else(|| abort() ));
+//! }
+//! }
+//!
+//! impl<T: ?Sized> RcBoxPtr<T> for Rc<T> {
+//! fn inner(&self) -> &RcBox<T> {
+//! unsafe {
+//! self.ptr.as_ref()
+//! }
+//! }
+//! }
+//! ```
+//!
+//! [`Arc<T>`]: ../../std/sync/struct.Arc.html
+//! [`Rc<T>`]: ../../std/rc/struct.Rc.html
+//! [`RwLock<T>`]: ../../std/sync/struct.RwLock.html
+//! [`Mutex<T>`]: ../../std/sync/struct.Mutex.html
+//! [`atomic`]: crate::sync::atomic
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::cmp::Ordering;
+use crate::fmt::{self, Debug, Display};
+use crate::marker::{PhantomData, Unsize};
+use crate::mem;
+use crate::ops::{CoerceUnsized, Deref, DerefMut};
+use crate::ptr::{self, NonNull};
+
+mod lazy;
+mod once;
+
+#[unstable(feature = "once_cell", issue = "74465")]
+pub use lazy::LazyCell;
+#[unstable(feature = "once_cell", issue = "74465")]
+pub use once::OnceCell;
+
+/// A mutable memory location.
+///
+/// # Examples
+///
+/// In this example, you can see that `Cell<T>` enables mutation inside an
+/// immutable struct. In other words, it enables "interior mutability".
+///
+/// ```
+/// use std::cell::Cell;
+///
+/// struct SomeStruct {
+/// regular_field: u8,
+/// special_field: Cell<u8>,
+/// }
+///
+/// let my_struct = SomeStruct {
+/// regular_field: 0,
+/// special_field: Cell::new(1),
+/// };
+///
+/// let new_value = 100;
+///
+/// // ERROR: `my_struct` is immutable
+/// // my_struct.regular_field = new_value;
+///
+/// // WORKS: although `my_struct` is immutable, `special_field` is a `Cell`,
+/// // which can always be mutated
+/// my_struct.special_field.set(new_value);
+/// assert_eq!(my_struct.special_field.get(), new_value);
+/// ```
+///
+/// See the [module-level documentation](self) for more.
+#[stable(feature = "rust1", since = "1.0.0")]
+#[repr(transparent)]
+pub struct Cell<T: ?Sized> {
+ value: UnsafeCell<T>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: ?Sized> Send for Cell<T> where T: Send {}
+
+// Note that this negative impl isn't strictly necessary for correctness,
+// as `Cell` wraps `UnsafeCell`, which is itself `!Sync`.
+// However, given how important `Cell`'s `!Sync`-ness is,
+// having an explicit negative impl is nice for documentation purposes
+// and results in nicer error messages.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> !Sync for Cell<T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Copy> Clone for Cell<T> {
+ #[inline]
+ fn clone(&self) -> Cell<T> {
+ Cell::new(self.get())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Default> Default for Cell<T> {
+    /// Creates a `Cell<T>` with the `Default` value for `T`.
+ #[inline]
+ fn default() -> Cell<T> {
+ Cell::new(Default::default())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: PartialEq + Copy> PartialEq for Cell<T> {
+ #[inline]
+ fn eq(&self, other: &Cell<T>) -> bool {
+ self.get() == other.get()
+ }
+}
+
+#[stable(feature = "cell_eq", since = "1.2.0")]
+impl<T: Eq + Copy> Eq for Cell<T> {}
+
+#[stable(feature = "cell_ord", since = "1.10.0")]
+impl<T: PartialOrd + Copy> PartialOrd for Cell<T> {
+ #[inline]
+ fn partial_cmp(&self, other: &Cell<T>) -> Option<Ordering> {
+ self.get().partial_cmp(&other.get())
+ }
+
+ #[inline]
+ fn lt(&self, other: &Cell<T>) -> bool {
+ self.get() < other.get()
+ }
+
+ #[inline]
+ fn le(&self, other: &Cell<T>) -> bool {
+ self.get() <= other.get()
+ }
+
+ #[inline]
+ fn gt(&self, other: &Cell<T>) -> bool {
+ self.get() > other.get()
+ }
+
+ #[inline]
+ fn ge(&self, other: &Cell<T>) -> bool {
+ self.get() >= other.get()
+ }
+}
+
+#[stable(feature = "cell_ord", since = "1.10.0")]
+impl<T: Ord + Copy> Ord for Cell<T> {
+ #[inline]
+ fn cmp(&self, other: &Cell<T>) -> Ordering {
+ self.get().cmp(&other.get())
+ }
+}
+
+#[stable(feature = "cell_from", since = "1.12.0")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl<T> const From<T> for Cell<T> {
+ /// Creates a new `Cell<T>` containing the given value.
+ fn from(t: T) -> Cell<T> {
+ Cell::new(t)
+ }
+}
+
+impl<T> Cell<T> {
+ /// Creates a new `Cell` containing the given value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::Cell;
+ ///
+ /// let c = Cell::new(5);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_cell_new", since = "1.24.0")]
+ #[inline]
+ pub const fn new(value: T) -> Cell<T> {
+ Cell { value: UnsafeCell::new(value) }
+ }
+
+ /// Sets the contained value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::Cell;
+ ///
+ /// let c = Cell::new(5);
+ ///
+ /// c.set(10);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn set(&self, val: T) {
+ let old = self.replace(val);
+ drop(old);
+ }
+
+ /// Swaps the values of two `Cell`s.
+    /// Unlike `std::mem::swap`, this function doesn't require a `&mut` reference.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::Cell;
+ ///
+ /// let c1 = Cell::new(5i32);
+ /// let c2 = Cell::new(10i32);
+ /// c1.swap(&c2);
+ /// assert_eq!(10, c1.get());
+ /// assert_eq!(5, c2.get());
+ /// ```
+ #[inline]
+ #[stable(feature = "move_cell", since = "1.17.0")]
+ pub fn swap(&self, other: &Self) {
+ if ptr::eq(self, other) {
+ return;
+ }
+ // SAFETY: This can be risky if called from separate threads, but `Cell`
+ // is `!Sync` so this won't happen. This also won't invalidate any
+ // pointers since `Cell` makes sure nothing else will be pointing into
+ // either of these `Cell`s.
+ unsafe {
+ ptr::swap(self.value.get(), other.value.get());
+ }
+ }
+
+ /// Replaces the contained value with `val`, and returns the old contained value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::Cell;
+ ///
+ /// let cell = Cell::new(5);
+ /// assert_eq!(cell.get(), 5);
+ /// assert_eq!(cell.replace(10), 5);
+ /// assert_eq!(cell.get(), 10);
+ /// ```
+ #[stable(feature = "move_cell", since = "1.17.0")]
+ pub fn replace(&self, val: T) -> T {
+ // SAFETY: This can cause data races if called from a separate thread,
+ // but `Cell` is `!Sync` so this won't happen.
+ mem::replace(unsafe { &mut *self.value.get() }, val)
+ }
+
+ /// Unwraps the value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::Cell;
+ ///
+ /// let c = Cell::new(5);
+ /// let five = c.into_inner();
+ ///
+ /// assert_eq!(five, 5);
+ /// ```
+ #[stable(feature = "move_cell", since = "1.17.0")]
+ #[rustc_const_unstable(feature = "const_cell_into_inner", issue = "78729")]
+ pub const fn into_inner(self) -> T {
+ self.value.into_inner()
+ }
+}
+
+impl<T: Copy> Cell<T> {
+ /// Returns a copy of the contained value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::Cell;
+ ///
+ /// let c = Cell::new(5);
+ ///
+ /// let five = c.get();
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn get(&self) -> T {
+ // SAFETY: This can cause data races if called from a separate thread,
+ // but `Cell` is `!Sync` so this won't happen.
+ unsafe { *self.value.get() }
+ }
+
+ /// Updates the contained value using a function and returns the new value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(cell_update)]
+ ///
+ /// use std::cell::Cell;
+ ///
+ /// let c = Cell::new(5);
+ /// let new = c.update(|x| x + 1);
+ ///
+ /// assert_eq!(new, 6);
+ /// assert_eq!(c.get(), 6);
+ /// ```
+ #[inline]
+ #[unstable(feature = "cell_update", issue = "50186")]
+ pub fn update<F>(&self, f: F) -> T
+ where
+ F: FnOnce(T) -> T,
+ {
+ let old = self.get();
+ let new = f(old);
+ self.set(new);
+ new
+ }
+}
+
+impl<T: ?Sized> Cell<T> {
+ /// Returns a raw pointer to the underlying data in this cell.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::Cell;
+ ///
+ /// let c = Cell::new(5);
+ ///
+ /// let ptr = c.as_ptr();
+ /// ```
+ #[inline]
+ #[stable(feature = "cell_as_ptr", since = "1.12.0")]
+ #[rustc_const_stable(feature = "const_cell_as_ptr", since = "1.32.0")]
+ pub const fn as_ptr(&self) -> *mut T {
+ self.value.get()
+ }
+
+ /// Returns a mutable reference to the underlying data.
+ ///
+ /// This call borrows `Cell` mutably (at compile-time) which guarantees
+ /// that we possess the only reference.
+ ///
+ /// However be cautious: this method expects `self` to be mutable, which is
+ /// generally not the case when using a `Cell`. If you require interior
+ /// mutability by reference, consider using `RefCell` which provides
+ /// run-time checked mutable borrows through its [`borrow_mut`] method.
+ ///
+ /// [`borrow_mut`]: RefCell::borrow_mut()
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::Cell;
+ ///
+ /// let mut c = Cell::new(5);
+ /// *c.get_mut() += 1;
+ ///
+ /// assert_eq!(c.get(), 6);
+ /// ```
+ #[inline]
+ #[stable(feature = "cell_get_mut", since = "1.11.0")]
+ pub fn get_mut(&mut self) -> &mut T {
+ self.value.get_mut()
+ }
+
+    /// Returns a `&Cell<T>` from a `&mut T`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::Cell;
+ ///
+ /// let slice: &mut [i32] = &mut [1, 2, 3];
+ /// let cell_slice: &Cell<[i32]> = Cell::from_mut(slice);
+ /// let slice_cell: &[Cell<i32>] = cell_slice.as_slice_of_cells();
+ ///
+ /// assert_eq!(slice_cell.len(), 3);
+ /// ```
+ #[inline]
+ #[stable(feature = "as_cell", since = "1.37.0")]
+ pub fn from_mut(t: &mut T) -> &Cell<T> {
+ // SAFETY: `&mut` ensures unique access.
+ unsafe { &*(t as *mut T as *const Cell<T>) }
+ }
+}
+
+impl<T: Default> Cell<T> {
+ /// Takes the value of the cell, leaving `Default::default()` in its place.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::Cell;
+ ///
+ /// let c = Cell::new(5);
+ /// let five = c.take();
+ ///
+ /// assert_eq!(five, 5);
+ /// assert_eq!(c.into_inner(), 0);
+ /// ```
+ #[stable(feature = "move_cell", since = "1.17.0")]
+ pub fn take(&self) -> T {
+ self.replace(Default::default())
+ }
+}
+
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<T: CoerceUnsized<U>, U> CoerceUnsized<Cell<U>> for Cell<T> {}
+
+impl<T> Cell<[T]> {
+    /// Returns a `&[Cell<T>]` from a `&Cell<[T]>`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::Cell;
+ ///
+ /// let slice: &mut [i32] = &mut [1, 2, 3];
+ /// let cell_slice: &Cell<[i32]> = Cell::from_mut(slice);
+ /// let slice_cell: &[Cell<i32>] = cell_slice.as_slice_of_cells();
+ ///
+ /// assert_eq!(slice_cell.len(), 3);
+ /// ```
+ #[stable(feature = "as_cell", since = "1.37.0")]
+ pub fn as_slice_of_cells(&self) -> &[Cell<T>] {
+ // SAFETY: `Cell<T>` has the same memory layout as `T`.
+ unsafe { &*(self as *const Cell<[T]> as *const [Cell<T>]) }
+ }
+}
+
+impl<T, const N: usize> Cell<[T; N]> {
+    /// Returns a `&[Cell<T>; N]` from a `&Cell<[T; N]>`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(as_array_of_cells)]
+ /// use std::cell::Cell;
+ ///
+ /// let mut array: [i32; 3] = [1, 2, 3];
+ /// let cell_array: &Cell<[i32; 3]> = Cell::from_mut(&mut array);
+ /// let array_cell: &[Cell<i32>; 3] = cell_array.as_array_of_cells();
+ /// ```
+ #[unstable(feature = "as_array_of_cells", issue = "88248")]
+ pub fn as_array_of_cells(&self) -> &[Cell<T>; N] {
+ // SAFETY: `Cell<T>` has the same memory layout as `T`.
+ unsafe { &*(self as *const Cell<[T; N]> as *const [Cell<T>; N]) }
+ }
+}
+
+/// A mutable memory location with dynamically checked borrow rules.
+///
+/// See the [module-level documentation](self) for more.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct RefCell<T: ?Sized> {
+ borrow: Cell<BorrowFlag>,
+ // Stores the location of the earliest currently active borrow.
+ // This gets updated whenever we go from having zero borrows
+ // to having a single borrow. When a borrow occurs, this gets included
+    // in the generated `BorrowError`/`BorrowMutError`.
+ #[cfg(feature = "debug_refcell")]
+ borrowed_at: Cell<Option<&'static crate::panic::Location<'static>>>,
+ value: UnsafeCell<T>,
+}
+
+/// An error returned by [`RefCell::try_borrow`].
+#[stable(feature = "try_borrow", since = "1.13.0")]
+#[non_exhaustive]
+pub struct BorrowError {
+ #[cfg(feature = "debug_refcell")]
+ location: &'static crate::panic::Location<'static>,
+}
+
+#[stable(feature = "try_borrow", since = "1.13.0")]
+impl Debug for BorrowError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut builder = f.debug_struct("BorrowError");
+
+ #[cfg(feature = "debug_refcell")]
+ builder.field("location", self.location);
+
+ builder.finish()
+ }
+}
+
+#[stable(feature = "try_borrow", since = "1.13.0")]
+impl Display for BorrowError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ Display::fmt("already mutably borrowed", f)
+ }
+}
+
+/// An error returned by [`RefCell::try_borrow_mut`].
+#[stable(feature = "try_borrow", since = "1.13.0")]
+#[non_exhaustive]
+pub struct BorrowMutError {
+ #[cfg(feature = "debug_refcell")]
+ location: &'static crate::panic::Location<'static>,
+}
+
+#[stable(feature = "try_borrow", since = "1.13.0")]
+impl Debug for BorrowMutError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut builder = f.debug_struct("BorrowMutError");
+
+ #[cfg(feature = "debug_refcell")]
+ builder.field("location", self.location);
+
+ builder.finish()
+ }
+}
+
+#[stable(feature = "try_borrow", since = "1.13.0")]
+impl Display for BorrowMutError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ Display::fmt("already borrowed", f)
+ }
+}
+
+// Positive values represent the number of `Ref`s active. Negative values
+// represent the number of `RefMut`s active. Multiple `RefMut`s can only be
+// active at a time if they refer to distinct, nonoverlapping components of a
+// `RefCell` (e.g., different ranges of a slice).
+//
+// `Ref` and `RefMut` are both two words in size, and so there will likely never
+// be enough `Ref`s or `RefMut`s in existence to overflow half of the `usize`
+// range. Thus, a `BorrowFlag` will probably never overflow or underflow.
+// However, this is not a guarantee, as a pathological program could repeatedly
+// create and then mem::forget `Ref`s or `RefMut`s. Thus, all code must
+// explicitly check for overflow and underflow in order to avoid unsafety, or at
+// least behave correctly in the event that overflow or underflow happens (e.g.,
+// see BorrowRef::new).
+type BorrowFlag = isize;
+const UNUSED: BorrowFlag = 0;
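+// For example, a flag of 2 means two `Ref`s are active, a flag of -1 means
+// one `RefMut` is active, and `UNUSED` (0) means the cell is not borrowed.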
+
+#[inline(always)]
+fn is_writing(x: BorrowFlag) -> bool {
+ x < UNUSED
+}
+
+#[inline(always)]
+fn is_reading(x: BorrowFlag) -> bool {
+ x > UNUSED
+}
+
+impl<T> RefCell<T> {
+ /// Creates a new `RefCell` containing `value`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::RefCell;
+ ///
+ /// let c = RefCell::new(5);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_refcell_new", since = "1.24.0")]
+ #[inline]
+ pub const fn new(value: T) -> RefCell<T> {
+ RefCell {
+ value: UnsafeCell::new(value),
+ borrow: Cell::new(UNUSED),
+ #[cfg(feature = "debug_refcell")]
+ borrowed_at: Cell::new(None),
+ }
+ }
+
+ /// Consumes the `RefCell`, returning the wrapped value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::RefCell;
+ ///
+ /// let c = RefCell::new(5);
+ ///
+ /// let five = c.into_inner();
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_cell_into_inner", issue = "78729")]
+ #[inline]
+ pub const fn into_inner(self) -> T {
+ // Since this function takes `self` (the `RefCell`) by value, the
+ // compiler statically verifies that it is not currently borrowed.
+ self.value.into_inner()
+ }
+
+ /// Replaces the wrapped value with a new one, returning the old value,
+ /// without deinitializing either one.
+ ///
+ /// This function corresponds to [`std::mem::replace`](../mem/fn.replace.html).
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value is currently borrowed.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::RefCell;
+ /// let cell = RefCell::new(5);
+ /// let old_value = cell.replace(6);
+ /// assert_eq!(old_value, 5);
+ /// assert_eq!(cell, RefCell::new(6));
+ /// ```
+ #[inline]
+ #[stable(feature = "refcell_replace", since = "1.24.0")]
+ #[track_caller]
+ pub fn replace(&self, t: T) -> T {
+ mem::replace(&mut *self.borrow_mut(), t)
+ }
+
+ /// Replaces the wrapped value with a new one computed from `f`, returning
+ /// the old value, without deinitializing either one.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value is currently borrowed.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::RefCell;
+ /// let cell = RefCell::new(5);
+ /// let old_value = cell.replace_with(|&mut old| old + 1);
+ /// assert_eq!(old_value, 5);
+ /// assert_eq!(cell, RefCell::new(6));
+ /// ```
+ #[inline]
+ #[stable(feature = "refcell_replace_swap", since = "1.35.0")]
+ #[track_caller]
+ pub fn replace_with<F: FnOnce(&mut T) -> T>(&self, f: F) -> T {
+ let mut_borrow = &mut *self.borrow_mut();
+ let replacement = f(mut_borrow);
+ mem::replace(mut_borrow, replacement)
+ }
+
+ /// Swaps the wrapped value of `self` with the wrapped value of `other`,
+ /// without deinitializing either one.
+ ///
+ /// This function corresponds to [`std::mem::swap`](../mem/fn.swap.html).
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value in either `RefCell` is currently borrowed.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::RefCell;
+ /// let c = RefCell::new(5);
+ /// let d = RefCell::new(6);
+ /// c.swap(&d);
+ /// assert_eq!(c, RefCell::new(6));
+ /// assert_eq!(d, RefCell::new(5));
+ /// ```
+ #[inline]
+ #[stable(feature = "refcell_swap", since = "1.24.0")]
+ pub fn swap(&self, other: &Self) {
+ mem::swap(&mut *self.borrow_mut(), &mut *other.borrow_mut())
+ }
+}
+
+impl<T: ?Sized> RefCell<T> {
+ /// Immutably borrows the wrapped value.
+ ///
+ /// The borrow lasts until the returned `Ref` exits scope. Multiple
+ /// immutable borrows can be taken out at the same time.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value is currently mutably borrowed. For a non-panicking variant, use
+ /// [`try_borrow`](#method.try_borrow).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::RefCell;
+ ///
+ /// let c = RefCell::new(5);
+ ///
+ /// let borrowed_five = c.borrow();
+ /// let borrowed_five2 = c.borrow();
+ /// ```
+ ///
+    /// An example of a panic:
+ ///
+ /// ```should_panic
+ /// use std::cell::RefCell;
+ ///
+ /// let c = RefCell::new(5);
+ ///
+ /// let m = c.borrow_mut();
+ /// let b = c.borrow(); // this causes a panic
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ #[track_caller]
+ pub fn borrow(&self) -> Ref<'_, T> {
+ self.try_borrow().expect("already mutably borrowed")
+ }
+
+ /// Immutably borrows the wrapped value, returning an error if the value is currently mutably
+ /// borrowed.
+ ///
+ /// The borrow lasts until the returned `Ref` exits scope. Multiple immutable borrows can be
+ /// taken out at the same time.
+ ///
+ /// This is the non-panicking variant of [`borrow`](#method.borrow).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::RefCell;
+ ///
+ /// let c = RefCell::new(5);
+ ///
+ /// {
+ /// let m = c.borrow_mut();
+ /// assert!(c.try_borrow().is_err());
+ /// }
+ ///
+ /// {
+ /// let m = c.borrow();
+ /// assert!(c.try_borrow().is_ok());
+ /// }
+ /// ```
+ #[stable(feature = "try_borrow", since = "1.13.0")]
+ #[inline]
+ #[cfg_attr(feature = "debug_refcell", track_caller)]
+ pub fn try_borrow(&self) -> Result<Ref<'_, T>, BorrowError> {
+ match BorrowRef::new(&self.borrow) {
+ Some(b) => {
+ #[cfg(feature = "debug_refcell")]
+ {
+ // `borrowed_at` is always the *first* active borrow
+ if b.borrow.get() == 1 {
+ self.borrowed_at.set(Some(crate::panic::Location::caller()));
+ }
+ }
+
+ // SAFETY: `BorrowRef` ensures that there is only immutable access
+ // to the value while borrowed.
+ let value = unsafe { NonNull::new_unchecked(self.value.get()) };
+ Ok(Ref { value, borrow: b })
+ }
+ None => Err(BorrowError {
+ // If a borrow occurred, then we must already have an outstanding borrow,
+ // so `borrowed_at` will be `Some`
+ #[cfg(feature = "debug_refcell")]
+ location: self.borrowed_at.get().unwrap(),
+ }),
+ }
+ }
+
+ /// Mutably borrows the wrapped value.
+ ///
+ /// The borrow lasts until the returned `RefMut` or all `RefMut`s derived
+ /// from it exit scope. The value cannot be borrowed while this borrow is
+ /// active.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value is currently borrowed. For a non-panicking variant, use
+ /// [`try_borrow_mut`](#method.try_borrow_mut).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::RefCell;
+ ///
+ /// let c = RefCell::new("hello".to_owned());
+ ///
+ /// *c.borrow_mut() = "bonjour".to_owned();
+ ///
+ /// assert_eq!(&*c.borrow(), "bonjour");
+ /// ```
+ ///
+    /// An example of a panic:
+ ///
+ /// ```should_panic
+ /// use std::cell::RefCell;
+ ///
+ /// let c = RefCell::new(5);
+ /// let m = c.borrow();
+ ///
+ /// let b = c.borrow_mut(); // this causes a panic
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ #[track_caller]
+ pub fn borrow_mut(&self) -> RefMut<'_, T> {
+ self.try_borrow_mut().expect("already borrowed")
+ }
+
+ /// Mutably borrows the wrapped value, returning an error if the value is currently borrowed.
+ ///
+ /// The borrow lasts until the returned `RefMut` or all `RefMut`s derived
+ /// from it exit scope. The value cannot be borrowed while this borrow is
+ /// active.
+ ///
+ /// This is the non-panicking variant of [`borrow_mut`](#method.borrow_mut).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::RefCell;
+ ///
+ /// let c = RefCell::new(5);
+ ///
+ /// {
+ /// let m = c.borrow();
+ /// assert!(c.try_borrow_mut().is_err());
+ /// }
+ ///
+ /// assert!(c.try_borrow_mut().is_ok());
+ /// ```
+ #[stable(feature = "try_borrow", since = "1.13.0")]
+ #[inline]
+ #[cfg_attr(feature = "debug_refcell", track_caller)]
+ pub fn try_borrow_mut(&self) -> Result<RefMut<'_, T>, BorrowMutError> {
+ match BorrowRefMut::new(&self.borrow) {
+ Some(b) => {
+ #[cfg(feature = "debug_refcell")]
+ {
+ self.borrowed_at.set(Some(crate::panic::Location::caller()));
+ }
+
+ // SAFETY: `BorrowRefMut` guarantees unique access.
+ let value = unsafe { NonNull::new_unchecked(self.value.get()) };
+ Ok(RefMut { value, borrow: b, marker: PhantomData })
+ }
+ None => Err(BorrowMutError {
+ // If a borrow occurred, then we must already have an outstanding borrow,
+ // so `borrowed_at` will be `Some`
+ #[cfg(feature = "debug_refcell")]
+ location: self.borrowed_at.get().unwrap(),
+ }),
+ }
+ }
+
+ /// Returns a raw pointer to the underlying data in this cell.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::RefCell;
+ ///
+ /// let c = RefCell::new(5);
+ ///
+ /// let ptr = c.as_ptr();
+ /// ```
+ #[inline]
+ #[stable(feature = "cell_as_ptr", since = "1.12.0")]
+ pub fn as_ptr(&self) -> *mut T {
+ self.value.get()
+ }
+
+ /// Returns a mutable reference to the underlying data.
+ ///
+ /// This call borrows `RefCell` mutably (at compile-time) so there is no
+ /// need for dynamic checks.
+ ///
+ /// However be cautious: this method expects `self` to be mutable, which is
+ /// generally not the case when using a `RefCell`. Take a look at the
+ /// [`borrow_mut`] method instead if `self` isn't mutable.
+ ///
+ /// Also, please be aware that this method is only for special circumstances and is usually
+ /// not what you want. In case of doubt, use [`borrow_mut`] instead.
+ ///
+ /// [`borrow_mut`]: RefCell::borrow_mut()
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::RefCell;
+ ///
+ /// let mut c = RefCell::new(5);
+ /// *c.get_mut() += 1;
+ ///
+ /// assert_eq!(c, RefCell::new(6));
+ /// ```
+ #[inline]
+ #[stable(feature = "cell_get_mut", since = "1.11.0")]
+ pub fn get_mut(&mut self) -> &mut T {
+ self.value.get_mut()
+ }
+
+    /// Undoes the effect of leaked guards on the borrow state of the `RefCell`.
+ ///
+ /// This call is similar to [`get_mut`] but more specialized. It borrows `RefCell` mutably to
+ /// ensure no borrows exist and then resets the state tracking shared borrows. This is relevant
+ /// if some `Ref` or `RefMut` borrows have been leaked.
+ ///
+ /// [`get_mut`]: RefCell::get_mut()
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(cell_leak)]
+ /// use std::cell::RefCell;
+ ///
+ /// let mut c = RefCell::new(0);
+ /// std::mem::forget(c.borrow_mut());
+ ///
+ /// assert!(c.try_borrow().is_err());
+ /// c.undo_leak();
+ /// assert!(c.try_borrow().is_ok());
+ /// ```
+ #[unstable(feature = "cell_leak", issue = "69099")]
+ pub fn undo_leak(&mut self) -> &mut T {
+ *self.borrow.get_mut() = UNUSED;
+ self.get_mut()
+ }
+
+ /// Immutably borrows the wrapped value, returning an error if the value is
+ /// currently mutably borrowed.
+ ///
+ /// # Safety
+ ///
+ /// Unlike `RefCell::borrow`, this method is unsafe because it does not
+ /// return a `Ref`, thus leaving the borrow flag untouched. Mutably
+ /// borrowing the `RefCell` while the reference returned by this method
+ /// is alive is undefined behaviour.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::RefCell;
+ ///
+ /// let c = RefCell::new(5);
+ ///
+ /// {
+ /// let m = c.borrow_mut();
+ /// assert!(unsafe { c.try_borrow_unguarded() }.is_err());
+ /// }
+ ///
+ /// {
+ /// let m = c.borrow();
+ /// assert!(unsafe { c.try_borrow_unguarded() }.is_ok());
+ /// }
+ /// ```
+ #[stable(feature = "borrow_state", since = "1.37.0")]
+ #[inline]
+ pub unsafe fn try_borrow_unguarded(&self) -> Result<&T, BorrowError> {
+ if !is_writing(self.borrow.get()) {
+ // SAFETY: We check that nobody is actively writing now, but it is
+ // the caller's responsibility to ensure that nobody writes until
+ // the returned reference is no longer in use.
+ // Also, `self.value.get()` refers to the value owned by `self`
+ // and is thus guaranteed to be valid for the lifetime of `self`.
+ Ok(unsafe { &*self.value.get() })
+ } else {
+ Err(BorrowError {
+ // If a borrow occurred, then we must already have an outstanding borrow,
+ // so `borrowed_at` will be `Some`
+ #[cfg(feature = "debug_refcell")]
+ location: self.borrowed_at.get().unwrap(),
+ })
+ }
+ }
+}
+
+impl<T: Default> RefCell<T> {
+ /// Takes the wrapped value, leaving `Default::default()` in its place.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value is currently borrowed.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::RefCell;
+ ///
+ /// let c = RefCell::new(5);
+ /// let five = c.take();
+ ///
+ /// assert_eq!(five, 5);
+ /// assert_eq!(c.into_inner(), 0);
+ /// ```
+ #[stable(feature = "refcell_take", since = "1.50.0")]
+ pub fn take(&self) -> T {
+ self.replace(Default::default())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: ?Sized> Send for RefCell<T> where T: Send {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> !Sync for RefCell<T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone> Clone for RefCell<T> {
+ /// # Panics
+ ///
+ /// Panics if the value is currently mutably borrowed.
+ #[inline]
+ #[track_caller]
+ fn clone(&self) -> RefCell<T> {
+ RefCell::new(self.borrow().clone())
+ }
+
+ /// # Panics
+ ///
+ /// Panics if `other` is currently mutably borrowed.
+ #[inline]
+ #[track_caller]
+ fn clone_from(&mut self, other: &Self) {
+ self.get_mut().clone_from(&other.borrow())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Default> Default for RefCell<T> {
+    /// Creates a `RefCell<T>` with the `Default` value for `T`.
+ #[inline]
+ fn default() -> RefCell<T> {
+ RefCell::new(Default::default())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + PartialEq> PartialEq for RefCell<T> {
+ /// # Panics
+ ///
+ /// Panics if the value in either `RefCell` is currently borrowed.
+ #[inline]
+ fn eq(&self, other: &RefCell<T>) -> bool {
+ *self.borrow() == *other.borrow()
+ }
+}
+
+#[stable(feature = "cell_eq", since = "1.2.0")]
+impl<T: ?Sized + Eq> Eq for RefCell<T> {}
+
+#[stable(feature = "cell_ord", since = "1.10.0")]
+impl<T: ?Sized + PartialOrd> PartialOrd for RefCell<T> {
+ /// # Panics
+ ///
+ /// Panics if the value in either `RefCell` is currently borrowed.
+ #[inline]
+ fn partial_cmp(&self, other: &RefCell<T>) -> Option<Ordering> {
+ self.borrow().partial_cmp(&*other.borrow())
+ }
+
+ /// # Panics
+ ///
+ /// Panics if the value in either `RefCell` is currently borrowed.
+ #[inline]
+ fn lt(&self, other: &RefCell<T>) -> bool {
+ *self.borrow() < *other.borrow()
+ }
+
+ /// # Panics
+ ///
+ /// Panics if the value in either `RefCell` is currently borrowed.
+ #[inline]
+ fn le(&self, other: &RefCell<T>) -> bool {
+ *self.borrow() <= *other.borrow()
+ }
+
+ /// # Panics
+ ///
+ /// Panics if the value in either `RefCell` is currently borrowed.
+ #[inline]
+ fn gt(&self, other: &RefCell<T>) -> bool {
+ *self.borrow() > *other.borrow()
+ }
+
+ /// # Panics
+ ///
+ /// Panics if the value in either `RefCell` is currently borrowed.
+ #[inline]
+ fn ge(&self, other: &RefCell<T>) -> bool {
+ *self.borrow() >= *other.borrow()
+ }
+}
+
+#[stable(feature = "cell_ord", since = "1.10.0")]
+impl<T: ?Sized + Ord> Ord for RefCell<T> {
+ /// # Panics
+ ///
+ /// Panics if the value in either `RefCell` is currently borrowed.
+ #[inline]
+ fn cmp(&self, other: &RefCell<T>) -> Ordering {
+ self.borrow().cmp(&*other.borrow())
+ }
+}
+
+#[stable(feature = "cell_from", since = "1.12.0")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl<T> const From<T> for RefCell<T> {
+ /// Creates a new `RefCell<T>` containing the given value.
+ fn from(t: T) -> RefCell<T> {
+ RefCell::new(t)
+ }
+}
+
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<T: CoerceUnsized<U>, U> CoerceUnsized<RefCell<U>> for RefCell<T> {}
+
+struct BorrowRef<'b> {
+ borrow: &'b Cell<BorrowFlag>,
+}
+
+impl<'b> BorrowRef<'b> {
+ #[inline]
+ fn new(borrow: &'b Cell<BorrowFlag>) -> Option<BorrowRef<'b>> {
+ let b = borrow.get().wrapping_add(1);
+ if !is_reading(b) {
+ // Incrementing borrow can result in a non-reading value (<= 0) in these cases:
+ // 1. It was < 0, i.e. there are writing borrows, so we can't allow a read borrow
+ // due to Rust's reference aliasing rules
+ // 2. It was isize::MAX (the max amount of reading borrows) and it overflowed
+ // into isize::MIN (the max amount of writing borrows) so we can't allow
+ // an additional read borrow because isize can't represent so many read borrows
+ // (this can only happen if you mem::forget more than a small constant amount of
+ // `Ref`s, which is not good practice)
+ None
+ } else {
+ // Incrementing borrow can result in a reading value (> 0) in these cases:
+ // 1. It was = 0, i.e. it wasn't borrowed, and we are taking the first read borrow
+ // 2. It was > 0 and < isize::MAX, i.e. there were read borrows, and isize
+ // is large enough to represent having one more read borrow
+ borrow.set(b);
+ Some(BorrowRef { borrow })
+ }
+ }
+}
+
+impl Drop for BorrowRef<'_> {
+ #[inline]
+ fn drop(&mut self) {
+ let borrow = self.borrow.get();
+ debug_assert!(is_reading(borrow));
+ self.borrow.set(borrow - 1);
+ }
+}
+
+impl Clone for BorrowRef<'_> {
+ #[inline]
+ fn clone(&self) -> Self {
+ // Since this Ref exists, we know the borrow flag
+ // is a reading borrow.
+ let borrow = self.borrow.get();
+ debug_assert!(is_reading(borrow));
+ // Prevent the borrow counter from overflowing into
+ // a writing borrow.
+ assert!(borrow != isize::MAX);
+ self.borrow.set(borrow + 1);
+ BorrowRef { borrow: self.borrow }
+ }
+}
+
+/// A wrapper type for an immutably borrowed value from a `RefCell<T>`.
+///
+/// See the [module-level documentation](self) for more.
+#[stable(feature = "rust1", since = "1.0.0")]
+#[must_not_suspend = "holding a Ref across suspend points can cause BorrowErrors"]
+pub struct Ref<'b, T: ?Sized + 'b> {
+ // NB: we use a pointer instead of `&'b T` to avoid `noalias` violations, because a
+ // `Ref` argument doesn't hold immutability for its whole scope, only until it drops.
+ // `NonNull` is also covariant over `T`, just like we would have with `&T`.
+ value: NonNull<T>,
+ borrow: BorrowRef<'b>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Deref for Ref<'_, T> {
+ type Target = T;
+
+ #[inline]
+ fn deref(&self) -> &T {
+ // SAFETY: the value is accessible as long as we hold our borrow.
+ unsafe { self.value.as_ref() }
+ }
+}
+
+impl<'b, T: ?Sized> Ref<'b, T> {
+ /// Copies a `Ref`.
+ ///
+ /// The `RefCell` is already immutably borrowed, so this cannot fail.
+ ///
+ /// This is an associated function that needs to be used as
+ /// `Ref::clone(...)`. A `Clone` implementation or a method would interfere
+ /// with the widespread use of `r.borrow().clone()` to clone the contents of
+ /// a `RefCell`.
+ #[stable(feature = "cell_extras", since = "1.15.0")]
+ #[must_use]
+ #[inline]
+ pub fn clone(orig: &Ref<'b, T>) -> Ref<'b, T> {
+ Ref { value: orig.value, borrow: orig.borrow.clone() }
+ }
+
+ /// Makes a new `Ref` for a component of the borrowed data.
+ ///
+ /// The `RefCell` is already immutably borrowed, so this cannot fail.
+ ///
+ /// This is an associated function that needs to be used as `Ref::map(...)`.
+ /// A method would interfere with methods of the same name on the contents
+ /// of a `RefCell` used through `Deref`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::{RefCell, Ref};
+ ///
+ /// let c = RefCell::new((5, 'b'));
+ /// let b1: Ref<(u32, char)> = c.borrow();
+ /// let b2: Ref<u32> = Ref::map(b1, |t| &t.0);
+ /// assert_eq!(*b2, 5)
+ /// ```
+ #[stable(feature = "cell_map", since = "1.8.0")]
+ #[inline]
+ pub fn map<U: ?Sized, F>(orig: Ref<'b, T>, f: F) -> Ref<'b, U>
+ where
+ F: FnOnce(&T) -> &U,
+ {
+ Ref { value: NonNull::from(f(&*orig)), borrow: orig.borrow }
+ }
+
+ /// Makes a new `Ref` for an optional component of the borrowed data. The
+ /// original guard is returned as an `Err(..)` if the closure returns
+ /// `None`.
+ ///
+ /// The `RefCell` is already immutably borrowed, so this cannot fail.
+ ///
+ /// This is an associated function that needs to be used as
+ /// `Ref::filter_map(...)`. A method would interfere with methods of the same
+ /// name on the contents of a `RefCell` used through `Deref`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::{RefCell, Ref};
+ ///
+ /// let c = RefCell::new(vec![1, 2, 3]);
+ /// let b1: Ref<Vec<u32>> = c.borrow();
+ /// let b2: Result<Ref<u32>, _> = Ref::filter_map(b1, |v| v.get(1));
+ /// assert_eq!(*b2.unwrap(), 2);
+ /// ```
+ #[stable(feature = "cell_filter_map", since = "1.63.0")]
+ #[inline]
+ pub fn filter_map<U: ?Sized, F>(orig: Ref<'b, T>, f: F) -> Result<Ref<'b, U>, Self>
+ where
+ F: FnOnce(&T) -> Option<&U>,
+ {
+ match f(&*orig) {
+ Some(value) => Ok(Ref { value: NonNull::from(value), borrow: orig.borrow }),
+ None => Err(orig),
+ }
+ }
+
+ /// Splits a `Ref` into multiple `Ref`s for different components of the
+ /// borrowed data.
+ ///
+ /// The `RefCell` is already immutably borrowed, so this cannot fail.
+ ///
+ /// This is an associated function that needs to be used as
+ /// `Ref::map_split(...)`. A method would interfere with methods of the same
+ /// name on the contents of a `RefCell` used through `Deref`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::{Ref, RefCell};
+ ///
+ /// let cell = RefCell::new([1, 2, 3, 4]);
+ /// let borrow = cell.borrow();
+ /// let (begin, end) = Ref::map_split(borrow, |slice| slice.split_at(2));
+ /// assert_eq!(*begin, [1, 2]);
+ /// assert_eq!(*end, [3, 4]);
+ /// ```
+ #[stable(feature = "refcell_map_split", since = "1.35.0")]
+ #[inline]
+ pub fn map_split<U: ?Sized, V: ?Sized, F>(orig: Ref<'b, T>, f: F) -> (Ref<'b, U>, Ref<'b, V>)
+ where
+ F: FnOnce(&T) -> (&U, &V),
+ {
+ let (a, b) = f(&*orig);
+ let borrow = orig.borrow.clone();
+ (
+ Ref { value: NonNull::from(a), borrow },
+ Ref { value: NonNull::from(b), borrow: orig.borrow },
+ )
+ }
+
+    /// Converts into a reference to the underlying data.
+ ///
+ /// The underlying `RefCell` can never be mutably borrowed from again and will always appear
+    /// already immutably borrowed. It is not a good idea to leak more than a constant number of
+    /// references: the `RefCell` can only be immutably borrowed again while the total number of
+    /// leaked borrows remains below the internal counter's maximum.
+ ///
+ /// This is an associated function that needs to be used as
+ /// `Ref::leak(...)`. A method would interfere with methods of the
+ /// same name on the contents of a `RefCell` used through `Deref`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(cell_leak)]
+ /// use std::cell::{RefCell, Ref};
+ /// let cell = RefCell::new(0);
+ ///
+ /// let value = Ref::leak(cell.borrow());
+ /// assert_eq!(*value, 0);
+ ///
+ /// assert!(cell.try_borrow().is_ok());
+ /// assert!(cell.try_borrow_mut().is_err());
+ /// ```
+ #[unstable(feature = "cell_leak", issue = "69099")]
+ pub fn leak(orig: Ref<'b, T>) -> &'b T {
+ // By forgetting this Ref we ensure that the borrow counter in the RefCell can't go back to
+ // UNUSED within the lifetime `'b`. Resetting the reference tracking state would require a
+ // unique reference to the borrowed RefCell. No further mutable references can be created
+ // from the original cell.
+ mem::forget(orig.borrow);
+ // SAFETY: after forgetting, we can form a reference for the rest of lifetime `'b`.
+ unsafe { orig.value.as_ref() }
+ }
+}
+
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<'b, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Ref<'b, U>> for Ref<'b, T> {}
+
+#[stable(feature = "std_guard_impls", since = "1.20.0")]
+impl<T: ?Sized + fmt::Display> fmt::Display for Ref<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
+
+impl<'b, T: ?Sized> RefMut<'b, T> {
+ /// Makes a new `RefMut` for a component of the borrowed data, e.g., an enum
+ /// variant.
+ ///
+ /// The `RefCell` is already mutably borrowed, so this cannot fail.
+ ///
+ /// This is an associated function that needs to be used as
+ /// `RefMut::map(...)`. A method would interfere with methods of the same
+ /// name on the contents of a `RefCell` used through `Deref`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::{RefCell, RefMut};
+ ///
+ /// let c = RefCell::new((5, 'b'));
+ /// {
+ /// let b1: RefMut<(u32, char)> = c.borrow_mut();
+ /// let mut b2: RefMut<u32> = RefMut::map(b1, |t| &mut t.0);
+ /// assert_eq!(*b2, 5);
+ /// *b2 = 42;
+ /// }
+ /// assert_eq!(*c.borrow(), (42, 'b'));
+ /// ```
+ #[stable(feature = "cell_map", since = "1.8.0")]
+ #[inline]
+ pub fn map<U: ?Sized, F>(mut orig: RefMut<'b, T>, f: F) -> RefMut<'b, U>
+ where
+ F: FnOnce(&mut T) -> &mut U,
+ {
+ let value = NonNull::from(f(&mut *orig));
+ RefMut { value, borrow: orig.borrow, marker: PhantomData }
+ }
+
+ /// Makes a new `RefMut` for an optional component of the borrowed data. The
+ /// original guard is returned as an `Err(..)` if the closure returns
+ /// `None`.
+ ///
+ /// The `RefCell` is already mutably borrowed, so this cannot fail.
+ ///
+ /// This is an associated function that needs to be used as
+ /// `RefMut::filter_map(...)`. A method would interfere with methods of the
+ /// same name on the contents of a `RefCell` used through `Deref`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::{RefCell, RefMut};
+ ///
+ /// let c = RefCell::new(vec![1, 2, 3]);
+ ///
+ /// {
+ /// let b1: RefMut<Vec<u32>> = c.borrow_mut();
+ /// let mut b2: Result<RefMut<u32>, _> = RefMut::filter_map(b1, |v| v.get_mut(1));
+ ///
+ /// if let Ok(mut b2) = b2 {
+ /// *b2 += 2;
+ /// }
+ /// }
+ ///
+ /// assert_eq!(*c.borrow(), vec![1, 4, 3]);
+ /// ```
+ #[stable(feature = "cell_filter_map", since = "1.63.0")]
+ #[inline]
+ pub fn filter_map<U: ?Sized, F>(mut orig: RefMut<'b, T>, f: F) -> Result<RefMut<'b, U>, Self>
+ where
+ F: FnOnce(&mut T) -> Option<&mut U>,
+ {
+        // SAFETY: the function holds onto an exclusive reference for the duration
+        // of its call through `orig`, and the pointer is only dereferenced
+        // inside of the function call, never allowing the exclusive reference to
+        // escape.
+ match f(&mut *orig) {
+ Some(value) => {
+ Ok(RefMut { value: NonNull::from(value), borrow: orig.borrow, marker: PhantomData })
+ }
+ None => Err(orig),
+ }
+ }
+
+ /// Splits a `RefMut` into multiple `RefMut`s for different components of the
+ /// borrowed data.
+ ///
+ /// The underlying `RefCell` will remain mutably borrowed until both
+ /// returned `RefMut`s go out of scope.
+ ///
+ /// The `RefCell` is already mutably borrowed, so this cannot fail.
+ ///
+ /// This is an associated function that needs to be used as
+ /// `RefMut::map_split(...)`. A method would interfere with methods of the
+ /// same name on the contents of a `RefCell` used through `Deref`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::{RefCell, RefMut};
+ ///
+ /// let cell = RefCell::new([1, 2, 3, 4]);
+ /// let borrow = cell.borrow_mut();
+ /// let (mut begin, mut end) = RefMut::map_split(borrow, |slice| slice.split_at_mut(2));
+ /// assert_eq!(*begin, [1, 2]);
+ /// assert_eq!(*end, [3, 4]);
+ /// begin.copy_from_slice(&[4, 3]);
+ /// end.copy_from_slice(&[2, 1]);
+ /// ```
+ #[stable(feature = "refcell_map_split", since = "1.35.0")]
+ #[inline]
+ pub fn map_split<U: ?Sized, V: ?Sized, F>(
+ mut orig: RefMut<'b, T>,
+ f: F,
+ ) -> (RefMut<'b, U>, RefMut<'b, V>)
+ where
+ F: FnOnce(&mut T) -> (&mut U, &mut V),
+ {
+ let borrow = orig.borrow.clone();
+ let (a, b) = f(&mut *orig);
+ (
+ RefMut { value: NonNull::from(a), borrow, marker: PhantomData },
+ RefMut { value: NonNull::from(b), borrow: orig.borrow, marker: PhantomData },
+ )
+ }
+
+    /// Converts into a mutable reference to the underlying data.
+    ///
+    /// The underlying `RefCell` cannot be borrowed from again and will always appear already
+    /// mutably borrowed, making the returned reference the only reference to the interior.
+ ///
+ /// This is an associated function that needs to be used as
+ /// `RefMut::leak(...)`. A method would interfere with methods of the
+ /// same name on the contents of a `RefCell` used through `Deref`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(cell_leak)]
+ /// use std::cell::{RefCell, RefMut};
+ /// let cell = RefCell::new(0);
+ ///
+ /// let value = RefMut::leak(cell.borrow_mut());
+ /// assert_eq!(*value, 0);
+ /// *value = 1;
+ ///
+ /// assert!(cell.try_borrow_mut().is_err());
+ /// ```
+ #[unstable(feature = "cell_leak", issue = "69099")]
+ pub fn leak(mut orig: RefMut<'b, T>) -> &'b mut T {
+ // By forgetting this BorrowRefMut we ensure that the borrow counter in the RefCell can't
+ // go back to UNUSED within the lifetime `'b`. Resetting the reference tracking state would
+ // require a unique reference to the borrowed RefCell. No further references can be created
+ // from the original cell within that lifetime, making the current borrow the only
+ // reference for the remaining lifetime.
+ mem::forget(orig.borrow);
+ // SAFETY: after forgetting, we can form a reference for the rest of lifetime `'b`.
+ unsafe { orig.value.as_mut() }
+ }
+}
+
+struct BorrowRefMut<'b> {
+ borrow: &'b Cell<BorrowFlag>,
+}
+
+impl Drop for BorrowRefMut<'_> {
+ #[inline]
+ fn drop(&mut self) {
+ let borrow = self.borrow.get();
+ debug_assert!(is_writing(borrow));
+ self.borrow.set(borrow + 1);
+ }
+}
+
+impl<'b> BorrowRefMut<'b> {
+ #[inline]
+ fn new(borrow: &'b Cell<BorrowFlag>) -> Option<BorrowRefMut<'b>> {
+ // NOTE: Unlike BorrowRefMut::clone, new is called to create the initial
+ // mutable reference, and so there must currently be no existing
+ // references. Thus, while clone increments the mutable refcount, here
+ // we explicitly only allow going from UNUSED to UNUSED - 1.
+ match borrow.get() {
+ UNUSED => {
+ borrow.set(UNUSED - 1);
+ Some(BorrowRefMut { borrow })
+ }
+ _ => None,
+ }
+ }
+
+ // Clones a `BorrowRefMut`.
+ //
+ // This is only valid if each `BorrowRefMut` is used to track a mutable
+ // reference to a distinct, nonoverlapping range of the original object.
+ // This isn't in a Clone impl so that code doesn't call this implicitly.
+ #[inline]
+ fn clone(&self) -> BorrowRefMut<'b> {
+ let borrow = self.borrow.get();
+ debug_assert!(is_writing(borrow));
+ // Prevent the borrow counter from underflowing.
+ assert!(borrow != isize::MIN);
+ self.borrow.set(borrow - 1);
+ BorrowRefMut { borrow: self.borrow }
+ }
+}
+
+/// A wrapper type for a mutably borrowed value from a `RefCell<T>`.
+///
+/// See the [module-level documentation](self) for more.
+#[stable(feature = "rust1", since = "1.0.0")]
+#[must_not_suspend = "holding a RefMut across suspend points can cause BorrowErrors"]
+pub struct RefMut<'b, T: ?Sized + 'b> {
+ // NB: we use a pointer instead of `&'b mut T` to avoid `noalias` violations, because a
+ // `RefMut` argument doesn't hold exclusivity for its whole scope, only until it drops.
+ value: NonNull<T>,
+ borrow: BorrowRefMut<'b>,
+ // `NonNull` is covariant over `T`, so we need to reintroduce invariance.
+ marker: PhantomData<&'b mut T>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Deref for RefMut<'_, T> {
+ type Target = T;
+
+ #[inline]
+ fn deref(&self) -> &T {
+ // SAFETY: the value is accessible as long as we hold our borrow.
+ unsafe { self.value.as_ref() }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> DerefMut for RefMut<'_, T> {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut T {
+ // SAFETY: the value is accessible as long as we hold our borrow.
+ unsafe { self.value.as_mut() }
+ }
+}
+
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<'b, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<RefMut<'b, U>> for RefMut<'b, T> {}
+
+#[stable(feature = "std_guard_impls", since = "1.20.0")]
+impl<T: ?Sized + fmt::Display> fmt::Display for RefMut<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
+
+/// The core primitive for interior mutability in Rust.
+///
+/// If you have a reference `&T`, then normally in Rust the compiler performs optimizations based on
+/// the knowledge that `&T` points to immutable data. Mutating that data, for example through an
+/// alias or by transmuting an `&T` into an `&mut T`, is considered undefined behavior.
+/// `UnsafeCell<T>` opts out of the immutability guarantee for `&T`: a shared reference
+/// `&UnsafeCell<T>` may point to data that is being mutated. This is called "interior mutability".
+///
+/// All other types that allow internal mutability, such as `Cell<T>` and `RefCell<T>`, internally
+/// use `UnsafeCell` to wrap their data.
+///
+/// Note that only the immutability guarantee for shared references is affected by `UnsafeCell`. The
+/// uniqueness guarantee for mutable references is unaffected. There is *no* legal way to obtain
+/// aliasing `&mut`, not even with `UnsafeCell<T>`.
+///
+/// The `UnsafeCell` API itself is technically very simple: [`.get()`] gives you a raw pointer
+/// `*mut T` to its contents. It is up to _you_ as the abstraction designer to use that raw pointer
+/// correctly.
+///
+/// [`.get()`]: `UnsafeCell::get`
+///
+/// The precise Rust aliasing rules are somewhat in flux, but the main points are not contentious:
+///
+/// - If you create a safe reference with lifetime `'a` (either a `&T` or `&mut T` reference), then
+/// you must not access the data in any way that contradicts that reference for the remainder of
+/// `'a`. For example, this means that if you take the `*mut T` from an `UnsafeCell<T>` and cast it
+/// to an `&T`, then the data in `T` must remain immutable (modulo any `UnsafeCell` data found
+/// within `T`, of course) until that reference's lifetime expires. Similarly, if you create a `&mut
+/// T` reference that is released to safe code, then you must not access the data within the
+/// `UnsafeCell` until that reference expires.
+///
+/// - For both `&T` without `UnsafeCell<_>` and `&mut T`, you must also not deallocate the data
+/// until the reference expires. As a special exception, given an `&T`, any part of it that is
+/// inside an `UnsafeCell<_>` may be deallocated during the lifetime of the reference, after the
+/// last time the reference is used (dereferenced or reborrowed). Since you cannot deallocate a part
+/// of what a reference points to, this means the memory an `&T` points to can be deallocated only if
+/// *every part of it* (including padding) is inside an `UnsafeCell`.
+///
+/// However, whenever a `&UnsafeCell<T>` is constructed or dereferenced, it must still point to
+/// live memory and the compiler is allowed to insert spurious reads if it can prove that this
+/// memory has not yet been deallocated.
+///
+/// - At all times, you must avoid data races. If multiple threads have access to
+/// the same `UnsafeCell`, then any writes must have a proper happens-before relation to all other
+/// accesses (or use atomics).
+///
+/// To assist with proper design, the following scenarios are explicitly declared legal
+/// for single-threaded code:
+///
+/// 1. A `&T` reference can be released to safe code and there it can co-exist with other `&T`
+/// references, but not with a `&mut T`.
+///
+/// 2. A `&mut T` reference may be released to safe code provided neither other `&mut T` nor `&T`
+/// co-exist with it. A `&mut T` must always be unique.
+///
+/// Note that whilst mutating the contents of an `&UnsafeCell<T>` (even while other
+/// `&UnsafeCell<T>` references alias the cell) is
+/// ok (provided you enforce the above invariants some other way), it is still undefined behavior
+/// to have multiple `&mut UnsafeCell<T>` aliases. That is, `UnsafeCell` is a wrapper
+/// designed to have a special interaction with _shared_ accesses (_i.e._, through an
+/// `&UnsafeCell<_>` reference); there is no magic whatsoever when dealing with _exclusive_
+/// accesses (_e.g._, through an `&mut UnsafeCell<_>`): neither the cell nor the wrapped value
+/// may be aliased for the duration of that `&mut` borrow.
+/// This is showcased by the [`.get_mut()`] accessor, which is a _safe_ getter that yields
+/// a `&mut T`.
+///
+/// [`.get_mut()`]: `UnsafeCell::get_mut`
+///
+/// # Examples
+///
+/// Here is an example showcasing how to soundly mutate the contents of an `UnsafeCell<_>` despite
+/// there being multiple references aliasing the cell:
+///
+/// ```
+/// use std::cell::UnsafeCell;
+///
+/// let x: UnsafeCell<i32> = 42.into();
+/// // Get multiple / concurrent / shared references to the same `x`.
+/// let (p1, p2): (&UnsafeCell<i32>, &UnsafeCell<i32>) = (&x, &x);
+///
+/// unsafe {
+/// // SAFETY: within this scope there are no other references to `x`'s contents,
+/// // so ours is effectively unique.
+/// let p1_exclusive: &mut i32 = &mut *p1.get(); // -- borrow --+
+/// *p1_exclusive += 27; // |
+/// } // <---------- cannot go beyond this point -------------------+
+///
+/// unsafe {
+/// // SAFETY: within this scope nobody expects to have exclusive access to `x`'s contents,
+/// // so we can have multiple shared accesses concurrently.
+/// let p2_shared: &i32 = &*p2.get();
+/// assert_eq!(*p2_shared, 42 + 27);
+/// let p1_shared: &i32 = &*p1.get();
+/// assert_eq!(*p1_shared, *p2_shared);
+/// }
+/// ```
+///
+/// The following example showcases the fact that exclusive access to an `UnsafeCell<T>`
+/// implies exclusive access to its `T`:
+///
+/// ```rust
+/// #![forbid(unsafe_code)] // with exclusive accesses,
+/// // `UnsafeCell` is a transparent no-op wrapper,
+/// // so no need for `unsafe` here.
+/// use std::cell::UnsafeCell;
+///
+/// let mut x: UnsafeCell<i32> = 42.into();
+///
+/// // Get a compile-time-checked unique reference to `x`.
+/// let p_unique: &mut UnsafeCell<i32> = &mut x;
+/// // With an exclusive reference, we can mutate the contents for free.
+/// *p_unique.get_mut() = 0;
+/// // Or, equivalently:
+/// x = UnsafeCell::new(0);
+///
+/// // When we own the value, we can extract the contents for free.
+/// let contents: i32 = x.into_inner();
+/// assert_eq!(contents, 0);
+/// ```
+#[lang = "unsafe_cell"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[repr(transparent)]
+pub struct UnsafeCell<T: ?Sized> {
+ value: T,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> !Sync for UnsafeCell<T> {}
+
+impl<T> UnsafeCell<T> {
+ /// Constructs a new instance of `UnsafeCell` which will wrap the specified
+ /// value.
+ ///
+ /// All access to the inner value through a shared reference requires `unsafe` code.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::UnsafeCell;
+ ///
+ /// let uc = UnsafeCell::new(5);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_unsafe_cell_new", since = "1.32.0")]
+ #[inline(always)]
+ pub const fn new(value: T) -> UnsafeCell<T> {
+ UnsafeCell { value }
+ }
+
+ /// Unwraps the value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::UnsafeCell;
+ ///
+ /// let uc = UnsafeCell::new(5);
+ ///
+ /// let five = uc.into_inner();
+ /// ```
+ #[inline(always)]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_cell_into_inner", issue = "78729")]
+ pub const fn into_inner(self) -> T {
+ self.value
+ }
+}
+
+impl<T: ?Sized> UnsafeCell<T> {
+ /// Gets a mutable pointer to the wrapped value.
+ ///
+ /// This can be cast to a pointer of any kind.
+ /// Ensure that the access is unique (no active references, mutable or not)
+ /// when casting to `&mut T`, and ensure that there are no mutations
+ /// or mutable aliases going on when casting to `&T`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::UnsafeCell;
+ ///
+ /// let uc = UnsafeCell::new(5);
+ ///
+ /// let five = uc.get();
+ /// ```
+ #[inline(always)]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_unsafecell_get", since = "1.32.0")]
+ pub const fn get(&self) -> *mut T {
+ // We can just cast the pointer from `UnsafeCell<T>` to `T` because of
+ // #[repr(transparent)]. This exploits libstd's special status; there is
+ // no guarantee for user code that this will work in future versions of the compiler!
+ self as *const UnsafeCell<T> as *const T as *mut T
+ }
+
+ /// Returns a mutable reference to the underlying data.
+ ///
+ /// This call borrows the `UnsafeCell` mutably (at compile-time) which
+ /// guarantees that we possess the only reference.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::UnsafeCell;
+ ///
+ /// let mut c = UnsafeCell::new(5);
+ /// *c.get_mut() += 1;
+ ///
+ /// assert_eq!(*c.get_mut(), 6);
+ /// ```
+ #[inline(always)]
+ #[stable(feature = "unsafe_cell_get_mut", since = "1.50.0")]
+ #[rustc_const_unstable(feature = "const_unsafecell_get_mut", issue = "88836")]
+ pub const fn get_mut(&mut self) -> &mut T {
+ &mut self.value
+ }
+
+ /// Gets a mutable pointer to the wrapped value.
+ /// The difference from [`get`] is that this function accepts a raw pointer,
+ /// which is useful to avoid the creation of temporary references.
+ ///
+ /// The result can be cast to a pointer of any kind.
+ /// Ensure that the access is unique (no active references, mutable or not)
+ /// when casting to `&mut T`, and ensure that there are no mutations
+ /// or mutable aliases going on when casting to `&T`.
+ ///
+ /// [`get`]: UnsafeCell::get()
+ ///
+ /// # Examples
+ ///
+ /// Gradual initialization of an `UnsafeCell` requires `raw_get`, as
+ /// calling `get` would require creating a reference to uninitialized data:
+ ///
+ /// ```
+ /// use std::cell::UnsafeCell;
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let m = MaybeUninit::<UnsafeCell<i32>>::uninit();
+ /// unsafe { UnsafeCell::raw_get(m.as_ptr()).write(5); }
+ /// let uc = unsafe { m.assume_init() };
+ ///
+ /// assert_eq!(uc.into_inner(), 5);
+ /// ```
+ #[inline(always)]
+ #[stable(feature = "unsafe_cell_raw_get", since = "1.56.0")]
+ #[rustc_const_stable(feature = "unsafe_cell_raw_get", since = "1.56.0")]
+ pub const fn raw_get(this: *const Self) -> *mut T {
+ // We can just cast the pointer from `UnsafeCell<T>` to `T` because of
+ // #[repr(transparent)]. This exploits libstd's special status; there is
+ // no guarantee for user code that this will work in future versions of the compiler!
+ this as *const T as *mut T
+ }
+}
+
+#[stable(feature = "unsafe_cell_default", since = "1.10.0")]
+impl<T: Default> Default for UnsafeCell<T> {
+ /// Creates an `UnsafeCell`, with the `Default` value for `T`.
+ fn default() -> UnsafeCell<T> {
+ UnsafeCell::new(Default::default())
+ }
+}
+
+#[stable(feature = "cell_from", since = "1.12.0")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl<T> const From<T> for UnsafeCell<T> {
+ /// Creates a new `UnsafeCell<T>` containing the given value.
+ fn from(t: T) -> UnsafeCell<T> {
+ UnsafeCell::new(t)
+ }
+}
+
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<T: CoerceUnsized<U>, U> CoerceUnsized<UnsafeCell<U>> for UnsafeCell<T> {}
+
+/// [`UnsafeCell`], but [`Sync`].
+///
+/// This is just an `UnsafeCell`, except it implements `Sync`
+/// if `T` implements `Sync`.
+///
+/// `UnsafeCell` doesn't implement `Sync`, to prevent accidental misuse.
+/// You can use `SyncUnsafeCell` instead of `UnsafeCell` to allow it to be
+/// shared between threads, if that's intentional.
+/// Providing proper synchronization is still the task of the user,
+/// making this type just as unsafe to use.
+///
+/// See [`UnsafeCell`] for details.
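+///
+/// # Examples
+///
+/// A minimal single-threaded sketch (assuming the unstable `sync_unsafe_cell`
+/// feature; real cross-thread use would additionally need proper synchronization):
+///
+/// ```
+/// #![feature(sync_unsafe_cell)]
+/// use std::cell::SyncUnsafeCell;
+///
+/// let cell = SyncUnsafeCell::new(5);
+/// // SAFETY: no other references to the contents exist while we access them.
+/// unsafe { *cell.get() += 1 };
+/// assert_eq!(unsafe { *cell.get() }, 6);
+/// ```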
+#[unstable(feature = "sync_unsafe_cell", issue = "95439")]
+#[repr(transparent)]
+pub struct SyncUnsafeCell<T: ?Sized> {
+ value: UnsafeCell<T>,
+}
+
+#[unstable(feature = "sync_unsafe_cell", issue = "95439")]
+unsafe impl<T: ?Sized + Sync> Sync for SyncUnsafeCell<T> {}
+
+#[unstable(feature = "sync_unsafe_cell", issue = "95439")]
+impl<T> SyncUnsafeCell<T> {
+ /// Constructs a new instance of `SyncUnsafeCell` which will wrap the specified value.
+ #[inline]
+ pub const fn new(value: T) -> Self {
+ Self { value: UnsafeCell { value } }
+ }
+
+ /// Unwraps the value.
+ #[inline]
+ pub const fn into_inner(self) -> T {
+ self.value.into_inner()
+ }
+}
+
+#[unstable(feature = "sync_unsafe_cell", issue = "95439")]
+impl<T: ?Sized> SyncUnsafeCell<T> {
+ /// Gets a mutable pointer to the wrapped value.
+ ///
+ /// This can be cast to a pointer of any kind.
+ /// Ensure that the access is unique (no active references, mutable or not)
+ /// when casting to `&mut T`, and ensure that there are no mutations
+ /// or mutable aliases going on when casting to `&T`.
+ #[inline]
+ pub const fn get(&self) -> *mut T {
+ self.value.get()
+ }
+
+ /// Returns a mutable reference to the underlying data.
+ ///
+ /// This call borrows the `SyncUnsafeCell` mutably (at compile-time) which
+ /// guarantees that we possess the only reference.
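+ ///
+ /// A short sketch of the intended use:
+ ///
+ /// ```
+ /// #![feature(sync_unsafe_cell)]
+ /// use std::cell::SyncUnsafeCell;
+ ///
+ /// let mut cell = SyncUnsafeCell::new(5);
+ /// // The exclusive borrow makes this safe: no other reference can exist.
+ /// *cell.get_mut() += 1;
+ /// assert_eq!(cell.into_inner(), 6);
+ /// ```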
+ #[inline]
+ pub const fn get_mut(&mut self) -> &mut T {
+ self.value.get_mut()
+ }
+
+ /// Gets a mutable pointer to the wrapped value.
+ ///
+ /// See [`UnsafeCell::get`] for details.
+ #[inline]
+ pub const fn raw_get(this: *const Self) -> *mut T {
+ // We can just cast the pointer from `SyncUnsafeCell<T>` to `T` because
+ // of #[repr(transparent)] on both SyncUnsafeCell and UnsafeCell.
+ // See UnsafeCell::raw_get.
+ this as *const T as *mut T
+ }
+}
+
+#[unstable(feature = "sync_unsafe_cell", issue = "95439")]
+impl<T: Default> Default for SyncUnsafeCell<T> {
+ /// Creates a `SyncUnsafeCell`, with the `Default` value for `T`.
+ fn default() -> SyncUnsafeCell<T> {
+ SyncUnsafeCell::new(Default::default())
+ }
+}
+
+#[unstable(feature = "sync_unsafe_cell", issue = "95439")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl<T> const From<T> for SyncUnsafeCell<T> {
+ /// Creates a new `SyncUnsafeCell<T>` containing the given value.
+ fn from(t: T) -> SyncUnsafeCell<T> {
+ SyncUnsafeCell::new(t)
+ }
+}
+
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+//#[unstable(feature = "sync_unsafe_cell", issue = "95439")]
+impl<T: CoerceUnsized<U>, U> CoerceUnsized<SyncUnsafeCell<U>> for SyncUnsafeCell<T> {}
+
+#[allow(unused)]
+fn assert_coerce_unsized(
+ a: UnsafeCell<&i32>,
+ b: SyncUnsafeCell<&i32>,
+ c: Cell<&i32>,
+ d: RefCell<&i32>,
+) {
+ let _: UnsafeCell<&dyn Send> = a;
+ let _: SyncUnsafeCell<&dyn Send> = b;
+ let _: Cell<&dyn Send> = c;
+ let _: RefCell<&dyn Send> = d;
+}
diff --git a/library/core/src/cell/lazy.rs b/library/core/src/cell/lazy.rs
new file mode 100644
index 000000000..7844be5f7
--- /dev/null
+++ b/library/core/src/cell/lazy.rs
@@ -0,0 +1,104 @@
+use crate::cell::{Cell, OnceCell};
+use crate::fmt;
+use crate::ops::Deref;
+
+/// A value which is initialized on the first access.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(once_cell)]
+///
+/// use std::cell::LazyCell;
+///
+/// let lazy: LazyCell<i32> = LazyCell::new(|| {
+/// println!("initializing");
+/// 92
+/// });
+/// println!("ready");
+/// println!("{}", *lazy);
+/// println!("{}", *lazy);
+///
+/// // Prints:
+/// // ready
+/// // initializing
+/// // 92
+/// // 92
+/// ```
+#[unstable(feature = "once_cell", issue = "74465")]
+pub struct LazyCell<T, F = fn() -> T> {
+ cell: OnceCell<T>,
+ init: Cell<Option<F>>,
+}
+
+impl<T, F> LazyCell<T, F> {
+ /// Creates a new lazy value with the given initializing function.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(once_cell)]
+ ///
+ /// use std::cell::LazyCell;
+ ///
+ /// let hello = "Hello, World!".to_string();
+ ///
+ /// let lazy = LazyCell::new(|| hello.to_uppercase());
+ ///
+ /// assert_eq!(&*lazy, "HELLO, WORLD!");
+ /// ```
+ #[unstable(feature = "once_cell", issue = "74465")]
+ pub const fn new(init: F) -> LazyCell<T, F> {
+ LazyCell { cell: OnceCell::new(), init: Cell::new(Some(init)) }
+ }
+}
+
+impl<T, F: FnOnce() -> T> LazyCell<T, F> {
+ /// Forces the evaluation of this lazy value and returns a reference to
+ /// the result.
+ ///
+ /// This is equivalent to the `Deref` impl, but is explicit.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(once_cell)]
+ ///
+ /// use std::cell::LazyCell;
+ ///
+ /// let lazy = LazyCell::new(|| 92);
+ ///
+ /// assert_eq!(LazyCell::force(&lazy), &92);
+ /// assert_eq!(&*lazy, &92);
+ /// ```
+ #[unstable(feature = "once_cell", issue = "74465")]
+ pub fn force(this: &LazyCell<T, F>) -> &T {
+ this.cell.get_or_init(|| match this.init.take() {
+ Some(f) => f(),
+ None => panic!("`Lazy` instance has previously been poisoned"),
+ })
+ }
+}
+
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T, F: FnOnce() -> T> Deref for LazyCell<T, F> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ LazyCell::force(self)
+ }
+}
+
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T: Default> Default for LazyCell<T> {
+ /// Creates a new lazy value using `Default` as the initializing function.
+ fn default() -> LazyCell<T> {
+ LazyCell::new(T::default)
+ }
+}
+
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T: fmt::Debug, F> fmt::Debug for LazyCell<T, F> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Lazy").field("cell", &self.cell).field("init", &"..").finish()
+ }
+}
diff --git a/library/core/src/cell/once.rs b/library/core/src/cell/once.rs
new file mode 100644
index 000000000..3c39394dd
--- /dev/null
+++ b/library/core/src/cell/once.rs
@@ -0,0 +1,283 @@
+use crate::cell::UnsafeCell;
+use crate::fmt;
+use crate::mem;
+
+/// A cell which can be written to only once.
+///
+/// Unlike `RefCell`, a `OnceCell` only provides shared `&T` references to its value.
+/// Unlike `Cell`, a `OnceCell` doesn't require copying or replacing the value to access it.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(once_cell)]
+///
+/// use std::cell::OnceCell;
+///
+/// let cell = OnceCell::new();
+/// assert!(cell.get().is_none());
+///
+/// let value: &String = cell.get_or_init(|| {
+/// "Hello, World!".to_string()
+/// });
+/// assert_eq!(value, "Hello, World!");
+/// assert!(cell.get().is_some());
+/// ```
+#[unstable(feature = "once_cell", issue = "74465")]
+pub struct OnceCell<T> {
+ // Invariant: written to at most once.
+ inner: UnsafeCell<Option<T>>,
+}
+
+impl<T> OnceCell<T> {
+ /// Creates a new empty cell.
+ #[unstable(feature = "once_cell", issue = "74465")]
+ #[must_use]
+ pub const fn new() -> OnceCell<T> {
+ OnceCell { inner: UnsafeCell::new(None) }
+ }
+
+ /// Gets the reference to the underlying value.
+ ///
+ /// Returns `None` if the cell is empty.
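+ ///
+ /// # Examples
+ ///
+ /// A small sketch of the expected behavior:
+ ///
+ /// ```
+ /// #![feature(once_cell)]
+ ///
+ /// use std::cell::OnceCell;
+ ///
+ /// let cell = OnceCell::new();
+ /// assert_eq!(cell.get(), None);
+ /// cell.set(92).unwrap();
+ /// assert_eq!(cell.get(), Some(&92));
+ /// ```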
+ #[unstable(feature = "once_cell", issue = "74465")]
+ pub fn get(&self) -> Option<&T> {
+ // SAFETY: Safe due to `inner`'s invariant
+ unsafe { &*self.inner.get() }.as_ref()
+ }
+
+ /// Gets the mutable reference to the underlying value.
+ ///
+ /// Returns `None` if the cell is empty.
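+ ///
+ /// # Examples
+ ///
+ /// A small sketch of the expected behavior:
+ ///
+ /// ```
+ /// #![feature(once_cell)]
+ ///
+ /// use std::cell::OnceCell;
+ ///
+ /// let mut cell = OnceCell::new();
+ /// assert!(cell.get_mut().is_none());
+ /// cell.set(92).unwrap();
+ /// *cell.get_mut().unwrap() += 1;
+ /// assert_eq!(cell.get(), Some(&93));
+ /// ```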
+ #[unstable(feature = "once_cell", issue = "74465")]
+ pub fn get_mut(&mut self) -> Option<&mut T> {
+ self.inner.get_mut().as_mut()
+ }
+
+ /// Sets the contents of the cell to `value`.
+ ///
+ /// # Errors
+ ///
+ /// This method returns `Ok(())` if the cell was empty and `Err(value)` if
+ /// it was full.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(once_cell)]
+ ///
+ /// use std::cell::OnceCell;
+ ///
+ /// let cell = OnceCell::new();
+ /// assert!(cell.get().is_none());
+ ///
+ /// assert_eq!(cell.set(92), Ok(()));
+ /// assert_eq!(cell.set(62), Err(62));
+ ///
+ /// assert!(cell.get().is_some());
+ /// ```
+ #[unstable(feature = "once_cell", issue = "74465")]
+ pub fn set(&self, value: T) -> Result<(), T> {
+ // SAFETY: Safe because we cannot have overlapping mutable borrows
+ let slot = unsafe { &*self.inner.get() };
+ if slot.is_some() {
+ return Err(value);
+ }
+
+ // SAFETY: This is the only place where we set the slot, no races
+ // due to reentrancy/concurrency are possible, and we've
+ // checked that slot is currently `None`, so this write
+ // maintains the `inner`'s invariant.
+ let slot = unsafe { &mut *self.inner.get() };
+ *slot = Some(value);
+ Ok(())
+ }
+
+ /// Gets the contents of the cell, initializing it with `f`
+ /// if the cell was empty.
+ ///
+ /// # Panics
+ ///
+ /// If `f` panics, the panic is propagated to the caller, and the cell
+ /// remains uninitialized.
+ ///
+ /// It is an error to reentrantly initialize the cell from `f`. Doing
+ /// so results in a panic.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(once_cell)]
+ ///
+ /// use std::cell::OnceCell;
+ ///
+ /// let cell = OnceCell::new();
+ /// let value = cell.get_or_init(|| 92);
+ /// assert_eq!(value, &92);
+ /// let value = cell.get_or_init(|| unreachable!());
+ /// assert_eq!(value, &92);
+ /// ```
+ #[unstable(feature = "once_cell", issue = "74465")]
+ pub fn get_or_init<F>(&self, f: F) -> &T
+ where
+ F: FnOnce() -> T,
+ {
+ match self.get_or_try_init(|| Ok::<T, !>(f())) {
+ Ok(val) => val,
+ }
+ }
+
+ /// Gets the contents of the cell, initializing it with `f` if
+ /// the cell was empty. If the cell was empty and `f` failed, an
+ /// error is returned.
+ ///
+ /// # Panics
+ ///
+ /// If `f` panics, the panic is propagated to the caller, and the cell
+ /// remains uninitialized.
+ ///
+ /// It is an error to reentrantly initialize the cell from `f`. Doing
+ /// so results in a panic.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(once_cell)]
+ ///
+ /// use std::cell::OnceCell;
+ ///
+ /// let cell = OnceCell::new();
+ /// assert_eq!(cell.get_or_try_init(|| Err(())), Err(()));
+ /// assert!(cell.get().is_none());
+ /// let value = cell.get_or_try_init(|| -> Result<i32, ()> {
+ /// Ok(92)
+ /// });
+ /// assert_eq!(value, Ok(&92));
+ /// assert_eq!(cell.get(), Some(&92))
+ /// ```
+ #[unstable(feature = "once_cell", issue = "74465")]
+ pub fn get_or_try_init<F, E>(&self, f: F) -> Result<&T, E>
+ where
+ F: FnOnce() -> Result<T, E>,
+ {
+ if let Some(val) = self.get() {
+ return Ok(val);
+ }
+ /// Avoid inlining the initialization closure into the common path that fetches
+ /// the already initialized value
+ #[cold]
+ fn outlined_call<F, T, E>(f: F) -> Result<T, E>
+ where
+ F: FnOnce() -> Result<T, E>,
+ {
+ f()
+ }
+ let val = outlined_call(f)?;
+ // Note that *some* forms of reentrant initialization might lead to
+ // UB (see the `reentrant_init` test). I believe that just removing this
+ // `assert`, while keeping `set`/`get`, would be sound, but it seems
+ // better to panic rather than to silently use an old value.
+ assert!(self.set(val).is_ok(), "reentrant init");
+ Ok(self.get().unwrap())
+ }
+
+ /// Consumes the cell, returning the wrapped value.
+ ///
+ /// Returns `None` if the cell was empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(once_cell)]
+ ///
+ /// use std::cell::OnceCell;
+ ///
+ /// let cell: OnceCell<String> = OnceCell::new();
+ /// assert_eq!(cell.into_inner(), None);
+ ///
+ /// let cell = OnceCell::new();
+ /// cell.set("hello".to_string()).unwrap();
+ /// assert_eq!(cell.into_inner(), Some("hello".to_string()));
+ /// ```
+ #[unstable(feature = "once_cell", issue = "74465")]
+ pub fn into_inner(self) -> Option<T> {
+ // Because `into_inner` takes `self` by value, the compiler statically verifies
+ // that it is not currently borrowed. So it is safe to move out `Option<T>`.
+ self.inner.into_inner()
+ }
+
+ /// Takes the value out of this `OnceCell`, moving it back to an uninitialized state.
+ ///
+ /// Has no effect and returns `None` if the `OnceCell` hasn't been initialized.
+ ///
+ /// Safety is guaranteed by requiring a mutable reference.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(once_cell)]
+ ///
+ /// use std::cell::OnceCell;
+ ///
+ /// let mut cell: OnceCell<String> = OnceCell::new();
+ /// assert_eq!(cell.take(), None);
+ ///
+ /// let mut cell = OnceCell::new();
+ /// cell.set("hello".to_string()).unwrap();
+ /// assert_eq!(cell.take(), Some("hello".to_string()));
+ /// assert_eq!(cell.get(), None);
+ /// ```
+ #[unstable(feature = "once_cell", issue = "74465")]
+ pub fn take(&mut self) -> Option<T> {
+ mem::take(self).into_inner()
+ }
+}
+
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T> Default for OnceCell<T> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T: fmt::Debug> fmt::Debug for OnceCell<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.get() {
+ Some(v) => f.debug_tuple("OnceCell").field(v).finish(),
+ None => f.write_str("OnceCell(Uninit)"),
+ }
+ }
+}
+
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T: Clone> Clone for OnceCell<T> {
+ fn clone(&self) -> OnceCell<T> {
+ let res = OnceCell::new();
+ if let Some(value) = self.get() {
+ match res.set(value.clone()) {
+ Ok(()) => (),
+ Err(_) => unreachable!(),
+ }
+ }
+ res
+ }
+}
+
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T: PartialEq> PartialEq for OnceCell<T> {
+ fn eq(&self, other: &Self) -> bool {
+ self.get() == other.get()
+ }
+}
+
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T: Eq> Eq for OnceCell<T> {}
+
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T> const From<T> for OnceCell<T> {
+ /// Creates a new `OnceCell<T>` which already contains the given `value`.
+ fn from(value: T) -> Self {
+ OnceCell { inner: UnsafeCell::new(Some(value)) }
+ }
+}
diff --git a/library/core/src/char/convert.rs b/library/core/src/char/convert.rs
new file mode 100644
index 000000000..7c5f82f5e
--- /dev/null
+++ b/library/core/src/char/convert.rs
@@ -0,0 +1,258 @@
+//! Character conversions.
+
+use crate::char::TryFromCharError;
+use crate::convert::TryFrom;
+use crate::fmt;
+use crate::mem::transmute;
+use crate::str::FromStr;
+
+/// Converts a `u32` to a `char`. See [`char::from_u32`].
+#[must_use]
+#[inline]
+pub(super) const fn from_u32(i: u32) -> Option<char> {
+ // FIXME: once Result::ok is const fn, use it here
+ match char_try_from_u32(i) {
+ Ok(c) => Some(c),
+ Err(_) => None,
+ }
+}
+
+/// Converts a `u32` to a `char`, ignoring validity. See [`char::from_u32_unchecked`].
+#[rustc_const_unstable(feature = "const_char_convert", issue = "89259")]
+#[inline]
+#[must_use]
+pub(super) const unsafe fn from_u32_unchecked(i: u32) -> char {
+ // SAFETY: the caller must guarantee that `i` is a valid char value.
+ if cfg!(debug_assertions) { char::from_u32(i).unwrap() } else { unsafe { transmute(i) } }
+}
+
+#[stable(feature = "char_convert", since = "1.13.0")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl const From<char> for u32 {
+ /// Converts a [`char`] into a [`u32`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::mem;
+ ///
+ /// let c = 'c';
+ /// let u = u32::from(c);
+ /// assert!(4 == mem::size_of_val(&u))
+ /// ```
+ #[inline]
+ fn from(c: char) -> Self {
+ c as u32
+ }
+}
+
+#[stable(feature = "more_char_conversions", since = "1.51.0")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl const From<char> for u64 {
+ /// Converts a [`char`] into a [`u64`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::mem;
+ ///
+ /// let c = '👤';
+ /// let u = u64::from(c);
+ /// assert!(8 == mem::size_of_val(&u))
+ /// ```
+ #[inline]
+ fn from(c: char) -> Self {
+ // The char is cast to the value of the code point, then zero-extended to 64 bits.
+ // See [https://doc.rust-lang.org/reference/expressions/operator-expr.html#semantics]
+ c as u64
+ }
+}
+
+#[stable(feature = "more_char_conversions", since = "1.51.0")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl const From<char> for u128 {
+ /// Converts a [`char`] into a [`u128`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::mem;
+ ///
+ /// let c = '⚙';
+ /// let u = u128::from(c);
+ /// assert!(16 == mem::size_of_val(&u))
+ /// ```
+ #[inline]
+ fn from(c: char) -> Self {
+ // The char is cast to the value of the code point, then zero-extended to 128 bits.
+ // See [https://doc.rust-lang.org/reference/expressions/operator-expr.html#semantics]
+ c as u128
+ }
+}
+
+/// Maps a `char` with a code point in U+0000..=U+00FF to a byte in 0x00..=0xFF with the same
+/// value, failing if the code point is greater than U+00FF.
+///
+/// See [`impl From<u8> for char`](char#impl-From<u8>-for-char) for details on the encoding.
+#[stable(feature = "u8_from_char", since = "1.59.0")]
+impl TryFrom<char> for u8 {
+ type Error = TryFromCharError;
+
+ #[inline]
+ fn try_from(c: char) -> Result<u8, Self::Error> {
+ u8::try_from(u32::from(c)).map_err(|_| TryFromCharError(()))
+ }
+}
+
+/// Maps a byte in 0x00..=0xFF to a `char` whose code point has the same value, in U+0000..=U+00FF.
+///
+/// Unicode is designed such that this effectively decodes bytes
+/// with the character encoding that IANA calls ISO-8859-1.
+/// This encoding is compatible with ASCII.
+///
+/// Note that this is different from ISO/IEC 8859-1 a.k.a. ISO 8859-1 (with one less hyphen),
+/// which leaves some "blanks", byte values that are not assigned to any character.
+/// ISO-8859-1 (the IANA one) assigns them to the C0 and C1 control codes.
+///
+/// Note that this is *also* different from Windows-1252 a.k.a. code page 1252,
+/// which is a superset of ISO/IEC 8859-1 that assigns some (not all!) blanks
+/// to punctuation and various Latin characters.
+///
+/// To confuse things further, [on the Web](https://encoding.spec.whatwg.org/)
+/// `ascii`, `iso-8859-1`, and `windows-1252` are all aliases
+/// for a superset of Windows-1252 that fills the remaining blanks with corresponding
+/// C0 and C1 control codes.
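+///
+/// A small illustration of this mapping (the ASCII subset maps directly, and
+/// 0xE9 is 'é' in ISO-8859-1):
+///
+/// ```
+/// assert_eq!(char::from(0x41u8), 'A');
+/// assert_eq!(char::from(0xE9u8), 'é');
+/// ```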
+#[stable(feature = "char_convert", since = "1.13.0")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl const From<u8> for char {
+ /// Converts a [`u8`] into a [`char`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::mem;
+ ///
+ /// let u = 32 as u8;
+ /// let c = char::from(u);
+ /// assert!(4 == mem::size_of_val(&c))
+ /// ```
+ #[inline]
+ fn from(i: u8) -> Self {
+ i as char
+ }
+}
+
+/// An error which can be returned when parsing a char.
+///
+/// This `struct` is created when using the [`char::from_str`] method.
+#[stable(feature = "char_from_str", since = "1.20.0")]
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct ParseCharError {
+ kind: CharErrorKind,
+}
+
+impl ParseCharError {
+ #[unstable(
+ feature = "char_error_internals",
+ reason = "this method should not be available publicly",
+ issue = "none"
+ )]
+ #[doc(hidden)]
+ pub fn __description(&self) -> &str {
+ match self.kind {
+ CharErrorKind::EmptyString => "cannot parse char from empty string",
+ CharErrorKind::TooManyChars => "too many characters in string",
+ }
+ }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+enum CharErrorKind {
+ EmptyString,
+ TooManyChars,
+}
+
+#[stable(feature = "char_from_str", since = "1.20.0")]
+impl fmt::Display for ParseCharError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.__description().fmt(f)
+ }
+}
+
+#[stable(feature = "char_from_str", since = "1.20.0")]
+impl FromStr for char {
+ type Err = ParseCharError;
+
+ #[inline]
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ let mut chars = s.chars();
+ match (chars.next(), chars.next()) {
+ (None, _) => Err(ParseCharError { kind: CharErrorKind::EmptyString }),
+ (Some(c), None) => Ok(c),
+ _ => Err(ParseCharError { kind: CharErrorKind::TooManyChars }),
+ }
+ }
+}
+
+#[inline]
+const fn char_try_from_u32(i: u32) -> Result<char, CharTryFromError> {
+ // This is an optimized version of the check
+ // (i > MAX as u32) || (i >= 0xD800 && i <= 0xDFFF),
+ // which can also be written as
+ // i >= 0x110000 || (i >= 0xD800 && i < 0xE000).
+ //
+ // The XOR with 0xD800 permutes the ranges such that 0xD800..0xE000 is
+ // mapped to 0x0000..0x0800, while keeping all the high bits outside 0xFFFF the same.
+ // In particular, numbers >= 0x110000 stay in this range.
+ //
+ // Subtracting 0x800 causes 0x0000..0x0800 to wrap, meaning that a single
+ // unsigned comparison against 0x110000 - 0x800 will detect both the wrapped
+ // surrogate range as well as the numbers originally larger than 0x110000.
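+ //
+ // Illustrative values (not exhaustive):
+ // i = 0x0041 ('A'): 0x0041 ^ 0xD800 == 0xD841; 0xD841 - 0x800 == 0xD041 < 0x10F800, so Ok.
+ // i = 0xD800 (surrogate): 0xD800 ^ 0xD800 == 0; wrapping_sub(0x800) == 0xFFFF_F800 >= 0x10F800, so Err.
+ // i = 0x110000: 0x110000 ^ 0xD800 == 0x11D800; 0x11D800 - 0x800 == 0x11D000 >= 0x10F800, so Err.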
+ //
+ if (i ^ 0xD800).wrapping_sub(0x800) >= 0x110000 - 0x800 {
+ Err(CharTryFromError(()))
+ } else {
+ // SAFETY: checked that it's a legal unicode value
+ Ok(unsafe { transmute(i) })
+ }
+}
+
+#[stable(feature = "try_from", since = "1.34.0")]
+impl TryFrom<u32> for char {
+ type Error = CharTryFromError;
+
+ #[inline]
+ fn try_from(i: u32) -> Result<Self, Self::Error> {
+ char_try_from_u32(i)
+ }
+}
+
+/// The error type returned when a conversion from [`prim@u32`] to [`prim@char`] fails.
+///
+/// This `struct` is created by the [`char::try_from<u32>`](char#impl-TryFrom<u32>-for-char) method.
+/// See its documentation for more.
+#[stable(feature = "try_from", since = "1.34.0")]
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub struct CharTryFromError(());
+
+#[stable(feature = "try_from", since = "1.34.0")]
+impl fmt::Display for CharTryFromError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ "converted integer out of range for `char`".fmt(f)
+ }
+}
+
+/// Converts a digit in the given radix to a `char`. See [`char::from_digit`].
+#[inline]
+#[must_use]
+pub(super) const fn from_digit(num: u32, radix: u32) -> Option<char> {
+ if radix > 36 {
+ panic!("from_digit: radix is too high (maximum 36)");
+ }
+ if num < radix {
+ let num = num as u8;
+ if num < 10 { Some((b'0' + num) as char) } else { Some((b'a' + num - 10) as char) }
+ } else {
+ None
+ }
+}
diff --git a/library/core/src/char/decode.rs b/library/core/src/char/decode.rs
new file mode 100644
index 000000000..71297acd1
--- /dev/null
+++ b/library/core/src/char/decode.rs
@@ -0,0 +1,123 @@
+//! UTF-16 decoding iterators.
+
+use crate::fmt;
+
+use super::from_u32_unchecked;
+
+/// An iterator that decodes UTF-16 encoded code points from an iterator of `u16`s.
+///
+/// This `struct` is created by the [`decode_utf16`] method on [`char`]. See its
+/// documentation for more.
+///
+/// [`decode_utf16`]: char::decode_utf16
+#[stable(feature = "decode_utf16", since = "1.9.0")]
+#[derive(Clone, Debug)]
+pub struct DecodeUtf16<I>
+where
+ I: Iterator<Item = u16>,
+{
+ iter: I,
+ buf: Option<u16>,
+}
+
+/// An error that can be returned when decoding UTF-16 code points.
+///
+/// This `struct` is created when using the [`DecodeUtf16`] type.
+#[stable(feature = "decode_utf16", since = "1.9.0")]
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub struct DecodeUtf16Error {
+ code: u16,
+}
+
+/// Creates an iterator over the UTF-16 encoded code points in `iter`,
+/// returning unpaired surrogates as `Err`s. See [`char::decode_utf16`].
+#[inline]
+pub(super) fn decode_utf16<I: IntoIterator<Item = u16>>(iter: I) -> DecodeUtf16<I::IntoIter> {
+ DecodeUtf16 { iter: iter.into_iter(), buf: None }
+}
+
+#[stable(feature = "decode_utf16", since = "1.9.0")]
+impl<I: Iterator<Item = u16>> Iterator for DecodeUtf16<I> {
+ type Item = Result<char, DecodeUtf16Error>;
+
+ fn next(&mut self) -> Option<Result<char, DecodeUtf16Error>> {
+ let u = match self.buf.take() {
+ Some(buf) => buf,
+ None => self.iter.next()?,
+ };
+
+ if !u.is_utf16_surrogate() {
+ // SAFETY: not a surrogate
+ Some(Ok(unsafe { from_u32_unchecked(u as u32) }))
+ } else if u >= 0xDC00 {
+ // a trailing surrogate
+ Some(Err(DecodeUtf16Error { code: u }))
+ } else {
+ let u2 = match self.iter.next() {
+ Some(u2) => u2,
+ // eof
+ None => return Some(Err(DecodeUtf16Error { code: u })),
+ };
+ if u2 < 0xDC00 || u2 > 0xDFFF {
+ // not a trailing surrogate, so this is not a valid
+ // surrogate pair; rewind to redecode `u2` next time.
+ self.buf = Some(u2);
+ return Some(Err(DecodeUtf16Error { code: u }));
+ }
+
+ // all ok, so let's decode it.
+ let c = (((u - 0xD800) as u32) << 10 | (u2 - 0xDC00) as u32) + 0x1_0000;
+ // SAFETY: we checked that it's a legal unicode value
+ Some(Ok(unsafe { from_u32_unchecked(c) }))
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (low, high) = self.iter.size_hint();
+
+ let (low_buf, high_buf) = match self.buf {
+ // buf is empty, no additional elements from it.
+ None => (0, 0),
+ // `u` is a non-surrogate, so it's always an additional character.
+ Some(u) if !u.is_utf16_surrogate() => (1, 1),
+ // `u` is a leading surrogate (it can never be a trailing surrogate and
+ // it's a surrogate due to the previous branch) and `self.iter` is empty.
+ //
+ // `u` can't be paired, since the `self.iter` is empty,
+ // so it will always become an additional element (error).
+ Some(_u) if high == Some(0) => (1, 1),
+ // `u` is a leading surrogate and `iter` may be non-empty.
+ //
+ // `u` can either pair with a trailing surrogate, in which case no additional elements
+ // are produced, or it can become an error, in which case it's an additional character (error).
+ Some(_u) => (0, 1),
+ };
+
+ // `self.iter` could contain entirely valid surrogates (2 elements per
+ // char), or entirely non-surrogates (1 element per char).
+ //
+ // On odd lower bound, at least one element must stay unpaired
+ // (with other elements from `self.iter`), so we round up.
+ let low = low.div_ceil(2) + low_buf;
+ let high = high.and_then(|h| h.checked_add(high_buf));
+
+ (low, high)
+ }
+}
+
+impl DecodeUtf16Error {
+ /// Returns the unpaired surrogate which caused this error.
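+ ///
+ /// # Examples
+ ///
+ /// A short sketch: a lone leading surrogate decodes to an error carrying
+ /// that code unit:
+ ///
+ /// ```
+ /// use std::char::decode_utf16;
+ ///
+ /// // 0xD834 is a leading surrogate with no trailing partner.
+ /// let err = decode_utf16([0xD834u16]).next().unwrap().unwrap_err();
+ /// assert_eq!(err.unpaired_surrogate(), 0xD834);
+ /// ```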
+ #[must_use]
+ #[stable(feature = "decode_utf16", since = "1.9.0")]
+ pub fn unpaired_surrogate(&self) -> u16 {
+ self.code
+ }
+}
+
+#[stable(feature = "decode_utf16", since = "1.9.0")]
+impl fmt::Display for DecodeUtf16Error {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "unpaired surrogate found: {:x}", self.code)
+ }
+}
diff --git a/library/core/src/char/methods.rs b/library/core/src/char/methods.rs
new file mode 100644
index 000000000..eae567cad
--- /dev/null
+++ b/library/core/src/char/methods.rs
@@ -0,0 +1,1741 @@
+//! impl char {}
+
+use crate::slice;
+use crate::str::from_utf8_unchecked_mut;
+use crate::unicode::printable::is_printable;
+use crate::unicode::{self, conversions};
+
+use super::*;
+
+impl char {
+ /// The highest valid code point a `char` can have, `'\u{10FFFF}'`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # fn something_which_returns_char() -> char { 'a' }
+ /// let c: char = something_which_returns_char();
+ /// assert!(c <= char::MAX);
+ ///
+ /// let value_at_max = char::MAX as u32;
+ /// assert_eq!(char::from_u32(value_at_max), Some('\u{10FFFF}'));
+ /// assert_eq!(char::from_u32(value_at_max + 1), None);
+ /// ```
+ #[stable(feature = "assoc_char_consts", since = "1.52.0")]
+ pub const MAX: char = '\u{10ffff}';
+
+ /// `U+FFFD REPLACEMENT CHARACTER` (�) is used in Unicode to represent a
+ /// decoding error.
+ ///
+ /// It can occur, for example, when giving ill-formed UTF-8 bytes to
+ /// [`String::from_utf8_lossy`](../std/string/struct.String.html#method.from_utf8_lossy).
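+ ///
+ /// # Examples
+ ///
+ /// A small sketch of that lossy decoding (0xFF is never valid UTF-8):
+ ///
+ /// ```
+ /// assert_eq!(String::from_utf8_lossy(b"\xFF"), "\u{FFFD}");
+ /// assert_eq!(char::REPLACEMENT_CHARACTER, '\u{FFFD}');
+ /// ```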
+ #[stable(feature = "assoc_char_consts", since = "1.52.0")]
+ pub const REPLACEMENT_CHARACTER: char = '\u{FFFD}';
+
+ /// The version of [Unicode](https://www.unicode.org/) that the Unicode parts of
+ /// `char` and `str` methods are based on.
+ ///
+ /// New versions of Unicode are released regularly and subsequently all methods
+ /// in the standard library depending on Unicode are updated. Therefore the
+ /// behavior of some `char` and `str` methods and the value of this constant
+ /// changes over time. This is *not* considered to be a breaking change.
+ ///
+ /// The version numbering scheme is explained in
+ /// [Unicode 11.0 or later, Section 3.1 Versions of the Unicode Standard](https://www.unicode.org/versions/Unicode11.0.0/ch03.pdf#page=4).
+ #[stable(feature = "assoc_char_consts", since = "1.52.0")]
+ pub const UNICODE_VERSION: (u8, u8, u8) = crate::unicode::UNICODE_VERSION;
+
+ /// Creates an iterator over the UTF-16 encoded code points in `iter`,
+ /// returning unpaired surrogates as `Err`s.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::char::decode_utf16;
+ ///
+ /// // 𝄞mus<invalid>ic<invalid>
+ /// let v = [
+ /// 0xD834, 0xDD1E, 0x006d, 0x0075, 0x0073, 0xDD1E, 0x0069, 0x0063, 0xD834,
+ /// ];
+ ///
+ /// assert_eq!(
+ /// decode_utf16(v)
+ /// .map(|r| r.map_err(|e| e.unpaired_surrogate()))
+ /// .collect::<Vec<_>>(),
+ /// vec![
+ /// Ok('𝄞'),
+ /// Ok('m'), Ok('u'), Ok('s'),
+ /// Err(0xDD1E),
+ /// Ok('i'), Ok('c'),
+ /// Err(0xD834)
+ /// ]
+ /// );
+ /// ```
+ ///
+ /// A lossy decoder can be obtained by replacing `Err` results with the replacement character:
+ ///
+ /// ```
+ /// use std::char::{decode_utf16, REPLACEMENT_CHARACTER};
+ ///
+ /// // 𝄞mus<invalid>ic<invalid>
+ /// let v = [
+ /// 0xD834, 0xDD1E, 0x006d, 0x0075, 0x0073, 0xDD1E, 0x0069, 0x0063, 0xD834,
+ /// ];
+ ///
+ /// assert_eq!(
+ /// decode_utf16(v)
+ /// .map(|r| r.unwrap_or(REPLACEMENT_CHARACTER))
+ /// .collect::<String>(),
+ /// "𝄞mus�ic�"
+ /// );
+ /// ```
+ #[stable(feature = "assoc_char_funcs", since = "1.52.0")]
+ #[inline]
+ pub fn decode_utf16<I: IntoIterator<Item = u16>>(iter: I) -> DecodeUtf16<I::IntoIter> {
+ super::decode::decode_utf16(iter)
+ }
+
+ /// Converts a `u32` to a `char`.
+ ///
+ /// Note that all `char`s are valid [`u32`]s, and can be cast to one with
+ /// [`as`](../std/keyword.as.html):
+ ///
+ /// ```
+ /// let c = '💯';
+ /// let i = c as u32;
+ ///
+ /// assert_eq!(128175, i);
+ /// ```
+ ///
+ /// However, the reverse is not true: not all valid [`u32`]s are valid
+ /// `char`s. `from_u32()` will return `None` if the input is not a valid value
+ /// for a `char`.
+ ///
+ /// For an unsafe version of this function which ignores these checks, see
+ /// [`from_u32_unchecked`].
+ ///
+ /// [`from_u32_unchecked`]: #method.from_u32_unchecked
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::char;
+ ///
+ /// let c = char::from_u32(0x2764);
+ ///
+ /// assert_eq!(Some('❤'), c);
+ /// ```
+ ///
+ /// Returning `None` when the input is not a valid `char`:
+ ///
+ /// ```
+ /// use std::char;
+ ///
+ /// let c = char::from_u32(0x110000);
+ ///
+ /// assert_eq!(None, c);
+ /// ```
+ #[stable(feature = "assoc_char_funcs", since = "1.52.0")]
+ #[rustc_const_unstable(feature = "const_char_convert", issue = "89259")]
+ #[must_use]
+ #[inline]
+ pub const fn from_u32(i: u32) -> Option<char> {
+ super::convert::from_u32(i)
+ }
+
+ /// Converts a `u32` to a `char`, ignoring validity.
+ ///
+ /// Note that all `char`s are valid [`u32`]s, and can be cast to one with
+ /// `as`:
+ ///
+ /// ```
+ /// let c = '💯';
+ /// let i = c as u32;
+ ///
+ /// assert_eq!(128175, i);
+ /// ```
+ ///
+ /// However, the reverse is not true: not all valid [`u32`]s are valid
+ /// `char`s. `from_u32_unchecked()` will ignore this, and blindly cast to
+ /// `char`, possibly creating an invalid one.
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe, as it may construct invalid `char` values.
+ ///
+ /// For a safe version of this function, see the [`from_u32`] function.
+ ///
+ /// [`from_u32`]: #method.from_u32
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::char;
+ ///
+ /// let c = unsafe { char::from_u32_unchecked(0x2764) };
+ ///
+ /// assert_eq!('❤', c);
+ /// ```
+ #[stable(feature = "assoc_char_funcs", since = "1.52.0")]
+ #[rustc_const_unstable(feature = "const_char_convert", issue = "89259")]
+ #[must_use]
+ #[inline]
+ pub const unsafe fn from_u32_unchecked(i: u32) -> char {
+ // SAFETY: the safety contract must be upheld by the caller.
+ unsafe { super::convert::from_u32_unchecked(i) }
+ }
+
+ /// Converts a digit in the given radix to a `char`.
+ ///
+ /// A 'radix' here is sometimes also called a 'base'. A radix of two
+ /// indicates a binary number, a radix of ten, decimal, and a radix of
+ /// sixteen, hexadecimal, to give some common values. Arbitrary
+ /// radices are supported.
+ ///
+ /// `from_digit()` will return `None` if the input is not a digit in
+ /// the given radix.
+ ///
+ /// # Panics
+ ///
+ /// Panics if given a radix larger than 36.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::char;
+ ///
+ /// let c = char::from_digit(4, 10);
+ ///
+ /// assert_eq!(Some('4'), c);
+ ///
+ /// // Decimal 11 is a single digit in base 16
+ /// let c = char::from_digit(11, 16);
+ ///
+ /// assert_eq!(Some('b'), c);
+ /// ```
+ ///
+ /// Returning `None` when the input is not a digit:
+ ///
+ /// ```
+ /// use std::char;
+ ///
+ /// let c = char::from_digit(20, 10);
+ ///
+ /// assert_eq!(None, c);
+ /// ```
+ ///
+ /// Passing a large radix, causing a panic:
+ ///
+ /// ```should_panic
+ /// use std::char;
+ ///
+ /// // this panics
+ /// let _c = char::from_digit(1, 37);
+ /// ```
+ #[stable(feature = "assoc_char_funcs", since = "1.52.0")]
+ #[rustc_const_unstable(feature = "const_char_convert", issue = "89259")]
+ #[must_use]
+ #[inline]
+ pub const fn from_digit(num: u32, radix: u32) -> Option<char> {
+ super::convert::from_digit(num, radix)
+ }
+
+ /// Checks if a `char` is a digit in the given radix.
+ ///
+ /// A 'radix' here is sometimes also called a 'base'. A radix of two
+ /// indicates a binary number, a radix of ten, decimal, and a radix of
+ /// sixteen, hexadecimal, to give some common values. Arbitrary
+ /// radices are supported.
+ ///
+ /// Compared to [`is_numeric()`], this function only recognizes the characters
+ /// `0-9`, `a-z` and `A-Z`.
+ ///
+ /// 'Digit' is defined to be only the following characters:
+ ///
+ /// * `0-9`
+ /// * `a-z`
+ /// * `A-Z`
+ ///
+ /// For a more comprehensive understanding of 'digit', see [`is_numeric()`].
+ ///
+ /// [`is_numeric()`]: #method.is_numeric
+ ///
+ /// # Panics
+ ///
+ /// Panics if given a radix larger than 36.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert!('1'.is_digit(10));
+ /// assert!('f'.is_digit(16));
+ /// assert!(!'f'.is_digit(10));
+ /// ```
+ ///
+ /// Passing a large radix, causing a panic:
+ ///
+ /// ```should_panic
+ /// // this panics
+ /// '1'.is_digit(37);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn is_digit(self, radix: u32) -> bool {
+ self.to_digit(radix).is_some()
+ }
+
+ /// Converts a `char` to a digit in the given radix.
+ ///
+ /// A 'radix' here is sometimes also called a 'base'. A radix of two
+ /// indicates a binary number, a radix of ten, decimal, and a radix of
+ /// sixteen, hexadecimal, to give some common values. Arbitrary
+ /// radices are supported.
+ ///
+ /// 'Digit' is defined to be only the following characters:
+ ///
+ /// * `0-9`
+ /// * `a-z`
+ /// * `A-Z`
+ ///
+ /// # Errors
+ ///
+ /// Returns `None` if the `char` does not refer to a digit in the given radix.
+ ///
+ /// # Panics
+ ///
+ /// Panics if given a radix larger than 36.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!('1'.to_digit(10), Some(1));
+ /// assert_eq!('f'.to_digit(16), Some(15));
+ /// ```
+ ///
+ /// Passing a non-digit results in failure:
+ ///
+ /// ```
+ /// assert_eq!('f'.to_digit(10), None);
+ /// assert_eq!('z'.to_digit(16), None);
+ /// ```
+ ///
+ /// Passing a large radix, causing a panic:
+ ///
+ /// ```should_panic
+ /// // this panics
+ /// let _ = '1'.to_digit(37);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_char_convert", issue = "89259")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn to_digit(self, radix: u32) -> Option<u32> {
+ // If not a digit, a number greater than radix will be created.
+ let mut digit = (self as u32).wrapping_sub('0' as u32);
+ if radix > 10 {
+ assert!(radix <= 36, "to_digit: radix is too high (maximum 36)");
+ if digit < 10 {
+ return Some(digit);
+ }
+ // Force the 6th bit to be set to ensure ascii is lower case.
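+ // For example (illustrative): 'F' (0x46) | 0b10_0000 == 'f' (0x66),
+ // and 0x66 - 'a' (0x61) == 5, plus 10 gives digit 15.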
+ digit = (self as u32 | 0b10_0000).wrapping_sub('a' as u32).saturating_add(10);
+ }
+ // FIXME: once then_some is const fn, use it here
+ if digit < radix { Some(digit) } else { None }
+ }
+
+ /// Returns an iterator that yields the hexadecimal Unicode escape of a
+ /// character as `char`s.
+ ///
+ /// This will escape characters with the Rust syntax of the form
+ /// `\u{NNNNNN}` where `NNNNNN` is a hexadecimal representation.
+ ///
+ /// # Examples
+ ///
+ /// As an iterator:
+ ///
+ /// ```
+ /// for c in '❤'.escape_unicode() {
+ /// print!("{c}");
+ /// }
+ /// println!();
+ /// ```
+ ///
+ /// Using `println!` directly:
+ ///
+ /// ```
+ /// println!("{}", '❤'.escape_unicode());
+ /// ```
+ ///
+ /// Both are equivalent to:
+ ///
+ /// ```
+ /// println!("\\u{{2764}}");
+ /// ```
+ ///
+ /// Using [`to_string`](../std/string/trait.ToString.html#tymethod.to_string):
+ ///
+ /// ```
+ /// assert_eq!('❤'.escape_unicode().to_string(), "\\u{2764}");
+ /// ```
+ #[must_use = "this returns the escaped char as an iterator, \
+ without modifying the original"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn escape_unicode(self) -> EscapeUnicode {
+ let c = self as u32;
+
+ // or-ing in 1 ensures that for c == 0 the code computes that one
+ // digit should be printed; equivalently, it avoids the
+ // (31 - 32) underflow below.
+ let msb = 31 - (c | 1).leading_zeros();
+
+ // the index of the most significant hex digit
+ let ms_hex_digit = msb / 4;
+ EscapeUnicode {
+ c: self,
+ state: EscapeUnicodeState::Backslash,
+ hex_digit_idx: ms_hex_digit as usize,
+ }
+ }
+
+ /// An extended version of `escape_debug` that optionally permits escaping
+ /// Extended Grapheme codepoints, single quotes, and double quotes. This
+ /// allows us to format characters like nonspacing marks better when they're
+ /// at the start of a string, and allows escaping single quotes in
+ /// characters, and double quotes in strings.
+ #[inline]
+ pub(crate) fn escape_debug_ext(self, args: EscapeDebugExtArgs) -> EscapeDebug {
+ let init_state = match self {
+ '\0' => EscapeDefaultState::Backslash('0'),
+ '\t' => EscapeDefaultState::Backslash('t'),
+ '\r' => EscapeDefaultState::Backslash('r'),
+ '\n' => EscapeDefaultState::Backslash('n'),
+ '\\' => EscapeDefaultState::Backslash(self),
+ '"' if args.escape_double_quote => EscapeDefaultState::Backslash(self),
+ '\'' if args.escape_single_quote => EscapeDefaultState::Backslash(self),
+ _ if args.escape_grapheme_extended && self.is_grapheme_extended() => {
+ EscapeDefaultState::Unicode(self.escape_unicode())
+ }
+ _ if is_printable(self) => EscapeDefaultState::Char(self),
+ _ => EscapeDefaultState::Unicode(self.escape_unicode()),
+ };
+ EscapeDebug(EscapeDefault { state: init_state })
+ }
+
+ /// Returns an iterator that yields the literal escape code of a character
+ /// as `char`s.
+ ///
+ /// This will escape the characters similar to the [`Debug`](core::fmt::Debug) implementations
+ /// of `str` or `char`.
+ ///
+ /// # Examples
+ ///
+ /// As an iterator:
+ ///
+ /// ```
+ /// for c in '\n'.escape_debug() {
+ /// print!("{c}");
+ /// }
+ /// println!();
+ /// ```
+ ///
+ /// Using `println!` directly:
+ ///
+ /// ```
+ /// println!("{}", '\n'.escape_debug());
+ /// ```
+ ///
+ /// Both are equivalent to:
+ ///
+ /// ```
+ /// println!("\\n");
+ /// ```
+ ///
+ /// Using [`to_string`](../std/string/trait.ToString.html#tymethod.to_string):
+ ///
+ /// ```
+ /// assert_eq!('\n'.escape_debug().to_string(), "\\n");
+ /// ```
+ #[must_use = "this returns the escaped char as an iterator, \
+ without modifying the original"]
+ #[stable(feature = "char_escape_debug", since = "1.20.0")]
+ #[inline]
+ pub fn escape_debug(self) -> EscapeDebug {
+ self.escape_debug_ext(EscapeDebugExtArgs::ESCAPE_ALL)
+ }
+
+ /// Returns an iterator that yields the literal escape code of a character
+ /// as `char`s.
+ ///
+ /// The default is chosen with a bias toward producing literals that are
+ /// legal in a variety of languages, including C++11 and similar C-family
+ /// languages. The exact rules are:
+ ///
+ /// * Tab is escaped as `\t`.
+ /// * Carriage return is escaped as `\r`.
+ /// * Line feed is escaped as `\n`.
+ /// * Single quote is escaped as `\'`.
+ /// * Double quote is escaped as `\"`.
+ /// * Backslash is escaped as `\\`.
+ /// * Any character in the 'printable ASCII' range `0x20` .. `0x7e`
+ /// inclusive is not escaped.
+ /// * All other characters are given hexadecimal Unicode escapes; see
+ /// [`escape_unicode`].
+ ///
+ /// [`escape_unicode`]: #method.escape_unicode
+ ///
+ /// # Examples
+ ///
+ /// As an iterator:
+ ///
+ /// ```
+ /// for c in '"'.escape_default() {
+ /// print!("{c}");
+ /// }
+ /// println!();
+ /// ```
+ ///
+ /// Using `println!` directly:
+ ///
+ /// ```
+ /// println!("{}", '"'.escape_default());
+ /// ```
+ ///
+ /// Both are equivalent to:
+ ///
+ /// ```
+ /// println!("\\\"");
+ /// ```
+ ///
+ /// Using [`to_string`](../std/string/trait.ToString.html#tymethod.to_string):
+ ///
+ /// ```
+ /// assert_eq!('"'.escape_default().to_string(), "\\\"");
+ /// ```
+ #[must_use = "this returns the escaped char as an iterator, \
+ without modifying the original"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn escape_default(self) -> EscapeDefault {
+ let init_state = match self {
+ '\t' => EscapeDefaultState::Backslash('t'),
+ '\r' => EscapeDefaultState::Backslash('r'),
+ '\n' => EscapeDefaultState::Backslash('n'),
+ '\\' | '\'' | '"' => EscapeDefaultState::Backslash(self),
+ '\x20'..='\x7e' => EscapeDefaultState::Char(self),
+ _ => EscapeDefaultState::Unicode(self.escape_unicode()),
+ };
+ EscapeDefault { state: init_state }
+ }
+
+ /// Returns the number of bytes this `char` would need if encoded in UTF-8.
+ ///
+ /// That number of bytes is always between 1 and 4, inclusive.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let len = 'A'.len_utf8();
+ /// assert_eq!(len, 1);
+ ///
+ /// let len = 'ß'.len_utf8();
+ /// assert_eq!(len, 2);
+ ///
+ /// let len = 'ℝ'.len_utf8();
+ /// assert_eq!(len, 3);
+ ///
+ /// let len = '💣'.len_utf8();
+ /// assert_eq!(len, 4);
+ /// ```
+ ///
+    /// The `&str` type guarantees that its contents are UTF-8, so we can compare the length it
+    /// would take if each code point were represented as a `char` versus in the `&str` itself:
+ ///
+ /// ```
+ /// // as chars
+ /// let eastern = '東';
+ /// let capital = '京';
+ ///
+ /// // both can be represented as three bytes
+ /// assert_eq!(3, eastern.len_utf8());
+ /// assert_eq!(3, capital.len_utf8());
+ ///
+ /// // as a &str, these two are encoded in UTF-8
+ /// let tokyo = "東京";
+ ///
+ /// let len = eastern.len_utf8() + capital.len_utf8();
+ ///
+ /// // we can see that they take six bytes total...
+ /// assert_eq!(6, tokyo.len());
+ ///
+ /// // ... just like the &str
+ /// assert_eq!(len, tokyo.len());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_char_len_utf", since = "1.52.0")]
+ #[inline]
+ pub const fn len_utf8(self) -> usize {
+ len_utf8(self as u32)
+ }
+
+ /// Returns the number of 16-bit code units this `char` would need if
+ /// encoded in UTF-16.
+ ///
+ /// See the documentation for [`len_utf8()`] for more explanation of this
+ /// concept. This function is a mirror, but for UTF-16 instead of UTF-8.
+ ///
+ /// [`len_utf8()`]: #method.len_utf8
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let n = 'ß'.len_utf16();
+ /// assert_eq!(n, 1);
+ ///
+ /// let len = '💣'.len_utf16();
+ /// assert_eq!(len, 2);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_char_len_utf", since = "1.52.0")]
+ #[inline]
+ pub const fn len_utf16(self) -> usize {
+ let ch = self as u32;
+ if (ch & 0xFFFF) == ch { 1 } else { 2 }
+ }
+
+ /// Encodes this character as UTF-8 into the provided byte buffer,
+ /// and then returns the subslice of the buffer that contains the encoded character.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the buffer is not large enough.
+ /// A buffer of length four is large enough to encode any `char`.
+ ///
+ /// # Examples
+ ///
+ /// In both of these examples, 'ß' takes two bytes to encode.
+ ///
+ /// ```
+ /// let mut b = [0; 2];
+ ///
+ /// let result = 'ß'.encode_utf8(&mut b);
+ ///
+ /// assert_eq!(result, "ß");
+ ///
+ /// assert_eq!(result.len(), 2);
+ /// ```
+ ///
+ /// A buffer that's too small:
+ ///
+ /// ```should_panic
+ /// let mut b = [0; 1];
+ ///
+ /// // this panics
+ /// 'ß'.encode_utf8(&mut b);
+ /// ```
+ #[stable(feature = "unicode_encode_char", since = "1.15.0")]
+ #[inline]
+ pub fn encode_utf8(self, dst: &mut [u8]) -> &mut str {
+ // SAFETY: `char` is not a surrogate, so this is valid UTF-8.
+ unsafe { from_utf8_unchecked_mut(encode_utf8_raw(self as u32, dst)) }
+ }
+
+ /// Encodes this character as UTF-16 into the provided `u16` buffer,
+ /// and then returns the subslice of the buffer that contains the encoded character.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the buffer is not large enough.
+ /// A buffer of length 2 is large enough to encode any `char`.
+ ///
+ /// # Examples
+ ///
+ /// In both of these examples, '𝕊' takes two `u16`s to encode.
+ ///
+ /// ```
+ /// let mut b = [0; 2];
+ ///
+ /// let result = '𝕊'.encode_utf16(&mut b);
+ ///
+ /// assert_eq!(result.len(), 2);
+ /// ```
+ ///
+ /// A buffer that's too small:
+ ///
+ /// ```should_panic
+ /// let mut b = [0; 1];
+ ///
+ /// // this panics
+ /// '𝕊'.encode_utf16(&mut b);
+ /// ```
+ #[stable(feature = "unicode_encode_char", since = "1.15.0")]
+ #[inline]
+ pub fn encode_utf16(self, dst: &mut [u16]) -> &mut [u16] {
+ encode_utf16_raw(self as u32, dst)
+ }
+
+ /// Returns `true` if this `char` has the `Alphabetic` property.
+ ///
+ /// `Alphabetic` is described in Chapter 4 (Character Properties) of the [Unicode Standard] and
+ /// specified in the [Unicode Character Database][ucd] [`DerivedCoreProperties.txt`].
+ ///
+ /// [Unicode Standard]: https://www.unicode.org/versions/latest/
+ /// [ucd]: https://www.unicode.org/reports/tr44/
+ /// [`DerivedCoreProperties.txt`]: https://www.unicode.org/Public/UCD/latest/ucd/DerivedCoreProperties.txt
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert!('a'.is_alphabetic());
+ /// assert!('京'.is_alphabetic());
+ ///
+ /// let c = '💝';
+ /// // love is many things, but it is not alphabetic
+ /// assert!(!c.is_alphabetic());
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn is_alphabetic(self) -> bool {
+ match self {
+ 'a'..='z' | 'A'..='Z' => true,
+ c => c > '\x7f' && unicode::Alphabetic(c),
+ }
+ }
+
+ /// Returns `true` if this `char` has the `Lowercase` property.
+ ///
+ /// `Lowercase` is described in Chapter 4 (Character Properties) of the [Unicode Standard] and
+ /// specified in the [Unicode Character Database][ucd] [`DerivedCoreProperties.txt`].
+ ///
+ /// [Unicode Standard]: https://www.unicode.org/versions/latest/
+ /// [ucd]: https://www.unicode.org/reports/tr44/
+ /// [`DerivedCoreProperties.txt`]: https://www.unicode.org/Public/UCD/latest/ucd/DerivedCoreProperties.txt
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert!('a'.is_lowercase());
+ /// assert!('δ'.is_lowercase());
+ /// assert!(!'A'.is_lowercase());
+ /// assert!(!'Δ'.is_lowercase());
+ ///
+ /// // The various Chinese scripts and punctuation do not have case, and so:
+ /// assert!(!'中'.is_lowercase());
+ /// assert!(!' '.is_lowercase());
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn is_lowercase(self) -> bool {
+ match self {
+ 'a'..='z' => true,
+ c => c > '\x7f' && unicode::Lowercase(c),
+ }
+ }
+
+ /// Returns `true` if this `char` has the `Uppercase` property.
+ ///
+ /// `Uppercase` is described in Chapter 4 (Character Properties) of the [Unicode Standard] and
+ /// specified in the [Unicode Character Database][ucd] [`DerivedCoreProperties.txt`].
+ ///
+ /// [Unicode Standard]: https://www.unicode.org/versions/latest/
+ /// [ucd]: https://www.unicode.org/reports/tr44/
+ /// [`DerivedCoreProperties.txt`]: https://www.unicode.org/Public/UCD/latest/ucd/DerivedCoreProperties.txt
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert!(!'a'.is_uppercase());
+ /// assert!(!'δ'.is_uppercase());
+ /// assert!('A'.is_uppercase());
+ /// assert!('Δ'.is_uppercase());
+ ///
+ /// // The various Chinese scripts and punctuation do not have case, and so:
+ /// assert!(!'中'.is_uppercase());
+ /// assert!(!' '.is_uppercase());
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn is_uppercase(self) -> bool {
+ match self {
+ 'A'..='Z' => true,
+ c => c > '\x7f' && unicode::Uppercase(c),
+ }
+ }
+
+ /// Returns `true` if this `char` has the `White_Space` property.
+ ///
+ /// `White_Space` is specified in the [Unicode Character Database][ucd] [`PropList.txt`].
+ ///
+ /// [ucd]: https://www.unicode.org/reports/tr44/
+ /// [`PropList.txt`]: https://www.unicode.org/Public/UCD/latest/ucd/PropList.txt
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert!(' '.is_whitespace());
+ ///
+ /// // line break
+ /// assert!('\n'.is_whitespace());
+ ///
+ /// // a non-breaking space
+ /// assert!('\u{A0}'.is_whitespace());
+ ///
+ /// assert!(!'越'.is_whitespace());
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn is_whitespace(self) -> bool {
+ match self {
+ ' ' | '\x09'..='\x0d' => true,
+ c => c > '\x7f' && unicode::White_Space(c),
+ }
+ }
+
+ /// Returns `true` if this `char` satisfies either [`is_alphabetic()`] or [`is_numeric()`].
+ ///
+ /// [`is_alphabetic()`]: #method.is_alphabetic
+ /// [`is_numeric()`]: #method.is_numeric
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert!('٣'.is_alphanumeric());
+ /// assert!('7'.is_alphanumeric());
+ /// assert!('৬'.is_alphanumeric());
+ /// assert!('¾'.is_alphanumeric());
+ /// assert!('①'.is_alphanumeric());
+ /// assert!('K'.is_alphanumeric());
+ /// assert!('و'.is_alphanumeric());
+ /// assert!('藏'.is_alphanumeric());
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn is_alphanumeric(self) -> bool {
+ self.is_alphabetic() || self.is_numeric()
+ }
+
+ /// Returns `true` if this `char` has the general category for control codes.
+ ///
+ /// Control codes (code points with the general category of `Cc`) are described in Chapter 4
+ /// (Character Properties) of the [Unicode Standard] and specified in the [Unicode Character
+ /// Database][ucd] [`UnicodeData.txt`].
+ ///
+ /// [Unicode Standard]: https://www.unicode.org/versions/latest/
+ /// [ucd]: https://www.unicode.org/reports/tr44/
+ /// [`UnicodeData.txt`]: https://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // U+009C, STRING TERMINATOR
+    /// assert!('\u{9c}'.is_control());
+ /// assert!(!'q'.is_control());
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn is_control(self) -> bool {
+ unicode::Cc(self)
+ }
+
+ /// Returns `true` if this `char` has the `Grapheme_Extend` property.
+ ///
+ /// `Grapheme_Extend` is described in [Unicode Standard Annex #29 (Unicode Text
+ /// Segmentation)][uax29] and specified in the [Unicode Character Database][ucd]
+ /// [`DerivedCoreProperties.txt`].
+ ///
+ /// [uax29]: https://www.unicode.org/reports/tr29/
+ /// [ucd]: https://www.unicode.org/reports/tr44/
+ /// [`DerivedCoreProperties.txt`]: https://www.unicode.org/Public/UCD/latest/ucd/DerivedCoreProperties.txt
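+    ///
+    /// A small illustration via its observable effect (this method itself is
+    /// crate-private): combining marks have the property, so `escape_debug`
+    /// escapes them:
+    ///
+    /// ```
+    /// // U+0301 COMBINING ACUTE ACCENT has the `Grapheme_Extend` property
+    /// assert_eq!('\u{301}'.escape_debug().to_string(), "\\u{301}");
+    /// ```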
+ #[must_use]
+ #[inline]
+ pub(crate) fn is_grapheme_extended(self) -> bool {
+ unicode::Grapheme_Extend(self)
+ }
+
+ /// Returns `true` if this `char` has one of the general categories for numbers.
+ ///
+ /// The general categories for numbers (`Nd` for decimal digits, `Nl` for letter-like numeric
+ /// characters, and `No` for other numeric characters) are specified in the [Unicode Character
+    /// Database][ucd] [`UnicodeData.txt`]. Note that this means ideographic numbers like '三'
+    /// are considered alphabetic, not numeric.
+    ///
+    /// This method doesn't cover everything that could be considered a number. If you want
+    /// everything, including characters with overlapping purposes, you might want to use a
+    /// Unicode or language-processing library that exposes the appropriate character
+    /// properties instead of looking at the Unicode general categories.
+    ///
+    /// If you want to parse ASCII decimal digits (0-9) or ASCII base-N, use
+    /// `is_ascii_digit` or `is_digit` instead.
+ ///
+ /// [Unicode Standard]: https://www.unicode.org/versions/latest/
+ /// [ucd]: https://www.unicode.org/reports/tr44/
+ /// [`UnicodeData.txt`]: https://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert!('٣'.is_numeric());
+ /// assert!('7'.is_numeric());
+ /// assert!('৬'.is_numeric());
+ /// assert!('¾'.is_numeric());
+ /// assert!('①'.is_numeric());
+ /// assert!(!'K'.is_numeric());
+ /// assert!(!'و'.is_numeric());
+ /// assert!(!'藏'.is_numeric());
+ /// assert!(!'三'.is_numeric());
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn is_numeric(self) -> bool {
+ match self {
+ '0'..='9' => true,
+ c => c > '\x7f' && unicode::N(c),
+ }
+ }
+
+ /// Returns an iterator that yields the lowercase mapping of this `char` as one or more
+ /// `char`s.
+ ///
+ /// If this `char` does not have a lowercase mapping, the iterator yields the same `char`.
+ ///
+ /// If this `char` has a one-to-one lowercase mapping given by the [Unicode Character
+ /// Database][ucd] [`UnicodeData.txt`], the iterator yields that `char`.
+ ///
+ /// [ucd]: https://www.unicode.org/reports/tr44/
+ /// [`UnicodeData.txt`]: https://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt
+ ///
+ /// If this `char` requires special considerations (e.g. multiple `char`s) the iterator yields
+ /// the `char`(s) given by [`SpecialCasing.txt`].
+ ///
+ /// [`SpecialCasing.txt`]: https://www.unicode.org/Public/UCD/latest/ucd/SpecialCasing.txt
+ ///
+ /// This operation performs an unconditional mapping without tailoring. That is, the conversion
+ /// is independent of context and language.
+ ///
+ /// In the [Unicode Standard], Chapter 4 (Character Properties) discusses case mapping in
+ /// general and Chapter 3 (Conformance) discusses the default algorithm for case conversion.
+ ///
+ /// [Unicode Standard]: https://www.unicode.org/versions/latest/
+ ///
+ /// # Examples
+ ///
+ /// As an iterator:
+ ///
+ /// ```
+ /// for c in 'İ'.to_lowercase() {
+ /// print!("{c}");
+ /// }
+ /// println!();
+ /// ```
+ ///
+ /// Using `println!` directly:
+ ///
+ /// ```
+ /// println!("{}", 'İ'.to_lowercase());
+ /// ```
+ ///
+ /// Both are equivalent to:
+ ///
+ /// ```
+ /// println!("i\u{307}");
+ /// ```
+ ///
+ /// Using [`to_string`](../std/string/trait.ToString.html#tymethod.to_string):
+ ///
+ /// ```
+ /// assert_eq!('C'.to_lowercase().to_string(), "c");
+ ///
+ /// // Sometimes the result is more than one character:
+ /// assert_eq!('İ'.to_lowercase().to_string(), "i\u{307}");
+ ///
+ /// // Characters that do not have both uppercase and lowercase
+ /// // convert into themselves.
+ /// assert_eq!('山'.to_lowercase().to_string(), "山");
+ /// ```
+ #[must_use = "this returns the lowercase character as a new iterator, \
+ without modifying the original"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn to_lowercase(self) -> ToLowercase {
+ ToLowercase(CaseMappingIter::new(conversions::to_lower(self)))
+ }
+
+ /// Returns an iterator that yields the uppercase mapping of this `char` as one or more
+ /// `char`s.
+ ///
+ /// If this `char` does not have an uppercase mapping, the iterator yields the same `char`.
+ ///
+ /// If this `char` has a one-to-one uppercase mapping given by the [Unicode Character
+ /// Database][ucd] [`UnicodeData.txt`], the iterator yields that `char`.
+ ///
+ /// [ucd]: https://www.unicode.org/reports/tr44/
+ /// [`UnicodeData.txt`]: https://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt
+ ///
+ /// If this `char` requires special considerations (e.g. multiple `char`s) the iterator yields
+ /// the `char`(s) given by [`SpecialCasing.txt`].
+ ///
+ /// [`SpecialCasing.txt`]: https://www.unicode.org/Public/UCD/latest/ucd/SpecialCasing.txt
+ ///
+ /// This operation performs an unconditional mapping without tailoring. That is, the conversion
+ /// is independent of context and language.
+ ///
+ /// In the [Unicode Standard], Chapter 4 (Character Properties) discusses case mapping in
+ /// general and Chapter 3 (Conformance) discusses the default algorithm for case conversion.
+ ///
+ /// [Unicode Standard]: https://www.unicode.org/versions/latest/
+ ///
+ /// # Examples
+ ///
+ /// As an iterator:
+ ///
+ /// ```
+ /// for c in 'ß'.to_uppercase() {
+ /// print!("{c}");
+ /// }
+ /// println!();
+ /// ```
+ ///
+ /// Using `println!` directly:
+ ///
+ /// ```
+ /// println!("{}", 'ß'.to_uppercase());
+ /// ```
+ ///
+ /// Both are equivalent to:
+ ///
+ /// ```
+ /// println!("SS");
+ /// ```
+ ///
+ /// Using [`to_string`](../std/string/trait.ToString.html#tymethod.to_string):
+ ///
+ /// ```
+ /// assert_eq!('c'.to_uppercase().to_string(), "C");
+ ///
+ /// // Sometimes the result is more than one character:
+ /// assert_eq!('ß'.to_uppercase().to_string(), "SS");
+ ///
+ /// // Characters that do not have both uppercase and lowercase
+ /// // convert into themselves.
+ /// assert_eq!('山'.to_uppercase().to_string(), "山");
+ /// ```
+ ///
+ /// # Note on locale
+ ///
+ /// In Turkish, the equivalent of 'i' in Latin has five forms instead of two:
+ ///
+ /// * 'Dotless': I / ı, sometimes written ï
+ /// * 'Dotted': İ / i
+ ///
+    /// Note that the lowercase dotted 'i' is the same as the Latin one. Therefore:
+ ///
+ /// ```
+ /// let upper_i = 'i'.to_uppercase().to_string();
+ /// ```
+ ///
+ /// The value of `upper_i` here relies on the language of the text: if we're
+ /// in `en-US`, it should be `"I"`, but if we're in `tr_TR`, it should
+ /// be `"İ"`. `to_uppercase()` does not take this into account, and so:
+ ///
+ /// ```
+ /// let upper_i = 'i'.to_uppercase().to_string();
+ ///
+ /// assert_eq!(upper_i, "I");
+ /// ```
+ ///
+ /// holds across languages.
+ #[must_use = "this returns the uppercase character as a new iterator, \
+ without modifying the original"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn to_uppercase(self) -> ToUppercase {
+ ToUppercase(CaseMappingIter::new(conversions::to_upper(self)))
+ }
+
+ /// Checks if the value is within the ASCII range.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let ascii = 'a';
+ /// let non_ascii = '❤';
+ ///
+ /// assert!(ascii.is_ascii());
+ /// assert!(!non_ascii.is_ascii());
+ /// ```
+ #[must_use]
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[rustc_const_stable(feature = "const_char_is_ascii", since = "1.32.0")]
+ #[inline]
+ pub const fn is_ascii(&self) -> bool {
+ *self as u32 <= 0x7F
+ }
+
+ /// Makes a copy of the value in its ASCII upper case equivalent.
+ ///
+ /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To uppercase the value in-place, use [`make_ascii_uppercase()`].
+ ///
+ /// To uppercase ASCII characters in addition to non-ASCII characters, use
+ /// [`to_uppercase()`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let ascii = 'a';
+ /// let non_ascii = '❤';
+ ///
+ /// assert_eq!('A', ascii.to_ascii_uppercase());
+ /// assert_eq!('❤', non_ascii.to_ascii_uppercase());
+ /// ```
+ ///
+ /// [`make_ascii_uppercase()`]: #method.make_ascii_uppercase
+ /// [`to_uppercase()`]: #method.to_uppercase
+ #[must_use = "to uppercase the value in-place, use `make_ascii_uppercase()`"]
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[rustc_const_stable(feature = "const_ascii_methods_on_intrinsics", since = "1.52.0")]
+ #[inline]
+ pub const fn to_ascii_uppercase(&self) -> char {
+ if self.is_ascii_lowercase() {
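+            // flip the ASCII case bit (0x20); this is only valid because we
+            // just checked that `self` is an ASCII lowercase letter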
+ (*self as u8).ascii_change_case_unchecked() as char
+ } else {
+ *self
+ }
+ }
+
+ /// Makes a copy of the value in its ASCII lower case equivalent.
+ ///
+ /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To lowercase the value in-place, use [`make_ascii_lowercase()`].
+ ///
+ /// To lowercase ASCII characters in addition to non-ASCII characters, use
+ /// [`to_lowercase()`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let ascii = 'A';
+ /// let non_ascii = '❤';
+ ///
+ /// assert_eq!('a', ascii.to_ascii_lowercase());
+ /// assert_eq!('❤', non_ascii.to_ascii_lowercase());
+ /// ```
+ ///
+ /// [`make_ascii_lowercase()`]: #method.make_ascii_lowercase
+ /// [`to_lowercase()`]: #method.to_lowercase
+ #[must_use = "to lowercase the value in-place, use `make_ascii_lowercase()`"]
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[rustc_const_stable(feature = "const_ascii_methods_on_intrinsics", since = "1.52.0")]
+ #[inline]
+ pub const fn to_ascii_lowercase(&self) -> char {
+ if self.is_ascii_uppercase() {
+ (*self as u8).ascii_change_case_unchecked() as char
+ } else {
+ *self
+ }
+ }
+
+ /// Checks that two values are an ASCII case-insensitive match.
+ ///
+ /// Equivalent to <code>[to_ascii_lowercase]\(a) == [to_ascii_lowercase]\(b)</code>.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let upper_a = 'A';
+ /// let lower_a = 'a';
+ /// let lower_z = 'z';
+ ///
+ /// assert!(upper_a.eq_ignore_ascii_case(&lower_a));
+ /// assert!(upper_a.eq_ignore_ascii_case(&upper_a));
+ /// assert!(!upper_a.eq_ignore_ascii_case(&lower_z));
+ /// ```
+ ///
+ /// [to_ascii_lowercase]: #method.to_ascii_lowercase
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[rustc_const_stable(feature = "const_ascii_methods_on_intrinsics", since = "1.52.0")]
+ #[inline]
+ pub const fn eq_ignore_ascii_case(&self, other: &char) -> bool {
+ self.to_ascii_lowercase() == other.to_ascii_lowercase()
+ }
+
+ /// Converts this type to its ASCII upper case equivalent in-place.
+ ///
+ /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To return a new uppercased value without modifying the existing one, use
+ /// [`to_ascii_uppercase()`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut ascii = 'a';
+ ///
+ /// ascii.make_ascii_uppercase();
+ ///
+ /// assert_eq!('A', ascii);
+ /// ```
+ ///
+ /// [`to_ascii_uppercase()`]: #method.to_ascii_uppercase
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn make_ascii_uppercase(&mut self) {
+ *self = self.to_ascii_uppercase();
+ }
+
+ /// Converts this type to its ASCII lower case equivalent in-place.
+ ///
+ /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To return a new lowercased value without modifying the existing one, use
+ /// [`to_ascii_lowercase()`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut ascii = 'A';
+ ///
+ /// ascii.make_ascii_lowercase();
+ ///
+ /// assert_eq!('a', ascii);
+ /// ```
+ ///
+ /// [`to_ascii_lowercase()`]: #method.to_ascii_lowercase
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn make_ascii_lowercase(&mut self) {
+ *self = self.to_ascii_lowercase();
+ }
+
+ /// Checks if the value is an ASCII alphabetic character:
+ ///
+ /// - U+0041 'A' ..= U+005A 'Z', or
+ /// - U+0061 'a' ..= U+007A 'z'.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let uppercase_a = 'A';
+ /// let uppercase_g = 'G';
+ /// let a = 'a';
+ /// let g = 'g';
+ /// let zero = '0';
+ /// let percent = '%';
+ /// let space = ' ';
+ /// let lf = '\n';
+ /// let esc = '\x1b';
+ ///
+ /// assert!(uppercase_a.is_ascii_alphabetic());
+ /// assert!(uppercase_g.is_ascii_alphabetic());
+ /// assert!(a.is_ascii_alphabetic());
+ /// assert!(g.is_ascii_alphabetic());
+ /// assert!(!zero.is_ascii_alphabetic());
+ /// assert!(!percent.is_ascii_alphabetic());
+ /// assert!(!space.is_ascii_alphabetic());
+ /// assert!(!lf.is_ascii_alphabetic());
+ /// assert!(!esc.is_ascii_alphabetic());
+ /// ```
+ #[must_use]
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_alphabetic(&self) -> bool {
+ matches!(*self, 'A'..='Z' | 'a'..='z')
+ }
+
+ /// Checks if the value is an ASCII uppercase character:
+ /// U+0041 'A' ..= U+005A 'Z'.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let uppercase_a = 'A';
+ /// let uppercase_g = 'G';
+ /// let a = 'a';
+ /// let g = 'g';
+ /// let zero = '0';
+ /// let percent = '%';
+ /// let space = ' ';
+ /// let lf = '\n';
+ /// let esc = '\x1b';
+ ///
+ /// assert!(uppercase_a.is_ascii_uppercase());
+ /// assert!(uppercase_g.is_ascii_uppercase());
+ /// assert!(!a.is_ascii_uppercase());
+ /// assert!(!g.is_ascii_uppercase());
+ /// assert!(!zero.is_ascii_uppercase());
+ /// assert!(!percent.is_ascii_uppercase());
+ /// assert!(!space.is_ascii_uppercase());
+ /// assert!(!lf.is_ascii_uppercase());
+ /// assert!(!esc.is_ascii_uppercase());
+ /// ```
+ #[must_use]
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_uppercase(&self) -> bool {
+ matches!(*self, 'A'..='Z')
+ }
+
+ /// Checks if the value is an ASCII lowercase character:
+ /// U+0061 'a' ..= U+007A 'z'.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let uppercase_a = 'A';
+ /// let uppercase_g = 'G';
+ /// let a = 'a';
+ /// let g = 'g';
+ /// let zero = '0';
+ /// let percent = '%';
+ /// let space = ' ';
+ /// let lf = '\n';
+ /// let esc = '\x1b';
+ ///
+ /// assert!(!uppercase_a.is_ascii_lowercase());
+ /// assert!(!uppercase_g.is_ascii_lowercase());
+ /// assert!(a.is_ascii_lowercase());
+ /// assert!(g.is_ascii_lowercase());
+ /// assert!(!zero.is_ascii_lowercase());
+ /// assert!(!percent.is_ascii_lowercase());
+ /// assert!(!space.is_ascii_lowercase());
+ /// assert!(!lf.is_ascii_lowercase());
+ /// assert!(!esc.is_ascii_lowercase());
+ /// ```
+ #[must_use]
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_lowercase(&self) -> bool {
+ matches!(*self, 'a'..='z')
+ }
+
+ /// Checks if the value is an ASCII alphanumeric character:
+ ///
+ /// - U+0041 'A' ..= U+005A 'Z', or
+ /// - U+0061 'a' ..= U+007A 'z', or
+ /// - U+0030 '0' ..= U+0039 '9'.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let uppercase_a = 'A';
+ /// let uppercase_g = 'G';
+ /// let a = 'a';
+ /// let g = 'g';
+ /// let zero = '0';
+ /// let percent = '%';
+ /// let space = ' ';
+ /// let lf = '\n';
+ /// let esc = '\x1b';
+ ///
+ /// assert!(uppercase_a.is_ascii_alphanumeric());
+ /// assert!(uppercase_g.is_ascii_alphanumeric());
+ /// assert!(a.is_ascii_alphanumeric());
+ /// assert!(g.is_ascii_alphanumeric());
+ /// assert!(zero.is_ascii_alphanumeric());
+ /// assert!(!percent.is_ascii_alphanumeric());
+ /// assert!(!space.is_ascii_alphanumeric());
+ /// assert!(!lf.is_ascii_alphanumeric());
+ /// assert!(!esc.is_ascii_alphanumeric());
+ /// ```
+ #[must_use]
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_alphanumeric(&self) -> bool {
+ matches!(*self, '0'..='9' | 'A'..='Z' | 'a'..='z')
+ }
+
+ /// Checks if the value is an ASCII decimal digit:
+ /// U+0030 '0' ..= U+0039 '9'.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let uppercase_a = 'A';
+ /// let uppercase_g = 'G';
+ /// let a = 'a';
+ /// let g = 'g';
+ /// let zero = '0';
+ /// let percent = '%';
+ /// let space = ' ';
+ /// let lf = '\n';
+ /// let esc = '\x1b';
+ ///
+ /// assert!(!uppercase_a.is_ascii_digit());
+ /// assert!(!uppercase_g.is_ascii_digit());
+ /// assert!(!a.is_ascii_digit());
+ /// assert!(!g.is_ascii_digit());
+ /// assert!(zero.is_ascii_digit());
+ /// assert!(!percent.is_ascii_digit());
+ /// assert!(!space.is_ascii_digit());
+ /// assert!(!lf.is_ascii_digit());
+ /// assert!(!esc.is_ascii_digit());
+ /// ```
+ #[must_use]
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_digit(&self) -> bool {
+ matches!(*self, '0'..='9')
+ }
+
+ /// Checks if the value is an ASCII hexadecimal digit:
+ ///
+ /// - U+0030 '0' ..= U+0039 '9', or
+ /// - U+0041 'A' ..= U+0046 'F', or
+ /// - U+0061 'a' ..= U+0066 'f'.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let uppercase_a = 'A';
+ /// let uppercase_g = 'G';
+ /// let a = 'a';
+ /// let g = 'g';
+ /// let zero = '0';
+ /// let percent = '%';
+ /// let space = ' ';
+ /// let lf = '\n';
+ /// let esc = '\x1b';
+ ///
+ /// assert!(uppercase_a.is_ascii_hexdigit());
+ /// assert!(!uppercase_g.is_ascii_hexdigit());
+ /// assert!(a.is_ascii_hexdigit());
+ /// assert!(!g.is_ascii_hexdigit());
+ /// assert!(zero.is_ascii_hexdigit());
+ /// assert!(!percent.is_ascii_hexdigit());
+ /// assert!(!space.is_ascii_hexdigit());
+ /// assert!(!lf.is_ascii_hexdigit());
+ /// assert!(!esc.is_ascii_hexdigit());
+ /// ```
+ #[must_use]
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_hexdigit(&self) -> bool {
+ matches!(*self, '0'..='9' | 'A'..='F' | 'a'..='f')
+ }
+
+ /// Checks if the value is an ASCII punctuation character:
+ ///
+ /// - U+0021 ..= U+002F `! " # $ % & ' ( ) * + , - . /`, or
+ /// - U+003A ..= U+0040 `: ; < = > ? @`, or
+ /// - U+005B ..= U+0060 ``[ \ ] ^ _ ` ``, or
+ /// - U+007B ..= U+007E `{ | } ~`
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let uppercase_a = 'A';
+ /// let uppercase_g = 'G';
+ /// let a = 'a';
+ /// let g = 'g';
+ /// let zero = '0';
+ /// let percent = '%';
+ /// let space = ' ';
+ /// let lf = '\n';
+ /// let esc = '\x1b';
+ ///
+ /// assert!(!uppercase_a.is_ascii_punctuation());
+ /// assert!(!uppercase_g.is_ascii_punctuation());
+ /// assert!(!a.is_ascii_punctuation());
+ /// assert!(!g.is_ascii_punctuation());
+ /// assert!(!zero.is_ascii_punctuation());
+ /// assert!(percent.is_ascii_punctuation());
+ /// assert!(!space.is_ascii_punctuation());
+ /// assert!(!lf.is_ascii_punctuation());
+ /// assert!(!esc.is_ascii_punctuation());
+ /// ```
+ #[must_use]
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_punctuation(&self) -> bool {
+ matches!(*self, '!'..='/' | ':'..='@' | '['..='`' | '{'..='~')
+ }
+
+ /// Checks if the value is an ASCII graphic character:
+ /// U+0021 '!' ..= U+007E '~'.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let uppercase_a = 'A';
+ /// let uppercase_g = 'G';
+ /// let a = 'a';
+ /// let g = 'g';
+ /// let zero = '0';
+ /// let percent = '%';
+ /// let space = ' ';
+ /// let lf = '\n';
+ /// let esc = '\x1b';
+ ///
+ /// assert!(uppercase_a.is_ascii_graphic());
+ /// assert!(uppercase_g.is_ascii_graphic());
+ /// assert!(a.is_ascii_graphic());
+ /// assert!(g.is_ascii_graphic());
+ /// assert!(zero.is_ascii_graphic());
+ /// assert!(percent.is_ascii_graphic());
+ /// assert!(!space.is_ascii_graphic());
+ /// assert!(!lf.is_ascii_graphic());
+ /// assert!(!esc.is_ascii_graphic());
+ /// ```
+ #[must_use]
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_graphic(&self) -> bool {
+ matches!(*self, '!'..='~')
+ }
+
+ /// Checks if the value is an ASCII whitespace character:
+ /// U+0020 SPACE, U+0009 HORIZONTAL TAB, U+000A LINE FEED,
+ /// U+000C FORM FEED, or U+000D CARRIAGE RETURN.
+ ///
+ /// Rust uses the WhatWG Infra Standard's [definition of ASCII
+ /// whitespace][infra-aw]. There are several other definitions in
+ /// wide use. For instance, [the POSIX locale][pct] includes
+ /// U+000B VERTICAL TAB as well as all the above characters,
+ /// but—from the very same specification—[the default rule for
+ /// "field splitting" in the Bourne shell][bfs] considers *only*
+ /// SPACE, HORIZONTAL TAB, and LINE FEED as whitespace.
+ ///
+ /// If you are writing a program that will process an existing
+ /// file format, check what that format's definition of whitespace is
+ /// before using this function.
+ ///
+ /// [infra-aw]: https://infra.spec.whatwg.org/#ascii-whitespace
+ /// [pct]: https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap07.html#tag_07_03_01
+ /// [bfs]: https://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_06_05
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let uppercase_a = 'A';
+ /// let uppercase_g = 'G';
+ /// let a = 'a';
+ /// let g = 'g';
+ /// let zero = '0';
+ /// let percent = '%';
+ /// let space = ' ';
+ /// let lf = '\n';
+ /// let esc = '\x1b';
+ ///
+ /// assert!(!uppercase_a.is_ascii_whitespace());
+ /// assert!(!uppercase_g.is_ascii_whitespace());
+ /// assert!(!a.is_ascii_whitespace());
+ /// assert!(!g.is_ascii_whitespace());
+ /// assert!(!zero.is_ascii_whitespace());
+ /// assert!(!percent.is_ascii_whitespace());
+ /// assert!(space.is_ascii_whitespace());
+ /// assert!(lf.is_ascii_whitespace());
+ /// assert!(!esc.is_ascii_whitespace());
+ /// ```
+ #[must_use]
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_whitespace(&self) -> bool {
+ matches!(*self, '\t' | '\n' | '\x0C' | '\r' | ' ')
+ }
+
+ /// Checks if the value is an ASCII control character:
+ /// U+0000 NUL ..= U+001F UNIT SEPARATOR, or U+007F DELETE.
+ /// Note that most ASCII whitespace characters are control
+ /// characters, but SPACE is not.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let uppercase_a = 'A';
+ /// let uppercase_g = 'G';
+ /// let a = 'a';
+ /// let g = 'g';
+ /// let zero = '0';
+ /// let percent = '%';
+ /// let space = ' ';
+ /// let lf = '\n';
+ /// let esc = '\x1b';
+ ///
+ /// assert!(!uppercase_a.is_ascii_control());
+ /// assert!(!uppercase_g.is_ascii_control());
+ /// assert!(!a.is_ascii_control());
+ /// assert!(!g.is_ascii_control());
+ /// assert!(!zero.is_ascii_control());
+ /// assert!(!percent.is_ascii_control());
+ /// assert!(!space.is_ascii_control());
+ /// assert!(lf.is_ascii_control());
+ /// assert!(esc.is_ascii_control());
+ /// ```
+ #[must_use]
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_control(&self) -> bool {
+ matches!(*self, '\0'..='\x1F' | '\x7F')
+ }
+}
+
+pub(crate) struct EscapeDebugExtArgs {
+ /// Escape Extended Grapheme codepoints?
+ pub(crate) escape_grapheme_extended: bool,
+
+ /// Escape single quotes?
+ pub(crate) escape_single_quote: bool,
+
+ /// Escape double quotes?
+ pub(crate) escape_double_quote: bool,
+}
+
+impl EscapeDebugExtArgs {
+ pub(crate) const ESCAPE_ALL: Self = Self {
+ escape_grapheme_extended: true,
+ escape_single_quote: true,
+ escape_double_quote: true,
+ };
+}
+
+#[inline]
+const fn len_utf8(code: u32) -> usize {
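+    // boundaries: < 0x80 needs 1 byte (ASCII), < 0x800 needs 2,
+    // < 0x10000 needs 3, and everything else needs 4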
+ if code < MAX_ONE_B {
+ 1
+ } else if code < MAX_TWO_B {
+ 2
+ } else if code < MAX_THREE_B {
+ 3
+ } else {
+ 4
+ }
+}
+
+/// Encodes a raw u32 value as UTF-8 into the provided byte buffer,
+/// and then returns the subslice of the buffer that contains the encoded character.
+///
+/// Unlike `char::encode_utf8`, this method also handles codepoints in the surrogate range.
+/// (Creating a `char` in the surrogate range is UB.)
+/// The result is valid [generalized UTF-8] but not valid UTF-8.
+///
+/// [generalized UTF-8]: https://simonsapin.github.io/wtf-8/#generalized-utf8
+///
+/// # Panics
+///
+/// Panics if the buffer is not large enough.
+/// A buffer of length four is large enough to encode any `char`.
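+///
+/// For example, the surrogate code point U+D800 (which is not a valid `char`)
+/// encodes to the generalized-UTF-8 bytes `[0xED, 0xA0, 0x80]`.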
+#[unstable(feature = "char_internals", reason = "exposed only for libstd", issue = "none")]
+#[doc(hidden)]
+#[inline]
+pub fn encode_utf8_raw(code: u32, dst: &mut [u8]) -> &mut [u8] {
+ let len = len_utf8(code);
+ match (len, &mut dst[..]) {
+ (1, [a, ..]) => {
+ *a = code as u8;
+ }
+ (2, [a, b, ..]) => {
+ *a = (code >> 6 & 0x1F) as u8 | TAG_TWO_B;
+ *b = (code & 0x3F) as u8 | TAG_CONT;
+ }
+ (3, [a, b, c, ..]) => {
+ *a = (code >> 12 & 0x0F) as u8 | TAG_THREE_B;
+ *b = (code >> 6 & 0x3F) as u8 | TAG_CONT;
+ *c = (code & 0x3F) as u8 | TAG_CONT;
+ }
+ (4, [a, b, c, d, ..]) => {
+ *a = (code >> 18 & 0x07) as u8 | TAG_FOUR_B;
+ *b = (code >> 12 & 0x3F) as u8 | TAG_CONT;
+ *c = (code >> 6 & 0x3F) as u8 | TAG_CONT;
+ *d = (code & 0x3F) as u8 | TAG_CONT;
+ }
+ _ => panic!(
+ "encode_utf8: need {} bytes to encode U+{:X}, but the buffer has {}",
+ len,
+ code,
+ dst.len(),
+ ),
+ };
+ &mut dst[..len]
+}
+
+/// Encodes a raw u32 value as UTF-16 into the provided `u16` buffer,
+/// and then returns the subslice of the buffer that contains the encoded character.
+///
+/// Unlike `char::encode_utf16`, this method also handles codepoints in the surrogate range.
+/// (Creating a `char` in the surrogate range is UB.)
+///
+/// # Panics
+///
+/// Panics if the buffer is not large enough.
+/// A buffer of length 2 is large enough to encode any `char`.
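+///
+/// For example, U+10437 ('𐐷') encodes to the surrogate pair `[0xD801, 0xDC37]`.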
+#[unstable(feature = "char_internals", reason = "exposed only for libstd", issue = "none")]
+#[doc(hidden)]
+#[inline]
+pub fn encode_utf16_raw(mut code: u32, dst: &mut [u16]) -> &mut [u16] {
+    // SAFETY: each arm checks that the buffer has enough space to write into
+ unsafe {
+ if (code & 0xFFFF) == code && !dst.is_empty() {
+ // The BMP falls through
+ *dst.get_unchecked_mut(0) = code as u16;
+ slice::from_raw_parts_mut(dst.as_mut_ptr(), 1)
+ } else if dst.len() >= 2 {
+ // Supplementary planes break into surrogates.
+ code -= 0x1_0000;
+ *dst.get_unchecked_mut(0) = 0xD800 | ((code >> 10) as u16);
+ *dst.get_unchecked_mut(1) = 0xDC00 | ((code as u16) & 0x3FF);
+ slice::from_raw_parts_mut(dst.as_mut_ptr(), 2)
+ } else {
+ panic!(
+ "encode_utf16: need {} units to encode U+{:X}, but the buffer has {}",
+ from_u32_unchecked(code).len_utf16(),
+ code,
+ dst.len(),
+ )
+ }
+ }
+}
diff --git a/library/core/src/char/mod.rs b/library/core/src/char/mod.rs
new file mode 100644
index 000000000..0df23e7bb
--- /dev/null
+++ b/library/core/src/char/mod.rs
@@ -0,0 +1,584 @@
+//! A character type.
+//!
+//! The `char` type represents a single character. More specifically, since
+//! 'character' isn't a well-defined concept in Unicode, `char` is a '[Unicode
+//! scalar value]', which is similar to, but not the same as, a '[Unicode code
+//! point]'.
+//!
+//! [Unicode scalar value]: https://www.unicode.org/glossary/#unicode_scalar_value
+//! [Unicode code point]: https://www.unicode.org/glossary/#code_point
+//!
+//! This module exists for technical reasons; the primary documentation for
+//! `char` is directly on [the `char` primitive type][char] itself.
+//!
+//! This module is the home of the iterator implementations for the iterators
+//! implemented on `char`, as well as some useful constants and conversion
+//! functions that convert various types to `char`.
+
+#![allow(non_snake_case)]
+#![stable(feature = "core_char", since = "1.2.0")]
+
+mod convert;
+mod decode;
+mod methods;
+
+// stable re-exports
+#[stable(feature = "try_from", since = "1.34.0")]
+pub use self::convert::CharTryFromError;
+#[stable(feature = "char_from_str", since = "1.20.0")]
+pub use self::convert::ParseCharError;
+#[stable(feature = "decode_utf16", since = "1.9.0")]
+pub use self::decode::{DecodeUtf16, DecodeUtf16Error};
+
+// perma-unstable re-exports
+#[unstable(feature = "char_internals", reason = "exposed only for libstd", issue = "none")]
+pub use self::methods::encode_utf16_raw;
+#[unstable(feature = "char_internals", reason = "exposed only for libstd", issue = "none")]
+pub use self::methods::encode_utf8_raw;
+
+use crate::fmt::{self, Write};
+use crate::iter::FusedIterator;
+
+pub(crate) use self::methods::EscapeDebugExtArgs;
+
+// UTF-8 ranges and tags for encoding characters
+const TAG_CONT: u8 = 0b1000_0000;
+const TAG_TWO_B: u8 = 0b1100_0000;
+const TAG_THREE_B: u8 = 0b1110_0000;
+const TAG_FOUR_B: u8 = 0b1111_0000;
+const MAX_ONE_B: u32 = 0x80;
+const MAX_TWO_B: u32 = 0x800;
+const MAX_THREE_B: u32 = 0x10000;
+
+/*
+ Lu Uppercase_Letter an uppercase letter
+ Ll Lowercase_Letter a lowercase letter
+ Lt Titlecase_Letter a digraphic character, with first part uppercase
+ Lm Modifier_Letter a modifier letter
+ Lo Other_Letter other letters, including syllables and ideographs
+ Mn Nonspacing_Mark a nonspacing combining mark (zero advance width)
+ Mc Spacing_Mark a spacing combining mark (positive advance width)
+ Me Enclosing_Mark an enclosing combining mark
+ Nd Decimal_Number a decimal digit
+ Nl Letter_Number a letterlike numeric character
+ No Other_Number a numeric character of other type
+ Pc Connector_Punctuation a connecting punctuation mark, like a tie
+ Pd Dash_Punctuation a dash or hyphen punctuation mark
+ Ps Open_Punctuation an opening punctuation mark (of a pair)
+ Pe Close_Punctuation a closing punctuation mark (of a pair)
+ Pi Initial_Punctuation an initial quotation mark
+ Pf Final_Punctuation a final quotation mark
+ Po Other_Punctuation a punctuation mark of other type
+ Sm Math_Symbol a symbol of primarily mathematical use
+ Sc Currency_Symbol a currency sign
+ Sk Modifier_Symbol a non-letterlike modifier symbol
+ So Other_Symbol a symbol of other type
+ Zs Space_Separator a space character (of various non-zero widths)
+ Zl Line_Separator U+2028 LINE SEPARATOR only
+ Zp Paragraph_Separator U+2029 PARAGRAPH SEPARATOR only
+ Cc Control a C0 or C1 control code
+ Cf Format a format control character
+ Cs Surrogate a surrogate code point
+ Co Private_Use a private-use character
+ Cn Unassigned a reserved unassigned code point or a noncharacter
+*/
+
+/// The highest valid code point a `char` can have, `'\u{10FFFF}'`. Use [`char::MAX`] instead.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub const MAX: char = char::MAX;
+
+/// `U+FFFD REPLACEMENT CHARACTER` (�) is used in Unicode to represent a
+/// decoding error. Use [`char::REPLACEMENT_CHARACTER`] instead.
+#[stable(feature = "decode_utf16", since = "1.9.0")]
+pub const REPLACEMENT_CHARACTER: char = char::REPLACEMENT_CHARACTER;
+
+/// The version of [Unicode](https://www.unicode.org/) that the Unicode parts of
+/// `char` and `str` methods are based on. Use [`char::UNICODE_VERSION`] instead.
+#[stable(feature = "unicode_version", since = "1.45.0")]
+pub const UNICODE_VERSION: (u8, u8, u8) = char::UNICODE_VERSION;
+
+/// Creates an iterator over the UTF-16 encoded code points in `iter`, returning
+/// unpaired surrogates as `Err`s. Use [`char::decode_utf16`] instead.
+#[stable(feature = "decode_utf16", since = "1.9.0")]
+#[inline]
+pub fn decode_utf16<I: IntoIterator<Item = u16>>(iter: I) -> DecodeUtf16<I::IntoIter> {
+ self::decode::decode_utf16(iter)
+}
+
+/// Converts a `u32` to a `char`. Use [`char::from_u32`] instead.
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_char_convert", issue = "89259")]
+#[must_use]
+#[inline]
+pub const fn from_u32(i: u32) -> Option<char> {
+ self::convert::from_u32(i)
+}
+
+/// Converts a `u32` to a `char`, ignoring validity. Use [`char::from_u32_unchecked`]
+/// instead.
+#[stable(feature = "char_from_unchecked", since = "1.5.0")]
+#[rustc_const_unstable(feature = "const_char_convert", issue = "89259")]
+#[must_use]
+#[inline]
+pub const unsafe fn from_u32_unchecked(i: u32) -> char {
+ // SAFETY: the safety contract must be upheld by the caller.
+ unsafe { self::convert::from_u32_unchecked(i) }
+}
+
+/// Converts a digit in the given radix to a `char`. Use [`char::from_digit`] instead.
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_char_convert", issue = "89259")]
+#[must_use]
+#[inline]
+pub const fn from_digit(num: u32, radix: u32) -> Option<char> {
+ self::convert::from_digit(num, radix)
+}
+
+/// An iterator that yields the hexadecimal Unicode escape of a character, as
+/// `char`s.
+///
+/// This `struct` is created by the [`escape_unicode`] method on [`char`]. See
+/// its documentation for more.
+///
+/// [`escape_unicode`]: char::escape_unicode
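+///
+/// A minimal example (the hex digits are yielded most significant first):
+///
+/// ```
+/// assert_eq!('❤'.escape_unicode().to_string(), "\\u{2764}");
+/// ```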
+#[derive(Clone, Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct EscapeUnicode {
+ c: char,
+ state: EscapeUnicodeState,
+
+ // The index of the next hex digit to be printed (0 if none),
+ // i.e., the number of remaining hex digits to be printed;
+ // increasing from the least significant digit: 0x543210
+ hex_digit_idx: usize,
+}
+
+// The enum values are ordered so that their representation is the
+// same as the remaining length (besides the hexadecimal digits). This
+// likely makes `len()` a single load from memory and worth inlining.
+#[derive(Clone, Debug)]
+enum EscapeUnicodeState {
+ Done,
+ RightBrace,
+ Value,
+ LeftBrace,
+ Type,
+ Backslash,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Iterator for EscapeUnicode {
+ type Item = char;
+
+ fn next(&mut self) -> Option<char> {
+ match self.state {
+ EscapeUnicodeState::Backslash => {
+ self.state = EscapeUnicodeState::Type;
+ Some('\\')
+ }
+ EscapeUnicodeState::Type => {
+ self.state = EscapeUnicodeState::LeftBrace;
+ Some('u')
+ }
+ EscapeUnicodeState::LeftBrace => {
+ self.state = EscapeUnicodeState::Value;
+ Some('{')
+ }
+ EscapeUnicodeState::Value => {
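+                // hex digits are emitted most significant first;
+                // `hex_digit_idx` counts how many digits remain after this one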
+ let hex_digit = ((self.c as u32) >> (self.hex_digit_idx * 4)) & 0xf;
+ let c = from_digit(hex_digit, 16).unwrap();
+ if self.hex_digit_idx == 0 {
+ self.state = EscapeUnicodeState::RightBrace;
+ } else {
+ self.hex_digit_idx -= 1;
+ }
+ Some(c)
+ }
+ EscapeUnicodeState::RightBrace => {
+ self.state = EscapeUnicodeState::Done;
+ Some('}')
+ }
+ EscapeUnicodeState::Done => None,
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let n = self.len();
+ (n, Some(n))
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ fn last(self) -> Option<char> {
+ match self.state {
+ EscapeUnicodeState::Done => None,
+
+ EscapeUnicodeState::RightBrace
+ | EscapeUnicodeState::Value
+ | EscapeUnicodeState::LeftBrace
+ | EscapeUnicodeState::Type
+ | EscapeUnicodeState::Backslash => Some('}'),
+ }
+ }
+}
+
+#[stable(feature = "exact_size_escape", since = "1.11.0")]
+impl ExactSizeIterator for EscapeUnicode {
+ #[inline]
+ fn len(&self) -> usize {
+ // The match is a single memory access with no branching
+ self.hex_digit_idx
+ + match self.state {
+ EscapeUnicodeState::Done => 0,
+ EscapeUnicodeState::RightBrace => 1,
+ EscapeUnicodeState::Value => 2,
+ EscapeUnicodeState::LeftBrace => 3,
+ EscapeUnicodeState::Type => 4,
+ EscapeUnicodeState::Backslash => 5,
+ }
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl FusedIterator for EscapeUnicode {}
+
+#[stable(feature = "char_struct_display", since = "1.16.0")]
+impl fmt::Display for EscapeUnicode {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ for c in self.clone() {
+ f.write_char(c)?;
+ }
+ Ok(())
+ }
+}
+
+/// An iterator that yields the literal escape code of a `char`.
+///
+/// This `struct` is created by the [`escape_default`] method on [`char`]. See
+/// its documentation for more.
+///
+/// [`escape_default`]: char::escape_default
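+///
+/// A minimal example:
+///
+/// ```
+/// assert_eq!('\t'.escape_default().to_string(), "\\t");
+/// ```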
+#[derive(Clone, Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct EscapeDefault {
+ state: EscapeDefaultState,
+}
+
+#[derive(Clone, Debug)]
+enum EscapeDefaultState {
+ Done,
+ Char(char),
+ Backslash(char),
+ Unicode(EscapeUnicode),
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Iterator for EscapeDefault {
+ type Item = char;
+
+ fn next(&mut self) -> Option<char> {
+ match self.state {
+ EscapeDefaultState::Backslash(c) => {
+ self.state = EscapeDefaultState::Char(c);
+ Some('\\')
+ }
+ EscapeDefaultState::Char(c) => {
+ self.state = EscapeDefaultState::Done;
+ Some(c)
+ }
+ EscapeDefaultState::Done => None,
+ EscapeDefaultState::Unicode(ref mut iter) => iter.next(),
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let n = self.len();
+ (n, Some(n))
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ fn nth(&mut self, n: usize) -> Option<char> {
+ match self.state {
+ EscapeDefaultState::Backslash(c) if n == 0 => {
+ self.state = EscapeDefaultState::Char(c);
+ Some('\\')
+ }
+ EscapeDefaultState::Backslash(c) if n == 1 => {
+ self.state = EscapeDefaultState::Done;
+ Some(c)
+ }
+ EscapeDefaultState::Backslash(_) => {
+ self.state = EscapeDefaultState::Done;
+ None
+ }
+ EscapeDefaultState::Char(c) => {
+ self.state = EscapeDefaultState::Done;
+
+ if n == 0 { Some(c) } else { None }
+ }
+ EscapeDefaultState::Done => None,
+ EscapeDefaultState::Unicode(ref mut i) => i.nth(n),
+ }
+ }
+
+ fn last(self) -> Option<char> {
+ match self.state {
+ EscapeDefaultState::Unicode(iter) => iter.last(),
+ EscapeDefaultState::Done => None,
+ EscapeDefaultState::Backslash(c) | EscapeDefaultState::Char(c) => Some(c),
+ }
+ }
+}
+
+#[stable(feature = "exact_size_escape", since = "1.11.0")]
+impl ExactSizeIterator for EscapeDefault {
+ fn len(&self) -> usize {
+ match self.state {
+ EscapeDefaultState::Done => 0,
+ EscapeDefaultState::Char(_) => 1,
+ EscapeDefaultState::Backslash(_) => 2,
+ EscapeDefaultState::Unicode(ref iter) => iter.len(),
+ }
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl FusedIterator for EscapeDefault {}
+
+#[stable(feature = "char_struct_display", since = "1.16.0")]
+impl fmt::Display for EscapeDefault {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ for c in self.clone() {
+ f.write_char(c)?;
+ }
+ Ok(())
+ }
+}
+
+/// An iterator that yields the literal escape code of a `char`.
+///
+/// This `struct` is created by the [`escape_debug`] method on [`char`]. See its
+/// documentation for more.
+///
+/// [`escape_debug`]: char::escape_debug
+#[stable(feature = "char_escape_debug", since = "1.20.0")]
+#[derive(Clone, Debug)]
+pub struct EscapeDebug(EscapeDefault);
+
+#[stable(feature = "char_escape_debug", since = "1.20.0")]
+impl Iterator for EscapeDebug {
+ type Item = char;
+ fn next(&mut self) -> Option<char> {
+ self.0.next()
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.0.size_hint()
+ }
+}
+
+#[stable(feature = "char_escape_debug", since = "1.20.0")]
+impl ExactSizeIterator for EscapeDebug {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl FusedIterator for EscapeDebug {}
+
+#[stable(feature = "char_escape_debug", since = "1.20.0")]
+impl fmt::Display for EscapeDebug {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&self.0, f)
+ }
+}
+
+/// An iterator that yields the lowercase equivalent of a `char`.
+///
+/// This `struct` is created by the [`to_lowercase`] method on [`char`]. See
+/// its documentation for more.
+///
+/// [`to_lowercase`]: char::to_lowercase
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Debug, Clone)]
+pub struct ToLowercase(CaseMappingIter);
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Iterator for ToLowercase {
+ type Item = char;
+ fn next(&mut self) -> Option<char> {
+ self.0.next()
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.0.size_hint()
+ }
+}
+
+#[stable(feature = "case_mapping_double_ended", since = "1.59.0")]
+impl DoubleEndedIterator for ToLowercase {
+ fn next_back(&mut self) -> Option<char> {
+ self.0.next_back()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl FusedIterator for ToLowercase {}
+
+#[stable(feature = "exact_size_case_mapping_iter", since = "1.35.0")]
+impl ExactSizeIterator for ToLowercase {}
+
+/// An iterator that yields the uppercase equivalent of a `char`.
+///
+/// This `struct` is created by the [`to_uppercase`] method on [`char`]. See
+/// its documentation for more.
+///
+/// [`to_uppercase`]: char::to_uppercase
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Debug, Clone)]
+pub struct ToUppercase(CaseMappingIter);
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Iterator for ToUppercase {
+ type Item = char;
+ fn next(&mut self) -> Option<char> {
+ self.0.next()
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.0.size_hint()
+ }
+}
+
+#[stable(feature = "case_mapping_double_ended", since = "1.59.0")]
+impl DoubleEndedIterator for ToUppercase {
+ fn next_back(&mut self) -> Option<char> {
+ self.0.next_back()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl FusedIterator for ToUppercase {}
+
+#[stable(feature = "exact_size_case_mapping_iter", since = "1.35.0")]
+impl ExactSizeIterator for ToUppercase {}
+
+#[derive(Debug, Clone)]
+enum CaseMappingIter {
+ Three(char, char, char),
+ Two(char, char),
+ One(char),
+ Zero,
+}
+
+impl CaseMappingIter {
+ fn new(chars: [char; 3]) -> CaseMappingIter {
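+        // `conversions::to_lower`/`to_upper` return a fixed `[char; 3]` padded
+        // with '\0' sentinels; e.g. 'İ'.to_lowercase() arrives here as
+        // ['i', '\u{307}', '\0']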
+ if chars[2] == '\0' {
+ if chars[1] == '\0' {
+ CaseMappingIter::One(chars[0]) // Including if chars[0] == '\0'
+ } else {
+ CaseMappingIter::Two(chars[0], chars[1])
+ }
+ } else {
+ CaseMappingIter::Three(chars[0], chars[1], chars[2])
+ }
+ }
+}
+
+impl Iterator for CaseMappingIter {
+ type Item = char;
+ fn next(&mut self) -> Option<char> {
+ match *self {
+ CaseMappingIter::Three(a, b, c) => {
+ *self = CaseMappingIter::Two(b, c);
+ Some(a)
+ }
+ CaseMappingIter::Two(b, c) => {
+ *self = CaseMappingIter::One(c);
+ Some(b)
+ }
+ CaseMappingIter::One(c) => {
+ *self = CaseMappingIter::Zero;
+ Some(c)
+ }
+ CaseMappingIter::Zero => None,
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let size = match self {
+ CaseMappingIter::Three(..) => 3,
+ CaseMappingIter::Two(..) => 2,
+ CaseMappingIter::One(_) => 1,
+ CaseMappingIter::Zero => 0,
+ };
+ (size, Some(size))
+ }
+}
+
+impl DoubleEndedIterator for CaseMappingIter {
+ fn next_back(&mut self) -> Option<char> {
+ match *self {
+ CaseMappingIter::Three(a, b, c) => {
+ *self = CaseMappingIter::Two(a, b);
+ Some(c)
+ }
+ CaseMappingIter::Two(b, c) => {
+ *self = CaseMappingIter::One(b);
+ Some(c)
+ }
+ CaseMappingIter::One(c) => {
+ *self = CaseMappingIter::Zero;
+ Some(c)
+ }
+ CaseMappingIter::Zero => None,
+ }
+ }
+}
+
+impl fmt::Display for CaseMappingIter {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ CaseMappingIter::Three(a, b, c) => {
+ f.write_char(a)?;
+ f.write_char(b)?;
+ f.write_char(c)
+ }
+ CaseMappingIter::Two(b, c) => {
+ f.write_char(b)?;
+ f.write_char(c)
+ }
+ CaseMappingIter::One(c) => f.write_char(c),
+ CaseMappingIter::Zero => Ok(()),
+ }
+ }
+}
+
+#[stable(feature = "char_struct_display", since = "1.16.0")]
+impl fmt::Display for ToLowercase {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&self.0, f)
+ }
+}
+
+#[stable(feature = "char_struct_display", since = "1.16.0")]
+impl fmt::Display for ToUppercase {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&self.0, f)
+ }
+}
+
+/// The error type returned when a checked char conversion fails.
+#[stable(feature = "u8_from_char", since = "1.59.0")]
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub struct TryFromCharError(pub(crate) ());
+
+#[stable(feature = "u8_from_char", since = "1.59.0")]
+impl fmt::Display for TryFromCharError {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ "unicode code point out of range".fmt(fmt)
+ }
+}
diff --git a/library/core/src/clone.rs b/library/core/src/clone.rs
new file mode 100644
index 000000000..06dca7e59
--- /dev/null
+++ b/library/core/src/clone.rs
@@ -0,0 +1,245 @@
+//! The `Clone` trait for types that cannot be 'implicitly copied'.
+//!
+//! In Rust, some simple types are "implicitly copyable" and when you
+//! assign them or pass them as arguments, the receiver will get a copy,
+//! leaving the original value in place. These types do not require
+//! allocation to copy and do not have finalizers (i.e., they do not
+//! contain owned boxes or implement [`Drop`]), so the compiler considers
+//! them cheap and safe to copy. For other types, copies must be made
+//! explicitly, conventionally by implementing the [`Clone`] trait and calling
+//! the [`clone`] method.
+//!
+//! [`clone`]: Clone::clone
+//!
+//! Basic usage example:
+//!
+//! ```
+//! let s = String::new(); // String type implements Clone
+//! let copy = s.clone(); // so we can clone it
+//! ```
+//!
+//! To easily implement the Clone trait, you can also use
+//! `#[derive(Clone)]`. Example:
+//!
+//! ```
+//! #[derive(Clone)] // we add the Clone trait to Morpheus struct
+//! struct Morpheus {
+//! blue_pill: f32,
+//! red_pill: i64,
+//! }
+//!
+//! fn main() {
+//! let f = Morpheus { blue_pill: 0.0, red_pill: 0 };
+//! let copy = f.clone(); // and now we can clone it!
+//! }
+//! ```
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::marker::Destruct;
+
+/// A common trait for the ability to explicitly duplicate an object.
+///
+/// Differs from [`Copy`] in that [`Copy`] is implicit and an inexpensive bit-wise copy, while
+/// `Clone` is always explicit and may or may not be expensive. In order to enforce
+/// these characteristics, Rust does not allow you to reimplement [`Copy`], but you
+/// may reimplement `Clone` and run arbitrary code.
+///
+/// Since `Clone` is more general than [`Copy`], you can automatically make anything
+/// [`Copy`] be `Clone` as well.
+///
+/// ## Derivable
+///
+/// This trait can be used with `#[derive]` if all fields are `Clone`. The `derive`d
+/// implementation of [`Clone`] calls [`clone`] on each field.
+///
+/// [`clone`]: Clone::clone
+///
+/// For a generic struct, `#[derive]` implements `Clone` conditionally by adding bound `Clone` on
+/// generic parameters.
+///
+/// ```
+/// // `derive` implements Clone for Reading<T> when T is Clone.
+/// #[derive(Clone)]
+/// struct Reading<T> {
+/// frequency: T,
+/// }
+/// ```
+///
+/// ## How can I implement `Clone`?
+///
+/// Types that are [`Copy`] should have a trivial implementation of `Clone`. More formally:
+/// if `T: Copy`, `x: T`, and `y: &T`, then `let x = y.clone();` is equivalent to `let x = *y;`.
+/// Manual implementations should be careful to uphold this invariant; however, unsafe code
+/// must not rely on it to ensure memory safety.
+///
+/// An example is a generic struct holding a function pointer. In this case, the
+/// implementation of `Clone` cannot be `derive`d, but can be implemented as:
+///
+/// ```
+/// struct Generate<T>(fn() -> T);
+///
+/// impl<T> Copy for Generate<T> {}
+///
+/// impl<T> Clone for Generate<T> {
+/// fn clone(&self) -> Self {
+/// *self
+/// }
+/// }
+/// ```
+///
+/// ## Additional implementors
+///
+/// In addition to the [implementors listed below][impls],
+/// the following types also implement `Clone`:
+///
+/// * Function item types (i.e., the distinct types defined for each function)
+/// * Function pointer types (e.g., `fn() -> i32`)
+/// * Closure types, if they capture no value from the environment
+/// or if all such captured values implement `Clone` themselves.
+/// Note that variables captured by shared reference always implement `Clone`
+/// (even if the referent doesn't),
+/// while variables captured by mutable reference never implement `Clone`.
+///
+/// [impls]: #implementors
+#[stable(feature = "rust1", since = "1.0.0")]
+#[lang = "clone"]
+#[rustc_diagnostic_item = "Clone"]
+#[rustc_trivial_field_reads]
+#[const_trait]
+pub trait Clone: Sized {
+ /// Returns a copy of the value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![allow(noop_method_call)]
+ /// let hello = "Hello"; // &str implements Clone
+ ///
+ /// assert_eq!("Hello", hello.clone());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use = "cloning is often expensive and is not expected to have side effects"]
+ fn clone(&self) -> Self;
+
+ /// Performs copy-assignment from `source`.
+ ///
+ /// `a.clone_from(&b)` is equivalent to `a = b.clone()` in functionality,
+ /// but can be overridden to reuse the resources of `a` to avoid unnecessary
+ /// allocations.
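+    ///
+    /// A minimal sketch of the intent, using `String`, whose `clone_from`
+    /// can reuse the destination's existing buffer:
+    ///
+    /// ```
+    /// let mut a = String::from("a long string with plenty of capacity");
+    /// let b = String::from("short");
+    /// a.clone_from(&b); // may reuse `a`'s allocation instead of making a new one
+    /// assert_eq!(a, "short");
+    /// ```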
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn clone_from(&mut self, source: &Self)
+ where
+ Self: ~const Destruct,
+ {
+ *self = source.clone()
+ }
+}
+
+/// Derive macro generating an impl of the trait `Clone`.
+#[rustc_builtin_macro]
+#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+#[allow_internal_unstable(core_intrinsics, derive_clone_copy)]
+pub macro Clone($item:item) {
+ /* compiler built-in */
+}
+
+// FIXME(aburka): these structs are used solely by #[derive] to
+// assert that every component of a type implements Clone or Copy.
+//
+// These structs should never appear in user code.
+#[doc(hidden)]
+#[allow(missing_debug_implementations)]
+#[unstable(
+ feature = "derive_clone_copy",
+ reason = "deriving hack, should not be public",
+ issue = "none"
+)]
+pub struct AssertParamIsClone<T: Clone + ?Sized> {
+ _field: crate::marker::PhantomData<T>,
+}
+#[doc(hidden)]
+#[allow(missing_debug_implementations)]
+#[unstable(
+ feature = "derive_clone_copy",
+ reason = "deriving hack, should not be public",
+ issue = "none"
+)]
+pub struct AssertParamIsCopy<T: Copy + ?Sized> {
+ _field: crate::marker::PhantomData<T>,
+}
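+
+// Roughly speaking, when `Copy` is also derived, the generated `clone` uses
+// `AssertParamIsCopy` along these lines to force the bound check:
+//
+//     fn clone(&self) -> Self {
+//         let _: AssertParamIsCopy<Self>;
+//         *self
+//     }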
+
+/// Implementations of `Clone` for primitive types.
+///
+/// Implementations that cannot be described in Rust
+/// are implemented in `traits::SelectionContext::copy_clone_conditions()`
+/// in `rustc_trait_selection`.
+mod impls {
+
+ use super::Clone;
+
+ macro_rules! impl_clone {
+ ($($t:ty)*) => {
+ $(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_clone", issue = "91805")]
+ impl const Clone for $t {
+ #[inline]
+ fn clone(&self) -> Self {
+ *self
+ }
+ }
+ )*
+ }
+ }
+
+ impl_clone! {
+ usize u8 u16 u32 u64 u128
+ isize i8 i16 i32 i64 i128
+ f32 f64
+ bool char
+ }
+
+ #[unstable(feature = "never_type", issue = "35121")]
+ #[rustc_const_unstable(feature = "const_clone", issue = "91805")]
+ impl const Clone for ! {
+ #[inline]
+ fn clone(&self) -> Self {
+ *self
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_clone", issue = "91805")]
+ impl<T: ?Sized> const Clone for *const T {
+ #[inline]
+ fn clone(&self) -> Self {
+ *self
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_clone", issue = "91805")]
+ impl<T: ?Sized> const Clone for *mut T {
+ #[inline]
+ fn clone(&self) -> Self {
+ *self
+ }
+ }
+
+ /// Shared references can be cloned, but mutable references *cannot*!
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_clone", issue = "91805")]
+ impl<T: ?Sized> const Clone for &T {
+ #[inline]
+ #[rustc_diagnostic_item = "noop_method_clone"]
+ fn clone(&self) -> Self {
+ *self
+ }
+ }
+
+ /// Shared references can be cloned, but mutable references *cannot*!
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<T: ?Sized> !Clone for &mut T {}
+}
diff --git a/library/core/src/cmp.rs b/library/core/src/cmp.rs
new file mode 100644
index 000000000..20bb67687
--- /dev/null
+++ b/library/core/src/cmp.rs
@@ -0,0 +1,1643 @@
+//! Functionality for ordering and comparison.
+//!
+//! This module contains various tools for ordering and comparing values. In
+//! summary:
+//!
+//! * [`Eq`] and [`PartialEq`] are traits that allow you to define total and
+//! partial equality between values, respectively. Implementing them overloads
+//! the `==` and `!=` operators.
+//! * [`Ord`] and [`PartialOrd`] are traits that allow you to define total and
+//! partial orderings between values, respectively. Implementing them overloads
+//! the `<`, `<=`, `>`, and `>=` operators.
+//! * [`Ordering`] is an enum returned by the main functions of [`Ord`] and
+//! [`PartialOrd`], and describes an ordering.
+//! * [`Reverse`] is a struct that allows you to easily reverse an ordering.
+//! * [`max`] and [`min`] are functions that build off of [`Ord`] and allow you
+//! to find the maximum or minimum of two values.
+//!
+//! For more details, see the respective documentation of each item in the list.
+//!
+//! [`max`]: Ord::max
+//! [`min`]: Ord::min
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::marker::Destruct;
+
+use self::Ordering::*;
+
+/// Trait for equality comparisons which are [partial equivalence
+/// relations](https://en.wikipedia.org/wiki/Partial_equivalence_relation).
+///
+/// `x.eq(y)` can also be written `x == y`, and `x.ne(y)` can be written `x != y`.
+/// We use the easier-to-read infix notation in the remainder of this documentation.
+///
+/// This trait allows for partial equality, for types that do not have a full
+/// equivalence relation. For example, in floating point numbers `NaN != NaN`,
+/// so floating point types implement `PartialEq` but not [`trait@Eq`].
+///
+/// Implementations must ensure that `eq` and `ne` are consistent with each other:
+///
+/// - `a != b` if and only if `!(a == b)`
+/// (ensured by the default implementation).
+///
+/// If [`PartialOrd`] or [`Ord`] are also implemented for `Self` and `Rhs`, their methods must also
+/// be consistent with `PartialEq` (see the documentation of those traits for the exact
+/// requirements). It's easy to accidentally make them disagree by deriving some of the traits and
+/// manually implementing others.
+///
+/// The equality relation `==` must satisfy the following conditions
+/// (for all `a`, `b`, `c` of type `A`, `B`, `C`):
+///
+/// - **Symmetric**: if `A: PartialEq<B>` and `B: PartialEq<A>`, then **`a == b`
+/// implies `b == a`**; and
+///
+/// - **Transitive**: if `A: PartialEq<B>` and `B: PartialEq<C>` and `A:
+/// PartialEq<C>`, then **`a == b` and `b == c` implies `a == c`**.
+///
+/// Note that the `B: PartialEq<A>` (symmetric) and `A: PartialEq<C>`
+/// (transitive) impls are not forced to exist, but these requirements apply
+/// whenever they do exist.
+///
+/// ## Derivable
+///
+/// This trait can be used with `#[derive]`. When `derive`d on structs, two
+/// instances are equal if all fields are equal, and not equal if any fields
+/// are not equal. When `derive`d on enums, two instances are equal if they
+/// are the same variant and all fields are equal.
+///
+/// ## How can I implement `PartialEq`?
+///
+/// An example implementation for a domain in which two books are considered
+/// the same book if their ISBN matches, even if the formats differ:
+///
+/// ```
+/// enum BookFormat {
+/// Paperback,
+/// Hardback,
+/// Ebook,
+/// }
+///
+/// struct Book {
+/// isbn: i32,
+/// format: BookFormat,
+/// }
+///
+/// impl PartialEq for Book {
+/// fn eq(&self, other: &Self) -> bool {
+/// self.isbn == other.isbn
+/// }
+/// }
+///
+/// let b1 = Book { isbn: 3, format: BookFormat::Paperback };
+/// let b2 = Book { isbn: 3, format: BookFormat::Ebook };
+/// let b3 = Book { isbn: 10, format: BookFormat::Paperback };
+///
+/// assert!(b1 == b2);
+/// assert!(b1 != b3);
+/// ```
+///
+/// ## How can I compare two different types?
+///
+/// The type you can compare with is controlled by `PartialEq`'s type parameter.
+/// For example, let's tweak our previous code a bit:
+///
+/// ```
+/// // The derive implements <BookFormat> == <BookFormat> comparisons
+/// #[derive(PartialEq)]
+/// enum BookFormat {
+/// Paperback,
+/// Hardback,
+/// Ebook,
+/// }
+///
+/// struct Book {
+/// isbn: i32,
+/// format: BookFormat,
+/// }
+///
+/// // Implement <Book> == <BookFormat> comparisons
+/// impl PartialEq<BookFormat> for Book {
+/// fn eq(&self, other: &BookFormat) -> bool {
+/// self.format == *other
+/// }
+/// }
+///
+/// // Implement <BookFormat> == <Book> comparisons
+/// impl PartialEq<Book> for BookFormat {
+/// fn eq(&self, other: &Book) -> bool {
+/// *self == other.format
+/// }
+/// }
+///
+/// let b1 = Book { isbn: 3, format: BookFormat::Paperback };
+///
+/// assert!(b1 == BookFormat::Paperback);
+/// assert!(BookFormat::Ebook != b1);
+/// ```
+///
+/// By changing `impl PartialEq for Book` to `impl PartialEq<BookFormat> for Book`,
+/// we allow `BookFormat`s to be compared with `Book`s.
+///
+/// A comparison like the one above, which ignores some fields of the struct,
+/// can be dangerous. It can easily lead to an unintended violation of the
+/// requirements for a partial equivalence relation. For example, if we kept
+/// the above implementation of `PartialEq<Book>` for `BookFormat` and added an
+/// implementation of `PartialEq<Book>` for `Book` (either via a `#[derive]` or
+/// via the manual implementation from the first example) then the result would
+/// violate transitivity:
+///
+/// ```should_panic
+/// #[derive(PartialEq)]
+/// enum BookFormat {
+/// Paperback,
+/// Hardback,
+/// Ebook,
+/// }
+///
+/// #[derive(PartialEq)]
+/// struct Book {
+/// isbn: i32,
+/// format: BookFormat,
+/// }
+///
+/// impl PartialEq<BookFormat> for Book {
+/// fn eq(&self, other: &BookFormat) -> bool {
+/// self.format == *other
+/// }
+/// }
+///
+/// impl PartialEq<Book> for BookFormat {
+/// fn eq(&self, other: &Book) -> bool {
+/// *self == other.format
+/// }
+/// }
+///
+/// fn main() {
+/// let b1 = Book { isbn: 1, format: BookFormat::Paperback };
+/// let b2 = Book { isbn: 2, format: BookFormat::Paperback };
+///
+/// assert!(b1 == BookFormat::Paperback);
+/// assert!(BookFormat::Paperback == b2);
+///
+/// // The following should hold by transitivity but doesn't.
+/// assert!(b1 == b2); // <-- PANICS
+/// }
+/// ```
+///
+/// # Examples
+///
+/// ```
+/// let x: u32 = 0;
+/// let y: u32 = 1;
+///
+/// assert_eq!(x == y, false);
+/// assert_eq!(x.eq(&y), false);
+/// ```
+///
+/// [`eq`]: PartialEq::eq
+/// [`ne`]: PartialEq::ne
+#[lang = "eq"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(alias = "==")]
+#[doc(alias = "!=")]
+#[cfg_attr(
+ bootstrap,
+ rustc_on_unimplemented(
+ message = "can't compare `{Self}` with `{Rhs}`",
+ label = "no implementation for `{Self} == {Rhs}`"
+ )
+)]
+#[cfg_attr(
+ not(bootstrap),
+ rustc_on_unimplemented(
+ message = "can't compare `{Self}` with `{Rhs}`",
+ label = "no implementation for `{Self} == {Rhs}`",
+ append_const_msg,
+ )
+)]
+#[const_trait]
+#[rustc_diagnostic_item = "PartialEq"]
+pub trait PartialEq<Rhs: ?Sized = Self> {
+    /// This method tests whether `self` and `other` are equal, and is used
+    /// by `==`.
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn eq(&self, other: &Rhs) -> bool;
+
+ /// This method tests for `!=`.
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn ne(&self, other: &Rhs) -> bool {
+ !self.eq(other)
+ }
+}
+
+/// Derive macro generating an impl of the trait `PartialEq`.
+#[rustc_builtin_macro]
+#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+#[allow_internal_unstable(core_intrinsics, structural_match)]
+pub macro PartialEq($item:item) {
+ /* compiler built-in */
+}
+
+/// Trait for equality comparisons which are [equivalence relations](
+/// https://en.wikipedia.org/wiki/Equivalence_relation).
+///
+/// This means that, in addition to `a == b` and `a != b` being strict inverses, the equality
+/// must be (for all `a`, `b` and `c`):
+///
+/// - reflexive: `a == a`;
+/// - symmetric: `a == b` implies `b == a`; and
+/// - transitive: `a == b` and `b == c` implies `a == c`.
+///
+/// This property cannot be checked by the compiler, and therefore `Eq` implies
+/// [`PartialEq`], and has no extra methods.
+///
+/// ## Derivable
+///
+/// This trait can be used with `#[derive]`. When `derive`d, because `Eq` has
+/// no extra methods, it is only informing the compiler that this is an
+/// equivalence relation rather than a partial equivalence relation. Note that
+/// the `derive` strategy requires all fields are `Eq`, which isn't
+/// always desired.
+///
+/// ## How can I implement `Eq`?
+///
+/// If you cannot use the `derive` strategy, specify that your type implements
+/// `Eq`, which has no methods:
+///
+/// ```
+/// enum BookFormat { Paperback, Hardback, Ebook }
+/// struct Book {
+/// isbn: i32,
+/// format: BookFormat,
+/// }
+/// impl PartialEq for Book {
+/// fn eq(&self, other: &Self) -> bool {
+/// self.isbn == other.isbn
+/// }
+/// }
+/// impl Eq for Book {}
+/// ```
+#[doc(alias = "==")]
+#[doc(alias = "!=")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_diagnostic_item = "Eq"]
+pub trait Eq: PartialEq<Self> {
+    // this method is used solely by `#[derive(Eq)]` to assert
+    // that every component of a type implements `Eq`
+    // itself; the current deriving infrastructure means doing this
+    // assertion without using a method on this trait is nearly
+    // impossible.
+ //
+ // This should never be implemented by hand.
+ #[doc(hidden)]
+ #[no_coverage] // rust-lang/rust#84605
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn assert_receiver_is_total_eq(&self) {}
+}
+
+/// Derive macro generating an impl of the trait `Eq`.
+#[rustc_builtin_macro]
+#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+#[allow_internal_unstable(core_intrinsics, derive_eq, structural_match, no_coverage)]
+pub macro Eq($item:item) {
+ /* compiler built-in */
+}
+
+// FIXME: this struct is used solely by #[derive] to
+// assert that every component of a type implements Eq.
+//
+// This struct should never appear in user code.
+#[doc(hidden)]
+#[allow(missing_debug_implementations)]
+#[unstable(feature = "derive_eq", reason = "deriving hack, should not be public", issue = "none")]
+pub struct AssertParamIsEq<T: Eq + ?Sized> {
+ _field: crate::marker::PhantomData<T>,
+}
+
+/// An `Ordering` is the result of a comparison between two values.
+///
+/// # Examples
+///
+/// ```
+/// use std::cmp::Ordering;
+///
+/// let result = 1.cmp(&2);
+/// assert_eq!(Ordering::Less, result);
+///
+/// let result = 1.cmp(&1);
+/// assert_eq!(Ordering::Equal, result);
+///
+/// let result = 2.cmp(&1);
+/// assert_eq!(Ordering::Greater, result);
+/// ```
+#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[repr(i8)]
+pub enum Ordering {
+ /// An ordering where a compared value is less than another.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Less = -1,
+ /// An ordering where a compared value is equal to another.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Equal = 0,
+ /// An ordering where a compared value is greater than another.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Greater = 1,
+}
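+
+// The explicit `-1`/`0`/`1` discriminants are relied upon below: the
+// `Ord` and `PartialOrd` impls for `Ordering` compare the variants as integers.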
+
+impl Ordering {
+ /// Returns `true` if the ordering is the `Equal` variant.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cmp::Ordering;
+ ///
+ /// assert_eq!(Ordering::Less.is_eq(), false);
+ /// assert_eq!(Ordering::Equal.is_eq(), true);
+ /// assert_eq!(Ordering::Greater.is_eq(), false);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[rustc_const_stable(feature = "ordering_helpers", since = "1.53.0")]
+ #[stable(feature = "ordering_helpers", since = "1.53.0")]
+ pub const fn is_eq(self) -> bool {
+ matches!(self, Equal)
+ }
+
+ /// Returns `true` if the ordering is not the `Equal` variant.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cmp::Ordering;
+ ///
+ /// assert_eq!(Ordering::Less.is_ne(), true);
+ /// assert_eq!(Ordering::Equal.is_ne(), false);
+ /// assert_eq!(Ordering::Greater.is_ne(), true);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[rustc_const_stable(feature = "ordering_helpers", since = "1.53.0")]
+ #[stable(feature = "ordering_helpers", since = "1.53.0")]
+ pub const fn is_ne(self) -> bool {
+ !matches!(self, Equal)
+ }
+
+ /// Returns `true` if the ordering is the `Less` variant.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cmp::Ordering;
+ ///
+ /// assert_eq!(Ordering::Less.is_lt(), true);
+ /// assert_eq!(Ordering::Equal.is_lt(), false);
+ /// assert_eq!(Ordering::Greater.is_lt(), false);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[rustc_const_stable(feature = "ordering_helpers", since = "1.53.0")]
+ #[stable(feature = "ordering_helpers", since = "1.53.0")]
+ pub const fn is_lt(self) -> bool {
+ matches!(self, Less)
+ }
+
+ /// Returns `true` if the ordering is the `Greater` variant.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cmp::Ordering;
+ ///
+ /// assert_eq!(Ordering::Less.is_gt(), false);
+ /// assert_eq!(Ordering::Equal.is_gt(), false);
+ /// assert_eq!(Ordering::Greater.is_gt(), true);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[rustc_const_stable(feature = "ordering_helpers", since = "1.53.0")]
+ #[stable(feature = "ordering_helpers", since = "1.53.0")]
+ pub const fn is_gt(self) -> bool {
+ matches!(self, Greater)
+ }
+
+ /// Returns `true` if the ordering is either the `Less` or `Equal` variant.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cmp::Ordering;
+ ///
+ /// assert_eq!(Ordering::Less.is_le(), true);
+ /// assert_eq!(Ordering::Equal.is_le(), true);
+ /// assert_eq!(Ordering::Greater.is_le(), false);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[rustc_const_stable(feature = "ordering_helpers", since = "1.53.0")]
+ #[stable(feature = "ordering_helpers", since = "1.53.0")]
+ pub const fn is_le(self) -> bool {
+ !matches!(self, Greater)
+ }
+
+ /// Returns `true` if the ordering is either the `Greater` or `Equal` variant.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cmp::Ordering;
+ ///
+ /// assert_eq!(Ordering::Less.is_ge(), false);
+ /// assert_eq!(Ordering::Equal.is_ge(), true);
+ /// assert_eq!(Ordering::Greater.is_ge(), true);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[rustc_const_stable(feature = "ordering_helpers", since = "1.53.0")]
+ #[stable(feature = "ordering_helpers", since = "1.53.0")]
+ pub const fn is_ge(self) -> bool {
+ !matches!(self, Less)
+ }
+
+ /// Reverses the `Ordering`.
+ ///
+ /// * `Less` becomes `Greater`.
+ /// * `Greater` becomes `Less`.
+ /// * `Equal` becomes `Equal`.
+ ///
+ /// # Examples
+ ///
+ /// Basic behavior:
+ ///
+ /// ```
+ /// use std::cmp::Ordering;
+ ///
+ /// assert_eq!(Ordering::Less.reverse(), Ordering::Greater);
+ /// assert_eq!(Ordering::Equal.reverse(), Ordering::Equal);
+ /// assert_eq!(Ordering::Greater.reverse(), Ordering::Less);
+ /// ```
+ ///
+ /// This method can be used to reverse a comparison:
+ ///
+ /// ```
+ /// let data: &mut [_] = &mut [2, 10, 5, 8];
+ ///
+ /// // sort the array from largest to smallest.
+ /// data.sort_by(|a, b| a.cmp(b).reverse());
+ ///
+ /// let b: &mut [_] = &mut [10, 8, 5, 2];
+ /// assert!(data == b);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[rustc_const_stable(feature = "const_ordering", since = "1.48.0")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const fn reverse(self) -> Ordering {
+ match self {
+ Less => Greater,
+ Equal => Equal,
+ Greater => Less,
+ }
+ }
+
+ /// Chains two orderings.
+ ///
+ /// Returns `self` when it's not `Equal`. Otherwise returns `other`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cmp::Ordering;
+ ///
+ /// let result = Ordering::Equal.then(Ordering::Less);
+ /// assert_eq!(result, Ordering::Less);
+ ///
+ /// let result = Ordering::Less.then(Ordering::Equal);
+ /// assert_eq!(result, Ordering::Less);
+ ///
+ /// let result = Ordering::Less.then(Ordering::Greater);
+ /// assert_eq!(result, Ordering::Less);
+ ///
+ /// let result = Ordering::Equal.then(Ordering::Equal);
+ /// assert_eq!(result, Ordering::Equal);
+ ///
+ /// let x: (i64, i64, i64) = (1, 2, 7);
+ /// let y: (i64, i64, i64) = (1, 5, 3);
+ /// let result = x.0.cmp(&y.0).then(x.1.cmp(&y.1)).then(x.2.cmp(&y.2));
+ ///
+ /// assert_eq!(result, Ordering::Less);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[rustc_const_stable(feature = "const_ordering", since = "1.48.0")]
+ #[stable(feature = "ordering_chaining", since = "1.17.0")]
+ pub const fn then(self, other: Ordering) -> Ordering {
+ match self {
+ Equal => other,
+ _ => self,
+ }
+ }
+
+ /// Chains the ordering with the given function.
+ ///
+ /// Returns `self` when it's not `Equal`. Otherwise calls `f` and returns
+ /// the result.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cmp::Ordering;
+ ///
+ /// let result = Ordering::Equal.then_with(|| Ordering::Less);
+ /// assert_eq!(result, Ordering::Less);
+ ///
+ /// let result = Ordering::Less.then_with(|| Ordering::Equal);
+ /// assert_eq!(result, Ordering::Less);
+ ///
+ /// let result = Ordering::Less.then_with(|| Ordering::Greater);
+ /// assert_eq!(result, Ordering::Less);
+ ///
+ /// let result = Ordering::Equal.then_with(|| Ordering::Equal);
+ /// assert_eq!(result, Ordering::Equal);
+ ///
+ /// let x: (i64, i64, i64) = (1, 2, 7);
+ /// let y: (i64, i64, i64) = (1, 5, 3);
+ /// let result = x.0.cmp(&y.0).then_with(|| x.1.cmp(&y.1)).then_with(|| x.2.cmp(&y.2));
+ ///
+ /// assert_eq!(result, Ordering::Less);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "ordering_chaining", since = "1.17.0")]
+ pub fn then_with<F: FnOnce() -> Ordering>(self, f: F) -> Ordering {
+ match self {
+ Equal => f(),
+ _ => self,
+ }
+ }
+}
+
+/// A helper struct for reverse ordering.
+///
+/// This struct is a helper to be used with functions like [`Vec::sort_by_key`] and
+/// can be used to reverse the ordering of part of a key.
+///
+/// [`Vec::sort_by_key`]: ../../std/vec/struct.Vec.html#method.sort_by_key
+///
+/// # Examples
+///
+/// ```
+/// use std::cmp::Reverse;
+///
+/// let mut v = vec![1, 2, 3, 4, 5, 6];
+/// v.sort_by_key(|&num| (num > 3, Reverse(num)));
+/// assert_eq!(v, vec![3, 2, 1, 6, 5, 4]);
+/// ```
+#[derive(PartialEq, Eq, Debug, Copy, Default, Hash)]
+#[stable(feature = "reverse_cmp_key", since = "1.19.0")]
+#[repr(transparent)]
+pub struct Reverse<T>(#[stable(feature = "reverse_cmp_key", since = "1.19.0")] pub T);
+
+#[stable(feature = "reverse_cmp_key", since = "1.19.0")]
+#[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+impl<T: ~const PartialOrd> const PartialOrd for Reverse<T> {
+ #[inline]
+ fn partial_cmp(&self, other: &Reverse<T>) -> Option<Ordering> {
+ other.0.partial_cmp(&self.0)
+ }
+
+ #[inline]
+ fn lt(&self, other: &Self) -> bool {
+ other.0 < self.0
+ }
+ #[inline]
+ fn le(&self, other: &Self) -> bool {
+ other.0 <= self.0
+ }
+ #[inline]
+ fn gt(&self, other: &Self) -> bool {
+ other.0 > self.0
+ }
+ #[inline]
+ fn ge(&self, other: &Self) -> bool {
+ other.0 >= self.0
+ }
+}
+
+#[stable(feature = "reverse_cmp_key", since = "1.19.0")]
+impl<T: Ord> Ord for Reverse<T> {
+ #[inline]
+ fn cmp(&self, other: &Reverse<T>) -> Ordering {
+ other.0.cmp(&self.0)
+ }
+}
+
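+// `Clone` is implemented by hand rather than derived so that `clone_from`
+// can forward to the inner value's `clone_from`.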
+#[stable(feature = "reverse_cmp_key", since = "1.19.0")]
+impl<T: Clone> Clone for Reverse<T> {
+ #[inline]
+ fn clone(&self) -> Reverse<T> {
+ Reverse(self.0.clone())
+ }
+
+ #[inline]
+ fn clone_from(&mut self, other: &Self) {
+ self.0.clone_from(&other.0)
+ }
+}
+
+/// Trait for types that form a [total order](https://en.wikipedia.org/wiki/Total_order).
+///
+/// Implementations must be consistent with the [`PartialOrd`] implementation, and ensure
+/// `max`, `min`, and `clamp` are consistent with `cmp`:
+///
+/// - `partial_cmp(a, b) == Some(cmp(a, b))`.
+/// - `max(a, b) == max_by(a, b, cmp)` (ensured by the default implementation).
+/// - `min(a, b) == min_by(a, b, cmp)` (ensured by the default implementation).
+/// - For `a.clamp(min, max)`, see the [method docs](#method.clamp)
+/// (ensured by the default implementation).
+///
+/// It's easy to accidentally make `cmp` and `partial_cmp` disagree by
+/// deriving some of the traits and manually implementing others.
+///
+/// ## Corollaries
+///
+/// From the above and the requirements of `PartialOrd`, it follows that `<` defines a strict total order.
+/// This means that for all `a`, `b` and `c`:
+///
+/// - exactly one of `a < b`, `a == b` or `a > b` is true; and
+/// - `<` is transitive: `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
+///
+/// ## Derivable
+///
+/// This trait can be used with `#[derive]`.
+///
+/// When `derive`d on structs, it will produce a
+/// [lexicographic](https://en.wikipedia.org/wiki/Lexicographic_order) ordering
+/// based on the top-to-bottom declaration order of the struct's members.
+///
+/// When `derive`d on enums, variants are ordered by their discriminants.
+/// By default, the discriminant is smallest for variants at the top, and
+/// largest for variants at the bottom. Here's an example:
+///
+/// ```
+/// #[derive(PartialEq, Eq, PartialOrd, Ord)]
+/// enum E {
+/// Top,
+/// Bottom,
+/// }
+///
+/// assert!(E::Top < E::Bottom);
+/// ```
+///
+/// However, manually setting the discriminants can override this default
+/// behavior:
+///
+/// ```
+/// #[derive(PartialEq, Eq, PartialOrd, Ord)]
+/// enum E {
+/// Top = 2,
+/// Bottom = 1,
+/// }
+///
+/// assert!(E::Bottom < E::Top);
+/// ```
+///
+/// ## Lexicographical comparison
+///
+/// Lexicographical comparison is an operation with the following properties
+/// (illustrated in the example after this list):
+/// - Two sequences are compared element by element.
+/// - The first mismatching element defines which sequence is lexicographically less or greater than the other.
+/// - If one sequence is a prefix of another, the shorter sequence is lexicographically less than the other.
+/// - If two sequences have equivalent elements and are of the same length, then the sequences are lexicographically equal.
+/// - An empty sequence is lexicographically less than any non-empty sequence.
+/// - Two empty sequences are lexicographically equal.
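+///
+/// For example, a brief sketch of these rules in action:
+///
+/// ```
+/// let a: &[i32] = &[1, 2];
+/// let b: &[i32] = &[1, 2, 3];
+/// assert!([1, 2, 3] < [1, 2, 4]); // the first mismatching element decides
+/// assert!(a < b); // a prefix is less than the longer sequence
+/// assert!("" < "a"); // an empty sequence is less than any non-empty one
+/// ```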
+///
+/// ## How can I implement `Ord`?
+///
+/// `Ord` requires that the type also be [`PartialOrd`] and [`Eq`] (which requires [`PartialEq`]).
+///
+/// Then you must define an implementation for [`cmp`]. You may find it useful to use
+/// [`cmp`] on your type's fields.
+///
+/// Here's an example where you want to sort people by height only, disregarding `id`
+/// and `name`:
+///
+/// ```
+/// use std::cmp::Ordering;
+///
+/// #[derive(Eq)]
+/// struct Person {
+/// id: u32,
+/// name: String,
+/// height: u32,
+/// }
+///
+/// impl Ord for Person {
+/// fn cmp(&self, other: &Self) -> Ordering {
+/// self.height.cmp(&other.height)
+/// }
+/// }
+///
+/// impl PartialOrd for Person {
+/// fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+/// Some(self.cmp(other))
+/// }
+/// }
+///
+/// impl PartialEq for Person {
+/// fn eq(&self, other: &Self) -> bool {
+/// self.height == other.height
+/// }
+/// }
+/// ```
+///
+/// [`cmp`]: Ord::cmp
+#[doc(alias = "<")]
+#[doc(alias = ">")]
+#[doc(alias = "<=")]
+#[doc(alias = ">=")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_diagnostic_item = "Ord"]
+#[const_trait]
+pub trait Ord: Eq + PartialOrd<Self> {
+ /// This method returns an [`Ordering`] between `self` and `other`.
+ ///
+ /// By convention, `self.cmp(&other)` returns the ordering matching the expression
+ /// `self <operator> other` if true.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cmp::Ordering;
+ ///
+ /// assert_eq!(5.cmp(&10), Ordering::Less);
+ /// assert_eq!(10.cmp(&5), Ordering::Greater);
+ /// assert_eq!(5.cmp(&5), Ordering::Equal);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn cmp(&self, other: &Self) -> Ordering;
+
+ /// Compares and returns the maximum of two values.
+ ///
+ /// Returns the second argument if the comparison determines them to be equal.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(2, 1.max(2));
+ /// assert_eq!(2, 2.max(2));
+ /// ```
+ #[stable(feature = "ord_max_min", since = "1.21.0")]
+ #[inline]
+ #[must_use]
+ fn max(self, other: Self) -> Self
+ where
+ Self: Sized,
+ Self: ~const Destruct,
+ {
+ // HACK(fee1-dead): go back to using `self.max_by(other, Ord::cmp)`
+ // when trait methods are allowed to be used when a const closure is
+ // expected.
+ match self.cmp(&other) {
+ Ordering::Less | Ordering::Equal => other,
+ Ordering::Greater => self,
+ }
+ }
+
+ /// Compares and returns the minimum of two values.
+ ///
+ /// Returns the first argument if the comparison determines them to be equal.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(1, 1.min(2));
+ /// assert_eq!(2, 2.min(2));
+ /// ```
+ #[stable(feature = "ord_max_min", since = "1.21.0")]
+ #[inline]
+ #[must_use]
+ fn min(self, other: Self) -> Self
+ where
+ Self: Sized,
+ Self: ~const Destruct,
+ {
+ // HACK(fee1-dead): go back to using `self.min_by(other, Ord::cmp)`
+ // when trait methods are allowed to be used when a const closure is
+ // expected.
+ match self.cmp(&other) {
+ Ordering::Less | Ordering::Equal => self,
+ Ordering::Greater => other,
+ }
+ }
+
+    /// Restricts a value to a certain interval.
+ ///
+ /// Returns `max` if `self` is greater than `max`, and `min` if `self` is
+ /// less than `min`. Otherwise this returns `self`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `min > max`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!((-3).clamp(-2, 1) == -2);
+ /// assert!(0.clamp(-2, 1) == 0);
+ /// assert!(2.clamp(-2, 1) == 1);
+ /// ```
+ #[must_use]
+ #[stable(feature = "clamp", since = "1.50.0")]
+ fn clamp(self, min: Self, max: Self) -> Self
+ where
+ Self: Sized,
+ Self: ~const Destruct,
+ Self: ~const PartialOrd,
+ {
+ assert!(min <= max);
+ if self < min {
+ min
+ } else if self > max {
+ max
+ } else {
+ self
+ }
+ }
+}
+
+/// Derive macro generating an impl of the trait `Ord`.
+#[rustc_builtin_macro]
+#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+#[allow_internal_unstable(core_intrinsics)]
+pub macro Ord($item:item) {
+ /* compiler built-in */
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+impl const Ord for Ordering {
+ #[inline]
+ fn cmp(&self, other: &Ordering) -> Ordering {
+ (*self as i32).cmp(&(*other as i32))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+impl const PartialOrd for Ordering {
+ #[inline]
+ fn partial_cmp(&self, other: &Ordering) -> Option<Ordering> {
+ (*self as i32).partial_cmp(&(*other as i32))
+ }
+}
+
+/// Trait for types that form a [partial order](https://en.wikipedia.org/wiki/Partial_order).
+///
+/// The `lt`, `le`, `gt`, and `ge` methods of this trait can be called using
+/// the `<`, `<=`, `>`, and `>=` operators, respectively.
+///
+/// The methods of this trait must be consistent with each other and with those of [`PartialEq`].
+/// The following conditions must hold:
+///
+/// 1. `a == b` if and only if `partial_cmp(a, b) == Some(Equal)`.
+/// 2. `a < b` if and only if `partial_cmp(a, b) == Some(Less)`
+/// 3. `a > b` if and only if `partial_cmp(a, b) == Some(Greater)`
+/// 4. `a <= b` if and only if `a < b || a == b`
+/// 5. `a >= b` if and only if `a > b || a == b`
+/// 6. `a != b` if and only if `!(a == b)`.
+///
+/// Conditions 2–5 above are ensured by the default implementation.
+/// Condition 6 is already ensured by [`PartialEq`].
+///
+/// If [`Ord`] is also implemented for `Self` and `Rhs`, it must also be consistent with
+/// `partial_cmp` (see the documentation of that trait for the exact requirements). It's
+/// easy to accidentally make them disagree by deriving some of the traits and manually
+/// implementing others.
+///
+/// The comparison must satisfy, for all `a`, `b` and `c`:
+///
+/// - transitivity: `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
+/// - duality: `a < b` if and only if `b > a`.
+///
+/// Note that these requirements mean that the trait itself must be implemented symmetrically and
+/// transitively: if `T: PartialOrd<U>` and `U: PartialOrd<V>` then `U: PartialOrd<T>` and `T:
+/// PartialOrd<V>`.
+///
+/// ## Corollaries
+///
+/// The following corollaries follow from the above requirements:
+///
+/// - irreflexivity of `<` and `>`: `!(a < a)`, `!(a > a)`
+/// - transitivity of `>`: if `a > b` and `b > c` then `a > c`
+/// - duality of `partial_cmp`: `partial_cmp(a, b) == partial_cmp(b, a).map(Ordering::reverse)`
+///
+/// ## Derivable
+///
+/// This trait can be used with `#[derive]`.
+///
+/// When `derive`d on structs, it will produce a
+/// [lexicographic](https://en.wikipedia.org/wiki/Lexicographic_order) ordering
+/// based on the top-to-bottom declaration order of the struct's members.
+///
+/// When `derive`d on enums, variants are ordered by their discriminants.
+/// By default, the discriminant is smallest for variants at the top, and
+/// largest for variants at the bottom. Here's an example:
+///
+/// ```
+/// #[derive(PartialEq, PartialOrd)]
+/// enum E {
+/// Top,
+/// Bottom,
+/// }
+///
+/// assert!(E::Top < E::Bottom);
+/// ```
+///
+/// However, manually setting the discriminants can override this default
+/// behavior:
+///
+/// ```
+/// #[derive(PartialEq, PartialOrd)]
+/// enum E {
+/// Top = 2,
+/// Bottom = 1,
+/// }
+///
+/// assert!(E::Bottom < E::Top);
+/// ```
+///
+/// ## How can I implement `PartialOrd`?
+///
+/// `PartialOrd` only requires implementation of the [`partial_cmp`] method, with the others
+/// generated from default implementations.
+///
+/// However, it remains possible to implement the others separately for types which do not have a
+/// total order. For example, for floating point numbers, `NaN < 0 == false` and `NaN >= 0 ==
+/// false` (cf. IEEE 754-2008 section 5.11).
+///
+/// `PartialOrd` requires your type to be [`PartialEq`].
+///
+/// If your type is [`Ord`], you can implement [`partial_cmp`] by using [`cmp`]:
+///
+/// ```
+/// use std::cmp::Ordering;
+///
+/// #[derive(Eq)]
+/// struct Person {
+/// id: u32,
+/// name: String,
+/// height: u32,
+/// }
+///
+/// impl PartialOrd for Person {
+/// fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+/// Some(self.cmp(other))
+/// }
+/// }
+///
+/// impl Ord for Person {
+/// fn cmp(&self, other: &Self) -> Ordering {
+/// self.height.cmp(&other.height)
+/// }
+/// }
+///
+/// impl PartialEq for Person {
+/// fn eq(&self, other: &Self) -> bool {
+/// self.height == other.height
+/// }
+/// }
+/// ```
+///
+/// You may also find it useful to use [`partial_cmp`] on your type's fields. Here
+/// is an example of a `Person` type with a floating-point `height` field that
+/// is the only field used for sorting:
+///
+/// ```
+/// use std::cmp::Ordering;
+///
+/// struct Person {
+/// id: u32,
+/// name: String,
+/// height: f64,
+/// }
+///
+/// impl PartialOrd for Person {
+/// fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+/// self.height.partial_cmp(&other.height)
+/// }
+/// }
+///
+/// impl PartialEq for Person {
+/// fn eq(&self, other: &Self) -> bool {
+/// self.height == other.height
+/// }
+/// }
+/// ```
+///
+/// # Examples
+///
+/// ```
+/// let x: u32 = 0;
+/// let y: u32 = 1;
+///
+/// assert_eq!(x < y, true);
+/// assert_eq!(x.lt(&y), true);
+/// ```
+///
+/// [`partial_cmp`]: PartialOrd::partial_cmp
+/// [`cmp`]: Ord::cmp
+#[lang = "partial_ord"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(alias = ">")]
+#[doc(alias = "<")]
+#[doc(alias = "<=")]
+#[doc(alias = ">=")]
+#[cfg_attr(
+ bootstrap,
+ rustc_on_unimplemented(
+ message = "can't compare `{Self}` with `{Rhs}`",
+ label = "no implementation for `{Self} < {Rhs}` and `{Self} > {Rhs}`",
+ )
+)]
+#[cfg_attr(
+ not(bootstrap),
+ rustc_on_unimplemented(
+ message = "can't compare `{Self}` with `{Rhs}`",
+ label = "no implementation for `{Self} < {Rhs}` and `{Self} > {Rhs}`",
+ append_const_msg,
+ )
+)]
+#[const_trait]
+#[rustc_diagnostic_item = "PartialOrd"]
+pub trait PartialOrd<Rhs: ?Sized = Self>: PartialEq<Rhs> {
+ /// This method returns an ordering between `self` and `other` values if one exists.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cmp::Ordering;
+ ///
+ /// let result = 1.0.partial_cmp(&2.0);
+ /// assert_eq!(result, Some(Ordering::Less));
+ ///
+ /// let result = 1.0.partial_cmp(&1.0);
+ /// assert_eq!(result, Some(Ordering::Equal));
+ ///
+ /// let result = 2.0.partial_cmp(&1.0);
+ /// assert_eq!(result, Some(Ordering::Greater));
+ /// ```
+ ///
+ /// When comparison is impossible:
+ ///
+ /// ```
+ /// let result = f64::NAN.partial_cmp(&1.0);
+ /// assert_eq!(result, None);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn partial_cmp(&self, other: &Rhs) -> Option<Ordering>;
+
+ /// This method tests less than (for `self` and `other`) and is used by the `<` operator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let result = 1.0 < 2.0;
+ /// assert_eq!(result, true);
+ ///
+ /// let result = 2.0 < 1.0;
+ /// assert_eq!(result, false);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn lt(&self, other: &Rhs) -> bool {
+ matches!(self.partial_cmp(other), Some(Less))
+ }
+
+ /// This method tests less than or equal to (for `self` and `other`) and is used by the `<=`
+ /// operator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let result = 1.0 <= 2.0;
+ /// assert_eq!(result, true);
+ ///
+ /// let result = 2.0 <= 2.0;
+ /// assert_eq!(result, true);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn le(&self, other: &Rhs) -> bool {
+ // Pattern `Some(Less | Eq)` optimizes worse than negating `None | Some(Greater)`.
+ // FIXME: The root cause was fixed upstream in LLVM with:
+ // https://github.com/llvm/llvm-project/commit/9bad7de9a3fb844f1ca2965f35d0c2a3d1e11775
+ // Revert this workaround once support for LLVM 12 gets dropped.
+ !matches!(self.partial_cmp(other), None | Some(Greater))
+ }
+
+ /// This method tests greater than (for `self` and `other`) and is used by the `>` operator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let result = 1.0 > 2.0;
+ /// assert_eq!(result, false);
+ ///
+ /// let result = 2.0 > 2.0;
+ /// assert_eq!(result, false);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn gt(&self, other: &Rhs) -> bool {
+ matches!(self.partial_cmp(other), Some(Greater))
+ }
+
+ /// This method tests greater than or equal to (for `self` and `other`) and is used by the `>=`
+ /// operator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let result = 2.0 >= 1.0;
+ /// assert_eq!(result, true);
+ ///
+ /// let result = 2.0 >= 2.0;
+ /// assert_eq!(result, true);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn ge(&self, other: &Rhs) -> bool {
+ matches!(self.partial_cmp(other), Some(Greater | Equal))
+ }
+}
+
+/// Derive macro generating an impl of the trait `PartialOrd`.
+#[rustc_builtin_macro]
+#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+#[allow_internal_unstable(core_intrinsics)]
+pub macro PartialOrd($item:item) {
+ /* compiler built-in */
+}
+
+/// Compares and returns the minimum of two values.
+///
+/// Returns the first argument if the comparison determines them to be equal.
+///
+/// Internally uses an alias to [`Ord::min`].
+///
+/// # Examples
+///
+/// ```
+/// use std::cmp;
+///
+/// assert_eq!(1, cmp::min(1, 2));
+/// assert_eq!(2, cmp::min(2, 2));
+/// ```
+#[inline]
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "cmp_min")]
+pub const fn min<T: ~const Ord + ~const Destruct>(v1: T, v2: T) -> T {
+ v1.min(v2)
+}
+
+/// Returns the minimum of two values with respect to the specified comparison function.
+///
+/// Returns the first argument if the comparison determines them to be equal.
+///
+/// # Examples
+///
+/// ```
+/// use std::cmp;
+///
+/// assert_eq!(cmp::min_by(-2, 1, |x: &i32, y: &i32| x.abs().cmp(&y.abs())), 1);
+/// assert_eq!(cmp::min_by(-2, 2, |x: &i32, y: &i32| x.abs().cmp(&y.abs())), -2);
+/// ```
+#[inline]
+#[must_use]
+#[stable(feature = "cmp_min_max_by", since = "1.53.0")]
+pub fn min_by<T, F: FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T {
+ match compare(&v1, &v2) {
+ Ordering::Less | Ordering::Equal => v1,
+ Ordering::Greater => v2,
+ }
+}
+
+/// Returns the element that gives the minimum value from the specified function.
+///
+/// Returns the first argument if the comparison determines them to be equal.
+///
+/// # Examples
+///
+/// ```
+/// use std::cmp;
+///
+/// assert_eq!(cmp::min_by_key(-2, 1, |x: &i32| x.abs()), 1);
+/// assert_eq!(cmp::min_by_key(-2, 2, |x: &i32| x.abs()), -2);
+/// ```
+#[inline]
+#[must_use]
+#[stable(feature = "cmp_min_max_by", since = "1.53.0")]
+pub fn min_by_key<T, F: FnMut(&T) -> K, K: Ord>(v1: T, v2: T, mut f: F) -> T {
+ min_by(v1, v2, |v1, v2| f(v1).cmp(&f(v2)))
+}
+
+/// Compares and returns the maximum of two values.
+///
+/// Returns the second argument if the comparison determines them to be equal.
+///
+/// Internally uses an alias to [`Ord::max`].
+///
+/// # Examples
+///
+/// ```
+/// use std::cmp;
+///
+/// assert_eq!(2, cmp::max(1, 2));
+/// assert_eq!(2, cmp::max(2, 2));
+/// ```
+#[inline]
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "cmp_max")]
+pub const fn max<T: ~const Ord + ~const Destruct>(v1: T, v2: T) -> T {
+ v1.max(v2)
+}
+
+/// Returns the maximum of two values with respect to the specified comparison function.
+///
+/// Returns the second argument if the comparison determines them to be equal.
+///
+/// # Examples
+///
+/// ```
+/// use std::cmp;
+///
+/// assert_eq!(cmp::max_by(-2, 1, |x: &i32, y: &i32| x.abs().cmp(&y.abs())), -2);
+/// assert_eq!(cmp::max_by(-2, 2, |x: &i32, y: &i32| x.abs().cmp(&y.abs())), 2);
+/// ```
+#[inline]
+#[must_use]
+#[stable(feature = "cmp_min_max_by", since = "1.53.0")]
+pub fn max_by<T, F: FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T {
+ match compare(&v1, &v2) {
+ Ordering::Less | Ordering::Equal => v2,
+ Ordering::Greater => v1,
+ }
+}
+
+/// Returns the element that gives the maximum value from the specified function.
+///
+/// Returns the second argument if the comparison determines them to be equal.
+///
+/// # Examples
+///
+/// ```
+/// use std::cmp;
+///
+/// assert_eq!(cmp::max_by_key(-2, 1, |x: &i32| x.abs()), -2);
+/// assert_eq!(cmp::max_by_key(-2, 2, |x: &i32| x.abs()), 2);
+/// ```
+#[inline]
+#[must_use]
+#[stable(feature = "cmp_min_max_by", since = "1.53.0")]
+pub fn max_by_key<T, F: FnMut(&T) -> K, K: Ord>(v1: T, v2: T, mut f: F) -> T {
+ max_by(v1, v2, |v1, v2| f(v1).cmp(&f(v2)))
+}
+
+// Implementation of PartialEq, Eq, PartialOrd and Ord for primitive types
+mod impls {
+ use crate::cmp::Ordering::{self, Equal, Greater, Less};
+ use crate::hint::unreachable_unchecked;
+
+ macro_rules! partial_eq_impl {
+ ($($t:ty)*) => ($(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+ impl const PartialEq for $t {
+ #[inline]
+ fn eq(&self, other: &$t) -> bool { (*self) == (*other) }
+ #[inline]
+ fn ne(&self, other: &$t) -> bool { (*self) != (*other) }
+ }
+ )*)
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+ impl const PartialEq for () {
+ #[inline]
+ fn eq(&self, _other: &()) -> bool {
+ true
+ }
+ #[inline]
+ fn ne(&self, _other: &()) -> bool {
+ false
+ }
+ }
+
+ partial_eq_impl! {
+ bool char usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64
+ }
+
+ macro_rules! eq_impl {
+ ($($t:ty)*) => ($(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl Eq for $t {}
+ )*)
+ }
+
+ eq_impl! { () bool char usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
+
+ macro_rules! partial_ord_impl {
+ ($($t:ty)*) => ($(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+ impl const PartialOrd for $t {
+ #[inline]
+ fn partial_cmp(&self, other: &$t) -> Option<Ordering> {
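+                // For floats, a NaN operand makes both comparisons below
+                // false, which falls into the `(false, false) => None` arm.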
+ match (*self <= *other, *self >= *other) {
+ (false, false) => None,
+ (false, true) => Some(Greater),
+ (true, false) => Some(Less),
+ (true, true) => Some(Equal),
+ }
+ }
+ #[inline]
+ fn lt(&self, other: &$t) -> bool { (*self) < (*other) }
+ #[inline]
+ fn le(&self, other: &$t) -> bool { (*self) <= (*other) }
+ #[inline]
+ fn ge(&self, other: &$t) -> bool { (*self) >= (*other) }
+ #[inline]
+ fn gt(&self, other: &$t) -> bool { (*self) > (*other) }
+ }
+ )*)
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+ impl const PartialOrd for () {
+ #[inline]
+ fn partial_cmp(&self, _: &()) -> Option<Ordering> {
+ Some(Equal)
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+ impl const PartialOrd for bool {
+ #[inline]
+ fn partial_cmp(&self, other: &bool) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+ }
+
+ partial_ord_impl! { f32 f64 }
+
+ macro_rules! ord_impl {
+ ($($t:ty)*) => ($(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+ impl const PartialOrd for $t {
+ #[inline]
+ fn partial_cmp(&self, other: &$t) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+ #[inline]
+ fn lt(&self, other: &$t) -> bool { (*self) < (*other) }
+ #[inline]
+ fn le(&self, other: &$t) -> bool { (*self) <= (*other) }
+ #[inline]
+ fn ge(&self, other: &$t) -> bool { (*self) >= (*other) }
+ #[inline]
+ fn gt(&self, other: &$t) -> bool { (*self) > (*other) }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+ impl const Ord for $t {
+ #[inline]
+ fn cmp(&self, other: &$t) -> Ordering {
+ // The order here is important to generate more optimal assembly.
+ // See <https://github.com/rust-lang/rust/issues/63758> for more info.
+ if *self < *other { Less }
+ else if *self == *other { Equal }
+ else { Greater }
+ }
+ }
+ )*)
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+ impl const Ord for () {
+ #[inline]
+ fn cmp(&self, _other: &()) -> Ordering {
+ Equal
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+ impl const Ord for bool {
+ #[inline]
+ fn cmp(&self, other: &bool) -> Ordering {
+ // Casting to i8's and converting the difference to an Ordering generates
+ // more optimal assembly.
+ // See <https://github.com/rust-lang/rust/issues/66780> for more info.
+ match (*self as i8) - (*other as i8) {
+ -1 => Less,
+ 0 => Equal,
+ 1 => Greater,
+ // SAFETY: bool as i8 returns 0 or 1, so the difference can't be anything else
+ _ => unsafe { unreachable_unchecked() },
+ }
+ }
+ }
+
+ ord_impl! { char usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
+
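+    // `!` has no values, so the bodies of the impls below can never run;
+    // dereferencing the `!` behind `&self` yields a value of the never type,
+    // which coerces to whatever type is required.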
+ #[unstable(feature = "never_type", issue = "35121")]
+ #[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+ impl const PartialEq for ! {
+ fn eq(&self, _: &!) -> bool {
+ *self
+ }
+ }
+
+ #[unstable(feature = "never_type", issue = "35121")]
+ impl Eq for ! {}
+
+ #[unstable(feature = "never_type", issue = "35121")]
+ #[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+ impl const PartialOrd for ! {
+ fn partial_cmp(&self, _: &!) -> Option<Ordering> {
+ *self
+ }
+ }
+
+ #[unstable(feature = "never_type", issue = "35121")]
+ #[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+ impl const Ord for ! {
+ fn cmp(&self, _: &!) -> Ordering {
+ *self
+ }
+ }
+
+ // & pointers
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+ impl<A: ?Sized, B: ?Sized> const PartialEq<&B> for &A
+ where
+ A: ~const PartialEq<B>,
+ {
+ #[inline]
+ fn eq(&self, other: &&B) -> bool {
+ PartialEq::eq(*self, *other)
+ }
+ #[inline]
+ fn ne(&self, other: &&B) -> bool {
+ PartialEq::ne(*self, *other)
+ }
+ }
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<A: ?Sized, B: ?Sized> PartialOrd<&B> for &A
+ where
+ A: PartialOrd<B>,
+ {
+ #[inline]
+ fn partial_cmp(&self, other: &&B) -> Option<Ordering> {
+ PartialOrd::partial_cmp(*self, *other)
+ }
+ #[inline]
+ fn lt(&self, other: &&B) -> bool {
+ PartialOrd::lt(*self, *other)
+ }
+ #[inline]
+ fn le(&self, other: &&B) -> bool {
+ PartialOrd::le(*self, *other)
+ }
+ #[inline]
+ fn gt(&self, other: &&B) -> bool {
+ PartialOrd::gt(*self, *other)
+ }
+ #[inline]
+ fn ge(&self, other: &&B) -> bool {
+ PartialOrd::ge(*self, *other)
+ }
+ }
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<A: ?Sized> Ord for &A
+ where
+ A: Ord,
+ {
+ #[inline]
+ fn cmp(&self, other: &Self) -> Ordering {
+ Ord::cmp(*self, *other)
+ }
+ }
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<A: ?Sized> Eq for &A where A: Eq {}
+
+ // &mut pointers
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<A: ?Sized, B: ?Sized> PartialEq<&mut B> for &mut A
+ where
+ A: PartialEq<B>,
+ {
+ #[inline]
+ fn eq(&self, other: &&mut B) -> bool {
+ PartialEq::eq(*self, *other)
+ }
+ #[inline]
+ fn ne(&self, other: &&mut B) -> bool {
+ PartialEq::ne(*self, *other)
+ }
+ }
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<A: ?Sized, B: ?Sized> PartialOrd<&mut B> for &mut A
+ where
+ A: PartialOrd<B>,
+ {
+ #[inline]
+ fn partial_cmp(&self, other: &&mut B) -> Option<Ordering> {
+ PartialOrd::partial_cmp(*self, *other)
+ }
+ #[inline]
+ fn lt(&self, other: &&mut B) -> bool {
+ PartialOrd::lt(*self, *other)
+ }
+ #[inline]
+ fn le(&self, other: &&mut B) -> bool {
+ PartialOrd::le(*self, *other)
+ }
+ #[inline]
+ fn gt(&self, other: &&mut B) -> bool {
+ PartialOrd::gt(*self, *other)
+ }
+ #[inline]
+ fn ge(&self, other: &&mut B) -> bool {
+ PartialOrd::ge(*self, *other)
+ }
+ }
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<A: ?Sized> Ord for &mut A
+ where
+ A: Ord,
+ {
+ #[inline]
+ fn cmp(&self, other: &Self) -> Ordering {
+ Ord::cmp(*self, *other)
+ }
+ }
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<A: ?Sized> Eq for &mut A where A: Eq {}
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<A: ?Sized, B: ?Sized> PartialEq<&mut B> for &A
+ where
+ A: PartialEq<B>,
+ {
+ #[inline]
+ fn eq(&self, other: &&mut B) -> bool {
+ PartialEq::eq(*self, *other)
+ }
+ #[inline]
+ fn ne(&self, other: &&mut B) -> bool {
+ PartialEq::ne(*self, *other)
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<A: ?Sized, B: ?Sized> PartialEq<&B> for &mut A
+ where
+ A: PartialEq<B>,
+ {
+ #[inline]
+ fn eq(&self, other: &&B) -> bool {
+ PartialEq::eq(*self, *other)
+ }
+ #[inline]
+ fn ne(&self, other: &&B) -> bool {
+ PartialEq::ne(*self, *other)
+ }
+ }
+}
diff --git a/library/core/src/convert/mod.rs b/library/core/src/convert/mod.rs
new file mode 100644
index 000000000..b30c8a4ae
--- /dev/null
+++ b/library/core/src/convert/mod.rs
@@ -0,0 +1,755 @@
+//! Traits for conversions between types.
+//!
+//! The traits in this module provide a way to convert from one type to another type.
+//! Each trait serves a different purpose:
+//!
+//! - Implement the [`AsRef`] trait for cheap reference-to-reference conversions
+//! - Implement the [`AsMut`] trait for cheap mutable-to-mutable conversions
+//! - Implement the [`From`] trait for consuming value-to-value conversions
+//! - Implement the [`Into`] trait for consuming value-to-value conversions to types
+//! outside the current crate
+//! - The [`TryFrom`] and [`TryInto`] traits behave like [`From`] and [`Into`],
+//! but should be implemented when the conversion can fail.
+//!
+//! The traits in this module are often used as trait bounds for generic functions so that
+//! arguments of multiple types are supported. See the documentation of each trait for examples.
+//!
+//! As a library author, you should always prefer implementing [`From<T>`][`From`] or
+//! [`TryFrom<T>`][`TryFrom`] rather than [`Into<U>`][`Into`] or [`TryInto<U>`][`TryInto`],
+//! as [`From`] and [`TryFrom`] provide greater flexibility and offer
+//! equivalent [`Into`] or [`TryInto`] implementations for free, thanks to a
+//! blanket implementation in the standard library. When targeting a version prior to Rust 1.41, it
+//! may be necessary to implement [`Into`] or [`TryInto`] directly when converting to a type
+//! outside the current crate.
+//!
+//! # Generic Implementations
+//!
+//! - [`AsRef`] and [`AsMut`] auto-dereference if the inner type is a reference
+//! - [`From`]`<U> for T` implies [`Into`]`<T> for U`
+//! - [`TryFrom`]`<U> for T` implies [`TryInto`]`<T> for U`
+//! - [`From`] and [`Into`] are reflexive, which means that all types can
+//! `into` themselves and `from` themselves
+//!
+//! See each trait for usage examples.
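+//!
+//! For instance, a minimal sketch of the blanket rule (`Wrapper` is purely
+//! illustrative): implementing [`From`] yields the corresponding [`Into`]
+//! for free.
+//!
+//! ```
+//! struct Wrapper(i32);
+//!
+//! impl From<i32> for Wrapper {
+//!     fn from(n: i32) -> Self {
+//!         Wrapper(n)
+//!     }
+//! }
+//!
+//! let w: Wrapper = 5.into(); // provided by the blanket `Into` impl
+//! assert_eq!(w.0, 5);
+//! ```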
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::fmt;
+use crate::hash::{Hash, Hasher};
+
+mod num;
+
+#[unstable(feature = "convert_float_to_int", issue = "67057")]
+pub use num::FloatToInt;
+
+/// The identity function.
+///
+/// Two things are important to note about this function:
+///
+/// - It is not always equivalent to a closure like `|x| x`, since the
+/// closure may coerce `x` into a different type.
+///
+/// - It moves the input `x` passed to the function.
+///
+/// While it might seem strange to have a function that just returns back the
+/// input, there are some interesting uses.
+///
+/// # Examples
+///
+/// Using `identity` to do nothing in a sequence of other, interesting,
+/// functions:
+///
+/// ```rust
+/// use std::convert::identity;
+///
+/// fn manipulation(x: u32) -> u32 {
+/// // Let's pretend that adding one is an interesting function.
+/// x + 1
+/// }
+///
+/// let _arr = &[identity, manipulation];
+/// ```
+///
+/// Using `identity` as a "do nothing" base case in a conditional:
+///
+/// ```rust
+/// use std::convert::identity;
+///
+/// # let condition = true;
+/// #
+/// # fn manipulation(x: u32) -> u32 { x + 1 }
+/// #
+/// let do_stuff = if condition { manipulation } else { identity };
+///
+/// // Do more interesting stuff...
+///
+/// let _results = do_stuff(42);
+/// ```
+///
+/// Using `identity` to keep the `Some` variants of an iterator of `Option<T>`:
+///
+/// ```rust
+/// use std::convert::identity;
+///
+/// let iter = [Some(1), None, Some(3)].into_iter();
+/// let filtered = iter.filter_map(identity).collect::<Vec<_>>();
+/// assert_eq!(vec![1, 3], filtered);
+/// ```
+#[stable(feature = "convert_id", since = "1.33.0")]
+#[rustc_const_stable(feature = "const_identity", since = "1.33.0")]
+#[inline]
+pub const fn identity<T>(x: T) -> T {
+ x
+}
+
+/// Used to do a cheap reference-to-reference conversion.
+///
+/// This trait is similar to [`AsMut`], which is used for converting between mutable references.
+/// If you need to do a costly conversion, it is better to implement [`From`] with type
+/// `&T` or write a custom function.
+///
+/// `AsRef` has the same signature as [`Borrow`], but [`Borrow`] is different in a few aspects:
+///
+/// - Unlike `AsRef`, [`Borrow`] has a blanket impl for any `T`, and can be used to accept either
+/// a reference or a value.
+/// - [`Borrow`] also requires that [`Hash`], [`Eq`] and [`Ord`] for a borrowed value are
+/// equivalent to those of the owned value. For this reason, if you want to
+/// borrow only a single field of a struct you can implement `AsRef`, but not [`Borrow`]
+/// (see the sketch below).
+///
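+/// As a sketch, a hypothetical `Employee` type that hashes and compares by
+/// `id` can soundly expose its `name` through `AsRef<str>`, whereas a
+/// `Borrow<str>` impl would violate the equivalence requirement above:
+///
+/// ```
+/// # #[allow(dead_code)]
+/// struct Employee {
+///     id: u32,
+///     name: String,
+/// }
+///
+/// impl AsRef<str> for Employee {
+///     fn as_ref(&self) -> &str {
+///         &self.name
+///     }
+/// }
+/// ```
+///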
+/// **Note: This trait must not fail**. If the conversion can fail, use a
+/// dedicated method which returns an [`Option<T>`] or a [`Result<T, E>`].
+///
+/// # Generic Implementations
+///
+/// - `AsRef` auto-dereferences if the inner type is a reference or a mutable
+/// reference (e.g.: `foo.as_ref()` will work the same if `foo` has type
+/// `&mut Foo` or `&&mut Foo`)
+///
+/// # Examples
+///
+/// By using trait bounds we can accept arguments of different types as long as they can be
+/// converted to the specified type `T`.
+///
+/// For example: by creating a generic function that takes an `AsRef<str>` we express that we
+/// want to accept all references that can be converted to [`&str`] as an argument.
+/// Since both [`String`] and [`&str`] implement `AsRef<str>`, we can accept both as input arguments.
+///
+/// [`&str`]: primitive@str
+/// [`Borrow`]: crate::borrow::Borrow
+/// [`Eq`]: crate::cmp::Eq
+/// [`Ord`]: crate::cmp::Ord
+/// [`String`]: ../../std/string/struct.String.html
+///
+/// ```
+/// fn is_hello<T: AsRef<str>>(s: T) {
+/// assert_eq!("hello", s.as_ref());
+/// }
+///
+/// let s = "hello";
+/// is_hello(s);
+///
+/// let s = "hello".to_string();
+/// is_hello(s);
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "AsRef")]
+pub trait AsRef<T: ?Sized> {
+ /// Converts this type into a shared reference of the (usually inferred) input type.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn as_ref(&self) -> &T;
+}
+
+/// Used to do a cheap mutable-to-mutable reference conversion.
+///
+/// This trait is similar to [`AsRef`] but used for converting between mutable
+/// references. If you need to do a costly conversion it is better to
+/// implement [`From`] with type `&mut T` or write a custom function.
+///
+/// **Note: This trait must not fail**. If the conversion can fail, use a
+/// dedicated method which returns an [`Option<T>`] or a [`Result<T, E>`].
+///
+/// # Generic Implementations
+///
+/// - `AsMut` auto-dereferences if the inner type is a mutable reference
+/// (e.g.: `foo.as_mut()` will work the same if `foo` has type `&mut Foo`
+/// or `&mut &mut Foo`)
+///
+/// # Examples
+///
+/// Using `AsMut` as a trait bound for a generic function, we can accept all mutable references
+/// that can be converted to type `&mut T`. For example, the function `add_one` below takes
+/// any argument that can be converted to `&mut u64`. Because [`Box<T>`] implements
+/// `AsMut<T>`, `add_one` accepts arguments of type `&mut Box<u64>` as well:
+///
+/// ```
+/// fn add_one<T: AsMut<u64>>(num: &mut T) {
+/// *num.as_mut() += 1;
+/// }
+///
+/// let mut boxed_num = Box::new(0);
+/// add_one(&mut boxed_num);
+/// assert_eq!(*boxed_num, 1);
+/// ```
+///
+/// [`Box<T>`]: ../../std/boxed/struct.Box.html
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "AsMut")]
+pub trait AsMut<T: ?Sized> {
+ /// Converts this type into a mutable reference of the (usually inferred) input type.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn as_mut(&mut self) -> &mut T;
+}
+
+/// A value-to-value conversion that consumes the input value. The
+/// opposite of [`From`].
+///
+/// One should avoid implementing [`Into`] and implement [`From`] instead.
+/// Implementing [`From`] automatically provides one with an implementation of [`Into`]
+/// thanks to the blanket implementation in the standard library.
+///
+/// Prefer using [`Into`] over [`From`] when specifying trait bounds on a generic function
+/// to ensure that types that only implement [`Into`] can be used as well.
+///
+/// **Note: This trait must not fail**. If the conversion can fail, use [`TryInto`].
+///
+/// # Generic Implementations
+///
+/// - [`From`]`<T> for U` implies `Into<U> for T`
+/// - [`Into`] is reflexive, which means that `Into<T> for T` is implemented
+///
+/// # Implementing [`Into`] for conversions to external types in old versions of Rust
+///
+/// Prior to Rust 1.41, if the destination type was not part of the current crate
+/// then you couldn't implement [`From`] directly.
+/// For example, take this code:
+///
+/// ```
+/// struct Wrapper<T>(Vec<T>);
+/// impl<T> From<Wrapper<T>> for Vec<T> {
+/// fn from(w: Wrapper<T>) -> Vec<T> {
+/// w.0
+/// }
+/// }
+/// ```
+/// This will fail to compile in older versions of the language because Rust's orphan rules
+/// used to be stricter. To bypass this, you could implement [`Into`] directly:
+///
+/// ```
+/// struct Wrapper<T>(Vec<T>);
+/// impl<T> Into<Vec<T>> for Wrapper<T> {
+/// fn into(self) -> Vec<T> {
+/// self.0
+/// }
+/// }
+/// ```
+///
+/// It is important to understand that [`Into`] does not provide a [`From`] implementation
+/// (as [`From`] does with [`Into`]). Therefore, you should always try to implement [`From`]
+/// and then fall back to [`Into`] if [`From`] can't be implemented.
+///
+/// # Examples
+///
+/// [`String`] implements [`Into`]`<`[`Vec`]`<`[`u8`]`>>`:
+///
+/// In order to express that we want a generic function to take all arguments that can be
+/// converted to a specified type `T`, we can use a trait bound of [`Into`]`<T>`.
+/// For example, the function `is_hello` takes all arguments that can be converted into a
+/// [`Vec`]`<`[`u8`]`>`.
+///
+/// ```
+/// fn is_hello<T: Into<Vec<u8>>>(s: T) {
+/// let bytes = b"hello".to_vec();
+/// assert_eq!(bytes, s.into());
+/// }
+///
+/// let s = "hello".to_string();
+/// is_hello(s);
+/// ```
+///
+/// [`String`]: ../../std/string/struct.String.html
+/// [`Vec`]: ../../std/vec/struct.Vec.html
+#[rustc_diagnostic_item = "Into"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait Into<T>: Sized {
+ /// Converts this type into the (usually inferred) input type.
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn into(self) -> T;
+}
+
+/// Used to do value-to-value conversions while consuming the input value. It is the reciprocal of
+/// [`Into`].
+///
+/// One should always prefer implementing `From` over [`Into`]
+/// because implementing `From` automatically provides one with an implementation of [`Into`]
+/// thanks to the blanket implementation in the standard library.
+///
+/// Only implement [`Into`] when targeting a version prior to Rust 1.41 and converting to a type
+/// outside the current crate.
+/// `From` was not able to do these types of conversions in earlier versions because of Rust's
+/// orphan rules.
+/// See [`Into`] for more details.
+///
+/// Prefer using [`Into`] over using `From` when specifying trait bounds on a generic function.
+/// This way, types that directly implement [`Into`] can be used as arguments as well.
+///
+/// The `From` trait is also very useful when performing error handling. When constructing a
+/// function that is capable of failing, the return type will generally be of the form
+/// `Result<T, E>`. The `From` trait simplifies error handling by allowing a function to return
+/// a single error type that encapsulates multiple error types. See the "Examples" section and [the book][book] for more
+/// details.
+///
+/// **Note: This trait must not fail**. The `From` trait is intended for perfect conversions.
+/// If the conversion can fail or is not perfect, use [`TryFrom`].
+///
+/// # Generic Implementations
+///
+/// - `From<T> for U` implies [`Into`]`<U> for T`
+/// - `From` is reflexive, which means that `From<T> for T` is implemented
+///
+/// # Examples
+///
+/// [`String`] implements `From<&str>`:
+///
+/// An explicit conversion from a `&str` to a `String` is done as follows:
+///
+/// ```
+/// let string = "hello".to_string();
+/// let other_string = String::from("hello");
+///
+/// assert_eq!(string, other_string);
+/// ```
+///
+/// While performing error handling it is often useful to implement `From` for your own error type.
+/// By converting underlying error types to our own custom error type that encapsulates the
+/// underlying error type, we can return a single error type without losing information on the
+/// underlying cause. The `?` operator automatically converts the underlying error type to our
+/// custom error type by calling `Into<CliError>::into`, which is automatically provided when
+/// implementing `From`. The compiler then infers which implementation of `Into` should be used.
+///
+/// ```
+/// use std::fs;
+/// use std::io;
+/// use std::num;
+///
+/// enum CliError {
+/// IoError(io::Error),
+/// ParseError(num::ParseIntError),
+/// }
+///
+/// impl From<io::Error> for CliError {
+/// fn from(error: io::Error) -> Self {
+/// CliError::IoError(error)
+/// }
+/// }
+///
+/// impl From<num::ParseIntError> for CliError {
+/// fn from(error: num::ParseIntError) -> Self {
+/// CliError::ParseError(error)
+/// }
+/// }
+///
+/// fn open_and_parse_file(file_name: &str) -> Result<i32, CliError> {
+/// let contents = fs::read_to_string(&file_name)?;
+/// let num: i32 = contents.trim().parse()?;
+/// Ok(num)
+/// }
+/// ```
+///
+/// [`String`]: ../../std/string/struct.String.html
+/// [`from`]: From::from
+/// [book]: ../../book/ch09-00-error-handling.html
+#[rustc_diagnostic_item = "From"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(on(
+ all(_Self = "&str", T = "std::string::String"),
+ note = "to coerce a `{T}` into a `{Self}`, use `&*` as a prefix",
+))]
+pub trait From<T>: Sized {
+ /// Converts to this type from the input type.
+ #[lang = "from"]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn from(_: T) -> Self;
+}
+
+/// An attempted conversion that consumes `self`, which may or may not be
+/// expensive.
+///
+/// Library authors should usually not directly implement this trait,
+/// but should prefer implementing the [`TryFrom`] trait, which offers
+/// greater flexibility and provides an equivalent `TryInto`
+/// implementation for free, thanks to a blanket implementation in the
+/// standard library. For more information on this, see the
+/// documentation for [`Into`].
+///
+/// # Implementing `TryInto`
+///
+/// This suffers the same restrictions and reasoning as implementing
+/// [`Into`]; see there for details.
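+///
+/// # Examples
+///
+/// A minimal sketch using `TryInto` as a trait bound; the `u8` conversions come
+/// from the standard library's [`TryFrom`] implementations for the integer types:
+///
+/// ```
+/// use std::convert::TryInto;
+///
+/// fn to_byte<T: TryInto<u8>>(value: T) -> Option<u8> {
+///     value.try_into().ok()
+/// }
+///
+/// assert_eq!(to_byte(7_u32), Some(7));
+/// assert_eq!(to_byte(300_u32), None);
+/// ```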
+#[rustc_diagnostic_item = "TryInto"]
+#[stable(feature = "try_from", since = "1.34.0")]
+pub trait TryInto<T>: Sized {
+ /// The type returned in the event of a conversion error.
+ #[stable(feature = "try_from", since = "1.34.0")]
+ type Error;
+
+ /// Performs the conversion.
+ #[stable(feature = "try_from", since = "1.34.0")]
+ fn try_into(self) -> Result<T, Self::Error>;
+}
+
+/// Simple and safe type conversions that may fail in a controlled
+/// way under some circumstances. It is the reciprocal of [`TryInto`].
+///
+/// This is useful when you are doing a type conversion that may
+/// trivially succeed but may also need special handling.
+/// For example, there is no way to convert an [`i64`] into an [`i32`]
+/// using the [`From`] trait, because an [`i64`] may contain a value
+/// that an [`i32`] cannot represent and so the conversion would lose data.
+/// This might be handled by truncating the [`i64`] to an [`i32`] (essentially
+/// keeping only the lowest 32 bits, i.e. taking the value modulo 2^32), by
+/// saturating at [`i32::MAX`], or by some other method. The [`From`] trait is intended
+/// for perfect conversions, so the `TryFrom` trait informs the
+/// programmer when a type conversion could go bad and lets them
+/// decide how to handle it.
+///
+/// # Generic Implementations
+///
+/// - `TryFrom<T> for U` implies [`TryInto`]`<U> for T`
+/// - [`try_from`] is reflexive, which means that `TryFrom<T> for T`
+/// is implemented and cannot fail -- the associated `Error` type for
+/// calling `T::try_from()` on a value of type `T` is [`Infallible`].
+/// When the [`!`] type is stabilized [`Infallible`] and [`!`] will be
+/// equivalent.
+///
+/// `TryFrom<T>` can be implemented as follows:
+///
+/// ```
+/// struct GreaterThanZero(i32);
+///
+/// impl TryFrom<i32> for GreaterThanZero {
+/// type Error = &'static str;
+///
+/// fn try_from(value: i32) -> Result<Self, Self::Error> {
+/// if value <= 0 {
+/// Err("GreaterThanZero only accepts value superior than zero!")
+/// } else {
+/// Ok(GreaterThanZero(value))
+/// }
+/// }
+/// }
+/// ```
+///
+/// # Examples
+///
+/// As described, [`i32`] implements `TryFrom<`[`i64`]`>`:
+///
+/// ```
+/// let big_number = 1_000_000_000_000i64;
+/// // Silently truncates `big_number`, requires detecting
+/// // and handling the truncation after the fact.
+/// let smaller_number = big_number as i32;
+/// assert_eq!(smaller_number, -727379968);
+///
+/// // Returns an error because `big_number` is too big to
+/// // fit in an `i32`.
+/// let try_smaller_number = i32::try_from(big_number);
+/// assert!(try_smaller_number.is_err());
+///
+/// // Returns `Ok(3)`.
+/// let try_successful_smaller_number = i32::try_from(3);
+/// assert!(try_successful_smaller_number.is_ok());
+/// ```
+///
+/// [`try_from`]: TryFrom::try_from
+#[rustc_diagnostic_item = "TryFrom"]
+#[stable(feature = "try_from", since = "1.34.0")]
+pub trait TryFrom<T>: Sized {
+ /// The type returned in the event of a conversion error.
+ #[stable(feature = "try_from", since = "1.34.0")]
+ type Error;
+
+ /// Performs the conversion.
+ #[stable(feature = "try_from", since = "1.34.0")]
+ fn try_from(value: T) -> Result<Self, Self::Error>;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// GENERIC IMPLS
+////////////////////////////////////////////////////////////////////////////////
+
+// As lifts over &
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl<T: ?Sized, U: ?Sized> const AsRef<U> for &T
+where
+ T: ~const AsRef<U>,
+{
+ #[inline]
+ fn as_ref(&self) -> &U {
+ <T as AsRef<U>>::as_ref(*self)
+ }
+}
+
+// As lifts over &mut
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl<T: ?Sized, U: ?Sized> const AsRef<U> for &mut T
+where
+ T: ~const AsRef<U>,
+{
+ #[inline]
+ fn as_ref(&self) -> &U {
+ <T as AsRef<U>>::as_ref(*self)
+ }
+}
+
+// FIXME (#45742): replace the above impls for &/&mut with the following more general one:
+// // As lifts over Deref
+// impl<D: ?Sized + Deref<Target: AsRef<U>>, U: ?Sized> AsRef<U> for D {
+// fn as_ref(&self) -> &U {
+// self.deref().as_ref()
+// }
+// }
+
+// AsMut lifts over &mut
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl<T: ?Sized, U: ?Sized> const AsMut<U> for &mut T
+where
+ T: ~const AsMut<U>,
+{
+ #[inline]
+ fn as_mut(&mut self) -> &mut U {
+ (*self).as_mut()
+ }
+}
+
+// FIXME (#45742): replace the above impl for &mut with the following more general one:
+// // AsMut lifts over DerefMut
+// impl<D: ?Sized + Deref<Target: AsMut<U>>, U: ?Sized> AsMut<U> for D {
+// fn as_mut(&mut self) -> &mut U {
+// self.deref_mut().as_mut()
+// }
+// }
+
+// From implies Into
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl<T, U> const Into<U> for T
+where
+ U: ~const From<T>,
+{
+ /// Calls `U::from(self)`.
+ ///
+ /// That is, this conversion is whatever the implementation of
+ /// <code>[From]&lt;T&gt; for U</code> chooses to do.
+ fn into(self) -> U {
+ U::from(self)
+ }
+}
+
+// From (and thus Into) is reflexive
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl<T> const From<T> for T {
+ /// Returns the argument unchanged.
+ fn from(t: T) -> T {
+ t
+ }
+}
+
+/// **Stability note:** This impl does not yet exist, but we are
+/// "reserving space" to add it in the future. See
+/// [rust-lang/rust#64715][#64715] for details.
+///
+/// [#64715]: https://github.com/rust-lang/rust/issues/64715
+#[stable(feature = "convert_infallible", since = "1.34.0")]
+#[allow(unused_attributes)] // FIXME(#58633): do a principled fix instead.
+#[rustc_reservation_impl = "permitting this impl would forbid us from adding \
+ `impl<T> From<!> for T` later; see rust-lang/rust#64715 for details"]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl<T> const From<!> for T {
+ fn from(t: !) -> T {
+ t
+ }
+}
+
+// TryFrom implies TryInto
+#[stable(feature = "try_from", since = "1.34.0")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl<T, U> const TryInto<U> for T
+where
+ U: ~const TryFrom<T>,
+{
+ type Error = U::Error;
+
+ fn try_into(self) -> Result<U, U::Error> {
+ U::try_from(self)
+ }
+}
+
+// Infallible conversions are semantically equivalent to fallible conversions
+// with an uninhabited error type.
+#[stable(feature = "try_from", since = "1.34.0")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl<T, U> const TryFrom<U> for T
+where
+ U: ~const Into<T>,
+{
+ type Error = Infallible;
+
+ fn try_from(value: U) -> Result<Self, Self::Error> {
+ Ok(U::into(value))
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// CONCRETE IMPLS
+////////////////////////////////////////////////////////////////////////////////
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> AsRef<[T]> for [T] {
+ fn as_ref(&self) -> &[T] {
+ self
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> AsMut<[T]> for [T] {
+ fn as_mut(&mut self) -> &mut [T] {
+ self
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRef<str> for str {
+ #[inline]
+ fn as_ref(&self) -> &str {
+ self
+ }
+}
+
+#[stable(feature = "as_mut_str_for_str", since = "1.51.0")]
+impl AsMut<str> for str {
+ #[inline]
+ fn as_mut(&mut self) -> &mut str {
+ self
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// THE NO-ERROR ERROR TYPE
+////////////////////////////////////////////////////////////////////////////////
+
+/// The error type for errors that can never happen.
+///
+/// Since this enum has no variants, a value of this type can never actually exist.
+/// This can be useful for generic APIs that use [`Result`] and parameterize the error type,
+/// to indicate that the result is always [`Ok`].
+///
+/// For example, the [`TryFrom`] trait (conversion that returns a [`Result`])
+/// has a blanket implementation for all types where a reverse [`Into`] implementation exists.
+///
+/// ```ignore (illustrates std code, duplicating the impl in a doctest would be an error)
+/// impl<T, U> TryFrom<U> for T where U: Into<T> {
+/// type Error = Infallible;
+///
+/// fn try_from(value: U) -> Result<Self, Infallible> {
+/// Ok(U::into(value)) // Never returns `Err`
+/// }
+/// }
+/// ```
+///
+/// # Future compatibility
+///
+/// This enum has the same role as [the `!` “never” type][never],
+/// which is unstable in this version of Rust.
+/// When `!` is stabilized, we plan to make `Infallible` a type alias to it:
+///
+/// ```ignore (illustrates future std change)
+/// pub type Infallible = !;
+/// ```
+///
+/// … and eventually deprecate `Infallible`.
+///
+/// However, there is one case where `!` syntax can be used
+/// before `!` is stabilized as a full-fledged type: in the position of a function’s return type.
+/// Specifically, it is possible to have implementations for two different function pointer types:
+///
+/// ```
+/// trait MyTrait {}
+/// impl MyTrait for fn() -> ! {}
+/// impl MyTrait for fn() -> std::convert::Infallible {}
+/// ```
+///
+/// With `Infallible` being an enum, this code is valid.
+/// However when `Infallible` becomes an alias for the never type,
+/// the two `impl`s will start to overlap
+/// and therefore will be disallowed by the language’s trait coherence rules.
+#[stable(feature = "convert_infallible", since = "1.34.0")]
+#[derive(Copy)]
+pub enum Infallible {}
+
+#[stable(feature = "convert_infallible", since = "1.34.0")]
+#[rustc_const_unstable(feature = "const_clone", issue = "91805")]
+impl const Clone for Infallible {
+ fn clone(&self) -> Infallible {
+ match *self {}
+ }
+}
+
+#[stable(feature = "convert_infallible", since = "1.34.0")]
+impl fmt::Debug for Infallible {
+ fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {}
+ }
+}
+
+#[stable(feature = "convert_infallible", since = "1.34.0")]
+impl fmt::Display for Infallible {
+ fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {}
+ }
+}
+
+#[stable(feature = "convert_infallible", since = "1.34.0")]
+impl PartialEq for Infallible {
+ fn eq(&self, _: &Infallible) -> bool {
+ match *self {}
+ }
+}
+
+#[stable(feature = "convert_infallible", since = "1.34.0")]
+impl Eq for Infallible {}
+
+#[stable(feature = "convert_infallible", since = "1.34.0")]
+impl PartialOrd for Infallible {
+ fn partial_cmp(&self, _other: &Self) -> Option<crate::cmp::Ordering> {
+ match *self {}
+ }
+}
+
+#[stable(feature = "convert_infallible", since = "1.34.0")]
+impl Ord for Infallible {
+ fn cmp(&self, _other: &Self) -> crate::cmp::Ordering {
+ match *self {}
+ }
+}
+
+#[stable(feature = "convert_infallible", since = "1.34.0")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl const From<!> for Infallible {
+ fn from(x: !) -> Self {
+ x
+ }
+}
+
+#[stable(feature = "convert_infallible_hash", since = "1.44.0")]
+impl Hash for Infallible {
+ fn hash<H: Hasher>(&self, _: &mut H) {
+ match *self {}
+ }
+}
diff --git a/library/core/src/convert/num.rs b/library/core/src/convert/num.rs
new file mode 100644
index 000000000..4fa5d129b
--- /dev/null
+++ b/library/core/src/convert/num.rs
@@ -0,0 +1,546 @@
+use super::{From, TryFrom};
+use crate::num::TryFromIntError;
+
+mod private {
+ /// Because this trait is unreachable from outside the crate, other
+ /// implementations of the `FloatToInt` trait are prevented, which allows
+ /// potentially adding more trait methods after the trait is `#[stable]`.
+ #[unstable(feature = "convert_float_to_int", issue = "67057")]
+ pub trait Sealed {}
+}
+
+/// Supporting trait for inherent methods of `f32` and `f64` such as `to_int_unchecked`.
+/// Typically doesn’t need to be used directly.
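+///
+/// A sketch of the inherent method this trait backs; the caller must guarantee
+/// that the value is finite and in range for the target type:
+///
+/// ```
+/// let x = 4.6_f32;
+/// // SAFETY: 4.6 truncates to 4, which is in range for `i32`.
+/// let y: i32 = unsafe { x.to_int_unchecked() };
+/// assert_eq!(y, 4);
+/// ```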
+#[unstable(feature = "convert_float_to_int", issue = "67057")]
+pub trait FloatToInt<Int>: private::Sealed + Sized {
+ #[unstable(feature = "convert_float_to_int", issue = "67057")]
+ #[doc(hidden)]
+ unsafe fn to_int_unchecked(self) -> Int;
+}
+
+macro_rules! impl_float_to_int {
+ ( $Float: ident => $( $Int: ident )+ ) => {
+ #[unstable(feature = "convert_float_to_int", issue = "67057")]
+ impl private::Sealed for $Float {}
+ $(
+ #[unstable(feature = "convert_float_to_int", issue = "67057")]
+ impl FloatToInt<$Int> for $Float {
+ #[inline]
+ unsafe fn to_int_unchecked(self) -> $Int {
+ // SAFETY: the safety contract must be upheld by the caller.
+ unsafe { crate::intrinsics::float_to_int_unchecked(self) }
+ }
+ }
+ )+
+ }
+}
+
+impl_float_to_int!(f32 => u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize);
+impl_float_to_int!(f64 => u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize);
+
+// Conversion traits for primitive integer and float types
+// Conversions T -> T are covered by a blanket impl and therefore excluded
+// Some conversions from and to usize/isize are not implemented due to portability concerns
+macro_rules! impl_from {
+ ($Small: ty, $Large: ty, #[$attr:meta], $doc: expr) => {
+ #[$attr]
+ #[rustc_const_unstable(feature = "const_num_from_num", issue = "87852")]
+ impl const From<$Small> for $Large {
+ // Rustdocs on the impl block show a "[+] show undocumented items" toggle.
+ // Rustdocs on functions do not.
+ #[doc = $doc]
+ #[inline]
+ fn from(small: $Small) -> Self {
+ small as Self
+ }
+ }
+ };
+ ($Small: ty, $Large: ty, #[$attr:meta]) => {
+ impl_from!($Small,
+ $Large,
+ #[$attr],
+ concat!("Converts `",
+ stringify!($Small),
+ "` to `",
+ stringify!($Large),
+ "` losslessly."));
+ }
+}
+
+macro_rules! impl_from_bool {
+ ($target: ty, #[$attr:meta]) => {
+ impl_from!(bool, $target, #[$attr], concat!("Converts a `bool` to a `",
+ stringify!($target), "`. The resulting value is `0` for `false` and `1` for `true`
+values.
+
+# Examples
+
+```
+assert_eq!(", stringify!($target), "::from(true), 1);
+assert_eq!(", stringify!($target), "::from(false), 0);
+```"));
+ };
+}
+
+// Bool -> Any
+impl_from_bool! { u8, #[stable(feature = "from_bool", since = "1.28.0")] }
+impl_from_bool! { u16, #[stable(feature = "from_bool", since = "1.28.0")] }
+impl_from_bool! { u32, #[stable(feature = "from_bool", since = "1.28.0")] }
+impl_from_bool! { u64, #[stable(feature = "from_bool", since = "1.28.0")] }
+impl_from_bool! { u128, #[stable(feature = "from_bool", since = "1.28.0")] }
+impl_from_bool! { usize, #[stable(feature = "from_bool", since = "1.28.0")] }
+impl_from_bool! { i8, #[stable(feature = "from_bool", since = "1.28.0")] }
+impl_from_bool! { i16, #[stable(feature = "from_bool", since = "1.28.0")] }
+impl_from_bool! { i32, #[stable(feature = "from_bool", since = "1.28.0")] }
+impl_from_bool! { i64, #[stable(feature = "from_bool", since = "1.28.0")] }
+impl_from_bool! { i128, #[stable(feature = "from_bool", since = "1.28.0")] }
+impl_from_bool! { isize, #[stable(feature = "from_bool", since = "1.28.0")] }
+
+// Unsigned -> Unsigned
+impl_from! { u8, u16, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { u8, u32, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { u8, u64, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { u8, u128, #[stable(feature = "i128", since = "1.26.0")] }
+impl_from! { u8, usize, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { u16, u32, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { u16, u64, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { u16, u128, #[stable(feature = "i128", since = "1.26.0")] }
+impl_from! { u32, u64, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { u32, u128, #[stable(feature = "i128", since = "1.26.0")] }
+impl_from! { u64, u128, #[stable(feature = "i128", since = "1.26.0")] }
+
+// Signed -> Signed
+impl_from! { i8, i16, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { i8, i32, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { i8, i64, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { i8, i128, #[stable(feature = "i128", since = "1.26.0")] }
+impl_from! { i8, isize, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { i16, i32, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { i16, i64, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { i16, i128, #[stable(feature = "i128", since = "1.26.0")] }
+impl_from! { i32, i64, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { i32, i128, #[stable(feature = "i128", since = "1.26.0")] }
+impl_from! { i64, i128, #[stable(feature = "i128", since = "1.26.0")] }
+
+// Unsigned -> Signed
+impl_from! { u8, i16, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { u8, i32, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { u8, i64, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { u8, i128, #[stable(feature = "i128", since = "1.26.0")] }
+impl_from! { u16, i32, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { u16, i64, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { u16, i128, #[stable(feature = "i128", since = "1.26.0")] }
+impl_from! { u32, i64, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { u32, i128, #[stable(feature = "i128", since = "1.26.0")] }
+impl_from! { u64, i128, #[stable(feature = "i128", since = "1.26.0")] }
+
+// The C99 standard defines bounds on INTPTR_MIN, INTPTR_MAX, and UINTPTR_MAX
+// which imply that pointer-sized integers must be at least 16 bits:
+// https://port70.net/~nsz/c/c99/n1256.html#7.18.2.4
+impl_from! { u16, usize, #[stable(feature = "lossless_iusize_conv", since = "1.26.0")] }
+impl_from! { u8, isize, #[stable(feature = "lossless_iusize_conv", since = "1.26.0")] }
+impl_from! { i16, isize, #[stable(feature = "lossless_iusize_conv", since = "1.26.0")] }
+
+// RISC-V defines the possibility of a 128-bit address space (RV128).
+
+// CHERI proposes 256-bit “capabilities”. Unclear if this would be relevant to usize/isize.
+// https://www.cl.cam.ac.uk/research/security/ctsrd/pdfs/20171017a-cheri-poster.pdf
+// https://www.csl.sri.com/users/neumann/2012resolve-cheri.pdf
+
+// Note: integers can only be represented with full precision in a float if
+// they fit in the significand, which is 24 bits in f32 and 53 bits in f64.
+// Lossy float conversions are not implemented at this time.
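+// For example, `16_777_217_u32` (2^24 + 1) has no exact `f32` representation:
+// `16_777_217_u32 as f32` rounds to `16_777_216.0`, so a hypothetical
+// `From<u32> for f32` impl would silently lose precision.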
+
+// Signed -> Float
+impl_from! { i8, f32, #[stable(feature = "lossless_float_conv", since = "1.6.0")] }
+impl_from! { i8, f64, #[stable(feature = "lossless_float_conv", since = "1.6.0")] }
+impl_from! { i16, f32, #[stable(feature = "lossless_float_conv", since = "1.6.0")] }
+impl_from! { i16, f64, #[stable(feature = "lossless_float_conv", since = "1.6.0")] }
+impl_from! { i32, f64, #[stable(feature = "lossless_float_conv", since = "1.6.0")] }
+
+// Unsigned -> Float
+impl_from! { u8, f32, #[stable(feature = "lossless_float_conv", since = "1.6.0")] }
+impl_from! { u8, f64, #[stable(feature = "lossless_float_conv", since = "1.6.0")] }
+impl_from! { u16, f32, #[stable(feature = "lossless_float_conv", since = "1.6.0")] }
+impl_from! { u16, f64, #[stable(feature = "lossless_float_conv", since = "1.6.0")] }
+impl_from! { u32, f64, #[stable(feature = "lossless_float_conv", since = "1.6.0")] }
+
+// Float -> Float
+impl_from! { f32, f64, #[stable(feature = "lossless_float_conv", since = "1.6.0")] }
+
+// no possible bounds violation
+macro_rules! try_from_unbounded {
+ ($source:ty, $($target:ty),*) => {$(
+ #[stable(feature = "try_from", since = "1.34.0")]
+ #[rustc_const_unstable(feature = "const_num_from_num", issue = "87852")]
+ impl const TryFrom<$source> for $target {
+ type Error = TryFromIntError;
+
+ /// Try to create the target number type from a source
+ /// number type. This returns an error if the source value
+ /// is outside of the range of the target type.
+ #[inline]
+ fn try_from(value: $source) -> Result<Self, Self::Error> {
+ Ok(value as Self)
+ }
+ }
+ )*}
+}
+
+// only the lower bound (zero) needs checking
+macro_rules! try_from_lower_bounded {
+ ($source:ty, $($target:ty),*) => {$(
+ #[stable(feature = "try_from", since = "1.34.0")]
+ #[rustc_const_unstable(feature = "const_num_from_num", issue = "87852")]
+ impl const TryFrom<$source> for $target {
+ type Error = TryFromIntError;
+
+ /// Try to create the target number type from a source
+ /// number type. This returns an error if the source value
+ /// is outside of the range of the target type.
+ #[inline]
+ fn try_from(u: $source) -> Result<Self, Self::Error> {
+ if u >= 0 {
+ Ok(u as Self)
+ } else {
+ Err(TryFromIntError(()))
+ }
+ }
+ }
+ )*}
+}
+
+// only the upper bound needs checking
+macro_rules! try_from_upper_bounded {
+ ($source:ty, $($target:ty),*) => {$(
+ #[stable(feature = "try_from", since = "1.34.0")]
+ #[rustc_const_unstable(feature = "const_num_from_num", issue = "87852")]
+ impl const TryFrom<$source> for $target {
+ type Error = TryFromIntError;
+
+ /// Try to create the target number type from a source
+ /// number type. This returns an error if the source value
+ /// is outside of the range of the target type.
+ #[inline]
+ fn try_from(u: $source) -> Result<Self, Self::Error> {
+ if u > (Self::MAX as $source) {
+ Err(TryFromIntError(()))
+ } else {
+ Ok(u as Self)
+ }
+ }
+ }
+ )*}
+}
+
+// all other cases
+macro_rules! try_from_both_bounded {
+ ($source:ty, $($target:ty),*) => {$(
+ #[stable(feature = "try_from", since = "1.34.0")]
+ #[rustc_const_unstable(feature = "const_num_from_num", issue = "87852")]
+ impl const TryFrom<$source> for $target {
+ type Error = TryFromIntError;
+
+ /// Try to create the target number type from a source
+ /// number type. This returns an error if the source value
+ /// is outside of the range of the target type.
+ #[inline]
+ fn try_from(u: $source) -> Result<Self, Self::Error> {
+ let min = Self::MIN as $source;
+ let max = Self::MAX as $source;
+ if u < min || u > max {
+ Err(TryFromIntError(()))
+ } else {
+ Ok(u as Self)
+ }
+ }
+ }
+ )*}
+}
+
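+// Expands the given macro with the argument order flipped, so that reverse
+// conversions can be listed from the perspective of a single type; e.g.
+// `rev!(try_from_unbounded, usize, u32)` expands to
+// `try_from_unbounded!(u32, usize)`.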
+macro_rules! rev {
+ ($mac:ident, $source:ty, $($target:ty),*) => {$(
+ $mac!($target, $source);
+ )*}
+}
+
+// intra-sign conversions
+try_from_upper_bounded!(u16, u8);
+try_from_upper_bounded!(u32, u16, u8);
+try_from_upper_bounded!(u64, u32, u16, u8);
+try_from_upper_bounded!(u128, u64, u32, u16, u8);
+
+try_from_both_bounded!(i16, i8);
+try_from_both_bounded!(i32, i16, i8);
+try_from_both_bounded!(i64, i32, i16, i8);
+try_from_both_bounded!(i128, i64, i32, i16, i8);
+
+// unsigned-to-signed
+try_from_upper_bounded!(u8, i8);
+try_from_upper_bounded!(u16, i8, i16);
+try_from_upper_bounded!(u32, i8, i16, i32);
+try_from_upper_bounded!(u64, i8, i16, i32, i64);
+try_from_upper_bounded!(u128, i8, i16, i32, i64, i128);
+
+// signed-to-unsigned
+try_from_lower_bounded!(i8, u8, u16, u32, u64, u128);
+try_from_lower_bounded!(i16, u16, u32, u64, u128);
+try_from_lower_bounded!(i32, u32, u64, u128);
+try_from_lower_bounded!(i64, u64, u128);
+try_from_lower_bounded!(i128, u128);
+try_from_both_bounded!(i16, u8);
+try_from_both_bounded!(i32, u16, u8);
+try_from_both_bounded!(i64, u32, u16, u8);
+try_from_both_bounded!(i128, u64, u32, u16, u8);
+
+// usize/isize
+try_from_upper_bounded!(usize, isize);
+try_from_lower_bounded!(isize, usize);
+
+#[cfg(target_pointer_width = "16")]
+mod ptr_try_from_impls {
+ use super::TryFromIntError;
+ use crate::convert::TryFrom;
+
+ try_from_upper_bounded!(usize, u8);
+ try_from_unbounded!(usize, u16, u32, u64, u128);
+ try_from_upper_bounded!(usize, i8, i16);
+ try_from_unbounded!(usize, i32, i64, i128);
+
+ try_from_both_bounded!(isize, u8);
+ try_from_lower_bounded!(isize, u16, u32, u64, u128);
+ try_from_both_bounded!(isize, i8);
+ try_from_unbounded!(isize, i16, i32, i64, i128);
+
+ rev!(try_from_upper_bounded, usize, u32, u64, u128);
+ rev!(try_from_lower_bounded, usize, i8, i16);
+ rev!(try_from_both_bounded, usize, i32, i64, i128);
+
+ rev!(try_from_upper_bounded, isize, u16, u32, u64, u128);
+ rev!(try_from_both_bounded, isize, i32, i64, i128);
+}
+
+#[cfg(target_pointer_width = "32")]
+mod ptr_try_from_impls {
+ use super::TryFromIntError;
+ use crate::convert::TryFrom;
+
+ try_from_upper_bounded!(usize, u8, u16);
+ try_from_unbounded!(usize, u32, u64, u128);
+ try_from_upper_bounded!(usize, i8, i16, i32);
+ try_from_unbounded!(usize, i64, i128);
+
+ try_from_both_bounded!(isize, u8, u16);
+ try_from_lower_bounded!(isize, u32, u64, u128);
+ try_from_both_bounded!(isize, i8, i16);
+ try_from_unbounded!(isize, i32, i64, i128);
+
+ rev!(try_from_unbounded, usize, u32);
+ rev!(try_from_upper_bounded, usize, u64, u128);
+ rev!(try_from_lower_bounded, usize, i8, i16, i32);
+ rev!(try_from_both_bounded, usize, i64, i128);
+
+ rev!(try_from_unbounded, isize, u16);
+ rev!(try_from_upper_bounded, isize, u32, u64, u128);
+ rev!(try_from_unbounded, isize, i32);
+ rev!(try_from_both_bounded, isize, i64, i128);
+}
+
+#[cfg(target_pointer_width = "64")]
+mod ptr_try_from_impls {
+ use super::TryFromIntError;
+ use crate::convert::TryFrom;
+
+ try_from_upper_bounded!(usize, u8, u16, u32);
+ try_from_unbounded!(usize, u64, u128);
+ try_from_upper_bounded!(usize, i8, i16, i32, i64);
+ try_from_unbounded!(usize, i128);
+
+ try_from_both_bounded!(isize, u8, u16, u32);
+ try_from_lower_bounded!(isize, u64, u128);
+ try_from_both_bounded!(isize, i8, i16, i32);
+ try_from_unbounded!(isize, i64, i128);
+
+ rev!(try_from_unbounded, usize, u32, u64);
+ rev!(try_from_upper_bounded, usize, u128);
+ rev!(try_from_lower_bounded, usize, i8, i16, i32, i64);
+ rev!(try_from_both_bounded, usize, i128);
+
+ rev!(try_from_unbounded, isize, u16, u32);
+ rev!(try_from_upper_bounded, isize, u64, u128);
+ rev!(try_from_unbounded, isize, i32, i64);
+ rev!(try_from_both_bounded, isize, i128);
+}
+
+// Conversion traits for non-zero integer types
+use crate::num::NonZeroI128;
+use crate::num::NonZeroI16;
+use crate::num::NonZeroI32;
+use crate::num::NonZeroI64;
+use crate::num::NonZeroI8;
+use crate::num::NonZeroIsize;
+use crate::num::NonZeroU128;
+use crate::num::NonZeroU16;
+use crate::num::NonZeroU32;
+use crate::num::NonZeroU64;
+use crate::num::NonZeroU8;
+use crate::num::NonZeroUsize;
+
+macro_rules! nzint_impl_from {
+ ($Small: ty, $Large: ty, #[$attr:meta], $doc: expr) => {
+ #[$attr]
+ #[rustc_const_unstable(feature = "const_num_from_num", issue = "87852")]
+ impl const From<$Small> for $Large {
+ // Rustdocs on the impl block show a "[+] show undocumented items" toggle.
+ // Rustdocs on functions do not.
+ #[doc = $doc]
+ #[inline]
+ fn from(small: $Small) -> Self {
+ // SAFETY: input type guarantees the value is non-zero
+ unsafe {
+ Self::new_unchecked(From::from(small.get()))
+ }
+ }
+ }
+ };
+ ($Small: ty, $Large: ty, #[$attr:meta]) => {
+ nzint_impl_from!($Small,
+ $Large,
+ #[$attr],
+ concat!("Converts `",
+ stringify!($Small),
+ "` to `",
+ stringify!($Large),
+ "` losslessly."));
+ }
+}
+
+// Non-zero Unsigned -> Non-zero Unsigned
+nzint_impl_from! { NonZeroU8, NonZeroU16, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU8, NonZeroU32, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU8, NonZeroU64, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU8, NonZeroU128, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU8, NonZeroUsize, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU16, NonZeroU32, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU16, NonZeroU64, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU16, NonZeroU128, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU16, NonZeroUsize, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU32, NonZeroU64, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU32, NonZeroU128, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU64, NonZeroU128, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+
+// Non-zero Signed -> Non-zero Signed
+nzint_impl_from! { NonZeroI8, NonZeroI16, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroI8, NonZeroI32, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroI8, NonZeroI64, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroI8, NonZeroI128, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroI8, NonZeroIsize, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroI16, NonZeroI32, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroI16, NonZeroI64, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroI16, NonZeroI128, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroI16, NonZeroIsize, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroI32, NonZeroI64, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroI32, NonZeroI128, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroI64, NonZeroI128, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+
+// Non-zero Unsigned -> Non-zero Signed
+nzint_impl_from! { NonZeroU8, NonZeroI16, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU8, NonZeroI32, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU8, NonZeroI64, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU8, NonZeroI128, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU8, NonZeroIsize, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU16, NonZeroI32, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU16, NonZeroI64, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU16, NonZeroI128, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU32, NonZeroI64, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU32, NonZeroI128, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU64, NonZeroI128, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+
+macro_rules! nzint_impl_try_from_int {
+ ($Int: ty, $NonZeroInt: ty, #[$attr:meta], $doc: expr) => {
+ #[$attr]
+ impl TryFrom<$Int> for $NonZeroInt {
+ type Error = TryFromIntError;
+
+ // Rustdocs on the impl block show a "[+] show undocumented items" toggle.
+ // Rustdocs on functions do not.
+ #[doc = $doc]
+ #[inline]
+ fn try_from(value: $Int) -> Result<Self, Self::Error> {
+ Self::new(value).ok_or(TryFromIntError(()))
+ }
+ }
+ };
+ ($Int: ty, $NonZeroInt: ty, #[$attr:meta]) => {
+ nzint_impl_try_from_int!($Int,
+ $NonZeroInt,
+ #[$attr],
+ concat!("Attempts to convert `",
+ stringify!($Int),
+ "` to `",
+ stringify!($NonZeroInt),
+ "`."));
+ }
+}
+
+// Int -> Non-zero Int
+nzint_impl_try_from_int! { u8, NonZeroU8, #[stable(feature = "nzint_try_from_int_conv", since = "1.46.0")] }
+nzint_impl_try_from_int! { u16, NonZeroU16, #[stable(feature = "nzint_try_from_int_conv", since = "1.46.0")] }
+nzint_impl_try_from_int! { u32, NonZeroU32, #[stable(feature = "nzint_try_from_int_conv", since = "1.46.0")] }
+nzint_impl_try_from_int! { u64, NonZeroU64, #[stable(feature = "nzint_try_from_int_conv", since = "1.46.0")] }
+nzint_impl_try_from_int! { u128, NonZeroU128, #[stable(feature = "nzint_try_from_int_conv", since = "1.46.0")] }
+nzint_impl_try_from_int! { usize, NonZeroUsize, #[stable(feature = "nzint_try_from_int_conv", since = "1.46.0")] }
+nzint_impl_try_from_int! { i8, NonZeroI8, #[stable(feature = "nzint_try_from_int_conv", since = "1.46.0")] }
+nzint_impl_try_from_int! { i16, NonZeroI16, #[stable(feature = "nzint_try_from_int_conv", since = "1.46.0")] }
+nzint_impl_try_from_int! { i32, NonZeroI32, #[stable(feature = "nzint_try_from_int_conv", since = "1.46.0")] }
+nzint_impl_try_from_int! { i64, NonZeroI64, #[stable(feature = "nzint_try_from_int_conv", since = "1.46.0")] }
+nzint_impl_try_from_int! { i128, NonZeroI128, #[stable(feature = "nzint_try_from_int_conv", since = "1.46.0")] }
+nzint_impl_try_from_int! { isize, NonZeroIsize, #[stable(feature = "nzint_try_from_int_conv", since = "1.46.0")] }
+
+macro_rules! nzint_impl_try_from_nzint {
+ ($From:ty => $To:ty, $doc: expr) => {
+ #[stable(feature = "nzint_try_from_nzint_conv", since = "1.49.0")]
+ impl TryFrom<$From> for $To {
+ type Error = TryFromIntError;
+
+ // Rustdocs on the impl block show a "[+] show undocumented items" toggle.
+ // Rustdocs on functions do not.
+ #[doc = $doc]
+ #[inline]
+ fn try_from(value: $From) -> Result<Self, Self::Error> {
+ TryFrom::try_from(value.get()).map(|v| {
+ // SAFETY: $From is a NonZero type, so v is not zero.
+ unsafe { Self::new_unchecked(v) }
+ })
+ }
+ }
+ };
+ ($To:ty: $($From: ty),*) => {$(
+ nzint_impl_try_from_nzint!(
+ $From => $To,
+ concat!(
+ "Attempts to convert `",
+ stringify!($From),
+ "` to `",
+ stringify!($To),
+ "`.",
+ )
+ );
+ )*};
+}
+
+// Non-zero int -> non-zero unsigned int
+nzint_impl_try_from_nzint! { NonZeroU8: NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32, NonZeroI32, NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize }
+nzint_impl_try_from_nzint! { NonZeroU16: NonZeroI8, NonZeroI16, NonZeroU32, NonZeroI32, NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize }
+nzint_impl_try_from_nzint! { NonZeroU32: NonZeroI8, NonZeroI16, NonZeroI32, NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize }
+nzint_impl_try_from_nzint! { NonZeroU64: NonZeroI8, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize }
+nzint_impl_try_from_nzint! { NonZeroU128: NonZeroI8, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI128, NonZeroUsize, NonZeroIsize }
+nzint_impl_try_from_nzint! { NonZeroUsize: NonZeroI8, NonZeroI16, NonZeroU32, NonZeroI32, NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroIsize }
+
+// Non-zero int -> non-zero signed int
+nzint_impl_try_from_nzint! { NonZeroI8: NonZeroU8, NonZeroU16, NonZeroI16, NonZeroU32, NonZeroI32, NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize }
+nzint_impl_try_from_nzint! { NonZeroI16: NonZeroU16, NonZeroU32, NonZeroI32, NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize }
+nzint_impl_try_from_nzint! { NonZeroI32: NonZeroU32, NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize }
+nzint_impl_try_from_nzint! { NonZeroI64: NonZeroU64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize }
+nzint_impl_try_from_nzint! { NonZeroI128: NonZeroU128, NonZeroUsize, NonZeroIsize }
+nzint_impl_try_from_nzint! { NonZeroIsize: NonZeroU16, NonZeroU32, NonZeroI32, NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize }
diff --git a/library/core/src/default.rs b/library/core/src/default.rs
new file mode 100644
index 000000000..1ce00828b
--- /dev/null
+++ b/library/core/src/default.rs
@@ -0,0 +1,222 @@
+//! The `Default` trait for types which may have meaningful default values.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+/// A trait for giving a type a useful default value.
+///
+/// Sometimes, you want to fall back to some kind of default value, and
+/// don't particularly care what it is. This comes up often with `struct`s
+/// that define a set of options:
+///
+/// ```
+/// # #[allow(dead_code)]
+/// struct SomeOptions {
+/// foo: i32,
+/// bar: f32,
+/// }
+/// ```
+///
+/// How can we define some default values? You can use `Default`:
+///
+/// ```
+/// # #[allow(dead_code)]
+/// #[derive(Default)]
+/// struct SomeOptions {
+/// foo: i32,
+/// bar: f32,
+/// }
+///
+/// fn main() {
+/// let options: SomeOptions = Default::default();
+/// }
+/// ```
+///
+/// Now, you get all of the default values. Rust implements `Default` for various primitive types.
+///
+/// If you want to override a particular option, but still retain the other defaults:
+///
+/// ```
+/// # #[allow(dead_code)]
+/// # #[derive(Default)]
+/// # struct SomeOptions {
+/// # foo: i32,
+/// # bar: f32,
+/// # }
+/// fn main() {
+/// let options = SomeOptions { foo: 42, ..Default::default() };
+/// }
+/// ```
+///
+/// ## Derivable
+///
+/// This trait can be used with `#[derive]` if all of the type's fields implement
+/// `Default`. When `derive`d, it will use the default value for each field's type.
+///
+/// ### `enum`s
+///
+/// When using `#[derive(Default)]` on an `enum`, you need to choose which unit variant will be
+/// the default. You do this by placing the `#[default]` attribute on the variant.
+///
+/// ```
+/// #[derive(Default)]
+/// enum Kind {
+/// #[default]
+/// A,
+/// B,
+/// C,
+/// }
+/// ```
+///
+/// You cannot use the `#[default]` attribute on non-unit or non-exhaustive variants.
+///
+/// ## How can I implement `Default`?
+///
+/// Provide an implementation for the `default()` method that returns the value of
+/// your type that should be the default:
+///
+/// ```
+/// # #![allow(dead_code)]
+/// enum Kind {
+/// A,
+/// B,
+/// C,
+/// }
+///
+/// impl Default for Kind {
+/// fn default() -> Self { Kind::A }
+/// }
+/// ```
+///
+/// # Examples
+///
+/// ```
+/// # #[allow(dead_code)]
+/// #[derive(Default)]
+/// struct SomeOptions {
+/// foo: i32,
+/// bar: f32,
+/// }
+/// ```
+#[cfg_attr(not(test), rustc_diagnostic_item = "Default")]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait Default: Sized {
+ /// Returns the "default value" for a type.
+ ///
+ /// Default values are often some kind of initial value, identity value, or anything else that
+ /// may make sense as a default.
+ ///
+ /// # Examples
+ ///
+ /// Using built-in default values:
+ ///
+ /// ```
+ /// let i: i8 = Default::default();
+ /// let (x, y): (Option<String>, f64) = Default::default();
+ /// let (a, b, (c, d)): (i32, u32, (bool, bool)) = Default::default();
+ /// ```
+ ///
+ /// Making your own:
+ ///
+ /// ```
+ /// # #[allow(dead_code)]
+ /// enum Kind {
+ /// A,
+ /// B,
+ /// C,
+ /// }
+ ///
+ /// impl Default for Kind {
+ /// fn default() -> Self { Kind::A }
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn default() -> Self;
+}
+
+/// Returns the default value of a type according to the `Default` trait.
+///
+/// The type to return is inferred from context; this is equivalent to
+/// `Default::default()` but shorter to type.
+///
+/// For example:
+/// ```
+/// #![feature(default_free_fn)]
+///
+/// use std::default::default;
+///
+/// #[derive(Default)]
+/// struct AppConfig {
+/// foo: FooConfig,
+/// bar: BarConfig,
+/// }
+///
+/// #[derive(Default)]
+/// struct FooConfig {
+/// foo: i32,
+/// }
+///
+/// #[derive(Default)]
+/// struct BarConfig {
+/// bar: f32,
+/// baz: u8,
+/// }
+///
+/// fn main() {
+/// let options = AppConfig {
+/// foo: default(),
+/// bar: BarConfig {
+/// bar: 10.1,
+/// ..default()
+/// },
+/// };
+/// }
+/// ```
+#[unstable(feature = "default_free_fn", issue = "73014")]
+#[must_use]
+#[inline]
+pub fn default<T: Default>() -> T {
+ Default::default()
+}
+
+/// Derive macro generating an impl of the trait `Default`.
+#[rustc_builtin_macro(Default, attributes(default))]
+#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+#[allow_internal_unstable(core_intrinsics)]
+pub macro Default($item:item) {
+ /* compiler built-in */
+}
+
+macro_rules! default_impl {
+ ($t:ty, $v:expr, $doc:tt) => {
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
+ impl const Default for $t {
+ #[inline]
+ #[doc = $doc]
+ fn default() -> $t {
+ $v
+ }
+ }
+ };
+}
+
+default_impl! { (), (), "Returns the default value of `()`" }
+default_impl! { bool, false, "Returns the default value of `false`" }
+default_impl! { char, '\x00', "Returns the default value of `\\x00`" }
+
+default_impl! { usize, 0, "Returns the default value of `0`" }
+default_impl! { u8, 0, "Returns the default value of `0`" }
+default_impl! { u16, 0, "Returns the default value of `0`" }
+default_impl! { u32, 0, "Returns the default value of `0`" }
+default_impl! { u64, 0, "Returns the default value of `0`" }
+default_impl! { u128, 0, "Returns the default value of `0`" }
+
+default_impl! { isize, 0, "Returns the default value of `0`" }
+default_impl! { i8, 0, "Returns the default value of `0`" }
+default_impl! { i16, 0, "Returns the default value of `0`" }
+default_impl! { i32, 0, "Returns the default value of `0`" }
+default_impl! { i64, 0, "Returns the default value of `0`" }
+default_impl! { i128, 0, "Returns the default value of `0`" }
+
+default_impl! { f32, 0.0f32, "Returns the default value of `0.0`" }
+default_impl! { f64, 0.0f64, "Returns the default value of `0.0`" }
diff --git a/library/core/src/ffi/c_char.md b/library/core/src/ffi/c_char.md
new file mode 100644
index 000000000..b262a3663
--- /dev/null
+++ b/library/core/src/ffi/c_char.md
@@ -0,0 +1,8 @@
+Equivalent to C's `char` type.
+
+[C's `char` type] is completely unlike [Rust's `char` type]; while Rust's type represents a Unicode scalar value, C's `char` type is just an ordinary integer. On modern architectures this type will always be either [`i8`] or [`u8`], as those architectures use byte-addressed memory with 8-bit bytes.
+
+C chars are most commonly used to make C strings. Unlike Rust strings, which store their length alongside the data, C strings mark the end of a string with the character `'\0'`. See `CStr` for more information.
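+
+As a sketch, a nul-terminated byte string can be viewed through `CStr`:
+
+```
+use std::ffi::CStr;
+
+let c_str = CStr::from_bytes_with_nul(b"hello\0").unwrap();
+assert_eq!(c_str.to_str().unwrap(), "hello");
+```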
+
+[C's `char` type]: https://en.wikipedia.org/wiki/C_data_types#Basic_types
+[Rust's `char` type]: char
diff --git a/library/core/src/ffi/c_double.md b/library/core/src/ffi/c_double.md
new file mode 100644
index 000000000..57f453482
--- /dev/null
+++ b/library/core/src/ffi/c_double.md
@@ -0,0 +1,6 @@
+Equivalent to C's `double` type.
+
+This type will almost always be [`f64`], which is guaranteed to be an [IEEE-754 double-precision float] in Rust. That said, the standard technically only guarantees that it be a floating-point number with at least the precision of a [`float`], and it may be `f32` or something entirely different from the IEEE-754 standard.
+
+[IEEE-754 double-precision float]: https://en.wikipedia.org/wiki/IEEE_754
+[`float`]: c_float
diff --git a/library/core/src/ffi/c_float.md b/library/core/src/ffi/c_float.md
new file mode 100644
index 000000000..61e2abc05
--- /dev/null
+++ b/library/core/src/ffi/c_float.md
@@ -0,0 +1,5 @@
+Equivalent to C's `float` type.
+
+This type will almost always be [`f32`], which is guaranteed to be an [IEEE-754 single-precision float] in Rust. That said, the standard technically only guarantees that it be a floating-point number, and it may have less precision than `f32` or not follow the IEEE-754 standard at all.
+
+[IEEE-754 single-precision float]: https://en.wikipedia.org/wiki/IEEE_754
diff --git a/library/core/src/ffi/c_int.md b/library/core/src/ffi/c_int.md
new file mode 100644
index 000000000..8062ff230
--- /dev/null
+++ b/library/core/src/ffi/c_int.md
@@ -0,0 +1,5 @@
+Equivalent to C's `signed int` (`int`) type.
+
+This type will almost always be [`i32`], but may differ on some esoteric systems. The C standard technically only requires that this type be a signed integer that is at least the size of a [`short`]; some systems define it as an [`i16`], for example.
+
+[`short`]: c_short
diff --git a/library/core/src/ffi/c_long.md b/library/core/src/ffi/c_long.md
new file mode 100644
index 000000000..cc160783f
--- /dev/null
+++ b/library/core/src/ffi/c_long.md
@@ -0,0 +1,5 @@
+Equivalent to C's `signed long` (`long`) type.
+
+This type will always be [`i32`] or [`i64`]. Most notably, 64-bit Linux-based systems use an `i64`, but Windows uses an `i32` even on 64-bit targets. The C standard technically only requires that this type be a signed integer that is at least 32 bits and at least the size of an [`int`], although in practice no system has a `long` that is neither an `i32` nor an `i64`.
+
+[`int`]: c_int
diff --git a/library/core/src/ffi/c_longlong.md b/library/core/src/ffi/c_longlong.md
new file mode 100644
index 000000000..49c61bd61
--- /dev/null
+++ b/library/core/src/ffi/c_longlong.md
@@ -0,0 +1,5 @@
+Equivalent to C's `signed long long` (`long long`) type.
+
+This type will almost always be [`i64`], but may differ on some systems. The C standard technically only requires that this type be a signed integer that is at least 64 bits and at least the size of a [`long`], although in practice, no system would have a `long long` that is not an `i64`, as most systems do not have a standardised [`i128`] type.
+
+[`long`]: c_long
diff --git a/library/core/src/ffi/c_schar.md b/library/core/src/ffi/c_schar.md
new file mode 100644
index 000000000..69879c9f1
--- /dev/null
+++ b/library/core/src/ffi/c_schar.md
@@ -0,0 +1,5 @@
+Equivalent to C's `signed char` type.
+
+This type will always be [`i8`], but is included for completeness. It is defined as being a signed integer the same size as a C [`char`].
+
+[`char`]: c_char
diff --git a/library/core/src/ffi/c_short.md b/library/core/src/ffi/c_short.md
new file mode 100644
index 000000000..3d1e53d13
--- /dev/null
+++ b/library/core/src/ffi/c_short.md
@@ -0,0 +1,5 @@
+Equivalent to C's `signed short` (`short`) type.
+
+This type will almost always be [`i16`], but may differ on some esoteric systems. The C standard technically only requires that this type be a signed integer with at least 16 bits; some systems may define it as `i32`, for example.
+
diff --git a/library/core/src/ffi/c_str.rs b/library/core/src/ffi/c_str.rs
new file mode 100644
index 000000000..82e63a7fe
--- /dev/null
+++ b/library/core/src/ffi/c_str.rs
@@ -0,0 +1,608 @@
+use crate::ascii;
+use crate::cmp::Ordering;
+use crate::ffi::c_char;
+use crate::fmt::{self, Write};
+use crate::intrinsics;
+use crate::ops;
+use crate::slice;
+use crate::slice::memchr;
+use crate::str;
+
+/// Representation of a borrowed C string.
+///
+/// This type represents a borrowed reference to a nul-terminated
+/// array of bytes. It can be constructed safely from a <code>&[[u8]]</code>
+/// slice, or unsafely from a raw `*const c_char`. It can then be
+/// converted to a Rust <code>&[str]</code> by performing UTF-8 validation, or
+/// into an owned `CString`.
+///
+/// `&CStr` is to `CString` as <code>&[str]</code> is to `String`: the former
+/// in each pair are borrowed references; the latter are owned
+/// strings.
+///
+/// Note that this structure is **not** `repr(C)` and is therefore not
+/// recommended for use in the signatures of FFI functions. Instead, safe
+/// wrappers of FFI functions may leverage the unsafe [`CStr::from_ptr`]
+/// constructor to provide a safe interface to other consumers.
+///
+/// # Examples
+///
+/// Inspecting a foreign C string:
+///
+/// ```ignore (extern-declaration)
+/// use std::ffi::CStr;
+/// use std::os::raw::c_char;
+///
+/// extern "C" { fn my_string() -> *const c_char; }
+///
+/// unsafe {
+/// let slice = CStr::from_ptr(my_string());
+/// println!("string buffer size without nul terminator: {}", slice.to_bytes().len());
+/// }
+/// ```
+///
+/// Passing a Rust-originating C string:
+///
+/// ```ignore (extern-declaration)
+/// use std::ffi::{CString, CStr};
+/// use std::os::raw::c_char;
+///
+/// fn work(data: &CStr) {
+/// extern "C" { fn work_with(data: *const c_char); }
+///
+/// unsafe { work_with(data.as_ptr()) }
+/// }
+///
+/// let s = CString::new("data data data data").expect("CString::new failed");
+/// work(&s);
+/// ```
+///
+/// Converting a foreign C string into a Rust `String`:
+///
+/// ```ignore (extern-declaration)
+/// use std::ffi::CStr;
+/// use std::os::raw::c_char;
+///
+/// extern "C" { fn my_string() -> *const c_char; }
+///
+/// fn my_string_safe() -> String {
+/// let cstr = unsafe { CStr::from_ptr(my_string()) };
+/// // Get copy-on-write Cow<'_, str>, then guarantee a freshly-owned String allocation
+/// String::from_utf8_lossy(cstr.to_bytes()).to_string()
+/// }
+///
+/// println!("string: {}", my_string_safe());
+/// ```
+///
+/// [str]: prim@str "str"
+#[derive(Hash)]
+#[cfg_attr(not(test), rustc_diagnostic_item = "CStr")]
+#[stable(feature = "core_c_str", since = "1.64.0")]
+#[rustc_has_incoherent_inherent_impls]
+// FIXME:
+// `fn from` in `impl From<&CStr> for Box<CStr>` currently relies on `CStr`
+// being layout-compatible with `[u8]`.
+// When attribute privacy is implemented, `CStr` should be annotated as `#[repr(transparent)]`.
+// In any case, the `CStr` representation and layout are considered an implementation
+// detail; they are not documented and must not be relied upon.
+pub struct CStr {
+ // FIXME: this should not be represented with a DST slice but rather with
+ // just a raw `c_char` along with some form of marker to make
+ // this an unsized type. Essentially `sizeof(&CStr)` should be the
+ // same as `sizeof(&c_char)` but `CStr` should be an unsized type.
+ inner: [c_char],
+}
+
+/// An error indicating that a nul byte was not in the expected position.
+///
+/// The slice used to create a [`CStr`] must have one and only one nul byte,
+/// positioned at the end.
+///
+/// This error is created by the [`CStr::from_bytes_with_nul`] method.
+/// See its documentation for more.
+///
+/// # Examples
+///
+/// ```
+/// use std::ffi::{CStr, FromBytesWithNulError};
+///
+/// let _: FromBytesWithNulError = CStr::from_bytes_with_nul(b"f\0oo").unwrap_err();
+/// ```
+#[derive(Clone, PartialEq, Eq, Debug)]
+#[stable(feature = "core_c_str", since = "1.64.0")]
+pub struct FromBytesWithNulError {
+ kind: FromBytesWithNulErrorKind,
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+enum FromBytesWithNulErrorKind {
+ InteriorNul(usize),
+ NotNulTerminated,
+}
+
+impl FromBytesWithNulError {
+ fn interior_nul(pos: usize) -> FromBytesWithNulError {
+ FromBytesWithNulError { kind: FromBytesWithNulErrorKind::InteriorNul(pos) }
+ }
+ fn not_nul_terminated() -> FromBytesWithNulError {
+ FromBytesWithNulError { kind: FromBytesWithNulErrorKind::NotNulTerminated }
+ }
+
+ #[doc(hidden)]
+ #[unstable(feature = "cstr_internals", issue = "none")]
+ pub fn __description(&self) -> &str {
+ match self.kind {
+ FromBytesWithNulErrorKind::InteriorNul(..) => {
+ "data provided contains an interior nul byte"
+ }
+ FromBytesWithNulErrorKind::NotNulTerminated => "data provided is not nul terminated",
+ }
+ }
+}
+
+/// An error indicating that no nul byte was present.
+///
+/// A slice used to create a [`CStr`] must contain a nul byte somewhere
+/// within the slice.
+///
+/// This error is created by the [`CStr::from_bytes_until_nul`] method.
+///
+#[derive(Clone, PartialEq, Eq, Debug)]
+#[unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")]
+pub struct FromBytesUntilNulError(());
+
+#[unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")]
+impl fmt::Display for FromBytesUntilNulError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "data provided does not contain a nul")
+ }
+}
+
+#[stable(feature = "cstr_debug", since = "1.3.0")]
+impl fmt::Debug for CStr {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "\"")?;
+ for byte in self.to_bytes().iter().flat_map(|&b| ascii::escape_default(b)) {
+ f.write_char(byte as char)?;
+ }
+ write!(f, "\"")
+ }
+}
+
+#[stable(feature = "cstr_default", since = "1.10.0")]
+impl Default for &CStr {
+ fn default() -> Self {
+ const SLICE: &[c_char] = &[0];
+ // SAFETY: `SLICE` is indeed pointing to a valid nul-terminated string.
+ unsafe { CStr::from_ptr(SLICE.as_ptr()) }
+ }
+}
+
+#[stable(feature = "frombyteswithnulerror_impls", since = "1.17.0")]
+impl fmt::Display for FromBytesWithNulError {
+ #[allow(deprecated, deprecated_in_future)]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str(self.__description())?;
+ if let FromBytesWithNulErrorKind::InteriorNul(pos) = self.kind {
+ write!(f, " at byte pos {pos}")?;
+ }
+ Ok(())
+ }
+}
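+
+// For example (a sketch of the messages rendered above):
+// `CStr::from_bytes_with_nul(b"a\0b\0")` fails with
+// "data provided contains an interior nul byte at byte pos 1", while
+// `CStr::from_bytes_with_nul(b"ab")` fails with
+// "data provided is not nul terminated".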
+
+impl CStr {
+ /// Wraps a raw C string with a safe C string wrapper.
+ ///
+ /// This function will wrap the provided `ptr` with a `CStr` wrapper, which
+ /// allows inspection and interoperation of non-owned C strings. The total
+ /// size of the raw C string must be smaller than `isize::MAX` **bytes**
+ /// in memory due to calling the `slice::from_raw_parts` function.
+ ///
+ /// # Safety
+ ///
+ /// * The memory pointed to by `ptr` must contain a valid nul terminator at the
+ /// end of the string.
+ ///
+    /// * `ptr` must be [valid] for reads of bytes up to and including the nul terminator.
+ /// This means in particular:
+ ///
+ /// * The entire memory range of this `CStr` must be contained within a single allocated object!
+ /// * `ptr` must be non-null even for a zero-length cstr.
+ ///
+ /// * The memory referenced by the returned `CStr` must not be mutated for
+ /// the duration of lifetime `'a`.
+ ///
+ /// > **Note**: This operation is intended to be a 0-cost cast but it is
+ /// > currently implemented with an up-front calculation of the length of
+ /// > the string. This is not guaranteed to always be the case.
+ ///
+ /// # Caveat
+ ///
+ /// The lifetime for the returned slice is inferred from its usage. To prevent accidental misuse,
+ /// it's suggested to tie the lifetime to whichever source lifetime is safe in the context,
+ /// such as by providing a helper function taking the lifetime of a host value for the slice,
+ /// or by explicit annotation.
+ ///
+ /// # Examples
+ ///
+ /// ```ignore (extern-declaration)
+ /// # fn main() {
+ /// use std::ffi::CStr;
+ /// use std::os::raw::c_char;
+ ///
+ /// extern "C" {
+ /// fn my_string() -> *const c_char;
+ /// }
+ ///
+ /// unsafe {
+ /// let slice = CStr::from_ptr(my_string());
+ /// println!("string returned: {}", slice.to_str().unwrap());
+ /// }
+ /// # }
+ /// ```
+ ///
+ /// [valid]: core::ptr#safety
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub unsafe fn from_ptr<'a>(ptr: *const c_char) -> &'a CStr {
+ // SAFETY: The caller has provided a pointer that points to a valid C
+ // string with a NUL terminator of size less than `isize::MAX`, whose
+ // content remain valid and doesn't change for the lifetime of the
+ // returned `CStr`.
+ //
+ // Thus computing the length is fine (a NUL byte exists), the call to
+ // from_raw_parts is safe because we know the length is at most `isize::MAX`, meaning
+ // the call to `from_bytes_with_nul_unchecked` is correct.
+ //
+ // The cast from c_char to u8 is ok because a c_char is always one byte.
+ unsafe {
+ extern "C" {
+ /// Provided by libc or compiler_builtins.
+ fn strlen(s: *const c_char) -> usize;
+ }
+ let len = strlen(ptr);
+ let ptr = ptr as *const u8;
+            CStr::from_bytes_with_nul_unchecked(slice::from_raw_parts(ptr, len + 1))
+ }
+ }
+
+ /// Creates a C string wrapper from a byte slice.
+ ///
+ /// This method will create a `CStr` from any byte slice that contains at
+ /// least one nul byte. The caller does not need to know or specify where
+ /// the nul byte is located.
+ ///
+ /// If the first byte is a nul character, this method will return an
+ /// empty `CStr`. If multiple nul characters are present, the `CStr` will
+ /// end at the first one.
+ ///
+ /// If the slice only has a single nul byte at the end, this method is
+ /// equivalent to [`CStr::from_bytes_with_nul`].
+ ///
+ /// # Examples
+ /// ```
+ /// #![feature(cstr_from_bytes_until_nul)]
+ ///
+ /// use std::ffi::CStr;
+ ///
+ /// let mut buffer = [0u8; 16];
+ /// unsafe {
+ /// // Here we might call an unsafe C function that writes a string
+ /// // into the buffer.
+ /// let buf_ptr = buffer.as_mut_ptr();
+ /// buf_ptr.write_bytes(b'A', 8);
+ /// }
+ /// // Attempt to extract a C nul-terminated string from the buffer.
+ /// let c_str = CStr::from_bytes_until_nul(&buffer[..]).unwrap();
+ /// assert_eq!(c_str.to_str().unwrap(), "AAAAAAAA");
+ /// ```
+ ///
+ #[unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")]
+ pub fn from_bytes_until_nul(bytes: &[u8]) -> Result<&CStr, FromBytesUntilNulError> {
+ let nul_pos = memchr::memchr(0, bytes);
+ match nul_pos {
+ Some(nul_pos) => {
+ let subslice = &bytes[..nul_pos + 1];
+ // SAFETY: We know there is a nul byte at nul_pos, so this slice
+ // (ending at the nul byte) is a well-formed C string.
+ Ok(unsafe { CStr::from_bytes_with_nul_unchecked(subslice) })
+ }
+ None => Err(FromBytesUntilNulError(())),
+ }
+ }
+
+ /// Creates a C string wrapper from a byte slice.
+ ///
+ /// This function will cast the provided `bytes` to a `CStr`
+ /// wrapper after ensuring that the byte slice is nul-terminated
+ /// and does not contain any interior nul bytes.
+ ///
+    /// If the first nul byte might not be at the end of the slice,
+    /// [`CStr::from_bytes_until_nul`] can be used instead.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CStr;
+ ///
+ /// let cstr = CStr::from_bytes_with_nul(b"hello\0");
+ /// assert!(cstr.is_ok());
+ /// ```
+ ///
+ /// Creating a `CStr` without a trailing nul terminator is an error:
+ ///
+ /// ```
+ /// use std::ffi::CStr;
+ ///
+ /// let cstr = CStr::from_bytes_with_nul(b"hello");
+ /// assert!(cstr.is_err());
+ /// ```
+ ///
+ /// Creating a `CStr` with an interior nul byte is an error:
+ ///
+ /// ```
+ /// use std::ffi::CStr;
+ ///
+ /// let cstr = CStr::from_bytes_with_nul(b"he\0llo\0");
+ /// assert!(cstr.is_err());
+ /// ```
+ #[stable(feature = "cstr_from_bytes", since = "1.10.0")]
+ pub fn from_bytes_with_nul(bytes: &[u8]) -> Result<&Self, FromBytesWithNulError> {
+ let nul_pos = memchr::memchr(0, bytes);
+ match nul_pos {
+ Some(nul_pos) if nul_pos + 1 == bytes.len() => {
+ // SAFETY: We know there is only one nul byte, at the end
+ // of the byte slice.
+ Ok(unsafe { Self::from_bytes_with_nul_unchecked(bytes) })
+ }
+ Some(nul_pos) => Err(FromBytesWithNulError::interior_nul(nul_pos)),
+ None => Err(FromBytesWithNulError::not_nul_terminated()),
+ }
+ }
+
+ /// Unsafely creates a C string wrapper from a byte slice.
+ ///
+ /// This function will cast the provided `bytes` to a `CStr` wrapper without
+ /// performing any sanity checks.
+ ///
+ /// # Safety
+ /// The provided slice **must** be nul-terminated and not contain any interior
+ /// nul bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::{CStr, CString};
+ ///
+ /// unsafe {
+ /// let cstring = CString::new("hello").expect("CString::new failed");
+ /// let cstr = CStr::from_bytes_with_nul_unchecked(cstring.to_bytes_with_nul());
+ /// assert_eq!(cstr, &*cstring);
+ /// }
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "cstr_from_bytes", since = "1.10.0")]
+ #[rustc_const_stable(feature = "const_cstr_unchecked", since = "1.59.0")]
+ #[rustc_allow_const_fn_unstable(const_eval_select)]
+ pub const unsafe fn from_bytes_with_nul_unchecked(bytes: &[u8]) -> &CStr {
+ fn rt_impl(bytes: &[u8]) -> &CStr {
+ // Chance at catching some UB at runtime with debug builds.
+ debug_assert!(!bytes.is_empty() && bytes[bytes.len() - 1] == 0);
+
+ // SAFETY: Casting to CStr is safe because its internal representation
+ // is a [u8] too (safe only inside std).
+ // Dereferencing the obtained pointer is safe because it comes from a
+ // reference. Making a reference is then safe because its lifetime
+ // is bound by the lifetime of the given `bytes`.
+ unsafe { &*(bytes as *const [u8] as *const CStr) }
+ }
+
+ const fn const_impl(bytes: &[u8]) -> &CStr {
+ // Saturating so that an empty slice panics in the assert with a good
+ // message, not here due to underflow.
+ let mut i = bytes.len().saturating_sub(1);
+ assert!(!bytes.is_empty() && bytes[i] == 0, "input was not nul-terminated");
+
+ // Ending null byte exists, skip to the rest.
+ while i != 0 {
+ i -= 1;
+ let byte = bytes[i];
+ assert!(byte != 0, "input contained interior nul");
+ }
+
+ // SAFETY: See `rt_impl` cast.
+ unsafe { &*(bytes as *const [u8] as *const CStr) }
+ }
+
+ // SAFETY: The const and runtime versions have identical behavior
+ // unless the safety contract of `from_bytes_with_nul_unchecked` is
+ // violated, which is UB.
+ unsafe { intrinsics::const_eval_select((bytes,), const_impl, rt_impl) }
+ }
+
+ /// Returns the inner pointer to this C string.
+ ///
+ /// The returned pointer will be valid for as long as `self` is, and points
+ /// to a contiguous region of memory terminated with a 0 byte to represent
+ /// the end of the string.
+ ///
+ /// **WARNING**
+ ///
+ /// The returned pointer is read-only; writing to it (including passing it
+ /// to C code that writes to it) causes undefined behavior.
+ ///
+ /// It is your responsibility to make sure that the underlying memory is not
+ /// freed too early. For example, the following code will cause undefined
+ /// behavior when `ptr` is used inside the `unsafe` block:
+ ///
+ /// ```no_run
+ /// # #![allow(unused_must_use)] #![allow(temporary_cstring_as_ptr)]
+ /// use std::ffi::CString;
+ ///
+ /// let ptr = CString::new("Hello").expect("CString::new failed").as_ptr();
+ /// unsafe {
+ /// // `ptr` is dangling
+ /// *ptr;
+ /// }
+ /// ```
+ ///
+ /// This happens because the pointer returned by `as_ptr` does not carry any
+ /// lifetime information and the `CString` is deallocated immediately after
+ /// the `CString::new("Hello").expect("CString::new failed").as_ptr()`
+ /// expression is evaluated.
+ /// To fix the problem, bind the `CString` to a local variable:
+ ///
+ /// ```no_run
+ /// # #![allow(unused_must_use)]
+ /// use std::ffi::CString;
+ ///
+ /// let hello = CString::new("Hello").expect("CString::new failed");
+ /// let ptr = hello.as_ptr();
+ /// unsafe {
+ /// // `ptr` is valid because `hello` is in scope
+ /// *ptr;
+ /// }
+ /// ```
+ ///
+ /// This way, the lifetime of the `CString` in `hello` encompasses
+ /// the lifetime of `ptr` and the `unsafe` block.
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_str_as_ptr", since = "1.32.0")]
+ pub const fn as_ptr(&self) -> *const c_char {
+ self.inner.as_ptr()
+ }
+
+ /// Converts this C string to a byte slice.
+ ///
+ /// The returned slice will **not** contain the trailing nul terminator that this C
+ /// string has.
+ ///
+ /// > **Note**: This method is currently implemented as a constant-time
+ /// > cast, but it is planned to alter its definition in the future to
+ /// > perform the length calculation whenever this method is called.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CStr;
+ ///
+ /// let cstr = CStr::from_bytes_with_nul(b"foo\0").expect("CStr::from_bytes_with_nul failed");
+ /// assert_eq!(cstr.to_bytes(), b"foo");
+ /// ```
+ #[inline]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn to_bytes(&self) -> &[u8] {
+ let bytes = self.to_bytes_with_nul();
+ // SAFETY: to_bytes_with_nul returns slice with length at least 1
+ unsafe { bytes.get_unchecked(..bytes.len() - 1) }
+ }
+
+ /// Converts this C string to a byte slice containing the trailing 0 byte.
+ ///
+ /// This function is the equivalent of [`CStr::to_bytes`] except that it
+ /// will retain the trailing nul terminator instead of chopping it off.
+ ///
+ /// > **Note**: This method is currently implemented as a 0-cost cast, but
+ /// > it is planned to alter its definition in the future to perform the
+ /// > length calculation whenever this method is called.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CStr;
+ ///
+ /// let cstr = CStr::from_bytes_with_nul(b"foo\0").expect("CStr::from_bytes_with_nul failed");
+ /// assert_eq!(cstr.to_bytes_with_nul(), b"foo\0");
+ /// ```
+ #[inline]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn to_bytes_with_nul(&self) -> &[u8] {
+ // SAFETY: Transmuting a slice of `c_char`s to a slice of `u8`s
+ // is safe on all supported targets.
+ unsafe { &*(&self.inner as *const [c_char] as *const [u8]) }
+ }
+
+ /// Yields a <code>&[str]</code> slice if the `CStr` contains valid UTF-8.
+ ///
+ /// If the contents of the `CStr` are valid UTF-8 data, this
+ /// function will return the corresponding <code>&[str]</code> slice. Otherwise,
+ /// it will return an error with details of where UTF-8 validation failed.
+ ///
+ /// [str]: prim@str "str"
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CStr;
+ ///
+ /// let cstr = CStr::from_bytes_with_nul(b"foo\0").expect("CStr::from_bytes_with_nul failed");
+ /// assert_eq!(cstr.to_str(), Ok("foo"));
+ /// ```
+ #[stable(feature = "cstr_to_str", since = "1.4.0")]
+ pub fn to_str(&self) -> Result<&str, str::Utf8Error> {
+ // N.B., when `CStr` is changed to perform the length check in `.to_bytes()`
+ // instead of in `from_ptr()`, it may be worth considering if this should
+ // be rewritten to do the UTF-8 check inline with the length calculation
+ // instead of doing it afterwards.
+ str::from_utf8(self.to_bytes())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl PartialEq for CStr {
+ fn eq(&self, other: &CStr) -> bool {
+ self.to_bytes().eq(other.to_bytes())
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Eq for CStr {}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl PartialOrd for CStr {
+ fn partial_cmp(&self, other: &CStr) -> Option<Ordering> {
+ self.to_bytes().partial_cmp(&other.to_bytes())
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Ord for CStr {
+ fn cmp(&self, other: &CStr) -> Ordering {
+ self.to_bytes().cmp(&other.to_bytes())
+ }
+}
+
+#[stable(feature = "cstr_range_from", since = "1.47.0")]
+impl ops::Index<ops::RangeFrom<usize>> for CStr {
+ type Output = CStr;
+
+ fn index(&self, index: ops::RangeFrom<usize>) -> &CStr {
+ let bytes = self.to_bytes_with_nul();
+ // we need to manually check the starting index to account for the null
+ // byte, since otherwise we could get an empty string that doesn't end
+ // in a null.
+ if index.start < bytes.len() {
+ // SAFETY: Non-empty tail of a valid `CStr` is still a valid `CStr`.
+ unsafe { CStr::from_bytes_with_nul_unchecked(&bytes[index.start..]) }
+ } else {
+ panic!(
+ "index out of bounds: the len is {} but the index is {}",
+ bytes.len(),
+ index.start
+ );
+ }
+ }
+}
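+
+// For example (a sketch of the behavior above): with `b"hello\0"`, `&cstr[2..]`
+// is the `CStr` for `b"llo\0"`, `&cstr[5..]` is the empty `CStr` (just the nul
+// terminator), and `&cstr[6..]` panics with the message above.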
+
+#[stable(feature = "cstring_asref", since = "1.7.0")]
+impl AsRef<CStr> for CStr {
+ #[inline]
+ fn as_ref(&self) -> &CStr {
+ self
+ }
+}
diff --git a/library/core/src/ffi/c_uchar.md b/library/core/src/ffi/c_uchar.md
new file mode 100644
index 000000000..b633bb7f8
--- /dev/null
+++ b/library/core/src/ffi/c_uchar.md
@@ -0,0 +1,5 @@
+Equivalent to C's `unsigned char` type.
+
+This type will always be [`u8`], but is included for completeness. It is defined as being an unsigned integer the same size as a C [`char`].
+
+[`char`]: c_char
diff --git a/library/core/src/ffi/c_uint.md b/library/core/src/ffi/c_uint.md
new file mode 100644
index 000000000..f3abea359
--- /dev/null
+++ b/library/core/src/ffi/c_uint.md
@@ -0,0 +1,5 @@
+Equivalent to C's `unsigned int` type.
+
+This type will almost always be [`u32`], but may differ on some esoteric systems. The C standard technically only requires that this type be an unsigned integer with the same size as an [`int`]; some systems define it as a [`u16`], for example.
+
+[`int`]: c_int
diff --git a/library/core/src/ffi/c_ulong.md b/library/core/src/ffi/c_ulong.md
new file mode 100644
index 000000000..4ab304e65
--- /dev/null
+++ b/library/core/src/ffi/c_ulong.md
@@ -0,0 +1,5 @@
+Equivalent to C's `unsigned long` type.
+
+This type will always be [`u32`] or [`u64`]. Most notably, many Linux-based systems use a `u64`, but Windows uses a `u32`. The C standard technically only requires that this type be an unsigned integer with the same size as a [`long`], although in practice, no system has an `unsigned long` that is neither a `u32` nor a `u64`.
+
+[`long`]: c_long
diff --git a/library/core/src/ffi/c_ulonglong.md b/library/core/src/ffi/c_ulonglong.md
new file mode 100644
index 000000000..a27d70e17
--- /dev/null
+++ b/library/core/src/ffi/c_ulonglong.md
@@ -0,0 +1,5 @@
+Equivalent to C's `unsigned long long` type.
+
+This type will almost always be [`u64`], but may differ on some systems. The C standard technically only requires that this type be an unsigned integer with the same size as a [`long long`], although in practice, no system has an `unsigned long long` that is not a `u64`, as most systems do not have a standardised [`u128`] type.
+
+[`long long`]: c_longlong
diff --git a/library/core/src/ffi/c_ushort.md b/library/core/src/ffi/c_ushort.md
new file mode 100644
index 000000000..6928e51b3
--- /dev/null
+++ b/library/core/src/ffi/c_ushort.md
@@ -0,0 +1,5 @@
+Equivalent to C's `unsigned short` type.
+
+This type will almost always be [`u16`], but may differ on some esoteric systems. The C standard technically only requires that this type be an unsigned integer with the same size as a [`short`].
+
+[`short`]: c_short
diff --git a/library/core/src/ffi/c_void.md b/library/core/src/ffi/c_void.md
new file mode 100644
index 000000000..ee7403aa0
--- /dev/null
+++ b/library/core/src/ffi/c_void.md
@@ -0,0 +1,16 @@
+Equivalent to C's `void` type when used as a [pointer].
+
+In essence, `*const c_void` is equivalent to C's `const void*`
+and `*mut c_void` is equivalent to C's `void*`. That said, this is
+*not* the same as C's `void` return type, which is Rust's `()` type.
+
+To model pointers to opaque types in FFI, until `extern type` is
+stabilized, it is recommended to use a newtype wrapper around an empty
+byte array. See the [Nomicon] for details.
+
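+For example, a foreign handle can be modeled as a raw `c_void` pointer. A
+brief sketch, where `make_handle` and `free_handle` stand in for hypothetical
+C functions:
+
+```ignore (extern-declaration)
+use std::ffi::c_void;
+
+extern "C" {
+    fn make_handle() -> *mut c_void;     // hypothetical
+    fn free_handle(handle: *mut c_void); // hypothetical
+}
+
+unsafe {
+    let handle = make_handle();
+    // ... pass `handle` back into the C API ...
+    free_handle(handle);
+}
+```
+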
+Code that needs to support Rust compilers as old as 1.1.0 can use
+`std::os::raw::c_void` instead; since Rust 1.30.0, that path has been a
+re-export of this definition. For more information, please read [RFC 2521].
+
+[Nomicon]: https://doc.rust-lang.org/nomicon/ffi.html#representing-opaque-structs
+[RFC 2521]: https://github.com/rust-lang/rfcs/blob/master/text/2521-c_void-reunification.md
diff --git a/library/core/src/ffi/mod.rs b/library/core/src/ffi/mod.rs
new file mode 100644
index 000000000..ec1eaa99f
--- /dev/null
+++ b/library/core/src/ffi/mod.rs
@@ -0,0 +1,580 @@
+//! Platform-specific types, as defined by C.
+//!
+//! Code that interacts via FFI will almost certainly be using the
+//! base types provided by C, which aren't nearly as nicely defined
+//! as Rust's primitive types. This module provides types which will
+//! match those defined by C, so that code that interacts with C will
+//! refer to the correct types.
+
+#![stable(feature = "", since = "1.30.0")]
+#![allow(non_camel_case_types)]
+
+use crate::fmt;
+use crate::marker::PhantomData;
+use crate::num::*;
+use crate::ops::{Deref, DerefMut};
+
+#[stable(feature = "core_c_str", since = "1.64.0")]
+pub use self::c_str::{CStr, FromBytesUntilNulError, FromBytesWithNulError};
+
+mod c_str;
+
+macro_rules! type_alias_no_nz {
+ {
+ $Docfile:tt, $Alias:ident = $Real:ty;
+ $( $Cfg:tt )*
+ } => {
+ #[doc = include_str!($Docfile)]
+ $( $Cfg )*
+ #[stable(feature = "core_ffi_c", since = "1.64.0")]
+ pub type $Alias = $Real;
+ }
+}
+
+// To verify that the NonZero types in this file's macro invocations correspond
+//
+// perl -n < library/std/src/os/raw/mod.rs -e 'next unless m/type_alias\!/; die "$_ ?" unless m/, (c_\w+) = (\w+), NonZero_(\w+) = NonZero(\w+)/; die "$_ ?" unless $3 eq $1 and $4 eq ucfirst $2'
+//
+// NB this does not check that the main c_* types are right.
+
+macro_rules! type_alias {
+ {
+ $Docfile:tt, $Alias:ident = $Real:ty, $NZAlias:ident = $NZReal:ty;
+ $( $Cfg:tt )*
+ } => {
+ type_alias_no_nz! { $Docfile, $Alias = $Real; $( $Cfg )* }
+
+ #[doc = concat!("Type alias for `NonZero` version of [`", stringify!($Alias), "`]")]
+ #[unstable(feature = "raw_os_nonzero", issue = "82363")]
+ $( $Cfg )*
+ pub type $NZAlias = $NZReal;
+ }
+}
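+
+// For example, a sketch of one expansion:
+//
+//     type_alias! { "c_schar.md", c_schar = i8, NonZero_c_schar = NonZeroI8; }
+//
+// yields a stable `pub type c_schar = i8;` documented from `c_schar.md`, plus
+// an unstable `pub type NonZero_c_schar = NonZeroI8;` behind `raw_os_nonzero`.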
+
+type_alias! { "c_char.md", c_char = c_char_definition::c_char, NonZero_c_char = c_char_definition::NonZero_c_char;
+// Make this type alias appear cfg-dependent so that Clippy does not suggest
+// replacing `0 as c_char` with `0_i8`/`0_u8`. This #[cfg(all())] can be removed
+// after the false positive in https://github.com/rust-lang/rust-clippy/issues/8093
+// is fixed.
+#[cfg(all())]
+#[doc(cfg(all()))] }
+
+type_alias! { "c_schar.md", c_schar = i8, NonZero_c_schar = NonZeroI8; }
+type_alias! { "c_uchar.md", c_uchar = u8, NonZero_c_uchar = NonZeroU8; }
+type_alias! { "c_short.md", c_short = i16, NonZero_c_short = NonZeroI16; }
+type_alias! { "c_ushort.md", c_ushort = u16, NonZero_c_ushort = NonZeroU16; }
+
+type_alias! { "c_int.md", c_int = c_int_definition::c_int, NonZero_c_int = c_int_definition::NonZero_c_int;
+#[doc(cfg(all()))] }
+type_alias! { "c_uint.md", c_uint = c_int_definition::c_uint, NonZero_c_uint = c_int_definition::NonZero_c_uint;
+#[doc(cfg(all()))] }
+
+type_alias! { "c_long.md", c_long = c_long_definition::c_long, NonZero_c_long = c_long_definition::NonZero_c_long;
+#[doc(cfg(all()))] }
+type_alias! { "c_ulong.md", c_ulong = c_long_definition::c_ulong, NonZero_c_ulong = c_long_definition::NonZero_c_ulong;
+#[doc(cfg(all()))] }
+
+type_alias! { "c_longlong.md", c_longlong = i64, NonZero_c_longlong = NonZeroI64; }
+type_alias! { "c_ulonglong.md", c_ulonglong = u64, NonZero_c_ulonglong = NonZeroU64; }
+
+type_alias_no_nz! { "c_float.md", c_float = f32; }
+type_alias_no_nz! { "c_double.md", c_double = f64; }
+
+/// Equivalent to C's `size_t` type, from `stddef.h` (or `cstddef` for C++).
+///
+/// This type is currently always [`usize`]; however, in the future there may be
+/// platforms where this is not the case.
+#[unstable(feature = "c_size_t", issue = "88345")]
+pub type c_size_t = usize;
+
+/// Equivalent to C's `ptrdiff_t` type, from `stddef.h` (or `cstddef` for C++).
+///
+/// This type is currently always [`isize`]; however, in the future there may be
+/// platforms where this is not the case.
+#[unstable(feature = "c_size_t", issue = "88345")]
+pub type c_ptrdiff_t = isize;
+
+/// Equivalent to C's `ssize_t` (on POSIX) or `SSIZE_T` (on Windows) type.
+///
+/// This type is currently always [`isize`]; however, in the future there may be
+/// platforms where this is not the case.
+#[unstable(feature = "c_size_t", issue = "88345")]
+pub type c_ssize_t = isize;
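+
+// For example (a sketch; `strlen` is C's `strlen` from `string.h`, declared
+// here with the unstable `c_size_t` alias):
+//
+//     extern "C" {
+//         fn strlen(s: *const c_char) -> c_size_t;
+//     }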
+
+mod c_char_definition {
+ cfg_if! {
+ // These are the targets on which c_char is unsigned.
+ if #[cfg(any(
+ all(
+ target_os = "linux",
+ any(
+ target_arch = "aarch64",
+ target_arch = "arm",
+ target_arch = "hexagon",
+ target_arch = "powerpc",
+ target_arch = "powerpc64",
+ target_arch = "s390x",
+ target_arch = "riscv64",
+ target_arch = "riscv32"
+ )
+ ),
+ all(target_os = "android", any(target_arch = "aarch64", target_arch = "arm")),
+ all(target_os = "l4re", target_arch = "x86_64"),
+ all(
+ any(target_os = "freebsd", target_os = "openbsd"),
+ any(
+ target_arch = "aarch64",
+ target_arch = "arm",
+ target_arch = "powerpc",
+ target_arch = "powerpc64",
+ target_arch = "riscv64"
+ )
+ ),
+ all(
+ target_os = "netbsd",
+ any(target_arch = "aarch64", target_arch = "arm", target_arch = "powerpc")
+ ),
+ all(
+ target_os = "vxworks",
+ any(
+ target_arch = "aarch64",
+ target_arch = "arm",
+ target_arch = "powerpc64",
+ target_arch = "powerpc"
+ )
+ ),
+ all(target_os = "fuchsia", target_arch = "aarch64"),
+ target_os = "horizon"
+ ))] {
+ pub type c_char = u8;
+ pub type NonZero_c_char = crate::num::NonZeroU8;
+ } else {
+ // On every other target, c_char is signed.
+ pub type c_char = i8;
+ pub type NonZero_c_char = crate::num::NonZeroI8;
+ }
+ }
+}
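+
+// Because the signedness of `c_char` differs across the targets above,
+// portable code should convert explicitly, e.g. `b'A' as c_char`, rather than
+// assuming `i8` or `u8`.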
+
+mod c_int_definition {
+ cfg_if! {
+ if #[cfg(any(target_arch = "avr", target_arch = "msp430"))] {
+ pub type c_int = i16;
+ pub type NonZero_c_int = crate::num::NonZeroI16;
+ pub type c_uint = u16;
+ pub type NonZero_c_uint = crate::num::NonZeroU16;
+ } else {
+ pub type c_int = i32;
+ pub type NonZero_c_int = crate::num::NonZeroI32;
+ pub type c_uint = u32;
+ pub type NonZero_c_uint = crate::num::NonZeroU32;
+ }
+ }
+}
+
+mod c_long_definition {
+ cfg_if! {
+ if #[cfg(all(target_pointer_width = "64", not(windows)))] {
+ pub type c_long = i64;
+ pub type NonZero_c_long = crate::num::NonZeroI64;
+ pub type c_ulong = u64;
+ pub type NonZero_c_ulong = crate::num::NonZeroU64;
+ } else {
+            // The minimum size of `long` in the C standard is 32 bits
+ pub type c_long = i32;
+ pub type NonZero_c_long = crate::num::NonZeroI32;
+ pub type c_ulong = u32;
+ pub type NonZero_c_ulong = crate::num::NonZeroU32;
+ }
+ }
+}
+
+// N.B., for LLVM to recognize the void pointer type and by extension
+// functions like malloc(), we need to have it represented as i8* in
+// LLVM bitcode. The enum used here ensures this and prevents misuse
+// of the "raw" type by only having private variants. We need two
+// variants, because the compiler complains about the repr attribute
+// otherwise and we need at least one variant as otherwise the enum
+// would be uninhabited and at least dereferencing such pointers would
+// be UB.
+#[doc = include_str!("c_void.md")]
+#[repr(u8)]
+#[stable(feature = "core_c_void", since = "1.30.0")]
+pub enum c_void {
+ #[unstable(
+ feature = "c_void_variant",
+ reason = "temporary implementation detail",
+ issue = "none"
+ )]
+ #[doc(hidden)]
+ __variant1,
+ #[unstable(
+ feature = "c_void_variant",
+ reason = "temporary implementation detail",
+ issue = "none"
+ )]
+ #[doc(hidden)]
+ __variant2,
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for c_void {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("c_void").finish()
+ }
+}
+
+/// Basic implementation of a `va_list`.
+// The name is WIP, using `VaListImpl` for now.
+#[cfg(any(
+ all(not(target_arch = "aarch64"), not(target_arch = "powerpc"), not(target_arch = "x86_64")),
+ all(target_arch = "aarch64", any(target_os = "macos", target_os = "ios")),
+ target_family = "wasm",
+ target_arch = "asmjs",
+ target_os = "uefi",
+ windows,
+))]
+#[repr(transparent)]
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+#[lang = "va_list"]
+pub struct VaListImpl<'f> {
+ ptr: *mut c_void,
+
+ // Invariant over `'f`, so each `VaListImpl<'f>` object is tied to
+ // the region of the function it's defined in
+ _marker: PhantomData<&'f mut &'f c_void>,
+}
+
+#[cfg(any(
+ all(not(target_arch = "aarch64"), not(target_arch = "powerpc"), not(target_arch = "x86_64")),
+ all(target_arch = "aarch64", any(target_os = "macos", target_os = "ios")),
+ target_family = "wasm",
+ target_arch = "asmjs",
+ target_os = "uefi",
+ windows,
+))]
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+impl<'f> fmt::Debug for VaListImpl<'f> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "va_list* {:p}", self.ptr)
+ }
+}
+
+/// AArch64 ABI implementation of a `va_list`. See the
+/// [AArch64 Procedure Call Standard] for more details.
+///
+/// [AArch64 Procedure Call Standard]:
+/// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055b/IHI0055B_aapcs64.pdf
+#[cfg(all(
+ target_arch = "aarch64",
+ not(any(target_os = "macos", target_os = "ios")),
+ not(target_os = "uefi"),
+ not(windows),
+))]
+#[repr(C)]
+#[derive(Debug)]
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+#[lang = "va_list"]
+pub struct VaListImpl<'f> {
+ stack: *mut c_void,
+ gr_top: *mut c_void,
+ vr_top: *mut c_void,
+ gr_offs: i32,
+ vr_offs: i32,
+ _marker: PhantomData<&'f mut &'f c_void>,
+}
+
+/// PowerPC ABI implementation of a `va_list`.
+#[cfg(all(target_arch = "powerpc", not(target_os = "uefi"), not(windows)))]
+#[repr(C)]
+#[derive(Debug)]
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+#[lang = "va_list"]
+pub struct VaListImpl<'f> {
+ gpr: u8,
+ fpr: u8,
+ reserved: u16,
+ overflow_arg_area: *mut c_void,
+ reg_save_area: *mut c_void,
+ _marker: PhantomData<&'f mut &'f c_void>,
+}
+
+/// x86_64 ABI implementation of a `va_list`.
+#[cfg(all(target_arch = "x86_64", not(target_os = "uefi"), not(windows)))]
+#[repr(C)]
+#[derive(Debug)]
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+#[lang = "va_list"]
+pub struct VaListImpl<'f> {
+ gp_offset: i32,
+ fp_offset: i32,
+ overflow_arg_area: *mut c_void,
+ reg_save_area: *mut c_void,
+ _marker: PhantomData<&'f mut &'f c_void>,
+}
+
+/// A wrapper for a `va_list`.
+#[repr(transparent)]
+#[derive(Debug)]
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+pub struct VaList<'a, 'f: 'a> {
+ #[cfg(any(
+ all(
+ not(target_arch = "aarch64"),
+ not(target_arch = "powerpc"),
+ not(target_arch = "x86_64")
+ ),
+ all(target_arch = "aarch64", any(target_os = "macos", target_os = "ios")),
+ target_family = "wasm",
+ target_arch = "asmjs",
+ target_os = "uefi",
+ windows,
+ ))]
+ inner: VaListImpl<'f>,
+
+ #[cfg(all(
+ any(target_arch = "aarch64", target_arch = "powerpc", target_arch = "x86_64"),
+ any(not(target_arch = "aarch64"), not(any(target_os = "macos", target_os = "ios"))),
+ not(target_family = "wasm"),
+ not(target_arch = "asmjs"),
+ not(target_os = "uefi"),
+ not(windows),
+ ))]
+ inner: &'a mut VaListImpl<'f>,
+
+ _marker: PhantomData<&'a mut VaListImpl<'f>>,
+}
+
+#[cfg(any(
+ all(not(target_arch = "aarch64"), not(target_arch = "powerpc"), not(target_arch = "x86_64")),
+ all(target_arch = "aarch64", any(target_os = "macos", target_os = "ios")),
+ target_family = "wasm",
+ target_arch = "asmjs",
+ target_os = "uefi",
+ windows,
+))]
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+impl<'f> VaListImpl<'f> {
+    /// Converts a `VaListImpl` into a `VaList` that is binary-compatible with C's `va_list`.
+ #[inline]
+ pub fn as_va_list<'a>(&'a mut self) -> VaList<'a, 'f> {
+ VaList { inner: VaListImpl { ..*self }, _marker: PhantomData }
+ }
+}
+
+#[cfg(all(
+ any(target_arch = "aarch64", target_arch = "powerpc", target_arch = "x86_64"),
+ any(not(target_arch = "aarch64"), not(any(target_os = "macos", target_os = "ios"))),
+ not(target_family = "wasm"),
+ not(target_arch = "asmjs"),
+ not(target_os = "uefi"),
+ not(windows),
+))]
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+impl<'f> VaListImpl<'f> {
+    /// Converts a `VaListImpl` into a `VaList` that is binary-compatible with C's `va_list`.
+ #[inline]
+ pub fn as_va_list<'a>(&'a mut self) -> VaList<'a, 'f> {
+ VaList { inner: self, _marker: PhantomData }
+ }
+}
+
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+impl<'a, 'f: 'a> Deref for VaList<'a, 'f> {
+ type Target = VaListImpl<'f>;
+
+ #[inline]
+ fn deref(&self) -> &VaListImpl<'f> {
+ &self.inner
+ }
+}
+
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+impl<'a, 'f: 'a> DerefMut for VaList<'a, 'f> {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut VaListImpl<'f> {
+ &mut self.inner
+ }
+}
+
+// The VaArgSafe trait needs to be used in public interfaces, however, the trait
+// itself must not be allowed to be used outside this module. Allowing users to
+// implement the trait for a new type (thereby allowing the va_arg intrinsic to
+// be used on a new type) is likely to cause undefined behavior.
+//
+// FIXME(dlrobertson): In order to use the VaArgSafe trait in a public interface
+// but also ensure it cannot be used elsewhere, the trait needs to be public
+// within a private module. Once RFC 2145 has been implemented look into
+// improving this.
+mod sealed_trait {
+ /// Trait which permits the allowed types to be used with [super::VaListImpl::arg].
+ #[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+ )]
+ pub trait VaArgSafe {}
+}
+
+macro_rules! impl_va_arg_safe {
+ ($($t:ty),+) => {
+ $(
+ #[unstable(feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930")]
+ impl sealed_trait::VaArgSafe for $t {}
+ )+
+ }
+}
+
+impl_va_arg_safe! {i8, i16, i32, i64, usize}
+impl_va_arg_safe! {u8, u16, u32, u64, isize}
+impl_va_arg_safe! {f64}
+
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+impl<T> sealed_trait::VaArgSafe for *mut T {}
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+impl<T> sealed_trait::VaArgSafe for *const T {}
+
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+impl<'f> VaListImpl<'f> {
+    /// Loads the next argument and advances the `va_list`.
+ #[inline]
+ pub unsafe fn arg<T: sealed_trait::VaArgSafe>(&mut self) -> T {
+ // SAFETY: the caller must uphold the safety contract for `va_arg`.
+ unsafe { va_arg(self) }
+ }
+
+ /// Copies the `va_list` at the current location.
+ pub unsafe fn with_copy<F, R>(&self, f: F) -> R
+ where
+ F: for<'copy> FnOnce(VaList<'copy, 'f>) -> R,
+ {
+ let mut ap = self.clone();
+ let ret = f(ap.as_va_list());
+ // SAFETY: the caller must uphold the safety contract for `va_end`.
+ unsafe {
+ va_end(&mut ap);
+ }
+ ret
+ }
+}
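+
+// A sketch of typical usage (nightly-only; requires `#![feature(c_variadic)]`):
+//
+//     pub unsafe extern "C" fn sum(n: usize, mut args: ...) -> i64 {
+//         let mut total = 0;
+//         for _ in 0..n {
+//             total += args.arg::<i64>();
+//         }
+//         total
+//     }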
+
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+impl<'f> Clone for VaListImpl<'f> {
+ #[inline]
+ fn clone(&self) -> Self {
+ let mut dest = crate::mem::MaybeUninit::uninit();
+ // SAFETY: we write to the `MaybeUninit`, thus it is initialized and `assume_init` is legal
+ unsafe {
+ va_copy(dest.as_mut_ptr(), self);
+ dest.assume_init()
+ }
+ }
+}
+
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+impl<'f> Drop for VaListImpl<'f> {
+ fn drop(&mut self) {
+ // FIXME: this should call `va_end`, but there's no clean way to
+ // guarantee that `drop` always gets inlined into its caller,
+ // so the `va_end` would get directly called from the same function as
+ // the corresponding `va_copy`. `man va_end` states that C requires this,
+ // and LLVM basically follows the C semantics, so we need to make sure
+ // that `va_end` is always called from the same function as `va_copy`.
+ // For more details, see https://github.com/rust-lang/rust/pull/59625
+ // and https://llvm.org/docs/LangRef.html#llvm-va-end-intrinsic.
+ //
+ // This works for now, since `va_end` is a no-op on all current LLVM targets.
+ }
+}
+
+extern "rust-intrinsic" {
+ /// Destroy the arglist `ap` after initialization with `va_start` or
+ /// `va_copy`.
+ fn va_end(ap: &mut VaListImpl<'_>);
+
+ /// Copies the current location of arglist `src` to the arglist `dst`.
+ fn va_copy<'f>(dest: *mut VaListImpl<'f>, src: &VaListImpl<'f>);
+
+    /// Loads an argument of type `T` from the `va_list` `ap` and advances
+    /// `ap` to point at the next argument.
+ fn va_arg<T: sealed_trait::VaArgSafe>(ap: &mut VaListImpl<'_>) -> T;
+}
diff --git a/library/core/src/fmt/builders.rs b/library/core/src/fmt/builders.rs
new file mode 100644
index 000000000..32d1a4e55
--- /dev/null
+++ b/library/core/src/fmt/builders.rs
@@ -0,0 +1,939 @@
+#![allow(unused_imports)]
+
+use crate::fmt::{self, Debug, Formatter};
+
+struct PadAdapter<'buf, 'state> {
+ buf: &'buf mut (dyn fmt::Write + 'buf),
+ state: &'state mut PadAdapterState,
+}
+
+struct PadAdapterState {
+ on_newline: bool,
+}
+
+impl Default for PadAdapterState {
+ fn default() -> Self {
+ PadAdapterState { on_newline: true }
+ }
+}
+
+impl<'buf, 'state> PadAdapter<'buf, 'state> {
+ fn wrap<'slot, 'fmt: 'buf + 'slot>(
+ fmt: &'fmt mut fmt::Formatter<'_>,
+ slot: &'slot mut Option<Self>,
+ state: &'state mut PadAdapterState,
+ ) -> fmt::Formatter<'slot> {
+ fmt.wrap_buf(move |buf| slot.insert(PadAdapter { buf, state }))
+ }
+}
+
+impl fmt::Write for PadAdapter<'_, '_> {
+ fn write_str(&mut self, mut s: &str) -> fmt::Result {
+ while !s.is_empty() {
+ if self.state.on_newline {
+ self.buf.write_str(" ")?;
+ }
+
+ let split = match s.find('\n') {
+ Some(pos) => {
+ self.state.on_newline = true;
+ pos + 1
+ }
+ None => {
+ self.state.on_newline = false;
+ s.len()
+ }
+ };
+ self.buf.write_str(&s[..split])?;
+ s = &s[split..];
+ }
+
+ Ok(())
+ }
+}
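+
+// For example, pretty-printed (`{:#?}`) builders route their output through a
+// `PadAdapter`, so each line that begins after a newline is prefixed with four
+// spaces; wrapping one adapter in another yields the nested indentation.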
+
+/// A struct to help with [`fmt::Debug`](Debug) implementations.
+///
+/// This is useful when you wish to output a formatted struct as a part of your
+/// [`Debug::fmt`] implementation.
+///
+/// This can be constructed by the [`Formatter::debug_struct`] method.
+///
+/// # Examples
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Foo {
+/// bar: i32,
+/// baz: String,
+/// }
+///
+/// impl fmt::Debug for Foo {
+/// fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+/// fmt.debug_struct("Foo")
+/// .field("bar", &self.bar)
+/// .field("baz", &self.baz)
+/// .finish()
+/// }
+/// }
+///
+/// assert_eq!(
+/// format!("{:?}", Foo { bar: 10, baz: "Hello World".to_string() }),
+/// "Foo { bar: 10, baz: \"Hello World\" }",
+/// );
+/// ```
+#[must_use = "must eventually call `finish()` on Debug builders"]
+#[allow(missing_debug_implementations)]
+#[stable(feature = "debug_builders", since = "1.2.0")]
+pub struct DebugStruct<'a, 'b: 'a> {
+ fmt: &'a mut fmt::Formatter<'b>,
+ result: fmt::Result,
+ has_fields: bool,
+}
+
+pub(super) fn debug_struct_new<'a, 'b>(
+ fmt: &'a mut fmt::Formatter<'b>,
+ name: &str,
+) -> DebugStruct<'a, 'b> {
+ let result = fmt.write_str(name);
+ DebugStruct { fmt, result, has_fields: false }
+}
+
+impl<'a, 'b: 'a> DebugStruct<'a, 'b> {
+ /// Adds a new field to the generated struct output.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Bar {
+ /// bar: i32,
+ /// another: String,
+ /// }
+ ///
+ /// impl fmt::Debug for Bar {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// fmt.debug_struct("Bar")
+ /// .field("bar", &self.bar) // We add `bar` field.
+ /// .field("another", &self.another) // We add `another` field.
+ /// // We even add a field which doesn't exist (because why not?).
+ /// .field("not_existing_field", &1)
+ /// .finish() // We're good to go!
+ /// }
+ /// }
+ ///
+ /// assert_eq!(
+ /// format!("{:?}", Bar { bar: 10, another: "Hello World".to_string() }),
+ /// "Bar { bar: 10, another: \"Hello World\", not_existing_field: 1 }",
+ /// );
+ /// ```
+ #[stable(feature = "debug_builders", since = "1.2.0")]
+ pub fn field(&mut self, name: &str, value: &dyn fmt::Debug) -> &mut Self {
+ self.result = self.result.and_then(|_| {
+ if self.is_pretty() {
+ if !self.has_fields {
+ self.fmt.write_str(" {\n")?;
+ }
+ let mut slot = None;
+ let mut state = Default::default();
+ let mut writer = PadAdapter::wrap(self.fmt, &mut slot, &mut state);
+ writer.write_str(name)?;
+ writer.write_str(": ")?;
+ value.fmt(&mut writer)?;
+ writer.write_str(",\n")
+ } else {
+ let prefix = if self.has_fields { ", " } else { " { " };
+ self.fmt.write_str(prefix)?;
+ self.fmt.write_str(name)?;
+ self.fmt.write_str(": ")?;
+ value.fmt(self.fmt)
+ }
+ });
+
+ self.has_fields = true;
+ self
+ }
+
+ /// Marks the struct as non-exhaustive, indicating to the reader that there are some other
+ /// fields that are not shown in the debug representation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Bar {
+ /// bar: i32,
+ /// hidden: f32,
+ /// }
+ ///
+ /// impl fmt::Debug for Bar {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// fmt.debug_struct("Bar")
+ /// .field("bar", &self.bar)
+ /// .finish_non_exhaustive() // Show that some other field(s) exist.
+ /// }
+ /// }
+ ///
+ /// assert_eq!(
+ /// format!("{:?}", Bar { bar: 10, hidden: 1.0 }),
+ /// "Bar { bar: 10, .. }",
+ /// );
+ /// ```
+ #[stable(feature = "debug_non_exhaustive", since = "1.53.0")]
+ pub fn finish_non_exhaustive(&mut self) -> fmt::Result {
+ self.result = self.result.and_then(|_| {
+ if self.has_fields {
+ if self.is_pretty() {
+ let mut slot = None;
+ let mut state = Default::default();
+ let mut writer = PadAdapter::wrap(self.fmt, &mut slot, &mut state);
+ writer.write_str("..\n")?;
+ self.fmt.write_str("}")
+ } else {
+ self.fmt.write_str(", .. }")
+ }
+ } else {
+ self.fmt.write_str(" { .. }")
+ }
+ });
+ self.result
+ }
+
+ /// Finishes output and returns any error encountered.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Bar {
+ /// bar: i32,
+ /// baz: String,
+ /// }
+ ///
+ /// impl fmt::Debug for Bar {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// fmt.debug_struct("Bar")
+ /// .field("bar", &self.bar)
+ /// .field("baz", &self.baz)
+ /// .finish() // You need to call it to "finish" the
+ /// // struct formatting.
+ /// }
+ /// }
+ ///
+ /// assert_eq!(
+ /// format!("{:?}", Bar { bar: 10, baz: "Hello World".to_string() }),
+ /// "Bar { bar: 10, baz: \"Hello World\" }",
+ /// );
+ /// ```
+ #[stable(feature = "debug_builders", since = "1.2.0")]
+ pub fn finish(&mut self) -> fmt::Result {
+ if self.has_fields {
+ self.result = self.result.and_then(|_| {
+ if self.is_pretty() { self.fmt.write_str("}") } else { self.fmt.write_str(" }") }
+ });
+ }
+ self.result
+ }
+
+ fn is_pretty(&self) -> bool {
+ self.fmt.alternate()
+ }
+}
+
+/// A struct to help with [`fmt::Debug`](Debug) implementations.
+///
+/// This is useful when you wish to output a formatted tuple as a part of your
+/// [`Debug::fmt`] implementation.
+///
+/// This can be constructed by the [`Formatter::debug_tuple`] method.
+///
+/// # Examples
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Foo(i32, String);
+///
+/// impl fmt::Debug for Foo {
+/// fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+/// fmt.debug_tuple("Foo")
+/// .field(&self.0)
+/// .field(&self.1)
+/// .finish()
+/// }
+/// }
+///
+/// assert_eq!(
+/// format!("{:?}", Foo(10, "Hello World".to_string())),
+/// "Foo(10, \"Hello World\")",
+/// );
+/// ```
+#[must_use = "must eventually call `finish()` on Debug builders"]
+#[allow(missing_debug_implementations)]
+#[stable(feature = "debug_builders", since = "1.2.0")]
+pub struct DebugTuple<'a, 'b: 'a> {
+ fmt: &'a mut fmt::Formatter<'b>,
+ result: fmt::Result,
+ fields: usize,
+ empty_name: bool,
+}
+
+pub(super) fn debug_tuple_new<'a, 'b>(
+ fmt: &'a mut fmt::Formatter<'b>,
+ name: &str,
+) -> DebugTuple<'a, 'b> {
+ let result = fmt.write_str(name);
+ DebugTuple { fmt, result, fields: 0, empty_name: name.is_empty() }
+}
+
+impl<'a, 'b: 'a> DebugTuple<'a, 'b> {
+ /// Adds a new field to the generated tuple struct output.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo(i32, String);
+ ///
+ /// impl fmt::Debug for Foo {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// fmt.debug_tuple("Foo")
+ /// .field(&self.0) // We add the first field.
+ /// .field(&self.1) // We add the second field.
+ /// .finish() // We're good to go!
+ /// }
+ /// }
+ ///
+ /// assert_eq!(
+ /// format!("{:?}", Foo(10, "Hello World".to_string())),
+ /// "Foo(10, \"Hello World\")",
+ /// );
+ /// ```
+ #[stable(feature = "debug_builders", since = "1.2.0")]
+ pub fn field(&mut self, value: &dyn fmt::Debug) -> &mut Self {
+ self.result = self.result.and_then(|_| {
+ if self.is_pretty() {
+ if self.fields == 0 {
+ self.fmt.write_str("(\n")?;
+ }
+ let mut slot = None;
+ let mut state = Default::default();
+ let mut writer = PadAdapter::wrap(self.fmt, &mut slot, &mut state);
+ value.fmt(&mut writer)?;
+ writer.write_str(",\n")
+ } else {
+ let prefix = if self.fields == 0 { "(" } else { ", " };
+ self.fmt.write_str(prefix)?;
+ value.fmt(self.fmt)
+ }
+ });
+
+ self.fields += 1;
+ self
+ }
+
+ /// Finishes output and returns any error encountered.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo(i32, String);
+ ///
+ /// impl fmt::Debug for Foo {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// fmt.debug_tuple("Foo")
+ /// .field(&self.0)
+ /// .field(&self.1)
+ /// .finish() // You need to call it to "finish" the
+ /// // tuple formatting.
+ /// }
+ /// }
+ ///
+ /// assert_eq!(
+ /// format!("{:?}", Foo(10, "Hello World".to_string())),
+ /// "Foo(10, \"Hello World\")",
+ /// );
+ /// ```
+ #[stable(feature = "debug_builders", since = "1.2.0")]
+ pub fn finish(&mut self) -> fmt::Result {
+ if self.fields > 0 {
+ self.result = self.result.and_then(|_| {
+ if self.fields == 1 && self.empty_name && !self.is_pretty() {
+ self.fmt.write_str(",")?;
+ }
+ self.fmt.write_str(")")
+ });
+ }
+ self.result
+ }
+
+ fn is_pretty(&self) -> bool {
+ self.fmt.alternate()
+ }
+}
+
+struct DebugInner<'a, 'b: 'a> {
+ fmt: &'a mut fmt::Formatter<'b>,
+ result: fmt::Result,
+ has_fields: bool,
+}
+
+impl<'a, 'b: 'a> DebugInner<'a, 'b> {
+ fn entry(&mut self, entry: &dyn fmt::Debug) {
+ self.result = self.result.and_then(|_| {
+ if self.is_pretty() {
+ if !self.has_fields {
+ self.fmt.write_str("\n")?;
+ }
+ let mut slot = None;
+ let mut state = Default::default();
+ let mut writer = PadAdapter::wrap(self.fmt, &mut slot, &mut state);
+ entry.fmt(&mut writer)?;
+ writer.write_str(",\n")
+ } else {
+ if self.has_fields {
+ self.fmt.write_str(", ")?
+ }
+ entry.fmt(self.fmt)
+ }
+ });
+
+ self.has_fields = true;
+ }
+
+ fn is_pretty(&self) -> bool {
+ self.fmt.alternate()
+ }
+}
+
+/// A struct to help with [`fmt::Debug`](Debug) implementations.
+///
+/// This is useful when you wish to output a formatted set of items as a part
+/// of your [`Debug::fmt`] implementation.
+///
+/// This can be constructed by the [`Formatter::debug_set`] method.
+///
+/// # Examples
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Foo(Vec<i32>);
+///
+/// impl fmt::Debug for Foo {
+/// fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+/// fmt.debug_set().entries(self.0.iter()).finish()
+/// }
+/// }
+///
+/// assert_eq!(
+/// format!("{:?}", Foo(vec![10, 11])),
+/// "{10, 11}",
+/// );
+/// ```
+#[must_use = "must eventually call `finish()` on Debug builders"]
+#[allow(missing_debug_implementations)]
+#[stable(feature = "debug_builders", since = "1.2.0")]
+pub struct DebugSet<'a, 'b: 'a> {
+ inner: DebugInner<'a, 'b>,
+}
+
+pub(super) fn debug_set_new<'a, 'b>(fmt: &'a mut fmt::Formatter<'b>) -> DebugSet<'a, 'b> {
+ let result = fmt.write_str("{");
+ DebugSet { inner: DebugInner { fmt, result, has_fields: false } }
+}
+
+impl<'a, 'b: 'a> DebugSet<'a, 'b> {
+ /// Adds a new entry to the set output.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo(Vec<i32>, Vec<u32>);
+ ///
+ /// impl fmt::Debug for Foo {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// fmt.debug_set()
+ /// .entry(&self.0) // Adds the first "entry".
+ /// .entry(&self.1) // Adds the second "entry".
+ /// .finish()
+ /// }
+ /// }
+ ///
+ /// assert_eq!(
+ /// format!("{:?}", Foo(vec![10, 11], vec![12, 13])),
+ /// "{[10, 11], [12, 13]}",
+ /// );
+ /// ```
+ #[stable(feature = "debug_builders", since = "1.2.0")]
+ pub fn entry(&mut self, entry: &dyn fmt::Debug) -> &mut Self {
+ self.inner.entry(entry);
+ self
+ }
+
+ /// Adds the contents of an iterator of entries to the set output.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo(Vec<i32>, Vec<u32>);
+ ///
+ /// impl fmt::Debug for Foo {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// fmt.debug_set()
+ /// .entries(self.0.iter()) // Adds the first "entry".
+ /// .entries(self.1.iter()) // Adds the second "entry".
+ /// .finish()
+ /// }
+ /// }
+ ///
+ /// assert_eq!(
+ /// format!("{:?}", Foo(vec![10, 11], vec![12, 13])),
+ /// "{10, 11, 12, 13}",
+ /// );
+ /// ```
+ #[stable(feature = "debug_builders", since = "1.2.0")]
+ pub fn entries<D, I>(&mut self, entries: I) -> &mut Self
+ where
+ D: fmt::Debug,
+ I: IntoIterator<Item = D>,
+ {
+ for entry in entries {
+ self.entry(&entry);
+ }
+ self
+ }
+
+ /// Finishes output and returns any error encountered.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo(Vec<i32>);
+ ///
+ /// impl fmt::Debug for Foo {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// fmt.debug_set()
+ /// .entries(self.0.iter())
+    ///             .finish() // Ends the set formatting.
+ /// }
+ /// }
+ ///
+ /// assert_eq!(
+ /// format!("{:?}", Foo(vec![10, 11])),
+ /// "{10, 11}",
+ /// );
+ /// ```
+ #[stable(feature = "debug_builders", since = "1.2.0")]
+ pub fn finish(&mut self) -> fmt::Result {
+ self.inner.result.and_then(|_| self.inner.fmt.write_str("}"))
+ }
+}
+
+/// A struct to help with [`fmt::Debug`](Debug) implementations.
+///
+/// This is useful when you wish to output a formatted list of items as a part
+/// of your [`Debug::fmt`] implementation.
+///
+/// This can be constructed by the [`Formatter::debug_list`] method.
+///
+/// # Examples
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Foo(Vec<i32>);
+///
+/// impl fmt::Debug for Foo {
+/// fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+/// fmt.debug_list().entries(self.0.iter()).finish()
+/// }
+/// }
+///
+/// assert_eq!(
+/// format!("{:?}", Foo(vec![10, 11])),
+/// "[10, 11]",
+/// );
+/// ```
+#[must_use = "must eventually call `finish()` on Debug builders"]
+#[allow(missing_debug_implementations)]
+#[stable(feature = "debug_builders", since = "1.2.0")]
+pub struct DebugList<'a, 'b: 'a> {
+ inner: DebugInner<'a, 'b>,
+}
+
+pub(super) fn debug_list_new<'a, 'b>(fmt: &'a mut fmt::Formatter<'b>) -> DebugList<'a, 'b> {
+ let result = fmt.write_str("[");
+ DebugList { inner: DebugInner { fmt, result, has_fields: false } }
+}
+
+impl<'a, 'b: 'a> DebugList<'a, 'b> {
+ /// Adds a new entry to the list output.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo(Vec<i32>, Vec<u32>);
+ ///
+ /// impl fmt::Debug for Foo {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// fmt.debug_list()
+ /// .entry(&self.0) // We add the first "entry".
+ /// .entry(&self.1) // We add the second "entry".
+ /// .finish()
+ /// }
+ /// }
+ ///
+ /// assert_eq!(
+ /// format!("{:?}", Foo(vec![10, 11], vec![12, 13])),
+ /// "[[10, 11], [12, 13]]",
+ /// );
+ /// ```
+ #[stable(feature = "debug_builders", since = "1.2.0")]
+ pub fn entry(&mut self, entry: &dyn fmt::Debug) -> &mut Self {
+ self.inner.entry(entry);
+ self
+ }
+
+ /// Adds the contents of an iterator of entries to the list output.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo(Vec<i32>, Vec<u32>);
+ ///
+ /// impl fmt::Debug for Foo {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// fmt.debug_list()
+ /// .entries(self.0.iter())
+ /// .entries(self.1.iter())
+ /// .finish()
+ /// }
+ /// }
+ ///
+ /// assert_eq!(
+ /// format!("{:?}", Foo(vec![10, 11], vec![12, 13])),
+ /// "[10, 11, 12, 13]",
+ /// );
+ /// ```
+ #[stable(feature = "debug_builders", since = "1.2.0")]
+ pub fn entries<D, I>(&mut self, entries: I) -> &mut Self
+ where
+ D: fmt::Debug,
+ I: IntoIterator<Item = D>,
+ {
+ for entry in entries {
+ self.entry(&entry);
+ }
+ self
+ }
+
+ /// Finishes output and returns any error encountered.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo(Vec<i32>);
+ ///
+ /// impl fmt::Debug for Foo {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// fmt.debug_list()
+ /// .entries(self.0.iter())
+ /// .finish() // Ends the struct formatting.
+ /// }
+ /// }
+ ///
+ /// assert_eq!(
+ /// format!("{:?}", Foo(vec![10, 11])),
+ /// "[10, 11]",
+ /// );
+ /// ```
+ #[stable(feature = "debug_builders", since = "1.2.0")]
+ pub fn finish(&mut self) -> fmt::Result {
+ self.inner.result.and_then(|_| self.inner.fmt.write_str("]"))
+ }
+}
+
+/// A struct to help with [`fmt::Debug`](Debug) implementations.
+///
+/// This is useful when you wish to output a formatted map as a part of your
+/// [`Debug::fmt`] implementation.
+///
+/// This can be constructed by the [`Formatter::debug_map`] method.
+///
+/// # Examples
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Foo(Vec<(String, i32)>);
+///
+/// impl fmt::Debug for Foo {
+/// fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+/// fmt.debug_map().entries(self.0.iter().map(|&(ref k, ref v)| (k, v))).finish()
+/// }
+/// }
+///
+/// assert_eq!(
+/// format!("{:?}", Foo(vec![("A".to_string(), 10), ("B".to_string(), 11)])),
+/// "{\"A\": 10, \"B\": 11}",
+/// );
+/// ```
+#[must_use = "must eventually call `finish()` on Debug builders"]
+#[allow(missing_debug_implementations)]
+#[stable(feature = "debug_builders", since = "1.2.0")]
+pub struct DebugMap<'a, 'b: 'a> {
+ fmt: &'a mut fmt::Formatter<'b>,
+ result: fmt::Result,
+ has_fields: bool,
+ has_key: bool,
+ // The state of newlines is tracked between keys and values
+ state: PadAdapterState,
+}
+
+pub(super) fn debug_map_new<'a, 'b>(fmt: &'a mut fmt::Formatter<'b>) -> DebugMap<'a, 'b> {
+ let result = fmt.write_str("{");
+ DebugMap { fmt, result, has_fields: false, has_key: false, state: Default::default() }
+}
+
+impl<'a, 'b: 'a> DebugMap<'a, 'b> {
+ /// Adds a new entry to the map output.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo(Vec<(String, i32)>);
+ ///
+ /// impl fmt::Debug for Foo {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// fmt.debug_map()
+ /// .entry(&"whole", &self.0) // We add the "whole" entry.
+ /// .finish()
+ /// }
+ /// }
+ ///
+ /// assert_eq!(
+ /// format!("{:?}", Foo(vec![("A".to_string(), 10), ("B".to_string(), 11)])),
+ /// "{\"whole\": [(\"A\", 10), (\"B\", 11)]}",
+ /// );
+ /// ```
+ #[stable(feature = "debug_builders", since = "1.2.0")]
+ pub fn entry(&mut self, key: &dyn fmt::Debug, value: &dyn fmt::Debug) -> &mut Self {
+ self.key(key).value(value)
+ }
+
+ /// Adds the key part of a new entry to the map output.
+ ///
+    /// This method, together with `value`, is an alternative to `entry` that
+    /// can be used when the complete entry isn't known upfront. Prefer the `entry`
+    /// method whenever the key and value are both available.
+ ///
+ /// # Panics
+ ///
+ /// `key` must be called before `value` and each call to `key` must be followed
+ /// by a corresponding call to `value`. Otherwise this method will panic.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo(Vec<(String, i32)>);
+ ///
+ /// impl fmt::Debug for Foo {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// fmt.debug_map()
+ /// .key(&"whole").value(&self.0) // We add the "whole" entry.
+ /// .finish()
+ /// }
+ /// }
+ ///
+ /// assert_eq!(
+ /// format!("{:?}", Foo(vec![("A".to_string(), 10), ("B".to_string(), 11)])),
+ /// "{\"whole\": [(\"A\", 10), (\"B\", 11)]}",
+ /// );
+ /// ```
+ #[stable(feature = "debug_map_key_value", since = "1.42.0")]
+ pub fn key(&mut self, key: &dyn fmt::Debug) -> &mut Self {
+ self.result = self.result.and_then(|_| {
+ assert!(
+ !self.has_key,
+ "attempted to begin a new map entry \
+ without completing the previous one"
+ );
+
+ if self.is_pretty() {
+ if !self.has_fields {
+ self.fmt.write_str("\n")?;
+ }
+ let mut slot = None;
+ self.state = Default::default();
+ let mut writer = PadAdapter::wrap(self.fmt, &mut slot, &mut self.state);
+ key.fmt(&mut writer)?;
+ writer.write_str(": ")?;
+ } else {
+ if self.has_fields {
+ self.fmt.write_str(", ")?
+ }
+ key.fmt(self.fmt)?;
+ self.fmt.write_str(": ")?;
+ }
+
+ self.has_key = true;
+ Ok(())
+ });
+
+ self
+ }
+
+ /// Adds the value part of a new entry to the map output.
+ ///
+    /// This method, together with `key`, is an alternative to `entry` that
+    /// can be used when the complete entry isn't known upfront. Prefer the `entry`
+    /// method whenever the key and value are both available.
+ ///
+ /// # Panics
+ ///
+ /// `key` must be called before `value` and each call to `key` must be followed
+ /// by a corresponding call to `value`. Otherwise this method will panic.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo(Vec<(String, i32)>);
+ ///
+ /// impl fmt::Debug for Foo {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// fmt.debug_map()
+ /// .key(&"whole").value(&self.0) // We add the "whole" entry.
+ /// .finish()
+ /// }
+ /// }
+ ///
+ /// assert_eq!(
+ /// format!("{:?}", Foo(vec![("A".to_string(), 10), ("B".to_string(), 11)])),
+ /// "{\"whole\": [(\"A\", 10), (\"B\", 11)]}",
+ /// );
+ /// ```
+ #[stable(feature = "debug_map_key_value", since = "1.42.0")]
+ pub fn value(&mut self, value: &dyn fmt::Debug) -> &mut Self {
+ self.result = self.result.and_then(|_| {
+ assert!(self.has_key, "attempted to format a map value before its key");
+
+ if self.is_pretty() {
+ let mut slot = None;
+ let mut writer = PadAdapter::wrap(self.fmt, &mut slot, &mut self.state);
+ value.fmt(&mut writer)?;
+ writer.write_str(",\n")?;
+ } else {
+ value.fmt(self.fmt)?;
+ }
+
+ self.has_key = false;
+ Ok(())
+ });
+
+ self.has_fields = true;
+ self
+ }
+
+ /// Adds the contents of an iterator of entries to the map output.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo(Vec<(String, i32)>);
+ ///
+ /// impl fmt::Debug for Foo {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// fmt.debug_map()
+    ///             // We map our vec so each entry's first field will become
+ /// // the "key".
+ /// .entries(self.0.iter().map(|&(ref k, ref v)| (k, v)))
+ /// .finish()
+ /// }
+ /// }
+ ///
+ /// assert_eq!(
+ /// format!("{:?}", Foo(vec![("A".to_string(), 10), ("B".to_string(), 11)])),
+ /// "{\"A\": 10, \"B\": 11}",
+ /// );
+ /// ```
+ #[stable(feature = "debug_builders", since = "1.2.0")]
+ pub fn entries<K, V, I>(&mut self, entries: I) -> &mut Self
+ where
+ K: fmt::Debug,
+ V: fmt::Debug,
+ I: IntoIterator<Item = (K, V)>,
+ {
+ for (k, v) in entries {
+ self.entry(&k, &v);
+ }
+ self
+ }
+
+ /// Finishes output and returns any error encountered.
+ ///
+ /// # Panics
+ ///
+ /// `key` must be called before `value` and each call to `key` must be followed
+ /// by a corresponding call to `value`. Otherwise this method will panic.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo(Vec<(String, i32)>);
+ ///
+ /// impl fmt::Debug for Foo {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// fmt.debug_map()
+ /// .entries(self.0.iter().map(|&(ref k, ref v)| (k, v)))
+ /// .finish() // Ends the struct formatting.
+ /// }
+ /// }
+ ///
+ /// assert_eq!(
+ /// format!("{:?}", Foo(vec![("A".to_string(), 10), ("B".to_string(), 11)])),
+ /// "{\"A\": 10, \"B\": 11}",
+ /// );
+ /// ```
+ #[stable(feature = "debug_builders", since = "1.2.0")]
+ pub fn finish(&mut self) -> fmt::Result {
+ self.result.and_then(|_| {
+ assert!(!self.has_key, "attempted to finish a map with a partial entry");
+
+ self.fmt.write_str("}")
+ })
+ }
+
+ fn is_pretty(&self) -> bool {
+ self.fmt.alternate()
+ }
+}
diff --git a/library/core/src/fmt/float.rs b/library/core/src/fmt/float.rs
new file mode 100644
index 000000000..89d5fac30
--- /dev/null
+++ b/library/core/src/fmt/float.rs
@@ -0,0 +1,226 @@
+use crate::fmt::{Debug, Display, Formatter, LowerExp, Result, UpperExp};
+use crate::mem::MaybeUninit;
+use crate::num::flt2dec;
+use crate::num::fmt as numfmt;
+
+#[doc(hidden)]
+trait GeneralFormat: PartialOrd {
+    /// Determines whether a value should use exponential notation, based on its magnitude,
+    /// given the precondition that it will not be rounded any further before it is displayed.
+ fn already_rounded_value_should_use_exponential(&self) -> bool;
+}
+
+macro_rules! impl_general_format {
+ ($($t:ident)*) => {
+ $(impl GeneralFormat for $t {
+ fn already_rounded_value_should_use_exponential(&self) -> bool {
+ let abs = $t::abs_private(*self);
+ (abs != 0.0 && abs < 1e-4) || abs >= 1e+16
+ }
+ })*
+ }
+}
+
+impl_general_format! { f32 f64 }
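+
+// A behavior sketch of the threshold above (values chosen for illustration):
+// with the shortest-representation formatting used by `{:?}`, magnitudes in
+// `[1e-4, 1e16)` keep plain decimal notation and everything outside that
+// range switches to exponential:
+//
+//     assert_eq!(format!("{:?}", 0.0001), "0.0001");
+//     assert_eq!(format!("{:?}", 0.00001), "1e-5");
+//     assert_eq!(format!("{:?}", 1e16), "1e16");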
+
+// Don't inline this so callers don't use the stack space this function
+// requires unless they have to.
+#[inline(never)]
+fn float_to_decimal_common_exact<T>(
+ fmt: &mut Formatter<'_>,
+ num: &T,
+ sign: flt2dec::Sign,
+ precision: usize,
+) -> Result
+where
+ T: flt2dec::DecodableFloat,
+{
+ let mut buf: [MaybeUninit<u8>; 1024] = MaybeUninit::uninit_array(); // enough for f32 and f64
+ let mut parts: [MaybeUninit<numfmt::Part<'_>>; 4] = MaybeUninit::uninit_array();
+ let formatted = flt2dec::to_exact_fixed_str(
+ flt2dec::strategy::grisu::format_exact,
+ *num,
+ sign,
+ precision,
+ &mut buf,
+ &mut parts,
+ );
+ fmt.pad_formatted_parts(&formatted)
+}
+
+// Don't inline this so callers that call both this and the above won't wind
+// up using the combined stack space of both functions in some cases.
+#[inline(never)]
+fn float_to_decimal_common_shortest<T>(
+ fmt: &mut Formatter<'_>,
+ num: &T,
+ sign: flt2dec::Sign,
+ precision: usize,
+) -> Result
+where
+ T: flt2dec::DecodableFloat,
+{
+ // enough for f32 and f64
+ let mut buf: [MaybeUninit<u8>; flt2dec::MAX_SIG_DIGITS] = MaybeUninit::uninit_array();
+ let mut parts: [MaybeUninit<numfmt::Part<'_>>; 4] = MaybeUninit::uninit_array();
+ let formatted = flt2dec::to_shortest_str(
+ flt2dec::strategy::grisu::format_shortest,
+ *num,
+ sign,
+ precision,
+ &mut buf,
+ &mut parts,
+ );
+ fmt.pad_formatted_parts(&formatted)
+}
+
+fn float_to_decimal_display<T>(fmt: &mut Formatter<'_>, num: &T) -> Result
+where
+ T: flt2dec::DecodableFloat,
+{
+ let force_sign = fmt.sign_plus();
+ let sign = match force_sign {
+ false => flt2dec::Sign::Minus,
+ true => flt2dec::Sign::MinusPlus,
+ };
+
+ if let Some(precision) = fmt.precision {
+ float_to_decimal_common_exact(fmt, num, sign, precision)
+ } else {
+ let min_precision = 0;
+ float_to_decimal_common_shortest(fmt, num, sign, min_precision)
+ }
+}
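+
+// Illustration of the two paths above (assumed behavior): `format!("{}", 1.5)`
+// takes the shortest path and yields "1.5", while `format!("{:.3}", 1.5)`
+// takes the exact path and yields "1.500".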
+
+// Don't inline this so callers don't use the stack space this function
+// requires unless they have to.
+#[inline(never)]
+fn float_to_exponential_common_exact<T>(
+ fmt: &mut Formatter<'_>,
+ num: &T,
+ sign: flt2dec::Sign,
+ precision: usize,
+ upper: bool,
+) -> Result
+where
+ T: flt2dec::DecodableFloat,
+{
+ let mut buf: [MaybeUninit<u8>; 1024] = MaybeUninit::uninit_array(); // enough for f32 and f64
+ let mut parts: [MaybeUninit<numfmt::Part<'_>>; 6] = MaybeUninit::uninit_array();
+ let formatted = flt2dec::to_exact_exp_str(
+ flt2dec::strategy::grisu::format_exact,
+ *num,
+ sign,
+ precision,
+ upper,
+ &mut buf,
+ &mut parts,
+ );
+ fmt.pad_formatted_parts(&formatted)
+}
+
+// Don't inline this so callers that call both this and the above won't wind
+// up using the combined stack space of both functions in some cases.
+#[inline(never)]
+fn float_to_exponential_common_shortest<T>(
+ fmt: &mut Formatter<'_>,
+ num: &T,
+ sign: flt2dec::Sign,
+ upper: bool,
+) -> Result
+where
+ T: flt2dec::DecodableFloat,
+{
+ // enough for f32 and f64
+ let mut buf: [MaybeUninit<u8>; flt2dec::MAX_SIG_DIGITS] = MaybeUninit::uninit_array();
+ let mut parts: [MaybeUninit<numfmt::Part<'_>>; 6] = MaybeUninit::uninit_array();
+ let formatted = flt2dec::to_shortest_exp_str(
+ flt2dec::strategy::grisu::format_shortest,
+ *num,
+ sign,
+ (0, 0),
+ upper,
+ &mut buf,
+ &mut parts,
+ );
+ fmt.pad_formatted_parts(&formatted)
+}
+
+// Common code of floating point LowerExp and UpperExp.
+fn float_to_exponential_common<T>(fmt: &mut Formatter<'_>, num: &T, upper: bool) -> Result
+where
+ T: flt2dec::DecodableFloat,
+{
+ let force_sign = fmt.sign_plus();
+ let sign = match force_sign {
+ false => flt2dec::Sign::Minus,
+ true => flt2dec::Sign::MinusPlus,
+ };
+
+ if let Some(precision) = fmt.precision {
+ // 1 integral digit + `precision` fractional digits = `precision + 1` total digits
+ float_to_exponential_common_exact(fmt, num, sign, precision + 1, upper)
+ } else {
+ float_to_exponential_common_shortest(fmt, num, sign, upper)
+ }
+}
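+
+// Illustration of the `precision + 1` rule above (assumed behavior):
+// `format!("{:.2e}", 1234.0)` requests 2 fractional digits, i.e. 3 total
+// digits, and yields "1.23e3".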
+
+fn float_to_general_debug<T>(fmt: &mut Formatter<'_>, num: &T) -> Result
+where
+ T: flt2dec::DecodableFloat + GeneralFormat,
+{
+ let force_sign = fmt.sign_plus();
+ let sign = match force_sign {
+ false => flt2dec::Sign::Minus,
+ true => flt2dec::Sign::MinusPlus,
+ };
+
+ if let Some(precision) = fmt.precision {
+ // this behavior of {:.PREC?} predates exponential formatting for {:?}
+ float_to_decimal_common_exact(fmt, num, sign, precision)
+ } else {
+ // since there is no precision, there will be no rounding
+ if num.already_rounded_value_should_use_exponential() {
+ let upper = false;
+ float_to_exponential_common_shortest(fmt, num, sign, upper)
+ } else {
+ let min_precision = 1;
+ float_to_decimal_common_shortest(fmt, num, sign, min_precision)
+ }
+ }
+}
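+
+// Illustration (assumed behavior): `format!("{:.2?}", 0.00001)` carries an
+// explicit precision and therefore stays decimal ("0.00"), while
+// `format!("{:?}", 0.00001)` uses the exponential form ("1e-5").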
+
+macro_rules! floating {
+ ($ty:ident) => {
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl Debug for $ty {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> Result {
+ float_to_general_debug(fmt, self)
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl Display for $ty {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> Result {
+ float_to_decimal_display(fmt, self)
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl LowerExp for $ty {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> Result {
+ float_to_exponential_common(fmt, self, false)
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl UpperExp for $ty {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> Result {
+ float_to_exponential_common(fmt, self, true)
+ }
+ }
+ };
+}
+
+floating! { f32 }
+floating! { f64 }
diff --git a/library/core/src/fmt/mod.rs b/library/core/src/fmt/mod.rs
new file mode 100644
index 000000000..372141e09
--- /dev/null
+++ b/library/core/src/fmt/mod.rs
@@ -0,0 +1,2664 @@
+//! Utilities for formatting and printing strings.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::cell::{Cell, Ref, RefCell, RefMut, SyncUnsafeCell, UnsafeCell};
+use crate::char::EscapeDebugExtArgs;
+use crate::iter;
+use crate::marker::PhantomData;
+use crate::mem;
+use crate::num::fmt as numfmt;
+use crate::ops::Deref;
+use crate::result;
+use crate::str;
+
+mod builders;
+#[cfg(not(no_fp_fmt_parse))]
+mod float;
+#[cfg(no_fp_fmt_parse)]
+mod nofloat;
+mod num;
+
+#[stable(feature = "fmt_flags_align", since = "1.28.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "Alignment")]
+/// Possible alignments returned by `Formatter::align`
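+///
+/// A minimal sketch of inspecting the requested alignment from inside a
+/// [`Display`] impl (the type `Arrow` here is hypothetical):
+///
+/// ```
+/// use std::fmt::{self, Alignment};
+///
+/// struct Arrow;
+///
+/// impl fmt::Display for Arrow {
+///     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+///         let s = match f.align() {
+///             Some(Alignment::Left) => "<-",
+///             Some(Alignment::Right) => "->",
+///             Some(Alignment::Center) => "<>",
+///             None => "--",
+///         };
+///         // The requested alignment is visible to the impl, which here
+///         // reports it and ignores the width.
+///         f.write_str(s)
+///     }
+/// }
+///
+/// assert_eq!(format!("{:<4}", Arrow), "<-");
+/// assert_eq!(format!("{:^4}", Arrow), "<>");
+/// assert_eq!(format!("{}", Arrow), "--");
+/// ```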
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum Alignment {
+ #[stable(feature = "fmt_flags_align", since = "1.28.0")]
+ /// Indication that contents should be left-aligned.
+ Left,
+ #[stable(feature = "fmt_flags_align", since = "1.28.0")]
+ /// Indication that contents should be right-aligned.
+ Right,
+ #[stable(feature = "fmt_flags_align", since = "1.28.0")]
+ /// Indication that contents should be center-aligned.
+ Center,
+}
+
+#[stable(feature = "debug_builders", since = "1.2.0")]
+pub use self::builders::{DebugList, DebugMap, DebugSet, DebugStruct, DebugTuple};
+
+#[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "none")]
+#[doc(hidden)]
+pub mod rt {
+ pub mod v1;
+}
+
+/// The type returned by formatter methods.
+///
+/// # Examples
+///
+/// ```
+/// use std::fmt;
+///
+/// #[derive(Debug)]
+/// struct Triangle {
+/// a: f32,
+/// b: f32,
+/// c: f32
+/// }
+///
+/// impl fmt::Display for Triangle {
+/// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// write!(f, "({}, {}, {})", self.a, self.b, self.c)
+/// }
+/// }
+///
+/// let pythagorean_triple = Triangle { a: 3.0, b: 4.0, c: 5.0 };
+///
+/// assert_eq!(format!("{pythagorean_triple}"), "(3, 4, 5)");
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub type Result = result::Result<(), Error>;
+
+/// The error type which is returned from formatting a message into a stream.
+///
+/// This type carries no information other than the fact that an error
+/// occurred. Any extra information must be transmitted through some other
+/// means.
+///
+/// An important thing to remember is that the type `fmt::Error` should not be
+/// confused with [`std::io::Error`] or [`std::error::Error`], which you may also
+/// have in scope.
+///
+/// [`std::io::Error`]: ../../std/io/struct.Error.html
+/// [`std::error::Error`]: ../../std/error/trait.Error.html
+///
+/// # Examples
+///
+/// ```rust
+/// use std::fmt::{self, write};
+///
+/// let mut output = String::new();
+/// if let Err(fmt::Error) = write(&mut output, format_args!("Hello {}!", "world")) {
+/// panic!("An error occurred");
+/// }
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct Error;
+
+/// A trait for writing or formatting into Unicode-accepting buffers or streams.
+///
+/// This trait only accepts UTF-8–encoded data and is not [flushable]. If you only
+/// want to accept Unicode and you don't need flushing, you should implement this trait;
+/// otherwise you should implement [`std::io::Write`].
+///
+/// [`std::io::Write`]: ../../std/io/trait.Write.html
+/// [flushable]: ../../std/io/trait.Write.html#tymethod.flush
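+///
+/// Only [`write_str`](Write::write_str) must be provided; `write_char` and
+/// `write_fmt` have default implementations in terms of it. A minimal sketch
+/// with a hypothetical byte-counting writer:
+///
+/// ```
+/// use std::fmt::{self, Write};
+///
+/// struct ByteCounter(usize);
+///
+/// impl Write for ByteCounter {
+///     fn write_str(&mut self, s: &str) -> fmt::Result {
+///         // Count the bytes instead of storing them.
+///         self.0 += s.len();
+///         Ok(())
+///     }
+/// }
+///
+/// let mut counter = ByteCounter(0);
+/// write!(counter, "{}!", 12345).unwrap();
+/// assert_eq!(counter.0, 6);
+/// ```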
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait Write {
+ /// Writes a string slice into this writer, returning whether the write
+ /// succeeded.
+ ///
+    /// This method can only succeed if the entire string slice was successfully
+    /// written, and it will not return until all data has been written or an
+    /// error occurs.
+ ///
+ /// # Errors
+ ///
+ /// This function will return an instance of [`Error`] on error.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt::{Error, Write};
+ ///
+ /// fn writer<W: Write>(f: &mut W, s: &str) -> Result<(), Error> {
+ /// f.write_str(s)
+ /// }
+ ///
+ /// let mut buf = String::new();
+ /// writer(&mut buf, "hola").unwrap();
+ /// assert_eq!(&buf, "hola");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn write_str(&mut self, s: &str) -> Result;
+
+ /// Writes a [`char`] into this writer, returning whether the write succeeded.
+ ///
+ /// A single [`char`] may be encoded as more than one byte.
+    /// This method can only succeed if the entire byte sequence was successfully
+    /// written, and it will not return until all data has been written or an
+    /// error occurs.
+ ///
+ /// # Errors
+ ///
+ /// This function will return an instance of [`Error`] on error.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt::{Error, Write};
+ ///
+ /// fn writer<W: Write>(f: &mut W, c: char) -> Result<(), Error> {
+ /// f.write_char(c)
+ /// }
+ ///
+ /// let mut buf = String::new();
+ /// writer(&mut buf, 'a').unwrap();
+ /// writer(&mut buf, 'b').unwrap();
+ /// assert_eq!(&buf, "ab");
+ /// ```
+ #[stable(feature = "fmt_write_char", since = "1.1.0")]
+ fn write_char(&mut self, c: char) -> Result {
+ self.write_str(c.encode_utf8(&mut [0; 4]))
+ }
+
+ /// Glue for usage of the [`write!`] macro with implementors of this trait.
+ ///
+ /// This method should generally not be invoked manually, but rather through
+ /// the [`write!`] macro itself.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt::{Error, Write};
+ ///
+ /// fn writer<W: Write>(f: &mut W, s: &str) -> Result<(), Error> {
+ /// f.write_fmt(format_args!("{s}"))
+ /// }
+ ///
+ /// let mut buf = String::new();
+ /// writer(&mut buf, "world").unwrap();
+ /// assert_eq!(&buf, "world");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn write_fmt(mut self: &mut Self, args: Arguments<'_>) -> Result {
+ write(&mut self, args)
+ }
+}
+
+#[stable(feature = "fmt_write_blanket_impl", since = "1.4.0")]
+impl<W: Write + ?Sized> Write for &mut W {
+ fn write_str(&mut self, s: &str) -> Result {
+ (**self).write_str(s)
+ }
+
+ fn write_char(&mut self, c: char) -> Result {
+ (**self).write_char(c)
+ }
+
+ fn write_fmt(&mut self, args: Arguments<'_>) -> Result {
+ (**self).write_fmt(args)
+ }
+}
+
+/// Configuration for formatting.
+///
+/// A `Formatter` represents various options related to formatting. Users do not
+/// construct `Formatter`s directly; a mutable reference to one is passed to
+/// the `fmt` method of all formatting traits, like [`Debug`] and [`Display`].
+///
+/// To interact with a `Formatter`, you'll call various methods to change its
+/// formatting options. For examples, please see the documentation of the
+/// methods defined on `Formatter` below.
+#[allow(missing_debug_implementations)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Formatter<'a> {
+ flags: u32,
+ fill: char,
+ align: rt::v1::Alignment,
+ width: Option<usize>,
+ precision: Option<usize>,
+
+ buf: &'a mut (dyn Write + 'a),
+}
+
+impl<'a> Formatter<'a> {
+ /// Creates a new formatter with default settings.
+ ///
+ /// This can be used as a micro-optimization in cases where a full `Arguments`
+ /// structure (as created by `format_args!`) is not necessary; `Arguments`
+ /// is a little more expensive to use in simple formatting scenarios.
+ ///
+ /// Currently not intended for use outside of the standard library.
+ #[unstable(feature = "fmt_internals", reason = "internal to standard library", issue = "none")]
+ #[doc(hidden)]
+ pub fn new(buf: &'a mut (dyn Write + 'a)) -> Formatter<'a> {
+ Formatter {
+ flags: 0,
+ fill: ' ',
+ align: rt::v1::Alignment::Unknown,
+ width: None,
+ precision: None,
+ buf,
+ }
+ }
+}
+
+// NB. Argument is essentially an optimized partially applied formatting function,
+// equivalent to `exists T. (&T, fn(&T, &mut Formatter<'_>) -> Result)`.
+
+extern "C" {
+ type Opaque;
+}
+
+/// This struct represents the generic "argument" which is taken by the Xprintf
+/// family of functions. It contains a function to format the given value. At
+/// compile time it is ensured that the function and the value have the correct
+/// types, and then this struct is used to canonicalize arguments to one type.
+#[derive(Copy, Clone)]
+#[allow(missing_debug_implementations)]
+#[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "none")]
+#[doc(hidden)]
+pub struct ArgumentV1<'a> {
+ value: &'a Opaque,
+ formatter: fn(&Opaque, &mut Formatter<'_>) -> Result,
+}
+
+/// This struct represents the unsafety of constructing an `Arguments`.
+/// It exists, rather than an unsafe function, in order to simplify the expansion
+/// of `format_args!(..)` and reduce the scope of the `unsafe` block.
+#[allow(missing_debug_implementations)]
+#[doc(hidden)]
+#[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "none")]
+pub struct UnsafeArg {
+ _private: (),
+}
+
+impl UnsafeArg {
+    /// See the documentation of the places where an `UnsafeArg` is required to
+    /// know when it is safe to create and use one.
+ #[doc(hidden)]
+ #[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "none")]
+ #[inline(always)]
+ pub unsafe fn new() -> Self {
+ Self { _private: () }
+ }
+}
+
+// This guarantees a single stable value for the function pointer associated with
+// indices/counts in the formatting infrastructure.
+//
+// Note that a function defined as such would not be correct as functions are
+// always tagged unnamed_addr with the current lowering to LLVM IR, so their
+// address is not considered important to LLVM and as such the as_usize cast
+// could have been miscompiled. In practice, we never call as_usize on non-usize
+// containing data (as a matter of static generation of the formatting
+// arguments), so this is merely an additional check.
+//
+// We primarily want to ensure that the function pointer at `USIZE_MARKER` has
+// an address corresponding *only* to functions that also take `&usize` as their
+// first argument. The read_volatile here ensures that we can safely read out a
+// usize from the passed reference and that this address does not point at a
+// non-usize-taking function.
+#[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "none")]
+static USIZE_MARKER: fn(&usize, &mut Formatter<'_>) -> Result = |ptr, _| {
+ // SAFETY: ptr is a reference
+ let _v: usize = unsafe { crate::ptr::read_volatile(ptr) };
+ loop {}
+};
+
+macro_rules! arg_new {
+ ($f: ident, $t: ident) => {
+ #[doc(hidden)]
+ #[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "none")]
+ #[inline]
+ pub fn $f<'b, T: $t>(x: &'b T) -> ArgumentV1<'_> {
+ Self::new(x, $t::fmt)
+ }
+ };
+}
+
+#[rustc_diagnostic_item = "ArgumentV1Methods"]
+impl<'a> ArgumentV1<'a> {
+ #[doc(hidden)]
+ #[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "none")]
+ #[inline]
+ pub fn new<'b, T>(x: &'b T, f: fn(&T, &mut Formatter<'_>) -> Result) -> ArgumentV1<'b> {
+ // SAFETY: `mem::transmute(x)` is safe because
+ // 1. `&'b T` keeps the lifetime it originated with `'b`
+ // (so as to not have an unbounded lifetime)
+ // 2. `&'b T` and `&'b Opaque` have the same memory layout
+ // (when `T` is `Sized`, as it is here)
+ // `mem::transmute(f)` is safe since `fn(&T, &mut Formatter<'_>) -> Result`
+ // and `fn(&Opaque, &mut Formatter<'_>) -> Result` have the same ABI
+ // (as long as `T` is `Sized`)
+ unsafe { ArgumentV1 { formatter: mem::transmute(f), value: mem::transmute(x) } }
+ }
+
+ arg_new!(new_display, Display);
+ arg_new!(new_debug, Debug);
+ arg_new!(new_octal, Octal);
+ arg_new!(new_lower_hex, LowerHex);
+ arg_new!(new_upper_hex, UpperHex);
+ arg_new!(new_pointer, Pointer);
+ arg_new!(new_binary, Binary);
+ arg_new!(new_lower_exp, LowerExp);
+ arg_new!(new_upper_exp, UpperExp);
+
+ #[doc(hidden)]
+ #[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "none")]
+ pub fn from_usize(x: &usize) -> ArgumentV1<'_> {
+ ArgumentV1::new(x, USIZE_MARKER)
+ }
+
+ fn as_usize(&self) -> Option<usize> {
+ // We are type punning a bit here: USIZE_MARKER only takes an &usize but
+ // formatter takes an &Opaque. Rust understandably doesn't think we should compare
+ // the function pointers if they don't have the same signature, so we cast to
+ // usizes to tell it that we just want to compare addresses.
+ if self.formatter as usize == USIZE_MARKER as usize {
+ // SAFETY: The `formatter` field is only set to USIZE_MARKER if
+ // the value is a usize, so this is safe
+ Some(unsafe { *(self.value as *const _ as *const usize) })
+ } else {
+ None
+ }
+ }
+}
+
+// flags available in the v1 format of format_args
+#[derive(Copy, Clone)]
+enum FlagV1 {
+ SignPlus,
+ SignMinus,
+ Alternate,
+ SignAwareZeroPad,
+ DebugLowerHex,
+ DebugUpperHex,
+}
+
+impl<'a> Arguments<'a> {
+    /// When using the `format_args!()` macro, this function is used to generate the
+    /// `Arguments` structure.
+ #[doc(hidden)]
+ #[inline]
+ #[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "none")]
+ #[rustc_const_unstable(feature = "const_fmt_arguments_new", issue = "none")]
+ pub const fn new_v1(pieces: &'a [&'static str], args: &'a [ArgumentV1<'a>]) -> Arguments<'a> {
+ if pieces.len() < args.len() || pieces.len() > args.len() + 1 {
+ panic!("invalid args");
+ }
+ Arguments { pieces, fmt: None, args }
+ }
+
+ /// This function is used to specify nonstandard formatting parameters.
+ ///
+ /// An `UnsafeArg` is required because the following invariants must be held
+ /// in order for this function to be safe:
+ /// 1. The `pieces` slice must be at least as long as `fmt`.
+ /// 2. Every [`rt::v1::Argument::position`] value within `fmt` must be a
+ /// valid index of `args`.
+ /// 3. Every [`Count::Param`] within `fmt` must contain a valid index of
+ /// `args`.
+ #[doc(hidden)]
+ #[inline]
+ #[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "none")]
+ #[rustc_const_unstable(feature = "const_fmt_arguments_new", issue = "none")]
+ pub const fn new_v1_formatted(
+ pieces: &'a [&'static str],
+ args: &'a [ArgumentV1<'a>],
+ fmt: &'a [rt::v1::Argument],
+ _unsafe_arg: UnsafeArg,
+ ) -> Arguments<'a> {
+ Arguments { pieces, fmt: Some(fmt), args }
+ }
+
+ /// Estimates the length of the formatted text.
+ ///
+ /// This is intended to be used for setting initial `String` capacity
+    /// when using `format!`. Note: this is neither a lower nor an upper bound.
+ #[doc(hidden)]
+ #[inline]
+ #[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "none")]
+ pub fn estimated_capacity(&self) -> usize {
+ let pieces_length: usize = self.pieces.iter().map(|x| x.len()).sum();
+
+ if self.args.is_empty() {
+ pieces_length
+ } else if !self.pieces.is_empty() && self.pieces[0].is_empty() && pieces_length < 16 {
+ // If the format string starts with an argument,
+ // don't preallocate anything, unless length
+ // of pieces is significant.
+ 0
+ } else {
+ // There are some arguments, so any additional push
+ // will reallocate the string. To avoid that,
+ // we're "pre-doubling" the capacity here.
+ pieces_length.checked_mul(2).unwrap_or(0)
+ }
+ }
+}
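+
+// A sketch of the heuristic above (assuming the usual compiler-generated
+// pieces): for `format_args!("Hello {}!", name)` the pieces are
+// `["Hello ", "!"]` with total length 7, so `estimated_capacity()` returns
+// 2 * 7 = 14, pre-doubling to leave room for the interpolated argument.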
+
+/// This structure represents a safely precompiled version of a format string
+/// and its arguments. It cannot be generated at runtime because that cannot
+/// be done safely, so no constructors are given and the fields are private
+/// to prevent modification.
+///
+/// The [`format_args!`] macro will safely create an instance of this structure.
+/// The macro validates the format string at compile-time so usage of the
+/// [`write()`] and [`format()`] functions can be safely performed.
+///
+/// You can use the `Arguments<'a>` that [`format_args!`] returns in `Debug`
+/// and `Display` contexts as seen below. The example also shows that `Debug`
+/// and `Display` format to the same thing: the interpolated format string
+/// in `format_args!`.
+///
+/// ```rust
+/// let debug = format!("{:?}", format_args!("{} foo {:?}", 1, 2));
+/// let display = format!("{}", format_args!("{} foo {:?}", 1, 2));
+/// assert_eq!("1 foo 2", display);
+/// assert_eq!(display, debug);
+/// ```
+///
+/// [`format()`]: ../../std/fmt/fn.format.html
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "Arguments")]
+#[derive(Copy, Clone)]
+pub struct Arguments<'a> {
+ // Format string pieces to print.
+ pieces: &'a [&'static str],
+
+ // Placeholder specs, or `None` if all specs are default (as in "{}{}").
+ fmt: Option<&'a [rt::v1::Argument]>,
+
+ // Dynamic arguments for interpolation, to be interleaved with string
+ // pieces. (Every argument is preceded by a string piece.)
+ args: &'a [ArgumentV1<'a>],
+}
+
+impl<'a> Arguments<'a> {
+ /// Get the formatted string, if it has no arguments to be formatted.
+ ///
+ /// This can be used to avoid allocations in the most trivial case.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use std::fmt::Arguments;
+ ///
+ /// fn write_str(_: &str) { /* ... */ }
+ ///
+ /// fn write_fmt(args: &Arguments) {
+ /// if let Some(s) = args.as_str() {
+ /// write_str(s)
+ /// } else {
+ /// write_str(&args.to_string());
+ /// }
+ /// }
+ /// ```
+ ///
+ /// ```rust
+ /// assert_eq!(format_args!("hello").as_str(), Some("hello"));
+ /// assert_eq!(format_args!("").as_str(), Some(""));
+ /// assert_eq!(format_args!("{}", 1).as_str(), None);
+ /// ```
+ #[stable(feature = "fmt_as_str", since = "1.52.0")]
+ #[rustc_const_unstable(feature = "const_arguments_as_str", issue = "none")]
+ #[must_use]
+ #[inline]
+ pub const fn as_str(&self) -> Option<&'static str> {
+ match (self.pieces, self.args) {
+ ([], []) => Some(""),
+ ([s], []) => Some(s),
+ _ => None,
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Debug for Arguments<'_> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> Result {
+ Display::fmt(self, fmt)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Display for Arguments<'_> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> Result {
+ write(fmt.buf, *self)
+ }
+}
+
+/// `?` formatting.
+///
+/// `Debug` should format the output in a programmer-facing, debugging context.
+///
+/// Generally speaking, you should just `derive` a `Debug` implementation.
+///
+/// When used with the alternate format specifier `#?`, the output is pretty-printed.
+///
+/// For more information on formatters, see [the module-level documentation][module].
+///
+/// [module]: ../../std/fmt/index.html
+///
+/// This trait can be used with `#[derive]` if all fields implement `Debug`. When
+/// `derive`d for structs, it will use the name of the `struct`, then `{`, then a
+/// comma-separated list of each field's name and `Debug` value, then `}`. For
+/// `enum`s, it will use the name of the variant and, if applicable, `(`, then the
+/// `Debug` values of the fields, then `)`.
+///
+/// # Stability
+///
+/// Derived `Debug` formats are not stable, and so may change with future Rust
+/// versions. Additionally, `Debug` implementations of types provided by the
+/// standard library (`libstd`, `libcore`, `liballoc`, etc.) are not stable, and
+/// may also change with future Rust versions.
+///
+/// # Examples
+///
+/// Deriving an implementation:
+///
+/// ```
+/// #[derive(Debug)]
+/// struct Point {
+/// x: i32,
+/// y: i32,
+/// }
+///
+/// let origin = Point { x: 0, y: 0 };
+///
+/// assert_eq!(format!("The origin is: {origin:?}"), "The origin is: Point { x: 0, y: 0 }");
+/// ```
+///
+/// Manually implementing:
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Point {
+/// x: i32,
+/// y: i32,
+/// }
+///
+/// impl fmt::Debug for Point {
+/// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// f.debug_struct("Point")
+/// .field("x", &self.x)
+/// .field("y", &self.y)
+/// .finish()
+/// }
+/// }
+///
+/// let origin = Point { x: 0, y: 0 };
+///
+/// assert_eq!(format!("The origin is: {origin:?}"), "The origin is: Point { x: 0, y: 0 }");
+/// ```
+///
+/// There are a number of helper methods on the [`Formatter`] struct to help you with manual
+/// implementations, such as [`debug_struct`].
+///
+/// [`debug_struct`]: Formatter::debug_struct
+///
+/// Types that do not wish to use the standard suite of debug representations
+/// provided by the `Formatter` type (`debug_struct`, `debug_tuple`,
+/// `debug_list`, `debug_set`, `debug_map`) can do something totally custom by
+/// manually writing an arbitrary representation to the `Formatter`.
+///
+/// ```
+/// # use std::fmt;
+/// # struct Point {
+/// # x: i32,
+/// # y: i32,
+/// # }
+/// #
+/// impl fmt::Debug for Point {
+/// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// write!(f, "Point [{} {}]", self.x, self.y)
+/// }
+/// }
+/// ```
+///
+/// `Debug` implementations using either `derive` or the debug builder API
+/// on [`Formatter`] support pretty-printing using the alternate flag: `{:#?}`.
+///
+/// Pretty-printing with `#?`:
+///
+/// ```
+/// #[derive(Debug)]
+/// struct Point {
+/// x: i32,
+/// y: i32,
+/// }
+///
+/// let origin = Point { x: 0, y: 0 };
+///
+/// assert_eq!(format!("The origin is: {origin:#?}"),
+/// "The origin is: Point {
+/// x: 0,
+/// y: 0,
+/// }");
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(
+ on(
+ crate_local,
+ label = "`{Self}` cannot be formatted using `{{:?}}`",
+ note = "add `#[derive(Debug)]` to `{Self}` or manually `impl {Debug} for {Self}`"
+ ),
+ message = "`{Self}` doesn't implement `{Debug}`",
+ label = "`{Self}` cannot be formatted using `{{:?}}` because it doesn't implement `{Debug}`"
+)]
+#[doc(alias = "{:?}")]
+#[rustc_diagnostic_item = "Debug"]
+#[rustc_trivial_field_reads]
+pub trait Debug {
+ /// Formats the value using the given formatter.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Position {
+ /// longitude: f32,
+ /// latitude: f32,
+ /// }
+ ///
+ /// impl fmt::Debug for Position {
+ /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// f.debug_tuple("")
+ /// .field(&self.longitude)
+ /// .field(&self.latitude)
+ /// .finish()
+ /// }
+ /// }
+ ///
+ /// let position = Position { longitude: 1.987, latitude: 2.983 };
+ /// assert_eq!(format!("{position:?}"), "(1.987, 2.983)");
+ ///
+ /// assert_eq!(format!("{position:#?}"), "(
+ /// 1.987,
+ /// 2.983,
+ /// )");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result;
+}
+
+// Separate module to reexport the macro `Debug` from prelude without the trait `Debug`.
+pub(crate) mod macros {
+ /// Derive macro generating an impl of the trait `Debug`.
+ #[rustc_builtin_macro]
+ #[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+ #[allow_internal_unstable(core_intrinsics, fmt_helpers_for_derive)]
+ pub macro Debug($item:item) {
+ /* compiler built-in */
+ }
+}
+#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+#[doc(inline)]
+pub use macros::Debug;
+
+/// Format trait for an empty format, `{}`.
+///
+/// `Display` is similar to [`Debug`], but `Display` is for user-facing
+/// output, and so cannot be derived.
+///
+/// For more information on formatters, see [the module-level documentation][module].
+///
+/// [module]: ../../std/fmt/index.html
+///
+/// # Examples
+///
+/// Implementing `Display` on a type:
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Point {
+/// x: i32,
+/// y: i32,
+/// }
+///
+/// impl fmt::Display for Point {
+/// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// write!(f, "({}, {})", self.x, self.y)
+/// }
+/// }
+///
+/// let origin = Point { x: 0, y: 0 };
+///
+/// assert_eq!(format!("The origin is: {origin}"), "The origin is: (0, 0)");
+/// ```
+#[rustc_on_unimplemented(
+ on(
+ any(_Self = "std::path::Path", _Self = "std::path::PathBuf"),
+ label = "`{Self}` cannot be formatted with the default formatter; call `.display()` on it",
+ note = "call `.display()` or `.to_string_lossy()` to safely print paths, \
+ as they may contain non-Unicode data"
+ ),
+ message = "`{Self}` doesn't implement `{Display}`",
+ label = "`{Self}` cannot be formatted with the default formatter",
+    note = "in format strings you may be able to use `{{:?}}` (or `{{:#?}}` for pretty-print) instead"
+)]
+#[doc(alias = "{}")]
+#[rustc_diagnostic_item = "Display"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait Display {
+ /// Formats the value using the given formatter.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Position {
+ /// longitude: f32,
+ /// latitude: f32,
+ /// }
+ ///
+ /// impl fmt::Display for Position {
+ /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// write!(f, "({}, {})", self.longitude, self.latitude)
+ /// }
+ /// }
+ ///
+ /// assert_eq!("(1.987, 2.983)",
+ /// format!("{}", Position { longitude: 1.987, latitude: 2.983, }));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result;
+}
+
+/// `o` formatting.
+///
+/// The `Octal` trait should format its output as a number in base-8.
+///
+/// For primitive signed integers (`i8` to `i128`, and `isize`),
+/// negative values are formatted as the two’s complement representation.
+///
+/// The alternate flag, `#`, adds a `0o` in front of the output.
+///
+/// For more information on formatters, see [the module-level documentation][module].
+///
+/// [module]: ../../std/fmt/index.html
+///
+/// # Examples
+///
+/// Basic usage with `i32`:
+///
+/// ```
+/// let x = 42; // 42 is '52' in octal
+///
+/// assert_eq!(format!("{x:o}"), "52");
+/// assert_eq!(format!("{x:#o}"), "0o52");
+///
+/// assert_eq!(format!("{:o}", -16), "37777777760");
+/// ```
+///
+/// Implementing `Octal` on a type:
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Length(i32);
+///
+/// impl fmt::Octal for Length {
+/// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// let val = self.0;
+///
+/// fmt::Octal::fmt(&val, f) // delegate to i32's implementation
+/// }
+/// }
+///
+/// let l = Length(9);
+///
+/// assert_eq!(format!("l as octal is: {l:o}"), "l as octal is: 11");
+///
+/// assert_eq!(format!("l as octal is: {l:#06o}"), "l as octal is: 0o0011");
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait Octal {
+ /// Formats the value using the given formatter.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result;
+}
+
+/// `b` formatting.
+///
+/// The `Binary` trait should format its output as a number in binary.
+///
+/// For primitive signed integers ([`i8`] to [`i128`], and [`isize`]),
+/// negative values are formatted as the two’s complement representation.
+///
+/// The alternate flag, `#`, adds a `0b` in front of the output.
+///
+/// For more information on formatters, see [the module-level documentation][module].
+///
+/// [module]: ../../std/fmt/index.html
+///
+/// # Examples
+///
+/// Basic usage with [`i32`]:
+///
+/// ```
+/// let x = 42; // 42 is '101010' in binary
+///
+/// assert_eq!(format!("{x:b}"), "101010");
+/// assert_eq!(format!("{x:#b}"), "0b101010");
+///
+/// assert_eq!(format!("{:b}", -16), "11111111111111111111111111110000");
+/// ```
+///
+/// Implementing `Binary` on a type:
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Length(i32);
+///
+/// impl fmt::Binary for Length {
+/// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// let val = self.0;
+///
+/// fmt::Binary::fmt(&val, f) // delegate to i32's implementation
+/// }
+/// }
+///
+/// let l = Length(107);
+///
+/// assert_eq!(format!("l as binary is: {l:b}"), "l as binary is: 1101011");
+///
+/// assert_eq!(
+/// format!("l as binary is: {l:#032b}"),
+/// "l as binary is: 0b000000000000000000000001101011"
+/// );
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait Binary {
+ /// Formats the value using the given formatter.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result;
+}
+
+/// `x` formatting.
+///
+/// The `LowerHex` trait should format its output as a number in hexadecimal, with `a` through `f`
+/// in lower case.
+///
+/// For primitive signed integers (`i8` to `i128`, and `isize`),
+/// negative values are formatted as the two’s complement representation.
+///
+/// The alternate flag, `#`, adds a `0x` in front of the output.
+///
+/// For more information on formatters, see [the module-level documentation][module].
+///
+/// [module]: ../../std/fmt/index.html
+///
+/// # Examples
+///
+/// Basic usage with `i32`:
+///
+/// ```
+/// let x = 42; // 42 is '2a' in hex
+///
+/// assert_eq!(format!("{x:x}"), "2a");
+/// assert_eq!(format!("{x:#x}"), "0x2a");
+///
+/// assert_eq!(format!("{:x}", -16), "fffffff0");
+/// ```
+///
+/// Implementing `LowerHex` on a type:
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Length(i32);
+///
+/// impl fmt::LowerHex for Length {
+/// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// let val = self.0;
+///
+/// fmt::LowerHex::fmt(&val, f) // delegate to i32's implementation
+/// }
+/// }
+///
+/// let l = Length(9);
+///
+/// assert_eq!(format!("l as hex is: {l:x}"), "l as hex is: 9");
+///
+/// assert_eq!(format!("l as hex is: {l:#010x}"), "l as hex is: 0x00000009");
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait LowerHex {
+ /// Formats the value using the given formatter.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result;
+}
+
+/// `X` formatting.
+///
+/// The `UpperHex` trait should format its output as a number in hexadecimal, with `A` through `F`
+/// in upper case.
+///
+/// For primitive signed integers (`i8` to `i128`, and `isize`),
+/// negative values are formatted as the two’s complement representation.
+///
+/// The alternate flag, `#`, adds a `0x` in front of the output.
+///
+/// For more information on formatters, see [the module-level documentation][module].
+///
+/// [module]: ../../std/fmt/index.html
+///
+/// # Examples
+///
+/// Basic usage with `i32`:
+///
+/// ```
+/// let x = 42; // 42 is '2A' in hex
+///
+/// assert_eq!(format!("{x:X}"), "2A");
+/// assert_eq!(format!("{x:#X}"), "0x2A");
+///
+/// assert_eq!(format!("{:X}", -16), "FFFFFFF0");
+/// ```
+///
+/// Implementing `UpperHex` on a type:
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Length(i32);
+///
+/// impl fmt::UpperHex for Length {
+/// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// let val = self.0;
+///
+/// fmt::UpperHex::fmt(&val, f) // delegate to i32's implementation
+/// }
+/// }
+///
+/// let l = Length(i32::MAX);
+///
+/// assert_eq!(format!("l as hex is: {l:X}"), "l as hex is: 7FFFFFFF");
+///
+/// assert_eq!(format!("l as hex is: {l:#010X}"), "l as hex is: 0x7FFFFFFF");
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait UpperHex {
+ /// Formats the value using the given formatter.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result;
+}
+
+/// `p` formatting.
+///
+/// The `Pointer` trait should format its output as a memory location. This is commonly presented
+/// as hexadecimal.
+///
+/// For more information on formatters, see [the module-level documentation][module].
+///
+/// [module]: ../../std/fmt/index.html
+///
+/// # Examples
+///
+/// Basic usage with `&i32`:
+///
+/// ```
+/// let x = &42;
+///
+/// let address = format!("{x:p}"); // this produces something like '0x7f06092ac6d0'
+/// ```
+///
+/// Implementing `Pointer` on a type:
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Length(i32);
+///
+/// impl fmt::Pointer for Length {
+/// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+///         // use `as` to convert to a `*const T`, which implements `Pointer`
+///
+/// let ptr = self as *const Self;
+/// fmt::Pointer::fmt(&ptr, f)
+/// }
+/// }
+///
+/// let l = Length(42);
+///
+/// println!("l is in memory here: {l:p}");
+///
+/// let l_ptr = format!("{l:018p}");
+/// assert_eq!(l_ptr.len(), 18);
+/// assert_eq!(&l_ptr[..2], "0x");
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_diagnostic_item = "Pointer"]
+pub trait Pointer {
+ /// Formats the value using the given formatter.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_diagnostic_item = "pointer_trait_fmt"]
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result;
+}
+
+/// `e` formatting.
+///
+/// The `LowerExp` trait should format its output in scientific notation with a lower-case `e`.
+///
+/// For more information on formatters, see [the module-level documentation][module].
+///
+/// [module]: ../../std/fmt/index.html
+///
+/// # Examples
+///
+/// Basic usage with `f64`:
+///
+/// ```
+/// let x = 42.0; // 42.0 is '4.2e1' in scientific notation
+///
+/// assert_eq!(format!("{x:e}"), "4.2e1");
+/// ```
+///
+/// Implementing `LowerExp` on a type:
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Length(i32);
+///
+/// impl fmt::LowerExp for Length {
+/// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// let val = f64::from(self.0);
+/// fmt::LowerExp::fmt(&val, f) // delegate to f64's implementation
+/// }
+/// }
+///
+/// let l = Length(100);
+///
+/// assert_eq!(
+/// format!("l in scientific notation is: {l:e}"),
+/// "l in scientific notation is: 1e2"
+/// );
+///
+/// assert_eq!(
+/// format!("l in scientific notation is: {l:05e}"),
+/// "l in scientific notation is: 001e2"
+/// );
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait LowerExp {
+ /// Formats the value using the given formatter.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result;
+}
+
+/// `E` formatting.
+///
+/// The `UpperExp` trait should format its output in scientific notation with an upper-case `E`.
+///
+/// For more information on formatters, see [the module-level documentation][module].
+///
+/// [module]: ../../std/fmt/index.html
+///
+/// # Examples
+///
+/// Basic usage with `f64`:
+///
+/// ```
+/// let x = 42.0; // 42.0 is '4.2E1' in scientific notation
+///
+/// assert_eq!(format!("{x:E}"), "4.2E1");
+/// ```
+///
+/// Implementing `UpperExp` on a type:
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Length(i32);
+///
+/// impl fmt::UpperExp for Length {
+/// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// let val = f64::from(self.0);
+/// fmt::UpperExp::fmt(&val, f) // delegate to f64's implementation
+/// }
+/// }
+///
+/// let l = Length(100);
+///
+/// assert_eq!(
+/// format!("l in scientific notation is: {l:E}"),
+/// "l in scientific notation is: 1E2"
+/// );
+///
+/// assert_eq!(
+/// format!("l in scientific notation is: {l:05E}"),
+/// "l in scientific notation is: 001E2"
+/// );
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait UpperExp {
+ /// Formats the value using the given formatter.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result;
+}
+
+/// The `write` function takes an output stream and an `Arguments` struct
+/// that can be precompiled with the `format_args!` macro.
+///
+/// The arguments will be formatted according to the specified format string
+/// and written into the output stream provided.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::fmt;
+///
+/// let mut output = String::new();
+/// fmt::write(&mut output, format_args!("Hello {}!", "world"))
+///     .expect("Error occurred while trying to write to String");
+/// assert_eq!(output, "Hello world!");
+/// ```
+///
+/// Please note that using [`write!`] might be preferable. Example:
+///
+/// ```
+/// use std::fmt::Write;
+///
+/// let mut output = String::new();
+/// write!(&mut output, "Hello {}!", "world")
+///     .expect("Error occurred while trying to write to String");
+/// assert_eq!(output, "Hello world!");
+/// ```
+///
+/// [`write!`]: crate::write!
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn write(output: &mut dyn Write, args: Arguments<'_>) -> Result {
+ let mut formatter = Formatter::new(output);
+ let mut idx = 0;
+
+ match args.fmt {
+ None => {
+ // We can use default formatting parameters for all arguments.
+ for (i, arg) in args.args.iter().enumerate() {
+ // SAFETY: args.args and args.pieces come from the same Arguments,
+ // which guarantees the indexes are always within bounds.
+ let piece = unsafe { args.pieces.get_unchecked(i) };
+ if !piece.is_empty() {
+ formatter.buf.write_str(*piece)?;
+ }
+ (arg.formatter)(arg.value, &mut formatter)?;
+ idx += 1;
+ }
+ }
+ Some(fmt) => {
+ // Every spec has a corresponding argument that is preceded by
+ // a string piece.
+ for (i, arg) in fmt.iter().enumerate() {
+ // SAFETY: fmt and args.pieces come from the same Arguments,
+ // which guarantees the indexes are always within bounds.
+ let piece = unsafe { args.pieces.get_unchecked(i) };
+ if !piece.is_empty() {
+ formatter.buf.write_str(*piece)?;
+ }
+ // SAFETY: arg and args.args come from the same Arguments,
+ // which guarantees the indexes are always within bounds.
+ unsafe { run(&mut formatter, arg, args.args) }?;
+ idx += 1;
+ }
+ }
+ }
+
+ // There can be only one trailing string piece left.
+ if let Some(piece) = args.pieces.get(idx) {
+ formatter.buf.write_str(*piece)?;
+ }
+
+ Ok(())
+}
+
+unsafe fn run(fmt: &mut Formatter<'_>, arg: &rt::v1::Argument, args: &[ArgumentV1<'_>]) -> Result {
+ fmt.fill = arg.format.fill;
+ fmt.align = arg.format.align;
+ fmt.flags = arg.format.flags;
+ // SAFETY: arg and args come from the same Arguments,
+ // which guarantees the indexes are always within bounds.
+ unsafe {
+ fmt.width = getcount(args, &arg.format.width);
+ fmt.precision = getcount(args, &arg.format.precision);
+ }
+
+ // Extract the correct argument
+ debug_assert!(arg.position < args.len());
+ // SAFETY: arg and args come from the same Arguments,
+ // which guarantees its index is always within bounds.
+ let value = unsafe { args.get_unchecked(arg.position) };
+
+ // Then actually do some printing
+ (value.formatter)(value.value, fmt)
+}
+
+unsafe fn getcount(args: &[ArgumentV1<'_>], cnt: &rt::v1::Count) -> Option<usize> {
+ match *cnt {
+ rt::v1::Count::Is(n) => Some(n),
+ rt::v1::Count::Implied => None,
+ rt::v1::Count::Param(i) => {
+ debug_assert!(i < args.len());
+ // SAFETY: cnt and args come from the same Arguments,
+ // which guarantees this index is always within bounds.
+ unsafe { args.get_unchecked(i).as_usize() }
+ }
+ }
+}
+
+/// Padding to write after some already-emitted output. Returned by `Formatter::padding`.
+#[must_use = "don't forget to write the post padding"]
+pub(crate) struct PostPadding {
+ fill: char,
+ padding: usize,
+}
+
+impl PostPadding {
+ fn new(fill: char, padding: usize) -> PostPadding {
+ PostPadding { fill, padding }
+ }
+
+ /// Write this post padding.
+ pub(crate) fn write(self, f: &mut Formatter<'_>) -> Result {
+ for _ in 0..self.padding {
+ f.buf.write_char(self.fill)?;
+ }
+ Ok(())
+ }
+}
+
+impl<'a> Formatter<'a> {
+ fn wrap_buf<'b, 'c, F>(&'b mut self, wrap: F) -> Formatter<'c>
+ where
+ 'b: 'c,
+ F: FnOnce(&'b mut (dyn Write + 'b)) -> &'c mut (dyn Write + 'c),
+ {
+ Formatter {
+ // We want to change this
+ buf: wrap(self.buf),
+
+ // And preserve these
+ flags: self.flags,
+ fill: self.fill,
+ align: self.align,
+ width: self.width,
+ precision: self.precision,
+ }
+ }
+
+ // Helper methods used for padding and processing formatting arguments that
+ // all formatting traits can use.
+
+ /// Performs the correct padding for an integer which has already been
+ /// emitted into a str. The str should *not* contain the sign for the
+    /// integer; that will be added by this method.
+ ///
+ /// # Arguments
+ ///
+ /// * is_nonnegative - whether the original integer was either positive or zero.
+ /// * prefix - if the '#' character (Alternate) is provided, this
+ /// is the prefix to put in front of the number.
+    /// * buf - the string slice that the number has been formatted into
+ ///
+ /// This function will correctly account for the flags provided as well as
+ /// the minimum width. It will not take precision into account.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo { nb: i32 }
+ ///
+ /// impl Foo {
+ /// fn new(nb: i32) -> Foo {
+ /// Foo {
+ /// nb,
+ /// }
+ /// }
+ /// }
+ ///
+ /// impl fmt::Display for Foo {
+ /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ /// // We need to remove "-" from the number output.
+ /// let tmp = self.nb.abs().to_string();
+ ///
+ /// formatter.pad_integral(self.nb >= 0, "Foo ", &tmp)
+ /// }
+ /// }
+ ///
+ /// assert_eq!(&format!("{}", Foo::new(2)), "2");
+ /// assert_eq!(&format!("{}", Foo::new(-1)), "-1");
+ /// assert_eq!(&format!("{}", Foo::new(0)), "0");
+ /// assert_eq!(&format!("{:#}", Foo::new(-1)), "-Foo 1");
+ /// assert_eq!(&format!("{:0>#8}", Foo::new(-1)), "00-Foo 1");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn pad_integral(&mut self, is_nonnegative: bool, prefix: &str, buf: &str) -> Result {
+ let mut width = buf.len();
+
+ let mut sign = None;
+ if !is_nonnegative {
+ sign = Some('-');
+ width += 1;
+ } else if self.sign_plus() {
+ sign = Some('+');
+ width += 1;
+ }
+
+ let prefix = if self.alternate() {
+ width += prefix.chars().count();
+ Some(prefix)
+ } else {
+ None
+ };
+
+ // Writes the sign if it exists, and then the prefix if it was requested
+ #[inline(never)]
+ fn write_prefix(f: &mut Formatter<'_>, sign: Option<char>, prefix: Option<&str>) -> Result {
+ if let Some(c) = sign {
+ f.buf.write_char(c)?;
+ }
+ if let Some(prefix) = prefix { f.buf.write_str(prefix) } else { Ok(()) }
+ }
+
+ // The `width` field is more of a `min-width` parameter at this point.
+ match self.width {
+            // If there's no minimum width requirement then we can just
+            // write the bytes.
+ None => {
+ write_prefix(self, sign, prefix)?;
+ self.buf.write_str(buf)
+ }
+            // If we're already at or over the minimum width, then we can
+            // also just write the bytes.
+ Some(min) if width >= min => {
+ write_prefix(self, sign, prefix)?;
+ self.buf.write_str(buf)
+ }
+            // The sign and prefix go before the padding if the fill character
+            // is zero
+ Some(min) if self.sign_aware_zero_pad() => {
+ let old_fill = crate::mem::replace(&mut self.fill, '0');
+ let old_align = crate::mem::replace(&mut self.align, rt::v1::Alignment::Right);
+ write_prefix(self, sign, prefix)?;
+ let post_padding = self.padding(min - width, rt::v1::Alignment::Right)?;
+ self.buf.write_str(buf)?;
+ post_padding.write(self)?;
+ self.fill = old_fill;
+ self.align = old_align;
+ Ok(())
+ }
+            // Otherwise, the sign and prefix go after the padding
+ Some(min) => {
+ let post_padding = self.padding(min - width, rt::v1::Alignment::Right)?;
+ write_prefix(self, sign, prefix)?;
+ self.buf.write_str(buf)?;
+ post_padding.write(self)
+ }
+ }
+ }
+
+ /// This function takes a string slice and emits it to the internal buffer
+ /// after applying the relevant formatting flags specified. The flags
+ /// recognized for generic strings are:
+ ///
+ /// * width - the minimum width of what to emit
+ /// * fill/align - what to emit and where to emit it if the string
+ /// provided needs to be padded
+ /// * precision - the maximum length to emit, the string is truncated if it
+ /// is longer than this length
+ ///
+ /// Notably this function ignores the `flag` parameters.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo;
+ ///
+ /// impl fmt::Display for Foo {
+ /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ /// formatter.pad("Foo")
+ /// }
+ /// }
+ ///
+ /// assert_eq!(&format!("{Foo:<4}"), "Foo ");
+ /// assert_eq!(&format!("{Foo:0>4}"), "0Foo");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn pad(&mut self, s: &str) -> Result {
+ // Make sure there's a fast path up front
+ if self.width.is_none() && self.precision.is_none() {
+ return self.buf.write_str(s);
+ }
+ // The `precision` field can be interpreted as a `max-width` for the
+ // string being formatted.
+ let s = if let Some(max) = self.precision {
+            // If our string is longer than the precision, then we must have
+ // truncation. However other flags like `fill`, `width` and `align`
+ // must act as always.
+ if let Some((i, _)) = s.char_indices().nth(max) {
+                // LLVM can't prove here that the slicing `&s[..i]` won't
+                // panic, but we know that it can't. Use `get` + `unwrap_or`
+                // to avoid `unsafe` and to keep panic-related code out of
+                // this path.
+ s.get(..i).unwrap_or(s)
+ } else {
+ &s
+ }
+ } else {
+ &s
+ };
+ // The `width` field is more of a `min-width` parameter at this point.
+ match self.width {
+            // If there's no minimum width requirement, then we can just
+            // emit the (possibly truncated) string.
+ None => self.buf.write_str(s),
+ Some(width) => {
+ let chars_count = s.chars().count();
+                // If we're already at or over the minimum width, it's as
+                // easy as just emitting the string.
+ if chars_count >= width {
+ self.buf.write_str(s)
+ }
+                // Otherwise, fill up to the minimum width with the fill
+                // character and the requested (or default) alignment.
+ else {
+ let align = rt::v1::Alignment::Left;
+ let post_padding = self.padding(width - chars_count, align)?;
+ self.buf.write_str(s)?;
+ post_padding.write(self)
+ }
+ }
+ }
+ }
+
+ /// Write the pre-padding and return the unwritten post-padding. Callers are
+ /// responsible for ensuring post-padding is written after the thing that is
+ /// being padded.
+ pub(crate) fn padding(
+ &mut self,
+ padding: usize,
+ default: rt::v1::Alignment,
+ ) -> result::Result<PostPadding, Error> {
+ let align = match self.align {
+ rt::v1::Alignment::Unknown => default,
+ _ => self.align,
+ };
+
+ let (pre_pad, post_pad) = match align {
+ rt::v1::Alignment::Left => (0, padding),
+ rt::v1::Alignment::Right | rt::v1::Alignment::Unknown => (padding, 0),
+ rt::v1::Alignment::Center => (padding / 2, (padding + 1) / 2),
+ };
+
+ for _ in 0..pre_pad {
+ self.buf.write_char(self.fill)?;
+ }
+
+ Ok(PostPadding::new(self.fill, post_pad))
+ }
+
+ /// Takes the formatted parts and applies the padding.
+ /// Assumes that the caller already has rendered the parts with required precision,
+ /// so that `self.precision` can be ignored.
+ fn pad_formatted_parts(&mut self, formatted: &numfmt::Formatted<'_>) -> Result {
+ if let Some(mut width) = self.width {
+ // for the sign-aware zero padding, we render the sign first and
+ // behave as if we had no sign from the beginning.
+ let mut formatted = formatted.clone();
+ let old_fill = self.fill;
+ let old_align = self.align;
+ let mut align = old_align;
+ if self.sign_aware_zero_pad() {
+ // a sign always goes first
+ let sign = formatted.sign;
+ self.buf.write_str(sign)?;
+
+ // remove the sign from the formatted parts
+ formatted.sign = "";
+ width = width.saturating_sub(sign.len());
+ align = rt::v1::Alignment::Right;
+ self.fill = '0';
+ self.align = rt::v1::Alignment::Right;
+ }
+
+ // remaining parts go through the ordinary padding process.
+ let len = formatted.len();
+ let ret = if width <= len {
+ // no padding
+ self.write_formatted_parts(&formatted)
+ } else {
+ let post_padding = self.padding(width - len, align)?;
+ self.write_formatted_parts(&formatted)?;
+ post_padding.write(self)
+ };
+ self.fill = old_fill;
+ self.align = old_align;
+ ret
+ } else {
+ // this is the common case and we take a shortcut
+ self.write_formatted_parts(formatted)
+ }
+ }
+
+ fn write_formatted_parts(&mut self, formatted: &numfmt::Formatted<'_>) -> Result {
+ fn write_bytes(buf: &mut dyn Write, s: &[u8]) -> Result {
+ // SAFETY: This is used for `numfmt::Part::Num` and `numfmt::Part::Copy`.
+ // It's safe to use for `numfmt::Part::Num` since every char `c` is between
+ // `b'0'` and `b'9'`, which means `s` is valid UTF-8.
+ // It's also probably safe in practice to use for `numfmt::Part::Copy(buf)`
+ // since `buf` should be plain ASCII, but it's possible for someone to pass
+ // in a bad value for `buf` into `numfmt::to_shortest_str` since it is a
+ // public function.
+ // FIXME: Determine whether this could result in UB.
+ buf.write_str(unsafe { str::from_utf8_unchecked(s) })
+ }
+
+ if !formatted.sign.is_empty() {
+ self.buf.write_str(formatted.sign)?;
+ }
+ for part in formatted.parts {
+ match *part {
+ numfmt::Part::Zero(mut nzeroes) => {
+ const ZEROES: &str = // 64 zeroes
+ "0000000000000000000000000000000000000000000000000000000000000000";
+ while nzeroes > ZEROES.len() {
+ self.buf.write_str(ZEROES)?;
+ nzeroes -= ZEROES.len();
+ }
+ if nzeroes > 0 {
+ self.buf.write_str(&ZEROES[..nzeroes])?;
+ }
+ }
+ numfmt::Part::Num(mut v) => {
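+                    // `numfmt::Part::Num` holds a `u16`, so 5 bytes are enough for its digits.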
+ let mut s = [0; 5];
+ let len = part.len();
+ for c in s[..len].iter_mut().rev() {
+ *c = b'0' + (v % 10) as u8;
+ v /= 10;
+ }
+ write_bytes(self.buf, &s[..len])?;
+ }
+ numfmt::Part::Copy(buf) => {
+ write_bytes(self.buf, buf)?;
+ }
+ }
+ }
+ Ok(())
+ }
+
+ /// Writes some data to the underlying buffer contained within this
+ /// formatter.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo;
+ ///
+ /// impl fmt::Display for Foo {
+ /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ /// formatter.write_str("Foo")
+ /// // This is equivalent to:
+ /// // write!(formatter, "Foo")
+ /// }
+ /// }
+ ///
+ /// assert_eq!(&format!("{Foo}"), "Foo");
+ /// assert_eq!(&format!("{Foo:0>8}"), "Foo");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn write_str(&mut self, data: &str) -> Result {
+ self.buf.write_str(data)
+ }
+
+ /// Writes some formatted information into this instance.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo(i32);
+ ///
+ /// impl fmt::Display for Foo {
+ /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ /// formatter.write_fmt(format_args!("Foo {}", self.0))
+ /// }
+ /// }
+ ///
+ /// assert_eq!(&format!("{}", Foo(-1)), "Foo -1");
+ /// assert_eq!(&format!("{:0>8}", Foo(2)), "Foo 2");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn write_fmt(&mut self, fmt: Arguments<'_>) -> Result {
+ write(self.buf, fmt)
+ }
+
+ /// Flags for formatting
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[deprecated(
+ since = "1.24.0",
+ note = "use the `sign_plus`, `sign_minus`, `alternate`, \
+ or `sign_aware_zero_pad` methods instead"
+ )]
+ pub fn flags(&self) -> u32 {
+ self.flags
+ }
+
+ /// Character used as 'fill' whenever there is alignment.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo;
+ ///
+ /// impl fmt::Display for Foo {
+ /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ /// let c = formatter.fill();
+ /// if let Some(width) = formatter.width() {
+ /// for _ in 0..width {
+ /// write!(formatter, "{c}")?;
+ /// }
+ /// Ok(())
+ /// } else {
+ /// write!(formatter, "{c}")
+ /// }
+ /// }
+ /// }
+ ///
+ /// // We set alignment to the right with ">".
+ /// assert_eq!(&format!("{Foo:G>3}"), "GGG");
+ /// assert_eq!(&format!("{Foo:t>6}"), "tttttt");
+ /// ```
+ #[must_use]
+ #[stable(feature = "fmt_flags", since = "1.5.0")]
+ pub fn fill(&self) -> char {
+ self.fill
+ }
+
+ /// Flag indicating what form of alignment was requested.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// extern crate core;
+ ///
+ /// use std::fmt::{self, Alignment};
+ ///
+ /// struct Foo;
+ ///
+ /// impl fmt::Display for Foo {
+ /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ /// let s = if let Some(s) = formatter.align() {
+ /// match s {
+ /// Alignment::Left => "left",
+ /// Alignment::Right => "right",
+ /// Alignment::Center => "center",
+ /// }
+ /// } else {
+ /// "into the void"
+ /// };
+ /// write!(formatter, "{s}")
+ /// }
+ /// }
+ ///
+ /// assert_eq!(&format!("{Foo:<}"), "left");
+ /// assert_eq!(&format!("{Foo:>}"), "right");
+ /// assert_eq!(&format!("{Foo:^}"), "center");
+ /// assert_eq!(&format!("{Foo}"), "into the void");
+ /// ```
+ #[must_use]
+ #[stable(feature = "fmt_flags_align", since = "1.28.0")]
+ pub fn align(&self) -> Option<Alignment> {
+ match self.align {
+ rt::v1::Alignment::Left => Some(Alignment::Left),
+ rt::v1::Alignment::Right => Some(Alignment::Right),
+ rt::v1::Alignment::Center => Some(Alignment::Center),
+ rt::v1::Alignment::Unknown => None,
+ }
+ }
+
+    /// Optionally specified minimum width that the output should occupy.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo(i32);
+ ///
+ /// impl fmt::Display for Foo {
+ /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ /// if let Some(width) = formatter.width() {
+ /// // If we received a width, we use it
+ /// write!(formatter, "{:width$}", &format!("Foo({})", self.0), width = width)
+ /// } else {
+ /// // Otherwise we do nothing special
+ /// write!(formatter, "Foo({})", self.0)
+ /// }
+ /// }
+ /// }
+ ///
+ /// assert_eq!(&format!("{:10}", Foo(23)), "Foo(23) ");
+ /// assert_eq!(&format!("{}", Foo(23)), "Foo(23)");
+ /// ```
+ #[must_use]
+ #[stable(feature = "fmt_flags", since = "1.5.0")]
+ pub fn width(&self) -> Option<usize> {
+ self.width
+ }
+
+ /// Optionally specified precision for numeric types. Alternatively, the
+ /// maximum width for string types.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo(f32);
+ ///
+ /// impl fmt::Display for Foo {
+ /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ /// if let Some(precision) = formatter.precision() {
+ /// // If we received a precision, we use it.
+ /// write!(formatter, "Foo({1:.*})", precision, self.0)
+ /// } else {
+ /// // Otherwise we default to 2.
+ /// write!(formatter, "Foo({:.2})", self.0)
+ /// }
+ /// }
+ /// }
+ ///
+ /// assert_eq!(&format!("{:.4}", Foo(23.2)), "Foo(23.2000)");
+ /// assert_eq!(&format!("{}", Foo(23.2)), "Foo(23.20)");
+ /// ```
+ #[must_use]
+ #[stable(feature = "fmt_flags", since = "1.5.0")]
+ pub fn precision(&self) -> Option<usize> {
+ self.precision
+ }
+
+ /// Determines if the `+` flag was specified.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo(i32);
+ ///
+ /// impl fmt::Display for Foo {
+ /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ /// if formatter.sign_plus() {
+ /// write!(formatter,
+ /// "Foo({}{})",
+ /// if self.0 < 0 { '-' } else { '+' },
+ /// self.0)
+ /// } else {
+ /// write!(formatter, "Foo({})", self.0)
+ /// }
+ /// }
+ /// }
+ ///
+ /// assert_eq!(&format!("{:+}", Foo(23)), "Foo(+23)");
+ /// assert_eq!(&format!("{}", Foo(23)), "Foo(23)");
+ /// ```
+ #[must_use]
+ #[stable(feature = "fmt_flags", since = "1.5.0")]
+ pub fn sign_plus(&self) -> bool {
+ self.flags & (1 << FlagV1::SignPlus as u32) != 0
+ }
+
+ /// Determines if the `-` flag was specified.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo(i32);
+ ///
+ /// impl fmt::Display for Foo {
+ /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ /// if formatter.sign_minus() {
+ /// // You want a minus sign? Have one!
+ /// write!(formatter, "-Foo({})", self.0)
+ /// } else {
+ /// write!(formatter, "Foo({})", self.0)
+ /// }
+ /// }
+ /// }
+ ///
+ /// assert_eq!(&format!("{:-}", Foo(23)), "-Foo(23)");
+ /// assert_eq!(&format!("{}", Foo(23)), "Foo(23)");
+ /// ```
+ #[must_use]
+ #[stable(feature = "fmt_flags", since = "1.5.0")]
+ pub fn sign_minus(&self) -> bool {
+ self.flags & (1 << FlagV1::SignMinus as u32) != 0
+ }
+
+ /// Determines if the `#` flag was specified.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo(i32);
+ ///
+ /// impl fmt::Display for Foo {
+ /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ /// if formatter.alternate() {
+ /// write!(formatter, "Foo({})", self.0)
+ /// } else {
+ /// write!(formatter, "{}", self.0)
+ /// }
+ /// }
+ /// }
+ ///
+ /// assert_eq!(&format!("{:#}", Foo(23)), "Foo(23)");
+ /// assert_eq!(&format!("{}", Foo(23)), "23");
+ /// ```
+ #[must_use]
+ #[stable(feature = "fmt_flags", since = "1.5.0")]
+ pub fn alternate(&self) -> bool {
+ self.flags & (1 << FlagV1::Alternate as u32) != 0
+ }
+
+ /// Determines if the `0` flag was specified.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo(i32);
+ ///
+ /// impl fmt::Display for Foo {
+ /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ /// assert!(formatter.sign_aware_zero_pad());
+ /// assert_eq!(formatter.width(), Some(4));
+ /// // We ignore the formatter's options.
+ /// write!(formatter, "{}", self.0)
+ /// }
+ /// }
+ ///
+ /// assert_eq!(&format!("{:04}", Foo(23)), "23");
+ /// ```
+ #[must_use]
+ #[stable(feature = "fmt_flags", since = "1.5.0")]
+ pub fn sign_aware_zero_pad(&self) -> bool {
+ self.flags & (1 << FlagV1::SignAwareZeroPad as u32) != 0
+ }
+
+ // FIXME: Decide what public API we want for these two flags.
+ // https://github.com/rust-lang/rust/issues/48584
+ fn debug_lower_hex(&self) -> bool {
+ self.flags & (1 << FlagV1::DebugLowerHex as u32) != 0
+ }
+
+ fn debug_upper_hex(&self) -> bool {
+ self.flags & (1 << FlagV1::DebugUpperHex as u32) != 0
+ }
+
+ /// Creates a [`DebugStruct`] builder designed to assist with creation of
+ /// [`fmt::Debug`] implementations for structs.
+ ///
+ /// [`fmt::Debug`]: self::Debug
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use std::fmt;
+ /// use std::net::Ipv4Addr;
+ ///
+ /// struct Foo {
+ /// bar: i32,
+ /// baz: String,
+ /// addr: Ipv4Addr,
+ /// }
+ ///
+ /// impl fmt::Debug for Foo {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ /// fmt.debug_struct("Foo")
+ /// .field("bar", &self.bar)
+ /// .field("baz", &self.baz)
+ /// .field("addr", &format_args!("{}", self.addr))
+ /// .finish()
+ /// }
+ /// }
+ ///
+ /// assert_eq!(
+ /// "Foo { bar: 10, baz: \"Hello World\", addr: 127.0.0.1 }",
+ /// format!("{:?}", Foo {
+ /// bar: 10,
+ /// baz: "Hello World".to_string(),
+ /// addr: Ipv4Addr::new(127, 0, 0, 1),
+ /// })
+ /// );
+ /// ```
+ #[stable(feature = "debug_builders", since = "1.2.0")]
+ pub fn debug_struct<'b>(&'b mut self, name: &str) -> DebugStruct<'b, 'a> {
+ builders::debug_struct_new(self, name)
+ }
+
+ /// Used to shrink `derive(Debug)` code, for faster compilation and smaller binaries.
+ /// `debug_struct_fields_finish` is more general, but this is faster for 1 field.
+ #[doc(hidden)]
+ #[unstable(feature = "fmt_helpers_for_derive", issue = "none")]
+ pub fn debug_struct_field1_finish<'b>(
+ &'b mut self,
+ name: &str,
+ name1: &str,
+ value1: &dyn Debug,
+ ) -> Result {
+ let mut builder = builders::debug_struct_new(self, name);
+ builder.field(name1, value1);
+ builder.finish()
+ }
+
+ /// Used to shrink `derive(Debug)` code, for faster compilation and smaller binaries.
+ /// `debug_struct_fields_finish` is more general, but this is faster for 2 fields.
+ #[doc(hidden)]
+ #[unstable(feature = "fmt_helpers_for_derive", issue = "none")]
+ pub fn debug_struct_field2_finish<'b>(
+ &'b mut self,
+ name: &str,
+ name1: &str,
+ value1: &dyn Debug,
+ name2: &str,
+ value2: &dyn Debug,
+ ) -> Result {
+ let mut builder = builders::debug_struct_new(self, name);
+ builder.field(name1, value1);
+ builder.field(name2, value2);
+ builder.finish()
+ }
+
+ /// Used to shrink `derive(Debug)` code, for faster compilation and smaller binaries.
+ /// `debug_struct_fields_finish` is more general, but this is faster for 3 fields.
+ #[doc(hidden)]
+ #[unstable(feature = "fmt_helpers_for_derive", issue = "none")]
+ pub fn debug_struct_field3_finish<'b>(
+ &'b mut self,
+ name: &str,
+ name1: &str,
+ value1: &dyn Debug,
+ name2: &str,
+ value2: &dyn Debug,
+ name3: &str,
+ value3: &dyn Debug,
+ ) -> Result {
+ let mut builder = builders::debug_struct_new(self, name);
+ builder.field(name1, value1);
+ builder.field(name2, value2);
+ builder.field(name3, value3);
+ builder.finish()
+ }
+
+ /// Used to shrink `derive(Debug)` code, for faster compilation and smaller binaries.
+ /// `debug_struct_fields_finish` is more general, but this is faster for 4 fields.
+ #[doc(hidden)]
+ #[unstable(feature = "fmt_helpers_for_derive", issue = "none")]
+ pub fn debug_struct_field4_finish<'b>(
+ &'b mut self,
+ name: &str,
+ name1: &str,
+ value1: &dyn Debug,
+ name2: &str,
+ value2: &dyn Debug,
+ name3: &str,
+ value3: &dyn Debug,
+ name4: &str,
+ value4: &dyn Debug,
+ ) -> Result {
+ let mut builder = builders::debug_struct_new(self, name);
+ builder.field(name1, value1);
+ builder.field(name2, value2);
+ builder.field(name3, value3);
+ builder.field(name4, value4);
+ builder.finish()
+ }
+
+ /// Used to shrink `derive(Debug)` code, for faster compilation and smaller binaries.
+ /// `debug_struct_fields_finish` is more general, but this is faster for 5 fields.
+ #[doc(hidden)]
+ #[unstable(feature = "fmt_helpers_for_derive", issue = "none")]
+ pub fn debug_struct_field5_finish<'b>(
+ &'b mut self,
+ name: &str,
+ name1: &str,
+ value1: &dyn Debug,
+ name2: &str,
+ value2: &dyn Debug,
+ name3: &str,
+ value3: &dyn Debug,
+ name4: &str,
+ value4: &dyn Debug,
+ name5: &str,
+ value5: &dyn Debug,
+ ) -> Result {
+ let mut builder = builders::debug_struct_new(self, name);
+ builder.field(name1, value1);
+ builder.field(name2, value2);
+ builder.field(name3, value3);
+ builder.field(name4, value4);
+ builder.field(name5, value5);
+ builder.finish()
+ }
+
+ /// Used to shrink `derive(Debug)` code, for faster compilation and smaller binaries.
+ /// For the cases not covered by `debug_struct_field[12345]_finish`.
+ #[doc(hidden)]
+ #[unstable(feature = "fmt_helpers_for_derive", issue = "none")]
+ pub fn debug_struct_fields_finish<'b>(
+ &'b mut self,
+ name: &str,
+ names: &[&str],
+ values: &[&dyn Debug],
+ ) -> Result {
+ assert_eq!(names.len(), values.len());
+ let mut builder = builders::debug_struct_new(self, name);
+ for (name, value) in iter::zip(names, values) {
+ builder.field(name, value);
+ }
+ builder.finish()
+ }
+
+ /// Creates a `DebugTuple` builder designed to assist with creation of
+ /// `fmt::Debug` implementations for tuple structs.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use std::fmt;
+ /// use std::marker::PhantomData;
+ ///
+ /// struct Foo<T>(i32, String, PhantomData<T>);
+ ///
+ /// impl<T> fmt::Debug for Foo<T> {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ /// fmt.debug_tuple("Foo")
+ /// .field(&self.0)
+ /// .field(&self.1)
+ /// .field(&format_args!("_"))
+ /// .finish()
+ /// }
+ /// }
+ ///
+ /// assert_eq!(
+ /// "Foo(10, \"Hello\", _)",
+ /// format!("{:?}", Foo(10, "Hello".to_string(), PhantomData::<u8>))
+ /// );
+ /// ```
+ #[stable(feature = "debug_builders", since = "1.2.0")]
+ pub fn debug_tuple<'b>(&'b mut self, name: &str) -> DebugTuple<'b, 'a> {
+ builders::debug_tuple_new(self, name)
+ }
+
+ /// Used to shrink `derive(Debug)` code, for faster compilation and smaller binaries.
+ /// `debug_tuple_fields_finish` is more general, but this is faster for 1 field.
+ #[doc(hidden)]
+ #[unstable(feature = "fmt_helpers_for_derive", issue = "none")]
+ pub fn debug_tuple_field1_finish<'b>(&'b mut self, name: &str, value1: &dyn Debug) -> Result {
+ let mut builder = builders::debug_tuple_new(self, name);
+ builder.field(value1);
+ builder.finish()
+ }
+
+ /// Used to shrink `derive(Debug)` code, for faster compilation and smaller binaries.
+ /// `debug_tuple_fields_finish` is more general, but this is faster for 2 fields.
+ #[doc(hidden)]
+ #[unstable(feature = "fmt_helpers_for_derive", issue = "none")]
+ pub fn debug_tuple_field2_finish<'b>(
+ &'b mut self,
+ name: &str,
+ value1: &dyn Debug,
+ value2: &dyn Debug,
+ ) -> Result {
+ let mut builder = builders::debug_tuple_new(self, name);
+ builder.field(value1);
+ builder.field(value2);
+ builder.finish()
+ }
+
+ /// Used to shrink `derive(Debug)` code, for faster compilation and smaller binaries.
+ /// `debug_tuple_fields_finish` is more general, but this is faster for 3 fields.
+ #[doc(hidden)]
+ #[unstable(feature = "fmt_helpers_for_derive", issue = "none")]
+ pub fn debug_tuple_field3_finish<'b>(
+ &'b mut self,
+ name: &str,
+ value1: &dyn Debug,
+ value2: &dyn Debug,
+ value3: &dyn Debug,
+ ) -> Result {
+ let mut builder = builders::debug_tuple_new(self, name);
+ builder.field(value1);
+ builder.field(value2);
+ builder.field(value3);
+ builder.finish()
+ }
+
+ /// Used to shrink `derive(Debug)` code, for faster compilation and smaller binaries.
+ /// `debug_tuple_fields_finish` is more general, but this is faster for 4 fields.
+ #[doc(hidden)]
+ #[unstable(feature = "fmt_helpers_for_derive", issue = "none")]
+ pub fn debug_tuple_field4_finish<'b>(
+ &'b mut self,
+ name: &str,
+ value1: &dyn Debug,
+ value2: &dyn Debug,
+ value3: &dyn Debug,
+ value4: &dyn Debug,
+ ) -> Result {
+ let mut builder = builders::debug_tuple_new(self, name);
+ builder.field(value1);
+ builder.field(value2);
+ builder.field(value3);
+ builder.field(value4);
+ builder.finish()
+ }
+
+ /// Used to shrink `derive(Debug)` code, for faster compilation and smaller binaries.
+ /// `debug_tuple_fields_finish` is more general, but this is faster for 5 fields.
+ #[doc(hidden)]
+ #[unstable(feature = "fmt_helpers_for_derive", issue = "none")]
+ pub fn debug_tuple_field5_finish<'b>(
+ &'b mut self,
+ name: &str,
+ value1: &dyn Debug,
+ value2: &dyn Debug,
+ value3: &dyn Debug,
+ value4: &dyn Debug,
+ value5: &dyn Debug,
+ ) -> Result {
+ let mut builder = builders::debug_tuple_new(self, name);
+ builder.field(value1);
+ builder.field(value2);
+ builder.field(value3);
+ builder.field(value4);
+ builder.field(value5);
+ builder.finish()
+ }
+
+ /// Used to shrink `derive(Debug)` code, for faster compilation and smaller binaries.
+ /// For the cases not covered by `debug_tuple_field[12345]_finish`.
+ #[doc(hidden)]
+ #[unstable(feature = "fmt_helpers_for_derive", issue = "none")]
+ pub fn debug_tuple_fields_finish<'b>(
+ &'b mut self,
+ name: &str,
+ values: &[&dyn Debug],
+ ) -> Result {
+ let mut builder = builders::debug_tuple_new(self, name);
+ for value in values {
+ builder.field(value);
+ }
+ builder.finish()
+ }
+
+ /// Creates a `DebugList` builder designed to assist with creation of
+ /// `fmt::Debug` implementations for list-like structures.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use std::fmt;
+ ///
+ /// struct Foo(Vec<i32>);
+ ///
+ /// impl fmt::Debug for Foo {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ /// fmt.debug_list().entries(self.0.iter()).finish()
+ /// }
+ /// }
+ ///
+ /// assert_eq!(format!("{:?}", Foo(vec![10, 11])), "[10, 11]");
+ /// ```
+ #[stable(feature = "debug_builders", since = "1.2.0")]
+ pub fn debug_list<'b>(&'b mut self) -> DebugList<'b, 'a> {
+ builders::debug_list_new(self)
+ }
+
+ /// Creates a `DebugSet` builder designed to assist with creation of
+ /// `fmt::Debug` implementations for set-like structures.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use std::fmt;
+ ///
+ /// struct Foo(Vec<i32>);
+ ///
+ /// impl fmt::Debug for Foo {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ /// fmt.debug_set().entries(self.0.iter()).finish()
+ /// }
+ /// }
+ ///
+ /// assert_eq!(format!("{:?}", Foo(vec![10, 11])), "{10, 11}");
+ /// ```
+ ///
+ /// [`format_args!`]: crate::format_args
+ ///
+ /// In this more complex example, we use [`format_args!`] and `.debug_set()`
+ /// to build a list of match arms:
+ ///
+ /// ```rust
+ /// use std::fmt;
+ ///
+ /// struct Arm<'a, L: 'a, R: 'a>(&'a (L, R));
+ /// struct Table<'a, K: 'a, V: 'a>(&'a [(K, V)], V);
+ ///
+ /// impl<'a, L, R> fmt::Debug for Arm<'a, L, R>
+ /// where
+ /// L: 'a + fmt::Debug, R: 'a + fmt::Debug
+ /// {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ /// L::fmt(&(self.0).0, fmt)?;
+ /// fmt.write_str(" => ")?;
+ /// R::fmt(&(self.0).1, fmt)
+ /// }
+ /// }
+ ///
+ /// impl<'a, K, V> fmt::Debug for Table<'a, K, V>
+ /// where
+ /// K: 'a + fmt::Debug, V: 'a + fmt::Debug
+ /// {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ /// fmt.debug_set()
+ /// .entries(self.0.iter().map(Arm))
+ /// .entry(&Arm(&(format_args!("_"), &self.1)))
+ /// .finish()
+ /// }
+ /// }
+ /// ```
+ #[stable(feature = "debug_builders", since = "1.2.0")]
+ pub fn debug_set<'b>(&'b mut self) -> DebugSet<'b, 'a> {
+ builders::debug_set_new(self)
+ }
+
+ /// Creates a `DebugMap` builder designed to assist with creation of
+ /// `fmt::Debug` implementations for map-like structures.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use std::fmt;
+ ///
+ /// struct Foo(Vec<(String, i32)>);
+ ///
+ /// impl fmt::Debug for Foo {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ /// fmt.debug_map().entries(self.0.iter().map(|&(ref k, ref v)| (k, v))).finish()
+ /// }
+ /// }
+ ///
+ /// assert_eq!(
+ /// format!("{:?}", Foo(vec![("A".to_string(), 10), ("B".to_string(), 11)])),
+ /// r#"{"A": 10, "B": 11}"#
+ /// );
+ /// ```
+ #[stable(feature = "debug_builders", since = "1.2.0")]
+ pub fn debug_map<'b>(&'b mut self) -> DebugMap<'b, 'a> {
+ builders::debug_map_new(self)
+ }
+}
+
+#[stable(since = "1.2.0", feature = "formatter_write")]
+impl Write for Formatter<'_> {
+ fn write_str(&mut self, s: &str) -> Result {
+ self.buf.write_str(s)
+ }
+
+ fn write_char(&mut self, c: char) -> Result {
+ self.buf.write_char(c)
+ }
+
+ fn write_fmt(&mut self, args: Arguments<'_>) -> Result {
+ write(self.buf, args)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Display for Error {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ Display::fmt("an error occurred when formatting an argument", f)
+ }
+}
+
+// Implementations of the core formatting traits
+
+macro_rules! fmt_refs {
+ ($($tr:ident),*) => {
+ $(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<T: ?Sized + $tr> $tr for &T {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result { $tr::fmt(&**self, f) }
+ }
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<T: ?Sized + $tr> $tr for &mut T {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result { $tr::fmt(&**self, f) }
+ }
+ )*
+ }
+}
+
+fmt_refs! { Debug, Display, Octal, Binary, LowerHex, UpperHex, LowerExp, UpperExp }
+
+#[unstable(feature = "never_type", issue = "35121")]
+impl Debug for ! {
+ fn fmt(&self, _: &mut Formatter<'_>) -> Result {
+ *self
+ }
+}
+
+#[unstable(feature = "never_type", issue = "35121")]
+impl Display for ! {
+ fn fmt(&self, _: &mut Formatter<'_>) -> Result {
+ *self
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Debug for bool {
+ #[inline]
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ Display::fmt(self, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Display for bool {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ Display::fmt(if *self { "true" } else { "false" }, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Debug for str {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ f.write_char('"')?;
+ let mut from = 0;
+ for (i, c) in self.char_indices() {
+ let esc = c.escape_debug_ext(EscapeDebugExtArgs {
+ escape_grapheme_extended: true,
+ escape_single_quote: false,
+ escape_double_quote: true,
+ });
+ // If char needs escaping, flush backlog so far and write, else skip
+ if esc.len() != 1 {
+ f.write_str(&self[from..i])?;
+ for c in esc {
+ f.write_char(c)?;
+ }
+ from = i + c.len_utf8();
+ }
+ }
+ f.write_str(&self[from..])?;
+ f.write_char('"')
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Display for str {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ f.pad(self)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Debug for char {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ f.write_char('\'')?;
+ for c in self.escape_debug_ext(EscapeDebugExtArgs {
+ escape_grapheme_extended: true,
+ escape_single_quote: true,
+ escape_double_quote: false,
+ }) {
+ f.write_char(c)?
+ }
+ f.write_char('\'')
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Display for char {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ if f.width.is_none() && f.precision.is_none() {
+ f.write_char(*self)
+ } else {
+ f.pad(self.encode_utf8(&mut [0; 4]))
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Pointer for *const T {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ // Cast is needed here because `.addr()` requires `T: Sized`.
+ pointer_fmt_inner((*self as *const ()).addr(), f)
+ }
+}
+
+/// Since the formatting will be identical for all pointer types, use a non-monomorphized
+/// implementation for the actual formatting to reduce the amount of codegen work needed.
+///
+/// This uses `ptr_addr: usize` and not `ptr: *const ()` to be able to use this for
+/// `fn(...) -> ...` without using [problematic] "Oxford Casts".
+///
+/// [problematic]: https://github.com/rust-lang/rust/issues/95489
+pub(crate) fn pointer_fmt_inner(ptr_addr: usize, f: &mut Formatter<'_>) -> Result {
+ let old_width = f.width;
+ let old_flags = f.flags;
+
+    // The alternate flag is already treated by LowerHex as being special:
+    // it denotes whether to prefix with 0x. We use it to work out whether
+    // or not to zero-extend, and then unconditionally set it to get the
+    // prefix.
+ if f.alternate() {
+ f.flags |= 1 << (FlagV1::SignAwareZeroPad as u32);
+
+ if f.width.is_none() {
+ f.width = Some((usize::BITS / 4) as usize + 2);
+ }
+ }
+ f.flags |= 1 << (FlagV1::Alternate as u32);
+
+ let ret = LowerHex::fmt(&ptr_addr, f);
+
+ f.width = old_width;
+ f.flags = old_flags;
+
+ ret
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Pointer for *mut T {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ Pointer::fmt(&(*self as *const T), f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Pointer for &T {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ Pointer::fmt(&(*self as *const T), f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Pointer for &mut T {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ Pointer::fmt(&(&**self as *const T), f)
+ }
+}
+
+// Implementation of Display/Debug for various core types
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Debug for *const T {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ Pointer::fmt(self, f)
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Debug for *mut T {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ Pointer::fmt(self, f)
+ }
+}
+
+macro_rules! peel {
+ ($name:ident, $($other:ident,)*) => (tuple! { $($other,)* })
+}
+
+macro_rules! tuple {
+ () => ();
+ ( $($name:ident,)+ ) => (
+ maybe_tuple_doc! {
+ $($name)+ @
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<$($name:Debug),+> Debug for ($($name,)+) where last_type!($($name,)+): ?Sized {
+ #[allow(non_snake_case, unused_assignments)]
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ let mut builder = f.debug_tuple("");
+ let ($(ref $name,)+) = *self;
+ $(
+ builder.field(&$name);
+ )+
+
+ builder.finish()
+ }
+ }
+ }
+ peel! { $($name,)+ }
+ )
+}
+
+macro_rules! maybe_tuple_doc {
+ ($a:ident @ #[$meta:meta] $item:item) => {
+ #[cfg_attr(not(bootstrap), doc(fake_variadic))]
+ #[doc = "This trait is implemented for tuples up to twelve items long."]
+ #[$meta]
+ $item
+ };
+ ($a:ident $($rest_a:ident)+ @ #[$meta:meta] $item:item) => {
+ #[doc(hidden)]
+ #[$meta]
+ $item
+ };
+}
+
+macro_rules! last_type {
+ ($a:ident,) => { $a };
+ ($a:ident, $($rest_a:ident,)+) => { last_type!($($rest_a,)+) };
+}
+
+tuple! { E, D, C, B, A, Z, Y, X, W, V, U, T, }
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Debug> Debug for [T] {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ f.debug_list().entries(self.iter()).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Debug for () {
+ #[inline]
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ f.pad("()")
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Debug for PhantomData<T> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ f.debug_struct("PhantomData").finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Copy + Debug> Debug for Cell<T> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ f.debug_struct("Cell").field("value", &self.get()).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Debug> Debug for RefCell<T> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ match self.try_borrow() {
+ Ok(borrow) => f.debug_struct("RefCell").field("value", &borrow).finish(),
+ Err(_) => {
+ // The RefCell is mutably borrowed so we can't look at its value
+ // here. Show a placeholder instead.
+ struct BorrowedPlaceholder;
+
+ impl Debug for BorrowedPlaceholder {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ f.write_str("<borrowed>")
+ }
+ }
+
+ f.debug_struct("RefCell").field("value", &BorrowedPlaceholder).finish()
+ }
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Debug> Debug for Ref<'_, T> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ Debug::fmt(&**self, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Debug> Debug for RefMut<'_, T> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ Debug::fmt(&*(self.deref()), f)
+ }
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<T: ?Sized> Debug for UnsafeCell<T> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ f.debug_struct("UnsafeCell").finish_non_exhaustive()
+ }
+}
+
+#[unstable(feature = "sync_unsafe_cell", issue = "95439")]
+impl<T: ?Sized> Debug for SyncUnsafeCell<T> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ f.debug_struct("SyncUnsafeCell").finish_non_exhaustive()
+ }
+}
+
+// If you expected tests to be here, look instead at the core/tests/fmt.rs file;
+// it's a lot easier than creating all of the rt::Piece structures here.
+// There are also tests in the alloc crate, for those that need allocations.
diff --git a/library/core/src/fmt/nofloat.rs b/library/core/src/fmt/nofloat.rs
new file mode 100644
index 000000000..cfb94cd9d
--- /dev/null
+++ b/library/core/src/fmt/nofloat.rs
@@ -0,0 +1,15 @@
+use crate::fmt::{Debug, Formatter, Result};
+
+macro_rules! floating {
+ ($ty:ident) => {
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl Debug for $ty {
+ fn fmt(&self, _fmt: &mut Formatter<'_>) -> Result {
+ panic!("floating point support is turned off");
+ }
+ }
+ };
+}
+
+floating! { f32 }
+floating! { f64 }
diff --git a/library/core/src/fmt/num.rs b/library/core/src/fmt/num.rs
new file mode 100644
index 000000000..25789d37c
--- /dev/null
+++ b/library/core/src/fmt/num.rs
@@ -0,0 +1,683 @@
+//! Integer and floating-point number formatting
+
+use crate::fmt;
+use crate::mem::MaybeUninit;
+use crate::num::fmt as numfmt;
+use crate::ops::{Div, Rem, Sub};
+use crate::ptr;
+use crate::slice;
+use crate::str;
+
+#[doc(hidden)]
+trait DisplayInt:
+ PartialEq + PartialOrd + Div<Output = Self> + Rem<Output = Self> + Sub<Output = Self> + Copy
+{
+ fn zero() -> Self;
+ fn from_u8(u: u8) -> Self;
+ fn to_u8(&self) -> u8;
+ fn to_u16(&self) -> u16;
+ fn to_u32(&self) -> u32;
+ fn to_u64(&self) -> u64;
+ fn to_u128(&self) -> u128;
+}
+
+macro_rules! impl_int {
+ ($($t:ident)*) => (
+ $(impl DisplayInt for $t {
+ fn zero() -> Self { 0 }
+ fn from_u8(u: u8) -> Self { u as Self }
+ fn to_u8(&self) -> u8 { *self as u8 }
+ fn to_u16(&self) -> u16 { *self as u16 }
+ fn to_u32(&self) -> u32 { *self as u32 }
+ fn to_u64(&self) -> u64 { *self as u64 }
+ fn to_u128(&self) -> u128 { *self as u128 }
+ })*
+ )
+}
+macro_rules! impl_uint {
+ ($($t:ident)*) => (
+ $(impl DisplayInt for $t {
+ fn zero() -> Self { 0 }
+ fn from_u8(u: u8) -> Self { u as Self }
+ fn to_u8(&self) -> u8 { *self as u8 }
+ fn to_u16(&self) -> u16 { *self as u16 }
+ fn to_u32(&self) -> u32 { *self as u32 }
+ fn to_u64(&self) -> u64 { *self as u64 }
+ fn to_u128(&self) -> u128 { *self as u128 }
+ })*
+ )
+}
+
+impl_int! { i8 i16 i32 i64 i128 isize }
+impl_uint! { u8 u16 u32 u64 u128 usize }
+
+/// A type that represents a specific radix
+#[doc(hidden)]
+trait GenericRadix: Sized {
+    /// The radix, i.e. the number of distinct digits.
+ const BASE: u8;
+
+ /// A radix-specific prefix string.
+ const PREFIX: &'static str;
+
+    /// Converts a digit value to the corresponding ASCII digit for this radix.
+ fn digit(x: u8) -> u8;
+
+    /// Formats an integer in this radix, writing through the given formatter.
+ fn fmt_int<T: DisplayInt>(&self, mut x: T, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // The radix can be as low as 2, so we need a buffer of at least 128
+ // characters for a base 2 number.
+ let zero = T::zero();
+ let is_nonnegative = x >= zero;
+ let mut buf = [MaybeUninit::<u8>::uninit(); 128];
+ let mut curr = buf.len();
+ let base = T::from_u8(Self::BASE);
+ if is_nonnegative {
+ // Accumulate each digit of the number from the least significant
+ // to the most significant figure.
+ for byte in buf.iter_mut().rev() {
+ let n = x % base; // Get the current place value.
+ x = x / base; // Deaccumulate the number.
+ byte.write(Self::digit(n.to_u8())); // Store the digit in the buffer.
+ curr -= 1;
+ if x == zero {
+ // No more digits left to accumulate.
+ break;
+ };
+ }
+ } else {
+ // Do the same as above, but accounting for two's complement.
+ for byte in buf.iter_mut().rev() {
+ let n = zero - (x % base); // Get the current place value.
+ x = x / base; // Deaccumulate the number.
+ byte.write(Self::digit(n.to_u8())); // Store the digit in the buffer.
+ curr -= 1;
+ if x == zero {
+ // No more digits left to accumulate.
+ break;
+ };
+ }
+ }
+ let buf = &buf[curr..];
+ // SAFETY: The only chars in `buf` are created by `Self::digit` which are assumed to be
+ // valid UTF-8
+ let buf = unsafe {
+ str::from_utf8_unchecked(slice::from_raw_parts(
+ MaybeUninit::slice_as_ptr(buf),
+ buf.len(),
+ ))
+ };
+ f.pad_integral(is_nonnegative, Self::PREFIX, buf)
+ }
+}
+
+/// A binary (base 2) radix
+#[derive(Clone, PartialEq)]
+struct Binary;
+
+/// An octal (base 8) radix
+#[derive(Clone, PartialEq)]
+struct Octal;
+
+/// A hexadecimal (base 16) radix, formatted with lower-case characters
+#[derive(Clone, PartialEq)]
+struct LowerHex;
+
+/// A hexadecimal (base 16) radix, formatted with upper-case characters
+#[derive(Clone, PartialEq)]
+struct UpperHex;
+
+macro_rules! radix {
+ ($T:ident, $base:expr, $prefix:expr, $($x:pat => $conv:expr),+) => {
+ impl GenericRadix for $T {
+ const BASE: u8 = $base;
+ const PREFIX: &'static str = $prefix;
+ fn digit(x: u8) -> u8 {
+ match x {
+ $($x => $conv,)+
+ x => panic!("number not in the range 0..={}: {}", Self::BASE - 1, x),
+ }
+ }
+ }
+ }
+}
+
+radix! { Binary, 2, "0b", x @ 0 ..= 1 => b'0' + x }
+radix! { Octal, 8, "0o", x @ 0 ..= 7 => b'0' + x }
+radix! { LowerHex, 16, "0x", x @ 0 ..= 9 => b'0' + x, x @ 10 ..= 15 => b'a' + (x - 10) }
+radix! { UpperHex, 16, "0x", x @ 0 ..= 9 => b'0' + x, x @ 10 ..= 15 => b'A' + (x - 10) }
+
+macro_rules! int_base {
+ (fmt::$Trait:ident for $T:ident as $U:ident -> $Radix:ident) => {
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl fmt::$Trait for $T {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ $Radix.fmt_int(*self as $U, f)
+ }
+ }
+ };
+}
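+
+// For example (illustrative): `format!("{:b}", -5i8)` goes through the
+// `*self as $U` cast above, so `fmt_int` sees the nonnegative value 251u8
+// and emits the two's-complement bit pattern "11111011" with no sign.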
+
+macro_rules! integer {
+ ($Int:ident, $Uint:ident) => {
+ int_base! { fmt::Binary for $Int as $Uint -> Binary }
+ int_base! { fmt::Octal for $Int as $Uint -> Octal }
+ int_base! { fmt::LowerHex for $Int as $Uint -> LowerHex }
+ int_base! { fmt::UpperHex for $Int as $Uint -> UpperHex }
+
+ int_base! { fmt::Binary for $Uint as $Uint -> Binary }
+ int_base! { fmt::Octal for $Uint as $Uint -> Octal }
+ int_base! { fmt::LowerHex for $Uint as $Uint -> LowerHex }
+ int_base! { fmt::UpperHex for $Uint as $Uint -> UpperHex }
+ };
+}
+integer! { isize, usize }
+integer! { i8, u8 }
+integer! { i16, u16 }
+integer! { i32, u32 }
+integer! { i64, u64 }
+integer! { i128, u128 }
+macro_rules! debug {
+ ($($T:ident)*) => {$(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl fmt::Debug for $T {
+ #[inline]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if f.debug_lower_hex() {
+ fmt::LowerHex::fmt(self, f)
+ } else if f.debug_upper_hex() {
+ fmt::UpperHex::fmt(self, f)
+ } else {
+ fmt::Display::fmt(self, f)
+ }
+ }
+ }
+ )*};
+}
+debug! {
+ i8 i16 i32 i64 i128 isize
+ u8 u16 u32 u64 u128 usize
+}
+
+// 2 digit decimal look up table
+static DEC_DIGITS_LUT: &[u8; 200] = b"0001020304050607080910111213141516171819\
+ 2021222324252627282930313233343536373839\
+ 4041424344454647484950515253545556575859\
+ 6061626364656667686970717273747576777879\
+ 8081828384858687888990919293949596979899";
+
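+// For example, the two ASCII digits of the value 57 live at offset 57 * 2:
+// DEC_DIGITS_LUT[114] == b'5' and DEC_DIGITS_LUT[115] == b'7'. This is why
+// the loops below compute LUT indices as `(value % 100) << 1`.
+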
+macro_rules! impl_Display {
+ ($($t:ident),* as $u:ident via $conv_fn:ident named $name:ident) => {
+ fn $name(mut n: $u, is_nonnegative: bool, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+            // 2^128 is about 3*10^38, so at most 39 decimal digits are ever needed
+ let mut buf = [MaybeUninit::<u8>::uninit(); 39];
+ let mut curr = buf.len() as isize;
+ let buf_ptr = MaybeUninit::slice_as_mut_ptr(&mut buf);
+ let lut_ptr = DEC_DIGITS_LUT.as_ptr();
+
+            // SAFETY: Since `d1` and `d2` are always less than or equal to `198`, we
+            // can copy from `lut_ptr[d1..d1 + 2]` and `lut_ptr[d2..d2 + 2]`. To show
+            // that it's OK to copy into `buf_ptr`, notice that at the beginning
+            // `curr == buf.len() == 39 > log(n)` since `n < 2^128 < 10^39`, and at
+            // each step this is kept the same as `n` is divided. Since `n` is always
+            // non-negative, this means that `curr > 0` so `buf_ptr[curr..curr + 2]`
+            // is safe to access.
+ unsafe {
+ // need at least 16 bits for the 4-characters-at-a-time to work.
+ assert!(crate::mem::size_of::<$u>() >= 2);
+
+ // eagerly decode 4 characters at a time
+ while n >= 10000 {
+ let rem = (n % 10000) as isize;
+ n /= 10000;
+
+ let d1 = (rem / 100) << 1;
+ let d2 = (rem % 100) << 1;
+ curr -= 4;
+
+                    // We are allowed to copy to `buf_ptr[curr..curr + 4]` here: if
+                    // `curr` had gone negative, `n` would originally have been at
+                    // least `10000^10 = 10^40 > 2^128 > n`, a contradiction.
+ ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
+ ptr::copy_nonoverlapping(lut_ptr.offset(d2), buf_ptr.offset(curr + 2), 2);
+ }
+
+ // if we reach here numbers are <= 9999, so at most 4 chars long
+ let mut n = n as isize; // possibly reduce 64bit math
+
+ // decode 2 more chars, if > 2 chars
+ if n >= 100 {
+ let d1 = (n % 100) << 1;
+ n /= 100;
+ curr -= 2;
+ ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
+ }
+
+ // decode last 1 or 2 chars
+ if n < 10 {
+ curr -= 1;
+ *buf_ptr.offset(curr) = (n as u8) + b'0';
+ } else {
+ let d1 = n << 1;
+ curr -= 2;
+ ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
+ }
+ }
+
+ // SAFETY: `curr` > 0 (since we made `buf` large enough), and all the chars are valid
+ // UTF-8 since `DEC_DIGITS_LUT` is
+ let buf_slice = unsafe {
+ str::from_utf8_unchecked(
+ slice::from_raw_parts(buf_ptr.offset(curr), buf.len() - curr as usize))
+ };
+ f.pad_integral(is_nonnegative, "", buf_slice)
+ }
+
+ $(#[stable(feature = "rust1", since = "1.0.0")]
+ impl fmt::Display for $t {
+ #[allow(unused_comparisons)]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let is_nonnegative = *self >= 0;
+ let n = if is_nonnegative {
+ self.$conv_fn()
+ } else {
+                    // negate the value by taking its two's complement: invert the bits and add 1
+ (!self.$conv_fn()).wrapping_add(1)
+ };
+ $name(n, is_nonnegative, f)
+ }
+ })*
+ };
+}
+
+macro_rules! impl_Exp {
+ ($($t:ident),* as $u:ident via $conv_fn:ident named $name:ident) => {
+ fn $name(
+ mut n: $u,
+ is_nonnegative: bool,
+ upper: bool,
+ f: &mut fmt::Formatter<'_>
+ ) -> fmt::Result {
+ let (mut n, mut exponent, trailing_zeros, added_precision) = {
+ let mut exponent = 0;
+ // count and remove trailing decimal zeroes
+ while n % 10 == 0 && n >= 10 {
+ n /= 10;
+ exponent += 1;
+ }
+
+ let (added_precision, subtracted_precision) = match f.precision() {
+ Some(fmt_prec) => {
+ // number of decimal digits minus 1
+ let mut tmp = n;
+ let mut prec = 0;
+ while tmp >= 10 {
+ tmp /= 10;
+ prec += 1;
+ }
+ (fmt_prec.saturating_sub(prec), prec.saturating_sub(fmt_prec))
+ }
+ None => (0, 0)
+ };
+ for _ in 1..subtracted_precision {
+ n /= 10;
+ exponent += 1;
+ }
+ if subtracted_precision != 0 {
+ let rem = n % 10;
+ n /= 10;
+ exponent += 1;
+ // round up last digit
+ if rem >= 5 {
+ n += 1;
+ }
+ }
+ (n, exponent, exponent, added_precision)
+ };
+
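+        // Worked example (illustrative): for 1234500u32 with no precision, the
+        // block above strips trailing zeroes (n = 12345, exponent = 2); the
+        // digit loops below then emit "1.2345" while raising `exponent` to 6,
+        // giving "1.2345e6".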
+ // 39 digits (worst case u128) + . = 40
+ // Since `curr` always decreases by the number of digits copied, this means
+ // that `curr >= 0`.
+ let mut buf = [MaybeUninit::<u8>::uninit(); 40];
+ let mut curr = buf.len() as isize; //index for buf
+ let buf_ptr = MaybeUninit::slice_as_mut_ptr(&mut buf);
+ let lut_ptr = DEC_DIGITS_LUT.as_ptr();
+
+ // decode 2 chars at a time
+ while n >= 100 {
+ let d1 = ((n % 100) as isize) << 1;
+ curr -= 2;
+ // SAFETY: `d1 <= 198`, so we can copy from `lut_ptr[d1..d1 + 2]` since
+ // `DEC_DIGITS_LUT` has a length of 200.
+ unsafe {
+ ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
+ }
+ n /= 100;
+ exponent += 2;
+ }
+ // n is <= 99, so at most 2 chars long
+ let mut n = n as isize; // possibly reduce 64bit math
+ // decode second-to-last character
+ if n >= 10 {
+ curr -= 1;
+ // SAFETY: Safe since `40 > curr >= 0` (see comment)
+ unsafe {
+ *buf_ptr.offset(curr) = (n as u8 % 10_u8) + b'0';
+ }
+ n /= 10;
+ exponent += 1;
+ }
+ // add decimal point iff >1 mantissa digit will be printed
+ if exponent != trailing_zeros || added_precision != 0 {
+ curr -= 1;
+ // SAFETY: Safe since `40 > curr >= 0`
+ unsafe {
+ *buf_ptr.offset(curr) = b'.';
+ }
+ }
+
+ // SAFETY: Safe since `40 > curr >= 0`
+ let buf_slice = unsafe {
+ // decode last character
+ curr -= 1;
+ *buf_ptr.offset(curr) = (n as u8) + b'0';
+
+ let len = buf.len() - curr as usize;
+ slice::from_raw_parts(buf_ptr.offset(curr), len)
+ };
+
+ // stores 'e' (or 'E') and the up to 2-digit exponent
+ let mut exp_buf = [MaybeUninit::<u8>::uninit(); 3];
+ let exp_ptr = MaybeUninit::slice_as_mut_ptr(&mut exp_buf);
+ // SAFETY: In either case, `exp_buf` is written within bounds and `exp_ptr[..len]`
+ // is contained within `exp_buf` since `len <= 3`.
+ let exp_slice = unsafe {
+ *exp_ptr.offset(0) = if upper { b'E' } else { b'e' };
+ let len = if exponent < 10 {
+ *exp_ptr.offset(1) = (exponent as u8) + b'0';
+ 2
+ } else {
+ let off = exponent << 1;
+ ptr::copy_nonoverlapping(lut_ptr.offset(off), exp_ptr.offset(1), 2);
+ 3
+ };
+ slice::from_raw_parts(exp_ptr, len)
+ };
+
+ let parts = &[
+ numfmt::Part::Copy(buf_slice),
+ numfmt::Part::Zero(added_precision),
+ numfmt::Part::Copy(exp_slice)
+ ];
+ let sign = if !is_nonnegative {
+ "-"
+ } else if f.sign_plus() {
+ "+"
+ } else {
+ ""
+ };
+ let formatted = numfmt::Formatted{sign, parts};
+ f.pad_formatted_parts(&formatted)
+ }
+
+ $(
+ #[stable(feature = "integer_exp_format", since = "1.42.0")]
+ impl fmt::LowerExp for $t {
+ #[allow(unused_comparisons)]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let is_nonnegative = *self >= 0;
+ let n = if is_nonnegative {
+ self.$conv_fn()
+ } else {
+                    // negate the value by taking its two's complement: invert the bits and add 1
+ (!self.$conv_fn()).wrapping_add(1)
+ };
+ $name(n, is_nonnegative, false, f)
+ }
+ })*
+ $(
+ #[stable(feature = "integer_exp_format", since = "1.42.0")]
+ impl fmt::UpperExp for $t {
+ #[allow(unused_comparisons)]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let is_nonnegative = *self >= 0;
+ let n = if is_nonnegative {
+ self.$conv_fn()
+ } else {
+                    // negate the value by taking its two's complement: invert the bits and add 1
+ (!self.$conv_fn()).wrapping_add(1)
+ };
+ $name(n, is_nonnegative, true, f)
+ }
+ })*
+ };
+}
+
+// Include wasm32 here: its 32-bit pointers don't reflect the native word size
+// of the host, and wasm users often care strongly about a smaller code size.
+#[cfg(any(target_pointer_width = "64", target_arch = "wasm32"))]
+mod imp {
+ use super::*;
+ impl_Display!(
+ i8, u8, i16, u16, i32, u32, i64, u64, usize, isize
+ as u64 via to_u64 named fmt_u64
+ );
+ impl_Exp!(
+ i8, u8, i16, u16, i32, u32, i64, u64, usize, isize
+ as u64 via to_u64 named exp_u64
+ );
+}
+
+#[cfg(not(any(target_pointer_width = "64", target_arch = "wasm32")))]
+mod imp {
+ use super::*;
+ impl_Display!(i8, u8, i16, u16, i32, u32, isize, usize as u32 via to_u32 named fmt_u32);
+ impl_Display!(i64, u64 as u64 via to_u64 named fmt_u64);
+ impl_Exp!(i8, u8, i16, u16, i32, u32, isize, usize as u32 via to_u32 named exp_u32);
+ impl_Exp!(i64, u64 as u64 via to_u64 named exp_u64);
+}
+impl_Exp!(i128, u128 as u128 via to_u128 named exp_u128);
+
+/// Helper function that writes the decimal digits of a u64 into `buf` back to
+/// front; `curr` is decremented to track the index of the first written byte.
+fn parse_u64_into<const N: usize>(mut n: u64, buf: &mut [MaybeUninit<u8>; N], curr: &mut isize) {
+ let buf_ptr = MaybeUninit::slice_as_mut_ptr(buf);
+ let lut_ptr = DEC_DIGITS_LUT.as_ptr();
+ assert!(*curr > 19);
+
+ // SAFETY:
+ // Writes at most 19 characters into the buffer. Guaranteed that any ptr into LUT is at most
+ // 198, so will never OOB. There is a check above that there are at least 19 characters
+ // remaining.
+ unsafe {
+ if n >= 1e16 as u64 {
+ let to_parse = n % 1e16 as u64;
+ n /= 1e16 as u64;
+
+ // Some of these are nops but it looks more elegant this way.
+ let d1 = ((to_parse / 1e14 as u64) % 100) << 1;
+ let d2 = ((to_parse / 1e12 as u64) % 100) << 1;
+ let d3 = ((to_parse / 1e10 as u64) % 100) << 1;
+ let d4 = ((to_parse / 1e8 as u64) % 100) << 1;
+ let d5 = ((to_parse / 1e6 as u64) % 100) << 1;
+ let d6 = ((to_parse / 1e4 as u64) % 100) << 1;
+ let d7 = ((to_parse / 1e2 as u64) % 100) << 1;
+ let d8 = ((to_parse / 1e0 as u64) % 100) << 1;
+
+ *curr -= 16;
+
+ ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(*curr + 0), 2);
+ ptr::copy_nonoverlapping(lut_ptr.offset(d2 as isize), buf_ptr.offset(*curr + 2), 2);
+ ptr::copy_nonoverlapping(lut_ptr.offset(d3 as isize), buf_ptr.offset(*curr + 4), 2);
+ ptr::copy_nonoverlapping(lut_ptr.offset(d4 as isize), buf_ptr.offset(*curr + 6), 2);
+ ptr::copy_nonoverlapping(lut_ptr.offset(d5 as isize), buf_ptr.offset(*curr + 8), 2);
+ ptr::copy_nonoverlapping(lut_ptr.offset(d6 as isize), buf_ptr.offset(*curr + 10), 2);
+ ptr::copy_nonoverlapping(lut_ptr.offset(d7 as isize), buf_ptr.offset(*curr + 12), 2);
+ ptr::copy_nonoverlapping(lut_ptr.offset(d8 as isize), buf_ptr.offset(*curr + 14), 2);
+ }
+ if n >= 1e8 as u64 {
+ let to_parse = n % 1e8 as u64;
+ n /= 1e8 as u64;
+
+ // Some of these are nops but it looks more elegant this way.
+ let d1 = ((to_parse / 1e6 as u64) % 100) << 1;
+ let d2 = ((to_parse / 1e4 as u64) % 100) << 1;
+ let d3 = ((to_parse / 1e2 as u64) % 100) << 1;
+ let d4 = ((to_parse / 1e0 as u64) % 100) << 1;
+ *curr -= 8;
+
+ ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(*curr + 0), 2);
+ ptr::copy_nonoverlapping(lut_ptr.offset(d2 as isize), buf_ptr.offset(*curr + 2), 2);
+ ptr::copy_nonoverlapping(lut_ptr.offset(d3 as isize), buf_ptr.offset(*curr + 4), 2);
+ ptr::copy_nonoverlapping(lut_ptr.offset(d4 as isize), buf_ptr.offset(*curr + 6), 2);
+ }
+ // `n` < 1e8 < (1 << 32)
+ let mut n = n as u32;
+ if n >= 1e4 as u32 {
+ let to_parse = n % 1e4 as u32;
+ n /= 1e4 as u32;
+
+ let d1 = (to_parse / 100) << 1;
+ let d2 = (to_parse % 100) << 1;
+ *curr -= 4;
+
+ ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(*curr + 0), 2);
+ ptr::copy_nonoverlapping(lut_ptr.offset(d2 as isize), buf_ptr.offset(*curr + 2), 2);
+ }
+
+ // `n` < 1e4 < (1 << 16)
+ let mut n = n as u16;
+ if n >= 100 {
+ let d1 = (n % 100) << 1;
+ n /= 100;
+ *curr -= 2;
+ ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(*curr), 2);
+ }
+
+ // decode last 1 or 2 chars
+ if n < 10 {
+ *curr -= 1;
+ *buf_ptr.offset(*curr) = (n as u8) + b'0';
+ } else {
+ let d1 = n << 1;
+ *curr -= 2;
+ ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(*curr), 2);
+ }
+ }
+}
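+
+// Illustrative trace (not a test in this file): with n = 987654 and *curr = 39,
+// the 1e16 and 1e8 branches are skipped, the 1e4 branch writes "7654"
+// (curr becomes 35), and the final branch writes "98" (curr becomes 33), so
+// buf[33..39] holds "987654".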
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for u128 {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt_u128(*self, true, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for i128 {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let is_nonnegative = *self >= 0;
+ let n = if is_nonnegative {
+ self.to_u128()
+ } else {
+            // negate the value by taking its two's complement: invert the bits and add 1
+ (!self.to_u128()).wrapping_add(1)
+ };
+ fmt_u128(n, is_nonnegative, f)
+ }
+}
+
+/// Specialized optimization for u128. Instead of taking two digits at a time,
+/// it splits the value into at most two u64s, and then chunks by 10^16, 10^8,
+/// 10^4, 10^2, and finally 10^1. It also has to handle one last digit, as
+/// 10^40 > 2^128 > 10^39, whereas 10^20 > 2^64 > 10^19.
+fn fmt_u128(n: u128, is_nonnegative: bool, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+    // 2^128 is about 3*10^38, so at most 39 decimal digits are ever needed
+ let mut buf = [MaybeUninit::<u8>::uninit(); 39];
+ let mut curr = buf.len() as isize;
+
+ let (n, rem) = udiv_1e19(n);
+ parse_u64_into(rem, &mut buf, &mut curr);
+
+ if n != 0 {
+ // 0 pad up to point
+ let target = (buf.len() - 19) as isize;
+ // SAFETY: Guaranteed that we wrote at most 19 bytes, and there must be space
+ // remaining since it has length 39
+ unsafe {
+ ptr::write_bytes(
+ MaybeUninit::slice_as_mut_ptr(&mut buf).offset(target),
+ b'0',
+ (curr - target) as usize,
+ );
+ }
+ curr = target;
+
+ let (n, rem) = udiv_1e19(n);
+ parse_u64_into(rem, &mut buf, &mut curr);
+        // Should the following branch be annotated with `unlikely`?
+ if n != 0 {
+ let target = (buf.len() - 38) as isize;
+            // The raw `buf_ptr` pointer is only valid until `buf` is next used,
+            // but `buf` is not used in this scope, so we are good.
+ let buf_ptr = MaybeUninit::slice_as_mut_ptr(&mut buf);
+            // SAFETY: at this point we have written at most 38 bytes, so pad up to
+            // that point; there can be at most 1 digit remaining.
+ unsafe {
+ ptr::write_bytes(buf_ptr.offset(target), b'0', (curr - target) as usize);
+ curr = target - 1;
+ *buf_ptr.offset(curr) = (n as u8) + b'0';
+ }
+ }
+ }
+
+ // SAFETY: `curr` > 0 (since we made `buf` large enough), and all the chars are valid
+ // UTF-8 since `DEC_DIGITS_LUT` is
+ let buf_slice = unsafe {
+ str::from_utf8_unchecked(slice::from_raw_parts(
+ MaybeUninit::slice_as_mut_ptr(&mut buf).offset(curr),
+ buf.len() - curr as usize,
+ ))
+ };
+ f.pad_integral(is_nonnegative, "", buf_slice)
+}
+
+/// Partitions `n` into `quot = n / 1e19` and `rem = n % 1e19`, with `rem < 1e19`.
+///
+/// Integer division algorithm is based on the following paper:
+///
+/// T. Granlund and P. Montgomery, “Division by Invariant Integers Using Multiplication”
+/// in Proc. of the SIGPLAN94 Conference on Programming Language Design and
+/// Implementation, 1994, pp. 61–72
+///
+fn udiv_1e19(n: u128) -> (u128, u64) {
+ const DIV: u64 = 1e19 as u64;
+ const FACTOR: u128 = 156927543384667019095894735580191660403;
+
+ let quot = if n < 1 << 83 {
+ ((n >> 19) as u64 / (DIV >> 19)) as u128
+ } else {
+ u128_mulhi(n, FACTOR) >> 62
+ };
+
+ let rem = (n - quot * DIV as u128) as u64;
+ (quot, rem)
+}
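+
+// Illustrative check (not part of the source): udiv_1e19(u128::MAX) returns
+// (34_028_236_692_093_846_346, 3_374_607_431_768_211_455), and indeed
+// 34_028_236_692_093_846_346 * 1e19 + 3_374_607_431_768_211_455 == u128::MAX.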
+
+/// Multiplies two unsigned 128-bit integers, returning the upper 128 bits of the result.
+#[inline]
+fn u128_mulhi(x: u128, y: u128) -> u128 {
+ let x_lo = x as u64;
+ let x_hi = (x >> 64) as u64;
+ let y_lo = y as u64;
+ let y_hi = (y >> 64) as u64;
+
+ // handle possibility of overflow
+ let carry = (x_lo as u128 * y_lo as u128) >> 64;
+ let m = x_lo as u128 * y_hi as u128 + carry;
+ let high1 = m >> 64;
+
+ let m_lo = m as u64;
+ let high2 = (x_hi as u128 * y_lo as u128 + m_lo as u128) >> 64;
+
+ x_hi as u128 * y_hi as u128 + high1 + high2
+}
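+
+// Sanity check (illustrative): u128_mulhi(1 << 64, 1 << 64) == 1, i.e. the
+// upper 128 bits of 2^64 * 2^64 = 2^128.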
diff --git a/library/core/src/fmt/rt/v1.rs b/library/core/src/fmt/rt/v1.rs
new file mode 100644
index 000000000..37202b277
--- /dev/null
+++ b/library/core/src/fmt/rt/v1.rs
@@ -0,0 +1,45 @@
+//! This is an internal module used by the ifmt! runtime. These structures are
+//! emitted to static arrays to precompile format strings ahead of time.
+//!
+//! These definitions are similar to their `ct` equivalents, but differ in that
+//! these can be statically allocated and are slightly optimized for the runtime.
+#![allow(missing_debug_implementations)]
+
+#[derive(Copy, Clone)]
+pub struct Argument {
+ pub position: usize,
+ pub format: FormatSpec,
+}
+
+#[derive(Copy, Clone)]
+pub struct FormatSpec {
+ pub fill: char,
+ pub align: Alignment,
+ pub flags: u32,
+ pub precision: Count,
+ pub width: Count,
+}
+
+/// Possible alignments that can be requested as part of a formatting directive.
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum Alignment {
+ /// Indication that contents should be left-aligned.
+ Left,
+ /// Indication that contents should be right-aligned.
+ Right,
+ /// Indication that contents should be center-aligned.
+ Center,
+ /// No alignment was requested.
+ Unknown,
+}
+
+/// Used by [width](https://doc.rust-lang.org/std/fmt/#width) and [precision](https://doc.rust-lang.org/std/fmt/#precision) specifiers.
+#[derive(Copy, Clone)]
+pub enum Count {
+ /// Specified with a literal number, stores the value
+ Is(usize),
+ /// Specified using `$` and `*` syntaxes, stores the index into `args`
+ Param(usize),
+ /// Not specified
+ Implied,
+}
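+
+// For orientation (illustrative, not how the compiler literally emits it): a
+// directive such as `{0:>8.3}` corresponds to an `Argument { position: 0, .. }`
+// whose `FormatSpec` has `align: Alignment::Right`, `width: Count::Is(8)` and
+// `precision: Count::Is(3)`.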
diff --git a/library/core/src/future/future.rs b/library/core/src/future/future.rs
new file mode 100644
index 000000000..f29d3e1e9
--- /dev/null
+++ b/library/core/src/future/future.rs
@@ -0,0 +1,126 @@
+#![stable(feature = "futures_api", since = "1.36.0")]
+
+use crate::marker::Unpin;
+use crate::ops;
+use crate::pin::Pin;
+use crate::task::{Context, Poll};
+
+/// A future represents an asynchronous computation obtained by use of [`async`].
+///
+/// A future is a value that might not have finished computing yet. This kind of
+/// "asynchronous value" makes it possible for a thread to continue doing useful
+/// work while it waits for the value to become available.
+///
+/// # The `poll` method
+///
+/// The core method of `Future`, `poll`, *attempts* to resolve the future into a
+/// final value. This method does not block if the value is not ready. Instead,
+/// the current task is scheduled to be woken up when it's possible to make
+/// further progress by `poll`ing again. The `context` passed to the `poll`
+/// method can provide a [`Waker`], which is a handle for waking up the current
+/// task.
+///
+/// When using a future, you generally won't call `poll` directly, but instead
+/// `.await` the value.
+///
+/// [`async`]: ../../std/keyword.async.html
+/// [`Waker`]: crate::task::Waker
+#[doc(notable_trait)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+#[stable(feature = "futures_api", since = "1.36.0")]
+#[lang = "future_trait"]
+#[rustc_on_unimplemented(
+ label = "`{Self}` is not a future",
+ message = "`{Self}` is not a future",
+ note = "{Self} must be a future or must implement `IntoFuture` to be awaited"
+)]
+pub trait Future {
+ /// The type of value produced on completion.
+ #[stable(feature = "futures_api", since = "1.36.0")]
+ type Output;
+
+ /// Attempt to resolve the future to a final value, registering
+ /// the current task for wakeup if the value is not yet available.
+ ///
+ /// # Return value
+ ///
+ /// This function returns:
+ ///
+ /// - [`Poll::Pending`] if the future is not ready yet
+ /// - [`Poll::Ready(val)`] with the result `val` of this future if it
+ /// finished successfully.
+ ///
+ /// Once a future has finished, clients should not `poll` it again.
+ ///
+ /// When a future is not ready yet, `poll` returns `Poll::Pending` and
+ /// stores a clone of the [`Waker`] copied from the current [`Context`].
+ /// This [`Waker`] is then woken once the future can make progress.
+ /// For example, a future waiting for a socket to become
+ /// readable would call `.clone()` on the [`Waker`] and store it.
+ /// When a signal arrives elsewhere indicating that the socket is readable,
+ /// [`Waker::wake`] is called and the socket future's task is awoken.
+ /// Once a task has been woken up, it should attempt to `poll` the future
+ /// again, which may or may not produce a final value.
+ ///
+ /// Note that on multiple calls to `poll`, only the [`Waker`] from the
+ /// [`Context`] passed to the most recent call should be scheduled to
+ /// receive a wakeup.
+ ///
+ /// # Runtime characteristics
+ ///
+ /// Futures alone are *inert*; they must be *actively* `poll`ed to make
+ /// progress, meaning that each time the current task is woken up, it should
+ /// actively re-`poll` pending futures that it still has an interest in.
+ ///
+ /// The `poll` function is not called repeatedly in a tight loop -- instead,
+ /// it should only be called when the future indicates that it is ready to
+ /// make progress (by calling `wake()`). If you're familiar with the
+ /// `poll(2)` or `select(2)` syscalls on Unix it's worth noting that futures
+ /// typically do *not* suffer the same problems of "all wakeups must poll
+ /// all events"; they are more like `epoll(4)`.
+ ///
+ /// An implementation of `poll` should strive to return quickly, and should
+ /// not block. Returning quickly prevents unnecessarily clogging up
+ /// threads or event loops. If it is known ahead of time that a call to
+ /// `poll` may end up taking awhile, the work should be offloaded to a
+ /// thread pool (or something similar) to ensure that `poll` can return
+ /// quickly.
+ ///
+ /// # Panics
+ ///
+ /// Once a future has completed (returned `Ready` from `poll`), calling its
+ /// `poll` method again may panic, block forever, or cause other kinds of
+ /// problems; the `Future` trait places no requirements on the effects of
+ /// such a call. However, as the `poll` method is not marked `unsafe`,
+ /// Rust's usual rules apply: calls must never cause undefined behavior
+ /// (memory corruption, incorrect use of `unsafe` functions, or the like),
+ /// regardless of the future's state.
+ ///
+ /// [`Poll::Ready(val)`]: Poll::Ready
+ /// [`Waker`]: crate::task::Waker
+ /// [`Waker::wake`]: crate::task::Waker::wake
+ #[lang = "poll"]
+ #[stable(feature = "futures_api", since = "1.36.0")]
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output>;
+}
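+
+// A minimal hand-written future (illustrative sketch, not part of the source):
+// it is `Pending` on the first poll, arranges its own wakeup, and is `Ready`
+// on the second poll -- the wake-then-repoll contract described above.
+//
+// struct YieldOnce { polled: bool }
+//
+// impl Future for YieldOnce {
+//     type Output = ();
+//     fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
+//         if self.polled {
+//             Poll::Ready(())
+//         } else {
+//             self.polled = true;
+//             cx.waker().wake_by_ref(); // schedule an immediate re-poll
+//             Poll::Pending
+//         }
+//     }
+// }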
+
+#[stable(feature = "futures_api", since = "1.36.0")]
+impl<F: ?Sized + Future + Unpin> Future for &mut F {
+ type Output = F::Output;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ F::poll(Pin::new(&mut **self), cx)
+ }
+}
+
+#[stable(feature = "futures_api", since = "1.36.0")]
+impl<P> Future for Pin<P>
+where
+ P: ops::DerefMut<Target: Future>,
+{
+ type Output = <<P as ops::Deref>::Target as Future>::Output;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ <P::Target as Future>::poll(self.as_deref_mut(), cx)
+ }
+}
diff --git a/library/core/src/future/into_future.rs b/library/core/src/future/into_future.rs
new file mode 100644
index 000000000..649b43387
--- /dev/null
+++ b/library/core/src/future/into_future.rs
@@ -0,0 +1,139 @@
+use crate::future::Future;
+
+/// Conversion into a `Future`.
+///
+/// By implementing `IntoFuture` for a type, you define how it will be
+/// converted to a future.
+///
+/// # `.await` desugaring
+///
+/// The `.await` keyword desugars into a call to `IntoFuture::into_future`
+/// first before polling the future to completion. `IntoFuture` is implemented
+/// for all `T: Future` which means the `into_future` method will be available
+/// on all futures.
+///
+/// ```no_run
+/// use std::future::IntoFuture;
+///
+/// # async fn foo() {
+/// let v = async { "meow" };
+/// let mut fut = v.into_future();
+/// assert_eq!("meow", fut.await);
+/// # }
+/// ```
+///
+/// # Async builders
+///
+/// When implementing futures manually there will often be a choice between
+/// implementing `Future` or `IntoFuture` for a type. Implementing `Future` is a
+/// good choice in most cases. But implementing `IntoFuture` is most useful when
+/// implementing "async builder" types, which allow their values to be modified
+/// multiple times before being `.await`ed.
+///
+/// ```rust
+/// use std::future::{ready, Ready, IntoFuture};
+///
+/// /// Eventually multiply two numbers
+/// pub struct Multiply {
+/// num: u16,
+/// factor: u16,
+/// }
+///
+/// impl Multiply {
+/// /// Construct a new instance of `Multiply`.
+/// pub fn new(num: u16, factor: u16) -> Self {
+/// Self { num, factor }
+/// }
+///
+/// /// Set the number to multiply by the factor.
+/// pub fn number(mut self, num: u16) -> Self {
+/// self.num = num;
+/// self
+/// }
+///
+/// /// Set the factor to multiply the number with.
+/// pub fn factor(mut self, factor: u16) -> Self {
+/// self.factor = factor;
+/// self
+/// }
+/// }
+///
+/// impl IntoFuture for Multiply {
+/// type Output = u16;
+/// type IntoFuture = Ready<Self::Output>;
+///
+/// fn into_future(self) -> Self::IntoFuture {
+/// ready(self.num * self.factor)
+/// }
+/// }
+///
+/// // NOTE: Rust does not yet have an `async fn main` function; that functionality
+/// // currently only exists in the ecosystem.
+/// async fn run() {
+/// let num = Multiply::new(0, 0) // initialize the builder to number: 0, factor: 0
+/// .number(2) // change the number to 2
+/// .factor(2) // change the factor to 2
+/// .await; // convert to future and .await
+///
+/// assert_eq!(num, 4);
+/// }
+/// ```
+///
+/// # Usage in trait bounds
+///
+/// Using `IntoFuture` in trait bounds allows a function to be generic over both
+/// `Future` and `IntoFuture`. This is convenient for users of the function, so
+/// when they are using it they don't have to make an extra call to
+/// `IntoFuture::into_future` to obtain an instance of `Future`:
+///
+/// ```rust
+/// use std::future::IntoFuture;
+///
+/// /// Convert the output of a future to a string.
+/// async fn fut_to_string<Fut>(fut: Fut) -> String
+/// where
+/// Fut: IntoFuture,
+/// Fut::Output: std::fmt::Debug,
+/// {
+/// format!("{:?}", fut.await)
+/// }
+/// ```
+#[stable(feature = "into_future", since = "1.64.0")]
+pub trait IntoFuture {
+ /// The output that the future will produce on completion.
+ #[stable(feature = "into_future", since = "1.64.0")]
+ type Output;
+
+ /// Which kind of future are we turning this into?
+ #[stable(feature = "into_future", since = "1.64.0")]
+ type IntoFuture: Future<Output = Self::Output>;
+
+ /// Creates a future from a value.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use std::future::IntoFuture;
+ ///
+ /// # async fn foo() {
+ /// let v = async { "meow" };
+ /// let mut fut = v.into_future();
+ /// assert_eq!("meow", fut.await);
+ /// # }
+ /// ```
+ #[stable(feature = "into_future", since = "1.64.0")]
+ #[lang = "into_future"]
+ fn into_future(self) -> Self::IntoFuture;
+}
+
+#[stable(feature = "into_future", since = "1.64.0")]
+impl<F: Future> IntoFuture for F {
+ type Output = F::Output;
+ type IntoFuture = F;
+
+ fn into_future(self) -> Self::IntoFuture {
+ self
+ }
+}
diff --git a/library/core/src/future/join.rs b/library/core/src/future/join.rs
new file mode 100644
index 000000000..35f0dea06
--- /dev/null
+++ b/library/core/src/future/join.rs
@@ -0,0 +1,193 @@
+#![allow(unused_imports, unused_macros)] // items are used by the macro
+
+use crate::cell::UnsafeCell;
+use crate::future::{poll_fn, Future};
+use crate::mem;
+use crate::pin::Pin;
+use crate::task::{Context, Poll};
+
+/// Polls multiple futures simultaneously, returning a tuple
+/// of all results once complete.
+///
+/// While `join!(a, b).await` is similar to `(a.await, b.await)`,
+/// `join!` polls both futures concurrently and is therefore more efficient.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(future_join)]
+///
+/// use std::future::join;
+///
+/// async fn one() -> usize { 1 }
+/// async fn two() -> usize { 2 }
+///
+/// # let _ = async {
+/// let x = join!(one(), two()).await;
+/// assert_eq!(x, (1, 2));
+/// # };
+/// ```
+///
+/// `join!` is variadic, so you can pass any number of futures:
+///
+/// ```
+/// #![feature(future_join)]
+///
+/// use std::future::join;
+///
+/// async fn one() -> usize { 1 }
+/// async fn two() -> usize { 2 }
+/// async fn three() -> usize { 3 }
+///
+/// # let _ = async {
+/// let x = join!(one(), two(), three()).await;
+/// assert_eq!(x, (1, 2, 3));
+/// # };
+/// ```
+#[unstable(feature = "future_join", issue = "91642")]
+pub macro join( $($fut:expr),+ $(,)? ) {
+    // Funnel through an internal macro so as not to leak implementation details.
+ join_internal! {
+ current_position: []
+ futures_and_positions: []
+ munching: [ $($fut)+ ]
+ }
+}
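+
+// Roughly (illustrative), `join!(a, b)` expands to an async block that pins a
+// `(MaybeDone::Future(a), MaybeDone::Future(b))` tuple and, inside a `poll_fn`,
+// polls every element on each wakeup until all of them are done.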
+
+// FIXME(danielhenrymantilla): a private macro should need no stability guarantee.
+#[unstable(feature = "future_join", issue = "91642")]
+/// To be able to *name* the i-th future in the tuple (say we want the .4-th),
+/// the following trick will be used: `let (_, _, _, _, it, ..) = tuple;`
+/// In order to do that, we need to generate an `i`-long repetition of `_`
+/// for each `i`-th future. Hence the recursive muncher approach.
+macro join_internal {
+ // Recursion step: map each future with its "position" (underscore count).
+ (
+ // Accumulate a token for each future that has been expanded: "_ _ _".
+ current_position: [
+ $($underscores:tt)*
+ ]
+ // Accumulate Futures and their positions in the tuple: `_0th () _1st ( _ ) …`.
+ futures_and_positions: [
+ $($acc:tt)*
+ ]
+ // Munch one future.
+ munching: [
+ $current:tt
+ $($rest:tt)*
+ ]
+ ) => (
+ join_internal! {
+ current_position: [
+ $($underscores)*
+ _
+ ]
+ futures_and_positions: [
+ $($acc)*
+ $current ( $($underscores)* )
+ ]
+ munching: [
+ $($rest)*
+ ]
+ }
+ ),
+
+ // End of recursion: generate the output future.
+ (
+ current_position: $_:tt
+ futures_and_positions: [
+ $(
+ $fut_expr:tt ( $($pos:tt)* )
+ )*
+ ]
+ // Nothing left to munch.
+ munching: []
+ ) => (
+ match ( $( MaybeDone::Future($fut_expr), )* ) { futures => async {
+ let mut futures = futures;
+ // SAFETY: this is `pin_mut!`.
+ let mut futures = unsafe { Pin::new_unchecked(&mut futures) };
+ poll_fn(move |cx| {
+ let mut done = true;
+ // For each `fut`, pin-project to it, and poll it.
+ $(
+ // SAFETY: pinning projection
+ let fut = unsafe {
+ futures.as_mut().map_unchecked_mut(|it| {
+ let ( $($pos,)* fut, .. ) = it;
+ fut
+ })
+ };
+                    // However tempting it may be to write `let () = fut.poll(cx).ready()?;`,
+                    // doing so would defeat the point of `join!`: to eagerly start polling
+                    // all of the futures, so that their waits can overlap.
+ done &= fut.poll(cx).is_ready();
+ )*
+ if !done {
+ return Poll::Pending;
+ }
+ // All ready; time to extract all the outputs.
+
+ // SAFETY: `.take_output()` does not break the `Pin` invariants for that `fut`.
+ let futures = unsafe {
+ futures.as_mut().get_unchecked_mut()
+ };
+ Poll::Ready(
+ ($(
+ {
+ let ( $($pos,)* fut, .. ) = &mut *futures;
+ fut.take_output().unwrap()
+ }
+ ),*) // <- no trailing comma since we don't want 1-tuples.
+ )
+ }).await
+ }}
+ ),
+}
+
+/// Future used by `join!` that stores its output, to be taken later, and
+/// doesn't panic when polled after ready.
+///
+/// This type is public in a private module for use by the macro.
+#[allow(missing_debug_implementations)]
+#[unstable(feature = "future_join", issue = "91642")]
+pub enum MaybeDone<F: Future> {
+ Future(F),
+ Done(F::Output),
+ Taken,
+}
+
+#[unstable(feature = "future_join", issue = "91642")]
+impl<F: Future> MaybeDone<F> {
+ pub fn take_output(&mut self) -> Option<F::Output> {
+ match *self {
+ MaybeDone::Done(_) => match mem::replace(self, Self::Taken) {
+ MaybeDone::Done(val) => Some(val),
+ _ => unreachable!(),
+ },
+ _ => None,
+ }
+ }
+}
+
+#[unstable(feature = "future_join", issue = "91642")]
+impl<F: Future> Future for MaybeDone<F> {
+ type Output = ();
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        // SAFETY: pinning is structural for `f`
+ unsafe {
+ // Do not mix match ergonomics with unsafe.
+ match *self.as_mut().get_unchecked_mut() {
+ MaybeDone::Future(ref mut f) => {
+ let val = Pin::new_unchecked(f).poll(cx).ready()?;
+ self.set(Self::Done(val));
+ }
+ MaybeDone::Done(_) => {}
+ MaybeDone::Taken => unreachable!(),
+ }
+ }
+
+ Poll::Ready(())
+ }
+}
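+
+// State walk-through (illustrative): polling `MaybeDone::Future(f)` drives `f`;
+// once `f` is ready, its output is stashed as `Done(val)`. `take_output` then
+// moves the value out, leaving `Taken` behind so a second take returns `None`.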
diff --git a/library/core/src/future/mod.rs b/library/core/src/future/mod.rs
new file mode 100644
index 000000000..6487aa088
--- /dev/null
+++ b/library/core/src/future/mod.rs
@@ -0,0 +1,110 @@
+#![stable(feature = "futures_api", since = "1.36.0")]
+
+//! Basic asynchronous functionality.
+//!
+//! Please see the fundamental [`async`] and [`await`] keywords and the [async book]
+//! for more information on asynchronous programming in Rust.
+//!
+//! [`async`]: ../../std/keyword.async.html
+//! [`await`]: ../../std/keyword.await.html
+//! [async book]: https://rust-lang.github.io/async-book/
+
+use crate::{
+ ops::{Generator, GeneratorState},
+ pin::Pin,
+ ptr::NonNull,
+ task::{Context, Poll},
+};
+
+mod future;
+mod into_future;
+mod join;
+mod pending;
+mod poll_fn;
+mod ready;
+
+#[stable(feature = "futures_api", since = "1.36.0")]
+pub use self::future::Future;
+
+#[unstable(feature = "future_join", issue = "91642")]
+pub use self::join::join;
+
+#[stable(feature = "into_future", since = "1.64.0")]
+pub use into_future::IntoFuture;
+
+#[stable(feature = "future_readiness_fns", since = "1.48.0")]
+pub use pending::{pending, Pending};
+#[stable(feature = "future_readiness_fns", since = "1.48.0")]
+pub use ready::{ready, Ready};
+
+#[stable(feature = "future_poll_fn", since = "1.64.0")]
+pub use poll_fn::{poll_fn, PollFn};
+
+/// This type is needed because:
+///
+/// a) Generators cannot implement `for<'a, 'b> Generator<&'a mut Context<'b>>`, so we need to pass
+/// a raw pointer (see <https://github.com/rust-lang/rust/issues/68923>).
+/// b) Raw pointers and `NonNull` aren't `Send` or `Sync`, so that would make every single future
+/// non-Send/Sync as well, and we don't want that.
+///
+/// It also simplifies the HIR lowering of `.await`.
+#[doc(hidden)]
+#[unstable(feature = "gen_future", issue = "50547")]
+#[derive(Debug, Copy, Clone)]
+pub struct ResumeTy(NonNull<Context<'static>>);
+
+#[unstable(feature = "gen_future", issue = "50547")]
+unsafe impl Send for ResumeTy {}
+
+#[unstable(feature = "gen_future", issue = "50547")]
+unsafe impl Sync for ResumeTy {}
+
+/// Wrap a generator in a future.
+///
+/// This function returns a `GenFuture` underneath, but hides it in `impl Trait` to give
+/// better error messages (`impl Future` rather than `GenFuture<[closure.....]>`).
+// This is `const` to avoid extra errors after we recover from `const async fn`
+#[lang = "from_generator"]
+#[doc(hidden)]
+#[unstable(feature = "gen_future", issue = "50547")]
+#[rustc_const_unstable(feature = "gen_future", issue = "50547")]
+#[inline]
+pub const fn from_generator<T>(gen: T) -> impl Future<Output = T::Return>
+where
+ T: Generator<ResumeTy, Yield = ()>,
+{
+ #[rustc_diagnostic_item = "gen_future"]
+ struct GenFuture<T: Generator<ResumeTy, Yield = ()>>(T);
+
+ // We rely on the fact that async/await futures are immovable in order to create
+ // self-referential borrows in the underlying generator.
+ impl<T: Generator<ResumeTy, Yield = ()>> !Unpin for GenFuture<T> {}
+
+ impl<T: Generator<ResumeTy, Yield = ()>> Future for GenFuture<T> {
+ type Output = T::Return;
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ // SAFETY: Safe because we're !Unpin + !Drop, and this is just a field projection.
+ let gen = unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) };
+
+ // Resume the generator, turning the `&mut Context` into a `NonNull` raw pointer. The
+ // `.await` lowering will safely cast that back to a `&mut Context`.
+ match gen.resume(ResumeTy(NonNull::from(cx).cast::<Context<'static>>())) {
+ GeneratorState::Yielded(()) => Poll::Pending,
+ GeneratorState::Complete(x) => Poll::Ready(x),
+ }
+ }
+ }
+
+ GenFuture(gen)
+}
+
+#[lang = "get_context"]
+#[doc(hidden)]
+#[unstable(feature = "gen_future", issue = "50547")]
+#[must_use]
+#[inline]
+pub unsafe fn get_context<'a, 'b>(cx: ResumeTy) -> &'a mut Context<'b> {
+ // SAFETY: the caller must guarantee that `cx.0` is a valid pointer
+ // that fulfills all the requirements for a mutable reference.
+ unsafe { &mut *cx.0.as_ptr().cast() }
+}
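+
+// For orientation (a rough sketch of the lowering, not emitted verbatim):
+// `async { e }` becomes `from_generator(|mut cx: ResumeTy| { ...; e })`, and
+// each `.await` inside resumes the generator and re-derives the `&mut Context`
+// via `get_context` after every yield.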
diff --git a/library/core/src/future/pending.rs b/library/core/src/future/pending.rs
new file mode 100644
index 000000000..2877e66ec
--- /dev/null
+++ b/library/core/src/future/pending.rs
@@ -0,0 +1,58 @@
+use crate::fmt::{self, Debug};
+use crate::future::Future;
+use crate::marker;
+use crate::pin::Pin;
+use crate::task::{Context, Poll};
+
+/// Creates a future which never resolves, representing a computation that never
+/// finishes.
+///
+/// This `struct` is created by [`pending()`]. See its
+/// documentation for more.
+#[stable(feature = "future_readiness_fns", since = "1.48.0")]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct Pending<T> {
+ _data: marker::PhantomData<fn() -> T>,
+}
+
+/// Creates a future which never resolves, representing a computation that never
+/// finishes.
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::future;
+///
+/// # async fn run() {
+/// let future = future::pending();
+/// let () = future.await;
+/// unreachable!();
+/// # }
+/// ```
+#[stable(feature = "future_readiness_fns", since = "1.48.0")]
+pub fn pending<T>() -> Pending<T> {
+ Pending { _data: marker::PhantomData }
+}
+
+#[stable(feature = "future_readiness_fns", since = "1.48.0")]
+impl<T> Future for Pending<T> {
+ type Output = T;
+
+ fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<T> {
+ Poll::Pending
+ }
+}
+
+#[stable(feature = "future_readiness_fns", since = "1.48.0")]
+impl<T> Debug for Pending<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Pending").finish()
+ }
+}
+
+#[stable(feature = "future_readiness_fns", since = "1.48.0")]
+impl<T> Clone for Pending<T> {
+ fn clone(&self) -> Self {
+ pending()
+ }
+}
diff --git a/library/core/src/future/poll_fn.rs b/library/core/src/future/poll_fn.rs
new file mode 100644
index 000000000..db2a52332
--- /dev/null
+++ b/library/core/src/future/poll_fn.rs
@@ -0,0 +1,63 @@
+use crate::fmt;
+use crate::future::Future;
+use crate::pin::Pin;
+use crate::task::{Context, Poll};
+
+/// Creates a future that wraps a function returning [`Poll`].
+///
+/// Polling the future delegates to the wrapped function.
+///
+/// # Examples
+///
+/// ```
+/// # async fn run() {
+/// use core::future::poll_fn;
+/// use std::task::{Context, Poll};
+///
+/// fn read_line(_cx: &mut Context<'_>) -> Poll<String> {
+/// Poll::Ready("Hello, World!".into())
+/// }
+///
+/// let read_future = poll_fn(read_line);
+/// assert_eq!(read_future.await, "Hello, World!".to_owned());
+/// # }
+/// ```
+#[stable(feature = "future_poll_fn", since = "1.64.0")]
+pub fn poll_fn<T, F>(f: F) -> PollFn<F>
+where
+ F: FnMut(&mut Context<'_>) -> Poll<T>,
+{
+ PollFn { f }
+}
+
+/// A Future that wraps a function returning [`Poll`].
+///
+/// This `struct` is created by [`poll_fn()`]. See its
+/// documentation for more.
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+#[stable(feature = "future_poll_fn", since = "1.64.0")]
+pub struct PollFn<F> {
+ f: F,
+}
+
+#[stable(feature = "future_poll_fn", since = "1.64.0")]
+impl<F> Unpin for PollFn<F> {}
+
+#[stable(feature = "future_poll_fn", since = "1.64.0")]
+impl<F> fmt::Debug for PollFn<F> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("PollFn").finish()
+ }
+}
+
+#[stable(feature = "future_poll_fn", since = "1.64.0")]
+impl<T, F> Future for PollFn<F>
+where
+ F: FnMut(&mut Context<'_>) -> Poll<T>,
+{
+ type Output = T;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<T> {
+ (&mut self.f)(cx)
+ }
+}
diff --git a/library/core/src/future/ready.rs b/library/core/src/future/ready.rs
new file mode 100644
index 000000000..48f20f90a
--- /dev/null
+++ b/library/core/src/future/ready.rs
@@ -0,0 +1,46 @@
+use crate::future::Future;
+use crate::pin::Pin;
+use crate::task::{Context, Poll};
+
+/// A future that is immediately ready with a value.
+///
+/// This `struct` is created by [`ready()`]. See its
+/// documentation for more.
+#[stable(feature = "future_readiness_fns", since = "1.48.0")]
+#[derive(Debug, Clone)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct Ready<T>(Option<T>);
+
+#[stable(feature = "future_readiness_fns", since = "1.48.0")]
+impl<T> Unpin for Ready<T> {}
+
+#[stable(feature = "future_readiness_fns", since = "1.48.0")]
+impl<T> Future for Ready<T> {
+ type Output = T;
+
+ #[inline]
+ fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<T> {
+ Poll::Ready(self.0.take().expect("`Ready` polled after completion"))
+ }
+}
+
+/// Creates a future that is immediately ready with a value.
+///
+/// Futures created through this function are functionally similar to those
+/// created through `async {}`. The main difference is that futures created
+/// through this function are named and implement `Unpin`.
+///
+/// # Examples
+///
+/// ```
+/// use std::future;
+///
+/// # async fn run() {
+/// let a = future::ready(1);
+/// assert_eq!(a.await, 1);
+/// # }
+/// ```
+#[stable(feature = "future_readiness_fns", since = "1.48.0")]
+pub fn ready<T>(t: T) -> Ready<T> {
+ Ready(Some(t))
+}
diff --git a/library/core/src/hash/mod.rs b/library/core/src/hash/mod.rs
new file mode 100644
index 000000000..5974562ac
--- /dev/null
+++ b/library/core/src/hash/mod.rs
@@ -0,0 +1,978 @@
+//! Generic hashing support.
+//!
+//! This module provides a generic way to compute the [hash] of a value.
+//! Hashes are most commonly used with [`HashMap`] and [`HashSet`].
+//!
+//! [hash]: https://en.wikipedia.org/wiki/Hash_function
+//! [`HashMap`]: ../../std/collections/struct.HashMap.html
+//! [`HashSet`]: ../../std/collections/struct.HashSet.html
+//!
+//! The simplest way to make a type hashable is to use `#[derive(Hash)]`:
+//!
+//! # Examples
+//!
+//! ```rust
+//! use std::collections::hash_map::DefaultHasher;
+//! use std::hash::{Hash, Hasher};
+//!
+//! #[derive(Hash)]
+//! struct Person {
+//! id: u32,
+//! name: String,
+//! phone: u64,
+//! }
+//!
+//! let person1 = Person {
+//! id: 5,
+//! name: "Janet".to_string(),
+//! phone: 555_666_7777,
+//! };
+//! let person2 = Person {
+//! id: 5,
+//! name: "Bob".to_string(),
+//! phone: 555_666_7777,
+//! };
+//!
+//! assert!(calculate_hash(&person1) != calculate_hash(&person2));
+//!
+//! fn calculate_hash<T: Hash>(t: &T) -> u64 {
+//! let mut s = DefaultHasher::new();
+//! t.hash(&mut s);
+//! s.finish()
+//! }
+//! ```
+//!
+//! If you need more control over how a value is hashed, you need to implement
+//! the [`Hash`] trait:
+//!
+//! ```rust
+//! use std::collections::hash_map::DefaultHasher;
+//! use std::hash::{Hash, Hasher};
+//!
+//! struct Person {
+//! id: u32,
+//! # #[allow(dead_code)]
+//! name: String,
+//! phone: u64,
+//! }
+//!
+//! impl Hash for Person {
+//! fn hash<H: Hasher>(&self, state: &mut H) {
+//! self.id.hash(state);
+//! self.phone.hash(state);
+//! }
+//! }
+//!
+//! let person1 = Person {
+//! id: 5,
+//! name: "Janet".to_string(),
+//! phone: 555_666_7777,
+//! };
+//! let person2 = Person {
+//! id: 5,
+//! name: "Bob".to_string(),
+//! phone: 555_666_7777,
+//! };
+//!
+//! assert_eq!(calculate_hash(&person1), calculate_hash(&person2));
+//!
+//! fn calculate_hash<T: Hash>(t: &T) -> u64 {
+//! let mut s = DefaultHasher::new();
+//! t.hash(&mut s);
+//! s.finish()
+//! }
+//! ```
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::fmt;
+use crate::marker;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated)]
+pub use self::sip::SipHasher;
+
+#[unstable(feature = "hashmap_internals", issue = "none")]
+#[allow(deprecated)]
+#[doc(hidden)]
+pub use self::sip::SipHasher13;
+
+mod sip;
+
+/// A hashable type.
+///
+/// Types implementing `Hash` are able to be [`hash`]ed with an instance of
+/// [`Hasher`].
+///
+/// ## Implementing `Hash`
+///
+/// You can derive `Hash` with `#[derive(Hash)]` if all fields implement `Hash`.
+/// The resulting hash will be the combination of the values from calling
+/// [`hash`] on each field.
+///
+/// ```
+/// #[derive(Hash)]
+/// struct Rustacean {
+/// name: String,
+/// country: String,
+/// }
+/// ```
+///
+/// If you need more control over how a value is hashed, you can of course
+/// implement the `Hash` trait yourself:
+///
+/// ```
+/// use std::hash::{Hash, Hasher};
+///
+/// struct Person {
+/// id: u32,
+/// name: String,
+/// phone: u64,
+/// }
+///
+/// impl Hash for Person {
+/// fn hash<H: Hasher>(&self, state: &mut H) {
+/// self.id.hash(state);
+/// self.phone.hash(state);
+/// }
+/// }
+/// ```
+///
+/// ## `Hash` and `Eq`
+///
+/// When implementing both `Hash` and [`Eq`], it is important that the following
+/// property holds:
+///
+/// ```text
+/// k1 == k2 -> hash(k1) == hash(k2)
+/// ```
+///
+/// In other words, if two keys are equal, their hashes must also be equal.
+/// [`HashMap`] and [`HashSet`] both rely on this behavior.
+///
+/// Thankfully, you won't need to worry about upholding this property when
+/// deriving both [`Eq`] and `Hash` with `#[derive(PartialEq, Eq, Hash)]`.
+///
+/// ## Prefix collisions
+///
+/// Implementations of `hash` should ensure that the data they
+/// pass to the `Hasher` are prefix-free. That is,
+/// unequal values should cause two different sequences of values to be written,
+/// and neither of the two sequences should be a prefix of the other.
+///
+/// For example, the standard implementation of [`Hash` for `&str`][impl] passes an extra
+/// `0xFF` byte to the `Hasher` so that the values `("ab", "c")` and `("a",
+/// "bc")` hash differently.
+///
+/// ## Portability
+///
+/// Due to differences in endianness and type sizes, data fed by `Hash` to a `Hasher`
+/// should not be considered portable across platforms. Additionally the data passed by most
+/// standard library types should not be considered stable between compiler versions.
+///
+/// This means tests shouldn't probe hard-coded hash values or data fed to a `Hasher` and
+/// instead should check consistency with `Eq`.
+///
+/// Serialization formats intended to be portable between platforms or compiler versions should
+/// either avoid encoding hashes or only rely on `Hash` and `Hasher` implementations that
+/// provide additional guarantees.
+///
+/// [`HashMap`]: ../../std/collections/struct.HashMap.html
+/// [`HashSet`]: ../../std/collections/struct.HashSet.html
+/// [`hash`]: Hash::hash
+/// [impl]: ../../std/primitive.str.html#impl-Hash-for-str
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_diagnostic_item = "Hash"]
+pub trait Hash {
+ /// Feeds this value into the given [`Hasher`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::hash_map::DefaultHasher;
+ /// use std::hash::{Hash, Hasher};
+ ///
+ /// let mut hasher = DefaultHasher::new();
+ /// 7920.hash(&mut hasher);
+ /// println!("Hash is {:x}!", hasher.finish());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn hash<H: Hasher>(&self, state: &mut H);
+
+ /// Feeds a slice of this type into the given [`Hasher`].
+ ///
+ /// This method is meant as a convenience, but its implementation is
+ /// also explicitly left unspecified. It isn't guaranteed to be
+ /// equivalent to repeated calls of [`hash`] and implementations of
+ /// [`Hash`] should keep that in mind and call [`hash`] themselves
+ /// if the slice isn't treated as a whole unit in the [`PartialEq`]
+ /// implementation.
+ ///
+ /// For example, a [`VecDeque`] implementation might naïvely call
+ /// [`as_slices`] and then [`hash_slice`] on each slice, but this
+ /// is wrong since the two slices can change with a call to
+ /// [`make_contiguous`] without affecting the [`PartialEq`]
+ /// result. Since these slices aren't treated as singular
+ /// units, and instead part of a larger deque, this method cannot
+ /// be used.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::hash_map::DefaultHasher;
+ /// use std::hash::{Hash, Hasher};
+ ///
+ /// let mut hasher = DefaultHasher::new();
+ /// let numbers = [6, 28, 496, 8128];
+ /// Hash::hash_slice(&numbers, &mut hasher);
+ /// println!("Hash is {:x}!", hasher.finish());
+ /// ```
+ ///
+ /// [`VecDeque`]: ../../std/collections/struct.VecDeque.html
+ /// [`as_slices`]: ../../std/collections/struct.VecDeque.html#method.as_slices
+ /// [`make_contiguous`]: ../../std/collections/struct.VecDeque.html#method.make_contiguous
+ /// [`hash`]: Hash::hash
+ /// [`hash_slice`]: Hash::hash_slice
+ #[stable(feature = "hash_slice", since = "1.3.0")]
+ fn hash_slice<H: Hasher>(data: &[Self], state: &mut H)
+ where
+ Self: Sized,
+ {
+ for piece in data {
+ piece.hash(state);
+ }
+ }
+}
+
+// Separate module to reexport the macro `Hash` from prelude without the trait `Hash`.
+pub(crate) mod macros {
+ /// Derive macro generating an impl of the trait `Hash`.
+ #[rustc_builtin_macro]
+ #[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+ #[allow_internal_unstable(core_intrinsics)]
+ pub macro Hash($item:item) {
+ /* compiler built-in */
+ }
+}
+#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+#[doc(inline)]
+pub use macros::Hash;
+
+/// A trait for hashing an arbitrary stream of bytes.
+///
+/// Instances of `Hasher` usually represent state that is changed while hashing
+/// data.
+///
+/// `Hasher` provides a fairly basic interface for retrieving the generated hash
+/// (with [`finish`]), and writing integers as well as slices of bytes into an
+/// instance (with [`write`] and [`write_u8`] etc.). Most of the time, `Hasher`
+/// instances are used in conjunction with the [`Hash`] trait.
+///
+/// This trait provides no guarantees about how the various `write_*` methods are
+/// defined and implementations of [`Hash`] should not assume that they work one
+/// way or another. You cannot assume, for example, that a [`write_u32`] call is
+/// equivalent to four calls of [`write_u8`]. Nor can you assume that adjacent
+/// `write` calls are merged, so it's possible, for example, that
+/// ```
+/// # fn foo(hasher: &mut impl std::hash::Hasher) {
+/// hasher.write(&[1, 2]);
+/// hasher.write(&[3, 4, 5, 6]);
+/// # }
+/// ```
+/// and
+/// ```
+/// # fn foo(hasher: &mut impl std::hash::Hasher) {
+/// hasher.write(&[1, 2, 3, 4]);
+/// hasher.write(&[5, 6]);
+/// # }
+/// ```
+/// end up producing different hashes.
+///
+/// Thus to produce the same hash value, [`Hash`] implementations must ensure
+/// for equivalent items that exactly the same sequence of calls is made -- the
+/// same methods with the same parameters in the same order.
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::hash_map::DefaultHasher;
+/// use std::hash::Hasher;
+///
+/// let mut hasher = DefaultHasher::new();
+///
+/// hasher.write_u32(1989);
+/// hasher.write_u8(11);
+/// hasher.write_u8(9);
+/// hasher.write(b"Huh?");
+///
+/// println!("Hash is {:x}!", hasher.finish());
+/// ```
+///
+/// [`finish`]: Hasher::finish
+/// [`write`]: Hasher::write
+/// [`write_u8`]: Hasher::write_u8
+/// [`write_u32`]: Hasher::write_u32
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait Hasher {
+ /// Returns the hash value for the values written so far.
+ ///
+ /// Despite its name, the method does not reset the hasher’s internal
+ /// state. Additional [`write`]s will continue from the current value.
+ /// If you need to start a fresh hash value, you will have to create
+ /// a new hasher.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::hash_map::DefaultHasher;
+ /// use std::hash::Hasher;
+ ///
+ /// let mut hasher = DefaultHasher::new();
+ /// hasher.write(b"Cool!");
+ ///
+ /// println!("Hash is {:x}!", hasher.finish());
+ /// ```
+ ///
+ /// [`write`]: Hasher::write
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn finish(&self) -> u64;
+
+ /// Writes some data into this `Hasher`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::hash_map::DefaultHasher;
+ /// use std::hash::Hasher;
+ ///
+ /// let mut hasher = DefaultHasher::new();
+ /// let data = [0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef];
+ ///
+ /// hasher.write(&data);
+ ///
+ /// println!("Hash is {:x}!", hasher.finish());
+ /// ```
+ ///
+ /// # Note to Implementers
+ ///
+ /// You generally should not do length-prefixing as part of implementing
+ /// this method. It's up to the [`Hash`] implementation to call
+ /// [`Hasher::write_length_prefix`] before sequences that need it.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn write(&mut self, bytes: &[u8]);
+
+ /// Writes a single `u8` into this hasher.
+ #[inline]
+ #[stable(feature = "hasher_write", since = "1.3.0")]
+ fn write_u8(&mut self, i: u8) {
+ self.write(&[i])
+ }
+ /// Writes a single `u16` into this hasher.
+ #[inline]
+ #[stable(feature = "hasher_write", since = "1.3.0")]
+ fn write_u16(&mut self, i: u16) {
+ self.write(&i.to_ne_bytes())
+ }
+ /// Writes a single `u32` into this hasher.
+ #[inline]
+ #[stable(feature = "hasher_write", since = "1.3.0")]
+ fn write_u32(&mut self, i: u32) {
+ self.write(&i.to_ne_bytes())
+ }
+ /// Writes a single `u64` into this hasher.
+ #[inline]
+ #[stable(feature = "hasher_write", since = "1.3.0")]
+ fn write_u64(&mut self, i: u64) {
+ self.write(&i.to_ne_bytes())
+ }
+ /// Writes a single `u128` into this hasher.
+ #[inline]
+ #[stable(feature = "i128", since = "1.26.0")]
+ fn write_u128(&mut self, i: u128) {
+ self.write(&i.to_ne_bytes())
+ }
+ /// Writes a single `usize` into this hasher.
+ #[inline]
+ #[stable(feature = "hasher_write", since = "1.3.0")]
+ fn write_usize(&mut self, i: usize) {
+ self.write(&i.to_ne_bytes())
+ }
+
+ /// Writes a single `i8` into this hasher.
+ #[inline]
+ #[stable(feature = "hasher_write", since = "1.3.0")]
+ fn write_i8(&mut self, i: i8) {
+ self.write_u8(i as u8)
+ }
+ /// Writes a single `i16` into this hasher.
+ #[inline]
+ #[stable(feature = "hasher_write", since = "1.3.0")]
+ fn write_i16(&mut self, i: i16) {
+ self.write_u16(i as u16)
+ }
+ /// Writes a single `i32` into this hasher.
+ #[inline]
+ #[stable(feature = "hasher_write", since = "1.3.0")]
+ fn write_i32(&mut self, i: i32) {
+ self.write_u32(i as u32)
+ }
+ /// Writes a single `i64` into this hasher.
+ #[inline]
+ #[stable(feature = "hasher_write", since = "1.3.0")]
+ fn write_i64(&mut self, i: i64) {
+ self.write_u64(i as u64)
+ }
+ /// Writes a single `i128` into this hasher.
+ #[inline]
+ #[stable(feature = "i128", since = "1.26.0")]
+ fn write_i128(&mut self, i: i128) {
+ self.write_u128(i as u128)
+ }
+ /// Writes a single `isize` into this hasher.
+ #[inline]
+ #[stable(feature = "hasher_write", since = "1.3.0")]
+ fn write_isize(&mut self, i: isize) {
+ self.write_usize(i as usize)
+ }
+
+ /// Writes a length prefix into this hasher, as part of being prefix-free.
+ ///
+ /// If you're implementing [`Hash`] for a custom collection, call this before
+ /// writing its contents to this `Hasher`. That way
+ /// `(collection![1, 2, 3], collection![4, 5])` and
+ /// `(collection![1, 2], collection![3, 4, 5])` will provide different
+    /// sequences of values to the `Hasher`.
+ ///
+ /// The `impl<T> Hash for [T]` includes a call to this method, so if you're
+ /// hashing a slice (or array or vector) via its `Hash::hash` method,
+ /// you should **not** call this yourself.
+ ///
+ /// This method is only for providing domain separation. If you want to
+ /// hash a `usize` that represents part of the *data*, then it's important
+ /// that you pass it to [`Hasher::write_usize`] instead of to this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(hasher_prefixfree_extras)]
+ /// # // Stubs to make the `impl` below pass the compiler
+ /// # struct MyCollection<T>(Option<T>);
+ /// # impl<T> MyCollection<T> {
+ /// # fn len(&self) -> usize { todo!() }
+ /// # }
+ /// # impl<'a, T> IntoIterator for &'a MyCollection<T> {
+ /// # type Item = T;
+ /// # type IntoIter = std::iter::Empty<T>;
+ /// # fn into_iter(self) -> Self::IntoIter { todo!() }
+ /// # }
+ ///
+ /// use std::hash::{Hash, Hasher};
+ /// impl<T: Hash> Hash for MyCollection<T> {
+ /// fn hash<H: Hasher>(&self, state: &mut H) {
+ /// state.write_length_prefix(self.len());
+ /// for elt in self {
+ /// elt.hash(state);
+ /// }
+ /// }
+ /// }
+ /// ```
+ ///
+ /// # Note to Implementers
+ ///
+ /// If you've decided that your `Hasher` is willing to be susceptible to
+ /// Hash-DoS attacks, then you might consider skipping hashing some or all
+ /// of the `len` provided in the name of increased performance.
+ #[inline]
+ #[unstable(feature = "hasher_prefixfree_extras", issue = "96762")]
+ fn write_length_prefix(&mut self, len: usize) {
+ self.write_usize(len);
+ }
+
+ /// Writes a single `str` into this hasher.
+ ///
+ /// If you're implementing [`Hash`], you generally do not need to call this,
+ /// as the `impl Hash for str` does, so you should prefer that instead.
+ ///
+ /// This includes the domain separator for prefix-freedom, so you should
+ /// **not** call `Self::write_length_prefix` before calling this.
+ ///
+ /// # Note to Implementers
+ ///
+ /// There are at least two reasonable default ways to implement this.
+ /// Which one will be the default is not yet decided, so for now
+ /// you probably want to override it specifically.
+ ///
+ /// ## The general answer
+ ///
+ /// It's always correct to implement this with a length prefix:
+ ///
+ /// ```
+ /// # #![feature(hasher_prefixfree_extras)]
+ /// # struct Foo;
+ /// # impl std::hash::Hasher for Foo {
+ /// # fn finish(&self) -> u64 { unimplemented!() }
+ /// # fn write(&mut self, _bytes: &[u8]) { unimplemented!() }
+ /// fn write_str(&mut self, s: &str) {
+ /// self.write_length_prefix(s.len());
+ /// self.write(s.as_bytes());
+ /// }
+ /// # }
+ /// ```
+ ///
+ /// And, if your `Hasher` works in `usize` chunks, this is likely a very
+ /// efficient way to do it, as anything more complicated may well end up
+ /// slower than just running the round with the length.
+ ///
+ /// ## If your `Hasher` works byte-wise
+ ///
+    /// One nice thing about `str` being UTF-8 is that the byte `b'\xFF'` never
+    /// appears in it. That means that you can append it to the byte stream
+ /// being hashed and maintain prefix-freedom:
+ ///
+ /// ```
+ /// # #![feature(hasher_prefixfree_extras)]
+ /// # struct Foo;
+ /// # impl std::hash::Hasher for Foo {
+ /// # fn finish(&self) -> u64 { unimplemented!() }
+ /// # fn write(&mut self, _bytes: &[u8]) { unimplemented!() }
+ /// fn write_str(&mut self, s: &str) {
+ /// self.write(s.as_bytes());
+ /// self.write_u8(0xff);
+ /// }
+ /// # }
+ /// ```
+ ///
+ /// This does require that your implementation not add extra padding, and
+ /// thus generally requires that you maintain a buffer, running a round
+ /// only once that buffer is full (or `finish` is called).
+ ///
+ /// That's because if `write` pads data out to a fixed chunk size, it's
+ /// likely that it does it in such a way that `"a"` and `"a\x00"` would
+ /// end up hashing the same sequence of things, introducing conflicts.
+ #[inline]
+ #[unstable(feature = "hasher_prefixfree_extras", issue = "96762")]
+ fn write_str(&mut self, s: &str) {
+ self.write(s.as_bytes());
+ self.write_u8(0xff);
+ }
+}
+
+#[stable(feature = "indirect_hasher_impl", since = "1.22.0")]
+impl<H: Hasher + ?Sized> Hasher for &mut H {
+ fn finish(&self) -> u64 {
+ (**self).finish()
+ }
+ fn write(&mut self, bytes: &[u8]) {
+ (**self).write(bytes)
+ }
+ fn write_u8(&mut self, i: u8) {
+ (**self).write_u8(i)
+ }
+ fn write_u16(&mut self, i: u16) {
+ (**self).write_u16(i)
+ }
+ fn write_u32(&mut self, i: u32) {
+ (**self).write_u32(i)
+ }
+ fn write_u64(&mut self, i: u64) {
+ (**self).write_u64(i)
+ }
+ fn write_u128(&mut self, i: u128) {
+ (**self).write_u128(i)
+ }
+ fn write_usize(&mut self, i: usize) {
+ (**self).write_usize(i)
+ }
+ fn write_i8(&mut self, i: i8) {
+ (**self).write_i8(i)
+ }
+ fn write_i16(&mut self, i: i16) {
+ (**self).write_i16(i)
+ }
+ fn write_i32(&mut self, i: i32) {
+ (**self).write_i32(i)
+ }
+ fn write_i64(&mut self, i: i64) {
+ (**self).write_i64(i)
+ }
+ fn write_i128(&mut self, i: i128) {
+ (**self).write_i128(i)
+ }
+ fn write_isize(&mut self, i: isize) {
+ (**self).write_isize(i)
+ }
+ fn write_length_prefix(&mut self, len: usize) {
+ (**self).write_length_prefix(len)
+ }
+ fn write_str(&mut self, s: &str) {
+ (**self).write_str(s)
+ }
+}
+
+/// A trait for creating instances of [`Hasher`].
+///
+/// A `BuildHasher` is typically used (e.g., by [`HashMap`]) to create
+/// [`Hasher`]s for each key such that they are hashed independently of one
+/// another, since [`Hasher`]s contain state.
+///
+/// For each instance of `BuildHasher`, the [`Hasher`]s created by
+/// [`build_hasher`] should be identical. That is, if the same stream of bytes
+/// is fed into each hasher, the same output will also be generated.
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::hash_map::RandomState;
+/// use std::hash::{BuildHasher, Hasher};
+///
+/// let s = RandomState::new();
+/// let mut hasher_1 = s.build_hasher();
+/// let mut hasher_2 = s.build_hasher();
+///
+/// hasher_1.write_u32(8128);
+/// hasher_2.write_u32(8128);
+///
+/// assert_eq!(hasher_1.finish(), hasher_2.finish());
+/// ```
+///
+/// [`build_hasher`]: BuildHasher::build_hasher
+/// [`HashMap`]: ../../std/collections/struct.HashMap.html
+#[stable(since = "1.7.0", feature = "build_hasher")]
+pub trait BuildHasher {
+ /// Type of the hasher that will be created.
+ #[stable(since = "1.7.0", feature = "build_hasher")]
+ type Hasher: Hasher;
+
+ /// Creates a new hasher.
+ ///
+ /// Each call to `build_hasher` on the same instance should produce identical
+ /// [`Hasher`]s.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::hash_map::RandomState;
+ /// use std::hash::BuildHasher;
+ ///
+ /// let s = RandomState::new();
+ /// let new_s = s.build_hasher();
+ /// ```
+ #[stable(since = "1.7.0", feature = "build_hasher")]
+ fn build_hasher(&self) -> Self::Hasher;
+
+ /// Calculates the hash of a single value.
+ ///
+ /// This is intended as a convenience for code which *consumes* hashes, such
+ /// as the implementation of a hash table or in unit tests that check
+ /// whether a custom [`Hash`] implementation behaves as expected.
+ ///
+ /// This must not be used in any code which *creates* hashes, such as in an
+ /// implementation of [`Hash`]. The way to create a combined hash of
+ /// multiple values is to call [`Hash::hash`] multiple times using the same
+ /// [`Hasher`], not to call this method repeatedly and combine the results.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// #![feature(build_hasher_simple_hash_one)]
+ ///
+ /// use std::cmp::{max, min};
+ /// use std::hash::{BuildHasher, Hash, Hasher};
+ /// struct OrderAmbivalentPair<T: Ord>(T, T);
+ /// impl<T: Ord + Hash> Hash for OrderAmbivalentPair<T> {
+ /// fn hash<H: Hasher>(&self, hasher: &mut H) {
+ /// min(&self.0, &self.1).hash(hasher);
+ /// max(&self.0, &self.1).hash(hasher);
+ /// }
+ /// }
+ ///
+ /// // Then later, in a `#[test]` for the type...
+ /// let bh = std::collections::hash_map::RandomState::new();
+ /// assert_eq!(
+ /// bh.hash_one(OrderAmbivalentPair(1, 2)),
+ /// bh.hash_one(OrderAmbivalentPair(2, 1))
+ /// );
+ /// assert_eq!(
+ /// bh.hash_one(OrderAmbivalentPair(10, 2)),
+ /// bh.hash_one(&OrderAmbivalentPair(2, 10))
+ /// );
+ /// ```
+ #[unstable(feature = "build_hasher_simple_hash_one", issue = "86161")]
+ fn hash_one<T: Hash>(&self, x: T) -> u64
+ where
+ Self: Sized,
+ {
+ let mut hasher = self.build_hasher();
+ x.hash(&mut hasher);
+ hasher.finish()
+ }
+}
+
+/// Used to create a default [`BuildHasher`] instance for types that implement
+/// [`Hasher`] and [`Default`].
+///
+/// `BuildHasherDefault<H>` can be used when a type `H` implements [`Hasher`] and
+/// [`Default`], and you need a corresponding [`BuildHasher`] instance, but none is
+/// defined.
+///
+/// Any `BuildHasherDefault` is [zero-sized]. It can be created with
+/// [`default`][method.default]. When using `BuildHasherDefault` with [`HashMap`] or
+/// [`HashSet`], this doesn't need to be done, since they implement appropriate
+/// [`Default`] instances themselves.
+///
+/// # Examples
+///
+/// Using `BuildHasherDefault` to specify a custom [`BuildHasher`] for
+/// [`HashMap`]:
+///
+/// ```
+/// use std::collections::HashMap;
+/// use std::hash::{BuildHasherDefault, Hasher};
+///
+/// #[derive(Default)]
+/// struct MyHasher;
+///
+/// impl Hasher for MyHasher {
+/// fn write(&mut self, bytes: &[u8]) {
+/// // Your hashing algorithm goes here!
+/// unimplemented!()
+/// }
+///
+/// fn finish(&self) -> u64 {
+/// // Your hashing algorithm goes here!
+/// unimplemented!()
+/// }
+/// }
+///
+/// type MyBuildHasher = BuildHasherDefault<MyHasher>;
+///
+/// let hash_map = HashMap::<u32, u32, MyBuildHasher>::default();
+/// ```
+///
+/// [method.default]: BuildHasherDefault::default
+/// [`HashMap`]: ../../std/collections/struct.HashMap.html
+/// [`HashSet`]: ../../std/collections/struct.HashSet.html
+/// [zero-sized]: https://doc.rust-lang.org/nomicon/exotic-sizes.html#zero-sized-types-zsts
+#[stable(since = "1.7.0", feature = "build_hasher")]
+pub struct BuildHasherDefault<H>(marker::PhantomData<fn() -> H>);
+
+#[stable(since = "1.9.0", feature = "core_impl_debug")]
+impl<H> fmt::Debug for BuildHasherDefault<H> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("BuildHasherDefault").finish()
+ }
+}
+
+#[stable(since = "1.7.0", feature = "build_hasher")]
+impl<H: Default + Hasher> BuildHasher for BuildHasherDefault<H> {
+ type Hasher = H;
+
+ fn build_hasher(&self) -> H {
+ H::default()
+ }
+}
+
+#[stable(since = "1.7.0", feature = "build_hasher")]
+impl<H> Clone for BuildHasherDefault<H> {
+ fn clone(&self) -> BuildHasherDefault<H> {
+ BuildHasherDefault(marker::PhantomData)
+ }
+}
+
+#[stable(since = "1.7.0", feature = "build_hasher")]
+#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
+impl<H> const Default for BuildHasherDefault<H> {
+ fn default() -> BuildHasherDefault<H> {
+ BuildHasherDefault(marker::PhantomData)
+ }
+}
+
+#[stable(since = "1.29.0", feature = "build_hasher_eq")]
+impl<H> PartialEq for BuildHasherDefault<H> {
+ fn eq(&self, _other: &BuildHasherDefault<H>) -> bool {
+ true
+ }
+}
+
+#[stable(since = "1.29.0", feature = "build_hasher_eq")]
+impl<H> Eq for BuildHasherDefault<H> {}
+
+mod impls {
+ use crate::mem;
+ use crate::slice;
+
+ use super::*;
+
+ macro_rules! impl_write {
+ ($(($ty:ident, $meth:ident),)*) => {$(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl Hash for $ty {
+ #[inline]
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ state.$meth(*self)
+ }
+
+ #[inline]
+ fn hash_slice<H: Hasher>(data: &[$ty], state: &mut H) {
+ let newlen = data.len() * mem::size_of::<$ty>();
+ let ptr = data.as_ptr() as *const u8;
+ // SAFETY: `ptr` is valid and aligned, as this macro is only used
+ // for numeric primitives which have no padding. The new slice only
+ // spans across `data` and is never mutated, and its total size is the
+ // same as the original `data` so it can't be over `isize::MAX`.
+ state.write(unsafe { slice::from_raw_parts(ptr, newlen) })
+ }
+ }
+ )*}
+ }
+
+ impl_write! {
+ (u8, write_u8),
+ (u16, write_u16),
+ (u32, write_u32),
+ (u64, write_u64),
+ (usize, write_usize),
+ (i8, write_i8),
+ (i16, write_i16),
+ (i32, write_i32),
+ (i64, write_i64),
+ (isize, write_isize),
+ (u128, write_u128),
+ (i128, write_i128),
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl Hash for bool {
+ #[inline]
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ state.write_u8(*self as u8)
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl Hash for char {
+ #[inline]
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ state.write_u32(*self as u32)
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl Hash for str {
+ #[inline]
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ state.write_str(self);
+ }
+ }
+
+ #[stable(feature = "never_hash", since = "1.29.0")]
+ impl Hash for ! {
+ #[inline]
+ fn hash<H: Hasher>(&self, _: &mut H) {
+ *self
+ }
+ }
+
+ macro_rules! impl_hash_tuple {
+ () => (
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl Hash for () {
+ #[inline]
+ fn hash<H: Hasher>(&self, _state: &mut H) {}
+ }
+ );
+
+ ( $($name:ident)+) => (
+ maybe_tuple_doc! {
+ $($name)+ @
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<$($name: Hash),+> Hash for ($($name,)+) where last_type!($($name,)+): ?Sized {
+ #[allow(non_snake_case)]
+ #[inline]
+ fn hash<S: Hasher>(&self, state: &mut S) {
+ let ($(ref $name,)+) = *self;
+ $($name.hash(state);)+
+ }
+ }
+ }
+ );
+ }
+
+ macro_rules! maybe_tuple_doc {
+ ($a:ident @ #[$meta:meta] $item:item) => {
+ #[cfg_attr(not(bootstrap), doc(fake_variadic))]
+ #[doc = "This trait is implemented for tuples up to twelve items long."]
+ #[$meta]
+ $item
+ };
+ ($a:ident $($rest_a:ident)+ @ #[$meta:meta] $item:item) => {
+ #[doc(hidden)]
+ #[$meta]
+ $item
+ };
+ }
+
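+    // Picks out the final type in a non-empty list, letting the tuple impl
+    // above relax `Sized` on just its last element.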
+ macro_rules! last_type {
+ ($a:ident,) => { $a };
+ ($a:ident, $($rest_a:ident,)+) => { last_type!($($rest_a,)+) };
+ }
+
+ impl_hash_tuple! {}
+ impl_hash_tuple! { T }
+ impl_hash_tuple! { T B }
+ impl_hash_tuple! { T B C }
+ impl_hash_tuple! { T B C D }
+ impl_hash_tuple! { T B C D E }
+ impl_hash_tuple! { T B C D E F }
+ impl_hash_tuple! { T B C D E F G }
+ impl_hash_tuple! { T B C D E F G H }
+ impl_hash_tuple! { T B C D E F G H I }
+ impl_hash_tuple! { T B C D E F G H I J }
+ impl_hash_tuple! { T B C D E F G H I J K }
+ impl_hash_tuple! { T B C D E F G H I J K L }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<T: Hash> Hash for [T] {
+ #[inline]
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ state.write_length_prefix(self.len());
+ Hash::hash_slice(self, state)
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<T: ?Sized + Hash> Hash for &T {
+ #[inline]
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ (**self).hash(state);
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<T: ?Sized + Hash> Hash for &mut T {
+ #[inline]
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ (**self).hash(state);
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<T: ?Sized> Hash for *const T {
+ #[inline]
+ fn hash<H: Hasher>(&self, state: &mut H) {
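+            // Hash both the address and the metadata: for thin pointers the
+            // metadata is `()` and contributes nothing, while for e.g.
+            // `*const [T]` it is the length, so same-address pointers of
+            // different lengths hash differently.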
+ let (address, metadata) = self.to_raw_parts();
+ state.write_usize(address.addr());
+ metadata.hash(state);
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<T: ?Sized> Hash for *mut T {
+ #[inline]
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ let (address, metadata) = self.to_raw_parts();
+ state.write_usize(address.addr());
+ metadata.hash(state);
+ }
+ }
+}
diff --git a/library/core/src/hash/sip.rs b/library/core/src/hash/sip.rs
new file mode 100644
index 000000000..81bf1dfdf
--- /dev/null
+++ b/library/core/src/hash/sip.rs
@@ -0,0 +1,401 @@
+//! An implementation of SipHash.
+
+#![allow(deprecated)] // the types in this module are deprecated
+
+use crate::cmp;
+use crate::marker::PhantomData;
+use crate::mem;
+use crate::ptr;
+
+/// An implementation of SipHash 1-3.
+///
+/// This is currently the default hashing function used by the standard library
+/// (e.g., `collections::HashMap` uses it by default).
+///
+/// See: <https://131002.net/siphash/>
+#[unstable(feature = "hashmap_internals", issue = "none")]
+#[deprecated(since = "1.13.0", note = "use `std::collections::hash_map::DefaultHasher` instead")]
+#[derive(Debug, Clone, Default)]
+#[doc(hidden)]
+pub struct SipHasher13 {
+ hasher: Hasher<Sip13Rounds>,
+}
+
+/// An implementation of SipHash 2-4.
+///
+/// See: <https://131002.net/siphash/>
+#[unstable(feature = "hashmap_internals", issue = "none")]
+#[deprecated(since = "1.13.0", note = "use `std::collections::hash_map::DefaultHasher` instead")]
+#[derive(Debug, Clone, Default)]
+struct SipHasher24 {
+ hasher: Hasher<Sip24Rounds>,
+}
+
+/// An implementation of SipHash 2-4.
+///
+/// See: <https://131002.net/siphash/>
+///
+/// SipHash is a general-purpose hashing function: it runs at a good
+/// speed (competitive with Spooky and City) and permits strong _keyed_
+/// hashing. This lets you key your hash tables from a strong RNG, such as
+/// [`rand::os::OsRng`](https://docs.rs/rand/latest/rand/rngs/struct.OsRng.html).
+///
+/// Although the SipHash algorithm is considered to be generally strong,
+/// it is not intended for cryptographic purposes. As such, all
+/// cryptographic uses of this implementation are _strongly discouraged_.
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "1.13.0", note = "use `std::collections::hash_map::DefaultHasher` instead")]
+#[derive(Debug, Clone, Default)]
+pub struct SipHasher(SipHasher24);
+
+#[derive(Debug)]
+struct Hasher<S: Sip> {
+ k0: u64,
+ k1: u64,
+ length: usize, // how many bytes we've processed
+ state: State, // hash State
+    tail: u64, // unprocessed bytes, in little-endian order
+ ntail: usize, // how many bytes in tail are valid
+ _marker: PhantomData<S>,
+}
+
+#[derive(Debug, Clone, Copy)]
+#[repr(C)]
+struct State {
+ // v0, v2 and v1, v3 show up in pairs in the algorithm,
+ // and simd implementations of SipHash will use vectors
+ // of v02 and v13. By placing them in this order in the struct,
+ // the compiler can pick up on just a few simd optimizations by itself.
+ v0: u64,
+ v2: u64,
+ v1: u64,
+ v3: u64,
+}
+
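+// One SipRound per invocation: the add-rotate-xor network from the SipHash
+// reference, mixing the four state words in place.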
+macro_rules! compress {
+ ($state:expr) => {{ compress!($state.v0, $state.v1, $state.v2, $state.v3) }};
+ ($v0:expr, $v1:expr, $v2:expr, $v3:expr) => {{
+ $v0 = $v0.wrapping_add($v1);
+ $v1 = $v1.rotate_left(13);
+ $v1 ^= $v0;
+ $v0 = $v0.rotate_left(32);
+ $v2 = $v2.wrapping_add($v3);
+ $v3 = $v3.rotate_left(16);
+ $v3 ^= $v2;
+ $v0 = $v0.wrapping_add($v3);
+ $v3 = $v3.rotate_left(21);
+ $v3 ^= $v0;
+ $v2 = $v2.wrapping_add($v1);
+ $v1 = $v1.rotate_left(17);
+ $v1 ^= $v2;
+ $v2 = $v2.rotate_left(32);
+ }};
+}
+
+/// Loads an integer of the desired type from a byte stream, in LE order. Uses
+/// `copy_nonoverlapping` to let the compiler generate the most efficient way
+/// to load it from a possibly unaligned address.
+///
+/// Safety: this performs unchecked indexing of `$buf` at
+/// `$i..$i+size_of::<$int_ty>()`, so that must be in-bounds.
+macro_rules! load_int_le {
+ ($buf:expr, $i:expr, $int_ty:ident) => {{
+ debug_assert!($i + mem::size_of::<$int_ty>() <= $buf.len());
+ let mut data = 0 as $int_ty;
+ ptr::copy_nonoverlapping(
+ $buf.as_ptr().add($i),
+ &mut data as *mut _ as *mut u8,
+ mem::size_of::<$int_ty>(),
+ );
+ data.to_le()
+ }};
+}
+
+/// Loads a u64 using up to 7 bytes of a byte slice. It looks clumsy but the
+/// `copy_nonoverlapping` calls that occur (via `load_int_le!`) all have fixed
+/// sizes and avoid calling `memcpy`, which is good for speed.
+///
+/// Safety: this performs unchecked indexing of `buf` at `start..start+len`, so
+/// that must be in-bounds.
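+///
+/// For example (illustrative): with `buf = [0x01, 0x02, 0x03]`, `start = 0`
+/// and `len = 3`, the `u16` load supplies bytes 0 and 1 and the final byte
+/// load supplies byte 2, giving `0x0003_0201`.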
+#[inline]
+unsafe fn u8to64_le(buf: &[u8], start: usize, len: usize) -> u64 {
+ debug_assert!(len < 8);
+ let mut i = 0; // current byte index (from LSB) in the output u64
+ let mut out = 0;
+ if i + 3 < len {
+ // SAFETY: `i` cannot be greater than `len`, and the caller must guarantee
+ // that the index start..start+len is in bounds.
+ out = unsafe { load_int_le!(buf, start + i, u32) } as u64;
+ i += 4;
+ }
+ if i + 1 < len {
+ // SAFETY: same as above.
+ out |= (unsafe { load_int_le!(buf, start + i, u16) } as u64) << (i * 8);
+ i += 2
+ }
+ if i < len {
+ // SAFETY: same as above.
+ out |= (unsafe { *buf.get_unchecked(start + i) } as u64) << (i * 8);
+ i += 1;
+ }
+ debug_assert_eq!(i, len);
+ out
+}
+
+impl SipHasher {
+ /// Creates a new `SipHasher` with the two initial keys set to 0.
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[deprecated(
+ since = "1.13.0",
+ note = "use `std::collections::hash_map::DefaultHasher` instead"
+ )]
+ #[must_use]
+ pub fn new() -> SipHasher {
+ SipHasher::new_with_keys(0, 0)
+ }
+
+ /// Creates a `SipHasher` that is keyed off the provided keys.
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[deprecated(
+ since = "1.13.0",
+ note = "use `std::collections::hash_map::DefaultHasher` instead"
+ )]
+ #[must_use]
+ pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher {
+ SipHasher(SipHasher24 { hasher: Hasher::new_with_keys(key0, key1) })
+ }
+}
+
+impl SipHasher13 {
+ /// Creates a new `SipHasher13` with the two initial keys set to 0.
+ #[inline]
+ #[unstable(feature = "hashmap_internals", issue = "none")]
+ #[deprecated(
+ since = "1.13.0",
+ note = "use `std::collections::hash_map::DefaultHasher` instead"
+ )]
+ pub fn new() -> SipHasher13 {
+ SipHasher13::new_with_keys(0, 0)
+ }
+
+ /// Creates a `SipHasher13` that is keyed off the provided keys.
+ #[inline]
+ #[unstable(feature = "hashmap_internals", issue = "none")]
+ #[deprecated(
+ since = "1.13.0",
+ note = "use `std::collections::hash_map::DefaultHasher` instead"
+ )]
+ pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher13 {
+ SipHasher13 { hasher: Hasher::new_with_keys(key0, key1) }
+ }
+}
+
+impl<S: Sip> Hasher<S> {
+ #[inline]
+ fn new_with_keys(key0: u64, key1: u64) -> Hasher<S> {
+ let mut state = Hasher {
+ k0: key0,
+ k1: key1,
+ length: 0,
+ state: State { v0: 0, v1: 0, v2: 0, v3: 0 },
+ tail: 0,
+ ntail: 0,
+ _marker: PhantomData,
+ };
+ state.reset();
+ state
+ }
+
+ #[inline]
+ fn reset(&mut self) {
+ self.length = 0;
+ self.state.v0 = self.k0 ^ 0x736f6d6570736575;
+ self.state.v1 = self.k1 ^ 0x646f72616e646f6d;
+ self.state.v2 = self.k0 ^ 0x6c7967656e657261;
+ self.state.v3 = self.k1 ^ 0x7465646279746573;
+ self.ntail = 0;
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl super::Hasher for SipHasher {
+ #[inline]
+ fn write(&mut self, msg: &[u8]) {
+ self.0.hasher.write(msg)
+ }
+
+ #[inline]
+ fn write_str(&mut self, s: &str) {
+ self.0.hasher.write_str(s);
+ }
+
+ #[inline]
+ fn finish(&self) -> u64 {
+ self.0.hasher.finish()
+ }
+}
+
+#[unstable(feature = "hashmap_internals", issue = "none")]
+impl super::Hasher for SipHasher13 {
+ #[inline]
+ fn write(&mut self, msg: &[u8]) {
+ self.hasher.write(msg)
+ }
+
+ #[inline]
+ fn write_str(&mut self, s: &str) {
+ self.hasher.write_str(s);
+ }
+
+ #[inline]
+ fn finish(&self) -> u64 {
+ self.hasher.finish()
+ }
+}
+
+impl<S: Sip> super::Hasher for Hasher<S> {
+ // Note: no integer hashing methods (`write_u*`, `write_i*`) are defined
+ // for this type. We could add them, copy the `short_write` implementation
+ // in librustc_data_structures/sip128.rs, and add `write_u*`/`write_i*`
+ // methods to `SipHasher`, `SipHasher13`, and `DefaultHasher`. This would
+ // greatly speed up integer hashing by those hashers, at the cost of
+ // slightly slowing down compile speeds on some benchmarks. See #69152 for
+ // details.
+ #[inline]
+ fn write(&mut self, msg: &[u8]) {
+ let length = msg.len();
+ self.length += length;
+
+ let mut needed = 0;
+
+ if self.ntail != 0 {
+ needed = 8 - self.ntail;
+ // SAFETY: `cmp::min(length, needed)` is guaranteed to not be over `length`
+ self.tail |= unsafe { u8to64_le(msg, 0, cmp::min(length, needed)) } << (8 * self.ntail);
+ if length < needed {
+ self.ntail += length;
+ return;
+ } else {
+ self.state.v3 ^= self.tail;
+ S::c_rounds(&mut self.state);
+ self.state.v0 ^= self.tail;
+ self.ntail = 0;
+ }
+ }
+
+ // Buffered tail is now flushed, process new input.
+ let len = length - needed;
+ let left = len & 0x7; // len % 8
+
+ let mut i = needed;
+ while i < len - left {
+ // SAFETY: because `len - left` is the biggest multiple of 8 under
+ // `len`, and because `i` starts at `needed` where `len` is `length - needed`,
+ // `i + 8` is guaranteed to be less than or equal to `length`.
+ let mi = unsafe { load_int_le!(msg, i, u64) };
+
+ self.state.v3 ^= mi;
+ S::c_rounds(&mut self.state);
+ self.state.v0 ^= mi;
+
+ i += 8;
+ }
+
+ // SAFETY: `i` is now `needed + len.div_euclid(8) * 8`,
+ // so `i + left` = `needed + len` = `length`, which is by
+ // definition equal to `msg.len()`.
+ self.tail = unsafe { u8to64_le(msg, i, left) };
+ self.ntail = left;
+ }
+
+ #[inline]
+ fn write_str(&mut self, s: &str) {
+ // This hasher works byte-wise, and `0xFF` cannot show up in a `str`,
+ // so just hashing the one extra byte is enough to be prefix-free.
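+        // For example (illustrative): hashing ("ab", "c") feeds the bytes
+        // `61 62 FF 63 FF`, while ("a", "bc") feeds `61 FF 62 63 FF`, so the
+        // two tuples cannot collide merely by moving bytes across elements.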
+ self.write(s.as_bytes());
+ self.write_u8(0xFF);
+ }
+
+ #[inline]
+ fn finish(&self) -> u64 {
+ let mut state = self.state;
+
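+        // Per the SipHash specification, the final block packs the low 8 bits
+        // of the total input length into the top byte, above the 0-7
+        // unprocessed tail bytes.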
+ let b: u64 = ((self.length as u64 & 0xff) << 56) | self.tail;
+
+ state.v3 ^= b;
+ S::c_rounds(&mut state);
+ state.v0 ^= b;
+
+ state.v2 ^= 0xff;
+ S::d_rounds(&mut state);
+
+ state.v0 ^ state.v1 ^ state.v2 ^ state.v3
+ }
+}
+
+impl<S: Sip> Clone for Hasher<S> {
+ #[inline]
+ fn clone(&self) -> Hasher<S> {
+ Hasher {
+ k0: self.k0,
+ k1: self.k1,
+ length: self.length,
+ state: self.state,
+ tail: self.tail,
+ ntail: self.ntail,
+ _marker: self._marker,
+ }
+ }
+}
+
+impl<S: Sip> Default for Hasher<S> {
+ /// Creates a `Hasher<S>` with the two initial keys set to 0.
+ #[inline]
+ fn default() -> Hasher<S> {
+ Hasher::new_with_keys(0, 0)
+ }
+}
+
+#[doc(hidden)]
+trait Sip {
+ fn c_rounds(_: &mut State);
+ fn d_rounds(_: &mut State);
+}
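+
+// Naming note (illustrative): SipHash-c-d performs `c` compression rounds per
+// message block and `d` finalization rounds, so `Sip13Rounds` below is
+// SipHash-1-3 and `Sip24Rounds` is SipHash-2-4.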
+
+#[derive(Debug, Clone, Default)]
+struct Sip13Rounds;
+
+impl Sip for Sip13Rounds {
+ #[inline]
+ fn c_rounds(state: &mut State) {
+ compress!(state);
+ }
+
+ #[inline]
+ fn d_rounds(state: &mut State) {
+ compress!(state);
+ compress!(state);
+ compress!(state);
+ }
+}
+
+#[derive(Debug, Clone, Default)]
+struct Sip24Rounds;
+
+impl Sip for Sip24Rounds {
+ #[inline]
+ fn c_rounds(state: &mut State) {
+ compress!(state);
+ compress!(state);
+ }
+
+ #[inline]
+ fn d_rounds(state: &mut State) {
+ compress!(state);
+ compress!(state);
+ compress!(state);
+ compress!(state);
+ }
+}
diff --git a/library/core/src/hint.rs b/library/core/src/hint.rs
new file mode 100644
index 000000000..81b6d5737
--- /dev/null
+++ b/library/core/src/hint.rs
@@ -0,0 +1,350 @@
+#![stable(feature = "core_hint", since = "1.27.0")]
+
+//! Hints to the compiler that affect how code should be emitted or optimized.
+//! These hints may apply at compile time or at runtime.
+
+use crate::intrinsics;
+
+/// Informs the compiler that the site which is calling this function is not
+/// reachable, possibly enabling further optimizations.
+///
+/// # Safety
+///
+/// Reaching this function is *Undefined Behavior*.
+///
+/// As the compiler assumes that all forms of Undefined Behavior can never
+/// happen, it will eliminate all branches in the surrounding code that it can
+/// determine will invariably lead to a call to `unreachable_unchecked()`.
+///
+/// If the assumptions embedded in using this function turn out to be wrong -
+/// that is, if the site which is calling `unreachable_unchecked()` is actually
+/// reachable at runtime - the compiler may have generated nonsensical machine
+/// instructions for this situation, including in seemingly unrelated code,
+/// causing difficult-to-debug problems.
+///
+/// Use this function sparingly. Consider using the [`unreachable!`] macro,
+/// which may prevent some optimizations but will safely panic in case it is
+/// actually reached at runtime. Benchmark your code to find out if using
+/// `unreachable_unchecked()` comes with a performance benefit.
+///
+/// # Examples
+///
+/// `unreachable_unchecked()` can be used in situations where the compiler
+/// can't prove invariants that were previously established. Such situations
+/// have a higher chance of occurring if those invariants are upheld by
+/// external code that the compiler can't analyze.
+/// ```
+/// fn prepare_inputs(divisors: &mut Vec<u32>) {
+/// // Note to future-self when making changes: The invariant established
+/// // here is NOT checked in `do_computation()`; if this changes, you HAVE
+/// // to change `do_computation()`.
+/// divisors.retain(|divisor| *divisor != 0)
+/// }
+///
+/// /// # Safety
+/// /// All elements of `divisor` must be non-zero.
+/// unsafe fn do_computation(i: u32, divisors: &[u32]) -> u32 {
+/// divisors.iter().fold(i, |acc, divisor| {
+/// // Convince the compiler that a division by zero can't happen here
+/// // and a check is not needed below.
+/// if *divisor == 0 {
+/// // Safety: `divisor` can't be zero because of `prepare_inputs`,
+/// // but the compiler does not know about this. We *promise*
+/// // that we always call `prepare_inputs`.
+/// std::hint::unreachable_unchecked()
+/// }
+/// // The compiler would normally introduce a check here that prevents
+/// // a division by zero. However, if `divisor` was zero, the branch
+/// // above would reach what we explicitly marked as unreachable.
+/// // The compiler concludes that `divisor` can't be zero at this point
+/// // and removes the - now proven useless - check.
+/// acc / divisor
+/// })
+/// }
+///
+/// let mut divisors = vec![2, 0, 4];
+/// prepare_inputs(&mut divisors);
+/// let result = unsafe {
+///     // Safety: prepare_inputs() guarantees that every element of divisors is non-zero
+/// do_computation(100, &divisors)
+/// };
+/// assert_eq!(result, 12);
+///
+/// ```
+///
+/// While using `unreachable_unchecked()` is perfectly sound in the following
+/// example, the compiler is able to prove that a division by zero is not
+/// possible. Benchmarking reveals that `unreachable_unchecked()` provides
+/// no benefit over using [`unreachable!`], while the latter does not introduce
+/// the possibility of Undefined Behavior.
+///
+/// ```
+/// fn div_1(a: u32, b: u32) -> u32 {
+/// use std::hint::unreachable_unchecked;
+///
+/// // `b.saturating_add(1)` is always positive (not zero),
+/// // hence `checked_div` will never return `None`.
+/// // Therefore, the else branch is unreachable.
+/// a.checked_div(b.saturating_add(1))
+/// .unwrap_or_else(|| unsafe { unreachable_unchecked() })
+/// }
+///
+/// assert_eq!(div_1(7, 0), 7);
+/// assert_eq!(div_1(9, 1), 4);
+/// assert_eq!(div_1(11, u32::MAX), 0);
+/// ```
+#[inline]
+#[stable(feature = "unreachable", since = "1.27.0")]
+#[rustc_const_stable(feature = "const_unreachable_unchecked", since = "1.57.0")]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+pub const unsafe fn unreachable_unchecked() -> ! {
+ // SAFETY: the safety contract for `intrinsics::unreachable` must
+ // be upheld by the caller.
+ unsafe { intrinsics::unreachable() }
+}
+
+/// Emits a machine instruction to signal the processor that it is running in
+/// a busy-wait spin-loop ("spin lock").
+///
+/// Upon receiving the spin-loop signal the processor can optimize its behavior by,
+/// for example, saving power or switching hyper-threads.
+///
+/// This function is different from [`thread::yield_now`], which directly
+/// yields to the system's scheduler; `spin_loop` does not interact
+/// with the operating system.
+///
+/// A common use case for `spin_loop` is implementing bounded optimistic
+/// spinning in a CAS loop in synchronization primitives. To avoid problems
+/// like priority inversion, it is strongly recommended that the spin loop is
+/// terminated after a finite amount of iterations and an appropriate blocking
+/// syscall is made.
+///
+/// **Note**: On platforms that do not support receiving spin-loop hints this
+/// function does not do anything at all.
+///
+/// # Examples
+///
+/// ```
+/// use std::sync::atomic::{AtomicBool, Ordering};
+/// use std::sync::Arc;
+/// use std::{hint, thread};
+///
+/// // A shared atomic value that threads will use to coordinate
+/// let live = Arc::new(AtomicBool::new(false));
+///
+/// // In a background thread we'll eventually set the value
+/// let bg_work = {
+/// let live = live.clone();
+/// thread::spawn(move || {
+/// // Do some work, then make the value live
+/// do_some_work();
+/// live.store(true, Ordering::Release);
+/// })
+/// };
+///
+/// // Back on our current thread, we wait for the value to be set
+/// while !live.load(Ordering::Acquire) {
+/// // The spin loop is a hint to the CPU that we're waiting, but probably
+/// // not for very long
+/// hint::spin_loop();
+/// }
+///
+/// // The value is now set
+/// # fn do_some_work() {}
+/// do_some_work();
+/// bg_work.join()?;
+/// # Ok::<(), Box<dyn core::any::Any + Send + 'static>>(())
+/// ```
+///
+/// [`thread::yield_now`]: ../../std/thread/fn.yield_now.html
+#[inline]
+#[stable(feature = "renamed_spin_loop", since = "1.49.0")]
+pub fn spin_loop() {
+ #[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "sse2"))]
+ {
+ #[cfg(target_arch = "x86")]
+ {
+ // SAFETY: the `cfg` attr ensures that we only execute this on x86 targets.
+ unsafe { crate::arch::x86::_mm_pause() };
+ }
+
+ #[cfg(target_arch = "x86_64")]
+ {
+ // SAFETY: the `cfg` attr ensures that we only execute this on x86_64 targets.
+ unsafe { crate::arch::x86_64::_mm_pause() };
+ }
+ }
+
+ // RISC-V platform spin loop hint implementation
+ {
+ // RISC-V RV32 and RV64 share the same PAUSE instruction, but they are located in different
+ // modules in `core::arch`.
+ // In this case, here we call `pause` function in each core arch module.
+ #[cfg(target_arch = "riscv32")]
+ {
+ crate::arch::riscv32::pause();
+ }
+ #[cfg(target_arch = "riscv64")]
+ {
+ crate::arch::riscv64::pause();
+ }
+ }
+
+ #[cfg(any(target_arch = "aarch64", all(target_arch = "arm", target_feature = "v6")))]
+ {
+ #[cfg(target_arch = "aarch64")]
+ {
+ // SAFETY: the `cfg` attr ensures that we only execute this on aarch64 targets.
+ unsafe { crate::arch::aarch64::__isb(crate::arch::aarch64::SY) };
+ }
+ #[cfg(target_arch = "arm")]
+ {
+ // SAFETY: the `cfg` attr ensures that we only execute this on arm targets
+ // with support for the v6 feature.
+ unsafe { crate::arch::arm::__yield() };
+ }
+ }
+}
+
+/// An identity function that *__hints__* to the compiler that it should be maximally pessimistic
+/// about what `black_box` could do.
+///
+/// Unlike [`std::convert::identity`], a Rust compiler is encouraged to assume that `black_box` can
+/// use `dummy` in any possible valid way that Rust code is allowed to without introducing undefined
+/// behavior in the calling code. This property makes `black_box` useful for writing code in which
+/// certain optimizations are not desired, such as benchmarks.
+///
+/// Note however, that `black_box` is only (and can only be) provided on a "best-effort" basis. The
+/// extent to which it can block optimizations may vary depending upon the platform and code-gen
+/// backend used. Programs cannot rely on `black_box` for *correctness* in any way.
+///
+/// [`std::convert::identity`]: crate::convert::identity
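+///
+/// # Examples
+///
+/// An illustrative benchmarking sketch (the `contains` helper and the feature
+/// gate below are assumptions for the example, not part of this API):
+///
+/// ```
+/// #![feature(bench_black_box)]
+///
+/// fn contains(haystack: &[&str], needle: &str) -> bool {
+///     haystack.iter().any(|x| x == &needle)
+/// }
+///
+/// // Without `black_box`, the optimizer could see that both arguments are
+/// // compile-time constants and fold the whole call to `true`.
+/// let result = std::hint::black_box(contains(
+///     std::hint::black_box(&["foo", "bar"]),
+///     std::hint::black_box("bar"),
+/// ));
+/// assert!(result);
+/// ```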
+#[inline]
+#[unstable(feature = "bench_black_box", issue = "64102")]
+#[rustc_const_unstable(feature = "const_black_box", issue = "none")]
+pub const fn black_box<T>(dummy: T) -> T {
+ crate::intrinsics::black_box(dummy)
+}
+
+/// An identity function that causes an `unused_must_use` warning to be
+/// triggered if the given value is not used (returned, stored in a variable,
+/// etc) by the caller.
+///
+/// This is primarily intended for use in macro-generated code, in which a
+/// [`#[must_use]` attribute][must_use] either on a type or a function would not
+/// be convenient.
+///
+/// [must_use]: https://doc.rust-lang.org/reference/attributes/diagnostics.html#the-must_use-attribute
+///
+/// # Example
+///
+/// ```
+/// #![feature(hint_must_use)]
+///
+/// use core::fmt;
+///
+/// pub struct Error(/* ... */);
+///
+/// #[macro_export]
+/// macro_rules! make_error {
+/// ($($args:expr),*) => {
+/// core::hint::must_use({
+/// let error = $crate::make_error(core::format_args!($($args),*));
+/// error
+/// })
+/// };
+/// }
+///
+/// // Implementation detail of make_error! macro.
+/// #[doc(hidden)]
+/// pub fn make_error(args: fmt::Arguments<'_>) -> Error {
+/// Error(/* ... */)
+/// }
+///
+/// fn demo() -> Option<Error> {
+/// if true {
+/// // Oops, meant to write `return Some(make_error!("..."));`
+/// Some(make_error!("..."));
+/// }
+/// None
+/// }
+/// #
+/// # // Make rustdoc not wrap the whole snippet in fn main, so that $crate::make_error works
+/// # fn main() {}
+/// ```
+///
+/// In the above example, we'd like an `unused_must_use` lint to apply to the
+/// value created by `make_error!`. However, neither `#[must_use]` on a struct
+/// nor `#[must_use]` on a function is appropriate here, so the macro expands
+/// using `core::hint::must_use` instead.
+///
+/// - We wouldn't want `#[must_use]` on the `struct Error` because that would
+/// make the following unproblematic code trigger a warning:
+///
+/// ```
+/// # struct Error;
+/// #
+/// fn f(arg: &str) -> Result<(), Error>
+/// # { Ok(()) }
+///
+/// #[test]
+/// fn t() {
+/// // Assert that `f` returns error if passed an empty string.
+/// // A value of type `Error` is unused here but that's not a problem.
+/// f("").unwrap_err();
+/// }
+/// ```
+///
+/// - Using `#[must_use]` on `fn make_error` can't help because the return value
+/// *is* used, as the right-hand side of a `let` statement. The `let`
+/// statement looks useless but is in fact necessary for ensuring that
+/// temporaries within the `format_args` expansion are not kept alive past the
+/// creation of the `Error`, as keeping them alive past that point can cause
+/// autotrait issues in async code:
+///
+/// ```
+/// # #![feature(hint_must_use)]
+/// #
+/// # struct Error;
+/// #
+/// # macro_rules! make_error {
+/// # ($($args:expr),*) => {
+/// # core::hint::must_use({
+/// # // If `let` isn't used, then `f()` produces a non-Send future.
+/// # let error = make_error(core::format_args!($($args),*));
+/// # error
+/// # })
+/// # };
+/// # }
+/// #
+/// # fn make_error(args: core::fmt::Arguments<'_>) -> Error {
+/// # Error
+/// # }
+/// #
+/// async fn f() {
+/// // Using `let` inside the make_error expansion causes temporaries like
+/// // `unsync()` to drop at the semicolon of that `let` statement, which
+/// // is prior to the await point. They would otherwise stay around until
+/// // the semicolon on *this* statement, which is after the await point,
+/// // and the enclosing Future would not implement Send.
+/// log(make_error!("look: {:p}", unsync())).await;
+/// }
+///
+/// async fn log(error: Error) {/* ... */}
+///
+/// // Returns something without a Sync impl.
+/// fn unsync() -> *const () {
+/// 0 as *const ()
+/// }
+/// #
+/// # fn test() {
+/// # fn assert_send(_: impl Send) {}
+/// # assert_send(f());
+/// # }
+/// ```
+#[unstable(feature = "hint_must_use", issue = "94745")]
+#[rustc_const_unstable(feature = "hint_must_use", issue = "94745")]
+#[must_use] // <-- :)
+pub const fn must_use<T>(value: T) -> T {
+ value
+}
diff --git a/library/core/src/internal_macros.rs b/library/core/src/internal_macros.rs
new file mode 100644
index 000000000..5d4c9ba73
--- /dev/null
+++ b/library/core/src/internal_macros.rs
@@ -0,0 +1,258 @@
+// implements the unary operator "op &T"
+// based on "op T" where T is expected to be `Copy`able
+macro_rules! forward_ref_unop {
+ (impl const $imp:ident, $method:ident for $t:ty) => {
+ forward_ref_unop!(impl const $imp, $method for $t,
+ #[stable(feature = "rust1", since = "1.0.0")]);
+ };
+ // Equivalent to the non-const version, with the addition of `rustc_const_unstable`
+ (impl const $imp:ident, $method:ident for $t:ty, #[$attr:meta]) => {
+ #[$attr]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const $imp for &$t {
+ type Output = <$t as $imp>::Output;
+
+ #[inline]
+ fn $method(self) -> <$t as $imp>::Output {
+ $imp::$method(*self)
+ }
+ }
+ };
+ (impl $imp:ident, $method:ident for $t:ty, #[$attr:meta]) => {
+ #[$attr]
+ impl $imp for &$t {
+ type Output = <$t as $imp>::Output;
+
+ #[inline]
+ fn $method(self) -> <$t as $imp>::Output {
+ $imp::$method(*self)
+ }
+ }
+ }
+}
+
+// implements binary operators "&T op U", "T op &U", "&T op &U"
+// based on "T op U" where T and U are expected to be `Copy`able
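+//
+// For example (illustrative invocation):
+//     forward_ref_binop! { impl const Add, add for u32, u32 }
+// expands to `impl const Add<u32> for &u32`, `impl const Add<&u32> for u32`,
+// and `impl const Add<&u32> for &u32`, each delegating to the by-value impl.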
+macro_rules! forward_ref_binop {
+ (impl const $imp:ident, $method:ident for $t:ty, $u:ty) => {
+ forward_ref_binop!(impl const $imp, $method for $t, $u,
+ #[stable(feature = "rust1", since = "1.0.0")]);
+ };
+ // Equivalent to the non-const version, with the addition of `rustc_const_unstable`
+ (impl const $imp:ident, $method:ident for $t:ty, $u:ty, #[$attr:meta]) => {
+ #[$attr]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl<'a> const $imp<$u> for &'a $t {
+ type Output = <$t as $imp<$u>>::Output;
+
+ #[inline]
+ fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
+ $imp::$method(*self, other)
+ }
+ }
+
+ #[$attr]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const $imp<&$u> for $t {
+ type Output = <$t as $imp<$u>>::Output;
+
+ #[inline]
+ fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
+ $imp::$method(self, *other)
+ }
+ }
+
+ #[$attr]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const $imp<&$u> for &$t {
+ type Output = <$t as $imp<$u>>::Output;
+
+ #[inline]
+ fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
+ $imp::$method(*self, *other)
+ }
+ }
+ };
+ (impl $imp:ident, $method:ident for $t:ty, $u:ty, #[$attr:meta]) => {
+ #[$attr]
+ impl<'a> $imp<$u> for &'a $t {
+ type Output = <$t as $imp<$u>>::Output;
+
+ #[inline]
+ fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
+ $imp::$method(*self, other)
+ }
+ }
+
+ #[$attr]
+ impl $imp<&$u> for $t {
+ type Output = <$t as $imp<$u>>::Output;
+
+ #[inline]
+ fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
+ $imp::$method(self, *other)
+ }
+ }
+
+ #[$attr]
+ impl $imp<&$u> for &$t {
+ type Output = <$t as $imp<$u>>::Output;
+
+ #[inline]
+ fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
+ $imp::$method(*self, *other)
+ }
+ }
+ }
+}
+
+// implements "T op= &U", based on "T op= U"
+// where U is expected to be `Copy`able
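+//
+// For example (illustrative invocation):
+//     forward_ref_op_assign! { impl const AddAssign, add_assign for u32, u32 }
+// expands to `impl const AddAssign<&u32> for u32`, delegating to the
+// by-value impl.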
+macro_rules! forward_ref_op_assign {
+ (impl $imp:ident, $method:ident for $t:ty, $u:ty) => {
+ forward_ref_op_assign!(impl $imp, $method for $t, $u,
+ #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]);
+ };
+ (impl const $imp:ident, $method:ident for $t:ty, $u:ty) => {
+ forward_ref_op_assign!(impl const $imp, $method for $t, $u,
+ #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]);
+ };
+ // Equivalent to the non-const version, with the addition of `rustc_const_unstable`
+ (impl const $imp:ident, $method:ident for $t:ty, $u:ty, #[$attr:meta]) => {
+ #[$attr]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const $imp<&$u> for $t {
+ #[inline]
+ fn $method(&mut self, other: &$u) {
+ $imp::$method(self, *other);
+ }
+ }
+ };
+ (impl $imp:ident, $method:ident for $t:ty, $u:ty, #[$attr:meta]) => {
+ #[$attr]
+ impl $imp<&$u> for $t {
+ #[inline]
+ fn $method(&mut self, other: &$u) {
+ $imp::$method(self, *other);
+ }
+ }
+ }
+}
+
+/// Create a zero-size type similar to a closure type, but named.
+macro_rules! impl_fn_for_zst {
+ ($(
+ $( #[$attr: meta] )*
+ struct $Name: ident impl$( <$( $lifetime : lifetime ),+> )? Fn =
+ |$( $arg: ident: $ArgTy: ty ),*| -> $ReturnTy: ty
+ $body: block;
+ )+) => {
+ $(
+ $( #[$attr] )*
+ struct $Name;
+
+ impl $( <$( $lifetime ),+> )? Fn<($( $ArgTy, )*)> for $Name {
+ #[inline]
+ extern "rust-call" fn call(&self, ($( $arg, )*): ($( $ArgTy, )*)) -> $ReturnTy {
+ $body
+ }
+ }
+
+ impl $( <$( $lifetime ),+> )? FnMut<($( $ArgTy, )*)> for $Name {
+ #[inline]
+ extern "rust-call" fn call_mut(
+ &mut self,
+ ($( $arg, )*): ($( $ArgTy, )*)
+ ) -> $ReturnTy {
+ Fn::call(&*self, ($( $arg, )*))
+ }
+ }
+
+ impl $( <$( $lifetime ),+> )? FnOnce<($( $ArgTy, )*)> for $Name {
+ type Output = $ReturnTy;
+
+ #[inline]
+ extern "rust-call" fn call_once(self, ($( $arg, )*): ($( $ArgTy, )*)) -> $ReturnTy {
+ Fn::call(&self, ($( $arg, )*))
+ }
+ }
+ )+
+ }
+}
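+
+// For example (illustrative invocation):
+//
+//     impl_fn_for_zst! {
+//         struct CharEscapeDefault impl Fn = |c: char| -> char::EscapeDefault {
+//             c.escape_default()
+//         };
+//     }
+//
+// defines a named zero-sized `CharEscapeDefault` that implements `Fn`,
+// `FnMut`, and `FnOnce` with that body.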
+
+/// A macro for defining `#[cfg]` if-else statements.
+///
+/// `cfg_if!` is similar to the C preprocessor's `#if`/`#elif` chain: it allows defining a cascade
+/// of `#[cfg]` cases, emitting the implementation that matches first.
+///
+/// This allows you to conveniently provide a long list of `#[cfg]`'d blocks of code without
+/// having to rewrite each clause multiple times.
+///
+/// # Example
+///
+/// ```ignore(cannot-test-this-because-non-exported-macro)
+/// cfg_if! {
+/// if #[cfg(unix)] {
+/// fn foo() { /* unix specific functionality */ }
+/// } else if #[cfg(target_pointer_width = "32")] {
+/// fn foo() { /* non-unix, 32-bit functionality */ }
+/// } else {
+/// fn foo() { /* fallback implementation */ }
+/// }
+/// }
+///
+/// # fn main() {}
+/// ```
+// This is a copy of `cfg_if!` from the `cfg_if` crate.
+// The recursive invocations should use $crate if this is ever exported.
+macro_rules! cfg_if {
+ // match if/else chains with a final `else`
+ (
+ $(
+ if #[cfg( $i_meta:meta )] { $( $i_tokens:tt )* }
+ ) else+
+ else { $( $e_tokens:tt )* }
+ ) => {
+ cfg_if! {
+ @__items () ;
+ $(
+ (( $i_meta ) ( $( $i_tokens )* )) ,
+ )+
+ (() ( $( $e_tokens )* )) ,
+ }
+ };
+
+ // Internal and recursive macro to emit all the items
+ //
+ // Collects all the previous cfgs in a list at the beginning, so they can be
+ // negated. After the semicolon is all the remaining items.
+ (@__items ( $( $_:meta , )* ) ; ) => {};
+ (
+ @__items ( $( $no:meta , )* ) ;
+ (( $( $yes:meta )? ) ( $( $tokens:tt )* )) ,
+ $( $rest:tt , )*
+ ) => {
+ // Emit all items within one block, applying an appropriate #[cfg]. The
+ // #[cfg] will require all `$yes` matchers specified and must also negate
+ // all previous matchers.
+ #[cfg(all(
+ $( $yes , )?
+ not(any( $( $no ),* ))
+ ))]
+ cfg_if! { @__identity $( $tokens )* }
+
+ // Recurse to emit all other items in `$rest`, and when we do so add all
+ // our `$yes` matchers to the list of `$no` matchers as future emissions
+ // will have to negate everything we just matched as well.
+ cfg_if! {
+ @__items ( $( $no , )* $( $yes , )? ) ;
+ $( $rest , )*
+ }
+ };
+
+    // Internal identity rule that re-emits the matched tokens as items; this
+    // indirection makes the expansion work out right for different item
+    // types, because of how macros match/expand token trees.
+ (@__identity $( $tokens:tt )* ) => {
+ $( $tokens )*
+ };
+}
diff --git a/library/core/src/intrinsics.rs b/library/core/src/intrinsics.rs
new file mode 100644
index 000000000..cabc5017f
--- /dev/null
+++ b/library/core/src/intrinsics.rs
@@ -0,0 +1,2716 @@
+//! Compiler intrinsics.
+//!
+//! The corresponding definitions are in <https://github.com/rust-lang/rust/blob/master/compiler/rustc_codegen_llvm/src/intrinsic.rs>.
+//! The corresponding const implementations are in <https://github.com/rust-lang/rust/blob/master/compiler/rustc_const_eval/src/interpret/intrinsics.rs>.
+//!
+//! # Const intrinsics
+//!
+//! Note: any changes to the constness of intrinsics should be discussed with the language team.
+//! This includes changes in the stability of the constness.
+//!
+//! In order to make an intrinsic usable at compile-time, one needs to copy the implementation
+//! from <https://github.com/rust-lang/miri/blob/master/src/shims/intrinsics.rs> to
+//! <https://github.com/rust-lang/rust/blob/master/compiler/rustc_const_eval/src/interpret/intrinsics.rs> and add a
+//! `#[rustc_const_unstable(feature = "const_such_and_such", issue = "01234")]` to the intrinsic declaration.
+//!
+//! If an intrinsic is supposed to be used from a `const fn` with a `rustc_const_stable` attribute,
+//! the intrinsic's attribute must be `rustc_const_stable`, too. Such a change should not be done
+//! without T-lang consultation, because it bakes a feature into the language that cannot be
+//! replicated in user code without compiler support.
+//!
+//! # Volatiles
+//!
+//! The volatile intrinsics provide operations intended to act on I/O
+//! memory, which are guaranteed not to be reordered by the compiler
+//! across other volatile intrinsics. See the LLVM documentation on
+//! [[volatile]].
+//!
+//! [volatile]: https://llvm.org/docs/LangRef.html#volatile-memory-accesses
+//!
+//! # Atomics
+//!
+//! The atomic intrinsics provide common atomic operations on machine
+//! words, with multiple possible memory orderings. They obey the same
+//! semantics as C++11. See the LLVM documentation on [[atomics]].
+//!
+//! [atomics]: https://llvm.org/docs/Atomics.html
+//!
+//! A quick refresher on memory ordering:
+//!
+//! * Acquire - a barrier for acquiring a lock. Subsequent reads and writes
+//! take place after the barrier.
+//! * Release - a barrier for releasing a lock. Preceding reads and writes
+//! take place before the barrier.
+//! * Sequentially consistent - sequentially consistent operations are
+//! guaranteed to happen in order. This is the standard mode for working
+//! with atomic types and is equivalent to Java's `volatile`.
+
+#![unstable(
+ feature = "core_intrinsics",
+ reason = "intrinsics are unlikely to ever be stabilized, instead \
+ they should be used through stabilized interfaces \
+ in the rest of the standard library",
+ issue = "none"
+)]
+#![allow(missing_docs)]
+
+use crate::marker::{Destruct, DiscriminantKind};
+use crate::mem;
+
+// These imports are used for simplifying intra-doc links
+#[allow(unused_imports)]
+#[cfg(all(target_has_atomic = "8", target_has_atomic = "32", target_has_atomic = "ptr"))]
+use crate::sync::atomic::{self, AtomicBool, AtomicI32, AtomicIsize, AtomicU32, Ordering};
+
+#[stable(feature = "drop_in_place", since = "1.8.0")]
+#[cfg_attr(not(bootstrap), rustc_allowed_through_unstable_modules)]
+#[deprecated(note = "no longer an intrinsic - use `ptr::drop_in_place` directly", since = "1.52.0")]
+#[inline]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+ // SAFETY: see `ptr::drop_in_place`
+ unsafe { crate::ptr::drop_in_place(to_drop) }
+}
+
+// These have been renamed.
+#[cfg(bootstrap)]
+extern "rust-intrinsic" {
+ pub fn atomic_cxchg<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ pub fn atomic_cxchg_acq<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ pub fn atomic_cxchg_rel<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ pub fn atomic_cxchg_acqrel<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ pub fn atomic_cxchg_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ pub fn atomic_cxchg_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ pub fn atomic_cxchg_failacq<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ pub fn atomic_cxchg_acq_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ pub fn atomic_cxchg_acqrel_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ pub fn atomic_cxchgweak<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ pub fn atomic_cxchgweak_acq<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ pub fn atomic_cxchgweak_rel<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ pub fn atomic_cxchgweak_acqrel<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ pub fn atomic_cxchgweak_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ pub fn atomic_cxchgweak_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ pub fn atomic_cxchgweak_failacq<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ pub fn atomic_cxchgweak_acq_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ pub fn atomic_cxchgweak_acqrel_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ pub fn atomic_load<T: Copy>(src: *const T) -> T;
+ pub fn atomic_load_acq<T: Copy>(src: *const T) -> T;
+ pub fn atomic_load_relaxed<T: Copy>(src: *const T) -> T;
+ pub fn atomic_load_unordered<T: Copy>(src: *const T) -> T;
+ pub fn atomic_store<T: Copy>(dst: *mut T, val: T);
+ pub fn atomic_store_rel<T: Copy>(dst: *mut T, val: T);
+ pub fn atomic_store_relaxed<T: Copy>(dst: *mut T, val: T);
+ pub fn atomic_store_unordered<T: Copy>(dst: *mut T, val: T);
+ pub fn atomic_xchg<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_xchg_acq<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_xchg_rel<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_xchg_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_xchg_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_xadd<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_xadd_acq<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_xadd_rel<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_xadd_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_xadd_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_xsub<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_xsub_acq<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_xsub_rel<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_xsub_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_xsub_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_and<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_and_acq<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_and_rel<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_and_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_and_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_nand<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_nand_acq<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_nand_rel<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_nand_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_nand_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_or<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_or_acq<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_or_rel<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_or_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_or_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_xor<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_xor_acq<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_xor_rel<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_xor_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_xor_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_max<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_max_acq<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_max_rel<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_max_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_max_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_min<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_min_acq<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_min_rel<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_min_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_min_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_umin<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_umin_acq<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_umin_rel<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_umin_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_umin_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_umax<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_umax_acq<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_umax_rel<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_umax_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_umax_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_fence();
+ pub fn atomic_fence_acq();
+ pub fn atomic_fence_rel();
+ pub fn atomic_fence_acqrel();
+ pub fn atomic_singlethreadfence();
+ pub fn atomic_singlethreadfence_acq();
+ pub fn atomic_singlethreadfence_rel();
+ pub fn atomic_singlethreadfence_acqrel();
+}
+
+// These have been renamed.
+#[cfg(bootstrap)]
+mod atomics {
+ pub use super::atomic_cxchg as atomic_cxchg_seqcst_seqcst;
+ pub use super::atomic_cxchg_acq as atomic_cxchg_acquire_acquire;
+ pub use super::atomic_cxchg_acq_failrelaxed as atomic_cxchg_acquire_relaxed;
+ pub use super::atomic_cxchg_acqrel as atomic_cxchg_acqrel_acquire;
+ pub use super::atomic_cxchg_acqrel_failrelaxed as atomic_cxchg_acqrel_relaxed;
+ pub use super::atomic_cxchg_failacq as atomic_cxchg_seqcst_acquire;
+ pub use super::atomic_cxchg_failrelaxed as atomic_cxchg_seqcst_relaxed;
+ pub use super::atomic_cxchg_rel as atomic_cxchg_release_relaxed;
+ pub use super::atomic_cxchg_relaxed as atomic_cxchg_relaxed_relaxed;
+
+ pub use super::atomic_cxchgweak as atomic_cxchgweak_seqcst_seqcst;
+ pub use super::atomic_cxchgweak_acq as atomic_cxchgweak_acquire_acquire;
+ pub use super::atomic_cxchgweak_acq_failrelaxed as atomic_cxchgweak_acquire_relaxed;
+ pub use super::atomic_cxchgweak_acqrel as atomic_cxchgweak_acqrel_acquire;
+ pub use super::atomic_cxchgweak_acqrel_failrelaxed as atomic_cxchgweak_acqrel_relaxed;
+ pub use super::atomic_cxchgweak_failacq as atomic_cxchgweak_seqcst_acquire;
+ pub use super::atomic_cxchgweak_failrelaxed as atomic_cxchgweak_seqcst_relaxed;
+ pub use super::atomic_cxchgweak_rel as atomic_cxchgweak_release_relaxed;
+ pub use super::atomic_cxchgweak_relaxed as atomic_cxchgweak_relaxed_relaxed;
+
+ pub use super::atomic_load as atomic_load_seqcst;
+ pub use super::atomic_load_acq as atomic_load_acquire;
+ pub use super::atomic_load_relaxed;
+ pub use super::atomic_load_unordered;
+
+ pub use super::atomic_store as atomic_store_seqcst;
+ pub use super::atomic_store_rel as atomic_store_release;
+ pub use super::atomic_store_relaxed;
+ pub use super::atomic_store_unordered;
+
+ pub use super::atomic_xchg as atomic_xchg_seqcst;
+ pub use super::atomic_xchg_acq as atomic_xchg_acquire;
+ pub use super::atomic_xchg_acqrel;
+ pub use super::atomic_xchg_rel as atomic_xchg_release;
+ pub use super::atomic_xchg_relaxed;
+
+ pub use super::atomic_xadd as atomic_xadd_seqcst;
+ pub use super::atomic_xadd_acq as atomic_xadd_acquire;
+ pub use super::atomic_xadd_acqrel;
+ pub use super::atomic_xadd_rel as atomic_xadd_release;
+ pub use super::atomic_xadd_relaxed;
+
+ pub use super::atomic_xsub as atomic_xsub_seqcst;
+ pub use super::atomic_xsub_acq as atomic_xsub_acquire;
+ pub use super::atomic_xsub_acqrel;
+ pub use super::atomic_xsub_rel as atomic_xsub_release;
+ pub use super::atomic_xsub_relaxed;
+
+ pub use super::atomic_and as atomic_and_seqcst;
+ pub use super::atomic_and_acq as atomic_and_acquire;
+ pub use super::atomic_and_acqrel;
+ pub use super::atomic_and_rel as atomic_and_release;
+ pub use super::atomic_and_relaxed;
+
+ pub use super::atomic_nand as atomic_nand_seqcst;
+ pub use super::atomic_nand_acq as atomic_nand_acquire;
+ pub use super::atomic_nand_acqrel;
+ pub use super::atomic_nand_rel as atomic_nand_release;
+ pub use super::atomic_nand_relaxed;
+
+ pub use super::atomic_or as atomic_or_seqcst;
+ pub use super::atomic_or_acq as atomic_or_acquire;
+ pub use super::atomic_or_acqrel;
+ pub use super::atomic_or_rel as atomic_or_release;
+ pub use super::atomic_or_relaxed;
+
+ pub use super::atomic_xor as atomic_xor_seqcst;
+ pub use super::atomic_xor_acq as atomic_xor_acquire;
+ pub use super::atomic_xor_acqrel;
+ pub use super::atomic_xor_rel as atomic_xor_release;
+ pub use super::atomic_xor_relaxed;
+
+ pub use super::atomic_max as atomic_max_seqcst;
+ pub use super::atomic_max_acq as atomic_max_acquire;
+ pub use super::atomic_max_acqrel;
+ pub use super::atomic_max_rel as atomic_max_release;
+ pub use super::atomic_max_relaxed;
+
+ pub use super::atomic_min as atomic_min_seqcst;
+ pub use super::atomic_min_acq as atomic_min_acquire;
+ pub use super::atomic_min_acqrel;
+ pub use super::atomic_min_rel as atomic_min_release;
+ pub use super::atomic_min_relaxed;
+
+ pub use super::atomic_umin as atomic_umin_seqcst;
+ pub use super::atomic_umin_acq as atomic_umin_acquire;
+ pub use super::atomic_umin_acqrel;
+ pub use super::atomic_umin_rel as atomic_umin_release;
+ pub use super::atomic_umin_relaxed;
+
+ pub use super::atomic_umax as atomic_umax_seqcst;
+ pub use super::atomic_umax_acq as atomic_umax_acquire;
+ pub use super::atomic_umax_acqrel;
+ pub use super::atomic_umax_rel as atomic_umax_release;
+ pub use super::atomic_umax_relaxed;
+
+ pub use super::atomic_fence as atomic_fence_seqcst;
+ pub use super::atomic_fence_acq as atomic_fence_acquire;
+ pub use super::atomic_fence_acqrel;
+ pub use super::atomic_fence_rel as atomic_fence_release;
+
+ pub use super::atomic_singlethreadfence as atomic_singlethreadfence_seqcst;
+ pub use super::atomic_singlethreadfence_acq as atomic_singlethreadfence_acquire;
+ pub use super::atomic_singlethreadfence_acqrel;
+ pub use super::atomic_singlethreadfence_rel as atomic_singlethreadfence_release;
+}
+
+#[cfg(bootstrap)]
+pub use atomics::*;
+
+#[cfg(not(bootstrap))]
+extern "rust-intrinsic" {
+ // N.B., these intrinsics take raw pointers because they mutate aliased
+ // memory, which is not valid for either `&` or `&mut`.
+
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange` method by passing
+ /// [`Ordering::Relaxed`] as both the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange`].
+ pub fn atomic_cxchg_relaxed_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange` method by passing
+ /// [`Ordering::Relaxed`] and [`Ordering::Acquire`] as the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange`].
+ pub fn atomic_cxchg_relaxed_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange` method by passing
+ /// [`Ordering::Relaxed`] and [`Ordering::SeqCst`] as the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange`].
+ pub fn atomic_cxchg_relaxed_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange` method by passing
+ /// [`Ordering::Acquire`] and [`Ordering::Relaxed`] as the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange`].
+ pub fn atomic_cxchg_acquire_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange` method by passing
+ /// [`Ordering::Acquire`] as both the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange`].
+ pub fn atomic_cxchg_acquire_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange` method by passing
+ /// [`Ordering::Acquire`] and [`Ordering::SeqCst`] as the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange`].
+ pub fn atomic_cxchg_acquire_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange` method by passing
+ /// [`Ordering::Release`] and [`Ordering::Relaxed`] as the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange`].
+ pub fn atomic_cxchg_release_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange` method by passing
+ /// [`Ordering::Release`] and [`Ordering::Acquire`] as the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange`].
+ pub fn atomic_cxchg_release_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange` method by passing
+ /// [`Ordering::Release`] and [`Ordering::SeqCst`] as the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange`].
+ pub fn atomic_cxchg_release_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange` method by passing
+ /// [`Ordering::AcqRel`] and [`Ordering::Relaxed`] as the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange`].
+ pub fn atomic_cxchg_acqrel_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange` method by passing
+ /// [`Ordering::AcqRel`] and [`Ordering::Acquire`] as the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange`].
+ pub fn atomic_cxchg_acqrel_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange` method by passing
+ /// [`Ordering::AcqRel`] and [`Ordering::SeqCst`] as the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange`].
+ pub fn atomic_cxchg_acqrel_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange` method by passing
+ /// [`Ordering::SeqCst`] and [`Ordering::Relaxed`] as the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange`].
+ pub fn atomic_cxchg_seqcst_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange` method by passing
+ /// [`Ordering::SeqCst`] and [`Ordering::Acquire`] as the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange`].
+ pub fn atomic_cxchg_seqcst_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange` method by passing
+ /// [`Ordering::SeqCst`] as both the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange`].
+ pub fn atomic_cxchg_seqcst_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange_weak` method by passing
+ /// [`Ordering::Relaxed`] as both the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange_weak`].
+ pub fn atomic_cxchgweak_relaxed_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange_weak` method by passing
+ /// [`Ordering::Relaxed`] and [`Ordering::Acquire`] as the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange_weak`].
+ pub fn atomic_cxchgweak_relaxed_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange_weak` method by passing
+ /// [`Ordering::Relaxed`] and [`Ordering::SeqCst`] as the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange_weak`].
+ pub fn atomic_cxchgweak_relaxed_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange_weak` method by passing
+ /// [`Ordering::Acquire`] and [`Ordering::Relaxed`] as the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange_weak`].
+ pub fn atomic_cxchgweak_acquire_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange_weak` method by passing
+ /// [`Ordering::Acquire`] as both the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange_weak`].
+ pub fn atomic_cxchgweak_acquire_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange_weak` method by passing
+ /// [`Ordering::Acquire`] and [`Ordering::SeqCst`] as the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange_weak`].
+ pub fn atomic_cxchgweak_acquire_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange_weak` method by passing
+ /// [`Ordering::Release`] and [`Ordering::Relaxed`] as the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange_weak`].
+ pub fn atomic_cxchgweak_release_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange_weak` method by passing
+ /// [`Ordering::Release`] and [`Ordering::Acquire`] as the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange_weak`].
+ pub fn atomic_cxchgweak_release_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange_weak` method by passing
+ /// [`Ordering::Release`] and [`Ordering::SeqCst`] as the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange_weak`].
+ pub fn atomic_cxchgweak_release_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange_weak` method by passing
+ /// [`Ordering::AcqRel`] and [`Ordering::Relaxed`] as the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange_weak`].
+ pub fn atomic_cxchgweak_acqrel_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange_weak` method by passing
+ /// [`Ordering::AcqRel`] and [`Ordering::Acquire`] as the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange_weak`].
+ pub fn atomic_cxchgweak_acqrel_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange_weak` method by passing
+ /// [`Ordering::AcqRel`] and [`Ordering::SeqCst`] as the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange_weak`].
+ pub fn atomic_cxchgweak_acqrel_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange_weak` method by passing
+ /// [`Ordering::SeqCst`] and [`Ordering::Relaxed`] as the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange_weak`].
+ pub fn atomic_cxchgweak_seqcst_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange_weak` method by passing
+ /// [`Ordering::SeqCst`] and [`Ordering::Acquire`] as the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange_weak`].
+ pub fn atomic_cxchgweak_seqcst_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange_weak` method by passing
+ /// [`Ordering::SeqCst`] as both the success and failure parameters.
+ /// For example, [`AtomicBool::compare_exchange_weak`].
+ pub fn atomic_cxchgweak_seqcst_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+
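+ // Editorial sketch (not part of the upstream source): the stabilized
+ // `compare_exchange_weak` is typically driven in a retry loop, because the
+ // weak variant is allowed to fail spuriously even when the comparison
+ // succeeds:
+ //
+ //     use std::sync::atomic::{AtomicUsize, Ordering};
+ //     let a = AtomicUsize::new(5);
+ //     let mut old = a.load(Ordering::Relaxed);
+ //     loop {
+ //         let new = old * 2;
+ //         match a.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
+ //             Ok(_) => break,
+ //             Err(x) => old = x, // raced with another thread (or failed spuriously)
+ //         }
+ //     }
+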
+ /// Loads the current value of the pointer.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `load` method by passing
+ /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::load`].
+ pub fn atomic_load_seqcst<T: Copy>(src: *const T) -> T;
+ /// Loads the current value of the pointer.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `load` method by passing
+ /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::load`].
+ pub fn atomic_load_acquire<T: Copy>(src: *const T) -> T;
+ /// Loads the current value of the pointer.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `load` method by passing
+ /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::load`].
+ pub fn atomic_load_relaxed<T: Copy>(src: *const T) -> T;
+ /// Loads the current value of the pointer, using LLVM's `Unordered`
+ /// ordering, which is weaker than [`Ordering::Relaxed`].
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn atomic_load_unordered<T: Copy>(src: *const T) -> T;
+
+ /// Stores the value at the specified memory location.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `store` method by passing
+ /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::store`].
+ pub fn atomic_store_seqcst<T: Copy>(dst: *mut T, val: T);
+ /// Stores the value at the specified memory location.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `store` method by passing
+ /// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::store`].
+ pub fn atomic_store_release<T: Copy>(dst: *mut T, val: T);
+ /// Stores the value at the specified memory location.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `store` method by passing
+ /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::store`].
+ pub fn atomic_store_relaxed<T: Copy>(dst: *mut T, val: T);
+ /// Stores the value at the specified memory location, using LLVM's
+ /// `Unordered` ordering, which is weaker than [`Ordering::Relaxed`].
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn atomic_store_unordered<T: Copy>(dst: *mut T, val: T);
+
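+ // Editorial sketch: the stabilized `load`/`store` pair on the atomic types,
+ // here with release/acquire orderings:
+ //
+ //     use std::sync::atomic::{AtomicBool, Ordering};
+ //     let flag = AtomicBool::new(false);
+ //     flag.store(true, Ordering::Release);
+ //     assert!(flag.load(Ordering::Acquire));
+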
+ /// Stores the value at the specified memory location, returning the old value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `swap` method by passing
+ /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::swap`].
+ pub fn atomic_xchg_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Stores the value at the specified memory location, returning the old value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `swap` method by passing
+ /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::swap`].
+ pub fn atomic_xchg_acquire<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Stores the value at the specified memory location, returning the old value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `swap` method by passing
+ /// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::swap`].
+ pub fn atomic_xchg_release<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Stores the value at the specified memory location, returning the old value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `swap` method by passing
+ /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicBool::swap`].
+ pub fn atomic_xchg_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Stores the value at the specified memory location, returning the old value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `swap` method by passing
+ /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::swap`].
+ pub fn atomic_xchg_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+
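+ // Editorial sketch: `swap` atomically replaces the value and returns the
+ // previous one, which makes a simple test-and-set flag:
+ //
+ //     use std::sync::atomic::{AtomicBool, Ordering};
+ //     let lock = AtomicBool::new(false);
+ //     let was_locked = lock.swap(true, Ordering::Acquire);
+ //     assert!(!was_locked); // we acquired the flag
+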
+ /// Adds to the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_add` method by passing
+ /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicIsize::fetch_add`].
+ pub fn atomic_xadd_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Adds to the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_add` method by passing
+ /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicIsize::fetch_add`].
+ pub fn atomic_xadd_acquire<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Adds to the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_add` method by passing
+ /// [`Ordering::Release`] as the `order`. For example, [`AtomicIsize::fetch_add`].
+ pub fn atomic_xadd_release<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Adds to the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_add` method by passing
+ /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicIsize::fetch_add`].
+ pub fn atomic_xadd_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Adds to the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_add` method by passing
+ /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicIsize::fetch_add`].
+ pub fn atomic_xadd_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+
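+ // Editorial sketch: `fetch_add` returns the previous value; the same shape
+ // applies to `fetch_sub` and the other `fetch_*` read-modify-write methods
+ // below:
+ //
+ //     use std::sync::atomic::{AtomicIsize, Ordering};
+ //     let counter = AtomicIsize::new(0);
+ //     assert_eq!(counter.fetch_add(10, Ordering::SeqCst), 0);
+ //     assert_eq!(counter.load(Ordering::SeqCst), 10);
+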
+ /// Subtract from the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_sub` method by passing
+ /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicIsize::fetch_sub`].
+ pub fn atomic_xsub_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Subtract from the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_sub` method by passing
+ /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicIsize::fetch_sub`].
+ pub fn atomic_xsub_acquire<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Subtract from the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_sub` method by passing
+ /// [`Ordering::Release`] as the `order`. For example, [`AtomicIsize::fetch_sub`].
+ pub fn atomic_xsub_release<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Subtract from the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_sub` method by passing
+ /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicIsize::fetch_sub`].
+ pub fn atomic_xsub_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Subtract from the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_sub` method by passing
+ /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicIsize::fetch_sub`].
+ pub fn atomic_xsub_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+
+ /// Bitwise and with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_and` method by passing
+ /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::fetch_and`].
+ pub fn atomic_and_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise and with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_and` method by passing
+ /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::fetch_and`].
+ pub fn atomic_and_acquire<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise and with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_and` method by passing
+ /// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::fetch_and`].
+ pub fn atomic_and_release<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise and with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_and` method by passing
+ /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicBool::fetch_and`].
+ pub fn atomic_and_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise and with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_and` method by passing
+ /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::fetch_and`].
+ pub fn atomic_and_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+
+ /// Bitwise nand with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`AtomicBool`] type via the `fetch_nand` method by passing
+ /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::fetch_nand`].
+ pub fn atomic_nand_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise nand with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`AtomicBool`] type via the `fetch_nand` method by passing
+ /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::fetch_nand`].
+ pub fn atomic_nand_acquire<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise nand with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`AtomicBool`] type via the `fetch_nand` method by passing
+ /// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::fetch_nand`].
+ pub fn atomic_nand_release<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise nand with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`AtomicBool`] type via the `fetch_nand` method by passing
+ /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicBool::fetch_nand`].
+ pub fn atomic_nand_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise nand with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`AtomicBool`] type via the `fetch_nand` method by passing
+ /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::fetch_nand`].
+ pub fn atomic_nand_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+
+ /// Bitwise or with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_or` method by passing
+ /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::fetch_or`].
+ pub fn atomic_or_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise or with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_or` method by passing
+ /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::fetch_or`].
+ pub fn atomic_or_acquire<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise or with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_or` method by passing
+ /// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::fetch_or`].
+ pub fn atomic_or_release<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise or with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_or` method by passing
+ /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicBool::fetch_or`].
+ pub fn atomic_or_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise or with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_or` method by passing
+ /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::fetch_or`].
+ pub fn atomic_or_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+
+ /// Bitwise xor with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_xor` method by passing
+ /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::fetch_xor`].
+ pub fn atomic_xor_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise xor with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_xor` method by passing
+ /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::fetch_xor`].
+ pub fn atomic_xor_acquire<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise xor with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_xor` method by passing
+ /// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::fetch_xor`].
+ pub fn atomic_xor_release<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise xor with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_xor` method by passing
+ /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicBool::fetch_xor`].
+ pub fn atomic_xor_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise xor with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_xor` method by passing
+ /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::fetch_xor`].
+ pub fn atomic_xor_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+
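+ // Editorial sketch: the bitwise `fetch_*` methods are handy for flag words;
+ // each returns the previous value:
+ //
+ //     use std::sync::atomic::{AtomicU8, Ordering};
+ //     let flags = AtomicU8::new(0b0011);
+ //     flags.fetch_or(0b0100, Ordering::SeqCst);   // set a bit    -> 0b0111
+ //     flags.fetch_and(!0b0001, Ordering::SeqCst); // clear a bit  -> 0b0110
+ //     flags.fetch_xor(0b0010, Ordering::SeqCst);  // toggle a bit -> 0b0100
+ //     assert_eq!(flags.load(Ordering::SeqCst), 0b0100);
+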
+ /// Maximum with the current value using a signed comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] signed integer types via the `fetch_max` method by passing
+ /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicI32::fetch_max`].
+ pub fn atomic_max_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Maximum with the current value using a signed comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] signed integer types via the `fetch_max` method by passing
+ /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicI32::fetch_max`].
+ pub fn atomic_max_acquire<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Maximum with the current value using a signed comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] signed integer types via the `fetch_max` method by passing
+ /// [`Ordering::Release`] as the `order`. For example, [`AtomicI32::fetch_max`].
+ pub fn atomic_max_release<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Maximum with the current value using a signed comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] signed integer types via the `fetch_max` method by passing
+ /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicI32::fetch_max`].
+ pub fn atomic_max_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Maximum with the current value using a signed comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] signed integer types via the `fetch_max` method by passing
+ /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicI32::fetch_max`].
+ pub fn atomic_max_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+
+ /// Minimum with the current value using a signed comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] signed integer types via the `fetch_min` method by passing
+ /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicI32::fetch_min`].
+ pub fn atomic_min_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Minimum with the current value using a signed comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] signed integer types via the `fetch_min` method by passing
+ /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicI32::fetch_min`].
+ pub fn atomic_min_acquire<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Minimum with the current value using a signed comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] signed integer types via the `fetch_min` method by passing
+ /// [`Ordering::Release`] as the `order`. For example, [`AtomicI32::fetch_min`].
+ pub fn atomic_min_release<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Minimum with the current value using a signed comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] signed integer types via the `fetch_min` method by passing
+ /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicI32::fetch_min`].
+ pub fn atomic_min_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Minimum with the current value using a signed comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] signed integer types via the `fetch_min` method by passing
+ /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicI32::fetch_min`].
+ pub fn atomic_min_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+
+ /// Minimum with the current value using an unsigned comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] unsigned integer types via the `fetch_min` method by passing
+ /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicU32::fetch_min`].
+ pub fn atomic_umin_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Minimum with the current value using an unsigned comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] unsigned integer types via the `fetch_min` method by passing
+ /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicU32::fetch_min`].
+ pub fn atomic_umin_acquire<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Minimum with the current value using an unsigned comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] unsigned integer types via the `fetch_min` method by passing
+ /// [`Ordering::Release`] as the `order`. For example, [`AtomicU32::fetch_min`].
+ pub fn atomic_umin_release<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Minimum with the current value using an unsigned comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] unsigned integer types via the `fetch_min` method by passing
+ /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicU32::fetch_min`].
+ pub fn atomic_umin_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Minimum with the current value using an unsigned comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] unsigned integer types via the `fetch_min` method by passing
+ /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicU32::fetch_min`].
+ pub fn atomic_umin_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+
+ /// Maximum with the current value using an unsigned comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] unsigned integer types via the `fetch_max` method by passing
+ /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicU32::fetch_max`].
+ pub fn atomic_umax_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Maximum with the current value using an unsigned comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] unsigned integer types via the `fetch_max` method by passing
+ /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicU32::fetch_max`].
+ pub fn atomic_umax_acquire<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Maximum with the current value using an unsigned comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] unsigned integer types via the `fetch_max` method by passing
+ /// [`Ordering::Release`] as the `order`. For example, [`AtomicU32::fetch_max`].
+ pub fn atomic_umax_release<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Maximum with the current value using an unsigned comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] unsigned integer types via the `fetch_max` method by passing
+ /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicU32::fetch_max`].
+ pub fn atomic_umax_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Maximum with the current value using an unsigned comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] unsigned integer types via the `fetch_max` method by passing
+ /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicU32::fetch_max`].
+ pub fn atomic_umax_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+
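+ // Editorial sketch: the signed/unsigned distinction above is carried by the
+ // element type of the stabilized atomics; `fetch_max`/`fetch_min` return the
+ // previous value:
+ //
+ //     use std::sync::atomic::{AtomicI32, Ordering};
+ //     let v = AtomicI32::new(23);
+ //     assert_eq!(v.fetch_max(42, Ordering::SeqCst), 23);
+ //     assert_eq!(v.fetch_min(0, Ordering::SeqCst), 42);
+ //     assert_eq!(v.load(Ordering::SeqCst), 0);
+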
+ /// An atomic fence.
+ ///
+ /// The stabilized version of this intrinsic is available in
+ /// [`atomic::fence`] by passing [`Ordering::SeqCst`]
+ /// as the `order`.
+ pub fn atomic_fence_seqcst();
+ /// An atomic fence.
+ ///
+ /// The stabilized version of this intrinsic is available in
+ /// [`atomic::fence`] by passing [`Ordering::Acquire`]
+ /// as the `order`.
+ pub fn atomic_fence_acquire();
+ /// An atomic fence.
+ ///
+ /// The stabilized version of this intrinsic is available in
+ /// [`atomic::fence`] by passing [`Ordering::Release`]
+ /// as the `order`.
+ pub fn atomic_fence_release();
+ /// An atomic fence.
+ ///
+ /// The stabilized version of this intrinsic is available in
+ /// [`atomic::fence`] by passing [`Ordering::AcqRel`]
+ /// as the `order`.
+ pub fn atomic_fence_acqrel();
+
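+ // Editorial sketch: a release fence pairs with an acquire fence through
+ // relaxed accesses to the same atomic (fence-to-fence synchronization):
+ //
+ //     use std::sync::atomic::{fence, AtomicBool, Ordering};
+ //     let ready = AtomicBool::new(false);
+ //     // writer: publish prior writes before the relaxed flag store
+ //     fence(Ordering::Release);
+ //     ready.store(true, Ordering::Relaxed);
+ //     // reader: once `true` is observed, the acquire fence synchronizes
+ //     // with the release fence above
+ //     if ready.load(Ordering::Relaxed) {
+ //         fence(Ordering::Acquire);
+ //     }
+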
+ /// A compiler-only memory barrier.
+ ///
+ /// Memory accesses will never be reordered across this barrier by the
+ /// compiler, but no instructions will be emitted for it. This is
+ /// appropriate for operations on the same thread that may be preempted,
+ /// such as when interacting with signal handlers.
+ ///
+ /// The stabilized version of this intrinsic is available in
+ /// [`atomic::compiler_fence`] by passing [`Ordering::SeqCst`]
+ /// as the `order`.
+ pub fn atomic_singlethreadfence_seqcst();
+ /// A compiler-only memory barrier.
+ ///
+ /// Memory accesses will never be reordered across this barrier by the
+ /// compiler, but no instructions will be emitted for it. This is
+ /// appropriate for operations on the same thread that may be preempted,
+ /// such as when interacting with signal handlers.
+ ///
+ /// The stabilized version of this intrinsic is available in
+ /// [`atomic::compiler_fence`] by passing [`Ordering::Acquire`]
+ /// as the `order`.
+ pub fn atomic_singlethreadfence_acquire();
+ /// A compiler-only memory barrier.
+ ///
+ /// Memory accesses will never be reordered across this barrier by the
+ /// compiler, but no instructions will be emitted for it. This is
+ /// appropriate for operations on the same thread that may be preempted,
+ /// such as when interacting with signal handlers.
+ ///
+ /// The stabilized version of this intrinsic is available in
+ /// [`atomic::compiler_fence`] by passing [`Ordering::Release`]
+ /// as the `order`.
+ pub fn atomic_singlethreadfence_release();
+ /// A compiler-only memory barrier.
+ ///
+ /// Memory accesses will never be reordered across this barrier by the
+ /// compiler, but no instructions will be emitted for it. This is
+ /// appropriate for operations on the same thread that may be preempted,
+ /// such as when interacting with signal handlers.
+ ///
+ /// The stabilized version of this intrinsic is available in
+ /// [`atomic::compiler_fence`] by passing [`Ordering::AcqRel`]
+ /// as the `order`.
+ pub fn atomic_singlethreadfence_acqrel();
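+
+ // Editorial sketch: `compiler_fence` only constrains compiler reordering,
+ // e.g. between a main thread and a signal handler running on it:
+ //
+ //     use std::sync::atomic::{compiler_fence, AtomicBool, AtomicUsize, Ordering};
+ //     static IMPORTANT: AtomicUsize = AtomicUsize::new(0);
+ //     static READY: AtomicBool = AtomicBool::new(false);
+ //     IMPORTANT.store(42, Ordering::Relaxed);
+ //     compiler_fence(Ordering::Release); // the store above cannot sink below
+ //     READY.store(true, Ordering::Relaxed);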
+}
+
+// These intrinsics have been renamed; the aliases below map the old names
+// to the new ones. To be removed once stdarch and panic_unwind have been
+// updated.
+#[cfg(not(bootstrap))]
+mod atomics {
+ pub use super::atomic_cxchg_acqrel_acquire as atomic_cxchg_acqrel;
+ pub use super::atomic_cxchg_acqrel_relaxed as atomic_cxchg_acqrel_failrelaxed;
+ pub use super::atomic_cxchg_acquire_acquire as atomic_cxchg_acq;
+ pub use super::atomic_cxchg_acquire_relaxed as atomic_cxchg_acq_failrelaxed;
+ pub use super::atomic_cxchg_relaxed_relaxed as atomic_cxchg_relaxed;
+ pub use super::atomic_cxchg_release_relaxed as atomic_cxchg_rel;
+ pub use super::atomic_cxchg_seqcst_acquire as atomic_cxchg_failacq;
+ pub use super::atomic_cxchg_seqcst_relaxed as atomic_cxchg_failrelaxed;
+ pub use super::atomic_cxchg_seqcst_seqcst as atomic_cxchg;
+ pub use super::atomic_store_seqcst as atomic_store;
+}
+
+#[cfg(not(bootstrap))]
+pub use atomics::*;
+
+extern "rust-intrinsic" {
+ /// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction
+ /// if supported; otherwise, it is a no-op.
+ /// Prefetches have no effect on the behavior of the program but can change its performance
+ /// characteristics.
+ ///
+ /// The `locality` argument must be a constant integer and is a temporal locality specifier
+ /// ranging from 0 (no locality) to 3 (extremely local; keep in cache).
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn prefetch_read_data<T>(data: *const T, locality: i32);
+ /// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction
+ /// if supported; otherwise, it is a no-op.
+ /// Prefetches have no effect on the behavior of the program but can change its performance
+ /// characteristics.
+ ///
+ /// The `locality` argument must be a constant integer and is a temporal locality specifier
+ /// ranging from 0 (no locality) to 3 (extremely local; keep in cache).
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn prefetch_write_data<T>(data: *const T, locality: i32);
+ /// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction
+ /// if supported; otherwise, it is a no-op.
+ /// Prefetches have no effect on the behavior of the program but can change its performance
+ /// characteristics.
+ ///
+ /// The `locality` argument must be a constant integer and is a temporal locality specifier
+ /// ranging from 0 (no locality) to 3 (extremely local; keep in cache).
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn prefetch_read_instruction<T>(data: *const T, locality: i32);
+ /// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction
+ /// if supported; otherwise, it is a no-op.
+ /// Prefetches have no effect on the behavior of the program but can change its performance
+ /// characteristics.
+ ///
+ /// The `locality` argument must be a constant integer and is a temporal locality specifier
+ /// ranging from 0 (no locality) to 3 (extremely local; keep in cache).
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn prefetch_write_instruction<T>(data: *const T, locality: i32);
+
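+ // Editorial sketch (nightly-only, since these intrinsics have no stable
+ // counterpart): they are callable through the `core_intrinsics` feature:
+ //
+ //     #![feature(core_intrinsics)]
+ //     let data = [0u8; 4096];
+ //     unsafe {
+ //         // hint that `data` will be read soon; keep it in cache (locality 3)
+ //         std::intrinsics::prefetch_read_data(data.as_ptr(), 3);
+ //     }
+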
+ /// Magic intrinsic that derives its meaning from attributes
+ /// attached to the function.
+ ///
+ /// For example, dataflow uses this to inject static assertions so
+ /// that `rustc_peek(potentially_uninitialized)` would actually
+ /// double-check that dataflow did indeed compute that it is
+ /// uninitialized at that point in the control flow.
+ ///
+ /// This intrinsic should not be used outside of the compiler.
+ pub fn rustc_peek<T>(_: T) -> T;
+
+ /// Aborts the execution of the process.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// [`std::process::abort`](../../std/process/fn.abort.html) is to be preferred if possible,
+ /// as its behavior is more user-friendly and more stable.
+ ///
+ /// On most platforms, the current implementation of `intrinsics::abort` invokes an invalid
+ /// instruction. On Unix, the process will probably terminate with a signal like `SIGABRT`,
+ /// `SIGILL`, `SIGTRAP`, `SIGSEGV`, or `SIGBUS`. The precise behavior is not guaranteed and
+ /// not stable.
+ pub fn abort() -> !;
+
+ /// Informs the optimizer that this point in the code is not reachable,
+ /// enabling further optimizations.
+ ///
+ /// N.B., this is very different from the `unreachable!()` macro: unlike the
+ /// macro, which panics when it is executed, it is *undefined behavior* to
+ /// reach code marked with this function.
+ ///
+ /// The stabilized version of this intrinsic is [`core::hint::unreachable_unchecked`].
+ #[rustc_const_stable(feature = "const_unreachable_unchecked", since = "1.57.0")]
+ pub fn unreachable() -> !;
+
+ /// Informs the optimizer that a condition is always true.
+ /// If the condition is false, the behavior is undefined.
+ ///
+ /// No code is generated for this intrinsic, but the optimizer will try
+ /// to preserve it (and its condition) between passes, which may interfere
+ /// with optimization of surrounding code and reduce performance. It should
+ /// not be used if the invariant can be discovered by the optimizer on its
+ /// own, or if it does not enable any significant optimizations.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ #[rustc_const_unstable(feature = "const_assume", issue = "76972")]
+ pub fn assume(b: bool);
+
+ /// Hints to the compiler that the branch condition is likely to be true.
+ /// Returns the value passed to it.
+ ///
+ /// Any use other than with `if` statements will probably not have an effect.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ #[rustc_const_unstable(feature = "const_likely", issue = "none")]
+ pub fn likely(b: bool) -> bool;
+
+ /// Hints to the compiler that the branch condition is likely to be false.
+ /// Returns the value passed to it.
+ ///
+ /// Any use other than with `if` statements will probably not have an effect.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ #[rustc_const_unstable(feature = "const_likely", issue = "none")]
+ pub fn unlikely(b: bool) -> bool;
+
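+ // Editorial sketch (nightly-only): `likely`/`unlikely` wrap a branch
+ // condition and return it unchanged:
+ //
+ //     #![feature(core_intrinsics)]
+ //     fn checked_div(a: u32, b: u32) -> Option<u32> {
+ //         if std::intrinsics::unlikely(b == 0) { None } else { Some(a / b) }
+ //     }
+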
+ /// Executes a breakpoint trap, for inspection by a debugger.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn breakpoint();
+
+ /// The size of a type in bytes.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// More specifically, this is the offset in bytes between successive
+ /// items of the same type, including alignment padding.
+ ///
+ /// The stabilized version of this intrinsic is [`core::mem::size_of`].
+ #[rustc_const_stable(feature = "const_size_of", since = "1.40.0")]
+ pub fn size_of<T>() -> usize;
+
+ /// The minimum alignment of a type.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// The stabilized version of this intrinsic is [`core::mem::align_of`].
+ #[rustc_const_stable(feature = "const_min_align_of", since = "1.40.0")]
+ pub fn min_align_of<T>() -> usize;
+ /// The preferred alignment of a type.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ /// It's "tracking issue" is [#91971](https://github.com/rust-lang/rust/issues/91971).
+ #[rustc_const_unstable(feature = "const_pref_align_of", issue = "91971")]
+ pub fn pref_align_of<T>() -> usize;
+
+ /// The size of the referenced value in bytes.
+ ///
+ /// The stabilized version of this intrinsic is [`mem::size_of_val`].
+ #[rustc_const_unstable(feature = "const_size_of_val", issue = "46571")]
+ pub fn size_of_val<T: ?Sized>(_: *const T) -> usize;
+ /// The required alignment of the referenced value.
+ ///
+ /// The stabilized version of this intrinsic is [`core::mem::align_of_val`].
+ #[rustc_const_unstable(feature = "const_align_of_val", issue = "46571")]
+ pub fn min_align_of_val<T: ?Sized>(_: *const T) -> usize;
+
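+ // Editorial sketch: the stabilized forms live in `core::mem`:
+ //
+ //     use std::mem;
+ //     assert_eq!(mem::size_of::<u32>(), 4);
+ //     assert_eq!(mem::align_of::<u32>(), 4);
+ //     let s: &[u8] = &[1, 2, 3];
+ //     assert_eq!(mem::size_of_val(s), 3); // needs the pointee, not just the type
+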
+ /// Gets a static string slice containing the name of a type.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// The stabilized version of this intrinsic is [`core::any::type_name`].
+ #[rustc_const_unstable(feature = "const_type_name", issue = "63084")]
+ pub fn type_name<T: ?Sized>() -> &'static str;
+
+ /// Gets an identifier which is globally unique to the specified type. This
+ /// function will return the same value for a type regardless of which
+ /// crate it is invoked in.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// The stabilized version of this intrinsic is [`core::any::TypeId::of`].
+ #[rustc_const_unstable(feature = "const_type_id", issue = "77125")]
+ pub fn type_id<T: ?Sized + 'static>() -> u64;
+
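+ // Editorial sketch: the stabilized counterparts in `core::any`; note that
+ // the exact `type_name` output is not guaranteed:
+ //
+ //     use std::any::{type_name, TypeId};
+ //     println!("{}", type_name::<Option<String>>()); // e.g. "core::option::Option<alloc::string::String>"
+ //     assert_eq!(TypeId::of::<&str>(), TypeId::of::<&'static str>());
+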
+ /// A guard for unsafe functions that cannot ever be executed if `T` is uninhabited:
+ /// this will statically either panic or do nothing.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ #[rustc_const_stable(feature = "const_assert_type", since = "1.59.0")]
+ pub fn assert_inhabited<T>();
+
+ /// A guard for unsafe functions that cannot ever be executed if `T` does not permit
+ /// zero-initialization: this will statically either panic or do nothing.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ #[rustc_const_unstable(feature = "const_assert_type2", issue = "none")]
+ pub fn assert_zero_valid<T>();
+
+ /// A guard for unsafe functions that cannot ever be executed if `T` has invalid
+ /// bit patterns: this will statically either panic or do nothing.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ #[rustc_const_unstable(feature = "const_assert_type2", issue = "none")]
+ pub fn assert_uninit_valid<T>();
+
+ /// Gets a reference to a static `Location` indicating where it was called.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// Consider using [`core::panic::Location::caller`] instead.
+ #[rustc_const_unstable(feature = "const_caller_location", issue = "76156")]
+ pub fn caller_location() -> &'static crate::panic::Location<'static>;
+
+ /// Moves a value out of scope without running drop glue.
+ ///
+ /// This exists solely for [`mem::forget_unsized`]; normal `forget` uses
+ /// `ManuallyDrop` instead.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ #[rustc_const_unstable(feature = "const_intrinsic_forget", issue = "none")]
+ pub fn forget<T: ?Sized>(_: T);
+
+ /// Reinterprets the bits of a value of one type as another type.
+ ///
+ /// Both types must have the same size. Compilation will fail if this is not guaranteed.
+ ///
+ /// `transmute` is semantically equivalent to a bitwise move of one type
+ /// into another. It copies the bits from the source value into the
+ /// destination value, then forgets the original. Note that source and destination
+ /// are passed by-value, which means if `T` or `U` contain padding, that padding
+ /// is *not* guaranteed to be preserved by `transmute`.
+ ///
+ /// Both the argument and the result must be [valid](../../nomicon/what-unsafe-does.html) at
+ /// their given type. Violating this condition leads to [undefined behavior][ub]. The compiler
+ /// will generate code *assuming that you, the programmer, ensure that there will never be
+ /// undefined behavior*. It is therefore your responsibility to guarantee that every value
+ /// passed to `transmute` is valid at both types `T` and `U`. Failing to uphold this condition
+ /// may lead to unexpected and unstable compilation results. This makes `transmute` **incredibly
+ /// unsafe**. `transmute` should be the absolute last resort.
+ ///
+ /// Transmuting pointers to integers in a `const` context is [undefined behavior][ub].
+ /// Any attempt to use the resulting value for integer operations will abort const-evaluation.
+ /// (And even outside `const`, such transmutation is touching on many unspecified aspects of the
+ /// Rust memory model and should be avoided. See below for alternatives.)
+ ///
+ /// Because `transmute` is a by-value operation, alignment of the *transmuted values
+ /// themselves* is not a concern. As with any other function, the compiler already ensures
+ /// both `T` and `U` are properly aligned. However, when transmuting values that *point
+ /// elsewhere* (such as pointers, references, boxes…), the caller has to ensure proper
+ /// alignment of the pointed-to values.
+ ///
+ /// The [nomicon](../../nomicon/transmutes.html) has additional documentation.
+ ///
+ /// [ub]: ../../reference/behavior-considered-undefined.html
+ ///
+ /// # Examples
+ ///
+ /// There are a few things that `transmute` is really useful for.
+ ///
+ /// Turning a pointer into a function pointer. This is *not* portable to
+ /// machines where function pointers and data pointers have different sizes.
+ ///
+ /// ```
+ /// fn foo() -> i32 {
+ /// 0
+ /// }
+ /// // Crucially, we `as`-cast to a raw pointer before `transmute`ing to a function pointer.
+ /// // This avoids an integer-to-pointer `transmute`, which can be problematic.
+ /// // Transmuting between raw pointers and function pointers (i.e., two pointer types) is fine.
+ /// let pointer = foo as *const ();
+ /// let function = unsafe {
+ /// std::mem::transmute::<*const (), fn() -> i32>(pointer)
+ /// };
+ /// assert_eq!(function(), 0);
+ /// ```
+ ///
+ /// Extending a lifetime, or shortening an invariant lifetime. This is
+ /// advanced, very unsafe Rust!
+ ///
+ /// ```
+ /// struct R<'a>(&'a i32);
+ /// unsafe fn extend_lifetime<'b>(r: R<'b>) -> R<'static> {
+ /// std::mem::transmute::<R<'b>, R<'static>>(r)
+ /// }
+ ///
+ /// unsafe fn shorten_invariant_lifetime<'b, 'c>(r: &'b mut R<'static>)
+ /// -> &'b mut R<'c> {
+ /// std::mem::transmute::<&'b mut R<'static>, &'b mut R<'c>>(r)
+ /// }
+ /// ```
+ ///
+ /// # Alternatives
+ ///
+ /// Don't despair: many uses of `transmute` can be achieved through other means.
+ /// Below are common applications of `transmute` which can be replaced with safer
+ /// constructs.
+ ///
+ /// Turning raw bytes (`&[u8]`) into `u32`, `f64`, etc.:
+ ///
+ /// ```
+ /// let raw_bytes = [0x78, 0x56, 0x34, 0x12];
+ ///
+ /// let num = unsafe {
+ /// std::mem::transmute::<[u8; 4], u32>(raw_bytes)
+ /// };
+ ///
+ /// // use `u32::from_ne_bytes` instead
+ /// let num = u32::from_ne_bytes(raw_bytes);
+ /// // or use `u32::from_le_bytes` or `u32::from_be_bytes` to specify the endianness
+ /// let num = u32::from_le_bytes(raw_bytes);
+ /// assert_eq!(num, 0x12345678);
+ /// let num = u32::from_be_bytes(raw_bytes);
+ /// assert_eq!(num, 0x78563412);
+ /// ```
+ ///
+ /// Turning a pointer into a `usize`:
+ ///
+ /// ```no_run
+ /// let ptr = &0;
+ /// let ptr_num_transmute = unsafe {
+ /// std::mem::transmute::<&i32, usize>(ptr)
+ /// };
+ ///
+ /// // Use an `as` cast instead
+ /// let ptr_num_cast = ptr as *const i32 as usize;
+ /// ```
+ ///
+ /// Note that using `transmute` to turn a pointer into a `usize` is (as noted above) [undefined
+ /// behavior][ub] in `const` contexts. Also outside of consts, this operation might not behave
+ /// as expected -- this is touching on many unspecified aspects of the Rust memory model.
+ /// Depending on what the code is doing, the following alternatives are preferable to
+ /// pointer-to-integer transmutation:
+ /// - If the code just wants to store data of arbitrary type in some buffer and needs to pick a
+ /// type for that buffer, it can use [`MaybeUninit`][mem::MaybeUninit].
+ /// - If the code actually wants to work on the address the pointer points to, it can use `as`
+ /// casts or [`ptr.addr()`][pointer::addr].
+ ///
+ /// Turning a `*mut T` into an `&mut T`:
+ ///
+ /// ```
+ /// let ptr: *mut i32 = &mut 0;
+ /// let ref_transmuted = unsafe {
+ /// std::mem::transmute::<*mut i32, &mut i32>(ptr)
+ /// };
+ ///
+ /// // Use a reborrow instead
+ /// let ref_casted = unsafe { &mut *ptr };
+ /// ```
+ ///
+ /// Turning an `&mut T` into an `&mut U`:
+ ///
+ /// ```
+ /// let ptr = &mut 0;
+ /// let val_transmuted = unsafe {
+ /// std::mem::transmute::<&mut i32, &mut u32>(ptr)
+ /// };
+ ///
+ /// // Now, put together `as` and reborrowing. Note the chaining of `as`:
+ /// // `as` is not transitive, so two casts are needed.
+ /// let val_casts = unsafe { &mut *(ptr as *mut i32 as *mut u32) };
+ /// ```
+ ///
+ /// Turning an `&str` into a `&[u8]`:
+ ///
+ /// ```
+ /// // this is not a good way to do this.
+ /// let slice = unsafe { std::mem::transmute::<&str, &[u8]>("Rust") };
+ /// assert_eq!(slice, &[82, 117, 115, 116]);
+ ///
+ /// // You could use `str::as_bytes`
+ /// let slice = "Rust".as_bytes();
+ /// assert_eq!(slice, &[82, 117, 115, 116]);
+ ///
+ /// // Or, just use a byte string, if you have control over the string
+ /// // literal
+ /// assert_eq!(b"Rust", &[82, 117, 115, 116]);
+ /// ```
+ ///
+ /// Turning a `Vec<&T>` into a `Vec<Option<&T>>`.
+ ///
+ /// To transmute the inner type of the contents of a container, you must make sure to not
+ /// violate any of the container's invariants. For `Vec`, this means that both the size
+ /// *and alignment* of the inner types have to match. Other containers might rely on the
+ /// size of the type, alignment, or even the `TypeId`, in which case transmuting wouldn't
+ /// be possible at all without violating the container invariants.
+ ///
+ /// ```
+ /// let store = [0, 1, 2, 3];
+ /// let v_orig = store.iter().collect::<Vec<&i32>>();
+ ///
+ /// // clone the vector as we will reuse them later
+ /// let v_clone = v_orig.clone();
+ ///
+ /// // Using transmute: this relies on the unspecified data layout of `Vec`, which is a
+ /// // bad idea and could cause Undefined Behavior.
+ /// // However, it is no-copy.
+ /// let v_transmuted = unsafe {
+ /// std::mem::transmute::<Vec<&i32>, Vec<Option<&i32>>>(v_clone)
+ /// };
+ ///
+ /// let v_clone = v_orig.clone();
+ ///
+ /// // This is the suggested, safe way.
+ /// // It does copy the entire vector, though, into a new array.
+ /// let v_collected = v_clone.into_iter()
+ /// .map(Some)
+ /// .collect::<Vec<Option<&i32>>>();
+ ///
+ /// let v_clone = v_orig.clone();
+ ///
+ /// // This is the proper no-copy, unsafe way of "transmuting" a `Vec`, without relying on the
+ /// // data layout. Instead of literally calling `transmute`, we perform a pointer cast, but
+ /// // in terms of converting the original inner type (`&i32`) to the new one (`Option<&i32>`),
+ /// // this has all the same caveats. Besides the information provided above, also consult the
+ /// // [`from_raw_parts`] documentation.
+ /// let v_from_raw = unsafe {
+ // FIXME Update this when vec_into_raw_parts is stabilized
+ /// // Ensure the original vector is not dropped.
+ /// let mut v_clone = std::mem::ManuallyDrop::new(v_clone);
+ /// Vec::from_raw_parts(v_clone.as_mut_ptr() as *mut Option<&i32>,
+ /// v_clone.len(),
+ /// v_clone.capacity())
+ /// };
+ /// ```
+ ///
+ /// [`from_raw_parts`]: ../../std/vec/struct.Vec.html#method.from_raw_parts
+ ///
+ /// Implementing `split_at_mut`:
+ ///
+ /// ```
+ /// use std::{slice, mem};
+ ///
+ /// // There are multiple ways to do this, and there are multiple problems
+ /// // with the following (transmute) way.
+ /// fn split_at_mut_transmute<T>(slice: &mut [T], mid: usize)
+ /// -> (&mut [T], &mut [T]) {
+ /// let len = slice.len();
+ /// assert!(mid <= len);
+ /// unsafe {
+ /// let slice2 = mem::transmute::<&mut [T], &mut [T]>(slice);
+ /// // First, transmute is not type-safe; all it checks is that T and
+ /// // U are of the same size. Second, right here, you have two
+ /// // mutable references pointing to the same memory.
+ /// (&mut slice[0..mid], &mut slice2[mid..len])
+ /// }
+ /// }
+ ///
+ /// // This gets rid of the type safety problems; `&mut *` will *only* give
+ /// // you an `&mut T` from an `&mut T` or `*mut T`.
+ /// fn split_at_mut_casts<T>(slice: &mut [T], mid: usize)
+ /// -> (&mut [T], &mut [T]) {
+ /// let len = slice.len();
+ /// assert!(mid <= len);
+ /// unsafe {
+ /// let slice2 = &mut *(slice as *mut [T]);
+ /// // however, you still have two mutable references pointing to
+ /// // the same memory.
+ /// (&mut slice[0..mid], &mut slice2[mid..len])
+ /// }
+ /// }
+ ///
+ /// // This is how the standard library does it. This is the best method, if
+ /// // you need to do something like this.
+ /// fn split_at_stdlib<T>(slice: &mut [T], mid: usize)
+ /// -> (&mut [T], &mut [T]) {
+ /// let len = slice.len();
+ /// assert!(mid <= len);
+ /// unsafe {
+ /// let ptr = slice.as_mut_ptr();
+ /// // This now has three mutable references pointing at the same
+ /// // memory. `slice`, the rvalue ret.0, and the rvalue ret.1.
+ /// // `slice` is never used after `let ptr = ...`, and so one can
+ /// // treat it as "dead", and therefore, you only have two real
+ /// // mutable slices.
+ /// (slice::from_raw_parts_mut(ptr, mid),
+ /// slice::from_raw_parts_mut(ptr.add(mid), len - mid))
+ /// }
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg_attr(not(bootstrap), rustc_allowed_through_unstable_modules)]
+ #[rustc_const_stable(feature = "const_transmute", since = "1.56.0")]
+ #[rustc_diagnostic_item = "transmute"]
+ pub fn transmute<T, U>(e: T) -> U;
+
+ /// Returns `true` if the actual type given as `T` requires drop
+ /// glue; returns `false` if the actual type provided for `T`
+ /// implements `Copy`.
+ ///
+ /// If the actual type neither requires drop glue nor implements
+ /// `Copy`, then the return value of this function is unspecified.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// The stabilized version of this intrinsic is [`mem::needs_drop`](crate::mem::needs_drop).
+ #[rustc_const_stable(feature = "const_needs_drop", since = "1.40.0")]
+ pub fn needs_drop<T: ?Sized>() -> bool;
+
+ /// Calculates the offset from a pointer.
+ ///
+ /// This is implemented as an intrinsic to avoid converting to and from an
+ /// integer, since the conversion would throw away aliasing information.
+ ///
+ /// # Safety
+ ///
+ /// Both the starting and resulting pointer must be either in bounds or one
+ /// byte past the end of an allocated object. If either pointer is out of
+ /// bounds or arithmetic overflow occurs then any further use of the
+ /// returned value will result in undefined behavior.
+ ///
+ /// The stabilized version of this intrinsic is [`pointer::offset`].
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
+ pub fn offset<T>(dst: *const T, offset: isize) -> *const T;
+
+ /// Calculates the offset from a pointer, potentially wrapping.
+ ///
+ /// This is implemented as an intrinsic to avoid converting to and from an
+ /// integer, since the conversion inhibits certain optimizations.
+ ///
+ /// # Safety
+ ///
+ /// Unlike the `offset` intrinsic, this intrinsic does not restrict the
+ /// resulting pointer to point into or one byte past the end of an allocated
+ /// object, and it wraps with two's complement arithmetic. The resulting
+ /// value is not necessarily valid to be used to actually access memory.
+ ///
+ /// The stabilized version of this intrinsic is [`pointer::wrapping_offset`].
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
+ pub fn arith_offset<T>(dst: *const T, offset: isize) -> *const T;
+
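+ // Editorial sketch: the stabilized `offset`/`add` require the result to stay
+ // in bounds, while `wrapping_offset` does not (but the result may not be
+ // dereferenceable):
+ //
+ //     let v = [1u32, 2, 3];
+ //     let p = v.as_ptr();
+ //     unsafe { assert_eq!(*p.offset(2), 3); }
+ //     let _past_end = p.wrapping_offset(v.len() as isize); // ok to form, not to read
+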
+ /// Equivalent to the appropriate `llvm.memcpy.p0i8.p0i8.*` intrinsic, with
+ /// a size of `count * size_of::<T>()` and an alignment of
+ /// `min_align_of::<T>()`.
+ ///
+ /// The volatile parameter is set to `true`, so it will not be optimized out
+ /// unless size is equal to zero.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn volatile_copy_nonoverlapping_memory<T>(dst: *mut T, src: *const T, count: usize);
+ /// Equivalent to the appropriate `llvm.memmove.p0i8.p0i8.*` intrinsic, with
+ /// a size of `count * size_of::<T>()` and an alignment of
+ /// `min_align_of::<T>()`.
+ ///
+ /// The volatile parameter is set to `true`, so it will not be optimized out
+ /// unless size is equal to zero.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn volatile_copy_memory<T>(dst: *mut T, src: *const T, count: usize);
+ /// Equivalent to the appropriate `llvm.memset.p0i8.*` intrinsic, with a
+ /// size of `count * size_of::<T>()` and an alignment of
+ /// `min_align_of::<T>()`.
+ ///
+ /// The volatile parameter is set to `true`, so it will not be optimized out
+ /// unless size is equal to zero.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn volatile_set_memory<T>(dst: *mut T, val: u8, count: usize);
+
+ /// Performs a volatile load from the `src` pointer.
+ ///
+ /// The stabilized version of this intrinsic is [`core::ptr::read_volatile`].
+ pub fn volatile_load<T>(src: *const T) -> T;
+ /// Performs a volatile store to the `dst` pointer.
+ ///
+ /// The stabilized version of this intrinsic is [`core::ptr::write_volatile`].
+ pub fn volatile_store<T>(dst: *mut T, val: T);
+
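+ // Editorial sketch: the stabilized volatile accessors in `core::ptr`:
+ //
+ //     let mut x: u32 = 0;
+ //     let p = &mut x as *mut u32;
+ //     unsafe {
+ //         std::ptr::write_volatile(p, 42); // never elided by the optimizer
+ //         assert_eq!(std::ptr::read_volatile(p), 42);
+ //     }
+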
+ /// Performs a volatile load from the `src` pointer.
+ /// The pointer is not required to be aligned.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn unaligned_volatile_load<T>(src: *const T) -> T;
+ /// Performs a volatile store to the `dst` pointer.
+ /// The pointer is not required to be aligned.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn unaligned_volatile_store<T>(dst: *mut T, val: T);
+
+ /// Returns the square root of an `f32`
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f32::sqrt`](../../std/primitive.f32.html#method.sqrt)
+ pub fn sqrtf32(x: f32) -> f32;
+ /// Returns the square root of an `f64`
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f64::sqrt`](../../std/primitive.f64.html#method.sqrt)
+ pub fn sqrtf64(x: f64) -> f64;
+
+ /// Raises an `f32` to an integer power.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f32::powi`](../../std/primitive.f32.html#method.powi)
+ pub fn powif32(a: f32, x: i32) -> f32;
+ /// Raises an `f64` to an integer power.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f64::powi`](../../std/primitive.f64.html#method.powi)
+ pub fn powif64(a: f64, x: i32) -> f64;
+
+ /// Returns the sine of an `f32`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f32::sin`](../../std/primitive.f32.html#method.sin)
+ pub fn sinf32(x: f32) -> f32;
+ /// Returns the sine of an `f64`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f64::sin`](../../std/primitive.f64.html#method.sin)
+ pub fn sinf64(x: f64) -> f64;
+
+ /// Returns the cosine of an `f32`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f32::cos`](../../std/primitive.f32.html#method.cos)
+ pub fn cosf32(x: f32) -> f32;
+ /// Returns the cosine of an `f64`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f64::cos`](../../std/primitive.f64.html#method.cos)
+ pub fn cosf64(x: f64) -> f64;
+
+ /// Raises an `f32` to an `f32` power.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f32::powf`](../../std/primitive.f32.html#method.powf)
+ pub fn powf32(a: f32, x: f32) -> f32;
+ /// Raises an `f64` to an `f64` power.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f64::powf`](../../std/primitive.f64.html#method.powf)
+ pub fn powf64(a: f64, x: f64) -> f64;
+
+ /// Returns the exponential of an `f32`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f32::exp`](../../std/primitive.f32.html#method.exp)
+ pub fn expf32(x: f32) -> f32;
+ /// Returns the exponential of an `f64`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f64::exp`](../../std/primitive.f64.html#method.exp)
+ pub fn expf64(x: f64) -> f64;
+
+ /// Returns 2 raised to the power of an `f32`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f32::exp2`](../../std/primitive.f32.html#method.exp2)
+ pub fn exp2f32(x: f32) -> f32;
+ /// Returns 2 raised to the power of an `f64`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f64::exp2`](../../std/primitive.f64.html#method.exp2)
+ pub fn exp2f64(x: f64) -> f64;
+
+ /// Returns the natural logarithm of an `f32`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f32::ln`](../../std/primitive.f32.html#method.ln)
+ pub fn logf32(x: f32) -> f32;
+ /// Returns the natural logarithm of an `f64`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f64::ln`](../../std/primitive.f64.html#method.ln)
+ pub fn logf64(x: f64) -> f64;
+
+ /// Returns the base 10 logarithm of an `f32`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f32::log10`](../../std/primitive.f32.html#method.log10)
+ pub fn log10f32(x: f32) -> f32;
+ /// Returns the base 10 logarithm of an `f64`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f64::log10`](../../std/primitive.f64.html#method.log10)
+ pub fn log10f64(x: f64) -> f64;
+
+ /// Returns the base 2 logarithm of an `f32`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f32::log2`](../../std/primitive.f32.html#method.log2)
+ pub fn log2f32(x: f32) -> f32;
+ /// Returns the base 2 logarithm of an `f64`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f64::log2`](../../std/primitive.f64.html#method.log2)
+ pub fn log2f64(x: f64) -> f64;
+
+ /// Returns `a * b + c` for `f32` values.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f32::mul_add`](../../std/primitive.f32.html#method.mul_add)
+ pub fn fmaf32(a: f32, b: f32, c: f32) -> f32;
+ /// Returns `a * b + c` for `f64` values.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f64::mul_add`](../../std/primitive.f64.html#method.mul_add)
+ pub fn fmaf64(a: f64, b: f64, c: f64) -> f64;
+
+ /// Returns the absolute value of an `f32`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f32::abs`](../../std/primitive.f32.html#method.abs)
+ pub fn fabsf32(x: f32) -> f32;
+ /// Returns the absolute value of an `f64`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f64::abs`](../../std/primitive.f64.html#method.abs)
+ pub fn fabsf64(x: f64) -> f64;
+
+ /// Returns the minimum of two `f32` values.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f32::min`]
+ pub fn minnumf32(x: f32, y: f32) -> f32;
+ /// Returns the minimum of two `f64` values.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f64::min`]
+ pub fn minnumf64(x: f64, y: f64) -> f64;
+ /// Returns the maximum of two `f32` values.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f32::max`]
+ pub fn maxnumf32(x: f32, y: f32) -> f32;
+ /// Returns the maximum of two `f64` values.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f64::max`]
+ pub fn maxnumf64(x: f64, y: f64) -> f64;
+
+ /// Copies the sign from `y` to `x` for `f32` values.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f32::copysign`](../../std/primitive.f32.html#method.copysign)
+ pub fn copysignf32(x: f32, y: f32) -> f32;
+ /// Copies the sign from `y` to `x` for `f64` values.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f64::copysign`](../../std/primitive.f64.html#method.copysign)
+ pub fn copysignf64(x: f64, y: f64) -> f64;
+
+ /// Returns the largest integer less than or equal to an `f32`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f32::floor`](../../std/primitive.f32.html#method.floor)
+ pub fn floorf32(x: f32) -> f32;
+ /// Returns the largest integer less than or equal to an `f64`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f64::floor`](../../std/primitive.f64.html#method.floor)
+ pub fn floorf64(x: f64) -> f64;
+
+ /// Returns the smallest integer greater than or equal to an `f32`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f32::ceil`](../../std/primitive.f32.html#method.ceil)
+ pub fn ceilf32(x: f32) -> f32;
+ /// Returns the smallest integer greater than or equal to an `f64`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f64::ceil`](../../std/primitive.f64.html#method.ceil)
+ pub fn ceilf64(x: f64) -> f64;
+
+ /// Returns the integer part of an `f32`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f32::trunc`](../../std/primitive.f32.html#method.trunc)
+ pub fn truncf32(x: f32) -> f32;
+ /// Returns the integer part of an `f64`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f64::trunc`](../../std/primitive.f64.html#method.trunc)
+ pub fn truncf64(x: f64) -> f64;
+
+ /// Returns the nearest integer to an `f32`. May raise an inexact floating-point exception
+ /// if the argument is not an integer.
+ pub fn rintf32(x: f32) -> f32;
+ /// Returns the nearest integer to an `f64`. May raise an inexact floating-point exception
+ /// if the argument is not an integer.
+ pub fn rintf64(x: f64) -> f64;
+
+ /// Returns the nearest integer to an `f32`.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn nearbyintf32(x: f32) -> f32;
+ /// Returns the nearest integer to an `f64`.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn nearbyintf64(x: f64) -> f64;
+
+ /// Returns the nearest integer to an `f32`. Rounds half-way cases away from zero.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f32::round`](../../std/primitive.f32.html#method.round)
+ pub fn roundf32(x: f32) -> f32;
+ /// Returns the nearest integer to an `f64`. Rounds half-way cases away from zero.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f64::round`](../../std/primitive.f64.html#method.round)
+ pub fn roundf64(x: f64) -> f64;
+
+ /// Float addition that allows optimizations based on algebraic rules.
+ /// May assume inputs are finite.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn fadd_fast<T: Copy>(a: T, b: T) -> T;
+
+ /// Float subtraction that allows optimizations based on algebraic rules.
+ /// May assume inputs are finite.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn fsub_fast<T: Copy>(a: T, b: T) -> T;
+
+ /// Float multiplication that allows optimizations based on algebraic rules.
+ /// May assume inputs are finite.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn fmul_fast<T: Copy>(a: T, b: T) -> T;
+
+ /// Float division that allows optimizations based on algebraic rules.
+ /// May assume inputs are finite.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn fdiv_fast<T: Copy>(a: T, b: T) -> T;
+
+ /// Float remainder that allows optimizations based on algebraic rules.
+ /// May assume inputs are finite.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn frem_fast<T: Copy>(a: T, b: T) -> T;
+
+ /// Converts with LLVM's `fptoui`/`fptosi`, which may return `undef` for values out of range
+ /// (<https://github.com/rust-lang/rust/issues/10184>).
+ ///
+ /// Stabilized as [`f32::to_int_unchecked`] and [`f64::to_int_unchecked`].
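+ ///
+ /// A minimal nightly-only sketch; `3.7_f32` is in range for `i32`, so the
+ /// call is sound and truncates toward zero.
+ ///
+ /// ```
+ /// #![feature(core_intrinsics)]
+ ///
+ /// use std::intrinsics::float_to_int_unchecked;
+ ///
+ /// let x: i32 = unsafe { float_to_int_unchecked(3.7_f32) };
+ /// assert_eq!(x, 3);
+ /// ```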
+ pub fn float_to_int_unchecked<Float: Copy, Int: Copy>(value: Float) -> Int;
+
+ /// Returns the number of bits set in an integer type `T`.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `count_ones` method. For example,
+ /// [`u32::count_ones`]
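+ ///
+ /// # Examples
+ ///
+ /// A small nightly-only sketch; the behavior matches the stable
+ /// `count_ones` methods.
+ ///
+ /// ```
+ /// #![feature(core_intrinsics)]
+ ///
+ /// use std::intrinsics::ctpop;
+ ///
+ /// let x = 0b0101_1100_u8;
+ /// assert_eq!(ctpop(x), 4);
+ /// ```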
+ #[rustc_const_stable(feature = "const_ctpop", since = "1.40.0")]
+ pub fn ctpop<T: Copy>(x: T) -> T;
+
+ /// Returns the number of leading unset bits (zeroes) in an integer type `T`.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `leading_zeros` method. For example,
+ /// [`u32::leading_zeros`]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(core_intrinsics)]
+ ///
+ /// use std::intrinsics::ctlz;
+ ///
+ /// let x = 0b0001_1100_u8;
+ /// let num_leading = ctlz(x);
+ /// assert_eq!(num_leading, 3);
+ /// ```
+ ///
+ /// An `x` with value `0` will return the bit width of `T`.
+ ///
+ /// ```
+ /// #![feature(core_intrinsics)]
+ ///
+ /// use std::intrinsics::ctlz;
+ ///
+ /// let x = 0u16;
+ /// let num_leading = ctlz(x);
+ /// assert_eq!(num_leading, 16);
+ /// ```
+ #[rustc_const_stable(feature = "const_ctlz", since = "1.40.0")]
+ pub fn ctlz<T: Copy>(x: T) -> T;
+
+ /// Like `ctlz`, but extra-unsafe as it returns `undef` when
+ /// given an `x` with value `0`.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(core_intrinsics)]
+ ///
+ /// use std::intrinsics::ctlz_nonzero;
+ ///
+ /// let x = 0b0001_1100_u8;
+ /// let num_leading = unsafe { ctlz_nonzero(x) };
+ /// assert_eq!(num_leading, 3);
+ /// ```
+ #[rustc_const_stable(feature = "constctlz", since = "1.50.0")]
+ pub fn ctlz_nonzero<T: Copy>(x: T) -> T;
+
+ /// Returns the number of trailing unset bits (zeroes) in an integer type `T`.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `trailing_zeros` method. For example,
+ /// [`u32::trailing_zeros`]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(core_intrinsics)]
+ ///
+ /// use std::intrinsics::cttz;
+ ///
+ /// let x = 0b0011_1000_u8;
+ /// let num_trailing = cttz(x);
+ /// assert_eq!(num_trailing, 3);
+ /// ```
+ ///
+ /// An `x` with value `0` will return the bit width of `T`:
+ ///
+ /// ```
+ /// #![feature(core_intrinsics)]
+ ///
+ /// use std::intrinsics::cttz;
+ ///
+ /// let x = 0u16;
+ /// let num_trailing = cttz(x);
+ /// assert_eq!(num_trailing, 16);
+ /// ```
+ #[rustc_const_stable(feature = "const_cttz", since = "1.40.0")]
+ pub fn cttz<T: Copy>(x: T) -> T;
+
+ /// Like `cttz`, but extra-unsafe as it returns `undef` when
+ /// given an `x` with value `0`.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(core_intrinsics)]
+ ///
+ /// use std::intrinsics::cttz_nonzero;
+ ///
+ /// let x = 0b0011_1000_u8;
+ /// let num_trailing = unsafe { cttz_nonzero(x) };
+ /// assert_eq!(num_trailing, 3);
+ /// ```
+ #[rustc_const_stable(feature = "const_cttz_nonzero", since = "1.53.0")]
+ pub fn cttz_nonzero<T: Copy>(x: T) -> T;
+
+ /// Reverses the bytes in an integer type `T`.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `swap_bytes` method. For example,
+ /// [`u32::swap_bytes`]
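+ ///
+ /// # Examples
+ ///
+ /// A small nightly-only sketch; the behavior matches the stable
+ /// `swap_bytes` methods.
+ ///
+ /// ```
+ /// #![feature(core_intrinsics)]
+ ///
+ /// use std::intrinsics::bswap;
+ ///
+ /// assert_eq!(bswap(0x12345678_u32), 0x78563412_u32);
+ /// ```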
+ #[rustc_const_stable(feature = "const_bswap", since = "1.40.0")]
+ pub fn bswap<T: Copy>(x: T) -> T;
+
+ /// Reverses the bits in an integer type `T`.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `reverse_bits` method. For example,
+ /// [`u32::reverse_bits`]
+ #[rustc_const_stable(feature = "const_bitreverse", since = "1.40.0")]
+ pub fn bitreverse<T: Copy>(x: T) -> T;
+
+ /// Performs checked integer addition.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `overflowing_add` method. For example,
+ /// [`u32::overflowing_add`]
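+ ///
+ /// # Examples
+ ///
+ /// A small nightly-only sketch; the behavior matches the stable
+ /// `overflowing_add` methods.
+ ///
+ /// ```
+ /// #![feature(core_intrinsics)]
+ ///
+ /// use std::intrinsics::add_with_overflow;
+ ///
+ /// // 250 + 10 wraps around for `u8`, and the flag reports the overflow.
+ /// assert_eq!(add_with_overflow(250_u8, 10_u8), (4, true));
+ /// ```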
+ #[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")]
+ pub fn add_with_overflow<T: Copy>(x: T, y: T) -> (T, bool);
+
+ /// Performs checked integer subtraction.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `overflowing_sub` method. For example,
+ /// [`u32::overflowing_sub`]
+ #[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")]
+ pub fn sub_with_overflow<T: Copy>(x: T, y: T) -> (T, bool);
+
+ /// Performs checked integer multiplication.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `overflowing_mul` method. For example,
+ /// [`u32::overflowing_mul`]
+ #[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")]
+ pub fn mul_with_overflow<T: Copy>(x: T, y: T) -> (T, bool);
+
+ /// Performs an exact division, resulting in undefined behavior when
+ /// `x % y != 0`, `y == 0`, or `x == T::MIN && y == -1`.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn exact_div<T: Copy>(x: T, y: T) -> T;
+
+ /// Performs an unchecked division, resulting in undefined behavior
+ /// when `y == 0` or `x == T::MIN && y == -1`.
+ ///
+ /// Safe wrappers for this intrinsic are available on the integer
+ /// primitives via the `checked_div` method. For example,
+ /// [`u32::checked_div`]
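+ ///
+ /// # Examples
+ ///
+ /// A small nightly-only sketch; `10 / 3` can hit neither UB condition,
+ /// so the call is sound.
+ ///
+ /// ```
+ /// #![feature(core_intrinsics)]
+ ///
+ /// use std::intrinsics::unchecked_div;
+ ///
+ /// let q = unsafe { unchecked_div(10_u32, 3_u32) };
+ /// assert_eq!(q, 3);
+ /// ```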
+ #[rustc_const_stable(feature = "const_int_unchecked_div", since = "1.52.0")]
+ pub fn unchecked_div<T: Copy>(x: T, y: T) -> T;
+ /// Returns the remainder of an unchecked division, resulting in
+ /// undefined behavior when `y == 0` or `x == T::MIN && y == -1`.
+ ///
+ /// Safe wrappers for this intrinsic are available on the integer
+ /// primitives via the `checked_rem` method. For example,
+ /// [`u32::checked_rem`]
+ #[rustc_const_stable(feature = "const_int_unchecked_rem", since = "1.52.0")]
+ pub fn unchecked_rem<T: Copy>(x: T, y: T) -> T;
+
+ /// Performs an unchecked left shift, resulting in undefined behavior when
+ /// `y < 0` or `y >= N`, where `N` is the width of `T` in bits.
+ ///
+ /// Safe wrappers for this intrinsic are available on the integer
+ /// primitives via the `checked_shl` method. For example,
+ /// [`u32::checked_shl`]
+ #[rustc_const_stable(feature = "const_int_unchecked", since = "1.40.0")]
+ pub fn unchecked_shl<T: Copy>(x: T, y: T) -> T;
+ /// Performs an unchecked right shift, resulting in undefined behavior when
+ /// `y < 0` or `y >= N`, where `N` is the width of `T` in bits.
+ ///
+ /// Safe wrappers for this intrinsic are available on the integer
+ /// primitives via the `checked_shr` method. For example,
+ /// [`u32::checked_shr`]
+ #[rustc_const_stable(feature = "const_int_unchecked", since = "1.40.0")]
+ pub fn unchecked_shr<T: Copy>(x: T, y: T) -> T;
+
+ /// Returns the result of an unchecked addition, resulting in
+ /// undefined behavior when `x + y > T::MAX` or `x + y < T::MIN`.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ #[rustc_const_unstable(feature = "const_int_unchecked_arith", issue = "none")]
+ pub fn unchecked_add<T: Copy>(x: T, y: T) -> T;
+
+ /// Returns the result of an unchecked subtraction, resulting in
+ /// undefined behavior when `x - y > T::MAX` or `x - y < T::MIN`.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ #[rustc_const_unstable(feature = "const_int_unchecked_arith", issue = "none")]
+ pub fn unchecked_sub<T: Copy>(x: T, y: T) -> T;
+
+ /// Returns the result of an unchecked multiplication, resulting in
+ /// undefined behavior when `x * y > T::MAX` or `x * y < T::MIN`.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ #[rustc_const_unstable(feature = "const_int_unchecked_arith", issue = "none")]
+ pub fn unchecked_mul<T: Copy>(x: T, y: T) -> T;
+
+ /// Performs rotate left.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `rotate_left` method. For example,
+ /// [`u32::rotate_left`]
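+ ///
+ /// # Examples
+ ///
+ /// A small nightly-only sketch; the behavior matches the stable
+ /// `rotate_left` methods.
+ ///
+ /// ```
+ /// #![feature(core_intrinsics)]
+ ///
+ /// use std::intrinsics::rotate_left;
+ ///
+ /// // The high bit wraps around to the low end.
+ /// assert_eq!(rotate_left(0b1000_0001_u8, 1_u8), 0b0000_0011_u8);
+ /// ```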
+ #[rustc_const_stable(feature = "const_int_rotate", since = "1.40.0")]
+ pub fn rotate_left<T: Copy>(x: T, y: T) -> T;
+
+ /// Performs rotate right.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `rotate_right` method. For example,
+ /// [`u32::rotate_right`]
+ #[rustc_const_stable(feature = "const_int_rotate", since = "1.40.0")]
+ pub fn rotate_right<T: Copy>(x: T, y: T) -> T;
+
+ /// Returns (a + b) mod 2<sup>N</sup>, where `N` is the width of `T` in bits.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `wrapping_add` method. For example,
+ /// [`u32::wrapping_add`]
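+ ///
+ /// # Examples
+ ///
+ /// A small nightly-only sketch; the behavior matches the stable
+ /// `wrapping_add` methods.
+ ///
+ /// ```
+ /// #![feature(core_intrinsics)]
+ ///
+ /// use std::intrinsics::wrapping_add;
+ ///
+ /// // 255 + 2 wraps to 1 (mod 2^8).
+ /// assert_eq!(wrapping_add(u8::MAX, 2_u8), 1_u8);
+ /// ```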
+ #[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")]
+ pub fn wrapping_add<T: Copy>(a: T, b: T) -> T;
+ /// Returns (a - b) mod 2<sup>N</sup>, where `N` is the width of `T` in bits.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `wrapping_sub` method. For example,
+ /// [`u32::wrapping_sub`]
+ #[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")]
+ pub fn wrapping_sub<T: Copy>(a: T, b: T) -> T;
+ /// Returns (a * b) mod 2<sup>N</sup>, where `N` is the width of `T` in bits.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `wrapping_mul` method. For example,
+ /// [`u32::wrapping_mul`]
+ #[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")]
+ pub fn wrapping_mul<T: Copy>(a: T, b: T) -> T;
+
+ /// Computes `a + b`, saturating at numeric bounds.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `saturating_add` method. For example,
+ /// [`u32::saturating_add`]
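+ ///
+ /// # Examples
+ ///
+ /// A small nightly-only sketch; the behavior matches the stable
+ /// `saturating_add` methods.
+ ///
+ /// ```
+ /// #![feature(core_intrinsics)]
+ ///
+ /// use std::intrinsics::saturating_add;
+ ///
+ /// // 250 + 10 would overflow, so the result clamps to `u8::MAX`.
+ /// assert_eq!(saturating_add(250_u8, 10_u8), u8::MAX);
+ /// ```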
+ #[rustc_const_stable(feature = "const_int_saturating", since = "1.40.0")]
+ pub fn saturating_add<T: Copy>(a: T, b: T) -> T;
+ /// Computes `a - b`, saturating at numeric bounds.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `saturating_sub` method. For example,
+ /// [`u32::saturating_sub`]
+ #[rustc_const_stable(feature = "const_int_saturating", since = "1.40.0")]
+ pub fn saturating_sub<T: Copy>(a: T, b: T) -> T;
+
+ /// Returns the value of the discriminant for the variant in `v`;
+ /// if `T` has no discriminant, returns `0`.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// The stabilized version of this intrinsic is [`core::mem::discriminant`].
+ #[rustc_const_unstable(feature = "const_discriminant", issue = "69821")]
+ pub fn discriminant_value<T>(v: &T) -> <T as DiscriminantKind>::Discriminant;
+
+ /// Returns the number of variants of the type `T` cast to a `usize`;
+ /// if `T` has no variants, returns `0`. Uninhabited variants will be counted.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// The to-be-stabilized version of this intrinsic is [`mem::variant_count`].
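+ ///
+ /// # Examples
+ ///
+ /// A small nightly-only sketch; uninhabited variants still count.
+ ///
+ /// ```
+ /// #![feature(core_intrinsics)]
+ ///
+ /// use std::intrinsics::variant_count;
+ ///
+ /// assert_eq!(variant_count::<Option<u32>>(), 2);
+ /// assert_eq!(variant_count::<std::cmp::Ordering>(), 3);
+ /// ```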
+ #[rustc_const_unstable(feature = "variant_count", issue = "73662")]
+ pub fn variant_count<T>() -> usize;
+
+ /// Rust's "try catch" construct which invokes the function pointer `try_fn`
+ /// with the data pointer `data`.
+ ///
+ /// The third argument is a function called if a panic occurs. This function
+ /// takes the data pointer and a pointer to the target-specific exception
+ /// object that was caught. For more information see the compiler's
+ /// source as well as std's catch implementation.
+ pub fn r#try(try_fn: fn(*mut u8), data: *mut u8, catch_fn: fn(*mut u8, *mut u8)) -> i32;
+
+ /// Emits a `!nontemporal` store according to LLVM (see their docs).
+ /// Probably will never become stable.
+ pub fn nontemporal_store<T>(ptr: *mut T, val: T);
+
+ /// See documentation of `<*const T>::offset_from` for details.
+ #[rustc_const_unstable(feature = "const_ptr_offset_from", issue = "92980")]
+ pub fn ptr_offset_from<T>(ptr: *const T, base: *const T) -> isize;
+
+ /// See documentation of `<*const T>::sub_ptr` for details.
+ #[rustc_const_unstable(feature = "const_ptr_offset_from", issue = "92980")]
+ pub fn ptr_offset_from_unsigned<T>(ptr: *const T, base: *const T) -> usize;
+
+ /// See documentation of `<*const T>::guaranteed_eq` for details.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
+ pub fn ptr_guaranteed_eq<T>(ptr: *const T, other: *const T) -> bool;
+
+ /// See documentation of `<*const T>::guaranteed_ne` for details.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
+ pub fn ptr_guaranteed_ne<T>(ptr: *const T, other: *const T) -> bool;
+
+ /// Allocates a block of memory at compile time.
+ /// At runtime, just returns a null pointer.
+ ///
+ /// # Safety
+ ///
+ /// - The `align` argument must be a power of two.
+ /// - At compile time, a compile error occurs if this constraint is violated.
+ /// - At runtime, it is not checked.
+ #[rustc_const_unstable(feature = "const_heap", issue = "79597")]
+ pub fn const_allocate(size: usize, align: usize) -> *mut u8;
+
+ /// Deallocates memory that was allocated by `intrinsics::const_allocate` at compile time.
+ /// At runtime, does nothing.
+ ///
+ /// # Safety
+ ///
+ /// - The `align` argument must be a power of two.
+ /// - At compile time, a compile error occurs if this constraint is violated.
+ /// - At runtime, it is not checked.
+ /// - If `ptr` was created in another const, this intrinsic doesn't deallocate it.
+ /// - If `ptr` points to a local variable, this intrinsic doesn't deallocate it.
+ #[rustc_const_unstable(feature = "const_heap", issue = "79597")]
+ pub fn const_deallocate(ptr: *mut u8, size: usize, align: usize);
+
+ /// Determines whether the raw bytes of the two values are equal.
+ ///
+ /// This is particularly handy for arrays, since it allows things like just
+ /// comparing `i96`s instead of forcing `alloca`s for `[6 x i16]`.
+ ///
+ /// Above some backend-decided threshold this will emit calls to `memcmp`,
+ /// like slice equality does, instead of causing massive code size.
+ ///
+ /// # Safety
+ ///
+ /// It's UB to call this if any of the *bytes* in `*a` or `*b` are uninitialized.
+ /// Note that this is a stricter criterion than just the *values* being
+ /// fully-initialized: if `T` has padding, it's UB to call this intrinsic.
+ ///
+ /// (The implementation is allowed to branch on the results of comparisons,
+ /// which is UB if any of their inputs are `undef`.)
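+ ///
+ /// # Examples
+ ///
+ /// A small nightly-only sketch; `u8` arrays have no padding, so every byte
+ /// is initialized and the call is sound.
+ ///
+ /// ```
+ /// #![feature(core_intrinsics)]
+ ///
+ /// use std::intrinsics::raw_eq;
+ ///
+ /// let a = [1_u8, 2, 3, 4];
+ /// let b = [1_u8, 2, 3, 4];
+ /// assert!(unsafe { raw_eq(&a, &b) });
+ /// ```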
+ #[rustc_const_unstable(feature = "const_intrinsic_raw_eq", issue = "none")]
+ pub fn raw_eq<T>(a: &T, b: &T) -> bool;
+
+ /// See documentation of [`std::hint::black_box`] for details.
+ ///
+ /// [`std::hint::black_box`]: crate::hint::black_box
+ #[rustc_const_unstable(feature = "const_black_box", issue = "none")]
+ pub fn black_box<T>(dummy: T) -> T;
+
+ /// `ptr` must point to a vtable.
+ /// The intrinsic will return the size stored in that vtable.
+ #[cfg(not(bootstrap))]
+ pub fn vtable_size(ptr: *const ()) -> usize;
+
+ /// `ptr` must point to a vtable.
+ /// The intrinsic will return the alignment stored in that vtable.
+ #[cfg(not(bootstrap))]
+ pub fn vtable_align(ptr: *const ()) -> usize;
+}
+
+// Some functions are defined here because they accidentally got made
+// available in this module on stable. See <https://github.com/rust-lang/rust/issues/15702>.
+// (`transmute` also falls into this category, but it cannot be wrapped due to the
+// check that `T` and `U` have the same size.)
+
+/// Checks that the preconditions of an unsafe function are followed, but only at runtime and
+/// only if `debug_assertions` are enabled.
+///
+/// # Safety
+///
+/// Invoking this macro is only sound if the following code is already UB when the passed
+/// expression evaluates to false.
+///
+/// This macro expands to a check at runtime if `debug_assertions` is set. It has no effect at
+/// compile time, but the semantics of the contained `const_eval_select` must be the same at
+/// runtime and at compile time. Thus, if the expression evaluates to false, this macro produces
+/// different behavior at compile time and at runtime, and invoking it is incorrect.
+///
+/// So in a sense it is UB if this macro is useful, but we expect callers of `unsafe fn` to make
+/// the occasional mistake, and this check should help them figure things out.
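+///
+/// For example, `copy_nonoverlapping` below wraps its intrinsic call in
+/// `assert_unsafe_precondition!(is_aligned_and_not_null(src) && ...)`, which is
+/// sound because violating those conditions is already UB for that function.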
+#[allow_internal_unstable(const_eval_select)] // permit this to be called in stably-const fn
+macro_rules! assert_unsafe_precondition {
+ ($e:expr) => {
+ if cfg!(debug_assertions) {
+ // Use a closure so that we can capture arbitrary expressions from the invocation
+ let runtime = || {
+ if !$e {
+ // abort instead of panicking to reduce impact on code size
+ ::core::intrinsics::abort();
+ }
+ };
+ const fn comptime() {}
+
+ ::core::intrinsics::const_eval_select((), comptime, runtime);
+ }
+ };
+}
+pub(crate) use assert_unsafe_precondition;
+
+/// Checks whether `ptr` is properly aligned with respect to
+/// `align_of::<T>()`.
+pub(crate) fn is_aligned_and_not_null<T>(ptr: *const T) -> bool {
+ !ptr.is_null() && ptr.addr() % mem::align_of::<T>() == 0
+}
+
+/// Checks whether the regions of memory starting at `src` and `dst` of size
+/// `count * size_of::<T>()` do *not* overlap.
+pub(crate) fn is_nonoverlapping<T>(src: *const T, dst: *const T, count: usize) -> bool {
+ let src_usize = src.addr();
+ let dst_usize = dst.addr();
+ let size = mem::size_of::<T>().checked_mul(count).unwrap();
+ let diff = if src_usize > dst_usize { src_usize - dst_usize } else { dst_usize - src_usize };
+ // If the absolute distance between the ptrs is at least as big as the size of the buffer,
+ // they do not overlap.
+ diff >= size
+}
+
+/// Copies `count * size_of::<T>()` bytes from `src` to `dst`. The source
+/// and destination must *not* overlap.
+///
+/// For regions of memory which might overlap, use [`copy`] instead.
+///
+/// `copy_nonoverlapping` is semantically equivalent to C's [`memcpy`], but
+/// with the argument order swapped.
+///
+/// The copy is "untyped" in the sense that data may be uninitialized or otherwise violate the
+/// requirements of `T`. The initialization state is preserved exactly.
+///
+/// [`memcpy`]: https://en.cppreference.com/w/c/string/byte/memcpy
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * `src` must be [valid] for reads of `count * size_of::<T>()` bytes.
+///
+/// * `dst` must be [valid] for writes of `count * size_of::<T>()` bytes.
+///
+/// * Both `src` and `dst` must be properly aligned.
+///
+/// * The region of memory beginning at `src` with a size of `count *
+/// size_of::<T>()` bytes must *not* overlap with the region of memory
+/// beginning at `dst` with the same size.
+///
+/// Like [`read`], `copy_nonoverlapping` creates a bitwise copy of `T`, regardless of
+/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using *both* the values
+/// in the region beginning at `*src` and the region beginning at `*dst` can
+/// [violate memory safety][read-ownership].
+///
+/// Note that even if the effectively copied size (`count * size_of::<T>()`) is
+/// `0`, the pointers must be non-null and properly aligned.
+///
+/// [`read`]: crate::ptr::read
+/// [read-ownership]: crate::ptr::read#ownership-of-the-returned-value
+/// [valid]: crate::ptr#safety
+///
+/// # Examples
+///
+/// Manually implement [`Vec::append`]:
+///
+/// ```
+/// use std::ptr;
+///
+/// /// Moves all the elements of `src` into `dst`, leaving `src` empty.
+/// fn append<T>(dst: &mut Vec<T>, src: &mut Vec<T>) {
+/// let src_len = src.len();
+/// let dst_len = dst.len();
+///
+/// // Ensure that `dst` has enough capacity to hold all of `src`.
+/// dst.reserve(src_len);
+///
+/// unsafe {
+/// // The call to offset is always safe because `Vec` will never
+/// // allocate more than `isize::MAX` bytes.
+/// let dst_ptr = dst.as_mut_ptr().offset(dst_len as isize);
+/// let src_ptr = src.as_ptr();
+///
+/// // Truncate `src` without dropping its contents. We do this first,
+/// // to avoid problems in case something further down panics.
+/// src.set_len(0);
+///
+/// // The two regions cannot overlap because mutable references do
+/// // not alias, and two different vectors cannot own the same
+/// // memory.
+/// ptr::copy_nonoverlapping(src_ptr, dst_ptr, src_len);
+///
+/// // Notify `dst` that it now holds the contents of `src`.
+/// dst.set_len(dst_len + src_len);
+/// }
+/// }
+///
+/// let mut a = vec!['r'];
+/// let mut b = vec!['u', 's', 't'];
+///
+/// append(&mut a, &mut b);
+///
+/// assert_eq!(a, &['r', 'u', 's', 't']);
+/// assert!(b.is_empty());
+/// ```
+///
+/// [`Vec::append`]: ../../std/vec/struct.Vec.html#method.append
+#[doc(alias = "memcpy")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(bootstrap), rustc_allowed_through_unstable_modules)]
+#[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
+#[inline]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+pub const unsafe fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize) {
+ extern "rust-intrinsic" {
+ #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
+ pub fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
+ }
+
+ // SAFETY: the safety contract for `copy_nonoverlapping` must be
+ // upheld by the caller.
+ unsafe {
+ assert_unsafe_precondition!(
+ is_aligned_and_not_null(src)
+ && is_aligned_and_not_null(dst)
+ && is_nonoverlapping(src, dst, count)
+ );
+ copy_nonoverlapping(src, dst, count)
+ }
+}
+
+/// Copies `count * size_of::<T>()` bytes from `src` to `dst`. The source
+/// and destination may overlap.
+///
+/// If the source and destination will *never* overlap,
+/// [`copy_nonoverlapping`] can be used instead.
+///
+/// `copy` is semantically equivalent to C's [`memmove`], but with the argument
+/// order swapped. Copying takes place as if the bytes were copied from `src`
+/// to a temporary array and then copied from the array to `dst`.
+///
+/// The copy is "untyped" in the sense that data may be uninitialized or otherwise violate the
+/// requirements of `T`. The initialization state is preserved exactly.
+///
+/// [`memmove`]: https://en.cppreference.com/w/c/string/byte/memmove
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * `src` must be [valid] for reads of `count * size_of::<T>()` bytes.
+///
+/// * `dst` must be [valid] for writes of `count * size_of::<T>()` bytes.
+///
+/// * Both `src` and `dst` must be properly aligned.
+///
+/// Like [`read`], `copy` creates a bitwise copy of `T`, regardless of
+/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the values
+/// in the region beginning at `*src` and the region beginning at `*dst` can
+/// [violate memory safety][read-ownership].
+///
+/// Note that even if the effectively copied size (`count * size_of::<T>()`) is
+/// `0`, the pointers must be non-null and properly aligned.
+///
+/// [`read`]: crate::ptr::read
+/// [read-ownership]: crate::ptr::read#ownership-of-the-returned-value
+/// [valid]: crate::ptr#safety
+///
+/// # Examples
+///
+/// Efficiently create a Rust vector from an unsafe buffer:
+///
+/// ```
+/// use std::ptr;
+///
+/// /// # Safety
+/// ///
+/// /// * `ptr` must be correctly aligned for its type and non-zero.
+/// /// * `ptr` must be valid for reads of `elts` contiguous elements of type `T`.
+/// /// * Those elements must not be used after calling this function unless `T: Copy`.
+/// # #[allow(dead_code)]
+/// unsafe fn from_buf_raw<T>(ptr: *const T, elts: usize) -> Vec<T> {
+/// let mut dst = Vec::with_capacity(elts);
+///
+/// // SAFETY: Our precondition ensures the source is aligned and valid,
+/// // and `Vec::with_capacity` ensures that we have usable space to write them.
+/// ptr::copy(ptr, dst.as_mut_ptr(), elts);
+///
+/// // SAFETY: We created it with this much capacity earlier,
+/// // and the previous `copy` has initialized these elements.
+/// dst.set_len(elts);
+/// dst
+/// }
+/// ```
+#[doc(alias = "memmove")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(bootstrap), rustc_allowed_through_unstable_modules)]
+#[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
+#[inline]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+pub const unsafe fn copy<T>(src: *const T, dst: *mut T, count: usize) {
+ extern "rust-intrinsic" {
+ #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
+ fn copy<T>(src: *const T, dst: *mut T, count: usize);
+ }
+
+ // SAFETY: the safety contract for `copy` must be upheld by the caller.
+ unsafe {
+ assert_unsafe_precondition!(is_aligned_and_not_null(src) && is_aligned_and_not_null(dst));
+ copy(src, dst, count)
+ }
+}
+
+/// Sets `count * size_of::<T>()` bytes of memory starting at `dst` to
+/// `val`.
+///
+/// `write_bytes` is similar to C's [`memset`], but sets `count *
+/// size_of::<T>()` bytes to `val`.
+///
+/// [`memset`]: https://en.cppreference.com/w/c/string/byte/memset
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * `dst` must be [valid] for writes of `count * size_of::<T>()` bytes.
+///
+/// * `dst` must be properly aligned.
+///
+/// Note that even if the effectively copied size (`count * size_of::<T>()`) is
+/// `0`, the pointer must be non-null and properly aligned.
+///
+/// Additionally, note that changing `*dst` in this way can easily lead to undefined behavior (UB)
+/// later if the written bytes are not a valid representation of some `T`. For instance, the
+/// following is an **incorrect** use of this function:
+///
+/// ```rust,no_run
+/// unsafe {
+/// let mut value: u8 = 0;
+/// let ptr: *mut bool = &mut value as *mut u8 as *mut bool;
+/// let _bool = ptr.read(); // This is fine, `ptr` points to a valid `bool`.
+/// ptr.write_bytes(42u8, 1); // This function itself does not cause UB...
+/// let _bool = ptr.read(); // ...but it makes this operation UB! ⚠️
+/// }
+/// ```
+///
+/// [valid]: crate::ptr#safety
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::ptr;
+///
+/// let mut vec = vec![0u32; 4];
+/// unsafe {
+/// let vec_ptr = vec.as_mut_ptr();
+/// ptr::write_bytes(vec_ptr, 0xfe, 2);
+/// }
+/// assert_eq!(vec, [0xfefefefe, 0xfefefefe, 0, 0]);
+/// ```
+#[doc(alias = "memset")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(bootstrap), rustc_allowed_through_unstable_modules)]
+#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
+#[inline]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+pub const unsafe fn write_bytes<T>(dst: *mut T, val: u8, count: usize) {
+ extern "rust-intrinsic" {
+ #[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
+ fn write_bytes<T>(dst: *mut T, val: u8, count: usize);
+ }
+
+ // SAFETY: the safety contract for `write_bytes` must be upheld by the caller.
+ unsafe {
+ assert_unsafe_precondition!(is_aligned_and_not_null(dst));
+ write_bytes(dst, val, count)
+ }
+}
+
+/// Selects which function to call depending on the context.
+///
+/// If this function is evaluated at compile-time, then a call to this
+/// intrinsic will be replaced with a call to `called_in_const`. It gets
+/// replaced with a call to `called_at_rt` otherwise.
+///
+/// # Type Requirements
+///
+/// The two functions must both be function items. They cannot be function
+/// pointers or closures.
+///
+/// `arg` will be the arguments passed to whichever of the two functions is
+/// called; therefore, both functions must accept the same type of
+/// arguments. Both functions must return `RET`.
+///
+/// # Safety
+///
+/// The two functions must behave observably equivalently. Safe code in other
+/// crates may assume that calling a `const fn` at compile-time and at run-time
+/// produces the same result. A function that produces a different result when
+/// evaluated at run-time, or has any other observable side-effects, is
+/// *unsound*.
+///
+/// Here is an example of how this could cause a problem:
+/// ```no_run
+/// #![feature(const_eval_select)]
+/// #![feature(core_intrinsics)]
+/// use std::hint::unreachable_unchecked;
+/// use std::intrinsics::const_eval_select;
+///
+/// // Crate A
+/// pub const fn inconsistent() -> i32 {
+/// fn runtime() -> i32 { 1 }
+/// const fn compiletime() -> i32 { 2 }
+///
+/// unsafe {
+/// // ⚠ This code violates the required equivalence of `compiletime`
+/// // and `runtime`.
+/// const_eval_select((), compiletime, runtime)
+/// }
+/// }
+///
+/// // Crate B
+/// const X: i32 = inconsistent();
+/// let x = inconsistent();
+/// if x != X { unsafe { unreachable_unchecked(); }}
+/// ```
+///
+/// This code causes Undefined Behavior when being run, since the
+/// `unreachable_unchecked` is actually being reached. The bug is in *crate A*,
+/// which violates the principle that a `const fn` must behave the same at
+/// compile-time and at run-time. The unsafe code in crate B is fine.
+#[unstable(
+ feature = "const_eval_select",
+ issue = "none",
+ reason = "const_eval_select will never be stable"
+)]
+#[rustc_const_unstable(feature = "const_eval_select", issue = "none")]
+#[lang = "const_eval_select"]
+#[rustc_do_not_const_check]
+#[inline]
+pub const unsafe fn const_eval_select<ARG, F, G, RET>(
+ arg: ARG,
+ _called_in_const: F,
+ called_at_rt: G,
+) -> RET
+where
+ F: ~const FnOnce<ARG, Output = RET>,
+ G: FnOnce<ARG, Output = RET> + ~const Destruct,
+{
+ called_at_rt.call_once(arg)
+}
+
+#[unstable(
+ feature = "const_eval_select",
+ issue = "none",
+ reason = "const_eval_select will never be stable"
+)]
+#[rustc_const_unstable(feature = "const_eval_select", issue = "none")]
+#[lang = "const_eval_select_ct"]
+pub const unsafe fn const_eval_select_ct<ARG, F, G, RET>(
+ arg: ARG,
+ called_in_const: F,
+ _called_at_rt: G,
+) -> RET
+where
+ F: ~const FnOnce<ARG, Output = RET>,
+ G: FnOnce<ARG, Output = RET> + ~const Destruct,
+{
+ called_in_const.call_once(arg)
+}
diff --git a/library/core/src/iter/adapters/by_ref_sized.rs b/library/core/src/iter/adapters/by_ref_sized.rs
new file mode 100644
index 000000000..cc1e8e8a2
--- /dev/null
+++ b/library/core/src/iter/adapters/by_ref_sized.rs
@@ -0,0 +1,86 @@
+use crate::ops::Try;
+
+/// Like `Iterator::by_ref`, but requiring `Sized` so it can forward generics.
+///
+/// Ideally this will eventually no longer be required, but as the benchmarks
+/// show (as of Feb 2022, at least), `by_ref` can have a performance cost.
+#[unstable(feature = "std_internals", issue = "none")]
+#[derive(Debug)]
+pub struct ByRefSized<'a, I>(pub &'a mut I);
+
+#[unstable(feature = "std_internals", issue = "none")]
+impl<I: Iterator> Iterator for ByRefSized<'_, I> {
+ type Item = I::Item;
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ self.0.next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.0.size_hint()
+ }
+
+ #[inline]
+ fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ self.0.advance_by(n)
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ self.0.nth(n)
+ }
+
+ #[inline]
+ fn fold<B, F>(self, init: B, f: F) -> B
+ where
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.0.fold(init, f)
+ }
+
+ #[inline]
+ fn try_fold<B, F, R>(&mut self, init: B, f: F) -> R
+ where
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ self.0.try_fold(init, f)
+ }
+}
+
+#[unstable(feature = "std_internals", issue = "none")]
+impl<I: DoubleEndedIterator> DoubleEndedIterator for ByRefSized<'_, I> {
+ #[inline]
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.0.next_back()
+ }
+
+ #[inline]
+ fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ self.0.advance_back_by(n)
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ self.0.nth_back(n)
+ }
+
+ #[inline]
+ fn rfold<B, F>(self, init: B, f: F) -> B
+ where
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.0.rfold(init, f)
+ }
+
+ #[inline]
+ fn try_rfold<B, F, R>(&mut self, init: B, f: F) -> R
+ where
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ self.0.try_rfold(init, f)
+ }
+}
diff --git a/library/core/src/iter/adapters/chain.rs b/library/core/src/iter/adapters/chain.rs
new file mode 100644
index 000000000..60eb3a6da
--- /dev/null
+++ b/library/core/src/iter/adapters/chain.rs
@@ -0,0 +1,292 @@
+use crate::iter::{DoubleEndedIterator, FusedIterator, Iterator, TrustedLen};
+use crate::ops::Try;
+
+/// An iterator that links two iterators together, in a chain.
+///
+/// This `struct` is created by [`Iterator::chain`]. See its documentation
+/// for more.
+///
+/// # Examples
+///
+/// ```
+/// use std::iter::Chain;
+/// use std::slice::Iter;
+///
+/// let a1 = [1, 2, 3];
+/// let a2 = [4, 5, 6];
+/// let iter: Chain<Iter<_>, Iter<_>> = a1.iter().chain(a2.iter());
+/// ```
+#[derive(Clone, Debug)]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Chain<A, B> {
+ // These are "fused" with `Option` so we don't need separate state to track which part is
+ // already exhausted, and we may also get niche layout for `None`. We don't use the real `Fuse`
+ // adapter because its specialization for `FusedIterator` unconditionally descends into the
+ // iterator, and that could be expensive to keep revisiting stuff like nested chains. It also
+ // hurts compiler performance to add more iterator layers to `Chain`.
+ //
+ // Only the "first" iterator is actually set `None` when exhausted, depending on whether you
+ // iterate forward or backward. If you mix directions, then both sides may be `None`.
+ a: Option<A>,
+ b: Option<B>,
+}
+impl<A, B> Chain<A, B> {
+ pub(in super::super) fn new(a: A, b: B) -> Chain<A, B> {
+ Chain { a: Some(a), b: Some(b) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A, B> Iterator for Chain<A, B>
+where
+ A: Iterator,
+ B: Iterator<Item = A::Item>,
+{
+ type Item = A::Item;
+
+ #[inline]
+ fn next(&mut self) -> Option<A::Item> {
+ and_then_or_clear(&mut self.a, Iterator::next).or_else(|| self.b.as_mut()?.next())
+ }
+
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ fn count(self) -> usize {
+ let a_count = match self.a {
+ Some(a) => a.count(),
+ None => 0,
+ };
+ let b_count = match self.b {
+ Some(b) => b.count(),
+ None => 0,
+ };
+ a_count + b_count
+ }
+
+ fn try_fold<Acc, F, R>(&mut self, mut acc: Acc, mut f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ if let Some(ref mut a) = self.a {
+ acc = a.try_fold(acc, &mut f)?;
+ self.a = None;
+ }
+ if let Some(ref mut b) = self.b {
+ acc = b.try_fold(acc, f)?;
+ // we don't fuse the second iterator
+ }
+ try { acc }
+ }
+
+ fn fold<Acc, F>(self, mut acc: Acc, mut f: F) -> Acc
+ where
+ F: FnMut(Acc, Self::Item) -> Acc,
+ {
+ if let Some(a) = self.a {
+ acc = a.fold(acc, &mut f);
+ }
+ if let Some(b) = self.b {
+ acc = b.fold(acc, f);
+ }
+ acc
+ }
+
+ #[inline]
+ fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ let mut rem = n;
+
+ if let Some(ref mut a) = self.a {
+ match a.advance_by(rem) {
+ Ok(()) => return Ok(()),
+ Err(k) => rem -= k,
+ }
+ self.a = None;
+ }
+
+ if let Some(ref mut b) = self.b {
+ match b.advance_by(rem) {
+ Ok(()) => return Ok(()),
+ Err(k) => rem -= k,
+ }
+ // we don't fuse the second iterator
+ }
+
+ if rem == 0 { Ok(()) } else { Err(n - rem) }
+ }
+
+ #[inline]
+ fn nth(&mut self, mut n: usize) -> Option<Self::Item> {
+ if let Some(ref mut a) = self.a {
+ match a.advance_by(n) {
+ Ok(()) => match a.next() {
+ None => n = 0,
+ x => return x,
+ },
+ Err(k) => n -= k,
+ }
+
+ self.a = None;
+ }
+
+ self.b.as_mut()?.nth(n)
+ }
+
+ #[inline]
+ fn find<P>(&mut self, mut predicate: P) -> Option<Self::Item>
+ where
+ P: FnMut(&Self::Item) -> bool,
+ {
+ and_then_or_clear(&mut self.a, |a| a.find(&mut predicate))
+ .or_else(|| self.b.as_mut()?.find(predicate))
+ }
+
+ #[inline]
+ fn last(self) -> Option<A::Item> {
+ // Must exhaust a before b.
+ let a_last = self.a.and_then(Iterator::last);
+ let b_last = self.b.and_then(Iterator::last);
+ b_last.or(a_last)
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ match self {
+ Chain { a: Some(a), b: Some(b) } => {
+ let (a_lower, a_upper) = a.size_hint();
+ let (b_lower, b_upper) = b.size_hint();
+
+ let lower = a_lower.saturating_add(b_lower);
+
+ let upper = match (a_upper, b_upper) {
+ (Some(x), Some(y)) => x.checked_add(y),
+ _ => None,
+ };
+
+ (lower, upper)
+ }
+ Chain { a: Some(a), b: None } => a.size_hint(),
+ Chain { a: None, b: Some(b) } => b.size_hint(),
+ Chain { a: None, b: None } => (0, Some(0)),
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A, B> DoubleEndedIterator for Chain<A, B>
+where
+ A: DoubleEndedIterator,
+ B: DoubleEndedIterator<Item = A::Item>,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<A::Item> {
+ and_then_or_clear(&mut self.b, |b| b.next_back()).or_else(|| self.a.as_mut()?.next_back())
+ }
+
+ #[inline]
+ fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ let mut rem = n;
+
+ if let Some(ref mut b) = self.b {
+ match b.advance_back_by(rem) {
+ Ok(()) => return Ok(()),
+ Err(k) => rem -= k,
+ }
+ self.b = None;
+ }
+
+ if let Some(ref mut a) = self.a {
+ match a.advance_back_by(rem) {
+ Ok(()) => return Ok(()),
+ Err(k) => rem -= k,
+ }
+ // we don't fuse the second iterator
+ }
+
+ if rem == 0 { Ok(()) } else { Err(n - rem) }
+ }
+
+ #[inline]
+ fn nth_back(&mut self, mut n: usize) -> Option<Self::Item> {
+ if let Some(ref mut b) = self.b {
+ match b.advance_back_by(n) {
+ Ok(()) => match b.next_back() {
+ None => n = 0,
+ x => return x,
+ },
+ Err(k) => n -= k,
+ }
+
+ self.b = None;
+ }
+
+ self.a.as_mut()?.nth_back(n)
+ }
+
+ #[inline]
+ fn rfind<P>(&mut self, mut predicate: P) -> Option<Self::Item>
+ where
+ P: FnMut(&Self::Item) -> bool,
+ {
+ and_then_or_clear(&mut self.b, |b| b.rfind(&mut predicate))
+ .or_else(|| self.a.as_mut()?.rfind(predicate))
+ }
+
+ fn try_rfold<Acc, F, R>(&mut self, mut acc: Acc, mut f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ if let Some(ref mut b) = self.b {
+ acc = b.try_rfold(acc, &mut f)?;
+ self.b = None;
+ }
+ if let Some(ref mut a) = self.a {
+ acc = a.try_rfold(acc, f)?;
+ // we don't fuse the second iterator
+ }
+ try { acc }
+ }
+
+ fn rfold<Acc, F>(self, mut acc: Acc, mut f: F) -> Acc
+ where
+ F: FnMut(Acc, Self::Item) -> Acc,
+ {
+ if let Some(b) = self.b {
+ acc = b.rfold(acc, &mut f);
+ }
+ if let Some(a) = self.a {
+ acc = a.rfold(acc, f);
+ }
+ acc
+ }
+}
+
+// Note: *both* must be fused to handle double-ended iterators.
+#[stable(feature = "fused", since = "1.26.0")]
+impl<A, B> FusedIterator for Chain<A, B>
+where
+ A: FusedIterator,
+ B: FusedIterator<Item = A::Item>,
+{
+}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<A, B> TrustedLen for Chain<A, B>
+where
+ A: TrustedLen,
+ B: TrustedLen<Item = A::Item>,
+{
+}
+
+#[inline]
+fn and_then_or_clear<T, U>(opt: &mut Option<T>, f: impl FnOnce(&mut T) -> Option<U>) -> Option<U> {
+ let x = f(opt.as_mut()?);
+ if x.is_none() {
+ *opt = None;
+ }
+ x
+}
diff --git a/library/core/src/iter/adapters/cloned.rs b/library/core/src/iter/adapters/cloned.rs
new file mode 100644
index 000000000..aba24a79d
--- /dev/null
+++ b/library/core/src/iter/adapters/cloned.rs
@@ -0,0 +1,142 @@
+use crate::iter::adapters::{
+ zip::try_get_unchecked, TrustedRandomAccess, TrustedRandomAccessNoCoerce,
+};
+use crate::iter::{FusedIterator, TrustedLen};
+use crate::ops::Try;
+
+/// An iterator that clones the elements of an underlying iterator.
+///
+/// This `struct` is created by the [`cloned`] method on [`Iterator`]. See its
+/// documentation for more.
+///
+/// [`cloned`]: Iterator::cloned
+/// [`Iterator`]: trait.Iterator.html
+#[stable(feature = "iter_cloned", since = "1.1.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[derive(Clone, Debug)]
+pub struct Cloned<I> {
+ it: I,
+}
+
+impl<I> Cloned<I> {
+ pub(in crate::iter) fn new(it: I) -> Cloned<I> {
+ Cloned { it }
+ }
+}
+
+fn clone_try_fold<T: Clone, Acc, R>(mut f: impl FnMut(Acc, T) -> R) -> impl FnMut(Acc, &T) -> R {
+ move |acc, elt| f(acc, elt.clone())
+}
+
+#[stable(feature = "iter_cloned", since = "1.1.0")]
+impl<'a, I, T: 'a> Iterator for Cloned<I>
+where
+ I: Iterator<Item = &'a T>,
+ T: Clone,
+{
+ type Item = T;
+
+ fn next(&mut self) -> Option<T> {
+ self.it.next().cloned()
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.it.size_hint()
+ }
+
+ fn try_fold<B, F, R>(&mut self, init: B, f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ self.it.try_fold(init, clone_try_fold(f))
+ }
+
+ fn fold<Acc, F>(self, init: Acc, f: F) -> Acc
+ where
+ F: FnMut(Acc, Self::Item) -> Acc,
+ {
+ self.it.map(T::clone).fold(init, f)
+ }
+
+ unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> T
+ where
+ Self: TrustedRandomAccessNoCoerce,
+ {
+ // SAFETY: the caller must uphold the contract for
+ // `Iterator::__iterator_get_unchecked`.
+ unsafe { try_get_unchecked(&mut self.it, idx).clone() }
+ }
+}
+
+#[stable(feature = "iter_cloned", since = "1.1.0")]
+impl<'a, I, T: 'a> DoubleEndedIterator for Cloned<I>
+where
+ I: DoubleEndedIterator<Item = &'a T>,
+ T: Clone,
+{
+ fn next_back(&mut self) -> Option<T> {
+ self.it.next_back().cloned()
+ }
+
+ fn try_rfold<B, F, R>(&mut self, init: B, f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ self.it.try_rfold(init, clone_try_fold(f))
+ }
+
+ fn rfold<Acc, F>(self, init: Acc, f: F) -> Acc
+ where
+ F: FnMut(Acc, Self::Item) -> Acc,
+ {
+ self.it.map(T::clone).rfold(init, f)
+ }
+}
+
+#[stable(feature = "iter_cloned", since = "1.1.0")]
+impl<'a, I, T: 'a> ExactSizeIterator for Cloned<I>
+where
+ I: ExactSizeIterator<Item = &'a T>,
+ T: Clone,
+{
+ fn len(&self) -> usize {
+ self.it.len()
+ }
+
+ fn is_empty(&self) -> bool {
+ self.it.is_empty()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<'a, I, T: 'a> FusedIterator for Cloned<I>
+where
+ I: FusedIterator<Item = &'a T>,
+ T: Clone,
+{
+}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<I> TrustedRandomAccess for Cloned<I> where I: TrustedRandomAccess {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<I> TrustedRandomAccessNoCoerce for Cloned<I>
+where
+ I: TrustedRandomAccessNoCoerce,
+{
+ const MAY_HAVE_SIDE_EFFECT: bool = true;
+}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<'a, I, T: 'a> TrustedLen for Cloned<I>
+where
+ I: TrustedLen<Item = &'a T>,
+ T: Clone,
+{
+}
diff --git a/library/core/src/iter/adapters/copied.rs b/library/core/src/iter/adapters/copied.rs
new file mode 100644
index 000000000..f9bfd77d7
--- /dev/null
+++ b/library/core/src/iter/adapters/copied.rs
@@ -0,0 +1,168 @@
+use crate::iter::adapters::{
+ zip::try_get_unchecked, TrustedRandomAccess, TrustedRandomAccessNoCoerce,
+};
+use crate::iter::{FusedIterator, TrustedLen};
+use crate::ops::Try;
+
+/// An iterator that copies the elements of an underlying iterator.
+///
+/// This `struct` is created by the [`copied`] method on [`Iterator`]. See its
+/// documentation for more.
+///
+/// [`copied`]: Iterator::copied
+/// [`Iterator`]: trait.Iterator.html
+#[stable(feature = "iter_copied", since = "1.36.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[derive(Clone, Debug)]
+pub struct Copied<I> {
+ it: I,
+}
+
+impl<I> Copied<I> {
+ pub(in crate::iter) fn new(it: I) -> Copied<I> {
+ Copied { it }
+ }
+}
+
+fn copy_fold<T: Copy, Acc>(mut f: impl FnMut(Acc, T) -> Acc) -> impl FnMut(Acc, &T) -> Acc {
+ move |acc, &elt| f(acc, elt)
+}
+
+fn copy_try_fold<T: Copy, Acc, R>(mut f: impl FnMut(Acc, T) -> R) -> impl FnMut(Acc, &T) -> R {
+ move |acc, &elt| f(acc, elt)
+}
+
+#[stable(feature = "iter_copied", since = "1.36.0")]
+impl<'a, I, T: 'a> Iterator for Copied<I>
+where
+ I: Iterator<Item = &'a T>,
+ T: Copy,
+{
+ type Item = T;
+
+ fn next(&mut self) -> Option<T> {
+ self.it.next().copied()
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.it.size_hint()
+ }
+
+ fn try_fold<B, F, R>(&mut self, init: B, f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ self.it.try_fold(init, copy_try_fold(f))
+ }
+
+ fn fold<Acc, F>(self, init: Acc, f: F) -> Acc
+ where
+ F: FnMut(Acc, Self::Item) -> Acc,
+ {
+ self.it.fold(init, copy_fold(f))
+ }
+
+ fn nth(&mut self, n: usize) -> Option<T> {
+ self.it.nth(n).copied()
+ }
+
+ fn last(self) -> Option<T> {
+ self.it.last().copied()
+ }
+
+ fn count(self) -> usize {
+ self.it.count()
+ }
+
+ #[inline]
+ fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ self.it.advance_by(n)
+ }
+
+ unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> T
+ where
+ Self: TrustedRandomAccessNoCoerce,
+ {
+ // SAFETY: the caller must uphold the contract for
+ // `Iterator::__iterator_get_unchecked`.
+ *unsafe { try_get_unchecked(&mut self.it, idx) }
+ }
+}
+
+#[stable(feature = "iter_copied", since = "1.36.0")]
+impl<'a, I, T: 'a> DoubleEndedIterator for Copied<I>
+where
+ I: DoubleEndedIterator<Item = &'a T>,
+ T: Copy,
+{
+ fn next_back(&mut self) -> Option<T> {
+ self.it.next_back().copied()
+ }
+
+ fn try_rfold<B, F, R>(&mut self, init: B, f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ self.it.try_rfold(init, copy_try_fold(f))
+ }
+
+ fn rfold<Acc, F>(self, init: Acc, f: F) -> Acc
+ where
+ F: FnMut(Acc, Self::Item) -> Acc,
+ {
+ self.it.rfold(init, copy_fold(f))
+ }
+
+ #[inline]
+ fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ self.it.advance_back_by(n)
+ }
+}
+
+#[stable(feature = "iter_copied", since = "1.36.0")]
+impl<'a, I, T: 'a> ExactSizeIterator for Copied<I>
+where
+ I: ExactSizeIterator<Item = &'a T>,
+ T: Copy,
+{
+ fn len(&self) -> usize {
+ self.it.len()
+ }
+
+ fn is_empty(&self) -> bool {
+ self.it.is_empty()
+ }
+}
+
+#[stable(feature = "iter_copied", since = "1.36.0")]
+impl<'a, I, T: 'a> FusedIterator for Copied<I>
+where
+ I: FusedIterator<Item = &'a T>,
+ T: Copy,
+{
+}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<I> TrustedRandomAccess for Copied<I> where I: TrustedRandomAccess {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<I> TrustedRandomAccessNoCoerce for Copied<I>
+where
+ I: TrustedRandomAccessNoCoerce,
+{
+ const MAY_HAVE_SIDE_EFFECT: bool = I::MAY_HAVE_SIDE_EFFECT;
+}
+
+#[stable(feature = "iter_copied", since = "1.36.0")]
+unsafe impl<'a, I, T: 'a> TrustedLen for Copied<I>
+where
+ I: TrustedLen<Item = &'a T>,
+ T: Copy,
+{
+}
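
A matching sketch for `Copied`, which differs from `Cloned` in requiring `T: Copy`, so each item is a cheap bitwise copy (illustrative, not part of the patch):

```rust
fn main() {
    let xs = [1, 2, 3];
    // `copied()` dereferences each `&i32`; `T: Copy` guarantees a bitwise copy.
    let sum: i32 = xs.iter().copied().sum();
    assert_eq!(sum, 6);
}
```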
diff --git a/library/core/src/iter/adapters/cycle.rs b/library/core/src/iter/adapters/cycle.rs
new file mode 100644
index 000000000..02b593907
--- /dev/null
+++ b/library/core/src/iter/adapters/cycle.rs
@@ -0,0 +1,108 @@
+use crate::{iter::FusedIterator, ops::Try};
+
+/// An iterator that repeats endlessly.
+///
+/// This `struct` is created by the [`cycle`] method on [`Iterator`]. See its
+/// documentation for more.
+///
+/// [`cycle`]: Iterator::cycle
+/// [`Iterator`]: trait.Iterator.html
+#[derive(Clone, Debug)]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Cycle<I> {
+ orig: I,
+ iter: I,
+}
+
+impl<I: Clone> Cycle<I> {
+ pub(in crate::iter) fn new(iter: I) -> Cycle<I> {
+ Cycle { orig: iter.clone(), iter }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I> Iterator for Cycle<I>
+where
+ I: Clone + Iterator,
+{
+ type Item = <I as Iterator>::Item;
+
+ #[inline]
+ fn next(&mut self) -> Option<<I as Iterator>::Item> {
+ match self.iter.next() {
+ None => {
+ self.iter = self.orig.clone();
+ self.iter.next()
+ }
+ y => y,
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // the cycle iterator is either empty or infinite
+ match self.orig.size_hint() {
+ sz @ (0, Some(0)) => sz,
+ (0, _) => (0, None),
+ _ => (usize::MAX, None),
+ }
+ }
+
+ #[inline]
+ fn try_fold<Acc, F, R>(&mut self, mut acc: Acc, mut f: F) -> R
+ where
+ F: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+        // Fully iterate the current iterator. This is necessary because
+        // `self.iter` may be empty even when `self.orig` isn't.
+ acc = self.iter.try_fold(acc, &mut f)?;
+ self.iter = self.orig.clone();
+
+        // Complete a full cycle, keeping track of whether the cycled
+        // iterator is empty or not. We need to return early in case
+        // of an empty iterator to prevent an infinite loop.
+ let mut is_empty = true;
+ acc = self.iter.try_fold(acc, |acc, x| {
+ is_empty = false;
+ f(acc, x)
+ })?;
+
+ if is_empty {
+ return try { acc };
+ }
+
+ loop {
+ self.iter = self.orig.clone();
+ acc = self.iter.try_fold(acc, &mut f)?;
+ }
+ }
+
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ let mut rem = n;
+ match self.iter.advance_by(rem) {
+ ret @ Ok(_) => return ret,
+ Err(advanced) => rem -= advanced,
+ }
+
+ while rem > 0 {
+ self.iter = self.orig.clone();
+ match self.iter.advance_by(rem) {
+ ret @ Ok(_) => return ret,
+ Err(0) => return Err(n - rem),
+ Err(advanced) => rem -= advanced,
+ }
+ }
+
+ Ok(())
+ }
+
+ // No `fold` override, because `fold` doesn't make much sense for `Cycle`,
+ // and we can't do anything better than the default.
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<I> FusedIterator for Cycle<I> where I: Clone + Iterator {}
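
A short sketch of `Cycle`, including the empty-or-infinite property that the `size_hint` and `try_fold` implementations above are careful about (illustrative, not part of the patch):

```rust
fn main() {
    // A non-empty source repeats forever; `take` bounds the traversal.
    let repeated: Vec<i32> = [1, 2].iter().copied().cycle().take(5).collect();
    assert_eq!(repeated, [1, 2, 1, 2, 1]);

    // An empty source stays empty rather than looping forever.
    assert_eq!(std::iter::empty::<i32>().cycle().next(), None);
}
```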
diff --git a/library/core/src/iter/adapters/enumerate.rs b/library/core/src/iter/adapters/enumerate.rs
new file mode 100644
index 000000000..14a126951
--- /dev/null
+++ b/library/core/src/iter/adapters/enumerate.rs
@@ -0,0 +1,266 @@
+use crate::iter::adapters::{
+ zip::try_get_unchecked, SourceIter, TrustedRandomAccess, TrustedRandomAccessNoCoerce,
+};
+use crate::iter::{FusedIterator, InPlaceIterable, TrustedLen};
+use crate::ops::Try;
+
+/// An iterator that yields the current count and the element during iteration.
+///
+/// This `struct` is created by the [`enumerate`] method on [`Iterator`]. See its
+/// documentation for more.
+///
+/// [`enumerate`]: Iterator::enumerate
+/// [`Iterator`]: trait.Iterator.html
+#[derive(Clone, Debug)]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Enumerate<I> {
+ iter: I,
+ count: usize,
+}
+impl<I> Enumerate<I> {
+ pub(in crate::iter) fn new(iter: I) -> Enumerate<I> {
+ Enumerate { iter, count: 0 }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I> Iterator for Enumerate<I>
+where
+ I: Iterator,
+{
+ type Item = (usize, <I as Iterator>::Item);
+
+ /// # Overflow Behavior
+ ///
+    /// The method does not guard against overflows, so enumerating more than
+    /// `usize::MAX` elements either produces the wrong result or panics. If
+ /// debug assertions are enabled, a panic is guaranteed.
+ ///
+ /// # Panics
+ ///
+ /// Might panic if the index of the element overflows a `usize`.
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ fn next(&mut self) -> Option<(usize, <I as Iterator>::Item)> {
+ let a = self.iter.next()?;
+ let i = self.count;
+ self.count += 1;
+ Some((i, a))
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ fn nth(&mut self, n: usize) -> Option<(usize, I::Item)> {
+ let a = self.iter.nth(n)?;
+ let i = self.count + n;
+ self.count = i + 1;
+ Some((i, a))
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.iter.count()
+ }
+
+ #[inline]
+ fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ #[inline]
+ fn enumerate<'a, T, Acc, R>(
+ count: &'a mut usize,
+ mut fold: impl FnMut(Acc, (usize, T)) -> R + 'a,
+ ) -> impl FnMut(Acc, T) -> R + 'a {
+ #[rustc_inherit_overflow_checks]
+ move |acc, item| {
+ let acc = fold(acc, (*count, item));
+ *count += 1;
+ acc
+ }
+ }
+
+ self.iter.try_fold(init, enumerate(&mut self.count, fold))
+ }
+
+ #[inline]
+ fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ #[inline]
+ fn enumerate<T, Acc>(
+ mut count: usize,
+ mut fold: impl FnMut(Acc, (usize, T)) -> Acc,
+ ) -> impl FnMut(Acc, T) -> Acc {
+ #[rustc_inherit_overflow_checks]
+ move |acc, item| {
+ let acc = fold(acc, (count, item));
+ count += 1;
+ acc
+ }
+ }
+
+ self.iter.fold(init, enumerate(self.count, fold))
+ }
+
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ match self.iter.advance_by(n) {
+ ret @ Ok(_) => {
+ self.count += n;
+ ret
+ }
+ ret @ Err(advanced) => {
+ self.count += advanced;
+ ret
+ }
+ }
+ }
+
+ #[rustc_inherit_overflow_checks]
+ #[inline]
+ unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> <Self as Iterator>::Item
+ where
+ Self: TrustedRandomAccessNoCoerce,
+ {
+ // SAFETY: the caller must uphold the contract for
+ // `Iterator::__iterator_get_unchecked`.
+ let value = unsafe { try_get_unchecked(&mut self.iter, idx) };
+ (self.count + idx, value)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I> DoubleEndedIterator for Enumerate<I>
+where
+ I: ExactSizeIterator + DoubleEndedIterator,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<(usize, <I as Iterator>::Item)> {
+ let a = self.iter.next_back()?;
+ let len = self.iter.len();
+ // Can safely add, `ExactSizeIterator` promises that the number of
+ // elements fits into a `usize`.
+ Some((self.count + len, a))
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<(usize, <I as Iterator>::Item)> {
+ let a = self.iter.nth_back(n)?;
+ let len = self.iter.len();
+ // Can safely add, `ExactSizeIterator` promises that the number of
+ // elements fits into a `usize`.
+ Some((self.count + len, a))
+ }
+
+ #[inline]
+ fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ // Can safely add and subtract the count, as `ExactSizeIterator` promises
+ // that the number of elements fits into a `usize`.
+ fn enumerate<T, Acc, R>(
+ mut count: usize,
+ mut fold: impl FnMut(Acc, (usize, T)) -> R,
+ ) -> impl FnMut(Acc, T) -> R {
+ move |acc, item| {
+ count -= 1;
+ fold(acc, (count, item))
+ }
+ }
+
+ let count = self.count + self.iter.len();
+ self.iter.try_rfold(init, enumerate(count, fold))
+ }
+
+ #[inline]
+ fn rfold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ // Can safely add and subtract the count, as `ExactSizeIterator` promises
+ // that the number of elements fits into a `usize`.
+ fn enumerate<T, Acc>(
+ mut count: usize,
+ mut fold: impl FnMut(Acc, (usize, T)) -> Acc,
+ ) -> impl FnMut(Acc, T) -> Acc {
+ move |acc, item| {
+ count -= 1;
+ fold(acc, (count, item))
+ }
+ }
+
+ let count = self.count + self.iter.len();
+ self.iter.rfold(init, enumerate(count, fold))
+ }
+
+ #[inline]
+ fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+        // We do not need to update the count, since it only tallies the number of items
+        // consumed from the front; consuming items from the back can never reduce that.
+ self.iter.advance_back_by(n)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I> ExactSizeIterator for Enumerate<I>
+where
+ I: ExactSizeIterator,
+{
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+
+ fn is_empty(&self) -> bool {
+ self.iter.is_empty()
+ }
+}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<I> TrustedRandomAccess for Enumerate<I> where I: TrustedRandomAccess {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<I> TrustedRandomAccessNoCoerce for Enumerate<I>
+where
+ I: TrustedRandomAccessNoCoerce,
+{
+ const MAY_HAVE_SIDE_EFFECT: bool = I::MAY_HAVE_SIDE_EFFECT;
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<I> FusedIterator for Enumerate<I> where I: FusedIterator {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<I> TrustedLen for Enumerate<I> where I: TrustedLen {}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I> SourceIter for Enumerate<I>
+where
+ I: SourceIter,
+{
+ type Source = I::Source;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut I::Source {
+ // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+ unsafe { SourceIter::as_inner(&mut self.iter) }
+ }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I: InPlaceIterable> InPlaceIterable for Enumerate<I> {}
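
A sketch of `Enumerate`: the count starts at zero and, per the comment in `advance_back_by` above, only tracks items consumed from the front (illustrative, not part of the patch):

```rust
fn main() {
    let letters = ['a', 'b', 'c'];
    // Each element is paired with its zero-based position.
    let pairs: Vec<(usize, &char)> = letters.iter().enumerate().collect();
    assert_eq!(pairs, [(0, &'a'), (1, &'b'), (2, &'c')]);
}
```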
diff --git a/library/core/src/iter/adapters/filter.rs b/library/core/src/iter/adapters/filter.rs
new file mode 100644
index 000000000..a0afaa326
--- /dev/null
+++ b/library/core/src/iter/adapters/filter.rs
@@ -0,0 +1,152 @@
+use crate::fmt;
+use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable};
+use crate::ops::Try;
+
+/// An iterator that filters the elements of `iter` with `predicate`.
+///
+/// This `struct` is created by the [`filter`] method on [`Iterator`]. See its
+/// documentation for more.
+///
+/// [`filter`]: Iterator::filter
+/// [`Iterator`]: trait.Iterator.html
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Clone)]
+pub struct Filter<I, P> {
+ // Used for `SplitWhitespace` and `SplitAsciiWhitespace` `as_str` methods
+ pub(crate) iter: I,
+ predicate: P,
+}
+impl<I, P> Filter<I, P> {
+ pub(in crate::iter) fn new(iter: I, predicate: P) -> Filter<I, P> {
+ Filter { iter, predicate }
+ }
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<I: fmt::Debug, P> fmt::Debug for Filter<I, P> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Filter").field("iter", &self.iter).finish()
+ }
+}
+
+fn filter_fold<T, Acc>(
+ mut predicate: impl FnMut(&T) -> bool,
+ mut fold: impl FnMut(Acc, T) -> Acc,
+) -> impl FnMut(Acc, T) -> Acc {
+ move |acc, item| if predicate(&item) { fold(acc, item) } else { acc }
+}
+
+fn filter_try_fold<'a, T, Acc, R: Try<Output = Acc>>(
+ predicate: &'a mut impl FnMut(&T) -> bool,
+ mut fold: impl FnMut(Acc, T) -> R + 'a,
+) -> impl FnMut(Acc, T) -> R + 'a {
+ move |acc, item| if predicate(&item) { fold(acc, item) } else { try { acc } }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: Iterator, P> Iterator for Filter<I, P>
+where
+ P: FnMut(&I::Item) -> bool,
+{
+ type Item = I::Item;
+
+ #[inline]
+ fn next(&mut self) -> Option<I::Item> {
+ self.iter.find(&mut self.predicate)
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (_, upper) = self.iter.size_hint();
+ (0, upper) // can't know a lower bound, due to the predicate
+ }
+
+    // This special case allows the compiler to make `.filter(_).count()`
+    // branchless. Barring perfect branch prediction (which is unattainable in
+    // the general case), this will be much faster in >90% of cases (covering
+    // virtually all real workloads) and only a tiny bit slower in the rest.
+ //
+ // Having this specialization thus allows us to write `.filter(p).count()`
+ // where we would otherwise write `.map(|x| p(x) as usize).sum()`, which is
+    // less readable and also less backwards-compatible with Rust before 1.10.
+ //
+    // Using the branchless version will also simplify the LLVM bytecode, thus
+ // leaving more budget for LLVM optimizations.
+ #[inline]
+ fn count(self) -> usize {
+ #[inline]
+ fn to_usize<T>(mut predicate: impl FnMut(&T) -> bool) -> impl FnMut(T) -> usize {
+ move |x| predicate(&x) as usize
+ }
+
+ self.iter.map(to_usize(self.predicate)).sum()
+ }
+
+ #[inline]
+ fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ self.iter.try_fold(init, filter_try_fold(&mut self.predicate, fold))
+ }
+
+ #[inline]
+ fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ self.iter.fold(init, filter_fold(self.predicate, fold))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: DoubleEndedIterator, P> DoubleEndedIterator for Filter<I, P>
+where
+ P: FnMut(&I::Item) -> bool,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<I::Item> {
+ self.iter.rfind(&mut self.predicate)
+ }
+
+ #[inline]
+ fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ self.iter.try_rfold(init, filter_try_fold(&mut self.predicate, fold))
+ }
+
+ #[inline]
+ fn rfold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ self.iter.rfold(init, filter_fold(self.predicate, fold))
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<I: FusedIterator, P> FusedIterator for Filter<I, P> where P: FnMut(&I::Item) -> bool {}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<P, I> SourceIter for Filter<I, P>
+where
+ I: SourceIter,
+{
+ type Source = I::Source;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut I::Source {
+ // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+ unsafe { SourceIter::as_inner(&mut self.iter) }
+ }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I: InPlaceIterable, P> InPlaceIterable for Filter<I, P> where P: FnMut(&I::Item) -> bool {}
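
A sketch of the branchless `count` specialization described in the comment above: counting matches is equivalent to summing the predicate cast to `usize` (illustrative, not part of the patch):

```rust
fn main() {
    let xs = [1, 2, 3, 4, 5, 6];
    let evens = xs.iter().filter(|&&x| x % 2 == 0).count();
    // The specialization effectively rewrites the count as this branchless sum.
    let by_sum: usize = xs.iter().map(|&x| (x % 2 == 0) as usize).sum();
    assert_eq!(evens, by_sum);
}
```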
diff --git a/library/core/src/iter/adapters/filter_map.rs b/library/core/src/iter/adapters/filter_map.rs
new file mode 100644
index 000000000..e0d665c9e
--- /dev/null
+++ b/library/core/src/iter/adapters/filter_map.rs
@@ -0,0 +1,149 @@
+use crate::fmt;
+use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable};
+use crate::ops::{ControlFlow, Try};
+
+/// An iterator that uses `f` to both filter and map elements from `iter`.
+///
+/// This `struct` is created by the [`filter_map`] method on [`Iterator`]. See its
+/// documentation for more.
+///
+/// [`filter_map`]: Iterator::filter_map
+/// [`Iterator`]: trait.Iterator.html
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Clone)]
+pub struct FilterMap<I, F> {
+ iter: I,
+ f: F,
+}
+impl<I, F> FilterMap<I, F> {
+ pub(in crate::iter) fn new(iter: I, f: F) -> FilterMap<I, F> {
+ FilterMap { iter, f }
+ }
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<I: fmt::Debug, F> fmt::Debug for FilterMap<I, F> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("FilterMap").field("iter", &self.iter).finish()
+ }
+}
+
+fn filter_map_fold<T, B, Acc>(
+ mut f: impl FnMut(T) -> Option<B>,
+ mut fold: impl FnMut(Acc, B) -> Acc,
+) -> impl FnMut(Acc, T) -> Acc {
+ move |acc, item| match f(item) {
+ Some(x) => fold(acc, x),
+ None => acc,
+ }
+}
+
+fn filter_map_try_fold<'a, T, B, Acc, R: Try<Output = Acc>>(
+ f: &'a mut impl FnMut(T) -> Option<B>,
+ mut fold: impl FnMut(Acc, B) -> R + 'a,
+) -> impl FnMut(Acc, T) -> R + 'a {
+ move |acc, item| match f(item) {
+ Some(x) => fold(acc, x),
+ None => try { acc },
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B, I: Iterator, F> Iterator for FilterMap<I, F>
+where
+ F: FnMut(I::Item) -> Option<B>,
+{
+ type Item = B;
+
+ #[inline]
+ fn next(&mut self) -> Option<B> {
+ self.iter.find_map(&mut self.f)
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (_, upper) = self.iter.size_hint();
+ (0, upper) // can't know a lower bound, due to the predicate
+ }
+
+ #[inline]
+ fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ self.iter.try_fold(init, filter_map_try_fold(&mut self.f, fold))
+ }
+
+ #[inline]
+ fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ self.iter.fold(init, filter_map_fold(self.f, fold))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B, I: DoubleEndedIterator, F> DoubleEndedIterator for FilterMap<I, F>
+where
+ F: FnMut(I::Item) -> Option<B>,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<B> {
+ #[inline]
+ fn find<T, B>(
+ f: &mut impl FnMut(T) -> Option<B>,
+ ) -> impl FnMut((), T) -> ControlFlow<B> + '_ {
+ move |(), x| match f(x) {
+ Some(x) => ControlFlow::Break(x),
+ None => ControlFlow::CONTINUE,
+ }
+ }
+
+ self.iter.try_rfold((), find(&mut self.f)).break_value()
+ }
+
+ #[inline]
+ fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ self.iter.try_rfold(init, filter_map_try_fold(&mut self.f, fold))
+ }
+
+ #[inline]
+ fn rfold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ self.iter.rfold(init, filter_map_fold(self.f, fold))
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<B, I: FusedIterator, F> FusedIterator for FilterMap<I, F> where F: FnMut(I::Item) -> Option<B> {}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I, F> SourceIter for FilterMap<I, F>
+where
+ I: SourceIter,
+{
+ type Source = I::Source;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut I::Source {
+ // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+ unsafe { SourceIter::as_inner(&mut self.iter) }
+ }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<B, I: InPlaceIterable, F> InPlaceIterable for FilterMap<I, F> where
+ F: FnMut(I::Item) -> Option<B>
+{
+}
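
A sketch of `FilterMap`, which runs its closure once per item and keeps only the `Some` results (illustrative, not part of the patch):

```rust
fn main() {
    let inputs = ["1", "two", "3"];
    // Items whose parse fails are dropped; successful parses are unwrapped.
    let parsed: Vec<i32> = inputs.iter().filter_map(|s| s.parse().ok()).collect();
    assert_eq!(parsed, [1, 3]);
}
```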
diff --git a/library/core/src/iter/adapters/flatten.rs b/library/core/src/iter/adapters/flatten.rs
new file mode 100644
index 000000000..15a120e35
--- /dev/null
+++ b/library/core/src/iter/adapters/flatten.rs
@@ -0,0 +1,599 @@
+use crate::fmt;
+use crate::iter::{DoubleEndedIterator, Fuse, FusedIterator, Iterator, Map, TrustedLen};
+use crate::ops::Try;
+
+/// An iterator that maps each element to an iterator, and yields the elements
+/// of the produced iterators.
+///
+/// This `struct` is created by [`Iterator::flat_map`]. See its documentation
+/// for more.
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct FlatMap<I, U: IntoIterator, F> {
+ inner: FlattenCompat<Map<I, F>, <U as IntoIterator>::IntoIter>,
+}
+
+impl<I: Iterator, U: IntoIterator, F: FnMut(I::Item) -> U> FlatMap<I, U, F> {
+ pub(in crate::iter) fn new(iter: I, f: F) -> FlatMap<I, U, F> {
+ FlatMap { inner: FlattenCompat::new(iter.map(f)) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: Clone, U, F: Clone> Clone for FlatMap<I, U, F>
+where
+ U: Clone + IntoIterator<IntoIter: Clone>,
+{
+ fn clone(&self) -> Self {
+ FlatMap { inner: self.inner.clone() }
+ }
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<I: fmt::Debug, U, F> fmt::Debug for FlatMap<I, U, F>
+where
+ U: IntoIterator<IntoIter: fmt::Debug>,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("FlatMap").field("inner", &self.inner).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: Iterator, U: IntoIterator, F> Iterator for FlatMap<I, U, F>
+where
+ F: FnMut(I::Item) -> U,
+{
+ type Item = U::Item;
+
+ #[inline]
+ fn next(&mut self) -> Option<U::Item> {
+ self.inner.next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+
+ #[inline]
+ fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ self.inner.try_fold(init, fold)
+ }
+
+ #[inline]
+ fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ self.inner.fold(init, fold)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: DoubleEndedIterator, U, F> DoubleEndedIterator for FlatMap<I, U, F>
+where
+ F: FnMut(I::Item) -> U,
+ U: IntoIterator<IntoIter: DoubleEndedIterator>,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<U::Item> {
+ self.inner.next_back()
+ }
+
+ #[inline]
+ fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ self.inner.try_rfold(init, fold)
+ }
+
+ #[inline]
+ fn rfold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ self.inner.rfold(init, fold)
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<I, U, F> FusedIterator for FlatMap<I, U, F>
+where
+ I: FusedIterator,
+ U: IntoIterator,
+ F: FnMut(I::Item) -> U,
+{
+}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T, I, F, const N: usize> TrustedLen for FlatMap<I, [T; N], F>
+where
+ I: TrustedLen,
+ F: FnMut(I::Item) -> [T; N],
+{
+}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<'a, T, I, F, const N: usize> TrustedLen for FlatMap<I, &'a [T; N], F>
+where
+ I: TrustedLen,
+ F: FnMut(I::Item) -> &'a [T; N],
+{
+}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<'a, T, I, F, const N: usize> TrustedLen for FlatMap<I, &'a mut [T; N], F>
+where
+ I: TrustedLen,
+ F: FnMut(I::Item) -> &'a mut [T; N],
+{
+}
+
+/// An iterator that flattens one level of nesting in an iterator of things
+/// that can be turned into iterators.
+///
+/// This `struct` is created by the [`flatten`] method on [`Iterator`]. See its
+/// documentation for more.
+///
+/// [`flatten`]: Iterator::flatten()
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "iterator_flatten", since = "1.29.0")]
+pub struct Flatten<I: Iterator<Item: IntoIterator>> {
+ inner: FlattenCompat<I, <I::Item as IntoIterator>::IntoIter>,
+}
+
+impl<I: Iterator<Item: IntoIterator>> Flatten<I> {
+ pub(in super::super) fn new(iter: I) -> Flatten<I> {
+ Flatten { inner: FlattenCompat::new(iter) }
+ }
+}
+
+#[stable(feature = "iterator_flatten", since = "1.29.0")]
+impl<I, U> fmt::Debug for Flatten<I>
+where
+ I: fmt::Debug + Iterator<Item: IntoIterator<IntoIter = U, Item = U::Item>>,
+ U: fmt::Debug + Iterator,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Flatten").field("inner", &self.inner).finish()
+ }
+}
+
+#[stable(feature = "iterator_flatten", since = "1.29.0")]
+impl<I, U> Clone for Flatten<I>
+where
+ I: Clone + Iterator<Item: IntoIterator<IntoIter = U, Item = U::Item>>,
+ U: Clone + Iterator,
+{
+ fn clone(&self) -> Self {
+ Flatten { inner: self.inner.clone() }
+ }
+}
+
+#[stable(feature = "iterator_flatten", since = "1.29.0")]
+impl<I, U> Iterator for Flatten<I>
+where
+ I: Iterator<Item: IntoIterator<IntoIter = U, Item = U::Item>>,
+ U: Iterator,
+{
+ type Item = U::Item;
+
+ #[inline]
+ fn next(&mut self) -> Option<U::Item> {
+ self.inner.next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+
+ #[inline]
+ fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ self.inner.try_fold(init, fold)
+ }
+
+ #[inline]
+ fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ self.inner.fold(init, fold)
+ }
+}
+
+#[stable(feature = "iterator_flatten", since = "1.29.0")]
+impl<I, U> DoubleEndedIterator for Flatten<I>
+where
+ I: DoubleEndedIterator<Item: IntoIterator<IntoIter = U, Item = U::Item>>,
+ U: DoubleEndedIterator,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<U::Item> {
+ self.inner.next_back()
+ }
+
+ #[inline]
+ fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ self.inner.try_rfold(init, fold)
+ }
+
+ #[inline]
+ fn rfold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ self.inner.rfold(init, fold)
+ }
+}
+
+#[stable(feature = "iterator_flatten", since = "1.29.0")]
+impl<I, U> FusedIterator for Flatten<I>
+where
+ I: FusedIterator<Item: IntoIterator<IntoIter = U, Item = U::Item>>,
+ U: Iterator,
+{
+}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<I> TrustedLen for Flatten<I>
+where
+ I: TrustedLen,
+ <I as Iterator>::Item: TrustedConstSize,
+{
+}
+
+/// The real logic of both `Flatten` and `FlatMap`, which simply delegate to
+/// this type.
+#[derive(Clone, Debug)]
+struct FlattenCompat<I, U> {
+ iter: Fuse<I>,
+ frontiter: Option<U>,
+ backiter: Option<U>,
+}
+impl<I, U> FlattenCompat<I, U>
+where
+ I: Iterator,
+{
+ /// Adapts an iterator by flattening it, for use in `flatten()` and `flat_map()`.
+ fn new(iter: I) -> FlattenCompat<I, U> {
+ FlattenCompat { iter: iter.fuse(), frontiter: None, backiter: None }
+ }
+}
+
+impl<I, U> Iterator for FlattenCompat<I, U>
+where
+ I: Iterator<Item: IntoIterator<IntoIter = U, Item = U::Item>>,
+ U: Iterator,
+{
+ type Item = U::Item;
+
+ #[inline]
+ fn next(&mut self) -> Option<U::Item> {
+ loop {
+ if let elt @ Some(_) = and_then_or_clear(&mut self.frontiter, Iterator::next) {
+ return elt;
+ }
+ match self.iter.next() {
+ None => return and_then_or_clear(&mut self.backiter, Iterator::next),
+ Some(inner) => self.frontiter = Some(inner.into_iter()),
+ }
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (flo, fhi) = self.frontiter.as_ref().map_or((0, Some(0)), U::size_hint);
+ let (blo, bhi) = self.backiter.as_ref().map_or((0, Some(0)), U::size_hint);
+ let lo = flo.saturating_add(blo);
+
+ if let Some(fixed_size) = <<I as Iterator>::Item as ConstSizeIntoIterator>::size() {
+ let (lower, upper) = self.iter.size_hint();
+
+ let lower = lower.saturating_mul(fixed_size).saturating_add(lo);
+ let upper =
+ try { fhi?.checked_add(bhi?)?.checked_add(fixed_size.checked_mul(upper?)?)? };
+
+ return (lower, upper);
+ }
+
+ match (self.iter.size_hint(), fhi, bhi) {
+ ((0, Some(0)), Some(a), Some(b)) => (lo, a.checked_add(b)),
+ _ => (lo, None),
+ }
+ }
+
+ #[inline]
+ fn try_fold<Acc, Fold, R>(&mut self, mut init: Acc, mut fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ #[inline]
+ fn flatten<'a, T: IntoIterator, Acc, R: Try<Output = Acc>>(
+ frontiter: &'a mut Option<T::IntoIter>,
+ fold: &'a mut impl FnMut(Acc, T::Item) -> R,
+ ) -> impl FnMut(Acc, T) -> R + 'a {
+ move |acc, x| {
+ let mut mid = x.into_iter();
+ let r = mid.try_fold(acc, &mut *fold);
+ *frontiter = Some(mid);
+ r
+ }
+ }
+
+ if let Some(ref mut front) = self.frontiter {
+ init = front.try_fold(init, &mut fold)?;
+ }
+ self.frontiter = None;
+
+ init = self.iter.try_fold(init, flatten(&mut self.frontiter, &mut fold))?;
+ self.frontiter = None;
+
+ if let Some(ref mut back) = self.backiter {
+ init = back.try_fold(init, &mut fold)?;
+ }
+ self.backiter = None;
+
+ try { init }
+ }
+
+ #[inline]
+ fn fold<Acc, Fold>(self, mut init: Acc, mut fold: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ #[inline]
+ fn flatten<T: IntoIterator, Acc>(
+ fold: &mut impl FnMut(Acc, T::Item) -> Acc,
+ ) -> impl FnMut(Acc, T) -> Acc + '_ {
+ move |acc, x| x.into_iter().fold(acc, &mut *fold)
+ }
+
+ if let Some(front) = self.frontiter {
+ init = front.fold(init, &mut fold);
+ }
+
+ init = self.iter.fold(init, flatten(&mut fold));
+
+ if let Some(back) = self.backiter {
+ init = back.fold(init, &mut fold);
+ }
+
+ init
+ }
+
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ let mut rem = n;
+ loop {
+ if let Some(ref mut front) = self.frontiter {
+ match front.advance_by(rem) {
+ ret @ Ok(_) => return ret,
+ Err(advanced) => rem -= advanced,
+ }
+ }
+ self.frontiter = match self.iter.next() {
+ Some(iterable) => Some(iterable.into_iter()),
+ _ => break,
+ }
+ }
+
+ self.frontiter = None;
+
+ if let Some(ref mut back) = self.backiter {
+ match back.advance_by(rem) {
+ ret @ Ok(_) => return ret,
+ Err(advanced) => rem -= advanced,
+ }
+ }
+
+ if rem > 0 {
+ return Err(n - rem);
+ }
+
+ self.backiter = None;
+
+ Ok(())
+ }
+}
+
+impl<I, U> DoubleEndedIterator for FlattenCompat<I, U>
+where
+ I: DoubleEndedIterator<Item: IntoIterator<IntoIter = U, Item = U::Item>>,
+ U: DoubleEndedIterator,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<U::Item> {
+ loop {
+ if let elt @ Some(_) = and_then_or_clear(&mut self.backiter, |b| b.next_back()) {
+ return elt;
+ }
+ match self.iter.next_back() {
+ None => return and_then_or_clear(&mut self.frontiter, |f| f.next_back()),
+ Some(inner) => self.backiter = Some(inner.into_iter()),
+ }
+ }
+ }
+
+ #[inline]
+ fn try_rfold<Acc, Fold, R>(&mut self, mut init: Acc, mut fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ #[inline]
+ fn flatten<'a, T: IntoIterator, Acc, R: Try<Output = Acc>>(
+ backiter: &'a mut Option<T::IntoIter>,
+ fold: &'a mut impl FnMut(Acc, T::Item) -> R,
+ ) -> impl FnMut(Acc, T) -> R + 'a
+ where
+ T::IntoIter: DoubleEndedIterator,
+ {
+ move |acc, x| {
+ let mut mid = x.into_iter();
+ let r = mid.try_rfold(acc, &mut *fold);
+ *backiter = Some(mid);
+ r
+ }
+ }
+
+ if let Some(ref mut back) = self.backiter {
+ init = back.try_rfold(init, &mut fold)?;
+ }
+ self.backiter = None;
+
+ init = self.iter.try_rfold(init, flatten(&mut self.backiter, &mut fold))?;
+ self.backiter = None;
+
+ if let Some(ref mut front) = self.frontiter {
+ init = front.try_rfold(init, &mut fold)?;
+ }
+ self.frontiter = None;
+
+ try { init }
+ }
+
+ #[inline]
+ fn rfold<Acc, Fold>(self, mut init: Acc, mut fold: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ #[inline]
+ fn flatten<T: IntoIterator, Acc>(
+ fold: &mut impl FnMut(Acc, T::Item) -> Acc,
+ ) -> impl FnMut(Acc, T) -> Acc + '_
+ where
+ T::IntoIter: DoubleEndedIterator,
+ {
+ move |acc, x| x.into_iter().rfold(acc, &mut *fold)
+ }
+
+ if let Some(back) = self.backiter {
+ init = back.rfold(init, &mut fold);
+ }
+
+ init = self.iter.rfold(init, flatten(&mut fold));
+
+ if let Some(front) = self.frontiter {
+ init = front.rfold(init, &mut fold);
+ }
+
+ init
+ }
+
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ let mut rem = n;
+ loop {
+ if let Some(ref mut back) = self.backiter {
+ match back.advance_back_by(rem) {
+ ret @ Ok(_) => return ret,
+ Err(advanced) => rem -= advanced,
+ }
+ }
+ match self.iter.next_back() {
+ Some(iterable) => self.backiter = Some(iterable.into_iter()),
+ _ => break,
+ }
+ }
+
+ self.backiter = None;
+
+ if let Some(ref mut front) = self.frontiter {
+ match front.advance_back_by(rem) {
+ ret @ Ok(_) => return ret,
+ Err(advanced) => rem -= advanced,
+ }
+ }
+
+ if rem > 0 {
+ return Err(n - rem);
+ }
+
+ self.frontiter = None;
+
+ Ok(())
+ }
+}
+
+trait ConstSizeIntoIterator: IntoIterator {
+ // FIXME(#31844): convert to an associated const once specialization supports that
+ fn size() -> Option<usize>;
+}
+
+impl<T> ConstSizeIntoIterator for T
+where
+ T: IntoIterator,
+{
+ #[inline]
+ default fn size() -> Option<usize> {
+ None
+ }
+}
+
+impl<T, const N: usize> ConstSizeIntoIterator for [T; N] {
+ #[inline]
+ fn size() -> Option<usize> {
+ Some(N)
+ }
+}
+
+impl<T, const N: usize> ConstSizeIntoIterator for &[T; N] {
+ #[inline]
+ fn size() -> Option<usize> {
+ Some(N)
+ }
+}
+
+impl<T, const N: usize> ConstSizeIntoIterator for &mut [T; N] {
+ #[inline]
+ fn size() -> Option<usize> {
+ Some(N)
+ }
+}
+
+#[doc(hidden)]
+#[unstable(feature = "std_internals", issue = "none")]
+// FIXME(#20400): Instead of this helper trait there should be multiple impl TrustedLen for Flatten<>
+// blocks with different bounds on Iterator::Item, but the compiler erroneously considers them overlapping.
+pub unsafe trait TrustedConstSize: IntoIterator {}
+
+#[unstable(feature = "std_internals", issue = "none")]
+unsafe impl<T, const N: usize> TrustedConstSize for [T; N] {}
+#[unstable(feature = "std_internals", issue = "none")]
+unsafe impl<T, const N: usize> TrustedConstSize for &'_ [T; N] {}
+#[unstable(feature = "std_internals", issue = "none")]
+unsafe impl<T, const N: usize> TrustedConstSize for &'_ mut [T; N] {}
+
+#[inline]
+fn and_then_or_clear<T, U>(opt: &mut Option<T>, f: impl FnOnce(&mut T) -> Option<U>) -> Option<U> {
+ let x = f(opt.as_mut()?);
+ if x.is_none() {
+ *opt = None;
+ }
+ x
+}
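
A sketch of the `ConstSizeIntoIterator` specialization above: when the inner items are fixed-size arrays, `Flatten` can report an exact `size_hint` (illustrative, and assumes a toolchain containing this specialization):

```rust
fn main() {
    let nested = vec![[1, 2], [3, 4], [5, 6]];
    let flat = nested.into_iter().flatten();
    // Three arrays of compile-time length 2 give an exact hint of 6.
    assert_eq!(flat.size_hint(), (6, Some(6)));
    assert_eq!(flat.collect::<Vec<_>>(), [1, 2, 3, 4, 5, 6]);
}
```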
diff --git a/library/core/src/iter/adapters/fuse.rs b/library/core/src/iter/adapters/fuse.rs
new file mode 100644
index 000000000..c93144542
--- /dev/null
+++ b/library/core/src/iter/adapters/fuse.rs
@@ -0,0 +1,413 @@
+use crate::intrinsics;
+use crate::iter::adapters::zip::try_get_unchecked;
+use crate::iter::{
+ DoubleEndedIterator, ExactSizeIterator, FusedIterator, TrustedLen, TrustedRandomAccess,
+ TrustedRandomAccessNoCoerce,
+};
+use crate::ops::Try;
+
+/// An iterator that yields `None` forever after the underlying iterator
+/// yields `None` once.
+///
+/// This `struct` is created by [`Iterator::fuse`]. See its documentation
+/// for more.
+#[derive(Clone, Debug)]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Fuse<I> {
+ // NOTE: for `I: FusedIterator`, we never bother setting `None`, but
+ // we still have to be prepared for that state due to variance.
+ // See rust-lang/rust#85863
+ iter: Option<I>,
+}
+impl<I> Fuse<I> {
+ pub(in crate::iter) fn new(iter: I) -> Fuse<I> {
+ Fuse { iter: Some(iter) }
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<I> FusedIterator for Fuse<I> where I: Iterator {}
+
+// Any specialized implementation here is made internal
+// to avoid exposing default fns outside this trait.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I> Iterator for Fuse<I>
+where
+ I: Iterator,
+{
+ type Item = <I as Iterator>::Item;
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ FuseImpl::next(self)
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<I::Item> {
+ FuseImpl::nth(self, n)
+ }
+
+ #[inline]
+ fn last(self) -> Option<Self::Item> {
+ match self.iter {
+ Some(iter) => iter.last(),
+ None => None,
+ }
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ match self.iter {
+ Some(iter) => iter.count(),
+ None => 0,
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ match self.iter {
+ Some(ref iter) => iter.size_hint(),
+ None => (0, Some(0)),
+ }
+ }
+
+ #[inline]
+ fn try_fold<Acc, Fold, R>(&mut self, acc: Acc, fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ FuseImpl::try_fold(self, acc, fold)
+ }
+
+ #[inline]
+ fn fold<Acc, Fold>(self, mut acc: Acc, fold: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ if let Some(iter) = self.iter {
+ acc = iter.fold(acc, fold);
+ }
+ acc
+ }
+
+ #[inline]
+ fn find<P>(&mut self, predicate: P) -> Option<Self::Item>
+ where
+ P: FnMut(&Self::Item) -> bool,
+ {
+ FuseImpl::find(self, predicate)
+ }
+
+ #[inline]
+ unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item
+ where
+ Self: TrustedRandomAccessNoCoerce,
+ {
+ match self.iter {
+ // SAFETY: the caller must uphold the contract for
+ // `Iterator::__iterator_get_unchecked`.
+ Some(ref mut iter) => unsafe { try_get_unchecked(iter, idx) },
+ // SAFETY: the caller asserts there is an item at `i`, so we're not exhausted.
+ None => unsafe { intrinsics::unreachable() },
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I> DoubleEndedIterator for Fuse<I>
+where
+ I: DoubleEndedIterator,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<<I as Iterator>::Item> {
+ FuseImpl::next_back(self)
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<<I as Iterator>::Item> {
+ FuseImpl::nth_back(self, n)
+ }
+
+ #[inline]
+ fn try_rfold<Acc, Fold, R>(&mut self, acc: Acc, fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ FuseImpl::try_rfold(self, acc, fold)
+ }
+
+ #[inline]
+ fn rfold<Acc, Fold>(self, mut acc: Acc, fold: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ if let Some(iter) = self.iter {
+ acc = iter.rfold(acc, fold);
+ }
+ acc
+ }
+
+ #[inline]
+ fn rfind<P>(&mut self, predicate: P) -> Option<Self::Item>
+ where
+ P: FnMut(&Self::Item) -> bool,
+ {
+ FuseImpl::rfind(self, predicate)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I> ExactSizeIterator for Fuse<I>
+where
+ I: ExactSizeIterator,
+{
+ fn len(&self) -> usize {
+ match self.iter {
+ Some(ref iter) => iter.len(),
+ None => 0,
+ }
+ }
+
+ fn is_empty(&self) -> bool {
+ match self.iter {
+ Some(ref iter) => iter.is_empty(),
+ None => true,
+ }
+ }
+}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+// SAFETY: `TrustedLen` requires that an accurate length is reported via `size_hint()`. As `Fuse`
+// is just forwarding this to the wrapped iterator `I` this property is preserved and it is safe to
+// implement `TrustedLen` here.
+unsafe impl<I> TrustedLen for Fuse<I> where I: TrustedLen {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+// SAFETY: `TrustedRandomAccess` requires that `size_hint()` must be exact and cheap to call, and
+// `Iterator::__iterator_get_unchecked()` must be implemented accordingly.
+//
+// This is safe to implement as `Fuse` is just forwarding these to the wrapped iterator `I`, which
+// preserves these properties.
+unsafe impl<I> TrustedRandomAccess for Fuse<I> where I: TrustedRandomAccess {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<I> TrustedRandomAccessNoCoerce for Fuse<I>
+where
+ I: TrustedRandomAccessNoCoerce,
+{
+ const MAY_HAVE_SIDE_EFFECT: bool = I::MAY_HAVE_SIDE_EFFECT;
+}
+
+/// Fuse specialization trait
+///
+/// We only need to worry about `&mut self` methods, which
+/// may exhaust the iterator without consuming it.
+#[doc(hidden)]
+trait FuseImpl<I> {
+ type Item;
+
+ // Functions specific to any normal Iterators
+ fn next(&mut self) -> Option<Self::Item>;
+ fn nth(&mut self, n: usize) -> Option<Self::Item>;
+ fn try_fold<Acc, Fold, R>(&mut self, acc: Acc, fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>;
+ fn find<P>(&mut self, predicate: P) -> Option<Self::Item>
+ where
+ P: FnMut(&Self::Item) -> bool;
+
+ // Functions specific to DoubleEndedIterators
+ fn next_back(&mut self) -> Option<Self::Item>
+ where
+ I: DoubleEndedIterator;
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item>
+ where
+ I: DoubleEndedIterator;
+ fn try_rfold<Acc, Fold, R>(&mut self, acc: Acc, fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ I: DoubleEndedIterator;
+ fn rfind<P>(&mut self, predicate: P) -> Option<Self::Item>
+ where
+ P: FnMut(&Self::Item) -> bool,
+ I: DoubleEndedIterator;
+}
+
+/// General `Fuse` impl which sets `iter = None` when exhausted.
+#[doc(hidden)]
+impl<I> FuseImpl<I> for Fuse<I>
+where
+ I: Iterator,
+{
+ type Item = <I as Iterator>::Item;
+
+ #[inline]
+ default fn next(&mut self) -> Option<<I as Iterator>::Item> {
+ and_then_or_clear(&mut self.iter, Iterator::next)
+ }
+
+ #[inline]
+ default fn nth(&mut self, n: usize) -> Option<I::Item> {
+ and_then_or_clear(&mut self.iter, |iter| iter.nth(n))
+ }
+
+ #[inline]
+ default fn try_fold<Acc, Fold, R>(&mut self, mut acc: Acc, fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ if let Some(ref mut iter) = self.iter {
+ acc = iter.try_fold(acc, fold)?;
+ self.iter = None;
+ }
+ try { acc }
+ }
+
+ #[inline]
+ default fn find<P>(&mut self, predicate: P) -> Option<Self::Item>
+ where
+ P: FnMut(&Self::Item) -> bool,
+ {
+ and_then_or_clear(&mut self.iter, |iter| iter.find(predicate))
+ }
+
+ #[inline]
+ default fn next_back(&mut self) -> Option<<I as Iterator>::Item>
+ where
+ I: DoubleEndedIterator,
+ {
+ and_then_or_clear(&mut self.iter, |iter| iter.next_back())
+ }
+
+ #[inline]
+ default fn nth_back(&mut self, n: usize) -> Option<<I as Iterator>::Item>
+ where
+ I: DoubleEndedIterator,
+ {
+ and_then_or_clear(&mut self.iter, |iter| iter.nth_back(n))
+ }
+
+ #[inline]
+ default fn try_rfold<Acc, Fold, R>(&mut self, mut acc: Acc, fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ I: DoubleEndedIterator,
+ {
+ if let Some(ref mut iter) = self.iter {
+ acc = iter.try_rfold(acc, fold)?;
+ self.iter = None;
+ }
+ try { acc }
+ }
+
+ #[inline]
+ default fn rfind<P>(&mut self, predicate: P) -> Option<Self::Item>
+ where
+ P: FnMut(&Self::Item) -> bool,
+ I: DoubleEndedIterator,
+ {
+ and_then_or_clear(&mut self.iter, |iter| iter.rfind(predicate))
+ }
+}
+
+/// Specialized `Fuse` impl which doesn't bother clearing `iter` when exhausted.
+/// However, we must still be prepared for the possibility that it was already cleared!
+#[doc(hidden)]
+impl<I> FuseImpl<I> for Fuse<I>
+where
+ I: FusedIterator,
+{
+ #[inline]
+ fn next(&mut self) -> Option<<I as Iterator>::Item> {
+ self.iter.as_mut()?.next()
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<I::Item> {
+ self.iter.as_mut()?.nth(n)
+ }
+
+ #[inline]
+ fn try_fold<Acc, Fold, R>(&mut self, mut acc: Acc, fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ if let Some(ref mut iter) = self.iter {
+ acc = iter.try_fold(acc, fold)?;
+ }
+ try { acc }
+ }
+
+ #[inline]
+ fn find<P>(&mut self, predicate: P) -> Option<Self::Item>
+ where
+ P: FnMut(&Self::Item) -> bool,
+ {
+ self.iter.as_mut()?.find(predicate)
+ }
+
+ #[inline]
+ fn next_back(&mut self) -> Option<<I as Iterator>::Item>
+ where
+ I: DoubleEndedIterator,
+ {
+ self.iter.as_mut()?.next_back()
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<<I as Iterator>::Item>
+ where
+ I: DoubleEndedIterator,
+ {
+ self.iter.as_mut()?.nth_back(n)
+ }
+
+ #[inline]
+ fn try_rfold<Acc, Fold, R>(&mut self, mut acc: Acc, fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ I: DoubleEndedIterator,
+ {
+ if let Some(ref mut iter) = self.iter {
+ acc = iter.try_rfold(acc, fold)?;
+ }
+ try { acc }
+ }
+
+ #[inline]
+ fn rfind<P>(&mut self, predicate: P) -> Option<Self::Item>
+ where
+ P: FnMut(&Self::Item) -> bool,
+ I: DoubleEndedIterator,
+ {
+ self.iter.as_mut()?.rfind(predicate)
+ }
+}
+
+#[inline]
+fn and_then_or_clear<T, U>(opt: &mut Option<T>, f: impl FnOnce(&mut T) -> Option<U>) -> Option<U> {
+ let x = f(opt.as_mut()?);
+ if x.is_none() {
+ *opt = None;
+ }
+ x
+}
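
A sketch of what `Fuse` guarantees: a hand-written iterator may resume after returning `None`, but its fused version never does (illustrative, not part of the patch):

```rust
/// A deliberately non-fused iterator that alternates `Some` and `None`.
struct Flicker(u32);

impl Iterator for Flicker {
    type Item = u32;
    fn next(&mut self) -> Option<u32> {
        self.0 += 1;
        if self.0 % 2 == 0 { None } else { Some(self.0) }
    }
}

fn main() {
    let mut raw = Flicker(0);
    assert_eq!(raw.next(), Some(1));
    assert_eq!(raw.next(), None);
    assert_eq!(raw.next(), Some(3)); // resumes after `None`

    let mut fused = Flicker(0).fuse();
    assert_eq!(fused.next(), Some(1));
    assert_eq!(fused.next(), None);
    assert_eq!(fused.next(), None); // `None` forever from here on
}
```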
diff --git a/library/core/src/iter/adapters/inspect.rs b/library/core/src/iter/adapters/inspect.rs
new file mode 100644
index 000000000..19839fdfe
--- /dev/null
+++ b/library/core/src/iter/adapters/inspect.rs
@@ -0,0 +1,166 @@
+use crate::fmt;
+use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable};
+use crate::ops::Try;
+
+/// An iterator that calls a function with a reference to each element before
+/// yielding it.
+///
+/// This `struct` is created by the [`inspect`] method on [`Iterator`]. See its
+/// documentation for more.
+///
+/// [`inspect`]: Iterator::inspect
+/// [`Iterator`]: trait.Iterator.html
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Clone)]
+pub struct Inspect<I, F> {
+ iter: I,
+ f: F,
+}
+impl<I, F> Inspect<I, F> {
+ pub(in crate::iter) fn new(iter: I, f: F) -> Inspect<I, F> {
+ Inspect { iter, f }
+ }
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<I: fmt::Debug, F> fmt::Debug for Inspect<I, F> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Inspect").field("iter", &self.iter).finish()
+ }
+}
+
+impl<I: Iterator, F> Inspect<I, F>
+where
+ F: FnMut(&I::Item),
+{
+ #[inline]
+ fn do_inspect(&mut self, elt: Option<I::Item>) -> Option<I::Item> {
+ if let Some(ref a) = elt {
+ (self.f)(a);
+ }
+
+ elt
+ }
+}
+
+fn inspect_fold<T, Acc>(
+ mut f: impl FnMut(&T),
+ mut fold: impl FnMut(Acc, T) -> Acc,
+) -> impl FnMut(Acc, T) -> Acc {
+ move |acc, item| {
+ f(&item);
+ fold(acc, item)
+ }
+}
+
+fn inspect_try_fold<'a, T, Acc, R>(
+ f: &'a mut impl FnMut(&T),
+ mut fold: impl FnMut(Acc, T) -> R + 'a,
+) -> impl FnMut(Acc, T) -> R + 'a {
+ move |acc, item| {
+ f(&item);
+ fold(acc, item)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: Iterator, F> Iterator for Inspect<I, F>
+where
+ F: FnMut(&I::Item),
+{
+ type Item = I::Item;
+
+ #[inline]
+ fn next(&mut self) -> Option<I::Item> {
+ let next = self.iter.next();
+ self.do_inspect(next)
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+
+ #[inline]
+ fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ self.iter.try_fold(init, inspect_try_fold(&mut self.f, fold))
+ }
+
+ #[inline]
+ fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ self.iter.fold(init, inspect_fold(self.f, fold))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: DoubleEndedIterator, F> DoubleEndedIterator for Inspect<I, F>
+where
+ F: FnMut(&I::Item),
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<I::Item> {
+ let next = self.iter.next_back();
+ self.do_inspect(next)
+ }
+
+ #[inline]
+ fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ self.iter.try_rfold(init, inspect_try_fold(&mut self.f, fold))
+ }
+
+ #[inline]
+ fn rfold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ self.iter.rfold(init, inspect_fold(self.f, fold))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: ExactSizeIterator, F> ExactSizeIterator for Inspect<I, F>
+where
+ F: FnMut(&I::Item),
+{
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+
+ fn is_empty(&self) -> bool {
+ self.iter.is_empty()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<I: FusedIterator, F> FusedIterator for Inspect<I, F> where F: FnMut(&I::Item) {}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I, F> SourceIter for Inspect<I, F>
+where
+ I: SourceIter,
+{
+ type Source = I::Source;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut I::Source {
+ // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+ unsafe { SourceIter::as_inner(&mut self.iter) }
+ }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I: InPlaceIterable, F> InPlaceIterable for Inspect<I, F> where F: FnMut(&I::Item) {}
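
A sketch of `Inspect`, which observes each element by reference without consuming or changing it (illustrative, not part of the patch):

```rust
fn main() {
    let sum: i32 = [1, 2, 3]
        .iter()
        // The closure sees each element as it flows through the chain.
        .inspect(|x| println!("about to add {x}"))
        .sum();
    assert_eq!(sum, 6);
}
```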
diff --git a/library/core/src/iter/adapters/intersperse.rs b/library/core/src/iter/adapters/intersperse.rs
new file mode 100644
index 000000000..d8bbd424c
--- /dev/null
+++ b/library/core/src/iter/adapters/intersperse.rs
@@ -0,0 +1,187 @@
+use super::Peekable;
+
+/// An iterator adapter that places a separator between all elements.
+///
+/// This `struct` is created by [`Iterator::intersperse`]. See its documentation
+/// for more information.
+#[unstable(feature = "iter_intersperse", reason = "recently added", issue = "79524")]
+#[derive(Debug, Clone)]
+pub struct Intersperse<I: Iterator>
+where
+ I::Item: Clone,
+{
+ separator: I::Item,
+ iter: Peekable<I>,
+ needs_sep: bool,
+}
+
+impl<I: Iterator> Intersperse<I>
+where
+ I::Item: Clone,
+{
+ pub(in crate::iter) fn new(iter: I, separator: I::Item) -> Self {
+ Self { iter: iter.peekable(), separator, needs_sep: false }
+ }
+}
+
+#[unstable(feature = "iter_intersperse", reason = "recently added", issue = "79524")]
+impl<I> Iterator for Intersperse<I>
+where
+ I: Iterator,
+ I::Item: Clone,
+{
+ type Item = I::Item;
+
+ #[inline]
+ fn next(&mut self) -> Option<I::Item> {
+ if self.needs_sep && self.iter.peek().is_some() {
+ self.needs_sep = false;
+ Some(self.separator.clone())
+ } else {
+ self.needs_sep = true;
+ self.iter.next()
+ }
+ }
+
+ fn fold<B, F>(self, init: B, f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ let separator = self.separator;
+ intersperse_fold(self.iter, init, f, move || separator.clone(), self.needs_sep)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ intersperse_size_hint(&self.iter, self.needs_sep)
+ }
+}
+
+/// An iterator adapter that places a separator between all elements.
+///
+/// This `struct` is created by [`Iterator::intersperse_with`]. See its
+/// documentation for more information.
+#[unstable(feature = "iter_intersperse", reason = "recently added", issue = "79524")]
+pub struct IntersperseWith<I, G>
+where
+ I: Iterator,
+{
+ separator: G,
+ iter: Peekable<I>,
+ needs_sep: bool,
+}
+
+#[unstable(feature = "iter_intersperse", reason = "recently added", issue = "79524")]
+impl<I, G> crate::fmt::Debug for IntersperseWith<I, G>
+where
+ I: Iterator + crate::fmt::Debug,
+ I::Item: crate::fmt::Debug,
+ G: crate::fmt::Debug,
+{
+ fn fmt(&self, f: &mut crate::fmt::Formatter<'_>) -> crate::fmt::Result {
+ f.debug_struct("IntersperseWith")
+ .field("separator", &self.separator)
+ .field("iter", &self.iter)
+ .field("needs_sep", &self.needs_sep)
+ .finish()
+ }
+}
+
+#[unstable(feature = "iter_intersperse", reason = "recently added", issue = "79524")]
+impl<I, G> crate::clone::Clone for IntersperseWith<I, G>
+where
+ I: Iterator + crate::clone::Clone,
+ I::Item: crate::clone::Clone,
+ G: Clone,
+{
+ fn clone(&self) -> Self {
+ IntersperseWith {
+ separator: self.separator.clone(),
+ iter: self.iter.clone(),
+ needs_sep: self.needs_sep.clone(),
+ }
+ }
+}
+
+impl<I, G> IntersperseWith<I, G>
+where
+ I: Iterator,
+ G: FnMut() -> I::Item,
+{
+ pub(in crate::iter) fn new(iter: I, separator: G) -> Self {
+ Self { iter: iter.peekable(), separator, needs_sep: false }
+ }
+}
+
+#[unstable(feature = "iter_intersperse", reason = "recently added", issue = "79524")]
+impl<I, G> Iterator for IntersperseWith<I, G>
+where
+ I: Iterator,
+ G: FnMut() -> I::Item,
+{
+ type Item = I::Item;
+
+ #[inline]
+ fn next(&mut self) -> Option<I::Item> {
+ if self.needs_sep && self.iter.peek().is_some() {
+ self.needs_sep = false;
+ Some((self.separator)())
+ } else {
+ self.needs_sep = true;
+ self.iter.next()
+ }
+ }
+
+ fn fold<B, F>(self, init: B, f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ intersperse_fold(self.iter, init, f, self.separator, self.needs_sep)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ intersperse_size_hint(&self.iter, self.needs_sep)
+ }
+}
+
+fn intersperse_size_hint<I>(iter: &I, needs_sep: bool) -> (usize, Option<usize>)
+where
+ I: Iterator,
+{
+ let (lo, hi) = iter.size_hint();
+ let next_is_elem = !needs_sep;
+ (
+ lo.saturating_sub(next_is_elem as usize).saturating_add(lo),
+ hi.and_then(|hi| hi.saturating_sub(next_is_elem as usize).checked_add(hi)),
+ )
+}
+
+fn intersperse_fold<I, B, F, G>(
+ mut iter: I,
+ init: B,
+ mut f: F,
+ mut separator: G,
+ needs_sep: bool,
+) -> B
+where
+ I: Iterator,
+ F: FnMut(B, I::Item) -> B,
+ G: FnMut() -> I::Item,
+{
+ let mut accum = init;
+
+ if !needs_sep {
+ if let Some(x) = iter.next() {
+ accum = f(accum, x);
+ } else {
+ return accum;
+ }
+ }
+
+ iter.fold(accum, |mut accum, x| {
+ accum = f(accum, separator());
+ accum = f(accum, x);
+ accum
+ })
+}
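
A sketch of `Intersperse`; since the adapter is gated on the unstable `iter_intersperse` feature above, this requires a nightly toolchain (illustrative, not part of the patch):

```rust
#![feature(iter_intersperse)]

fn main() {
    // A clone of the separator is placed between every pair of adjacent elements.
    let csv: String = ["a", "b", "c"].into_iter().intersperse(",").collect();
    assert_eq!(csv, "a,b,c");
}
```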
diff --git a/library/core/src/iter/adapters/map.rs b/library/core/src/iter/adapters/map.rs
new file mode 100644
index 000000000..9e25dbe46
--- /dev/null
+++ b/library/core/src/iter/adapters/map.rs
@@ -0,0 +1,218 @@
+use crate::fmt;
+use crate::iter::adapters::{
+ zip::try_get_unchecked, SourceIter, TrustedRandomAccess, TrustedRandomAccessNoCoerce,
+};
+use crate::iter::{FusedIterator, InPlaceIterable, TrustedLen};
+use crate::ops::Try;
+
+/// An iterator that maps the values of `iter` with `f`.
+///
+/// This `struct` is created by the [`map`] method on [`Iterator`]. See its
+/// documentation for more.
+///
+/// [`map`]: Iterator::map
+/// [`Iterator`]: trait.Iterator.html
+///
+/// # Notes about side effects
+///
+/// The [`map`] iterator implements [`DoubleEndedIterator`], meaning that
+/// you can also [`map`] backwards:
+///
+/// ```rust
+/// let v: Vec<i32> = [1, 2, 3].into_iter().map(|x| x + 1).rev().collect();
+///
+/// assert_eq!(v, [4, 3, 2]);
+/// ```
+///
+/// [`DoubleEndedIterator`]: trait.DoubleEndedIterator.html
+///
+/// But if your closure has state, iterating backwards may act in a way you do
+/// not expect. Let's go through an example. First, in the forward direction:
+///
+/// ```rust
+/// let mut c = 0;
+///
+/// for pair in ['a', 'b', 'c'].into_iter()
+/// .map(|letter| { c += 1; (letter, c) }) {
+/// println!("{pair:?}");
+/// }
+/// ```
+///
+/// This will print `('a', 1), ('b', 2), ('c', 3)`.
+///
+/// Now consider this twist where we add a call to `rev`. This version will
+/// print `('c', 1), ('b', 2), ('a', 3)`. Note that the letters are reversed,
+/// but the values of the counter still go in order. This is because `map()` is
+/// still being called lazily on each item, but we are popping items off the
+/// back of the array now, instead of shifting them from the front.
+///
+/// ```rust
+/// let mut c = 0;
+///
+/// for pair in ['a', 'b', 'c'].into_iter()
+/// .map(|letter| { c += 1; (letter, c) })
+/// .rev() {
+/// println!("{pair:?}");
+/// }
+/// ```
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Clone)]
+pub struct Map<I, F> {
+ // Used for `SplitWhitespace` and `SplitAsciiWhitespace` `as_str` methods
+ pub(crate) iter: I,
+ f: F,
+}
+
+impl<I, F> Map<I, F> {
+ pub(in crate::iter) fn new(iter: I, f: F) -> Map<I, F> {
+ Map { iter, f }
+ }
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<I: fmt::Debug, F> fmt::Debug for Map<I, F> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Map").field("iter", &self.iter).finish()
+ }
+}
+
+fn map_fold<T, B, Acc>(
+ mut f: impl FnMut(T) -> B,
+ mut g: impl FnMut(Acc, B) -> Acc,
+) -> impl FnMut(Acc, T) -> Acc {
+ move |acc, elt| g(acc, f(elt))
+}
+
+fn map_try_fold<'a, T, B, Acc, R>(
+ f: &'a mut impl FnMut(T) -> B,
+ mut g: impl FnMut(Acc, B) -> R + 'a,
+) -> impl FnMut(Acc, T) -> R + 'a {
+ move |acc, elt| g(acc, f(elt))
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B, I: Iterator, F> Iterator for Map<I, F>
+where
+ F: FnMut(I::Item) -> B,
+{
+ type Item = B;
+
+ #[inline]
+ fn next(&mut self) -> Option<B> {
+ self.iter.next().map(&mut self.f)
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+
+ fn try_fold<Acc, G, R>(&mut self, init: Acc, g: G) -> R
+ where
+ Self: Sized,
+ G: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ self.iter.try_fold(init, map_try_fold(&mut self.f, g))
+ }
+
+ fn fold<Acc, G>(self, init: Acc, g: G) -> Acc
+ where
+ G: FnMut(Acc, Self::Item) -> Acc,
+ {
+ self.iter.fold(init, map_fold(self.f, g))
+ }
+
+ #[inline]
+ unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> B
+ where
+ Self: TrustedRandomAccessNoCoerce,
+ {
+ // SAFETY: the caller must uphold the contract for
+ // `Iterator::__iterator_get_unchecked`.
+ unsafe { (self.f)(try_get_unchecked(&mut self.iter, idx)) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B, I: DoubleEndedIterator, F> DoubleEndedIterator for Map<I, F>
+where
+ F: FnMut(I::Item) -> B,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<B> {
+ self.iter.next_back().map(&mut self.f)
+ }
+
+ fn try_rfold<Acc, G, R>(&mut self, init: Acc, g: G) -> R
+ where
+ Self: Sized,
+ G: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ self.iter.try_rfold(init, map_try_fold(&mut self.f, g))
+ }
+
+ fn rfold<Acc, G>(self, init: Acc, g: G) -> Acc
+ where
+ G: FnMut(Acc, Self::Item) -> Acc,
+ {
+ self.iter.rfold(init, map_fold(self.f, g))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B, I: ExactSizeIterator, F> ExactSizeIterator for Map<I, F>
+where
+ F: FnMut(I::Item) -> B,
+{
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+
+ fn is_empty(&self) -> bool {
+ self.iter.is_empty()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<B, I: FusedIterator, F> FusedIterator for Map<I, F> where F: FnMut(I::Item) -> B {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<B, I, F> TrustedLen for Map<I, F>
+where
+ I: TrustedLen,
+ F: FnMut(I::Item) -> B,
+{
+}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<I, F> TrustedRandomAccess for Map<I, F> where I: TrustedRandomAccess {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<I, F> TrustedRandomAccessNoCoerce for Map<I, F>
+where
+ I: TrustedRandomAccessNoCoerce,
+{
+ const MAY_HAVE_SIDE_EFFECT: bool = true;
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I, F> SourceIter for Map<I, F>
+where
+ I: SourceIter,
+{
+ type Source = I::Source;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut I::Source {
+ // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+ unsafe { SourceIter::as_inner(&mut self.iter) }
+ }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<B, I: InPlaceIterable, F> InPlaceIterable for Map<I, F> where F: FnMut(I::Item) -> B {}
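
A small illustrative sketch (not part of the patch) of the `map_fold` composition used above: folding a mapped iterator is observationally the same as folding the base iterator with the fused closure `g(acc, f(elt))`, which is why `Map::fold` can delegate straight to the inner iterator's `fold`.

```rust
fn main() {
    // Map::fold delegates to the inner fold with g(acc, f(elt)) fused
    // into a single closure; both computations below are equivalent.
    let base = [1, 2, 3];
    let via_map: i32 = base.iter().map(|x| x * 2).fold(0, |acc, x| acc + x);
    let fused: i32 = base.iter().fold(0, |acc, &x| acc + x * 2);
    assert_eq!(via_map, fused);
    assert_eq!(via_map, 12);
}
```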
diff --git a/library/core/src/iter/adapters/map_while.rs b/library/core/src/iter/adapters/map_while.rs
new file mode 100644
index 000000000..1e8d6bf3e
--- /dev/null
+++ b/library/core/src/iter/adapters/map_while.rs
@@ -0,0 +1,100 @@
+use crate::fmt;
+use crate::iter::{adapters::SourceIter, InPlaceIterable};
+use crate::ops::{ControlFlow, Try};
+
+/// An iterator that only accepts elements while `predicate` returns `Some(_)`.
+///
+/// This `struct` is created by the [`map_while`] method on [`Iterator`]. See its
+/// documentation for more.
+///
+/// [`map_while`]: Iterator::map_while
+/// [`Iterator`]: trait.Iterator.html
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "iter_map_while", since = "1.57.0")]
+#[derive(Clone)]
+pub struct MapWhile<I, P> {
+ iter: I,
+ predicate: P,
+}
+
+impl<I, P> MapWhile<I, P> {
+ pub(in crate::iter) fn new(iter: I, predicate: P) -> MapWhile<I, P> {
+ MapWhile { iter, predicate }
+ }
+}
+
+#[stable(feature = "iter_map_while", since = "1.57.0")]
+impl<I: fmt::Debug, P> fmt::Debug for MapWhile<I, P> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("MapWhile").field("iter", &self.iter).finish()
+ }
+}
+
+#[stable(feature = "iter_map_while", since = "1.57.0")]
+impl<B, I: Iterator, P> Iterator for MapWhile<I, P>
+where
+ P: FnMut(I::Item) -> Option<B>,
+{
+ type Item = B;
+
+ #[inline]
+ fn next(&mut self) -> Option<B> {
+ let x = self.iter.next()?;
+ (self.predicate)(x)
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (_, upper) = self.iter.size_hint();
+ (0, upper) // can't know a lower bound, due to the predicate
+ }
+
+ #[inline]
+ fn try_fold<Acc, Fold, R>(&mut self, init: Acc, mut fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ let Self { iter, predicate } = self;
+ iter.try_fold(init, |acc, x| match predicate(x) {
+ Some(item) => ControlFlow::from_try(fold(acc, item)),
+ None => ControlFlow::Break(try { acc }),
+ })
+ .into_try()
+ }
+
+ #[inline]
+ fn fold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ #[inline]
+ fn ok<B, T>(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result<B, !> {
+ move |acc, x| Ok(f(acc, x))
+ }
+
+ self.try_fold(init, ok(fold)).unwrap()
+ }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I, P> SourceIter for MapWhile<I, P>
+where
+ I: SourceIter,
+{
+ type Source = I::Source;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut I::Source {
+ // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+ unsafe { SourceIter::as_inner(&mut self.iter) }
+ }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<B, I: InPlaceIterable, P> InPlaceIterable for MapWhile<I, P> where
+ P: FnMut(I::Item) -> Option<B>
+{
+}
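
An illustrative sketch (not part of the patch) of the adapter's stopping behavior: the first `None` from the closure ends iteration for good, which is also why `size_hint` can only promise a lower bound of 0.

```rust
fn main() {
    // map_while() yields mapped values until the closure first returns
    // None; the failed conversion below stops the iteration for good.
    let v: Vec<u32> = [3, 2, 1, 0, 5]
        .into_iter()
        .map_while(|x: i32| u32::try_from(x - 1).ok())
        .collect();
    // Stops at 0 - 1 == -1, which u32::try_from rejects; 5 is never reached.
    assert_eq!(v, [2, 1, 0]);
}
```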
diff --git a/library/core/src/iter/adapters/mod.rs b/library/core/src/iter/adapters/mod.rs
new file mode 100644
index 000000000..916a26e24
--- /dev/null
+++ b/library/core/src/iter/adapters/mod.rs
@@ -0,0 +1,232 @@
+use crate::iter::{InPlaceIterable, Iterator};
+use crate::ops::{ChangeOutputType, ControlFlow, FromResidual, NeverShortCircuit, Residual, Try};
+
+mod by_ref_sized;
+mod chain;
+mod cloned;
+mod copied;
+mod cycle;
+mod enumerate;
+mod filter;
+mod filter_map;
+mod flatten;
+mod fuse;
+mod inspect;
+mod intersperse;
+mod map;
+mod map_while;
+mod peekable;
+mod rev;
+mod scan;
+mod skip;
+mod skip_while;
+mod step_by;
+mod take;
+mod take_while;
+mod zip;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::{
+ chain::Chain, cycle::Cycle, enumerate::Enumerate, filter::Filter, filter_map::FilterMap,
+ flatten::FlatMap, fuse::Fuse, inspect::Inspect, map::Map, peekable::Peekable, rev::Rev,
+ scan::Scan, skip::Skip, skip_while::SkipWhile, take::Take, take_while::TakeWhile, zip::Zip,
+};
+
+#[unstable(feature = "std_internals", issue = "none")]
+pub use self::by_ref_sized::ByRefSized;
+
+#[stable(feature = "iter_cloned", since = "1.1.0")]
+pub use self::cloned::Cloned;
+
+#[stable(feature = "iterator_step_by", since = "1.28.0")]
+pub use self::step_by::StepBy;
+
+#[stable(feature = "iterator_flatten", since = "1.29.0")]
+pub use self::flatten::Flatten;
+
+#[stable(feature = "iter_copied", since = "1.36.0")]
+pub use self::copied::Copied;
+
+#[unstable(feature = "iter_intersperse", reason = "recently added", issue = "79524")]
+pub use self::intersperse::{Intersperse, IntersperseWith};
+
+#[stable(feature = "iter_map_while", since = "1.57.0")]
+pub use self::map_while::MapWhile;
+
+#[unstable(feature = "trusted_random_access", issue = "none")]
+pub use self::zip::TrustedRandomAccess;
+
+#[unstable(feature = "trusted_random_access", issue = "none")]
+pub use self::zip::TrustedRandomAccessNoCoerce;
+
+#[stable(feature = "iter_zip", since = "1.59.0")]
+pub use self::zip::zip;
+
+/// This trait provides transitive access to the source stage in an iterator-adapter pipeline
+/// under the conditions that
+/// * the iterator source `S` itself implements `SourceIter<Source = S>`
+/// * there is a delegating implementation of this trait for each adapter in the pipeline between
+/// the source and the pipeline consumer.
+///
+/// When the source is an owning iterator struct (commonly called `IntoIter`) then
+/// this can be useful for specializing [`FromIterator`] implementations or recovering the
+/// remaining elements after an iterator has been partially exhausted.
+///
+/// Note that implementations do not necessarily have to provide access to the inner-most
+/// source of a pipeline. A stateful intermediate adapter might eagerly evaluate a part
+/// of the pipeline and expose its internal storage as source.
+///
+/// The trait is unsafe because implementers must uphold additional safety properties.
+/// See [`as_inner`] for details.
+///
+/// The primary use of this trait is in-place iteration. Refer to the [`vec::in_place_collect`]
+/// module documentation for more information.
+///
+/// [`vec::in_place_collect`]: ../../../../alloc/vec/in_place_collect/index.html
+///
+/// # Examples
+///
+/// Retrieving a partially consumed source:
+///
+/// ```
+/// # #![feature(inplace_iteration)]
+/// # use std::iter::SourceIter;
+///
+/// let mut iter = vec![9, 9, 9].into_iter().map(|i| i * i);
+/// let _ = iter.next();
+/// let mut remainder = std::mem::replace(unsafe { iter.as_inner() }, Vec::new().into_iter());
+/// println!("n = {} elements remaining", remainder.len());
+/// ```
+///
+/// [`FromIterator`]: crate::iter::FromIterator
+/// [`as_inner`]: SourceIter::as_inner
+#[unstable(issue = "none", feature = "inplace_iteration")]
+#[doc(hidden)]
+#[rustc_specialization_trait]
+pub unsafe trait SourceIter {
+ /// A source stage in an iterator pipeline.
+ type Source;
+
+ /// Retrieve the source of an iterator pipeline.
+ ///
+ /// # Safety
+ ///
+    /// Implementations must return the same mutable reference for their lifetime, unless
+    /// replaced by a caller.
+    /// Callers may only replace the reference when they have stopped iterating and will
+    /// drop the iterator pipeline after extracting the source.
+ ///
+ /// This means iterator adapters can rely on the source not changing during
+ /// iteration but they cannot rely on it in their Drop implementations.
+ ///
+ /// Implementing this method means adapters relinquish private-only access to their
+ /// source and can only rely on guarantees made based on method receiver types.
+ /// The lack of restricted access also requires that adapters must uphold the source's
+ /// public API even when they have access to its internals.
+ ///
+    /// Callers in turn must expect the source to be in any state that is consistent with
+    /// its public API, since the adapters sitting between the caller and the source have the
+    /// same access. In particular, an adapter may have consumed more elements than strictly
+    /// necessary.
+ ///
+ /// The overall goal of these requirements is to let the consumer of a pipeline use
+ /// * whatever remains in the source after iteration has stopped
+ /// * the memory that has become unused by advancing a consuming iterator
+ ///
+ /// [`next()`]: Iterator::next()
+ unsafe fn as_inner(&mut self) -> &mut Self::Source;
+}
+
+/// An iterator adapter that produces output as long as the underlying
+/// iterator produces values for which `Try::branch` returns `ControlFlow::Continue`.
+///
+/// If a `ControlFlow::Break` is encountered, the iterator stops and the
+/// residual is stored.
+pub(crate) struct GenericShunt<'a, I, R> {
+ iter: I,
+ residual: &'a mut Option<R>,
+}
+
+/// Process the given iterator as if it yielded the item's `Try::Output`
+/// type instead. Any `Try::Residual`s encountered will stop the inner iterator
+/// and be propagated back to the overall result.
+pub(crate) fn try_process<I, T, R, F, U>(iter: I, mut f: F) -> ChangeOutputType<I::Item, U>
+where
+ I: Iterator<Item: Try<Output = T, Residual = R>>,
+ for<'a> F: FnMut(GenericShunt<'a, I, R>) -> U,
+ R: Residual<U>,
+{
+ let mut residual = None;
+ let shunt = GenericShunt { iter, residual: &mut residual };
+ let value = f(shunt);
+ match residual {
+ Some(r) => FromResidual::from_residual(r),
+ None => Try::from_output(value),
+ }
+}
+
+impl<I, R> Iterator for GenericShunt<'_, I, R>
+where
+ I: Iterator<Item: Try<Residual = R>>,
+{
+ type Item = <I::Item as Try>::Output;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.try_for_each(ControlFlow::Break).break_value()
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.residual.is_some() {
+ (0, Some(0))
+ } else {
+ let (_, upper) = self.iter.size_hint();
+ (0, upper)
+ }
+ }
+
+ fn try_fold<B, F, T>(&mut self, init: B, mut f: F) -> T
+ where
+ F: FnMut(B, Self::Item) -> T,
+ T: Try<Output = B>,
+ {
+ self.iter
+ .try_fold(init, |acc, x| match Try::branch(x) {
+ ControlFlow::Continue(x) => ControlFlow::from_try(f(acc, x)),
+ ControlFlow::Break(r) => {
+ *self.residual = Some(r);
+ ControlFlow::Break(try { acc })
+ }
+ })
+ .into_try()
+ }
+
+ fn fold<B, F>(mut self, init: B, fold: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.try_fold(init, NeverShortCircuit::wrap_mut_2(fold)).0
+ }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I, R> SourceIter for GenericShunt<'_, I, R>
+where
+ I: SourceIter,
+{
+ type Source = I::Source;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut Self::Source {
+ // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+ unsafe { SourceIter::as_inner(&mut self.iter) }
+ }
+}
+
+// SAFETY: GenericShunt::next calls `I::try_for_each`, which has to advance `iter`
+// in order to return `Some(_)`. Since `iter` has type `I: InPlaceIterable` it's
+// guaranteed that at least one item will be moved out from the underlying source.
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I, T, R> InPlaceIterable for GenericShunt<'_, I, R> where
+ I: Iterator<Item: Try<Output = T, Residual = R>> + InPlaceIterable
+{
+}
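
The `try_process`/`GenericShunt` machinery above is what stands behind the stable behavior sketched below (illustrative, not part of the patch): collecting an iterator of `Result`s short-circuits on the first `Err`, which ends up stored as the residual.

```rust
fn main() {
    // Collecting Results goes through try_process(): the shunt feeds
    // Ok payloads to the collector until the first Err residual.
    let ok: Result<Vec<i32>, &str> = [Ok(1), Ok(2)].into_iter().collect();
    assert_eq!(ok, Ok(vec![1, 2]));

    let err: Result<Vec<i32>, &str> =
        [Ok(1), Err("boom"), Ok(3)].into_iter().collect();
    assert_eq!(err, Err("boom"));
}
```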
diff --git a/library/core/src/iter/adapters/peekable.rs b/library/core/src/iter/adapters/peekable.rs
new file mode 100644
index 000000000..20aca323b
--- /dev/null
+++ b/library/core/src/iter/adapters/peekable.rs
@@ -0,0 +1,335 @@
+use crate::iter::{adapters::SourceIter, FusedIterator, TrustedLen};
+use crate::ops::{ControlFlow, Try};
+
+/// An iterator with a `peek()` that returns an optional reference to the next
+/// element.
+///
+/// This `struct` is created by the [`peekable`] method on [`Iterator`]. See its
+/// documentation for more.
+///
+/// [`peekable`]: Iterator::peekable
+/// [`Iterator`]: trait.Iterator.html
+#[derive(Clone, Debug)]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Peekable<I: Iterator> {
+ iter: I,
+ /// Remember a peeked value, even if it was None.
+ peeked: Option<Option<I::Item>>,
+}
+
+impl<I: Iterator> Peekable<I> {
+ pub(in crate::iter) fn new(iter: I) -> Peekable<I> {
+ Peekable { iter, peeked: None }
+ }
+}
+
+// Peekable must remember if a None has been seen in the `.peek()` method.
+// It ensures that `.peek(); .peek();` or `.peek(); .next();` only advances the
+// underlying iterator at most once. This does not by itself make the iterator
+// fused.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: Iterator> Iterator for Peekable<I> {
+ type Item = I::Item;
+
+ #[inline]
+ fn next(&mut self) -> Option<I::Item> {
+ match self.peeked.take() {
+ Some(v) => v,
+ None => self.iter.next(),
+ }
+ }
+
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ fn count(mut self) -> usize {
+ match self.peeked.take() {
+ Some(None) => 0,
+ Some(Some(_)) => 1 + self.iter.count(),
+ None => self.iter.count(),
+ }
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<I::Item> {
+ match self.peeked.take() {
+ Some(None) => None,
+ Some(v @ Some(_)) if n == 0 => v,
+ Some(Some(_)) => self.iter.nth(n - 1),
+ None => self.iter.nth(n),
+ }
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<I::Item> {
+ let peek_opt = match self.peeked.take() {
+ Some(None) => return None,
+ Some(v) => v,
+ None => None,
+ };
+ self.iter.last().or(peek_opt)
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let peek_len = match self.peeked {
+ Some(None) => return (0, Some(0)),
+ Some(Some(_)) => 1,
+ None => 0,
+ };
+ let (lo, hi) = self.iter.size_hint();
+ let lo = lo.saturating_add(peek_len);
+ let hi = match hi {
+ Some(x) => x.checked_add(peek_len),
+ None => None,
+ };
+ (lo, hi)
+ }
+
+ #[inline]
+ fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ let acc = match self.peeked.take() {
+ Some(None) => return try { init },
+ Some(Some(v)) => f(init, v)?,
+ None => init,
+ };
+ self.iter.try_fold(acc, f)
+ }
+
+ #[inline]
+ fn fold<Acc, Fold>(self, init: Acc, mut fold: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ let acc = match self.peeked {
+ Some(None) => return init,
+ Some(Some(v)) => fold(init, v),
+ None => init,
+ };
+ self.iter.fold(acc, fold)
+ }
+}
+
+#[stable(feature = "double_ended_peek_iterator", since = "1.38.0")]
+impl<I> DoubleEndedIterator for Peekable<I>
+where
+ I: DoubleEndedIterator,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<Self::Item> {
+ match self.peeked.as_mut() {
+ Some(v @ Some(_)) => self.iter.next_back().or_else(|| v.take()),
+ Some(None) => None,
+ None => self.iter.next_back(),
+ }
+ }
+
+ #[inline]
+ fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ match self.peeked.take() {
+ Some(None) => try { init },
+ Some(Some(v)) => match self.iter.try_rfold(init, &mut f).branch() {
+ ControlFlow::Continue(acc) => f(acc, v),
+ ControlFlow::Break(r) => {
+ self.peeked = Some(Some(v));
+ R::from_residual(r)
+ }
+ },
+ None => self.iter.try_rfold(init, f),
+ }
+ }
+
+ #[inline]
+ fn rfold<Acc, Fold>(self, init: Acc, mut fold: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ match self.peeked {
+ Some(None) => init,
+ Some(Some(v)) => {
+ let acc = self.iter.rfold(init, &mut fold);
+ fold(acc, v)
+ }
+ None => self.iter.rfold(init, fold),
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: ExactSizeIterator> ExactSizeIterator for Peekable<I> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<I: FusedIterator> FusedIterator for Peekable<I> {}
+
+impl<I: Iterator> Peekable<I> {
+ /// Returns a reference to the next() value without advancing the iterator.
+ ///
+ /// Like [`next`], if there is a value, it is wrapped in a `Some(T)`.
+ /// But if the iteration is over, `None` is returned.
+ ///
+ /// [`next`]: Iterator::next
+ ///
+ /// Because `peek()` returns a reference, and many iterators iterate over
+ /// references, there can be a possibly confusing situation where the
+ /// return value is a double reference. You can see this effect in the
+ /// examples below.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let xs = [1, 2, 3];
+ ///
+ /// let mut iter = xs.iter().peekable();
+ ///
+ /// // peek() lets us see into the future
+ /// assert_eq!(iter.peek(), Some(&&1));
+ /// assert_eq!(iter.next(), Some(&1));
+ ///
+ /// assert_eq!(iter.next(), Some(&2));
+ ///
+ /// // The iterator does not advance even if we `peek` multiple times
+ /// assert_eq!(iter.peek(), Some(&&3));
+ /// assert_eq!(iter.peek(), Some(&&3));
+ ///
+ /// assert_eq!(iter.next(), Some(&3));
+ ///
+ /// // After the iterator is finished, so is `peek()`
+ /// assert_eq!(iter.peek(), None);
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn peek(&mut self) -> Option<&I::Item> {
+ let iter = &mut self.iter;
+ self.peeked.get_or_insert_with(|| iter.next()).as_ref()
+ }
+
+ /// Returns a mutable reference to the next() value without advancing the iterator.
+ ///
+ /// Like [`next`], if there is a value, it is wrapped in a `Some(T)`.
+ /// But if the iteration is over, `None` is returned.
+ ///
+ /// Because `peek_mut()` returns a reference, and many iterators iterate over
+ /// references, there can be a possibly confusing situation where the
+ /// return value is a double reference. You can see this effect in the examples
+ /// below.
+ ///
+ /// [`next`]: Iterator::next
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut iter = [1, 2, 3].iter().peekable();
+ ///
+ /// // Like with `peek()`, we can see into the future without advancing the iterator.
+ /// assert_eq!(iter.peek_mut(), Some(&mut &1));
+ /// assert_eq!(iter.peek_mut(), Some(&mut &1));
+ /// assert_eq!(iter.next(), Some(&1));
+ ///
+ /// // Peek into the iterator and set the value behind the mutable reference.
+ /// if let Some(p) = iter.peek_mut() {
+ /// assert_eq!(*p, &2);
+ /// *p = &5;
+ /// }
+ ///
+ /// // The value we put in reappears as the iterator continues.
+ /// assert_eq!(iter.collect::<Vec<_>>(), vec![&5, &3]);
+ /// ```
+ #[inline]
+ #[stable(feature = "peekable_peek_mut", since = "1.53.0")]
+ pub fn peek_mut(&mut self) -> Option<&mut I::Item> {
+ let iter = &mut self.iter;
+ self.peeked.get_or_insert_with(|| iter.next()).as_mut()
+ }
+
+ /// Consume and return the next value of this iterator if a condition is true.
+ ///
+ /// If `func` returns `true` for the next value of this iterator, consume and return it.
+ /// Otherwise, return `None`.
+ ///
+ /// # Examples
+ /// Consume a number if it's equal to 0.
+ /// ```
+ /// let mut iter = (0..5).peekable();
+ /// // The first item of the iterator is 0; consume it.
+ /// assert_eq!(iter.next_if(|&x| x == 0), Some(0));
+/// // The next item returned is now 1, so the predicate returns `false`
+/// // and `next_if` returns `None`.
+/// assert_eq!(iter.next_if(|&x| x == 0), None);
+/// // `next_if` saves the value of the next item if the predicate rejects it.
+ /// assert_eq!(iter.next(), Some(1));
+ /// ```
+ ///
+ /// Consume any number less than 10.
+ /// ```
+ /// let mut iter = (1..20).peekable();
+ /// // Consume all numbers less than 10
+ /// while iter.next_if(|&x| x < 10).is_some() {}
+ /// // The next value returned will be 10
+ /// assert_eq!(iter.next(), Some(10));
+ /// ```
+ #[stable(feature = "peekable_next_if", since = "1.51.0")]
+ pub fn next_if(&mut self, func: impl FnOnce(&I::Item) -> bool) -> Option<I::Item> {
+ match self.next() {
+ Some(matched) if func(&matched) => Some(matched),
+ other => {
+ // Since we called `self.next()`, we consumed `self.peeked`.
+ assert!(self.peeked.is_none());
+ self.peeked = Some(other);
+ None
+ }
+ }
+ }
+
+ /// Consume and return the next item if it is equal to `expected`.
+ ///
+ /// # Example
+ /// Consume a number if it's equal to 0.
+ /// ```
+ /// let mut iter = (0..5).peekable();
+ /// // The first item of the iterator is 0; consume it.
+ /// assert_eq!(iter.next_if_eq(&0), Some(0));
+/// // The next item returned is now 1, which is not equal to `expected`,
+/// // so `next_if_eq` returns `None`.
+ /// assert_eq!(iter.next_if_eq(&0), None);
+ /// // `next_if_eq` saves the value of the next item if it was not equal to `expected`.
+ /// assert_eq!(iter.next(), Some(1));
+ /// ```
+ #[stable(feature = "peekable_next_if", since = "1.51.0")]
+ pub fn next_if_eq<T>(&mut self, expected: &T) -> Option<I::Item>
+ where
+ T: ?Sized,
+ I::Item: PartialEq<T>,
+ {
+ self.next_if(|next| next == expected)
+ }
+}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<I> TrustedLen for Peekable<I> where I: TrustedLen {}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I: Iterator> SourceIter for Peekable<I>
+where
+ I: SourceIter,
+{
+ type Source = I::Source;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut I::Source {
+ // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+ unsafe { SourceIter::as_inner(&mut self.iter) }
+ }
+}
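
A short behavioral sketch (illustrative, not part of the patch) of the `peeked: Option<Option<I::Item>>` slot: a peeked value has already been pulled out of the inner iterator, so `next_back` must hand it back last, exactly as the `DoubleEndedIterator` impl above does with `or_else(|| v.take())`.

```rust
fn main() {
    // peek() stashes the pulled item in the `peeked` slot, so it is the
    // last thing next_back() can return once the inner iterator is dry.
    let mut iter = [1, 2, 3].into_iter().peekable();
    assert_eq!(iter.peek(), Some(&1)); // 1 moves into the peek slot
    assert_eq!(iter.next_back(), Some(3));
    assert_eq!(iter.next_back(), Some(2));
    assert_eq!(iter.next_back(), Some(1)); // the stashed value, taken last
    assert_eq!(iter.next_back(), None);
}
```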
diff --git a/library/core/src/iter/adapters/rev.rs b/library/core/src/iter/adapters/rev.rs
new file mode 100644
index 000000000..139fb7bbd
--- /dev/null
+++ b/library/core/src/iter/adapters/rev.rs
@@ -0,0 +1,137 @@
+use crate::iter::{FusedIterator, TrustedLen};
+use crate::ops::Try;
+
+/// A double-ended iterator with the direction inverted.
+///
+/// This `struct` is created by the [`rev`] method on [`Iterator`]. See its
+/// documentation for more.
+///
+/// [`rev`]: Iterator::rev
+/// [`Iterator`]: trait.Iterator.html
+#[derive(Clone, Debug)]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Rev<T> {
+ iter: T,
+}
+
+impl<T> Rev<T> {
+ pub(in crate::iter) fn new(iter: T) -> Rev<T> {
+ Rev { iter }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I> Iterator for Rev<I>
+where
+ I: DoubleEndedIterator,
+{
+ type Item = <I as Iterator>::Item;
+
+ #[inline]
+ fn next(&mut self) -> Option<<I as Iterator>::Item> {
+ self.iter.next_back()
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+
+ #[inline]
+ fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ self.iter.advance_back_by(n)
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<<I as Iterator>::Item> {
+ self.iter.nth_back(n)
+ }
+
+ fn try_fold<B, F, R>(&mut self, init: B, f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ self.iter.try_rfold(init, f)
+ }
+
+ fn fold<Acc, F>(self, init: Acc, f: F) -> Acc
+ where
+ F: FnMut(Acc, Self::Item) -> Acc,
+ {
+ self.iter.rfold(init, f)
+ }
+
+ #[inline]
+ fn find<P>(&mut self, predicate: P) -> Option<Self::Item>
+ where
+ P: FnMut(&Self::Item) -> bool,
+ {
+ self.iter.rfind(predicate)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I> DoubleEndedIterator for Rev<I>
+where
+ I: DoubleEndedIterator,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<<I as Iterator>::Item> {
+ self.iter.next()
+ }
+
+ #[inline]
+ fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ self.iter.advance_by(n)
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<<I as Iterator>::Item> {
+ self.iter.nth(n)
+ }
+
+ fn try_rfold<B, F, R>(&mut self, init: B, f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ self.iter.try_fold(init, f)
+ }
+
+ fn rfold<Acc, F>(self, init: Acc, f: F) -> Acc
+ where
+ F: FnMut(Acc, Self::Item) -> Acc,
+ {
+ self.iter.fold(init, f)
+ }
+
+ fn rfind<P>(&mut self, predicate: P) -> Option<Self::Item>
+ where
+ P: FnMut(&Self::Item) -> bool,
+ {
+ self.iter.find(predicate)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I> ExactSizeIterator for Rev<I>
+where
+ I: ExactSizeIterator + DoubleEndedIterator,
+{
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+
+ fn is_empty(&self) -> bool {
+ self.iter.is_empty()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<I> FusedIterator for Rev<I> where I: FusedIterator + DoubleEndedIterator {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<I> TrustedLen for Rev<I> where I: TrustedLen + DoubleEndedIterator {}
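
For reference, a minimal sketch (not part of the patch) of the end-swapping above: every forward method of `Rev` delegates to the corresponding backward method of the inner iterator, and vice versa.

```rust
fn main() {
    // Rev swaps the two ends: next() is the inner next_back(), and
    // next_back() is the inner next(), so both ends stay consistent.
    let mut iter = [1, 2, 3].into_iter().rev();
    assert_eq!(iter.next(), Some(3));
    assert_eq!(iter.next_back(), Some(1));
    assert_eq!(iter.next(), Some(2));
    assert_eq!(iter.next(), None);
}
```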
diff --git a/library/core/src/iter/adapters/scan.rs b/library/core/src/iter/adapters/scan.rs
new file mode 100644
index 000000000..80bfd2231
--- /dev/null
+++ b/library/core/src/iter/adapters/scan.rs
@@ -0,0 +1,110 @@
+use crate::fmt;
+use crate::iter::{adapters::SourceIter, InPlaceIterable};
+use crate::ops::{ControlFlow, Try};
+
+/// An iterator to maintain state while iterating another iterator.
+///
+/// This `struct` is created by the [`scan`] method on [`Iterator`]. See its
+/// documentation for more.
+///
+/// [`scan`]: Iterator::scan
+/// [`Iterator`]: trait.Iterator.html
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Clone)]
+pub struct Scan<I, St, F> {
+ iter: I,
+ f: F,
+ state: St,
+}
+
+impl<I, St, F> Scan<I, St, F> {
+ pub(in crate::iter) fn new(iter: I, state: St, f: F) -> Scan<I, St, F> {
+ Scan { iter, state, f }
+ }
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<I: fmt::Debug, St: fmt::Debug, F> fmt::Debug for Scan<I, St, F> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Scan").field("iter", &self.iter).field("state", &self.state).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B, I, St, F> Iterator for Scan<I, St, F>
+where
+ I: Iterator,
+ F: FnMut(&mut St, I::Item) -> Option<B>,
+{
+ type Item = B;
+
+ #[inline]
+ fn next(&mut self) -> Option<B> {
+ let a = self.iter.next()?;
+ (self.f)(&mut self.state, a)
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (_, upper) = self.iter.size_hint();
+ (0, upper) // can't know a lower bound, due to the scan function
+ }
+
+ #[inline]
+ fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ fn scan<'a, T, St, B, Acc, R: Try<Output = Acc>>(
+ state: &'a mut St,
+ f: &'a mut impl FnMut(&mut St, T) -> Option<B>,
+ mut fold: impl FnMut(Acc, B) -> R + 'a,
+ ) -> impl FnMut(Acc, T) -> ControlFlow<R, Acc> + 'a {
+ move |acc, x| match f(state, x) {
+ None => ControlFlow::Break(try { acc }),
+ Some(x) => ControlFlow::from_try(fold(acc, x)),
+ }
+ }
+
+ let state = &mut self.state;
+ let f = &mut self.f;
+ self.iter.try_fold(init, scan(state, f, fold)).into_try()
+ }
+
+ #[inline]
+ fn fold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ #[inline]
+ fn ok<B, T>(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result<B, !> {
+ move |acc, x| Ok(f(acc, x))
+ }
+
+ self.try_fold(init, ok(fold)).unwrap()
+ }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<St, F, I> SourceIter for Scan<I, St, F>
+where
+ I: SourceIter,
+{
+ type Source = I::Source;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut I::Source {
+ // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+ unsafe { SourceIter::as_inner(&mut self.iter) }
+ }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<St, F, B, I: InPlaceIterable> InPlaceIterable for Scan<I, St, F> where
+ F: FnMut(&mut St, I::Item) -> Option<B>
+{
+}
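
An illustrative sketch (not part of the patch): `scan` threads mutable state through the closure, and a `None` return terminates iteration, mirroring the `ControlFlow::Break` arm in the `try_fold` above.

```rust
fn main() {
    // scan() carries `sum` across items; returning None stops the
    // iteration, which is why the lower size_hint bound must be 0.
    let running: Vec<i32> = [1, 2, 3, 4]
        .into_iter()
        .scan(0, |sum, x| {
            *sum += x;
            if *sum <= 6 { Some(*sum) } else { None }
        })
        .collect();
    assert_eq!(running, [1, 3, 6]); // 1 + 2 + 3 + 4 == 10 exceeds 6
}
```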
diff --git a/library/core/src/iter/adapters/skip.rs b/library/core/src/iter/adapters/skip.rs
new file mode 100644
index 000000000..2c283100f
--- /dev/null
+++ b/library/core/src/iter/adapters/skip.rs
@@ -0,0 +1,239 @@
+use crate::intrinsics::unlikely;
+use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable};
+use crate::ops::{ControlFlow, Try};
+
+/// An iterator that skips over `n` elements of `iter`.
+///
+/// This `struct` is created by the [`skip`] method on [`Iterator`]. See its
+/// documentation for more.
+///
+/// [`skip`]: Iterator::skip
+/// [`Iterator`]: trait.Iterator.html
+#[derive(Clone, Debug)]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Skip<I> {
+ iter: I,
+ n: usize,
+}
+
+impl<I> Skip<I> {
+ pub(in crate::iter) fn new(iter: I, n: usize) -> Skip<I> {
+ Skip { iter, n }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I> Iterator for Skip<I>
+where
+ I: Iterator,
+{
+ type Item = <I as Iterator>::Item;
+
+ #[inline]
+ fn next(&mut self) -> Option<I::Item> {
+ if unlikely(self.n > 0) {
+ self.iter.nth(crate::mem::take(&mut self.n) - 1)?;
+ }
+ self.iter.next()
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<I::Item> {
+ // Can't just add n + self.n due to overflow.
+ if self.n > 0 {
+ let to_skip = self.n;
+ self.n = 0;
+ // nth(n) skips n+1
+ self.iter.nth(to_skip - 1)?;
+ }
+ self.iter.nth(n)
+ }
+
+ #[inline]
+ fn count(mut self) -> usize {
+ if self.n > 0 {
+ // nth(n) skips n+1
+ if self.iter.nth(self.n - 1).is_none() {
+ return 0;
+ }
+ }
+ self.iter.count()
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<I::Item> {
+ if self.n > 0 {
+ // nth(n) skips n+1
+ self.iter.nth(self.n - 1)?;
+ }
+ self.iter.last()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (lower, upper) = self.iter.size_hint();
+
+ let lower = lower.saturating_sub(self.n);
+ let upper = match upper {
+ Some(x) => Some(x.saturating_sub(self.n)),
+ None => None,
+ };
+
+ (lower, upper)
+ }
+
+ #[inline]
+ fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ let n = self.n;
+ self.n = 0;
+ if n > 0 {
+ // nth(n) skips n+1
+ if self.iter.nth(n - 1).is_none() {
+ return try { init };
+ }
+ }
+ self.iter.try_fold(init, fold)
+ }
+
+ #[inline]
+ fn fold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ if self.n > 0 {
+ // nth(n) skips n+1
+ if self.iter.nth(self.n - 1).is_none() {
+ return init;
+ }
+ }
+ self.iter.fold(init, fold)
+ }
+
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ let mut rem = n;
+ let step_one = self.n.saturating_add(rem);
+
+ match self.iter.advance_by(step_one) {
+ Ok(_) => {
+ rem -= step_one - self.n;
+ self.n = 0;
+ }
+ Err(advanced) => {
+ let advanced_without_skip = advanced.saturating_sub(self.n);
+ self.n = self.n.saturating_sub(advanced);
+ return if n == 0 { Ok(()) } else { Err(advanced_without_skip) };
+ }
+ }
+
+ // step_one calculation may have saturated
+ if unlikely(rem > 0) {
+ return match self.iter.advance_by(rem) {
+ ret @ Ok(_) => ret,
+ Err(advanced) => {
+ rem -= advanced;
+ Err(n - rem)
+ }
+ };
+ }
+
+ Ok(())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I> ExactSizeIterator for Skip<I> where I: ExactSizeIterator {}
+
+#[stable(feature = "double_ended_skip_iterator", since = "1.9.0")]
+impl<I> DoubleEndedIterator for Skip<I>
+where
+ I: DoubleEndedIterator + ExactSizeIterator,
+{
+ fn next_back(&mut self) -> Option<Self::Item> {
+ if self.len() > 0 { self.iter.next_back() } else { None }
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<I::Item> {
+ let len = self.len();
+ if n < len {
+ self.iter.nth_back(n)
+ } else {
+ if len > 0 {
+ // consume the original iterator
+ self.iter.nth_back(len - 1);
+ }
+ None
+ }
+ }
+
+ fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ fn check<T, Acc, R: Try<Output = Acc>>(
+ mut n: usize,
+ mut fold: impl FnMut(Acc, T) -> R,
+ ) -> impl FnMut(Acc, T) -> ControlFlow<R, Acc> {
+ move |acc, x| {
+ n -= 1;
+ let r = fold(acc, x);
+ if n == 0 { ControlFlow::Break(r) } else { ControlFlow::from_try(r) }
+ }
+ }
+
+ let n = self.len();
+ if n == 0 { try { init } } else { self.iter.try_rfold(init, check(n, fold)).into_try() }
+ }
+
+ fn rfold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ #[inline]
+ fn ok<Acc, T>(mut f: impl FnMut(Acc, T) -> Acc) -> impl FnMut(Acc, T) -> Result<Acc, !> {
+ move |acc, x| Ok(f(acc, x))
+ }
+
+ self.try_rfold(init, ok(fold)).unwrap()
+ }
+
+ #[inline]
+ fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ let min = crate::cmp::min(self.len(), n);
+ return match self.iter.advance_back_by(min) {
+ ret @ Ok(_) if n <= min => ret,
+ Ok(_) => Err(min),
+ _ => panic!("ExactSizeIterator contract violation"),
+ };
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<I> FusedIterator for Skip<I> where I: FusedIterator {}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I> SourceIter for Skip<I>
+where
+ I: SourceIter,
+{
+ type Source = I::Source;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut I::Source {
+ // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+ unsafe { SourceIter::as_inner(&mut self.iter) }
+ }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I: InPlaceIterable> InPlaceIterable for Skip<I> {}
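
A minimal behavioral sketch (not part of the patch): `Skip` performs the whole skip with one `nth` call on first use, then behaves like the inner iterator, as the `next`/`nth` implementations above show.

```rust
fn main() {
    // skip(n) defers the work: the first next() issues a single
    // nth(n - 1) on the inner iterator, then iterates normally.
    let mut iter = (0..10).skip(3);
    assert_eq!(iter.next(), Some(3)); // nth(2) consumed 0, 1, 2
    assert_eq!(iter.size_hint(), (6, Some(6)));
    assert_eq!(iter.last(), Some(9));
}
```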
diff --git a/library/core/src/iter/adapters/skip_while.rs b/library/core/src/iter/adapters/skip_while.rs
new file mode 100644
index 000000000..f29661779
--- /dev/null
+++ b/library/core/src/iter/adapters/skip_while.rs
@@ -0,0 +1,125 @@
+use crate::fmt;
+use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable};
+use crate::ops::Try;
+
+/// An iterator that rejects elements while `predicate` returns `true`.
+///
+/// This `struct` is created by the [`skip_while`] method on [`Iterator`]. See its
+/// documentation for more.
+///
+/// [`skip_while`]: Iterator::skip_while
+/// [`Iterator`]: trait.Iterator.html
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Clone)]
+pub struct SkipWhile<I, P> {
+ iter: I,
+ flag: bool,
+ predicate: P,
+}
+
+impl<I, P> SkipWhile<I, P> {
+ pub(in crate::iter) fn new(iter: I, predicate: P) -> SkipWhile<I, P> {
+ SkipWhile { iter, flag: false, predicate }
+ }
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<I: fmt::Debug, P> fmt::Debug for SkipWhile<I, P> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SkipWhile").field("iter", &self.iter).field("flag", &self.flag).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: Iterator, P> Iterator for SkipWhile<I, P>
+where
+ P: FnMut(&I::Item) -> bool,
+{
+ type Item = I::Item;
+
+ #[inline]
+ fn next(&mut self) -> Option<I::Item> {
+ fn check<'a, T>(
+ flag: &'a mut bool,
+ pred: &'a mut impl FnMut(&T) -> bool,
+ ) -> impl FnMut(&T) -> bool + 'a {
+ move |x| {
+ if *flag || !pred(x) {
+ *flag = true;
+ true
+ } else {
+ false
+ }
+ }
+ }
+
+ let flag = &mut self.flag;
+ let pred = &mut self.predicate;
+ self.iter.find(check(flag, pred))
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (_, upper) = self.iter.size_hint();
+ (0, upper) // can't know a lower bound, due to the predicate
+ }
+
+ #[inline]
+ fn try_fold<Acc, Fold, R>(&mut self, mut init: Acc, mut fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ if !self.flag {
+ match self.next() {
+ Some(v) => init = fold(init, v)?,
+ None => return try { init },
+ }
+ }
+ self.iter.try_fold(init, fold)
+ }
+
+ #[inline]
+ fn fold<Acc, Fold>(mut self, mut init: Acc, mut fold: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ if !self.flag {
+ match self.next() {
+ Some(v) => init = fold(init, v),
+ None => return init,
+ }
+ }
+ self.iter.fold(init, fold)
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<I, P> FusedIterator for SkipWhile<I, P>
+where
+ I: FusedIterator,
+ P: FnMut(&I::Item) -> bool,
+{
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<P, I> SourceIter for SkipWhile<I, P>
+where
+ I: SourceIter,
+{
+ type Source = I::Source;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut I::Source {
+ // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+ unsafe { SourceIter::as_inner(&mut self.iter) }
+ }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I: InPlaceIterable, F> InPlaceIterable for SkipWhile<I, F> where
+ F: FnMut(&I::Item) -> bool
+{
+}
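
An illustrative sketch (not part of the patch) of the `flag` above: once the predicate fails for the first time, it is never consulted again, so later items that would match it still pass through.

```rust
fn main() {
    // skip_while() drops items until the predicate first fails; the
    // `flag` then stays set, so -5 is yielded despite being negative.
    let v: Vec<i32> = [-2, -1, 0, 1, -5]
        .into_iter()
        .skip_while(|x| *x < 0)
        .collect();
    assert_eq!(v, [0, 1, -5]);
}
```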
diff --git a/library/core/src/iter/adapters/step_by.rs b/library/core/src/iter/adapters/step_by.rs
new file mode 100644
index 000000000..4252c34a0
--- /dev/null
+++ b/library/core/src/iter/adapters/step_by.rs
@@ -0,0 +1,235 @@
+use crate::{intrinsics, iter::from_fn, ops::Try};
+
+/// An iterator for stepping iterators by a custom amount.
+///
+/// This `struct` is created by the [`step_by`] method on [`Iterator`]. See
+/// its documentation for more.
+///
+/// [`step_by`]: Iterator::step_by
+/// [`Iterator`]: trait.Iterator.html
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "iterator_step_by", since = "1.28.0")]
+#[derive(Clone, Debug)]
+pub struct StepBy<I> {
+ iter: I,
+ step: usize,
+ first_take: bool,
+}
+
+impl<I> StepBy<I> {
+ pub(in crate::iter) fn new(iter: I, step: usize) -> StepBy<I> {
+ assert!(step != 0);
+ StepBy { iter, step: step - 1, first_take: true }
+ }
+}
+
+#[stable(feature = "iterator_step_by", since = "1.28.0")]
+impl<I> Iterator for StepBy<I>
+where
+ I: Iterator,
+{
+ type Item = I::Item;
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.first_take {
+ self.first_take = false;
+ self.iter.next()
+ } else {
+ self.iter.nth(self.step)
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ #[inline]
+ fn first_size(step: usize) -> impl Fn(usize) -> usize {
+ move |n| if n == 0 { 0 } else { 1 + (n - 1) / (step + 1) }
+ }
+
+ #[inline]
+ fn other_size(step: usize) -> impl Fn(usize) -> usize {
+ move |n| n / (step + 1)
+ }
+
+ let (low, high) = self.iter.size_hint();
+
+ if self.first_take {
+ let f = first_size(self.step);
+ (f(low), high.map(f))
+ } else {
+ let f = other_size(self.step);
+ (f(low), high.map(f))
+ }
+ }
+
+ #[inline]
+ fn nth(&mut self, mut n: usize) -> Option<Self::Item> {
+ if self.first_take {
+ self.first_take = false;
+ let first = self.iter.next();
+ if n == 0 {
+ return first;
+ }
+ n -= 1;
+ }
+        // n and self.step are indices; we need to add 1 to get the number of elements.
+        // When calling `.nth`, we need to subtract 1 again to convert back to an index.
+        // step + 1 can't overflow because `.step_by` sets `self.step` to `step - 1`.
+ let mut step = self.step + 1;
+ // n + 1 could overflow
+ // thus, if n is usize::MAX, instead of adding one, we call .nth(step)
+ if n == usize::MAX {
+ self.iter.nth(step - 1);
+ } else {
+ n += 1;
+ }
+
+ // overflow handling
+ loop {
+ let mul = n.checked_mul(step);
+ {
+ if intrinsics::likely(mul.is_some()) {
+ return self.iter.nth(mul.unwrap() - 1);
+ }
+ }
+ let div_n = usize::MAX / n;
+ let div_step = usize::MAX / step;
+ let nth_n = div_n * n;
+ let nth_step = div_step * step;
+ let nth = if nth_n > nth_step {
+ step -= div_n;
+ nth_n
+ } else {
+ n -= div_step;
+ nth_step
+ };
+ self.iter.nth(nth - 1);
+ }
+ }
+
+ fn try_fold<Acc, F, R>(&mut self, mut acc: Acc, mut f: F) -> R
+ where
+ F: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ #[inline]
+ fn nth<I: Iterator>(iter: &mut I, step: usize) -> impl FnMut() -> Option<I::Item> + '_ {
+ move || iter.nth(step)
+ }
+
+ if self.first_take {
+ self.first_take = false;
+ match self.iter.next() {
+ None => return try { acc },
+ Some(x) => acc = f(acc, x)?,
+ }
+ }
+ from_fn(nth(&mut self.iter, self.step)).try_fold(acc, f)
+ }
+
+ fn fold<Acc, F>(mut self, mut acc: Acc, mut f: F) -> Acc
+ where
+ F: FnMut(Acc, Self::Item) -> Acc,
+ {
+ #[inline]
+ fn nth<I: Iterator>(iter: &mut I, step: usize) -> impl FnMut() -> Option<I::Item> + '_ {
+ move || iter.nth(step)
+ }
+
+ if self.first_take {
+ self.first_take = false;
+ match self.iter.next() {
+ None => return acc,
+ Some(x) => acc = f(acc, x),
+ }
+ }
+ from_fn(nth(&mut self.iter, self.step)).fold(acc, f)
+ }
+}
+
+impl<I> StepBy<I>
+where
+ I: ExactSizeIterator,
+{
+    // The zero-based index, counted from the back of the inner iterator,
+    // of the last element that `StepBy` will yield. Used in the
+    // `DoubleEndedIterator` implementation.
+ fn next_back_index(&self) -> usize {
+ let rem = self.iter.len() % (self.step + 1);
+ if self.first_take {
+ if rem == 0 { self.step } else { rem - 1 }
+ } else {
+ rem
+ }
+ }
+}
+
+#[stable(feature = "double_ended_step_by_iterator", since = "1.38.0")]
+impl<I> DoubleEndedIterator for StepBy<I>
+where
+ I: DoubleEndedIterator + ExactSizeIterator,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.iter.nth_back(self.next_back_index())
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ // `self.iter.nth_back(usize::MAX)` does the right thing here when `n`
+ // is out of bounds because the length of `self.iter` does not exceed
+ // `usize::MAX` (because `I: ExactSizeIterator`) and `nth_back` is
+ // zero-indexed
+ let n = n.saturating_mul(self.step + 1).saturating_add(self.next_back_index());
+ self.iter.nth_back(n)
+ }
+
+ fn try_rfold<Acc, F, R>(&mut self, init: Acc, mut f: F) -> R
+ where
+ F: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ #[inline]
+ fn nth_back<I: DoubleEndedIterator>(
+ iter: &mut I,
+ step: usize,
+ ) -> impl FnMut() -> Option<I::Item> + '_ {
+ move || iter.nth_back(step)
+ }
+
+ match self.next_back() {
+ None => try { init },
+ Some(x) => {
+ let acc = f(init, x)?;
+ from_fn(nth_back(&mut self.iter, self.step)).try_fold(acc, f)
+ }
+ }
+ }
+
+ #[inline]
+ fn rfold<Acc, F>(mut self, init: Acc, mut f: F) -> Acc
+ where
+ Self: Sized,
+ F: FnMut(Acc, Self::Item) -> Acc,
+ {
+ #[inline]
+ fn nth_back<I: DoubleEndedIterator>(
+ iter: &mut I,
+ step: usize,
+ ) -> impl FnMut() -> Option<I::Item> + '_ {
+ move || iter.nth_back(step)
+ }
+
+ match self.next_back() {
+ None => init,
+ Some(x) => {
+ let acc = f(init, x);
+ from_fn(nth_back(&mut self.iter, self.step)).fold(acc, f)
+ }
+ }
+ }
+}
+
+// StepBy can only make the iterator shorter, so the len will still fit.
+#[stable(feature = "iterator_step_by", since = "1.28.0")]
+impl<I> ExactSizeIterator for StepBy<I> where I: ExactSizeIterator {}
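
For orientation, a small sketch (not part of the patch): `StepBy` stores `step - 1` and flags the first call, so the first item comes from `next()` and every later one from `nth(step - 1)`; the `next_back_index` computation above makes the reverse direction land on the same elements.

```rust
fn main() {
    // Forward: first_take yields 0, then nth(2) yields 3, 6, 9.
    let v: Vec<i32> = (0..10).step_by(3).collect();
    assert_eq!(v, [0, 3, 6, 9]);

    // Backward: next_back_index() aligns the back end on the same
    // elements, so reversing visits them in the opposite order.
    let r: Vec<i32> = (0..10).step_by(3).rev().collect();
    assert_eq!(r, [9, 6, 3, 0]);
}
```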
diff --git a/library/core/src/iter/adapters/take.rs b/library/core/src/iter/adapters/take.rs
new file mode 100644
index 000000000..2962e0104
--- /dev/null
+++ b/library/core/src/iter/adapters/take.rs
@@ -0,0 +1,244 @@
+use crate::cmp;
+use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable, TrustedLen};
+use crate::ops::{ControlFlow, Try};
+
+/// An iterator that yields only the first `n` elements of `iter`.
+///
+/// This `struct` is created by the [`take`] method on [`Iterator`]. See its
+/// documentation for more.
+///
+/// [`take`]: Iterator::take
+/// [`Iterator`]: trait.Iterator.html
+#[derive(Clone, Debug)]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Take<I> {
+ iter: I,
+ n: usize,
+}
+
+impl<I> Take<I> {
+ pub(in crate::iter) fn new(iter: I, n: usize) -> Take<I> {
+ Take { iter, n }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I> Iterator for Take<I>
+where
+ I: Iterator,
+{
+ type Item = <I as Iterator>::Item;
+
+ #[inline]
+ fn next(&mut self) -> Option<<I as Iterator>::Item> {
+ if self.n != 0 {
+ self.n -= 1;
+ self.iter.next()
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<I::Item> {
+ if self.n > n {
+ self.n -= n + 1;
+ self.iter.nth(n)
+ } else {
+ if self.n > 0 {
+ self.iter.nth(self.n - 1);
+ self.n = 0;
+ }
+ None
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.n == 0 {
+ return (0, Some(0));
+ }
+
+ let (lower, upper) = self.iter.size_hint();
+
+ let lower = cmp::min(lower, self.n);
+
+ let upper = match upper {
+ Some(x) if x < self.n => Some(x),
+ _ => Some(self.n),
+ };
+
+ (lower, upper)
+ }
+
+ #[inline]
+ fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ fn check<'a, T, Acc, R: Try<Output = Acc>>(
+ n: &'a mut usize,
+ mut fold: impl FnMut(Acc, T) -> R + 'a,
+ ) -> impl FnMut(Acc, T) -> ControlFlow<R, Acc> + 'a {
+ move |acc, x| {
+ *n -= 1;
+ let r = fold(acc, x);
+ if *n == 0 { ControlFlow::Break(r) } else { ControlFlow::from_try(r) }
+ }
+ }
+
+ if self.n == 0 {
+ try { init }
+ } else {
+ let n = &mut self.n;
+ self.iter.try_fold(init, check(n, fold)).into_try()
+ }
+ }
+
+ #[inline]
+ fn fold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ #[inline]
+ fn ok<B, T>(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result<B, !> {
+ move |acc, x| Ok(f(acc, x))
+ }
+
+ self.try_fold(init, ok(fold)).unwrap()
+ }
+
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ let min = self.n.min(n);
+ match self.iter.advance_by(min) {
+ Ok(_) => {
+ self.n -= min;
+ if min < n { Err(min) } else { Ok(()) }
+ }
+ ret @ Err(advanced) => {
+ self.n -= advanced;
+ ret
+ }
+ }
+ }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I> SourceIter for Take<I>
+where
+ I: SourceIter,
+{
+ type Source = I::Source;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut I::Source {
+ // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+ unsafe { SourceIter::as_inner(&mut self.iter) }
+ }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I: InPlaceIterable> InPlaceIterable for Take<I> {}
+
+#[stable(feature = "double_ended_take_iterator", since = "1.38.0")]
+impl<I> DoubleEndedIterator for Take<I>
+where
+ I: DoubleEndedIterator + ExactSizeIterator,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<Self::Item> {
+ if self.n == 0 {
+ None
+ } else {
+ let n = self.n;
+ self.n -= 1;
+ self.iter.nth_back(self.iter.len().saturating_sub(n))
+ }
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ let len = self.iter.len();
+ if self.n > n {
+ let m = len.saturating_sub(self.n) + n;
+ self.n -= n + 1;
+ self.iter.nth_back(m)
+ } else {
+ if len > 0 {
+ self.iter.nth_back(len - 1);
+ }
+ None
+ }
+ }
+
+ #[inline]
+ fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ if self.n == 0 {
+ try { init }
+ } else {
+ let len = self.iter.len();
+ if len > self.n && self.iter.nth_back(len - self.n - 1).is_none() {
+ try { init }
+ } else {
+ self.iter.try_rfold(init, fold)
+ }
+ }
+ }
+
+ #[inline]
+ fn rfold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ if self.n == 0 {
+ init
+ } else {
+ let len = self.iter.len();
+ if len > self.n && self.iter.nth_back(len - self.n - 1).is_none() {
+ init
+ } else {
+ self.iter.rfold(init, fold)
+ }
+ }
+ }
+
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ // The amount by which the inner iterator needs to be shortened for it to be
+ // at most as long as the take() amount.
+ let trim_inner = self.iter.len().saturating_sub(self.n);
+ // The amount we need to advance inner to fulfill the caller's request.
+ // take(), advance_by() and len() all can be at most usize, so we don't have to worry
+ // about having to advance more than usize::MAX here.
+ let advance_by = trim_inner.saturating_add(n);
+
+ let advanced = match self.iter.advance_back_by(advance_by) {
+ Ok(_) => advance_by - trim_inner,
+ Err(advanced) => advanced - trim_inner,
+ };
+ self.n -= advanced;
+ return if advanced < n { Err(advanced) } else { Ok(()) };
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I> ExactSizeIterator for Take<I> where I: ExactSizeIterator {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<I> FusedIterator for Take<I> where I: FusedIterator {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<I: TrustedLen> TrustedLen for Take<I> {}
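
A minimal sketch (not part of the patch) of the budget kept in `n`: it is decremented on every `next()`, and once it reaches 0 the adapter returns `None` without touching the inner iterator again.

```rust
fn main() {
    // take(3) on an infinite range: the budget, not the inner
    // iterator, decides when iteration stops.
    let mut iter = (0..).take(3);
    assert_eq!(iter.next(), Some(0));
    assert_eq!(iter.size_hint(), (2, Some(2))); // clamped by the budget
    assert_eq!(iter.next(), Some(1));
    assert_eq!(iter.next(), Some(2));
    assert_eq!(iter.next(), None); // 3 is never pulled from the range
}
```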
diff --git a/library/core/src/iter/adapters/take_while.rs b/library/core/src/iter/adapters/take_while.rs
new file mode 100644
index 000000000..ded216da9
--- /dev/null
+++ b/library/core/src/iter/adapters/take_while.rs
@@ -0,0 +1,138 @@
+use crate::fmt;
+use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable};
+use crate::ops::{ControlFlow, Try};
+
+/// An iterator that only accepts elements while `predicate` returns `true`.
+///
+/// This `struct` is created by the [`take_while`] method on [`Iterator`]. See its
+/// documentation for more.
+///
+/// [`take_while`]: Iterator::take_while
+/// [`Iterator`]: trait.Iterator.html
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Clone)]
+pub struct TakeWhile<I, P> {
+ iter: I,
+ flag: bool,
+ predicate: P,
+}
+
+impl<I, P> TakeWhile<I, P> {
+ pub(in crate::iter) fn new(iter: I, predicate: P) -> TakeWhile<I, P> {
+ TakeWhile { iter, flag: false, predicate }
+ }
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<I: fmt::Debug, P> fmt::Debug for TakeWhile<I, P> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("TakeWhile").field("iter", &self.iter).field("flag", &self.flag).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: Iterator, P> Iterator for TakeWhile<I, P>
+where
+ P: FnMut(&I::Item) -> bool,
+{
+ type Item = I::Item;
+
+ #[inline]
+ fn next(&mut self) -> Option<I::Item> {
+ if self.flag {
+ None
+ } else {
+ let x = self.iter.next()?;
+ if (self.predicate)(&x) {
+ Some(x)
+ } else {
+ self.flag = true;
+ None
+ }
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.flag {
+ (0, Some(0))
+ } else {
+ let (_, upper) = self.iter.size_hint();
+ (0, upper) // can't know a lower bound, due to the predicate
+ }
+ }
+
+ #[inline]
+ fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ fn check<'a, T, Acc, R: Try<Output = Acc>>(
+ flag: &'a mut bool,
+ p: &'a mut impl FnMut(&T) -> bool,
+ mut fold: impl FnMut(Acc, T) -> R + 'a,
+ ) -> impl FnMut(Acc, T) -> ControlFlow<R, Acc> + 'a {
+ move |acc, x| {
+ if p(&x) {
+ ControlFlow::from_try(fold(acc, x))
+ } else {
+ *flag = true;
+ ControlFlow::Break(try { acc })
+ }
+ }
+ }
+
+ if self.flag {
+ try { init }
+ } else {
+ let flag = &mut self.flag;
+ let p = &mut self.predicate;
+ self.iter.try_fold(init, check(flag, p, fold)).into_try()
+ }
+ }
+
+ #[inline]
+ fn fold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ #[inline]
+ fn ok<B, T>(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result<B, !> {
+ move |acc, x| Ok(f(acc, x))
+ }
+
+ self.try_fold(init, ok(fold)).unwrap()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<I, P> FusedIterator for TakeWhile<I, P>
+where
+ I: FusedIterator,
+ P: FnMut(&I::Item) -> bool,
+{
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<P, I> SourceIter for TakeWhile<I, P>
+where
+ I: SourceIter,
+{
+ type Source = I::Source;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut I::Source {
+ // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+ unsafe { SourceIter::as_inner(&mut self.iter) }
+ }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I: InPlaceIterable, F> InPlaceIterable for TakeWhile<I, F> where
+ F: FnMut(&I::Item) -> bool
+{
+}
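
An illustrative sketch (not part of the patch) of a subtlety in the `next` implementation above: the first failing item is consumed from the inner iterator but never yielded, so it is lost to the caller.

```rust
fn main() {
    // take_while() pulls the 4 to test it, sets the flag, and drops it:
    // the inner iterator resumes after the rejected element.
    let mut inner = [1, 2, 4, 1].into_iter();
    let taken: Vec<i32> = inner.by_ref().take_while(|&x| x < 3).collect();
    assert_eq!(taken, [1, 2]);
    assert_eq!(inner.next(), Some(1)); // the 4 was consumed and discarded
}
```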
diff --git a/library/core/src/iter/adapters/zip.rs b/library/core/src/iter/adapters/zip.rs
new file mode 100644
index 000000000..8153c8cfe
--- /dev/null
+++ b/library/core/src/iter/adapters/zip.rs
@@ -0,0 +1,585 @@
+use crate::cmp;
+use crate::fmt::{self, Debug};
+use crate::iter::{DoubleEndedIterator, ExactSizeIterator, FusedIterator, Iterator};
+use crate::iter::{InPlaceIterable, SourceIter, TrustedLen};
+
+/// An iterator that iterates over two other iterators simultaneously.
+///
+/// This `struct` is created by [`zip`] or [`Iterator::zip`].
+/// See their documentation for more.
+#[derive(Clone)]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Zip<A, B> {
+ a: A,
+ b: B,
+ // index, len and a_len are only used by the specialized version of zip
+ index: usize,
+ len: usize,
+ a_len: usize,
+}
+impl<A: Iterator, B: Iterator> Zip<A, B> {
+ pub(in crate::iter) fn new(a: A, b: B) -> Zip<A, B> {
+ ZipImpl::new(a, b)
+ }
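+    // Advances with repeated `next` calls; the `ZipImpl::nth` implementations
+    // fall back to this.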
+ fn super_nth(&mut self, mut n: usize) -> Option<(A::Item, B::Item)> {
+ while let Some(x) = Iterator::next(self) {
+ if n == 0 {
+ return Some(x);
+ }
+ n -= 1;
+ }
+ None
+ }
+}
+
+/// Converts the arguments to iterators and zips them.
+///
+/// See the documentation of [`Iterator::zip`] for more.
+///
+/// # Examples
+///
+/// ```
+/// use std::iter::zip;
+///
+/// let xs = [1, 2, 3];
+/// let ys = [4, 5, 6];
+///
+/// let mut iter = zip(xs, ys);
+///
+/// assert_eq!(iter.next().unwrap(), (1, 4));
+/// assert_eq!(iter.next().unwrap(), (2, 5));
+/// assert_eq!(iter.next().unwrap(), (3, 6));
+/// assert!(iter.next().is_none());
+///
+/// // Nested zips are also possible:
+/// let zs = [7, 8, 9];
+///
+/// let mut iter = zip(zip(xs, ys), zs);
+///
+/// assert_eq!(iter.next().unwrap(), ((1, 4), 7));
+/// assert_eq!(iter.next().unwrap(), ((2, 5), 8));
+/// assert_eq!(iter.next().unwrap(), ((3, 6), 9));
+/// assert!(iter.next().is_none());
+/// ```
+#[stable(feature = "iter_zip", since = "1.59.0")]
+pub fn zip<A, B>(a: A, b: B) -> Zip<A::IntoIter, B::IntoIter>
+where
+ A: IntoIterator,
+ B: IntoIterator,
+{
+ ZipImpl::new(a.into_iter(), b.into_iter())
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A, B> Iterator for Zip<A, B>
+where
+ A: Iterator,
+ B: Iterator,
+{
+ type Item = (A::Item, B::Item);
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ ZipImpl::next(self)
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ ZipImpl::size_hint(self)
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ ZipImpl::nth(self, n)
+ }
+
+ #[inline]
+ unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item
+ where
+ Self: TrustedRandomAccessNoCoerce,
+ {
+ // SAFETY: `ZipImpl::__iterator_get_unchecked` has same safety
+ // requirements as `Iterator::__iterator_get_unchecked`.
+ unsafe { ZipImpl::get_unchecked(self, idx) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A, B> DoubleEndedIterator for Zip<A, B>
+where
+ A: DoubleEndedIterator + ExactSizeIterator,
+ B: DoubleEndedIterator + ExactSizeIterator,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<(A::Item, B::Item)> {
+ ZipImpl::next_back(self)
+ }
+}
+
+// Zip specialization trait
+#[doc(hidden)]
+trait ZipImpl<A, B> {
+ type Item;
+ fn new(a: A, b: B) -> Self;
+ fn next(&mut self) -> Option<Self::Item>;
+ fn size_hint(&self) -> (usize, Option<usize>);
+ fn nth(&mut self, n: usize) -> Option<Self::Item>;
+ fn next_back(&mut self) -> Option<Self::Item>
+ where
+ A: DoubleEndedIterator + ExactSizeIterator,
+ B: DoubleEndedIterator + ExactSizeIterator;
+ // This has the same safety requirements as `Iterator::__iterator_get_unchecked`
+ unsafe fn get_unchecked(&mut self, idx: usize) -> <Self as Iterator>::Item
+ where
+ Self: Iterator + TrustedRandomAccessNoCoerce;
+}
+
+// Work around limitations of specialization, requiring `default` impls to be repeated
+// in intermediary impls.
+macro_rules! zip_impl_general_defaults {
+ () => {
+ default fn new(a: A, b: B) -> Self {
+ Zip {
+ a,
+ b,
+ index: 0, // unused
+ len: 0, // unused
+ a_len: 0, // unused
+ }
+ }
+
+ #[inline]
+ default fn next(&mut self) -> Option<(A::Item, B::Item)> {
+ let x = self.a.next()?;
+ let y = self.b.next()?;
+ Some((x, y))
+ }
+
+ #[inline]
+ default fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ self.super_nth(n)
+ }
+
+ #[inline]
+ default fn next_back(&mut self) -> Option<(A::Item, B::Item)>
+ where
+ A: DoubleEndedIterator + ExactSizeIterator,
+ B: DoubleEndedIterator + ExactSizeIterator,
+ {
+            // The function body below only uses `self.a/b.len()` and `self.a/b.next_back()`
+            // and doesn't call `next_back` more often than the `TrustedRandomAccessNoCoerce`
+            // contract allows, so this implementation is safe in that specialization
+
+ let a_sz = self.a.len();
+ let b_sz = self.b.len();
+ if a_sz != b_sz {
+ // Adjust a, b to equal length
+ if a_sz > b_sz {
+ for _ in 0..a_sz - b_sz {
+ self.a.next_back();
+ }
+ } else {
+ for _ in 0..b_sz - a_sz {
+ self.b.next_back();
+ }
+ }
+ }
+ match (self.a.next_back(), self.b.next_back()) {
+ (Some(x), Some(y)) => Some((x, y)),
+ (None, None) => None,
+ _ => unreachable!(),
+ }
+ }
+ };
+}
+
+// General Zip impl
+#[doc(hidden)]
+impl<A, B> ZipImpl<A, B> for Zip<A, B>
+where
+ A: Iterator,
+ B: Iterator,
+{
+ type Item = (A::Item, B::Item);
+
+ zip_impl_general_defaults! {}
+
+ #[inline]
+ default fn size_hint(&self) -> (usize, Option<usize>) {
+ let (a_lower, a_upper) = self.a.size_hint();
+ let (b_lower, b_upper) = self.b.size_hint();
+
+ let lower = cmp::min(a_lower, b_lower);
+
+ let upper = match (a_upper, b_upper) {
+ (Some(x), Some(y)) => Some(cmp::min(x, y)),
+ (Some(x), None) => Some(x),
+ (None, Some(y)) => Some(y),
+ (None, None) => None,
+ };
+
+ (lower, upper)
+ }
+
+ default unsafe fn get_unchecked(&mut self, _idx: usize) -> <Self as Iterator>::Item
+ where
+ Self: TrustedRandomAccessNoCoerce,
+ {
+ unreachable!("Always specialized");
+ }
+}
+
+#[doc(hidden)]
+impl<A, B> ZipImpl<A, B> for Zip<A, B>
+where
+ A: TrustedRandomAccessNoCoerce + Iterator,
+ B: TrustedRandomAccessNoCoerce + Iterator,
+{
+ zip_impl_general_defaults! {}
+
+ #[inline]
+ default fn size_hint(&self) -> (usize, Option<usize>) {
+ let size = cmp::min(self.a.size(), self.b.size());
+ (size, Some(size))
+ }
+
+ #[inline]
+ unsafe fn get_unchecked(&mut self, idx: usize) -> <Self as Iterator>::Item {
+ let idx = self.index + idx;
+ // SAFETY: the caller must uphold the contract for
+ // `Iterator::__iterator_get_unchecked`.
+ unsafe { (self.a.__iterator_get_unchecked(idx), self.b.__iterator_get_unchecked(idx)) }
+ }
+}
+
+#[doc(hidden)]
+impl<A, B> ZipImpl<A, B> for Zip<A, B>
+where
+ A: TrustedRandomAccess + Iterator,
+ B: TrustedRandomAccess + Iterator,
+{
+ fn new(a: A, b: B) -> Self {
+ let a_len = a.size();
+ let len = cmp::min(a_len, b.size());
+ Zip { a, b, index: 0, len, a_len }
+ }
+
+ #[inline]
+ fn next(&mut self) -> Option<(A::Item, B::Item)> {
+ if self.index < self.len {
+ let i = self.index;
+            // since get_unchecked executes code which can panic, we increment the counters beforehand
+ // so that the same index won't be accessed twice, as required by TrustedRandomAccess
+ self.index += 1;
+ // SAFETY: `i` is smaller than `self.len`, thus smaller than `self.a.len()` and `self.b.len()`
+ unsafe {
+ Some((self.a.__iterator_get_unchecked(i), self.b.__iterator_get_unchecked(i)))
+ }
+ } else if A::MAY_HAVE_SIDE_EFFECT && self.index < self.a_len {
+ let i = self.index;
+ // as above, increment before executing code that may panic
+ self.index += 1;
+ self.len += 1;
+ // match the base implementation's potential side effects
+ // SAFETY: we just checked that `i` < `self.a.len()`
+ unsafe {
+ self.a.__iterator_get_unchecked(i);
+ }
+ None
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.len - self.index;
+ (len, Some(len))
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ let delta = cmp::min(n, self.len - self.index);
+ let end = self.index + delta;
+ while self.index < end {
+ let i = self.index;
+            // since get_unchecked executes code which can panic, we increment the counters beforehand
+ // so that the same index won't be accessed twice, as required by TrustedRandomAccess
+ self.index += 1;
+ if A::MAY_HAVE_SIDE_EFFECT {
+ // SAFETY: the usage of `cmp::min` to calculate `delta`
+ // ensures that `end` is smaller than or equal to `self.len`,
+ // so `i` is also smaller than `self.len`.
+ unsafe {
+ self.a.__iterator_get_unchecked(i);
+ }
+ }
+ if B::MAY_HAVE_SIDE_EFFECT {
+ // SAFETY: same as above.
+ unsafe {
+ self.b.__iterator_get_unchecked(i);
+ }
+ }
+ }
+
+ self.super_nth(n - delta)
+ }
+
+ #[inline]
+ fn next_back(&mut self) -> Option<(A::Item, B::Item)>
+ where
+ A: DoubleEndedIterator + ExactSizeIterator,
+ B: DoubleEndedIterator + ExactSizeIterator,
+ {
+ if A::MAY_HAVE_SIDE_EFFECT || B::MAY_HAVE_SIDE_EFFECT {
+ let sz_a = self.a.size();
+ let sz_b = self.b.size();
+ // Adjust a, b to equal length, make sure that only the first call
+ // of `next_back` does this, otherwise we will break the restriction
+ // on calls to `self.next_back()` after calling `get_unchecked()`.
+ if sz_a != sz_b {
+ let sz_a = self.a.size();
+ if A::MAY_HAVE_SIDE_EFFECT && sz_a > self.len {
+ for _ in 0..sz_a - self.len {
+ // since next_back() may panic we increment the counters beforehand
+ // to keep Zip's state in sync with the underlying iterator source
+ self.a_len -= 1;
+ self.a.next_back();
+ }
+ debug_assert_eq!(self.a_len, self.len);
+ }
+ let sz_b = self.b.size();
+ if B::MAY_HAVE_SIDE_EFFECT && sz_b > self.len {
+ for _ in 0..sz_b - self.len {
+ self.b.next_back();
+ }
+ }
+ }
+ }
+ if self.index < self.len {
+            // since get_unchecked executes code which can panic, we increment the counters beforehand
+ // so that the same index won't be accessed twice, as required by TrustedRandomAccess
+ self.len -= 1;
+ self.a_len -= 1;
+ let i = self.len;
+ // SAFETY: `i` is smaller than the previous value of `self.len`,
+ // which is also smaller than or equal to `self.a.len()` and `self.b.len()`
+ unsafe {
+ Some((self.a.__iterator_get_unchecked(i), self.b.__iterator_get_unchecked(i)))
+ }
+ } else {
+ None
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A, B> ExactSizeIterator for Zip<A, B>
+where
+ A: ExactSizeIterator,
+ B: ExactSizeIterator,
+{
+}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<A, B> TrustedRandomAccess for Zip<A, B>
+where
+ A: TrustedRandomAccess,
+ B: TrustedRandomAccess,
+{
+}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<A, B> TrustedRandomAccessNoCoerce for Zip<A, B>
+where
+ A: TrustedRandomAccessNoCoerce,
+ B: TrustedRandomAccessNoCoerce,
+{
+ const MAY_HAVE_SIDE_EFFECT: bool = A::MAY_HAVE_SIDE_EFFECT || B::MAY_HAVE_SIDE_EFFECT;
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<A, B> FusedIterator for Zip<A, B>
+where
+ A: FusedIterator,
+ B: FusedIterator,
+{
+}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<A, B> TrustedLen for Zip<A, B>
+where
+ A: TrustedLen,
+ B: TrustedLen,
+{
+}
+
+// Arbitrarily selects the left side of the zip iteration as the extractable "source";
+// trying both sides would require negative trait bounds
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<A, B> SourceIter for Zip<A, B>
+where
+ A: SourceIter,
+{
+ type Source = A::Source;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut A::Source {
+ // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+ unsafe { SourceIter::as_inner(&mut self.a) }
+ }
+}
+
+// Since SourceIter forwards the left-hand side, we do the same here
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<A: InPlaceIterable, B: Iterator> InPlaceIterable for Zip<A, B> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A: Debug, B: Debug> Debug for Zip<A, B> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ ZipFmt::fmt(self, f)
+ }
+}
+
+trait ZipFmt<A, B> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result;
+}
+
+impl<A: Debug, B: Debug> ZipFmt<A, B> for Zip<A, B> {
+ default fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Zip").field("a", &self.a).field("b", &self.b).finish()
+ }
+}
+
+impl<A: Debug + TrustedRandomAccessNoCoerce, B: Debug + TrustedRandomAccessNoCoerce> ZipFmt<A, B>
+ for Zip<A, B>
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // It's *not safe* to call fmt on the contained iterators, since once
+ // we start iterating they're in strange, potentially unsafe, states.
+ f.debug_struct("Zip").finish()
+ }
+}
+
+/// An iterator whose items can be randomly accessed efficiently.
+///
+/// # Safety
+///
+/// The iterator's `size_hint` must be exact and cheap to call.
+///
+/// `TrustedRandomAccessNoCoerce::size` may not be overridden.
+///
+/// All subtypes and all supertypes of `Self` must also implement `TrustedRandomAccess`.
+/// In particular, this means that types with non-invariant parameters usually can not have
+/// an impl for `TrustedRandomAccess` that depends on any trait bounds on such parameters, except
+/// for bounds that come from the respective struct/enum definition itself, or bounds involving
+/// traits that themselves come with a guarantee similar to this one.
+///
+/// If `Self: ExactSizeIterator` then `self.len()` must always produce results consistent
+/// with `self.size()`.
+///
+/// If `Self: Iterator`, then `<Self as Iterator>::__iterator_get_unchecked(&mut self, idx)`
+/// must be safe to call provided the following conditions are met.
+///
+/// 1. `0 <= idx` and `idx < self.size()`.
+/// 2. If `Self: !Clone`, then `self.__iterator_get_unchecked(idx)` is never called with the same
+/// index on `self` more than once.
+/// 3. After `self.__iterator_get_unchecked(idx)` has been called, then `self.next_back()` will
+/// only be called at most `self.size() - idx - 1` times. If `Self: Clone` and `self` is cloned,
+/// then this number is calculated for `self` and its clone individually,
+/// but `self.next_back()` calls that happened before the cloning count for both `self` and the clone.
+/// 4. After `self.__iterator_get_unchecked(idx)` has been called, then only the following methods
+/// will be called on `self` or on any new clones of `self`:
+/// * `std::clone::Clone::clone`
+/// * `std::iter::Iterator::size_hint`
+/// * `std::iter::DoubleEndedIterator::next_back`
+/// * `std::iter::ExactSizeIterator::len`
+/// * `std::iter::Iterator::__iterator_get_unchecked`
+/// * `std::iter::TrustedRandomAccessNoCoerce::size`
+/// 5. If `T` is a subtype of `Self`, then `self` is allowed to be coerced
+/// to `T`. If `self` is coerced to `T` after `self.__iterator_get_unchecked(idx)` has already
+/// been called, then no methods except for the ones listed under 4. are allowed to be called
+/// on the resulting value of type `T`, either. Multiple such coercion steps are allowed.
+/// Regarding 2. and 3., the number of times `__iterator_get_unchecked(idx)` or `next_back()` is
+/// called on `self` and the resulting value of type `T` (and on further coercion results with
+/// sub-subtypes) are added together and their sums must not exceed the specified bounds.
+///
+/// Further, given that these conditions are met, it must guarantee that:
+///
+/// * It does not change the value returned from `size_hint`
+/// * It must be safe to call the methods listed above on `self` after calling
+/// `self.__iterator_get_unchecked(idx)`, assuming that the required traits are implemented.
+/// * It must also be safe to drop `self` after calling `self.__iterator_get_unchecked(idx)`.
+/// * If `T` is a subtype of `Self`, then it must be safe to coerce `self` to `T`.
+//
+// FIXME: Clarify interaction with SourceIter/InPlaceIterable. Calling `SourceIter::as_inner`
+// after `__iterator_get_unchecked` is supposed to be allowed.
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+#[rustc_specialization_trait]
+pub unsafe trait TrustedRandomAccess: TrustedRandomAccessNoCoerce {}
+
+/// Like [`TrustedRandomAccess`] but without any of the requirements / guarantees around
+/// coercions to subtypes after `__iterator_get_unchecked` (they aren’t allowed here!), and
+/// without the requirement that subtypes / supertypes implement `TrustedRandomAccessNoCoerce`.
+///
+/// This trait was created in PR #85874 to fix soundness issue #85873 without performance regressions.
+/// It is subject to change as we might want to build a more generally useful (for performance
+/// optimizations) and more sophisticated trait or trait hierarchy that replaces or extends
+/// [`TrustedRandomAccess`] and `TrustedRandomAccessNoCoerce`.
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+#[rustc_specialization_trait]
+pub unsafe trait TrustedRandomAccessNoCoerce: Sized {
+ // Convenience method.
+ fn size(&self) -> usize
+ where
+ Self: Iterator,
+ {
+ self.size_hint().0
+ }
+ /// `true` if getting an iterator element may have side effects.
+ /// Remember to take inner iterators into account.
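+    ///
+    /// For example, a plain slice iterator reports `false` here, while an
+    /// adapter such as `Map` reports `true`, since its closure may run
+    /// arbitrary code on each access.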
+ const MAY_HAVE_SIDE_EFFECT: bool;
+}
+
+/// Like `Iterator::__iterator_get_unchecked`, but doesn't require the compiler to
+/// know that `I: TrustedRandomAccess`.
+///
+/// ## Safety
+///
+/// Same requirements as calling `get_unchecked` directly.
+#[doc(hidden)]
+#[inline]
+pub(in crate::iter::adapters) unsafe fn try_get_unchecked<I>(it: &mut I, idx: usize) -> I::Item
+where
+ I: Iterator,
+{
+ // SAFETY: the caller must uphold the contract for
+ // `Iterator::__iterator_get_unchecked`.
+ unsafe { it.try_get_unchecked(idx) }
+}
+
+unsafe trait SpecTrustedRandomAccess: Iterator {
+ /// If `Self: TrustedRandomAccess`, it must be safe to call
+ /// `Iterator::__iterator_get_unchecked(self, index)`.
+ unsafe fn try_get_unchecked(&mut self, index: usize) -> Self::Item;
+}
+
+unsafe impl<I: Iterator> SpecTrustedRandomAccess for I {
+ default unsafe fn try_get_unchecked(&mut self, _: usize) -> Self::Item {
+ panic!("Should only be called on TrustedRandomAccess iterators");
+ }
+}
+
+unsafe impl<I: Iterator + TrustedRandomAccessNoCoerce> SpecTrustedRandomAccess for I {
+ #[inline]
+ unsafe fn try_get_unchecked(&mut self, index: usize) -> Self::Item {
+ // SAFETY: the caller must uphold the contract for
+ // `Iterator::__iterator_get_unchecked`.
+ unsafe { self.__iterator_get_unchecked(index) }
+ }
+}
diff --git a/library/core/src/iter/mod.rs b/library/core/src/iter/mod.rs
new file mode 100644
index 000000000..d5c6aed5b
--- /dev/null
+++ b/library/core/src/iter/mod.rs
@@ -0,0 +1,432 @@
+//! Composable external iteration.
+//!
+//! If you've found yourself with a collection of some kind, and needed to
+//! perform an operation on the elements of said collection, you'll quickly run
+//! into 'iterators'. Iterators are heavily used in idiomatic Rust code, so
+//! it's worth becoming familiar with them.
+//!
+//! Before explaining more, let's talk about how this module is structured:
+//!
+//! # Organization
+//!
+//! This module is largely organized by type:
+//!
+//! * [Traits] are the core portion: these traits define what kind of iterators
+//! exist and what you can do with them. The methods of these traits are worth
+//! putting some extra study time into.
+//! * [Functions] provide some helpful ways to create some basic iterators.
+//! * [Structs] are often the return types of the various methods on this
+//! module's traits. You'll usually want to look at the method that creates
+//! the `struct`, rather than the `struct` itself. For more detail about why,
+//! see '[Implementing Iterator](#implementing-iterator)'.
+//!
+//! [Traits]: #traits
+//! [Functions]: #functions
+//! [Structs]: #structs
+//!
+//! That's it! Let's dig into iterators.
+//!
+//! # Iterator
+//!
+//! The heart and soul of this module is the [`Iterator`] trait. The core of
+//! [`Iterator`] looks like this:
+//!
+//! ```
+//! trait Iterator {
+//! type Item;
+//! fn next(&mut self) -> Option<Self::Item>;
+//! }
+//! ```
+//!
+//! An iterator has a method, [`next`], which when called, returns an
+//! <code>[Option]\<Item></code>. Calling [`next`] will return [`Some(Item)`] as long as there
+//! are elements, and once they've all been exhausted, will return `None` to
+//! indicate that iteration is finished. Individual iterators may choose to
+//! resume iteration, and so calling [`next`] again may or may not eventually
+//! start returning [`Some(Item)`] again at some point (for example, see [`TryIter`]).
+//!
+//! [`Iterator`]'s full definition includes a number of other methods as well,
+//! but they are default methods, built on top of [`next`], and so you get
+//! them for free.
+//!
+//! Iterators are also composable, and it's common to chain them together to do
+//! more complex forms of processing. See the [Adapters](#adapters) section
+//! below for more details.
+//!
+//! [`Some(Item)`]: Some
+//! [`next`]: Iterator::next
+//! [`TryIter`]: ../../std/sync/mpsc/struct.TryIter.html
+//!
+//! # The three forms of iteration
+//!
+//! There are three common methods which can create iterators from a collection:
+//!
+//! * `iter()`, which iterates over `&T`.
+//! * `iter_mut()`, which iterates over `&mut T`.
+//! * `into_iter()`, which iterates over `T`.
+//!
+//! Various things in the standard library may implement one or more of the
+//! three, where appropriate.
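+//!
+//! For example, `Vec<T>` offers all three (a quick sketch):
+//!
+//! ```
+//! let mut v = vec![1, 2, 3];
+//! for x in v.iter() { let _: &i32 = x; }      // shared references
+//! for x in v.iter_mut() { *x += 1; }          // mutable references
+//! for x in v.into_iter() { let _: i32 = x; }  // owned values; consumes `v`
+//! ```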
+//!
+//! # Implementing Iterator
+//!
+//! Creating an iterator of your own involves two steps: creating a `struct` to
+//! hold the iterator's state, and then implementing [`Iterator`] for that `struct`.
+//! This is why there are so many `struct`s in this module: there is one for
+//! each iterator and iterator adapter.
+//!
+//! Let's make an iterator named `Counter` which counts from `1` to `5`:
+//!
+//! ```
+//! // First, the struct:
+//!
+//! /// An iterator which counts from one to five
+//! struct Counter {
+//! count: usize,
+//! }
+//!
+//! // we want our count to start at one, so let's add a new() method to help.
+//! // This isn't strictly necessary, but is convenient. Note that we start
+//! // `count` at zero; we'll see why in `next()`'s implementation below.
+//! impl Counter {
+//! fn new() -> Counter {
+//! Counter { count: 0 }
+//! }
+//! }
+//!
+//! // Then, we implement `Iterator` for our `Counter`:
+//!
+//! impl Iterator for Counter {
+//! // we will be counting with usize
+//! type Item = usize;
+//!
+//! // next() is the only required method
+//! fn next(&mut self) -> Option<Self::Item> {
+//! // Increment our count. This is why we started at zero.
+//! self.count += 1;
+//!
+//! // Check to see if we've finished counting or not.
+//! if self.count < 6 {
+//! Some(self.count)
+//! } else {
+//! None
+//! }
+//! }
+//! }
+//!
+//! // And now we can use it!
+//!
+//! let mut counter = Counter::new();
+//!
+//! assert_eq!(counter.next(), Some(1));
+//! assert_eq!(counter.next(), Some(2));
+//! assert_eq!(counter.next(), Some(3));
+//! assert_eq!(counter.next(), Some(4));
+//! assert_eq!(counter.next(), Some(5));
+//! assert_eq!(counter.next(), None);
+//! ```
+//!
+//! Calling [`next`] this way gets repetitive. Rust has a construct which can
+//! call [`next`] on your iterator until it reaches `None`. Let's go over that
+//! next.
+//!
+//! Also note that `Iterator` provides a default implementation of methods such as `nth` and `fold`
+//! which call `next` internally. However, it is also possible to write a custom implementation of
+//! methods like `nth` and `fold` if an iterator can compute them more efficiently without calling
+//! `next`.
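+//!
+//! For instance, a counting iterator can implement `nth` in constant time
+//! instead of advancing one element at a time (a minimal sketch):
+//!
+//! ```
+//! struct Count { n: usize }
+//!
+//! impl Iterator for Count {
+//!     type Item = usize;
+//!
+//!     fn next(&mut self) -> Option<usize> {
+//!         self.n += 1;
+//!         Some(self.n)
+//!     }
+//!
+//!     // O(1) instead of n + 1 calls to `next`.
+//!     fn nth(&mut self, n: usize) -> Option<usize> {
+//!         self.n += n + 1;
+//!         Some(self.n)
+//!     }
+//! }
+//!
+//! let mut c = Count { n: 0 };
+//! assert_eq!(c.nth(3), Some(4));
+//! assert_eq!(c.next(), Some(5));
+//! ```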
+//!
+//! # `for` loops and `IntoIterator`
+//!
+//! Rust's `for` loop syntax is actually sugar for iterators. Here's a basic
+//! example of `for`:
+//!
+//! ```
+//! let values = vec![1, 2, 3, 4, 5];
+//!
+//! for x in values {
+//! println!("{x}");
+//! }
+//! ```
+//!
+//! This will print the numbers one through five, each on their own line. But
+//! you'll notice something here: we never called anything on our vector to
+//! produce an iterator. What gives?
+//!
+//! There's a trait in the standard library for converting something into an
+//! iterator: [`IntoIterator`]. This trait has one method, [`into_iter`],
+//! which converts the thing implementing [`IntoIterator`] into an iterator.
+//! Let's take a look at that `for` loop again, and what the compiler converts
+//! it into:
+//!
+//! [`into_iter`]: IntoIterator::into_iter
+//!
+//! ```
+//! let values = vec![1, 2, 3, 4, 5];
+//!
+//! for x in values {
+//! println!("{x}");
+//! }
+//! ```
+//!
+//! Rust de-sugars this into:
+//!
+//! ```
+//! let values = vec![1, 2, 3, 4, 5];
+//! {
+//! let result = match IntoIterator::into_iter(values) {
+//! mut iter => loop {
+//! let next;
+//! match iter.next() {
+//! Some(val) => next = val,
+//! None => break,
+//! };
+//! let x = next;
+//! let () = { println!("{x}"); };
+//! },
+//! };
+//! result
+//! }
+//! ```
+//!
+//! First, we call `into_iter()` on the value. Then, we match on the iterator
+//! that it returns, calling [`next`] over and over until we see a `None`. At
+//! that point, we `break` out of the loop, and we're done iterating.
+//!
+//! There's one more subtle bit here: the standard library contains an
+//! interesting implementation of [`IntoIterator`]:
+//!
+//! ```ignore (only-for-syntax-highlight)
+//! impl<I: Iterator> IntoIterator for I
+//! ```
+//!
+//! In other words, all [`Iterator`]s implement [`IntoIterator`], by just
+//! returning themselves. This means two things:
+//!
+//! 1. If you're writing an [`Iterator`], you can use it with a `for` loop.
+//! 2. If you're creating a collection, implementing [`IntoIterator`] for it
+//! will allow your collection to be used with the `for` loop.
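+//!
+//! The first point in action (a quick sketch): `Chars` is already an
+//! iterator, so it can drive a `for` loop directly:
+//!
+//! ```
+//! for c in "abc".chars() {
+//!     println!("{c}");
+//! }
+//! ```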
+//!
+//! # Iterating by reference
+//!
+//! Since [`into_iter()`] takes `self` by value, using a `for` loop to iterate
+//! over a collection consumes that collection. Often, you may want to iterate
+//! over a collection without consuming it. Many collections offer methods that
+//! provide iterators over references, conventionally called `iter()` and
+//! `iter_mut()` respectively:
+//!
+//! ```
+//! let mut values = vec![41];
+//! for x in values.iter_mut() {
+//! *x += 1;
+//! }
+//! for x in values.iter() {
+//! assert_eq!(*x, 42);
+//! }
+//! assert_eq!(values.len(), 1); // `values` is still owned by this function.
+//! ```
+//!
+//! If a collection type `C` provides `iter()`, it usually also implements
+//! `IntoIterator` for `&C`, with an implementation that just calls `iter()`.
+//! Likewise, a collection `C` that provides `iter_mut()` generally implements
+//! `IntoIterator` for `&mut C` by delegating to `iter_mut()`. This enables a
+//! convenient shorthand:
+//!
+//! ```
+//! let mut values = vec![41];
+//! for x in &mut values { // same as `values.iter_mut()`
+//! *x += 1;
+//! }
+//! for x in &values { // same as `values.iter()`
+//! assert_eq!(*x, 42);
+//! }
+//! assert_eq!(values.len(), 1);
+//! ```
+//!
+//! While many collections offer `iter()`, not all offer `iter_mut()`. For
+//! example, mutating the keys of a [`HashSet<T>`] could put the collection
+//! into an inconsistent state if the key hashes change, so this collection
+//! only offers `iter()`.
+//!
+//! [`into_iter()`]: IntoIterator::into_iter
+//! [`HashSet<T>`]: ../../std/collections/struct.HashSet.html
+//!
+//! # Adapters
+//!
+//! Functions which take an [`Iterator`] and return another [`Iterator`] are
+//! often called 'iterator adapters', as they're a form of the 'adapter
+//! pattern'.
+//!
+//! Common iterator adapters include [`map`], [`take`], and [`filter`].
+//! For more, see their documentation.
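+//!
+//! Adapters compose by chaining (a quick sketch):
+//!
+//! ```
+//! let even_squares: Vec<i32> = (1..10)
+//!     .filter(|n| n % 2 == 0)
+//!     .map(|n| n * n)
+//!     .collect();
+//!
+//! assert_eq!(even_squares, [4, 16, 36, 64]);
+//! ```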
+//!
+//! If an iterator adapter panics, the iterator will be in an unspecified (but
+//! memory safe) state. This state is also not guaranteed to stay the same
+//! across versions of Rust, so you should avoid relying on the exact values
+//! returned by an iterator which panicked.
+//!
+//! [`map`]: Iterator::map
+//! [`take`]: Iterator::take
+//! [`filter`]: Iterator::filter
+//!
+//! # Laziness
+//!
+//! Iterators (and iterator [adapters](#adapters)) are *lazy*. This means that
+//! just creating an iterator doesn't _do_ a whole lot. Nothing really happens
+//! until you call [`next`]. This is sometimes a source of confusion when
+//! creating an iterator solely for its side effects. For example, the [`map`]
+//! method calls a closure on each element it iterates over:
+//!
+//! ```
+//! # #![allow(unused_must_use)]
+//! let v = vec![1, 2, 3, 4, 5];
+//! v.iter().map(|x| println!("{x}"));
+//! ```
+//!
+//! This will not print any values, as we only created an iterator, rather than
+//! using it. The compiler will warn us about this kind of behavior:
+//!
+//! ```text
+//! warning: unused result that must be used: iterators are lazy and
+//! do nothing unless consumed
+//! ```
+//!
+//! The idiomatic way to write a [`map`] for its side effects is to use a
+//! `for` loop or call the [`for_each`] method:
+//!
+//! ```
+//! let v = vec![1, 2, 3, 4, 5];
+//!
+//! v.iter().for_each(|x| println!("{x}"));
+//! // or
+//! for x in &v {
+//! println!("{x}");
+//! }
+//! ```
+//!
+//! [`map`]: Iterator::map
+//! [`for_each`]: Iterator::for_each
+//!
+//! Another common way to evaluate an iterator is to use the [`collect`]
+//! method to produce a new collection.
+//!
+//! [`collect`]: Iterator::collect
+//!
+//! # Infinity
+//!
+//! Iterators do not have to be finite. As an example, an open-ended range is
+//! an infinite iterator:
+//!
+//! ```
+//! let numbers = 0..;
+//! ```
+//!
+//! It is common to use the [`take`] iterator adapter to turn an infinite
+//! iterator into a finite one:
+//!
+//! ```
+//! let numbers = 0..;
+//! let five_numbers = numbers.take(5);
+//!
+//! for number in five_numbers {
+//! println!("{number}");
+//! }
+//! ```
+//!
+//! This will print the numbers `0` through `4`, each on their own line.
+//!
+//! Bear in mind that methods on infinite iterators, even those for which a
+//! result can be determined mathematically in finite time, might not terminate.
+//! Specifically, methods such as [`min`], which in the general case require
+//! traversing every element in the iterator, are likely not to return
+//! successfully for any infinite iterators.
+//!
+//! ```no_run
+//! let ones = std::iter::repeat(1);
+//! let least = ones.min().unwrap(); // Oh no! An infinite loop!
+//! // `ones.min()` causes an infinite loop, so we won't reach this point!
+//! println!("The smallest number one is {least}.");
+//! ```
+//!
+//! [`take`]: Iterator::take
+//! [`min`]: Iterator::min
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::traits::Iterator;
+
+#[unstable(
+ feature = "step_trait",
+ reason = "likely to be replaced by finer-grained traits",
+ issue = "42168"
+)]
+pub use self::range::Step;
+
+#[unstable(
+ feature = "iter_from_generator",
+ issue = "43122",
+ reason = "generators are unstable"
+)]
+pub use self::sources::from_generator;
+#[stable(feature = "iter_empty", since = "1.2.0")]
+pub use self::sources::{empty, Empty};
+#[stable(feature = "iter_from_fn", since = "1.34.0")]
+pub use self::sources::{from_fn, FromFn};
+#[stable(feature = "iter_once", since = "1.2.0")]
+pub use self::sources::{once, Once};
+#[stable(feature = "iter_once_with", since = "1.43.0")]
+pub use self::sources::{once_with, OnceWith};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::sources::{repeat, Repeat};
+#[stable(feature = "iterator_repeat_with", since = "1.28.0")]
+pub use self::sources::{repeat_with, RepeatWith};
+#[stable(feature = "iter_successors", since = "1.34.0")]
+pub use self::sources::{successors, Successors};
+
+#[stable(feature = "fused", since = "1.26.0")]
+pub use self::traits::FusedIterator;
+#[unstable(issue = "none", feature = "inplace_iteration")]
+pub use self::traits::InPlaceIterable;
+#[unstable(feature = "trusted_len", issue = "37572")]
+pub use self::traits::TrustedLen;
+#[unstable(feature = "trusted_step", issue = "85731")]
+pub use self::traits::TrustedStep;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::traits::{
+ DoubleEndedIterator, ExactSizeIterator, Extend, FromIterator, IntoIterator, Product, Sum,
+};
+
+#[stable(feature = "iter_zip", since = "1.59.0")]
+pub use self::adapters::zip;
+#[unstable(feature = "std_internals", issue = "none")]
+pub use self::adapters::ByRefSized;
+#[stable(feature = "iter_cloned", since = "1.1.0")]
+pub use self::adapters::Cloned;
+#[stable(feature = "iter_copied", since = "1.36.0")]
+pub use self::adapters::Copied;
+#[stable(feature = "iterator_flatten", since = "1.29.0")]
+pub use self::adapters::Flatten;
+#[stable(feature = "iter_map_while", since = "1.57.0")]
+pub use self::adapters::MapWhile;
+#[unstable(feature = "inplace_iteration", issue = "none")]
+pub use self::adapters::SourceIter;
+#[stable(feature = "iterator_step_by", since = "1.28.0")]
+pub use self::adapters::StepBy;
+#[unstable(feature = "trusted_random_access", issue = "none")]
+pub use self::adapters::TrustedRandomAccess;
+#[unstable(feature = "trusted_random_access", issue = "none")]
+pub use self::adapters::TrustedRandomAccessNoCoerce;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::adapters::{
+ Chain, Cycle, Enumerate, Filter, FilterMap, FlatMap, Fuse, Inspect, Map, Peekable, Rev, Scan,
+ Skip, SkipWhile, Take, TakeWhile, Zip,
+};
+#[unstable(feature = "iter_intersperse", reason = "recently added", issue = "79524")]
+pub use self::adapters::{Intersperse, IntersperseWith};
+
+pub(crate) use self::adapters::try_process;
+
+mod adapters;
+mod range;
+mod sources;
+mod traits;
diff --git a/library/core/src/iter/range.rs b/library/core/src/iter/range.rs
new file mode 100644
index 000000000..f7aeee8c9
--- /dev/null
+++ b/library/core/src/iter/range.rs
@@ -0,0 +1,1253 @@
+use crate::char;
+use crate::convert::TryFrom;
+use crate::mem;
+use crate::ops::{self, Try};
+
+use super::{
+ FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce, TrustedStep,
+};
+
+// Safety: All invariants are upheld.
+macro_rules! unsafe_impl_trusted_step {
+ ($($type:ty)*) => {$(
+ #[unstable(feature = "trusted_step", issue = "85731")]
+ unsafe impl TrustedStep for $type {}
+ )*};
+}
+unsafe_impl_trusted_step![char i8 i16 i32 i64 i128 isize u8 u16 u32 u64 u128 usize];
+
+/// Objects that have a notion of *successor* and *predecessor* operations.
+///
+/// The *successor* operation moves towards values that compare greater.
+/// The *predecessor* operation moves towards values that compare lesser.
+#[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
+pub trait Step: Clone + PartialOrd + Sized {
+ /// Returns the number of *successor* steps required to get from `start` to `end`.
+ ///
+ /// Returns `None` if the number of steps would overflow `usize`
+ /// (or is infinite, or if `end` would never be reached).
+ ///
+ /// # Invariants
+ ///
+ /// For any `a`, `b`, and `n`:
+ ///
+ /// * `steps_between(&a, &b) == Some(n)` if and only if `Step::forward_checked(&a, n) == Some(b)`
+ /// * `steps_between(&a, &b) == Some(n)` if and only if `Step::backward_checked(&b, n) == Some(a)`
+ /// * `steps_between(&a, &b) == Some(n)` only if `a <= b`
+ /// * Corollary: `steps_between(&a, &b) == Some(0)` if and only if `a == b`
+ /// * Note that `a <= b` does _not_ imply `steps_between(&a, &b) != None`;
+ /// this is the case when it would require more than `usize::MAX` steps to get to `b`
+ /// * `steps_between(&a, &b) == None` if `a > b`
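+    ///
+    /// A quick illustration with integers (a sketch; this trait is behind the
+    /// unstable `step_trait` feature):
+    ///
+    /// ```
+    /// #![feature(step_trait)]
+    /// use std::iter::Step;
+    ///
+    /// assert_eq!(Step::steps_between(&3u8, &7u8), Some(4));
+    /// assert_eq!(Step::steps_between(&7u8, &3u8), None);
+    /// ```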
+ fn steps_between(start: &Self, end: &Self) -> Option<usize>;
+
+ /// Returns the value that would be obtained by taking the *successor*
+ /// of `self` `count` times.
+ ///
+ /// If this would overflow the range of values supported by `Self`, returns `None`.
+ ///
+ /// # Invariants
+ ///
+ /// For any `a`, `n`, and `m`:
+ ///
+ /// * `Step::forward_checked(a, n).and_then(|x| Step::forward_checked(x, m)) == Step::forward_checked(a, m).and_then(|x| Step::forward_checked(x, n))`
+ ///
+ /// For any `a`, `n`, and `m` where `n + m` does not overflow:
+ ///
+ /// * `Step::forward_checked(a, n).and_then(|x| Step::forward_checked(x, m)) == Step::forward_checked(a, n + m)`
+ ///
+ /// For any `a` and `n`:
+ ///
+ /// * `Step::forward_checked(a, n) == (0..n).try_fold(a, |x, _| Step::forward_checked(&x, 1))`
+ /// * Corollary: `Step::forward_checked(&a, 0) == Some(a)`
+ fn forward_checked(start: Self, count: usize) -> Option<Self>;
+
+ /// Returns the value that would be obtained by taking the *successor*
+ /// of `self` `count` times.
+ ///
+ /// If this would overflow the range of values supported by `Self`,
+ /// this function is allowed to panic, wrap, or saturate.
+ /// The suggested behavior is to panic when debug assertions are enabled,
+ /// and to wrap or saturate otherwise.
+ ///
+ /// Unsafe code should not rely on the correctness of behavior after overflow.
+ ///
+ /// # Invariants
+ ///
+ /// For any `a`, `n`, and `m`, where no overflow occurs:
+ ///
+ /// * `Step::forward(Step::forward(a, n), m) == Step::forward(a, n + m)`
+ ///
+ /// For any `a` and `n`, where no overflow occurs:
+ ///
+ /// * `Step::forward_checked(a, n) == Some(Step::forward(a, n))`
+ /// * `Step::forward(a, n) == (0..n).fold(a, |x, _| Step::forward(x, 1))`
+ /// * Corollary: `Step::forward(a, 0) == a`
+ /// * `Step::forward(a, n) >= a`
+ /// * `Step::backward(Step::forward(a, n), n) == a`
+ fn forward(start: Self, count: usize) -> Self {
+ Step::forward_checked(start, count).expect("overflow in `Step::forward`")
+ }
+
+ /// Returns the value that would be obtained by taking the *successor*
+ /// of `self` `count` times.
+ ///
+ /// # Safety
+ ///
+ /// It is undefined behavior for this operation to overflow the
+ /// range of values supported by `Self`. If you cannot guarantee that this
+ /// will not overflow, use `forward` or `forward_checked` instead.
+ ///
+ /// # Invariants
+ ///
+ /// For any `a`:
+ ///
+ /// * if there exists `b` such that `b > a`, it is safe to call `Step::forward_unchecked(a, 1)`
+ /// * if there exists `b`, `n` such that `steps_between(&a, &b) == Some(n)`,
+ /// it is safe to call `Step::forward_unchecked(a, m)` for any `m <= n`.
+ ///
+ /// For any `a` and `n`, where no overflow occurs:
+ ///
+ /// * `Step::forward_unchecked(a, n)` is equivalent to `Step::forward(a, n)`
+ unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
+ Step::forward(start, count)
+ }
+
+ /// Returns the value that would be obtained by taking the *predecessor*
+ /// of `self` `count` times.
+ ///
+ /// If this would overflow the range of values supported by `Self`, returns `None`.
+ ///
+ /// # Invariants
+ ///
+ /// For any `a`, `n`, and `m`:
+ ///
+ /// * `Step::backward_checked(a, n).and_then(|x| Step::backward_checked(x, m)) == n.checked_add(m).and_then(|x| Step::backward_checked(a, x))`
+ /// * `Step::backward_checked(a, n).and_then(|x| Step::backward_checked(x, m)) == try { Step::backward_checked(a, n.checked_add(m)?) }`
+ ///
+ /// For any `a` and `n`:
+ ///
+ /// * `Step::backward_checked(a, n) == (0..n).try_fold(a, |x, _| Step::backward_checked(&x, 1))`
+ /// * Corollary: `Step::backward_checked(&a, 0) == Some(a)`
+ fn backward_checked(start: Self, count: usize) -> Option<Self>;
+
+ /// Returns the value that would be obtained by taking the *predecessor*
+ /// of `self` `count` times.
+ ///
+ /// If this would overflow the range of values supported by `Self`,
+ /// this function is allowed to panic, wrap, or saturate.
+ /// The suggested behavior is to panic when debug assertions are enabled,
+ /// and to wrap or saturate otherwise.
+ ///
+ /// Unsafe code should not rely on the correctness of behavior after overflow.
+ ///
+ /// # Invariants
+ ///
+ /// For any `a`, `n`, and `m`, where no overflow occurs:
+ ///
+ /// * `Step::backward(Step::backward(a, n), m) == Step::backward(a, n + m)`
+ ///
+ /// For any `a` and `n`, where no overflow occurs:
+ ///
+ /// * `Step::backward_checked(a, n) == Some(Step::backward(a, n))`
+ /// * `Step::backward(a, n) == (0..n).fold(a, |x, _| Step::backward(x, 1))`
+ /// * Corollary: `Step::backward(a, 0) == a`
+ /// * `Step::backward(a, n) <= a`
+ /// * `Step::forward(Step::backward(a, n), n) == a`
+ fn backward(start: Self, count: usize) -> Self {
+ Step::backward_checked(start, count).expect("overflow in `Step::backward`")
+ }
+
+ /// Returns the value that would be obtained by taking the *predecessor*
+ /// of `self` `count` times.
+ ///
+ /// # Safety
+ ///
+ /// It is undefined behavior for this operation to overflow the
+ /// range of values supported by `Self`. If you cannot guarantee that this
+ /// will not overflow, use `backward` or `backward_checked` instead.
+ ///
+ /// # Invariants
+ ///
+ /// For any `a`:
+ ///
+ /// * if there exists `b` such that `b < a`, it is safe to call `Step::backward_unchecked(a, 1)`
+ /// * if there exists `b`, `n` such that `steps_between(&b, &a) == Some(n)`,
+ /// it is safe to call `Step::backward_unchecked(a, m)` for any `m <= n`.
+ ///
+ /// For any `a` and `n`, where no overflow occurs:
+ ///
+ /// * `Step::backward_unchecked(a, n)` is equivalent to `Step::backward(a, n)`
+ unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
+ Step::backward(start, count)
+ }
+}
+
+// These are still macro-generated because the integer literals resolve to different types.
+macro_rules! step_identical_methods {
+ () => {
+ #[inline]
+ unsafe fn forward_unchecked(start: Self, n: usize) -> Self {
+ // SAFETY: the caller has to guarantee that `start + n` doesn't overflow.
+ unsafe { start.unchecked_add(n as Self) }
+ }
+
+ #[inline]
+ unsafe fn backward_unchecked(start: Self, n: usize) -> Self {
+ // SAFETY: the caller has to guarantee that `start - n` doesn't overflow.
+ unsafe { start.unchecked_sub(n as Self) }
+ }
+
+ #[inline]
+ #[allow(arithmetic_overflow)]
+ #[rustc_inherit_overflow_checks]
+ fn forward(start: Self, n: usize) -> Self {
+ // In debug builds, trigger a panic on overflow.
+ // This should optimize completely out in release builds.
+ if Self::forward_checked(start, n).is_none() {
+ let _ = Self::MAX + 1;
+ }
+ // Do wrapping math to allow e.g. `Step::forward(-128i8, 255)`.
+ start.wrapping_add(n as Self)
+ }
+
+ #[inline]
+ #[allow(arithmetic_overflow)]
+ #[rustc_inherit_overflow_checks]
+ fn backward(start: Self, n: usize) -> Self {
+ // In debug builds, trigger a panic on overflow.
+ // This should optimize completely out in release builds.
+ if Self::backward_checked(start, n).is_none() {
+ let _ = Self::MIN - 1;
+ }
+ // Do wrapping math to allow e.g. `Step::backward(127i8, 255)`.
+ start.wrapping_sub(n as Self)
+ }
+ };
+}
+
+macro_rules! step_integer_impls {
+ {
+ narrower than or same width as usize:
+ $( [ $u_narrower:ident $i_narrower:ident ] ),+;
+ wider than usize:
+ $( [ $u_wider:ident $i_wider:ident ] ),+;
+ } => {
+ $(
+ #[allow(unreachable_patterns)]
+ #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
+ impl Step for $u_narrower {
+ step_identical_methods!();
+
+ #[inline]
+ fn steps_between(start: &Self, end: &Self) -> Option<usize> {
+ if *start <= *end {
+ // This relies on $u_narrower <= usize
+ Some((*end - *start) as usize)
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn forward_checked(start: Self, n: usize) -> Option<Self> {
+ match Self::try_from(n) {
+ Ok(n) => start.checked_add(n),
+ Err(_) => None, // if n is out of range, `unsigned_start + n` is too
+ }
+ }
+
+ #[inline]
+ fn backward_checked(start: Self, n: usize) -> Option<Self> {
+ match Self::try_from(n) {
+ Ok(n) => start.checked_sub(n),
+ Err(_) => None, // if n is out of range, `unsigned_start - n` is too
+ }
+ }
+ }
+
+ #[allow(unreachable_patterns)]
+ #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
+ impl Step for $i_narrower {
+ step_identical_methods!();
+
+ #[inline]
+ fn steps_between(start: &Self, end: &Self) -> Option<usize> {
+ if *start <= *end {
+ // This relies on $i_narrower <= usize
+ //
+ // Casting to isize extends the width but preserves the sign.
+ // Use wrapping_sub in isize space and cast to usize to compute
+ // the difference that might not fit inside the range of isize.
+ Some((*end as isize).wrapping_sub(*start as isize) as usize)
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn forward_checked(start: Self, n: usize) -> Option<Self> {
+ match $u_narrower::try_from(n) {
+ Ok(n) => {
+ // Wrapping handles cases like
+ // `Step::forward(-120_i8, 200) == Some(80_i8)`,
+ // even though 200 is out of range for i8.
+ let wrapped = start.wrapping_add(n as Self);
+ if wrapped >= start {
+ Some(wrapped)
+ } else {
+ None // Addition overflowed
+ }
+ }
+ // If n is out of range of e.g. u8,
+                    // then it is bigger than the entire width of the i8 range,
+ // so `any_i8 + n` necessarily overflows i8.
+ Err(_) => None,
+ }
+ }
+
+ #[inline]
+ fn backward_checked(start: Self, n: usize) -> Option<Self> {
+ match $u_narrower::try_from(n) {
+ Ok(n) => {
+ // Wrapping handles cases like
+ // `Step::forward(-120_i8, 200) == Some(80_i8)`,
+ // even though 200 is out of range for i8.
+ let wrapped = start.wrapping_sub(n as Self);
+                        // since next_back() may panic, we increment the counters beforehand
+ Some(wrapped)
+ } else {
+ None // Subtraction overflowed
+ }
+ }
+ // If n is out of range of e.g. u8,
+                    // then it is bigger than the entire width of the i8 range,
+ // so `any_i8 - n` necessarily overflows i8.
+ Err(_) => None,
+ }
+ }
+ }
+ )+
+
+ $(
+ #[allow(unreachable_patterns)]
+ #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
+ impl Step for $u_wider {
+ step_identical_methods!();
+
+ #[inline]
+ fn steps_between(start: &Self, end: &Self) -> Option<usize> {
+ if *start <= *end {
+ usize::try_from(*end - *start).ok()
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn forward_checked(start: Self, n: usize) -> Option<Self> {
+ start.checked_add(n as Self)
+ }
+
+ #[inline]
+ fn backward_checked(start: Self, n: usize) -> Option<Self> {
+ start.checked_sub(n as Self)
+ }
+ }
+
+ #[allow(unreachable_patterns)]
+ #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
+ impl Step for $i_wider {
+ step_identical_methods!();
+
+ #[inline]
+ fn steps_between(start: &Self, end: &Self) -> Option<usize> {
+ if *start <= *end {
+ match end.checked_sub(*start) {
+ Some(result) => usize::try_from(result).ok(),
+ // If the difference is too big for e.g. i128,
+                        // it's also too big for usize, which has fewer bits.
+ None => None,
+ }
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn forward_checked(start: Self, n: usize) -> Option<Self> {
+ start.checked_add(n as Self)
+ }
+
+ #[inline]
+ fn backward_checked(start: Self, n: usize) -> Option<Self> {
+ start.checked_sub(n as Self)
+ }
+ }
+ )+
+ };
+}
+
+#[cfg(target_pointer_width = "64")]
+step_integer_impls! {
+ narrower than or same width as usize: [u8 i8], [u16 i16], [u32 i32], [u64 i64], [usize isize];
+ wider than usize: [u128 i128];
+}
+
+#[cfg(target_pointer_width = "32")]
+step_integer_impls! {
+ narrower than or same width as usize: [u8 i8], [u16 i16], [u32 i32], [usize isize];
+ wider than usize: [u64 i64], [u128 i128];
+}
+
+#[cfg(target_pointer_width = "16")]
+step_integer_impls! {
+ narrower than or same width as usize: [u8 i8], [u16 i16], [usize isize];
+ wider than usize: [u32 i32], [u64 i64], [u128 i128];
+}
+
+#[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
+impl Step for char {
+ #[inline]
+ fn steps_between(&start: &char, &end: &char) -> Option<usize> {
+ let start = start as u32;
+ let end = end as u32;
+ if start <= end {
+ let count = end - start;
+ if start < 0xD800 && 0xE000 <= end {
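+                // the range crosses the surrogate gap 0xD800..0xE000, which
+                // contains no scalar values, so exclude its width (0x800)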
+ usize::try_from(count - 0x800).ok()
+ } else {
+ usize::try_from(count).ok()
+ }
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn forward_checked(start: char, count: usize) -> Option<char> {
+ let start = start as u32;
+ let mut res = Step::forward_checked(start, count)?;
+ if start < 0xD800 && 0xD800 <= res {
+ res = Step::forward_checked(res, 0x800)?;
+ }
+ if res <= char::MAX as u32 {
+ // SAFETY: res is a valid unicode scalar
+ // (below 0x110000 and not in 0xD800..0xE000)
+ Some(unsafe { char::from_u32_unchecked(res) })
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn backward_checked(start: char, count: usize) -> Option<char> {
+ let start = start as u32;
+ let mut res = Step::backward_checked(start, count)?;
+ if start >= 0xE000 && 0xE000 > res {
+ res = Step::backward_checked(res, 0x800)?;
+ }
+ // SAFETY: res is a valid unicode scalar
+ // (below 0x110000 and not in 0xD800..0xE000)
+ Some(unsafe { char::from_u32_unchecked(res) })
+ }
+
+ #[inline]
+ unsafe fn forward_unchecked(start: char, count: usize) -> char {
+ let start = start as u32;
+ // SAFETY: the caller must guarantee that this doesn't overflow
+ // the range of values for a char.
+ let mut res = unsafe { Step::forward_unchecked(start, count) };
+ if start < 0xD800 && 0xD800 <= res {
+ // SAFETY: the caller must guarantee that this doesn't overflow
+ // the range of values for a char.
+ res = unsafe { Step::forward_unchecked(res, 0x800) };
+ }
+ // SAFETY: because of the previous contract, this is guaranteed
+ // by the caller to be a valid char.
+ unsafe { char::from_u32_unchecked(res) }
+ }
+
+ #[inline]
+ unsafe fn backward_unchecked(start: char, count: usize) -> char {
+ let start = start as u32;
+ // SAFETY: the caller must guarantee that this doesn't overflow
+ // the range of values for a char.
+ let mut res = unsafe { Step::backward_unchecked(start, count) };
+ if start >= 0xE000 && 0xE000 > res {
+ // SAFETY: the caller must guarantee that this doesn't overflow
+ // the range of values for a char.
+ res = unsafe { Step::backward_unchecked(res, 0x800) };
+ }
+ // SAFETY: because of the previous contract, this is guaranteed
+ // by the caller to be a valid char.
+ unsafe { char::from_u32_unchecked(res) }
+ }
+}
+
+macro_rules! range_exact_iter_impl {
+ ($($t:ty)*) => ($(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl ExactSizeIterator for ops::Range<$t> { }
+ )*)
+}
+
+/// Safety: This macro must only be used on types that are `Copy` and result in ranges
+/// which have an exact `size_hint()` whose upper bound is never `None`.
+macro_rules! unsafe_range_trusted_random_access_impl {
+ ($($t:ty)*) => ($(
+ #[doc(hidden)]
+ #[unstable(feature = "trusted_random_access", issue = "none")]
+ unsafe impl TrustedRandomAccess for ops::Range<$t> {}
+
+ #[doc(hidden)]
+ #[unstable(feature = "trusted_random_access", issue = "none")]
+ unsafe impl TrustedRandomAccessNoCoerce for ops::Range<$t> {
+ const MAY_HAVE_SIDE_EFFECT: bool = false;
+ }
+ )*)
+}
+
+macro_rules! range_incl_exact_iter_impl {
+ ($($t:ty)*) => ($(
+ #[stable(feature = "inclusive_range", since = "1.26.0")]
+ impl ExactSizeIterator for ops::RangeInclusive<$t> { }
+ )*)
+}
+
+/// Specialization implementations for `Range`.
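+///
+/// There is a `default` implementation for any `A: Step`, plus a specialized
+/// one below for `T: TrustedStep` that can step with the `_unchecked` methods
+/// and skip the `expect` checks.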
+trait RangeIteratorImpl {
+ type Item;
+
+ // Iterator
+ fn spec_next(&mut self) -> Option<Self::Item>;
+ fn spec_nth(&mut self, n: usize) -> Option<Self::Item>;
+ fn spec_advance_by(&mut self, n: usize) -> Result<(), usize>;
+
+ // DoubleEndedIterator
+ fn spec_next_back(&mut self) -> Option<Self::Item>;
+ fn spec_nth_back(&mut self, n: usize) -> Option<Self::Item>;
+ fn spec_advance_back_by(&mut self, n: usize) -> Result<(), usize>;
+}
+
+impl<A: Step> RangeIteratorImpl for ops::Range<A> {
+ type Item = A;
+
+ #[inline]
+ default fn spec_next(&mut self) -> Option<A> {
+ if self.start < self.end {
+ let n =
+ Step::forward_checked(self.start.clone(), 1).expect("`Step` invariants not upheld");
+ Some(mem::replace(&mut self.start, n))
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ default fn spec_nth(&mut self, n: usize) -> Option<A> {
+ if let Some(plus_n) = Step::forward_checked(self.start.clone(), n) {
+ if plus_n < self.end {
+ self.start =
+ Step::forward_checked(plus_n.clone(), 1).expect("`Step` invariants not upheld");
+ return Some(plus_n);
+ }
+ }
+
+ self.start = self.end.clone();
+ None
+ }
+
+ #[inline]
+ default fn spec_advance_by(&mut self, n: usize) -> Result<(), usize> {
+ let available = if self.start <= self.end {
+ Step::steps_between(&self.start, &self.end).unwrap_or(usize::MAX)
+ } else {
+ 0
+ };
+
+ let taken = available.min(n);
+
+ self.start =
+ Step::forward_checked(self.start.clone(), taken).expect("`Step` invariants not upheld");
+
+ if taken < n { Err(taken) } else { Ok(()) }
+ }
+
+ #[inline]
+ default fn spec_next_back(&mut self) -> Option<A> {
+ if self.start < self.end {
+ self.end =
+ Step::backward_checked(self.end.clone(), 1).expect("`Step` invariants not upheld");
+ Some(self.end.clone())
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ default fn spec_nth_back(&mut self, n: usize) -> Option<A> {
+ if let Some(minus_n) = Step::backward_checked(self.end.clone(), n) {
+ if minus_n > self.start {
+ self.end =
+ Step::backward_checked(minus_n, 1).expect("`Step` invariants not upheld");
+ return Some(self.end.clone());
+ }
+ }
+
+ self.end = self.start.clone();
+ None
+ }
+
+ #[inline]
+ default fn spec_advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ let available = if self.start <= self.end {
+ Step::steps_between(&self.start, &self.end).unwrap_or(usize::MAX)
+ } else {
+ 0
+ };
+
+ let taken = available.min(n);
+
+ self.end =
+ Step::backward_checked(self.end.clone(), taken).expect("`Step` invariants not upheld");
+
+ if taken < n { Err(taken) } else { Ok(()) }
+ }
+}
+
+impl<T: TrustedStep> RangeIteratorImpl for ops::Range<T> {
+ #[inline]
+ fn spec_next(&mut self) -> Option<T> {
+ if self.start < self.end {
+ // SAFETY: just checked precondition
+ let n = unsafe { Step::forward_unchecked(self.start.clone(), 1) };
+ Some(mem::replace(&mut self.start, n))
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn spec_nth(&mut self, n: usize) -> Option<T> {
+ if let Some(plus_n) = Step::forward_checked(self.start.clone(), n) {
+ if plus_n < self.end {
+ // SAFETY: just checked precondition
+ self.start = unsafe { Step::forward_unchecked(plus_n.clone(), 1) };
+ return Some(plus_n);
+ }
+ }
+
+ self.start = self.end.clone();
+ None
+ }
+
+ #[inline]
+ fn spec_advance_by(&mut self, n: usize) -> Result<(), usize> {
+ let available = if self.start <= self.end {
+ Step::steps_between(&self.start, &self.end).unwrap_or(usize::MAX)
+ } else {
+ 0
+ };
+
+ let taken = available.min(n);
+
+        // SAFETY: the conditions above ensure that the count is in bounds. If start <= end
+        // then steps_between either returns a bound to which we clamp, or returns None, which
+        // together with the initial inequality implies more than usize::MAX steps.
+        // Otherwise 0 is returned, which is always safe to use.
+ self.start = unsafe { Step::forward_unchecked(self.start.clone(), taken) };
+
+ if taken < n { Err(taken) } else { Ok(()) }
+ }
+
+ #[inline]
+ fn spec_next_back(&mut self) -> Option<T> {
+ if self.start < self.end {
+ // SAFETY: just checked precondition
+ self.end = unsafe { Step::backward_unchecked(self.end.clone(), 1) };
+ Some(self.end.clone())
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn spec_nth_back(&mut self, n: usize) -> Option<T> {
+ if let Some(minus_n) = Step::backward_checked(self.end.clone(), n) {
+ if minus_n > self.start {
+ // SAFETY: just checked precondition
+ self.end = unsafe { Step::backward_unchecked(minus_n, 1) };
+ return Some(self.end.clone());
+ }
+ }
+
+ self.end = self.start.clone();
+ None
+ }
+
+ #[inline]
+ fn spec_advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ let available = if self.start <= self.end {
+ Step::steps_between(&self.start, &self.end).unwrap_or(usize::MAX)
+ } else {
+ 0
+ };
+
+ let taken = available.min(n);
+
+ // SAFETY: same as the spec_advance_by() implementation
+ self.end = unsafe { Step::backward_unchecked(self.end.clone(), taken) };
+
+ if taken < n { Err(taken) } else { Ok(()) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A: Step> Iterator for ops::Range<A> {
+ type Item = A;
+
+ #[inline]
+ fn next(&mut self) -> Option<A> {
+ self.spec_next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.start < self.end {
+ let hint = Step::steps_between(&self.start, &self.end);
+ (hint.unwrap_or(usize::MAX), hint)
+ } else {
+ (0, Some(0))
+ }
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<A> {
+ self.spec_nth(n)
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<A> {
+ self.next_back()
+ }
+
+ #[inline]
+ fn min(mut self) -> Option<A> {
+ self.next()
+ }
+
+ #[inline]
+ fn max(mut self) -> Option<A> {
+ self.next_back()
+ }
+
+ #[inline]
+ fn is_sorted(self) -> bool {
+ true
+ }
+
+ #[inline]
+ fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ self.spec_advance_by(n)
+ }
+
+ #[inline]
+ unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item
+ where
+ Self: TrustedRandomAccessNoCoerce,
+ {
+ // SAFETY: The TrustedRandomAccess contract requires that callers only pass an index
+ // that is in bounds.
+ // Additionally Self: TrustedRandomAccess is only implemented for Copy types
+ // which means even repeated reads of the same index would be safe.
+ unsafe { Step::forward_unchecked(self.start.clone(), idx) }
+ }
+}
+
+// These macros generate `ExactSizeIterator` impls for various range types.
+//
+// * `ExactSizeIterator::len` is required to always return an exact `usize`,
+// so no range can be longer than `usize::MAX`.
+// * For integer types in `Range<_>` this is the case for types narrower than or as wide as `usize`.
+// For integer types in `RangeInclusive<_>`
+// this is the case for types *strictly narrower* than `usize`
+// since e.g. `(0..=u64::MAX).len()` would be `u64::MAX + 1`.
+range_exact_iter_impl! {
+ usize u8 u16
+ isize i8 i16
+
+ // These are incorrect per the reasoning above,
+ // but removing them would be a breaking change as they were stabilized in Rust 1.0.0.
+    // So `(0..66_000_u32).len()`, for example, will compile without errors or warnings
+    // on 16-bit platforms, but continue to give a wrong result.
+ u32
+ i32
+}
+
+unsafe_range_trusted_random_access_impl! {
+ usize u8 u16
+ isize i8 i16
+}
+
+#[cfg(target_pointer_width = "32")]
+unsafe_range_trusted_random_access_impl! {
+ u32 i32
+}
+
+#[cfg(target_pointer_width = "64")]
+unsafe_range_trusted_random_access_impl! {
+ u32 i32
+ u64 i64
+}
+
+range_incl_exact_iter_impl! {
+ u8
+ i8
+
+ // These are incorrect per the reasoning above,
+ // but removing them would be a breaking change as they were stabilized in Rust 1.26.0.
+    // So `(0..=u16::MAX).len()`, for example, will compile without errors or warnings
+    // on 16-bit platforms, but continue to give a wrong result.
+ u16
+ i16
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A: Step> DoubleEndedIterator for ops::Range<A> {
+ #[inline]
+ fn next_back(&mut self) -> Option<A> {
+ self.spec_next_back()
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<A> {
+ self.spec_nth_back(n)
+ }
+
+ #[inline]
+ fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ self.spec_advance_back_by(n)
+ }
+}
+
+// Safety:
+// The following invariants for `Step::steps_between` exist:
+//
+// > * `steps_between(&a, &b) == Some(n)` only if `a <= b`
+// > * Note that `a <= b` does _not_ imply `steps_between(&a, &b) != None`;
+// > this is the case when it would require more than `usize::MAX` steps to
+// > get to `b`
+// > * `steps_between(&a, &b) == None` if `a > b`
+//
+// The first invariant is what is generally required for `TrustedLen` to be
+// sound. The note addendum satisfies an additional `TrustedLen` invariant.
+//
+// > The upper bound must only be `None` if the actual iterator length is larger
+// > than `usize::MAX`
+//
+// The second invariant logically follows the first so long as the `PartialOrd`
+// implementation is correct; regardless, it is explicitly stated. If `a > b`
+// then `(0, Some(0))` is returned by `ops::Range<A: Step>::size_hint` without
+// consulting `steps_between` at all, so that case cannot violate the
+// `TrustedLen` requirement either.
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<A: TrustedStep> TrustedLen for ops::Range<A> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<A: Step> FusedIterator for ops::Range<A> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A: Step> Iterator for ops::RangeFrom<A> {
+ type Item = A;
+
+ #[inline]
+ fn next(&mut self) -> Option<A> {
+ let n = Step::forward(self.start.clone(), 1);
+ Some(mem::replace(&mut self.start, n))
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (usize::MAX, None)
+ }
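+    // A `RangeFrom` never terminates on its own, so the hint reports at least
+    // `usize::MAX` items with no known upper bound.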
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<A> {
+ let plus_n = Step::forward(self.start.clone(), n);
+ self.start = Step::forward(plus_n.clone(), 1);
+ Some(plus_n)
+ }
+}
+
+// Safety: See above implementation for `ops::Range<A>`
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<A: TrustedStep> TrustedLen for ops::RangeFrom<A> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<A: Step> FusedIterator for ops::RangeFrom<A> {}
+
+trait RangeInclusiveIteratorImpl {
+ type Item;
+
+ // Iterator
+ fn spec_next(&mut self) -> Option<Self::Item>;
+ fn spec_try_fold<B, F, R>(&mut self, init: B, f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>;
+
+ // DoubleEndedIterator
+ fn spec_next_back(&mut self) -> Option<Self::Item>;
+ fn spec_try_rfold<B, F, R>(&mut self, init: B, f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>;
+}
+
+impl<A: Step> RangeInclusiveIteratorImpl for ops::RangeInclusive<A> {
+ type Item = A;
+
+ #[inline]
+ default fn spec_next(&mut self) -> Option<A> {
+ if self.is_empty() {
+ return None;
+ }
+ let is_iterating = self.start < self.end;
+ Some(if is_iterating {
+ let n =
+ Step::forward_checked(self.start.clone(), 1).expect("`Step` invariants not upheld");
+ mem::replace(&mut self.start, n)
+ } else {
+ self.exhausted = true;
+ self.start.clone()
+ })
+ }
+
+ #[inline]
+ default fn spec_try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, A) -> R,
+ R: Try<Output = B>,
+ {
+ if self.is_empty() {
+ return try { init };
+ }
+
+ let mut accum = init;
+
+ while self.start < self.end {
+ let n =
+ Step::forward_checked(self.start.clone(), 1).expect("`Step` invariants not upheld");
+ let n = mem::replace(&mut self.start, n);
+ accum = f(accum, n)?;
+ }
+
+ self.exhausted = true;
+
+ if self.start == self.end {
+ accum = f(accum, self.start.clone())?;
+ }
+
+ try { accum }
+ }
+
+ #[inline]
+ default fn spec_next_back(&mut self) -> Option<A> {
+ if self.is_empty() {
+ return None;
+ }
+ let is_iterating = self.start < self.end;
+ Some(if is_iterating {
+ let n =
+ Step::backward_checked(self.end.clone(), 1).expect("`Step` invariants not upheld");
+ mem::replace(&mut self.end, n)
+ } else {
+ self.exhausted = true;
+ self.end.clone()
+ })
+ }
+
+ #[inline]
+ default fn spec_try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, A) -> R,
+ R: Try<Output = B>,
+ {
+ if self.is_empty() {
+ return try { init };
+ }
+
+ let mut accum = init;
+
+ while self.start < self.end {
+ let n =
+ Step::backward_checked(self.end.clone(), 1).expect("`Step` invariants not upheld");
+ let n = mem::replace(&mut self.end, n);
+ accum = f(accum, n)?;
+ }
+
+ self.exhausted = true;
+
+ if self.start == self.end {
+ accum = f(accum, self.start.clone())?;
+ }
+
+ try { accum }
+ }
+}
+
+impl<T: TrustedStep> RangeInclusiveIteratorImpl for ops::RangeInclusive<T> {
+ #[inline]
+ fn spec_next(&mut self) -> Option<T> {
+ if self.is_empty() {
+ return None;
+ }
+ let is_iterating = self.start < self.end;
+ Some(if is_iterating {
+ // SAFETY: just checked precondition
+ let n = unsafe { Step::forward_unchecked(self.start.clone(), 1) };
+ mem::replace(&mut self.start, n)
+ } else {
+ self.exhausted = true;
+ self.start.clone()
+ })
+ }
+
+ #[inline]
+ fn spec_try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, T) -> R,
+ R: Try<Output = B>,
+ {
+ if self.is_empty() {
+ return try { init };
+ }
+
+ let mut accum = init;
+
+ while self.start < self.end {
+ // SAFETY: just checked precondition
+ let n = unsafe { Step::forward_unchecked(self.start.clone(), 1) };
+ let n = mem::replace(&mut self.start, n);
+ accum = f(accum, n)?;
+ }
+
+ self.exhausted = true;
+
+ if self.start == self.end {
+ accum = f(accum, self.start.clone())?;
+ }
+
+ try { accum }
+ }
+
+ #[inline]
+ fn spec_next_back(&mut self) -> Option<T> {
+ if self.is_empty() {
+ return None;
+ }
+ let is_iterating = self.start < self.end;
+ Some(if is_iterating {
+ // SAFETY: just checked precondition
+ let n = unsafe { Step::backward_unchecked(self.end.clone(), 1) };
+ mem::replace(&mut self.end, n)
+ } else {
+ self.exhausted = true;
+ self.end.clone()
+ })
+ }
+
+ #[inline]
+ fn spec_try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, T) -> R,
+ R: Try<Output = B>,
+ {
+ if self.is_empty() {
+ return try { init };
+ }
+
+ let mut accum = init;
+
+ while self.start < self.end {
+ // SAFETY: just checked precondition
+ let n = unsafe { Step::backward_unchecked(self.end.clone(), 1) };
+ let n = mem::replace(&mut self.end, n);
+ accum = f(accum, n)?;
+ }
+
+ self.exhausted = true;
+
+ if self.start == self.end {
+ accum = f(accum, self.start.clone())?;
+ }
+
+ try { accum }
+ }
+}
+
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+impl<A: Step> Iterator for ops::RangeInclusive<A> {
+ type Item = A;
+
+ #[inline]
+ fn next(&mut self) -> Option<A> {
+ self.spec_next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.is_empty() {
+ return (0, Some(0));
+ }
+
+ match Step::steps_between(&self.start, &self.end) {
+ Some(hint) => (hint.saturating_add(1), hint.checked_add(1)),
+ None => (usize::MAX, None),
+ }
+ }
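+    // For instance, on a 64-bit target `(0..=u64::MAX).size_hint()` is
+    // `(usize::MAX, None)`: the true length, `u64::MAX + 1`, does not fit in a
+    // `usize`, so no exact upper bound can be reported.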
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<A> {
+ if self.is_empty() {
+ return None;
+ }
+
+ if let Some(plus_n) = Step::forward_checked(self.start.clone(), n) {
+ use crate::cmp::Ordering::*;
+
+ match plus_n.partial_cmp(&self.end) {
+ Some(Less) => {
+ self.start = Step::forward(plus_n.clone(), 1);
+ return Some(plus_n);
+ }
+ Some(Equal) => {
+ self.start = plus_n.clone();
+ self.exhausted = true;
+ return Some(plus_n);
+ }
+ _ => {}
+ }
+ }
+
+ self.start = self.end.clone();
+ self.exhausted = true;
+ None
+ }
+
+ #[inline]
+ fn try_fold<B, F, R>(&mut self, init: B, f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ self.spec_try_fold(init, f)
+ }
+
+ #[inline]
+ fn fold<B, F>(mut self, init: B, f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ #[inline]
+ fn ok<B, T>(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result<B, !> {
+ move |acc, x| Ok(f(acc, x))
+ }
+
+ self.try_fold(init, ok(f)).unwrap()
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<A> {
+ self.next_back()
+ }
+
+ #[inline]
+ fn min(mut self) -> Option<A> {
+ self.next()
+ }
+
+ #[inline]
+ fn max(mut self) -> Option<A> {
+ self.next_back()
+ }
+
+ #[inline]
+ fn is_sorted(self) -> bool {
+ true
+ }
+}
+
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+impl<A: Step> DoubleEndedIterator for ops::RangeInclusive<A> {
+ #[inline]
+ fn next_back(&mut self) -> Option<A> {
+ self.spec_next_back()
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<A> {
+ if self.is_empty() {
+ return None;
+ }
+
+ if let Some(minus_n) = Step::backward_checked(self.end.clone(), n) {
+ use crate::cmp::Ordering::*;
+
+ match minus_n.partial_cmp(&self.start) {
+ Some(Greater) => {
+ self.end = Step::backward(minus_n.clone(), 1);
+ return Some(minus_n);
+ }
+ Some(Equal) => {
+ self.end = minus_n.clone();
+ self.exhausted = true;
+ return Some(minus_n);
+ }
+ _ => {}
+ }
+ }
+
+ self.end = self.start.clone();
+ self.exhausted = true;
+ None
+ }
+
+ #[inline]
+ fn try_rfold<B, F, R>(&mut self, init: B, f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ self.spec_try_rfold(init, f)
+ }
+
+ #[inline]
+ fn rfold<B, F>(mut self, init: B, f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ #[inline]
+ fn ok<B, T>(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result<B, !> {
+ move |acc, x| Ok(f(acc, x))
+ }
+
+ self.try_rfold(init, ok(f)).unwrap()
+ }
+}
+
+// Safety: See above implementation for `ops::Range<A>`
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<A: TrustedStep> TrustedLen for ops::RangeInclusive<A> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<A: Step> FusedIterator for ops::RangeInclusive<A> {}
diff --git a/library/core/src/iter/sources.rs b/library/core/src/iter/sources.rs
new file mode 100644
index 000000000..d34772cd3
--- /dev/null
+++ b/library/core/src/iter/sources.rs
@@ -0,0 +1,36 @@
+mod empty;
+mod from_fn;
+mod from_generator;
+mod once;
+mod once_with;
+mod repeat;
+mod repeat_with;
+mod successors;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::repeat::{repeat, Repeat};
+
+#[stable(feature = "iter_empty", since = "1.2.0")]
+pub use self::empty::{empty, Empty};
+
+#[stable(feature = "iter_once", since = "1.2.0")]
+pub use self::once::{once, Once};
+
+#[stable(feature = "iterator_repeat_with", since = "1.28.0")]
+pub use self::repeat_with::{repeat_with, RepeatWith};
+
+#[stable(feature = "iter_from_fn", since = "1.34.0")]
+pub use self::from_fn::{from_fn, FromFn};
+
+#[unstable(
+ feature = "iter_from_generator",
+ issue = "43122",
+ reason = "generators are unstable"
+)]
+pub use self::from_generator::from_generator;
+
+#[stable(feature = "iter_successors", since = "1.34.0")]
+pub use self::successors::{successors, Successors};
+
+#[stable(feature = "iter_once_with", since = "1.43.0")]
+pub use self::once_with::{once_with, OnceWith};
diff --git a/library/core/src/iter/sources/empty.rs b/library/core/src/iter/sources/empty.rs
new file mode 100644
index 000000000..98734c527
--- /dev/null
+++ b/library/core/src/iter/sources/empty.rs
@@ -0,0 +1,94 @@
+use crate::fmt;
+use crate::iter::{FusedIterator, TrustedLen};
+use crate::marker;
+
+/// Creates an iterator that yields nothing.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::iter;
+///
+/// // this could have been an iterator over i32, but alas, it's just not.
+/// let mut nope = iter::empty::<i32>();
+///
+/// assert_eq!(None, nope.next());
+/// ```
+#[stable(feature = "iter_empty", since = "1.2.0")]
+#[rustc_const_stable(feature = "const_iter_empty", since = "1.32.0")]
+pub const fn empty<T>() -> Empty<T> {
+ Empty(marker::PhantomData)
+}
+
+// Newtype for use in `PhantomData` to avoid
+// > error: const-stable function cannot use `#[feature(const_fn_fn_ptr_basics)]`
+// in `const fn empty<T>()` above.
+struct FnReturning<T>(fn() -> T);
+
+/// An iterator that yields nothing.
+///
+/// This `struct` is created by the [`empty()`] function. See its documentation for more.
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "iter_empty", since = "1.2.0")]
+pub struct Empty<T>(marker::PhantomData<FnReturning<T>>);
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<T> fmt::Debug for Empty<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Empty").finish()
+ }
+}
+
+#[stable(feature = "iter_empty", since = "1.2.0")]
+impl<T> Iterator for Empty<T> {
+ type Item = T;
+
+ fn next(&mut self) -> Option<T> {
+ None
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, Some(0))
+ }
+}
+
+#[stable(feature = "iter_empty", since = "1.2.0")]
+impl<T> DoubleEndedIterator for Empty<T> {
+ fn next_back(&mut self) -> Option<T> {
+ None
+ }
+}
+
+#[stable(feature = "iter_empty", since = "1.2.0")]
+impl<T> ExactSizeIterator for Empty<T> {
+ fn len(&self) -> usize {
+ 0
+ }
+}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T> TrustedLen for Empty<T> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for Empty<T> {}
+
+// not #[derive] because that adds a Clone bound on T,
+// which isn't necessary.
+#[stable(feature = "iter_empty", since = "1.2.0")]
+impl<T> Clone for Empty<T> {
+ fn clone(&self) -> Empty<T> {
+ Empty(marker::PhantomData)
+ }
+}
+
+// not #[derive] because that adds a Default bound on T,
+// which isn't necessary.
+#[stable(feature = "iter_empty", since = "1.2.0")]
+#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
+impl<T> const Default for Empty<T> {
+ fn default() -> Empty<T> {
+ Empty(marker::PhantomData)
+ }
+}
diff --git a/library/core/src/iter/sources/from_fn.rs b/library/core/src/iter/sources/from_fn.rs
new file mode 100644
index 000000000..3cd383047
--- /dev/null
+++ b/library/core/src/iter/sources/from_fn.rs
@@ -0,0 +1,78 @@
+use crate::fmt;
+
+/// Creates a new iterator where each iteration calls the provided closure
+/// `F: FnMut() -> Option<T>`.
+///
+/// This allows creating a custom iterator with any behavior
+/// without using the more verbose syntax of creating a dedicated type
+/// and implementing the [`Iterator`] trait for it.
+///
+/// Note that the `FromFn` iterator doesn’t make assumptions about the behavior of the closure,
+/// and therefore conservatively does not implement [`FusedIterator`],
+/// or override [`Iterator::size_hint()`] from its default `(0, None)`.
+///
+/// The closure can use captures and its environment to track state across iterations. Depending on
+/// how the iterator is used, this may require specifying the [`move`] keyword on the closure.
+///
+/// [`move`]: ../../std/keyword.move.html
+/// [`FusedIterator`]: crate::iter::FusedIterator
+///
+/// # Examples
+///
+/// Let’s re-implement the counter iterator from [module-level documentation]:
+///
+/// [module-level documentation]: crate::iter
+///
+/// ```
+/// let mut count = 0;
+/// let counter = std::iter::from_fn(move || {
+/// // Increment our count. This is why we started at zero.
+/// count += 1;
+///
+/// // Check to see if we've finished counting or not.
+/// if count < 6 {
+/// Some(count)
+/// } else {
+/// None
+/// }
+/// });
+/// assert_eq!(counter.collect::<Vec<_>>(), &[1, 2, 3, 4, 5]);
+/// ```
+#[inline]
+#[stable(feature = "iter_from_fn", since = "1.34.0")]
+pub fn from_fn<T, F>(f: F) -> FromFn<F>
+where
+ F: FnMut() -> Option<T>,
+{
+ FromFn(f)
+}
+
+/// An iterator where each iteration calls the provided closure `F: FnMut() -> Option<T>`.
+///
+/// This `struct` is created by the [`iter::from_fn()`] function.
+/// See its documentation for more.
+///
+/// [`iter::from_fn()`]: from_fn
+#[derive(Clone)]
+#[stable(feature = "iter_from_fn", since = "1.34.0")]
+pub struct FromFn<F>(F);
+
+#[stable(feature = "iter_from_fn", since = "1.34.0")]
+impl<T, F> Iterator for FromFn<F>
+where
+ F: FnMut() -> Option<T>,
+{
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ (self.0)()
+ }
+}
+
+#[stable(feature = "iter_from_fn", since = "1.34.0")]
+impl<F> fmt::Debug for FromFn<F> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("FromFn").finish()
+ }
+}
diff --git a/library/core/src/iter/sources/from_generator.rs b/library/core/src/iter/sources/from_generator.rs
new file mode 100644
index 000000000..8e7cbd34a
--- /dev/null
+++ b/library/core/src/iter/sources/from_generator.rs
@@ -0,0 +1,43 @@
+use crate::ops::{Generator, GeneratorState};
+use crate::pin::Pin;
+
+/// Creates a new iterator where each iteration calls the provided generator.
+///
+/// Similar to [`iter::from_fn`].
+///
+/// [`iter::from_fn`]: crate::iter::from_fn
+///
+/// # Examples
+///
+/// ```
+/// #![feature(generators)]
+/// #![feature(iter_from_generator)]
+///
+/// let it = std::iter::from_generator(|| {
+/// yield 1;
+/// yield 2;
+/// yield 3;
+/// });
+/// let v: Vec<_> = it.collect();
+/// assert_eq!(v, [1, 2, 3]);
+/// ```
+#[inline]
+#[unstable(feature = "iter_from_generator", issue = "43122", reason = "generators are unstable")]
+pub fn from_generator<G: Generator<Return = ()> + Unpin>(
+ generator: G,
+) -> impl Iterator<Item = G::Yield> {
+ FromGenerator(generator)
+}
+
+struct FromGenerator<G>(G);
+
+impl<G: Generator<Return = ()> + Unpin> Iterator for FromGenerator<G> {
+ type Item = G::Yield;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ match Pin::new(&mut self.0).resume(()) {
+ GeneratorState::Yielded(n) => Some(n),
+ GeneratorState::Complete(()) => None,
+ }
+ }
+}
diff --git a/library/core/src/iter/sources/once.rs b/library/core/src/iter/sources/once.rs
new file mode 100644
index 000000000..6e9ed0d3c
--- /dev/null
+++ b/library/core/src/iter/sources/once.rs
@@ -0,0 +1,99 @@
+use crate::iter::{FusedIterator, TrustedLen};
+
+/// Creates an iterator that yields an element exactly once.
+///
+/// This is commonly used to adapt a single value into a [`chain()`] of other
+/// kinds of iteration. Maybe you have an iterator that covers almost
+/// everything, but you need an extra special case. Maybe you have a function
+/// which works on iterators, but you only need to process one value.
+///
+/// [`chain()`]: Iterator::chain
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::iter;
+///
+/// // one is the loneliest number
+/// let mut one = iter::once(1);
+///
+/// assert_eq!(Some(1), one.next());
+///
+/// // just one, that's all we get
+/// assert_eq!(None, one.next());
+/// ```
+///
+/// Chaining together with another iterator. Let's say that we want to iterate
+/// over each file of the `.foo` directory, but also a configuration file,
+/// `.foorc`:
+///
+/// ```no_run
+/// use std::iter;
+/// use std::fs;
+/// use std::path::PathBuf;
+///
+/// let dirs = fs::read_dir(".foo").unwrap();
+///
+/// // we need to convert from an iterator of `DirEntry`s to an iterator of
+/// // `PathBuf`s, so we use map
+/// let dirs = dirs.map(|file| file.unwrap().path());
+///
+/// // now, our iterator just for our config file
+/// let config = iter::once(PathBuf::from(".foorc"));
+///
+/// // chain the two iterators together into one big iterator
+/// let files = dirs.chain(config);
+///
+/// // this will give us all of the files in .foo as well as .foorc
+/// for f in files {
+/// println!("{f:?}");
+/// }
+/// ```
+#[stable(feature = "iter_once", since = "1.2.0")]
+pub fn once<T>(value: T) -> Once<T> {
+ Once { inner: Some(value).into_iter() }
+}
+
+/// An iterator that yields an element exactly once.
+///
+/// This `struct` is created by the [`once()`] function. See its documentation for more.
+#[derive(Clone, Debug)]
+#[stable(feature = "iter_once", since = "1.2.0")]
+pub struct Once<T> {
+ inner: crate::option::IntoIter<T>,
+}
+
+#[stable(feature = "iter_once", since = "1.2.0")]
+impl<T> Iterator for Once<T> {
+ type Item = T;
+
+ fn next(&mut self) -> Option<T> {
+ self.inner.next()
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+#[stable(feature = "iter_once", since = "1.2.0")]
+impl<T> DoubleEndedIterator for Once<T> {
+ fn next_back(&mut self) -> Option<T> {
+ self.inner.next_back()
+ }
+}
+
+#[stable(feature = "iter_once", since = "1.2.0")]
+impl<T> ExactSizeIterator for Once<T> {
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T> TrustedLen for Once<T> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for Once<T> {}
diff --git a/library/core/src/iter/sources/once_with.rs b/library/core/src/iter/sources/once_with.rs
new file mode 100644
index 000000000..d79f85c25
--- /dev/null
+++ b/library/core/src/iter/sources/once_with.rs
@@ -0,0 +1,109 @@
+use crate::iter::{FusedIterator, TrustedLen};
+
+/// Creates an iterator that lazily generates a value exactly once by invoking
+/// the provided closure.
+///
+/// This is commonly used to adapt a single value generator into a [`chain()`] of
+/// other kinds of iteration. Maybe you have an iterator that covers almost
+/// everything, but you need an extra special case. Maybe you have a function
+/// which works on iterators, but you only need to process one value.
+///
+/// Unlike [`once()`], this function will lazily generate the value on request.
+///
+/// [`chain()`]: Iterator::chain
+/// [`once()`]: crate::iter::once
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::iter;
+///
+/// // one is the loneliest number
+/// let mut one = iter::once_with(|| 1);
+///
+/// assert_eq!(Some(1), one.next());
+///
+/// // just one, that's all we get
+/// assert_eq!(None, one.next());
+/// ```
+///
+/// Chaining together with another iterator. Let's say that we want to iterate
+/// over each file of the `.foo` directory, but also a configuration file,
+/// `.foorc`:
+///
+/// ```no_run
+/// use std::iter;
+/// use std::fs;
+/// use std::path::PathBuf;
+///
+/// let dirs = fs::read_dir(".foo").unwrap();
+///
+/// // we need to convert from an iterator of `DirEntry`s to an iterator of
+/// // `PathBuf`s, so we use map
+/// let dirs = dirs.map(|file| file.unwrap().path());
+///
+/// // now, our iterator just for our config file
+/// let config = iter::once_with(|| PathBuf::from(".foorc"));
+///
+/// // chain the two iterators together into one big iterator
+/// let files = dirs.chain(config);
+///
+/// // this will give us all of the files in .foo as well as .foorc
+/// for f in files {
+/// println!("{f:?}");
+/// }
+/// ```
+#[inline]
+#[stable(feature = "iter_once_with", since = "1.43.0")]
+pub fn once_with<A, F: FnOnce() -> A>(gen: F) -> OnceWith<F> {
+ OnceWith { gen: Some(gen) }
+}
+
+/// An iterator that yields a single element of type `A` by
+/// applying the provided closure `F: FnOnce() -> A`.
+///
+/// This `struct` is created by the [`once_with()`] function.
+/// See its documentation for more.
+#[derive(Clone, Debug)]
+#[stable(feature = "iter_once_with", since = "1.43.0")]
+pub struct OnceWith<F> {
+ gen: Option<F>,
+}
+
+#[stable(feature = "iter_once_with", since = "1.43.0")]
+impl<A, F: FnOnce() -> A> Iterator for OnceWith<F> {
+ type Item = A;
+
+ #[inline]
+ fn next(&mut self) -> Option<A> {
+ let f = self.gen.take()?;
+ Some(f())
+ }
+
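+    // `Option`'s by-reference iterator reports `(1, Some(1))` while the closure
+    // is still present and `(0, Some(0))` once it has been taken, which is
+    // exactly the hint needed here.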
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.gen.iter().size_hint()
+ }
+}
+
+#[stable(feature = "iter_once_with", since = "1.43.0")]
+impl<A, F: FnOnce() -> A> DoubleEndedIterator for OnceWith<F> {
+ fn next_back(&mut self) -> Option<A> {
+ self.next()
+ }
+}
+
+#[stable(feature = "iter_once_with", since = "1.43.0")]
+impl<A, F: FnOnce() -> A> ExactSizeIterator for OnceWith<F> {
+ fn len(&self) -> usize {
+ self.gen.iter().len()
+ }
+}
+
+#[stable(feature = "iter_once_with", since = "1.43.0")]
+impl<A, F: FnOnce() -> A> FusedIterator for OnceWith<F> {}
+
+#[stable(feature = "iter_once_with", since = "1.43.0")]
+unsafe impl<A, F: FnOnce() -> A> TrustedLen for OnceWith<F> {}
diff --git a/library/core/src/iter/sources/repeat.rs b/library/core/src/iter/sources/repeat.rs
new file mode 100644
index 000000000..733142ed0
--- /dev/null
+++ b/library/core/src/iter/sources/repeat.rs
@@ -0,0 +1,129 @@
+use crate::iter::{FusedIterator, TrustedLen};
+
+/// Creates a new iterator that endlessly repeats a single element.
+///
+/// The `repeat()` function repeats a single value over and over again.
+///
+/// Infinite iterators like `repeat()` are often used with adapters like
+/// [`Iterator::take()`], in order to make them finite.
+///
+/// If the element type of the iterator you need does not implement `Clone`,
+/// or if you do not want to keep the repeated element in memory, you can
+/// instead use the [`repeat_with()`] function.
+///
+/// [`repeat_with()`]: crate::iter::repeat_with
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::iter;
+///
+/// // the number four 4ever:
+/// let mut fours = iter::repeat(4);
+///
+/// assert_eq!(Some(4), fours.next());
+/// assert_eq!(Some(4), fours.next());
+/// assert_eq!(Some(4), fours.next());
+/// assert_eq!(Some(4), fours.next());
+/// assert_eq!(Some(4), fours.next());
+///
+/// // yup, still four
+/// assert_eq!(Some(4), fours.next());
+/// ```
+///
+/// Going finite with [`Iterator::take()`]:
+///
+/// ```
+/// use std::iter;
+///
+/// // that last example was too many fours. Let's only have four fours.
+/// let mut four_fours = iter::repeat(4).take(4);
+///
+/// assert_eq!(Some(4), four_fours.next());
+/// assert_eq!(Some(4), four_fours.next());
+/// assert_eq!(Some(4), four_fours.next());
+/// assert_eq!(Some(4), four_fours.next());
+///
+/// // ... and now we're done
+/// assert_eq!(None, four_fours.next());
+/// ```
+#[inline]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "iter_repeat")]
+pub fn repeat<T: Clone>(elt: T) -> Repeat<T> {
+ Repeat { element: elt }
+}
+
+/// An iterator that repeats an element endlessly.
+///
+/// This `struct` is created by the [`repeat()`] function. See its documentation for more.
+#[derive(Clone, Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Repeat<A> {
+ element: A,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A: Clone> Iterator for Repeat<A> {
+ type Item = A;
+
+ #[inline]
+ fn next(&mut self) -> Option<A> {
+ Some(self.element.clone())
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (usize::MAX, None)
+ }
+
+ #[inline]
+ fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ // Advancing an infinite iterator of a single element is a no-op.
+ let _ = n;
+ Ok(())
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<A> {
+ let _ = n;
+ Some(self.element.clone())
+ }
+
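+    // An infinite iterator has no last element and no finite count; these
+    // overrides simply diverge, avoiding the endless clones the default
+    // implementations would perform.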
+ fn last(self) -> Option<A> {
+ loop {}
+ }
+
+ fn count(self) -> usize {
+ loop {}
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A: Clone> DoubleEndedIterator for Repeat<A> {
+ #[inline]
+ fn next_back(&mut self) -> Option<A> {
+ Some(self.element.clone())
+ }
+
+ #[inline]
+ fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ // Advancing an infinite iterator of a single element is a no-op.
+ let _ = n;
+ Ok(())
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<A> {
+ let _ = n;
+ Some(self.element.clone())
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<A: Clone> FusedIterator for Repeat<A> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<A: Clone> TrustedLen for Repeat<A> {}
diff --git a/library/core/src/iter/sources/repeat_with.rs b/library/core/src/iter/sources/repeat_with.rs
new file mode 100644
index 000000000..6f62662d8
--- /dev/null
+++ b/library/core/src/iter/sources/repeat_with.rs
@@ -0,0 +1,98 @@
+use crate::iter::{FusedIterator, TrustedLen};
+
+/// Creates a new iterator that repeats elements of type `A` endlessly by
+/// applying the provided closure, the repeater, `F: FnMut() -> A`.
+///
+/// The `repeat_with()` function calls the repeater over and over again.
+///
+/// Infinite iterators like `repeat_with()` are often used with adapters like
+/// [`Iterator::take()`], in order to make them finite.
+///
+/// If the element type of the iterator you need implements [`Clone`], and
+/// it is OK to keep the source element in memory, you should instead use
+/// the [`repeat()`] function.
+///
+/// An iterator produced by `repeat_with()` is not a [`DoubleEndedIterator`].
+/// If you need `repeat_with()` to return a [`DoubleEndedIterator`],
+/// please open a GitHub issue explaining your use case.
+///
+/// [`repeat()`]: crate::iter::repeat
+/// [`DoubleEndedIterator`]: crate::iter::DoubleEndedIterator
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::iter;
+///
+/// // let's assume we have some value of a type that is not `Clone`
+/// // or which we don't want to have in memory just yet because it is expensive:
+/// #[derive(PartialEq, Debug)]
+/// struct Expensive;
+///
+/// // a particular value forever:
+/// let mut things = iter::repeat_with(|| Expensive);
+///
+/// assert_eq!(Some(Expensive), things.next());
+/// assert_eq!(Some(Expensive), things.next());
+/// assert_eq!(Some(Expensive), things.next());
+/// assert_eq!(Some(Expensive), things.next());
+/// assert_eq!(Some(Expensive), things.next());
+/// ```
+///
+/// Using mutation and going finite:
+///
+/// ```rust
+/// use std::iter;
+///
+/// // From the zeroth to the third power of two:
+/// let mut curr = 1;
+/// let mut pow2 = iter::repeat_with(|| { let tmp = curr; curr *= 2; tmp })
+/// .take(4);
+///
+/// assert_eq!(Some(1), pow2.next());
+/// assert_eq!(Some(2), pow2.next());
+/// assert_eq!(Some(4), pow2.next());
+/// assert_eq!(Some(8), pow2.next());
+///
+/// // ... and now we're done
+/// assert_eq!(None, pow2.next());
+/// ```
+#[inline]
+#[stable(feature = "iterator_repeat_with", since = "1.28.0")]
+pub fn repeat_with<A, F: FnMut() -> A>(repeater: F) -> RepeatWith<F> {
+ RepeatWith { repeater }
+}
+
+/// An iterator that repeats elements of type `A` endlessly by
+/// applying the provided closure `F: FnMut() -> A`.
+///
+/// This `struct` is created by the [`repeat_with()`] function.
+/// See its documentation for more.
+#[derive(Copy, Clone, Debug)]
+#[stable(feature = "iterator_repeat_with", since = "1.28.0")]
+pub struct RepeatWith<F> {
+ repeater: F,
+}
+
+#[stable(feature = "iterator_repeat_with", since = "1.28.0")]
+impl<A, F: FnMut() -> A> Iterator for RepeatWith<F> {
+ type Item = A;
+
+ #[inline]
+ fn next(&mut self) -> Option<A> {
+ Some((self.repeater)())
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (usize::MAX, None)
+ }
+}
+
+#[stable(feature = "iterator_repeat_with", since = "1.28.0")]
+impl<A, F: FnMut() -> A> FusedIterator for RepeatWith<F> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<A, F: FnMut() -> A> TrustedLen for RepeatWith<F> {}
diff --git a/library/core/src/iter/sources/successors.rs b/library/core/src/iter/sources/successors.rs
new file mode 100644
index 000000000..99f058a90
--- /dev/null
+++ b/library/core/src/iter/sources/successors.rs
@@ -0,0 +1,66 @@
+use crate::{fmt, iter::FusedIterator};
+
+/// Creates a new iterator where each successive item is computed based on the preceding one.
+///
+/// The iterator starts with the given first item (if any)
+/// and calls the given `FnMut(&T) -> Option<T>` closure to compute each item’s successor.
+///
+/// ```
+/// use std::iter::successors;
+///
+/// let powers_of_10 = successors(Some(1_u16), |n| n.checked_mul(10));
+/// assert_eq!(powers_of_10.collect::<Vec<_>>(), &[1, 10, 100, 1_000, 10_000]);
+/// ```
+#[stable(feature = "iter_successors", since = "1.34.0")]
+pub fn successors<T, F>(first: Option<T>, succ: F) -> Successors<T, F>
+where
+ F: FnMut(&T) -> Option<T>,
+{
+ // If this function returned `impl Iterator<Item=T>`
+ // it could be based on `unfold` and not need a dedicated type.
+    // However, having a named `Successors<T, F>` type allows it to be `Clone` when `T` and `F` are.
+ Successors { next: first, succ }
+}
+
+/// An iterator where each successive item is computed based on the preceding one.
+///
+/// This `struct` is created by the [`iter::successors()`] function.
+/// See its documentation for more.
+///
+/// [`iter::successors()`]: successors
+#[derive(Clone)]
+#[stable(feature = "iter_successors", since = "1.34.0")]
+pub struct Successors<T, F> {
+ next: Option<T>,
+ succ: F,
+}
+
+#[stable(feature = "iter_successors", since = "1.34.0")]
+impl<T, F> Iterator for Successors<T, F>
+where
+ F: FnMut(&T) -> Option<T>,
+{
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ let item = self.next.take()?;
+ self.next = (self.succ)(&item);
+ Some(item)
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.next.is_some() { (1, None) } else { (0, Some(0)) }
+ }
+}
+
+#[stable(feature = "iter_successors", since = "1.34.0")]
+impl<T, F> FusedIterator for Successors<T, F> where F: FnMut(&T) -> Option<T> {}
+
+#[stable(feature = "iter_successors", since = "1.34.0")]
+impl<T: fmt::Debug, F> fmt::Debug for Successors<T, F> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Successors").field("next", &self.next).finish()
+ }
+}
diff --git a/library/core/src/iter/traits/accum.rs b/library/core/src/iter/traits/accum.rs
new file mode 100644
index 000000000..84d83ee39
--- /dev/null
+++ b/library/core/src/iter/traits/accum.rs
@@ -0,0 +1,231 @@
+use crate::iter;
+use crate::num::Wrapping;
+
+/// Trait to represent types that can be created by summing up an iterator.
+///
+/// This trait is used to implement [`Iterator::sum()`]. Types which implement
+/// this trait can be generated by using the [`sum()`] method on an iterator.
+/// Like [`FromIterator`], this trait should rarely be called directly.
+///
+/// [`sum()`]: Iterator::sum
+/// [`FromIterator`]: iter::FromIterator
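+///
+/// # Examples
+///
+/// A small illustration, via [`Iterator::sum()`]:
+///
+/// ```
+/// let total: i32 = [1, 2, 3].iter().sum();
+/// assert_eq!(total, 6);
+/// ```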
+#[stable(feature = "iter_arith_traits", since = "1.12.0")]
+pub trait Sum<A = Self>: Sized {
+ /// Method which takes an iterator and generates `Self` from the elements by
+ /// "summing up" the items.
+ #[stable(feature = "iter_arith_traits", since = "1.12.0")]
+ fn sum<I: Iterator<Item = A>>(iter: I) -> Self;
+}
+
+/// Trait to represent types that can be created by multiplying elements of an
+/// iterator.
+///
+/// This trait is used to implement [`Iterator::product()`]. Types which implement
+/// this trait can be generated by using the [`product()`] method on an iterator.
+/// Like [`FromIterator`], this trait should rarely be called directly.
+///
+/// [`product()`]: Iterator::product
+/// [`FromIterator`]: iter::FromIterator
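+///
+/// # Examples
+///
+/// A small illustration, via [`Iterator::product()`]:
+///
+/// ```
+/// let factorial: u32 = (1..=5).product();
+/// assert_eq!(factorial, 120);
+/// ```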
+#[stable(feature = "iter_arith_traits", since = "1.12.0")]
+pub trait Product<A = Self>: Sized {
+ /// Method which takes an iterator and generates `Self` from the elements by
+ /// multiplying the items.
+ #[stable(feature = "iter_arith_traits", since = "1.12.0")]
+ fn product<I: Iterator<Item = A>>(iter: I) -> Self;
+}
+
+macro_rules! integer_sum_product {
+ (@impls $zero:expr, $one:expr, #[$attr:meta], $($a:ty)*) => ($(
+ #[$attr]
+ impl Sum for $a {
+ fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
+ iter.fold(
+ $zero,
+ #[rustc_inherit_overflow_checks]
+ |a, b| a + b,
+ )
+ }
+ }
+
+ #[$attr]
+ impl Product for $a {
+ fn product<I: Iterator<Item=Self>>(iter: I) -> Self {
+ iter.fold(
+ $one,
+ #[rustc_inherit_overflow_checks]
+ |a, b| a * b,
+ )
+ }
+ }
+
+ #[$attr]
+ impl<'a> Sum<&'a $a> for $a {
+ fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
+ iter.fold(
+ $zero,
+ #[rustc_inherit_overflow_checks]
+ |a, b| a + b,
+ )
+ }
+ }
+
+ #[$attr]
+ impl<'a> Product<&'a $a> for $a {
+ fn product<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
+ iter.fold(
+ $one,
+ #[rustc_inherit_overflow_checks]
+ |a, b| a * b,
+ )
+ }
+ }
+ )*);
+ ($($a:ty)*) => (
+ integer_sum_product!(@impls 0, 1,
+ #[stable(feature = "iter_arith_traits", since = "1.12.0")],
+ $($a)*);
+ integer_sum_product!(@impls Wrapping(0), Wrapping(1),
+ #[stable(feature = "wrapping_iter_arith", since = "1.14.0")],
+ $(Wrapping<$a>)*);
+ );
+}
+
+macro_rules! float_sum_product {
+ ($($a:ident)*) => ($(
+ #[stable(feature = "iter_arith_traits", since = "1.12.0")]
+ impl Sum for $a {
+ fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
+ iter.fold(
+ 0.0,
+ #[rustc_inherit_overflow_checks]
+ |a, b| a + b,
+ )
+ }
+ }
+
+ #[stable(feature = "iter_arith_traits", since = "1.12.0")]
+ impl Product for $a {
+ fn product<I: Iterator<Item=Self>>(iter: I) -> Self {
+ iter.fold(
+ 1.0,
+ #[rustc_inherit_overflow_checks]
+ |a, b| a * b,
+ )
+ }
+ }
+
+ #[stable(feature = "iter_arith_traits", since = "1.12.0")]
+ impl<'a> Sum<&'a $a> for $a {
+ fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
+ iter.fold(
+ 0.0,
+ #[rustc_inherit_overflow_checks]
+ |a, b| a + b,
+ )
+ }
+ }
+
+ #[stable(feature = "iter_arith_traits", since = "1.12.0")]
+ impl<'a> Product<&'a $a> for $a {
+ fn product<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
+ iter.fold(
+ 1.0,
+ #[rustc_inherit_overflow_checks]
+ |a, b| a * b,
+ )
+ }
+ }
+ )*)
+}
+
+integer_sum_product! { i8 i16 i32 i64 i128 isize u8 u16 u32 u64 u128 usize }
+float_sum_product! { f32 f64 }
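+
+// The identities passed to the macros above also determine the result for an
+// empty iterator: `std::iter::empty::<i32>().sum::<i32>()` is `0`, and the
+// corresponding `product` is `1`.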
+
+#[stable(feature = "iter_arith_traits_result", since = "1.16.0")]
+impl<T, U, E> Sum<Result<U, E>> for Result<T, E>
+where
+ T: Sum<U>,
+{
+ /// Takes each element in the [`Iterator`]: if it is an [`Err`], no further
+ /// elements are taken, and the [`Err`] is returned. Should no [`Err`]
+ /// occur, the sum of all elements is returned.
+ ///
+ /// # Examples
+ ///
+ /// This sums up every integer in a vector, rejecting the sum if a negative
+ /// element is encountered:
+ ///
+ /// ```
+ /// let v = vec![1, 2];
+ /// let res: Result<i32, &'static str> = v.iter().map(|&x: &i32|
+ /// if x < 0 { Err("Negative element found") }
+ /// else { Ok(x) }
+ /// ).sum();
+ /// assert_eq!(res, Ok(3));
+ /// ```
+ fn sum<I>(iter: I) -> Result<T, E>
+ where
+ I: Iterator<Item = Result<U, E>>,
+ {
+ iter::try_process(iter, |i| i.sum())
+ }
+}
+
+#[stable(feature = "iter_arith_traits_result", since = "1.16.0")]
+impl<T, U, E> Product<Result<U, E>> for Result<T, E>
+where
+ T: Product<U>,
+{
+ /// Takes each element in the [`Iterator`]: if it is an [`Err`], no further
+ /// elements are taken, and the [`Err`] is returned. Should no [`Err`]
+ /// occur, the product of all elements is returned.
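+    ///
+    /// # Examples
+    ///
+    /// A sketch mirroring the `Sum` example above:
+    ///
+    /// ```
+    /// let v = vec![1, 2, 3];
+    /// let res: Result<i32, &'static str> = v.iter().map(|&x: &i32|
+    ///     if x < 0 { Err("Negative element found") }
+    ///     else { Ok(x) }
+    /// ).product();
+    /// assert_eq!(res, Ok(6));
+    /// ```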
+ fn product<I>(iter: I) -> Result<T, E>
+ where
+ I: Iterator<Item = Result<U, E>>,
+ {
+ iter::try_process(iter, |i| i.product())
+ }
+}
+
+#[stable(feature = "iter_arith_traits_option", since = "1.37.0")]
+impl<T, U> Sum<Option<U>> for Option<T>
+where
+ T: Sum<U>,
+{
+ /// Takes each element in the [`Iterator`]: if it is a [`None`], no further
+ /// elements are taken, and the [`None`] is returned. Should no [`None`]
+ /// occur, the sum of all elements is returned.
+ ///
+ /// # Examples
+ ///
+    /// This sums up the position of the character 'a' in a vector of strings;
+    /// if a word does not contain the character 'a', the operation returns `None`:
+ ///
+ /// ```
+ /// let words = vec!["have", "a", "great", "day"];
+ /// let total: Option<usize> = words.iter().map(|w| w.find('a')).sum();
+ /// assert_eq!(total, Some(5));
+ /// ```
+ fn sum<I>(iter: I) -> Option<T>
+ where
+ I: Iterator<Item = Option<U>>,
+ {
+ iter::try_process(iter, |i| i.sum())
+ }
+}
+
+#[stable(feature = "iter_arith_traits_option", since = "1.37.0")]
+impl<T, U> Product<Option<U>> for Option<T>
+where
+ T: Product<U>,
+{
+ /// Takes each element in the [`Iterator`]: if it is a [`None`], no further
+ /// elements are taken, and the [`None`] is returned. Should no [`None`]
+ /// occur, the product of all elements is returned.
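+    ///
+    /// # Examples
+    ///
+    /// A sketch mirroring the `Sum` example above:
+    ///
+    /// ```
+    /// let words = vec!["have", "a", "great", "day"];
+    /// // "a".find('a') is Some(0), so the whole product collapses to zero.
+    /// let total: Option<usize> = words.iter().map(|w| w.find('a')).product();
+    /// assert_eq!(total, Some(0));
+    /// ```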
+ fn product<I>(iter: I) -> Option<T>
+ where
+ I: Iterator<Item = Option<U>>,
+ {
+ iter::try_process(iter, |i| i.product())
+ }
+}
diff --git a/library/core/src/iter/traits/collect.rs b/library/core/src/iter/traits/collect.rs
new file mode 100644
index 000000000..12ca508be
--- /dev/null
+++ b/library/core/src/iter/traits/collect.rs
@@ -0,0 +1,450 @@
+/// Conversion from an [`Iterator`].
+///
+/// By implementing `FromIterator` for a type, you define how it will be
+/// created from an iterator. This is common for types which describe a
+/// collection of some kind.
+///
+/// If you want to create a collection from the contents of an iterator, the
+/// [`Iterator::collect()`] method is preferred. However, when you need to
+/// specify the container type, [`FromIterator::from_iter()`] can be more
+/// readable than using a turbofish (e.g. `::<Vec<_>>()`). See the
+/// [`Iterator::collect()`] documentation for more examples of its use.
+///
+/// See also: [`IntoIterator`].
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// let five_fives = std::iter::repeat(5).take(5);
+///
+/// let v = Vec::from_iter(five_fives);
+///
+/// assert_eq!(v, vec![5, 5, 5, 5, 5]);
+/// ```
+///
+/// Using [`Iterator::collect()`] to implicitly use `FromIterator`:
+///
+/// ```
+/// let five_fives = std::iter::repeat(5).take(5);
+///
+/// let v: Vec<i32> = five_fives.collect();
+///
+/// assert_eq!(v, vec![5, 5, 5, 5, 5]);
+/// ```
+///
+/// Using [`FromIterator::from_iter()`] as a more readable alternative to
+/// [`Iterator::collect()`]:
+///
+/// ```
+/// use std::collections::VecDeque;
+/// let first = (0..10).collect::<VecDeque<i32>>();
+/// let second = VecDeque::from_iter(0..10);
+///
+/// assert_eq!(first, second);
+/// ```
+///
+/// Implementing `FromIterator` for your type:
+///
+/// ```
+/// // A sample collection, that's just a wrapper over Vec<T>
+/// #[derive(Debug)]
+/// struct MyCollection(Vec<i32>);
+///
+/// // Let's give it some methods so we can create one and add things
+/// // to it.
+/// impl MyCollection {
+/// fn new() -> MyCollection {
+/// MyCollection(Vec::new())
+/// }
+///
+/// fn add(&mut self, elem: i32) {
+/// self.0.push(elem);
+/// }
+/// }
+///
+/// // and we'll implement FromIterator
+/// impl FromIterator<i32> for MyCollection {
+/// fn from_iter<I: IntoIterator<Item=i32>>(iter: I) -> Self {
+/// let mut c = MyCollection::new();
+///
+/// for i in iter {
+/// c.add(i);
+/// }
+///
+/// c
+/// }
+/// }
+///
+/// // Now we can make a new iterator...
+/// let iter = (0..5).into_iter();
+///
+/// // ... and make a MyCollection out of it
+/// let c = MyCollection::from_iter(iter);
+///
+/// assert_eq!(c.0, vec![0, 1, 2, 3, 4]);
+///
+/// // collect works too!
+///
+/// let iter = (0..5).into_iter();
+/// let c: MyCollection = iter.collect();
+///
+/// assert_eq!(c.0, vec![0, 1, 2, 3, 4]);
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(
+ on(
+ _Self = "[{A}]",
+ message = "a slice of type `{Self}` cannot be built since `{Self}` has no definite size",
+ label = "try explicitly collecting into a `Vec<{A}>`",
+ ),
+ on(
+ all(A = "{integer}", any(_Self = "[{integral}]",)),
+ message = "a slice of type `{Self}` cannot be built since `{Self}` has no definite size",
+ label = "try explicitly collecting into a `Vec<{A}>`",
+ ),
+ on(
+ _Self = "[{A}; _]",
+ message = "an array of type `{Self}` cannot be built directly from an iterator",
+ label = "try collecting into a `Vec<{A}>`, then using `.try_into()`",
+ ),
+ on(
+ all(A = "{integer}", any(_Self = "[{integral}; _]",)),
+ message = "an array of type `{Self}` cannot be built directly from an iterator",
+ label = "try collecting into a `Vec<{A}>`, then using `.try_into()`",
+ ),
+ message = "a value of type `{Self}` cannot be built from an iterator \
+ over elements of type `{A}`",
+ label = "value of type `{Self}` cannot be built from `std::iter::Iterator<Item={A}>`"
+)]
+#[rustc_diagnostic_item = "FromIterator"]
+pub trait FromIterator<A>: Sized {
+ /// Creates a value from an iterator.
+ ///
+ /// See the [module-level documentation] for more.
+ ///
+ /// [module-level documentation]: crate::iter
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let five_fives = std::iter::repeat(5).take(5);
+ ///
+ /// let v = Vec::from_iter(five_fives);
+ ///
+ /// assert_eq!(v, vec![5, 5, 5, 5, 5]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn from_iter<T: IntoIterator<Item = A>>(iter: T) -> Self;
+}
+
+/// Conversion into an [`Iterator`].
+///
+/// By implementing `IntoIterator` for a type, you define how it will be
+/// converted to an iterator. This is common for types which describe a
+/// collection of some kind.
+///
+/// One benefit of implementing `IntoIterator` is that your type will [work
+/// with Rust's `for` loop syntax](crate::iter#for-loops-and-intoiterator).
+///
+/// See also: [`FromIterator`].
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// let v = [1, 2, 3];
+/// let mut iter = v.into_iter();
+///
+/// assert_eq!(Some(1), iter.next());
+/// assert_eq!(Some(2), iter.next());
+/// assert_eq!(Some(3), iter.next());
+/// assert_eq!(None, iter.next());
+/// ```
+/// Implementing `IntoIterator` for your type:
+///
+/// ```
+/// // A sample collection, that's just a wrapper over Vec<T>
+/// #[derive(Debug)]
+/// struct MyCollection(Vec<i32>);
+///
+/// // Let's give it some methods so we can create one and add things
+/// // to it.
+/// impl MyCollection {
+/// fn new() -> MyCollection {
+/// MyCollection(Vec::new())
+/// }
+///
+/// fn add(&mut self, elem: i32) {
+/// self.0.push(elem);
+/// }
+/// }
+///
+/// // and we'll implement IntoIterator
+/// impl IntoIterator for MyCollection {
+/// type Item = i32;
+/// type IntoIter = std::vec::IntoIter<Self::Item>;
+///
+/// fn into_iter(self) -> Self::IntoIter {
+/// self.0.into_iter()
+/// }
+/// }
+///
+/// // Now we can make a new collection...
+/// let mut c = MyCollection::new();
+///
+/// // ... add some stuff to it ...
+/// c.add(0);
+/// c.add(1);
+/// c.add(2);
+///
+/// // ... and then turn it into an Iterator:
+/// for (i, n) in c.into_iter().enumerate() {
+/// assert_eq!(i as i32, n);
+/// }
+/// ```
+///
+/// It is common to use `IntoIterator` as a trait bound. This allows
+/// the input collection type to change, so long as it is still an
+/// iterator. Additional bounds can be specified by restricting on
+/// `Item`:
+///
+/// ```rust
+/// fn collect_as_strings<T>(collection: T) -> Vec<String>
+/// where
+/// T: IntoIterator,
+/// T::Item: std::fmt::Debug,
+/// {
+/// collection
+/// .into_iter()
+/// .map(|item| format!("{item:?}"))
+/// .collect()
+/// }
+/// ```
+#[rustc_diagnostic_item = "IntoIterator"]
+#[rustc_skip_array_during_method_dispatch]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait IntoIterator {
+ /// The type of the elements being iterated over.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ type Item;
+
+ /// Which kind of iterator are we turning this into?
+ #[stable(feature = "rust1", since = "1.0.0")]
+ type IntoIter: Iterator<Item = Self::Item>;
+
+ /// Creates an iterator from a value.
+ ///
+ /// See the [module-level documentation] for more.
+ ///
+ /// [module-level documentation]: crate::iter
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let v = [1, 2, 3];
+ /// let mut iter = v.into_iter();
+ ///
+ /// assert_eq!(Some(1), iter.next());
+ /// assert_eq!(Some(2), iter.next());
+ /// assert_eq!(Some(3), iter.next());
+ /// assert_eq!(None, iter.next());
+ /// ```
+ #[lang = "into_iter"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn into_iter(self) -> Self::IntoIter;
+}
+
+#[rustc_const_unstable(feature = "const_intoiterator_identity", issue = "90603")]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: ~const Iterator> const IntoIterator for I {
+ type Item = I::Item;
+ type IntoIter = I;
+
+ #[inline]
+ fn into_iter(self) -> I {
+ self
+ }
+}
+
+/// Extend a collection with the contents of an iterator.
+///
+/// Iterators produce a series of values, and collections can also be thought
+/// of as a series of values. The `Extend` trait bridges this gap, allowing you
+/// to extend a collection by including the contents of that iterator. When
+/// extending a collection with an already existing key, that entry is updated
+/// or, in the case of collections that permit multiple entries with equal
+/// keys, that entry is inserted.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// // You can extend a String with some chars:
+/// let mut message = String::from("The first three letters are: ");
+///
+/// message.extend(&['a', 'b', 'c']);
+///
+/// assert_eq!("abc", &message[29..32]);
+/// ```
+///
+/// Implementing `Extend`:
+///
+/// ```
+/// // A sample collection, that's just a wrapper over Vec<T>
+/// #[derive(Debug)]
+/// struct MyCollection(Vec<i32>);
+///
+/// // Let's give it some methods so we can create one and add things
+/// // to it.
+/// impl MyCollection {
+/// fn new() -> MyCollection {
+/// MyCollection(Vec::new())
+/// }
+///
+/// fn add(&mut self, elem: i32) {
+/// self.0.push(elem);
+/// }
+/// }
+///
+/// // since MyCollection has a list of i32s, we implement Extend for i32
+/// impl Extend<i32> for MyCollection {
+///
+/// // This is a bit simpler with the concrete type signature: we can call
+/// // extend on anything which can be turned into an Iterator which gives
+/// // us i32s. Because we need i32s to put into MyCollection.
+/// fn extend<T: IntoIterator<Item=i32>>(&mut self, iter: T) {
+///
+/// // The implementation is very straightforward: loop through the
+/// // iterator, and add() each element to ourselves.
+/// for elem in iter {
+/// self.add(elem);
+/// }
+/// }
+/// }
+///
+/// let mut c = MyCollection::new();
+///
+/// c.add(5);
+/// c.add(6);
+/// c.add(7);
+///
+/// // let's extend our collection with three more numbers
+/// c.extend(vec![1, 2, 3]);
+///
+/// // we've added these elements onto the end
+/// assert_eq!("MyCollection([5, 6, 7, 1, 2, 3])", format!("{c:?}"));
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait Extend<A> {
+ /// Extends a collection with the contents of an iterator.
+ ///
+ /// As this is the only required method for this trait, the [trait-level] docs
+ /// contain more details.
+ ///
+ /// [trait-level]: Extend
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // You can extend a String with some chars:
+ /// let mut message = String::from("abc");
+ ///
+ /// message.extend(['d', 'e', 'f'].iter());
+ ///
+ /// assert_eq!("abcdef", &message);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn extend<T: IntoIterator<Item = A>>(&mut self, iter: T);
+
+ /// Extends a collection with exactly one element.
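+    ///
+    /// A minimal sketch (the unstable `extend_one` feature is required):
+    ///
+    /// ```
+    /// #![feature(extend_one)]
+    ///
+    /// let mut v = vec![1, 2];
+    /// v.extend_one(3);
+    /// assert_eq!(v, [1, 2, 3]);
+    /// ```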
+ #[unstable(feature = "extend_one", issue = "72631")]
+ fn extend_one(&mut self, item: A) {
+ self.extend(Some(item));
+ }
+
+ /// Reserves capacity in a collection for the given number of additional elements.
+ ///
+ /// The default implementation does nothing.
+ #[unstable(feature = "extend_one", issue = "72631")]
+ fn extend_reserve(&mut self, additional: usize) {
+ let _ = additional;
+ }
+}
+
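+// `()` has no state to update; extending it just drives the iterator for its
+// side effects.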
+#[stable(feature = "extend_for_unit", since = "1.28.0")]
+impl Extend<()> for () {
+ fn extend<T: IntoIterator<Item = ()>>(&mut self, iter: T) {
+ iter.into_iter().for_each(drop)
+ }
+ fn extend_one(&mut self, _item: ()) {}
+}
+
+#[stable(feature = "extend_for_tuple", since = "1.56.0")]
+impl<A, B, ExtendA, ExtendB> Extend<(A, B)> for (ExtendA, ExtendB)
+where
+ ExtendA: Extend<A>,
+ ExtendB: Extend<B>,
+{
+    /// Allows extending a tuple of collections, where each component itself implements `Extend`.
+ ///
+ /// See also: [`Iterator::unzip`]
+ ///
+ /// # Examples
+ /// ```
+ /// let mut tuple = (vec![0], vec![1]);
+ /// tuple.extend([(2, 3), (4, 5), (6, 7)]);
+ /// assert_eq!(tuple.0, [0, 2, 4, 6]);
+ /// assert_eq!(tuple.1, [1, 3, 5, 7]);
+ ///
+ /// // also allows for arbitrarily nested tuples as elements
+ /// let mut nested_tuple = (vec![1], (vec![2], vec![3]));
+ /// nested_tuple.extend([(4, (5, 6)), (7, (8, 9))]);
+ ///
+ /// let (a, (b, c)) = nested_tuple;
+ /// assert_eq!(a, [1, 4, 7]);
+ /// assert_eq!(b, [2, 5, 8]);
+ /// assert_eq!(c, [3, 6, 9]);
+ /// ```
+ fn extend<T: IntoIterator<Item = (A, B)>>(&mut self, into_iter: T) {
+ let (a, b) = self;
+ let iter = into_iter.into_iter();
+
+ fn extend<'a, A, B>(
+ a: &'a mut impl Extend<A>,
+ b: &'a mut impl Extend<B>,
+ ) -> impl FnMut((), (A, B)) + 'a {
+ move |(), (t, u)| {
+ a.extend_one(t);
+ b.extend_one(u);
+ }
+ }
+
+ let (lower_bound, _) = iter.size_hint();
+ if lower_bound > 0 {
+ a.extend_reserve(lower_bound);
+ b.extend_reserve(lower_bound);
+ }
+
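+        // `fold` drives the iteration internally, so iterators with a
+        // specialized `fold` can feed both collections without an external loop.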
+ iter.fold((), extend(a, b));
+ }
+
+ fn extend_one(&mut self, item: (A, B)) {
+ self.0.extend_one(item.0);
+ self.1.extend_one(item.1);
+ }
+
+ fn extend_reserve(&mut self, additional: usize) {
+ self.0.extend_reserve(additional);
+ self.1.extend_reserve(additional);
+ }
+}
diff --git a/library/core/src/iter/traits/double_ended.rs b/library/core/src/iter/traits/double_ended.rs
new file mode 100644
index 000000000..bdf94c792
--- /dev/null
+++ b/library/core/src/iter/traits/double_ended.rs
@@ -0,0 +1,374 @@
+use crate::ops::{ControlFlow, Try};
+
+/// An iterator able to yield elements from both ends.
+///
+/// Something that implements `DoubleEndedIterator` has one extra capability
+/// over something that implements [`Iterator`]: the ability to also take
+/// `Item`s from the back, as well as the front.
+///
+/// It is important to note that the front and the back work on the same range,
+/// and do not cross: iteration is over when they meet in the middle.
+///
+/// In a similar fashion to the [`Iterator`] protocol, once a
+/// `DoubleEndedIterator` returns [`None`] from a [`next_back()`], calling it
+/// again may or may not ever return [`Some`] again. [`next()`] and
+/// [`next_back()`] are interchangeable for this purpose.
+///
+/// [`next_back()`]: DoubleEndedIterator::next_back
+/// [`next()`]: Iterator::next
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// let numbers = vec![1, 2, 3, 4, 5, 6];
+///
+/// let mut iter = numbers.iter();
+///
+/// assert_eq!(Some(&1), iter.next());
+/// assert_eq!(Some(&6), iter.next_back());
+/// assert_eq!(Some(&5), iter.next_back());
+/// assert_eq!(Some(&2), iter.next());
+/// assert_eq!(Some(&3), iter.next());
+/// assert_eq!(Some(&4), iter.next());
+/// assert_eq!(None, iter.next());
+/// assert_eq!(None, iter.next_back());
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "DoubleEndedIterator")]
+pub trait DoubleEndedIterator: Iterator {
+ /// Removes and returns an element from the end of the iterator.
+ ///
+ /// Returns `None` when there are no more elements.
+ ///
+ /// The [trait-level] docs contain more details.
+ ///
+ /// [trait-level]: DoubleEndedIterator
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let numbers = vec![1, 2, 3, 4, 5, 6];
+ ///
+ /// let mut iter = numbers.iter();
+ ///
+ /// assert_eq!(Some(&1), iter.next());
+ /// assert_eq!(Some(&6), iter.next_back());
+ /// assert_eq!(Some(&5), iter.next_back());
+ /// assert_eq!(Some(&2), iter.next());
+ /// assert_eq!(Some(&3), iter.next());
+ /// assert_eq!(Some(&4), iter.next());
+ /// assert_eq!(None, iter.next());
+ /// assert_eq!(None, iter.next_back());
+ /// ```
+ ///
+ /// # Remarks
+ ///
+ /// The elements yielded by `DoubleEndedIterator`'s methods may differ from
+ /// the ones yielded by [`Iterator`]'s methods:
+ ///
+ /// ```
+ /// let vec = vec![(1, 'a'), (1, 'b'), (1, 'c'), (2, 'a'), (2, 'b')];
+ /// let uniq_by_fst_comp = || {
+ /// let mut seen = std::collections::HashSet::new();
+ /// vec.iter().copied().filter(move |x| seen.insert(x.0))
+ /// };
+ ///
+ /// assert_eq!(uniq_by_fst_comp().last(), Some((2, 'a')));
+ /// assert_eq!(uniq_by_fst_comp().next_back(), Some((2, 'b')));
+ ///
+ /// assert_eq!(
+ /// uniq_by_fst_comp().fold(vec![], |mut v, x| {v.push(x); v}),
+ /// vec![(1, 'a'), (2, 'a')]
+ /// );
+ /// assert_eq!(
+ /// uniq_by_fst_comp().rfold(vec![], |mut v, x| {v.push(x); v}),
+ /// vec![(2, 'b'), (1, 'c')]
+ /// );
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn next_back(&mut self) -> Option<Self::Item>;
+
+ /// Advances the iterator from the back by `n` elements.
+ ///
+ /// `advance_back_by` is the reverse version of [`advance_by`]. This method will
+ /// eagerly skip `n` elements starting from the back by calling [`next_back`] up
+ /// to `n` times until [`None`] is encountered.
+ ///
+ /// `advance_back_by(n)` will return [`Ok(())`] if the iterator successfully advances by
+ /// `n` elements, or [`Err(k)`] if [`None`] is encountered, where `k` is the number of
+ /// elements the iterator is advanced by before running out of elements (i.e. the length
+ /// of the iterator). Note that `k` is always less than `n`.
+ ///
+ /// Calling `advance_back_by(0)` can do meaningful work, for example [`Flatten`] can advance its
+ /// outer iterator until it finds an inner iterator that is not empty, which then often
+ /// allows it to return a more accurate `size_hint()` than in its initial state.
+ ///
+ /// [`advance_by`]: Iterator::advance_by
+ /// [`Flatten`]: crate::iter::Flatten
+ /// [`next_back`]: DoubleEndedIterator::next_back
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(iter_advance_by)]
+ ///
+ /// let a = [3, 4, 5, 6];
+ /// let mut iter = a.iter();
+ ///
+ /// assert_eq!(iter.advance_back_by(2), Ok(()));
+ /// assert_eq!(iter.next_back(), Some(&4));
+ /// assert_eq!(iter.advance_back_by(0), Ok(()));
+ /// assert_eq!(iter.advance_back_by(100), Err(1)); // only `&3` was skipped
+ /// ```
+ ///
+ /// [`Ok(())`]: Ok
+ /// [`Err(k)`]: Err
+ #[inline]
+ #[unstable(feature = "iter_advance_by", reason = "recently added", issue = "77404")]
+ fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ for i in 0..n {
+ self.next_back().ok_or(i)?;
+ }
+ Ok(())
+ }
+
+ /// Returns the `n`th element from the end of the iterator.
+ ///
+ /// This is essentially the reversed version of [`Iterator::nth()`].
+ /// As with most indexing operations, the count starts from zero, so
+ /// `nth_back(0)` returns the first value from the end, `nth_back(1)` the
+ /// second, and so on.
+ ///
+ /// Note that all elements between the end and the returned element will be
+ /// consumed, including the returned element. This also means that calling
+ /// `nth_back(0)` multiple times on the same iterator will return different
+ /// elements.
+ ///
+ /// `nth_back()` will return [`None`] if `n` is greater than or equal to the
+ /// length of the iterator.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ /// assert_eq!(a.iter().nth_back(2), Some(&1));
+ /// ```
+ ///
+ /// Calling `nth_back()` multiple times doesn't rewind the iterator:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = a.iter();
+ ///
+ /// assert_eq!(iter.nth_back(1), Some(&2));
+ /// assert_eq!(iter.nth_back(1), None);
+ /// ```
+ ///
+ /// Returning `None` if there are fewer than `n + 1` elements:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ /// assert_eq!(a.iter().nth_back(10), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "iter_nth_back", since = "1.37.0")]
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ self.advance_back_by(n).ok()?;
+ self.next_back()
+ }
+
+ /// This is the reverse version of [`Iterator::try_fold()`]: it takes
+ /// elements starting from the back of the iterator.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = ["1", "2", "3"];
+ /// let sum = a.iter()
+ /// .map(|&s| s.parse::<i32>())
+ /// .try_rfold(0, |acc, x| x.and_then(|y| Ok(acc + y)));
+ /// assert_eq!(sum, Ok(6));
+ /// ```
+ ///
+ /// Short-circuiting:
+ ///
+ /// ```
+ /// let a = ["1", "rust", "3"];
+ /// let mut it = a.iter();
+ /// let sum = it
+ /// .by_ref()
+ /// .map(|&s| s.parse::<i32>())
+ /// .try_rfold(0, |acc, x| x.and_then(|y| Ok(acc + y)));
+ /// assert!(sum.is_err());
+ ///
+ /// // Because it short-circuited, the remaining elements are still
+ /// // available through the iterator.
+ /// assert_eq!(it.next_back(), Some(&"1"));
+ /// ```
+ #[inline]
+ #[stable(feature = "iterator_try_fold", since = "1.27.0")]
+ fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ let mut accum = init;
+ while let Some(x) = self.next_back() {
+ accum = f(accum, x)?;
+ }
+ try { accum }
+ }
+
+ /// An iterator method that reduces the iterator's elements to a single,
+ /// final value, starting from the back.
+ ///
+ /// This is the reverse version of [`Iterator::fold()`]: it takes elements
+ /// starting from the back of the iterator.
+ ///
+ /// `rfold()` takes two arguments: an initial value, and a closure with two
+ /// arguments: an 'accumulator', and an element. The closure returns the value that
+ /// the accumulator should have for the next iteration.
+ ///
+ /// The initial value is the value the accumulator will have on the first
+ /// call.
+ ///
+ /// After applying this closure to every element of the iterator, `rfold()`
+ /// returns the accumulator.
+ ///
+ /// This operation is sometimes called 'reduce' or 'inject'.
+ ///
+ /// Folding is useful whenever you have a collection of something, and want
+ /// to produce a single value from it.
+ ///
+ /// Note: `rfold()` combines elements in a *right-associative* fashion. For associative
+ /// operators like `+`, the order the elements are combined in is not important, but for non-associative
+ /// operators like `-` the order will affect the final result.
+ /// For a *left-associative* version of `rfold()`, see [`Iterator::fold()`].
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// // the sum of all of the elements of a
+ /// let sum = a.iter()
+ /// .rfold(0, |acc, &x| acc + x);
+ ///
+ /// assert_eq!(sum, 6);
+ /// ```
+ ///
+ /// This example demonstrates the right-associative nature of `rfold()`:
+ /// it builds a string, starting with an initial value
+ /// and continuing with each element from the back until the front:
+ ///
+ /// ```
+ /// let numbers = [1, 2, 3, 4, 5];
+ ///
+ /// let zero = "0".to_string();
+ ///
+ /// let result = numbers.iter().rfold(zero, |acc, &x| {
+ /// format!("({x} + {acc})")
+ /// });
+ ///
+ /// assert_eq!(result, "(1 + (2 + (3 + (4 + (5 + 0)))))");
+ /// ```
+ #[doc(alias = "foldr")]
+ #[inline]
+ #[stable(feature = "iter_rfold", since = "1.27.0")]
+ fn rfold<B, F>(mut self, init: B, mut f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ let mut accum = init;
+ while let Some(x) = self.next_back() {
+ accum = f(accum, x);
+ }
+ accum
+ }
+
+ /// Searches for an element of an iterator from the back that satisfies a predicate.
+ ///
+ /// `rfind()` takes a closure that returns `true` or `false`. It applies
+ /// this closure to each element of the iterator, starting at the end, and if any
+ /// of them return `true`, then `rfind()` returns [`Some(element)`]. If they all return
+ /// `false`, it returns [`None`].
+ ///
+ /// `rfind()` is short-circuiting; in other words, it will stop processing
+ /// as soon as the closure returns `true`.
+ ///
+ /// Because `rfind()` takes a reference, and many iterators iterate over
+ /// references, this leads to a possibly confusing situation where the
+ /// argument is a double reference. You can see this effect in the
+ /// examples below, with `&&x`.
+ ///
+ /// [`Some(element)`]: Some
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// assert_eq!(a.iter().rfind(|&&x| x == 2), Some(&2));
+ ///
+ /// assert_eq!(a.iter().rfind(|&&x| x == 5), None);
+ /// ```
+ ///
+ /// Stopping at the first `true`:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = a.iter();
+ ///
+ /// assert_eq!(iter.rfind(|&&x| x == 2), Some(&2));
+ ///
+ /// // we can still use `iter`, as there are more elements.
+ /// assert_eq!(iter.next_back(), Some(&1));
+ /// ```
+ #[inline]
+ #[stable(feature = "iter_rfind", since = "1.27.0")]
+ fn rfind<P>(&mut self, predicate: P) -> Option<Self::Item>
+ where
+ Self: Sized,
+ P: FnMut(&Self::Item) -> bool,
+ {
+ #[inline]
+ fn check<T>(mut predicate: impl FnMut(&T) -> bool) -> impl FnMut((), T) -> ControlFlow<T> {
+ move |(), x| {
+ if predicate(&x) { ControlFlow::Break(x) } else { ControlFlow::CONTINUE }
+ }
+ }
+
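+ // Internal iteration: fold from the back until the predicate matches, then
+ // short-circuit out of the fold with the matching element.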
+ self.try_rfold((), check(predicate)).break_value()
+ }
+}
+
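+// A `&mut` reference to a `DoubleEndedIterator` is itself double-ended;
+// every method simply delegates to the underlying iterator.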
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, I: DoubleEndedIterator + ?Sized> DoubleEndedIterator for &'a mut I {
+ fn next_back(&mut self) -> Option<I::Item> {
+ (**self).next_back()
+ }
+ fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ (**self).advance_back_by(n)
+ }
+ fn nth_back(&mut self, n: usize) -> Option<I::Item> {
+ (**self).nth_back(n)
+ }
+}
diff --git a/library/core/src/iter/traits/exact_size.rs b/library/core/src/iter/traits/exact_size.rs
new file mode 100644
index 000000000..1757e37ec
--- /dev/null
+++ b/library/core/src/iter/traits/exact_size.rs
@@ -0,0 +1,151 @@
+/// An iterator that knows its exact length.
+///
+/// Many [`Iterator`]s don't know how many times they will iterate, but some do.
+/// If an iterator knows how many times it can iterate, providing access to
+/// that information can be useful. For example, if you want to iterate
+/// backwards, a good start is to know where the end is.
+///
+/// When implementing an `ExactSizeIterator`, you must also implement
+/// [`Iterator`]. When doing so, the implementation of [`Iterator::size_hint`]
+/// *must* return the exact size of the iterator.
+///
+/// The [`len`] method has a default implementation, so you usually shouldn't
+/// implement it. However, you may be able to provide a more performant
+/// implementation than the default, so overriding it in this case makes sense.
+///
+/// Note that this trait is a safe trait and as such does *not* and *cannot*
+/// guarantee that the returned length is correct. This means that `unsafe`
+/// code **must not** rely on the correctness of [`Iterator::size_hint`]. The
+/// unstable and unsafe [`TrustedLen`](super::marker::TrustedLen) trait gives
+/// this additional guarantee.
+///
+/// [`len`]: ExactSizeIterator::len
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// // a finite range knows exactly how many times it will iterate
+/// let five = 0..5;
+///
+/// assert_eq!(5, five.len());
+/// ```
+///
+/// In the [module-level docs], we implemented an [`Iterator`], `Counter`.
+/// Let's implement `ExactSizeIterator` for it as well:
+///
+/// [module-level docs]: crate::iter
+///
+/// ```
+/// # struct Counter {
+/// # count: usize,
+/// # }
+/// # impl Counter {
+/// # fn new() -> Counter {
+/// # Counter { count: 0 }
+/// # }
+/// # }
+/// # impl Iterator for Counter {
+/// # type Item = usize;
+/// # fn next(&mut self) -> Option<Self::Item> {
+/// # self.count += 1;
+/// # if self.count < 6 {
+/// # Some(self.count)
+/// # } else {
+/// # None
+/// # }
+/// # }
+/// # }
+/// impl ExactSizeIterator for Counter {
+/// // We can easily calculate the remaining number of iterations.
+/// fn len(&self) -> usize {
+/// 5 - self.count
+/// }
+/// }
+///
+/// // And now we can use it!
+///
+/// let mut counter = Counter::new();
+///
+/// assert_eq!(5, counter.len());
+/// let _ = counter.next();
+/// assert_eq!(4, counter.len());
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait ExactSizeIterator: Iterator {
+ /// Returns the exact remaining length of the iterator.
+ ///
+ /// The implementation ensures that the iterator will return [`Some(T)`]
+ /// exactly `len()` more times before returning [`None`].
+ /// This method has a default implementation, so you usually should not
+ /// implement it directly. However, if you can provide a more efficient
+ /// implementation, you can do so. See the [trait-level] docs for an
+ /// example.
+ ///
+ /// This function has the same safety guarantees as the
+ /// [`Iterator::size_hint`] function.
+ ///
+ /// [trait-level]: ExactSizeIterator
+ /// [`Some(T)`]: Some
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // a finite range knows exactly how many times it will iterate
+ /// let mut range = 0..5;
+ ///
+ /// assert_eq!(5, range.len());
+ /// let _ = range.next();
+ /// assert_eq!(4, range.len());
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn len(&self) -> usize {
+ let (lower, upper) = self.size_hint();
+ // Note: This assertion is overly defensive, but it checks the invariant
+ // guaranteed by the trait. If this trait were rust-internal,
+ // we could use debug_assert!; assert_eq! will check all Rust user
+ // implementations too.
+ assert_eq!(upper, Some(lower));
+ lower
+ }
+
+ /// Returns `true` if the iterator is empty.
+ ///
+ /// This method has a default implementation using
+ /// [`ExactSizeIterator::len()`], so you don't need to implement it yourself.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(exact_size_is_empty)]
+ ///
+ /// let mut one_element = std::iter::once(0);
+ /// assert!(!one_element.is_empty());
+ ///
+ /// assert_eq!(one_element.next(), Some(0));
+ /// assert!(one_element.is_empty());
+ ///
+ /// assert_eq!(one_element.next(), None);
+ /// ```
+ #[inline]
+ #[unstable(feature = "exact_size_is_empty", issue = "35428")]
+ fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+}
+
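+// `&mut I` knows its exact length whenever `I` does; both methods delegate
+// to the underlying iterator.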
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: ExactSizeIterator + ?Sized> ExactSizeIterator for &mut I {
+ fn len(&self) -> usize {
+ (**self).len()
+ }
+ fn is_empty(&self) -> bool {
+ (**self).is_empty()
+ }
+}
diff --git a/library/core/src/iter/traits/iterator.rs b/library/core/src/iter/traits/iterator.rs
new file mode 100644
index 000000000..275412b57
--- /dev/null
+++ b/library/core/src/iter/traits/iterator.rs
@@ -0,0 +1,3836 @@
+use crate::array;
+use crate::cmp::{self, Ordering};
+use crate::ops::{ChangeOutputType, ControlFlow, FromResidual, Residual, Try};
+
+use super::super::try_process;
+use super::super::ByRefSized;
+use super::super::TrustedRandomAccessNoCoerce;
+use super::super::{Chain, Cloned, Copied, Cycle, Enumerate, Filter, FilterMap, Fuse};
+use super::super::{FlatMap, Flatten};
+use super::super::{FromIterator, Intersperse, IntersperseWith, Product, Sum, Zip};
+use super::super::{
+ Inspect, Map, MapWhile, Peekable, Rev, Scan, Skip, SkipWhile, StepBy, Take, TakeWhile,
+};
+
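+// Compile-time check that `Iterator` stays object-safe: this function only
+// type-checks if `dyn Iterator<Item = ()>` is a valid type.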
+fn _assert_is_object_safe(_: &dyn Iterator<Item = ()>) {}
+
+/// An interface for dealing with iterators.
+///
+/// This is the main iterator trait. For more about the concept of iterators
+/// generally, please see the [module-level documentation]. In particular, you
+/// may want to know how to [implement `Iterator`][impl].
+///
+/// [module-level documentation]: crate::iter
+/// [impl]: crate::iter#implementing-iterator
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(
+ on(
+ _Self = "std::ops::RangeTo<Idx>",
+ label = "if you meant to iterate until a value, add a starting value",
+ note = "`..end` is a `RangeTo`, which cannot be iterated on; you might have meant to have a \
+ bounded `Range`: `0..end`"
+ ),
+ on(
+ _Self = "std::ops::RangeToInclusive<Idx>",
+ label = "if you meant to iterate until a value (including it), add a starting value",
+ note = "`..=end` is a `RangeToInclusive`, which cannot be iterated on; you might have meant \
+ to have a bounded `RangeInclusive`: `0..=end`"
+ ),
+ on(
+ _Self = "[]",
+ label = "`{Self}` is not an iterator; try calling `.into_iter()` or `.iter()`"
+ ),
+ on(_Self = "&[]", label = "`{Self}` is not an iterator; try calling `.iter()`"),
+ on(
+ _Self = "std::vec::Vec<T, A>",
+ label = "`{Self}` is not an iterator; try calling `.into_iter()` or `.iter()`"
+ ),
+ on(
+ _Self = "&str",
+ label = "`{Self}` is not an iterator; try calling `.chars()` or `.bytes()`"
+ ),
+ on(
+ _Self = "std::string::String",
+ label = "`{Self}` is not an iterator; try calling `.chars()` or `.bytes()`"
+ ),
+ on(
+ _Self = "{integral}",
+ note = "if you want to iterate between `start` until a value `end`, use the exclusive range \
+ syntax `start..end` or the inclusive range syntax `start..=end`"
+ ),
+ label = "`{Self}` is not an iterator",
+ message = "`{Self}` is not an iterator"
+)]
+#[doc(notable_trait)]
+#[rustc_diagnostic_item = "Iterator"]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub trait Iterator {
+ /// The type of the elements being iterated over.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ type Item;
+
+ /// Advances the iterator and returns the next value.
+ ///
+ /// Returns [`None`] when iteration is finished. Individual iterator
+ /// implementations may choose to resume iteration, and so calling `next()`
+ /// again may or may not eventually start returning [`Some(Item)`] again.
+ ///
+ /// [`Some(Item)`]: Some
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = a.iter();
+ ///
+ /// // A call to next() returns the next value...
+ /// assert_eq!(Some(&1), iter.next());
+ /// assert_eq!(Some(&2), iter.next());
+ /// assert_eq!(Some(&3), iter.next());
+ ///
+ /// // ... and then None once it's over.
+ /// assert_eq!(None, iter.next());
+ ///
+ /// // More calls may or may not return `None`. Here, they always will.
+ /// assert_eq!(None, iter.next());
+ /// assert_eq!(None, iter.next());
+ /// ```
+ #[lang = "next"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn next(&mut self) -> Option<Self::Item>;
+
+ /// Advances the iterator and returns an array containing the next `N` values.
+ ///
+ /// If there are not enough elements to fill the array, then `Err` is returned
+ /// containing an iterator over the remaining elements.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(iter_next_chunk)]
+ ///
+ /// let mut iter = "lorem".chars();
+ ///
+ /// assert_eq!(iter.next_chunk().unwrap(), ['l', 'o']); // N is inferred as 2
+ /// assert_eq!(iter.next_chunk().unwrap(), ['r', 'e', 'm']); // N is inferred as 3
+ /// assert_eq!(iter.next_chunk::<4>().unwrap_err().as_slice(), &[]); // N is explicitly 4
+ /// ```
+ ///
+ /// Split a string and get the first three items.
+ ///
+ /// ```
+ /// #![feature(iter_next_chunk)]
+ ///
+ /// let quote = "not all those who wander are lost";
+ /// let [first, second, third] = quote.split_whitespace().next_chunk().unwrap();
+ /// assert_eq!(first, "not");
+ /// assert_eq!(second, "all");
+ /// assert_eq!(third, "those");
+ /// ```
+ #[inline]
+ #[unstable(feature = "iter_next_chunk", reason = "recently added", issue = "98326")]
+ fn next_chunk<const N: usize>(
+ &mut self,
+ ) -> Result<[Self::Item; N], array::IntoIter<Self::Item, N>>
+ where
+ Self: Sized,
+ {
+ array::iter_next_chunk(self)
+ }
+
+ /// Returns the bounds on the remaining length of the iterator.
+ ///
+ /// Specifically, `size_hint()` returns a tuple where the first element
+ /// is the lower bound, and the second element is the upper bound.
+ ///
+ /// The second half of the tuple that is returned is an <code>[Option]<[usize]></code>.
+ /// A [`None`] here means that either there is no known upper bound, or the
+ /// upper bound is larger than [`usize`].
+ ///
+ /// # Implementation notes
+ ///
+ /// It is not enforced that an iterator implementation yields the declared
+ /// number of elements. A buggy iterator may yield fewer elements than the
+ /// lower bound, or more than the upper bound.
+ ///
+ /// `size_hint()` is primarily intended to be used for optimizations such as
+ /// reserving space for the elements of the iterator, but must not be
+ /// trusted to e.g., omit bounds checks in unsafe code. An incorrect
+ /// implementation of `size_hint()` should not lead to memory safety
+ /// violations.
+ ///
+ /// That said, the implementation should provide a correct estimation,
+ /// because otherwise it would be a violation of the trait's protocol.
+ ///
+ /// The default implementation returns <code>(0, [None])</code> which is correct for any
+ /// iterator.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ /// let mut iter = a.iter();
+ ///
+ /// assert_eq!((3, Some(3)), iter.size_hint());
+ /// let _ = iter.next();
+ /// assert_eq!((2, Some(2)), iter.size_hint());
+ /// ```
+ ///
+ /// A more complex example:
+ ///
+ /// ```
+ /// // The even numbers in the range of zero to nine.
+ /// let iter = (0..10).filter(|x| x % 2 == 0);
+ ///
+ /// // We might iterate from zero to ten times. Knowing that it's five
+ /// // exactly wouldn't be possible without executing filter().
+ /// assert_eq!((0, Some(10)), iter.size_hint());
+ ///
+ /// // Let's add five more numbers with chain()
+ /// let iter = (0..10).filter(|x| x % 2 == 0).chain(15..20);
+ ///
+ /// // now both bounds are increased by five
+ /// assert_eq!((5, Some(15)), iter.size_hint());
+ /// ```
+ ///
+ /// Returning `None` for an upper bound:
+ ///
+ /// ```
+ /// // an infinite iterator has no upper bound
+ /// // and the maximum possible lower bound
+ /// let iter = 0..;
+ ///
+ /// assert_eq!((usize::MAX, None), iter.size_hint());
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, None)
+ }
+
+ /// Consumes the iterator, counting the number of iterations and returning it.
+ ///
+ /// This method will call [`next`] repeatedly until [`None`] is encountered,
+ /// returning the number of times it saw [`Some`]. Note that [`next`] has to be
+ /// called at least once even if the iterator does not have any elements.
+ ///
+ /// [`next`]: Iterator::next
+ ///
+ /// # Overflow Behavior
+ ///
+ /// The method does no guarding against overflows, so counting elements of
+ /// an iterator with more than [`usize::MAX`] elements either produces the
+ /// wrong result or panics. If debug assertions are enabled, a panic is
+ /// guaranteed.
+ ///
+ /// # Panics
+ ///
+ /// This function might panic if the iterator has more than [`usize::MAX`]
+ /// elements.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ /// assert_eq!(a.iter().count(), 3);
+ ///
+ /// let a = [1, 2, 3, 4, 5];
+ /// assert_eq!(a.iter().count(), 5);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn count(self) -> usize
+ where
+ Self: Sized,
+ {
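+ // Count via `fold`. The attribute below lets the `count + 1` addition use
+ // the overflow-check setting of the crate being compiled (rather than
+ // libcore's own), matching the overflow behavior documented above.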
+ self.fold(
+ 0,
+ #[rustc_inherit_overflow_checks]
+ |count, _| count + 1,
+ )
+ }
+
+ /// Consumes the iterator, returning the last element.
+ ///
+ /// This method will evaluate the iterator until it returns [`None`]. While
+ /// doing so, it keeps track of the current element. After [`None`] is
+ /// returned, `last()` will then return the last element it saw.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ /// assert_eq!(a.iter().last(), Some(&3));
+ ///
+ /// let a = [1, 2, 3, 4, 5];
+ /// assert_eq!(a.iter().last(), Some(&5));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn last(self) -> Option<Self::Item>
+ where
+ Self: Sized,
+ {
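+ // `fold` replaces the accumulator with each yielded element in turn, so
+ // the final accumulator is the last element of the iterator.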
+ #[inline]
+ fn some<T>(_: Option<T>, x: T) -> Option<T> {
+ Some(x)
+ }
+
+ self.fold(None, some)
+ }
+
+ /// Advances the iterator by `n` elements.
+ ///
+ /// This method will eagerly skip `n` elements by calling [`next`] up to `n`
+ /// times until [`None`] is encountered.
+ ///
+ /// `advance_by(n)` will return [`Ok(())`][Ok] if the iterator successfully advances by
+ /// `n` elements, or [`Err(k)`][Err] if [`None`] is encountered, where `k` is the number
+ /// of elements the iterator is advanced by before running out of elements (i.e. the
+ /// length of the iterator). Note that `k` is always less than `n`.
+ ///
+ /// Calling `advance_by(0)` can do meaningful work, for example [`Flatten`]
+ /// can advance its outer iterator until it finds an inner iterator that is not empty, which
+ /// then often allows it to return a more accurate `size_hint()` than in its initial state.
+ ///
+ /// [`Flatten`]: crate::iter::Flatten
+ /// [`next`]: Iterator::next
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(iter_advance_by)]
+ ///
+ /// let a = [1, 2, 3, 4];
+ /// let mut iter = a.iter();
+ ///
+ /// assert_eq!(iter.advance_by(2), Ok(()));
+ /// assert_eq!(iter.next(), Some(&3));
+ /// assert_eq!(iter.advance_by(0), Ok(()));
+ /// assert_eq!(iter.advance_by(100), Err(1)); // only `&4` was skipped
+ /// ```
+ #[inline]
+ #[unstable(feature = "iter_advance_by", reason = "recently added", issue = "77404")]
+ fn advance_by(&mut self, n: usize) -> Result<(), usize> {
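+ // Default implementation: call `next()` up to `n` times; if the iterator
+ // runs out after `i` elements, report that count as the error.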
+ for i in 0..n {
+ self.next().ok_or(i)?;
+ }
+ Ok(())
+ }
+
+ /// Returns the `n`th element of the iterator.
+ ///
+ /// Like most indexing operations, the count starts from zero, so `nth(0)`
+ /// returns the first value, `nth(1)` the second, and so on.
+ ///
+ /// Note that all preceding elements, as well as the returned element, will be
+ /// consumed from the iterator. That means that the preceding elements will be
+ /// discarded, and also that calling `nth(0)` multiple times on the same iterator
+ /// will return different elements.
+ ///
+ /// `nth()` will return [`None`] if `n` is greater than or equal to the length of the
+ /// iterator.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ /// assert_eq!(a.iter().nth(1), Some(&2));
+ /// ```
+ ///
+ /// Calling `nth()` multiple times doesn't rewind the iterator:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = a.iter();
+ ///
+ /// assert_eq!(iter.nth(1), Some(&2));
+ /// assert_eq!(iter.nth(1), None);
+ /// ```
+ ///
+ /// Returning `None` if there are fewer than `n + 1` elements:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ /// assert_eq!(a.iter().nth(10), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
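+ // Skip the first `n` elements; `advance_by` returns `Err` if the iterator
+ // is too short, which `ok()?` converts into `None`.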
+ self.advance_by(n).ok()?;
+ self.next()
+ }
+
+ /// Creates an iterator starting at the same point, but stepping by
+ /// the given amount at each iteration.
+ ///
+ /// Note 1: The first element of the iterator will always be returned,
+ /// regardless of the step given.
+ ///
+ /// Note 2: The time at which ignored elements are pulled is not fixed.
+ /// `StepBy` behaves like the sequence `self.next()`, `self.nth(step-1)`,
+ /// `self.nth(step-1)`, …, but is also free to behave like the sequence
+ /// `advance_n_and_return_first(&mut self, step)`,
+ /// `advance_n_and_return_first(&mut self, step)`, …
+ /// Which way is used may change for some iterators for performance reasons.
+ /// The second way will advance the iterator earlier and may consume more items.
+ ///
+ /// `advance_n_and_return_first` is the equivalent of:
+ /// ```
+ /// fn advance_n_and_return_first<I>(iter: &mut I, n: usize) -> Option<I::Item>
+ /// where
+ /// I: Iterator,
+ /// {
+ /// let next = iter.next();
+ /// if n > 1 {
+ /// iter.nth(n - 2);
+ /// }
+ /// next
+ /// }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// The method will panic if the given step is `0`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [0, 1, 2, 3, 4, 5];
+ /// let mut iter = a.iter().step_by(2);
+ ///
+ /// assert_eq!(iter.next(), Some(&0));
+ /// assert_eq!(iter.next(), Some(&2));
+ /// assert_eq!(iter.next(), Some(&4));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "iterator_step_by", since = "1.28.0")]
+ fn step_by(self, step: usize) -> StepBy<Self>
+ where
+ Self: Sized,
+ {
+ StepBy::new(self, step)
+ }
+
+ /// Takes two iterators and creates a new iterator over both in sequence.
+ ///
+ /// `chain()` will return a new iterator which will first iterate over
+ /// values from the first iterator and then over values from the second
+ /// iterator.
+ ///
+ /// In other words, it links two iterators together, in a chain. 🔗
+ ///
+ /// [`once`] is commonly used to adapt a single value into a chain of
+ /// other kinds of iteration.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a1 = [1, 2, 3];
+ /// let a2 = [4, 5, 6];
+ ///
+ /// let mut iter = a1.iter().chain(a2.iter());
+ ///
+ /// assert_eq!(iter.next(), Some(&1));
+ /// assert_eq!(iter.next(), Some(&2));
+ /// assert_eq!(iter.next(), Some(&3));
+ /// assert_eq!(iter.next(), Some(&4));
+ /// assert_eq!(iter.next(), Some(&5));
+ /// assert_eq!(iter.next(), Some(&6));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// Since the argument to `chain()` uses [`IntoIterator`], we can pass
+ /// anything that can be converted into an [`Iterator`], not just an
+ /// [`Iterator`] itself. For example, slices (`&[T]`) implement
+ /// [`IntoIterator`], and so can be passed to `chain()` directly:
+ ///
+ /// ```
+ /// let s1 = &[1, 2, 3];
+ /// let s2 = &[4, 5, 6];
+ ///
+ /// let mut iter = s1.iter().chain(s2);
+ ///
+ /// assert_eq!(iter.next(), Some(&1));
+ /// assert_eq!(iter.next(), Some(&2));
+ /// assert_eq!(iter.next(), Some(&3));
+ /// assert_eq!(iter.next(), Some(&4));
+ /// assert_eq!(iter.next(), Some(&5));
+ /// assert_eq!(iter.next(), Some(&6));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// If you work with Windows API, you may wish to convert [`OsStr`] to `Vec<u16>`:
+ ///
+ /// ```
+ /// #[cfg(windows)]
+ /// fn os_str_to_utf16(s: &std::ffi::OsStr) -> Vec<u16> {
+ /// use std::os::windows::ffi::OsStrExt;
+ /// s.encode_wide().chain(std::iter::once(0)).collect()
+ /// }
+ /// ```
+ ///
+ /// [`once`]: crate::iter::once
+ /// [`OsStr`]: ../../std/ffi/struct.OsStr.html
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn chain<U>(self, other: U) -> Chain<Self, U::IntoIter>
+ where
+ Self: Sized,
+ U: IntoIterator<Item = Self::Item>,
+ {
+ Chain::new(self, other.into_iter())
+ }
+
+ /// 'Zips up' two iterators into a single iterator of pairs.
+ ///
+ /// `zip()` returns a new iterator that will iterate over two other
+ /// iterators, returning a tuple where the first element comes from the
+ /// first iterator, and the second element comes from the second iterator.
+ ///
+ /// In other words, it zips two iterators together, into a single one.
+ ///
+ /// If either iterator returns [`None`], [`next`] from the zipped iterator
+ /// will return [`None`].
+ /// Once the zipped iterator has no more elements to return, each further
+ /// attempt to advance it will first try to advance the first iterator at most
+ /// once; only if that yields an item will it then try to advance the second
+ /// iterator at most once.
+ ///
+ /// To 'undo' the result of zipping up two iterators, see [`unzip`].
+ ///
+ /// [`unzip`]: Iterator::unzip
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a1 = [1, 2, 3];
+ /// let a2 = [4, 5, 6];
+ ///
+ /// let mut iter = a1.iter().zip(a2.iter());
+ ///
+ /// assert_eq!(iter.next(), Some((&1, &4)));
+ /// assert_eq!(iter.next(), Some((&2, &5)));
+ /// assert_eq!(iter.next(), Some((&3, &6)));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// Since the argument to `zip()` uses [`IntoIterator`], we can pass
+ /// anything that can be converted into an [`Iterator`], not just an
+ /// [`Iterator`] itself. For example, slices (`&[T]`) implement
+ /// [`IntoIterator`], and so can be passed to `zip()` directly:
+ ///
+ /// ```
+ /// let s1 = &[1, 2, 3];
+ /// let s2 = &[4, 5, 6];
+ ///
+ /// let mut iter = s1.iter().zip(s2);
+ ///
+ /// assert_eq!(iter.next(), Some((&1, &4)));
+ /// assert_eq!(iter.next(), Some((&2, &5)));
+ /// assert_eq!(iter.next(), Some((&3, &6)));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// `zip()` is often used to zip an infinite iterator to a finite one.
+ /// This works because the finite iterator will eventually return [`None`],
+ /// ending the zipper. Zipping with `(0..)` can look a lot like [`enumerate`]:
+ ///
+ /// ```
+ /// let enumerate: Vec<_> = "foo".chars().enumerate().collect();
+ ///
+ /// let zipper: Vec<_> = (0..).zip("foo".chars()).collect();
+ ///
+ /// assert_eq!((0, 'f'), enumerate[0]);
+ /// assert_eq!((0, 'f'), zipper[0]);
+ ///
+ /// assert_eq!((1, 'o'), enumerate[1]);
+ /// assert_eq!((1, 'o'), zipper[1]);
+ ///
+ /// assert_eq!((2, 'o'), enumerate[2]);
+ /// assert_eq!((2, 'o'), zipper[2]);
+ /// ```
+ ///
+ /// If both iterators have roughly equivalent syntax, it may be more readable to use [`zip`]:
+ ///
+ /// ```
+ /// use std::iter::zip;
+ ///
+ /// let a = [1, 2, 3];
+ /// let b = [2, 3, 4];
+ ///
+ /// let mut zipped = zip(
+ /// a.into_iter().map(|x| x * 2).skip(1),
+ /// b.into_iter().map(|x| x * 2).skip(1),
+ /// );
+ ///
+ /// assert_eq!(zipped.next(), Some((4, 6)));
+ /// assert_eq!(zipped.next(), Some((6, 8)));
+ /// assert_eq!(zipped.next(), None);
+ /// ```
+ ///
+ /// compared to:
+ ///
+ /// ```
+ /// # let a = [1, 2, 3];
+ /// # let b = [2, 3, 4];
+ /// #
+ /// let mut zipped = a
+ /// .into_iter()
+ /// .map(|x| x * 2)
+ /// .skip(1)
+ /// .zip(b.into_iter().map(|x| x * 2).skip(1));
+ /// #
+ /// # assert_eq!(zipped.next(), Some((4, 6)));
+ /// # assert_eq!(zipped.next(), Some((6, 8)));
+ /// # assert_eq!(zipped.next(), None);
+ /// ```
+ ///
+ /// [`enumerate`]: Iterator::enumerate
+ /// [`next`]: Iterator::next
+ /// [`zip`]: crate::iter::zip
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn zip<U>(self, other: U) -> Zip<Self, U::IntoIter>
+ where
+ Self: Sized,
+ U: IntoIterator,
+ {
+ Zip::new(self, other.into_iter())
+ }
+
+ /// Creates a new iterator which places a copy of `separator` between adjacent
+ /// items of the original iterator.
+ ///
+ /// In case `separator` does not implement [`Clone`] or needs to be
+ /// computed every time, use [`intersperse_with`].
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(iter_intersperse)]
+ ///
+ /// let mut a = [0, 1, 2].iter().intersperse(&100);
+ /// assert_eq!(a.next(), Some(&0)); // The first element from `a`.
+ /// assert_eq!(a.next(), Some(&100)); // The separator.
+ /// assert_eq!(a.next(), Some(&1)); // The next element from `a`.
+ /// assert_eq!(a.next(), Some(&100)); // The separator.
+ /// assert_eq!(a.next(), Some(&2)); // The last element from `a`.
+ /// assert_eq!(a.next(), None); // The iterator is finished.
+ /// ```
+ ///
+ /// `intersperse` can be very useful for joining an iterator's items using a common element:
+ /// ```
+ /// #![feature(iter_intersperse)]
+ ///
+ /// let hello = ["Hello", "World", "!"].iter().copied().intersperse(" ").collect::<String>();
+ /// assert_eq!(hello, "Hello World !");
+ /// ```
+ ///
+ /// [`Clone`]: crate::clone::Clone
+ /// [`intersperse_with`]: Iterator::intersperse_with
+ #[inline]
+ #[unstable(feature = "iter_intersperse", reason = "recently added", issue = "79524")]
+ fn intersperse(self, separator: Self::Item) -> Intersperse<Self>
+ where
+ Self: Sized,
+ Self::Item: Clone,
+ {
+ Intersperse::new(self, separator)
+ }
+
+ /// Creates a new iterator which places an item generated by `separator`
+ /// between adjacent items of the original iterator.
+ ///
+ /// The closure will be called exactly once each time an item is placed
+ /// between two adjacent items from the underlying iterator; specifically,
+ /// the closure is not called if the underlying iterator yields fewer than
+ /// two items, nor after the last item is yielded.
+ ///
+ /// If the iterator's item implements [`Clone`], it may be easier to use
+ /// [`intersperse`].
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(iter_intersperse)]
+ ///
+ /// #[derive(PartialEq, Debug)]
+ /// struct NotClone(usize);
+ ///
+ /// let v = [NotClone(0), NotClone(1), NotClone(2)];
+ /// let mut it = v.into_iter().intersperse_with(|| NotClone(99));
+ ///
+ /// assert_eq!(it.next(), Some(NotClone(0))); // The first element from `v`.
+ /// assert_eq!(it.next(), Some(NotClone(99))); // The separator.
+ /// assert_eq!(it.next(), Some(NotClone(1))); // The next element from `v`.
+ /// assert_eq!(it.next(), Some(NotClone(99))); // The separator.
+ /// assert_eq!(it.next(), Some(NotClone(2))); // The last element from `v`.
+ /// assert_eq!(it.next(), None); // The iterator is finished.
+ /// ```
+ ///
+ /// `intersperse_with` can be used in situations where the separator needs
+ /// to be computed:
+ /// ```
+ /// #![feature(iter_intersperse)]
+ ///
+ /// let src = ["Hello", "to", "all", "people", "!!"].iter().copied();
+ ///
+ /// // The closure mutably borrows its context to generate an item.
+ /// let mut happy_emojis = [" ❤️ ", " 😀 "].iter().copied();
+ /// let separator = || happy_emojis.next().unwrap_or(" 🦀 ");
+ ///
+ /// let result = src.intersperse_with(separator).collect::<String>();
+ /// assert_eq!(result, "Hello ❤️ to 😀 all 🦀 people 🦀 !!");
+ /// ```
+ /// [`Clone`]: crate::clone::Clone
+ /// [`intersperse`]: Iterator::intersperse
+ #[inline]
+ #[unstable(feature = "iter_intersperse", reason = "recently added", issue = "79524")]
+ fn intersperse_with<G>(self, separator: G) -> IntersperseWith<Self, G>
+ where
+ Self: Sized,
+ G: FnMut() -> Self::Item,
+ {
+ IntersperseWith::new(self, separator)
+ }
+
+ /// Takes a closure and creates an iterator which calls that closure on each
+ /// element.
+ ///
+ /// `map()` transforms one iterator into another, by means of its argument:
+ /// something that implements [`FnMut`]. It produces a new iterator which
+ /// calls this closure on each element of the original iterator.
+ ///
+ /// If you are good at thinking in types, you can think of `map()` like this:
+ /// If you have an iterator that gives you elements of some type `A`, and
+ /// you want an iterator of some other type `B`, you can use `map()`,
+ /// passing a closure that takes an `A` and returns a `B`.
+ ///
+ /// `map()` is conceptually similar to a [`for`] loop. However, as `map()` is
+ /// lazy, it is best used when you're already working with other iterators.
+ /// If you're doing some sort of looping for a side effect, it's considered
+ /// more idiomatic to use [`for`] than `map()`.
+ ///
+ /// [`for`]: ../../book/ch03-05-control-flow.html#looping-through-a-collection-with-for
+ /// [`FnMut`]: crate::ops::FnMut
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = a.iter().map(|x| 2 * x);
+ ///
+ /// assert_eq!(iter.next(), Some(2));
+ /// assert_eq!(iter.next(), Some(4));
+ /// assert_eq!(iter.next(), Some(6));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// If you're doing some sort of side effect, prefer [`for`] to `map()`:
+ ///
+ /// ```
+ /// # #![allow(unused_must_use)]
+ /// // don't do this:
+ /// (0..5).map(|x| println!("{x}"));
+ ///
+ /// // it won't even execute, as it is lazy. Rust will warn you about this.
+ ///
+ /// // Instead, use for:
+ /// for x in 0..5 {
+ /// println!("{x}");
+ /// }
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn map<B, F>(self, f: F) -> Map<Self, F>
+ where
+ Self: Sized,
+ F: FnMut(Self::Item) -> B,
+ {
+ Map::new(self, f)
+ }
+
+ /// Calls a closure on each element of an iterator.
+ ///
+ /// This is equivalent to using a [`for`] loop on the iterator, although
+ /// `break` and `continue` are not possible from a closure. It's generally
+ /// more idiomatic to use a `for` loop, but `for_each` may be more legible
+ /// when processing items at the end of longer iterator chains. In some
+ /// cases `for_each` may also be faster than a loop, because it will use
+ /// internal iteration on adapters like `Chain`.
+ ///
+ /// [`for`]: ../../book/ch03-05-control-flow.html#looping-through-a-collection-with-for
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::sync::mpsc::channel;
+ ///
+ /// let (tx, rx) = channel();
+ /// (0..5).map(|x| x * 2 + 1)
+ /// .for_each(move |x| tx.send(x).unwrap());
+ ///
+ /// let v: Vec<_> = rx.iter().collect();
+ /// assert_eq!(v, vec![1, 3, 5, 7, 9]);
+ /// ```
+ ///
+ /// For such a small example, a `for` loop may be cleaner, but `for_each`
+ /// might be preferable to keep a functional style with longer iterators:
+ ///
+ /// ```
+ /// (0..5).flat_map(|x| x * 100 .. x * 110)
+ /// .enumerate()
+ /// .filter(|&(i, x)| (i + x) % 3 == 0)
+ /// .for_each(|(i, x)| println!("{i}:{x}"));
+ /// ```
+ #[inline]
+ #[stable(feature = "iterator_for_each", since = "1.21.0")]
+ fn for_each<F>(self, f: F)
+ where
+ Self: Sized,
+ F: FnMut(Self::Item),
+ {
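+ // Adapter turning a `FnMut(T)` into the `FnMut((), T)` shape expected by
+ // `fold`, so `for_each` benefits from internal iteration.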
+ #[inline]
+ fn call<T>(mut f: impl FnMut(T)) -> impl FnMut((), T) {
+ move |(), item| f(item)
+ }
+
+ self.fold((), call(f));
+ }
+
+ /// Creates an iterator which uses a closure to determine if an element
+ /// should be yielded.
+ ///
+ /// Given an element, the closure must return `true` or `false`. The returned
+ /// iterator will yield only the elements for which the closure returns
+ /// true.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [0i32, 1, 2];
+ ///
+ /// let mut iter = a.iter().filter(|x| x.is_positive());
+ ///
+ /// assert_eq!(iter.next(), Some(&1));
+ /// assert_eq!(iter.next(), Some(&2));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// Because the closure passed to `filter()` takes a reference, and many
+ /// iterators iterate over references, this leads to a possibly confusing
+ /// situation, where the type of the closure is a double reference:
+ ///
+ /// ```
+ /// let a = [0, 1, 2];
+ ///
+ /// let mut iter = a.iter().filter(|x| **x > 1); // need two *s!
+ ///
+ /// assert_eq!(iter.next(), Some(&2));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// It's common to instead use destructuring on the argument to strip away one
+ /// of these layers:
+ ///
+ /// ```
+ /// let a = [0, 1, 2];
+ ///
+ /// let mut iter = a.iter().filter(|&x| *x > 1); // both & and *
+ ///
+ /// assert_eq!(iter.next(), Some(&2));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// or both of them:
+ ///
+ /// ```
+ /// let a = [0, 1, 2];
+ ///
+ /// let mut iter = a.iter().filter(|&&x| x > 1); // two &s
+ ///
+ /// assert_eq!(iter.next(), Some(&2));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// Note that `iter.filter(f).next()` is equivalent to `iter.find(f)`.
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn filter<P>(self, predicate: P) -> Filter<Self, P>
+ where
+ Self: Sized,
+ P: FnMut(&Self::Item) -> bool,
+ {
+ Filter::new(self, predicate)
+ }
+
+ /// Creates an iterator that both filters and maps.
+ ///
+ /// The returned iterator yields only the `value`s for which the supplied
+ /// closure returns `Some(value)`.
+ ///
+ /// `filter_map` can be used to make chains of [`filter`] and [`map`] more
+ /// concise. The example below shows how a `map().filter().map()` can be
+ /// shortened to a single call to `filter_map`.
+ ///
+ /// [`filter`]: Iterator::filter
+ /// [`map`]: Iterator::map
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = ["1", "two", "NaN", "four", "5"];
+ ///
+ /// let mut iter = a.iter().filter_map(|s| s.parse().ok());
+ ///
+ /// assert_eq!(iter.next(), Some(1));
+ /// assert_eq!(iter.next(), Some(5));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// Here's the same example, but with [`filter`] and [`map`]:
+ ///
+ /// ```
+ /// let a = ["1", "two", "NaN", "four", "5"];
+ /// let mut iter = a.iter().map(|s| s.parse()).filter(|s| s.is_ok()).map(|s| s.unwrap());
+ /// assert_eq!(iter.next(), Some(1));
+ /// assert_eq!(iter.next(), Some(5));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn filter_map<B, F>(self, f: F) -> FilterMap<Self, F>
+ where
+ Self: Sized,
+ F: FnMut(Self::Item) -> Option<B>,
+ {
+ FilterMap::new(self, f)
+ }
+
+ /// Creates an iterator which gives the current iteration count as well as
+ /// the next value.
+ ///
+ /// The iterator returned yields pairs `(i, val)`, where `i` is the
+ /// current index of iteration and `val` is the value returned by the
+ /// iterator.
+ ///
+ /// `enumerate()` keeps its count as a [`usize`]. If you want to count by a
+ /// different sized integer, the [`zip`] function provides similar
+ /// functionality.
+ ///
+ /// # Overflow Behavior
+ ///
+ /// The method does no guarding against overflows, so enumerating more than
+ /// [`usize::MAX`] elements either produces the wrong result or panics. If
+ /// debug assertions are enabled, a panic is guaranteed.
+ ///
+ /// # Panics
+ ///
+ /// The returned iterator might panic if the to-be-returned index would
+ /// overflow a [`usize`].
+ ///
+ /// [`zip`]: Iterator::zip
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let a = ['a', 'b', 'c'];
+ ///
+ /// let mut iter = a.iter().enumerate();
+ ///
+ /// assert_eq!(iter.next(), Some((0, &'a')));
+ /// assert_eq!(iter.next(), Some((1, &'b')));
+ /// assert_eq!(iter.next(), Some((2, &'c')));
+ /// assert_eq!(iter.next(), None);
+ /// ```
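+ ///
+ /// A sketch of the [`zip`] alternative mentioned above, counting with a
+ /// smaller integer type:
+ ///
+ /// ```
+ /// let a = ['a', 'b', 'c'];
+ ///
+ /// let mut iter = (0u8..).zip(a.iter());
+ ///
+ /// assert_eq!(iter.next(), Some((0, &'a')));
+ /// assert_eq!(iter.next(), Some((1, &'b')));
+ /// assert_eq!(iter.next(), Some((2, &'c')));
+ /// assert_eq!(iter.next(), None);
+ /// ```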
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn enumerate(self) -> Enumerate<Self>
+ where
+ Self: Sized,
+ {
+ Enumerate::new(self)
+ }
+
+ /// Creates an iterator which can use the [`peek`] and [`peek_mut`] methods
+ /// to look at the next element of the iterator without consuming it. See
+ /// their documentation for more information.
+ ///
+ /// Note that the underlying iterator is still advanced when [`peek`] or
+ /// [`peek_mut`] are called for the first time: in order to retrieve the
+ /// next element, [`next`] is called on the underlying iterator, hence any
+ /// side effects (i.e. anything other than fetching the next value) of
+ /// the [`next`] method will occur.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let xs = [1, 2, 3];
+ ///
+ /// let mut iter = xs.iter().peekable();
+ ///
+ /// // peek() lets us see into the future
+ /// assert_eq!(iter.peek(), Some(&&1));
+ /// assert_eq!(iter.next(), Some(&1));
+ ///
+ /// assert_eq!(iter.next(), Some(&2));
+ ///
+ /// // we can peek() multiple times, the iterator won't advance
+ /// assert_eq!(iter.peek(), Some(&&3));
+ /// assert_eq!(iter.peek(), Some(&&3));
+ ///
+ /// assert_eq!(iter.next(), Some(&3));
+ ///
+ /// // after the iterator is finished, so is peek()
+ /// assert_eq!(iter.peek(), None);
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// Using [`peek_mut`] to mutate the next item without advancing the
+ /// iterator:
+ ///
+ /// ```
+ /// let xs = [1, 2, 3];
+ ///
+ /// let mut iter = xs.iter().peekable();
+ ///
+ /// // `peek_mut()` lets us see into the future
+ /// assert_eq!(iter.peek_mut(), Some(&mut &1));
+ /// assert_eq!(iter.peek_mut(), Some(&mut &1));
+ /// assert_eq!(iter.next(), Some(&1));
+ ///
+ /// if let Some(mut p) = iter.peek_mut() {
+ /// assert_eq!(*p, &2);
+ /// // put a value into the iterator
+ /// *p = &1000;
+ /// }
+ ///
+ /// // The value reappears as the iterator continues
+ /// assert_eq!(iter.collect::<Vec<_>>(), vec![&1000, &3]);
+ /// ```
+ /// [`peek`]: Peekable::peek
+ /// [`peek_mut`]: Peekable::peek_mut
+ /// [`next`]: Iterator::next
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn peekable(self) -> Peekable<Self>
+ where
+ Self: Sized,
+ {
+ Peekable::new(self)
+ }
+
+ /// Creates an iterator that [`skip`]s elements based on a predicate.
+ ///
+ /// [`skip`]: Iterator::skip
+ ///
+ /// `skip_while()` takes a closure as an argument. It will call this
+ /// closure on each element of the iterator, and ignore elements
+ /// until it returns `false`.
+ ///
+ /// After `false` is returned, `skip_while()`'s job is over, and the
+ /// rest of the elements are yielded.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [-1i32, 0, 1];
+ ///
+ /// let mut iter = a.iter().skip_while(|x| x.is_negative());
+ ///
+ /// assert_eq!(iter.next(), Some(&0));
+ /// assert_eq!(iter.next(), Some(&1));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// Because the closure passed to `skip_while()` takes a reference, and many
+ /// iterators iterate over references, this leads to a possibly confusing
+ /// situation, where the type of the closure argument is a double reference:
+ ///
+ /// ```
+ /// let a = [-1, 0, 1];
+ ///
+ /// let mut iter = a.iter().skip_while(|x| **x < 0); // need two *s!
+ ///
+ /// assert_eq!(iter.next(), Some(&0));
+ /// assert_eq!(iter.next(), Some(&1));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// Stopping after an initial `false`:
+ ///
+ /// ```
+ /// let a = [-1, 0, 1, -2];
+ ///
+ /// let mut iter = a.iter().skip_while(|x| **x < 0);
+ ///
+ /// assert_eq!(iter.next(), Some(&0));
+ /// assert_eq!(iter.next(), Some(&1));
+ ///
+ /// // while this would have been false, since we already got a false,
+ /// // skip_while() isn't used any more
+ /// assert_eq!(iter.next(), Some(&-2));
+ ///
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ #[inline]
+ #[doc(alias = "drop_while")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn skip_while<P>(self, predicate: P) -> SkipWhile<Self, P>
+ where
+ Self: Sized,
+ P: FnMut(&Self::Item) -> bool,
+ {
+ SkipWhile::new(self, predicate)
+ }
+
+ /// Creates an iterator that yields elements based on a predicate.
+ ///
+ /// `take_while()` takes a closure as an argument. It will call this
+ /// closure on each element of the iterator, and yield elements
+ /// while it returns `true`.
+ ///
+ /// After `false` is returned, `take_while()`'s job is over, and the
+ /// rest of the elements are ignored.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [-1i32, 0, 1];
+ ///
+ /// let mut iter = a.iter().take_while(|x| x.is_negative());
+ ///
+ /// assert_eq!(iter.next(), Some(&-1));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// Because the closure passed to `take_while()` takes a reference, and many
+ /// iterators iterate over references, this leads to a possibly confusing
+ /// situation, where the type of the closure is a double reference:
+ ///
+ /// ```
+ /// let a = [-1, 0, 1];
+ ///
+ /// let mut iter = a.iter().take_while(|x| **x < 0); // need two *s!
+ ///
+ /// assert_eq!(iter.next(), Some(&-1));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// Stopping after an initial `false`:
+ ///
+ /// ```
+ /// let a = [-1, 0, 1, -2];
+ ///
+ /// let mut iter = a.iter().take_while(|x| **x < 0);
+ ///
+ /// assert_eq!(iter.next(), Some(&-1));
+ ///
+ /// // We have more elements that are less than zero, but since we already
+ /// // got a false, take_while() isn't used any more
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// Because `take_while()` needs to look at each value to decide whether it
+ /// should be included, the first value that fails the predicate is consumed
+ /// from the underlying iterator even though it is not yielded:
+ ///
+ /// ```
+ /// let a = [1, 2, 3, 4];
+ /// let mut iter = a.iter();
+ ///
+ /// let result: Vec<i32> = iter.by_ref()
+ /// .take_while(|n| **n != 3)
+ /// .cloned()
+ /// .collect();
+ ///
+ /// assert_eq!(result, &[1, 2]);
+ ///
+ /// let result: Vec<i32> = iter.cloned().collect();
+ ///
+ /// assert_eq!(result, &[4]);
+ /// ```
+ ///
+ /// The `3` is no longer there, because it was consumed in order to see if
+ /// the iteration should stop, but wasn't placed back into the iterator.
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn take_while<P>(self, predicate: P) -> TakeWhile<Self, P>
+ where
+ Self: Sized,
+ P: FnMut(&Self::Item) -> bool,
+ {
+ TakeWhile::new(self, predicate)
+ }
+
+ /// Creates an iterator that both yields elements based on a predicate and maps.
+ ///
+ /// `map_while()` takes a closure as an argument. It will call this
+ /// closure on each element of the iterator, and yield elements
+ /// while it returns [`Some(_)`][`Some`].
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [-1i32, 4, 0, 1];
+ ///
+ /// let mut iter = a.iter().map_while(|x| 16i32.checked_div(*x));
+ ///
+ /// assert_eq!(iter.next(), Some(-16));
+ /// assert_eq!(iter.next(), Some(4));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// Here's the same example, but with [`take_while`] and [`map`]:
+ ///
+ /// [`take_while`]: Iterator::take_while
+ /// [`map`]: Iterator::map
+ ///
+ /// ```
+ /// let a = [-1i32, 4, 0, 1];
+ ///
+ /// let mut iter = a.iter()
+ /// .map(|x| 16i32.checked_div(*x))
+ /// .take_while(|x| x.is_some())
+ /// .map(|x| x.unwrap());
+ ///
+ /// assert_eq!(iter.next(), Some(-16));
+ /// assert_eq!(iter.next(), Some(4));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// Stopping after an initial [`None`]:
+ ///
+ /// ```
+ /// let a = [0, 1, 2, -3, 4, 5, -6];
+ ///
+ /// let iter = a.iter().map_while(|x| u32::try_from(*x).ok());
+ /// let vec = iter.collect::<Vec<_>>();
+ ///
+ /// // We have more elements that could fit in u32 (4 and 5), but the closure
+ /// // returned `None` for `-3`, so `map_while` stopped yielding at that point
+ /// // and the later elements are never produced.
+ /// assert_eq!(vec, vec![0, 1, 2]);
+ /// ```
+ ///
+ /// Because `map_while()` needs to look at each value to decide whether it
+ /// should be yielded, the first value for which the closure returns [`None`]
+ /// is consumed from the underlying iterator even though it is not yielded:
+ ///
+ /// ```
+ /// let a = [1, 2, -3, 4];
+ /// let mut iter = a.iter();
+ ///
+ /// let result: Vec<u32> = iter.by_ref()
+ /// .map_while(|n| u32::try_from(*n).ok())
+ /// .collect();
+ ///
+ /// assert_eq!(result, &[1, 2]);
+ ///
+ /// let result: Vec<i32> = iter.cloned().collect();
+ ///
+ /// assert_eq!(result, &[4]);
+ /// ```
+ ///
+ /// The `-3` is no longer there, because it was consumed in order to see if
+ /// the iteration should stop, but wasn't placed back into the iterator.
+ ///
+ /// Note that unlike [`take_while`] this iterator is **not** fused.
+ /// It is also not specified what this iterator returns after the first [`None`] is returned.
+ /// If you need a fused iterator, use [`fuse`].
+ ///
+ /// [`fuse`]: Iterator::fuse
+ #[inline]
+ #[stable(feature = "iter_map_while", since = "1.57.0")]
+ fn map_while<B, P>(self, predicate: P) -> MapWhile<Self, P>
+ where
+ Self: Sized,
+ P: FnMut(Self::Item) -> Option<B>,
+ {
+ MapWhile::new(self, predicate)
+ }
+
+ /// Creates an iterator that skips the first `n` elements.
+ ///
+ /// `skip(n)` skips elements until `n` elements are skipped or the end of the
+ /// iterator is reached (whichever happens first). After that, all the remaining
+ /// elements are yielded. In particular, if the original iterator is too short,
+ /// then the returned iterator is empty.
+ ///
+ /// Rather than overriding this method directly, instead override the `nth` method.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = a.iter().skip(2);
+ ///
+ /// assert_eq!(iter.next(), Some(&3));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn skip(self, n: usize) -> Skip<Self>
+ where
+ Self: Sized,
+ {
+ Skip::new(self, n)
+ }
+
+ /// Creates an iterator that yields the first `n` elements, or fewer
+ /// if the underlying iterator ends sooner.
+ ///
+ /// `take(n)` yields elements until `n` elements are yielded or the end of
+ /// the iterator is reached (whichever happens first).
+ /// The returned iterator is a prefix of length `n` if the original iterator
+ /// contains at least `n` elements, otherwise it contains all of the
+ /// (fewer than `n`) elements of the original iterator.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = a.iter().take(2);
+ ///
+ /// assert_eq!(iter.next(), Some(&1));
+ /// assert_eq!(iter.next(), Some(&2));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// `take()` is often used with an infinite iterator, to make it finite:
+ ///
+ /// ```
+ /// let mut iter = (0..).take(3);
+ ///
+ /// assert_eq!(iter.next(), Some(0));
+ /// assert_eq!(iter.next(), Some(1));
+ /// assert_eq!(iter.next(), Some(2));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// If fewer than `n` elements are available,
+ /// `take` will limit itself to the size of the underlying iterator:
+ ///
+ /// ```
+ /// let v = [1, 2];
+ /// let mut iter = v.into_iter().take(5);
+ /// assert_eq!(iter.next(), Some(1));
+ /// assert_eq!(iter.next(), Some(2));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn take(self, n: usize) -> Take<Self>
+ where
+ Self: Sized,
+ {
+ Take::new(self, n)
+ }
+
+ /// An iterator adapter similar to [`fold`] that holds internal state and
+ /// produces a new iterator.
+ ///
+ /// [`fold`]: Iterator::fold
+ ///
+ /// `scan()` takes two arguments: an initial value which seeds the internal
+ /// state, and a closure with two arguments, the first being a mutable
+ /// reference to the internal state and the second an iterator element.
+ /// The closure can assign to the internal state to share state between
+ /// iterations.
+ ///
+ /// On iteration, the closure will be applied to each element of the
+ /// iterator and the return value from the closure, an [`Option`], is
+ /// yielded by the iterator. Iteration stops as soon as the closure
+ /// returns [`None`], as the second example below shows.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = a.iter().scan(1, |state, &x| {
+ /// // each iteration, we'll multiply the state by the element
+ /// *state = *state * x;
+ ///
+ /// // then, we'll yield the negation of the state
+ /// Some(-*state)
+ /// });
+ ///
+ /// assert_eq!(iter.next(), Some(-1));
+ /// assert_eq!(iter.next(), Some(-2));
+ /// assert_eq!(iter.next(), Some(-6));
+ /// assert_eq!(iter.next(), None);
+ /// ```
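+ ///
+ /// And a sketch of stopping early: because iteration ends once the closure
+ /// returns [`None`], checked arithmetic can cut the stream short (the
+ /// values here are arbitrary):
+ ///
+ /// ```
+ /// let a = [1i8, 2, 3, 4, 120, 5];
+ ///
+ /// // running sums, stopping at the first overflow of `i8`
+ /// let sums: Vec<i8> = a.iter().scan(0i8, |acc, &x| {
+ ///     *acc = acc.checked_add(x)?;
+ ///     Some(*acc)
+ /// }).collect();
+ ///
+ /// assert_eq!(sums, [1, 3, 6, 10]);
+ /// ```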
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn scan<St, B, F>(self, initial_state: St, f: F) -> Scan<Self, St, F>
+ where
+ Self: Sized,
+ F: FnMut(&mut St, Self::Item) -> Option<B>,
+ {
+ Scan::new(self, initial_state, f)
+ }
+
+ /// Creates an iterator that works like map, but flattens nested structure.
+ ///
+ /// The [`map`] adapter is very useful, but only when the closure
+ /// argument produces values. If it produces an iterator instead, there's
+ /// an extra layer of indirection. `flat_map()` will remove this extra layer
+ /// on its own.
+ ///
+ /// You can think of `flat_map(f)` as the semantic equivalent
+ /// of [`map`]ping, and then [`flatten`]ing as in `map(f).flatten()`.
+ ///
+ /// Another way of thinking about `flat_map()`: [`map`]'s closure returns
+ /// one item for each element, and `flat_map()`'s closure returns an
+ /// iterator for each element.
+ ///
+ /// [`map`]: Iterator::map
+ /// [`flatten`]: Iterator::flatten
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let words = ["alpha", "beta", "gamma"];
+ ///
+ /// // chars() returns an iterator
+ /// let merged: String = words.iter()
+ /// .flat_map(|s| s.chars())
+ /// .collect();
+ /// assert_eq!(merged, "alphabetagamma");
+ /// ```
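+ ///
+ /// Because [`Option`] implements [`IntoIterator`], the closure may also
+ /// produce zero or one item per element; a small sketch:
+ ///
+ /// ```
+ /// let words = ["apple", "", "pear"];
+ ///
+ /// // the empty string has no first character, so it contributes nothing
+ /// let firsts: Vec<char> = words.iter()
+ ///                              .flat_map(|s| s.chars().next())
+ ///                              .collect();
+ /// assert_eq!(firsts, ['a', 'p']);
+ /// ```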
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn flat_map<U, F>(self, f: F) -> FlatMap<Self, U, F>
+ where
+ Self: Sized,
+ U: IntoIterator,
+ F: FnMut(Self::Item) -> U,
+ {
+ FlatMap::new(self, f)
+ }
+
+ /// Creates an iterator that flattens nested structure.
+ ///
+ /// This is useful when you have an iterator of iterators or an iterator of
+ /// things that can be turned into iterators and you want to remove one
+ /// level of indirection.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let data = vec![vec![1, 2, 3, 4], vec![5, 6]];
+ /// let flattened = data.into_iter().flatten().collect::<Vec<u8>>();
+ /// assert_eq!(flattened, &[1, 2, 3, 4, 5, 6]);
+ /// ```
+ ///
+ /// Mapping and then flattening:
+ ///
+ /// ```
+ /// let words = ["alpha", "beta", "gamma"];
+ ///
+ /// // chars() returns an iterator
+ /// let merged: String = words.iter()
+ /// .map(|s| s.chars())
+ /// .flatten()
+ /// .collect();
+ /// assert_eq!(merged, "alphabetagamma");
+ /// ```
+ ///
+ /// You can also rewrite this in terms of [`flat_map()`], which is preferable
+ /// in this case since it conveys intent more clearly:
+ ///
+ /// ```
+ /// let words = ["alpha", "beta", "gamma"];
+ ///
+ /// // chars() returns an iterator
+ /// let merged: String = words.iter()
+ /// .flat_map(|s| s.chars())
+ /// .collect();
+ /// assert_eq!(merged, "alphabetagamma");
+ /// ```
+ ///
+ /// Flattening only removes one level of nesting at a time:
+ ///
+ /// ```
+ /// let d3 = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]];
+ ///
+ /// let d2 = d3.iter().flatten().collect::<Vec<_>>();
+ /// assert_eq!(d2, [&[1, 2], &[3, 4], &[5, 6], &[7, 8]]);
+ ///
+ /// let d1 = d3.iter().flatten().flatten().collect::<Vec<_>>();
+ /// assert_eq!(d1, [&1, &2, &3, &4, &5, &6, &7, &8]);
+ /// ```
+ ///
+ /// Here we see that `flatten()` does not perform a "deep" flatten.
+ /// Instead, only one level of nesting is removed. That is, if you
+ /// `flatten()` a three-dimensional array, the result will be
+ /// two-dimensional and not one-dimensional. To get a one-dimensional
+ /// structure, you have to `flatten()` again.
+ ///
+ /// [`flat_map()`]: Iterator::flat_map
+ #[inline]
+ #[stable(feature = "iterator_flatten", since = "1.29.0")]
+ fn flatten(self) -> Flatten<Self>
+ where
+ Self: Sized,
+ Self::Item: IntoIterator,
+ {
+ Flatten::new(self)
+ }
+
+ /// Creates an iterator which ends after the first [`None`].
+ ///
+ /// After an iterator returns [`None`], future calls may or may not yield
+ /// [`Some(T)`] again. `fuse()` adapts an iterator, ensuring that after a
+ /// [`None`] is given, it will always return [`None`] from then on.
+ ///
+ /// Note that the [`Fuse`] wrapper is a no-op on iterators that implement
+ /// the [`FusedIterator`] trait. `fuse()` may therefore behave incorrectly
+ /// if the [`FusedIterator`] trait is improperly implemented.
+ ///
+ /// [`Some(T)`]: Some
+ /// [`FusedIterator`]: crate::iter::FusedIterator
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // an iterator which alternates between Some and None
+ /// struct Alternate {
+ /// state: i32,
+ /// }
+ ///
+ /// impl Iterator for Alternate {
+ /// type Item = i32;
+ ///
+ /// fn next(&mut self) -> Option<i32> {
+ /// let val = self.state;
+ /// self.state = self.state + 1;
+ ///
+ /// // if it's even, Some(i32), else None
+ /// if val % 2 == 0 {
+ /// Some(val)
+ /// } else {
+ /// None
+ /// }
+ /// }
+ /// }
+ ///
+ /// let mut iter = Alternate { state: 0 };
+ ///
+ /// // we can see our iterator going back and forth
+ /// assert_eq!(iter.next(), Some(0));
+ /// assert_eq!(iter.next(), None);
+ /// assert_eq!(iter.next(), Some(2));
+ /// assert_eq!(iter.next(), None);
+ ///
+ /// // however, once we fuse it...
+ /// let mut iter = iter.fuse();
+ ///
+ /// assert_eq!(iter.next(), Some(4));
+ /// assert_eq!(iter.next(), None);
+ ///
+ /// // it will always return `None` after the first time.
+ /// assert_eq!(iter.next(), None);
+ /// assert_eq!(iter.next(), None);
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn fuse(self) -> Fuse<Self>
+ where
+ Self: Sized,
+ {
+ Fuse::new(self)
+ }
+
+ /// Does something with each element of an iterator, passing the value on.
+ ///
+ /// When using iterators, you'll often chain several of them together.
+ /// While working on such code, you might want to check out what's
+ /// happening at various parts in the pipeline. To do that, insert
+ /// a call to `inspect()`.
+ ///
+ /// It's more common for `inspect()` to be used as a debugging tool than to
+ /// exist in your final code, but applications may find it useful in certain
+ /// situations when errors need to be logged before being discarded.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 4, 2, 3];
+ ///
+ /// // this iterator sequence is complex.
+ /// let sum = a.iter()
+ /// .cloned()
+ /// .filter(|x| x % 2 == 0)
+ /// .fold(0, |sum, i| sum + i);
+ ///
+ /// println!("{sum}");
+ ///
+ /// // let's add some inspect() calls to investigate what's happening
+ /// let sum = a.iter()
+ /// .cloned()
+ /// .inspect(|x| println!("about to filter: {x}"))
+ /// .filter(|x| x % 2 == 0)
+ /// .inspect(|x| println!("made it through filter: {x}"))
+ /// .fold(0, |sum, i| sum + i);
+ ///
+ /// println!("{sum}");
+ /// ```
+ ///
+ /// This will print:
+ ///
+ /// ```text
+ /// 6
+ /// about to filter: 1
+ /// about to filter: 4
+ /// made it through filter: 4
+ /// about to filter: 2
+ /// made it through filter: 2
+ /// about to filter: 3
+ /// 6
+ /// ```
+ ///
+ /// Logging errors before discarding them:
+ ///
+ /// ```
+ /// let lines = ["1", "2", "a"];
+ ///
+ /// let sum: i32 = lines
+ /// .iter()
+ /// .map(|line| line.parse::<i32>())
+ /// .inspect(|num| {
+ /// if let Err(ref e) = *num {
+ /// println!("Parsing error: {e}");
+ /// }
+ /// })
+ /// .filter_map(Result::ok)
+ /// .sum();
+ ///
+ /// println!("Sum: {sum}");
+ /// ```
+ ///
+ /// This will print:
+ ///
+ /// ```text
+ /// Parsing error: invalid digit found in string
+ /// Sum: 3
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn inspect<F>(self, f: F) -> Inspect<Self, F>
+ where
+ Self: Sized,
+ F: FnMut(&Self::Item),
+ {
+ Inspect::new(self, f)
+ }
+
+ /// Borrows an iterator, rather than consuming it.
+ ///
+ /// This is useful to allow applying iterator adapters while still
+ /// retaining ownership of the original iterator.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut words = ["hello", "world", "of", "Rust"].into_iter();
+ ///
+ /// // Take the first two words.
+ /// let hello_world: Vec<_> = words.by_ref().take(2).collect();
+ /// assert_eq!(hello_world, vec!["hello", "world"]);
+ ///
+ /// // Collect the rest of the words.
+ /// // We can only do this because we used `by_ref` earlier.
+ /// let of_rust: Vec<_> = words.collect();
+ /// assert_eq!(of_rust, vec!["of", "Rust"]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn by_ref(&mut self) -> &mut Self
+ where
+ Self: Sized,
+ {
+ self
+ }
+
+ /// Transforms an iterator into a collection.
+ ///
+ /// `collect()` can take anything iterable, and turn it into a relevant
+ /// collection. This is one of the more powerful methods in the standard
+ /// library, used in a variety of contexts.
+ ///
+ /// The most basic pattern in which `collect()` is used is to turn one
+ /// collection into another. You take a collection, call [`iter`] on it,
+ /// do a bunch of transformations, and then `collect()` at the end.
+ ///
+ /// `collect()` can also create instances of types that are not typical
+ /// collections. For example, a [`String`] can be built from [`char`]s,
+ /// and an iterator of [`Result<T, E>`][`Result`] items can be collected
+ /// into `Result<Collection<T>, E>`. See the examples below for more.
+ ///
+ /// Because `collect()` is so general, it can cause problems with type
+ /// inference. As such, `collect()` is one of the few times you'll see
+ /// the syntax affectionately known as the 'turbofish': `::<>`. This
+ /// helps the inference algorithm understand specifically which collection
+ /// you're trying to collect into.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let doubled: Vec<i32> = a.iter()
+ /// .map(|&x| x * 2)
+ /// .collect();
+ ///
+ /// assert_eq!(vec![2, 4, 6], doubled);
+ /// ```
+ ///
+ /// Note that we needed the `: Vec<i32>` on the left-hand side. This is because
+ /// we could collect into, for example, a [`VecDeque<T>`] instead:
+ ///
+ /// [`VecDeque<T>`]: ../../std/collections/struct.VecDeque.html
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let a = [1, 2, 3];
+ ///
+ /// let doubled: VecDeque<i32> = a.iter().map(|&x| x * 2).collect();
+ ///
+ /// assert_eq!(2, doubled[0]);
+ /// assert_eq!(4, doubled[1]);
+ /// assert_eq!(6, doubled[2]);
+ /// ```
+ ///
+ /// Using the 'turbofish' instead of annotating `doubled`:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let doubled = a.iter().map(|x| x * 2).collect::<Vec<i32>>();
+ ///
+ /// assert_eq!(vec![2, 4, 6], doubled);
+ /// ```
+ ///
+ /// Because `collect()` only cares about what you're collecting into, you can
+ /// still use a partial type hint, `_`, with the turbofish:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let doubled = a.iter().map(|x| x * 2).collect::<Vec<_>>();
+ ///
+ /// assert_eq!(vec![2, 4, 6], doubled);
+ /// ```
+ ///
+ /// Using `collect()` to make a [`String`]:
+ ///
+ /// ```
+ /// let chars = ['g', 'd', 'k', 'k', 'n'];
+ ///
+ /// let hello: String = chars.iter()
+ /// .map(|&x| x as u8)
+ /// .map(|x| (x + 1) as char)
+ /// .collect();
+ ///
+ /// assert_eq!("hello", hello);
+ /// ```
+ ///
+ /// If you have a list of [`Result<T, E>`][`Result`]s, you can use `collect()` to
+ /// see if any of them failed:
+ ///
+ /// ```
+ /// let results = [Ok(1), Err("nope"), Ok(3), Err("bad")];
+ ///
+ /// let result: Result<Vec<_>, &str> = results.iter().cloned().collect();
+ ///
+ /// // gives us the first error
+ /// assert_eq!(Err("nope"), result);
+ ///
+ /// let results = [Ok(1), Ok(3)];
+ ///
+ /// let result: Result<Vec<_>, &str> = results.iter().cloned().collect();
+ ///
+ /// // gives us the list of answers
+ /// assert_eq!(Ok(vec![1, 3]), result);
+ /// ```
+ ///
+ /// [`iter`]: Iterator::next
+ /// [`String`]: ../../std/string/struct.String.html
+ /// [`char`]: type@char
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use = "if you really need to exhaust the iterator, consider `.for_each(drop)` instead"]
+ fn collect<B: FromIterator<Self::Item>>(self) -> B
+ where
+ Self: Sized,
+ {
+ FromIterator::from_iter(self)
+ }
+
+ /// Fallibly transforms an iterator into a collection, short-circuiting if
+ /// a failure is encountered.
+ ///
+ /// `try_collect()` is a variation of [`collect()`][`collect`] that allows fallible
+ /// conversions during collection. Its main use case is simplifying conversions from
+ /// iterators yielding [`Option<T>`][`Option`] into `Option<Collection<T>>`, or similarly for other [`Try`]
+ /// types (e.g. [`Result`]).
+ ///
+ /// Importantly, `try_collect()` doesn't require that the outer [`Try`] type also implements [`FromIterator`];
+ /// only the inner type produced on `Try::Output` must implement it. Concretely,
+ /// this means that collecting into `ControlFlow<_, Vec<i32>>` is valid because `Vec<i32>` implements
+ /// [`FromIterator`], even though [`ControlFlow`] doesn't.
+ ///
+ /// Also, if a failure is encountered during `try_collect()`, the iterator is still valid and
+ /// may continue to be used, in which case it will continue iterating starting after the element that
+ /// triggered the failure. See the last example below for a demonstration of how this works.
+ ///
+ /// # Examples
+ ///
+ /// Successfully collecting an iterator of `Option<i32>` into `Option<Vec<i32>>`:
+ /// ```
+ /// #![feature(iterator_try_collect)]
+ ///
+ /// let u = vec![Some(1), Some(2), Some(3)];
+ /// let v = u.into_iter().try_collect::<Vec<i32>>();
+ /// assert_eq!(v, Some(vec![1, 2, 3]));
+ /// ```
+ ///
+ /// Failing to collect in the same way:
+ /// ```
+ /// #![feature(iterator_try_collect)]
+ ///
+ /// let u = vec![Some(1), Some(2), None, Some(3)];
+ /// let v = u.into_iter().try_collect::<Vec<i32>>();
+ /// assert_eq!(v, None);
+ /// ```
+ ///
+ /// A similar example, but with `Result`:
+ /// ```
+ /// #![feature(iterator_try_collect)]
+ ///
+ /// let u: Vec<Result<i32, ()>> = vec![Ok(1), Ok(2), Ok(3)];
+ /// let v = u.into_iter().try_collect::<Vec<i32>>();
+ /// assert_eq!(v, Ok(vec![1, 2, 3]));
+ ///
+ /// let u = vec![Ok(1), Ok(2), Err(()), Ok(3)];
+ /// let v = u.into_iter().try_collect::<Vec<i32>>();
+ /// assert_eq!(v, Err(()));
+ /// ```
+ ///
+ /// Finally, even [`ControlFlow`] works, despite the fact that it
+ /// doesn't implement [`FromIterator`]. Note also that the iterator can
+ /// continue to be used, even if a failure is encountered:
+ ///
+ /// ```
+ /// #![feature(iterator_try_collect)]
+ ///
+ /// use core::ops::ControlFlow::{Break, Continue};
+ ///
+ /// let u = [Continue(1), Continue(2), Break(3), Continue(4), Continue(5)];
+ /// let mut it = u.into_iter();
+ ///
+ /// let v = it.try_collect::<Vec<_>>();
+ /// assert_eq!(v, Break(3));
+ ///
+ /// let v = it.try_collect::<Vec<_>>();
+ /// assert_eq!(v, Continue(vec![4, 5]));
+ /// ```
+ ///
+ /// [`collect`]: Iterator::collect
+ #[inline]
+ #[unstable(feature = "iterator_try_collect", issue = "94047")]
+ fn try_collect<B>(&mut self) -> ChangeOutputType<Self::Item, B>
+ where
+ Self: Sized,
+ <Self as Iterator>::Item: Try,
+ <<Self as Iterator>::Item as Try>::Residual: Residual<B>,
+ B: FromIterator<<Self::Item as Try>::Output>,
+ {
+ try_process(ByRefSized(self), |i| i.collect())
+ }
+
+ /// Collects all the items from an iterator into a collection.
+ ///
+ /// This method consumes the iterator and adds all its items to the
+ /// passed collection. The collection is then returned, so the call chain
+ /// can be continued.
+ ///
+ /// This is useful when you already have a collection and want to add
+ /// the iterator items to it.
+ ///
+ /// This is a convenience method for calling [Extend::extend](trait.Extend.html),
+ /// but instead of being called on a collection, it's called on an iterator.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(iter_collect_into)]
+ ///
+ /// let a = [1, 2, 3];
+ /// let mut vec: Vec::<i32> = vec![0, 1];
+ ///
+ /// a.iter().map(|&x| x * 2).collect_into(&mut vec);
+ /// a.iter().map(|&x| x * 10).collect_into(&mut vec);
+ ///
+ /// assert_eq!(vec![0, 1, 2, 4, 6, 10, 20, 30], vec);
+ /// ```
+ ///
+ /// A `Vec` can have its capacity set in advance to avoid reallocating:
+ ///
+ /// ```
+ /// #![feature(iter_collect_into)]
+ ///
+ /// let a = [1, 2, 3];
+ /// let mut vec: Vec::<i32> = Vec::with_capacity(6);
+ ///
+ /// a.iter().map(|&x| x * 2).collect_into(&mut vec);
+ /// a.iter().map(|&x| x * 10).collect_into(&mut vec);
+ ///
+ /// assert_eq!(6, vec.capacity());
+ /// println!("{:?}", vec);
+ /// ```
+ ///
+ /// The returned mutable reference can be used to continue the call chain:
+ ///
+ /// ```
+ /// #![feature(iter_collect_into)]
+ ///
+ /// let a = [1, 2, 3];
+ /// let mut vec: Vec::<i32> = Vec::with_capacity(6);
+ ///
+ /// let count = a.iter().collect_into(&mut vec).iter().count();
+ ///
+ /// assert_eq!(count, vec.len());
+ /// println!("Vec len is {}", count);
+ ///
+ /// let count = a.iter().collect_into(&mut vec).iter().count();
+ ///
+ /// assert_eq!(count, vec.len());
+ /// println!("Vec len now is {}", count);
+ /// ```
+ #[inline]
+ #[unstable(feature = "iter_collect_into", reason = "new API", issue = "94780")]
+ fn collect_into<E: Extend<Self::Item>>(self, collection: &mut E) -> &mut E
+ where
+ Self: Sized,
+ {
+ collection.extend(self);
+ collection
+ }
+
+ /// Consumes an iterator, creating two collections from it.
+ ///
+ /// The predicate passed to `partition()` returns `true` or `false` for each
+ /// element. `partition()` returns a pair: all of the elements for which it
+ /// returned `true`, and all of the elements for which it returned `false`.
+ ///
+ /// See also [`is_partitioned()`] and [`partition_in_place()`].
+ ///
+ /// [`is_partitioned()`]: Iterator::is_partitioned
+ /// [`partition_in_place()`]: Iterator::partition_in_place
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let (even, odd): (Vec<_>, Vec<_>) = a
+ /// .into_iter()
+ /// .partition(|n| n % 2 == 0);
+ ///
+ /// assert_eq!(even, vec![2]);
+ /// assert_eq!(odd, vec![1, 3]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn partition<B, F>(self, f: F) -> (B, B)
+ where
+ Self: Sized,
+ B: Default + Extend<Self::Item>,
+ F: FnMut(&Self::Item) -> bool,
+ {
+ #[inline]
+ fn extend<'a, T, B: Extend<T>>(
+ mut f: impl FnMut(&T) -> bool + 'a,
+ left: &'a mut B,
+ right: &'a mut B,
+ ) -> impl FnMut((), T) + 'a {
+ move |(), x| {
+ if f(&x) {
+ left.extend_one(x);
+ } else {
+ right.extend_one(x);
+ }
+ }
+ }
+
+ let mut left: B = Default::default();
+ let mut right: B = Default::default();
+
+ self.fold((), extend(f, &mut left, &mut right));
+
+ (left, right)
+ }
+
+ /// Reorders the elements of this iterator *in-place* according to the given predicate,
+ /// such that all those that return `true` precede all those that return `false`.
+ /// Returns the number of `true` elements found.
+ ///
+ /// The relative order of partitioned items is not maintained.
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm repeatedly finds the first element for which the
+ /// predicate evaluates to `false` and the last element for which it
+ /// evaluates to `true`, and swaps them.
+ ///
+ /// Time complexity: *O*(*n*)
+ ///
+ /// See also [`is_partitioned()`] and [`partition()`].
+ ///
+ /// [`is_partitioned()`]: Iterator::is_partitioned
+ /// [`partition()`]: Iterator::partition
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(iter_partition_in_place)]
+ ///
+ /// let mut a = [1, 2, 3, 4, 5, 6, 7];
+ ///
+ /// // Partition in-place between evens and odds
+ /// let i = a.iter_mut().partition_in_place(|&n| n % 2 == 0);
+ ///
+ /// assert_eq!(i, 3);
+ /// assert!(a[..i].iter().all(|&n| n % 2 == 0)); // evens
+ /// assert!(a[i..].iter().all(|&n| n % 2 == 1)); // odds
+ /// ```
+ #[unstable(feature = "iter_partition_in_place", reason = "new API", issue = "62543")]
+ fn partition_in_place<'a, T: 'a, P>(mut self, ref mut predicate: P) -> usize
+ where
+ Self: Sized + DoubleEndedIterator<Item = &'a mut T>,
+ P: FnMut(&T) -> bool,
+ {
+ // FIXME: should we worry about the count overflowing? The only way to have more than
+ // `usize::MAX` mutable references is with ZSTs, which aren't useful to partition...
+
+ // These closure "factory" functions exist to avoid genericity in `Self`.
+
+ #[inline]
+ fn is_false<'a, T>(
+ predicate: &'a mut impl FnMut(&T) -> bool,
+ true_count: &'a mut usize,
+ ) -> impl FnMut(&&mut T) -> bool + 'a {
+ move |x| {
+ let p = predicate(&**x);
+ *true_count += p as usize;
+ !p
+ }
+ }
+
+ #[inline]
+ fn is_true<T>(predicate: &mut impl FnMut(&T) -> bool) -> impl FnMut(&&mut T) -> bool + '_ {
+ move |x| predicate(&**x)
+ }
+
+ // Repeatedly find the first `false` and swap it with the last `true`.
+ let mut true_count = 0;
+ while let Some(head) = self.find(is_false(predicate, &mut true_count)) {
+ if let Some(tail) = self.rfind(is_true(predicate)) {
+ crate::mem::swap(head, tail);
+ true_count += 1;
+ } else {
+ break;
+ }
+ }
+ true_count
+ }
+
+ /// Checks if the elements of this iterator are partitioned according to the given predicate,
+ /// such that all those that return `true` precede all those that return `false`.
+ ///
+ /// See also [`partition()`] and [`partition_in_place()`].
+ ///
+ /// [`partition()`]: Iterator::partition
+ /// [`partition_in_place()`]: Iterator::partition_in_place
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(iter_is_partitioned)]
+ ///
+ /// assert!("Iterator".chars().is_partitioned(char::is_uppercase));
+ /// assert!(!"IntoIterator".chars().is_partitioned(char::is_uppercase));
+ /// ```
+ #[unstable(feature = "iter_is_partitioned", reason = "new API", issue = "62544")]
+ fn is_partitioned<P>(mut self, mut predicate: P) -> bool
+ where
+ Self: Sized,
+ P: FnMut(Self::Item) -> bool,
+ {
+ // Either all items test `true`, or the first clause stops at `false`
+ // and we check that there are no more `true` items after that.
+ self.all(&mut predicate) || !self.any(predicate)
+ }
+
+ /// An iterator method that applies a function as long as it returns
+ /// successfully, producing a single, final value.
+ ///
+ /// `try_fold()` takes two arguments: an initial value, and a closure with
+ /// two arguments: an 'accumulator', and an element. The closure either
+ /// returns successfully, with the value that the accumulator should have
+ /// for the next iteration, or it returns failure, with an error value that
+ /// is propagated back to the caller immediately (short-circuiting).
+ ///
+ /// The initial value is the value the accumulator will have on the first
+ /// call. If applying the closure succeeds for every element of the
+ /// iterator, `try_fold()` returns the final accumulator as success.
+ ///
+ /// Folding is useful whenever you have a collection of something, and want
+ /// to produce a single value from it.
+ ///
+ /// # Note to Implementors
+ ///
+ /// Several of the other (forward) methods have default implementations in
+ /// terms of this one, so try to implement this explicitly if it can
+ /// do something better than the default `for` loop implementation.
+ ///
+ /// In particular, try to have this call `try_fold()` on the internal parts
+ /// from which this iterator is composed. If multiple calls are needed,
+ /// the `?` operator may be convenient for chaining the accumulator value
+ /// along, but beware any invariants that need to be upheld before those
+ /// early returns. This is a `&mut self` method, so iteration needs to be
+ /// resumable after hitting an error here.
+ ///
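+ /// As a sketch of that advice, a composed iterator can chain `try_fold`
+ /// through each of its parts, with `?` carrying the accumulator across the
+ /// seam (the `Pair` type below is purely illustrative, and the [`Try`]
+ /// trait it relies on is still unstable):
+ ///
+ /// ```
+ /// #![feature(try_trait_v2)]
+ /// use std::ops::Try;
+ ///
+ /// struct Pair<I> { a: I, b: I }
+ ///
+ /// impl<I: Iterator> Iterator for Pair<I> {
+ ///     type Item = I::Item;
+ ///
+ ///     fn next(&mut self) -> Option<I::Item> {
+ ///         self.a.next().or_else(|| self.b.next())
+ ///     }
+ ///
+ ///     fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
+ ///     where
+ ///         F: FnMut(B, Self::Item) -> R,
+ ///         R: Try<Output = B>,
+ ///     {
+ ///         // `?` returns early if the first half short-circuits; otherwise
+ ///         // the accumulator flows on into the second half
+ ///         let acc = self.a.try_fold(init, &mut f)?;
+ ///         self.b.try_fold(acc, f)
+ ///     }
+ /// }
+ ///
+ /// let mut it = Pair { a: [1i8, 2].into_iter(), b: [3, 4].into_iter() };
+ /// assert_eq!(it.try_fold(0i8, |acc, x| acc.checked_add(x)), Some(10));
+ /// ```
+ ///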
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// // the checked sum of all of the elements of the array
+ /// let sum = a.iter().try_fold(0i8, |acc, &x| acc.checked_add(x));
+ ///
+ /// assert_eq!(sum, Some(6));
+ /// ```
+ ///
+ /// Short-circuiting:
+ ///
+ /// ```
+ /// let a = [10, 20, 30, 100, 40, 50];
+ /// let mut it = a.iter();
+ ///
+ /// // This sum overflows when adding the 100 element
+ /// let sum = it.try_fold(0i8, |acc, &x| acc.checked_add(x));
+ /// assert_eq!(sum, None);
+ ///
+ /// // Because it short-circuited, the remaining elements are still
+ /// // available through the iterator.
+ /// assert_eq!(it.len(), 2);
+ /// assert_eq!(it.next(), Some(&40));
+ /// ```
+ ///
+ /// While you cannot `break` from a closure, the [`ControlFlow`] type allows
+ /// a similar idea:
+ ///
+ /// ```
+ /// use std::ops::ControlFlow;
+ ///
+ /// let triangular = (1..30).try_fold(0_i8, |prev, x| {
+ /// if let Some(next) = prev.checked_add(x) {
+ /// ControlFlow::Continue(next)
+ /// } else {
+ /// ControlFlow::Break(prev)
+ /// }
+ /// });
+ /// assert_eq!(triangular, ControlFlow::Break(120));
+ ///
+ /// let triangular = (1..30).try_fold(0_u64, |prev, x| {
+ /// if let Some(next) = prev.checked_add(x) {
+ /// ControlFlow::Continue(next)
+ /// } else {
+ /// ControlFlow::Break(prev)
+ /// }
+ /// });
+ /// assert_eq!(triangular, ControlFlow::Continue(435));
+ /// ```
+ #[inline]
+ #[stable(feature = "iterator_try_fold", since = "1.27.0")]
+ fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ let mut accum = init;
+ while let Some(x) = self.next() {
+ accum = f(accum, x)?;
+ }
+ try { accum }
+ }
+
+ /// An iterator method that applies a fallible function to each item in the
+ /// iterator, stopping at the first error and returning that error.
+ ///
+ /// This can also be thought of as the fallible form of [`for_each()`]
+ /// or as the stateless version of [`try_fold()`].
+ ///
+ /// [`for_each()`]: Iterator::for_each
+ /// [`try_fold()`]: Iterator::try_fold
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fs::rename;
+ /// use std::io::{stdout, Write};
+ /// use std::path::Path;
+ ///
+ /// let data = ["no_tea.txt", "stale_bread.json", "torrential_rain.png"];
+ ///
+ /// let res = data.iter().try_for_each(|x| writeln!(stdout(), "{x}"));
+ /// assert!(res.is_ok());
+ ///
+ /// let mut it = data.iter().cloned();
+ /// let res = it.try_for_each(|x| rename(x, Path::new(x).with_extension("old")));
+ /// assert!(res.is_err());
+ /// // It short-circuited, so the remaining items are still in the iterator:
+ /// assert_eq!(it.next(), Some("stale_bread.json"));
+ /// ```
+ ///
+ /// The [`ControlFlow`] type can be used with this method for the situations
+ /// in which you'd use `break` and `continue` in a normal loop:
+ ///
+ /// ```
+ /// use std::ops::ControlFlow;
+ ///
+ /// let r = (2..100).try_for_each(|x| {
+ /// if 323 % x == 0 {
+ /// return ControlFlow::Break(x)
+ /// }
+ ///
+ /// ControlFlow::Continue(())
+ /// });
+ /// assert_eq!(r, ControlFlow::Break(17));
+ /// ```
+ #[inline]
+ #[stable(feature = "iterator_try_fold", since = "1.27.0")]
+ fn try_for_each<F, R>(&mut self, f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(Self::Item) -> R,
+ R: Try<Output = ()>,
+ {
+ #[inline]
+ fn call<T, R>(mut f: impl FnMut(T) -> R) -> impl FnMut((), T) -> R {
+ move |(), x| f(x)
+ }
+
+ self.try_fold((), call(f))
+ }
+
+ /// Folds every element into an accumulator by applying an operation,
+ /// returning the final result.
+ ///
+ /// `fold()` takes two arguments: an initial value, and a closure with two
+ /// arguments: an 'accumulator', and an element. The closure returns the value that
+ /// the accumulator should have for the next iteration.
+ ///
+ /// The initial value is the value the accumulator will have on the first
+ /// call.
+ ///
+ /// After applying this closure to every element of the iterator, `fold()`
+ /// returns the accumulator.
+ ///
+ /// This operation is sometimes called 'reduce' or 'inject'.
+ ///
+ /// Folding is useful whenever you have a collection of something, and want
+ /// to produce a single value from it.
+ ///
+ /// Note: `fold()`, and similar methods that traverse the entire iterator,
+ /// might not terminate for infinite iterators, even for operations whose
+ /// result could be determined in finite time.
+ ///
+ /// Note: [`reduce()`] can be used instead, taking the first element as the
+ /// initial value, if the accumulator type and item type are the same.
+ ///
+ /// Note: `fold()` combines elements in a *left-associative* fashion. For associative
+ /// operators like `+`, the order the elements are combined in is not important, but for non-associative
+ /// operators like `-` the order will affect the final result.
+ /// For a *right-associative* version of `fold()`, see [`DoubleEndedIterator::rfold()`].
+ ///
+ /// # Note to Implementors
+ ///
+ /// Several of the other (forward) methods have default implementations in
+ /// terms of this one, so try to implement this explicitly if it can
+ /// do something better than the default `for` loop implementation.
+ ///
+ /// In particular, try to have this call `fold()` on the internal parts
+ /// from which this iterator is composed.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// // the sum of all of the elements of the array
+ /// let sum = a.iter().fold(0, |acc, x| acc + x);
+ ///
+ /// assert_eq!(sum, 6);
+ /// ```
+ ///
+ /// Let's walk through each step of the iteration here:
+ ///
+ /// | element | acc | x | result |
+ /// |---------|-----|---|--------|
+ /// | | 0 | | |
+ /// | 1 | 0 | 1 | 1 |
+ /// | 2 | 1 | 2 | 3 |
+ /// | 3 | 3 | 3 | 6 |
+ ///
+ /// And so, our final result, `6`.
+ ///
+ /// This example demonstrates the left-associative nature of `fold()`:
+ /// it builds a string, starting with an initial value
+ /// and continuing with each element from the front until the back:
+ ///
+ /// ```
+ /// let numbers = [1, 2, 3, 4, 5];
+ ///
+ /// let zero = "0".to_string();
+ ///
+ /// let result = numbers.iter().fold(zero, |acc, &x| {
+ /// format!("({acc} + {x})")
+ /// });
+ ///
+ /// assert_eq!(result, "(((((0 + 1) + 2) + 3) + 4) + 5)");
+ /// ```
+ /// It's common for people who haven't used iterators a lot to
+ /// use a [`for`] loop with a list of things to build up a result. Those
+ /// can be turned into `fold()`s:
+ ///
+ /// [`for`]: ../../book/ch03-05-control-flow.html#looping-through-a-collection-with-for
+ ///
+ /// ```
+ /// let numbers = [1, 2, 3, 4, 5];
+ ///
+ /// let mut result = 0;
+ ///
+ /// // for loop:
+ /// for i in &numbers {
+ /// result = result + i;
+ /// }
+ ///
+ /// // fold:
+ /// let result2 = numbers.iter().fold(0, |acc, &x| acc + x);
+ ///
+ /// // they're the same
+ /// assert_eq!(result, result2);
+ /// ```
+ ///
+ /// [`reduce()`]: Iterator::reduce
+ #[doc(alias = "inject", alias = "foldl")]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn fold<B, F>(mut self, init: B, mut f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ let mut accum = init;
+ while let Some(x) = self.next() {
+ accum = f(accum, x);
+ }
+ accum
+ }
+
+ /// Reduces the elements to a single one, by repeatedly applying a reducing
+ /// operation.
+ ///
+ /// If the iterator is empty, returns [`None`]; otherwise, returns the
+ /// result of the reduction.
+ ///
+ /// The reducing function is a closure with two arguments: an 'accumulator', and an element.
+ /// For iterators with at least one element, this is the same as [`fold()`]
+ /// with the first element of the iterator as the initial accumulator value, folding
+ /// every subsequent element into it.
+ ///
+ /// [`fold()`]: Iterator::fold
+ ///
+ /// # Example
+ ///
+ /// Find the maximum value:
+ ///
+ /// ```
+ /// fn find_max<I>(iter: I) -> Option<I::Item>
+ /// where I: Iterator,
+ /// I::Item: Ord,
+ /// {
+ /// iter.reduce(|accum, item| {
+ /// if accum >= item { accum } else { item }
+ /// })
+ /// }
+ /// let a = [10, 20, 5, -23, 0];
+ /// let b: [u32; 0] = [];
+ ///
+ /// assert_eq!(find_max(a.iter()), Some(&20));
+ /// assert_eq!(find_max(b.iter()), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "iterator_fold_self", since = "1.51.0")]
+ fn reduce<F>(mut self, f: F) -> Option<Self::Item>
+ where
+ Self: Sized,
+ F: FnMut(Self::Item, Self::Item) -> Self::Item,
+ {
+ let first = self.next()?;
+ Some(self.fold(first, f))
+ }
+
+ /// Reduces the elements to a single one by repeatedly applying a reducing operation. If the
+ /// closure returns a failure, the failure is propagated back to the caller immediately.
+ ///
+ /// The return type of this method depends on the return type of the closure. If the closure
+ /// returns `Result<Self::Item, E>`, then this function will return `Result<Option<Self::Item>,
+ /// E>`. If the closure returns `Option<Self::Item>`, then this function will return
+ /// `Option<Option<Self::Item>>`.
+ ///
+ /// When called on an empty iterator, this function will return either `Some(None)` or
+ /// `Ok(None)` depending on the type of the provided closure.
+ ///
+ /// For iterators with at least one element, this is essentially the same as calling
+ /// [`try_fold()`] with the first element of the iterator as the initial accumulator value.
+ ///
+ /// [`try_fold()`]: Iterator::try_fold
+ ///
+ /// # Examples
+ ///
+ /// Safely calculate the sum of a series of numbers:
+ ///
+ /// ```
+ /// #![feature(iterator_try_reduce)]
+ ///
+ /// let numbers: Vec<usize> = vec![10, 20, 5, 23, 0];
+ /// let sum = numbers.into_iter().try_reduce(|x, y| x.checked_add(y));
+ /// assert_eq!(sum, Some(Some(58)));
+ /// ```
+ ///
+ /// Determine when a reduction short circuited:
+ ///
+ /// ```
+ /// #![feature(iterator_try_reduce)]
+ ///
+ /// let numbers = vec![1, 2, 3, usize::MAX, 4, 5];
+ /// let sum = numbers.into_iter().try_reduce(|x, y| x.checked_add(y));
+ /// assert_eq!(sum, None);
+ /// ```
+ ///
+ /// Determine when a reduction was not performed because there are no elements:
+ ///
+ /// ```
+ /// #![feature(iterator_try_reduce)]
+ ///
+ /// let numbers: Vec<usize> = Vec::new();
+ /// let sum = numbers.into_iter().try_reduce(|x, y| x.checked_add(y));
+ /// assert_eq!(sum, Some(None));
+ /// ```
+ ///
+ /// Use a [`Result`] instead of an [`Option`]:
+ ///
+ /// ```
+ /// #![feature(iterator_try_reduce)]
+ ///
+ /// let numbers = vec!["1", "2", "3", "4", "5"];
+ /// let max: Result<Option<_>, <usize as std::str::FromStr>::Err> =
+ /// numbers.into_iter().try_reduce(|x, y| {
+ /// if x.parse::<usize>()? > y.parse::<usize>()? { Ok(x) } else { Ok(y) }
+ /// });
+ /// assert_eq!(max, Ok(Some("5")));
+ /// ```
+ #[inline]
+ #[unstable(feature = "iterator_try_reduce", reason = "new API", issue = "87053")]
+ fn try_reduce<F, R>(&mut self, f: F) -> ChangeOutputType<R, Option<R::Output>>
+ where
+ Self: Sized,
+ F: FnMut(Self::Item, Self::Item) -> R,
+ R: Try<Output = Self::Item>,
+ R::Residual: Residual<Option<Self::Item>>,
+ {
+ let first = match self.next() {
+ Some(i) => i,
+ None => return Try::from_output(None),
+ };
+
+ match self.try_fold(first, f).branch() {
+ ControlFlow::Break(r) => FromResidual::from_residual(r),
+ ControlFlow::Continue(i) => Try::from_output(Some(i)),
+ }
+ }
+
+ /// Tests if every element of the iterator matches a predicate.
+ ///
+ /// `all()` takes a closure that returns `true` or `false`. It applies
+ /// this closure to each element of the iterator, and if they all return
+ /// `true`, then so does `all()`. If any of them return `false`, it
+ /// returns `false`.
+ ///
+ /// `all()` is short-circuiting; in other words, it will stop processing
+ /// as soon as it finds a `false`, given that no matter what else happens,
+ /// the result will also be `false`.
+ ///
+ /// An empty iterator returns `true`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// assert!(a.iter().all(|&x| x > 0));
+ ///
+ /// assert!(!a.iter().all(|&x| x > 2));
+ /// ```
+ ///
+ /// Stopping at the first `false`:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = a.iter();
+ ///
+ /// assert!(!iter.all(|&x| x != 2));
+ ///
+ /// // we can still use `iter`, as there are more elements.
+ /// assert_eq!(iter.next(), Some(&3));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn all<F>(&mut self, f: F) -> bool
+ where
+ Self: Sized,
+ F: FnMut(Self::Item) -> bool,
+ {
+ #[inline]
+ fn check<T>(mut f: impl FnMut(T) -> bool) -> impl FnMut((), T) -> ControlFlow<()> {
+ move |(), x| {
+ if f(x) { ControlFlow::CONTINUE } else { ControlFlow::BREAK }
+ }
+ }
+ self.try_fold((), check(f)) == ControlFlow::CONTINUE
+ }
+
+ /// Tests if any element of the iterator matches a predicate.
+ ///
+ /// `any()` takes a closure that returns `true` or `false`. It applies
+ /// this closure to each element of the iterator, and if any of them return
+ /// `true`, then so does `any()`. If they all return `false`, it
+ /// returns `false`.
+ ///
+ /// `any()` is short-circuiting; in other words, it will stop processing
+ /// as soon as it finds a `true`, given that no matter what else happens,
+ /// the result will also be `true`.
+ ///
+ /// An empty iterator returns `false`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// assert!(a.iter().any(|&x| x > 0));
+ ///
+ /// assert!(!a.iter().any(|&x| x > 5));
+ /// ```
+ ///
+ /// Stopping at the first `true`:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = a.iter();
+ ///
+ /// assert!(iter.any(|&x| x != 2));
+ ///
+ /// // we can still use `iter`, as there are more elements.
+ /// assert_eq!(iter.next(), Some(&2));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn any<F>(&mut self, f: F) -> bool
+ where
+ Self: Sized,
+ F: FnMut(Self::Item) -> bool,
+ {
+ #[inline]
+ fn check<T>(mut f: impl FnMut(T) -> bool) -> impl FnMut((), T) -> ControlFlow<()> {
+ move |(), x| {
+ if f(x) { ControlFlow::BREAK } else { ControlFlow::CONTINUE }
+ }
+ }
+
+ self.try_fold((), check(f)) == ControlFlow::BREAK
+ }
+
+ /// Searches for an element of an iterator that satisfies a predicate.
+ ///
+ /// `find()` takes a closure that returns `true` or `false`. It applies
+ /// this closure to each element of the iterator, and if any of them return
+ /// `true`, then `find()` returns [`Some(element)`]. If they all return
+ /// `false`, it returns [`None`].
+ ///
+ /// `find()` is short-circuiting; in other words, it will stop processing
+ /// as soon as the closure returns `true`.
+ ///
+ /// Because `find()` takes a reference, and many iterators iterate over
+ /// references, this leads to a possibly confusing situation where the
+ /// argument is a double reference. You can see this effect in the
+ /// examples below, with `&&x`.
+ ///
+ /// [`Some(element)`]: Some
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// assert_eq!(a.iter().find(|&&x| x == 2), Some(&2));
+ ///
+ /// assert_eq!(a.iter().find(|&&x| x == 5), None);
+ /// ```
+ ///
+ /// Stopping at the first `true`:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = a.iter();
+ ///
+ /// assert_eq!(iter.find(|&&x| x == 2), Some(&2));
+ ///
+ /// // we can still use `iter`, as there are more elements.
+ /// assert_eq!(iter.next(), Some(&3));
+ /// ```
+ ///
+ /// Note that `iter.find(f)` is equivalent to `iter.filter(f).next()`.
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn find<P>(&mut self, predicate: P) -> Option<Self::Item>
+ where
+ Self: Sized,
+ P: FnMut(&Self::Item) -> bool,
+ {
+ #[inline]
+ fn check<T>(mut predicate: impl FnMut(&T) -> bool) -> impl FnMut((), T) -> ControlFlow<T> {
+ move |(), x| {
+ if predicate(&x) { ControlFlow::Break(x) } else { ControlFlow::CONTINUE }
+ }
+ }
+
+ self.try_fold((), check(predicate)).break_value()
+ }
+
+ /// Applies a function to the elements of the iterator and returns
+ /// the first non-[`None`] result.
+ ///
+ /// `iter.find_map(f)` is equivalent to `iter.filter_map(f).next()`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let a = ["lol", "NaN", "2", "5"];
+ ///
+ /// let first_number = a.iter().find_map(|s| s.parse().ok());
+ ///
+ /// assert_eq!(first_number, Some(2));
+ /// ```
+ #[inline]
+ #[stable(feature = "iterator_find_map", since = "1.30.0")]
+ fn find_map<B, F>(&mut self, f: F) -> Option<B>
+ where
+ Self: Sized,
+ F: FnMut(Self::Item) -> Option<B>,
+ {
+ #[inline]
+ fn check<T, B>(mut f: impl FnMut(T) -> Option<B>) -> impl FnMut((), T) -> ControlFlow<B> {
+ move |(), x| match f(x) {
+ Some(x) => ControlFlow::Break(x),
+ None => ControlFlow::CONTINUE,
+ }
+ }
+
+ self.try_fold((), check(f)).break_value()
+ }
+
+ /// Applies a function to the elements of the iterator and returns
+ /// the first true result or the first error.
+ ///
+ /// The return type of this method depends on the return type of the closure.
+ /// If you return `Result<bool, E>` from the closure, you'll get a `Result<Option<Self::Item>, E>`.
+ /// If you return `Option<bool>` from the closure, you'll get an `Option<Option<Self::Item>>`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(try_find)]
+ ///
+ /// let a = ["1", "2", "lol", "NaN", "5"];
+ ///
+ /// let is_my_num = |s: &str, search: i32| -> Result<bool, std::num::ParseIntError> {
+ /// Ok(s.parse::<i32>()? == search)
+ /// };
+ ///
+ /// let result = a.iter().try_find(|&&s| is_my_num(s, 2));
+ /// assert_eq!(result, Ok(Some(&"2")));
+ ///
+ /// let result = a.iter().try_find(|&&s| is_my_num(s, 5));
+ /// assert!(result.is_err());
+ /// ```
+ ///
+ /// This also supports other types which implement `Try`, not just `Result`.
+ /// ```
+ /// #![feature(try_find)]
+ ///
+ /// use std::num::NonZeroU32;
+ /// let a = [3, 5, 7, 4, 9, 0, 11];
+ /// let result = a.iter().try_find(|&&x| NonZeroU32::new(x).map(|y| y.is_power_of_two()));
+ /// assert_eq!(result, Some(Some(&4)));
+ /// let result = a.iter().take(3).try_find(|&&x| NonZeroU32::new(x).map(|y| y.is_power_of_two()));
+ /// assert_eq!(result, Some(None));
+ /// let result = a.iter().rev().try_find(|&&x| NonZeroU32::new(x).map(|y| y.is_power_of_two()));
+ /// assert_eq!(result, None);
+ /// ```
+ #[inline]
+ #[unstable(feature = "try_find", reason = "new API", issue = "63178")]
+ fn try_find<F, R>(&mut self, f: F) -> ChangeOutputType<R, Option<Self::Item>>
+ where
+ Self: Sized,
+ F: FnMut(&Self::Item) -> R,
+ R: Try<Output = bool>,
+ R::Residual: Residual<Option<Self::Item>>,
+ {
+ #[inline]
+ fn check<I, V, R>(
+ mut f: impl FnMut(&I) -> V,
+ ) -> impl FnMut((), I) -> ControlFlow<R::TryType>
+ where
+ V: Try<Output = bool, Residual = R>,
+ R: Residual<Option<I>>,
+ {
+ move |(), x| match f(&x).branch() {
+ ControlFlow::Continue(false) => ControlFlow::CONTINUE,
+ ControlFlow::Continue(true) => ControlFlow::Break(Try::from_output(Some(x))),
+ ControlFlow::Break(r) => ControlFlow::Break(FromResidual::from_residual(r)),
+ }
+ }
+
+ match self.try_fold((), check(f)) {
+ ControlFlow::Break(x) => x,
+ ControlFlow::Continue(()) => Try::from_output(None),
+ }
+ }
+
+ /// Searches for an element in an iterator, returning its index.
+ ///
+ /// `position()` takes a closure that returns `true` or `false`. It applies
+ /// this closure to each element of the iterator, and if one of them
+ /// returns `true`, then `position()` returns [`Some(index)`]. If all of
+ /// them return `false`, it returns [`None`].
+ ///
+ /// `position()` is short-circuiting; in other words, it will stop
+ /// processing as soon as it finds a `true`.
+ ///
+ /// # Overflow Behavior
+ ///
+ /// This method does not guard against overflows, so if there are more
+ /// than [`usize::MAX`] non-matching elements, it either produces the wrong
+ /// result or panics. If debug assertions are enabled, a panic is
+ /// guaranteed.
+ ///
+ /// # Panics
+ ///
+ /// This function might panic if the iterator has more than `usize::MAX`
+ /// non-matching elements.
+ ///
+ /// [`Some(index)`]: Some
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// assert_eq!(a.iter().position(|&x| x == 2), Some(1));
+ ///
+ /// assert_eq!(a.iter().position(|&x| x == 5), None);
+ /// ```
+ ///
+ /// Stopping at the first `true`:
+ ///
+ /// ```
+ /// let a = [1, 2, 3, 4];
+ ///
+ /// let mut iter = a.iter();
+ ///
+ /// assert_eq!(iter.position(|&x| x >= 2), Some(1));
+ ///
+ /// // we can still use `iter`, as there are more elements.
+ /// assert_eq!(iter.next(), Some(&3));
+ ///
+ /// // The returned index depends on iterator state
+ /// assert_eq!(iter.position(|&x| x == 4), Some(0));
+ ///
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn position<P>(&mut self, predicate: P) -> Option<usize>
+ where
+ Self: Sized,
+ P: FnMut(Self::Item) -> bool,
+ {
+ #[inline]
+ fn check<T>(
+ mut predicate: impl FnMut(T) -> bool,
+ ) -> impl FnMut(usize, T) -> ControlFlow<usize, usize> {
+ #[rustc_inherit_overflow_checks]
+ move |i, x| {
+ if predicate(x) { ControlFlow::Break(i) } else { ControlFlow::Continue(i + 1) }
+ }
+ }
+
+ self.try_fold(0, check(predicate)).break_value()
+ }
+
+ /// Searches for an element in an iterator from the right, returning its
+ /// index.
+ ///
+ /// `rposition()` takes a closure that returns `true` or `false`. It applies
+ /// this closure to each element of the iterator, starting from the end,
+ /// and if one of them returns `true`, then `rposition()` returns
+ /// [`Some(index)`]. If all of them return `false`, it returns [`None`].
+ ///
+ /// `rposition()` is short-circuiting; in other words, it will stop
+ /// processing as soon as it finds a `true`.
+ ///
+ /// [`Some(index)`]: Some
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// assert_eq!(a.iter().rposition(|&x| x == 3), Some(2));
+ ///
+ /// assert_eq!(a.iter().rposition(|&x| x == 5), None);
+ /// ```
+ ///
+ /// Stopping at the first `true`:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = a.iter();
+ ///
+ /// assert_eq!(iter.rposition(|&x| x == 2), Some(1));
+ ///
+ /// // we can still use `iter`, as there are more elements.
+ /// assert_eq!(iter.next(), Some(&1));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn rposition<P>(&mut self, predicate: P) -> Option<usize>
+ where
+ P: FnMut(Self::Item) -> bool,
+ Self: Sized + ExactSizeIterator + DoubleEndedIterator,
+ {
+ // No need for an overflow check here, because `ExactSizeIterator`
+ // implies that the number of elements fits into a `usize`.
+ #[inline]
+ fn check<T>(
+ mut predicate: impl FnMut(T) -> bool,
+ ) -> impl FnMut(usize, T) -> ControlFlow<usize, usize> {
+ move |i, x| {
+ let i = i - 1;
+ if predicate(x) { ControlFlow::Break(i) } else { ControlFlow::Continue(i) }
+ }
+ }
+
+ let n = self.len();
+ self.try_rfold(n, check(predicate)).break_value()
+ }
+
+ /// Returns the maximum element of an iterator.
+ ///
+ /// If several elements are equally maximum, the last element is
+ /// returned. If the iterator is empty, [`None`] is returned.
+ ///
+ /// Note that [`f32`]/[`f64`] doesn't implement [`Ord`] due to NaN being
+ /// incomparable. You can work around this by using [`Iterator::reduce`]:
+ /// ```
+ /// assert_eq!(
+ /// [2.4, f32::NAN, 1.3]
+ /// .into_iter()
+ /// .reduce(f32::max)
+ /// .unwrap(),
+ /// 2.4
+ /// );
+ /// ```
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ /// let b: Vec<u32> = Vec::new();
+ ///
+ /// assert_eq!(a.iter().max(), Some(&3));
+ /// assert_eq!(b.iter().max(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn max(self) -> Option<Self::Item>
+ where
+ Self: Sized,
+ Self::Item: Ord,
+ {
+ self.max_by(Ord::cmp)
+ }
+
+ /// Returns the minimum element of an iterator.
+ ///
+ /// If several elements are equally minimum, the first element is returned.
+ /// If the iterator is empty, [`None`] is returned.
+ ///
+ /// Note that [`f32`]/[`f64`] doesn't implement [`Ord`] due to NaN being
+ /// incomparable. You can work around this by using [`Iterator::reduce`]:
+ /// ```
+ /// assert_eq!(
+ /// [2.4, f32::NAN, 1.3]
+ /// .into_iter()
+ /// .reduce(f32::min)
+ /// .unwrap(),
+ /// 1.3
+ /// );
+ /// ```
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ /// let b: Vec<u32> = Vec::new();
+ ///
+ /// assert_eq!(a.iter().min(), Some(&1));
+ /// assert_eq!(b.iter().min(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn min(self) -> Option<Self::Item>
+ where
+ Self: Sized,
+ Self::Item: Ord,
+ {
+ self.min_by(Ord::cmp)
+ }
+
+ /// Returns the element that gives the maximum value from the
+ /// specified function.
+ ///
+ /// If several elements are equally maximum, the last element is
+ /// returned. If the iterator is empty, [`None`] is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let a = [-3_i32, 0, 1, 5, -10];
+ /// assert_eq!(*a.iter().max_by_key(|x| x.abs()).unwrap(), -10);
+ /// ```
+ #[inline]
+ #[stable(feature = "iter_cmp_by_key", since = "1.6.0")]
+ fn max_by_key<B: Ord, F>(self, f: F) -> Option<Self::Item>
+ where
+ Self: Sized,
+ F: FnMut(&Self::Item) -> B,
+ {
+ #[inline]
+ fn key<T, B>(mut f: impl FnMut(&T) -> B) -> impl FnMut(T) -> (B, T) {
+ move |x| (f(&x), x)
+ }
+
+ #[inline]
+ fn compare<T, B: Ord>((x_p, _): &(B, T), (y_p, _): &(B, T)) -> Ordering {
+ x_p.cmp(y_p)
+ }
+
+ let (_, x) = self.map(key(f)).max_by(compare)?;
+ Some(x)
+ }
+
+ /// Returns the element that gives the maximum value with respect to the
+ /// specified comparison function.
+ ///
+ /// If several elements are equally maximum, the last element is
+ /// returned. If the iterator is empty, [`None`] is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let a = [-3_i32, 0, 1, 5, -10];
+ /// assert_eq!(*a.iter().max_by(|x, y| x.cmp(y)).unwrap(), 5);
+ /// ```
+ #[inline]
+ #[stable(feature = "iter_max_by", since = "1.15.0")]
+ fn max_by<F>(self, compare: F) -> Option<Self::Item>
+ where
+ Self: Sized,
+ F: FnMut(&Self::Item, &Self::Item) -> Ordering,
+ {
+ #[inline]
+ fn fold<T>(mut compare: impl FnMut(&T, &T) -> Ordering) -> impl FnMut(T, T) -> T {
+ move |x, y| cmp::max_by(x, y, &mut compare)
+ }
+
+ self.reduce(fold(compare))
+ }
+
+ /// Returns the element that gives the minimum value from the
+ /// specified function.
+ ///
+ /// If several elements are equally minimum, the first element is
+ /// returned. If the iterator is empty, [`None`] is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let a = [-3_i32, 0, 1, 5, -10];
+ /// assert_eq!(*a.iter().min_by_key(|x| x.abs()).unwrap(), 0);
+ /// ```
+ #[inline]
+ #[stable(feature = "iter_cmp_by_key", since = "1.6.0")]
+ fn min_by_key<B: Ord, F>(self, f: F) -> Option<Self::Item>
+ where
+ Self: Sized,
+ F: FnMut(&Self::Item) -> B,
+ {
+ #[inline]
+ fn key<T, B>(mut f: impl FnMut(&T) -> B) -> impl FnMut(T) -> (B, T) {
+ move |x| (f(&x), x)
+ }
+
+ #[inline]
+ fn compare<T, B: Ord>((x_p, _): &(B, T), (y_p, _): &(B, T)) -> Ordering {
+ x_p.cmp(y_p)
+ }
+
+ let (_, x) = self.map(key(f)).min_by(compare)?;
+ Some(x)
+ }
+
+ /// Returns the element that gives the minimum value with respect to the
+ /// specified comparison function.
+ ///
+ /// If several elements are equally minimum, the first element is
+ /// returned. If the iterator is empty, [`None`] is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let a = [-3_i32, 0, 1, 5, -10];
+ /// assert_eq!(*a.iter().min_by(|x, y| x.cmp(y)).unwrap(), -10);
+ /// ```
+ #[inline]
+ #[stable(feature = "iter_min_by", since = "1.15.0")]
+ fn min_by<F>(self, compare: F) -> Option<Self::Item>
+ where
+ Self: Sized,
+ F: FnMut(&Self::Item, &Self::Item) -> Ordering,
+ {
+ #[inline]
+ fn fold<T>(mut compare: impl FnMut(&T, &T) -> Ordering) -> impl FnMut(T, T) -> T {
+ move |x, y| cmp::min_by(x, y, &mut compare)
+ }
+
+ self.reduce(fold(compare))
+ }
+
+ /// Reverses an iterator's direction.
+ ///
+ /// Usually, iterators iterate from left to right. After using `rev()`,
+ /// an iterator will instead iterate from right to left.
+ ///
+ /// This is only possible if the iterator has an end, so `rev()` only
+ /// works on [`DoubleEndedIterator`]s.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = a.iter().rev();
+ ///
+ /// assert_eq!(iter.next(), Some(&3));
+ /// assert_eq!(iter.next(), Some(&2));
+ /// assert_eq!(iter.next(), Some(&1));
+ ///
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ #[inline]
+ #[doc(alias = "reverse")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn rev(self) -> Rev<Self>
+ where
+ Self: Sized + DoubleEndedIterator,
+ {
+ Rev::new(self)
+ }
+
+ /// Converts an iterator of pairs into a pair of containers.
+ ///
+ /// `unzip()` consumes an entire iterator of pairs, producing two
+ /// collections: one from the left elements of the pairs, and one
+ /// from the right elements.
+ ///
+ /// This function is, in some sense, the opposite of [`zip`].
+ ///
+ /// [`zip`]: Iterator::zip
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [(1, 2), (3, 4), (5, 6)];
+ ///
+ /// let (left, right): (Vec<_>, Vec<_>) = a.iter().cloned().unzip();
+ ///
+ /// assert_eq!(left, [1, 3, 5]);
+ /// assert_eq!(right, [2, 4, 6]);
+ ///
+ /// // you can also unzip multiple nested tuples at once
+ /// let a = [(1, (2, 3)), (4, (5, 6))];
+ ///
+ /// let (x, (y, z)): (Vec<_>, (Vec<_>, Vec<_>)) = a.iter().cloned().unzip();
+ /// assert_eq!(x, [1, 4]);
+ /// assert_eq!(y, [2, 5]);
+ /// assert_eq!(z, [3, 6]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn unzip<A, B, FromA, FromB>(self) -> (FromA, FromB)
+ where
+ FromA: Default + Extend<A>,
+ FromB: Default + Extend<B>,
+ Self: Sized + Iterator<Item = (A, B)>,
+ {
+ let mut unzipped: (FromA, FromB) = Default::default();
+ unzipped.extend(self);
+ unzipped
+ }
+
+ /// Creates an iterator which copies all of its elements.
+ ///
+ /// This is useful when you have an iterator over `&T`, but you need an
+ /// iterator over `T`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let v_copied: Vec<_> = a.iter().copied().collect();
+ ///
+ /// // copied is the same as .map(|&x| x)
+ /// let v_map: Vec<_> = a.iter().map(|&x| x).collect();
+ ///
+ /// assert_eq!(v_copied, vec![1, 2, 3]);
+ /// assert_eq!(v_map, vec![1, 2, 3]);
+ /// ```
+ #[stable(feature = "iter_copied", since = "1.36.0")]
+ fn copied<'a, T: 'a>(self) -> Copied<Self>
+ where
+ Self: Sized + Iterator<Item = &'a T>,
+ T: Copy,
+ {
+ Copied::new(self)
+ }
+
+ /// Creates an iterator which [`clone`]s all of its elements.
+ ///
+ /// This is useful when you have an iterator over `&T`, but you need an
+ /// iterator over `T`.
+ ///
+ /// There is no guarantee whatsoever that the `clone` method is actually
+ /// called *or* optimized away, so code should not depend on either
+ /// behavior.
+ ///
+ /// [`clone`]: Clone::clone
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let v_cloned: Vec<_> = a.iter().cloned().collect();
+ ///
+ /// // cloned is the same as .map(|&x| x), for integers
+ /// let v_map: Vec<_> = a.iter().map(|&x| x).collect();
+ ///
+ /// assert_eq!(v_cloned, vec![1, 2, 3]);
+ /// assert_eq!(v_map, vec![1, 2, 3]);
+ /// ```
+ ///
+ /// To get the best performance, try to clone late:
+ ///
+ /// ```
+ /// let a = [vec![0_u8, 1, 2], vec![3, 4], vec![23]];
+ /// // don't do this:
+ /// let slower: Vec<_> = a.iter().cloned().filter(|s| s.len() == 1).collect();
+ /// assert_eq!(&[vec![23]], &slower[..]);
+ /// // instead call `cloned` late
+ /// let faster: Vec<_> = a.iter().filter(|s| s.len() == 1).cloned().collect();
+ /// assert_eq!(&[vec![23]], &faster[..]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn cloned<'a, T: 'a>(self) -> Cloned<Self>
+ where
+ Self: Sized + Iterator<Item = &'a T>,
+ T: Clone,
+ {
+ Cloned::new(self)
+ }
+
+ /// Repeats an iterator endlessly.
+ ///
+ /// Instead of stopping at [`None`], the iterator will start again from the
+ /// beginning. After iterating again, it will start at the beginning again.
+ /// And again. And again. Forever. Note that if the original iterator is
+ /// empty, the resulting iterator will also be empty.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut it = a.iter().cycle();
+ ///
+ /// assert_eq!(it.next(), Some(&1));
+ /// assert_eq!(it.next(), Some(&2));
+ /// assert_eq!(it.next(), Some(&3));
+ /// assert_eq!(it.next(), Some(&1));
+ /// assert_eq!(it.next(), Some(&2));
+ /// assert_eq!(it.next(), Some(&3));
+ /// assert_eq!(it.next(), Some(&1));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ fn cycle(self) -> Cycle<Self>
+ where
+ Self: Sized + Clone,
+ {
+ Cycle::new(self)
+ }
+
+ /// Sums the elements of an iterator.
+ ///
+ /// Takes each element, adds them together, and returns the result.
+ ///
+ /// An empty iterator returns the zero value of the type.
+ ///
+ /// # Panics
+ ///
+ /// When calling `sum()` and a primitive integer type is being returned, this
+ /// method will panic if the computation overflows and debug assertions are
+ /// enabled.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ /// let sum: i32 = a.iter().sum();
+ ///
+ /// assert_eq!(sum, 6);
+ /// ```
+ #[stable(feature = "iter_arith", since = "1.11.0")]
+ fn sum<S>(self) -> S
+ where
+ Self: Sized,
+ S: Sum<Self::Item>,
+ {
+ Sum::sum(self)
+ }
+
+ /// Iterates over the entire iterator, multiplying all the elements.
+ ///
+ /// An empty iterator returns the one value of the type.
+ ///
+ /// # Panics
+ ///
+ /// When calling `product()` and a primitive integer type is being returned,
+ /// this method will panic if the computation overflows and debug assertions are
+ /// enabled.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// fn factorial(n: u32) -> u32 {
+ /// (1..=n).product()
+ /// }
+ /// assert_eq!(factorial(0), 1);
+ /// assert_eq!(factorial(1), 1);
+ /// assert_eq!(factorial(5), 120);
+ /// ```
+ #[stable(feature = "iter_arith", since = "1.11.0")]
+ fn product<P>(self) -> P
+ where
+ Self: Sized,
+ P: Product<Self::Item>,
+ {
+ Product::product(self)
+ }
+
+ /// [Lexicographically](Ord#lexicographical-comparison) compares the elements of this [`Iterator`] with those
+ /// of another.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cmp::Ordering;
+ ///
+ /// assert_eq!([1].iter().cmp([1].iter()), Ordering::Equal);
+ /// assert_eq!([1].iter().cmp([1, 2].iter()), Ordering::Less);
+ /// assert_eq!([1, 2].iter().cmp([1].iter()), Ordering::Greater);
+ /// ```
+ #[stable(feature = "iter_order", since = "1.5.0")]
+ fn cmp<I>(self, other: I) -> Ordering
+ where
+ I: IntoIterator<Item = Self::Item>,
+ Self::Item: Ord,
+ Self: Sized,
+ {
+ self.cmp_by(other, |x, y| x.cmp(&y))
+ }
+
+ /// [Lexicographically](Ord#lexicographical-comparison) compares the elements of this [`Iterator`] with those
+ /// of another with respect to the specified comparison function.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(iter_order_by)]
+ ///
+ /// use std::cmp::Ordering;
+ ///
+ /// let xs = [1, 2, 3, 4];
+ /// let ys = [1, 4, 9, 16];
+ ///
+ /// assert_eq!(xs.iter().cmp_by(&ys, |&x, &y| x.cmp(&y)), Ordering::Less);
+ /// assert_eq!(xs.iter().cmp_by(&ys, |&x, &y| (x * x).cmp(&y)), Ordering::Equal);
+ /// assert_eq!(xs.iter().cmp_by(&ys, |&x, &y| (2 * x).cmp(&y)), Ordering::Greater);
+ /// ```
+ #[unstable(feature = "iter_order_by", issue = "64295")]
+ fn cmp_by<I, F>(mut self, other: I, mut cmp: F) -> Ordering
+ where
+ Self: Sized,
+ I: IntoIterator,
+ F: FnMut(Self::Item, I::Item) -> Ordering,
+ {
+ let mut other = other.into_iter();
+
+ loop {
+ let x = match self.next() {
+ None => {
+ if other.next().is_none() {
+ return Ordering::Equal;
+ } else {
+ return Ordering::Less;
+ }
+ }
+ Some(val) => val,
+ };
+
+ let y = match other.next() {
+ None => return Ordering::Greater,
+ Some(val) => val,
+ };
+
+ match cmp(x, y) {
+ Ordering::Equal => (),
+ non_eq => return non_eq,
+ }
+ }
+ }
+
+ /// [Lexicographically](Ord#lexicographical-comparison) compares the elements of this [`Iterator`] with those
+ /// of another.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cmp::Ordering;
+ ///
+ /// assert_eq!([1.].iter().partial_cmp([1.].iter()), Some(Ordering::Equal));
+ /// assert_eq!([1.].iter().partial_cmp([1., 2.].iter()), Some(Ordering::Less));
+ /// assert_eq!([1., 2.].iter().partial_cmp([1.].iter()), Some(Ordering::Greater));
+ ///
+ /// assert_eq!([f64::NAN].iter().partial_cmp([1.].iter()), None);
+ /// ```
+ #[stable(feature = "iter_order", since = "1.5.0")]
+ fn partial_cmp<I>(self, other: I) -> Option<Ordering>
+ where
+ I: IntoIterator,
+ Self::Item: PartialOrd<I::Item>,
+ Self: Sized,
+ {
+ self.partial_cmp_by(other, |x, y| x.partial_cmp(&y))
+ }
+
+ /// [Lexicographically](Ord#lexicographical-comparison) compares the elements of this [`Iterator`] with those
+ /// of another with respect to the specified comparison function.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(iter_order_by)]
+ ///
+ /// use std::cmp::Ordering;
+ ///
+ /// let xs = [1.0, 2.0, 3.0, 4.0];
+ /// let ys = [1.0, 4.0, 9.0, 16.0];
+ ///
+ /// assert_eq!(
+ /// xs.iter().partial_cmp_by(&ys, |&x, &y| x.partial_cmp(&y)),
+ /// Some(Ordering::Less)
+ /// );
+ /// assert_eq!(
+ /// xs.iter().partial_cmp_by(&ys, |&x, &y| (x * x).partial_cmp(&y)),
+ /// Some(Ordering::Equal)
+ /// );
+ /// assert_eq!(
+ /// xs.iter().partial_cmp_by(&ys, |&x, &y| (2.0 * x).partial_cmp(&y)),
+ /// Some(Ordering::Greater)
+ /// );
+ /// ```
+ #[unstable(feature = "iter_order_by", issue = "64295")]
+ fn partial_cmp_by<I, F>(mut self, other: I, mut partial_cmp: F) -> Option<Ordering>
+ where
+ Self: Sized,
+ I: IntoIterator,
+ F: FnMut(Self::Item, I::Item) -> Option<Ordering>,
+ {
+ let mut other = other.into_iter();
+
+ loop {
+ let x = match self.next() {
+ None => {
+ if other.next().is_none() {
+ return Some(Ordering::Equal);
+ } else {
+ return Some(Ordering::Less);
+ }
+ }
+ Some(val) => val,
+ };
+
+ let y = match other.next() {
+ None => return Some(Ordering::Greater),
+ Some(val) => val,
+ };
+
+ match partial_cmp(x, y) {
+ Some(Ordering::Equal) => (),
+ non_eq => return non_eq,
+ }
+ }
+ }
+
+ /// Determines if the elements of this [`Iterator`] are equal to those of
+ /// another.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!([1].iter().eq([1].iter()), true);
+ /// assert_eq!([1].iter().eq([1, 2].iter()), false);
+ /// ```
+ #[stable(feature = "iter_order", since = "1.5.0")]
+ fn eq<I>(self, other: I) -> bool
+ where
+ I: IntoIterator,
+ Self::Item: PartialEq<I::Item>,
+ Self: Sized,
+ {
+ self.eq_by(other, |x, y| x == y)
+ }
+
+ /// Determines if the elements of this [`Iterator`] are equal to those of
+ /// another with respect to the specified equality function.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(iter_order_by)]
+ ///
+ /// let xs = [1, 2, 3, 4];
+ /// let ys = [1, 4, 9, 16];
+ ///
+ /// assert!(xs.iter().eq_by(&ys, |&x, &y| x * x == y));
+ /// ```
+ #[unstable(feature = "iter_order_by", issue = "64295")]
+ fn eq_by<I, F>(mut self, other: I, mut eq: F) -> bool
+ where
+ Self: Sized,
+ I: IntoIterator,
+ F: FnMut(Self::Item, I::Item) -> bool,
+ {
+ let mut other = other.into_iter();
+
+ loop {
+ let x = match self.next() {
+ None => return other.next().is_none(),
+ Some(val) => val,
+ };
+
+ let y = match other.next() {
+ None => return false,
+ Some(val) => val,
+ };
+
+ if !eq(x, y) {
+ return false;
+ }
+ }
+ }
+
+ /// Determines if the elements of this [`Iterator`] are unequal to those of
+ /// another.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!([1].iter().ne([1].iter()), false);
+ /// assert_eq!([1].iter().ne([1, 2].iter()), true);
+ /// ```
+ #[stable(feature = "iter_order", since = "1.5.0")]
+ fn ne<I>(self, other: I) -> bool
+ where
+ I: IntoIterator,
+ Self::Item: PartialEq<I::Item>,
+ Self: Sized,
+ {
+ !self.eq(other)
+ }
+
+ /// Determines if the elements of this [`Iterator`] are [lexicographically](Ord#lexicographical-comparison)
+ /// less than those of another.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!([1].iter().lt([1].iter()), false);
+ /// assert_eq!([1].iter().lt([1, 2].iter()), true);
+ /// assert_eq!([1, 2].iter().lt([1].iter()), false);
+ /// assert_eq!([1, 2].iter().lt([1, 2].iter()), false);
+ /// ```
+ #[stable(feature = "iter_order", since = "1.5.0")]
+ fn lt<I>(self, other: I) -> bool
+ where
+ I: IntoIterator,
+ Self::Item: PartialOrd<I::Item>,
+ Self: Sized,
+ {
+ self.partial_cmp(other) == Some(Ordering::Less)
+ }
+
+ /// Determines if the elements of this [`Iterator`] are [lexicographically](Ord#lexicographical-comparison)
+ /// less or equal to those of another.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!([1].iter().le([1].iter()), true);
+ /// assert_eq!([1].iter().le([1, 2].iter()), true);
+ /// assert_eq!([1, 2].iter().le([1].iter()), false);
+ /// assert_eq!([1, 2].iter().le([1, 2].iter()), true);
+ /// ```
+ #[stable(feature = "iter_order", since = "1.5.0")]
+ fn le<I>(self, other: I) -> bool
+ where
+ I: IntoIterator,
+ Self::Item: PartialOrd<I::Item>,
+ Self: Sized,
+ {
+ matches!(self.partial_cmp(other), Some(Ordering::Less | Ordering::Equal))
+ }
+
+ /// Determines if the elements of this [`Iterator`] are [lexicographically](Ord#lexicographical-comparison)
+ /// greater than those of another.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!([1].iter().gt([1].iter()), false);
+ /// assert_eq!([1].iter().gt([1, 2].iter()), false);
+ /// assert_eq!([1, 2].iter().gt([1].iter()), true);
+ /// assert_eq!([1, 2].iter().gt([1, 2].iter()), false);
+ /// ```
+ #[stable(feature = "iter_order", since = "1.5.0")]
+ fn gt<I>(self, other: I) -> bool
+ where
+ I: IntoIterator,
+ Self::Item: PartialOrd<I::Item>,
+ Self: Sized,
+ {
+ self.partial_cmp(other) == Some(Ordering::Greater)
+ }
+
+ /// Determines if the elements of this [`Iterator`] are [lexicographically](Ord#lexicographical-comparison)
+ /// greater than or equal to those of another.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!([1].iter().ge([1].iter()), true);
+ /// assert_eq!([1].iter().ge([1, 2].iter()), false);
+ /// assert_eq!([1, 2].iter().ge([1].iter()), true);
+ /// assert_eq!([1, 2].iter().ge([1, 2].iter()), true);
+ /// ```
+ #[stable(feature = "iter_order", since = "1.5.0")]
+ fn ge<I>(self, other: I) -> bool
+ where
+ I: IntoIterator,
+ Self::Item: PartialOrd<I::Item>,
+ Self: Sized,
+ {
+ matches!(self.partial_cmp(other), Some(Ordering::Greater | Ordering::Equal))
+ }
+
+ /// Checks if the elements of this iterator are sorted.
+ ///
+ /// That is, for each element `a` and its following element `b`, `a <= b` must hold. If the
+ /// iterator yields exactly zero or one element, `true` is returned.
+ ///
+ /// Note that if `Self::Item` is only `PartialOrd`, but not `Ord`, the above definition
+ /// implies that this function returns `false` if any two consecutive items are not
+ /// comparable.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(is_sorted)]
+ ///
+ /// assert!([1, 2, 2, 9].iter().is_sorted());
+ /// assert!(![1, 3, 2, 4].iter().is_sorted());
+ /// assert!([0].iter().is_sorted());
+ /// assert!(std::iter::empty::<i32>().is_sorted());
+ /// assert!(![0.0, 1.0, f32::NAN].iter().is_sorted());
+ /// ```
+ #[inline]
+ #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
+ fn is_sorted(self) -> bool
+ where
+ Self: Sized,
+ Self::Item: PartialOrd,
+ {
+ self.is_sorted_by(PartialOrd::partial_cmp)
+ }
+
+ /// Checks if the elements of this iterator are sorted using the given comparator function.
+ ///
+ /// Instead of using `PartialOrd::partial_cmp`, this function uses the given `compare`
+ /// function to determine the ordering of two elements. Apart from that, it's equivalent to
+ /// [`is_sorted`]; see its documentation for more information.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(is_sorted)]
+ ///
+ /// assert!([1, 2, 2, 9].iter().is_sorted_by(|a, b| a.partial_cmp(b)));
+ /// assert!(![1, 3, 2, 4].iter().is_sorted_by(|a, b| a.partial_cmp(b)));
+ /// assert!([0].iter().is_sorted_by(|a, b| a.partial_cmp(b)));
+ /// assert!(std::iter::empty::<i32>().is_sorted_by(|a, b| a.partial_cmp(b)));
+ /// assert!(![0.0, 1.0, f32::NAN].iter().is_sorted_by(|a, b| a.partial_cmp(b)));
+ /// ```
+ ///
+ /// [`is_sorted`]: Iterator::is_sorted
+ #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
+ fn is_sorted_by<F>(mut self, compare: F) -> bool
+ where
+ Self: Sized,
+ F: FnMut(&Self::Item, &Self::Item) -> Option<Ordering>,
+ {
+ #[inline]
+ fn check<'a, T>(
+ last: &'a mut T,
+ mut compare: impl FnMut(&T, &T) -> Option<Ordering> + 'a,
+ ) -> impl FnMut(T) -> bool + 'a {
+ move |curr| {
+ if let Some(Ordering::Greater) | None = compare(&last, &curr) {
+ return false;
+ }
+ *last = curr;
+ true
+ }
+ }
+
+ let mut last = match self.next() {
+ Some(e) => e,
+ None => return true,
+ };
+
+ self.all(check(&mut last, compare))
+ }
+
+ /// Checks if the elements of this iterator are sorted using the given key extraction
+ /// function.
+ ///
+ /// Instead of comparing the iterator's elements directly, this function compares the keys of
+ /// the elements, as determined by `f`. Apart from that, it's equivalent to [`is_sorted`]; see
+ /// its documentation for more information.
+ ///
+ /// [`is_sorted`]: Iterator::is_sorted
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(is_sorted)]
+ ///
+ /// assert!(["c", "bb", "aaa"].iter().is_sorted_by_key(|s| s.len()));
+ /// assert!(![-2i32, -1, 0, 3].iter().is_sorted_by_key(|n| n.abs()));
+ /// ```
+ #[inline]
+ #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
+ fn is_sorted_by_key<F, K>(self, f: F) -> bool
+ where
+ Self: Sized,
+ F: FnMut(Self::Item) -> K,
+ K: PartialOrd,
+ {
+ self.map(f).is_sorted()
+ }
+
+ /// See [TrustedRandomAccess][super::super::TrustedRandomAccess]
+ // The unusual name is to avoid name collisions in method resolution
+ // see #76479.
+ #[inline]
+ #[doc(hidden)]
+ #[unstable(feature = "trusted_random_access", issue = "none")]
+ unsafe fn __iterator_get_unchecked(&mut self, _idx: usize) -> Self::Item
+ where
+ Self: TrustedRandomAccessNoCoerce,
+ {
+ unreachable!("Always specialized");
+ }
+}
+
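+// `&mut I` simply forwards to the underlying iterator, which means an
+// iterator can be partially consumed through a mutable borrow and then used
+// again afterwards. A rough usage sketch:
+//
+//     let mut iter = [1, 2, 3].iter();
+//     let head: Vec<_> = (&mut iter).take(2).collect();
+//     assert_eq!(iter.next(), Some(&3));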
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: Iterator + ?Sized> Iterator for &mut I {
+ type Item = I::Item;
+ #[inline]
+ fn next(&mut self) -> Option<I::Item> {
+ (**self).next()
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (**self).size_hint()
+ }
+ fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ (**self).advance_by(n)
+ }
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ (**self).nth(n)
+ }
+}
diff --git a/library/core/src/iter/traits/marker.rs b/library/core/src/iter/traits/marker.rs
new file mode 100644
index 000000000..da7537457
--- /dev/null
+++ b/library/core/src/iter/traits/marker.rs
@@ -0,0 +1,78 @@
+use crate::iter::Step;
+
+/// An iterator that always continues to yield `None` when exhausted.
+///
+/// Calling `next` on a fused iterator that has returned `None` once is guaranteed
+/// to return [`None`] again. This trait should be implemented by all iterators
+/// that behave this way because it allows optimizing [`Iterator::fuse()`].
+///
+/// Note: In general, you should not use `FusedIterator` in generic bounds if
+/// you need a fused iterator. Instead, you should just call [`Iterator::fuse()`]
+/// on the iterator. If the iterator is already fused, the additional [`Fuse`]
+/// wrapper will be a no-op with no performance penalty.
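+///
+/// # Examples
+///
+/// A short illustration of the guarantee, using a slice iterator (which is
+/// fused):
+///
+/// ```
+/// let mut iter = [1].iter();
+/// assert_eq!(iter.next(), Some(&1));
+/// assert_eq!(iter.next(), None);
+/// // Once exhausted, it keeps returning `None`.
+/// assert_eq!(iter.next(), None);
+/// ```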
+///
+/// [`Fuse`]: crate::iter::Fuse
+#[stable(feature = "fused", since = "1.26.0")]
+#[rustc_unsafe_specialization_marker]
+pub trait FusedIterator: Iterator {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<I: FusedIterator + ?Sized> FusedIterator for &mut I {}
+
+/// An iterator that reports an accurate length using `size_hint`.
+///
+/// The iterator must report a size hint that is either exact
+/// (lower bound equal to the upper bound) or whose upper bound is [`None`].
+/// The upper bound must only be [`None`] if the actual iterator length is
+/// larger than [`usize::MAX`]. In that case, the lower bound must be
+/// [`usize::MAX`], resulting in an [`Iterator::size_hint()`] of
+/// `(usize::MAX, None)`.
+///
+/// The iterator must produce exactly the number of elements it reported
+/// or diverge before reaching the end.
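+///
+/// # Examples
+///
+/// As an illustration of the shape of this contract (though not a proof that
+/// it is upheld), a slice iterator reports an exact size hint:
+///
+/// ```
+/// let iter = [1, 2, 3].iter();
+/// assert_eq!(iter.size_hint(), (3, Some(3)));
+/// ```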
+///
+/// # Safety
+///
+/// This trait must only be implemented when the contract is upheld. Consumers
+/// of this trait must inspect [`Iterator::size_hint()`]’s upper bound.
+#[unstable(feature = "trusted_len", issue = "37572")]
+#[rustc_unsafe_specialization_marker]
+pub unsafe trait TrustedLen: Iterator {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<I: TrustedLen + ?Sized> TrustedLen for &mut I {}
+
+/// An iterator that when yielding an item will have taken at least one element
+/// from its underlying [`SourceIter`].
+///
+/// Calling any method that advances the iterator, e.g. [`next()`] or [`try_fold()`],
+/// guarantees that for each step at least one value of the iterator's underlying source
+/// has been moved out and the result of the iterator chain could be inserted
+/// in its place, assuming structural constraints of the source allow such an insertion.
+/// In other words this trait indicates that an iterator pipeline can be collected in place.
+///
+/// The primary use of this trait is in-place iteration. Refer to the [`vec::in_place_collect`]
+/// module documentation for more information.
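+///
+/// For example (an illustration; whether the allocation is actually reused is
+/// an implementation detail of the consumer), an owning pipeline such as the
+/// following may collect in place rather than allocating anew:
+///
+/// ```
+/// let src = vec![1_i32, 2, 3];
+/// let dst: Vec<i32> = src.into_iter().map(|x| x + 1).collect();
+/// assert_eq!(dst, [2, 3, 4]);
+/// ```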
+///
+/// [`vec::in_place_collect`]: ../../../../alloc/vec/in_place_collect/index.html
+/// [`SourceIter`]: crate::iter::SourceIter
+/// [`next()`]: Iterator::next
+/// [`try_fold()`]: Iterator::try_fold
+#[unstable(issue = "none", feature = "inplace_iteration")]
+#[doc(hidden)]
+pub unsafe trait InPlaceIterable: Iterator {}
+
+/// A type that upholds all invariants of [`Step`].
+///
+/// The invariants of [`Step::steps_between()`] are a superset of the invariants
+/// of [`TrustedLen`]. As such, [`TrustedLen`] is implemented for all range
+/// types with the same generic type argument.
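+///
+/// For instance, because `u8` upholds the required invariants, ranges of `u8`
+/// are [`TrustedLen`] and report an exact size hint:
+///
+/// ```
+/// assert_eq!((0u8..10).size_hint(), (10, Some(10)));
+/// ```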
+///
+/// # Safety
+///
+/// The implementation of [`Step`] for the given type must guarantee all
+/// invariants of all methods are upheld. See the [`Step`] trait's documentation
+/// for details. Consumers are free to rely on the invariants in unsafe code.
+#[unstable(feature = "trusted_step", issue = "85731")]
+#[rustc_specialization_trait]
+pub unsafe trait TrustedStep: Step {}
diff --git a/library/core/src/iter/traits/mod.rs b/library/core/src/iter/traits/mod.rs
new file mode 100644
index 000000000..ed0fb634d
--- /dev/null
+++ b/library/core/src/iter/traits/mod.rs
@@ -0,0 +1,21 @@
+mod accum;
+mod collect;
+mod double_ended;
+mod exact_size;
+mod iterator;
+mod marker;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::{
+ accum::{Product, Sum},
+ collect::{Extend, FromIterator, IntoIterator},
+ double_ended::DoubleEndedIterator,
+ exact_size::ExactSizeIterator,
+ iterator::Iterator,
+ marker::{FusedIterator, TrustedLen},
+};
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+pub use self::marker::InPlaceIterable;
+#[unstable(feature = "trusted_step", issue = "85731")]
+pub use self::marker::TrustedStep;
diff --git a/library/core/src/lazy.rs b/library/core/src/lazy.rs
new file mode 100644
index 000000000..f8c06c3f9
--- /dev/null
+++ b/library/core/src/lazy.rs
@@ -0,0 +1 @@
+//! Lazy values and one-time initialization of static data.
diff --git a/library/core/src/lib.rs b/library/core/src/lib.rs
new file mode 100644
index 000000000..24742bb49
--- /dev/null
+++ b/library/core/src/lib.rs
@@ -0,0 +1,426 @@
+//! # The Rust Core Library
+//!
+//! The Rust Core Library is the dependency-free[^free] foundation of [The
+//! Rust Standard Library](../std/index.html). It is the portable glue
+//! between the language and its libraries, defining the intrinsic and
+//! primitive building blocks of all Rust code. It links to no
+//! upstream libraries, no system libraries, and no libc.
+//!
+//! [^free]: Strictly speaking, a few external symbols are assumed to exist
+//! (see "How to use the core library" below), but even those aren't always
+//! necessary.
+//!
+//! The core library is *minimal*: it isn't even aware of heap allocation,
+//! nor does it provide concurrency or I/O. These things require
+//! platform integration, and this library is platform-agnostic.
+//!
+//! # How to use the core library
+//!
+//! Please note that all of these details are currently not considered stable.
+//!
+// FIXME: Fill me in with more detail when the interface settles
+//! This library is built on the assumption of a few existing symbols:
+//!
+//! * `memcpy`, `memcmp`, `memset`, `strlen` - These are core memory routines which are
+//! often generated by LLVM. Additionally, this library can make explicit
+//! calls to these functions. Their signatures are the same as found in C.
+//! These functions are often provided by the system libc, but can also be
+//! provided by the [compiler-builtins crate](https://crates.io/crates/compiler_builtins).
+//!
+//! * `rust_begin_panic` - This function takes four arguments: a
+//! `fmt::Arguments`, a `&'static str`, and two `u32`'s. These four arguments
+//! dictate the panic message, the file at which panic was invoked, and the
+//! line and column inside the file. It is up to consumers of this core
+//! library to define this panic function; it is only required to never
+//! return. This requires a `lang` attribute named `panic_impl`; a sketch is
+//! given after this list.
+//!
+//! * `rust_eh_personality` - This function is used by the failure mechanisms of the
+//! compiler. This is often mapped to GCC's personality function, but crates
+//! which do not trigger a panic can be assured that this function is never
+//! called. The `lang` attribute is called `eh_personality`.
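+//!
+//! A minimal sketch of providing the panic symbol in a `no_std` binary, via
+//! the `#[panic_handler]` attribute (which defines the `panic_impl` lang
+//! item); illustrative only:
+//!
+//! ```ignore (requires building a complete no_std program)
+//! use core::panic::PanicInfo;
+//!
+//! #[panic_handler]
+//! fn panic(_info: &PanicInfo) -> ! {
+//!     loop {}
+//! }
+//! ```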
+
+// Since libcore defines many fundamental lang items, all tests live in a
+// separate crate, libcoretest, to avoid bizarre issues.
+//
+// Here we explicitly #[cfg]-out this whole crate when testing. If we don't do
+// this, both the generated test artifact and the linked libtest (which
+// transitively includes libcore) will both define the same set of lang items,
+// and this will cause the E0152 "found duplicate lang item" error. See
+// discussion in #50466 for details.
+//
+// This cfg won't affect doc tests.
+#![cfg(not(test))]
+// To run libcore tests without x.py and without ending up with two copies of libcore, Miri needs
+// to be able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
+// rustc itself never sets the feature, so this line has no effect there.
+#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
+#![stable(feature = "core", since = "1.6.0")]
+#![doc(
+ html_playground_url = "https://play.rust-lang.org/",
+ issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/",
+ test(no_crate_inject, attr(deny(warnings))),
+ test(attr(allow(dead_code, deprecated, unused_variables, unused_mut)))
+)]
+#![doc(cfg_hide(
+ not(test),
+ any(not(feature = "miri-test-libstd"), test, doctest),
+ no_fp_fmt_parse,
+ target_pointer_width = "16",
+ target_pointer_width = "32",
+ target_pointer_width = "64",
+ target_has_atomic = "8",
+ target_has_atomic = "16",
+ target_has_atomic = "32",
+ target_has_atomic = "64",
+ target_has_atomic = "ptr",
+ target_has_atomic_equal_alignment = "8",
+ target_has_atomic_equal_alignment = "16",
+ target_has_atomic_equal_alignment = "32",
+ target_has_atomic_equal_alignment = "64",
+ target_has_atomic_equal_alignment = "ptr",
+ target_has_atomic_load_store = "8",
+ target_has_atomic_load_store = "16",
+ target_has_atomic_load_store = "32",
+ target_has_atomic_load_store = "64",
+ target_has_atomic_load_store = "ptr",
+))]
+#![no_core]
+#![rustc_coherence_is_core]
+//
+// Lints:
+#![deny(rust_2021_incompatible_or_patterns)]
+#![deny(unsafe_op_in_unsafe_fn)]
+#![warn(deprecated_in_future)]
+#![warn(missing_debug_implementations)]
+#![warn(missing_docs)]
+#![allow(explicit_outlives_requirements)]
+//
+// Library features:
+#![feature(const_align_offset)]
+#![feature(const_align_of_val)]
+#![feature(const_arguments_as_str)]
+#![feature(const_array_into_iter_constructors)]
+#![feature(const_bigint_helper_methods)]
+#![feature(const_black_box)]
+#![feature(const_caller_location)]
+#![feature(const_cell_into_inner)]
+#![feature(const_char_convert)]
+#![feature(const_clone)]
+#![feature(const_cmp)]
+#![feature(const_discriminant)]
+#![feature(const_eval_select)]
+#![feature(const_float_bits_conv)]
+#![feature(const_float_classify)]
+#![feature(const_fmt_arguments_new)]
+#![feature(const_heap)]
+#![feature(const_convert)]
+#![feature(const_inherent_unchecked_arith)]
+#![feature(const_int_unchecked_arith)]
+#![feature(const_intrinsic_forget)]
+#![feature(const_likely)]
+#![feature(const_maybe_uninit_uninit_array)]
+#![feature(const_maybe_uninit_as_mut_ptr)]
+#![feature(const_maybe_uninit_assume_init)]
+#![feature(const_nonnull_new)]
+#![feature(const_num_from_num)]
+#![feature(const_ops)]
+#![feature(const_option)]
+#![feature(const_option_ext)]
+#![feature(const_pin)]
+#![feature(const_ptr_sub_ptr)]
+#![feature(const_replace)]
+#![feature(const_ptr_as_ref)]
+#![feature(const_ptr_is_null)]
+#![feature(const_ptr_offset_from)]
+#![feature(const_ptr_read)]
+#![feature(const_ptr_write)]
+#![feature(const_raw_ptr_comparison)]
+#![feature(const_size_of_val)]
+#![feature(const_slice_from_raw_parts_mut)]
+#![feature(const_slice_ptr_len)]
+#![feature(const_str_from_utf8_unchecked_mut)]
+#![feature(const_swap)]
+#![feature(const_trait_impl)]
+#![feature(const_type_id)]
+#![feature(const_type_name)]
+#![feature(const_default_impls)]
+#![feature(const_unsafecell_get_mut)]
+#![feature(core_panic)]
+#![feature(duration_consts_float)]
+#![feature(maybe_uninit_uninit_array)]
+#![feature(ptr_metadata)]
+#![feature(slice_ptr_get)]
+#![feature(str_internals)]
+#![feature(utf16_extra)]
+#![feature(utf16_extra_const)]
+#![feature(variant_count)]
+#![feature(const_array_from_ref)]
+#![feature(const_slice_from_ref)]
+#![feature(const_slice_index)]
+#![feature(const_is_char_boundary)]
+//
+// Language features:
+#![feature(abi_unadjusted)]
+#![feature(allow_internal_unsafe)]
+#![feature(allow_internal_unstable)]
+#![feature(associated_type_bounds)]
+#![feature(auto_traits)]
+#![feature(cfg_sanitize)]
+#![feature(cfg_target_has_atomic)]
+#![feature(cfg_target_has_atomic_equal_alignment)]
+#![feature(const_fn_floating_point_arithmetic)]
+#![feature(const_mut_refs)]
+#![feature(const_precise_live_drops)]
+#![feature(const_refs_to_cell)]
+#![feature(decl_macro)]
+#![feature(deprecated_suggestion)]
+#![feature(doc_cfg)]
+#![feature(doc_notable_trait)]
+#![feature(rustdoc_internals)]
+#![feature(exhaustive_patterns)]
+#![feature(doc_cfg_hide)]
+#![feature(extern_types)]
+#![feature(fundamental)]
+#![feature(if_let_guard)]
+#![feature(intra_doc_pointers)]
+#![feature(intrinsics)]
+#![feature(lang_items)]
+#![feature(link_llvm_intrinsics)]
+#![feature(macro_metavar_expr)]
+#![feature(min_specialization)]
+#![feature(mixed_integer_ops)]
+#![feature(must_not_suspend)]
+#![feature(negative_impls)]
+#![feature(never_type)]
+#![feature(no_core)]
+#![feature(no_coverage)] // rust-lang/rust#84605
+#![feature(platform_intrinsics)]
+#![feature(prelude_import)]
+#![feature(repr_simd)]
+#![feature(rustc_allow_const_fn_unstable)]
+#![feature(rustc_attrs)]
+#![feature(simd_ffi)]
+#![feature(staged_api)]
+#![feature(stmt_expr_attributes)]
+#![feature(trait_alias)]
+#![feature(transparent_unions)]
+#![feature(try_blocks)]
+#![feature(unboxed_closures)]
+#![feature(unsized_fn_params)]
+#![feature(asm_const)]
+//
+// Target features:
+#![feature(arm_target_feature)]
+#![feature(avx512_target_feature)]
+#![feature(cmpxchg16b_target_feature)]
+#![feature(f16c_target_feature)]
+#![feature(hexagon_target_feature)]
+#![feature(mips_target_feature)]
+#![feature(powerpc_target_feature)]
+#![feature(rtm_target_feature)]
+#![feature(sse4a_target_feature)]
+#![feature(tbm_target_feature)]
+#![feature(wasm_target_feature)]
+
+// allow using `core::` in intra-doc links
+#[allow(unused_extern_crates)]
+extern crate self as core;
+
+#[prelude_import]
+#[allow(unused)]
+use prelude::v1::*;
+
+#[cfg(not(test))] // See #65860
+#[macro_use]
+mod macros;
+
+// We don't export this through #[macro_export] for now, to avoid breakage.
+// See https://github.com/rust-lang/rust/issues/82913
+#[cfg(not(test))]
+#[unstable(feature = "assert_matches", issue = "82775")]
+/// Unstable module containing the unstable `assert_matches` macro.
+pub mod assert_matches {
+ #[unstable(feature = "assert_matches", issue = "82775")]
+ pub use crate::macros::{assert_matches, debug_assert_matches};
+}
+
+#[macro_use]
+mod internal_macros;
+
+#[path = "num/shells/int_macros.rs"]
+#[macro_use]
+mod int_macros;
+
+#[path = "num/shells/i128.rs"]
+pub mod i128;
+#[path = "num/shells/i16.rs"]
+pub mod i16;
+#[path = "num/shells/i32.rs"]
+pub mod i32;
+#[path = "num/shells/i64.rs"]
+pub mod i64;
+#[path = "num/shells/i8.rs"]
+pub mod i8;
+#[path = "num/shells/isize.rs"]
+pub mod isize;
+
+#[path = "num/shells/u128.rs"]
+pub mod u128;
+#[path = "num/shells/u16.rs"]
+pub mod u16;
+#[path = "num/shells/u32.rs"]
+pub mod u32;
+#[path = "num/shells/u64.rs"]
+pub mod u64;
+#[path = "num/shells/u8.rs"]
+pub mod u8;
+#[path = "num/shells/usize.rs"]
+pub mod usize;
+
+#[path = "num/f32.rs"]
+pub mod f32;
+#[path = "num/f64.rs"]
+pub mod f64;
+
+#[macro_use]
+pub mod num;
+
+/* The libcore prelude, not as all-encompassing as the libstd prelude */
+
+pub mod prelude;
+
+/* Core modules for ownership management */
+
+pub mod hint;
+pub mod intrinsics;
+pub mod mem;
+pub mod ptr;
+
+/* Core language traits */
+
+pub mod borrow;
+pub mod clone;
+pub mod cmp;
+pub mod convert;
+pub mod default;
+pub mod marker;
+pub mod ops;
+
+/* Core types and methods on primitives */
+
+pub mod any;
+pub mod array;
+pub mod ascii;
+pub mod asserting;
+#[unstable(feature = "async_iterator", issue = "79024")]
+pub mod async_iter;
+pub mod cell;
+pub mod char;
+pub mod ffi;
+pub mod iter;
+#[unstable(feature = "once_cell", issue = "74465")]
+pub mod lazy;
+pub mod option;
+pub mod panic;
+pub mod panicking;
+pub mod pin;
+pub mod result;
+pub mod sync;
+
+pub mod fmt;
+pub mod hash;
+pub mod slice;
+pub mod str;
+pub mod time;
+
+pub mod unicode;
+
+/* Async */
+pub mod future;
+pub mod task;
+
+/* Heap memory allocator trait */
+#[allow(missing_docs)]
+pub mod alloc;
+
+// note: does not need to be public
+mod bool;
+mod tuple;
+mod unit;
+
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub mod primitive;
+
+// Pull in the `core_arch` crate directly into libcore. The contents of
+// `core_arch` are in a different repository: rust-lang/stdarch.
+//
+// `core_arch` depends on libcore, but the contents of this module are
+// set up in such a way that directly pulling it here works such that the
+// crate uses this crate as its libcore.
+#[path = "../../stdarch/crates/core_arch/src/mod.rs"]
+#[allow(
+ missing_docs,
+ missing_debug_implementations,
+ dead_code,
+ unused_imports,
+ unsafe_op_in_unsafe_fn
+)]
+#[allow(rustdoc::bare_urls)]
+// FIXME: This annotation should be moved into rust-lang/stdarch after clashing_extern_declarations is
+// merged. It currently cannot be moved because bootstrap fails, as the lint hasn't been defined yet.
+#[allow(clashing_extern_declarations)]
+#[unstable(feature = "stdsimd", issue = "48556")]
+mod core_arch;
+
+#[doc = include_str!("../../stdarch/crates/core_arch/src/core_arch_docs.md")]
+#[stable(feature = "simd_arch", since = "1.27.0")]
+pub mod arch {
+ #[stable(feature = "simd_arch", since = "1.27.0")]
+ pub use crate::core_arch::arch::*;
+
+ /// Inline assembly.
+ ///
+ /// Refer to [rust by example] for a usage guide and the [reference] for
+ /// detailed information about the syntax and available options.
+ ///
+ /// [rust by example]: https://doc.rust-lang.org/nightly/rust-by-example/unsafe/asm.html
+ /// [reference]: https://doc.rust-lang.org/nightly/reference/inline-assembly.html
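+ ///
+ /// A minimal illustration (x86_64 only; instruction strings are
+ /// target-specific):
+ ///
+ /// ```
+ /// # #[cfg(target_arch = "x86_64")]
+ /// unsafe {
+ ///     core::arch::asm!("nop");
+ /// }
+ /// ```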
+ #[stable(feature = "asm", since = "1.59.0")]
+ #[rustc_builtin_macro]
+ pub macro asm("assembly template", $(operands,)* $(options($(option),*))?) {
+ /* compiler built-in */
+ }
+
+ /// Module-level inline assembly.
+ ///
+ /// Refer to [rust by example] for a usage guide and the [reference] for
+ /// detailed information about the syntax and available options.
+ ///
+ /// [rust by example]: https://doc.rust-lang.org/nightly/rust-by-example/unsafe/asm.html
+ /// [reference]: https://doc.rust-lang.org/nightly/reference/inline-assembly.html
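+ ///
+ /// A minimal sketch defining a hypothetical `my_nop` symbol (x86_64
+ /// assembly; module-level assembly is target-specific by nature):
+ ///
+ /// ```ignore (assembly only assembles for a matching target)
+ /// core::arch::global_asm!(
+ ///     ".global my_nop",
+ ///     "my_nop:",
+ ///     "nop",
+ ///     "ret",
+ /// );
+ /// ```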
+ #[stable(feature = "global_asm", since = "1.59.0")]
+ #[rustc_builtin_macro]
+ pub macro global_asm("assembly template", $(operands,)* $(options($(option),*))?) {
+ /* compiler built-in */
+ }
+}
+
+// Pull in the `core_simd` crate directly into libcore. The contents of
+// `core_simd` are in a different repository: rust-lang/portable-simd.
+//
+// `core_simd` depends on libcore, but the contents of this module are
+// set up in such a way that directly pulling it here works such that the
+// crate uses this crate as its libcore.
+#[path = "../../portable-simd/crates/core_simd/src/mod.rs"]
+#[allow(missing_debug_implementations, dead_code, unsafe_op_in_unsafe_fn, unused_unsafe)]
+#[allow(rustdoc::bare_urls)]
+#[unstable(feature = "portable_simd", issue = "86656")]
+mod core_simd;
+
+#[doc = include_str!("../../portable-simd/crates/core_simd/src/core_simd_docs.md")]
+#[unstable(feature = "portable_simd", issue = "86656")]
+pub mod simd {
+ #[unstable(feature = "portable_simd", issue = "86656")]
+ pub use crate::core_simd::simd::*;
+}
+
+include!("primitive_docs.rs");
diff --git a/library/core/src/macros/mod.rs b/library/core/src/macros/mod.rs
new file mode 100644
index 000000000..3a115a8b8
--- /dev/null
+++ b/library/core/src/macros/mod.rs
@@ -0,0 +1,1554 @@
+#[doc = include_str!("panic.md")]
+#[macro_export]
+#[rustc_builtin_macro(core_panic)]
+#[allow_internal_unstable(edition_panic)]
+#[stable(feature = "core", since = "1.6.0")]
+#[rustc_diagnostic_item = "core_panic_macro"]
+macro_rules! panic {
+ // Expands to either `$crate::panic::panic_2015` or `$crate::panic::panic_2021`
+ // depending on the edition of the caller.
+ ($($arg:tt)*) => {
+ /* compiler built-in */
+ };
+}
+
+/// Asserts that two expressions are equal to each other (using [`PartialEq`]).
+///
+/// On panic, this macro will print the values of the expressions with their
+/// debug representations.
+///
+/// Like [`assert!`], this macro has a second form, where a custom
+/// panic message can be provided.
+///
+/// # Examples
+///
+/// ```
+/// let a = 3;
+/// let b = 1 + 2;
+/// assert_eq!(a, b);
+///
+/// assert_eq!(a, b, "we are testing addition with {} and {}", a, b);
+/// ```
+#[macro_export]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "assert_eq_macro")]
+#[allow_internal_unstable(core_panic)]
+macro_rules! assert_eq {
+ ($left:expr, $right:expr $(,)?) => {
+ match (&$left, &$right) {
+ (left_val, right_val) => {
+ if !(*left_val == *right_val) {
+ let kind = $crate::panicking::AssertKind::Eq;
+ // The reborrows below are intentional. Without them, the stack slot for the
+ // borrow is initialized even before the values are compared, leading to a
+ // noticeable slow down.
+ $crate::panicking::assert_failed(kind, &*left_val, &*right_val, $crate::option::Option::None);
+ }
+ }
+ }
+ };
+ ($left:expr, $right:expr, $($arg:tt)+) => {
+ match (&$left, &$right) {
+ (left_val, right_val) => {
+ if !(*left_val == *right_val) {
+ let kind = $crate::panicking::AssertKind::Eq;
+ // The reborrows below are intentional. Without them, the stack slot for the
+ // borrow is initialized even before the values are compared, leading to a
+ // noticeable slow down.
+ $crate::panicking::assert_failed(kind, &*left_val, &*right_val, $crate::option::Option::Some($crate::format_args!($($arg)+)));
+ }
+ }
+ }
+ };
+}
+
+/// Asserts that two expressions are not equal to each other (using [`PartialEq`]).
+///
+/// On panic, this macro will print the values of the expressions with their
+/// debug representations.
+///
+/// Like [`assert!`], this macro has a second form, where a custom
+/// panic message can be provided.
+///
+/// # Examples
+///
+/// ```
+/// let a = 3;
+/// let b = 2;
+/// assert_ne!(a, b);
+///
+/// assert_ne!(a, b, "we are testing that the values are not equal");
+/// ```
+#[macro_export]
+#[stable(feature = "assert_ne", since = "1.13.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "assert_ne_macro")]
+#[allow_internal_unstable(core_panic)]
+macro_rules! assert_ne {
+ ($left:expr, $right:expr $(,)?) => {
+ match (&$left, &$right) {
+ (left_val, right_val) => {
+ if *left_val == *right_val {
+ let kind = $crate::panicking::AssertKind::Ne;
+ // The reborrows below are intentional. Without them, the stack slot for the
+ // borrow is initialized even before the values are compared, leading to a
+ // noticeable slow down.
+ $crate::panicking::assert_failed(kind, &*left_val, &*right_val, $crate::option::Option::None);
+ }
+ }
+ }
+ };
+ ($left:expr, $right:expr, $($arg:tt)+) => {
+ match (&($left), &($right)) {
+ (left_val, right_val) => {
+ if *left_val == *right_val {
+ let kind = $crate::panicking::AssertKind::Ne;
+ // The reborrows below are intentional. Without them, the stack slot for the
+ // borrow is initialized even before the values are compared, leading to a
+ // noticeable slow down.
+ $crate::panicking::assert_failed(kind, &*left_val, &*right_val, $crate::option::Option::Some($crate::format_args!($($arg)+)));
+ }
+ }
+ }
+ };
+}
+
+/// Asserts that an expression matches any of the given patterns.
+///
+/// Like in a `match` expression, the pattern can be optionally followed by `if`
+/// and a guard expression that has access to names bound by the pattern.
+///
+/// On panic, this macro will print the value of the expression with its
+/// debug representation.
+///
+/// Like [`assert!`], this macro has a second form, where a custom
+/// panic message can be provided.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(assert_matches)]
+///
+/// use std::assert_matches::assert_matches;
+///
+/// let a = 1u32.checked_add(2);
+/// let b = 1u32.checked_sub(2);
+/// assert_matches!(a, Some(_));
+/// assert_matches!(b, None);
+///
+/// let c = Ok("abc".to_string());
+/// assert_matches!(c, Ok(x) | Err(x) if x.len() < 100);
+/// ```
+#[unstable(feature = "assert_matches", issue = "82775")]
+#[allow_internal_unstable(core_panic)]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro assert_matches {
+ ($left:expr, $(|)? $( $pattern:pat_param )|+ $( if $guard: expr )? $(,)?) => {
+ match $left {
+ $( $pattern )|+ $( if $guard )? => {}
+ ref left_val => {
+ $crate::panicking::assert_matches_failed(
+ left_val,
+ $crate::stringify!($($pattern)|+ $(if $guard)?),
+ $crate::option::Option::None
+ );
+ }
+ }
+ },
+ ($left:expr, $(|)? $( $pattern:pat_param )|+ $( if $guard: expr )?, $($arg:tt)+) => {
+ match $left {
+ $( $pattern )|+ $( if $guard )? => {}
+ ref left_val => {
+ $crate::panicking::assert_matches_failed(
+ left_val,
+ $crate::stringify!($($pattern)|+ $(if $guard)?),
+ $crate::option::Option::Some($crate::format_args!($($arg)+))
+ );
+ }
+ }
+ },
+}
+
+/// Asserts that a boolean expression is `true` at runtime.
+///
+/// This will invoke the [`panic!`] macro if the provided expression cannot be
+/// evaluated to `true` at runtime.
+///
+/// Like [`assert!`], this macro also has a second version, where a custom panic
+/// message can be provided.
+///
+/// # Uses
+///
+/// Unlike [`assert!`], `debug_assert!` statements are only enabled in non
+/// optimized builds by default. An optimized build will not execute
+/// `debug_assert!` statements unless `-C debug-assertions` is passed to the
+/// compiler. This makes `debug_assert!` useful for checks that are too
+/// expensive to be present in a release build but may be helpful during
+/// development. The result of expanding `debug_assert!` is always type checked.
+///
+/// An unchecked assertion allows a program in an inconsistent state to keep
+/// running, which might have unexpected consequences but does not introduce
+/// unsafety as long as this only happens in safe code. The performance cost
+/// of assertions, however, is not measurable in general. Replacing [`assert!`]
+/// with `debug_assert!` is thus only encouraged after thorough profiling, and
+/// more importantly, only in safe code!
+///
+/// # Examples
+///
+/// ```
+/// // the panic message for these assertions is the stringified value of the
+/// // expression given.
+/// debug_assert!(true);
+///
+/// fn some_expensive_computation() -> bool { true } // a very simple function
+/// debug_assert!(some_expensive_computation());
+///
+/// // assert with a custom message
+/// let x = true;
+/// debug_assert!(x, "x wasn't true!");
+///
+/// let a = 3; let b = 27;
+/// debug_assert!(a + b == 30, "a = {}, b = {}", a, b);
+/// ```
+#[macro_export]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_diagnostic_item = "debug_assert_macro"]
+#[allow_internal_unstable(edition_panic)]
+macro_rules! debug_assert {
+ ($($arg:tt)*) => {
+ if $crate::cfg!(debug_assertions) {
+ $crate::assert!($($arg)*);
+ }
+ };
+}
+
+/// Asserts that two expressions are equal to each other.
+///
+/// On panic, this macro will print the values of the expressions with their
+/// debug representations.
+///
+/// Unlike [`assert_eq!`], `debug_assert_eq!` statements are only enabled in non
+/// optimized builds by default. An optimized build will not execute
+/// `debug_assert_eq!` statements unless `-C debug-assertions` is passed to the
+/// compiler. This makes `debug_assert_eq!` useful for checks that are too
+/// expensive to be present in a release build but may be helpful during
+/// development. The result of expanding `debug_assert_eq!` is always type checked.
+///
+/// # Examples
+///
+/// ```
+/// let a = 3;
+/// let b = 1 + 2;
+/// debug_assert_eq!(a, b);
+/// ```
+#[macro_export]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "debug_assert_eq_macro")]
+macro_rules! debug_assert_eq {
+ ($($arg:tt)*) => {
+ if $crate::cfg!(debug_assertions) {
+ $crate::assert_eq!($($arg)*);
+ }
+ };
+}
+
+/// Asserts that two expressions are not equal to each other.
+///
+/// On panic, this macro will print the values of the expressions with their
+/// debug representations.
+///
+/// Unlike [`assert_ne!`], `debug_assert_ne!` statements are only enabled in non
+/// optimized builds by default. An optimized build will not execute
+/// `debug_assert_ne!` statements unless `-C debug-assertions` is passed to the
+/// compiler. This makes `debug_assert_ne!` useful for checks that are too
+/// expensive to be present in a release build but may be helpful during
+/// development. The result of expanding `debug_assert_ne!` is always type checked.
+///
+/// # Examples
+///
+/// ```
+/// let a = 3;
+/// let b = 2;
+/// debug_assert_ne!(a, b);
+/// ```
+#[macro_export]
+#[stable(feature = "assert_ne", since = "1.13.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "debug_assert_ne_macro")]
+macro_rules! debug_assert_ne {
+ ($($arg:tt)*) => {
+ if $crate::cfg!(debug_assertions) {
+ $crate::assert_ne!($($arg)*);
+ }
+ };
+}
+
+/// Asserts that an expression matches any of the given patterns.
+///
+/// Like in a `match` expression, the pattern can be optionally followed by `if`
+/// and a guard expression that has access to names bound by the pattern.
+///
+/// On panic, this macro will print the value of the expression with its
+/// debug representation.
+///
+/// Unlike [`assert_matches!`], `debug_assert_matches!` statements are only
+/// enabled in non optimized builds by default. An optimized build will not
+/// execute `debug_assert_matches!` statements unless `-C debug-assertions` is
+/// passed to the compiler. This makes `debug_assert_matches!` useful for
+/// checks that are too expensive to be present in a release build but may be
+/// helpful during development. The result of expanding `debug_assert_matches!`
+/// is always type checked.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(assert_matches)]
+///
+/// use std::assert_matches::debug_assert_matches;
+///
+/// let a = 1u32.checked_add(2);
+/// let b = 1u32.checked_sub(2);
+/// debug_assert_matches!(a, Some(_));
+/// debug_assert_matches!(b, None);
+///
+/// let c = Ok("abc".to_string());
+/// debug_assert_matches!(c, Ok(x) | Err(x) if x.len() < 100);
+/// ```
+#[macro_export]
+#[unstable(feature = "assert_matches", issue = "82775")]
+#[allow_internal_unstable(assert_matches)]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro debug_assert_matches($($arg:tt)*) {
+ if $crate::cfg!(debug_assertions) {
+ $crate::assert_matches::assert_matches!($($arg)*);
+ }
+}
+
+/// Returns whether the given expression matches any of the given patterns.
+///
+/// Like in a `match` expression, the pattern can be optionally followed by `if`
+/// and a guard expression that has access to names bound by the pattern.
+///
+/// # Examples
+///
+/// ```
+/// let foo = 'f';
+/// assert!(matches!(foo, 'A'..='Z' | 'a'..='z'));
+///
+/// let bar = Some(4);
+/// assert!(matches!(bar, Some(x) if x > 2));
+/// ```
+#[macro_export]
+#[stable(feature = "matches_macro", since = "1.42.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "matches_macro")]
+macro_rules! matches {
+ ($expression:expr, $(|)? $( $pattern:pat_param )|+ $( if $guard: expr )? $(,)?) => {
+ match $expression {
+ $( $pattern )|+ $( if $guard )? => true,
+ _ => false
+ }
+ };
+}
+
+/// Unwraps a result or propagates its error.
+///
+/// The `?` operator was added to replace `try!` and should be used instead.
+/// Furthermore, `try` is a reserved word in Rust 2018, so if you must use
+/// it, you will need to use the [raw-identifier syntax][ris]: `r#try`.
+///
+/// [ris]: https://doc.rust-lang.org/nightly/rust-by-example/compatibility/raw_identifiers.html
+///
+/// `try!` matches the given [`Result`]. In case of the `Ok` variant, the
+/// expression has the value of the wrapped value.
+///
+/// In case of the `Err` variant, it retrieves the inner error. `try!` then
+/// performs conversion using `From`. This provides automatic conversion
+/// between specialized errors and more general ones. The resulting
+/// error is then immediately returned.
+///
+/// Because of the early return, `try!` can only be used in functions that
+/// return [`Result`].
+///
+/// # Examples
+///
+/// ```
+/// use std::io;
+/// use std::fs::File;
+/// use std::io::prelude::*;
+///
+/// enum MyError {
+/// FileWriteError
+/// }
+///
+/// impl From<io::Error> for MyError {
+/// fn from(e: io::Error) -> MyError {
+/// MyError::FileWriteError
+/// }
+/// }
+///
+/// // The preferred method of quickly returning errors
+/// fn write_to_file_question() -> Result<(), MyError> {
+/// let mut file = File::create("my_best_friends.txt")?;
+/// file.write_all(b"This is a list of my best friends.")?;
+/// Ok(())
+/// }
+///
+/// // The previous method of quickly returning errors
+/// fn write_to_file_using_try() -> Result<(), MyError> {
+/// let mut file = r#try!(File::create("my_best_friends.txt"));
+/// r#try!(file.write_all(b"This is a list of my best friends."));
+/// Ok(())
+/// }
+///
+/// // This is equivalent to:
+/// fn write_to_file_using_match() -> Result<(), MyError> {
+/// let mut file = r#try!(File::create("my_best_friends.txt"));
+/// match file.write_all(b"This is a list of my best friends.") {
+/// Ok(v) => v,
+/// Err(e) => return Err(From::from(e)),
+/// }
+/// Ok(())
+/// }
+/// ```
+#[macro_export]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "1.39.0", note = "use the `?` operator instead")]
+#[doc(alias = "?")]
+macro_rules! r#try {
+ ($expr:expr $(,)?) => {
+ match $expr {
+ $crate::result::Result::Ok(val) => val,
+ $crate::result::Result::Err(err) => {
+ return $crate::result::Result::Err($crate::convert::From::from(err));
+ }
+ }
+ };
+}
+
+/// Writes formatted data into a buffer.
+///
+/// This macro accepts a 'writer', a format string, and a list of arguments. Arguments will be
+/// formatted according to the specified format string and the result will be passed to the writer.
+/// The writer may be any value with a `write_fmt` method; generally this comes from an
+/// implementation of either the [`fmt::Write`] or the [`io::Write`] trait. The macro
+/// returns whatever the `write_fmt` method returns; commonly a [`fmt::Result`], or an
+/// [`io::Result`].
+///
+/// See [`std::fmt`] for more information on the format string syntax.
+///
+/// [`std::fmt`]: ../std/fmt/index.html
+/// [`fmt::Write`]: crate::fmt::Write
+/// [`io::Write`]: ../std/io/trait.Write.html
+/// [`fmt::Result`]: crate::fmt::Result
+/// [`io::Result`]: ../std/io/type.Result.html
+///
+/// # Examples
+///
+/// ```
+/// use std::io::Write;
+///
+/// fn main() -> std::io::Result<()> {
+/// let mut w = Vec::new();
+/// write!(&mut w, "test")?;
+/// write!(&mut w, "formatted {}", "arguments")?;
+///
+/// assert_eq!(w, b"testformatted arguments");
+/// Ok(())
+/// }
+/// ```
+///
+/// A module can import both `std::fmt::Write` and `std::io::Write` and call `write!` on objects
+/// implementing either, as objects do not typically implement both. However, the module must
+/// import the traits qualified so their names do not conflict:
+///
+/// ```
+/// use std::fmt::Write as FmtWrite;
+/// use std::io::Write as IoWrite;
+///
+/// fn main() -> Result<(), Box<dyn std::error::Error>> {
+/// let mut s = String::new();
+/// let mut v = Vec::new();
+///
+/// write!(&mut s, "{} {}", "abc", 123)?; // uses fmt::Write::write_fmt
+/// write!(&mut v, "s = {:?}", s)?; // uses io::Write::write_fmt
+/// assert_eq!(v, b"s = \"abc 123\"");
+/// Ok(())
+/// }
+/// ```
+///
+/// Note: This macro can be used in `no_std` setups as well.
+/// In a `no_std` setup you are responsible for the implementation details of the components.
+///
+/// ```no_run
+/// # extern crate core;
+/// use core::fmt::Write;
+///
+/// struct Example;
+///
+/// impl Write for Example {
+/// fn write_str(&mut self, _s: &str) -> core::fmt::Result {
+/// unimplemented!();
+/// }
+/// }
+///
+/// let mut m = Example{};
+/// write!(&mut m, "Hello World").expect("Not written");
+/// ```
+#[macro_export]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "write_macro")]
+macro_rules! write {
+ ($dst:expr, $($arg:tt)*) => {
+ $dst.write_fmt($crate::format_args!($($arg)*))
+ };
+}
+
+/// Write formatted data into a buffer, with a newline appended.
+///
+/// On all platforms, the newline is the LINE FEED character (`\n`/`U+000A`) alone
+/// (no additional CARRIAGE RETURN (`\r`/`U+000D`)).
+///
+/// For more information, see [`write!`]. For information on the format string syntax, see
+/// [`std::fmt`].
+///
+/// [`std::fmt`]: ../std/fmt/index.html
+///
+/// # Examples
+///
+/// ```
+/// use std::io::{Write, Result};
+///
+/// fn main() -> Result<()> {
+/// let mut w = Vec::new();
+/// writeln!(&mut w)?;
+/// writeln!(&mut w, "test")?;
+/// writeln!(&mut w, "formatted {}", "arguments")?;
+///
+/// assert_eq!(&w[..], "\ntest\nformatted arguments\n".as_bytes());
+/// Ok(())
+/// }
+/// ```
+///
+/// A module can import both `std::fmt::Write` and `std::io::Write` and call `write!` on objects
+/// implementing either, as objects do not typically implement both. However, the module must
+/// import the traits qualified so their names do not conflict:
+///
+/// ```
+/// use std::fmt::Write as FmtWrite;
+/// use std::io::Write as IoWrite;
+///
+/// fn main() -> Result<(), Box<dyn std::error::Error>> {
+/// let mut s = String::new();
+/// let mut v = Vec::new();
+///
+/// writeln!(&mut s, "{} {}", "abc", 123)?; // uses fmt::Write::write_fmt
+/// writeln!(&mut v, "s = {:?}", s)?; // uses io::Write::write_fmt
+/// assert_eq!(v, b"s = \"abc 123\\n\"\n");
+/// Ok(())
+/// }
+/// ```
+#[macro_export]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "writeln_macro")]
+#[allow_internal_unstable(format_args_nl)]
+macro_rules! writeln {
+ ($dst:expr $(,)?) => {
+ $crate::write!($dst, "\n")
+ };
+ ($dst:expr, $($arg:tt)*) => {
+ $dst.write_fmt($crate::format_args_nl!($($arg)*))
+ };
+}
+
+/// Indicates unreachable code.
+///
+/// This is useful any time that the compiler can't determine that some code is unreachable. For
+/// example:
+///
+/// * Match arms with guard conditions.
+/// * Loops that dynamically terminate.
+/// * Iterators that dynamically terminate.
+///
+/// If the determination that the code is unreachable proves incorrect, the
+/// program immediately terminates with a [`panic!`].
+///
+/// The unsafe counterpart of this macro is the [`unreachable_unchecked`] function, which
+/// will cause undefined behavior if the code is reached.
+///
+/// [`unreachable_unchecked`]: crate::hint::unreachable_unchecked
+///
+/// # Panics
+///
+/// This will always [`panic!`] because `unreachable!` is just a shorthand for `panic!` with a
+/// fixed, specific message.
+///
+/// Like `panic!`, this macro has a second form for displaying custom values.
+///
+/// # Examples
+///
+/// Match arms:
+///
+/// ```
+/// # #[allow(dead_code)]
+/// fn foo(x: Option<i32>) {
+/// match x {
+/// Some(n) if n >= 0 => println!("Some(Non-negative)"),
+/// Some(n) if n < 0 => println!("Some(Negative)"),
+/// Some(_) => unreachable!(), // compile error if commented out
+/// None => println!("None")
+/// }
+/// }
+/// ```
+///
+/// Iterators:
+///
+/// ```
+/// # #[allow(dead_code)]
+/// fn divide_by_three(x: u32) -> u32 { // one of the poorest implementations of x/3
+/// for i in 0.. {
+/// if 3*i < i { panic!("u32 overflow"); }
+/// if x < 3*i { return i-1; }
+/// }
+/// unreachable!("The loop should always return");
+/// }
+/// ```
+#[macro_export]
+#[rustc_builtin_macro(unreachable)]
+#[allow_internal_unstable(edition_panic)]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "unreachable_macro")]
+macro_rules! unreachable {
+ // Expands to either `$crate::panic::unreachable_2015` or `$crate::panic::unreachable_2021`
+ // depending on the edition of the caller.
+ ($($arg:tt)*) => {
+ /* compiler built-in */
+ };
+}
+
+/// Indicates unimplemented code by panicking with a message of "not implemented".
+///
+/// This allows your code to type-check, which is useful if you are prototyping or
+/// implementing a trait that requires multiple methods, not all of which you plan to use.
+///
+/// The difference between `unimplemented!` and [`todo!`] is that while `todo!`
+/// conveys an intent of implementing the functionality later and the message is "not yet
+/// implemented", `unimplemented!` makes no such claims. Its message is "not implemented".
+/// Also, some IDEs will mark `todo!`s.
+///
+/// # Panics
+///
+/// This will always [`panic!`] because `unimplemented!` is just a shorthand for `panic!` with a
+/// fixed, specific message.
+///
+/// Like `panic!`, this macro has a second form for displaying custom values.
+///
+/// [`todo!`]: crate::todo
+///
+/// # Examples
+///
+/// Say we have a trait `Foo`:
+///
+/// ```
+/// trait Foo {
+/// fn bar(&self) -> u8;
+/// fn baz(&self);
+/// fn qux(&self) -> Result<u64, ()>;
+/// }
+/// ```
+///
+/// We want to implement `Foo` for `MyStruct`, but for some reason it only makes sense
+/// to implement the `bar()` function. `baz()` and `qux()` will still need to be defined
+/// in our implementation of `Foo`, but we can use `unimplemented!` in their definitions
+/// to allow our code to compile.
+///
+/// We still want to have our program stop running if the unimplemented methods are
+/// reached.
+///
+/// ```
+/// # trait Foo {
+/// # fn bar(&self) -> u8;
+/// # fn baz(&self);
+/// # fn qux(&self) -> Result<u64, ()>;
+/// # }
+/// struct MyStruct;
+///
+/// impl Foo for MyStruct {
+/// fn bar(&self) -> u8 {
+/// 1 + 1
+/// }
+///
+/// fn baz(&self) {
+/// // It makes no sense to `baz` a `MyStruct`, so we have no logic here
+/// // at all.
+/// // This will display "thread 'main' panicked at 'not implemented'".
+/// unimplemented!();
+/// }
+///
+/// fn qux(&self) -> Result<u64, ()> {
+/// // We have some logic here,
+/// // We can add a message to unimplemented! to display our omission.
+/// // This will display:
+/// // "thread 'main' panicked at 'not implemented: MyStruct isn't quxable'".
+/// unimplemented!("MyStruct isn't quxable");
+/// }
+/// }
+///
+/// fn main() {
+/// let s = MyStruct;
+/// s.bar();
+/// }
+/// ```
+#[macro_export]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "unimplemented_macro")]
+#[allow_internal_unstable(core_panic)]
+macro_rules! unimplemented {
+ () => {
+ $crate::panicking::panic("not implemented")
+ };
+ ($($arg:tt)+) => {
+ $crate::panic!("not implemented: {}", $crate::format_args!($($arg)+))
+ };
+}
+
+/// Indicates unfinished code.
+///
+/// This can be useful if you are prototyping and are just looking to have your
+/// code typecheck.
+///
+/// The difference between [`unimplemented!`] and `todo!` is that while `todo!` conveys
+/// an intent of implementing the functionality later and the message is "not yet
+/// implemented", `unimplemented!` makes no such claims. Its message is "not implemented".
+/// Also, some IDEs will mark `todo!`s.
+///
+/// # Panics
+///
+/// This will always [`panic!`].
+///
+/// # Examples
+///
+/// Here's an example of some in-progress code. We have a trait `Foo`:
+///
+/// ```
+/// trait Foo {
+/// fn bar(&self);
+/// fn baz(&self);
+/// }
+/// ```
+///
+/// We want to implement `Foo` on one of our types, but we also want to work on
+/// just `bar()` first. In order for our code to compile, we need to implement
+/// `baz()`, so we can use `todo!`:
+///
+/// ```
+/// # trait Foo {
+/// # fn bar(&self);
+/// # fn baz(&self);
+/// # }
+/// struct MyStruct;
+///
+/// impl Foo for MyStruct {
+/// fn bar(&self) {
+/// // implementation goes here
+/// }
+///
+/// fn baz(&self) {
+/// // let's not worry about implementing baz() for now
+/// todo!();
+/// }
+/// }
+///
+/// fn main() {
+/// let s = MyStruct;
+/// s.bar();
+///
+/// // we aren't even using baz(), so this is fine.
+/// }
+/// ```
+#[macro_export]
+#[stable(feature = "todo_macro", since = "1.40.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "todo_macro")]
+#[allow_internal_unstable(core_panic)]
+macro_rules! todo {
+ () => {
+ $crate::panicking::panic("not yet implemented")
+ };
+ ($($arg:tt)+) => {
+ $crate::panic!("not yet implemented: {}", $crate::format_args!($($arg)+))
+ };
+}
+
+/// Definitions of built-in macros.
+///
+/// Most of the macro properties (stability, visibility, etc.) are taken from the source code here,
+/// with the exception of the expansion functions that transform macro inputs into outputs;
+/// those functions are provided by the compiler.
+pub(crate) mod builtin {
+
+ /// Causes compilation to fail with the given error message when encountered.
+ ///
+ /// This macro should be used when a crate uses a conditional compilation strategy to provide
+ /// better error messages for erroneous conditions. It's the compiler-level form of [`panic!`],
+ /// but emits an error during *compilation* rather than at *runtime*.
+ ///
+ /// # Examples
+ ///
+ /// Two such examples are macros and `#[cfg]` environments.
+ ///
+ /// Emit a better compiler error if a macro is passed invalid values. Without the final branch,
+ /// the compiler would still emit an error, but the error's message would not mention the two
+ /// valid values.
+ ///
+ /// ```compile_fail
+ /// macro_rules! give_me_foo_or_bar {
+ /// (foo) => {};
+ /// (bar) => {};
+ /// ($x:ident) => {
+ /// compile_error!("This macro only accepts `foo` or `bar`");
+ /// }
+ /// }
+ ///
+ /// give_me_foo_or_bar!(neither);
+ /// // ^ will fail at compile time with message "This macro only accepts `foo` or `bar`"
+ /// ```
+ ///
+ /// Emit a compiler error if one of a number of features isn't available.
+ ///
+ /// ```compile_fail
+ /// #[cfg(not(any(feature = "foo", feature = "bar")))]
+ /// compile_error!("Either feature \"foo\" or \"bar\" must be enabled for this crate.");
+ /// ```
+ #[stable(feature = "compile_error_macro", since = "1.20.0")]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ #[cfg_attr(not(test), rustc_diagnostic_item = "compile_error_macro")]
+ macro_rules! compile_error {
+ ($msg:expr $(,)?) => {{ /* compiler built-in */ }};
+ }
+
+ /// Constructs parameters for the other string-formatting macros.
+ ///
+ /// This macro functions by taking a formatting string literal containing
+ /// `{}` for each additional argument passed. `format_args!` prepares the
+ /// additional parameters to ensure the output can be interpreted as a string
+ /// and canonicalizes the arguments into a single type. Any value that implements
+ /// the [`Display`] trait can be passed to `format_args!`, and any value that
+ /// implements the [`Debug`] trait can be passed to a `{:?}` within the formatting string.
+ ///
+ /// This macro produces a value of type [`fmt::Arguments`]. This value can be
+ /// passed to the macros within [`std::fmt`] for performing useful redirection.
+ /// All other formatting macros ([`format!`], [`write!`], [`println!`], etc) are
+ /// proxied through this one. `format_args!`, unlike its derived macros, avoids
+ /// heap allocations.
+ ///
+ /// You can use the [`fmt::Arguments`] value that `format_args!` returns
+ /// in `Debug` and `Display` contexts as seen below. The example also shows
+ /// that `Debug` and `Display` format to the same thing: the interpolated
+ /// format string in `format_args!`.
+ ///
+ /// ```rust
+ /// let debug = format!("{:?}", format_args!("{} foo {:?}", 1, 2));
+ /// let display = format!("{}", format_args!("{} foo {:?}", 1, 2));
+ /// assert_eq!("1 foo 2", display);
+ /// assert_eq!(display, debug);
+ /// ```
+ ///
+ /// For more information, see the documentation in [`std::fmt`].
+ ///
+ /// [`Display`]: crate::fmt::Display
+ /// [`Debug`]: crate::fmt::Debug
+ /// [`fmt::Arguments`]: crate::fmt::Arguments
+ /// [`std::fmt`]: ../std/fmt/index.html
+ /// [`format!`]: ../std/macro.format.html
+ /// [`println!`]: ../std/macro.println.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// let s = fmt::format(format_args!("hello {}", "world"));
+ /// assert_eq!(s, format!("hello {}", "world"));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg_attr(not(test), rustc_diagnostic_item = "format_args_macro")]
+ #[allow_internal_unsafe]
+ #[allow_internal_unstable(fmt_internals)]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ macro_rules! format_args {
+ ($fmt:expr) => {{ /* compiler built-in */ }};
+ ($fmt:expr, $($args:tt)*) => {{ /* compiler built-in */ }};
+ }
+
+ /// Same as [`format_args`], but can be used in some const contexts.
+ ///
+ /// This macro is used by the panic macros for the `const_panic` feature.
+ ///
+ /// This macro will be removed once `format_args` is allowed in const contexts.
+ #[unstable(feature = "const_format_args", issue = "none")]
+ #[allow_internal_unstable(fmt_internals, const_fmt_arguments_new)]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ macro_rules! const_format_args {
+ ($fmt:expr) => {{ /* compiler built-in */ }};
+ ($fmt:expr, $($args:tt)*) => {{ /* compiler built-in */ }};
+ }
+
+ /// Same as [`format_args`], but adds a newline at the end.
+ #[unstable(
+ feature = "format_args_nl",
+ issue = "none",
+ reason = "`format_args_nl` is only for internal \
+ language use and is subject to change"
+ )]
+ #[allow_internal_unstable(fmt_internals)]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ macro_rules! format_args_nl {
+ ($fmt:expr) => {{ /* compiler built-in */ }};
+ ($fmt:expr, $($args:tt)*) => {{ /* compiler built-in */ }};
+ }
+
+ /// Inspects an environment variable at compile time.
+ ///
+ /// This macro will expand to the value of the named environment variable at
+ /// compile time, yielding an expression of type `&'static str`. Use
+ /// [`std::env::var`] instead if you want to read the value at runtime.
+ ///
+ /// [`std::env::var`]: ../std/env/fn.var.html
+ ///
+ /// If the environment variable is not defined, then a compilation error
+ /// will be emitted. To avoid a compile error, use the [`option_env!`]
+ /// macro instead.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let path: &'static str = env!("PATH");
+ /// println!("the $PATH variable at the time of compiling was: {path}");
+ /// ```
+ ///
+ /// You can customize the error message by passing a string as the second
+ /// parameter:
+ ///
+ /// ```compile_fail
+ /// let doc: &'static str = env!("documentation", "what's that?!");
+ /// ```
+ ///
+ /// If the `documentation` environment variable is not defined, you'll get
+ /// the following error:
+ ///
+ /// ```text
+ /// error: what's that?!
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ #[cfg_attr(not(test), rustc_diagnostic_item = "env_macro")]
+ macro_rules! env {
+ ($name:expr $(,)?) => {{ /* compiler built-in */ }};
+ ($name:expr, $error_msg:expr $(,)?) => {{ /* compiler built-in */ }};
+ }
+
+ /// Optionally inspects an environment variable at compile time.
+ ///
+ /// If the named environment variable is present at compile time, this will
+ /// expand into an expression of type `Option<&'static str>` whose value is
+ /// `Some` of the value of the environment variable. If the environment
+ /// variable is not present, then this will expand to `None`. See
+ /// [`Option<T>`][Option] for more information on this type. Use
+ /// [`std::env::var`] instead if you want to read the value at runtime.
+ ///
+ /// [`std::env::var`]: ../std/env/fn.var.html
+ ///
+ /// A compile-time error is never emitted when using this macro, regardless
+ /// of whether the environment variable is present.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let key: Option<&'static str> = option_env!("SECRET_KEY");
+ /// println!("the secret key might be: {key:?}");
+ /// ```
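+ ///
+ /// A small additional sketch (the variable name is illustrative): a default
+ /// can be supplied at the use site with [`Option::unwrap_or`].
+ ///
+ /// ```
+ /// let log_level = option_env!("MY_LOG_LEVEL").unwrap_or("info");
+ /// println!("log level: {log_level}");
+ /// ```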
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ #[cfg_attr(not(test), rustc_diagnostic_item = "option_env_macro")]
+ macro_rules! option_env {
+ ($name:expr $(,)?) => {{ /* compiler built-in */ }};
+ }
+
+ /// Concatenates identifiers into one identifier.
+ ///
+ /// This macro takes any number of comma-separated identifiers, and
+ /// concatenates them all into one, yielding an expression which is a new
+ /// identifier. Note that hygiene makes it such that this macro cannot
+ /// capture local variables. Also, as a general rule, macros are only
+ /// allowed in item, statement, or expression position. That means that while
+ /// you may use this macro to refer to existing variables, functions, or
+ /// modules, etc., you cannot use it to define a new one.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(concat_idents)]
+ ///
+ /// # fn main() {
+ /// fn foobar() -> u32 { 23 }
+ ///
+ /// let f = concat_idents!(foo, bar);
+ /// println!("{}", f());
+ ///
+ /// // fn concat_idents!(new, fun, name) { } // not usable in this way!
+ /// # }
+ /// ```
+ #[unstable(
+ feature = "concat_idents",
+ issue = "29599",
+ reason = "`concat_idents` is not stable enough for use and is subject to change"
+ )]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ macro_rules! concat_idents {
+ ($($e:ident),+ $(,)?) => {{ /* compiler built-in */ }};
+ }
+
+ /// Concatenates literals into a byte slice.
+ ///
+ /// This macro takes any number of comma-separated literals, and concatenates them all into
+ /// one, yielding an expression of type `&[u8; N]`, which represents all of the literals
+ /// concatenated left-to-right. The literals passed can be any combination of:
+ ///
+ /// - byte literals (`b'r'`)
+ /// - byte strings (`b"Rust"`)
+ /// - arrays of bytes/numbers (`[b'A', 66, b'C']`)
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(concat_bytes)]
+ ///
+ /// # fn main() {
+ /// let s: &[u8; 6] = concat_bytes!(b'A', b"BC", [68, b'E', 70]);
+ /// assert_eq!(s, b"ABCDEF");
+ /// # }
+ /// ```
+ #[unstable(feature = "concat_bytes", issue = "87555")]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ macro_rules! concat_bytes {
+ ($($e:literal),+ $(,)?) => {{ /* compiler built-in */ }};
+ }
+
+ /// Concatenates literals into a static string slice.
+ ///
+ /// This macro takes any number of comma-separated literals, yielding an
+ /// expression of type `&'static str` which represents all of the literals
+ /// concatenated left-to-right.
+ ///
+ /// Integer and floating point literals are stringified in order to be
+ /// concatenated.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let s = concat!("test", 10, 'b', true);
+ /// assert_eq!(s, "test10btrue");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ #[cfg_attr(not(test), rustc_diagnostic_item = "concat_macro")]
+ macro_rules! concat {
+ ($($e:expr),* $(,)?) => {{ /* compiler built-in */ }};
+ }
+
+ /// Expands to the line number on which it was invoked.
+ ///
+ /// With [`column!`] and [`file!`], these macros provide debugging information for
+ /// developers about the location within the source.
+ ///
+ /// The expanded expression has type `u32` and is 1-based, so the first line
+ /// in each file evaluates to 1, the second to 2, etc. This is consistent
+ /// with error messages by common compilers or popular editors.
+ /// The returned line is *not necessarily* the line of the `line!` invocation itself,
+ /// but rather that of the first macro invocation leading up to the invocation
+ /// of the `line!` macro.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let current_line = line!();
+ /// println!("defined on line: {current_line}");
+ /// ```
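+ ///
+ /// A minimal sketch of the call-site behavior described above (the helper
+ /// macro is purely illustrative):
+ ///
+ /// ```
+ /// macro_rules! caller_line {
+ ///     () => { line!() };
+ /// }
+ ///
+ /// // Both invocations sit on the same source line, so the values match.
+ /// let (a, b) = (caller_line!(), line!());
+ /// assert_eq!(a, b);
+ /// ```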
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ #[cfg_attr(not(test), rustc_diagnostic_item = "line_macro")]
+ macro_rules! line {
+ () => {
+ /* compiler built-in */
+ };
+ }
+
+ /// Expands to the column number at which it was invoked.
+ ///
+ /// With [`line!`] and [`file!`], these macros provide debugging information for
+ /// developers about the location within the source.
+ ///
+ /// The expanded expression has type `u32` and is 1-based, so the first column
+ /// in each line evaluates to 1, the second to 2, etc. This is consistent
+ /// with error messages by common compilers or popular editors.
+ /// The returned column is *not necessarily* the column of the `column!` invocation itself,
+ /// but rather that of the first macro invocation leading up to the invocation
+ /// of the `column!` macro.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let current_col = column!();
+ /// println!("defined on column: {current_col}");
+ /// ```
+ ///
+ /// `column!` counts Unicode code points, not bytes or graphemes. As a result, the first two
+ /// invocations return the same value, but the third does not.
+ ///
+ /// ```
+ /// let a = ("foobar", column!()).1;
+ /// let b = ("人之初性本善", column!()).1;
+ /// let c = ("f̅o̅o̅b̅a̅r̅", column!()).1; // Uses combining overline (U+0305)
+ ///
+ /// assert_eq!(a, b);
+ /// assert_ne!(b, c);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ #[cfg_attr(not(test), rustc_diagnostic_item = "column_macro")]
+ macro_rules! column {
+ () => {
+ /* compiler built-in */
+ };
+ }
+
+ /// Expands to the file name in which it was invoked.
+ ///
+ /// With [`line!`] and [`column!`], these macros provide debugging information for
+ /// developers about the location within the source.
+ ///
+ /// The expanded expression has type `&'static str`, and the returned file
+ /// is *not necessarily* that of the `file!` invocation itself, but rather
+ /// that of the first macro invocation leading up to the invocation of the
+ /// `file!` macro.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let this_file = file!();
+ /// println!("defined in file: {this_file}");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ #[cfg_attr(not(test), rustc_diagnostic_item = "file_macro")]
+ macro_rules! file {
+ () => {
+ /* compiler built-in */
+ };
+ }
+
+ /// Stringifies its arguments.
+ ///
+ /// This macro will yield an expression of type `&'static str` which is the
+ /// stringification of all the tokens passed to the macro. No restrictions
+ /// are placed on the syntax of the macro invocation itself.
+ ///
+ /// Note that the expanded results of the input tokens may change in the
+ /// future. You should be careful if you rely on the output.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let one_plus_one = stringify!(1 + 1);
+ /// assert_eq!(one_plus_one, "1 + 1");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ #[cfg_attr(not(test), rustc_diagnostic_item = "stringify_macro")]
+ macro_rules! stringify {
+ ($($t:tt)*) => {
+ /* compiler built-in */
+ };
+ }
+
+ /// Includes a UTF-8 encoded file as a string.
+ ///
+ /// The file is located relative to the current file (similarly to how
+ /// modules are found). The provided path is interpreted in a platform-specific
+ /// way at compile time. So, for instance, an invocation with a Windows path
+ /// containing backslashes `\` would not compile correctly on Unix.
+ ///
+ /// This macro will yield an expression of type `&'static str` which is the
+ /// contents of the file.
+ ///
+ /// # Examples
+ ///
+ /// Assume there are two files in the same directory with the following
+ /// contents:
+ ///
+ /// File 'spanish.in':
+ ///
+ /// ```text
+ /// adiós
+ /// ```
+ ///
+ /// File 'main.rs':
+ ///
+ /// ```ignore (cannot-doctest-external-file-dependency)
+ /// fn main() {
+ /// let my_str = include_str!("spanish.in");
+ /// assert_eq!(my_str, "adiós\n");
+ /// print!("{my_str}");
+ /// }
+ /// ```
+ ///
+ /// Compiling 'main.rs' and running the resulting binary will print "adiós".
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ #[cfg_attr(not(test), rustc_diagnostic_item = "include_str_macro")]
+ macro_rules! include_str {
+ ($file:expr $(,)?) => {{ /* compiler built-in */ }};
+ }
+
+ /// Includes a file as a reference to a byte array.
+ ///
+ /// The file is located relative to the current file (similarly to how
+ /// modules are found). The provided path is interpreted in a platform-specific
+ /// way at compile time. So, for instance, an invocation with a Windows path
+ /// containing backslashes `\` would not compile correctly on Unix.
+ ///
+ /// This macro will yield an expression of type `&'static [u8; N]` which is
+ /// the contents of the file.
+ ///
+ /// # Examples
+ ///
+ /// Assume there are two files in the same directory with the following
+ /// contents:
+ ///
+ /// File 'spanish.in':
+ ///
+ /// ```text
+ /// adiós
+ /// ```
+ ///
+ /// File 'main.rs':
+ ///
+ /// ```ignore (cannot-doctest-external-file-dependency)
+ /// fn main() {
+ /// let bytes = include_bytes!("spanish.in");
+ /// assert_eq!(bytes, b"adi\xc3\xb3s\n");
+ /// print!("{}", String::from_utf8_lossy(bytes));
+ /// }
+ /// ```
+ ///
+ /// Compiling 'main.rs' and running the resulting binary will print "adiós".
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ #[cfg_attr(not(test), rustc_diagnostic_item = "include_bytes_macro")]
+ macro_rules! include_bytes {
+ ($file:expr $(,)?) => {{ /* compiler built-in */ }};
+ }
+
+ /// Expands to a string that represents the current module path.
+ ///
+ /// The current module path can be thought of as the hierarchy of modules
+ /// leading back up to the crate root. The first component of the path
+ /// returned is the name of the crate currently being compiled.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// mod test {
+ /// pub fn foo() {
+ /// assert!(module_path!().ends_with("test"));
+ /// }
+ /// }
+ ///
+ /// test::foo();
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ #[cfg_attr(not(test), rustc_diagnostic_item = "module_path_macro")]
+ macro_rules! module_path {
+ () => {
+ /* compiler built-in */
+ };
+ }
+
+ /// Evaluates boolean combinations of configuration flags at compile-time.
+ ///
+ /// In addition to the `#[cfg]` attribute, this macro is provided to allow
+ /// boolean expression evaluation of configuration flags. This frequently
+ /// leads to less duplicated code.
+ ///
+ /// The syntax given to this macro is the same syntax as the [`cfg`]
+ /// attribute.
+ ///
+ /// `cfg!`, unlike `#[cfg]`, does not remove any code and only evaluates to true or false. For
+ /// example, all blocks in an if/else expression need to be valid when `cfg!` is used for
+ /// the condition, regardless of what `cfg!` is evaluating.
+ ///
+ /// [`cfg`]: ../reference/conditional-compilation.html#the-cfg-attribute
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let my_directory = if cfg!(windows) {
+ /// "windows-specific-directory"
+ /// } else {
+ /// "unix-directory"
+ /// };
+ /// ```
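+ ///
+ /// A sketch of the caveat above: because `cfg!` does not remove code, the
+ /// branch that is not taken must still compile (`undefined_function` is a
+ /// deliberately missing name).
+ ///
+ /// ```compile_fail
+ /// if cfg!(windows) {
+ ///     undefined_function(); // fails on every platform: not found in scope
+ /// }
+ /// ```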
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ #[cfg_attr(not(test), rustc_diagnostic_item = "cfg_macro")]
+ macro_rules! cfg {
+ ($($cfg:tt)*) => {
+ /* compiler built-in */
+ };
+ }
+
+ /// Parses a file as an expression or an item according to the context.
+ ///
+ /// The file is located relative to the current file (similarly to how
+ /// modules are found). The provided path is interpreted in a platform-specific
+ /// way at compile time. So, for instance, an invocation with a Windows path
+ /// containing backslashes `\` would not compile correctly on Unix.
+ ///
+ /// Using this macro is often a bad idea, because if the file is
+ /// parsed as an expression, it is placed into the surrounding code
+ /// unhygienically. If the current file defines variables or functions
+ /// with the same names as ones the included file expects, the included
+ /// code may resolve to different items than intended.
+ ///
+ /// # Examples
+ ///
+ /// Assume there are two files in the same directory with the following
+ /// contents:
+ ///
+ /// File 'monkeys.in':
+ ///
+ /// ```ignore (only-for-syntax-highlight)
+ /// ['🙈', '🙊', '🙉']
+ /// .iter()
+ /// .cycle()
+ /// .take(6)
+ /// .collect::<String>()
+ /// ```
+ ///
+ /// File 'main.rs':
+ ///
+ /// ```ignore (cannot-doctest-external-file-dependency)
+ /// fn main() {
+ /// let my_string = include!("monkeys.in");
+ /// assert_eq!("🙈🙊🙉🙈🙊🙉", my_string);
+ /// println!("{my_string}");
+ /// }
+ /// ```
+ ///
+ /// Compiling 'main.rs' and running the resulting binary will print
+ /// "🙈🙊🙉🙈🙊🙉".
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ #[cfg_attr(not(test), rustc_diagnostic_item = "include_macro")]
+ macro_rules! include {
+ ($file:expr $(,)?) => {{ /* compiler built-in */ }};
+ }
+
+ /// Asserts that a boolean expression is `true` at runtime.
+ ///
+ /// This will invoke the [`panic!`] macro if the provided expression cannot be
+ /// evaluated to `true` at runtime.
+ ///
+ /// # Uses
+ ///
+ /// Assertions are always checked in both debug and release builds, and cannot
+ /// be disabled. See [`debug_assert!`] for assertions that are not enabled in
+ /// release builds by default.
+ ///
+ /// Unsafe code may rely on `assert!` to enforce run-time invariants that, if
+ /// violated, could lead to unsafety.
+ ///
+ /// Other use-cases of `assert!` include testing and enforcing run-time
+ /// invariants in safe code (whose violation cannot result in unsafety).
+ ///
+ /// # Custom Messages
+ ///
+ /// This macro has a second form, where a custom panic message can
+ /// be provided with or without arguments for formatting. See [`std::fmt`]
+ /// for syntax for this form. Expressions used as format arguments will only
+ /// be evaluated if the assertion fails.
+ ///
+ /// [`std::fmt`]: ../std/fmt/index.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// // the panic message for these assertions is the stringified value of the
+ /// // expression given.
+ /// assert!(true);
+ ///
+ /// fn some_computation() -> bool { true } // a very simple function
+ ///
+ /// assert!(some_computation());
+ ///
+ /// // assert with a custom message
+ /// let x = true;
+ /// assert!(x, "x wasn't true!");
+ ///
+ /// let a = 3; let b = 27;
+ /// assert!(a + b == 30, "a = {}, b = {}", a, b);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ #[rustc_diagnostic_item = "assert_macro"]
+ #[allow_internal_unstable(core_panic, edition_panic)]
+ macro_rules! assert {
+ ($cond:expr $(,)?) => {{ /* compiler built-in */ }};
+ ($cond:expr, $($arg:tt)+) => {{ /* compiler built-in */ }};
+ }
+
+ /// Prints the passed tokens to standard output during compilation.
+ #[unstable(
+ feature = "log_syntax",
+ issue = "29598",
+ reason = "`log_syntax!` is not stable enough for use and is subject to change"
+ )]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ macro_rules! log_syntax {
+ ($($arg:tt)*) => {
+ /* compiler built-in */
+ };
+ }
+
+ /// Enables or disables tracing functionality used for debugging other macros.
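+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch (nightly-only, behind the `trace_macros` feature): the
+ /// compiler prints a note for each macro expansion while tracing is enabled.
+ ///
+ /// ```
+ /// #![feature(trace_macros)]
+ ///
+ /// trace_macros!(true);
+ /// // The expansion of `vec!` is traced during compilation.
+ /// let v = vec![1, 2, 3];
+ /// trace_macros!(false);
+ /// # assert_eq!(v, [1, 2, 3]);
+ /// ```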
+ #[unstable(
+ feature = "trace_macros",
+ issue = "29598",
+ reason = "`trace_macros` is not stable enough for use and is subject to change"
+ )]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ macro_rules! trace_macros {
+ (true) => {{ /* compiler built-in */ }};
+ (false) => {{ /* compiler built-in */ }};
+ }
+
+ /// Attribute macro used to apply derive macros.
+ ///
+ /// See [the reference] for more info.
+ ///
+ /// [the reference]: ../../../reference/attributes/derive.html
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_builtin_macro]
+ pub macro derive($item:item) {
+ /* compiler built-in */
+ }
+
+ /// Attribute macro applied to a function to turn it into a unit test.
+ ///
+ /// See [the reference] for more info.
+ ///
+ /// [the reference]: ../../../reference/attributes/testing.html#the-test-attribute
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[allow_internal_unstable(test, rustc_attrs)]
+ #[rustc_builtin_macro]
+ pub macro test($item:item) {
+ /* compiler built-in */
+ }
+
+ /// Attribute macro applied to a function to turn it into a benchmark test.
+ #[unstable(
+ feature = "test",
+ issue = "50297",
+ soft,
+ reason = "`bench` is a part of custom test frameworks which are unstable"
+ )]
+ #[allow_internal_unstable(test, rustc_attrs)]
+ #[rustc_builtin_macro]
+ pub macro bench($item:item) {
+ /* compiler built-in */
+ }
+
+ /// An implementation detail of the `#[test]` and `#[bench]` macros.
+ #[unstable(
+ feature = "custom_test_frameworks",
+ issue = "50297",
+ reason = "custom test frameworks are an unstable feature"
+ )]
+ #[allow_internal_unstable(test, rustc_attrs)]
+ #[rustc_builtin_macro]
+ pub macro test_case($item:item) {
+ /* compiler built-in */
+ }
+
+ /// Attribute macro applied to a static to register it as a global allocator.
+ ///
+ /// See also [`std::alloc::GlobalAlloc`](../../../std/alloc/trait.GlobalAlloc.html).
+ #[stable(feature = "global_allocator", since = "1.28.0")]
+ #[allow_internal_unstable(rustc_attrs)]
+ #[rustc_builtin_macro]
+ pub macro global_allocator($item:item) {
+ /* compiler built-in */
+ }
+
+ /// Keeps the item it's applied to if the passed path is accessible, and removes it otherwise.
+ #[unstable(
+ feature = "cfg_accessible",
+ issue = "64797",
+ reason = "`cfg_accessible` is not fully implemented"
+ )]
+ #[rustc_builtin_macro]
+ pub macro cfg_accessible($item:item) {
+ /* compiler built-in */
+ }
+
+ /// Expands all `#[cfg]` and `#[cfg_attr]` attributes in the code fragment it's applied to.
+ #[unstable(
+ feature = "cfg_eval",
+ issue = "82679",
+ reason = "`cfg_eval` is a recently implemented feature"
+ )]
+ #[rustc_builtin_macro]
+ pub macro cfg_eval($($tt:tt)*) {
+ /* compiler built-in */
+ }
+
+ /// Unstable implementation detail of the `rustc` compiler, do not use.
+ #[rustc_builtin_macro]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[allow_internal_unstable(core_intrinsics, libstd_sys_internals, rt)]
+ #[deprecated(since = "1.52.0", note = "rustc-serialize is deprecated and no longer supported")]
+ #[doc(hidden)] // While technically stable, using it is unstable, and deprecated. Hide it.
+ pub macro RustcDecodable($item:item) {
+ /* compiler built-in */
+ }
+
+ /// Unstable implementation detail of the `rustc` compiler, do not use.
+ #[rustc_builtin_macro]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[allow_internal_unstable(core_intrinsics, rt)]
+ #[deprecated(since = "1.52.0", note = "rustc-serialize is deprecated and no longer supported")]
+ #[doc(hidden)] // While technically stable, using it is unstable, and deprecated. Hide it.
+ pub macro RustcEncodable($item:item) {
+ /* compiler built-in */
+ }
+}
diff --git a/library/core/src/macros/panic.md b/library/core/src/macros/panic.md
new file mode 100644
index 000000000..98fb7e9e4
--- /dev/null
+++ b/library/core/src/macros/panic.md
@@ -0,0 +1,75 @@
+Panics the current thread.
+
+This allows a program to terminate immediately and provide feedback
+to the caller of the program.
+
+This macro is the perfect way to assert conditions in example code and in
+tests. `panic!` is closely tied with the `unwrap` method of both
+[`Option`][ounwrap] and [`Result`][runwrap] enums. Both implementations call
+`panic!` when they are set to [`None`] or [`Err`] variants.
+
+When using `panic!()` you can specify a string payload that is built using
+the [`format!`] syntax. That payload is used when injecting the panic into
+the calling Rust thread, causing the thread to panic entirely.
+
+The behavior of the default `std` hook, i.e. the code that runs directly
+after the panic is invoked, is to print the message payload to
+`stderr` along with the file/line/column information of the `panic!()`
+call. You can override the panic hook using [`std::panic::set_hook()`].
+Inside the hook a panic can be accessed as a `&(dyn Any + Send)`,
+which contains either a `&str` or a `String` for regular `panic!()` invocations.
+To panic with a value of another type, [`panic_any`] can be used.
+
+See also the macro [`compile_error!`], for raising errors during compilation.
+
+# When to use `panic!` vs `Result`
+
+The Rust language provides two complementary systems for constructing /
+representing, reporting, propagating, reacting to, and discarding errors. These
+responsibilities are collectively known as "error handling." `panic!` and
+`Result` are similar in that they are each the primary interface of their
+respective error handling systems; however, the meaning these interfaces attach
+to their errors and the responsibilities they fulfill within their respective
+error handling systems differ.
+
+The `panic!` macro is used to construct errors that represent a bug that has
+been detected in your program. With `panic!` you provide a message that
+describes the bug and the language then constructs an error with that message,
+reports it, and propagates it for you.
+
+`Result` on the other hand is used to wrap other types that represent either
+the successful result of some computation, `Ok(T)`, or error types that
+represent an anticipated runtime failure mode of that computation, `Err(E)`.
+`Result` is used alongside user defined types which represent the various
+anticipated runtime failure modes that the associated computation could
+encounter. `Result` must be propagated manually, often with the help of the
+`?` operator and `Try` trait, and errors must be reported manually, often with
+the help of the `Error` trait.
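+
+As a small illustrative sketch (the function name is hypothetical), the `?`
+operator propagates the `Err` variant to the caller instead of panicking:
+
+```
+use std::num::ParseIntError;
+
+fn parse_and_double(input: &str) -> Result<i32, ParseIntError> {
+    let n: i32 = input.parse()?; // invalid input becomes `Err`, not a panic
+    Ok(n * 2)
+}
+
+assert_eq!(parse_and_double("21"), Ok(42));
+assert!(parse_and_double("twenty-one").is_err());
+```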
+
+For more detailed information about error handling, check out the [book] or the
+[`std::result`] module docs.
+
+[ounwrap]: Option::unwrap
+[runwrap]: Result::unwrap
+[`std::panic::set_hook()`]: ../std/panic/fn.set_hook.html
+[`panic_any`]: ../std/panic/fn.panic_any.html
+[`Box`]: ../std/boxed/struct.Box.html
+[`Any`]: crate::any::Any
+[`format!`]: ../std/macro.format.html
+[book]: ../book/ch09-00-error-handling.html
+[`std::result`]: ../std/result/index.html
+
+# Current implementation
+
+If the main thread panics it will terminate all your threads and end your
+program with code `101`.
+
+# Examples
+
+```should_panic
+# #![allow(unreachable_code)]
+panic!();
+panic!("this is a terrible mistake!");
+panic!("this is a {} {message}", "fancy", message = "message");
+std::panic::panic_any(4); // panic with the value of 4 to be collected elsewhere
+```
diff --git a/library/core/src/marker.rs b/library/core/src/marker.rs
new file mode 100644
index 000000000..2c5789795
--- /dev/null
+++ b/library/core/src/marker.rs
@@ -0,0 +1,840 @@
+//! Primitive traits and types representing basic properties of types.
+//!
+//! Rust types can be classified in various useful ways according to
+//! their intrinsic properties. These classifications are represented
+//! as traits.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::cell::UnsafeCell;
+use crate::cmp;
+use crate::fmt::Debug;
+use crate::hash::Hash;
+use crate::hash::Hasher;
+
+/// Types that can be transferred across thread boundaries.
+///
+/// This trait is automatically implemented when the compiler determines it's
+/// appropriate.
+///
+/// An example of a non-`Send` type is the reference-counting pointer
+/// [`rc::Rc`][`Rc`]. If two threads attempt to clone [`Rc`]s that point to the same
+/// reference-counted value, they might try to update the reference count at the
+/// same time, which is [undefined behavior][ub] because [`Rc`] doesn't use atomic
+/// operations. Its cousin [`sync::Arc`][arc] does use atomic operations (incurring
+/// some overhead) and thus is `Send`.
+///
+/// See [the Nomicon](../../nomicon/send-and-sync.html) for more details.
+///
+/// [`Rc`]: ../../std/rc/struct.Rc.html
+/// [arc]: ../../std/sync/struct.Arc.html
+/// [ub]: ../../reference/behavior-considered-undefined.html
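+///
+/// # Examples
+///
+/// A minimal sketch (doc tests link against `std`, so `thread` is available):
+/// a `Send` value can be moved into another thread.
+///
+/// ```
+/// use std::thread;
+///
+/// let v = vec![1, 2, 3];
+/// // `Vec<i32>` is `Send`, so ownership may move to the spawned thread.
+/// let handle = thread::spawn(move || v.len());
+/// assert_eq!(handle.join().unwrap(), 3);
+/// ```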
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "Send")]
+#[rustc_on_unimplemented(
+ message = "`{Self}` cannot be sent between threads safely",
+ label = "`{Self}` cannot be sent between threads safely"
+)]
+pub unsafe auto trait Send {
+ // empty.
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> !Send for *const T {}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> !Send for *mut T {}
+
+/// Types with a constant size known at compile time.
+///
+/// All type parameters have an implicit bound of `Sized`. The special syntax
+/// `?Sized` can be used to remove this bound if it's not appropriate.
+///
+/// ```
+/// # #![allow(dead_code)]
+/// struct Foo<T>(T);
+/// struct Bar<T: ?Sized>(T);
+///
+/// // struct FooUse(Foo<[i32]>); // error: Sized is not implemented for [i32]
+/// struct BarUse(Bar<[i32]>); // OK
+/// ```
+///
+/// The one exception is the implicit `Self` type of a trait. A trait does not
+/// have an implicit `Sized` bound as this is incompatible with [trait object]s
+/// where, by definition, the trait needs to work with all possible implementors,
+/// and thus could be any size.
+///
+/// Although Rust will let you bind `Sized` to a trait, you won't
+/// be able to use it to form a trait object later:
+///
+/// ```
+/// # #![allow(unused_variables)]
+/// trait Foo { }
+/// trait Bar: Sized { }
+///
+/// struct Impl;
+/// impl Foo for Impl { }
+/// impl Bar for Impl { }
+///
+/// let x: &dyn Foo = &Impl; // OK
+/// // let y: &dyn Bar = &Impl; // error: the trait `Bar` cannot
+/// // be made into an object
+/// ```
+///
+/// [trait object]: ../../book/ch17-02-trait-objects.html
+#[stable(feature = "rust1", since = "1.0.0")]
+#[lang = "sized"]
+#[rustc_on_unimplemented(
+ message = "the size for values of type `{Self}` cannot be known at compilation time",
+ label = "doesn't have a size known at compile-time"
+)]
+#[fundamental] // for Default, for example, which requires that `[T]: !Default` be evaluatable
+#[rustc_specialization_trait]
+pub trait Sized {
+ // Empty.
+}
+
+/// Types that can be "unsized" to a dynamically-sized type.
+///
+/// For example, the sized array type `[i8; 2]` implements `Unsize<[i8]>` and
+/// `Unsize<dyn fmt::Debug>`.
+///
+/// All implementations of `Unsize` are provided automatically by the compiler.
+/// Those implementations are:
+///
+/// - Arrays `[T; N]` implement `Unsize<[T]>`.
+/// - Types implementing a trait `Trait` also implement `Unsize<dyn Trait>`.
+/// - Structs `Foo<..., T, ...>` implement `Unsize<Foo<..., U, ...>>` if all of these conditions
+/// are met:
+/// - `T: Unsize<U>`.
+/// - Only the last field of `Foo` has a type involving `T`.
+/// - `Bar<T>: Unsize<Bar<U>>`, where `Bar<T>` stands for the actual type of that last field.
+///
+/// `Unsize` is used along with [`ops::CoerceUnsized`] to allow
+/// "user-defined" containers such as [`Rc`] to contain dynamically-sized
+/// types. See the [DST coercion RFC][RFC982] and [the nomicon entry on coercion][nomicon-coerce]
+/// for more details.
+///
+/// [`ops::CoerceUnsized`]: crate::ops::CoerceUnsized
+/// [`Rc`]: ../../std/rc/struct.Rc.html
+/// [RFC982]: https://github.com/rust-lang/rfcs/blob/master/text/0982-dst-coercion.md
+/// [nomicon-coerce]: ../../nomicon/coercions.html
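+///
+/// # Examples
+///
+/// A minimal sketch of the array-to-slice coercion enabled by `Unsize`
+/// (the coercion happens behind a reference):
+///
+/// ```
+/// // `[i8; 2]: Unsize<[i8]>`, so `&[i8; 2]` coerces to `&[i8]`.
+/// let array: &[i8; 2] = &[1, 2];
+/// let slice: &[i8] = array;
+/// assert_eq!(slice.len(), 2);
+/// ```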
+#[unstable(feature = "unsize", issue = "27732")]
+#[lang = "unsize"]
+pub trait Unsize<T: ?Sized> {
+ // Empty.
+}
+
+/// Required trait for constants used in pattern matches.
+///
+/// Any type that derives `PartialEq` automatically implements this trait,
+/// *regardless* of whether its type-parameters implement `Eq`.
+///
+/// If a `const` item contains some type that does not implement this trait,
+/// then that type either (1.) does not implement `PartialEq` (which means the
+/// constant will not provide that comparison method, which code generation
+/// assumes is available), or (2.) it implements *its own* version of
+/// `PartialEq` (which we assume does not conform to a structural-equality
+/// comparison).
+///
+/// In either of the two scenarios above, we reject usage of such a constant in
+/// a pattern match.
+///
+/// See also the [structural match RFC][RFC1445], and [issue 63438] which
+/// motivated migrating from attribute-based design to this trait.
+///
+/// [RFC1445]: https://github.com/rust-lang/rfcs/blob/master/text/1445-restrict-constants-in-patterns.md
+/// [issue 63438]: https://github.com/rust-lang/rust/issues/63438
+#[unstable(feature = "structural_match", issue = "31434")]
+#[rustc_on_unimplemented(message = "the type `{Self}` does not `#[derive(PartialEq)]`")]
+#[lang = "structural_peq"]
+pub trait StructuralPartialEq {
+ // Empty.
+}
+
+/// Required trait for constants used in pattern matches.
+///
+/// Any type that derives `Eq` automatically implements this trait, *regardless*
+/// of whether its type parameters implement `Eq`.
+///
+/// This is a hack to work around a limitation in our type system.
+///
+/// # Background
+///
+/// We want to require that types of consts used in pattern matches
+/// have the attribute `#[derive(PartialEq, Eq)]`.
+///
+/// In a more ideal world, we could check that requirement by just checking that
+/// the given type implements both the `StructuralPartialEq` trait *and*
+/// the `Eq` trait. However, you can have ADTs that *do* `derive(PartialEq, Eq)`,
+/// and be a case that we want the compiler to accept, and yet the constant's
+/// type fails to implement `Eq`.
+///
+/// Namely, a case like this:
+///
+/// ```rust
+/// #[derive(PartialEq, Eq)]
+/// struct Wrap<X>(X);
+///
+/// fn higher_order(_: &()) { }
+///
+/// const CFN: Wrap<fn(&())> = Wrap(higher_order);
+///
+/// fn main() {
+/// match CFN {
+/// CFN => {}
+/// _ => {}
+/// }
+/// }
+/// ```
+///
+/// (The problem in the above code is that `Wrap<fn(&())>` does not implement
+/// `PartialEq`, nor `Eq`, because `for<'a> fn(&'a _)` does not implement those
+/// traits.)
+///
+/// Therefore, we cannot rely on a naive check for `StructuralPartialEq` and
+/// mere `Eq`.
+///
+/// As a hack to work around this, we use two separate traits injected by each
+/// of the two derives (`#[derive(PartialEq)]` and `#[derive(Eq)]`) and check
+/// that both of them are present as part of structural-match checking.
+#[unstable(feature = "structural_match", issue = "31434")]
+#[rustc_on_unimplemented(message = "the type `{Self}` does not `#[derive(Eq)]`")]
+#[lang = "structural_teq"]
+pub trait StructuralEq {
+ // Empty.
+}
+
+/// Types whose values can be duplicated simply by copying bits.
+///
+/// By default, variable bindings have 'move semantics.' In other
+/// words:
+///
+/// ```
+/// #[derive(Debug)]
+/// struct Foo;
+///
+/// let x = Foo;
+///
+/// let y = x;
+///
+/// // `x` has moved into `y`, and so cannot be used
+///
+/// // println!("{x:?}"); // error: use of moved value
+/// ```
+///
+/// However, if a type implements `Copy`, it instead has 'copy semantics':
+///
+/// ```
+/// // We can derive a `Copy` implementation. `Clone` is also required, as it's
+/// // a supertrait of `Copy`.
+/// #[derive(Debug, Copy, Clone)]
+/// struct Foo;
+///
+/// let x = Foo;
+///
+/// let y = x;
+///
+/// // `y` is a copy of `x`
+///
+/// println!("{x:?}"); // A-OK!
+/// ```
+///
+/// It's important to note that in these two examples, the only difference is whether you
+/// are allowed to access `x` after the assignment. Under the hood, both a copy and a move
+/// can result in bits being copied in memory, although this is sometimes optimized away.
+///
+/// ## How can I implement `Copy`?
+///
+/// There are two ways to implement `Copy` on your type. The simplest is to use `derive`:
+///
+/// ```
+/// #[derive(Copy, Clone)]
+/// struct MyStruct;
+/// ```
+///
+/// You can also implement `Copy` and `Clone` manually:
+///
+/// ```
+/// struct MyStruct;
+///
+/// impl Copy for MyStruct { }
+///
+/// impl Clone for MyStruct {
+/// fn clone(&self) -> MyStruct {
+/// *self
+/// }
+/// }
+/// ```
+///
+/// There is a small difference between the two: the `derive` strategy will also place a `Copy`
+/// bound on type parameters, which isn't always desired.
+///
+/// ## What's the difference between `Copy` and `Clone`?
+///
+/// Copies happen implicitly, for example as part of an assignment `y = x`. The behavior of
+/// `Copy` is not overloadable; it is always a simple bit-wise copy.
+///
+/// Cloning is an explicit action, `x.clone()`. The implementation of [`Clone`] can
+/// provide any type-specific behavior necessary to duplicate values safely. For example,
+/// the implementation of [`Clone`] for [`String`] needs to copy the pointed-to string
+/// buffer in the heap. A simple bitwise copy of [`String`] values would merely copy the
+/// pointer, leading to a double free down the line. For this reason, [`String`] is [`Clone`]
+/// but not `Copy`.
+///
+/// [`Clone`] is a supertrait of `Copy`, so everything which is `Copy` must also implement
+/// [`Clone`]. If a type is `Copy` then its [`Clone`] implementation only needs to return `*self`
+/// (see the example above).
+///
+/// ## When can my type be `Copy`?
+///
+/// A type can implement `Copy` if all of its components implement `Copy`. For example, this
+/// struct can be `Copy`:
+///
+/// ```
+/// # #[allow(dead_code)]
+/// #[derive(Copy, Clone)]
+/// struct Point {
+/// x: i32,
+/// y: i32,
+/// }
+/// ```
+///
+/// A struct can be `Copy`, and [`i32`] is `Copy`, therefore `Point` is eligible to be `Copy`.
+/// By contrast, consider
+///
+/// ```
+/// # #![allow(dead_code)]
+/// # struct Point;
+/// struct PointList {
+/// points: Vec<Point>,
+/// }
+/// ```
+///
+/// The struct `PointList` cannot implement `Copy`, because [`Vec<T>`] is not `Copy`. If we
+/// attempt to derive a `Copy` implementation, we'll get an error:
+///
+/// ```text
+/// the trait `Copy` may not be implemented for this type; field `points` does not implement `Copy`
+/// ```
+///
+/// Shared references (`&T`) are also `Copy`, so a type can be `Copy`, even when it holds
+/// shared references of types `T` that are *not* `Copy`. Consider the following struct,
+/// which can implement `Copy`, because it only holds a *shared reference* to our non-`Copy`
+/// type `PointList` from above:
+///
+/// ```
+/// # #![allow(dead_code)]
+/// # struct PointList;
+/// #[derive(Copy, Clone)]
+/// struct PointListWrapper<'a> {
+/// point_list_ref: &'a PointList,
+/// }
+/// ```
+///
+/// ## When *can't* my type be `Copy`?
+///
+/// Some types can't be copied safely. For example, copying `&mut T` would create an aliased
+/// mutable reference. Copying [`String`] would duplicate responsibility for managing the
+/// [`String`]'s buffer, leading to a double free.
+///
+/// Generalizing the latter case, any type implementing [`Drop`] can't be `Copy`, because it's
+/// managing some resource besides its own [`size_of::<T>`] bytes.
+///
+/// If you try to implement `Copy` on a struct or enum containing non-`Copy` data, you will get
+/// the error [E0204].
+///
+/// [E0204]: ../../error-index.html#E0204
+///
+/// ## When *should* my type be `Copy`?
+///
+/// Generally speaking, if your type _can_ implement `Copy`, it should. Keep in mind, though,
+/// that implementing `Copy` is part of the public API of your type. If the type might become
+/// non-`Copy` in the future, it could be prudent to omit the `Copy` implementation now, to
+/// avoid a breaking API change.
+///
+/// ## Additional implementors
+///
+/// In addition to the [implementors listed below][impls],
+/// the following types also implement `Copy`:
+///
+/// * Function item types (i.e., the distinct types defined for each function)
+/// * Function pointer types (e.g., `fn() -> i32`)
+/// * Closure types, if they capture no value from the environment
+/// or if all such captured values implement `Copy` themselves (see the sketch below).
+/// Note that variables captured by shared reference always implement `Copy`
+/// (even if the referent doesn't),
+/// while variables captured by mutable reference never implement `Copy`.
+///
+/// [`Vec<T>`]: ../../std/vec/struct.Vec.html
+/// [`String`]: ../../std/string/struct.String.html
+/// [`size_of::<T>`]: crate::mem::size_of
+/// [impls]: #implementors
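+///
+/// A small sketch of the closure rule above: a closure whose captures are all
+/// `Copy` is itself `Copy`.
+///
+/// ```
+/// let x = 1_i32;          // `i32` is `Copy`
+/// let f = move || x + 1;  // captures `x` by value; the closure is `Copy`
+/// let g = f;              // copying `f` into `g` leaves `f` usable
+/// assert_eq!(f(), g());
+/// ```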
+#[stable(feature = "rust1", since = "1.0.0")]
+#[lang = "copy"]
+// FIXME(matthewjasper) This allows copying a type that doesn't implement
+// `Copy` because of unsatisfied lifetime bounds (copying `A<'_>` when only
+// `A<'static>: Copy` and `A<'_>: Clone`).
+// We have this attribute here for now only because there are quite a few
+// existing specializations on `Copy` that already exist in the standard
+// library, and there's no way to safely have this behavior right now.
+#[rustc_unsafe_specialization_marker]
+#[rustc_diagnostic_item = "Copy"]
+pub trait Copy: Clone {
+ // Empty.
+}
+
+/// Derive macro generating an impl of the trait `Copy`.
+#[rustc_builtin_macro]
+#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+#[allow_internal_unstable(core_intrinsics, derive_clone_copy)]
+pub macro Copy($item:item) {
+ /* compiler built-in */
+}
+
+/// Types for which it is safe to share references between threads.
+///
+/// This trait is automatically implemented when the compiler determines
+/// it's appropriate.
+///
+/// The precise definition is: a type `T` is [`Sync`] if and only if `&T` is
+/// [`Send`]. In other words, if there is no possibility of
+/// [undefined behavior][ub] (including data races) when passing
+/// `&T` references between threads.
+///
+/// As one would expect, primitive types like [`u8`] and [`f64`]
+/// are all [`Sync`], and so are simple aggregate types containing them,
+/// like tuples, structs and enums. More examples of basic [`Sync`]
+/// types include "immutable" types like `&T`, and those with simple
+/// inherited mutability, such as [`Box<T>`][box], [`Vec<T>`][vec] and
+/// most other collection types. (Generic parameters need to be [`Sync`]
+/// for their container to be [`Sync`].)
+///
+/// A somewhat surprising consequence of the definition is that `&mut T`
+/// is `Sync` (if `T` is `Sync`) even though it seems like that might
+/// provide unsynchronized mutation. The trick is that a mutable
+/// reference behind a shared reference (that is, `& &mut T`)
+/// becomes read-only, as if it were a `& &T`. Hence there is no risk
+/// of a data race.
+///
+/// Types that are not `Sync` are those that have "interior
+/// mutability" in a non-thread-safe form, such as [`Cell`][cell]
+/// and [`RefCell`][refcell]. These types allow for mutation of
+/// their contents even through an immutable, shared reference. For
+/// example the `set` method on [`Cell<T>`][cell] takes `&self`, so it requires
+/// only a shared reference [`&Cell<T>`][cell]. The method performs no
+/// synchronization, thus [`Cell`][cell] cannot be `Sync`.
+///
+/// Another example of a non-`Sync` type is the reference-counting
+/// pointer [`Rc`][rc]. Given any reference [`&Rc<T>`][rc], you can clone
+/// a new [`Rc<T>`][rc], modifying the reference counts in a non-atomic way.
+///
+/// For cases when one does need thread-safe interior mutability,
+/// Rust provides [atomic data types], as well as explicit locking via
+/// [`sync::Mutex`][mutex] and [`sync::RwLock`][rwlock]. These types
+/// ensure that any mutation cannot cause data races, hence the types
+/// are `Sync`. Likewise, [`sync::Arc`][arc] provides a thread-safe
+/// analogue of [`Rc`][rc].
+///
+/// Any types with interior mutability must also use the
+/// [`cell::UnsafeCell`][unsafecell] wrapper around the value(s) which
+/// can be mutated through a shared reference. Failing to do this is
+/// [undefined behavior][ub]. For example, [`transmute`][transmute]-ing
+/// from `&T` to `&mut T` is invalid.
+///
+/// See [the Nomicon][nomicon-send-and-sync] for more details about `Sync`.
+///
+/// [box]: ../../std/boxed/struct.Box.html
+/// [vec]: ../../std/vec/struct.Vec.html
+/// [cell]: crate::cell::Cell
+/// [refcell]: crate::cell::RefCell
+/// [rc]: ../../std/rc/struct.Rc.html
+/// [arc]: ../../std/sync/struct.Arc.html
+/// [atomic data types]: crate::sync::atomic
+/// [mutex]: ../../std/sync/struct.Mutex.html
+/// [rwlock]: ../../std/sync/struct.RwLock.html
+/// [unsafecell]: crate::cell::UnsafeCell
+/// [ub]: ../../reference/behavior-considered-undefined.html
+/// [transmute]: crate::mem::transmute
+/// [nomicon-send-and-sync]: ../../nomicon/send-and-sync.html
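+///
+/// # Examples
+///
+/// A minimal sketch (doc tests link against `std`): a `Sync` value can be
+/// shared by reference across threads, here via `std::thread::scope`.
+///
+/// ```
+/// use std::sync::atomic::{AtomicUsize, Ordering};
+///
+/// // `AtomicUsize` is `Sync`, so `&counter` may be used from many threads.
+/// let counter = AtomicUsize::new(0);
+/// std::thread::scope(|s| {
+///     for _ in 0..4 {
+///         s.spawn(|| { counter.fetch_add(1, Ordering::Relaxed); });
+///     }
+/// });
+/// assert_eq!(counter.load(Ordering::Relaxed), 4);
+/// ```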
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "Sync")]
+#[lang = "sync"]
+#[rustc_on_unimplemented(
+ message = "`{Self}` cannot be shared between threads safely",
+ label = "`{Self}` cannot be shared between threads safely"
+)]
+pub unsafe auto trait Sync {
+ // FIXME(estebank): once support to add notes in `rustc_on_unimplemented`
+ // lands in beta, and it has been extended to check whether a closure is
+ // anywhere in the requirement chain, extend it as such (#48534):
+ // ```
+ // on(
+ // closure,
+ // note="`{Self}` cannot be shared safely, consider marking the closure `move`"
+ // ),
+ // ```
+
+ // Empty
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> !Sync for *const T {}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> !Sync for *mut T {}
+
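+// Trait implementations shared by marker types (currently just `PhantomData`):
+// every impl generated below ignores the `T` parameter, because the marker
+// type itself stores no data of type `T`.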
+macro_rules! impls {
+ ($t: ident) => {
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<T: ?Sized> Hash for $t<T> {
+ #[inline]
+ fn hash<H: Hasher>(&self, _: &mut H) {}
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<T: ?Sized> cmp::PartialEq for $t<T> {
+ fn eq(&self, _other: &$t<T>) -> bool {
+ true
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<T: ?Sized> cmp::Eq for $t<T> {}
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<T: ?Sized> cmp::PartialOrd for $t<T> {
+ fn partial_cmp(&self, _other: &$t<T>) -> Option<cmp::Ordering> {
+ Option::Some(cmp::Ordering::Equal)
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<T: ?Sized> cmp::Ord for $t<T> {
+ fn cmp(&self, _other: &$t<T>) -> cmp::Ordering {
+ cmp::Ordering::Equal
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<T: ?Sized> Copy for $t<T> {}
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<T: ?Sized> Clone for $t<T> {
+ fn clone(&self) -> Self {
+ Self
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
+ impl<T: ?Sized> const Default for $t<T> {
+ fn default() -> Self {
+ Self
+ }
+ }
+
+ #[unstable(feature = "structural_match", issue = "31434")]
+ impl<T: ?Sized> StructuralPartialEq for $t<T> {}
+
+ #[unstable(feature = "structural_match", issue = "31434")]
+ impl<T: ?Sized> StructuralEq for $t<T> {}
+ };
+}
+
+/// Zero-sized type used to mark things that "act like" they own a `T`.
+///
+/// Adding a `PhantomData<T>` field to your type tells the compiler that your
+/// type acts as though it stores a value of type `T`, even though it doesn't
+/// really. This information is used when computing certain safety properties.
+///
+/// For a more in-depth explanation of how to use `PhantomData<T>`, please see
+/// [the Nomicon](../../nomicon/phantom-data.html).
+///
+/// # A ghastly note 👻👻👻
+///
+/// Though they both have scary names, `PhantomData` and 'phantom types' are
+/// related, but not identical. A phantom type parameter is simply a type
+/// parameter which is never used. In Rust, this often causes the compiler to
+/// complain, and the solution is to add a "dummy" use by way of `PhantomData`.
+///
+/// # Examples
+///
+/// ## Unused lifetime parameters
+///
+/// Perhaps the most common use case for `PhantomData` is a struct that has an
+/// unused lifetime parameter, typically as part of some unsafe code. For
+/// example, here is a struct `Slice` that has two pointers of type `*const T`,
+/// presumably pointing into an array somewhere:
+///
+/// ```compile_fail,E0392
+/// struct Slice<'a, T> {
+/// start: *const T,
+/// end: *const T,
+/// }
+/// ```
+///
+/// The intention is that the underlying data is only valid for the
+/// lifetime `'a`, so `Slice` should not outlive `'a`. However, this
+/// intent is not expressed in the code, since there are no uses of
+/// the lifetime `'a` and hence it is not clear what data it applies
+/// to. We can correct this by telling the compiler to act *as if* the
+/// `Slice` struct contained a reference `&'a T`:
+///
+/// ```
+/// use std::marker::PhantomData;
+///
+/// # #[allow(dead_code)]
+/// struct Slice<'a, T: 'a> {
+/// start: *const T,
+/// end: *const T,
+/// phantom: PhantomData<&'a T>,
+/// }
+/// ```
+///
+/// This also in turn requires the annotation `T: 'a`, indicating
+/// that any references in `T` are valid over the lifetime `'a`.
+///
+/// When initializing a `Slice` you simply provide the value
+/// `PhantomData` for the field `phantom`:
+///
+/// ```
+/// # #![allow(dead_code)]
+/// # use std::marker::PhantomData;
+/// # struct Slice<'a, T: 'a> {
+/// # start: *const T,
+/// # end: *const T,
+/// # phantom: PhantomData<&'a T>,
+/// # }
+/// fn borrow_vec<T>(vec: &Vec<T>) -> Slice<'_, T> {
+/// let ptr = vec.as_ptr();
+/// Slice {
+/// start: ptr,
+/// end: unsafe { ptr.add(vec.len()) },
+/// phantom: PhantomData,
+/// }
+/// }
+/// ```
+///
+/// ## Unused type parameters
+///
+/// It sometimes happens that you have unused type parameters which
+/// indicate what type of data a struct is "tied" to, even though that
+/// data is not actually found in the struct itself. Here is an
+/// example where this arises with [FFI]. The foreign interface uses
+/// handles of type `*mut ()` to refer to Rust values of different
+/// types. We track the Rust type using a phantom type parameter on
+/// the struct `ExternalResource` which wraps a handle.
+///
+/// [FFI]: ../../book/ch19-01-unsafe-rust.html#using-extern-functions-to-call-external-code
+///
+/// ```
+/// # #![allow(dead_code)]
+/// # trait ResType { }
+/// # struct ParamType;
+/// # mod foreign_lib {
+/// # pub fn new(_: usize) -> *mut () { 42 as *mut () }
+/// # pub fn do_stuff(_: *mut (), _: usize) {}
+/// # }
+/// # fn convert_params(_: ParamType) -> usize { 42 }
+/// use std::marker::PhantomData;
+/// use std::mem;
+///
+/// struct ExternalResource<R> {
+/// resource_handle: *mut (),
+/// resource_type: PhantomData<R>,
+/// }
+///
+/// impl<R: ResType> ExternalResource<R> {
+/// fn new() -> Self {
+/// let size_of_res = mem::size_of::<R>();
+/// Self {
+/// resource_handle: foreign_lib::new(size_of_res),
+/// resource_type: PhantomData,
+/// }
+/// }
+///
+/// fn do_stuff(&self, param: ParamType) {
+/// let foreign_params = convert_params(param);
+/// foreign_lib::do_stuff(self.resource_handle, foreign_params);
+/// }
+/// }
+/// ```
+///
+/// ## Ownership and the drop check
+///
+/// Adding a field of type `PhantomData<T>` indicates that your
+/// type owns data of type `T`. This in turn implies that when your
+/// type is dropped, it may drop one or more instances of the type
+/// `T`. This has bearing on the Rust compiler's [drop check]
+/// analysis.
+///
+/// If your struct does not in fact *own* the data of type `T`, it is
+/// better to use a reference type, like `PhantomData<&'a T>`
+/// (ideally) or `PhantomData<*const T>` (if no lifetime applies), so
+/// as not to indicate ownership.
+///
+/// [drop check]: ../../nomicon/dropck.html
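+///
+/// For example, a minimal sketch of an owning container around a raw
+/// allocation (the `MyBox` name and fields are illustrative only):
+///
+/// ```
+/// use std::marker::PhantomData;
+///
+/// # #[allow(dead_code)]
+/// struct MyBox<T> {
+///     ptr: *mut T,
+///     // Tells the drop checker that `MyBox<T>` owns a `T` and may drop one
+///     // when it is dropped.
+///     _owns_t: PhantomData<T>,
+/// }
+/// ```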
+#[lang = "phantom_data"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct PhantomData<T: ?Sized>;
+
+impls! { PhantomData }
+
+mod impls {
+ #[stable(feature = "rust1", since = "1.0.0")]
+ unsafe impl<T: Sync + ?Sized> Send for &T {}
+ #[stable(feature = "rust1", since = "1.0.0")]
+ unsafe impl<T: Send + ?Sized> Send for &mut T {}
+}
+
+/// Compiler-internal trait used to indicate the type of enum discriminants.
+///
+/// This trait is automatically implemented for every type and does not add any
+/// guarantees to [`mem::Discriminant`]. It is **undefined behavior** to transmute
+/// between `DiscriminantKind::Discriminant` and `mem::Discriminant`.
+///
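+/// In user code, the stable way to observe discriminants is `mem::discriminant`;
+/// a minimal sketch:
+///
+/// ```
+/// use std::mem;
+///
+/// enum Op { Add(i32), Mul(i32) }
+///
+/// // Equal for values of the same variant, unequal across variants.
+/// assert_eq!(mem::discriminant(&Op::Add(1)), mem::discriminant(&Op::Add(2)));
+/// assert_ne!(mem::discriminant(&Op::Add(1)), mem::discriminant(&Op::Mul(1)));
+/// ```
+///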
+/// [`mem::Discriminant`]: crate::mem::Discriminant
+#[unstable(
+ feature = "discriminant_kind",
+ issue = "none",
+ reason = "this trait is unlikely to ever be stabilized, use `mem::discriminant` instead"
+)]
+#[lang = "discriminant_kind"]
+pub trait DiscriminantKind {
+ /// The type of the discriminant, which must satisfy the trait
+ /// bounds required by `mem::Discriminant`.
+ #[lang = "discriminant_type"]
+ type Discriminant: Clone + Copy + Debug + Eq + PartialEq + Hash + Send + Sync + Unpin;
+}
+
+/// Compiler-internal trait used to determine whether a type contains
+/// any `UnsafeCell` internally, but not through an indirection.
+/// This affects, for example, whether a `static` of that type is
+/// placed in read-only static memory or writable static memory.
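+///
+/// A sketch of the observable effect (`Freeze` itself is internal and cannot
+/// be named in user code):
+///
+/// ```
+/// use std::sync::atomic::{AtomicI32, Ordering};
+///
+/// // No interior `UnsafeCell`, so this may be placed in read-only memory.
+/// static FROZEN: i32 = 0;
+/// // `AtomicI32` contains an `UnsafeCell`, so this static must be placed in
+/// // writable memory even though it is not declared `static mut`.
+/// static NOT_FROZEN: AtomicI32 = AtomicI32::new(0);
+///
+/// NOT_FROZEN.store(1, Ordering::Relaxed);
+/// assert_eq!(FROZEN, 0);
+/// assert_eq!(NOT_FROZEN.load(Ordering::Relaxed), 1);
+/// ```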
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+impl<T: ?Sized> !Freeze for UnsafeCell<T> {}
+unsafe impl<T: ?Sized> Freeze for PhantomData<T> {}
+unsafe impl<T: ?Sized> Freeze for *const T {}
+unsafe impl<T: ?Sized> Freeze for *mut T {}
+unsafe impl<T: ?Sized> Freeze for &T {}
+unsafe impl<T: ?Sized> Freeze for &mut T {}
+
+/// Types that can be safely moved after being pinned.
+///
+/// Rust itself has no notion of immovable types, and considers moves (e.g.,
+/// through assignment or [`mem::replace`]) to always be safe.
+///
+/// The [`Pin`][Pin] type is used instead to prevent moves through the type
+/// system. Pointers `P<T>` wrapped in the [`Pin<P<T>>`][Pin] wrapper can't be
+/// moved out of. See the [`pin` module] documentation for more information on
+/// pinning.
+///
+/// Implementing the `Unpin` trait for `T` lifts the restrictions of pinning off
+/// the type, which then allows moving `T` out of [`Pin<P<T>>`][Pin] with
+/// functions such as [`mem::replace`].
+///
+/// `Unpin` has no consequence at all for non-pinned data. In particular,
+/// [`mem::replace`] happily moves `!Unpin` data (it works for any `&mut T`, not
+/// just when `T: Unpin`). However, you cannot use [`mem::replace`] on data
+/// wrapped inside a [`Pin<P<T>>`][Pin] because you cannot get the `&mut T` you
+/// need for that, and *that* is what makes this system work.
+///
+/// So this, for example, can only be done on types implementing `Unpin`:
+///
+/// ```rust
+/// # #![allow(unused_must_use)]
+/// use std::mem;
+/// use std::pin::Pin;
+///
+/// let mut string = "this".to_string();
+/// let mut pinned_string = Pin::new(&mut string);
+///
+/// // We need a mutable reference to call `mem::replace`.
+/// // We can obtain such a reference by (implicitly) invoking `Pin::deref_mut`,
+/// // but that is only possible because `String` implements `Unpin`.
+/// mem::replace(&mut *pinned_string, "other".to_string());
+/// ```
+///
+/// This trait is automatically implemented for almost every type.
+///
+/// [`mem::replace`]: crate::mem::replace
+/// [Pin]: crate::pin::Pin
+/// [`pin` module]: crate::pin
+#[stable(feature = "pin", since = "1.33.0")]
+#[rustc_on_unimplemented(
+ note = "consider using `Box::pin`",
+ message = "`{Self}` cannot be unpinned"
+)]
+#[lang = "unpin"]
+pub auto trait Unpin {}
+
+/// A marker type which does not implement `Unpin`.
+///
+/// If a type contains a `PhantomPinned`, it will not implement `Unpin` by default.
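+///
+/// A minimal sketch (the `NotUnpin` type is illustrative only):
+///
+/// ```
+/// use std::marker::PhantomPinned;
+///
+/// struct NotUnpin {
+///     data: u8,
+///     // Opts `NotUnpin` out of the automatic `Unpin` implementation.
+///     _pin: PhantomPinned,
+/// }
+///
+/// let pinned = Box::pin(NotUnpin { data: 0, _pin: PhantomPinned });
+/// // Reading through the pin is fine, but safe code can no longer move the
+/// // value back out of the pin, because `NotUnpin` is `!Unpin`.
+/// assert_eq!(pinned.data, 0);
+/// ```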
+#[stable(feature = "pin", since = "1.33.0")]
+#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
+pub struct PhantomPinned;
+
+#[stable(feature = "pin", since = "1.33.0")]
+impl !Unpin for PhantomPinned {}
+
+#[stable(feature = "pin", since = "1.33.0")]
+impl<'a, T: ?Sized + 'a> Unpin for &'a T {}
+
+#[stable(feature = "pin", since = "1.33.0")]
+impl<'a, T: ?Sized + 'a> Unpin for &'a mut T {}
+
+#[stable(feature = "pin_raw", since = "1.38.0")]
+impl<T: ?Sized> Unpin for *const T {}
+
+#[stable(feature = "pin_raw", since = "1.38.0")]
+impl<T: ?Sized> Unpin for *mut T {}
+
+/// A marker for types that can be dropped.
+///
+/// This should be used for `~const` bounds,
+/// as non-const bounds will always hold for every type.
+#[unstable(feature = "const_trait_impl", issue = "67792")]
+#[lang = "destruct"]
+#[rustc_on_unimplemented(message = "can't drop `{Self}`", append_const_msg)]
+pub trait Destruct {}
+
+/// Implementations of `Copy` for primitive types.
+///
+/// Implementations that cannot be described in Rust
+/// are implemented in `traits::SelectionContext::copy_clone_conditions()`
+/// in `rustc_trait_selection`.
+mod copy_impls {
+
+ use super::Copy;
+
+ macro_rules! impl_copy {
+ ($($t:ty)*) => {
+ $(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl Copy for $t {}
+ )*
+ }
+ }
+
+ impl_copy! {
+ usize u8 u16 u32 u64 u128
+ isize i8 i16 i32 i64 i128
+ f32 f64
+ bool char
+ }
+
+ #[unstable(feature = "never_type", issue = "35121")]
+ impl Copy for ! {}
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<T: ?Sized> Copy for *const T {}
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<T: ?Sized> Copy for *mut T {}
+
+ /// Shared references can be copied, but mutable references *cannot*!
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<T: ?Sized> Copy for &T {}
+}
diff --git a/library/core/src/mem/manually_drop.rs b/library/core/src/mem/manually_drop.rs
new file mode 100644
index 000000000..3d719afe4
--- /dev/null
+++ b/library/core/src/mem/manually_drop.rs
@@ -0,0 +1,165 @@
+use crate::ops::{Deref, DerefMut};
+use crate::ptr;
+
+/// A wrapper to inhibit the compiler from automatically calling `T`’s destructor.
+/// This wrapper is 0-cost.
+///
+/// `ManuallyDrop<T>` is guaranteed to have the same layout as `T`, and is subject
+/// to the same layout optimizations as `T`. As a consequence, it has *no effect*
+/// on the assumptions that the compiler makes about its contents. For example,
+/// initializing a `ManuallyDrop<&mut T>` with [`mem::zeroed`] is undefined
+/// behavior. If you need to handle uninitialized data, use [`MaybeUninit<T>`]
+/// instead.
+///
+/// Note that accessing the value inside a `ManuallyDrop<T>` is safe.
+/// This means that a `ManuallyDrop<T>` whose content has been dropped must not
+/// be exposed through a public safe API.
+/// Correspondingly, `ManuallyDrop::drop` is unsafe.
+///
+/// # `ManuallyDrop` and drop order
+///
+/// Rust has a well-defined [drop order] of values. To make sure that fields or
+/// locals are dropped in a specific order, reorder the declarations such that
+/// the implicit drop order is the correct one.
+///
+/// It is possible to use `ManuallyDrop` to control the drop order, but this
+/// requires unsafe code and is hard to do correctly in the presence of
+/// unwinding.
+///
+/// For example, if you want to make sure that a specific field is dropped after
+/// the others, make it the last field of a struct:
+///
+/// ```
+/// struct Context;
+///
+/// struct Widget {
+/// children: Vec<Widget>,
+/// // `context` will be dropped after `children`.
+/// // Rust guarantees that fields are dropped in the order of declaration.
+/// context: Context,
+/// }
+/// ```
+///
+/// [drop order]: https://doc.rust-lang.org/reference/destructors.html
+/// [`mem::zeroed`]: crate::mem::zeroed
+/// [`MaybeUninit<T>`]: crate::mem::MaybeUninit
+#[stable(feature = "manually_drop", since = "1.20.0")]
+#[lang = "manually_drop"]
+#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[repr(transparent)]
+pub struct ManuallyDrop<T: ?Sized> {
+ value: T,
+}
+
+impl<T> ManuallyDrop<T> {
+ /// Wraps a value to be manually dropped.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use std::mem::ManuallyDrop;
+ /// let mut x = ManuallyDrop::new(String::from("Hello World!"));
+ /// x.truncate(5); // You can still safely operate on the value
+ /// assert_eq!(*x, "Hello");
+ /// // But `Drop` will not be run here
+ /// ```
+ #[must_use = "if you don't need the wrapper, you can use `mem::forget` instead"]
+ #[stable(feature = "manually_drop", since = "1.20.0")]
+ #[rustc_const_stable(feature = "const_manually_drop", since = "1.32.0")]
+ #[inline(always)]
+ pub const fn new(value: T) -> ManuallyDrop<T> {
+ ManuallyDrop { value }
+ }
+
+ /// Extracts the value from the `ManuallyDrop` container.
+ ///
+ /// This allows the value to be dropped again.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use std::mem::ManuallyDrop;
+ /// let x = ManuallyDrop::new(Box::new(()));
+ /// let _: Box<()> = ManuallyDrop::into_inner(x); // This drops the `Box`.
+ /// ```
+ #[stable(feature = "manually_drop", since = "1.20.0")]
+ #[rustc_const_stable(feature = "const_manually_drop", since = "1.32.0")]
+ #[inline(always)]
+ pub const fn into_inner(slot: ManuallyDrop<T>) -> T {
+ slot.value
+ }
+
+ /// Takes the value out of the `ManuallyDrop<T>` container.
+ ///
+ /// This method is primarily intended for moving out values in drop.
+ /// Instead of using [`ManuallyDrop::drop`] to manually drop the value,
+ /// you can use this method to take the value and use it however desired.
+ ///
+ /// Whenever possible, it is preferable to use [`into_inner`][`ManuallyDrop::into_inner`]
+ /// instead, which prevents duplicating the content of the `ManuallyDrop<T>`.
+ ///
+ /// # Safety
+ ///
+ /// This function semantically moves out the contained value without preventing further usage,
+ /// leaving the state of this container unchanged.
+ /// It is your responsibility to ensure that this `ManuallyDrop` is not used again.
+ ///
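+ /// # Examples
+ ///
+ /// A sketch of the "move out in `Drop`" pattern this method enables (the
+ /// `PrintOnDrop` type is illustrative only):
+ ///
+ /// ```rust
+ /// use std::mem::ManuallyDrop;
+ ///
+ /// struct PrintOnDrop {
+ ///     msg: ManuallyDrop<String>,
+ /// }
+ ///
+ /// impl Drop for PrintOnDrop {
+ ///     fn drop(&mut self) {
+ ///         // SAFETY: `self.msg` is initialized and will never be used again,
+ ///         // since `self` is being dropped right now.
+ ///         let msg = unsafe { ManuallyDrop::take(&mut self.msg) };
+ ///         println!("{msg}");
+ ///     }
+ /// }
+ ///
+ /// let _p = PrintOnDrop { msg: ManuallyDrop::new(String::from("goodbye")) };
+ /// ```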
+ #[must_use = "if you don't need the value, you can use `ManuallyDrop::drop` instead"]
+ #[stable(feature = "manually_drop_take", since = "1.42.0")]
+ #[inline]
+ pub unsafe fn take(slot: &mut ManuallyDrop<T>) -> T {
+ // SAFETY: we are reading from a reference, which is guaranteed
+ // to be valid for reads.
+ unsafe { ptr::read(&slot.value) }
+ }
+}
+
+impl<T: ?Sized> ManuallyDrop<T> {
+ /// Manually drops the contained value. This is exactly equivalent to calling
+ /// [`ptr::drop_in_place`] with a pointer to the contained value. As such, unless
+ /// the contained value is a packed struct, the destructor will be called in-place
+ /// without moving the value, and thus can be used to safely drop [pinned] data.
+ ///
+ /// If you have ownership of the value, you can use [`ManuallyDrop::into_inner`] instead.
+ ///
+ /// # Safety
+ ///
+ /// This function runs the destructor of the contained value. Other than changes made by
+ /// the destructor itself, the memory is left unchanged, and so as far as the compiler is
+ /// concerned still holds a bit-pattern which is valid for the type `T`.
+ ///
+ /// However, this "zombie" value should not be exposed to safe code, and this function
+ /// should not be called more than once. Using a value after it has been dropped, or dropping
+ /// a value multiple times, can cause undefined behavior (depending on what `drop` does).
+ /// This is normally prevented by the type system, but users of `ManuallyDrop` must
+ /// uphold those guarantees without assistance from the compiler.
+ ///
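+ /// # Examples
+ ///
+ /// A minimal sketch of dropping the contained value exactly once:
+ ///
+ /// ```rust
+ /// use std::mem::ManuallyDrop;
+ ///
+ /// let mut v = ManuallyDrop::new(vec![1, 2, 3]);
+ /// // SAFETY: the vector is initialized and is never used again below.
+ /// unsafe { ManuallyDrop::drop(&mut v); }
+ /// // `v` now holds a dropped "zombie" value and must not be used again.
+ /// ```
+ ///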
+ /// [pinned]: crate::pin
+ #[stable(feature = "manually_drop", since = "1.20.0")]
+ #[inline]
+ pub unsafe fn drop(slot: &mut ManuallyDrop<T>) {
+ // SAFETY: we are dropping the value pointed to by a mutable reference
+ // which is guaranteed to be valid for writes.
+ // It is up to the caller to make sure that `slot` isn't dropped again.
+ unsafe { ptr::drop_in_place(&mut slot.value) }
+ }
+}
+
+#[stable(feature = "manually_drop", since = "1.20.0")]
+#[rustc_const_unstable(feature = "const_deref", issue = "88955")]
+impl<T: ?Sized> const Deref for ManuallyDrop<T> {
+ type Target = T;
+ #[inline(always)]
+ fn deref(&self) -> &T {
+ &self.value
+ }
+}
+
+#[stable(feature = "manually_drop", since = "1.20.0")]
+#[rustc_const_unstable(feature = "const_deref", issue = "88955")]
+impl<T: ?Sized> const DerefMut for ManuallyDrop<T> {
+ #[inline(always)]
+ fn deref_mut(&mut self) -> &mut T {
+ &mut self.value
+ }
+}
diff --git a/library/core/src/mem/maybe_uninit.rs b/library/core/src/mem/maybe_uninit.rs
new file mode 100644
index 000000000..b4ea53608
--- /dev/null
+++ b/library/core/src/mem/maybe_uninit.rs
@@ -0,0 +1,1292 @@
+use crate::any::type_name;
+use crate::fmt;
+use crate::intrinsics;
+use crate::mem::{self, ManuallyDrop};
+use crate::ptr;
+use crate::slice;
+
+/// A wrapper type to construct uninitialized instances of `T`.
+///
+/// # Initialization invariant
+///
+/// The compiler, in general, assumes that a variable is properly initialized
+/// according to the requirements of the variable's type. For example, a variable of
+/// reference type must be aligned and non-null. This is an invariant that must
+/// *always* be upheld, even in unsafe code. As a consequence, zero-initializing a
+/// variable of reference type causes instantaneous [undefined behavior][ub],
+/// no matter whether that reference ever gets used to access memory:
+///
+/// ```rust,no_run
+/// # #![allow(invalid_value)]
+/// use std::mem::{self, MaybeUninit};
+///
+/// let x: &i32 = unsafe { mem::zeroed() }; // undefined behavior! ⚠️
+/// // The equivalent code with `MaybeUninit<&i32>`:
+/// let x: &i32 = unsafe { MaybeUninit::zeroed().assume_init() }; // undefined behavior! ⚠️
+/// ```
+///
+/// This is exploited by the compiler for various optimizations, such as eliding
+/// run-time checks and optimizing `enum` layout.
+///
+/// Similarly, entirely uninitialized memory may have any content, while a `bool` must
+/// always be `true` or `false`. Hence, creating an uninitialized `bool` is undefined behavior:
+///
+/// ```rust,no_run
+/// # #![allow(invalid_value)]
+/// use std::mem::{self, MaybeUninit};
+///
+/// let b: bool = unsafe { mem::uninitialized() }; // undefined behavior! ⚠️
+/// // The equivalent code with `MaybeUninit<bool>`:
+/// let b: bool = unsafe { MaybeUninit::uninit().assume_init() }; // undefined behavior! ⚠️
+/// ```
+///
+/// Moreover, uninitialized memory is special in that it does not have a fixed value ("fixed"
+/// meaning "it won't change without being written to"). Reading the same uninitialized byte
+/// multiple times can give different results. This makes it undefined behavior to have
+/// uninitialized data in a variable even if that variable has an integer type, which otherwise can
+/// hold any *fixed* bit pattern:
+///
+/// ```rust,no_run
+/// # #![allow(invalid_value)]
+/// use std::mem::{self, MaybeUninit};
+///
+/// let x: i32 = unsafe { mem::uninitialized() }; // undefined behavior! ⚠️
+/// // The equivalent code with `MaybeUninit<i32>`:
+/// let x: i32 = unsafe { MaybeUninit::uninit().assume_init() }; // undefined behavior! ⚠️
+/// ```
+/// (Notice that the rules around uninitialized integers are not finalized yet, but
+/// until they are, it is advisable to avoid them.)
+///
+/// On top of that, remember that most types have additional invariants beyond merely
+/// being considered initialized at the type level. For example, a `1`-initialized [`Vec<T>`]
+/// is considered initialized (under the current implementation; this does not constitute
+/// a stable guarantee) because the only requirement the compiler knows about it
+/// is that the data pointer must be non-null. Creating such a `Vec<T>` does not cause
+/// *immediate* undefined behavior, but will cause undefined behavior with most
+/// safe operations (including dropping it).
+///
+/// [`Vec<T>`]: ../../std/vec/struct.Vec.html
+///
+/// # Examples
+///
+/// `MaybeUninit<T>` serves to enable unsafe code to deal with uninitialized data.
+/// It is a signal to the compiler indicating that the data here might *not*
+/// be initialized:
+///
+/// ```rust
+/// use std::mem::MaybeUninit;
+///
+/// // Create an explicitly uninitialized reference. The compiler knows that data inside
+/// // a `MaybeUninit<T>` may be invalid, and hence this is not UB:
+/// let mut x = MaybeUninit::<&i32>::uninit();
+/// // Set it to a valid value.
+/// x.write(&0);
+/// // Extract the initialized data -- this is only allowed *after* properly
+/// // initializing `x`!
+/// let x = unsafe { x.assume_init() };
+/// ```
+///
+/// The compiler then knows to not make any incorrect assumptions or optimizations on this code.
+///
+/// You can think of `MaybeUninit<T>` as being a bit like `Option<T>` but without
+/// any of the run-time tracking and without any of the safety checks.
+///
+/// ## Out-pointers
+///
+/// You can use `MaybeUninit<T>` to implement "out-pointers": instead of returning data
+/// from a function, pass it a pointer to some (uninitialized) memory to put the
+/// result into. This can be useful when it is important for the caller to control
+/// how the memory the result is stored in gets allocated, and you want to avoid
+/// unnecessary moves.
+///
+/// ```
+/// use std::mem::MaybeUninit;
+///
+/// unsafe fn make_vec(out: *mut Vec<i32>) {
+/// // `write` does not drop the old contents, which is important.
+/// out.write(vec![1, 2, 3]);
+/// }
+///
+/// let mut v = MaybeUninit::uninit();
+/// unsafe { make_vec(v.as_mut_ptr()); }
+/// // Now we know `v` is initialized! This also makes sure the vector gets
+/// // properly dropped.
+/// let v = unsafe { v.assume_init() };
+/// assert_eq!(&v, &[1, 2, 3]);
+/// ```
+///
+/// ## Initializing an array element-by-element
+///
+/// `MaybeUninit<T>` can be used to initialize a large array element-by-element:
+///
+/// ```
+/// use std::mem::{self, MaybeUninit};
+///
+/// let data = {
+/// // Create an uninitialized array of `MaybeUninit`. The `assume_init` is
+/// // safe because the type we are claiming to have initialized here is a
+/// // bunch of `MaybeUninit`s, which do not require initialization.
+/// let mut data: [MaybeUninit<Vec<u32>>; 1000] = unsafe {
+/// MaybeUninit::uninit().assume_init()
+/// };
+///
+/// // Dropping a `MaybeUninit` does nothing. Thus using raw pointer
+/// // assignment instead of `ptr::write` does not cause the old
+/// // uninitialized value to be dropped. Also if there is a panic during
+/// // this loop, we have a memory leak, but there is no memory safety
+/// // issue.
+/// for elem in &mut data[..] {
+/// elem.write(vec![42]);
+/// }
+///
+/// // Everything is initialized. Transmute the array to the
+/// // initialized type.
+/// unsafe { mem::transmute::<_, [Vec<u32>; 1000]>(data) }
+/// };
+///
+/// assert_eq!(&data[0], &[42]);
+/// ```
+///
+/// You can also work with partially initialized arrays, which can
+/// be found in low-level data structures.
+///
+/// ```
+/// use std::mem::MaybeUninit;
+/// use std::ptr;
+///
+/// // Create an uninitialized array of `MaybeUninit`. The `assume_init` is
+/// // safe because the type we are claiming to have initialized here is a
+/// // bunch of `MaybeUninit`s, which do not require initialization.
+/// let mut data: [MaybeUninit<String>; 1000] = unsafe { MaybeUninit::uninit().assume_init() };
+/// // Count the number of elements we have assigned.
+/// let mut data_len: usize = 0;
+///
+/// for elem in &mut data[0..500] {
+/// elem.write(String::from("hello"));
+/// data_len += 1;
+/// }
+///
+/// // For each item in the array, drop if we allocated it.
+/// for elem in &mut data[0..data_len] {
+/// unsafe { ptr::drop_in_place(elem.as_mut_ptr()); }
+/// }
+/// ```
+///
+/// ## Initializing a struct field-by-field
+///
+/// You can use `MaybeUninit<T>` and the [`std::ptr::addr_of_mut`] macro to initialize structs field by field:
+///
+/// ```rust
+/// use std::mem::MaybeUninit;
+/// use std::ptr::addr_of_mut;
+///
+/// #[derive(Debug, PartialEq)]
+/// pub struct Foo {
+/// name: String,
+/// list: Vec<u8>,
+/// }
+///
+/// let foo = {
+/// let mut uninit: MaybeUninit<Foo> = MaybeUninit::uninit();
+/// let ptr = uninit.as_mut_ptr();
+///
+/// // Initializing the `name` field
+/// // Using `write` instead of assignment via `=` to not call `drop` on the
+/// // old, uninitialized value.
+/// unsafe { addr_of_mut!((*ptr).name).write("Bob".to_string()); }
+///
+/// // Initializing the `list` field
+/// // If there is a panic here, then the `String` in the `name` field leaks.
+/// unsafe { addr_of_mut!((*ptr).list).write(vec![0, 1, 2]); }
+///
+/// // All the fields are initialized, so we call `assume_init` to get an initialized Foo.
+/// unsafe { uninit.assume_init() }
+/// };
+///
+/// assert_eq!(
+/// foo,
+/// Foo {
+/// name: "Bob".to_string(),
+/// list: vec![0, 1, 2]
+/// }
+/// );
+/// ```
+/// [`std::ptr::addr_of_mut`]: crate::ptr::addr_of_mut
+/// [ub]: ../../reference/behavior-considered-undefined.html
+///
+/// # Layout
+///
+/// `MaybeUninit<T>` is guaranteed to have the same size, alignment, and ABI as `T`:
+///
+/// ```rust
+/// use std::mem::{MaybeUninit, size_of, align_of};
+/// assert_eq!(size_of::<MaybeUninit<u64>>(), size_of::<u64>());
+/// assert_eq!(align_of::<MaybeUninit<u64>>(), align_of::<u64>());
+/// ```
+///
+/// However, remember that a type *containing* a `MaybeUninit<T>` does not necessarily have the
+/// same layout; Rust does not in general guarantee that the fields of a `Foo<T>` have the same
+/// order as a `Foo<U>` even if `T` and `U` have the same size and alignment. Furthermore, because
+/// any bit value is valid for a `MaybeUninit<T>`, the compiler can't apply non-zero/niche-filling
+/// optimizations, potentially resulting in a larger size:
+///
+/// ```rust
+/// # use std::mem::{MaybeUninit, size_of};
+/// assert_eq!(size_of::<Option<bool>>(), 1);
+/// assert_eq!(size_of::<Option<MaybeUninit<bool>>>(), 2);
+/// ```
+///
+/// If `T` is FFI-safe, then so is `MaybeUninit<T>`.
+///
+/// While `MaybeUninit` is `#[repr(transparent)]` (indicating it guarantees the same size,
+/// alignment, and ABI as `T`), this does *not* change any of the previous caveats. `Option<T>` and
+/// `Option<MaybeUninit<T>>` may still have different sizes, and types containing a field of type
+/// `T` may be laid out (and sized) differently than if that field were `MaybeUninit<T>`.
+/// `MaybeUninit` is a union type, and `#[repr(transparent)]` on unions is unstable (see [the
+/// tracking issue](https://github.com/rust-lang/rust/issues/60405)). Over time, the exact
+/// guarantees of `#[repr(transparent)]` on unions may evolve, and `MaybeUninit` may or may not
+/// remain `#[repr(transparent)]`. That said, `MaybeUninit<T>` will *always* guarantee that it has
+/// the same size, alignment, and ABI as `T`; it's just that the way `MaybeUninit` implements that
+/// guarantee may evolve.
+#[stable(feature = "maybe_uninit", since = "1.36.0")]
+// Lang item so we can wrap other types in it. This is useful for generators.
+#[lang = "maybe_uninit"]
+#[derive(Copy)]
+#[repr(transparent)]
+pub union MaybeUninit<T> {
+ uninit: (),
+ value: ManuallyDrop<T>,
+}
+
+#[stable(feature = "maybe_uninit", since = "1.36.0")]
+impl<T: Copy> Clone for MaybeUninit<T> {
+ #[inline(always)]
+ fn clone(&self) -> Self {
+ // Not calling `T::clone()`, we cannot know if we are initialized enough for that.
+ *self
+ }
+}
+
+#[stable(feature = "maybe_uninit_debug", since = "1.41.0")]
+impl<T> fmt::Debug for MaybeUninit<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad(type_name::<Self>())
+ }
+}
+
+impl<T> MaybeUninit<T> {
+ /// Creates a new `MaybeUninit<T>` initialized with the given value.
+ /// It is safe to call [`assume_init`] on the return value of this function.
+ ///
+ /// Note that dropping a `MaybeUninit<T>` will never call `T`'s drop code.
+ /// It is your responsibility to make sure `T` gets dropped if it got initialized.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let v: MaybeUninit<Vec<u8>> = MaybeUninit::new(vec![42]);
+ /// ```
+ ///
+ /// [`assume_init`]: MaybeUninit::assume_init
+ #[stable(feature = "maybe_uninit", since = "1.36.0")]
+ #[rustc_const_stable(feature = "const_maybe_uninit", since = "1.36.0")]
+ #[must_use = "use `forget` to avoid running Drop code"]
+ #[inline(always)]
+ pub const fn new(val: T) -> MaybeUninit<T> {
+ MaybeUninit { value: ManuallyDrop::new(val) }
+ }
+
+ /// Creates a new `MaybeUninit<T>` in an uninitialized state.
+ ///
+ /// Note that dropping a `MaybeUninit<T>` will never call `T`'s drop code.
+ /// It is your responsibility to make sure `T` gets dropped if it got initialized.
+ ///
+ /// See the [type-level documentation][MaybeUninit] for some examples.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let v: MaybeUninit<String> = MaybeUninit::uninit();
+ /// ```
+ #[stable(feature = "maybe_uninit", since = "1.36.0")]
+ #[rustc_const_stable(feature = "const_maybe_uninit", since = "1.36.0")]
+ #[must_use]
+ #[inline(always)]
+ #[rustc_diagnostic_item = "maybe_uninit_uninit"]
+ pub const fn uninit() -> MaybeUninit<T> {
+ MaybeUninit { uninit: () }
+ }
+
+ /// Creates a new array of `MaybeUninit<T>` items in an uninitialized state.
+ ///
+ /// Note: in a future Rust version this method may become unnecessary
+ /// when Rust allows
+ /// [inline const expressions](https://github.com/rust-lang/rust/issues/76001).
+ /// The example below could then use `let mut buf = [const { MaybeUninit::<u8>::uninit() }; 32];`.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// #![feature(maybe_uninit_uninit_array, maybe_uninit_slice)]
+ ///
+ /// use std::mem::MaybeUninit;
+ ///
+ /// extern "C" {
+ /// fn read_into_buffer(ptr: *mut u8, max_len: usize) -> usize;
+ /// }
+ ///
+ /// /// Returns a (possibly smaller) slice of data that was actually read
+ /// fn read(buf: &mut [MaybeUninit<u8>]) -> &[u8] {
+ /// unsafe {
+ /// let len = read_into_buffer(buf.as_mut_ptr() as *mut u8, buf.len());
+ /// MaybeUninit::slice_assume_init_ref(&buf[..len])
+ /// }
+ /// }
+ ///
+ /// let mut buf: [MaybeUninit<u8>; 32] = MaybeUninit::uninit_array();
+ /// let data = read(&mut buf);
+ /// ```
+ #[unstable(feature = "maybe_uninit_uninit_array", issue = "96097")]
+ #[rustc_const_unstable(feature = "const_maybe_uninit_uninit_array", issue = "96097")]
+ #[must_use]
+ #[inline(always)]
+ pub const fn uninit_array<const N: usize>() -> [Self; N] {
+ // SAFETY: An uninitialized `[MaybeUninit<_>; LEN]` is valid.
+ unsafe { MaybeUninit::<[MaybeUninit<T>; N]>::uninit().assume_init() }
+ }
+
+ /// Creates a new `MaybeUninit<T>` in an uninitialized state, with the memory being
+ /// filled with `0` bytes. It depends on `T` whether that already makes for
+ /// proper initialization. For example, `MaybeUninit<usize>::zeroed()` is initialized,
+ /// but `MaybeUninit<&'static i32>::zeroed()` is not because references must not
+ /// be null.
+ ///
+ /// Note that dropping a `MaybeUninit<T>` will never call `T`'s drop code.
+ /// It is your responsibility to make sure `T` gets dropped if it got initialized.
+ ///
+ /// # Example
+ ///
+ /// Correct usage of this function: initializing a struct with zero, where all
+ /// fields of the struct can hold the bit-pattern 0 as a valid value.
+ ///
+ /// ```rust
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let x = MaybeUninit::<(u8, bool)>::zeroed();
+ /// let x = unsafe { x.assume_init() };
+ /// assert_eq!(x, (0, false));
+ /// ```
+ ///
+ /// *Incorrect* usage of this function: calling `zeroed().assume_init()`
+ /// when `0` is not a valid bit-pattern for the type:
+ ///
+ /// ```rust,no_run
+ /// use std::mem::MaybeUninit;
+ ///
+ /// enum NotZero { One = 1, Two = 2 }
+ ///
+ /// let x = MaybeUninit::<(u8, NotZero)>::zeroed();
+ /// let x = unsafe { x.assume_init() };
+ /// // Inside a pair, we create a `NotZero` that does not have a valid discriminant.
+ /// // This is undefined behavior. ⚠️
+ /// ```
+ #[stable(feature = "maybe_uninit", since = "1.36.0")]
+ #[rustc_const_unstable(feature = "const_maybe_uninit_zeroed", issue = "91850")]
+ #[must_use]
+ #[inline]
+ #[rustc_diagnostic_item = "maybe_uninit_zeroed"]
+ pub const fn zeroed() -> MaybeUninit<T> {
+ let mut u = MaybeUninit::<T>::uninit();
+ // SAFETY: `u.as_mut_ptr()` points to allocated memory.
+ unsafe {
+ u.as_mut_ptr().write_bytes(0u8, 1);
+ }
+ u
+ }
+
+ /// Sets the value of the `MaybeUninit<T>`.
+ ///
+ /// This overwrites any previous value without dropping it, so be careful
+ /// not to use this twice unless you want to skip running the destructor.
+ /// For your convenience, this also returns a mutable reference to the
+ /// (now safely initialized) contents of `self`.
+ ///
+ /// As the content is stored inside a `MaybeUninit`, the destructor is not
+ /// run for the inner data if the `MaybeUninit` leaves scope without a call to
+ /// [`assume_init`], [`assume_init_drop`], or similar. Code that receives
+ /// the mutable reference returned by this function needs to keep this in
+ /// mind. The safety model of Rust regards leaks as safe, but they are
+ /// usually still undesirable. This being said, the mutable reference
+ /// behaves like any other mutable reference would, so assigning a new value
+ /// to it will drop the old content.
+ ///
+ /// [`assume_init`]: Self::assume_init
+ /// [`assume_init_drop`]: Self::assume_init_drop
+ ///
+ /// # Examples
+ ///
+ /// Correct usage of this method:
+ ///
+ /// ```rust
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let mut x = MaybeUninit::<Vec<u8>>::uninit();
+ ///
+ /// {
+ /// let hello = x.write((&b"Hello, world!").to_vec());
+ /// // Setting hello does not leak prior allocations, but drops them
+ /// *hello = (&b"Hello").to_vec();
+ /// hello[0] = b'h';
+ /// }
+ /// // x is initialized now:
+ /// let s = unsafe { x.assume_init() };
+ /// assert_eq!(b"hello", s.as_slice());
+ /// ```
+ ///
+ /// This usage of the method causes a leak:
+ ///
+ /// ```rust
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let mut x = MaybeUninit::<String>::uninit();
+ ///
+ /// x.write("Hello".to_string());
+ /// // This leaks the contained string:
+ /// x.write("hello".to_string());
+ /// // x is initialized now:
+ /// let s = unsafe { x.assume_init() };
+ /// ```
+ ///
+ /// This method can be used to avoid unsafe in some cases. The example below
+ /// shows part of an implementation of a fixed-size arena that lends out
+ /// pinned references.
+ /// With `write`, we can avoid the need to write through a raw pointer:
+ ///
+ /// ```rust
+ /// use core::pin::Pin;
+ /// use core::mem::MaybeUninit;
+ ///
+ /// struct PinArena<T> {
+ /// memory: Box<[MaybeUninit<T>]>,
+ /// len: usize,
+ /// }
+ ///
+ /// impl<T> PinArena<T> {
+ /// pub fn capacity(&self) -> usize {
+ /// self.memory.len()
+ /// }
+ /// pub fn push(&mut self, val: T) -> Pin<&mut T> {
+ /// if self.len >= self.capacity() {
+ /// panic!("Attempted to push to a full pin arena!");
+ /// }
+ /// let ref_ = self.memory[self.len].write(val);
+ /// self.len += 1;
+ /// unsafe { Pin::new_unchecked(ref_) }
+ /// }
+ /// }
+ /// ```
+ #[stable(feature = "maybe_uninit_write", since = "1.55.0")]
+ #[rustc_const_unstable(feature = "const_maybe_uninit_write", issue = "63567")]
+ #[inline(always)]
+ pub const fn write(&mut self, val: T) -> &mut T {
+ *self = MaybeUninit::new(val);
+ // SAFETY: We just initialized this value.
+ unsafe { self.assume_init_mut() }
+ }
+
+ /// Gets a pointer to the contained value. Reading from this pointer or turning it
+ /// into a reference is undefined behavior unless the `MaybeUninit<T>` is initialized.
+ /// Writing to memory that this pointer (non-transitively) points to is undefined behavior
+ /// (except inside an `UnsafeCell<T>`).
+ ///
+ /// # Examples
+ ///
+ /// Correct usage of this method:
+ ///
+ /// ```rust
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let mut x = MaybeUninit::<Vec<u32>>::uninit();
+ /// x.write(vec![0, 1, 2]);
+ /// // Create a reference into the `MaybeUninit<T>`. This is okay because we initialized it.
+ /// let x_vec = unsafe { &*x.as_ptr() };
+ /// assert_eq!(x_vec.len(), 3);
+ /// ```
+ ///
+ /// *Incorrect* usage of this method:
+ ///
+ /// ```rust,no_run
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let x = MaybeUninit::<Vec<u32>>::uninit();
+ /// let x_vec = unsafe { &*x.as_ptr() };
+ /// // We have created a reference to an uninitialized vector! This is undefined behavior. ⚠️
+ /// ```
+ ///
+ /// (Notice that the rules around references to uninitialized data are not finalized yet, but
+ /// until they are, it is advisable to avoid them.)
+ #[stable(feature = "maybe_uninit", since = "1.36.0")]
+ #[rustc_const_stable(feature = "const_maybe_uninit_as_ptr", since = "1.59.0")]
+ #[inline(always)]
+ pub const fn as_ptr(&self) -> *const T {
+ // `MaybeUninit` and `ManuallyDrop` are both `repr(transparent)` so we can cast the pointer.
+ self as *const _ as *const T
+ }
+
+ /// Gets a mutable pointer to the contained value. Reading from this pointer or turning it
+ /// into a reference is undefined behavior unless the `MaybeUninit<T>` is initialized.
+ ///
+ /// # Examples
+ ///
+ /// Correct usage of this method:
+ ///
+ /// ```rust
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let mut x = MaybeUninit::<Vec<u32>>::uninit();
+ /// x.write(vec![0, 1, 2]);
+ /// // Create a reference into the `MaybeUninit<Vec<u32>>`.
+ /// // This is okay because we initialized it.
+ /// let x_vec = unsafe { &mut *x.as_mut_ptr() };
+ /// x_vec.push(3);
+ /// assert_eq!(x_vec.len(), 4);
+ /// ```
+ ///
+ /// *Incorrect* usage of this method:
+ ///
+ /// ```rust,no_run
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let mut x = MaybeUninit::<Vec<u32>>::uninit();
+ /// let x_vec = unsafe { &mut *x.as_mut_ptr() };
+ /// // We have created a reference to an uninitialized vector! This is undefined behavior. ⚠️
+ /// ```
+ ///
+ /// (Notice that the rules around references to uninitialized data are not finalized yet, but
+ /// until they are, it is advisable to avoid them.)
+ #[stable(feature = "maybe_uninit", since = "1.36.0")]
+ #[rustc_const_unstable(feature = "const_maybe_uninit_as_mut_ptr", issue = "75251")]
+ #[inline(always)]
+ pub const fn as_mut_ptr(&mut self) -> *mut T {
+ // `MaybeUninit` and `ManuallyDrop` are both `repr(transparent)` so we can cast the pointer.
+ self as *mut _ as *mut T
+ }
+
+ /// Extracts the value from the `MaybeUninit<T>` container. This is a great way
+ /// to ensure that the data will get dropped, because the resulting `T` is
+ /// subject to the usual drop handling.
+ ///
+ /// # Safety
+ ///
+ /// It is up to the caller to guarantee that the `MaybeUninit<T>` really is in an initialized
+ /// state. Calling this when the content is not yet fully initialized causes immediate undefined
+ /// behavior. The [type-level documentation][inv] contains more information about
+ /// this initialization invariant.
+ ///
+ /// [inv]: #initialization-invariant
+ ///
+ /// On top of that, remember that most types have additional invariants beyond merely
+ /// being considered initialized at the type level. For example, a `1`-initialized [`Vec<T>`]
+ /// is considered initialized (under the current implementation; this does not constitute
+ /// a stable guarantee) because the only requirement the compiler knows about it
+ /// is that the data pointer must be non-null. Creating such a `Vec<T>` does not cause
+ /// *immediate* undefined behavior, but will cause undefined behavior with most
+ /// safe operations (including dropping it).
+ ///
+ /// [`Vec<T>`]: ../../std/vec/struct.Vec.html
+ ///
+ /// # Examples
+ ///
+ /// Correct usage of this method:
+ ///
+ /// ```rust
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let mut x = MaybeUninit::<bool>::uninit();
+ /// x.write(true);
+ /// let x_init = unsafe { x.assume_init() };
+ /// assert_eq!(x_init, true);
+ /// ```
+ ///
+ /// *Incorrect* usage of this method:
+ ///
+ /// ```rust,no_run
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let x = MaybeUninit::<Vec<u32>>::uninit();
+ /// let x_init = unsafe { x.assume_init() };
+ /// // `x` had not been initialized yet, so this last line caused undefined behavior. ⚠️
+ /// ```
+ #[stable(feature = "maybe_uninit", since = "1.36.0")]
+ #[rustc_const_stable(feature = "const_maybe_uninit_assume_init_by_value", since = "1.59.0")]
+ #[inline(always)]
+ #[rustc_diagnostic_item = "assume_init"]
+ #[track_caller]
+ pub const unsafe fn assume_init(self) -> T {
+ // SAFETY: the caller must guarantee that `self` is initialized.
+ // This also means that `self` must be a `value` variant.
+ unsafe {
+ intrinsics::assert_inhabited::<T>();
+ ManuallyDrop::into_inner(self.value)
+ }
+ }
+
+ /// Reads the value from the `MaybeUninit<T>` container. The resulting `T` is subject
+ /// to the usual drop handling.
+ ///
+ /// Whenever possible, it is preferable to use [`assume_init`] instead, which
+ /// prevents duplicating the content of the `MaybeUninit<T>`.
+ ///
+ /// # Safety
+ ///
+ /// It is up to the caller to guarantee that the `MaybeUninit<T>` really is in an initialized
+ /// state. Calling this when the content is not yet fully initialized causes undefined
+ /// behavior. The [type-level documentation][inv] contains more information about
+ /// this initialization invariant.
+ ///
+ /// Moreover, similar to the [`ptr::read`] function, this function creates a
+ /// bitwise copy of the contents, regardless of whether the contained type
+ /// implements the [`Copy`] trait or not. When using multiple copies of the
+ /// data (by calling `assume_init_read` multiple times, or first calling
+ /// `assume_init_read` and then [`assume_init`]), it is your responsibility
+ /// to ensure that the data may indeed be duplicated.
+ ///
+ /// [inv]: #initialization-invariant
+ /// [`assume_init`]: MaybeUninit::assume_init
+ ///
+ /// # Examples
+ ///
+ /// Correct usage of this method:
+ ///
+ /// ```rust
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let mut x = MaybeUninit::<u32>::uninit();
+ /// x.write(13);
+ /// let x1 = unsafe { x.assume_init_read() };
+ /// // `u32` is `Copy`, so we may read multiple times.
+ /// let x2 = unsafe { x.assume_init_read() };
+ /// assert_eq!(x1, x2);
+ ///
+ /// let mut x = MaybeUninit::<Option<Vec<u32>>>::uninit();
+ /// x.write(None);
+ /// let x1 = unsafe { x.assume_init_read() };
+ /// // Duplicating a `None` value is okay, so we may read multiple times.
+ /// let x2 = unsafe { x.assume_init_read() };
+ /// assert_eq!(x1, x2);
+ /// ```
+ ///
+ /// *Incorrect* usage of this method:
+ ///
+ /// ```rust,no_run
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let mut x = MaybeUninit::<Option<Vec<u32>>>::uninit();
+ /// x.write(Some(vec![0, 1, 2]));
+ /// let x1 = unsafe { x.assume_init_read() };
+ /// let x2 = unsafe { x.assume_init_read() };
+ /// // We have now created two copies of the same vector, leading to a double-free ⚠️ when
+ /// // they both get dropped!
+ /// ```
+ #[stable(feature = "maybe_uninit_extra", since = "1.60.0")]
+ #[rustc_const_unstable(feature = "const_maybe_uninit_assume_init_read", issue = "63567")]
+ #[inline(always)]
+ #[track_caller]
+ pub const unsafe fn assume_init_read(&self) -> T {
+ // SAFETY: the caller must guarantee that `self` is initialized.
+ // Reading from `self.as_ptr()` is safe since `self` should be initialized.
+ unsafe {
+ intrinsics::assert_inhabited::<T>();
+ self.as_ptr().read()
+ }
+ }
+
+ /// Drops the contained value in place.
+ ///
+ /// If you have ownership of the `MaybeUninit`, you can also use
+ /// [`assume_init`] as an alternative.
+ ///
+ /// # Safety
+ ///
+ /// It is up to the caller to guarantee that the `MaybeUninit<T>` really is
+ /// in an initialized state. Calling this when the content is not yet fully
+ /// initialized causes undefined behavior.
+ ///
+ /// On top of that, all additional invariants of the type `T` must be
+ /// satisfied, as the `Drop` implementation of `T` (or its members) may
+ /// rely on this. For example, setting a [`Vec<T>`] to an invalid but
+ /// non-null address makes it initialized (under the current implementation;
+ /// this does not constitute a stable guarantee), because the only
+ /// requirement the compiler knows about it is that the data pointer must be
+ /// non-null. Dropping such a `Vec<T>`, however, will cause undefined
+ /// behavior.
+ ///
+ /// [`assume_init`]: MaybeUninit::assume_init
+ /// [`Vec<T>`]: ../../std/vec/struct.Vec.html
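+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch:
+ ///
+ /// ```rust
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let mut x = MaybeUninit::<Vec<u32>>::uninit();
+ /// x.write(vec![1, 2, 3]);
+ /// // SAFETY: `x` was fully initialized by the `write` call above.
+ /// unsafe { x.assume_init_drop(); }
+ /// // `x` is uninitialized again and must not be read until re-initialized.
+ /// ```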
+ #[stable(feature = "maybe_uninit_extra", since = "1.60.0")]
+ pub unsafe fn assume_init_drop(&mut self) {
+ // SAFETY: the caller must guarantee that `self` is initialized and
+ // satisfies all invariants of `T`.
+ // Dropping the value in place is safe if that is the case.
+ unsafe { ptr::drop_in_place(self.as_mut_ptr()) }
+ }
+
+ /// Gets a shared reference to the contained value.
+ ///
+ /// This can be useful when we want to access a `MaybeUninit` that has been
+ /// initialized but don't have ownership of the `MaybeUninit` (preventing the use
+ /// of `.assume_init()`).
+ ///
+ /// # Safety
+ ///
+ /// Calling this when the content is not yet fully initialized causes undefined
+ /// behavior: it is up to the caller to guarantee that the `MaybeUninit<T>` really
+ /// is in an initialized state.
+ ///
+ /// # Examples
+ ///
+ /// ### Correct usage of this method:
+ ///
+ /// ```rust
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let mut x = MaybeUninit::<Vec<u32>>::uninit();
+ /// // Initialize `x`:
+ /// x.write(vec![1, 2, 3]);
+ /// // Now that our `MaybeUninit<_>` is known to be initialized, it is okay to
+ /// // create a shared reference to it:
+ /// let x: &Vec<u32> = unsafe {
+ /// // SAFETY: `x` has been initialized.
+ /// x.assume_init_ref()
+ /// };
+ /// assert_eq!(x, &vec![1, 2, 3]);
+ /// ```
+ ///
+ /// ### *Incorrect* usages of this method:
+ ///
+ /// ```rust,no_run
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let x = MaybeUninit::<Vec<u32>>::uninit();
+ /// let x_vec: &Vec<u32> = unsafe { x.assume_init_ref() };
+ /// // We have created a reference to an uninitialized vector! This is undefined behavior. ⚠️
+ /// ```
+ ///
+ /// ```rust,no_run
+ /// use std::{cell::Cell, mem::MaybeUninit};
+ ///
+ /// let b = MaybeUninit::<Cell<bool>>::uninit();
+ /// // Initialize the `MaybeUninit` using `Cell::set`:
+ /// unsafe {
+ /// b.assume_init_ref().set(true);
+ /// // ^^^^^^^^^^^^^^^
+ /// // Reference to an uninitialized `Cell<bool>`: UB!
+ /// }
+ /// ```
+ #[stable(feature = "maybe_uninit_ref", since = "1.55.0")]
+ #[rustc_const_stable(feature = "const_maybe_uninit_assume_init_ref", since = "1.59.0")]
+ #[inline(always)]
+ pub const unsafe fn assume_init_ref(&self) -> &T {
+ // SAFETY: the caller must guarantee that `self` is initialized.
+ // This also means that `self` must be a `value` variant.
+ unsafe {
+ intrinsics::assert_inhabited::<T>();
+ &*self.as_ptr()
+ }
+ }
+
+ /// Gets a mutable (unique) reference to the contained value.
+ ///
+ /// This can be useful when we want to access a `MaybeUninit` that has been
+ /// initialized but don't have ownership of the `MaybeUninit` (preventing the use
+ /// of `.assume_init()`).
+ ///
+ /// # Safety
+ ///
+ /// Calling this when the content is not yet fully initialized causes undefined
+ /// behavior: it is up to the caller to guarantee that the `MaybeUninit<T>` really
+ /// is in an initialized state. For instance, `.assume_init_mut()` cannot be used to
+ /// initialize a `MaybeUninit`.
+ ///
+ /// # Examples
+ ///
+ /// ### Correct usage of this method:
+ ///
+ /// ```rust
+ /// # #![allow(unexpected_cfgs)]
+ /// use std::mem::MaybeUninit;
+ ///
+ /// # unsafe extern "C" fn initialize_buffer(buf: *mut [u8; 1024]) { *buf = [0; 1024] }
+ /// # #[cfg(FALSE)]
+ /// extern "C" {
+ /// /// Initializes *all* the bytes of the input buffer.
+ /// fn initialize_buffer(buf: *mut [u8; 1024]);
+ /// }
+ ///
+ /// let mut buf = MaybeUninit::<[u8; 1024]>::uninit();
+ ///
+ /// // Initialize `buf`:
+ /// unsafe { initialize_buffer(buf.as_mut_ptr()); }
+ /// // Now we know that `buf` has been initialized, so we could `.assume_init()` it.
+ /// // However, using `.assume_init()` may trigger a `memcpy` of the 1024 bytes.
+ /// // To assert our buffer has been initialized without copying it, we upgrade
+ /// // the `&mut MaybeUninit<[u8; 1024]>` to a `&mut [u8; 1024]`:
+ /// let buf: &mut [u8; 1024] = unsafe {
+ /// // SAFETY: `buf` has been initialized.
+ /// buf.assume_init_mut()
+ /// };
+ ///
+ /// // Now we can use `buf` as a normal slice:
+ /// buf.sort_unstable();
+ /// assert!(
+ /// buf.windows(2).all(|pair| pair[0] <= pair[1]),
+ /// "buffer is sorted",
+ /// );
+ /// ```
+ ///
+ /// ### *Incorrect* usages of this method:
+ ///
+ /// You cannot use `.assume_init_mut()` to initialize a value:
+ ///
+ /// ```rust,no_run
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let mut b = MaybeUninit::<bool>::uninit();
+ /// unsafe {
+ /// *b.assume_init_mut() = true;
+ /// // We have created a (mutable) reference to an uninitialized `bool`!
+ /// // This is undefined behavior. ⚠️
+ /// }
+ /// ```
+ ///
+ /// For instance, you cannot [`Read`] into an uninitialized buffer:
+ ///
+ /// [`Read`]: ../../std/io/trait.Read.html
+ ///
+ /// ```rust,no_run
+ /// use std::{io, mem::MaybeUninit};
+ ///
+ /// fn read_chunk(reader: &mut dyn io::Read) -> io::Result<[u8; 64]> {
+ /// let mut buffer = MaybeUninit::<[u8; 64]>::uninit();
+ /// reader.read_exact(unsafe { buffer.assume_init_mut() })?;
+ /// // ^^^^^^^^^^^^^^^^^^^^^^^^
+ /// // (mutable) reference to uninitialized memory!
+ /// // This is undefined behavior.
+ /// Ok(unsafe { buffer.assume_init() })
+ /// }
+ /// ```
+ ///
+ /// Nor can you use direct field access to do field-by-field gradual initialization:
+ ///
+ /// ```rust,no_run
+ /// use std::{mem::MaybeUninit, ptr};
+ ///
+ /// struct Foo {
+ /// a: u32,
+ /// b: u8,
+ /// }
+ ///
+ /// let foo: Foo = unsafe {
+ /// let mut foo = MaybeUninit::<Foo>::uninit();
+ /// ptr::write(&mut foo.assume_init_mut().a as *mut u32, 1337);
+ /// // ^^^^^^^^^^^^^^^^^^^^^
+ /// // (mutable) reference to uninitialized memory!
+ /// // This is undefined behavior.
+ /// ptr::write(&mut foo.assume_init_mut().b as *mut u8, 42);
+ /// // ^^^^^^^^^^^^^^^^^^^^^
+ /// // (mutable) reference to uninitialized memory!
+ /// // This is undefined behavior.
+ /// foo.assume_init()
+ /// };
+ /// ```
+ #[stable(feature = "maybe_uninit_ref", since = "1.55.0")]
+ #[rustc_const_unstable(feature = "const_maybe_uninit_assume_init", issue = "none")]
+ #[inline(always)]
+ pub const unsafe fn assume_init_mut(&mut self) -> &mut T {
+ // SAFETY: the caller must guarantee that `self` is initialized.
+ // This also means that `self` must be a `value` variant.
+ unsafe {
+ intrinsics::assert_inhabited::<T>();
+ &mut *self.as_mut_ptr()
+ }
+ }
+
+ /// Extracts the values from an array of `MaybeUninit` containers.
+ ///
+ /// # Safety
+ ///
+ /// It is up to the caller to guarantee that all elements of the array are
+ /// in an initialized state.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(maybe_uninit_uninit_array)]
+ /// #![feature(maybe_uninit_array_assume_init)]
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let mut array: [MaybeUninit<i32>; 3] = MaybeUninit::uninit_array();
+ /// array[0].write(0);
+ /// array[1].write(1);
+ /// array[2].write(2);
+ ///
+ /// // SAFETY: Now safe as we initialized all elements
+ /// let array = unsafe {
+ /// MaybeUninit::array_assume_init(array)
+ /// };
+ ///
+ /// assert_eq!(array, [0, 1, 2]);
+ /// ```
+ #[unstable(feature = "maybe_uninit_array_assume_init", issue = "96097")]
+ #[rustc_const_unstable(feature = "const_maybe_uninit_array_assume_init", issue = "96097")]
+ #[inline(always)]
+ #[track_caller]
+ pub const unsafe fn array_assume_init<const N: usize>(array: [Self; N]) -> [T; N] {
+ // SAFETY:
+ // * The caller guarantees that all elements of the array are initialized
+ // * `MaybeUninit<T>` and T are guaranteed to have the same layout
+ // * `MaybeUninit` does not drop, so there are no double-frees
+ // And thus the conversion is safe
+ let ret = unsafe {
+ intrinsics::assert_inhabited::<[T; N]>();
+ (&array as *const _ as *const [T; N]).read()
+ };
+
+ // FIXME: required to avoid `~const Destruct` bound
+ super::forget(array);
+ ret
+ }
+
+ /// Assuming all the elements are initialized, get a slice to them.
+ ///
+ /// # Safety
+ ///
+ /// It is up to the caller to guarantee that the `MaybeUninit<T>` elements
+ /// really are in an initialized state.
+ /// Calling this when the content is not yet fully initialized causes undefined behavior.
+ ///
+ /// See [`assume_init_ref`] for more details and examples.
+ ///
+ /// [`assume_init_ref`]: MaybeUninit::assume_init_ref
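+ ///
+ /// A minimal sketch (the method is gated on the unstable
+ /// `maybe_uninit_slice` feature):
+ ///
+ /// ```rust
+ /// #![feature(maybe_uninit_slice)]
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let mut data = [MaybeUninit::<u8>::uninit(); 4];
+ /// for (i, elem) in data.iter_mut().enumerate() {
+ ///     elem.write(i as u8);
+ /// }
+ /// // SAFETY: every element of `data` was initialized in the loop above.
+ /// let init: &[u8] = unsafe { MaybeUninit::slice_assume_init_ref(&data) };
+ /// assert_eq!(init, &[0, 1, 2, 3]);
+ /// ```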
+ #[unstable(feature = "maybe_uninit_slice", issue = "63569")]
+ #[rustc_const_unstable(feature = "maybe_uninit_slice", issue = "63569")]
+ #[inline(always)]
+ pub const unsafe fn slice_assume_init_ref(slice: &[Self]) -> &[T] {
+ // SAFETY: casting `slice` to a `*const [T]` is safe since the caller guarantees that
+ // `slice` is initialized, and `MaybeUninit` is guaranteed to have the same layout as `T`.
+ // The pointer obtained is valid since it refers to memory owned by `slice` which is a
+ // reference and thus guaranteed to be valid for reads.
+ unsafe { &*(slice as *const [Self] as *const [T]) }
+ }
+
+ /// Assuming all the elements are initialized, get a mutable slice to them.
+ ///
+ /// # Safety
+ ///
+ /// It is up to the caller to guarantee that the `MaybeUninit<T>` elements
+ /// really are in an initialized state.
+ /// Calling this when the content is not yet fully initialized causes undefined behavior.
+ ///
+ /// See [`assume_init_mut`] for more details and examples.
+ ///
+ /// [`assume_init_mut`]: MaybeUninit::assume_init_mut
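+ ///
+ /// A minimal sketch (gated on the unstable `maybe_uninit_slice` feature):
+ ///
+ /// ```rust
+ /// #![feature(maybe_uninit_slice)]
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let mut data = [MaybeUninit::<u8>::uninit(); 3];
+ /// for elem in data.iter_mut() {
+ ///     elem.write(0);
+ /// }
+ /// // SAFETY: every element of `data` was initialized in the loop above.
+ /// let init: &mut [u8] = unsafe { MaybeUninit::slice_assume_init_mut(&mut data) };
+ /// init[0] = 7;
+ /// assert_eq!(init, &mut [7, 0, 0]);
+ /// ```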
+ #[unstable(feature = "maybe_uninit_slice", issue = "63569")]
+ #[rustc_const_unstable(feature = "const_maybe_uninit_assume_init", issue = "none")]
+ #[inline(always)]
+ pub const unsafe fn slice_assume_init_mut(slice: &mut [Self]) -> &mut [T] {
+ // SAFETY: similar to the safety notes for `slice_assume_init_ref`, but we have a
+ // mutable reference which is also guaranteed to be valid for writes.
+ unsafe { &mut *(slice as *mut [Self] as *mut [T]) }
+ }
+
+ /// Gets a pointer to the first element of the array.
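+ ///
+ /// A minimal sketch (gated on the unstable `maybe_uninit_slice` feature):
+ ///
+ /// ```rust
+ /// #![feature(maybe_uninit_slice)]
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let data = [MaybeUninit::new(1u8), MaybeUninit::new(2u8)];
+ /// let ptr = MaybeUninit::slice_as_ptr(&data);
+ /// // SAFETY: the first element is initialized and the pointer is in bounds.
+ /// assert_eq!(unsafe { ptr.read() }, 1);
+ /// ```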
+ #[unstable(feature = "maybe_uninit_slice", issue = "63569")]
+ #[rustc_const_unstable(feature = "maybe_uninit_slice", issue = "63569")]
+ #[inline(always)]
+ pub const fn slice_as_ptr(this: &[MaybeUninit<T>]) -> *const T {
+ this.as_ptr() as *const T
+ }
+
+ /// Gets a mutable pointer to the first element of the array.
+ #[unstable(feature = "maybe_uninit_slice", issue = "63569")]
+ #[rustc_const_unstable(feature = "maybe_uninit_slice", issue = "63569")]
+ #[inline(always)]
+ pub const fn slice_as_mut_ptr(this: &mut [MaybeUninit<T>]) -> *mut T {
+ this.as_mut_ptr() as *mut T
+ }
+
+ /// Copies the elements from `src` to `this`, returning a mutable reference to the now initialized contents of `this`.
+ ///
+ /// If `T` does not implement `Copy`, use [`write_slice_cloned`].
+ ///
+ /// This is similar to [`slice::copy_from_slice`].
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if the two slices have different lengths.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(maybe_uninit_write_slice)]
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let mut dst = [MaybeUninit::uninit(); 32];
+ /// let src = [0; 32];
+ ///
+ /// let init = MaybeUninit::write_slice(&mut dst, &src);
+ ///
+ /// assert_eq!(init, src);
+ /// ```
+ ///
+ /// ```
+ /// #![feature(maybe_uninit_write_slice)]
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let mut vec = Vec::with_capacity(32);
+ /// let src = [0; 16];
+ ///
+ /// MaybeUninit::write_slice(&mut vec.spare_capacity_mut()[..src.len()], &src);
+ ///
+ /// // SAFETY: we have just copied `src.len()` elements into the spare capacity,
+ /// // so the first `src.len()` elements of the vec are valid now.
+ /// unsafe {
+ /// vec.set_len(src.len());
+ /// }
+ ///
+ /// assert_eq!(vec, src);
+ /// ```
+ ///
+ /// [`write_slice_cloned`]: MaybeUninit::write_slice_cloned
+ #[unstable(feature = "maybe_uninit_write_slice", issue = "79995")]
+ pub fn write_slice<'a>(this: &'a mut [MaybeUninit<T>], src: &[T]) -> &'a mut [T]
+ where
+ T: Copy,
+ {
+ // SAFETY: &[T] and &[MaybeUninit<T>] have the same layout
+ let uninit_src: &[MaybeUninit<T>] = unsafe { super::transmute(src) };
+
+ this.copy_from_slice(uninit_src);
+
+ // SAFETY: Valid elements have just been copied into `this` so it is initialized
+ unsafe { MaybeUninit::slice_assume_init_mut(this) }
+ }
+
+ /// Clones the elements from `src` to `this`, returning a mutable reference to the now initialized contents of `this`.
+ /// Any already initialized elements will not be dropped.
+ ///
+ /// If `T` implements `Copy`, use [`write_slice`].
+ ///
+ /// This is similar to [`slice::clone_from_slice`] but does not drop existing elements.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if the two slices have different lengths, or if the implementation of `Clone` panics.
+ ///
+ /// If there is a panic, the already cloned elements will be dropped.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(maybe_uninit_write_slice)]
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let mut dst = [MaybeUninit::uninit(), MaybeUninit::uninit(), MaybeUninit::uninit(), MaybeUninit::uninit(), MaybeUninit::uninit()];
+ /// let src = ["wibbly".to_string(), "wobbly".to_string(), "timey".to_string(), "wimey".to_string(), "stuff".to_string()];
+ ///
+ /// let init = MaybeUninit::write_slice_cloned(&mut dst, &src);
+ ///
+ /// assert_eq!(init, src);
+ /// ```
+ ///
+ /// ```
+ /// #![feature(maybe_uninit_write_slice)]
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let mut vec = Vec::with_capacity(32);
+ /// let src = ["rust", "is", "a", "pretty", "cool", "language"];
+ ///
+ /// MaybeUninit::write_slice_cloned(&mut vec.spare_capacity_mut()[..src.len()], &src);
+ ///
+ /// // SAFETY: we have just cloned `src.len()` elements into the spare capacity,
+ /// // so the first `src.len()` elements of the vec are valid now.
+ /// unsafe {
+ /// vec.set_len(src.len());
+ /// }
+ ///
+ /// assert_eq!(vec, src);
+ /// ```
+ ///
+ /// [`write_slice`]: MaybeUninit::write_slice
+ #[unstable(feature = "maybe_uninit_write_slice", issue = "79995")]
+ pub fn write_slice_cloned<'a>(this: &'a mut [MaybeUninit<T>], src: &[T]) -> &'a mut [T]
+ where
+ T: Clone,
+ {
+ // Unlike `copy_from_slice`, this does not call `clone_from_slice` on the slice:
+ // `MaybeUninit<T>` does not implement `Clone` (even when `T: Clone`).
+
+ struct Guard<'a, T> {
+ slice: &'a mut [MaybeUninit<T>],
+ initialized: usize,
+ }
+
+ impl<'a, T> Drop for Guard<'a, T> {
+ fn drop(&mut self) {
+ let initialized_part = &mut self.slice[..self.initialized];
+ // SAFETY: this slice will contain only initialized objects,
+ // so it is sound to drop them in place.
+ unsafe {
+ crate::ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(initialized_part));
+ }
+ }
+ }
+
+ assert_eq!(this.len(), src.len(), "destination and source slices have different lengths");
+ // NOTE: We need to explicitly slice them to the same length
+ // for bounds checking to be elided, and the optimizer will
+ // generate memcpy for simple cases (for example T = u8).
+ let len = this.len();
+ let src = &src[..len];
+
+ // The guard is needed because a panic might happen during a clone.
+ let mut guard = Guard { slice: this, initialized: 0 };
+
+ for i in 0..len {
+ guard.slice[i].write(src[i].clone());
+ guard.initialized += 1;
+ }
+
+ super::forget(guard);
+
+ // SAFETY: Valid elements have just been written into `this` so it is initialized
+ unsafe { MaybeUninit::slice_assume_init_mut(this) }
+ }
+
+ /// Returns the contents of this `MaybeUninit` as a slice of potentially uninitialized bytes.
+ ///
+ /// Note that even if the contents of a `MaybeUninit` have been initialized, the value may still
+ /// contain padding bytes which are left uninitialized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(maybe_uninit_as_bytes, maybe_uninit_slice)]
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let val = 0x12345678i32;
+ /// let uninit = MaybeUninit::new(val);
+ /// let uninit_bytes = uninit.as_bytes();
+ /// let bytes = unsafe { MaybeUninit::slice_assume_init_ref(uninit_bytes) };
+ /// assert_eq!(bytes, val.to_ne_bytes());
+ /// ```
+ #[unstable(feature = "maybe_uninit_as_bytes", issue = "93092")]
+ pub fn as_bytes(&self) -> &[MaybeUninit<u8>] {
+ // SAFETY: MaybeUninit<u8> is always valid, even for padding bytes
+ unsafe {
+ slice::from_raw_parts(self.as_ptr() as *const MaybeUninit<u8>, mem::size_of::<T>())
+ }
+ }
+
+ /// Returns the contents of this `MaybeUninit` as a mutable slice of potentially uninitialized
+ /// bytes.
+ ///
+ /// Note that even if the contents of a `MaybeUninit` have been initialized, the value may still
+ /// contain padding bytes which are left uninitialized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(maybe_uninit_as_bytes)]
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let val = 0x12345678i32;
+ /// let mut uninit = MaybeUninit::new(val);
+ /// let uninit_bytes = uninit.as_bytes_mut();
+ /// if cfg!(target_endian = "little") {
+ /// uninit_bytes[0].write(0xcd);
+ /// } else {
+ /// uninit_bytes[3].write(0xcd);
+ /// }
+ /// let val2 = unsafe { uninit.assume_init() };
+ /// assert_eq!(val2, 0x123456cd);
+ /// ```
+ #[unstable(feature = "maybe_uninit_as_bytes", issue = "93092")]
+ pub fn as_bytes_mut(&mut self) -> &mut [MaybeUninit<u8>] {
+ // SAFETY: MaybeUninit<u8> is always valid, even for padding bytes
+ unsafe {
+ slice::from_raw_parts_mut(
+ self.as_mut_ptr() as *mut MaybeUninit<u8>,
+ mem::size_of::<T>(),
+ )
+ }
+ }
+
+ /// Returns the contents of this slice of `MaybeUninit` as a slice of potentially uninitialized
+ /// bytes.
+ ///
+ /// Note that even if the contents of a `MaybeUninit` have been initialized, the value may still
+ /// contain padding bytes which are left uninitialized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(maybe_uninit_as_bytes, maybe_uninit_write_slice, maybe_uninit_slice)]
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let uninit = [MaybeUninit::new(0x1234u16), MaybeUninit::new(0x5678u16)];
+ /// let uninit_bytes = MaybeUninit::slice_as_bytes(&uninit);
+ /// let bytes = unsafe { MaybeUninit::slice_assume_init_ref(&uninit_bytes) };
+ /// let val1 = u16::from_ne_bytes(bytes[0..2].try_into().unwrap());
+ /// let val2 = u16::from_ne_bytes(bytes[2..4].try_into().unwrap());
+ /// assert_eq!(&[val1, val2], &[0x1234u16, 0x5678u16]);
+ /// ```
+ #[unstable(feature = "maybe_uninit_as_bytes", issue = "93092")]
+ pub fn slice_as_bytes(this: &[MaybeUninit<T>]) -> &[MaybeUninit<u8>] {
+ // SAFETY: MaybeUninit<u8> is always valid, even for padding bytes
+ unsafe {
+ slice::from_raw_parts(
+ this.as_ptr() as *const MaybeUninit<u8>,
+ this.len() * mem::size_of::<T>(),
+ )
+ }
+ }
+
+ /// Returns the contents of this mutable slice of `MaybeUninit` as a mutable slice of
+ /// potentially uninitialized bytes.
+ ///
+ /// Note that even if the contents of a `MaybeUninit` have been initialized, the value may still
+ /// contain padding bytes which are left uninitialized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(maybe_uninit_as_bytes, maybe_uninit_write_slice, maybe_uninit_slice)]
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let mut uninit = [MaybeUninit::<u16>::uninit(), MaybeUninit::<u16>::uninit()];
+ /// let uninit_bytes = MaybeUninit::slice_as_bytes_mut(&mut uninit);
+ /// MaybeUninit::write_slice(uninit_bytes, &[0x12, 0x34, 0x56, 0x78]);
+ /// let vals = unsafe { MaybeUninit::slice_assume_init_ref(&uninit) };
+ /// if cfg!(target_endian = "little") {
+ /// assert_eq!(vals, &[0x3412u16, 0x7856u16]);
+ /// } else {
+ /// assert_eq!(vals, &[0x1234u16, 0x5678u16]);
+ /// }
+ /// ```
+ #[unstable(feature = "maybe_uninit_as_bytes", issue = "93092")]
+ pub fn slice_as_bytes_mut(this: &mut [MaybeUninit<T>]) -> &mut [MaybeUninit<u8>] {
+ // SAFETY: MaybeUninit<u8> is always valid, even for padding bytes
+ unsafe {
+ slice::from_raw_parts_mut(
+ this.as_mut_ptr() as *mut MaybeUninit<u8>,
+ this.len() * mem::size_of::<T>(),
+ )
+ }
+ }
+}
diff --git a/library/core/src/mem/mod.rs b/library/core/src/mem/mod.rs
new file mode 100644
index 000000000..20b2d5e26
--- /dev/null
+++ b/library/core/src/mem/mod.rs
@@ -0,0 +1,1180 @@
+//! Basic functions for dealing with memory.
+//!
+//! This module contains functions for querying the size and alignment of
+//! types, initializing and manipulating memory.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::clone;
+use crate::cmp;
+use crate::fmt;
+use crate::hash;
+use crate::intrinsics;
+use crate::marker::{Copy, DiscriminantKind, Sized};
+use crate::ptr;
+
+mod manually_drop;
+#[stable(feature = "manually_drop", since = "1.20.0")]
+pub use manually_drop::ManuallyDrop;
+
+mod maybe_uninit;
+#[stable(feature = "maybe_uninit", since = "1.36.0")]
+pub use maybe_uninit::MaybeUninit;
+
+mod valid_align;
+// For now this type is left crate-local. It could potentially make sense to expose
+// it publicly, as it would be a nice parameter type for methods which need to take
+// alignment as a parameter, such as `Layout::padding_needed_for`.
+pub(crate) use valid_align::ValidAlign;
+
+mod transmutability;
+#[unstable(feature = "transmutability", issue = "99571")]
+pub use transmutability::{Assume, BikeshedIntrinsicFrom};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(inline)]
+pub use crate::intrinsics::transmute;
+
+/// Takes ownership and "forgets" about the value **without running its destructor**.
+///
+/// Any resources the value manages, such as heap memory or a file handle, will linger
+/// forever in an unreachable state. However, `forget` does not guarantee that pointers
+/// to this memory will remain valid.
+///
+/// * If you want to leak memory, see [`Box::leak`].
+/// * If you want to obtain a raw pointer to the memory, see [`Box::into_raw`].
+/// * If you want to dispose of a value properly, running its destructor, see
+/// [`mem::drop`].
+///
+/// # Safety
+///
+/// `forget` is not marked as `unsafe`, because Rust's safety guarantees
+/// do not include a guarantee that destructors will always run. For example,
+/// a program can create a reference cycle using [`Rc`][rc], or call
+/// [`process::exit`][exit] to exit without running destructors. Thus, allowing
+/// `mem::forget` from safe code does not fundamentally change Rust's safety
+/// guarantees.
+///
+/// That said, leaking resources such as memory or I/O objects is usually undesirable.
+/// The need comes up in some specialized use cases for FFI or unsafe code, but even
+/// then, [`ManuallyDrop`] is typically preferred.
+///
+/// Because forgetting a value is allowed, any `unsafe` code you write must
+/// allow for this possibility. You cannot return a value and expect that the
+/// caller will necessarily run the value's destructor.
+///
+/// [rc]: ../../std/rc/struct.Rc.html
+/// [exit]: ../../std/process/fn.exit.html
+///
+/// # Examples
+///
+/// The canonical safe use of `mem::forget` is to circumvent a value's destructor
+/// implemented by the `Drop` trait. For example, this will leak a `File`, i.e. reclaim
+/// the space taken by the variable but never close the underlying system resource:
+///
+/// ```no_run
+/// use std::mem;
+/// use std::fs::File;
+///
+/// let file = File::open("foo.txt").unwrap();
+/// mem::forget(file);
+/// ```
+///
+/// This is useful when the ownership of the underlying resource was previously
+/// transferred to code outside of Rust, for example by transmitting the raw
+/// file descriptor to C code.
+///
+/// # Relationship with `ManuallyDrop`
+///
+/// While `mem::forget` can also be used to transfer *memory* ownership, doing so is error-prone.
+/// [`ManuallyDrop`] should be used instead. Consider, for example, this code:
+///
+/// ```
+/// use std::mem;
+///
+/// let mut v = vec![65, 122];
+/// // Build a `String` using the contents of `v`
+/// let s = unsafe { String::from_raw_parts(v.as_mut_ptr(), v.len(), v.capacity()) };
+/// // leak `v` because its memory is now managed by `s`
+/// mem::forget(v); // ERROR - v is invalid and must not be passed to a function
+/// assert_eq!(s, "Az");
+/// // `s` is implicitly dropped and its memory deallocated.
+/// ```
+///
+/// There are two issues with the above example:
+///
+/// * If more code were added between the construction of `String` and the invocation of
+/// `mem::forget()`, a panic within it would cause a double free because the same memory
+/// is handled by both `v` and `s`.
+/// * After calling `v.as_mut_ptr()` and transmitting the ownership of the data to `s`,
+/// the `v` value is invalid. Even when a value is just moved to `mem::forget` (which won't
+/// inspect it), some types have strict requirements on their values that
+/// make them invalid when dangling or no longer owned. Using invalid values in any
+/// way, including passing them to or returning them from functions, constitutes
+/// undefined behavior and may break the assumptions made by the compiler.
+///
+/// Switching to `ManuallyDrop` avoids both issues:
+///
+/// ```
+/// use std::mem::ManuallyDrop;
+///
+/// let v = vec![65, 122];
+/// // Before we disassemble `v` into its raw parts, make sure it
+/// // does not get dropped!
+/// let mut v = ManuallyDrop::new(v);
+/// // Now disassemble `v`. These operations cannot panic, so there cannot be a leak.
+/// let (ptr, len, cap) = (v.as_mut_ptr(), v.len(), v.capacity());
+/// // Finally, build a `String`.
+/// let s = unsafe { String::from_raw_parts(ptr, len, cap) };
+/// assert_eq!(s, "Az");
+/// // `s` is implicitly dropped and its memory deallocated.
+/// ```
+///
+/// `ManuallyDrop` robustly prevents double-free because we disable `v`'s destructor
+/// before doing anything else. `mem::forget()` doesn't allow this because it consumes its
+/// argument, forcing us to call it only after extracting anything we need from `v`. Even
+/// if a panic were introduced between construction of `ManuallyDrop` and building the
+/// string (which cannot happen in the code as shown), it would result in a leak and not a
+/// double free. In other words, `ManuallyDrop` errs on the side of leaking instead of
+/// erring on the side of (double-)dropping.
+///
+/// Also, `ManuallyDrop` prevents us from having to "touch" `v` after transferring the
+/// ownership to `s` — the final step of interacting with `v` to dispose of it without
+/// running its destructor is entirely avoided.
+///
+/// [`Box`]: ../../std/boxed/struct.Box.html
+/// [`Box::leak`]: ../../std/boxed/struct.Box.html#method.leak
+/// [`Box::into_raw`]: ../../std/boxed/struct.Box.html#method.into_raw
+/// [`mem::drop`]: drop
+/// [ub]: ../../reference/behavior-considered-undefined.html
+#[inline]
+#[rustc_const_stable(feature = "const_forget", since = "1.46.0")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "mem_forget")]
+pub const fn forget<T>(t: T) {
+ let _ = ManuallyDrop::new(t);
+}
+
+/// Like [`forget`], but also accepts unsized values.
+///
+/// This function is just a shim intended to be removed when the `unsized_locals` feature gets
+/// stabilized.
+#[inline]
+#[unstable(feature = "forget_unsized", issue = "none")]
+pub fn forget_unsized<T: ?Sized>(t: T) {
+ intrinsics::forget(t)
+}
+
+/// Returns the size of a type in bytes.
+///
+/// More specifically, this is the offset in bytes between successive elements
+/// in an array with that item type, including alignment padding. Thus, for any
+/// type `T` and length `n`, `[T; n]` has a size of `n * size_of::<T>()`.
+///
+/// In general, the size of a type is not stable across compilations, but
+/// specific types such as primitives are.
+///
+/// The following table gives the size for primitives.
+///
+/// Type | size_of::\<Type>()
+/// ---- | ---------------
+/// () | 0
+/// bool | 1
+/// u8 | 1
+/// u16 | 2
+/// u32 | 4
+/// u64 | 8
+/// u128 | 16
+/// i8 | 1
+/// i16 | 2
+/// i32 | 4
+/// i64 | 8
+/// i128 | 16
+/// f32 | 4
+/// f64 | 8
+/// char | 4
+///
+/// Furthermore, `usize` and `isize` have the same size.
+///
+/// The types `*const T`, `&T`, `Box<T>`, `Option<&T>`, and `Option<Box<T>>` all have
+/// the same size. If `T` is Sized, all of those types have the same size as `usize`.
+///
+/// The mutability of a pointer does not change its size. As such, `&T` and `&mut T`
+/// have the same size. Likewise for `*const T` and `*mut T`.
+///
+/// # Size of `#[repr(C)]` items
+///
+/// The `C` representation for items has a defined layout. With this layout,
+/// the size of items is also stable as long as all fields have a stable size.
+///
+/// ## Size of Structs
+///
+/// For `structs`, the size is determined by the following algorithm.
+///
+/// For each field in the struct ordered by declaration order:
+///
+/// 1. Add the size of the field.
+/// 2. Round up the current size to the nearest multiple of the next field's [alignment].
+///
+/// Finally, round the size of the struct to the nearest multiple of its [alignment].
+/// The alignment of the struct is usually the largest alignment of all its
+/// fields; this can be changed with the use of `repr(align(N))`.
+///
+/// Unlike `C`, zero-sized structs are not rounded up to one byte in size.
+///
+/// ## Size of Enums
+///
+/// Enums that carry no data other than the discriminant have the same size as C enums
+/// on the platform they are compiled for.
+///
+/// ## Size of Unions
+///
+/// The size of a union is the size of its largest field.
+///
+/// Unlike `C`, zero-sized unions are not rounded up to one byte in size.
+///
+/// # Examples
+///
+/// ```
+/// use std::mem;
+///
+/// // Some primitives
+/// assert_eq!(4, mem::size_of::<i32>());
+/// assert_eq!(8, mem::size_of::<f64>());
+/// assert_eq!(0, mem::size_of::<()>());
+///
+/// // Some arrays
+/// assert_eq!(8, mem::size_of::<[i32; 2]>());
+/// assert_eq!(12, mem::size_of::<[i32; 3]>());
+/// assert_eq!(0, mem::size_of::<[i32; 0]>());
+///
+///
+/// // Pointer size equality
+/// assert_eq!(mem::size_of::<&i32>(), mem::size_of::<*const i32>());
+/// assert_eq!(mem::size_of::<&i32>(), mem::size_of::<Box<i32>>());
+/// assert_eq!(mem::size_of::<&i32>(), mem::size_of::<Option<&i32>>());
+/// assert_eq!(mem::size_of::<Box<i32>>(), mem::size_of::<Option<Box<i32>>>());
+/// ```
+///
+/// Using `#[repr(C)]`.
+///
+/// ```
+/// use std::mem;
+///
+/// #[repr(C)]
+/// struct FieldStruct {
+/// first: u8,
+/// second: u16,
+/// third: u8
+/// }
+///
+/// // The size of the first field is 1, so add 1 to the size. Size is 1.
+/// // The alignment of the second field is 2, so add 1 to the size for padding. Size is 2.
+/// // The size of the second field is 2, so add 2 to the size. Size is 4.
+/// // The alignment of the third field is 1, so add 0 to the size for padding. Size is 4.
+/// // The size of the third field is 1, so add 1 to the size. Size is 5.
+/// // Finally, the alignment of the struct is 2 (because the largest alignment amongst its
+/// // fields is 2), so add 1 to the size for padding. Size is 6.
+/// assert_eq!(6, mem::size_of::<FieldStruct>());
+///
+/// #[repr(C)]
+/// struct TupleStruct(u8, u16, u8);
+///
+/// // Tuple structs follow the same rules.
+/// assert_eq!(6, mem::size_of::<TupleStruct>());
+///
+/// // Note that reordering the fields can lower the size. We can remove both padding bytes
+/// // by putting `third` before `second`.
+/// #[repr(C)]
+/// struct FieldStructOptimized {
+/// first: u8,
+/// third: u8,
+/// second: u16
+/// }
+///
+/// assert_eq!(4, mem::size_of::<FieldStructOptimized>());
+///
+/// // Union size is the size of the largest field.
+/// #[repr(C)]
+/// union ExampleUnion {
+/// smaller: u8,
+/// larger: u16
+/// }
+///
+/// assert_eq!(2, mem::size_of::<ExampleUnion>());
+/// ```
+///
+/// [alignment]: align_of
+#[inline(always)]
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_promotable]
+#[rustc_const_stable(feature = "const_mem_size_of", since = "1.24.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "mem_size_of")]
+pub const fn size_of<T>() -> usize {
+ intrinsics::size_of::<T>()
+}
+
+/// Returns the size of the pointed-to value in bytes.
+///
+/// This is usually the same as `size_of::<T>()`. However, when `T` *has* no
+/// statically-known size, e.g., a slice [`[T]`][slice] or a [trait object],
+/// then `size_of_val` can be used to get the dynamically-known size.
+///
+/// [trait object]: ../../book/ch17-02-trait-objects.html
+///
+/// # Examples
+///
+/// ```
+/// use std::mem;
+///
+/// assert_eq!(4, mem::size_of_val(&5i32));
+///
+/// let x: [u8; 13] = [0; 13];
+/// let y: &[u8] = &x;
+/// assert_eq!(13, mem::size_of_val(y));
+/// ```
+#[inline]
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_size_of_val", issue = "46571")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "mem_size_of_val")]
+pub const fn size_of_val<T: ?Sized>(val: &T) -> usize {
+ // SAFETY: `val` is a reference, so it's a valid raw pointer
+ unsafe { intrinsics::size_of_val(val) }
+}
+
+/// Returns the size of the pointed-to value in bytes.
+///
+/// This is usually the same as `size_of::<T>()`. However, when `T` *has* no
+/// statically-known size, e.g., a slice [`[T]`][slice] or a [trait object],
+/// then `size_of_val_raw` can be used to get the dynamically-known size.
+///
+/// # Safety
+///
+/// This function is only safe to call if the following conditions hold:
+///
+/// - If `T` is `Sized`, this function is always safe to call.
+/// - If the unsized tail of `T` is:
+/// - a [slice], then the length of the slice tail must be an initialized
+/// integer, and the size of the *entire value*
+/// (dynamic tail length + statically sized prefix) must fit in `isize`.
+/// - a [trait object], then the vtable part of the pointer must point
+/// to a valid vtable acquired by an unsizing coercion, and the size
+/// of the *entire value* (dynamic tail length + statically sized prefix)
+/// must fit in `isize`.
+/// - an (unstable) [extern type], then this function is always safe to
+/// call, but may panic or otherwise return the wrong value, as the
+/// extern type's layout is not known. This is the same behavior as
+/// [`size_of_val`] on a reference to a type with an extern type tail.
+/// - otherwise, it is conservatively not allowed to call this function.
+///
+/// [trait object]: ../../book/ch17-02-trait-objects.html
+/// [extern type]: ../../unstable-book/language-features/extern-types.html
+///
+/// # Examples
+///
+/// ```
+/// #![feature(layout_for_ptr)]
+/// use std::mem;
+///
+/// assert_eq!(4, mem::size_of_val(&5i32));
+///
+/// let x: [u8; 13] = [0; 13];
+/// let y: &[u8] = &x;
+/// assert_eq!(13, unsafe { mem::size_of_val_raw(y) });
+/// ```
+#[inline]
+#[must_use]
+#[unstable(feature = "layout_for_ptr", issue = "69835")]
+#[rustc_const_unstable(feature = "const_size_of_val_raw", issue = "46571")]
+pub const unsafe fn size_of_val_raw<T: ?Sized>(val: *const T) -> usize {
+ // SAFETY: the caller must provide a valid raw pointer
+ unsafe { intrinsics::size_of_val(val) }
+}
+
+/// Returns the [ABI]-required minimum alignment of a type in bytes.
+///
+/// Every reference to a value of the type `T` must be a multiple of this number.
+///
+/// This is the alignment used for struct fields. It may be smaller than the preferred alignment.
+///
+/// [ABI]: https://en.wikipedia.org/wiki/Application_binary_interface
+///
+/// # Examples
+///
+/// ```
+/// # #![allow(deprecated)]
+/// use std::mem;
+///
+/// assert_eq!(4, mem::min_align_of::<i32>());
+/// ```
+#[inline]
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(note = "use `align_of` instead", since = "1.2.0")]
+pub fn min_align_of<T>() -> usize {
+ intrinsics::min_align_of::<T>()
+}
+
+/// Returns the [ABI]-required minimum alignment of the type of the value that `val` points to in
+/// bytes.
+///
+/// Every reference to a value of the type `T` must be a multiple of this number.
+///
+/// [ABI]: https://en.wikipedia.org/wiki/Application_binary_interface
+///
+/// # Examples
+///
+/// ```
+/// # #![allow(deprecated)]
+/// use std::mem;
+///
+/// assert_eq!(4, mem::min_align_of_val(&5i32));
+/// ```
+#[inline]
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(note = "use `align_of_val` instead", since = "1.2.0")]
+pub fn min_align_of_val<T: ?Sized>(val: &T) -> usize {
+ // SAFETY: val is a reference, so it's a valid raw pointer
+ unsafe { intrinsics::min_align_of_val(val) }
+}
+
+/// Returns the [ABI]-required minimum alignment of a type in bytes.
+///
+/// Every reference to a value of the type `T` must be a multiple of this number.
+///
+/// This is the alignment used for struct fields. It may be smaller than the preferred alignment.
+///
+/// [ABI]: https://en.wikipedia.org/wiki/Application_binary_interface
+///
+/// # Examples
+///
+/// ```
+/// use std::mem;
+///
+/// assert_eq!(4, mem::align_of::<i32>());
+/// ```
+#[inline(always)]
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_promotable]
+#[rustc_const_stable(feature = "const_align_of", since = "1.24.0")]
+pub const fn align_of<T>() -> usize {
+ intrinsics::min_align_of::<T>()
+}
+
+/// Returns the [ABI]-required minimum alignment of the type of the value that `val` points to in
+/// bytes.
+///
+/// Every reference to a value of the type `T` must be a multiple of this number.
+///
+/// [ABI]: https://en.wikipedia.org/wiki/Application_binary_interface
+///
+/// # Examples
+///
+/// ```
+/// use std::mem;
+///
+/// assert_eq!(4, mem::align_of_val(&5i32));
+/// ```
+#[inline]
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_align_of_val", issue = "46571")]
+#[allow(deprecated)]
+pub const fn align_of_val<T: ?Sized>(val: &T) -> usize {
+ // SAFETY: val is a reference, so it's a valid raw pointer
+ unsafe { intrinsics::min_align_of_val(val) }
+}
+
+/// Returns the [ABI]-required minimum alignment of the type of the value that `val` points to in
+/// bytes.
+///
+/// Every reference to a value of the type `T` must be a multiple of this number.
+///
+/// [ABI]: https://en.wikipedia.org/wiki/Application_binary_interface
+///
+/// # Safety
+///
+/// This function is only safe to call if the following conditions hold:
+///
+/// - If `T` is `Sized`, this function is always safe to call.
+/// - If the unsized tail of `T` is:
+/// - a [slice], then the length of the slice tail must be an initialized
+/// integer, and the size of the *entire value*
+/// (dynamic tail length + statically sized prefix) must fit in `isize`.
+/// - a [trait object], then the vtable part of the pointer must point
+/// to a valid vtable acquired by an unsizing coercion, and the size
+/// of the *entire value* (dynamic tail length + statically sized prefix)
+/// must fit in `isize`.
+/// - an (unstable) [extern type], then this function is always safe to
+/// call, but may panic or otherwise return the wrong value, as the
+/// extern type's layout is not known. This is the same behavior as
+/// [`align_of_val`] on a reference to a type with an extern type tail.
+/// - otherwise, it is conservatively not allowed to call this function.
+///
+/// [trait object]: ../../book/ch17-02-trait-objects.html
+/// [extern type]: ../../unstable-book/language-features/extern-types.html
+///
+/// # Examples
+///
+/// ```
+/// #![feature(layout_for_ptr)]
+/// use std::mem;
+///
+/// assert_eq!(4, unsafe { mem::align_of_val_raw(&5i32) });
+/// ```
+#[inline]
+#[must_use]
+#[unstable(feature = "layout_for_ptr", issue = "69835")]
+#[rustc_const_unstable(feature = "const_align_of_val_raw", issue = "46571")]
+pub const unsafe fn align_of_val_raw<T: ?Sized>(val: *const T) -> usize {
+ // SAFETY: the caller must provide a valid raw pointer
+ unsafe { intrinsics::min_align_of_val(val) }
+}
+
+/// Returns `true` if dropping values of type `T` matters.
+///
+/// This is purely an optimization hint, and may be implemented conservatively:
+/// it may return `true` for types that don't actually need to be dropped.
+/// As such, always returning `true` would be a valid implementation of
+/// this function. However if this function actually returns `false`, then you
+/// can be certain dropping `T` has no side effect.
+///
+/// Low-level implementations of things like collections, which need to manually
+/// drop their data, should use this function to avoid unnecessarily
+/// trying to drop all their contents when they are destroyed. This might not
+/// make a difference in release builds (where a loop that has no side-effects
+/// is easily detected and eliminated), but is often a big win for debug builds.
+///
+/// Note that [`drop_in_place`] already performs this check, so if your workload
+/// can be reduced to some small number of [`drop_in_place`] calls, using this is
+/// unnecessary. In particular, note that you can [`drop_in_place`] a slice, and that
+/// will do a single `needs_drop` check for all the values.
+///
+/// Types like `Vec` therefore just call `drop_in_place(&mut self[..])` without using
+/// `needs_drop` explicitly. Types like [`HashMap`], on the other hand, have to drop
+/// values one at a time and should use this API.
+///
+/// [`drop_in_place`]: crate::ptr::drop_in_place
+/// [`HashMap`]: ../../std/collections/struct.HashMap.html
+///
+/// # Examples
+///
+/// Here's an example of how a collection might make use of `needs_drop`:
+///
+/// ```
+/// use std::{mem, ptr};
+///
+/// pub struct MyCollection<T> {
+/// # data: [T; 1],
+/// /* ... */
+/// }
+/// # impl<T> MyCollection<T> {
+/// # fn iter_mut(&mut self) -> &mut [T] { &mut self.data }
+/// # fn free_buffer(&mut self) {}
+/// # }
+///
+/// impl<T> Drop for MyCollection<T> {
+/// fn drop(&mut self) {
+/// unsafe {
+/// // drop the data
+/// if mem::needs_drop::<T>() {
+/// for x in self.iter_mut() {
+/// ptr::drop_in_place(x);
+/// }
+/// }
+/// self.free_buffer();
+/// }
+/// }
+/// }
+/// ```
+#[inline]
+#[must_use]
+#[stable(feature = "needs_drop", since = "1.21.0")]
+#[rustc_const_stable(feature = "const_mem_needs_drop", since = "1.36.0")]
+#[rustc_diagnostic_item = "needs_drop"]
+pub const fn needs_drop<T: ?Sized>() -> bool {
+ intrinsics::needs_drop::<T>()
+}
+
+/// Returns the value of type `T` represented by the all-zero byte-pattern.
+///
+/// This means that, for example, the padding byte in `(u8, u16)` is not
+/// necessarily zeroed.
+///
+/// There is no guarantee that an all-zero byte-pattern represents a valid value
+/// of some type `T`. For example, the all-zero byte-pattern is not a valid value
+/// for reference types (`&T`, `&mut T`) and function pointers. Using `zeroed`
+/// on such types causes immediate [undefined behavior][ub] because [the Rust
+/// compiler assumes][inv] that there always is a valid value in a variable it
+/// considers initialized.
+///
+/// This has the same effect as [`MaybeUninit::zeroed().assume_init()`][zeroed].
+/// It is useful for FFI sometimes, but should generally be avoided.
+///
+/// [zeroed]: MaybeUninit::zeroed
+/// [ub]: ../../reference/behavior-considered-undefined.html
+/// [inv]: MaybeUninit#initialization-invariant
+///
+/// # Examples
+///
+/// Correct usage of this function: initializing an integer with zero.
+///
+/// ```
+/// use std::mem;
+///
+/// let x: i32 = unsafe { mem::zeroed() };
+/// assert_eq!(0, x);
+/// ```
+///
+/// *Incorrect* usage of this function: initializing a reference with zero.
+///
+/// ```rust,no_run
+/// # #![allow(invalid_value)]
+/// use std::mem;
+///
+/// let _x: &i32 = unsafe { mem::zeroed() }; // Undefined behavior!
+/// let _y: fn() = unsafe { mem::zeroed() }; // And again!
+/// ```
+#[inline(always)]
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated_in_future)]
+#[allow(deprecated)]
+#[rustc_diagnostic_item = "mem_zeroed"]
+#[track_caller]
+pub unsafe fn zeroed<T>() -> T {
+ // SAFETY: the caller must guarantee that an all-zero value is valid for `T`.
+ unsafe {
+ intrinsics::assert_zero_valid::<T>();
+ MaybeUninit::zeroed().assume_init()
+ }
+}
+
+/// Bypasses Rust's normal memory-initialization checks by pretending to
+/// produce a value of type `T`, while doing nothing at all.
+///
+/// **This function is deprecated.** Use [`MaybeUninit<T>`] instead.
+/// It also might be slower than using `MaybeUninit<T>` due to mitigations that were put in place to
+/// limit the potential harm caused by incorrect use of this function in legacy code.
+///
+/// The reason for deprecation is that the function basically cannot be used
+/// correctly: it has the same effect as [`MaybeUninit::uninit().assume_init()`][uninit].
+/// As the [`assume_init` documentation][assume_init] explains,
+/// [the Rust compiler assumes][inv] that values are properly initialized.
+/// As a consequence, calling e.g. `mem::uninitialized::<bool>()` causes immediate
+/// undefined behavior for returning a `bool` that is not definitely either `true`
+/// or `false`. Worse, truly uninitialized memory like what gets returned here
+/// is special in that the compiler knows that it does not have a fixed value.
+/// This makes it undefined behavior to have uninitialized data in a variable even
+/// if that variable has an integer type.
+/// (Notice that the rules around uninitialized integers are not finalized yet, but
+/// until they are, it is advisable to avoid them.)
+///
+/// [uninit]: MaybeUninit::uninit
+/// [assume_init]: MaybeUninit::assume_init
+/// [inv]: MaybeUninit#initialization-invariant
+#[inline(always)]
+#[must_use]
+#[deprecated(since = "1.39.0", note = "use `mem::MaybeUninit` instead")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated_in_future)]
+#[allow(deprecated)]
+#[rustc_diagnostic_item = "mem_uninitialized"]
+#[track_caller]
+pub unsafe fn uninitialized<T>() -> T {
+ // SAFETY: the caller must guarantee that an uninitialized value is valid for `T`.
+ unsafe {
+ intrinsics::assert_uninit_valid::<T>();
+ let mut val = MaybeUninit::<T>::uninit();
+
+ // Fill memory with 0x01, as an imperfect mitigation for old code that uses this function on
+ // bool, nonnull, and noundef types. But don't do this if we actively want to detect UB.
+ if !cfg!(any(miri, sanitize = "memory")) {
+ val.as_mut_ptr().write_bytes(0x01, 1);
+ }
+
+ val.assume_init()
+ }
+}
+
+/// Swaps the values at two mutable locations, without deinitializing either one.
+///
+/// * If you want to swap with a default or dummy value, see [`take`].
+/// * If you want to swap with a passed value, returning the old value, see [`replace`].
+///
+/// # Examples
+///
+/// ```
+/// use std::mem;
+///
+/// let mut x = 5;
+/// let mut y = 42;
+///
+/// mem::swap(&mut x, &mut y);
+///
+/// assert_eq!(42, x);
+/// assert_eq!(5, y);
+/// ```
+#[inline]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+pub const fn swap<T>(x: &mut T, y: &mut T) {
+ // NOTE(eddyb) SPIR-V's Logical addressing model doesn't allow for arbitrary
+ // reinterpretation of values as (chunkable) byte arrays, and the loop in the
+ // block optimization in `swap_slice` is hard to rewrite back
+ // into the (unoptimized) direct swapping implementation, so we disable it.
+ // FIXME(eddyb) the block optimization also prevents MIR optimizations from
+ // understanding `mem::replace`, `Option::take`, etc. - a better overall
+ // solution might be to make `ptr::swap_nonoverlapping` into an intrinsic, which
+ // a backend can choose to implement using the block optimization, or not.
+ // NOTE(scottmcm) MIRI is disabled here as reading in smaller units is a
+ // pessimization for it. Also, if the type contains any unaligned pointers,
+ // copying those over multiple reads is difficult to support.
+ #[cfg(not(any(target_arch = "spirv", miri)))]
+ {
+ // For types that are larger multiples of their alignment, the simple way
+ // tends to copy the whole thing to stack rather than doing it one part
+ // at a time, so instead treat them as one-element slices and piggy-back
+ // the slice optimizations that will split up the swaps.
+ if size_of::<T>() / align_of::<T>() > 4 {
+ // SAFETY: exclusive references always point to one non-overlapping
+ // element and are non-null and properly aligned.
+ return unsafe { ptr::swap_nonoverlapping(x, y, 1) };
+ }
+ }
+
+ // If a scalar consists of just a small number of alignment units, let
+ // the codegen just swap those pieces directly, as it's likely just a
+ // few instructions and anything else is probably overcomplicated.
+ //
+ // Most importantly, this covers primitives and simd types that tend to
+ // have size=align where doing anything else can be a pessimization.
+ // (This will also be used for ZSTs, though any solution works for them.)
+ swap_simple(x, y);
+}
+
+/// Same as [`swap`] semantically, but always uses the simple implementation.
+///
+/// Used elsewhere in `mem` and `ptr` at the bottom layer of calls.
+#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+#[inline]
+pub(crate) const fn swap_simple<T>(x: &mut T, y: &mut T) {
+ // We arrange for this to typically be called with small types,
+ // so this reads-and-writes approach is actually better than using
+ // copy_nonoverlapping as it easily puts things in LLVM registers
+ // directly and doesn't end up inlining allocas.
+ // And LLVM actually optimizes it to 3×memcpy if called with
+ // a type larger than it's willing to keep in a register.
+ // Having typed reads and writes in MIR here is also good as
+ // it lets MIRI and CTFE understand them better, including things
+ // like enforcing type validity for them.
+ // Importantly, read+copy_nonoverlapping+write introduces confusing
+ // asymmetry to the behaviour where one value went through read+write
+ // whereas the other was copied over by the intrinsic (see #94371).
+
+ // SAFETY: exclusive references are always valid to read/write,
+ // including being aligned, and nothing here panics so it's drop-safe.
+ unsafe {
+ let a = ptr::read(x);
+ let b = ptr::read(y);
+ ptr::write(x, b);
+ ptr::write(y, a);
+ }
+}
+
+/// Replaces `dest` with the default value of `T`, returning the previous `dest` value.
+///
+/// * If you want to replace the values of two variables, see [`swap`].
+/// * If you want to replace with a passed value instead of the default value, see [`replace`].
+///
+/// # Examples
+///
+/// A simple example:
+///
+/// ```
+/// use std::mem;
+///
+/// let mut v: Vec<i32> = vec![1, 2];
+///
+/// let old_v = mem::take(&mut v);
+/// assert_eq!(vec![1, 2], old_v);
+/// assert!(v.is_empty());
+/// ```
+///
+/// `take` allows taking ownership of a struct field by replacing it with an "empty" value.
+/// Without `take` you can run into issues like these:
+///
+/// ```compile_fail,E0507
+/// struct Buffer<T> { buf: Vec<T> }
+///
+/// impl<T> Buffer<T> {
+/// fn get_and_reset(&mut self) -> Vec<T> {
+/// // error: cannot move out of dereference of `&mut`-pointer
+/// let buf = self.buf;
+/// self.buf = Vec::new();
+/// buf
+/// }
+/// }
+/// ```
+///
+/// Note that `T` does not necessarily implement [`Clone`], so it can't even clone and reset
+/// `self.buf`. But `take` can be used to disassociate the original value of `self.buf` from
+/// `self`, allowing it to be returned:
+///
+/// ```
+/// use std::mem;
+///
+/// # struct Buffer<T> { buf: Vec<T> }
+/// impl<T> Buffer<T> {
+/// fn get_and_reset(&mut self) -> Vec<T> {
+/// mem::take(&mut self.buf)
+/// }
+/// }
+///
+/// let mut buffer = Buffer { buf: vec![0, 1] };
+/// assert_eq!(buffer.buf.len(), 2);
+///
+/// assert_eq!(buffer.get_and_reset(), vec![0, 1]);
+/// assert_eq!(buffer.buf.len(), 0);
+/// ```
+#[inline]
+#[stable(feature = "mem_take", since = "1.40.0")]
+pub fn take<T: Default>(dest: &mut T) -> T {
+ replace(dest, T::default())
+}
+
+/// Moves `src` into the referenced `dest`, returning the previous `dest` value.
+///
+/// Neither value is dropped.
+///
+/// * If you want to replace the values of two variables, see [`swap`].
+/// * If you want to replace with a default value, see [`take`].
+///
+/// # Examples
+///
+/// A simple example:
+///
+/// ```
+/// use std::mem;
+///
+/// let mut v: Vec<i32> = vec![1, 2];
+///
+/// let old_v = mem::replace(&mut v, vec![3, 4, 5]);
+/// assert_eq!(vec![1, 2], old_v);
+/// assert_eq!(vec![3, 4, 5], v);
+/// ```
+///
+/// `replace` allows consumption of a struct field by replacing it with another value.
+/// Without `replace` you can run into issues like these:
+///
+/// ```compile_fail,E0507
+/// struct Buffer<T> { buf: Vec<T> }
+///
+/// impl<T> Buffer<T> {
+/// fn replace_index(&mut self, i: usize, v: T) -> T {
+/// // error: cannot move out of dereference of `&mut`-pointer
+/// let t = self.buf[i];
+/// self.buf[i] = v;
+/// t
+/// }
+/// }
+/// ```
+///
+/// Note that `T` does not necessarily implement [`Clone`], so we can't even clone `self.buf[i]` to
+/// avoid the move. But `replace` can be used to disassociate the original value at that index from
+/// `self`, allowing it to be returned:
+///
+/// ```
+/// # #![allow(dead_code)]
+/// use std::mem;
+///
+/// # struct Buffer<T> { buf: Vec<T> }
+/// impl<T> Buffer<T> {
+/// fn replace_index(&mut self, i: usize, v: T) -> T {
+/// mem::replace(&mut self.buf[i], v)
+/// }
+/// }
+///
+/// let mut buffer = Buffer { buf: vec![0, 1] };
+/// assert_eq!(buffer.buf[0], 0);
+///
+/// assert_eq!(buffer.replace_index(0, 2), 0);
+/// assert_eq!(buffer.buf[0], 2);
+/// ```
+#[inline]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[must_use = "if you don't need the old value, you can just assign the new value directly"]
+#[rustc_const_unstable(feature = "const_replace", issue = "83164")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "mem_replace")]
+pub const fn replace<T>(dest: &mut T, src: T) -> T {
+ // SAFETY: We read from `dest` but directly write `src` into it afterwards,
+ // such that the old value is not duplicated. Nothing is dropped and
+ // nothing here can panic.
+ unsafe {
+ let result = ptr::read(dest);
+ ptr::write(dest, src);
+ result
+ }
+}
+
+/// Disposes of a value.
+///
+/// This does so by calling the argument's implementation of [`Drop`][drop].
+///
+/// This effectively does nothing for types which implement `Copy`, e.g.
+/// integers. Such values are copied and _then_ moved into the function, so the
+/// value persists after this function call.
+///
+/// This function is not magic; it is literally defined as
+///
+/// ```
+/// pub fn drop<T>(_x: T) { }
+/// ```
+///
+/// Because `_x` is moved into the function, it is automatically dropped before
+/// the function returns.
+///
+/// [drop]: Drop
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// let v = vec![1, 2, 3];
+///
+/// drop(v); // explicitly drop the vector
+/// ```
+///
+/// Since [`RefCell`] enforces the borrow rules at runtime, `drop` can
+/// release a [`RefCell`] borrow:
+///
+/// ```
+/// use std::cell::RefCell;
+///
+/// let x = RefCell::new(1);
+///
+/// let mut mutable_borrow = x.borrow_mut();
+/// *mutable_borrow = 1;
+///
+/// drop(mutable_borrow); // relinquish the mutable borrow on this slot
+///
+/// let borrow = x.borrow();
+/// println!("{}", *borrow);
+/// ```
+///
+/// Integers and other types implementing [`Copy`] are unaffected by `drop`.
+///
+/// ```
+/// #[derive(Copy, Clone)]
+/// struct Foo(u8);
+///
+/// let x = 1;
+/// let y = Foo(2);
+/// drop(x); // a copy of `x` is moved and dropped
+/// drop(y); // a copy of `y` is moved and dropped
+///
+/// println!("x: {}, y: {}", x, y.0); // still available
+/// ```
+///
+/// [`RefCell`]: crate::cell::RefCell
+#[inline]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "mem_drop")]
+pub fn drop<T>(_x: T) {}
+
+/// Bitwise-copies a value.
+///
+/// This function is not magic; it is literally defined as
+/// ```
+/// pub fn copy<T: Copy>(x: &T) -> T { *x }
+/// ```
+///
+/// It is useful when you want to pass a function pointer to a combinator, rather than defining a new closure.
+///
+/// Example:
+/// ```
+/// #![feature(mem_copy_fn)]
+/// use core::mem::copy;
+/// let result_from_ffi_function: Result<(), &i32> = Err(&1);
+/// let result_copied: Result<(), i32> = result_from_ffi_function.map_err(copy);
+/// ```
+#[inline]
+#[unstable(feature = "mem_copy_fn", issue = "98262")]
+pub fn copy<T: Copy>(x: &T) -> T {
+ *x
+}
+
+/// Interprets `src` as having type `&U`, and then reads `src` without moving
+/// the contained value.
+///
+/// This function will unsafely assume the pointer `src` is valid for [`size_of::<U>`][size_of]
+/// bytes by transmuting `&T` to `&U` and then reading the `&U` (except that this is done in a way
+/// that is correct even when `&U` has stricter alignment requirements than `&T`). It will also
+/// unsafely create a copy of the contained value instead of moving out of `src`.
+///
+/// It is not a compile-time error if `T` and `U` have different sizes, but it
+/// is highly encouraged to only invoke this function where `T` and `U` have the
+/// same size. This function triggers [undefined behavior][ub] if `U` is larger than
+/// `T`.
+///
+/// [ub]: ../../reference/behavior-considered-undefined.html
+///
+/// # Examples
+///
+/// ```
+/// use std::mem;
+///
+/// #[repr(packed)]
+/// struct Foo {
+/// bar: u8,
+/// }
+///
+/// let foo_array = [10u8];
+///
+/// unsafe {
+/// // Copy the data from 'foo_array' and treat it as a 'Foo'
+/// let mut foo_struct: Foo = mem::transmute_copy(&foo_array);
+/// assert_eq!(foo_struct.bar, 10);
+///
+/// // Modify the copied data
+/// foo_struct.bar = 20;
+/// assert_eq!(foo_struct.bar, 20);
+/// }
+///
+/// // The contents of 'foo_array' should not have changed
+/// assert_eq!(foo_array, [10]);
+/// ```
+#[inline]
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_transmute_copy", issue = "83165")]
+pub const unsafe fn transmute_copy<T, U>(src: &T) -> U {
+ assert!(size_of::<T>() >= size_of::<U>(), "cannot transmute_copy if U is larger than T");
+
+ // If U has a higher alignment requirement, src might not be suitably aligned.
+ if align_of::<U>() > align_of::<T>() {
+ // SAFETY: `src` is a reference which is guaranteed to be valid for reads.
+ // The caller must guarantee that the actual transmutation is safe.
+ unsafe { ptr::read_unaligned(src as *const T as *const U) }
+ } else {
+ // SAFETY: `src` is a reference which is guaranteed to be valid for reads.
+ // We just checked that `src as *const U` was properly aligned.
+ // The caller must guarantee that the actual transmutation is safe.
+ unsafe { ptr::read(src as *const T as *const U) }
+ }
+}
+
+/// Opaque type representing the discriminant of an enum.
+///
+/// See the [`discriminant`] function in this module for more information.
+#[stable(feature = "discriminant_value", since = "1.21.0")]
+pub struct Discriminant<T>(<T as DiscriminantKind>::Discriminant);
+
+// N.B. These trait implementations cannot be derived because we don't want any bounds on T.
+
+#[stable(feature = "discriminant_value", since = "1.21.0")]
+impl<T> Copy for Discriminant<T> {}
+
+#[stable(feature = "discriminant_value", since = "1.21.0")]
+impl<T> clone::Clone for Discriminant<T> {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+#[stable(feature = "discriminant_value", since = "1.21.0")]
+impl<T> cmp::PartialEq for Discriminant<T> {
+ fn eq(&self, rhs: &Self) -> bool {
+ self.0 == rhs.0
+ }
+}
+
+#[stable(feature = "discriminant_value", since = "1.21.0")]
+impl<T> cmp::Eq for Discriminant<T> {}
+
+#[stable(feature = "discriminant_value", since = "1.21.0")]
+impl<T> hash::Hash for Discriminant<T> {
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ self.0.hash(state);
+ }
+}
+
+#[stable(feature = "discriminant_value", since = "1.21.0")]
+impl<T> fmt::Debug for Discriminant<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_tuple("Discriminant").field(&self.0).finish()
+ }
+}
+
+/// Returns a value uniquely identifying the enum variant in `v`.
+///
+/// If `T` is not an enum, calling this function will not result in undefined behavior, but the
+/// return value is unspecified.
+///
+/// # Stability
+///
+/// The discriminant of an enum variant may change if the enum definition changes. A discriminant
+/// of some variant will not change between compilations with the same compiler.
+///
+/// # Examples
+///
+/// This can be used to compare enums that carry data, while disregarding
+/// the actual data:
+///
+/// ```
+/// use std::mem;
+///
+/// enum Foo { A(&'static str), B(i32), C(i32) }
+///
+/// assert_eq!(mem::discriminant(&Foo::A("bar")), mem::discriminant(&Foo::A("baz")));
+/// assert_eq!(mem::discriminant(&Foo::B(1)), mem::discriminant(&Foo::B(2)));
+/// assert_ne!(mem::discriminant(&Foo::B(3)), mem::discriminant(&Foo::C(3)));
+/// ```
+#[stable(feature = "discriminant_value", since = "1.21.0")]
+#[rustc_const_unstable(feature = "const_discriminant", issue = "69821")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "mem_discriminant")]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+pub const fn discriminant<T>(v: &T) -> Discriminant<T> {
+ Discriminant(intrinsics::discriminant_value(v))
+}
+
+/// Returns the number of variants in the enum type `T`.
+///
+/// If `T` is not an enum, calling this function will not result in undefined behavior, but the
+/// return value is unspecified. Equally, if `T` is an enum with more variants than `usize::MAX`
+/// the return value is unspecified. Uninhabited variants will be counted.
+///
+/// Note that an enum may be expanded with additional variants in the future
+/// as a non-breaking change, for example if it is marked `#[non_exhaustive]`,
+/// which will change the result of this function.
+///
+/// # Examples
+///
+/// ```
+/// # #![feature(never_type)]
+/// # #![feature(variant_count)]
+///
+/// use std::mem;
+///
+/// enum Void {}
+/// enum Foo { A(&'static str), B(i32), C(i32) }
+///
+/// assert_eq!(mem::variant_count::<Void>(), 0);
+/// assert_eq!(mem::variant_count::<Foo>(), 3);
+///
+/// assert_eq!(mem::variant_count::<Option<!>>(), 2);
+/// assert_eq!(mem::variant_count::<Result<!, !>>(), 2);
+/// ```
+#[inline(always)]
+#[must_use]
+#[unstable(feature = "variant_count", issue = "73662")]
+#[rustc_const_unstable(feature = "variant_count", issue = "73662")]
+#[rustc_diagnostic_item = "mem_variant_count"]
+pub const fn variant_count<T>() -> usize {
+ intrinsics::variant_count::<T>()
+}
diff --git a/library/core/src/mem/transmutability.rs b/library/core/src/mem/transmutability.rs
new file mode 100644
index 000000000..b59a5b89d
--- /dev/null
+++ b/library/core/src/mem/transmutability.rs
@@ -0,0 +1,43 @@
+/// Are values of a type transmutable into values of another type?
+///
+/// This trait is implemented on-the-fly by the compiler for types `Src` and `Self` when the bits of
+/// any value of type `Src` are safely transmutable into a value of type `Self`, in a given `Context`,
+/// provided that whatever safety conditions you have asked the compiler to [`Assume`] are satisfied.
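+///
+/// A minimal sketch of how such a bound might be written (hypothetical usage of this
+/// unstable API; the name and the const-parameter list are placeholders that may change):
+///
+/// ```ignore (illustrative-sketch-of-an-unstable-api)
+/// #![feature(transmutability)]
+/// use std::mem::BikeshedIntrinsicFrom;
+///
+/// struct Context;
+///
+/// // Compiles only if `Src` is safely transmutable into `Dst` within `Context`,
+/// // with no safety conditions assumed to be checked by the caller.
+/// fn assert_transmutable<Src, Dst>()
+/// where
+///     Dst: BikeshedIntrinsicFrom<Src, Context, false, false, false, false>,
+/// {
+/// }
+/// ```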
+#[unstable(feature = "transmutability", issue = "99571")]
+#[cfg_attr(not(bootstrap), lang = "transmute_trait")]
+#[rustc_on_unimplemented(
+ message = "`{Src}` cannot be safely transmuted into `{Self}` in the defining scope of `{Context}`.",
+ label = "`{Src}` cannot be safely transmuted into `{Self}` in the defining scope of `{Context}`."
+)]
+pub unsafe trait BikeshedIntrinsicFrom<
+ Src,
+ Context,
+ const ASSUME_ALIGNMENT: bool,
+ const ASSUME_LIFETIMES: bool,
+ const ASSUME_VALIDITY: bool,
+ const ASSUME_VISIBILITY: bool,
+> where
+ Src: ?Sized,
+{
+}
+
+/// What transmutation safety conditions shall the compiler assume that *you* are checking?
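+///
+/// A minimal sketch of constructing a value of this type (hypothetical usage of an
+/// unstable API; the set of fields may change):
+///
+/// ```ignore (illustrative-sketch-of-an-unstable-api)
+/// #![feature(transmutability)]
+/// use std::mem::Assume;
+///
+/// // Assume nothing: the compiler must verify alignment, lifetimes, validity,
+/// // and visibility on its own.
+/// let nothing_assumed = Assume {
+///     alignment: false,
+///     lifetimes: false,
+///     validity: false,
+///     visibility: false,
+/// };
+/// ```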
+#[unstable(feature = "transmutability", issue = "99571")]
+#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+pub struct Assume {
+ /// When `true`, the compiler assumes that *you* are ensuring (either dynamically or statically) that
+ /// destination referents do not have stricter alignment requirements than source referents.
+ pub alignment: bool,
+
+ /// When `true`, the compiler assumes that *you* are ensuring that lifetimes are not extended in a manner
+ /// that violates Rust's memory model.
+ pub lifetimes: bool,
+
+ /// When `true`, the compiler assumes that *you* are ensuring that the source value is actually a valid
+ /// instance of the destination type.
+ pub validity: bool,
+
+ /// When `true`, the compiler assumes that *you* have ensured that it is safe for you to violate the
+ /// type and field privacy of the destination type (and sometimes of the source type, too).
+ pub visibility: bool,
+}
diff --git a/library/core/src/mem/valid_align.rs b/library/core/src/mem/valid_align.rs
new file mode 100644
index 000000000..fcfa95120
--- /dev/null
+++ b/library/core/src/mem/valid_align.rs
@@ -0,0 +1,247 @@
+use crate::convert::TryFrom;
+use crate::num::NonZeroUsize;
+use crate::{cmp, fmt, hash, mem, num};
+
+/// A type storing a `usize` which is a power of two, and thus
+/// represents a possible alignment in the Rust abstract machine.
+///
+/// Note that particularly large alignments, while representable in this type,
+/// are likely not to be supported by actual allocators and linkers.
+#[derive(Copy, Clone)]
+#[repr(transparent)]
+pub(crate) struct ValidAlign(ValidAlignEnum);
+
+// ValidAlign is `repr(usize)`, but via extra steps.
+const _: () = assert!(mem::size_of::<ValidAlign>() == mem::size_of::<usize>());
+const _: () = assert!(mem::align_of::<ValidAlign>() == mem::align_of::<usize>());
+
+impl ValidAlign {
+ /// Creates a `ValidAlign` from a power-of-two `usize`.
+ ///
+ /// # Safety
+ ///
+ /// `align` must be a power of two.
+ ///
+ /// Equivalently, it must be `1 << exp` for some `exp` in `0..usize::BITS`.
+ /// It must *not* be zero.
+ #[inline]
+ pub(crate) const unsafe fn new_unchecked(align: usize) -> Self {
+ debug_assert!(align.is_power_of_two());
+
+ // SAFETY: By precondition, this must be a power of two, and
+ // our variants encompass all possible powers of two.
+ unsafe { mem::transmute::<usize, ValidAlign>(align) }
+ }
+
+ #[inline]
+ pub(crate) const fn as_nonzero(self) -> NonZeroUsize {
+ // SAFETY: All the discriminants are non-zero.
+ unsafe { NonZeroUsize::new_unchecked(self.0 as usize) }
+ }
+
+ /// Returns the base 2 logarithm of the alignment.
+ ///
+ /// This is always exact, as `self` represents a power of two.
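+ ///
+ /// For example, an alignment of 8 (`1 << 3`) has `log2() == 3`.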
+ #[inline]
+ pub(crate) fn log2(self) -> u32 {
+ self.as_nonzero().trailing_zeros()
+ }
+}
+
+impl fmt::Debug for ValidAlign {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:?} (1 << {:?})", self.as_nonzero(), self.log2())
+ }
+}
+
+impl TryFrom<NonZeroUsize> for ValidAlign {
+ type Error = num::TryFromIntError;
+
+ #[inline]
+ fn try_from(align: NonZeroUsize) -> Result<ValidAlign, Self::Error> {
+ if align.is_power_of_two() {
+ // SAFETY: Just checked for power-of-two
+ unsafe { Ok(ValidAlign::new_unchecked(align.get())) }
+ } else {
+ Err(num::TryFromIntError(()))
+ }
+ }
+}
+
+impl TryFrom<usize> for ValidAlign {
+ type Error = num::TryFromIntError;
+
+ #[inline]
+ fn try_from(align: usize) -> Result<ValidAlign, Self::Error> {
+ if align.is_power_of_two() {
+ // SAFETY: Just checked for power-of-two
+ unsafe { Ok(ValidAlign::new_unchecked(align)) }
+ } else {
+ Err(num::TryFromIntError(()))
+ }
+ }
+}
+
+impl cmp::Eq for ValidAlign {}
+
+impl cmp::PartialEq for ValidAlign {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ self.as_nonzero() == other.as_nonzero()
+ }
+}
+
+impl cmp::Ord for ValidAlign {
+ #[inline]
+ fn cmp(&self, other: &Self) -> cmp::Ordering {
+ self.as_nonzero().cmp(&other.as_nonzero())
+ }
+}
+
+impl cmp::PartialOrd for ValidAlign {
+ #[inline]
+ fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl hash::Hash for ValidAlign {
+ #[inline]
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ self.as_nonzero().hash(state)
+ }
+}
+
+#[cfg(target_pointer_width = "16")]
+type ValidAlignEnum = ValidAlignEnum16;
+#[cfg(target_pointer_width = "32")]
+type ValidAlignEnum = ValidAlignEnum32;
+#[cfg(target_pointer_width = "64")]
+type ValidAlignEnum = ValidAlignEnum64;
+
+#[derive(Copy, Clone)]
+#[repr(u16)]
+enum ValidAlignEnum16 {
+ _Align1Shl0 = 1 << 0,
+ _Align1Shl1 = 1 << 1,
+ _Align1Shl2 = 1 << 2,
+ _Align1Shl3 = 1 << 3,
+ _Align1Shl4 = 1 << 4,
+ _Align1Shl5 = 1 << 5,
+ _Align1Shl6 = 1 << 6,
+ _Align1Shl7 = 1 << 7,
+ _Align1Shl8 = 1 << 8,
+ _Align1Shl9 = 1 << 9,
+ _Align1Shl10 = 1 << 10,
+ _Align1Shl11 = 1 << 11,
+ _Align1Shl12 = 1 << 12,
+ _Align1Shl13 = 1 << 13,
+ _Align1Shl14 = 1 << 14,
+ _Align1Shl15 = 1 << 15,
+}
+
+#[derive(Copy, Clone)]
+#[repr(u32)]
+enum ValidAlignEnum32 {
+ _Align1Shl0 = 1 << 0,
+ _Align1Shl1 = 1 << 1,
+ _Align1Shl2 = 1 << 2,
+ _Align1Shl3 = 1 << 3,
+ _Align1Shl4 = 1 << 4,
+ _Align1Shl5 = 1 << 5,
+ _Align1Shl6 = 1 << 6,
+ _Align1Shl7 = 1 << 7,
+ _Align1Shl8 = 1 << 8,
+ _Align1Shl9 = 1 << 9,
+ _Align1Shl10 = 1 << 10,
+ _Align1Shl11 = 1 << 11,
+ _Align1Shl12 = 1 << 12,
+ _Align1Shl13 = 1 << 13,
+ _Align1Shl14 = 1 << 14,
+ _Align1Shl15 = 1 << 15,
+ _Align1Shl16 = 1 << 16,
+ _Align1Shl17 = 1 << 17,
+ _Align1Shl18 = 1 << 18,
+ _Align1Shl19 = 1 << 19,
+ _Align1Shl20 = 1 << 20,
+ _Align1Shl21 = 1 << 21,
+ _Align1Shl22 = 1 << 22,
+ _Align1Shl23 = 1 << 23,
+ _Align1Shl24 = 1 << 24,
+ _Align1Shl25 = 1 << 25,
+ _Align1Shl26 = 1 << 26,
+ _Align1Shl27 = 1 << 27,
+ _Align1Shl28 = 1 << 28,
+ _Align1Shl29 = 1 << 29,
+ _Align1Shl30 = 1 << 30,
+ _Align1Shl31 = 1 << 31,
+}
+
+#[derive(Copy, Clone)]
+#[repr(u64)]
+enum ValidAlignEnum64 {
+ _Align1Shl0 = 1 << 0,
+ _Align1Shl1 = 1 << 1,
+ _Align1Shl2 = 1 << 2,
+ _Align1Shl3 = 1 << 3,
+ _Align1Shl4 = 1 << 4,
+ _Align1Shl5 = 1 << 5,
+ _Align1Shl6 = 1 << 6,
+ _Align1Shl7 = 1 << 7,
+ _Align1Shl8 = 1 << 8,
+ _Align1Shl9 = 1 << 9,
+ _Align1Shl10 = 1 << 10,
+ _Align1Shl11 = 1 << 11,
+ _Align1Shl12 = 1 << 12,
+ _Align1Shl13 = 1 << 13,
+ _Align1Shl14 = 1 << 14,
+ _Align1Shl15 = 1 << 15,
+ _Align1Shl16 = 1 << 16,
+ _Align1Shl17 = 1 << 17,
+ _Align1Shl18 = 1 << 18,
+ _Align1Shl19 = 1 << 19,
+ _Align1Shl20 = 1 << 20,
+ _Align1Shl21 = 1 << 21,
+ _Align1Shl22 = 1 << 22,
+ _Align1Shl23 = 1 << 23,
+ _Align1Shl24 = 1 << 24,
+ _Align1Shl25 = 1 << 25,
+ _Align1Shl26 = 1 << 26,
+ _Align1Shl27 = 1 << 27,
+ _Align1Shl28 = 1 << 28,
+ _Align1Shl29 = 1 << 29,
+ _Align1Shl30 = 1 << 30,
+ _Align1Shl31 = 1 << 31,
+ _Align1Shl32 = 1 << 32,
+ _Align1Shl33 = 1 << 33,
+ _Align1Shl34 = 1 << 34,
+ _Align1Shl35 = 1 << 35,
+ _Align1Shl36 = 1 << 36,
+ _Align1Shl37 = 1 << 37,
+ _Align1Shl38 = 1 << 38,
+ _Align1Shl39 = 1 << 39,
+ _Align1Shl40 = 1 << 40,
+ _Align1Shl41 = 1 << 41,
+ _Align1Shl42 = 1 << 42,
+ _Align1Shl43 = 1 << 43,
+ _Align1Shl44 = 1 << 44,
+ _Align1Shl45 = 1 << 45,
+ _Align1Shl46 = 1 << 46,
+ _Align1Shl47 = 1 << 47,
+ _Align1Shl48 = 1 << 48,
+ _Align1Shl49 = 1 << 49,
+ _Align1Shl50 = 1 << 50,
+ _Align1Shl51 = 1 << 51,
+ _Align1Shl52 = 1 << 52,
+ _Align1Shl53 = 1 << 53,
+ _Align1Shl54 = 1 << 54,
+ _Align1Shl55 = 1 << 55,
+ _Align1Shl56 = 1 << 56,
+ _Align1Shl57 = 1 << 57,
+ _Align1Shl58 = 1 << 58,
+ _Align1Shl59 = 1 << 59,
+ _Align1Shl60 = 1 << 60,
+ _Align1Shl61 = 1 << 61,
+ _Align1Shl62 = 1 << 62,
+ _Align1Shl63 = 1 << 63,
+}
diff --git a/library/core/src/num/bignum.rs b/library/core/src/num/bignum.rs
new file mode 100644
index 000000000..de85fdd6e
--- /dev/null
+++ b/library/core/src/num/bignum.rs
@@ -0,0 +1,434 @@
+//! Custom arbitrary-precision number (bignum) implementation.
+//!
+//! This is designed to avoid heap allocation at the expense of stack memory.
+//! The most used bignum type, `Big32x40`, is limited to 32 × 40 = 1,280 bits
+//! and will take at most 160 bytes of stack memory. This is more than enough
+//! for round-tripping all possible finite `f64` values.
+//!
+//! In principle it is possible to have multiple bignum types for different
+//! inputs, but we don't do so, in order to avoid code bloat. Each bignum's
+//! actual usage is still tracked, so this normally doesn't matter.
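+//!
+//! For example, with 32-bit digits, the value `2^32 + 5` is stored as the digit
+//! array `[5, 1]`, since digit `i` carries weight `2^(32 * i)`.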
+
+// This module is only for dec2flt and flt2dec, and only public because of coretests.
+// It is not intended to ever be stabilized.
+#![doc(hidden)]
+#![unstable(
+ feature = "core_private_bignum",
+ reason = "internal routines only exposed for testing",
+ issue = "none"
+)]
+#![macro_use]
+
+/// Arithmetic operations required by bignums.
+pub trait FullOps: Sized {
+ /// Returns `(carry', v')` such that `carry' * 2^W + v' = self * other + other2 + carry`,
+ /// where `W` is the number of bits in `Self`.
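+ ///
+ /// For example, for `u8` (`W = 8`), `200u8.full_mul_add(200, 10, 10)` returns
+ /// `(156, 84)`, since `200 * 200 + 10 + 10 = 40020 = 156 * 256 + 84`.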
+ fn full_mul_add(self, other: Self, other2: Self, carry: Self) -> (Self /* carry */, Self);
+
+ /// Returns `(quo, rem)` such that `borrow * 2^W + self = quo * other + rem`
+ /// and `0 <= rem < other`, where `W` is the number of bits in `Self`.
+ fn full_div_rem(self, other: Self, borrow: Self)
+ -> (Self /* quotient */, Self /* remainder */);
+}
+
+macro_rules! impl_full_ops {
+ ($($ty:ty: add($addfn:path), mul/div($bigty:ident);)*) => (
+ $(
+ impl FullOps for $ty {
+ fn full_mul_add(self, other: $ty, other2: $ty, carry: $ty) -> ($ty, $ty) {
+                    // This cannot overflow; the output is at most
+                    // `(2^nbits - 1)^2 + 2 * (2^nbits - 1) = 2^(2*nbits) - 1`.
+ let v = (self as $bigty) * (other as $bigty) + (other2 as $bigty) +
+ (carry as $bigty);
+ ((v >> <$ty>::BITS) as $ty, v as $ty)
+ }
+
+ fn full_div_rem(self, other: $ty, borrow: $ty) -> ($ty, $ty) {
+ debug_assert!(borrow < other);
+ // This cannot overflow; the output is between `0` and `other * (2^nbits - 1)`.
+ let lhs = ((borrow as $bigty) << <$ty>::BITS) | (self as $bigty);
+ let rhs = other as $bigty;
+ ((lhs / rhs) as $ty, (lhs % rhs) as $ty)
+ }
+ }
+ )*
+ )
+}
+
+impl_full_ops! {
+ u8: add(intrinsics::u8_add_with_overflow), mul/div(u16);
+ u16: add(intrinsics::u16_add_with_overflow), mul/div(u32);
+ u32: add(intrinsics::u32_add_with_overflow), mul/div(u64);
+ // See RFC #521 for enabling this.
+ // u64: add(intrinsics::u64_add_with_overflow), mul/div(u128);
+}
+
+/// Table of powers of 5 representable in a single digit. Specifically, the largest
+/// {u8, u16, u32} value that's a power of five, plus the corresponding exponent. Used in `mul_pow5`.
+const SMALL_POW5: [(u64, usize); 3] = [(125, 3), (15625, 6), (1_220_703_125, 13)];
+
+macro_rules! define_bignum {
+ ($name:ident: type=$ty:ty, n=$n:expr) => {
+        /// Stack-allocated arbitrary-precision (up to a certain limit) integer.
+ ///
+ /// This is backed by a fixed-size array of given type ("digit").
+ /// While the array is not very large (normally some hundred bytes),
+        /// copying it recklessly may result in a performance hit.
+ /// Thus this is intentionally not `Copy`.
+ ///
+        /// All operations available to bignums panic in the case of overflow.
+        /// The caller is responsible for using large enough bignum types.
+ pub struct $name {
+ /// One plus the offset to the maximum "digit" in use.
+ /// This does not decrease, so be aware of the computation order.
+ /// `base[size..]` should be zero.
+ size: usize,
+ /// Digits. `[a, b, c, ...]` represents `a + b*2^W + c*2^(2W) + ...`
+ /// where `W` is the number of bits in the digit type.
+ base: [$ty; $n],
+ }
+
+ impl $name {
+ /// Makes a bignum from one digit.
+ pub fn from_small(v: $ty) -> $name {
+ let mut base = [0; $n];
+ base[0] = v;
+ $name { size: 1, base }
+ }
+
+            /// Makes a bignum from a `u64` value.
+ pub fn from_u64(mut v: u64) -> $name {
+ let mut base = [0; $n];
+ let mut sz = 0;
+ while v > 0 {
+ base[sz] = v as $ty;
+ v >>= <$ty>::BITS;
+ sz += 1;
+ }
+ $name { size: sz, base }
+ }
+
+ /// Returns the internal digits as a slice `[a, b, c, ...]` such that the numeric
+ /// value is `a + b * 2^W + c * 2^(2W) + ...` where `W` is the number of bits in
+ /// the digit type.
+ pub fn digits(&self) -> &[$ty] {
+ &self.base[..self.size]
+ }
+
+ /// Returns the `i`-th bit where bit 0 is the least significant one.
+ /// In other words, the bit with weight `2^i`.
+ pub fn get_bit(&self, i: usize) -> u8 {
+ let digitbits = <$ty>::BITS as usize;
+ let d = i / digitbits;
+ let b = i % digitbits;
+ ((self.base[d] >> b) & 1) as u8
+ }
+
+ /// Returns `true` if the bignum is zero.
+ pub fn is_zero(&self) -> bool {
+ self.digits().iter().all(|&v| v == 0)
+ }
+
+ /// Returns the number of bits necessary to represent this value. Note that zero
+ /// is considered to need 0 bits.
+ pub fn bit_length(&self) -> usize {
+ let digitbits = <$ty>::BITS as usize;
+ let digits = self.digits();
+ // Find the most significant non-zero digit.
+ let msd = digits.iter().rposition(|&x| x != 0);
+ match msd {
+ Some(msd) => msd * digitbits + digits[msd].log2() as usize + 1,
+ // There are no non-zero digits, i.e., the number is zero.
+ _ => 0,
+ }
+ }
+
+ /// Adds `other` to itself and returns its own mutable reference.
+ pub fn add<'a>(&'a mut self, other: &$name) -> &'a mut $name {
+ use crate::cmp;
+ use crate::iter;
+
+ let mut sz = cmp::max(self.size, other.size);
+ let mut carry = false;
+ for (a, b) in iter::zip(&mut self.base[..sz], &other.base[..sz]) {
+ let (v, c) = (*a).carrying_add(*b, carry);
+ *a = v;
+ carry = c;
+ }
+ if carry {
+ self.base[sz] = 1;
+ sz += 1;
+ }
+ self.size = sz;
+ self
+ }
+
+ pub fn add_small(&mut self, other: $ty) -> &mut $name {
+ let (v, mut carry) = self.base[0].carrying_add(other, false);
+ self.base[0] = v;
+ let mut i = 1;
+ while carry {
+ let (v, c) = self.base[i].carrying_add(0, carry);
+ self.base[i] = v;
+ carry = c;
+ i += 1;
+ }
+ if i > self.size {
+ self.size = i;
+ }
+ self
+ }
+
+ /// Subtracts `other` from itself and returns its own mutable reference.
+ pub fn sub<'a>(&'a mut self, other: &$name) -> &'a mut $name {
+ use crate::cmp;
+ use crate::iter;
+
+ let sz = cmp::max(self.size, other.size);
+ let mut noborrow = true;
+ for (a, b) in iter::zip(&mut self.base[..sz], &other.base[..sz]) {
+ let (v, c) = (*a).carrying_add(!*b, noborrow);
+ *a = v;
+ noborrow = c;
+ }
+ assert!(noborrow);
+ self.size = sz;
+ self
+ }
+
+ /// Multiplies itself by a digit-sized `other` and returns its own
+ /// mutable reference.
+ pub fn mul_small(&mut self, other: $ty) -> &mut $name {
+ let mut sz = self.size;
+ let mut carry = 0;
+ for a in &mut self.base[..sz] {
+ let (v, c) = (*a).carrying_mul(other, carry);
+ *a = v;
+ carry = c;
+ }
+ if carry > 0 {
+ self.base[sz] = carry;
+ sz += 1;
+ }
+ self.size = sz;
+ self
+ }
+
+ /// Multiplies itself by `2^bits` and returns its own mutable reference.
+ pub fn mul_pow2(&mut self, bits: usize) -> &mut $name {
+ let digitbits = <$ty>::BITS as usize;
+ let digits = bits / digitbits;
+ let bits = bits % digitbits;
+
+ assert!(digits < $n);
+ debug_assert!(self.base[$n - digits..].iter().all(|&v| v == 0));
+ debug_assert!(bits == 0 || (self.base[$n - digits - 1] >> (digitbits - bits)) == 0);
+
+ // shift by `digits * digitbits` bits
+ for i in (0..self.size).rev() {
+ self.base[i + digits] = self.base[i];
+ }
+ for i in 0..digits {
+ self.base[i] = 0;
+ }
+
+ // shift by `bits` bits
+ let mut sz = self.size + digits;
+ if bits > 0 {
+ let last = sz;
+ let overflow = self.base[last - 1] >> (digitbits - bits);
+ if overflow > 0 {
+ self.base[last] = overflow;
+ sz += 1;
+ }
+ for i in (digits + 1..last).rev() {
+ self.base[i] =
+ (self.base[i] << bits) | (self.base[i - 1] >> (digitbits - bits));
+ }
+ self.base[digits] <<= bits;
+ // self.base[..digits] is zero, no need to shift
+ }
+
+ self.size = sz;
+ self
+ }
+
+ /// Multiplies itself by `5^e` and returns its own mutable reference.
+ pub fn mul_pow5(&mut self, mut e: usize) -> &mut $name {
+ use crate::mem;
+ use crate::num::bignum::SMALL_POW5;
+
+                // There are exactly n trailing zeros on 2^n, and the only relevant digit sizes
+                // are consecutive powers of two, so this is a well-suited index into the table.
+ let table_index = mem::size_of::<$ty>().trailing_zeros() as usize;
+ let (small_power, small_e) = SMALL_POW5[table_index];
+ let small_power = small_power as $ty;
+
+ // Multiply with the largest single-digit power as long as possible ...
+ while e >= small_e {
+ self.mul_small(small_power);
+ e -= small_e;
+ }
+
+ // ... then finish off the remainder.
+ let mut rest_power = 1;
+ for _ in 0..e {
+ rest_power *= 5;
+ }
+ self.mul_small(rest_power);
+
+ self
+ }
+
+ /// Multiplies itself by a number described by `other[0] + other[1] * 2^W +
+ /// other[2] * 2^(2W) + ...` (where `W` is the number of bits in the digit type)
+ /// and returns its own mutable reference.
+ pub fn mul_digits<'a>(&'a mut self, other: &[$ty]) -> &'a mut $name {
+                // The internal routine. Works best when aa.len() <= bb.len().
+ fn mul_inner(ret: &mut [$ty; $n], aa: &[$ty], bb: &[$ty]) -> usize {
+ use crate::num::bignum::FullOps;
+
+ let mut retsz = 0;
+ for (i, &a) in aa.iter().enumerate() {
+ if a == 0 {
+ continue;
+ }
+ let mut sz = bb.len();
+ let mut carry = 0;
+ for (j, &b) in bb.iter().enumerate() {
+ let (c, v) = a.full_mul_add(b, ret[i + j], carry);
+ ret[i + j] = v;
+ carry = c;
+ }
+ if carry > 0 {
+ ret[i + sz] = carry;
+ sz += 1;
+ }
+ if retsz < i + sz {
+ retsz = i + sz;
+ }
+ }
+ retsz
+ }
+
+ let mut ret = [0; $n];
+ let retsz = if self.size < other.len() {
+ mul_inner(&mut ret, &self.digits(), other)
+ } else {
+ mul_inner(&mut ret, other, &self.digits())
+ };
+ self.base = ret;
+ self.size = retsz;
+ self
+ }
+
+ /// Divides itself by a digit-sized `other` and returns its own
+ /// mutable reference *and* the remainder.
+ pub fn div_rem_small(&mut self, other: $ty) -> (&mut $name, $ty) {
+ use crate::num::bignum::FullOps;
+
+ assert!(other > 0);
+
+ let sz = self.size;
+ let mut borrow = 0;
+ for a in self.base[..sz].iter_mut().rev() {
+ let (q, r) = (*a).full_div_rem(other, borrow);
+ *a = q;
+ borrow = r;
+ }
+ (self, borrow)
+ }
+
+ /// Divide self by another bignum, overwriting `q` with the quotient and `r` with the
+ /// remainder.
+ pub fn div_rem(&self, d: &$name, q: &mut $name, r: &mut $name) {
+ // Stupid slow base-2 long division taken from
+ // https://en.wikipedia.org/wiki/Division_algorithm
+ // FIXME use a greater base ($ty) for the long division.
+ assert!(!d.is_zero());
+ let digitbits = <$ty>::BITS as usize;
+ for digit in &mut q.base[..] {
+ *digit = 0;
+ }
+ for digit in &mut r.base[..] {
+ *digit = 0;
+ }
+ r.size = d.size;
+ q.size = 1;
+ let mut q_is_zero = true;
+ let end = self.bit_length();
+ for i in (0..end).rev() {
+ r.mul_pow2(1);
+ r.base[0] |= self.get_bit(i) as $ty;
+ if &*r >= d {
+ r.sub(d);
+ // Set bit `i` of q to 1.
+ let digit_idx = i / digitbits;
+ let bit_idx = i % digitbits;
+ if q_is_zero {
+ q.size = digit_idx + 1;
+ q_is_zero = false;
+ }
+ q.base[digit_idx] |= 1 << bit_idx;
+ }
+ }
+ debug_assert!(q.base[q.size..].iter().all(|&d| d == 0));
+ debug_assert!(r.base[r.size..].iter().all(|&d| d == 0));
+ }
+ }
+
+ impl crate::cmp::PartialEq for $name {
+ fn eq(&self, other: &$name) -> bool {
+ self.base[..] == other.base[..]
+ }
+ }
+
+ impl crate::cmp::Eq for $name {}
+
+ impl crate::cmp::PartialOrd for $name {
+ fn partial_cmp(&self, other: &$name) -> crate::option::Option<crate::cmp::Ordering> {
+ crate::option::Option::Some(self.cmp(other))
+ }
+ }
+
+ impl crate::cmp::Ord for $name {
+ fn cmp(&self, other: &$name) -> crate::cmp::Ordering {
+ use crate::cmp::max;
+ let sz = max(self.size, other.size);
+ let lhs = self.base[..sz].iter().cloned().rev();
+ let rhs = other.base[..sz].iter().cloned().rev();
+ lhs.cmp(rhs)
+ }
+ }
+
+ impl crate::clone::Clone for $name {
+ fn clone(&self) -> Self {
+ Self { size: self.size, base: self.base }
+ }
+ }
+
+ impl crate::fmt::Debug for $name {
+ fn fmt(&self, f: &mut crate::fmt::Formatter<'_>) -> crate::fmt::Result {
+ let sz = if self.size < 1 { 1 } else { self.size };
+ let digitlen = <$ty>::BITS as usize / 4;
+
+ write!(f, "{:#x}", self.base[sz - 1])?;
+ for &v in self.base[..sz - 1].iter().rev() {
+ write!(f, "_{:01$x}", v, digitlen)?;
+ }
+ crate::result::Result::Ok(())
+ }
+ }
+ };
+}
+
+/// The digit type for `Big32x40`.
+pub type Digit32 = u32;
+
+define_bignum!(Big32x40: type=Digit32, n=40);
+
+// this one is used for testing only.
+#[doc(hidden)]
+pub mod tests {
+ define_bignum!(Big8x3: type=u8, n=3);
+}
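
Before moving on: the carry propagation in `add_small` above is easy to check in isolation. A minimal standalone sketch (not part of the patch), using stable `overflowing_add` in place of the unstable `carrying_add`:

    // Propagate a carry through a fixed-size little-endian digit array,
    // mirroring Big8x3::add_small.
    fn add_small(base: &mut [u8; 3], size: &mut usize, other: u8) {
        let (v, mut carry) = base[0].overflowing_add(other);
        base[0] = v;
        let mut i = 1;
        while carry {
            let (v, c) = base[i].overflowing_add(1);
            base[i] = v;
            carry = c;
            i += 1;
        }
        if i > *size {
            *size = i;
        }
    }

    fn main() {
        // In base-256 digits, 255 + 1 = [0, 1]: the carry ripples into a
        // second digit and the tracked size grows with it.
        let mut base = [255u8, 0, 0];
        let mut size = 1;
        add_small(&mut base, &mut size, 1);
        assert_eq!((base, size), ([0u8, 1, 0], 2));
    }
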
diff --git a/library/core/src/num/dec2flt/common.rs b/library/core/src/num/dec2flt/common.rs
new file mode 100644
index 000000000..17957d7e7
--- /dev/null
+++ b/library/core/src/num/dec2flt/common.rs
@@ -0,0 +1,198 @@
+//! Common utilities, for internal use only.
+
+use crate::ptr;
+
+/// Helper methods to process immutable bytes.
+pub(crate) trait ByteSlice: AsRef<[u8]> {
+ unsafe fn first_unchecked(&self) -> u8 {
+ debug_assert!(!self.is_empty());
+ // SAFETY: safe as long as self is not empty
+ unsafe { *self.as_ref().get_unchecked(0) }
+ }
+
+    /// Returns `true` if the slice contains no elements.
+ fn is_empty(&self) -> bool {
+ self.as_ref().is_empty()
+ }
+
+    /// Check if the slice is at least `n` bytes long.
+ fn check_len(&self, n: usize) -> bool {
+ n <= self.as_ref().len()
+ }
+
+ /// Check if the first character in the slice is equal to c.
+ fn first_is(&self, c: u8) -> bool {
+ self.as_ref().first() == Some(&c)
+ }
+
+ /// Check if the first character in the slice is equal to c1 or c2.
+ fn first_is2(&self, c1: u8, c2: u8) -> bool {
+ if let Some(&c) = self.as_ref().first() { c == c1 || c == c2 } else { false }
+ }
+
+ /// Bounds-checked test if the first character in the slice is a digit.
+ fn first_isdigit(&self) -> bool {
+ if let Some(&c) = self.as_ref().first() { c.is_ascii_digit() } else { false }
+ }
+
+ /// Check if self starts with u with a case-insensitive comparison.
+    /// Check if self starts with u, using a case-insensitive (ASCII) comparison.
+ debug_assert!(self.as_ref().len() >= u.len());
+ let iter = self.as_ref().iter().zip(u.iter());
+ let d = iter.fold(0, |i, (&x, &y)| i | (x ^ y));
+ d == 0 || d == 32
+ }
+
+ /// Get the remaining slice after the first N elements.
+ fn advance(&self, n: usize) -> &[u8] {
+ &self.as_ref()[n..]
+ }
+
+    /// Get the slice after skipping all leading characters equal to c.
+ fn skip_chars(&self, c: u8) -> &[u8] {
+ let mut s = self.as_ref();
+ while s.first_is(c) {
+ s = s.advance(1);
+ }
+ s
+ }
+
+    /// Get the slice after skipping all leading characters equal to c1 or c2.
+ fn skip_chars2(&self, c1: u8, c2: u8) -> &[u8] {
+ let mut s = self.as_ref();
+ while s.first_is2(c1, c2) {
+ s = s.advance(1);
+ }
+ s
+ }
+
+ /// Read 8 bytes as a 64-bit integer in little-endian order.
+ unsafe fn read_u64_unchecked(&self) -> u64 {
+ debug_assert!(self.check_len(8));
+ let src = self.as_ref().as_ptr() as *const u64;
+ // SAFETY: safe as long as self is at least 8 bytes
+ u64::from_le(unsafe { ptr::read_unaligned(src) })
+ }
+
+ /// Try to read the next 8 bytes from the slice.
+ fn read_u64(&self) -> Option<u64> {
+ if self.check_len(8) {
+ // SAFETY: self must be at least 8 bytes.
+ Some(unsafe { self.read_u64_unchecked() })
+ } else {
+ None
+ }
+ }
+
+    /// Calculate the offset of a slice from another.
+ fn offset_from(&self, other: &Self) -> isize {
+ other.as_ref().len() as isize - self.as_ref().len() as isize
+ }
+}
+
+impl ByteSlice for [u8] {}
+
+/// Helper methods to process mutable bytes.
+pub(crate) trait ByteSliceMut: AsMut<[u8]> {
+ /// Write a 64-bit integer as 8 bytes in little-endian order.
+ unsafe fn write_u64_unchecked(&mut self, value: u64) {
+ debug_assert!(self.as_mut().len() >= 8);
+ let dst = self.as_mut().as_mut_ptr() as *mut u64;
+ // NOTE: we must use `write_unaligned`, since dst is not
+ // guaranteed to be properly aligned. Miri will warn us
+ // if we use `write` instead of `write_unaligned`, as expected.
+ // SAFETY: safe as long as self is at least 8 bytes
+ unsafe {
+ ptr::write_unaligned(dst, u64::to_le(value));
+ }
+ }
+}
+
+impl ByteSliceMut for [u8] {}
+
+/// Bytes wrapper with specialized methods for ASCII characters.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub(crate) struct AsciiStr<'a> {
+ slc: &'a [u8],
+}
+
+impl<'a> AsciiStr<'a> {
+ pub fn new(slc: &'a [u8]) -> Self {
+ Self { slc }
+ }
+
+ /// Advance the view by n, advancing it in-place to (n..).
+ pub unsafe fn step_by(&mut self, n: usize) -> &mut Self {
+        // SAFETY: safe as long as n does not exceed the buffer length
+ self.slc = unsafe { self.slc.get_unchecked(n..) };
+ self
+ }
+
+    /// Advance the view by 1, advancing it in-place to (1..).
+ pub unsafe fn step(&mut self) -> &mut Self {
+ // SAFETY: safe as long as self is not empty
+ unsafe { self.step_by(1) }
+ }
+
+ /// Iteratively parse and consume digits from bytes.
+ pub fn parse_digits(&mut self, mut func: impl FnMut(u8)) {
+ while let Some(&c) = self.as_ref().first() {
+ let c = c.wrapping_sub(b'0');
+ if c < 10 {
+ func(c);
+ // SAFETY: self cannot be empty
+ unsafe {
+ self.step();
+ }
+ } else {
+ break;
+ }
+ }
+ }
+}
+
+impl<'a> AsRef<[u8]> for AsciiStr<'a> {
+ #[inline]
+ fn as_ref(&self) -> &[u8] {
+ self.slc
+ }
+}
+
+impl<'a> ByteSlice for AsciiStr<'a> {}
+
+/// Determine if 8 bytes are all decimal digits.
+/// This does not care about the order in which the bytes were loaded.
+pub(crate) fn is_8digits(v: u64) -> bool {
+ let a = v.wrapping_add(0x4646_4646_4646_4646);
+ let b = v.wrapping_sub(0x3030_3030_3030_3030);
+ (a | b) & 0x8080_8080_8080_8080 == 0
+}
+
+/// Iteratively parse and consume digits from bytes.
+pub(crate) fn parse_digits(s: &mut &[u8], mut f: impl FnMut(u8)) {
+ while let Some(&c) = s.get(0) {
+ let c = c.wrapping_sub(b'0');
+ if c < 10 {
+ f(c);
+ *s = s.advance(1);
+ } else {
+ break;
+ }
+ }
+}
+
+/// A custom 64-bit floating point type, representing `f * 2^e`.
+/// `e` is biased, so it can be directly shifted into the exponent bits.
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
+pub struct BiasedFp {
+ /// The significant digits.
+ pub f: u64,
+ /// The biased, binary exponent.
+ pub e: i32,
+}
+
+impl BiasedFp {
+ pub const fn zero_pow2(e: i32) -> Self {
+ Self { f: 0, e }
+ }
+}
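
The `is_8digits` SWAR test above rewards a worked check. A standalone sketch (the function body is copied verbatim; the test harness is ours): for digit bytes 0x30-0x39 neither probe overflows its byte, so no high bit appears, while any non-digit byte trips at least one probe.

    fn is_8digits(v: u64) -> bool {
        let a = v.wrapping_add(0x4646_4646_4646_4646);
        let b = v.wrapping_sub(0x3030_3030_3030_3030);
        (a | b) & 0x8080_8080_8080_8080 == 0
    }

    fn main() {
        // b'9' + 0x46 = 0x7F stays below the high bit; b'e' + 0x46 = 0xAB trips it.
        assert!(is_8digits(u64::from_le_bytes(*b"12345678")));
        assert!(!is_8digits(u64::from_le_bytes(*b"1234e678")));
    }
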
diff --git a/library/core/src/num/dec2flt/decimal.rs b/library/core/src/num/dec2flt/decimal.rs
new file mode 100644
index 000000000..f8edc3625
--- /dev/null
+++ b/library/core/src/num/dec2flt/decimal.rs
@@ -0,0 +1,351 @@
+//! Arbitrary-precision decimal class for fallback algorithms.
+//!
+//! This is only used if the fast-path (native floats) and
+//! the Eisel-Lemire algorithm are unable to unambiguously
+//! determine the float.
+//!
+//! The technique used is "Simple Decimal Conversion", developed
+//! by Nigel Tao and Ken Thompson. A detailed description of the
+//! algorithm can be found in "ParseNumberF64 by Simple Decimal Conversion",
+//! available online: <https://nigeltao.github.io/blog/2020/parse-number-f64-simple.html>.
+
+use crate::num::dec2flt::common::{is_8digits, parse_digits, ByteSlice, ByteSliceMut};
+
+#[derive(Clone)]
+pub struct Decimal {
+ /// The number of significant digits in the decimal.
+ pub num_digits: usize,
+ /// The offset of the decimal point in the significant digits.
+ pub decimal_point: i32,
+ /// If the number of significant digits stored in the decimal is truncated.
+ pub truncated: bool,
+ /// Buffer of the raw digits, in the range [0, 9].
+ pub digits: [u8; Self::MAX_DIGITS],
+}
+
+impl Default for Decimal {
+ fn default() -> Self {
+ Self { num_digits: 0, decimal_point: 0, truncated: false, digits: [0; Self::MAX_DIGITS] }
+ }
+}
+
+impl Decimal {
+ /// The maximum number of digits required to unambiguously round a float.
+ ///
+    /// For a double-precision IEEE-754 float, this requires 767 digits,
+ /// so we store the max digits + 1.
+ ///
+ /// We can exactly represent a float in radix `b` from radix 2 if
+ /// `b` is divisible by 2. This function calculates the exact number of
+    /// `b` is divisible by 2. The formula below calculates the exact number of
+ ///
+ /// According to the "Handbook of Floating Point Arithmetic",
+ /// for IEEE754, with emin being the min exponent, p2 being the
+ /// precision, and b being the radix, the number of digits follows as:
+ ///
+ /// `−emin + p2 + ⌊(emin + 1) log(2, b) − log(1 − 2^(−p2), b)⌋`
+ ///
+ /// For f32, this follows as:
+ /// emin = -126
+ /// p2 = 24
+ ///
+ /// For f64, this follows as:
+ /// emin = -1022
+ /// p2 = 53
+ ///
+ /// In Python:
+ /// `-emin + p2 + math.floor((emin+ 1)*math.log(2, b)-math.log(1-2**(-p2), b))`
+ pub const MAX_DIGITS: usize = 768;
+ /// The max digits that can be exactly represented in a 64-bit integer.
+ pub const MAX_DIGITS_WITHOUT_OVERFLOW: usize = 19;
+ pub const DECIMAL_POINT_RANGE: i32 = 2047;
+
+ /// Append a digit to the buffer.
+ pub fn try_add_digit(&mut self, digit: u8) {
+ if self.num_digits < Self::MAX_DIGITS {
+ self.digits[self.num_digits] = digit;
+ }
+ self.num_digits += 1;
+ }
+
+ /// Trim trailing zeros from the buffer.
+ pub fn trim(&mut self) {
+ // All of the following calls to `Decimal::trim` can't panic because:
+ //
+ // 1. `parse_decimal` sets `num_digits` to a max of `Decimal::MAX_DIGITS`.
+ // 2. `right_shift` sets `num_digits` to `write_index`, which is bounded by `num_digits`.
+        // 3. `left_shift` sets `num_digits` to a max of `Decimal::MAX_DIGITS`.
+ //
+ // Trim is only called in `right_shift` and `left_shift`.
+ debug_assert!(self.num_digits <= Self::MAX_DIGITS);
+ while self.num_digits != 0 && self.digits[self.num_digits - 1] == 0 {
+ self.num_digits -= 1;
+ }
+ }
+
+ pub fn round(&self) -> u64 {
+ if self.num_digits == 0 || self.decimal_point < 0 {
+ return 0;
+ } else if self.decimal_point > 18 {
+ return 0xFFFF_FFFF_FFFF_FFFF_u64;
+ }
+ let dp = self.decimal_point as usize;
+ let mut n = 0_u64;
+ for i in 0..dp {
+ n *= 10;
+ if i < self.num_digits {
+ n += self.digits[i] as u64;
+ }
+ }
+ let mut round_up = false;
+ if dp < self.num_digits {
+ round_up = self.digits[dp] >= 5;
+ if self.digits[dp] == 5 && dp + 1 == self.num_digits {
+ round_up = self.truncated || ((dp != 0) && (1 & self.digits[dp - 1] != 0))
+ }
+ }
+ if round_up {
+ n += 1;
+ }
+ n
+ }
+
+ /// Computes decimal * 2^shift.
+ pub fn left_shift(&mut self, shift: usize) {
+ if self.num_digits == 0 {
+ return;
+ }
+ let num_new_digits = number_of_digits_decimal_left_shift(self, shift);
+ let mut read_index = self.num_digits;
+ let mut write_index = self.num_digits + num_new_digits;
+ let mut n = 0_u64;
+ while read_index != 0 {
+ read_index -= 1;
+ write_index -= 1;
+ n += (self.digits[read_index] as u64) << shift;
+ let quotient = n / 10;
+ let remainder = n - (10 * quotient);
+ if write_index < Self::MAX_DIGITS {
+ self.digits[write_index] = remainder as u8;
+ } else if remainder > 0 {
+ self.truncated = true;
+ }
+ n = quotient;
+ }
+ while n > 0 {
+ write_index -= 1;
+ let quotient = n / 10;
+ let remainder = n - (10 * quotient);
+ if write_index < Self::MAX_DIGITS {
+ self.digits[write_index] = remainder as u8;
+ } else if remainder > 0 {
+ self.truncated = true;
+ }
+ n = quotient;
+ }
+ self.num_digits += num_new_digits;
+ if self.num_digits > Self::MAX_DIGITS {
+ self.num_digits = Self::MAX_DIGITS;
+ }
+ self.decimal_point += num_new_digits as i32;
+ self.trim();
+ }
+
+ /// Computes decimal * 2^-shift.
+ pub fn right_shift(&mut self, shift: usize) {
+ let mut read_index = 0;
+ let mut write_index = 0;
+ let mut n = 0_u64;
+ while (n >> shift) == 0 {
+ if read_index < self.num_digits {
+ n = (10 * n) + self.digits[read_index] as u64;
+ read_index += 1;
+ } else if n == 0 {
+ return;
+ } else {
+ while (n >> shift) == 0 {
+ n *= 10;
+ read_index += 1;
+ }
+ break;
+ }
+ }
+ self.decimal_point -= read_index as i32 - 1;
+ if self.decimal_point < -Self::DECIMAL_POINT_RANGE {
+            // `*self = Self::default()`, but without the overhead of clearing `digits`.
+ self.num_digits = 0;
+ self.decimal_point = 0;
+ self.truncated = false;
+ return;
+ }
+ let mask = (1_u64 << shift) - 1;
+ while read_index < self.num_digits {
+ let new_digit = (n >> shift) as u8;
+ n = (10 * (n & mask)) + self.digits[read_index] as u64;
+ read_index += 1;
+ self.digits[write_index] = new_digit;
+ write_index += 1;
+ }
+ while n > 0 {
+ let new_digit = (n >> shift) as u8;
+ n = 10 * (n & mask);
+ if write_index < Self::MAX_DIGITS {
+ self.digits[write_index] = new_digit;
+ write_index += 1;
+ } else if new_digit > 0 {
+ self.truncated = true;
+ }
+ }
+ self.num_digits = write_index;
+ self.trim();
+ }
+}
+
+/// Parse a big integer representation of the float as a decimal.
+pub fn parse_decimal(mut s: &[u8]) -> Decimal {
+ let mut d = Decimal::default();
+ let start = s;
+ s = s.skip_chars(b'0');
+ parse_digits(&mut s, |digit| d.try_add_digit(digit));
+ if s.first_is(b'.') {
+ s = s.advance(1);
+ let first = s;
+ // Skip leading zeros.
+ if d.num_digits == 0 {
+ s = s.skip_chars(b'0');
+ }
+ while s.len() >= 8 && d.num_digits + 8 < Decimal::MAX_DIGITS {
+ // SAFETY: s is at least 8 bytes.
+ let v = unsafe { s.read_u64_unchecked() };
+ if !is_8digits(v) {
+ break;
+ }
+ // SAFETY: d.num_digits + 8 is less than d.digits.len()
+ unsafe {
+ d.digits[d.num_digits..].write_u64_unchecked(v - 0x3030_3030_3030_3030);
+ }
+ d.num_digits += 8;
+ s = s.advance(8);
+ }
+ parse_digits(&mut s, |digit| d.try_add_digit(digit));
+ d.decimal_point = s.len() as i32 - first.len() as i32;
+ }
+ if d.num_digits != 0 {
+ // Ignore the trailing zeros if there are any
+ let mut n_trailing_zeros = 0;
+ for &c in start[..(start.len() - s.len())].iter().rev() {
+ if c == b'0' {
+ n_trailing_zeros += 1;
+ } else if c != b'.' {
+ break;
+ }
+ }
+ d.decimal_point += n_trailing_zeros as i32;
+ d.num_digits -= n_trailing_zeros;
+ d.decimal_point += d.num_digits as i32;
+ if d.num_digits > Decimal::MAX_DIGITS {
+ d.truncated = true;
+ d.num_digits = Decimal::MAX_DIGITS;
+ }
+ }
+ if s.first_is2(b'e', b'E') {
+ s = s.advance(1);
+ let mut neg_exp = false;
+ if s.first_is(b'-') {
+ neg_exp = true;
+ s = s.advance(1);
+ } else if s.first_is(b'+') {
+ s = s.advance(1);
+ }
+ let mut exp_num = 0_i32;
+ parse_digits(&mut s, |digit| {
+ if exp_num < 0x10000 {
+ exp_num = 10 * exp_num + digit as i32;
+ }
+ });
+ d.decimal_point += if neg_exp { -exp_num } else { exp_num };
+ }
+ for i in d.num_digits..Decimal::MAX_DIGITS_WITHOUT_OVERFLOW {
+ d.digits[i] = 0;
+ }
+ d
+}
+
+fn number_of_digits_decimal_left_shift(d: &Decimal, mut shift: usize) -> usize {
+ #[rustfmt::skip]
+ const TABLE: [u16; 65] = [
+ 0x0000, 0x0800, 0x0801, 0x0803, 0x1006, 0x1009, 0x100D, 0x1812, 0x1817, 0x181D, 0x2024,
+ 0x202B, 0x2033, 0x203C, 0x2846, 0x2850, 0x285B, 0x3067, 0x3073, 0x3080, 0x388E, 0x389C,
+ 0x38AB, 0x38BB, 0x40CC, 0x40DD, 0x40EF, 0x4902, 0x4915, 0x4929, 0x513E, 0x5153, 0x5169,
+ 0x5180, 0x5998, 0x59B0, 0x59C9, 0x61E3, 0x61FD, 0x6218, 0x6A34, 0x6A50, 0x6A6D, 0x6A8B,
+ 0x72AA, 0x72C9, 0x72E9, 0x7B0A, 0x7B2B, 0x7B4D, 0x8370, 0x8393, 0x83B7, 0x83DC, 0x8C02,
+ 0x8C28, 0x8C4F, 0x9477, 0x949F, 0x94C8, 0x9CF2, 0x051C, 0x051C, 0x051C, 0x051C,
+ ];
+ #[rustfmt::skip]
+ const TABLE_POW5: [u8; 0x051C] = [
+ 5, 2, 5, 1, 2, 5, 6, 2, 5, 3, 1, 2, 5, 1, 5, 6, 2, 5, 7, 8, 1, 2, 5, 3, 9, 0, 6, 2, 5, 1,
+ 9, 5, 3, 1, 2, 5, 9, 7, 6, 5, 6, 2, 5, 4, 8, 8, 2, 8, 1, 2, 5, 2, 4, 4, 1, 4, 0, 6, 2, 5,
+ 1, 2, 2, 0, 7, 0, 3, 1, 2, 5, 6, 1, 0, 3, 5, 1, 5, 6, 2, 5, 3, 0, 5, 1, 7, 5, 7, 8, 1, 2,
+ 5, 1, 5, 2, 5, 8, 7, 8, 9, 0, 6, 2, 5, 7, 6, 2, 9, 3, 9, 4, 5, 3, 1, 2, 5, 3, 8, 1, 4, 6,
+ 9, 7, 2, 6, 5, 6, 2, 5, 1, 9, 0, 7, 3, 4, 8, 6, 3, 2, 8, 1, 2, 5, 9, 5, 3, 6, 7, 4, 3, 1,
+ 6, 4, 0, 6, 2, 5, 4, 7, 6, 8, 3, 7, 1, 5, 8, 2, 0, 3, 1, 2, 5, 2, 3, 8, 4, 1, 8, 5, 7, 9,
+ 1, 0, 1, 5, 6, 2, 5, 1, 1, 9, 2, 0, 9, 2, 8, 9, 5, 5, 0, 7, 8, 1, 2, 5, 5, 9, 6, 0, 4, 6,
+ 4, 4, 7, 7, 5, 3, 9, 0, 6, 2, 5, 2, 9, 8, 0, 2, 3, 2, 2, 3, 8, 7, 6, 9, 5, 3, 1, 2, 5, 1,
+ 4, 9, 0, 1, 1, 6, 1, 1, 9, 3, 8, 4, 7, 6, 5, 6, 2, 5, 7, 4, 5, 0, 5, 8, 0, 5, 9, 6, 9, 2,
+ 3, 8, 2, 8, 1, 2, 5, 3, 7, 2, 5, 2, 9, 0, 2, 9, 8, 4, 6, 1, 9, 1, 4, 0, 6, 2, 5, 1, 8, 6,
+ 2, 6, 4, 5, 1, 4, 9, 2, 3, 0, 9, 5, 7, 0, 3, 1, 2, 5, 9, 3, 1, 3, 2, 2, 5, 7, 4, 6, 1, 5,
+ 4, 7, 8, 5, 1, 5, 6, 2, 5, 4, 6, 5, 6, 6, 1, 2, 8, 7, 3, 0, 7, 7, 3, 9, 2, 5, 7, 8, 1, 2,
+ 5, 2, 3, 2, 8, 3, 0, 6, 4, 3, 6, 5, 3, 8, 6, 9, 6, 2, 8, 9, 0, 6, 2, 5, 1, 1, 6, 4, 1, 5,
+ 3, 2, 1, 8, 2, 6, 9, 3, 4, 8, 1, 4, 4, 5, 3, 1, 2, 5, 5, 8, 2, 0, 7, 6, 6, 0, 9, 1, 3, 4,
+ 6, 7, 4, 0, 7, 2, 2, 6, 5, 6, 2, 5, 2, 9, 1, 0, 3, 8, 3, 0, 4, 5, 6, 7, 3, 3, 7, 0, 3, 6,
+ 1, 3, 2, 8, 1, 2, 5, 1, 4, 5, 5, 1, 9, 1, 5, 2, 2, 8, 3, 6, 6, 8, 5, 1, 8, 0, 6, 6, 4, 0,
+ 6, 2, 5, 7, 2, 7, 5, 9, 5, 7, 6, 1, 4, 1, 8, 3, 4, 2, 5, 9, 0, 3, 3, 2, 0, 3, 1, 2, 5, 3,
+ 6, 3, 7, 9, 7, 8, 8, 0, 7, 0, 9, 1, 7, 1, 2, 9, 5, 1, 6, 6, 0, 1, 5, 6, 2, 5, 1, 8, 1, 8,
+ 9, 8, 9, 4, 0, 3, 5, 4, 5, 8, 5, 6, 4, 7, 5, 8, 3, 0, 0, 7, 8, 1, 2, 5, 9, 0, 9, 4, 9, 4,
+ 7, 0, 1, 7, 7, 2, 9, 2, 8, 2, 3, 7, 9, 1, 5, 0, 3, 9, 0, 6, 2, 5, 4, 5, 4, 7, 4, 7, 3, 5,
+ 0, 8, 8, 6, 4, 6, 4, 1, 1, 8, 9, 5, 7, 5, 1, 9, 5, 3, 1, 2, 5, 2, 2, 7, 3, 7, 3, 6, 7, 5,
+ 4, 4, 3, 2, 3, 2, 0, 5, 9, 4, 7, 8, 7, 5, 9, 7, 6, 5, 6, 2, 5, 1, 1, 3, 6, 8, 6, 8, 3, 7,
+ 7, 2, 1, 6, 1, 6, 0, 2, 9, 7, 3, 9, 3, 7, 9, 8, 8, 2, 8, 1, 2, 5, 5, 6, 8, 4, 3, 4, 1, 8,
+ 8, 6, 0, 8, 0, 8, 0, 1, 4, 8, 6, 9, 6, 8, 9, 9, 4, 1, 4, 0, 6, 2, 5, 2, 8, 4, 2, 1, 7, 0,
+ 9, 4, 3, 0, 4, 0, 4, 0, 0, 7, 4, 3, 4, 8, 4, 4, 9, 7, 0, 7, 0, 3, 1, 2, 5, 1, 4, 2, 1, 0,
+ 8, 5, 4, 7, 1, 5, 2, 0, 2, 0, 0, 3, 7, 1, 7, 4, 2, 2, 4, 8, 5, 3, 5, 1, 5, 6, 2, 5, 7, 1,
+ 0, 5, 4, 2, 7, 3, 5, 7, 6, 0, 1, 0, 0, 1, 8, 5, 8, 7, 1, 1, 2, 4, 2, 6, 7, 5, 7, 8, 1, 2,
+ 5, 3, 5, 5, 2, 7, 1, 3, 6, 7, 8, 8, 0, 0, 5, 0, 0, 9, 2, 9, 3, 5, 5, 6, 2, 1, 3, 3, 7, 8,
+ 9, 0, 6, 2, 5, 1, 7, 7, 6, 3, 5, 6, 8, 3, 9, 4, 0, 0, 2, 5, 0, 4, 6, 4, 6, 7, 7, 8, 1, 0,
+ 6, 6, 8, 9, 4, 5, 3, 1, 2, 5, 8, 8, 8, 1, 7, 8, 4, 1, 9, 7, 0, 0, 1, 2, 5, 2, 3, 2, 3, 3,
+ 8, 9, 0, 5, 3, 3, 4, 4, 7, 2, 6, 5, 6, 2, 5, 4, 4, 4, 0, 8, 9, 2, 0, 9, 8, 5, 0, 0, 6, 2,
+ 6, 1, 6, 1, 6, 9, 4, 5, 2, 6, 6, 7, 2, 3, 6, 3, 2, 8, 1, 2, 5, 2, 2, 2, 0, 4, 4, 6, 0, 4,
+ 9, 2, 5, 0, 3, 1, 3, 0, 8, 0, 8, 4, 7, 2, 6, 3, 3, 3, 6, 1, 8, 1, 6, 4, 0, 6, 2, 5, 1, 1,
+ 1, 0, 2, 2, 3, 0, 2, 4, 6, 2, 5, 1, 5, 6, 5, 4, 0, 4, 2, 3, 6, 3, 1, 6, 6, 8, 0, 9, 0, 8,
+ 2, 0, 3, 1, 2, 5, 5, 5, 5, 1, 1, 1, 5, 1, 2, 3, 1, 2, 5, 7, 8, 2, 7, 0, 2, 1, 1, 8, 1, 5,
+ 8, 3, 4, 0, 4, 5, 4, 1, 0, 1, 5, 6, 2, 5, 2, 7, 7, 5, 5, 5, 7, 5, 6, 1, 5, 6, 2, 8, 9, 1,
+ 3, 5, 1, 0, 5, 9, 0, 7, 9, 1, 7, 0, 2, 2, 7, 0, 5, 0, 7, 8, 1, 2, 5, 1, 3, 8, 7, 7, 7, 8,
+ 7, 8, 0, 7, 8, 1, 4, 4, 5, 6, 7, 5, 5, 2, 9, 5, 3, 9, 5, 8, 5, 1, 1, 3, 5, 2, 5, 3, 9, 0,
+ 6, 2, 5, 6, 9, 3, 8, 8, 9, 3, 9, 0, 3, 9, 0, 7, 2, 2, 8, 3, 7, 7, 6, 4, 7, 6, 9, 7, 9, 2,
+ 5, 5, 6, 7, 6, 2, 6, 9, 5, 3, 1, 2, 5, 3, 4, 6, 9, 4, 4, 6, 9, 5, 1, 9, 5, 3, 6, 1, 4, 1,
+ 8, 8, 8, 2, 3, 8, 4, 8, 9, 6, 2, 7, 8, 3, 8, 1, 3, 4, 7, 6, 5, 6, 2, 5, 1, 7, 3, 4, 7, 2,
+ 3, 4, 7, 5, 9, 7, 6, 8, 0, 7, 0, 9, 4, 4, 1, 1, 9, 2, 4, 4, 8, 1, 3, 9, 1, 9, 0, 6, 7, 3,
+ 8, 2, 8, 1, 2, 5, 8, 6, 7, 3, 6, 1, 7, 3, 7, 9, 8, 8, 4, 0, 3, 5, 4, 7, 2, 0, 5, 9, 6, 2,
+ 2, 4, 0, 6, 9, 5, 9, 5, 3, 3, 6, 9, 1, 4, 0, 6, 2, 5,
+ ];
+
+ shift &= 63;
+ let x_a = TABLE[shift];
+ let x_b = TABLE[shift + 1];
+ let num_new_digits = (x_a >> 11) as _;
+ let pow5_a = (0x7FF & x_a) as usize;
+ let pow5_b = (0x7FF & x_b) as usize;
+ let pow5 = &TABLE_POW5[pow5_a..];
+ for (i, &p5) in pow5.iter().enumerate().take(pow5_b - pow5_a) {
+ if i >= d.num_digits {
+ return num_new_digits - 1;
+ } else if d.digits[i] == p5 {
+ continue;
+ } else if d.digits[i] < p5 {
+ return num_new_digits - 1;
+ } else {
+ return num_new_digits;
+ }
+ }
+ num_new_digits
+}
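
The tie-breaking in `Decimal::round` above implements the half-to-even rule used throughout this conversion. A standalone sketch of just that rule (names and signature are ours, not upstream's):

    // Round a kept integer given the first dropped digit. On an exact tie
    // (dropped digit 5, nothing after it), round up only if earlier digits
    // were truncated or the kept value is odd.
    fn round_half_even(kept: u64, next_digit: u8, is_last: bool, truncated: bool) -> u64 {
        let round_up = if next_digit == 5 && is_last {
            truncated || kept & 1 != 0
        } else {
            next_digit >= 5
        };
        kept + round_up as u64
    }

    fn main() {
        assert_eq!(round_half_even(12, 5, true, false), 12); // 12.5 -> 12 (even)
        assert_eq!(round_half_even(13, 5, true, false), 14); // 13.5 -> 14 (even)
        assert_eq!(round_half_even(12, 6, true, false), 13); // 12.6 -> 13
    }
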
diff --git a/library/core/src/num/dec2flt/float.rs b/library/core/src/num/dec2flt/float.rs
new file mode 100644
index 000000000..5921c5ed4
--- /dev/null
+++ b/library/core/src/num/dec2flt/float.rs
@@ -0,0 +1,207 @@
+//! Helper trait for generic float types.
+
+use crate::fmt::{Debug, LowerExp};
+use crate::num::FpCategory;
+use crate::ops::{Add, Div, Mul, Neg};
+
+/// A helper trait to avoid duplicating basically all the conversion code for `f32` and `f64`.
+///
+/// See the parent module's doc comment for why this is necessary.
+///
+/// Should **never ever** be implemented for other types or be used outside the dec2flt module.
+#[doc(hidden)]
+pub trait RawFloat:
+ Sized
+ + Div<Output = Self>
+ + Neg<Output = Self>
+ + Mul<Output = Self>
+ + Add<Output = Self>
+ + LowerExp
+ + PartialEq
+ + PartialOrd
+ + Default
+ + Clone
+ + Copy
+ + Debug
+{
+ const INFINITY: Self;
+ const NEG_INFINITY: Self;
+ const NAN: Self;
+ const NEG_NAN: Self;
+
+ /// The number of bits in the significand, *excluding* the hidden bit.
+ const MANTISSA_EXPLICIT_BITS: usize;
+
+    // Round-to-even only happens for negative values of q
+    // when q ≥ −4 in the 64-bit case and when q ≥ −17 in
+    // the 32-bit case.
+    //
+    // When q ≥ 0, we have that 5^q ≤ 2m+1. In the 64-bit case, we
+    // have 5^q ≤ 2m+1 ≤ 2^54 or q ≤ 23. In the 32-bit case, we have
+    // 5^q ≤ 2m+1 ≤ 2^25 or q ≤ 10.
+    //
+    // When q < 0, we have w ≥ (2m+1)×5^−q. We must have that w < 2^64,
+    // so (2m+1)×5^−q < 2^64. We have that 2m+1 > 2^53 (64-bit case)
+    // or 2m+1 > 2^24 (32-bit case). Hence, we must have 2^53×5^−q < 2^64
+    // (64-bit) and 2^24×5^−q < 2^64 (32-bit). Hence we have 5^−q < 2^11
+    // or q ≥ −4 (64-bit case) and 5^−q < 2^40 or q ≥ −17 (32-bit case).
+    //
+    // Thus we only need to round ties to even when
+    // q ∈ [−4, 23] (in the 64-bit case) or q ∈ [−17, 10]
+    // (in the 32-bit case). In both cases, the power of five (5^|q|)
+    // fits in a 64-bit word.
+ const MIN_EXPONENT_ROUND_TO_EVEN: i32;
+ const MAX_EXPONENT_ROUND_TO_EVEN: i32;
+
+    // Minimum exponent for a fast path case, or `-⌊(MANTISSA_EXPLICIT_BITS+1)/log2(5)⌋`
+ const MIN_EXPONENT_FAST_PATH: i64;
+
+    // Maximum exponent for a fast path case, or `⌊(MANTISSA_EXPLICIT_BITS+1)/log2(5)⌋`
+ const MAX_EXPONENT_FAST_PATH: i64;
+
+ // Maximum exponent that can be represented for a disguised-fast path case.
+ // This is `MAX_EXPONENT_FAST_PATH + ⌊(MANTISSA_EXPLICIT_BITS+1)/log2(10)⌋`
+ const MAX_EXPONENT_DISGUISED_FAST_PATH: i64;
+
+ // Minimum exponent value `-(1 << (EXP_BITS - 1)) + 1`.
+ const MINIMUM_EXPONENT: i32;
+
+ // Largest exponent value `(1 << EXP_BITS) - 1`.
+ const INFINITE_POWER: i32;
+
+ // Index (in bits) of the sign.
+ const SIGN_INDEX: usize;
+
+ // Smallest decimal exponent for a non-zero value.
+ const SMALLEST_POWER_OF_TEN: i32;
+
+ // Largest decimal exponent for a non-infinite value.
+ const LARGEST_POWER_OF_TEN: i32;
+
+ // Maximum mantissa for the fast-path (`1 << 53` for f64).
+ const MAX_MANTISSA_FAST_PATH: u64 = 2_u64 << Self::MANTISSA_EXPLICIT_BITS;
+
+    /// Convert an integer into a float through an `as` cast.
+    /// This is only called in the fast-path algorithm, and therefore
+    /// will not lose precision, since the value will always be
+    /// `<= Self::MAX_MANTISSA_FAST_PATH`.
+ fn from_u64(v: u64) -> Self;
+
+ /// Performs a raw transmutation from an integer.
+ fn from_u64_bits(v: u64) -> Self;
+
+ /// Get a small power-of-ten for fast-path multiplication.
+ fn pow10_fast_path(exponent: usize) -> Self;
+
+ /// Returns the category that this number falls into.
+ fn classify(self) -> FpCategory;
+
+ /// Returns the mantissa, exponent and sign as integers.
+ fn integer_decode(self) -> (u64, i16, i8);
+}
+
+impl RawFloat for f32 {
+ const INFINITY: Self = f32::INFINITY;
+ const NEG_INFINITY: Self = f32::NEG_INFINITY;
+ const NAN: Self = f32::NAN;
+ const NEG_NAN: Self = -f32::NAN;
+
+ const MANTISSA_EXPLICIT_BITS: usize = 23;
+ const MIN_EXPONENT_ROUND_TO_EVEN: i32 = -17;
+ const MAX_EXPONENT_ROUND_TO_EVEN: i32 = 10;
+ const MIN_EXPONENT_FAST_PATH: i64 = -10; // assuming FLT_EVAL_METHOD = 0
+ const MAX_EXPONENT_FAST_PATH: i64 = 10;
+ const MAX_EXPONENT_DISGUISED_FAST_PATH: i64 = 17;
+ const MINIMUM_EXPONENT: i32 = -127;
+ const INFINITE_POWER: i32 = 0xFF;
+ const SIGN_INDEX: usize = 31;
+ const SMALLEST_POWER_OF_TEN: i32 = -65;
+ const LARGEST_POWER_OF_TEN: i32 = 38;
+
+ fn from_u64(v: u64) -> Self {
+ debug_assert!(v <= Self::MAX_MANTISSA_FAST_PATH);
+ v as _
+ }
+
+ fn from_u64_bits(v: u64) -> Self {
+ f32::from_bits((v & 0xFFFFFFFF) as u32)
+ }
+
+ fn pow10_fast_path(exponent: usize) -> Self {
+ #[allow(clippy::use_self)]
+ const TABLE: [f32; 16] =
+ [1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 0., 0., 0., 0., 0.];
+ TABLE[exponent & 15]
+ }
+
+ /// Returns the mantissa, exponent and sign as integers.
+ fn integer_decode(self) -> (u64, i16, i8) {
+ let bits = self.to_bits();
+ let sign: i8 = if bits >> 31 == 0 { 1 } else { -1 };
+ let mut exponent: i16 = ((bits >> 23) & 0xff) as i16;
+ let mantissa =
+ if exponent == 0 { (bits & 0x7fffff) << 1 } else { (bits & 0x7fffff) | 0x800000 };
+ // Exponent bias + mantissa shift
+ exponent -= 127 + 23;
+ (mantissa as u64, exponent, sign)
+ }
+
+ fn classify(self) -> FpCategory {
+ self.classify()
+ }
+}
+
+impl RawFloat for f64 {
+ const INFINITY: Self = f64::INFINITY;
+ const NEG_INFINITY: Self = f64::NEG_INFINITY;
+ const NAN: Self = f64::NAN;
+ const NEG_NAN: Self = -f64::NAN;
+
+ const MANTISSA_EXPLICIT_BITS: usize = 52;
+ const MIN_EXPONENT_ROUND_TO_EVEN: i32 = -4;
+ const MAX_EXPONENT_ROUND_TO_EVEN: i32 = 23;
+ const MIN_EXPONENT_FAST_PATH: i64 = -22; // assuming FLT_EVAL_METHOD = 0
+ const MAX_EXPONENT_FAST_PATH: i64 = 22;
+ const MAX_EXPONENT_DISGUISED_FAST_PATH: i64 = 37;
+ const MINIMUM_EXPONENT: i32 = -1023;
+ const INFINITE_POWER: i32 = 0x7FF;
+ const SIGN_INDEX: usize = 63;
+ const SMALLEST_POWER_OF_TEN: i32 = -342;
+ const LARGEST_POWER_OF_TEN: i32 = 308;
+
+ fn from_u64(v: u64) -> Self {
+ debug_assert!(v <= Self::MAX_MANTISSA_FAST_PATH);
+ v as _
+ }
+
+ fn from_u64_bits(v: u64) -> Self {
+ f64::from_bits(v)
+ }
+
+ fn pow10_fast_path(exponent: usize) -> Self {
+ const TABLE: [f64; 32] = [
+ 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 1e11, 1e12, 1e13, 1e14, 1e15,
+ 1e16, 1e17, 1e18, 1e19, 1e20, 1e21, 1e22, 0., 0., 0., 0., 0., 0., 0., 0., 0.,
+ ];
+ TABLE[exponent & 31]
+ }
+
+ /// Returns the mantissa, exponent and sign as integers.
+ fn integer_decode(self) -> (u64, i16, i8) {
+ let bits = self.to_bits();
+ let sign: i8 = if bits >> 63 == 0 { 1 } else { -1 };
+ let mut exponent: i16 = ((bits >> 52) & 0x7ff) as i16;
+ let mantissa = if exponent == 0 {
+ (bits & 0xfffffffffffff) << 1
+ } else {
+ (bits & 0xfffffffffffff) | 0x10000000000000
+ };
+ // Exponent bias + mantissa shift
+ exponent -= 1023 + 52;
+ (mantissa, exponent, sign)
+ }
+
+ fn classify(self) -> FpCategory {
+ self.classify()
+ }
+}
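
The two `integer_decode` implementations above are identical up to widths. A standalone copy of the f64 version (verbatim logic, our test and comments) makes the hidden-bit handling concrete:

    fn integer_decode(x: f64) -> (u64, i16, i8) {
        let bits = x.to_bits();
        let sign: i8 = if bits >> 63 == 0 { 1 } else { -1 };
        let mut exponent: i16 = ((bits >> 52) & 0x7ff) as i16;
        let mantissa = if exponent == 0 {
            (bits & 0xfffffffffffff) << 1 // subnormal: no hidden bit
        } else {
            (bits & 0xfffffffffffff) | 0x10000000000000 // normal: restore hidden bit
        };
        exponent -= 1023 + 52; // remove the bias and the mantissa shift
        (mantissa, exponent, sign)
    }

    fn main() {
        // 6.5 = 13 * 2^-1; with the hidden bit restored, the mantissa comes
        // back scaled to 13 << 49, paired with exponent -50.
        assert_eq!(integer_decode(6.5), (13u64 << 49, -50, 1));
    }
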
diff --git a/library/core/src/num/dec2flt/fpu.rs b/library/core/src/num/dec2flt/fpu.rs
new file mode 100644
index 000000000..ec5fa45fd
--- /dev/null
+++ b/library/core/src/num/dec2flt/fpu.rs
@@ -0,0 +1,90 @@
+//! Platform-specific assembly instructions to avoid
+//! intermediate rounding on architectures with FPUs.
+
+pub use fpu_precision::set_precision;
+
+// On x86, the x87 FPU is used for float operations if the SSE/SSE2 extensions are not available.
+// The x87 FPU operates with 80 bits of precision by default, which means that operations will
+// round to 80 bits causing double rounding to happen when values are eventually represented as
+// 32/64 bit float values. To overcome this, the FPU control word can be set so that the
+// computations are performed in the desired precision.
+#[cfg(all(target_arch = "x86", not(target_feature = "sse2")))]
+mod fpu_precision {
+ use core::arch::asm;
+ use core::mem::size_of;
+
+ /// A structure used to preserve the original value of the FPU control word, so that it can be
+ /// restored when the structure is dropped.
+ ///
+    /// The x87 FPU control word is a 16-bit register whose fields are as follows:
+ ///
+ /// | 12-15 | 10-11 | 8-9 | 6-7 | 5 | 4 | 3 | 2 | 1 | 0 |
+ /// |------:|------:|----:|----:|---:|---:|---:|---:|---:|---:|
+ /// | | RC | PC | | PM | UM | OM | ZM | DM | IM |
+ ///
+ /// The documentation for all of the fields is available in the IA-32 Architectures Software
+ /// Developer's Manual (Volume 1).
+ ///
+ /// The only field which is relevant for the following code is PC, Precision Control. This
+ /// field determines the precision of the operations performed by the FPU. It can be set to:
+ /// - 0b00, single precision i.e., 32-bits
+ /// - 0b10, double precision i.e., 64-bits
+ /// - 0b11, double extended precision i.e., 80-bits (default state)
+ /// The 0b01 value is reserved and should not be used.
+ pub struct FPUControlWord(u16);
+
+ fn set_cw(cw: u16) {
+ // SAFETY: the `fldcw` instruction has been audited to be able to work correctly with
+ // any `u16`
+ unsafe {
+ asm!(
+ "fldcw word ptr [{}]",
+ in(reg) &cw,
+ options(nostack),
+ )
+ }
+ }
+
+    /// Sets the precision field of the FPU control word to match `T` and returns an `FPUControlWord`.
+ pub fn set_precision<T>() -> FPUControlWord {
+ let mut cw = 0_u16;
+
+ // Compute the value for the Precision Control field that is appropriate for `T`.
+ let cw_precision = match size_of::<T>() {
+ 4 => 0x0000, // 32 bits
+ 8 => 0x0200, // 64 bits
+ _ => 0x0300, // default, 80 bits
+ };
+
+ // Get the original value of the control word to restore it later, when the
+ // `FPUControlWord` structure is dropped
+ // SAFETY: the `fnstcw` instruction has been audited to be able to work correctly with
+ // any `u16`
+ unsafe {
+ asm!(
+ "fnstcw word ptr [{}]",
+ in(reg) &mut cw,
+ options(nostack),
+ )
+ }
+
+ // Set the control word to the desired precision. This is achieved by masking away the old
+ // precision (bits 8 and 9, 0x300) and replacing it with the precision flag computed above.
+ set_cw((cw & 0xFCFF) | cw_precision);
+
+ FPUControlWord(cw)
+ }
+
+ impl Drop for FPUControlWord {
+ fn drop(&mut self) {
+ set_cw(self.0)
+ }
+ }
+}
+
+// In most architectures, floating point operations have an explicit bit size, therefore the
+// precision of the computation is determined on a per-operation basis.
+#[cfg(any(not(target_arch = "x86"), target_feature = "sse2"))]
+mod fpu_precision {
+ pub fn set_precision<T>() {}
+}
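
The masking in `set_precision` is plain bit arithmetic on the control word. A standalone sketch (the 0x037F power-on default comes from Intel's manuals, not from this patch):

    // Replace bits 8-9 (Precision Control) and keep everything else:
    // rounding control, exception masks, and so on.
    fn with_precision(cw: u16, pc: u16) -> u16 {
        (cw & 0xFCFF) | pc
    }

    fn main() {
        let cw = 0x037F; // x87 default: 80-bit precision, all exceptions masked
        assert_eq!(with_precision(cw, 0x0200), 0x027F); // request 64-bit
        assert_eq!(with_precision(cw, 0x0000), 0x007F); // request 32-bit
    }
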
diff --git a/library/core/src/num/dec2flt/lemire.rs b/library/core/src/num/dec2flt/lemire.rs
new file mode 100644
index 000000000..75405f471
--- /dev/null
+++ b/library/core/src/num/dec2flt/lemire.rs
@@ -0,0 +1,166 @@
+//! Implementation of the Eisel-Lemire algorithm.
+
+use crate::num::dec2flt::common::BiasedFp;
+use crate::num::dec2flt::float::RawFloat;
+use crate::num::dec2flt::table::{
+ LARGEST_POWER_OF_FIVE, POWER_OF_FIVE_128, SMALLEST_POWER_OF_FIVE,
+};
+
+/// Compute a float using an extended-precision representation.
+///
+/// Fast conversion of the significant digits and decimal exponent of
+/// a float to an extended representation with a binary float. This
+/// algorithm will accurately parse the vast majority of cases,
+/// and uses a 128-bit representation (with a fallback 192-bit
+/// representation).
+///
+/// This algorithm scales the exponent by the decimal exponent
+/// using pre-computed powers-of-5, and calculates if the
+/// representation can be unambiguously rounded to the nearest
+/// machine float. Near-halfway cases are not handled here,
+/// and are represented by a negative, biased binary exponent.
+///
+/// The algorithm is described in detail in "Daniel Lemire, Number Parsing
+/// at a Gigabyte per Second" in section 5, "Fast Algorithm", and
+/// section 6, "Exact Numbers And Ties", available online:
+/// <https://arxiv.org/abs/2101.11408.pdf>.
+pub fn compute_float<F: RawFloat>(q: i64, mut w: u64) -> BiasedFp {
+ let fp_zero = BiasedFp::zero_pow2(0);
+ let fp_inf = BiasedFp::zero_pow2(F::INFINITE_POWER);
+ let fp_error = BiasedFp::zero_pow2(-1);
+
+ // Short-circuit if the value can only be a literal 0 or infinity.
+ if w == 0 || q < F::SMALLEST_POWER_OF_TEN as i64 {
+ return fp_zero;
+ } else if q > F::LARGEST_POWER_OF_TEN as i64 {
+ return fp_inf;
+ }
+ // Normalize our significant digits, so the most-significant bit is set.
+ let lz = w.leading_zeros();
+ w <<= lz;
+ let (lo, hi) = compute_product_approx(q, w, F::MANTISSA_EXPLICIT_BITS + 3);
+ if lo == 0xFFFF_FFFF_FFFF_FFFF {
+        // If we have failed to approximate w * 5^q with our 128-bit value.
+ // Since the addition of 1 could lead to an overflow which could then
+ // round up over the half-way point, this can lead to improper rounding
+ // of a float.
+ //
+ // However, this can only occur if q ∈ [-27, 55]. The upper bound of q
+        // is 55 because 5^55 < 2^128; however, this can only happen if 5^q > 2^64,
+        // since otherwise the product can be represented in 64 bits, producing
+ // an exact result. For negative exponents, rounding-to-even can
+ // only occur if 5^-q < 2^64.
+ //
+ // For detailed explanations of rounding for negative exponents, see
+ // <https://arxiv.org/pdf/2101.11408.pdf#section.9.1>. For detailed
+ // explanations of rounding for positive exponents, see
+ // <https://arxiv.org/pdf/2101.11408.pdf#section.8>.
+ let inside_safe_exponent = (q >= -27) && (q <= 55);
+ if !inside_safe_exponent {
+ return fp_error;
+ }
+ }
+ let upperbit = (hi >> 63) as i32;
+ let mut mantissa = hi >> (upperbit + 64 - F::MANTISSA_EXPLICIT_BITS as i32 - 3);
+ let mut power2 = power(q as i32) + upperbit - lz as i32 - F::MINIMUM_EXPONENT;
+ if power2 <= 0 {
+ if -power2 + 1 >= 64 {
+ // Have more than 64 bits below the minimum exponent, must be 0.
+ return fp_zero;
+ }
+ // Have a subnormal value.
+ mantissa >>= -power2 + 1;
+ mantissa += mantissa & 1;
+ mantissa >>= 1;
+ power2 = (mantissa >= (1_u64 << F::MANTISSA_EXPLICIT_BITS)) as i32;
+ return BiasedFp { f: mantissa, e: power2 };
+ }
+ // Need to handle rounding ties. Normally, we need to round up,
+    // but if we fall right in between and we have an even basis, we
+ // need to round down.
+ //
+ // This will only occur if:
+    // 1. The lower 64 bits of the 128-bit representation are 0.
+    //    i.e., 5^q fits in a single 64-bit word.
+    // 2. The least-significant bit prior to the truncated mantissa is odd.
+ // 3. All the bits truncated when shifting to mantissa bits + 1 are 0.
+ //
+ // Or, we may fall between two floats: we are exactly halfway.
+ if lo <= 1
+ && q >= F::MIN_EXPONENT_ROUND_TO_EVEN as i64
+ && q <= F::MAX_EXPONENT_ROUND_TO_EVEN as i64
+ && mantissa & 3 == 1
+ && (mantissa << (upperbit + 64 - F::MANTISSA_EXPLICIT_BITS as i32 - 3)) == hi
+ {
+ // Zero the lowest bit, so we don't round up.
+ mantissa &= !1_u64;
+ }
+ // Round-to-even, then shift the significant digits into place.
+ mantissa += mantissa & 1;
+ mantissa >>= 1;
+ if mantissa >= (2_u64 << F::MANTISSA_EXPLICIT_BITS) {
+ // Rounding up overflowed, so the carry bit is set. Set the
+ // mantissa to 1 (only the implicit, hidden bit is set) and
+ // increase the exponent.
+ mantissa = 1_u64 << F::MANTISSA_EXPLICIT_BITS;
+ power2 += 1;
+ }
+ // Zero out the hidden bit.
+ mantissa &= !(1_u64 << F::MANTISSA_EXPLICIT_BITS);
+ if power2 >= F::INFINITE_POWER {
+ // Exponent is above largest normal value, must be infinite.
+ return fp_inf;
+ }
+ BiasedFp { f: mantissa, e: power2 }
+}
+
+/// Calculate a base 2 exponent from a decimal exponent.
+/// This uses a pre-computed integer approximation for
+/// log2(10), where 217706 / 2^16 is accurate for the
+/// entire range of decimal exponents handled here.
+fn power(q: i32) -> i32 {
+ (q.wrapping_mul(152_170 + 65536) >> 16) + 63
+}
+
+fn full_multiplication(a: u64, b: u64) -> (u64, u64) {
+ let r = (a as u128) * (b as u128);
+ (r as u64, (r >> 64) as u64)
+}
+
+// This will compute or rather approximate w * 5^q and return a pair of 64-bit words
+// approximating the result, with the "high" part corresponding to the most significant
+// bits and the low part corresponding to the least significant bits.
+fn compute_product_approx(q: i64, w: u64, precision: usize) -> (u64, u64) {
+ debug_assert!(q >= SMALLEST_POWER_OF_FIVE as i64);
+ debug_assert!(q <= LARGEST_POWER_OF_FIVE as i64);
+ debug_assert!(precision <= 64);
+
+ let mask = if precision < 64 {
+ 0xFFFF_FFFF_FFFF_FFFF_u64 >> precision
+ } else {
+ 0xFFFF_FFFF_FFFF_FFFF_u64
+ };
+
+    // If 5^q < 2^64, then the multiplication always provides an exact value.
+ // That means whenever we need to round ties to even, we always have
+ // an exact value.
+ let index = (q - SMALLEST_POWER_OF_FIVE as i64) as usize;
+ let (lo5, hi5) = POWER_OF_FIVE_128[index];
+    // Only one multiplication is needed as long as there is at least one
+    // zero bit below our needed precision: the explicit mantissa bits,
+    // +1 for the hidden bit, +1 to determine the rounding direction,
+    // +1 for if the computed product has a leading zero.
+ let (mut first_lo, mut first_hi) = full_multiplication(w, lo5);
+ if first_hi & mask == mask {
+ // Need to do a second multiplication to get better precision
+ // for the lower product. This will always be exact
+ // where q is < 55, since 5^55 < 2^128. If this wraps,
+        // then we need to round up the hi product.
+ let (_, second_hi) = full_multiplication(w, hi5);
+ first_lo = first_lo.wrapping_add(second_hi);
+ if second_hi > first_lo {
+ first_hi += 1;
+ }
+ }
+ (first_lo, first_hi)
+}
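
The `power` helper above deserves a numeric check. A standalone sketch (verbatim function, our assertions): 217706 / 2^16 ≈ 3.3219299 approximates log2(10) ≈ 3.3219281 from above, closely enough that the floored product matches the true binary exponent across the accepted range.

    fn power(q: i32) -> i32 {
        (q.wrapping_mul(152_170 + 65536) >> 16) + 63
    }

    fn main() {
        // 10^3 = 2^9.96..., so the unbiased estimate for q = 3 is 9.
        assert_eq!(power(3) - 63, 9);
        // 10^-4 = 2^-13.28..., which floors to -14.
        assert_eq!(power(-4) - 63, -14);
    }
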
diff --git a/library/core/src/num/dec2flt/mod.rs b/library/core/src/num/dec2flt/mod.rs
new file mode 100644
index 000000000..a888ced49
--- /dev/null
+++ b/library/core/src/num/dec2flt/mod.rs
@@ -0,0 +1,269 @@
+//! Converting decimal strings into IEEE 754 binary floating point numbers.
+//!
+//! # Problem statement
+//!
+//! We are given a decimal string such as `12.34e56`. This string consists of integral (`12`),
+//! fractional (`34`), and exponent (`56`) parts. All parts are optional and interpreted as zero
+//! when missing.
+//!
+//! We seek the IEEE 754 floating point number that is closest to the exact value of the decimal
+//! string. It is well-known that many decimal strings do not have terminating representations in
+//! base two, so we round to 0.5 units in the last place (in other words, as well as possible).
+//! Ties, decimal values exactly half-way between two consecutive floats, are resolved with the
+//! half-to-even strategy, also known as banker's rounding.
+//!
+//! Needless to say, this is quite hard, both in terms of implementation complexity and in terms
+//! of CPU cycles taken.
+//!
+//! # Implementation
+//!
+//! First, we ignore signs. Or rather, we remove the sign at the very beginning of the conversion
+//! process and re-apply it at the very end. This is correct in all edge cases since IEEE
+//! floats are symmetric around zero; negating one simply flips the first bit.
+//!
+//! Then we remove the decimal point by adjusting the exponent: Conceptually, `12.34e56` turns
+//! into `1234e54`, which we describe with a positive integer `f = 1234` and an integer `e = 54`.
+//! The `(f, e)` representation is used by almost all code past the parsing stage.
+//!
+//! We then try a long chain of progressively more general and expensive special cases using
+//! machine-sized integers and small, fixed-size floating point numbers (first `f32`/`f64`, then
+//! a type with a 64-bit significand). The extended-precision algorithm
+//! uses the Eisel-Lemire algorithm, which uses a 128-bit (or 192-bit)
+//! representation that can accurately and quickly compute the vast majority
+//! of floats. When all these fail, we bite the bullet and resort to using
+//! a large-decimal representation, shifting the digits into range, calculating
+//! the upper significant bits and exactly rounding to the nearest representation.
+//!
+//! Another aspect that needs attention is the ``RawFloat`` trait by which almost all functions
+//! are parametrized. One might think that it's enough to parse to `f64` and cast the result to
+//! `f32`. Unfortunately this is not the world we live in, and this has nothing to do with using
+//! base two or half-to-even rounding.
+//!
+//! Consider for example two types `d2` and `d4` representing decimal types with two and four
+//! decimal digits respectively, and take "0.01499" as input. Let's use half-up rounding.
+//! Going directly to two decimal digits gives `0.01`, but if we round to four digits first,
+//! we get `0.0150`, which is then rounded up to `0.02`. The same principle applies to other
+//! operations as well: if you want 0.5 ULP accuracy you need to do *everything* in full precision
+//! and round *exactly once, at the end*, by considering all truncated bits at once.
+//!
+//! Primarily, this module and its children implement the algorithms described in:
+//! "Number Parsing at a Gigabyte per Second", available online:
+//! <https://arxiv.org/abs/2101.11408>.
+//!
+//! # Other
+//!
+//! The conversion should *never* panic. There are assertions and explicit panics in the code,
+//! but they should never be triggered and only serve as internal sanity checks. Any panics should
+//! be considered a bug.
+//!
+//! There are unit tests but they are woefully inadequate at ensuring correctness; they only cover
+//! a small percentage of possible errors. Far more extensive tests are located in the directory
+//! `src/etc/test-float-parse` as a Python script.
+//!
+//! A note on integer overflow: Many parts of this file perform arithmetic with the decimal
+//! exponent `e`. Primarily, we shift the decimal point around: Before the first decimal digit,
+//! after the last decimal digit, and so on. This could overflow if done carelessly. We rely on
+//! the parsing submodule to only hand out sufficiently small exponents, where "sufficient" means
+//! "such that the exponent +/- the number of decimal digits fits into a 64 bit integer".
+//! Larger exponents are accepted, but we don't do arithmetic with them; they are immediately
+//! turned into {positive,negative} {zero,infinity}.
+
+#![doc(hidden)]
+#![unstable(
+ feature = "dec2flt",
+ reason = "internal routines only exposed for testing",
+ issue = "none"
+)]
+
+use crate::fmt;
+use crate::str::FromStr;
+
+use self::common::{BiasedFp, ByteSlice};
+use self::float::RawFloat;
+use self::lemire::compute_float;
+use self::parse::{parse_inf_nan, parse_number};
+use self::slow::parse_long_mantissa;
+
+mod common;
+mod decimal;
+mod fpu;
+mod slow;
+mod table;
+// float is used in flt2dec, and all are used in unit tests.
+pub mod float;
+pub mod lemire;
+pub mod number;
+pub mod parse;
+
+macro_rules! from_str_float_impl {
+ ($t:ty) => {
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl FromStr for $t {
+ type Err = ParseFloatError;
+
+ /// Converts a string in base 10 to a float.
+ /// Accepts an optional decimal exponent.
+ ///
+ /// This function accepts strings such as
+ ///
+ /// * '3.14'
+ /// * '-3.14'
+ /// * '2.5E10', or equivalently, '2.5e10'
+ /// * '2.5E-10'
+ /// * '5.'
+ /// * '.5', or, equivalently, '0.5'
+ /// * 'inf', '-inf', '+infinity', 'NaN'
+ ///
+ /// Note that alphabetical characters are not case-sensitive.
+ ///
+ /// Leading and trailing whitespace represent an error.
+ ///
+ /// # Grammar
+ ///
+ /// All strings that adhere to the following [EBNF] grammar when
+ /// lowercased will result in an [`Ok`] being returned:
+ ///
+ /// ```txt
+ /// Float ::= Sign? ( 'inf' | 'infinity' | 'nan' | Number )
+ /// Number ::= ( Digit+ |
+ /// Digit+ '.' Digit* |
+ /// Digit* '.' Digit+ ) Exp?
+ /// Exp ::= 'e' Sign? Digit+
+ /// Sign ::= [+-]
+ /// Digit ::= [0-9]
+ /// ```
+ ///
+ /// [EBNF]: https://www.w3.org/TR/REC-xml/#sec-notation
+ ///
+ /// # Arguments
+ ///
+ /// * src - A string
+ ///
+ /// # Return value
+ ///
+ /// `Err(ParseFloatError)` if the string did not represent a valid
+ /// number. Otherwise, `Ok(n)` where `n` is the closest
+ /// representable floating-point number to the number represented
+ /// by `src` (following the same rules for rounding as for the
+ /// results of primitive operations).
+ #[inline]
+ fn from_str(src: &str) -> Result<Self, ParseFloatError> {
+ dec2flt(src)
+ }
+ }
+ };
+}
+from_str_float_impl!(f32);
+from_str_float_impl!(f64);
+
+/// An error which can be returned when parsing a float.
+///
+/// This error is used as the error type for the [`FromStr`] implementation
+/// for [`f32`] and [`f64`].
+///
+/// # Example
+///
+/// ```
+/// use std::str::FromStr;
+///
+/// if let Err(e) = f64::from_str("a.12") {
+/// println!("Failed conversion to f64: {e}");
+/// }
+/// ```
+#[derive(Debug, Clone, PartialEq, Eq)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct ParseFloatError {
+ kind: FloatErrorKind,
+}
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+enum FloatErrorKind {
+ Empty,
+ Invalid,
+}
+
+impl ParseFloatError {
+ #[unstable(
+ feature = "int_error_internals",
+ reason = "available through Error trait and this method should \
+ not be exposed publicly",
+ issue = "none"
+ )]
+ #[doc(hidden)]
+ pub fn __description(&self) -> &str {
+ match self.kind {
+ FloatErrorKind::Empty => "cannot parse float from empty string",
+ FloatErrorKind::Invalid => "invalid float literal",
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for ParseFloatError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.__description().fmt(f)
+ }
+}
+
+pub(super) fn pfe_empty() -> ParseFloatError {
+ ParseFloatError { kind: FloatErrorKind::Empty }
+}
+
+// Used in unit tests, keep public.
+// This is much better than making FloatErrorKind and ParseFloatError::kind public.
+pub fn pfe_invalid() -> ParseFloatError {
+ ParseFloatError { kind: FloatErrorKind::Invalid }
+}
+
+/// Converts a `BiasedFp` to the closest machine float type.
+fn biased_fp_to_float<T: RawFloat>(x: BiasedFp) -> T {
+ let mut word = x.f;
+ word |= (x.e as u64) << T::MANTISSA_EXPLICIT_BITS;
+ T::from_u64_bits(word)
+}
+
+/// Converts a decimal string into a floating point number.
+pub fn dec2flt<F: RawFloat>(s: &str) -> Result<F, ParseFloatError> {
+ let mut s = s.as_bytes();
+ let c = if let Some(&c) = s.first() {
+ c
+ } else {
+ return Err(pfe_empty());
+ };
+ let negative = c == b'-';
+ if c == b'-' || c == b'+' {
+ s = s.advance(1);
+ }
+ if s.is_empty() {
+ return Err(pfe_invalid());
+ }
+
+ let num = match parse_number(s, negative) {
+ Some(r) => r,
+ None if let Some(value) = parse_inf_nan(s, negative) => return Ok(value),
+ None => return Err(pfe_invalid()),
+ };
+ if let Some(value) = num.try_fast_path::<F>() {
+ return Ok(value);
+ }
+
+ // If significant digits were truncated, then we can have rounding error
+ // only if `mantissa + 1` produces a different result. We also avoid
+ // redundantly using the Eisel-Lemire algorithm if it was unable to
+ // correctly round on the first pass.
+ let mut fp = compute_float::<F>(num.exponent, num.mantissa);
+ if num.many_digits && fp.e >= 0 && fp != compute_float::<F>(num.exponent, num.mantissa + 1) {
+ fp.e = -1;
+ }
+ // Unable to correctly round the float using the Eisel-Lemire algorithm.
+ // Fall back to a slower, but always correct, algorithm.
+ if fp.e < 0 {
+ fp = parse_long_mantissa::<F>(s);
+ }
+
+ let mut float = biased_fp_to_float::<F>(fp);
+ if num.negative {
+ float = -float;
+ }
+ Ok(float)
+}
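+
+// Editorial sketch (not part of the upstream source) of the three tiers
+// above, with assumed-representative inputs:
+//
+//   "1.5"    -- fast path: 15 * 10^-1 is computed exactly in machine floats.
+//   "1.2345678901234567e-300" -- typically settled by the Eisel-Lemire
+//            extended-precision multiply in `compute_float`.
+//   "16777217" (as `f32`) -- a near-halfway literal; when Eisel-Lemire cannot
+//            prove the rounding it reports `fp.e < 0` and the big-integer
+//            fallback `parse_long_mantissa` decides.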
diff --git a/library/core/src/num/dec2flt/number.rs b/library/core/src/num/dec2flt/number.rs
new file mode 100644
index 000000000..405f7e7b6
--- /dev/null
+++ b/library/core/src/num/dec2flt/number.rs
@@ -0,0 +1,86 @@
+//! Representation of a float as the significant digits and exponent.
+
+use crate::num::dec2flt::float::RawFloat;
+use crate::num::dec2flt::fpu::set_precision;
+
+#[rustfmt::skip]
+const INT_POW10: [u64; 16] = [
+ 1,
+ 10,
+ 100,
+ 1000,
+ 10000,
+ 100000,
+ 1000000,
+ 10000000,
+ 100000000,
+ 1000000000,
+ 10000000000,
+ 100000000000,
+ 1000000000000,
+ 10000000000000,
+ 100000000000000,
+ 1000000000000000,
+];
+
+#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
+pub struct Number {
+ pub exponent: i64,
+ pub mantissa: u64,
+ pub negative: bool,
+ pub many_digits: bool,
+}
+
+impl Number {
+ /// Detect if the float can be accurately reconstructed from native floats.
+ fn is_fast_path<F: RawFloat>(&self) -> bool {
+ F::MIN_EXPONENT_FAST_PATH <= self.exponent
+ && self.exponent <= F::MAX_EXPONENT_DISGUISED_FAST_PATH
+ && self.mantissa <= F::MAX_MANTISSA_FAST_PATH
+ && !self.many_digits
+ }
+
+ /// The fast path algorithm using machine-sized integers and floats.
+ ///
+ /// This is extracted into a separate function so that it can be attempted before constructing
+ /// a Decimal. This only works if both the mantissa and the exponent
+ /// can be exactly represented as a machine float, since IEEE-754
+ /// no rounding will occur.
+ ///
+ /// There is an exception: disguised fast-path cases, where we can shift
+ /// powers-of-10 from the exponent to the significant digits.
+ pub fn try_fast_path<F: RawFloat>(&self) -> Option<F> {
+ // The fast path crucially depends on arithmetic being rounded to the correct number of bits
+ // without any intermediate rounding. On x86 (without SSE or SSE2) this requires the precision
+ // of the x87 FPU stack to be changed so that it directly rounds to 64/32 bit.
+ // The `set_precision` function takes care of setting the precision on architectures which
+ // require setting it by changing the global state (like the control word of the x87 FPU).
+ let _cw = set_precision::<F>();
+
+ if self.is_fast_path::<F>() {
+ let mut value = if self.exponent <= F::MAX_EXPONENT_FAST_PATH {
+ // normal fast path
+ let value = F::from_u64(self.mantissa);
+ if self.exponent < 0 {
+ value / F::pow10_fast_path((-self.exponent) as _)
+ } else {
+ value * F::pow10_fast_path(self.exponent as _)
+ }
+ } else {
+ // disguised fast path
+ let shift = self.exponent - F::MAX_EXPONENT_FAST_PATH;
+ let mantissa = self.mantissa.checked_mul(INT_POW10[shift as usize])?;
+ if mantissa > F::MAX_MANTISSA_FAST_PATH {
+ return None;
+ }
+ F::from_u64(mantissa) * F::pow10_fast_path(F::MAX_EXPONENT_FAST_PATH as _)
+ };
+ if self.negative {
+ value = -value;
+ }
+ Some(value)
+ } else {
+ None
+ }
+ }
+}
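+
+// Editorial examples (not part of the upstream source) of the two branches
+// above, using the `f64` limits (`MAX_EXPONENT_FAST_PATH` = 22,
+// `MAX_EXPONENT_DISGUISED_FAST_PATH` = 37, `MAX_MANTISSA_FAST_PATH` = 2^53):
+//
+// * "1.23e2" parses to mantissa = 123, exponent = 0; the normal fast path
+//   computes `123.0 * 10^0` directly in machine floats.
+// * "1e30" parses to mantissa = 1, exponent = 30; since 30 > 22, the
+//   disguised path first folds `10^(30 - 22) = 10^8` into the mantissa
+//   (`1 * 10^8` still fits in 2^53) and then multiplies by the exactly
+//   representable `10^22`.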
diff --git a/library/core/src/num/dec2flt/parse.rs b/library/core/src/num/dec2flt/parse.rs
new file mode 100644
index 000000000..1a90e0d20
--- /dev/null
+++ b/library/core/src/num/dec2flt/parse.rs
@@ -0,0 +1,233 @@
+//! Functions to parse floating-point numbers.
+
+use crate::num::dec2flt::common::{is_8digits, AsciiStr, ByteSlice};
+use crate::num::dec2flt::float::RawFloat;
+use crate::num::dec2flt::number::Number;
+
+const MIN_19DIGIT_INT: u64 = 100_0000_0000_0000_0000;
+
+/// Parse 8 digits, loaded as bytes in little-endian order.
+///
+/// This uses the trick where every digit is in [0x30, 0x39],
+/// and therefore can be parsed in 3 multiplications, much
+/// faster than the usual 8 (one multiplication per digit).
+///
+/// This is based on the algorithm described in "Fast numeric string to
+/// int", available here: <https://johnnylee-sde.github.io/Fast-numeric-string-to-int/>.
+fn parse_8digits(mut v: u64) -> u64 {
+ const MASK: u64 = 0x0000_00FF_0000_00FF;
+ const MUL1: u64 = 0x000F_4240_0000_0064;
+ const MUL2: u64 = 0x0000_2710_0000_0001;
+ v -= 0x3030_3030_3030_3030;
+ v = (v * 10) + (v >> 8); // will not overflow, fits in 63 bits
+ let v1 = (v & MASK).wrapping_mul(MUL1);
+ let v2 = ((v >> 16) & MASK).wrapping_mul(MUL2);
+ ((v1.wrapping_add(v2) >> 32) as u32) as u64
+}
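+
+// Editorial sketch (hypothetical test, not part of the upstream file): a
+// reference check of the SWAR trick above against the obvious digit loop.
+#[cfg(test)]
+#[test]
+fn parse_8digits_matches_reference() {
+    let bytes = *b"12345678";
+    // Reference: fold the digits most-significant-first.
+    let expected = bytes.iter().fold(0u64, |acc, &b| acc * 10 + (b - b'0') as u64);
+    assert_eq!(parse_8digits(u64::from_le_bytes(bytes)), expected); // 12_345_678
+}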
+
+/// Parse digits until a non-digit character is found.
+fn try_parse_digits(s: &mut AsciiStr<'_>, x: &mut u64) {
+ // may cause overflows, to be handled later
+ s.parse_digits(|digit| {
+ *x = x.wrapping_mul(10).wrapping_add(digit as _);
+ });
+}
+
+/// Parse up to 19 digits (the max that can be stored in a 64-bit integer).
+fn try_parse_19digits(s: &mut AsciiStr<'_>, x: &mut u64) {
+ while *x < MIN_19DIGIT_INT {
+ if let Some(&c) = s.as_ref().first() {
+ let digit = c.wrapping_sub(b'0');
+ if digit < 10 {
+ *x = (*x * 10) + digit as u64; // no overflows here
+ // SAFETY: cannot be empty
+ unsafe {
+ s.step();
+ }
+ } else {
+ break;
+ }
+ } else {
+ break;
+ }
+ }
+}
+
+/// Try to parse 8 digits at a time, using an optimized algorithm.
+fn try_parse_8digits(s: &mut AsciiStr<'_>, x: &mut u64) {
+ // may cause overflows, to be handled later
+ if let Some(v) = s.read_u64() {
+ if is_8digits(v) {
+ *x = x.wrapping_mul(1_0000_0000).wrapping_add(parse_8digits(v));
+ // SAFETY: already ensured the buffer was >= 8 bytes in read_u64.
+ unsafe {
+ s.step_by(8);
+ }
+ if let Some(v) = s.read_u64() {
+ if is_8digits(v) {
+ *x = x.wrapping_mul(1_0000_0000).wrapping_add(parse_8digits(v));
+ // SAFETY: already ensured the buffer was >= 8 bytes in read_u64.
+ unsafe {
+ s.step_by(8);
+ }
+ }
+ }
+ }
+ }
+}
+
+/// Parse the scientific notation component of a float.
+fn parse_scientific(s: &mut AsciiStr<'_>) -> Option<i64> {
+ let mut exponent = 0_i64;
+ let mut negative = false;
+ if let Some(&c) = s.as_ref().get(0) {
+ negative = c == b'-';
+ if c == b'-' || c == b'+' {
+ // SAFETY: s cannot be empty
+ unsafe {
+ s.step();
+ }
+ }
+ }
+ if s.first_isdigit() {
+ s.parse_digits(|digit| {
+ // no overflows here, saturate well before overflow
+ if exponent < 0x10000 {
+ exponent = 10 * exponent + digit as i64;
+ }
+ });
+ if negative { Some(-exponent) } else { Some(exponent) }
+ } else {
+ None
+ }
+}
+
+/// Parse a partial, non-special floating point number.
+///
+/// This creates a representation of the float as the
+/// significant digits and the decimal exponent.
+fn parse_partial_number(s: &[u8], negative: bool) -> Option<(Number, usize)> {
+ let mut s = AsciiStr::new(s);
+ let start = s;
+ debug_assert!(!s.is_empty());
+
+ // parse initial digits before dot
+ let mut mantissa = 0_u64;
+ let digits_start = s;
+ try_parse_digits(&mut s, &mut mantissa);
+ let mut n_digits = s.offset_from(&digits_start);
+
+ // handle dot with the following digits
+ let mut n_after_dot = 0;
+ let mut exponent = 0_i64;
+ let int_end = s;
+ if s.first_is(b'.') {
+ // SAFETY: s cannot be empty due to first_is
+ unsafe { s.step() };
+ let before = s;
+ try_parse_8digits(&mut s, &mut mantissa);
+ try_parse_digits(&mut s, &mut mantissa);
+ n_after_dot = s.offset_from(&before);
+ exponent = -n_after_dot as i64;
+ }
+
+ n_digits += n_after_dot;
+ if n_digits == 0 {
+ return None;
+ }
+
+ // handle scientific format
+ let mut exp_number = 0_i64;
+ if s.first_is2(b'e', b'E') {
+ // SAFETY: s cannot be empty
+ unsafe {
+ s.step();
+ }
+ // If None, there are no digits after the exponent marker, so the float is invalid.
+ exp_number = parse_scientific(&mut s)?;
+ exponent += exp_number;
+ }
+
+ let len = s.offset_from(&start) as _;
+
+ // handle uncommon case with many digits
+ if n_digits <= 19 {
+ return Some((Number { exponent, mantissa, negative, many_digits: false }, len));
+ }
+
+ n_digits -= 19;
+ let mut many_digits = false;
+ let mut p = digits_start;
+ while p.first_is2(b'0', b'.') {
+ // SAFETY: p cannot be empty due to first_is2
+ unsafe {
+ // '0' = b'.' + 2
+ n_digits -= p.first_unchecked().saturating_sub(b'0' - 1) as isize;
+ p.step();
+ }
+ }
+ if n_digits > 0 {
+ // at this point we have more than 19 significant digits, let's try again
+ many_digits = true;
+ mantissa = 0;
+ let mut s = digits_start;
+ try_parse_19digits(&mut s, &mut mantissa);
+ exponent = if mantissa >= MIN_19DIGIT_INT {
+ // big int
+ int_end.offset_from(&s)
+ } else {
+ // SAFETY: the next byte must be present and be '.'
+ // We know this is true because we had more than 19
+ // digits previously, so we overflowed a 64-bit integer,
+ // but parsing only the integral digits produced less
+ // than 19 digits. That means we must have a decimal
+ // point, and at least 1 fractional digit.
+ unsafe { s.step() };
+ let before = s;
+ try_parse_19digits(&mut s, &mut mantissa);
+ -s.offset_from(&before)
+ } as i64;
+ // add back the explicit part
+ exponent += exp_number;
+ }
+
+ Some((Number { exponent, mantissa, negative, many_digits }, len))
+}
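+
+// Editorial worked example (not part of the upstream source): for the input
+// "123.45e6", the digit loops above accumulate mantissa = 12345, the two
+// fractional digits contribute exponent = -2, and the scientific part adds 6,
+// yielding `Number { mantissa: 12345, exponent: 4, .. }`, i.e. 12345 * 10^4.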
+
+/// Try to parse a non-special floating point number.
+pub fn parse_number(s: &[u8], negative: bool) -> Option<Number> {
+ if let Some((float, rest)) = parse_partial_number(s, negative) {
+ if rest == s.len() {
+ return Some(float);
+ }
+ }
+ None
+}
+
+/// Parse a partial representation of a special, non-finite float.
+fn parse_partial_inf_nan<F: RawFloat>(s: &[u8]) -> Option<(F, usize)> {
+ fn parse_inf_rest(s: &[u8]) -> usize {
+ if s.len() >= 8 && s[3..].as_ref().starts_with_ignore_case(b"inity") { 8 } else { 3 }
+ }
+ if s.len() >= 3 {
+ if s.starts_with_ignore_case(b"nan") {
+ return Some((F::NAN, 3));
+ } else if s.starts_with_ignore_case(b"inf") {
+ return Some((F::INFINITY, parse_inf_rest(s)));
+ }
+ }
+ None
+}
+
+/// Try to parse a special, non-finite float.
+pub fn parse_inf_nan<F: RawFloat>(s: &[u8], negative: bool) -> Option<F> {
+ if let Some((mut float, rest)) = parse_partial_inf_nan::<F>(s) {
+ if rest == s.len() {
+ if negative {
+ float = -float;
+ }
+ return Some(float);
+ }
+ }
+ None
+}
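+
+// Editorial examples (not part of the upstream source): "inf", "Infinity" and
+// "NAN" all match here, in any case, but only when they span the whole input;
+// "infinite" leaves trailing bytes ("ite") after the recognized "inf", so
+// `rest != s.len()` and the parse is rejected.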
diff --git a/library/core/src/num/dec2flt/slow.rs b/library/core/src/num/dec2flt/slow.rs
new file mode 100644
index 000000000..bf1044033
--- /dev/null
+++ b/library/core/src/num/dec2flt/slow.rs
@@ -0,0 +1,109 @@
+//! Slow, fallback algorithm for cases the Eisel-Lemire algorithm cannot round.
+
+use crate::num::dec2flt::common::BiasedFp;
+use crate::num::dec2flt::decimal::{parse_decimal, Decimal};
+use crate::num::dec2flt::float::RawFloat;
+
+/// Parse the significant digits and biased, binary exponent of a float.
+///
+/// This is a fallback algorithm that uses a big-integer representation
+/// of the float, and therefore is considerably slower than faster
+/// approximations. However, it will always determine how to round
+/// the significant digits to the nearest machine float, allowing
+/// us to handle near half-way cases.
+///
+/// Near half-way cases are halfway between two consecutive machine floats.
+/// For example, the float `16777217.0` has a bitwise representation of
+/// `100000000000000000000000 1`. Rounding to a single-precision float,
+/// the trailing `1` is truncated. Using round-nearest, tie-even, any
+/// value above `16777217.0` must be rounded up to `16777218.0`, while
+/// any value below or equal to `16777217.0` must be rounded down
+/// to `16777216.0`. These near-halfway conversions therefore may require
+/// a large number of digits to unambiguously determine how to round.
+///
+/// The algorithms described here are based on "Processing Long Numbers Quickly",
+/// available here: <https://arxiv.org/pdf/2101.11408.pdf#section.11>.
+pub(crate) fn parse_long_mantissa<F: RawFloat>(s: &[u8]) -> BiasedFp {
+ const MAX_SHIFT: usize = 60;
+ const NUM_POWERS: usize = 19;
+ const POWERS: [u8; 19] =
+ [0, 3, 6, 9, 13, 16, 19, 23, 26, 29, 33, 36, 39, 43, 46, 49, 53, 56, 59];
+
+ let get_shift = |n| {
+ if n < NUM_POWERS { POWERS[n] as usize } else { MAX_SHIFT }
+ };
+
+ let fp_zero = BiasedFp::zero_pow2(0);
+ let fp_inf = BiasedFp::zero_pow2(F::INFINITE_POWER);
+
+ let mut d = parse_decimal(s);
+
+ // Short-circuit if the value can only be a literal 0 or infinity.
+ if d.num_digits == 0 || d.decimal_point < -324 {
+ return fp_zero;
+ } else if d.decimal_point >= 310 {
+ return fp_inf;
+ }
+ let mut exp2 = 0_i32;
+ // Shift right toward (1/2 ... 1].
+ while d.decimal_point > 0 {
+ let n = d.decimal_point as usize;
+ let shift = get_shift(n);
+ d.right_shift(shift);
+ if d.decimal_point < -Decimal::DECIMAL_POINT_RANGE {
+ return fp_zero;
+ }
+ exp2 += shift as i32;
+ }
+ // Shift left toward (1/2 ... 1].
+ while d.decimal_point <= 0 {
+ let shift = if d.decimal_point == 0 {
+ match d.digits[0] {
+ digit if digit >= 5 => break,
+ 0 | 1 => 2,
+ _ => 1,
+ }
+ } else {
+ get_shift((-d.decimal_point) as _)
+ };
+ d.left_shift(shift);
+ if d.decimal_point > Decimal::DECIMAL_POINT_RANGE {
+ return fp_inf;
+ }
+ exp2 -= shift as i32;
+ }
+ // We are now in the range [1/2 ... 1] but the binary format uses [1 ... 2].
+ exp2 -= 1;
+ while (F::MINIMUM_EXPONENT + 1) > exp2 {
+ let mut n = ((F::MINIMUM_EXPONENT + 1) - exp2) as usize;
+ if n > MAX_SHIFT {
+ n = MAX_SHIFT;
+ }
+ d.right_shift(n);
+ exp2 += n as i32;
+ }
+ if (exp2 - F::MINIMUM_EXPONENT) >= F::INFINITE_POWER {
+ return fp_inf;
+ }
+ // Shift the decimal to the hidden bit, and then round the value
+ // to get the high mantissa+1 bits.
+ d.left_shift(F::MANTISSA_EXPLICIT_BITS + 1);
+ let mut mantissa = d.round();
+ if mantissa >= (1_u64 << (F::MANTISSA_EXPLICIT_BITS + 1)) {
+ // Rounding up overflowed to the carry bit, need to
+ // shift back to the hidden bit.
+ d.right_shift(1);
+ exp2 += 1;
+ mantissa = d.round();
+ if (exp2 - F::MINIMUM_EXPONENT) >= F::INFINITE_POWER {
+ return fp_inf;
+ }
+ }
+ let mut power2 = exp2 - F::MINIMUM_EXPONENT;
+ if mantissa < (1_u64 << F::MANTISSA_EXPLICIT_BITS) {
+ power2 -= 1;
+ }
+ // Zero out all the bits above the explicit mantissa bits.
+ mantissa &= (1_u64 << F::MANTISSA_EXPLICIT_BITS) - 1;
+ BiasedFp { f: mantissa, e: power2 }
+}
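+
+// Editorial sketch (hypothetical test, not part of the upstream file): the
+// `16777217.0` halfway case from the doc comment above. Under round-nearest,
+// ties-to-even it must land on 16777216.0, and any extra digit above the
+// halfway point tips it to 16777218.0.
+#[cfg(test)]
+#[test]
+fn near_halfway_rounding() {
+    assert_eq!("16777217.0".parse::<f32>().unwrap(), 16777216.0);
+    assert_eq!("16777217.0000001".parse::<f32>().unwrap(), 16777218.0);
+}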
diff --git a/library/core/src/num/dec2flt/table.rs b/library/core/src/num/dec2flt/table.rs
new file mode 100644
index 000000000..4856074a6
--- /dev/null
+++ b/library/core/src/num/dec2flt/table.rs
@@ -0,0 +1,670 @@
+//! Pre-computed tables of powers of 5 for extended-precision representations.
+//!
+//! These tables enable fast scaling of the significant digits
+//! of a float to the decimal exponent, with minimal rounding
+//! errors, in a 128 or 192-bit representation.
+//!
+//! DO NOT MODIFY: Generated by `src/etc/dec2flt_table.py`
+
+pub const SMALLEST_POWER_OF_FIVE: i32 = -342;
+pub const LARGEST_POWER_OF_FIVE: i32 = 308;
+pub const N_POWERS_OF_FIVE: usize = (LARGEST_POWER_OF_FIVE - SMALLEST_POWER_OF_FIVE + 1) as usize;
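+
+// Editorial note (not part of the upstream source): a decimal power q in
+// [SMALLEST_POWER_OF_FIVE, LARGEST_POWER_OF_FIVE] is looked up as
+//
+//     let (hi, lo) = POWER_OF_FIVE_128[(q - SMALLEST_POWER_OF_FIVE) as usize];
+//
+// where `hi` is the top 64 bits of a normalized 128-bit approximation of 5^q
+// (its leading bit is always set) and `lo` holds the next 64 bits.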
+
+// Use a static rather than a const to avoid long compile times: with a
+// const, the compiler may end up evaluating the entire table and emitting
+// its code multiple times, even if it is stripped out of the final binary.
+#[rustfmt::skip]
+pub static POWER_OF_FIVE_128: [(u64, u64); N_POWERS_OF_FIVE] = [
+ (0xeef453d6923bd65a, 0x113faa2906a13b3f), // 5^-342
+ (0x9558b4661b6565f8, 0x4ac7ca59a424c507), // 5^-341
+ (0xbaaee17fa23ebf76, 0x5d79bcf00d2df649), // 5^-340
+ (0xe95a99df8ace6f53, 0xf4d82c2c107973dc), // 5^-339
+ (0x91d8a02bb6c10594, 0x79071b9b8a4be869), // 5^-338
+ (0xb64ec836a47146f9, 0x9748e2826cdee284), // 5^-337
+ (0xe3e27a444d8d98b7, 0xfd1b1b2308169b25), // 5^-336
+ (0x8e6d8c6ab0787f72, 0xfe30f0f5e50e20f7), // 5^-335
+ (0xb208ef855c969f4f, 0xbdbd2d335e51a935), // 5^-334
+ (0xde8b2b66b3bc4723, 0xad2c788035e61382), // 5^-333
+ (0x8b16fb203055ac76, 0x4c3bcb5021afcc31), // 5^-332
+ (0xaddcb9e83c6b1793, 0xdf4abe242a1bbf3d), // 5^-331
+ (0xd953e8624b85dd78, 0xd71d6dad34a2af0d), // 5^-330
+ (0x87d4713d6f33aa6b, 0x8672648c40e5ad68), // 5^-329
+ (0xa9c98d8ccb009506, 0x680efdaf511f18c2), // 5^-328
+ (0xd43bf0effdc0ba48, 0x212bd1b2566def2), // 5^-327
+ (0x84a57695fe98746d, 0x14bb630f7604b57), // 5^-326
+ (0xa5ced43b7e3e9188, 0x419ea3bd35385e2d), // 5^-325
+ (0xcf42894a5dce35ea, 0x52064cac828675b9), // 5^-324
+ (0x818995ce7aa0e1b2, 0x7343efebd1940993), // 5^-323
+ (0xa1ebfb4219491a1f, 0x1014ebe6c5f90bf8), // 5^-322
+ (0xca66fa129f9b60a6, 0xd41a26e077774ef6), // 5^-321
+ (0xfd00b897478238d0, 0x8920b098955522b4), // 5^-320
+ (0x9e20735e8cb16382, 0x55b46e5f5d5535b0), // 5^-319
+ (0xc5a890362fddbc62, 0xeb2189f734aa831d), // 5^-318
+ (0xf712b443bbd52b7b, 0xa5e9ec7501d523e4), // 5^-317
+ (0x9a6bb0aa55653b2d, 0x47b233c92125366e), // 5^-316
+ (0xc1069cd4eabe89f8, 0x999ec0bb696e840a), // 5^-315
+ (0xf148440a256e2c76, 0xc00670ea43ca250d), // 5^-314
+ (0x96cd2a865764dbca, 0x380406926a5e5728), // 5^-313
+ (0xbc807527ed3e12bc, 0xc605083704f5ecf2), // 5^-312
+ (0xeba09271e88d976b, 0xf7864a44c633682e), // 5^-311
+ (0x93445b8731587ea3, 0x7ab3ee6afbe0211d), // 5^-310
+ (0xb8157268fdae9e4c, 0x5960ea05bad82964), // 5^-309
+ (0xe61acf033d1a45df, 0x6fb92487298e33bd), // 5^-308
+ (0x8fd0c16206306bab, 0xa5d3b6d479f8e056), // 5^-307
+ (0xb3c4f1ba87bc8696, 0x8f48a4899877186c), // 5^-306
+ (0xe0b62e2929aba83c, 0x331acdabfe94de87), // 5^-305
+ (0x8c71dcd9ba0b4925, 0x9ff0c08b7f1d0b14), // 5^-304
+ (0xaf8e5410288e1b6f, 0x7ecf0ae5ee44dd9), // 5^-303
+ (0xdb71e91432b1a24a, 0xc9e82cd9f69d6150), // 5^-302
+ (0x892731ac9faf056e, 0xbe311c083a225cd2), // 5^-301
+ (0xab70fe17c79ac6ca, 0x6dbd630a48aaf406), // 5^-300
+ (0xd64d3d9db981787d, 0x92cbbccdad5b108), // 5^-299
+ (0x85f0468293f0eb4e, 0x25bbf56008c58ea5), // 5^-298
+ (0xa76c582338ed2621, 0xaf2af2b80af6f24e), // 5^-297
+ (0xd1476e2c07286faa, 0x1af5af660db4aee1), // 5^-296
+ (0x82cca4db847945ca, 0x50d98d9fc890ed4d), // 5^-295
+ (0xa37fce126597973c, 0xe50ff107bab528a0), // 5^-294
+ (0xcc5fc196fefd7d0c, 0x1e53ed49a96272c8), // 5^-293
+ (0xff77b1fcbebcdc4f, 0x25e8e89c13bb0f7a), // 5^-292
+ (0x9faacf3df73609b1, 0x77b191618c54e9ac), // 5^-291
+ (0xc795830d75038c1d, 0xd59df5b9ef6a2417), // 5^-290
+ (0xf97ae3d0d2446f25, 0x4b0573286b44ad1d), // 5^-289
+ (0x9becce62836ac577, 0x4ee367f9430aec32), // 5^-288
+ (0xc2e801fb244576d5, 0x229c41f793cda73f), // 5^-287
+ (0xf3a20279ed56d48a, 0x6b43527578c1110f), // 5^-286
+ (0x9845418c345644d6, 0x830a13896b78aaa9), // 5^-285
+ (0xbe5691ef416bd60c, 0x23cc986bc656d553), // 5^-284
+ (0xedec366b11c6cb8f, 0x2cbfbe86b7ec8aa8), // 5^-283
+ (0x94b3a202eb1c3f39, 0x7bf7d71432f3d6a9), // 5^-282
+ (0xb9e08a83a5e34f07, 0xdaf5ccd93fb0cc53), // 5^-281
+ (0xe858ad248f5c22c9, 0xd1b3400f8f9cff68), // 5^-280
+ (0x91376c36d99995be, 0x23100809b9c21fa1), // 5^-279
+ (0xb58547448ffffb2d, 0xabd40a0c2832a78a), // 5^-278
+ (0xe2e69915b3fff9f9, 0x16c90c8f323f516c), // 5^-277
+ (0x8dd01fad907ffc3b, 0xae3da7d97f6792e3), // 5^-276
+ (0xb1442798f49ffb4a, 0x99cd11cfdf41779c), // 5^-275
+ (0xdd95317f31c7fa1d, 0x40405643d711d583), // 5^-274
+ (0x8a7d3eef7f1cfc52, 0x482835ea666b2572), // 5^-273
+ (0xad1c8eab5ee43b66, 0xda3243650005eecf), // 5^-272
+ (0xd863b256369d4a40, 0x90bed43e40076a82), // 5^-271
+ (0x873e4f75e2224e68, 0x5a7744a6e804a291), // 5^-270
+ (0xa90de3535aaae202, 0x711515d0a205cb36), // 5^-269
+ (0xd3515c2831559a83, 0xd5a5b44ca873e03), // 5^-268
+ (0x8412d9991ed58091, 0xe858790afe9486c2), // 5^-267
+ (0xa5178fff668ae0b6, 0x626e974dbe39a872), // 5^-266
+ (0xce5d73ff402d98e3, 0xfb0a3d212dc8128f), // 5^-265
+ (0x80fa687f881c7f8e, 0x7ce66634bc9d0b99), // 5^-264
+ (0xa139029f6a239f72, 0x1c1fffc1ebc44e80), // 5^-263
+ (0xc987434744ac874e, 0xa327ffb266b56220), // 5^-262
+ (0xfbe9141915d7a922, 0x4bf1ff9f0062baa8), // 5^-261
+ (0x9d71ac8fada6c9b5, 0x6f773fc3603db4a9), // 5^-260
+ (0xc4ce17b399107c22, 0xcb550fb4384d21d3), // 5^-259
+ (0xf6019da07f549b2b, 0x7e2a53a146606a48), // 5^-258
+ (0x99c102844f94e0fb, 0x2eda7444cbfc426d), // 5^-257
+ (0xc0314325637a1939, 0xfa911155fefb5308), // 5^-256
+ (0xf03d93eebc589f88, 0x793555ab7eba27ca), // 5^-255
+ (0x96267c7535b763b5, 0x4bc1558b2f3458de), // 5^-254
+ (0xbbb01b9283253ca2, 0x9eb1aaedfb016f16), // 5^-253
+ (0xea9c227723ee8bcb, 0x465e15a979c1cadc), // 5^-252
+ (0x92a1958a7675175f, 0xbfacd89ec191ec9), // 5^-251
+ (0xb749faed14125d36, 0xcef980ec671f667b), // 5^-250
+ (0xe51c79a85916f484, 0x82b7e12780e7401a), // 5^-249
+ (0x8f31cc0937ae58d2, 0xd1b2ecb8b0908810), // 5^-248
+ (0xb2fe3f0b8599ef07, 0x861fa7e6dcb4aa15), // 5^-247
+ (0xdfbdcece67006ac9, 0x67a791e093e1d49a), // 5^-246
+ (0x8bd6a141006042bd, 0xe0c8bb2c5c6d24e0), // 5^-245
+ (0xaecc49914078536d, 0x58fae9f773886e18), // 5^-244
+ (0xda7f5bf590966848, 0xaf39a475506a899e), // 5^-243
+ (0x888f99797a5e012d, 0x6d8406c952429603), // 5^-242
+ (0xaab37fd7d8f58178, 0xc8e5087ba6d33b83), // 5^-241
+ (0xd5605fcdcf32e1d6, 0xfb1e4a9a90880a64), // 5^-240
+ (0x855c3be0a17fcd26, 0x5cf2eea09a55067f), // 5^-239
+ (0xa6b34ad8c9dfc06f, 0xf42faa48c0ea481e), // 5^-238
+ (0xd0601d8efc57b08b, 0xf13b94daf124da26), // 5^-237
+ (0x823c12795db6ce57, 0x76c53d08d6b70858), // 5^-236
+ (0xa2cb1717b52481ed, 0x54768c4b0c64ca6e), // 5^-235
+ (0xcb7ddcdda26da268, 0xa9942f5dcf7dfd09), // 5^-234
+ (0xfe5d54150b090b02, 0xd3f93b35435d7c4c), // 5^-233
+ (0x9efa548d26e5a6e1, 0xc47bc5014a1a6daf), // 5^-232
+ (0xc6b8e9b0709f109a, 0x359ab6419ca1091b), // 5^-231
+ (0xf867241c8cc6d4c0, 0xc30163d203c94b62), // 5^-230
+ (0x9b407691d7fc44f8, 0x79e0de63425dcf1d), // 5^-229
+ (0xc21094364dfb5636, 0x985915fc12f542e4), // 5^-228
+ (0xf294b943e17a2bc4, 0x3e6f5b7b17b2939d), // 5^-227
+ (0x979cf3ca6cec5b5a, 0xa705992ceecf9c42), // 5^-226
+ (0xbd8430bd08277231, 0x50c6ff782a838353), // 5^-225
+ (0xece53cec4a314ebd, 0xa4f8bf5635246428), // 5^-224
+ (0x940f4613ae5ed136, 0x871b7795e136be99), // 5^-223
+ (0xb913179899f68584, 0x28e2557b59846e3f), // 5^-222
+ (0xe757dd7ec07426e5, 0x331aeada2fe589cf), // 5^-221
+ (0x9096ea6f3848984f, 0x3ff0d2c85def7621), // 5^-220
+ (0xb4bca50b065abe63, 0xfed077a756b53a9), // 5^-219
+ (0xe1ebce4dc7f16dfb, 0xd3e8495912c62894), // 5^-218
+ (0x8d3360f09cf6e4bd, 0x64712dd7abbbd95c), // 5^-217
+ (0xb080392cc4349dec, 0xbd8d794d96aacfb3), // 5^-216
+ (0xdca04777f541c567, 0xecf0d7a0fc5583a0), // 5^-215
+ (0x89e42caaf9491b60, 0xf41686c49db57244), // 5^-214
+ (0xac5d37d5b79b6239, 0x311c2875c522ced5), // 5^-213
+ (0xd77485cb25823ac7, 0x7d633293366b828b), // 5^-212
+ (0x86a8d39ef77164bc, 0xae5dff9c02033197), // 5^-211
+ (0xa8530886b54dbdeb, 0xd9f57f830283fdfc), // 5^-210
+ (0xd267caa862a12d66, 0xd072df63c324fd7b), // 5^-209
+ (0x8380dea93da4bc60, 0x4247cb9e59f71e6d), // 5^-208
+ (0xa46116538d0deb78, 0x52d9be85f074e608), // 5^-207
+ (0xcd795be870516656, 0x67902e276c921f8b), // 5^-206
+ (0x806bd9714632dff6, 0xba1cd8a3db53b6), // 5^-205
+ (0xa086cfcd97bf97f3, 0x80e8a40eccd228a4), // 5^-204
+ (0xc8a883c0fdaf7df0, 0x6122cd128006b2cd), // 5^-203
+ (0xfad2a4b13d1b5d6c, 0x796b805720085f81), // 5^-202
+ (0x9cc3a6eec6311a63, 0xcbe3303674053bb0), // 5^-201
+ (0xc3f490aa77bd60fc, 0xbedbfc4411068a9c), // 5^-200
+ (0xf4f1b4d515acb93b, 0xee92fb5515482d44), // 5^-199
+ (0x991711052d8bf3c5, 0x751bdd152d4d1c4a), // 5^-198
+ (0xbf5cd54678eef0b6, 0xd262d45a78a0635d), // 5^-197
+ (0xef340a98172aace4, 0x86fb897116c87c34), // 5^-196
+ (0x9580869f0e7aac0e, 0xd45d35e6ae3d4da0), // 5^-195
+ (0xbae0a846d2195712, 0x8974836059cca109), // 5^-194
+ (0xe998d258869facd7, 0x2bd1a438703fc94b), // 5^-193
+ (0x91ff83775423cc06, 0x7b6306a34627ddcf), // 5^-192
+ (0xb67f6455292cbf08, 0x1a3bc84c17b1d542), // 5^-191
+ (0xe41f3d6a7377eeca, 0x20caba5f1d9e4a93), // 5^-190
+ (0x8e938662882af53e, 0x547eb47b7282ee9c), // 5^-189
+ (0xb23867fb2a35b28d, 0xe99e619a4f23aa43), // 5^-188
+ (0xdec681f9f4c31f31, 0x6405fa00e2ec94d4), // 5^-187
+ (0x8b3c113c38f9f37e, 0xde83bc408dd3dd04), // 5^-186
+ (0xae0b158b4738705e, 0x9624ab50b148d445), // 5^-185
+ (0xd98ddaee19068c76, 0x3badd624dd9b0957), // 5^-184
+ (0x87f8a8d4cfa417c9, 0xe54ca5d70a80e5d6), // 5^-183
+ (0xa9f6d30a038d1dbc, 0x5e9fcf4ccd211f4c), // 5^-182
+ (0xd47487cc8470652b, 0x7647c3200069671f), // 5^-181
+ (0x84c8d4dfd2c63f3b, 0x29ecd9f40041e073), // 5^-180
+ (0xa5fb0a17c777cf09, 0xf468107100525890), // 5^-179
+ (0xcf79cc9db955c2cc, 0x7182148d4066eeb4), // 5^-178
+ (0x81ac1fe293d599bf, 0xc6f14cd848405530), // 5^-177
+ (0xa21727db38cb002f, 0xb8ada00e5a506a7c), // 5^-176
+ (0xca9cf1d206fdc03b, 0xa6d90811f0e4851c), // 5^-175
+ (0xfd442e4688bd304a, 0x908f4a166d1da663), // 5^-174
+ (0x9e4a9cec15763e2e, 0x9a598e4e043287fe), // 5^-173
+ (0xc5dd44271ad3cdba, 0x40eff1e1853f29fd), // 5^-172
+ (0xf7549530e188c128, 0xd12bee59e68ef47c), // 5^-171
+ (0x9a94dd3e8cf578b9, 0x82bb74f8301958ce), // 5^-170
+ (0xc13a148e3032d6e7, 0xe36a52363c1faf01), // 5^-169
+ (0xf18899b1bc3f8ca1, 0xdc44e6c3cb279ac1), // 5^-168
+ (0x96f5600f15a7b7e5, 0x29ab103a5ef8c0b9), // 5^-167
+ (0xbcb2b812db11a5de, 0x7415d448f6b6f0e7), // 5^-166
+ (0xebdf661791d60f56, 0x111b495b3464ad21), // 5^-165
+ (0x936b9fcebb25c995, 0xcab10dd900beec34), // 5^-164
+ (0xb84687c269ef3bfb, 0x3d5d514f40eea742), // 5^-163
+ (0xe65829b3046b0afa, 0xcb4a5a3112a5112), // 5^-162
+ (0x8ff71a0fe2c2e6dc, 0x47f0e785eaba72ab), // 5^-161
+ (0xb3f4e093db73a093, 0x59ed216765690f56), // 5^-160
+ (0xe0f218b8d25088b8, 0x306869c13ec3532c), // 5^-159
+ (0x8c974f7383725573, 0x1e414218c73a13fb), // 5^-158
+ (0xafbd2350644eeacf, 0xe5d1929ef90898fa), // 5^-157
+ (0xdbac6c247d62a583, 0xdf45f746b74abf39), // 5^-156
+ (0x894bc396ce5da772, 0x6b8bba8c328eb783), // 5^-155
+ (0xab9eb47c81f5114f, 0x66ea92f3f326564), // 5^-154
+ (0xd686619ba27255a2, 0xc80a537b0efefebd), // 5^-153
+ (0x8613fd0145877585, 0xbd06742ce95f5f36), // 5^-152
+ (0xa798fc4196e952e7, 0x2c48113823b73704), // 5^-151
+ (0xd17f3b51fca3a7a0, 0xf75a15862ca504c5), // 5^-150
+ (0x82ef85133de648c4, 0x9a984d73dbe722fb), // 5^-149
+ (0xa3ab66580d5fdaf5, 0xc13e60d0d2e0ebba), // 5^-148
+ (0xcc963fee10b7d1b3, 0x318df905079926a8), // 5^-147
+ (0xffbbcfe994e5c61f, 0xfdf17746497f7052), // 5^-146
+ (0x9fd561f1fd0f9bd3, 0xfeb6ea8bedefa633), // 5^-145
+ (0xc7caba6e7c5382c8, 0xfe64a52ee96b8fc0), // 5^-144
+ (0xf9bd690a1b68637b, 0x3dfdce7aa3c673b0), // 5^-143
+ (0x9c1661a651213e2d, 0x6bea10ca65c084e), // 5^-142
+ (0xc31bfa0fe5698db8, 0x486e494fcff30a62), // 5^-141
+ (0xf3e2f893dec3f126, 0x5a89dba3c3efccfa), // 5^-140
+ (0x986ddb5c6b3a76b7, 0xf89629465a75e01c), // 5^-139
+ (0xbe89523386091465, 0xf6bbb397f1135823), // 5^-138
+ (0xee2ba6c0678b597f, 0x746aa07ded582e2c), // 5^-137
+ (0x94db483840b717ef, 0xa8c2a44eb4571cdc), // 5^-136
+ (0xba121a4650e4ddeb, 0x92f34d62616ce413), // 5^-135
+ (0xe896a0d7e51e1566, 0x77b020baf9c81d17), // 5^-134
+ (0x915e2486ef32cd60, 0xace1474dc1d122e), // 5^-133
+ (0xb5b5ada8aaff80b8, 0xd819992132456ba), // 5^-132
+ (0xe3231912d5bf60e6, 0x10e1fff697ed6c69), // 5^-131
+ (0x8df5efabc5979c8f, 0xca8d3ffa1ef463c1), // 5^-130
+ (0xb1736b96b6fd83b3, 0xbd308ff8a6b17cb2), // 5^-129
+ (0xddd0467c64bce4a0, 0xac7cb3f6d05ddbde), // 5^-128
+ (0x8aa22c0dbef60ee4, 0x6bcdf07a423aa96b), // 5^-127
+ (0xad4ab7112eb3929d, 0x86c16c98d2c953c6), // 5^-126
+ (0xd89d64d57a607744, 0xe871c7bf077ba8b7), // 5^-125
+ (0x87625f056c7c4a8b, 0x11471cd764ad4972), // 5^-124
+ (0xa93af6c6c79b5d2d, 0xd598e40d3dd89bcf), // 5^-123
+ (0xd389b47879823479, 0x4aff1d108d4ec2c3), // 5^-122
+ (0x843610cb4bf160cb, 0xcedf722a585139ba), // 5^-121
+ (0xa54394fe1eedb8fe, 0xc2974eb4ee658828), // 5^-120
+ (0xce947a3da6a9273e, 0x733d226229feea32), // 5^-119
+ (0x811ccc668829b887, 0x806357d5a3f525f), // 5^-118
+ (0xa163ff802a3426a8, 0xca07c2dcb0cf26f7), // 5^-117
+ (0xc9bcff6034c13052, 0xfc89b393dd02f0b5), // 5^-116
+ (0xfc2c3f3841f17c67, 0xbbac2078d443ace2), // 5^-115
+ (0x9d9ba7832936edc0, 0xd54b944b84aa4c0d), // 5^-114
+ (0xc5029163f384a931, 0xa9e795e65d4df11), // 5^-113
+ (0xf64335bcf065d37d, 0x4d4617b5ff4a16d5), // 5^-112
+ (0x99ea0196163fa42e, 0x504bced1bf8e4e45), // 5^-111
+ (0xc06481fb9bcf8d39, 0xe45ec2862f71e1d6), // 5^-110
+ (0xf07da27a82c37088, 0x5d767327bb4e5a4c), // 5^-109
+ (0x964e858c91ba2655, 0x3a6a07f8d510f86f), // 5^-108
+ (0xbbe226efb628afea, 0x890489f70a55368b), // 5^-107
+ (0xeadab0aba3b2dbe5, 0x2b45ac74ccea842e), // 5^-106
+ (0x92c8ae6b464fc96f, 0x3b0b8bc90012929d), // 5^-105
+ (0xb77ada0617e3bbcb, 0x9ce6ebb40173744), // 5^-104
+ (0xe55990879ddcaabd, 0xcc420a6a101d0515), // 5^-103
+ (0x8f57fa54c2a9eab6, 0x9fa946824a12232d), // 5^-102
+ (0xb32df8e9f3546564, 0x47939822dc96abf9), // 5^-101
+ (0xdff9772470297ebd, 0x59787e2b93bc56f7), // 5^-100
+ (0x8bfbea76c619ef36, 0x57eb4edb3c55b65a), // 5^-99
+ (0xaefae51477a06b03, 0xede622920b6b23f1), // 5^-98
+ (0xdab99e59958885c4, 0xe95fab368e45eced), // 5^-97
+ (0x88b402f7fd75539b, 0x11dbcb0218ebb414), // 5^-96
+ (0xaae103b5fcd2a881, 0xd652bdc29f26a119), // 5^-95
+ (0xd59944a37c0752a2, 0x4be76d3346f0495f), // 5^-94
+ (0x857fcae62d8493a5, 0x6f70a4400c562ddb), // 5^-93
+ (0xa6dfbd9fb8e5b88e, 0xcb4ccd500f6bb952), // 5^-92
+ (0xd097ad07a71f26b2, 0x7e2000a41346a7a7), // 5^-91
+ (0x825ecc24c873782f, 0x8ed400668c0c28c8), // 5^-90
+ (0xa2f67f2dfa90563b, 0x728900802f0f32fa), // 5^-89
+ (0xcbb41ef979346bca, 0x4f2b40a03ad2ffb9), // 5^-88
+ (0xfea126b7d78186bc, 0xe2f610c84987bfa8), // 5^-87
+ (0x9f24b832e6b0f436, 0xdd9ca7d2df4d7c9), // 5^-86
+ (0xc6ede63fa05d3143, 0x91503d1c79720dbb), // 5^-85
+ (0xf8a95fcf88747d94, 0x75a44c6397ce912a), // 5^-84
+ (0x9b69dbe1b548ce7c, 0xc986afbe3ee11aba), // 5^-83
+ (0xc24452da229b021b, 0xfbe85badce996168), // 5^-82
+ (0xf2d56790ab41c2a2, 0xfae27299423fb9c3), // 5^-81
+ (0x97c560ba6b0919a5, 0xdccd879fc967d41a), // 5^-80
+ (0xbdb6b8e905cb600f, 0x5400e987bbc1c920), // 5^-79
+ (0xed246723473e3813, 0x290123e9aab23b68), // 5^-78
+ (0x9436c0760c86e30b, 0xf9a0b6720aaf6521), // 5^-77
+ (0xb94470938fa89bce, 0xf808e40e8d5b3e69), // 5^-76
+ (0xe7958cb87392c2c2, 0xb60b1d1230b20e04), // 5^-75
+ (0x90bd77f3483bb9b9, 0xb1c6f22b5e6f48c2), // 5^-74
+ (0xb4ecd5f01a4aa828, 0x1e38aeb6360b1af3), // 5^-73
+ (0xe2280b6c20dd5232, 0x25c6da63c38de1b0), // 5^-72
+ (0x8d590723948a535f, 0x579c487e5a38ad0e), // 5^-71
+ (0xb0af48ec79ace837, 0x2d835a9df0c6d851), // 5^-70
+ (0xdcdb1b2798182244, 0xf8e431456cf88e65), // 5^-69
+ (0x8a08f0f8bf0f156b, 0x1b8e9ecb641b58ff), // 5^-68
+ (0xac8b2d36eed2dac5, 0xe272467e3d222f3f), // 5^-67
+ (0xd7adf884aa879177, 0x5b0ed81dcc6abb0f), // 5^-66
+ (0x86ccbb52ea94baea, 0x98e947129fc2b4e9), // 5^-65
+ (0xa87fea27a539e9a5, 0x3f2398d747b36224), // 5^-64
+ (0xd29fe4b18e88640e, 0x8eec7f0d19a03aad), // 5^-63
+ (0x83a3eeeef9153e89, 0x1953cf68300424ac), // 5^-62
+ (0xa48ceaaab75a8e2b, 0x5fa8c3423c052dd7), // 5^-61
+ (0xcdb02555653131b6, 0x3792f412cb06794d), // 5^-60
+ (0x808e17555f3ebf11, 0xe2bbd88bbee40bd0), // 5^-59
+ (0xa0b19d2ab70e6ed6, 0x5b6aceaeae9d0ec4), // 5^-58
+ (0xc8de047564d20a8b, 0xf245825a5a445275), // 5^-57
+ (0xfb158592be068d2e, 0xeed6e2f0f0d56712), // 5^-56
+ (0x9ced737bb6c4183d, 0x55464dd69685606b), // 5^-55
+ (0xc428d05aa4751e4c, 0xaa97e14c3c26b886), // 5^-54
+ (0xf53304714d9265df, 0xd53dd99f4b3066a8), // 5^-53
+ (0x993fe2c6d07b7fab, 0xe546a8038efe4029), // 5^-52
+ (0xbf8fdb78849a5f96, 0xde98520472bdd033), // 5^-51
+ (0xef73d256a5c0f77c, 0x963e66858f6d4440), // 5^-50
+ (0x95a8637627989aad, 0xdde7001379a44aa8), // 5^-49
+ (0xbb127c53b17ec159, 0x5560c018580d5d52), // 5^-48
+ (0xe9d71b689dde71af, 0xaab8f01e6e10b4a6), // 5^-47
+ (0x9226712162ab070d, 0xcab3961304ca70e8), // 5^-46
+ (0xb6b00d69bb55c8d1, 0x3d607b97c5fd0d22), // 5^-45
+ (0xe45c10c42a2b3b05, 0x8cb89a7db77c506a), // 5^-44
+ (0x8eb98a7a9a5b04e3, 0x77f3608e92adb242), // 5^-43
+ (0xb267ed1940f1c61c, 0x55f038b237591ed3), // 5^-42
+ (0xdf01e85f912e37a3, 0x6b6c46dec52f6688), // 5^-41
+ (0x8b61313bbabce2c6, 0x2323ac4b3b3da015), // 5^-40
+ (0xae397d8aa96c1b77, 0xabec975e0a0d081a), // 5^-39
+ (0xd9c7dced53c72255, 0x96e7bd358c904a21), // 5^-38
+ (0x881cea14545c7575, 0x7e50d64177da2e54), // 5^-37
+ (0xaa242499697392d2, 0xdde50bd1d5d0b9e9), // 5^-36
+ (0xd4ad2dbfc3d07787, 0x955e4ec64b44e864), // 5^-35
+ (0x84ec3c97da624ab4, 0xbd5af13bef0b113e), // 5^-34
+ (0xa6274bbdd0fadd61, 0xecb1ad8aeacdd58e), // 5^-33
+ (0xcfb11ead453994ba, 0x67de18eda5814af2), // 5^-32
+ (0x81ceb32c4b43fcf4, 0x80eacf948770ced7), // 5^-31
+ (0xa2425ff75e14fc31, 0xa1258379a94d028d), // 5^-30
+ (0xcad2f7f5359a3b3e, 0x96ee45813a04330), // 5^-29
+ (0xfd87b5f28300ca0d, 0x8bca9d6e188853fc), // 5^-28
+ (0x9e74d1b791e07e48, 0x775ea264cf55347e), // 5^-27
+ (0xc612062576589dda, 0x95364afe032a819e), // 5^-26
+ (0xf79687aed3eec551, 0x3a83ddbd83f52205), // 5^-25
+ (0x9abe14cd44753b52, 0xc4926a9672793543), // 5^-24
+ (0xc16d9a0095928a27, 0x75b7053c0f178294), // 5^-23
+ (0xf1c90080baf72cb1, 0x5324c68b12dd6339), // 5^-22
+ (0x971da05074da7bee, 0xd3f6fc16ebca5e04), // 5^-21
+ (0xbce5086492111aea, 0x88f4bb1ca6bcf585), // 5^-20
+ (0xec1e4a7db69561a5, 0x2b31e9e3d06c32e6), // 5^-19
+ (0x9392ee8e921d5d07, 0x3aff322e62439fd0), // 5^-18
+ (0xb877aa3236a4b449, 0x9befeb9fad487c3), // 5^-17
+ (0xe69594bec44de15b, 0x4c2ebe687989a9b4), // 5^-16
+ (0x901d7cf73ab0acd9, 0xf9d37014bf60a11), // 5^-15
+ (0xb424dc35095cd80f, 0x538484c19ef38c95), // 5^-14
+ (0xe12e13424bb40e13, 0x2865a5f206b06fba), // 5^-13
+ (0x8cbccc096f5088cb, 0xf93f87b7442e45d4), // 5^-12
+ (0xafebff0bcb24aafe, 0xf78f69a51539d749), // 5^-11
+ (0xdbe6fecebdedd5be, 0xb573440e5a884d1c), // 5^-10
+ (0x89705f4136b4a597, 0x31680a88f8953031), // 5^-9
+ (0xabcc77118461cefc, 0xfdc20d2b36ba7c3e), // 5^-8
+ (0xd6bf94d5e57a42bc, 0x3d32907604691b4d), // 5^-7
+ (0x8637bd05af6c69b5, 0xa63f9a49c2c1b110), // 5^-6
+ (0xa7c5ac471b478423, 0xfcf80dc33721d54), // 5^-5
+ (0xd1b71758e219652b, 0xd3c36113404ea4a9), // 5^-4
+ (0x83126e978d4fdf3b, 0x645a1cac083126ea), // 5^-3
+ (0xa3d70a3d70a3d70a, 0x3d70a3d70a3d70a4), // 5^-2
+ (0xcccccccccccccccc, 0xcccccccccccccccd), // 5^-1
+ (0x8000000000000000, 0x0), // 5^0
+ (0xa000000000000000, 0x0), // 5^1
+ (0xc800000000000000, 0x0), // 5^2
+ (0xfa00000000000000, 0x0), // 5^3
+ (0x9c40000000000000, 0x0), // 5^4
+ (0xc350000000000000, 0x0), // 5^5
+ (0xf424000000000000, 0x0), // 5^6
+ (0x9896800000000000, 0x0), // 5^7
+ (0xbebc200000000000, 0x0), // 5^8
+ (0xee6b280000000000, 0x0), // 5^9
+ (0x9502f90000000000, 0x0), // 5^10
+ (0xba43b74000000000, 0x0), // 5^11
+ (0xe8d4a51000000000, 0x0), // 5^12
+ (0x9184e72a00000000, 0x0), // 5^13
+ (0xb5e620f480000000, 0x0), // 5^14
+ (0xe35fa931a0000000, 0x0), // 5^15
+ (0x8e1bc9bf04000000, 0x0), // 5^16
+ (0xb1a2bc2ec5000000, 0x0), // 5^17
+ (0xde0b6b3a76400000, 0x0), // 5^18
+ (0x8ac7230489e80000, 0x0), // 5^19
+ (0xad78ebc5ac620000, 0x0), // 5^20
+ (0xd8d726b7177a8000, 0x0), // 5^21
+ (0x878678326eac9000, 0x0), // 5^22
+ (0xa968163f0a57b400, 0x0), // 5^23
+ (0xd3c21bcecceda100, 0x0), // 5^24
+ (0x84595161401484a0, 0x0), // 5^25
+ (0xa56fa5b99019a5c8, 0x0), // 5^26
+ (0xcecb8f27f4200f3a, 0x0), // 5^27
+ (0x813f3978f8940984, 0x4000000000000000), // 5^28
+ (0xa18f07d736b90be5, 0x5000000000000000), // 5^29
+ (0xc9f2c9cd04674ede, 0xa400000000000000), // 5^30
+ (0xfc6f7c4045812296, 0x4d00000000000000), // 5^31
+ (0x9dc5ada82b70b59d, 0xf020000000000000), // 5^32
+ (0xc5371912364ce305, 0x6c28000000000000), // 5^33
+ (0xf684df56c3e01bc6, 0xc732000000000000), // 5^34
+ (0x9a130b963a6c115c, 0x3c7f400000000000), // 5^35
+ (0xc097ce7bc90715b3, 0x4b9f100000000000), // 5^36
+ (0xf0bdc21abb48db20, 0x1e86d40000000000), // 5^37
+ (0x96769950b50d88f4, 0x1314448000000000), // 5^38
+ (0xbc143fa4e250eb31, 0x17d955a000000000), // 5^39
+ (0xeb194f8e1ae525fd, 0x5dcfab0800000000), // 5^40
+ (0x92efd1b8d0cf37be, 0x5aa1cae500000000), // 5^41
+ (0xb7abc627050305ad, 0xf14a3d9e40000000), // 5^42
+ (0xe596b7b0c643c719, 0x6d9ccd05d0000000), // 5^43
+ (0x8f7e32ce7bea5c6f, 0xe4820023a2000000), // 5^44
+ (0xb35dbf821ae4f38b, 0xdda2802c8a800000), // 5^45
+ (0xe0352f62a19e306e, 0xd50b2037ad200000), // 5^46
+ (0x8c213d9da502de45, 0x4526f422cc340000), // 5^47
+ (0xaf298d050e4395d6, 0x9670b12b7f410000), // 5^48
+ (0xdaf3f04651d47b4c, 0x3c0cdd765f114000), // 5^49
+ (0x88d8762bf324cd0f, 0xa5880a69fb6ac800), // 5^50
+ (0xab0e93b6efee0053, 0x8eea0d047a457a00), // 5^51
+ (0xd5d238a4abe98068, 0x72a4904598d6d880), // 5^52
+ (0x85a36366eb71f041, 0x47a6da2b7f864750), // 5^53
+ (0xa70c3c40a64e6c51, 0x999090b65f67d924), // 5^54
+ (0xd0cf4b50cfe20765, 0xfff4b4e3f741cf6d), // 5^55
+ (0x82818f1281ed449f, 0xbff8f10e7a8921a4), // 5^56
+ (0xa321f2d7226895c7, 0xaff72d52192b6a0d), // 5^57
+ (0xcbea6f8ceb02bb39, 0x9bf4f8a69f764490), // 5^58
+ (0xfee50b7025c36a08, 0x2f236d04753d5b4), // 5^59
+ (0x9f4f2726179a2245, 0x1d762422c946590), // 5^60
+ (0xc722f0ef9d80aad6, 0x424d3ad2b7b97ef5), // 5^61
+ (0xf8ebad2b84e0d58b, 0xd2e0898765a7deb2), // 5^62
+ (0x9b934c3b330c8577, 0x63cc55f49f88eb2f), // 5^63
+ (0xc2781f49ffcfa6d5, 0x3cbf6b71c76b25fb), // 5^64
+ (0xf316271c7fc3908a, 0x8bef464e3945ef7a), // 5^65
+ (0x97edd871cfda3a56, 0x97758bf0e3cbb5ac), // 5^66
+ (0xbde94e8e43d0c8ec, 0x3d52eeed1cbea317), // 5^67
+ (0xed63a231d4c4fb27, 0x4ca7aaa863ee4bdd), // 5^68
+ (0x945e455f24fb1cf8, 0x8fe8caa93e74ef6a), // 5^69
+ (0xb975d6b6ee39e436, 0xb3e2fd538e122b44), // 5^70
+ (0xe7d34c64a9c85d44, 0x60dbbca87196b616), // 5^71
+ (0x90e40fbeea1d3a4a, 0xbc8955e946fe31cd), // 5^72
+ (0xb51d13aea4a488dd, 0x6babab6398bdbe41), // 5^73
+ (0xe264589a4dcdab14, 0xc696963c7eed2dd1), // 5^74
+ (0x8d7eb76070a08aec, 0xfc1e1de5cf543ca2), // 5^75
+ (0xb0de65388cc8ada8, 0x3b25a55f43294bcb), // 5^76
+ (0xdd15fe86affad912, 0x49ef0eb713f39ebe), // 5^77
+ (0x8a2dbf142dfcc7ab, 0x6e3569326c784337), // 5^78
+ (0xacb92ed9397bf996, 0x49c2c37f07965404), // 5^79
+ (0xd7e77a8f87daf7fb, 0xdc33745ec97be906), // 5^80
+ (0x86f0ac99b4e8dafd, 0x69a028bb3ded71a3), // 5^81
+ (0xa8acd7c0222311bc, 0xc40832ea0d68ce0c), // 5^82
+ (0xd2d80db02aabd62b, 0xf50a3fa490c30190), // 5^83
+ (0x83c7088e1aab65db, 0x792667c6da79e0fa), // 5^84
+ (0xa4b8cab1a1563f52, 0x577001b891185938), // 5^85
+ (0xcde6fd5e09abcf26, 0xed4c0226b55e6f86), // 5^86
+ (0x80b05e5ac60b6178, 0x544f8158315b05b4), // 5^87
+ (0xa0dc75f1778e39d6, 0x696361ae3db1c721), // 5^88
+ (0xc913936dd571c84c, 0x3bc3a19cd1e38e9), // 5^89
+ (0xfb5878494ace3a5f, 0x4ab48a04065c723), // 5^90
+ (0x9d174b2dcec0e47b, 0x62eb0d64283f9c76), // 5^91
+ (0xc45d1df942711d9a, 0x3ba5d0bd324f8394), // 5^92
+ (0xf5746577930d6500, 0xca8f44ec7ee36479), // 5^93
+ (0x9968bf6abbe85f20, 0x7e998b13cf4e1ecb), // 5^94
+ (0xbfc2ef456ae276e8, 0x9e3fedd8c321a67e), // 5^95
+ (0xefb3ab16c59b14a2, 0xc5cfe94ef3ea101e), // 5^96
+ (0x95d04aee3b80ece5, 0xbba1f1d158724a12), // 5^97
+ (0xbb445da9ca61281f, 0x2a8a6e45ae8edc97), // 5^98
+ (0xea1575143cf97226, 0xf52d09d71a3293bd), // 5^99
+ (0x924d692ca61be758, 0x593c2626705f9c56), // 5^100
+ (0xb6e0c377cfa2e12e, 0x6f8b2fb00c77836c), // 5^101
+ (0xe498f455c38b997a, 0xb6dfb9c0f956447), // 5^102
+ (0x8edf98b59a373fec, 0x4724bd4189bd5eac), // 5^103
+ (0xb2977ee300c50fe7, 0x58edec91ec2cb657), // 5^104
+ (0xdf3d5e9bc0f653e1, 0x2f2967b66737e3ed), // 5^105
+ (0x8b865b215899f46c, 0xbd79e0d20082ee74), // 5^106
+ (0xae67f1e9aec07187, 0xecd8590680a3aa11), // 5^107
+ (0xda01ee641a708de9, 0xe80e6f4820cc9495), // 5^108
+ (0x884134fe908658b2, 0x3109058d147fdcdd), // 5^109
+ (0xaa51823e34a7eede, 0xbd4b46f0599fd415), // 5^110
+ (0xd4e5e2cdc1d1ea96, 0x6c9e18ac7007c91a), // 5^111
+ (0x850fadc09923329e, 0x3e2cf6bc604ddb0), // 5^112
+ (0xa6539930bf6bff45, 0x84db8346b786151c), // 5^113
+ (0xcfe87f7cef46ff16, 0xe612641865679a63), // 5^114
+ (0x81f14fae158c5f6e, 0x4fcb7e8f3f60c07e), // 5^115
+ (0xa26da3999aef7749, 0xe3be5e330f38f09d), // 5^116
+ (0xcb090c8001ab551c, 0x5cadf5bfd3072cc5), // 5^117
+ (0xfdcb4fa002162a63, 0x73d9732fc7c8f7f6), // 5^118
+ (0x9e9f11c4014dda7e, 0x2867e7fddcdd9afa), // 5^119
+ (0xc646d63501a1511d, 0xb281e1fd541501b8), // 5^120
+ (0xf7d88bc24209a565, 0x1f225a7ca91a4226), // 5^121
+ (0x9ae757596946075f, 0x3375788de9b06958), // 5^122
+ (0xc1a12d2fc3978937, 0x52d6b1641c83ae), // 5^123
+ (0xf209787bb47d6b84, 0xc0678c5dbd23a49a), // 5^124
+ (0x9745eb4d50ce6332, 0xf840b7ba963646e0), // 5^125
+ (0xbd176620a501fbff, 0xb650e5a93bc3d898), // 5^126
+ (0xec5d3fa8ce427aff, 0xa3e51f138ab4cebe), // 5^127
+ (0x93ba47c980e98cdf, 0xc66f336c36b10137), // 5^128
+ (0xb8a8d9bbe123f017, 0xb80b0047445d4184), // 5^129
+ (0xe6d3102ad96cec1d, 0xa60dc059157491e5), // 5^130
+ (0x9043ea1ac7e41392, 0x87c89837ad68db2f), // 5^131
+ (0xb454e4a179dd1877, 0x29babe4598c311fb), // 5^132
+ (0xe16a1dc9d8545e94, 0xf4296dd6fef3d67a), // 5^133
+ (0x8ce2529e2734bb1d, 0x1899e4a65f58660c), // 5^134
+ (0xb01ae745b101e9e4, 0x5ec05dcff72e7f8f), // 5^135
+ (0xdc21a1171d42645d, 0x76707543f4fa1f73), // 5^136
+ (0x899504ae72497eba, 0x6a06494a791c53a8), // 5^137
+ (0xabfa45da0edbde69, 0x487db9d17636892), // 5^138
+ (0xd6f8d7509292d603, 0x45a9d2845d3c42b6), // 5^139
+ (0x865b86925b9bc5c2, 0xb8a2392ba45a9b2), // 5^140
+ (0xa7f26836f282b732, 0x8e6cac7768d7141e), // 5^141
+ (0xd1ef0244af2364ff, 0x3207d795430cd926), // 5^142
+ (0x8335616aed761f1f, 0x7f44e6bd49e807b8), // 5^143
+ (0xa402b9c5a8d3a6e7, 0x5f16206c9c6209a6), // 5^144
+ (0xcd036837130890a1, 0x36dba887c37a8c0f), // 5^145
+ (0x802221226be55a64, 0xc2494954da2c9789), // 5^146
+ (0xa02aa96b06deb0fd, 0xf2db9baa10b7bd6c), // 5^147
+ (0xc83553c5c8965d3d, 0x6f92829494e5acc7), // 5^148
+ (0xfa42a8b73abbf48c, 0xcb772339ba1f17f9), // 5^149
+ (0x9c69a97284b578d7, 0xff2a760414536efb), // 5^150
+ (0xc38413cf25e2d70d, 0xfef5138519684aba), // 5^151
+ (0xf46518c2ef5b8cd1, 0x7eb258665fc25d69), // 5^152
+ (0x98bf2f79d5993802, 0xef2f773ffbd97a61), // 5^153
+ (0xbeeefb584aff8603, 0xaafb550ffacfd8fa), // 5^154
+ (0xeeaaba2e5dbf6784, 0x95ba2a53f983cf38), // 5^155
+ (0x952ab45cfa97a0b2, 0xdd945a747bf26183), // 5^156
+ (0xba756174393d88df, 0x94f971119aeef9e4), // 5^157
+ (0xe912b9d1478ceb17, 0x7a37cd5601aab85d), // 5^158
+ (0x91abb422ccb812ee, 0xac62e055c10ab33a), // 5^159
+ (0xb616a12b7fe617aa, 0x577b986b314d6009), // 5^160
+ (0xe39c49765fdf9d94, 0xed5a7e85fda0b80b), // 5^161
+ (0x8e41ade9fbebc27d, 0x14588f13be847307), // 5^162
+ (0xb1d219647ae6b31c, 0x596eb2d8ae258fc8), // 5^163
+ (0xde469fbd99a05fe3, 0x6fca5f8ed9aef3bb), // 5^164
+ (0x8aec23d680043bee, 0x25de7bb9480d5854), // 5^165
+ (0xada72ccc20054ae9, 0xaf561aa79a10ae6a), // 5^166
+ (0xd910f7ff28069da4, 0x1b2ba1518094da04), // 5^167
+ (0x87aa9aff79042286, 0x90fb44d2f05d0842), // 5^168
+ (0xa99541bf57452b28, 0x353a1607ac744a53), // 5^169
+ (0xd3fa922f2d1675f2, 0x42889b8997915ce8), // 5^170
+ (0x847c9b5d7c2e09b7, 0x69956135febada11), // 5^171
+ (0xa59bc234db398c25, 0x43fab9837e699095), // 5^172
+ (0xcf02b2c21207ef2e, 0x94f967e45e03f4bb), // 5^173
+ (0x8161afb94b44f57d, 0x1d1be0eebac278f5), // 5^174
+ (0xa1ba1ba79e1632dc, 0x6462d92a69731732), // 5^175
+ (0xca28a291859bbf93, 0x7d7b8f7503cfdcfe), // 5^176
+ (0xfcb2cb35e702af78, 0x5cda735244c3d43e), // 5^177
+ (0x9defbf01b061adab, 0x3a0888136afa64a7), // 5^178
+ (0xc56baec21c7a1916, 0x88aaa1845b8fdd0), // 5^179
+ (0xf6c69a72a3989f5b, 0x8aad549e57273d45), // 5^180
+ (0x9a3c2087a63f6399, 0x36ac54e2f678864b), // 5^181
+ (0xc0cb28a98fcf3c7f, 0x84576a1bb416a7dd), // 5^182
+ (0xf0fdf2d3f3c30b9f, 0x656d44a2a11c51d5), // 5^183
+ (0x969eb7c47859e743, 0x9f644ae5a4b1b325), // 5^184
+ (0xbc4665b596706114, 0x873d5d9f0dde1fee), // 5^185
+ (0xeb57ff22fc0c7959, 0xa90cb506d155a7ea), // 5^186
+ (0x9316ff75dd87cbd8, 0x9a7f12442d588f2), // 5^187
+ (0xb7dcbf5354e9bece, 0xc11ed6d538aeb2f), // 5^188
+ (0xe5d3ef282a242e81, 0x8f1668c8a86da5fa), // 5^189
+ (0x8fa475791a569d10, 0xf96e017d694487bc), // 5^190
+ (0xb38d92d760ec4455, 0x37c981dcc395a9ac), // 5^191
+ (0xe070f78d3927556a, 0x85bbe253f47b1417), // 5^192
+ (0x8c469ab843b89562, 0x93956d7478ccec8e), // 5^193
+ (0xaf58416654a6babb, 0x387ac8d1970027b2), // 5^194
+ (0xdb2e51bfe9d0696a, 0x6997b05fcc0319e), // 5^195
+ (0x88fcf317f22241e2, 0x441fece3bdf81f03), // 5^196
+ (0xab3c2fddeeaad25a, 0xd527e81cad7626c3), // 5^197
+ (0xd60b3bd56a5586f1, 0x8a71e223d8d3b074), // 5^198
+ (0x85c7056562757456, 0xf6872d5667844e49), // 5^199
+ (0xa738c6bebb12d16c, 0xb428f8ac016561db), // 5^200
+ (0xd106f86e69d785c7, 0xe13336d701beba52), // 5^201
+ (0x82a45b450226b39c, 0xecc0024661173473), // 5^202
+ (0xa34d721642b06084, 0x27f002d7f95d0190), // 5^203
+ (0xcc20ce9bd35c78a5, 0x31ec038df7b441f4), // 5^204
+ (0xff290242c83396ce, 0x7e67047175a15271), // 5^205
+ (0x9f79a169bd203e41, 0xf0062c6e984d386), // 5^206
+ (0xc75809c42c684dd1, 0x52c07b78a3e60868), // 5^207
+ (0xf92e0c3537826145, 0xa7709a56ccdf8a82), // 5^208
+ (0x9bbcc7a142b17ccb, 0x88a66076400bb691), // 5^209
+ (0xc2abf989935ddbfe, 0x6acff893d00ea435), // 5^210
+ (0xf356f7ebf83552fe, 0x583f6b8c4124d43), // 5^211
+ (0x98165af37b2153de, 0xc3727a337a8b704a), // 5^212
+ (0xbe1bf1b059e9a8d6, 0x744f18c0592e4c5c), // 5^213
+ (0xeda2ee1c7064130c, 0x1162def06f79df73), // 5^214
+ (0x9485d4d1c63e8be7, 0x8addcb5645ac2ba8), // 5^215
+ (0xb9a74a0637ce2ee1, 0x6d953e2bd7173692), // 5^216
+ (0xe8111c87c5c1ba99, 0xc8fa8db6ccdd0437), // 5^217
+ (0x910ab1d4db9914a0, 0x1d9c9892400a22a2), // 5^218
+ (0xb54d5e4a127f59c8, 0x2503beb6d00cab4b), // 5^219
+ (0xe2a0b5dc971f303a, 0x2e44ae64840fd61d), // 5^220
+ (0x8da471a9de737e24, 0x5ceaecfed289e5d2), // 5^221
+ (0xb10d8e1456105dad, 0x7425a83e872c5f47), // 5^222
+ (0xdd50f1996b947518, 0xd12f124e28f77719), // 5^223
+ (0x8a5296ffe33cc92f, 0x82bd6b70d99aaa6f), // 5^224
+ (0xace73cbfdc0bfb7b, 0x636cc64d1001550b), // 5^225
+ (0xd8210befd30efa5a, 0x3c47f7e05401aa4e), // 5^226
+ (0x8714a775e3e95c78, 0x65acfaec34810a71), // 5^227
+ (0xa8d9d1535ce3b396, 0x7f1839a741a14d0d), // 5^228
+ (0xd31045a8341ca07c, 0x1ede48111209a050), // 5^229
+ (0x83ea2b892091e44d, 0x934aed0aab460432), // 5^230
+ (0xa4e4b66b68b65d60, 0xf81da84d5617853f), // 5^231
+ (0xce1de40642e3f4b9, 0x36251260ab9d668e), // 5^232
+ (0x80d2ae83e9ce78f3, 0xc1d72b7c6b426019), // 5^233
+ (0xa1075a24e4421730, 0xb24cf65b8612f81f), // 5^234
+ (0xc94930ae1d529cfc, 0xdee033f26797b627), // 5^235
+ (0xfb9b7cd9a4a7443c, 0x169840ef017da3b1), // 5^236
+ (0x9d412e0806e88aa5, 0x8e1f289560ee864e), // 5^237
+ (0xc491798a08a2ad4e, 0xf1a6f2bab92a27e2), // 5^238
+ (0xf5b5d7ec8acb58a2, 0xae10af696774b1db), // 5^239
+ (0x9991a6f3d6bf1765, 0xacca6da1e0a8ef29), // 5^240
+ (0xbff610b0cc6edd3f, 0x17fd090a58d32af3), // 5^241
+ (0xeff394dcff8a948e, 0xddfc4b4cef07f5b0), // 5^242
+ (0x95f83d0a1fb69cd9, 0x4abdaf101564f98e), // 5^243
+ (0xbb764c4ca7a4440f, 0x9d6d1ad41abe37f1), // 5^244
+ (0xea53df5fd18d5513, 0x84c86189216dc5ed), // 5^245
+ (0x92746b9be2f8552c, 0x32fd3cf5b4e49bb4), // 5^246
+ (0xb7118682dbb66a77, 0x3fbc8c33221dc2a1), // 5^247
+ (0xe4d5e82392a40515, 0xfabaf3feaa5334a), // 5^248
+ (0x8f05b1163ba6832d, 0x29cb4d87f2a7400e), // 5^249
+ (0xb2c71d5bca9023f8, 0x743e20e9ef511012), // 5^250
+ (0xdf78e4b2bd342cf6, 0x914da9246b255416), // 5^251
+ (0x8bab8eefb6409c1a, 0x1ad089b6c2f7548e), // 5^252
+ (0xae9672aba3d0c320, 0xa184ac2473b529b1), // 5^253
+ (0xda3c0f568cc4f3e8, 0xc9e5d72d90a2741e), // 5^254
+ (0x8865899617fb1871, 0x7e2fa67c7a658892), // 5^255
+ (0xaa7eebfb9df9de8d, 0xddbb901b98feeab7), // 5^256
+ (0xd51ea6fa85785631, 0x552a74227f3ea565), // 5^257
+ (0x8533285c936b35de, 0xd53a88958f87275f), // 5^258
+ (0xa67ff273b8460356, 0x8a892abaf368f137), // 5^259
+ (0xd01fef10a657842c, 0x2d2b7569b0432d85), // 5^260
+ (0x8213f56a67f6b29b, 0x9c3b29620e29fc73), // 5^261
+ (0xa298f2c501f45f42, 0x8349f3ba91b47b8f), // 5^262
+ (0xcb3f2f7642717713, 0x241c70a936219a73), // 5^263
+ (0xfe0efb53d30dd4d7, 0xed238cd383aa0110), // 5^264
+ (0x9ec95d1463e8a506, 0xf4363804324a40aa), // 5^265
+ (0xc67bb4597ce2ce48, 0xb143c6053edcd0d5), // 5^266
+ (0xf81aa16fdc1b81da, 0xdd94b7868e94050a), // 5^267
+ (0x9b10a4e5e9913128, 0xca7cf2b4191c8326), // 5^268
+ (0xc1d4ce1f63f57d72, 0xfd1c2f611f63a3f0), // 5^269
+ (0xf24a01a73cf2dccf, 0xbc633b39673c8cec), // 5^270
+ (0x976e41088617ca01, 0xd5be0503e085d813), // 5^271
+ (0xbd49d14aa79dbc82, 0x4b2d8644d8a74e18), // 5^272
+ (0xec9c459d51852ba2, 0xddf8e7d60ed1219e), // 5^273
+ (0x93e1ab8252f33b45, 0xcabb90e5c942b503), // 5^274
+ (0xb8da1662e7b00a17, 0x3d6a751f3b936243), // 5^275
+ (0xe7109bfba19c0c9d, 0xcc512670a783ad4), // 5^276
+ (0x906a617d450187e2, 0x27fb2b80668b24c5), // 5^277
+ (0xb484f9dc9641e9da, 0xb1f9f660802dedf6), // 5^278
+ (0xe1a63853bbd26451, 0x5e7873f8a0396973), // 5^279
+ (0x8d07e33455637eb2, 0xdb0b487b6423e1e8), // 5^280
+ (0xb049dc016abc5e5f, 0x91ce1a9a3d2cda62), // 5^281
+ (0xdc5c5301c56b75f7, 0x7641a140cc7810fb), // 5^282
+ (0x89b9b3e11b6329ba, 0xa9e904c87fcb0a9d), // 5^283
+ (0xac2820d9623bf429, 0x546345fa9fbdcd44), // 5^284
+ (0xd732290fbacaf133, 0xa97c177947ad4095), // 5^285
+ (0x867f59a9d4bed6c0, 0x49ed8eabcccc485d), // 5^286
+ (0xa81f301449ee8c70, 0x5c68f256bfff5a74), // 5^287
+ (0xd226fc195c6a2f8c, 0x73832eec6fff3111), // 5^288
+ (0x83585d8fd9c25db7, 0xc831fd53c5ff7eab), // 5^289
+ (0xa42e74f3d032f525, 0xba3e7ca8b77f5e55), // 5^290
+ (0xcd3a1230c43fb26f, 0x28ce1bd2e55f35eb), // 5^291
+ (0x80444b5e7aa7cf85, 0x7980d163cf5b81b3), // 5^292
+ (0xa0555e361951c366, 0xd7e105bcc332621f), // 5^293
+ (0xc86ab5c39fa63440, 0x8dd9472bf3fefaa7), // 5^294
+ (0xfa856334878fc150, 0xb14f98f6f0feb951), // 5^295
+ (0x9c935e00d4b9d8d2, 0x6ed1bf9a569f33d3), // 5^296
+ (0xc3b8358109e84f07, 0xa862f80ec4700c8), // 5^297
+ (0xf4a642e14c6262c8, 0xcd27bb612758c0fa), // 5^298
+ (0x98e7e9cccfbd7dbd, 0x8038d51cb897789c), // 5^299
+ (0xbf21e44003acdd2c, 0xe0470a63e6bd56c3), // 5^300
+ (0xeeea5d5004981478, 0x1858ccfce06cac74), // 5^301
+ (0x95527a5202df0ccb, 0xf37801e0c43ebc8), // 5^302
+ (0xbaa718e68396cffd, 0xd30560258f54e6ba), // 5^303
+ (0xe950df20247c83fd, 0x47c6b82ef32a2069), // 5^304
+ (0x91d28b7416cdd27e, 0x4cdc331d57fa5441), // 5^305
+ (0xb6472e511c81471d, 0xe0133fe4adf8e952), // 5^306
+ (0xe3d8f9e563a198e5, 0x58180fddd97723a6), // 5^307
+ (0x8e679c2f5e44ff8f, 0x570f09eaa7ea7648), // 5^308
+];
diff --git a/library/core/src/num/diy_float.rs b/library/core/src/num/diy_float.rs
new file mode 100644
index 000000000..ce7f6475d
--- /dev/null
+++ b/library/core/src/num/diy_float.rs
@@ -0,0 +1,81 @@
+//! Extended precision "soft float", for internal use only.
+
+// This module is only for dec2flt and flt2dec, and only public because of coretests.
+// It is not intended to ever be stabilized.
+#![doc(hidden)]
+#![unstable(
+ feature = "core_private_diy_float",
+ reason = "internal routines only exposed for testing",
+ issue = "none"
+)]
+
+/// A custom 64-bit floating point type, representing `f * 2^e`.
+#[derive(Copy, Clone, Debug)]
+#[doc(hidden)]
+pub struct Fp {
+ /// The integer mantissa.
+ pub f: u64,
+ /// The exponent in base 2.
+ pub e: i16,
+}
+
+impl Fp {
+ /// Returns a correctly rounded product of itself and `other`.
+ pub fn mul(&self, other: &Fp) -> Fp {
+ const MASK: u64 = 0xffffffff;
+ let a = self.f >> 32;
+ let b = self.f & MASK;
+ let c = other.f >> 32;
+ let d = other.f & MASK;
+ let ac = a * c;
+ let bc = b * c;
+ let ad = a * d;
+ let bd = b * d;
+ let tmp = (bd >> 32) + (ad & MASK) + (bc & MASK) + (1 << 31) /* round */;
+ let f = ac + (ad >> 32) + (bc >> 32) + (tmp >> 32);
+ let e = self.e + other.e + 64;
+ Fp { f, e }
+ }
+
+ /// Normalizes itself so that the resulting mantissa is at least `2^63`.
+ pub fn normalize(&self) -> Fp {
+ let mut f = self.f;
+ let mut e = self.e;
+ if f >> (64 - 32) == 0 {
+ f <<= 32;
+ e -= 32;
+ }
+ if f >> (64 - 16) == 0 {
+ f <<= 16;
+ e -= 16;
+ }
+ if f >> (64 - 8) == 0 {
+ f <<= 8;
+ e -= 8;
+ }
+ if f >> (64 - 4) == 0 {
+ f <<= 4;
+ e -= 4;
+ }
+ if f >> (64 - 2) == 0 {
+ f <<= 2;
+ e -= 2;
+ }
+ if f >> (64 - 1) == 0 {
+ f <<= 1;
+ e -= 1;
+ }
+ debug_assert!(f >= (1 << 63));
+ Fp { f, e }
+ }
+
+ /// Normalizes itself to have the shared exponent.
+ /// It can only decrease the exponent (and thus increase the mantissa).
+ pub fn normalize_to(&self, e: i16) -> Fp {
+ let edelta = self.e - e;
+ assert!(edelta >= 0);
+ let edelta = edelta as usize;
+ assert_eq!(self.f << edelta >> edelta, self.f);
+ Fp { f: self.f << edelta, e }
+ }
+}
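+
+// Editorial sketch (hypothetical test, not part of the upstream file): `mul`
+// computes the high 64 bits of the full 128-bit product, rounded half-up at
+// bit 63, which can be cross-checked with `u128` arithmetic.
+#[cfg(test)]
+#[test]
+fn fp_mul_matches_u128_reference() {
+    let a = Fp { f: 0xdead_beef_dead_beef, e: 10 };
+    let b = Fp { f: 0x1234_5678_9abc_def0, e: -5 };
+    let full = (a.f as u128) * (b.f as u128);
+    let rounded_hi = (((full >> 63) + 1) >> 1) as u64; // round at bit 63
+    let p = a.mul(&b);
+    assert_eq!(p.f, rounded_hi);
+    assert_eq!(p.e, a.e + b.e + 64);
+}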
diff --git a/library/core/src/num/error.rs b/library/core/src/num/error.rs
new file mode 100644
index 000000000..1a223016d
--- /dev/null
+++ b/library/core/src/num/error.rs
@@ -0,0 +1,146 @@
+//! Error types for conversion to integral types.
+
+use crate::convert::Infallible;
+use crate::fmt;
+
+/// The error type returned when a checked integral type conversion fails.
+#[stable(feature = "try_from", since = "1.34.0")]
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub struct TryFromIntError(pub(crate) ());
+
+impl TryFromIntError {
+ #[unstable(
+ feature = "int_error_internals",
+ reason = "available through Error trait and this method should \
+ not be exposed publicly",
+ issue = "none"
+ )]
+ #[doc(hidden)]
+ pub fn __description(&self) -> &str {
+ "out of range integral type conversion attempted"
+ }
+}
+
+#[stable(feature = "try_from", since = "1.34.0")]
+impl fmt::Display for TryFromIntError {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.__description().fmt(fmt)
+ }
+}
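+
+// Editorial sketch (hypothetical test, not part of the upstream file): this
+// is the error produced by the checked `TryFrom` conversions between integer
+// types when the value does not fit in the target type.
+#[cfg(test)]
+#[test]
+fn try_from_int_error_example() {
+    let too_big: Result<u8, TryFromIntError> = u8::try_from(300_i32);
+    assert!(too_big.is_err());
+    assert_eq!(u8::try_from(200_i32), Ok(200_u8));
+}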
+
+#[stable(feature = "try_from", since = "1.34.0")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl const From<Infallible> for TryFromIntError {
+ fn from(x: Infallible) -> TryFromIntError {
+ match x {}
+ }
+}
+
+#[unstable(feature = "never_type", issue = "35121")]
+impl const From<!> for TryFromIntError {
+ fn from(never: !) -> TryFromIntError {
+ // Match rather than coerce to make sure that code like
+ // `From<Infallible> for TryFromIntError` above will keep working
+ // when `Infallible` becomes an alias to `!`.
+ match never {}
+ }
+}
+
+/// An error which can be returned when parsing an integer.
+///
+/// This error is used as the error type for the `from_str_radix()` functions
+/// on the primitive integer types, such as [`i8::from_str_radix`].
+///
+/// # Potential causes
+///
+/// Among other causes, `ParseIntError` can be returned because of leading or trailing
+/// whitespace in the string, e.g., when it is obtained from the standard input.
+/// Using the [`str::trim()`] method ensures that no whitespace remains before parsing.
+///
+/// # Example
+///
+/// ```
+/// if let Err(e) = i32::from_str_radix("a12", 10) {
+/// println!("Failed conversion to i32: {e}");
+/// }
+/// ```
+#[derive(Debug, Clone, PartialEq, Eq)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct ParseIntError {
+ pub(super) kind: IntErrorKind,
+}
+
+/// Enum to store the various types of errors that can cause parsing an integer to fail.
+///
+/// # Example
+///
+/// ```
+/// # fn main() {
+/// if let Err(e) = i32::from_str_radix("a12", 10) {
+/// println!("Failed conversion to i32: {:?}", e.kind());
+/// }
+/// # }
+/// ```
+#[stable(feature = "int_error_matching", since = "1.55.0")]
+#[derive(Debug, Clone, PartialEq, Eq)]
+#[non_exhaustive]
+pub enum IntErrorKind {
+ /// Value being parsed is empty.
+ ///
+ /// This variant will be constructed when parsing an empty string.
+ #[stable(feature = "int_error_matching", since = "1.55.0")]
+ Empty,
+ /// Contains an invalid digit in its context.
+ ///
+ /// Among other causes, this variant will be constructed when parsing a string that
+ /// contains a non-ASCII char.
+ ///
+ /// This variant is also constructed when a `+` or `-` is misplaced within a string
+ /// either on its own or in the middle of a number.
+ #[stable(feature = "int_error_matching", since = "1.55.0")]
+ InvalidDigit,
+ /// Integer is too large to store in target integer type.
+ #[stable(feature = "int_error_matching", since = "1.55.0")]
+ PosOverflow,
+ /// Integer is too small to store in target integer type.
+ #[stable(feature = "int_error_matching", since = "1.55.0")]
+ NegOverflow,
+ /// Value was zero.
+ ///
+ /// This variant will be emitted when the string being parsed has a value of zero, which
+ /// would be illegal for non-zero types.
+ #[stable(feature = "int_error_matching", since = "1.55.0")]
+ Zero,
+}
+
+impl ParseIntError {
+ /// Returns the detailed cause of the failure to parse an integer.
+ #[must_use]
+ #[stable(feature = "int_error_matching", since = "1.55.0")]
+ pub fn kind(&self) -> &IntErrorKind {
+ &self.kind
+ }
+ #[unstable(
+ feature = "int_error_internals",
+ reason = "available through Error trait and this method should \
+ not be exposed publicly",
+ issue = "none"
+ )]
+ #[doc(hidden)]
+ pub fn __description(&self) -> &str {
+ match self.kind {
+ IntErrorKind::Empty => "cannot parse integer from empty string",
+ IntErrorKind::InvalidDigit => "invalid digit found in string",
+ IntErrorKind::PosOverflow => "number too large to fit in target type",
+ IntErrorKind::NegOverflow => "number too small to fit in target type",
+ IntErrorKind::Zero => "number would be zero for non-zero type",
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for ParseIntError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.__description().fmt(f)
+ }
+}
diff --git a/library/core/src/num/f32.rs b/library/core/src/num/f32.rs
new file mode 100644
index 000000000..6548ad2e5
--- /dev/null
+++ b/library/core/src/num/f32.rs
@@ -0,0 +1,1296 @@
+//! Constants specific to the `f32` single-precision floating point type.
+//!
+//! *[See also the `f32` primitive type][f32].*
+//!
+//! Mathematically significant numbers are provided in the `consts` sub-module.
+//!
+//! For the constants defined directly in this module
+//! (as distinct from those defined in the `consts` sub-module),
+//! new code should instead use the associated constants
+//! defined directly on the `f32` type.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::convert::FloatToInt;
+#[cfg(not(test))]
+use crate::intrinsics;
+use crate::mem;
+use crate::num::FpCategory;
+
+/// The radix or base of the internal representation of `f32`.
+/// Use [`f32::RADIX`] instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// # #[allow(deprecated, deprecated_in_future)]
+/// let r = std::f32::RADIX;
+///
+/// // intended way
+/// let r = f32::RADIX;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "TBD", note = "replaced by the `RADIX` associated constant on `f32`")]
+pub const RADIX: u32 = f32::RADIX;
+
+/// Number of significant digits in base 2.
+/// Use [`f32::MANTISSA_DIGITS`] instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// # #[allow(deprecated, deprecated_in_future)]
+/// let d = std::f32::MANTISSA_DIGITS;
+///
+/// // intended way
+/// let d = f32::MANTISSA_DIGITS;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(
+ since = "TBD",
+ note = "replaced by the `MANTISSA_DIGITS` associated constant on `f32`"
+)]
+pub const MANTISSA_DIGITS: u32 = f32::MANTISSA_DIGITS;
+
+/// Approximate number of significant digits in base 10.
+/// Use [`f32::DIGITS`] instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// # #[allow(deprecated, deprecated_in_future)]
+/// let d = std::f32::DIGITS;
+///
+/// // intended way
+/// let d = f32::DIGITS;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "TBD", note = "replaced by the `DIGITS` associated constant on `f32`")]
+pub const DIGITS: u32 = f32::DIGITS;
+
+/// [Machine epsilon] value for `f32`.
+/// Use [`f32::EPSILON`] instead.
+///
+/// This is the difference between `1.0` and the next larger representable number.
+///
+/// [Machine epsilon]: https://en.wikipedia.org/wiki/Machine_epsilon
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// # #[allow(deprecated, deprecated_in_future)]
+/// let e = std::f32::EPSILON;
+///
+/// // intended way
+/// let e = f32::EPSILON;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "TBD", note = "replaced by the `EPSILON` associated constant on `f32`")]
+pub const EPSILON: f32 = f32::EPSILON;
+
+/// Smallest finite `f32` value.
+/// Use [`f32::MIN`] instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// # #[allow(deprecated, deprecated_in_future)]
+/// let min = std::f32::MIN;
+///
+/// // intended way
+/// let min = f32::MIN;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "TBD", note = "replaced by the `MIN` associated constant on `f32`")]
+pub const MIN: f32 = f32::MIN;
+
+/// Smallest positive normal `f32` value.
+/// Use [`f32::MIN_POSITIVE`] instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// # #[allow(deprecated, deprecated_in_future)]
+/// let min = std::f32::MIN_POSITIVE;
+///
+/// // intended way
+/// let min = f32::MIN_POSITIVE;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "TBD", note = "replaced by the `MIN_POSITIVE` associated constant on `f32`")]
+pub const MIN_POSITIVE: f32 = f32::MIN_POSITIVE;
+
+/// Largest finite `f32` value.
+/// Use [`f32::MAX`] instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// # #[allow(deprecated, deprecated_in_future)]
+/// let max = std::f32::MAX;
+///
+/// // intended way
+/// let max = f32::MAX;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "TBD", note = "replaced by the `MAX` associated constant on `f32`")]
+pub const MAX: f32 = f32::MAX;
+
+/// One greater than the minimum possible normal power of 2 exponent.
+/// Use [`f32::MIN_EXP`] instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// # #[allow(deprecated, deprecated_in_future)]
+/// let min = std::f32::MIN_EXP;
+///
+/// // intended way
+/// let min = f32::MIN_EXP;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "TBD", note = "replaced by the `MIN_EXP` associated constant on `f32`")]
+pub const MIN_EXP: i32 = f32::MIN_EXP;
+
+/// Maximum possible power of 2 exponent.
+/// Use [`f32::MAX_EXP`] instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// # #[allow(deprecated, deprecated_in_future)]
+/// let max = std::f32::MAX_EXP;
+///
+/// // intended way
+/// let max = f32::MAX_EXP;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "TBD", note = "replaced by the `MAX_EXP` associated constant on `f32`")]
+pub const MAX_EXP: i32 = f32::MAX_EXP;
+
+/// Minimum possible normal power of 10 exponent.
+/// Use [`f32::MIN_10_EXP`] instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// # #[allow(deprecated, deprecated_in_future)]
+/// let min = std::f32::MIN_10_EXP;
+///
+/// // intended way
+/// let min = f32::MIN_10_EXP;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "TBD", note = "replaced by the `MIN_10_EXP` associated constant on `f32`")]
+pub const MIN_10_EXP: i32 = f32::MIN_10_EXP;
+
+/// Maximum possible power of 10 exponent.
+/// Use [`f32::MAX_10_EXP`] instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// # #[allow(deprecated, deprecated_in_future)]
+/// let max = std::f32::MAX_10_EXP;
+///
+/// // intended way
+/// let max = f32::MAX_10_EXP;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "TBD", note = "replaced by the `MAX_10_EXP` associated constant on `f32`")]
+pub const MAX_10_EXP: i32 = f32::MAX_10_EXP;
+
+/// Not a Number (NaN).
+/// Use [`f32::NAN`] instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// # #[allow(deprecated, deprecated_in_future)]
+/// let nan = std::f32::NAN;
+///
+/// // intended way
+/// let nan = f32::NAN;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "TBD", note = "replaced by the `NAN` associated constant on `f32`")]
+pub const NAN: f32 = f32::NAN;
+
+/// Infinity (∞).
+/// Use [`f32::INFINITY`] instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// # #[allow(deprecated, deprecated_in_future)]
+/// let inf = std::f32::INFINITY;
+///
+/// // intended way
+/// let inf = f32::INFINITY;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "TBD", note = "replaced by the `INFINITY` associated constant on `f32`")]
+pub const INFINITY: f32 = f32::INFINITY;
+
+/// Negative infinity (−∞).
+/// Use [`f32::NEG_INFINITY`] instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// # #[allow(deprecated, deprecated_in_future)]
+/// let ninf = std::f32::NEG_INFINITY;
+///
+/// // intended way
+/// let ninf = f32::NEG_INFINITY;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "TBD", note = "replaced by the `NEG_INFINITY` associated constant on `f32`")]
+pub const NEG_INFINITY: f32 = f32::NEG_INFINITY;
+
+/// Basic mathematical constants.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub mod consts {
+ // FIXME: replace with mathematical constants from cmath.
+
+ /// Archimedes' constant (π)
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const PI: f32 = 3.14159265358979323846264338327950288_f32;
+
+ /// The full circle constant (τ)
+ ///
+ /// Equal to 2π.
+ #[stable(feature = "tau_constant", since = "1.47.0")]
+ pub const TAU: f32 = 6.28318530717958647692528676655900577_f32;
+
+ /// π/2
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const FRAC_PI_2: f32 = 1.57079632679489661923132169163975144_f32;
+
+ /// π/3
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const FRAC_PI_3: f32 = 1.04719755119659774615421446109316763_f32;
+
+ /// π/4
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const FRAC_PI_4: f32 = 0.785398163397448309615660845819875721_f32;
+
+ /// π/6
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const FRAC_PI_6: f32 = 0.52359877559829887307710723054658381_f32;
+
+ /// π/8
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const FRAC_PI_8: f32 = 0.39269908169872415480783042290993786_f32;
+
+ /// 1/π
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const FRAC_1_PI: f32 = 0.318309886183790671537767526745028724_f32;
+
+ /// 2/π
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const FRAC_2_PI: f32 = 0.636619772367581343075535053490057448_f32;
+
+ /// 2/sqrt(π)
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const FRAC_2_SQRT_PI: f32 = 1.12837916709551257389615890312154517_f32;
+
+ /// sqrt(2)
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const SQRT_2: f32 = 1.41421356237309504880168872420969808_f32;
+
+ /// 1/sqrt(2)
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const FRAC_1_SQRT_2: f32 = 0.707106781186547524400844362104849039_f32;
+
+ /// Euler's number (e)
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const E: f32 = 2.71828182845904523536028747135266250_f32;
+
+ /// log<sub>2</sub>(e)
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const LOG2_E: f32 = 1.44269504088896340735992468100189214_f32;
+
+ /// log<sub>2</sub>(10)
+ #[stable(feature = "extra_log_consts", since = "1.43.0")]
+ pub const LOG2_10: f32 = 3.32192809488736234787031942948939018_f32;
+
+ /// log<sub>10</sub>(e)
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const LOG10_E: f32 = 0.434294481903251827651128918916605082_f32;
+
+ /// log<sub>10</sub>(2)
+ #[stable(feature = "extra_log_consts", since = "1.43.0")]
+ pub const LOG10_2: f32 = 0.301029995663981195213738894724493027_f32;
+
+ /// ln(2)
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const LN_2: f32 = 0.693147180559945309417232121458176568_f32;
+
+ /// ln(10)
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const LN_10: f32 = 2.30258509299404568401799145468436421_f32;
+}
+
+#[cfg(not(test))]
+impl f32 {
+ /// The radix or base of the internal representation of `f32`.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const RADIX: u32 = 2;
+
+ /// Number of significant digits in base 2.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MANTISSA_DIGITS: u32 = 24;
+
+ /// Approximate number of significant digits in base 10.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const DIGITS: u32 = 6;
+
+ /// [Machine epsilon] value for `f32`.
+ ///
+ /// This is the difference between `1.0` and the next larger representable number.
+ ///
+ /// [Machine epsilon]: https://en.wikipedia.org/wiki/Machine_epsilon
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const EPSILON: f32 = 1.19209290e-07_f32;
+
+ /// Smallest finite `f32` value.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MIN: f32 = -3.40282347e+38_f32;
+ /// Smallest positive normal `f32` value.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MIN_POSITIVE: f32 = 1.17549435e-38_f32;
+ /// Largest finite `f32` value.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MAX: f32 = 3.40282347e+38_f32;
+
+ /// One greater than the minimum possible normal power of 2 exponent.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MIN_EXP: i32 = -125;
+ /// Maximum possible power of 2 exponent.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MAX_EXP: i32 = 128;
+
+ /// Minimum possible normal power of 10 exponent.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MIN_10_EXP: i32 = -37;
+ /// Maximum possible power of 10 exponent.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MAX_10_EXP: i32 = 38;
+
+ /// Not a Number (NaN).
+ ///
+ /// Note that IEEE-754 doesn't define just a single NaN value;
+ /// a plethora of bit patterns are considered to be NaN.
+ /// Furthermore, the standard makes a difference
+ /// between a "signaling" and a "quiet" NaN,
+ /// and allows inspecting its "payload" (the unspecified bits in the bit pattern).
+ /// This constant isn't guaranteed to equal any specific NaN bitpattern,
+ /// and the stability of its representation over Rust versions
+ /// and target platforms isn't guaranteed.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const NAN: f32 = 0.0_f32 / 0.0_f32;
+ /// Infinity (∞).
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const INFINITY: f32 = 1.0_f32 / 0.0_f32;
+ /// Negative infinity (−∞).
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const NEG_INFINITY: f32 = -1.0_f32 / 0.0_f32;
+
+ /// Returns `true` if this value is NaN.
+ ///
+ /// ```
+ /// let nan = f32::NAN;
+ /// let f = 7.0_f32;
+ ///
+ /// assert!(nan.is_nan());
+ /// assert!(!f.is_nan());
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ #[inline]
+ pub const fn is_nan(self) -> bool {
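+ // NaN is the only float value that compares unequal to itself.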
+ self != self
+ }
+
+ // FIXME(#50145): `abs` is publicly unavailable in libcore due to
+ // concerns about portability, so this implementation is for
+ // private use internally.
+ #[inline]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ pub(crate) const fn abs_private(self) -> f32 {
+ // SAFETY: This transmutation is fine. Probably. For the reasons std is using it.
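+ // Masking with 0x7fff_ffff clears the sign bit, leaving the magnitude |self|.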
+ unsafe { mem::transmute::<u32, f32>(mem::transmute::<f32, u32>(self) & 0x7fff_ffff) }
+ }
+
+ /// Returns `true` if this value is positive infinity or negative infinity, and
+ /// `false` otherwise.
+ ///
+ /// ```
+ /// let f = 7.0f32;
+ /// let inf = f32::INFINITY;
+ /// let neg_inf = f32::NEG_INFINITY;
+ /// let nan = f32::NAN;
+ ///
+ /// assert!(!f.is_infinite());
+ /// assert!(!nan.is_infinite());
+ ///
+ /// assert!(inf.is_infinite());
+ /// assert!(neg_inf.is_infinite());
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ #[inline]
+ pub const fn is_infinite(self) -> bool {
+ // Getting clever with transmutation can result in incorrect answers on some FPUs
+ // FIXME: alter the Rust <-> Rust calling convention to prevent this problem.
+ // See https://github.com/rust-lang/rust/issues/72327
+ (self == f32::INFINITY) | (self == f32::NEG_INFINITY)
+ }
+
+ /// Returns `true` if this number is neither infinite nor NaN.
+ ///
+ /// ```
+ /// let f = 7.0f32;
+ /// let inf = f32::INFINITY;
+ /// let neg_inf = f32::NEG_INFINITY;
+ /// let nan = f32::NAN;
+ ///
+ /// assert!(f.is_finite());
+ ///
+ /// assert!(!nan.is_finite());
+ /// assert!(!inf.is_finite());
+ /// assert!(!neg_inf.is_finite());
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ #[inline]
+ pub const fn is_finite(self) -> bool {
+ // There's no need to handle NaN separately: if self is NaN,
+ // the comparison is not true, exactly as desired.
+ self.abs_private() < Self::INFINITY
+ }
+
+ /// Returns `true` if the number is [subnormal].
+ ///
+ /// ```
+ /// let min = f32::MIN_POSITIVE; // 1.17549435e-38f32
+ /// let max = f32::MAX;
+ /// let lower_than_min = 1.0e-40_f32;
+ /// let zero = 0.0_f32;
+ ///
+ /// assert!(!min.is_subnormal());
+ /// assert!(!max.is_subnormal());
+ ///
+ /// assert!(!zero.is_subnormal());
+ /// assert!(!f32::NAN.is_subnormal());
+ /// assert!(!f32::INFINITY.is_subnormal());
+ /// // Values between `0` and `min` are subnormal.
+ /// assert!(lower_than_min.is_subnormal());
+ /// ```
+ /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number
+ #[must_use]
+ #[stable(feature = "is_subnormal", since = "1.53.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ #[inline]
+ pub const fn is_subnormal(self) -> bool {
+ matches!(self.classify(), FpCategory::Subnormal)
+ }
+
+ /// Returns `true` if the number is neither zero, infinite,
+ /// [subnormal], nor NaN.
+ ///
+ /// ```
+ /// let min = f32::MIN_POSITIVE; // 1.17549435e-38f32
+ /// let max = f32::MAX;
+ /// let lower_than_min = 1.0e-40_f32;
+ /// let zero = 0.0_f32;
+ ///
+ /// assert!(min.is_normal());
+ /// assert!(max.is_normal());
+ ///
+ /// assert!(!zero.is_normal());
+ /// assert!(!f32::NAN.is_normal());
+ /// assert!(!f32::INFINITY.is_normal());
+ /// // Values between `0` and `min` are subnormal.
+ /// assert!(!lower_than_min.is_normal());
+ /// ```
+ /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ #[inline]
+ pub const fn is_normal(self) -> bool {
+ matches!(self.classify(), FpCategory::Normal)
+ }
+
+ /// Returns the floating point category of the number. If only one property
+ /// is going to be tested, it is generally faster to use the specific
+ /// predicate instead.
+ ///
+ /// ```
+ /// use std::num::FpCategory;
+ ///
+ /// let num = 12.4_f32;
+ /// let inf = f32::INFINITY;
+ ///
+ /// assert_eq!(num.classify(), FpCategory::Normal);
+ /// assert_eq!(inf.classify(), FpCategory::Infinite);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ pub const fn classify(self) -> FpCategory {
+ // A previous implementation tried to only use bitmask-based checks,
+ // using f32::to_bits to transmute the float to its bit repr and match on that.
+ // Unfortunately, floating point numbers can behave much worse than that.
+ // This also needs to not result in recursive evaluations of f32::to_bits.
+ //
+ // On some processors, in some cases, LLVM will "helpfully" lower floating point
+ // ops requested on f32 and f64 to things like x87 operations instead.
+ // These have an f64's mantissa, but can have a larger than normal exponent.
+ // FIXME(jubilee): Using x87 operations is never necessary in order to function
+ // on x86 processors for Rust-to-Rust calls, so this issue should not happen.
+ // Code generation should be adjusted to use non-C calling conventions, avoiding this.
+ //
+ if self.is_infinite() {
+ // Thus, a value may compare unequal to infinity, despite having a "full" exponent mask.
+ FpCategory::Infinite
+ } else if self.is_nan() {
+ // And it may not be NaN, as it can simply be an "overextended" finite value.
+ FpCategory::Nan
+ } else {
+ // However, std can't simply compare to zero to check for zero, either,
+ // as correctness requires avoiding equality tests that may be Subnormal == -0.0
+ // because it may be wrong under "denormals are zero" and "flush to zero" modes.
+ // Most of std's targets don't use those, but they are used for thumbv7neon.
+ // So, this does use bitpattern matching for the rest.
+
+ // SAFETY: f32 to u32 is fine. Usually.
+ // If classify has gotten this far, the value is definitely in one of these categories.
+ unsafe { f32::partial_classify(self) }
+ }
+ }
+
+ // This doesn't actually return a right answer for NaN on purpose,
+ // seeing as how it cannot correctly discern between a floating point NaN,
+ // and some normal floating point numbers truncated from an x87 FPU.
+ // FIXME(jubilee): This probably could at least answer things correctly for Infinity,
+ // like the f64 version does, but I need to run more checks on how things go on x86.
+ // I fear losing mantissa data that would have answered that differently.
+ //
+ // # Safety
+ // This requires making sure you call this function for values it answers correctly on,
+ // otherwise it returns a wrong answer. This is not important for memory safety per se,
+ // but getting floats correct is important for not accidentally leaking const-eval
+ // logic that deviates from runtime behavior, which may or may not be acceptable.
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ const unsafe fn partial_classify(self) -> FpCategory {
+ const EXP_MASK: u32 = 0x7f800000;
+ const MAN_MASK: u32 = 0x007fffff;
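+ // f32 bit layout: 1 sign bit, 8 exponent bits (EXP_MASK), 23 mantissa bits (MAN_MASK).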
+
+ // SAFETY: The caller is not asking questions for which this will tell lies.
+ let b = unsafe { mem::transmute::<f32, u32>(self) };
+ match (b & MAN_MASK, b & EXP_MASK) {
+ (0, 0) => FpCategory::Zero,
+ (_, 0) => FpCategory::Subnormal,
+ _ => FpCategory::Normal,
+ }
+ }
+
+ // This operates on bits, and only bits, so it can ignore concerns about weird FPUs.
+ // FIXME(jubilee): In a just world, this would be the entire impl for classify,
+ // plus a transmute. We do not live in a just world, but we can make it more so.
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ const fn classify_bits(b: u32) -> FpCategory {
+ const EXP_MASK: u32 = 0x7f800000;
+ const MAN_MASK: u32 = 0x007fffff;
+
+ match (b & MAN_MASK, b & EXP_MASK) {
+ (0, EXP_MASK) => FpCategory::Infinite,
+ (_, EXP_MASK) => FpCategory::Nan,
+ (0, 0) => FpCategory::Zero,
+ (_, 0) => FpCategory::Subnormal,
+ _ => FpCategory::Normal,
+ }
+ }
+
+ /// Returns `true` if `self` has a positive sign, including `+0.0`, NaNs with
+ /// a positive sign bit, and positive infinity. Note that IEEE-754 doesn't assign any
+ /// meaning to the sign bit in case of a NaN, and as Rust doesn't guarantee that
+ /// the bit patterns of NaNs are preserved over arithmetic operations, calling
+ /// `is_sign_positive` on a NaN might produce an unexpected result in some cases.
+ /// See [explanation of NaN as a special value](f32) for more info.
+ ///
+ /// ```
+ /// let f = 7.0_f32;
+ /// let g = -7.0_f32;
+ ///
+ /// assert!(f.is_sign_positive());
+ /// assert!(!g.is_sign_positive());
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ #[inline]
+ pub const fn is_sign_positive(self) -> bool {
+ !self.is_sign_negative()
+ }
+
+ /// Returns `true` if `self` has a negative sign, including `-0.0`, NaNs with
+ /// a negative sign bit, and negative infinity. Note that IEEE-754 doesn't assign any
+ /// meaning to the sign bit in case of a NaN, and as Rust doesn't guarantee that
+ /// the bit patterns of NaNs are preserved over arithmetic operations, calling
+ /// `is_sign_negative` on a NaN might produce an unexpected result in some cases.
+ /// See [explanation of NaN as a special value](f32) for more info.
+ ///
+ /// ```
+ /// let f = 7.0f32;
+ /// let g = -7.0f32;
+ ///
+ /// assert!(!f.is_sign_negative());
+ /// assert!(g.is_sign_negative());
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ #[inline]
+ pub const fn is_sign_negative(self) -> bool {
+ // IEEE754 says: isSignMinus(x) is true if and only if x has negative sign. isSignMinus
+ // applies to zeros and NaNs as well.
+ // SAFETY: This is just transmuting to get the sign bit, it's fine.
+ unsafe { mem::transmute::<f32, u32>(self) & 0x8000_0000 != 0 }
+ }
+
+ /// Takes the reciprocal (inverse) of a number, `1/x`.
+ ///
+ /// ```
+ /// let x = 2.0_f32;
+ /// let abs_difference = (x.recip() - (1.0 / x)).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[must_use = "this returns the result of the operation, without modifying the original"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn recip(self) -> f32 {
+ 1.0 / self
+ }
+
+ /// Converts radians to degrees.
+ ///
+ /// ```
+ /// let angle = std::f32::consts::PI;
+ ///
+ /// let abs_difference = (angle.to_degrees() - 180.0).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[stable(feature = "f32_deg_rad_conversions", since = "1.7.0")]
+ #[inline]
+ pub fn to_degrees(self) -> f32 {
+ // Use a precomputed constant (180/π) for better precision.
+ const PIS_IN_180: f32 = 57.2957795130823208767981548141051703_f32;
+ self * PIS_IN_180
+ }
+
+ /// Converts degrees to radians.
+ ///
+ /// ```
+ /// let angle = 180.0f32;
+ ///
+ /// let abs_difference = (angle.to_radians() - std::f32::consts::PI).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[stable(feature = "f32_deg_rad_conversions", since = "1.7.0")]
+ #[inline]
+ pub fn to_radians(self) -> f32 {
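+ // Multiplying by π/180 converts degrees to radians.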
+ let value: f32 = consts::PI;
+ self * (value / 180.0f32)
+ }
+
+ /// Returns the maximum of the two numbers, ignoring NaN.
+ ///
+ /// If one of the arguments is NaN, then the other argument is returned.
+ /// This follows the IEEE-754 2008 semantics for maxNum, except for handling of signaling NaNs;
+ /// this function handles all NaNs the same way and avoids maxNum's problems with associativity.
+ /// This also matches the behavior of libm’s fmax.
+ ///
+ /// ```
+ /// let x = 1.0f32;
+ /// let y = 2.0f32;
+ ///
+ /// assert_eq!(x.max(y), y);
+ /// ```
+ #[must_use = "this returns the result of the comparison, without modifying either input"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn max(self, other: f32) -> f32 {
+ intrinsics::maxnumf32(self, other)
+ }
+
+ /// Returns the minimum of the two numbers, ignoring NaN.
+ ///
+ /// If one of the arguments is NaN, then the other argument is returned.
+ /// This follows the IEEE-754 2008 semantics for minNum, except for handling of signaling NaNs;
+ /// this function handles all NaNs the same way and avoids minNum's problems with associativity.
+ /// This also matches the behavior of libm’s fmin.
+ ///
+ /// ```
+ /// let x = 1.0f32;
+ /// let y = 2.0f32;
+ ///
+ /// assert_eq!(x.min(y), x);
+ /// ```
+ #[must_use = "this returns the result of the comparison, without modifying either input"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn min(self, other: f32) -> f32 {
+ intrinsics::minnumf32(self, other)
+ }
+
+ /// Returns the maximum of the two numbers, propagating NaN.
+ ///
+ /// This returns NaN when *either* argument is NaN, as opposed to
+ /// [`f32::max`] which only returns NaN when *both* arguments are NaN.
+ ///
+ /// ```
+ /// #![feature(float_minimum_maximum)]
+ /// let x = 1.0f32;
+ /// let y = 2.0f32;
+ ///
+ /// assert_eq!(x.maximum(y), y);
+ /// assert!(x.maximum(f32::NAN).is_nan());
+ /// ```
+ ///
+ /// If one of the arguments is NaN, then NaN is returned. Otherwise this returns the greater
+ /// of the two numbers. For this operation, -0.0 is considered to be less than +0.0.
+ /// Note that this follows the semantics specified in IEEE 754-2019.
+ ///
+ /// Also note that "propagation" of NaNs here doesn't necessarily mean that the bitpattern of a NaN
+ /// operand is conserved; see [explanation of NaN as a special value](f32) for more info.
+ #[must_use = "this returns the result of the comparison, without modifying either input"]
+ #[unstable(feature = "float_minimum_maximum", issue = "91079")]
+ #[inline]
+ pub fn maximum(self, other: f32) -> f32 {
+ if self > other {
+ self
+ } else if other > self {
+ other
+ } else if self == other {
+ if self.is_sign_positive() && other.is_sign_negative() { self } else { other }
+ } else {
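+ // At least one input is NaN; `self + other` then propagates NaN.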
+ self + other
+ }
+ }
+
+ /// Returns the minimum of the two numbers, propagating NaN.
+ ///
+ /// This returns NaN when *either* argument is NaN, as opposed to
+ /// [`f32::min`] which only returns NaN when *both* arguments are NaN.
+ ///
+ /// ```
+ /// #![feature(float_minimum_maximum)]
+ /// let x = 1.0f32;
+ /// let y = 2.0f32;
+ ///
+ /// assert_eq!(x.minimum(y), x);
+ /// assert!(x.minimum(f32::NAN).is_nan());
+ /// ```
+ ///
+ /// If one of the arguments is NaN, then NaN is returned. Otherwise this returns the lesser
+ /// of the two numbers. For this operation, -0.0 is considered to be less than +0.0.
+ /// Note that this follows the semantics specified in IEEE 754-2019.
+ ///
+ /// Also note that "propagation" of NaNs here doesn't necessarily mean that the bitpattern of a NaN
+ /// operand is conserved; see [explanation of NaN as a special value](f32) for more info.
+ #[must_use = "this returns the result of the comparison, without modifying either input"]
+ #[unstable(feature = "float_minimum_maximum", issue = "91079")]
+ #[inline]
+ pub fn minimum(self, other: f32) -> f32 {
+ if self < other {
+ self
+ } else if other < self {
+ other
+ } else if self == other {
+ if self.is_sign_negative() && other.is_sign_positive() { self } else { other }
+ } else {
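+ // At least one input is NaN; `self + other` then propagates NaN.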
+ self + other
+ }
+ }
+
+ /// Rounds toward zero and converts to any primitive integer type,
+ /// assuming that the value is finite and fits in that type.
+ ///
+ /// ```
+ /// let value = 4.6_f32;
+ /// let rounded = unsafe { value.to_int_unchecked::<u16>() };
+ /// assert_eq!(rounded, 4);
+ ///
+ /// let value = -128.9_f32;
+ /// let rounded = unsafe { value.to_int_unchecked::<i8>() };
+ /// assert_eq!(rounded, i8::MIN);
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// The value must:
+ ///
+ /// * Not be `NaN`
+ /// * Not be infinite
+ /// * Be representable in the return type `Int`, after truncating off its fractional part
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[stable(feature = "float_approx_unchecked_to", since = "1.44.0")]
+ #[inline]
+ pub unsafe fn to_int_unchecked<Int>(self) -> Int
+ where
+ Self: FloatToInt<Int>,
+ {
+ // SAFETY: the caller must uphold the safety contract for
+ // `FloatToInt::to_int_unchecked`.
+ unsafe { FloatToInt::<Int>::to_int_unchecked(self) }
+ }
+
+ /// Raw transmutation to `u32`.
+ ///
+ /// This is currently identical to `transmute::<f32, u32>(self)` on all platforms.
+ ///
+ /// See [`from_bits`](Self::from_bits) for some discussion of the
+ /// portability of this operation (there are almost no issues).
+ ///
+ /// Note that this function is distinct from `as` casting, which attempts to
+ /// preserve the *numeric* value, and not the bitwise value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_ne!((1f32).to_bits(), 1f32 as u32); // to_bits() is not casting!
+ /// assert_eq!((12.5f32).to_bits(), 0x41480000);
+ /// ```
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[stable(feature = "float_bits_conv", since = "1.20.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[inline]
+ pub const fn to_bits(self) -> u32 {
+ // SAFETY: `u32` is a plain old datatype so we can always transmute to it.
+ // ...sorta.
+ //
+ // It turns out that at runtime, it is possible for a floating point number
+ // to be subject to a floating point mode that alters nonzero subnormal numbers
+ // to zero on reads and writes, aka "denormals are zero" and "flush to zero".
+ // This is not a problem per se, but at least one tier2 platform for Rust
+ // actually exhibits this behavior by default.
+ //
+ // In addition, on x86 targets with SSE or SSE2 disabled and the x87 FPU enabled,
+ // i.e. not soft-float, the way Rust does parameter passing can actually alter
+ // a number that is "not infinity" to have the same exponent as infinity,
+ // in a slightly unpredictable manner.
+ //
+ // And, of course, evaluating to a NaN value is fairly nondeterministic.
+ // More precisely: when NaN should be returned is knowable, but which NaN?
+ // So far that's defined by a combination of LLVM and the CPU, not Rust.
+ // This function, however, allows observing the bitstring of a NaN,
+ // thus introspection on CTFE.
+ //
+ // In order to preserve, at least for the moment, const-to-runtime equivalence,
+ // we reject any of these possible situations from happening.
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ const fn ct_f32_to_u32(ct: f32) -> u32 {
+ match ct.classify() {
+ FpCategory::Nan => {
+ panic!("const-eval error: cannot use f32::to_bits on a NaN")
+ }
+ FpCategory::Subnormal => {
+ panic!("const-eval error: cannot use f32::to_bits on a subnormal number")
+ }
+ FpCategory::Infinite | FpCategory::Normal | FpCategory::Zero => {
+ // SAFETY: We have a normal floating point number. Now we transmute, i.e. do a bitcopy.
+ unsafe { mem::transmute::<f32, u32>(ct) }
+ }
+ }
+ }
+ // SAFETY: `u32` is a plain old datatype so we can always... uh...
+ // ...look, just pretend you forgot what you just read.
+ // Stability concerns.
+ let rt_f32_to_u32 = |rt| unsafe { mem::transmute::<f32, u32>(rt) };
+ // SAFETY: We use internal implementations that either always work or fail at compile time.
+ unsafe { intrinsics::const_eval_select((self,), ct_f32_to_u32, rt_f32_to_u32) }
+ }
+
+ /// Raw transmutation from `u32`.
+ ///
+ /// This is currently identical to `transmute::<u32, f32>(v)` on all platforms.
+ /// It turns out this is incredibly portable, for two reasons:
+ ///
+ /// * Floats and Ints have the same endianness on all supported platforms.
+ /// * IEEE-754 very precisely specifies the bit layout of floats.
+ ///
+ /// However there is one caveat: prior to the 2008 version of IEEE-754, how
+ /// to interpret the NaN signaling bit wasn't actually specified. Most platforms
+ /// (notably x86 and ARM) picked the interpretation that was ultimately
+ /// standardized in 2008, but some didn't (notably MIPS). As a result, all
+ /// signaling NaNs on MIPS are quiet NaNs on x86, and vice-versa.
+ ///
+ /// Rather than trying to preserve signaling-ness cross-platform, this
+ /// implementation favors preserving the exact bits. This means that
+ /// any payloads encoded in NaNs will be preserved even if the result of
+ /// this method is sent over the network from an x86 machine to a MIPS one.
+ ///
+ /// If the results of this method are only manipulated by the same
+ /// architecture that produced them, then there is no portability concern.
+ ///
+ /// If the input isn't NaN, then there is no portability concern.
+ ///
+ /// If you don't care about signaling-ness (very likely), then there is no
+ /// portability concern.
+ ///
+ /// Note that this function is distinct from `as` casting, which attempts to
+ /// preserve the *numeric* value, and not the bitwise value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = f32::from_bits(0x41480000);
+ /// assert_eq!(v, 12.5);
+ /// ```
+ #[stable(feature = "float_bits_conv", since = "1.20.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[must_use]
+ #[inline]
+ pub const fn from_bits(v: u32) -> Self {
+ // It turns out the safety issues with sNaN were overblown! Hooray!
+ // SAFETY: `u32` is a plain old datatype so we can always transmute from it
+ // ...sorta.
+ //
+ // It turns out that at runtime, it is possible for a floating point number
+ // to be subject to floating point modes that alter nonzero subnormal numbers
+ // to zero on reads and writes, aka "denormals are zero" and "flush to zero".
+ // This is not a problem usually, but at least one tier2 platform for Rust
+ // actually exhibits this behavior by default: thumbv7neon
+ // aka "the Neon FPU in AArch32 state"
+ //
+ // In addition, on x86 targets with SSE or SSE2 disabled and the x87 FPU enabled,
+ // i.e. not soft-float, the way Rust does parameter passing can actually alter
+ // a number that is "not infinity" to have the same exponent as infinity,
+ // in a slightly unpredictable manner.
+ //
+ // And, of course, evaluating to a NaN value is fairly nondeterministic.
+ // More precisely: when NaN should be returned is knowable, but which NaN?
+ // So far that's defined by a combination of LLVM and the CPU, not Rust.
+ // This function, however, allows observing the bitstring of a NaN,
+ // thus introspection on CTFE.
+ //
+ // In order to preserve, at least for the moment, const-to-runtime equivalence,
+ // we reject any of these possible situations from happening.
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ const fn ct_u32_to_f32(ct: u32) -> f32 {
+ match f32::classify_bits(ct) {
+ FpCategory::Subnormal => {
+ panic!("const-eval error: cannot use f32::from_bits on a subnormal number")
+ }
+ FpCategory::Nan => {
+ panic!("const-eval error: cannot use f32::from_bits on NaN")
+ }
+ FpCategory::Infinite | FpCategory::Normal | FpCategory::Zero => {
+ // SAFETY: It's not a frumious number
+ unsafe { mem::transmute::<u32, f32>(ct) }
+ }
+ }
+ }
+ // SAFETY: `u32` is a plain old datatype so we can always... uh...
+ // ...look, just pretend you forgot what you just read.
+ // Stability concerns.
+ let rt_u32_to_f32 = |rt| unsafe { mem::transmute::<u32, f32>(rt) };
+ // SAFETY: We use internal implementations that either always work or fail at compile time.
+ unsafe { intrinsics::const_eval_select((v,), ct_u32_to_f32, rt_u32_to_f32) }
+ }
+
+ /// Returns the memory representation of this floating point number as a byte array in
+ /// big-endian (network) byte order.
+ ///
+ /// See [`from_bits`](Self::from_bits) for some discussion of the
+ /// portability of this operation (there are almost no issues).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let bytes = 12.5f32.to_be_bytes();
+ /// assert_eq!(bytes, [0x41, 0x48, 0x00, 0x00]);
+ /// ```
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[stable(feature = "float_to_from_bytes", since = "1.40.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[inline]
+ pub const fn to_be_bytes(self) -> [u8; 4] {
+ self.to_bits().to_be_bytes()
+ }
+
+ /// Returns the memory representation of this floating point number as a byte array in
+ /// little-endian byte order.
+ ///
+ /// See [`from_bits`](Self::from_bits) for some discussion of the
+ /// portability of this operation (there are almost no issues).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let bytes = 12.5f32.to_le_bytes();
+ /// assert_eq!(bytes, [0x00, 0x00, 0x48, 0x41]);
+ /// ```
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[stable(feature = "float_to_from_bytes", since = "1.40.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[inline]
+ pub const fn to_le_bytes(self) -> [u8; 4] {
+ self.to_bits().to_le_bytes()
+ }
+
+ /// Returns the memory representation of this floating point number as a byte array in
+ /// native byte order.
+ ///
+ /// As the target platform's native endianness is used, portable code
+ /// should use [`to_be_bytes`] or [`to_le_bytes`], as appropriate, instead.
+ ///
+ /// [`to_be_bytes`]: f32::to_be_bytes
+ /// [`to_le_bytes`]: f32::to_le_bytes
+ ///
+ /// See [`from_bits`](Self::from_bits) for some discussion of the
+ /// portability of this operation (there are almost no issues).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let bytes = 12.5f32.to_ne_bytes();
+ /// assert_eq!(
+ /// bytes,
+ /// if cfg!(target_endian = "big") {
+ /// [0x41, 0x48, 0x00, 0x00]
+ /// } else {
+ /// [0x00, 0x00, 0x48, 0x41]
+ /// }
+ /// );
+ /// ```
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[stable(feature = "float_to_from_bytes", since = "1.40.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[inline]
+ pub const fn to_ne_bytes(self) -> [u8; 4] {
+ self.to_bits().to_ne_bytes()
+ }
+
+ /// Creates a floating point value from its representation as a byte array in big endian.
+ ///
+ /// See [`from_bits`](Self::from_bits) for some discussion of the
+ /// portability of this operation (there are almost no issues).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let value = f32::from_be_bytes([0x41, 0x48, 0x00, 0x00]);
+ /// assert_eq!(value, 12.5);
+ /// ```
+ #[stable(feature = "float_to_from_bytes", since = "1.40.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[must_use]
+ #[inline]
+ pub const fn from_be_bytes(bytes: [u8; 4]) -> Self {
+ Self::from_bits(u32::from_be_bytes(bytes))
+ }
+
+ /// Creates a floating point value from its representation as a byte array in little endian.
+ ///
+ /// See [`from_bits`](Self::from_bits) for some discussion of the
+ /// portability of this operation (there are almost no issues).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let value = f32::from_le_bytes([0x00, 0x00, 0x48, 0x41]);
+ /// assert_eq!(value, 12.5);
+ /// ```
+ #[stable(feature = "float_to_from_bytes", since = "1.40.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[must_use]
+ #[inline]
+ pub const fn from_le_bytes(bytes: [u8; 4]) -> Self {
+ Self::from_bits(u32::from_le_bytes(bytes))
+ }
+
+ /// Creates a floating point value from its representation as a byte array in native endian.
+ ///
+ /// As the target platform's native endianness is used, portable code
+ /// likely wants to use [`from_be_bytes`] or [`from_le_bytes`], as
+ /// appropriate, instead.
+ ///
+ /// [`from_be_bytes`]: f32::from_be_bytes
+ /// [`from_le_bytes`]: f32::from_le_bytes
+ ///
+ /// See [`from_bits`](Self::from_bits) for some discussion of the
+ /// portability of this operation (there are almost no issues).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let value = f32::from_ne_bytes(if cfg!(target_endian = "big") {
+ /// [0x41, 0x48, 0x00, 0x00]
+ /// } else {
+ /// [0x00, 0x00, 0x48, 0x41]
+ /// });
+ /// assert_eq!(value, 12.5);
+ /// ```
+ #[stable(feature = "float_to_from_bytes", since = "1.40.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[must_use]
+ #[inline]
+ pub const fn from_ne_bytes(bytes: [u8; 4]) -> Self {
+ Self::from_bits(u32::from_ne_bytes(bytes))
+ }
+
+ /// Returns the ordering between `self` and `other`.
+ ///
+ /// Unlike the standard partial comparison between floating point numbers,
+ /// this comparison always produces an ordering in accordance to
+ /// the `totalOrder` predicate as defined in the IEEE 754 (2008 revision)
+ /// floating point standard. The values are ordered in the following sequence:
+ ///
+ /// - negative quiet NaN
+ /// - negative signaling NaN
+ /// - negative infinity
+ /// - negative numbers
+ /// - negative subnormal numbers
+ /// - negative zero
+ /// - positive zero
+ /// - positive subnormal numbers
+ /// - positive numbers
+ /// - positive infinity
+ /// - positive signaling NaN
+ /// - positive quiet NaN.
+ ///
+ /// The ordering established by this function does not always agree with the
+ /// [`PartialOrd`] and [`PartialEq`] implementations of `f32`. For example,
+ /// they consider negative and positive zero equal, while `total_cmp`
+ /// doesn't.
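+ ///
+ /// For instance, a minimal sketch of the zero case:
+ ///
+ /// ```
+ /// use std::cmp::Ordering;
+ ///
+ /// assert_eq!((-0.0_f32).partial_cmp(&0.0), Some(Ordering::Equal));
+ /// assert_eq!((-0.0_f32).total_cmp(&0.0), Ordering::Less);
+ /// ```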
+ ///
+ /// The interpretation of the signaling NaN bit follows the definition in
+ /// the IEEE 754 standard, which may not match the interpretation by some of
+ /// the older, non-conformant (e.g. MIPS) hardware implementations.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// struct GoodBoy {
+ /// name: String,
+ /// weight: f32,
+ /// }
+ ///
+ /// let mut bois = vec![
+ /// GoodBoy { name: "Pucci".to_owned(), weight: 0.1 },
+ /// GoodBoy { name: "Woofer".to_owned(), weight: 99.0 },
+ /// GoodBoy { name: "Yapper".to_owned(), weight: 10.0 },
+ /// GoodBoy { name: "Chonk".to_owned(), weight: f32::INFINITY },
+ /// GoodBoy { name: "Abs. Unit".to_owned(), weight: f32::NAN },
+ /// GoodBoy { name: "Floaty".to_owned(), weight: -5.0 },
+ /// ];
+ ///
+ /// bois.sort_by(|a, b| a.weight.total_cmp(&b.weight));
+ /// # assert!(bois.into_iter().map(|b| b.weight)
+ /// # .zip([-5.0, 0.1, 10.0, 99.0, f32::INFINITY, f32::NAN].iter())
+ /// # .all(|(a, b)| a.to_bits() == b.to_bits()))
+ /// ```
+ #[stable(feature = "total_cmp", since = "1.62.0")]
+ #[must_use]
+ #[inline]
+ pub fn total_cmp(&self, other: &Self) -> crate::cmp::Ordering {
+ let mut left = self.to_bits() as i32;
+ let mut right = other.to_bits() as i32;
+
+ // In case of negatives, flip all the bits except the sign
+ // to achieve a similar layout as two's complement integers
+ //
+ // Why does this work? IEEE 754 floats consist of three fields:
+ // Sign bit, exponent and mantissa. The set of exponent and mantissa
+ // fields as a whole have the property that their bitwise order is
+ // equal to the numeric magnitude where the magnitude is defined.
+ // The magnitude is not normally defined on NaN values, but
+ // IEEE 754 totalOrder defines the NaN values also to follow the
+ // bitwise order. This leads to order explained in the doc comment.
+ // However, the representation of magnitude is the same for negative
+ // and positive numbers – only the sign bit is different.
+ // To easily compare the floats as signed integers, we need to
+ // flip the exponent and mantissa bits in case of negative numbers.
+ // We effectively convert the numbers to "two's complement" form.
+ //
+ // To do the flipping, we construct a mask and XOR against it.
+ // We branchlessly calculate an "all-ones except for the sign bit"
+ // mask from negative-signed values: right shifting sign-extends
+ // the integer, so we "fill" the mask with sign bits, and then
+ // convert to unsigned to push one more zero bit.
+ // On positive values, the mask is all zeros, so it's a no-op.
+ left ^= (((left >> 31) as u32) >> 1) as i32;
+ right ^= (((right >> 31) as u32) >> 1) as i32;
+
+ left.cmp(&right)
+ }
+
+ /// Restrict a value to a certain interval unless it is NaN.
+ ///
+ /// Returns `max` if `self` is greater than `max`, and `min` if `self` is
+ /// less than `min`. Otherwise this returns `self`.
+ ///
+ /// Note that if the initial value is NaN, this function returns NaN as
+ /// well.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `min > max`, `min` is NaN, or `max` is NaN.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!((-3.0f32).clamp(-2.0, 1.0) == -2.0);
+ /// assert!((0.0f32).clamp(-2.0, 1.0) == 0.0);
+ /// assert!((2.0f32).clamp(-2.0, 1.0) == 1.0);
+ /// assert!((f32::NAN).clamp(-2.0, 1.0).is_nan());
+ /// ```
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "clamp", since = "1.50.0")]
+ #[inline]
+ pub fn clamp(self, min: f32, max: f32) -> f32 {
+ assert!(min <= max);
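+ // A NaN bound also trips this assert: comparisons with NaN are always false.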
+ let mut x = self;
+ if x < min {
+ x = min;
+ }
+ if x > max {
+ x = max;
+ }
+ x
+ }
+}
diff --git a/library/core/src/num/f64.rs b/library/core/src/num/f64.rs
new file mode 100644
index 000000000..75c92c2f8
--- /dev/null
+++ b/library/core/src/num/f64.rs
@@ -0,0 +1,1294 @@
+//! Constants specific to the `f64` double-precision floating point type.
+//!
+//! *[See also the `f64` primitive type][f64].*
+//!
+//! Mathematically significant numbers are provided in the `consts` sub-module.
+//!
+//! For the constants defined directly in this module
+//! (as distinct from those defined in the `consts` sub-module),
+//! new code should instead use the associated constants
+//! defined directly on the `f64` type.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::convert::FloatToInt;
+#[cfg(not(test))]
+use crate::intrinsics;
+use crate::mem;
+use crate::num::FpCategory;
+
+/// The radix or base of the internal representation of `f64`.
+/// Use [`f64::RADIX`] instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// # #[allow(deprecated, deprecated_in_future)]
+/// let r = std::f64::RADIX;
+///
+/// // intended way
+/// let r = f64::RADIX;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "TBD", note = "replaced by the `RADIX` associated constant on `f64`")]
+pub const RADIX: u32 = f64::RADIX;
+
+/// Number of significant digits in base 2.
+/// Use [`f64::MANTISSA_DIGITS`] instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// # #[allow(deprecated, deprecated_in_future)]
+/// let d = std::f64::MANTISSA_DIGITS;
+///
+/// // intended way
+/// let d = f64::MANTISSA_DIGITS;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(
+ since = "TBD",
+ note = "replaced by the `MANTISSA_DIGITS` associated constant on `f64`"
+)]
+pub const MANTISSA_DIGITS: u32 = f64::MANTISSA_DIGITS;
+
+/// Approximate number of significant digits in base 10.
+/// Use [`f64::DIGITS`] instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// # #[allow(deprecated, deprecated_in_future)]
+/// let d = std::f64::DIGITS;
+///
+/// // intended way
+/// let d = f64::DIGITS;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "TBD", note = "replaced by the `DIGITS` associated constant on `f64`")]
+pub const DIGITS: u32 = f64::DIGITS;
+
+/// [Machine epsilon] value for `f64`.
+/// Use [`f64::EPSILON`] instead.
+///
+/// This is the difference between `1.0` and the next larger representable number.
+///
+/// [Machine epsilon]: https://en.wikipedia.org/wiki/Machine_epsilon
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// # #[allow(deprecated, deprecated_in_future)]
+/// let e = std::f64::EPSILON;
+///
+/// // intended way
+/// let e = f64::EPSILON;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "TBD", note = "replaced by the `EPSILON` associated constant on `f64`")]
+pub const EPSILON: f64 = f64::EPSILON;
+
+/// Smallest finite `f64` value.
+/// Use [`f64::MIN`] instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// # #[allow(deprecated, deprecated_in_future)]
+/// let min = std::f64::MIN;
+///
+/// // intended way
+/// let min = f64::MIN;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "TBD", note = "replaced by the `MIN` associated constant on `f64`")]
+pub const MIN: f64 = f64::MIN;
+
+/// Smallest positive normal `f64` value.
+/// Use [`f64::MIN_POSITIVE`] instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// # #[allow(deprecated, deprecated_in_future)]
+/// let min = std::f64::MIN_POSITIVE;
+///
+/// // intended way
+/// let min = f64::MIN_POSITIVE;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "TBD", note = "replaced by the `MIN_POSITIVE` associated constant on `f64`")]
+pub const MIN_POSITIVE: f64 = f64::MIN_POSITIVE;
+
+/// Largest finite `f64` value.
+/// Use [`f64::MAX`] instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// # #[allow(deprecated, deprecated_in_future)]
+/// let max = std::f64::MAX;
+///
+/// // intended way
+/// let max = f64::MAX;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "TBD", note = "replaced by the `MAX` associated constant on `f64`")]
+pub const MAX: f64 = f64::MAX;
+
+/// One greater than the minimum possible normal power of 2 exponent.
+/// Use [`f64::MIN_EXP`] instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// # #[allow(deprecated, deprecated_in_future)]
+/// let min = std::f64::MIN_EXP;
+///
+/// // intended way
+/// let min = f64::MIN_EXP;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "TBD", note = "replaced by the `MIN_EXP` associated constant on `f64`")]
+pub const MIN_EXP: i32 = f64::MIN_EXP;
+
+/// Maximum possible power of 2 exponent.
+/// Use [`f64::MAX_EXP`] instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// # #[allow(deprecated, deprecated_in_future)]
+/// let max = std::f64::MAX_EXP;
+///
+/// // intended way
+/// let max = f64::MAX_EXP;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "TBD", note = "replaced by the `MAX_EXP` associated constant on `f64`")]
+pub const MAX_EXP: i32 = f64::MAX_EXP;
+
+/// Minimum possible normal power of 10 exponent.
+/// Use [`f64::MIN_10_EXP`] instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// # #[allow(deprecated, deprecated_in_future)]
+/// let min = std::f64::MIN_10_EXP;
+///
+/// // intended way
+/// let min = f64::MIN_10_EXP;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "TBD", note = "replaced by the `MIN_10_EXP` associated constant on `f64`")]
+pub const MIN_10_EXP: i32 = f64::MIN_10_EXP;
+
+/// Maximum possible power of 10 exponent.
+/// Use [`f64::MAX_10_EXP`] instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// # #[allow(deprecated, deprecated_in_future)]
+/// let max = std::f64::MAX_10_EXP;
+///
+/// // intended way
+/// let max = f64::MAX_10_EXP;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "TBD", note = "replaced by the `MAX_10_EXP` associated constant on `f64`")]
+pub const MAX_10_EXP: i32 = f64::MAX_10_EXP;
+
+/// Not a Number (NaN).
+/// Use [`f64::NAN`] instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// # #[allow(deprecated, deprecated_in_future)]
+/// let nan = std::f64::NAN;
+///
+/// // intended way
+/// let nan = f64::NAN;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "TBD", note = "replaced by the `NAN` associated constant on `f64`")]
+pub const NAN: f64 = f64::NAN;
+
+/// Infinity (∞).
+/// Use [`f64::INFINITY`] instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// # #[allow(deprecated, deprecated_in_future)]
+/// let inf = std::f64::INFINITY;
+///
+/// // intended way
+/// let inf = f64::INFINITY;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "TBD", note = "replaced by the `INFINITY` associated constant on `f64`")]
+pub const INFINITY: f64 = f64::INFINITY;
+
+/// Negative infinity (−∞).
+/// Use [`f64::NEG_INFINITY`] instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// # #[allow(deprecated, deprecated_in_future)]
+/// let ninf = std::f64::NEG_INFINITY;
+///
+/// // intended way
+/// let ninf = f64::NEG_INFINITY;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "TBD", note = "replaced by the `NEG_INFINITY` associated constant on `f64`")]
+pub const NEG_INFINITY: f64 = f64::NEG_INFINITY;
+
+/// Basic mathematical constants.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub mod consts {
+ // FIXME: replace with mathematical constants from cmath.
+
+ /// Archimedes' constant (π)
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const PI: f64 = 3.14159265358979323846264338327950288_f64;
+
+ /// The full circle constant (τ)
+ ///
+ /// Equal to 2π.
+ #[stable(feature = "tau_constant", since = "1.47.0")]
+ pub const TAU: f64 = 6.28318530717958647692528676655900577_f64;
+
+ /// π/2
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const FRAC_PI_2: f64 = 1.57079632679489661923132169163975144_f64;
+
+ /// π/3
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const FRAC_PI_3: f64 = 1.04719755119659774615421446109316763_f64;
+
+ /// π/4
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const FRAC_PI_4: f64 = 0.785398163397448309615660845819875721_f64;
+
+ /// π/6
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const FRAC_PI_6: f64 = 0.52359877559829887307710723054658381_f64;
+
+ /// π/8
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const FRAC_PI_8: f64 = 0.39269908169872415480783042290993786_f64;
+
+ /// 1/π
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const FRAC_1_PI: f64 = 0.318309886183790671537767526745028724_f64;
+
+ /// 2/π
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const FRAC_2_PI: f64 = 0.636619772367581343075535053490057448_f64;
+
+ /// 2/sqrt(π)
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const FRAC_2_SQRT_PI: f64 = 1.12837916709551257389615890312154517_f64;
+
+ /// sqrt(2)
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const SQRT_2: f64 = 1.41421356237309504880168872420969808_f64;
+
+ /// 1/sqrt(2)
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const FRAC_1_SQRT_2: f64 = 0.707106781186547524400844362104849039_f64;
+
+ /// Euler's number (e)
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const E: f64 = 2.71828182845904523536028747135266250_f64;
+
+ /// log<sub>2</sub>(10)
+ #[stable(feature = "extra_log_consts", since = "1.43.0")]
+ pub const LOG2_10: f64 = 3.32192809488736234787031942948939018_f64;
+
+ /// log<sub>2</sub>(e)
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const LOG2_E: f64 = 1.44269504088896340735992468100189214_f64;
+
+ /// log<sub>10</sub>(2)
+ #[stable(feature = "extra_log_consts", since = "1.43.0")]
+ pub const LOG10_2: f64 = 0.301029995663981195213738894724493027_f64;
+
+ /// log<sub>10</sub>(e)
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const LOG10_E: f64 = 0.434294481903251827651128918916605082_f64;
+
+ /// ln(2)
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const LN_2: f64 = 0.693147180559945309417232121458176568_f64;
+
+ /// ln(10)
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const LN_10: f64 = 2.30258509299404568401799145468436421_f64;
+}
+
+#[cfg(not(test))]
+impl f64 {
+ /// The radix or base of the internal representation of `f64`.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const RADIX: u32 = 2;
+
+ /// Number of significant digits in base 2.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MANTISSA_DIGITS: u32 = 53;
+ /// Approximate number of significant digits in base 10.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const DIGITS: u32 = 15;
+
+ /// [Machine epsilon] value for `f64`.
+ ///
+ /// This is the difference between `1.0` and the next larger representable number.
+ ///
+ /// [Machine epsilon]: https://en.wikipedia.org/wiki/Machine_epsilon
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const EPSILON: f64 = 2.2204460492503131e-16_f64;
+
+ /// Smallest finite `f64` value.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MIN: f64 = -1.7976931348623157e+308_f64;
+ /// Smallest positive normal `f64` value.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MIN_POSITIVE: f64 = 2.2250738585072014e-308_f64;
+ /// Largest finite `f64` value.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MAX: f64 = 1.7976931348623157e+308_f64;
+
+ /// One greater than the minimum possible normal power of 2 exponent.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MIN_EXP: i32 = -1021;
+ /// Maximum possible power of 2 exponent.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MAX_EXP: i32 = 1024;
+
+ /// Minimum possible normal power of 10 exponent.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MIN_10_EXP: i32 = -307;
+ /// Maximum possible power of 10 exponent.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MAX_10_EXP: i32 = 308;
+
+ /// Not a Number (NaN).
+ ///
+ /// Note that IEEE 754 doesn't define just a single NaN value;
+ /// a plethora of bit patterns are considered to be NaN.
+ /// Furthermore, the standard distinguishes
+ /// between a "signaling" and a "quiet" NaN,
+ /// and allows inspecting its "payload" (the unspecified bits in the bit pattern).
+ /// This constant isn't guaranteed to equal any specific NaN bit pattern,
+ /// and the stability of its representation over Rust versions
+ /// and target platforms isn't guaranteed.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const NAN: f64 = 0.0_f64 / 0.0_f64;
+ /// Infinity (∞).
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const INFINITY: f64 = 1.0_f64 / 0.0_f64;
+ /// Negative infinity (−∞).
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const NEG_INFINITY: f64 = -1.0_f64 / 0.0_f64;
+
+ /// Returns `true` if this value is NaN.
+ ///
+ /// ```
+ /// let nan = f64::NAN;
+ /// let f = 7.0_f64;
+ ///
+ /// assert!(nan.is_nan());
+ /// assert!(!f.is_nan());
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ #[inline]
+ pub const fn is_nan(self) -> bool {
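+ // A NaN is the only value that does not compare equal to itself.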
+ self != self
+ }
+
+ // FIXME(#50145): `abs` is publicly unavailable in libcore due to
+ // concerns about portability, so this implementation is for
+ // private use internally.
+ #[inline]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ pub(crate) const fn abs_private(self) -> f64 {
+ // SAFETY: This transmutation is fine. Probably. For the reasons std is using it.
+ unsafe {
+ mem::transmute::<u64, f64>(mem::transmute::<f64, u64>(self) & 0x7fff_ffff_ffff_ffff)
+ }
+ }
+
+ /// Returns `true` if this value is positive infinity or negative infinity, and
+ /// `false` otherwise.
+ ///
+ /// ```
+ /// let f = 7.0f64;
+ /// let inf = f64::INFINITY;
+ /// let neg_inf = f64::NEG_INFINITY;
+ /// let nan = f64::NAN;
+ ///
+ /// assert!(!f.is_infinite());
+ /// assert!(!nan.is_infinite());
+ ///
+ /// assert!(inf.is_infinite());
+ /// assert!(neg_inf.is_infinite());
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ #[inline]
+ pub const fn is_infinite(self) -> bool {
+ // Getting clever with transmutation can result in incorrect answers on some FPUs
+ // FIXME: alter the Rust <-> Rust calling convention to prevent this problem.
+ // See https://github.com/rust-lang/rust/issues/72327
+ (self == f64::INFINITY) | (self == f64::NEG_INFINITY)
+ }
+
+ /// Returns `true` if this number is neither infinite nor NaN.
+ ///
+ /// ```
+ /// let f = 7.0f64;
+ /// let inf: f64 = f64::INFINITY;
+ /// let neg_inf: f64 = f64::NEG_INFINITY;
+ /// let nan: f64 = f64::NAN;
+ ///
+ /// assert!(f.is_finite());
+ ///
+ /// assert!(!nan.is_finite());
+ /// assert!(!inf.is_finite());
+ /// assert!(!neg_inf.is_finite());
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ #[inline]
+ pub const fn is_finite(self) -> bool {
+ // There's no need to handle NaN separately: if self is NaN,
+ // the comparison is not true, exactly as desired.
+ self.abs_private() < Self::INFINITY
+ }
+
+ /// Returns `true` if the number is [subnormal].
+ ///
+ /// ```
+ /// let min = f64::MIN_POSITIVE; // 2.2250738585072014e-308_f64
+ /// let max = f64::MAX;
+ /// let lower_than_min = 1.0e-308_f64;
+ /// let zero = 0.0_f64;
+ ///
+ /// assert!(!min.is_subnormal());
+ /// assert!(!max.is_subnormal());
+ ///
+ /// assert!(!zero.is_subnormal());
+ /// assert!(!f64::NAN.is_subnormal());
+ /// assert!(!f64::INFINITY.is_subnormal());
+ /// // Values between `0` and `min` are Subnormal.
+ /// assert!(lower_than_min.is_subnormal());
+ /// ```
+ /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number
+ #[must_use]
+ #[stable(feature = "is_subnormal", since = "1.53.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ #[inline]
+ pub const fn is_subnormal(self) -> bool {
+ matches!(self.classify(), FpCategory::Subnormal)
+ }
+
+ /// Returns `true` if the number is neither zero, infinite,
+ /// [subnormal], nor NaN.
+ ///
+ /// ```
+ /// let min = f64::MIN_POSITIVE; // 2.2250738585072014e-308f64
+ /// let max = f64::MAX;
+ /// let lower_than_min = 1.0e-308_f64;
+ /// let zero = 0.0f64;
+ ///
+ /// assert!(min.is_normal());
+ /// assert!(max.is_normal());
+ ///
+ /// assert!(!zero.is_normal());
+ /// assert!(!f64::NAN.is_normal());
+ /// assert!(!f64::INFINITY.is_normal());
+ /// // Values between `0` and `min` are Subnormal.
+ /// assert!(!lower_than_min.is_normal());
+ /// ```
+ /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ #[inline]
+ pub const fn is_normal(self) -> bool {
+ matches!(self.classify(), FpCategory::Normal)
+ }
+
+ /// Returns the floating point category of the number. If only one property
+ /// is going to be tested, it is generally faster to use the specific
+ /// predicate instead.
+ ///
+ /// ```
+ /// use std::num::FpCategory;
+ ///
+ /// let num = 12.4_f64;
+ /// let inf = f64::INFINITY;
+ ///
+ /// assert_eq!(num.classify(), FpCategory::Normal);
+ /// assert_eq!(inf.classify(), FpCategory::Infinite);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ pub const fn classify(self) -> FpCategory {
+ // A previous implementation tried to only use bitmask-based checks,
+ // using f64::to_bits to transmute the float to its bit repr and match on that.
+ // Unfortunately, floating point numbers can be much worse than that.
+ // This also needs to not result in recursive evaluations of f64::to_bits.
+ //
+ // On some processors, in some cases, LLVM will "helpfully" lower floating point ops,
+ // in spite of a request for them using f32 and f64, to things like x87 operations.
+ // These have an f64's mantissa, but can have a larger than normal exponent.
+ // FIXME(jubilee): Using x87 operations is never necessary in order to function
+ // on x86 processors for Rust-to-Rust calls, so this issue should not happen.
+ // Code generation should be adjusted to use non-C calling conventions, avoiding this.
+ //
+ // Thus, a value may compare unequal to infinity, despite having a "full" exponent mask.
+ // And it may not be NaN, as it can simply be an "overextended" finite value.
+ if self.is_nan() {
+ FpCategory::Nan
+ } else {
+ // However, std can't simply compare to zero to check for zero, either,
+ // as correctness requires avoiding equality tests like `subnormal == -0.0`
+ // that may be wrong under "denormals are zero" and "flush to zero" modes.
+ // Most of std's targets don't use those, but they are used for thumbv7neon.
+ // So, this does use bitpattern matching for the rest.
+
+ // SAFETY: f64 to u64 is fine. Usually.
+ // If control flow has gotten this far, the value is definitely in one of the categories
+ // that f64::partial_classify can correctly analyze.
+ unsafe { f64::partial_classify(self) }
+ }
+ }
+
+ // This doesn't actually return the right answer for NaN on purpose,
+ // seeing as how it cannot correctly discern between a floating point NaN,
+ // and some normal floating point numbers truncated from an x87 FPU.
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ const unsafe fn partial_classify(self) -> FpCategory {
+ const EXP_MASK: u64 = 0x7ff0000000000000;
+ const MAN_MASK: u64 = 0x000fffffffffffff;
+
+ // SAFETY: The caller is not asking questions for which this will tell lies.
+ let b = unsafe { mem::transmute::<f64, u64>(self) };
+ match (b & MAN_MASK, b & EXP_MASK) {
+ (0, EXP_MASK) => FpCategory::Infinite,
+ (0, 0) => FpCategory::Zero,
+ (_, 0) => FpCategory::Subnormal,
+ _ => FpCategory::Normal,
+ }
+ }
+
+ // This operates on bits, and only bits, so it can ignore concerns about weird FPUs.
+ // FIXME(jubilee): In a just world, this would be the entire impl for classify,
+ // plus a transmute. We do not live in a just world, but we can make it more so.
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ const fn classify_bits(b: u64) -> FpCategory {
+ const EXP_MASK: u64 = 0x7ff0000000000000;
+ const MAN_MASK: u64 = 0x000fffffffffffff;
+
+ match (b & MAN_MASK, b & EXP_MASK) {
+ (0, EXP_MASK) => FpCategory::Infinite,
+ (_, EXP_MASK) => FpCategory::Nan,
+ (0, 0) => FpCategory::Zero,
+ (_, 0) => FpCategory::Subnormal,
+ _ => FpCategory::Normal,
+ }
+ }
+
+ /// Returns `true` if `self` has a positive sign, including `+0.0`, NaNs with
+ /// positive sign bit and positive infinity. Note that IEEE 754 doesn't assign any
+ /// meaning to the sign bit in case of a NaN, and as Rust doesn't guarantee that
+ /// the bit pattern of a NaN is conserved over arithmetic operations, the result of
+ /// `is_sign_positive` on a NaN might be unexpected in some cases.
+ /// See [explanation of NaN as a special value](f32) for more info.
+ ///
+ /// ```
+ /// let f = 7.0_f64;
+ /// let g = -7.0_f64;
+ ///
+ /// assert!(f.is_sign_positive());
+ /// assert!(!g.is_sign_positive());
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ #[inline]
+ pub const fn is_sign_positive(self) -> bool {
+ !self.is_sign_negative()
+ }
+
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[deprecated(since = "1.0.0", note = "renamed to is_sign_positive")]
+ #[inline]
+ #[doc(hidden)]
+ pub fn is_positive(self) -> bool {
+ self.is_sign_positive()
+ }
+
+ /// Returns `true` if `self` has a negative sign, including `-0.0`, NaNs with
+ /// negative sign bit and negative infinity. Note that IEEE 754 doesn't assign any
+ /// meaning to the sign bit in case of a NaN, and as Rust doesn't guarantee that
+ /// the bit pattern of a NaN is conserved over arithmetic operations, the result of
+ /// `is_sign_negative` on a NaN might be unexpected in some cases.
+ /// See [explanation of NaN as a special value](f32) for more info.
+ ///
+ /// ```
+ /// let f = 7.0_f64;
+ /// let g = -7.0_f64;
+ ///
+ /// assert!(!f.is_sign_negative());
+ /// assert!(g.is_sign_negative());
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ #[inline]
+ pub const fn is_sign_negative(self) -> bool {
+ // IEEE754 says: isSignMinus(x) is true if and only if x has negative sign. isSignMinus
+ // applies to zeros and NaNs as well.
+ // SAFETY: This is just transmuting to get the sign bit, it's fine.
+ unsafe { mem::transmute::<f64, u64>(self) & 0x8000_0000_0000_0000 != 0 }
+ }
+
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[deprecated(since = "1.0.0", note = "renamed to is_sign_negative")]
+ #[inline]
+ #[doc(hidden)]
+ pub fn is_negative(self) -> bool {
+ self.is_sign_negative()
+ }
+
+ /// Takes the reciprocal (inverse) of a number, `1/x`.
+ ///
+ /// ```
+ /// let x = 2.0_f64;
+ /// let abs_difference = (x.recip() - (1.0 / x)).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ #[must_use = "this returns the result of the operation, without modifying the original"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn recip(self) -> f64 {
+ 1.0 / self
+ }
+
+ /// Converts radians to degrees.
+ ///
+ /// ```
+ /// let angle = std::f64::consts::PI;
+ ///
+ /// let abs_difference = (angle.to_degrees() - 180.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn to_degrees(self) -> f64 {
+ // The division here is correctly rounded with respect to the true
+ // value of 180/π. (This differs from f32, where a constant must be
+ // used to ensure a correctly rounded result.)
+ self * (180.0f64 / consts::PI)
+ }
+
+ /// Converts degrees to radians.
+ ///
+ /// ```
+ /// let angle = 180.0_f64;
+ ///
+ /// let abs_difference = (angle.to_radians() - std::f64::consts::PI).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn to_radians(self) -> f64 {
+ let value: f64 = consts::PI;
+ self * (value / 180.0)
+ }
+
+ /// Returns the maximum of the two numbers, ignoring NaN.
+ ///
+ /// If one of the arguments is NaN, then the other argument is returned.
+ /// This follows the IEEE-754 2008 semantics for maxNum, except for handling of signaling NaNs;
+ /// this function handles all NaNs the same way and avoids maxNum's problems with associativity.
+ /// This also matches the behavior of libm’s fmax.
+ ///
+ /// ```
+ /// let x = 1.0_f64;
+ /// let y = 2.0_f64;
+ ///
+ /// assert_eq!(x.max(y), y);
+ /// ```
+ #[must_use = "this returns the result of the comparison, without modifying either input"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn max(self, other: f64) -> f64 {
+ intrinsics::maxnumf64(self, other)
+ }
+
+ /// Returns the minimum of the two numbers, ignoring NaN.
+ ///
+ /// If one of the arguments is NaN, then the other argument is returned.
+ /// This follows the IEEE-754 2008 semantics for minNum, except for handling of signaling NaNs;
+ /// this function handles all NaNs the same way and avoids minNum's problems with associativity.
+ /// This also matches the behavior of libm’s fmin.
+ ///
+ /// ```
+ /// let x = 1.0_f64;
+ /// let y = 2.0_f64;
+ ///
+ /// assert_eq!(x.min(y), x);
+ /// ```
+ #[must_use = "this returns the result of the comparison, without modifying either input"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn min(self, other: f64) -> f64 {
+ intrinsics::minnumf64(self, other)
+ }
+
+ /// Returns the maximum of the two numbers, propagating NaN.
+ ///
+ /// This returns NaN when *either* argument is NaN, as opposed to
+ /// [`f64::max`] which only returns NaN when *both* arguments are NaN.
+ ///
+ /// ```
+ /// #![feature(float_minimum_maximum)]
+ /// let x = 1.0_f64;
+ /// let y = 2.0_f64;
+ ///
+ /// assert_eq!(x.maximum(y), y);
+ /// assert!(x.maximum(f64::NAN).is_nan());
+ /// ```
+ ///
+ /// If one of the arguments is NaN, then NaN is returned. Otherwise this returns the greater
+ /// of the two numbers. For this operation, -0.0 is considered to be less than +0.0.
+ /// Note that this follows the semantics specified in IEEE 754-2019.
+ ///
+ /// Also note that "propagation" of NaNs here doesn't necessarily mean that the bitpattern of a NaN
+ /// operand is conserved; see [explanation of NaN as a special value](f32) for more info.
+ #[must_use = "this returns the result of the comparison, without modifying either input"]
+ #[unstable(feature = "float_minimum_maximum", issue = "91079")]
+ #[inline]
+ pub fn maximum(self, other: f64) -> f64 {
+ if self > other {
+ self
+ } else if other > self {
+ other
+ } else if self == other {
+ if self.is_sign_positive() && other.is_sign_negative() { self } else { other }
+ } else {
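+ // At least one input is NaN; `self + other` propagates it, producing a NaN result.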
+ self + other
+ }
+ }
+
+ /// Returns the minimum of the two numbers, propagating NaN.
+ ///
+ /// This returns NaN when *either* argument is NaN, as opposed to
+ /// [`f64::min`] which only returns NaN when *both* arguments are NaN.
+ ///
+ /// ```
+ /// #![feature(float_minimum_maximum)]
+ /// let x = 1.0_f64;
+ /// let y = 2.0_f64;
+ ///
+ /// assert_eq!(x.minimum(y), x);
+ /// assert!(x.minimum(f64::NAN).is_nan());
+ /// ```
+ ///
+ /// If one of the arguments is NaN, then NaN is returned. Otherwise this returns the lesser
+ /// of the two numbers. For this operation, -0.0 is considered to be less than +0.0.
+ /// Note that this follows the semantics specified in IEEE 754-2019.
+ ///
+ /// Also note that "propagation" of NaNs here doesn't necessarily mean that the bitpattern of a NaN
+ /// operand is conserved; see [explanation of NaN as a special value](f32) for more info.
+ #[must_use = "this returns the result of the comparison, without modifying either input"]
+ #[unstable(feature = "float_minimum_maximum", issue = "91079")]
+ #[inline]
+ pub fn minimum(self, other: f64) -> f64 {
+ if self < other {
+ self
+ } else if other < self {
+ other
+ } else if self == other {
+ if self.is_sign_negative() && other.is_sign_positive() { self } else { other }
+ } else {
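+ // At least one input is NaN; `self + other` propagates it, producing a NaN result.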
+ self + other
+ }
+ }
+
+ /// Rounds toward zero and converts to any primitive integer type,
+ /// assuming that the value is finite and fits in that type.
+ ///
+ /// ```
+ /// let value = 4.6_f64;
+ /// let rounded = unsafe { value.to_int_unchecked::<u16>() };
+ /// assert_eq!(rounded, 4);
+ ///
+ /// let value = -128.9_f64;
+ /// let rounded = unsafe { value.to_int_unchecked::<i8>() };
+ /// assert_eq!(rounded, i8::MIN);
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// The value must:
+ ///
+ /// * Not be `NaN`
+ /// * Not be infinite
+ /// * Be representable in the return type `Int`, after truncating off its fractional part
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[stable(feature = "float_approx_unchecked_to", since = "1.44.0")]
+ #[inline]
+ pub unsafe fn to_int_unchecked<Int>(self) -> Int
+ where
+ Self: FloatToInt<Int>,
+ {
+ // SAFETY: the caller must uphold the safety contract for
+ // `FloatToInt::to_int_unchecked`.
+ unsafe { FloatToInt::<Int>::to_int_unchecked(self) }
+ }
+
+ /// Raw transmutation to `u64`.
+ ///
+ /// This is currently identical to `transmute::<f64, u64>(self)` on all platforms.
+ ///
+ /// See [`from_bits`](Self::from_bits) for some discussion of the
+ /// portability of this operation (there are almost no issues).
+ ///
+ /// Note that this function is distinct from `as` casting, which attempts to
+ /// preserve the *numeric* value, and not the bitwise value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!((1f64).to_bits() != 1f64 as u64); // to_bits() is not casting!
+ /// assert_eq!((12.5f64).to_bits(), 0x4029000000000000);
+ /// ```
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[stable(feature = "float_bits_conv", since = "1.20.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[inline]
+ pub const fn to_bits(self) -> u64 {
+ // SAFETY: `u64` is a plain old datatype so we can always transmute to it.
+ // ...sorta.
+ //
+ // See the SAFETY comment in f64::from_bits for more.
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ const fn ct_f64_to_u64(ct: f64) -> u64 {
+ match ct.classify() {
+ FpCategory::Nan => {
+ panic!("const-eval error: cannot use f64::to_bits on a NaN")
+ }
+ FpCategory::Subnormal => {
+ panic!("const-eval error: cannot use f64::to_bits on a subnormal number")
+ }
+ FpCategory::Infinite | FpCategory::Normal | FpCategory::Zero => {
+ // SAFETY: We have a normal floating point number. Now we transmute, i.e. do a bitcopy.
+ unsafe { mem::transmute::<f64, u64>(ct) }
+ }
+ }
+ }
+ // SAFETY: `u64` is a plain old datatype so we can always... uh...
+ // ...look, just pretend you forgot what you just read.
+ // Stability concerns.
+ let rt_f64_to_u64 = |rt| unsafe { mem::transmute::<f64, u64>(rt) };
+ // SAFETY: We use internal implementations that either always work or fail at compile time.
+ unsafe { intrinsics::const_eval_select((self,), ct_f64_to_u64, rt_f64_to_u64) }
+ }
+
+ /// Raw transmutation from `u64`.
+ ///
+ /// This is currently identical to `transmute::<u64, f64>(v)` on all platforms.
+ /// It turns out this is incredibly portable, for two reasons:
+ ///
+ /// * Floats and Ints have the same endianness on all supported platforms.
+ /// * IEEE-754 very precisely specifies the bit layout of floats.
+ ///
+ /// However there is one caveat: prior to the 2008 version of IEEE-754, how
+ /// to interpret the NaN signaling bit wasn't actually specified. Most platforms
+ /// (notably x86 and ARM) picked the interpretation that was ultimately
+ /// standardized in 2008, but some didn't (notably MIPS). As a result, all
+ /// signaling NaNs on MIPS are quiet NaNs on x86, and vice-versa.
+ ///
+ /// Rather than trying to preserve signaling-ness cross-platform, this
+ /// implementation favors preserving the exact bits. This means that
+ /// any payloads encoded in NaNs will be preserved even if the result of
+ /// this method is sent over the network from an x86 machine to a MIPS one.
+ ///
+ /// If the results of this method are only manipulated by the same
+ /// architecture that produced them, then there is no portability concern.
+ ///
+ /// If the input isn't NaN, then there is no portability concern.
+ ///
+ /// If you don't care about signaling-ness (very likely), then there is no
+ /// portability concern.
+ ///
+ /// Note that this function is distinct from `as` casting, which attempts to
+ /// preserve the *numeric* value, and not the bitwise value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = f64::from_bits(0x4029000000000000);
+ /// assert_eq!(v, 12.5);
+ /// ```
+ #[stable(feature = "float_bits_conv", since = "1.20.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[must_use]
+ #[inline]
+ pub const fn from_bits(v: u64) -> Self {
+ // It turns out the safety issues with sNaN were overblown! Hooray!
+ // SAFETY: `u64` is a plain old datatype so we can always transmute from it
+ // ...sorta.
+ //
+ // It turns out that at runtime, it is possible for a floating point number
+ // to be subject to floating point modes that alter nonzero subnormal numbers
+ // to zero on reads and writes, aka "denormals are zero" and "flush to zero".
+ // This is not a problem usually, but at least one tier2 platform for Rust
+ // actually exhibits an FTZ behavior by default: thumbv7neon
+ // aka "the Neon FPU in AArch32 state"
+ //
+ // Even with this, not all instructions exhibit the FTZ behaviors on thumbv7neon,
+ // so this should load the same bits if LLVM emits the "correct" instructions,
+ // but LLVM sometimes makes interesting choices about float optimization,
+ // and other FPUs may do similar. Thus, it is wise to indulge luxuriously in caution.
+ //
+ // In addition, on x86 targets with SSE or SSE2 disabled and the x87 FPU enabled,
+ // i.e. not soft-float, the way Rust does parameter passing can actually alter
+ // a number that is "not infinity" to have the same exponent as infinity,
+ // in a slightly unpredictable manner.
+ //
+ // And, of course evaluating to a NaN value is fairly nondeterministic.
+ // More precisely: when NaN should be returned is knowable, but which NaN?
+ // So far that's defined by a combination of LLVM and the CPU, not Rust.
+ // This function, however, allows observing the bitstring of a NaN,
+ // thus introspection on CTFE.
+ //
+ // In order to preserve, at least for the moment, const-to-runtime equivalence,
+ // reject any of these possible situations from happening.
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ const fn ct_u64_to_f64(ct: u64) -> f64 {
+ match f64::classify_bits(ct) {
+ FpCategory::Subnormal => {
+ panic!("const-eval error: cannot use f64::from_bits on a subnormal number")
+ }
+ FpCategory::Nan => {
+ panic!("const-eval error: cannot use f64::from_bits on NaN")
+ }
+ FpCategory::Infinite | FpCategory::Normal | FpCategory::Zero => {
+ // SAFETY: It's not a frumious number
+ unsafe { mem::transmute::<u64, f64>(ct) }
+ }
+ }
+ }
+ // SAFETY: `u64` is a plain old datatype so we can always... uh...
+ // ...look, just pretend you forgot what you just read.
+ // Stability concerns.
+ let rt_u64_to_f64 = |rt| unsafe { mem::transmute::<u64, f64>(rt) };
+ // SAFETY: We use internal implementations that either always work or fail at compile time.
+ unsafe { intrinsics::const_eval_select((v,), ct_u64_to_f64, rt_u64_to_f64) }
+ }
+
+ /// Return the memory representation of this floating point number as a byte array in
+ /// big-endian (network) byte order.
+ ///
+ /// See [`from_bits`](Self::from_bits) for some discussion of the
+ /// portability of this operation (there are almost no issues).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let bytes = 12.5f64.to_be_bytes();
+ /// assert_eq!(bytes, [0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]);
+ /// ```
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[stable(feature = "float_to_from_bytes", since = "1.40.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[inline]
+ pub const fn to_be_bytes(self) -> [u8; 8] {
+ self.to_bits().to_be_bytes()
+ }
+
+ /// Return the memory representation of this floating point number as a byte array in
+ /// little-endian byte order.
+ ///
+ /// See [`from_bits`](Self::from_bits) for some discussion of the
+ /// portability of this operation (there are almost no issues).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let bytes = 12.5f64.to_le_bytes();
+ /// assert_eq!(bytes, [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x40]);
+ /// ```
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[stable(feature = "float_to_from_bytes", since = "1.40.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[inline]
+ pub const fn to_le_bytes(self) -> [u8; 8] {
+ self.to_bits().to_le_bytes()
+ }
+
+ /// Return the memory representation of this floating point number as a byte array in
+ /// native byte order.
+ ///
+ /// As the target platform's native endianness is used, portable code
+ /// should use [`to_be_bytes`] or [`to_le_bytes`], as appropriate, instead.
+ ///
+ /// [`to_be_bytes`]: f64::to_be_bytes
+ /// [`to_le_bytes`]: f64::to_le_bytes
+ ///
+ /// See [`from_bits`](Self::from_bits) for some discussion of the
+ /// portability of this operation (there are almost no issues).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let bytes = 12.5f64.to_ne_bytes();
+ /// assert_eq!(
+ /// bytes,
+ /// if cfg!(target_endian = "big") {
+ /// [0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
+ /// } else {
+ /// [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x40]
+ /// }
+ /// );
+ /// ```
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[stable(feature = "float_to_from_bytes", since = "1.40.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[inline]
+ pub const fn to_ne_bytes(self) -> [u8; 8] {
+ self.to_bits().to_ne_bytes()
+ }
+
+ /// Create a floating point value from its representation as a byte array in big endian.
+ ///
+ /// See [`from_bits`](Self::from_bits) for some discussion of the
+ /// portability of this operation (there are almost no issues).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let value = f64::from_be_bytes([0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]);
+ /// assert_eq!(value, 12.5);
+ /// ```
+ #[stable(feature = "float_to_from_bytes", since = "1.40.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[must_use]
+ #[inline]
+ pub const fn from_be_bytes(bytes: [u8; 8]) -> Self {
+ Self::from_bits(u64::from_be_bytes(bytes))
+ }
+
+ /// Create a floating point value from its representation as a byte array in little endian.
+ ///
+ /// See [`from_bits`](Self::from_bits) for some discussion of the
+ /// portability of this operation (there are almost no issues).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let value = f64::from_le_bytes([0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x40]);
+ /// assert_eq!(value, 12.5);
+ /// ```
+ #[stable(feature = "float_to_from_bytes", since = "1.40.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[must_use]
+ #[inline]
+ pub const fn from_le_bytes(bytes: [u8; 8]) -> Self {
+ Self::from_bits(u64::from_le_bytes(bytes))
+ }
+
+ /// Create a floating point value from its representation as a byte array in native endian.
+ ///
+ /// As the target platform's native endianness is used, portable code
+ /// likely wants to use [`from_be_bytes`] or [`from_le_bytes`], as
+ /// appropriate instead.
+ ///
+ /// [`from_be_bytes`]: f64::from_be_bytes
+ /// [`from_le_bytes`]: f64::from_le_bytes
+ ///
+ /// See [`from_bits`](Self::from_bits) for some discussion of the
+ /// portability of this operation (there are almost no issues).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let value = f64::from_ne_bytes(if cfg!(target_endian = "big") {
+ /// [0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
+ /// } else {
+ /// [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x40]
+ /// });
+ /// assert_eq!(value, 12.5);
+ /// ```
+ #[stable(feature = "float_to_from_bytes", since = "1.40.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[must_use]
+ #[inline]
+ pub const fn from_ne_bytes(bytes: [u8; 8]) -> Self {
+ Self::from_bits(u64::from_ne_bytes(bytes))
+ }
+
+ /// Return the ordering between `self` and `other`.
+ ///
+ /// Unlike the standard partial comparison between floating point numbers,
+ /// this comparison always produces an ordering in accordance to
+ /// the `totalOrder` predicate as defined in the IEEE 754 (2008 revision)
+ /// floating point standard. The values are ordered in the following sequence:
+ ///
+ /// - negative quiet NaN
+ /// - negative signaling NaN
+ /// - negative infinity
+ /// - negative numbers
+ /// - negative subnormal numbers
+ /// - negative zero
+ /// - positive zero
+ /// - positive subnormal numbers
+ /// - positive numbers
+ /// - positive infinity
+ /// - positive signaling NaN
+ /// - positive quiet NaN.
+ ///
+ /// The ordering established by this function does not always agree with the
+ /// [`PartialOrd`] and [`PartialEq`] implementations of `f64`. For example,
+ /// they consider negative and positive zero equal, while `total_cmp`
+ /// doesn't.
+ ///
+ /// The interpretation of the signaling NaN bit follows the definition in
+ /// the IEEE 754 standard, which may not match the interpretation by some of
+ /// the older, non-conformant (e.g. MIPS) hardware implementations.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// struct GoodBoy {
+ /// name: String,
+ /// weight: f64,
+ /// }
+ ///
+ /// let mut bois = vec![
+ /// GoodBoy { name: "Pucci".to_owned(), weight: 0.1 },
+ /// GoodBoy { name: "Woofer".to_owned(), weight: 99.0 },
+ /// GoodBoy { name: "Yapper".to_owned(), weight: 10.0 },
+ /// GoodBoy { name: "Chonk".to_owned(), weight: f64::INFINITY },
+ /// GoodBoy { name: "Abs. Unit".to_owned(), weight: f64::NAN },
+ /// GoodBoy { name: "Floaty".to_owned(), weight: -5.0 },
+ /// ];
+ ///
+ /// bois.sort_by(|a, b| a.weight.total_cmp(&b.weight));
+ /// # assert!(bois.into_iter().map(|b| b.weight)
+ /// # .zip([-5.0, 0.1, 10.0, 99.0, f64::INFINITY, f64::NAN].iter())
+ /// # .all(|(a, b)| a.to_bits() == b.to_bits()))
+ /// ```
+ #[stable(feature = "total_cmp", since = "1.62.0")]
+ #[must_use]
+ #[inline]
+ pub fn total_cmp(&self, other: &Self) -> crate::cmp::Ordering {
+ let mut left = self.to_bits() as i64;
+ let mut right = other.to_bits() as i64;
+
+ // In case of negatives, flip all the bits except the sign
+ // to achieve a similar layout as two's complement integers
+ //
+ // Why does this work? IEEE 754 floats consist of three fields:
+ // Sign bit, exponent and mantissa. The set of exponent and mantissa
+ // fields as a whole have the property that their bitwise order is
+ // equal to the numeric magnitude where the magnitude is defined.
+ // The magnitude is not normally defined on NaN values, but
+ // IEEE 754 totalOrder defines the NaN values also to follow the
+ // bitwise order. This leads to order explained in the doc comment.
+ // However, the representation of magnitude is the same for negative
+ // and positive numbers – only the sign bit is different.
+ // To easily compare the floats as signed integers, we need to
+ // flip the exponent and mantissa bits in case of negative numbers.
+ // We effectively convert the numbers to "two's complement" form.
+ //
+ // To do the flipping, we construct a mask and XOR against it.
+ // We branchlessly calculate an "all-ones except for the sign bit"
+ // mask from negative-signed values: right shifting sign-extends
+ // the integer, so we "fill" the mask with sign bits, and then
+ // convert to unsigned to push one more zero bit.
+ // On positive values, the mask is all zeros, so it's a no-op.
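+ //
+ // For example, `-0.0_f64` has bits `0x8000_0000_0000_0000`, the mask comes
+ // out as `0x7fff_ffff_ffff_ffff`, and the XOR maps it to `-1_i64`; `+0.0`
+ // (all zero bits) is left as `0_i64`, so `-0.0` sorts just before `+0.0`.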
+ left ^= (((left >> 63) as u64) >> 1) as i64;
+ right ^= (((right >> 63) as u64) >> 1) as i64;
+
+ left.cmp(&right)
+ }
+
+ /// Restrict a value to a certain interval unless it is NaN.
+ ///
+ /// Returns `max` if `self` is greater than `max`, and `min` if `self` is
+ /// less than `min`. Otherwise this returns `self`.
+ ///
+ /// Note that this function returns NaN if the initial value was NaN as
+ /// well.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `min > max`, `min` is NaN, or `max` is NaN.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!((-3.0f64).clamp(-2.0, 1.0) == -2.0);
+ /// assert!((0.0f64).clamp(-2.0, 1.0) == 0.0);
+ /// assert!((2.0f64).clamp(-2.0, 1.0) == 1.0);
+ /// assert!((f64::NAN).clamp(-2.0, 1.0).is_nan());
+ /// ```
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "clamp", since = "1.50.0")]
+ #[inline]
+ pub fn clamp(self, min: f64, max: f64) -> f64 {
+ assert!(min <= max);
+ let mut x = self;
+ if x < min {
+ x = min;
+ }
+ if x > max {
+ x = max;
+ }
+ x
+ }
+}
diff --git a/library/core/src/num/flt2dec/decoder.rs b/library/core/src/num/flt2dec/decoder.rs
new file mode 100644
index 000000000..576386054
--- /dev/null
+++ b/library/core/src/num/flt2dec/decoder.rs
@@ -0,0 +1,100 @@
+//! Decodes a floating-point value into individual parts and error ranges.
+
+use crate::num::dec2flt::float::RawFloat;
+use crate::num::FpCategory;
+
+/// Decoded unsigned finite value, such that:
+///
+/// - The original value is equal to `mant * 2^exp`.
+///
+/// - Any number from `(mant - minus) * 2^exp` to `(mant + plus) * 2^exp` will
+/// round to the original value. The range is inclusive only when
+/// `inclusive` is `true`.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub struct Decoded {
+ /// The scaled mantissa.
+ pub mant: u64,
+ /// The lower error range.
+ pub minus: u64,
+ /// The upper error range.
+ pub plus: u64,
+ /// The shared exponent in base 2.
+ pub exp: i16,
+ /// True when the error range is inclusive.
+ ///
+ /// In IEEE 754, this is true when the original mantissa was even.
+ pub inclusive: bool,
+}
+
+/// Decoded unsigned value.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum FullDecoded {
+ /// Not-a-number.
+ Nan,
+ /// Infinities, either positive or negative.
+ Infinite,
+ /// Zero, either positive or negative.
+ Zero,
+ /// Finite numbers with further decoded fields.
+ Finite(Decoded),
+}
+
+/// A floating point type which can be `decode`d.
+pub trait DecodableFloat: RawFloat + Copy {
+ /// The minimum positive normalized value.
+ fn min_pos_norm_value() -> Self;
+}
+
+impl DecodableFloat for f32 {
+ fn min_pos_norm_value() -> Self {
+ f32::MIN_POSITIVE
+ }
+}
+
+impl DecodableFloat for f64 {
+ fn min_pos_norm_value() -> Self {
+ f64::MIN_POSITIVE
+ }
+}
+
+/// Returns a sign (true when negative) and `FullDecoded` value
+/// from the given floating point number.
+pub fn decode<T: DecodableFloat>(v: T) -> (/*negative?*/ bool, FullDecoded) {
+ let (mant, exp, sign) = v.integer_decode();
+ let even = (mant & 1) == 0;
+ let decoded = match v.classify() {
+ FpCategory::Nan => FullDecoded::Nan,
+ FpCategory::Infinite => FullDecoded::Infinite,
+ FpCategory::Zero => FullDecoded::Zero,
+ FpCategory::Subnormal => {
+ // neighbors: (mant - 2, exp) -- (mant, exp) -- (mant + 2, exp)
+ // Float::integer_decode always preserves the exponent,
+ // so the mantissa is scaled for subnormals.
+ FullDecoded::Finite(Decoded { mant, minus: 1, plus: 1, exp, inclusive: even })
+ }
+ FpCategory::Normal => {
+ let minnorm = <T as DecodableFloat>::min_pos_norm_value().integer_decode();
+ if mant == minnorm.0 {
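+ // `minnorm.0` is just the implicit leading bit, so this branch catches
+ // mantissas with no fraction bits, i.e. powers of two, whose rounding
+ // range is asymmetric (the neighbor below is closer than the one above).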
+ // neighbors: (maxmant, exp - 1) -- (minnormmant, exp) -- (minnormmant + 1, exp)
+ // where maxmant = minnormmant * 2 - 1
+ FullDecoded::Finite(Decoded {
+ mant: mant << 2,
+ minus: 1,
+ plus: 2,
+ exp: exp - 2,
+ inclusive: even,
+ })
+ } else {
+ // neighbors: (mant - 1, exp) -- (mant, exp) -- (mant + 1, exp)
+ FullDecoded::Finite(Decoded {
+ mant: mant << 1,
+ minus: 1,
+ plus: 1,
+ exp: exp - 1,
+ inclusive: even,
+ })
+ }
+ }
+ };
+ (sign < 0, decoded)
+}
diff --git a/library/core/src/num/flt2dec/estimator.rs b/library/core/src/num/flt2dec/estimator.rs
new file mode 100644
index 000000000..50e2f7052
--- /dev/null
+++ b/library/core/src/num/flt2dec/estimator.rs
@@ -0,0 +1,14 @@
+//! The exponent estimator.
+
+/// Finds `k_0` such that `10^(k_0-1) < mant * 2^exp <= 10^(k_0+1)`.
+///
+/// This is used to approximate `k = ceil(log_10 (mant * 2^exp))`;
+/// the true `k` is either `k_0` or `k_0+1`.
+#[doc(hidden)]
+pub fn estimate_scaling_factor(mant: u64, exp: i16) -> i16 {
+ // 2^(nbits-1) < mant <= 2^nbits if mant > 0
+ let nbits = 64 - (mant - 1).leading_zeros() as i64;
+ // 1292913986 = floor(2^32 * log_10 2)
+ // therefore this always underestimates (or is exact), but not much.
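+ // For example, 1.0f64 decodes to `mant = 1 << 52`, `exp = -52`, so `nbits = 52`
+ // and the estimate is `((52 - 52) * 1292913986) >> 32 = 0`; indeed
+ // `10^(0-1) < 1.0 <= 10^(0+1)`.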
+ (((nbits + exp as i64) * 1292913986) >> 32) as i16
+}
diff --git a/library/core/src/num/flt2dec/mod.rs b/library/core/src/num/flt2dec/mod.rs
new file mode 100644
index 000000000..1ff2e8c82
--- /dev/null
+++ b/library/core/src/num/flt2dec/mod.rs
@@ -0,0 +1,673 @@
+/*!
+
+Floating-point number to decimal conversion routines.
+
+# Problem statement
+
+We are given the floating-point number `v = f * 2^e` with an integer `f`,
+and its bounds `minus` and `plus` such that any number between `v - minus` and
+`v + plus` will be rounded to `v`. For simplicity we assume that
+this range is exclusive. Then we would like to get the unique decimal
+representation `V = 0.d[0..n-1] * 10^k` such that:
+
+- `d[0]` is non-zero.
+
+- It's correctly rounded when parsed back: `v - minus < V < v + plus`.
+ Furthermore it is shortest such one, i.e., there is no representation
+ with less than `n` digits that is correctly rounded.
+
+- It's closest to the original value: `abs(V - v) <= 10^(k-n) / 2`. Note that
+ there might be two representations satisfying this closeness requirement,
+ in which case some tie-breaking mechanism is used.
+
+We will call this mode of operation the *shortest* mode. This mode is used
+when there is no additional constraint, and can be thought of as a "natural" mode
+as it matches ordinary intuition (it at least prints `0.1f32` as "0.1").
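+
+For example, `0.1f32` is actually `13421773 * 2^-27`, i.e. exactly
+`0.100000001490116119384765625`, but the shortest mode emits the single digit
+`1` with `k = 0` ("0.1"), since that already rounds back to the same float.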
+
+We have two more modes of operation closely related to each other. In these modes
+we are given either the number of significant digits `n` or the last-digit
+limitation `limit` (which determines the actual `n`), and we would like to get
+the representation `V = 0.d[0..n-1] * 10^k` such that:
+
+- `d[0]` is non-zero, unless `n` was zero in which case only `k` is returned.
+
+- It's closest to the original value: `abs(V - v) <= 10^(k-n) / 2`. Again,
+ there might be some tie-breaking mechanism.
+
+When `limit` is given but not `n`, we set `n` such that `k - n = limit`
+so that the last digit `d[n-1]` is scaled by `10^(k-n) = 10^limit`.
+If such `n` is negative, we clip it to zero so that we will only get `k`.
+We are also limited by the supplied buffer. This limitation is used to print
+the number up to the given number of fractional digits without knowing
+the correct `k` beforehand.
+
+We will call the mode of operation requiring `n` the *exact* mode,
+and the one requiring `limit` the *fixed* mode. The exact mode is a subset of
+the fixed mode: a sufficiently large last-digit limitation will eventually fill
+the supplied buffer and let the algorithm return.
+
+# Implementation overview
+
+It is easy to get the floating point printing correct but slow (Russ Cox has
+[demonstrated](https://research.swtch.com/ftoa) how easy it is), or incorrect but
+fast (naïve division and modulo). But it is surprisingly hard to print
+floating point numbers correctly *and* efficiently.
+
+There are two classes of algorithms widely known to be correct.
+
+- The "Dragon" family of algorithm is first described by Guy L. Steele Jr. and
+ Jon L. White. They rely on the fixed-size big integer for their correctness.
+ A slight improvement was found later, which is posthumously described by
+ Robert G. Burger and R. Kent Dybvig. David Gay's `dtoa.c` routine is
+ a popular implementation of this strategy.
+
+- The "Grisu" family of algorithm is first described by Florian Loitsch.
+ They use very cheap integer-only procedure to determine the close-to-correct
+ representation which is at least guaranteed to be shortest. The variant,
+ Grisu3, actively detects if the resulting representation is incorrect.
+
+We implement both algorithms with necessary tweaks to suit our requirements.
+In particular, the published literature is short on the actual implementation
+difficulties, like how to avoid arithmetic overflows. Each implementation,
+available in `strategy::dragon` and `strategy::grisu` respectively,
+extensively describes all necessary justifications and many proofs for them.
+(It is still difficult to follow though. You have been warned.)
+
+Both implementations expose two public functions:
+
+- `format_shortest(decoded, buf)`, which always needs at least
+ `MAX_SIG_DIGITS` digits of buffer. Implements the shortest mode.
+
+- `format_exact(decoded, buf, limit)`, which accepts as small as
+ one digit of buffer. Implements exact and fixed modes.
+
+They try to fill the `u8` buffer with digits and return the number of digits
+written and the exponent `k`. They are total for all finite `f32` and `f64`
+inputs (Grisu internally falls back to Dragon if necessary).
+
+The rendered digits are formatted into the actual string form with
+four functions:
+
+- `to_shortest_str` prints the shortest representation, which can be padded by
+ zeroes to make *at least* the given number of fractional digits.
+
+- `to_shortest_exp_str` prints the shortest representation, which can be
+ padded by zeroes when its exponent is in the specified ranges,
+ or can be printed in the exponential form such as `1.23e45`.
+
+- `to_exact_exp_str` prints the exact representation with the given number of
+ digits in the exponential form.
+
+- `to_exact_fixed_str` prints the fixed representation with *exactly*
+ the given number of fractional digits.
+
+They all return a slice of the preallocated `Part` array, which corresponds to
+the individual parts of the string: a fixed string, a run of rendered digits,
+a number of zeroes, or a small (`u16`) number.
+provide a large enough buffer and `Part` array, and to assemble the final
+string from the resulting `Part`s itself.
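+
+For illustration, here is a minimal sketch of driving the shortest mode by hand
+(this is internal API, so the sketch assumes it is exercised from within core or
+its test suite, and is not a doctest):
+
+```ignore (internal API)
+use core::mem::MaybeUninit;
+use core::num::flt2dec::{strategy::grisu, to_shortest_str, Sign, MAX_SIG_DIGITS};
+
+// Scratch space for the rendered digits and for the output parts.
+let mut buf = [MaybeUninit::<u8>::uninit(); MAX_SIG_DIGITS];
+let mut parts = [MaybeUninit::uninit(); 4];
+let formatted = to_shortest_str(grisu::format_shortest, 0.1f64, Sign::Minus,
+                                /* frac_digits */ 0, &mut buf, &mut parts);
+// `formatted.sign` is "" here, and concatenating the parts yields "0.1".
+```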
+
+All algorithms and formatting functions are accompanied by extensive tests
+in the `coretests::num::flt2dec` module. The tests also show how to use
+individual functions.
+
+*/
+
+// while this is extensively documented, this is in principle private which is
+// only made public for testing. do not expose us.
+#![doc(hidden)]
+#![unstable(
+ feature = "flt2dec",
+ reason = "internal routines only exposed for testing",
+ issue = "none"
+)]
+
+pub use self::decoder::{decode, DecodableFloat, Decoded, FullDecoded};
+
+use super::fmt::{Formatted, Part};
+use crate::mem::MaybeUninit;
+
+pub mod decoder;
+pub mod estimator;
+
+/// Digit-generation algorithms.
+pub mod strategy {
+ pub mod dragon;
+ pub mod grisu;
+}
+
+/// The minimum buffer size necessary for the shortest mode.
+///
+/// It is a bit non-trivial to derive, but this is one plus the maximal number of
+/// significant decimal digits from formatting algorithms with the shortest result.
+/// The exact formula is `ceil(# bits in mantissa * log_10 2 + 1)`.
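+/// For `f64` (53 mantissa bits) this is `ceil(53 * log_10 2 + 1) = ceil(16.95...) = 17`.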
+pub const MAX_SIG_DIGITS: usize = 17;
+
+/// When `d` contains decimal digits, increases the last digit and propagates
+/// the carry. Returns an extra digit when the carry increases the length.
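+/// For example, `round_up(&mut [b'1', b'2', b'9'])` leaves `b"130"` behind and
+/// returns `None`, while `round_up(&mut [b'9', b'9'])` leaves `b"10"` behind and
+/// returns `Some(b'0')`, the extra digit produced by the carry (`99` becomes `100`).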
+#[doc(hidden)]
+pub fn round_up(d: &mut [u8]) -> Option<u8> {
+ match d.iter().rposition(|&c| c != b'9') {
+ Some(i) => {
+ // d[i+1..n] is all nines
+ d[i] += 1;
+ for j in i + 1..d.len() {
+ d[j] = b'0';
+ }
+ None
+ }
+ None if d.len() > 0 => {
+ // 999..999 rounds to 1000..000 with an increased exponent
+ d[0] = b'1';
+ for j in 1..d.len() {
+ d[j] = b'0';
+ }
+ Some(b'0')
+ }
+ None => {
+ // an empty buffer rounds up (a bit strange but reasonable)
+ Some(b'1')
+ }
+ }
+}
+
+/// Formats the given decimal digits `0.<...buf...> * 10^exp` into the decimal form
+/// with at least the given number of fractional digits. The result is stored to
+/// the supplied parts array and a slice of written parts is returned.
+///
+/// `frac_digits` can be less than the number of actual fractional digits in `buf`;
+/// it will be ignored and full digits will be printed. It is only used to print
+/// additional zeroes after rendered digits. Thus `frac_digits` of 0 means that
+/// it will only print the given digits and nothing else.
+fn digits_to_dec_str<'a>(
+ buf: &'a [u8],
+ exp: i16,
+ frac_digits: usize,
+ parts: &'a mut [MaybeUninit<Part<'a>>],
+) -> &'a [Part<'a>] {
+ assert!(!buf.is_empty());
+ assert!(buf[0] > b'0');
+ assert!(parts.len() >= 4);
+
+ // if there is the restriction on the last digit position, `buf` is assumed to be
+ // left-padded with the virtual zeroes. the number of virtual zeroes, `nzeroes`,
+ // equals `max(0, exp + frac_digits - buf.len())`, so that the position of
+ // the last digit `exp - buf.len() - nzeroes` is no more than `-frac_digits`:
+ //
+ // |<-virtual->|
+ // |<---- buf ---->| zeroes | exp
+ // 0. 1 2 3 4 5 6 7 8 9 _ _ _ _ _ _ x 10
+ // | | |
+ // 10^exp 10^(exp-buf.len()) 10^(exp-buf.len()-nzeroes)
+ //
+ // `nzeroes` is individually calculated for each case in order to avoid overflow.
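+ //
+ // For example, `buf = b"1234"`, `exp = 2` and `frac_digits = 3` produce the
+ // parts [Copy("12"), Copy("."), Copy("34"), Zero(1)], which render as "12.340".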
+
+ if exp <= 0 {
+ // the decimal point is before rendered digits: [0.][000...000][1234][____]
+ let minus_exp = -(exp as i32) as usize;
+ parts[0] = MaybeUninit::new(Part::Copy(b"0."));
+ parts[1] = MaybeUninit::new(Part::Zero(minus_exp));
+ parts[2] = MaybeUninit::new(Part::Copy(buf));
+ if frac_digits > buf.len() && frac_digits - buf.len() > minus_exp {
+ parts[3] = MaybeUninit::new(Part::Zero((frac_digits - buf.len()) - minus_exp));
+ // SAFETY: we just initialized the elements `..4`.
+ unsafe { MaybeUninit::slice_assume_init_ref(&parts[..4]) }
+ } else {
+ // SAFETY: we just initialized the elements `..3`.
+ unsafe { MaybeUninit::slice_assume_init_ref(&parts[..3]) }
+ }
+ } else {
+ let exp = exp as usize;
+ if exp < buf.len() {
+ // the decimal point is inside rendered digits: [12][.][34][____]
+ parts[0] = MaybeUninit::new(Part::Copy(&buf[..exp]));
+ parts[1] = MaybeUninit::new(Part::Copy(b"."));
+ parts[2] = MaybeUninit::new(Part::Copy(&buf[exp..]));
+ if frac_digits > buf.len() - exp {
+ parts[3] = MaybeUninit::new(Part::Zero(frac_digits - (buf.len() - exp)));
+ // SAFETY: we just initialized the elements `..4`.
+ unsafe { MaybeUninit::slice_assume_init_ref(&parts[..4]) }
+ } else {
+ // SAFETY: we just initialized the elements `..3`.
+ unsafe { MaybeUninit::slice_assume_init_ref(&parts[..3]) }
+ }
+ } else {
+ // the decimal point is after rendered digits: [1234][____0000] or [1234][__][.][__].
+ parts[0] = MaybeUninit::new(Part::Copy(buf));
+ parts[1] = MaybeUninit::new(Part::Zero(exp - buf.len()));
+ if frac_digits > 0 {
+ parts[2] = MaybeUninit::new(Part::Copy(b"."));
+ parts[3] = MaybeUninit::new(Part::Zero(frac_digits));
+ // SAFETY: we just initialized the elements `..4`.
+ unsafe { MaybeUninit::slice_assume_init_ref(&parts[..4]) }
+ } else {
+ // SAFETY: we just initialized the elements `..2`.
+ unsafe { MaybeUninit::slice_assume_init_ref(&parts[..2]) }
+ }
+ }
+ }
+}
+
+/// Formats the given decimal digits `0.<...buf...> * 10^exp` into the exponential
+/// form with at least the given number of significant digits. When `upper` is `true`,
+/// the exponent will be prefixed by `E`; otherwise that's `e`. The result is
+/// stored to the supplied parts array and a slice of written parts is returned.
+///
+/// `min_ndigits` can be less than the number of actual significant digits in `buf`;
+/// it will be ignored and full digits will be printed. It is only used to print
+/// additional zeroes after rendered digits. Thus, `min_ndigits == 0` means that
+/// it will only print the given digits and nothing else.
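+///
+/// For example, `buf = b"1234"` with `exp = 2` renders as `1.234e1`, since
+/// `0.1234 * 10^2 == 1.234 * 10^1`.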
+fn digits_to_exp_str<'a>(
+ buf: &'a [u8],
+ exp: i16,
+ min_ndigits: usize,
+ upper: bool,
+ parts: &'a mut [MaybeUninit<Part<'a>>],
+) -> &'a [Part<'a>] {
+ assert!(!buf.is_empty());
+ assert!(buf[0] > b'0');
+ assert!(parts.len() >= 6);
+
+ let mut n = 0;
+
+ parts[n] = MaybeUninit::new(Part::Copy(&buf[..1]));
+ n += 1;
+
+ if buf.len() > 1 || min_ndigits > 1 {
+ parts[n] = MaybeUninit::new(Part::Copy(b"."));
+ parts[n + 1] = MaybeUninit::new(Part::Copy(&buf[1..]));
+ n += 2;
+ if min_ndigits > buf.len() {
+ parts[n] = MaybeUninit::new(Part::Zero(min_ndigits - buf.len()));
+ n += 1;
+ }
+ }
+
+ // 0.1234 x 10^exp = 1.234 x 10^(exp-1)
+ let exp = exp as i32 - 1; // avoid underflow when exp is i16::MIN
+ if exp < 0 {
+ parts[n] = MaybeUninit::new(Part::Copy(if upper { b"E-" } else { b"e-" }));
+ parts[n + 1] = MaybeUninit::new(Part::Num(-exp as u16));
+ } else {
+ parts[n] = MaybeUninit::new(Part::Copy(if upper { b"E" } else { b"e" }));
+ parts[n + 1] = MaybeUninit::new(Part::Num(exp as u16));
+ }
+ // SAFETY: we just initialized the elements `..n + 2`.
+ unsafe { MaybeUninit::slice_assume_init_ref(&parts[..n + 2]) }
+}
+
+/// Sign formatting options.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum Sign {
+ /// Prints `-` for any negative value.
+ Minus, // -inf -1 -0 0 1 inf nan
+ /// Prints `-` for any negative value, or `+` otherwise.
+ MinusPlus, // -inf -1 -0 +0 +1 +inf nan
+}
+
+/// Returns the static string corresponding to the sign to be formatted.
+/// It can be either `""`, `"+"` or `"-"`.
+fn determine_sign(sign: Sign, decoded: &FullDecoded, negative: bool) -> &'static str {
+ match (*decoded, sign) {
+ (FullDecoded::Nan, _) => "",
+ (_, Sign::Minus) => {
+ if negative {
+ "-"
+ } else {
+ ""
+ }
+ }
+ (_, Sign::MinusPlus) => {
+ if negative {
+ "-"
+ } else {
+ "+"
+ }
+ }
+ }
+}
+
+/// Formats the given floating point number into the decimal form with at least
+/// the given number of fractional digits. The result is stored to the supplied parts
+/// array while utilizing the given byte buffer as a scratch. Unlike its exponential
+/// counterpart, there is no `upper` option; non-finite values are always rendered
+/// as `inf` and `NaN`. The first part to be rendered is always a `Part::Sign`
+/// (which can be an empty string if no sign is rendered).
+///
+/// `format_shortest` should be the underlying digit-generation function.
+/// It should return the part of the buffer that it initialized.
+/// You probably would want `strategy::grisu::format_shortest` for this.
+///
+/// `frac_digits` can be less than the number of actual fractional digits in `v`;
+/// it will be ignored and full digits will be printed. It is only used to print
+/// additional zeroes after rendered digits. Thus `frac_digits` of 0 means that
+/// it will only print the given digits and nothing else.
+///
+/// The byte buffer should be at least `MAX_SIG_DIGITS` bytes long.
+/// There should be at least 4 parts available, due to the worst case like
+/// `[+][0.][0000][2][0000]` with `frac_digits = 10`.
+pub fn to_shortest_str<'a, T, F>(
+ mut format_shortest: F,
+ v: T,
+ sign: Sign,
+ frac_digits: usize,
+ buf: &'a mut [MaybeUninit<u8>],
+ parts: &'a mut [MaybeUninit<Part<'a>>],
+) -> Formatted<'a>
+where
+ T: DecodableFloat,
+ F: FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> (&'a [u8], i16),
+{
+ assert!(parts.len() >= 4);
+ assert!(buf.len() >= MAX_SIG_DIGITS);
+
+ let (negative, full_decoded) = decode(v);
+ let sign = determine_sign(sign, &full_decoded, negative);
+ match full_decoded {
+ FullDecoded::Nan => {
+ parts[0] = MaybeUninit::new(Part::Copy(b"NaN"));
+ // SAFETY: we just initialized the elements `..1`.
+ Formatted { sign, parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..1]) } }
+ }
+ FullDecoded::Infinite => {
+ parts[0] = MaybeUninit::new(Part::Copy(b"inf"));
+ // SAFETY: we just initialized the elements `..1`.
+ Formatted { sign, parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..1]) } }
+ }
+ FullDecoded::Zero => {
+ if frac_digits > 0 {
+ // [0.][0000]
+ parts[0] = MaybeUninit::new(Part::Copy(b"0."));
+ parts[1] = MaybeUninit::new(Part::Zero(frac_digits));
+ Formatted {
+ sign,
+ // SAFETY: we just initialized the elements `..2`.
+ parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..2]) },
+ }
+ } else {
+ parts[0] = MaybeUninit::new(Part::Copy(b"0"));
+ Formatted {
+ sign,
+ // SAFETY: we just initialized the elements `..1`.
+ parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..1]) },
+ }
+ }
+ }
+ FullDecoded::Finite(ref decoded) => {
+ let (buf, exp) = format_shortest(decoded, buf);
+ Formatted { sign, parts: digits_to_dec_str(buf, exp, frac_digits, parts) }
+ }
+ }
+}
+
+/// Formats the given floating point number into the decimal form or
+/// the exponential form, depending on the resulting exponent. The result is
+/// stored to the supplied parts array while utilizing the given byte buffer
+/// as a scratch. `upper` is used to determine the case of the exponent prefix
+/// (`e` or `E`); non-finite values are always rendered as `inf` and `NaN`.
+/// The first part to be rendered is always a `Part::Sign` (which can be
+/// an empty string if no sign is rendered).
+///
+/// `format_shortest` should be the underlying digit-generation function.
+/// It should return the part of the buffer that it initialized.
+/// You probably would want `strategy::grisu::format_shortest` for this.
+///
+/// `dec_bounds` is a tuple `(lo, hi)` such that the number is formatted
+/// as decimal only when `10^lo <= V < 10^hi`. Note that this is the *apparent* `V`
+/// instead of the actual `v`! Thus any printed exponent in the exponential form
+/// cannot be in this range, avoiding any confusion.
+///
+/// The byte buffer should be at least `MAX_SIG_DIGITS` bytes long.
+/// There should be at least 6 parts available, due to the worst case like
+/// `[+][1][.][2345][e][-][6]`.
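+///
+/// For example, with `dec_bounds = (-4, 16)` a value whose shortest digits are
+/// `1234` with apparent exponent `-4` (i.e., `1.234e-4`) renders as the decimal
+/// `0.0001234`, while `1.234e-5` falls outside the bounds and renders in the
+/// exponential form.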
+pub fn to_shortest_exp_str<'a, T, F>(
+ mut format_shortest: F,
+ v: T,
+ sign: Sign,
+ dec_bounds: (i16, i16),
+ upper: bool,
+ buf: &'a mut [MaybeUninit<u8>],
+ parts: &'a mut [MaybeUninit<Part<'a>>],
+) -> Formatted<'a>
+where
+ T: DecodableFloat,
+ F: FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> (&'a [u8], i16),
+{
+ assert!(parts.len() >= 6);
+ assert!(buf.len() >= MAX_SIG_DIGITS);
+ assert!(dec_bounds.0 <= dec_bounds.1);
+
+ let (negative, full_decoded) = decode(v);
+ let sign = determine_sign(sign, &full_decoded, negative);
+ match full_decoded {
+ FullDecoded::Nan => {
+ parts[0] = MaybeUninit::new(Part::Copy(b"NaN"));
+ // SAFETY: we just initialized the elements `..1`.
+ Formatted { sign, parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..1]) } }
+ }
+ FullDecoded::Infinite => {
+ parts[0] = MaybeUninit::new(Part::Copy(b"inf"));
+ // SAFETY: we just initialized the elements `..1`.
+ Formatted { sign, parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..1]) } }
+ }
+ FullDecoded::Zero => {
+ parts[0] = if dec_bounds.0 <= 0 && 0 < dec_bounds.1 {
+ MaybeUninit::new(Part::Copy(b"0"))
+ } else {
+ MaybeUninit::new(Part::Copy(if upper { b"0E0" } else { b"0e0" }))
+ };
+ // SAFETY: we just initialized the elements `..1`.
+ Formatted { sign, parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..1]) } }
+ }
+ FullDecoded::Finite(ref decoded) => {
+ let (buf, exp) = format_shortest(decoded, buf);
+ let vis_exp = exp as i32 - 1;
+ let parts = if dec_bounds.0 as i32 <= vis_exp && vis_exp < dec_bounds.1 as i32 {
+ digits_to_dec_str(buf, exp, 0, parts)
+ } else {
+ digits_to_exp_str(buf, exp, 0, upper, parts)
+ };
+ Formatted { sign, parts }
+ }
+ }
+}
+
+/// Returns a rather crude approximation (upper bound) for the maximum buffer size
+/// calculated from the given decoded exponent.
+///
+/// The exact limit is:
+///
+/// - when `exp < 0`, the maximum length is `ceil(log_10 (5^-exp * (2^64 - 1)))`.
+/// - when `exp >= 0`, the maximum length is `ceil(log_10 (2^exp * (2^64 - 1)))`.
+///
+/// `ceil(log_10 (x^exp * (2^64 - 1)))` is less than `ceil(log_10 (2^64 - 1)) +
+/// ceil(exp * log_10 x)`, which is in turn less than `20 + (1 + exp * log_10 x)`.
+/// We use the facts that `log_10 2 < 5/16` and `log_10 5 < 12/16`, which is
+/// enough for our purposes.
+///
+/// Why do we need this? `format_exact` functions will fill the entire buffer
+/// unless limited by the last digit restriction, but it is possible that
+/// the number of digits requested is ridiculously large (say, 30,000 digits).
+/// The vast majority of the buffer will be filled with zeroes, so we don't want
+/// to allocate the whole buffer beforehand. Consequently, for any given arguments,
+/// 826 bytes of buffer should be sufficient for `f64`. Compare this with
+/// the actual number for the worst case: 770 bytes (when `exp = -1074`).
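+///
+/// For example, with `exp = -1074` (the minimum `f64` exponent), the estimate
+/// below is `21 + (12 * 1074) / 16 = 21 + 805 = 826` bytes.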
+fn estimate_max_buf_len(exp: i16) -> usize {
+ 21 + ((if exp < 0 { -12 } else { 5 } * exp as i32) as usize >> 4)
+}
+
+/// Formats the given floating point number into the exponential form with
+/// exactly the given number of significant digits. The result is stored into
+/// the supplied parts array while utilizing the given byte buffer as a scratch.
+/// `upper` is used to determine the case of the exponent prefix (`e` or `E`).
+/// The first part to be rendered is always a `Part::Sign` (which can be
+/// an empty string if no sign is rendered).
+///
+/// `format_exact` should be the underlying digit-generation function.
+/// It should return the part of the buffer that it initialized.
+/// You probably would want `strategy::grisu::format_exact` for this.
+///
+/// The byte buffer should be at least `ndigits` bytes long unless `ndigits` is
+/// so large that only the fixed number of digits will ever be written.
+/// (The tipping point for `f64` is about 800, so 1000 bytes should be enough.)
+/// There should be at least 6 parts available, due to the worst case like
+/// `[+][1][.][2345][e][-][6]`.
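+///
+/// For example (a sketch), rendering `1.0_f64` with `ndigits = 3` produces the
+/// digits `100` with exponent `1`, i.e., the parts for `1.00e0`.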
+pub fn to_exact_exp_str<'a, T, F>(
+ mut format_exact: F,
+ v: T,
+ sign: Sign,
+ ndigits: usize,
+ upper: bool,
+ buf: &'a mut [MaybeUninit<u8>],
+ parts: &'a mut [MaybeUninit<Part<'a>>],
+) -> Formatted<'a>
+where
+ T: DecodableFloat,
+ F: FnMut(&Decoded, &'a mut [MaybeUninit<u8>], i16) -> (&'a [u8], i16),
+{
+ assert!(parts.len() >= 6);
+ assert!(ndigits > 0);
+
+ let (negative, full_decoded) = decode(v);
+ let sign = determine_sign(sign, &full_decoded, negative);
+ match full_decoded {
+ FullDecoded::Nan => {
+ parts[0] = MaybeUninit::new(Part::Copy(b"NaN"));
+ // SAFETY: we just initialized the elements `..1`.
+ Formatted { sign, parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..1]) } }
+ }
+ FullDecoded::Infinite => {
+ parts[0] = MaybeUninit::new(Part::Copy(b"inf"));
+ // SAFETY: we just initialized the elements `..1`.
+ Formatted { sign, parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..1]) } }
+ }
+ FullDecoded::Zero => {
+ if ndigits > 1 {
+ // [0.][0000][e0]
+ parts[0] = MaybeUninit::new(Part::Copy(b"0."));
+ parts[1] = MaybeUninit::new(Part::Zero(ndigits - 1));
+ parts[2] = MaybeUninit::new(Part::Copy(if upper { b"E0" } else { b"e0" }));
+ Formatted {
+ sign,
+ // SAFETY: we just initialized the elements `..3`.
+ parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..3]) },
+ }
+ } else {
+ parts[0] = MaybeUninit::new(Part::Copy(if upper { b"0E0" } else { b"0e0" }));
+ Formatted {
+ sign,
+ // SAFETY: we just initialized the elements `..1`.
+ parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..1]) },
+ }
+ }
+ }
+ FullDecoded::Finite(ref decoded) => {
+ let maxlen = estimate_max_buf_len(decoded.exp);
+ assert!(buf.len() >= ndigits || buf.len() >= maxlen);
+
+ let trunc = if ndigits < maxlen { ndigits } else { maxlen };
+ let (buf, exp) = format_exact(decoded, &mut buf[..trunc], i16::MIN);
+ Formatted { sign, parts: digits_to_exp_str(buf, exp, ndigits, upper, parts) }
+ }
+ }
+}
+
+/// Formats the given floating point number into the decimal form with exactly
+/// the given number of fractional digits. The result is stored into the supplied
+/// parts array while utilizing the given byte buffer as a scratch. This function
+/// takes no `upper` flag; the case of non-finite values, i.e., `inf` and `nan`,
+/// is currently fixed. The first part to be rendered is always a `Part::Sign`
+/// (which can be an empty string if no sign is rendered).
+///
+/// `format_exact` should be the underlying digit-generation function.
+/// It should return the part of the buffer that it initialized.
+/// You probably would want `strategy::grisu::format_exact` for this.
+///
+/// The byte buffer should be large enough for the output unless `frac_digits` is
+/// so large that only the fixed number of digits will ever be written.
+/// (The tipping point for `f64` is about 800, so 1000 bytes should be enough.)
+/// There should be at least 4 parts available, due to the worst case like
+/// `[+][0.][0000][2][0000]` with `frac_digits = 10`.
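+///
+/// For example (a sketch), rendering `1.5_f64` with `frac_digits = 3` produces
+/// the parts for `1.500`; the two trailing zeroes come out as a single `Part::Zero(2)`.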
+pub fn to_exact_fixed_str<'a, T, F>(
+ mut format_exact: F,
+ v: T,
+ sign: Sign,
+ frac_digits: usize,
+ buf: &'a mut [MaybeUninit<u8>],
+ parts: &'a mut [MaybeUninit<Part<'a>>],
+) -> Formatted<'a>
+where
+ T: DecodableFloat,
+ F: FnMut(&Decoded, &'a mut [MaybeUninit<u8>], i16) -> (&'a [u8], i16),
+{
+ assert!(parts.len() >= 4);
+
+ let (negative, full_decoded) = decode(v);
+ let sign = determine_sign(sign, &full_decoded, negative);
+ match full_decoded {
+ FullDecoded::Nan => {
+ parts[0] = MaybeUninit::new(Part::Copy(b"NaN"));
+ // SAFETY: we just initialized the elements `..1`.
+ Formatted { sign, parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..1]) } }
+ }
+ FullDecoded::Infinite => {
+ parts[0] = MaybeUninit::new(Part::Copy(b"inf"));
+ // SAFETY: we just initialized the elements `..1`.
+ Formatted { sign, parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..1]) } }
+ }
+ FullDecoded::Zero => {
+ if frac_digits > 0 {
+ // [0.][0000]
+ parts[0] = MaybeUninit::new(Part::Copy(b"0."));
+ parts[1] = MaybeUninit::new(Part::Zero(frac_digits));
+ Formatted {
+ sign,
+ // SAFETY: we just initialized the elements `..2`.
+ parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..2]) },
+ }
+ } else {
+ parts[0] = MaybeUninit::new(Part::Copy(b"0"));
+ Formatted {
+ sign,
+ // SAFETY: we just initialized the elements `..1`.
+ parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..1]) },
+ }
+ }
+ }
+ FullDecoded::Finite(ref decoded) => {
+ let maxlen = estimate_max_buf_len(decoded.exp);
+ assert!(buf.len() >= maxlen);
+
+ // it *is* possible that `frac_digits` is ridiculously large.
+ // `format_exact` will end rendering digits much earlier in this case,
+ // because we are strictly limited by `maxlen`.
+ let limit = if frac_digits < 0x8000 { -(frac_digits as i16) } else { i16::MIN };
+ let (buf, exp) = format_exact(decoded, &mut buf[..maxlen], limit);
+ if exp <= limit {
+ // the restriction couldn't be met, so this should render like zero no matter
+ // what `exp` was. this does not include the case that the restriction has been
+ // met only after the final rounding-up; that is a regular case with `exp = limit + 1`.
+ debug_assert_eq!(buf.len(), 0);
+ if frac_digits > 0 {
+ // [0.][0000]
+ parts[0] = MaybeUninit::new(Part::Copy(b"0."));
+ parts[1] = MaybeUninit::new(Part::Zero(frac_digits));
+ Formatted {
+ sign,
+ // SAFETY: we just initialized the elements `..2`.
+ parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..2]) },
+ }
+ } else {
+ parts[0] = MaybeUninit::new(Part::Copy(b"0"));
+ Formatted {
+ sign,
+ // SAFETY: we just initialized the elements `..1`.
+ parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..1]) },
+ }
+ }
+ } else {
+ Formatted { sign, parts: digits_to_dec_str(buf, exp, frac_digits, parts) }
+ }
+ }
+ }
+}
diff --git a/library/core/src/num/flt2dec/strategy/dragon.rs b/library/core/src/num/flt2dec/strategy/dragon.rs
new file mode 100644
index 000000000..8ced5971e
--- /dev/null
+++ b/library/core/src/num/flt2dec/strategy/dragon.rs
@@ -0,0 +1,388 @@
+//! Almost direct (but slightly optimized) Rust translation of Figure 3 of "Printing
+//! Floating-Point Numbers Quickly and Accurately"[^1].
+//!
+//! [^1]: Burger, R. G. and Dybvig, R. K. 1996. Printing floating-point numbers
+//! quickly and accurately. SIGPLAN Not. 31, 5 (May. 1996), 108-116.
+
+use crate::cmp::Ordering;
+use crate::mem::MaybeUninit;
+
+use crate::num::bignum::Big32x40 as Big;
+use crate::num::bignum::Digit32 as Digit;
+use crate::num::flt2dec::estimator::estimate_scaling_factor;
+use crate::num::flt2dec::{round_up, Decoded, MAX_SIG_DIGITS};
+
+static POW10: [Digit; 10] =
+ [1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000];
+static TWOPOW10: [Digit; 10] =
+ [2, 20, 200, 2000, 20000, 200000, 2000000, 20000000, 200000000, 2000000000];
+
+// precalculated arrays of `Digit`s for 10^(2^n)
+static POW10TO16: [Digit; 2] = [0x6fc10000, 0x2386f2];
+static POW10TO32: [Digit; 4] = [0, 0x85acef81, 0x2d6d415b, 0x4ee];
+static POW10TO64: [Digit; 7] = [0, 0, 0xbf6a1f01, 0x6e38ed64, 0xdaa797ed, 0xe93ff9f4, 0x184f03];
+static POW10TO128: [Digit; 14] = [
+ 0, 0, 0, 0, 0x2e953e01, 0x3df9909, 0xf1538fd, 0x2374e42f, 0xd3cff5ec, 0xc404dc08, 0xbccdb0da,
+ 0xa6337f19, 0xe91f2603, 0x24e,
+];
+static POW10TO256: [Digit; 27] = [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0x982e7c01, 0xbed3875b, 0xd8d99f72, 0x12152f87, 0x6bde50c6, 0xcf4a6e70,
+ 0xd595d80f, 0x26b2716e, 0xadc666b0, 0x1d153624, 0x3c42d35a, 0x63ff540e, 0xcc5573c0, 0x65f9ef17,
+ 0x55bc28f2, 0x80dcc7f7, 0xf46eeddc, 0x5fdcefce, 0x553f7,
+];
+
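+// multiplies `x` by `10^n`, peeling off the binary components of `n`; e.g.,
+// `n = 25 = 16 + 8 + 1` becomes `x * 10^1 * 10^8 * 10^16`.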
+#[doc(hidden)]
+pub fn mul_pow10(x: &mut Big, n: usize) -> &mut Big {
+ debug_assert!(n < 512);
+ if n & 7 != 0 {
+ x.mul_small(POW10[n & 7]);
+ }
+ if n & 8 != 0 {
+ x.mul_small(POW10[8]);
+ }
+ if n & 16 != 0 {
+ x.mul_digits(&POW10TO16);
+ }
+ if n & 32 != 0 {
+ x.mul_digits(&POW10TO32);
+ }
+ if n & 64 != 0 {
+ x.mul_digits(&POW10TO64);
+ }
+ if n & 128 != 0 {
+ x.mul_digits(&POW10TO128);
+ }
+ if n & 256 != 0 {
+ x.mul_digits(&POW10TO256);
+ }
+ x
+}
+
+fn div_2pow10(x: &mut Big, mut n: usize) -> &mut Big {
+ let largest = POW10.len() - 1;
+ while n > largest {
+ x.div_rem_small(POW10[largest]);
+ n -= largest;
+ }
+ x.div_rem_small(TWOPOW10[n]);
+ x
+}
+
+// only usable when `x < 16 * scale`; `scaleN` should be `scale.mul_small(N)`
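+// e.g., if `x = 13 * scale + r` with `r < scale`, the cascade below subtracts
+// `scale8`, then `scale4`, then `scale`, leaving `d = 13` and `x = r`.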
+fn div_rem_upto_16<'a>(
+ x: &'a mut Big,
+ scale: &Big,
+ scale2: &Big,
+ scale4: &Big,
+ scale8: &Big,
+) -> (u8, &'a mut Big) {
+ let mut d = 0;
+ if *x >= *scale8 {
+ x.sub(scale8);
+ d += 8;
+ }
+ if *x >= *scale4 {
+ x.sub(scale4);
+ d += 4;
+ }
+ if *x >= *scale2 {
+ x.sub(scale2);
+ d += 2;
+ }
+ if *x >= *scale {
+ x.sub(scale);
+ d += 1;
+ }
+ debug_assert!(*x < *scale);
+ (d, x)
+}
+
+/// The shortest mode implementation for Dragon.
+pub fn format_shortest<'a>(
+ d: &Decoded,
+ buf: &'a mut [MaybeUninit<u8>],
+) -> (/*digits*/ &'a [u8], /*exp*/ i16) {
+ // the number `v` to format is known to be:
+ // - equal to `mant * 2^exp`;
+ // - preceded by `(mant - 2 * minus) * 2^exp` in the original type; and
+ // - followed by `(mant + 2 * plus) * 2^exp` in the original type.
+ //
+ // obviously, `minus` and `plus` cannot be zero. (for infinities, we use out-of-range values.)
+ // also we assume that at least one digit is generated, i.e., `mant` cannot be zero either.
+ //
+ // this also means that any number between `low = (mant - minus) * 2^exp` and
+ // `high = (mant + plus) * 2^exp` will map to this exact floating point number,
+ // with bounds included when the original mantissa was even (i.e., `!mant_was_odd`).
+
+ assert!(d.mant > 0);
+ assert!(d.minus > 0);
+ assert!(d.plus > 0);
+ assert!(d.mant.checked_add(d.plus).is_some());
+ assert!(d.mant.checked_sub(d.minus).is_some());
+ assert!(buf.len() >= MAX_SIG_DIGITS);
+
+ // `a.cmp(&b) < rounding` is `if d.inclusive {a <= b} else {a < b}`
+ let rounding = if d.inclusive { Ordering::Greater } else { Ordering::Equal };
+
+ // estimate `k_0` from original inputs satisfying `10^(k_0-1) < high <= 10^(k_0+1)`.
+ // the tight bound `k` satisfying `10^(k-1) < high <= 10^k` is calculated later.
+ let mut k = estimate_scaling_factor(d.mant + d.plus, d.exp);
+
+ // convert `{mant, plus, minus} * 2^exp` into the fractional form so that:
+ // - `v = mant / scale`
+ // - `low = (mant - minus) / scale`
+ // - `high = (mant + plus) / scale`
+ let mut mant = Big::from_u64(d.mant);
+ let mut minus = Big::from_u64(d.minus);
+ let mut plus = Big::from_u64(d.plus);
+ let mut scale = Big::from_small(1);
+ if d.exp < 0 {
+ scale.mul_pow2(-d.exp as usize);
+ } else {
+ mant.mul_pow2(d.exp as usize);
+ minus.mul_pow2(d.exp as usize);
+ plus.mul_pow2(d.exp as usize);
+ }
+
+ // divide `mant` by `10^k`. now `scale / 10 < mant + plus <= scale * 10`.
+ if k >= 0 {
+ mul_pow10(&mut scale, k as usize);
+ } else {
+ mul_pow10(&mut mant, -k as usize);
+ mul_pow10(&mut minus, -k as usize);
+ mul_pow10(&mut plus, -k as usize);
+ }
+
+ // fixup when `mant + plus > scale` (or `>=`).
+ // we are not actually modifying `scale`, since we can skip the initial multiplication instead.
+ // now `scale < mant + plus <= scale * 10` and we are ready to generate digits.
+ //
+ // note that `d[0]` *can* be zero, when `scale - plus < mant < scale`.
+ // in this case the rounding-up condition (`up` below) will be triggered immediately.
+ if scale.cmp(mant.clone().add(&plus)) < rounding {
+ // equivalent to scaling `scale` by 10
+ k += 1;
+ } else {
+ mant.mul_small(10);
+ minus.mul_small(10);
+ plus.mul_small(10);
+ }
+
+ // cache `(2, 4, 8) * scale` for digit generation.
+ let mut scale2 = scale.clone();
+ scale2.mul_pow2(1);
+ let mut scale4 = scale.clone();
+ scale4.mul_pow2(2);
+ let mut scale8 = scale.clone();
+ scale8.mul_pow2(3);
+
+ let mut down;
+ let mut up;
+ let mut i = 0;
+ loop {
+ // invariants, where `d[0..n-1]` are digits generated so far:
+ // - `v = mant / scale * 10^(k-n-1) + d[0..n-1] * 10^(k-n)`
+ // - `v - low = minus / scale * 10^(k-n-1)`
+ // - `high - v = plus / scale * 10^(k-n-1)`
+ // - `(mant + plus) / scale <= 10` (thus `mant / scale < 10`)
+ // where `d[i..j]` is a shorthand for `d[i] * 10^(j-i) + ... + d[j-1] * 10 + d[j]`.
+
+ // generate one digit: `d[n] = floor(mant / scale) < 10`.
+ let (d, _) = div_rem_upto_16(&mut mant, &scale, &scale2, &scale4, &scale8);
+ debug_assert!(d < 10);
+ buf[i] = MaybeUninit::new(b'0' + d);
+ i += 1;
+
+ // this is a simplified description of the modified Dragon algorithm.
+ // many intermediate derivations and completeness arguments are omitted for convenience.
+ //
+ // start with modified invariants, as we've updated `n`:
+ // - `v = mant / scale * 10^(k-n) + d[0..n-1] * 10^(k-n)`
+ // - `v - low = minus / scale * 10^(k-n)`
+ // - `high - v = plus / scale * 10^(k-n)`
+ //
+ // assume that `d[0..n-1]` is the shortest representation between `low` and `high`,
+ // i.e., `d[0..n-1]` satisfies both of the following but `d[0..n-2]` doesn't:
+ // - `low < d[0..n-1] * 10^(k-n) < high` (bijectivity: digits round to `v`); and
+ // - `abs(v / 10^(k-n) - d[0..n-1]) <= 1/2` (the last digit is correct).
+ //
+ // the second condition simplifies to `2 * mant <= scale`.
+ // solving invariants in terms of `mant`, `low` and `high` yields
+ // a simpler version of the first condition: `-plus < mant < minus`.
+ // since `-plus < 0 <= mant`, we have the correct shortest representation
+ // when `mant < minus` and `2 * mant <= scale`.
+ // (the former becomes `mant <= minus` when the original mantissa is even.)
+ //
+ // when the second doesn't hold (`2 * mant > scale`), we need to increase the last digit.
+ // this is enough for restoring that condition: we already know that
+ // the digit generation guarantees `0 <= v / 10^(k-n) - d[0..n-1] < 1`.
+ // in this case, the first condition becomes `-plus < mant - scale < minus`.
+ // since `mant < scale` after the generation, we have `scale < mant + plus`.
+ // (again, this becomes `scale <= mant + plus` when the original mantissa is even.)
+ //
+ // in short:
+ // - stop and round `down` (keep digits as is) when `mant < minus` (or `<=`).
+ // - stop and round `up` (increase the last digit) when `scale < mant + plus` (or `<=`).
+ // - keep generating otherwise.
+ down = mant.cmp(&minus) < rounding;
+ up = scale.cmp(mant.clone().add(&plus)) < rounding;
+ if down || up {
+ break;
+ } // we have the shortest representation, proceed to the rounding
+
+ // restore the invariants.
+ // this makes the algorithm always terminate: `minus` and `plus` always increase,
+ // but `mant` is clipped modulo `scale` and `scale` is fixed.
+ mant.mul_small(10);
+ minus.mul_small(10);
+ plus.mul_small(10);
+ }
+
+ // rounding up happens when
+ // i) only the rounding-up condition was triggered, or
+ // ii) both conditions were triggered and tie breaking prefers rounding up.
+ if up && (!down || *mant.mul_pow2(1) >= scale) {
+ // if rounding up changes the length, the exponent should also change.
+ // it seems that this condition is very hard to satisfy (possibly impossible),
+ // but we are just being safe and consistent here.
+ // SAFETY: we initialized that memory above.
+ if let Some(c) = round_up(unsafe { MaybeUninit::slice_assume_init_mut(&mut buf[..i]) }) {
+ buf[i] = MaybeUninit::new(c);
+ i += 1;
+ k += 1;
+ }
+ }
+
+ // SAFETY: we initialized that memory above.
+ (unsafe { MaybeUninit::slice_assume_init_ref(&buf[..i]) }, k)
+}
+
+/// The exact and fixed mode implementation for Dragon.
+pub fn format_exact<'a>(
+ d: &Decoded,
+ buf: &'a mut [MaybeUninit<u8>],
+ limit: i16,
+) -> (/*digits*/ &'a [u8], /*exp*/ i16) {
+ assert!(d.mant > 0);
+ assert!(d.minus > 0);
+ assert!(d.plus > 0);
+ assert!(d.mant.checked_add(d.plus).is_some());
+ assert!(d.mant.checked_sub(d.minus).is_some());
+
+ // estimate `k_0` from original inputs satisfying `10^(k_0-1) < v <= 10^(k_0+1)`.
+ let mut k = estimate_scaling_factor(d.mant, d.exp);
+
+ // `v = mant / scale`.
+ let mut mant = Big::from_u64(d.mant);
+ let mut scale = Big::from_small(1);
+ if d.exp < 0 {
+ scale.mul_pow2(-d.exp as usize);
+ } else {
+ mant.mul_pow2(d.exp as usize);
+ }
+
+ // divide `mant` by `10^k`. now `scale / 10 < mant <= scale * 10`.
+ if k >= 0 {
+ mul_pow10(&mut scale, k as usize);
+ } else {
+ mul_pow10(&mut mant, -k as usize);
+ }
+
+ // fixup when `mant + plus >= scale`, where `plus / scale = 10^-buf.len() / 2`.
+ // in order to keep the fixed-size bignum, we actually use `mant + floor(plus) >= scale`.
+ // we are not actually modifying `scale`, since we can skip the initial multiplication instead.
+ // as with the shortest algorithm, `d[0]` can be zero but will eventually be rounded up.
+ if *div_2pow10(&mut scale.clone(), buf.len()).add(&mant) >= scale {
+ // equivalent to scaling `scale` by 10
+ k += 1;
+ } else {
+ mant.mul_small(10);
+ }
+
+ // if we are working with the last-digit limitation, we need to shorten the buffer
+ // before the actual rendering in order to avoid double rounding.
+ // note that we have to enlarge the buffer again when rounding up happens!
+ let mut len = if k < limit {
+ // oops, we cannot even produce *one* digit.
+ // this is possible when, say, we've got something like 9.5 and it's being rounded to 10.
+ // we return an empty buffer, with the exception of the later rounding-up case
+ // which occurs when `k == limit` and has to produce exactly one digit.
+ 0
+ } else if ((k as i32 - limit as i32) as usize) < buf.len() {
+ (k - limit) as usize
+ } else {
+ buf.len()
+ };
+
+ if len > 0 {
+ // cache `(2, 4, 8) * scale` for digit generation.
+ // (this can be expensive, so do not calculate them when the buffer is empty.)
+ let mut scale2 = scale.clone();
+ scale2.mul_pow2(1);
+ let mut scale4 = scale.clone();
+ scale4.mul_pow2(2);
+ let mut scale8 = scale.clone();
+ scale8.mul_pow2(3);
+
+ for i in 0..len {
+ if mant.is_zero() {
+ // following digits are all zeroes, we stop here
+ // do *not* try to perform rounding! rather, fill remaining digits.
+ for c in &mut buf[i..len] {
+ *c = MaybeUninit::new(b'0');
+ }
+ // SAFETY: we initialized that memory above.
+ return (unsafe { MaybeUninit::slice_assume_init_ref(&buf[..len]) }, k);
+ }
+
+ let mut d = 0;
+ if mant >= scale8 {
+ mant.sub(&scale8);
+ d += 8;
+ }
+ if mant >= scale4 {
+ mant.sub(&scale4);
+ d += 4;
+ }
+ if mant >= scale2 {
+ mant.sub(&scale2);
+ d += 2;
+ }
+ if mant >= scale {
+ mant.sub(&scale);
+ d += 1;
+ }
+ debug_assert!(mant < scale);
+ debug_assert!(d < 10);
+ buf[i] = MaybeUninit::new(b'0' + d);
+ mant.mul_small(10);
+ }
+ }
+
+ // rounding up if we stop in the middle of digits
+ // if the following digits are exactly 5000..., check the prior digit and try to
+ // round to even (i.e., avoid rounding up when the prior digit is even).
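+ // (e.g., with two requested fractional digits, 0.125 renders as `0.12` rather
+ // than `0.13`, because the prior digit `2` is even.)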
+ let order = mant.cmp(scale.mul_small(5));
+ if order == Ordering::Greater
+ || (order == Ordering::Equal
+ // SAFETY: `buf[len-1]` is initialized.
+ && (len == 0 || unsafe { buf[len - 1].assume_init() } & 1 == 1))
+ {
+ // if rounding up changes the length, the exponent should also change.
+ // but we've been requested a fixed number of digits, so do not alter the buffer...
+ // SAFETY: we initialized that memory above.
+ if let Some(c) = round_up(unsafe { MaybeUninit::slice_assume_init_mut(&mut buf[..len]) }) {
+ // ...unless a fixed precision was requested instead.
+ // we also need to check that, if the original buffer was empty,
+ // the additional digit can only be added when `k == limit` (edge case).
+ k += 1;
+ if k > limit && len < buf.len() {
+ buf[len] = MaybeUninit::new(c);
+ len += 1;
+ }
+ }
+ }
+
+ // SAFETY: we initialized that memory above.
+ (unsafe { MaybeUninit::slice_assume_init_ref(&buf[..len]) }, k)
+}
diff --git a/library/core/src/num/flt2dec/strategy/grisu.rs b/library/core/src/num/flt2dec/strategy/grisu.rs
new file mode 100644
index 000000000..a4cb51c62
--- /dev/null
+++ b/library/core/src/num/flt2dec/strategy/grisu.rs
@@ -0,0 +1,764 @@
+//! Rust adaptation of the Grisu3 algorithm described in "Printing Floating-Point Numbers Quickly
+//! and Accurately with Integers"[^1]. It uses about 1KB of precomputed table, and in turn, it's
+//! very quick for most inputs.
+//!
+//! [^1]: Florian Loitsch. 2010. Printing floating-point numbers quickly and
+//! accurately with integers. SIGPLAN Not. 45, 6 (June 2010), 233-243.
+
+use crate::mem::MaybeUninit;
+use crate::num::diy_float::Fp;
+use crate::num::flt2dec::{round_up, Decoded, MAX_SIG_DIGITS};
+
+// see the comments in `format_shortest_opt` for the rationale.
+#[doc(hidden)]
+pub const ALPHA: i16 = -60;
+#[doc(hidden)]
+pub const GAMMA: i16 = -32;
+
+/*
+# the following Python 3 code generates this table:
+for i in range(-308, 333, 8):
+    if i >= 0: f = 10**i; e = 0
+    else: f = 2**(80-4*i) // 10**-i; e = 4 * i - 80
+    l = f.bit_length()
+    f = ((f << 64 >> (l-1)) + 1) >> 1; e += l - 64
+    print(' (%#018x, %5d, %4d),' % (f, e, i))
+*/
+
+#[doc(hidden)]
+pub static CACHED_POW10: [(u64, i16, i16); 81] = [
+ // (f, e, k)
+ (0xe61acf033d1a45df, -1087, -308),
+ (0xab70fe17c79ac6ca, -1060, -300),
+ (0xff77b1fcbebcdc4f, -1034, -292),
+ (0xbe5691ef416bd60c, -1007, -284),
+ (0x8dd01fad907ffc3c, -980, -276),
+ (0xd3515c2831559a83, -954, -268),
+ (0x9d71ac8fada6c9b5, -927, -260),
+ (0xea9c227723ee8bcb, -901, -252),
+ (0xaecc49914078536d, -874, -244),
+ (0x823c12795db6ce57, -847, -236),
+ (0xc21094364dfb5637, -821, -228),
+ (0x9096ea6f3848984f, -794, -220),
+ (0xd77485cb25823ac7, -768, -212),
+ (0xa086cfcd97bf97f4, -741, -204),
+ (0xef340a98172aace5, -715, -196),
+ (0xb23867fb2a35b28e, -688, -188),
+ (0x84c8d4dfd2c63f3b, -661, -180),
+ (0xc5dd44271ad3cdba, -635, -172),
+ (0x936b9fcebb25c996, -608, -164),
+ (0xdbac6c247d62a584, -582, -156),
+ (0xa3ab66580d5fdaf6, -555, -148),
+ (0xf3e2f893dec3f126, -529, -140),
+ (0xb5b5ada8aaff80b8, -502, -132),
+ (0x87625f056c7c4a8b, -475, -124),
+ (0xc9bcff6034c13053, -449, -116),
+ (0x964e858c91ba2655, -422, -108),
+ (0xdff9772470297ebd, -396, -100),
+ (0xa6dfbd9fb8e5b88f, -369, -92),
+ (0xf8a95fcf88747d94, -343, -84),
+ (0xb94470938fa89bcf, -316, -76),
+ (0x8a08f0f8bf0f156b, -289, -68),
+ (0xcdb02555653131b6, -263, -60),
+ (0x993fe2c6d07b7fac, -236, -52),
+ (0xe45c10c42a2b3b06, -210, -44),
+ (0xaa242499697392d3, -183, -36),
+ (0xfd87b5f28300ca0e, -157, -28),
+ (0xbce5086492111aeb, -130, -20),
+ (0x8cbccc096f5088cc, -103, -12),
+ (0xd1b71758e219652c, -77, -4),
+ (0x9c40000000000000, -50, 4),
+ (0xe8d4a51000000000, -24, 12),
+ (0xad78ebc5ac620000, 3, 20),
+ (0x813f3978f8940984, 30, 28),
+ (0xc097ce7bc90715b3, 56, 36),
+ (0x8f7e32ce7bea5c70, 83, 44),
+ (0xd5d238a4abe98068, 109, 52),
+ (0x9f4f2726179a2245, 136, 60),
+ (0xed63a231d4c4fb27, 162, 68),
+ (0xb0de65388cc8ada8, 189, 76),
+ (0x83c7088e1aab65db, 216, 84),
+ (0xc45d1df942711d9a, 242, 92),
+ (0x924d692ca61be758, 269, 100),
+ (0xda01ee641a708dea, 295, 108),
+ (0xa26da3999aef774a, 322, 116),
+ (0xf209787bb47d6b85, 348, 124),
+ (0xb454e4a179dd1877, 375, 132),
+ (0x865b86925b9bc5c2, 402, 140),
+ (0xc83553c5c8965d3d, 428, 148),
+ (0x952ab45cfa97a0b3, 455, 156),
+ (0xde469fbd99a05fe3, 481, 164),
+ (0xa59bc234db398c25, 508, 172),
+ (0xf6c69a72a3989f5c, 534, 180),
+ (0xb7dcbf5354e9bece, 561, 188),
+ (0x88fcf317f22241e2, 588, 196),
+ (0xcc20ce9bd35c78a5, 614, 204),
+ (0x98165af37b2153df, 641, 212),
+ (0xe2a0b5dc971f303a, 667, 220),
+ (0xa8d9d1535ce3b396, 694, 228),
+ (0xfb9b7cd9a4a7443c, 720, 236),
+ (0xbb764c4ca7a44410, 747, 244),
+ (0x8bab8eefb6409c1a, 774, 252),
+ (0xd01fef10a657842c, 800, 260),
+ (0x9b10a4e5e9913129, 827, 268),
+ (0xe7109bfba19c0c9d, 853, 276),
+ (0xac2820d9623bf429, 880, 284),
+ (0x80444b5e7aa7cf85, 907, 292),
+ (0xbf21e44003acdd2d, 933, 300),
+ (0x8e679c2f5e44ff8f, 960, 308),
+ (0xd433179d9c8cb841, 986, 316),
+ (0x9e19db92b4e31ba9, 1013, 324),
+ (0xeb96bf6ebadf77d9, 1039, 332),
+];
+
+#[doc(hidden)]
+pub const CACHED_POW10_FIRST_E: i16 = -1087;
+#[doc(hidden)]
+pub const CACHED_POW10_LAST_E: i16 = 1039;
+
+#[doc(hidden)]
+pub fn cached_power(alpha: i16, gamma: i16) -> (i16, Fp) {
+ let offset = CACHED_POW10_FIRST_E as i32;
+ let range = (CACHED_POW10.len() as i32) - 1;
+ let domain = (CACHED_POW10_LAST_E - CACHED_POW10_FIRST_E) as i32;
+ let idx = ((gamma as i32) - offset) * range / domain;
+ let (f, e, k) = CACHED_POW10[idx as usize];
+ debug_assert!(alpha <= e && e <= gamma);
+ (k, Fp { f, e })
+}
+
+/// Given `x > 0`, returns `(k, 10^k)` such that `10^k <= x < 10^(k+1)`.
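+/// (For example, `max_pow10_no_more_than(5000) == (3, 1000)`.)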
+#[doc(hidden)]
+pub fn max_pow10_no_more_than(x: u32) -> (u8, u32) {
+ debug_assert!(x > 0);
+
+ const X9: u32 = 10_0000_0000;
+ const X8: u32 = 1_0000_0000;
+ const X7: u32 = 1000_0000;
+ const X6: u32 = 100_0000;
+ const X5: u32 = 10_0000;
+ const X4: u32 = 1_0000;
+ const X3: u32 = 1000;
+ const X2: u32 = 100;
+ const X1: u32 = 10;
+
+ if x < X4 {
+ if x < X2 {
+ if x < X1 { (0, 1) } else { (1, X1) }
+ } else {
+ if x < X3 { (2, X2) } else { (3, X3) }
+ }
+ } else {
+ if x < X6 {
+ if x < X5 { (4, X4) } else { (5, X5) }
+ } else if x < X8 {
+ if x < X7 { (6, X6) } else { (7, X7) }
+ } else {
+ if x < X9 { (8, X8) } else { (9, X9) }
+ }
+ }
+}
+
+/// The shortest mode implementation for Grisu.
+///
+/// It returns `None` when it would return an inexact representation otherwise.
+pub fn format_shortest_opt<'a>(
+ d: &Decoded,
+ buf: &'a mut [MaybeUninit<u8>],
+) -> Option<(/*digits*/ &'a [u8], /*exp*/ i16)> {
+ assert!(d.mant > 0);
+ assert!(d.minus > 0);
+ assert!(d.plus > 0);
+ assert!(d.mant.checked_add(d.plus).is_some());
+ assert!(d.mant.checked_sub(d.minus).is_some());
+ assert!(buf.len() >= MAX_SIG_DIGITS);
+ assert!(d.mant + d.plus < (1 << 61)); // we need at least three bits of additional precision
+
+ // start with the normalized values with the shared exponent
+ let plus = Fp { f: d.mant + d.plus, e: d.exp }.normalize();
+ let minus = Fp { f: d.mant - d.minus, e: d.exp }.normalize_to(plus.e);
+ let v = Fp { f: d.mant, e: d.exp }.normalize_to(plus.e);
+
+ // find any `cached = 10^minusk` such that `ALPHA <= minusk + plus.e + 64 <= GAMMA`.
+ // since `plus` is normalized, this means `2^(62 + ALPHA) <= plus * cached < 2^(64 + GAMMA)`;
+ // given our choices of `ALPHA` and `GAMMA`, this puts `plus * cached` into `[4, 2^32)`.
+ //
+ // it is obviously desirable to maximize `GAMMA - ALPHA`,
+ // so that we don't need many cached powers of 10, but there are some considerations:
+ //
+ // 1. we want to keep `floor(plus * cached)` within `u32` since it needs a costly division.
+ // (this is not really avoidable, remainder is required for accuracy estimation.)
+ // 2. the remainder of `floor(plus * cached)` repeatedly gets multiplied by 10,
+ // and it should not overflow.
+ //
+ // the first gives `64 + GAMMA <= 32`, while the second gives `10 * 2^-ALPHA <= 2^64`;
+ // -60 and -32 are the maximal range under these constraints, and V8 also uses them.
+ let (minusk, cached) = cached_power(ALPHA - plus.e - 64, GAMMA - plus.e - 64);
+
+ // scale fps. this gives the maximal error of 1 ulp (proved from Theorem 5.1).
+ let plus = plus.mul(&cached);
+ let minus = minus.mul(&cached);
+ let v = v.mul(&cached);
+ debug_assert_eq!(plus.e, minus.e);
+ debug_assert_eq!(plus.e, v.e);
+
+ // +- actual range of minus
+ // | <---|---------------------- unsafe region --------------------------> |
+ // | | |
+ // | |<--->| | <--------------- safe region ---------------> | |
+ // | | | | | |
+ // |1 ulp|1 ulp| |1 ulp|1 ulp| |1 ulp|1 ulp|
+ // |<--->|<--->| |<--->|<--->| |<--->|<--->|
+ // |-----|-----|-------...-------|-----|-----|-------...-------|-----|-----|
+ // | minus | | v | | plus |
+ // minus1 minus0 v - 1 ulp v + 1 ulp plus0 plus1
+ //
+ // above `minus`, `v` and `plus` are *quantized* approximations (error < 1 ulp).
+ // as we don't know whether the error is positive or negative, we use two approximations
+ // spaced equally apart and have a maximal error of 2 ulps.
+ //
+ // the "unsafe region" is a liberal interval which we initially generate.
+ // the "safe region" is a conservative interval which we only accept.
+ // we start with the correct repr within the unsafe region, and try to find the closest repr
+ // to `v` which is also within the safe region. if we can't, we give up.
+ let plus1 = plus.f + 1;
+ // let plus0 = plus.f - 1; // only for explanation
+ // let minus0 = minus.f + 1; // only for explanation
+ let minus1 = minus.f - 1;
+ let e = -plus.e as usize; // shared exponent
+
+ // divide `plus1` into integral and fractional parts.
+ // integral parts are guaranteed to fit in u32, since cached power guarantees `plus < 2^32`
+ // and normalized `plus.f` is always less than `2^64 - 2^4` due to the precision requirement.
+ let plus1int = (plus1 >> e) as u32;
+ let plus1frac = plus1 & ((1 << e) - 1);
+
+ // calculate the largest `10^max_kappa` no more than `plus1` (thus `plus1 < 10^(max_kappa+1)`).
+ // this is an upper bound of `kappa` below.
+ let (max_kappa, max_ten_kappa) = max_pow10_no_more_than(plus1int);
+
+ let mut i = 0;
+ let exp = max_kappa as i16 - minusk + 1;
+
+ // Theorem 6.2: if `k` is the greatest integer s.t. `0 <= y mod 10^k <= y - x`,
+ // then `V = floor(y / 10^k) * 10^k` is in `[x, y]` and one of the shortest
+ // representations (with the minimal number of significant digits) in that range.
+ //
+ // find the digit length `kappa` between `(minus1, plus1)` as per Theorem 6.2.
+ // Theorem 6.2 can be adapted to exclude `x` by requiring `y mod 10^k < y - x` instead.
+ // (e.g., `x` = 32000, `y` = 32777; `kappa` = 2 since `y mod 10^3 = 777` is *not*
+ // strictly less than `y - x = 777`, while `y mod 10^2 = 77` is.)
+ // the algorithm relies on the later verification phase to exclude `y`.
+ let delta1 = plus1 - minus1;
+ // let delta1int = (delta1 >> e) as usize; // only for explanation
+ let delta1frac = delta1 & ((1 << e) - 1);
+
+ // render integral parts, while checking for the accuracy at each step.
+ let mut kappa = max_kappa as i16;
+ let mut ten_kappa = max_ten_kappa; // 10^kappa
+ let mut remainder = plus1int; // digits yet to be rendered
+ loop {
+ // we always have at least one digit to render, as `plus1 >= 10^kappa`
+ // invariants:
+ // - `delta1int <= remainder < 10^(kappa+1)`
+ // - `plus1int = d[0..n-1] * 10^(kappa+1) + remainder`
+ // (it follows that `remainder = plus1int % 10^(kappa+1)`)
+
+ // divide `remainder` by `10^kappa`. both are scaled by `2^-e`.
+ let q = remainder / ten_kappa;
+ let r = remainder % ten_kappa;
+ debug_assert!(q < 10);
+ buf[i] = MaybeUninit::new(b'0' + q as u8);
+ i += 1;
+
+ let plus1rem = ((r as u64) << e) + plus1frac; // == (plus1 % 10^kappa) * 2^e
+ if plus1rem < delta1 {
+ // `plus1 % 10^kappa < delta1 = plus1 - minus1`; we've found the correct `kappa`.
+ let ten_kappa = (ten_kappa as u64) << e; // scale 10^kappa back to the shared exponent
+ return round_and_weed(
+ // SAFETY: we initialized that memory above.
+ unsafe { MaybeUninit::slice_assume_init_mut(&mut buf[..i]) },
+ exp,
+ plus1rem,
+ delta1,
+ plus1 - v.f,
+ ten_kappa,
+ 1,
+ );
+ }
+
+ // break the loop when we have rendered all integral digits.
+ // the exact number of digits is `max_kappa + 1` as `plus1 < 10^(max_kappa+1)`.
+ if i > max_kappa as usize {
+ debug_assert_eq!(ten_kappa, 1);
+ debug_assert_eq!(kappa, 0);
+ break;
+ }
+
+ // restore invariants
+ kappa -= 1;
+ ten_kappa /= 10;
+ remainder = r;
+ }
+
+ // render fractional parts, while checking for the accuracy at each step.
+ // this time we rely on repeated multiplications, as division will lose the precision.
+ let mut remainder = plus1frac;
+ let mut threshold = delta1frac;
+ let mut ulp = 1;
+ loop {
+ // the next digit should be significant as we've tested that before breaking out
+ // invariants, where `m = max_kappa + 1` (# of digits in the integral part):
+ // - `remainder < 2^e`
+ // - `plus1frac * 10^(n-m) = d[m..n-1] * 2^e + remainder`
+
+ remainder *= 10; // won't overflow, `2^e * 10 < 2^64`
+ threshold *= 10;
+ ulp *= 10;
+
+ // divide `remainder` by `10^kappa`.
+ // both are scaled by `2^e / 10^kappa`, so the latter is implicit here.
+ let q = remainder >> e;
+ let r = remainder & ((1 << e) - 1);
+ debug_assert!(q < 10);
+ buf[i] = MaybeUninit::new(b'0' + q as u8);
+ i += 1;
+
+ if r < threshold {
+ let ten_kappa = 1 << e; // implicit divisor
+ return round_and_weed(
+ // SAFETY: we initialized that memory above.
+ unsafe { MaybeUninit::slice_assume_init_mut(&mut buf[..i]) },
+ exp,
+ r,
+ threshold,
+ (plus1 - v.f) * ulp,
+ ten_kappa,
+ ulp,
+ );
+ }
+
+ // restore invariants
+ kappa -= 1;
+ remainder = r;
+ }
+
+ // we've generated all significant digits of `plus1`, but we are not yet sure that it is
+ // the optimal one. for example, if `minus1` is 3.14153... and `plus1` is 3.14158...,
+ // there are 5 different shortest representations from 3.14154 to 3.14158 but we only
+ // have the greatest one. we have to successively decrease the last digit and check
+ // if this is the optimal repr. there are at most 9 candidates (..1 to ..9),
+ // so this is fairly quick. ("rounding" phase)
+ //
+ // the function checks whether this "optimal" repr is actually within the ulp ranges;
+ // also, it is possible that the "second-to-optimal" repr is actually the optimal one
+ // due to the rounding error. in either case this returns `None`. ("weeding" phase)
+ //
+ // all arguments here are scaled by the common (but implicit) value `k`, so that:
+ // - `remainder = (plus1 % 10^kappa) * k`
+ // - `threshold = (plus1 - minus1) * k` (and also, `remainder < threshold`)
+ // - `plus1v = (plus1 - v) * k` (and also, `threshold > plus1v` from prior invariants)
+ // - `ten_kappa = 10^kappa * k`
+ // - `ulp = 2^-e * k`
+ fn round_and_weed(
+ buf: &mut [u8],
+ exp: i16,
+ remainder: u64,
+ threshold: u64,
+ plus1v: u64,
+ ten_kappa: u64,
+ ulp: u64,
+ ) -> Option<(&[u8], i16)> {
+ assert!(!buf.is_empty());
+
+ // produce two approximations to `v` (actually `plus1 - v`) within 1.5 ulps.
+ // the resulting representation should be the closest representation to both.
+ //
+ // here `plus1 - v` is used since calculations are done with respect to `plus1`
+ // in order to avoid overflow/underflow (hence the seemingly swapped names).
+ let plus1v_down = plus1v + ulp; // plus1 - (v - 1 ulp)
+ let plus1v_up = plus1v - ulp; // plus1 - (v + 1 ulp)
+
+ // decrease the last digit and stop at the closest representation to `v + 1 ulp`.
+ let mut plus1w = remainder; // plus1w(n) = plus1 - w(n)
+ {
+ let last = buf.last_mut().unwrap();
+
+ // we work with the approximated digits `w(n)`, which is initially equal to `plus1 -
+ // plus1 % 10^kappa`. after running the loop body `n` times, `w(n) = plus1 -
+ // plus1 % 10^kappa - n * 10^kappa`. we set `plus1w(n) = plus1 - w(n) =
+ // plus1 % 10^kappa + n * 10^kappa` (thus `remainder = plus1w(0)`) to simplify checks.
+ // note that `plus1w(n)` is always increasing.
+ //
+ // we have three conditions to terminate. any of them will make the loop unable to
+ // proceed, but we then have at least one valid representation known to be closest to
+ // `v + 1 ulp` anyway. we will denote them as TC1 through TC3 for brevity.
+ //
+ // TC1: `w(n) <= v + 1 ulp`, i.e., this is the last repr that can be the closest one.
+ // this is equivalent to `plus1 - w(n) = plus1w(n) >= plus1 - (v + 1 ulp) = plus1v_up`.
+ // combined with TC2 (which checks if `w(n+1)` is valid), this prevents the possible
+ // overflow on the calculation of `plus1w(n)`.
+ //
+ // TC2: `w(n+1) < minus1`, i.e., the next repr definitely does not round to `v`.
+ // this is equivalent to `plus1 - w(n) + 10^kappa = plus1w(n) + 10^kappa >
+ // plus1 - minus1 = threshold`. the left hand side can overflow, but we know
+ // `threshold > plus1v`, so if TC1 is false, `threshold - plus1w(n) >
+ // threshold - (plus1v - 1 ulp) > 1 ulp` and we can safely test if
+ // `threshold - plus1w(n) < 10^kappa` instead.
+ //
+ // TC3: `abs(w(n) - (v + 1 ulp)) <= abs(w(n+1) - (v + 1 ulp))`, i.e., the next repr is
+ // no closer to `v + 1 ulp` than the current repr. given `z(n) = plus1v_up - plus1w(n)`,
+ // this becomes `abs(z(n)) <= abs(z(n+1))`. again assuming that TC1 is false, we have
+ // `z(n) > 0`. we have two cases to consider:
+ //
+ // - when `z(n+1) >= 0`: TC3 becomes `z(n) <= z(n+1)`. as `plus1w(n)` is increasing,
+ // `z(n)` should be decreasing and this is clearly false.
+ // - when `z(n+1) < 0`:
+ // - TC3a: the precondition is `plus1v_up < plus1w(n) + 10^kappa`. assuming TC2 is
+ // false, `threshold >= plus1w(n) + 10^kappa` so it cannot overflow.
+ // - TC3b: TC3 becomes `z(n) <= -z(n+1)`, i.e., `plus1v_up - plus1w(n) >=
+ // plus1w(n+1) - plus1v_up = plus1w(n) + 10^kappa - plus1v_up`. the negated TC1
+ // gives `plus1v_up > plus1w(n)`, so it cannot overflow or underflow when
+ // combined with TC3a.
+ //
+ // consequently, we should stop when `TC1 || TC2 || (TC3a && TC3b)`. the following is
+ // equal to its inverse, `!TC1 && !TC2 && (!TC3a || !TC3b)`.
+ while plus1w < plus1v_up
+ && threshold - plus1w >= ten_kappa
+ && (plus1w + ten_kappa < plus1v_up
+ || plus1v_up - plus1w >= plus1w + ten_kappa - plus1v_up)
+ {
+ *last -= 1;
+ debug_assert!(*last > b'0'); // the shortest repr cannot end with `0`
+ plus1w += ten_kappa;
+ }
+ }
+
+ // check if this representation is also the closest representation to `v - 1 ulp`.
+ //
+ // this is simply the same as the terminating conditions for `v + 1 ulp`, with every
+ // `plus1v_up` replaced by `plus1v_down`. the overflow analysis holds equally.
+ if plus1w < plus1v_down
+ && threshold - plus1w >= ten_kappa
+ && (plus1w + ten_kappa < plus1v_down
+ || plus1v_down - plus1w >= plus1w + ten_kappa - plus1v_down)
+ {
+ return None;
+ }
+
+ // now we have the closest representation to `v` between `plus1` and `minus1`.
+ // this is too liberal, though, so we reject any `w(n)` not between `plus0` and `minus0`,
+ // i.e., `plus1 - plus1w(n) <= minus0` or `plus1 - plus1w(n) >= plus0`. we utilize the facts
+ // that `threshold = plus1 - minus1` and `plus1 - plus0 = minus0 - minus1 = 2 ulp`.
+ if 2 * ulp <= plus1w && plus1w <= threshold - 4 * ulp { Some((buf, exp)) } else { None }
+ }
+}
+
+/// The shortest mode implementation for Grisu with Dragon fallback.
+///
+/// This should be used for most cases.
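+///
+/// A usage sketch (not compiled; `decoded` stands in for a `Decoded` built from
+/// `3.14_f64`, and the setup is assumed):
+///
+/// ```ignore (internal unstable API)
+/// let mut buf = [MaybeUninit::<u8>::uninit(); MAX_SIG_DIGITS];
+/// let (digits, exp) = format_shortest(&decoded, &mut buf);
+/// assert_eq!(digits, b"314"); // shortest digits of 3.14
+/// assert_eq!(exp, 1); // i.e., 0.314 * 10^1
+/// ```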
+pub fn format_shortest<'a>(
+ d: &Decoded,
+ buf: &'a mut [MaybeUninit<u8>],
+) -> (/*digits*/ &'a [u8], /*exp*/ i16) {
+ use crate::num::flt2dec::strategy::dragon::format_shortest as fallback;
+ // SAFETY: The borrow checker is not smart enough to let us use `buf`
+ // in the second branch, so we launder the lifetime here. But we only re-use
+ // `buf` if `format_shortest_opt` returned `None` so this is okay.
+ match format_shortest_opt(d, unsafe { &mut *(buf as *mut _) }) {
+ Some(ret) => ret,
+ None => fallback(d, buf),
+ }
+}
+
+/// The exact and fixed mode implementation for Grisu.
+///
+/// It returns `None` when it would return an inexact representation otherwise.
+pub fn format_exact_opt<'a>(
+ d: &Decoded,
+ buf: &'a mut [MaybeUninit<u8>],
+ limit: i16,
+) -> Option<(/*digits*/ &'a [u8], /*exp*/ i16)> {
+ assert!(d.mant > 0);
+ assert!(d.mant < (1 << 61)); // we need at least three bits of additional precision
+ assert!(!buf.is_empty());
+
+ // normalize and scale `v`.
+ let v = Fp { f: d.mant, e: d.exp }.normalize();
+ let (minusk, cached) = cached_power(ALPHA - v.e - 64, GAMMA - v.e - 64);
+ let v = v.mul(&cached);
+
+ // divide `v` into integral and fractional parts.
+ let e = -v.e as usize;
+ let vint = (v.f >> e) as u32;
+ let vfrac = v.f & ((1 << e) - 1);
+
+ // both the old `v` and the new `v` (scaled by `10^-k`) have an error of < 1 ulp (Theorem 5.1).
+ // as we don't know whether the error is positive or negative, we use two approximations
+ // spaced equally apart, giving a maximal error of 2 ulps (as in the shortest case).
+ //
+ // the goal is to find the exactly rounded series of digits that are common to
+ // both `v - 1 ulp` and `v + 1 ulp`, so that we are maximally confident.
+ // if this is not possible, we don't know which one is the correct output for `v`,
+ // so we give up and fall back.
+ //
+ // `err` is defined as `1 ulp * 2^e` here (same to the ulp in `vfrac`),
+ // and we will scale it whenever `v` gets scaled.
+ let mut err = 1;
+
+ // calculate the largest `10^max_kappa` no more than `v` (thus `v < 10^(max_kappa+1)`).
+ // this is an upper bound of `kappa` below.
+ let (max_kappa, max_ten_kappa) = max_pow10_no_more_than(vint);
+
+ let mut i = 0;
+ let exp = max_kappa as i16 - minusk + 1;
+
+ // if we are working with the last-digit limitation, we need to shorten the buffer
+ // before the actual rendering in order to avoid double rounding.
+ // note that we have to enlarge the buffer again when rounding up happens!
+ let len = if exp <= limit {
+ // oops, we cannot even produce *one* digit.
+ // this is possible when, say, we've got something like 9.5 and it's being rounded to 10.
+ //
+ // in principle we can immediately call `possibly_round` with an empty buffer,
+ // but scaling `max_ten_kappa << e` by 10 can result in overflow.
+ // thus we are being sloppy here and widen the error range by a factor of 10.
+ // this will increase the false negative rate, but only very, *very* slightly;
+ // it can only matter noticeably when the mantissa is bigger than 60 bits.
+ //
+ // SAFETY: `len=0`, so the obligation of having initialized this memory is trivial.
+ return unsafe {
+ possibly_round(buf, 0, exp, limit, v.f / 10, (max_ten_kappa as u64) << e, err << e)
+ };
+ } else if ((exp as i32 - limit as i32) as usize) < buf.len() {
+ (exp - limit) as usize
+ } else {
+ buf.len()
+ };
+ debug_assert!(len > 0);
+
+ // render integral parts.
+ // the error is entirely fractional, so we don't need to check it in this part.
+ let mut kappa = max_kappa as i16;
+ let mut ten_kappa = max_ten_kappa; // 10^kappa
+ let mut remainder = vint; // digits yet to be rendered
+ loop {
+ // we always have at least one digit to render
+ // invariants:
+ // - `remainder < 10^(kappa+1)`
+ // - `vint = d[0..n-1] * 10^(kappa+1) + remainder`
+ // (it follows that `remainder = vint % 10^(kappa+1)`)
+
+ // divide `remainder` by `10^kappa`. both are scaled by `2^-e`.
+ let q = remainder / ten_kappa;
+ let r = remainder % ten_kappa;
+ debug_assert!(q < 10);
+ buf[i] = MaybeUninit::new(b'0' + q as u8);
+ i += 1;
+
+ // is the buffer full? run the rounding pass with the remainder.
+ if i == len {
+ let vrem = ((r as u64) << e) + vfrac; // == (v % 10^kappa) * 2^e
+ // SAFETY: we have initialized `len` many bytes.
+ return unsafe {
+ possibly_round(buf, len, exp, limit, vrem, (ten_kappa as u64) << e, err << e)
+ };
+ }
+
+ // break the loop when we have rendered all integral digits.
+ // the exact number of digits is `max_kappa + 1` as `plus1 < 10^(max_kappa+1)`.
+ if i > max_kappa as usize {
+ debug_assert_eq!(ten_kappa, 1);
+ debug_assert_eq!(kappa, 0);
+ break;
+ }
+
+ // restore invariants
+ kappa -= 1;
+ ten_kappa /= 10;
+ remainder = r;
+ }
+
+ // render fractional parts.
+ //
+ // in principle we can continue to the last available digit and check for the accuracy.
+ // unfortunately we are working with the finite-sized integers, so we need some criterion
+ // to detect the overflow. V8 uses `remainder > err`, which becomes false when
+ // the first `i` significant digits of `v - 1 ulp` and `v` differ. however this rejects
+ // too many otherwise valid input.
+ //
+ // since the later phase has a correct overflow detection, we instead use tighter criterion:
+ // we continue til `err` exceeds `10^kappa / 2`, so that the range between `v - 1 ulp` and
+ // `v + 1 ulp` definitely contains two or more rounded representations. this is same to
+ // the first two comparisons from `possibly_round`, for the reference.
+ let mut remainder = vfrac;
+ let maxerr = 1 << (e - 1);
+ while err < maxerr {
+ // invariants, where `m = max_kappa + 1` (# of digits in the integral part):
+ // - `remainder < 2^e`
+ // - `vfrac * 10^(n-m) = d[m..n-1] * 2^e + remainder`
+ // - `err = 10^(n-m)`
+
+ remainder *= 10; // won't overflow, `2^e * 10 < 2^64`
+ err *= 10; // won't overflow, `err * 10 < 2^e * 5 < 2^64`
+
+ // divide `remainder` by `10^kappa`.
+ // both are scaled by `2^e / 10^kappa`, so the latter is implicit here.
+ let q = remainder >> e;
+ let r = remainder & ((1 << e) - 1);
+ debug_assert!(q < 10);
+ buf[i] = MaybeUninit::new(b'0' + q as u8);
+ i += 1;
+
+ // is the buffer full? run the rounding pass with the remainder.
+ if i == len {
+ // SAFETY: we have initialized `len` many bytes.
+ return unsafe { possibly_round(buf, len, exp, limit, r, 1 << e, err) };
+ }
+
+ // restore invariants
+ remainder = r;
+ }
+
+ // further calculation is useless (`possibly_round` definitely fails), so we give up.
+ return None;
+
+ // we've generated all requested digits of `v`, which should also match the corresponding
+ // digits of `v - 1 ulp`. now we check whether there is a unique representation shared by
+ // both `v - 1 ulp` and `v + 1 ulp`; this can be either the generated digits or
+ // the rounded-up version of those digits. if the range contains multiple representations
+ // of the same length, we cannot be sure and should return `None` instead.
+ //
+ // all arguments here are scaled by the common (but implicit) value `k`, so that:
+ // - `remainder = (v % 10^kappa) * k`
+ // - `ten_kappa = 10^kappa * k`
+ // - `ulp = 2^-e * k`
+ //
+ // SAFETY: the first `len` bytes of `buf` must be initialized.
+ unsafe fn possibly_round(
+ buf: &mut [MaybeUninit<u8>],
+ mut len: usize,
+ mut exp: i16,
+ limit: i16,
+ remainder: u64,
+ ten_kappa: u64,
+ ulp: u64,
+ ) -> Option<(&[u8], i16)> {
+ debug_assert!(remainder < ten_kappa);
+
+ // 10^kappa
+ // : : :<->: :
+ // : : : : :
+ // :|1 ulp|1 ulp| :
+ // :|<--->|<--->| :
+ // ----|-----|-----|----
+ // | v |
+ // v - 1 ulp v + 1 ulp
+ //
+ // (for the reference, the dotted line indicates the exact value for
+ // possible representations in given number of digits.)
+ //
+ // the error is so large that there are at least three possible representations
+ // between `v - 1 ulp` and `v + 1 ulp`. we cannot determine which one is correct.
+ if ulp >= ten_kappa {
+ return None;
+ }
+
+ // 10^kappa
+ // :<------->:
+ // : :
+ // : |1 ulp|1 ulp|
+ // : |<--->|<--->|
+ // ----|-----|-----|----
+ // | v |
+ // v - 1 ulp v + 1 ulp
+ //
+ // in fact, 1/2 ulp is enough to introduce two possible representations.
+ // (remember that we need a unique representation for both `v - 1 ulp` and `v + 1 ulp`.)
+ // this won't overflow, as `ulp < ten_kappa` from the first check.
+ if ten_kappa - ulp <= ulp {
+ return None;
+ }
+
+ // remainder
+ // :<->| :
+ // : | :
+ // :<--------- 10^kappa ---------->:
+ // | : | :
+ // |1 ulp|1 ulp| :
+ // |<--->|<--->| :
+ // ----|-----|-----|------------------------
+ // | v |
+ // v - 1 ulp v + 1 ulp
+ //
+ // if `v + 1 ulp` is closer to the rounded-down representation (which is already in `buf`),
+ // then we can safely return. note that `v - 1 ulp` *can* be less than the current
+ // representation, but as `1 ulp < 10^kappa / 2`, this condition is enough:
+ // the distance between `v - 1 ulp` and the current representation
+ // cannot exceed `10^kappa / 2`.
+ //
+ // the condition is equivalent to `remainder + ulp < 10^kappa / 2`.
+ // since this can easily overflow, first check if `remainder < 10^kappa / 2`.
+ // we've already verified that `ulp < 10^kappa / 2`, so as long as
+ // `10^kappa` did not overflow after all, the second check is fine.
+ if ten_kappa - remainder > remainder && ten_kappa - 2 * remainder >= 2 * ulp {
+ // SAFETY: our caller initialized that memory.
+ return Some((unsafe { MaybeUninit::slice_assume_init_ref(&buf[..len]) }, exp));
+ }
+
+ // :<------- remainder ------>| :
+ // : | :
+ // :<--------- 10^kappa --------->:
+ // : | | : |
+ // : |1 ulp|1 ulp|
+ // : |<--->|<--->|
+ // -----------------------|-----|-----|-----
+ // | v |
+ // v - 1 ulp v + 1 ulp
+ //
+ // on the other hand, if `v - 1 ulp` is closer to the rounded-up representation,
+ // we should round up and return. for the same reason we don't need to check `v + 1 ulp`.
+ //
+ // the condition is equivalent to `remainder - ulp >= 10^kappa / 2`.
+ // again we first check if `remainder > ulp` (note that this is not `remainder >= ulp`,
+ // as `10^kappa` is never zero). also note that `remainder - ulp <= 10^kappa`,
+ // so the second check does not overflow.
+ if remainder > ulp && ten_kappa - (remainder - ulp) <= remainder - ulp {
+ if let Some(c) =
+ // SAFETY: our caller must have initialized that memory.
+ round_up(unsafe { MaybeUninit::slice_assume_init_mut(&mut buf[..len]) })
+ {
+ // only add an additional digit when a fixed precision was requested.
+ // we also need to check that, if the original buffer was empty,
+ // the additional digit can only be added when `exp == limit` (edge case).
+ exp += 1;
+ if exp > limit && len < buf.len() {
+ buf[len] = MaybeUninit::new(c);
+ len += 1;
+ }
+ }
+ // SAFETY: we and our caller initialized that memory.
+ return Some((unsafe { MaybeUninit::slice_assume_init_ref(&buf[..len]) }, exp));
+ }
+
+ // otherwise we are doomed (i.e., some values between `v - 1 ulp` and `v + 1 ulp` are
+ // rounding down and others are rounding up) and give up.
+ None
+ }
+}
+
+/// The exact and fixed mode implementation for Grisu with Dragon fallback.
+///
+/// This should be used for most cases.
+pub fn format_exact<'a>(
+ d: &Decoded,
+ buf: &'a mut [MaybeUninit<u8>],
+ limit: i16,
+) -> (/*digits*/ &'a [u8], /*exp*/ i16) {
+ use crate::num::flt2dec::strategy::dragon::format_exact as fallback;
+ // SAFETY: The borrow checker is not smart enough to let us use `buf`
+ // in the second branch, so we launder the lifetime here. But we only re-use
+ // `buf` if `format_exact_opt` returned `None` so this is okay.
+ match format_exact_opt(d, unsafe { &mut *(buf as *mut _) }, limit) {
+ Some(ret) => ret,
+ None => fallback(d, buf, limit),
+ }
+}
diff --git a/library/core/src/num/fmt.rs b/library/core/src/num/fmt.rs
new file mode 100644
index 000000000..ed6119715
--- /dev/null
+++ b/library/core/src/num/fmt.rs
@@ -0,0 +1,108 @@
+//! Shared utilities used by both float and integer formatting.
+#![doc(hidden)]
+#![unstable(
+ feature = "numfmt",
+ reason = "internal routines only exposed for testing",
+ issue = "none"
+)]
+
+/// Formatted parts.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum Part<'a> {
+ /// Given number of zero digits.
+ Zero(usize),
+ /// A literal number up to 5 digits.
+ Num(u16),
+ /// A verbatim copy of given bytes.
+ Copy(&'a [u8]),
+}
+
+impl<'a> Part<'a> {
+ /// Returns the exact byte length of the given part.
+ pub fn len(&self) -> usize {
+ match *self {
+ Part::Zero(nzeroes) => nzeroes,
+ Part::Num(v) => {
+ if v < 1_000 {
+ if v < 10 {
+ 1
+ } else if v < 100 {
+ 2
+ } else {
+ 3
+ }
+ } else {
+ if v < 10_000 { 4 } else { 5 }
+ }
+ }
+ Part::Copy(buf) => buf.len(),
+ }
+ }
+
+ /// Writes a part into the supplied buffer.
+ /// Returns the number of written bytes, or `None` if the buffer is too small.
+ /// (It may still leave partially written bytes in the buffer; do not rely on that.)
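+ ///
+ /// A sketch (not compiled; this is an unstable internal type):
+ ///
+ /// ```ignore (internal unstable API)
+ /// let mut out = [0u8; 4];
+ /// assert_eq!(Part::Num(42).write(&mut out), Some(2));
+ /// assert_eq!(&out[..2], b"42");
+ /// ```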
+ pub fn write(&self, out: &mut [u8]) -> Option<usize> {
+ let len = self.len();
+ if out.len() >= len {
+ match *self {
+ Part::Zero(nzeroes) => {
+ for c in &mut out[..nzeroes] {
+ *c = b'0';
+ }
+ }
+ Part::Num(mut v) => {
+ for c in out[..len].iter_mut().rev() {
+ *c = b'0' + (v % 10) as u8;
+ v /= 10;
+ }
+ }
+ Part::Copy(buf) => {
+ out[..buf.len()].copy_from_slice(buf);
+ }
+ }
+ Some(len)
+ } else {
+ None
+ }
+ }
+}
+
+/// Formatted result containing one or more parts.
+/// This can be written to a byte buffer or converted to an allocated string.
+#[allow(missing_debug_implementations)]
+#[derive(Clone)]
+pub struct Formatted<'a> {
+ /// A string slice representing a sign, either `""`, `"-"`, or `"+"`.
+ pub sign: &'static str,
+ /// Formatted parts to be rendered after a sign and optional zero padding.
+ pub parts: &'a [Part<'a>],
+}
+
+impl<'a> Formatted<'a> {
+ /// Returns the exact byte length of the combined formatted result.
+ pub fn len(&self) -> usize {
+ let mut len = self.sign.len();
+ for part in self.parts {
+ len += part.len();
+ }
+ len
+ }
+
+ /// Writes all formatted parts into the supplied buffer.
+ /// Returns the number of written bytes, or `None` if the buffer is too small.
+ /// (It may still leave partially written bytes in the buffer; do not rely on that.)
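+ ///
+ /// For example (a sketch), a `Formatted` with `sign: "-"` and parts
+ /// `[Num(1), Copy(b"."), Zero(2)]` writes the five bytes `-1.00`.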
+ pub fn write(&self, out: &mut [u8]) -> Option<usize> {
+ if out.len() < self.sign.len() {
+ return None;
+ }
+ out[..self.sign.len()].copy_from_slice(self.sign.as_bytes());
+
+ let mut written = self.sign.len();
+ for part in self.parts {
+ let len = part.write(&mut out[written..])?;
+ written += len;
+ }
+ Some(written)
+ }
+}
diff --git a/library/core/src/num/int_log10.rs b/library/core/src/num/int_log10.rs
new file mode 100644
index 000000000..cc26c04a5
--- /dev/null
+++ b/library/core/src/num/int_log10.rs
@@ -0,0 +1,140 @@
+/// These functions compute the integer logarithm of their type, assuming
+/// that someone has already checked that the value is strictly positive.
+
+// 0 < val <= u8::MAX
+#[inline]
+pub const fn u8(val: u8) -> u32 {
+ let val = val as u32;
+
+ // For better performance, avoid branches by assembling the solution
+ // in the bits above the low 8 bits.
+
+ // Adding c1 to val gives 10 in the top bits for val < 10, 11 for val >= 10
+ const C1: u32 = 0b11_00000000 - 10; // 758
+ // Adding c2 to val gives 01 in the top bits for val < 100, 10 for val >= 100
+ const C2: u32 = 0b10_00000000 - 100; // 412
+
+ // Value of top bits:
+ // +c1 +c2 1&2
+ // 0..=9 10 01 00 = 0
+ // 10..=99 11 01 01 = 1
+ // 100..=255 11 10 10 = 2
+ ((val + C1) & (val + C2)) >> 8
+}
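+
+// Worked example (illustrative): for val = 99, val + C1 = 857 = 0b11_01011001
+// and val + C2 = 511 = 0b01_11111111; ANDing the two top bits gives 0b01, so
+// the function returns 1 == log10(99).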
+
+// 0 < val < 100_000
+#[inline]
+const fn less_than_5(val: u32) -> u32 {
+ // Similar to u8, when adding one of these constants to val,
+ // we get two possible bit patterns above the low 17 bits,
+ // depending on whether val is below or above the threshold.
+ const C1: u32 = 0b011_00000000000000000 - 10; // 393206
+ const C2: u32 = 0b100_00000000000000000 - 100; // 524188
+ const C3: u32 = 0b111_00000000000000000 - 1000; // 916504
+ const C4: u32 = 0b100_00000000000000000 - 10000; // 514288
+
+ // Value of top bits:
+ // +c1 +c2 1&2 +c3 +c4 3&4 ^
+ // 0..=9 010 011 010 110 011 010 000 = 0
+ // 10..=99 011 011 011 110 011 010 001 = 1
+ // 100..=999 011 100 000 110 011 010 010 = 2
+ // 1000..=9999 011 100 000 111 011 011 011 = 3
+ // 10000..=99999 011 100 000 111 100 100 100 = 4
+ (((val + C1) & (val + C2)) ^ ((val + C3) & (val + C4))) >> 17
+}
+
+// 0 < val <= u16::MAX
+#[inline]
+pub const fn u16(val: u16) -> u32 {
+ less_than_5(val as u32)
+}
+
+// 0 < val <= u32::MAX
+#[inline]
+pub const fn u32(mut val: u32) -> u32 {
+ let mut log = 0;
+ if val >= 100_000 {
+ val /= 100_000;
+ log += 5;
+ }
+ log + less_than_5(val)
+}
+
+// 0 < val <= u64::MAX
+#[inline]
+pub const fn u64(mut val: u64) -> u32 {
+ let mut log = 0;
+ if val >= 10_000_000_000 {
+ val /= 10_000_000_000;
+ log += 10;
+ }
+ if val >= 100_000 {
+ val /= 100_000;
+ log += 5;
+ }
+ log + less_than_5(val as u32)
+}
+
+// 0 < val <= u128::MAX
+#[inline]
+pub const fn u128(mut val: u128) -> u32 {
+ let mut log = 0;
+ if val >= 100_000_000_000_000_000_000_000_000_000_000 {
+ val /= 100_000_000_000_000_000_000_000_000_000_000;
+ log += 32;
+ return log + u32(val as u32);
+ }
+ if val >= 10_000_000_000_000_000 {
+ val /= 10_000_000_000_000_000;
+ log += 16;
+ }
+ log + u64(val as u64)
+}
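+
+// Worked example (illustrative): u64(3_000_000_000_000) reduces by 10^10
+// (val = 300, log = 10), skips the 10^5 step since 300 < 100_000, and adds
+// less_than_5(300) = 2, returning 12 == floor(log10(3 * 10^12)).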
+
+#[cfg(target_pointer_width = "16")]
+#[inline]
+pub const fn usize(val: usize) -> u32 {
+ u16(val as _)
+}
+
+#[cfg(target_pointer_width = "32")]
+#[inline]
+pub const fn usize(val: usize) -> u32 {
+ u32(val as _)
+}
+
+#[cfg(target_pointer_width = "64")]
+#[inline]
+pub const fn usize(val: usize) -> u32 {
+ u64(val as _)
+}
+
+// 0 < val <= i8::MAX
+#[inline]
+pub const fn i8(val: i8) -> u32 {
+ u8(val as u8)
+}
+
+// 0 < val <= i16::MAX
+#[inline]
+pub const fn i16(val: i16) -> u32 {
+ u16(val as u16)
+}
+
+// 0 < val <= i32::MAX
+#[inline]
+pub const fn i32(val: i32) -> u32 {
+ u32(val as u32)
+}
+
+// 0 < val <= i64::MAX
+#[inline]
+pub const fn i64(val: i64) -> u32 {
+ u64(val as u64)
+}
+
+// 0 < val <= i128::MAX
+#[inline]
+pub const fn i128(val: i128) -> u32 {
+ u128(val as u128)
+}
diff --git a/library/core/src/num/int_macros.rs b/library/core/src/num/int_macros.rs
new file mode 100644
index 000000000..a66de19ba
--- /dev/null
+++ b/library/core/src/num/int_macros.rs
@@ -0,0 +1,2744 @@
+macro_rules! int_impl {
+ ($SelfT:ty, $ActualT:ident, $UnsignedT:ty, $BITS:expr, $BITS_MINUS_ONE:expr, $Min:expr, $Max:expr,
+ $rot:expr, $rot_op:expr, $rot_result:expr, $swap_op:expr, $swapped:expr,
+ $reversed:expr, $le_bytes:expr, $be_bytes:expr,
+ $to_xe_bytes_doc:expr, $from_xe_bytes_doc:expr,
+ $bound_condition:expr) => {
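+ // Invocation sketch with hypothetical arguments, in the parameter order
+ // declared above (the real call sites pass per-type values elsewhere):
+ //
+ //     int_impl! { i8, i8, u8, 8, 7, -128, 127, /*rot*/ 2,
+ //         /*rot_op*/ "-0x7e", /*rot_result*/ "0xa", /*swap_op*/ "0x12",
+ //         /*swapped*/ "0x12", /*reversed*/ "0x48", /*le_bytes*/ "[0x12]",
+ //         /*be_bytes*/ "[0x12]", /*to_xe_bytes_doc*/ "",
+ //         /*from_xe_bytes_doc*/ "", /*bound_condition*/ "" }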
+ /// The smallest value that can be represented by this integer type
+ #[doc = concat!("(&minus;2<sup>", $BITS_MINUS_ONE, "</sup>", $bound_condition, ")")]
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN, ", stringify!($Min), ");")]
+ /// ```
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
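+ // The XOR clears every bit except the sign bit: e.g. for i8,
+ // (!0u8 >> 1) == 0x7f, and !0i8 ^ 0x7f == -128.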
+ pub const MIN: Self = !0 ^ ((!0 as $UnsignedT) >> 1) as Self;
+
+ /// The largest value that can be represented by this integer type
+ #[doc = concat!("(2<sup>", $BITS_MINUS_ONE, "</sup> &minus; 1", $bound_condition, ")")]
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX, ", stringify!($Max), ");")]
+ /// ```
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MAX: Self = !Self::MIN;
+
+ /// The size of this integer type in bits.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::BITS, ", stringify!($BITS), ");")]
+ /// ```
+ #[stable(feature = "int_bits_const", since = "1.53.0")]
+ pub const BITS: u32 = $BITS;
+
+ /// Converts a string slice in a given base to an integer.
+ ///
+ /// The string is expected to be an optional `+` or `-` sign followed by digits.
+ /// Leading and trailing whitespace represent an error. Digits are a subset of these characters,
+ /// depending on `radix`:
+ ///
+ /// * `0-9`
+ /// * `a-z`
+ /// * `A-Z`
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `radix` is not in the range from 2 to 36.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::from_str_radix(\"A\", 16), Ok(10));")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn from_str_radix(src: &str, radix: u32) -> Result<Self, ParseIntError> {
+ from_str_radix(src, radix)
+ }
+
+ /// Returns the number of ones in the binary representation of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let n = 0b100_0000", stringify!($SelfT), ";")]
+ ///
+ /// assert_eq!(n.count_ones(), 1);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[doc(alias = "popcount")]
+ #[doc(alias = "popcnt")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn count_ones(self) -> u32 { (self as $UnsignedT).count_ones() }
+
+ /// Returns the number of zeros in the binary representation of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.count_zeros(), 1);")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn count_zeros(self) -> u32 {
+ (!self).count_ones()
+ }
+
+ /// Returns the number of leading zeros in the binary representation of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let n = -1", stringify!($SelfT), ";")]
+ ///
+ /// assert_eq!(n.leading_zeros(), 0);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn leading_zeros(self) -> u32 {
+ (self as $UnsignedT).leading_zeros()
+ }
+
+ /// Returns the number of trailing zeros in the binary representation of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let n = -4", stringify!($SelfT), ";")]
+ ///
+ /// assert_eq!(n.trailing_zeros(), 2);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn trailing_zeros(self) -> u32 {
+ (self as $UnsignedT).trailing_zeros()
+ }
+
+ /// Returns the number of leading ones in the binary representation of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let n = -1", stringify!($SelfT), ";")]
+ ///
+ #[doc = concat!("assert_eq!(n.leading_ones(), ", stringify!($BITS), ");")]
+ /// ```
+ #[stable(feature = "leading_trailing_ones", since = "1.46.0")]
+ #[rustc_const_stable(feature = "leading_trailing_ones", since = "1.46.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn leading_ones(self) -> u32 {
+ (self as $UnsignedT).leading_ones()
+ }
+
+ /// Returns the number of trailing ones in the binary representation of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let n = 3", stringify!($SelfT), ";")]
+ ///
+ /// assert_eq!(n.trailing_ones(), 2);
+ /// ```
+ #[stable(feature = "leading_trailing_ones", since = "1.46.0")]
+ #[rustc_const_stable(feature = "leading_trailing_ones", since = "1.46.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn trailing_ones(self) -> u32 {
+ (self as $UnsignedT).trailing_ones()
+ }
+
+ /// Shifts the bits to the left by a specified amount, `n`,
+ /// wrapping the truncated bits to the end of the resulting integer.
+ ///
+ /// Please note this isn't the same operation as the `<<` shifting operator!
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let n = ", $rot_op, stringify!($SelfT), ";")]
+ #[doc = concat!("let m = ", $rot_result, ";")]
+ ///
+ #[doc = concat!("assert_eq!(n.rotate_left(", $rot, "), m);")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn rotate_left(self, n: u32) -> Self {
+ (self as $UnsignedT).rotate_left(n) as Self
+ }
+
+ /// Shifts the bits to the right by a specified amount, `n`,
+ /// wrapping the truncated bits to the beginning of the resulting
+ /// integer.
+ ///
+ /// Please note this isn't the same operation as the `>>` shifting operator!
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let n = ", $rot_result, stringify!($SelfT), ";")]
+ #[doc = concat!("let m = ", $rot_op, ";")]
+ ///
+ #[doc = concat!("assert_eq!(n.rotate_right(", $rot, "), m);")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn rotate_right(self, n: u32) -> Self {
+ (self as $UnsignedT).rotate_right(n) as Self
+ }
+
+ /// Reverses the byte order of the integer.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let n = ", $swap_op, stringify!($SelfT), ";")]
+ ///
+ /// let m = n.swap_bytes();
+ ///
+ #[doc = concat!("assert_eq!(m, ", $swapped, ");")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn swap_bytes(self) -> Self {
+ (self as $UnsignedT).swap_bytes() as Self
+ }
+
+ /// Reverses the order of bits in the integer. The least-significant bit becomes the most-significant
+ /// bit, the second least-significant bit becomes the second most-significant bit, and so on.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let n = ", $swap_op, stringify!($SelfT), ";")]
+ /// let m = n.reverse_bits();
+ ///
+ #[doc = concat!("assert_eq!(m, ", $reversed, ");")]
+ #[doc = concat!("assert_eq!(0, 0", stringify!($SelfT), ".reverse_bits());")]
+ /// ```
+ #[stable(feature = "reverse_bits", since = "1.37.0")]
+ #[rustc_const_stable(feature = "reverse_bits", since = "1.37.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn reverse_bits(self) -> Self {
+ (self as $UnsignedT).reverse_bits() as Self
+ }
+
+ /// Converts an integer from big endian to the target's endianness.
+ ///
+ /// On big endian this is a no-op. On little endian the bytes are swapped.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let n = 0x1A", stringify!($SelfT), ";")]
+ ///
+ /// if cfg!(target_endian = "big") {
+ #[doc = concat!(" assert_eq!(", stringify!($SelfT), "::from_be(n), n)")]
+ /// } else {
+ #[doc = concat!(" assert_eq!(", stringify!($SelfT), "::from_be(n), n.swap_bytes())")]
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_conversions", since = "1.32.0")]
+ #[must_use]
+ #[inline]
+ pub const fn from_be(x: Self) -> Self {
+ #[cfg(target_endian = "big")]
+ {
+ x
+ }
+ #[cfg(not(target_endian = "big"))]
+ {
+ x.swap_bytes()
+ }
+ }
+
+ /// Converts an integer from little endian to the target's endianness.
+ ///
+ /// On little endian this is a no-op. On big endian the bytes are swapped.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let n = 0x1A", stringify!($SelfT), ";")]
+ ///
+ /// if cfg!(target_endian = "little") {
+ #[doc = concat!(" assert_eq!(", stringify!($SelfT), "::from_le(n), n)")]
+ /// } else {
+ #[doc = concat!(" assert_eq!(", stringify!($SelfT), "::from_le(n), n.swap_bytes())")]
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_conversions", since = "1.32.0")]
+ #[must_use]
+ #[inline]
+ pub const fn from_le(x: Self) -> Self {
+ #[cfg(target_endian = "little")]
+ {
+ x
+ }
+ #[cfg(not(target_endian = "little"))]
+ {
+ x.swap_bytes()
+ }
+ }
+
+ /// Converts `self` to big endian from the target's endianness.
+ ///
+ /// On big endian this is a no-op. On little endian the bytes are swapped.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let n = 0x1A", stringify!($SelfT), ";")]
+ ///
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(n.to_be(), n)
+ /// } else {
+ /// assert_eq!(n.to_be(), n.swap_bytes())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_conversions", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn to_be(self) -> Self { // or not to be?
+ #[cfg(target_endian = "big")]
+ {
+ self
+ }
+ #[cfg(not(target_endian = "big"))]
+ {
+ self.swap_bytes()
+ }
+ }
+
+ /// Converts `self` to little endian from the target's endianness.
+ ///
+ /// On little endian this is a no-op. On big endian the bytes are swapped.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let n = 0x1A", stringify!($SelfT), ";")]
+ ///
+ /// if cfg!(target_endian = "little") {
+ /// assert_eq!(n.to_le(), n)
+ /// } else {
+ /// assert_eq!(n.to_le(), n.swap_bytes())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_conversions", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn to_le(self) -> Self {
+ #[cfg(target_endian = "little")]
+ {
+ self
+ }
+ #[cfg(not(target_endian = "little"))]
+ {
+ self.swap_bytes()
+ }
+ }
+
+ /// Checked integer addition. Computes `self + rhs`, returning `None`
+ /// if overflow occurred.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX - 2).checked_add(1), Some(", stringify!($SelfT), "::MAX - 1));")]
+ #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX - 2).checked_add(3), None);")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_add(self, rhs: Self) -> Option<Self> {
+ let (a, b) = self.overflowing_add(rhs);
+ if unlikely!(b) {None} else {Some(a)}
+ }
+
+ /// Unchecked integer addition. Computes `self + rhs`, assuming overflow
+ /// cannot occur.
+ ///
+ /// # Safety
+ ///
+ /// This results in undefined behavior when
+ #[doc = concat!("`self + rhs > ", stringify!($SelfT), "::MAX` or `self + rhs < ", stringify!($SelfT), "::MIN`,")]
+ /// i.e. when [`checked_add`] would return `None`.
+ ///
+ #[doc = concat!("[`checked_add`]: ", stringify!($SelfT), "::checked_add")]
+ #[unstable(
+ feature = "unchecked_math",
+ reason = "niche optimization path",
+ issue = "85122",
+ )]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn unchecked_add(self, rhs: Self) -> Self {
+ // SAFETY: the caller must uphold the safety contract for
+ // `unchecked_add`.
+ unsafe { intrinsics::unchecked_add(self, rhs) }
+ }
+
+ /// Checked addition with an unsigned integer. Computes `self + rhs`,
+ /// returning `None` if overflow occurred.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// # #![feature(mixed_integer_ops)]
+ #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".checked_add_unsigned(2), Some(3));")]
+ #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX - 2).checked_add_unsigned(3), None);")]
+ /// ```
+ #[unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_add_unsigned(self, rhs: $UnsignedT) -> Option<Self> {
+ let (a, b) = self.overflowing_add_unsigned(rhs);
+ if unlikely!(b) {None} else {Some(a)}
+ }
+
+ /// Checked integer subtraction. Computes `self - rhs`, returning `None` if
+ /// overflow occurred.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MIN + 2).checked_sub(1), Some(", stringify!($SelfT), "::MIN + 1));")]
+ #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MIN + 2).checked_sub(3), None);")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
+ let (a, b) = self.overflowing_sub(rhs);
+ if unlikely!(b) {None} else {Some(a)}
+ }
+
+ /// Unchecked integer subtraction. Computes `self - rhs`, assuming overflow
+ /// cannot occur.
+ ///
+ /// # Safety
+ ///
+ /// This results in undefined behavior when
+ #[doc = concat!("`self - rhs > ", stringify!($SelfT), "::MAX` or `self - rhs < ", stringify!($SelfT), "::MIN`,")]
+ /// i.e. when [`checked_sub`] would return `None`.
+ ///
+ #[doc = concat!("[`checked_sub`]: ", stringify!($SelfT), "::checked_sub")]
+ #[unstable(
+ feature = "unchecked_math",
+ reason = "niche optimization path",
+ issue = "85122",
+ )]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn unchecked_sub(self, rhs: Self) -> Self {
+ // SAFETY: the caller must uphold the safety contract for
+ // `unchecked_sub`.
+ unsafe { intrinsics::unchecked_sub(self, rhs) }
+ }
+
+ /// Checked subtraction with an unsigned integer. Computes `self - rhs`,
+ /// returning `None` if overflow occurred.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// # #![feature(mixed_integer_ops)]
+ #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".checked_sub_unsigned(2), Some(-1));")]
+ #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MIN + 2).checked_sub_unsigned(3), None);")]
+ /// ```
+ #[unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_sub_unsigned(self, rhs: $UnsignedT) -> Option<Self> {
+ let (a, b) = self.overflowing_sub_unsigned(rhs);
+ if unlikely!(b) {None} else {Some(a)}
+ }
+
+ /// Checked integer multiplication. Computes `self * rhs`, returning `None` if
+ /// overflow occurred.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.checked_mul(1), Some(", stringify!($SelfT), "::MAX));")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.checked_mul(2), None);")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
+ let (a, b) = self.overflowing_mul(rhs);
+ if unlikely!(b) {None} else {Some(a)}
+ }
+
+ /// Unchecked integer multiplication. Computes `self * rhs`, assuming overflow
+ /// cannot occur.
+ ///
+ /// # Safety
+ ///
+ /// This results in undefined behavior when
+ #[doc = concat!("`self * rhs > ", stringify!($SelfT), "::MAX` or `self * rhs < ", stringify!($SelfT), "::MIN`,")]
+ /// i.e. when [`checked_mul`] would return `None`.
+ ///
+ #[doc = concat!("[`checked_mul`]: ", stringify!($SelfT), "::checked_mul")]
+ #[unstable(
+ feature = "unchecked_math",
+ reason = "niche optimization path",
+ issue = "85122",
+ )]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn unchecked_mul(self, rhs: Self) -> Self {
+ // SAFETY: the caller must uphold the safety contract for
+ // `unchecked_mul`.
+ unsafe { intrinsics::unchecked_mul(self, rhs) }
+ }
+
+ /// Checked integer division. Computes `self / rhs`, returning `None` if `rhs == 0`
+ /// or the division results in overflow.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MIN + 1).checked_div(-1), Some(", stringify!($Max), "));")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.checked_div(-1), None);")]
+ #[doc = concat!("assert_eq!((1", stringify!($SelfT), ").checked_div(0), None);")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_checked_int_div", since = "1.52.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_div(self, rhs: Self) -> Option<Self> {
+ if unlikely!(rhs == 0 || ((self == Self::MIN) && (rhs == -1))) {
+ None
+ } else {
+ // SAFETY: div by zero and by INT_MIN have been checked above
+ Some(unsafe { intrinsics::unchecked_div(self, rhs) })
+ }
+ }
+
+ /// Checked Euclidean division. Computes `self.div_euclid(rhs)`,
+ /// returning `None` if `rhs == 0` or the division results in overflow.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MIN + 1).checked_div_euclid(-1), Some(", stringify!($Max), "));")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.checked_div_euclid(-1), None);")]
+ #[doc = concat!("assert_eq!((1", stringify!($SelfT), ").checked_div_euclid(0), None);")]
+ /// ```
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_stable(feature = "const_euclidean_int_methods", since = "1.52.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_div_euclid(self, rhs: Self) -> Option<Self> {
+ // Using `&` helps LLVM see that it is the same check made in division.
+ if unlikely!(rhs == 0 || ((self == Self::MIN) & (rhs == -1))) {
+ None
+ } else {
+ Some(self.div_euclid(rhs))
+ }
+ }
+
+ /// Checked integer remainder. Computes `self % rhs`, returning `None` if
+ /// `rhs == 0` or the division results in overflow.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".checked_rem(2), Some(1));")]
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".checked_rem(0), None);")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.checked_rem(-1), None);")]
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_checked_int_div", since = "1.52.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_rem(self, rhs: Self) -> Option<Self> {
+ if unlikely!(rhs == 0 || ((self == Self::MIN) && (rhs == -1))) {
+ None
+ } else {
+ // SAFETY: div by zero and by INT_MIN have been checked above
+ Some(unsafe { intrinsics::unchecked_rem(self, rhs) })
+ }
+ }
+
+ /// Checked Euclidean remainder. Computes `self.rem_euclid(rhs)`, returning `None`
+ /// if `rhs == 0` or the division results in overflow.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".checked_rem_euclid(2), Some(1));")]
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".checked_rem_euclid(0), None);")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.checked_rem_euclid(-1), None);")]
+ /// ```
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_stable(feature = "const_euclidean_int_methods", since = "1.52.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_rem_euclid(self, rhs: Self) -> Option<Self> {
+ // Using `&` helps LLVM see that it is the same check made in division.
+ if unlikely!(rhs == 0 || ((self == Self::MIN) & (rhs == -1))) {
+ None
+ } else {
+ Some(self.rem_euclid(rhs))
+ }
+ }
+
+ /// Checked negation. Computes `-self`, returning `None` if `self == MIN`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".checked_neg(), Some(-5));")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.checked_neg(), None);")]
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_neg(self) -> Option<Self> {
+ let (a, b) = self.overflowing_neg();
+ if unlikely!(b) {None} else {Some(a)}
+ }
+
+ /// Checked shift left. Computes `self << rhs`, returning `None` if `rhs` is larger
+ /// than or equal to the number of bits in `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(0x1", stringify!($SelfT), ".checked_shl(4), Some(0x10));")]
+ #[doc = concat!("assert_eq!(0x1", stringify!($SelfT), ".checked_shl(129), None);")]
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_shl(self, rhs: u32) -> Option<Self> {
+ let (a, b) = self.overflowing_shl(rhs);
+ if unlikely!(b) {None} else {Some(a)}
+ }
+
+ /// Unchecked shift left. Computes `self << rhs`, assuming that
+ /// `rhs` is less than the number of bits in `self`.
+ ///
+ /// # Safety
+ ///
+ /// This results in undefined behavior if `rhs` is larger than
+ /// or equal to the number of bits in `self`,
+ /// i.e. when [`checked_shl`] would return `None`.
+ ///
+ #[doc = concat!("[`checked_shl`]: ", stringify!($SelfT), "::checked_shl")]
+ #[unstable(
+ feature = "unchecked_math",
+ reason = "niche optimization path",
+ issue = "85122",
+ )]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn unchecked_shl(self, rhs: Self) -> Self {
+ // SAFETY: the caller must uphold the safety contract for
+ // `unchecked_shl`.
+ unsafe { intrinsics::unchecked_shl(self, rhs) }
+ }
+
+ /// Checked shift right. Computes `self >> rhs`, returning `None` if `rhs` is
+ /// larger than or equal to the number of bits in `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(0x10", stringify!($SelfT), ".checked_shr(4), Some(0x1));")]
+ #[doc = concat!("assert_eq!(0x10", stringify!($SelfT), ".checked_shr(128), None);")]
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_shr(self, rhs: u32) -> Option<Self> {
+ let (a, b) = self.overflowing_shr(rhs);
+ if unlikely!(b) {None} else {Some(a)}
+ }
+
+ /// Unchecked shift right. Computes `self >> rhs`, assuming that
+ /// `rhs` is less than the number of bits in `self`.
+ ///
+ /// # Safety
+ ///
+ /// This results in undefined behavior if `rhs` is larger than
+ /// or equal to the number of bits in `self`,
+ /// i.e. when [`checked_shr`] would return `None`.
+ ///
+ #[doc = concat!("[`checked_shr`]: ", stringify!($SelfT), "::checked_shr")]
+ #[unstable(
+ feature = "unchecked_math",
+ reason = "niche optimization path",
+ issue = "85122",
+ )]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn unchecked_shr(self, rhs: Self) -> Self {
+ // SAFETY: the caller must uphold the safety contract for
+ // `unchecked_shr`.
+ unsafe { intrinsics::unchecked_shr(self, rhs) }
+ }
+
+ /// Checked absolute value. Computes `self.abs()`, returning `None` if
+ /// `self == MIN`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!((-5", stringify!($SelfT), ").checked_abs(), Some(5));")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.checked_abs(), None);")]
+ /// ```
+ #[stable(feature = "no_panic_abs", since = "1.13.0")]
+ #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_abs(self) -> Option<Self> {
+ if self.is_negative() {
+ self.checked_neg()
+ } else {
+ Some(self)
+ }
+ }
+
+ /// Checked exponentiation. Computes `self.pow(exp)`, returning `None` if
+ /// overflow occurred.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(8", stringify!($SelfT), ".checked_pow(2), Some(64));")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.checked_pow(2), None);")]
+ /// ```
+ #[stable(feature = "no_panic_pow", since = "1.34.0")]
+ #[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_pow(self, mut exp: u32) -> Option<Self> {
+ if exp == 0 {
+ return Some(1);
+ }
+ let mut base = self;
+ let mut acc: Self = 1;
+
+ while exp > 1 {
+ if (exp & 1) == 1 {
+ acc = try_opt!(acc.checked_mul(base));
+ }
+ exp /= 2;
+ base = try_opt!(base.checked_mul(base));
+ }
+ // Since exp != 0, the loop must end with exp == 1 here.
+ // Deal with the final bit of the exponent separately, since
+ // squaring the base afterwards is not necessary and may cause a
+ // needless overflow.
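+ // Trace (illustrative): 3.checked_pow(5) leaves the loop with acc == 3
+ // and base == 81, and the final multiplication returns Some(243).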
+ Some(try_opt!(acc.checked_mul(base)))
+ }
+
+ /// Saturating integer addition. Computes `self + rhs`, saturating at the numeric
+ /// bounds instead of overflowing.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".saturating_add(1), 101);")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.saturating_add(100), ", stringify!($SelfT), "::MAX);")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.saturating_add(-1), ", stringify!($SelfT), "::MIN);")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_saturating_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn saturating_add(self, rhs: Self) -> Self {
+ intrinsics::saturating_add(self, rhs)
+ }
+
+ /// Saturating addition with an unsigned integer. Computes `self + rhs`,
+ /// saturating at the numeric bounds instead of overflowing.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// # #![feature(mixed_integer_ops)]
+ #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".saturating_add_unsigned(2), 3);")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.saturating_add_unsigned(100), ", stringify!($SelfT), "::MAX);")]
+ /// ```
+ #[unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn saturating_add_unsigned(self, rhs: $UnsignedT) -> Self {
+ // Overflow can only happen at the upper bound
+ // We cannot use `unwrap_or` here because it is not `const`
+ match self.checked_add_unsigned(rhs) {
+ Some(x) => x,
+ None => Self::MAX,
+ }
+ }
+
+ /// Saturating integer subtraction. Computes `self - rhs`, saturating at the
+ /// numeric bounds instead of overflowing.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".saturating_sub(127), -27);")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.saturating_sub(100), ", stringify!($SelfT), "::MIN);")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.saturating_sub(-1), ", stringify!($SelfT), "::MAX);")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_saturating_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn saturating_sub(self, rhs: Self) -> Self {
+ intrinsics::saturating_sub(self, rhs)
+ }
+
+ /// Saturating subtraction with an unsigned integer. Computes `self - rhs`,
+ /// saturating at the numeric bounds instead of overflowing.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// # #![feature(mixed_integer_ops)]
+ #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".saturating_sub_unsigned(127), -27);")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.saturating_sub_unsigned(100), ", stringify!($SelfT), "::MIN);")]
+ /// ```
+ #[unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn saturating_sub_unsigned(self, rhs: $UnsignedT) -> Self {
+ // Overflow can only happen at the lower bound
+ // We cannot use `unwrap_or` here because it is not `const`
+ match self.checked_sub_unsigned(rhs) {
+ Some(x) => x,
+ None => Self::MIN,
+ }
+ }
+
+ /// Saturating integer negation. Computes `-self`, returning `MAX` if `self == MIN`
+ /// instead of overflowing.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".saturating_neg(), -100);")]
+ #[doc = concat!("assert_eq!((-100", stringify!($SelfT), ").saturating_neg(), 100);")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.saturating_neg(), ", stringify!($SelfT), "::MAX);")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.saturating_neg(), ", stringify!($SelfT), "::MIN + 1);")]
+ /// ```
+ #[stable(feature = "saturating_neg", since = "1.45.0")]
+ #[rustc_const_stable(feature = "const_saturating_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn saturating_neg(self) -> Self {
+ intrinsics::saturating_sub(0, self)
+ }
+
+ /// Saturating absolute value. Computes `self.abs()`, returning `MAX` if `self ==
+ /// MIN` instead of overflowing.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".saturating_abs(), 100);")]
+ #[doc = concat!("assert_eq!((-100", stringify!($SelfT), ").saturating_abs(), 100);")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.saturating_abs(), ", stringify!($SelfT), "::MAX);")]
+ #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MIN + 1).saturating_abs(), ", stringify!($SelfT), "::MAX);")]
+ /// ```
+ #[stable(feature = "saturating_neg", since = "1.45.0")]
+ #[rustc_const_stable(feature = "const_saturating_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn saturating_abs(self) -> Self {
+ if self.is_negative() {
+ self.saturating_neg()
+ } else {
+ self
+ }
+ }
+
+ /// Saturating integer multiplication. Computes `self * rhs`, saturating at the
+ /// numeric bounds instead of overflowing.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(10", stringify!($SelfT), ".saturating_mul(12), 120);")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.saturating_mul(10), ", stringify!($SelfT), "::MAX);")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.saturating_mul(10), ", stringify!($SelfT), "::MIN);")]
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_saturating_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn saturating_mul(self, rhs: Self) -> Self {
+ match self.checked_mul(rhs) {
+ Some(x) => x,
+ None => if (self < 0) == (rhs < 0) {
+ Self::MAX
+ } else {
+ Self::MIN
+ }
+ }
+ }
+
+ /// Saturating integer division. Computes `self / rhs`, saturating at the
+ /// numeric bounds instead of overflowing.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".saturating_div(2), 2);")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.saturating_div(-1), ", stringify!($SelfT), "::MIN + 1);")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.saturating_div(-1), ", stringify!($SelfT), "::MAX);")]
+ /// ```
+ ///
+ /// ```should_panic
+ #[doc = concat!("let _ = 1", stringify!($SelfT), ".saturating_div(0);")]
+ /// ```
+ #[stable(feature = "saturating_div", since = "1.58.0")]
+ #[rustc_const_stable(feature = "saturating_div", since = "1.58.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn saturating_div(self, rhs: Self) -> Self {
+ match self.overflowing_div(rhs) {
+ (result, false) => result,
+ (_result, true) => Self::MAX, // MIN / -1 is the only possible saturating overflow
+ }
+ }
+
+ /// Saturating integer exponentiation. Computes `self.pow(exp)`,
+ /// saturating at the numeric bounds instead of overflowing.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!((-4", stringify!($SelfT), ").saturating_pow(3), -64);")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.saturating_pow(2), ", stringify!($SelfT), "::MAX);")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.saturating_pow(3), ", stringify!($SelfT), "::MIN);")]
+ /// ```
+ #[stable(feature = "no_panic_pow", since = "1.34.0")]
+ #[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn saturating_pow(self, exp: u32) -> Self {
+ match self.checked_pow(exp) {
+ Some(x) => x,
+ None if self < 0 && exp % 2 == 1 => Self::MIN,
+ None => Self::MAX,
+ }
+ }
+
+ /// Wrapping (modular) addition. Computes `self + rhs`, wrapping around at the
+ /// boundary of the type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".wrapping_add(27), 127);")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.wrapping_add(2), ", stringify!($SelfT), "::MIN + 1);")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn wrapping_add(self, rhs: Self) -> Self {
+ intrinsics::wrapping_add(self, rhs)
+ }
+
+ /// Wrapping (modular) addition with an unsigned integer. Computes
+ /// `self + rhs`, wrapping around at the boundary of the type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// # #![feature(mixed_integer_ops)]
+ #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".wrapping_add_unsigned(27), 127);")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.wrapping_add_unsigned(2), ", stringify!($SelfT), "::MIN + 1);")]
+ /// ```
+ #[unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn wrapping_add_unsigned(self, rhs: $UnsignedT) -> Self {
+ self.wrapping_add(rhs as Self)
+ }
+
+ /// Wrapping (modular) subtraction. Computes `self - rhs`, wrapping around at the
+ /// boundary of the type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(0", stringify!($SelfT), ".wrapping_sub(127), -127);")]
+ #[doc = concat!("assert_eq!((-2", stringify!($SelfT), ").wrapping_sub(", stringify!($SelfT), "::MAX), ", stringify!($SelfT), "::MAX);")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn wrapping_sub(self, rhs: Self) -> Self {
+ intrinsics::wrapping_sub(self, rhs)
+ }
+
+ /// Wrapping (modular) subtraction with an unsigned integer. Computes
+ /// `self - rhs`, wrapping around at the boundary of the type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// # #![feature(mixed_integer_ops)]
+ #[doc = concat!("assert_eq!(0", stringify!($SelfT), ".wrapping_sub_unsigned(127), -127);")]
+ #[doc = concat!("assert_eq!((-2", stringify!($SelfT), ").wrapping_sub_unsigned(", stringify!($UnsignedT), "::MAX), -1);")]
+ /// ```
+ #[unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn wrapping_sub_unsigned(self, rhs: $UnsignedT) -> Self {
+ self.wrapping_sub(rhs as Self)
+ }
+
+ /// Wrapping (modular) multiplication. Computes `self * rhs`, wrapping around at
+ /// the boundary of the type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(10", stringify!($SelfT), ".wrapping_mul(12), 120);")]
+ /// assert_eq!(11i8.wrapping_mul(12), -124);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn wrapping_mul(self, rhs: Self) -> Self {
+ intrinsics::wrapping_mul(self, rhs)
+ }
+
+ /// Wrapping (modular) division. Computes `self / rhs`, wrapping around at the
+ /// boundary of the type.
+ ///
+ /// The only case where such wrapping can occur is when one divides `MIN / -1` on a signed type (where
+ /// `MIN` is the negative minimal value for the type); this is equivalent to `-MIN`, a positive value
+ /// that is too large to represent in the type. In such a case, this function returns `MIN` itself.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is 0.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".wrapping_div(10), 10);")]
+ /// assert_eq!((-128i8).wrapping_div(-1), -128);
+ /// ```
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[rustc_const_stable(feature = "const_wrapping_int_methods", since = "1.52.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn wrapping_div(self, rhs: Self) -> Self {
+ self.overflowing_div(rhs).0
+ }
+
+ /// Wrapping Euclidean division. Computes `self.div_euclid(rhs)`,
+ /// wrapping around at the boundary of the type.
+ ///
+ /// Wrapping will only occur in `MIN / -1` on a signed type (where `MIN` is the negative minimal value
+ /// for the type). This is equivalent to `-MIN`, a positive value that is too large to represent in the
+ /// type. In this case, this method returns `MIN` itself.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is 0.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".wrapping_div_euclid(10), 10);")]
+ /// assert_eq!((-128i8).wrapping_div_euclid(-1), -128);
+ /// ```
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_stable(feature = "const_euclidean_int_methods", since = "1.52.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn wrapping_div_euclid(self, rhs: Self) -> Self {
+ self.overflowing_div_euclid(rhs).0
+ }
+
+ /// Wrapping (modular) remainder. Computes `self % rhs`, wrapping around at the
+ /// boundary of the type.
+ ///
+ /// Such wrap-around never actually occurs mathematically; implementation artifacts make `x % y`
+ /// invalid for `MIN % -1` on a signed type (where `MIN` is the negative minimal value). In such a case,
+ /// this function returns `0`.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is 0.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".wrapping_rem(10), 0);")]
+ /// assert_eq!((-128i8).wrapping_rem(-1), 0);
+ /// ```
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[rustc_const_stable(feature = "const_wrapping_int_methods", since = "1.52.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn wrapping_rem(self, rhs: Self) -> Self {
+ self.overflowing_rem(rhs).0
+ }
+
+ /// Wrapping Euclidean remainder. Computes `self.rem_euclid(rhs)`, wrapping around
+ /// at the boundary of the type.
+ ///
+ /// Wrapping will only occur in `MIN % -1` on a signed type (where `MIN` is the negative minimal value
+ /// for the type). In this case, this method returns 0.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is 0.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".wrapping_rem_euclid(10), 0);")]
+ /// assert_eq!((-128i8).wrapping_rem_euclid(-1), 0);
+ /// ```
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_stable(feature = "const_euclidean_int_methods", since = "1.52.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn wrapping_rem_euclid(self, rhs: Self) -> Self {
+ self.overflowing_rem_euclid(rhs).0
+ }
+
+ /// Wrapping (modular) negation. Computes `-self`, wrapping around at the boundary
+ /// of the type.
+ ///
+ /// The only case where such wrapping can occur is when one negates `MIN` on a signed type (where `MIN`
+ /// is the negative minimal value for the type); this is a positive value that is too large to represent
+ /// in the type. In such a case, this function returns `MIN` itself.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".wrapping_neg(), -100);")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.wrapping_neg(), ", stringify!($SelfT), "::MIN);")]
+ /// ```
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn wrapping_neg(self) -> Self {
+ (0 as $SelfT).wrapping_sub(self)
+ }
+
+ /// Panic-free bitwise shift-left; yields `self << mask(rhs)`, where `mask` removes
+ /// any high-order bits of `rhs` that would cause the shift to exceed the bitwidth of the type.
+ ///
+ /// Note that this is *not* the same as a rotate-left; the RHS of a wrapping shift-left is restricted to
+ /// the range of the type, rather than the bits shifted out of the LHS being returned to the other end.
+ /// The primitive integer types all implement a [`rotate_left`](Self::rotate_left) function,
+ /// which may be what you want instead.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!((-1", stringify!($SelfT), ").wrapping_shl(7), -128);")]
+ #[doc = concat!("assert_eq!((-1", stringify!($SelfT), ").wrapping_shl(128), -1);")]
+ /// ```
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn wrapping_shl(self, rhs: u32) -> Self {
+ // SAFETY: the masking by the bitsize of the type ensures that we do not shift
+ // out of bounds
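+ // (`rhs & (BITS - 1)` equals `rhs % BITS` because `BITS` is a power of two.)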
+ unsafe {
+ intrinsics::unchecked_shl(self, (rhs & ($BITS - 1)) as $SelfT)
+ }
+ }
+
+ /// Panic-free bitwise shift-right; yields `self >> mask(rhs)`, where `mask`
+ /// removes any high-order bits of `rhs` that would cause the shift to exceed the bitwidth of the type.
+ ///
+ /// Note that this is *not* the same as a rotate-right; the RHS of a wrapping shift-right is restricted
+ /// to the range of the type, rather than the bits shifted out of the LHS being returned to the other
+ /// end. The primitive integer types all implement a [`rotate_right`](Self::rotate_right) function,
+ /// which may be what you want instead.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!((-128", stringify!($SelfT), ").wrapping_shr(7), -1);")]
+ /// assert_eq!((-128i16).wrapping_shr(64), -128);
+ /// ```
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn wrapping_shr(self, rhs: u32) -> Self {
+ // SAFETY: the masking by the bitsize of the type ensures that we do not shift
+ // out of bounds
+ unsafe {
+ intrinsics::unchecked_shr(self, (rhs & ($BITS - 1)) as $SelfT)
+ }
+ }
+
+ /// Wrapping (modular) absolute value. Computes `self.abs()`, wrapping around at
+ /// the boundary of the type.
+ ///
+ /// The only case where such wrapping can occur is when one takes the absolute value of the negative
+ /// minimal value for the type; this is a positive value that is too large to represent in the type. In
+ /// such a case, this function returns `MIN` itself.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".wrapping_abs(), 100);")]
+ #[doc = concat!("assert_eq!((-100", stringify!($SelfT), ").wrapping_abs(), 100);")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.wrapping_abs(), ", stringify!($SelfT), "::MIN);")]
+ /// assert_eq!((-128i8).wrapping_abs() as u8, 128);
+ /// ```
+ #[stable(feature = "no_panic_abs", since = "1.13.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[allow(unused_attributes)]
+ #[inline]
+ pub const fn wrapping_abs(self) -> Self {
+ if self.is_negative() {
+ self.wrapping_neg()
+ } else {
+ self
+ }
+ }
+
+ /// Computes the absolute value of `self` without any wrapping
+ /// or panicking.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".unsigned_abs(), 100", stringify!($UnsignedT), ");")]
+ #[doc = concat!("assert_eq!((-100", stringify!($SelfT), ").unsigned_abs(), 100", stringify!($UnsignedT), ");")]
+ /// assert_eq!((-128i8).unsigned_abs(), 128u8);
+ /// ```
+ #[stable(feature = "unsigned_abs", since = "1.51.0")]
+ #[rustc_const_stable(feature = "unsigned_abs", since = "1.51.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn unsigned_abs(self) -> $UnsignedT {
+ self.wrapping_abs() as $UnsignedT
+ }
+
+ /// Wrapping (modular) exponentiation. Computes `self.pow(exp)`,
+ /// wrapping around at the boundary of the type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(3", stringify!($SelfT), ".wrapping_pow(4), 81);")]
+ /// assert_eq!(3i8.wrapping_pow(5), -13);
+ /// assert_eq!(3i8.wrapping_pow(6), -39);
+ /// ```
+ #[stable(feature = "no_panic_pow", since = "1.34.0")]
+ #[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn wrapping_pow(self, mut exp: u32) -> Self {
+ if exp == 0 {
+ return 1;
+ }
+ let mut base = self;
+ let mut acc: Self = 1;
+
+ while exp > 1 {
+ if (exp & 1) == 1 {
+ acc = acc.wrapping_mul(base);
+ }
+ exp /= 2;
+ base = base.wrapping_mul(base);
+ }
+
+ // Since exp != 0, the loop must end with exp == 1 here.
+ // Deal with the final bit of the exponent separately, since
+ // squaring the base afterwards is not necessary and may cause a
+ // needless overflow.
+ acc.wrapping_mul(base)
+ }
+
+ /// Calculates `self` + `rhs`.
+ ///
+ /// Returns a tuple of the addition along with a boolean indicating whether an arithmetic overflow would
+ /// occur. If an overflow would have occurred then the wrapped value is returned.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".overflowing_add(2), (7, false));")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.overflowing_add(1), (", stringify!($SelfT), "::MIN, true));")]
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
+ let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
+ (a as Self, b)
+ }
+
+ /// Calculates `self` + `rhs` with an unsigned `rhs`.
+ ///
+ /// Returns a tuple of the addition along with a boolean indicating
+ /// whether an arithmetic overflow would occur. If an overflow would
+ /// have occurred then the wrapped value is returned.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// # #![feature(mixed_integer_ops)]
+ #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".overflowing_add_unsigned(2), (3, false));")]
+ #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MIN).overflowing_add_unsigned(", stringify!($UnsignedT), "::MAX), (", stringify!($SelfT), "::MAX, false));")]
+ #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX - 2).overflowing_add_unsigned(3), (", stringify!($SelfT), "::MIN, true));")]
+ /// ```
+ #[unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn overflowing_add_unsigned(self, rhs: $UnsignedT) -> (Self, bool) {
+ let rhs = rhs as Self;
+ let (res, overflowed) = self.overflowing_add(rhs);
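+ // When `rhs` reinterpreted as signed is negative, the true unsigned
+ // addend is 2^N larger, so the operation overflows exactly when the
+ // signed addition does *not*; the XOR flips the flag in that case.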
+ (res, overflowed ^ (rhs < 0))
+ }
+
+ /// Calculates `self` - `rhs`.
+ ///
+ /// Returns a tuple of the subtraction along with a boolean indicating whether an arithmetic overflow
+ /// would occur. If an overflow would have occurred then the wrapped value is returned.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".overflowing_sub(2), (3, false));")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.overflowing_sub(1), (", stringify!($SelfT), "::MAX, true));")]
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
+ let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
+ (a as Self, b)
+ }
+
+ /// Calculates `self` - `rhs` with an unsigned `rhs`.
+ ///
+ /// Returns a tuple of the subtraction along with a boolean indicating
+ /// whether an arithmetic overflow would occur. If an overflow would
+ /// have occurred then the wrapped value is returned.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// # #![feature(mixed_integer_ops)]
+ #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".overflowing_sub_unsigned(2), (-1, false));")]
+ #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX).overflowing_sub_unsigned(", stringify!($UnsignedT), "::MAX), (", stringify!($SelfT), "::MIN, false));")]
+ #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MIN + 2).overflowing_sub_unsigned(3), (", stringify!($SelfT), "::MAX, true));")]
+ /// ```
+ #[unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn overflowing_sub_unsigned(self, rhs: $UnsignedT) -> (Self, bool) {
+ let rhs = rhs as Self;
+ let (res, overflowed) = self.overflowing_sub(rhs);
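+ // Mirrors `overflowing_add_unsigned`: a negative reinterpreted `rhs`
+ // shifts the true result by 2^N, inverting the signed borrow flag,
+ // hence the XOR.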
+ (res, overflowed ^ (rhs < 0))
+ }
+
+ /// Calculates the multiplication of `self` and `rhs`.
+ ///
+ /// Returns a tuple of the multiplication along with a boolean indicating whether an arithmetic overflow
+ /// would occur. If an overflow would have occurred then the wrapped value is returned.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".overflowing_mul(2), (10, false));")]
+ /// assert_eq!(1_000_000_000i32.overflowing_mul(10), (1410065408, true));
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
+ let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
+ (a as Self, b)
+ }
+
+ /// Calculates the quotient when `self` is divided by `rhs`.
+ ///
+ /// Returns a tuple of the quotient along with a boolean indicating whether an arithmetic overflow would
+ /// occur. If an overflow would occur then `self` is returned.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is 0.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".overflowing_div(2), (2, false));")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.overflowing_div(-1), (", stringify!($SelfT), "::MIN, true));")]
+ /// ```
+ #[inline]
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_overflowing_int_methods", since = "1.52.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ pub const fn overflowing_div(self, rhs: Self) -> (Self, bool) {
+ // Using `&` helps LLVM see that it is the same check made in division.
+ if unlikely!((self == Self::MIN) & (rhs == -1)) {
+ (self, true)
+ } else {
+ (self / rhs, false)
+ }
+ }
+
+ /// Calculates the quotient of Euclidean division `self.div_euclid(rhs)`.
+ ///
+ /// Returns a tuple of the quotient along with a boolean indicating whether an arithmetic overflow would
+ /// occur. If an overflow would occur then `self` is returned.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is 0.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".overflowing_div_euclid(2), (2, false));")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.overflowing_div_euclid(-1), (", stringify!($SelfT), "::MIN, true));")]
+ /// ```
+ #[inline]
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_stable(feature = "const_euclidean_int_methods", since = "1.52.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ pub const fn overflowing_div_euclid(self, rhs: Self) -> (Self, bool) {
+ // Using `&` helps LLVM see that it is the same check made in division.
+ if unlikely!((self == Self::MIN) & (rhs == -1)) {
+ (self, true)
+ } else {
+ (self.div_euclid(rhs), false)
+ }
+ }
+
+ /// Calculates the remainder when `self` is divided by `rhs`.
+ ///
+ /// Returns a tuple of the remainder after dividing along with a boolean indicating whether an
+ /// arithmetic overflow would occur. If an overflow would occur then 0 is returned.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is 0.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".overflowing_rem(2), (1, false));")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.overflowing_rem(-1), (0, true));")]
+ /// ```
+ #[inline]
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_overflowing_int_methods", since = "1.52.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ pub const fn overflowing_rem(self, rhs: Self) -> (Self, bool) {
+ if unlikely!(rhs == -1) {
+ (0, self == Self::MIN)
+ } else {
+ (self % rhs, false)
+ }
+ }
+
+ /// Overflowing Euclidean remainder. Calculates `self.rem_euclid(rhs)`.
+ ///
+ /// Returns a tuple of the remainder after dividing along with a boolean indicating whether an
+ /// arithmetic overflow would occur. If an overflow would occur then 0 is returned.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is 0.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".overflowing_rem_euclid(2), (1, false));")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.overflowing_rem_euclid(-1), (0, true));")]
+ /// ```
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_stable(feature = "const_euclidean_int_methods", since = "1.52.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn overflowing_rem_euclid(self, rhs: Self) -> (Self, bool) {
+ if unlikely!(rhs == -1) {
+ (0, self == Self::MIN)
+ } else {
+ (self.rem_euclid(rhs), false)
+ }
+ }
+
+ /// Negates `self`, overflowing if this is equal to the minimum value.
+ ///
+ /// Returns a tuple of the negated version of `self` along with a boolean indicating whether an overflow
+ /// happened. If `self` is the minimum value (e.g., `i32::MIN` for values of type `i32`), then the
+ /// minimum value will be returned again and `true` will be returned for an overflow happening.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(2", stringify!($SelfT), ".overflowing_neg(), (-2, false));")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.overflowing_neg(), (", stringify!($SelfT), "::MIN, true));")]
+ /// ```
+ #[inline]
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[allow(unused_attributes)]
+ pub const fn overflowing_neg(self) -> (Self, bool) {
+ if unlikely!(self == Self::MIN) {
+ (Self::MIN, true)
+ } else {
+ (-self, false)
+ }
+ }
+
+ /// Shifts `self` left by `rhs` bits.
+ ///
+ /// Returns a tuple of the shifted version of `self` along with a boolean indicating whether the shift
+ /// value was larger than or equal to the number of bits. If the shift value is too large, it is
+ /// masked by `N - 1`, where `N` is the number of bits, and the masked value is then used to perform the shift.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(0x1", stringify!($SelfT),".overflowing_shl(4), (0x10, false));")]
+ /// assert_eq!(0x1i32.overflowing_shl(36), (0x10, true));
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn overflowing_shl(self, rhs: u32) -> (Self, bool) {
+ (self.wrapping_shl(rhs), (rhs > ($BITS - 1)))
+ }
+
+ /// Shifts `self` right by `rhs` bits.
+ ///
+ /// Returns a tuple of the shifted version of `self` along with a boolean indicating whether the shift
+ /// value was larger than or equal to the number of bits. If the shift value is too large, it is
+ /// masked by `N - 1`, where `N` is the number of bits, and the masked value is then used to perform the shift.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(0x10", stringify!($SelfT), ".overflowing_shr(4), (0x1, false));")]
+ /// assert_eq!(0x10i32.overflowing_shr(36), (0x1, true));
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn overflowing_shr(self, rhs: u32) -> (Self, bool) {
+ (self.wrapping_shr(rhs), (rhs > ($BITS - 1)))
+ }
+
+ /// Computes the absolute value of `self`.
+ ///
+ /// Returns a tuple of the absolute value of `self` along with a boolean indicating whether an overflow
+ /// happened. If `self` is the minimum value
+ #[doc = concat!("(e.g., ", stringify!($SelfT), "::MIN for values of type ", stringify!($SelfT), "),")]
+ /// then the minimum value will be returned again and `true` will be returned
+ /// for an overflow happening.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(10", stringify!($SelfT), ".overflowing_abs(), (10, false));")]
+ #[doc = concat!("assert_eq!((-10", stringify!($SelfT), ").overflowing_abs(), (10, false));")]
+ #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MIN).overflowing_abs(), (", stringify!($SelfT), "::MIN, true));")]
+ /// ```
+ #[stable(feature = "no_panic_abs", since = "1.13.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn overflowing_abs(self) -> (Self, bool) {
+ (self.wrapping_abs(), self == Self::MIN)
+ }
+
+ /// Raises self to the power of `exp`, using exponentiation by squaring.
+ ///
+ /// Returns a tuple of the exponentiation along with a bool indicating
+ /// whether an overflow happened.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(3", stringify!($SelfT), ".overflowing_pow(4), (81, false));")]
+ /// assert_eq!(3i8.overflowing_pow(5), (-13, true));
+ /// ```
+ #[stable(feature = "no_panic_pow", since = "1.34.0")]
+ #[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn overflowing_pow(self, mut exp: u32) -> (Self, bool) {
+ if exp == 0 {
+ return (1, false);
+ }
+ let mut base = self;
+ let mut acc: Self = 1;
+ let mut overflown = false;
+ // Scratch space for storing results of overflowing_mul.
+ let mut r;
+
+ while exp > 1 {
+ if (exp & 1) == 1 {
+ r = acc.overflowing_mul(base);
+ acc = r.0;
+ overflown |= r.1;
+ }
+ exp /= 2;
+ r = base.overflowing_mul(base);
+ base = r.0;
+ overflown |= r.1;
+ }
+
+ // Since exp != 0, exp must be 1 at this point.
+ // Deal with the final bit of the exponent separately, since
+ // squaring the base afterwards is not necessary and may cause a
+ // needless overflow.
+ r = acc.overflowing_mul(base);
+ r.1 |= overflown;
+ r
+ }
+
+ /// Raises self to the power of `exp`, using exponentiation by squaring.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let x: ", stringify!($SelfT), " = 2; // or any other integer type")]
+ ///
+ /// assert_eq!(x.pow(5), 32);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ pub const fn pow(self, mut exp: u32) -> Self {
+ if exp == 0 {
+ return 1;
+ }
+ let mut base = self;
+ let mut acc = 1;
+
+ while exp > 1 {
+ if (exp & 1) == 1 {
+ acc = acc * base;
+ }
+ exp /= 2;
+ base = base * base;
+ }
+
+ // Since exp != 0, exp must be 1 at this point.
+ // Deal with the final bit of the exponent separately, since
+ // squaring the base afterwards is not necessary and may cause a
+ // needless overflow.
+ acc * base
+ }
+
+ /// Calculates the quotient of Euclidean division of `self` by `rhs`.
+ ///
+ /// This computes the integer `q` such that `self = q * rhs + r`, with
+ /// `r = self.rem_euclid(rhs)` and `0 <= r < abs(rhs)`.
+ ///
+ /// In other words, the result is `self / rhs` rounded to the integer `q`
+ /// such that `self >= q * rhs`.
+ /// If `self > 0`, this is equivalent to rounding towards zero (the default in Rust);
+ /// if `self < 0`, this is equivalent to rounding away from zero (towards +/- infinity).
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is 0 or the division results in overflow.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let a: ", stringify!($SelfT), " = 7; // or any other integer type")]
+ /// let b = 4;
+ ///
+ /// assert_eq!(a.div_euclid(b), 1); // 7 >= 4 * 1
+ /// assert_eq!(a.div_euclid(-b), -1); // 7 >= -4 * -1
+ /// assert_eq!((-a).div_euclid(b), -2); // -7 >= 4 * -2
+ /// assert_eq!((-a).div_euclid(-b), 2); // -7 >= -4 * 2
+ /// ```
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_stable(feature = "const_euclidean_int_methods", since = "1.52.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ pub const fn div_euclid(self, rhs: Self) -> Self {
+ let q = self / rhs;
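+ // `/` truncates towards zero. A negative remainder means truncation
+ // rounded the quotient towards zero past the Euclidean quotient, so
+ // step one unit against the sign of `rhs` to compensate.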
+ if self % rhs < 0 {
+ return if rhs > 0 { q - 1 } else { q + 1 }
+ }
+ q
+ }
+
+ /// Calculates the least nonnegative remainder of `self (mod rhs)`.
+ ///
+ /// This is done as if by the Euclidean division algorithm -- given
+ /// `r = self.rem_euclid(rhs)`, `self = rhs * self.div_euclid(rhs) + r`, and
+ /// `0 <= r < abs(rhs)`.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is 0 or the division results in overflow.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let a: ", stringify!($SelfT), " = 7; // or any other integer type")]
+ /// let b = 4;
+ ///
+ /// assert_eq!(a.rem_euclid(b), 3);
+ /// assert_eq!((-a).rem_euclid(b), 1);
+ /// assert_eq!(a.rem_euclid(-b), 3);
+ /// assert_eq!((-a).rem_euclid(-b), 1);
+ /// ```
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_stable(feature = "const_euclidean_int_methods", since = "1.52.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ pub const fn rem_euclid(self, rhs: Self) -> Self {
+ let r = self % rhs;
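+ // `%` keeps the sign of `self`; shift a negative remainder into
+ // [0, abs(rhs)) by adding abs(rhs) (which is `-rhs` when `rhs < 0`).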
+ if r < 0 {
+ if rhs < 0 {
+ r - rhs
+ } else {
+ r + rhs
+ }
+ } else {
+ r
+ }
+ }
+
+ /// Calculates the quotient of `self` and `rhs`, rounding the result towards negative infinity.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is zero.
+ ///
+ /// ## Overflow behavior
+ ///
+ /// On overflow, this function will panic if overflow checks are enabled (default in debug
+ /// mode) and wrap if overflow checks are disabled (default in release mode).
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(int_roundings)]
+ #[doc = concat!("let a: ", stringify!($SelfT)," = 8;")]
+ /// let b = 3;
+ ///
+ /// assert_eq!(a.div_floor(b), 2);
+ /// assert_eq!(a.div_floor(-b), -3);
+ /// assert_eq!((-a).div_floor(b), -3);
+ /// assert_eq!((-a).div_floor(-b), 2);
+ /// ```
+ #[unstable(feature = "int_roundings", issue = "88581")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ pub const fn div_floor(self, rhs: Self) -> Self {
+ let d = self / rhs;
+ let r = self % rhs;
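+ // Truncating division rounds towards zero; when the remainder is
+ // nonzero and `r` and `rhs` have opposite signs, the exact quotient
+ // is negative, so the floor lies one below the truncated result.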
+ if (r > 0 && rhs < 0) || (r < 0 && rhs > 0) {
+ d - 1
+ } else {
+ d
+ }
+ }
+
+ /// Calculates the quotient of `self` and `rhs`, rounding the result towards positive infinity.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is zero.
+ ///
+ /// ## Overflow behavior
+ ///
+ /// On overflow, this function will panic if overflow checks are enabled (default in debug
+ /// mode) and wrap if overflow checks are disabled (default in release mode).
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(int_roundings)]
+ #[doc = concat!("let a: ", stringify!($SelfT)," = 8;")]
+ /// let b = 3;
+ ///
+ /// assert_eq!(a.div_ceil(b), 3);
+ /// assert_eq!(a.div_ceil(-b), -2);
+ /// assert_eq!((-a).div_ceil(b), -2);
+ /// assert_eq!((-a).div_ceil(-b), 3);
+ /// ```
+ #[unstable(feature = "int_roundings", issue = "88581")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ pub const fn div_ceil(self, rhs: Self) -> Self {
+ let d = self / rhs;
+ let r = self % rhs;
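+ // When the remainder is nonzero and `r` and `rhs` share a sign, the
+ // exact quotient is positive, so the ceiling is one above the
+ // truncated result.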
+ if (r > 0 && rhs > 0) || (r < 0 && rhs < 0) {
+ d + 1
+ } else {
+ d
+ }
+ }
+
+ /// If `rhs` is positive, calculates the smallest value greater than or
+ /// equal to `self` that is a multiple of `rhs`. If `rhs` is negative,
+ /// calculates the largest value less than or equal to `self` that is a
+ /// multiple of `rhs`.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is zero.
+ ///
+ /// ## Overflow behavior
+ ///
+ /// On overflow, this function will panic if overflow checks are enabled (default in debug
+ /// mode) and wrap if overflow checks are disabled (default in release mode).
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(int_roundings)]
+ #[doc = concat!("assert_eq!(16_", stringify!($SelfT), ".next_multiple_of(8), 16);")]
+ #[doc = concat!("assert_eq!(23_", stringify!($SelfT), ".next_multiple_of(8), 24);")]
+ #[doc = concat!("assert_eq!(16_", stringify!($SelfT), ".next_multiple_of(-8), 16);")]
+ #[doc = concat!("assert_eq!(23_", stringify!($SelfT), ".next_multiple_of(-8), 16);")]
+ #[doc = concat!("assert_eq!((-16_", stringify!($SelfT), ").next_multiple_of(8), -16);")]
+ #[doc = concat!("assert_eq!((-23_", stringify!($SelfT), ").next_multiple_of(8), -16);")]
+ #[doc = concat!("assert_eq!((-16_", stringify!($SelfT), ").next_multiple_of(-8), -16);")]
+ #[doc = concat!("assert_eq!((-23_", stringify!($SelfT), ").next_multiple_of(-8), -24);")]
+ /// ```
+ #[unstable(feature = "int_roundings", issue = "88581")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ pub const fn next_multiple_of(self, rhs: Self) -> Self {
+ // This would otherwise fail when calculating `r` when self == T::MIN.
+ if rhs == -1 {
+ return self;
+ }
+
+ let r = self % rhs;
+ let m = if (r > 0 && rhs < 0) || (r < 0 && rhs > 0) {
+ r + rhs
+ } else {
+ r
+ };
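+ // `m` is now the remainder taken with the sign of `rhs`, so
+ // `self + (rhs - m)` lands on the adjacent multiple in the
+ // direction of `rhs`'s sign.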
+
+ if m == 0 {
+ self
+ } else {
+ self + (rhs - m)
+ }
+ }
+
+ /// If `rhs` is positive, calculates the smallest value greater than or
+ /// equal to `self` that is a multiple of `rhs`. If `rhs` is negative,
+ /// calculates the largest value less than or equal to `self` that is a
+ /// multiple of `rhs`. Returns `None` if `rhs` is zero or the operation
+ /// would result in overflow.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(int_roundings)]
+ #[doc = concat!("assert_eq!(16_", stringify!($SelfT), ".checked_next_multiple_of(8), Some(16));")]
+ #[doc = concat!("assert_eq!(23_", stringify!($SelfT), ".checked_next_multiple_of(8), Some(24));")]
+ #[doc = concat!("assert_eq!(16_", stringify!($SelfT), ".checked_next_multiple_of(-8), Some(16));")]
+ #[doc = concat!("assert_eq!(23_", stringify!($SelfT), ".checked_next_multiple_of(-8), Some(16));")]
+ #[doc = concat!("assert_eq!((-16_", stringify!($SelfT), ").checked_next_multiple_of(8), Some(-16));")]
+ #[doc = concat!("assert_eq!((-23_", stringify!($SelfT), ").checked_next_multiple_of(8), Some(-16));")]
+ #[doc = concat!("assert_eq!((-16_", stringify!($SelfT), ").checked_next_multiple_of(-8), Some(-16));")]
+ #[doc = concat!("assert_eq!((-23_", stringify!($SelfT), ").checked_next_multiple_of(-8), Some(-24));")]
+ #[doc = concat!("assert_eq!(1_", stringify!($SelfT), ".checked_next_multiple_of(0), None);")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.checked_next_multiple_of(2), None);")]
+ /// ```
+ #[unstable(feature = "int_roundings", issue = "88581")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_next_multiple_of(self, rhs: Self) -> Option<Self> {
+ // This would otherwise fail when calculating `r` when self == T::MIN.
+ if rhs == -1 {
+ return Some(self);
+ }
+
+ let r = try_opt!(self.checked_rem(rhs));
+ let m = if (r > 0 && rhs < 0) || (r < 0 && rhs > 0) {
+ // r + rhs cannot overflow because they have opposite signs
+ r + rhs
+ } else {
+ r
+ };
+
+ if m == 0 {
+ Some(self)
+ } else {
+ // rhs - m cannot overflow because m has the same sign as rhs
+ self.checked_add(rhs - m)
+ }
+ }
+
+ /// Returns the logarithm of the number with respect to an arbitrary base,
+ /// rounded down.
+ ///
+ /// This method might not be optimized owing to implementation details;
+ /// `log2` can produce results more efficiently for base 2, and `log10`
+ /// can produce results more efficiently for base 10.
+ ///
+ /// # Panics
+ ///
+ /// When the number is negative or zero, or if the base is not at least 2, it
+ /// panics in debug mode and the return value is 0 in release mode.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(int_log)]
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".log(5), 1);")]
+ /// ```
+ #[unstable(feature = "int_log", issue = "70887")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[track_caller]
+ #[rustc_inherit_overflow_checks]
+ #[allow(arithmetic_overflow)]
+ pub const fn log(self, base: Self) -> u32 {
+ match self.checked_log(base) {
+ Some(n) => n,
+ None => {
+ // In debug builds, trigger a panic on None.
+ // This should optimize completely out in release builds.
+ let _ = Self::MAX + 1;
+
+ 0
+ },
+ }
+ }
+
+ /// Returns the base 2 logarithm of the number, rounded down.
+ ///
+ /// # Panics
+ ///
+ /// When the number is negative or zero, it panics in debug mode and the
+ /// return value is 0 in release mode.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(int_log)]
+ #[doc = concat!("assert_eq!(2", stringify!($SelfT), ".log2(), 1);")]
+ /// ```
+ #[unstable(feature = "int_log", issue = "70887")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[track_caller]
+ #[rustc_inherit_overflow_checks]
+ #[allow(arithmetic_overflow)]
+ pub const fn log2(self) -> u32 {
+ match self.checked_log2() {
+ Some(n) => n,
+ None => {
+ // In debug builds, trigger a panic on None.
+ // This should optimize completely out in release builds.
+ let _ = Self::MAX + 1;
+
+ 0
+ },
+ }
+ }
+
+ /// Returns the base 10 logarithm of the number, rounded down.
+ ///
+ /// # Panics
+ ///
+ /// When the number is negative or zero, it panics in debug mode and the
+ /// return value is 0 in release mode.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// #![feature(int_log)]
+ #[doc = concat!("assert_eq!(10", stringify!($SelfT), ".log10(), 1);")]
+ /// ```
+ #[unstable(feature = "int_log", issue = "70887")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[track_caller]
+ #[rustc_inherit_overflow_checks]
+ #[allow(arithmetic_overflow)]
+ pub const fn log10(self) -> u32 {
+ match self.checked_log10() {
+ Some(n) => n,
+ None => {
+ // In debug builds, trigger a panic on None.
+ // This should optimize completely out in release builds.
+ let _ = Self::MAX + 1;
+
+ 0
+ },
+ }
+ }
+
+ /// Returns the logarithm of the number with respect to an arbitrary base,
+ /// rounded down.
+ ///
+ /// Returns `None` if the number is negative or zero, or if the base is not at least 2.
+ ///
+ /// This method might not be optimized owing to implementation details;
+ /// `checked_log2` can produce results more efficiently for base 2, and
+ /// `checked_log10` can produce results more efficiently for base 10.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(int_log)]
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".checked_log(5), Some(1));")]
+ /// ```
+ #[unstable(feature = "int_log", issue = "70887")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_log(self, base: Self) -> Option<u32> {
+ if self <= 0 || base <= 1 {
+ None
+ } else {
+ let mut n = 0;
+ let mut r = self;
+
+ // Optimization for 128-bit wide integers: jump ahead with a
+ // conservative estimate before the division loop. Since
+ // base <= 2^(log2(base) + 1), the estimate never overshoots
+ // the true logarithm.
+ if Self::BITS == 128 {
+ let b = Self::log2(self) / (Self::log2(base) + 1);
+ n += b;
+ r /= base.pow(b as u32);
+ }
+
+ while r >= base {
+ r /= base;
+ n += 1;
+ }
+ Some(n)
+ }
+ }
+
+ /// Returns the base 2 logarithm of the number, rounded down.
+ ///
+ /// Returns `None` if the number is negative or zero.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(int_log)]
+ #[doc = concat!("assert_eq!(2", stringify!($SelfT), ".checked_log2(), Some(1));")]
+ /// ```
+ #[unstable(feature = "int_log", issue = "70887")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_log2(self) -> Option<u32> {
+ if self <= 0 {
+ None
+ } else {
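+ // floor(log2(x)) is the index of the highest set bit, i.e.
+ // BITS - 1 - leading_zeros(x).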
+ // SAFETY: We just checked that this number is positive
+ let log = (Self::BITS - 1) - unsafe { intrinsics::ctlz_nonzero(self) as u32 };
+ Some(log)
+ }
+ }
+
+ /// Returns the base 10 logarithm of the number, rounded down.
+ ///
+ /// Returns `None` if the number is negative or zero.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// #![feature(int_log)]
+ #[doc = concat!("assert_eq!(10", stringify!($SelfT), ".checked_log10(), Some(1));")]
+ /// ```
+ #[unstable(feature = "int_log", issue = "70887")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_log10(self) -> Option<u32> {
+ if self > 0 {
+ Some(int_log10::$ActualT(self as $ActualT))
+ } else {
+ None
+ }
+ }
+
+ /// Computes the absolute value of `self`.
+ ///
+ /// # Overflow behavior
+ ///
+ /// The absolute value of
+ #[doc = concat!("`", stringify!($SelfT), "::MIN`")]
+ /// cannot be represented as an
+ #[doc = concat!("`", stringify!($SelfT), "`,")]
+ /// and attempting to calculate it will cause an overflow. This means
+ /// that code in debug mode will trigger a panic on this case and
+ /// optimized code will return
+ #[doc = concat!("`", stringify!($SelfT), "::MIN`")]
+ /// without a panic.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(10", stringify!($SelfT), ".abs(), 10);")]
+ #[doc = concat!("assert_eq!((-10", stringify!($SelfT), ").abs(), 10);")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[allow(unused_attributes)]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ pub const fn abs(self) -> Self {
+ // Note that the #[rustc_inherit_overflow_checks] and #[inline]
+ // above mean that the overflow semantics of the subtraction
+ // depend on the crate we're being called from.
+ if self.is_negative() {
+ -self
+ } else {
+ self
+ }
+ }
+
+ /// Computes the absolute difference between `self` and `other`.
+ ///
+ /// This function always returns the correct answer without overflowing or
+ /// panicking, by returning an unsigned integer.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".abs_diff(80), 20", stringify!($UnsignedT), ");")]
+ #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".abs_diff(110), 10", stringify!($UnsignedT), ");")]
+ #[doc = concat!("assert_eq!((-100", stringify!($SelfT), ").abs_diff(80), 180", stringify!($UnsignedT), ");")]
+ #[doc = concat!("assert_eq!((-100", stringify!($SelfT), ").abs_diff(-120), 20", stringify!($UnsignedT), ");")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.abs_diff(", stringify!($SelfT), "::MAX), ", stringify!($UnsignedT), "::MAX);")]
+ /// ```
+ #[stable(feature = "int_abs_diff", since = "1.60.0")]
+ #[rustc_const_stable(feature = "int_abs_diff", since = "1.60.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn abs_diff(self, other: Self) -> $UnsignedT {
+ if self < other {
+ // Converting a non-negative x from signed to unsigned by using
+ // `x as U` is left unchanged, but a negative x is converted
+ // to value x + 2^N. Thus if `s` and `o` are binary variables
+ // respectively indicating whether `self` and `other` are
+ // negative, we are computing the mathematical value:
+ //
+ // (other + o*2^N) - (self + s*2^N) mod 2^N
+ // other - self + (o-s)*2^N mod 2^N
+ // other - self mod 2^N
+ //
+ // Finally, taking the mod 2^N of the mathematical value of
+ // `other - self` does not change it as it already is
+ // in the range [0, 2^N).
+ (other as $UnsignedT).wrapping_sub(self as $UnsignedT)
+ } else {
+ (self as $UnsignedT).wrapping_sub(other as $UnsignedT)
+ }
+ }
+
+ /// Returns a number representing the sign of `self`.
+ ///
+ /// - `0` if the number is zero
+ /// - `1` if the number is positive
+ /// - `-1` if the number is negative
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(10", stringify!($SelfT), ".signum(), 1);")]
+ #[doc = concat!("assert_eq!(0", stringify!($SelfT), ".signum(), 0);")]
+ #[doc = concat!("assert_eq!((-10", stringify!($SelfT), ").signum(), -1);")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_sign", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn signum(self) -> Self {
+ match self {
+ n if n > 0 => 1,
+ 0 => 0,
+ _ => -1,
+ }
+ }
+
+ /// Returns `true` if `self` is positive and `false` if the number is zero or
+ /// negative.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert!(10", stringify!($SelfT), ".is_positive());")]
+ #[doc = concat!("assert!(!(-10", stringify!($SelfT), ").is_positive());")]
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[inline(always)]
+ pub const fn is_positive(self) -> bool { self > 0 }
+
+ /// Returns `true` if `self` is negative and `false` if the number is zero or
+ /// positive.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert!((-10", stringify!($SelfT), ").is_negative());")]
+ #[doc = concat!("assert!(!10", stringify!($SelfT), ".is_negative());")]
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[inline(always)]
+ pub const fn is_negative(self) -> bool { self < 0 }
+
+ /// Return the memory representation of this integer as a byte array in
+ /// big-endian (network) byte order.
+ ///
+ #[doc = $to_xe_bytes_doc]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!("let bytes = ", $swap_op, stringify!($SelfT), ".to_be_bytes();")]
+ #[doc = concat!("assert_eq!(bytes, ", $be_bytes, ");")]
+ /// ```
+ #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
+ #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn to_be_bytes(self) -> [u8; mem::size_of::<Self>()] {
+ self.to_be().to_ne_bytes()
+ }
+
+ /// Return the memory representation of this integer as a byte array in
+ /// little-endian byte order.
+ ///
+ #[doc = $to_xe_bytes_doc]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!("let bytes = ", $swap_op, stringify!($SelfT), ".to_le_bytes();")]
+ #[doc = concat!("assert_eq!(bytes, ", $le_bytes, ");")]
+ /// ```
+ #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
+ #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn to_le_bytes(self) -> [u8; mem::size_of::<Self>()] {
+ self.to_le().to_ne_bytes()
+ }
+
+ /// Return the memory representation of this integer as a byte array in
+ /// native byte order.
+ ///
+ /// As the target platform's native endianness is used, portable code
+ /// should use [`to_be_bytes`] or [`to_le_bytes`], as appropriate,
+ /// instead.
+ ///
+ #[doc = $to_xe_bytes_doc]
+ ///
+ /// [`to_be_bytes`]: Self::to_be_bytes
+ /// [`to_le_bytes`]: Self::to_le_bytes
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!("let bytes = ", $swap_op, stringify!($SelfT), ".to_ne_bytes();")]
+ /// assert_eq!(
+ /// bytes,
+ /// if cfg!(target_endian = "big") {
+ #[doc = concat!(" ", $be_bytes)]
+ /// } else {
+ #[doc = concat!(" ", $le_bytes)]
+ /// }
+ /// );
+ /// ```
+ #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
+ #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
+ // SAFETY: const sound because integers are plain old datatypes so we can always
+ // transmute them to arrays of bytes
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn to_ne_bytes(self) -> [u8; mem::size_of::<Self>()] {
+ // SAFETY: integers are plain old datatypes so we can always transmute them to
+ // arrays of bytes
+ unsafe { mem::transmute(self) }
+ }
+
+ /// Create an integer value from its representation as a byte array in
+ /// big endian.
+ ///
+ #[doc = $from_xe_bytes_doc]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!("let value = ", stringify!($SelfT), "::from_be_bytes(", $be_bytes, ");")]
+ #[doc = concat!("assert_eq!(value, ", $swap_op, ");")]
+ /// ```
+ ///
+ /// When starting from a slice rather than an array, fallible conversion APIs can be used:
+ ///
+ /// ```
+ #[doc = concat!("fn read_be_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {")]
+ #[doc = concat!(" let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());")]
+ /// *input = rest;
+ #[doc = concat!(" ", stringify!($SelfT), "::from_be_bytes(int_bytes.try_into().unwrap())")]
+ /// }
+ /// ```
+ #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
+ #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
+ #[must_use]
+ #[inline]
+ pub const fn from_be_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+ Self::from_be(Self::from_ne_bytes(bytes))
+ }
+
+ /// Create an integer value from its representation as a byte array in
+ /// little endian.
+ ///
+ #[doc = $from_xe_bytes_doc]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!("let value = ", stringify!($SelfT), "::from_le_bytes(", $le_bytes, ");")]
+ #[doc = concat!("assert_eq!(value, ", $swap_op, ");")]
+ /// ```
+ ///
+ /// When starting from a slice rather than an array, fallible conversion APIs can be used:
+ ///
+ /// ```
+ #[doc = concat!("fn read_le_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {")]
+ #[doc = concat!(" let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());")]
+ /// *input = rest;
+ #[doc = concat!(" ", stringify!($SelfT), "::from_le_bytes(int_bytes.try_into().unwrap())")]
+ /// }
+ /// ```
+ #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
+ #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
+ #[must_use]
+ #[inline]
+ pub const fn from_le_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+ Self::from_le(Self::from_ne_bytes(bytes))
+ }
+
+ /// Create an integer value from its memory representation as a byte
+ /// array in native endianness.
+ ///
+ /// As the target platform's native endianness is used, portable code
+ /// likely wants to use [`from_be_bytes`] or [`from_le_bytes`], as
+ /// appropriate instead.
+ ///
+ /// [`from_be_bytes`]: Self::from_be_bytes
+ /// [`from_le_bytes`]: Self::from_le_bytes
+ ///
+ #[doc = $from_xe_bytes_doc]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!("let value = ", stringify!($SelfT), "::from_ne_bytes(if cfg!(target_endian = \"big\") {")]
+ #[doc = concat!(" ", $be_bytes)]
+ /// } else {
+ #[doc = concat!(" ", $le_bytes)]
+ /// });
+ #[doc = concat!("assert_eq!(value, ", $swap_op, ");")]
+ /// ```
+ ///
+ /// When starting from a slice rather than an array, fallible conversion APIs can be used:
+ ///
+ /// ```
+ #[doc = concat!("fn read_ne_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {")]
+ #[doc = concat!(" let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());")]
+ /// *input = rest;
+ #[doc = concat!(" ", stringify!($SelfT), "::from_ne_bytes(int_bytes.try_into().unwrap())")]
+ /// }
+ /// ```
+ #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
+ #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
+ #[must_use]
+ // SAFETY: const sound because integers are plain old datatypes so we can always
+ // transmute to them
+ #[inline]
+ pub const fn from_ne_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+ // SAFETY: integers are plain old datatypes so we can always transmute to them
+ unsafe { mem::transmute(bytes) }
+ }
+
+ /// New code should prefer to use
+ #[doc = concat!("[`", stringify!($SelfT), "::MIN", "`] instead.")]
+ ///
+ /// Returns the smallest value that can be represented by this integer type.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline(always)]
+ #[rustc_promotable]
+ #[rustc_const_stable(feature = "const_min_value", since = "1.32.0")]
+ #[deprecated(since = "TBD", note = "replaced by the `MIN` associated constant on this type")]
+ pub const fn min_value() -> Self {
+ Self::MIN
+ }
+
+ /// New code should prefer to use
+ #[doc = concat!("[`", stringify!($SelfT), "::MAX", "`] instead.")]
+ ///
+ /// Returns the largest value that can be represented by this integer type.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline(always)]
+ #[rustc_promotable]
+ #[rustc_const_stable(feature = "const_max_value", since = "1.32.0")]
+ #[deprecated(since = "TBD", note = "replaced by the `MAX` associated constant on this type")]
+ pub const fn max_value() -> Self {
+ Self::MAX
+ }
+ }
+}
diff --git a/library/core/src/num/mod.rs b/library/core/src/num/mod.rs
new file mode 100644
index 000000000..f481399fd
--- /dev/null
+++ b/library/core/src/num/mod.rs
@@ -0,0 +1,1124 @@
+//! Numeric traits and functions for the built-in numeric types.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::ascii;
+use crate::intrinsics;
+use crate::mem;
+use crate::ops::{Add, Mul, Sub};
+use crate::str::FromStr;
+
+// Used because the `?` operator is not allowed in a const context.
+macro_rules! try_opt {
+ ($e:expr) => {
+ match $e {
+ Some(x) => x,
+ None => return None,
+ }
+ };
+}
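+// For example, `try_opt!(x.checked_add(y))` expands to a `match` that
+// returns `None` from the enclosing const fn when the operand is `None`.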
+
+#[allow_internal_unstable(const_likely)]
+macro_rules! unlikely {
+ ($e: expr) => {
+ intrinsics::unlikely($e)
+ };
+}
+
+// All these modules are technically private and only exposed for coretests:
+#[cfg(not(no_fp_fmt_parse))]
+pub mod bignum;
+#[cfg(not(no_fp_fmt_parse))]
+pub mod dec2flt;
+#[cfg(not(no_fp_fmt_parse))]
+pub mod diy_float;
+#[cfg(not(no_fp_fmt_parse))]
+pub mod flt2dec;
+pub mod fmt;
+
+#[macro_use]
+mod int_macros; // import int_impl!
+#[macro_use]
+mod uint_macros; // import uint_impl!
+
+mod error;
+mod int_log10;
+mod nonzero;
+#[unstable(feature = "saturating_int_impl", issue = "87920")]
+mod saturating;
+mod wrapping;
+
+#[unstable(feature = "saturating_int_impl", issue = "87920")]
+pub use saturating::Saturating;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use wrapping::Wrapping;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg(not(no_fp_fmt_parse))]
+pub use dec2flt::ParseFloatError;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use error::ParseIntError;
+
+#[stable(feature = "nonzero", since = "1.28.0")]
+pub use nonzero::{NonZeroU128, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize};
+
+#[stable(feature = "signed_nonzero", since = "1.34.0")]
+pub use nonzero::{NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize};
+
+#[stable(feature = "try_from", since = "1.34.0")]
+pub use error::TryFromIntError;
+
+#[stable(feature = "int_error_matching", since = "1.55.0")]
+pub use error::IntErrorKind;
+
+macro_rules! usize_isize_to_xe_bytes_doc {
+ () => {
+ "
+
+**Note**: This function returns an array of length 2, 4 or 8 bytes
+depending on the target pointer size.
+
+"
+ };
+}
+
+macro_rules! usize_isize_from_xe_bytes_doc {
+ () => {
+ "
+
+**Note**: This function takes an array of length 2, 4 or 8 bytes
+depending on the target pointer size.
+
+"
+ };
+}
+
+macro_rules! widening_impl {
+ ($SelfT:ty, $WideT:ty, $BITS:literal, unsigned) => {
+ /// Calculates the complete product `self * rhs` without the possibility of overflow.
+ ///
+ /// This returns the low-order (wrapping) bits and the high-order (overflow) bits
+ /// of the result as two separate values, in that order.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// Please note that this example is shared between integer types,
+ /// which explains why `u32` is used here.
+ ///
+ /// ```
+ /// #![feature(bigint_helper_methods)]
+ /// assert_eq!(5u32.widening_mul(2), (10, 0));
+ /// assert_eq!(1_000_000_000u32.widening_mul(10), (1410065408, 2));
+ /// ```
+ #[unstable(feature = "bigint_helper_methods", issue = "85532")]
+ #[rustc_const_unstable(feature = "const_bigint_helper_methods", issue = "85532")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn widening_mul(self, rhs: Self) -> (Self, Self) {
+ // note: longer-term this should be done via an intrinsic,
+ // but for now we can deal without an impl for u128/i128
+ // SAFETY: overflow will be contained within the wider types
+ let wide = unsafe { (self as $WideT).unchecked_mul(rhs as $WideT) };
+ (wide as $SelfT, (wide >> $BITS) as $SelfT)
+ }
+
+ /// Calculates the "full multiplication" `self * rhs + carry`
+ /// without the possibility of overflow.
+ ///
+ /// This returns the low-order (wrapping) bits and the high-order (overflow) bits
+ /// of the result as two separate values, in that order.
+ ///
+ /// Performs "long multiplication" which takes in an extra amount to add, and may return an
+ /// additional amount of overflow. This allows for chaining together multiple
+ /// multiplications to create "big integers" which represent larger values.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// Please note that this example is shared between integer types,
+ /// which explains why `u32` is used here.
+ ///
+ /// ```
+ /// #![feature(bigint_helper_methods)]
+ /// assert_eq!(5u32.carrying_mul(2, 0), (10, 0));
+ /// assert_eq!(5u32.carrying_mul(2, 10), (20, 0));
+ /// assert_eq!(1_000_000_000u32.carrying_mul(10, 0), (1410065408, 2));
+ /// assert_eq!(1_000_000_000u32.carrying_mul(10, 10), (1410065418, 2));
+ #[doc = concat!("assert_eq!(",
+ stringify!($SelfT), "::MAX.carrying_mul(", stringify!($SelfT), "::MAX, ", stringify!($SelfT), "::MAX), ",
+ "(0, ", stringify!($SelfT), "::MAX));"
+ )]
+ /// ```
+ ///
+ /// If `carry` is zero, this is similar to [`overflowing_mul`](Self::overflowing_mul),
+ /// except that it gives the value of the overflow instead of just whether one happened:
+ ///
+ /// ```
+ /// #![feature(bigint_helper_methods)]
+ /// let r = u8::carrying_mul(7, 13, 0);
+ /// assert_eq!((r.0, r.1 != 0), u8::overflowing_mul(7, 13));
+ /// let r = u8::carrying_mul(13, 42, 0);
+ /// assert_eq!((r.0, r.1 != 0), u8::overflowing_mul(13, 42));
+ /// ```
+ ///
+ /// The value of the first field in the returned tuple matches what you'd get
+ /// by combining the [`wrapping_mul`](Self::wrapping_mul) and
+ /// [`wrapping_add`](Self::wrapping_add) methods:
+ ///
+ /// ```
+ /// #![feature(bigint_helper_methods)]
+ /// assert_eq!(
+ /// 789_u16.carrying_mul(456, 123).0,
+ /// 789_u16.wrapping_mul(456).wrapping_add(123),
+ /// );
+ /// ```
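+ ///
+ /// A minimal sketch of the "big integer" chaining use case, multiplying a
+ /// two-limb little-endian `u32` value by a single limb (limb names are
+ /// illustrative only):
+ ///
+ /// ```
+ /// #![feature(bigint_helper_methods)]
+ /// let (lo, hi, m) = (u32::MAX, 7u32, 10u32);
+ /// // Propagate the carry out of the low limb into the high limb.
+ /// let (r0, carry) = lo.carrying_mul(m, 0);
+ /// let (r1, r2) = hi.carrying_mul(m, carry);
+ /// // Cross-check against plain 128-bit arithmetic.
+ /// let wide = (((hi as u128) << 32) | lo as u128) * m as u128;
+ /// assert_eq!(wide, ((r2 as u128) << 64) | ((r1 as u128) << 32) | r0 as u128);
+ /// ```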
+ #[unstable(feature = "bigint_helper_methods", issue = "85532")]
+ #[rustc_const_unstable(feature = "bigint_helper_methods", issue = "85532")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn carrying_mul(self, rhs: Self, carry: Self) -> (Self, Self) {
+ // note: longer-term this should be done via an intrinsic,
+ // but for now we can deal without an impl for u128/i128
+ // SAFETY: overflow will be contained within the wider types
+ let wide = unsafe {
+ (self as $WideT).unchecked_mul(rhs as $WideT).unchecked_add(carry as $WideT)
+ };
+ (wide as $SelfT, (wide >> $BITS) as $SelfT)
+ }
+ };
+}
+
+impl i8 {
+ int_impl! { i8, i8, u8, 8, 7, -128, 127, 2, "-0x7e", "0xa", "0x12", "0x12", "0x48",
+ "[0x12]", "[0x12]", "", "", "" }
+}
+
+impl i16 {
+ int_impl! { i16, i16, u16, 16, 15, -32768, 32767, 4, "-0x5ffd", "0x3a", "0x1234", "0x3412",
+ "0x2c48", "[0x34, 0x12]", "[0x12, 0x34]", "", "", "" }
+}
+
+impl i32 {
+ int_impl! { i32, i32, u32, 32, 31, -2147483648, 2147483647, 8, "0x10000b3", "0xb301",
+ "0x12345678", "0x78563412", "0x1e6a2c48", "[0x78, 0x56, 0x34, 0x12]",
+ "[0x12, 0x34, 0x56, 0x78]", "", "", "" }
+}
+
+impl i64 {
+ int_impl! { i64, i64, u64, 64, 63, -9223372036854775808, 9223372036854775807, 12,
+ "0xaa00000000006e1", "0x6e10aa", "0x1234567890123456", "0x5634129078563412",
+ "0x6a2c48091e6a2c48", "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]",
+ "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]", "", "", "" }
+}
+
+impl i128 {
+ int_impl! { i128, i128, u128, 128, 127, -170141183460469231731687303715884105728,
+ 170141183460469231731687303715884105727, 16,
+ "0x13f40000000000000000000000004f76", "0x4f7613f4", "0x12345678901234567890123456789012",
+ "0x12907856341290785634129078563412", "0x48091e6a2c48091e6a2c48091e6a2c48",
+ "[0x12, 0x90, 0x78, 0x56, 0x34, 0x12, 0x90, 0x78, \
+ 0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]",
+ "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56, \
+ 0x78, 0x90, 0x12, 0x34, 0x56, 0x78, 0x90, 0x12]", "", "", "" }
+}
+
+#[cfg(target_pointer_width = "16")]
+impl isize {
+ int_impl! { isize, i16, usize, 16, 15, -32768, 32767, 4, "-0x5ffd", "0x3a", "0x1234",
+ "0x3412", "0x2c48", "[0x34, 0x12]", "[0x12, 0x34]",
+ usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!(),
+ " on 16-bit targets" }
+}
+
+#[cfg(target_pointer_width = "32")]
+impl isize {
+ int_impl! { isize, i32, usize, 32, 31, -2147483648, 2147483647, 8, "0x10000b3", "0xb301",
+ "0x12345678", "0x78563412", "0x1e6a2c48", "[0x78, 0x56, 0x34, 0x12]",
+ "[0x12, 0x34, 0x56, 0x78]",
+ usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!(),
+ " on 32-bit targets" }
+}
+
+#[cfg(target_pointer_width = "64")]
+impl isize {
+ int_impl! { isize, i64, usize, 64, 63, -9223372036854775808, 9223372036854775807,
+ 12, "0xaa00000000006e1", "0x6e10aa", "0x1234567890123456", "0x5634129078563412",
+ "0x6a2c48091e6a2c48", "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]",
+ "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]",
+ usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!(),
+ " on 64-bit targets" }
+}
+
+/// If the 6th bit (0x20) is set, the ASCII letter is lowercase.
+const ASCII_CASE_MASK: u8 = 0b0010_0000;
+
+impl u8 {
+ uint_impl! { u8, u8, i8, NonZeroU8, 8, 255, 2, "0x82", "0xa", "0x12", "0x12", "0x48", "[0x12]",
+ "[0x12]", "", "", "" }
+ widening_impl! { u8, u16, 8, unsigned }
+
+ /// Checks if the value is within the ASCII range.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let ascii = 97u8;
+ /// let non_ascii = 150u8;
+ ///
+ /// assert!(ascii.is_ascii());
+ /// assert!(!non_ascii.is_ascii());
+ /// ```
+ #[must_use]
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[rustc_const_stable(feature = "const_u8_is_ascii", since = "1.43.0")]
+ #[inline]
+ pub const fn is_ascii(&self) -> bool {
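+ // ASCII is exactly the bytes 0..=127, i.e. those with the high bit clear.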
+ *self & 128 == 0
+ }
+
+ /// Makes a copy of the value in its ASCII upper case equivalent.
+ ///
+ /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To uppercase the value in-place, use [`make_ascii_uppercase`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let lowercase_a = 97u8;
+ ///
+ /// assert_eq!(65, lowercase_a.to_ascii_uppercase());
+ /// ```
+ ///
+ /// [`make_ascii_uppercase`]: Self::make_ascii_uppercase
+ #[must_use = "to uppercase the value in-place, use `make_ascii_uppercase()`"]
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[rustc_const_stable(feature = "const_ascii_methods_on_intrinsics", since = "1.52.0")]
+ #[inline]
+ pub const fn to_ascii_uppercase(&self) -> u8 {
+ // Toggle the case bit (0x20) if this is a lowercase letter, mapping it to uppercase.
+ *self ^ ((self.is_ascii_lowercase() as u8) * ASCII_CASE_MASK)
+ }
+
+ /// Makes a copy of the value in its ASCII lower case equivalent.
+ ///
+ /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To lowercase the value in-place, use [`make_ascii_lowercase`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let uppercase_a = 65u8;
+ ///
+ /// assert_eq!(97, uppercase_a.to_ascii_lowercase());
+ /// ```
+ ///
+ /// [`make_ascii_lowercase`]: Self::make_ascii_lowercase
+ #[must_use = "to lowercase the value in-place, use `make_ascii_lowercase()`"]
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[rustc_const_stable(feature = "const_ascii_methods_on_intrinsics", since = "1.52.0")]
+ #[inline]
+ pub const fn to_ascii_lowercase(&self) -> u8 {
+ // Set the case bit (0x20) if this is an uppercase letter, mapping it to lowercase.
+ *self | (self.is_ascii_uppercase() as u8 * ASCII_CASE_MASK)
+ }
+
+ /// Toggles the ASCII case bit. Assumes `self` is an ASCII alphabetic byte.
+ #[inline]
+ pub(crate) const fn ascii_change_case_unchecked(&self) -> u8 {
+ *self ^ ASCII_CASE_MASK
+ }
+
+ /// Checks that two values are an ASCII case-insensitive match.
+ ///
+ /// This is equivalent to `to_ascii_lowercase(a) == to_ascii_lowercase(b)`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let lowercase_a = 97u8;
+ /// let uppercase_a = 65u8;
+ ///
+ /// assert!(lowercase_a.eq_ignore_ascii_case(&uppercase_a));
+ /// ```
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[rustc_const_stable(feature = "const_ascii_methods_on_intrinsics", since = "1.52.0")]
+ #[inline]
+ pub const fn eq_ignore_ascii_case(&self, other: &u8) -> bool {
+ self.to_ascii_lowercase() == other.to_ascii_lowercase()
+ }
+
+ /// Converts this value to its ASCII upper case equivalent in-place.
+ ///
+ /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To return a new uppercased value without modifying the existing one, use
+ /// [`to_ascii_uppercase`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut byte = b'a';
+ ///
+ /// byte.make_ascii_uppercase();
+ ///
+ /// assert_eq!(b'A', byte);
+ /// ```
+ ///
+ /// [`to_ascii_uppercase`]: Self::to_ascii_uppercase
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn make_ascii_uppercase(&mut self) {
+ *self = self.to_ascii_uppercase();
+ }
+
+ /// Converts this value to its ASCII lower case equivalent in-place.
+ ///
+ /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To return a new lowercased value without modifying the existing one, use
+ /// [`to_ascii_lowercase`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut byte = b'A';
+ ///
+ /// byte.make_ascii_lowercase();
+ ///
+ /// assert_eq!(b'a', byte);
+ /// ```
+ ///
+ /// [`to_ascii_lowercase`]: Self::to_ascii_lowercase
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn make_ascii_lowercase(&mut self) {
+ *self = self.to_ascii_lowercase();
+ }
+
+ /// Checks if the value is an ASCII alphabetic character:
+ ///
+ /// - U+0041 'A' ..= U+005A 'Z', or
+ /// - U+0061 'a' ..= U+007A 'z'.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let uppercase_a = b'A';
+ /// let uppercase_g = b'G';
+ /// let a = b'a';
+ /// let g = b'g';
+ /// let zero = b'0';
+ /// let percent = b'%';
+ /// let space = b' ';
+ /// let lf = b'\n';
+ /// let esc = b'\x1b';
+ ///
+ /// assert!(uppercase_a.is_ascii_alphabetic());
+ /// assert!(uppercase_g.is_ascii_alphabetic());
+ /// assert!(a.is_ascii_alphabetic());
+ /// assert!(g.is_ascii_alphabetic());
+ /// assert!(!zero.is_ascii_alphabetic());
+ /// assert!(!percent.is_ascii_alphabetic());
+ /// assert!(!space.is_ascii_alphabetic());
+ /// assert!(!lf.is_ascii_alphabetic());
+ /// assert!(!esc.is_ascii_alphabetic());
+ /// ```
+ #[must_use]
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_alphabetic(&self) -> bool {
+ matches!(*self, b'A'..=b'Z' | b'a'..=b'z')
+ }
+
+ /// Checks if the value is an ASCII uppercase character:
+ /// U+0041 'A' ..= U+005A 'Z'.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let uppercase_a = b'A';
+ /// let uppercase_g = b'G';
+ /// let a = b'a';
+ /// let g = b'g';
+ /// let zero = b'0';
+ /// let percent = b'%';
+ /// let space = b' ';
+ /// let lf = b'\n';
+ /// let esc = b'\x1b';
+ ///
+ /// assert!(uppercase_a.is_ascii_uppercase());
+ /// assert!(uppercase_g.is_ascii_uppercase());
+ /// assert!(!a.is_ascii_uppercase());
+ /// assert!(!g.is_ascii_uppercase());
+ /// assert!(!zero.is_ascii_uppercase());
+ /// assert!(!percent.is_ascii_uppercase());
+ /// assert!(!space.is_ascii_uppercase());
+ /// assert!(!lf.is_ascii_uppercase());
+ /// assert!(!esc.is_ascii_uppercase());
+ /// ```
+ #[must_use]
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_uppercase(&self) -> bool {
+ matches!(*self, b'A'..=b'Z')
+ }
+
+ /// Checks if the value is an ASCII lowercase character:
+ /// U+0061 'a' ..= U+007A 'z'.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let uppercase_a = b'A';
+ /// let uppercase_g = b'G';
+ /// let a = b'a';
+ /// let g = b'g';
+ /// let zero = b'0';
+ /// let percent = b'%';
+ /// let space = b' ';
+ /// let lf = b'\n';
+ /// let esc = b'\x1b';
+ ///
+ /// assert!(!uppercase_a.is_ascii_lowercase());
+ /// assert!(!uppercase_g.is_ascii_lowercase());
+ /// assert!(a.is_ascii_lowercase());
+ /// assert!(g.is_ascii_lowercase());
+ /// assert!(!zero.is_ascii_lowercase());
+ /// assert!(!percent.is_ascii_lowercase());
+ /// assert!(!space.is_ascii_lowercase());
+ /// assert!(!lf.is_ascii_lowercase());
+ /// assert!(!esc.is_ascii_lowercase());
+ /// ```
+ #[must_use]
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_lowercase(&self) -> bool {
+ matches!(*self, b'a'..=b'z')
+ }
+
+ /// Checks if the value is an ASCII alphanumeric character:
+ ///
+ /// - U+0041 'A' ..= U+005A 'Z', or
+ /// - U+0061 'a' ..= U+007A 'z', or
+ /// - U+0030 '0' ..= U+0039 '9'.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let uppercase_a = b'A';
+ /// let uppercase_g = b'G';
+ /// let a = b'a';
+ /// let g = b'g';
+ /// let zero = b'0';
+ /// let percent = b'%';
+ /// let space = b' ';
+ /// let lf = b'\n';
+ /// let esc = b'\x1b';
+ ///
+ /// assert!(uppercase_a.is_ascii_alphanumeric());
+ /// assert!(uppercase_g.is_ascii_alphanumeric());
+ /// assert!(a.is_ascii_alphanumeric());
+ /// assert!(g.is_ascii_alphanumeric());
+ /// assert!(zero.is_ascii_alphanumeric());
+ /// assert!(!percent.is_ascii_alphanumeric());
+ /// assert!(!space.is_ascii_alphanumeric());
+ /// assert!(!lf.is_ascii_alphanumeric());
+ /// assert!(!esc.is_ascii_alphanumeric());
+ /// ```
+ #[must_use]
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_alphanumeric(&self) -> bool {
+ matches!(*self, b'0'..=b'9' | b'A'..=b'Z' | b'a'..=b'z')
+ }
+
+ /// Checks if the value is an ASCII decimal digit:
+ /// U+0030 '0' ..= U+0039 '9'.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let uppercase_a = b'A';
+ /// let uppercase_g = b'G';
+ /// let a = b'a';
+ /// let g = b'g';
+ /// let zero = b'0';
+ /// let percent = b'%';
+ /// let space = b' ';
+ /// let lf = b'\n';
+ /// let esc = b'\x1b';
+ ///
+ /// assert!(!uppercase_a.is_ascii_digit());
+ /// assert!(!uppercase_g.is_ascii_digit());
+ /// assert!(!a.is_ascii_digit());
+ /// assert!(!g.is_ascii_digit());
+ /// assert!(zero.is_ascii_digit());
+ /// assert!(!percent.is_ascii_digit());
+ /// assert!(!space.is_ascii_digit());
+ /// assert!(!lf.is_ascii_digit());
+ /// assert!(!esc.is_ascii_digit());
+ /// ```
+ #[must_use]
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_digit(&self) -> bool {
+ matches!(*self, b'0'..=b'9')
+ }
+
+ /// Checks if the value is an ASCII hexadecimal digit:
+ ///
+ /// - U+0030 '0' ..= U+0039 '9', or
+ /// - U+0041 'A' ..= U+0046 'F', or
+ /// - U+0061 'a' ..= U+0066 'f'.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let uppercase_a = b'A';
+ /// let uppercase_g = b'G';
+ /// let a = b'a';
+ /// let g = b'g';
+ /// let zero = b'0';
+ /// let percent = b'%';
+ /// let space = b' ';
+ /// let lf = b'\n';
+ /// let esc = b'\x1b';
+ ///
+ /// assert!(uppercase_a.is_ascii_hexdigit());
+ /// assert!(!uppercase_g.is_ascii_hexdigit());
+ /// assert!(a.is_ascii_hexdigit());
+ /// assert!(!g.is_ascii_hexdigit());
+ /// assert!(zero.is_ascii_hexdigit());
+ /// assert!(!percent.is_ascii_hexdigit());
+ /// assert!(!space.is_ascii_hexdigit());
+ /// assert!(!lf.is_ascii_hexdigit());
+ /// assert!(!esc.is_ascii_hexdigit());
+ /// ```
+ #[must_use]
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_hexdigit(&self) -> bool {
+ matches!(*self, b'0'..=b'9' | b'A'..=b'F' | b'a'..=b'f')
+ }
+
+ /// Checks if the value is an ASCII punctuation character:
+ ///
+ /// - U+0021 ..= U+002F `! " # $ % & ' ( ) * + , - . /`, or
+ /// - U+003A ..= U+0040 `: ; < = > ? @`, or
+ /// - U+005B ..= U+0060 ``[ \ ] ^ _ ` ``, or
+ /// - U+007B ..= U+007E `{ | } ~`
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let uppercase_a = b'A';
+ /// let uppercase_g = b'G';
+ /// let a = b'a';
+ /// let g = b'g';
+ /// let zero = b'0';
+ /// let percent = b'%';
+ /// let space = b' ';
+ /// let lf = b'\n';
+ /// let esc = b'\x1b';
+ ///
+ /// assert!(!uppercase_a.is_ascii_punctuation());
+ /// assert!(!uppercase_g.is_ascii_punctuation());
+ /// assert!(!a.is_ascii_punctuation());
+ /// assert!(!g.is_ascii_punctuation());
+ /// assert!(!zero.is_ascii_punctuation());
+ /// assert!(percent.is_ascii_punctuation());
+ /// assert!(!space.is_ascii_punctuation());
+ /// assert!(!lf.is_ascii_punctuation());
+ /// assert!(!esc.is_ascii_punctuation());
+ /// ```
+ #[must_use]
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_punctuation(&self) -> bool {
+ matches!(*self, b'!'..=b'/' | b':'..=b'@' | b'['..=b'`' | b'{'..=b'~')
+ }
+
+ /// Checks if the value is an ASCII graphic character:
+ /// U+0021 '!' ..= U+007E '~'.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let uppercase_a = b'A';
+ /// let uppercase_g = b'G';
+ /// let a = b'a';
+ /// let g = b'g';
+ /// let zero = b'0';
+ /// let percent = b'%';
+ /// let space = b' ';
+ /// let lf = b'\n';
+ /// let esc = b'\x1b';
+ ///
+ /// assert!(uppercase_a.is_ascii_graphic());
+ /// assert!(uppercase_g.is_ascii_graphic());
+ /// assert!(a.is_ascii_graphic());
+ /// assert!(g.is_ascii_graphic());
+ /// assert!(zero.is_ascii_graphic());
+ /// assert!(percent.is_ascii_graphic());
+ /// assert!(!space.is_ascii_graphic());
+ /// assert!(!lf.is_ascii_graphic());
+ /// assert!(!esc.is_ascii_graphic());
+ /// ```
+ #[must_use]
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_graphic(&self) -> bool {
+ matches!(*self, b'!'..=b'~')
+ }
+
+ /// Checks if the value is an ASCII whitespace character:
+ /// U+0020 SPACE, U+0009 HORIZONTAL TAB, U+000A LINE FEED,
+ /// U+000C FORM FEED, or U+000D CARRIAGE RETURN.
+ ///
+ /// Rust uses the WhatWG Infra Standard's [definition of ASCII
+ /// whitespace][infra-aw]. There are several other definitions in
+ /// wide use. For instance, [the POSIX locale][pct] includes
+ /// U+000B VERTICAL TAB as well as all the above characters,
+ /// but—from the very same specification—[the default rule for
+ /// "field splitting" in the Bourne shell][bfs] considers *only*
+ /// SPACE, HORIZONTAL TAB, and LINE FEED as whitespace.
+ ///
+ /// If you are writing a program that will process an existing
+ /// file format, check what that format's definition of whitespace is
+ /// before using this function.
+ ///
+ /// [infra-aw]: https://infra.spec.whatwg.org/#ascii-whitespace
+ /// [pct]: https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap07.html#tag_07_03_01
+ /// [bfs]: https://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_06_05
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let uppercase_a = b'A';
+ /// let uppercase_g = b'G';
+ /// let a = b'a';
+ /// let g = b'g';
+ /// let zero = b'0';
+ /// let percent = b'%';
+ /// let space = b' ';
+ /// let lf = b'\n';
+ /// let esc = b'\x1b';
+ ///
+ /// assert!(!uppercase_a.is_ascii_whitespace());
+ /// assert!(!uppercase_g.is_ascii_whitespace());
+ /// assert!(!a.is_ascii_whitespace());
+ /// assert!(!g.is_ascii_whitespace());
+ /// assert!(!zero.is_ascii_whitespace());
+ /// assert!(!percent.is_ascii_whitespace());
+ /// assert!(space.is_ascii_whitespace());
+ /// assert!(lf.is_ascii_whitespace());
+ /// assert!(!esc.is_ascii_whitespace());
+ /// ```
+ #[must_use]
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_whitespace(&self) -> bool {
+ matches!(*self, b'\t' | b'\n' | b'\x0C' | b'\r' | b' ')
+ }
+
+ /// Checks if the value is an ASCII control character:
+ /// U+0000 NUL ..= U+001F UNIT SEPARATOR, or U+007F DELETE.
+ /// Note that most ASCII whitespace characters are control
+ /// characters, but SPACE is not.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let uppercase_a = b'A';
+ /// let uppercase_g = b'G';
+ /// let a = b'a';
+ /// let g = b'g';
+ /// let zero = b'0';
+ /// let percent = b'%';
+ /// let space = b' ';
+ /// let lf = b'\n';
+ /// let esc = b'\x1b';
+ ///
+ /// assert!(!uppercase_a.is_ascii_control());
+ /// assert!(!uppercase_g.is_ascii_control());
+ /// assert!(!a.is_ascii_control());
+ /// assert!(!g.is_ascii_control());
+ /// assert!(!zero.is_ascii_control());
+ /// assert!(!percent.is_ascii_control());
+ /// assert!(!space.is_ascii_control());
+ /// assert!(lf.is_ascii_control());
+ /// assert!(esc.is_ascii_control());
+ /// ```
+ #[must_use]
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_control(&self) -> bool {
+ matches!(*self, b'\0'..=b'\x1F' | b'\x7F')
+ }
+
+ /// Returns an iterator that produces an escaped version of a `u8`,
+ /// treating it as an ASCII character.
+ ///
+ /// The behavior is identical to [`ascii::escape_default`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!("0", b'0'.escape_ascii().to_string());
+ /// assert_eq!("\\t", b'\t'.escape_ascii().to_string());
+ /// assert_eq!("\\r", b'\r'.escape_ascii().to_string());
+ /// assert_eq!("\\n", b'\n'.escape_ascii().to_string());
+ /// assert_eq!("\\'", b'\''.escape_ascii().to_string());
+ /// assert_eq!("\\\"", b'"'.escape_ascii().to_string());
+ /// assert_eq!("\\\\", b'\\'.escape_ascii().to_string());
+ /// assert_eq!("\\x9d", b'\x9d'.escape_ascii().to_string());
+ /// ```
+ #[must_use = "this returns the escaped byte as an iterator, \
+ without modifying the original"]
+ #[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
+ #[inline]
+ pub fn escape_ascii(self) -> ascii::EscapeDefault {
+ ascii::escape_default(self)
+ }
+
+ #[inline]
+ pub(crate) const fn is_utf8_char_boundary(self) -> bool {
+ // This is bit magic equivalent to: b < 128 || b >= 192
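+        // Illustrative cases (not in the original comment):
+        //   0x41 ('A')          as i8 =   65 >= -0x40 -> boundary (ASCII byte)
+        //   0x80 (continuation) as i8 = -128 <  -0x40 -> not a boundary
+        //   0xC2 (leading byte) as i8 =  -62 >= -0x40 -> boundary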
+ (self as i8) >= -0x40
+ }
+}
+
+impl u16 {
+ uint_impl! { u16, u16, i16, NonZeroU16, 16, 65535, 4, "0xa003", "0x3a", "0x1234", "0x3412", "0x2c48",
+ "[0x34, 0x12]", "[0x12, 0x34]", "", "", "" }
+ widening_impl! { u16, u32, 16, unsigned }
+
+    /// Checks if the value is a Unicode surrogate code point, which is a disallowed value for [`char`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(utf16_extra)]
+ ///
+ /// let low_non_surrogate = 0xA000u16;
+ /// let low_surrogate = 0xD800u16;
+ /// let high_surrogate = 0xDC00u16;
+ /// let high_non_surrogate = 0xE000u16;
+ ///
+ /// assert!(!low_non_surrogate.is_utf16_surrogate());
+ /// assert!(low_surrogate.is_utf16_surrogate());
+ /// assert!(high_surrogate.is_utf16_surrogate());
+ /// assert!(!high_non_surrogate.is_utf16_surrogate());
+ /// ```
+ #[must_use]
+ #[unstable(feature = "utf16_extra", issue = "94919")]
+ #[rustc_const_unstable(feature = "utf16_extra_const", issue = "94919")]
+ #[inline]
+ pub const fn is_utf16_surrogate(self) -> bool {
+ matches!(self, 0xD800..=0xDFFF)
+ }
+}
+
+impl u32 {
+ uint_impl! { u32, u32, i32, NonZeroU32, 32, 4294967295, 8, "0x10000b3", "0xb301", "0x12345678",
+ "0x78563412", "0x1e6a2c48", "[0x78, 0x56, 0x34, 0x12]", "[0x12, 0x34, 0x56, 0x78]", "", "", "" }
+ widening_impl! { u32, u64, 32, unsigned }
+}
+
+impl u64 {
+ uint_impl! { u64, u64, i64, NonZeroU64, 64, 18446744073709551615, 12, "0xaa00000000006e1", "0x6e10aa",
+ "0x1234567890123456", "0x5634129078563412", "0x6a2c48091e6a2c48",
+ "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]",
+ "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]",
+ "", "", ""}
+ widening_impl! { u64, u128, 64, unsigned }
+}
+
+impl u128 {
+ uint_impl! { u128, u128, i128, NonZeroU128, 128, 340282366920938463463374607431768211455, 16,
+ "0x13f40000000000000000000000004f76", "0x4f7613f4", "0x12345678901234567890123456789012",
+ "0x12907856341290785634129078563412", "0x48091e6a2c48091e6a2c48091e6a2c48",
+ "[0x12, 0x90, 0x78, 0x56, 0x34, 0x12, 0x90, 0x78, \
+ 0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]",
+ "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56, \
+ 0x78, 0x90, 0x12, 0x34, 0x56, 0x78, 0x90, 0x12]",
+ "", "", ""}
+}
+
+#[cfg(target_pointer_width = "16")]
+impl usize {
+ uint_impl! { usize, u16, isize, NonZeroUsize, 16, 65535, 4, "0xa003", "0x3a", "0x1234", "0x3412", "0x2c48",
+ "[0x34, 0x12]", "[0x12, 0x34]",
+ usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!(),
+ " on 16-bit targets" }
+ widening_impl! { usize, u32, 16, unsigned }
+}
+#[cfg(target_pointer_width = "32")]
+impl usize {
+ uint_impl! { usize, u32, isize, NonZeroUsize, 32, 4294967295, 8, "0x10000b3", "0xb301", "0x12345678",
+ "0x78563412", "0x1e6a2c48", "[0x78, 0x56, 0x34, 0x12]", "[0x12, 0x34, 0x56, 0x78]",
+ usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!(),
+ " on 32-bit targets" }
+ widening_impl! { usize, u64, 32, unsigned }
+}
+
+#[cfg(target_pointer_width = "64")]
+impl usize {
+ uint_impl! { usize, u64, isize, NonZeroUsize, 64, 18446744073709551615, 12, "0xaa00000000006e1", "0x6e10aa",
+ "0x1234567890123456", "0x5634129078563412", "0x6a2c48091e6a2c48",
+ "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]",
+ "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]",
+ usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!(),
+ " on 64-bit targets" }
+ widening_impl! { usize, u128, 64, unsigned }
+}
+
+impl usize {
+    /// Returns a `usize` where every byte is equal to `x`.
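+    /// For example (illustrative), `repeat_u8(0x42)` is `0x4242_4242_4242_4242` on a 64-bit target.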
+ #[inline]
+ pub(crate) const fn repeat_u8(x: u8) -> usize {
+ usize::from_ne_bytes([x; mem::size_of::<usize>()])
+ }
+
+    /// Returns a `usize` where every byte pair is equal to `x`.
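+    /// For example (illustrative), `repeat_u16(0x1234)` is `0x1234_1234_1234_1234` on a 64-bit target.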
+ #[inline]
+ pub(crate) const fn repeat_u16(x: u16) -> usize {
+ let mut r = 0usize;
+ let mut i = 0;
+ while i < mem::size_of::<usize>() {
+ // Use `wrapping_shl` to make it work on targets with 16-bit `usize`
+ r = r.wrapping_shl(16) | (x as usize);
+ i += 2;
+ }
+ r
+ }
+}
+
+/// A classification of floating point numbers.
+///
+/// This `enum` is used as the return type for [`f32::classify`] and [`f64::classify`]. See
+/// their documentation for more.
+///
+/// # Examples
+///
+/// ```
+/// use std::num::FpCategory;
+///
+/// let num = 12.4_f32;
+/// let inf = f32::INFINITY;
+/// let zero = 0f32;
+/// let sub: f32 = 1.1754942e-38;
+/// let nan = f32::NAN;
+///
+/// assert_eq!(num.classify(), FpCategory::Normal);
+/// assert_eq!(inf.classify(), FpCategory::Infinite);
+/// assert_eq!(zero.classify(), FpCategory::Zero);
+/// assert_eq!(nan.classify(), FpCategory::Nan);
+/// assert_eq!(sub.classify(), FpCategory::Subnormal);
+/// ```
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub enum FpCategory {
+ /// NaN (not a number): this value results from calculations like `(-1.0).sqrt()`.
+ ///
+ /// See [the documentation for `f32`](f32) for more information on the unusual properties
+ /// of NaN.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Nan,
+
+ /// Positive or negative infinity, which often results from dividing a nonzero number
+ /// by zero.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Infinite,
+
+ /// Positive or negative zero.
+ ///
+ /// See [the documentation for `f32`](f32) for more information on the signedness of zeroes.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Zero,
+
+ /// “Subnormal” or “denormal” floating point representation (less precise, relative to
+ /// their magnitude, than [`Normal`]).
+ ///
+ /// Subnormal numbers are larger in magnitude than [`Zero`] but smaller in magnitude than all
+ /// [`Normal`] numbers.
+ ///
+ /// [`Normal`]: Self::Normal
+ /// [`Zero`]: Self::Zero
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Subnormal,
+
+ /// A regular floating point number, not any of the exceptional categories.
+ ///
+ /// The smallest positive normal numbers are [`f32::MIN_POSITIVE`] and [`f64::MIN_POSITIVE`],
+ /// and the largest positive normal numbers are [`f32::MAX`] and [`f64::MAX`]. (Unlike signed
+ /// integers, floating point numbers are symmetric in their range, so negating any of these
+ /// constants will produce their negative counterpart.)
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Normal,
+}
+
+#[doc(hidden)]
+trait FromStrRadixHelper:
+ PartialOrd + Copy + Add<Output = Self> + Sub<Output = Self> + Mul<Output = Self>
+{
+ const MIN: Self;
+ fn from_u32(u: u32) -> Self;
+ fn checked_mul(&self, other: u32) -> Option<Self>;
+ fn checked_sub(&self, other: u32) -> Option<Self>;
+ fn checked_add(&self, other: u32) -> Option<Self>;
+}
+
+macro_rules! from_str_radix_int_impl {
+ ($($t:ty)*) => {$(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl FromStr for $t {
+ type Err = ParseIntError;
+ fn from_str(src: &str) -> Result<Self, ParseIntError> {
+ from_str_radix(src, 10)
+ }
+ }
+ )*}
+}
+from_str_radix_int_impl! { isize i8 i16 i32 i64 i128 usize u8 u16 u32 u64 u128 }
+
+macro_rules! impl_helper_for {
+ ($($t:ty)*) => ($(impl FromStrRadixHelper for $t {
+ const MIN: Self = Self::MIN;
+ #[inline]
+ fn from_u32(u: u32) -> Self { u as Self }
+ #[inline]
+ fn checked_mul(&self, other: u32) -> Option<Self> {
+ Self::checked_mul(*self, other as Self)
+ }
+ #[inline]
+ fn checked_sub(&self, other: u32) -> Option<Self> {
+ Self::checked_sub(*self, other as Self)
+ }
+ #[inline]
+ fn checked_add(&self, other: u32) -> Option<Self> {
+ Self::checked_add(*self, other as Self)
+ }
+ })*)
+}
+impl_helper_for! { i8 i16 i32 i64 i128 isize u8 u16 u32 u64 u128 usize }
+
+/// Determines whether a string of `digits.len()` digits in the given radix is
+/// guaranteed to fit in the type `T`.
+/// Note that when the radix is known at compile time, only the check of
+/// `digits.len()` is done at runtime.
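+/// For example (illustrative): with `T = u8` (1 byte) and radix 16, any unsigned
+/// input of at most 2 digits fits, since the largest such value, `0xff`, equals `u8::MAX`.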
+#[doc(hidden)]
+#[inline(always)]
+#[unstable(issue = "none", feature = "std_internals")]
+pub fn can_not_overflow<T>(radix: u32, is_signed_ty: bool, digits: &[u8]) -> bool {
+ radix <= 16 && digits.len() <= mem::size_of::<T>() * 2 - is_signed_ty as usize
+}
+
+fn from_str_radix<T: FromStrRadixHelper>(src: &str, radix: u32) -> Result<T, ParseIntError> {
+ use self::IntErrorKind::*;
+ use self::ParseIntError as PIE;
+
+ assert!(
+ (2..=36).contains(&radix),
+ "from_str_radix_int: must lie in the range `[2, 36]` - found {}",
+ radix
+ );
+
+ if src.is_empty() {
+ return Err(PIE { kind: Empty });
+ }
+
+ let is_signed_ty = T::from_u32(0) > T::MIN;
+
+    // All valid digits are ASCII, so we will just iterate over the UTF-8 bytes
+    // and cast them to chars. `.to_digit()` will safely return `None` for anything
+    // other than a valid ASCII digit for the given radix, including the first byte
+    // of multi-byte sequences.
+ let src = src.as_bytes();
+
+ let (is_positive, digits) = match src[0] {
+ b'+' | b'-' if src[1..].is_empty() => {
+ return Err(PIE { kind: InvalidDigit });
+ }
+ b'+' => (true, &src[1..]),
+ b'-' if is_signed_ty => (false, &src[1..]),
+ _ => (true, src),
+ };
+
+ let mut result = T::from_u32(0);
+
+ if can_not_overflow::<T>(radix, is_signed_ty, digits) {
+ // If the len of the str is short compared to the range of the type
+ // we are parsing into, then we can be certain that an overflow will not occur.
+ // This bound is when `radix.pow(digits.len()) - 1 <= T::MAX` but the condition
+ // above is a faster (conservative) approximation of this.
+ //
+ // Consider radix 16 as it has the highest information density per digit and will thus overflow the earliest:
+ // `u8::MAX` is `ff` - any str of len 2 is guaranteed to not overflow.
+ // `i8::MAX` is `7f` - only a str of len 1 is guaranteed to not overflow.
+ macro_rules! run_unchecked_loop {
+ ($unchecked_additive_op:expr) => {
+ for &c in digits {
+ result = result * T::from_u32(radix);
+ let x = (c as char).to_digit(radix).ok_or(PIE { kind: InvalidDigit })?;
+ result = $unchecked_additive_op(result, T::from_u32(x));
+ }
+ };
+ }
+ if is_positive {
+ run_unchecked_loop!(<T as core::ops::Add>::add)
+ } else {
+ run_unchecked_loop!(<T as core::ops::Sub>::sub)
+ };
+ } else {
+ macro_rules! run_checked_loop {
+ ($checked_additive_op:ident, $overflow_err:expr) => {
+ for &c in digits {
+                    // When `radix` is passed in as a literal, the compiler can avoid
+                    // a slow `imul` and use shifts instead, provided `radix` can be
+                    // expressed as a sum of powers of 2 (x*10 can be written as x*8 + x*2).
+                    // When those optimisations don't apply, the latency of the
+                    // multiplication can still be hidden by issuing it before its result
+                    // is needed: on a modern out-of-order CPU the multiply is slower than
+                    // the other instructions, so starting it first lets the CPU spend the
+                    // intervening cycles on the digit conversion and pick up the product
+                    // once it is ready.
+ let mul = result.checked_mul(radix);
+ let x = (c as char).to_digit(radix).ok_or(PIE { kind: InvalidDigit })?;
+ result = mul.ok_or_else($overflow_err)?;
+ result = T::$checked_additive_op(&result, x).ok_or_else($overflow_err)?;
+ }
+ };
+ }
+ if is_positive {
+ run_checked_loop!(checked_add, || PIE { kind: PosOverflow })
+ } else {
+ run_checked_loop!(checked_sub, || PIE { kind: NegOverflow })
+ };
+ }
+ Ok(result)
+}
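+
+// Illustrative use of the public wrappers around `from_str_radix` (not part of
+// the original source):
+//
+//     assert_eq!(i32::from_str_radix("7f", 16), Ok(127));
+//     assert_eq!("42".parse::<u8>(), Ok(42)); // `FromStr` delegates to radix 10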
diff --git a/library/core/src/num/nonzero.rs b/library/core/src/num/nonzero.rs
new file mode 100644
index 000000000..4de0a0cf5
--- /dev/null
+++ b/library/core/src/num/nonzero.rs
@@ -0,0 +1,1134 @@
+//! Definitions of integer types that are known not to equal zero.
+
+use crate::fmt;
+use crate::ops::{BitOr, BitOrAssign, Div, Rem};
+use crate::str::FromStr;
+
+use super::from_str_radix;
+use super::{IntErrorKind, ParseIntError};
+use crate::intrinsics;
+
+macro_rules! impl_nonzero_fmt {
+ ( #[$stability: meta] ( $( $Trait: ident ),+ ) for $Ty: ident ) => {
+ $(
+ #[$stability]
+ impl fmt::$Trait for $Ty {
+ #[inline]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.get().fmt(f)
+ }
+ }
+ )+
+ }
+}
+
+macro_rules! nonzero_integers {
+ ( $( #[$stability: meta] #[$const_new_unchecked_stability: meta] $Ty: ident($Int: ty); )+ ) => {
+ $(
+ /// An integer that is known not to equal zero.
+ ///
+ /// This enables some memory layout optimization.
+ #[doc = concat!("For example, `Option<", stringify!($Ty), ">` is the same size as `", stringify!($Int), "`:")]
+ ///
+ /// ```rust
+ /// use std::mem::size_of;
+ #[doc = concat!("assert_eq!(size_of::<Option<core::num::", stringify!($Ty), ">>(), size_of::<", stringify!($Int), ">());")]
+ /// ```
+ #[$stability]
+ #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
+ #[repr(transparent)]
+ #[rustc_layout_scalar_valid_range_start(1)]
+ #[rustc_nonnull_optimization_guaranteed]
+ #[rustc_diagnostic_item = stringify!($Ty)]
+ pub struct $Ty($Int);
+
+ impl $Ty {
+ /// Creates a non-zero without checking whether the value is non-zero.
+ /// This results in undefined behaviour if the value is zero.
+ ///
+ /// # Safety
+ ///
+ /// The value must not be zero.
+ #[$stability]
+ #[$const_new_unchecked_stability]
+ #[must_use]
+ #[inline]
+ pub const unsafe fn new_unchecked(n: $Int) -> Self {
+ // SAFETY: this is guaranteed to be safe by the caller.
+ unsafe {
+ core::intrinsics::assert_unsafe_precondition!(n != 0);
+ Self(n)
+ }
+ }
+
+ /// Creates a non-zero if the given value is not zero.
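+            ///
+            /// # Examples
+            ///
+            /// ```
+            #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+            ///
+            #[doc = concat!("assert!(", stringify!($Ty), "::new(1).is_some());")]
+            #[doc = concat!("assert!(", stringify!($Ty), "::new(0).is_none());")]
+            /// ```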
+ #[$stability]
+ #[rustc_const_stable(feature = "const_nonzero_int_methods", since = "1.47.0")]
+ #[must_use]
+ #[inline]
+ pub const fn new(n: $Int) -> Option<Self> {
+ if n != 0 {
+ // SAFETY: we just checked that there's no `0`
+ Some(unsafe { Self(n) })
+ } else {
+ None
+ }
+ }
+
+ /// Returns the value as a primitive type.
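+            ///
+            /// # Examples
+            ///
+            /// ```
+            #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+            ///
+            #[doc = concat!("assert_eq!(", stringify!($Ty), "::new(7).unwrap().get(), 7);")]
+            /// ```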
+ #[$stability]
+ #[inline]
+ #[rustc_const_stable(feature = "const_nonzero_get", since = "1.34.0")]
+ pub const fn get(self) -> $Int {
+ self.0
+ }
+
+ }
+
+ #[stable(feature = "from_nonzero", since = "1.31.0")]
+ #[rustc_const_unstable(feature = "const_num_from_num", issue = "87852")]
+ impl const From<$Ty> for $Int {
+ #[doc = concat!("Converts a `", stringify!($Ty), "` into an `", stringify!($Int), "`")]
+ #[inline]
+ fn from(nonzero: $Ty) -> Self {
+ nonzero.0
+ }
+ }
+
+ #[stable(feature = "nonzero_bitor", since = "1.45.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitOr for $Ty {
+ type Output = Self;
+ #[inline]
+ fn bitor(self, rhs: Self) -> Self::Output {
+ // SAFETY: since `self` and `rhs` are both nonzero, the
+ // result of the bitwise-or will be nonzero.
+ unsafe { $Ty::new_unchecked(self.get() | rhs.get()) }
+ }
+ }
+
+ #[stable(feature = "nonzero_bitor", since = "1.45.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitOr<$Int> for $Ty {
+ type Output = Self;
+ #[inline]
+ fn bitor(self, rhs: $Int) -> Self::Output {
+ // SAFETY: since `self` is nonzero, the result of the
+ // bitwise-or will be nonzero regardless of the value of
+ // `rhs`.
+ unsafe { $Ty::new_unchecked(self.get() | rhs) }
+ }
+ }
+
+ #[stable(feature = "nonzero_bitor", since = "1.45.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitOr<$Ty> for $Int {
+ type Output = $Ty;
+ #[inline]
+ fn bitor(self, rhs: $Ty) -> Self::Output {
+ // SAFETY: since `rhs` is nonzero, the result of the
+ // bitwise-or will be nonzero regardless of the value of
+ // `self`.
+ unsafe { $Ty::new_unchecked(self | rhs.get()) }
+ }
+ }
+
+ #[stable(feature = "nonzero_bitor", since = "1.45.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitOrAssign for $Ty {
+ #[inline]
+ fn bitor_assign(&mut self, rhs: Self) {
+ *self = *self | rhs;
+ }
+ }
+
+ #[stable(feature = "nonzero_bitor", since = "1.45.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitOrAssign<$Int> for $Ty {
+ #[inline]
+ fn bitor_assign(&mut self, rhs: $Int) {
+ *self = *self | rhs;
+ }
+ }
+
+ impl_nonzero_fmt! {
+ #[$stability] (Debug, Display, Binary, Octal, LowerHex, UpperHex) for $Ty
+ }
+ )+
+ }
+}
+
+nonzero_integers! {
+ #[stable(feature = "nonzero", since = "1.28.0")] #[rustc_const_stable(feature = "nonzero", since = "1.28.0")] NonZeroU8(u8);
+ #[stable(feature = "nonzero", since = "1.28.0")] #[rustc_const_stable(feature = "nonzero", since = "1.28.0")] NonZeroU16(u16);
+ #[stable(feature = "nonzero", since = "1.28.0")] #[rustc_const_stable(feature = "nonzero", since = "1.28.0")] NonZeroU32(u32);
+ #[stable(feature = "nonzero", since = "1.28.0")] #[rustc_const_stable(feature = "nonzero", since = "1.28.0")] NonZeroU64(u64);
+ #[stable(feature = "nonzero", since = "1.28.0")] #[rustc_const_stable(feature = "nonzero", since = "1.28.0")] NonZeroU128(u128);
+ #[stable(feature = "nonzero", since = "1.28.0")] #[rustc_const_stable(feature = "nonzero", since = "1.28.0")] NonZeroUsize(usize);
+ #[stable(feature = "signed_nonzero", since = "1.34.0")] #[rustc_const_stable(feature = "signed_nonzero", since = "1.34.0")] NonZeroI8(i8);
+ #[stable(feature = "signed_nonzero", since = "1.34.0")] #[rustc_const_stable(feature = "signed_nonzero", since = "1.34.0")] NonZeroI16(i16);
+ #[stable(feature = "signed_nonzero", since = "1.34.0")] #[rustc_const_stable(feature = "signed_nonzero", since = "1.34.0")] NonZeroI32(i32);
+ #[stable(feature = "signed_nonzero", since = "1.34.0")] #[rustc_const_stable(feature = "signed_nonzero", since = "1.34.0")] NonZeroI64(i64);
+ #[stable(feature = "signed_nonzero", since = "1.34.0")] #[rustc_const_stable(feature = "signed_nonzero", since = "1.34.0")] NonZeroI128(i128);
+ #[stable(feature = "signed_nonzero", since = "1.34.0")] #[rustc_const_stable(feature = "signed_nonzero", since = "1.34.0")] NonZeroIsize(isize);
+}
+
+macro_rules! from_str_radix_nzint_impl {
+ ($($t:ty)*) => {$(
+ #[stable(feature = "nonzero_parse", since = "1.35.0")]
+ impl FromStr for $t {
+ type Err = ParseIntError;
+ fn from_str(src: &str) -> Result<Self, Self::Err> {
+ Self::new(from_str_radix(src, 10)?)
+ .ok_or(ParseIntError {
+ kind: IntErrorKind::Zero
+ })
+ }
+ }
+ )*}
+}
+
+from_str_radix_nzint_impl! { NonZeroU8 NonZeroU16 NonZeroU32 NonZeroU64 NonZeroU128 NonZeroUsize
+NonZeroI8 NonZeroI16 NonZeroI32 NonZeroI64 NonZeroI128 NonZeroIsize }
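+
+// Illustrative behaviour of the `FromStr` impls above (not part of the
+// original source):
+//
+//     use std::num::NonZeroU8;
+//     assert_eq!("7".parse::<NonZeroU8>().map(NonZeroU8::get), Ok(7));
+//     assert!("0".parse::<NonZeroU8>().is_err()); // rejected with `IntErrorKind::Zero`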
+
+macro_rules! nonzero_leading_trailing_zeros {
+ ( $( $Ty: ident($Uint: ty) , $LeadingTestExpr:expr ;)+ ) => {
+ $(
+ impl $Ty {
+ /// Returns the number of leading zeros in the binary representation of `self`.
+ ///
+ /// On many architectures, this function can perform better than `leading_zeros()` on the underlying integer type, as special handling of zero can be avoided.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let n = std::num::", stringify!($Ty), "::new(", stringify!($LeadingTestExpr), ").unwrap();")]
+ ///
+ /// assert_eq!(n.leading_zeros(), 0);
+ /// ```
+ #[stable(feature = "nonzero_leading_trailing_zeros", since = "1.53.0")]
+ #[rustc_const_stable(feature = "nonzero_leading_trailing_zeros", since = "1.53.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn leading_zeros(self) -> u32 {
+ // SAFETY: since `self` cannot be zero, it is safe to call `ctlz_nonzero`.
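+                    // For signed types, `self.0 as $Uint` reinterprets the bits, so the
+                    // count is taken on the raw bit pattern (e.g. `-1` has no leading zeros).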
+ unsafe { intrinsics::ctlz_nonzero(self.0 as $Uint) as u32 }
+ }
+
+ /// Returns the number of trailing zeros in the binary representation
+ /// of `self`.
+ ///
+ /// On many architectures, this function can perform better than `trailing_zeros()` on the underlying integer type, as special handling of zero can be avoided.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let n = std::num::", stringify!($Ty), "::new(0b0101000).unwrap();")]
+ ///
+ /// assert_eq!(n.trailing_zeros(), 3);
+ /// ```
+ #[stable(feature = "nonzero_leading_trailing_zeros", since = "1.53.0")]
+ #[rustc_const_stable(feature = "nonzero_leading_trailing_zeros", since = "1.53.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn trailing_zeros(self) -> u32 {
+ // SAFETY: since `self` cannot be zero, it is safe to call `cttz_nonzero`.
+ unsafe { intrinsics::cttz_nonzero(self.0 as $Uint) as u32 }
+ }
+
+ }
+ )+
+ }
+}
+
+nonzero_leading_trailing_zeros! {
+ NonZeroU8(u8), u8::MAX;
+ NonZeroU16(u16), u16::MAX;
+ NonZeroU32(u32), u32::MAX;
+ NonZeroU64(u64), u64::MAX;
+ NonZeroU128(u128), u128::MAX;
+ NonZeroUsize(usize), usize::MAX;
+ NonZeroI8(u8), -1i8;
+ NonZeroI16(u16), -1i16;
+ NonZeroI32(u32), -1i32;
+ NonZeroI64(u64), -1i64;
+ NonZeroI128(u128), -1i128;
+ NonZeroIsize(usize), -1isize;
+}
+
+macro_rules! nonzero_integers_div {
+ ( $( $Ty: ident($Int: ty); )+ ) => {
+ $(
+ #[stable(feature = "nonzero_div", since = "1.51.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Div<$Ty> for $Int {
+ type Output = $Int;
+ /// This operation rounds towards zero,
+ /// truncating any fractional part of the exact result, and cannot panic.
+ #[inline]
+ fn div(self, other: $Ty) -> $Int {
+ // SAFETY: div by zero is checked because `other` is a nonzero,
+ // and MIN/-1 is checked because `self` is an unsigned int.
+ unsafe { crate::intrinsics::unchecked_div(self, other.get()) }
+ }
+ }
+
+ #[stable(feature = "nonzero_div", since = "1.51.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Rem<$Ty> for $Int {
+ type Output = $Int;
+ /// This operation satisfies `n % d == n - (n / d) * d`, and cannot panic.
+ #[inline]
+ fn rem(self, other: $Ty) -> $Int {
+ // SAFETY: rem by zero is checked because `other` is a nonzero,
+ // and MIN/-1 is checked because `self` is an unsigned int.
+ unsafe { crate::intrinsics::unchecked_rem(self, other.get()) }
+ }
+ }
+ )+
+ }
+}
+
+nonzero_integers_div! {
+ NonZeroU8(u8);
+ NonZeroU16(u16);
+ NonZeroU32(u32);
+ NonZeroU64(u64);
+ NonZeroU128(u128);
+ NonZeroUsize(usize);
+}
+
+// A bunch of methods for unsigned nonzero types only.
+macro_rules! nonzero_unsigned_operations {
+ ( $( $Ty: ident($Int: ident); )+ ) => {
+ $(
+ impl $Ty {
+            /// Adds an unsigned integer to a non-zero value.
+            /// Checks for overflow and returns [`None`] on overflow.
+            /// As a consequence, the result cannot wrap to zero.
+            ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+ ///
+ /// # fn main() { test().unwrap(); }
+ /// # fn test() -> Option<()> {
+ #[doc = concat!("let one = ", stringify!($Ty), "::new(1)?;")]
+ #[doc = concat!("let two = ", stringify!($Ty), "::new(2)?;")]
+ #[doc = concat!("let max = ", stringify!($Ty), "::new(",
+ stringify!($Int), "::MAX)?;")]
+ ///
+ /// assert_eq!(Some(two), one.checked_add(1));
+ /// assert_eq!(None, max.checked_add(1));
+ /// # Some(())
+ /// # }
+ /// ```
+ #[stable(feature = "nonzero_checked_ops", since = "1.64.0")]
+ #[rustc_const_stable(feature = "const_nonzero_checked_ops", since = "1.64.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_add(self, other: $Int) -> Option<$Ty> {
+ if let Some(result) = self.get().checked_add(other) {
+ // SAFETY: $Int::checked_add returns None on overflow
+ // so the result cannot be zero.
+ Some(unsafe { $Ty::new_unchecked(result) })
+ } else {
+ None
+ }
+ }
+
+            /// Adds an unsigned integer to a non-zero value.
+            #[doc = concat!("Returns [`", stringify!($Int), "::MAX`] on overflow.")]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+ ///
+ /// # fn main() { test().unwrap(); }
+ /// # fn test() -> Option<()> {
+ #[doc = concat!("let one = ", stringify!($Ty), "::new(1)?;")]
+ #[doc = concat!("let two = ", stringify!($Ty), "::new(2)?;")]
+ #[doc = concat!("let max = ", stringify!($Ty), "::new(",
+ stringify!($Int), "::MAX)?;")]
+ ///
+ /// assert_eq!(two, one.saturating_add(1));
+ /// assert_eq!(max, max.saturating_add(1));
+ /// # Some(())
+ /// # }
+ /// ```
+ #[stable(feature = "nonzero_checked_ops", since = "1.64.0")]
+ #[rustc_const_stable(feature = "const_nonzero_checked_ops", since = "1.64.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn saturating_add(self, other: $Int) -> $Ty {
+ // SAFETY: $Int::saturating_add returns $Int::MAX on overflow
+ // so the result cannot be zero.
+ unsafe { $Ty::new_unchecked(self.get().saturating_add(other)) }
+ }
+
+            /// Adds an unsigned integer to a non-zero value,
+            /// assuming overflow cannot occur.
+ /// Overflow is unchecked, and it is undefined behaviour to overflow
+ /// *even if the result would wrap to a non-zero value*.
+ /// The behaviour is undefined as soon as
+ #[doc = concat!("`self + rhs > ", stringify!($Int), "::MAX`.")]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(nonzero_ops)]
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+ ///
+ /// # fn main() { test().unwrap(); }
+ /// # fn test() -> Option<()> {
+ #[doc = concat!("let one = ", stringify!($Ty), "::new(1)?;")]
+ #[doc = concat!("let two = ", stringify!($Ty), "::new(2)?;")]
+ ///
+ /// assert_eq!(two, unsafe { one.unchecked_add(1) });
+ /// # Some(())
+ /// # }
+ /// ```
+ #[unstable(feature = "nonzero_ops", issue = "84186")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const unsafe fn unchecked_add(self, other: $Int) -> $Ty {
+ // SAFETY: The caller ensures there is no overflow.
+ unsafe { $Ty::new_unchecked(self.get().unchecked_add(other)) }
+ }
+
+            /// Returns the smallest power of two greater than or equal to `self`.
+            /// Checks for overflow and returns [`None`]
+ /// if the next power of two is greater than the type’s maximum value.
+ /// As a consequence, the result cannot wrap to zero.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+ ///
+ /// # fn main() { test().unwrap(); }
+ /// # fn test() -> Option<()> {
+ #[doc = concat!("let two = ", stringify!($Ty), "::new(2)?;")]
+ #[doc = concat!("let three = ", stringify!($Ty), "::new(3)?;")]
+ #[doc = concat!("let four = ", stringify!($Ty), "::new(4)?;")]
+ #[doc = concat!("let max = ", stringify!($Ty), "::new(",
+ stringify!($Int), "::MAX)?;")]
+ ///
+            /// assert_eq!(Some(two), two.checked_next_power_of_two());
+            /// assert_eq!(Some(four), three.checked_next_power_of_two());
+            /// assert_eq!(None, max.checked_next_power_of_two());
+ /// # Some(())
+ /// # }
+ /// ```
+ #[stable(feature = "nonzero_checked_ops", since = "1.64.0")]
+ #[rustc_const_stable(feature = "const_nonzero_checked_ops", since = "1.64.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_next_power_of_two(self) -> Option<$Ty> {
+ if let Some(nz) = self.get().checked_next_power_of_two() {
+ // SAFETY: The next power of two is positive
+ // and overflow is checked.
+ Some(unsafe { $Ty::new_unchecked(nz) })
+ } else {
+ None
+ }
+ }
+
+ /// Returns the base 2 logarithm of the number, rounded down.
+ ///
+ /// This is the same operation as
+ #[doc = concat!("[`", stringify!($Int), "::log2`],")]
+ /// except that it has no failure cases to worry about
+ /// since this value can never be zero.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(int_log)]
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+ ///
+ #[doc = concat!("assert_eq!(", stringify!($Ty), "::new(7).unwrap().log2(), 2);")]
+ #[doc = concat!("assert_eq!(", stringify!($Ty), "::new(8).unwrap().log2(), 3);")]
+ #[doc = concat!("assert_eq!(", stringify!($Ty), "::new(9).unwrap().log2(), 3);")]
+ /// ```
+ #[unstable(feature = "int_log", issue = "70887")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn log2(self) -> u32 {
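+                // `self` is nonzero, so it has at least one set bit; `BITS - 1 - leading_zeros()`
+                // is the index of the highest set bit, i.e. `floor(log2(self))`.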
+ Self::BITS - 1 - self.leading_zeros()
+ }
+
+ /// Returns the base 10 logarithm of the number, rounded down.
+ ///
+ /// This is the same operation as
+ #[doc = concat!("[`", stringify!($Int), "::log10`],")]
+ /// except that it has no failure cases to worry about
+ /// since this value can never be zero.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(int_log)]
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+ ///
+ #[doc = concat!("assert_eq!(", stringify!($Ty), "::new(99).unwrap().log10(), 1);")]
+ #[doc = concat!("assert_eq!(", stringify!($Ty), "::new(100).unwrap().log10(), 2);")]
+ #[doc = concat!("assert_eq!(", stringify!($Ty), "::new(101).unwrap().log10(), 2);")]
+ /// ```
+ #[unstable(feature = "int_log", issue = "70887")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn log10(self) -> u32 {
+ super::int_log10::$Int(self.0)
+ }
+ }
+ )+
+ }
+}
+
+nonzero_unsigned_operations! {
+ NonZeroU8(u8);
+ NonZeroU16(u16);
+ NonZeroU32(u32);
+ NonZeroU64(u64);
+ NonZeroU128(u128);
+ NonZeroUsize(usize);
+}
+
+// A bunch of methods for signed nonzero types only.
+macro_rules! nonzero_signed_operations {
+ ( $( $Ty: ident($Int: ty) -> $Uty: ident($Uint: ty); )+ ) => {
+ $(
+ impl $Ty {
+ /// Computes the absolute value of self.
+ #[doc = concat!("See [`", stringify!($Int), "::abs`]")]
+ /// for documentation on overflow behaviour.
+ ///
+ /// # Example
+ ///
+ /// ```
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+ ///
+ /// # fn main() { test().unwrap(); }
+ /// # fn test() -> Option<()> {
+ #[doc = concat!("let pos = ", stringify!($Ty), "::new(1)?;")]
+ #[doc = concat!("let neg = ", stringify!($Ty), "::new(-1)?;")]
+ ///
+ /// assert_eq!(pos, pos.abs());
+ /// assert_eq!(pos, neg.abs());
+ /// # Some(())
+ /// # }
+ /// ```
+ #[stable(feature = "nonzero_checked_ops", since = "1.64.0")]
+ #[rustc_const_stable(feature = "const_nonzero_checked_ops", since = "1.64.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn abs(self) -> $Ty {
+ // SAFETY: This cannot overflow to zero.
+ unsafe { $Ty::new_unchecked(self.get().abs()) }
+ }
+
+ /// Checked absolute value.
+            /// Checks for overflow and returns [`None`] if
+ #[doc = concat!("`self == ", stringify!($Int), "::MIN`.")]
+ /// The result cannot be zero.
+ ///
+ /// # Example
+ ///
+ /// ```
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+ ///
+ /// # fn main() { test().unwrap(); }
+ /// # fn test() -> Option<()> {
+ #[doc = concat!("let pos = ", stringify!($Ty), "::new(1)?;")]
+ #[doc = concat!("let neg = ", stringify!($Ty), "::new(-1)?;")]
+ #[doc = concat!("let min = ", stringify!($Ty), "::new(",
+ stringify!($Int), "::MIN)?;")]
+ ///
+ /// assert_eq!(Some(pos), neg.checked_abs());
+ /// assert_eq!(None, min.checked_abs());
+ /// # Some(())
+ /// # }
+ /// ```
+ #[stable(feature = "nonzero_checked_ops", since = "1.64.0")]
+ #[rustc_const_stable(feature = "const_nonzero_checked_ops", since = "1.64.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_abs(self) -> Option<$Ty> {
+ if let Some(nz) = self.get().checked_abs() {
+ // SAFETY: absolute value of nonzero cannot yield zero values.
+ Some(unsafe { $Ty::new_unchecked(nz) })
+ } else {
+ None
+ }
+ }
+
+            /// Computes the absolute value of `self`,
+            /// returning it along with a flag indicating overflow; see
+ #[doc = concat!("[`", stringify!($Int), "::overflowing_abs`].")]
+ ///
+ /// # Example
+ ///
+ /// ```
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+ ///
+ /// # fn main() { test().unwrap(); }
+ /// # fn test() -> Option<()> {
+ #[doc = concat!("let pos = ", stringify!($Ty), "::new(1)?;")]
+ #[doc = concat!("let neg = ", stringify!($Ty), "::new(-1)?;")]
+ #[doc = concat!("let min = ", stringify!($Ty), "::new(",
+ stringify!($Int), "::MIN)?;")]
+ ///
+ /// assert_eq!((pos, false), pos.overflowing_abs());
+ /// assert_eq!((pos, false), neg.overflowing_abs());
+ /// assert_eq!((min, true), min.overflowing_abs());
+ /// # Some(())
+ /// # }
+ /// ```
+ #[stable(feature = "nonzero_checked_ops", since = "1.64.0")]
+ #[rustc_const_stable(feature = "const_nonzero_checked_ops", since = "1.64.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn overflowing_abs(self) -> ($Ty, bool) {
+ let (nz, flag) = self.get().overflowing_abs();
+ (
+ // SAFETY: absolute value of nonzero cannot yield zero values.
+ unsafe { $Ty::new_unchecked(nz) },
+ flag,
+ )
+ }
+
+ /// Saturating absolute value, see
+ #[doc = concat!("[`", stringify!($Int), "::saturating_abs`].")]
+ ///
+ /// # Example
+ ///
+ /// ```
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+ ///
+ /// # fn main() { test().unwrap(); }
+ /// # fn test() -> Option<()> {
+ #[doc = concat!("let pos = ", stringify!($Ty), "::new(1)?;")]
+ #[doc = concat!("let neg = ", stringify!($Ty), "::new(-1)?;")]
+ #[doc = concat!("let min = ", stringify!($Ty), "::new(",
+ stringify!($Int), "::MIN)?;")]
+ #[doc = concat!("let min_plus = ", stringify!($Ty), "::new(",
+ stringify!($Int), "::MIN + 1)?;")]
+ #[doc = concat!("let max = ", stringify!($Ty), "::new(",
+ stringify!($Int), "::MAX)?;")]
+ ///
+ /// assert_eq!(pos, pos.saturating_abs());
+ /// assert_eq!(pos, neg.saturating_abs());
+ /// assert_eq!(max, min.saturating_abs());
+ /// assert_eq!(max, min_plus.saturating_abs());
+ /// # Some(())
+ /// # }
+ /// ```
+ #[stable(feature = "nonzero_checked_ops", since = "1.64.0")]
+ #[rustc_const_stable(feature = "const_nonzero_checked_ops", since = "1.64.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn saturating_abs(self) -> $Ty {
+ // SAFETY: absolute value of nonzero cannot yield zero values.
+ unsafe { $Ty::new_unchecked(self.get().saturating_abs()) }
+ }
+
+ /// Wrapping absolute value, see
+ #[doc = concat!("[`", stringify!($Int), "::wrapping_abs`].")]
+ ///
+ /// # Example
+ ///
+ /// ```
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+ ///
+ /// # fn main() { test().unwrap(); }
+ /// # fn test() -> Option<()> {
+ #[doc = concat!("let pos = ", stringify!($Ty), "::new(1)?;")]
+ #[doc = concat!("let neg = ", stringify!($Ty), "::new(-1)?;")]
+ #[doc = concat!("let min = ", stringify!($Ty), "::new(",
+ stringify!($Int), "::MIN)?;")]
+ #[doc = concat!("let max = ", stringify!($Ty), "::new(",
+ stringify!($Int), "::MAX)?;")]
+ ///
+ /// assert_eq!(pos, pos.wrapping_abs());
+ /// assert_eq!(pos, neg.wrapping_abs());
+ /// assert_eq!(min, min.wrapping_abs());
+ /// # // FIXME: add once Neg is implemented?
+ /// # // assert_eq!(max, (-max).wrapping_abs());
+ /// # Some(())
+ /// # }
+ /// ```
+ #[stable(feature = "nonzero_checked_ops", since = "1.64.0")]
+ #[rustc_const_stable(feature = "const_nonzero_checked_ops", since = "1.64.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn wrapping_abs(self) -> $Ty {
+ // SAFETY: absolute value of nonzero cannot yield zero values.
+ unsafe { $Ty::new_unchecked(self.get().wrapping_abs()) }
+ }
+
+ /// Computes the absolute value of self
+ /// without any wrapping or panicking.
+ ///
+ /// # Example
+ ///
+ /// ```
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+ #[doc = concat!("# use std::num::", stringify!($Uty), ";")]
+ ///
+ /// # fn main() { test().unwrap(); }
+ /// # fn test() -> Option<()> {
+ #[doc = concat!("let u_pos = ", stringify!($Uty), "::new(1)?;")]
+ #[doc = concat!("let i_pos = ", stringify!($Ty), "::new(1)?;")]
+ #[doc = concat!("let i_neg = ", stringify!($Ty), "::new(-1)?;")]
+ #[doc = concat!("let i_min = ", stringify!($Ty), "::new(",
+ stringify!($Int), "::MIN)?;")]
+ #[doc = concat!("let u_max = ", stringify!($Uty), "::new(",
+ stringify!($Uint), "::MAX / 2 + 1)?;")]
+ ///
+ /// assert_eq!(u_pos, i_pos.unsigned_abs());
+ /// assert_eq!(u_pos, i_neg.unsigned_abs());
+ /// assert_eq!(u_max, i_min.unsigned_abs());
+ /// # Some(())
+ /// # }
+ /// ```
+ #[stable(feature = "nonzero_checked_ops", since = "1.64.0")]
+ #[rustc_const_stable(feature = "const_nonzero_checked_ops", since = "1.64.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn unsigned_abs(self) -> $Uty {
+ // SAFETY: absolute value of nonzero cannot yield zero values.
+ unsafe { $Uty::new_unchecked(self.get().unsigned_abs()) }
+ }
+ }
+ )+
+ }
+}
+
+nonzero_signed_operations! {
+ NonZeroI8(i8) -> NonZeroU8(u8);
+ NonZeroI16(i16) -> NonZeroU16(u16);
+ NonZeroI32(i32) -> NonZeroU32(u32);
+ NonZeroI64(i64) -> NonZeroU64(u64);
+ NonZeroI128(i128) -> NonZeroU128(u128);
+ NonZeroIsize(isize) -> NonZeroUsize(usize);
+}
+
+// A bunch of methods for both signed and unsigned nonzero types.
+macro_rules! nonzero_unsigned_signed_operations {
+ ( $( $signedness:ident $Ty: ident($Int: ty); )+ ) => {
+ $(
+ impl $Ty {
+            /// Multiplies two non-zero integers together.
+            /// Checks for overflow and returns [`None`] on overflow.
+ /// As a consequence, the result cannot wrap to zero.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+ ///
+ /// # fn main() { test().unwrap(); }
+ /// # fn test() -> Option<()> {
+ #[doc = concat!("let two = ", stringify!($Ty), "::new(2)?;")]
+ #[doc = concat!("let four = ", stringify!($Ty), "::new(4)?;")]
+ #[doc = concat!("let max = ", stringify!($Ty), "::new(",
+ stringify!($Int), "::MAX)?;")]
+ ///
+ /// assert_eq!(Some(four), two.checked_mul(two));
+ /// assert_eq!(None, max.checked_mul(two));
+ /// # Some(())
+ /// # }
+ /// ```
+ #[stable(feature = "nonzero_checked_ops", since = "1.64.0")]
+ #[rustc_const_stable(feature = "const_nonzero_checked_ops", since = "1.64.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_mul(self, other: $Ty) -> Option<$Ty> {
+ if let Some(result) = self.get().checked_mul(other.get()) {
+ // SAFETY: checked_mul returns None on overflow
+                    // and `other` is also non-zero
+ // so the result cannot be zero.
+ Some(unsafe { $Ty::new_unchecked(result) })
+ } else {
+ None
+ }
+ }
+
+            /// Multiplies two non-zero integers together.
+            #[doc = concat!("Returns [`", stringify!($Int), "::MAX`] on overflow.")]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+ ///
+ /// # fn main() { test().unwrap(); }
+ /// # fn test() -> Option<()> {
+ #[doc = concat!("let two = ", stringify!($Ty), "::new(2)?;")]
+ #[doc = concat!("let four = ", stringify!($Ty), "::new(4)?;")]
+ #[doc = concat!("let max = ", stringify!($Ty), "::new(",
+ stringify!($Int), "::MAX)?;")]
+ ///
+ /// assert_eq!(four, two.saturating_mul(two));
+ /// assert_eq!(max, four.saturating_mul(max));
+ /// # Some(())
+ /// # }
+ /// ```
+ #[stable(feature = "nonzero_checked_ops", since = "1.64.0")]
+ #[rustc_const_stable(feature = "const_nonzero_checked_ops", since = "1.64.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn saturating_mul(self, other: $Ty) -> $Ty {
+ // SAFETY: saturating_mul returns u*::MAX on overflow
+                // and `other` is also non-zero
+ // so the result cannot be zero.
+ unsafe { $Ty::new_unchecked(self.get().saturating_mul(other.get())) }
+ }
+
+            /// Multiplies two non-zero integers together,
+            /// assuming overflow cannot occur.
+ /// Overflow is unchecked, and it is undefined behaviour to overflow
+ /// *even if the result would wrap to a non-zero value*.
+ /// The behaviour is undefined as soon as
+ #[doc = sign_dependent_expr!{
+ $signedness ?
+ if signed {
+ concat!("`self * rhs > ", stringify!($Int), "::MAX`, ",
+ "or `self * rhs < ", stringify!($Int), "::MIN`.")
+ }
+ if unsigned {
+ concat!("`self * rhs > ", stringify!($Int), "::MAX`.")
+ }
+ }]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(nonzero_ops)]
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+ ///
+ /// # fn main() { test().unwrap(); }
+ /// # fn test() -> Option<()> {
+ #[doc = concat!("let two = ", stringify!($Ty), "::new(2)?;")]
+ #[doc = concat!("let four = ", stringify!($Ty), "::new(4)?;")]
+ ///
+ /// assert_eq!(four, unsafe { two.unchecked_mul(two) });
+ /// # Some(())
+ /// # }
+ /// ```
+ #[unstable(feature = "nonzero_ops", issue = "84186")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const unsafe fn unchecked_mul(self, other: $Ty) -> $Ty {
+ // SAFETY: The caller ensures there is no overflow.
+ unsafe { $Ty::new_unchecked(self.get().unchecked_mul(other.get())) }
+ }
+
+            /// Raises a non-zero value to an integer power.
+            /// Checks for overflow and returns [`None`] on overflow.
+ /// As a consequence, the result cannot wrap to zero.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+ ///
+ /// # fn main() { test().unwrap(); }
+ /// # fn test() -> Option<()> {
+ #[doc = concat!("let three = ", stringify!($Ty), "::new(3)?;")]
+ #[doc = concat!("let twenty_seven = ", stringify!($Ty), "::new(27)?;")]
+ #[doc = concat!("let half_max = ", stringify!($Ty), "::new(",
+ stringify!($Int), "::MAX / 2)?;")]
+ ///
+ /// assert_eq!(Some(twenty_seven), three.checked_pow(3));
+ /// assert_eq!(None, half_max.checked_pow(3));
+ /// # Some(())
+ /// # }
+ /// ```
+ #[stable(feature = "nonzero_checked_ops", since = "1.64.0")]
+ #[rustc_const_stable(feature = "const_nonzero_checked_ops", since = "1.64.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_pow(self, other: u32) -> Option<$Ty> {
+ if let Some(result) = self.get().checked_pow(other) {
+ // SAFETY: checked_pow returns None on overflow
+ // so the result cannot be zero.
+ Some(unsafe { $Ty::new_unchecked(result) })
+ } else {
+ None
+ }
+ }
+
+            /// Raises a non-zero value to an integer power.
+ #[doc = sign_dependent_expr!{
+ $signedness ?
+ if signed {
+ concat!("Return [`", stringify!($Int), "::MIN`] ",
+ "or [`", stringify!($Int), "::MAX`] on overflow.")
+ }
+ if unsigned {
+ concat!("Return [`", stringify!($Int), "::MAX`] on overflow.")
+ }
+ }]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+ ///
+ /// # fn main() { test().unwrap(); }
+ /// # fn test() -> Option<()> {
+ #[doc = concat!("let three = ", stringify!($Ty), "::new(3)?;")]
+ #[doc = concat!("let twenty_seven = ", stringify!($Ty), "::new(27)?;")]
+ #[doc = concat!("let max = ", stringify!($Ty), "::new(",
+ stringify!($Int), "::MAX)?;")]
+ ///
+ /// assert_eq!(twenty_seven, three.saturating_pow(3));
+ /// assert_eq!(max, max.saturating_pow(3));
+ /// # Some(())
+ /// # }
+ /// ```
+ #[stable(feature = "nonzero_checked_ops", since = "1.64.0")]
+ #[rustc_const_stable(feature = "const_nonzero_checked_ops", since = "1.64.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn saturating_pow(self, other: u32) -> $Ty {
+ // SAFETY: saturating_pow returns u*::MAX on overflow
+ // so the result cannot be zero.
+ unsafe { $Ty::new_unchecked(self.get().saturating_pow(other)) }
+ }
+ }
+ )+
+ }
+}
+
+// Use this when the generated code should differ between signed and unsigned types.
+macro_rules! sign_dependent_expr {
+ (signed ? if signed { $signed_case:expr } if unsigned { $unsigned_case:expr } ) => {
+ $signed_case
+ };
+ (unsigned ? if signed { $signed_case:expr } if unsigned { $unsigned_case:expr } ) => {
+ $unsigned_case
+ };
+}
+
+nonzero_unsigned_signed_operations! {
+ unsigned NonZeroU8(u8);
+ unsigned NonZeroU16(u16);
+ unsigned NonZeroU32(u32);
+ unsigned NonZeroU64(u64);
+ unsigned NonZeroU128(u128);
+ unsigned NonZeroUsize(usize);
+ signed NonZeroI8(i8);
+ signed NonZeroI16(i16);
+ signed NonZeroI32(i32);
+ signed NonZeroI64(i64);
+ signed NonZeroI128(i128);
+ signed NonZeroIsize(isize);
+}
+
+macro_rules! nonzero_unsigned_is_power_of_two {
+ ( $( $Ty: ident )+ ) => {
+ $(
+ impl $Ty {
+
+ /// Returns `true` if and only if `self == (1 << k)` for some `k`.
+ ///
+ /// On many architectures, this function can perform better than `is_power_of_two()`
+ /// on the underlying integer type, as special handling of zero can be avoided.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let eight = std::num::", stringify!($Ty), "::new(8).unwrap();")]
+ /// assert!(eight.is_power_of_two());
+ #[doc = concat!("let ten = std::num::", stringify!($Ty), "::new(10).unwrap();")]
+ /// assert!(!ten.is_power_of_two());
+ /// ```
+ #[must_use]
+ #[stable(feature = "nonzero_is_power_of_two", since = "1.59.0")]
+ #[rustc_const_stable(feature = "nonzero_is_power_of_two", since = "1.59.0")]
+ #[inline]
+ pub const fn is_power_of_two(self) -> bool {
+ // LLVM 11 normalizes `unchecked_sub(x, 1) & x == 0` to the implementation seen here.
+ // On the basic x86-64 target, this saves 3 instructions for the zero check.
+ // On x86_64 with BMI1, being nonzero lets it codegen to `BLSR`, which saves an instruction
+ // compared to the `POPCNT` implementation on the underlying integer type.
+
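+                // Since `self` is nonzero, its popcount is at least 1, so `< 2`
+                // means exactly one bit is set.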
+ intrinsics::ctpop(self.get()) < 2
+ }
+
+ }
+ )+
+ }
+}
+
+nonzero_unsigned_is_power_of_two! { NonZeroU8 NonZeroU16 NonZeroU32 NonZeroU64 NonZeroU128 NonZeroUsize }
+
+macro_rules! nonzero_min_max_unsigned {
+ ( $( $Ty: ident($Int: ident); )+ ) => {
+ $(
+ impl $Ty {
+ /// The smallest value that can be represented by this non-zero
+ /// integer type, 1.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(nonzero_min_max)]
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+ ///
+ #[doc = concat!("assert_eq!(", stringify!($Ty), "::MIN.get(), 1", stringify!($Int), ");")]
+ /// ```
+ #[unstable(feature = "nonzero_min_max", issue = "89065")]
+ pub const MIN: Self = Self::new(1).unwrap();
+
+ /// The largest value that can be represented by this non-zero
+ /// integer type,
+ #[doc = concat!("equal to [`", stringify!($Int), "::MAX`].")]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(nonzero_min_max)]
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+ ///
+ #[doc = concat!("assert_eq!(", stringify!($Ty), "::MAX.get(), ", stringify!($Int), "::MAX);")]
+ /// ```
+ #[unstable(feature = "nonzero_min_max", issue = "89065")]
+ pub const MAX: Self = Self::new(<$Int>::MAX).unwrap();
+ }
+ )+
+ }
+}
+
+macro_rules! nonzero_min_max_signed {
+ ( $( $Ty: ident($Int: ident); )+ ) => {
+ $(
+ impl $Ty {
+ /// The smallest value that can be represented by this non-zero
+ /// integer type,
+ #[doc = concat!("equal to [`", stringify!($Int), "::MIN`].")]
+ ///
+ /// Note: While most integer types are defined for every whole
+ /// number between `MIN` and `MAX`, signed non-zero integers are
+ /// a special case. They have a "gap" at 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(nonzero_min_max)]
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+ ///
+ #[doc = concat!("assert_eq!(", stringify!($Ty), "::MIN.get(), ", stringify!($Int), "::MIN);")]
+ /// ```
+ #[unstable(feature = "nonzero_min_max", issue = "89065")]
+ pub const MIN: Self = Self::new(<$Int>::MIN).unwrap();
+
+ /// The largest value that can be represented by this non-zero
+ /// integer type,
+ #[doc = concat!("equal to [`", stringify!($Int), "::MAX`].")]
+ ///
+ /// Note: While most integer types are defined for every whole
+ /// number between `MIN` and `MAX`, signed non-zero integers are
+ /// a special case. They have a "gap" at 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(nonzero_min_max)]
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+ ///
+ #[doc = concat!("assert_eq!(", stringify!($Ty), "::MAX.get(), ", stringify!($Int), "::MAX);")]
+ /// ```
+ #[unstable(feature = "nonzero_min_max", issue = "89065")]
+ pub const MAX: Self = Self::new(<$Int>::MAX).unwrap();
+ }
+ )+
+ }
+}
+
+nonzero_min_max_unsigned! {
+ NonZeroU8(u8);
+ NonZeroU16(u16);
+ NonZeroU32(u32);
+ NonZeroU64(u64);
+ NonZeroU128(u128);
+ NonZeroUsize(usize);
+}
+
+nonzero_min_max_signed! {
+ NonZeroI8(i8);
+ NonZeroI16(i16);
+ NonZeroI32(i32);
+ NonZeroI64(i64);
+ NonZeroI128(i128);
+ NonZeroIsize(isize);
+}
+
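Until the `nonzero_min_max` feature stabilizes, the same bounds can be spelled with stable `new`; a sketch that also shows the zero "gap" the signed docs above mention:

```rust
use std::num::NonZeroI8;

fn main() {
    // Stable equivalents of the unstable MIN/MAX constants above.
    let min = NonZeroI8::new(i8::MIN).unwrap();
    let max = NonZeroI8::new(i8::MAX).unwrap();
    assert_eq!(min.get(), -128);
    assert_eq!(max.get(), 127);
    // The "gap" at zero: every value in MIN..=MAX is representable except 0.
    assert!(NonZeroI8::new(0).is_none());
}
```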
+macro_rules! nonzero_bits {
+ ( $( $Ty: ident($Int: ty); )+ ) => {
+ $(
+ impl $Ty {
+ /// The size of this non-zero integer type in bits.
+ ///
+ #[doc = concat!("This value is equal to [`", stringify!($Int), "::BITS`].")]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(nonzero_bits)]
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+ ///
+ #[doc = concat!("assert_eq!(", stringify!($Ty), "::BITS, ", stringify!($Int), "::BITS);")]
+ /// ```
+ #[unstable(feature = "nonzero_bits", issue = "94881")]
+ pub const BITS: u32 = <$Int>::BITS;
+ }
+ )+
+ }
+}
+
+nonzero_bits! {
+ NonZeroU8(u8);
+ NonZeroI8(i8);
+ NonZeroU16(u16);
+ NonZeroI16(i16);
+ NonZeroU32(u32);
+ NonZeroI32(i32);
+ NonZeroU64(u64);
+ NonZeroI64(i64);
+ NonZeroU128(u128);
+ NonZeroI128(i128);
+ NonZeroUsize(usize);
+ NonZeroIsize(isize);
+}
diff --git a/library/core/src/num/saturating.rs b/library/core/src/num/saturating.rs
new file mode 100644
index 000000000..8982473b2
--- /dev/null
+++ b/library/core/src/num/saturating.rs
@@ -0,0 +1,1081 @@
+//! Definitions of `Saturating<T>`.
+
+use crate::fmt;
+use crate::ops::{Add, AddAssign, BitAnd, BitAndAssign, BitOr, BitOrAssign};
+use crate::ops::{BitXor, BitXorAssign, Div, DivAssign};
+use crate::ops::{Mul, MulAssign, Neg, Not, Rem, RemAssign};
+use crate::ops::{Shl, ShlAssign, Shr, ShrAssign, Sub, SubAssign};
+
+/// Provides intentionally-saturating arithmetic on `T`.
+///
+/// Operations like `+` on `u32` values are intended to never overflow,
+/// and in some debug configurations overflow is detected and results
+/// in a panic. While most arithmetic falls into this category, some
+/// code explicitly expects and relies upon saturating arithmetic.
+///
+/// Saturating arithmetic can be achieved either through methods like
+/// `saturating_add`, or through the `Saturating<T>` type, which says that
+/// all standard arithmetic operations on the underlying value are
+/// intended to have saturating semantics.
+///
+/// The underlying value can be retrieved through the `.0` index of the
+/// `Saturating` tuple.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(saturating_int_impl)]
+/// use std::num::Saturating;
+///
+/// let max = Saturating(u32::MAX);
+/// let one = Saturating(1u32);
+///
+/// assert_eq!(u32::MAX, (max + one).0);
+/// ```
+#[unstable(feature = "saturating_int_impl", issue = "87920")]
+#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Default, Hash)]
+#[repr(transparent)]
+pub struct Saturating<T>(#[unstable(feature = "saturating_int_impl", issue = "87920")] pub T);
+
+#[unstable(feature = "saturating_int_impl", issue = "87920")]
+impl<T: fmt::Debug> fmt::Debug for Saturating<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
+
+#[unstable(feature = "saturating_int_impl", issue = "87920")]
+impl<T: fmt::Display> fmt::Display for Saturating<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
+
+#[unstable(feature = "saturating_int_impl", issue = "87920")]
+impl<T: fmt::Binary> fmt::Binary for Saturating<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
+
+#[unstable(feature = "saturating_int_impl", issue = "87920")]
+impl<T: fmt::Octal> fmt::Octal for Saturating<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
+
+#[unstable(feature = "saturating_int_impl", issue = "87920")]
+impl<T: fmt::LowerHex> fmt::LowerHex for Saturating<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
+
+#[unstable(feature = "saturating_int_impl", issue = "87920")]
+impl<T: fmt::UpperHex> fmt::UpperHex for Saturating<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
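All six formatting impls forward to field `0`, so a `Saturating` value prints exactly like its inner integer, including alternate flags and width. A sketch of the same delegation pattern on a stand-in newtype (since `Saturating` itself is unstable in this snapshot):

```rust
use std::fmt;

// Stand-in for the pattern above: the formatting trait forwards to
// field 0, making the wrapper formatting-transparent.
struct Wrapper<T>(T);

impl<T: fmt::Binary> fmt::Binary for Wrapper<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0.fmt(f) // delegation preserves flags like `#` and width
    }
}

fn main() {
    assert_eq!(format!("{:#010b}", Wrapper(5u8)), format!("{:#010b}", 5u8));
}
```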
+#[allow(unused_macros)]
+macro_rules! sh_impl_signed {
+ ($t:ident, $f:ident) => {
+ // FIXME what is the correct implementation here? see discussion https://github.com/rust-lang/rust/pull/87921#discussion_r695870065
+ //
+ // #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ // impl Shl<$f> for Saturating<$t> {
+ // type Output = Saturating<$t>;
+ //
+ // #[inline]
+ // fn shl(self, other: $f) -> Saturating<$t> {
+ // if other < 0 {
+ // Saturating(self.0.shr((-other & self::shift_max::$t as $f) as u32))
+ // } else {
+ // Saturating(self.0.shl((other & self::shift_max::$t as $f) as u32))
+ // }
+ // }
+ // }
+ // forward_ref_binop! { impl Shl, shl for Saturating<$t>, $f,
+ // #[unstable(feature = "saturating_int_impl", issue = "87920")] }
+ //
+ // #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ // impl ShlAssign<$f> for Saturating<$t> {
+ // #[inline]
+ // fn shl_assign(&mut self, other: $f) {
+ // *self = *self << other;
+ // }
+ // }
+ // forward_ref_op_assign! { impl ShlAssign, shl_assign for Saturating<$t>, $f }
+
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ impl Shr<$f> for Saturating<$t> {
+ type Output = Saturating<$t>;
+
+ #[inline]
+ fn shr(self, other: $f) -> Saturating<$t> {
+ if other < 0 {
+ Saturating(self.0.shl((-other & self::shift_max::$t as $f) as u32))
+ } else {
+ Saturating(self.0.shr((other & self::shift_max::$t as $f) as u32))
+ }
+ }
+ }
+ forward_ref_binop! { impl Shr, shr for Saturating<$t>, $f,
+ #[unstable(feature = "saturating_int_impl", issue = "87920")] }
+
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ impl ShrAssign<$f> for Saturating<$t> {
+ #[inline]
+ fn shr_assign(&mut self, other: $f) {
+ *self = *self >> other;
+ }
+ }
+ forward_ref_op_assign! { impl ShrAssign, shr_assign for Saturating<$t>, $f }
+ };
+}
+
+macro_rules! sh_impl_unsigned {
+ ($t:ident, $f:ident) => {
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ impl Shl<$f> for Saturating<$t> {
+ type Output = Saturating<$t>;
+
+ #[inline]
+ fn shl(self, other: $f) -> Saturating<$t> {
+ Saturating(self.0.wrapping_shl(other as u32))
+ }
+ }
+ forward_ref_binop! { impl Shl, shl for Saturating<$t>, $f,
+ #[unstable(feature = "saturating_int_impl", issue = "87920")] }
+
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ impl ShlAssign<$f> for Saturating<$t> {
+ #[inline]
+ fn shl_assign(&mut self, other: $f) {
+ *self = *self << other;
+ }
+ }
+ forward_ref_op_assign! { impl ShlAssign, shl_assign for Saturating<$t>, $f }
+
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ impl Shr<$f> for Saturating<$t> {
+ type Output = Saturating<$t>;
+
+ #[inline]
+ fn shr(self, other: $f) -> Saturating<$t> {
+ Saturating(self.0.wrapping_shr(other as u32))
+ }
+ }
+ forward_ref_binop! { impl Shr, shr for Saturating<$t>, $f,
+ #[unstable(feature = "saturating_int_impl", issue = "87920")] }
+
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ impl ShrAssign<$f> for Saturating<$t> {
+ #[inline]
+ fn shr_assign(&mut self, other: $f) {
+ *self = *self >> other;
+ }
+ }
+ forward_ref_op_assign! { impl ShrAssign, shr_assign for Saturating<$t>, $f }
+ };
+}
+
+// FIXME (#23545): uncomment the remaining impls
+macro_rules! sh_impl_all {
+ ($($t:ident)*) => ($(
+ //sh_impl_unsigned! { $t, u8 }
+ //sh_impl_unsigned! { $t, u16 }
+ //sh_impl_unsigned! { $t, u32 }
+ //sh_impl_unsigned! { $t, u64 }
+ //sh_impl_unsigned! { $t, u128 }
+ sh_impl_unsigned! { $t, usize }
+
+ //sh_impl_signed! { $t, i8 }
+ //sh_impl_signed! { $t, i16 }
+ //sh_impl_signed! { $t, i32 }
+ //sh_impl_signed! { $t, i64 }
+ //sh_impl_signed! { $t, i128 }
+ //sh_impl_signed! { $t, isize }
+ )*)
+}
+
+sh_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize }
+
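Note that only the `usize` shift-amount impls are enabled, and they route through `wrapping_shl`/`wrapping_shr`: the *shift amount* wraps modulo the bit width; the value itself is not saturated. A sketch of that semantics using the underlying stable primitive methods:

```rust
fn main() {
    // What `Saturating(1u8) << n` expands to, per the impls above:
    assert_eq!(1u8.wrapping_shl(8), 1); // 8 % 8 == 0, so no shift at all
    assert_eq!(1u8.wrapping_shl(9), 2); // 9 % 8 == 1
    // Contrast: the plain `<<` operator panics on an oversized shift
    // amount in debug builds instead of wrapping it.
}
```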
+// FIXME(30524): impl Op<T> for Saturating<T>, impl OpAssign<T> for Saturating<T>
+macro_rules! saturating_impl {
+ ($($t:ty)*) => ($(
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ impl Add for Saturating<$t> {
+ type Output = Saturating<$t>;
+
+ #[inline]
+ fn add(self, other: Saturating<$t>) -> Saturating<$t> {
+ Saturating(self.0.saturating_add(other.0))
+ }
+ }
+ forward_ref_binop! { impl Add, add for Saturating<$t>, Saturating<$t>,
+ #[unstable(feature = "saturating_int_impl", issue = "87920")] }
+
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ impl AddAssign for Saturating<$t> {
+ #[inline]
+ fn add_assign(&mut self, other: Saturating<$t>) {
+ *self = *self + other;
+ }
+ }
+ forward_ref_op_assign! { impl AddAssign, add_assign for Saturating<$t>, Saturating<$t> }
+
+ #[unstable(feature = "saturating_int_assign_impl", issue = "92354")]
+ impl AddAssign<$t> for Saturating<$t> {
+ #[inline]
+ fn add_assign(&mut self, other: $t) {
+ *self = *self + Saturating(other);
+ }
+ }
+ forward_ref_op_assign! { impl AddAssign, add_assign for Saturating<$t>, $t }
+
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ impl Sub for Saturating<$t> {
+ type Output = Saturating<$t>;
+
+ #[inline]
+ fn sub(self, other: Saturating<$t>) -> Saturating<$t> {
+ Saturating(self.0.saturating_sub(other.0))
+ }
+ }
+ forward_ref_binop! { impl Sub, sub for Saturating<$t>, Saturating<$t>,
+ #[unstable(feature = "saturating_int_impl", issue = "87920")] }
+
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ impl SubAssign for Saturating<$t> {
+ #[inline]
+ fn sub_assign(&mut self, other: Saturating<$t>) {
+ *self = *self - other;
+ }
+ }
+ forward_ref_op_assign! { impl SubAssign, sub_assign for Saturating<$t>, Saturating<$t> }
+
+ #[unstable(feature = "saturating_int_assign_impl", issue = "92354")]
+ impl SubAssign<$t> for Saturating<$t> {
+ #[inline]
+ fn sub_assign(&mut self, other: $t) {
+ *self = *self - Saturating(other);
+ }
+ }
+ forward_ref_op_assign! { impl SubAssign, sub_assign for Saturating<$t>, $t }
+
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ impl Mul for Saturating<$t> {
+ type Output = Saturating<$t>;
+
+ #[inline]
+ fn mul(self, other: Saturating<$t>) -> Saturating<$t> {
+ Saturating(self.0.saturating_mul(other.0))
+ }
+ }
+ forward_ref_binop! { impl Mul, mul for Saturating<$t>, Saturating<$t>,
+ #[unstable(feature = "saturating_int_impl", issue = "87920")] }
+
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ impl MulAssign for Saturating<$t> {
+ #[inline]
+ fn mul_assign(&mut self, other: Saturating<$t>) {
+ *self = *self * other;
+ }
+ }
+ forward_ref_op_assign! { impl MulAssign, mul_assign for Saturating<$t>, Saturating<$t> }
+
+ #[unstable(feature = "saturating_int_assign_impl", issue = "92354")]
+ impl MulAssign<$t> for Saturating<$t> {
+ #[inline]
+ fn mul_assign(&mut self, other: $t) {
+ *self = *self * Saturating(other);
+ }
+ }
+ forward_ref_op_assign! { impl MulAssign, mul_assign for Saturating<$t>, $t }
+
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(saturating_int_impl)]
+ /// use std::num::Saturating;
+ ///
+ #[doc = concat!("assert_eq!(Saturating(2", stringify!($t), "), Saturating(5", stringify!($t), ") / Saturating(2));")]
+ #[doc = concat!("assert_eq!(Saturating(", stringify!($t), "::MAX), Saturating(", stringify!($t), "::MAX) / Saturating(1));")]
+ #[doc = concat!("assert_eq!(Saturating(", stringify!($t), "::MIN), Saturating(", stringify!($t), "::MIN) / Saturating(1));")]
+ /// ```
+ ///
+ /// ```should_panic
+ /// #![feature(saturating_int_impl)]
+ /// use std::num::Saturating;
+ ///
+ #[doc = concat!("let _ = Saturating(0", stringify!($t), ") / Saturating(0);")]
+ /// ```
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ impl Div for Saturating<$t> {
+ type Output = Saturating<$t>;
+
+ #[inline]
+ fn div(self, other: Saturating<$t>) -> Saturating<$t> {
+ Saturating(self.0.saturating_div(other.0))
+ }
+ }
+ forward_ref_binop! { impl Div, div for Saturating<$t>, Saturating<$t>,
+ #[unstable(feature = "saturating_int_impl", issue = "87920")] }
+
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ impl DivAssign for Saturating<$t> {
+ #[inline]
+ fn div_assign(&mut self, other: Saturating<$t>) {
+ *self = *self / other;
+ }
+ }
+ forward_ref_op_assign! { impl DivAssign, div_assign for Saturating<$t>, Saturating<$t> }
+
+ #[unstable(feature = "saturating_int_assign_impl", issue = "92354")]
+ impl DivAssign<$t> for Saturating<$t> {
+ #[inline]
+ fn div_assign(&mut self, other: $t) {
+ *self = *self / Saturating(other);
+ }
+ }
+ forward_ref_op_assign! { impl DivAssign, div_assign for Saturating<$t>, $t }
+
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ impl Rem for Saturating<$t> {
+ type Output = Saturating<$t>;
+
+ #[inline]
+ fn rem(self, other: Saturating<$t>) -> Saturating<$t> {
+ Saturating(self.0.rem(other.0))
+ }
+ }
+ forward_ref_binop! { impl Rem, rem for Saturating<$t>, Saturating<$t>,
+ #[unstable(feature = "saturating_int_impl", issue = "87920")] }
+
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ impl RemAssign for Saturating<$t> {
+ #[inline]
+ fn rem_assign(&mut self, other: Saturating<$t>) {
+ *self = *self % other;
+ }
+ }
+ forward_ref_op_assign! { impl RemAssign, rem_assign for Saturating<$t>, Saturating<$t> }
+
+ #[unstable(feature = "saturating_int_assign_impl", issue = "92354")]
+ impl RemAssign<$t> for Saturating<$t> {
+ #[inline]
+ fn rem_assign(&mut self, other: $t) {
+ *self = *self % Saturating(other);
+ }
+ }
+ forward_ref_op_assign! { impl RemAssign, rem_assign for Saturating<$t>, $t }
+
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ impl Not for Saturating<$t> {
+ type Output = Saturating<$t>;
+
+ #[inline]
+ fn not(self) -> Saturating<$t> {
+ Saturating(!self.0)
+ }
+ }
+ forward_ref_unop! { impl Not, not for Saturating<$t>,
+ #[unstable(feature = "saturating_int_impl", issue = "87920")] }
+
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ impl BitXor for Saturating<$t> {
+ type Output = Saturating<$t>;
+
+ #[inline]
+ fn bitxor(self, other: Saturating<$t>) -> Saturating<$t> {
+ Saturating(self.0 ^ other.0)
+ }
+ }
+ forward_ref_binop! { impl BitXor, bitxor for Saturating<$t>, Saturating<$t>,
+ #[unstable(feature = "saturating_int_impl", issue = "87920")] }
+
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ impl BitXorAssign for Saturating<$t> {
+ #[inline]
+ fn bitxor_assign(&mut self, other: Saturating<$t>) {
+ *self = *self ^ other;
+ }
+ }
+ forward_ref_op_assign! { impl BitXorAssign, bitxor_assign for Saturating<$t>, Saturating<$t> }
+
+ #[unstable(feature = "saturating_int_assign_impl", issue = "92354")]
+ impl BitXorAssign<$t> for Saturating<$t> {
+ #[inline]
+ fn bitxor_assign(&mut self, other: $t) {
+ *self = *self ^ Saturating(other);
+ }
+ }
+ forward_ref_op_assign! { impl BitXorAssign, bitxor_assign for Saturating<$t>, $t }
+
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ impl BitOr for Saturating<$t> {
+ type Output = Saturating<$t>;
+
+ #[inline]
+ fn bitor(self, other: Saturating<$t>) -> Saturating<$t> {
+ Saturating(self.0 | other.0)
+ }
+ }
+ forward_ref_binop! { impl BitOr, bitor for Saturating<$t>, Saturating<$t>,
+ #[unstable(feature = "saturating_int_impl", issue = "87920")] }
+
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ impl BitOrAssign for Saturating<$t> {
+ #[inline]
+ fn bitor_assign(&mut self, other: Saturating<$t>) {
+ *self = *self | other;
+ }
+ }
+ forward_ref_op_assign! { impl BitOrAssign, bitor_assign for Saturating<$t>, Saturating<$t> }
+
+ #[unstable(feature = "saturating_int_assign_impl", issue = "92354")]
+ impl BitOrAssign<$t> for Saturating<$t> {
+ #[inline]
+ fn bitor_assign(&mut self, other: $t) {
+ *self = *self | Saturating(other);
+ }
+ }
+ forward_ref_op_assign! { impl BitOrAssign, bitor_assign for Saturating<$t>, $t }
+
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ impl BitAnd for Saturating<$t> {
+ type Output = Saturating<$t>;
+
+ #[inline]
+ fn bitand(self, other: Saturating<$t>) -> Saturating<$t> {
+ Saturating(self.0 & other.0)
+ }
+ }
+ forward_ref_binop! { impl BitAnd, bitand for Saturating<$t>, Saturating<$t>,
+ #[unstable(feature = "saturating_int_impl", issue = "87920")] }
+
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ impl BitAndAssign for Saturating<$t> {
+ #[inline]
+ fn bitand_assign(&mut self, other: Saturating<$t>) {
+ *self = *self & other;
+ }
+ }
+ forward_ref_op_assign! { impl BitAndAssign, bitand_assign for Saturating<$t>, Saturating<$t> }
+
+ #[unstable(feature = "saturating_int_assign_impl", issue = "92354")]
+ impl BitAndAssign<$t> for Saturating<$t> {
+ #[inline]
+ fn bitand_assign(&mut self, other: $t) {
+ *self = *self & Saturating(other);
+ }
+ }
+ forward_ref_op_assign! { impl BitAndAssign, bitand_assign for Saturating<$t>, $t }
+
+ )*)
+}
+
+saturating_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
+
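What the operator impls above expand to, written with the stable `saturating_*` methods on the primitives; a sketch of each clamp direction:

```rust
fn main() {
    assert_eq!(u8::MAX.saturating_add(1), u8::MAX);  // Saturating(MAX) + Saturating(1)
    assert_eq!(0u8.saturating_sub(1), 0);            // Saturating(0) - Saturating(1)
    assert_eq!(100i8.saturating_mul(2), i8::MAX);    // clamps instead of wrapping
    assert_eq!(i8::MIN.saturating_div(-1), i8::MAX); // the one Div overflow case
    // Rem simply delegates to `%`, so remainder-by-zero still panics,
    // matching the `should_panic` Div example above.
}
```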
+macro_rules! saturating_int_impl {
+ ($($t:ty)*) => ($(
+ impl Saturating<$t> {
+ /// Returns the smallest value that can be represented by this integer type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(saturating_int_impl)]
+ /// use std::num::Saturating;
+ ///
+ #[doc = concat!("assert_eq!(<Saturating<", stringify!($t), ">>::MIN, Saturating(", stringify!($t), "::MIN));")]
+ /// ```
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ pub const MIN: Self = Self(<$t>::MIN);
+
+ /// Returns the largest value that can be represented by this integer type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(saturating_int_impl)]
+ /// use std::num::Saturating;
+ ///
+ #[doc = concat!("assert_eq!(<Saturating<", stringify!($t), ">>::MAX, Saturating(", stringify!($t), "::MAX));")]
+ /// ```
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ pub const MAX: Self = Self(<$t>::MAX);
+
+ /// Returns the size of this integer type in bits.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(saturating_int_impl)]
+ /// use std::num::Saturating;
+ ///
+ #[doc = concat!("assert_eq!(<Saturating<", stringify!($t), ">>::BITS, ", stringify!($t), "::BITS);")]
+ /// ```
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ pub const BITS: u32 = <$t>::BITS;
+
+ /// Returns the number of ones in the binary representation of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(saturating_int_impl)]
+ /// use std::num::Saturating;
+ ///
+ #[doc = concat!("let n = Saturating(0b01001100", stringify!($t), ");")]
+ ///
+ /// assert_eq!(n.count_ones(), 3);
+ /// ```
+ #[inline]
+ #[doc(alias = "popcount")]
+ #[doc(alias = "popcnt")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ pub const fn count_ones(self) -> u32 {
+ self.0.count_ones()
+ }
+
+ /// Returns the number of zeros in the binary representation of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(saturating_int_impl)]
+ /// use std::num::Saturating;
+ ///
+ #[doc = concat!("assert_eq!(Saturating(!0", stringify!($t), ").count_zeros(), 0);")]
+ /// ```
+ #[inline]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ pub const fn count_zeros(self) -> u32 {
+ self.0.count_zeros()
+ }
+
+ /// Returns the number of trailing zeros in the binary representation of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(saturating_int_impl)]
+ /// use std::num::Saturating;
+ ///
+ #[doc = concat!("let n = Saturating(0b0101000", stringify!($t), ");")]
+ ///
+ /// assert_eq!(n.trailing_zeros(), 3);
+ /// ```
+ #[inline]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ pub const fn trailing_zeros(self) -> u32 {
+ self.0.trailing_zeros()
+ }
+
+ /// Shifts the bits to the left by a specified amount, `n`,
+ /// wrapping the truncated bits to the end of the resulting
+ /// integer.
+ ///
+ /// Please note this isn't the same operation as the `<<` shifting
+ /// operator!
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(saturating_int_impl)]
+ /// use std::num::Saturating;
+ ///
+ /// let n: Saturating<i64> = Saturating(0x0123456789ABCDEF);
+ /// let m: Saturating<i64> = Saturating(-0x76543210FEDCBA99);
+ ///
+ /// assert_eq!(n.rotate_left(32), m);
+ /// ```
+ #[inline]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ pub const fn rotate_left(self, n: u32) -> Self {
+ Saturating(self.0.rotate_left(n))
+ }
+
+ /// Shifts the bits to the right by a specified amount, `n`,
+ /// wrapping the truncated bits to the beginning of the resulting
+ /// integer.
+ ///
+ /// Please note this isn't the same operation as the `>>` shifting
+ /// operator!
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(saturating_int_impl)]
+ /// use std::num::Saturating;
+ ///
+ /// let n: Saturating<i64> = Saturating(0x0123456789ABCDEF);
+ /// let m: Saturating<i64> = Saturating(-0xFEDCBA987654322);
+ ///
+ /// assert_eq!(n.rotate_right(4), m);
+ /// ```
+ #[inline]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ pub const fn rotate_right(self, n: u32) -> Self {
+ Saturating(self.0.rotate_right(n))
+ }
+
+ /// Reverses the byte order of the integer.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(saturating_int_impl)]
+ /// use std::num::Saturating;
+ ///
+ /// let n: Saturating<i16> = Saturating(0b00000000_01010101);
+ /// assert_eq!(n, Saturating(85));
+ ///
+ /// let m = n.swap_bytes();
+ ///
+ /// assert_eq!(m, Saturating(0b01010101_00000000));
+ /// assert_eq!(m, Saturating(21760));
+ /// ```
+ #[inline]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ pub const fn swap_bytes(self) -> Self {
+ Saturating(self.0.swap_bytes())
+ }
+
+ /// Reverses the bit pattern of the integer.
+ ///
+ /// # Examples
+ ///
+ /// Please note that this example is shared between integer types,
+ /// which explains why `i16` is used here.
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(saturating_int_impl)]
+ /// use std::num::Saturating;
+ ///
+ /// let n = Saturating(0b00000000_01010101i16);
+ /// assert_eq!(n, Saturating(85));
+ ///
+ /// let m = n.reverse_bits();
+ ///
+ /// assert_eq!(m.0 as u16, 0b10101010_00000000);
+ /// assert_eq!(m, Saturating(-22016));
+ /// ```
+ #[inline]
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[rustc_const_unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ pub const fn reverse_bits(self) -> Self {
+ Saturating(self.0.reverse_bits())
+ }
+
+ /// Converts an integer from big endian to the target's endianness.
+ ///
+ /// On big endian this is a no-op. On little endian the bytes are
+ /// swapped.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(saturating_int_impl)]
+ /// use std::num::Saturating;
+ ///
+ #[doc = concat!("let n = Saturating(0x1A", stringify!($t), ");")]
+ ///
+ /// if cfg!(target_endian = "big") {
+ #[doc = concat!(" assert_eq!(<Saturating<", stringify!($t), ">>::from_be(n), n)")]
+ /// } else {
+ #[doc = concat!(" assert_eq!(<Saturating<", stringify!($t), ">>::from_be(n), n.swap_bytes())")]
+ /// }
+ /// ```
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ pub const fn from_be(x: Self) -> Self {
+ Saturating(<$t>::from_be(x.0))
+ }
+
+ /// Converts an integer from little endian to the target's endianness.
+ ///
+ /// On little endian this is a no-op. On big endian the bytes are
+ /// swapped.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(saturating_int_impl)]
+ /// use std::num::Saturating;
+ ///
+ #[doc = concat!("let n = Saturating(0x1A", stringify!($t), ");")]
+ ///
+ /// if cfg!(target_endian = "little") {
+ #[doc = concat!(" assert_eq!(<Saturating<", stringify!($t), ">>::from_le(n), n)")]
+ /// } else {
+ #[doc = concat!(" assert_eq!(<Saturating<", stringify!($t), ">>::from_le(n), n.swap_bytes())")]
+ /// }
+ /// ```
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ pub const fn from_le(x: Self) -> Self {
+ Saturating(<$t>::from_le(x.0))
+ }
+
+ /// Converts `self` to big endian from the target's endianness.
+ ///
+ /// On big endian this is a no-op. On little endian the bytes are
+ /// swapped.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(saturating_int_impl)]
+ /// use std::num::Saturating;
+ ///
+ #[doc = concat!("let n = Saturating(0x1A", stringify!($t), ");")]
+ ///
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(n.to_be(), n)
+ /// } else {
+ /// assert_eq!(n.to_be(), n.swap_bytes())
+ /// }
+ /// ```
+ #[inline]
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ pub const fn to_be(self) -> Self {
+ Saturating(self.0.to_be())
+ }
+
+ /// Converts `self` to little endian from the target's endianness.
+ ///
+ /// On little endian this is a no-op. On big endian the bytes are
+ /// swapped.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(saturating_int_impl)]
+ /// use std::num::Saturating;
+ ///
+ #[doc = concat!("let n = Saturating(0x1A", stringify!($t), ");")]
+ ///
+ /// if cfg!(target_endian = "little") {
+ /// assert_eq!(n.to_le(), n)
+ /// } else {
+ /// assert_eq!(n.to_le(), n.swap_bytes())
+ /// }
+ /// ```
+ #[inline]
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ pub const fn to_le(self) -> Self {
+ Saturating(self.0.to_le())
+ }
+
+ /// Raises self to the power of `exp`, using exponentiation by squaring.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(saturating_int_impl)]
+ /// use std::num::Saturating;
+ ///
+ #[doc = concat!("assert_eq!(Saturating(3", stringify!($t), ").pow(4), Saturating(81));")]
+ /// ```
+ ///
+ /// Results that are too large are saturated:
+ ///
+ /// ```
+ /// #![feature(saturating_int_impl)]
+ /// use std::num::Saturating;
+ ///
+ /// assert_eq!(Saturating(3i8).pow(5), Saturating(127));
+ /// assert_eq!(Saturating(3i8).pow(6), Saturating(127));
+ /// ```
+ #[inline]
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ pub fn pow(self, exp: u32) -> Self {
+ Saturating(self.0.saturating_pow(exp))
+ }
+ }
+ )*)
+}
+
+saturating_int_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
+
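Of the inherent methods above, only `pow` changes semantics, routing through `saturating_pow`; the bit-level helpers are plain pass-throughs to the primitive. A sketch on the underlying stable types:

```rust
fn main() {
    // pow saturates: 3^5 = 243 clamps to i8::MAX.
    assert_eq!(3i8.saturating_pow(5), 127);
    // Bit-level helpers are unchanged pass-throughs:
    assert_eq!(0x1234u16.swap_bytes(), 0x3412);
    // from_be/to_be are inverses regardless of the target's endianness.
    assert_eq!(u16::from_be(0x1234u16.to_be()), 0x1234);
}
```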
+macro_rules! saturating_int_impl_signed {
+ ($($t:ty)*) => ($(
+ impl Saturating<$t> {
+ /// Returns the number of leading zeros in the binary representation of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(saturating_int_impl)]
+ /// use std::num::Saturating;
+ ///
+ #[doc = concat!("let n = Saturating(", stringify!($t), "::MAX >> 2);")]
+ ///
+ /// assert_eq!(n.leading_zeros(), 3);
+ /// ```
+ #[inline]
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ pub const fn leading_zeros(self) -> u32 {
+ self.0.leading_zeros()
+ }
+
+ /// Saturating absolute value. Computes `self.abs()`, returning `MAX` if `self == MIN`
+ /// instead of overflowing.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(saturating_int_impl)]
+ /// use std::num::Saturating;
+ ///
+ #[doc = concat!("assert_eq!(Saturating(100", stringify!($t), ").abs(), Saturating(100));")]
+ #[doc = concat!("assert_eq!(Saturating(-100", stringify!($t), ").abs(), Saturating(100));")]
+ #[doc = concat!("assert_eq!(Saturating(", stringify!($t), "::MIN).abs(), Saturating((", stringify!($t), "::MIN + 1).abs()));")]
+ #[doc = concat!("assert_eq!(Saturating(", stringify!($t), "::MIN).abs(), Saturating(", stringify!($t), "::MIN.saturating_abs()));")]
+ #[doc = concat!("assert_eq!(Saturating(", stringify!($t), "::MIN).abs(), Saturating(", stringify!($t), "::MAX));")]
+ /// ```
+ #[inline]
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ pub fn abs(self) -> Saturating<$t> {
+ Saturating(self.0.saturating_abs())
+ }
+
+ /// Returns a number representing the sign of `self`.
+ ///
+ /// - `0` if the number is zero
+ /// - `1` if the number is positive
+ /// - `-1` if the number is negative
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(saturating_int_impl)]
+ /// use std::num::Saturating;
+ ///
+ #[doc = concat!("assert_eq!(Saturating(10", stringify!($t), ").signum(), Saturating(1));")]
+ #[doc = concat!("assert_eq!(Saturating(0", stringify!($t), ").signum(), Saturating(0));")]
+ #[doc = concat!("assert_eq!(Saturating(-10", stringify!($t), ").signum(), Saturating(-1));")]
+ /// ```
+ #[inline]
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ pub fn signum(self) -> Saturating<$t> {
+ Saturating(self.0.signum())
+ }
+
+ /// Returns `true` if `self` is positive and `false` if the number is zero or
+ /// negative.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(saturating_int_impl)]
+ /// use std::num::Saturating;
+ ///
+ #[doc = concat!("assert!(Saturating(10", stringify!($t), ").is_positive());")]
+ #[doc = concat!("assert!(!Saturating(-10", stringify!($t), ").is_positive());")]
+ /// ```
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ pub const fn is_positive(self) -> bool {
+ self.0.is_positive()
+ }
+
+ /// Returns `true` if `self` is negative and `false` if the number is zero or
+ /// positive.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(saturating_int_impl)]
+ /// use std::num::Saturating;
+ ///
+ #[doc = concat!("assert!(Saturating(-10", stringify!($t), ").is_negative());")]
+ #[doc = concat!("assert!(!Saturating(10", stringify!($t), ").is_negative());")]
+ /// ```
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ pub const fn is_negative(self) -> bool {
+ self.0.is_negative()
+ }
+ }
+
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ impl Neg for Saturating<$t> {
+ type Output = Self;
+ #[inline]
+ fn neg(self) -> Self {
+ Saturating(self.0.saturating_neg())
+ }
+ }
+ forward_ref_unop! { impl Neg, neg for Saturating<$t>,
+ #[unstable(feature = "saturating_int_impl", issue = "87920")] }
+ )*)
+}
+
+saturating_int_impl_signed! { isize i8 i16 i32 i64 i128 }
+
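The signed-only methods route `abs` and unary `-` through `saturating_abs`/`saturating_neg` for a reason: both overflow at `MIN` on a two's-complement type. A sketch on the stable primitive methods:

```rust
fn main() {
    // Plain -i8::MIN and i8::MIN.abs() overflow (and panic in debug
    // builds); the saturating forms clamp to i8::MAX instead.
    assert_eq!(i8::MIN.saturating_abs(), i8::MAX);
    assert_eq!(i8::MIN.saturating_neg(), i8::MAX);
    assert_eq!((-100i8).saturating_abs(), 100); // unaffected away from MIN
}
```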
+macro_rules! saturating_int_impl_unsigned {
+ ($($t:ty)*) => ($(
+ impl Saturating<$t> {
+ /// Returns the number of leading zeros in the binary representation of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(saturating_int_impl)]
+ /// use std::num::Saturating;
+ ///
+ #[doc = concat!("let n = Saturating(", stringify!($t), "::MAX >> 2);")]
+ ///
+ /// assert_eq!(n.leading_zeros(), 2);
+ /// ```
+ #[inline]
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ pub const fn leading_zeros(self) -> u32 {
+ self.0.leading_zeros()
+ }
+
+ /// Returns `true` if and only if `self == 2^k` for some `k`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(saturating_int_impl)]
+ /// use std::num::Saturating;
+ ///
+ #[doc = concat!("assert!(Saturating(16", stringify!($t), ").is_power_of_two());")]
+ #[doc = concat!("assert!(!Saturating(10", stringify!($t), ").is_power_of_two());")]
+ /// ```
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "saturating_int_impl", issue = "87920")]
+ pub fn is_power_of_two(self) -> bool {
+ self.0.is_power_of_two()
+ }
+
+ }
+ )*)
+}
+
+saturating_int_impl_unsigned! { usize u8 u16 u32 u64 u128 }
+
+// Related to potential Shl and ShlAssign implementation
+//
+// mod shift_max {
+// #![allow(non_upper_case_globals)]
+//
+// #[cfg(target_pointer_width = "16")]
+// mod platform {
+// pub const usize: u32 = super::u16;
+// pub const isize: u32 = super::i16;
+// }
+//
+// #[cfg(target_pointer_width = "32")]
+// mod platform {
+// pub const usize: u32 = super::u32;
+// pub const isize: u32 = super::i32;
+// }
+//
+// #[cfg(target_pointer_width = "64")]
+// mod platform {
+// pub const usize: u32 = super::u64;
+// pub const isize: u32 = super::i64;
+// }
+//
+// pub const i8: u32 = (1 << 3) - 1;
+// pub const i16: u32 = (1 << 4) - 1;
+// pub const i32: u32 = (1 << 5) - 1;
+// pub const i64: u32 = (1 << 6) - 1;
+// pub const i128: u32 = (1 << 7) - 1;
+// pub use self::platform::isize;
+//
+// pub const u8: u32 = i8;
+// pub const u16: u32 = i16;
+// pub const u32: u32 = i32;
+// pub const u64: u32 = i64;
+// pub const u128: u32 = i128;
+// pub use self::platform::usize;
+// }
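The commented-out `shift_max` table stores `BITS - 1` for each type, so masking a shift amount with it reduces the shift modulo the bit width. A sketch of the arithmetic the table encodes:

```rust
fn main() {
    // (1 << 3) - 1 == 7 for 8-bit, (1 << 4) - 1 == 15 for 16-bit, etc.
    let shift_max_u8: u32 = (1 << 3) - 1;
    assert_eq!(shift_max_u8, u8::BITS - 1);
    // Masking keeps an oversized shift amount in range: 9 & 7 == 1.
    let n: u32 = 9;
    assert_eq!(1u8 << (n & shift_max_u8), 2);
}
```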
diff --git a/library/core/src/num/shells/i128.rs b/library/core/src/num/shells/i128.rs
new file mode 100644
index 000000000..7b048dc52
--- /dev/null
+++ b/library/core/src/num/shells/i128.rs
@@ -0,0 +1,13 @@
+//! Constants for the 128-bit signed integer type.
+//!
+//! *[See also the `i128` primitive type][i128].*
+//!
+//! New code should use the associated constants directly on the primitive type.
+
+#![stable(feature = "i128", since = "1.26.0")]
+#![deprecated(
+ since = "TBD",
+ note = "all constants in this module replaced by associated constants on `i128`"
+)]
+
+int_module! { i128, #[stable(feature = "i128", since="1.26.0")] }
diff --git a/library/core/src/num/shells/i16.rs b/library/core/src/num/shells/i16.rs
new file mode 100644
index 000000000..5c5812d5c
--- /dev/null
+++ b/library/core/src/num/shells/i16.rs
@@ -0,0 +1,13 @@
+//! Constants for the 16-bit signed integer type.
+//!
+//! *[See also the `i16` primitive type][i16].*
+//!
+//! New code should use the associated constants directly on the primitive type.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+#![deprecated(
+ since = "TBD",
+ note = "all constants in this module replaced by associated constants on `i16`"
+)]
+
+int_module! { i16 }
diff --git a/library/core/src/num/shells/i32.rs b/library/core/src/num/shells/i32.rs
new file mode 100644
index 000000000..b283ac644
--- /dev/null
+++ b/library/core/src/num/shells/i32.rs
@@ -0,0 +1,13 @@
+//! Constants for the 32-bit signed integer type.
+//!
+//! *[See also the `i32` primitive type][i32].*
+//!
+//! New code should use the associated constants directly on the primitive type.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+#![deprecated(
+ since = "TBD",
+ note = "all constants in this module replaced by associated constants on `i32`"
+)]
+
+int_module! { i32 }
diff --git a/library/core/src/num/shells/i64.rs b/library/core/src/num/shells/i64.rs
new file mode 100644
index 000000000..a416fa7e9
--- /dev/null
+++ b/library/core/src/num/shells/i64.rs
@@ -0,0 +1,13 @@
+//! Constants for the 64-bit signed integer type.
+//!
+//! *[See also the `i64` primitive type][i64].*
+//!
+//! New code should use the associated constants directly on the primitive type.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+#![deprecated(
+ since = "TBD",
+ note = "all constants in this module replaced by associated constants on `i64`"
+)]
+
+int_module! { i64 }
diff --git a/library/core/src/num/shells/i8.rs b/library/core/src/num/shells/i8.rs
new file mode 100644
index 000000000..02465013a
--- /dev/null
+++ b/library/core/src/num/shells/i8.rs
@@ -0,0 +1,13 @@
+//! Constants for the 8-bit signed integer type.
+//!
+//! *[See also the `i8` primitive type][i8].*
+//!
+//! New code should use the associated constants directly on the primitive type.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+#![deprecated(
+ since = "TBD",
+ note = "all constants in this module replaced by associated constants on `i8`"
+)]
+
+int_module! { i8 }
diff --git a/library/core/src/num/shells/int_macros.rs b/library/core/src/num/shells/int_macros.rs
new file mode 100644
index 000000000..2b1133e11
--- /dev/null
+++ b/library/core/src/num/shells/int_macros.rs
@@ -0,0 +1,44 @@
+#![doc(hidden)]
+
+macro_rules! int_module {
+ ($T:ident) => (int_module!($T, #[stable(feature = "rust1", since = "1.0.0")]););
+ ($T:ident, #[$attr:meta]) => (
+ #[doc = concat!(
+ "The smallest value that can be represented by this integer type. Use ",
+ "[`", stringify!($T), "::MIN", "`] instead."
+ )]
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// // deprecated way
+ #[doc = concat!("let min = std::", stringify!($T), "::MIN;")]
+ ///
+ /// // intended way
+ #[doc = concat!("let min = ", stringify!($T), "::MIN;")]
+ /// ```
+ ///
+ #[$attr]
+ #[deprecated(since = "TBD", note = "replaced by the `MIN` associated constant on this type")]
+ pub const MIN: $T = $T::MIN;
+
+ #[doc = concat!(
+ "The largest value that can be represented by this integer type. Use ",
+ "[`", stringify!($T), "::MAX", "`] instead."
+ )]
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// // deprecated way
+ #[doc = concat!("let max = std::", stringify!($T), "::MAX;")]
+ ///
+ /// // intended way
+ #[doc = concat!("let max = ", stringify!($T), "::MAX;")]
+ /// ```
+ ///
+ #[$attr]
+ #[deprecated(since = "TBD", note = "replaced by the `MAX` associated constant on this type")]
+ pub const MAX: $T = $T::MAX;
+ )
+}
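What the generated shims expand to in practice: a module-level constant that just re-exports the associated constant, kept only for backwards compatibility. A sketch of the old and new spellings side by side:

```rust
fn main() {
    #[allow(deprecated)]
    let old = std::i32::MAX; // deprecated module-level constant
    let new = i32::MAX;      // preferred associated constant
    assert_eq!(old, new);
}
```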
diff --git a/library/core/src/num/shells/isize.rs b/library/core/src/num/shells/isize.rs
new file mode 100644
index 000000000..1579fbab6
--- /dev/null
+++ b/library/core/src/num/shells/isize.rs
@@ -0,0 +1,13 @@
+//! Constants for the pointer-sized signed integer type.
+//!
+//! *[See also the `isize` primitive type][isize].*
+//!
+//! New code should use the associated constants directly on the primitive type.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+#![deprecated(
+ since = "TBD",
+ note = "all constants in this module replaced by associated constants on `isize`"
+)]
+
+int_module! { isize }
diff --git a/library/core/src/num/shells/u128.rs b/library/core/src/num/shells/u128.rs
new file mode 100644
index 000000000..fe08cee58
--- /dev/null
+++ b/library/core/src/num/shells/u128.rs
@@ -0,0 +1,13 @@
+//! Constants for the 128-bit unsigned integer type.
+//!
+//! *[See also the `u128` primitive type][u128].*
+//!
+//! New code should use the associated constants directly on the primitive type.
+
+#![stable(feature = "i128", since = "1.26.0")]
+#![deprecated(
+ since = "TBD",
+ note = "all constants in this module replaced by associated constants on `u128`"
+)]
+
+int_module! { u128, #[stable(feature = "i128", since="1.26.0")] }
diff --git a/library/core/src/num/shells/u16.rs b/library/core/src/num/shells/u16.rs
new file mode 100644
index 000000000..36f8c6978
--- /dev/null
+++ b/library/core/src/num/shells/u16.rs
@@ -0,0 +1,13 @@
+//! Constants for the 16-bit unsigned integer type.
+//!
+//! *[See also the `u16` primitive type][u16].*
+//!
+//! New code should use the associated constants directly on the primitive type.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+#![deprecated(
+ since = "TBD",
+ note = "all constants in this module replaced by associated constants on `u16`"
+)]
+
+int_module! { u16 }
diff --git a/library/core/src/num/shells/u32.rs b/library/core/src/num/shells/u32.rs
new file mode 100644
index 000000000..1c369097d
--- /dev/null
+++ b/library/core/src/num/shells/u32.rs
@@ -0,0 +1,13 @@
+//! Constants for the 32-bit unsigned integer type.
+//!
+//! *[See also the `u32` primitive type][u32].*
+//!
+//! New code should use the associated constants directly on the primitive type.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+#![deprecated(
+ since = "TBD",
+ note = "all constants in this module replaced by associated constants on `u32`"
+)]
+
+int_module! { u32 }
diff --git a/library/core/src/num/shells/u64.rs b/library/core/src/num/shells/u64.rs
new file mode 100644
index 000000000..e8b691d15
--- /dev/null
+++ b/library/core/src/num/shells/u64.rs
@@ -0,0 +1,13 @@
+//! Constants for the 64-bit unsigned integer type.
+//!
+//! *[See also the `u64` primitive type][u64].*
+//!
+//! New code should use the associated constants directly on the primitive type.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+#![deprecated(
+ since = "TBD",
+ note = "all constants in this module replaced by associated constants on `u64`"
+)]
+
+int_module! { u64 }
diff --git a/library/core/src/num/shells/u8.rs b/library/core/src/num/shells/u8.rs
new file mode 100644
index 000000000..817c6a18a
--- /dev/null
+++ b/library/core/src/num/shells/u8.rs
@@ -0,0 +1,13 @@
+//! Constants for the 8-bit unsigned integer type.
+//!
+//! *[See also the `u8` primitive type][u8].*
+//!
+//! New code should use the associated constants directly on the primitive type.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+#![deprecated(
+ since = "TBD",
+ note = "all constants in this module replaced by associated constants on `u8`"
+)]
+
+int_module! { u8 }
diff --git a/library/core/src/num/shells/usize.rs b/library/core/src/num/shells/usize.rs
new file mode 100644
index 000000000..3e1bec5ec
--- /dev/null
+++ b/library/core/src/num/shells/usize.rs
@@ -0,0 +1,13 @@
+//! Constants for the pointer-sized unsigned integer type.
+//!
+//! *[See also the `usize` primitive type][usize].*
+//!
+//! New code should use the associated constants directly on the primitive type.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+#![deprecated(
+ since = "TBD",
+ note = "all constants in this module replaced by associated constants on `usize`"
+)]
+
+int_module! { usize }
diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs
new file mode 100644
index 000000000..733655442
--- /dev/null
+++ b/library/core/src/num/uint_macros.rs
@@ -0,0 +1,2454 @@
+macro_rules! uint_impl {
+ ($SelfT:ty, $ActualT:ident, $SignedT:ident, $NonZeroT:ident,
+ $BITS:expr, $MaxV:expr,
+ $rot:expr, $rot_op:expr, $rot_result:expr, $swap_op:expr, $swapped:expr,
+ $reversed:expr, $le_bytes:expr, $be_bytes:expr,
+ $to_xe_bytes_doc:expr, $from_xe_bytes_doc:expr,
+ $bound_condition:expr) => {
+ /// The smallest value that can be represented by this integer type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN, 0);")]
+ /// ```
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MIN: Self = 0;
+
+ /// The largest value that can be represented by this integer type
+ #[doc = concat!("(2<sup>", $BITS, "</sup> &minus; 1", $bound_condition, ")")]
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX, ", stringify!($MaxV), ");")]
+ /// ```
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MAX: Self = !0;
+
+ /// The size of this integer type in bits.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::BITS, ", stringify!($BITS), ");")]
+ /// ```
+ #[stable(feature = "int_bits_const", since = "1.53.0")]
+ pub const BITS: u32 = $BITS;
+
+ /// Converts a string slice in a given base to an integer.
+ ///
+ /// The string is expected to be an optional `+` sign
+ /// followed by digits.
+ /// Leading and trailing whitespace represent an error.
+ /// Digits are a subset of these characters, depending on `radix`:
+ ///
+ /// * `0-9`
+ /// * `a-z`
+ /// * `A-Z`
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `radix` is not in the range from 2 to 36.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::from_str_radix(\"A\", 16), Ok(10));")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn from_str_radix(src: &str, radix: u32) -> Result<Self, ParseIntError> {
+ from_str_radix(src, radix)
+ }
+
+ /// Returns the number of ones in the binary representation of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let n = 0b01001100", stringify!($SelfT), ";")]
+ ///
+ /// assert_eq!(n.count_ones(), 3);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_math", since = "1.32.0")]
+ #[doc(alias = "popcount")]
+ #[doc(alias = "popcnt")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn count_ones(self) -> u32 {
+ intrinsics::ctpop(self as $ActualT) as u32
+ }
+
+ /// Returns the number of zeros in the binary representation of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.count_zeros(), 0);")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn count_zeros(self) -> u32 {
+ (!self).count_ones()
+ }
+
+ /// Returns the number of leading zeros in the binary representation of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let n = ", stringify!($SelfT), "::MAX >> 2;")]
+ ///
+ /// assert_eq!(n.leading_zeros(), 2);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn leading_zeros(self) -> u32 {
+ intrinsics::ctlz(self as $ActualT) as u32
+ }
+
+ /// Returns the number of trailing zeros in the binary representation
+ /// of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let n = 0b0101000", stringify!($SelfT), ";")]
+ ///
+ /// assert_eq!(n.trailing_zeros(), 3);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn trailing_zeros(self) -> u32 {
+ intrinsics::cttz(self) as u32
+ }
+
+ /// Returns the number of leading ones in the binary representation of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let n = !(", stringify!($SelfT), "::MAX >> 2);")]
+ ///
+ /// assert_eq!(n.leading_ones(), 2);
+ /// ```
+ #[stable(feature = "leading_trailing_ones", since = "1.46.0")]
+ #[rustc_const_stable(feature = "leading_trailing_ones", since = "1.46.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn leading_ones(self) -> u32 {
+ (!self).leading_zeros()
+ }
+
+ /// Returns the number of trailing ones in the binary representation
+ /// of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let n = 0b1010111", stringify!($SelfT), ";")]
+ ///
+ /// assert_eq!(n.trailing_ones(), 3);
+ /// ```
+ #[stable(feature = "leading_trailing_ones", since = "1.46.0")]
+ #[rustc_const_stable(feature = "leading_trailing_ones", since = "1.46.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn trailing_ones(self) -> u32 {
+ (!self).trailing_zeros()
+ }
+
+ /// Shifts the bits to the left by a specified amount, `n`,
+ /// wrapping the truncated bits to the end of the resulting integer.
+ ///
+ /// Please note this isn't the same operation as the `<<` shifting operator!
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let n = ", $rot_op, stringify!($SelfT), ";")]
+ #[doc = concat!("let m = ", $rot_result, ";")]
+ ///
+ #[doc = concat!("assert_eq!(n.rotate_left(", $rot, "), m);")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn rotate_left(self, n: u32) -> Self {
+ intrinsics::rotate_left(self, n as $SelfT)
+ }
+
+ /// Shifts the bits to the right by a specified amount, `n`,
+ /// wrapping the truncated bits to the beginning of the resulting
+ /// integer.
+ ///
+ /// Please note this isn't the same operation as the `>>` shifting operator!
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let n = ", $rot_result, stringify!($SelfT), ";")]
+ #[doc = concat!("let m = ", $rot_op, ";")]
+ ///
+ #[doc = concat!("assert_eq!(n.rotate_right(", $rot, "), m);")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn rotate_right(self, n: u32) -> Self {
+ intrinsics::rotate_right(self, n as $SelfT)
+ }
+
+ /// Reverses the byte order of the integer.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let n = ", $swap_op, stringify!($SelfT), ";")]
+ /// let m = n.swap_bytes();
+ ///
+ #[doc = concat!("assert_eq!(m, ", $swapped, ");")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn swap_bytes(self) -> Self {
+ intrinsics::bswap(self as $ActualT) as Self
+ }
+
+ /// Reverses the order of bits in the integer. The least-significant bit becomes the most-significant bit,
+ /// the second least-significant bit becomes the second most-significant bit, and so on.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let n = ", $swap_op, stringify!($SelfT), ";")]
+ /// let m = n.reverse_bits();
+ ///
+ #[doc = concat!("assert_eq!(m, ", $reversed, ");")]
+ #[doc = concat!("assert_eq!(0, 0", stringify!($SelfT), ".reverse_bits());")]
+ /// ```
+ #[stable(feature = "reverse_bits", since = "1.37.0")]
+ #[rustc_const_stable(feature = "reverse_bits", since = "1.37.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn reverse_bits(self) -> Self {
+ intrinsics::bitreverse(self as $ActualT) as Self
+ }
+
+ /// Converts an integer from big endian to the target's endianness.
+ ///
+ /// On big endian this is a no-op. On little endian the bytes are
+ /// swapped.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let n = 0x1A", stringify!($SelfT), ";")]
+ ///
+ /// if cfg!(target_endian = "big") {
+ #[doc = concat!(" assert_eq!(", stringify!($SelfT), "::from_be(n), n)")]
+ /// } else {
+ #[doc = concat!(" assert_eq!(", stringify!($SelfT), "::from_be(n), n.swap_bytes())")]
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_math", since = "1.32.0")]
+ #[must_use]
+ #[inline(always)]
+ pub const fn from_be(x: Self) -> Self {
+ #[cfg(target_endian = "big")]
+ {
+ x
+ }
+ #[cfg(not(target_endian = "big"))]
+ {
+ x.swap_bytes()
+ }
+ }
+
+ /// Converts an integer from little endian to the target's endianness.
+ ///
+ /// On little endian this is a no-op. On big endian the bytes are
+ /// swapped.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let n = 0x1A", stringify!($SelfT), ";")]
+ ///
+ /// if cfg!(target_endian = "little") {
+ #[doc = concat!(" assert_eq!(", stringify!($SelfT), "::from_le(n), n)")]
+ /// } else {
+ #[doc = concat!(" assert_eq!(", stringify!($SelfT), "::from_le(n), n.swap_bytes())")]
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_math", since = "1.32.0")]
+ #[must_use]
+ #[inline(always)]
+ pub const fn from_le(x: Self) -> Self {
+ #[cfg(target_endian = "little")]
+ {
+ x
+ }
+ #[cfg(not(target_endian = "little"))]
+ {
+ x.swap_bytes()
+ }
+ }
+
+ /// Converts `self` to big endian from the target's endianness.
+ ///
+ /// On big endian this is a no-op. On little endian the bytes are
+ /// swapped.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let n = 0x1A", stringify!($SelfT), ";")]
+ ///
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(n.to_be(), n)
+ /// } else {
+ /// assert_eq!(n.to_be(), n.swap_bytes())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn to_be(self) -> Self { // or not to be?
+ #[cfg(target_endian = "big")]
+ {
+ self
+ }
+ #[cfg(not(target_endian = "big"))]
+ {
+ self.swap_bytes()
+ }
+ }
+
+ /// Converts `self` to little endian from the target's endianness.
+ ///
+ /// On little endian this is a no-op. On big endian the bytes are
+ /// swapped.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("let n = 0x1A", stringify!($SelfT), ";")]
+ ///
+ /// if cfg!(target_endian = "little") {
+ /// assert_eq!(n.to_le(), n)
+ /// } else {
+ /// assert_eq!(n.to_le(), n.swap_bytes())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn to_le(self) -> Self {
+ #[cfg(target_endian = "little")]
+ {
+ self
+ }
+ #[cfg(not(target_endian = "little"))]
+ {
+ self.swap_bytes()
+ }
+ }
+
+ /// Checked integer addition. Computes `self + rhs`, returning `None`
+ /// if overflow occurred.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!(
+ "assert_eq!((", stringify!($SelfT), "::MAX - 2).checked_add(1), ",
+ "Some(", stringify!($SelfT), "::MAX - 1));"
+ )]
+ #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX - 2).checked_add(3), None);")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_add(self, rhs: Self) -> Option<Self> {
+ let (a, b) = self.overflowing_add(rhs);
+ if unlikely!(b) {None} else {Some(a)}
+ }
+
+ /// Unchecked integer addition. Computes `self + rhs`, assuming overflow
+ /// cannot occur.
+ ///
+ /// # Safety
+ ///
+ /// This results in undefined behavior when
+ #[doc = concat!("`self + rhs > ", stringify!($SelfT), "::MAX` or `self + rhs < ", stringify!($SelfT), "::MIN`,")]
+ /// i.e. when [`checked_add`] would return `None`.
+ ///
+ #[doc = concat!("[`checked_add`]: ", stringify!($SelfT), "::checked_add")]
+ #[unstable(
+ feature = "unchecked_math",
+ reason = "niche optimization path",
+ issue = "85122",
+ )]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn unchecked_add(self, rhs: Self) -> Self {
+ // SAFETY: the caller must uphold the safety contract for
+ // `unchecked_add`.
+ unsafe { intrinsics::unchecked_add(self, rhs) }
+ }
+
+ /// Checked addition with a signed integer. Computes `self + rhs`,
+ /// returning `None` if overflow occurred.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// # #![feature(mixed_integer_ops)]
+ #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".checked_add_signed(2), Some(3));")]
+ #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".checked_add_signed(-2), None);")]
+ #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX - 2).checked_add_signed(3), None);")]
+ /// ```
+ #[unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_add_signed(self, rhs: $SignedT) -> Option<Self> {
+ let (a, b) = self.overflowing_add_signed(rhs);
+ if unlikely!(b) {None} else {Some(a)}
+ }
+
+ /// Checked integer subtraction. Computes `self - rhs`, returning
+ /// `None` if overflow occurred.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".checked_sub(1), Some(0));")]
+ #[doc = concat!("assert_eq!(0", stringify!($SelfT), ".checked_sub(1), None);")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
+ let (a, b) = self.overflowing_sub(rhs);
+ if unlikely!(b) {None} else {Some(a)}
+ }
+
+ /// Unchecked integer subtraction. Computes `self - rhs`, assuming overflow
+ /// cannot occur.
+ ///
+ /// # Safety
+ ///
+ /// This results in undefined behavior when
+ #[doc = concat!("`self - rhs > ", stringify!($SelfT), "::MAX` or `self - rhs < ", stringify!($SelfT), "::MIN`,")]
+ /// i.e. when [`checked_sub`] would return `None`.
+ ///
+ #[doc = concat!("[`checked_sub`]: ", stringify!($SelfT), "::checked_sub")]
+ #[unstable(
+ feature = "unchecked_math",
+ reason = "niche optimization path",
+ issue = "85122",
+ )]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn unchecked_sub(self, rhs: Self) -> Self {
+ // SAFETY: the caller must uphold the safety contract for
+ // `unchecked_sub`.
+ unsafe { intrinsics::unchecked_sub(self, rhs) }
+ }
+
+ /// Checked integer multiplication. Computes `self * rhs`, returning
+ /// `None` if overflow occurred.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".checked_mul(1), Some(5));")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.checked_mul(2), None);")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
+ let (a, b) = self.overflowing_mul(rhs);
+ if unlikely!(b) {None} else {Some(a)}
+ }
+
+ /// Unchecked integer multiplication. Computes `self * rhs`, assuming overflow
+ /// cannot occur.
+ ///
+ /// # Safety
+ ///
+ /// This results in undefined behavior when
+ #[doc = concat!("`self * rhs > ", stringify!($SelfT), "::MAX` or `self * rhs < ", stringify!($SelfT), "::MIN`,")]
+ /// i.e. when [`checked_mul`] would return `None`.
+ ///
+ #[doc = concat!("[`checked_mul`]: ", stringify!($SelfT), "::checked_mul")]
+ #[unstable(
+ feature = "unchecked_math",
+ reason = "niche optimization path",
+ issue = "85122",
+ )]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn unchecked_mul(self, rhs: Self) -> Self {
+ // SAFETY: the caller must uphold the safety contract for
+ // `unchecked_mul`.
+ unsafe { intrinsics::unchecked_mul(self, rhs) }
+ }
+
+ /// Checked integer division. Computes `self / rhs`, returning `None`
+ /// if `rhs == 0`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(128", stringify!($SelfT), ".checked_div(2), Some(64));")]
+ #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".checked_div(0), None);")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_checked_int_div", since = "1.52.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_div(self, rhs: Self) -> Option<Self> {
+ if unlikely!(rhs == 0) {
+ None
+ } else {
+ // SAFETY: div by zero has been checked above and unsigned types have no other
+ // failure modes for division
+ Some(unsafe { intrinsics::unchecked_div(self, rhs) })
+ }
+ }
+
+ /// Checked Euclidean division. Computes `self.div_euclid(rhs)`, returning `None`
+ /// if `rhs == 0`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(128", stringify!($SelfT), ".checked_div_euclid(2), Some(64));")]
+ #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".checked_div_euclid(0), None);")]
+ /// ```
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_stable(feature = "const_euclidean_int_methods", since = "1.52.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_div_euclid(self, rhs: Self) -> Option<Self> {
+ if unlikely!(rhs == 0) {
+ None
+ } else {
+ Some(self.div_euclid(rhs))
+ }
+ }
+
+ /// Checked integer remainder. Computes `self % rhs`, returning `None`
+ /// if `rhs == 0`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".checked_rem(2), Some(1));")]
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".checked_rem(0), None);")]
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_checked_int_div", since = "1.52.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_rem(self, rhs: Self) -> Option<Self> {
+ if unlikely!(rhs == 0) {
+ None
+ } else {
+ // SAFETY: div by zero has been checked above and unsigned types have no other
+ // failure modes for division
+ Some(unsafe { intrinsics::unchecked_rem(self, rhs) })
+ }
+ }
+
+ /// Checked Euclidean modulo. Computes `self.rem_euclid(rhs)`, returning `None`
+ /// if `rhs == 0`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".checked_rem_euclid(2), Some(1));")]
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".checked_rem_euclid(0), None);")]
+ /// ```
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_stable(feature = "const_euclidean_int_methods", since = "1.52.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_rem_euclid(self, rhs: Self) -> Option<Self> {
+ if unlikely!(rhs == 0) {
+ None
+ } else {
+ Some(self.rem_euclid(rhs))
+ }
+ }
+
+ /// Returns the logarithm of the number with respect to an arbitrary base,
+ /// rounded down.
+ ///
+ /// This method might not be optimized owing to implementation details;
+ /// `log2` can produce results more efficiently for base 2, and `log10`
+ /// can produce results more efficiently for base 10.
+ ///
+ /// # Panics
+ ///
+ /// When the number is zero, or if the base is not at least 2: this
+ /// function panics in debug mode, and returns 0 in release mode.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(int_log)]
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".log(5), 1);")]
+ /// ```
+ #[unstable(feature = "int_log", issue = "70887")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[track_caller]
+ #[rustc_inherit_overflow_checks]
+ #[allow(arithmetic_overflow)]
+ pub const fn log(self, base: Self) -> u32 {
+ match self.checked_log(base) {
+ Some(n) => n,
+ None => {
+ // In debug builds, trigger a panic on None.
+ // This should optimize completely out in release builds.
+ let _ = Self::MAX + 1;
+
+ 0
+ },
+ }
+ }
+
+ /// Returns the base 2 logarithm of the number, rounded down.
+ ///
+ /// # Panics
+ ///
+ /// When the number is zero, this function panics in debug
+ /// mode and returns 0 in release mode.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(int_log)]
+ #[doc = concat!("assert_eq!(2", stringify!($SelfT), ".log2(), 1);")]
+ /// ```
+ #[unstable(feature = "int_log", issue = "70887")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[track_caller]
+ #[rustc_inherit_overflow_checks]
+ #[allow(arithmetic_overflow)]
+ pub const fn log2(self) -> u32 {
+ match self.checked_log2() {
+ Some(n) => n,
+ None => {
+ // In debug builds, trigger a panic on None.
+ // This should optimize completely out in release builds.
+ let _ = Self::MAX + 1;
+
+ 0
+ },
+ }
+ }
+
+ /// Returns the base 10 logarithm of the number, rounded down.
+ ///
+ /// # Panics
+ ///
+ /// When the number is zero, this function panics in debug
+ /// mode and returns 0 in release mode.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// #![feature(int_log)]
+ #[doc = concat!("assert_eq!(10", stringify!($SelfT), ".log10(), 1);")]
+ /// ```
+ #[unstable(feature = "int_log", issue = "70887")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[track_caller]
+ #[rustc_inherit_overflow_checks]
+ #[allow(arithmetic_overflow)]
+ pub const fn log10(self) -> u32 {
+ match self.checked_log10() {
+ Some(n) => n,
+ None => {
+ // In debug builds, trigger a panic on None.
+ // This should optimize completely out in release builds.
+ let _ = Self::MAX + 1;
+
+ 0
+ },
+ }
+ }
+
+ /// Returns the logarithm of the number with respect to an arbitrary base,
+ /// rounded down.
+ ///
+ /// Returns `None` if the number is zero, or if the base is not at least 2.
+ ///
+ /// This method might not be optimized owing to implementation details;
+ /// `checked_log2` can produce results more efficiently for base 2, and
+ /// `checked_log10` can produce results more efficiently for base 10.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(int_log)]
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".checked_log(5), Some(1));")]
+ /// ```
+ #[unstable(feature = "int_log", issue = "70887")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_log(self, base: Self) -> Option<u32> {
+ if self <= 0 || base <= 1 {
+ None
+ } else {
+ let mut n = 0;
+ let mut r = self;
+
+ // Optimization for 128 bit wide integers.
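+ // The estimate `b` below never overshoots: since
+ // `base <= 2.pow(base.log2() + 1)`, we have
+ // `base.pow(b) <= 2.pow(b * (base.log2() + 1)) <= 2.pow(self.log2()) <= self`,
+ // so only a few iterations of the loop below remain.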
+ if Self::BITS == 128 {
+ let b = Self::log2(self) / (Self::log2(base) + 1);
+ n += b;
+ r /= base.pow(b as u32);
+ }
+
+ while r >= base {
+ r /= base;
+ n += 1;
+ }
+ Some(n)
+ }
+ }
+
+ /// Returns the base 2 logarithm of the number, rounded down.
+ ///
+ /// Returns `None` if the number is zero.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(int_log)]
+ #[doc = concat!("assert_eq!(2", stringify!($SelfT), ".checked_log2(), Some(1));")]
+ /// ```
+ #[unstable(feature = "int_log", issue = "70887")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_log2(self) -> Option<u32> {
+ if let Some(x) = <$NonZeroT>::new(self) {
+ Some(x.log2())
+ } else {
+ None
+ }
+ }
+
+ /// Returns the base 10 logarithm of the number, rounded down.
+ ///
+ /// Returns `None` if the number is zero.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(int_log)]
+ #[doc = concat!("assert_eq!(10", stringify!($SelfT), ".checked_log10(), Some(1));")]
+ /// ```
+ #[unstable(feature = "int_log", issue = "70887")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_log10(self) -> Option<u32> {
+ if let Some(x) = <$NonZeroT>::new(self) {
+ Some(x.log10())
+ } else {
+ None
+ }
+ }
+
+ /// Checked negation. Computes `-self`, returning `None` unless `self ==
+ /// 0`.
+ ///
+ /// Note that negating any positive integer will overflow.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(0", stringify!($SelfT), ".checked_neg(), Some(0));")]
+ #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".checked_neg(), None);")]
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_neg(self) -> Option<Self> {
+ let (a, b) = self.overflowing_neg();
+ if unlikely!(b) {None} else {Some(a)}
+ }
+
+ /// Checked shift left. Computes `self << rhs`, returning `None`
+ /// if `rhs` is larger than or equal to the number of bits in `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(0x1", stringify!($SelfT), ".checked_shl(4), Some(0x10));")]
+ #[doc = concat!("assert_eq!(0x10", stringify!($SelfT), ".checked_shl(129), None);")]
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_shl(self, rhs: u32) -> Option<Self> {
+ let (a, b) = self.overflowing_shl(rhs);
+ if unlikely!(b) {None} else {Some(a)}
+ }
+
+ /// Unchecked shift left. Computes `self << rhs`, assuming that
+ /// `rhs` is less than the number of bits in `self`.
+ ///
+ /// # Safety
+ ///
+ /// This results in undefined behavior if `rhs` is larger than
+ /// or equal to the number of bits in `self`,
+ /// i.e. when [`checked_shl`] would return `None`.
+ ///
+ #[doc = concat!("[`checked_shl`]: ", stringify!($SelfT), "::checked_shl")]
+ #[unstable(
+ feature = "unchecked_math",
+ reason = "niche optimization path",
+ issue = "85122",
+ )]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn unchecked_shl(self, rhs: Self) -> Self {
+ // SAFETY: the caller must uphold the safety contract for
+ // `unchecked_shl`.
+ unsafe { intrinsics::unchecked_shl(self, rhs) }
+ }
+
+ /// Checked shift right. Computes `self >> rhs`, returning `None`
+ /// if `rhs` is larger than or equal to the number of bits in `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(0x10", stringify!($SelfT), ".checked_shr(4), Some(0x1));")]
+ #[doc = concat!("assert_eq!(0x10", stringify!($SelfT), ".checked_shr(129), None);")]
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_shr(self, rhs: u32) -> Option<Self> {
+ let (a, b) = self.overflowing_shr(rhs);
+ if unlikely!(b) {None} else {Some(a)}
+ }
+
+ /// Unchecked shift right. Computes `self >> rhs`, assuming that
+ /// `rhs` is less than the number of bits in `self`.
+ ///
+ /// # Safety
+ ///
+ /// This results in undefined behavior if `rhs` is larger than
+ /// or equal to the number of bits in `self`,
+ /// i.e. when [`checked_shr`] would return `None`.
+ ///
+ #[doc = concat!("[`checked_shr`]: ", stringify!($SelfT), "::checked_shr")]
+ #[unstable(
+ feature = "unchecked_math",
+ reason = "niche optimization path",
+ issue = "85122",
+ )]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn unchecked_shr(self, rhs: Self) -> Self {
+ // SAFETY: the caller must uphold the safety contract for
+ // `unchecked_shr`.
+ unsafe { intrinsics::unchecked_shr(self, rhs) }
+ }
+
+ /// Checked exponentiation. Computes `self.pow(exp)`, returning `None` if
+ /// overflow occurred.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(2", stringify!($SelfT), ".checked_pow(5), Some(32));")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.checked_pow(2), None);")]
+ /// ```
+ #[stable(feature = "no_panic_pow", since = "1.34.0")]
+ #[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_pow(self, mut exp: u32) -> Option<Self> {
+ if exp == 0 {
+ return Some(1);
+ }
+ let mut base = self;
+ let mut acc: Self = 1;
+
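+ // Exponentiation by squaring. Loop invariant: the final result
+ // equals `acc * base.pow(exp)` at the top of each iteration.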
+ while exp > 1 {
+ if (exp & 1) == 1 {
+ acc = try_opt!(acc.checked_mul(base));
+ }
+ exp /= 2;
+ base = try_opt!(base.checked_mul(base));
+ }
+
+ // Since exp != 0, exp must be 1 at this point.
+ // Deal with the final bit of the exponent separately, since
+ // squaring the base afterwards is not necessary and may cause a
+ // needless overflow.
+
+ Some(try_opt!(acc.checked_mul(base)))
+ }
+
+ /// Saturating integer addition. Computes `self + rhs`, saturating at
+ /// the numeric bounds instead of overflowing.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".saturating_add(1), 101);")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.saturating_add(127), ", stringify!($SelfT), "::MAX);")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[rustc_const_stable(feature = "const_saturating_int_methods", since = "1.47.0")]
+ #[inline(always)]
+ pub const fn saturating_add(self, rhs: Self) -> Self {
+ intrinsics::saturating_add(self, rhs)
+ }
+
+ /// Saturating addition with a signed integer. Computes `self + rhs`,
+ /// saturating at the numeric bounds instead of overflowing.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// # #![feature(mixed_integer_ops)]
+ #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".saturating_add_signed(2), 3);")]
+ #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".saturating_add_signed(-2), 0);")]
+ #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX - 2).saturating_add_signed(4), ", stringify!($SelfT), "::MAX);")]
+ /// ```
+ #[unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn saturating_add_signed(self, rhs: $SignedT) -> Self {
+ let (res, overflow) = self.overflowing_add(rhs as Self);
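+ // The wrapping cast of `rhs` means the flag from `overflowing_add`
+ // is set in two cases: a genuine overflow with `rhs >= 0`, or a
+ // negative `rhs` whose true sum stays in range. The result is
+ // therefore valid exactly when `overflow == (rhs < 0)`.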
+ if overflow == (rhs < 0) {
+ res
+ } else if overflow {
+ Self::MAX
+ } else {
+ 0
+ }
+ }
+
+ /// Saturating integer subtraction. Computes `self - rhs`, saturating
+ /// at the numeric bounds instead of overflowing.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".saturating_sub(27), 73);")]
+ #[doc = concat!("assert_eq!(13", stringify!($SelfT), ".saturating_sub(127), 0);")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[rustc_const_stable(feature = "const_saturating_int_methods", since = "1.47.0")]
+ #[inline(always)]
+ pub const fn saturating_sub(self, rhs: Self) -> Self {
+ intrinsics::saturating_sub(self, rhs)
+ }
+
+ /// Saturating integer multiplication. Computes `self * rhs`,
+ /// saturating at the numeric bounds instead of overflowing.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(2", stringify!($SelfT), ".saturating_mul(10), 20);")]
+ #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX).saturating_mul(10), ", stringify!($SelfT),"::MAX);")]
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_saturating_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn saturating_mul(self, rhs: Self) -> Self {
+ match self.checked_mul(rhs) {
+ Some(x) => x,
+ None => Self::MAX,
+ }
+ }
+
+ /// Saturating integer division. Computes `self / rhs`, saturating at the
+ /// numeric bounds instead of overflowing.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".saturating_div(2), 2);")]
+ /// ```
+ ///
+ /// ```should_panic
+ #[doc = concat!("let _ = 1", stringify!($SelfT), ".saturating_div(0);")]
+ /// ```
+ #[stable(feature = "saturating_div", since = "1.58.0")]
+ #[rustc_const_stable(feature = "saturating_div", since = "1.58.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn saturating_div(self, rhs: Self) -> Self {
+ // on unsigned types, there is no overflow in integer division
+ self.wrapping_div(rhs)
+ }
+
+ /// Saturating integer exponentiation. Computes `self.pow(exp)`,
+ /// saturating at the numeric bounds instead of overflowing.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(4", stringify!($SelfT), ".saturating_pow(3), 64);")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.saturating_pow(2), ", stringify!($SelfT), "::MAX);")]
+ /// ```
+ #[stable(feature = "no_panic_pow", since = "1.34.0")]
+ #[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn saturating_pow(self, exp: u32) -> Self {
+ match self.checked_pow(exp) {
+ Some(x) => x,
+ None => Self::MAX,
+ }
+ }
+
+ /// Wrapping (modular) addition. Computes `self + rhs`,
+ /// wrapping around at the boundary of the type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(200", stringify!($SelfT), ".wrapping_add(55), 255);")]
+ #[doc = concat!("assert_eq!(200", stringify!($SelfT), ".wrapping_add(", stringify!($SelfT), "::MAX), 199);")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn wrapping_add(self, rhs: Self) -> Self {
+ intrinsics::wrapping_add(self, rhs)
+ }
+
+ /// Wrapping (modular) addition with a signed integer. Computes
+ /// `self + rhs`, wrapping around at the boundary of the type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// # #![feature(mixed_integer_ops)]
+ #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".wrapping_add_signed(2), 3);")]
+ #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".wrapping_add_signed(-2), ", stringify!($SelfT), "::MAX);")]
+ #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX - 2).wrapping_add_signed(4), 1);")]
+ /// ```
+ #[unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn wrapping_add_signed(self, rhs: $SignedT) -> Self {
+ self.wrapping_add(rhs as Self)
+ }
+
+ /// Wrapping (modular) subtraction. Computes `self - rhs`,
+ /// wrapping around at the boundary of the type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".wrapping_sub(100), 0);")]
+ #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".wrapping_sub(", stringify!($SelfT), "::MAX), 101);")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn wrapping_sub(self, rhs: Self) -> Self {
+ intrinsics::wrapping_sub(self, rhs)
+ }
+
+ /// Wrapping (modular) multiplication. Computes `self *
+ /// rhs`, wrapping around at the boundary of the type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// Please note that this example is shared between integer types,
+ /// which explains why `u8` is used here.
+ ///
+ /// ```
+ /// assert_eq!(10u8.wrapping_mul(12), 120);
+ /// assert_eq!(25u8.wrapping_mul(12), 44);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn wrapping_mul(self, rhs: Self) -> Self {
+ intrinsics::wrapping_mul(self, rhs)
+ }
+
+ /// Wrapping (modular) division. Computes `self / rhs`.
+ /// Wrapped division on unsigned types is just normal division;
+ /// there is no way wrapping could ever happen.
+ /// This function exists so that all operations
+ /// are accounted for in the wrapping operations.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".wrapping_div(10), 10);")]
+ /// ```
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[rustc_const_stable(feature = "const_wrapping_int_methods", since = "1.52.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn wrapping_div(self, rhs: Self) -> Self {
+ self / rhs
+ }
+
+ /// Wrapping Euclidean division. Computes `self.div_euclid(rhs)`.
+ /// Wrapped division on unsigned types is just normal division;
+ /// there is no way wrapping could ever happen.
+ /// This function exists so that all operations
+ /// are accounted for in the wrapping operations.
+ /// Since, for the positive integers, all common
+ /// definitions of division are equal, this
+ /// is exactly equal to `self.wrapping_div(rhs)`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".wrapping_div_euclid(10), 10);")]
+ /// ```
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_stable(feature = "const_euclidean_int_methods", since = "1.52.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn wrapping_div_euclid(self, rhs: Self) -> Self {
+ self / rhs
+ }
+
+ /// Wrapping (modular) remainder. Computes `self % rhs`.
+ /// Wrapped remainder calculation on unsigned types is
+ /// just the regular remainder calculation; there is
+ /// no way wrapping could ever happen.
+ /// This function exists so that all operations
+ /// are accounted for in the wrapping operations.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".wrapping_rem(10), 0);")]
+ /// ```
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[rustc_const_stable(feature = "const_wrapping_int_methods", since = "1.52.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn wrapping_rem(self, rhs: Self) -> Self {
+ self % rhs
+ }
+
+ /// Wrapping Euclidean modulo. Computes `self.rem_euclid(rhs)`.
+ /// Wrapped modulo calculation on unsigned types is
+ /// just the regular remainder calculation; there is
+ /// no way wrapping could ever happen.
+ /// This function exists so that all operations
+ /// are accounted for in the wrapping operations.
+ /// Since, for the positive integers, all common
+ /// definitions of division are equal, this
+ /// is exactly equal to `self.wrapping_rem(rhs)`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".wrapping_rem_euclid(10), 0);")]
+ /// ```
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_stable(feature = "const_euclidean_int_methods", since = "1.52.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn wrapping_rem_euclid(self, rhs: Self) -> Self {
+ self % rhs
+ }
+
+ /// Wrapping (modular) negation. Computes `-self`,
+ /// wrapping around at the boundary of the type.
+ ///
+ /// Since unsigned types do not have negative equivalents,
+ /// all applications of this function will wrap (except for `-0`).
+ /// For values smaller than the corresponding signed type's maximum,
+ /// the result is the same as casting the corresponding signed value.
+ /// Any larger values are equivalent to `MAX + 1 - (val - MAX - 1)`, where
+ /// `MAX` is the corresponding signed type's maximum.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// Please note that this example is shared between integer types,
+ /// which explains why `i8` is used here.
+ ///
+ /// ```
+ /// assert_eq!(100i8.wrapping_neg(), -100);
+ /// assert_eq!((-128i8).wrapping_neg(), -128);
+ /// ```
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn wrapping_neg(self) -> Self {
+ (0 as $SelfT).wrapping_sub(self)
+ }
+
+ /// Panic-free bitwise shift-left; yields `self << mask(rhs)`,
+ /// where `mask` removes any high-order bits of `rhs` that
+ /// would cause the shift to exceed the bitwidth of the type.
+ ///
+ /// Note that this is *not* the same as a rotate-left; the
+ /// RHS of a wrapping shift-left is restricted to the range
+ /// of the type, rather than the bits shifted out of the LHS
+ /// being returned to the other end. The primitive integer
+ /// types all implement a [`rotate_left`](Self::rotate_left) function,
+ /// which may be what you want instead.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".wrapping_shl(7), 128);")]
+ #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".wrapping_shl(128), 1);")]
+ /// ```
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn wrapping_shl(self, rhs: u32) -> Self {
+ // SAFETY: the masking by the bitsize of the type ensures that we do not shift
+ // out of bounds
+ unsafe {
+ intrinsics::unchecked_shl(self, (rhs & ($BITS - 1)) as $SelfT)
+ }
+ }
+
+ /// Panic-free bitwise shift-right; yields `self >> mask(rhs)`,
+ /// where `mask` removes any high-order bits of `rhs` that
+ /// would cause the shift to exceed the bitwidth of the type.
+ ///
+ /// Note that this is *not* the same as a rotate-right; the
+ /// RHS of a wrapping shift-right is restricted to the range
+ /// of the type, rather than the bits shifted out of the LHS
+ /// being returned to the other end. The primitive integer
+ /// types all implement a [`rotate_right`](Self::rotate_right) function,
+ /// which may be what you want instead.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(128", stringify!($SelfT), ".wrapping_shr(7), 1);")]
+ #[doc = concat!("assert_eq!(128", stringify!($SelfT), ".wrapping_shr(128), 128);")]
+ /// ```
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn wrapping_shr(self, rhs: u32) -> Self {
+ // SAFETY: the masking by the bitsize of the type ensures that we do not shift
+ // out of bounds
+ unsafe {
+ intrinsics::unchecked_shr(self, (rhs & ($BITS - 1)) as $SelfT)
+ }
+ }
+
+ /// Wrapping (modular) exponentiation. Computes `self.pow(exp)`,
+ /// wrapping around at the boundary of the type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(3", stringify!($SelfT), ".wrapping_pow(5), 243);")]
+ /// assert_eq!(3u8.wrapping_pow(6), 217);
+ /// ```
+ #[stable(feature = "no_panic_pow", since = "1.34.0")]
+ #[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn wrapping_pow(self, mut exp: u32) -> Self {
+ if exp == 0 {
+ return 1;
+ }
+ let mut base = self;
+ let mut acc: Self = 1;
+
+ while exp > 1 {
+ if (exp & 1) == 1 {
+ acc = acc.wrapping_mul(base);
+ }
+ exp /= 2;
+ base = base.wrapping_mul(base);
+ }
+
+ // Since exp != 0, exp must be 1 at this point.
+ // Deal with the final bit of the exponent separately, since
+ // squaring the base afterwards is not necessary and may cause a
+ // needless overflow.
+ acc.wrapping_mul(base)
+ }
+
+ /// Calculates `self` + `rhs`.
+ ///
+ /// Returns a tuple of the addition along with a boolean indicating
+ /// whether an arithmetic overflow would occur. If an overflow would
+ /// have occurred then the wrapped value is returned.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".overflowing_add(2), (7, false));")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.overflowing_add(1), (0, true));")]
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
+ let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
+ (a as Self, b)
+ }
+
+ /// Calculates `self + rhs + carry` without the ability to overflow.
+ ///
+ /// Performs "ternary addition" which takes in an extra bit to add, and may return an
+ /// additional bit of overflow. This allows for chaining together multiple additions
+ /// to create "big integers" which represent larger values.
+ ///
+ #[doc = concat!("This can be thought of as a ", stringify!($BITS), "-bit \"full adder\", in the electronics sense.")]
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ /// #![feature(bigint_helper_methods)]
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".carrying_add(2, false), (7, false));")]
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".carrying_add(2, true), (8, false));")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.carrying_add(1, false), (0, true));")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.carrying_add(0, true), (0, true));")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.carrying_add(1, true), (1, true));")]
+ #[doc = concat!("assert_eq!(",
+ stringify!($SelfT), "::MAX.carrying_add(", stringify!($SelfT), "::MAX, true), ",
+ "(", stringify!($SelfT), "::MAX, true));"
+ )]
+ /// ```
+ ///
+ /// If `carry` is false, this method is equivalent to [`overflowing_add`](Self::overflowing_add):
+ ///
+ /// ```
+ /// #![feature(bigint_helper_methods)]
+ #[doc = concat!("assert_eq!(5_", stringify!($SelfT), ".carrying_add(2, false), 5_", stringify!($SelfT), ".overflowing_add(2));")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.carrying_add(1, false), ", stringify!($SelfT), "::MAX.overflowing_add(1));")]
+ /// ```
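+ ///
+ /// A sketch of chaining limbs to add wider values (this example is
+ /// shared between integer types, hence the concrete `u8` limbs):
+ ///
+ /// ```
+ /// #![feature(bigint_helper_methods)]
+ /// // 0x01FF + 0x0102 == 0x0301, computed as (low, high) `u8` limbs.
+ /// let (a_lo, a_hi) = (0xFF_u8, 0x01_u8);
+ /// let (b_lo, b_hi) = (0x02_u8, 0x01_u8);
+ /// let (lo, carry) = a_lo.carrying_add(b_lo, false);
+ /// let (hi, _) = a_hi.carrying_add(b_hi, carry);
+ /// assert_eq!((hi, lo), (0x03, 0x01));
+ /// ```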
+ #[unstable(feature = "bigint_helper_methods", issue = "85532")]
+ #[rustc_const_unstable(feature = "const_bigint_helper_methods", issue = "85532")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn carrying_add(self, rhs: Self, carry: bool) -> (Self, bool) {
+ // note: longer-term this should be done via an intrinsic, but this has been shown
+ // to generate optimal code for now, and LLVM doesn't have an equivalent intrinsic
+ let (a, b) = self.overflowing_add(rhs);
+ let (c, d) = a.overflowing_add(carry as $SelfT);
+ (c, b || d)
+ }
+
+ /// Calculates `self` + `rhs` with a signed `rhs`.
+ ///
+ /// Returns a tuple of the addition along with a boolean indicating
+ /// whether an arithmetic overflow would occur. If an overflow would
+ /// have occurred then the wrapped value is returned.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// # #![feature(mixed_integer_ops)]
+ #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".overflowing_add_signed(2), (3, false));")]
+ #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".overflowing_add_signed(-2), (", stringify!($SelfT), "::MAX, true));")]
+ #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX - 2).overflowing_add_signed(4), (1, true));")]
+ /// ```
+ #[unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn overflowing_add_signed(self, rhs: $SignedT) -> (Self, bool) {
+ let (res, overflowed) = self.overflowing_add(rhs as Self);
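+ // When `rhs < 0`, the wrapping cast makes the carry flag the
+ // inverse of the real underflow condition; XOR-ing with `rhs < 0`
+ // yields the correct overflow indication in both cases.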
+ (res, overflowed ^ (rhs < 0))
+ }
+
+ /// Calculates `self` - `rhs`.
+ ///
+ /// Returns a tuple of the subtraction along with a boolean indicating
+ /// whether an arithmetic overflow would occur. If an overflow would
+ /// have occurred then the wrapped value is returned.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".overflowing_sub(2), (3, false));")]
+ #[doc = concat!("assert_eq!(0", stringify!($SelfT), ".overflowing_sub(1), (", stringify!($SelfT), "::MAX, true));")]
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
+ let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
+ (a as Self, b)
+ }
+
+ /// Calculates `self - rhs - borrow` without the ability to overflow.
+ ///
+ /// Performs "ternary subtraction" which takes in an extra bit to subtract, and may return
+ /// an additional bit of overflow. This allows for chaining together multiple subtractions
+ /// to create "big integers" which represent larger values.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ /// #![feature(bigint_helper_methods)]
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".borrowing_sub(2, false), (3, false));")]
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".borrowing_sub(2, true), (2, false));")]
+ #[doc = concat!("assert_eq!(0", stringify!($SelfT), ".borrowing_sub(1, false), (", stringify!($SelfT), "::MAX, true));")]
+ #[doc = concat!("assert_eq!(0", stringify!($SelfT), ".borrowing_sub(1, true), (", stringify!($SelfT), "::MAX - 1, true));")]
+ /// ```
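+ ///
+ /// A sketch of chaining limbs to subtract wider values (this example is
+ /// shared between integer types, hence the concrete `u8` limbs):
+ ///
+ /// ```
+ /// #![feature(bigint_helper_methods)]
+ /// // 0x0301 - 0x0102 == 0x01FF, computed as (low, high) `u8` limbs.
+ /// let (a_lo, a_hi) = (0x01_u8, 0x03_u8);
+ /// let (b_lo, b_hi) = (0x02_u8, 0x01_u8);
+ /// let (lo, borrow) = a_lo.borrowing_sub(b_lo, false);
+ /// let (hi, _) = a_hi.borrowing_sub(b_hi, borrow);
+ /// assert_eq!((hi, lo), (0x01, 0xFF));
+ /// ```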
+ #[unstable(feature = "bigint_helper_methods", issue = "85532")]
+ #[rustc_const_unstable(feature = "const_bigint_helper_methods", issue = "85532")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn borrowing_sub(self, rhs: Self, borrow: bool) -> (Self, bool) {
+ // note: longer-term this should be done via an intrinsic, but this has been shown
+ // to generate optimal code for now, and LLVM doesn't have an equivalent intrinsic
+ let (a, b) = self.overflowing_sub(rhs);
+ let (c, d) = a.overflowing_sub(borrow as $SelfT);
+ (c, b || d)
+ }
+
+ /// Computes the absolute difference between `self` and `other`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".abs_diff(80), 20", stringify!($SelfT), ");")]
+ #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".abs_diff(110), 10", stringify!($SelfT), ");")]
+ /// ```
+ #[stable(feature = "int_abs_diff", since = "1.60.0")]
+ #[rustc_const_stable(feature = "int_abs_diff", since = "1.60.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn abs_diff(self, other: Self) -> Self {
+ if mem::size_of::<Self>() == 1 {
+ // Trick LLVM into generating the psadbw instruction when SSE2
+ // is available and this function is autovectorized for u8's.
+ (self as i32).wrapping_sub(other as i32).abs() as Self
+ } else {
+ if self < other {
+ other - self
+ } else {
+ self - other
+ }
+ }
+ }
+
+ /// Calculates the multiplication of `self` and `rhs`.
+ ///
+ /// Returns a tuple of the multiplication along with a boolean
+ /// indicating whether an arithmetic overflow would occur. If an
+ /// overflow would have occurred then the wrapped value is returned.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// Please note that this example is shared between integer types,
+ /// which explains why `u32` is used here.
+ ///
+ /// ```
+ /// assert_eq!(5u32.overflowing_mul(2), (10, false));
+ /// assert_eq!(1_000_000_000u32.overflowing_mul(10), (1410065408, true));
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
+ let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
+ (a as Self, b)
+ }
+
+ /// Calculates the quotient when `self` is divided by `rhs`.
+ ///
+ /// Returns a tuple of the quotient along with a boolean indicating
+ /// whether an arithmetic overflow would occur. Note that for unsigned
+ /// integers overflow never occurs, so the second value is always
+ /// `false`.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is 0.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".overflowing_div(2), (2, false));")]
+ /// ```
+ #[inline(always)]
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_overflowing_int_methods", since = "1.52.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ pub const fn overflowing_div(self, rhs: Self) -> (Self, bool) {
+ (self / rhs, false)
+ }
+
+ /// Calculates the quotient of Euclidean division `self.div_euclid(rhs)`.
+ ///
+ /// Returns a tuple of the quotient along with a boolean indicating
+ /// whether an arithmetic overflow would occur. Note that for unsigned
+ /// integers overflow never occurs, so the second value is always
+ /// `false`.
+ /// Since, for the positive integers, all common
+ /// definitions of division are equal, this
+ /// is exactly equal to `self.overflowing_div(rhs)`.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is 0.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".overflowing_div_euclid(2), (2, false));")]
+ /// ```
+ #[inline(always)]
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_stable(feature = "const_euclidean_int_methods", since = "1.52.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ pub const fn overflowing_div_euclid(self, rhs: Self) -> (Self, bool) {
+ (self / rhs, false)
+ }
+
+ /// Calculates the remainder when `self` is divided by `rhs`.
+ ///
+ /// Returns a tuple of the remainder after dividing along with a boolean
+ /// indicating whether an arithmetic overflow would occur. Note that for
+ /// unsigned integers overflow never occurs, so the second value is
+ /// always `false`.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is 0.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".overflowing_rem(2), (1, false));")]
+ /// ```
+ #[inline(always)]
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_overflowing_int_methods", since = "1.52.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ pub const fn overflowing_rem(self, rhs: Self) -> (Self, bool) {
+ (self % rhs, false)
+ }
+
+ /// Calculates the remainder `self.rem_euclid(rhs)` as if by Euclidean division.
+ ///
+ /// Returns a tuple of the modulo after dividing along with a boolean
+ /// indicating whether an arithmetic overflow would occur. Note that for
+ /// unsigned integers overflow never occurs, so the second value is
+ /// always `false`.
+ /// Since, for the positive integers, all common
+ /// definitions of division are equal, this operation
+ /// is exactly equal to `self.overflowing_rem(rhs)`.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is 0.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".overflowing_rem_euclid(2), (1, false));")]
+ /// ```
+ #[inline(always)]
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_stable(feature = "const_euclidean_int_methods", since = "1.52.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ pub const fn overflowing_rem_euclid(self, rhs: Self) -> (Self, bool) {
+ (self % rhs, false)
+ }
+
+ /// Negates self in an overflowing fashion.
+ ///
+ /// Returns `!self + 1` using wrapping operations to return the value
+ /// that represents the negation of this unsigned value. Note that for
+ /// positive unsigned values overflow always occurs, but negating 0 does
+ /// not overflow.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(0", stringify!($SelfT), ".overflowing_neg(), (0, false));")]
+ #[doc = concat!("assert_eq!(2", stringify!($SelfT), ".overflowing_neg(), (-2i32 as ", stringify!($SelfT), ", true));")]
+ /// ```
+ #[inline(always)]
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ pub const fn overflowing_neg(self) -> (Self, bool) {
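+ // Two's complement negation: `!self + 1`. Every nonzero value
+ // wraps, so the overflow flag is simply `self != 0`.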
+ ((!self).wrapping_add(1), self != 0)
+ }
+
+ /// Shifts self left by `rhs` bits.
+ ///
+ /// Returns a tuple of the shifted version of self along with a boolean
+ /// indicating whether the shift value was larger than or equal to the
+ /// number of bits. If the shift value is too large, it is masked
+ /// with `N - 1`, where `N` is the number of bits, and the masked
+ /// value is then used to perform the shift.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(0x1", stringify!($SelfT), ".overflowing_shl(4), (0x10, false));")]
+ #[doc = concat!("assert_eq!(0x1", stringify!($SelfT), ".overflowing_shl(132), (0x10, true));")]
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn overflowing_shl(self, rhs: u32) -> (Self, bool) {
+ (self.wrapping_shl(rhs), (rhs > ($BITS - 1)))
+ }
+
+ /// Shifts self right by `rhs` bits.
+ ///
+ /// Returns a tuple of the shifted version of self along with a boolean
+ /// indicating whether the shift value was larger than or equal to the
+ /// number of bits. If the shift value is too large, it is masked
+ /// with `N - 1`, where `N` is the number of bits, and the masked
+ /// value is then used to perform the shift.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(0x10", stringify!($SelfT), ".overflowing_shr(4), (0x1, false));")]
+ #[doc = concat!("assert_eq!(0x10", stringify!($SelfT), ".overflowing_shr(132), (0x1, true));")]
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn overflowing_shr(self, rhs: u32) -> (Self, bool) {
+ (self.wrapping_shr(rhs), (rhs > ($BITS - 1)))
+ }
+
+ /// Raises self to the power of `exp`, using exponentiation by squaring.
+ ///
+ /// Returns a tuple of the exponentiation along with a bool indicating
+ /// whether an overflow happened.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(3", stringify!($SelfT), ".overflowing_pow(5), (243, false));")]
+ /// assert_eq!(3u8.overflowing_pow(6), (217, true));
+ /// ```
+ #[stable(feature = "no_panic_pow", since = "1.34.0")]
+ #[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn overflowing_pow(self, mut exp: u32) -> (Self, bool) {
+ if exp == 0 {
+ return (1, false);
+ }
+ let mut base = self;
+ let mut acc: Self = 1;
+ let mut overflown = false;
+ // Scratch space for storing results of overflowing_mul.
+ let mut r;
+
+ while exp > 1 {
+ if (exp & 1) == 1 {
+ r = acc.overflowing_mul(base);
+ acc = r.0;
+ overflown |= r.1;
+ }
+ exp /= 2;
+ r = base.overflowing_mul(base);
+ base = r.0;
+ overflown |= r.1;
+ }
+
+ // Since exp != 0, exp must be 1 at this point.
+ // Deal with the final bit of the exponent separately, since
+ // squaring the base afterwards is not necessary and may cause a
+ // needless overflow.
+ r = acc.overflowing_mul(base);
+ r.1 |= overflown;
+
+ r
+ }
+
+ /// Raises self to the power of `exp`, using exponentiation by squaring.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(2", stringify!($SelfT), ".pow(5), 32);")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ pub const fn pow(self, mut exp: u32) -> Self {
+ if exp == 0 {
+ return 1;
+ }
+ let mut base = self;
+ let mut acc = 1;
+
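+ // Square-and-multiply: `exp` is halved each iteration, so the
+ // loop runs at most log2(exp) times.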
+ while exp > 1 {
+ if (exp & 1) == 1 {
+ acc = acc * base;
+ }
+ exp /= 2;
+ base = base * base;
+ }
+
+ // Since exp != 0, exp must be 1 at this point.
+ // Deal with the final bit of the exponent separately, since
+ // squaring the base afterwards is not necessary and may cause a
+ // needless overflow.
+ acc * base
+ }
+
+ /// Performs Euclidean division.
+ ///
+ /// Since, for the positive integers, all common
+ /// definitions of division are equal, this
+ /// is exactly equal to `self / rhs`.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is 0.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(7", stringify!($SelfT), ".div_euclid(4), 1); // or any other integer type")]
+ /// ```
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_stable(feature = "const_euclidean_int_methods", since = "1.52.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ #[rustc_inherit_overflow_checks]
+ pub const fn div_euclid(self, rhs: Self) -> Self {
+ self / rhs
+ }
+
+ /// Calculates the least remainder of `self (mod rhs)`.
+ ///
+ /// Since, for the positive integers, all common
+ /// definitions of division are equal, this
+ /// is exactly equal to `self % rhs`.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is 0.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(7", stringify!($SelfT), ".rem_euclid(4), 3); // or any other integer type")]
+ /// ```
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_stable(feature = "const_euclidean_int_methods", since = "1.52.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ #[rustc_inherit_overflow_checks]
+ pub const fn rem_euclid(self, rhs: Self) -> Self {
+ self % rhs
+ }
+
+ /// Calculates the quotient of `self` and `rhs`, rounding the result towards negative infinity.
+ ///
+ /// This is the same as performing `self / rhs` for all unsigned integers.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is zero.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(int_roundings)]
+ #[doc = concat!("assert_eq!(7_", stringify!($SelfT), ".div_floor(4), 1);")]
+ /// ```
+ #[unstable(feature = "int_roundings", issue = "88581")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn div_floor(self, rhs: Self) -> Self {
+ self / rhs
+ }
+
+ /// Calculates the quotient of `self` and `rhs`, rounding the result towards positive infinity.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is zero.
+ ///
+ /// ## Overflow behavior
+ ///
+ /// On overflow, this function will panic if overflow checks are enabled (default in debug
+ /// mode) and wrap if overflow checks are disabled (default in release mode).
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(int_roundings)]
+ #[doc = concat!("assert_eq!(7_", stringify!($SelfT), ".div_ceil(4), 2);")]
+ /// ```
+ #[unstable(feature = "int_roundings", issue = "88581")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ pub const fn div_ceil(self, rhs: Self) -> Self {
+ let d = self / rhs;
+ let r = self % rhs;
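+            // For an unsigned divisor, `rhs > 0` always holds here (division
+            // by zero would already have panicked), so the quotient is bumped
+            // exactly when the division left a remainder.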
+ if r > 0 && rhs > 0 {
+ d + 1
+ } else {
+ d
+ }
+ }
+
+ /// Calculates the smallest value greater than or equal to `self` that
+ /// is a multiple of `rhs`.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is zero.
+ ///
+ /// ## Overflow behavior
+ ///
+ /// On overflow, this function will panic if overflow checks are enabled (default in debug
+ /// mode) and wrap if overflow checks are disabled (default in release mode).
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(int_roundings)]
+ #[doc = concat!("assert_eq!(16_", stringify!($SelfT), ".next_multiple_of(8), 16);")]
+ #[doc = concat!("assert_eq!(23_", stringify!($SelfT), ".next_multiple_of(8), 24);")]
+ /// ```
+ #[unstable(feature = "int_roundings", issue = "88581")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ pub const fn next_multiple_of(self, rhs: Self) -> Self {
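+            // `self % rhs` measures how far past the previous multiple `self`
+            // is; if nonzero, adding `rhs - r` rounds up to the next multiple.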
+ match self % rhs {
+ 0 => self,
+ r => self + (rhs - r)
+ }
+ }
+
+ /// Calculates the smallest value greater than or equal to `self` that
+ /// is a multiple of `rhs`. Returns `None` if `rhs` is zero or the
+ /// operation would result in overflow.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(int_roundings)]
+ #[doc = concat!("assert_eq!(16_", stringify!($SelfT), ".checked_next_multiple_of(8), Some(16));")]
+ #[doc = concat!("assert_eq!(23_", stringify!($SelfT), ".checked_next_multiple_of(8), Some(24));")]
+ #[doc = concat!("assert_eq!(1_", stringify!($SelfT), ".checked_next_multiple_of(0), None);")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.checked_next_multiple_of(2), None);")]
+ /// ```
+ #[unstable(feature = "int_roundings", issue = "88581")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_next_multiple_of(self, rhs: Self) -> Option<Self> {
+ match try_opt!(self.checked_rem(rhs)) {
+ 0 => Some(self),
+ // rhs - r cannot overflow because r is smaller than rhs
+ r => self.checked_add(rhs - r)
+ }
+ }
+
+ /// Returns `true` if and only if `self == 2^k` for some `k`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert!(16", stringify!($SelfT), ".is_power_of_two());")]
+ #[doc = concat!("assert!(!10", stringify!($SelfT), ".is_power_of_two());")]
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_is_power_of_two", since = "1.32.0")]
+ #[inline(always)]
+ pub const fn is_power_of_two(self) -> bool {
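+            // A value has exactly one set bit iff it is a power of two; this
+            // also correctly rejects zero, which has no set bits.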
+ self.count_ones() == 1
+ }
+
+        // Returns one less than the next power of two.
+        // (For 8u8 the next power of two is 8u8, and for 6u8 it is 8u8.)
+        //
+        // 8u8.one_less_than_next_power_of_two() == 7
+        // 6u8.one_less_than_next_power_of_two() == 7
+        //
+        // This method cannot overflow: in the cases where `next_power_of_two`
+        // would overflow, it instead returns the maximum value of the type.
+        // It can return 0 for an input of 0.
+ #[inline]
+ #[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")]
+ const fn one_less_than_next_power_of_two(self) -> Self {
+ if self <= 1 { return 0; }
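+            // For `self <= 1` the next power of two is 1, so one less is 0.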
+
+ let p = self - 1;
+ // SAFETY: Because `p > 0`, it cannot consist entirely of leading zeros.
+ // That means the shift is always in-bounds, and some processors
+ // (such as intel pre-haswell) have more efficient ctlz
+ // intrinsics when the argument is non-zero.
+ let z = unsafe { intrinsics::ctlz_nonzero(p) };
+ <$SelfT>::MAX >> z
+ }
+
+ /// Returns the smallest power of two greater than or equal to `self`.
+ ///
+        /// When the return value overflows (i.e., `self > (1 << (N-1))` for type
+        /// `uN`), this method panics in debug mode, and the return value wraps to 0
+        /// in release mode (the only situation in which this method can return 0).
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(2", stringify!($SelfT), ".next_power_of_two(), 2);")]
+ #[doc = concat!("assert_eq!(3", stringify!($SelfT), ".next_power_of_two(), 4);")]
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ pub const fn next_power_of_two(self) -> Self {
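+            // `one_less_than_next_power_of_two` returns 2^k - 1; adding 1
+            // yields 2^k. With overflow checks enabled, the `+ 1` panics when
+            // the result would not fit; otherwise it wraps to 0.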
+ self.one_less_than_next_power_of_two() + 1
+ }
+
+        /// Returns the smallest power of two greater than or equal to `self`. If
+ /// the next power of two is greater than the type's maximum value,
+ /// `None` is returned, otherwise the power of two is wrapped in `Some`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ #[doc = concat!("assert_eq!(2", stringify!($SelfT), ".checked_next_power_of_two(), Some(2));")]
+ #[doc = concat!("assert_eq!(3", stringify!($SelfT), ".checked_next_power_of_two(), Some(4));")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.checked_next_power_of_two(), None);")]
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ pub const fn checked_next_power_of_two(self) -> Option<Self> {
+ self.one_less_than_next_power_of_two().checked_add(1)
+ }
+
+        /// Returns the smallest power of two greater than or equal to `self`. If
+ /// the next power of two is greater than the type's maximum value,
+ /// the return value is wrapped to `0`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(wrapping_next_power_of_two)]
+ ///
+ #[doc = concat!("assert_eq!(2", stringify!($SelfT), ".wrapping_next_power_of_two(), 2);")]
+ #[doc = concat!("assert_eq!(3", stringify!($SelfT), ".wrapping_next_power_of_two(), 4);")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.wrapping_next_power_of_two(), 0);")]
+ /// ```
+ #[inline]
+ #[unstable(feature = "wrapping_next_power_of_two", issue = "32463",
+ reason = "needs decision on wrapping behaviour")]
+ #[rustc_const_unstable(feature = "wrapping_next_power_of_two", issue = "32463")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ pub const fn wrapping_next_power_of_two(self) -> Self {
+ self.one_less_than_next_power_of_two().wrapping_add(1)
+ }
+
+        /// Returns the memory representation of this integer as a byte array in
+ /// big-endian (network) byte order.
+ ///
+ #[doc = $to_xe_bytes_doc]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!("let bytes = ", $swap_op, stringify!($SelfT), ".to_be_bytes();")]
+ #[doc = concat!("assert_eq!(bytes, ", $be_bytes, ");")]
+ /// ```
+ #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
+ #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn to_be_bytes(self) -> [u8; mem::size_of::<Self>()] {
+ self.to_be().to_ne_bytes()
+ }
+
+        /// Returns the memory representation of this integer as a byte array in
+ /// little-endian byte order.
+ ///
+ #[doc = $to_xe_bytes_doc]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!("let bytes = ", $swap_op, stringify!($SelfT), ".to_le_bytes();")]
+ #[doc = concat!("assert_eq!(bytes, ", $le_bytes, ");")]
+ /// ```
+ #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
+ #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn to_le_bytes(self) -> [u8; mem::size_of::<Self>()] {
+ self.to_le().to_ne_bytes()
+ }
+
+        /// Returns the memory representation of this integer as a byte array in
+ /// native byte order.
+ ///
+ /// As the target platform's native endianness is used, portable code
+ /// should use [`to_be_bytes`] or [`to_le_bytes`], as appropriate,
+ /// instead.
+ ///
+ #[doc = $to_xe_bytes_doc]
+ ///
+ /// [`to_be_bytes`]: Self::to_be_bytes
+ /// [`to_le_bytes`]: Self::to_le_bytes
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!("let bytes = ", $swap_op, stringify!($SelfT), ".to_ne_bytes();")]
+ /// assert_eq!(
+ /// bytes,
+ /// if cfg!(target_endian = "big") {
+ #[doc = concat!(" ", $be_bytes)]
+ /// } else {
+ #[doc = concat!(" ", $le_bytes)]
+ /// }
+ /// );
+ /// ```
+ #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
+ #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ // SAFETY: const sound because integers are plain old datatypes so we can always
+ // transmute them to arrays of bytes
+ #[inline]
+ pub const fn to_ne_bytes(self) -> [u8; mem::size_of::<Self>()] {
+ // SAFETY: integers are plain old datatypes so we can always transmute them to
+ // arrays of bytes
+ unsafe { mem::transmute(self) }
+ }
+
+        /// Creates a native endian integer value from its representation
+ /// as a byte array in big endian.
+ ///
+ #[doc = $from_xe_bytes_doc]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!("let value = ", stringify!($SelfT), "::from_be_bytes(", $be_bytes, ");")]
+ #[doc = concat!("assert_eq!(value, ", $swap_op, ");")]
+ /// ```
+ ///
+ /// When starting from a slice rather than an array, fallible conversion APIs can be used:
+ ///
+ /// ```
+ #[doc = concat!("fn read_be_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {")]
+ #[doc = concat!(" let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());")]
+ /// *input = rest;
+ #[doc = concat!(" ", stringify!($SelfT), "::from_be_bytes(int_bytes.try_into().unwrap())")]
+ /// }
+ /// ```
+ #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
+ #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
+ #[must_use]
+ #[inline]
+ pub const fn from_be_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+ Self::from_be(Self::from_ne_bytes(bytes))
+ }
+
+        /// Creates a native endian integer value from its representation
+ /// as a byte array in little endian.
+ ///
+ #[doc = $from_xe_bytes_doc]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!("let value = ", stringify!($SelfT), "::from_le_bytes(", $le_bytes, ");")]
+ #[doc = concat!("assert_eq!(value, ", $swap_op, ");")]
+ /// ```
+ ///
+ /// When starting from a slice rather than an array, fallible conversion APIs can be used:
+ ///
+ /// ```
+ #[doc = concat!("fn read_le_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {")]
+ #[doc = concat!(" let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());")]
+ /// *input = rest;
+ #[doc = concat!(" ", stringify!($SelfT), "::from_le_bytes(int_bytes.try_into().unwrap())")]
+ /// }
+ /// ```
+ #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
+ #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
+ #[must_use]
+ #[inline]
+ pub const fn from_le_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+ Self::from_le(Self::from_ne_bytes(bytes))
+ }
+
+        /// Creates a native endian integer value from its memory representation
+ /// as a byte array in native endianness.
+ ///
+        /// As the target platform's native endianness is used, portable code
+        /// likely wants to use [`from_be_bytes`] or [`from_le_bytes`], as
+        /// appropriate, instead.
+ ///
+ /// [`from_be_bytes`]: Self::from_be_bytes
+ /// [`from_le_bytes`]: Self::from_le_bytes
+ ///
+ #[doc = $from_xe_bytes_doc]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!("let value = ", stringify!($SelfT), "::from_ne_bytes(if cfg!(target_endian = \"big\") {")]
+ #[doc = concat!(" ", $be_bytes, "")]
+ /// } else {
+ #[doc = concat!(" ", $le_bytes, "")]
+ /// });
+ #[doc = concat!("assert_eq!(value, ", $swap_op, ");")]
+ /// ```
+ ///
+ /// When starting from a slice rather than an array, fallible conversion APIs can be used:
+ ///
+ /// ```
+ #[doc = concat!("fn read_ne_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {")]
+ #[doc = concat!(" let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());")]
+ /// *input = rest;
+ #[doc = concat!(" ", stringify!($SelfT), "::from_ne_bytes(int_bytes.try_into().unwrap())")]
+ /// }
+ /// ```
+ #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
+ #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
+ #[must_use]
+ // SAFETY: const sound because integers are plain old datatypes so we can always
+ // transmute to them
+ #[inline]
+ pub const fn from_ne_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+ // SAFETY: integers are plain old datatypes so we can always transmute to them
+ unsafe { mem::transmute(bytes) }
+ }
+
+ /// New code should prefer to use
+ #[doc = concat!("[`", stringify!($SelfT), "::MIN", "`] instead.")]
+ ///
+ /// Returns the smallest value that can be represented by this integer type.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_promotable]
+ #[inline(always)]
+ #[rustc_const_stable(feature = "const_max_value", since = "1.32.0")]
+ #[deprecated(since = "TBD", note = "replaced by the `MIN` associated constant on this type")]
+ pub const fn min_value() -> Self { Self::MIN }
+
+ /// New code should prefer to use
+ #[doc = concat!("[`", stringify!($SelfT), "::MAX", "`] instead.")]
+ ///
+ /// Returns the largest value that can be represented by this integer type.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_promotable]
+ #[inline(always)]
+ #[rustc_const_stable(feature = "const_max_value", since = "1.32.0")]
+ #[deprecated(since = "TBD", note = "replaced by the `MAX` associated constant on this type")]
+ pub const fn max_value() -> Self { Self::MAX }
+ }
+}
diff --git a/library/core/src/num/wrapping.rs b/library/core/src/num/wrapping.rs
new file mode 100644
index 000000000..5353d900e
--- /dev/null
+++ b/library/core/src/num/wrapping.rs
@@ -0,0 +1,1123 @@
+//! Definitions of `Wrapping<T>`.
+
+use crate::fmt;
+use crate::ops::{Add, AddAssign, BitAnd, BitAndAssign, BitOr, BitOrAssign};
+use crate::ops::{BitXor, BitXorAssign, Div, DivAssign};
+use crate::ops::{Mul, MulAssign, Neg, Not, Rem, RemAssign};
+use crate::ops::{Shl, ShlAssign, Shr, ShrAssign, Sub, SubAssign};
+
+/// Provides intentionally-wrapped arithmetic on `T`.
+///
+/// Operations like `+` on `u32` values are intended to never overflow,
+/// and in some debug configurations overflow is detected and results
+/// in a panic. While most arithmetic falls into this category, some
+/// code explicitly expects and relies upon modular arithmetic (e.g.,
+/// hashing).
+///
+/// Wrapping arithmetic can be achieved either through methods like
+/// `wrapping_add`, or through the `Wrapping<T>` type, which says that
+/// all standard arithmetic operations on the underlying value are
+/// intended to have wrapping semantics.
+///
+/// The underlying value can be retrieved through the `.0` index of the
+/// `Wrapping` tuple.
+///
+/// # Examples
+///
+/// ```
+/// use std::num::Wrapping;
+///
+/// let zero = Wrapping(0u32);
+/// let one = Wrapping(1u32);
+///
+/// assert_eq!(u32::MAX, (zero - one).0);
+/// ```
+///
+/// # Layout
+///
+/// `Wrapping<T>` is guaranteed to have the same layout and ABI as `T`.
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Default, Hash)]
+#[repr(transparent)]
+pub struct Wrapping<T>(#[stable(feature = "rust1", since = "1.0.0")] pub T);
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: fmt::Debug> fmt::Debug for Wrapping<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
+
+#[stable(feature = "wrapping_display", since = "1.10.0")]
+impl<T: fmt::Display> fmt::Display for Wrapping<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
+
+#[stable(feature = "wrapping_fmt", since = "1.11.0")]
+impl<T: fmt::Binary> fmt::Binary for Wrapping<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
+
+#[stable(feature = "wrapping_fmt", since = "1.11.0")]
+impl<T: fmt::Octal> fmt::Octal for Wrapping<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
+
+#[stable(feature = "wrapping_fmt", since = "1.11.0")]
+impl<T: fmt::LowerHex> fmt::LowerHex for Wrapping<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
+
+#[stable(feature = "wrapping_fmt", since = "1.11.0")]
+impl<T: fmt::UpperHex> fmt::UpperHex for Wrapping<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
+
+#[allow(unused_macros)]
+macro_rules! sh_impl_signed {
+ ($t:ident, $f:ident) => {
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Shl<$f> for Wrapping<$t> {
+ type Output = Wrapping<$t>;
+
+ #[inline]
+ fn shl(self, other: $f) -> Wrapping<$t> {
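+                // A negative shift amount shifts in the opposite direction.
+                // Either way the amount is masked with `shift_max` so it stays
+                // within the bit width, giving wrapping (modular) semantics.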
+ if other < 0 {
+ Wrapping(self.0.wrapping_shr((-other & self::shift_max::$t as $f) as u32))
+ } else {
+ Wrapping(self.0.wrapping_shl((other & self::shift_max::$t as $f) as u32))
+ }
+ }
+ }
+ forward_ref_binop! { impl const Shl, shl for Wrapping<$t>, $f,
+ #[stable(feature = "wrapping_ref_ops", since = "1.39.0")] }
+
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const ShlAssign<$f> for Wrapping<$t> {
+ #[inline]
+ fn shl_assign(&mut self, other: $f) {
+ *self = *self << other;
+ }
+ }
+ forward_ref_op_assign! { impl const ShlAssign, shl_assign for Wrapping<$t>, $f }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Shr<$f> for Wrapping<$t> {
+ type Output = Wrapping<$t>;
+
+ #[inline]
+ fn shr(self, other: $f) -> Wrapping<$t> {
+ if other < 0 {
+ Wrapping(self.0.wrapping_shl((-other & self::shift_max::$t as $f) as u32))
+ } else {
+ Wrapping(self.0.wrapping_shr((other & self::shift_max::$t as $f) as u32))
+ }
+ }
+ }
+ forward_ref_binop! { impl const Shr, shr for Wrapping<$t>, $f,
+ #[stable(feature = "wrapping_ref_ops", since = "1.39.0")] }
+
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const ShrAssign<$f> for Wrapping<$t> {
+ #[inline]
+ fn shr_assign(&mut self, other: $f) {
+ *self = *self >> other;
+ }
+ }
+ forward_ref_op_assign! { impl const ShrAssign, shr_assign for Wrapping<$t>, $f }
+ };
+}
+
+macro_rules! sh_impl_unsigned {
+ ($t:ident, $f:ident) => {
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Shl<$f> for Wrapping<$t> {
+ type Output = Wrapping<$t>;
+
+ #[inline]
+ fn shl(self, other: $f) -> Wrapping<$t> {
+ Wrapping(self.0.wrapping_shl((other & self::shift_max::$t as $f) as u32))
+ }
+ }
+ forward_ref_binop! { impl const Shl, shl for Wrapping<$t>, $f,
+ #[stable(feature = "wrapping_ref_ops", since = "1.39.0")] }
+
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const ShlAssign<$f> for Wrapping<$t> {
+ #[inline]
+ fn shl_assign(&mut self, other: $f) {
+ *self = *self << other;
+ }
+ }
+ forward_ref_op_assign! { impl const ShlAssign, shl_assign for Wrapping<$t>, $f }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Shr<$f> for Wrapping<$t> {
+ type Output = Wrapping<$t>;
+
+ #[inline]
+ fn shr(self, other: $f) -> Wrapping<$t> {
+ Wrapping(self.0.wrapping_shr((other & self::shift_max::$t as $f) as u32))
+ }
+ }
+ forward_ref_binop! { impl const Shr, shr for Wrapping<$t>, $f,
+ #[stable(feature = "wrapping_ref_ops", since = "1.39.0")] }
+
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const ShrAssign<$f> for Wrapping<$t> {
+ #[inline]
+ fn shr_assign(&mut self, other: $f) {
+ *self = *self >> other;
+ }
+ }
+ forward_ref_op_assign! { impl const ShrAssign, shr_assign for Wrapping<$t>, $f }
+ };
+}
+
+// FIXME (#23545): uncomment the remaining impls
+macro_rules! sh_impl_all {
+ ($($t:ident)*) => ($(
+ //sh_impl_unsigned! { $t, u8 }
+ //sh_impl_unsigned! { $t, u16 }
+ //sh_impl_unsigned! { $t, u32 }
+ //sh_impl_unsigned! { $t, u64 }
+ //sh_impl_unsigned! { $t, u128 }
+ sh_impl_unsigned! { $t, usize }
+
+ //sh_impl_signed! { $t, i8 }
+ //sh_impl_signed! { $t, i16 }
+ //sh_impl_signed! { $t, i32 }
+ //sh_impl_signed! { $t, i64 }
+ //sh_impl_signed! { $t, i128 }
+ //sh_impl_signed! { $t, isize }
+ )*)
+}
+
+sh_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize }
+
+// FIXME(30524): impl Op<T> for Wrapping<T>, impl OpAssign<T> for Wrapping<T>
+macro_rules! wrapping_impl {
+ ($($t:ty)*) => ($(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Add for Wrapping<$t> {
+ type Output = Wrapping<$t>;
+
+ #[inline]
+ fn add(self, other: Wrapping<$t>) -> Wrapping<$t> {
+ Wrapping(self.0.wrapping_add(other.0))
+ }
+ }
+ forward_ref_binop! { impl const Add, add for Wrapping<$t>, Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const AddAssign for Wrapping<$t> {
+ #[inline]
+ fn add_assign(&mut self, other: Wrapping<$t>) {
+ *self = *self + other;
+ }
+ }
+ forward_ref_op_assign! { impl const AddAssign, add_assign for Wrapping<$t>, Wrapping<$t> }
+
+ #[stable(feature = "wrapping_int_assign_impl", since = "1.60.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const AddAssign<$t> for Wrapping<$t> {
+ #[inline]
+ fn add_assign(&mut self, other: $t) {
+ *self = *self + Wrapping(other);
+ }
+ }
+ forward_ref_op_assign! { impl const AddAssign, add_assign for Wrapping<$t>, $t }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Sub for Wrapping<$t> {
+ type Output = Wrapping<$t>;
+
+ #[inline]
+ fn sub(self, other: Wrapping<$t>) -> Wrapping<$t> {
+ Wrapping(self.0.wrapping_sub(other.0))
+ }
+ }
+ forward_ref_binop! { impl const Sub, sub for Wrapping<$t>, Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const SubAssign for Wrapping<$t> {
+ #[inline]
+ fn sub_assign(&mut self, other: Wrapping<$t>) {
+ *self = *self - other;
+ }
+ }
+ forward_ref_op_assign! { impl const SubAssign, sub_assign for Wrapping<$t>, Wrapping<$t> }
+
+ #[stable(feature = "wrapping_int_assign_impl", since = "1.60.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const SubAssign<$t> for Wrapping<$t> {
+ #[inline]
+ fn sub_assign(&mut self, other: $t) {
+ *self = *self - Wrapping(other);
+ }
+ }
+ forward_ref_op_assign! { impl const SubAssign, sub_assign for Wrapping<$t>, $t }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Mul for Wrapping<$t> {
+ type Output = Wrapping<$t>;
+
+ #[inline]
+ fn mul(self, other: Wrapping<$t>) -> Wrapping<$t> {
+ Wrapping(self.0.wrapping_mul(other.0))
+ }
+ }
+        forward_ref_binop! { impl const Mul, mul for Wrapping<$t>, Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const MulAssign for Wrapping<$t> {
+ #[inline]
+ fn mul_assign(&mut self, other: Wrapping<$t>) {
+ *self = *self * other;
+ }
+ }
+ forward_ref_op_assign! { impl const MulAssign, mul_assign for Wrapping<$t>, Wrapping<$t> }
+
+ #[stable(feature = "wrapping_int_assign_impl", since = "1.60.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const MulAssign<$t> for Wrapping<$t> {
+ #[inline]
+ fn mul_assign(&mut self, other: $t) {
+ *self = *self * Wrapping(other);
+ }
+ }
+ forward_ref_op_assign! { impl const MulAssign, mul_assign for Wrapping<$t>, $t }
+
+ #[stable(feature = "wrapping_div", since = "1.3.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Div for Wrapping<$t> {
+ type Output = Wrapping<$t>;
+
+ #[inline]
+ fn div(self, other: Wrapping<$t>) -> Wrapping<$t> {
+ Wrapping(self.0.wrapping_div(other.0))
+ }
+ }
+ forward_ref_binop! { impl const Div, div for Wrapping<$t>, Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const DivAssign for Wrapping<$t> {
+ #[inline]
+ fn div_assign(&mut self, other: Wrapping<$t>) {
+ *self = *self / other;
+ }
+ }
+ forward_ref_op_assign! { impl const DivAssign, div_assign for Wrapping<$t>, Wrapping<$t> }
+
+ #[stable(feature = "wrapping_int_assign_impl", since = "1.60.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const DivAssign<$t> for Wrapping<$t> {
+ #[inline]
+ fn div_assign(&mut self, other: $t) {
+ *self = *self / Wrapping(other);
+ }
+ }
+ forward_ref_op_assign! { impl const DivAssign, div_assign for Wrapping<$t>, $t }
+
+ #[stable(feature = "wrapping_impls", since = "1.7.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Rem for Wrapping<$t> {
+ type Output = Wrapping<$t>;
+
+ #[inline]
+ fn rem(self, other: Wrapping<$t>) -> Wrapping<$t> {
+ Wrapping(self.0.wrapping_rem(other.0))
+ }
+ }
+ forward_ref_binop! { impl const Rem, rem for Wrapping<$t>, Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const RemAssign for Wrapping<$t> {
+ #[inline]
+ fn rem_assign(&mut self, other: Wrapping<$t>) {
+ *self = *self % other;
+ }
+ }
+ forward_ref_op_assign! { impl const RemAssign, rem_assign for Wrapping<$t>, Wrapping<$t> }
+
+ #[stable(feature = "wrapping_int_assign_impl", since = "1.60.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const RemAssign<$t> for Wrapping<$t> {
+ #[inline]
+ fn rem_assign(&mut self, other: $t) {
+ *self = *self % Wrapping(other);
+ }
+ }
+ forward_ref_op_assign! { impl const RemAssign, rem_assign for Wrapping<$t>, $t }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Not for Wrapping<$t> {
+ type Output = Wrapping<$t>;
+
+ #[inline]
+ fn not(self) -> Wrapping<$t> {
+ Wrapping(!self.0)
+ }
+ }
+ forward_ref_unop! { impl const Not, not for Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitXor for Wrapping<$t> {
+ type Output = Wrapping<$t>;
+
+ #[inline]
+ fn bitxor(self, other: Wrapping<$t>) -> Wrapping<$t> {
+ Wrapping(self.0 ^ other.0)
+ }
+ }
+ forward_ref_binop! { impl const BitXor, bitxor for Wrapping<$t>, Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitXorAssign for Wrapping<$t> {
+ #[inline]
+ fn bitxor_assign(&mut self, other: Wrapping<$t>) {
+ *self = *self ^ other;
+ }
+ }
+ forward_ref_op_assign! { impl const BitXorAssign, bitxor_assign for Wrapping<$t>, Wrapping<$t> }
+
+ #[stable(feature = "wrapping_int_assign_impl", since = "1.60.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitXorAssign<$t> for Wrapping<$t> {
+ #[inline]
+ fn bitxor_assign(&mut self, other: $t) {
+ *self = *self ^ Wrapping(other);
+ }
+ }
+ forward_ref_op_assign! { impl const BitXorAssign, bitxor_assign for Wrapping<$t>, $t }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitOr for Wrapping<$t> {
+ type Output = Wrapping<$t>;
+
+ #[inline]
+ fn bitor(self, other: Wrapping<$t>) -> Wrapping<$t> {
+ Wrapping(self.0 | other.0)
+ }
+ }
+ forward_ref_binop! { impl const BitOr, bitor for Wrapping<$t>, Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitOrAssign for Wrapping<$t> {
+ #[inline]
+ fn bitor_assign(&mut self, other: Wrapping<$t>) {
+ *self = *self | other;
+ }
+ }
+ forward_ref_op_assign! { impl const BitOrAssign, bitor_assign for Wrapping<$t>, Wrapping<$t> }
+
+ #[stable(feature = "wrapping_int_assign_impl", since = "1.60.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitOrAssign<$t> for Wrapping<$t> {
+ #[inline]
+ fn bitor_assign(&mut self, other: $t) {
+ *self = *self | Wrapping(other);
+ }
+ }
+ forward_ref_op_assign! { impl const BitOrAssign, bitor_assign for Wrapping<$t>, $t }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitAnd for Wrapping<$t> {
+ type Output = Wrapping<$t>;
+
+ #[inline]
+ fn bitand(self, other: Wrapping<$t>) -> Wrapping<$t> {
+ Wrapping(self.0 & other.0)
+ }
+ }
+ forward_ref_binop! { impl const BitAnd, bitand for Wrapping<$t>, Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitAndAssign for Wrapping<$t> {
+ #[inline]
+ fn bitand_assign(&mut self, other: Wrapping<$t>) {
+ *self = *self & other;
+ }
+ }
+ forward_ref_op_assign! { impl const BitAndAssign, bitand_assign for Wrapping<$t>, Wrapping<$t> }
+
+ #[stable(feature = "wrapping_int_assign_impl", since = "1.60.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitAndAssign<$t> for Wrapping<$t> {
+ #[inline]
+ fn bitand_assign(&mut self, other: $t) {
+ *self = *self & Wrapping(other);
+ }
+ }
+ forward_ref_op_assign! { impl const BitAndAssign, bitand_assign for Wrapping<$t>, $t }
+
+ #[stable(feature = "wrapping_neg", since = "1.10.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Neg for Wrapping<$t> {
+ type Output = Self;
+ #[inline]
+ fn neg(self) -> Self {
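+                // Wrapping negation computes `0 - self` with wraparound, so
+                // negating `MIN` yields `MIN` rather than panicking.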
+ Wrapping(0) - self
+ }
+ }
+ forward_ref_unop! { impl const Neg, neg for Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+
+ )*)
+}
+
+wrapping_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
+
+macro_rules! wrapping_int_impl {
+ ($($t:ty)*) => ($(
+ impl Wrapping<$t> {
+ /// Returns the smallest value that can be represented by this integer type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(wrapping_int_impl)]
+ /// use std::num::Wrapping;
+ ///
+ #[doc = concat!("assert_eq!(<Wrapping<", stringify!($t), ">>::MIN, Wrapping(", stringify!($t), "::MIN));")]
+ /// ```
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const MIN: Self = Self(<$t>::MIN);
+
+ /// Returns the largest value that can be represented by this integer type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(wrapping_int_impl)]
+ /// use std::num::Wrapping;
+ ///
+ #[doc = concat!("assert_eq!(<Wrapping<", stringify!($t), ">>::MAX, Wrapping(", stringify!($t), "::MAX));")]
+ /// ```
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const MAX: Self = Self(<$t>::MAX);
+
+ /// Returns the size of this integer type in bits.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(wrapping_int_impl)]
+ /// use std::num::Wrapping;
+ ///
+ #[doc = concat!("assert_eq!(<Wrapping<", stringify!($t), ">>::BITS, ", stringify!($t), "::BITS);")]
+ /// ```
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const BITS: u32 = <$t>::BITS;
+
+ /// Returns the number of ones in the binary representation of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(wrapping_int_impl)]
+ /// use std::num::Wrapping;
+ ///
+ #[doc = concat!("let n = Wrapping(0b01001100", stringify!($t), ");")]
+ ///
+ /// assert_eq!(n.count_ones(), 3);
+ /// ```
+ #[inline]
+ #[doc(alias = "popcount")]
+ #[doc(alias = "popcnt")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const fn count_ones(self) -> u32 {
+ self.0.count_ones()
+ }
+
+ /// Returns the number of zeros in the binary representation of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(wrapping_int_impl)]
+ /// use std::num::Wrapping;
+ ///
+ #[doc = concat!("assert_eq!(Wrapping(!0", stringify!($t), ").count_zeros(), 0);")]
+ /// ```
+ #[inline]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const fn count_zeros(self) -> u32 {
+ self.0.count_zeros()
+ }
+
+ /// Returns the number of trailing zeros in the binary representation of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(wrapping_int_impl)]
+ /// use std::num::Wrapping;
+ ///
+ #[doc = concat!("let n = Wrapping(0b0101000", stringify!($t), ");")]
+ ///
+ /// assert_eq!(n.trailing_zeros(), 3);
+ /// ```
+ #[inline]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const fn trailing_zeros(self) -> u32 {
+ self.0.trailing_zeros()
+ }
+
+ /// Shifts the bits to the left by a specified amount, `n`,
+ /// wrapping the truncated bits to the end of the resulting
+ /// integer.
+ ///
+ /// Please note this isn't the same operation as the `<<` shifting
+ /// operator!
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(wrapping_int_impl)]
+ /// use std::num::Wrapping;
+ ///
+ /// let n: Wrapping<i64> = Wrapping(0x0123456789ABCDEF);
+ /// let m: Wrapping<i64> = Wrapping(-0x76543210FEDCBA99);
+ ///
+ /// assert_eq!(n.rotate_left(32), m);
+ /// ```
+ #[inline]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const fn rotate_left(self, n: u32) -> Self {
+ Wrapping(self.0.rotate_left(n))
+ }
+
+ /// Shifts the bits to the right by a specified amount, `n`,
+ /// wrapping the truncated bits to the beginning of the resulting
+ /// integer.
+ ///
+ /// Please note this isn't the same operation as the `>>` shifting
+ /// operator!
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(wrapping_int_impl)]
+ /// use std::num::Wrapping;
+ ///
+ /// let n: Wrapping<i64> = Wrapping(0x0123456789ABCDEF);
+ /// let m: Wrapping<i64> = Wrapping(-0xFEDCBA987654322);
+ ///
+ /// assert_eq!(n.rotate_right(4), m);
+ /// ```
+ #[inline]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const fn rotate_right(self, n: u32) -> Self {
+ Wrapping(self.0.rotate_right(n))
+ }
+
+ /// Reverses the byte order of the integer.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(wrapping_int_impl)]
+ /// use std::num::Wrapping;
+ ///
+            /// let n: Wrapping<i16> = Wrapping(0b00000000_01010101);
+ /// assert_eq!(n, Wrapping(85));
+ ///
+ /// let m = n.swap_bytes();
+ ///
+ /// assert_eq!(m, Wrapping(0b01010101_00000000));
+ /// assert_eq!(m, Wrapping(21760));
+ /// ```
+ #[inline]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const fn swap_bytes(self) -> Self {
+ Wrapping(self.0.swap_bytes())
+ }
+
+ /// Reverses the bit pattern of the integer.
+ ///
+ /// # Examples
+ ///
+            /// Please note that this example is shared between integer types,
+            /// which explains why `i16` is used here.
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::num::Wrapping;
+ ///
+            /// let n = Wrapping(0b00000000_01010101i16);
+ /// assert_eq!(n, Wrapping(85));
+ ///
+ /// let m = n.reverse_bits();
+ ///
+ /// assert_eq!(m.0 as u16, 0b10101010_00000000);
+ /// assert_eq!(m, Wrapping(-22016));
+ /// ```
+ #[stable(feature = "reverse_bits", since = "1.37.0")]
+ #[rustc_const_stable(feature = "const_reverse_bits", since = "1.37.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn reverse_bits(self) -> Self {
+ Wrapping(self.0.reverse_bits())
+ }
+
+ /// Converts an integer from big endian to the target's endianness.
+ ///
+ /// On big endian this is a no-op. On little endian the bytes are
+ /// swapped.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(wrapping_int_impl)]
+ /// use std::num::Wrapping;
+ ///
+ #[doc = concat!("let n = Wrapping(0x1A", stringify!($t), ");")]
+ ///
+ /// if cfg!(target_endian = "big") {
+ #[doc = concat!(" assert_eq!(<Wrapping<", stringify!($t), ">>::from_be(n), n)")]
+ /// } else {
+ #[doc = concat!(" assert_eq!(<Wrapping<", stringify!($t), ">>::from_be(n), n.swap_bytes())")]
+ /// }
+ /// ```
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const fn from_be(x: Self) -> Self {
+ Wrapping(<$t>::from_be(x.0))
+ }
+
+ /// Converts an integer from little endian to the target's endianness.
+ ///
+ /// On little endian this is a no-op. On big endian the bytes are
+ /// swapped.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(wrapping_int_impl)]
+ /// use std::num::Wrapping;
+ ///
+ #[doc = concat!("let n = Wrapping(0x1A", stringify!($t), ");")]
+ ///
+ /// if cfg!(target_endian = "little") {
+ #[doc = concat!(" assert_eq!(<Wrapping<", stringify!($t), ">>::from_le(n), n)")]
+ /// } else {
+ #[doc = concat!(" assert_eq!(<Wrapping<", stringify!($t), ">>::from_le(n), n.swap_bytes())")]
+ /// }
+ /// ```
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const fn from_le(x: Self) -> Self {
+ Wrapping(<$t>::from_le(x.0))
+ }
+
+ /// Converts `self` to big endian from the target's endianness.
+ ///
+ /// On big endian this is a no-op. On little endian the bytes are
+ /// swapped.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(wrapping_int_impl)]
+ /// use std::num::Wrapping;
+ ///
+ #[doc = concat!("let n = Wrapping(0x1A", stringify!($t), ");")]
+ ///
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(n.to_be(), n)
+ /// } else {
+ /// assert_eq!(n.to_be(), n.swap_bytes())
+ /// }
+ /// ```
+ #[inline]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const fn to_be(self) -> Self {
+ Wrapping(self.0.to_be())
+ }
+
+ /// Converts `self` to little endian from the target's endianness.
+ ///
+ /// On little endian this is a no-op. On big endian the bytes are
+ /// swapped.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(wrapping_int_impl)]
+ /// use std::num::Wrapping;
+ ///
+ #[doc = concat!("let n = Wrapping(0x1A", stringify!($t), ");")]
+ ///
+ /// if cfg!(target_endian = "little") {
+ /// assert_eq!(n.to_le(), n)
+ /// } else {
+ /// assert_eq!(n.to_le(), n.swap_bytes())
+ /// }
+ /// ```
+ #[inline]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const fn to_le(self) -> Self {
+ Wrapping(self.0.to_le())
+ }
+
+ /// Raises self to the power of `exp`, using exponentiation by squaring.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(wrapping_int_impl)]
+ /// use std::num::Wrapping;
+ ///
+ #[doc = concat!("assert_eq!(Wrapping(3", stringify!($t), ").pow(4), Wrapping(81));")]
+ /// ```
+ ///
+ /// Results that are too large are wrapped:
+ ///
+ /// ```
+ /// #![feature(wrapping_int_impl)]
+ /// use std::num::Wrapping;
+ ///
+ /// assert_eq!(Wrapping(3i8).pow(5), Wrapping(-13));
+ /// assert_eq!(Wrapping(3i8).pow(6), Wrapping(-39));
+ /// ```
+ #[inline]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub fn pow(self, exp: u32) -> Self {
+ Wrapping(self.0.wrapping_pow(exp))
+ }
+ }
+ )*)
+}
+
+wrapping_int_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
+
+macro_rules! wrapping_int_impl_signed {
+ ($($t:ty)*) => ($(
+ impl Wrapping<$t> {
+ /// Returns the number of leading zeros in the binary representation of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(wrapping_int_impl)]
+ /// use std::num::Wrapping;
+ ///
+ #[doc = concat!("let n = Wrapping(", stringify!($t), "::MAX) >> 2;")]
+ ///
+ /// assert_eq!(n.leading_zeros(), 3);
+ /// ```
+ #[inline]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const fn leading_zeros(self) -> u32 {
+ self.0.leading_zeros()
+ }
+
+ /// Computes the absolute value of `self`, wrapping around at
+ /// the boundary of the type.
+ ///
+            /// The only case where such wrapping can occur is when one takes the absolute
+            /// value of the negative minimal value for the type; this is a positive value
+            /// that is too large to represent in the type. In such a case, this function
+            /// returns `MIN` itself.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(wrapping_int_impl)]
+ /// use std::num::Wrapping;
+ ///
+ #[doc = concat!("assert_eq!(Wrapping(100", stringify!($t), ").abs(), Wrapping(100));")]
+ #[doc = concat!("assert_eq!(Wrapping(-100", stringify!($t), ").abs(), Wrapping(100));")]
+ #[doc = concat!("assert_eq!(Wrapping(", stringify!($t), "::MIN).abs(), Wrapping(", stringify!($t), "::MIN));")]
+ /// assert_eq!(Wrapping(-128i8).abs().0 as u8, 128u8);
+ /// ```
+ #[inline]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub fn abs(self) -> Wrapping<$t> {
+ Wrapping(self.0.wrapping_abs())
+ }
+
+            /// Returns a number representing the sign of `self`.
+ ///
+ /// - `0` if the number is zero
+ /// - `1` if the number is positive
+ /// - `-1` if the number is negative
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(wrapping_int_impl)]
+ /// use std::num::Wrapping;
+ ///
+ #[doc = concat!("assert_eq!(Wrapping(10", stringify!($t), ").signum(), Wrapping(1));")]
+ #[doc = concat!("assert_eq!(Wrapping(0", stringify!($t), ").signum(), Wrapping(0));")]
+ #[doc = concat!("assert_eq!(Wrapping(-10", stringify!($t), ").signum(), Wrapping(-1));")]
+ /// ```
+ #[inline]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub fn signum(self) -> Wrapping<$t> {
+ Wrapping(self.0.signum())
+ }
+
+ /// Returns `true` if `self` is positive and `false` if the number is zero or
+ /// negative.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(wrapping_int_impl)]
+ /// use std::num::Wrapping;
+ ///
+ #[doc = concat!("assert!(Wrapping(10", stringify!($t), ").is_positive());")]
+ #[doc = concat!("assert!(!Wrapping(-10", stringify!($t), ").is_positive());")]
+ /// ```
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const fn is_positive(self) -> bool {
+ self.0.is_positive()
+ }
+
+ /// Returns `true` if `self` is negative and `false` if the number is zero or
+ /// positive.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(wrapping_int_impl)]
+ /// use std::num::Wrapping;
+ ///
+ #[doc = concat!("assert!(Wrapping(-10", stringify!($t), ").is_negative());")]
+ #[doc = concat!("assert!(!Wrapping(10", stringify!($t), ").is_negative());")]
+ /// ```
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const fn is_negative(self) -> bool {
+ self.0.is_negative()
+ }
+ }
+ )*)
+}
+
+wrapping_int_impl_signed! { isize i8 i16 i32 i64 i128 }
+
+macro_rules! wrapping_int_impl_unsigned {
+ ($($t:ty)*) => ($(
+ impl Wrapping<$t> {
+ /// Returns the number of leading zeros in the binary representation of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(wrapping_int_impl)]
+ /// use std::num::Wrapping;
+ ///
+ #[doc = concat!("let n = Wrapping(", stringify!($t), "::MAX) >> 2;")]
+ ///
+ /// assert_eq!(n.leading_zeros(), 2);
+ /// ```
+ #[inline]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const fn leading_zeros(self) -> u32 {
+ self.0.leading_zeros()
+ }
+
+ /// Returns `true` if and only if `self == 2^k` for some `k`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(wrapping_int_impl)]
+ /// use std::num::Wrapping;
+ ///
+ #[doc = concat!("assert!(Wrapping(16", stringify!($t), ").is_power_of_two());")]
+ #[doc = concat!("assert!(!Wrapping(10", stringify!($t), ").is_power_of_two());")]
+ /// ```
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub fn is_power_of_two(self) -> bool {
+ self.0.is_power_of_two()
+ }
+
+ /// Returns the smallest power of two greater than or equal to `self`.
+ ///
+            /// When the return value would overflow (i.e., `self > (1 << (N-1))` for type
+            /// `uN`), it wraps to `2^N = 0`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(wrapping_next_power_of_two)]
+ /// use std::num::Wrapping;
+ ///
+ #[doc = concat!("assert_eq!(Wrapping(2", stringify!($t), ").next_power_of_two(), Wrapping(2));")]
+ #[doc = concat!("assert_eq!(Wrapping(3", stringify!($t), ").next_power_of_two(), Wrapping(4));")]
+ #[doc = concat!("assert_eq!(Wrapping(200_u8).next_power_of_two(), Wrapping(0));")]
+ /// ```
+ #[inline]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[unstable(feature = "wrapping_next_power_of_two", issue = "32463",
+ reason = "needs decision on wrapping behaviour")]
+ pub fn next_power_of_two(self) -> Self {
+ Wrapping(self.0.wrapping_next_power_of_two())
+ }
+ }
+ )*)
+}
+
+wrapping_int_impl_unsigned! { usize u8 u16 u32 u64 u128 }
+
+mod shift_max {
+ #![allow(non_upper_case_globals)]
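+
+    // Shift-amount masks: for an N-bit type the mask is N - 1, so ANDing a
+    // shift amount with it reduces the amount modulo the bit width. As a
+    // worked instance, `Wrapping(1u8) << 9usize` masks 9 & 7 == 1 and
+    // therefore yields `Wrapping(2)`.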
+
+ #[cfg(target_pointer_width = "16")]
+ mod platform {
+ pub const usize: u32 = super::u16;
+ pub const isize: u32 = super::i16;
+ }
+
+ #[cfg(target_pointer_width = "32")]
+ mod platform {
+ pub const usize: u32 = super::u32;
+ pub const isize: u32 = super::i32;
+ }
+
+ #[cfg(target_pointer_width = "64")]
+ mod platform {
+ pub const usize: u32 = super::u64;
+ pub const isize: u32 = super::i64;
+ }
+
+ pub const i8: u32 = (1 << 3) - 1;
+ pub const i16: u32 = (1 << 4) - 1;
+ pub const i32: u32 = (1 << 5) - 1;
+ pub const i64: u32 = (1 << 6) - 1;
+ pub const i128: u32 = (1 << 7) - 1;
+ pub use self::platform::isize;
+
+ pub const u8: u32 = i8;
+ pub const u16: u32 = i16;
+ pub const u32: u32 = i32;
+ pub const u64: u32 = i64;
+ pub const u128: u32 = i128;
+ pub use self::platform::usize;
+}
diff --git a/library/core/src/ops/arith.rs b/library/core/src/ops/arith.rs
new file mode 100644
index 000000000..e367be8c1
--- /dev/null
+++ b/library/core/src/ops/arith.rs
@@ -0,0 +1,1029 @@
+/// The addition operator `+`.
+///
+/// Note that `Rhs` is `Self` by default, but this is not mandatory. For
+/// example, [`std::time::SystemTime`] implements `Add<Duration>`, which permits
+/// operations of the form `SystemTime = SystemTime + Duration`.
+///
+/// [`std::time::SystemTime`]: ../../std/time/struct.SystemTime.html
+///
+/// # Examples
+///
+/// ## `Add`able points
+///
+/// ```
+/// use std::ops::Add;
+///
+/// #[derive(Debug, Copy, Clone, PartialEq)]
+/// struct Point {
+/// x: i32,
+/// y: i32,
+/// }
+///
+/// impl Add for Point {
+/// type Output = Self;
+///
+/// fn add(self, other: Self) -> Self {
+/// Self {
+/// x: self.x + other.x,
+/// y: self.y + other.y,
+/// }
+/// }
+/// }
+///
+/// assert_eq!(Point { x: 1, y: 0 } + Point { x: 2, y: 3 },
+/// Point { x: 3, y: 3 });
+/// ```
+///
+/// ## Implementing `Add` with generics
+///
+/// Here is an example of the same `Point` struct implementing the `Add` trait
+/// using generics.
+///
+/// ```
+/// use std::ops::Add;
+///
+/// #[derive(Debug, Copy, Clone, PartialEq)]
+/// struct Point<T> {
+/// x: T,
+/// y: T,
+/// }
+///
+/// // Notice that the implementation uses the associated type `Output`.
+/// impl<T: Add<Output = T>> Add for Point<T> {
+/// type Output = Self;
+///
+/// fn add(self, other: Self) -> Self::Output {
+/// Self {
+/// x: self.x + other.x,
+/// y: self.y + other.y,
+/// }
+/// }
+/// }
+///
+/// assert_eq!(Point { x: 1, y: 0 } + Point { x: 2, y: 3 },
+/// Point { x: 3, y: 3 });
+/// ```
+#[lang = "add"]
+#[stable(feature = "rust1", since = "1.0.0")]
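+// The `rustc_on_unimplemented` attribute is spelled out twice under
+// `cfg_attr(bootstrap)` / `cfg_attr(not(bootstrap))`, presumably because the
+// bootstrap compiler does not yet accept the `append_const_msg` option, which
+// is the only difference between the two copies.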
+#[cfg_attr(
+ bootstrap,
+ rustc_on_unimplemented(
+ on(
+ all(_Self = "{integer}", Rhs = "{float}"),
+ message = "cannot add a float to an integer",
+ ),
+ on(
+ all(_Self = "{float}", Rhs = "{integer}"),
+ message = "cannot add an integer to a float",
+ ),
+ message = "cannot add `{Rhs}` to `{Self}`",
+ label = "no implementation for `{Self} + {Rhs}`"
+ )
+)]
+#[cfg_attr(
+ not(bootstrap),
+ rustc_on_unimplemented(
+ on(
+ all(_Self = "{integer}", Rhs = "{float}"),
+ message = "cannot add a float to an integer",
+ ),
+ on(
+ all(_Self = "{float}", Rhs = "{integer}"),
+ message = "cannot add an integer to a float",
+ ),
+ message = "cannot add `{Rhs}` to `{Self}`",
+ label = "no implementation for `{Self} + {Rhs}`",
+ append_const_msg,
+ )
+)]
+#[doc(alias = "+")]
+pub trait Add<Rhs = Self> {
+ /// The resulting type after applying the `+` operator.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ type Output;
+
+ /// Performs the `+` operation.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// assert_eq!(12 + 1, 13);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn add(self, rhs: Rhs) -> Self::Output;
+}
+
+macro_rules! add_impl {
+ ($($t:ty)*) => ($(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Add for $t {
+ type Output = $t;
+
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ fn add(self, other: $t) -> $t { self + other }
+ }
+
+ forward_ref_binop! { impl const Add, add for $t, $t }
+ )*)
+}
+
+add_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
+
+/// The subtraction operator `-`.
+///
+/// Note that `Rhs` is `Self` by default, but this is not mandatory. For
+/// example, [`std::time::SystemTime`] implements `Sub<Duration>`, which permits
+/// operations of the form `SystemTime = SystemTime - Duration`.
+///
+/// [`std::time::SystemTime`]: ../../std/time/struct.SystemTime.html
+///
+/// # Examples
+///
+/// ## `Sub`tractable points
+///
+/// ```
+/// use std::ops::Sub;
+///
+/// #[derive(Debug, Copy, Clone, PartialEq)]
+/// struct Point {
+/// x: i32,
+/// y: i32,
+/// }
+///
+/// impl Sub for Point {
+/// type Output = Self;
+///
+/// fn sub(self, other: Self) -> Self::Output {
+/// Self {
+/// x: self.x - other.x,
+/// y: self.y - other.y,
+/// }
+/// }
+/// }
+///
+/// assert_eq!(Point { x: 3, y: 3 } - Point { x: 2, y: 3 },
+/// Point { x: 1, y: 0 });
+/// ```
+///
+/// ## Implementing `Sub` with generics
+///
+/// Here is an example of the same `Point` struct implementing the `Sub` trait
+/// using generics.
+///
+/// ```
+/// use std::ops::Sub;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct Point<T> {
+/// x: T,
+/// y: T,
+/// }
+///
+/// // Notice that the implementation uses the associated type `Output`.
+/// impl<T: Sub<Output = T>> Sub for Point<T> {
+/// type Output = Self;
+///
+/// fn sub(self, other: Self) -> Self::Output {
+/// Point {
+/// x: self.x - other.x,
+/// y: self.y - other.y,
+/// }
+/// }
+/// }
+///
+/// assert_eq!(Point { x: 2, y: 3 } - Point { x: 1, y: 0 },
+/// Point { x: 1, y: 3 });
+/// ```
+#[lang = "sub"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(
+ message = "cannot subtract `{Rhs}` from `{Self}`",
+ label = "no implementation for `{Self} - {Rhs}`"
+)]
+#[doc(alias = "-")]
+pub trait Sub<Rhs = Self> {
+ /// The resulting type after applying the `-` operator.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ type Output;
+
+ /// Performs the `-` operation.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// assert_eq!(12 - 1, 11);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn sub(self, rhs: Rhs) -> Self::Output;
+}
+
+macro_rules! sub_impl {
+ ($($t:ty)*) => ($(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Sub for $t {
+ type Output = $t;
+
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ fn sub(self, other: $t) -> $t { self - other }
+ }
+
+ forward_ref_binop! { impl const Sub, sub for $t, $t }
+ )*)
+}
+
+sub_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
+
+/// The multiplication operator `*`.
+///
+/// Note that `Rhs` is `Self` by default, but this is not mandatory.
+///
+/// # Examples
+///
+/// ## `Mul`tipliable rational numbers
+///
+/// ```
+/// use std::ops::Mul;
+///
+/// // By the fundamental theorem of arithmetic, rational numbers in lowest
+/// // terms are unique. So, by keeping `Rational`s in reduced form, we can
+/// // derive `Eq` and `PartialEq`.
+/// #[derive(Debug, Eq, PartialEq)]
+/// struct Rational {
+/// numerator: usize,
+/// denominator: usize,
+/// }
+///
+/// impl Rational {
+/// fn new(numerator: usize, denominator: usize) -> Self {
+/// if denominator == 0 {
+/// panic!("Zero is an invalid denominator!");
+/// }
+///
+/// // Reduce to lowest terms by dividing by the greatest common
+/// // divisor.
+/// let gcd = gcd(numerator, denominator);
+/// Self {
+/// numerator: numerator / gcd,
+/// denominator: denominator / gcd,
+/// }
+/// }
+/// }
+///
+/// impl Mul for Rational {
+/// // The multiplication of rational numbers is a closed operation.
+/// type Output = Self;
+///
+/// fn mul(self, rhs: Self) -> Self {
+/// let numerator = self.numerator * rhs.numerator;
+/// let denominator = self.denominator * rhs.denominator;
+/// Self::new(numerator, denominator)
+/// }
+/// }
+///
+/// // Euclid's two-thousand-year-old algorithm for finding the greatest common
+/// // divisor.
+/// fn gcd(x: usize, y: usize) -> usize {
+/// let mut x = x;
+/// let mut y = y;
+/// while y != 0 {
+/// let t = y;
+/// y = x % y;
+/// x = t;
+/// }
+/// x
+/// }
+///
+/// assert_eq!(Rational::new(1, 2), Rational::new(2, 4));
+/// assert_eq!(Rational::new(2, 3) * Rational::new(3, 4),
+/// Rational::new(1, 2));
+/// ```
+///
+/// ## Multiplying vectors by scalars as in linear algebra
+///
+/// ```
+/// use std::ops::Mul;
+///
+/// struct Scalar { value: usize }
+///
+/// #[derive(Debug, PartialEq)]
+/// struct Vector { value: Vec<usize> }
+///
+/// impl Mul<Scalar> for Vector {
+/// type Output = Self;
+///
+/// fn mul(self, rhs: Scalar) -> Self::Output {
+/// Self { value: self.value.iter().map(|v| v * rhs.value).collect() }
+/// }
+/// }
+///
+/// let vector = Vector { value: vec![2, 4, 6] };
+/// let scalar = Scalar { value: 3 };
+/// assert_eq!(vector * scalar, Vector { value: vec![6, 12, 18] });
+/// ```
+#[lang = "mul"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(
+ message = "cannot multiply `{Self}` by `{Rhs}`",
+ label = "no implementation for `{Self} * {Rhs}`"
+)]
+#[doc(alias = "*")]
+pub trait Mul<Rhs = Self> {
+ /// The resulting type after applying the `*` operator.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ type Output;
+
+ /// Performs the `*` operation.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// assert_eq!(12 * 2, 24);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn mul(self, rhs: Rhs) -> Self::Output;
+}
+
+macro_rules! mul_impl {
+ ($($t:ty)*) => ($(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Mul for $t {
+ type Output = $t;
+
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ fn mul(self, other: $t) -> $t { self * other }
+ }
+
+ forward_ref_binop! { impl const Mul, mul for $t, $t }
+ )*)
+}
+
+mul_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
+
+/// The division operator `/`.
+///
+/// Note that `Rhs` is `Self` by default, but this is not mandatory.
+///
+/// # Examples
+///
+/// ## `Div`idable rational numbers
+///
+/// ```
+/// use std::ops::Div;
+///
+/// // By the fundamental theorem of arithmetic, rational numbers in lowest
+/// // terms are unique. So, by keeping `Rational`s in reduced form, we can
+/// // derive `Eq` and `PartialEq`.
+/// #[derive(Debug, Eq, PartialEq)]
+/// struct Rational {
+/// numerator: usize,
+/// denominator: usize,
+/// }
+///
+/// impl Rational {
+/// fn new(numerator: usize, denominator: usize) -> Self {
+/// if denominator == 0 {
+/// panic!("Zero is an invalid denominator!");
+/// }
+///
+/// // Reduce to lowest terms by dividing by the greatest common
+/// // divisor.
+/// let gcd = gcd(numerator, denominator);
+/// Self {
+/// numerator: numerator / gcd,
+/// denominator: denominator / gcd,
+/// }
+/// }
+/// }
+///
+/// impl Div for Rational {
+/// // The division of rational numbers is a closed operation.
+/// type Output = Self;
+///
+/// fn div(self, rhs: Self) -> Self::Output {
+/// if rhs.numerator == 0 {
+/// panic!("Cannot divide by zero-valued `Rational`!");
+/// }
+///
+/// let numerator = self.numerator * rhs.denominator;
+/// let denominator = self.denominator * rhs.numerator;
+/// Self::new(numerator, denominator)
+/// }
+/// }
+///
+/// // Euclid's two-thousand-year-old algorithm for finding the greatest common
+/// // divisor.
+/// fn gcd(mut x: usize, mut y: usize) -> usize {
+///     while y != 0 {
+///         let t = y;
+///         y = x % y;
+///         x = t;
+///     }
+///     x
+/// }
+///
+/// assert_eq!(Rational::new(1, 2), Rational::new(2, 4));
+/// assert_eq!(Rational::new(1, 2) / Rational::new(3, 4),
+/// Rational::new(2, 3));
+/// ```
+///
+/// ## Dividing vectors by scalars as in linear algebra
+///
+/// ```
+/// use std::ops::Div;
+///
+/// struct Scalar { value: f32 }
+///
+/// #[derive(Debug, PartialEq)]
+/// struct Vector { value: Vec<f32> }
+///
+/// impl Div<Scalar> for Vector {
+/// type Output = Self;
+///
+/// fn div(self, rhs: Scalar) -> Self::Output {
+/// Self { value: self.value.iter().map(|v| v / rhs.value).collect() }
+/// }
+/// }
+///
+/// let scalar = Scalar { value: 2f32 };
+/// let vector = Vector { value: vec![2f32, 4f32, 6f32] };
+/// assert_eq!(vector / scalar, Vector { value: vec![1f32, 2f32, 3f32] });
+/// ```
+#[lang = "div"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(
+ message = "cannot divide `{Self}` by `{Rhs}`",
+ label = "no implementation for `{Self} / {Rhs}`"
+)]
+#[doc(alias = "/")]
+pub trait Div<Rhs = Self> {
+ /// The resulting type after applying the `/` operator.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ type Output;
+
+ /// Performs the `/` operation.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// assert_eq!(12 / 2, 6);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn div(self, rhs: Rhs) -> Self::Output;
+}
+
+macro_rules! div_impl_integer {
+ ($(($($t:ty)*) => $panic:expr),*) => ($($(
+ /// This operation rounds towards zero, truncating any
+ /// fractional part of the exact result.
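+        ///
+        /// For example, a quick sketch of the truncation (using `i32`):
+        ///
+        /// ```
+        /// assert_eq!(7 / 2, 3);
+        /// assert_eq!(-7 / 2, -3); // rounds toward zero, not toward -infinity
+        /// ```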
+ ///
+ /// # Panics
+ ///
+ #[doc = $panic]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Div for $t {
+ type Output = $t;
+
+ #[inline]
+ fn div(self, other: $t) -> $t { self / other }
+ }
+
+ forward_ref_binop! { impl const Div, div for $t, $t }
+ )*)*)
+}
+
+div_impl_integer! {
+ (usize u8 u16 u32 u64 u128) => "This operation will panic if `other == 0`.",
+ (isize i8 i16 i32 i64 i128) => "This operation will panic if `other == 0` or the division results in overflow."
+}
+
+macro_rules! div_impl_float {
+ ($($t:ty)*) => ($(
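+        /// Float division never panics: dividing a nonzero number by zero
+        /// yields an infinity, and `0.0 / 0.0` yields NaN, following IEEE 754.
+        ///
+        /// ```
+        /// assert_eq!(1.0f32 / 0.0, f32::INFINITY);
+        /// assert!((0.0f32 / 0.0).is_nan());
+        /// ```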
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Div for $t {
+ type Output = $t;
+
+ #[inline]
+ fn div(self, other: $t) -> $t { self / other }
+ }
+
+ forward_ref_binop! { impl const Div, div for $t, $t }
+ )*)
+}
+
+div_impl_float! { f32 f64 }
+
+/// The remainder operator `%`.
+///
+/// Note that `Rhs` is `Self` by default, but this is not mandatory.
+///
+/// # Examples
+///
+/// This example implements `Rem` on a `SplitSlice` object. After `Rem` is
+/// implemented, one can use the `%` operator to find out what the remaining
+/// elements of the slice would be after splitting it into equal slices of a
+/// given length.
+///
+/// ```
+/// use std::ops::Rem;
+///
+/// #[derive(PartialEq, Debug)]
+/// struct SplitSlice<'a, T: 'a> {
+/// slice: &'a [T],
+/// }
+///
+/// impl<'a, T> Rem<usize> for SplitSlice<'a, T> {
+/// type Output = Self;
+///
+/// fn rem(self, modulus: usize) -> Self::Output {
+/// let len = self.slice.len();
+/// let rem = len % modulus;
+/// let start = len - rem;
+/// Self { slice: &self.slice[start..] }
+/// }
+/// }
+///
+/// // If we were to divide &[0, 1, 2, 3, 4, 5, 6, 7] into slices of size 3,
+/// // the remainder would be &[6, 7].
+/// assert_eq!(SplitSlice { slice: &[0, 1, 2, 3, 4, 5, 6, 7] } % 3,
+/// SplitSlice { slice: &[6, 7] });
+/// ```
+#[lang = "rem"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(
+ message = "cannot mod `{Self}` by `{Rhs}`",
+ label = "no implementation for `{Self} % {Rhs}`"
+)]
+#[doc(alias = "%")]
+pub trait Rem<Rhs = Self> {
+ /// The resulting type after applying the `%` operator.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ type Output;
+
+ /// Performs the `%` operation.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// assert_eq!(12 % 10, 2);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn rem(self, rhs: Rhs) -> Self::Output;
+}
+
+macro_rules! rem_impl_integer {
+ ($(($($t:ty)*) => $panic:expr),*) => ($($(
+ /// This operation satisfies `n % d == n - (n / d) * d`. The
+ /// result has the same sign as the left operand.
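+        ///
+        /// For example, a quick sketch of the sign rule (using `i32`):
+        ///
+        /// ```
+        /// assert_eq!(7 % 3, 1);
+        /// assert_eq!(-7 % 3, -1); // the sign follows the left operand
+        /// assert_eq!(7 % -3, 1);
+        /// ```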
+ ///
+ /// # Panics
+ ///
+ #[doc = $panic]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Rem for $t {
+ type Output = $t;
+
+ #[inline]
+ fn rem(self, other: $t) -> $t { self % other }
+ }
+
+ forward_ref_binop! { impl const Rem, rem for $t, $t }
+ )*)*)
+}
+
+rem_impl_integer! {
+ (usize u8 u16 u32 u64 u128) => "This operation will panic if `other == 0`.",
+ (isize i8 i16 i32 i64 i128) => "This operation will panic if `other == 0` or if `self / other` results in overflow."
+}
+
+macro_rules! rem_impl_float {
+ ($($t:ty)*) => ($(
+
+ /// The remainder from the division of two floats.
+ ///
+ /// The remainder has the same sign as the dividend and is computed as:
+ /// `x - (x / y).trunc() * y`.
+ ///
+        /// # Examples
+        ///
+        /// ```
+ /// let x: f32 = 50.50;
+ /// let y: f32 = 8.125;
+ /// let remainder = x - (x / y).trunc() * y;
+ ///
+ /// // The answer to both operations is 1.75
+ /// assert_eq!(x % y, remainder);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Rem for $t {
+ type Output = $t;
+
+ #[inline]
+ fn rem(self, other: $t) -> $t { self % other }
+ }
+
+ forward_ref_binop! { impl const Rem, rem for $t, $t }
+ )*)
+}
+
+rem_impl_float! { f32 f64 }
+
+/// The unary negation operator `-`.
+///
+/// # Examples
+///
+/// An implementation of `Neg` for `Sign`, which allows the use of `-` to
+/// negate its value.
+///
+/// ```
+/// use std::ops::Neg;
+///
+/// #[derive(Debug, PartialEq)]
+/// enum Sign {
+/// Negative,
+/// Zero,
+/// Positive,
+/// }
+///
+/// impl Neg for Sign {
+/// type Output = Self;
+///
+/// fn neg(self) -> Self::Output {
+/// match self {
+/// Sign::Negative => Sign::Positive,
+/// Sign::Zero => Sign::Zero,
+/// Sign::Positive => Sign::Negative,
+/// }
+/// }
+/// }
+///
+/// // A negative positive is a negative.
+/// assert_eq!(-Sign::Positive, Sign::Negative);
+/// // A double negative is a positive.
+/// assert_eq!(-Sign::Negative, Sign::Positive);
+/// // Zero is its own negation.
+/// assert_eq!(-Sign::Zero, Sign::Zero);
+/// ```
+#[lang = "neg"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(alias = "-")]
+pub trait Neg {
+ /// The resulting type after applying the `-` operator.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ type Output;
+
+ /// Performs the unary `-` operation.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// let x: i32 = 12;
+ /// assert_eq!(-x, -12);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn neg(self) -> Self::Output;
+}
+
+macro_rules! neg_impl {
+ ($($t:ty)*) => ($(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Neg for $t {
+ type Output = $t;
+
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ fn neg(self) -> $t { -self }
+ }
+
+ forward_ref_unop! { impl const Neg, neg for $t }
+ )*)
+}
+
+neg_impl! { isize i8 i16 i32 i64 i128 f32 f64 }
+
+/// The addition assignment operator `+=`.
+///
+/// # Examples
+///
+/// This example creates a `Point` struct that implements the `AddAssign`
+/// trait, and then demonstrates add-assigning to a mutable `Point`.
+///
+/// ```
+/// use std::ops::AddAssign;
+///
+/// #[derive(Debug, Copy, Clone, PartialEq)]
+/// struct Point {
+/// x: i32,
+/// y: i32,
+/// }
+///
+/// impl AddAssign for Point {
+/// fn add_assign(&mut self, other: Self) {
+/// *self = Self {
+/// x: self.x + other.x,
+/// y: self.y + other.y,
+/// };
+/// }
+/// }
+///
+/// let mut point = Point { x: 1, y: 0 };
+/// point += Point { x: 2, y: 3 };
+/// assert_eq!(point, Point { x: 3, y: 3 });
+/// ```
+#[lang = "add_assign"]
+#[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_on_unimplemented(
+ message = "cannot add-assign `{Rhs}` to `{Self}`",
+ label = "no implementation for `{Self} += {Rhs}`"
+)]
+#[doc(alias = "+")]
+#[doc(alias = "+=")]
+pub trait AddAssign<Rhs = Self> {
+ /// Performs the `+=` operation.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// let mut x: u32 = 12;
+ /// x += 1;
+ /// assert_eq!(x, 13);
+ /// ```
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ fn add_assign(&mut self, rhs: Rhs);
+}
+
+macro_rules! add_assign_impl {
+ ($($t:ty)+) => ($(
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const AddAssign for $t {
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ fn add_assign(&mut self, other: $t) { *self += other }
+ }
+
+ forward_ref_op_assign! { impl const AddAssign, add_assign for $t, $t }
+ )+)
+}
+
+add_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
+
+/// The subtraction assignment operator `-=`.
+///
+/// # Examples
+///
+/// This example creates a `Point` struct that implements the `SubAssign`
+/// trait, and then demonstrates sub-assigning to a mutable `Point`.
+///
+/// ```
+/// use std::ops::SubAssign;
+///
+/// #[derive(Debug, Copy, Clone, PartialEq)]
+/// struct Point {
+/// x: i32,
+/// y: i32,
+/// }
+///
+/// impl SubAssign for Point {
+/// fn sub_assign(&mut self, other: Self) {
+/// *self = Self {
+/// x: self.x - other.x,
+/// y: self.y - other.y,
+/// };
+/// }
+/// }
+///
+/// let mut point = Point { x: 3, y: 3 };
+/// point -= Point { x: 2, y: 3 };
+/// assert_eq!(point, Point { x: 1, y: 0 });
+/// ```
+#[lang = "sub_assign"]
+#[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_on_unimplemented(
+ message = "cannot subtract-assign `{Rhs}` from `{Self}`",
+ label = "no implementation for `{Self} -= {Rhs}`"
+)]
+#[doc(alias = "-")]
+#[doc(alias = "-=")]
+pub trait SubAssign<Rhs = Self> {
+ /// Performs the `-=` operation.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// let mut x: u32 = 12;
+ /// x -= 1;
+ /// assert_eq!(x, 11);
+ /// ```
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ fn sub_assign(&mut self, rhs: Rhs);
+}
+
+macro_rules! sub_assign_impl {
+ ($($t:ty)+) => ($(
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const SubAssign for $t {
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ fn sub_assign(&mut self, other: $t) { *self -= other }
+ }
+
+ forward_ref_op_assign! { impl const SubAssign, sub_assign for $t, $t }
+ )+)
+}
+
+sub_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
+
+/// The multiplication assignment operator `*=`.
+///
+/// # Examples
+///
+/// ```
+/// use std::ops::MulAssign;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct Frequency { hertz: f64 }
+///
+/// impl MulAssign<f64> for Frequency {
+/// fn mul_assign(&mut self, rhs: f64) {
+/// self.hertz *= rhs;
+/// }
+/// }
+///
+/// let mut frequency = Frequency { hertz: 50.0 };
+/// frequency *= 4.0;
+/// assert_eq!(Frequency { hertz: 200.0 }, frequency);
+/// ```
+#[lang = "mul_assign"]
+#[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_on_unimplemented(
+ message = "cannot multiply-assign `{Self}` by `{Rhs}`",
+ label = "no implementation for `{Self} *= {Rhs}`"
+)]
+#[doc(alias = "*")]
+#[doc(alias = "*=")]
+pub trait MulAssign<Rhs = Self> {
+ /// Performs the `*=` operation.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// let mut x: u32 = 12;
+ /// x *= 2;
+ /// assert_eq!(x, 24);
+ /// ```
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ fn mul_assign(&mut self, rhs: Rhs);
+}
+
+macro_rules! mul_assign_impl {
+ ($($t:ty)+) => ($(
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const MulAssign for $t {
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ fn mul_assign(&mut self, other: $t) { *self *= other }
+ }
+
+ forward_ref_op_assign! { impl const MulAssign, mul_assign for $t, $t }
+ )+)
+}
+
+mul_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
+
+/// The division assignment operator `/=`.
+///
+/// # Examples
+///
+/// ```
+/// use std::ops::DivAssign;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct Frequency { hertz: f64 }
+///
+/// impl DivAssign<f64> for Frequency {
+/// fn div_assign(&mut self, rhs: f64) {
+/// self.hertz /= rhs;
+/// }
+/// }
+///
+/// let mut frequency = Frequency { hertz: 200.0 };
+/// frequency /= 4.0;
+/// assert_eq!(Frequency { hertz: 50.0 }, frequency);
+/// ```
+#[lang = "div_assign"]
+#[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_on_unimplemented(
+ message = "cannot divide-assign `{Self}` by `{Rhs}`",
+ label = "no implementation for `{Self} /= {Rhs}`"
+)]
+#[doc(alias = "/")]
+#[doc(alias = "/=")]
+pub trait DivAssign<Rhs = Self> {
+ /// Performs the `/=` operation.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// let mut x: u32 = 12;
+ /// x /= 2;
+ /// assert_eq!(x, 6);
+ /// ```
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ fn div_assign(&mut self, rhs: Rhs);
+}
+
+macro_rules! div_assign_impl {
+ ($($t:ty)+) => ($(
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const DivAssign for $t {
+ #[inline]
+ fn div_assign(&mut self, other: $t) { *self /= other }
+ }
+
+ forward_ref_op_assign! { impl const DivAssign, div_assign for $t, $t }
+ )+)
+}
+
+div_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
+
+/// The remainder assignment operator `%=`.
+///
+/// # Examples
+///
+/// ```
+/// use std::ops::RemAssign;
+///
+/// struct CookieJar { cookies: u32 }
+///
+/// impl RemAssign<u32> for CookieJar {
+/// fn rem_assign(&mut self, piles: u32) {
+/// self.cookies %= piles;
+/// }
+/// }
+///
+/// let mut jar = CookieJar { cookies: 31 };
+/// let piles = 4;
+///
+/// println!("Splitting up {} cookies into {} even piles!", jar.cookies, piles);
+///
+/// jar %= piles;
+///
+/// println!("{} cookies remain in the cookie jar!", jar.cookies);
+/// ```
+#[lang = "rem_assign"]
+#[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_on_unimplemented(
+ message = "cannot mod-assign `{Self}` by `{Rhs}``",
+ label = "no implementation for `{Self} %= {Rhs}`"
+)]
+#[doc(alias = "%")]
+#[doc(alias = "%=")]
+pub trait RemAssign<Rhs = Self> {
+ /// Performs the `%=` operation.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// let mut x: u32 = 12;
+ /// x %= 10;
+ /// assert_eq!(x, 2);
+ /// ```
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ fn rem_assign(&mut self, rhs: Rhs);
+}
+
+macro_rules! rem_assign_impl {
+ ($($t:ty)+) => ($(
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const RemAssign for $t {
+ #[inline]
+ fn rem_assign(&mut self, other: $t) { *self %= other }
+ }
+
+ forward_ref_op_assign! { impl const RemAssign, rem_assign for $t, $t }
+ )+)
+}
+
+rem_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
diff --git a/library/core/src/ops/bit.rs b/library/core/src/ops/bit.rs
new file mode 100644
index 000000000..7c664226f
--- /dev/null
+++ b/library/core/src/ops/bit.rs
@@ -0,0 +1,1044 @@
+/// The unary logical negation operator `!`.
+///
+/// # Examples
+///
+/// An implementation of `Not` for `Answer`, which enables the use of `!` to
+/// invert its value.
+///
+/// ```
+/// use std::ops::Not;
+///
+/// #[derive(Debug, PartialEq)]
+/// enum Answer {
+/// Yes,
+/// No,
+/// }
+///
+/// impl Not for Answer {
+/// type Output = Self;
+///
+/// fn not(self) -> Self::Output {
+/// match self {
+/// Answer::Yes => Answer::No,
+/// Answer::No => Answer::Yes
+/// }
+/// }
+/// }
+///
+/// assert_eq!(!Answer::Yes, Answer::No);
+/// assert_eq!(!Answer::No, Answer::Yes);
+/// ```
+#[lang = "not"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(alias = "!")]
+pub trait Not {
+ /// The resulting type after applying the `!` operator.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ type Output;
+
+ /// Performs the unary `!` operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(!true, false);
+ /// assert_eq!(!false, true);
+ /// assert_eq!(!1u8, 254);
+ /// assert_eq!(!0u8, 255);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn not(self) -> Self::Output;
+}
+
+macro_rules! not_impl {
+ ($($t:ty)*) => ($(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Not for $t {
+ type Output = $t;
+
+ #[inline]
+ fn not(self) -> $t { !self }
+ }
+
+ forward_ref_unop! { impl const Not, not for $t }
+ )*)
+}
+
+not_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
+
+#[stable(feature = "not_never", since = "1.60.0")]
+#[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+impl const Not for ! {
+ type Output = !;
+
+ #[inline]
+ fn not(self) -> ! {
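+        // `!` has no values, so this empty match is exhaustive and the body
+        // can never actually execute.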
+ match self {}
+ }
+}
+
+/// The bitwise AND operator `&`.
+///
+/// Note that `Rhs` is `Self` by default, but this is not mandatory.
+///
+/// # Examples
+///
+/// An implementation of `BitAnd` for a wrapper around `bool`.
+///
+/// ```
+/// use std::ops::BitAnd;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct Scalar(bool);
+///
+/// impl BitAnd for Scalar {
+/// type Output = Self;
+///
+/// // rhs is the "right-hand side" of the expression `a & b`
+/// fn bitand(self, rhs: Self) -> Self::Output {
+/// Self(self.0 & rhs.0)
+/// }
+/// }
+///
+/// assert_eq!(Scalar(true) & Scalar(true), Scalar(true));
+/// assert_eq!(Scalar(true) & Scalar(false), Scalar(false));
+/// assert_eq!(Scalar(false) & Scalar(true), Scalar(false));
+/// assert_eq!(Scalar(false) & Scalar(false), Scalar(false));
+/// ```
+///
+/// An implementation of `BitAnd` for a wrapper around `Vec<bool>`.
+///
+/// ```
+/// use std::ops::BitAnd;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct BooleanVector(Vec<bool>);
+///
+/// impl BitAnd for BooleanVector {
+/// type Output = Self;
+///
+/// fn bitand(self, Self(rhs): Self) -> Self::Output {
+/// let Self(lhs) = self;
+/// assert_eq!(lhs.len(), rhs.len());
+/// Self(
+/// lhs.iter()
+/// .zip(rhs.iter())
+/// .map(|(x, y)| *x & *y)
+/// .collect()
+/// )
+/// }
+/// }
+///
+/// let bv1 = BooleanVector(vec![true, true, false, false]);
+/// let bv2 = BooleanVector(vec![true, false, true, false]);
+/// let expected = BooleanVector(vec![true, false, false, false]);
+/// assert_eq!(bv1 & bv2, expected);
+/// ```
+#[lang = "bitand"]
+#[doc(alias = "&")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(
+ message = "no implementation for `{Self} & {Rhs}`",
+ label = "no implementation for `{Self} & {Rhs}`"
+)]
+pub trait BitAnd<Rhs = Self> {
+ /// The resulting type after applying the `&` operator.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ type Output;
+
+ /// Performs the `&` operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(true & false, false);
+ /// assert_eq!(true & true, true);
+ /// assert_eq!(5u8 & 1u8, 1);
+ /// assert_eq!(5u8 & 2u8, 0);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn bitand(self, rhs: Rhs) -> Self::Output;
+}
+
+macro_rules! bitand_impl {
+ ($($t:ty)*) => ($(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitAnd for $t {
+ type Output = $t;
+
+ #[inline]
+ fn bitand(self, rhs: $t) -> $t { self & rhs }
+ }
+
+ forward_ref_binop! { impl const BitAnd, bitand for $t, $t }
+ )*)
+}
+
+bitand_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
+
+/// The bitwise OR operator `|`.
+///
+/// Note that `Rhs` is `Self` by default, but this is not mandatory.
+///
+/// # Examples
+///
+/// An implementation of `BitOr` for a wrapper around `bool`.
+///
+/// ```
+/// use std::ops::BitOr;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct Scalar(bool);
+///
+/// impl BitOr for Scalar {
+/// type Output = Self;
+///
+/// // rhs is the "right-hand side" of the expression `a | b`
+/// fn bitor(self, rhs: Self) -> Self::Output {
+/// Self(self.0 | rhs.0)
+/// }
+/// }
+///
+/// assert_eq!(Scalar(true) | Scalar(true), Scalar(true));
+/// assert_eq!(Scalar(true) | Scalar(false), Scalar(true));
+/// assert_eq!(Scalar(false) | Scalar(true), Scalar(true));
+/// assert_eq!(Scalar(false) | Scalar(false), Scalar(false));
+/// ```
+///
+/// An implementation of `BitOr` for a wrapper around `Vec<bool>`.
+///
+/// ```
+/// use std::ops::BitOr;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct BooleanVector(Vec<bool>);
+///
+/// impl BitOr for BooleanVector {
+/// type Output = Self;
+///
+/// fn bitor(self, Self(rhs): Self) -> Self::Output {
+/// let Self(lhs) = self;
+/// assert_eq!(lhs.len(), rhs.len());
+/// Self(
+/// lhs.iter()
+/// .zip(rhs.iter())
+/// .map(|(x, y)| *x | *y)
+/// .collect()
+/// )
+/// }
+/// }
+///
+/// let bv1 = BooleanVector(vec![true, true, false, false]);
+/// let bv2 = BooleanVector(vec![true, false, true, false]);
+/// let expected = BooleanVector(vec![true, true, true, false]);
+/// assert_eq!(bv1 | bv2, expected);
+/// ```
+#[lang = "bitor"]
+#[doc(alias = "|")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(
+ message = "no implementation for `{Self} | {Rhs}`",
+ label = "no implementation for `{Self} | {Rhs}`"
+)]
+pub trait BitOr<Rhs = Self> {
+ /// The resulting type after applying the `|` operator.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ type Output;
+
+ /// Performs the `|` operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(true | false, true);
+ /// assert_eq!(false | false, false);
+ /// assert_eq!(5u8 | 1u8, 5);
+ /// assert_eq!(5u8 | 2u8, 7);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn bitor(self, rhs: Rhs) -> Self::Output;
+}
+
+macro_rules! bitor_impl {
+ ($($t:ty)*) => ($(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitOr for $t {
+ type Output = $t;
+
+ #[inline]
+ fn bitor(self, rhs: $t) -> $t { self | rhs }
+ }
+
+ forward_ref_binop! { impl const BitOr, bitor for $t, $t }
+ )*)
+}
+
+bitor_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
+
+/// The bitwise XOR operator `^`.
+///
+/// Note that `Rhs` is `Self` by default, but this is not mandatory.
+///
+/// # Examples
+///
+/// An implementation of `BitXor` that lifts `^` to a wrapper around `bool`.
+///
+/// ```
+/// use std::ops::BitXor;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct Scalar(bool);
+///
+/// impl BitXor for Scalar {
+/// type Output = Self;
+///
+/// // rhs is the "right-hand side" of the expression `a ^ b`
+/// fn bitxor(self, rhs: Self) -> Self::Output {
+/// Self(self.0 ^ rhs.0)
+/// }
+/// }
+///
+/// assert_eq!(Scalar(true) ^ Scalar(true), Scalar(false));
+/// assert_eq!(Scalar(true) ^ Scalar(false), Scalar(true));
+/// assert_eq!(Scalar(false) ^ Scalar(true), Scalar(true));
+/// assert_eq!(Scalar(false) ^ Scalar(false), Scalar(false));
+/// ```
+///
+/// An implementation of `BitXor` trait for a wrapper around `Vec<bool>`.
+///
+/// ```
+/// use std::ops::BitXor;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct BooleanVector(Vec<bool>);
+///
+/// impl BitXor for BooleanVector {
+/// type Output = Self;
+///
+/// fn bitxor(self, Self(rhs): Self) -> Self::Output {
+/// let Self(lhs) = self;
+/// assert_eq!(lhs.len(), rhs.len());
+/// Self(
+/// lhs.iter()
+/// .zip(rhs.iter())
+/// .map(|(x, y)| *x ^ *y)
+/// .collect()
+/// )
+/// }
+/// }
+///
+/// let bv1 = BooleanVector(vec![true, true, false, false]);
+/// let bv2 = BooleanVector(vec![true, false, true, false]);
+/// let expected = BooleanVector(vec![false, true, true, false]);
+/// assert_eq!(bv1 ^ bv2, expected);
+/// ```
+#[lang = "bitxor"]
+#[doc(alias = "^")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(
+ message = "no implementation for `{Self} ^ {Rhs}`",
+ label = "no implementation for `{Self} ^ {Rhs}`"
+)]
+pub trait BitXor<Rhs = Self> {
+ /// The resulting type after applying the `^` operator.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ type Output;
+
+ /// Performs the `^` operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(true ^ false, true);
+ /// assert_eq!(true ^ true, false);
+ /// assert_eq!(5u8 ^ 1u8, 4);
+ /// assert_eq!(5u8 ^ 2u8, 7);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn bitxor(self, rhs: Rhs) -> Self::Output;
+}
+
+macro_rules! bitxor_impl {
+ ($($t:ty)*) => ($(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitXor for $t {
+ type Output = $t;
+
+ #[inline]
+ fn bitxor(self, other: $t) -> $t { self ^ other }
+ }
+
+ forward_ref_binop! { impl const BitXor, bitxor for $t, $t }
+ )*)
+}
+
+bitxor_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
+
+/// The left shift operator `<<`. Note that because this trait is implemented
+/// for all integer types with multiple right-hand-side types, Rust's type
+/// checker has special handling for `_ << _`, setting the result type for
+/// integer operations to the type of the left-hand-side operand. This means
+/// that though `a << b` and `a.shl(b)` are one and the same from an evaluation
+/// standpoint, they are different when it comes to type inference.
+///
+/// # Examples
+///
+/// An implementation of `Shl` that lifts the `<<` operation on integers to a
+/// wrapper around `usize`.
+///
+/// ```
+/// use std::ops::Shl;
+///
+/// #[derive(PartialEq, Debug)]
+/// struct Scalar(usize);
+///
+/// impl Shl<Scalar> for Scalar {
+/// type Output = Self;
+///
+/// fn shl(self, Self(rhs): Self) -> Self::Output {
+/// let Self(lhs) = self;
+/// Self(lhs << rhs)
+/// }
+/// }
+///
+/// assert_eq!(Scalar(4) << Scalar(2), Scalar(16));
+/// ```
+///
+/// An implementation of `Shl` that spins a vector leftward by a given amount.
+///
+/// ```
+/// use std::ops::Shl;
+///
+/// #[derive(PartialEq, Debug)]
+/// struct SpinVector<T: Clone> {
+/// vec: Vec<T>,
+/// }
+///
+/// impl<T: Clone> Shl<usize> for SpinVector<T> {
+/// type Output = Self;
+///
+/// fn shl(self, rhs: usize) -> Self::Output {
+/// // Rotate the vector by `rhs` places.
+/// let (a, b) = self.vec.split_at(rhs);
+/// let mut spun_vector = vec![];
+/// spun_vector.extend_from_slice(b);
+/// spun_vector.extend_from_slice(a);
+/// Self { vec: spun_vector }
+/// }
+/// }
+///
+/// assert_eq!(SpinVector { vec: vec![0, 1, 2, 3, 4] } << 2,
+/// SpinVector { vec: vec![2, 3, 4, 0, 1] });
+/// ```
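+///
+/// A sketch of the type-inference note above: the result type of `<<` on
+/// integers is that of the left-hand operand, whatever the type of the
+/// right-hand operand.
+///
+/// ```
+/// let x = 1u8 << 3i64;
+/// let _: u8 = x; // `x` is a `u8`, not an `i64`
+/// assert_eq!(x, 8);
+/// ```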
+#[lang = "shl"]
+#[doc(alias = "<<")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(
+ message = "no implementation for `{Self} << {Rhs}`",
+ label = "no implementation for `{Self} << {Rhs}`"
+)]
+pub trait Shl<Rhs = Self> {
+ /// The resulting type after applying the `<<` operator.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ type Output;
+
+ /// Performs the `<<` operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(5u8 << 1, 10);
+ /// assert_eq!(1u8 << 1, 2);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn shl(self, rhs: Rhs) -> Self::Output;
+}
+
+macro_rules! shl_impl {
+ ($t:ty, $f:ty) => {
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Shl<$f> for $t {
+ type Output = $t;
+
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ fn shl(self, other: $f) -> $t {
+ self << other
+ }
+ }
+
+ forward_ref_binop! { impl const Shl, shl for $t, $f }
+ };
+}
+
+macro_rules! shl_impl_all {
+ ($($t:ty)*) => ($(
+ shl_impl! { $t, u8 }
+ shl_impl! { $t, u16 }
+ shl_impl! { $t, u32 }
+ shl_impl! { $t, u64 }
+ shl_impl! { $t, u128 }
+ shl_impl! { $t, usize }
+
+ shl_impl! { $t, i8 }
+ shl_impl! { $t, i16 }
+ shl_impl! { $t, i32 }
+ shl_impl! { $t, i64 }
+ shl_impl! { $t, i128 }
+ shl_impl! { $t, isize }
+ )*)
+}
+
+shl_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize }
+
+/// The right shift operator `>>`. Note that because this trait is implemented
+/// for all integer types with multiple right-hand-side types, Rust's type
+/// checker has special handling for `_ >> _`, setting the result type for
+/// integer operations to the type of the left-hand-side operand. This means
+/// that though `a >> b` and `a.shr(b)` are one and the same from an evaluation
+/// standpoint, they are different when it comes to type inference.
+///
+/// # Examples
+///
+/// An implementation of `Shr` that lifts the `>>` operation on integers to a
+/// wrapper around `usize`.
+///
+/// ```
+/// use std::ops::Shr;
+///
+/// #[derive(PartialEq, Debug)]
+/// struct Scalar(usize);
+///
+/// impl Shr<Scalar> for Scalar {
+/// type Output = Self;
+///
+/// fn shr(self, Self(rhs): Self) -> Self::Output {
+/// let Self(lhs) = self;
+/// Self(lhs >> rhs)
+/// }
+/// }
+///
+/// assert_eq!(Scalar(16) >> Scalar(2), Scalar(4));
+/// ```
+///
+/// An implementation of `Shr` that spins a vector rightward by a given amount.
+///
+/// ```
+/// use std::ops::Shr;
+///
+/// #[derive(PartialEq, Debug)]
+/// struct SpinVector<T: Clone> {
+/// vec: Vec<T>,
+/// }
+///
+/// impl<T: Clone> Shr<usize> for SpinVector<T> {
+/// type Output = Self;
+///
+/// fn shr(self, rhs: usize) -> Self::Output {
+/// // Rotate the vector by `rhs` places.
+/// let (a, b) = self.vec.split_at(self.vec.len() - rhs);
+/// let mut spun_vector = vec![];
+/// spun_vector.extend_from_slice(b);
+/// spun_vector.extend_from_slice(a);
+/// Self { vec: spun_vector }
+/// }
+/// }
+///
+/// assert_eq!(SpinVector { vec: vec![0, 1, 2, 3, 4] } >> 2,
+/// SpinVector { vec: vec![3, 4, 0, 1, 2] });
+/// ```
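+///
+/// As with `Shl`, the result type of `>>` on integers is that of the
+/// left-hand operand:
+///
+/// ```
+/// let x = 16u8 >> 2i64;
+/// let _: u8 = x; // `x` is a `u8`, not an `i64`
+/// assert_eq!(x, 4);
+/// ```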
+#[lang = "shr"]
+#[doc(alias = ">>")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(
+ message = "no implementation for `{Self} >> {Rhs}`",
+ label = "no implementation for `{Self} >> {Rhs}`"
+)]
+pub trait Shr<Rhs = Self> {
+ /// The resulting type after applying the `>>` operator.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ type Output;
+
+ /// Performs the `>>` operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(5u8 >> 1, 2);
+ /// assert_eq!(2u8 >> 1, 1);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn shr(self, rhs: Rhs) -> Self::Output;
+}
+
+macro_rules! shr_impl {
+ ($t:ty, $f:ty) => {
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Shr<$f> for $t {
+ type Output = $t;
+
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ fn shr(self, other: $f) -> $t {
+ self >> other
+ }
+ }
+
+ forward_ref_binop! { impl const Shr, shr for $t, $f }
+ };
+}
+
+macro_rules! shr_impl_all {
+ ($($t:ty)*) => ($(
+ shr_impl! { $t, u8 }
+ shr_impl! { $t, u16 }
+ shr_impl! { $t, u32 }
+ shr_impl! { $t, u64 }
+ shr_impl! { $t, u128 }
+ shr_impl! { $t, usize }
+
+ shr_impl! { $t, i8 }
+ shr_impl! { $t, i16 }
+ shr_impl! { $t, i32 }
+ shr_impl! { $t, i64 }
+ shr_impl! { $t, i128 }
+ shr_impl! { $t, isize }
+ )*)
+}
+
+shr_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize }
+
+/// The bitwise AND assignment operator `&=`.
+///
+/// # Examples
+///
+/// An implementation of `BitAndAssign` that lifts the `&=` operator to a
+/// wrapper around `bool`.
+///
+/// ```
+/// use std::ops::BitAndAssign;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct Scalar(bool);
+///
+/// impl BitAndAssign for Scalar {
+/// // rhs is the "right-hand side" of the expression `a &= b`
+/// fn bitand_assign(&mut self, rhs: Self) {
+/// *self = Self(self.0 & rhs.0)
+/// }
+/// }
+///
+/// let mut scalar = Scalar(true);
+/// scalar &= Scalar(true);
+/// assert_eq!(scalar, Scalar(true));
+///
+/// let mut scalar = Scalar(true);
+/// scalar &= Scalar(false);
+/// assert_eq!(scalar, Scalar(false));
+///
+/// let mut scalar = Scalar(false);
+/// scalar &= Scalar(true);
+/// assert_eq!(scalar, Scalar(false));
+///
+/// let mut scalar = Scalar(false);
+/// scalar &= Scalar(false);
+/// assert_eq!(scalar, Scalar(false));
+/// ```
+///
+/// Here, the `BitAndAssign` trait is implemented for a wrapper around
+/// `Vec<bool>`.
+///
+/// ```
+/// use std::ops::BitAndAssign;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct BooleanVector(Vec<bool>);
+///
+/// impl BitAndAssign for BooleanVector {
+/// // `rhs` is the "right-hand side" of the expression `a &= b`.
+/// fn bitand_assign(&mut self, rhs: Self) {
+/// assert_eq!(self.0.len(), rhs.0.len());
+/// *self = Self(
+/// self.0
+/// .iter()
+/// .zip(rhs.0.iter())
+/// .map(|(x, y)| *x & *y)
+/// .collect()
+/// );
+/// }
+/// }
+///
+/// let mut bv = BooleanVector(vec![true, true, false, false]);
+/// bv &= BooleanVector(vec![true, false, true, false]);
+/// let expected = BooleanVector(vec![true, false, false, false]);
+/// assert_eq!(bv, expected);
+/// ```
+#[lang = "bitand_assign"]
+#[doc(alias = "&=")]
+#[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_on_unimplemented(
+ message = "no implementation for `{Self} &= {Rhs}`",
+ label = "no implementation for `{Self} &= {Rhs}`"
+)]
+pub trait BitAndAssign<Rhs = Self> {
+ /// Performs the `&=` operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut x = true;
+ /// x &= false;
+ /// assert_eq!(x, false);
+ ///
+ /// let mut x = true;
+ /// x &= true;
+ /// assert_eq!(x, true);
+ ///
+ /// let mut x: u8 = 5;
+ /// x &= 1;
+ /// assert_eq!(x, 1);
+ ///
+ /// let mut x: u8 = 5;
+ /// x &= 2;
+ /// assert_eq!(x, 0);
+ /// ```
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ fn bitand_assign(&mut self, rhs: Rhs);
+}
+
+macro_rules! bitand_assign_impl {
+ ($($t:ty)+) => ($(
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitAndAssign for $t {
+ #[inline]
+ fn bitand_assign(&mut self, other: $t) { *self &= other }
+ }
+
+ forward_ref_op_assign! { impl const BitAndAssign, bitand_assign for $t, $t }
+ )+)
+}
+
+bitand_assign_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
+
+/// The bitwise OR assignment operator `|=`.
+///
+/// # Examples
+///
+/// ```
+/// use std::ops::BitOrAssign;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct PersonalPreferences {
+/// likes_cats: bool,
+/// likes_dogs: bool,
+/// }
+///
+/// impl BitOrAssign for PersonalPreferences {
+/// fn bitor_assign(&mut self, rhs: Self) {
+/// self.likes_cats |= rhs.likes_cats;
+/// self.likes_dogs |= rhs.likes_dogs;
+/// }
+/// }
+///
+/// let mut prefs = PersonalPreferences { likes_cats: true, likes_dogs: false };
+/// prefs |= PersonalPreferences { likes_cats: false, likes_dogs: true };
+/// assert_eq!(prefs, PersonalPreferences { likes_cats: true, likes_dogs: true });
+/// ```
+#[lang = "bitor_assign"]
+#[doc(alias = "|=")]
+#[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_on_unimplemented(
+ message = "no implementation for `{Self} |= {Rhs}`",
+ label = "no implementation for `{Self} |= {Rhs}`"
+)]
+pub trait BitOrAssign<Rhs = Self> {
+ /// Performs the `|=` operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut x = true;
+ /// x |= false;
+ /// assert_eq!(x, true);
+ ///
+ /// let mut x = false;
+ /// x |= false;
+ /// assert_eq!(x, false);
+ ///
+ /// let mut x: u8 = 5;
+ /// x |= 1;
+ /// assert_eq!(x, 5);
+ ///
+ /// let mut x: u8 = 5;
+ /// x |= 2;
+ /// assert_eq!(x, 7);
+ /// ```
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ fn bitor_assign(&mut self, rhs: Rhs);
+}
+
+macro_rules! bitor_assign_impl {
+ ($($t:ty)+) => ($(
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitOrAssign for $t {
+ #[inline]
+ fn bitor_assign(&mut self, other: $t) { *self |= other }
+ }
+
+ forward_ref_op_assign! { impl const BitOrAssign, bitor_assign for $t, $t }
+ )+)
+}
+
+bitor_assign_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
+
+/// The bitwise XOR assignment operator `^=`.
+///
+/// # Examples
+///
+/// ```
+/// use std::ops::BitXorAssign;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct Personality {
+/// has_soul: bool,
+/// likes_knitting: bool,
+/// }
+///
+/// impl BitXorAssign for Personality {
+/// fn bitxor_assign(&mut self, rhs: Self) {
+/// self.has_soul ^= rhs.has_soul;
+/// self.likes_knitting ^= rhs.likes_knitting;
+/// }
+/// }
+///
+/// let mut personality = Personality { has_soul: false, likes_knitting: true };
+/// personality ^= Personality { has_soul: true, likes_knitting: true };
+/// assert_eq!(personality, Personality { has_soul: true, likes_knitting: false });
+/// ```
+#[lang = "bitxor_assign"]
+#[doc(alias = "^=")]
+#[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_on_unimplemented(
+ message = "no implementation for `{Self} ^= {Rhs}`",
+ label = "no implementation for `{Self} ^= {Rhs}`"
+)]
+pub trait BitXorAssign<Rhs = Self> {
+ /// Performs the `^=` operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut x = true;
+ /// x ^= false;
+ /// assert_eq!(x, true);
+ ///
+ /// let mut x = true;
+ /// x ^= true;
+ /// assert_eq!(x, false);
+ ///
+ /// let mut x: u8 = 5;
+ /// x ^= 1;
+ /// assert_eq!(x, 4);
+ ///
+ /// let mut x: u8 = 5;
+ /// x ^= 2;
+ /// assert_eq!(x, 7);
+ /// ```
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ fn bitxor_assign(&mut self, rhs: Rhs);
+}
+
+macro_rules! bitxor_assign_impl {
+ ($($t:ty)+) => ($(
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitXorAssign for $t {
+ #[inline]
+ fn bitxor_assign(&mut self, other: $t) { *self ^= other }
+ }
+
+ forward_ref_op_assign! { impl const BitXorAssign, bitxor_assign for $t, $t }
+ )+)
+}
+
+bitxor_assign_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
+
+/// The left shift assignment operator `<<=`.
+///
+/// # Examples
+///
+/// An implementation of `ShlAssign` for a wrapper around `usize`.
+///
+/// ```
+/// use std::ops::ShlAssign;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct Scalar(usize);
+///
+/// impl ShlAssign<usize> for Scalar {
+/// fn shl_assign(&mut self, rhs: usize) {
+/// self.0 <<= rhs;
+/// }
+/// }
+///
+/// let mut scalar = Scalar(4);
+/// scalar <<= 2;
+/// assert_eq!(scalar, Scalar(16));
+/// ```
+#[lang = "shl_assign"]
+#[doc(alias = "<<=")]
+#[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_on_unimplemented(
+ message = "no implementation for `{Self} <<= {Rhs}`",
+ label = "no implementation for `{Self} <<= {Rhs}`"
+)]
+pub trait ShlAssign<Rhs = Self> {
+ /// Performs the `<<=` operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut x: u8 = 5;
+ /// x <<= 1;
+ /// assert_eq!(x, 10);
+ ///
+ /// let mut x: u8 = 1;
+ /// x <<= 1;
+ /// assert_eq!(x, 2);
+ /// ```
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ fn shl_assign(&mut self, rhs: Rhs);
+}
+
+macro_rules! shl_assign_impl {
+ ($t:ty, $f:ty) => {
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const ShlAssign<$f> for $t {
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ fn shl_assign(&mut self, other: $f) {
+ *self <<= other
+ }
+ }
+
+ forward_ref_op_assign! { impl const ShlAssign, shl_assign for $t, $f }
+ };
+}
+
+macro_rules! shl_assign_impl_all {
+ ($($t:ty)*) => ($(
+ shl_assign_impl! { $t, u8 }
+ shl_assign_impl! { $t, u16 }
+ shl_assign_impl! { $t, u32 }
+ shl_assign_impl! { $t, u64 }
+ shl_assign_impl! { $t, u128 }
+ shl_assign_impl! { $t, usize }
+
+ shl_assign_impl! { $t, i8 }
+ shl_assign_impl! { $t, i16 }
+ shl_assign_impl! { $t, i32 }
+ shl_assign_impl! { $t, i64 }
+ shl_assign_impl! { $t, i128 }
+ shl_assign_impl! { $t, isize }
+ )*)
+}
+
+shl_assign_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize }
+
+/// The right shift assignment operator `>>=`.
+///
+/// # Examples
+///
+/// An implementation of `ShrAssign` for a wrapper around `usize`.
+///
+/// ```
+/// use std::ops::ShrAssign;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct Scalar(usize);
+///
+/// impl ShrAssign<usize> for Scalar {
+/// fn shr_assign(&mut self, rhs: usize) {
+/// self.0 >>= rhs;
+/// }
+/// }
+///
+/// let mut scalar = Scalar(16);
+/// scalar >>= 2;
+/// assert_eq!(scalar, Scalar(4));
+/// ```
+#[lang = "shr_assign"]
+#[doc(alias = ">>=")]
+#[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_on_unimplemented(
+ message = "no implementation for `{Self} >>= {Rhs}`",
+ label = "no implementation for `{Self} >>= {Rhs}`"
+)]
+pub trait ShrAssign<Rhs = Self> {
+ /// Performs the `>>=` operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut x: u8 = 5;
+ /// x >>= 1;
+ /// assert_eq!(x, 2);
+ ///
+ /// let mut x: u8 = 2;
+ /// x >>= 1;
+ /// assert_eq!(x, 1);
+ /// ```
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ fn shr_assign(&mut self, rhs: Rhs);
+}
+
+macro_rules! shr_assign_impl {
+ ($t:ty, $f:ty) => {
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const ShrAssign<$f> for $t {
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ fn shr_assign(&mut self, other: $f) {
+ *self >>= other
+ }
+ }
+
+ forward_ref_op_assign! { impl const ShrAssign, shr_assign for $t, $f }
+ };
+}
+
+macro_rules! shr_assign_impl_all {
+ ($($t:ty)*) => ($(
+ shr_assign_impl! { $t, u8 }
+ shr_assign_impl! { $t, u16 }
+ shr_assign_impl! { $t, u32 }
+ shr_assign_impl! { $t, u64 }
+ shr_assign_impl! { $t, u128 }
+ shr_assign_impl! { $t, usize }
+
+ shr_assign_impl! { $t, i8 }
+ shr_assign_impl! { $t, i16 }
+ shr_assign_impl! { $t, i32 }
+ shr_assign_impl! { $t, i64 }
+ shr_assign_impl! { $t, i128 }
+ shr_assign_impl! { $t, isize }
+ )*)
+}
+
+shr_assign_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize }
diff --git a/library/core/src/ops/control_flow.rs b/library/core/src/ops/control_flow.rs
new file mode 100644
index 000000000..b1f5559dc
--- /dev/null
+++ b/library/core/src/ops/control_flow.rs
@@ -0,0 +1,299 @@
+use crate::{convert, ops};
+
+/// Used to tell an operation whether it should exit early or go on as usual.
+///
+/// This is used when exposing things (like graph traversals or visitors) where
+/// you want the user to be able to choose whether to exit early.
+/// Having the enum makes it clearer -- no more wondering "wait, what did `false`
+/// mean again?" -- and allows including a value.
+///
+/// Similar to [`Option`] and [`Result`], this enum can be used with the `?` operator
+/// to return immediately if the [`Break`] variant is present or otherwise continue normally
+/// with the value inside the [`Continue`] variant.
+///
+/// # Examples
+///
+/// Early-exiting from [`Iterator::try_for_each`]:
+/// ```
+/// use std::ops::ControlFlow;
+///
+/// let r = (2..100).try_for_each(|x| {
+/// if 403 % x == 0 {
+/// return ControlFlow::Break(x)
+/// }
+///
+/// ControlFlow::Continue(())
+/// });
+/// assert_eq!(r, ControlFlow::Break(13));
+/// ```
+///
+/// A basic tree traversal:
+/// ```
+/// use std::ops::ControlFlow;
+///
+/// pub struct TreeNode<T> {
+/// value: T,
+/// left: Option<Box<TreeNode<T>>>,
+/// right: Option<Box<TreeNode<T>>>,
+/// }
+///
+/// impl<T> TreeNode<T> {
+/// pub fn traverse_inorder<B>(&self, f: &mut impl FnMut(&T) -> ControlFlow<B>) -> ControlFlow<B> {
+/// if let Some(left) = &self.left {
+/// left.traverse_inorder(f)?;
+/// }
+/// f(&self.value)?;
+/// if let Some(right) = &self.right {
+/// right.traverse_inorder(f)?;
+/// }
+/// ControlFlow::Continue(())
+/// }
+/// fn leaf(value: T) -> Option<Box<TreeNode<T>>> {
+/// Some(Box::new(Self { value, left: None, right: None }))
+/// }
+/// }
+///
+/// let node = TreeNode {
+/// value: 0,
+/// left: TreeNode::leaf(1),
+/// right: Some(Box::new(TreeNode {
+/// value: -1,
+/// left: TreeNode::leaf(5),
+/// right: TreeNode::leaf(2),
+/// }))
+/// };
+/// let mut sum = 0;
+///
+/// let res = node.traverse_inorder(&mut |val| {
+/// if *val < 0 {
+/// ControlFlow::Break(*val)
+/// } else {
+/// sum += *val;
+/// ControlFlow::Continue(())
+/// }
+/// });
+/// assert_eq!(res, ControlFlow::Break(-1));
+/// assert_eq!(sum, 6);
+/// ```
+///
+/// [`Break`]: ControlFlow::Break
+/// [`Continue`]: ControlFlow::Continue
+#[stable(feature = "control_flow_enum_type", since = "1.55.0")]
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum ControlFlow<B, C = ()> {
+ /// Move on to the next phase of the operation as normal.
+ #[stable(feature = "control_flow_enum_type", since = "1.55.0")]
+ #[lang = "Continue"]
+ Continue(C),
+ /// Exit the operation without running subsequent phases.
+ #[stable(feature = "control_flow_enum_type", since = "1.55.0")]
+ #[lang = "Break"]
+ Break(B),
+ // Yes, the order of the variants doesn't match the type parameters.
+ // They're in this order so that `ControlFlow<A, B>` <-> `Result<B, A>`
+ // is a no-op conversion in the `Try` implementation.
+}
+
+#[unstable(feature = "try_trait_v2", issue = "84277")]
+impl<B, C> ops::Try for ControlFlow<B, C> {
+ type Output = C;
+ type Residual = ControlFlow<B, convert::Infallible>;
+
+ #[inline]
+ fn from_output(output: Self::Output) -> Self {
+ ControlFlow::Continue(output)
+ }
+
+ #[inline]
+ fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
+ match self {
+ ControlFlow::Continue(c) => ControlFlow::Continue(c),
+ ControlFlow::Break(b) => ControlFlow::Break(ControlFlow::Break(b)),
+ }
+ }
+}
+
+#[unstable(feature = "try_trait_v2", issue = "84277")]
+impl<B, C> ops::FromResidual for ControlFlow<B, C> {
+ #[inline]
+ fn from_residual(residual: ControlFlow<B, convert::Infallible>) -> Self {
+ match residual {
+ ControlFlow::Break(b) => ControlFlow::Break(b),
+ }
+ }
+}
+
+#[unstable(feature = "try_trait_v2_residual", issue = "91285")]
+impl<B, C> ops::Residual<C> for ControlFlow<B, convert::Infallible> {
+ type TryType = ControlFlow<B, C>;
+}
+
+impl<B, C> ControlFlow<B, C> {
+ /// Returns `true` if this is a `Break` variant.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ops::ControlFlow;
+ ///
+ /// assert!(ControlFlow::<i32, String>::Break(3).is_break());
+ /// assert!(!ControlFlow::<String, i32>::Continue(3).is_break());
+ /// ```
+ #[inline]
+ #[stable(feature = "control_flow_enum_is", since = "1.59.0")]
+ pub fn is_break(&self) -> bool {
+ matches!(*self, ControlFlow::Break(_))
+ }
+
+ /// Returns `true` if this is a `Continue` variant.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ops::ControlFlow;
+ ///
+ /// assert!(!ControlFlow::<i32, String>::Break(3).is_continue());
+ /// assert!(ControlFlow::<String, i32>::Continue(3).is_continue());
+ /// ```
+ #[inline]
+ #[stable(feature = "control_flow_enum_is", since = "1.59.0")]
+ pub fn is_continue(&self) -> bool {
+ matches!(*self, ControlFlow::Continue(_))
+ }
+
+ /// Converts the `ControlFlow` into an `Option` which is `Some` if the
+ /// `ControlFlow` was `Break` and `None` otherwise.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(control_flow_enum)]
+ /// use std::ops::ControlFlow;
+ ///
+ /// assert_eq!(ControlFlow::<i32, String>::Break(3).break_value(), Some(3));
+ /// assert_eq!(ControlFlow::<String, i32>::Continue(3).break_value(), None);
+ /// ```
+ #[inline]
+ #[unstable(feature = "control_flow_enum", reason = "new API", issue = "75744")]
+ pub fn break_value(self) -> Option<B> {
+ match self {
+ ControlFlow::Continue(..) => None,
+ ControlFlow::Break(x) => Some(x),
+ }
+ }
+
+ /// Maps `ControlFlow<B, C>` to `ControlFlow<T, C>` by applying a function
+ /// to the break value in case it exists.
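+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(control_flow_enum)]
+    /// use std::ops::ControlFlow;
+    ///
+    /// assert_eq!(ControlFlow::<i32, String>::Break(3).map_break(|x| x + 1),
+    ///            ControlFlow::Break(4));
+    /// ```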
+ #[inline]
+ #[unstable(feature = "control_flow_enum", reason = "new API", issue = "75744")]
+ pub fn map_break<T, F>(self, f: F) -> ControlFlow<T, C>
+ where
+ F: FnOnce(B) -> T,
+ {
+ match self {
+ ControlFlow::Continue(x) => ControlFlow::Continue(x),
+ ControlFlow::Break(x) => ControlFlow::Break(f(x)),
+ }
+ }
+
+ /// Converts the `ControlFlow` into an `Option` which is `Some` if the
+ /// `ControlFlow` was `Continue` and `None` otherwise.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(control_flow_enum)]
+ /// use std::ops::ControlFlow;
+ ///
+ /// assert_eq!(ControlFlow::<i32, String>::Break(3).continue_value(), None);
+ /// assert_eq!(ControlFlow::<String, i32>::Continue(3).continue_value(), Some(3));
+ /// ```
+ #[inline]
+ #[unstable(feature = "control_flow_enum", reason = "new API", issue = "75744")]
+ pub fn continue_value(self) -> Option<C> {
+ match self {
+ ControlFlow::Continue(x) => Some(x),
+ ControlFlow::Break(..) => None,
+ }
+ }
+
+ /// Maps `ControlFlow<B, C>` to `ControlFlow<B, T>` by applying a function
+ /// to the continue value in case it exists.
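+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(control_flow_enum)]
+    /// use std::ops::ControlFlow;
+    ///
+    /// assert_eq!(ControlFlow::<String, i32>::Continue(3).map_continue(|x| x + 1),
+    ///            ControlFlow::Continue(4));
+    /// ```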
+ #[inline]
+ #[unstable(feature = "control_flow_enum", reason = "new API", issue = "75744")]
+ pub fn map_continue<T, F>(self, f: F) -> ControlFlow<B, T>
+ where
+ F: FnOnce(C) -> T,
+ {
+ match self {
+ ControlFlow::Continue(x) => ControlFlow::Continue(f(x)),
+ ControlFlow::Break(x) => ControlFlow::Break(x),
+ }
+ }
+}
+
+/// These are used only as part of implementing the iterator adapters.
+/// They have mediocre names and non-obvious semantics, so aren't
+/// currently on a path to potential stabilization.
+impl<R: ops::Try> ControlFlow<R, R::Output> {
+    /// Creates a `ControlFlow` from any type implementing `Try`.
+ #[inline]
+ pub(crate) fn from_try(r: R) -> Self {
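+        // Branch `r`, then re-package any residual back into the original
+        // `Try` type, so that `Break` carries an `R` rather than a bare
+        // residual.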
+ match R::branch(r) {
+ ControlFlow::Continue(v) => ControlFlow::Continue(v),
+ ControlFlow::Break(v) => ControlFlow::Break(R::from_residual(v)),
+ }
+ }
+
+    /// Converts a `ControlFlow` into any type implementing `Try`.
+ #[inline]
+ pub(crate) fn into_try(self) -> R {
+ match self {
+ ControlFlow::Continue(v) => R::from_output(v),
+ ControlFlow::Break(v) => v,
+ }
+ }
+}
+
+impl<B> ControlFlow<B, ()> {
+ /// It's frequently the case that there's no value needed with `Continue`,
+ /// so this provides a way to avoid typing `(())`, if you prefer it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(control_flow_enum)]
+ /// use std::ops::ControlFlow;
+ ///
+ /// let mut partial_sum = 0;
+ /// let last_used = (1..10).chain(20..25).try_for_each(|x| {
+ /// partial_sum += x;
+ /// if partial_sum > 100 { ControlFlow::Break(x) }
+ /// else { ControlFlow::CONTINUE }
+ /// });
+ /// assert_eq!(last_used.break_value(), Some(22));
+ /// ```
+ #[unstable(feature = "control_flow_enum", reason = "new API", issue = "75744")]
+ pub const CONTINUE: Self = ControlFlow::Continue(());
+}
+
+impl<C> ControlFlow<(), C> {
+ /// APIs like `try_for_each` don't need values with `Break`,
+ /// so this provides a way to avoid typing `(())`, if you prefer it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(control_flow_enum)]
+ /// use std::ops::ControlFlow;
+ ///
+ /// let mut partial_sum = 0;
+ /// (1..10).chain(20..25).try_for_each(|x| {
+ /// if partial_sum > 100 { ControlFlow::BREAK }
+ /// else { partial_sum += x; ControlFlow::CONTINUE }
+ /// });
+ /// assert_eq!(partial_sum, 108);
+ /// ```
+ #[unstable(feature = "control_flow_enum", reason = "new API", issue = "75744")]
+ pub const BREAK: Self = ControlFlow::Break(());
+}
diff --git a/library/core/src/ops/deref.rs b/library/core/src/ops/deref.rs
new file mode 100644
index 000000000..d68932402
--- /dev/null
+++ b/library/core/src/ops/deref.rs
@@ -0,0 +1,199 @@
+/// Used for immutable dereferencing operations, like `*v`.
+///
+/// In addition to being used for explicit dereferencing operations with the
+/// (unary) `*` operator in immutable contexts, `Deref` is also used implicitly
+/// by the compiler in many circumstances. This mechanism is called
+/// ['`Deref` coercion'][more]. In mutable contexts, [`DerefMut`] is used.
+///
+/// Implementing `Deref` for smart pointers makes accessing the data behind them
+/// convenient, which is why they implement `Deref`. On the other hand, the
+/// rules regarding `Deref` and [`DerefMut`] were designed specifically to
+/// accommodate smart pointers. Because of this, **`Deref` should only be
+/// implemented for smart pointers** to avoid confusion.
+///
+/// For similar reasons, **this trait should never fail**. Failure during
+/// dereferencing can be extremely confusing when `Deref` is invoked implicitly.
+///
+/// # More on `Deref` coercion
+///
+/// If `T` implements `Deref<Target = U>`, and `x` is a value of type `T`, then:
+///
+/// * In immutable contexts, `*x` (where `T` is neither a reference nor a raw pointer)
+/// is equivalent to `*Deref::deref(&x)`.
+/// * Values of type `&T` are coerced to values of type `&U`
+/// * `T` implicitly implements all the (immutable) methods of the type `U`.
+///
+/// For more details, visit [the chapter in *The Rust Programming Language*][book]
+/// as well as the reference sections on [the dereference operator][ref-deref-op],
+/// [method resolution] and [type coercions].
+///
+/// [book]: ../../book/ch15-02-deref.html
+/// [more]: #more-on-deref-coercion
+/// [ref-deref-op]: ../../reference/expressions/operator-expr.html#the-dereference-operator
+/// [method resolution]: ../../reference/expressions/method-call-expr.html
+/// [type coercions]: ../../reference/type-coercions.html
+///
+/// # Examples
+///
+/// A struct with a single field which is accessible by dereferencing the
+/// struct.
+///
+/// ```
+/// use std::ops::Deref;
+///
+/// struct DerefExample<T> {
+/// value: T
+/// }
+///
+/// impl<T> Deref for DerefExample<T> {
+/// type Target = T;
+///
+/// fn deref(&self) -> &Self::Target {
+/// &self.value
+/// }
+/// }
+///
+/// let x = DerefExample { value: 'a' };
+/// assert_eq!('a', *x);
+/// ```
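+///
+/// A sketch of `Deref` coercion itself, using `String` (which implements
+/// `Deref<Target = str>`) and a hypothetical `takes_str` helper:
+///
+/// ```
+/// fn takes_str(s: &str) -> usize {
+///     s.len()
+/// }
+///
+/// let owned = String::from("hello");
+/// // `&String` coerces to `&str` because `String: Deref<Target = str>`.
+/// assert_eq!(takes_str(&owned), 5);
+/// ```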
+#[lang = "deref"]
+#[doc(alias = "*")]
+#[doc(alias = "&*")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_diagnostic_item = "Deref"]
+pub trait Deref {
+ /// The resulting type after dereferencing.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_diagnostic_item = "deref_target"]
+ #[lang = "deref_target"]
+ type Target: ?Sized;
+
+ /// Dereferences the value.
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_diagnostic_item = "deref_method"]
+ fn deref(&self) -> &Self::Target;
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_deref", issue = "88955")]
+impl<T: ?Sized> const Deref for &T {
+ type Target = T;
+
+ #[rustc_diagnostic_item = "noop_method_deref"]
+ fn deref(&self) -> &T {
+ *self
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> !DerefMut for &T {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_deref", issue = "88955")]
+impl<T: ?Sized> const Deref for &mut T {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ *self
+ }
+}
+
+/// Used for mutable dereferencing operations, like in `*v = 1;`.
+///
+/// In addition to being used for explicit dereferencing operations with the
+/// (unary) `*` operator in mutable contexts, `DerefMut` is also used implicitly
+/// by the compiler in many circumstances. This mechanism is called
+/// ['`Deref` coercion'][more]. In immutable contexts, [`Deref`] is used.
+///
+/// Smart pointers implement `DerefMut` to make mutating the data behind them
+/// convenient. Conversely, the rules regarding [`Deref`] and `DerefMut` were
+/// designed specifically to accommodate smart pointers. Because of this,
+/// **`DerefMut` should only be implemented for smart pointers** to avoid
+/// confusion.
+///
+/// For similar reasons, **this trait should never fail**. Failure during
+/// dereferencing can be extremely confusing when `DerefMut` is invoked
+/// implicitly.
+///
+/// # More on `Deref` coercion
+///
+/// If `T` implements `DerefMut<Target = U>`, and `x` is a value of type `T`,
+/// then:
+///
+/// * In mutable contexts, `*x` (where `T` is neither a reference nor a raw pointer)
+/// is equivalent to `*DerefMut::deref_mut(&mut x)`.
+/// * Values of type `&mut T` are coerced to values of type `&mut U`.
+/// * `T` implicitly implements all the (mutable) methods of the type `U`.
+///
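+/// For example (a short sketch of the rules above), a `&mut Vec<i32>` coerces
+/// to `&mut [i32]` because `Vec<i32>` implements `DerefMut<Target = [i32]>`:
+///
+/// ```
+/// fn set_first(slice: &mut [i32]) {
+///     slice[0] = 7;
+/// }
+///
+/// let mut v = vec![1, 2, 3];
+/// // `&mut Vec<i32>` is coerced to `&mut [i32]` here.
+/// set_first(&mut v);
+/// assert_eq!(v, [7, 2, 3]);
+/// ```
+///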
+/// For more details, visit [the chapter in *The Rust Programming Language*][book]
+/// as well as the reference sections on [the dereference operator][ref-deref-op],
+/// [method resolution] and [type coercions].
+///
+/// [book]: ../../book/ch15-02-deref.html
+/// [more]: #more-on-deref-coercion
+/// [ref-deref-op]: ../../reference/expressions/operator-expr.html#the-dereference-operator
+/// [method resolution]: ../../reference/expressions/method-call-expr.html
+/// [type coercions]: ../../reference/type-coercions.html
+///
+/// # Examples
+///
+/// A struct with a single field which is modifiable by dereferencing the
+/// struct.
+///
+/// ```
+/// use std::ops::{Deref, DerefMut};
+///
+/// struct DerefMutExample<T> {
+/// value: T
+/// }
+///
+/// impl<T> Deref for DerefMutExample<T> {
+/// type Target = T;
+///
+/// fn deref(&self) -> &Self::Target {
+/// &self.value
+/// }
+/// }
+///
+/// impl<T> DerefMut for DerefMutExample<T> {
+/// fn deref_mut(&mut self) -> &mut Self::Target {
+/// &mut self.value
+/// }
+/// }
+///
+/// let mut x = DerefMutExample { value: 'a' };
+/// *x = 'b';
+/// assert_eq!('b', x.value);
+/// ```
+#[lang = "deref_mut"]
+#[doc(alias = "*")]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait DerefMut: Deref {
+ /// Mutably dereferences the value.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn deref_mut(&mut self) -> &mut Self::Target;
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> DerefMut for &mut T {
+ fn deref_mut(&mut self) -> &mut T {
+ *self
+ }
+}
+
+/// Indicates that a struct can be used as a method receiver, without the
+/// `arbitrary_self_types` feature. This is implemented by stdlib pointer types like `Box<T>`,
+/// `Rc<T>`, `&T`, and `Pin<P>`.
+#[lang = "receiver"]
+#[unstable(feature = "receiver_trait", issue = "none")]
+#[doc(hidden)]
+pub trait Receiver {
+ // Empty.
+}
+
+#[unstable(feature = "receiver_trait", issue = "none")]
+impl<T: ?Sized> Receiver for &T {}
+
+#[unstable(feature = "receiver_trait", issue = "none")]
+impl<T: ?Sized> Receiver for &mut T {}
diff --git a/library/core/src/ops/drop.rs b/library/core/src/ops/drop.rs
new file mode 100644
index 000000000..aa654aa55
--- /dev/null
+++ b/library/core/src/ops/drop.rs
@@ -0,0 +1,165 @@
+/// Custom code within the destructor.
+///
+/// When a value is no longer needed, Rust will run a "destructor" on that value.
+/// The most common way that a value is no longer needed is when it goes out of
+/// scope. Destructors may still run in other circumstances, but we're going to
+/// focus on scope for the examples here. To learn about some of those other cases,
+/// please see [the reference] section on destructors.
+///
+/// [the reference]: https://doc.rust-lang.org/reference/destructors.html
+///
+/// This destructor consists of two components:
+/// - A call to `Drop::drop` for that value, if this special `Drop` trait is implemented for its type.
+/// - The automatically generated "drop glue" which recursively calls the destructors
+/// of all the fields of this value.
+///
+/// As Rust automatically calls the destructors of all contained fields,
+/// you don't have to implement `Drop` in most cases. But there are some cases where
+/// it is useful, for example for types which directly manage a resource.
+/// That resource may be memory, it may be a file descriptor, it may be a network socket.
+/// Once a value of that type is no longer going to be used, it should "clean up" its
+/// resource by freeing the memory or closing the file or socket. This is
+/// the job of a destructor, and therefore the job of `Drop::drop`.
+///
+/// ## Examples
+///
+/// To see destructors in action, let's take a look at the following program:
+///
+/// ```rust
+/// struct HasDrop;
+///
+/// impl Drop for HasDrop {
+/// fn drop(&mut self) {
+/// println!("Dropping HasDrop!");
+/// }
+/// }
+///
+/// struct HasTwoDrops {
+/// one: HasDrop,
+/// two: HasDrop,
+/// }
+///
+/// impl Drop for HasTwoDrops {
+/// fn drop(&mut self) {
+/// println!("Dropping HasTwoDrops!");
+/// }
+/// }
+///
+/// fn main() {
+/// let _x = HasTwoDrops { one: HasDrop, two: HasDrop };
+/// println!("Running!");
+/// }
+/// ```
+///
+/// Rust will first call `Drop::drop` for `_x` and then for both `_x.one` and `_x.two`,
+/// meaning that running this will print
+///
+/// ```text
+/// Running!
+/// Dropping HasTwoDrops!
+/// Dropping HasDrop!
+/// Dropping HasDrop!
+/// ```
+///
+/// Even if we remove the implementation of `Drop` for `HasTwoDrops`, the destructors of its fields are still called.
+/// This would result in
+///
+/// ```text
+/// Running!
+/// Dropping HasDrop!
+/// Dropping HasDrop!
+/// ```
+///
+/// ## You cannot call `Drop::drop` yourself
+///
+/// Because `Drop::drop` is used to clean up a value, it may be dangerous to use this value after
+/// the method has been called. As `Drop::drop` does not take ownership of its input,
+/// Rust prevents misuse by not allowing you to call `Drop::drop` directly.
+///
+/// In other words, if you tried to explicitly call `Drop::drop` in the above example, you'd get a compiler error.
+///
+/// If you'd like to explicitly call the destructor of a value, [`mem::drop`] can be used instead.
+///
+/// [`mem::drop`]: drop
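+///
+/// For example (a minimal sketch), the prelude's `drop` runs the destructor
+/// early by simply taking ownership of the value:
+///
+/// ```
+/// struct HasDrop;
+///
+/// impl Drop for HasDrop {
+///     fn drop(&mut self) {
+///         println!("Dropping HasDrop!");
+///     }
+/// }
+///
+/// let x = HasDrop;
+/// drop(x); // prints "Dropping HasDrop!" here, not at the end of scope
+/// // `x` cannot be used past this point.
+/// ```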
+///
+/// ## Drop order
+///
+/// Which of our two `HasDrop` drops first, though? For structs, it's the same
+/// order that they're declared: first `one`, then `two`. If you'd like to try
+/// this yourself, you can modify `HasDrop` above to contain some data, like an
+/// integer, and then use it in the `println!` inside of `Drop`. This behavior is
+/// guaranteed by the language.
+///
+/// Unlike for structs, local variables are dropped in reverse order:
+///
+/// ```rust
+/// struct Foo;
+///
+/// impl Drop for Foo {
+/// fn drop(&mut self) {
+/// println!("Dropping Foo!")
+/// }
+/// }
+///
+/// struct Bar;
+///
+/// impl Drop for Bar {
+/// fn drop(&mut self) {
+/// println!("Dropping Bar!")
+/// }
+/// }
+///
+/// fn main() {
+/// let _foo = Foo;
+/// let _bar = Bar;
+/// }
+/// ```
+///
+/// This will print
+///
+/// ```text
+/// Dropping Bar!
+/// Dropping Foo!
+/// ```
+///
+/// Please see [the reference] for the full rules.
+///
+/// [the reference]: https://doc.rust-lang.org/reference/destructors.html
+///
+/// ## `Copy` and `Drop` are exclusive
+///
+/// You cannot implement both [`Copy`] and `Drop` on the same type. Types that
+/// are `Copy` get implicitly duplicated by the compiler, making it very
+/// hard to predict when, and how often destructors will be executed. As such,
+/// these types cannot have destructors.
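+///
+/// Attempting to implement both therefore fails to compile (a small sketch;
+/// rustc currently reports this as error E0184):
+///
+/// ```compile_fail,E0184
+/// #[derive(Copy, Clone)]
+/// struct Cannot;
+///
+/// // error[E0184]: the trait `Copy` may not be implemented for this type;
+/// // the type has a destructor
+/// impl Drop for Cannot {
+///     fn drop(&mut self) {}
+/// }
+/// ```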
+#[lang = "drop"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait Drop {
+ /// Executes the destructor for this type.
+ ///
+ /// This method is called implicitly when the value goes out of scope,
+ /// and cannot be called explicitly (this is compiler error [E0040]).
+ /// However, the [`mem::drop`] function in the prelude can be
+ /// used to call the argument's `Drop` implementation.
+ ///
+ /// When this method has been called, `self` has not yet been deallocated.
+ /// That only happens after the method is over.
+ /// If this wasn't the case, `self` would be a dangling reference.
+ ///
+ /// # Panics
+ ///
+ /// Given that a [`panic!`] will call `drop` as it unwinds, any [`panic!`]
+ /// in a `drop` implementation will likely abort.
+ ///
+ /// Note that even if this panics, the value is considered to be dropped;
+ /// you must not cause `drop` to be called again. This is normally automatically
+    /// handled by the compiler, but when using unsafe code, a double drop can
+    /// sometimes occur unintentionally, particularly when using [`ptr::drop_in_place`].
+ ///
+ /// [E0040]: ../../error-index.html#E0040
+ /// [`panic!`]: crate::panic!
+ /// [`mem::drop`]: drop
+ /// [`ptr::drop_in_place`]: crate::ptr::drop_in_place
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn drop(&mut self);
+}
diff --git a/library/core/src/ops/function.rs b/library/core/src/ops/function.rs
new file mode 100644
index 000000000..c5a194b7d
--- /dev/null
+++ b/library/core/src/ops/function.rs
@@ -0,0 +1,304 @@
+/// The version of the call operator that takes an immutable receiver.
+///
+/// Instances of `Fn` can be called repeatedly without mutating state.
+///
+/// *This trait (`Fn`) is not to be confused with [function pointers]
+/// (`fn`).*
+///
+/// `Fn` is implemented automatically by closures which only take immutable
+/// references to captured variables or don't capture anything at all, as well
+/// as (safe) [function pointers] (with some caveats, see their documentation
+/// for more details). Additionally, for any type `F` that implements `Fn`, `&F`
+/// implements `Fn`, too.
+///
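+/// For example, a small sketch of the `&F` point above:
+///
+/// ```
+/// fn call_twice<F: Fn()>(f: F) {
+///     f();
+///     f();
+/// }
+///
+/// let greet = || println!("hi");
+/// // A shared reference to a closure is itself callable...
+/// call_twice(&greet);
+/// // ...and the original closure remains usable afterwards.
+/// call_twice(greet);
+/// ```
+///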
+/// Since both [`FnMut`] and [`FnOnce`] are supertraits of `Fn`, any
+/// instance of `Fn` can be used as a parameter where a [`FnMut`] or [`FnOnce`]
+/// is expected.
+///
+/// Use `Fn` as a bound when you want to accept a parameter of function-like
+/// type and need to call it repeatedly and without mutating state (e.g., when
+/// calling it concurrently). If you do not need such strict requirements, use
+/// [`FnMut`] or [`FnOnce`] as bounds.
+///
+/// See the [chapter on closures in *The Rust Programming Language*][book] for
+/// some more information on this topic.
+///
+/// Also of note is the special syntax for `Fn` traits (e.g.
+/// `Fn(usize, bool) -> usize`). Those interested in the technical details of
+/// this can refer to [the relevant section in the *Rustonomicon*][nomicon].
+///
+/// [book]: ../../book/ch13-01-closures.html
+/// [function pointers]: fn
+/// [nomicon]: ../../nomicon/hrtb.html
+///
+/// # Examples
+///
+/// ## Calling a closure
+///
+/// ```
+/// let square = |x| x * x;
+/// assert_eq!(square(5), 25);
+/// ```
+///
+/// ## Using a `Fn` parameter
+///
+/// ```
+/// fn call_with_one<F>(func: F) -> usize
+/// where F: Fn(usize) -> usize {
+/// func(1)
+/// }
+///
+/// let double = |x| x * 2;
+/// assert_eq!(call_with_one(double), 2);
+/// ```
+#[lang = "fn"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_diagnostic_item = "Fn"]
+#[rustc_paren_sugar]
+#[rustc_on_unimplemented(
+ on(
+ Args = "()",
+ note = "wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}`"
+ ),
+ on(
+ _Self = "unsafe fn",
+ note = "unsafe function cannot be called generically without an unsafe block",
+ // SAFETY: tidy is not smart enough to tell that the below unsafe block is a string
+ label = "call the function in a closure: `|| unsafe {{ /* code */ }}`"
+ ),
+ message = "expected a `{Fn}<{Args}>` closure, found `{Self}`",
+ label = "expected an `Fn<{Args}>` closure, found `{Self}`"
+)]
+#[fundamental] // so that regex can rely on `&str: !FnMut`
+#[must_use = "closures are lazy and do nothing unless called"]
+pub trait Fn<Args>: FnMut<Args> {
+ /// Performs the call operation.
+ #[unstable(feature = "fn_traits", issue = "29625")]
+ extern "rust-call" fn call(&self, args: Args) -> Self::Output;
+}
+
+/// The version of the call operator that takes a mutable receiver.
+///
+/// Instances of `FnMut` can be called repeatedly and may mutate state.
+///
+/// `FnMut` is implemented automatically by closures which take mutable
+/// references to captured variables, as well as all types that implement
+/// [`Fn`], e.g., (safe) [function pointers] (since `FnMut` is a supertrait of
+/// [`Fn`]). Additionally, for any type `F` that implements `FnMut`, `&mut F`
+/// implements `FnMut`, too.
+///
+/// Since [`FnOnce`] is a supertrait of `FnMut`, any instance of `FnMut` can be
+/// used where a [`FnOnce`] is expected, and since [`Fn`] is a subtrait of
+/// `FnMut`, any instance of [`Fn`] can be used where `FnMut` is expected.
+///
+/// Use `FnMut` as a bound when you want to accept a parameter of function-like
+/// type and need to call it repeatedly, while allowing it to mutate state.
+/// If you don't want the parameter to mutate state, use [`Fn`] as a
+/// bound; if you don't need to call it repeatedly, use [`FnOnce`].
+///
+/// See the [chapter on closures in *The Rust Programming Language*][book] for
+/// some more information on this topic.
+///
+/// Also of note is the special syntax for `Fn` traits (e.g.
+/// `Fn(usize, bool) -> usize`). Those interested in the technical details of
+/// this can refer to [the relevant section in the *Rustonomicon*][nomicon].
+///
+/// [book]: ../../book/ch13-01-closures.html
+/// [function pointers]: fn
+/// [nomicon]: ../../nomicon/hrtb.html
+///
+/// # Examples
+///
+/// ## Calling a mutably capturing closure
+///
+/// ```
+/// let mut x = 5;
+/// {
+/// let mut square_x = || x *= x;
+/// square_x();
+/// }
+/// assert_eq!(x, 25);
+/// ```
+///
+/// ## Using a `FnMut` parameter
+///
+/// ```
+/// fn do_twice<F>(mut func: F)
+/// where F: FnMut()
+/// {
+/// func();
+/// func();
+/// }
+///
+/// let mut x: usize = 1;
+/// {
+/// let add_two_to_x = || x += 2;
+/// do_twice(add_two_to_x);
+/// }
+///
+/// assert_eq!(x, 5);
+/// ```
+#[lang = "fn_mut"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_diagnostic_item = "FnMut"]
+#[rustc_paren_sugar]
+#[rustc_on_unimplemented(
+ on(
+ Args = "()",
+ note = "wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}`"
+ ),
+ on(
+ _Self = "unsafe fn",
+ note = "unsafe function cannot be called generically without an unsafe block",
+ // SAFETY: tidy is not smart enough to tell that the below unsafe block is a string
+ label = "call the function in a closure: `|| unsafe {{ /* code */ }}`"
+ ),
+ message = "expected a `{FnMut}<{Args}>` closure, found `{Self}`",
+ label = "expected an `FnMut<{Args}>` closure, found `{Self}`"
+)]
+#[fundamental] // so that regex can rely on `&str: !FnMut`
+#[must_use = "closures are lazy and do nothing unless called"]
+pub trait FnMut<Args>: FnOnce<Args> {
+ /// Performs the call operation.
+ #[unstable(feature = "fn_traits", issue = "29625")]
+ extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output;
+}
+
+/// The version of the call operator that takes a by-value receiver.
+///
+/// Instances of `FnOnce` can be called, but might not be callable multiple
+/// times. Because of this, if the only thing known about a type is that it
+/// implements `FnOnce`, it can only be called once.
+///
+/// `FnOnce` is implemented automatically by closures that might consume captured
+/// variables, as well as all types that implement [`FnMut`], e.g., (safe)
+/// [function pointers] (since `FnOnce` is a supertrait of [`FnMut`]).
+///
+/// Since both [`Fn`] and [`FnMut`] are subtraits of `FnOnce`, any instance of
+/// [`Fn`] or [`FnMut`] can be used where a `FnOnce` is expected.
+///
+/// Use `FnOnce` as a bound when you want to accept a parameter of function-like
+/// type and only need to call it once. If you need to call the parameter
+/// repeatedly, use [`FnMut`] as a bound; if you also need it to not mutate
+/// state, use [`Fn`].
+///
+/// See the [chapter on closures in *The Rust Programming Language*][book] for
+/// some more information on this topic.
+///
+/// Also of note is the special syntax for `Fn` traits (e.g.
+/// `Fn(usize, bool) -> usize`). Those interested in the technical details of
+/// this can refer to [the relevant section in the *Rustonomicon*][nomicon].
+///
+/// [book]: ../../book/ch13-01-closures.html
+/// [function pointers]: fn
+/// [nomicon]: ../../nomicon/hrtb.html
+///
+/// # Examples
+///
+/// ## Using a `FnOnce` parameter
+///
+/// ```
+/// fn consume_with_relish<F>(func: F)
+/// where F: FnOnce() -> String
+/// {
+/// // `func` consumes its captured variables, so it cannot be run more
+/// // than once.
+/// println!("Consumed: {}", func());
+///
+/// println!("Delicious!");
+///
+///     // Attempting to invoke `func()` again will result in a `use of
+///     // moved value` compile error for `func`.
+/// }
+///
+/// let x = String::from("x");
+/// let consume_and_return_x = move || x;
+/// consume_with_relish(consume_and_return_x);
+///
+/// // `consume_and_return_x` can no longer be invoked at this point
+/// ```
+#[lang = "fn_once"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_diagnostic_item = "FnOnce"]
+#[rustc_paren_sugar]
+#[rustc_on_unimplemented(
+ on(
+ Args = "()",
+ note = "wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}`"
+ ),
+ on(
+ _Self = "unsafe fn",
+ note = "unsafe function cannot be called generically without an unsafe block",
+ // SAFETY: tidy is not smart enough to tell that the below unsafe block is a string
+ label = "call the function in a closure: `|| unsafe {{ /* code */ }}`"
+ ),
+ message = "expected a `{FnOnce}<{Args}>` closure, found `{Self}`",
+ label = "expected an `FnOnce<{Args}>` closure, found `{Self}`"
+)]
+#[fundamental] // so that regex can rely on `&str: !FnMut`
+#[must_use = "closures are lazy and do nothing unless called"]
+pub trait FnOnce<Args> {
+ /// The returned type after the call operator is used.
+ #[lang = "fn_once_output"]
+ #[stable(feature = "fn_once_output", since = "1.12.0")]
+ type Output;
+
+ /// Performs the call operation.
+ #[unstable(feature = "fn_traits", issue = "29625")]
+ extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
+}
+
+mod impls {
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<A, F: ?Sized> Fn<A> for &F
+ where
+ F: Fn<A>,
+ {
+ extern "rust-call" fn call(&self, args: A) -> F::Output {
+ (**self).call(args)
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<A, F: ?Sized> FnMut<A> for &F
+ where
+ F: Fn<A>,
+ {
+ extern "rust-call" fn call_mut(&mut self, args: A) -> F::Output {
+ (**self).call(args)
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<A, F: ?Sized> FnOnce<A> for &F
+ where
+ F: Fn<A>,
+ {
+ type Output = F::Output;
+
+ extern "rust-call" fn call_once(self, args: A) -> F::Output {
+ (*self).call(args)
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<A, F: ?Sized> FnMut<A> for &mut F
+ where
+ F: FnMut<A>,
+ {
+ extern "rust-call" fn call_mut(&mut self, args: A) -> F::Output {
+ (*self).call_mut(args)
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<A, F: ?Sized> FnOnce<A> for &mut F
+ where
+ F: FnMut<A>,
+ {
+ type Output = F::Output;
+ extern "rust-call" fn call_once(self, args: A) -> F::Output {
+ (*self).call_mut(args)
+ }
+ }
+}
diff --git a/library/core/src/ops/generator.rs b/library/core/src/ops/generator.rs
new file mode 100644
index 000000000..b651b7b23
--- /dev/null
+++ b/library/core/src/ops/generator.rs
@@ -0,0 +1,136 @@
+use crate::marker::Unpin;
+use crate::pin::Pin;
+
+/// The result of a generator resumption.
+///
+/// This enum is returned from the `Generator::resume` method and indicates the
+/// possible return values of a generator. Currently this corresponds to either
+/// a suspension point (`Yielded`) or a termination point (`Complete`).
+#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
+#[lang = "generator_state"]
+#[unstable(feature = "generator_trait", issue = "43122")]
+pub enum GeneratorState<Y, R> {
+ /// The generator suspended with a value.
+ ///
+ /// This state indicates that a generator has been suspended, and typically
+ /// corresponds to a `yield` statement. The value provided in this variant
+ /// corresponds to the expression passed to `yield` and allows generators to
+ /// provide a value each time they yield.
+ Yielded(Y),
+
+ /// The generator completed with a return value.
+ ///
+ /// This state indicates that a generator has finished execution with the
+ /// provided value. Once a generator has returned `Complete` it is
+ /// considered a programmer error to call `resume` again.
+ Complete(R),
+}
+
+/// The trait implemented by builtin generator types.
+///
+/// Generators, also commonly referred to as coroutines, are currently an
+/// experimental language feature in Rust. Added in [RFC 2033], generators are
+/// currently intended to primarily provide a building block for async/await
+/// syntax but will likely extend to also providing an ergonomic definition for
+/// iterators and other primitives.
+///
+/// The syntax and semantics for generators are unstable and will require a
+/// further RFC for stabilization. At this time, though, the syntax is
+/// closure-like:
+///
+/// ```rust
+/// #![feature(generators, generator_trait)]
+///
+/// use std::ops::{Generator, GeneratorState};
+/// use std::pin::Pin;
+///
+/// fn main() {
+/// let mut generator = || {
+/// yield 1;
+/// "foo"
+/// };
+///
+/// match Pin::new(&mut generator).resume(()) {
+/// GeneratorState::Yielded(1) => {}
+/// _ => panic!("unexpected return from resume"),
+/// }
+/// match Pin::new(&mut generator).resume(()) {
+/// GeneratorState::Complete("foo") => {}
+/// _ => panic!("unexpected return from resume"),
+/// }
+/// }
+/// ```
+///
+/// More documentation of generators can be found in the [unstable book].
+///
+/// [RFC 2033]: https://github.com/rust-lang/rfcs/pull/2033
+/// [unstable book]: ../../unstable-book/language-features/generators.html
+#[lang = "generator"]
+#[unstable(feature = "generator_trait", issue = "43122")]
+#[fundamental]
+pub trait Generator<R = ()> {
+ /// The type of value this generator yields.
+ ///
+ /// This associated type corresponds to the `yield` expression and the
+ /// values which are allowed to be returned each time a generator yields.
+    /// For example, an iterator-as-a-generator would likely have this type as
+ /// `T`, the type being iterated over.
+ type Yield;
+
+ /// The type of value this generator returns.
+ ///
+ /// This corresponds to the type returned from a generator either with a
+ /// `return` statement or implicitly as the last expression of a generator
+    /// literal. For example, futures would use this as `Result<T, E>`, since it
+    /// represents a completed future.
+ #[lang = "generator_return"]
+ type Return;
+
+ /// Resumes the execution of this generator.
+ ///
+ /// This function will resume execution of the generator or start execution
+ /// if it hasn't already. This call will return back into the generator's
+    /// if it hasn't already. This call will return control to the generator's
+ /// generator will continue executing until it either yields or returns, at
+ /// which point this function will return.
+ ///
+ /// # Return value
+ ///
+ /// The `GeneratorState` enum returned from this function indicates what
+ /// state the generator is in upon returning. If the `Yielded` variant is
+ /// returned then the generator has reached a suspension point and a value
+ /// has been yielded out. Generators in this state are available for
+ /// resumption at a later point.
+ ///
+ /// If `Complete` is returned then the generator has completely finished
+ /// with the value provided. It is invalid for the generator to be resumed
+ /// again.
+ ///
+ /// # Panics
+ ///
+ /// This function may panic if it is called after the `Complete` variant has
+ /// been returned previously. While generator literals in the language are
+ /// guaranteed to panic on resuming after `Complete`, this is not guaranteed
+ /// for all implementations of the `Generator` trait.
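+    ///
+    /// For example (a sketch), resuming a generator literal after it has
+    /// completed panics:
+    ///
+    /// ```should_panic
+    /// #![feature(generators, generator_trait)]
+    ///
+    /// use std::ops::Generator;
+    /// use std::pin::Pin;
+    ///
+    /// let mut generator = || {
+    ///     yield 1;
+    ///     "foo"
+    /// };
+    ///
+    /// Pin::new(&mut generator).resume(()); // `Yielded(1)`
+    /// Pin::new(&mut generator).resume(()); // `Complete("foo")`
+    /// Pin::new(&mut generator).resume(()); // panics
+    /// ```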
+ fn resume(self: Pin<&mut Self>, arg: R) -> GeneratorState<Self::Yield, Self::Return>;
+}
+
+#[unstable(feature = "generator_trait", issue = "43122")]
+impl<G: ?Sized + Generator<R>, R> Generator<R> for Pin<&mut G> {
+ type Yield = G::Yield;
+ type Return = G::Return;
+
+ fn resume(mut self: Pin<&mut Self>, arg: R) -> GeneratorState<Self::Yield, Self::Return> {
+ G::resume((*self).as_mut(), arg)
+ }
+}
+
+#[unstable(feature = "generator_trait", issue = "43122")]
+impl<G: ?Sized + Generator<R> + Unpin, R> Generator<R> for &mut G {
+ type Yield = G::Yield;
+ type Return = G::Return;
+
+ fn resume(mut self: Pin<&mut Self>, arg: R) -> GeneratorState<Self::Yield, Self::Return> {
+ G::resume(Pin::new(&mut *self), arg)
+ }
+}
diff --git a/library/core/src/ops/index.rs b/library/core/src/ops/index.rs
new file mode 100644
index 000000000..e2e569cb7
--- /dev/null
+++ b/library/core/src/ops/index.rs
@@ -0,0 +1,175 @@
+/// Used for indexing operations (`container[index]`) in immutable contexts.
+///
+/// `container[index]` is actually syntactic sugar for `*container.index(index)`,
+/// but only when used as an immutable value. If a mutable value is requested,
+/// [`IndexMut`] is used instead. This allows nice things such as
+/// `let value = v[index]` if the type of `value` implements [`Copy`].
+///
+/// # Examples
+///
+/// The following example implements `Index` on a read-only `NucleotideCount`
+/// container, enabling individual counts to be retrieved with index syntax.
+///
+/// ```
+/// use std::ops::Index;
+///
+/// enum Nucleotide {
+/// A,
+/// C,
+/// G,
+/// T,
+/// }
+///
+/// struct NucleotideCount {
+/// a: usize,
+/// c: usize,
+/// g: usize,
+/// t: usize,
+/// }
+///
+/// impl Index<Nucleotide> for NucleotideCount {
+/// type Output = usize;
+///
+/// fn index(&self, nucleotide: Nucleotide) -> &Self::Output {
+/// match nucleotide {
+/// Nucleotide::A => &self.a,
+/// Nucleotide::C => &self.c,
+/// Nucleotide::G => &self.g,
+/// Nucleotide::T => &self.t,
+/// }
+/// }
+/// }
+///
+/// let nucleotide_count = NucleotideCount {a: 14, c: 9, g: 10, t: 12};
+/// assert_eq!(nucleotide_count[Nucleotide::A], 14);
+/// assert_eq!(nucleotide_count[Nucleotide::C], 9);
+/// assert_eq!(nucleotide_count[Nucleotide::G], 10);
+/// assert_eq!(nucleotide_count[Nucleotide::T], 12);
+/// ```
+#[lang = "index"]
+#[rustc_on_unimplemented(
+ message = "the type `{Self}` cannot be indexed by `{Idx}`",
+ label = "`{Self}` cannot be indexed by `{Idx}`"
+)]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(alias = "]")]
+#[doc(alias = "[")]
+#[doc(alias = "[]")]
+pub trait Index<Idx: ?Sized> {
+ /// The returned type after indexing.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ type Output: ?Sized;
+
+ /// Performs the indexing (`container[index]`) operation.
+ ///
+ /// # Panics
+ ///
+ /// May panic if the index is out of bounds.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[track_caller]
+ fn index(&self, index: Idx) -> &Self::Output;
+}
+
+/// Used for indexing operations (`container[index]`) in mutable contexts.
+///
+/// `container[index]` is actually syntactic sugar for
+/// `*container.index_mut(index)`, but only when used as a mutable value. If
+/// an immutable value is requested, the [`Index`] trait is used instead. This
+/// allows nice things such as `v[index] = value`.
+///
+/// # Examples
+///
+/// A very simple implementation of a `Balance` struct that has two sides, where
+/// each can be indexed mutably and immutably.
+///
+/// ```
+/// use std::ops::{Index, IndexMut};
+///
+/// #[derive(Debug)]
+/// enum Side {
+/// Left,
+/// Right,
+/// }
+///
+/// #[derive(Debug, PartialEq)]
+/// enum Weight {
+/// Kilogram(f32),
+/// Pound(f32),
+/// }
+///
+/// struct Balance {
+/// pub left: Weight,
+/// pub right: Weight,
+/// }
+///
+/// impl Index<Side> for Balance {
+/// type Output = Weight;
+///
+/// fn index(&self, index: Side) -> &Self::Output {
+/// println!("Accessing {index:?}-side of balance immutably");
+/// match index {
+/// Side::Left => &self.left,
+/// Side::Right => &self.right,
+/// }
+/// }
+/// }
+///
+/// impl IndexMut<Side> for Balance {
+/// fn index_mut(&mut self, index: Side) -> &mut Self::Output {
+/// println!("Accessing {index:?}-side of balance mutably");
+/// match index {
+/// Side::Left => &mut self.left,
+/// Side::Right => &mut self.right,
+/// }
+/// }
+/// }
+///
+/// let mut balance = Balance {
+/// right: Weight::Kilogram(2.5),
+/// left: Weight::Pound(1.5),
+/// };
+///
+/// // In this case, `balance[Side::Right]` is sugar for
+/// // `*balance.index(Side::Right)`, since we are only *reading*
+/// // `balance[Side::Right]`, not writing it.
+/// assert_eq!(balance[Side::Right], Weight::Kilogram(2.5));
+///
+/// // However, in this case `balance[Side::Left]` is sugar for
+/// // `*balance.index_mut(Side::Left)`, since we are writing
+/// // `balance[Side::Left]`.
+/// balance[Side::Left] = Weight::Kilogram(3.0);
+/// ```
+#[lang = "index_mut"]
+#[rustc_on_unimplemented(
+ on(
+ _Self = "&str",
+ note = "you can use `.chars().nth()` or `.bytes().nth()`
+see chapter in The Book <https://doc.rust-lang.org/book/ch08-02-strings.html#indexing-into-strings>"
+ ),
+ on(
+ _Self = "str",
+ note = "you can use `.chars().nth()` or `.bytes().nth()`
+see chapter in The Book <https://doc.rust-lang.org/book/ch08-02-strings.html#indexing-into-strings>"
+ ),
+ on(
+ _Self = "std::string::String",
+ note = "you can use `.chars().nth()` or `.bytes().nth()`
+see chapter in The Book <https://doc.rust-lang.org/book/ch08-02-strings.html#indexing-into-strings>"
+ ),
+ message = "the type `{Self}` cannot be mutably indexed by `{Idx}`",
+ label = "`{Self}` cannot be mutably indexed by `{Idx}`"
+)]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(alias = "[")]
+#[doc(alias = "]")]
+#[doc(alias = "[]")]
+pub trait IndexMut<Idx: ?Sized>: Index<Idx> {
+ /// Performs the mutable indexing (`container[index]`) operation.
+ ///
+ /// # Panics
+ ///
+ /// May panic if the index is out of bounds.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[track_caller]
+ fn index_mut(&mut self, index: Idx) -> &mut Self::Output;
+}
diff --git a/library/core/src/ops/mod.rs b/library/core/src/ops/mod.rs
new file mode 100644
index 000000000..31c1a1d09
--- /dev/null
+++ b/library/core/src/ops/mod.rs
@@ -0,0 +1,208 @@
+//! Overloadable operators.
+//!
+//! Implementing these traits allows you to overload certain operators.
+//!
+//! Some of these traits are imported by the prelude, so they are available in
+//! every Rust program. Only operators backed by traits can be overloaded. For
+//! example, the addition operator (`+`) can be overloaded through the [`Add`]
+//! trait, but since the assignment operator (`=`) has no backing trait, there
+//! is no way of overloading its semantics. Additionally, this module does not
+//! provide any mechanism to create new operators. If traitless overloading or
+//! custom operators are required, you should look toward macros or compiler
+//! plugins to extend Rust's syntax.
+//!
+//! Implementations of operator traits should be unsurprising in their
+//! respective contexts, keeping in mind their usual meanings and
+//! [operator precedence]. For example, when implementing [`Mul`], the operation
+//! should have some resemblance to multiplication (and share expected
+//! properties like associativity).
+//!
+//! Note that the `&&` and `||` operators short-circuit, i.e., they only
+//! evaluate their second operand if it contributes to the result. Since this
+//! behavior is not enforceable by traits, `&&` and `||` are not supported as
+//! overloadable operators.
+//!
+//! Many of the operators take their operands by value. In non-generic
+//! contexts involving built-in types, this is usually not a problem.
+//! However, using these operators in generic code requires some
+//! attention if values have to be reused as opposed to letting the operators
+//! consume them. One option is to occasionally use [`clone`].
+//! Another option is to rely on the types involved providing additional
+//! operator implementations for references. For example, for a user-defined
+//! type `T` which is supposed to support addition, it is probably a good
+//! idea to have both `T` and `&T` implement the traits [`Add<T>`][`Add`] and
+//! [`Add<&T>`][`Add`] so that generic code can be written without unnecessary
+//! cloning.
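+//!
+//! For instance (a sketch of that pattern with a hypothetical `Meters` type),
+//! the implementation for references can delegate to the by-value one:
+//!
+//! ```
+//! use std::ops::Add;
+//!
+//! #[derive(Debug, Clone, Copy, PartialEq)]
+//! struct Meters(f64);
+//!
+//! impl Add for Meters {
+//!     type Output = Meters;
+//!     fn add(self, other: Meters) -> Meters {
+//!         Meters(self.0 + other.0)
+//!     }
+//! }
+//!
+//! // Implementing the trait for references lets generic code add `Meters`
+//! // values without consuming (or cloning) its operands.
+//! impl Add<&Meters> for &Meters {
+//!     type Output = Meters;
+//!     fn add(self, other: &Meters) -> Meters {
+//!         *self + *other
+//!     }
+//! }
+//!
+//! let a = Meters(1.0);
+//! assert_eq!(&a + &Meters(2.0), Meters(3.0));
+//! ```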
+//!
+//! # Examples
+//!
+//! This example creates a `Point` struct that implements [`Add`] and [`Sub`],
+//! and then demonstrates adding and subtracting two `Point`s.
+//!
+//! ```rust
+//! use std::ops::{Add, Sub};
+//!
+//! #[derive(Debug, Copy, Clone, PartialEq)]
+//! struct Point {
+//! x: i32,
+//! y: i32,
+//! }
+//!
+//! impl Add for Point {
+//! type Output = Self;
+//!
+//! fn add(self, other: Self) -> Self {
+//! Self {x: self.x + other.x, y: self.y + other.y}
+//! }
+//! }
+//!
+//! impl Sub for Point {
+//! type Output = Self;
+//!
+//! fn sub(self, other: Self) -> Self {
+//! Self {x: self.x - other.x, y: self.y - other.y}
+//! }
+//! }
+//!
+//! assert_eq!(Point {x: 3, y: 3}, Point {x: 1, y: 0} + Point {x: 2, y: 3});
+//! assert_eq!(Point {x: -1, y: -3}, Point {x: 1, y: 0} - Point {x: 2, y: 3});
+//! ```
+//!
+//! See the documentation for each trait for an example implementation.
+//!
+//! The [`Fn`], [`FnMut`], and [`FnOnce`] traits are implemented by types that can be
+//! invoked like functions. Note that [`Fn`] takes `&self`, [`FnMut`] takes `&mut
+//! self` and [`FnOnce`] takes `self`. These correspond to the three kinds of
+//! methods that can be invoked on an instance: call-by-reference,
+//! call-by-mutable-reference, and call-by-value. The most common use of these
+//! traits is to act as bounds to higher-level functions that take functions or
+//! closures as arguments.
+//!
+//! Taking a [`Fn`] as a parameter:
+//!
+//! ```rust
+//! fn call_with_one<F>(func: F) -> usize
+//! where F: Fn(usize) -> usize
+//! {
+//! func(1)
+//! }
+//!
+//! let double = |x| x * 2;
+//! assert_eq!(call_with_one(double), 2);
+//! ```
+//!
+//! Taking a [`FnMut`] as a parameter:
+//!
+//! ```rust
+//! fn do_twice<F>(mut func: F)
+//! where F: FnMut()
+//! {
+//! func();
+//! func();
+//! }
+//!
+//! let mut x: usize = 1;
+//! {
+//! let add_two_to_x = || x += 2;
+//! do_twice(add_two_to_x);
+//! }
+//!
+//! assert_eq!(x, 5);
+//! ```
+//!
+//! Taking a [`FnOnce`] as a parameter:
+//!
+//! ```rust
+//! fn consume_with_relish<F>(func: F)
+//! where F: FnOnce() -> String
+//! {
+//! // `func` consumes its captured variables, so it cannot be run more
+//! // than once
+//! println!("Consumed: {}", func());
+//!
+//! println!("Delicious!");
+//!
+//!     // Attempting to invoke `func()` again will result in a `use of
+//!     // moved value` compile error for `func`
+//! }
+//!
+//! let x = String::from("x");
+//! let consume_and_return_x = move || x;
+//! consume_with_relish(consume_and_return_x);
+//!
+//! // `consume_and_return_x` can no longer be invoked at this point
+//! ```
+//!
+//! [`clone`]: Clone::clone
+//! [operator precedence]: ../../reference/expressions.html#expression-precedence
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+mod arith;
+mod bit;
+mod control_flow;
+mod deref;
+mod drop;
+mod function;
+mod generator;
+mod index;
+mod range;
+mod try_trait;
+mod unsize;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::arith::{Add, Div, Mul, Neg, Rem, Sub};
+#[stable(feature = "op_assign_traits", since = "1.8.0")]
+pub use self::arith::{AddAssign, DivAssign, MulAssign, RemAssign, SubAssign};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::bit::{BitAnd, BitOr, BitXor, Not, Shl, Shr};
+#[stable(feature = "op_assign_traits", since = "1.8.0")]
+pub use self::bit::{BitAndAssign, BitOrAssign, BitXorAssign, ShlAssign, ShrAssign};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::deref::{Deref, DerefMut};
+
+#[unstable(feature = "receiver_trait", issue = "none")]
+pub use self::deref::Receiver;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::drop::Drop;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::function::{Fn, FnMut, FnOnce};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::index::{Index, IndexMut};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::range::{Range, RangeFrom, RangeFull, RangeTo};
+
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+pub use self::range::{Bound, RangeBounds, RangeInclusive, RangeToInclusive};
+
+#[unstable(feature = "one_sided_range", issue = "69780")]
+pub use self::range::OneSidedRange;
+
+#[unstable(feature = "try_trait_v2", issue = "84277")]
+pub use self::try_trait::{FromResidual, Try};
+
+#[unstable(feature = "try_trait_v2_yeet", issue = "96374")]
+pub use self::try_trait::Yeet;
+
+#[unstable(feature = "try_trait_v2_residual", issue = "91285")]
+pub use self::try_trait::Residual;
+
+pub(crate) use self::try_trait::{ChangeOutputType, NeverShortCircuit};
+
+#[unstable(feature = "generator_trait", issue = "43122")]
+pub use self::generator::{Generator, GeneratorState};
+
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+pub use self::unsize::CoerceUnsized;
+
+#[unstable(feature = "dispatch_from_dyn", issue = "none")]
+pub use self::unsize::DispatchFromDyn;
+
+#[unstable(feature = "control_flow_enum", reason = "new API", issue = "75744")]
+pub use self::control_flow::ControlFlow;
diff --git a/library/core/src/ops/range.rs b/library/core/src/ops/range.rs
new file mode 100644
index 000000000..a3b148473
--- /dev/null
+++ b/library/core/src/ops/range.rs
@@ -0,0 +1,991 @@
+use crate::fmt;
+use crate::hash::Hash;
+
+/// An unbounded range (`..`).
+///
+/// `RangeFull` is primarily used as a [slicing index]; its shorthand is `..`.
+/// It cannot serve as an [`Iterator`] because it doesn't have a starting point.
+///
+/// # Examples
+///
+/// The `..` syntax is a `RangeFull`:
+///
+/// ```
+/// assert_eq!((..), std::ops::RangeFull);
+/// ```
+///
+/// It does not have an [`IntoIterator`] implementation, so you can't use it in
+/// a `for` loop directly. This won't compile:
+///
+/// ```compile_fail,E0277
+/// for i in .. {
+/// // ...
+/// }
+/// ```
+///
+/// Used as a [slicing index], `RangeFull` produces the full array as a slice.
+///
+/// ```
+/// let arr = [0, 1, 2, 3, 4];
+/// assert_eq!(arr[ .. ], [0, 1, 2, 3, 4]); // This is the `RangeFull`
+/// assert_eq!(arr[ .. 3], [0, 1, 2 ]);
+/// assert_eq!(arr[ ..=3], [0, 1, 2, 3 ]);
+/// assert_eq!(arr[1.. ], [ 1, 2, 3, 4]);
+/// assert_eq!(arr[1.. 3], [ 1, 2 ]);
+/// assert_eq!(arr[1..=3], [ 1, 2, 3 ]);
+/// ```
+///
+/// [slicing index]: crate::slice::SliceIndex
+#[lang = "RangeFull"]
+#[doc(alias = "..")]
+#[derive(Copy, Clone, Default, PartialEq, Eq, Hash)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct RangeFull;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for RangeFull {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "..")
+ }
+}
+
+/// A (half-open) range bounded inclusively below and exclusively above
+/// (`start..end`).
+///
+/// The range `start..end` contains all values with `start <= x < end`.
+/// It is empty if `start >= end`.
+///
+/// # Examples
+///
+/// The `start..end` syntax is a `Range`:
+///
+/// ```
+/// assert_eq!((3..5), std::ops::Range { start: 3, end: 5 });
+/// assert_eq!(3 + 4 + 5, (3..6).sum());
+/// ```
+///
+/// ```
+/// let arr = [0, 1, 2, 3, 4];
+/// assert_eq!(arr[ .. ], [0, 1, 2, 3, 4]);
+/// assert_eq!(arr[ .. 3], [0, 1, 2 ]);
+/// assert_eq!(arr[ ..=3], [0, 1, 2, 3 ]);
+/// assert_eq!(arr[1.. ], [ 1, 2, 3, 4]);
+/// assert_eq!(arr[1.. 3], [ 1, 2 ]); // This is a `Range`
+/// assert_eq!(arr[1..=3], [ 1, 2, 3 ]);
+/// ```
+#[lang = "Range"]
+#[doc(alias = "..")]
+#[derive(Clone, Default, PartialEq, Eq, Hash)] // not Copy -- see #27186
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Range<Idx> {
+ /// The lower bound of the range (inclusive).
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub start: Idx,
+ /// The upper bound of the range (exclusive).
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub end: Idx,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<Idx: fmt::Debug> fmt::Debug for Range<Idx> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.start.fmt(fmt)?;
+ write!(fmt, "..")?;
+ self.end.fmt(fmt)?;
+ Ok(())
+ }
+}
+
+impl<Idx: PartialOrd<Idx>> Range<Idx> {
+ /// Returns `true` if `item` is contained in the range.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!(!(3..5).contains(&2));
+ /// assert!( (3..5).contains(&3));
+ /// assert!( (3..5).contains(&4));
+ /// assert!(!(3..5).contains(&5));
+ ///
+ /// assert!(!(3..3).contains(&3));
+ /// assert!(!(3..2).contains(&3));
+ ///
+ /// assert!( (0.0..1.0).contains(&0.5));
+ /// assert!(!(0.0..1.0).contains(&f32::NAN));
+ /// assert!(!(0.0..f32::NAN).contains(&0.5));
+ /// assert!(!(f32::NAN..1.0).contains(&0.5));
+ /// ```
+ #[stable(feature = "range_contains", since = "1.35.0")]
+ pub fn contains<U>(&self, item: &U) -> bool
+ where
+ Idx: PartialOrd<U>,
+ U: ?Sized + PartialOrd<Idx>,
+ {
+ <Self as RangeBounds<Idx>>::contains(self, item)
+ }
+
+ /// Returns `true` if the range contains no items.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!(!(3..5).is_empty());
+ /// assert!( (3..3).is_empty());
+ /// assert!( (3..2).is_empty());
+ /// ```
+ ///
+ /// The range is empty if either side is incomparable:
+ ///
+ /// ```
+ /// assert!(!(3.0..5.0).is_empty());
+ /// assert!( (3.0..f32::NAN).is_empty());
+ /// assert!( (f32::NAN..5.0).is_empty());
+ /// ```
+ #[stable(feature = "range_is_empty", since = "1.47.0")]
+ pub fn is_empty(&self) -> bool {
+ !(self.start < self.end)
+ }
+}
+
+/// A range only bounded inclusively below (`start..`).
+///
+/// The `RangeFrom` `start..` contains all values with `x >= start`.
+///
+/// *Note*: Overflow in the [`Iterator`] implementation (when the contained
+/// data type reaches its numerical limit) is allowed to panic, wrap, or
+/// saturate. This behavior is defined by the implementation of the [`Step`]
+/// trait. For primitive integers, this follows the normal rules, and respects
+/// the overflow checks profile (panic in debug, wrap in release). Note also
+/// that overflow happens earlier than you might assume: the overflow happens
+/// in the call to `next` that yields the maximum value, because the range
+/// must advance past that value in order to be ready to yield the next one.
+///
+/// [`Step`]: crate::iter::Step
+///
+/// # Examples
+///
+/// The `start..` syntax is a `RangeFrom`:
+///
+/// ```
+/// assert_eq!((2..), std::ops::RangeFrom { start: 2 });
+/// assert_eq!(2 + 3 + 4, (2..).take(3).sum());
+/// ```
+///
+/// ```
+/// let arr = [0, 1, 2, 3, 4];
+/// assert_eq!(arr[ .. ], [0, 1, 2, 3, 4]);
+/// assert_eq!(arr[ .. 3], [0, 1, 2 ]);
+/// assert_eq!(arr[ ..=3], [0, 1, 2, 3 ]);
+/// assert_eq!(arr[1.. ], [ 1, 2, 3, 4]); // This is a `RangeFrom`
+/// assert_eq!(arr[1.. 3], [ 1, 2 ]);
+/// assert_eq!(arr[1..=3], [ 1, 2, 3 ]);
+/// ```
+#[lang = "RangeFrom"]
+#[doc(alias = "..")]
+#[derive(Clone, PartialEq, Eq, Hash)] // not Copy -- see #27186
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct RangeFrom<Idx> {
+ /// The lower bound of the range (inclusive).
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub start: Idx,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<Idx: fmt::Debug> fmt::Debug for RangeFrom<Idx> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.start.fmt(fmt)?;
+ write!(fmt, "..")?;
+ Ok(())
+ }
+}
+
+impl<Idx: PartialOrd<Idx>> RangeFrom<Idx> {
+ /// Returns `true` if `item` is contained in the range.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!(!(3..).contains(&2));
+ /// assert!( (3..).contains(&3));
+ /// assert!( (3..).contains(&1_000_000_000));
+ ///
+ /// assert!( (0.0..).contains(&0.5));
+ /// assert!(!(0.0..).contains(&f32::NAN));
+ /// assert!(!(f32::NAN..).contains(&0.5));
+ /// ```
+ #[stable(feature = "range_contains", since = "1.35.0")]
+ pub fn contains<U>(&self, item: &U) -> bool
+ where
+ Idx: PartialOrd<U>,
+ U: ?Sized + PartialOrd<Idx>,
+ {
+ <Self as RangeBounds<Idx>>::contains(self, item)
+ }
+}
+
+/// A range only bounded exclusively above (`..end`).
+///
+/// The `RangeTo` `..end` contains all values with `x < end`.
+/// It cannot serve as an [`Iterator`] because it doesn't have a starting point.
+///
+/// # Examples
+///
+/// The `..end` syntax is a `RangeTo`:
+///
+/// ```
+/// assert_eq!((..5), std::ops::RangeTo { end: 5 });
+/// ```
+///
+/// It does not have an [`IntoIterator`] implementation, so you can't use it in
+/// a `for` loop directly. This won't compile:
+///
+/// ```compile_fail,E0277
+/// // error[E0277]: the trait bound `std::ops::RangeTo<{integer}>:
+/// // std::iter::Iterator` is not satisfied
+/// for i in ..5 {
+/// // ...
+/// }
+/// ```
+///
+/// When used as a [slicing index], `RangeTo` produces a slice of all array
+/// elements before the index indicated by `end`.
+///
+/// ```
+/// let arr = [0, 1, 2, 3, 4];
+/// assert_eq!(arr[ .. ], [0, 1, 2, 3, 4]);
+/// assert_eq!(arr[ .. 3], [0, 1, 2 ]); // This is a `RangeTo`
+/// assert_eq!(arr[ ..=3], [0, 1, 2, 3 ]);
+/// assert_eq!(arr[1.. ], [ 1, 2, 3, 4]);
+/// assert_eq!(arr[1.. 3], [ 1, 2 ]);
+/// assert_eq!(arr[1..=3], [ 1, 2, 3 ]);
+/// ```
+///
+/// [slicing index]: crate::slice::SliceIndex
+#[lang = "RangeTo"]
+#[doc(alias = "..")]
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct RangeTo<Idx> {
+ /// The upper bound of the range (exclusive).
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub end: Idx,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<Idx: fmt::Debug> fmt::Debug for RangeTo<Idx> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "..")?;
+ self.end.fmt(fmt)?;
+ Ok(())
+ }
+}
+
+impl<Idx: PartialOrd<Idx>> RangeTo<Idx> {
+ /// Returns `true` if `item` is contained in the range.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!( (..5).contains(&-1_000_000_000));
+ /// assert!( (..5).contains(&4));
+ /// assert!(!(..5).contains(&5));
+ ///
+ /// assert!( (..1.0).contains(&0.5));
+ /// assert!(!(..1.0).contains(&f32::NAN));
+ /// assert!(!(..f32::NAN).contains(&0.5));
+ /// ```
+ #[stable(feature = "range_contains", since = "1.35.0")]
+ pub fn contains<U>(&self, item: &U) -> bool
+ where
+ Idx: PartialOrd<U>,
+ U: ?Sized + PartialOrd<Idx>,
+ {
+ <Self as RangeBounds<Idx>>::contains(self, item)
+ }
+}
+
+/// A range bounded inclusively below and above (`start..=end`).
+///
+/// The `RangeInclusive` `start..=end` contains all values with `x >= start`
+/// and `x <= end`. It is empty unless `start <= end`.
+///
+/// This iterator is [fused], but the specific values of `start` and `end` after
+/// iteration has finished are **unspecified** other than that [`.is_empty()`]
+/// will return `true` once no more values will be produced.
+///
+/// [fused]: crate::iter::FusedIterator
+/// [`.is_empty()`]: RangeInclusive::is_empty
+///
+/// # Examples
+///
+/// The `start..=end` syntax is a `RangeInclusive`:
+///
+/// ```
+/// assert_eq!((3..=5), std::ops::RangeInclusive::new(3, 5));
+/// assert_eq!(3 + 4 + 5, (3..=5).sum());
+/// ```
+///
+/// ```
+/// let arr = [0, 1, 2, 3, 4];
+/// assert_eq!(arr[ .. ], [0, 1, 2, 3, 4]);
+/// assert_eq!(arr[ .. 3], [0, 1, 2 ]);
+/// assert_eq!(arr[ ..=3], [0, 1, 2, 3 ]);
+/// assert_eq!(arr[1.. ], [ 1, 2, 3, 4]);
+/// assert_eq!(arr[1.. 3], [ 1, 2 ]);
+/// assert_eq!(arr[1..=3], [ 1, 2, 3 ]); // This is a `RangeInclusive`
+/// ```
+#[lang = "RangeInclusive"]
+#[doc(alias = "..=")]
+#[derive(Clone, PartialEq, Eq, Hash)] // not Copy -- see #27186
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+pub struct RangeInclusive<Idx> {
+ // Note that the fields here are not public to allow changing the
+ // representation in the future; in particular, while we could plausibly
+ // expose start/end, modifying them without changing (future/current)
+ // private fields may lead to incorrect behavior, so we don't want to
+ // support that mode.
+ pub(crate) start: Idx,
+ pub(crate) end: Idx,
+
+ // This field is:
+ // - `false` upon construction
+ // - `false` when iteration has yielded an element and the iterator is not exhausted
+ // - `true` when iteration has been used to exhaust the iterator
+ //
+ // This is required to support PartialEq and Hash without a PartialOrd bound or specialization.
+ pub(crate) exhausted: bool,
+}
+
+impl<Idx> RangeInclusive<Idx> {
+ /// Creates a new inclusive range. Equivalent to writing `start..=end`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ops::RangeInclusive;
+ ///
+ /// assert_eq!(3..=5, RangeInclusive::new(3, 5));
+ /// ```
+ #[lang = "range_inclusive_new"]
+ #[stable(feature = "inclusive_range_methods", since = "1.27.0")]
+ #[inline]
+ #[rustc_promotable]
+ #[rustc_const_stable(feature = "const_range_new", since = "1.32.0")]
+ pub const fn new(start: Idx, end: Idx) -> Self {
+ Self { start, end, exhausted: false }
+ }
+
+ /// Returns the lower bound of the range (inclusive).
+ ///
+ /// When using an inclusive range for iteration, the values of `start()` and
+    /// [`end()`] are unspecified once iteration has finished. To determine
+ /// whether the inclusive range is empty, use the [`is_empty()`] method
+ /// instead of comparing `start() > end()`.
+ ///
+ /// Note: the value returned by this method is unspecified after the range
+ /// has been iterated to exhaustion.
+ ///
+ /// [`end()`]: RangeInclusive::end
+ /// [`is_empty()`]: RangeInclusive::is_empty
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!((3..=5).start(), &3);
+ /// ```
+ #[stable(feature = "inclusive_range_methods", since = "1.27.0")]
+ #[rustc_const_stable(feature = "const_inclusive_range_methods", since = "1.32.0")]
+ #[inline]
+ pub const fn start(&self) -> &Idx {
+ &self.start
+ }
+
+ /// Returns the upper bound of the range (inclusive).
+ ///
+ /// When using an inclusive range for iteration, the values of [`start()`]
+    /// and `end()` are unspecified once iteration has finished. To determine
+ /// whether the inclusive range is empty, use the [`is_empty()`] method
+ /// instead of comparing `start() > end()`.
+ ///
+ /// Note: the value returned by this method is unspecified after the range
+ /// has been iterated to exhaustion.
+ ///
+ /// [`start()`]: RangeInclusive::start
+ /// [`is_empty()`]: RangeInclusive::is_empty
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!((3..=5).end(), &5);
+ /// ```
+ #[stable(feature = "inclusive_range_methods", since = "1.27.0")]
+ #[rustc_const_stable(feature = "const_inclusive_range_methods", since = "1.32.0")]
+ #[inline]
+ pub const fn end(&self) -> &Idx {
+ &self.end
+ }
+
+ /// Destructures the `RangeInclusive` into (lower bound, upper (inclusive) bound).
+ ///
+ /// Note: the value returned by this method is unspecified after the range
+ /// has been iterated to exhaustion.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!((3..=5).into_inner(), (3, 5));
+ /// ```
+ #[stable(feature = "inclusive_range_methods", since = "1.27.0")]
+ #[inline]
+ pub fn into_inner(self) -> (Idx, Idx) {
+ (self.start, self.end)
+ }
+}
+
+impl RangeInclusive<usize> {
+ /// Converts to an exclusive `Range` for `SliceIndex` implementations.
+ /// The caller is responsible for dealing with `end == usize::MAX`.
+ #[inline]
+ pub(crate) const fn into_slice_range(self) -> Range<usize> {
+ // If we're not exhausted, we want to simply slice `start..end + 1`.
+ // If we are exhausted, then slicing with `end + 1..end + 1` gives us an
+ // empty range that is still subject to bounds-checks for that endpoint.
+ let exclusive_end = self.end + 1;
+ let start = if self.exhausted { exclusive_end } else { self.start };
+ start..exclusive_end
+ }
+}
+
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+impl<Idx: fmt::Debug> fmt::Debug for RangeInclusive<Idx> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.start.fmt(fmt)?;
+ write!(fmt, "..=")?;
+ self.end.fmt(fmt)?;
+ if self.exhausted {
+ write!(fmt, " (exhausted)")?;
+ }
+ Ok(())
+ }
+}
+
+impl<Idx: PartialOrd<Idx>> RangeInclusive<Idx> {
+ /// Returns `true` if `item` is contained in the range.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!(!(3..=5).contains(&2));
+ /// assert!( (3..=5).contains(&3));
+ /// assert!( (3..=5).contains(&4));
+ /// assert!( (3..=5).contains(&5));
+ /// assert!(!(3..=5).contains(&6));
+ ///
+ /// assert!( (3..=3).contains(&3));
+ /// assert!(!(3..=2).contains(&3));
+ ///
+ /// assert!( (0.0..=1.0).contains(&1.0));
+ /// assert!(!(0.0..=1.0).contains(&f32::NAN));
+ /// assert!(!(0.0..=f32::NAN).contains(&0.0));
+ /// assert!(!(f32::NAN..=1.0).contains(&1.0));
+ /// ```
+ ///
+ /// This method always returns `false` after iteration has finished:
+ ///
+ /// ```
+ /// let mut r = 3..=5;
+ /// assert!(r.contains(&3) && r.contains(&5));
+ /// for _ in r.by_ref() {}
+ /// // Precise field values are unspecified here
+ /// assert!(!r.contains(&3) && !r.contains(&5));
+ /// ```
+ #[stable(feature = "range_contains", since = "1.35.0")]
+ pub fn contains<U>(&self, item: &U) -> bool
+ where
+ Idx: PartialOrd<U>,
+ U: ?Sized + PartialOrd<Idx>,
+ {
+ <Self as RangeBounds<Idx>>::contains(self, item)
+ }
+
+ /// Returns `true` if the range contains no items.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!(!(3..=5).is_empty());
+ /// assert!(!(3..=3).is_empty());
+ /// assert!( (3..=2).is_empty());
+ /// ```
+ ///
+ /// The range is empty if either side is incomparable:
+ ///
+ /// ```
+ /// assert!(!(3.0..=5.0).is_empty());
+ /// assert!( (3.0..=f32::NAN).is_empty());
+ /// assert!( (f32::NAN..=5.0).is_empty());
+ /// ```
+ ///
+ /// This method returns `true` after iteration has finished:
+ ///
+ /// ```
+ /// let mut r = 3..=5;
+ /// for _ in r.by_ref() {}
+ /// // Precise field values are unspecified here
+ /// assert!(r.is_empty());
+ /// ```
+ #[stable(feature = "range_is_empty", since = "1.47.0")]
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.exhausted || !(self.start <= self.end)
+ }
+}
+
+/// A range only bounded inclusively above (`..=end`).
+///
+/// The `RangeToInclusive` `..=end` contains all values with `x <= end`.
+/// It cannot serve as an [`Iterator`] because it doesn't have a starting point.
+///
+/// # Examples
+///
+/// The `..=end` syntax is a `RangeToInclusive`:
+///
+/// ```
+/// assert_eq!((..=5), std::ops::RangeToInclusive { end: 5 });
+/// ```
+///
+/// It does not have an [`IntoIterator`] implementation, so you can't use it in a
+/// `for` loop directly. This won't compile:
+///
+/// ```compile_fail,E0277
+/// // error[E0277]: the trait bound `std::ops::RangeToInclusive<{integer}>:
+/// // std::iter::Iterator` is not satisfied
+/// for i in ..=5 {
+/// // ...
+/// }
+/// ```
+///
+/// When used as a [slicing index], `RangeToInclusive` produces a slice of all
+/// array elements up to and including the index indicated by `end`.
+///
+/// ```
+/// let arr = [0, 1, 2, 3, 4];
+/// assert_eq!(arr[ .. ], [0, 1, 2, 3, 4]);
+/// assert_eq!(arr[ .. 3], [0, 1, 2 ]);
+/// assert_eq!(arr[ ..=3], [0, 1, 2, 3 ]); // This is a `RangeToInclusive`
+/// assert_eq!(arr[1.. ], [ 1, 2, 3, 4]);
+/// assert_eq!(arr[1.. 3], [ 1, 2 ]);
+/// assert_eq!(arr[1..=3], [ 1, 2, 3 ]);
+/// ```
+///
+/// [slicing index]: crate::slice::SliceIndex
+#[lang = "RangeToInclusive"]
+#[doc(alias = "..=")]
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+pub struct RangeToInclusive<Idx> {
+    /// The upper bound of the range (inclusive).
+ #[stable(feature = "inclusive_range", since = "1.26.0")]
+ pub end: Idx,
+}
+
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+impl<Idx: fmt::Debug> fmt::Debug for RangeToInclusive<Idx> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "..=")?;
+ self.end.fmt(fmt)?;
+ Ok(())
+ }
+}
+
+impl<Idx: PartialOrd<Idx>> RangeToInclusive<Idx> {
+ /// Returns `true` if `item` is contained in the range.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!( (..=5).contains(&-1_000_000_000));
+ /// assert!( (..=5).contains(&5));
+ /// assert!(!(..=5).contains(&6));
+ ///
+ /// assert!( (..=1.0).contains(&1.0));
+ /// assert!(!(..=1.0).contains(&f32::NAN));
+ /// assert!(!(..=f32::NAN).contains(&0.5));
+ /// ```
+ #[stable(feature = "range_contains", since = "1.35.0")]
+ pub fn contains<U>(&self, item: &U) -> bool
+ where
+ Idx: PartialOrd<U>,
+ U: ?Sized + PartialOrd<Idx>,
+ {
+ <Self as RangeBounds<Idx>>::contains(self, item)
+ }
+}
+
+// RangeToInclusive<Idx> cannot impl From<RangeTo<Idx>>
+// because underflow would be possible with (..0).into()
+
+/// An endpoint of a range of keys.
+///
+/// # Examples
+///
+/// `Bound`s are range endpoints:
+///
+/// ```
+/// use std::ops::Bound::*;
+/// use std::ops::RangeBounds;
+///
+/// assert_eq!((..100).start_bound(), Unbounded);
+/// assert_eq!((1..12).start_bound(), Included(&1));
+/// assert_eq!((1..12).end_bound(), Excluded(&12));
+/// ```
+///
+/// Using a tuple of `Bound`s as an argument to [`BTreeMap::range`].
+/// Note that in most cases, it's better to use range syntax (`1..5`) instead.
+///
+/// ```
+/// use std::collections::BTreeMap;
+/// use std::ops::Bound::{Excluded, Included, Unbounded};
+///
+/// let mut map = BTreeMap::new();
+/// map.insert(3, "a");
+/// map.insert(5, "b");
+/// map.insert(8, "c");
+///
+/// for (key, value) in map.range((Excluded(3), Included(8))) {
+/// println!("{key}: {value}");
+/// }
+///
+/// assert_eq!(Some((&3, &"a")), map.range((Unbounded, Included(5))).next());
+/// ```
+///
+/// [`BTreeMap::range`]: ../../std/collections/btree_map/struct.BTreeMap.html#method.range
+#[stable(feature = "collections_bound", since = "1.17.0")]
+#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
+pub enum Bound<T> {
+ /// An inclusive bound.
+ #[stable(feature = "collections_bound", since = "1.17.0")]
+ Included(#[stable(feature = "collections_bound", since = "1.17.0")] T),
+ /// An exclusive bound.
+ #[stable(feature = "collections_bound", since = "1.17.0")]
+ Excluded(#[stable(feature = "collections_bound", since = "1.17.0")] T),
+ /// An infinite endpoint. Indicates that there is no bound in this direction.
+ #[stable(feature = "collections_bound", since = "1.17.0")]
+ Unbounded,
+}
+
+impl<T> Bound<T> {
+ /// Converts from `&Bound<T>` to `Bound<&T>`.
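+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch (requires the unstable `bound_as_ref` feature):
+ ///
+ /// ```
+ /// #![feature(bound_as_ref)]
+ /// use std::ops::Bound::*;
+ ///
+ /// let bound = Included(5);
+ /// assert_eq!(bound.as_ref(), Included(&5));
+ /// ```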
+ #[inline]
+ #[unstable(feature = "bound_as_ref", issue = "80996")]
+ pub fn as_ref(&self) -> Bound<&T> {
+ match *self {
+ Included(ref x) => Included(x),
+ Excluded(ref x) => Excluded(x),
+ Unbounded => Unbounded,
+ }
+ }
+
+ /// Converts from `&mut Bound<T>` to `Bound<&mut T>`.
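+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch (requires the unstable `bound_as_ref` feature):
+ ///
+ /// ```
+ /// #![feature(bound_as_ref)]
+ /// use std::ops::Bound::*;
+ ///
+ /// let mut bound = Included(5);
+ /// if let Included(x) = bound.as_mut() {
+ ///     *x += 1;
+ /// }
+ /// assert_eq!(bound, Included(6));
+ /// ```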
+ #[inline]
+ #[unstable(feature = "bound_as_ref", issue = "80996")]
+ pub fn as_mut(&mut self) -> Bound<&mut T> {
+ match *self {
+ Included(ref mut x) => Included(x),
+ Excluded(ref mut x) => Excluded(x),
+ Unbounded => Unbounded,
+ }
+ }
+
+ /// Maps a `Bound<T>` to a `Bound<U>` by applying a function to the contained value (including
+ /// both `Included` and `Excluded`), returning a `Bound` of the same kind.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(bound_map)]
+ /// use std::ops::Bound::*;
+ ///
+ /// let bound_string = Included("Hello, World!");
+ ///
+ /// assert_eq!(bound_string.map(|s| s.len()), Included(13));
+ /// ```
+ ///
+ /// ```
+ /// #![feature(bound_map)]
+ /// use std::ops::Bound;
+ /// use Bound::*;
+ ///
+ /// let unbounded_string: Bound<String> = Unbounded;
+ ///
+ /// assert_eq!(unbounded_string.map(|s| s.len()), Unbounded);
+ /// ```
+ #[inline]
+ #[unstable(feature = "bound_map", issue = "86026")]
+ pub fn map<U, F: FnOnce(T) -> U>(self, f: F) -> Bound<U> {
+ match self {
+ Unbounded => Unbounded,
+ Included(x) => Included(f(x)),
+ Excluded(x) => Excluded(f(x)),
+ }
+ }
+}
+
+impl<T: Clone> Bound<&T> {
+ /// Map a `Bound<&T>` to a `Bound<T>` by cloning the contents of the bound.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ops::Bound::*;
+ /// use std::ops::RangeBounds;
+ ///
+ /// assert_eq!((1..12).start_bound(), Included(&1));
+ /// assert_eq!((1..12).start_bound().cloned(), Included(1));
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "bound_cloned", since = "1.55.0")]
+ pub fn cloned(self) -> Bound<T> {
+ match self {
+ Bound::Unbounded => Bound::Unbounded,
+ Bound::Included(x) => Bound::Included(x.clone()),
+ Bound::Excluded(x) => Bound::Excluded(x.clone()),
+ }
+ }
+}
+
+/// `RangeBounds` is implemented by Rust's built-in range types, produced
+/// by range syntax like `..`, `a..`, `..b`, `..=c`, `d..e`, or `f..=g`.
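+///
+/// A function can therefore be generic over all of them. A minimal sketch
+/// (the helper name `start_value` is illustrative, not part of the API):
+///
+/// ```
+/// use std::ops::{Bound, RangeBounds};
+///
+/// // Returns the first integer contained in the range's start, if bounded.
+/// fn start_value<R: RangeBounds<i32>>(range: R) -> Option<i32> {
+///     match range.start_bound() {
+///         Bound::Included(&s) => Some(s),
+///         Bound::Excluded(&s) => Some(s + 1),
+///         Bound::Unbounded => None,
+///     }
+/// }
+///
+/// assert_eq!(start_value(2..5), Some(2));
+/// assert_eq!(start_value(..5), None);
+/// ```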
+#[stable(feature = "collections_range", since = "1.28.0")]
+pub trait RangeBounds<T: ?Sized> {
+ /// Start index bound.
+ ///
+ /// Returns the start value as a `Bound`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # fn main() {
+ /// use std::ops::Bound::*;
+ /// use std::ops::RangeBounds;
+ ///
+ /// assert_eq!((..10).start_bound(), Unbounded);
+ /// assert_eq!((3..10).start_bound(), Included(&3));
+ /// # }
+ /// ```
+ #[stable(feature = "collections_range", since = "1.28.0")]
+ fn start_bound(&self) -> Bound<&T>;
+
+ /// End index bound.
+ ///
+ /// Returns the end value as a `Bound`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # fn main() {
+ /// use std::ops::Bound::*;
+ /// use std::ops::RangeBounds;
+ ///
+ /// assert_eq!((3..).end_bound(), Unbounded);
+ /// assert_eq!((3..10).end_bound(), Excluded(&10));
+ /// # }
+ /// ```
+ #[stable(feature = "collections_range", since = "1.28.0")]
+ fn end_bound(&self) -> Bound<&T>;
+
+ /// Returns `true` if `item` is contained in the range.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!( (3..5).contains(&4));
+ /// assert!(!(3..5).contains(&2));
+ ///
+ /// assert!( (0.0..1.0).contains(&0.5));
+ /// assert!(!(0.0..1.0).contains(&f32::NAN));
+ /// assert!(!(0.0..f32::NAN).contains(&0.5));
+ /// assert!(!(f32::NAN..1.0).contains(&0.5));
+ /// ```
+ #[stable(feature = "range_contains", since = "1.35.0")]
+ fn contains<U>(&self, item: &U) -> bool
+ where
+ T: PartialOrd<U>,
+ U: ?Sized + PartialOrd<T>,
+ {
+ (match self.start_bound() {
+ Included(start) => start <= item,
+ Excluded(start) => start < item,
+ Unbounded => true,
+ }) && (match self.end_bound() {
+ Included(end) => item <= end,
+ Excluded(end) => item < end,
+ Unbounded => true,
+ })
+ }
+}
+
+use self::Bound::{Excluded, Included, Unbounded};
+
+#[stable(feature = "collections_range", since = "1.28.0")]
+impl<T: ?Sized> RangeBounds<T> for RangeFull {
+ fn start_bound(&self) -> Bound<&T> {
+ Unbounded
+ }
+ fn end_bound(&self) -> Bound<&T> {
+ Unbounded
+ }
+}
+
+#[stable(feature = "collections_range", since = "1.28.0")]
+impl<T> RangeBounds<T> for RangeFrom<T> {
+ fn start_bound(&self) -> Bound<&T> {
+ Included(&self.start)
+ }
+ fn end_bound(&self) -> Bound<&T> {
+ Unbounded
+ }
+}
+
+#[stable(feature = "collections_range", since = "1.28.0")]
+impl<T> RangeBounds<T> for RangeTo<T> {
+ fn start_bound(&self) -> Bound<&T> {
+ Unbounded
+ }
+ fn end_bound(&self) -> Bound<&T> {
+ Excluded(&self.end)
+ }
+}
+
+#[stable(feature = "collections_range", since = "1.28.0")]
+impl<T> RangeBounds<T> for Range<T> {
+ fn start_bound(&self) -> Bound<&T> {
+ Included(&self.start)
+ }
+ fn end_bound(&self) -> Bound<&T> {
+ Excluded(&self.end)
+ }
+}
+
+#[stable(feature = "collections_range", since = "1.28.0")]
+impl<T> RangeBounds<T> for RangeInclusive<T> {
+ fn start_bound(&self) -> Bound<&T> {
+ Included(&self.start)
+ }
+ fn end_bound(&self) -> Bound<&T> {
+ if self.exhausted {
+ // When the iterator is exhausted, we usually have start == end,
+ // but we want the range to appear empty, containing nothing.
+ Excluded(&self.end)
+ } else {
+ Included(&self.end)
+ }
+ }
+}
+
+#[stable(feature = "collections_range", since = "1.28.0")]
+impl<T> RangeBounds<T> for RangeToInclusive<T> {
+ fn start_bound(&self) -> Bound<&T> {
+ Unbounded
+ }
+ fn end_bound(&self) -> Bound<&T> {
+ Included(&self.end)
+ }
+}
+
+#[stable(feature = "collections_range", since = "1.28.0")]
+impl<T> RangeBounds<T> for (Bound<T>, Bound<T>) {
+ fn start_bound(&self) -> Bound<&T> {
+ match *self {
+ (Included(ref start), _) => Included(start),
+ (Excluded(ref start), _) => Excluded(start),
+ (Unbounded, _) => Unbounded,
+ }
+ }
+
+ fn end_bound(&self) -> Bound<&T> {
+ match *self {
+ (_, Included(ref end)) => Included(end),
+ (_, Excluded(ref end)) => Excluded(end),
+ (_, Unbounded) => Unbounded,
+ }
+ }
+}
+
+#[stable(feature = "collections_range", since = "1.28.0")]
+impl<'a, T: ?Sized + 'a> RangeBounds<T> for (Bound<&'a T>, Bound<&'a T>) {
+ fn start_bound(&self) -> Bound<&T> {
+ self.0
+ }
+
+ fn end_bound(&self) -> Bound<&T> {
+ self.1
+ }
+}
+
+#[stable(feature = "collections_range", since = "1.28.0")]
+impl<T> RangeBounds<T> for RangeFrom<&T> {
+ fn start_bound(&self) -> Bound<&T> {
+ Included(self.start)
+ }
+ fn end_bound(&self) -> Bound<&T> {
+ Unbounded
+ }
+}
+
+#[stable(feature = "collections_range", since = "1.28.0")]
+impl<T> RangeBounds<T> for RangeTo<&T> {
+ fn start_bound(&self) -> Bound<&T> {
+ Unbounded
+ }
+ fn end_bound(&self) -> Bound<&T> {
+ Excluded(self.end)
+ }
+}
+
+#[stable(feature = "collections_range", since = "1.28.0")]
+impl<T> RangeBounds<T> for Range<&T> {
+ fn start_bound(&self) -> Bound<&T> {
+ Included(self.start)
+ }
+ fn end_bound(&self) -> Bound<&T> {
+ Excluded(self.end)
+ }
+}
+
+#[stable(feature = "collections_range", since = "1.28.0")]
+impl<T> RangeBounds<T> for RangeInclusive<&T> {
+ fn start_bound(&self) -> Bound<&T> {
+ Included(self.start)
+ }
+ fn end_bound(&self) -> Bound<&T> {
+ Included(self.end)
+ }
+}
+
+#[stable(feature = "collections_range", since = "1.28.0")]
+impl<T> RangeBounds<T> for RangeToInclusive<&T> {
+ fn start_bound(&self) -> Bound<&T> {
+ Unbounded
+ }
+ fn end_bound(&self) -> Bound<&T> {
+ Included(self.end)
+ }
+}
+
+/// `OneSidedRange` is implemented for built-in range types that are unbounded
+/// on one side. For example, `a..`, `..b` and `..=c` implement `OneSidedRange`,
+/// but `..`, `d..e`, and `f..=g` do not.
+///
+/// Types that implement `OneSidedRange<T>` must return `Bound::Unbounded`
+/// from one of `RangeBounds::start_bound` or `RangeBounds::end_bound`.
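+///
+/// # Examples
+///
+/// A minimal sketch (requires the unstable `one_sided_range` feature; the
+/// helper `take_one_sided` is illustrative only):
+///
+/// ```
+/// #![feature(one_sided_range)]
+/// use std::ops::OneSidedRange;
+///
+/// fn take_one_sided<R: OneSidedRange<usize>>(_range: R) {}
+///
+/// take_one_sided(3..);
+/// take_one_sided(..3);
+/// take_one_sided(..=3);
+/// // take_one_sided(1..3); // would not compile: bounded on both sides
+/// ```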
+#[unstable(feature = "one_sided_range", issue = "69780")]
+pub trait OneSidedRange<T: ?Sized>: RangeBounds<T> {}
+
+#[unstable(feature = "one_sided_range", issue = "69780")]
+impl<T> OneSidedRange<T> for RangeTo<T> where Self: RangeBounds<T> {}
+
+#[unstable(feature = "one_sided_range", issue = "69780")]
+impl<T> OneSidedRange<T> for RangeFrom<T> where Self: RangeBounds<T> {}
+
+#[unstable(feature = "one_sided_range", issue = "69780")]
+impl<T> OneSidedRange<T> for RangeToInclusive<T> where Self: RangeBounds<T> {}
diff --git a/library/core/src/ops/try_trait.rs b/library/core/src/ops/try_trait.rs
new file mode 100644
index 000000000..02f7f62bf
--- /dev/null
+++ b/library/core/src/ops/try_trait.rs
@@ -0,0 +1,418 @@
+use crate::ops::ControlFlow;
+
+/// The `?` operator and `try {}` blocks.
+///
+/// `try_*` methods typically involve a type implementing this trait. For
+/// example, the closures passed to [`Iterator::try_fold`] and
+/// [`Iterator::try_for_each`] must return such a type.
+///
+/// `Try` types are typically those containing two or more categories of values,
+/// some subset of which are so commonly handled via early returns that it's
+/// worth providing a terse (but still visible) syntax to make that easy.
+///
+/// This is most often seen for error handling with [`Result`] and [`Option`].
+/// The quintessential implementation of this trait is on [`ControlFlow`].
+///
+/// # Using `Try` in Generic Code
+///
+/// `Iterator::try_fold` has been stable to call since Rust 1.27, but
+/// this trait is much newer. To illustrate the various associated types and
+/// methods, let's implement our own version.
+///
+/// As a reminder, an infallible version of a fold looks something like this:
+/// ```
+/// fn simple_fold<A, T>(
+/// iter: impl Iterator<Item = T>,
+/// mut accum: A,
+/// mut f: impl FnMut(A, T) -> A,
+/// ) -> A {
+/// for x in iter {
+/// accum = f(accum, x);
+/// }
+/// accum
+/// }
+/// ```
+///
+/// So instead of `f` returning just an `A`, we'll need it to return some other
+/// type that produces an `A` in the "don't short circuit" path. Conveniently,
+/// that's also the type we need to return from the function.
+///
+/// Let's add a new generic parameter `R` for that type, and bound it to the
+/// output type that we want:
+/// ```
+/// # #![feature(try_trait_v2)]
+/// # use std::ops::Try;
+/// fn simple_try_fold_1<A, T, R: Try<Output = A>>(
+/// iter: impl Iterator<Item = T>,
+/// mut accum: A,
+/// mut f: impl FnMut(A, T) -> R,
+/// ) -> R {
+/// todo!()
+/// }
+/// ```
+///
+/// If we get through the entire iterator, we need to wrap up the accumulator
+/// into the return type using [`Try::from_output`]:
+/// ```
+/// # #![feature(try_trait_v2)]
+/// # use std::ops::{ControlFlow, Try};
+/// fn simple_try_fold_2<A, T, R: Try<Output = A>>(
+/// iter: impl Iterator<Item = T>,
+/// mut accum: A,
+/// mut f: impl FnMut(A, T) -> R,
+/// ) -> R {
+/// for x in iter {
+/// let cf = f(accum, x).branch();
+/// match cf {
+/// ControlFlow::Continue(a) => accum = a,
+/// ControlFlow::Break(_) => todo!(),
+/// }
+/// }
+/// R::from_output(accum)
+/// }
+/// ```
+///
+/// We'll also need [`FromResidual::from_residual`] to turn the residual back
+/// into the original type. But because it's a supertrait of `Try`, we don't
+/// need to mention it in the bounds. All types which implement `Try` can be
+/// recreated from their corresponding residual, so we'll just call it:
+/// ```
+/// # #![feature(try_trait_v2)]
+/// # use std::ops::{ControlFlow, Try};
+/// pub fn simple_try_fold_3<A, T, R: Try<Output = A>>(
+/// iter: impl Iterator<Item = T>,
+/// mut accum: A,
+/// mut f: impl FnMut(A, T) -> R,
+/// ) -> R {
+/// for x in iter {
+/// let cf = f(accum, x).branch();
+/// match cf {
+/// ControlFlow::Continue(a) => accum = a,
+/// ControlFlow::Break(r) => return R::from_residual(r),
+/// }
+/// }
+/// R::from_output(accum)
+/// }
+/// ```
+///
+/// But this "call `branch`, then `match` on it, and `return` if it was a
+/// `Break`" is exactly what happens inside the `?` operator. So rather than
+/// do all this manually, we can just use `?` instead:
+/// ```
+/// # #![feature(try_trait_v2)]
+/// # use std::ops::Try;
+/// fn simple_try_fold<A, T, R: Try<Output = A>>(
+/// iter: impl Iterator<Item = T>,
+/// mut accum: A,
+/// mut f: impl FnMut(A, T) -> R,
+/// ) -> R {
+/// for x in iter {
+/// accum = f(accum, x)?;
+/// }
+/// R::from_output(accum)
+/// }
+/// ```
+#[unstable(feature = "try_trait_v2", issue = "84277")]
+#[rustc_on_unimplemented(
+ on(
+ all(from_desugaring = "TryBlock"),
+ message = "a `try` block must return `Result` or `Option` \
+ (or another type that implements `{Try}`)",
+ label = "could not wrap the final value of the block as `{Self}` doesn't implement `Try`",
+ ),
+ on(
+ all(from_desugaring = "QuestionMark"),
+ message = "the `?` operator can only be applied to values that implement `{Try}`",
+ label = "the `?` operator cannot be applied to type `{Self}`"
+ )
+)]
+#[doc(alias = "?")]
+#[lang = "Try"]
+pub trait Try: FromResidual {
+ /// The type of the value produced by `?` when *not* short-circuiting.
+ #[unstable(feature = "try_trait_v2", issue = "84277")]
+ type Output;
+
+ /// The type of the value passed to [`FromResidual::from_residual`]
+ /// as part of `?` when short-circuiting.
+ ///
+ /// This represents the possible values of the `Self` type which are *not*
+ /// represented by the `Output` type.
+ ///
+ /// # Note to Implementors
+ ///
+ /// The choice of this type is critical to interconversion.
+ /// Unlike the `Output` type, which will often be a raw generic type,
+ /// this type is typically a newtype of some sort to "color" the type
+ /// so that it's distinguishable from the residuals of other types.
+ ///
+ /// This is why `Result<T, E>::Residual` is not `E`, but `Result<Infallible, E>`.
+ /// That way it's distinct from `ControlFlow<E>::Residual`, for example,
+ /// and thus `?` on `ControlFlow` cannot be used in a method returning `Result`.
+ ///
+ /// If you're making a generic type `Foo<T>` that implements `Try<Output = T>`,
+ /// then typically you can use `Foo<std::convert::Infallible>` as its `Residual`
+ /// type: that type will have a "hole" in the correct place, and will maintain the
+ /// "foo-ness" of the residual so other types need to opt-in to interconversion.
+ #[unstable(feature = "try_trait_v2", issue = "84277")]
+ type Residual;
+
+ /// Constructs the type from its `Output` type.
+ ///
+ /// This should be implemented consistently with the `branch` method
+ /// such that applying the `?` operator will get back the original value:
+ /// `Try::from_output(x).branch() --> ControlFlow::Continue(x)`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(try_trait_v2)]
+ /// use std::ops::Try;
+ ///
+ /// assert_eq!(<Result<_, String> as Try>::from_output(3), Ok(3));
+ /// assert_eq!(<Option<_> as Try>::from_output(4), Some(4));
+ /// assert_eq!(
+ /// <std::ops::ControlFlow<String, _> as Try>::from_output(5),
+ /// std::ops::ControlFlow::Continue(5),
+ /// );
+ ///
+ /// # fn make_question_mark_work() -> Option<()> {
+ /// assert_eq!(Option::from_output(4)?, 4);
+ /// # None }
+ /// # make_question_mark_work();
+ ///
+ /// // This is used, for example, on the accumulator in `try_fold`:
+ /// let r = std::iter::empty().try_fold(4, |_, ()| -> Option<_> { unreachable!() });
+ /// assert_eq!(r, Some(4));
+ /// ```
+ #[lang = "from_output"]
+ #[unstable(feature = "try_trait_v2", issue = "84277")]
+ fn from_output(output: Self::Output) -> Self;
+
+ /// Used in `?` to decide whether the operator should produce a value
+ /// (because this returned [`ControlFlow::Continue`])
+ /// or propagate a value back to the caller
+ /// (because this returned [`ControlFlow::Break`]).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(try_trait_v2)]
+ /// use std::ops::{ControlFlow, Try};
+ ///
+ /// assert_eq!(Ok::<_, String>(3).branch(), ControlFlow::Continue(3));
+ /// assert_eq!(Err::<String, _>(3).branch(), ControlFlow::Break(Err(3)));
+ ///
+ /// assert_eq!(Some(3).branch(), ControlFlow::Continue(3));
+ /// assert_eq!(None::<String>.branch(), ControlFlow::Break(None));
+ ///
+ /// assert_eq!(ControlFlow::<String, _>::Continue(3).branch(), ControlFlow::Continue(3));
+ /// assert_eq!(
+ /// ControlFlow::<_, String>::Break(3).branch(),
+ /// ControlFlow::Break(ControlFlow::Break(3)),
+ /// );
+ /// ```
+ #[lang = "branch"]
+ #[unstable(feature = "try_trait_v2", issue = "84277")]
+ fn branch(self) -> ControlFlow<Self::Residual, Self::Output>;
+}
+
+/// Used to specify which residuals can be converted into which [`crate::ops::Try`] types.
+///
+/// Every `Try` type needs to be recreatable from its own associated
+/// `Residual` type, but can also have additional `FromResidual` implementations
+/// to support interconversion with other `Try` types.
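+///
+/// For example (a sketch; `MyResult` is illustrative only), a type can accept
+/// `?` applied to an `Option` by providing an impl for `Option`'s residual:
+///
+/// ```
+/// #![feature(try_trait_v2)]
+/// use std::convert::Infallible;
+/// use std::ops::FromResidual;
+///
+/// struct MyResult<T>(Option<T>);
+///
+/// // A `None` propagated by `?` arrives here as `Option<Infallible>`.
+/// impl<T> FromResidual<Option<Infallible>> for MyResult<T> {
+///     fn from_residual(_: Option<Infallible>) -> Self {
+///         MyResult(None)
+///     }
+/// }
+///
+/// fn first(xs: &[i32]) -> MyResult<i32> {
+///     let x = xs.first()?; // `?` on an `Option` in a function returning `MyResult`
+///     MyResult(Some(*x))
+/// }
+/// ```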
+#[rustc_on_unimplemented(
+ on(
+ all(
+ from_desugaring = "QuestionMark",
+ _Self = "std::result::Result<T, E>",
+ R = "std::option::Option<std::convert::Infallible>"
+ ),
+ message = "the `?` operator can only be used on `Result`s, not `Option`s, \
+ in {ItemContext} that returns `Result`",
+ label = "use `.ok_or(...)?` to provide an error compatible with `{Self}`",
+ enclosing_scope = "this function returns a `Result`"
+ ),
+ on(
+ all(
+ from_desugaring = "QuestionMark",
+ _Self = "std::result::Result<T, E>",
+ ),
+ // There's a special error message in the trait selection code for
+ // `From` in `?`, so this is not shown for result-in-result errors,
+ // and thus it can be phrased more strongly than `ControlFlow`'s.
+ message = "the `?` operator can only be used on `Result`s \
+ in {ItemContext} that returns `Result`",
+ label = "this `?` produces `{R}`, which is incompatible with `{Self}`",
+ enclosing_scope = "this function returns a `Result`"
+ ),
+ on(
+ all(
+ from_desugaring = "QuestionMark",
+ _Self = "std::option::Option<T>",
+ R = "std::result::Result<T, E>",
+ ),
+ message = "the `?` operator can only be used on `Option`s, not `Result`s, \
+ in {ItemContext} that returns `Option`",
+ label = "use `.ok()?` if you want to discard the `{R}` error information",
+ enclosing_scope = "this function returns an `Option`"
+ ),
+ on(
+ all(
+ from_desugaring = "QuestionMark",
+ _Self = "std::option::Option<T>",
+ ),
+ // `Option`-in-`Option` always works, as there's only one possible
+ // residual, so this can also be phrased strongly.
+ message = "the `?` operator can only be used on `Option`s \
+ in {ItemContext} that returns `Option`",
+ label = "this `?` produces `{R}`, which is incompatible with `{Self}`",
+ enclosing_scope = "this function returns an `Option`"
+ ),
+ on(
+ all(
+ from_desugaring = "QuestionMark",
+ _Self = "std::ops::ControlFlow<B, C>",
+ R = "std::ops::ControlFlow<B, C>",
+ ),
+ message = "the `?` operator in {ItemContext} that returns `ControlFlow<B, _>` \
+ can only be used on other `ControlFlow<B, _>`s (with the same Break type)",
+ label = "this `?` produces `{R}`, which is incompatible with `{Self}`",
+ enclosing_scope = "this function returns a `ControlFlow`",
+ note = "unlike `Result`, there's no `From`-conversion performed for `ControlFlow`"
+ ),
+ on(
+ all(
+ from_desugaring = "QuestionMark",
+ _Self = "std::ops::ControlFlow<B, C>",
+ // `R` is not a `ControlFlow`, as that case was matched previously
+ ),
+ message = "the `?` operator can only be used on `ControlFlow`s \
+ in {ItemContext} that returns `ControlFlow`",
+ label = "this `?` produces `{R}`, which is incompatible with `{Self}`",
+ enclosing_scope = "this function returns a `ControlFlow`",
+ ),
+ on(
+ all(from_desugaring = "QuestionMark"),
+ message = "the `?` operator can only be used in {ItemContext} \
+ that returns `Result` or `Option` \
+ (or another type that implements `{FromResidual}`)",
+ label = "cannot use the `?` operator in {ItemContext} that returns `{Self}`",
+ enclosing_scope = "this function should return `Result` or `Option` to accept `?`"
+ ),
+)]
+#[rustc_diagnostic_item = "FromResidual"]
+#[unstable(feature = "try_trait_v2", issue = "84277")]
+pub trait FromResidual<R = <Self as Try>::Residual> {
+ /// Constructs the type from a compatible `Residual` type.
+ ///
+ /// This should be implemented consistently with the `branch` method such
+ /// that applying the `?` operator will get back an equivalent residual:
+ /// `FromResidual::from_residual(r).branch() --> ControlFlow::Break(r)`.
+ /// (The residual need not be *identical* when interconversion is involved.)
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(try_trait_v2)]
+ /// use std::ops::{ControlFlow, FromResidual};
+ ///
+ /// assert_eq!(Result::<String, i64>::from_residual(Err(3_u8)), Err(3));
+ /// assert_eq!(Option::<String>::from_residual(None), None);
+ /// assert_eq!(
+ /// ControlFlow::<_, String>::from_residual(ControlFlow::Break(5)),
+ /// ControlFlow::Break(5),
+ /// );
+ /// ```
+ #[lang = "from_residual"]
+ #[unstable(feature = "try_trait_v2", issue = "84277")]
+ fn from_residual(residual: R) -> Self;
+}
+
+#[unstable(
+ feature = "yeet_desugar_details",
+ issue = "none",
+ reason = "just here to simplify the desugaring; will never be stabilized"
+)]
+#[inline]
+#[track_caller] // because `Result::from_residual` has it
+#[lang = "from_yeet"]
+pub fn from_yeet<T, Y>(yeeted: Y) -> T
+where
+ T: FromResidual<Yeet<Y>>,
+{
+ FromResidual::from_residual(Yeet(yeeted))
+}
+
+/// Allows retrieving the canonical type implementing [`Try`] that has this type
+/// as its residual and allows it to hold an `O` as its output.
+///
+/// If you think of the `Try` trait as splitting a type into its [`Try::Output`]
+/// and [`Try::Residual`] components, this allows putting them back together.
+///
+/// For example,
+/// `Result<T, E>: Try<Output = T, Residual = Result<Infallible, E>>`,
+/// and in the other direction,
+/// `<Result<Infallible, E> as Residual<T>>::TryType = Result<T, E>`.
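+///
+/// A minimal sketch (assuming the unstable `try_trait_v2_residual` feature):
+///
+/// ```
+/// #![feature(try_trait_v2_residual)]
+/// use std::convert::Infallible;
+/// use std::ops::Residual;
+///
+/// // Put the components back together: same residual, new output type.
+/// type Rebuilt = <Result<Infallible, String> as Residual<u32>>::TryType;
+/// let x: Rebuilt = Ok(3);
+/// assert_eq!(x, Ok(3));
+/// ```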
+#[unstable(feature = "try_trait_v2_residual", issue = "91285")]
+pub trait Residual<O> {
+ /// The "return" type of this meta-function.
+ #[unstable(feature = "try_trait_v2_residual", issue = "91285")]
+ type TryType: Try<Output = O, Residual = Self>;
+}
+
+#[unstable(feature = "pub_crate_should_not_need_unstable_attr", issue = "none")]
+pub(crate) type ChangeOutputType<T, V> = <<T as Try>::Residual as Residual<V>>::TryType;
+
+/// An adapter for implementing non-try methods via the `Try` implementation.
+///
+/// Conceptually the same as `Result<T, !>`, but requiring less work in trait
+/// solving and inhabited-ness checking and such, by being an obvious newtype
+/// and not having `From` bounds lying around.
+///
+/// Not currently planned to be exposed publicly, so just `pub(crate)`.
+#[repr(transparent)]
+pub(crate) struct NeverShortCircuit<T>(pub T);
+
+impl<T> NeverShortCircuit<T> {
+ /// Wrap a binary `FnMut` to return its result wrapped in a `NeverShortCircuit`.
+ #[inline]
+ pub fn wrap_mut_2<A, B>(mut f: impl FnMut(A, B) -> T) -> impl FnMut(A, B) -> Self {
+ move |a, b| NeverShortCircuit(f(a, b))
+ }
+}
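+
+// A sketch of the intended use (internal only, so shown as a comment):
+//
+//     let mut add = NeverShortCircuit::wrap_mut_2(|acc: i32, x: i32| acc + x);
+//     let NeverShortCircuit(sum) = add(1, 2);
+//     assert_eq!(sum, 3);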
+
+pub(crate) enum NeverShortCircuitResidual {}
+
+impl<T> Try for NeverShortCircuit<T> {
+ type Output = T;
+ type Residual = NeverShortCircuitResidual;
+
+ #[inline]
+ fn branch(self) -> ControlFlow<NeverShortCircuitResidual, T> {
+ ControlFlow::Continue(self.0)
+ }
+
+ #[inline]
+ fn from_output(x: T) -> Self {
+ NeverShortCircuit(x)
+ }
+}
+
+impl<T> FromResidual for NeverShortCircuit<T> {
+ #[inline]
+ fn from_residual(never: NeverShortCircuitResidual) -> Self {
+ match never {}
+ }
+}
+
+impl<T> Residual<T> for NeverShortCircuitResidual {
+ type TryType = NeverShortCircuit<T>;
+}
+
+/// Implement `FromResidual<Yeet<T>>` on your type to enable
+/// `do yeet expr` syntax in functions returning your type.
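+///
+/// For example (a sketch, assuming the unstable `yeet_expr` feature), `Option`
+/// already provides such an impl, so `do yeet` works in functions returning it:
+///
+/// ```
+/// #![feature(yeet_expr)]
+///
+/// fn even_or_bail(x: i32) -> Option<i32> {
+///     if x % 2 != 0 {
+///         do yeet; // yeets `()`, producing `Yeet<()>` as the residual
+///     }
+///     Some(x)
+/// }
+///
+/// assert_eq!(even_or_bail(4), Some(4));
+/// assert_eq!(even_or_bail(5), None);
+/// ```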
+#[unstable(feature = "try_trait_v2_yeet", issue = "96374")]
+#[derive(Debug)]
+pub struct Yeet<T>(pub T);
diff --git a/library/core/src/ops/unsize.rs b/library/core/src/ops/unsize.rs
new file mode 100644
index 000000000..a920b9165
--- /dev/null
+++ b/library/core/src/ops/unsize.rs
@@ -0,0 +1,132 @@
+use crate::marker::Unsize;
+
+/// Trait that indicates that this is a pointer or a wrapper for one,
+/// where unsizing can be performed on the pointee.
+///
+/// See the [DST coercion RFC][dst-coerce] and [the nomicon entry on coercion][nomicon-coerce]
+/// for more details.
+///
+/// For builtin pointer types, pointers to `T` will coerce to pointers to `U` if `T: Unsize<U>`
+/// by converting from a thin pointer to a fat pointer.
+///
+/// For custom types, the coercion here works by coercing `Foo<T>` to `Foo<U>`
+/// provided an impl of `CoerceUnsized<Foo<U>> for Foo<T>` exists.
+/// Such an impl can only be written if `Foo<T>` has only a single non-phantomdata
+/// field involving `T`. If the type of that field is `Bar<T>`, an implementation
+/// of `CoerceUnsized<Bar<U>> for Bar<T>` must exist. The coercion will work by
+/// coercing the `Bar<T>` field into `Bar<U>` and filling in the rest of the fields
+/// from `Foo<T>` to create a `Foo<U>`. This will effectively drill down to a pointer
+/// field and coerce that.
+///
+/// Generally, for smart pointers you will implement
+/// `CoerceUnsized<Ptr<U>> for Ptr<T> where T: Unsize<U>, U: ?Sized`, with an
+/// optional `?Sized` bound on `T` itself. For wrapper types that directly embed `T`
+/// like `Cell<T>` and `RefCell<T>`, you
+/// can directly implement `CoerceUnsized<Wrap<U>> for Wrap<T> where T: CoerceUnsized<U>`.
+/// This will let coercions of types like `Cell<Box<T>>` work.
+///
+/// [`Unsize`][unsize] is used to mark types which can be coerced to DSTs if behind
+/// pointers. It is implemented automatically by the compiler.
+///
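+/// A minimal sketch (assuming the unstable `coerce_unsized` and `unsize`
+/// features; `MyBox` is illustrative only):
+///
+/// ```
+/// #![feature(coerce_unsized, unsize)]
+/// use std::marker::Unsize;
+/// use std::ops::CoerceUnsized;
+///
+/// struct MyBox<T: ?Sized>(Box<T>);
+///
+/// // The single field `Box<T>` coerces to `Box<U>`, so `MyBox` can too.
+/// impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<MyBox<U>> for MyBox<T> {}
+///
+/// let concrete: MyBox<[i32; 3]> = MyBox(Box::new([1, 2, 3]));
+/// let coerced: MyBox<[i32]> = concrete; // unsizing coercion through `MyBox`
+/// ```
+///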
+/// [dst-coerce]: https://github.com/rust-lang/rfcs/blob/master/text/0982-dst-coercion.md
+/// [unsize]: crate::marker::Unsize
+/// [nomicon-coerce]: ../../nomicon/coercions.html
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[lang = "coerce_unsized"]
+pub trait CoerceUnsized<T: ?Sized> {
+ // Empty.
+}
+
+// &mut T -> &mut U
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {}
+// &mut T -> &U
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b mut T {}
+// &mut T -> *mut U
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for &'a mut T {}
+// &mut T -> *const U
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for &'a mut T {}
+
+// &T -> &U
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {}
+// &T -> *const U
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for &'a T {}
+
+// *mut T -> *mut U
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {}
+// *mut T -> *const U
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *mut T {}
+
+// *const T -> *const U
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {}
+
+/// `DispatchFromDyn` is used in the implementation of object safety checks (specifically allowing
+/// arbitrary self types), to guarantee that a method's receiver type can be dispatched on.
+///
+/// Note: `DispatchFromDyn` was briefly named `CoerceSized` (and had a slightly different
+/// interpretation).
+///
+/// Imagine we have a trait object `t` with type `&dyn Tr`, where `Tr` is some trait with a method
+/// `m` defined as `fn m(&self);`. When calling `t.m()`, the receiver `t` is a wide pointer, but an
+/// implementation of `m` will expect a narrow pointer as `&self` (a reference to the concrete
+/// type). The compiler must generate an implicit conversion from the trait object/wide pointer to
+/// the concrete reference/narrow pointer. Implementing `DispatchFromDyn` indicates that that
+/// conversion is allowed and thus that the type implementing `DispatchFromDyn` is safe to use as
+/// the self type in an object-safe method (in the above example, the compiler will require
+/// that `DispatchFromDyn` is implemented for `&'a U`).
+///
+/// `DispatchFromDyn` does not specify the conversion from wide pointer to narrow pointer; the
+/// conversion is hard-wired into the compiler. For the conversion to work, the following
+/// properties must hold (i.e., it is only safe to implement `DispatchFromDyn` for types which have
+/// these properties; these are also checked by the compiler):
+///
+/// * EITHER `Self` and `T` are either both references or both raw pointers; in either case, with
+/// the same mutability.
+/// * OR, all of the following hold
+/// - `Self` and `T` must have the same type constructor, and only vary in a single type parameter
+/// formal (the *coerced type*, e.g., `impl DispatchFromDyn<Rc<T>> for Rc<U>` is ok, and the
+/// single type parameter (instantiated with `T` or `U`) is the coerced type;
+/// `impl DispatchFromDyn<Arc<T>> for Rc<U>` is not ok).
+/// - The definition for `Self` must be a struct.
+/// - The definition for `Self` must not be `#[repr(packed)]` or `#[repr(C)]`.
+/// - Other than one-aligned, zero-sized fields, the definition for `Self` must have exactly one
+/// field and that field's type must be the coerced type. Furthermore, `Self`'s field type must
+/// implement `DispatchFromDyn<F>` where `F` is the type of `T`'s field.
+///
+/// An example implementation of the trait:
+///
+/// ```
+/// # #![feature(dispatch_from_dyn, unsize)]
+/// # use std::{ops::DispatchFromDyn, marker::Unsize};
+/// # struct Rc<T: ?Sized>(std::rc::Rc<T>);
+/// impl<T: ?Sized, U: ?Sized> DispatchFromDyn<Rc<U>> for Rc<T>
+/// where
+/// T: Unsize<U>,
+/// {}
+/// ```
+#[unstable(feature = "dispatch_from_dyn", issue = "none")]
+#[lang = "dispatch_from_dyn"]
+pub trait DispatchFromDyn<T> {
+ // Empty.
+}
+
+// &T -> &U
+#[unstable(feature = "dispatch_from_dyn", issue = "none")]
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<&'a U> for &'a T {}
+// &mut T -> &mut U
+#[unstable(feature = "dispatch_from_dyn", issue = "none")]
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<&'a mut U> for &'a mut T {}
+// *const T -> *const U
+#[unstable(feature = "dispatch_from_dyn", issue = "none")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<*const U> for *const T {}
+// *mut T -> *mut U
+#[unstable(feature = "dispatch_from_dyn", issue = "none")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<*mut U> for *mut T {}
diff --git a/library/core/src/option.rs b/library/core/src/option.rs
new file mode 100644
index 000000000..bca73cb77
--- /dev/null
+++ b/library/core/src/option.rs
@@ -0,0 +1,2356 @@
+//! Optional values.
+//!
+//! Type [`Option`] represents an optional value: every [`Option`]
+//! is either [`Some`] and contains a value, or [`None`], and
+//! does not. [`Option`] types are very common in Rust code, as
+//! they have a number of uses:
+//!
+//! * Initial values
+//! * Return values for functions that are not defined
+//! over their entire input range (partial functions)
+//! * Return value for otherwise reporting simple errors, where [`None`] is
+//! returned on error
+//! * Optional struct fields
+//! * Struct fields that can be loaned or "taken"
+//! * Optional function arguments
+//! * Nullable pointers
+//! * Swapping things out of difficult situations
+//!
+//! [`Option`]s are commonly paired with pattern matching to query the presence
+//! of a value and take action, always accounting for the [`None`] case.
+//!
+//! ```
+//! fn divide(numerator: f64, denominator: f64) -> Option<f64> {
+//! if denominator == 0.0 {
+//! None
+//! } else {
+//! Some(numerator / denominator)
+//! }
+//! }
+//!
+//! // The return value of the function is an option
+//! let result = divide(2.0, 3.0);
+//!
+//! // Pattern match to retrieve the value
+//! match result {
+//! // The division was valid
+//! Some(x) => println!("Result: {x}"),
+//! // The division was invalid
+//! None => println!("Cannot divide by 0"),
+//! }
+//! ```
+//!
+//
+// FIXME: Show how `Option` is used in practice, with lots of methods
+//
+//! # Options and pointers ("nullable" pointers)
+//!
+//! Rust's pointer types must always point to a valid location; there are
+//! no "null" references. Instead, Rust has *optional* pointers, like
+//! the optional owned box, <code>[Option]<[Box\<T>]></code>.
+//!
+//! [Box\<T>]: ../../std/boxed/struct.Box.html
+//!
+//! The following example uses [`Option`] to create an optional box of
+//! [`i32`]. Notice that in order to use the inner [`i32`] value, the
+//! `check_optional` function first needs to use pattern matching to
+//! determine whether the box has a value (i.e., it is [`Some(...)`][`Some`]) or
+//! not ([`None`]).
+//!
+//! ```
+//! let optional = None;
+//! check_optional(optional);
+//!
+//! let optional = Some(Box::new(9000));
+//! check_optional(optional);
+//!
+//! fn check_optional(optional: Option<Box<i32>>) {
+//! match optional {
+//! Some(p) => println!("has value {p}"),
+//! None => println!("has no value"),
+//! }
+//! }
+//! ```
+//!
+//! # Representation
+//!
+//! Rust guarantees to optimize the following types `T` such that
+//! [`Option<T>`] has the same size as `T`:
+//!
+//! * [`Box<U>`]
+//! * `&U`
+//! * `&mut U`
+//! * `fn`, `extern "C" fn`[^extern_fn]
+//! * [`num::NonZero*`]
+//! * [`ptr::NonNull<U>`]
+//! * `#[repr(transparent)]` struct around one of the types in this list.
+//!
+//! [^extern_fn]: this remains true for any other ABI: `extern "abi" fn` (_e.g._, `extern "system" fn`)
+//!
+//! [`Box<U>`]: ../../std/boxed/struct.Box.html
+//! [`num::NonZero*`]: crate::num
+//! [`ptr::NonNull<U>`]: crate::ptr::NonNull
+//!
+//! This is called the "null pointer optimization" or NPO.
+//!
+//! It is further guaranteed that, for the cases above, one can
+//! [`mem::transmute`] from all valid values of `T` to `Option<T>` and
+//! from `Some::<T>(_)` to `T` (but transmuting `None::<T>` to `T`
+//! is undefined behaviour).
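+//!
+//! For example, the size guarantee can be checked with [`mem::size_of`]:
+//!
+//! ```
+//! use std::mem::size_of;
+//!
+//! assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
+//! assert_eq!(size_of::<Option<Box<u8>>>(), size_of::<Box<u8>>());
+//! ```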
+//!
+//! # Method overview
+//!
+//! In addition to working with pattern matching, [`Option`] provides a wide
+//! variety of different methods.
+//!
+//! ## Querying the variant
+//!
+//! The [`is_some`] and [`is_none`] methods return [`true`] if the [`Option`]
+//! is [`Some`] or [`None`], respectively.
+//!
+//! [`is_none`]: Option::is_none
+//! [`is_some`]: Option::is_some
+//!
+//! ## Adapters for working with references
+//!
+//! * [`as_ref`] converts from <code>[&][][Option]\<T></code> to <code>[Option]<[&]T></code>
+//! * [`as_mut`] converts from <code>[&mut] [Option]\<T></code> to <code>[Option]<[&mut] T></code>
+//! * [`as_deref`] converts from <code>[&][][Option]\<T></code> to
+//! <code>[Option]<[&]T::[Target]></code>
+//! * [`as_deref_mut`] converts from <code>[&mut] [Option]\<T></code> to
+//! <code>[Option]<[&mut] T::[Target]></code>
+//! * [`as_pin_ref`] converts from <code>[Pin]<[&][][Option]\<T>></code> to
+//! <code>[Option]<[Pin]<[&]T>></code>
+//! * [`as_pin_mut`] converts from <code>[Pin]<[&mut] [Option]\<T>></code> to
+//! <code>[Option]<[Pin]<[&mut] T>></code>
+//!
+//! [&]: reference "shared reference"
+//! [&mut]: reference "mutable reference"
+//! [Target]: Deref::Target "ops::Deref::Target"
+//! [`as_deref`]: Option::as_deref
+//! [`as_deref_mut`]: Option::as_deref_mut
+//! [`as_mut`]: Option::as_mut
+//! [`as_pin_mut`]: Option::as_pin_mut
+//! [`as_pin_ref`]: Option::as_pin_ref
+//! [`as_ref`]: Option::as_ref
+//!
+//! ## Extracting the contained value
+//!
+//! These methods extract the contained value in an [`Option<T>`] when it
+//! is the [`Some`] variant. If the [`Option`] is [`None`]:
+//!
+//! * [`expect`] panics with a provided custom message
+//! * [`unwrap`] panics with a generic message
+//! * [`unwrap_or`] returns the provided default value
+//! * [`unwrap_or_default`] returns the default value of the type `T`
+//! (which must implement the [`Default`] trait)
+//! * [`unwrap_or_else`] returns the result of evaluating the provided
+//! function
+//!
+//! [`expect`]: Option::expect
+//! [`unwrap`]: Option::unwrap
+//! [`unwrap_or`]: Option::unwrap_or
+//! [`unwrap_or_default`]: Option::unwrap_or_default
+//! [`unwrap_or_else`]: Option::unwrap_or_else
+//!
+//! ## Transforming contained values
+//!
+//! These methods transform [`Option`] to [`Result`]:
+//!
+//! * [`ok_or`] transforms [`Some(v)`] to [`Ok(v)`], and [`None`] to
+//! [`Err(err)`] using the provided default `err` value
+//! * [`ok_or_else`] transforms [`Some(v)`] to [`Ok(v)`], and [`None`] to
+//! a value of [`Err`] using the provided function
+//! * [`transpose`] transposes an [`Option`] of a [`Result`] into a
+//! [`Result`] of an [`Option`]
+//!
+//! [`Err(err)`]: Err
+//! [`Ok(v)`]: Ok
+//! [`Some(v)`]: Some
+//! [`ok_or`]: Option::ok_or
+//! [`ok_or_else`]: Option::ok_or_else
+//! [`transpose`]: Option::transpose
+//!
+//! These methods transform the [`Some`] variant:
+//!
+//! * [`filter`] calls the provided predicate function on the contained
+//! value `t` if the [`Option`] is [`Some(t)`], and returns [`Some(t)`]
+//! if the function returns `true`; otherwise, returns [`None`]
+//! * [`flatten`] removes one level of nesting from an
+//! [`Option<Option<T>>`]
+//! * [`map`] transforms [`Option<T>`] to [`Option<U>`] by applying the
+//! provided function to the contained value of [`Some`] and leaving
+//! [`None`] values unchanged
+//!
+//! [`Some(t)`]: Some
+//! [`filter`]: Option::filter
+//! [`flatten`]: Option::flatten
+//! [`map`]: Option::map
+//!
+//! These methods transform [`Option<T>`] to a value of a possibly
+//! different type `U`:
+//!
+//! * [`map_or`] applies the provided function to the contained value of
+//! [`Some`], or returns the provided default value if the [`Option`] is
+//! [`None`]
+//! * [`map_or_else`] applies the provided function to the contained value
+//! of [`Some`], or returns the result of evaluating the provided
+//! fallback function if the [`Option`] is [`None`]
+//!
+//! [`map_or`]: Option::map_or
+//! [`map_or_else`]: Option::map_or_else
+//!
+//! These methods combine the [`Some`] variants of two [`Option`] values:
+//!
+//! * [`zip`] returns [`Some((s, o))`] if `self` is [`Some(s)`] and the
+//! provided [`Option`] value is [`Some(o)`]; otherwise, returns [`None`]
+//! * [`zip_with`] calls the provided function `f` and returns
+//! [`Some(f(s, o))`] if `self` is [`Some(s)`] and the provided
+//! [`Option`] value is [`Some(o)`]; otherwise, returns [`None`]
+//!
+//! [`Some(f(s, o))`]: Some
+//! [`Some(o)`]: Some
+//! [`Some(s)`]: Some
+//! [`Some((s, o))`]: Some
+//! [`zip`]: Option::zip
+//! [`zip_with`]: Option::zip_with
+//!
+//! ## Boolean operators
+//!
+//! These methods treat the [`Option`] as a boolean value, where [`Some`]
+//! acts like [`true`] and [`None`] acts like [`false`]. There are two
+//! categories of these methods: ones that take an [`Option`] as input, and
+//! ones that take a function as input (to be lazily evaluated).
+//!
+//! The [`and`], [`or`], and [`xor`] methods take another [`Option`] as
+//! input, and produce an [`Option`] as output. Only the [`and`] method can
+//! produce an [`Option<U>`] value having a different inner type `U` than
+//! [`Option<T>`].
+//!
+//! | method | self | input | output |
+//! |---------|-----------|-----------|-----------|
+//! | [`and`] | `None` | (ignored) | `None` |
+//! | [`and`] | `Some(x)` | `None` | `None` |
+//! | [`and`] | `Some(x)` | `Some(y)` | `Some(y)` |
+//! | [`or`] | `None` | `None` | `None` |
+//! | [`or`] | `None` | `Some(y)` | `Some(y)` |
+//! | [`or`] | `Some(x)` | (ignored) | `Some(x)` |
+//! | [`xor`] | `None` | `None` | `None` |
+//! | [`xor`] | `None` | `Some(y)` | `Some(y)` |
+//! | [`xor`] | `Some(x)` | `None` | `Some(x)` |
+//! | [`xor`] | `Some(x)` | `Some(y)` | `None` |
+//!
+//! [`and`]: Option::and
+//! [`or`]: Option::or
+//! [`xor`]: Option::xor
+//!
+//! The [`and_then`] and [`or_else`] methods take a function as input, and
+//! only evaluate the function when they need to produce a new value. Only
+//! the [`and_then`] method can produce an [`Option<U>`] value having a
+//! different inner type `U` than [`Option<T>`].
+//!
+//! | method | self | function input | function result | output |
+//! |--------------|-----------|----------------|-----------------|-----------|
+//! | [`and_then`] | `None` | (not provided) | (not evaluated) | `None` |
+//! | [`and_then`] | `Some(x)` | `x` | `None` | `None` |
+//! | [`and_then`] | `Some(x)` | `x` | `Some(y)` | `Some(y)` |
+//! | [`or_else`] | `None` | (not provided) | `None` | `None` |
+//! | [`or_else`] | `None` | (not provided) | `Some(y)` | `Some(y)` |
+//! | [`or_else`] | `Some(x)` | (not provided) | (not evaluated) | `Some(x)` |
+//!
+//! [`and_then`]: Option::and_then
+//! [`or_else`]: Option::or_else
+//!
+//! This is an example of using methods like [`and_then`] and [`or`] in a
+//! pipeline of method calls. Early stages of the pipeline pass failure
+//! values ([`None`]) through unchanged, and continue processing on
+//! success values ([`Some`]). Toward the end, [`or`] substitutes an error
+//! message if it receives [`None`].
+//!
+//! ```
+//! # use std::collections::BTreeMap;
+//! let mut bt = BTreeMap::new();
+//! bt.insert(20u8, "foo");
+//! bt.insert(42u8, "bar");
+//! let res = [0u8, 1, 11, 200, 22]
+//! .into_iter()
+//! .map(|x| {
+//! // `checked_sub()` returns `None` on error
+//! x.checked_sub(1)
+//! // same with `checked_mul()`
+//! .and_then(|x| x.checked_mul(2))
+//! // `BTreeMap::get` returns `None` on error
+//! .and_then(|x| bt.get(&x))
+//! // Substitute an error message if we have `None` so far
+//! .or(Some(&"error!"))
+//! .copied()
+//! // Won't panic because we unconditionally used `Some` above
+//! .unwrap()
+//! })
+//! .collect::<Vec<_>>();
+//! assert_eq!(res, ["error!", "error!", "foo", "error!", "bar"]);
+//! ```
+//!
+//! ## Comparison operators
+//!
+//! If `T` implements [`PartialOrd`] then [`Option<T>`] will derive its
+//! [`PartialOrd`] implementation. With this order, [`None`] compares as
+//! less than any [`Some`], and two [`Some`] compare the same way as their
+//! contained values would in `T`. If `T` also implements
+//! [`Ord`], then so does [`Option<T>`].
+//!
+//! ```
+//! assert!(None < Some(0));
+//! assert!(Some(0) < Some(1));
+//! ```
+//!
+//! ## Iterating over `Option`
+//!
+//! An [`Option`] can be iterated over. This can be helpful if you need an
+//! iterator that is conditionally empty. The iterator will either produce
+//! a single value (when the [`Option`] is [`Some`]), or produce no values
+//! (when the [`Option`] is [`None`]). For example, [`into_iter`] acts like
+//! [`once(v)`] if the [`Option`] is [`Some(v)`], and like [`empty()`] if
+//! the [`Option`] is [`None`].
+//!
+//! [`Some(v)`]: Some
+//! [`empty()`]: crate::iter::empty
+//! [`once(v)`]: crate::iter::once
+//!
+//! Iterators over [`Option<T>`] come in three types:
+//!
+//! * [`into_iter`] consumes the [`Option`] and produces the contained
+//! value
+//! * [`iter`] produces an immutable reference of type `&T` to the
+//! contained value
+//! * [`iter_mut`] produces a mutable reference of type `&mut T` to the
+//! contained value
+//!
+//! [`into_iter`]: Option::into_iter
+//! [`iter`]: Option::iter
+//! [`iter_mut`]: Option::iter_mut
+//!
+//! An iterator over [`Option`] can be useful when chaining iterators, for
+//! example, to conditionally insert items. (It's not always necessary to
+//! explicitly call an iterator constructor: many [`Iterator`] methods that
+//! accept other iterators will also accept iterable types that implement
+//! [`IntoIterator`], which includes [`Option`].)
+//!
+//! ```
+//! let yep = Some(42);
+//! let nope = None;
+//! // chain() already calls into_iter(), so we don't have to do so
+//! let nums: Vec<i32> = (0..4).chain(yep).chain(4..8).collect();
+//! assert_eq!(nums, [0, 1, 2, 3, 42, 4, 5, 6, 7]);
+//! let nums: Vec<i32> = (0..4).chain(nope).chain(4..8).collect();
+//! assert_eq!(nums, [0, 1, 2, 3, 4, 5, 6, 7]);
+//! ```
+//!
+//! One reason to chain iterators in this way is that a function returning
+//! `impl Iterator` must have all possible return values be of the same
+//! concrete type. Chaining an iterated [`Option`] can help with that.
+//!
+//! ```
+//! fn make_iter(do_insert: bool) -> impl Iterator<Item = i32> {
+//! // Explicit returns to illustrate return types matching
+//! match do_insert {
+//! true => return (0..4).chain(Some(42)).chain(4..8),
+//! false => return (0..4).chain(None).chain(4..8),
+//! }
+//! }
+//! println!("{:?}", make_iter(true).collect::<Vec<_>>());
+//! println!("{:?}", make_iter(false).collect::<Vec<_>>());
+//! ```
+//!
+//! If we try to do the same thing, but using [`once()`] and [`empty()`],
+//! we can't return `impl Iterator` anymore because the concrete types of
+//! the return values differ.
+//!
+//! [`empty()`]: crate::iter::empty
+//! [`once()`]: crate::iter::once
+//!
+//! ```compile_fail,E0308
+//! # use std::iter::{empty, once};
+//! // This won't compile because all possible returns from the function
+//! // must have the same concrete type.
+//! fn make_iter(do_insert: bool) -> impl Iterator<Item = i32> {
+//! // Explicit returns to illustrate return types not matching
+//! match do_insert {
+//! true => return (0..4).chain(once(42)).chain(4..8),
+//! false => return (0..4).chain(empty()).chain(4..8),
+//! }
+//! }
+//! ```
+//!
+//! ## Collecting into `Option`
+//!
+//! [`Option`] implements the [`FromIterator`][impl-FromIterator] trait,
+//! which allows an iterator over [`Option`] values to be collected into an
+//! [`Option`] of a collection of each contained value of the original
+//! [`Option`] values, or [`None`] if any of the elements was [`None`].
+//!
+//! [impl-FromIterator]: Option#impl-FromIterator%3COption%3CA%3E%3E-for-Option%3CV%3E
+//!
+//! ```
+//! let v = [Some(2), Some(4), None, Some(8)];
+//! let res: Option<Vec<_>> = v.into_iter().collect();
+//! assert_eq!(res, None);
+//! let v = [Some(2), Some(4), Some(8)];
+//! let res: Option<Vec<_>> = v.into_iter().collect();
+//! assert_eq!(res, Some(vec![2, 4, 8]));
+//! ```
+//!
+//! [`Option`] also implements the [`Product`][impl-Product] and
+//! [`Sum`][impl-Sum] traits, allowing an iterator over [`Option`] values
+//! to provide the [`product`][Iterator::product] and
+//! [`sum`][Iterator::sum] methods.
+//!
+//! [impl-Product]: Option#impl-Product%3COption%3CU%3E%3E-for-Option%3CT%3E
+//! [impl-Sum]: Option#impl-Sum%3COption%3CU%3E%3E-for-Option%3CT%3E
+//!
+//! ```
+//! let v = [None, Some(1), Some(2), Some(3)];
+//! let res: Option<i32> = v.into_iter().sum();
+//! assert_eq!(res, None);
+//! let v = [Some(1), Some(2), Some(21)];
+//! let res: Option<i32> = v.into_iter().product();
+//! assert_eq!(res, Some(42));
+//! ```
+//!
+//! ## Modifying an [`Option`] in-place
+//!
+//! These methods return a mutable reference to the contained value of an
+//! [`Option<T>`]:
+//!
+//! * [`insert`] inserts a value, dropping any old contents
+//! * [`get_or_insert`] gets the current value, inserting a provided
+//! default value if it is [`None`]
+//! * [`get_or_insert_default`] gets the current value, inserting the
+//! default value of type `T` (which must implement [`Default`]) if it is
+//! [`None`]
+//! * [`get_or_insert_with`] gets the current value, inserting a default
+//! computed by the provided function if it is [`None`]
+//!
+//! [`get_or_insert`]: Option::get_or_insert
+//! [`get_or_insert_default`]: Option::get_or_insert_default
+//! [`get_or_insert_with`]: Option::get_or_insert_with
+//! [`insert`]: Option::insert
+//!
+//! These methods transfer ownership of the contained value of an
+//! [`Option`]:
+//!
+//! * [`take`] takes ownership of the contained value of an [`Option`], if
+//! any, replacing the [`Option`] with [`None`]
+//! * [`replace`] takes ownership of the contained value of an [`Option`],
+//! if any, replacing the [`Option`] with a [`Some`] containing the
+//! provided value
+//!
+//! [`replace`]: Option::replace
+//! [`take`]: Option::take
+//!
+//! # Examples
+//!
+//! Basic pattern matching on [`Option`]:
+//!
+//! ```
+//! let msg = Some("howdy");
+//!
+//! // Take a reference to the contained string
+//! if let Some(m) = &msg {
+//! println!("{}", *m);
+//! }
+//!
+//! // Remove the contained string, destroying the Option
+//! let unwrapped_msg = msg.unwrap_or("default message");
+//! ```
+//!
+//! Initialize a result to [`None`] before a loop:
+//!
+//! ```
+//! enum Kingdom { Plant(u32, &'static str), Animal(u32, &'static str) }
+//!
+//! // A list of data to search through.
+//! let all_the_big_things = [
+//! Kingdom::Plant(250, "redwood"),
+//! Kingdom::Plant(230, "noble fir"),
+//! Kingdom::Plant(229, "sugar pine"),
+//! Kingdom::Animal(25, "blue whale"),
+//! Kingdom::Animal(19, "fin whale"),
+//! Kingdom::Animal(15, "north pacific right whale"),
+//! ];
+//!
+//! // We're going to search for the name of the biggest animal,
+//! // but to start with we've just got `None`.
+//! let mut name_of_biggest_animal = None;
+//! let mut size_of_biggest_animal = 0;
+//! for big_thing in &all_the_big_things {
+//! match *big_thing {
+//! Kingdom::Animal(size, name) if size > size_of_biggest_animal => {
+//! // Now we've found the name of some big animal
+//! size_of_biggest_animal = size;
+//! name_of_biggest_animal = Some(name);
+//! }
+//! Kingdom::Animal(..) | Kingdom::Plant(..) => ()
+//! }
+//! }
+//!
+//! match name_of_biggest_animal {
+//! Some(name) => println!("the biggest animal is {name}"),
+//! None => println!("there are no animals :("),
+//! }
+//! ```
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::iter::{self, FromIterator, FusedIterator, TrustedLen};
+use crate::marker::Destruct;
+use crate::panicking::{panic, panic_str};
+use crate::pin::Pin;
+use crate::{
+ convert, hint, mem,
+ ops::{self, ControlFlow, Deref, DerefMut},
+};
+
+/// The `Option` type. See [the module level documentation](self) for more.
+#[derive(Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
+#[rustc_diagnostic_item = "Option"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub enum Option<T> {
+ /// No value.
+ #[lang = "None"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ None,
+ /// Some value of type `T`.
+ #[lang = "Some"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Some(#[stable(feature = "rust1", since = "1.0.0")] T),
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// Type implementation
+/////////////////////////////////////////////////////////////////////////////
+
+impl<T> Option<T> {
+ /////////////////////////////////////////////////////////////////////////
+ // Querying the contained values
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Returns `true` if the option is a [`Some`] value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x: Option<u32> = Some(2);
+ /// assert_eq!(x.is_some(), true);
+ ///
+ /// let x: Option<u32> = None;
+ /// assert_eq!(x.is_some(), false);
+ /// ```
+ #[must_use = "if you intended to assert that this has a value, consider `.unwrap()` instead"]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_option_basics", since = "1.48.0")]
+ pub const fn is_some(&self) -> bool {
+ matches!(*self, Some(_))
+ }
+
+ /// Returns `true` if the option is a [`Some`] and the value inside of it matches a predicate.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(is_some_with)]
+ ///
+ /// let x: Option<u32> = Some(2);
+ /// assert_eq!(x.is_some_and(|&x| x > 1), true);
+ ///
+ /// let x: Option<u32> = Some(0);
+ /// assert_eq!(x.is_some_and(|&x| x > 1), false);
+ ///
+ /// let x: Option<u32> = None;
+ /// assert_eq!(x.is_some_and(|&x| x > 1), false);
+ /// ```
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "is_some_with", issue = "93050")]
+ pub fn is_some_and(&self, f: impl FnOnce(&T) -> bool) -> bool {
+ matches!(self, Some(x) if f(x))
+ }
+
+ /// Returns `true` if the option is a [`None`] value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x: Option<u32> = Some(2);
+ /// assert_eq!(x.is_none(), false);
+ ///
+ /// let x: Option<u32> = None;
+ /// assert_eq!(x.is_none(), true);
+ /// ```
+ #[must_use = "if you intended to assert that this doesn't have a value, consider \
+ `.and_then(|_| panic!(\"`Option` had a value when expected `None`\"))` instead"]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_option_basics", since = "1.48.0")]
+ pub const fn is_none(&self) -> bool {
+ !self.is_some()
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ // Adapter for working with references
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Converts from `&Option<T>` to `Option<&T>`.
+ ///
+ /// # Examples
+ ///
+ /// Converts an <code>Option<[String]></code> into an <code>Option<[usize]></code>, preserving
+ /// the original. The [`map`] method takes the `self` argument by value, consuming the original,
+ /// so this technique uses `as_ref` to first take an `Option` to a reference
+ /// to the value inside the original.
+ ///
+ /// [`map`]: Option::map
+ /// [String]: ../../std/string/struct.String.html "String"
+ ///
+ /// ```
+ /// let text: Option<String> = Some("Hello, world!".to_string());
+ /// // First, cast `Option<String>` to `Option<&String>` with `as_ref`,
+ /// // then consume *that* with `map`, leaving `text` on the stack.
+ /// let text_length: Option<usize> = text.as_ref().map(|s| s.len());
+ /// println!("still can print text: {text:?}");
+ /// ```
+ #[inline]
+ #[rustc_const_stable(feature = "const_option_basics", since = "1.48.0")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const fn as_ref(&self) -> Option<&T> {
+ match *self {
+ Some(ref x) => Some(x),
+ None => None,
+ }
+ }
+
+ /// Converts from `&mut Option<T>` to `Option<&mut T>`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut x = Some(2);
+ /// match x.as_mut() {
+ /// Some(v) => *v = 42,
+ /// None => {},
+ /// }
+ /// assert_eq!(x, Some(42));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_option", issue = "67441")]
+ pub const fn as_mut(&mut self) -> Option<&mut T> {
+ match *self {
+ Some(ref mut x) => Some(x),
+ None => None,
+ }
+ }
+
+ /// Converts from <code>[Pin]<[&]Option\<T>></code> to <code>Option<[Pin]<[&]T>></code>.
+ ///
+ /// [&]: reference "shared reference"
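+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, using an `Unpin` payload so the pin can be created
+ /// with the safe `Pin::new`:
+ ///
+ /// ```
+ /// use std::pin::Pin;
+ ///
+ /// let x: Option<u8> = Some(5);
+ /// let pinned: Pin<&Option<u8>> = Pin::new(&x);
+ /// let inner: Option<Pin<&u8>> = pinned.as_pin_ref();
+ /// // `u8: Unpin`, so the pinned reference can simply be dereferenced.
+ /// assert_eq!(inner.map(|p| *p), Some(5));
+ /// ```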
+ #[inline]
+ #[must_use]
+ #[stable(feature = "pin", since = "1.33.0")]
+ #[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ pub const fn as_pin_ref(self: Pin<&Self>) -> Option<Pin<&T>> {
+ match Pin::get_ref(self).as_ref() {
+ // SAFETY: `x` is guaranteed to be pinned because it comes from `self`
+ // which is pinned.
+ Some(x) => unsafe { Some(Pin::new_unchecked(x)) },
+ None => None,
+ }
+ }
+
+ /// Converts from <code>[Pin]<[&mut] Option\<T>></code> to <code>Option<[Pin]<[&mut] T>></code>.
+ ///
+ /// [&mut]: reference "mutable reference"
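+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch; `u8` is `Unpin`, so the inner pin can be written
+ /// through safely:
+ ///
+ /// ```
+ /// use std::pin::Pin;
+ ///
+ /// let mut x: Option<u8> = Some(5);
+ /// let pinned: Pin<&mut Option<u8>> = Pin::new(&mut x);
+ /// if let Some(mut inner) = pinned.as_pin_mut() {
+ ///     // `Pin<&mut u8>` implements `DerefMut` because `u8: Unpin`.
+ ///     *inner = 7;
+ /// }
+ /// assert_eq!(x, Some(7));
+ /// ```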
+ #[inline]
+ #[must_use]
+ #[stable(feature = "pin", since = "1.33.0")]
+ #[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ pub const fn as_pin_mut(self: Pin<&mut Self>) -> Option<Pin<&mut T>> {
+ // SAFETY: `get_unchecked_mut` is never used to move the `Option` inside `self`.
+ // `x` is guaranteed to be pinned because it comes from `self` which is pinned.
+ unsafe {
+ match Pin::get_unchecked_mut(self).as_mut() {
+ Some(x) => Some(Pin::new_unchecked(x)),
+ None => None,
+ }
+ }
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ // Getting to contained values
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Returns the contained [`Some`] value, consuming the `self` value.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value is a [`None`], with a custom panic message provided
+ /// by `msg`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Some("value");
+ /// assert_eq!(x.expect("fruits are healthy"), "value");
+ /// ```
+ ///
+ /// ```should_panic
+ /// let x: Option<&str> = None;
+ /// x.expect("fruits are healthy"); // panics with `fruits are healthy`
+ /// ```
+ ///
+ /// # Recommended Message Style
+ ///
+ /// We recommend that `expect` messages be used to describe the reason you
+ /// _expect_ the `Option` to be `Some`.
+ ///
+ /// ```should_panic
+ /// # let slice: &[u8] = &[];
+ /// let item = slice.get(0)
+ /// .expect("slice should not be empty");
+ /// ```
+ ///
+ /// **Hint**: If you're having trouble remembering how to phrase expect
+ /// error messages, remember to focus on the word "should", as in "env
+ /// variable should be set by blah" or "the given binary should be available
+ /// and executable by the current user".
+ ///
+ /// For more detail on expect message styles and the reasoning behind our
+ /// recommendation please refer to the section on ["Common Message
+ /// Styles"](../../std/error/index.html#common-message-styles) in the [`std::error`](../../std/error/index.html) module docs.
+ #[inline]
+ #[track_caller]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_option", issue = "67441")]
+ pub const fn expect(self, msg: &str) -> T {
+ match self {
+ Some(val) => val,
+ None => expect_failed(msg),
+ }
+ }
+
+ /// Returns the contained [`Some`] value, consuming the `self` value.
+ ///
+ /// Because this function may panic, its use is generally discouraged.
+ /// Instead, prefer to use pattern matching and handle the [`None`]
+ /// case explicitly, or call [`unwrap_or`], [`unwrap_or_else`], or
+ /// [`unwrap_or_default`].
+ ///
+ /// [`unwrap_or`]: Option::unwrap_or
+ /// [`unwrap_or_else`]: Option::unwrap_or_else
+ /// [`unwrap_or_default`]: Option::unwrap_or_default
+ ///
+ /// # Panics
+ ///
+ /// Panics if the self value equals [`None`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Some("air");
+ /// assert_eq!(x.unwrap(), "air");
+ /// ```
+ ///
+ /// ```should_panic
+ /// let x: Option<&str> = None;
+ /// assert_eq!(x.unwrap(), "air"); // fails
+ /// ```
+ #[inline]
+ #[track_caller]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_option", issue = "67441")]
+ pub const fn unwrap(self) -> T {
+ match self {
+ Some(val) => val,
+ None => panic("called `Option::unwrap()` on a `None` value"),
+ }
+ }
+
+ /// Returns the contained [`Some`] value or a provided default.
+ ///
+ /// Arguments passed to `unwrap_or` are eagerly evaluated; if you are passing
+ /// the result of a function call, it is recommended to use [`unwrap_or_else`],
+ /// which is lazily evaluated.
+ ///
+ /// [`unwrap_or_else`]: Option::unwrap_or_else
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(Some("car").unwrap_or("bike"), "car");
+ /// assert_eq!(None.unwrap_or("bike"), "bike");
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ pub const fn unwrap_or(self, default: T) -> T
+ where
+ T: ~const Destruct,
+ {
+ match self {
+ Some(x) => x,
+ None => default,
+ }
+ }
+
+ /// Returns the contained [`Some`] value or computes it from a closure.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let k = 10;
+ /// assert_eq!(Some(4).unwrap_or_else(|| 2 * k), 4);
+ /// assert_eq!(None.unwrap_or_else(|| 2 * k), 20);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ pub const fn unwrap_or_else<F>(self, f: F) -> T
+ where
+ F: ~const FnOnce() -> T,
+ F: ~const Destruct,
+ {
+ match self {
+ Some(x) => x,
+ None => f(),
+ }
+ }
+
+ /// Returns the contained [`Some`] value or a default.
+ ///
+ /// Consumes the `self` argument and, if [`Some`], returns the contained
+ /// value; otherwise, if [`None`], returns the [default value] for that
+ /// type.
+ ///
+ /// # Examples
+ ///
+ /// Converts a string to an integer, turning poorly-formed strings
+ /// into 0 (the default value for integers). [`parse`] converts
+ /// a string to any other type that implements [`FromStr`], returning an
+ /// [`Err`] on error, which `.ok()` here converts into [`None`].
+ ///
+ /// ```
+ /// let good_year_from_input = "1909";
+ /// let bad_year_from_input = "190blarg";
+ /// let good_year = good_year_from_input.parse().ok().unwrap_or_default();
+ /// let bad_year = bad_year_from_input.parse().ok().unwrap_or_default();
+ ///
+ /// assert_eq!(1909, good_year);
+ /// assert_eq!(0, bad_year);
+ /// ```
+ ///
+ /// [default value]: Default::default
+ /// [`parse`]: str::parse
+ /// [`FromStr`]: crate::str::FromStr
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ pub const fn unwrap_or_default(self) -> T
+ where
+ T: ~const Default,
+ {
+ match self {
+ Some(x) => x,
+ None => Default::default(),
+ }
+ }
+
+ /// Returns the contained [`Some`] value, consuming the `self` value,
+ /// without checking that the value is not [`None`].
+ ///
+ /// # Safety
+ ///
+ /// Calling this method on [`None`] is *[undefined behavior]*.
+ ///
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Some("air");
+ /// assert_eq!(unsafe { x.unwrap_unchecked() }, "air");
+ /// ```
+ ///
+ /// ```no_run
+ /// let x: Option<&str> = None;
+ /// assert_eq!(unsafe { x.unwrap_unchecked() }, "air"); // Undefined behavior!
+ /// ```
+ #[inline]
+ #[track_caller]
+ #[stable(feature = "option_result_unwrap_unchecked", since = "1.58.0")]
+ #[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ pub const unsafe fn unwrap_unchecked(self) -> T {
+ debug_assert!(self.is_some());
+ match self {
+ Some(val) => val,
+ // SAFETY: the safety contract must be upheld by the caller.
+ None => unsafe { hint::unreachable_unchecked() },
+ }
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ // Transforming contained values
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Maps an `Option<T>` to `Option<U>` by applying a function to a contained value.
+ ///
+ /// # Examples
+ ///
+ /// Converts an <code>Option<[String]></code> into an <code>Option<[usize]></code>, consuming
+ /// the original:
+ ///
+ /// [String]: ../../std/string/struct.String.html "String"
+ /// ```
+ /// let maybe_some_string = Some(String::from("Hello, World!"));
+ /// // `Option::map` takes self *by value*, consuming `maybe_some_string`
+ /// let maybe_some_len = maybe_some_string.map(|s| s.len());
+ ///
+ /// assert_eq!(maybe_some_len, Some(13));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ pub const fn map<U, F>(self, f: F) -> Option<U>
+ where
+ F: ~const FnOnce(T) -> U,
+ F: ~const Destruct,
+ {
+ match self {
+ Some(x) => Some(f(x)),
+ None => None,
+ }
+ }
+
+ /// Calls the provided closure with a reference to the contained value (if [`Some`]).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(result_option_inspect)]
+ ///
+ /// let v = vec![1, 2, 3, 4, 5];
+ ///
+ /// // prints "got: 4"
+ /// let x: Option<&usize> = v.get(3).inspect(|x| println!("got: {x}"));
+ ///
+ /// // prints nothing
+ /// let x: Option<&usize> = v.get(5).inspect(|x| println!("got: {x}"));
+ /// ```
+ #[inline]
+ #[unstable(feature = "result_option_inspect", issue = "91345")]
+ #[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ pub const fn inspect<F>(self, f: F) -> Self
+ where
+ F: ~const FnOnce(&T),
+ F: ~const Destruct,
+ {
+ if let Some(ref x) = self {
+ f(x);
+ }
+
+ self
+ }
+
+ /// Returns the provided default result (if none),
+ /// or applies a function to the contained value (if any).
+ ///
+ /// Arguments passed to `map_or` are eagerly evaluated; if you are passing
+ /// the result of a function call, it is recommended to use [`map_or_else`],
+ /// which is lazily evaluated.
+ ///
+ /// [`map_or_else`]: Option::map_or_else
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Some("foo");
+ /// assert_eq!(x.map_or(42, |v| v.len()), 3);
+ ///
+ /// let x: Option<&str> = None;
+ /// assert_eq!(x.map_or(42, |v| v.len()), 42);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ pub const fn map_or<U, F>(self, default: U, f: F) -> U
+ where
+ F: ~const FnOnce(T) -> U,
+ F: ~const Destruct,
+ U: ~const Destruct,
+ {
+ match self {
+ Some(t) => f(t),
+ None => default,
+ }
+ }
+
+ /// Computes a default function result (if none), or
+ /// applies a different function to the contained value (if any).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let k = 21;
+ ///
+ /// let x = Some("foo");
+ /// assert_eq!(x.map_or_else(|| 2 * k, |v| v.len()), 3);
+ ///
+ /// let x: Option<&str> = None;
+ /// assert_eq!(x.map_or_else(|| 2 * k, |v| v.len()), 42);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ pub const fn map_or_else<U, D, F>(self, default: D, f: F) -> U
+ where
+ D: ~const FnOnce() -> U,
+ D: ~const Destruct,
+ F: ~const FnOnce(T) -> U,
+ F: ~const Destruct,
+ {
+ match self {
+ Some(t) => f(t),
+ None => default(),
+ }
+ }
+
+ /// Transforms the `Option<T>` into a [`Result<T, E>`], mapping [`Some(v)`] to
+ /// [`Ok(v)`] and [`None`] to [`Err(err)`].
+ ///
+ /// Arguments passed to `ok_or` are eagerly evaluated; if you are passing the
+ /// result of a function call, it is recommended to use [`ok_or_else`], which is
+ /// lazily evaluated.
+ ///
+ /// [`Ok(v)`]: Ok
+ /// [`Err(err)`]: Err
+ /// [`Some(v)`]: Some
+ /// [`ok_or_else`]: Option::ok_or_else
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Some("foo");
+ /// assert_eq!(x.ok_or(0), Ok("foo"));
+ ///
+ /// let x: Option<&str> = None;
+ /// assert_eq!(x.ok_or(0), Err(0));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ pub const fn ok_or<E>(self, err: E) -> Result<T, E>
+ where
+ E: ~const Destruct,
+ {
+ match self {
+ Some(v) => Ok(v),
+ None => Err(err),
+ }
+ }
+
+ /// Transforms the `Option<T>` into a [`Result<T, E>`], mapping [`Some(v)`] to
+ /// [`Ok(v)`] and [`None`] to [`Err(err())`].
+ ///
+ /// [`Ok(v)`]: Ok
+ /// [`Err(err())`]: Err
+ /// [`Some(v)`]: Some
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Some("foo");
+ /// assert_eq!(x.ok_or_else(|| 0), Ok("foo"));
+ ///
+ /// let x: Option<&str> = None;
+ /// assert_eq!(x.ok_or_else(|| 0), Err(0));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ pub const fn ok_or_else<E, F>(self, err: F) -> Result<T, E>
+ where
+ F: ~const FnOnce() -> E,
+ F: ~const Destruct,
+ {
+ match self {
+ Some(v) => Ok(v),
+ None => Err(err()),
+ }
+ }
+
+ /// Converts from `Option<T>` (or `&Option<T>`) to `Option<&T::Target>`.
+ ///
+ /// Leaves the original `Option` in place, creating a new one with a reference
+ /// to the original one, additionally coercing the contents via [`Deref`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x: Option<String> = Some("hey".to_owned());
+ /// assert_eq!(x.as_deref(), Some("hey"));
+ ///
+ /// let x: Option<String> = None;
+ /// assert_eq!(x.as_deref(), None);
+ /// ```
+ #[stable(feature = "option_deref", since = "1.40.0")]
+ #[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ pub const fn as_deref(&self) -> Option<&T::Target>
+ where
+ T: ~const Deref,
+ {
+ match self.as_ref() {
+ Some(t) => Some(t.deref()),
+ None => None,
+ }
+ }
+
+ /// Converts from `Option<T>` (or `&mut Option<T>`) to `Option<&mut T::Target>`.
+ ///
+ /// Leaves the original `Option` in-place, creating a new one containing a mutable reference to
+ /// the inner type's [`Deref::Target`] type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut x: Option<String> = Some("hey".to_owned());
+ /// assert_eq!(x.as_deref_mut().map(|x| {
+ /// x.make_ascii_uppercase();
+ /// x
+ /// }), Some("HEY".to_owned().as_mut_str()));
+ /// ```
+ #[stable(feature = "option_deref", since = "1.40.0")]
+ #[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ pub const fn as_deref_mut(&mut self) -> Option<&mut T::Target>
+ where
+ T: ~const DerefMut,
+ {
+ match self.as_mut() {
+ Some(t) => Some(t.deref_mut()),
+ None => None,
+ }
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ // Iterator constructors
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Returns an iterator over the possibly contained value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Some(4);
+ /// assert_eq!(x.iter().next(), Some(&4));
+ ///
+ /// let x: Option<u32> = None;
+ /// assert_eq!(x.iter().next(), None);
+ /// ```
+ #[inline]
+ #[rustc_const_unstable(feature = "const_option", issue = "67441")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const fn iter(&self) -> Iter<'_, T> {
+ Iter { inner: Item { opt: self.as_ref() } }
+ }
+
+ /// Returns a mutable iterator over the possibly contained value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut x = Some(4);
+ /// match x.iter_mut().next() {
+ /// Some(v) => *v = 42,
+ /// None => {},
+ /// }
+ /// assert_eq!(x, Some(42));
+ ///
+ /// let mut x: Option<u32> = None;
+ /// assert_eq!(x.iter_mut().next(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn iter_mut(&mut self) -> IterMut<'_, T> {
+ IterMut { inner: Item { opt: self.as_mut() } }
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ // Boolean operations on the values, eager and lazy
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Returns [`None`] if the option is [`None`], otherwise returns `optb`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Some(2);
+ /// let y: Option<&str> = None;
+ /// assert_eq!(x.and(y), None);
+ ///
+ /// let x: Option<u32> = None;
+ /// let y = Some("foo");
+ /// assert_eq!(x.and(y), None);
+ ///
+ /// let x = Some(2);
+ /// let y = Some("foo");
+ /// assert_eq!(x.and(y), Some("foo"));
+ ///
+ /// let x: Option<u32> = None;
+ /// let y: Option<&str> = None;
+ /// assert_eq!(x.and(y), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ pub const fn and<U>(self, optb: Option<U>) -> Option<U>
+ where
+ T: ~const Destruct,
+ U: ~const Destruct,
+ {
+ match self {
+ Some(_) => optb,
+ None => None,
+ }
+ }
+
+ /// Returns [`None`] if the option is [`None`], otherwise calls `f` with the
+ /// wrapped value and returns the result.
+ ///
+ /// Some languages call this operation flatmap.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// fn sq_then_to_string(x: u32) -> Option<String> {
+ /// x.checked_mul(x).map(|sq| sq.to_string())
+ /// }
+ ///
+ /// assert_eq!(Some(2).and_then(sq_then_to_string), Some(4.to_string()));
+ /// assert_eq!(Some(1_000_000).and_then(sq_then_to_string), None); // overflowed!
+ /// assert_eq!(None.and_then(sq_then_to_string), None);
+ /// ```
+ ///
+ /// Often used to chain fallible operations that may return [`None`].
+ ///
+ /// ```
+ /// let arr_2d = [["A0", "A1"], ["B0", "B1"]];
+ ///
+ /// let item_0_1 = arr_2d.get(0).and_then(|row| row.get(1));
+ /// assert_eq!(item_0_1, Some(&"A1"));
+ ///
+ /// let item_2_0 = arr_2d.get(2).and_then(|row| row.get(0));
+ /// assert_eq!(item_2_0, None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ pub const fn and_then<U, F>(self, f: F) -> Option<U>
+ where
+ F: ~const FnOnce(T) -> Option<U>,
+ F: ~const Destruct,
+ {
+ match self {
+ Some(x) => f(x),
+ None => None,
+ }
+ }
+
+ /// Returns [`None`] if the option is [`None`], otherwise calls `predicate`
+ /// with the wrapped value and returns:
+ ///
+ /// - [`Some(t)`] if `predicate` returns `true` (where `t` is the wrapped
+ /// value), and
+ /// - [`None`] if `predicate` returns `false`.
+ ///
+ /// This function works similarly to [`Iterator::filter()`]. You can imagine
+ /// the `Option<T>` as an iterator over one or zero elements. `filter()`
+ /// lets you decide which elements to keep.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// fn is_even(n: &i32) -> bool {
+ /// n % 2 == 0
+ /// }
+ ///
+ /// assert_eq!(None.filter(is_even), None);
+ /// assert_eq!(Some(3).filter(is_even), None);
+ /// assert_eq!(Some(4).filter(is_even), Some(4));
+ /// ```
+ ///
+ /// [`Some(t)`]: Some
+ #[inline]
+ #[stable(feature = "option_filter", since = "1.27.0")]
+ #[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ pub const fn filter<P>(self, predicate: P) -> Self
+ where
+ T: ~const Destruct,
+ P: ~const FnOnce(&T) -> bool,
+ P: ~const Destruct,
+ {
+ if let Some(x) = self {
+ if predicate(&x) {
+ return Some(x);
+ }
+ }
+ None
+ }
+
+ /// Returns the option if it contains a value, otherwise returns `optb`.
+ ///
+ /// Arguments passed to `or` are eagerly evaluated; if you are passing the
+ /// result of a function call, it is recommended to use [`or_else`], which is
+ /// lazily evaluated.
+ ///
+ /// [`or_else`]: Option::or_else
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Some(2);
+ /// let y = None;
+ /// assert_eq!(x.or(y), Some(2));
+ ///
+ /// let x = None;
+ /// let y = Some(100);
+ /// assert_eq!(x.or(y), Some(100));
+ ///
+ /// let x = Some(2);
+ /// let y = Some(100);
+ /// assert_eq!(x.or(y), Some(2));
+ ///
+ /// let x: Option<u32> = None;
+ /// let y = None;
+ /// assert_eq!(x.or(y), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ pub const fn or(self, optb: Option<T>) -> Option<T>
+ where
+ T: ~const Destruct,
+ {
+ match self {
+ Some(x) => Some(x),
+ None => optb,
+ }
+ }
+
+ /// Returns the option if it contains a value, otherwise calls `f` and
+ /// returns the result.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// fn nobody() -> Option<&'static str> { None }
+ /// fn vikings() -> Option<&'static str> { Some("vikings") }
+ ///
+ /// assert_eq!(Some("barbarians").or_else(vikings), Some("barbarians"));
+ /// assert_eq!(None.or_else(vikings), Some("vikings"));
+ /// assert_eq!(None.or_else(nobody), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ pub const fn or_else<F>(self, f: F) -> Option<T>
+ where
+ F: ~const FnOnce() -> Option<T>,
+ F: ~const Destruct,
+ {
+ match self {
+ Some(x) => Some(x),
+ None => f(),
+ }
+ }
+
+ /// Returns [`Some`] if exactly one of `self`, `optb` is [`Some`], otherwise returns [`None`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Some(2);
+ /// let y: Option<u32> = None;
+ /// assert_eq!(x.xor(y), Some(2));
+ ///
+ /// let x: Option<u32> = None;
+ /// let y = Some(2);
+ /// assert_eq!(x.xor(y), Some(2));
+ ///
+ /// let x = Some(2);
+ /// let y = Some(2);
+ /// assert_eq!(x.xor(y), None);
+ ///
+ /// let x: Option<u32> = None;
+ /// let y: Option<u32> = None;
+ /// assert_eq!(x.xor(y), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "option_xor", since = "1.37.0")]
+ #[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ pub const fn xor(self, optb: Option<T>) -> Option<T>
+ where
+ T: ~const Destruct,
+ {
+ match (self, optb) {
+ (Some(a), None) => Some(a),
+ (None, Some(b)) => Some(b),
+ _ => None,
+ }
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ // Entry-like operations to insert a value and return a reference
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Inserts `value` into the option, then returns a mutable reference to it.
+ ///
+ /// If the option already contains a value, the old value is dropped.
+ ///
+ /// See also [`Option::get_or_insert`], which doesn't update the value if
+ /// the option already contains [`Some`].
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// let mut opt = None;
+ /// let val = opt.insert(1);
+ /// assert_eq!(*val, 1);
+ /// assert_eq!(opt.unwrap(), 1);
+ /// let val = opt.insert(2);
+ /// assert_eq!(*val, 2);
+ /// *val = 3;
+ /// assert_eq!(opt.unwrap(), 3);
+ /// ```
+ #[must_use = "if you intended to set a value, consider assignment instead"]
+ #[inline]
+ #[stable(feature = "option_insert", since = "1.53.0")]
+ #[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ pub const fn insert(&mut self, value: T) -> &mut T
+ where
+ T: ~const Destruct,
+ {
+ *self = Some(value);
+
+ // SAFETY: the code above just filled the option
+ unsafe { self.as_mut().unwrap_unchecked() }
+ }
+
+ /// Inserts `value` into the option if it is [`None`], then
+ /// returns a mutable reference to the contained value.
+ ///
+ /// See also [`Option::insert`], which updates the value even if
+ /// the option already contains [`Some`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut x = None;
+ ///
+ /// {
+ /// let y: &mut u32 = x.get_or_insert(5);
+ /// assert_eq!(y, &5);
+ ///
+ /// *y = 7;
+ /// }
+ ///
+ /// assert_eq!(x, Some(7));
+ /// ```
+ #[inline]
+ #[stable(feature = "option_entry", since = "1.20.0")]
+ #[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ pub const fn get_or_insert(&mut self, value: T) -> &mut T
+ where
+ T: ~const Destruct,
+ {
+ if let None = *self {
+ *self = Some(value);
+ }
+
+ // SAFETY: a `None` variant for `self` would have been replaced by a `Some`
+ // variant in the code above.
+ unsafe { self.as_mut().unwrap_unchecked() }
+ }
+
+ /// Inserts the default value into the option if it is [`None`], then
+ /// returns a mutable reference to the contained value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(option_get_or_insert_default)]
+ ///
+ /// let mut x = None;
+ ///
+ /// {
+ /// let y: &mut u32 = x.get_or_insert_default();
+ /// assert_eq!(y, &0);
+ ///
+ /// *y = 7;
+ /// }
+ ///
+ /// assert_eq!(x, Some(7));
+ /// ```
+ #[inline]
+ #[unstable(feature = "option_get_or_insert_default", issue = "82901")]
+ #[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ pub const fn get_or_insert_default(&mut self) -> &mut T
+ where
+ T: ~const Default,
+ {
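+ // Note: a named `const fn` (rather than a closure) is used here,
+ // presumably so that `get_or_insert_with` receives something that is
+ // itself callable in const contexts under the `~const` bounds.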
+ const fn default<T: ~const Default>() -> T {
+ T::default()
+ }
+
+ self.get_or_insert_with(default)
+ }
+
+ /// Inserts a value computed from `f` into the option if it is [`None`],
+ /// then returns a mutable reference to the contained value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut x = None;
+ ///
+ /// {
+ /// let y: &mut u32 = x.get_or_insert_with(|| 5);
+ /// assert_eq!(y, &5);
+ ///
+ /// *y = 7;
+ /// }
+ ///
+ /// assert_eq!(x, Some(7));
+ /// ```
+ #[inline]
+ #[stable(feature = "option_entry", since = "1.20.0")]
+ #[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ pub const fn get_or_insert_with<F>(&mut self, f: F) -> &mut T
+ where
+ F: ~const FnOnce() -> T,
+ F: ~const Destruct,
+ {
+ if let None = *self {
+ // the compiler isn't smart enough to know that we are not dropping a `T`
+ // here and wants us to ensure `T` can be dropped at compile time.
+ mem::forget(mem::replace(self, Some(f())))
+ }
+
+ // SAFETY: a `None` variant for `self` would have been replaced by a `Some`
+ // variant in the code above.
+ unsafe { self.as_mut().unwrap_unchecked() }
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ // Misc
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Takes the value out of the option, leaving a [`None`] in its place.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut x = Some(2);
+ /// let y = x.take();
+ /// assert_eq!(x, None);
+ /// assert_eq!(y, Some(2));
+ ///
+ /// let mut x: Option<u32> = None;
+ /// let y = x.take();
+ /// assert_eq!(x, None);
+ /// assert_eq!(y, None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_option", issue = "67441")]
+ pub const fn take(&mut self) -> Option<T> {
+ // FIXME replace `mem::replace` by `mem::take` when the latter is const ready
+ mem::replace(self, None)
+ }
+
+ /// Replaces the value in the option with the given value, returning the old
+ /// value if present, and leaving a [`Some`] in its place without
+ /// deinitializing either one.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut x = Some(2);
+ /// let old = x.replace(5);
+ /// assert_eq!(x, Some(5));
+ /// assert_eq!(old, Some(2));
+ ///
+ /// let mut x = None;
+ /// let old = x.replace(3);
+ /// assert_eq!(x, Some(3));
+ /// assert_eq!(old, None);
+ /// ```
+ #[inline]
+ #[rustc_const_unstable(feature = "const_option", issue = "67441")]
+ #[stable(feature = "option_replace", since = "1.31.0")]
+ pub const fn replace(&mut self, value: T) -> Option<T> {
+ mem::replace(self, Some(value))
+ }
+
+ /// Returns `true` if the option is a [`Some`] value containing the given value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(option_result_contains)]
+ ///
+ /// let x: Option<u32> = Some(2);
+ /// assert_eq!(x.contains(&2), true);
+ ///
+ /// let x: Option<u32> = Some(3);
+ /// assert_eq!(x.contains(&2), false);
+ ///
+ /// let x: Option<u32> = None;
+ /// assert_eq!(x.contains(&2), false);
+ /// ```
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "option_result_contains", issue = "62358")]
+ #[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ pub const fn contains<U>(&self, x: &U) -> bool
+ where
+ U: ~const PartialEq<T>,
+ {
+ match self {
+ Some(y) => x.eq(y),
+ None => false,
+ }
+ }
+
+ /// Zips `self` with another `Option`.
+ ///
+ /// If `self` is `Some(s)` and `other` is `Some(o)`, this method returns `Some((s, o))`.
+ /// Otherwise, `None` is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Some(1);
+ /// let y = Some("hi");
+ /// let z = None::<u8>;
+ ///
+ /// assert_eq!(x.zip(y), Some((1, "hi")));
+ /// assert_eq!(x.zip(z), None);
+ /// ```
+ #[stable(feature = "option_zip_option", since = "1.46.0")]
+ #[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ pub const fn zip<U>(self, other: Option<U>) -> Option<(T, U)>
+ where
+ T: ~const Destruct,
+ U: ~const Destruct,
+ {
+ match (self, other) {
+ (Some(a), Some(b)) => Some((a, b)),
+ _ => None,
+ }
+ }
+
+ /// Zips `self` and another `Option` with function `f`.
+ ///
+ /// If `self` is `Some(s)` and `other` is `Some(o)`, this method returns `Some(f(s, o))`.
+ /// Otherwise, `None` is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(option_zip)]
+ ///
+ /// #[derive(Debug, PartialEq)]
+ /// struct Point {
+ /// x: f64,
+ /// y: f64,
+ /// }
+ ///
+ /// impl Point {
+ /// fn new(x: f64, y: f64) -> Self {
+ /// Self { x, y }
+ /// }
+ /// }
+ ///
+ /// let x = Some(17.5);
+ /// let y = Some(42.7);
+ ///
+ /// assert_eq!(x.zip_with(y, Point::new), Some(Point { x: 17.5, y: 42.7 }));
+ /// assert_eq!(x.zip_with(None, Point::new), None);
+ /// ```
+ #[unstable(feature = "option_zip", issue = "70086")]
+ #[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ pub const fn zip_with<U, F, R>(self, other: Option<U>, f: F) -> Option<R>
+ where
+ F: ~const FnOnce(T, U) -> R,
+ F: ~const Destruct,
+ T: ~const Destruct,
+ U: ~const Destruct,
+ {
+ match (self, other) {
+ (Some(a), Some(b)) => Some(f(a, b)),
+ _ => None,
+ }
+ }
+}
+
+impl<T, U> Option<(T, U)> {
+ /// Unzips an option containing a tuple into a tuple of two options.
+ ///
+ /// If `self` is `Some((a, b))` this method returns `(Some(a), Some(b))`.
+ /// Otherwise, `(None, None)` is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(unzip_option)]
+ ///
+ /// let x = Some((1, "hi"));
+ /// let y = None::<(u8, u32)>;
+ ///
+ /// assert_eq!(x.unzip(), (Some(1), Some("hi")));
+ /// assert_eq!(y.unzip(), (None, None));
+ /// ```
+ #[inline]
+ #[unstable(feature = "unzip_option", issue = "87800", reason = "recently added")]
+ pub const fn unzip(self) -> (Option<T>, Option<U>) {
+ match self {
+ Some((a, b)) => (Some(a), Some(b)),
+ None => (None, None),
+ }
+ }
+}
+
+impl<T> Option<&T> {
+ /// Maps an `Option<&T>` to an `Option<T>` by copying the contents of the
+ /// option.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = 12;
+ /// let opt_x = Some(&x);
+ /// assert_eq!(opt_x, Some(&12));
+ /// let copied = opt_x.copied();
+ /// assert_eq!(copied, Some(12));
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "copied", since = "1.35.0")]
+ #[rustc_const_unstable(feature = "const_option", issue = "67441")]
+ pub const fn copied(self) -> Option<T>
+ where
+ T: Copy,
+ {
+ // FIXME: this implementation, which sidesteps using `Option::map` since it's not const
+ // ready yet, should be reverted when possible to avoid code repetition
+ match self {
+ Some(&v) => Some(v),
+ None => None,
+ }
+ }
+
+ /// Maps an `Option<&T>` to an `Option<T>` by cloning the contents of the
+ /// option.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = 12;
+ /// let opt_x = Some(&x);
+ /// assert_eq!(opt_x, Some(&12));
+ /// let cloned = opt_x.cloned();
+ /// assert_eq!(cloned, Some(12));
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_option_cloned", issue = "91582")]
+ pub const fn cloned(self) -> Option<T>
+ where
+ T: ~const Clone,
+ {
+ match self {
+ Some(t) => Some(t.clone()),
+ None => None,
+ }
+ }
+}
+
+impl<T> Option<&mut T> {
+ /// Maps an `Option<&mut T>` to an `Option<T>` by copying the contents of the
+ /// option.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut x = 12;
+ /// let opt_x = Some(&mut x);
+ /// assert_eq!(opt_x, Some(&mut 12));
+ /// let copied = opt_x.copied();
+ /// assert_eq!(copied, Some(12));
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "copied", since = "1.35.0")]
+ #[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ pub const fn copied(self) -> Option<T>
+ where
+ T: Copy,
+ {
+ match self {
+ Some(&mut t) => Some(t),
+ None => None,
+ }
+ }
+
+ /// Maps an `Option<&mut T>` to an `Option<T>` by cloning the contents of the
+ /// option.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut x = 12;
+ /// let opt_x = Some(&mut x);
+ /// assert_eq!(opt_x, Some(&mut 12));
+ /// let cloned = opt_x.cloned();
+ /// assert_eq!(cloned, Some(12));
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(since = "1.26.0", feature = "option_ref_mut_cloned")]
+ #[rustc_const_unstable(feature = "const_option_cloned", issue = "91582")]
+ pub const fn cloned(self) -> Option<T>
+ where
+ T: ~const Clone,
+ {
+ match self {
+ Some(t) => Some(t.clone()),
+ None => None,
+ }
+ }
+}
+
+impl<T, E> Option<Result<T, E>> {
+ /// Transposes an `Option` of a [`Result`] into a [`Result`] of an `Option`.
+ ///
+ /// [`None`] will be mapped to <code>[Ok]\([None])</code>.
+ /// <code>[Some]\([Ok]\(\_))</code> and <code>[Some]\([Err]\(\_))</code> will be mapped to
+ /// <code>[Ok]\([Some]\(\_))</code> and <code>[Err]\(\_)</code>.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #[derive(Debug, Eq, PartialEq)]
+ /// struct SomeErr;
+ ///
+ /// let x: Result<Option<i32>, SomeErr> = Ok(Some(5));
+ /// let y: Option<Result<i32, SomeErr>> = Some(Ok(5));
+ /// assert_eq!(x, y.transpose());
+ /// ```
+ #[inline]
+ #[stable(feature = "transpose_result", since = "1.33.0")]
+ #[rustc_const_unstable(feature = "const_option", issue = "67441")]
+ pub const fn transpose(self) -> Result<Option<T>, E> {
+ match self {
+ Some(Ok(x)) => Ok(Some(x)),
+ Some(Err(e)) => Err(e),
+ None => Ok(None),
+ }
+ }
+}
+
+// This is a separate function to reduce the code size of .expect() itself.
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
+#[cold]
+#[track_caller]
+#[rustc_const_unstable(feature = "const_option", issue = "67441")]
+const fn expect_failed(msg: &str) -> ! {
+ panic_str(msg)
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// Trait implementations
+/////////////////////////////////////////////////////////////////////////////
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_clone", issue = "91805")]
+impl<T> const Clone for Option<T>
+where
+ T: ~const Clone + ~const Destruct,
+{
+ #[inline]
+ fn clone(&self) -> Self {
+ match self {
+ Some(x) => Some(x.clone()),
+ None => None,
+ }
+ }
+
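+ /// When both sides are `Some`, this delegates to the contained value's
+ /// `clone_from`, which may reuse existing resources. A minimal sketch:
+ ///
+ /// ```
+ /// let mut dst = Some(String::from("hello"));
+ /// let src = Some(String::from("world!"));
+ /// dst.clone_from(&src); // may reuse `dst`'s existing `String` buffer
+ /// assert_eq!(dst, Some(String::from("world!")));
+ /// ```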
+ #[inline]
+ fn clone_from(&mut self, source: &Self) {
+ match (self, source) {
+ (Some(to), Some(from)) => to.clone_from(from),
+ (to, from) => *to = from.clone(),
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
+impl<T> const Default for Option<T> {
+ /// Returns [`None`][Option::None].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let opt: Option<u32> = Option::default();
+ /// assert!(opt.is_none());
+ /// ```
+ #[inline]
+ fn default() -> Option<T> {
+ None
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> IntoIterator for Option<T> {
+ type Item = T;
+ type IntoIter = IntoIter<T>;
+
+ /// Returns a consuming iterator over the possibly contained value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Some("string");
+ /// let v: Vec<&str> = x.into_iter().collect();
+ /// assert_eq!(v, ["string"]);
+ ///
+ /// let x = None;
+ /// let v: Vec<&str> = x.into_iter().collect();
+ /// assert!(v.is_empty());
+ /// ```
+ #[inline]
+ fn into_iter(self) -> IntoIter<T> {
+ IntoIter { inner: Item { opt: self } }
+ }
+}
+
+#[stable(since = "1.4.0", feature = "option_iter")]
+impl<'a, T> IntoIterator for &'a Option<T> {
+ type Item = &'a T;
+ type IntoIter = Iter<'a, T>;
+
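+ /// Returns an iterator over a shared reference to the possibly contained
+ /// value, which is what makes `for`-loops over `&Option<T>` work. A
+ /// minimal sketch:
+ ///
+ /// ```
+ /// let x = Some(4);
+ /// let mut total = 0;
+ /// for v in &x {
+ ///     total += v;
+ /// }
+ /// assert_eq!(total, 4);
+ /// ```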
+ fn into_iter(self) -> Iter<'a, T> {
+ self.iter()
+ }
+}
+
+#[stable(since = "1.4.0", feature = "option_iter")]
+impl<'a, T> IntoIterator for &'a mut Option<T> {
+ type Item = &'a mut T;
+ type IntoIter = IterMut<'a, T>;
+
+ fn into_iter(self) -> IterMut<'a, T> {
+ self.iter_mut()
+ }
+}
+
+#[stable(since = "1.12.0", feature = "option_from")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl<T> const From<T> for Option<T> {
+ /// Moves `val` into a new [`Some`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let o: Option<u8> = Option::from(67);
+ ///
+ /// assert_eq!(Some(67), o);
+ /// ```
+ fn from(val: T) -> Option<T> {
+ Some(val)
+ }
+}
+
+#[stable(feature = "option_ref_from_ref_option", since = "1.30.0")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl<'a, T> const From<&'a Option<T>> for Option<&'a T> {
+ /// Converts from `&Option<T>` to `Option<&T>`.
+ ///
+ /// # Examples
+ ///
+ /// Converts an <code>[Option]<[String]></code> into an <code>[Option]<[usize]></code>, preserving
+ /// the original. The [`map`] method takes the `self` argument by value, consuming the original,
+ /// so this technique uses `from` to first obtain an [`Option`] of a reference
+ /// to the value inside the original.
+ ///
+ /// [`map`]: Option::map
+ /// [String]: ../../std/string/struct.String.html "String"
+ ///
+ /// ```
+ /// let s: Option<String> = Some(String::from("Hello, Rustaceans!"));
+ /// let o: Option<usize> = Option::from(&s).map(|ss: &String| ss.len());
+ ///
+ /// println!("Can still print s: {s:?}");
+ ///
+ /// assert_eq!(o, Some(18));
+ /// ```
+ fn from(o: &'a Option<T>) -> Option<&'a T> {
+ o.as_ref()
+ }
+}
+
+#[stable(feature = "option_ref_from_ref_option", since = "1.30.0")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl<'a, T> const From<&'a mut Option<T>> for Option<&'a mut T> {
+ /// Converts from `&mut Option<T>` to `Option<&mut T>`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut s = Some(String::from("Hello"));
+ /// let o: Option<&mut String> = Option::from(&mut s);
+ ///
+ /// match o {
+ /// Some(t) => *t = String::from("Hello, Rustaceans!"),
+ /// None => (),
+ /// }
+ ///
+ /// assert_eq!(s, Some(String::from("Hello, Rustaceans!")));
+ /// ```
+ fn from(o: &'a mut Option<T>) -> Option<&'a mut T> {
+ o.as_mut()
+ }
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// The Option Iterators
+/////////////////////////////////////////////////////////////////////////////
+
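+// `Item` is the shared core of the three `Option` iterators below: `next`
+// simply `take`s the inner `Option`, so at most one value is ever yielded
+// and the iterator is trivially fused.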
+#[derive(Clone, Debug)]
+struct Item<A> {
+ opt: Option<A>,
+}
+
+impl<A> Iterator for Item<A> {
+ type Item = A;
+
+ #[inline]
+ fn next(&mut self) -> Option<A> {
+ self.opt.take()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ match self.opt {
+ Some(_) => (1, Some(1)),
+ None => (0, Some(0)),
+ }
+ }
+}
+
+impl<A> DoubleEndedIterator for Item<A> {
+ #[inline]
+ fn next_back(&mut self) -> Option<A> {
+ self.opt.take()
+ }
+}
+
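+// `size_hint` is always exact (zero or one) and `take` keeps returning
+// `None` once exhausted, so these marker traits are sound for `Item`.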
+impl<A> ExactSizeIterator for Item<A> {}
+impl<A> FusedIterator for Item<A> {}
+unsafe impl<A> TrustedLen for Item<A> {}
+
+/// An iterator over a reference to the [`Some`] variant of an [`Option`].
+///
+/// The iterator yields one value if the [`Option`] is a [`Some`], otherwise none.
+///
+/// This `struct` is created by the [`Option::iter`] function.
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Debug)]
+pub struct Iter<'a, A: 'a> {
+ inner: Item<&'a A>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, A> Iterator for Iter<'a, A> {
+ type Item = &'a A;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a A> {
+ self.inner.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, A> DoubleEndedIterator for Iter<'a, A> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a A> {
+ self.inner.next_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A> ExactSizeIterator for Iter<'_, A> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<A> FusedIterator for Iter<'_, A> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<A> TrustedLen for Iter<'_, A> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A> Clone for Iter<'_, A> {
+ #[inline]
+ fn clone(&self) -> Self {
+ Iter { inner: self.inner.clone() }
+ }
+}
+
+/// An iterator over a mutable reference to the [`Some`] variant of an [`Option`].
+///
+/// The iterator yields one value if the [`Option`] is a [`Some`], otherwise none.
+///
+/// This `struct` is created by the [`Option::iter_mut`] function.
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Debug)]
+pub struct IterMut<'a, A: 'a> {
+ inner: Item<&'a mut A>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, A> Iterator for IterMut<'a, A> {
+ type Item = &'a mut A;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a mut A> {
+ self.inner.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, A> DoubleEndedIterator for IterMut<'a, A> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a mut A> {
+ self.inner.next_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A> ExactSizeIterator for IterMut<'_, A> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<A> FusedIterator for IterMut<'_, A> {}
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<A> TrustedLen for IterMut<'_, A> {}
+
+/// An iterator over the value in the [`Some`] variant of an [`Option`].
+///
+/// The iterator yields one value if the [`Option`] is a [`Some`], otherwise none.
+///
+/// This `struct` is created by the [`Option::into_iter`] function.
+#[derive(Clone, Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct IntoIter<A> {
+ inner: Item<A>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A> Iterator for IntoIter<A> {
+ type Item = A;
+
+ #[inline]
+ fn next(&mut self) -> Option<A> {
+ self.inner.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A> DoubleEndedIterator for IntoIter<A> {
+ #[inline]
+ fn next_back(&mut self) -> Option<A> {
+ self.inner.next_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A> ExactSizeIterator for IntoIter<A> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<A> FusedIterator for IntoIter<A> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<A> TrustedLen for IntoIter<A> {}
+
+/////////////////////////////////////////////////////////////////////////////
+// FromIterator
+/////////////////////////////////////////////////////////////////////////////
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A, V: FromIterator<A>> FromIterator<Option<A>> for Option<V> {
+ /// Takes each element in the [`Iterator`]: if it is [`None`][Option::None],
+ /// no further elements are taken, and the [`None`][Option::None] is
+ /// returned. Should no [`None`][Option::None] occur, a container of type
+ /// `V` containing the values of each [`Option`] is returned.
+ ///
+ /// # Examples
+ ///
+ /// Here is an example which increments every integer in a vector.
+ /// We use the checked variant of `add` that returns `None` when the
+ /// calculation would result in an overflow.
+ ///
+ /// ```
+ /// let items = vec![0_u16, 1, 2];
+ ///
+ /// let res: Option<Vec<u16>> = items
+ /// .iter()
+ /// .map(|x| x.checked_add(1))
+ /// .collect();
+ ///
+ /// assert_eq!(res, Some(vec![1, 2, 3]));
+ /// ```
+ ///
+ /// As you can see, this will return the expected, valid items.
+ ///
+ /// Here is another example that tries to subtract one from another list
+ /// of integers, this time checking for underflow:
+ ///
+ /// ```
+ /// let items = vec![2_u16, 1, 0];
+ ///
+ /// let res: Option<Vec<u16>> = items
+ /// .iter()
+ /// .map(|x| x.checked_sub(1))
+ /// .collect();
+ ///
+ /// assert_eq!(res, None);
+ /// ```
+ ///
+ /// Since the last element is zero, it would underflow. Thus, the resulting
+ /// value is `None`.
+ ///
+ /// Here is a variation on the previous example, showing that no
+ /// further elements are taken from `iter` after the first `None`.
+ ///
+ /// ```
+ /// let items = vec![3_u16, 2, 1, 10];
+ ///
+ /// let mut shared = 0;
+ ///
+ /// let res: Option<Vec<u16>> = items
+ /// .iter()
+ /// .map(|x| { shared += x; x.checked_sub(2) })
+ /// .collect();
+ ///
+ /// assert_eq!(res, None);
+ /// assert_eq!(shared, 6);
+ /// ```
+ ///
+ /// Since the third element caused an underflow, no further elements were taken,
+ /// so the final value of `shared` is 6 (= `3 + 2 + 1`), not 16.
+ #[inline]
+ fn from_iter<I: IntoIterator<Item = Option<A>>>(iter: I) -> Option<V> {
+ // FIXME(#11084): This could be replaced with Iterator::scan when this
+ // performance bug is closed.
+
+ iter::try_process(iter.into_iter(), |i| i.collect())
+ }
+}
+
+#[unstable(feature = "try_trait_v2", issue = "84277")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl<T> const ops::Try for Option<T> {
+ type Output = T;
+ type Residual = Option<convert::Infallible>;
+
+ #[inline]
+ fn from_output(output: Self::Output) -> Self {
+ Some(output)
+ }
+
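+ /// `branch` is what the `?` operator calls on an `Option`. A minimal
+ /// sketch of the resulting control flow:
+ ///
+ /// ```
+ /// fn first_char_upper(s: &str) -> Option<char> {
+ ///     // `?` maps `Continue(c)` to `c` and returns early on `Break(None)`.
+ ///     let c = s.chars().next()?;
+ ///     Some(c.to_ascii_uppercase())
+ /// }
+ ///
+ /// assert_eq!(first_char_upper("hi"), Some('H'));
+ /// assert_eq!(first_char_upper(""), None);
+ /// ```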
+ #[inline]
+ fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
+ match self {
+ Some(v) => ControlFlow::Continue(v),
+ None => ControlFlow::Break(None),
+ }
+ }
+}
+
+#[unstable(feature = "try_trait_v2", issue = "84277")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl<T> const ops::FromResidual for Option<T> {
+ #[inline]
+ fn from_residual(residual: Option<convert::Infallible>) -> Self {
+ match residual {
+ None => None,
+ }
+ }
+}
+
+#[unstable(feature = "try_trait_v2_yeet", issue = "96374")]
+impl<T> ops::FromResidual<ops::Yeet<()>> for Option<T> {
+ #[inline]
+ fn from_residual(ops::Yeet(()): ops::Yeet<()>) -> Self {
+ None
+ }
+}
+
+#[unstable(feature = "try_trait_v2_residual", issue = "91285")]
+impl<T> ops::Residual<T> for Option<convert::Infallible> {
+ type TryType = Option<T>;
+}
+
+impl<T> Option<Option<T>> {
+ /// Converts from `Option<Option<T>>` to `Option<T>`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let x: Option<Option<u32>> = Some(Some(6));
+ /// assert_eq!(Some(6), x.flatten());
+ ///
+ /// let x: Option<Option<u32>> = Some(None);
+ /// assert_eq!(None, x.flatten());
+ ///
+ /// let x: Option<Option<u32>> = None;
+ /// assert_eq!(None, x.flatten());
+ /// ```
+ ///
+ /// Flattening only removes one level of nesting at a time:
+ ///
+ /// ```
+ /// let x: Option<Option<Option<u32>>> = Some(Some(Some(6)));
+ /// assert_eq!(Some(Some(6)), x.flatten());
+ /// assert_eq!(Some(6), x.flatten().flatten());
+ /// ```
+ #[inline]
+ #[stable(feature = "option_flattening", since = "1.40.0")]
+ #[rustc_const_unstable(feature = "const_option", issue = "67441")]
+ pub const fn flatten(self) -> Option<T> {
+ match self {
+ Some(inner) => inner,
+ None => None,
+ }
+ }
+}
diff --git a/library/core/src/panic.rs b/library/core/src/panic.rs
new file mode 100644
index 000000000..00b63dfbd
--- /dev/null
+++ b/library/core/src/panic.rs
@@ -0,0 +1,112 @@
+//! Panic support in the standard library.
+
+#![stable(feature = "core_panic_info", since = "1.41.0")]
+
+mod location;
+mod panic_info;
+mod unwind_safe;
+
+use crate::any::Any;
+
+#[stable(feature = "panic_hooks", since = "1.10.0")]
+pub use self::location::Location;
+#[stable(feature = "panic_hooks", since = "1.10.0")]
+pub use self::panic_info::PanicInfo;
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+pub use self::unwind_safe::{AssertUnwindSafe, RefUnwindSafe, UnwindSafe};
+
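+// Rough dispatch of the 2015-edition `panic!` arms below: `panic!()` hits the
+// zero-argument arm, `panic!("boom")` the literal arm, `panic!(some_str)` the
+// expression arm (routed through `panic_str` so the `non_fmt_panic` lint stays
+// meaningful), `panic!("{}", x)` the `panic_display` special case, and
+// everything else the general `panic_fmt` arm.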
+#[doc(hidden)]
+#[unstable(feature = "edition_panic", issue = "none", reason = "use panic!() instead")]
+#[allow_internal_unstable(core_panic, const_format_args)]
+#[rustc_diagnostic_item = "core_panic_2015_macro"]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro panic_2015 {
+ () => (
+ $crate::panicking::panic("explicit panic")
+ ),
+ ($msg:literal $(,)?) => (
+ $crate::panicking::panic($msg)
+ ),
+ // Use `panic_str` instead of `panic_display::<&str>` for non_fmt_panic lint.
+ ($msg:expr $(,)?) => (
+ $crate::panicking::panic_str($msg)
+ ),
+ // Special-case the single-argument case for const_panic.
+ ("{}", $arg:expr $(,)?) => (
+ $crate::panicking::panic_display(&$arg)
+ ),
+ ($fmt:expr, $($arg:tt)+) => (
+ $crate::panicking::panic_fmt($crate::const_format_args!($fmt, $($arg)+))
+ ),
+}
+
+#[doc(hidden)]
+#[unstable(feature = "edition_panic", issue = "none", reason = "use panic!() instead")]
+#[allow_internal_unstable(core_panic, const_format_args)]
+#[rustc_diagnostic_item = "core_panic_2021_macro"]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro panic_2021 {
+ () => (
+ $crate::panicking::panic("explicit panic")
+ ),
+ // Special-case the single-argument case for const_panic.
+ ("{}", $arg:expr $(,)?) => (
+ $crate::panicking::panic_display(&$arg)
+ ),
+ ($($t:tt)+) => (
+ $crate::panicking::panic_fmt($crate::const_format_args!($($t)+))
+ ),
+}
+
+#[doc(hidden)]
+#[unstable(feature = "edition_panic", issue = "none", reason = "use unreachable!() instead")]
+#[allow_internal_unstable(core_panic)]
+#[rustc_diagnostic_item = "unreachable_2015_macro"]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro unreachable_2015 {
+ () => (
+ $crate::panicking::panic("internal error: entered unreachable code")
+ ),
+ // Use of `unreachable_display` for non_fmt_panic lint.
+ // NOTE: the message ("internal error ...") is embedded directly in unreachable_display
+ ($msg:expr $(,)?) => (
+ $crate::panicking::unreachable_display(&$msg)
+ ),
+ ($fmt:expr, $($arg:tt)*) => (
+ $crate::panic!($crate::concat!("internal error: entered unreachable code: ", $fmt), $($arg)*)
+ ),
+}
+
+#[doc(hidden)]
+#[unstable(feature = "edition_panic", issue = "none", reason = "use unreachable!() instead")]
+#[allow_internal_unstable(core_panic)]
+#[rustc_diagnostic_item = "unreachable_2021_macro"]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro unreachable_2021 {
+ () => (
+ $crate::panicking::panic("internal error: entered unreachable code")
+ ),
+ ($($t:tt)+) => (
+ $crate::panic!("internal error: entered unreachable code: {}", $crate::format_args!($($t)+))
+ ),
+}
+
+/// An internal trait used by libstd to pass data from libstd to `panic_unwind`
+/// and other panic runtimes. Not intended to be stabilized any time soon, do
+/// not use.
+#[unstable(feature = "std_internals", issue = "none")]
+#[doc(hidden)]
+pub unsafe trait BoxMeUp {
+ /// Take full ownership of the contents.
+ /// The return type is actually `Box<dyn Any + Send>`, but we cannot use `Box` in libcore.
+ ///
+ /// After this method has been called, only a dummy default value is left in `self`.
+ /// Calling this method twice, or calling `get` after calling this method, is an error.
+ ///
+ /// The argument is borrowed because the panic runtime (`__rust_start_panic`) only
+ /// gets a borrowed `dyn BoxMeUp`.
+ fn take_box(&mut self) -> *mut (dyn Any + Send);
+
+ /// Just borrow the contents.
+ fn get(&mut self) -> &(dyn Any + Send);
+}
diff --git a/library/core/src/panic/location.rs b/library/core/src/panic/location.rs
new file mode 100644
index 000000000..8eefd9ff2
--- /dev/null
+++ b/library/core/src/panic/location.rs
@@ -0,0 +1,197 @@
+use crate::fmt;
+
+/// A struct containing information about the location of a panic.
+///
+/// This structure is created by [`PanicInfo::location()`].
+///
+/// [`PanicInfo::location()`]: crate::panic::PanicInfo::location
+///
+/// # Examples
+///
+/// ```should_panic
+/// use std::panic;
+///
+/// panic::set_hook(Box::new(|panic_info| {
+/// if let Some(location) = panic_info.location() {
+/// println!("panic occurred in file '{}' at line {}", location.file(), location.line());
+/// } else {
+/// println!("panic occurred but can't get location information...");
+/// }
+/// }));
+///
+/// panic!("Normal panic");
+/// ```
+///
+/// # Comparisons
+///
+/// Comparisons for equality and ordering are made in file, line, then column priority.
+/// Files are compared as strings, not `Path`, which could be unexpected.
+/// See [`Location::file`]'s documentation for more discussion.
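+///
+/// A minimal sketch: two captured locations in the same file order by line.
+///
+/// ```
+/// use std::panic::Location;
+///
+/// let first: &Location<'_> = Location::caller();
+/// let second: &Location<'_> = Location::caller();
+/// assert!(first < second); // same file, so the earlier line sorts first
+/// ```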
+#[lang = "panic_location"]
+#[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+#[stable(feature = "panic_hooks", since = "1.10.0")]
+pub struct Location<'a> {
+ file: &'a str,
+ line: u32,
+ col: u32,
+}
+
+impl<'a> Location<'a> {
+ /// Returns the source location of the caller of this function. If that function's caller is
+ /// annotated with `#[track_caller]` then its call location will be returned, and so on up the
+ /// stack to the first call within a non-tracked function body.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::panic::Location;
+ ///
+ /// /// Returns the [`Location`] at which it is called.
+ /// #[track_caller]
+ /// fn get_caller_location() -> &'static Location<'static> {
+ /// Location::caller()
+ /// }
+ ///
+ /// /// Returns a [`Location`] from within this function's definition.
+ /// fn get_just_one_location() -> &'static Location<'static> {
+ /// get_caller_location()
+ /// }
+ ///
+ /// let fixed_location = get_just_one_location();
+ /// assert_eq!(fixed_location.file(), file!());
+ /// assert_eq!(fixed_location.line(), 14);
+ /// assert_eq!(fixed_location.column(), 5);
+ ///
+ /// // running the same untracked function in a different location gives us the same result
+ /// let second_fixed_location = get_just_one_location();
+ /// assert_eq!(fixed_location.file(), second_fixed_location.file());
+ /// assert_eq!(fixed_location.line(), second_fixed_location.line());
+ /// assert_eq!(fixed_location.column(), second_fixed_location.column());
+ ///
+ /// let this_location = get_caller_location();
+ /// assert_eq!(this_location.file(), file!());
+ /// assert_eq!(this_location.line(), 28);
+ /// assert_eq!(this_location.column(), 21);
+ ///
+ /// // running the tracked function in a different location produces a different value
+ /// let another_location = get_caller_location();
+ /// assert_eq!(this_location.file(), another_location.file());
+ /// assert_ne!(this_location.line(), another_location.line());
+ /// assert_ne!(this_location.column(), another_location.column());
+ /// ```
+ #[must_use]
+ #[stable(feature = "track_caller", since = "1.46.0")]
+ #[rustc_const_unstable(feature = "const_caller_location", issue = "76156")]
+ #[track_caller]
+ #[inline]
+ pub const fn caller() -> &'static Location<'static> {
+ crate::intrinsics::caller_location()
+ }
+
+ /// Returns the name of the source file from which the panic originated.
+ ///
+ /// # `&str`, not `&Path`
+ ///
+ /// The returned name refers to a source path on the compiling system, but it isn't valid to
+/// represent this directly as a `&Path`. The compiled code may run on a different system with
+/// a different `Path` implementation than the system providing the contents, and this library
+/// does not currently have a separate "host path" type.
+ ///
+ /// The most surprising behavior occurs when "the same" file is reachable via multiple paths in
+ /// the module system (usually using the `#[path = "..."]` attribute or similar), which can
+ /// cause what appears to be identical code to return differing values from this function.
+ ///
+ /// # Cross-compilation
+ ///
+ /// This value is not suitable for passing to `Path::new` or similar constructors when the host
+ /// platform and target platform differ.
+ ///
+ /// # Examples
+ ///
+ /// ```should_panic
+ /// use std::panic;
+ ///
+ /// panic::set_hook(Box::new(|panic_info| {
+ /// if let Some(location) = panic_info.location() {
+ /// println!("panic occurred in file '{}'", location.file());
+ /// } else {
+ /// println!("panic occurred but can't get location information...");
+ /// }
+ /// }));
+ ///
+ /// panic!("Normal panic");
+ /// ```
+ #[must_use]
+ #[stable(feature = "panic_hooks", since = "1.10.0")]
+ #[inline]
+ pub fn file(&self) -> &str {
+ self.file
+ }
+
+ /// Returns the line number from which the panic originated.
+ ///
+ /// # Examples
+ ///
+ /// ```should_panic
+ /// use std::panic;
+ ///
+ /// panic::set_hook(Box::new(|panic_info| {
+ /// if let Some(location) = panic_info.location() {
+ /// println!("panic occurred at line {}", location.line());
+ /// } else {
+ /// println!("panic occurred but can't get location information...");
+ /// }
+ /// }));
+ ///
+ /// panic!("Normal panic");
+ /// ```
+ #[must_use]
+ #[stable(feature = "panic_hooks", since = "1.10.0")]
+ #[inline]
+ pub fn line(&self) -> u32 {
+ self.line
+ }
+
+ /// Returns the column from which the panic originated.
+ ///
+ /// # Examples
+ ///
+ /// ```should_panic
+ /// use std::panic;
+ ///
+ /// panic::set_hook(Box::new(|panic_info| {
+ /// if let Some(location) = panic_info.location() {
+ /// println!("panic occurred at column {}", location.column());
+ /// } else {
+ /// println!("panic occurred but can't get location information...");
+ /// }
+ /// }));
+ ///
+ /// panic!("Normal panic");
+ /// ```
+ #[must_use]
+ #[stable(feature = "panic_col", since = "1.25.0")]
+ #[inline]
+ pub fn column(&self) -> u32 {
+ self.col
+ }
+}
+
+#[unstable(
+ feature = "panic_internals",
+ reason = "internal details of the implementation of the `panic!` and related macros",
+ issue = "none"
+)]
+impl<'a> Location<'a> {
+ #[doc(hidden)]
+ pub const fn internal_constructor(file: &'a str, line: u32, col: u32) -> Self {
+ Location { file, line, col }
+ }
+}
+
+#[stable(feature = "panic_hook_display", since = "1.26.0")]
+impl fmt::Display for Location<'_> {
+ fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(formatter, "{}:{}:{}", self.file, self.line, self.col)
+ }
+}
diff --git a/library/core/src/panic/panic_info.rs b/library/core/src/panic/panic_info.rs
new file mode 100644
index 000000000..1923155eb
--- /dev/null
+++ b/library/core/src/panic/panic_info.rs
@@ -0,0 +1,166 @@
+use crate::any::Any;
+use crate::fmt;
+use crate::panic::Location;
+
+/// A struct providing information about a panic.
+///
+/// A `PanicInfo` structure is passed to a panic hook set by the [`set_hook`]
+/// function.
+///
+/// [`set_hook`]: ../../std/panic/fn.set_hook.html
+///
+/// # Examples
+///
+/// ```should_panic
+/// use std::panic;
+///
+/// panic::set_hook(Box::new(|panic_info| {
+/// if let Some(s) = panic_info.payload().downcast_ref::<&str>() {
+/// println!("panic occurred: {s:?}");
+/// } else {
+/// println!("panic occurred");
+/// }
+/// }));
+///
+/// panic!("Normal panic");
+/// ```
+#[lang = "panic_info"]
+#[stable(feature = "panic_hooks", since = "1.10.0")]
+#[derive(Debug)]
+pub struct PanicInfo<'a> {
+ payload: &'a (dyn Any + Send),
+ message: Option<&'a fmt::Arguments<'a>>,
+ location: &'a Location<'a>,
+ can_unwind: bool,
+}
+
+impl<'a> PanicInfo<'a> {
+ #[unstable(
+ feature = "panic_internals",
+ reason = "internal details of the implementation of the `panic!` and related macros",
+ issue = "none"
+ )]
+ #[doc(hidden)]
+ #[inline]
+ pub fn internal_constructor(
+ message: Option<&'a fmt::Arguments<'a>>,
+ location: &'a Location<'a>,
+ can_unwind: bool,
+ ) -> Self {
+ struct NoPayload;
+ PanicInfo { location, message, payload: &NoPayload, can_unwind }
+ }
+
+ #[unstable(
+ feature = "panic_internals",
+ reason = "internal details of the implementation of the `panic!` and related macros",
+ issue = "none"
+ )]
+ #[doc(hidden)]
+ #[inline]
+ pub fn set_payload(&mut self, info: &'a (dyn Any + Send)) {
+ self.payload = info;
+ }
+
+ /// Returns the payload associated with the panic.
+ ///
+ /// This will commonly, but not always, be a `&'static str` or [`String`].
+ ///
+ /// [`String`]: ../../std/string/struct.String.html
+ ///
+ /// # Examples
+ ///
+ /// ```should_panic
+ /// use std::panic;
+ ///
+ /// panic::set_hook(Box::new(|panic_info| {
+ /// if let Some(s) = panic_info.payload().downcast_ref::<&str>() {
+ /// println!("panic occurred: {s:?}");
+ /// } else {
+ /// println!("panic occurred");
+ /// }
+ /// }));
+ ///
+ /// panic!("Normal panic");
+ /// ```
+ #[must_use]
+ #[stable(feature = "panic_hooks", since = "1.10.0")]
+ pub fn payload(&self) -> &(dyn Any + Send) {
+ self.payload
+ }
+
+ /// If the `panic!` macro from the `core` crate (not from `std`)
+ /// was used with a formatting string and some additional arguments,
+ /// returns that message ready to be used, for example, with [`fmt::write`].
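+ ///
+ /// A sketch of how a custom panic handler might use this, assuming a `no_std`
+ /// crate with the unstable `panic_info_message` feature enabled:
+ ///
+ /// ```ignore (requires-unstable-panic_info_message-and-a-custom-panic-handler)
+ /// #![feature(panic_info_message)]
+ /// use core::panic::PanicInfo;
+ ///
+ /// #[panic_handler]
+ /// fn panic(info: &PanicInfo<'_>) -> ! {
+ ///     if let Some(message) = info.message() {
+ ///         // The formatted message could now be forwarded to a
+ ///         // platform-specific sink, e.g. via `core::fmt::write`.
+ ///         let _ = message;
+ ///     }
+ ///     loop {}
+ /// }
+ /// ```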
+ #[must_use]
+ #[unstable(feature = "panic_info_message", issue = "66745")]
+ pub fn message(&self) -> Option<&fmt::Arguments<'_>> {
+ self.message
+ }
+
+ /// Returns information about the location from which the panic originated,
+ /// if available.
+ ///
+ /// This method will currently always return [`Some`], but this may change
+ /// in future versions.
+ ///
+ /// # Examples
+ ///
+ /// ```should_panic
+ /// use std::panic;
+ ///
+ /// panic::set_hook(Box::new(|panic_info| {
+ /// if let Some(location) = panic_info.location() {
+ /// println!("panic occurred in file '{}' at line {}",
+ /// location.file(),
+ /// location.line(),
+ /// );
+ /// } else {
+ /// println!("panic occurred but can't get location information...");
+ /// }
+ /// }));
+ ///
+ /// panic!("Normal panic");
+ /// ```
+ #[must_use]
+ #[stable(feature = "panic_hooks", since = "1.10.0")]
+ pub fn location(&self) -> Option<&Location<'_>> {
+ // NOTE: If this is changed to sometimes return None,
+ // deal with that case in std::panicking::default_hook and core::panicking::panic_fmt.
+ Some(&self.location)
+ }
+
+ /// Returns whether the panic handler is allowed to unwind the stack from
+ /// the point where the panic occurred.
+ ///
+ /// This is true for most kinds of panics with the exception of panics
+ /// caused by trying to unwind out of a `Drop` implementation or a function
+ /// whose ABI does not support unwinding.
+ ///
+ /// It is safe for a panic handler to unwind even when this function returns
+ /// false; however, this will simply cause the panic handler to be called
+ /// again.
+ #[must_use]
+ #[unstable(feature = "panic_can_unwind", issue = "92988")]
+ pub fn can_unwind(&self) -> bool {
+ self.can_unwind
+ }
+}
+
+#[stable(feature = "panic_hook_display", since = "1.26.0")]
+impl fmt::Display for PanicInfo<'_> {
+ fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ formatter.write_str("panicked at ")?;
+ if let Some(message) = self.message {
+ write!(formatter, "'{}', ", message)?
+ } else if let Some(payload) = self.payload.downcast_ref::<&'static str>() {
+ write!(formatter, "'{}', ", payload)?
+ }
+ // NOTE: we cannot use downcast_ref::<String>() here
+ // since String is not available in libcore!
+ // The payload is a String when `std::panic!` is called with multiple arguments,
+ // but in that case the message is also available.
+
+ self.location.fmt(formatter)
+ }
+}
diff --git a/library/core/src/panic/unwind_safe.rs b/library/core/src/panic/unwind_safe.rs
new file mode 100644
index 000000000..9a6153f12
--- /dev/null
+++ b/library/core/src/panic/unwind_safe.rs
@@ -0,0 +1,312 @@
+use crate::async_iter::AsyncIterator;
+use crate::cell::UnsafeCell;
+use crate::fmt;
+use crate::future::Future;
+use crate::ops::{Deref, DerefMut};
+use crate::pin::Pin;
+use crate::ptr::{NonNull, Unique};
+use crate::task::{Context, Poll};
+
+/// A marker trait which represents "panic safe" types in Rust.
+///
+/// This trait is implemented by default for many types and behaves similarly in
+/// terms of inference of implementation to the [`Send`] and [`Sync`] traits. The
+/// purpose of this trait is to encode what types are safe to cross a [`catch_unwind`]
+/// boundary with no fear of unwind safety.
+///
+/// [`catch_unwind`]: ../../std/panic/fn.catch_unwind.html
+///
+/// ## What is unwind safety?
+///
+/// In Rust a function can "return" early if it either panics or calls a
+/// function which transitively panics. This sort of control flow is not always
+/// anticipated, and has the possibility of causing subtle bugs through a
+/// combination of two critical components:
+///
+/// 1. A data structure is in a temporarily invalid state when the thread
+/// panics.
+/// 2. This broken invariant is then later observed.
+///
+/// Typically in Rust, it is difficult to perform step (2) because catching a
+/// panic involves either spawning a thread (which in turn makes it difficult
+/// to later witness broken invariants) or using the `catch_unwind` function in this
+/// module. Additionally, even if an invariant is witnessed, it typically isn't a
+/// problem in Rust because there are no uninitialized values (like in C or C++).
+///
+/// It is possible, however, for **logical** invariants to be broken in Rust,
+/// which can end up causing behavioral bugs. Another key aspect of unwind safety
+/// in Rust is that, in the absence of `unsafe` code, a panic cannot lead to
+/// memory unsafety.
+///
+/// That was a bit of a whirlwind tour of unwind safety, but for more information
+/// about unwind safety and how it applies to Rust, see an [associated RFC][rfc].
+///
+/// [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/1236-stabilize-catch-panic.md
+///
+/// ## What is `UnwindSafe`?
+///
+/// Now that we've got an idea of what unwind safety is in Rust, it's also
+/// important to understand what this trait represents. As mentioned above, one
+/// way to witness broken invariants is through the `catch_unwind` function in this
+/// module as it allows catching a panic and then re-using the environment of
+/// the closure.
+///
+/// Simply put, a type `T` implements `UnwindSafe` if it cannot easily allow
+/// witnessing a broken invariant through the use of `catch_unwind` (catching a
+/// panic). This trait is an auto trait, so it is automatically implemented for
+/// many types, and it is also structurally composed (e.g., a struct is unwind
+/// safe if all of its components are unwind safe).
+///
+/// Note, however, that this is not an unsafe trait, so there is not a succinct
+/// contract that this trait is providing. Instead it is intended as more of a
+/// "speed bump" to alert users of `catch_unwind` that broken invariants may be
+/// witnessed and may need to be accounted for.
+///
+/// ## Who implements `UnwindSafe`?
+///
+/// Types such as `&mut T` and `&RefCell<T>` are examples which are **not**
+/// unwind safe. The general idea is that any mutable state which can be shared
+/// across `catch_unwind` is not unwind safe by default. This is because it is very
+/// easy to witness a broken invariant outside of `catch_unwind` as the data is
+/// simply accessed as usual.
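+///
+/// For example (an illustrative sketch), a closure that captures `&mut T` is
+/// rejected by [`catch_unwind`]:
+///
+/// ```compile_fail,E0277
+/// let mut count = 0;
+/// let result = std::panic::catch_unwind(|| {
+///     count += 1; // captures `&mut i32`, which is not unwind safe
+/// });
+/// ```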
+///
+/// Types like `&Mutex<T>`, however, are unwind safe because they implement
+/// poisoning by default. They still allow witnessing a broken invariant, but
+/// they already provide their own "speed bumps" to do so.
+///
+/// ## When should `UnwindSafe` be used?
+///
+/// It is not intended that most types or functions need to worry about this trait.
+/// It is only used as a bound on the `catch_unwind` function and, as mentioned
+/// above, the lack of `unsafe` means it is mostly advisory. The
+/// [`AssertUnwindSafe`] wrapper struct can be used to force this trait to be
+/// implemented for any closed-over variables passed to `catch_unwind`.
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "unwind_safe_trait")]
+#[rustc_on_unimplemented(
+ message = "the type `{Self}` may not be safely transferred across an unwind boundary",
+ label = "`{Self}` may not be safely transferred across an unwind boundary"
+)]
+pub auto trait UnwindSafe {}
+
+/// A marker trait representing types where a shared reference is considered
+/// unwind safe.
+///
+/// Notably, this trait is not implemented by [`UnsafeCell`], the root of all
+/// interior mutability.
+///
+/// This is a "helper marker trait" used to provide impl blocks for the
+/// [`UnwindSafe`] trait; for more information, see that documentation.
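+///
+/// For example (an illustrative sketch), sharing a `Cell` across a
+/// `catch_unwind` boundary is rejected, because `Cell` is built on
+/// [`UnsafeCell`]:
+///
+/// ```compile_fail,E0277
+/// use std::cell::Cell;
+///
+/// let counter = Cell::new(0);
+/// let result = std::panic::catch_unwind(|| {
+///     counter.set(1); // captures `&Cell<i32>`, which is not unwind safe
+/// });
+/// ```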
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "ref_unwind_safe_trait")]
+#[rustc_on_unimplemented(
+ message = "the type `{Self}` may contain interior mutability and a reference may not be safely \
+ transferrable across a catch_unwind boundary",
+ label = "`{Self}` may contain interior mutability and a reference may not be safely \
+ transferrable across a catch_unwind boundary"
+)]
+pub auto trait RefUnwindSafe {}
+
+/// A simple wrapper around a type to assert that it is unwind safe.
+///
+/// When using [`catch_unwind`] it may be the case that some of the closed over
+/// variables are not unwind safe. For example if `&mut T` is captured the
+/// compiler will generate a warning indicating that it is not unwind safe. It
+/// might not be the case, however, that this is actually a problem due to the
+/// specific usage of [`catch_unwind`] if unwind safety is specifically taken into
+/// account. This wrapper struct is useful for a quick and lightweight
+/// annotation that a variable is indeed unwind safe.
+///
+/// [`catch_unwind`]: ../../std/panic/fn.catch_unwind.html
+///
+/// # Examples
+///
+/// One way to use `AssertUnwindSafe` is to assert that the entire closure
+/// itself is unwind safe, bypassing all checks for all variables:
+///
+/// ```
+/// use std::panic::{self, AssertUnwindSafe};
+///
+/// let mut variable = 4;
+///
+/// // This code will not compile because the closure captures `&mut variable`
+/// // which is not considered unwind safe by default.
+///
+/// // panic::catch_unwind(|| {
+/// // variable += 3;
+/// // });
+///
+/// // This, however, will compile due to the `AssertUnwindSafe` wrapper
+/// let result = panic::catch_unwind(AssertUnwindSafe(|| {
+/// variable += 3;
+/// }));
+/// // ...
+/// ```
+///
+/// Wrapping the entire closure amounts to a blanket assertion that all captured
+/// variables are unwind safe. This has the downside that if new captures are
+/// added in the future, they will also be considered unwind safe. Therefore,
+/// you may prefer to just wrap individual captures, as shown below. This is
+/// more annotation, but it ensures that if a new capture is added which is not
+/// unwind safe, you will get a compilation error at that time, which will
+/// allow you to consider whether that new capture in fact represents a bug or
+/// not.
+///
+/// ```
+/// use std::panic::{self, AssertUnwindSafe};
+///
+/// let mut variable = 4;
+/// let other_capture = 3;
+///
+/// let result = {
+/// let mut wrapper = AssertUnwindSafe(&mut variable);
+/// panic::catch_unwind(move || {
+/// **wrapper += other_capture;
+/// })
+/// };
+/// // ...
+/// ```
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+pub struct AssertUnwindSafe<T>(#[stable(feature = "catch_unwind", since = "1.9.0")] pub T);
+
+// Implementations of the `UnwindSafe` trait:
+//
+// * By default everything is unwind safe
+// * pointers whose pointee `T` contains mutability of some form are not unwind safe
+// * Unique, an owning pointer, lifts an implementation
+// * Types like Mutex/RwLock which are explicitly poisoned are unwind safe
+// * Our custom AssertUnwindSafe wrapper is indeed unwind safe
+
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<T: ?Sized> !UnwindSafe for &mut T {}
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<T: RefUnwindSafe + ?Sized> UnwindSafe for &T {}
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<T: RefUnwindSafe + ?Sized> UnwindSafe for *const T {}
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<T: RefUnwindSafe + ?Sized> UnwindSafe for *mut T {}
+#[unstable(feature = "ptr_internals", issue = "none")]
+impl<T: UnwindSafe + ?Sized> UnwindSafe for Unique<T> {}
+#[stable(feature = "nonnull", since = "1.25.0")]
+impl<T: RefUnwindSafe + ?Sized> UnwindSafe for NonNull<T> {}
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<T> UnwindSafe for AssertUnwindSafe<T> {}
+
+// Pretty simple implementations for the `RefUnwindSafe` marker trait,
+// basically just saying that `UnsafeCell` is the
+// only thing which doesn't implement it (which then transitively applies to
+// everything else).
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<T: ?Sized> !RefUnwindSafe for UnsafeCell<T> {}
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<T> RefUnwindSafe for AssertUnwindSafe<T> {}
+
+#[cfg(target_has_atomic_load_store = "ptr")]
+#[stable(feature = "unwind_safe_atomic_refs", since = "1.14.0")]
+impl RefUnwindSafe for crate::sync::atomic::AtomicIsize {}
+#[cfg(target_has_atomic_load_store = "8")]
+#[stable(feature = "integer_atomics_stable", since = "1.34.0")]
+impl RefUnwindSafe for crate::sync::atomic::AtomicI8 {}
+#[cfg(target_has_atomic_load_store = "16")]
+#[stable(feature = "integer_atomics_stable", since = "1.34.0")]
+impl RefUnwindSafe for crate::sync::atomic::AtomicI16 {}
+#[cfg(target_has_atomic_load_store = "32")]
+#[stable(feature = "integer_atomics_stable", since = "1.34.0")]
+impl RefUnwindSafe for crate::sync::atomic::AtomicI32 {}
+#[cfg(target_has_atomic_load_store = "64")]
+#[stable(feature = "integer_atomics_stable", since = "1.34.0")]
+impl RefUnwindSafe for crate::sync::atomic::AtomicI64 {}
+#[cfg(target_has_atomic_load_store = "128")]
+#[unstable(feature = "integer_atomics", issue = "99069")]
+impl RefUnwindSafe for crate::sync::atomic::AtomicI128 {}
+
+#[cfg(target_has_atomic_load_store = "ptr")]
+#[stable(feature = "unwind_safe_atomic_refs", since = "1.14.0")]
+impl RefUnwindSafe for crate::sync::atomic::AtomicUsize {}
+#[cfg(target_has_atomic_load_store = "8")]
+#[stable(feature = "integer_atomics_stable", since = "1.34.0")]
+impl RefUnwindSafe for crate::sync::atomic::AtomicU8 {}
+#[cfg(target_has_atomic_load_store = "16")]
+#[stable(feature = "integer_atomics_stable", since = "1.34.0")]
+impl RefUnwindSafe for crate::sync::atomic::AtomicU16 {}
+#[cfg(target_has_atomic_load_store = "32")]
+#[stable(feature = "integer_atomics_stable", since = "1.34.0")]
+impl RefUnwindSafe for crate::sync::atomic::AtomicU32 {}
+#[cfg(target_has_atomic_load_store = "64")]
+#[stable(feature = "integer_atomics_stable", since = "1.34.0")]
+impl RefUnwindSafe for crate::sync::atomic::AtomicU64 {}
+#[cfg(target_has_atomic_load_store = "128")]
+#[unstable(feature = "integer_atomics", issue = "99069")]
+impl RefUnwindSafe for crate::sync::atomic::AtomicU128 {}
+
+#[cfg(target_has_atomic_load_store = "8")]
+#[stable(feature = "unwind_safe_atomic_refs", since = "1.14.0")]
+impl RefUnwindSafe for crate::sync::atomic::AtomicBool {}
+
+#[cfg(target_has_atomic_load_store = "ptr")]
+#[stable(feature = "unwind_safe_atomic_refs", since = "1.14.0")]
+impl<T> RefUnwindSafe for crate::sync::atomic::AtomicPtr<T> {}
+
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<T> Deref for AssertUnwindSafe<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ &self.0
+ }
+}
+
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<T> DerefMut for AssertUnwindSafe<T> {
+ fn deref_mut(&mut self) -> &mut T {
+ &mut self.0
+ }
+}
+
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<R, F: FnOnce() -> R> FnOnce<()> for AssertUnwindSafe<F> {
+ type Output = R;
+
+ extern "rust-call" fn call_once(self, _args: ()) -> R {
+ (self.0)()
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl<T: fmt::Debug> fmt::Debug for AssertUnwindSafe<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("AssertUnwindSafe").field(&self.0).finish()
+ }
+}
+
+#[stable(feature = "assertunwindsafe_default", since = "1.62.0")]
+impl<T: Default> Default for AssertUnwindSafe<T> {
+ fn default() -> Self {
+ Self(Default::default())
+ }
+}
+
+#[stable(feature = "futures_api", since = "1.36.0")]
+impl<F: Future> Future for AssertUnwindSafe<F> {
+ type Output = F::Output;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ // SAFETY: pin projection. AssertUnwindSafe follows structural pinning.
+ let pinned_field = unsafe { Pin::map_unchecked_mut(self, |x| &mut x.0) };
+ F::poll(pinned_field, cx)
+ }
+}
+
+#[unstable(feature = "async_iterator", issue = "79024")]
+impl<S: AsyncIterator> AsyncIterator for AssertUnwindSafe<S> {
+ type Item = S::Item;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<S::Item>> {
+ // SAFETY: pin projection. AssertUnwindSafe follows structural pinning.
+ unsafe { self.map_unchecked_mut(|x| &mut x.0) }.poll_next(cx)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.0.size_hint()
+ }
+}
diff --git a/library/core/src/panicking.rs b/library/core/src/panicking.rs
new file mode 100644
index 000000000..7a575a88e
--- /dev/null
+++ b/library/core/src/panicking.rs
@@ -0,0 +1,231 @@
+//! Panic support for libcore
+//!
+//! The core library cannot define panicking, but it does *declare* panicking. This
+//! means that the functions inside of libcore are allowed to panic, but to be
+//! useful an upstream crate must define panicking for libcore to use. The current
+//! interface for panicking is:
+//!
+//! ```
+//! fn panic_impl(pi: &core::panic::PanicInfo<'_>) -> !
+//! # { loop {} }
+//! ```
+//!
+//! This definition allows for panicking with any general message, but it does not
+//! allow for failing with a `Box<Any>` value. (`PanicInfo` just contains a `&(dyn Any + Send)`,
+//! for which we fill in a dummy value in `PanicInfo::internal_constructor`.)
+//! The reason for this is that libcore is not allowed to allocate.
+//!
+//! This module contains a few other panicking functions, but these are just the
+//! necessary lang items for the compiler. All panics are funneled through this
+//! one function. The actual symbol is declared through the `#[panic_handler]` attribute.
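+//!
+//! For example, a minimal `no_std` program could satisfy this interface with the
+//! following handler (an illustrative sketch; a real handler would typically
+//! report the `PanicInfo` before aborting):
+//!
+//! ```ignore (requires-a-no_std-crate-root)
+//! use core::panic::PanicInfo;
+//!
+//! #[panic_handler]
+//! fn panic(_info: &PanicInfo<'_>) -> ! {
+//!     loop {}
+//! }
+//! ```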
+
+#![allow(dead_code, missing_docs)]
+#![unstable(
+ feature = "core_panic",
+ reason = "internal details of the implementation of the `panic!` and related macros",
+ issue = "none"
+)]
+
+use crate::fmt;
+use crate::panic::{Location, PanicInfo};
+
+/// The underlying implementation of libcore's `panic!` macro when no formatting is used.
+#[cold]
+// Never inline unless panic_immediate_abort is enabled, to avoid code
+// bloat at the call sites as much as possible.
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
+#[track_caller]
+#[rustc_const_unstable(feature = "core_panic", issue = "none")]
+#[lang = "panic"] // needed by codegen for panic on overflow and other `Assert` MIR terminators
+pub const fn panic(expr: &'static str) -> ! {
+ // Use Arguments::new_v1 instead of format_args!("{expr}") to potentially
+ // reduce size overhead. The format_args! macro uses str's Display trait to
+ // write expr, which calls Formatter::pad, which must accommodate string
+ // truncation and padding (even though none is used here). Using
+ // Arguments::new_v1 may allow the compiler to omit Formatter::pad from the
+ // output binary, saving up to a few kilobytes.
+ panic_fmt(fmt::Arguments::new_v1(&[expr], &[]));
+}
+
+#[inline]
+#[track_caller]
+#[rustc_diagnostic_item = "panic_str"]
+#[rustc_const_unstable(feature = "core_panic", issue = "none")]
+pub const fn panic_str(expr: &str) -> ! {
+ panic_display(&expr);
+}
+
+#[inline]
+#[track_caller]
+#[rustc_diagnostic_item = "unreachable_display"] // needed for `non-fmt-panics` lint
+pub fn unreachable_display<T: fmt::Display>(x: &T) -> ! {
+ panic_fmt(format_args!("internal error: entered unreachable code: {}", *x));
+}
+
+#[inline]
+#[track_caller]
+#[lang = "panic_display"] // needed for const-evaluated panics
+#[rustc_do_not_const_check] // hooked by const-eval
+#[rustc_const_unstable(feature = "core_panic", issue = "none")]
+pub const fn panic_display<T: fmt::Display>(x: &T) -> ! {
+ panic_fmt(format_args!("{}", *x));
+}
+
+#[cold]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[track_caller]
+#[lang = "panic_bounds_check"] // needed by codegen for panic on OOB array/slice access
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+ if cfg!(feature = "panic_immediate_abort") {
+ super::intrinsics::abort()
+ }
+
+ panic!("index out of bounds: the len is {len} but the index is {index}")
+}
+
+// This function is called directly by the codegen backend, and must not have
+// any extra arguments (including those synthesized by track_caller).
+#[cold]
+#[inline(never)]
+#[lang = "panic_no_unwind"] // needed by codegen for panic in nounwind function
+fn panic_no_unwind() -> ! {
+ if cfg!(feature = "panic_immediate_abort") {
+ super::intrinsics::abort()
+ }
+
+ // NOTE This function never crosses the FFI boundary; it's a Rust-to-Rust call
+ // that gets resolved to the `#[panic_handler]` function.
+ extern "Rust" {
+ #[lang = "panic_impl"]
+ fn panic_impl(pi: &PanicInfo<'_>) -> !;
+ }
+
+ // PanicInfo with the `can_unwind` flag set to false forces an abort.
+ let fmt = format_args!("panic in a function that cannot unwind");
+ let pi = PanicInfo::internal_constructor(Some(&fmt), Location::caller(), false);
+
+ // SAFETY: `panic_impl` is defined in safe Rust code and thus is safe to call.
+ unsafe { panic_impl(&pi) }
+}
+
+/// The entry point for panicking with a formatted message.
+///
+/// This is designed to reduce the amount of code required at the call
+/// site as much as possible (so that `panic!()` has as little impact as
+/// possible on, e.g., the inlining of other functions), by moving
+/// the actual formatting into this shared place.
+#[cold]
+// If panic_immediate_abort, inline the abort call,
+// otherwise avoid inlining because it is a cold path.
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
+#[track_caller]
+#[lang = "panic_fmt"] // needed for const-evaluated panics
+#[rustc_do_not_const_check] // hooked by const-eval
+#[rustc_const_unstable(feature = "core_panic", issue = "none")]
+pub const fn panic_fmt(fmt: fmt::Arguments<'_>) -> ! {
+ if cfg!(feature = "panic_immediate_abort") {
+ super::intrinsics::abort()
+ }
+
+ // NOTE This function never crosses the FFI boundary; it's a Rust-to-Rust call
+ // that gets resolved to the `#[panic_handler]` function.
+ extern "Rust" {
+ #[lang = "panic_impl"]
+ fn panic_impl(pi: &PanicInfo<'_>) -> !;
+ }
+
+ let pi = PanicInfo::internal_constructor(Some(&fmt), Location::caller(), true);
+
+ // SAFETY: `panic_impl` is defined in safe Rust code and thus is safe to call.
+ unsafe { panic_impl(&pi) }
+}
+
+/// This function is used instead of panic_fmt in const eval.
+#[lang = "const_panic_fmt"]
+#[rustc_const_unstable(feature = "core_panic", issue = "none")]
+pub const fn const_panic_fmt(fmt: fmt::Arguments<'_>) -> ! {
+ if let Some(msg) = fmt.as_str() {
+ panic_str(msg);
+ } else {
+ // SAFETY: This is only evaluated at compile time, which reliably
+ // handles this UB (in case this branch turns out to be reachable
+ // somehow).
+ unsafe { crate::hint::unreachable_unchecked() };
+ }
+}
+
+#[derive(Debug)]
+#[doc(hidden)]
+pub enum AssertKind {
+ Eq,
+ Ne,
+ Match,
+}
+
+/// Internal function for `assert_eq!` and `assert_ne!` macros
+#[cold]
+#[track_caller]
+#[doc(hidden)]
+pub fn assert_failed<T, U>(
+ kind: AssertKind,
+ left: &T,
+ right: &U,
+ args: Option<fmt::Arguments<'_>>,
+) -> !
+where
+ T: fmt::Debug + ?Sized,
+ U: fmt::Debug + ?Sized,
+{
+ assert_failed_inner(kind, &left, &right, args)
+}
+
+/// Internal function for `assert_matches!`
+#[cold]
+#[track_caller]
+#[doc(hidden)]
+pub fn assert_matches_failed<T: fmt::Debug + ?Sized>(
+ left: &T,
+ right: &str,
+ args: Option<fmt::Arguments<'_>>,
+) -> ! {
+ // Use the Display implementation to display the pattern.
+ struct Pattern<'a>(&'a str);
+ impl fmt::Debug for Pattern<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self.0, f)
+ }
+ }
+ assert_failed_inner(AssertKind::Match, &left, &Pattern(right), args);
+}
+
+/// Non-generic version of the above functions, to avoid code bloat.
+#[track_caller]
+fn assert_failed_inner(
+ kind: AssertKind,
+ left: &dyn fmt::Debug,
+ right: &dyn fmt::Debug,
+ args: Option<fmt::Arguments<'_>>,
+) -> ! {
+ let op = match kind {
+ AssertKind::Eq => "==",
+ AssertKind::Ne => "!=",
+ AssertKind::Match => "matches",
+ };
+
+ match args {
+ Some(args) => panic!(
+ r#"assertion failed: `(left {} right)`
+ left: `{:?}`,
+ right: `{:?}`: {}"#,
+ op, left, right, args
+ ),
+ None => panic!(
+ r#"assertion failed: `(left {} right)`
+ left: `{:?}`,
+ right: `{:?}`"#,
+ op, left, right,
+ ),
+ }
+}
diff --git a/library/core/src/pin.rs b/library/core/src/pin.rs
new file mode 100644
index 000000000..ccef35b45
--- /dev/null
+++ b/library/core/src/pin.rs
@@ -0,0 +1,1159 @@
+//! Types that pin data to its location in memory.
+//!
+//! It is sometimes useful to have objects that are guaranteed not to move,
+//! in the sense that their placement in memory does not change, and can thus be relied upon.
+//! A prime example of such a scenario would be building self-referential structs,
+//! as moving an object with pointers to itself will invalidate them, which could cause undefined
+//! behavior.
+//!
+//! At a high level, a <code>[Pin]\<P></code> ensures that the pointee of any pointer type
+//! `P` has a stable location in memory, meaning it cannot be moved elsewhere
+//! and its memory cannot be deallocated until it gets dropped. We say that the
+//! pointee is "pinned". Things get more subtle when discussing types that
+//! combine pinned with non-pinned data; [see below](#projections-and-structural-pinning)
+//! for more details.
+//!
+//! By default, all types in Rust are movable. Rust allows passing all types by-value,
+//! and common smart-pointer types such as <code>[Box]\<T></code> and <code>[&mut] T</code> allow
+//! replacing and moving the values they contain: you can move out of a <code>[Box]\<T></code>,
+//! or you can use [`mem::swap`]. <code>[Pin]\<P></code> wraps a pointer type `P`, so
+//! <code>[Pin]<[Box]\<T>></code> functions much like a regular <code>[Box]\<T></code>:
+//! when a <code>[Pin]<[Box]\<T>></code> gets dropped, so do its contents, and the memory gets
+//! deallocated. Similarly, <code>[Pin]<[&mut] T></code> is a lot like <code>[&mut] T</code>.
+//! However, <code>[Pin]\<P></code> does not let clients actually obtain a <code>[Box]\<T></code>
+//! or <code>[&mut] T</code> to pinned data, which implies that you cannot use operations such
+//! as [`mem::swap`]:
+//!
+//! ```
+//! use std::pin::Pin;
+//! fn swap_pins<T>(x: Pin<&mut T>, y: Pin<&mut T>) {
+//! // `mem::swap` needs `&mut T`, but we cannot get it.
+//! // We are stuck, we cannot swap the contents of these references.
+//! // We could use `Pin::get_unchecked_mut`, but that is unsafe for a reason:
+//! // we are not allowed to use it for moving things out of the `Pin`.
+//! }
+//! ```
+//!
+//! It is worth reiterating that <code>[Pin]\<P></code> does *not* change the fact that the Rust
+//! compiler considers all types movable. [`mem::swap`] remains callable for any `T`. Instead,
+//! <code>[Pin]\<P></code> prevents certain *values* (pointed to by pointers wrapped in
+//! <code>[Pin]\<P></code>) from being moved by making it impossible to call methods that require
+//! <code>[&mut] T</code> on them (like [`mem::swap`]).
+//!
+//! <code>[Pin]\<P></code> can be used to wrap any pointer type `P`, and as such it interacts with
+//! [`Deref`] and [`DerefMut`]. A <code>[Pin]\<P></code> where <code>P: [Deref]</code> should be
+//! considered as a "`P`-style pointer" to a pinned <code>P::[Target]</code> – so, a
+//! <code>[Pin]<[Box]\<T>></code> is an owned pointer to a pinned `T`, and a
+//! <code>[Pin]<[Rc]\<T>></code> is a reference-counted pointer to a pinned `T`.
+//! For correctness, <code>[Pin]\<P></code> relies on the implementations of [`Deref`] and
+//! [`DerefMut`] not to move out of their `self` parameter, and only ever to
+//! return a pointer to pinned data when they are called on a pinned pointer.
+//!
+//! # `Unpin`
+//!
+//! Many types are always freely movable, even when pinned, because they do not
+//! rely on having a stable address. This includes all the basic types (like
+//! [`bool`], [`i32`], and references) as well as types consisting solely of these
+//! types. Types that do not care about pinning implement the [`Unpin`]
+//! auto-trait, which cancels the effect of <code>[Pin]\<P></code>. For <code>T: [Unpin]</code>,
+//! <code>[Pin]<[Box]\<T>></code> and <code>[Box]\<T></code> function identically, as do
+//! <code>[Pin]<[&mut] T></code> and <code>[&mut] T</code>.
+//!
+//! Note that pinning and [`Unpin`] only affect the pointed-to type <code>P::[Target]</code>,
+//! not the pointer type `P` itself that got wrapped in <code>[Pin]\<P></code>. For example,
+//! whether or not <code>[Box]\<T></code> is [`Unpin`] has no effect on the behavior of
+//! <code>[Pin]<[Box]\<T>></code> (here, `T` is the pointed-to type).
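+//!
+//! For instance (a small illustrative sketch), pinning an `i32` places no
+//! restrictions on it, because `i32: Unpin`:
+//!
+//! ```
+//! use std::pin::Pin;
+//!
+//! let mut x = 5;
+//! let mut pinned: Pin<&mut i32> = Pin::new(&mut x);
+//! // `i32: Unpin`, so we can still mutate through the pin and even
+//! // take the `&mut i32` back out with safe APIs.
+//! *pinned += 1;
+//! assert_eq!(*Pin::into_inner(pinned), 6);
+//! ```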
+//!
+//! # Example: self-referential struct
+//!
+//! Before we go into more details to explain the guarantees and choices
+//! associated with <code>[Pin]\<P></code>, we discuss some examples for how it might be used.
+//! Feel free to [skip to where the theoretical discussion continues](#drop-guarantee).
+//!
+//! ```rust
+//! use std::pin::Pin;
+//! use std::marker::PhantomPinned;
+//! use std::ptr::NonNull;
+//!
+//! // This is a self-referential struct because the slice field points to the data field.
+//! // We cannot inform the compiler about that with a normal reference,
+//! // as this pattern cannot be described with the usual borrowing rules.
+//! // Instead we use a raw pointer, though one which is known not to be null,
+//! // as we know it's pointing at the string.
+//! struct Unmovable {
+//! data: String,
+//! slice: NonNull<String>,
+//! _pin: PhantomPinned,
+//! }
+//!
+//! impl Unmovable {
+//! // To ensure the data doesn't move when the function returns,
+//! // we place it in the heap where it will stay for the lifetime of the object,
+//! // and the only way to access it would be through a pointer to it.
+//! fn new(data: String) -> Pin<Box<Self>> {
+//! let res = Unmovable {
+//! data,
+//! // we only create the pointer once the data is in place
+//! // otherwise it will have already moved before we even started
+//! slice: NonNull::dangling(),
+//! _pin: PhantomPinned,
+//! };
+//! let mut boxed = Box::pin(res);
+//!
+//! let slice = NonNull::from(&boxed.data);
+//! // we know this is safe because modifying a field doesn't move the whole struct
+//! unsafe {
+//! let mut_ref: Pin<&mut Self> = Pin::as_mut(&mut boxed);
+//! Pin::get_unchecked_mut(mut_ref).slice = slice;
+//! }
+//! boxed
+//! }
+//! }
+//!
+//! let unmoved = Unmovable::new("hello".to_string());
+//! // The pointer should point to the correct location,
+//! // so long as the struct hasn't moved.
+//! // Meanwhile, we are free to move the pointer around.
+//! # #[allow(unused_mut)]
+//! let mut still_unmoved = unmoved;
+//! assert_eq!(still_unmoved.slice, NonNull::from(&still_unmoved.data));
+//!
+//! // Since our type doesn't implement Unpin, this will fail to compile:
+//! // let mut new_unmoved = Unmovable::new("world".to_string());
+//! // std::mem::swap(&mut *still_unmoved, &mut *new_unmoved);
+//! ```
+//!
+//! # Example: intrusive doubly-linked list
+//!
+//! In an intrusive doubly-linked list, the collection does not actually allocate
+//! the memory for the elements itself. Allocation is controlled by the clients,
+//! and elements can live on a stack frame that lives shorter than the collection does.
+//!
+//! To make this work, every element has pointers to its predecessor and successor in
+//! the list. Elements can only be added when they are pinned, because moving the elements
+//! around would invalidate the pointers. Moreover, the [`Drop`][Drop] implementation of a linked
+//! list element will patch the pointers of its predecessor and successor to remove itself
+//! from the list.
+//!
+//! Crucially, we have to be able to rely on [`drop`] being called. If an element
+//! could be deallocated or otherwise invalidated without calling [`drop`], the pointers into it
+//! from its neighboring elements would become invalid, which would break the data structure.
+//!
+//! Therefore, pinning also comes with a [`drop`]-related guarantee.
+//!
+//! # `Drop` guarantee
+//!
+//! The purpose of pinning is to be able to rely on the placement of some data in memory.
+//! To make this work, not just moving the data is restricted; deallocating, repurposing, or
+//! otherwise invalidating the memory used to store the data is restricted, too.
+//! Concretely, for pinned data you have to maintain the invariant
+//! that *its memory will not get invalidated or repurposed from the moment it gets pinned until
+//! when [`drop`] is called*. Only once [`drop`] returns or panics may the memory be reused.
+//!
+//! Memory can be "invalidated" by deallocation, but also by
+//! replacing a <code>[Some]\(v)</code> by [`None`], or calling [`Vec::set_len`] to "kill" some
+//! elements off of a vector. It can be repurposed by using [`ptr::write`] to overwrite it without
+//! calling the destructor first. None of this is allowed for pinned data without calling [`drop`].
+//!
+//! This is exactly the kind of guarantee that the intrusive linked list from the previous
+//! section needs to function correctly.
+//!
+//! Notice that this guarantee does *not* mean that memory does not leak! It is still
+//! completely okay to not ever call [`drop`] on a pinned element (e.g., you can still
+//! call [`mem::forget`] on a <code>[Pin]<[Box]\<T>></code>). In the example of the doubly-linked
+//! list, that element would just stay in the list. However, you must not free or reuse the storage
+//! *without calling [`drop`]*.
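+//!
+//! For instance (an illustrative sketch), leaking a pinned value is allowed,
+//! because its memory is then never invalidated or repurposed at all:
+//!
+//! ```
+//! use std::pin::Pin;
+//!
+//! let pinned: Pin<Box<u8>> = Box::pin(42);
+//! // `drop` is never called, but that is fine: the guarantee only forbids
+//! // invalidating the memory *without* dropping, not leaking it.
+//! std::mem::forget(pinned);
+//! ```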
+//!
+//! # `Drop` implementation
+//!
+//! If your type uses pinning (such as the two examples above), you have to be careful
+//! when implementing [`Drop`][Drop]. The [`drop`] function takes <code>[&mut] self</code>, but this
+//! is called *even if your type was previously pinned*! It is as if the
+//! compiler automatically called [`Pin::get_unchecked_mut`].
+//!
+//! This can never cause a problem in safe code because implementing a type that
+//! relies on pinning requires unsafe code, but be aware that deciding to make
+//! use of pinning in your type (for example by implementing some operation on
+//! <code>[Pin]<[&]Self></code> or <code>[Pin]<[&mut] Self></code>) has consequences for your
+//! [`Drop`][Drop] implementation as well: if an element of your type could have been pinned,
+//! you must treat [`Drop`][Drop] as implicitly taking <code>[Pin]<[&mut] Self></code>.
+//!
+//! For example, you could implement [`Drop`][Drop] as follows:
+//!
+//! ```rust,no_run
+//! # use std::pin::Pin;
+//! # struct Type { }
+//! impl Drop for Type {
+//! fn drop(&mut self) {
+//! // `new_unchecked` is okay because we know this value is never used
+//! // again after being dropped.
+//! inner_drop(unsafe { Pin::new_unchecked(self)});
+//! fn inner_drop(this: Pin<&mut Type>) {
+//! // Actual drop code goes here.
+//! }
+//! }
+//! }
+//! ```
+//!
+//! The function `inner_drop` has the type that [`drop`] *should* have, so this makes sure that
+//! you do not accidentally use `self`/`this` in a way that is in conflict with pinning.
+//!
+//! Moreover, if your type is `#[repr(packed)]`, the compiler will automatically
+//! move fields around to be able to drop them. It might even do
+//! that for fields that happen to be sufficiently aligned. As a consequence, you cannot use
+//! pinning with a `#[repr(packed)]` type.
+//!
+//! # Projections and Structural Pinning
+//!
+//! When working with pinned structs, the question arises how one can access the
+//! fields of that struct in a method that takes just <code>[Pin]<[&mut] Struct></code>.
+//! The usual approach is to write helper methods (so called *projections*)
+//! that turn <code>[Pin]<[&mut] Struct></code> into a reference to the field, but what type should
+//! that reference have? Is it <code>[Pin]<[&mut] Field></code> or <code>[&mut] Field</code>?
+//! The same question arises with the fields of an `enum`, and also when considering
+//! container/wrapper types such as <code>[Vec]\<T></code>, <code>[Box]\<T></code>,
+//! or <code>[RefCell]\<T></code>. (This question applies to both mutable and shared references;
+//! we just use the more common case of mutable references here for illustration.)
+//!
+//! It turns out that it is actually up to the author of the data structure to decide whether
+//! the pinned projection for a particular field turns <code>[Pin]<[&mut] Struct></code>
+//! into <code>[Pin]<[&mut] Field></code> or <code>[&mut] Field</code>. There are some
+//! constraints though, and the most important constraint is *consistency*:
+//! every field can be *either* projected to a pinned reference, *or* have
+//! pinning removed as part of the projection. If both are done for the same field,
+//! that will likely be unsound!
+//!
+//! As the author of a data structure you get to decide for each field whether pinning
+//! "propagates" to this field or not. Pinning that propagates is also called "structural",
+//! because it follows the structure of the type.
+//! In the following subsections, we describe the considerations that have to be made
+//! for either choice.
+//!
+//! ## Pinning *is not* structural for `field`
+//!
+//! It may seem counter-intuitive that the field of a pinned struct might not be pinned,
+//! but that is actually the easiest choice: if a <code>[Pin]<[&mut] Field></code> is never created,
+//! nothing can go wrong! So, if you decide that some field does not have structural pinning,
+//! all you have to ensure is that you never create a pinned reference to that field.
+//!
+//! Fields without structural pinning may have a projection method that turns
+//! <code>[Pin]<[&mut] Struct></code> into <code>[&mut] Field</code>:
+//!
+//! ```rust,no_run
+//! # use std::pin::Pin;
+//! # type Field = i32;
+//! # struct Struct { field: Field }
+//! impl Struct {
+//! fn pin_get_field(self: Pin<&mut Self>) -> &mut Field {
+//! // This is okay because `field` is never considered pinned.
+//! unsafe { &mut self.get_unchecked_mut().field }
+//! }
+//! }
+//! ```
+//!
+//! You may also <code>impl [Unpin] for Struct</code> *even if* the type of `field`
+//! is not [`Unpin`]. What that type thinks about pinning is not relevant
+//! when no <code>[Pin]<[&mut] Field></code> is ever created.
+//!
+//! ## Pinning *is* structural for `field`
+//!
+//! The other option is to decide that pinning is "structural" for `field`,
+//! meaning that if the struct is pinned then so is the field.
+//!
+//! This allows writing a projection that creates a <code>[Pin]<[&mut] Field></code>, thus
+//! witnessing that the field is pinned:
+//!
+//! ```rust,no_run
+//! # use std::pin::Pin;
+//! # type Field = i32;
+//! # struct Struct { field: Field }
+//! impl Struct {
+//! fn pin_get_field(self: Pin<&mut Self>) -> Pin<&mut Field> {
+//! // This is okay because `field` is pinned when `self` is.
+//! unsafe { self.map_unchecked_mut(|s| &mut s.field) }
+//! }
+//! }
+//! ```
+//!
+//! However, structural pinning comes with a few extra requirements:
+//!
+//! 1. The struct must only be [`Unpin`] if all the structural fields are
+//! [`Unpin`]. This is the default, but [`Unpin`] is a safe trait, so as the author of
+//! the struct it is your responsibility *not* to add something like
+//! <code>impl\<T> [Unpin] for Struct\<T></code>. (Notice that adding a projection operation
+//! requires unsafe code, so the fact that [`Unpin`] is a safe trait does not break
+//! the principle that you only have to worry about any of this if you use [`unsafe`].)
+//! 2. The destructor of the struct must not move structural fields out of its argument. This
+//! is the exact point that was raised in the [previous section][drop-impl]: [`drop`] takes
+//! <code>[&mut] self</code>, but the struct (and hence its fields) might have been pinned
+//! before. You have to guarantee that you do not move a field inside your [`Drop`][Drop]
+//! implementation. In particular, as explained previously, this means that your struct
+//! must *not* be `#[repr(packed)]`.
+//! See that section for how to write [`drop`] in a way that the compiler can help you
+//! not accidentally break pinning.
+//! 3. You must make sure that you uphold the [`Drop` guarantee][drop-guarantee]:
+//! once your struct is pinned, the memory that contains the
+//! content is not overwritten or deallocated without calling the content's destructors.
+//! This can be tricky, as witnessed by <code>[VecDeque]\<T></code>: the destructor of
+//! <code>[VecDeque]\<T></code> can fail to call [`drop`] on all elements if one of the
+//! destructors panics. This violates the [`Drop`][Drop] guarantee, because it can lead to
+//! elements being deallocated without their destructor being called.
+//! (<code>[VecDeque]\<T></code> has no pinning projections, so this
+//! does not cause unsoundness.)
+//! 4. You must not offer any other operations that could lead to data being moved out of
+//! the structural fields when your type is pinned. For example, if the struct contains an
+//! <code>[Option]\<T></code> and there is a [`take`][Option::take]-like operation with type
+//! <code>fn([Pin]<[&mut] Struct\<T>>) -> [Option]\<T></code>,
+//! that operation can be used to move a `T` out of a pinned `Struct<T>` – which means
+//! pinning cannot be structural for the field holding this data.
+//!
+//! For a more complex example of moving data out of a pinned type,
+//! imagine if <code>[RefCell]\<T></code> had a method
+//! <code>fn get_pin_mut(self: [Pin]<[&mut] Self>) -> [Pin]<[&mut] T></code>.
+//! Then we could do the following:
+//! ```compile_fail
+//! fn exploit_ref_cell<T>(rc: Pin<&mut RefCell<T>>) {
+//! { let p = rc.as_mut().get_pin_mut(); } // Here we get pinned access to the `T`.
+//! let rc_shr: &RefCell<T> = rc.into_ref().get_ref();
+//! let b = rc_shr.borrow_mut();
+//! let content = &mut *b; // And here we have `&mut T` to the same data.
+//! }
+//! ```
+//! This is catastrophic: it means we can first pin the content of the
+//! <code>[RefCell]\<T></code> (using <code>[RefCell]::get_pin_mut</code>) and then move that
+//! content using the mutable reference we got later.
+//!
+//! ## Examples
+//!
+//! For a type like <code>[Vec]\<T></code>, both possibilities (structural pinning or not) make
+//! sense. A <code>[Vec]\<T></code> with structural pinning could have `get_pin`/`get_pin_mut`
+//! methods to get pinned references to elements. However, it could *not* allow calling
+//! [`pop`][Vec::pop] on a pinned <code>[Vec]\<T></code> because that would move the (structurally
+//! pinned) contents! Nor could it allow [`push`][Vec::push], which might reallocate and thus also
+//! move the contents.
+//!
+//! A <code>[Vec]\<T></code> without structural pinning could
+//! <code>impl\<T> [Unpin] for [Vec]\<T></code>, because the contents are never pinned
+//! and the <code>[Vec]\<T></code> itself is fine with being moved as well.
+//! At that point pinning just has no effect on the vector at all.
+//!
+//! In the standard library, pointer types generally do not have structural pinning,
+//! and thus they do not offer pinning projections. This is why <code>[Box]\<T>: [Unpin]</code>
+//! holds for all `T`. It makes sense to do this for pointer types, because moving the
+//! <code>[Box]\<T></code> does not actually move the `T`: the <code>[Box]\<T></code> can be freely
+//! movable (aka [`Unpin`]) even if the `T` is not. In fact, even <code>[Pin]<[Box]\<T>></code> and
+//! <code>[Pin]<[&mut] T></code> are always [`Unpin`] themselves, for the same reason:
+//! their contents (the `T`) are pinned, but the pointers themselves can be moved without moving
+//! the pinned data. For both <code>[Box]\<T></code> and <code>[Pin]<[Box]\<T>></code>,
+//! whether the content is pinned is entirely independent of whether the
+//! pointer is pinned, meaning pinning is *not* structural.
+//!
+//! When implementing a [`Future`] combinator, you will usually need structural pinning
+//! for the nested futures, as you need to get pinned references to them to call [`poll`].
+//! But if your combinator contains any other data that does not need to be pinned,
+//! you can make those fields not structural and hence freely access them with a
+//! mutable reference even when you just have <code>[Pin]<[&mut] Self></code> (such as in your own
+//! [`poll`] implementation).
+//!
+//! [Deref]: crate::ops::Deref "ops::Deref"
+//! [`Deref`]: crate::ops::Deref "ops::Deref"
+//! [Target]: crate::ops::Deref::Target "ops::Deref::Target"
+//! [`DerefMut`]: crate::ops::DerefMut "ops::DerefMut"
+//! [`mem::swap`]: crate::mem::swap "mem::swap"
+//! [`mem::forget`]: crate::mem::forget "mem::forget"
+//! [Vec]: ../../std/vec/struct.Vec.html "Vec"
+//! [`Vec::set_len`]: ../../std/vec/struct.Vec.html#method.set_len "Vec::set_len"
+//! [Box]: ../../std/boxed/struct.Box.html "Box"
+//! [Vec::pop]: ../../std/vec/struct.Vec.html#method.pop "Vec::pop"
+//! [Vec::push]: ../../std/vec/struct.Vec.html#method.push "Vec::push"
+//! [Rc]: ../../std/rc/struct.Rc.html "rc::Rc"
+//! [RefCell]: crate::cell::RefCell "cell::RefCell"
+//! [`drop`]: Drop::drop
+//! [VecDeque]: ../../std/collections/struct.VecDeque.html "collections::VecDeque"
+//! [`ptr::write`]: crate::ptr::write "ptr::write"
+//! [`Future`]: crate::future::Future "future::Future"
+//! [drop-impl]: #drop-implementation
+//! [drop-guarantee]: #drop-guarantee
+//! [`poll`]: crate::future::Future::poll "future::Future::poll"
+//! [&]: reference "shared reference"
+//! [&mut]: reference "mutable reference"
+//! [`unsafe`]: ../../std/keyword.unsafe.html "keyword unsafe"
+
+#![stable(feature = "pin", since = "1.33.0")]
+
+use crate::cmp::{self, PartialEq, PartialOrd};
+use crate::fmt;
+use crate::hash::{Hash, Hasher};
+use crate::marker::{Sized, Unpin};
+use crate::ops::{CoerceUnsized, Deref, DerefMut, DispatchFromDyn, Receiver};
+
+/// A pinned pointer.
+///
+/// This is a wrapper around a kind of pointer which makes that pointer "pin" its
+/// value in place, preventing the value referenced by that pointer from being moved
+/// unless it implements [`Unpin`].
+///
+/// *See the [`pin` module] documentation for an explanation of pinning.*
+///
+/// [`pin` module]: self
+//
+// Note: the `Clone` derive below causes unsoundness as it's possible to implement
+// `Clone` for mutable references.
+// See <https://internals.rust-lang.org/t/unsoundness-in-pin/11311> for more details.
+#[stable(feature = "pin", since = "1.33.0")]
+#[lang = "pin"]
+#[fundamental]
+#[repr(transparent)]
+#[derive(Copy, Clone)]
+pub struct Pin<P> {
+ // FIXME(#93176): this field is made `#[unstable] #[doc(hidden)] pub` to:
+ // - deter downstream users from accessing it (which would be unsound!),
+ // - let the `pin!` macro access it (such a macro requires using struct
+ // literal syntax in order to benefit from lifetime extension).
+ // Long-term, `unsafe` fields or macro hygiene are expected to offer more robust alternatives.
+ #[unstable(feature = "unsafe_pin_internals", issue = "none")]
+ #[doc(hidden)]
+ pub pointer: P,
+}
+
+// The following implementations aren't derived in order to avoid soundness
+// issues. `&self.pointer` should not be accessible to untrusted trait
+// implementations.
+//
+// See <https://internals.rust-lang.org/t/unsoundness-in-pin/11311/73> for more details.
+
+#[stable(feature = "pin_trait_impls", since = "1.41.0")]
+impl<P: Deref, Q: Deref> PartialEq<Pin<Q>> for Pin<P>
+where
+ P::Target: PartialEq<Q::Target>,
+{
+ fn eq(&self, other: &Pin<Q>) -> bool {
+ P::Target::eq(self, other)
+ }
+
+ fn ne(&self, other: &Pin<Q>) -> bool {
+ P::Target::ne(self, other)
+ }
+}
+
+#[stable(feature = "pin_trait_impls", since = "1.41.0")]
+impl<P: Deref<Target: Eq>> Eq for Pin<P> {}
+
+#[stable(feature = "pin_trait_impls", since = "1.41.0")]
+impl<P: Deref, Q: Deref> PartialOrd<Pin<Q>> for Pin<P>
+where
+ P::Target: PartialOrd<Q::Target>,
+{
+ fn partial_cmp(&self, other: &Pin<Q>) -> Option<cmp::Ordering> {
+ P::Target::partial_cmp(self, other)
+ }
+
+ fn lt(&self, other: &Pin<Q>) -> bool {
+ P::Target::lt(self, other)
+ }
+
+ fn le(&self, other: &Pin<Q>) -> bool {
+ P::Target::le(self, other)
+ }
+
+ fn gt(&self, other: &Pin<Q>) -> bool {
+ P::Target::gt(self, other)
+ }
+
+ fn ge(&self, other: &Pin<Q>) -> bool {
+ P::Target::ge(self, other)
+ }
+}
+
+#[stable(feature = "pin_trait_impls", since = "1.41.0")]
+impl<P: Deref<Target: Ord>> Ord for Pin<P> {
+ fn cmp(&self, other: &Self) -> cmp::Ordering {
+ P::Target::cmp(self, other)
+ }
+}
+
+#[stable(feature = "pin_trait_impls", since = "1.41.0")]
+impl<P: Deref<Target: Hash>> Hash for Pin<P> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ P::Target::hash(self, state);
+ }
+}
+
+impl<P: Deref<Target: Unpin>> Pin<P> {
+ /// Constructs a new `Pin<P>` around a pointer to some data of a type that
+ /// implements [`Unpin`].
+ ///
+ /// Unlike `Pin::new_unchecked`, this method is safe because the pointer
+ /// `P` dereferences to an [`Unpin`] type, which cancels the pinning guarantees.
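+ ///
+ /// For example (a small sketch with an `Unpin` pointee):
+ ///
+ /// ```
+ /// use std::pin::Pin;
+ ///
+ /// let mut val: u8 = 5;
+ /// // `u8: Unpin`, so this pin places no restrictions on `val`.
+ /// let pinned: Pin<&mut u8> = Pin::new(&mut val);
+ /// assert_eq!(*pinned, 5);
+ /// ```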
+ #[inline(always)]
+ #[rustc_const_unstable(feature = "const_pin", issue = "76654")]
+ #[stable(feature = "pin", since = "1.33.0")]
+ pub const fn new(pointer: P) -> Pin<P> {
+ // SAFETY: the value pointed to is `Unpin`, and so has no requirements
+ // around pinning.
+ unsafe { Pin::new_unchecked(pointer) }
+ }
+
+ /// Unwraps this `Pin<P>`, returning the underlying pointer.
+ ///
+ /// This requires that the data inside this `Pin` is [`Unpin`] so that we
+ /// can ignore the pinning invariants when unwrapping it.
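+ ///
+ /// For example (a small sketch):
+ ///
+ /// ```
+ /// use std::pin::Pin;
+ ///
+ /// let mut val: u8 = 5;
+ /// let pinned: Pin<&mut u8> = Pin::new(&mut val);
+ /// // `u8: Unpin`, so the pointer can safely be taken back out.
+ /// let inner: &mut u8 = Pin::into_inner(pinned);
+ /// assert_eq!(*inner, 5);
+ /// ```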
+ #[inline(always)]
+ #[rustc_const_unstable(feature = "const_pin", issue = "76654")]
+ #[stable(feature = "pin_into_inner", since = "1.39.0")]
+ pub const fn into_inner(pin: Pin<P>) -> P {
+ pin.pointer
+ }
+}
+
+impl<P: Deref> Pin<P> {
+ /// Constructs a new `Pin<P>` around a reference to some data of a type that
+ /// may or may not implement `Unpin`.
+ ///
+ /// If `pointer` dereferences to an `Unpin` type, `Pin::new` should be used
+ /// instead.
+ ///
+ /// # Safety
+ ///
+ /// This constructor is unsafe because we cannot guarantee that the data
+ /// pointed to by `pointer` is pinned, meaning that the data will not be moved or
+ /// its storage invalidated until it gets dropped. If the constructed `Pin<P>` does
+ /// not guarantee that the data `P` points to is pinned, that is a violation of
+ /// the API contract and may lead to undefined behavior in later (safe) operations.
+ ///
+ /// By using this method, you are making a promise about the `P::Deref` and
+ /// `P::DerefMut` implementations, if they exist. Most importantly, they
+ /// must not move out of their `self` arguments: `Pin::as_mut` and `Pin::as_ref`
+ /// will call `DerefMut::deref_mut` and `Deref::deref` *on the pinned pointer*
+ /// and expect these methods to uphold the pinning invariants.
+ /// Moreover, by calling this method you promise that the reference `P`
+ /// dereferences to will not be moved out of again; in particular, it
+ /// must not be possible to obtain a `&mut P::Target` and then
+ /// move out of that reference (using, for example, [`mem::swap`]).
+ ///
+ /// For example, calling `Pin::new_unchecked` on an `&'a mut T` is unsafe because
+ /// while you are able to pin it for the given lifetime `'a`, you have no control
+ /// over whether it is kept pinned once `'a` ends:
+ /// ```
+ /// use std::mem;
+ /// use std::pin::Pin;
+ ///
+ /// fn move_pinned_ref<T>(mut a: T, mut b: T) {
+ /// unsafe {
+ /// let p: Pin<&mut T> = Pin::new_unchecked(&mut a);
+ /// // This should mean the pointee `a` can never move again.
+ /// }
+ /// mem::swap(&mut a, &mut b);
+ /// // The address of `a` changed to `b`'s stack slot, so `a` got moved even
+ /// // though we have previously pinned it! We have violated the pinning API contract.
+ /// }
+ /// ```
+ /// A value, once pinned, must remain pinned forever (unless its type implements `Unpin`).
+ ///
+ /// Similarly, calling `Pin::new_unchecked` on an `Rc<T>` is unsafe because there could be
+ /// aliases to the same data that are not subject to the pinning restrictions:
+ /// ```
+ /// use std::rc::Rc;
+ /// use std::pin::Pin;
+ ///
+ /// fn move_pinned_rc<T>(mut x: Rc<T>) {
+ /// let pinned = unsafe { Pin::new_unchecked(Rc::clone(&x)) };
+ /// {
+ /// let p: Pin<&T> = pinned.as_ref();
+ /// // This should mean the pointee can never move again.
+ /// }
+ /// drop(pinned);
+ /// let content = Rc::get_mut(&mut x).unwrap();
+ /// // Now, if `x` was the only reference, we have a mutable reference to
+ /// // data that we pinned above, which we could use to move it as we have
+ /// // seen in the previous example. We have violated the pinning API contract.
+ /// }
+ /// ```
+ ///
+ /// [`mem::swap`]: crate::mem::swap
+ #[lang = "new_unchecked"]
+ #[inline(always)]
+ #[rustc_const_unstable(feature = "const_pin", issue = "76654")]
+ #[stable(feature = "pin", since = "1.33.0")]
+ pub const unsafe fn new_unchecked(pointer: P) -> Pin<P> {
+ Pin { pointer }
+ }
+
+ /// Gets a pinned shared reference from this pinned pointer.
+ ///
+ /// This is a generic method to go from `&Pin<Pointer<T>>` to `Pin<&T>`.
+ /// It is safe because, as part of the contract of `Pin::new_unchecked`,
+ /// the pointee cannot move after `Pin<Pointer<T>>` got created.
+ /// "Malicious" implementations of `Pointer::Deref` are likewise
+ /// ruled out by the contract of `Pin::new_unchecked`.
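+ ///
+ /// # Example
+ ///
+ /// A minimal sketch of reborrowing a pinned box as a `Pin<&T>`:
+ ///
+ /// ```
+ /// use std::pin::Pin;
+ ///
+ /// let pinned: Pin<Box<u8>> = Box::pin(7);
+ /// // `as_ref` borrows the `Pin<Box<u8>>` without consuming it.
+ /// let r: Pin<&u8> = pinned.as_ref();
+ /// assert_eq!(*r, 7);
+ /// ```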
+ #[stable(feature = "pin", since = "1.33.0")]
+ #[inline(always)]
+ pub fn as_ref(&self) -> Pin<&P::Target> {
+ // SAFETY: see documentation on this function
+ unsafe { Pin::new_unchecked(&*self.pointer) }
+ }
+
+ /// Unwraps this `Pin<P>`, returning the underlying pointer.
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe. You must guarantee that you will continue to
+ /// treat the pointer `P` as pinned after you call this function, so that
+ /// the invariants on the `Pin` type can be upheld. If the code using the
+ /// resulting `P` does not continue to maintain the pinning invariants that
+ /// is a violation of the API contract and may lead to undefined behavior in
+ /// later (safe) operations.
+ ///
+ /// If the underlying data is [`Unpin`], [`Pin::into_inner`] should be used
+ /// instead.
+ #[inline(always)]
+ #[rustc_const_unstable(feature = "const_pin", issue = "76654")]
+ #[stable(feature = "pin_into_inner", since = "1.39.0")]
+ pub const unsafe fn into_inner_unchecked(pin: Pin<P>) -> P {
+ pin.pointer
+ }
+}
+
+impl<P: DerefMut> Pin<P> {
+ /// Gets a pinned mutable reference from this pinned pointer.
+ ///
+ /// This is a generic method to go from `&mut Pin<Pointer<T>>` to `Pin<&mut T>`.
+ /// It is safe because, as part of the contract of `Pin::new_unchecked`,
+ /// the pointee cannot move after `Pin<Pointer<T>>` got created.
+ /// "Malicious" implementations of `Pointer::DerefMut` are likewise
+ /// ruled out by the contract of `Pin::new_unchecked`.
+ ///
+ /// This method is useful when making multiple calls to functions that consume the pinned type.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::pin::Pin;
+ ///
+ /// # struct Type {}
+ /// impl Type {
+ /// fn method(self: Pin<&mut Self>) {
+ /// // do something
+ /// }
+ ///
+ /// fn call_method_twice(mut self: Pin<&mut Self>) {
+ /// // `method` consumes `self`, so reborrow the `Pin<&mut Self>` via `as_mut`.
+ /// self.as_mut().method();
+ /// self.as_mut().method();
+ /// }
+ /// }
+ /// ```
+ #[stable(feature = "pin", since = "1.33.0")]
+ #[inline(always)]
+ pub fn as_mut(&mut self) -> Pin<&mut P::Target> {
+ // SAFETY: see documentation on this function
+ unsafe { Pin::new_unchecked(&mut *self.pointer) }
+ }
+
+ /// Assigns a new value to the memory behind the pinned reference.
+ ///
+ /// This overwrites pinned data, but that is okay: its destructor gets
+ /// run before being overwritten, so no pinning guarantee is violated.
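+ ///
+ /// # Example
+ ///
+ /// A minimal sketch, assuming an `Unpin` target so the pin can be observed afterwards:
+ ///
+ /// ```
+ /// use std::pin::Pin;
+ ///
+ /// let mut val: u8 = 5;
+ /// let mut pinned: Pin<&mut u8> = Pin::new(&mut val);
+ /// // Overwrites the pointee; the old value is dropped in place first.
+ /// pinned.set(10);
+ /// assert_eq!(*pinned, 10);
+ /// ```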
+ #[stable(feature = "pin", since = "1.33.0")]
+ #[inline(always)]
+ pub fn set(&mut self, value: P::Target)
+ where
+ P::Target: Sized,
+ {
+ *(self.pointer) = value;
+ }
+}
+
+impl<'a, T: ?Sized> Pin<&'a T> {
+ /// Constructs a new pin by mapping the interior value.
+ ///
+ /// For example, if you wanted to get a `Pin` of a field of something,
+ /// you could use this to get access to that field in one line of code.
+ /// However, there are several gotchas with these "pinning projections";
+ /// see the [`pin` module] documentation for further details on that topic.
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe. You must guarantee that the data you return
+ /// will not move so long as the argument value does not move (for example,
+ /// because it is one of the fields of that value), and also that you do
+ /// not move out of the argument you receive to the interior function.
+ ///
+ /// [`pin` module]: self#projections-and-structural-pinning
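+ ///
+ /// For illustration, a sketch of a structural projection (the `Wrapper` type and its
+ /// `field` are hypothetical):
+ ///
+ /// ```
+ /// use std::pin::Pin;
+ ///
+ /// struct Wrapper { field: u8 }
+ ///
+ /// fn project(w: Pin<&Wrapper>) -> Pin<&u8> {
+ ///     // SAFETY: `field` is never moved independently of its `Wrapper`,
+ ///     // i.e. it is treated as structurally pinned.
+ ///     unsafe { w.map_unchecked(|w| &w.field) }
+ /// }
+ ///
+ /// let w = Wrapper { field: 3 };
+ /// let pinned = Pin::new(&w); // fine here: `Wrapper` is `Unpin`
+ /// assert_eq!(*project(pinned), 3);
+ /// ```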
+ #[stable(feature = "pin", since = "1.33.0")]
+ pub unsafe fn map_unchecked<U, F>(self, func: F) -> Pin<&'a U>
+ where
+ U: ?Sized,
+ F: FnOnce(&T) -> &U,
+ {
+ let pointer = &*self.pointer;
+ let new_pointer = func(pointer);
+
+ // SAFETY: the safety contract for `new_unchecked` must be
+ // upheld by the caller.
+ unsafe { Pin::new_unchecked(new_pointer) }
+ }
+
+ /// Gets a shared reference out of a pin.
+ ///
+ /// This is safe because it is not possible to move out of a shared reference.
+ /// It may seem like there is an issue here with interior mutability: in fact,
+ /// it *is* possible to move a `T` out of a `&RefCell<T>`. However, this is
+ /// not a problem as long as there does not also exist a `Pin<&T>` pointing
+ /// to the same data, and `RefCell<T>` does not let you create a pinned reference
+ /// to its contents. See the discussion on ["pinning projections"] for further
+ /// details.
+ ///
+ /// Note: `Pin` also implements `Deref` to the target, which can be used
+ /// to access the inner value. However, `Deref` only provides a reference
+ /// that lives for as long as the borrow of the `Pin`, not the lifetime of
+ /// the `Pin` itself. This method allows turning the `Pin` into a reference
+ /// with the same lifetime as the original `Pin`.
+ ///
+ /// ["pinning projections"]: self#projections-and-structural-pinning
+ #[inline(always)]
+ #[must_use]
+ #[rustc_const_unstable(feature = "const_pin", issue = "76654")]
+ #[stable(feature = "pin", since = "1.33.0")]
+ pub const fn get_ref(self) -> &'a T {
+ self.pointer
+ }
+}
+
+impl<'a, T: ?Sized> Pin<&'a mut T> {
+ /// Converts this `Pin<&mut T>` into a `Pin<&T>` with the same lifetime.
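+ ///
+ /// A minimal sketch:
+ ///
+ /// ```
+ /// use std::pin::Pin;
+ ///
+ /// let mut val: u8 = 5;
+ /// let pinned: Pin<&mut u8> = Pin::new(&mut val);
+ /// // Give up mutable access in exchange for a shared pinned reference.
+ /// let shared: Pin<&u8> = pinned.into_ref();
+ /// assert_eq!(*shared, 5);
+ /// ```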
+ #[inline(always)]
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[rustc_const_unstable(feature = "const_pin", issue = "76654")]
+ #[stable(feature = "pin", since = "1.33.0")]
+ pub const fn into_ref(self) -> Pin<&'a T> {
+ Pin { pointer: self.pointer }
+ }
+
+ /// Gets a mutable reference to the data inside of this `Pin`.
+ ///
+ /// This requires that the data inside this `Pin` is `Unpin`.
+ ///
+ /// Note: `Pin` also implements `DerefMut` to the data, which can be used
+ /// to access the inner value. However, `DerefMut` only provides a reference
+ /// that lives for as long as the borrow of the `Pin`, not the lifetime of
+ /// the `Pin` itself. This method allows turning the `Pin` into a reference
+ /// with the same lifetime as the original `Pin`.
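+ ///
+ /// A minimal sketch, relying on `u8: Unpin`:
+ ///
+ /// ```
+ /// use std::pin::Pin;
+ ///
+ /// let mut val: u8 = 5;
+ /// let pinned: Pin<&mut u8> = Pin::new(&mut val);
+ /// // Safe only because the target type is `Unpin`.
+ /// let r: &mut u8 = pinned.get_mut();
+ /// *r = 6;
+ /// assert_eq!(val, 6);
+ /// ```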
+ #[inline(always)]
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "pin", since = "1.33.0")]
+ #[rustc_const_unstable(feature = "const_pin", issue = "76654")]
+ pub const fn get_mut(self) -> &'a mut T
+ where
+ T: Unpin,
+ {
+ self.pointer
+ }
+
+ /// Gets a mutable reference to the data inside of this `Pin`.
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe. You must guarantee that you will never move
+ /// the data out of the mutable reference you receive when you call this
+ /// function, so that the invariants on the `Pin` type can be upheld.
+ ///
+ /// If the underlying data is `Unpin`, `Pin::get_mut` should be used
+ /// instead.
+ #[inline(always)]
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "pin", since = "1.33.0")]
+ #[rustc_const_unstable(feature = "const_pin", issue = "76654")]
+ pub const unsafe fn get_unchecked_mut(self) -> &'a mut T {
+ self.pointer
+ }
+
+ /// Constructs a new pin by mapping the interior value.
+ ///
+ /// For example, if you wanted to get a `Pin` of a field of something,
+ /// you could use this to get access to that field in one line of code.
+ /// However, there are several gotchas with these "pinning projections";
+ /// see the [`pin` module] documentation for further details on that topic.
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe. You must guarantee that the data you return
+ /// will not move so long as the argument value does not move (for example,
+ /// because it is one of the fields of that value), and also that you do
+ /// not move out of the argument you receive to the interior function.
+ ///
+ /// [`pin` module]: self#projections-and-structural-pinning
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "pin", since = "1.33.0")]
+ pub unsafe fn map_unchecked_mut<U, F>(self, func: F) -> Pin<&'a mut U>
+ where
+ U: ?Sized,
+ F: FnOnce(&mut T) -> &mut U,
+ {
+ // SAFETY: the caller is responsible for not moving the
+ // value out of this reference.
+ let pointer = unsafe { Pin::get_unchecked_mut(self) };
+ let new_pointer = func(pointer);
+ // SAFETY: as the value of `this` is guaranteed to not have
+ // been moved out, this call to `new_unchecked` is safe.
+ unsafe { Pin::new_unchecked(new_pointer) }
+ }
+}
+
+impl<T: ?Sized> Pin<&'static T> {
+ /// Get a pinned reference from a static reference.
+ ///
+ /// This is safe, because `T` is borrowed for the `'static` lifetime, which
+ /// never ends.
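+ ///
+ /// A minimal sketch:
+ ///
+ /// ```
+ /// use std::pin::Pin;
+ ///
+ /// static VALUE: u8 = 42;
+ /// // A `'static` shared borrow can always be pinned safely.
+ /// let pinned: Pin<&'static u8> = Pin::static_ref(&VALUE);
+ /// assert_eq!(*pinned, 42);
+ /// ```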
+ #[stable(feature = "pin_static_ref", since = "1.61.0")]
+ #[rustc_const_unstable(feature = "const_pin", issue = "76654")]
+ pub const fn static_ref(r: &'static T) -> Pin<&'static T> {
+ // SAFETY: The 'static borrow guarantees the data will not be
+ // moved/invalidated until it gets dropped (which is never).
+ unsafe { Pin::new_unchecked(r) }
+ }
+}
+
+impl<'a, P: DerefMut> Pin<&'a mut Pin<P>> {
+ /// Gets a pinned mutable reference from this nested pinned pointer.
+ ///
+ /// This is a generic method to go from `Pin<&mut Pin<Pointer<T>>>` to `Pin<&mut T>`. It is
+ /// safe because the existence of a `Pin<Pointer<T>>` ensures that the pointee, `T`, cannot
+ /// move in the future, and this method does not enable the pointee to move. "Malicious"
+ /// implementations of `P::DerefMut` are likewise ruled out by the contract of
+ /// `Pin::new_unchecked`.
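+ ///
+ /// A minimal sketch of the flattening (nightly-only, as the method is unstable):
+ ///
+ /// ```
+ /// #![feature(pin_deref_mut)]
+ /// use std::pin::Pin;
+ ///
+ /// let mut boxed: Pin<Box<u8>> = Box::pin(5);
+ /// // `Pin<Box<u8>>` is `Unpin` (because `Box<u8>` is), so `Pin::new` suffices.
+ /// let nested: Pin<&mut Pin<Box<u8>>> = Pin::new(&mut boxed);
+ /// let inner: Pin<&mut u8> = nested.as_deref_mut();
+ /// assert_eq!(*inner, 5);
+ /// ```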
+ #[unstable(feature = "pin_deref_mut", issue = "86918")]
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[inline(always)]
+ pub fn as_deref_mut(self) -> Pin<&'a mut P::Target> {
+ // SAFETY: What we're asserting here is that going from
+ //
+ // Pin<&mut Pin<P>>
+ //
+ // to
+ //
+ // Pin<&mut P::Target>
+ //
+ // is safe.
+ //
+ // We need to ensure that two things hold for that to be the case:
+ //
+ // 1) Once we give out a `Pin<&mut P::Target>`, an `&mut P::Target` will not be given out.
+ // 2) By giving out a `Pin<&mut P::Target>`, we do not risk violating the
+ // pinning invariants of `Pin<&mut Pin<P>>`.
+ //
+ // The existence of `Pin<P>` is sufficient to guarantee #1: since we already have a
+ // `Pin<P>`, it must already uphold the pinning guarantees, which must mean that
+ // `Pin<&mut P::Target>` does as well, since `Pin::as_mut` is safe. We do not have to rely
+ // on the fact that P is _also_ pinned.
+ //
+ // For #2, we need to ensure that code given a `Pin<&mut P::Target>` cannot cause the
+ // `Pin<P>` to move. That is not possible, since a `Pin<&mut P::Target>` no longer retains
+ // any access to the `P` itself, much less to the `Pin<P>`.
+ unsafe { self.get_unchecked_mut() }.as_mut()
+ }
+}
+
+impl<T: ?Sized> Pin<&'static mut T> {
+ /// Get a pinned mutable reference from a static mutable reference.
+ ///
+ /// This is safe, because `T` is borrowed for the `'static` lifetime, which
+ /// never ends.
+ #[stable(feature = "pin_static_ref", since = "1.61.0")]
+ #[rustc_const_unstable(feature = "const_pin", issue = "76654")]
+ pub const fn static_mut(r: &'static mut T) -> Pin<&'static mut T> {
+ // SAFETY: The 'static borrow guarantees the data will not be
+ // moved/invalidated until it gets dropped (which is never).
+ unsafe { Pin::new_unchecked(r) }
+ }
+}
+
+#[stable(feature = "pin", since = "1.33.0")]
+impl<P: Deref> Deref for Pin<P> {
+ type Target = P::Target;
+ fn deref(&self) -> &P::Target {
+ Pin::get_ref(Pin::as_ref(self))
+ }
+}
+
+#[stable(feature = "pin", since = "1.33.0")]
+impl<P: DerefMut<Target: Unpin>> DerefMut for Pin<P> {
+ fn deref_mut(&mut self) -> &mut P::Target {
+ Pin::get_mut(Pin::as_mut(self))
+ }
+}
+
+#[unstable(feature = "receiver_trait", issue = "none")]
+impl<P: Receiver> Receiver for Pin<P> {}
+
+#[stable(feature = "pin", since = "1.33.0")]
+impl<P: fmt::Debug> fmt::Debug for Pin<P> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&self.pointer, f)
+ }
+}
+
+#[stable(feature = "pin", since = "1.33.0")]
+impl<P: fmt::Display> fmt::Display for Pin<P> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&self.pointer, f)
+ }
+}
+
+#[stable(feature = "pin", since = "1.33.0")]
+impl<P: fmt::Pointer> fmt::Pointer for Pin<P> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Pointer::fmt(&self.pointer, f)
+ }
+}
+
+// Note: this means that any impl of `CoerceUnsized` that allows coercing from
+// a type that impls `Deref<Target=impl !Unpin>` to a type that impls
+// `Deref<Target=impl Unpin>` is unsound. Any such impl would probably be unsound
+// for other reasons, though, so we just need to take care not to allow such
+// impls to land in std.
+#[stable(feature = "pin", since = "1.33.0")]
+impl<P, U> CoerceUnsized<Pin<U>> for Pin<P> where P: CoerceUnsized<U> {}
+
+#[stable(feature = "pin", since = "1.33.0")]
+impl<P, U> DispatchFromDyn<Pin<U>> for Pin<P> where P: DispatchFromDyn<U> {}
+
+/// Constructs a <code>[Pin]<[&mut] T></code>, by pinning[^1] a `value: T` _locally_[^2].
+///
+/// Unlike [`Box::pin`], this does not involve a heap allocation.
+///
+/// [^1]: If the (type `T` of the) given value does not implement [`Unpin`], then this
+/// effectively pins the `value` in memory, where it cannot be moved.
+/// Otherwise, <code>[Pin]<[&mut] T></code> behaves like <code>[&mut] T</code>, and operations such
+/// as [`mem::replace()`][crate::mem::replace] will allow extracting that value, and therefore,
+/// moving it.
+/// See [the `Unpin` section of the `pin` module][self#unpin] for more info.
+///
+/// [^2]: This is usually dubbed "stack"-pinning. And while local values are almost always located
+/// on the stack (_e.g._, when within the body of a non-`async` function), the truth is that inside
+/// the body of an `async fn` or block (more generally, the body of a generator), any locals
+/// crossing an `.await` point (a `yield` point) end up being part of the state captured by the
+/// `Future` (by the `Generator`), and thus will be stored wherever that one is.
+///
+/// ## Examples
+///
+/// ### Basic usage
+///
+/// ```rust
+/// #![feature(pin_macro)]
+/// # use core::marker::PhantomPinned as Foo;
+/// use core::pin::{pin, Pin};
+///
+/// fn stuff(foo: Pin<&mut Foo>) {
+/// // …
+/// # let _ = foo;
+/// }
+///
+/// let pinned_foo = pin!(Foo { /* … */ });
+/// stuff(pinned_foo);
+/// // or, directly:
+/// stuff(pin!(Foo { /* … */ }));
+/// ```
+///
+/// ### Manually polling a `Future` (without `Unpin` bounds)
+///
+/// ```rust
+/// #![feature(pin_macro)]
+/// use std::{
+/// future::Future,
+/// pin::pin,
+/// task::{Context, Poll},
+/// thread,
+/// };
+/// # use std::{sync::Arc, task::Wake, thread::Thread};
+///
+/// # /// A waker that wakes up the current thread when called.
+/// # struct ThreadWaker(Thread);
+/// #
+/// # impl Wake for ThreadWaker {
+/// # fn wake(self: Arc<Self>) {
+/// # self.0.unpark();
+/// # }
+/// # }
+/// #
+/// /// Runs a future to completion.
+/// fn block_on<Fut: Future>(fut: Fut) -> Fut::Output {
+/// let waker_that_unparks_thread = // …
+/// # Arc::new(ThreadWaker(thread::current())).into();
+/// let mut cx = Context::from_waker(&waker_that_unparks_thread);
+/// // Pin the future so it can be polled.
+/// let mut pinned_fut = pin!(fut);
+/// loop {
+/// match pinned_fut.as_mut().poll(&mut cx) {
+/// Poll::Pending => thread::park(),
+/// Poll::Ready(res) => return res,
+/// }
+/// }
+/// }
+/// #
+/// # assert_eq!(42, block_on(async { 42 }));
+/// ```
+///
+/// ### With `Generator`s
+///
+/// ```rust
+/// #![feature(generators, generator_trait, pin_macro)]
+/// use core::{
+/// ops::{Generator, GeneratorState},
+/// pin::pin,
+/// };
+///
+/// fn generator_fn() -> impl Generator<Yield = usize, Return = ()> /* not Unpin */ {
+/// // Allow generator to be self-referential (not `Unpin`)
+/// // vvvvvv so that locals can cross yield points.
+/// static || {
+/// let foo = String::from("foo");
+/// let foo_ref = &foo; // ------+
+/// yield 0; // | <- crosses yield point!
+/// println!("{foo_ref}"); // <--+
+/// yield foo.len();
+/// }
+/// }
+///
+/// fn main() {
+/// let mut generator = pin!(generator_fn());
+/// match generator.as_mut().resume(()) {
+/// GeneratorState::Yielded(0) => {},
+/// _ => unreachable!(),
+/// }
+/// match generator.as_mut().resume(()) {
+/// GeneratorState::Yielded(3) => {},
+/// _ => unreachable!(),
+/// }
+/// match generator.resume(()) {
+/// GeneratorState::Yielded(_) => unreachable!(),
+/// GeneratorState::Complete(()) => {},
+/// }
+/// }
+/// ```
+///
+/// ## Remarks
+///
+/// Precisely because a value is pinned to local storage, the resulting <code>[Pin]<[&mut] T></code>
+/// reference ends up borrowing a local tied to that block: it can't escape it.
+///
+/// The following, for instance, fails to compile:
+///
+/// ```rust,compile_fail
+/// #![feature(pin_macro)]
+/// use core::pin::{pin, Pin};
+/// # use core::{marker::PhantomPinned as Foo, mem::drop as stuff};
+///
+/// let x: Pin<&mut Foo> = {
+/// let x: Pin<&mut Foo> = pin!(Foo { /* … */ });
+/// x
+/// }; // <- Foo is dropped
+/// stuff(x); // Error: use of dropped value
+/// ```
+///
+/// <details><summary>Error message</summary>
+///
+/// ```console
+/// error[E0716]: temporary value dropped while borrowed
+/// --> src/main.rs:9:28
+/// |
+/// 8 | let x: Pin<&mut Foo> = {
+/// | - borrow later stored here
+/// 9 | let x: Pin<&mut Foo> = pin!(Foo { /* … */ });
+/// | ^^^^^^^^^^^^^^^^^^^^^ creates a temporary which is freed while still in use
+/// 10 | x
+/// 11 | }; // <- Foo is dropped
+/// | - temporary value is freed at the end of this statement
+/// |
+/// = note: consider using a `let` binding to create a longer lived value
+/// ```
+///
+/// </details>
+///
+/// This makes [`pin!`] **unsuitable to pin values when intending to _return_ them**. Instead, the
+/// value is expected to be passed around _unpinned_ until the point where it is to be consumed,
+/// at which point it is useful and even sensible to pin the value locally using [`pin!`].
+///
+/// If you really need to return a pinned value, consider using [`Box::pin`] instead.
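+///
+/// For instance, a minimal sketch (`make_pinned` is a hypothetical helper):
+///
+/// ```rust
+/// use std::pin::Pin;
+///
+/// fn make_pinned() -> Pin<Box<u8>> {
+///     // `Box::pin` heap-allocates, so the pinned value can escape the function.
+///     Box::pin(42)
+/// }
+///
+/// assert_eq!(*make_pinned(), 42);
+/// ```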
+///
+/// On the other hand, pinning to the stack[<sup>2</sup>](#fn2) using [`pin!`] is likely to be
+/// cheaper than pinning into a fresh heap allocation using [`Box::pin`]. Moreover, by virtue of not
+/// even needing an allocator, [`pin!`] is the main non-`unsafe` `#![no_std]`-compatible [`Pin`]
+/// constructor.
+///
+/// [`Box::pin`]: ../../std/boxed/struct.Box.html#method.pin
+#[unstable(feature = "pin_macro", issue = "93178")]
+#[rustc_macro_transparency = "semitransparent"]
+#[allow_internal_unstable(unsafe_pin_internals)]
+pub macro pin($value:expr $(,)?) {
+ // This is `Pin::new_unchecked(&mut { $value })`, so, for starters, let's
+ // review such a hypothetical macro (that any user-code could define):
+ //
+ // ```rust
+ // macro_rules! pin {( $value:expr ) => (
+ // match &mut { $value } { at_value => unsafe { // Do not wrap `$value` in an `unsafe` block.
+ // $crate::pin::Pin::<&mut _>::new_unchecked(at_value)
+ // }}
+ // )}
+ // ```
+ //
+ // Safety:
+ // - `type P = &mut _`. There are thus no pathological `Deref{,Mut}` impls
+ // that would break `Pin`'s invariants.
+ // - `{ $value }` is braced, making it a _block expression_, thus **moving**
+ // the given `$value`, and making it _become an **anonymous** temporary_.
+ // By virtue of being anonymous, it can no longer be accessed, thus
+ // preventing any attempts to `mem::replace` it or `mem::forget` it, _etc._
+ //
+ // This gives us a `pin!` definition that is sound, and which works, but only
+ // in certain scenarios:
+ // - If the `pin!(value)` expression is _directly_ fed to a function call:
+ // `let poll = pin!(fut).poll(cx);`
+ // - If the `pin!(value)` expression is part of a scrutinee:
+ // ```rust
+ // match pin!(fut) { pinned_fut => {
+ // pinned_fut.as_mut().poll(...);
+ // pinned_fut.as_mut().poll(...);
+ // }} // <- `fut` is dropped here.
+ // ```
+ // Alas, it doesn't work for the more straightforward use-case: `let` bindings.
+ // ```rust
+ // let pinned_fut = pin!(fut); // <- temporary value is freed at the end of this statement
+ // pinned_fut.poll(...) // error[E0716]: temporary value dropped while borrowed
+ // // note: consider using a `let` binding to create a longer lived value
+ // ```
+ // - Issues such as this one are the ones motivating https://github.com/rust-lang/rfcs/pull/66
+ //
+ // This makes such a macro incredibly unergonomic in practice, and is the reason most macros
+ // out there had to take the path of being a statement/binding macro (_e.g._, `pin!(future);`)
+ // instead of featuring the more intuitive ergonomics of an expression macro.
+ //
+ // Luckily, there is a way to avoid the problem. Indeed, the problem stems from the fact that a
+ // temporary is dropped at the end of its enclosing statement when it is part of the parameters
+ // given to a function call, which is precisely the case with our `Pin::new_unchecked()`!
+ // For instance,
+ // ```rust
+ // let p = Pin::new_unchecked(&mut <temporary>);
+ // ```
+ // becomes:
+ // ```rust
+ // let p = { let mut anon = <temporary>; &mut anon };
+ // ```
+ //
+ // However, when using a literal braced struct to construct the value, references to temporaries
+ // can then be taken. This makes Rust change the lifespan of such temporaries so that they are,
+ // instead, dropped _at the end of the enclosing block_.
+ // For instance,
+ // ```rust
+ // let p = Pin { pointer: &mut <temporary> };
+ // ```
+ // becomes:
+ // ```rust
+ // let mut anon = <temporary>;
+ // let p = Pin { pointer: &mut anon };
+ // ```
+ // which is *exactly* what we want.
+ //
+ // See https://doc.rust-lang.org/1.58.1/reference/destructors.html#temporary-lifetime-extension
+ // for more info.
+ $crate::pin::Pin::<&mut _> { pointer: &mut { $value } }
+}
diff --git a/library/core/src/prelude/mod.rs b/library/core/src/prelude/mod.rs
new file mode 100644
index 000000000..3cd3a3b78
--- /dev/null
+++ b/library/core/src/prelude/mod.rs
@@ -0,0 +1,57 @@
+//! The libcore prelude
+//!
+//! This module is intended for users of libcore who do not link to libstd as
+//! well. This module is imported by default when `#![no_std]` is used in the
+//! same manner as the standard library's prelude.
+
+#![stable(feature = "core_prelude", since = "1.4.0")]
+
+pub mod v1;
+
+/// The 2015 version of the core prelude.
+///
+/// See the [module-level documentation](self) for more.
+#[stable(feature = "prelude_2015", since = "1.55.0")]
+pub mod rust_2015 {
+ #[stable(feature = "prelude_2015", since = "1.55.0")]
+ #[doc(no_inline)]
+ pub use super::v1::*;
+}
+
+/// The 2018 version of the core prelude.
+///
+/// See the [module-level documentation](self) for more.
+#[stable(feature = "prelude_2018", since = "1.55.0")]
+pub mod rust_2018 {
+ #[stable(feature = "prelude_2018", since = "1.55.0")]
+ #[doc(no_inline)]
+ pub use super::v1::*;
+}
+
+/// The 2021 version of the core prelude.
+///
+/// See the [module-level documentation](self) for more.
+#[stable(feature = "prelude_2021", since = "1.55.0")]
+pub mod rust_2021 {
+ #[stable(feature = "prelude_2021", since = "1.55.0")]
+ #[doc(no_inline)]
+ pub use super::v1::*;
+
+ #[stable(feature = "prelude_2021", since = "1.55.0")]
+ #[doc(no_inline)]
+ pub use crate::iter::FromIterator;
+
+ #[stable(feature = "prelude_2021", since = "1.55.0")]
+ #[doc(no_inline)]
+ pub use crate::convert::{TryFrom, TryInto};
+}
+
+/// The 2024 version of the core prelude.
+///
+/// See the [module-level documentation](self) for more.
+#[unstable(feature = "prelude_2024", issue = "none")]
+pub mod rust_2024 {
+ #[unstable(feature = "prelude_2024", issue = "none")]
+ #[doc(no_inline)]
+ pub use super::rust_2021::*;
+}
diff --git a/library/core/src/prelude/v1.rs b/library/core/src/prelude/v1.rs
new file mode 100644
index 000000000..b566e211c
--- /dev/null
+++ b/library/core/src/prelude/v1.rs
@@ -0,0 +1,93 @@
+//! The first version of the core prelude.
+//!
+//! See the [module-level documentation](super) for more.
+
+#![stable(feature = "core_prelude", since = "1.4.0")]
+
+// Re-exported core operators
+#[stable(feature = "core_prelude", since = "1.4.0")]
+#[doc(no_inline)]
+pub use crate::marker::{Copy, Send, Sized, Sync, Unpin};
+#[stable(feature = "core_prelude", since = "1.4.0")]
+#[doc(no_inline)]
+pub use crate::ops::{Drop, Fn, FnMut, FnOnce};
+
+// Re-exported functions
+#[stable(feature = "core_prelude", since = "1.4.0")]
+#[doc(no_inline)]
+pub use crate::mem::drop;
+
+// Re-exported types and traits
+#[stable(feature = "core_prelude", since = "1.4.0")]
+#[doc(no_inline)]
+pub use crate::clone::Clone;
+#[stable(feature = "core_prelude", since = "1.4.0")]
+#[doc(no_inline)]
+pub use crate::cmp::{Eq, Ord, PartialEq, PartialOrd};
+#[stable(feature = "core_prelude", since = "1.4.0")]
+#[doc(no_inline)]
+pub use crate::convert::{AsMut, AsRef, From, Into};
+#[stable(feature = "core_prelude", since = "1.4.0")]
+#[doc(no_inline)]
+pub use crate::default::Default;
+#[stable(feature = "core_prelude", since = "1.4.0")]
+#[doc(no_inline)]
+pub use crate::iter::{DoubleEndedIterator, ExactSizeIterator};
+#[stable(feature = "core_prelude", since = "1.4.0")]
+#[doc(no_inline)]
+pub use crate::iter::{Extend, IntoIterator, Iterator};
+#[stable(feature = "core_prelude", since = "1.4.0")]
+#[doc(no_inline)]
+pub use crate::option::Option::{self, None, Some};
+#[stable(feature = "core_prelude", since = "1.4.0")]
+#[doc(no_inline)]
+pub use crate::result::Result::{self, Err, Ok};
+
+// Re-exported built-in macros
+#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+#[doc(no_inline)]
+pub use crate::fmt::macros::Debug;
+#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+#[doc(no_inline)]
+pub use crate::hash::macros::Hash;
+
+#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+#[allow(deprecated)]
+#[doc(no_inline)]
+pub use crate::{
+ assert, cfg, column, compile_error, concat, concat_idents, env, file, format_args,
+ format_args_nl, include, include_bytes, include_str, line, log_syntax, module_path, option_env,
+ stringify, trace_macros,
+};
+
+#[unstable(
+ feature = "concat_bytes",
+ issue = "87555",
+ reason = "`concat_bytes` is not stable enough for use and is subject to change"
+)]
+#[doc(no_inline)]
+pub use crate::concat_bytes;
+
+// Do not `doc(inline)` these `doc(hidden)` items.
+#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+#[allow(deprecated)]
+pub use crate::macros::builtin::{RustcDecodable, RustcEncodable};
+
+// Do not `doc(no_inline)` so that they become doc items on their own
+// (no public module for them to be re-exported from).
+#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+pub use crate::macros::builtin::{bench, derive, global_allocator, test, test_case};
+
+#[unstable(
+ feature = "cfg_accessible",
+ issue = "64797",
+ reason = "`cfg_accessible` is not fully implemented"
+)]
+pub use crate::macros::builtin::cfg_accessible;
+
+#[unstable(
+ feature = "cfg_eval",
+ issue = "82679",
+ reason = "`cfg_eval` is a recently implemented feature"
+)]
+pub use crate::macros::builtin::cfg_eval;
diff --git a/library/core/src/primitive.rs b/library/core/src/primitive.rs
new file mode 100644
index 000000000..e20b2c5c9
--- /dev/null
+++ b/library/core/src/primitive.rs
@@ -0,0 +1,67 @@
+//! This module reexports the primitive types to allow usage that cannot be
+//! shadowed by other declared types.
+//!
+//! This is normally only useful in macro-generated code.
+//!
+//! An example of this is when generating a new struct and an impl for it:
+//!
+//! ```rust,compile_fail
+//! pub struct bool;
+//!
+//! impl QueryId for bool {
+//! const SOME_PROPERTY: bool = true;
+//! }
+//!
+//! # trait QueryId { const SOME_PROPERTY: core::primitive::bool; }
+//! ```
+//!
+//! Note that the `SOME_PROPERTY` associated constant would not compile, as its
+//! type `bool` refers to the struct, rather than to the primitive bool type.
+//!
+//! A correct implementation could look like:
+//!
+//! ```rust
+//! # #[allow(non_camel_case_types)]
+//! pub struct bool;
+//!
+//! impl QueryId for bool {
+//! const SOME_PROPERTY: core::primitive::bool = true;
+//! }
+//!
+//! # trait QueryId { const SOME_PROPERTY: core::primitive::bool; }
+//! ```
+
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use bool;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use char;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use f32;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use f64;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use i128;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use i16;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use i32;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use i64;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use i8;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use isize;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use str;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use u128;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use u16;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use u32;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use u64;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use u8;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use usize;
diff --git a/library/core/src/primitive_docs.rs b/library/core/src/primitive_docs.rs
new file mode 100644
index 000000000..b8e546164
--- /dev/null
+++ b/library/core/src/primitive_docs.rs
@@ -0,0 +1,1508 @@
+// `library/{std,core}/src/primitive_docs.rs` should have the same contents.
+// These are different files so that relative links work properly without
+// having to have `CARGO_PKG_NAME` set, but conceptually they should always be the same.
+#[doc(primitive = "bool")]
+#[doc(alias = "true")]
+#[doc(alias = "false")]
+/// The boolean type.
+///
+/// A `bool` represents a value that can only be either [`true`] or [`false`]. If you cast
+/// a `bool` into an integer, [`true`] will be 1 and [`false`] will be 0.
+///
+/// # Basic usage
+///
+/// `bool` implements various traits, such as [`BitAnd`], [`BitOr`], [`Not`], etc.,
+/// which allow us to perform boolean operations using `&`, `|` and `!`.
+///
+/// [`if`] requires a `bool` value as its conditional. [`assert!`], which is an
+/// important macro in testing, checks whether an expression is [`true`] and panics
+/// if it isn't.
+///
+/// ```
+/// let bool_val = true & false | false;
+/// assert!(!bool_val);
+/// ```
+///
+/// [`true`]: ../std/keyword.true.html
+/// [`false`]: ../std/keyword.false.html
+/// [`BitAnd`]: ops::BitAnd
+/// [`BitOr`]: ops::BitOr
+/// [`Not`]: ops::Not
+/// [`if`]: ../std/keyword.if.html
+///
+/// # Examples
+///
+/// A trivial example of the usage of `bool`:
+///
+/// ```
+/// let praise_the_borrow_checker = true;
+///
+/// // using the `if` conditional
+/// if praise_the_borrow_checker {
+/// println!("oh, yeah!");
+/// } else {
+/// println!("what?!!");
+/// }
+///
+/// // ... or, a match pattern
+/// match praise_the_borrow_checker {
+/// true => println!("keep praising!"),
+/// false => println!("you should praise!"),
+/// }
+/// ```
+///
+/// Also, since `bool` implements the [`Copy`] trait, we don't
+/// have to worry about move semantics (just like the integer and float primitives).
+///
+/// Now an example of casting a `bool` to an integer type:
+///
+/// ```
+/// assert_eq!(true as i32, 1);
+/// assert_eq!(false as i32, 0);
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_bool {}
+
+#[doc(primitive = "never")]
+#[doc(alias = "!")]
+//
+/// The `!` type, also called "never".
+///
+/// `!` represents the type of computations which never resolve to any value at all. For example,
+/// the [`exit`] function `fn exit(code: i32) -> !` exits the process without ever returning, and
+/// so returns `!`.
+///
+/// `break`, `continue` and `return` expressions also have type `!`. For example, we are allowed to
+/// write:
+///
+/// ```
+/// #![feature(never_type)]
+/// # fn foo() -> u32 {
+/// let x: ! = {
+/// return 123
+/// };
+/// # }
+/// ```
+///
+/// Although the `let` is pointless here, it illustrates the meaning of `!`. Since `x` is never
+/// assigned a value (because `return` returns from the entire function), `x` can be given type
+/// `!`. We could also replace `return 123` with a `panic!` or a never-ending `loop` and this code
+/// would still be valid.
+///
+/// A more realistic usage of `!` is in this code:
+///
+/// ```
+/// # fn get_a_number() -> Option<u32> { None }
+/// # loop {
+/// let num: u32 = match get_a_number() {
+/// Some(num) => num,
+/// None => break,
+/// };
+/// # }
+/// ```
+///
+/// Both match arms must produce values of type [`u32`], but since `break` never produces a value
+/// at all we know it can never produce a value which isn't a [`u32`]. This illustrates another
+/// behaviour of the `!` type - expressions with type `!` will coerce into any other type.
+///
+/// [`u32`]: prim@u32
+#[doc = concat!("[`exit`]: ", include_str!("../primitive_docs/process_exit.md"))]
+///
+/// # `!` and generics
+///
+/// ## Infallible errors
+///
+/// The main place you'll see `!` used explicitly is in generic code. Consider the [`FromStr`]
+/// trait:
+///
+/// ```
+/// trait FromStr: Sized {
+/// type Err;
+/// fn from_str(s: &str) -> Result<Self, Self::Err>;
+/// }
+/// ```
+///
+/// When implementing this trait for [`String`] we need to pick a type for [`Err`]. And since
+/// converting a string into a string will never result in an error, the appropriate type is `!`.
+/// (Currently the type actually used is an enum with no variants, though this is only because `!`
+/// was added to Rust at a later date and it may change in the future.) With an [`Err`] type of
+/// `!`, if we have to call [`String::from_str`] for some reason the result will be a
+/// [`Result<String, !>`] which we can unpack like this:
+///
+/// ```
+/// #![feature(exhaustive_patterns)]
+/// use std::str::FromStr;
+/// let Ok(s) = String::from_str("hello");
+/// ```
+///
+/// Since the [`Err`] variant contains a `!`, it can never occur. If the `exhaustive_patterns`
+/// feature is present this means we can exhaustively match on [`Result<T, !>`] by just taking the
+/// [`Ok`] variant. This illustrates another behaviour of `!` - it can be used to "delete" certain
+/// enum variants from generic types like `Result`.
+///
+/// ## Infinite loops
+///
+/// While [`Result<T, !>`] is very useful for removing errors, `!` can also be used to remove
+/// successes. If we think of [`Result<T, !>`] as "if this function returns, it has not
+/// errored," we get a very intuitive idea of [`Result<!, E>`] as well: if the function returns, it
+/// *has* errored.
+///
+/// For example, consider the case of a simple web server, which can be simplified to:
+///
+/// ```ignore (hypothetical-example)
+/// loop {
+/// let (client, request) = get_request().expect("disconnected");
+/// let response = request.process();
+/// response.send(client);
+/// }
+/// ```
+///
+/// Currently, this isn't ideal, because we simply panic whenever we fail to get a new connection.
+/// Instead, we'd like to keep track of this error, like this:
+///
+/// ```ignore (hypothetical-example)
+/// loop {
+/// match get_request() {
+/// Err(err) => break err,
+/// Ok((client, request)) => {
+/// let response = request.process();
+/// response.send(client);
+/// },
+/// }
+/// }
+/// ```
+///
+/// Now, when the server disconnects, we exit the loop with an error instead of panicking. While it
+/// might be intuitive to simply return the error, we might want to wrap it in a [`Result<!, E>`]
+/// instead:
+///
+/// ```ignore (hypothetical-example)
+/// fn server_loop() -> Result<!, ConnectionError> {
+/// loop {
+/// let (client, request) = get_request()?;
+/// let response = request.process();
+/// response.send(client);
+/// }
+/// }
+/// ```
+///
+/// Now, we can use `?` instead of `match`, and the return type makes a lot more sense: if the loop
+/// ever stops, it means that an error occurred. We don't even have to wrap the loop in an `Ok`
+/// because `!` coerces to `Result<!, ConnectionError>` automatically.
+///
+/// [`String::from_str`]: str::FromStr::from_str
+#[doc = concat!("[`String`]: ", include_str!("../primitive_docs/string_string.md"))]
+/// [`FromStr`]: str::FromStr
+///
+/// # `!` and traits
+///
+/// When writing your own traits, `!` should have an `impl` whenever there is an obvious `impl`
+/// which doesn't `panic!`. The reason is that a function returning an `impl Trait` where `!`
+/// does not have an `impl` of `Trait` cannot diverge on every possible code path. In other
+/// words, such a function can't return `!` from every code path. As an example, this code doesn't compile:
+///
+/// ```compile_fail
+/// use std::ops::Add;
+///
+/// fn foo() -> impl Add<u32> {
+/// unimplemented!()
+/// }
+/// ```
+///
+/// But this code does:
+///
+/// ```
+/// use std::ops::Add;
+///
+/// fn foo() -> impl Add<u32> {
+/// if true {
+/// unimplemented!()
+/// } else {
+/// 0
+/// }
+/// }
+/// ```
+///
+/// The reason is that, in the first example, there are many possible types that `!` could coerce
+/// to, because many types implement `Add<u32>`. However, in the second example,
+/// the `else` branch returns a `0`, which the compiler infers from the return type to be of type
+/// `u32`. Since `u32` is a concrete type, `!` can and will be coerced to it. See issue [#36375]
+/// for more information on this quirk of `!`.
+///
+/// [#36375]: https://github.com/rust-lang/rust/issues/36375
+///
+/// As it turns out, though, most traits can have an `impl` for `!`. Take [`Debug`]
+/// for example:
+///
+/// ```
+/// #![feature(never_type)]
+/// # use std::fmt;
+/// # trait Debug {
+/// # fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result;
+/// # }
+/// impl Debug for ! {
+/// fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// *self
+/// }
+/// }
+/// ```
+///
+/// Once again we're using `!`'s ability to coerce into any other type, in this case
+/// [`fmt::Result`]. Since this method takes a `&!` as an argument we know that it can never be
+/// called (because there is no value of type `!` for it to be called with). Writing `*self`
+/// essentially tells the compiler "We know that this code can never be run, so just treat the
+/// entire function body as having type [`fmt::Result`]". This pattern can be used a lot when
+/// implementing traits for `!`. Generally, any trait which only has methods which take a `self`
+/// parameter should have such an impl.
+///
+/// On the other hand, one trait which would not be appropriate to implement is [`Default`]:
+///
+/// ```
+/// trait Default {
+/// fn default() -> Self;
+/// }
+/// ```
+///
+/// Since `!` has no values, it has no default value either. It's true that we could write an
+/// `impl` for this which simply panics, but the same is true for any type (we could `impl
+/// Default` for (e.g.) [`File`] by just making [`default()`] panic.)
+///
+#[doc = concat!("[`File`]: ", include_str!("../primitive_docs/fs_file.md"))]
+/// [`Debug`]: fmt::Debug
+/// [`default()`]: Default::default
+///
+#[unstable(feature = "never_type", issue = "35121")]
+mod prim_never {}
+
+#[doc(primitive = "char")]
+#[allow(rustdoc::invalid_rust_codeblocks)]
+/// A character type.
+///
+/// The `char` type represents a single character. More specifically, since
+/// 'character' isn't a well-defined concept in Unicode, `char` is a '[Unicode
+/// scalar value]'.
+///
+/// This documentation describes a number of methods and trait implementations on the
+/// `char` type. For technical reasons, there is additional, separate
+/// documentation in [the `std::char` module](char/index.html) as well.
+///
+/// # Validity
+///
+/// A `char` is a '[Unicode scalar value]', which is any '[Unicode code point]'
+/// other than a [surrogate code point]. This has a fixed numerical definition:
+/// code points are in the range 0 to 0x10FFFF, inclusive.
+/// Surrogate code points, used by UTF-16, are in the range 0xD800 to 0xDFFF.
+///
+/// No `char` may be constructed, whether as a literal or at runtime, that is not a
+/// Unicode scalar value:
+///
+/// ```compile_fail
+/// // Each of these is a compiler error
+/// ['\u{D800}', '\u{DFFF}', '\u{110000}'];
+/// ```
+///
+/// ```should_panic
+/// // Panics; from_u32 returns None.
+/// char::from_u32(0xDE01).unwrap();
+/// ```
+///
+/// ```no_run
+/// // Undefined behaviour
+/// unsafe { char::from_u32_unchecked(0x110000) };
+/// ```
+///
+/// USVs are also the exact set of values that may be encoded in UTF-8. Because
+/// `char` values are USVs and `str` values are valid UTF-8, it is safe to store
+/// any `char` in a `str` or read any character from a `str` as a `char`.
+///
+/// The gap in valid `char` values is understood by the compiler, so in the
+/// below example the two ranges are understood to cover the whole range of
+/// possible `char` values and there is no error for a [non-exhaustive match].
+///
+/// ```
+/// let c: char = 'a';
+/// match c {
+/// '\0' ..= '\u{D7FF}' => false,
+/// '\u{E000}' ..= '\u{10FFFF}' => true,
+/// };
+/// ```
+///
+/// All USVs are valid `char` values, but not all of them represent a real
+/// character. Many USVs are not currently assigned to a character, but may be
+/// in the future ("reserved"); some will never be a character
+/// ("noncharacters"); and some may be given different meanings by different
+/// users ("private use").
+///
+/// [Unicode code point]: https://www.unicode.org/glossary/#code_point
+/// [Unicode scalar value]: https://www.unicode.org/glossary/#unicode_scalar_value
+/// [non-exhaustive match]: ../book/ch06-02-match.html#matches-are-exhaustive
+/// [surrogate code point]: https://www.unicode.org/glossary/#surrogate_code_point
+///
+/// # Representation
+///
+/// `char` is always four bytes in size. This is a different representation than
+/// a given character would have as part of a [`String`]. For example:
+///
+/// ```
+/// let v = vec!['h', 'e', 'l', 'l', 'o'];
+///
+/// // five elements times four bytes for each element
+/// assert_eq!(20, v.len() * std::mem::size_of::<char>());
+///
+/// let s = String::from("hello");
+///
+/// // five elements times one byte per element
+/// assert_eq!(5, s.len() * std::mem::size_of::<u8>());
+/// ```
+///
+#[doc = concat!("[`String`]: ", include_str!("../primitive_docs/string_string.md"))]
+///
+/// As always, remember that a human intuition for 'character' might not map to
+/// Unicode's definitions. For example, despite looking similar, the 'é'
+/// character is one Unicode code point while 'é' is two Unicode code points:
+///
+/// ```
+/// let mut chars = "é".chars();
+/// // U+00e9: 'latin small letter e with acute'
+/// assert_eq!(Some('\u{00e9}'), chars.next());
+/// assert_eq!(None, chars.next());
+///
+/// let mut chars = "é".chars();
+/// // U+0065: 'latin small letter e'
+/// assert_eq!(Some('\u{0065}'), chars.next());
+/// // U+0301: 'combining acute accent'
+/// assert_eq!(Some('\u{0301}'), chars.next());
+/// assert_eq!(None, chars.next());
+/// ```
+///
+/// This means that the contents of the first string above _will_ fit into a
+/// `char` while the contents of the second string _will not_. Trying to create
+/// a `char` literal with the contents of the second string gives an error:
+///
+/// ```text
+/// error: character literal may only contain one codepoint: 'é'
+/// let c = 'é';
+/// ^^^
+/// ```
+///
+/// Another implication of the 4-byte fixed size of a `char` is that
+/// per-`char` processing can end up using a lot more memory:
+///
+/// ```
+/// let s = String::from("love: ❤️");
+/// let v: Vec<char> = s.chars().collect();
+///
+/// assert_eq!(12, std::mem::size_of_val(&s[..]));
+/// assert_eq!(32, std::mem::size_of_val(&v[..]));
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_char {}
+
+#[doc(primitive = "unit")]
+#[doc(alias = "(")]
+#[doc(alias = ")")]
+#[doc(alias = "()")]
+//
+/// The `()` type, also called "unit".
+///
+/// The `()` type has exactly one value, `()`, and is used when there
+/// is no other meaningful value that could be returned. `()` is most
+/// commonly seen implicitly: functions without a `-> ...` implicitly
+/// have return type `()`, that is, these are equivalent:
+///
+/// ```rust
+/// fn long() -> () {}
+///
+/// fn short() {}
+/// ```
+///
+/// The semicolon `;` can be used to discard the result of an
+/// expression at the end of a block, making the expression (and thus
+/// the block) evaluate to `()`. For example,
+///
+/// ```rust
+/// fn returns_i64() -> i64 {
+/// 1i64
+/// }
+/// fn returns_unit() {
+/// 1i64;
+/// }
+///
+/// let is_i64 = {
+/// returns_i64()
+/// };
+/// let is_unit = {
+/// returns_i64();
+/// };
+/// ```
+///
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_unit {}
+
+// Required to make auto trait impls render.
+// See src/librustdoc/passes/collect_trait_impls.rs:collect_trait_impls
+#[doc(hidden)]
+impl () {}
+
+// Fake impl that's only really used for docs.
+#[cfg(doc)]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Clone for () {
+ fn clone(&self) -> Self {
+ loop {}
+ }
+}
+
+// Fake impl that's only really used for docs.
+#[cfg(doc)]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Copy for () {
+ // empty
+}
+
+#[doc(primitive = "pointer")]
+#[doc(alias = "ptr")]
+#[doc(alias = "*")]
+#[doc(alias = "*const")]
+#[doc(alias = "*mut")]
+//
+/// Raw, unsafe pointers, `*const T`, and `*mut T`.
+///
+/// *[See also the `std::ptr` module](ptr).*
+///
+/// Working with raw pointers in Rust is uncommon, typically limited to a few patterns.
+/// Raw pointers can be unaligned or [`null`]. However, when a raw pointer is
+/// dereferenced (using the `*` operator), it must be non-null and aligned.
+///
+/// Storing through a raw pointer using `*ptr = data` calls `drop` on the old value, so
+/// [`write`] must be used if the type has drop glue and memory is not already
+/// initialized; otherwise `drop` would be called on the uninitialized memory.
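+///
+/// For illustration, a minimal sketch using `MaybeUninit`:
+///
+/// ```
+/// use std::mem::MaybeUninit;
+/// use std::ptr;
+///
+/// let mut slot = MaybeUninit::<String>::uninit();
+/// let p: *mut String = slot.as_mut_ptr();
+/// unsafe {
+///     // `*p = value` would first drop the (uninitialized) old value: undefined behavior.
+///     ptr::write(p, String::from("hello")); // initializes without reading or dropping
+///     assert_eq!(*p, "hello");
+///     ptr::drop_in_place(p); // `MaybeUninit` never drops its contents
+/// }
+/// ```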
+///
+/// Use the [`null`] and [`null_mut`] functions to create null pointers, and the
+/// [`is_null`] method of the `*const T` and `*mut T` types to check for null.
+/// The `*const T` and `*mut T` types also define the [`offset`] method, for
+/// pointer math.
+///
+/// # Common ways to create raw pointers
+///
+/// ## 1. Coerce a reference (`&T`) or mutable reference (`&mut T`).
+///
+/// ```
+/// let my_num: i32 = 10;
+/// let my_num_ptr: *const i32 = &my_num;
+/// let mut my_speed: i32 = 88;
+/// let my_speed_ptr: *mut i32 = &mut my_speed;
+/// ```
+///
+/// To get a pointer to a boxed value, dereference the box:
+///
+/// ```
+/// let my_num: Box<i32> = Box::new(10);
+/// let my_num_ptr: *const i32 = &*my_num;
+/// let mut my_speed: Box<i32> = Box::new(88);
+/// let my_speed_ptr: *mut i32 = &mut *my_speed;
+/// ```
+///
+/// This does not take ownership of the original allocation
+/// and requires no resource management later,
+/// but you must not use the pointer after its lifetime.
+///
+/// ## 2. Consume a box (`Box<T>`).
+///
+/// The [`into_raw`] function consumes a box and returns
+/// the raw pointer. It doesn't destroy `T` or deallocate any memory.
+///
+/// ```
+/// let my_speed: Box<i32> = Box::new(88);
+/// let my_speed: *mut i32 = Box::into_raw(my_speed);
+///
+/// // By taking ownership of the original `Box<T>`, though,
+/// // we are obligated to reconstruct it later so it can be destroyed.
+/// unsafe {
+/// drop(Box::from_raw(my_speed));
+/// }
+/// ```
+///
+/// Note that here the call to [`drop`] is for clarity - it indicates
+/// that we are done with the given value and it should be destroyed.
+///
+/// ## 3. Create it using `ptr::addr_of!`
+///
+/// Instead of coercing a reference to a raw pointer, you can use the macros
+/// [`ptr::addr_of!`] (for `*const T`) and [`ptr::addr_of_mut!`] (for `*mut T`).
+/// These macros allow you to create raw pointers to fields to which you cannot
+/// create a reference (without causing undefined behaviour), such as an
+/// unaligned field. This might be necessary if packed structs or uninitialized
+/// memory is involved.
+///
+/// ```
+/// #[derive(Debug, Default, Copy, Clone)]
+/// #[repr(C, packed)]
+/// struct S {
+/// aligned: u8,
+/// unaligned: u32,
+/// }
+/// let s = S::default();
+/// let p = std::ptr::addr_of!(s.unaligned); // not allowed with coercion
+/// ```
+///
+/// ## 4. Get it from C.
+///
+/// ```
+/// # #![feature(rustc_private)]
+/// extern crate libc;
+///
+/// use std::mem;
+///
+/// unsafe {
+/// let my_num: *mut i32 = libc::malloc(mem::size_of::<i32>()) as *mut i32;
+/// if my_num.is_null() {
+/// panic!("failed to allocate memory");
+/// }
+/// libc::free(my_num as *mut libc::c_void);
+/// }
+/// ```
+///
+/// Usually you wouldn't literally use `malloc` and `free` from Rust,
+/// but C APIs hand out a lot of pointers generally, so they are a common source
+/// of raw pointers in Rust.
+///
+/// [`null`]: ptr::null
+/// [`null_mut`]: ptr::null_mut
+/// [`is_null`]: pointer::is_null
+/// [`offset`]: pointer::offset
+#[doc = concat!("[`into_raw`]: ", include_str!("../primitive_docs/box_into_raw.md"))]
+/// [`drop`]: mem::drop
+/// [`write`]: ptr::write
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_pointer {}
+
+#[doc(primitive = "array")]
+#[doc(alias = "[]")]
+#[doc(alias = "[T;N]")] // unfortunately, rustdoc doesn't have fuzzy search for aliases
+#[doc(alias = "[T; N]")]
+/// A fixed-size array, denoted `[T; N]`, for the element type, `T`, and the
+/// non-negative compile-time constant size, `N`.
+///
+/// There are two syntactic forms for creating an array:
+///
+/// * A list with each element, i.e., `[x, y, z]`.
+/// * A repeat expression `[x; N]`, which produces an array with `N` copies of `x`.
+/// The type of `x` must be [`Copy`].
+///
+/// Note that `[expr; 0]` is allowed, and produces an empty array.
+/// This will still evaluate `expr`, however, and immediately drop the resulting value, so
+/// be mindful of side effects.
+///
+/// Arrays of *any* size implement the following traits if the element type allows it:
+///
+/// - [`Copy`]
+/// - [`Clone`]
+/// - [`Debug`]
+/// - [`IntoIterator`] (implemented for `[T; N]`, `&[T; N]` and `&mut [T; N]`)
+/// - [`PartialEq`], [`PartialOrd`], [`Eq`], [`Ord`]
+/// - [`Hash`]
+/// - [`AsRef`], [`AsMut`]
+/// - [`Borrow`], [`BorrowMut`]
+///
+/// Arrays of sizes from 0 to 32 (inclusive) implement the [`Default`] trait
+/// if the element type allows it. As a stopgap, trait implementations are
+/// statically generated up to size 32.
+///
+/// Arrays coerce to [slices (`[T]`)][slice], so a slice method may be called on
+/// an array. Indeed, this provides most of the API for working with arrays.
+/// Slices have a dynamic size and do not coerce to arrays.
+///
+/// You can move elements out of an array with a [slice pattern]. If you want
+/// one element, see [`mem::replace`].
+///
+/// # Examples
+///
+/// ```
+/// let mut array: [i32; 3] = [0; 3];
+///
+/// array[1] = 1;
+/// array[2] = 2;
+///
+/// assert_eq!([1, 2], &array[1..]);
+///
+/// // This loop prints: 0 1 2
+/// for x in array {
+/// print!("{x} ");
+/// }
+/// ```
+///
+/// You can also iterate over references to the array's elements:
+///
+/// ```
+/// let array: [i32; 3] = [0; 3];
+///
+/// for x in &array { }
+/// ```
+///
+/// You can use a [slice pattern] to move elements out of an array:
+///
+/// ```
+/// fn move_away(_: String) { /* Do interesting things. */ }
+///
+/// let [john, roa] = ["John".to_string(), "Roa".to_string()];
+/// move_away(john);
+/// move_away(roa);
+/// ```
+///
+/// # Editions
+///
+/// Prior to Rust 1.53, arrays did not implement [`IntoIterator`] by value, so the method call
+/// `array.into_iter()` auto-referenced into a [slice iterator](slice::iter). Right now, the old
+/// behavior is preserved in the 2015 and 2018 editions of Rust for compatibility, ignoring
+/// [`IntoIterator`] by value. In the future, the behavior on the 2015 and 2018 editions
+/// might be made consistent with the behavior of later editions.
+///
+/// ```rust,edition2018
+/// // Rust 2015 and 2018:
+///
+/// # #![allow(array_into_iter)] // override our `deny(warnings)`
+/// let array: [i32; 3] = [0; 3];
+///
+/// // This creates a slice iterator, producing references to each value.
+/// for item in array.into_iter().enumerate() {
+/// let (i, x): (usize, &i32) = item;
+/// println!("array[{i}] = {x}");
+/// }
+///
+/// // The `array_into_iter` lint suggests this change for future compatibility:
+/// for item in array.iter().enumerate() {
+/// let (i, x): (usize, &i32) = item;
+/// println!("array[{i}] = {x}");
+/// }
+///
+/// // You can explicitly iterate an array by value using `IntoIterator::into_iter`
+/// for item in IntoIterator::into_iter(array).enumerate() {
+/// let (i, x): (usize, i32) = item;
+/// println!("array[{i}] = {x}");
+/// }
+/// ```
+///
+/// Starting in the 2021 edition, `array.into_iter()` uses `IntoIterator` normally to iterate
+/// by value, and `iter()` should be used to iterate by reference, as in previous editions.
+///
+/// ```rust,edition2021
+/// // Rust 2021:
+///
+/// let array: [i32; 3] = [0; 3];
+///
+/// // This iterates by reference:
+/// for item in array.iter().enumerate() {
+/// let (i, x): (usize, &i32) = item;
+/// println!("array[{i}] = {x}");
+/// }
+///
+/// // This iterates by value:
+/// for item in array.into_iter().enumerate() {
+/// let (i, x): (usize, i32) = item;
+/// println!("array[{i}] = {x}");
+/// }
+/// ```
+///
+/// Future language versions might start treating the `array.into_iter()`
+/// syntax on editions 2015 and 2018 the same as on edition 2021. So code using
+/// those older editions should still be written with this change in mind, to
+/// prevent breakage in the future. The safest way to accomplish this is to
+/// avoid the `into_iter` syntax on those editions. If an edition update is not
+/// viable/desired, there are multiple alternatives:
+/// * use `iter`, equivalent to the old behavior, creating references
+/// * use [`IntoIterator::into_iter`], equivalent to the post-2021 behavior (Rust 1.53+)
+/// * replace `for ... in array.into_iter() {` with `for ... in array {`,
+/// equivalent to the post-2021 behavior (Rust 1.53+)
+///
+/// ```rust,edition2018
+/// // Rust 2015 and 2018:
+///
+/// let array: [i32; 3] = [0; 3];
+///
+/// // This iterates by reference:
+/// for item in array.iter() {
+/// let x: &i32 = item;
+/// println!("{x}");
+/// }
+///
+/// // This iterates by value:
+/// for item in IntoIterator::into_iter(array) {
+/// let x: i32 = item;
+/// println!("{x}");
+/// }
+///
+/// // This iterates by value:
+/// for item in array {
+/// let x: i32 = item;
+/// println!("{x}");
+/// }
+///
+/// // IntoIter can also start a chain.
+/// // This iterates by value:
+/// for item in IntoIterator::into_iter(array).enumerate() {
+/// let (i, x): (usize, i32) = item;
+/// println!("array[{i}] = {x}");
+/// }
+/// ```
+///
+/// [slice]: prim@slice
+/// [`Debug`]: fmt::Debug
+/// [`Hash`]: hash::Hash
+/// [`Borrow`]: borrow::Borrow
+/// [`BorrowMut`]: borrow::BorrowMut
+/// [slice pattern]: ../reference/patterns.html#slice-patterns
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_array {}
+
+#[doc(primitive = "slice")]
+#[doc(alias = "[")]
+#[doc(alias = "]")]
+#[doc(alias = "[]")]
+/// A dynamically-sized view into a contiguous sequence, `[T]`. Contiguous here
+/// means that elements are laid out so that every element is the same
+/// distance from its neighbors.
+///
+/// *[See also the `std::slice` module](crate::slice).*
+///
+/// Slices are a view into a block of memory represented as a pointer and a
+/// length.
+///
+/// ```
+/// // slicing a Vec
+/// let vec = vec![1, 2, 3];
+/// let int_slice = &vec[..];
+/// // coercing an array to a slice
+/// let str_slice: &[&str] = &["one", "two", "three"];
+/// ```
+///
+/// Slices are either mutable or shared. The shared slice type is `&[T]`,
+/// while the mutable slice type is `&mut [T]`, where `T` represents the element
+/// type. For example, you can mutate the block of memory that a mutable slice
+/// points to:
+///
+/// ```
+/// let mut x = [1, 2, 3];
+/// let x = &mut x[..]; // Take a full slice of `x`.
+/// x[1] = 7;
+/// assert_eq!(x, &[1, 7, 3]);
+/// ```
+///
+/// As slices store the length of the sequence they refer to, they have twice
+/// the size of pointers to [`Sized`](marker/trait.Sized.html) types.
+/// Also see the reference on
+/// [dynamically sized types](../reference/dynamically-sized-types.html).
+///
+/// ```
+/// # use std::rc::Rc;
+/// let pointer_size = std::mem::size_of::<&u8>();
+/// assert_eq!(2 * pointer_size, std::mem::size_of::<&[u8]>());
+/// assert_eq!(2 * pointer_size, std::mem::size_of::<*const [u8]>());
+/// assert_eq!(2 * pointer_size, std::mem::size_of::<Box<[u8]>>());
+/// assert_eq!(2 * pointer_size, std::mem::size_of::<Rc<[u8]>>());
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_slice {}
+
+#[doc(primitive = "str")]
+//
+/// String slices.
+///
+/// *[See also the `std::str` module](crate::str).*
+///
+/// The `str` type, also called a 'string slice', is the most primitive string
+/// type. It is usually seen in its borrowed form, `&str`. It is also the type
+/// of string literals, `&'static str`.
+///
+/// String slices are always valid UTF-8.
+///
+/// # Examples
+///
+/// String literals are string slices:
+///
+/// ```
+/// let hello = "Hello, world!";
+///
+/// // with an explicit type annotation
+/// let hello: &'static str = "Hello, world!";
+/// ```
+///
+/// They are `'static` because they're stored directly in the final binary, and
+/// so will be valid for the `'static` duration.
+///
+/// # Representation
+///
+/// A `&str` is made up of two components: a pointer to some bytes, and a
+/// length. You can look at these with the [`as_ptr`] and [`len`] methods:
+///
+/// ```
+/// use std::slice;
+/// use std::str;
+///
+/// let story = "Once upon a time...";
+///
+/// let ptr = story.as_ptr();
+/// let len = story.len();
+///
+/// // story has nineteen bytes
+/// assert_eq!(19, len);
+///
+/// // We can re-build a str out of ptr and len. This is all unsafe because
+/// // we are responsible for making sure the two components are valid:
+/// let s = unsafe {
+/// // First, we build a &[u8]...
+/// let slice = slice::from_raw_parts(ptr, len);
+///
+/// // ... and then convert that slice into a string slice
+/// str::from_utf8(slice)
+/// };
+///
+/// assert_eq!(s, Ok(story));
+/// ```
+///
+/// [`as_ptr`]: str::as_ptr
+/// [`len`]: str::len
+///
+/// Note: This example shows the internals of `&str`. `unsafe` should not be
+/// used to get a string slice under normal circumstances. Use `as_str`
+/// (for example, `String::as_str`) instead.
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_str {}
+
+#[doc(primitive = "tuple")]
+#[doc(alias = "(")]
+#[doc(alias = ")")]
+#[doc(alias = "()")]
+//
+/// A finite heterogeneous sequence, `(T, U, ..)`.
+///
+/// Let's cover each of those in turn:
+///
+/// Tuples are *finite*. In other words, a tuple has a length. Here's a tuple
+/// of length `3`:
+///
+/// ```
+/// ("hello", 5, 'c');
+/// ```
+///
+/// 'Length' is also sometimes called 'arity' here; each tuple of a different
+/// length is a different, distinct type.
+///
+/// Tuples are *heterogeneous*. This means that each element of the tuple can
+/// have a different type. The tuple above, for example, has the type:
+///
+/// ```
+/// # let _:
+/// (&'static str, i32, char)
+/// # = ("hello", 5, 'c');
+/// ```
+///
+/// Tuples are a *sequence*. This means that they can be accessed by position;
+/// this is called 'tuple indexing', and it looks like this:
+///
+/// ```rust
+/// let tuple = ("hello", 5, 'c');
+///
+/// assert_eq!(tuple.0, "hello");
+/// assert_eq!(tuple.1, 5);
+/// assert_eq!(tuple.2, 'c');
+/// ```
+///
+/// The sequential nature of the tuple applies to its implementations of various
+/// traits. For example, in [`PartialOrd`] and [`Ord`], the elements are compared
+/// sequentially until the first non-equal pair is found.
+///
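+/// For example, ordering is decided by the first unequal pair of elements:
+///
+/// ```
+/// // The first elements are equal, so the second elements decide.
+/// assert!((1, 2) < (1, 3));
+/// // The first elements differ, so the rest are never considered.
+/// assert!((2, 0) > (1, 9));
+/// ```
+///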
+/// For more about tuples, see [the book](../book/ch03-02-data-types.html#the-tuple-type).
+///
+// Hardcoded anchor in src/librustdoc/html/format.rs
+// linked to as `#trait-implementations-1`
+/// # Trait implementations
+///
+/// In this documentation the shorthand `(T₁, T₂, …, Tₙ)` is used to represent tuples of varying
+/// length. When that is used, any trait bound expressed on `T` applies to each element of the
+/// tuple independently. Note that this is a convenience notation to avoid repetitive
+/// documentation, not valid Rust syntax.
+///
+/// Due to a temporary restriction in Rust’s type system, the following traits are only
+/// implemented on tuples of arity 12 or less. In the future, this may change:
+///
+/// * [`PartialEq`]
+/// * [`Eq`]
+/// * [`PartialOrd`]
+/// * [`Ord`]
+/// * [`Debug`]
+/// * [`Default`]
+/// * [`Hash`]
+///
+/// [`Debug`]: fmt::Debug
+/// [`Hash`]: hash::Hash
+///
+/// The following traits are implemented for tuples of any length. These traits have
+/// implementations that are automatically generated by the compiler, so are not limited by
+/// missing language features.
+///
+/// * [`Clone`]
+/// * [`Copy`]
+/// * [`Send`]
+/// * [`Sync`]
+/// * [`Unpin`]
+/// * [`UnwindSafe`]
+/// * [`RefUnwindSafe`]
+///
+/// [`Unpin`]: marker::Unpin
+/// [`UnwindSafe`]: panic::UnwindSafe
+/// [`RefUnwindSafe`]: panic::RefUnwindSafe
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// let tuple = ("hello", 5, 'c');
+///
+/// assert_eq!(tuple.0, "hello");
+/// ```
+///
+/// Tuples are often used as a return type when you want to return more than
+/// one value:
+///
+/// ```
+/// fn calculate_point() -> (i32, i32) {
+/// // Don't do a calculation, that's not the point of the example
+/// (4, 5)
+/// }
+///
+/// let point = calculate_point();
+///
+/// assert_eq!(point.0, 4);
+/// assert_eq!(point.1, 5);
+///
+/// // Combining this with patterns can be nicer.
+///
+/// let (x, y) = calculate_point();
+///
+/// assert_eq!(x, 4);
+/// assert_eq!(y, 5);
+/// ```
+///
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_tuple {}
+
+// Required to make auto trait impls render.
+// See src/librustdoc/passes/collect_trait_impls.rs:collect_trait_impls
+#[doc(hidden)]
+impl<T> (T,) {}
+
+// Fake impl that's only really used for docs.
+#[cfg(doc)]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(bootstrap), doc(fake_variadic))]
+/// This trait is implemented on arbitrary-length tuples.
+impl<T: Clone> Clone for (T,) {
+ fn clone(&self) -> Self {
+ loop {}
+ }
+}
+
+// Fake impl that's only really used for docs.
+#[cfg(doc)]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(bootstrap), doc(fake_variadic))]
+/// This trait is implemented on arbitrary-length tuples.
+impl<T: Copy> Copy for (T,) {
+ // empty
+}
+
+#[doc(primitive = "f32")]
+/// A 32-bit floating point type (specifically, the "binary32" type defined in IEEE 754-2008).
+///
+/// This type can represent a wide range of decimal numbers, like `3.5`, `27`,
+/// `-113.75`, `0.0078125`, `34359738368`, `0`, `-1`. So unlike integer types
+/// (such as `i32`), floating point types can represent non-integer numbers,
+/// too.
+///
+/// However, being able to represent this wide range of numbers comes at the
+/// cost of precision: floats can only represent some of the real numbers and
+/// calculations with floats round to a nearby representable number. For example,
+/// `5.0` and `1.0` can be exactly represented as `f32`, but `1.0 / 5.0` results
+/// in `0.20000000298023223876953125` since `0.2` cannot be exactly represented
+/// as `f32`. Note, however, that printing floats with `println` and friends will
+/// often discard insignificant digits: `println!("{}", 1.0f32 / 5.0f32)` will
+/// print `0.2`.
+///
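+/// For example, the effect of this rounding can be observed directly:
+///
+/// ```
+/// let x = 1.0f32 / 5.0f32;
+/// // Both sides round to the same nearest `f32`, so they compare equal...
+/// assert_eq!(x, 0.2f32);
+/// // ...even though the stored value is not exactly 0.2, as widening to
+/// // `f64` (which is exact for any `f32`) reveals.
+/// assert_ne!(x as f64, 0.2f64);
+/// ```
+///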
+/// Additionally, `f32` can represent some special values (a few of which are
+/// demonstrated in the example after this list):
+///
+/// - −0.0: IEEE 754 floating point numbers have a bit that indicates their sign, so −0.0 is a
+/// possible value. For comparison −0.0 = +0.0, but floating point operations can carry
+/// the sign bit through arithmetic operations. This means −0.0 × +0.0 produces −0.0 and
+/// a negative number rounded to a value smaller than a float can represent also produces −0.0.
+/// - [∞](#associatedconstant.INFINITY) and
+/// [−∞](#associatedconstant.NEG_INFINITY): these result from calculations
+/// like `1.0 / 0.0`.
+/// - [NaN (not a number)](#associatedconstant.NAN): this value results from
+/// calculations like `(-1.0).sqrt()`. NaN has some potentially unexpected
+/// behavior:
+/// - It is unequal to any float, including itself! This is the reason `f32`
+/// doesn't implement the `Eq` trait.
+/// - It is also neither smaller nor greater than any float, making it
+/// impossible to sort by the default comparison operation, which is the
+/// reason `f32` doesn't implement the `Ord` trait.
+/// - It is also considered *infectious* as almost all calculations where one
+/// of the operands is NaN will also result in NaN. The explanations on this
+/// page only explicitly document behavior on NaN operands if this default
+/// is deviated from.
+/// - Lastly, there are multiple bit patterns that are considered NaN.
+/// Rust does not currently guarantee that the bit patterns of NaN are
+/// preserved over arithmetic operations, and they are not guaranteed to be
+/// portable or even fully deterministic! This means that there may be some
+/// surprising results upon inspecting the bit patterns,
+/// as the same calculations might produce NaNs with different bit patterns.
+///
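+/// A few of these special values in action:
+///
+/// ```
+/// let inf = 1.0f32 / 0.0;
+/// assert_eq!(inf, f32::INFINITY);
+///
+/// let nan = (-1.0f32).sqrt();
+/// assert!(nan.is_nan());
+/// assert_ne!(nan, nan); // NaN is unequal even to itself
+///
+/// assert_eq!(-0.0f32, 0.0f32); // -0.0 compares equal to +0.0...
+/// assert!((-0.0f32).is_sign_negative()); // ...but keeps its sign bit
+/// ```
+///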
+/// When the number resulting from a primitive operation (addition,
+/// subtraction, multiplication, or division) on this type is not exactly
+/// representable as `f32`, it is rounded according to the roundTiesToEven
+/// direction defined in IEEE 754-2008. That means (see the example below):
+///
+/// - The result is the representable value closest to the true value, if there
+/// is a unique closest representable value.
+/// - If the true value is exactly half-way between two representable values,
+/// the result is the one with an even least-significant binary digit.
+/// - If the true value's magnitude is ≥ `f32::MAX` + 2<sup>(`f32::MAX_EXP` −
+/// `f32::MANTISSA_DIGITS` − 1)</sup>, the result is ∞ or −∞ (preserving the
+/// true value's sign).
+///
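+/// For example, a half-way case:
+///
+/// ```
+/// // 2^24 + 1 = 16_777_217 is not representable as an `f32`: the true sum
+/// // below lies exactly half-way between 16_777_216 and 16_777_218, and the
+/// // tie is broken towards the neighbor with an even significand.
+/// assert_eq!(16_777_216f32 + 1.0, 16_777_216.0);
+/// ```
+///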
+/// For more information on floating point numbers, see [Wikipedia][wikipedia].
+///
+/// *[See also the `std::f32::consts` module](crate::f32::consts).*
+///
+/// [wikipedia]: https://en.wikipedia.org/wiki/Single-precision_floating-point_format
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_f32 {}
+
+#[doc(primitive = "f64")]
+/// A 64-bit floating point type (specifically, the "binary64" type defined in IEEE 754-2008).
+///
+/// This type is very similar to [`f32`], but has increased
+/// precision by using twice as many bits. Please see [the documentation for
+/// `f32`][`f32`] or [Wikipedia on double precision
+/// values][wikipedia] for more information.
+///
+/// *[See also the `std::f64::consts` module](crate::f64::consts).*
+///
+/// [`f32`]: prim@f32
+/// [wikipedia]: https://en.wikipedia.org/wiki/Double-precision_floating-point_format
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_f64 {}
+
+#[doc(primitive = "i8")]
+//
+/// The 8-bit signed integer type.
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_i8 {}
+
+#[doc(primitive = "i16")]
+//
+/// The 16-bit signed integer type.
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_i16 {}
+
+#[doc(primitive = "i32")]
+//
+/// The 32-bit signed integer type.
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_i32 {}
+
+#[doc(primitive = "i64")]
+//
+/// The 64-bit signed integer type.
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_i64 {}
+
+#[doc(primitive = "i128")]
+//
+/// The 128-bit signed integer type.
+#[stable(feature = "i128", since = "1.26.0")]
+mod prim_i128 {}
+
+#[doc(primitive = "u8")]
+//
+/// The 8-bit unsigned integer type.
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_u8 {}
+
+#[doc(primitive = "u16")]
+//
+/// The 16-bit unsigned integer type.
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_u16 {}
+
+#[doc(primitive = "u32")]
+//
+/// The 32-bit unsigned integer type.
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_u32 {}
+
+#[doc(primitive = "u64")]
+//
+/// The 64-bit unsigned integer type.
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_u64 {}
+
+#[doc(primitive = "u128")]
+//
+/// The 128-bit unsigned integer type.
+#[stable(feature = "i128", since = "1.26.0")]
+mod prim_u128 {}
+
+#[doc(primitive = "isize")]
+//
+/// The pointer-sized signed integer type.
+///
+/// The size of this primitive is how many bytes it takes to reference any
+/// location in memory. For example, on a 32-bit target, this is 4 bytes
+/// and on a 64-bit target, this is 8 bytes.
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_isize {}
+
+#[doc(primitive = "usize")]
+//
+/// The pointer-sized unsigned integer type.
+///
+/// The size of this primitive is how many bytes it takes to reference any
+/// location in memory. For example, on a 32-bit target, this is 4 bytes
+/// and on a 64-bit target, this is 8 bytes.
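+///
+/// For example:
+///
+/// ```
+/// use std::mem::size_of;
+///
+/// // `usize` always has the same size as a (thin) raw pointer.
+/// assert_eq!(size_of::<usize>(), size_of::<*const ()>());
+/// ```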
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_usize {}
+
+#[doc(primitive = "reference")]
+#[doc(alias = "&")]
+#[doc(alias = "&mut")]
+//
+/// References, both shared and mutable.
+///
+/// A reference represents a borrow of some owned value. You can get one by using the `&` or `&mut`
+/// operators on a value, or by using a [`ref`](../std/keyword.ref.html) or
+/// <code>[ref](../std/keyword.ref.html) [mut](../std/keyword.mut.html)</code> pattern.
+///
+/// For those familiar with pointers, a reference is just a pointer that is assumed to be
+/// aligned, not null, and pointing to memory containing a valid value of `T` - for example,
+/// <code>&[bool]</code> can only point to an allocation containing the integer values `1`
+/// ([`true`](../std/keyword.true.html)) or `0` ([`false`](../std/keyword.false.html)), but
+/// creating a <code>&[bool]</code> that points to an allocation containing
+/// the value `3` causes undefined behaviour.
+/// In fact, <code>[Option]\<&T></code> has the same memory representation as a
+/// nullable but aligned pointer, and can be passed across FFI boundaries as such.
+///
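+/// For example, the guarantee mentioned above can be observed directly:
+///
+/// ```
+/// use std::mem::size_of;
+///
+/// // `None` is represented by the (otherwise impossible) null pointer,
+/// // so the `Option` adds no space overhead.
+/// assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
+/// ```
+///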
+/// In most cases, references can be used much like the original value. Field access, method
+/// calling, and indexing work the same (save for mutability rules, of course). In addition, the
+/// comparison operators transparently defer to the referent's implementation, allowing references
+/// to be compared the same as owned values.
+///
+/// References have a lifetime attached to them, which represents the scope for which the borrow is
+/// valid. A lifetime is said to "outlive" another one if its representative scope is as long or
+/// longer than the other. The `'static` lifetime is the longest lifetime, which represents the
+/// total life of the program. For example, string literals have a `'static` lifetime because the
+/// text data is embedded into the binary of the program, rather than in an allocation that needs
+/// to be dynamically managed.
+///
+/// `&mut T` references can be freely coerced into `&T` references with the same referent type, and
+/// references with longer lifetimes can be freely coerced into references with shorter ones.
+///
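+/// For example:
+///
+/// ```
+/// let mut x = 5;
+/// let m: &mut i32 = &mut x;
+/// let r: &i32 = m; // `&mut i32` coerces to `&i32`
+/// assert_eq!(*r, 5);
+/// ```
+///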
+/// Reference equality by address, instead of comparing the values pointed to, is accomplished via
+/// implicit reference-pointer coercion and raw pointer equality via [`ptr::eq`], while
+/// [`PartialEq`] compares values.
+///
+/// ```
+/// use std::ptr;
+///
+/// let five = 5;
+/// let other_five = 5;
+/// let five_ref = &five;
+/// let same_five_ref = &five;
+/// let other_five_ref = &other_five;
+///
+/// assert!(five_ref == same_five_ref);
+/// assert!(five_ref == other_five_ref);
+///
+/// assert!(ptr::eq(five_ref, same_five_ref));
+/// assert!(!ptr::eq(five_ref, other_five_ref));
+/// ```
+///
+/// For more information on how to use references, see [the book's section on "References and
+/// Borrowing"][book-refs].
+///
+/// [book-refs]: ../book/ch04-02-references-and-borrowing.html
+///
+/// # Trait implementations
+///
+/// The following traits are implemented for all `&T`, regardless of the type of its referent:
+///
+/// * [`Copy`]
+/// * [`Clone`] \(Note that this will not defer to `T`'s `Clone` implementation if it exists!)
+/// * [`Deref`]
+/// * [`Borrow`]
+/// * [`fmt::Pointer`]
+///
+/// [`Deref`]: ops::Deref
+/// [`Borrow`]: borrow::Borrow
+///
+/// `&mut T` references get all of the above except `Copy` and `Clone` (to prevent creating
+/// multiple simultaneous mutable borrows), plus the following, regardless of the type of its
+/// referent:
+///
+/// * [`DerefMut`]
+/// * [`BorrowMut`]
+///
+/// [`DerefMut`]: ops::DerefMut
+/// [`BorrowMut`]: borrow::BorrowMut
+/// [bool]: prim@bool
+///
+/// The following traits are implemented on `&T` references if the underlying `T` also implements
+/// that trait:
+///
+/// * All the traits in [`std::fmt`] except [`fmt::Pointer`] (which is implemented regardless of the type of its referent) and [`fmt::Write`]
+/// * [`PartialOrd`]
+/// * [`Ord`]
+/// * [`PartialEq`]
+/// * [`Eq`]
+/// * [`AsRef`]
+/// * [`Fn`] \(in addition, `&T` references get [`FnMut`] and [`FnOnce`] if `T: Fn`)
+/// * [`Hash`]
+/// * [`ToSocketAddrs`]
+/// * [`Send`] \(`&T` references also require <code>T: [Sync]</code>)
+///
+/// [`std::fmt`]: fmt
+/// [`Hash`]: hash::Hash
+#[doc = concat!("[`ToSocketAddrs`]: ", include_str!("../primitive_docs/net_tosocketaddrs.md"))]
+///
+/// `&mut T` references get all of the above except `ToSocketAddrs`, plus the following, if `T`
+/// implements that trait:
+///
+/// * [`AsMut`]
+/// * [`FnMut`] \(in addition, `&mut T` references get [`FnOnce`] if `T: FnMut`)
+/// * [`fmt::Write`]
+/// * [`Iterator`]
+/// * [`DoubleEndedIterator`]
+/// * [`ExactSizeIterator`]
+/// * [`FusedIterator`]
+/// * [`TrustedLen`]
+/// * [`io::Write`]
+/// * [`Read`]
+/// * [`Seek`]
+/// * [`BufRead`]
+///
+/// [`FusedIterator`]: iter::FusedIterator
+/// [`TrustedLen`]: iter::TrustedLen
+#[doc = concat!("[`Seek`]: ", include_str!("../primitive_docs/io_seek.md"))]
+#[doc = concat!("[`BufRead`]: ", include_str!("../primitive_docs/io_bufread.md"))]
+#[doc = concat!("[`Read`]: ", include_str!("../primitive_docs/io_read.md"))]
+#[doc = concat!("[`io::Write`]: ", include_str!("../primitive_docs/io_write.md"))]
+///
+/// Note that due to method call deref coercion, simply calling a trait method will act as if it
+/// worked on references just as it does on owned values! The implementations described here are
+/// meant for generic contexts, where the final type `T` is a type parameter or otherwise not
+/// locally known.
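+///
+/// For example:
+///
+/// ```
+/// let v = vec![1, 2, 3];
+/// let r = &&&v;
+/// // Method calls look through any number of references:
+/// assert_eq!(r.len(), 3);
+/// ```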
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_ref {}
+
+#[doc(primitive = "fn")]
+//
+/// Function pointers, like `fn(usize) -> bool`.
+///
+/// *See also the traits [`Fn`], [`FnMut`], and [`FnOnce`].*
+///
+/// [`Fn`]: ops::Fn
+/// [`FnMut`]: ops::FnMut
+/// [`FnOnce`]: ops::FnOnce
+///
+/// Function pointers are pointers that point to *code*, not data. They can be called
+/// just like functions. Like references, function pointers are, among other things, assumed to
+/// not be null, so if you want to pass a function pointer over FFI and be able to accommodate null
+/// pointers, make your type [`Option<fn()>`](core::option#options-and-pointers-nullable-pointers)
+/// with your required signature.
+///
+/// ### Safety
+///
+/// Plain function pointers are obtained by casting either plain functions, or closures that don't
+/// capture an environment:
+///
+/// ```
+/// fn add_one(x: usize) -> usize {
+/// x + 1
+/// }
+///
+/// let ptr: fn(usize) -> usize = add_one;
+/// assert_eq!(ptr(5), 6);
+///
+/// let clos: fn(usize) -> usize = |x| x + 5;
+/// assert_eq!(clos(5), 10);
+/// ```
+///
+/// In addition to varying based on their signature, function pointers come in two flavors: safe
+/// and unsafe. Plain `fn()` function pointers can only point to safe functions,
+/// while `unsafe fn()` function pointers can point to safe or unsafe functions.
+///
+/// ```
+/// fn add_one(x: usize) -> usize {
+/// x + 1
+/// }
+///
+/// unsafe fn add_one_unsafely(x: usize) -> usize {
+/// x + 1
+/// }
+///
+/// let safe_ptr: fn(usize) -> usize = add_one;
+///
+/// //ERROR: mismatched types: expected normal fn, found unsafe fn
+/// //let bad_ptr: fn(usize) -> usize = add_one_unsafely;
+///
+/// let unsafe_ptr: unsafe fn(usize) -> usize = add_one_unsafely;
+/// let really_safe_ptr: unsafe fn(usize) -> usize = add_one;
+/// ```
+///
+/// ### ABI
+///
+/// On top of that, function pointers can vary based on what ABI they use. This
+/// is achieved by adding the `extern` keyword before the type, followed by the
+/// ABI in question. The default ABI is "Rust", i.e., `fn()` is the exact same
+/// type as `extern "Rust" fn()`. A pointer to a function with C ABI would have
+/// type `extern "C" fn()`.
+///
+/// `extern "ABI" { ... }` blocks declare functions with ABI "ABI". The default
+/// here is "C", i.e., functions declared in an `extern {...}` block have "C"
+/// ABI.
+///
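+/// For example, a function pointer that uses the C ABI:
+///
+/// ```
+/// extern "C" fn meaning_of_life() -> i32 { 42 }
+///
+/// let ptr: extern "C" fn() -> i32 = meaning_of_life;
+/// assert_eq!(ptr(), 42);
+/// ```
+///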
+/// For more information and a list of supported ABIs, see [the nomicon's
+/// section on foreign calling conventions][nomicon-abi].
+///
+/// [nomicon-abi]: ../nomicon/ffi.html#foreign-calling-conventions
+///
+/// ### Variadic functions
+///
+/// Extern function declarations with the "C" or "cdecl" ABIs can also be *variadic*, allowing them
+/// to be called with a variable number of arguments. Normal Rust functions, even those with an
+/// `extern "ABI"`, cannot be variadic. For more information, see [the nomicon's section on
+/// variadic functions][nomicon-variadic].
+///
+/// [nomicon-variadic]: ../nomicon/ffi.html#variadic-functions
+///
+/// ### Creating function pointers
+///
+/// When `bar` is the name of a function, then the expression `bar` is *not* a
+/// function pointer. Rather, it denotes a value of an unnameable type that
+/// uniquely identifies the function `bar`. The value is zero-sized because the
+/// type already identifies the function. This has the advantage that "calling"
+/// the value (it implements the `Fn*` traits) does not require dynamic
+/// dispatch.
+///
+/// This zero-sized type *coerces* to a regular function pointer. For example:
+///
+/// ```rust
+/// use std::mem;
+///
+/// fn bar(x: i32) {}
+///
+/// let not_bar_ptr = bar; // `not_bar_ptr` is zero-sized, uniquely identifying `bar`
+/// assert_eq!(mem::size_of_val(&not_bar_ptr), 0);
+///
+/// let bar_ptr: fn(i32) = not_bar_ptr; // force coercion to function pointer
+/// assert_eq!(mem::size_of_val(&bar_ptr), mem::size_of::<usize>());
+///
+/// let footgun = &bar; // this is a shared reference to the zero-sized type identifying `bar`
+/// ```
+///
+/// The last line shows that `&bar` is not a function pointer either. Rather, it
+/// is a reference to the function-specific ZST. `&bar` is basically never what you
+/// want when `bar` is a function.
+///
+/// ### Casting to and from integers
+///
+/// You can cast function pointers directly to integers:
+///
+/// ```rust
+/// let fnptr: fn(i32) -> i32 = |x| x+2;
+/// let fnptr_addr = fnptr as usize;
+/// ```
+///
+/// However, a direct cast back is not possible. You need to use `transmute`:
+///
+/// ```rust
+/// # let fnptr: fn(i32) -> i32 = |x| x+2;
+/// # let fnptr_addr = fnptr as usize;
+/// let fnptr = fnptr_addr as *const ();
+/// let fnptr: fn(i32) -> i32 = unsafe { std::mem::transmute(fnptr) };
+/// assert_eq!(fnptr(40), 42);
+/// ```
+///
+/// Crucially, we `as`-cast to a raw pointer before `transmute`ing to a function pointer.
+/// This avoids an integer-to-pointer `transmute`, which can be problematic.
+/// Transmuting between raw pointers and function pointers (i.e., two pointer types) is fine.
+///
+/// Note that all of this is not portable to platforms where function pointers and data pointers
+/// have different sizes.
+///
+/// ### Trait implementations
+///
+/// In this documentation the shorthand `fn (T₁, T₂, …, Tₙ)` is used to represent non-variadic
+/// function pointers of varying length. Note that this is a convenience notation to avoid
+/// repetitive documentation, not valid Rust syntax.
+///
+/// Due to a temporary restriction in Rust's type system, these traits are only implemented on
+/// functions that take 12 arguments or less, with the `"Rust"` and `"C"` ABIs. In the future, this
+/// may change:
+///
+/// * [`PartialEq`]
+/// * [`Eq`]
+/// * [`PartialOrd`]
+/// * [`Ord`]
+/// * [`Hash`]
+/// * [`Pointer`]
+/// * [`Debug`]
+///
+/// The following traits are implemented for function pointers with any number of arguments and
+/// any ABI. These traits have implementations that are automatically generated by the compiler,
+/// so are not limited by missing language features:
+///
+/// * [`Clone`]
+/// * [`Copy`]
+/// * [`Send`]
+/// * [`Sync`]
+/// * [`Unpin`]
+/// * [`UnwindSafe`]
+/// * [`RefUnwindSafe`]
+///
+/// [`Hash`]: hash::Hash
+/// [`Pointer`]: fmt::Pointer
+/// [`UnwindSafe`]: panic::UnwindSafe
+/// [`RefUnwindSafe`]: panic::RefUnwindSafe
+///
+/// In addition, all *safe* function pointers implement [`Fn`], [`FnMut`], and [`FnOnce`], because
+/// these traits are specially known to the compiler.
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_fn {}
+
+// Required to make auto trait impls render.
+// See src/librustdoc/passes/collect_trait_impls.rs:collect_trait_impls
+#[doc(hidden)]
+#[cfg(not(bootstrap))]
+impl<Ret, T> fn(T) -> Ret {}
+
+// Fake impl that's only really used for docs.
+#[cfg(doc)]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(bootstrap), doc(fake_variadic))]
+/// This trait is implemented on function pointers with any number of arguments.
+impl<Ret, T> Clone for fn(T) -> Ret {
+ fn clone(&self) -> Self {
+ loop {}
+ }
+}
+
+// Fake impl that's only really used for docs.
+#[cfg(doc)]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(bootstrap), doc(fake_variadic))]
+/// This trait is implemented on function pointers with any number of arguments.
+impl<Ret, T> Copy for fn(T) -> Ret {
+ // empty
+}
diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs
new file mode 100644
index 000000000..e0655d68d
--- /dev/null
+++ b/library/core/src/ptr/const_ptr.rs
@@ -0,0 +1,1525 @@
+use super::*;
+use crate::cmp::Ordering::{self, Equal, Greater, Less};
+use crate::intrinsics;
+use crate::mem;
+use crate::slice::{self, SliceIndex};
+
+impl<T: ?Sized> *const T {
+ /// Returns `true` if the pointer is null.
+ ///
+ /// Note that unsized types have many possible null pointers, as only the
+ /// raw data pointer is considered, not their length, vtable, etc.
+ /// Therefore, two pointers that are null may still not compare equal to
+ /// each other.
+ ///
+ /// ## Behavior during const evaluation
+ ///
+ /// When this function is used during const evaluation, it may return `false` for pointers
+ /// that turn out to be null at runtime. Specifically, when a pointer to some memory
+ /// is offset beyond its bounds in such a way that the resulting pointer is null,
+ /// the function will still return `false`. There is no way for CTFE to know
+ /// the absolute position of that memory, so we cannot tell if the pointer is
+ /// null or not.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s: &str = "Follow the rabbit";
+ /// let ptr: *const u8 = s.as_ptr();
+ /// assert!(!ptr.is_null());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_ptr_is_null", issue = "74939")]
+ #[inline]
+ pub const fn is_null(self) -> bool {
+ // Compare via a cast to a thin pointer, so fat pointers are only
+ // considering their "data" part for null-ness.
+ (self as *const u8).guaranteed_eq(null())
+ }
+
+ /// Casts to a pointer of another type.
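+ ///
+ /// # Examples
+ ///
+ /// A minimal illustration: casting changes only the pointee type, not the
+ /// address.
+ ///
+ /// ```
+ /// let v = [1u16, 2];
+ /// let p: *const u16 = v.as_ptr();
+ /// let q: *const u8 = p.cast();
+ /// assert_eq!(p as usize, q as usize);
+ /// ```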
+ #[stable(feature = "ptr_cast", since = "1.38.0")]
+ #[rustc_const_stable(feature = "const_ptr_cast", since = "1.38.0")]
+ #[inline]
+ pub const fn cast<U>(self) -> *const U {
+ self as _
+ }
+
+ /// Use the pointer value in a new pointer of another type.
+ ///
+ /// In case `val` is a (fat) pointer to an unsized type, this operation
+ /// will ignore the pointer part of `val`, whereas for (thin) pointers to
+ /// sized types, this has the same effect as a simple cast.
+ ///
+ /// The resulting pointer will have provenance of `self`, i.e., for a fat
+ /// pointer, this operation is semantically the same as creating a new
+ /// fat pointer with the data pointer value of `self` but the metadata of
+ /// `val`.
+ ///
+ /// # Examples
+ ///
+ /// This function is primarily useful for allowing byte-wise pointer
+ /// arithmetic on potentially fat pointers:
+ ///
+ /// ```
+ /// #![feature(set_ptr_value)]
+ /// # use core::fmt::Debug;
+ /// let arr: [i32; 3] = [1, 2, 3];
+ /// let mut ptr = arr.as_ptr() as *const dyn Debug;
+ /// let thin = ptr as *const u8;
+ /// unsafe {
+ /// ptr = thin.add(8).with_metadata_of(ptr);
+ /// # assert_eq!(*(ptr as *const i32), 3);
+ /// println!("{:?}", &*ptr); // will print "3"
+ /// }
+ /// ```
+ #[unstable(feature = "set_ptr_value", issue = "75091")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[inline]
+ pub fn with_metadata_of<U>(self, mut val: *const U) -> *const U
+ where
+ U: ?Sized,
+ {
+ let target = &mut val as *mut *const U as *mut *const u8;
+ // SAFETY: In case of a thin pointer, this operation is identical
+ // to a simple assignment. In case of a fat pointer, with the current
+ // fat pointer layout implementation, the first field of such a
+ // pointer is always the data pointer, which is likewise assigned.
+ unsafe { *target = self as *const u8 };
+ val
+ }
+
+ /// Changes constness without changing the type.
+ ///
+ /// This is a bit safer than `as` because it wouldn't silently change the type if the code is
+ /// refactored.
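+ ///
+ /// # Examples
+ ///
+ /// A small illustration (this method is gated on the unstable
+ /// `ptr_const_cast` feature):
+ ///
+ /// ```
+ /// #![feature(ptr_const_cast)]
+ /// let p: *const u8 = &42u8;
+ /// let m: *mut u8 = p.cast_mut();
+ /// assert_eq!(m as *const u8, p);
+ /// ```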
+ #[unstable(feature = "ptr_const_cast", issue = "92675")]
+ #[rustc_const_unstable(feature = "ptr_const_cast", issue = "92675")]
+ pub const fn cast_mut(self) -> *mut T {
+ self as _
+ }
+
+ /// Casts a pointer to its raw bits.
+ ///
+ /// This is equivalent to `as usize`, but is more specific to enhance readability.
+ /// The inverse method is [`from_bits`](#method.from_bits).
+ ///
+ /// In particular, `*p as usize` and `p as usize` will both compile for
+ /// pointers to numeric types but do very different things, so using this
+ /// helps emphasize that reading the bits was intentional.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ptr_to_from_bits)]
+ /// let array = [13, 42];
+ /// let p0: *const i32 = &array[0];
+ /// assert_eq!(<*const _>::from_bits(p0.to_bits()), p0);
+ /// let p1: *const i32 = &array[1];
+ /// assert_eq!(p1.to_bits() - p0.to_bits(), 4);
+ /// ```
+ #[unstable(feature = "ptr_to_from_bits", issue = "91126")]
+ pub fn to_bits(self) -> usize
+ where
+ T: Sized,
+ {
+ self as usize
+ }
+
+ /// Creates a pointer from its raw bits.
+ ///
+ /// This is equivalent to `as *const T`, but is more specific to enhance readability.
+ /// The inverse method is [`to_bits`](#method.to_bits).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ptr_to_from_bits)]
+ /// use std::ptr::NonNull;
+ /// let dangling: *const u8 = NonNull::dangling().as_ptr();
+ /// assert_eq!(<*const u8>::from_bits(1), dangling);
+ /// ```
+ #[unstable(feature = "ptr_to_from_bits", issue = "91126")]
+ pub fn from_bits(bits: usize) -> Self
+ where
+ T: Sized,
+ {
+ bits as Self
+ }
+
+ /// Gets the "address" portion of the pointer.
+ ///
+ /// This is similar to `self as usize`, which semantically discards *provenance* and
+ /// *address-space* information. However, unlike `self as usize`, casting the returned address
+ /// back to a pointer yields [`invalid`][], which is undefined behavior to dereference. To
+ /// properly restore the lost information and obtain a dereferenceable pointer, use
+ /// [`with_addr`][pointer::with_addr] or [`map_addr`][pointer::map_addr].
+ ///
+ /// If using those APIs is not possible because there is no way to preserve a pointer with the
+ /// required provenance, use [`expose_addr`][pointer::expose_addr] and
+ /// [`from_exposed_addr`][from_exposed_addr] instead. However, note that this makes
+ /// your code less portable and less amenable to tools that check for compliance with the Rust
+ /// memory model.
+ ///
+ /// On most platforms this will produce a value with the same bytes as the original
+ /// pointer, because all the bytes are dedicated to describing the address.
+ /// Platforms which need to store additional information in the pointer may
+ /// perform a change of representation to produce a value containing only the address
+ /// portion of the pointer. What that means is up to the platform to define.
+ ///
+ /// This API and its claimed semantics are part of the Strict Provenance experiment, and as such
+ /// might change in the future (including possibly weakening this so it becomes wholly
+ /// equivalent to `self as usize`). See the [module documentation][crate::ptr] for details.
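+ ///
+ /// # Examples
+ ///
+ /// A sketch of a typical use: inspecting alignment without ever
+ /// materializing a pointer from an integer.
+ ///
+ /// ```
+ /// #![feature(strict_provenance)]
+ /// let x = 0u32;
+ /// let p: *const u32 = &x;
+ /// // References are always aligned, so the address is a multiple of 4.
+ /// assert_eq!(p.addr() % std::mem::align_of::<u32>(), 0);
+ /// ```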
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "strict_provenance", issue = "95228")]
+ pub fn addr(self) -> usize
+ where
+ T: Sized,
+ {
+ // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
+ // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
+ // provenance).
+ unsafe { mem::transmute(self) }
+ }
+
+ /// Gets the "address" portion of the pointer, and 'exposes' the "provenance" part for future
+ /// use in [`from_exposed_addr`][].
+ ///
+ /// This is equivalent to `self as usize`, which semantically discards *provenance* and
+ /// *address-space* information. Furthermore, this (like the `as` cast) has the implicit
+ /// side-effect of marking the provenance as 'exposed', so on platforms that support it you can
+ /// later call [`from_exposed_addr`][] to reconstitute the original pointer including its
+ /// provenance. (Reconstructing address space information, if required, is your responsibility.)
+ ///
+ /// Using this method means that code is *not* following Strict Provenance rules. Supporting
+ /// [`from_exposed_addr`][] complicates specification and reasoning and may not be supported by
+ /// tools that help you to stay conformant with the Rust memory model, so it is recommended to
+ /// use [`addr`][pointer::addr] wherever possible.
+ ///
+ /// On most platforms this will produce a value with the same bytes as the original pointer,
+ /// because all the bytes are dedicated to describing the address. Platforms which need to store
+ /// additional information in the pointer may not support this operation, since the 'expose'
+ /// side-effect which is required for [`from_exposed_addr`][] to work is typically not
+ /// available.
+ ///
+ /// This API and its claimed semantics are part of the Strict Provenance experiment, see the
+ /// [module documentation][crate::ptr] for details.
+ ///
+ /// [`from_exposed_addr`]: from_exposed_addr
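+ ///
+ /// # Examples
+ ///
+ /// A sketch of the round trip through an exposed address (both functions
+ /// are gated on the unstable `strict_provenance` feature):
+ ///
+ /// ```
+ /// #![feature(strict_provenance)]
+ /// use std::ptr;
+ ///
+ /// let x = 3u8;
+ /// let p: *const u8 = &x;
+ /// let addr = p.expose_addr();
+ /// // Because the provenance was exposed, the pointer can be reconstituted.
+ /// let q = ptr::from_exposed_addr::<u8>(addr);
+ /// assert_eq!(unsafe { *q }, 3);
+ /// ```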
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "strict_provenance", issue = "95228")]
+ pub fn expose_addr(self) -> usize
+ where
+ T: Sized,
+ {
+ // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
+ self as usize
+ }
+
+ /// Creates a new pointer with the given address.
+ ///
+ /// This performs the same operation as an `addr as ptr` cast, but copies
+ /// the *address-space* and *provenance* of `self` to the new pointer.
+ /// This allows us to dynamically preserve and propagate this important
+ /// information in a way that is otherwise impossible with a unary cast.
+ ///
+ /// This is equivalent to using [`wrapping_offset`][pointer::wrapping_offset] to offset
+ /// `self` to the given address, and therefore has all the same capabilities and restrictions.
+ ///
+ /// This API and its claimed semantics are part of the Strict Provenance experiment,
+ /// see the [module documentation][crate::ptr] for details.
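+ ///
+ /// # Examples
+ ///
+ /// A sketch of aligning a pointer down to a 4-byte boundary while keeping
+ /// its provenance:
+ ///
+ /// ```
+ /// #![feature(strict_provenance)]
+ /// let buf = [0u8; 16];
+ /// let p = buf.as_ptr().wrapping_add(5);
+ /// // Round the address down to a multiple of 4; the result still points
+ /// // into `buf` and keeps its provenance.
+ /// let aligned = p.with_addr(p.addr() & !3);
+ /// assert_eq!(aligned.addr() % 4, 0);
+ /// ```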
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "strict_provenance", issue = "95228")]
+ pub fn with_addr(self, addr: usize) -> Self
+ where
+ T: Sized,
+ {
+ // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
+ //
+ // In the meantime, this operation is defined to be "as if" it was
+ // a wrapping_offset, so we can emulate it as such. This should properly
+ // restore pointer provenance even under today's compiler.
+ let self_addr = self.addr() as isize;
+ let dest_addr = addr as isize;
+ let offset = dest_addr.wrapping_sub(self_addr);
+
+ // This is the canonical desugaring of this operation
+ self.cast::<u8>().wrapping_offset(offset).cast::<T>()
+ }
+
+ /// Creates a new pointer by mapping `self`'s address to a new one.
+ ///
+ /// This is a convenience for [`with_addr`][pointer::with_addr], see that method for details.
+ ///
+ /// This API and its claimed semantics are part of the Strict Provenance experiment,
+ /// see the [module documentation][crate::ptr] for details.
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "strict_provenance", issue = "95228")]
+ pub fn map_addr(self, f: impl FnOnce(usize) -> usize) -> Self
+ where
+ T: Sized,
+ {
+ self.with_addr(f(self.addr()))
+ }
+
+ /// Decompose a (possibly wide) pointer into its address and metadata components.
+ ///
+ /// The pointer can be later reconstructed with [`from_raw_parts`].
+ #[unstable(feature = "ptr_metadata", issue = "81513")]
+ #[rustc_const_unstable(feature = "ptr_metadata", issue = "81513")]
+ #[inline]
+ pub const fn to_raw_parts(self) -> (*const (), <T as super::Pointee>::Metadata) {
+ (self.cast(), metadata(self))
+ }
+
+ /// Returns `None` if the pointer is null, or else returns a shared reference to
+ /// the value wrapped in `Some`. If the value may be uninitialized, [`as_uninit_ref`]
+ /// must be used instead.
+ ///
+ /// [`as_uninit_ref`]: #method.as_uninit_ref
+ ///
+ /// # Safety
+ ///
+ /// When calling this method, you have to ensure that *either* the pointer is null *or*
+ /// all of the following is true:
+ ///
+ /// * The pointer must be properly aligned.
+ ///
+ /// * It must be "dereferenceable" in the sense defined in [the module documentation].
+ ///
+ /// * The pointer must point to an initialized instance of `T`.
+ ///
+ /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+ /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+ /// In particular, while this reference exists, the memory the pointer points to must
+ /// not get mutated (except inside `UnsafeCell`).
+ ///
+ /// This applies even if the result of this method is unused!
+ /// (The part about being initialized is not yet fully decided, but until
+ /// it is, the only safe approach is to ensure that the value is indeed initialized.)
+ ///
+ /// [the module documentation]: crate::ptr#safety
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let ptr: *const u8 = &10u8 as *const u8;
+ ///
+ /// unsafe {
+ /// if let Some(val_back) = ptr.as_ref() {
+ /// println!("We got back the value: {val_back}!");
+ /// }
+ /// }
+ /// ```
+ ///
+ /// # Null-unchecked version
+ ///
+ /// If you are sure the pointer can never be null and are looking for some kind of
+ /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can
+ /// dereference the pointer directly.
+ ///
+ /// ```
+ /// let ptr: *const u8 = &10u8 as *const u8;
+ ///
+ /// unsafe {
+ /// let val_back = &*ptr;
+ /// println!("We got back the value: {val_back}!");
+ /// }
+ /// ```
+ #[stable(feature = "ptr_as_ref", since = "1.9.0")]
+ #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+ #[inline]
+ pub const unsafe fn as_ref<'a>(self) -> Option<&'a T> {
+ // SAFETY: the caller must guarantee that `self` is valid
+ // for a reference if it isn't null.
+ if self.is_null() { None } else { unsafe { Some(&*self) } }
+ }
+
+ /// Returns `None` if the pointer is null, or else returns a shared reference to
+ /// the value wrapped in `Some`. In contrast to [`as_ref`], this does not require
+ /// that the value has to be initialized.
+ ///
+ /// [`as_ref`]: #method.as_ref
+ ///
+ /// # Safety
+ ///
+ /// When calling this method, you have to ensure that *either* the pointer is null *or*
+ /// all of the following is true:
+ ///
+ /// * The pointer must be properly aligned.
+ ///
+ /// * It must be "dereferenceable" in the sense defined in [the module documentation].
+ ///
+ /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+ /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+ /// In particular, while this reference exists, the memory the pointer points to must
+ /// not get mutated (except inside `UnsafeCell`).
+ ///
+ /// This applies even if the result of this method is unused!
+ ///
+ /// [the module documentation]: crate::ptr#safety
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(ptr_as_uninit)]
+ ///
+ /// let ptr: *const u8 = &10u8 as *const u8;
+ ///
+ /// unsafe {
+ /// if let Some(val_back) = ptr.as_uninit_ref() {
+ /// println!("We got back the value: {}!", val_back.assume_init());
+ /// }
+ /// }
+ /// ```
+ #[inline]
+ #[unstable(feature = "ptr_as_uninit", issue = "75402")]
+ #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+ pub const unsafe fn as_uninit_ref<'a>(self) -> Option<&'a MaybeUninit<T>>
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must guarantee that `self` meets all the
+ // requirements for a reference.
+ if self.is_null() { None } else { Some(unsafe { &*(self as *const MaybeUninit<T>) }) }
+ }
+
+ /// Calculates the offset from a pointer.
+ ///
+ /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is Undefined
+ /// Behavior:
+ ///
+ /// * Both the starting and resulting pointer must be either in bounds or one
+ /// byte past the end of the same [allocated object].
+ ///
+ /// * The computed offset, **in bytes**, cannot overflow an `isize`.
+ ///
+ /// * The offset being in bounds cannot rely on "wrapping around" the address
+ /// space. That is, the infinite-precision sum, **in bytes**, must fit in a `usize`.
+ ///
+ /// The compiler and standard library generally try to ensure allocations
+ /// never reach a size where an offset is a concern. For instance, `Vec`
+ /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
+ /// `vec.as_ptr().add(vec.len())` is always safe.
+ ///
+ /// Most platforms fundamentally can't even construct such an allocation.
+ /// For instance, no known 64-bit platform can ever serve a request
+ /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
+ /// However, some 32-bit and 16-bit platforms may successfully serve a request for
+ /// more than `isize::MAX` bytes with things like Physical Address
+ /// Extension. As such, memory acquired directly from allocators or memory
+ /// mapped files *may* be too large to handle with this function.
+ ///
+ /// Consider using [`wrapping_offset`] instead if these constraints are
+ /// difficult to satisfy. The only advantage of this method is that it
+ /// enables more aggressive compiler optimizations.
+ ///
+ /// [`wrapping_offset`]: #method.wrapping_offset
+ /// [allocated object]: crate::ptr#allocated-object
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s: &str = "123";
+ /// let ptr: *const u8 = s.as_ptr();
+ ///
+ /// unsafe {
+ /// println!("{}", *ptr.offset(1) as char);
+ /// println!("{}", *ptr.offset(2) as char);
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn offset(self, count: isize) -> *const T
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `offset`.
+ unsafe { intrinsics::offset(self, count) }
+ }
+
+ /// Calculates the offset from a pointer in bytes.
+ ///
+ /// `count` is in units of **bytes**.
+ ///
+ /// This is purely a convenience for casting to a `u8` pointer and
+ /// using [offset][pointer::offset] on it. See that method for documentation
+ /// and safety requirements.
+ ///
+ /// For non-`Sized` pointees this operation changes only the data pointer,
+ /// leaving the metadata untouched.
+ #[must_use]
+ #[inline(always)]
+ #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
+ #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn byte_offset(self, count: isize) -> Self {
+ // SAFETY: the caller must uphold the safety contract for `offset`.
+ let this = unsafe { self.cast::<u8>().offset(count).cast::<()>() };
+ from_raw_parts::<T>(this, metadata(self))
+ }
+
+ /// Calculates the offset from a pointer using wrapping arithmetic.
+ ///
+ /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// This operation itself is always safe, but using the resulting pointer is not.
+ ///
+ /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not
+ /// be used to read or write other allocated objects.
+ ///
+ /// In other words, `let z = x.wrapping_offset((y as isize) - (x as isize))` does *not* make `z`
+ /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still
+ /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless
+ /// `x` and `y` point into the same allocated object.
+ ///
+ /// Compared to [`offset`], this method basically delays the requirement of staying within the
+ /// same allocated object: [`offset`] is immediate Undefined Behavior when crossing object
+ /// boundaries; `wrapping_offset` produces a pointer but still leads to Undefined Behavior if a
+ /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`offset`]
+ /// can be optimized better and is thus preferable in performance-sensitive code.
+ ///
+ /// The delayed check only considers the value of the pointer that was dereferenced, not the
+ /// intermediate values used during the computation of the final result. For example,
+ /// `x.wrapping_offset(o).wrapping_offset(o.wrapping_neg())` is always the same as `x`. In other
+ /// words, leaving the allocated object and then re-entering it later is permitted.
+ ///
+ /// [`offset`]: #method.offset
+ /// [allocated object]: crate::ptr#allocated-object
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // Iterate using a raw pointer in increments of two elements
+ /// let data = [1u8, 2, 3, 4, 5];
+ /// let mut ptr: *const u8 = data.as_ptr();
+ /// let step = 2;
+ /// let end_rounded_up = ptr.wrapping_offset(6);
+ ///
+ /// // This loop prints "1, 3, 5, "
+ /// while ptr != end_rounded_up {
+ /// unsafe {
+ /// print!("{}, ", *ptr);
+ /// }
+ /// ptr = ptr.wrapping_offset(step);
+ /// }
+ /// ```
+ #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
+ #[inline(always)]
+ pub const fn wrapping_offset(self, count: isize) -> *const T
+ where
+ T: Sized,
+ {
+ // SAFETY: the `arith_offset` intrinsic has no prerequisites to be called.
+ unsafe { intrinsics::arith_offset(self, count) }
+ }
+
+ /// Calculates the offset from a pointer in bytes using wrapping arithmetic.
+ ///
+ /// `count` is in units of **bytes**.
+ ///
+ /// This is purely a convenience for casting to a `u8` pointer and
+ /// using [wrapping_offset][pointer::wrapping_offset] on it. See that method
+ /// for documentation.
+ ///
+ /// For non-`Sized` pointees this operation changes only the data pointer,
+ /// leaving the metadata untouched.
+ #[must_use]
+ #[inline(always)]
+ #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
+ #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
+ pub const fn wrapping_byte_offset(self, count: isize) -> Self {
+ from_raw_parts::<T>(self.cast::<u8>().wrapping_offset(count).cast::<()>(), metadata(self))
+ }
+
+ /// Calculates the distance between two pointers. The returned value is in
+ /// units of T: the distance in bytes divided by `mem::size_of::<T>()`.
+ ///
+ /// This function is the inverse of [`offset`].
+ ///
+ /// [`offset`]: #method.offset
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is Undefined
+ /// Behavior:
+ ///
+ /// * Both the starting and other pointer must be either in bounds or one
+ /// byte past the end of the same [allocated object].
+ ///
+ /// * Both pointers must be *derived from* a pointer to the same object.
+ /// (See below for an example.)
+ ///
+ /// * The distance between the pointers, in bytes, must be an exact multiple
+ /// of the size of `T`.
+ ///
+ /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
+ ///
+ /// * The distance being in bounds cannot rely on "wrapping around" the address space.
+ ///
+ /// Rust types are never larger than `isize::MAX` and Rust allocations never wrap around the
+ /// address space, so two pointers within some value of any Rust type `T` will always satisfy
+ /// the last two conditions. The standard library also generally ensures that allocations
+ /// never reach a size where an offset is a concern. For instance, `Vec` and `Box` ensure they
+ /// never allocate more than `isize::MAX` bytes, so `ptr_into_vec.offset_from(vec.as_ptr())`
+ /// always satisfies the last two conditions.
+ ///
+ /// Most platforms fundamentally can't even construct such a large allocation.
+ /// For instance, no known 64-bit platform can ever serve a request
+ /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
+ /// However, some 32-bit and 16-bit platforms may successfully serve a request for
+ /// more than `isize::MAX` bytes with things like Physical Address
+ /// Extension. As such, memory acquired directly from allocators or memory
+ /// mapped files *may* be too large to handle with this function.
+ /// (Note that [`offset`] and [`add`] also have a similar limitation and hence cannot be used on
+ /// such large allocations either.)
+ ///
+ /// [`add`]: #method.add
+ /// [allocated object]: crate::ptr#allocated-object
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `T` is a Zero-Sized Type ("ZST").
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [0; 5];
+ /// let ptr1: *const i32 = &a[1];
+ /// let ptr2: *const i32 = &a[3];
+ /// unsafe {
+ /// assert_eq!(ptr2.offset_from(ptr1), 2);
+ /// assert_eq!(ptr1.offset_from(ptr2), -2);
+ /// assert_eq!(ptr1.offset(2), ptr2);
+ /// assert_eq!(ptr2.offset(-2), ptr1);
+ /// }
+ /// ```
+ ///
+ /// *Incorrect* usage:
+ ///
+ /// ```rust,no_run
+ /// let ptr1 = Box::into_raw(Box::new(0u8)) as *const u8;
+ /// let ptr2 = Box::into_raw(Box::new(1u8)) as *const u8;
+ /// let diff = (ptr2 as isize).wrapping_sub(ptr1 as isize);
+ /// // Make ptr2_other an "alias" of ptr2, but derived from ptr1.
+ /// let ptr2_other = (ptr1 as *const u8).wrapping_offset(diff);
+ /// assert_eq!(ptr2 as usize, ptr2_other as usize);
+ /// // Since ptr2_other and ptr2 are derived from pointers to different objects,
+ /// // computing their offset is undefined behavior, even though
+ /// // they point to the same address!
+ /// unsafe {
+ /// let zero = ptr2_other.offset_from(ptr2); // Undefined Behavior
+ /// }
+ /// ```
+ #[stable(feature = "ptr_offset_from", since = "1.47.0")]
+ #[rustc_const_unstable(feature = "const_ptr_offset_from", issue = "92980")]
+ #[inline]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn offset_from(self, origin: *const T) -> isize
+ where
+ T: Sized,
+ {
+ let pointee_size = mem::size_of::<T>();
+ assert!(0 < pointee_size && pointee_size <= isize::MAX as usize);
+ // SAFETY: the caller must uphold the safety contract for `ptr_offset_from`.
+ unsafe { intrinsics::ptr_offset_from(self, origin) }
+ }
+
+ /// Calculates the distance between two pointers. The returned value is in
+ /// units of **bytes**.
+ ///
+ /// This is purely a convenience for casting to a `u8` pointer and
+ /// using [offset_from][pointer::offset_from] on it. See that method for
+ /// documentation and safety requirements.
+ ///
+ /// For non-`Sized` pointees this operation considers only the data pointers,
+ /// ignoring the metadata.
+ #[inline(always)]
+ #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
+ #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn byte_offset_from(self, origin: *const T) -> isize {
+ // SAFETY: the caller must uphold the safety contract for `offset_from`.
+ unsafe { self.cast::<u8>().offset_from(origin.cast::<u8>()) }
+ }
+
+ /// Calculates the distance between two pointers, *where it's known that
+ /// `self` is equal to or greater than `origin`*. The returned value is in
+ /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
+ ///
+ /// This computes the same value that [`offset_from`](#method.offset_from)
+ /// would compute, but with the added precondition that the offset is
+ /// guaranteed to be non-negative. This method is equivalent to
+ /// `usize::try_from(self.offset_from(origin)).unwrap_unchecked()`,
+ /// but it provides slightly more information to the optimizer, which can
+ /// sometimes allow it to optimize slightly better with some backends.
+ ///
+ /// This method can be thought of as recovering the `count` that was passed
+ /// to [`add`](#method.add) (or, with the parameters in the other order,
+ /// to [`sub`](#method.sub)). The following are all equivalent, assuming
+ /// that their safety preconditions are met:
+ /// ```rust
+ /// # #![feature(ptr_sub_ptr)]
+ /// # unsafe fn blah(ptr: *const i32, origin: *const i32, count: usize) -> bool {
+ /// ptr.sub_ptr(origin) == count
+ /// # &&
+ /// origin.add(count) == ptr
+ /// # &&
+ /// ptr.sub(count) == origin
+ /// # }
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// - The distance between the pointers must be non-negative (`self >= origin`)
+ ///
+ /// - *All* the safety conditions of [`offset_from`](#method.offset_from)
+ /// apply to this method as well; see it for the full details.
+ ///
+ /// Importantly, despite the return type of this method being able to represent
+ /// a larger offset, it's still *not permitted* to pass pointers which differ
+ /// by more than `isize::MAX` *bytes*. As such, the result of this method will
+ /// always be less than or equal to `isize::MAX as usize`.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `T` is a Zero-Sized Type ("ZST").
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ptr_sub_ptr)]
+ ///
+ /// let a = [0; 5];
+ /// let ptr1: *const i32 = &a[1];
+ /// let ptr2: *const i32 = &a[3];
+ /// unsafe {
+ /// assert_eq!(ptr2.sub_ptr(ptr1), 2);
+ /// assert_eq!(ptr1.add(2), ptr2);
+ /// assert_eq!(ptr2.sub(2), ptr1);
+ /// assert_eq!(ptr2.sub_ptr(ptr2), 0);
+ /// }
+ ///
+ /// // This would be incorrect, as the pointers are not correctly ordered:
+ /// // ptr1.sub_ptr(ptr2)
+ /// ```
+ #[unstable(feature = "ptr_sub_ptr", issue = "95892")]
+ #[rustc_const_unstable(feature = "const_ptr_sub_ptr", issue = "95892")]
+ #[inline]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn sub_ptr(self, origin: *const T) -> usize
+ where
+ T: Sized,
+ {
+ // SAFETY: The comparison has no side-effects, and the intrinsic
+ // does this check internally in the CTFE implementation.
+ unsafe { assert_unsafe_precondition!(self >= origin) };
+
+ let pointee_size = mem::size_of::<T>();
+ assert!(0 < pointee_size && pointee_size <= isize::MAX as usize);
+ // SAFETY: the caller must uphold the safety contract for `ptr_offset_from_unsigned`.
+ unsafe { intrinsics::ptr_offset_from_unsigned(self, origin) }
+ }
+
+ /// Returns whether two pointers are guaranteed to be equal.
+ ///
+ /// At runtime this function behaves like `self == other`.
+ /// However, in some contexts (e.g., compile-time evaluation),
+ /// it is not always possible to determine equality of two pointers, so this function may
+ /// spuriously return `false` for pointers that later actually turn out to be equal.
+ /// But when it returns `true`, the pointers are guaranteed to be equal.
+ ///
+ /// This function is the mirror of [`guaranteed_ne`], but not its inverse. There are pointer
+ /// comparisons for which both functions return `false`.
+ ///
+ /// [`guaranteed_ne`]: #method.guaranteed_ne
+ ///
+ /// The return value may change depending on the compiler version and unsafe code must not
+ /// rely on the result of this function for soundness. It is suggested to only use this function
+    /// for performance optimizations where spurious `false` return values from this function do not
+    /// affect the outcome, only the performance.
+ /// The consequences of using this method to make runtime and compile-time code behave
+ /// differently have not been explored. This method should not be used to introduce such
+ /// differences, and it should also not be stabilized before we have a better understanding
+ /// of this issue.
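+    ///
+    /// # Examples
+    ///
+    /// A minimal, illustrative sketch (this method is unstable behind the
+    /// `const_raw_ptr_comparison` feature); at runtime the result matches `==`
+    /// whenever it is `true`:
+    ///
+    /// ```
+    /// #![feature(const_raw_ptr_comparison)]
+    ///
+    /// let x = 7;
+    /// let ptr: *const i32 = &x;
+    /// // A `true` result is always reliable; `false` may be spurious.
+    /// if ptr.guaranteed_eq(ptr) {
+    ///     assert!(ptr == ptr);
+    /// }
+    /// ```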
+ #[unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
+ #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
+ #[inline]
+ pub const fn guaranteed_eq(self, other: *const T) -> bool
+ where
+ T: Sized,
+ {
+ intrinsics::ptr_guaranteed_eq(self, other)
+ }
+
+ /// Returns whether two pointers are guaranteed to be unequal.
+ ///
+ /// At runtime this function behaves like `self != other`.
+ /// However, in some contexts (e.g., compile-time evaluation),
+ /// it is not always possible to determine the inequality of two pointers, so this function may
+ /// spuriously return `false` for pointers that later actually turn out to be unequal.
+ /// But when it returns `true`, the pointers are guaranteed to be unequal.
+ ///
+ /// This function is the mirror of [`guaranteed_eq`], but not its inverse. There are pointer
+ /// comparisons for which both functions return `false`.
+ ///
+ /// [`guaranteed_eq`]: #method.guaranteed_eq
+ ///
+ /// The return value may change depending on the compiler version and unsafe code must not
+ /// rely on the result of this function for soundness. It is suggested to only use this function
+    /// for performance optimizations where spurious `false` return values from this function do not
+    /// affect the outcome, only the performance.
+ /// The consequences of using this method to make runtime and compile-time code behave
+ /// differently have not been explored. This method should not be used to introduce such
+ /// differences, and it should also not be stabilized before we have a better understanding
+ /// of this issue.
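+    ///
+    /// # Examples
+    ///
+    /// A minimal, illustrative sketch (this method is unstable behind the
+    /// `const_raw_ptr_comparison` feature):
+    ///
+    /// ```
+    /// #![feature(const_raw_ptr_comparison)]
+    ///
+    /// let (a, b) = (1, 2);
+    /// let (p, q): (*const i32, *const i32) = (&a, &b);
+    /// // A `true` result is always reliable; `false` may be spurious.
+    /// if p.guaranteed_ne(q) {
+    ///     assert!(p != q);
+    /// }
+    /// ```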
+ #[unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
+ #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
+ #[inline]
+ pub const fn guaranteed_ne(self, other: *const T) -> bool
+ where
+ T: Sized,
+ {
+ intrinsics::ptr_guaranteed_ne(self, other)
+ }
+
+ /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
+ ///
+ /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is Undefined
+ /// Behavior:
+ ///
+ /// * Both the starting and resulting pointer must be either in bounds or one
+ /// byte past the end of the same [allocated object].
+ ///
+ /// * The computed offset, **in bytes**, cannot overflow an `isize`.
+ ///
+ /// * The offset being in bounds cannot rely on "wrapping around" the address
+ /// space. That is, the infinite-precision sum must fit in a `usize`.
+ ///
+    /// The compiler and standard library generally try to ensure allocations
+ /// never reach a size where an offset is a concern. For instance, `Vec`
+ /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
+ /// `vec.as_ptr().add(vec.len())` is always safe.
+ ///
+ /// Most platforms fundamentally can't even construct such an allocation.
+ /// For instance, no known 64-bit platform can ever serve a request
+ /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
+ /// However, some 32-bit and 16-bit platforms may successfully serve a request for
+ /// more than `isize::MAX` bytes with things like Physical Address
+ /// Extension. As such, memory acquired directly from allocators or memory
+ /// mapped files *may* be too large to handle with this function.
+ ///
+ /// Consider using [`wrapping_add`] instead if these constraints are
+ /// difficult to satisfy. The only advantage of this method is that it
+ /// enables more aggressive compiler optimizations.
+ ///
+ /// [`wrapping_add`]: #method.wrapping_add
+ /// [allocated object]: crate::ptr#allocated-object
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s: &str = "123";
+ /// let ptr: *const u8 = s.as_ptr();
+ ///
+ /// unsafe {
+ /// println!("{}", *ptr.add(1) as char);
+ /// println!("{}", *ptr.add(2) as char);
+ /// }
+ /// ```
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn add(self, count: usize) -> Self
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `offset`.
+ unsafe { self.offset(count as isize) }
+ }
+
+ /// Calculates the offset from a pointer in bytes (convenience for `.byte_offset(count as isize)`).
+ ///
+ /// `count` is in units of bytes.
+ ///
+ /// This is purely a convenience for casting to a `u8` pointer and
+ /// using [add][pointer::add] on it. See that method for documentation
+ /// and safety requirements.
+ ///
+ /// For non-`Sized` pointees this operation changes only the data pointer,
+ /// leaving the metadata untouched.
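+    ///
+    /// # Examples
+    ///
+    /// A minimal, illustrative sketch (the `pointer_byte_offsets` feature is
+    /// unstable):
+    ///
+    /// ```
+    /// #![feature(pointer_byte_offsets)]
+    ///
+    /// let arr: [u16; 2] = [3, 7];
+    /// let ptr: *const u16 = arr.as_ptr();
+    /// unsafe {
+    ///     // Advancing by `size_of::<u16>()` bytes lands on the next element.
+    ///     assert_eq!(*ptr.byte_add(std::mem::size_of::<u16>()), 7);
+    /// }
+    /// ```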
+ #[must_use]
+ #[inline(always)]
+ #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
+ #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn byte_add(self, count: usize) -> Self {
+ // SAFETY: the caller must uphold the safety contract for `add`.
+ let this = unsafe { self.cast::<u8>().add(count).cast::<()>() };
+ from_raw_parts::<T>(this, metadata(self))
+ }
+
+ /// Calculates the offset from a pointer (convenience for
+ /// `.offset((count as isize).wrapping_neg())`).
+ ///
+ /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is Undefined
+ /// Behavior:
+ ///
+ /// * Both the starting and resulting pointer must be either in bounds or one
+ /// byte past the end of the same [allocated object].
+ ///
+ /// * The computed offset cannot exceed `isize::MAX` **bytes**.
+ ///
+ /// * The offset being in bounds cannot rely on "wrapping around" the address
+    /// space. That is, the infinite-precision sum must fit in a `usize`.
+ ///
+    /// The compiler and standard library generally try to ensure allocations
+ /// never reach a size where an offset is a concern. For instance, `Vec`
+ /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
+ /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
+ ///
+ /// Most platforms fundamentally can't even construct such an allocation.
+ /// For instance, no known 64-bit platform can ever serve a request
+ /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
+ /// However, some 32-bit and 16-bit platforms may successfully serve a request for
+ /// more than `isize::MAX` bytes with things like Physical Address
+ /// Extension. As such, memory acquired directly from allocators or memory
+ /// mapped files *may* be too large to handle with this function.
+ ///
+ /// Consider using [`wrapping_sub`] instead if these constraints are
+ /// difficult to satisfy. The only advantage of this method is that it
+ /// enables more aggressive compiler optimizations.
+ ///
+ /// [`wrapping_sub`]: #method.wrapping_sub
+ /// [allocated object]: crate::ptr#allocated-object
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s: &str = "123";
+ ///
+ /// unsafe {
+ /// let end: *const u8 = s.as_ptr().add(3);
+ /// println!("{}", *end.sub(1) as char);
+ /// println!("{}", *end.sub(2) as char);
+ /// }
+ /// ```
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
+ #[inline]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn sub(self, count: usize) -> Self
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `offset`.
+ unsafe { self.offset((count as isize).wrapping_neg()) }
+ }
+
+ /// Calculates the offset from a pointer in bytes (convenience for
+ /// `.byte_offset((count as isize).wrapping_neg())`).
+ ///
+ /// `count` is in units of bytes.
+ ///
+ /// This is purely a convenience for casting to a `u8` pointer and
+ /// using [sub][pointer::sub] on it. See that method for documentation
+ /// and safety requirements.
+ ///
+ /// For non-`Sized` pointees this operation changes only the data pointer,
+ /// leaving the metadata untouched.
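+    ///
+    /// # Examples
+    ///
+    /// A minimal, illustrative sketch (the `pointer_byte_offsets` feature is
+    /// unstable):
+    ///
+    /// ```
+    /// #![feature(pointer_byte_offsets)]
+    ///
+    /// let arr: [u16; 2] = [3, 7];
+    /// unsafe {
+    ///     let end: *const u16 = arr.as_ptr().add(1);
+    ///     // Stepping back by `size_of::<u16>()` bytes returns to the first element.
+    ///     assert_eq!(*end.byte_sub(std::mem::size_of::<u16>()), 3);
+    /// }
+    /// ```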
+ #[must_use]
+ #[inline(always)]
+ #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
+ #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn byte_sub(self, count: usize) -> Self {
+ // SAFETY: the caller must uphold the safety contract for `sub`.
+ let this = unsafe { self.cast::<u8>().sub(count).cast::<()>() };
+ from_raw_parts::<T>(this, metadata(self))
+ }
+
+ /// Calculates the offset from a pointer using wrapping arithmetic.
+ /// (convenience for `.wrapping_offset(count as isize)`)
+ ///
+ /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// This operation itself is always safe, but using the resulting pointer is not.
+ ///
+ /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not
+ /// be used to read or write other allocated objects.
+ ///
+ /// In other words, `let z = x.wrapping_add((y as usize) - (x as usize))` does *not* make `z`
+ /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still
+ /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless
+ /// `x` and `y` point into the same allocated object.
+ ///
+ /// Compared to [`add`], this method basically delays the requirement of staying within the
+ /// same allocated object: [`add`] is immediate Undefined Behavior when crossing object
+ /// boundaries; `wrapping_add` produces a pointer but still leads to Undefined Behavior if a
+ /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`add`]
+ /// can be optimized better and is thus preferable in performance-sensitive code.
+ ///
+ /// The delayed check only considers the value of the pointer that was dereferenced, not the
+ /// intermediate values used during the computation of the final result. For example,
+ /// `x.wrapping_add(o).wrapping_sub(o)` is always the same as `x`. In other words, leaving the
+ /// allocated object and then re-entering it later is permitted.
+ ///
+ /// [`add`]: #method.add
+ /// [allocated object]: crate::ptr#allocated-object
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // Iterate using a raw pointer in increments of two elements
+ /// let data = [1u8, 2, 3, 4, 5];
+ /// let mut ptr: *const u8 = data.as_ptr();
+ /// let step = 2;
+ /// let end_rounded_up = ptr.wrapping_add(6);
+ ///
+ /// // This loop prints "1, 3, 5, "
+ /// while ptr != end_rounded_up {
+ /// unsafe {
+ /// print!("{}, ", *ptr);
+ /// }
+ /// ptr = ptr.wrapping_add(step);
+ /// }
+ /// ```
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
+ #[inline(always)]
+ pub const fn wrapping_add(self, count: usize) -> Self
+ where
+ T: Sized,
+ {
+ self.wrapping_offset(count as isize)
+ }
+
+ /// Calculates the offset from a pointer in bytes using wrapping arithmetic.
+ /// (convenience for `.wrapping_byte_offset(count as isize)`)
+ ///
+ /// `count` is in units of bytes.
+ ///
+ /// This is purely a convenience for casting to a `u8` pointer and
+ /// using [wrapping_add][pointer::wrapping_add] on it. See that method for documentation.
+ ///
+ /// For non-`Sized` pointees this operation changes only the data pointer,
+ /// leaving the metadata untouched.
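+    ///
+    /// # Examples
+    ///
+    /// A minimal, illustrative sketch (the `pointer_byte_offsets` feature is
+    /// unstable):
+    ///
+    /// ```
+    /// #![feature(pointer_byte_offsets)]
+    ///
+    /// let x = 5u32;
+    /// let ptr: *const u32 = &x;
+    /// // The address advances by exactly two bytes; no `unsafe` is needed
+    /// // until the resulting pointer is dereferenced.
+    /// assert_eq!(ptr.wrapping_byte_add(2) as usize, ptr as usize + 2);
+    /// ```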
+ #[must_use]
+ #[inline(always)]
+ #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
+ #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
+ pub const fn wrapping_byte_add(self, count: usize) -> Self {
+ from_raw_parts::<T>(self.cast::<u8>().wrapping_add(count).cast::<()>(), metadata(self))
+ }
+
+ /// Calculates the offset from a pointer using wrapping arithmetic.
+ /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
+ ///
+ /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// This operation itself is always safe, but using the resulting pointer is not.
+ ///
+ /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not
+ /// be used to read or write other allocated objects.
+ ///
+ /// In other words, `let z = x.wrapping_sub((x as usize) - (y as usize))` does *not* make `z`
+ /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still
+ /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless
+ /// `x` and `y` point into the same allocated object.
+ ///
+ /// Compared to [`sub`], this method basically delays the requirement of staying within the
+ /// same allocated object: [`sub`] is immediate Undefined Behavior when crossing object
+ /// boundaries; `wrapping_sub` produces a pointer but still leads to Undefined Behavior if a
+ /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`sub`]
+ /// can be optimized better and is thus preferable in performance-sensitive code.
+ ///
+ /// The delayed check only considers the value of the pointer that was dereferenced, not the
+ /// intermediate values used during the computation of the final result. For example,
+ /// `x.wrapping_add(o).wrapping_sub(o)` is always the same as `x`. In other words, leaving the
+ /// allocated object and then re-entering it later is permitted.
+ ///
+ /// [`sub`]: #method.sub
+ /// [allocated object]: crate::ptr#allocated-object
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // Iterate using a raw pointer in increments of two elements (backwards)
+ /// let data = [1u8, 2, 3, 4, 5];
+ /// let mut ptr: *const u8 = data.as_ptr();
+ /// let start_rounded_down = ptr.wrapping_sub(2);
+ /// ptr = ptr.wrapping_add(4);
+ /// let step = 2;
+ /// // This loop prints "5, 3, 1, "
+ /// while ptr != start_rounded_down {
+ /// unsafe {
+ /// print!("{}, ", *ptr);
+ /// }
+ /// ptr = ptr.wrapping_sub(step);
+ /// }
+ /// ```
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
+ #[inline]
+ pub const fn wrapping_sub(self, count: usize) -> Self
+ where
+ T: Sized,
+ {
+ self.wrapping_offset((count as isize).wrapping_neg())
+ }
+
+ /// Calculates the offset from a pointer in bytes using wrapping arithmetic.
+ /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
+ ///
+ /// `count` is in units of bytes.
+ ///
+ /// This is purely a convenience for casting to a `u8` pointer and
+ /// using [wrapping_sub][pointer::wrapping_sub] on it. See that method for documentation.
+ ///
+ /// For non-`Sized` pointees this operation changes only the data pointer,
+ /// leaving the metadata untouched.
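+    ///
+    /// # Examples
+    ///
+    /// A minimal, illustrative sketch (the `pointer_byte_offsets` feature is
+    /// unstable):
+    ///
+    /// ```
+    /// #![feature(pointer_byte_offsets)]
+    ///
+    /// let x = 5u32;
+    /// let ptr: *const u32 = &x;
+    /// // The address moves back by exactly two bytes; no `unsafe` is needed
+    /// // until the resulting pointer is dereferenced.
+    /// assert_eq!(ptr.wrapping_byte_sub(2) as usize, ptr as usize - 2);
+    /// ```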
+ #[must_use]
+ #[inline(always)]
+ #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
+ #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
+ pub const fn wrapping_byte_sub(self, count: usize) -> Self {
+ from_raw_parts::<T>(self.cast::<u8>().wrapping_sub(count).cast::<()>(), metadata(self))
+ }
+
+ /// Reads the value from `self` without moving it. This leaves the
+ /// memory in `self` unchanged.
+ ///
+ /// See [`ptr::read`] for safety concerns and examples.
+ ///
+ /// [`ptr::read`]: crate::ptr::read()
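+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch mirroring the [`ptr::read`] examples:
+    ///
+    /// ```
+    /// let x = 12;
+    /// let ptr: *const i32 = &x;
+    /// // SAFETY: `ptr` is valid for reads, properly aligned, and points to
+    /// // an initialized `i32`.
+    /// unsafe { assert_eq!(ptr.read(), 12); }
+    /// ```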
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
+ #[inline]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn read(self) -> T
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `read`.
+ unsafe { read(self) }
+ }
+
+ /// Performs a volatile read of the value from `self` without moving it. This
+ /// leaves the memory in `self` unchanged.
+ ///
+ /// Volatile operations are intended to act on I/O memory, and are guaranteed
+ /// to not be elided or reordered by the compiler across other volatile
+ /// operations.
+ ///
+ /// See [`ptr::read_volatile`] for safety concerns and examples.
+ ///
+ /// [`ptr::read_volatile`]: crate::ptr::read_volatile()
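+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch; a volatile read of ordinary memory returns the same
+    /// value as a plain read:
+    ///
+    /// ```
+    /// let x = 12;
+    /// let ptr: *const i32 = &x;
+    /// // SAFETY: `ptr` is valid for reads and properly aligned.
+    /// unsafe { assert_eq!(ptr.read_volatile(), 12); }
+    /// ```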
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[inline]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub unsafe fn read_volatile(self) -> T
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `read_volatile`.
+ unsafe { read_volatile(self) }
+ }
+
+ /// Reads the value from `self` without moving it. This leaves the
+ /// memory in `self` unchanged.
+ ///
+ /// Unlike `read`, the pointer may be unaligned.
+ ///
+ /// See [`ptr::read_unaligned`] for safety concerns and examples.
+ ///
+ /// [`ptr::read_unaligned`]: crate::ptr::read_unaligned()
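+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch reading a `u16` from a byte buffer at an offset that
+    /// may not be aligned for `u16`:
+    ///
+    /// ```
+    /// let data = [0u8, 1, 2, 3];
+    /// let ptr = data[1..].as_ptr() as *const u16;
+    /// // SAFETY: `ptr` is valid for a two-byte read; alignment is not required here.
+    /// let v = unsafe { ptr.read_unaligned() };
+    /// assert_eq!(v, u16::from_ne_bytes([1, 2]));
+    /// ```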
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
+ #[inline]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn read_unaligned(self) -> T
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `read_unaligned`.
+ unsafe { read_unaligned(self) }
+ }
+
+    /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
+ /// and destination may overlap.
+ ///
+ /// NOTE: this has the *same* argument order as [`ptr::copy`].
+ ///
+ /// See [`ptr::copy`] for safety concerns and examples.
+ ///
+ /// [`ptr::copy`]: crate::ptr::copy()
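+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch copying between two disjoint buffers (overlap would
+    /// also be permitted for this method):
+    ///
+    /// ```
+    /// let src = [1, 2, 3];
+    /// let mut dst = [0; 3];
+    /// // SAFETY: both pointers are valid for three `i32`s and properly aligned.
+    /// unsafe { src.as_ptr().copy_to(dst.as_mut_ptr(), 3) };
+    /// assert_eq!(dst, [1, 2, 3]);
+    /// ```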
+ #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[inline]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn copy_to(self, dest: *mut T, count: usize)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `copy`.
+ unsafe { copy(self, dest, count) }
+ }
+
+    /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
+ /// and destination may *not* overlap.
+ ///
+ /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`].
+ ///
+ /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples.
+ ///
+ /// [`ptr::copy_nonoverlapping`]: crate::ptr::copy_nonoverlapping()
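+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch; the two buffers are distinct, so they cannot overlap:
+    ///
+    /// ```
+    /// let src = [1u8, 2, 3, 4];
+    /// let mut dst = [0u8; 4];
+    /// // SAFETY: both pointers are valid for four bytes, and the buffers are disjoint.
+    /// unsafe { src.as_ptr().copy_to_nonoverlapping(dst.as_mut_ptr(), 4) };
+    /// assert_eq!(dst, src);
+    /// ```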
+ #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[inline]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `copy_nonoverlapping`.
+ unsafe { copy_nonoverlapping(self, dest, count) }
+ }
+
+ /// Computes the offset that needs to be applied to the pointer in order to make it aligned to
+ /// `align`.
+ ///
+ /// If it is not possible to align the pointer, the implementation returns
+ /// `usize::MAX`. It is permissible for the implementation to *always*
+ /// return `usize::MAX`. Only your algorithm's performance can depend
+ /// on getting a usable offset here, not its correctness.
+ ///
+ /// The offset is expressed in number of `T` elements, and not bytes. The value returned can be
+ /// used with the `wrapping_add` method.
+ ///
+ /// There are no guarantees whatsoever that offsetting the pointer will not overflow or go
+ /// beyond the allocation that the pointer points into. It is up to the caller to ensure that
+ /// the returned offset is correct in all terms other than alignment.
+ ///
+ /// # Panics
+ ///
+ /// The function panics if `align` is not a power-of-two.
+ ///
+ /// # Examples
+ ///
+ /// Accessing adjacent `u8` as `u16`
+ ///
+ /// ```
+ /// # fn foo(n: usize) {
+ /// # use std::mem::align_of;
+ /// # unsafe {
+ /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
+ /// let ptr = x.as_ptr().add(n) as *const u8;
+ /// let offset = ptr.align_offset(align_of::<u16>());
+ /// if offset < x.len() - n - 1 {
+ /// let u16_ptr = ptr.add(offset) as *const u16;
+ /// assert_ne!(*u16_ptr, 500);
+ /// } else {
+ /// // while the pointer can be aligned via `offset`, it would point
+ /// // outside the allocation
+ /// }
+ /// # } }
+ /// ```
+ #[stable(feature = "align_offset", since = "1.36.0")]
+ #[rustc_const_unstable(feature = "const_align_offset", issue = "90962")]
+ pub const fn align_offset(self, align: usize) -> usize
+ where
+ T: Sized,
+ {
+ if !align.is_power_of_two() {
+ panic!("align_offset: align is not a power-of-two");
+ }
+
+ fn rt_impl<T>(p: *const T, align: usize) -> usize {
+ // SAFETY: `align` has been checked to be a power of 2 above
+ unsafe { align_offset(p, align) }
+ }
+
+ const fn ctfe_impl<T>(_: *const T, _: usize) -> usize {
+ usize::MAX
+ }
+
+ // SAFETY:
+ // It is permissible for `align_offset` to always return `usize::MAX`,
+ // algorithm correctness can not depend on `align_offset` returning non-max values.
+ //
+ // As such the behaviour can't change after replacing `align_offset` with `usize::MAX`, only performance can.
+ unsafe { intrinsics::const_eval_select((self, align), ctfe_impl, rt_impl) }
+ }
+
+ /// Returns whether the pointer is properly aligned for `T`.
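+    ///
+    /// # Examples
+    ///
+    /// A minimal, illustrative sketch (the `pointer_is_aligned` feature is
+    /// unstable):
+    ///
+    /// ```
+    /// #![feature(pointer_is_aligned)]
+    ///
+    /// let x = 0u32;
+    /// let ptr: *const u32 = &x;
+    /// // References are always aligned for their type.
+    /// assert!(ptr.is_aligned());
+    /// // One byte past a 4-aligned address cannot itself be 4-aligned.
+    /// let misaligned = (ptr as *const u8).wrapping_add(1) as *const u32;
+    /// assert!(!misaligned.is_aligned());
+    /// ```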
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "pointer_is_aligned", issue = "96284")]
+ pub fn is_aligned(self) -> bool
+ where
+ T: Sized,
+ {
+ self.is_aligned_to(core::mem::align_of::<T>())
+ }
+
+ /// Returns whether the pointer is aligned to `align`.
+ ///
+ /// For non-`Sized` pointees this operation considers only the data pointer,
+ /// ignoring the metadata.
+ ///
+ /// # Panics
+ ///
+ /// The function panics if `align` is not a power-of-two (this includes 0).
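+    ///
+    /// # Examples
+    ///
+    /// A minimal, illustrative sketch (the `pointer_is_aligned` feature is
+    /// unstable):
+    ///
+    /// ```
+    /// #![feature(pointer_is_aligned)]
+    ///
+    /// let x = 0u64;
+    /// let ptr: *const u64 = &x;
+    /// // Every pointer is aligned to 1, and a reference is aligned for its type.
+    /// assert!(ptr.is_aligned_to(1));
+    /// assert!(ptr.is_aligned_to(std::mem::align_of::<u64>()));
+    /// ```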
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "pointer_is_aligned", issue = "96284")]
+ pub fn is_aligned_to(self, align: usize) -> bool {
+ if !align.is_power_of_two() {
+ panic!("is_aligned_to: align is not a power-of-two");
+ }
+
+ // SAFETY: `is_power_of_two()` will return `false` for zero.
+ unsafe { core::intrinsics::assume(align != 0) };
+
+ // Cast is needed for `T: !Sized`
+ self.cast::<u8>().addr() % align == 0
+ }
+}
+
+impl<T> *const [T] {
+ /// Returns the length of a raw slice.
+ ///
+ /// The returned value is the number of **elements**, not the number of bytes.
+ ///
+ /// This function is safe, even when the raw slice cannot be cast to a slice
+ /// reference because the pointer is null or unaligned.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// #![feature(slice_ptr_len)]
+ ///
+ /// use std::ptr;
+ ///
+ /// let slice: *const [i8] = ptr::slice_from_raw_parts(ptr::null(), 3);
+ /// assert_eq!(slice.len(), 3);
+ /// ```
+ #[inline]
+ #[unstable(feature = "slice_ptr_len", issue = "71146")]
+ #[rustc_const_unstable(feature = "const_slice_ptr_len", issue = "71146")]
+ pub const fn len(self) -> usize {
+ metadata(self)
+ }
+
+ /// Returns a raw pointer to the slice's buffer.
+ ///
+ /// This is equivalent to casting `self` to `*const T`, but more type-safe.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// #![feature(slice_ptr_get)]
+ /// use std::ptr;
+ ///
+ /// let slice: *const [i8] = ptr::slice_from_raw_parts(ptr::null(), 3);
+ /// assert_eq!(slice.as_ptr(), ptr::null());
+ /// ```
+ #[inline]
+ #[unstable(feature = "slice_ptr_get", issue = "74265")]
+ #[rustc_const_unstable(feature = "slice_ptr_get", issue = "74265")]
+ pub const fn as_ptr(self) -> *const T {
+ self as *const T
+ }
+
+ /// Returns a raw pointer to an element or subslice, without doing bounds
+ /// checking.
+ ///
+ /// Calling this method with an out-of-bounds index or when `self` is not dereferenceable
+ /// is *[undefined behavior]* even if the resulting pointer is not used.
+ ///
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_ptr_get)]
+ ///
+ /// let x = &[1, 2, 4] as *const [i32];
+ ///
+ /// unsafe {
+ /// assert_eq!(x.get_unchecked(1), x.as_ptr().add(1));
+ /// }
+ /// ```
+ #[unstable(feature = "slice_ptr_get", issue = "74265")]
+ #[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+ #[inline]
+ pub const unsafe fn get_unchecked<I>(self, index: I) -> *const I::Output
+ where
+ I: ~const SliceIndex<[T]>,
+ {
+ // SAFETY: the caller ensures that `self` is dereferenceable and `index` in-bounds.
+ unsafe { index.get_unchecked(self) }
+ }
+
+ /// Returns `None` if the pointer is null, or else returns a shared slice to
+ /// the value wrapped in `Some`. In contrast to [`as_ref`], this does not require
+ /// that the value has to be initialized.
+ ///
+ /// [`as_ref`]: #method.as_ref
+ ///
+ /// # Safety
+ ///
+ /// When calling this method, you have to ensure that *either* the pointer is null *or*
+ /// all of the following is true:
+ ///
+ /// * The pointer must be [valid] for reads for `ptr.len() * mem::size_of::<T>()` many bytes,
+ /// and it must be properly aligned. This means in particular:
+ ///
+ /// * The entire memory range of this slice must be contained within a single [allocated object]!
+ /// Slices can never span across multiple allocated objects.
+ ///
+ /// * The pointer must be aligned even for zero-length slices. One
+ /// reason for this is that enum layout optimizations may rely on references
+ /// (including slices of any length) being aligned and non-null to distinguish
+ /// them from other data. You can obtain a pointer that is usable as `data`
+ /// for zero-length slices using [`NonNull::dangling()`].
+ ///
+ /// * The total size `ptr.len() * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
+ /// See the safety documentation of [`pointer::offset`].
+ ///
+ /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+ /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+ /// In particular, while this reference exists, the memory the pointer points to must
+ /// not get mutated (except inside `UnsafeCell`).
+ ///
+ /// This applies even if the result of this method is unused!
+ ///
+ /// See also [`slice::from_raw_parts`][].
+ ///
+ /// [valid]: crate::ptr#safety
+ /// [allocated object]: crate::ptr#allocated-object
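+    ///
+    /// # Examples
+    ///
+    /// A minimal, illustrative sketch (the `ptr_as_uninit` feature is unstable):
+    ///
+    /// ```
+    /// #![feature(ptr_as_uninit)]
+    ///
+    /// let data = [1i8, 2, 3];
+    /// let slice: *const [i8] = &data[..];
+    /// // SAFETY: `slice` points to a live, initialized, properly aligned array.
+    /// let uninit = unsafe { slice.as_uninit_slice() }.unwrap();
+    /// assert_eq!(uninit.len(), 3);
+    /// ```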
+ #[inline]
+ #[unstable(feature = "ptr_as_uninit", issue = "75402")]
+ #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+ pub const unsafe fn as_uninit_slice<'a>(self) -> Option<&'a [MaybeUninit<T>]> {
+ if self.is_null() {
+ None
+ } else {
+ // SAFETY: the caller must uphold the safety contract for `as_uninit_slice`.
+ Some(unsafe { slice::from_raw_parts(self as *const MaybeUninit<T>, self.len()) })
+ }
+ }
+}
+
+// Equality for pointers
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> PartialEq for *const T {
+ #[inline]
+ fn eq(&self, other: &*const T) -> bool {
+ *self == *other
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Eq for *const T {}
+
+// Comparison for pointers
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Ord for *const T {
+ #[inline]
+ fn cmp(&self, other: &*const T) -> Ordering {
+ if self < other {
+ Less
+ } else if self == other {
+ Equal
+ } else {
+ Greater
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> PartialOrd for *const T {
+ #[inline]
+ fn partial_cmp(&self, other: &*const T) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+
+ #[inline]
+ fn lt(&self, other: &*const T) -> bool {
+ *self < *other
+ }
+
+ #[inline]
+ fn le(&self, other: &*const T) -> bool {
+ *self <= *other
+ }
+
+ #[inline]
+ fn gt(&self, other: &*const T) -> bool {
+ *self > *other
+ }
+
+ #[inline]
+ fn ge(&self, other: &*const T) -> bool {
+ *self >= *other
+ }
+}
diff --git a/library/core/src/ptr/metadata.rs b/library/core/src/ptr/metadata.rs
new file mode 100644
index 000000000..cd5edee04
--- /dev/null
+++ b/library/core/src/ptr/metadata.rs
@@ -0,0 +1,290 @@
+#![unstable(feature = "ptr_metadata", issue = "81513")]
+
+use crate::fmt;
+use crate::hash::{Hash, Hasher};
+
+/// Provides the pointer metadata type of any pointed-to type.
+///
+/// # Pointer metadata
+///
+/// Raw pointer types and reference types in Rust can be thought of as made of two parts:
+/// a data pointer that contains the memory address of the value, and some metadata.
+///
+/// For statically-sized types (that implement the `Sized` trait)
+/// as well as for `extern` types,
+/// pointers are said to be “thin”: metadata is zero-sized and its type is `()`.
+///
+/// Pointers to [dynamically-sized types][dst] are said to be “wide” or “fat”;
+/// they have non-zero-sized metadata:
+///
+/// * For structs whose last field is a DST, metadata is the metadata for the last field
+/// * For the `str` type, metadata is the length in bytes as `usize`
+/// * For slice types like `[T]`, metadata is the length in items as `usize`
+/// * For trait objects like `dyn SomeTrait`, metadata is [`DynMetadata<Self>`][DynMetadata]
+/// (e.g. `DynMetadata<dyn SomeTrait>`)
+///
+/// In the future, the Rust language may gain new kinds of types
+/// that have different pointer metadata.
+///
+/// [dst]: https://doc.rust-lang.org/nomicon/exotic-sizes.html#dynamically-sized-types-dsts
+///
+///
+/// # The `Pointee` trait
+///
+/// The point of this trait is its `Metadata` associated type,
+/// which is `()` or `usize` or `DynMetadata<_>` as described above.
+/// It is automatically implemented for every type.
+/// It can be assumed to be implemented in a generic context, even without a corresponding bound.
+///
+///
+/// # Usage
+///
+/// Raw pointers can be decomposed into the data address and metadata components
+/// with their [`to_raw_parts`] method.
+///
+/// Alternatively, metadata alone can be extracted with the [`metadata`] function.
+/// A reference can be passed to [`metadata`] and implicitly coerced.
+///
+/// A (possibly-wide) pointer can be put back together from its address and metadata
+/// with [`from_raw_parts`] or [`from_raw_parts_mut`].
+///
+/// [`to_raw_parts`]: *const::to_raw_parts
+#[lang = "pointee_trait"]
+pub trait Pointee {
+ /// The type for metadata in pointers and references to `Self`.
+ #[lang = "metadata_type"]
+ // NOTE: Keep trait bounds in `static_assert_expected_bounds_for_metadata`
+ // in `library/core/src/ptr/metadata.rs`
+ // in sync with those here:
+ type Metadata: Copy + Send + Sync + Ord + Hash + Unpin;
+}
+
+/// Pointers to types implementing this trait alias are “thin”.
+///
+/// This includes statically-`Sized` types and `extern` types.
+///
+/// # Example
+///
+/// ```rust
+/// #![feature(ptr_metadata)]
+///
+/// fn this_never_panics<T: std::ptr::Thin>() {
+/// assert_eq!(std::mem::size_of::<&T>(), std::mem::size_of::<usize>())
+/// }
+/// ```
+#[unstable(feature = "ptr_metadata", issue = "81513")]
+// NOTE: don’t stabilize this before trait aliases are stable in the language?
+pub trait Thin = Pointee<Metadata = ()>;
+
+/// Extract the metadata component of a pointer.
+///
+/// Values of type `*mut T`, `&T`, or `&mut T` can be passed directly to this function
+/// as they implicitly coerce to `*const T`.
+///
+/// # Example
+///
+/// ```
+/// #![feature(ptr_metadata)]
+///
+/// assert_eq!(std::ptr::metadata("foo"), 3_usize);
+/// ```
+#[rustc_const_unstable(feature = "ptr_metadata", issue = "81513")]
+#[inline]
+pub const fn metadata<T: ?Sized>(ptr: *const T) -> <T as Pointee>::Metadata {
+ // SAFETY: Accessing the value from the `PtrRepr` union is safe since *const T
+ // and PtrComponents<T> have the same memory layouts. Only std can make this
+ // guarantee.
+ unsafe { PtrRepr { const_ptr: ptr }.components.metadata }
+}
+
+/// Forms a (possibly-wide) raw pointer from a data address and metadata.
+///
+/// This function is safe but the returned pointer is not necessarily safe to dereference.
+/// For slices, see the documentation of [`slice::from_raw_parts`] for safety requirements.
+/// For trait objects, the metadata must come from a pointer to the same underlying erased type.
+///
+/// [`slice::from_raw_parts`]: crate::slice::from_raw_parts
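+///
+/// # Example
+///
+/// A minimal sketch reassembling a slice pointer from its parts (the
+/// `ptr_metadata` feature is unstable):
+///
+/// ```
+/// #![feature(ptr_metadata)]
+///
+/// let slice: &[i32] = &[1, 2, 3];
+/// let (data, len) = (slice.as_ptr() as *const (), slice.len());
+/// let rebuilt: *const [i32] = std::ptr::from_raw_parts(data, len);
+/// // SAFETY: the parts came from a valid slice, so the rebuilt pointer is valid.
+/// assert_eq!(unsafe { &*rebuilt }, slice);
+/// ```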
+#[unstable(feature = "ptr_metadata", issue = "81513")]
+#[rustc_const_unstable(feature = "ptr_metadata", issue = "81513")]
+#[inline]
+pub const fn from_raw_parts<T: ?Sized>(
+ data_address: *const (),
+ metadata: <T as Pointee>::Metadata,
+) -> *const T {
+ // SAFETY: Accessing the value from the `PtrRepr` union is safe since *const T
+ // and PtrComponents<T> have the same memory layouts. Only std can make this
+ // guarantee.
+ unsafe { PtrRepr { components: PtrComponents { data_address, metadata } }.const_ptr }
+}
+
+/// Performs the same functionality as [`from_raw_parts`], except that a
+/// raw `*mut` pointer is returned, as opposed to a raw `*const` pointer.
+///
+/// See the documentation of [`from_raw_parts`] for more details.
+#[unstable(feature = "ptr_metadata", issue = "81513")]
+#[rustc_const_unstable(feature = "ptr_metadata", issue = "81513")]
+#[inline]
+pub const fn from_raw_parts_mut<T: ?Sized>(
+ data_address: *mut (),
+ metadata: <T as Pointee>::Metadata,
+) -> *mut T {
+ // SAFETY: Accessing the value from the `PtrRepr` union is safe since *const T
+ // and PtrComponents<T> have the same memory layouts. Only std can make this
+ // guarantee.
+ unsafe { PtrRepr { components: PtrComponents { data_address, metadata } }.mut_ptr }
+}
+
+#[repr(C)]
+pub(crate) union PtrRepr<T: ?Sized> {
+ pub(crate) const_ptr: *const T,
+ pub(crate) mut_ptr: *mut T,
+ pub(crate) components: PtrComponents<T>,
+}
+
+#[repr(C)]
+pub(crate) struct PtrComponents<T: ?Sized> {
+ pub(crate) data_address: *const (),
+ pub(crate) metadata: <T as Pointee>::Metadata,
+}
+
+// Manual impl needed to avoid `T: Copy` bound.
+impl<T: ?Sized> Copy for PtrComponents<T> {}
+
+// Manual impl needed to avoid `T: Clone` bound.
+impl<T: ?Sized> Clone for PtrComponents<T> {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+/// The metadata for a `Dyn = dyn SomeTrait` trait object type.
+///
+/// It is a pointer to a vtable (virtual call table)
+/// that represents all the necessary information
+/// to manipulate the concrete type stored inside a trait object.
+/// Notably, the vtable contains:
+///
+/// * type size
+/// * type alignment
+/// * a pointer to the type’s `drop_in_place` impl (may be a no-op for plain-old-data)
+/// * pointers to all the methods for the type’s implementation of the trait
+///
+/// Note that the first three are special because they’re necessary to allocate, drop,
+/// and deallocate any trait object.
+///
+/// It is possible to name this struct with a type parameter that is not a `dyn` trait object
+/// (for example `DynMetadata<u64>`) but not to obtain a meaningful value of that struct.
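+///
+/// # Example
+///
+/// A minimal, illustrative sketch (the `ptr_metadata` feature is unstable):
+///
+/// ```
+/// #![feature(ptr_metadata)]
+/// use std::fmt::Debug;
+///
+/// let value: i32 = 7;
+/// let object: &dyn Debug = &value;
+/// let meta = std::ptr::metadata(object as *const dyn Debug);
+/// // The vtable records the size and alignment of the erased type.
+/// assert_eq!(meta.size_of(), std::mem::size_of::<i32>());
+/// assert_eq!(meta.align_of(), std::mem::align_of::<i32>());
+/// ```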
+#[lang = "dyn_metadata"]
+pub struct DynMetadata<Dyn: ?Sized> {
+ vtable_ptr: &'static VTable,
+ phantom: crate::marker::PhantomData<Dyn>,
+}
+
+#[cfg(not(bootstrap))]
+extern "C" {
+ /// Opaque type for accessing vtables.
+ ///
+ /// Private implementation detail of `DynMetadata::size_of` etc.
+ /// There is conceptually not actually any Abstract Machine memory behind this pointer.
+ type VTable;
+}
+
+/// The common prefix of all vtables. It is followed by function pointers for trait methods.
+///
+/// Private implementation detail of `DynMetadata::size_of` etc.
+#[repr(C)]
+#[cfg(bootstrap)]
+struct VTable {
+ drop_in_place: fn(*mut ()),
+ size_of: usize,
+ align_of: usize,
+}
+
+impl<Dyn: ?Sized> DynMetadata<Dyn> {
+ /// Returns the size of the type associated with this vtable.
+ #[inline]
+ pub fn size_of(self) -> usize {
+ // Note that "size stored in vtable" is *not* the same as "result of size_of_val_raw".
+ // Consider a reference like `&(i32, dyn Send)`: the vtable will only store the size of the
+ // `Send` part!
+ #[cfg(bootstrap)]
+ return self.vtable_ptr.size_of;
+ #[cfg(not(bootstrap))]
+ // SAFETY: DynMetadata always contains a valid vtable pointer
+ return unsafe {
+ crate::intrinsics::vtable_size(self.vtable_ptr as *const VTable as *const ())
+ };
+ }
+
+ /// Returns the alignment of the type associated with this vtable.
+ #[inline]
+ pub fn align_of(self) -> usize {
+ #[cfg(bootstrap)]
+ return self.vtable_ptr.align_of;
+ #[cfg(not(bootstrap))]
+ // SAFETY: DynMetadata always contains a valid vtable pointer
+ return unsafe {
+ crate::intrinsics::vtable_align(self.vtable_ptr as *const VTable as *const ())
+ };
+ }
+
+ /// Returns the size and alignment together as a `Layout`
+ #[inline]
+ pub fn layout(self) -> crate::alloc::Layout {
+ // SAFETY: the compiler emitted this vtable for a concrete Rust type which
+ // is known to have a valid layout. Same rationale as in `Layout::for_value`.
+ unsafe { crate::alloc::Layout::from_size_align_unchecked(self.size_of(), self.align_of()) }
+ }
+}
+
+unsafe impl<Dyn: ?Sized> Send for DynMetadata<Dyn> {}
+unsafe impl<Dyn: ?Sized> Sync for DynMetadata<Dyn> {}
+
+impl<Dyn: ?Sized> fmt::Debug for DynMetadata<Dyn> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("DynMetadata").field(&(self.vtable_ptr as *const VTable)).finish()
+ }
+}
+
+// Manual impls needed to avoid `Dyn: $Trait` bounds.
+
+impl<Dyn: ?Sized> Unpin for DynMetadata<Dyn> {}
+
+impl<Dyn: ?Sized> Copy for DynMetadata<Dyn> {}
+
+impl<Dyn: ?Sized> Clone for DynMetadata<Dyn> {
+ #[inline]
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+impl<Dyn: ?Sized> Eq for DynMetadata<Dyn> {}
+
+impl<Dyn: ?Sized> PartialEq for DynMetadata<Dyn> {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ crate::ptr::eq::<VTable>(self.vtable_ptr, other.vtable_ptr)
+ }
+}
+
+impl<Dyn: ?Sized> Ord for DynMetadata<Dyn> {
+ #[inline]
+ fn cmp(&self, other: &Self) -> crate::cmp::Ordering {
+ (self.vtable_ptr as *const VTable).cmp(&(other.vtable_ptr as *const VTable))
+ }
+}
+
+impl<Dyn: ?Sized> PartialOrd for DynMetadata<Dyn> {
+ #[inline]
+ fn partial_cmp(&self, other: &Self) -> Option<crate::cmp::Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl<Dyn: ?Sized> Hash for DynMetadata<Dyn> {
+ #[inline]
+ fn hash<H: Hasher>(&self, hasher: &mut H) {
+ crate::ptr::hash::<VTable, _>(self.vtable_ptr, hasher)
+ }
+}
diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs
new file mode 100644
index 000000000..40e28e636
--- /dev/null
+++ b/library/core/src/ptr/mod.rs
@@ -0,0 +1,2054 @@
+//! Manually manage memory through raw pointers.
+//!
+//! *[See also the pointer primitive types](pointer).*
+//!
+//! # Safety
+//!
+//! Many functions in this module take raw pointers as arguments and read from
+//! or write to them. For this to be safe, these pointers must be *valid*.
+//! Whether a pointer is valid depends on the operation it is used for
+//! (read or write), and the extent of the memory that is accessed (i.e.,
+//! how many bytes are read/written). Most functions use `*mut T` and `*const T`
+//! to access only a single value, in which case the documentation omits the size
+//! and implicitly assumes it to be `size_of::<T>()` bytes.
+//!
+//! The precise rules for validity are not determined yet. The guarantees that are
+//! provided at this point are very minimal:
+//!
+//! * A [null] pointer is *never* valid, not even for accesses of [size zero][zst].
+//! * For a pointer to be valid, it is necessary, but not always sufficient, that the pointer
+//! be *dereferenceable*: the memory range of the given size starting at the pointer must all be
+//! within the bounds of a single allocated object. Note that in Rust,
+//! every (stack-allocated) variable is considered a separate allocated object.
+//! * Even for operations of [size zero][zst], the pointer must not be pointing to deallocated
+//! memory, i.e., deallocation makes pointers invalid even for zero-sized operations. However,
+//! casting any non-zero integer *literal* to a pointer is valid for zero-sized accesses, even if
+//! some memory happens to exist at that address and gets deallocated. This corresponds to writing
+//! your own allocator: allocating zero-sized objects is not very hard. The canonical way to
+//! obtain a pointer that is valid for zero-sized accesses is [`NonNull::dangling`].
+//FIXME: mention `ptr::invalid` above, once it is stable.
+//! * All accesses performed by functions in this module are *non-atomic* in the sense
+//! of [atomic operations] used to synchronize between threads. This means it is
+//! undefined behavior to perform two concurrent accesses to the same location from different
+//! threads unless both accesses only read from memory. Notice that this explicitly
+//! includes [`read_volatile`] and [`write_volatile`]: Volatile accesses cannot
+//! be used for inter-thread synchronization.
+//! * The result of casting a reference to a pointer is valid for as long as the
+//! underlying object is live and no reference (just raw pointers) is used to
+//! access the same memory.
+//!
+//! These axioms, along with careful use of [`offset`] for pointer arithmetic,
+//! are enough to correctly implement many useful things in unsafe code. Stronger guarantees
+//! will be provided eventually, as the [aliasing] rules are being determined. For more
+//! information, see the [book] as well as the section in the reference devoted
+//! to [undefined behavior][ub].
+//!
+//! ## Alignment
+//!
+//! Valid raw pointers as defined above are not necessarily properly aligned (where
+//! "proper" alignment is defined by the pointee type, i.e., `*const T` must be
+//! aligned to `mem::align_of::<T>()`). However, most functions require their
+//! arguments to be properly aligned, and will explicitly state
+//! this requirement in their documentation. Notable exceptions to this are
+//! [`read_unaligned`] and [`write_unaligned`].
+//!
+//! When a function requires proper alignment, it does so even if the access
+//! has size 0, i.e., even if memory is not actually touched. Consider using
+//! [`NonNull::dangling`] in such cases.
+//!
+//! ## Allocated object
+//!
+//! For several operations, such as [`offset`] or field projections (`expr.field`), the notion of an
+//! "allocated object" becomes relevant. An allocated object is a contiguous region of memory.
+//! Common examples of allocated objects include stack-allocated variables (each variable is a
+//! separate allocated object), heap allocations (each allocation created by the global allocator is
+//! a separate allocated object), and `static` variables.
+//!
+//!
+//! # Strict Provenance
+//!
+//! **The following text is non-normative, insufficiently formal, and is an extremely strict
+//! interpretation of provenance. It's ok if your code doesn't strictly conform to it.**
+//!
+//! [Strict Provenance][] is an experimental set of APIs that help tools that try
+//! to validate the memory-safety of your program's execution. Notably this includes [Miri][]
+//! and [CHERI][], which can detect when you access out of bounds memory or otherwise violate
+//! Rust's memory model.
+//!
+//! Provenance must exist in some form for any programming
+//! language compiled for modern computer architectures, but specifying a model for provenance
+//! in a way that is useful to both compilers and programmers is an ongoing challenge.
+//! The [Strict Provenance][] experiment seeks to explore the question: *what if we just said you
+//! couldn't do all the nasty operations that make provenance so messy?*
+//!
+//! What APIs would have to be removed? What APIs would have to be added? How much would code
+//! have to change, and is it worse or better now? Would any patterns become truly inexpressible?
+//! Could we carve out special exceptions for those patterns? Should we?
+//!
+//! A secondary goal of this project is to see if we can disambiguate the many functions of
+//! pointer<->integer casts enough for the definition of `usize` to be loosened so that it
+//! isn't *pointer*-sized but address-space/offset/allocation-sized (we'll probably continue
+//! to conflate these notions). This would potentially make it possible to more efficiently
+//! target platforms where pointers are larger than offsets, such as CHERI and maybe some
+//! segmented architectures.
+//!
+//! ## Provenance
+//!
+//! **This section is *non-normative* and is part of the [Strict Provenance][] experiment.**
+//!
+//! Pointers are not *simply* an "integer" or "address". For instance, it's uncontroversial
+//! to say that a Use After Free is clearly Undefined Behaviour, even if you "get lucky"
+//! and the freed memory gets reallocated before your read/write (in fact this is the
+//! worst-case scenario, UAFs would be much less concerning if this didn't happen!).
+//! To rationalize this claim, pointers need to somehow be *more* than just their addresses:
+//! they must have provenance.
+//!
+//! When an allocation is created, that allocation has a unique Original Pointer. For alloc
+//! APIs this is literally the pointer the call returns, and for local variables and statics,
+//! this is the name of the variable/static. This is mildly overloading the term "pointer"
+//! for the sake of brevity/exposition.
+//!
+//! The Original Pointer for an allocation is guaranteed to have unique access to the entire
+//! allocation and *only* that allocation. In this sense, an allocation can be thought of
+//! as a "sandbox" that cannot be broken into or out of. *Provenance* is the permission
+//! to access an allocation's sandbox and has both a *spatial* and *temporal* component:
+//!
+//! * Spatial: A range of bytes that the pointer is allowed to access.
+//! * Temporal: The lifetime (of the allocation) that access to these bytes is tied to.
+//!
+//! Spatial provenance makes sure you don't go beyond your sandbox, while temporal provenance
+//! makes sure that you can't "get lucky" after your permission to access some memory
+//! has been revoked (either through deallocations or borrows expiring).
+//!
+//! Provenance is implicitly shared with all pointers transitively derived from
+//! The Original Pointer through operations like [`offset`], borrowing, and pointer casts.
+//! Some operations may *shrink* the derived provenance, limiting how much memory it can
+//! access or how long it's valid for (i.e. borrowing a subfield and subslicing).
+//!
+//! Shrinking provenance cannot be undone: even if you "know" there is a larger allocation, you
+//! can't derive a pointer with a larger provenance. Similarly, you cannot "recombine"
+//! two contiguous provenances back into one (i.e. with a `fn merge(&[T], &[T]) -> &[T]`).
+//!
+//! A reference to a value always has provenance over exactly the memory that value occupies.
+//! A reference to a slice always has provenance over exactly the range that slice describes.
+//!
+//! If an allocation is deallocated, all pointers with provenance to that allocation become
+//! invalidated, and effectively lose their provenance.
+//!
+//! The strict provenance experiment is mostly only interested in exploring stricter *spatial*
+//! provenance. In this sense it can be thought of as a subset of the more ambitious and
+//! formal [Stacked Borrows][] research project, which is what tools like [Miri][] are based on.
+//! In particular, Stacked Borrows is necessary to properly describe what borrows are allowed
+//! to do and when they become invalidated. This necessarily involves much more complex
+//! *temporal* reasoning than simply identifying allocations. Adjusting APIs and code
+//! for the strict provenance experiment will also greatly help Stacked Borrows.
+//!
+//!
+//! ## Pointer Vs Addresses
+//!
+//! **This section is *non-normative* and is part of the [Strict Provenance][] experiment.**
+//!
+//! One of the largest historical issues with trying to define provenance is that programmers
+//! freely convert between pointers and integers. Once you allow for this, it generally becomes
+//! impossible to accurately track and preserve provenance information, and you need to appeal
+//! to very complex and unreliable heuristics. But of course, converting between pointers and
+//! integers is very useful, so what can we do?
+//!
+//! Also did you know WASM is actually a "Harvard Architecture"? As in function pointers are
+//! handled completely differently from data pointers? And we kind of just shipped Rust on WASM
+//! without really addressing the fact that we let you freely convert between function pointers
+//! and data pointers, because it mostly Just Works? Let's just put that on the "pointer casts
+//! are dubious" pile.
+//!
+//! Strict Provenance attempts to square these circles by decoupling Rust's traditional conflation
+//! of pointers and `usize` (and `isize`), and defining a pointer to semantically contain the
+//! following information:
+//!
+//! * The **address-space** it is part of (e.g. "data" vs "code" in WASM).
+//! * The **address** it points to, which can be represented by a `usize`.
+//! * The **provenance** it has, defining the memory it has permission to access.
+//!
+//! Under Strict Provenance, a usize *cannot* accurately represent a pointer, and converting from
+//! a pointer to a usize is generally an operation which *only* extracts the address. It is
+//! therefore *impossible* to construct a valid pointer from a usize because there is no way
+//! to restore the address-space and provenance. In other words, pointer-integer-pointer
+//! roundtrips are not possible (in the sense that the resulting pointer is not dereferenceable).
+//!
+//! The key insight to making this model *at all* viable is the [`with_addr`][] method:
+//!
+//! ```text
+//! /// Creates a new pointer with the given address.
+//! ///
+//! /// This performs the same operation as an `addr as ptr` cast, but copies
+//! /// the *address-space* and *provenance* of `self` to the new pointer.
+//! /// This allows us to dynamically preserve and propagate this important
+//! /// information in a way that is otherwise impossible with a unary cast.
+//! ///
+//! /// This is equivalent to using `wrapping_offset` to offset `self` to the
+//! /// given address, and therefore has all the same capabilities and restrictions.
+//! pub fn with_addr(self, addr: usize) -> Self;
+//! ```
+//!
+//! So you're still able to drop down to the address representation and do whatever
+//! clever bit tricks you want *as long as* you're able to keep around a pointer
+//! into the allocation you care about that can "reconstitute" the other parts of the pointer.
+//! Usually this is very easy, because you only are taking a pointer, messing with the address,
+//! and then immediately converting back to a pointer. To make this use case more ergonomic,
+//! we provide the [`map_addr`][] method.
+//!
+//! To help make it clear that code is "following" Strict Provenance semantics, we also provide an
+//! [`addr`][] method which promises that the returned address is not part of a
+//! pointer-usize-pointer roundtrip. In the future we may provide a lint for pointer<->integer
+//! casts to help you audit if your code conforms to strict provenance.
+//!
+//!
+//! ## Using Strict Provenance
+//!
+//! Most code needs no changes to conform to strict provenance, as the only really concerning
+//! operation that *wasn't* obviously already Undefined Behaviour is casts from usize to a
+//! pointer. For code which *does* cast a usize to a pointer, the scope of the change depends
+//! on exactly what you're doing.
+//!
+//! In general you just need to make sure that if you want to convert a usize address to a
+//! pointer and then use that pointer to read/write memory, you need to keep around a pointer
+//! that has sufficient provenance to perform that read/write itself. In this way all of your
+//! casts from an address to a pointer are essentially just applying offsets/indexing.
+//!
+//! This is generally trivial to do for simple cases like tagged pointers *as long as you
+//! represent the tagged pointer as an actual pointer and not a usize*. For instance:
+//!
+//! ```
+//! #![feature(strict_provenance)]
+//!
+//! unsafe {
+//! // A flag we want to pack into our pointer
+//! static HAS_DATA: usize = 0x1;
+//! static FLAG_MASK: usize = !HAS_DATA;
+//!
+//! // Our value, which must have enough alignment to have spare least-significant-bits.
+//! let my_precious_data: u32 = 17;
+//! assert!(core::mem::align_of::<u32>() > 1);
+//!
+//! // Create a tagged pointer
+//! let ptr = &my_precious_data as *const u32;
+//! let tagged = ptr.map_addr(|addr| addr | HAS_DATA);
+//!
+//! // Check the flag:
+//! if tagged.addr() & HAS_DATA != 0 {
+//! // Untag and read the pointer
+//! let data = *tagged.map_addr(|addr| addr & FLAG_MASK);
+//! assert_eq!(data, 17);
+//! } else {
+//! unreachable!()
+//! }
+//! }
+//! ```
+//!
+//! (Yes, if you've been using AtomicUsize for pointers in concurrent data structures, you should
+//! be using AtomicPtr instead. If that messes up the way you atomically manipulate pointers,
+//! we would like to know why, and what needs to be done to fix it.)
+//!
+//! Something more complicated and just generally *evil* like an XOR-List requires more significant
+//! changes like allocating all nodes in a pre-allocated Vec or Arena and using a pointer
+//! to the whole allocation to reconstitute the XORed addresses.
+//!
+//! Situations where a valid pointer *must* be created from just an address, such as baremetal code
+//! accessing a memory-mapped interface at a fixed address, are an open question on how to support.
+//! These situations *will* still be allowed, but we might require some kind of "I know what I'm
+//! doing" annotation to explain the situation to the compiler. It's also possible they need no
+//! special attention at all, because they're generally accessing memory outside the scope of
+//! "the abstract machine", or already using "I know what I'm doing" annotations like "volatile".
+//!
+//! Under [Strict Provenance] it is Undefined Behaviour to:
+//!
+//! * Access memory through a pointer that does not have provenance over that memory.
+//!
+//! * [`offset`] a pointer to or from an address it doesn't have provenance over.
+//! This means it's always UB to offset a pointer derived from something deallocated,
+//! even if the offset is 0. Note that a pointer "one past the end" of its provenance
+//! is not actually outside its provenance, it just has 0 bytes it can load/store.
+//!
+//! But it *is* still sound to:
+//!
+//! * Create an invalid pointer from just an address (see [`ptr::invalid`][]). This can
+//! be used for sentinel values like `null` *or* to represent a tagged pointer that will
+//!   never be dereferenceable. In general, it is always sound for an integer to pretend
+//! to be a pointer "for fun" as long as you don't use operations on it which require
+//! it to be valid (offset, read, write, etc).
+//!
+//! * Forge an allocation of size zero at any sufficiently aligned non-null address.
+//! i.e. the usual "ZSTs are fake, do what you want" rules apply *but* this only applies
+//! for actual forgery (integers cast to pointers). If you borrow some struct's field
+//! that *happens* to be zero-sized, the resulting pointer will have provenance tied to
+//! that allocation and it will still get invalidated if the allocation gets deallocated.
+//! In the future we may introduce an API to make such a forged allocation explicit.
+//!
+//! * [`wrapping_offset`][] a pointer outside its provenance. This includes invalid pointers
+//! which have "no" provenance. Unfortunately there may be practical limits on this for a
+//! particular platform, and it's an open question as to how to specify this (if at all).
+//! Notably, [CHERI][] relies on a compression scheme that can't handle a
+//! pointer getting offset "too far" out of bounds. If this happens, the address
+//! returned by `addr` will be the value you expect, but the provenance will get invalidated
+//! and using it to read/write will fault. The details of this are architecture-specific
+//! and based on alignment, but the buffer on either side of the pointer's range is pretty
+//! generous (think kilobytes, not bytes).
+//!
+//! * Compare arbitrary pointers by address. Addresses *are* just integers and so there is
+//!   always a coherent answer, even if the pointers are invalid or from different
+//!   address-spaces/provenances. Of course, comparing addresses from different address-spaces
+//!   is generally going to be *meaningless*, but so is comparing kilograms to meters, and Rust
+//!   doesn't prevent that either. Similarly, if you get "lucky" and notice that a pointer
+//!   one-past-the-end is the "same" address as the start of an unrelated allocation, anything
+//!   you do with that fact is *probably* going to be gibberish. The scope of that gibberish
+//!   is kept under control by the fact that the two pointers *still* aren't allowed to access
+//!   the other's allocation (bytes), because they still have different provenance. (See the
+//!   sketch after this list.)
+//!
+//! * Perform pointer tagging tricks. This falls out of [`wrapping_offset`] but is worth
+//! mentioning in more detail because of the limitations of [CHERI][]. Low-bit tagging
+//! is very robust, and often doesn't even go out of bounds because types ensure
+//! size >= align (and over-aligning actually gives CHERI more flexibility). Anything
+//! more complex than this rapidly enters "extremely platform-specific" territory as
+//! certain things may or may not be allowed based on specific supported operations.
+//! For instance, ARM explicitly supports high-bit tagging, and so CHERI on ARM inherits
+//! that and should support it.
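+//!
+//! As a small, non-normative sketch of the [`wrapping_offset`] and address-comparison points:
+//!
+//! ```
+//! #![feature(strict_provenance)]
+//!
+//! let a = 1u8;
+//! let b = 2u8;
+//!
+//! // Comparing arbitrary pointers by address is always well-defined, even across
+//! // unrelated allocations (whether it is *meaningful* is another matter).
+//! let pa = &a as *const u8;
+//! let pb = &b as *const u8;
+//! let _pa_is_lower = pa.addr() < pb.addr();
+//!
+//! // `wrapping_offset` may wander outside the provenance of `pa`, as long as the
+//! // resulting pointer is never used to access memory.
+//! let out = pa.wrapping_offset(100);
+//! assert_eq!(out.addr(), pa.addr().wrapping_add(100));
+//! ```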
+//!
+//! ## Pointer-usize-pointer roundtrips and 'exposed' provenance
+//!
+//! **This section is *non-normative* and is part of the [Strict Provenance] experiment.**
+//!
+//! As discussed above, pointer-usize-pointer roundtrips are not possible under [Strict Provenance].
+//! However, there exists legacy Rust code that is full of such roundtrips, and legacy platform APIs
+//! regularly assume that `usize` can capture all the information that makes up a pointer. There
+//! also might be code that cannot be ported to Strict Provenance (which is something we would [like
+//! to hear about][Strict Provenance]).
+//!
+//! For situations like this, there is a fallback plan, a way to 'opt out' of Strict Provenance.
+//! However, note that this makes your code a lot harder to specify, and the code will not work
+//! (well) with tools like [Miri] and [CHERI].
+//!
+//! This fallback plan is provided by the [`expose_addr`] and [`from_exposed_addr`] methods (which
+//! are equivalent to `as` casts between pointers and integers). [`expose_addr`] is a lot like
+//! [`addr`], but additionally adds the provenance of the pointer to a global list of 'exposed'
+//! provenances. (This list is purely conceptual; it exists for the purpose of specifying Rust but
+//! is not materialized in actual executions, except in tools like [Miri].) [`from_exposed_addr`]
+//! can be used to construct a pointer with one of these previously 'exposed' provenances.
+//! [`from_exposed_addr`] takes only `addr: usize` as its argument, so unlike with [`with_addr`] there is
+//! no indication of what the correct provenance for the returned pointer is -- and that is exactly
+//! what makes pointer-usize-pointer roundtrips so tricky to rigorously specify! There is no
+//! algorithm that decides which provenance will be used. You can think of this as "guessing" the
+//! right provenance, and the guess will be "maximally in your favor", in the sense that if there is
+//! any way to avoid undefined behavior, then that is the guess that will be taken. However, if
+//! there is *no* previously 'exposed' provenance that justifies the way the returned pointer will
+//! be used, the program has undefined behavior.
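+//!
+//! As a non-normative sketch, such a roundtrip looks like this when spelled with these APIs:
+//!
+//! ```
+//! #![feature(strict_provenance)]
+//!
+//! let x = 42u32;
+//! let ptr = &x as *const u32;
+//! // Returns the address *and* adds the provenance of `ptr` to the 'exposed' list.
+//! let addr = ptr.expose_addr();
+//! // "Guesses" an exposed provenance that makes the following read defined.
+//! let ptr2 = core::ptr::from_exposed_addr::<u32>(addr);
+//! assert_eq!(unsafe { *ptr2 }, 42);
+//! ```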
+//!
+//! Using [`expose_addr`] or [`from_exposed_addr`] (or the equivalent `as` casts) means that code is
+//! *not* following Strict Provenance rules. The goal of the Strict Provenance experiment is to
+//! determine whether it is possible to use Rust without [`expose_addr`] and [`from_exposed_addr`].
+//! If this is successful, it would be a major win for avoiding specification complexity and for
+//! facilitating adoption of tools like [CHERI] and [Miri] that can be a big help in increasing the
+//! confidence in (unsafe) Rust code.
+//!
+//! [aliasing]: ../../nomicon/aliasing.html
+//! [book]: ../../book/ch19-01-unsafe-rust.html#dereferencing-a-raw-pointer
+//! [ub]: ../../reference/behavior-considered-undefined.html
+//! [zst]: ../../nomicon/exotic-sizes.html#zero-sized-types-zsts
+//! [atomic operations]: crate::sync::atomic
+//! [`offset`]: pointer::offset
+//! [`wrapping_offset`]: pointer::wrapping_offset
+//! [`with_addr`]: pointer::with_addr
+//! [`map_addr`]: pointer::map_addr
+//! [`addr`]: pointer::addr
+//! [`ptr::invalid`]: core::ptr::invalid
+//! [`expose_addr`]: pointer::expose_addr
+//! [`from_exposed_addr`]: from_exposed_addr
+//! [Miri]: https://github.com/rust-lang/miri
+//! [CHERI]: https://www.cl.cam.ac.uk/research/security/ctsrd/cheri/
+//! [Strict Provenance]: https://github.com/rust-lang/rust/issues/95228
+//! [Stacked Borrows]: https://plv.mpi-sws.org/rustbelt/stacked-borrows/
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::cmp::Ordering;
+use crate::fmt;
+use crate::hash;
+use crate::intrinsics::{
+ self, assert_unsafe_precondition, is_aligned_and_not_null, is_nonoverlapping,
+};
+
+use crate::mem::{self, MaybeUninit};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(inline)]
+pub use crate::intrinsics::copy_nonoverlapping;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(inline)]
+pub use crate::intrinsics::copy;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(inline)]
+pub use crate::intrinsics::write_bytes;
+
+mod metadata;
+pub(crate) use metadata::PtrRepr;
+#[unstable(feature = "ptr_metadata", issue = "81513")]
+pub use metadata::{from_raw_parts, from_raw_parts_mut, metadata, DynMetadata, Pointee, Thin};
+
+mod non_null;
+#[stable(feature = "nonnull", since = "1.25.0")]
+pub use non_null::NonNull;
+
+mod unique;
+#[unstable(feature = "ptr_internals", issue = "none")]
+pub use unique::Unique;
+
+mod const_ptr;
+mod mut_ptr;
+
+/// Executes the destructor (if any) of the pointed-to value.
+///
+/// This is semantically equivalent to calling [`ptr::read`] and discarding
+/// the result, but has the following advantages:
+///
+/// * It is *required* to use `drop_in_place` to drop unsized types like
+/// trait objects, because they can't be read out onto the stack and
+/// dropped normally.
+///
+/// * It is friendlier to the optimizer to do this over [`ptr::read`] when
+/// dropping manually allocated memory (e.g., in the implementations of
+/// `Box`/`Rc`/`Vec`), as the compiler doesn't need to prove that it's
+/// sound to elide the copy.
+///
+/// * It can be used to drop [pinned] data when `T` is not `repr(packed)`
+/// (pinned data must not be moved before it is dropped).
+///
+/// Unaligned values cannot be dropped in place, they must be copied to an aligned
+/// location first using [`ptr::read_unaligned`]. For packed structs, this move is
+/// done automatically by the compiler. This means the fields of packed structs
+/// are not dropped in-place.
+///
+/// [`ptr::read`]: self::read
+/// [`ptr::read_unaligned`]: self::read_unaligned
+/// [pinned]: crate::pin
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * `to_drop` must be [valid] for both reads and writes.
+///
+/// * `to_drop` must be properly aligned.
+///
+/// * The value `to_drop` points to must be valid for dropping, which may mean it must uphold
+/// additional invariants - this is type-dependent.
+///
+/// Additionally, if `T` is not [`Copy`], using the pointed-to value after
+/// calling `drop_in_place` can cause undefined behavior. Note that `*to_drop =
+/// foo` counts as a use because it will cause the value to be dropped
+/// again. [`write()`] can be used to overwrite data without causing it to be
+/// dropped.
+///
+/// Note that even if `T` has size `0`, the pointer must be non-null and properly aligned.
+///
+/// [valid]: self#safety
+///
+/// # Examples
+///
+/// Manually remove the last item from a vector:
+///
+/// ```
+/// use std::ptr;
+/// use std::rc::Rc;
+///
+/// let last = Rc::new(1);
+/// let weak = Rc::downgrade(&last);
+///
+/// let mut v = vec![Rc::new(0), last];
+///
+/// unsafe {
+/// // Get a raw pointer to the last element in `v`.
+/// let ptr = &mut v[1] as *mut _;
+/// // Shorten `v` to prevent the last item from being dropped. We do that first,
+/// // to prevent issues if the `drop_in_place` below panics.
+/// v.set_len(1);
+///     // Without a call to `drop_in_place`, the last item would never be dropped,
+/// // and the memory it manages would be leaked.
+/// ptr::drop_in_place(ptr);
+/// }
+///
+/// assert_eq!(v, &[0.into()]);
+///
+/// // Ensure that the last item was dropped.
+/// assert!(weak.upgrade().is_none());
+/// ```
+#[stable(feature = "drop_in_place", since = "1.8.0")]
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+ // Code here does not matter - this is replaced by the
+ // real drop glue by the compiler.
+
+ // SAFETY: see comment above
+ unsafe { drop_in_place(to_drop) }
+}
+
+/// Creates a null raw pointer.
+///
+/// # Examples
+///
+/// ```
+/// use std::ptr;
+///
+/// let p: *const i32 = ptr::null();
+/// assert!(p.is_null());
+/// ```
+#[inline(always)]
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_promotable]
+#[rustc_const_stable(feature = "const_ptr_null", since = "1.24.0")]
+#[rustc_allow_const_fn_unstable(ptr_metadata)]
+#[rustc_diagnostic_item = "ptr_null"]
+pub const fn null<T: ?Sized + Thin>() -> *const T {
+ from_raw_parts(invalid(0), ())
+}
+
+/// Creates an invalid pointer with the given address.
+///
+/// This is different from `addr as *const T`, which creates a pointer that picks up a previously
+/// exposed provenance. See [`from_exposed_addr`] for more details on that operation.
+///
+/// The module's top-level documentation discusses the precise meaning of an "invalid"
+/// pointer but essentially this expresses that the pointer is not associated
+/// with any actual allocation and is little more than a usize address in disguise.
+///
+/// This pointer will have no provenance associated with it and is therefore
+/// UB to read/write/offset. This mostly exists to facilitate things
+/// like `ptr::null` and `NonNull::dangling` which make invalid pointers.
+///
+/// (Standard "Zero-Sized-Types get to cheat and lie" caveats apply, although it
+/// may be desirable to give them their own API just to make that 100% clear.)
+///
+/// This API and its claimed semantics are part of the Strict Provenance experiment,
+/// see the [module documentation][crate::ptr] for details.
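+///
+/// # Examples
+///
+/// A minimal sketch: a sentinel address that is never dereferenced.
+///
+/// ```
+/// #![feature(strict_provenance)]
+/// use std::ptr;
+///
+/// // Carries only an address and no provenance; reading, writing, or
+/// // offsetting through it would be UB.
+/// let sentinel: *const u8 = ptr::invalid(0xDEAD);
+/// assert_eq!(sentinel.addr(), 0xDEAD);
+/// ```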
+#[inline(always)]
+#[must_use]
+#[rustc_const_stable(feature = "stable_things_using_strict_provenance", since = "1.61.0")]
+#[unstable(feature = "strict_provenance", issue = "95228")]
+pub const fn invalid<T>(addr: usize) -> *const T {
+ // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
+ // We use transmute rather than a cast so tools like Miri can tell that this
+ // is *not* the same as from_exposed_addr.
+ // SAFETY: every valid integer is also a valid pointer (as long as you don't dereference that
+ // pointer).
+ unsafe { mem::transmute(addr) }
+}
+
+/// Creates an invalid mutable pointer with the given address.
+///
+/// This is different from `addr as *mut T`, which creates a pointer that picks up a previously
+/// exposed provenance. See [`from_exposed_addr_mut`] for more details on that operation.
+///
+/// The module's top-level documentation discusses the precise meaning of an "invalid"
+/// pointer but essentially this expresses that the pointer is not associated
+/// with any actual allocation and is little more than a usize address in disguise.
+///
+/// This pointer will have no provenance associated with it and is therefore
+/// UB to read/write/offset. This mostly exists to facilitate things
+/// like `ptr::null` and `NonNull::dangling` which make invalid pointers.
+///
+/// (Standard "Zero-Sized-Types get to cheat and lie" caveats apply, although it
+/// may be desirable to give them their own API just to make that 100% clear.)
+///
+/// This API and its claimed semantics are part of the Strict Provenance experiment,
+/// see the [module documentation][crate::ptr] for details.
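+///
+/// # Examples
+///
+/// A minimal sketch: an aligned placeholder that is never written through.
+///
+/// ```
+/// #![feature(strict_provenance)]
+/// use std::ptr;
+///
+/// // Suitably aligned for `u64` on common platforms, but with no provenance;
+/// // writing through it would be UB.
+/// let placeholder: *mut u64 = ptr::invalid_mut(8);
+/// assert!(!placeholder.is_null());
+/// assert_eq!(placeholder.addr(), 8);
+/// ```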
+#[inline(always)]
+#[must_use]
+#[rustc_const_stable(feature = "stable_things_using_strict_provenance", since = "1.61.0")]
+#[unstable(feature = "strict_provenance", issue = "95228")]
+pub const fn invalid_mut<T>(addr: usize) -> *mut T {
+ // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
+ // We use transmute rather than a cast so tools like Miri can tell that this
+ // is *not* the same as from_exposed_addr.
+ // SAFETY: every valid integer is also a valid pointer (as long as you don't dereference that
+ // pointer).
+ unsafe { mem::transmute(addr) }
+}
+
+/// Converts an address back to a pointer, picking up a previously 'exposed' provenance.
+///
+/// This is equivalent to `addr as *const T`. The provenance of the returned pointer is that of *any*
+/// pointer that was previously passed to [`expose_addr`][pointer::expose_addr] or a `ptr as usize`
+/// cast. If there is no previously 'exposed' provenance that justifies the way this pointer will be
+/// used, the program has undefined behavior. Note that there is no algorithm that decides which
+/// provenance will be used. You can think of this as "guessing" the right provenance, and the guess
+/// will be "maximally in your favor", in the sense that if there is any way to avoid undefined
+/// behavior, then that is the guess that will be taken.
+///
+/// On platforms with multiple address spaces, it is your responsibility to ensure that the
+/// address makes sense in the address space that this pointer will be used with.
+///
+/// Using this method means that code is *not* following strict provenance rules. "Guessing" a
+/// suitable provenance complicates specification and reasoning and may not be supported by
+/// tools that help you to stay conformant with the Rust memory model, so it is recommended to
+/// use [`with_addr`][pointer::with_addr] wherever possible.
+///
+/// On most platforms this will produce a value with the same bytes as the address. Platforms
+/// which need to store additional information in a pointer may not support this operation,
+/// since it is generally not possible to actually *compute* which provenance the returned
+/// pointer has to pick up.
+///
+/// This API and its claimed semantics are part of the Strict Provenance experiment, see the
+/// [module documentation][crate::ptr] for details.
+#[must_use]
+#[inline]
+#[unstable(feature = "strict_provenance", issue = "95228")]
+pub fn from_exposed_addr<T>(addr: usize) -> *const T
+where
+ T: Sized,
+{
+ // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
+ addr as *const T
+}
+
+/// Converts an address back to a mutable pointer, picking up a previously 'exposed' provenance.
+///
+/// This is equivalent to `addr as *mut T`. The provenance of the returned pointer is that of *any*
+/// pointer that was previously passed to [`expose_addr`][pointer::expose_addr] or a `ptr as usize`
+/// cast. If there is no previously 'exposed' provenance that justifies the way this pointer will be
+/// used, the program has undefined behavior. Note that there is no algorithm that decides which
+/// provenance will be used. You can think of this as "guessing" the right provenance, and the guess
+/// will be "maximally in your favor", in the sense that if there is any way to avoid undefined
+/// behavior, then that is the guess that will be taken.
+///
+/// On platforms with multiple address spaces, it is your responsibility to ensure that the
+/// address makes sense in the address space that this pointer will be used with.
+///
+/// Using this method means that code is *not* following strict provenance rules. "Guessing" a
+/// suitable provenance complicates specification and reasoning and may not be supported by
+/// tools that help you to stay conformant with the Rust memory model, so it is recommended to
+/// use [`with_addr`][pointer::with_addr] wherever possible.
+///
+/// On most platforms this will produce a value with the same bytes as the address. Platforms
+/// which need to store additional information in a pointer may not support this operation,
+/// since it is generally not possible to actually *compute* which provenance the returned
+/// pointer has to pick up.
+///
+/// This API and its claimed semantics are part of the Strict Provenance experiment, see the
+/// [module documentation][crate::ptr] for details.
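+///
+/// # Examples
+///
+/// A non-normative sketch of a roundtrip through an exposed address:
+///
+/// ```
+/// #![feature(strict_provenance)]
+///
+/// let mut x = 0u32;
+/// // The `expose_addr` call marks the provenance of this pointer as 'exposed'.
+/// let addr = (&mut x as *mut u32).expose_addr();
+/// // ...so `from_exposed_addr_mut` may "guess" it back for us here.
+/// let p = std::ptr::from_exposed_addr_mut::<u32>(addr);
+/// unsafe { p.write(17) };
+/// assert_eq!(x, 17);
+/// ```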
+#[must_use]
+#[inline]
+#[unstable(feature = "strict_provenance", issue = "95228")]
+pub fn from_exposed_addr_mut<T>(addr: usize) -> *mut T
+where
+ T: Sized,
+{
+ // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
+ addr as *mut T
+}
+
+/// Creates a null mutable raw pointer.
+///
+/// # Examples
+///
+/// ```
+/// use std::ptr;
+///
+/// let p: *mut i32 = ptr::null_mut();
+/// assert!(p.is_null());
+/// ```
+#[inline(always)]
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_promotable]
+#[rustc_const_stable(feature = "const_ptr_null", since = "1.24.0")]
+#[rustc_allow_const_fn_unstable(ptr_metadata)]
+#[rustc_diagnostic_item = "ptr_null_mut"]
+pub const fn null_mut<T: ?Sized + Thin>() -> *mut T {
+ from_raw_parts_mut(invalid_mut(0), ())
+}
+
+/// Forms a raw slice from a pointer and a length.
+///
+/// The `len` argument is the number of **elements**, not the number of bytes.
+///
+/// This function is safe, but actually using the return value is unsafe.
+/// See the documentation of [`slice::from_raw_parts`] for slice safety requirements.
+///
+/// [`slice::from_raw_parts`]: crate::slice::from_raw_parts
+///
+/// # Examples
+///
+/// ```rust
+/// use std::ptr;
+///
+/// // create a slice pointer when starting out with a pointer to the first element
+/// let x = [5, 6, 7];
+/// let raw_pointer = x.as_ptr();
+/// let slice = ptr::slice_from_raw_parts(raw_pointer, 3);
+/// assert_eq!(unsafe { &*slice }[2], 7);
+/// ```
+#[inline]
+#[stable(feature = "slice_from_raw_parts", since = "1.42.0")]
+#[rustc_const_stable(feature = "const_slice_from_raw_parts", since = "1.64.0")]
+#[rustc_allow_const_fn_unstable(ptr_metadata)]
+pub const fn slice_from_raw_parts<T>(data: *const T, len: usize) -> *const [T] {
+ from_raw_parts(data.cast(), len)
+}
+
+/// Performs the same functionality as [`slice_from_raw_parts`], except that a
+/// raw mutable slice is returned, as opposed to a raw immutable slice.
+///
+/// See the documentation of [`slice_from_raw_parts`] for more details.
+///
+/// This function is safe, but actually using the return value is unsafe.
+/// See the documentation of [`slice::from_raw_parts_mut`] for slice safety requirements.
+///
+/// [`slice::from_raw_parts_mut`]: crate::slice::from_raw_parts_mut
+///
+/// # Examples
+///
+/// ```rust
+/// use std::ptr;
+///
+/// let x = &mut [5, 6, 7];
+/// let raw_pointer = x.as_mut_ptr();
+/// let slice = ptr::slice_from_raw_parts_mut(raw_pointer, 3);
+///
+/// unsafe {
+/// (*slice)[2] = 99; // assign a value at an index in the slice
+/// };
+///
+/// assert_eq!(unsafe { &*slice }[2], 99);
+/// ```
+#[inline]
+#[stable(feature = "slice_from_raw_parts", since = "1.42.0")]
+#[rustc_const_unstable(feature = "const_slice_from_raw_parts_mut", issue = "67456")]
+pub const fn slice_from_raw_parts_mut<T>(data: *mut T, len: usize) -> *mut [T] {
+ from_raw_parts_mut(data.cast(), len)
+}
+
+/// Swaps the values at two mutable locations of the same type, without
+/// deinitializing either.
+///
+/// But for the following exceptions, this function is semantically
+/// equivalent to [`mem::swap`]:
+///
+/// * It operates on raw pointers instead of references. When references are
+/// available, [`mem::swap`] should be preferred.
+///
+/// * The two pointed-to values may overlap. If the values do overlap, then the
+/// overlapping region of memory from `x` will be used. This is demonstrated
+/// in the second example below.
+///
+/// * The operation is "untyped" in the sense that data may be uninitialized or otherwise violate
+/// the requirements of `T`. The initialization state is preserved exactly.
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * Both `x` and `y` must be [valid] for both reads and writes.
+///
+/// * Both `x` and `y` must be properly aligned.
+///
+/// Note that even if `T` has size `0`, the pointers must be non-null and properly aligned.
+///
+/// [valid]: self#safety
+///
+/// # Examples
+///
+/// Swapping two non-overlapping regions:
+///
+/// ```
+/// use std::ptr;
+///
+/// let mut array = [0, 1, 2, 3];
+///
+/// let (x, y) = array.split_at_mut(2);
+/// let x = x.as_mut_ptr().cast::<[u32; 2]>(); // this is `array[0..2]`
+/// let y = y.as_mut_ptr().cast::<[u32; 2]>(); // this is `array[2..4]`
+///
+/// unsafe {
+/// ptr::swap(x, y);
+/// assert_eq!([2, 3, 0, 1], array);
+/// }
+/// ```
+///
+/// Swapping two overlapping regions:
+///
+/// ```
+/// use std::ptr;
+///
+/// let mut array: [i32; 4] = [0, 1, 2, 3];
+///
+/// let array_ptr: *mut i32 = array.as_mut_ptr();
+///
+/// let x = array_ptr as *mut [i32; 3]; // this is `array[0..3]`
+/// let y = unsafe { array_ptr.add(1) } as *mut [i32; 3]; // this is `array[1..4]`
+///
+/// unsafe {
+/// ptr::swap(x, y);
+/// // The indices `1..3` of the slice overlap between `x` and `y`.
+///     // Reasonable results would be for them to be `[2, 3]`, so that indices `0..3` are
+/// // `[1, 2, 3]` (matching `y` before the `swap`); or for them to be `[0, 1]`
+/// // so that indices `1..4` are `[0, 1, 2]` (matching `x` before the `swap`).
+/// // This implementation is defined to make the latter choice.
+/// assert_eq!([1, 0, 1, 2], array);
+/// }
+/// ```
+#[inline]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+pub const unsafe fn swap<T>(x: *mut T, y: *mut T) {
+ // Give ourselves some scratch space to work with.
+ // We do not have to worry about drops: `MaybeUninit` does nothing when dropped.
+ let mut tmp = MaybeUninit::<T>::uninit();
+
+ // Perform the swap
+ // SAFETY: the caller must guarantee that `x` and `y` are
+ // valid for writes and properly aligned. `tmp` cannot be
+ // overlapping either `x` or `y` because `tmp` was just allocated
+ // on the stack as a separate allocated object.
+ unsafe {
+ copy_nonoverlapping(x, tmp.as_mut_ptr(), 1);
+ copy(y, x, 1); // `x` and `y` may overlap
+ copy_nonoverlapping(tmp.as_ptr(), y, 1);
+ }
+}
+
+/// Swaps `count * size_of::<T>()` bytes between the two regions of memory
+/// beginning at `x` and `y`. The two regions must *not* overlap.
+///
+/// The operation is "untyped" in the sense that data may be uninitialized or otherwise violate the
+/// requirements of `T`. The initialization state is preserved exactly.
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * Both `x` and `y` must be [valid] for both reads and writes of `count *
+/// size_of::<T>()` bytes.
+///
+/// * Both `x` and `y` must be properly aligned.
+///
+/// * The region of memory beginning at `x` with a size of `count *
+/// size_of::<T>()` bytes must *not* overlap with the region of memory
+/// beginning at `y` with the same size.
+///
+/// Note that even if the effectively copied size (`count * size_of::<T>()`) is `0`,
+/// the pointers must be non-null and properly aligned.
+///
+/// [valid]: self#safety
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::ptr;
+///
+/// let mut x = [1, 2, 3, 4];
+/// let mut y = [7, 8, 9];
+///
+/// unsafe {
+/// ptr::swap_nonoverlapping(x.as_mut_ptr(), y.as_mut_ptr(), 2);
+/// }
+///
+/// assert_eq!(x, [7, 8, 3, 4]);
+/// assert_eq!(y, [1, 2, 9]);
+/// ```
+#[inline]
+#[stable(feature = "swap_nonoverlapping", since = "1.27.0")]
+#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+pub const unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
+ #[allow(unused)]
+ macro_rules! attempt_swap_as_chunks {
+ ($ChunkTy:ty) => {
+ if mem::align_of::<T>() >= mem::align_of::<$ChunkTy>()
+ && mem::size_of::<T>() % mem::size_of::<$ChunkTy>() == 0
+ {
+ let x: *mut $ChunkTy = x.cast();
+ let y: *mut $ChunkTy = y.cast();
+ let count = count * (mem::size_of::<T>() / mem::size_of::<$ChunkTy>());
+ // SAFETY: these are the same bytes that the caller promised were
+ // ok, just typed as `MaybeUninit<ChunkTy>`s instead of as `T`s.
+ // The `if` condition above ensures that we're not violating
+ // alignment requirements, and that the division is exact so
+ // that we don't lose any bytes off the end.
+ return unsafe { swap_nonoverlapping_simple_untyped(x, y, count) };
+ }
+ };
+ }
+
+ // SAFETY: the caller must guarantee that `x` and `y` are
+ // valid for writes and properly aligned.
+ unsafe {
+ assert_unsafe_precondition!(
+ is_aligned_and_not_null(x)
+ && is_aligned_and_not_null(y)
+ && is_nonoverlapping(x, y, count)
+ );
+ }
+
+ // NOTE(scottmcm) Miri is disabled here as reading in smaller units is a
+ // pessimization for it. Also, if the type contains any unaligned pointers,
+ // copying those over multiple reads is difficult to support.
+ #[cfg(not(miri))]
+ {
+ // Split up the slice into small power-of-two-sized chunks that LLVM is able
+ // to vectorize (unless it's a special type with more-than-pointer alignment,
+ // because we don't want to pessimize things like slices of SIMD vectors.)
+ if mem::align_of::<T>() <= mem::size_of::<usize>()
+ && (!mem::size_of::<T>().is_power_of_two()
+ || mem::size_of::<T>() > mem::size_of::<usize>() * 2)
+ {
+ attempt_swap_as_chunks!(usize);
+ attempt_swap_as_chunks!(u8);
+ }
+ }
+
+ // SAFETY: Same preconditions as this function
+ unsafe { swap_nonoverlapping_simple_untyped(x, y, count) }
+}
+
+/// Same behaviour and safety conditions as [`swap_nonoverlapping`]
+///
+/// LLVM can vectorize this (at least it can for the power-of-two-sized types
+/// `swap_nonoverlapping` tries to use) so no need to manually SIMD it.
+#[inline]
+#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+const unsafe fn swap_nonoverlapping_simple_untyped<T>(x: *mut T, y: *mut T, count: usize) {
+ let x = x.cast::<MaybeUninit<T>>();
+ let y = y.cast::<MaybeUninit<T>>();
+ let mut i = 0;
+ while i < count {
+        // SAFETY: By precondition, `i` is in-bounds because it's below `count`.
+        let x = unsafe { &mut *x.add(i) };
+        // SAFETY: By precondition, `i` is in-bounds because it's below `count`,
+        // and it's distinct from `x` since the ranges are non-overlapping.
+        let y = unsafe { &mut *y.add(i) };
+ mem::swap_simple::<MaybeUninit<T>>(x, y);
+
+ i += 1;
+ }
+}
+
+/// Moves `src` into the pointed `dst`, returning the previous `dst` value.
+///
+/// Neither value is dropped.
+///
+/// This function is semantically equivalent to [`mem::replace`] except that it
+/// operates on raw pointers instead of references. When references are
+/// available, [`mem::replace`] should be preferred.
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * `dst` must be [valid] for both reads and writes.
+///
+/// * `dst` must be properly aligned.
+///
+/// * `dst` must point to a properly initialized value of type `T`.
+///
+/// Note that even if `T` has size `0`, the pointer must be non-null and properly aligned.
+///
+/// [valid]: self#safety
+///
+/// # Examples
+///
+/// ```
+/// use std::ptr;
+///
+/// let mut rust = vec!['b', 'u', 's', 't'];
+///
+/// // `mem::replace` would have the same effect without requiring the unsafe
+/// // block.
+/// let b = unsafe {
+/// ptr::replace(&mut rust[0], 'r')
+/// };
+///
+/// assert_eq!(b, 'b');
+/// assert_eq!(rust, &['r', 'u', 's', 't']);
+/// ```
+#[inline]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_replace", issue = "83164")]
+pub const unsafe fn replace<T>(dst: *mut T, mut src: T) -> T {
+ // SAFETY: the caller must guarantee that `dst` is valid to be
+ // cast to a mutable reference (valid for writes, aligned, initialized),
+ // and cannot overlap `src` since `dst` must point to a distinct
+ // allocated object.
+ unsafe {
+ assert_unsafe_precondition!(is_aligned_and_not_null(dst));
+ mem::swap(&mut *dst, &mut src); // cannot overlap
+ }
+ src
+}
+
+/// Reads the value from `src` without moving it. This leaves the
+/// memory in `src` unchanged.
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * `src` must be [valid] for reads.
+///
+/// * `src` must be properly aligned. Use [`read_unaligned`] if this is not the
+/// case.
+///
+/// * `src` must point to a properly initialized value of type `T`.
+///
+/// Note that even if `T` has size `0`, the pointer must be non-null and properly aligned.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// let x = 12;
+/// let y = &x as *const i32;
+///
+/// unsafe {
+/// assert_eq!(std::ptr::read(y), 12);
+/// }
+/// ```
+///
+/// Manually implement [`mem::swap`]:
+///
+/// ```
+/// use std::ptr;
+///
+/// fn swap<T>(a: &mut T, b: &mut T) {
+/// unsafe {
+/// // Create a bitwise copy of the value at `a` in `tmp`.
+/// let tmp = ptr::read(a);
+///
+/// // Exiting at this point (either by explicitly returning or by
+/// // calling a function which panics) would cause the value in `tmp` to
+/// // be dropped while the same value is still referenced by `a`. This
+/// // could trigger undefined behavior if `T` is not `Copy`.
+///
+/// // Create a bitwise copy of the value at `b` in `a`.
+/// // This is safe because mutable references cannot alias.
+/// ptr::copy_nonoverlapping(b, a, 1);
+///
+/// // As above, exiting here could trigger undefined behavior because
+/// // the same value is referenced by `a` and `b`.
+///
+/// // Move `tmp` into `b`.
+/// ptr::write(b, tmp);
+///
+/// // `tmp` has been moved (`write` takes ownership of its second argument),
+/// // so nothing is dropped implicitly here.
+/// }
+/// }
+///
+/// let mut foo = "foo".to_owned();
+/// let mut bar = "bar".to_owned();
+///
+/// swap(&mut foo, &mut bar);
+///
+/// assert_eq!(foo, "bar");
+/// assert_eq!(bar, "foo");
+/// ```
+///
+/// ## Ownership of the Returned Value
+///
+/// `read` creates a bitwise copy of `T`, regardless of whether `T` is [`Copy`].
+/// If `T` is not [`Copy`], using both the returned value and the value at
+/// `*src` can violate memory safety. Note that assigning to `*src` counts as a
+/// use because it will attempt to drop the value at `*src`.
+///
+/// [`write()`] can be used to overwrite data without causing it to be dropped.
+///
+/// ```
+/// use std::ptr;
+///
+/// let mut s = String::from("foo");
+/// unsafe {
+/// // `s2` now points to the same underlying memory as `s`.
+/// let mut s2: String = ptr::read(&s);
+///
+/// assert_eq!(s2, "foo");
+///
+/// // Assigning to `s2` causes its original value to be dropped. Beyond
+/// // this point, `s` must no longer be used, as the underlying memory has
+/// // been freed.
+/// s2 = String::default();
+/// assert_eq!(s2, "");
+///
+/// // Assigning to `s` would cause the old value to be dropped again,
+/// // resulting in undefined behavior.
+/// // s = String::from("bar"); // ERROR
+///
+/// // `ptr::write` can be used to overwrite a value without dropping it.
+/// ptr::write(&mut s, String::from("bar"));
+/// }
+///
+/// assert_eq!(s, "bar");
+/// ```
+///
+/// [valid]: self#safety
+#[inline]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+pub const unsafe fn read<T>(src: *const T) -> T {
+ // We are calling the intrinsics directly to avoid function calls in the generated code
+ // as `intrinsics::copy_nonoverlapping` is a wrapper function.
+ extern "rust-intrinsic" {
+ #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
+ fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
+ }
+
+ let mut tmp = MaybeUninit::<T>::uninit();
+ // SAFETY: the caller must guarantee that `src` is valid for reads.
+ // `src` cannot overlap `tmp` because `tmp` was just allocated on
+ // the stack as a separate allocated object.
+ //
+ // Also, since we just wrote a valid value into `tmp`, it is guaranteed
+ // to be properly initialized.
+ unsafe {
+ copy_nonoverlapping(src, tmp.as_mut_ptr(), 1);
+ tmp.assume_init()
+ }
+}
+
+/// Reads the value from `src` without moving it. This leaves the
+/// memory in `src` unchanged.
+///
+/// Unlike [`read`], `read_unaligned` works with unaligned pointers.
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * `src` must be [valid] for reads.
+///
+/// * `src` must point to a properly initialized value of type `T`.
+///
+/// Like [`read`], `read_unaligned` creates a bitwise copy of `T`, regardless of
+/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned
+/// value and the value at `*src` can [violate memory safety][read-ownership].
+///
+/// Note that even if `T` has size `0`, the pointer must be non-null.
+///
+/// [read-ownership]: read#ownership-of-the-returned-value
+/// [valid]: self#safety
+///
+/// ## On `packed` structs
+///
+/// Attempting to create a raw pointer to an `unaligned` struct field with
+/// an expression such as `&packed.unaligned as *const FieldType` creates an
+/// intermediate unaligned reference before converting that to a raw pointer.
+/// That this reference is temporary and immediately cast is inconsequential
+/// as the compiler always expects references to be properly aligned.
+/// As a result, using `&packed.unaligned as *const FieldType` causes immediate
+/// *undefined behavior* in your program.
+///
+/// Instead you must use the [`ptr::addr_of!`](addr_of) macro to
+/// create the pointer. You may use that returned pointer together with this
+/// function.
+///
+/// An example of what not to do and how this relates to `read_unaligned` is:
+///
+/// ```
+/// #[repr(packed, C)]
+/// struct Packed {
+/// _padding: u8,
+/// unaligned: u32,
+/// }
+///
+/// let packed = Packed {
+/// _padding: 0x00,
+/// unaligned: 0x01020304,
+/// };
+///
+/// // Take the address of a 32-bit integer which is not aligned.
+/// // In contrast to `&packed.unaligned as *const _`, this has no undefined behavior.
+/// let unaligned = std::ptr::addr_of!(packed.unaligned);
+///
+/// let v = unsafe { std::ptr::read_unaligned(unaligned) };
+/// assert_eq!(v, 0x01020304);
+/// ```
+///
+/// Accessing unaligned fields directly with e.g. `packed.unaligned` is safe however.
+///
+/// # Examples
+///
+/// Read a usize value from a byte buffer:
+///
+/// ```
+/// use std::mem;
+///
+/// fn read_usize(x: &[u8]) -> usize {
+/// assert!(x.len() >= mem::size_of::<usize>());
+///
+/// let ptr = x.as_ptr() as *const usize;
+///
+/// unsafe { ptr.read_unaligned() }
+/// }
+/// ```
+#[inline]
+#[stable(feature = "ptr_unaligned", since = "1.17.0")]
+#[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+pub const unsafe fn read_unaligned<T>(src: *const T) -> T {
+ let mut tmp = MaybeUninit::<T>::uninit();
+ // SAFETY: the caller must guarantee that `src` is valid for reads.
+ // `src` cannot overlap `tmp` because `tmp` was just allocated on
+ // the stack as a separate allocated object.
+ //
+ // Also, since we just wrote a valid value into `tmp`, it is guaranteed
+ // to be properly initialized.
+ unsafe {
+ copy_nonoverlapping(src as *const u8, tmp.as_mut_ptr() as *mut u8, mem::size_of::<T>());
+ tmp.assume_init()
+ }
+}
+
+/// Overwrites a memory location with the given value without reading or
+/// dropping the old value.
+///
+/// `write` does not drop the contents of `dst`. This is safe, but it could leak
+/// allocations or resources, so care should be taken not to overwrite an object
+/// that should be dropped.
+///
+/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
+/// location pointed to by `dst`.
+///
+/// This is appropriate for initializing uninitialized memory, or overwriting
+/// memory that has previously been [`read`] from.
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * `dst` must be [valid] for writes.
+///
+/// * `dst` must be properly aligned. Use [`write_unaligned`] if this is not the
+/// case.
+///
+/// Note that even if `T` has size `0`, the pointer must be non-null and properly aligned.
+///
+/// [valid]: self#safety
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// let mut x = 0;
+/// let y = &mut x as *mut i32;
+/// let z = 12;
+///
+/// unsafe {
+/// std::ptr::write(y, z);
+/// assert_eq!(std::ptr::read(y), 12);
+/// }
+/// ```
+///
+/// Manually implement [`mem::swap`]:
+///
+/// ```
+/// use std::ptr;
+///
+/// fn swap<T>(a: &mut T, b: &mut T) {
+/// unsafe {
+/// // Create a bitwise copy of the value at `a` in `tmp`.
+/// let tmp = ptr::read(a);
+///
+/// // Exiting at this point (either by explicitly returning or by
+/// // calling a function which panics) would cause the value in `tmp` to
+/// // be dropped while the same value is still referenced by `a`. This
+/// // could trigger undefined behavior if `T` is not `Copy`.
+///
+/// // Create a bitwise copy of the value at `b` in `a`.
+/// // This is safe because mutable references cannot alias.
+/// ptr::copy_nonoverlapping(b, a, 1);
+///
+/// // As above, exiting here could trigger undefined behavior because
+/// // the same value is referenced by `a` and `b`.
+///
+/// // Move `tmp` into `b`.
+/// ptr::write(b, tmp);
+///
+/// // `tmp` has been moved (`write` takes ownership of its second argument),
+/// // so nothing is dropped implicitly here.
+/// }
+/// }
+///
+/// let mut foo = "foo".to_owned();
+/// let mut bar = "bar".to_owned();
+///
+/// swap(&mut foo, &mut bar);
+///
+/// assert_eq!(foo, "bar");
+/// assert_eq!(bar, "foo");
+/// ```
+#[inline]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+pub const unsafe fn write<T>(dst: *mut T, src: T) {
+ // We are calling the intrinsics directly to avoid function calls in the generated code
+ // as `intrinsics::copy_nonoverlapping` is a wrapper function.
+ extern "rust-intrinsic" {
+ #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
+ fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
+ }
+
+ // SAFETY: the caller must guarantee that `dst` is valid for writes.
+ // `dst` cannot overlap `src` because the caller has mutable access
+ // to `dst` while `src` is owned by this function.
+ unsafe {
+ copy_nonoverlapping(&src as *const T, dst, 1);
+ intrinsics::forget(src);
+ }
+}
+
+/// Overwrites a memory location with the given value without reading or
+/// dropping the old value.
+///
+/// Unlike [`write()`], the pointer may be unaligned.
+///
+/// `write_unaligned` does not drop the contents of `dst`. This is safe, but it
+/// could leak allocations or resources, so care should be taken not to overwrite
+/// an object that should be dropped.
+///
+/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
+/// location pointed to by `dst`.
+///
+/// This is appropriate for initializing uninitialized memory, or overwriting
+/// memory that has previously been read with [`read_unaligned`].
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * `dst` must be [valid] for writes.
+///
+/// Note that even if `T` has size `0`, the pointer must be non-null.
+///
+/// [valid]: self#safety
+///
+/// ## On `packed` structs
+///
+/// Attempting to create a raw pointer to an `unaligned` struct field with
+/// an expression such as `&packed.unaligned as *const FieldType` creates an
+/// intermediate unaligned reference before converting that to a raw pointer.
+/// That this reference is temporary and immediately cast is inconsequential
+/// as the compiler always expects references to be properly aligned.
+/// As a result, using `&packed.unaligned as *const FieldType` causes immediate
+/// *undefined behavior* in your program.
+///
+/// Instead you must use the [`ptr::addr_of_mut!`](addr_of_mut)
+/// macro to create the pointer. You may use that returned pointer together with
+/// this function.
+///
+/// An example of how to do it and how this relates to `write_unaligned` is:
+///
+/// ```
+/// #[repr(packed, C)]
+/// struct Packed {
+/// _padding: u8,
+/// unaligned: u32,
+/// }
+///
+/// let mut packed: Packed = unsafe { std::mem::zeroed() };
+///
+/// // Take the address of a 32-bit integer which is not aligned.
+/// // In contrast to `&packed.unaligned as *mut _`, this has no undefined behavior.
+/// let unaligned = std::ptr::addr_of_mut!(packed.unaligned);
+///
+/// unsafe { std::ptr::write_unaligned(unaligned, 42) };
+///
+/// assert_eq!({packed.unaligned}, 42); // `{...}` forces copying the field instead of creating a reference.
+/// ```
+///
+/// Accessing unaligned fields directly with e.g. `packed.unaligned` is safe however
+/// (as can be seen in the `assert_eq!` above).
+///
+/// # Examples
+///
+/// Write a usize value to a byte buffer:
+///
+/// ```
+/// use std::mem;
+///
+/// fn write_usize(x: &mut [u8], val: usize) {
+/// assert!(x.len() >= mem::size_of::<usize>());
+///
+/// let ptr = x.as_mut_ptr() as *mut usize;
+///
+/// unsafe { ptr.write_unaligned(val) }
+/// }
+/// ```
+#[inline]
+#[stable(feature = "ptr_unaligned", since = "1.17.0")]
+#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+pub const unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
+ // SAFETY: the caller must guarantee that `dst` is valid for writes.
+ // `dst` cannot overlap `src` because the caller has mutable access
+ // to `dst` while `src` is owned by this function.
+ unsafe {
+ copy_nonoverlapping(&src as *const T as *const u8, dst as *mut u8, mem::size_of::<T>());
+ // We are calling the intrinsic directly to avoid function calls in the generated code.
+ intrinsics::forget(src);
+ }
+}
+
+/// Performs a volatile read of the value from `src` without moving it. This
+/// leaves the memory in `src` unchanged.
+///
+/// Volatile operations are intended to act on I/O memory, and are guaranteed
+/// to not be elided or reordered by the compiler across other volatile
+/// operations.
+///
+/// # Notes
+///
+/// Rust does not currently have a rigorously and formally defined memory model,
+/// so the precise semantics of what "volatile" means here is subject to change
+/// over time. That being said, the semantics will almost always end up pretty
+/// similar to [C11's definition of volatile][c11].
+///
+/// The compiler shouldn't change the relative order or number of volatile
+/// memory operations. However, volatile memory operations on zero-sized types
+/// (e.g., if a zero-sized type is passed to `read_volatile`) are no-ops
+/// and may be ignored.
+///
+/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * `src` must be [valid] for reads.
+///
+/// * `src` must be properly aligned.
+///
+/// * `src` must point to a properly initialized value of type `T`.
+///
+/// Like [`read`], `read_volatile` creates a bitwise copy of `T`, regardless of
+/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned
+/// value and the value at `*src` can [violate memory safety][read-ownership].
+/// However, storing non-[`Copy`] types in volatile memory is almost certainly
+/// incorrect.
+///
+/// Note that even if `T` has size `0`, the pointer must be non-null and properly aligned.
+///
+/// [valid]: self#safety
+/// [read-ownership]: read#ownership-of-the-returned-value
+///
+/// Just like in C, whether an operation is volatile has no bearing whatsoever
+/// on questions involving concurrent access from multiple threads. Volatile
+/// accesses behave exactly like non-atomic accesses in that regard. In particular,
+/// a race between a `read_volatile` and any write operation to the same location
+/// is undefined behavior.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// let x = 12;
+/// let y = &x as *const i32;
+///
+/// unsafe {
+/// assert_eq!(std::ptr::read_volatile(y), 12);
+/// }
+/// ```
+#[inline]
+#[stable(feature = "volatile", since = "1.9.0")]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+pub unsafe fn read_volatile<T>(src: *const T) -> T {
+ // SAFETY: the caller must uphold the safety contract for `volatile_load`.
+ unsafe {
+ assert_unsafe_precondition!(is_aligned_and_not_null(src));
+ intrinsics::volatile_load(src)
+ }
+}
+
+/// Performs a volatile write of a memory location with the given value without
+/// reading or dropping the old value.
+///
+/// Volatile operations are intended to act on I/O memory, and are guaranteed
+/// to not be elided or reordered by the compiler across other volatile
+/// operations.
+///
+/// `write_volatile` does not drop the contents of `dst`. This is safe, but it
+/// could leak allocations or resources, so care should be taken not to overwrite
+/// an object that should be dropped.
+///
+/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
+/// location pointed to by `dst`.
+///
+/// # Notes
+///
+/// Rust does not currently have a rigorously and formally defined memory model,
+/// so the precise semantics of what "volatile" means here is subject to change
+/// over time. That being said, the semantics will almost always end up pretty
+/// similar to [C11's definition of volatile][c11].
+///
+/// The compiler shouldn't change the relative order or number of volatile
+/// memory operations. However, volatile memory operations on zero-sized types
+/// (e.g., if a zero-sized type is passed to `write_volatile`) are no-ops
+/// and may be ignored.
+///
+/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * `dst` must be [valid] for writes.
+///
+/// * `dst` must be properly aligned.
+///
+/// Note that even if `T` has size `0`, the pointer must be non-null and properly aligned.
+///
+/// [valid]: self#safety
+///
+/// Just like in C, whether an operation is volatile has no bearing whatsoever
+/// on questions involving concurrent access from multiple threads. Volatile
+/// accesses behave exactly like non-atomic accesses in that regard. In particular,
+/// a race between a `write_volatile` and any other operation (reading or writing)
+/// on the same location is undefined behavior.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// let mut x = 0;
+/// let y = &mut x as *mut i32;
+/// let z = 12;
+///
+/// unsafe {
+/// std::ptr::write_volatile(y, z);
+/// assert_eq!(std::ptr::read_volatile(y), 12);
+/// }
+/// ```
+#[inline]
+#[stable(feature = "volatile", since = "1.9.0")]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
+ // SAFETY: the caller must uphold the safety contract for `volatile_store`.
+ unsafe {
+ assert_unsafe_precondition!(is_aligned_and_not_null(dst));
+ intrinsics::volatile_store(dst, src);
+ }
+}
+
+/// Align pointer `p`.
+///
+/// Calculates the offset (in elements of size `stride`) that has to be applied
+/// to pointer `p` so that `p` becomes aligned to `a`.
+///
+/// Note: This implementation has been carefully tailored to not panic. It is UB for this to panic.
+/// The only real change that can be made here is a change of `INV_TABLE_MOD_16` and its associated
+/// constants.
+///
+/// If we ever decide to make it possible to call the intrinsic with `a` that is not a
+/// power-of-two, it will probably be more prudent to just change to a naive implementation rather
+/// than trying to adapt this to accommodate that change.
+///
+/// Any questions go to @nagisa.
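+///
+/// For intuition, the public `align_offset` method on raw pointers (which this backs)
+/// behaves as sketched below; note that the method is also permitted to return
+/// `usize::MAX` even when alignment would be possible:
+///
+/// ```
+/// let x = [0u8; 16];
+/// let p = x.as_ptr();
+/// let off = p.align_offset(4);
+/// // A usable offset is not guaranteed, so a robust caller must check it.
+/// if off != usize::MAX && off < x.len() {
+///     // `p.add(off)` is 4-byte aligned.
+///     assert_eq!(unsafe { p.add(off) } as usize % 4, 0);
+/// }
+/// ```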
+#[lang = "align_offset"]
+pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
+ // FIXME(#75598): Direct use of these intrinsics improves codegen significantly at opt-level <=
+ // 1, where the method versions of these operations are not inlined.
+ use intrinsics::{
+ cttz_nonzero, exact_div, unchecked_rem, unchecked_shl, unchecked_shr, unchecked_sub,
+ wrapping_add, wrapping_mul, wrapping_sub,
+ };
+
+ /// Calculate multiplicative modular inverse of `x` modulo `m`.
+ ///
+ /// This implementation is tailored for `align_offset` and has following preconditions:
+ ///
+ /// * `m` is a power-of-two;
+ /// * `x < m`; (if `x ≥ m`, pass in `x % m` instead)
+ ///
+ /// Implementation of this function shall not panic. Ever.
+ #[inline]
+ unsafe fn mod_inv(x: usize, m: usize) -> usize {
+ /// Multiplicative modular inverse table modulo 2⁴ = 16.
+ ///
+        /// Note that this table does not contain values for which the inverse does not exist (i.e., for
+ /// `0⁻¹ mod 16`, `2⁻¹ mod 16`, etc.)
+ const INV_TABLE_MOD_16: [u8; 8] = [1, 11, 13, 7, 9, 3, 5, 15];
+ /// Modulo for which the `INV_TABLE_MOD_16` is intended.
+ const INV_TABLE_MOD: usize = 16;
+ /// INV_TABLE_MOD²
+ const INV_TABLE_MOD_SQUARED: usize = INV_TABLE_MOD * INV_TABLE_MOD;
+
+ let table_inverse = INV_TABLE_MOD_16[(x & (INV_TABLE_MOD - 1)) >> 1] as usize;
+ // SAFETY: `m` is required to be a power-of-two, hence non-zero.
+ let m_minus_one = unsafe { unchecked_sub(m, 1) };
+ if m <= INV_TABLE_MOD {
+ table_inverse & m_minus_one
+ } else {
+ // We iterate "up" using the following formula:
+ //
+ // $$ xy ≡ 1 (mod 2ⁿ) → xy (2 - xy) ≡ 1 (mod 2²ⁿ) $$
+ //
+ // until 2²ⁿ ≥ m. Then we can reduce to our desired `m` by taking the result `mod m`.
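+            //
+            // A worked instance: for `x = 3` the table gives `y = 11`
+            // (`3·11 = 33 ≡ 1 (mod 16)`), and one step yields
+            // `y' = 11·(2 - 33) ≡ 171 (mod 256)`; indeed `3·171 = 513 ≡ 1 (mod 256)`.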
+ let mut inverse = table_inverse;
+ let mut going_mod = INV_TABLE_MOD_SQUARED;
+ loop {
+ // y = y * (2 - xy) mod n
+ //
+                // Note that we use wrapping operations here intentionally: the original formula
+                // uses e.g. subtraction `mod n`. It is entirely fine to do them modulo
+                // `usize::MAX + 1` (i.e., with ordinary wrapping arithmetic) instead, because we
+                // take the result `mod n` at the end anyway.
+ inverse = wrapping_mul(inverse, wrapping_sub(2usize, wrapping_mul(x, inverse)));
+ if going_mod >= m {
+ return inverse & m_minus_one;
+ }
+ going_mod = wrapping_mul(going_mod, going_mod);
+ }
+ }
+ }
+
+ let addr = p.addr();
+ let stride = mem::size_of::<T>();
+ // SAFETY: `a` is a power-of-two, therefore non-zero.
+ let a_minus_one = unsafe { unchecked_sub(a, 1) };
+
+ if stride == 0 {
+ // SPECIAL_CASE: handle 0-sized types. No matter how many times we step, the address will
+ // stay the same, so no offset will be able to align the pointer unless it is already
+ // aligned. This branch _will_ be optimized out as `stride` is known at compile-time.
+ let p_mod_a = addr & a_minus_one;
+ return if p_mod_a == 0 { 0 } else { usize::MAX };
+ }
+
+ // SAFETY: `stride == 0` case has been handled by the special case above.
+ let a_mod_stride = unsafe { unchecked_rem(a, stride) };
+ if a_mod_stride == 0 {
+ // SPECIAL_CASE: In cases where the `a` is divisible by `stride`, byte offset to align a
+ // pointer can be computed more simply through `-p (mod a)`. In the off-chance the byte
+ // offset is not a multiple of `stride`, the input pointer was misaligned and no pointer
+ // offset will be able to produce a `p` aligned to the specified `a`.
+ //
+ // The naive `-p (mod a)` equation inhibits LLVM's ability to select instructions
+ // like `lea`. We compute `(round_up_to_next_alignment(p, a) - p)` instead. This
+ // redistributes operations around the load-bearing, but pessimizing `and` instruction
+ // sufficiently for LLVM to be able to utilize the various optimizations it knows about.
+ //
+ // LLVM handles the branch here particularly nicely. If this branch needs to be evaluated
+ // at runtime, it will produce a mask `if addr_mod_stride == 0 { 0 } else { usize::MAX }`
+ // in a branch-free way and then bitwise-OR it with whatever result the `-p mod a`
+ // computation produces.
+
+ // SAFETY: `stride == 0` case has been handled by the special case above.
+ let addr_mod_stride = unsafe { unchecked_rem(addr, stride) };
+
+ return if addr_mod_stride == 0 {
+ let aligned_address = wrapping_add(addr, a_minus_one) & wrapping_sub(0, a);
+ let byte_offset = wrapping_sub(aligned_address, addr);
+ // SAFETY: `stride` is non-zero. This is guaranteed to divide exactly as well, because
+ // addr has been verified to be aligned to the original type’s alignment requirements.
+ unsafe { exact_div(byte_offset, stride) }
+ } else {
+ usize::MAX
+ };
+ }
+
+ // GENERAL_CASE: From here on we’re handling the very general case where `addr` may be
+ // misaligned, there isn’t an obvious relationship between `stride` and `a` that we can take an
+ // advantage of, etc. This case produces machine code that isn’t particularly high quality,
+ // compared to the special cases above. The code produced here is still within the realm of
+ // miracles, given the situations this case has to deal with.
+
+ // SAFETY: a is power-of-two hence non-zero. stride == 0 case is handled above.
+ let gcdpow = unsafe { cttz_nonzero(stride).min(cttz_nonzero(a)) };
+ // SAFETY: gcdpow has an upper-bound that’s at most the number of bits in a usize.
+ let gcd = unsafe { unchecked_shl(1usize, gcdpow) };
+ // SAFETY: gcd is always greater or equal to 1.
+ if addr & unsafe { unchecked_sub(gcd, 1) } == 0 {
+ // This branch solves for the following linear congruence equation:
+ //
+ // ` p + so = 0 mod a `
+ //
+ // `p` here is the pointer value, `s` - stride of `T`, `o` offset in `T`s, and `a` - the
+ // requested alignment.
+ //
+ // With `g = gcd(a, s)`, and the above condition asserting that `p` is also divisible by
+ // `g`, we can denote `a' = a/g`, `s' = s/g`, `p' = p/g`, then this becomes equivalent to:
+ //
+ // ` p' + s'o = 0 mod a' `
+ // ` o = (a' - (p' mod a')) * (s'^-1 mod a') `
+ //
+ // The first term is "the relative alignment of `p` to `a`" (divided by the `g`), the
+ // second term is "how does incrementing `p` by `s` bytes change the relative alignment of
+ // `p`" (again divided by `g`). Division by `g` is necessary to make the inverse well
+ // formed if `a` and `s` are not co-prime.
+ //
+ // Furthermore, the result produced by this solution is not "minimal", so it is necessary
+ // to take the result `o mod lcm(s, a)`. This `lcm(s, a)` is the same as `a'`.
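+        //
+        // A worked instance: `a = 8`, `s = 6`, `p ≡ 2 (mod 8)` gives `g = 2`, `a' = 4`,
+        // `s' = 3`, and `p' mod a' = 1`; then `a' - (p' mod a') = 3` and `s'⁻¹ mod a' = 3`
+        // (since `3·3 ≡ 1 (mod 4)`), so `o = 3·3 mod 4 = 1`, and indeed
+        // `p + s·o ≡ 2 + 6 ≡ 0 (mod 8)`.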
+
+ // SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in
+ // `a`.
+ let a2 = unsafe { unchecked_shr(a, gcdpow) };
+ // SAFETY: `a2` is non-zero. Shifting `a` by `gcdpow` cannot shift out any of the set bits
+ // in `a` (of which it has exactly one).
+ let a2minus1 = unsafe { unchecked_sub(a2, 1) };
+ // SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in
+ // `a`.
+ let s2 = unsafe { unchecked_shr(stride & a_minus_one, gcdpow) };
+ // SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in
+ // `a`. Furthermore, the subtraction cannot overflow, because `a2 = a >> gcdpow` will
+ // always be strictly greater than `(p % a) >> gcdpow`.
+ let minusp2 = unsafe { unchecked_sub(a2, unchecked_shr(addr & a_minus_one, gcdpow)) };
+ // SAFETY: `a2` is a power-of-two, as proven above. `s2` is strictly less than `a2`
+ // because `(s % a) >> gcdpow` is strictly less than `a >> gcdpow`.
+ return wrapping_mul(minusp2, unsafe { mod_inv(s2, a2) }) & a2minus1;
+ }
+
+ // Cannot be aligned at all.
+ usize::MAX
+}
+
+/// Compares raw pointers for equality.
+///
+/// This is the same as using the `==` operator, but less generic:
+/// the arguments have to be `*const T` raw pointers,
+/// not anything that implements `PartialEq`.
+///
+/// This can be used to compare `&T` references (which coerce to `*const T` implicitly)
+/// by their address rather than comparing the values they point to
+/// (which is what the `PartialEq for &T` implementation does).
+///
+/// # Examples
+///
+/// ```
+/// use std::ptr;
+///
+/// let five = 5;
+/// let other_five = 5;
+/// let five_ref = &five;
+/// let same_five_ref = &five;
+/// let other_five_ref = &other_five;
+///
+/// assert!(five_ref == same_five_ref);
+/// assert!(ptr::eq(five_ref, same_five_ref));
+///
+/// assert!(five_ref == other_five_ref);
+/// assert!(!ptr::eq(five_ref, other_five_ref));
+/// ```
+///
+/// Slices are also compared by their length (fat pointers):
+///
+/// ```
+/// let a = [1, 2, 3];
+/// assert!(std::ptr::eq(&a[..3], &a[..3]));
+/// assert!(!std::ptr::eq(&a[..2], &a[..3]));
+/// assert!(!std::ptr::eq(&a[0..2], &a[1..3]));
+/// ```
+///
+/// Traits are also compared by their implementation:
+///
+/// ```
+/// #[repr(transparent)]
+/// struct Wrapper { member: i32 }
+///
+/// trait Trait {}
+/// impl Trait for Wrapper {}
+/// impl Trait for i32 {}
+///
+/// let wrapper = Wrapper { member: 10 };
+///
+/// // Pointers have equal addresses.
+/// assert!(std::ptr::eq(
+/// &wrapper as *const Wrapper as *const u8,
+/// &wrapper.member as *const i32 as *const u8
+/// ));
+///
+/// // Objects have equal addresses, but `Trait` has different implementations.
+/// assert!(!std::ptr::eq(
+/// &wrapper as &dyn Trait,
+/// &wrapper.member as &dyn Trait,
+/// ));
+/// assert!(!std::ptr::eq(
+/// &wrapper as &dyn Trait as *const dyn Trait,
+/// &wrapper.member as &dyn Trait as *const dyn Trait,
+/// ));
+///
+/// // Converting the reference to a `*const u8` compares by address.
+/// assert!(std::ptr::eq(
+/// &wrapper as &dyn Trait as *const dyn Trait as *const u8,
+/// &wrapper.member as &dyn Trait as *const dyn Trait as *const u8,
+/// ));
+/// ```
+#[stable(feature = "ptr_eq", since = "1.17.0")]
+#[inline]
+pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
+ a == b
+}
+
+/// Hash a raw pointer.
+///
+/// This can be used to hash a `&T` reference (which coerces to `*const T` implicitly)
+/// by its address rather than the value it points to
+/// (which is what the `Hash for &T` implementation does).
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::hash_map::DefaultHasher;
+/// use std::hash::{Hash, Hasher};
+/// use std::ptr;
+///
+/// let five = 5;
+/// let five_ref = &five;
+///
+/// let mut hasher = DefaultHasher::new();
+/// ptr::hash(five_ref, &mut hasher);
+/// let actual = hasher.finish();
+///
+/// let mut hasher = DefaultHasher::new();
+/// (five_ref as *const i32).hash(&mut hasher);
+/// let expected = hasher.finish();
+///
+/// assert_eq!(actual, expected);
+/// ```
+#[stable(feature = "ptr_hash", since = "1.35.0")]
+pub fn hash<T: ?Sized, S: hash::Hasher>(hashee: *const T, into: &mut S) {
+ use crate::hash::Hash;
+ hashee.hash(into);
+}
+
+// If this is a unary fn pointer, it adds a doc comment.
+// Otherwise, it hides the docs entirely.
+macro_rules! maybe_fnptr_doc {
+ (@ #[$meta:meta] $item:item) => {
+ #[doc(hidden)]
+ #[$meta]
+ $item
+ };
+ ($a:ident @ #[$meta:meta] $item:item) => {
+ #[cfg_attr(not(bootstrap), doc(fake_variadic))]
+ #[doc = "This trait is implemented for function pointers with up to twelve arguments."]
+ #[$meta]
+ $item
+ };
+ ($a:ident $($rest_a:ident)+ @ #[$meta:meta] $item:item) => {
+ #[doc(hidden)]
+ #[$meta]
+ $item
+ };
+}
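+
+// For instance (illustrative), the single-ident invocation `fnptr_impls_args! { T }` below
+// routes through the middle arm here, so the impls for one-argument function pointers carry
+// the visible "fake variadic" documentation, while every other arity is `#[doc(hidden)]`.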
+
+// FIXME(strict_provenance_magic): function pointers have buggy codegen that
+// necessitates casting to a usize to get the backend to do the right thing.
+// for now I will break AVR to silence *a billion* lints. We should probably
+// have a proper "opaque function pointer type" to handle this kind of thing.
+
+// Impls for function pointers
+macro_rules! fnptr_impls_safety_abi {
+ ($FnTy: ty, $($Arg: ident),*) => {
+ maybe_fnptr_doc! {
+ $($Arg)* @
+ #[stable(feature = "fnptr_impls", since = "1.4.0")]
+ impl<Ret, $($Arg),*> PartialEq for $FnTy {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ *self as usize == *other as usize
+ }
+ }
+ }
+
+ maybe_fnptr_doc! {
+ $($Arg)* @
+ #[stable(feature = "fnptr_impls", since = "1.4.0")]
+ impl<Ret, $($Arg),*> Eq for $FnTy {}
+ }
+
+ maybe_fnptr_doc! {
+ $($Arg)* @
+ #[stable(feature = "fnptr_impls", since = "1.4.0")]
+ impl<Ret, $($Arg),*> PartialOrd for $FnTy {
+ #[inline]
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ (*self as usize).partial_cmp(&(*other as usize))
+ }
+ }
+ }
+
+ maybe_fnptr_doc! {
+ $($Arg)* @
+ #[stable(feature = "fnptr_impls", since = "1.4.0")]
+ impl<Ret, $($Arg),*> Ord for $FnTy {
+ #[inline]
+ fn cmp(&self, other: &Self) -> Ordering {
+ (*self as usize).cmp(&(*other as usize))
+ }
+ }
+ }
+
+ maybe_fnptr_doc! {
+ $($Arg)* @
+ #[stable(feature = "fnptr_impls", since = "1.4.0")]
+ impl<Ret, $($Arg),*> hash::Hash for $FnTy {
+ fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
+ state.write_usize(*self as usize)
+ }
+ }
+ }
+
+ maybe_fnptr_doc! {
+ $($Arg)* @
+ #[stable(feature = "fnptr_impls", since = "1.4.0")]
+ impl<Ret, $($Arg),*> fmt::Pointer for $FnTy {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::pointer_fmt_inner(*self as usize, f)
+ }
+ }
+ }
+
+ maybe_fnptr_doc! {
+ $($Arg)* @
+ #[stable(feature = "fnptr_impls", since = "1.4.0")]
+ impl<Ret, $($Arg),*> fmt::Debug for $FnTy {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::pointer_fmt_inner(*self as usize, f)
+ }
+ }
+ }
+ }
+}
+
+macro_rules! fnptr_impls_args {
+ ($($Arg: ident),+) => {
+ fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ }
+ fnptr_impls_safety_abi! { extern "C" fn($($Arg),+) -> Ret, $($Arg),+ }
+ fnptr_impls_safety_abi! { extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
+ fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ }
+ fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+) -> Ret, $($Arg),+ }
+ fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
+ };
+ () => {
+ // No variadic functions with 0 parameters
+ fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, }
+ fnptr_impls_safety_abi! { extern "C" fn() -> Ret, }
+ fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, }
+ fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, }
+ };
+}
+
+fnptr_impls_args! {}
+fnptr_impls_args! { T }
+fnptr_impls_args! { A, B }
+fnptr_impls_args! { A, B, C }
+fnptr_impls_args! { A, B, C, D }
+fnptr_impls_args! { A, B, C, D, E }
+fnptr_impls_args! { A, B, C, D, E, F }
+fnptr_impls_args! { A, B, C, D, E, F, G }
+fnptr_impls_args! { A, B, C, D, E, F, G, H }
+fnptr_impls_args! { A, B, C, D, E, F, G, H, I }
+fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J }
+fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K }
+fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K, L }
+
+/// Create a `const` raw pointer to a place, without creating an intermediate reference.
+///
+/// Creating a reference with `&`/`&mut` is only allowed if the pointer is properly aligned
+/// and points to initialized data. For cases where those requirements do not hold,
+/// raw pointers should be used instead. However, `&expr as *const _` creates a reference
+/// before casting it to a raw pointer, and that reference is subject to the same rules
+/// as all other references. This macro can create a raw pointer *without* creating
+/// a reference first.
+///
+/// Note, however, that the `expr` in `addr_of!(expr)` is still subject to all
+/// the usual rules. In particular, `addr_of!(*ptr::null())` is Undefined
+/// Behavior because it dereferences a null pointer.
+///
+/// # Example
+///
+/// ```
+/// use std::ptr;
+///
+/// #[repr(packed)]
+/// struct Packed {
+/// f1: u8,
+/// f2: u16,
+/// }
+///
+/// let packed = Packed { f1: 1, f2: 2 };
+/// // `&packed.f2` would create an unaligned reference, and thus be Undefined Behavior!
+/// let raw_f2 = ptr::addr_of!(packed.f2);
+/// assert_eq!(unsafe { raw_f2.read_unaligned() }, 2);
+/// ```
+///
+/// See [`addr_of_mut`] for how to create a pointer to uninitialized data.
+/// Doing that with `addr_of` would not make much sense since one could only
+/// read the data, and that would be Undefined Behavior.
+#[stable(feature = "raw_ref_macros", since = "1.51.0")]
+#[rustc_macro_transparency = "semitransparent"]
+#[allow_internal_unstable(raw_ref_op)]
+pub macro addr_of($place:expr) {
+ &raw const $place
+}
+
+/// Create a `mut` raw pointer to a place, without creating an intermediate reference.
+///
+/// Creating a reference with `&`/`&mut` is only allowed if the pointer is properly aligned
+/// and points to initialized data. For cases where those requirements do not hold,
+/// raw pointers should be used instead. However, `&mut expr as *mut _` creates a reference
+/// before casting it to a raw pointer, and that reference is subject to the same rules
+/// as all other references. This macro can create a raw pointer *without* creating
+/// a reference first.
+///
+/// Note, however, that the `expr` in `addr_of_mut!(expr)` is still subject to all
+/// the usual rules. In particular, `addr_of_mut!(*ptr::null_mut())` is Undefined
+/// Behavior because it dereferences a null pointer.
+///
+/// # Examples
+///
+/// **Creating a pointer to unaligned data:**
+///
+/// ```
+/// use std::ptr;
+///
+/// #[repr(packed)]
+/// struct Packed {
+/// f1: u8,
+/// f2: u16,
+/// }
+///
+/// let mut packed = Packed { f1: 1, f2: 2 };
+/// // `&mut packed.f2` would create an unaligned reference, and thus be Undefined Behavior!
+/// let raw_f2 = ptr::addr_of_mut!(packed.f2);
+/// unsafe { raw_f2.write_unaligned(42); }
+/// assert_eq!({packed.f2}, 42); // `{...}` forces copying the field instead of creating a reference.
+/// ```
+///
+/// **Creating a pointer to uninitialized data:**
+///
+/// ```rust
+/// use std::{ptr, mem::MaybeUninit};
+///
+/// struct Demo {
+/// field: bool,
+/// }
+///
+/// let mut uninit = MaybeUninit::<Demo>::uninit();
+/// // `&uninit.as_mut().field` would create a reference to an uninitialized `bool`,
+/// // and thus be Undefined Behavior!
+/// let f1_ptr = unsafe { ptr::addr_of_mut!((*uninit.as_mut_ptr()).field) };
+/// unsafe { f1_ptr.write(true); }
+/// let init = unsafe { uninit.assume_init() };
+/// ```
+#[stable(feature = "raw_ref_macros", since = "1.51.0")]
+#[rustc_macro_transparency = "semitransparent"]
+#[allow_internal_unstable(raw_ref_op)]
+pub macro addr_of_mut($place:expr) {
+ &raw mut $place
+}
diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs
new file mode 100644
index 000000000..fc3dd2a9b
--- /dev/null
+++ b/library/core/src/ptr/mut_ptr.rs
@@ -0,0 +1,1973 @@
+use super::*;
+use crate::cmp::Ordering::{self, Equal, Greater, Less};
+use crate::intrinsics;
+use crate::slice::{self, SliceIndex};
+
+impl<T: ?Sized> *mut T {
+ /// Returns `true` if the pointer is null.
+ ///
+ /// Note that unsized types have many possible null pointers, as only the
+ /// raw data pointer is considered, not their length, vtable, etc.
+ /// Therefore, two pointers that are null may still not compare equal to
+ /// each other.
+ ///
+ /// ## Behavior during const evaluation
+ ///
+ /// When this function is used during const evaluation, it may return `false` for pointers
+ /// that turn out to be null at runtime. Specifically, when a pointer to some memory
+ /// is offset beyond its bounds in such a way that the resulting pointer is null,
+ /// the function will still return `false`. There is no way for CTFE to know
+ /// the absolute position of that memory, so we cannot tell if the pointer is
+ /// null or not.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = [1, 2, 3];
+ /// let ptr: *mut u32 = s.as_mut_ptr();
+ /// assert!(!ptr.is_null());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_ptr_is_null", issue = "74939")]
+ #[inline]
+ pub const fn is_null(self) -> bool {
+ // Compare via a cast to a thin pointer, so that fat pointers only
+ // consider their "data" part for null-ness.
+ (self as *mut u8).guaranteed_eq(null_mut())
+ }
+
+ /// Casts to a pointer of another type.
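+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, viewing a `u16` as raw bytes (the values here are illustrative):
+ ///
+ /// ```
+ /// let mut x = 0u16;
+ /// let p: *mut u16 = &mut x;
+ /// let bytes: *mut u8 = p.cast::<u8>();
+ /// // Both bytes of a zeroed `u16` are zero, regardless of endianness.
+ /// unsafe { assert_eq!(*bytes, 0) };
+ /// ```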
+ #[stable(feature = "ptr_cast", since = "1.38.0")]
+ #[rustc_const_stable(feature = "const_ptr_cast", since = "1.38.0")]
+ #[inline(always)]
+ pub const fn cast<U>(self) -> *mut U {
+ self as _
+ }
+
+ /// Use the pointer value in a new pointer of another type.
+ ///
+ /// In case `val` is a (fat) pointer to an unsized type, this operation
+ /// will ignore the pointer part, whereas for (thin) pointers to sized
+ /// types, this has the same effect as a simple cast.
+ ///
+ /// The resulting pointer will have provenance of `self`, i.e., for a fat
+ /// pointer, this operation is semantically the same as creating a new
+ /// fat pointer with the data pointer value of `self` but the metadata of
+ /// `val`.
+ ///
+ /// # Examples
+ ///
+ /// This function is primarily useful for allowing byte-wise pointer
+ /// arithmetic on potentially fat pointers:
+ ///
+ /// ```
+ /// #![feature(set_ptr_value)]
+ /// # use core::fmt::Debug;
+ /// let mut arr: [i32; 3] = [1, 2, 3];
+ /// let mut ptr = arr.as_mut_ptr() as *mut dyn Debug;
+ /// let thin = ptr as *mut u8;
+ /// unsafe {
+ /// ptr = thin.add(8).with_metadata_of(ptr);
+ /// # assert_eq!(*(ptr as *mut i32), 3);
+ /// println!("{:?}", &*ptr); // will print "3"
+ /// }
+ /// ```
+ #[unstable(feature = "set_ptr_value", issue = "75091")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[inline]
+ pub fn with_metadata_of<U>(self, mut val: *mut U) -> *mut U
+ where
+ U: ?Sized,
+ {
+ let target = &mut val as *mut *mut U as *mut *mut u8;
+ // SAFETY: In case of a thin pointer, this operation is identical
+ // to a simple assignment. In case of a fat pointer, with the current
+ // fat pointer layout implementation, the first field of such a
+ // pointer is always the data pointer, which is likewise assigned.
+ unsafe { *target = self as *mut u8 };
+ val
+ }
+
+ /// Changes constness without changing the type.
+ ///
+ /// This is a bit safer than `as` because it wouldn't silently change the type if the code is
+ /// refactored.
+ ///
+ /// While not strictly required (`*mut T` coerces to `*const T`), this is provided for symmetry
+ /// with [`cast_mut`] on `*const T` and may have documentation value if used instead of implicit
+ /// coercion.
+ ///
+ /// [`cast_mut`]: #method.cast_mut
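+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch (the bindings are illustrative):
+ ///
+ /// ```
+ /// #![feature(ptr_const_cast)]
+ /// let mut x = 5;
+ /// let p: *mut i32 = &mut x;
+ /// let q: *const i32 = p.cast_const();
+ /// assert_eq!(unsafe { *q }, 5);
+ /// ```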
+ #[unstable(feature = "ptr_const_cast", issue = "92675")]
+ #[rustc_const_unstable(feature = "ptr_const_cast", issue = "92675")]
+ pub const fn cast_const(self) -> *const T {
+ self as _
+ }
+
+ /// Casts a pointer to its raw bits.
+ ///
+ /// This is equivalent to `as usize`, but is more specific to enhance readability.
+ /// The inverse method is [`from_bits`](#method.from_bits-1).
+ ///
+ /// In particular, `*p as usize` and `p as usize` will both compile for
+ /// pointers to numeric types but do very different things, so using this
+ /// helps emphasize that reading the bits was intentional.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ptr_to_from_bits)]
+ /// let mut array = [13, 42];
+ /// let mut it = array.iter_mut();
+ /// let p0: *mut i32 = it.next().unwrap();
+ /// assert_eq!(<*mut _>::from_bits(p0.to_bits()), p0);
+ /// let p1: *mut i32 = it.next().unwrap();
+ /// assert_eq!(p1.to_bits() - p0.to_bits(), 4);
+ /// ```
+ #[unstable(feature = "ptr_to_from_bits", issue = "91126")]
+ pub fn to_bits(self) -> usize
+ where
+ T: Sized,
+ {
+ self as usize
+ }
+
+ /// Creates a pointer from its raw bits.
+ ///
+ /// This is equivalent to `as *mut T`, but is more specific to enhance readability.
+ /// The inverse method is [`to_bits`](#method.to_bits-1).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ptr_to_from_bits)]
+ /// use std::ptr::NonNull;
+ /// let dangling: *mut u8 = NonNull::dangling().as_ptr();
+ /// assert_eq!(<*mut u8>::from_bits(1), dangling);
+ /// ```
+ #[unstable(feature = "ptr_to_from_bits", issue = "91126")]
+ pub fn from_bits(bits: usize) -> Self
+ where
+ T: Sized,
+ {
+ bits as Self
+ }
+
+ /// Gets the "address" portion of the pointer.
+ ///
+ /// This is similar to `self as usize`, which semantically discards *provenance* and
+ /// *address-space* information. However, unlike `self as usize`, casting the returned address
+ /// back to a pointer yields [`invalid`][], which is undefined behavior to dereference. To
+ /// properly restore the lost information and obtain a dereferenceable pointer, use
+ /// [`with_addr`][pointer::with_addr] or [`map_addr`][pointer::map_addr].
+ ///
+ /// If using those APIs is not possible because there is no way to preserve a pointer with the
+ /// required provenance, use [`expose_addr`][pointer::expose_addr] and
+ /// [`from_exposed_addr_mut`][from_exposed_addr_mut] instead. However, note that this makes
+ /// your code less portable and less amenable to tools that check for compliance with the Rust
+ /// memory model.
+ ///
+ /// On most platforms this will produce a value with the same bytes as the original
+ /// pointer, because all the bytes are dedicated to describing the address.
+ /// Platforms which need to store additional information in the pointer may
+ /// perform a change of representation to produce a value containing only the address
+ /// portion of the pointer. What that means is up to the platform to define.
+ ///
+ /// This API and its claimed semantics are part of the Strict Provenance experiment, and as such
+ /// might change in the future (including possibly weakening this so it becomes wholly
+ /// equivalent to `self as usize`). See the [module documentation][crate::ptr] for details.
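+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch of the intended round-trip (concrete addresses are
+ /// platform-dependent, so only the round-trip itself is asserted):
+ ///
+ /// ```
+ /// #![feature(strict_provenance)]
+ /// let mut x = 5u32;
+ /// let p: *mut u32 = &mut x;
+ /// // The address can be carried around separately and later recombined
+ /// // with the original provenance via `with_addr`.
+ /// let a = p.addr();
+ /// assert_eq!(p.with_addr(a), p);
+ /// ```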
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "strict_provenance", issue = "95228")]
+ pub fn addr(self) -> usize
+ where
+ T: Sized,
+ {
+ // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
+ // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
+ // provenance).
+ unsafe { mem::transmute(self) }
+ }
+
+ /// Gets the "address" portion of the pointer, and 'exposes' the "provenance" part for future
+ /// use in [`from_exposed_addr`][].
+ ///
+ /// This is equivalent to `self as usize`, which semantically discards *provenance* and
+ /// *address-space* information. Furthermore, this (like the `as` cast) has the implicit
+ /// side-effect of marking the provenance as 'exposed', so on platforms that support it you can
+ /// later call [`from_exposed_addr_mut`][] to reconstitute the original pointer including its
+ /// provenance. (Reconstructing address space information, if required, is your responsibility.)
+ ///
+ /// Using this method means that code is *not* following Strict Provenance rules. Supporting
+ /// [`from_exposed_addr_mut`][] complicates specification and reasoning and may not be supported
+ /// by tools that help you to stay conformant with the Rust memory model, so it is recommended
+ /// to use [`addr`][pointer::addr] wherever possible.
+ ///
+ /// On most platforms this will produce a value with the same bytes as the original pointer,
+ /// because all the bytes are dedicated to describing the address. Platforms which need to store
+ /// additional information in the pointer may not support this operation, since the 'expose'
+ /// side-effect which is required for [`from_exposed_addr_mut`][] to work is typically not
+ /// available.
+ ///
+ /// This API and its claimed semantics are part of the Strict Provenance experiment, see the
+ /// [module documentation][crate::ptr] for details.
+ ///
+ /// [`from_exposed_addr_mut`]: from_exposed_addr_mut
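+ ///
+ /// # Examples
+ ///
+ /// A small sketch of the expose/reconstruct round-trip (only the round-trip is
+ /// asserted, since concrete addresses are platform-dependent):
+ ///
+ /// ```
+ /// #![feature(strict_provenance)]
+ /// use std::ptr;
+ ///
+ /// let mut x = 7u8;
+ /// let p: *mut u8 = &mut x;
+ /// let a = p.expose_addr();
+ /// // The exposed address can later be turned back into a usable pointer.
+ /// let q = ptr::from_exposed_addr_mut::<u8>(a);
+ /// unsafe { assert_eq!(*q, 7) };
+ /// ```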
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "strict_provenance", issue = "95228")]
+ pub fn expose_addr(self) -> usize
+ where
+ T: Sized,
+ {
+ // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
+ self as usize
+ }
+
+ /// Creates a new pointer with the given address.
+ ///
+ /// This performs the same operation as an `addr as ptr` cast, but copies
+ /// the *address-space* and *provenance* of `self` to the new pointer.
+ /// This allows us to dynamically preserve and propagate this important
+ /// information in a way that is otherwise impossible with a unary cast.
+ ///
+ /// This is equivalent to using [`wrapping_offset`][pointer::wrapping_offset] to offset
+ /// `self` to the given address, and therefore has all the same capabilities and restrictions.
+ ///
+ /// This API and its claimed semantics are part of the Strict Provenance experiment,
+ /// see the [module documentation][crate::ptr] for details.
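+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, re-deriving a pointer to the next array element from a raw
+ /// address (the array contents are illustrative):
+ ///
+ /// ```
+ /// #![feature(strict_provenance)]
+ /// let mut data = [1u8, 2, 3];
+ /// let p: *mut u8 = data.as_mut_ptr();
+ /// // Keep the provenance of `p`, so the result may be dereferenced.
+ /// let q = p.with_addr(p.addr() + 1);
+ /// unsafe { assert_eq!(*q, 2) };
+ /// ```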
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "strict_provenance", issue = "95228")]
+ pub fn with_addr(self, addr: usize) -> Self
+ where
+ T: Sized,
+ {
+ // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
+ //
+ // In the meantime, this operation is defined to be "as if" it were
+ // a wrapping_offset, so we can emulate it as such. This should properly
+ // restore pointer provenance even under today's compiler.
+ let self_addr = self.addr() as isize;
+ let dest_addr = addr as isize;
+ let offset = dest_addr.wrapping_sub(self_addr);
+
+ // This is the canonical desugaring of this operation
+ self.cast::<u8>().wrapping_offset(offset).cast::<T>()
+ }
+
+ /// Creates a new pointer by mapping `self`'s address to a new one.
+ ///
+ /// This is a convenience for [`with_addr`][pointer::with_addr], see that method for details.
+ ///
+ /// This API and its claimed semantics are part of the Strict Provenance experiment,
+ /// see the [module documentation][crate::ptr] for details.
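+ ///
+ /// # Examples
+ ///
+ /// A small sketch of tagging the low bit of a well-aligned pointer (the tagging
+ /// scheme is hypothetical):
+ ///
+ /// ```
+ /// #![feature(strict_provenance)]
+ /// let mut x = 0u32;
+ /// let p: *mut u32 = &mut x;
+ /// // `u32` is at least 4-aligned, so the low bit of the address is free.
+ /// let tagged = p.map_addr(|a| a | 1);
+ /// let untagged = tagged.map_addr(|a| a & !1);
+ /// assert_eq!(untagged, p);
+ /// ```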
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "strict_provenance", issue = "95228")]
+ pub fn map_addr(self, f: impl FnOnce(usize) -> usize) -> Self
+ where
+ T: Sized,
+ {
+ self.with_addr(f(self.addr()))
+ }
+
+ /// Decompose a (possibly wide) pointer into its address and metadata components.
+ ///
+ /// The pointer can later be reconstructed with [`from_raw_parts_mut`].
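+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, splitting a slice pointer into its data pointer and length:
+ ///
+ /// ```
+ /// #![feature(ptr_metadata)]
+ ///
+ /// let mut arr = [1, 2, 3];
+ /// let p: *mut [i32] = &mut arr[..];
+ /// let (data, len) = p.to_raw_parts();
+ /// assert_eq!(len, 3);
+ /// assert_eq!(data, p.cast::<()>());
+ /// ```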
+ #[unstable(feature = "ptr_metadata", issue = "81513")]
+ #[rustc_const_unstable(feature = "ptr_metadata", issue = "81513")]
+ #[inline]
+ pub const fn to_raw_parts(self) -> (*mut (), <T as super::Pointee>::Metadata) {
+ (self.cast(), super::metadata(self))
+ }
+
+ /// Returns `None` if the pointer is null, or else returns a shared reference to
+ /// the value wrapped in `Some`. If the value may be uninitialized, [`as_uninit_ref`]
+ /// must be used instead.
+ ///
+ /// For the mutable counterpart see [`as_mut`].
+ ///
+ /// [`as_uninit_ref`]: #method.as_uninit_ref-1
+ /// [`as_mut`]: #method.as_mut
+ ///
+ /// # Safety
+ ///
+ /// When calling this method, you have to ensure that *either* the pointer is null *or*
+ /// all of the following is true:
+ ///
+ /// * The pointer must be properly aligned.
+ ///
+ /// * It must be "dereferenceable" in the sense defined in [the module documentation].
+ ///
+ /// * The pointer must point to an initialized instance of `T`.
+ ///
+ /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+ /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+ /// In particular, while this reference exists, the memory the pointer points to must
+ /// not get mutated (except inside `UnsafeCell`).
+ ///
+ /// This applies even if the result of this method is unused!
+ /// (The part about being initialized is not yet fully decided, but until
+ /// it is, the only safe approach is to ensure that the value is indeed initialized.)
+ ///
+ /// [the module documentation]: crate::ptr#safety
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let ptr: *mut u8 = &mut 10u8 as *mut u8;
+ ///
+ /// unsafe {
+ /// if let Some(val_back) = ptr.as_ref() {
+ /// println!("We got back the value: {val_back}!");
+ /// }
+ /// }
+ /// ```
+ ///
+ /// # Null-unchecked version
+ ///
+ /// If you are sure the pointer can never be null and are looking for some kind of
+ /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can
+ /// dereference the pointer directly.
+ ///
+ /// ```
+ /// let ptr: *mut u8 = &mut 10u8 as *mut u8;
+ ///
+ /// unsafe {
+ /// let val_back = &*ptr;
+ /// println!("We got back the value: {val_back}!");
+ /// }
+ /// ```
+ #[stable(feature = "ptr_as_ref", since = "1.9.0")]
+ #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+ #[inline]
+ pub const unsafe fn as_ref<'a>(self) -> Option<&'a T> {
+ // SAFETY: the caller must guarantee that `self` is valid for a
+ // reference if it isn't null.
+ if self.is_null() { None } else { unsafe { Some(&*self) } }
+ }
+
+ /// Returns `None` if the pointer is null, or else returns a shared reference to
+ /// the value wrapped in `Some`. In contrast to [`as_ref`], this does not require
+ /// that the value has to be initialized.
+ ///
+ /// For the mutable counterpart see [`as_uninit_mut`].
+ ///
+ /// [`as_ref`]: #method.as_ref-1
+ /// [`as_uninit_mut`]: #method.as_uninit_mut
+ ///
+ /// # Safety
+ ///
+ /// When calling this method, you have to ensure that *either* the pointer is null *or*
+ /// all of the following is true:
+ ///
+ /// * The pointer must be properly aligned.
+ ///
+ /// * It must be "dereferenceable" in the sense defined in [the module documentation].
+ ///
+ /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+ /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+ /// In particular, while this reference exists, the memory the pointer points to must
+ /// not get mutated (except inside `UnsafeCell`).
+ ///
+ /// This applies even if the result of this method is unused!
+ ///
+ /// [the module documentation]: crate::ptr#safety
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(ptr_as_uninit)]
+ ///
+ /// let ptr: *mut u8 = &mut 10u8 as *mut u8;
+ ///
+ /// unsafe {
+ /// if let Some(val_back) = ptr.as_uninit_ref() {
+ /// println!("We got back the value: {}!", val_back.assume_init());
+ /// }
+ /// }
+ /// ```
+ #[inline]
+ #[unstable(feature = "ptr_as_uninit", issue = "75402")]
+ #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+ pub const unsafe fn as_uninit_ref<'a>(self) -> Option<&'a MaybeUninit<T>>
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must guarantee that `self` meets all the
+ // requirements for a reference.
+ if self.is_null() { None } else { Some(unsafe { &*(self as *const MaybeUninit<T>) }) }
+ }
+
+ /// Calculates the offset from a pointer.
+ ///
+ /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is Undefined
+ /// Behavior:
+ ///
+ /// * Both the starting and resulting pointer must be either in bounds or one
+ /// byte past the end of the same [allocated object].
+ ///
+ /// * The computed offset, **in bytes**, cannot overflow an `isize`.
+ ///
+ /// * The offset being in bounds cannot rely on "wrapping around" the address
+ /// space. That is, the infinite-precision sum, **in bytes**, must fit in a `usize`.
+ ///
+ /// The compiler and standard library generally try to ensure allocations
+ /// never reach a size where an offset is a concern. For instance, `Vec`
+ /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
+ /// `vec.as_ptr().add(vec.len())` is always safe.
+ ///
+ /// Most platforms fundamentally can't even construct such an allocation.
+ /// For instance, no known 64-bit platform can ever serve a request
+ /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
+ /// However, some 32-bit and 16-bit platforms may successfully serve a request for
+ /// more than `isize::MAX` bytes with things like Physical Address
+ /// Extension. As such, memory acquired directly from allocators or memory
+ /// mapped files *may* be too large to handle with this function.
+ ///
+ /// Consider using [`wrapping_offset`] instead if these constraints are
+ /// difficult to satisfy. The only advantage of this method is that it
+ /// enables more aggressive compiler optimizations.
+ ///
+ /// [`wrapping_offset`]: #method.wrapping_offset
+ /// [allocated object]: crate::ptr#allocated-object
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = [1, 2, 3];
+ /// let ptr: *mut u32 = s.as_mut_ptr();
+ ///
+ /// unsafe {
+ /// println!("{}", *ptr.offset(1));
+ /// println!("{}", *ptr.offset(2));
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn offset(self, count: isize) -> *mut T
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `offset`.
+ // The obtained pointer is valid for writes since the caller must
+ // guarantee that it points to the same allocated object as `self`.
+ unsafe { intrinsics::offset(self, count) as *mut T }
+ }
+
+ /// Calculates the offset from a pointer in bytes.
+ ///
+ /// `count` is in units of **bytes**.
+ ///
+ /// This is purely a convenience for casting to a `u8` pointer and
+ /// using [offset][pointer::offset] on it. See that method for documentation
+ /// and safety requirements.
+ ///
+ /// For non-`Sized` pointees this operation changes only the data pointer,
+ /// leaving the metadata untouched.
+ #[must_use]
+ #[inline(always)]
+ #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
+ #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn byte_offset(self, count: isize) -> Self {
+ // SAFETY: the caller must uphold the safety contract for `offset`.
+ let this = unsafe { self.cast::<u8>().offset(count).cast::<()>() };
+ from_raw_parts_mut::<T>(this, metadata(self))
+ }
+
+ /// Calculates the offset from a pointer using wrapping arithmetic.
+ /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// This operation itself is always safe, but using the resulting pointer is not.
+ ///
+ /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not
+ /// be used to read or write other allocated objects.
+ ///
+ /// In other words, `let z = x.wrapping_offset((y as isize) - (x as isize))` does *not* make `z`
+ /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still
+ /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless
+ /// `x` and `y` point into the same allocated object.
+ ///
+ /// Compared to [`offset`], this method basically delays the requirement of staying within the
+ /// same allocated object: [`offset`] is immediate Undefined Behavior when crossing object
+ /// boundaries; `wrapping_offset` produces a pointer but still leads to Undefined Behavior if a
+ /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`offset`]
+ /// can be optimized better and is thus preferable in performance-sensitive code.
+ ///
+ /// The delayed check only considers the value of the pointer that was dereferenced, not the
+ /// intermediate values used during the computation of the final result. For example,
+ /// `x.wrapping_offset(o).wrapping_offset(o.wrapping_neg())` is always the same as `x`. In other
+ /// words, leaving the allocated object and then re-entering it later is permitted.
+ ///
+ /// [`offset`]: #method.offset
+ /// [allocated object]: crate::ptr#allocated-object
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // Iterate using a raw pointer in increments of two elements
+ /// let mut data = [1u8, 2, 3, 4, 5];
+ /// let mut ptr: *mut u8 = data.as_mut_ptr();
+ /// let step = 2;
+ /// let end_rounded_up = ptr.wrapping_offset(6);
+ ///
+ /// while ptr != end_rounded_up {
+ /// unsafe {
+ /// *ptr = 0;
+ /// }
+ /// ptr = ptr.wrapping_offset(step);
+ /// }
+ /// assert_eq!(&data, &[0, 2, 0, 4, 0]);
+ /// ```
+ #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
+ #[inline(always)]
+ pub const fn wrapping_offset(self, count: isize) -> *mut T
+ where
+ T: Sized,
+ {
+ // SAFETY: the `arith_offset` intrinsic has no prerequisites to be called.
+ unsafe { intrinsics::arith_offset(self, count) as *mut T }
+ }
+
+ /// Calculates the offset from a pointer in bytes using wrapping arithmetic.
+ ///
+ /// `count` is in units of **bytes**.
+ ///
+ /// This is purely a convenience for casting to a `u8` pointer and
+ /// using [wrapping_offset][pointer::wrapping_offset] on it. See that method
+ /// for documentation.
+ ///
+ /// For non-`Sized` pointees this operation changes only the data pointer,
+ /// leaving the metadata untouched.
+ #[must_use]
+ #[inline(always)]
+ #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
+ #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
+ pub const fn wrapping_byte_offset(self, count: isize) -> Self {
+ from_raw_parts_mut::<T>(
+ self.cast::<u8>().wrapping_offset(count).cast::<()>(),
+ metadata(self),
+ )
+ }
+
+ /// Returns `None` if the pointer is null, or else returns a unique reference to
+ /// the value wrapped in `Some`. If the value may be uninitialized, [`as_uninit_mut`]
+ /// must be used instead.
+ ///
+ /// For the shared counterpart see [`as_ref`].
+ ///
+ /// [`as_uninit_mut`]: #method.as_uninit_mut
+ /// [`as_ref`]: #method.as_ref-1
+ ///
+ /// # Safety
+ ///
+ /// When calling this method, you have to ensure that *either* the pointer is null *or*
+ /// all of the following is true:
+ ///
+ /// * The pointer must be properly aligned.
+ ///
+ /// * It must be "dereferenceable" in the sense defined in [the module documentation].
+ ///
+ /// * The pointer must point to an initialized instance of `T`.
+ ///
+ /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+ /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+ /// In particular, while this reference exists, the memory the pointer points to must
+ /// not get accessed (read or written) through any other pointer.
+ ///
+ /// This applies even if the result of this method is unused!
+ /// (The part about being initialized is not yet fully decided, but until
+ /// it is, the only safe approach is to ensure that the value is indeed initialized.)
+ ///
+ /// [the module documentation]: crate::ptr#safety
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = [1, 2, 3];
+ /// let ptr: *mut u32 = s.as_mut_ptr();
+ /// let first_value = unsafe { ptr.as_mut().unwrap() };
+ /// *first_value = 4;
+ /// # assert_eq!(s, [4, 2, 3]);
+ /// println!("{s:?}"); // It'll print: "[4, 2, 3]".
+ /// ```
+ ///
+ /// # Null-unchecked version
+ ///
+ /// If you are sure the pointer can never be null and are looking for some kind of
+ /// `as_mut_unchecked` that returns the `&mut T` instead of `Option<&mut T>`, know that
+ /// you can dereference the pointer directly.
+ ///
+ /// ```
+ /// let mut s = [1, 2, 3];
+ /// let ptr: *mut u32 = s.as_mut_ptr();
+ /// let first_value = unsafe { &mut *ptr };
+ /// *first_value = 4;
+ /// # assert_eq!(s, [4, 2, 3]);
+ /// println!("{s:?}"); // It'll print: "[4, 2, 3]".
+ /// ```
+ #[stable(feature = "ptr_as_ref", since = "1.9.0")]
+ #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+ #[inline]
+ pub const unsafe fn as_mut<'a>(self) -> Option<&'a mut T> {
+ // SAFETY: the caller must guarantee that `self` is valid for
+ // a mutable reference if it isn't null.
+ if self.is_null() { None } else { unsafe { Some(&mut *self) } }
+ }
+
+ /// Returns `None` if the pointer is null, or else returns a unique reference to
+ /// the value wrapped in `Some`. In contrast to [`as_mut`], this does not require
+ /// that the value has to be initialized.
+ ///
+ /// For the shared counterpart see [`as_uninit_ref`].
+ ///
+ /// [`as_mut`]: #method.as_mut
+ /// [`as_uninit_ref`]: #method.as_uninit_ref-1
+ ///
+ /// # Safety
+ ///
+ /// When calling this method, you have to ensure that *either* the pointer is null *or*
+ /// all of the following is true:
+ ///
+ /// * The pointer must be properly aligned.
+ ///
+ /// * It must be "dereferenceable" in the sense defined in [the module documentation].
+ ///
+ /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+ /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+ /// In particular, while this reference exists, the memory the pointer points to must
+ /// not get accessed (read or written) through any other pointer.
+ ///
+ /// This applies even if the result of this method is unused!
+ ///
+ /// [the module documentation]: crate::ptr#safety
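+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch (mirroring the [`as_uninit_ref`] example above):
+ ///
+ /// ```
+ /// #![feature(ptr_as_uninit)]
+ ///
+ /// let mut x = 0u8;
+ /// let ptr: *mut u8 = &mut x;
+ ///
+ /// unsafe {
+ ///     if let Some(slot) = ptr.as_uninit_mut() {
+ ///         slot.write(42);
+ ///     }
+ /// }
+ /// assert_eq!(x, 42);
+ /// ```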
+ #[inline]
+ #[unstable(feature = "ptr_as_uninit", issue = "75402")]
+ #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+ pub const unsafe fn as_uninit_mut<'a>(self) -> Option<&'a mut MaybeUninit<T>>
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must guarantee that `self` meets all the
+ // requirements for a reference.
+ if self.is_null() { None } else { Some(unsafe { &mut *(self as *mut MaybeUninit<T>) }) }
+ }
+
+ /// Returns whether two pointers are guaranteed to be equal.
+ ///
+ /// At runtime this function behaves like `self == other`.
+ /// However, in some contexts (e.g., compile-time evaluation),
+ /// it is not always possible to determine equality of two pointers, so this function may
+ /// spuriously return `false` for pointers that later actually turn out to be equal.
+ /// But when it returns `true`, the pointers are guaranteed to be equal.
+ ///
+ /// This function is the mirror of [`guaranteed_ne`], but not its inverse. There are pointer
+ /// comparisons for which both functions return `false`.
+ ///
+ /// [`guaranteed_ne`]: #method.guaranteed_ne
+ ///
+ /// The return value may change depending on the compiler version, and unsafe code must not
+ /// rely on the result of this function for soundness. It is suggested to only use this function
+ /// for performance optimizations where spurious `false` return values by this function do not
+ /// affect the outcome, but just the performance.
+ /// The consequences of using this method to make runtime and compile-time code behave
+ /// differently have not been explored. This method should not be used to introduce such
+ /// differences, and it should also not be stabilized before we have a better understanding
+ /// of this issue.
+ #[unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
+ #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
+ #[inline]
+ pub const fn guaranteed_eq(self, other: *mut T) -> bool
+ where
+ T: Sized,
+ {
+ intrinsics::ptr_guaranteed_eq(self as *const _, other as *const _)
+ }
+
+ /// Returns whether two pointers are guaranteed to be unequal.
+ ///
+ /// At runtime this function behaves like `self != other`.
+ /// However, in some contexts (e.g., compile-time evaluation),
+ /// it is not always possible to determine the inequality of two pointers, so this function may
+ /// spuriously return `false` for pointers that later actually turn out to be unequal.
+ /// But when it returns `true`, the pointers are guaranteed to be unequal.
+ ///
+ /// This function is the mirror of [`guaranteed_eq`], but not its inverse. There are pointer
+ /// comparisons for which both functions return `false`.
+ ///
+ /// [`guaranteed_eq`]: #method.guaranteed_eq
+ ///
+ /// The return value may change depending on the compiler version, and unsafe code must not
+ /// rely on the result of this function for soundness. It is suggested to only use this function
+ /// for performance optimizations where spurious `false` return values by this function do not
+ /// affect the outcome, but just the performance.
+ /// The consequences of using this method to make runtime and compile-time code behave
+ /// differently have not been explored. This method should not be used to introduce such
+ /// differences, and it should also not be stabilized before we have a better understanding
+ /// of this issue.
+ #[unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
+ #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
+ #[inline]
+ pub const fn guaranteed_ne(self, other: *mut T) -> bool
+ where
+ T: Sized,
+ {
+ intrinsics::ptr_guaranteed_ne(self as *const _, other as *const _)
+ }
+
+ /// Calculates the distance between two pointers. The returned value is in
+ /// units of T: the distance in bytes divided by `mem::size_of::<T>()`.
+ ///
+ /// This function is the inverse of [`offset`].
+ ///
+ /// [`offset`]: #method.offset-1
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is Undefined
+ /// Behavior:
+ ///
+ /// * Both the starting and other pointer must be either in bounds or one
+ /// byte past the end of the same [allocated object].
+ ///
+ /// * Both pointers must be *derived from* a pointer to the same object.
+ /// (See below for an example.)
+ ///
+ /// * The distance between the pointers, in bytes, must be an exact multiple
+ /// of the size of `T`.
+ ///
+ /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
+ ///
+ /// * The distance being in bounds cannot rely on "wrapping around" the address space.
+ ///
+ /// Rust types are never larger than `isize::MAX` and Rust allocations never wrap around the
+ /// address space, so two pointers within some value of any Rust type `T` will always satisfy
+ /// the last two conditions. The standard library also generally ensures that allocations
+ /// never reach a size where an offset is a concern. For instance, `Vec` and `Box` ensure they
+ /// never allocate more than `isize::MAX` bytes, so `ptr_into_vec.offset_from(vec.as_ptr())`
+ /// always satisfies the last two conditions.
+ ///
+ /// Most platforms fundamentally can't even construct such a large allocation.
+ /// For instance, no known 64-bit platform can ever serve a request
+ /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
+ /// However, some 32-bit and 16-bit platforms may successfully serve a request for
+ /// more than `isize::MAX` bytes with things like Physical Address
+ /// Extension. As such, memory acquired directly from allocators or memory
+ /// mapped files *may* be too large to handle with this function.
+ /// (Note that [`offset`] and [`add`] also have a similar limitation and hence cannot be used on
+ /// such large allocations either.)
+ ///
+ /// [`add`]: #method.add
+ /// [allocated object]: crate::ptr#allocated-object
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `T` is a Zero-Sized Type ("ZST").
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut a = [0; 5];
+ /// let ptr1: *mut i32 = &mut a[1];
+ /// let ptr2: *mut i32 = &mut a[3];
+ /// unsafe {
+ /// assert_eq!(ptr2.offset_from(ptr1), 2);
+ /// assert_eq!(ptr1.offset_from(ptr2), -2);
+ /// assert_eq!(ptr1.offset(2), ptr2);
+ /// assert_eq!(ptr2.offset(-2), ptr1);
+ /// }
+ /// ```
+ ///
+ /// *Incorrect* usage:
+ ///
+ /// ```rust,no_run
+ /// let ptr1 = Box::into_raw(Box::new(0u8));
+ /// let ptr2 = Box::into_raw(Box::new(1u8));
+ /// let diff = (ptr2 as isize).wrapping_sub(ptr1 as isize);
+ /// // Make ptr2_other an "alias" of ptr2, but derived from ptr1.
+ /// let ptr2_other = (ptr1 as *mut u8).wrapping_offset(diff);
+ /// assert_eq!(ptr2 as usize, ptr2_other as usize);
+ /// // Since ptr2_other and ptr2 are derived from pointers to different objects,
+ /// // computing their offset is undefined behavior, even though
+ /// // they point to the same address!
+ /// unsafe {
+ /// let zero = ptr2_other.offset_from(ptr2); // Undefined Behavior
+ /// }
+ /// ```
+ #[stable(feature = "ptr_offset_from", since = "1.47.0")]
+ #[rustc_const_unstable(feature = "const_ptr_offset_from", issue = "92980")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn offset_from(self, origin: *const T) -> isize
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `offset_from`.
+ unsafe { (self as *const T).offset_from(origin) }
+ }
+
+ /// Calculates the distance between two pointers. The returned value is in
+ /// units of **bytes**.
+ ///
+ /// This is purely a convenience for casting to a `u8` pointer and
+ /// using [offset_from][pointer::offset_from] on it. See that method for
+ /// documentation and safety requirements.
+ ///
+ /// For non-`Sized` pointees this operation considers only the data pointers,
+ /// ignoring the metadata.
+ #[inline(always)]
+ #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
+ #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn byte_offset_from(self, origin: *const T) -> isize {
+ // SAFETY: the caller must uphold the safety contract for `offset_from`.
+ unsafe { self.cast::<u8>().offset_from(origin.cast::<u8>()) }
+ }
+
+ /// Calculates the distance between two pointers, *where it's known that
+ /// `self` is equal to or greater than `origin`*. The returned value is in
+ /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
+ ///
+ /// This computes the same value that [`offset_from`](#method.offset_from)
+ /// would compute, but with the added precondition that the offset is
+ /// guaranteed to be non-negative. This method is equivalent to
+ /// `usize::try_from(self.offset_from(origin)).unwrap_unchecked()`,
+ /// but it provides slightly more information to the optimizer, which can
+ /// sometimes allow it to optimize slightly better with some backends.
+ ///
+ /// This method can be thought of as recovering the `count` that was passed
+ /// to [`add`](#method.add) (or, with the parameters in the other order,
+ /// to [`sub`](#method.sub)). The following are all equivalent, assuming
+ /// that their safety preconditions are met:
+ /// ```rust
+ /// # #![feature(ptr_sub_ptr)]
+ /// # unsafe fn blah(ptr: *mut i32, origin: *mut i32, count: usize) -> bool {
+ /// ptr.sub_ptr(origin) == count
+ /// # &&
+ /// origin.add(count) == ptr
+ /// # &&
+ /// ptr.sub(count) == origin
+ /// # }
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// - The distance between the pointers must be non-negative (`self >= origin`)
+ ///
+ /// - *All* the safety conditions of [`offset_from`](#method.offset_from)
+ /// apply to this method as well; see it for the full details.
+ ///
+ /// Importantly, despite the return type of this method being able to represent
+ /// a larger offset, it's still *not permitted* to pass pointers which differ
+ /// by more than `isize::MAX` *bytes*. As such, the result of this method will
+ /// always be less than or equal to `isize::MAX as usize`.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `T` is a Zero-Sized Type ("ZST").
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ptr_sub_ptr)]
+ ///
+ /// let mut a = [0; 5];
+ /// let p: *mut i32 = a.as_mut_ptr();
+ /// unsafe {
+ /// let ptr1: *mut i32 = p.add(1);
+ /// let ptr2: *mut i32 = p.add(3);
+ ///
+ /// assert_eq!(ptr2.sub_ptr(ptr1), 2);
+ /// assert_eq!(ptr1.add(2), ptr2);
+ /// assert_eq!(ptr2.sub(2), ptr1);
+ /// assert_eq!(ptr2.sub_ptr(ptr2), 0);
+ /// }
+ ///
+ /// // This would be incorrect, as the pointers are not correctly ordered:
+ /// // ptr1.offset_from(ptr2)
+ /// ```
+ #[unstable(feature = "ptr_sub_ptr", issue = "95892")]
+ #[rustc_const_unstable(feature = "const_ptr_sub_ptr", issue = "95892")]
+ #[inline]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn sub_ptr(self, origin: *const T) -> usize
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `sub_ptr`.
+ unsafe { (self as *const T).sub_ptr(origin) }
+ }
+
+ /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
+ ///
+ /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is Undefined
+ /// Behavior:
+ ///
+ /// * Both the starting and resulting pointer must be either in bounds or one
+ /// byte past the end of the same [allocated object].
+ ///
+ /// * The computed offset, **in bytes**, cannot overflow an `isize`.
+ ///
+ /// * The offset being in bounds cannot rely on "wrapping around" the address
+ /// space. That is, the infinite-precision sum must fit in a `usize`.
+ ///
+ /// The compiler and standard library generally try to ensure allocations
+ /// never reach a size where an offset is a concern. For instance, `Vec`
+ /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
+ /// `vec.as_ptr().add(vec.len())` is always safe.
+ ///
+ /// Most platforms fundamentally can't even construct such an allocation.
+ /// For instance, no known 64-bit platform can ever serve a request
+ /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
+ /// However, some 32-bit and 16-bit platforms may successfully serve a request for
+ /// more than `isize::MAX` bytes with things like Physical Address
+ /// Extension. As such, memory acquired directly from allocators or memory
+ /// mapped files *may* be too large to handle with this function.
+ ///
+ /// Consider using [`wrapping_add`] instead if these constraints are
+ /// difficult to satisfy. The only advantage of this method is that it
+ /// enables more aggressive compiler optimizations.
+ ///
+ /// [`wrapping_add`]: #method.wrapping_add
+ /// [allocated object]: crate::ptr#allocated-object
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s: &str = "123";
+ /// let ptr: *const u8 = s.as_ptr();
+ ///
+ /// unsafe {
+ /// println!("{}", *ptr.add(1) as char);
+ /// println!("{}", *ptr.add(2) as char);
+ /// }
+ /// ```
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn add(self, count: usize) -> Self
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `offset`.
+ unsafe { self.offset(count as isize) }
+ }
+
+ /// Calculates the offset from a pointer in bytes (convenience for `.byte_offset(count as isize)`).
+ ///
+ /// `count` is in units of bytes.
+ ///
+ /// This is purely a convenience for casting to a `u8` pointer and
+ /// using [add][pointer::add] on it. See that method for documentation
+ /// and safety requirements.
+ ///
+ /// For non-`Sized` pointees this operation changes only the data pointer,
+ /// leaving the metadata untouched.
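+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch (the array contents are illustrative):
+ ///
+ /// ```
+ /// #![feature(pointer_byte_offsets)]
+ /// let mut arr = [1u16, 2];
+ /// let p: *mut u16 = arr.as_mut_ptr();
+ /// // Advancing by `size_of::<u16>()` bytes is the same as `p.add(1)`.
+ /// let q = unsafe { p.byte_add(2) };
+ /// unsafe { *q = 7 };
+ /// assert_eq!(arr, [1, 7]);
+ /// ```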
+ #[must_use]
+ #[inline(always)]
+ #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
+ #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn byte_add(self, count: usize) -> Self {
+ // SAFETY: the caller must uphold the safety contract for `add`.
+ let this = unsafe { self.cast::<u8>().add(count).cast::<()>() };
+ from_raw_parts_mut::<T>(this, metadata(self))
+ }
+
+ /// Calculates the offset from a pointer (convenience for
+ /// `.offset((count as isize).wrapping_neg())`).
+ ///
+ /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is Undefined
+ /// Behavior:
+ ///
+ /// * Both the starting and resulting pointer must be either in bounds or one
+ /// byte past the end of the same [allocated object].
+ ///
+ /// * The computed offset cannot exceed `isize::MAX` **bytes**.
+ ///
+ /// * The offset being in bounds cannot rely on "wrapping around" the address
+ /// space. That is, the infinite-precision sum must fit in a `usize`.
+ ///
+ /// The compiler and standard library generally try to ensure allocations
+ /// never reach a size where an offset is a concern. For instance, `Vec`
+ /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
+ /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
+ ///
+ /// Most platforms fundamentally can't even construct such an allocation.
+ /// For instance, no known 64-bit platform can ever serve a request
+ /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
+ /// However, some 32-bit and 16-bit platforms may successfully serve a request for
+ /// more than `isize::MAX` bytes with things like Physical Address
+ /// Extension. As such, memory acquired directly from allocators or memory
+ /// mapped files *may* be too large to handle with this function.
+ ///
+ /// Consider using [`wrapping_sub`] instead if these constraints are
+ /// difficult to satisfy. The only advantage of this method is that it
+ /// enables more aggressive compiler optimizations.
+ ///
+ /// [`wrapping_sub`]: #method.wrapping_sub
+ /// [allocated object]: crate::ptr#allocated-object
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s: &str = "123";
+ ///
+ /// unsafe {
+ /// let end: *const u8 = s.as_ptr().add(3);
+ /// println!("{}", *end.sub(1) as char);
+ /// println!("{}", *end.sub(2) as char);
+ /// }
+ /// ```
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
+ #[inline]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn sub(self, count: usize) -> Self
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `offset`.
+ unsafe { self.offset((count as isize).wrapping_neg()) }
+ }
+
+ /// Calculates the offset from a pointer in bytes (convenience for
+ /// `.byte_offset((count as isize).wrapping_neg())`).
+ ///
+ /// `count` is in units of bytes.
+ ///
+ /// This is purely a convenience for casting to a `u8` pointer and
+ /// using [sub][pointer::sub] on it. See that method for documentation
+ /// and safety requirements.
+ ///
+ /// For non-`Sized` pointees this operation changes only the data pointer,
+ /// leaving the metadata untouched.
+ #[must_use]
+ #[inline(always)]
+ #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
+ #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn byte_sub(self, count: usize) -> Self {
+ // SAFETY: the caller must uphold the safety contract for `sub`.
+ let this = unsafe { self.cast::<u8>().sub(count).cast::<()>() };
+ from_raw_parts_mut::<T>(this, metadata(self))
+ }
+
+ /// Calculates the offset from a pointer using wrapping arithmetic.
+ /// (convenience for `.wrapping_offset(count as isize)`)
+ ///
+ /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// This operation itself is always safe, but using the resulting pointer is not.
+ ///
+ /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not
+ /// be used to read or write other allocated objects.
+ ///
+ /// In other words, `let z = x.wrapping_add((y as usize) - (x as usize))` does *not* make `z`
+ /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still
+ /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless
+ /// `x` and `y` point into the same allocated object.
+ ///
+ /// Compared to [`add`], this method basically delays the requirement of staying within the
+ /// same allocated object: [`add`] is immediate Undefined Behavior when crossing object
+ /// boundaries; `wrapping_add` produces a pointer but still leads to Undefined Behavior if a
+ /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`add`]
+ /// can be optimized better and is thus preferable in performance-sensitive code.
+ ///
+ /// The delayed check only considers the value of the pointer that was dereferenced, not the
+ /// intermediate values used during the computation of the final result. For example,
+ /// `x.wrapping_add(o).wrapping_sub(o)` is always the same as `x`. In other words, leaving the
+ /// allocated object and then re-entering it later is permitted.
+ ///
+ /// [`add`]: #method.add
+ /// [allocated object]: crate::ptr#allocated-object
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // Iterate using a raw pointer in increments of two elements
+ /// let data = [1u8, 2, 3, 4, 5];
+ /// let mut ptr: *const u8 = data.as_ptr();
+ /// let step = 2;
+ /// let end_rounded_up = ptr.wrapping_add(6);
+ ///
+ /// // This loop prints "1, 3, 5, "
+ /// while ptr != end_rounded_up {
+ /// unsafe {
+ /// print!("{}, ", *ptr);
+ /// }
+ /// ptr = ptr.wrapping_add(step);
+ /// }
+ /// ```
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
+ #[inline(always)]
+ pub const fn wrapping_add(self, count: usize) -> Self
+ where
+ T: Sized,
+ {
+ self.wrapping_offset(count as isize)
+ }
+
+ /// Calculates the offset from a pointer in bytes using wrapping arithmetic.
+ /// (convenience for `.wrapping_byte_offset(count as isize)`)
+ ///
+ /// `count` is in units of bytes.
+ ///
+ /// This is purely a convenience for casting to a `u8` pointer and
+ /// using [wrapping_add][pointer::wrapping_add] on it. See that method for documentation.
+ ///
+ /// For non-`Sized` pointees this operation changes only the data pointer,
+ /// leaving the metadata untouched.
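+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, assuming the unstable `pointer_byte_offsets` feature:
+ ///
+ /// ```
+ /// #![feature(pointer_byte_offsets)]
+ ///
+ /// let mut x = [1u16, 2u16];
+ /// let ptr: *mut u16 = x.as_mut_ptr();
+ /// // The offset itself is safe; only dereferencing the result is unsafe.
+ /// let next = ptr.wrapping_byte_add(std::mem::size_of::<u16>());
+ /// unsafe { assert_eq!(*next, 2) };
+ /// ```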
+ #[must_use]
+ #[inline(always)]
+ #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
+ #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
+ pub const fn wrapping_byte_add(self, count: usize) -> Self {
+ from_raw_parts_mut::<T>(self.cast::<u8>().wrapping_add(count).cast::<()>(), metadata(self))
+ }
+
+ /// Calculates the offset from a pointer using wrapping arithmetic.
+ /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
+ ///
+ /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// This operation itself is always safe, but using the resulting pointer is not.
+ ///
+ /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not
+ /// be used to read or write other allocated objects.
+ ///
+ /// In other words, `let z = x.wrapping_sub((x as usize) - (y as usize))` does *not* make `z`
+ /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still
+ /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless
+ /// `x` and `y` point into the same allocated object.
+ ///
+ /// Compared to [`sub`], this method basically delays the requirement of staying within the
+ /// same allocated object: [`sub`] is immediate Undefined Behavior when crossing object
+ /// boundaries; `wrapping_sub` produces a pointer but still leads to Undefined Behavior if a
+ /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`sub`]
+ /// can be optimized better and is thus preferable in performance-sensitive code.
+ ///
+ /// The delayed check only considers the value of the pointer that was dereferenced, not the
+ /// intermediate values used during the computation of the final result. For example,
+ /// `x.wrapping_add(o).wrapping_sub(o)` is always the same as `x`. In other words, leaving the
+ /// allocated object and then re-entering it later is permitted.
+ ///
+ /// [`sub`]: #method.sub
+ /// [allocated object]: crate::ptr#allocated-object
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // Iterate using a raw pointer in increments of two elements (backwards)
+ /// let data = [1u8, 2, 3, 4, 5];
+ /// let mut ptr: *const u8 = data.as_ptr();
+ /// let start_rounded_down = ptr.wrapping_sub(2);
+ /// ptr = ptr.wrapping_add(4);
+ /// let step = 2;
+ /// // This loop prints "5, 3, 1, "
+ /// while ptr != start_rounded_down {
+ /// unsafe {
+ /// print!("{}, ", *ptr);
+ /// }
+ /// ptr = ptr.wrapping_sub(step);
+ /// }
+ /// ```
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
+ #[inline]
+ pub const fn wrapping_sub(self, count: usize) -> Self
+ where
+ T: Sized,
+ {
+ self.wrapping_offset((count as isize).wrapping_neg())
+ }
+
+ /// Calculates the offset from a pointer in bytes using wrapping arithmetic.
+ /// (convenience for `.wrapping_byte_offset((count as isize).wrapping_neg())`)
+ ///
+ /// `count` is in units of bytes.
+ ///
+ /// This is purely a convenience for casting to a `u8` pointer and
+ /// using [wrapping_sub][pointer::wrapping_sub] on it. See that method for documentation.
+ ///
+ /// For non-`Sized` pointees this operation changes only the data pointer,
+ /// leaving the metadata untouched.
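+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, assuming the unstable `pointer_byte_offsets` feature:
+ ///
+ /// ```
+ /// #![feature(pointer_byte_offsets)]
+ ///
+ /// let mut x = [1u16, 2u16];
+ /// let end: *mut u16 = x.as_mut_ptr().wrapping_add(2);
+ /// // Step back one `u16` (two bytes) from the one-past-the-end pointer.
+ /// let last = end.wrapping_byte_sub(std::mem::size_of::<u16>());
+ /// unsafe { assert_eq!(*last, 2) };
+ /// ```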
+ #[must_use]
+ #[inline(always)]
+ #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
+ #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
+ pub const fn wrapping_byte_sub(self, count: usize) -> Self {
+ from_raw_parts_mut::<T>(self.cast::<u8>().wrapping_sub(count).cast::<()>(), metadata(self))
+ }
+
+ /// Reads the value from `self` without moving it. This leaves the
+ /// memory in `self` unchanged.
+ ///
+ /// See [`ptr::read`] for safety concerns and examples.
+ ///
+ /// [`ptr::read`]: crate::ptr::read()
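+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch:
+ ///
+ /// ```
+ /// let mut x = 12u32;
+ /// let ptr: *mut u32 = &mut x;
+ /// // Reads the value without moving it; `x` remains usable afterwards.
+ /// assert_eq!(unsafe { ptr.read() }, 12);
+ /// ```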
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn read(self) -> T
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `read`.
+ unsafe { read(self) }
+ }
+
+ /// Performs a volatile read of the value from `self` without moving it. This
+ /// leaves the memory in `self` unchanged.
+ ///
+ /// Volatile operations are intended to act on I/O memory, and are guaranteed
+ /// to not be elided or reordered by the compiler across other volatile
+ /// operations.
+ ///
+ /// See [`ptr::read_volatile`] for safety concerns and examples.
+ ///
+ /// [`ptr::read_volatile`]: crate::ptr::read_volatile()
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub unsafe fn read_volatile(self) -> T
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `read_volatile`.
+ unsafe { read_volatile(self) }
+ }
+
+ /// Reads the value from `self` without moving it. This leaves the
+ /// memory in `self` unchanged.
+ ///
+ /// Unlike `read`, the pointer may be unaligned.
+ ///
+ /// See [`ptr::read_unaligned`] for safety concerns and examples.
+ ///
+ /// [`ptr::read_unaligned`]: crate::ptr::read_unaligned()
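+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, reading a `u32` from an arbitrary byte offset:
+ ///
+ /// ```
+ /// let mut bytes = [0u8; 8];
+ /// bytes[1..5].copy_from_slice(&0xdead_beef_u32.to_ne_bytes());
+ /// // A `*mut u32` at byte offset 1 is (usually) misaligned, so plain `read`
+ /// // would be UB here; `read_unaligned` tolerates any alignment.
+ /// let ptr = unsafe { bytes.as_mut_ptr().add(1) } as *mut u32;
+ /// assert_eq!(unsafe { ptr.read_unaligned() }, 0xdead_beef);
+ /// ```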
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn read_unaligned(self) -> T
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `read_unaligned`.
+ unsafe { read_unaligned(self) }
+ }
+
+ /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
+ /// and destination may overlap.
+ ///
+ /// NOTE: this has the *same* argument order as [`ptr::copy`].
+ ///
+ /// See [`ptr::copy`] for safety concerns and examples.
+ ///
+ /// [`ptr::copy`]: crate::ptr::copy()
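+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch of the argument order:
+ ///
+ /// ```
+ /// let mut src = [1u8, 2, 3];
+ /// let mut dst = [0u8; 3];
+ /// unsafe {
+ ///     // `self` is the source and `dest` the destination, matching
+ ///     // `ptr::copy(src, dest, count)`.
+ ///     src.as_mut_ptr().copy_to(dst.as_mut_ptr(), 3);
+ /// }
+ /// assert_eq!(dst, [1, 2, 3]);
+ /// ```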
+ #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn copy_to(self, dest: *mut T, count: usize)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `copy`.
+ unsafe { copy(self, dest, count) }
+ }
+
+ /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
+ /// and destination may *not* overlap.
+ ///
+ /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`].
+ ///
+ /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples.
+ ///
+ /// [`ptr::copy_nonoverlapping`]: crate::ptr::copy_nonoverlapping()
+ #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `copy_nonoverlapping`.
+ unsafe { copy_nonoverlapping(self, dest, count) }
+ }
+
+ /// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
+ /// and destination may overlap.
+ ///
+ /// NOTE: this has the *opposite* argument order of [`ptr::copy`].
+ ///
+ /// See [`ptr::copy`] for safety concerns and examples.
+ ///
+ /// [`ptr::copy`]: crate::ptr::copy()
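+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch of the argument order:
+ ///
+ /// ```
+ /// let src = [1u8, 2, 3];
+ /// let mut dst = [0u8; 3];
+ /// unsafe {
+ ///     // `self` is the destination and `src` the source, i.e. the
+ ///     // opposite order of `ptr::copy(src, dest, count)`.
+ ///     dst.as_mut_ptr().copy_from(src.as_ptr(), 3);
+ /// }
+ /// assert_eq!(dst, [1, 2, 3]);
+ /// ```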
+ #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn copy_from(self, src: *const T, count: usize)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `copy`.
+ unsafe { copy(src, self, count) }
+ }
+
+ /// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
+ /// and destination may *not* overlap.
+ ///
+ /// NOTE: this has the *opposite* argument order of [`ptr::copy_nonoverlapping`].
+ ///
+ /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples.
+ ///
+ /// [`ptr::copy_nonoverlapping`]: crate::ptr::copy_nonoverlapping()
+ #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn copy_from_nonoverlapping(self, src: *const T, count: usize)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `copy_nonoverlapping`.
+ unsafe { copy_nonoverlapping(src, self, count) }
+ }
+
+ /// Executes the destructor (if any) of the pointed-to value.
+ ///
+ /// See [`ptr::drop_in_place`] for safety concerns and examples.
+ ///
+ /// [`ptr::drop_in_place`]: crate::ptr::drop_in_place()
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[inline(always)]
+ pub unsafe fn drop_in_place(self) {
+ // SAFETY: the caller must uphold the safety contract for `drop_in_place`.
+ unsafe { drop_in_place(self) }
+ }
+
+ /// Overwrites a memory location with the given value without reading or
+ /// dropping the old value.
+ ///
+ /// See [`ptr::write`] for safety concerns and examples.
+ ///
+ /// [`ptr::write`]: crate::ptr::write()
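+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch:
+ ///
+ /// ```
+ /// let mut x = 0u32;
+ /// let ptr: *mut u32 = &mut x;
+ /// // Overwrites the old value without reading or dropping it.
+ /// unsafe { ptr.write(7) };
+ /// assert_eq!(x, 7);
+ /// ```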
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn write(self, val: T)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `write`.
+ unsafe { write(self, val) }
+ }
+
+ /// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
+ /// bytes of memory starting at `self` to `val`.
+ ///
+ /// See [`ptr::write_bytes`] for safety concerns and examples.
+ ///
+ /// [`ptr::write_bytes`]: crate::ptr::write_bytes()
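+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch:
+ ///
+ /// ```
+ /// let mut buf = [0u32; 4];
+ /// unsafe {
+ ///     // Sets all `4 * size_of::<u32>()` bytes to `0xff`,
+ ///     // so every element becomes `u32::MAX`.
+ ///     buf.as_mut_ptr().write_bytes(0xff, 4);
+ /// }
+ /// assert_eq!(buf, [u32::MAX; 4]);
+ /// ```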
+ #[doc(alias = "memset")]
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn write_bytes(self, val: u8, count: usize)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `write_bytes`.
+ unsafe { write_bytes(self, val, count) }
+ }
+
+ /// Performs a volatile write of a memory location with the given value without
+ /// reading or dropping the old value.
+ ///
+ /// Volatile operations are intended to act on I/O memory, and are guaranteed
+ /// to not be elided or reordered by the compiler across other volatile
+ /// operations.
+ ///
+ /// See [`ptr::write_volatile`] for safety concerns and examples.
+ ///
+ /// [`ptr::write_volatile`]: crate::ptr::write_volatile()
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub unsafe fn write_volatile(self, val: T)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `write_volatile`.
+ unsafe { write_volatile(self, val) }
+ }
+
+ /// Overwrites a memory location with the given value without reading or
+ /// dropping the old value.
+ ///
+ /// Unlike `write`, the pointer may be unaligned.
+ ///
+ /// See [`ptr::write_unaligned`] for safety concerns and examples.
+ ///
+ /// [`ptr::write_unaligned`]: crate::ptr::write_unaligned()
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn write_unaligned(self, val: T)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `write_unaligned`.
+ unsafe { write_unaligned(self, val) }
+ }
+
+ /// Replaces the value at `self` with `src`, returning the old
+ /// value, without dropping either.
+ ///
+ /// See [`ptr::replace`] for safety concerns and examples.
+ ///
+ /// [`ptr::replace`]: crate::ptr::replace()
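+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch:
+ ///
+ /// ```
+ /// let mut x = 1u32;
+ /// let ptr: *mut u32 = &mut x;
+ /// // The old value is moved out rather than dropped in place.
+ /// let old = unsafe { ptr.replace(2) };
+ /// assert_eq!(old, 1);
+ /// assert_eq!(x, 2);
+ /// ```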
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[inline(always)]
+ pub unsafe fn replace(self, src: T) -> T
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `replace`.
+ unsafe { replace(self, src) }
+ }
+
+ /// Swaps the values at two mutable locations of the same type, without
+ /// deinitializing either. They may overlap, unlike `mem::swap` which is
+ /// otherwise equivalent.
+ ///
+ /// See [`ptr::swap`] for safety concerns and examples.
+ ///
+ /// [`ptr::swap`]: crate::ptr::swap()
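+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch:
+ ///
+ /// ```
+ /// let mut a = 1u8;
+ /// let mut b = 2u8;
+ /// let pa: *mut u8 = &mut a;
+ /// let pb: *mut u8 = &mut b;
+ /// // Swaps the two values in place without dropping either.
+ /// unsafe { pa.swap(pb) };
+ /// assert_eq!((a, b), (2, 1));
+ /// ```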
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+ #[inline(always)]
+ pub const unsafe fn swap(self, with: *mut T)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `swap`.
+ unsafe { swap(self, with) }
+ }
+
+ /// Computes the offset that needs to be applied to the pointer in order to make it aligned to
+ /// `align`.
+ ///
+ /// If it is not possible to align the pointer, the implementation returns
+ /// `usize::MAX`. It is permissible for the implementation to *always*
+ /// return `usize::MAX`. Only your algorithm's performance can depend
+ /// on getting a usable offset here, not its correctness.
+ ///
+ /// The offset is expressed in number of `T` elements, and not bytes. The value returned can be
+ /// used with the `wrapping_add` method.
+ ///
+ /// There are no guarantees whatsoever that offsetting the pointer will not overflow or go
+ /// beyond the allocation that the pointer points into. It is up to the caller to ensure that
+ /// the returned offset is correct in all terms other than alignment.
+ ///
+ /// # Panics
+ ///
+ /// The function panics if `align` is not a power-of-two.
+ ///
+ /// # Examples
+ ///
+ /// Accessing adjacent `u8` as `u16`
+ ///
+ /// ```
+ /// # fn foo(n: usize) {
+ /// # use std::mem::align_of;
+ /// # unsafe {
+ /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
+ /// let ptr = x.as_ptr().add(n) as *const u8;
+ /// let offset = ptr.align_offset(align_of::<u16>());
+ /// if offset < x.len() - n - 1 {
+ /// let u16_ptr = ptr.add(offset) as *const u16;
+ /// assert_ne!(*u16_ptr, 500);
+ /// } else {
+ /// // while the pointer can be aligned via `offset`, it would point
+ /// // outside the allocation
+ /// }
+ /// # } }
+ /// ```
+ #[stable(feature = "align_offset", since = "1.36.0")]
+ #[rustc_const_unstable(feature = "const_align_offset", issue = "90962")]
+ pub const fn align_offset(self, align: usize) -> usize
+ where
+ T: Sized,
+ {
+ if !align.is_power_of_two() {
+ panic!("align_offset: align is not a power-of-two");
+ }
+
+ fn rt_impl<T>(p: *mut T, align: usize) -> usize {
+ // SAFETY: `align` has been checked to be a power of 2 above
+ unsafe { align_offset(p, align) }
+ }
+
+ const fn ctfe_impl<T>(_: *mut T, _: usize) -> usize {
+ usize::MAX
+ }
+
+ // SAFETY:
+ // It is permissible for `align_offset` to always return `usize::MAX`;
+ // algorithm correctness cannot depend on `align_offset` returning non-max values.
+ //
+ // As such the behaviour can't change after replacing `align_offset` with `usize::MAX`, only performance can.
+ unsafe { intrinsics::const_eval_select((self, align), ctfe_impl, rt_impl) }
+ }
+
+ /// Returns whether the pointer is properly aligned for `T`.
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "pointer_is_aligned", issue = "96284")]
+ pub fn is_aligned(self) -> bool
+ where
+ T: Sized,
+ {
+ self.is_aligned_to(core::mem::align_of::<T>())
+ }
+
+ /// Returns whether the pointer is aligned to `align`.
+ ///
+ /// For non-`Sized` pointees this operation considers only the data pointer,
+ /// ignoring the metadata.
+ ///
+ /// # Panics
+ ///
+ /// The function panics if `align` is not a power-of-two (this includes 0).
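+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, assuming the unstable `pointer_is_aligned` feature:
+ ///
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ ///
+ /// let mut x = 0u32;
+ /// let ptr = &mut x as *mut u32;
+ /// // Every pointer is aligned to 1, and a pointer derived from a `u32`
+ /// // reference is aligned to `align_of::<u32>()`.
+ /// assert!(ptr.is_aligned_to(1));
+ /// assert!(ptr.is_aligned_to(std::mem::align_of::<u32>()));
+ /// ```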
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "pointer_is_aligned", issue = "96284")]
+ pub fn is_aligned_to(self, align: usize) -> bool {
+ if !align.is_power_of_two() {
+ panic!("is_aligned_to: align is not a power-of-two");
+ }
+
+ // SAFETY: `is_power_of_two()` will return `false` for zero.
+ unsafe { core::intrinsics::assume(align != 0) };
+
+ // Cast is needed for `T: !Sized`
+ self.cast::<u8>().addr() % align == 0
+ }
+}
+
+impl<T> *mut [T] {
+ /// Returns the length of a raw slice.
+ ///
+ /// The returned value is the number of **elements**, not the number of bytes.
+ ///
+ /// This function is safe, even when the raw slice cannot be cast to a slice
+ /// reference because the pointer is null or unaligned.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// #![feature(slice_ptr_len)]
+ /// use std::ptr;
+ ///
+ /// let slice: *mut [i8] = ptr::slice_from_raw_parts_mut(ptr::null_mut(), 3);
+ /// assert_eq!(slice.len(), 3);
+ /// ```
+ #[inline(always)]
+ #[unstable(feature = "slice_ptr_len", issue = "71146")]
+ #[rustc_const_unstable(feature = "const_slice_ptr_len", issue = "71146")]
+ pub const fn len(self) -> usize {
+ metadata(self)
+ }
+
+ /// Returns `true` if the raw slice has a length of 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_ptr_len)]
+ ///
+ /// let mut a = [1, 2, 3];
+ /// let ptr = &mut a as *mut [_];
+ /// assert!(!ptr.is_empty());
+ /// ```
+ #[inline(always)]
+ #[unstable(feature = "slice_ptr_len", issue = "71146")]
+ #[rustc_const_unstable(feature = "const_slice_ptr_len", issue = "71146")]
+ pub const fn is_empty(self) -> bool {
+ self.len() == 0
+ }
+
+ /// Divides one mutable raw slice into two at an index.
+ ///
+ /// The first will contain all indices from `[0, mid)` (excluding
+ /// the index `mid` itself) and the second will contain all
+ /// indices from `[mid, len)` (excluding the index `len` itself).
+ ///
+ /// # Panics
+ ///
+ /// Panics if `mid > len`.
+ ///
+ /// # Safety
+ ///
+ /// `mid` must be [in-bounds] of the underlying [allocated object],
+ /// which means `self` must be dereferenceable and span a single allocation
+ /// that is at least `mid * size_of::<T>()` bytes long. Not upholding these
+ /// requirements is *[undefined behavior]* even if the resulting pointers are not used.
+ ///
+ /// Since `len` being in-bounds is not a safety invariant of `*mut [T]`, the
+ /// safety requirements of this method are the same as for [`split_at_mut_unchecked`].
+ /// The explicit bounds check is only as reliable as `len` itself.
+ ///
+ /// [`split_at_mut_unchecked`]: #method.split_at_mut_unchecked
+ /// [in-bounds]: #method.add
+ /// [allocated object]: crate::ptr#allocated-object
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(raw_slice_split)]
+ /// #![feature(slice_ptr_get)]
+ ///
+ /// let mut v = [1, 0, 3, 0, 5, 6];
+ /// let ptr = &mut v as *mut [_];
+ /// unsafe {
+ /// let (left, right) = ptr.split_at_mut(2);
+ /// assert_eq!(&*left, [1, 0]);
+ /// assert_eq!(&*right, [3, 0, 5, 6]);
+ /// }
+ /// ```
+ #[inline(always)]
+ #[track_caller]
+ #[unstable(feature = "raw_slice_split", issue = "95595")]
+ pub unsafe fn split_at_mut(self, mid: usize) -> (*mut [T], *mut [T]) {
+ assert!(mid <= self.len());
+ // SAFETY: The assert above is only a safety-net as long as `self.len()` is correct
+ // The actual safety requirements of this function are the same as for `split_at_mut_unchecked`
+ unsafe { self.split_at_mut_unchecked(mid) }
+ }
+
+ /// Divides one mutable raw slice into two at an index, without doing bounds checking.
+ ///
+ /// The first will contain all indices from `[0, mid)` (excluding
+ /// the index `mid` itself) and the second will contain all
+ /// indices from `[mid, len)` (excluding the index `len` itself).
+ ///
+ /// # Safety
+ ///
+ /// `mid` must be [in-bounds] of the underlying [allocated object],
+ /// which means `self` must be dereferenceable and span a single allocation
+ /// that is at least `mid * size_of::<T>()` bytes long. Not upholding these
+ /// requirements is *[undefined behavior]* even if the resulting pointers are not used.
+ ///
+ /// [in-bounds]: #method.add
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(raw_slice_split)]
+ ///
+ /// let mut v = [1, 0, 3, 0, 5, 6];
+ /// // scoped to restrict the lifetime of the borrows
+ /// unsafe {
+ /// let ptr = &mut v as *mut [_];
+ /// let (left, right) = ptr.split_at_mut_unchecked(2);
+ /// assert_eq!(&*left, [1, 0]);
+ /// assert_eq!(&*right, [3, 0, 5, 6]);
+ /// (&mut *left)[1] = 2;
+ /// (&mut *right)[1] = 4;
+ /// }
+ /// assert_eq!(v, [1, 2, 3, 4, 5, 6]);
+ /// ```
+ #[inline(always)]
+ #[unstable(feature = "raw_slice_split", issue = "95595")]
+ pub unsafe fn split_at_mut_unchecked(self, mid: usize) -> (*mut [T], *mut [T]) {
+ let len = self.len();
+ let ptr = self.as_mut_ptr();
+
+ // SAFETY: Caller must pass a valid pointer and an index that is in-bounds.
+ let tail = unsafe { ptr.add(mid) };
+ (
+ crate::ptr::slice_from_raw_parts_mut(ptr, mid),
+ crate::ptr::slice_from_raw_parts_mut(tail, len - mid),
+ )
+ }
+
+ /// Returns a raw pointer to the slice's buffer.
+ ///
+ /// This is equivalent to casting `self` to `*mut T`, but more type-safe.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// #![feature(slice_ptr_get)]
+ /// use std::ptr;
+ ///
+ /// let slice: *mut [i8] = ptr::slice_from_raw_parts_mut(ptr::null_mut(), 3);
+ /// assert_eq!(slice.as_mut_ptr(), ptr::null_mut());
+ /// ```
+ #[inline(always)]
+ #[unstable(feature = "slice_ptr_get", issue = "74265")]
+ #[rustc_const_unstable(feature = "slice_ptr_get", issue = "74265")]
+ pub const fn as_mut_ptr(self) -> *mut T {
+ self as *mut T
+ }
+
+ /// Returns a raw pointer to an element or subslice, without doing bounds
+ /// checking.
+ ///
+ /// Calling this method with an [out-of-bounds index] or when `self` is not dereferenceable
+ /// is *[undefined behavior]* even if the resulting pointer is not used.
+ ///
+ /// [out-of-bounds index]: #method.add
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_ptr_get)]
+ ///
+ /// let x = &mut [1, 2, 4] as *mut [i32];
+ ///
+ /// unsafe {
+ /// assert_eq!(x.get_unchecked_mut(1), x.as_mut_ptr().add(1));
+ /// }
+ /// ```
+ #[unstable(feature = "slice_ptr_get", issue = "74265")]
+ #[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+ #[inline(always)]
+ pub const unsafe fn get_unchecked_mut<I>(self, index: I) -> *mut I::Output
+ where
+ I: ~const SliceIndex<[T]>,
+ {
+ // SAFETY: the caller ensures that `self` is dereferenceable and `index` in-bounds.
+ unsafe { index.get_unchecked_mut(self) }
+ }
+
+ /// Returns `None` if the pointer is null, or else returns a shared slice to
+ /// the value wrapped in `Some`. In contrast to [`as_ref`], this does not require
+ /// that the value be initialized.
+ ///
+ /// For the mutable counterpart see [`as_uninit_slice_mut`].
+ ///
+ /// [`as_ref`]: #method.as_ref-1
+ /// [`as_uninit_slice_mut`]: #method.as_uninit_slice_mut
+ ///
+ /// # Safety
+ ///
+ /// When calling this method, you have to ensure that *either* the pointer is null *or*
+ /// all of the following is true:
+ ///
+ /// * The pointer must be [valid] for reads for `ptr.len() * mem::size_of::<T>()` many bytes,
+ /// and it must be properly aligned. This means in particular:
+ ///
+ /// * The entire memory range of this slice must be contained within a single [allocated object]!
+ /// Slices can never span across multiple allocated objects.
+ ///
+ /// * The pointer must be aligned even for zero-length slices. One
+ /// reason for this is that enum layout optimizations may rely on references
+ /// (including slices of any length) being aligned and non-null to distinguish
+ /// them from other data. You can obtain a pointer that is usable as `data`
+ /// for zero-length slices using [`NonNull::dangling()`].
+ ///
+ /// * The total size `ptr.len() * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
+ /// See the safety documentation of [`pointer::offset`].
+ ///
+ /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+ /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+ /// In particular, while this reference exists, the memory the pointer points to must
+ /// not get mutated (except inside `UnsafeCell`).
+ ///
+ /// This applies even if the result of this method is unused!
+ ///
+ /// See also [`slice::from_raw_parts`][].
+ ///
+ /// [valid]: crate::ptr#safety
+ /// [allocated object]: crate::ptr#allocated-object
+ #[inline]
+ #[unstable(feature = "ptr_as_uninit", issue = "75402")]
+ #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+ pub const unsafe fn as_uninit_slice<'a>(self) -> Option<&'a [MaybeUninit<T>]> {
+ if self.is_null() {
+ None
+ } else {
+ // SAFETY: the caller must uphold the safety contract for `as_uninit_slice`.
+ Some(unsafe { slice::from_raw_parts(self as *const MaybeUninit<T>, self.len()) })
+ }
+ }
+
+ /// Returns `None` if the pointer is null, or else returns a unique slice to
+ /// the value wrapped in `Some`. In contrast to [`as_mut`], this does not require
+ /// that the value be initialized.
+ ///
+ /// For the shared counterpart see [`as_uninit_slice`].
+ ///
+ /// [`as_mut`]: #method.as_mut
+ /// [`as_uninit_slice`]: #method.as_uninit_slice-1
+ ///
+ /// # Safety
+ ///
+ /// When calling this method, you have to ensure that *either* the pointer is null *or*
+ /// all of the following is true:
+ ///
+ /// * The pointer must be [valid] for reads and writes for `ptr.len() * mem::size_of::<T>()`
+ /// many bytes, and it must be properly aligned. This means in particular:
+ ///
+ /// * The entire memory range of this slice must be contained within a single [allocated object]!
+ /// Slices can never span across multiple allocated objects.
+ ///
+ /// * The pointer must be aligned even for zero-length slices. One
+ /// reason for this is that enum layout optimizations may rely on references
+ /// (including slices of any length) being aligned and non-null to distinguish
+ /// them from other data. You can obtain a pointer that is usable as `data`
+ /// for zero-length slices using [`NonNull::dangling()`].
+ ///
+ /// * The total size `ptr.len() * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
+ /// See the safety documentation of [`pointer::offset`].
+ ///
+ /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+ /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+ /// In particular, while this reference exists, the memory the pointer points to must
+ /// not get accessed (read or written) through any other pointer.
+ ///
+ /// This applies even if the result of this method is unused!
+ ///
+ /// See also [`slice::from_raw_parts_mut`][].
+ ///
+ /// [valid]: crate::ptr#safety
+ /// [allocated object]: crate::ptr#allocated-object
+ #[inline]
+ #[unstable(feature = "ptr_as_uninit", issue = "75402")]
+ #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+ pub const unsafe fn as_uninit_slice_mut<'a>(self) -> Option<&'a mut [MaybeUninit<T>]> {
+ if self.is_null() {
+ None
+ } else {
+ // SAFETY: the caller must uphold the safety contract for `as_uninit_slice_mut`.
+ Some(unsafe { slice::from_raw_parts_mut(self as *mut MaybeUninit<T>, self.len()) })
+ }
+ }
+}
+
+// Equality for pointers
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> PartialEq for *mut T {
+ #[inline(always)]
+ fn eq(&self, other: &*mut T) -> bool {
+ *self == *other
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Eq for *mut T {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Ord for *mut T {
+ #[inline]
+ fn cmp(&self, other: &*mut T) -> Ordering {
+ if self < other {
+ Less
+ } else if self == other {
+ Equal
+ } else {
+ Greater
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> PartialOrd for *mut T {
+ #[inline(always)]
+ fn partial_cmp(&self, other: &*mut T) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+
+ #[inline(always)]
+ fn lt(&self, other: &*mut T) -> bool {
+ *self < *other
+ }
+
+ #[inline(always)]
+ fn le(&self, other: &*mut T) -> bool {
+ *self <= *other
+ }
+
+ #[inline(always)]
+ fn gt(&self, other: &*mut T) -> bool {
+ *self > *other
+ }
+
+ #[inline(always)]
+ fn ge(&self, other: &*mut T) -> bool {
+ *self >= *other
+ }
+}
diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs
new file mode 100644
index 000000000..f3ef094cb
--- /dev/null
+++ b/library/core/src/ptr/non_null.rs
@@ -0,0 +1,802 @@
+use crate::cmp::Ordering;
+use crate::convert::From;
+use crate::fmt;
+use crate::hash;
+use crate::marker::Unsize;
+use crate::mem::{self, MaybeUninit};
+use crate::num::NonZeroUsize;
+use crate::ops::{CoerceUnsized, DispatchFromDyn};
+use crate::ptr::Unique;
+use crate::slice::{self, SliceIndex};
+
+/// `*mut T` but non-zero and [covariant].
+///
+/// This is often the correct thing to use when building data structures using
+/// raw pointers, but is ultimately more dangerous to use because of its additional
+/// properties. If you're not sure if you should use `NonNull<T>`, just use `*mut T`!
+///
+/// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
+/// is never dereferenced. This is so that enums may use this forbidden value
+/// as a discriminant -- `Option<NonNull<T>>` has the same size as `*mut T`.
+/// However the pointer may still dangle if it isn't dereferenced.
+///
+/// Unlike `*mut T`, `NonNull<T>` was chosen to be covariant over `T`. This makes it
+/// possible to use `NonNull<T>` when building covariant types, but introduces the
+/// risk of unsoundness if used in a type that shouldn't actually be covariant.
+/// (The opposite choice was made for `*mut T` even though technically the unsoundness
+/// could only be caused by calling unsafe functions.)
+///
+/// Covariance is correct for most safe abstractions, such as `Box`, `Rc`, `Arc`, `Vec`,
+/// and `LinkedList`. This is the case because they provide a public API that follows the
+/// normal shared XOR mutable rules of Rust.
+///
+/// If your type cannot safely be covariant, you must ensure it contains some
+/// additional field to provide invariance. Often this field will be a [`PhantomData`]
+/// type like `PhantomData<Cell<T>>` or `PhantomData<&'a mut T>`.
+///
+/// Notice that `NonNull<T>` has a `From` instance for `&T`. However, this does
+/// not change the fact that mutating through a (pointer derived from a) shared
+/// reference is undefined behavior unless the mutation happens inside an
+/// [`UnsafeCell<T>`]. The same goes for creating a mutable reference from a shared
+/// reference. When using this `From` instance without an `UnsafeCell<T>`,
+/// it is your responsibility to ensure that `as_mut` is never called, and `as_ptr`
+/// is never used for mutation.
+///
+/// [covariant]: https://doc.rust-lang.org/reference/subtyping.html
+/// [`PhantomData`]: crate::marker::PhantomData
+/// [`UnsafeCell<T>`]: crate::cell::UnsafeCell
+#[stable(feature = "nonnull", since = "1.25.0")]
+#[repr(transparent)]
+#[rustc_layout_scalar_valid_range_start(1)]
+#[rustc_nonnull_optimization_guaranteed]
+pub struct NonNull<T: ?Sized> {
+ pointer: *const T,
+}
+
+/// `NonNull` pointers are not `Send` because the data they reference may be aliased.
+// N.B., this impl is unnecessary, but should provide better error messages.
+#[stable(feature = "nonnull", since = "1.25.0")]
+impl<T: ?Sized> !Send for NonNull<T> {}
+
+/// `NonNull` pointers are not `Sync` because the data they reference may be aliased.
+// N.B., this impl is unnecessary, but should provide better error messages.
+#[stable(feature = "nonnull", since = "1.25.0")]
+impl<T: ?Sized> !Sync for NonNull<T> {}
+
+impl<T: Sized> NonNull<T> {
+ /// Creates a new `NonNull` that is dangling, but well-aligned.
+ ///
+ /// This is useful for initializing types which lazily allocate, like
+ /// `Vec::new` does.
+ ///
+ /// Note that the pointer value may potentially represent a valid pointer to
+ /// a `T`, which means this must not be used as a "not yet initialized"
+ /// sentinel value. Types that lazily allocate must track initialization by
+ /// some other means.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ptr::NonNull;
+ ///
+ /// let ptr = NonNull::<u32>::dangling();
+ /// // Important: don't try to access the value of `ptr` without
+ /// // initializing it first! The pointer is not null but isn't valid either!
+ /// ```
+ #[stable(feature = "nonnull", since = "1.25.0")]
+ #[rustc_const_stable(feature = "const_nonnull_dangling", since = "1.36.0")]
+ #[must_use]
+ #[inline]
+ pub const fn dangling() -> Self {
+ // SAFETY: mem::align_of() returns a non-zero usize which is then cast
+ // to a *mut T. Therefore, `ptr` is not null and the conditions for
+ // calling new_unchecked() are respected.
+ unsafe {
+ let ptr = crate::ptr::invalid_mut::<T>(mem::align_of::<T>());
+ NonNull::new_unchecked(ptr)
+ }
+ }
+
+ /// Returns a shared reference to the value. In contrast to [`as_ref`], this does not require
+ /// that the value be initialized.
+ ///
+ /// For the mutable counterpart see [`as_uninit_mut`].
+ ///
+ /// [`as_ref`]: NonNull::as_ref
+ /// [`as_uninit_mut`]: NonNull::as_uninit_mut
+ ///
+ /// # Safety
+ ///
+ /// When calling this method, you have to ensure that all of the following is true:
+ ///
+ /// * The pointer must be properly aligned.
+ ///
+ /// * It must be "dereferenceable" in the sense defined in [the module documentation].
+ ///
+ /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+ /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+ /// In particular, while this reference exists, the memory the pointer points to must
+ /// not get mutated (except inside `UnsafeCell`).
+ ///
+ /// This applies even if the result of this method is unused!
+ ///
+ /// [the module documentation]: crate::ptr#safety
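+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, assuming the unstable `ptr_as_uninit` feature:
+ ///
+ /// ```
+ /// #![feature(ptr_as_uninit)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let mut x = 7u32;
+ /// let ptr = NonNull::from(&mut x);
+ /// // The memory is initialized here, so reading through the
+ /// // `MaybeUninit` view is sound.
+ /// let uninit = unsafe { ptr.as_uninit_ref() };
+ /// assert_eq!(unsafe { *uninit.as_ptr() }, 7);
+ /// ```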
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "ptr_as_uninit", issue = "75402")]
+ #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+ pub const unsafe fn as_uninit_ref<'a>(self) -> &'a MaybeUninit<T> {
+ // SAFETY: the caller must guarantee that `self` meets all the
+ // requirements for a reference.
+ unsafe { &*self.cast().as_ptr() }
+ }
+
+ /// Returns a unique reference to the value. In contrast to [`as_mut`], this does not require
+ /// that the value be initialized.
+ ///
+ /// For the shared counterpart see [`as_uninit_ref`].
+ ///
+ /// [`as_mut`]: NonNull::as_mut
+ /// [`as_uninit_ref`]: NonNull::as_uninit_ref
+ ///
+ /// # Safety
+ ///
+ /// When calling this method, you have to ensure that all of the following is true:
+ ///
+ /// * The pointer must be properly aligned.
+ ///
+ /// * It must be "dereferenceable" in the sense defined in [the module documentation].
+ ///
+ /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+ /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+ /// In particular, while this reference exists, the memory the pointer points to must
+ /// not get accessed (read or written) through any other pointer.
+ ///
+ /// This applies even if the result of this method is unused!
+ ///
+ /// [the module documentation]: crate::ptr#safety
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "ptr_as_uninit", issue = "75402")]
+ #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+ pub const unsafe fn as_uninit_mut<'a>(self) -> &'a mut MaybeUninit<T> {
+ // SAFETY: the caller must guarantee that `self` meets all the
+ // requirements for a reference.
+ unsafe { &mut *self.cast().as_ptr() }
+ }
+}
+
+impl<T: ?Sized> NonNull<T> {
+ /// Creates a new `NonNull`.
+ ///
+ /// # Safety
+ ///
+ /// `ptr` must be non-null.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ptr::NonNull;
+ ///
+ /// let mut x = 0u32;
+ /// let ptr = unsafe { NonNull::new_unchecked(&mut x as *mut _) };
+ /// ```
+ ///
+ /// *Incorrect* usage of this function:
+ ///
+ /// ```rust,no_run
+ /// use std::ptr::NonNull;
+ ///
+ /// // NEVER DO THAT!!! This is undefined behavior. ⚠️
+ /// let ptr = unsafe { NonNull::<u32>::new_unchecked(std::ptr::null_mut()) };
+ /// ```
+ #[stable(feature = "nonnull", since = "1.25.0")]
+ #[rustc_const_stable(feature = "const_nonnull_new_unchecked", since = "1.25.0")]
+ #[inline]
+ pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
+ // SAFETY: the caller must guarantee that `ptr` is non-null.
+ unsafe { NonNull { pointer: ptr as _ } }
+ }
+
+ /// Creates a new `NonNull` if `ptr` is non-null.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ptr::NonNull;
+ ///
+ /// let mut x = 0u32;
+ /// let ptr = NonNull::<u32>::new(&mut x as *mut _).expect("ptr is null!");
+ ///
+ /// if let Some(ptr) = NonNull::<u32>::new(std::ptr::null_mut()) {
+ /// unreachable!();
+ /// }
+ /// ```
+ #[stable(feature = "nonnull", since = "1.25.0")]
+ #[rustc_const_unstable(feature = "const_nonnull_new", issue = "93235")]
+ #[inline]
+ pub const fn new(ptr: *mut T) -> Option<Self> {
+ if !ptr.is_null() {
+ // SAFETY: The pointer is already checked and is not null
+ Some(unsafe { Self::new_unchecked(ptr) })
+ } else {
+ None
+ }
+ }
+
+ /// Performs the same functionality as [`std::ptr::from_raw_parts`], except that a
+ /// `NonNull` pointer is returned, as opposed to a raw `*const` pointer.
+ ///
+ /// See the documentation of [`std::ptr::from_raw_parts`] for more details.
+ ///
+ /// [`std::ptr::from_raw_parts`]: crate::ptr::from_raw_parts
+ #[unstable(feature = "ptr_metadata", issue = "81513")]
+ #[rustc_const_unstable(feature = "ptr_metadata", issue = "81513")]
+ #[inline]
+ pub const fn from_raw_parts(
+ data_address: NonNull<()>,
+ metadata: <T as super::Pointee>::Metadata,
+ ) -> NonNull<T> {
+ // SAFETY: The result of `ptr::from_raw_parts_mut` is non-null because `data_address` is.
+ unsafe {
+ NonNull::new_unchecked(super::from_raw_parts_mut(data_address.as_ptr(), metadata))
+ }
+ }
+
+ /// Decompose a (possibly wide) pointer into its address and metadata components.
+ ///
+ /// The pointer can be later reconstructed with [`NonNull::from_raw_parts`].
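+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch of the round trip, assuming the unstable `ptr_metadata` feature:
+ ///
+ /// ```
+ /// #![feature(ptr_metadata)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let mut x = [1i32, 2, 3];
+ /// let slice: NonNull<[i32]> = NonNull::from(&mut x[..]);
+ /// let (data, len) = slice.to_raw_parts();
+ /// // For slices, the metadata is the length in elements.
+ /// assert_eq!(len, 3);
+ /// let rebuilt: NonNull<[i32]> = NonNull::from_raw_parts(data, len);
+ /// assert_eq!(rebuilt, slice);
+ /// ```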
+ #[unstable(feature = "ptr_metadata", issue = "81513")]
+ #[rustc_const_unstable(feature = "ptr_metadata", issue = "81513")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn to_raw_parts(self) -> (NonNull<()>, <T as super::Pointee>::Metadata) {
+ (self.cast(), super::metadata(self.as_ptr()))
+ }
+
+ /// Gets the "address" portion of the pointer.
+ ///
+ /// For more details see the equivalent method on a raw pointer, [`pointer::addr`].
+ ///
+ /// This API and its claimed semantics are part of the Strict Provenance experiment,
+ /// see the [`ptr` module documentation][crate::ptr].
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "strict_provenance", issue = "95228")]
+ pub fn addr(self) -> NonZeroUsize
+ where
+ T: Sized,
+ {
+ // SAFETY: The pointer is guaranteed by the type to be non-null,
+ // meaning that the address will be non-zero.
+ unsafe { NonZeroUsize::new_unchecked(self.pointer.addr()) }
+ }
+
+ /// Creates a new pointer with the given address.
+ ///
+ /// For more details see the equivalent method on a raw pointer, [`pointer::with_addr`].
+ ///
+ /// This API and its claimed semantics are part of the Strict Provenance experiment,
+ /// see the [`ptr` module documentation][crate::ptr].
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "strict_provenance", issue = "95228")]
+ pub fn with_addr(self, addr: NonZeroUsize) -> Self
+ where
+ T: Sized,
+ {
+ // SAFETY: The result of `pointer::with_addr` is non-null because `addr` is guaranteed to be non-zero.
+ unsafe { NonNull::new_unchecked(self.pointer.with_addr(addr.get()) as *mut _) }
+ }
+
+ /// Creates a new pointer by mapping `self`'s address to a new one.
+ ///
+ /// For more details see the equivalent method on a raw pointer, [`pointer::map_addr`].
+ ///
+ /// This API and its claimed semantics are part of the Strict Provenance experiment,
+ /// see the [`ptr` module documentation][crate::ptr].
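+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, assuming the unstable `strict_provenance` feature:
+ ///
+ /// ```
+ /// #![feature(strict_provenance)]
+ /// use std::num::NonZeroUsize;
+ /// use std::ptr::NonNull;
+ ///
+ /// let mut x = [0u8; 2];
+ /// let first = NonNull::new(x.as_mut_ptr()).unwrap();
+ /// // Move to the next byte while keeping the original provenance.
+ /// let second = first.map_addr(|a| NonZeroUsize::new(a.get() + 1).unwrap());
+ /// assert_eq!(second.addr().get(), first.addr().get() + 1);
+ /// ```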
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "strict_provenance", issue = "95228")]
+ pub fn map_addr(self, f: impl FnOnce(NonZeroUsize) -> NonZeroUsize) -> Self
+ where
+ T: Sized,
+ {
+ self.with_addr(f(self.addr()))
+ }
+
+ /// Acquires the underlying `*mut` pointer.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ptr::NonNull;
+ ///
+ /// let mut x = 0u32;
+ /// let ptr = NonNull::new(&mut x).expect("ptr is null!");
+ ///
+ /// let x_value = unsafe { *ptr.as_ptr() };
+ /// assert_eq!(x_value, 0);
+ ///
+ /// unsafe { *ptr.as_ptr() += 2; }
+ /// let x_value = unsafe { *ptr.as_ptr() };
+ /// assert_eq!(x_value, 2);
+ /// ```
+ #[stable(feature = "nonnull", since = "1.25.0")]
+ #[rustc_const_stable(feature = "const_nonnull_as_ptr", since = "1.32.0")]
+ #[must_use]
+ #[inline]
+ pub const fn as_ptr(self) -> *mut T {
+ self.pointer as *mut T
+ }
+
+ /// Returns a shared reference to the value. If the value may be uninitialized, [`as_uninit_ref`]
+ /// must be used instead.
+ ///
+ /// For the mutable counterpart see [`as_mut`].
+ ///
+ /// [`as_uninit_ref`]: NonNull::as_uninit_ref
+ /// [`as_mut`]: NonNull::as_mut
+ ///
+ /// # Safety
+ ///
+ /// When calling this method, you have to ensure that all of the following is true:
+ ///
+ /// * The pointer must be properly aligned.
+ ///
+ /// * It must be "dereferenceable" in the sense defined in [the module documentation].
+ ///
+ /// * The pointer must point to an initialized instance of `T`.
+ ///
+ /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+ /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+ /// In particular, while this reference exists, the memory the pointer points to must
+ /// not get mutated (except inside `UnsafeCell`).
+ ///
+ /// This applies even if the result of this method is unused!
+ /// (The part about being initialized is not yet fully decided, but until
+ /// it is, the only safe approach is to ensure that the value is indeed initialized.)
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ptr::NonNull;
+ ///
+ /// let mut x = 0u32;
+ /// let ptr = NonNull::new(&mut x as *mut _).expect("ptr is null!");
+ ///
+ /// let ref_x = unsafe { ptr.as_ref() };
+ /// println!("{ref_x}");
+ /// ```
+ ///
+ /// [the module documentation]: crate::ptr#safety
+ #[stable(feature = "nonnull", since = "1.25.0")]
+ #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+ #[must_use]
+ #[inline]
+ pub const unsafe fn as_ref<'a>(&self) -> &'a T {
+ // SAFETY: the caller must guarantee that `self` meets all the
+ // requirements for a reference.
+ unsafe { &*self.as_ptr() }
+ }
+
+ /// Returns a unique reference to the value. If the value may be uninitialized, [`as_uninit_mut`]
+ /// must be used instead.
+ ///
+ /// For the shared counterpart see [`as_ref`].
+ ///
+ /// [`as_uninit_mut`]: NonNull::as_uninit_mut
+ /// [`as_ref`]: NonNull::as_ref
+ ///
+ /// # Safety
+ ///
+ /// When calling this method, you have to ensure that all of the following is true:
+ ///
+ /// * The pointer must be properly aligned.
+ ///
+ /// * It must be "dereferenceable" in the sense defined in [the module documentation].
+ ///
+ /// * The pointer must point to an initialized instance of `T`.
+ ///
+ /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+ /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+ /// In particular, while this reference exists, the memory the pointer points to must
+ /// not get accessed (read or written) through any other pointer.
+ ///
+ /// This applies even if the result of this method is unused!
+ /// (The part about being initialized is not yet fully decided, but until
+ /// it is, the only safe approach is to ensure that the value is indeed initialized.)
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ptr::NonNull;
+ ///
+ /// let mut x = 0u32;
+ /// let mut ptr = NonNull::new(&mut x).expect("null pointer");
+ ///
+ /// let x_ref = unsafe { ptr.as_mut() };
+ /// assert_eq!(*x_ref, 0);
+ /// *x_ref += 2;
+ /// assert_eq!(*x_ref, 2);
+ /// ```
+ ///
+ /// [the module documentation]: crate::ptr#safety
+ #[stable(feature = "nonnull", since = "1.25.0")]
+ #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+ #[must_use]
+ #[inline]
+ pub const unsafe fn as_mut<'a>(&mut self) -> &'a mut T {
+ // SAFETY: the caller must guarantee that `self` meets all the
+ // requirements for a mutable reference.
+ unsafe { &mut *self.as_ptr() }
+ }
+
+ /// Casts to a pointer of another type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ptr::NonNull;
+ ///
+ /// let mut x = 0u32;
+ /// let ptr = NonNull::new(&mut x as *mut _).expect("null pointer");
+ ///
+ /// let casted_ptr = ptr.cast::<i8>();
+ /// let raw_ptr: *mut i8 = casted_ptr.as_ptr();
+ /// ```
+ #[stable(feature = "nonnull_cast", since = "1.27.0")]
+ #[rustc_const_stable(feature = "const_nonnull_cast", since = "1.36.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn cast<U>(self) -> NonNull<U> {
+ // SAFETY: `self` is a `NonNull` pointer which is necessarily non-null
+ unsafe { NonNull::new_unchecked(self.as_ptr() as *mut U) }
+ }
+}
+
+impl<T> NonNull<[T]> {
+ /// Creates a non-null raw slice from a thin pointer and a length.
+ ///
+ /// The `len` argument is the number of **elements**, not the number of bytes.
+ ///
+ /// This function is safe, but dereferencing the return value is unsafe.
+ /// See the documentation of [`slice::from_raw_parts`] for slice safety requirements.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// #![feature(nonnull_slice_from_raw_parts)]
+ ///
+ /// use std::ptr::NonNull;
+ ///
+ /// // create a slice pointer when starting out with a pointer to the first element
+ /// let mut x = [5, 6, 7];
+ /// let nonnull_pointer = NonNull::new(x.as_mut_ptr()).unwrap();
+ /// let slice = NonNull::slice_from_raw_parts(nonnull_pointer, 3);
+ /// assert_eq!(unsafe { slice.as_ref()[2] }, 7);
+ /// ```
+ ///
+ /// (Note that this example artificially demonstrates a use of this method,
+ /// but `let slice = NonNull::from(&x[..]);` would be a better way to write code like this.)
+ #[unstable(feature = "nonnull_slice_from_raw_parts", issue = "71941")]
+ #[rustc_const_unstable(feature = "const_nonnull_slice_from_raw_parts", issue = "71941")]
+ #[must_use]
+ #[inline]
+ pub const fn slice_from_raw_parts(data: NonNull<T>, len: usize) -> Self {
+ // SAFETY: `data` is a `NonNull` pointer which is necessarily non-null
+ unsafe { Self::new_unchecked(super::slice_from_raw_parts_mut(data.as_ptr(), len)) }
+ }
+
+ /// Returns the length of a non-null raw slice.
+ ///
+ /// The returned value is the number of **elements**, not the number of bytes.
+ ///
+ /// This function is safe, even when the non-null raw slice cannot be dereferenced to a slice
+ /// because the pointer does not have a valid address.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// #![feature(nonnull_slice_from_raw_parts)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let slice: NonNull<[i8]> = NonNull::slice_from_raw_parts(NonNull::dangling(), 3);
+ /// assert_eq!(slice.len(), 3);
+ /// ```
+ #[stable(feature = "slice_ptr_len_nonnull", since = "1.63.0")]
+ #[rustc_const_stable(feature = "const_slice_ptr_len_nonnull", since = "1.63.0")]
+ #[rustc_allow_const_fn_unstable(const_slice_ptr_len)]
+ #[must_use]
+ #[inline]
+ pub const fn len(self) -> usize {
+ self.as_ptr().len()
+ }
+
+ /// Returns a non-null pointer to the slice's buffer.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// #![feature(slice_ptr_get, nonnull_slice_from_raw_parts)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let slice: NonNull<[i8]> = NonNull::slice_from_raw_parts(NonNull::dangling(), 3);
+ /// assert_eq!(slice.as_non_null_ptr(), NonNull::<i8>::dangling());
+ /// ```
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "slice_ptr_get", issue = "74265")]
+ #[rustc_const_unstable(feature = "slice_ptr_get", issue = "74265")]
+ pub const fn as_non_null_ptr(self) -> NonNull<T> {
+ // SAFETY: We know `self` is non-null.
+ unsafe { NonNull::new_unchecked(self.as_ptr().as_mut_ptr()) }
+ }
+
+ /// Returns a raw pointer to the slice's buffer.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// #![feature(slice_ptr_get, nonnull_slice_from_raw_parts)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let slice: NonNull<[i8]> = NonNull::slice_from_raw_parts(NonNull::dangling(), 3);
+ /// assert_eq!(slice.as_mut_ptr(), NonNull::<i8>::dangling().as_ptr());
+ /// ```
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "slice_ptr_get", issue = "74265")]
+ #[rustc_const_unstable(feature = "slice_ptr_get", issue = "74265")]
+ pub const fn as_mut_ptr(self) -> *mut T {
+ self.as_non_null_ptr().as_ptr()
+ }
+
+ /// Returns a shared reference to a slice of possibly uninitialized values. In contrast to
+ /// [`as_ref`], this does not require that the value be initialized.
+ ///
+ /// For the mutable counterpart see [`as_uninit_slice_mut`].
+ ///
+ /// [`as_ref`]: NonNull::as_ref
+ /// [`as_uninit_slice_mut`]: NonNull::as_uninit_slice_mut
+ ///
+ /// # Safety
+ ///
+ /// When calling this method, you have to ensure that all of the following is true:
+ ///
+ /// * The pointer must be [valid] for reads for `ptr.len() * mem::size_of::<T>()` many bytes,
+ /// and it must be properly aligned. This means in particular:
+ ///
+ /// * The entire memory range of this slice must be contained within a single allocated object!
+ /// Slices can never span across multiple allocated objects.
+ ///
+ /// * The pointer must be aligned even for zero-length slices. One
+ /// reason for this is that enum layout optimizations may rely on references
+ /// (including slices of any length) being aligned and non-null to distinguish
+ /// them from other data. You can obtain a pointer that is usable as `data`
+ /// for zero-length slices using [`NonNull::dangling()`].
+ ///
+ /// * The total size `ptr.len() * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
+ /// See the safety documentation of [`pointer::offset`].
+ ///
+ /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+ /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+ /// In particular, while this reference exists, the memory the pointer points to must
+ /// not get mutated (except inside `UnsafeCell`).
+ ///
+ /// This applies even if the result of this method is unused!
+ ///
+ /// See also [`slice::from_raw_parts`].
+ ///
+ /// [valid]: crate::ptr#safety
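+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, assuming the unstable `ptr_as_uninit` and
+ /// `nonnull_slice_from_raw_parts` features:
+ ///
+ /// ```
+ /// #![feature(ptr_as_uninit, nonnull_slice_from_raw_parts)]
+ /// use std::mem::MaybeUninit;
+ /// use std::ptr::NonNull;
+ ///
+ /// let mut x = [1u8, 2, 3];
+ /// let slice = NonNull::slice_from_raw_parts(NonNull::new(x.as_mut_ptr()).unwrap(), 3);
+ /// // The memory is initialized, so viewing it as `MaybeUninit<u8>` is fine.
+ /// let uninit: &[MaybeUninit<u8>] = unsafe { slice.as_uninit_slice() };
+ /// assert_eq!(uninit.len(), 3);
+ /// ```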
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "ptr_as_uninit", issue = "75402")]
+ #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+ pub const unsafe fn as_uninit_slice<'a>(self) -> &'a [MaybeUninit<T>] {
+ // SAFETY: the caller must uphold the safety contract for `as_uninit_slice`.
+ unsafe { slice::from_raw_parts(self.cast().as_ptr(), self.len()) }
+ }
+
+ /// Returns a unique reference to a slice of possibly uninitialized values. In contrast to
+ /// [`as_mut`], this does not require that the value be initialized.
+ ///
+ /// For the shared counterpart see [`as_uninit_slice`].
+ ///
+ /// [`as_mut`]: NonNull::as_mut
+ /// [`as_uninit_slice`]: NonNull::as_uninit_slice
+ ///
+ /// # Safety
+ ///
+ /// When calling this method, you have to ensure that all of the following is true:
+ ///
+ /// * The pointer must be [valid] for reads and writes for `ptr.len() * mem::size_of::<T>()`
+ /// many bytes, and it must be properly aligned. This means in particular:
+ ///
+ /// * The entire memory range of this slice must be contained within a single allocated object!
+ /// Slices can never span across multiple allocated objects.
+ ///
+ /// * The pointer must be aligned even for zero-length slices. One
+ /// reason for this is that enum layout optimizations may rely on references
+ /// (including slices of any length) being aligned and non-null to distinguish
+ /// them from other data. You can obtain a pointer that is usable as `data`
+ /// for zero-length slices using [`NonNull::dangling()`].
+ ///
+ /// * The total size `ptr.len() * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
+ /// See the safety documentation of [`pointer::offset`].
+ ///
+ /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+ /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+ /// In particular, while this reference exists, the memory the pointer points to must
+ /// not get accessed (read or written) through any other pointer.
+ ///
+ /// This applies even if the result of this method is unused!
+ ///
+ /// See also [`slice::from_raw_parts_mut`].
+ ///
+ /// [valid]: crate::ptr#safety
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// #![feature(allocator_api, ptr_as_uninit)]
+ ///
+ /// use std::alloc::{Allocator, Layout, Global};
+ /// use std::mem::MaybeUninit;
+ /// use std::ptr::NonNull;
+ ///
+ /// let memory: NonNull<[u8]> = Global.allocate(Layout::new::<[u8; 32]>())?;
+ /// // This is safe as `memory` is valid for reads and writes for `memory.len()` many bytes.
+ /// // Note that calling `memory.as_mut()` is not allowed here as the content may be uninitialized.
+ /// # #[allow(unused_variables)]
+ /// let slice: &mut [MaybeUninit<u8>] = unsafe { memory.as_uninit_slice_mut() };
+ /// # Ok::<_, std::alloc::AllocError>(())
+ /// ```
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "ptr_as_uninit", issue = "75402")]
+ #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+ pub const unsafe fn as_uninit_slice_mut<'a>(self) -> &'a mut [MaybeUninit<T>] {
+ // SAFETY: the caller must uphold the safety contract for `as_uninit_slice_mut`.
+ unsafe { slice::from_raw_parts_mut(self.cast().as_ptr(), self.len()) }
+ }
+
+ /// Returns a raw pointer to an element or subslice, without doing bounds
+ /// checking.
+ ///
+ /// Calling this method with an out-of-bounds index or when `self` is not dereferenceable
+ /// is *[undefined behavior]* even if the resulting pointer is not used.
+ ///
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_ptr_get, nonnull_slice_from_raw_parts)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let x = &mut [1, 2, 4];
+ /// let x = NonNull::slice_from_raw_parts(NonNull::new(x.as_mut_ptr()).unwrap(), x.len());
+ ///
+ /// unsafe {
+ /// assert_eq!(x.get_unchecked_mut(1).as_ptr(), x.as_non_null_ptr().as_ptr().add(1));
+ /// }
+ /// ```
+ #[unstable(feature = "slice_ptr_get", issue = "74265")]
+ #[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+ #[inline]
+ pub const unsafe fn get_unchecked_mut<I>(self, index: I) -> NonNull<I::Output>
+ where
+ I: ~const SliceIndex<[T]>,
+ {
+ // SAFETY: the caller ensures that `self` is dereferenceable and `index` in-bounds.
+ // As a consequence, the resulting pointer cannot be null.
+ unsafe { NonNull::new_unchecked(self.as_ptr().get_unchecked_mut(index)) }
+ }
+}
+
+#[stable(feature = "nonnull", since = "1.25.0")]
+#[rustc_const_unstable(feature = "const_clone", issue = "91805")]
+impl<T: ?Sized> const Clone for NonNull<T> {
+ #[inline]
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+#[stable(feature = "nonnull", since = "1.25.0")]
+impl<T: ?Sized> Copy for NonNull<T> {}
+
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<T: ?Sized, U: ?Sized> CoerceUnsized<NonNull<U>> for NonNull<T> where T: Unsize<U> {}
+
+#[unstable(feature = "dispatch_from_dyn", issue = "none")]
+impl<T: ?Sized, U: ?Sized> DispatchFromDyn<NonNull<U>> for NonNull<T> where T: Unsize<U> {}
+
+#[stable(feature = "nonnull", since = "1.25.0")]
+impl<T: ?Sized> fmt::Debug for NonNull<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Pointer::fmt(&self.as_ptr(), f)
+ }
+}
+
+#[stable(feature = "nonnull", since = "1.25.0")]
+impl<T: ?Sized> fmt::Pointer for NonNull<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Pointer::fmt(&self.as_ptr(), f)
+ }
+}
+
+#[stable(feature = "nonnull", since = "1.25.0")]
+impl<T: ?Sized> Eq for NonNull<T> {}
+
+#[stable(feature = "nonnull", since = "1.25.0")]
+impl<T: ?Sized> PartialEq for NonNull<T> {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ self.as_ptr() == other.as_ptr()
+ }
+}
+
+#[stable(feature = "nonnull", since = "1.25.0")]
+impl<T: ?Sized> Ord for NonNull<T> {
+ #[inline]
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.as_ptr().cmp(&other.as_ptr())
+ }
+}
+
+#[stable(feature = "nonnull", since = "1.25.0")]
+impl<T: ?Sized> PartialOrd for NonNull<T> {
+ #[inline]
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ self.as_ptr().partial_cmp(&other.as_ptr())
+ }
+}
+
+#[stable(feature = "nonnull", since = "1.25.0")]
+impl<T: ?Sized> hash::Hash for NonNull<T> {
+ #[inline]
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ self.as_ptr().hash(state)
+ }
+}
+
+#[unstable(feature = "ptr_internals", issue = "none")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl<T: ?Sized> const From<Unique<T>> for NonNull<T> {
+ #[inline]
+ fn from(unique: Unique<T>) -> Self {
+ // SAFETY: A Unique pointer cannot be null, so the conditions for
+ // new_unchecked() are respected.
+ unsafe { NonNull::new_unchecked(unique.as_ptr()) }
+ }
+}
+
+#[stable(feature = "nonnull", since = "1.25.0")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl<T: ?Sized> const From<&mut T> for NonNull<T> {
+ /// Converts a `&mut T` to a `NonNull<T>`.
+ ///
+ /// This conversion is safe and infallible since references cannot be null.
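+ ///
+ /// # Examples
+ ///
+ /// A brief sketch:
+ ///
+ /// ```
+ /// use std::ptr::NonNull;
+ ///
+ /// let mut x = 0u32;
+ /// let ptr = NonNull::from(&mut x);
+ /// // No `unwrap` is needed: the conversion can never observe a null pointer.
+ /// assert_eq!(ptr.as_ptr(), &mut x as *mut u32);
+ /// ```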
+ #[inline]
+ fn from(reference: &mut T) -> Self {
+ // SAFETY: A mutable reference cannot be null.
+ unsafe { NonNull { pointer: reference as *mut T } }
+ }
+}
+
+#[stable(feature = "nonnull", since = "1.25.0")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl<T: ?Sized> const From<&T> for NonNull<T> {
+ /// Converts a `&T` to a `NonNull<T>`.
+ ///
+ /// This conversion is safe and infallible since references cannot be null.
+ #[inline]
+ fn from(reference: &T) -> Self {
+ // SAFETY: A reference cannot be null, so the conditions for
+ // new_unchecked() are respected.
+ unsafe { NonNull { pointer: reference as *const T } }
+ }
+}
diff --git a/library/core/src/ptr/unique.rs b/library/core/src/ptr/unique.rs
new file mode 100644
index 000000000..64616142b
--- /dev/null
+++ b/library/core/src/ptr/unique.rs
@@ -0,0 +1,193 @@
+use crate::convert::From;
+use crate::fmt;
+use crate::marker::{PhantomData, Unsize};
+use crate::ops::{CoerceUnsized, DispatchFromDyn};
+use crate::ptr::NonNull;
+
+/// A wrapper around a raw non-null `*mut T` that indicates that the possessor
+/// of this wrapper owns the referent. Useful for building abstractions like
+/// `Box<T>`, `Vec<T>`, `String`, and `HashMap<K, V>`.
+///
+/// Unlike `*mut T`, `Unique<T>` behaves "as if" it were an instance of `T`.
+/// It implements `Send`/`Sync` if `T` is `Send`/`Sync`. It also implies
+/// the kind of strong aliasing guarantees an instance of `T` can expect:
+/// the referent of the pointer should not be modified without a unique path to
+/// its owning Unique.
+///
+/// If you're uncertain of whether it's correct to use `Unique` for your purposes,
+/// consider using `NonNull`, which has weaker semantics.
+///
+/// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
+/// is never dereferenced. This is so that enums may use this forbidden value
+/// as a discriminant -- `Option<Unique<T>>` has the same size as `Unique<T>`.
+/// However, the pointer may still dangle if it isn't dereferenced.
+///
+/// Unlike `*mut T`, `Unique<T>` is covariant over `T`. This should always be correct
+/// for any type which upholds Unique's aliasing requirements.
+#[unstable(
+ feature = "ptr_internals",
+ issue = "none",
+ reason = "use `NonNull` instead and consider `PhantomData<T>` \
+ (if you also use `#[may_dangle]`), `Send`, and/or `Sync`"
+)]
+#[doc(hidden)]
+#[repr(transparent)]
+pub struct Unique<T: ?Sized> {
+ pointer: NonNull<T>,
+ // NOTE: this marker has no consequences for variance, but is necessary
+ // for dropck to understand that we logically own a `T`.
+ //
+ // For details, see:
+ // https://github.com/rust-lang/rfcs/blob/master/text/0769-sound-generic-drop.md#phantom-data
+ _marker: PhantomData<T>,
+}
+
+/// `Unique` pointers are `Send` if `T` is `Send` because the data they
+/// reference is unaliased. Note that this aliasing invariant is
+/// unenforced by the type system; the abstraction using the
+/// `Unique` must enforce it.
+#[unstable(feature = "ptr_internals", issue = "none")]
+unsafe impl<T: Send + ?Sized> Send for Unique<T> {}
+
+/// `Unique` pointers are `Sync` if `T` is `Sync` because the data they
+/// reference is unaliased. Note that this aliasing invariant is
+/// unenforced by the type system; the abstraction using the
+/// `Unique` must enforce it.
+#[unstable(feature = "ptr_internals", issue = "none")]
+unsafe impl<T: Sync + ?Sized> Sync for Unique<T> {}
+
+#[unstable(feature = "ptr_internals", issue = "none")]
+impl<T: Sized> Unique<T> {
+ /// Creates a new `Unique` that is dangling, but well-aligned.
+ ///
+ /// This is useful for initializing types which lazily allocate, like
+ /// `Vec::new` does.
+ ///
+ /// Note that the pointer value may potentially represent a valid pointer to
+ /// a `T`, which means this must not be used as a "not yet initialized"
+ /// sentinel value. Types that lazily allocate must track initialization by
+ /// some other means.
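+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch; since `ptr_internals` is a perma-unstable internal
+ /// feature, this only compiles on a nightly toolchain:
+ ///
+ /// ```
+ /// #![feature(ptr_internals)]
+ /// use core::ptr::Unique;
+ ///
+ /// let ptr = Unique::<u64>::dangling();
+ /// // The pointer is well-aligned, but it must not be dereferenced.
+ /// assert_eq!(ptr.as_ptr() as usize % std::mem::align_of::<u64>(), 0);
+ /// ```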
+ #[must_use]
+ #[inline]
+ pub const fn dangling() -> Self {
+ Self::from(NonNull::dangling())
+ }
+}
+
+#[unstable(feature = "ptr_internals", issue = "none")]
+impl<T: ?Sized> Unique<T> {
+ /// Creates a new `Unique`.
+ ///
+ /// # Safety
+ ///
+ /// `ptr` must be non-null.
+ #[inline]
+ pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
+ // SAFETY: the caller must guarantee that `ptr` is non-null.
+ unsafe { Unique { pointer: NonNull::new_unchecked(ptr), _marker: PhantomData } }
+ }
+
+ /// Creates a new `Unique` if `ptr` is non-null.
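+ ///
+ /// # Examples
+ ///
+ /// A short sketch (nightly-only, as `ptr_internals` is internal):
+ ///
+ /// ```
+ /// #![feature(ptr_internals)]
+ /// use core::ptr::Unique;
+ ///
+ /// let mut x = 5u32;
+ /// assert!(Unique::new(&mut x as *mut u32).is_some());
+ /// assert!(Unique::new(std::ptr::null_mut::<u32>()).is_none());
+ /// ```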
+ #[inline]
+ pub const fn new(ptr: *mut T) -> Option<Self> {
+ if let Some(pointer) = NonNull::new(ptr) {
+ Some(Unique { pointer, _marker: PhantomData })
+ } else {
+ None
+ }
+ }
+
+ /// Acquires the underlying `*mut` pointer.
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[inline]
+ pub const fn as_ptr(self) -> *mut T {
+ self.pointer.as_ptr()
+ }
+
+ /// Dereferences the content.
+ ///
+ /// The resulting lifetime is bound to `self`, so this behaves "as if"
+ /// it were actually an instance of `T` that is getting borrowed. If a longer
+ /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
+ #[must_use]
+ #[inline]
+ pub const unsafe fn as_ref(&self) -> &T {
+ // SAFETY: the caller must guarantee that `self` meets all the
+ // requirements for a reference.
+ unsafe { self.pointer.as_ref() }
+ }
+
+ /// Mutably dereferences the content.
+ ///
+ /// The resulting lifetime is bound to `self`, so this behaves "as if"
+ /// it were actually an instance of `T` that is getting borrowed. If a longer
+ /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
+ #[must_use]
+ #[inline]
+ pub const unsafe fn as_mut(&mut self) -> &mut T {
+ // SAFETY: the caller must guarantee that `self` meets all the
+ // requirements for a mutable reference.
+ unsafe { self.pointer.as_mut() }
+ }
+
+ /// Casts to a pointer of another type.
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[inline]
+ pub const fn cast<U>(self) -> Unique<U> {
+ Unique::from(self.pointer.cast())
+ }
+}
+
+#[unstable(feature = "ptr_internals", issue = "none")]
+#[rustc_const_unstable(feature = "const_clone", issue = "91805")]
+impl<T: ?Sized> const Clone for Unique<T> {
+ #[inline]
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+#[unstable(feature = "ptr_internals", issue = "none")]
+impl<T: ?Sized> Copy for Unique<T> {}
+
+#[unstable(feature = "ptr_internals", issue = "none")]
+impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> {}
+
+#[unstable(feature = "ptr_internals", issue = "none")]
+impl<T: ?Sized, U: ?Sized> DispatchFromDyn<Unique<U>> for Unique<T> where T: Unsize<U> {}
+
+#[unstable(feature = "ptr_internals", issue = "none")]
+impl<T: ?Sized> fmt::Debug for Unique<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Pointer::fmt(&self.as_ptr(), f)
+ }
+}
+
+#[unstable(feature = "ptr_internals", issue = "none")]
+impl<T: ?Sized> fmt::Pointer for Unique<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Pointer::fmt(&self.as_ptr(), f)
+ }
+}
+
+#[unstable(feature = "ptr_internals", issue = "none")]
+impl<T: ?Sized> const From<&mut T> for Unique<T> {
+ /// Converts a `&mut T` to a `Unique<T>`.
+ ///
+ /// This conversion is infallible since references cannot be null.
+ #[inline]
+ fn from(reference: &mut T) -> Self {
+ Self::from(NonNull::from(reference))
+ }
+}
+
+#[unstable(feature = "ptr_internals", issue = "none")]
+impl<T: ?Sized> const From<NonNull<T>> for Unique<T> {
+ /// Converts a `NonNull<T>` to a `Unique<T>`.
+ ///
+ /// This conversion is infallible since `NonNull` cannot be null.
+ #[inline]
+ fn from(pointer: NonNull<T>) -> Self {
+ Unique { pointer, _marker: PhantomData }
+ }
+}
diff --git a/library/core/src/result.rs b/library/core/src/result.rs
new file mode 100644
index 000000000..45b052c82
--- /dev/null
+++ b/library/core/src/result.rs
@@ -0,0 +1,2150 @@
+//! Error handling with the `Result` type.
+//!
+//! [`Result<T, E>`][`Result`] is the type used for returning and propagating
+//! errors. It is an enum with the variants [`Ok(T)`], representing
+//! success and containing a value, and [`Err(E)`], representing error
+//! and containing an error value.
+//!
+//! ```
+//! # #[allow(dead_code)]
+//! enum Result<T, E> {
+//! Ok(T),
+//! Err(E),
+//! }
+//! ```
+//!
+//! Functions return [`Result`] whenever errors are expected and
+//! recoverable. In the `std` crate, [`Result`] is most prominently used
+//! for [I/O](../../std/io/index.html).
+//!
+//! A simple function returning [`Result`] might be
+//! defined and used like so:
+//!
+//! ```
+//! #[derive(Debug)]
+//! enum Version { Version1, Version2 }
+//!
+//! fn parse_version(header: &[u8]) -> Result<Version, &'static str> {
+//! match header.get(0) {
+//! None => Err("invalid header length"),
+//! Some(&1) => Ok(Version::Version1),
+//! Some(&2) => Ok(Version::Version2),
+//! Some(_) => Err("invalid version"),
+//! }
+//! }
+//!
+//! let version = parse_version(&[1, 2, 3, 4]);
+//! match version {
+//! Ok(v) => println!("working with version: {v:?}"),
+//! Err(e) => println!("error parsing header: {e:?}"),
+//! }
+//! ```
+//!
+//! Pattern matching on [`Result`]s is clear and straightforward for
+//! simple cases, but [`Result`] comes with some convenience methods
+//! that make working with it more succinct.
+//!
+//! ```
+//! let good_result: Result<i32, i32> = Ok(10);
+//! let bad_result: Result<i32, i32> = Err(10);
+//!
+//! // The `is_ok` and `is_err` methods do what they say.
+//! assert!(good_result.is_ok() && !good_result.is_err());
+//! assert!(bad_result.is_err() && !bad_result.is_ok());
+//!
+//! // `map` consumes the `Result` and produces another.
+//! let good_result: Result<i32, i32> = good_result.map(|i| i + 1);
+//! let bad_result: Result<i32, i32> = bad_result.map(|i| i - 1);
+//!
+//! // Use `and_then` to continue the computation.
+//! let good_result: Result<bool, i32> = good_result.and_then(|i| Ok(i == 11));
+//!
+//! // Use `or_else` to handle the error.
+//! let bad_result: Result<i32, i32> = bad_result.or_else(|i| Ok(i + 20));
+//!
+//! // Consume the result and return the contents with `unwrap`.
+//! let final_awesome_result = good_result.unwrap();
+//! ```
+//!
+//! # Results must be used
+//!
+//! A common problem with using return values to indicate errors is
+//! that it is easy to ignore the return value, thus failing to handle
+//! the error. [`Result`] is annotated with the `#[must_use]` attribute,
+//! which will cause the compiler to issue a warning when a Result
+//! value is ignored. This makes [`Result`] especially useful with
+//! functions that may encounter errors but don't otherwise return a
+//! useful value.
+//!
+//! Consider the [`write_all`] method defined for I/O types
+//! by the [`Write`] trait:
+//!
+//! ```
+//! use std::io;
+//!
+//! trait Write {
+//! fn write_all(&mut self, bytes: &[u8]) -> Result<(), io::Error>;
+//! }
+//! ```
+//!
+//! *Note: The actual definition of [`Write`] uses [`io::Result`], which
+//! is just a synonym for <code>[Result]<T, [io::Error]></code>.*
+//!
+//! This method doesn't produce a value, but the write may
+//! fail. It's crucial to handle the error case, and *not* write
+//! something like this:
+//!
+//! ```no_run
+//! # #![allow(unused_must_use)] // \o/
+//! use std::fs::File;
+//! use std::io::prelude::*;
+//!
+//! let mut file = File::create("valuable_data.txt").unwrap();
+//! // If `write_all` errors, then we'll never know, because the return
+//! // value is ignored.
+//! file.write_all(b"important message");
+//! ```
+//!
+//! If you *do* write that in Rust, the compiler will give you a
+//! warning (by default, controlled by the `unused_must_use` lint).
+//!
+//! You might instead, if you don't want to handle the error, simply
+//! assert success with [`expect`]. This will panic if the
+//! write fails, providing a marginally useful message indicating why:
+//!
+//! ```no_run
+//! use std::fs::File;
+//! use std::io::prelude::*;
+//!
+//! let mut file = File::create("valuable_data.txt").unwrap();
+//! file.write_all(b"important message").expect("failed to write message");
+//! ```
+//!
+//! You might also simply assert success:
+//!
+//! ```no_run
+//! # use std::fs::File;
+//! # use std::io::prelude::*;
+//! # let mut file = File::create("valuable_data.txt").unwrap();
+//! assert!(file.write_all(b"important message").is_ok());
+//! ```
+//!
+//! Or propagate the error up the call stack with [`?`]:
+//!
+//! ```
+//! # use std::fs::File;
+//! # use std::io::prelude::*;
+//! # use std::io;
+//! # #[allow(dead_code)]
+//! fn write_message() -> io::Result<()> {
+//! let mut file = File::create("valuable_data.txt")?;
+//! file.write_all(b"important message")?;
+//! Ok(())
+//! }
+//! ```
+//!
+//! # The question mark operator, `?`
+//!
+//! When writing code that calls many functions that return the
+//! [`Result`] type, the error handling can be tedious. The question mark
+//! operator, [`?`], hides some of the boilerplate of propagating errors
+//! up the call stack.
+//!
+//! It replaces this:
+//!
+//! ```
+//! # #![allow(dead_code)]
+//! use std::fs::File;
+//! use std::io::prelude::*;
+//! use std::io;
+//!
+//! struct Info {
+//! name: String,
+//! age: i32,
+//! rating: i32,
+//! }
+//!
+//! fn write_info(info: &Info) -> io::Result<()> {
+//! // Early return on error
+//! let mut file = match File::create("my_best_friends.txt") {
+//! Err(e) => return Err(e),
+//! Ok(f) => f,
+//! };
+//! if let Err(e) = file.write_all(format!("name: {}\n", info.name).as_bytes()) {
+//! return Err(e)
+//! }
+//! if let Err(e) = file.write_all(format!("age: {}\n", info.age).as_bytes()) {
+//! return Err(e)
+//! }
+//! if let Err(e) = file.write_all(format!("rating: {}\n", info.rating).as_bytes()) {
+//! return Err(e)
+//! }
+//! Ok(())
+//! }
+//! ```
+//!
+//! With this:
+//!
+//! ```
+//! # #![allow(dead_code)]
+//! use std::fs::File;
+//! use std::io::prelude::*;
+//! use std::io;
+//!
+//! struct Info {
+//! name: String,
+//! age: i32,
+//! rating: i32,
+//! }
+//!
+//! fn write_info(info: &Info) -> io::Result<()> {
+//! let mut file = File::create("my_best_friends.txt")?;
+//! // Early return on error
+//! file.write_all(format!("name: {}\n", info.name).as_bytes())?;
+//! file.write_all(format!("age: {}\n", info.age).as_bytes())?;
+//! file.write_all(format!("rating: {}\n", info.rating).as_bytes())?;
+//! Ok(())
+//! }
+//! ```
+//!
+//! *It's much nicer!*
+//!
+//! Ending the expression with [`?`] will result in the unwrapped
+//! success ([`Ok`]) value, unless the result is [`Err`], in which case
+//! [`Err`] is returned early from the enclosing function.
+//!
+//! [`?`] can only be used in functions that return [`Result`] (or another
+//! compatible type, such as [`Option`]) because of the early return of
+//! [`Err`] that it provides.
+//!
+//! [`expect`]: Result::expect
+//! [`Write`]: ../../std/io/trait.Write.html "io::Write"
+//! [`write_all`]: ../../std/io/trait.Write.html#method.write_all "io::Write::write_all"
+//! [`io::Result`]: ../../std/io/type.Result.html "io::Result"
+//! [`?`]: crate::ops::Try
+//! [`Ok(T)`]: Ok
+//! [`Err(E)`]: Err
+//! [io::Error]: ../../std/io/struct.Error.html "io::Error"
+//!
+//! # Method overview
+//!
+//! In addition to working with pattern matching, [`Result`] provides a
+//! wide variety of different methods.
+//!
+//! ## Querying the variant
+//!
+//! The [`is_ok`] and [`is_err`] methods return [`true`] if the [`Result`]
+//! is [`Ok`] or [`Err`], respectively.
+//!
+//! [`is_err`]: Result::is_err
+//! [`is_ok`]: Result::is_ok
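+//!
+//! For example:
+//!
+//! ```
+//! let x: Result<i32, &str> = Ok(1);
+//! assert!(x.is_ok());
+//! assert!(!x.is_err());
+//! ```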
+//!
+//! ## Adapters for working with references
+//!
+//! * [`as_ref`] converts from `&Result<T, E>` to `Result<&T, &E>`
+//! * [`as_mut`] converts from `&mut Result<T, E>` to `Result<&mut T, &mut E>`
+//! * [`as_deref`] converts from `&Result<T, E>` to `Result<&T::Target, &E>`
+//! * [`as_deref_mut`] converts from `&mut Result<T, E>` to
+//! `Result<&mut T::Target, &mut E>`
+//!
+//! [`as_deref`]: Result::as_deref
+//! [`as_deref_mut`]: Result::as_deref_mut
+//! [`as_mut`]: Result::as_mut
+//! [`as_ref`]: Result::as_ref
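+//!
+//! For example, borrowing and dereferencing in place:
+//!
+//! ```
+//! let x: Result<String, u32> = Ok("hello".to_string());
+//! assert_eq!(x.as_ref(), Ok(&"hello".to_string()));
+//! assert_eq!(x.as_deref(), Ok("hello"));
+//! ```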
+//!
+//! ## Extracting contained values
+//!
+//! These methods extract the contained value in a [`Result<T, E>`] when it
+//! is the [`Ok`] variant. If the [`Result`] is [`Err`]:
+//!
+//! * [`expect`] panics with a provided custom message
+//! * [`unwrap`] panics with a generic message
+//! * [`unwrap_or`] returns the provided default value
+//! * [`unwrap_or_default`] returns the default value of the type `T`
+//! (which must implement the [`Default`] trait)
+//! * [`unwrap_or_else`] returns the result of evaluating the provided
+//! function
+//!
+//! The panicking methods [`expect`] and [`unwrap`] require `E` to
+//! implement the [`Debug`] trait.
+//!
+//! [`Debug`]: crate::fmt::Debug
+//! [`expect`]: Result::expect
+//! [`unwrap`]: Result::unwrap
+//! [`unwrap_or`]: Result::unwrap_or
+//! [`unwrap_or_default`]: Result::unwrap_or_default
+//! [`unwrap_or_else`]: Result::unwrap_or_else
+//!
+//! These methods extract the contained value in a [`Result<T, E>`] when it
+//! is the [`Err`] variant. They require `T` to implement the [`Debug`]
+//! trait. If the [`Result`] is [`Ok`]:
+//!
+//! * [`expect_err`] panics with a provided custom message
+//! * [`unwrap_err`] panics with a generic message
+//!
+//! [`Debug`]: crate::fmt::Debug
+//! [`expect_err`]: Result::expect_err
+//! [`unwrap_err`]: Result::unwrap_err
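+//!
+//! For example, falling back to a default when extraction fails:
+//!
+//! ```
+//! let good: Result<i32, &str> = Ok(7);
+//! let bad: Result<i32, &str> = Err("boom");
+//!
+//! assert_eq!(good.unwrap_or(0), 7);
+//! assert_eq!(bad.unwrap_or(0), 0);
+//! assert_eq!(bad.unwrap_or_default(), 0);
+//! assert_eq!(bad.unwrap_or_else(|msg| msg.len() as i32), 4);
+//! ```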
+//!
+//! ## Transforming contained values
+//!
+//! These methods transform [`Result`] to [`Option`]:
+//!
+//! * [`err`][Result::err] transforms [`Result<T, E>`] into [`Option<E>`],
+//! mapping [`Err(e)`] to [`Some(e)`] and [`Ok(v)`] to [`None`]
+//! * [`ok`][Result::ok] transforms [`Result<T, E>`] into [`Option<T>`],
+//! mapping [`Ok(v)`] to [`Some(v)`] and [`Err(e)`] to [`None`]
+//! * [`transpose`] transposes a [`Result`] of an [`Option`] into an
+//! [`Option`] of a [`Result`]
+//!
+// Do NOT add link reference definitions for `err` or `ok`, because they
+// will generate numerous incorrect URLs for `Err` and `Ok` elsewhere, due
+// to case folding.
+//!
+//! [`Err(e)`]: Err
+//! [`Ok(v)`]: Ok
+//! [`Some(e)`]: Option::Some
+//! [`Some(v)`]: Option::Some
+//! [`transpose`]: Result::transpose
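+//!
+//! For example:
+//!
+//! ```
+//! let x: Result<u32, &str> = Ok(2);
+//! assert_eq!(x.ok(), Some(2));
+//! assert_eq!(x.err(), None);
+//!
+//! let y: Result<Option<i32>, &str> = Ok(Some(5));
+//! assert_eq!(y.transpose(), Some(Ok(5)));
+//! ```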
+//!
+//! This method transforms the contained value of the [`Ok`] variant:
+//!
+//! * [`map`] transforms [`Result<T, E>`] into [`Result<U, E>`] by applying
+//! the provided function to the contained value of [`Ok`] and leaving
+//! [`Err`] values unchanged
+//!
+//! [`map`]: Result::map
+//!
+//! This method transforms the contained value of the [`Err`] variant:
+//!
+//! * [`map_err`] transforms [`Result<T, E>`] into [`Result<T, F>`] by
+//! applying the provided function to the contained value of [`Err`] and
+//! leaving [`Ok`] values unchanged
+//!
+//! [`map_err`]: Result::map_err
+//!
+//! These methods transform a [`Result<T, E>`] into a value of a possibly
+//! different type `U`:
+//!
+//! * [`map_or`] applies the provided function to the contained value of
+//! [`Ok`], or returns the provided default value if the [`Result`] is
+//! [`Err`]
+//! * [`map_or_else`] applies the provided function to the contained value
+//! of [`Ok`], or applies the provided default fallback function to the
+//! contained value of [`Err`]
+//!
+//! [`map_or`]: Result::map_or
+//! [`map_or_else`]: Result::map_or_else
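+//!
+//! For example, mapping a success to its length or substituting one:
+//!
+//! ```
+//! let x: Result<&str, u32> = Ok("foo");
+//! assert_eq!(x.map_or(0, |s| s.len()), 3);
+//!
+//! let y: Result<&str, u32> = Err(13);
+//! assert_eq!(y.map_or_else(|e| e as usize, |s| s.len()), 13);
+//! ```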
+//!
+//! ## Boolean operators
+//!
+//! These methods treat the [`Result`] as a boolean value, where [`Ok`]
+//! acts like [`true`] and [`Err`] acts like [`false`]. There are two
+//! categories of these methods: ones that take a [`Result`] as input, and
+//! ones that take a function as input (to be lazily evaluated).
+//!
+//! The [`and`] and [`or`] methods take another [`Result`] as input, and
+//! produce a [`Result`] as output. The [`and`] method can produce a
+//! [`Result<U, E>`] value having a different inner type `U` than
+//! [`Result<T, E>`]. The [`or`] method can produce a [`Result<T, F>`]
+//! value having a different error type `F` than [`Result<T, E>`].
+//!
+//! | method | self | input | output |
+//! |---------|----------|-----------|----------|
+//! | [`and`] | `Err(e)` | (ignored) | `Err(e)` |
+//! | [`and`] | `Ok(x)` | `Err(d)` | `Err(d)` |
+//! | [`and`] | `Ok(x)` | `Ok(y)` | `Ok(y)` |
+//! | [`or`] | `Err(e)` | `Err(d)` | `Err(d)` |
+//! | [`or`] | `Err(e)` | `Ok(y)` | `Ok(y)` |
+//! | [`or`] | `Ok(x)` | (ignored) | `Ok(x)` |
+//!
+//! [`and`]: Result::and
+//! [`or`]: Result::or
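+//!
+//! For example:
+//!
+//! ```
+//! let good: Result<u32, &str> = Ok(1);
+//! let bad: Result<u32, &str> = Err("oops");
+//!
+//! assert_eq!(good.and(Err::<u32, _>("late")), Err("late"));
+//! assert_eq!(bad.and(Ok::<u32, _>(2)), Err("oops"));
+//! assert_eq!(bad.or(Ok::<_, &str>(3)), Ok(3));
+//! ```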
+//!
+//! The [`and_then`] and [`or_else`] methods take a function as input, and
+//! only evaluate the function when they need to produce a new value. The
+//! [`and_then`] method can produce a [`Result<U, E>`] value having a
+//! different inner type `U` than [`Result<T, E>`]. The [`or_else`] method
+//! can produce a [`Result<T, F>`] value having a different error type `F`
+//! than [`Result<T, E>`].
+//!
+//! | method | self | function input | function result | output |
+//! |--------------|----------|----------------|-----------------|----------|
+//! | [`and_then`] | `Err(e)` | (not provided) | (not evaluated) | `Err(e)` |
+//! | [`and_then`] | `Ok(x)` | `x` | `Err(d)` | `Err(d)` |
+//! | [`and_then`] | `Ok(x)` | `x` | `Ok(y)` | `Ok(y)` |
+//! | [`or_else`] | `Err(e)` | `e` | `Err(d)` | `Err(d)` |
+//! | [`or_else`] | `Err(e)` | `e` | `Ok(y)` | `Ok(y)` |
+//! | [`or_else`] | `Ok(x)` | (not provided) | (not evaluated) | `Ok(x)` |
+//!
+//! [`and_then`]: Result::and_then
+//! [`or_else`]: Result::or_else
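+//!
+//! For example, chaining a small fallible helper (`halve`, defined just for
+//! this sketch) and recovering from failure:
+//!
+//! ```
+//! fn halve(x: u32) -> Result<u32, &'static str> {
+//!     if x % 2 == 0 { Ok(x / 2) } else { Err("odd") }
+//! }
+//!
+//! assert_eq!(Ok(8).and_then(halve).and_then(halve), Ok(2));
+//! assert_eq!(Ok(7).and_then(halve), Err("odd"));
+//! assert_eq!(Err("upstream").or_else(|_| halve(8)), Ok(4));
+//! ```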
+//!
+//! ## Comparison operators
+//!
+//! If `T` and `E` both implement [`PartialOrd`] then [`Result<T, E>`] will
+//! derive its [`PartialOrd`] implementation. With this order, an [`Ok`]
+//! compares as less than any [`Err`], while two [`Ok`] or two [`Err`]
+//! compare as their contained values would in `T` or `E` respectively. If `T`
+//! and `E` both also implement [`Ord`], then so does [`Result<T, E>`].
+//!
+//! ```
+//! assert!(Ok(1) < Err(0));
+//! let x: Result<i32, ()> = Ok(0);
+//! let y = Ok(1);
+//! assert!(x < y);
+//! let x: Result<(), i32> = Err(0);
+//! let y = Err(1);
+//! assert!(x < y);
+//! ```
+//!
+//! ## Iterating over `Result`
+//!
+//! A [`Result`] can be iterated over. This can be helpful if you need an
+//! iterator that is conditionally empty. The iterator will either produce
+//! a single value (when the [`Result`] is [`Ok`]), or produce no values
+//! (when the [`Result`] is [`Err`]). For example, [`into_iter`] acts like
+//! [`once(v)`] if the [`Result`] is [`Ok(v)`], and like [`empty()`] if the
+//! [`Result`] is [`Err`].
+//!
+//! [`Ok(v)`]: Ok
+//! [`empty()`]: crate::iter::empty
+//! [`once(v)`]: crate::iter::once
+//!
+//! Iterators over [`Result<T, E>`] come in three types:
+//!
+//! * [`into_iter`] consumes the [`Result`] and produces the contained
+//! value
+//! * [`iter`] produces an immutable reference of type `&T` to the
+//! contained value
+//! * [`iter_mut`] produces a mutable reference of type `&mut T` to the
+//! contained value
+//!
+//! See [Iterating over `Option`] for examples of how this can be useful.
+//!
+//! [Iterating over `Option`]: crate::option#iterating-over-option
+//! [`into_iter`]: Result::into_iter
+//! [`iter`]: Result::iter
+//! [`iter_mut`]: Result::iter_mut
+//!
+//! You might want to use an iterator chain to do multiple instances of an
+//! operation that can fail, but would like to ignore failures while
+//! continuing to process the successful results. In this example, we take
+//! advantage of the iterable nature of [`Result`] to select only the
+//! [`Ok`] values using [`flatten`][Iterator::flatten].
+//!
+//! ```
+//! # use std::str::FromStr;
+//! let mut results = vec![];
+//! let mut errs = vec![];
+//! let nums: Vec<_> = ["17", "not a number", "99", "-27", "768"]
+//! .into_iter()
+//! .map(u8::from_str)
+//! // Save clones of the raw `Result` values to inspect
+//! .inspect(|x| results.push(x.clone()))
+//! // Challenge: explain how this captures only the `Err` values
+//! .inspect(|x| errs.extend(x.clone().err()))
+//! .flatten()
+//! .collect();
+//! assert_eq!(errs.len(), 3);
+//! assert_eq!(nums, [17, 99]);
+//! println!("results {results:?}");
+//! println!("errs {errs:?}");
+//! println!("nums {nums:?}");
+//! ```
+//!
+//! ## Collecting into `Result`
+//!
+//! [`Result`] implements the [`FromIterator`][impl-FromIterator] trait,
+//! which allows an iterator over [`Result`] values to be collected into a
+//! [`Result`] of a collection of each contained value of the original
+//! [`Result`] values, or [`Err`] if any of the elements was [`Err`].
+//!
+//! [impl-FromIterator]: Result#impl-FromIterator%3CResult%3CA%2C%20E%3E%3E-for-Result%3CV%2C%20E%3E
+//!
+//! ```
+//! let v = [Ok(2), Ok(4), Err("err!"), Ok(8)];
+//! let res: Result<Vec<_>, &str> = v.into_iter().collect();
+//! assert_eq!(res, Err("err!"));
+//! let v = [Ok(2), Ok(4), Ok(8)];
+//! let res: Result<Vec<_>, &str> = v.into_iter().collect();
+//! assert_eq!(res, Ok(vec![2, 4, 8]));
+//! ```
+//!
+//! [`Result`] also implements the [`Product`][impl-Product] and
+//! [`Sum`][impl-Sum] traits, allowing an iterator over [`Result`] values
+//! to provide the [`product`][Iterator::product] and
+//! [`sum`][Iterator::sum] methods.
+//!
+//! [impl-Product]: Result#impl-Product%3CResult%3CU%2C%20E%3E%3E-for-Result%3CT%2C%20E%3E
+//! [impl-Sum]: Result#impl-Sum%3CResult%3CU%2C%20E%3E%3E-for-Result%3CT%2C%20E%3E
+//!
+//! ```
+//! let v = [Err("error!"), Ok(1), Ok(2), Ok(3), Err("foo")];
+//! let res: Result<i32, &str> = v.into_iter().sum();
+//! assert_eq!(res, Err("error!"));
+//! let v = [Ok(1), Ok(2), Ok(21)];
+//! let res: Result<i32, &str> = v.into_iter().product();
+//! assert_eq!(res, Ok(42));
+//! ```
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::iter::{self, FromIterator, FusedIterator, TrustedLen};
+use crate::marker::Destruct;
+use crate::ops::{self, ControlFlow, Deref, DerefMut};
+use crate::{convert, fmt, hint};
+
+/// `Result` is a type that represents either success ([`Ok`]) or failure ([`Err`]).
+///
+/// See the [module documentation](self) for details.
+#[derive(Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
+#[must_use = "this `Result` may be an `Err` variant, which should be handled"]
+#[rustc_diagnostic_item = "Result"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub enum Result<T, E> {
+ /// Contains the success value
+ #[lang = "Ok"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Ok(#[stable(feature = "rust1", since = "1.0.0")] T),
+
+ /// Contains the error value
+ #[lang = "Err"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Err(#[stable(feature = "rust1", since = "1.0.0")] E),
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// Type implementation
+/////////////////////////////////////////////////////////////////////////////
+
+impl<T, E> Result<T, E> {
+ /////////////////////////////////////////////////////////////////////////
+ // Querying the contained values
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Returns `true` if the result is [`Ok`].
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let x: Result<i32, &str> = Ok(-3);
+ /// assert_eq!(x.is_ok(), true);
+ ///
+ /// let x: Result<i32, &str> = Err("Some error message");
+ /// assert_eq!(x.is_ok(), false);
+ /// ```
+ #[must_use = "if you intended to assert that this is ok, consider `.unwrap()` instead"]
+ #[rustc_const_stable(feature = "const_result_basics", since = "1.48.0")]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const fn is_ok(&self) -> bool {
+ matches!(*self, Ok(_))
+ }
+
+ /// Returns `true` if the result is [`Ok`] and the value inside of it matches a predicate.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(is_some_with)]
+ ///
+ /// let x: Result<u32, &str> = Ok(2);
+ /// assert_eq!(x.is_ok_and(|&x| x > 1), true);
+ ///
+ /// let x: Result<u32, &str> = Ok(0);
+ /// assert_eq!(x.is_ok_and(|&x| x > 1), false);
+ ///
+ /// let x: Result<u32, &str> = Err("hey");
+ /// assert_eq!(x.is_ok_and(|&x| x > 1), false);
+ /// ```
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "is_some_with", issue = "93050")]
+ pub fn is_ok_and(&self, f: impl FnOnce(&T) -> bool) -> bool {
+ matches!(self, Ok(x) if f(x))
+ }
+
+ /// Returns `true` if the result is [`Err`].
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let x: Result<i32, &str> = Ok(-3);
+ /// assert_eq!(x.is_err(), false);
+ ///
+ /// let x: Result<i32, &str> = Err("Some error message");
+ /// assert_eq!(x.is_err(), true);
+ /// ```
+ #[must_use = "if you intended to assert that this is err, consider `.unwrap_err()` instead"]
+ #[rustc_const_stable(feature = "const_result_basics", since = "1.48.0")]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const fn is_err(&self) -> bool {
+ !self.is_ok()
+ }
+
+ /// Returns `true` if the result is [`Err`] and the value inside of it matches a predicate.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(is_some_with)]
+ /// use std::io::{Error, ErrorKind};
+ ///
+ /// let x: Result<u32, Error> = Err(Error::new(ErrorKind::NotFound, "!"));
+ /// assert_eq!(x.is_err_and(|x| x.kind() == ErrorKind::NotFound), true);
+ ///
+ /// let x: Result<u32, Error> = Err(Error::new(ErrorKind::PermissionDenied, "!"));
+ /// assert_eq!(x.is_err_and(|x| x.kind() == ErrorKind::NotFound), false);
+ ///
+ /// let x: Result<u32, Error> = Ok(123);
+ /// assert_eq!(x.is_err_and(|x| x.kind() == ErrorKind::NotFound), false);
+ /// ```
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "is_some_with", issue = "93050")]
+ pub fn is_err_and(&self, f: impl FnOnce(&E) -> bool) -> bool {
+ matches!(self, Err(x) if f(x))
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ // Adapter for each variant
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Converts from `Result<T, E>` to [`Option<T>`].
+ ///
+ /// Converts `self` into an [`Option<T>`], consuming `self`,
+ /// and discarding the error, if any.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let x: Result<u32, &str> = Ok(2);
+ /// assert_eq!(x.ok(), Some(2));
+ ///
+ /// let x: Result<u32, &str> = Err("Nothing here");
+ /// assert_eq!(x.ok(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_result_drop", issue = "92384")]
+ pub const fn ok(self) -> Option<T>
+ where
+ E: ~const Destruct,
+ {
+ match self {
+ Ok(x) => Some(x),
+ // FIXME: ~const Drop doesn't quite work right yet
+ #[allow(unused_variables)]
+ Err(x) => None,
+ }
+ }
+
+ /// Converts from `Result<T, E>` to [`Option<E>`].
+ ///
+ /// Converts `self` into an [`Option<E>`], consuming `self`,
+ /// and discarding the success value, if any.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let x: Result<u32, &str> = Ok(2);
+ /// assert_eq!(x.err(), None);
+ ///
+ /// let x: Result<u32, &str> = Err("Nothing here");
+ /// assert_eq!(x.err(), Some("Nothing here"));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_result_drop", issue = "92384")]
+ pub const fn err(self) -> Option<E>
+ where
+ T: ~const Destruct,
+ {
+ match self {
+ // FIXME: ~const Drop doesn't quite work right yet
+ #[allow(unused_variables)]
+ Ok(x) => None,
+ Err(x) => Some(x),
+ }
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ // Adapter for working with references
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Converts from `&Result<T, E>` to `Result<&T, &E>`.
+ ///
+ /// Produces a new `Result`, containing a reference
+ /// into the original, leaving the original in place.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let x: Result<u32, &str> = Ok(2);
+ /// assert_eq!(x.as_ref(), Ok(&2));
+ ///
+ /// let x: Result<u32, &str> = Err("Error");
+ /// assert_eq!(x.as_ref(), Err(&"Error"));
+ /// ```
+ #[inline]
+ #[rustc_const_stable(feature = "const_result_basics", since = "1.48.0")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const fn as_ref(&self) -> Result<&T, &E> {
+ match *self {
+ Ok(ref x) => Ok(x),
+ Err(ref x) => Err(x),
+ }
+ }
+
+ /// Converts from `&mut Result<T, E>` to `Result<&mut T, &mut E>`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// fn mutate(r: &mut Result<i32, i32>) {
+ /// match r.as_mut() {
+ /// Ok(v) => *v = 42,
+ /// Err(e) => *e = 0,
+ /// }
+ /// }
+ ///
+ /// let mut x: Result<i32, i32> = Ok(2);
+ /// mutate(&mut x);
+ /// assert_eq!(x.unwrap(), 42);
+ ///
+ /// let mut x: Result<i32, i32> = Err(13);
+ /// mutate(&mut x);
+ /// assert_eq!(x.unwrap_err(), 0);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_result", issue = "82814")]
+ pub const fn as_mut(&mut self) -> Result<&mut T, &mut E> {
+ match *self {
+ Ok(ref mut x) => Ok(x),
+ Err(ref mut x) => Err(x),
+ }
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ // Transforming contained values
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Maps a `Result<T, E>` to `Result<U, E>` by applying a function to a
+ /// contained [`Ok`] value, leaving an [`Err`] value untouched.
+ ///
+ /// This function can be used to compose the results of two functions.
+ ///
+ /// # Examples
+ ///
+ /// Print the numbers on each line of a string multiplied by two.
+ ///
+ /// ```
+ /// let line = "1\n2\n3\n4\n";
+ ///
+ /// for num in line.lines() {
+ /// match num.parse::<i32>().map(|i| i * 2) {
+ /// Ok(n) => println!("{n}"),
+ /// Err(..) => {}
+ /// }
+ /// }
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn map<U, F: FnOnce(T) -> U>(self, op: F) -> Result<U, E> {
+ match self {
+ Ok(t) => Ok(op(t)),
+ Err(e) => Err(e),
+ }
+ }
+
+ /// Returns the provided default (if [`Err`]), or
+ /// applies a function to the contained value (if [`Ok`]).
+ ///
+ /// Arguments passed to `map_or` are eagerly evaluated; if you are passing
+ /// the result of a function call, it is recommended to use [`map_or_else`],
+ /// which is lazily evaluated.
+ ///
+ /// [`map_or_else`]: Result::map_or_else
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x: Result<_, &str> = Ok("foo");
+ /// assert_eq!(x.map_or(42, |v| v.len()), 3);
+ ///
+ /// let x: Result<&str, _> = Err("bar");
+ /// assert_eq!(x.map_or(42, |v| v.len()), 42);
+ /// ```
+ #[inline]
+ #[stable(feature = "result_map_or", since = "1.41.0")]
+ pub fn map_or<U, F: FnOnce(T) -> U>(self, default: U, f: F) -> U {
+ match self {
+ Ok(t) => f(t),
+ Err(_) => default,
+ }
+ }
+
+ /// Maps a `Result<T, E>` to `U` by applying fallback function `default` to
+ /// a contained [`Err`] value, or function `f` to a contained [`Ok`] value.
+ ///
+ /// This function can be used to unpack a successful result
+ /// while handling an error.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let k = 21;
+ ///
+ /// let x: Result<_, &str> = Ok("foo");
+ /// assert_eq!(x.map_or_else(|e| k * 2, |v| v.len()), 3);
+ ///
+ /// let x: Result<&str, _> = Err("bar");
+ /// assert_eq!(x.map_or_else(|e| k * 2, |v| v.len()), 42);
+ /// ```
+ #[inline]
+ #[stable(feature = "result_map_or_else", since = "1.41.0")]
+ pub fn map_or_else<U, D: FnOnce(E) -> U, F: FnOnce(T) -> U>(self, default: D, f: F) -> U {
+ match self {
+ Ok(t) => f(t),
+ Err(e) => default(e),
+ }
+ }
+
+ /// Maps a `Result<T, E>` to `Result<T, F>` by applying a function to a
+ /// contained [`Err`] value, leaving an [`Ok`] value untouched.
+ ///
+ /// This function can be used to pass through a successful result while handling
+ /// an error.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// fn stringify(x: u32) -> String { format!("error code: {x}") }
+ ///
+ /// let x: Result<u32, u32> = Ok(2);
+ /// assert_eq!(x.map_err(stringify), Ok(2));
+ ///
+ /// let x: Result<u32, u32> = Err(13);
+ /// assert_eq!(x.map_err(stringify), Err("error code: 13".to_string()));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn map_err<F, O: FnOnce(E) -> F>(self, op: O) -> Result<T, F> {
+ match self {
+ Ok(t) => Ok(t),
+ Err(e) => Err(op(e)),
+ }
+ }
+
+ /// Calls the provided closure with a reference to the contained value (if [`Ok`]).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(result_option_inspect)]
+ ///
+ /// let x: u8 = "4"
+ /// .parse::<u8>()
+ /// .inspect(|x| println!("original: {x}"))
+ /// .map(|x| x.pow(3))
+ /// .expect("failed to parse number");
+ /// ```
+ #[inline]
+ #[unstable(feature = "result_option_inspect", issue = "91345")]
+ pub fn inspect<F: FnOnce(&T)>(self, f: F) -> Self {
+ if let Ok(ref t) = self {
+ f(t);
+ }
+
+ self
+ }
+
+ /// Calls the provided closure with a reference to the contained error (if [`Err`]).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(result_option_inspect)]
+ ///
+ /// use std::{fs, io};
+ ///
+ /// fn read() -> io::Result<String> {
+ /// fs::read_to_string("address.txt")
+ /// .inspect_err(|e| eprintln!("failed to read file: {e}"))
+ /// }
+ /// ```
+ #[inline]
+ #[unstable(feature = "result_option_inspect", issue = "91345")]
+ pub fn inspect_err<F: FnOnce(&E)>(self, f: F) -> Self {
+ if let Err(ref e) = self {
+ f(e);
+ }
+
+ self
+ }
+
+ /// Converts from `Result<T, E>` (or `&Result<T, E>`) to `Result<&<T as Deref>::Target, &E>`.
+ ///
+ /// Coerces the [`Ok`] variant of the original [`Result`] via [`Deref`](crate::ops::Deref)
+ /// and returns the new [`Result`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x: Result<String, u32> = Ok("hello".to_string());
+ /// let y: Result<&str, &u32> = Ok("hello");
+ /// assert_eq!(x.as_deref(), y);
+ ///
+ /// let x: Result<String, u32> = Err(42);
+ /// let y: Result<&str, &u32> = Err(&42);
+ /// assert_eq!(x.as_deref(), y);
+ /// ```
+ #[stable(feature = "inner_deref", since = "1.47.0")]
+ pub fn as_deref(&self) -> Result<&T::Target, &E>
+ where
+ T: Deref,
+ {
+ self.as_ref().map(|t| t.deref())
+ }
+
+ /// Converts from `Result<T, E>` (or `&mut Result<T, E>`) to `Result<&mut <T as DerefMut>::Target, &mut E>`.
+ ///
+ /// Coerces the [`Ok`] variant of the original [`Result`] via [`DerefMut`](crate::ops::DerefMut)
+ /// and returns the new [`Result`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut s = "HELLO".to_string();
+ /// let mut x: Result<String, u32> = Ok("hello".to_string());
+ /// let y: Result<&mut str, &mut u32> = Ok(&mut s);
+ /// assert_eq!(x.as_deref_mut().map(|x| { x.make_ascii_uppercase(); x }), y);
+ ///
+ /// let mut i = 42;
+ /// let mut x: Result<String, u32> = Err(42);
+ /// let y: Result<&mut str, &mut u32> = Err(&mut i);
+ /// assert_eq!(x.as_deref_mut().map(|x| { x.make_ascii_uppercase(); x }), y);
+ /// ```
+ #[stable(feature = "inner_deref", since = "1.47.0")]
+ pub fn as_deref_mut(&mut self) -> Result<&mut T::Target, &mut E>
+ where
+ T: DerefMut,
+ {
+ self.as_mut().map(|t| t.deref_mut())
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ // Iterator constructors
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Returns an iterator over the possibly contained value.
+ ///
+ /// The iterator yields one value if the result is [`Result::Ok`], otherwise none.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let x: Result<u32, &str> = Ok(7);
+ /// assert_eq!(x.iter().next(), Some(&7));
+ ///
+ /// let x: Result<u32, &str> = Err("nothing!");
+ /// assert_eq!(x.iter().next(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn iter(&self) -> Iter<'_, T> {
+ Iter { inner: self.as_ref().ok() }
+ }
+
+ /// Returns a mutable iterator over the possibly contained value.
+ ///
+ /// The iterator yields one value if the result is [`Result::Ok`], otherwise none.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut x: Result<u32, &str> = Ok(7);
+ /// match x.iter_mut().next() {
+ /// Some(v) => *v = 40,
+ /// None => {},
+ /// }
+ /// assert_eq!(x, Ok(40));
+ ///
+ /// let mut x: Result<u32, &str> = Err("nothing!");
+ /// assert_eq!(x.iter_mut().next(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn iter_mut(&mut self) -> IterMut<'_, T> {
+ IterMut { inner: self.as_mut().ok() }
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ // Extract a value
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Returns the contained [`Ok`] value, consuming the `self` value.
+ ///
+ /// Because this function may panic, its use is generally discouraged.
+ /// Instead, prefer to use pattern matching and handle the [`Err`]
+ /// case explicitly, or call [`unwrap_or`], [`unwrap_or_else`], or
+ /// [`unwrap_or_default`].
+ ///
+ /// [`unwrap_or`]: Result::unwrap_or
+ /// [`unwrap_or_else`]: Result::unwrap_or_else
+ /// [`unwrap_or_default`]: Result::unwrap_or_default
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value is an [`Err`], with a panic message including the
+ /// passed message, and the content of the [`Err`].
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```should_panic
+ /// let x: Result<u32, &str> = Err("emergency failure");
+ /// x.expect("Testing expect"); // panics with `Testing expect: emergency failure`
+ /// ```
+ ///
+ /// # Recommended Message Style
+ ///
+ /// We recommend that `expect` messages are used to describe the reason you
+ /// _expect_ the `Result` to be `Ok`.
+ ///
+ /// ```should_panic
+ /// let path = std::env::var("IMPORTANT_PATH")
+ /// .expect("env variable `IMPORTANT_PATH` should be set by `wrapper_script.sh`");
+ /// ```
+ ///
+ /// **Hint**: If you're having trouble remembering how to phrase expect
+ /// error messages, remember to focus on the word "should" as in "env
+ /// variable should be set by blah" or "the given binary should be available
+ /// and executable by the current user".
+ ///
+ /// For more detail on expect message styles and the reasoning behind our recommendation please
+ /// refer to the section on ["Common Message
+ /// Styles"](../../std/error/index.html#common-message-styles) in the
+ /// [`std::error`](../../std/error/index.html) module docs.
+ #[inline]
+ #[track_caller]
+ #[stable(feature = "result_expect", since = "1.4.0")]
+ pub fn expect(self, msg: &str) -> T
+ where
+ E: fmt::Debug,
+ {
+ match self {
+ Ok(t) => t,
+ Err(e) => unwrap_failed(msg, &e),
+ }
+ }
+
+ /// Returns the contained [`Ok`] value, consuming the `self` value.
+ ///
+ /// Because this function may panic, its use is generally discouraged.
+ /// Instead, prefer to use pattern matching and handle the [`Err`]
+ /// case explicitly, or call [`unwrap_or`], [`unwrap_or_else`], or
+ /// [`unwrap_or_default`].
+ ///
+ /// [`unwrap_or`]: Result::unwrap_or
+ /// [`unwrap_or_else`]: Result::unwrap_or_else
+ /// [`unwrap_or_default`]: Result::unwrap_or_default
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value is an [`Err`], with a panic message provided by the
+ /// [`Err`]'s value.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let x: Result<u32, &str> = Ok(2);
+ /// assert_eq!(x.unwrap(), 2);
+ /// ```
+ ///
+ /// ```should_panic
+ /// let x: Result<u32, &str> = Err("emergency failure");
+ /// x.unwrap(); // panics with `emergency failure`
+ /// ```
+ #[inline]
+ #[track_caller]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn unwrap(self) -> T
+ where
+ E: fmt::Debug,
+ {
+ match self {
+ Ok(t) => t,
+ Err(e) => unwrap_failed("called `Result::unwrap()` on an `Err` value", &e),
+ }
+ }
+
+ /// Returns the contained [`Ok`] value or a default.
+ ///
+ /// Consumes the `self` argument; if [`Ok`], returns the contained
+ /// value, otherwise returns the default value for the type `T`.
+ ///
+ /// # Examples
+ ///
+ /// Converts a string to an integer, turning poorly-formed strings
+ /// into 0 (the default value for integers). [`parse`] converts
+ /// a string to any other type that implements [`FromStr`], returning an
+ /// [`Err`] on error.
+ ///
+ /// ```
+ /// let good_year_from_input = "1909";
+ /// let bad_year_from_input = "190blarg";
+ /// let good_year = good_year_from_input.parse().unwrap_or_default();
+ /// let bad_year = bad_year_from_input.parse().unwrap_or_default();
+ ///
+ /// assert_eq!(1909, good_year);
+ /// assert_eq!(0, bad_year);
+ /// ```
+ ///
+ /// [`parse`]: str::parse
+ /// [`FromStr`]: crate::str::FromStr
+ #[inline]
+ #[stable(feature = "result_unwrap_or_default", since = "1.16.0")]
+ pub fn unwrap_or_default(self) -> T
+ where
+ T: Default,
+ {
+ match self {
+ Ok(x) => x,
+ Err(_) => Default::default(),
+ }
+ }
+
+ /// Returns the contained [`Err`] value, consuming the `self` value.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value is an [`Ok`], with a panic message including the
+ /// passed message, and the content of the [`Ok`].
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```should_panic
+ /// let x: Result<u32, &str> = Ok(10);
+ /// x.expect_err("Testing expect_err"); // panics with `Testing expect_err: 10`
+ /// ```
+ #[inline]
+ #[track_caller]
+ #[stable(feature = "result_expect_err", since = "1.17.0")]
+ pub fn expect_err(self, msg: &str) -> E
+ where
+ T: fmt::Debug,
+ {
+ match self {
+ Ok(t) => unwrap_failed(msg, &t),
+ Err(e) => e,
+ }
+ }
+
+ /// Returns the contained [`Err`] value, consuming the `self` value.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value is an [`Ok`], with a panic message provided by the
+ /// [`Ok`]'s value.
+ ///
+ /// # Examples
+ ///
+ /// ```should_panic
+ /// let x: Result<u32, &str> = Ok(2);
+ /// x.unwrap_err(); // panics with `2`
+ /// ```
+ ///
+ /// ```
+ /// let x: Result<u32, &str> = Err("emergency failure");
+ /// assert_eq!(x.unwrap_err(), "emergency failure");
+ /// ```
+ #[inline]
+ #[track_caller]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn unwrap_err(self) -> E
+ where
+ T: fmt::Debug,
+ {
+ match self {
+ Ok(t) => unwrap_failed("called `Result::unwrap_err()` on an `Ok` value", &t),
+ Err(e) => e,
+ }
+ }
+
+ /// Returns the contained [`Ok`] value, but never panics.
+ ///
+ /// Unlike [`unwrap`], this method is known to never panic on the
+ /// result types it is implemented for. Therefore, it can be used
+ /// instead of `unwrap` as a maintainability safeguard that will fail
+ /// to compile if the error type of the `Result` is later changed
+ /// to an error that can actually occur.
+ ///
+ /// [`unwrap`]: Result::unwrap
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// # #![feature(never_type)]
+ /// # #![feature(unwrap_infallible)]
+ ///
+ /// fn only_good_news() -> Result<String, !> {
+ /// Ok("this is fine".into())
+ /// }
+ ///
+ /// let s: String = only_good_news().into_ok();
+ /// println!("{s}");
+ /// ```
+ #[unstable(feature = "unwrap_infallible", reason = "newly added", issue = "61695")]
+ #[inline]
+ pub fn into_ok(self) -> T
+ where
+ E: Into<!>,
+ {
+ match self {
+ Ok(x) => x,
+ Err(e) => e.into(),
+ }
+ }
+
+ /// Returns the contained [`Err`] value, but never panics.
+ ///
+ /// Unlike [`unwrap_err`], this method is known to never panic on the
+ /// result types it is implemented for. Therefore, it can be used
+ /// instead of `unwrap_err` as a maintainability safeguard that will fail
+ /// to compile if the ok type of the `Result` is later changed
+ /// to a type that can actually occur.
+ ///
+ /// [`unwrap_err`]: Result::unwrap_err
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// # #![feature(never_type)]
+ /// # #![feature(unwrap_infallible)]
+ ///
+ /// fn only_bad_news() -> Result<!, String> {
+ /// Err("Oops, it failed".into())
+ /// }
+ ///
+ /// let error: String = only_bad_news().into_err();
+ /// println!("{error}");
+ /// ```
+ #[unstable(feature = "unwrap_infallible", reason = "newly added", issue = "61695")]
+ #[inline]
+ pub fn into_err(self) -> E
+ where
+ T: Into<!>,
+ {
+ match self {
+ Ok(x) => x.into(),
+ Err(e) => e,
+ }
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ // Boolean operations on the values, eager and lazy
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Returns `res` if the result is [`Ok`], otherwise returns the [`Err`] value of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let x: Result<u32, &str> = Ok(2);
+ /// let y: Result<&str, &str> = Err("late error");
+ /// assert_eq!(x.and(y), Err("late error"));
+ ///
+ /// let x: Result<u32, &str> = Err("early error");
+ /// let y: Result<&str, &str> = Ok("foo");
+ /// assert_eq!(x.and(y), Err("early error"));
+ ///
+ /// let x: Result<u32, &str> = Err("not a 2");
+ /// let y: Result<&str, &str> = Err("late error");
+ /// assert_eq!(x.and(y), Err("not a 2"));
+ ///
+ /// let x: Result<u32, &str> = Ok(2);
+ /// let y: Result<&str, &str> = Ok("different result type");
+ /// assert_eq!(x.and(y), Ok("different result type"));
+ /// ```
+ #[inline]
+ #[rustc_const_unstable(feature = "const_result_drop", issue = "92384")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const fn and<U>(self, res: Result<U, E>) -> Result<U, E>
+ where
+ T: ~const Destruct,
+ U: ~const Destruct,
+ E: ~const Destruct,
+ {
+ match self {
+ // FIXME: ~const Drop doesn't quite work right yet
+ #[allow(unused_variables)]
+ Ok(x) => res,
+ Err(e) => Err(e),
+ }
+ }
+
+ /// Calls `op` if the result is [`Ok`], otherwise returns the [`Err`] value of `self`.
+ ///
+ /// This function can be used for control flow based on `Result` values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// fn sq_then_to_string(x: u32) -> Result<String, &'static str> {
+ /// x.checked_mul(x).map(|sq| sq.to_string()).ok_or("overflowed")
+ /// }
+ ///
+ /// assert_eq!(Ok(2).and_then(sq_then_to_string), Ok(4.to_string()));
+ /// assert_eq!(Ok(1_000_000).and_then(sq_then_to_string), Err("overflowed"));
+ /// assert_eq!(Err("not a number").and_then(sq_then_to_string), Err("not a number"));
+ /// ```
+ ///
+ /// Often used to chain fallible operations that may return [`Err`].
+ ///
+ /// ```
+ /// use std::{io::ErrorKind, path::Path};
+ ///
+ /// // Note: on Windows "/" maps to "C:\"
+ /// let root_modified_time = Path::new("/").metadata().and_then(|md| md.modified());
+ /// assert!(root_modified_time.is_ok());
+ ///
+ /// let should_fail = Path::new("/bad/path").metadata().and_then(|md| md.modified());
+ /// assert!(should_fail.is_err());
+ /// assert_eq!(should_fail.unwrap_err().kind(), ErrorKind::NotFound);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn and_then<U, F: FnOnce(T) -> Result<U, E>>(self, op: F) -> Result<U, E> {
+ match self {
+ Ok(t) => op(t),
+ Err(e) => Err(e),
+ }
+ }
+
+ /// Returns `res` if the result is [`Err`], otherwise returns the [`Ok`] value of `self`.
+ ///
+ /// Arguments passed to `or` are eagerly evaluated; if you are passing the
+ /// result of a function call, it is recommended to use [`or_else`], which is
+ /// lazily evaluated.
+ ///
+ /// [`or_else`]: Result::or_else
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let x: Result<u32, &str> = Ok(2);
+ /// let y: Result<u32, &str> = Err("late error");
+ /// assert_eq!(x.or(y), Ok(2));
+ ///
+ /// let x: Result<u32, &str> = Err("early error");
+ /// let y: Result<u32, &str> = Ok(2);
+ /// assert_eq!(x.or(y), Ok(2));
+ ///
+ /// let x: Result<u32, &str> = Err("not a 2");
+ /// let y: Result<u32, &str> = Err("late error");
+ /// assert_eq!(x.or(y), Err("late error"));
+ ///
+ /// let x: Result<u32, &str> = Ok(2);
+ /// let y: Result<u32, &str> = Ok(100);
+ /// assert_eq!(x.or(y), Ok(2));
+ /// ```
+ #[inline]
+ #[rustc_const_unstable(feature = "const_result_drop", issue = "92384")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const fn or<F>(self, res: Result<T, F>) -> Result<T, F>
+ where
+ T: ~const Destruct,
+ E: ~const Destruct,
+ F: ~const Destruct,
+ {
+ match self {
+ Ok(v) => Ok(v),
+ // FIXME: ~const Drop doesn't quite work right yet
+ #[allow(unused_variables)]
+ Err(e) => res,
+ }
+ }
+
+ /// Calls `op` if the result is [`Err`], otherwise returns the [`Ok`] value of `self`.
+ ///
+    /// This function can be used for control flow based on `Result` values.
+    ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// fn sq(x: u32) -> Result<u32, u32> { Ok(x * x) }
+ /// fn err(x: u32) -> Result<u32, u32> { Err(x) }
+ ///
+ /// assert_eq!(Ok(2).or_else(sq).or_else(sq), Ok(2));
+ /// assert_eq!(Ok(2).or_else(err).or_else(sq), Ok(2));
+ /// assert_eq!(Err(3).or_else(sq).or_else(err), Ok(9));
+ /// assert_eq!(Err(3).or_else(err).or_else(err), Err(3));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn or_else<F, O: FnOnce(E) -> Result<T, F>>(self, op: O) -> Result<T, F> {
+ match self {
+ Ok(t) => Ok(t),
+ Err(e) => op(e),
+ }
+ }
+
+ /// Returns the contained [`Ok`] value or a provided default.
+ ///
+ /// Arguments passed to `unwrap_or` are eagerly evaluated; if you are passing
+ /// the result of a function call, it is recommended to use [`unwrap_or_else`],
+ /// which is lazily evaluated.
+ ///
+ /// [`unwrap_or_else`]: Result::unwrap_or_else
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let default = 2;
+ /// let x: Result<u32, &str> = Ok(9);
+ /// assert_eq!(x.unwrap_or(default), 9);
+ ///
+ /// let x: Result<u32, &str> = Err("error");
+ /// assert_eq!(x.unwrap_or(default), default);
+ /// ```
+ #[inline]
+ #[rustc_const_unstable(feature = "const_result_drop", issue = "92384")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const fn unwrap_or(self, default: T) -> T
+ where
+ T: ~const Destruct,
+ E: ~const Destruct,
+ {
+ match self {
+ Ok(t) => t,
+ // FIXME: ~const Drop doesn't quite work right yet
+ #[allow(unused_variables)]
+ Err(e) => default,
+ }
+ }
+
+ /// Returns the contained [`Ok`] value or computes it from a closure.
+    ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// fn count(x: &str) -> usize { x.len() }
+ ///
+ /// assert_eq!(Ok(2).unwrap_or_else(count), 2);
+ /// assert_eq!(Err("foo").unwrap_or_else(count), 3);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn unwrap_or_else<F: FnOnce(E) -> T>(self, op: F) -> T {
+ match self {
+ Ok(t) => t,
+ Err(e) => op(e),
+ }
+ }
+
+ /// Returns the contained [`Ok`] value, consuming the `self` value,
+ /// without checking that the value is not an [`Err`].
+ ///
+ /// # Safety
+ ///
+ /// Calling this method on an [`Err`] is *[undefined behavior]*.
+ ///
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x: Result<u32, &str> = Ok(2);
+ /// assert_eq!(unsafe { x.unwrap_unchecked() }, 2);
+ /// ```
+ ///
+ /// ```no_run
+ /// let x: Result<u32, &str> = Err("emergency failure");
+ /// unsafe { x.unwrap_unchecked(); } // Undefined behavior!
+ /// ```
+ #[inline]
+ #[track_caller]
+ #[stable(feature = "option_result_unwrap_unchecked", since = "1.58.0")]
+ pub unsafe fn unwrap_unchecked(self) -> T {
+ debug_assert!(self.is_ok());
+ match self {
+ Ok(t) => t,
+ // SAFETY: the safety contract must be upheld by the caller.
+ Err(_) => unsafe { hint::unreachable_unchecked() },
+ }
+ }
+
+ /// Returns the contained [`Err`] value, consuming the `self` value,
+ /// without checking that the value is not an [`Ok`].
+ ///
+ /// # Safety
+ ///
+ /// Calling this method on an [`Ok`] is *[undefined behavior]*.
+ ///
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// let x: Result<u32, &str> = Ok(2);
+ /// unsafe { x.unwrap_err_unchecked() }; // Undefined behavior!
+ /// ```
+ ///
+ /// ```
+ /// let x: Result<u32, &str> = Err("emergency failure");
+ /// assert_eq!(unsafe { x.unwrap_err_unchecked() }, "emergency failure");
+ /// ```
+ #[inline]
+ #[track_caller]
+ #[stable(feature = "option_result_unwrap_unchecked", since = "1.58.0")]
+ pub unsafe fn unwrap_err_unchecked(self) -> E {
+ debug_assert!(self.is_err());
+ match self {
+ // SAFETY: the safety contract must be upheld by the caller.
+ Ok(_) => unsafe { hint::unreachable_unchecked() },
+ Err(e) => e,
+ }
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ // Misc or niche
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Returns `true` if the result is an [`Ok`] value containing the given value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(option_result_contains)]
+ ///
+ /// let x: Result<u32, &str> = Ok(2);
+ /// assert_eq!(x.contains(&2), true);
+ ///
+ /// let x: Result<u32, &str> = Ok(3);
+ /// assert_eq!(x.contains(&2), false);
+ ///
+ /// let x: Result<u32, &str> = Err("Some error message");
+ /// assert_eq!(x.contains(&2), false);
+ /// ```
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "option_result_contains", issue = "62358")]
+ pub fn contains<U>(&self, x: &U) -> bool
+ where
+ U: PartialEq<T>,
+ {
+ match self {
+ Ok(y) => x == y,
+ Err(_) => false,
+ }
+ }
+
+ /// Returns `true` if the result is an [`Err`] value containing the given value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(result_contains_err)]
+ ///
+ /// let x: Result<u32, &str> = Ok(2);
+ /// assert_eq!(x.contains_err(&"Some error message"), false);
+ ///
+ /// let x: Result<u32, &str> = Err("Some error message");
+ /// assert_eq!(x.contains_err(&"Some error message"), true);
+ ///
+ /// let x: Result<u32, &str> = Err("Some other error message");
+ /// assert_eq!(x.contains_err(&"Some error message"), false);
+ /// ```
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "result_contains_err", issue = "62358")]
+ pub fn contains_err<F>(&self, f: &F) -> bool
+ where
+ F: PartialEq<E>,
+ {
+ match self {
+ Ok(_) => false,
+ Err(e) => f == e,
+ }
+ }
+}
+
+impl<T, E> Result<&T, E> {
+ /// Maps a `Result<&T, E>` to a `Result<T, E>` by copying the contents of the
+ /// `Ok` part.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let val = 12;
+ /// let x: Result<&i32, i32> = Ok(&val);
+ /// assert_eq!(x, Ok(&12));
+ /// let copied = x.copied();
+ /// assert_eq!(copied, Ok(12));
+ /// ```
+ #[inline]
+ #[stable(feature = "result_copied", since = "1.59.0")]
+ pub fn copied(self) -> Result<T, E>
+ where
+ T: Copy,
+ {
+ self.map(|&t| t)
+ }
+
+ /// Maps a `Result<&T, E>` to a `Result<T, E>` by cloning the contents of the
+ /// `Ok` part.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let val = 12;
+ /// let x: Result<&i32, i32> = Ok(&val);
+ /// assert_eq!(x, Ok(&12));
+ /// let cloned = x.cloned();
+ /// assert_eq!(cloned, Ok(12));
+ /// ```
+ #[inline]
+ #[stable(feature = "result_cloned", since = "1.59.0")]
+ pub fn cloned(self) -> Result<T, E>
+ where
+ T: Clone,
+ {
+ self.map(|t| t.clone())
+ }
+}
+
+impl<T, E> Result<&mut T, E> {
+ /// Maps a `Result<&mut T, E>` to a `Result<T, E>` by copying the contents of the
+ /// `Ok` part.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut val = 12;
+ /// let x: Result<&mut i32, i32> = Ok(&mut val);
+ /// assert_eq!(x, Ok(&mut 12));
+ /// let copied = x.copied();
+ /// assert_eq!(copied, Ok(12));
+ /// ```
+ #[inline]
+ #[stable(feature = "result_copied", since = "1.59.0")]
+ pub fn copied(self) -> Result<T, E>
+ where
+ T: Copy,
+ {
+ self.map(|&mut t| t)
+ }
+
+ /// Maps a `Result<&mut T, E>` to a `Result<T, E>` by cloning the contents of the
+ /// `Ok` part.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut val = 12;
+ /// let x: Result<&mut i32, i32> = Ok(&mut val);
+ /// assert_eq!(x, Ok(&mut 12));
+ /// let cloned = x.cloned();
+ /// assert_eq!(cloned, Ok(12));
+ /// ```
+ #[inline]
+ #[stable(feature = "result_cloned", since = "1.59.0")]
+ pub fn cloned(self) -> Result<T, E>
+ where
+ T: Clone,
+ {
+ self.map(|t| t.clone())
+ }
+}
+
+impl<T, E> Result<Option<T>, E> {
+ /// Transposes a `Result` of an `Option` into an `Option` of a `Result`.
+ ///
+ /// `Ok(None)` will be mapped to `None`.
+ /// `Ok(Some(_))` and `Err(_)` will be mapped to `Some(Ok(_))` and `Some(Err(_))`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #[derive(Debug, Eq, PartialEq)]
+ /// struct SomeErr;
+ ///
+ /// let x: Result<Option<i32>, SomeErr> = Ok(Some(5));
+ /// let y: Option<Result<i32, SomeErr>> = Some(Ok(5));
+ /// assert_eq!(x.transpose(), y);
+ /// ```
+ #[inline]
+ #[stable(feature = "transpose_result", since = "1.33.0")]
+ #[rustc_const_unstable(feature = "const_result", issue = "82814")]
+ pub const fn transpose(self) -> Option<Result<T, E>> {
+ match self {
+ Ok(Some(x)) => Some(Ok(x)),
+ Ok(None) => None,
+ Err(e) => Some(Err(e)),
+ }
+ }
+}
+
+impl<T, E> Result<Result<T, E>, E> {
+ /// Converts from `Result<Result<T, E>, E>` to `Result<T, E>`
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(result_flattening)]
+ /// let x: Result<Result<&'static str, u32>, u32> = Ok(Ok("hello"));
+ /// assert_eq!(Ok("hello"), x.flatten());
+ ///
+ /// let x: Result<Result<&'static str, u32>, u32> = Ok(Err(6));
+ /// assert_eq!(Err(6), x.flatten());
+ ///
+ /// let x: Result<Result<&'static str, u32>, u32> = Err(6);
+ /// assert_eq!(Err(6), x.flatten());
+ /// ```
+ ///
+ /// Flattening only removes one level of nesting at a time:
+ ///
+ /// ```
+ /// #![feature(result_flattening)]
+ /// let x: Result<Result<Result<&'static str, u32>, u32>, u32> = Ok(Ok(Ok("hello")));
+ /// assert_eq!(Ok(Ok("hello")), x.flatten());
+ /// assert_eq!(Ok("hello"), x.flatten().flatten());
+ /// ```
+ #[inline]
+ #[unstable(feature = "result_flattening", issue = "70142")]
+ pub fn flatten(self) -> Result<T, E> {
+ self.and_then(convert::identity)
+ }
+}
+
+impl<T> Result<T, T> {
+ /// Returns the [`Ok`] value if `self` is `Ok`, and the [`Err`] value if
+ /// `self` is `Err`.
+ ///
+    /// In other words, this function returns the value (the `T`) of a
+    /// `Result<T, T>`, regardless of whether that result is `Ok` or `Err`.
+ ///
+ /// This can be useful in conjunction with APIs such as
+ /// [`Atomic*::compare_exchange`], or [`slice::binary_search`], but only in
+ /// cases where you don't care if the result was `Ok` or not.
+ ///
+ /// [`Atomic*::compare_exchange`]: crate::sync::atomic::AtomicBool::compare_exchange
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(result_into_ok_or_err)]
+ /// let ok: Result<u32, u32> = Ok(3);
+ /// let err: Result<u32, u32> = Err(4);
+ ///
+ /// assert_eq!(ok.into_ok_or_err(), 3);
+ /// assert_eq!(err.into_ok_or_err(), 4);
+ /// ```
+ #[inline]
+ #[unstable(feature = "result_into_ok_or_err", reason = "newly added", issue = "82223")]
+ pub const fn into_ok_or_err(self) -> T {
+ match self {
+ Ok(v) => v,
+ Err(v) => v,
+ }
+ }
+}
+
+// This is a separate function to reduce the code size of the methods
+#[cfg(not(feature = "panic_immediate_abort"))]
+#[inline(never)]
+#[cold]
+#[track_caller]
+fn unwrap_failed(msg: &str, error: &dyn fmt::Debug) -> ! {
+ panic!("{msg}: {error:?}")
+}
+
+// This is a separate function to avoid constructing a `dyn Debug`
+// that gets immediately thrown away, since vtables don't get cleaned up
+// by dead code elimination if a trait object is constructed even if it goes
+// unused
+#[cfg(feature = "panic_immediate_abort")]
+#[inline]
+#[cold]
+#[track_caller]
+fn unwrap_failed<T>(_msg: &str, _error: &T) -> ! {
+ panic!()
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// Trait implementations
+/////////////////////////////////////////////////////////////////////////////
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_clone", issue = "91805")]
+impl<T, E> const Clone for Result<T, E>
+where
+ T: ~const Clone + ~const Destruct,
+ E: ~const Clone + ~const Destruct,
+{
+ #[inline]
+ fn clone(&self) -> Self {
+ match self {
+ Ok(x) => Ok(x.clone()),
+ Err(x) => Err(x.clone()),
+ }
+ }
+
+ #[inline]
+ fn clone_from(&mut self, source: &Self) {
+ match (self, source) {
+ (Ok(to), Ok(from)) => to.clone_from(from),
+ (Err(to), Err(from)) => to.clone_from(from),
+ (to, from) => *to = from.clone(),
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, E> IntoIterator for Result<T, E> {
+ type Item = T;
+ type IntoIter = IntoIter<T>;
+
+ /// Returns a consuming iterator over the possibly contained value.
+ ///
+ /// The iterator yields one value if the result is [`Result::Ok`], otherwise none.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let x: Result<u32, &str> = Ok(5);
+ /// let v: Vec<u32> = x.into_iter().collect();
+ /// assert_eq!(v, [5]);
+ ///
+ /// let x: Result<u32, &str> = Err("nothing!");
+ /// let v: Vec<u32> = x.into_iter().collect();
+ /// assert_eq!(v, []);
+ /// ```
+ #[inline]
+ fn into_iter(self) -> IntoIter<T> {
+ IntoIter { inner: self.ok() }
+ }
+}
+
+#[stable(since = "1.4.0", feature = "result_iter")]
+impl<'a, T, E> IntoIterator for &'a Result<T, E> {
+ type Item = &'a T;
+ type IntoIter = Iter<'a, T>;
+
+ fn into_iter(self) -> Iter<'a, T> {
+ self.iter()
+ }
+}
+
+#[stable(since = "1.4.0", feature = "result_iter")]
+impl<'a, T, E> IntoIterator for &'a mut Result<T, E> {
+ type Item = &'a mut T;
+ type IntoIter = IterMut<'a, T>;
+
+ fn into_iter(self) -> IterMut<'a, T> {
+ self.iter_mut()
+ }
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// The Result Iterators
+/////////////////////////////////////////////////////////////////////////////
+
+/// An iterator over a reference to the [`Ok`] variant of a [`Result`].
+///
+/// The iterator yields one value if the result is [`Ok`], otherwise none.
+///
+/// Created by [`Result::iter`].
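+///
+/// # Examples
+///
+/// ```
+/// let x: Result<u32, &str> = Ok(5);
+/// let mut iter = x.iter();
+/// assert_eq!(iter.next(), Some(&5));
+/// assert_eq!(iter.next(), None);
+/// ```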
+#[derive(Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Iter<'a, T: 'a> {
+ inner: Option<&'a T>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> Iterator for Iter<'a, T> {
+ type Item = &'a T;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a T> {
+ self.inner.take()
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let n = if self.inner.is_some() { 1 } else { 0 };
+ (n, Some(n))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a T> {
+ self.inner.take()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for Iter<'_, T> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for Iter<'_, T> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<A> TrustedLen for Iter<'_, A> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Clone for Iter<'_, T> {
+ #[inline]
+ fn clone(&self) -> Self {
+ Iter { inner: self.inner }
+ }
+}
+
+/// An iterator over a mutable reference to the [`Ok`] variant of a [`Result`].
+///
+/// Created by [`Result::iter_mut`].
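+///
+/// # Examples
+///
+/// ```
+/// let mut x: Result<u32, &str> = Ok(5);
+/// if let Some(v) = x.iter_mut().next() {
+///     *v += 1;
+/// }
+/// assert_eq!(x, Ok(6));
+/// ```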
+#[derive(Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct IterMut<'a, T: 'a> {
+ inner: Option<&'a mut T>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> Iterator for IterMut<'a, T> {
+ type Item = &'a mut T;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a mut T> {
+ self.inner.take()
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let n = if self.inner.is_some() { 1 } else { 0 };
+ (n, Some(n))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a mut T> {
+ self.inner.take()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for IterMut<'_, T> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for IterMut<'_, T> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<A> TrustedLen for IterMut<'_, A> {}
+
+/// An iterator over the value in an [`Ok`] variant of a [`Result`].
+///
+/// The iterator yields one value if the result is [`Ok`], otherwise none.
+///
+/// This struct is created by the [`into_iter`] method on
+/// [`Result`] (provided by the [`IntoIterator`] trait).
+///
+/// [`into_iter`]: IntoIterator::into_iter
+#[derive(Clone, Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct IntoIter<T> {
+ inner: Option<T>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Iterator for IntoIter<T> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ self.inner.take()
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let n = if self.inner.is_some() { 1 } else { 0 };
+ (n, Some(n))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> DoubleEndedIterator for IntoIter<T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<T> {
+ self.inner.take()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for IntoIter<T> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for IntoIter<T> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<A> TrustedLen for IntoIter<A> {}
+
+/////////////////////////////////////////////////////////////////////////////
+// FromIterator
+/////////////////////////////////////////////////////////////////////////////
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A, E, V: FromIterator<A>> FromIterator<Result<A, E>> for Result<V, E> {
+ /// Takes each element in the `Iterator`: if it is an `Err`, no further
+ /// elements are taken, and the `Err` is returned. Should no `Err` occur, a
+ /// container with the values of each `Result` is returned.
+ ///
+ /// Here is an example which increments every integer in a vector,
+ /// checking for overflow:
+ ///
+ /// ```
+ /// let v = vec![1, 2];
+ /// let res: Result<Vec<u32>, &'static str> = v.iter().map(|x: &u32|
+ /// x.checked_add(1).ok_or("Overflow!")
+ /// ).collect();
+ /// assert_eq!(res, Ok(vec![2, 3]));
+ /// ```
+ ///
+ /// Here is another example that tries to subtract one from another list
+ /// of integers, this time checking for underflow:
+ ///
+ /// ```
+ /// let v = vec![1, 2, 0];
+ /// let res: Result<Vec<u32>, &'static str> = v.iter().map(|x: &u32|
+ /// x.checked_sub(1).ok_or("Underflow!")
+ /// ).collect();
+ /// assert_eq!(res, Err("Underflow!"));
+ /// ```
+ ///
+ /// Here is a variation on the previous example, showing that no
+ /// further elements are taken from `iter` after the first `Err`.
+ ///
+ /// ```
+ /// let v = vec![3, 2, 1, 10];
+ /// let mut shared = 0;
+ /// let res: Result<Vec<u32>, &'static str> = v.iter().map(|x: &u32| {
+ /// shared += x;
+ /// x.checked_sub(2).ok_or("Underflow!")
+ /// }).collect();
+ /// assert_eq!(res, Err("Underflow!"));
+ /// assert_eq!(shared, 6);
+ /// ```
+ ///
+ /// Since the third element caused an underflow, no further elements were taken,
+ /// so the final value of `shared` is 6 (= `3 + 2 + 1`), not 16.
+ #[inline]
+ fn from_iter<I: IntoIterator<Item = Result<A, E>>>(iter: I) -> Result<V, E> {
+ // FIXME(#11084): This could be replaced with Iterator::scan when this
+ // performance bug is closed.
+
+ iter::try_process(iter.into_iter(), |i| i.collect())
+ }
+}
+
+#[unstable(feature = "try_trait_v2", issue = "84277")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl<T, E> const ops::Try for Result<T, E> {
+ type Output = T;
+ type Residual = Result<convert::Infallible, E>;
+
+ #[inline]
+ fn from_output(output: Self::Output) -> Self {
+ Ok(output)
+ }
+
+ #[inline]
+ fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
+ match self {
+ Ok(v) => ControlFlow::Continue(v),
+ Err(e) => ControlFlow::Break(Err(e)),
+ }
+ }
+}
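+
+// Note: with this impl in place, `expr?` on a `Result` desugars roughly
+// (illustrative sketch, not the compiler's exact expansion) to:
+//
+//     match ops::Try::branch(expr) {
+//         ControlFlow::Continue(v) => v,
+//         ControlFlow::Break(r) => return ops::FromResidual::from_residual(r),
+//     }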
+
+#[unstable(feature = "try_trait_v2", issue = "84277")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl<T, E, F: ~const From<E>> const ops::FromResidual<Result<convert::Infallible, E>>
+ for Result<T, F>
+{
+ #[inline]
+ #[track_caller]
+ fn from_residual(residual: Result<convert::Infallible, E>) -> Self {
+ match residual {
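+            // An `Ok` arm is not needed: it would carry a value of
+            // `Infallible`, which has no values, so `Err` is exhaustive here.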
+ Err(e) => Err(From::from(e)),
+ }
+ }
+}
+
+#[unstable(feature = "try_trait_v2_yeet", issue = "96374")]
+impl<T, E, F: From<E>> ops::FromResidual<ops::Yeet<E>> for Result<T, F> {
+ #[inline]
+ fn from_residual(ops::Yeet(e): ops::Yeet<E>) -> Self {
+ Err(From::from(e))
+ }
+}
+
+#[unstable(feature = "try_trait_v2_residual", issue = "91285")]
+impl<T, E> ops::Residual<T> for Result<convert::Infallible, E> {
+ type TryType = Result<T, E>;
+}
diff --git a/library/core/src/slice/ascii.rs b/library/core/src/slice/ascii.rs
new file mode 100644
index 000000000..63715a6b8
--- /dev/null
+++ b/library/core/src/slice/ascii.rs
@@ -0,0 +1,330 @@
+//! Operations on ASCII `[u8]`.
+
+use crate::ascii;
+use crate::fmt::{self, Write};
+use crate::iter;
+use crate::mem;
+use crate::ops;
+
+#[cfg(not(test))]
+impl [u8] {
+ /// Checks if all bytes in this slice are within the ASCII range.
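+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// assert!(b"ascii text".is_ascii());
+    /// assert!(!"Grüße".as_bytes().is_ascii());
+    /// ```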
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[must_use]
+ #[inline]
+ pub fn is_ascii(&self) -> bool {
+ is_ascii(self)
+ }
+
+ /// Checks that two slices are an ASCII case-insensitive match.
+ ///
+ /// Same as `to_ascii_lowercase(a) == to_ascii_lowercase(b)`,
+ /// but without allocating and copying temporaries.
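+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// assert!(b"Ferris".eq_ignore_ascii_case(b"FERRIS"));
+    /// assert!(!b"Ferris".eq_ignore_ascii_case(b"FERRI"));
+    /// ```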
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[must_use]
+ #[inline]
+ pub fn eq_ignore_ascii_case(&self, other: &[u8]) -> bool {
+ self.len() == other.len() && iter::zip(self, other).all(|(a, b)| a.eq_ignore_ascii_case(b))
+ }
+
+ /// Converts this slice to its ASCII upper case equivalent in-place.
+ ///
+ /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To return a new uppercased value without modifying the existing one, use
+ /// [`to_ascii_uppercase`].
+ ///
+ /// [`to_ascii_uppercase`]: #method.to_ascii_uppercase
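+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let mut bytes = *b"Hello, World!";
+    /// bytes.make_ascii_uppercase();
+    /// assert_eq!(&bytes, b"HELLO, WORLD!");
+    /// ```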
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn make_ascii_uppercase(&mut self) {
+ for byte in self {
+ byte.make_ascii_uppercase();
+ }
+ }
+
+ /// Converts this slice to its ASCII lower case equivalent in-place.
+ ///
+ /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To return a new lowercased value without modifying the existing one, use
+ /// [`to_ascii_lowercase`].
+ ///
+ /// [`to_ascii_lowercase`]: #method.to_ascii_lowercase
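+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let mut bytes = *b"Hello, World!";
+    /// bytes.make_ascii_lowercase();
+    /// assert_eq!(&bytes, b"hello, world!");
+    /// ```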
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn make_ascii_lowercase(&mut self) {
+ for byte in self {
+ byte.make_ascii_lowercase();
+ }
+ }
+
+ /// Returns an iterator that produces an escaped version of this slice,
+ /// treating it as an ASCII string.
+ ///
+ /// # Examples
+ ///
+    /// ```
+ /// let s = b"0\t\r\n'\"\\\x9d";
+ /// let escaped = s.escape_ascii().to_string();
+ /// assert_eq!(escaped, "0\\t\\r\\n\\'\\\"\\\\\\x9d");
+ /// ```
+ #[must_use = "this returns the escaped bytes as an iterator, \
+ without modifying the original"]
+ #[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
+ pub fn escape_ascii(&self) -> EscapeAscii<'_> {
+ EscapeAscii { inner: self.iter().flat_map(EscapeByte) }
+ }
+
+ /// Returns a byte slice with leading ASCII whitespace bytes removed.
+ ///
+ /// 'Whitespace' refers to the definition used by
+ /// `u8::is_ascii_whitespace`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(byte_slice_trim_ascii)]
+ ///
+ /// assert_eq!(b" \t hello world\n".trim_ascii_start(), b"hello world\n");
+ /// assert_eq!(b" ".trim_ascii_start(), b"");
+ /// assert_eq!(b"".trim_ascii_start(), b"");
+ /// ```
+ #[unstable(feature = "byte_slice_trim_ascii", issue = "94035")]
+ pub const fn trim_ascii_start(&self) -> &[u8] {
+ let mut bytes = self;
+ // Note: A pattern matching based approach (instead of indexing) allows
+ // making the function const.
+ while let [first, rest @ ..] = bytes {
+ if first.is_ascii_whitespace() {
+ bytes = rest;
+ } else {
+ break;
+ }
+ }
+ bytes
+ }
+
+ /// Returns a byte slice with trailing ASCII whitespace bytes removed.
+ ///
+ /// 'Whitespace' refers to the definition used by
+ /// `u8::is_ascii_whitespace`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(byte_slice_trim_ascii)]
+ ///
+ /// assert_eq!(b"\r hello world\n ".trim_ascii_end(), b"\r hello world");
+ /// assert_eq!(b" ".trim_ascii_end(), b"");
+ /// assert_eq!(b"".trim_ascii_end(), b"");
+ /// ```
+ #[unstable(feature = "byte_slice_trim_ascii", issue = "94035")]
+ pub const fn trim_ascii_end(&self) -> &[u8] {
+ let mut bytes = self;
+ // Note: A pattern matching based approach (instead of indexing) allows
+ // making the function const.
+ while let [rest @ .., last] = bytes {
+ if last.is_ascii_whitespace() {
+ bytes = rest;
+ } else {
+ break;
+ }
+ }
+ bytes
+ }
+
+ /// Returns a byte slice with leading and trailing ASCII whitespace bytes
+ /// removed.
+ ///
+ /// 'Whitespace' refers to the definition used by
+ /// `u8::is_ascii_whitespace`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(byte_slice_trim_ascii)]
+ ///
+ /// assert_eq!(b"\r hello world\n ".trim_ascii(), b"hello world");
+ /// assert_eq!(b" ".trim_ascii(), b"");
+ /// assert_eq!(b"".trim_ascii(), b"");
+ /// ```
+ #[unstable(feature = "byte_slice_trim_ascii", issue = "94035")]
+ pub const fn trim_ascii(&self) -> &[u8] {
+ self.trim_ascii_start().trim_ascii_end()
+ }
+}
+
+impl_fn_for_zst! {
+ #[derive(Clone)]
+ struct EscapeByte impl Fn = |byte: &u8| -> ascii::EscapeDefault {
+ ascii::escape_default(*byte)
+ };
+}
+
+/// An iterator over the escaped version of a byte slice.
+///
+/// This `struct` is created by the [`slice::escape_ascii`] method. See its
+/// documentation for more information.
+#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
+#[derive(Clone)]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub struct EscapeAscii<'a> {
+ inner: iter::FlatMap<super::Iter<'a, u8>, ascii::EscapeDefault, EscapeByte>,
+}
+
+#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
+impl<'a> iter::Iterator for EscapeAscii<'a> {
+ type Item = u8;
+ #[inline]
+ fn next(&mut self) -> Option<u8> {
+ self.inner.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+ #[inline]
+ fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+ where
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: ops::Try<Output = Acc>,
+ {
+ self.inner.try_fold(init, fold)
+ }
+ #[inline]
+ fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ self.inner.fold(init, fold)
+ }
+ #[inline]
+ fn last(mut self) -> Option<u8> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
+impl<'a> iter::DoubleEndedIterator for EscapeAscii<'a> {
+ fn next_back(&mut self) -> Option<u8> {
+ self.inner.next_back()
+ }
+}
+#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
+impl<'a> iter::ExactSizeIterator for EscapeAscii<'a> {}
+#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
+impl<'a> iter::FusedIterator for EscapeAscii<'a> {}
+#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
+impl<'a> fmt::Display for EscapeAscii<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.clone().try_for_each(|b| f.write_char(b as char))
+ }
+}
+#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
+impl<'a> fmt::Debug for EscapeAscii<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("EscapeAscii").finish_non_exhaustive()
+ }
+}
+
+/// Returns `true` if any byte in the word `v` is nonascii (>= 128). Snarfed
+/// from `../str/mod.rs`, which does something similar for utf8 validation.
+#[inline]
+fn contains_nonascii(v: usize) -> bool {
+ const NONASCII_MASK: usize = usize::repeat_u8(0x80);
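+    // On a 64-bit target `NONASCII_MASK` is 0x8080_8080_8080_8080, so the
+    // `&` below is nonzero exactly when some byte has its high bit set.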
+ (NONASCII_MASK & v) != 0
+}
+
+/// Optimized ASCII test that will use usize-at-a-time operations instead of
+/// byte-at-a-time operations (when possible).
+///
+/// The algorithm we use here is pretty simple. If `s` is too short, we just
+/// check each byte and be done with it. Otherwise:
+///
+/// - Read the first word with an unaligned load.
+/// - Align the pointer, read subsequent words until end with aligned loads.
+/// - Read the last `usize` from `s` with an unaligned load.
+///
+/// If any of these loads produces something for which `contains_nonascii`
+/// (above) returns true, then we know the answer is false.
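+///
+/// For example, with `len == 10` on a 64-bit target, the first unaligned load
+/// covers bytes 0..8 and the final unaligned load covers bytes 2..10; any
+/// overlapping bytes are simply tested twice, which is harmless.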
+#[inline]
+fn is_ascii(s: &[u8]) -> bool {
+ const USIZE_SIZE: usize = mem::size_of::<usize>();
+
+ let len = s.len();
+ let align_offset = s.as_ptr().align_offset(USIZE_SIZE);
+
+ // If we wouldn't gain anything from the word-at-a-time implementation, fall
+ // back to a scalar loop.
+ //
+ // We also do this for architectures where `size_of::<usize>()` isn't
+ // sufficient alignment for `usize`, because it's a weird edge case.
+ if len < USIZE_SIZE || len < align_offset || USIZE_SIZE < mem::align_of::<usize>() {
+ return s.iter().all(|b| b.is_ascii());
+ }
+
+    // We always read the first word unaligned; if `align_offset` is 0, an
+    // aligned read from the start would just reread the same word, so the
+    // aligned reads begin one full word in.
+ let offset_to_aligned = if align_offset == 0 { USIZE_SIZE } else { align_offset };
+
+ let start = s.as_ptr();
+    // SAFETY: We verify `len >= USIZE_SIZE` above, so reading one `usize`
+    // from the start is in bounds.
+ let first_word = unsafe { (start as *const usize).read_unaligned() };
+
+ if contains_nonascii(first_word) {
+ return false;
+ }
+    // We checked this above, somewhat implicitly. Note that `offset_to_aligned`
+    // is either `align_offset` or `USIZE_SIZE`, both of which are explicitly
+    // checked above.
+ debug_assert!(offset_to_aligned <= len);
+
+ // SAFETY: word_ptr is the (properly aligned) usize ptr we use to read the
+ // middle chunk of the slice.
+ let mut word_ptr = unsafe { start.add(offset_to_aligned) as *const usize };
+
+ // `byte_pos` is the byte index of `word_ptr`, used for loop end checks.
+ let mut byte_pos = offset_to_aligned;
+
+    // Paranoia check about alignment, since we're about to do a bunch of
+    // aligned loads. In practice this should be impossible barring a bug in
+    // `align_offset` though.
+ debug_assert_eq!(word_ptr.addr() % mem::align_of::<usize>(), 0);
+
+    // Read subsequent words until the last aligned word, excluding that last
+    // aligned word itself, which is handled by the tail check below. This
+    // keeps the tail to at most one `usize` and avoids an extra
+    // `byte_pos == len` branch.
+ while byte_pos < len - USIZE_SIZE {
+ debug_assert!(
+ // Sanity check that the read is in bounds
+ (word_ptr.addr() + USIZE_SIZE) <= start.addr().wrapping_add(len) &&
+ // And that our assumptions about `byte_pos` hold.
+ (word_ptr.addr() - start.addr()) == byte_pos
+ );
+
+        // SAFETY: We know `word_ptr` is properly aligned (because of
+        // `align_offset`), and we know that we have enough bytes between
+        // `word_ptr` and the end of the slice to read a whole `usize`.
+ let word = unsafe { word_ptr.read() };
+ if contains_nonascii(word) {
+ return false;
+ }
+
+ byte_pos += USIZE_SIZE;
+ // SAFETY: We know that `byte_pos <= len - USIZE_SIZE`, which means that
+ // after this `add`, `word_ptr` will be at most one-past-the-end.
+ word_ptr = unsafe { word_ptr.add(1) };
+ }
+
+ // Sanity check to ensure there really is only one `usize` left. This should
+ // be guaranteed by our loop condition.
+ debug_assert!(byte_pos <= len && len - byte_pos <= USIZE_SIZE);
+
+ // SAFETY: This relies on `len >= USIZE_SIZE`, which we check at the start.
+ let last_word = unsafe { (start.add(len - USIZE_SIZE) as *const usize).read_unaligned() };
+
+ !contains_nonascii(last_word)
+}
diff --git a/library/core/src/slice/cmp.rs b/library/core/src/slice/cmp.rs
new file mode 100644
index 000000000..5e1b218e5
--- /dev/null
+++ b/library/core/src/slice/cmp.rs
@@ -0,0 +1,260 @@
+//! Comparison traits for `[T]`.
+
+use crate::cmp::{self, Ordering};
+use crate::ffi;
+use crate::mem;
+
+use super::from_raw_parts;
+use super::memchr;
+
+extern "C" {
+ /// Calls implementation provided memcmp.
+ ///
+ /// Interprets the data as u8.
+ ///
+    /// Returns 0 for equal, < 0 for less than, and > 0 for greater than.
+ fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> ffi::c_int;
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A, B> PartialEq<[B]> for [A]
+where
+ A: PartialEq<B>,
+{
+ fn eq(&self, other: &[B]) -> bool {
+ SlicePartialEq::equal(self, other)
+ }
+
+ fn ne(&self, other: &[B]) -> bool {
+ SlicePartialEq::not_equal(self, other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Eq> Eq for [T] {}
+
+/// Implements comparison of vectors [lexicographically](Ord#lexicographical-comparison).
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord> Ord for [T] {
+ fn cmp(&self, other: &[T]) -> Ordering {
+ SliceOrd::compare(self, other)
+ }
+}
+
+/// Implements comparison of vectors [lexicographically](Ord#lexicographical-comparison).
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: PartialOrd> PartialOrd for [T] {
+ fn partial_cmp(&self, other: &[T]) -> Option<Ordering> {
+ SlicePartialOrd::partial_compare(self, other)
+ }
+}
+
+#[doc(hidden)]
+// intermediate trait for specialization of slice's PartialEq
+trait SlicePartialEq<B> {
+ fn equal(&self, other: &[B]) -> bool;
+
+ fn not_equal(&self, other: &[B]) -> bool {
+ !self.equal(other)
+ }
+}
+
+// Generic slice equality
+impl<A, B> SlicePartialEq<B> for [A]
+where
+ A: PartialEq<B>,
+{
+ default fn equal(&self, other: &[B]) -> bool {
+ if self.len() != other.len() {
+ return false;
+ }
+
+ self.iter().zip(other.iter()).all(|(x, y)| x == y)
+ }
+}
+
+// Use memcmp for bytewise equality when the types allow
+impl<A, B> SlicePartialEq<B> for [A]
+where
+ A: BytewiseEquality<B>,
+{
+ fn equal(&self, other: &[B]) -> bool {
+ if self.len() != other.len() {
+ return false;
+ }
+
+ // SAFETY: `self` and `other` are references and are thus guaranteed to be valid.
+ // The two slices have been checked to have the same size above.
+ unsafe {
+ let size = mem::size_of_val(self);
+ memcmp(self.as_ptr() as *const u8, other.as_ptr() as *const u8, size) == 0
+ }
+ }
+}
+
+#[doc(hidden)]
+// intermediate trait for specialization of slice's PartialOrd
+trait SlicePartialOrd: Sized {
+ fn partial_compare(left: &[Self], right: &[Self]) -> Option<Ordering>;
+}
+
+impl<A: PartialOrd> SlicePartialOrd for A {
+ default fn partial_compare(left: &[A], right: &[A]) -> Option<Ordering> {
+ let l = cmp::min(left.len(), right.len());
+
+ // Slice to the loop iteration range to enable bound check
+ // elimination in the compiler
+ let lhs = &left[..l];
+ let rhs = &right[..l];
+
+ for i in 0..l {
+ match lhs[i].partial_cmp(&rhs[i]) {
+ Some(Ordering::Equal) => (),
+ non_eq => return non_eq,
+ }
+ }
+
+ left.len().partial_cmp(&right.len())
+ }
+}
+
+// This is the impl that we would like to have. Unfortunately it's not sound.
+// See `partial_ord_slice.rs`.
+/*
+impl<A> SlicePartialOrd for A
+where
+ A: Ord,
+{
+ default fn partial_compare(left: &[A], right: &[A]) -> Option<Ordering> {
+ Some(SliceOrd::compare(left, right))
+ }
+}
+*/
+
+impl<A: AlwaysApplicableOrd> SlicePartialOrd for A {
+ fn partial_compare(left: &[A], right: &[A]) -> Option<Ordering> {
+ Some(SliceOrd::compare(left, right))
+ }
+}
+
+#[rustc_specialization_trait]
+trait AlwaysApplicableOrd: SliceOrd + Ord {}
+
+macro_rules! always_applicable_ord {
+ ($([$($p:tt)*] $t:ty,)*) => {
+ $(impl<$($p)*> AlwaysApplicableOrd for $t {})*
+ }
+}
+
+always_applicable_ord! {
+ [] u8, [] u16, [] u32, [] u64, [] u128, [] usize,
+ [] i8, [] i16, [] i32, [] i64, [] i128, [] isize,
+ [] bool, [] char,
+ [T: ?Sized] *const T, [T: ?Sized] *mut T,
+ [T: AlwaysApplicableOrd] &T,
+ [T: AlwaysApplicableOrd] &mut T,
+ [T: AlwaysApplicableOrd] Option<T>,
+}
+
+#[doc(hidden)]
+// intermediate trait for specialization of slice's Ord
+trait SliceOrd: Sized {
+ fn compare(left: &[Self], right: &[Self]) -> Ordering;
+}
+
+impl<A: Ord> SliceOrd for A {
+ default fn compare(left: &[Self], right: &[Self]) -> Ordering {
+ let l = cmp::min(left.len(), right.len());
+
+ // Slice to the loop iteration range to enable bound check
+ // elimination in the compiler
+ let lhs = &left[..l];
+ let rhs = &right[..l];
+
+ for i in 0..l {
+ match lhs[i].cmp(&rhs[i]) {
+ Ordering::Equal => (),
+ non_eq => return non_eq,
+ }
+ }
+
+ left.len().cmp(&right.len())
+ }
+}
+
+// `memcmp` compares a sequence of unsigned bytes lexicographically. This
+// matches the order we want for `[u8]`, but no others (not even `[i8]`).
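+// For example, the byte 0x80 is 128 as a `u8` but -128 as an `i8`, so a raw
+// `memcmp` would order it after 0x01 even though -128 < 1 for `i8`.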
+impl SliceOrd for u8 {
+ #[inline]
+ fn compare(left: &[Self], right: &[Self]) -> Ordering {
+ // Since the length of a slice is always less than or equal to isize::MAX, this never underflows.
+ let diff = left.len() as isize - right.len() as isize;
+ // This comparison gets optimized away (on x86_64 and ARM) because the subtraction updates flags.
+ let len = if left.len() < right.len() { left.len() } else { right.len() };
+ // SAFETY: `left` and `right` are references and are thus guaranteed to be valid.
+ // We use the minimum of both lengths which guarantees that both regions are
+ // valid for reads in that interval.
+ let mut order = unsafe { memcmp(left.as_ptr(), right.as_ptr(), len) as isize };
+ if order == 0 {
+ order = diff;
+ }
+ order.cmp(&0)
+ }
+}
+
+// Hack to allow specializing on `Eq` even though `Eq` has a method.
+#[rustc_unsafe_specialization_marker]
+trait MarkerEq<T>: PartialEq<T> {}
+
+impl<T: Eq> MarkerEq<T> for T {}
+
+#[doc(hidden)]
+/// Trait implemented for types that can be compared for equality using
+/// their bytewise representation
+#[rustc_specialization_trait]
+trait BytewiseEquality<T>: MarkerEq<T> + Copy {}
+
+macro_rules! impl_marker_for {
+ ($traitname:ident, $($ty:ty)*) => {
+ $(
+ impl $traitname<$ty> for $ty { }
+ )*
+ }
+}
+
+impl_marker_for!(BytewiseEquality,
+ u8 i8 u16 i16 u32 i32 u64 i64 u128 i128 usize isize char bool);
+
+pub(super) trait SliceContains: Sized {
+ fn slice_contains(&self, x: &[Self]) -> bool;
+}
+
+impl<T> SliceContains for T
+where
+ T: PartialEq,
+{
+ default fn slice_contains(&self, x: &[Self]) -> bool {
+ x.iter().any(|y| *y == *self)
+ }
+}
+
+impl SliceContains for u8 {
+ #[inline]
+ fn slice_contains(&self, x: &[Self]) -> bool {
+ memchr::memchr(*self, x).is_some()
+ }
+}
+
+impl SliceContains for i8 {
+ #[inline]
+ fn slice_contains(&self, x: &[Self]) -> bool {
+ let byte = *self as u8;
+ // SAFETY: `i8` and `u8` have the same memory layout, thus casting `x.as_ptr()`
+ // as `*const u8` is safe. The `x.as_ptr()` comes from a reference and is thus guaranteed
+ // to be valid for reads for the length of the slice `x.len()`, which cannot be larger
+ // than `isize::MAX`. The returned slice is never mutated.
+ let bytes: &[u8] = unsafe { from_raw_parts(x.as_ptr() as *const u8, x.len()) };
+ memchr::memchr(byte, bytes).is_some()
+ }
+}
diff --git a/library/core/src/slice/index.rs b/library/core/src/slice/index.rs
new file mode 100644
index 000000000..fd7ecf3da
--- /dev/null
+++ b/library/core/src/slice/index.rs
@@ -0,0 +1,730 @@
+//! Indexing implementations for `[T]`.
+
+use crate::intrinsics::assert_unsafe_precondition;
+use crate::intrinsics::const_eval_select;
+use crate::ops;
+use crate::ptr;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+impl<T, I> const ops::Index<I> for [T]
+where
+ I: ~const SliceIndex<[T]>,
+{
+ type Output = I::Output;
+
+ #[inline]
+ fn index(&self, index: I) -> &I::Output {
+ index.index(self)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+impl<T, I> const ops::IndexMut<I> for [T]
+where
+ I: ~const SliceIndex<[T]>,
+{
+ #[inline]
+ fn index_mut(&mut self, index: I) -> &mut I::Output {
+ index.index_mut(self)
+ }
+}
+
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
+#[cold]
+#[track_caller]
+#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+const fn slice_start_index_len_fail(index: usize, len: usize) -> ! {
+ // SAFETY: we are just panicking here
+ unsafe {
+ const_eval_select(
+ (index, len),
+ slice_start_index_len_fail_ct,
+ slice_start_index_len_fail_rt,
+ )
+ }
+}
+
+// FIXME const-hack
+fn slice_start_index_len_fail_rt(index: usize, len: usize) -> ! {
+ panic!("range start index {index} out of range for slice of length {len}");
+}
+
+const fn slice_start_index_len_fail_ct(_: usize, _: usize) -> ! {
+ panic!("slice start index is out of range for slice");
+}
+
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
+#[cold]
+#[track_caller]
+#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+const fn slice_end_index_len_fail(index: usize, len: usize) -> ! {
+ // SAFETY: we are just panicking here
+ unsafe {
+ const_eval_select((index, len), slice_end_index_len_fail_ct, slice_end_index_len_fail_rt)
+ }
+}
+
+// FIXME const-hack
+fn slice_end_index_len_fail_rt(index: usize, len: usize) -> ! {
+ panic!("range end index {index} out of range for slice of length {len}");
+}
+
+const fn slice_end_index_len_fail_ct(_: usize, _: usize) -> ! {
+ panic!("slice end index is out of range for slice");
+}
+
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
+#[cold]
+#[track_caller]
+#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+const fn slice_index_order_fail(index: usize, end: usize) -> ! {
+ // SAFETY: we are just panicking here
+ unsafe { const_eval_select((index, end), slice_index_order_fail_ct, slice_index_order_fail_rt) }
+}
+
+// FIXME const-hack
+fn slice_index_order_fail_rt(index: usize, end: usize) -> ! {
+ panic!("slice index starts at {index} but ends at {end}");
+}
+
+const fn slice_index_order_fail_ct(_: usize, _: usize) -> ! {
+ panic!("slice index start is larger than end");
+}
+
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
+#[cold]
+#[track_caller]
+const fn slice_start_index_overflow_fail() -> ! {
+ panic!("attempted to index slice from after maximum usize");
+}
+
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
+#[cold]
+#[track_caller]
+const fn slice_end_index_overflow_fail() -> ! {
+ panic!("attempted to index slice up to maximum usize");
+}
+
+mod private_slice_index {
+ use super::ops;
+ #[stable(feature = "slice_get_slice", since = "1.28.0")]
+ pub trait Sealed {}
+
+ #[stable(feature = "slice_get_slice", since = "1.28.0")]
+ impl Sealed for usize {}
+ #[stable(feature = "slice_get_slice", since = "1.28.0")]
+ impl Sealed for ops::Range<usize> {}
+ #[stable(feature = "slice_get_slice", since = "1.28.0")]
+ impl Sealed for ops::RangeTo<usize> {}
+ #[stable(feature = "slice_get_slice", since = "1.28.0")]
+ impl Sealed for ops::RangeFrom<usize> {}
+ #[stable(feature = "slice_get_slice", since = "1.28.0")]
+ impl Sealed for ops::RangeFull {}
+ #[stable(feature = "slice_get_slice", since = "1.28.0")]
+ impl Sealed for ops::RangeInclusive<usize> {}
+ #[stable(feature = "slice_get_slice", since = "1.28.0")]
+ impl Sealed for ops::RangeToInclusive<usize> {}
+ #[stable(feature = "slice_index_with_ops_bound_pair", since = "1.53.0")]
+ impl Sealed for (ops::Bound<usize>, ops::Bound<usize>) {}
+}
+
+/// A helper trait used for indexing operations.
+///
+/// Implementations of this trait have to promise that if the argument
+/// to `get_unchecked(_mut)` is a safe reference, then so is the result.
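+///
+/// A brief illustration via `slice::get`, which delegates to this trait:
+///
+/// ```
+/// let v = [1, 2, 3];
+/// assert_eq!(v.get(1), Some(&2));             // `usize` as a `SliceIndex`
+/// assert_eq!(v.get(1..3), Some(&[2, 3][..])); // `Range<usize>` as a `SliceIndex`
+/// ```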
+#[stable(feature = "slice_get_slice", since = "1.28.0")]
+#[rustc_diagnostic_item = "SliceIndex"]
+#[rustc_on_unimplemented(
+ on(T = "str", label = "string indices are ranges of `usize`",),
+ on(
+ all(any(T = "str", T = "&str", T = "std::string::String"), _Self = "{integer}"),
+ note = "you can use `.chars().nth()` or `.bytes().nth()`\n\
+ for more information, see chapter 8 in The Book: \
+ <https://doc.rust-lang.org/book/ch08-02-strings.html#indexing-into-strings>"
+ ),
+ message = "the type `{T}` cannot be indexed by `{Self}`",
+ label = "slice indices are of type `usize` or ranges of `usize`"
+)]
+pub unsafe trait SliceIndex<T: ?Sized>: private_slice_index::Sealed {
+ /// The output type returned by methods.
+ #[stable(feature = "slice_get_slice", since = "1.28.0")]
+ type Output: ?Sized;
+
+ /// Returns a shared reference to the output at this location, if in
+ /// bounds.
+ #[unstable(feature = "slice_index_methods", issue = "none")]
+ fn get(self, slice: &T) -> Option<&Self::Output>;
+
+ /// Returns a mutable reference to the output at this location, if in
+ /// bounds.
+ #[unstable(feature = "slice_index_methods", issue = "none")]
+ fn get_mut(self, slice: &mut T) -> Option<&mut Self::Output>;
+
+ /// Returns a shared reference to the output at this location, without
+ /// performing any bounds checking.
+ /// Calling this method with an out-of-bounds index or a dangling `slice` pointer
+ /// is *[undefined behavior]* even if the resulting reference is not used.
+ ///
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ #[unstable(feature = "slice_index_methods", issue = "none")]
+ unsafe fn get_unchecked(self, slice: *const T) -> *const Self::Output;
+
+ /// Returns a mutable reference to the output at this location, without
+ /// performing any bounds checking.
+ /// Calling this method with an out-of-bounds index or a dangling `slice` pointer
+ /// is *[undefined behavior]* even if the resulting reference is not used.
+ ///
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ #[unstable(feature = "slice_index_methods", issue = "none")]
+ unsafe fn get_unchecked_mut(self, slice: *mut T) -> *mut Self::Output;
+
+ /// Returns a shared reference to the output at this location, panicking
+ /// if out of bounds.
+ #[unstable(feature = "slice_index_methods", issue = "none")]
+ #[track_caller]
+ fn index(self, slice: &T) -> &Self::Output;
+
+ /// Returns a mutable reference to the output at this location, panicking
+ /// if out of bounds.
+ #[unstable(feature = "slice_index_methods", issue = "none")]
+ #[track_caller]
+ fn index_mut(self, slice: &mut T) -> &mut Self::Output;
+}
+
+#[stable(feature = "slice_get_slice_impls", since = "1.15.0")]
+#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+unsafe impl<T> const SliceIndex<[T]> for usize {
+ type Output = T;
+
+ #[inline]
+ fn get(self, slice: &[T]) -> Option<&T> {
+ // SAFETY: `self` is checked to be in bounds.
+ if self < slice.len() { unsafe { Some(&*self.get_unchecked(slice)) } } else { None }
+ }
+
+ #[inline]
+ fn get_mut(self, slice: &mut [T]) -> Option<&mut T> {
+ // SAFETY: `self` is checked to be in bounds.
+ if self < slice.len() { unsafe { Some(&mut *self.get_unchecked_mut(slice)) } } else { None }
+ }
+
+ #[inline]
+ unsafe fn get_unchecked(self, slice: *const [T]) -> *const T {
+ // SAFETY: the caller guarantees that `slice` is not dangling, so it
+ // cannot be longer than `isize::MAX`. They also guarantee that
+ // `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
+ // so the call to `add` is safe.
+ unsafe {
+ assert_unsafe_precondition!(self < slice.len());
+ slice.as_ptr().add(self)
+ }
+ }
+
+ #[inline]
+ unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut T {
+ // SAFETY: see comments for `get_unchecked` above.
+ unsafe {
+ assert_unsafe_precondition!(self < slice.len());
+ slice.as_mut_ptr().add(self)
+ }
+ }
+
+ #[inline]
+ fn index(self, slice: &[T]) -> &T {
+ // N.B., use intrinsic indexing
+ &(*slice)[self]
+ }
+
+ #[inline]
+ fn index_mut(self, slice: &mut [T]) -> &mut T {
+ // N.B., use intrinsic indexing
+ &mut (*slice)[self]
+ }
+}
+
+#[stable(feature = "slice_get_slice_impls", since = "1.15.0")]
+#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+unsafe impl<T> const SliceIndex<[T]> for ops::Range<usize> {
+ type Output = [T];
+
+ #[inline]
+ fn get(self, slice: &[T]) -> Option<&[T]> {
+ if self.start > self.end || self.end > slice.len() {
+ None
+ } else {
+ // SAFETY: `self` is checked to be valid and in bounds above.
+ unsafe { Some(&*self.get_unchecked(slice)) }
+ }
+ }
+
+ #[inline]
+ fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
+ if self.start > self.end || self.end > slice.len() {
+ None
+ } else {
+ // SAFETY: `self` is checked to be valid and in bounds above.
+ unsafe { Some(&mut *self.get_unchecked_mut(slice)) }
+ }
+ }
+
+ #[inline]
+ unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
+ // SAFETY: the caller guarantees that `slice` is not dangling, so it
+ // cannot be longer than `isize::MAX`. They also guarantee that
+ // `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
+ // so the call to `add` is safe.
+
+ unsafe {
+ assert_unsafe_precondition!(self.end >= self.start && self.end <= slice.len());
+ ptr::slice_from_raw_parts(slice.as_ptr().add(self.start), self.end - self.start)
+ }
+ }
+
+ #[inline]
+ unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
+ // SAFETY: see comments for `get_unchecked` above.
+ unsafe {
+ assert_unsafe_precondition!(self.end >= self.start && self.end <= slice.len());
+ ptr::slice_from_raw_parts_mut(slice.as_mut_ptr().add(self.start), self.end - self.start)
+ }
+ }
+
+ #[inline]
+ fn index(self, slice: &[T]) -> &[T] {
+ if self.start > self.end {
+ slice_index_order_fail(self.start, self.end);
+ } else if self.end > slice.len() {
+ slice_end_index_len_fail(self.end, slice.len());
+ }
+ // SAFETY: `self` is checked to be valid and in bounds above.
+ unsafe { &*self.get_unchecked(slice) }
+ }
+
+ #[inline]
+ fn index_mut(self, slice: &mut [T]) -> &mut [T] {
+ if self.start > self.end {
+ slice_index_order_fail(self.start, self.end);
+ } else if self.end > slice.len() {
+ slice_end_index_len_fail(self.end, slice.len());
+ }
+ // SAFETY: `self` is checked to be valid and in bounds above.
+ unsafe { &mut *self.get_unchecked_mut(slice) }
+ }
+}
+
+#[stable(feature = "slice_get_slice_impls", since = "1.15.0")]
+#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+unsafe impl<T> const SliceIndex<[T]> for ops::RangeTo<usize> {
+ type Output = [T];
+
+ #[inline]
+ fn get(self, slice: &[T]) -> Option<&[T]> {
+ (0..self.end).get(slice)
+ }
+
+ #[inline]
+ fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
+ (0..self.end).get_mut(slice)
+ }
+
+ #[inline]
+ unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
+ // SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
+ unsafe { (0..self.end).get_unchecked(slice) }
+ }
+
+ #[inline]
+ unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
+ // SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
+ unsafe { (0..self.end).get_unchecked_mut(slice) }
+ }
+
+ #[inline]
+ fn index(self, slice: &[T]) -> &[T] {
+ (0..self.end).index(slice)
+ }
+
+ #[inline]
+ fn index_mut(self, slice: &mut [T]) -> &mut [T] {
+ (0..self.end).index_mut(slice)
+ }
+}
+
+#[stable(feature = "slice_get_slice_impls", since = "1.15.0")]
+#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+unsafe impl<T> const SliceIndex<[T]> for ops::RangeFrom<usize> {
+ type Output = [T];
+
+ #[inline]
+ fn get(self, slice: &[T]) -> Option<&[T]> {
+ (self.start..slice.len()).get(slice)
+ }
+
+ #[inline]
+ fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
+ (self.start..slice.len()).get_mut(slice)
+ }
+
+ #[inline]
+ unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
+ // SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
+ unsafe { (self.start..slice.len()).get_unchecked(slice) }
+ }
+
+ #[inline]
+ unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
+ // SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
+ unsafe { (self.start..slice.len()).get_unchecked_mut(slice) }
+ }
+
+ #[inline]
+ fn index(self, slice: &[T]) -> &[T] {
+ if self.start > slice.len() {
+ slice_start_index_len_fail(self.start, slice.len());
+ }
+ // SAFETY: `self` is checked to be valid and in bounds above.
+ unsafe { &*self.get_unchecked(slice) }
+ }
+
+ #[inline]
+ fn index_mut(self, slice: &mut [T]) -> &mut [T] {
+ if self.start > slice.len() {
+ slice_start_index_len_fail(self.start, slice.len());
+ }
+ // SAFETY: `self` is checked to be valid and in bounds above.
+ unsafe { &mut *self.get_unchecked_mut(slice) }
+ }
+}
+
+#[stable(feature = "slice_get_slice_impls", since = "1.15.0")]
+#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+unsafe impl<T> const SliceIndex<[T]> for ops::RangeFull {
+ type Output = [T];
+
+ #[inline]
+ fn get(self, slice: &[T]) -> Option<&[T]> {
+ Some(slice)
+ }
+
+ #[inline]
+ fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
+ Some(slice)
+ }
+
+ #[inline]
+ unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
+ slice
+ }
+
+ #[inline]
+ unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
+ slice
+ }
+
+ #[inline]
+ fn index(self, slice: &[T]) -> &[T] {
+ slice
+ }
+
+ #[inline]
+ fn index_mut(self, slice: &mut [T]) -> &mut [T] {
+ slice
+ }
+}
+
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+unsafe impl<T> const SliceIndex<[T]> for ops::RangeInclusive<usize> {
+ type Output = [T];
+
+ #[inline]
+ fn get(self, slice: &[T]) -> Option<&[T]> {
+ if *self.end() == usize::MAX { None } else { self.into_slice_range().get(slice) }
+ }
+
+ #[inline]
+ fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
+ if *self.end() == usize::MAX { None } else { self.into_slice_range().get_mut(slice) }
+ }
+
+ #[inline]
+ unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
+ // SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
+ unsafe { self.into_slice_range().get_unchecked(slice) }
+ }
+
+ #[inline]
+ unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
+ // SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
+ unsafe { self.into_slice_range().get_unchecked_mut(slice) }
+ }
+
+ #[inline]
+ fn index(self, slice: &[T]) -> &[T] {
+ if *self.end() == usize::MAX {
+ slice_end_index_overflow_fail();
+ }
+ self.into_slice_range().index(slice)
+ }
+
+ #[inline]
+ fn index_mut(self, slice: &mut [T]) -> &mut [T] {
+ if *self.end() == usize::MAX {
+ slice_end_index_overflow_fail();
+ }
+ self.into_slice_range().index_mut(slice)
+ }
+}
+
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+unsafe impl<T> const SliceIndex<[T]> for ops::RangeToInclusive<usize> {
+ type Output = [T];
+
+ #[inline]
+ fn get(self, slice: &[T]) -> Option<&[T]> {
+ (0..=self.end).get(slice)
+ }
+
+ #[inline]
+ fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
+ (0..=self.end).get_mut(slice)
+ }
+
+ #[inline]
+ unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
+ // SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
+ unsafe { (0..=self.end).get_unchecked(slice) }
+ }
+
+ #[inline]
+ unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
+ // SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
+ unsafe { (0..=self.end).get_unchecked_mut(slice) }
+ }
+
+ #[inline]
+ fn index(self, slice: &[T]) -> &[T] {
+ (0..=self.end).index(slice)
+ }
+
+ #[inline]
+ fn index_mut(self, slice: &mut [T]) -> &mut [T] {
+ (0..=self.end).index_mut(slice)
+ }
+}
+
+/// Performs bounds-checking of a range.
+///
+/// This function is similar to [`Index::index`] for slices, but it returns a
+/// [`Range`] equivalent to `range`. You can use this method to turn any range
+/// into `start` and `end` values.
+///
+/// `bounds` is the range of the slice to use for bounds-checking. It should
+/// be a [`RangeTo`] range that ends at the length of the slice.
+///
+/// The returned [`Range`] is safe to pass to [`slice::get_unchecked`] and
+/// [`slice::get_unchecked_mut`] for slices with the given range.
+///
+/// [`Range`]: ops::Range
+/// [`RangeTo`]: ops::RangeTo
+/// [`slice::get_unchecked`]: slice::get_unchecked
+/// [`slice::get_unchecked_mut`]: slice::get_unchecked_mut
+///
+/// # Panics
+///
+/// Panics if `range` would be out of bounds.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(slice_range)]
+///
+/// use std::slice;
+///
+/// let v = [10, 40, 30];
+/// assert_eq!(1..2, slice::range(1..2, ..v.len()));
+/// assert_eq!(0..2, slice::range(..2, ..v.len()));
+/// assert_eq!(1..3, slice::range(1.., ..v.len()));
+/// ```
+///
+/// Panics when [`Index::index`] would panic:
+///
+/// ```should_panic
+/// #![feature(slice_range)]
+///
+/// use std::slice;
+///
+/// let _ = slice::range(2..1, ..3);
+/// ```
+///
+/// ```should_panic
+/// #![feature(slice_range)]
+///
+/// use std::slice;
+///
+/// let _ = slice::range(1..4, ..3);
+/// ```
+///
+/// ```should_panic
+/// #![feature(slice_range)]
+///
+/// use std::slice;
+///
+/// let _ = slice::range(1..=usize::MAX, ..3);
+/// ```
+///
+/// [`Index::index`]: ops::Index::index
+#[track_caller]
+#[unstable(feature = "slice_range", issue = "76393")]
+#[must_use]
+pub fn range<R>(range: R, bounds: ops::RangeTo<usize>) -> ops::Range<usize>
+where
+ R: ops::RangeBounds<usize>,
+{
+ let len = bounds.end;
+
+ let start: ops::Bound<&usize> = range.start_bound();
+ let start = match start {
+ ops::Bound::Included(&start) => start,
+ ops::Bound::Excluded(start) => {
+ start.checked_add(1).unwrap_or_else(|| slice_start_index_overflow_fail())
+ }
+ ops::Bound::Unbounded => 0,
+ };
+
+ let end: ops::Bound<&usize> = range.end_bound();
+ let end = match end {
+ ops::Bound::Included(end) => {
+ end.checked_add(1).unwrap_or_else(|| slice_end_index_overflow_fail())
+ }
+ ops::Bound::Excluded(&end) => end,
+ ops::Bound::Unbounded => len,
+ };
+
+ if start > end {
+ slice_index_order_fail(start, end);
+ }
+ if end > len {
+ slice_end_index_len_fail(end, len);
+ }
+
+ ops::Range { start, end }
+}
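To make the `Bound` matching in the body concrete, here is a hedged sketch using the same unstable `slice_range` feature as the doc examples above: an excluded start bound is bumped by one, and an unbounded end resolves to the length.

```rust
#![feature(slice_range)]

use std::ops::Bound;
use std::slice;

// `(Bound, Bound)` pairs implement `RangeBounds`, so they resolve too;
// `Excluded(0)` becomes the inclusive start 1, `Unbounded` becomes len:
assert_eq!(slice::range((Bound::Excluded(0), Bound::Unbounded), ..3), 1..3);
```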
+
+/// Converts a pair of `ops::Bound`s into an `ops::Range` without performing any bounds checking or (in debug) overflow checking
+fn into_range_unchecked(
+ len: usize,
+ (start, end): (ops::Bound<usize>, ops::Bound<usize>),
+) -> ops::Range<usize> {
+ use ops::Bound;
+ let start = match start {
+ Bound::Included(i) => i,
+ Bound::Excluded(i) => i + 1,
+ Bound::Unbounded => 0,
+ };
+ let end = match end {
+ Bound::Included(i) => i + 1,
+ Bound::Excluded(i) => i,
+ Bound::Unbounded => len,
+ };
+ start..end
+}
+
+/// Converts a pair of `ops::Bound`s into an `ops::Range`.
+/// Returns `None` on overflowing indices.
+fn into_range(
+ len: usize,
+ (start, end): (ops::Bound<usize>, ops::Bound<usize>),
+) -> Option<ops::Range<usize>> {
+ use ops::Bound;
+ let start = match start {
+ Bound::Included(start) => start,
+ Bound::Excluded(start) => start.checked_add(1)?,
+ Bound::Unbounded => 0,
+ };
+
+ let end = match end {
+ Bound::Included(end) => end.checked_add(1)?,
+ Bound::Excluded(end) => end,
+ Bound::Unbounded => len,
+ };
+
+ // Don't bother with checking `start < end` and `end <= len`
+ // since these checks are handled by `Range` impls
+
+ Some(start..end)
+}
+
+/// Converts a pair of `ops::Bound`s into an `ops::Range`.
+/// Panics on overflowing indices.
+fn into_slice_range(
+ len: usize,
+ (start, end): (ops::Bound<usize>, ops::Bound<usize>),
+) -> ops::Range<usize> {
+ use ops::Bound;
+ let start = match start {
+ Bound::Included(start) => start,
+ Bound::Excluded(start) => {
+ start.checked_add(1).unwrap_or_else(|| slice_start_index_overflow_fail())
+ }
+ Bound::Unbounded => 0,
+ };
+
+ let end = match end {
+ Bound::Included(end) => {
+ end.checked_add(1).unwrap_or_else(|| slice_end_index_overflow_fail())
+ }
+ Bound::Excluded(end) => end,
+ Bound::Unbounded => len,
+ };
+
+ // Don't bother with checking `start < end` and `end <= len`
+ // since these checks are handled by `Range` impls
+
+ start..end
+}
+
+#[stable(feature = "slice_index_with_ops_bound_pair", since = "1.53.0")]
+unsafe impl<T> SliceIndex<[T]> for (ops::Bound<usize>, ops::Bound<usize>) {
+ type Output = [T];
+
+ #[inline]
+ fn get(self, slice: &[T]) -> Option<&Self::Output> {
+ into_range(slice.len(), self)?.get(slice)
+ }
+
+ #[inline]
+ fn get_mut(self, slice: &mut [T]) -> Option<&mut Self::Output> {
+ into_range(slice.len(), self)?.get_mut(slice)
+ }
+
+ #[inline]
+ unsafe fn get_unchecked(self, slice: *const [T]) -> *const Self::Output {
+ // SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
+ unsafe { into_range_unchecked(slice.len(), self).get_unchecked(slice) }
+ }
+
+ #[inline]
+ unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut Self::Output {
+ // SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
+ unsafe { into_range_unchecked(slice.len(), self).get_unchecked_mut(slice) }
+ }
+
+ #[inline]
+ fn index(self, slice: &[T]) -> &Self::Output {
+ into_slice_range(slice.len(), self).index(slice)
+ }
+
+ #[inline]
+ fn index_mut(self, slice: &mut [T]) -> &mut Self::Output {
+ into_slice_range(slice.len(), self).index_mut(slice)
+ }
+}
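A short illustrative sketch (not part of the patch) of this impl: the pair is resolved through `into_range`, so an overflowing bound makes `get` return `None` rather than panic.

```rust
use std::ops::Bound;

let v = [1, 2, 3];
// `Excluded(0)` resolves to start 1, `Included(2)` to end 3:
assert_eq!(v.get((Bound::Excluded(0), Bound::Included(2))), Some(&v[1..3]));
// `Included(usize::MAX)` cannot be turned into an exclusive end:
assert_eq!(v.get((Bound::Included(0), Bound::Included(usize::MAX))), None);
```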
diff --git a/library/core/src/slice/iter.rs b/library/core/src/slice/iter.rs
new file mode 100644
index 000000000..f1e659309
--- /dev/null
+++ b/library/core/src/slice/iter.rs
@@ -0,0 +1,3388 @@
+//! Definitions of a bunch of iterators for `[T]`.
+
+#[macro_use] // import iterator! and forward_iterator!
+mod macros;
+
+use crate::cmp;
+use crate::cmp::Ordering;
+use crate::fmt;
+use crate::intrinsics::{assume, exact_div, unchecked_sub};
+use crate::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce};
+use crate::marker::{PhantomData, Send, Sized, Sync};
+use crate::mem;
+use crate::num::NonZeroUsize;
+use crate::ptr::NonNull;
+
+use super::{from_raw_parts, from_raw_parts_mut};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> IntoIterator for &'a [T] {
+ type Item = &'a T;
+ type IntoIter = Iter<'a, T>;
+
+ fn into_iter(self) -> Iter<'a, T> {
+ self.iter()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> IntoIterator for &'a mut [T] {
+ type Item = &'a mut T;
+ type IntoIter = IterMut<'a, T>;
+
+ fn into_iter(self) -> IterMut<'a, T> {
+ self.iter_mut()
+ }
+}
+
+// Macro helper functions
+#[inline(always)]
+fn size_from_ptr<T>(_: *const T) -> usize {
+ mem::size_of::<T>()
+}
+
+/// Immutable slice iterator
+///
+/// This struct is created by the [`iter`] method on [slices].
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// // First, we declare a type which has the `iter` method to get the `Iter` struct (`&[usize]` here):
+/// let slice = &[1, 2, 3];
+///
+/// // Then, we iterate over it:
+/// for element in slice.iter() {
+/// println!("{element}");
+/// }
+/// ```
+///
+/// [`iter`]: slice::iter
+/// [slices]: slice
+#[stable(feature = "rust1", since = "1.0.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub struct Iter<'a, T: 'a> {
+ ptr: NonNull<T>,
+ end: *const T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
+ // ptr == end is a quick test for the iterator being empty, one that works
+ // for both ZST and non-ZST.
+ _marker: PhantomData<&'a T>,
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Iter").field(&self.as_slice()).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Sync> Sync for Iter<'_, T> {}
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Sync> Send for Iter<'_, T> {}
+
+impl<'a, T> Iter<'a, T> {
+ #[inline]
+ pub(super) fn new(slice: &'a [T]) -> Self {
+ let ptr = slice.as_ptr();
+ // SAFETY: Similar to `IterMut::new`.
+ unsafe {
+ assume(!ptr.is_null());
+
+ let end = if mem::size_of::<T>() == 0 {
+ (ptr as *const u8).wrapping_add(slice.len()) as *const T
+ } else {
+ ptr.add(slice.len())
+ };
+
+ Self { ptr: NonNull::new_unchecked(ptr as *mut T), end, _marker: PhantomData }
+ }
+ }
+
+ /// Views the underlying data as a subslice of the original data.
+ ///
+ /// This has the same lifetime as the original slice, and so the
+ /// iterator can continue to be used while this exists.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // First, we declare a type which has the `iter` method to get the `Iter`
+ /// // struct (`&[usize]` here):
+ /// let slice = &[1, 2, 3];
+ ///
+ /// // Then, we get the iterator:
+ /// let mut iter = slice.iter();
+ /// // So if we print what the `as_slice` method returns here, we have "[1, 2, 3]":
+ /// println!("{:?}", iter.as_slice());
+ ///
+ /// // Next, we move to the second element of the slice:
+ /// iter.next();
+ /// // Now `as_slice` returns "[2, 3]":
+ /// println!("{:?}", iter.as_slice());
+ /// ```
+ #[must_use]
+ #[stable(feature = "iter_to_slice", since = "1.4.0")]
+ pub fn as_slice(&self) -> &'a [T] {
+ self.make_slice()
+ }
+}
+
+iterator! {struct Iter -> *const T, &'a T, const, {/* no mut */}, {
+ fn is_sorted_by<F>(self, mut compare: F) -> bool
+ where
+ Self: Sized,
+ F: FnMut(&Self::Item, &Self::Item) -> Option<Ordering>,
+ {
+ self.as_slice().windows(2).all(|w| {
+ compare(&&w[0], &&w[1]).map(|o| o != Ordering::Greater).unwrap_or(false)
+ })
+ }
+}}
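The override above avoids pulling items through `next` and instead compares adjacent pairs of the remaining data with `windows(2)`. A doctest-style sketch (illustrative only), gated on the unstable `is_sorted` feature as of this snapshot:

```rust
#![feature(is_sorted)]

// Each adjacent pair must compare as non-decreasing:
assert!([1, 2, 2, 9].iter().is_sorted_by(|a, b| a.partial_cmp(b)));
assert!(![1, 3, 2].iter().is_sorted_by(|a, b| a.partial_cmp(b)));
```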
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Clone for Iter<'_, T> {
+ fn clone(&self) -> Self {
+ Iter { ptr: self.ptr, end: self.end, _marker: self._marker }
+ }
+}
+
+#[stable(feature = "slice_iter_as_ref", since = "1.13.0")]
+impl<T> AsRef<[T]> for Iter<'_, T> {
+ fn as_ref(&self) -> &[T] {
+ self.as_slice()
+ }
+}
+
+/// Mutable slice iterator.
+///
+/// This struct is created by the [`iter_mut`] method on [slices].
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// // First, we declare a type which has the `iter_mut` method to get the `IterMut`
+/// // struct (`&[usize]` here):
+/// let mut slice = &mut [1, 2, 3];
+///
+/// // Then, we iterate over it and increment each element value:
+/// for element in slice.iter_mut() {
+/// *element += 1;
+/// }
+///
+/// // We now have "[2, 3, 4]":
+/// println!("{slice:?}");
+/// ```
+///
+/// [`iter_mut`]: slice::iter_mut
+/// [slices]: slice
+#[stable(feature = "rust1", since = "1.0.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub struct IterMut<'a, T: 'a> {
+ ptr: NonNull<T>,
+ end: *mut T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
+ // ptr == end is a quick test for the iterator being empty, one that works
+ // for both ZST and non-ZST.
+ _marker: PhantomData<&'a mut T>,
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<T: fmt::Debug> fmt::Debug for IterMut<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("IterMut").field(&self.make_slice()).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Sync> Sync for IterMut<'_, T> {}
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Send> Send for IterMut<'_, T> {}
+
+impl<'a, T> IterMut<'a, T> {
+ #[inline]
+ pub(super) fn new(slice: &'a mut [T]) -> Self {
+ let ptr = slice.as_mut_ptr();
+ // SAFETY: There are several things here:
+ //
+ // `ptr` has been obtained from `slice.as_mut_ptr()` where `slice` is a valid
+ // reference, thus it is non-null and safe to use and pass to
+ // `NonNull::new_unchecked`.
+ //
+ // Adding `slice.len()` to the starting pointer gives a pointer
+ // at the end of `slice`. `end` will never be dereferenced, only checked
+ // for direct pointer equality with `ptr` to check if the iterator is
+ // done.
+ //
+ // In the case of a ZST, the end pointer is just the start pointer plus
+ // the length, which also allows for the fast `ptr == end` check.
+ //
+ // See the `next_unchecked!` and `is_empty!` macros as well as the
+ // `post_inc_start` method for more information.
+ unsafe {
+ assume(!ptr.is_null());
+
+ let end = if mem::size_of::<T>() == 0 {
+ (ptr as *mut u8).wrapping_add(slice.len()) as *mut T
+ } else {
+ ptr.add(slice.len())
+ };
+
+ Self { ptr: NonNull::new_unchecked(ptr), end, _marker: PhantomData }
+ }
+ }
+
+ /// Views the underlying data as a subslice of the original data.
+ ///
+ /// To avoid creating `&mut` references that alias, this is forced
+ /// to consume the iterator.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // First, we declare a type which has the `iter_mut` method to get the `IterMut`
+ /// // struct (`&[usize]` here):
+ /// let mut slice = &mut [1, 2, 3];
+ ///
+ /// {
+ /// // Then, we get the iterator:
+ /// let mut iter = slice.iter_mut();
+ /// // We move to the next element:
+ /// iter.next();
+ /// // So if we print what the `into_slice` method returns here, we have "[2, 3]":
+ /// println!("{:?}", iter.into_slice());
+ /// }
+ ///
+ /// // Now let's modify a value of the slice:
+ /// {
+ /// // First we get back the iterator:
+ /// let mut iter = slice.iter_mut();
+ /// // We change the value of the first element of the slice returned by the `next` method:
+ /// *iter.next().unwrap() += 1;
+ /// }
+ /// // Now slice is "[2, 2, 3]":
+ /// println!("{slice:?}");
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "iter_to_slice", since = "1.4.0")]
+ pub fn into_slice(self) -> &'a mut [T] {
+ // SAFETY: the iterator was created from a mutable slice with pointer
+ // `self.ptr` and length `len!(self)`. This guarantees that all the prerequisites
+ // for `from_raw_parts_mut` are fulfilled.
+ unsafe { from_raw_parts_mut(self.ptr.as_ptr(), len!(self)) }
+ }
+
+ /// Views the underlying data as a subslice of the original data.
+ ///
+ /// To avoid creating `&mut [T]` references that alias, the returned slice
+ /// borrows its lifetime from the iterator the method is applied on.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut slice: &mut [usize] = &mut [1, 2, 3];
+ ///
+ /// // First, we get the iterator:
+ /// let mut iter = slice.iter_mut();
+ /// // So if we check what the `as_slice` method returns here, we have "[1, 2, 3]":
+ /// assert_eq!(iter.as_slice(), &[1, 2, 3]);
+ ///
+ /// // Next, we move to the second element of the slice:
+ /// iter.next();
+ /// // Now `as_slice` returns "[2, 3]":
+ /// assert_eq!(iter.as_slice(), &[2, 3]);
+ /// ```
+ #[must_use]
+ #[stable(feature = "slice_iter_mut_as_slice", since = "1.53.0")]
+ pub fn as_slice(&self) -> &[T] {
+ self.make_slice()
+ }
+
+ /// Views the underlying data as a mutable subslice of the original data.
+ ///
+ /// To avoid creating `&mut [T]` references that alias, the returned slice
+ /// borrows its lifetime from the iterator the method is applied on.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(slice_iter_mut_as_mut_slice)]
+ ///
+ /// let mut slice: &mut [usize] = &mut [1, 2, 3];
+ ///
+ /// // First, we get the iterator:
+ /// let mut iter = slice.iter_mut();
+ /// // Then, we get a mutable slice from it:
+ /// let mut_slice = iter.as_mut_slice();
+ /// // So if we check what the `as_mut_slice` method returned, we have "[1, 2, 3]":
+ /// assert_eq!(mut_slice, &mut [1, 2, 3]);
+ ///
+ /// // We can use it to mutate the slice:
+ /// mut_slice[0] = 4;
+ /// mut_slice[2] = 5;
+ ///
+ /// // Next, we can move to the second element of the slice, checking that
+ /// // it yields the value we just wrote:
+ /// assert_eq!(iter.next(), Some(&mut 4));
+ /// // Now `as_mut_slice` returns "[2, 5]":
+ /// assert_eq!(iter.as_mut_slice(), &mut [2, 5]);
+ /// ```
+ #[must_use]
+ // FIXME: Uncomment the `AsMut<[T]>` impl when this gets stabilized.
+ #[unstable(feature = "slice_iter_mut_as_mut_slice", issue = "93079")]
+ pub fn as_mut_slice(&mut self) -> &mut [T] {
+ // SAFETY: the iterator was created from a mutable slice with pointer
+ // `self.ptr` and length `len!(self)`. This guarantees that all the prerequisites
+ // for `from_raw_parts_mut` are fulfilled.
+ unsafe { from_raw_parts_mut(self.ptr.as_ptr(), len!(self)) }
+ }
+}
+
+#[stable(feature = "slice_iter_mut_as_slice", since = "1.53.0")]
+impl<T> AsRef<[T]> for IterMut<'_, T> {
+ fn as_ref(&self) -> &[T] {
+ self.as_slice()
+ }
+}
+
+// #[stable(feature = "slice_iter_mut_as_mut_slice", since = "FIXME")]
+// impl<T> AsMut<[T]> for IterMut<'_, T> {
+// fn as_mut(&mut self) -> &mut [T] {
+// self.as_mut_slice()
+// }
+// }
+
+iterator! {struct IterMut -> *mut T, &'a mut T, mut, {mut}, {}}
+
+/// An internal abstraction over the splitting iterators, so that
+/// `splitn`, `splitn_mut`, etc. can be implemented once.
+#[doc(hidden)]
+pub(super) trait SplitIter: DoubleEndedIterator {
+ /// Marks the underlying iterator as complete, extracting the remaining
+ /// portion of the slice.
+ fn finish(&mut self) -> Option<Self::Item>;
+}
+
+/// An iterator over subslices separated by elements that match a predicate
+/// function.
+///
+/// This struct is created by the [`split`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let slice = [10, 40, 33, 20];
+/// let mut iter = slice.split(|num| num % 3 == 0);
+/// ```
+///
+/// [`split`]: slice::split
+/// [slices]: slice
+#[stable(feature = "rust1", since = "1.0.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub struct Split<'a, T: 'a, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ // Used for `SplitWhitespace` and `SplitAsciiWhitespace` `as_str` methods
+ pub(crate) v: &'a [T],
+ pred: P,
+ // Used for `SplitAsciiWhitespace` `as_str` method
+ pub(crate) finished: bool,
+}
+
+impl<'a, T: 'a, P: FnMut(&T) -> bool> Split<'a, T, P> {
+ #[inline]
+ pub(super) fn new(slice: &'a [T], pred: P) -> Self {
+ Self { v: slice, pred, finished: false }
+ }
+
+ /// Returns a slice which contains items not yet handled by split.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// #![feature(split_as_slice)]
+ /// let slice = [1,2,3,4,5];
+ /// let mut split = slice.split(|v| v % 2 == 0);
+ /// assert!(split.next().is_some());
+ /// assert_eq!(split.as_slice(), &[3,4,5]);
+ /// ```
+ #[unstable(feature = "split_as_slice", issue = "96137")]
+ pub fn as_slice(&self) -> &'a [T] {
+ if self.finished { &[] } else { &self.v }
+ }
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<T: fmt::Debug, P> fmt::Debug for Split<'_, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Split").field("v", &self.v).field("finished", &self.finished).finish()
+ }
+}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, P> Clone for Split<'_, T, P>
+where
+ P: Clone + FnMut(&T) -> bool,
+{
+ fn clone(&self) -> Self {
+ Split { v: self.v, pred: self.pred.clone(), finished: self.finished }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T, P> Iterator for Split<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ type Item = &'a [T];
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a [T]> {
+ if self.finished {
+ return None;
+ }
+
+ match self.v.iter().position(|x| (self.pred)(x)) {
+ None => self.finish(),
+ Some(idx) => {
+ let ret = Some(&self.v[..idx]);
+ self.v = &self.v[idx + 1..];
+ ret
+ }
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.finished {
+ (0, Some(0))
+ } else {
+ // If the predicate doesn't match anything, we yield one slice.
+ // If it matches every element, we yield `len() + 1` empty slices.
+ (1, Some(self.v.len() + 1))
+ }
+ }
+}
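A small sketch (illustrative only) of the `size_hint` upper bound computed above: a predicate that matches every element produces `len() + 1` empty subslices.

```rust
let v = [0, 0, 0];
let parts: Vec<&[i32]> = v.split(|n| *n == 0).collect();
// Separators at every position yield `len() + 1` empty subslices:
assert_eq!(parts.len(), v.len() + 1);
assert!(parts.iter().all(|p| p.is_empty()));
```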
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T, P> DoubleEndedIterator for Split<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a [T]> {
+ if self.finished {
+ return None;
+ }
+
+ match self.v.iter().rposition(|x| (self.pred)(x)) {
+ None => self.finish(),
+ Some(idx) => {
+ let ret = Some(&self.v[idx + 1..]);
+ self.v = &self.v[..idx];
+ ret
+ }
+ }
+ }
+}
+
+impl<'a, T, P> SplitIter for Split<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ #[inline]
+ fn finish(&mut self) -> Option<&'a [T]> {
+ if self.finished {
+ None
+ } else {
+ self.finished = true;
+ Some(self.v)
+ }
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T, P> FusedIterator for Split<'_, T, P> where P: FnMut(&T) -> bool {}
+
+/// An iterator over subslices separated by elements that match a predicate
+/// function. Unlike `Split`, it contains the matched part as a terminator
+/// of the subslice.
+///
+/// This struct is created by the [`split_inclusive`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let slice = [10, 40, 33, 20];
+/// let mut iter = slice.split_inclusive(|num| num % 3 == 0);
+/// ```
+///
+/// [`split_inclusive`]: slice::split_inclusive
+/// [slices]: slice
+#[stable(feature = "split_inclusive", since = "1.51.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub struct SplitInclusive<'a, T: 'a, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ v: &'a [T],
+ pred: P,
+ finished: bool,
+}
+
+impl<'a, T: 'a, P: FnMut(&T) -> bool> SplitInclusive<'a, T, P> {
+ #[inline]
+ pub(super) fn new(slice: &'a [T], pred: P) -> Self {
+ let finished = slice.is_empty();
+ Self { v: slice, pred, finished }
+ }
+}
+
+#[stable(feature = "split_inclusive", since = "1.51.0")]
+impl<T: fmt::Debug, P> fmt::Debug for SplitInclusive<'_, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SplitInclusive")
+ .field("v", &self.v)
+ .field("finished", &self.finished)
+ .finish()
+ }
+}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[stable(feature = "split_inclusive", since = "1.51.0")]
+impl<T, P> Clone for SplitInclusive<'_, T, P>
+where
+ P: Clone + FnMut(&T) -> bool,
+{
+ fn clone(&self) -> Self {
+ SplitInclusive { v: self.v, pred: self.pred.clone(), finished: self.finished }
+ }
+}
+
+#[stable(feature = "split_inclusive", since = "1.51.0")]
+impl<'a, T, P> Iterator for SplitInclusive<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ type Item = &'a [T];
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a [T]> {
+ if self.finished {
+ return None;
+ }
+
+ let idx =
+ self.v.iter().position(|x| (self.pred)(x)).map(|idx| idx + 1).unwrap_or(self.v.len());
+ if idx == self.v.len() {
+ self.finished = true;
+ }
+ let ret = Some(&self.v[..idx]);
+ self.v = &self.v[idx..];
+ ret
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.finished {
+ (0, Some(0))
+ } else {
+ // If the predicate doesn't match anything, we yield one slice.
+ // If it matches every element, we yield `len()` one-element slices,
+ // or a single empty slice.
+ (1, Some(cmp::max(1, self.v.len())))
+ }
+ }
+}
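For contrast with `Split`, a brief sketch (illustrative only) showing the matched element kept as the terminator of its subslice:

```rust
let v = [10, 40, 33, 20];
let parts: Vec<&[i32]> = v.split_inclusive(|n| n % 3 == 0).collect();
// `33` terminates the first subslice instead of being dropped:
assert_eq!(parts, [&[10, 40, 33][..], &[20][..]]);
```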
+
+#[stable(feature = "split_inclusive", since = "1.51.0")]
+impl<'a, T, P> DoubleEndedIterator for SplitInclusive<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a [T]> {
+ if self.finished {
+ return None;
+ }
+
+ // The last index of self.v is already checked and found to match
+ // by the last iteration, so we start searching for a new match
+ // one index to the left.
+ let remainder = if self.v.is_empty() { &[] } else { &self.v[..(self.v.len() - 1)] };
+ let idx = remainder.iter().rposition(|x| (self.pred)(x)).map(|idx| idx + 1).unwrap_or(0);
+ if idx == 0 {
+ self.finished = true;
+ }
+ let ret = Some(&self.v[idx..]);
+ self.v = &self.v[..idx];
+ ret
+ }
+}
+
+#[stable(feature = "split_inclusive", since = "1.51.0")]
+impl<T, P> FusedIterator for SplitInclusive<'_, T, P> where P: FnMut(&T) -> bool {}
+
+/// An iterator over the mutable subslices of the slice which are separated
+/// by elements that match `pred`.
+///
+/// This struct is created by the [`split_mut`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let mut v = [10, 40, 30, 20, 60, 50];
+/// let iter = v.split_mut(|num| *num % 3 == 0);
+/// ```
+///
+/// [`split_mut`]: slice::split_mut
+/// [slices]: slice
+#[stable(feature = "rust1", since = "1.0.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub struct SplitMut<'a, T: 'a, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ v: &'a mut [T],
+ pred: P,
+ finished: bool,
+}
+
+impl<'a, T: 'a, P: FnMut(&T) -> bool> SplitMut<'a, T, P> {
+ #[inline]
+ pub(super) fn new(slice: &'a mut [T], pred: P) -> Self {
+ Self { v: slice, pred, finished: false }
+ }
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<T: fmt::Debug, P> fmt::Debug for SplitMut<'_, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SplitMut").field("v", &self.v).field("finished", &self.finished).finish()
+ }
+}
+
+impl<'a, T, P> SplitIter for SplitMut<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ #[inline]
+ fn finish(&mut self) -> Option<&'a mut [T]> {
+ if self.finished {
+ None
+ } else {
+ self.finished = true;
+ Some(mem::replace(&mut self.v, &mut []))
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T, P> Iterator for SplitMut<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ type Item = &'a mut [T];
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a mut [T]> {
+ if self.finished {
+ return None;
+ }
+
+ match self.v.iter().position(|x| (self.pred)(x)) {
+ None => self.finish(),
+ Some(idx) => {
+ let tmp = mem::take(&mut self.v);
+ // idx is the index of the element we are splitting on. We want to set self to the
+ // region after idx, and return the subslice before and not including idx.
+ // So first we split after idx
+ let (head, tail) = tmp.split_at_mut(idx + 1);
+ self.v = tail;
+ // Then return the subslice up to but not including the found element
+ Some(&mut head[..idx])
+ }
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.finished {
+ (0, Some(0))
+ } else {
+ // If the predicate doesn't match anything, we yield one slice.
+ // If it matches every element, we yield `len() + 1` empty slices.
+ (1, Some(self.v.len() + 1))
+ }
+ }
+}
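A short sketch (illustrative only) of the mutable splitting above: `next` carves each group off the front with `split_at_mut`, so the `&mut` subslices it hands out never overlap and can be mutated independently.

```rust
let mut v = [10, 40, 30, 20, 60, 50];
for group in v.split_mut(|num| *num % 3 == 0) {
    // Each `group` is a disjoint `&mut [i32]`; separators are skipped:
    group.iter_mut().for_each(|x| *x += 1);
}
assert_eq!(v, [11, 41, 30, 21, 60, 51]);
```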
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T, P> DoubleEndedIterator for SplitMut<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a mut [T]> {
+ if self.finished {
+ return None;
+ }
+
+ let idx_opt = {
+ // work around borrowck limitations
+ let pred = &mut self.pred;
+ self.v.iter().rposition(|x| (*pred)(x))
+ };
+ match idx_opt {
+ None => self.finish(),
+ Some(idx) => {
+ let tmp = mem::replace(&mut self.v, &mut []);
+ let (head, tail) = tmp.split_at_mut(idx);
+ self.v = head;
+ Some(&mut tail[1..])
+ }
+ }
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T, P> FusedIterator for SplitMut<'_, T, P> where P: FnMut(&T) -> bool {}
+
+/// An iterator over the mutable subslices of the slice which are separated
+/// by elements that match `pred`. Unlike `SplitMut`, it contains the matched
+/// parts at the ends of the subslices.
+///
+/// This struct is created by the [`split_inclusive_mut`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let mut v = [10, 40, 30, 20, 60, 50];
+/// let iter = v.split_inclusive_mut(|num| *num % 3 == 0);
+/// ```
+///
+/// [`split_inclusive_mut`]: slice::split_inclusive_mut
+/// [slices]: slice
+#[stable(feature = "split_inclusive", since = "1.51.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub struct SplitInclusiveMut<'a, T: 'a, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ v: &'a mut [T],
+ pred: P,
+ finished: bool,
+}
+
+impl<'a, T: 'a, P: FnMut(&T) -> bool> SplitInclusiveMut<'a, T, P> {
+ #[inline]
+ pub(super) fn new(slice: &'a mut [T], pred: P) -> Self {
+ let finished = slice.is_empty();
+ Self { v: slice, pred, finished }
+ }
+}
+
+#[stable(feature = "split_inclusive", since = "1.51.0")]
+impl<T: fmt::Debug, P> fmt::Debug for SplitInclusiveMut<'_, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SplitInclusiveMut")
+ .field("v", &self.v)
+ .field("finished", &self.finished)
+ .finish()
+ }
+}
+
+#[stable(feature = "split_inclusive", since = "1.51.0")]
+impl<'a, T, P> Iterator for SplitInclusiveMut<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ type Item = &'a mut [T];
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a mut [T]> {
+ if self.finished {
+ return None;
+ }
+
+ let idx_opt = {
+ // work around borrowck limitations
+ let pred = &mut self.pred;
+ self.v.iter().position(|x| (*pred)(x))
+ };
+ let idx = idx_opt.map(|idx| idx + 1).unwrap_or(self.v.len());
+ if idx == self.v.len() {
+ self.finished = true;
+ }
+ let tmp = mem::replace(&mut self.v, &mut []);
+ let (head, tail) = tmp.split_at_mut(idx);
+ self.v = tail;
+ Some(head)
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.finished {
+ (0, Some(0))
+ } else {
+ // If the predicate doesn't match anything, we yield one slice.
+ // If it matches every element, we yield `len()` one-element slices,
+ // or a single empty slice.
+ (1, Some(cmp::max(1, self.v.len())))
+ }
+ }
+}
+
+#[stable(feature = "split_inclusive", since = "1.51.0")]
+impl<'a, T, P> DoubleEndedIterator for SplitInclusiveMut<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a mut [T]> {
+ if self.finished {
+ return None;
+ }
+
+ let idx_opt = if self.v.is_empty() {
+ None
+ } else {
+ // work around borrowck limitations
+ let pred = &mut self.pred;
+
+ // The last index of self.v is already checked and found to match
+ // by the last iteration, so we start searching for a new match
+ // one index to the left.
+ let remainder = &self.v[..(self.v.len() - 1)];
+ remainder.iter().rposition(|x| (*pred)(x))
+ };
+ let idx = idx_opt.map(|idx| idx + 1).unwrap_or(0);
+ if idx == 0 {
+ self.finished = true;
+ }
+ let tmp = mem::replace(&mut self.v, &mut []);
+ let (head, tail) = tmp.split_at_mut(idx);
+ self.v = head;
+ Some(tail)
+ }
+}
+
+#[stable(feature = "split_inclusive", since = "1.51.0")]
+impl<T, P> FusedIterator for SplitInclusiveMut<'_, T, P> where P: FnMut(&T) -> bool {}
+
+/// An iterator over subslices separated by elements that match a predicate
+/// function, starting from the end of the slice.
+///
+/// This struct is created by the [`rsplit`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let slice = [11, 22, 33, 0, 44, 55];
+/// let iter = slice.rsplit(|num| *num == 0);
+/// ```
+///
+/// [`rsplit`]: slice::rsplit
+/// [slices]: slice
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub struct RSplit<'a, T: 'a, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ inner: Split<'a, T, P>,
+}
+
+impl<'a, T: 'a, P: FnMut(&T) -> bool> RSplit<'a, T, P> {
+ #[inline]
+ pub(super) fn new(slice: &'a [T], pred: P) -> Self {
+ Self { inner: Split::new(slice, pred) }
+ }
+}
+
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+impl<T: fmt::Debug, P> fmt::Debug for RSplit<'_, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("RSplit")
+ .field("v", &self.inner.v)
+ .field("finished", &self.inner.finished)
+ .finish()
+ }
+}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+impl<T, P> Clone for RSplit<'_, T, P>
+where
+ P: Clone + FnMut(&T) -> bool,
+{
+ fn clone(&self) -> Self {
+ RSplit { inner: self.inner.clone() }
+ }
+}
+
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+impl<'a, T, P> Iterator for RSplit<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ type Item = &'a [T];
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a [T]> {
+ self.inner.next_back()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+impl<'a, T, P> DoubleEndedIterator for RSplit<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a [T]> {
+ self.inner.next()
+ }
+}
+
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+impl<'a, T, P> SplitIter for RSplit<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ #[inline]
+ fn finish(&mut self) -> Option<&'a [T]> {
+ self.inner.finish()
+ }
+}
+
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+impl<T, P> FusedIterator for RSplit<'_, T, P> where P: FnMut(&T) -> bool {}
+
+/// An iterator over the subslices of the slice which are separated
+/// by elements that match `pred`, starting from the end of the slice.
+///
+/// This struct is created by the [`rsplit_mut`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let mut slice = [11, 22, 33, 0, 44, 55];
+/// let iter = slice.rsplit_mut(|num| *num == 0);
+/// ```
+///
+/// [`rsplit_mut`]: slice::rsplit_mut
+/// [slices]: slice
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub struct RSplitMut<'a, T: 'a, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ inner: SplitMut<'a, T, P>,
+}
+
+impl<'a, T: 'a, P: FnMut(&T) -> bool> RSplitMut<'a, T, P> {
+ #[inline]
+ pub(super) fn new(slice: &'a mut [T], pred: P) -> Self {
+ Self { inner: SplitMut::new(slice, pred) }
+ }
+}
+
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+impl<T: fmt::Debug, P> fmt::Debug for RSplitMut<'_, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("RSplitMut")
+ .field("v", &self.inner.v)
+ .field("finished", &self.inner.finished)
+ .finish()
+ }
+}
+
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+impl<'a, T, P> SplitIter for RSplitMut<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ #[inline]
+ fn finish(&mut self) -> Option<&'a mut [T]> {
+ self.inner.finish()
+ }
+}
+
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+impl<'a, T, P> Iterator for RSplitMut<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ type Item = &'a mut [T];
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a mut [T]> {
+ self.inner.next_back()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+impl<'a, T, P> DoubleEndedIterator for RSplitMut<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a mut [T]> {
+ self.inner.next()
+ }
+}
+
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+impl<T, P> FusedIterator for RSplitMut<'_, T, P> where P: FnMut(&T) -> bool {}
+
+/// A private iterator over subslices separated by elements that
+/// match a predicate function, splitting at most a fixed number of
+/// times.
+#[derive(Debug)]
+struct GenericSplitN<I> {
+ iter: I,
+ count: usize,
+}
+
+impl<T, I: SplitIter<Item = T>> Iterator for GenericSplitN<I> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ match self.count {
+ 0 => None,
+ 1 => {
+ self.count -= 1;
+ self.iter.finish()
+ }
+ _ => {
+ self.count -= 1;
+ self.iter.next()
+ }
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (lower, upper_opt) = self.iter.size_hint();
+ (
+ cmp::min(self.count, lower),
+ Some(upper_opt.map_or(self.count, |upper| cmp::min(self.count, upper))),
+ )
+ }
+}
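A sketch (illustrative only) of the `count` bookkeeping, via the stable `splitn` wrapper defined below: once only one split remains, `finish` yields the untouched rest of the slice, separators included.

```rust
let v = [10, 40, 30, 20, 60, 50];
let parts: Vec<&[i32]> = v.splitn(2, |num| *num % 3 == 0).collect();
// One ordinary split, then `finish` returns everything left over:
assert_eq!(parts, [&[10, 40][..], &[20, 60, 50][..]]);
```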
+
+/// An iterator over subslices separated by elements that match a predicate
+/// function, limited to a given number of splits.
+///
+/// This struct is created by the [`splitn`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let slice = [10, 40, 30, 20, 60, 50];
+/// let iter = slice.splitn(2, |num| *num % 3 == 0);
+/// ```
+///
+/// [`splitn`]: slice::splitn
+/// [slices]: slice
+#[stable(feature = "rust1", since = "1.0.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub struct SplitN<'a, T: 'a, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ inner: GenericSplitN<Split<'a, T, P>>,
+}
+
+impl<'a, T: 'a, P: FnMut(&T) -> bool> SplitN<'a, T, P> {
+ #[inline]
+ pub(super) fn new(s: Split<'a, T, P>, n: usize) -> Self {
+ Self { inner: GenericSplitN { iter: s, count: n } }
+ }
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<T: fmt::Debug, P> fmt::Debug for SplitN<'_, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SplitN").field("inner", &self.inner).finish()
+ }
+}
+
+/// An iterator over subslices separated by elements that match a
+/// predicate function, limited to a given number of splits, starting
+/// from the end of the slice.
+///
+/// This struct is created by the [`rsplitn`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let slice = [10, 40, 30, 20, 60, 50];
+/// let iter = slice.rsplitn(2, |num| *num % 3 == 0);
+/// ```
+///
+/// [`rsplitn`]: slice::rsplitn
+/// [slices]: slice
+#[stable(feature = "rust1", since = "1.0.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub struct RSplitN<'a, T: 'a, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ inner: GenericSplitN<RSplit<'a, T, P>>,
+}
+
+impl<'a, T: 'a, P: FnMut(&T) -> bool> RSplitN<'a, T, P> {
+ #[inline]
+ pub(super) fn new(s: RSplit<'a, T, P>, n: usize) -> Self {
+ Self { inner: GenericSplitN { iter: s, count: n } }
+ }
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<T: fmt::Debug, P> fmt::Debug for RSplitN<'_, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("RSplitN").field("inner", &self.inner).finish()
+ }
+}
+
+/// An iterator over subslices separated by elements that match a predicate
+/// function, limited to a given number of splits.
+///
+/// This struct is created by the [`splitn_mut`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let mut slice = [10, 40, 30, 20, 60, 50];
+/// let iter = slice.splitn_mut(2, |num| *num % 3 == 0);
+/// ```
+///
+/// [`splitn_mut`]: slice::splitn_mut
+/// [slices]: slice
+#[stable(feature = "rust1", since = "1.0.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub struct SplitNMut<'a, T: 'a, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ inner: GenericSplitN<SplitMut<'a, T, P>>,
+}
+
+impl<'a, T: 'a, P: FnMut(&T) -> bool> SplitNMut<'a, T, P> {
+ #[inline]
+ pub(super) fn new(s: SplitMut<'a, T, P>, n: usize) -> Self {
+ Self { inner: GenericSplitN { iter: s, count: n } }
+ }
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<T: fmt::Debug, P> fmt::Debug for SplitNMut<'_, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SplitNMut").field("inner", &self.inner).finish()
+ }
+}
+
+/// An iterator over subslices separated by elements that match a
+/// predicate function, limited to a given number of splits, starting
+/// from the end of the slice.
+///
+/// This struct is created by the [`rsplitn_mut`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let mut slice = [10, 40, 30, 20, 60, 50];
+/// let iter = slice.rsplitn_mut(2, |num| *num % 3 == 0);
+/// ```
+///
+/// [`rsplitn_mut`]: slice::rsplitn_mut
+/// [slices]: slice
+#[stable(feature = "rust1", since = "1.0.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub struct RSplitNMut<'a, T: 'a, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ inner: GenericSplitN<RSplitMut<'a, T, P>>,
+}
+
+impl<'a, T: 'a, P: FnMut(&T) -> bool> RSplitNMut<'a, T, P> {
+ #[inline]
+ pub(super) fn new(s: RSplitMut<'a, T, P>, n: usize) -> Self {
+ Self { inner: GenericSplitN { iter: s, count: n } }
+ }
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<T: fmt::Debug, P> fmt::Debug for RSplitNMut<'_, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("RSplitNMut").field("inner", &self.inner).finish()
+ }
+}
+
+forward_iterator! { SplitN: T, &'a [T] }
+forward_iterator! { RSplitN: T, &'a [T] }
+forward_iterator! { SplitNMut: T, &'a mut [T] }
+forward_iterator! { RSplitNMut: T, &'a mut [T] }
+
+/// An iterator over overlapping subslices of length `size`.
+///
+/// This struct is created by the [`windows`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let slice = ['r', 'u', 's', 't'];
+/// let iter = slice.windows(2);
+/// ```
+///
+/// [`windows`]: slice::windows
+/// [slices]: slice
+#[derive(Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub struct Windows<'a, T: 'a> {
+ v: &'a [T],
+ size: NonZeroUsize,
+}
+
+impl<'a, T: 'a> Windows<'a, T> {
+ #[inline]
+ pub(super) fn new(slice: &'a [T], size: NonZeroUsize) -> Self {
+ Self { v: slice, size }
+ }
+}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Clone for Windows<'_, T> {
+ fn clone(&self) -> Self {
+ Windows { v: self.v, size: self.size }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> Iterator for Windows<'a, T> {
+ type Item = &'a [T];
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a [T]> {
+ if self.size.get() > self.v.len() {
+ None
+ } else {
+ let ret = Some(&self.v[..self.size.get()]);
+ self.v = &self.v[1..];
+ ret
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.size.get() > self.v.len() {
+ (0, Some(0))
+ } else {
+ let size = self.v.len() - self.size.get() + 1;
+ (size, Some(size))
+ }
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ let (end, overflow) = self.size.get().overflowing_add(n);
+ if end > self.v.len() || overflow {
+ self.v = &[];
+ None
+ } else {
+ let nth = &self.v[n..end];
+ self.v = &self.v[n + 1..];
+ Some(nth)
+ }
+ }
+
+ #[inline]
+ fn last(self) -> Option<Self::Item> {
+ if self.size.get() > self.v.len() {
+ None
+ } else {
+ let start = self.v.len() - self.size.get();
+ Some(&self.v[start..])
+ }
+ }
+
+ unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
+ // SAFETY: since the caller guarantees that `idx` is in bounds,
+ // `idx` cannot overflow an `isize`, and the slice created by
+ // `from_raw_parts` is a subslice of `self.v`, so it is guaranteed
+ // to be valid for the lifetime `'a` of `self.v`.
+ unsafe { from_raw_parts(self.v.as_ptr().add(idx), self.size.get()) }
+ }
+}
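A doctest-style sketch (illustrative only) of the windowing above: each step advances the front by one element, and iteration ends once fewer than `size` elements remain.

```rust
let slice = ['r', 'u', 's', 't'];
let mut iter = slice.windows(2);
assert_eq!(iter.next(), Some(&['r', 'u'][..]));
assert_eq!(iter.next(), Some(&['u', 's'][..]));
assert_eq!(iter.next(), Some(&['s', 't'][..]));
// Fewer than `size` elements remain, so the iterator is exhausted:
assert_eq!(iter.next(), None);
```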
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> DoubleEndedIterator for Windows<'a, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a [T]> {
+ if self.size.get() > self.v.len() {
+ None
+ } else {
+ let ret = Some(&self.v[self.v.len() - self.size.get()..]);
+ self.v = &self.v[..self.v.len() - 1];
+ ret
+ }
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ let (end, overflow) = self.v.len().overflowing_sub(n);
+ if end < self.size.get() || overflow {
+ self.v = &[];
+ None
+ } else {
+ let ret = &self.v[end - self.size.get()..end];
+ self.v = &self.v[..end - 1];
+ Some(ret)
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for Windows<'_, T> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T> TrustedLen for Windows<'_, T> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for Windows<'_, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<'a, T> TrustedRandomAccess for Windows<'a, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<'a, T> TrustedRandomAccessNoCoerce for Windows<'a, T> {
+ const MAY_HAVE_SIDE_EFFECT: bool = false;
+}
+
+/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a
+/// time), starting at the beginning of the slice.
+///
+/// When the slice len is not evenly divided by the chunk size, the last slice
+/// of the iteration will be the remainder.
+///
+/// This struct is created by the [`chunks`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let slice = ['l', 'o', 'r', 'e', 'm'];
+/// let iter = slice.chunks(2);
+/// ```
+///
+/// [`chunks`]: slice::chunks
+/// [slices]: slice
+#[derive(Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub struct Chunks<'a, T: 'a> {
+ v: &'a [T],
+ chunk_size: usize,
+}
+
+impl<'a, T: 'a> Chunks<'a, T> {
+ #[inline]
+ pub(super) fn new(slice: &'a [T], size: usize) -> Self {
+ Self { v: slice, chunk_size: size }
+ }
+}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Clone for Chunks<'_, T> {
+ fn clone(&self) -> Self {
+ Chunks { v: self.v, chunk_size: self.chunk_size }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> Iterator for Chunks<'a, T> {
+ type Item = &'a [T];
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a [T]> {
+ if self.v.is_empty() {
+ None
+ } else {
+ let chunksz = cmp::min(self.v.len(), self.chunk_size);
+ let (fst, snd) = self.v.split_at(chunksz);
+ self.v = snd;
+ Some(fst)
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.v.is_empty() {
+ (0, Some(0))
+ } else {
+ let n = self.v.len() / self.chunk_size;
+ let rem = self.v.len() % self.chunk_size;
+ let n = if rem > 0 { n + 1 } else { n };
+ (n, Some(n))
+ }
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ let (start, overflow) = n.overflowing_mul(self.chunk_size);
+ if start >= self.v.len() || overflow {
+ self.v = &[];
+ None
+ } else {
+ let end = match start.checked_add(self.chunk_size) {
+ Some(sum) => cmp::min(self.v.len(), sum),
+ None => self.v.len(),
+ };
+ let nth = &self.v[start..end];
+ self.v = &self.v[end..];
+ Some(nth)
+ }
+ }
+
+ #[inline]
+ fn last(self) -> Option<Self::Item> {
+ if self.v.is_empty() {
+ None
+ } else {
+ let start = (self.v.len() - 1) / self.chunk_size * self.chunk_size;
+ Some(&self.v[start..])
+ }
+ }
+
+ unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
+ let start = idx * self.chunk_size;
+ // SAFETY: the caller guarantees that `idx` is in bounds,
+ // which means that `start` must be in bounds of the
+ // underlying `self.v` slice, and we made sure that `len`
+ // is also in bounds of `self.v`. Thus, `start` cannot overflow
+ // an `isize`, and the slice constructed by `from_raw_parts`
+ // is a subslice of `self.v` which is guaranteed to be valid
+ // for the lifetime `'a` of `self.v`.
+ unsafe {
+ let len = cmp::min(self.v.len().unchecked_sub(start), self.chunk_size);
+ from_raw_parts(self.v.as_ptr().add(start), len)
+ }
+ }
+}
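A small sketch (illustrative only) of the chunking above: `next` splits off `chunk_size` elements at a time, and the final chunk is whatever remainder is left.

```rust
let slice = ['l', 'o', 'r', 'e', 'm'];
let mut iter = slice.chunks(2);
assert_eq!(iter.next(), Some(&['l', 'o'][..]));
assert_eq!(iter.next(), Some(&['r', 'e'][..]));
// The last chunk is the remainder and may be shorter than `chunk_size`:
assert_eq!(iter.next(), Some(&['m'][..]));
assert_eq!(iter.next(), None);
```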
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> DoubleEndedIterator for Chunks<'a, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a [T]> {
+ if self.v.is_empty() {
+ None
+ } else {
+ let remainder = self.v.len() % self.chunk_size;
+ let chunksz = if remainder != 0 { remainder } else { self.chunk_size };
+ // SAFETY: split_at_unchecked requires the argument be less than or
+ // equal to the length. This is guaranteed, but subtle: `chunksz`
+ // will always either be `self.v.len() % self.chunk_size`, which
+ // will always evaluate to at most `self.v.len()` (or
+ // panic, in the case that `self.chunk_size` is zero), or it can be
+ // `self.chunk_size`, in the case that the length is exactly
+ // divisible by the chunk size.
+ //
+ // While it seems like using `self.chunk_size` in this case could
+ // lead to a value greater than `self.v.len()`, it cannot: if
+ // `self.chunk_size` were greater than `self.v.len()`, then
+ // `self.v.len() % self.chunk_size` would return nonzero (note that
+ // in this branch of the `if`, we already know that `self.v` is
+ // non-empty).
+ let (fst, snd) = unsafe { self.v.split_at_unchecked(self.v.len() - chunksz) };
+ self.v = fst;
+ Some(snd)
+ }
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ let len = self.len();
+ if n >= len {
+ self.v = &[];
+ None
+ } else {
+ let start = (len - 1 - n) * self.chunk_size;
+ let end = match start.checked_add(self.chunk_size) {
+ Some(res) => cmp::min(self.v.len(), res),
+ None => self.v.len(),
+ };
+ let nth_back = &self.v[start..end];
+ self.v = &self.v[..start];
+ Some(nth_back)
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for Chunks<'_, T> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T> TrustedLen for Chunks<'_, T> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for Chunks<'_, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<'a, T> TrustedRandomAccess for Chunks<'a, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<'a, T> TrustedRandomAccessNoCoerce for Chunks<'a, T> {
+ const MAY_HAVE_SIDE_EFFECT: bool = false;
+}
+
+/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size`
+/// elements at a time), starting at the beginning of the slice.
+///
+/// When the slice len is not evenly divided by the chunk size, the last slice
+/// of the iteration will be the remainder.
+///
+/// This struct is created by the [`chunks_mut`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let mut slice = ['l', 'o', 'r', 'e', 'm'];
+/// let iter = slice.chunks_mut(2);
+/// ```
+///
+/// [`chunks_mut`]: slice::chunks_mut
+/// [slices]: slice
+#[derive(Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub struct ChunksMut<'a, T: 'a> {
+ /// # Safety
+ /// This slice pointer must point at a valid region of `T` with at least length `v.len()`. Normally,
+ /// those requirements would mean that we could instead use a `&mut [T]` here, but we cannot
+ /// because `__iterator_get_unchecked` needs to return `&mut [T]`, which guarantees certain aliasing
+ /// properties that we cannot uphold if we hold on to the full original `&mut [T]`. Wrapping a raw
+ /// slice instead lets us hand out non-overlapping `&mut [T]` subslices of the slice we wrap.
+ v: *mut [T],
+ chunk_size: usize,
+ _marker: PhantomData<&'a mut T>,
+}
+
+impl<'a, T: 'a> ChunksMut<'a, T> {
+ #[inline]
+ pub(super) fn new(slice: &'a mut [T], size: usize) -> Self {
+ Self { v: slice, chunk_size: size, _marker: PhantomData }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> Iterator for ChunksMut<'a, T> {
+ type Item = &'a mut [T];
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a mut [T]> {
+ if self.v.is_empty() {
+ None
+ } else {
+ let sz = cmp::min(self.v.len(), self.chunk_size);
+ // SAFETY: The self.v contract ensures that any split_at_mut is valid.
+ let (head, tail) = unsafe { self.v.split_at_mut(sz) };
+ self.v = tail;
+ // SAFETY: Nothing else points to or will point to the contents of this slice.
+ Some(unsafe { &mut *head })
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.v.is_empty() {
+ (0, Some(0))
+ } else {
+ let n = self.v.len() / self.chunk_size;
+ let rem = self.v.len() % self.chunk_size;
+ let n = if rem > 0 { n + 1 } else { n };
+ (n, Some(n))
+ }
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<&'a mut [T]> {
+ let (start, overflow) = n.overflowing_mul(self.chunk_size);
+ if start >= self.v.len() || overflow {
+ self.v = &mut [];
+ None
+ } else {
+ let end = match start.checked_add(self.chunk_size) {
+ Some(sum) => cmp::min(self.v.len(), sum),
+ None => self.v.len(),
+ };
+ // SAFETY: The self.v contract ensures that any split_at_mut is valid.
+ let (head, tail) = unsafe { self.v.split_at_mut(end) };
+ // SAFETY: The self.v contract ensures that any split_at_mut is valid.
+ let (_, nth) = unsafe { head.split_at_mut(start) };
+ self.v = tail;
+ // SAFETY: Nothing else points to or will point to the contents of this slice.
+ Some(unsafe { &mut *nth })
+ }
+ }
+
+ #[inline]
+ fn last(self) -> Option<Self::Item> {
+ if self.v.is_empty() {
+ None
+ } else {
+ let start = (self.v.len() - 1) / self.chunk_size * self.chunk_size;
+ // SAFETY: Nothing else points to or will point to the contents of this slice.
+ Some(unsafe { &mut *self.v.get_unchecked_mut(start..) })
+ }
+ }
+
+ unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
+ let start = idx * self.chunk_size;
+ // SAFETY: see comments for `Chunks::__iterator_get_unchecked` and `self.v`.
+ //
+ // Note also that the caller guarantees that we're never called
+ // with the same index again, and that no other methods that will
+ // access this subslice are called, so it is valid for the returned
+ // slice to be mutable.
+ unsafe {
+ let len = cmp::min(self.v.len().unchecked_sub(start), self.chunk_size);
+ from_raw_parts_mut(self.v.as_mut_ptr().add(start), len)
+ }
+ }
+}
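A brief sketch (illustrative only) of the mutable chunking above: the raw-slice representation lets the iterator hand out non-overlapping `&mut` chunks, each of which may be written through independently.

```rust
let mut v = [0i32; 5];
let mut count = 1;
for chunk in v.chunks_mut(2) {
    // Each chunk is a disjoint `&mut [i32]`, so these writes never alias:
    for elem in chunk.iter_mut() {
        *elem += count;
    }
    count += 1;
}
assert_eq!(v, [1, 1, 2, 2, 3]);
```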
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> DoubleEndedIterator for ChunksMut<'a, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a mut [T]> {
+ if self.v.is_empty() {
+ None
+ } else {
+ let remainder = self.v.len() % self.chunk_size;
+ let sz = if remainder != 0 { remainder } else { self.chunk_size };
+ let len = self.v.len();
+ // SAFETY: Similar to `Chunks::next_back`
+ let (head, tail) = unsafe { self.v.split_at_mut_unchecked(len - sz) };
+ self.v = head;
+ // SAFETY: Nothing else points to or will point to the contents of this slice.
+ Some(unsafe { &mut *tail })
+ }
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ let len = self.len();
+ if n >= len {
+ self.v = &mut [];
+ None
+ } else {
+ let start = (len - 1 - n) * self.chunk_size;
+ let end = match start.checked_add(self.chunk_size) {
+ Some(res) => cmp::min(self.v.len(), res),
+ None => self.v.len(),
+ };
+ // SAFETY: The self.v contract ensures that any split_at_mut is valid.
+ let (temp, _tail) = unsafe { self.v.split_at_mut(end) };
+ // SAFETY: The self.v contract ensures that any split_at_mut is valid.
+ let (head, nth_back) = unsafe { temp.split_at_mut(start) };
+ self.v = head;
+ // SAFETY: Nothing else points to or will point to the contents of this slice.
+ Some(unsafe { &mut *nth_back })
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for ChunksMut<'_, T> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T> TrustedLen for ChunksMut<'_, T> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for ChunksMut<'_, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<'a, T> TrustedRandomAccess for ChunksMut<'a, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<'a, T> TrustedRandomAccessNoCoerce for ChunksMut<'a, T> {
+ const MAY_HAVE_SIDE_EFFECT: bool = false;
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T> Send for ChunksMut<'_, T> where T: Send {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T> Sync for ChunksMut<'_, T> where T: Sync {}
+
+/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a
+/// time), starting at the beginning of the slice.
+///
+/// When the slice len is not evenly divided by the chunk size, the last
+/// up to `chunk_size-1` elements will be omitted but can be retrieved from
+/// the iterator with the [`remainder`] function.
+///
+/// This struct is created by the [`chunks_exact`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let slice = ['l', 'o', 'r', 'e', 'm'];
+/// let iter = slice.chunks_exact(2);
+/// ```
+///
+/// [`chunks_exact`]: slice::chunks_exact
+/// [`remainder`]: ChunksExact::remainder
+/// [slices]: slice
+#[derive(Debug)]
+#[stable(feature = "chunks_exact", since = "1.31.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub struct ChunksExact<'a, T: 'a> {
+ v: &'a [T],
+ rem: &'a [T],
+ chunk_size: usize,
+}
+
+impl<'a, T> ChunksExact<'a, T> {
+ #[inline]
+ pub(super) fn new(slice: &'a [T], chunk_size: usize) -> Self {
+ let rem = slice.len() % chunk_size;
+ let fst_len = slice.len() - rem;
+ // SAFETY: 0 <= fst_len <= slice.len() by construction above
+ let (fst, snd) = unsafe { slice.split_at_unchecked(fst_len) };
+ Self { v: fst, rem: snd, chunk_size }
+ }
+
+ /// Returns the remainder of the original slice that is not going to be
+ /// returned by the iterator. The returned slice has at most `chunk_size-1`
+ /// elements.
+ #[must_use]
+ #[stable(feature = "chunks_exact", since = "1.31.0")]
+ pub fn remainder(&self) -> &'a [T] {
+ self.rem
+ }
+}
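A doctest-style sketch (illustrative only) of the up-front split in `new`: the short tail is never yielded by the iterator and is exposed only through `remainder`.

```rust
let slice = ['l', 'o', 'r', 'e', 'm'];
let mut iter = slice.chunks_exact(2);
assert_eq!(iter.next(), Some(&['l', 'o'][..]));
assert_eq!(iter.next(), Some(&['r', 'e'][..]));
// No short chunk is produced; the leftover was split off in `new`:
assert_eq!(iter.next(), None);
assert_eq!(iter.remainder(), &['m'][..]);
```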
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[stable(feature = "chunks_exact", since = "1.31.0")]
+impl<T> Clone for ChunksExact<'_, T> {
+ fn clone(&self) -> Self {
+ ChunksExact { v: self.v, rem: self.rem, chunk_size: self.chunk_size }
+ }
+}
+
+#[stable(feature = "chunks_exact", since = "1.31.0")]
+impl<'a, T> Iterator for ChunksExact<'a, T> {
+ type Item = &'a [T];
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a [T]> {
+ if self.v.len() < self.chunk_size {
+ None
+ } else {
+ let (fst, snd) = self.v.split_at(self.chunk_size);
+ self.v = snd;
+ Some(fst)
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let n = self.v.len() / self.chunk_size;
+ (n, Some(n))
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ let (start, overflow) = n.overflowing_mul(self.chunk_size);
+ if start >= self.v.len() || overflow {
+ self.v = &[];
+ None
+ } else {
+ let (_, snd) = self.v.split_at(start);
+ self.v = snd;
+ self.next()
+ }
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<Self::Item> {
+ self.next_back()
+ }
+
+ unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
+ let start = idx * self.chunk_size;
+ // SAFETY: mostly identical to `Chunks::__iterator_get_unchecked`.
+ unsafe { from_raw_parts(self.v.as_ptr().add(start), self.chunk_size) }
+ }
+}
+
+#[stable(feature = "chunks_exact", since = "1.31.0")]
+impl<'a, T> DoubleEndedIterator for ChunksExact<'a, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a [T]> {
+ if self.v.len() < self.chunk_size {
+ None
+ } else {
+ let (fst, snd) = self.v.split_at(self.v.len() - self.chunk_size);
+ self.v = fst;
+ Some(snd)
+ }
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ let len = self.len();
+ if n >= len {
+ self.v = &[];
+ None
+ } else {
+ let start = (len - 1 - n) * self.chunk_size;
+ let end = start + self.chunk_size;
+ let nth_back = &self.v[start..end];
+ self.v = &self.v[..start];
+ Some(nth_back)
+ }
+ }
+}
+
+#[stable(feature = "chunks_exact", since = "1.31.0")]
+impl<T> ExactSizeIterator for ChunksExact<'_, T> {
+ fn is_empty(&self) -> bool {
+ self.v.is_empty()
+ }
+}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T> TrustedLen for ChunksExact<'_, T> {}
+
+#[stable(feature = "chunks_exact", since = "1.31.0")]
+impl<T> FusedIterator for ChunksExact<'_, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<'a, T> TrustedRandomAccess for ChunksExact<'a, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<'a, T> TrustedRandomAccessNoCoerce for ChunksExact<'a, T> {
+ const MAY_HAVE_SIDE_EFFECT: bool = false;
+}
+
+/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size`
+/// elements at a time), starting at the beginning of the slice.
+///
+/// When the slice len is not evenly divided by the chunk size, the last up to
+/// `chunk_size-1` elements will be omitted but can be retrieved by calling the
+/// [`into_remainder`] function on the iterator.
+///
+/// This struct is created by the [`chunks_exact_mut`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let mut slice = ['l', 'o', 'r', 'e', 'm'];
+/// let iter = slice.chunks_exact_mut(2);
+/// ```
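+///
+/// A mutation sketch (illustrative; the final element is left to the
+/// remainder):
+///
+/// ```
+/// let mut slice = [1, 2, 3, 4, 5];
+/// for chunk in slice.chunks_exact_mut(2) {
+///     chunk[0] += 10;
+/// }
+/// assert_eq!(slice, [11, 2, 13, 4, 5]);
+/// ```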
+///
+/// [`chunks_exact_mut`]: slice::chunks_exact_mut
+/// [`into_remainder`]: ChunksExactMut::into_remainder
+/// [slices]: slice
+#[derive(Debug)]
+#[stable(feature = "chunks_exact", since = "1.31.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub struct ChunksExactMut<'a, T: 'a> {
+ /// # Safety
+ /// This slice pointer must point at a valid region of `T` with at least length `v.len()`. Normally,
+ /// those requirements would mean that we could instead use a `&mut [T]` here, but we cannot
+ /// because `__iterator_get_unchecked` needs to return `&mut [T]`, which guarantees certain aliasing
+ /// properties that we cannot uphold if we hold on to the full original `&mut [T]`. Wrapping a raw
+ /// slice instead lets us hand out non-overlapping `&mut [T]` subslices of the slice we wrap.
+ v: *mut [T],
+ rem: &'a mut [T], // The iterator never yields from here, so this can be unique
+ chunk_size: usize,
+ _marker: PhantomData<&'a mut T>,
+}
+
+impl<'a, T> ChunksExactMut<'a, T> {
+ #[inline]
+ pub(super) fn new(slice: &'a mut [T], chunk_size: usize) -> Self {
+ let rem = slice.len() % chunk_size;
+ let fst_len = slice.len() - rem;
+ // SAFETY: 0 <= fst_len <= slice.len() by construction above
+ let (fst, snd) = unsafe { slice.split_at_mut_unchecked(fst_len) };
+ Self { v: fst, rem: snd, chunk_size, _marker: PhantomData }
+ }
+
+ /// Returns the remainder of the original slice that is not going to be
+ /// returned by the iterator. The returned slice has at most `chunk_size-1`
+ /// elements.
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "chunks_exact", since = "1.31.0")]
+ pub fn into_remainder(self) -> &'a mut [T] {
+ self.rem
+ }
+}
+
+#[stable(feature = "chunks_exact", since = "1.31.0")]
+impl<'a, T> Iterator for ChunksExactMut<'a, T> {
+ type Item = &'a mut [T];
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a mut [T]> {
+ if self.v.len() < self.chunk_size {
+ None
+ } else {
+ // SAFETY: self.chunk_size is inbounds because we compared above against self.v.len()
+ let (head, tail) = unsafe { self.v.split_at_mut(self.chunk_size) };
+ self.v = tail;
+ // SAFETY: Nothing else points to or will point to the contents of this slice.
+ Some(unsafe { &mut *head })
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let n = self.v.len() / self.chunk_size;
+ (n, Some(n))
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<&'a mut [T]> {
+ let (start, overflow) = n.overflowing_mul(self.chunk_size);
+ if start >= self.v.len() || overflow {
+ self.v = &mut [];
+ None
+ } else {
+ // SAFETY: The self.v contract ensures that any split_at_mut is valid.
+ let (_, snd) = unsafe { self.v.split_at_mut(start) };
+ self.v = snd;
+ self.next()
+ }
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<Self::Item> {
+ self.next_back()
+ }
+
+ unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
+ let start = idx * self.chunk_size;
+ // SAFETY: see comments for `Chunks::__iterator_get_unchecked` and `self.v`.
+ unsafe { from_raw_parts_mut(self.v.as_mut_ptr().add(start), self.chunk_size) }
+ }
+}
+
+#[stable(feature = "chunks_exact", since = "1.31.0")]
+impl<'a, T> DoubleEndedIterator for ChunksExactMut<'a, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a mut [T]> {
+ if self.v.len() < self.chunk_size {
+ None
+ } else {
+ // SAFETY: This subtraction is inbounds because of the check above
+ let (head, tail) = unsafe { self.v.split_at_mut(self.v.len() - self.chunk_size) };
+ self.v = head;
+ // SAFETY: Nothing else points to or will point to the contents of this slice.
+ Some(unsafe { &mut *tail })
+ }
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ let len = self.len();
+ if n >= len {
+ self.v = &mut [];
+ None
+ } else {
+ let start = (len - 1 - n) * self.chunk_size;
+ let end = start + self.chunk_size;
+ // SAFETY: The self.v contract ensures that any split_at_mut is valid.
+ let (temp, _tail) = unsafe { mem::replace(&mut self.v, &mut []).split_at_mut(end) };
+ // SAFETY: The self.v contract ensures that any split_at_mut is valid.
+ let (head, nth_back) = unsafe { temp.split_at_mut(start) };
+ self.v = head;
+ // SAFETY: Nothing else points to or will point to the contents of this slice.
+ Some(unsafe { &mut *nth_back })
+ }
+ }
+}
+
+#[stable(feature = "chunks_exact", since = "1.31.0")]
+impl<T> ExactSizeIterator for ChunksExactMut<'_, T> {
+ fn is_empty(&self) -> bool {
+ self.v.is_empty()
+ }
+}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T> TrustedLen for ChunksExactMut<'_, T> {}
+
+#[stable(feature = "chunks_exact", since = "1.31.0")]
+impl<T> FusedIterator for ChunksExactMut<'_, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<'a, T> TrustedRandomAccess for ChunksExactMut<'a, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<'a, T> TrustedRandomAccessNoCoerce for ChunksExactMut<'a, T> {
+ const MAY_HAVE_SIDE_EFFECT: bool = false;
+}
+
+#[stable(feature = "chunks_exact", since = "1.31.0")]
+unsafe impl<T> Send for ChunksExactMut<'_, T> where T: Send {}
+
+#[stable(feature = "chunks_exact", since = "1.31.0")]
+unsafe impl<T> Sync for ChunksExactMut<'_, T> where T: Sync {}
+
+/// A windowed iterator over a slice in overlapping chunks (`N` elements at a
+/// time), starting at the beginning of the slice.
+///
+/// This struct is created by the [`array_windows`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// #![feature(array_windows)]
+///
+/// let slice = [0, 1, 2, 3];
+/// let iter = slice.array_windows::<2>();
+/// ```
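+///
+/// Illustratively, successive windows overlap by `N - 1` elements:
+///
+/// ```
+/// #![feature(array_windows)]
+///
+/// let slice = [0, 1, 2, 3];
+/// let mut iter = slice.array_windows::<2>();
+/// assert_eq!(iter.next(), Some(&[0, 1]));
+/// assert_eq!(iter.next(), Some(&[1, 2]));
+/// assert_eq!(iter.next(), Some(&[2, 3]));
+/// assert_eq!(iter.next(), None);
+/// ```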
+///
+/// [`array_windows`]: slice::array_windows
+/// [slices]: slice
+#[derive(Debug, Clone, Copy)]
+#[unstable(feature = "array_windows", issue = "75027")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub struct ArrayWindows<'a, T: 'a, const N: usize> {
+ slice_head: *const T,
+ num: usize,
+ marker: PhantomData<&'a [T; N]>,
+}
+
+impl<'a, T: 'a, const N: usize> ArrayWindows<'a, T, N> {
+ #[inline]
+ pub(super) fn new(slice: &'a [T]) -> Self {
+ let num_windows = slice.len().saturating_sub(N - 1);
+ Self { slice_head: slice.as_ptr(), num: num_windows, marker: PhantomData }
+ }
+}
+
+#[unstable(feature = "array_windows", issue = "75027")]
+impl<'a, T, const N: usize> Iterator for ArrayWindows<'a, T, N> {
+ type Item = &'a [T; N];
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.num == 0 {
+ return None;
+ }
+ // SAFETY:
+ // This is safe because `self.num > 0` guarantees at least `N` elements
+ // remain from `slice_head`, so reading a `[T; N]` there is in bounds.
+ let ret = unsafe { &*self.slice_head.cast::<[T; N]>() };
+ // SAFETY: Guaranteed that at least one item remains; otherwise the
+ // earlier branch would have been hit.
+ self.slice_head = unsafe { self.slice_head.add(1) };
+
+ self.num -= 1;
+ Some(ret)
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.num, Some(self.num))
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.num
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ if self.num <= n {
+ self.num = 0;
+ return None;
+ }
+ // SAFETY:
+ // This is safe because `self.num > n` guarantees that the window starting
+ // at `slice_head.add(n)` lies fully within the original slice.
+ let ret = unsafe { &*self.slice_head.add(n).cast::<[T; N]>() };
+ // SAFETY: Guaranteed that at least `n + 1` windows remain, so advancing
+ // the head by `n + 1` stays within the original slice.
+ self.slice_head = unsafe { self.slice_head.add(n + 1) };
+
+ self.num -= n + 1;
+ Some(ret)
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<Self::Item> {
+ self.nth(self.num.checked_sub(1)?)
+ }
+}
+
+#[unstable(feature = "array_windows", issue = "75027")]
+impl<'a, T, const N: usize> DoubleEndedIterator for ArrayWindows<'a, T, N> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a [T; N]> {
+ if self.num == 0 {
+ return None;
+ }
+ // SAFETY: `self.num` windows remain (checked non-zero above), so the last
+ // one starts at the in-bounds offset `self.num - 1` (0-indexed).
+ let ret = unsafe { &*self.slice_head.add(self.num - 1).cast::<[T; N]>() };
+ self.num -= 1;
+ Some(ret)
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<&'a [T; N]> {
+ if self.num <= n {
+ self.num = 0;
+ return None;
+ }
+ // SAFETY: `self.num > n` windows remain, so the window starting at offset
+ // `self.num - (n + 1)` (0-indexed from the front) is in bounds.
+ let ret = unsafe { &*self.slice_head.add(self.num - (n + 1)).cast::<[T; N]>() };
+ self.num -= n + 1;
+ Some(ret)
+ }
+}
+
+#[unstable(feature = "array_windows", issue = "75027")]
+impl<T, const N: usize> ExactSizeIterator for ArrayWindows<'_, T, N> {
+ fn is_empty(&self) -> bool {
+ self.num == 0
+ }
+}
+
+/// An iterator over a slice in (non-overlapping) chunks (`N` elements at a
+/// time), starting at the beginning of the slice.
+///
+/// When the slice len is not evenly divided by the chunk size, the last
+/// up to `N-1` elements will be omitted but can be retrieved by calling
+/// the [`remainder`] function on the iterator.
+///
+/// This struct is created by the [`array_chunks`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// #![feature(array_chunks)]
+///
+/// let slice = ['l', 'o', 'r', 'e', 'm'];
+/// let iter = slice.array_chunks::<2>();
+/// ```
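+///
+/// A sketch of the yielded arrays and the remainder:
+///
+/// ```
+/// #![feature(array_chunks)]
+///
+/// let slice = ['l', 'o', 'r', 'e', 'm'];
+/// let mut iter = slice.array_chunks::<2>();
+/// assert_eq!(iter.next(), Some(&['l', 'o']));
+/// assert_eq!(iter.next(), Some(&['r', 'e']));
+/// assert_eq!(iter.next(), None);
+/// assert_eq!(iter.remainder(), &['m']);
+/// ```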
+///
+/// [`array_chunks`]: slice::array_chunks
+/// [`remainder`]: ArrayChunks::remainder
+/// [slices]: slice
+#[derive(Debug)]
+#[unstable(feature = "array_chunks", issue = "74985")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub struct ArrayChunks<'a, T: 'a, const N: usize> {
+ iter: Iter<'a, [T; N]>,
+ rem: &'a [T],
+}
+
+impl<'a, T, const N: usize> ArrayChunks<'a, T, N> {
+ #[inline]
+ pub(super) fn new(slice: &'a [T]) -> Self {
+ let (array_slice, rem) = slice.as_chunks();
+ Self { iter: array_slice.iter(), rem }
+ }
+
+ /// Returns the remainder of the original slice that is not going to be
+ /// returned by the iterator. The returned slice has at most `N-1`
+ /// elements.
+ #[must_use]
+ #[unstable(feature = "array_chunks", issue = "74985")]
+ pub fn remainder(&self) -> &'a [T] {
+ self.rem
+ }
+}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[unstable(feature = "array_chunks", issue = "74985")]
+impl<T, const N: usize> Clone for ArrayChunks<'_, T, N> {
+ fn clone(&self) -> Self {
+ ArrayChunks { iter: self.iter.clone(), rem: self.rem }
+ }
+}
+
+#[unstable(feature = "array_chunks", issue = "74985")]
+impl<'a, T, const N: usize> Iterator for ArrayChunks<'a, T, N> {
+ type Item = &'a [T; N];
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a [T; N]> {
+ self.iter.next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.iter.count()
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ self.iter.nth(n)
+ }
+
+ #[inline]
+ fn last(self) -> Option<Self::Item> {
+ self.iter.last()
+ }
+
+ unsafe fn __iterator_get_unchecked(&mut self, i: usize) -> &'a [T; N] {
+ // SAFETY: The safety guarantees of `__iterator_get_unchecked` are
+ // transferred to the caller.
+ unsafe { self.iter.__iterator_get_unchecked(i) }
+ }
+}
+
+#[unstable(feature = "array_chunks", issue = "74985")]
+impl<'a, T, const N: usize> DoubleEndedIterator for ArrayChunks<'a, T, N> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a [T; N]> {
+ self.iter.next_back()
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ self.iter.nth_back(n)
+ }
+}
+
+#[unstable(feature = "array_chunks", issue = "74985")]
+impl<T, const N: usize> ExactSizeIterator for ArrayChunks<'_, T, N> {
+ fn is_empty(&self) -> bool {
+ self.iter.is_empty()
+ }
+}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T, const N: usize> TrustedLen for ArrayChunks<'_, T, N> {}
+
+#[unstable(feature = "array_chunks", issue = "74985")]
+impl<T, const N: usize> FusedIterator for ArrayChunks<'_, T, N> {}
+
+#[doc(hidden)]
+#[unstable(feature = "array_chunks", issue = "74985")]
+unsafe impl<'a, T, const N: usize> TrustedRandomAccess for ArrayChunks<'a, T, N> {}
+
+#[doc(hidden)]
+#[unstable(feature = "array_chunks", issue = "74985")]
+unsafe impl<'a, T, const N: usize> TrustedRandomAccessNoCoerce for ArrayChunks<'a, T, N> {
+ const MAY_HAVE_SIDE_EFFECT: bool = false;
+}
+
+/// An iterator over a slice in (non-overlapping) mutable chunks (`N` elements
+/// at a time), starting at the beginning of the slice.
+///
+/// When the slice len is not evenly divided by the chunk size, the last
+/// up to `N-1` elements will be omitted but can be retrieved by calling
+/// the [`into_remainder`] function on the iterator.
+///
+/// This struct is created by the [`array_chunks_mut`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// #![feature(array_chunks)]
+///
+/// let mut slice = ['l', 'o', 'r', 'e', 'm'];
+/// let iter = slice.array_chunks_mut::<2>();
+/// ```
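+///
+/// A mutation sketch; the trailing element is not part of any chunk and is
+/// left untouched:
+///
+/// ```
+/// #![feature(array_chunks)]
+///
+/// let mut slice = [1, 2, 3, 4, 5];
+/// for chunk in slice.array_chunks_mut::<2>() {
+///     chunk[0] *= 10;
+/// }
+/// assert_eq!(slice, [10, 2, 30, 4, 5]);
+/// ```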
+///
+/// [`array_chunks_mut`]: slice::array_chunks_mut
+/// [`into_remainder`]: ArrayChunksMut::into_remainder
+/// [slices]: slice
+#[derive(Debug)]
+#[unstable(feature = "array_chunks", issue = "74985")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub struct ArrayChunksMut<'a, T: 'a, const N: usize> {
+ iter: IterMut<'a, [T; N]>,
+ rem: &'a mut [T],
+}
+
+impl<'a, T, const N: usize> ArrayChunksMut<'a, T, N> {
+ #[inline]
+ pub(super) fn new(slice: &'a mut [T]) -> Self {
+ let (array_slice, rem) = slice.as_chunks_mut();
+ Self { iter: array_slice.iter_mut(), rem }
+ }
+
+ /// Returns the remainder of the original slice that is not going to be
+ /// returned by the iterator. The returned slice has at most `N-1`
+ /// elements.
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[unstable(feature = "array_chunks", issue = "74985")]
+ pub fn into_remainder(self) -> &'a mut [T] {
+ self.rem
+ }
+}
+
+#[unstable(feature = "array_chunks", issue = "74985")]
+impl<'a, T, const N: usize> Iterator for ArrayChunksMut<'a, T, N> {
+ type Item = &'a mut [T; N];
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a mut [T; N]> {
+ self.iter.next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.iter.count()
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ self.iter.nth(n)
+ }
+
+ #[inline]
+ fn last(self) -> Option<Self::Item> {
+ self.iter.last()
+ }
+
+ unsafe fn __iterator_get_unchecked(&mut self, i: usize) -> &'a mut [T; N] {
+ // SAFETY: The safety guarantees of `__iterator_get_unchecked` are transferred to
+ // the caller.
+ unsafe { self.iter.__iterator_get_unchecked(i) }
+ }
+}
+
+#[unstable(feature = "array_chunks", issue = "74985")]
+impl<'a, T, const N: usize> DoubleEndedIterator for ArrayChunksMut<'a, T, N> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a mut [T; N]> {
+ self.iter.next_back()
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ self.iter.nth_back(n)
+ }
+}
+
+#[unstable(feature = "array_chunks", issue = "74985")]
+impl<T, const N: usize> ExactSizeIterator for ArrayChunksMut<'_, T, N> {
+ fn is_empty(&self) -> bool {
+ self.iter.is_empty()
+ }
+}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T, const N: usize> TrustedLen for ArrayChunksMut<'_, T, N> {}
+
+#[unstable(feature = "array_chunks", issue = "74985")]
+impl<T, const N: usize> FusedIterator for ArrayChunksMut<'_, T, N> {}
+
+#[doc(hidden)]
+#[unstable(feature = "array_chunks", issue = "74985")]
+unsafe impl<'a, T, const N: usize> TrustedRandomAccess for ArrayChunksMut<'a, T, N> {}
+
+#[doc(hidden)]
+#[unstable(feature = "array_chunks", issue = "74985")]
+unsafe impl<'a, T, const N: usize> TrustedRandomAccessNoCoerce for ArrayChunksMut<'a, T, N> {
+ const MAY_HAVE_SIDE_EFFECT: bool = false;
+}
+
+/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a
+/// time), starting at the end of the slice.
+///
+/// When the slice len is not evenly divided by the chunk size, the last slice
+/// of the iteration will be the remainder.
+///
+/// This struct is created by the [`rchunks`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let slice = ['l', 'o', 'r', 'e', 'm'];
+/// let iter = slice.rchunks(2);
+/// ```
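+///
+/// Walking the slice from the back, the final (shorter) chunk is still
+/// yielded:
+///
+/// ```
+/// let slice = ['l', 'o', 'r', 'e', 'm'];
+/// let mut iter = slice.rchunks(2);
+/// assert_eq!(iter.next().unwrap(), &['e', 'm']);
+/// assert_eq!(iter.next().unwrap(), &['r', 'e']);
+/// assert_eq!(iter.next().unwrap(), &['l']);
+/// assert!(iter.next().is_none());
+/// ```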
+///
+/// [`rchunks`]: slice::rchunks
+/// [slices]: slice
+#[derive(Debug)]
+#[stable(feature = "rchunks", since = "1.31.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub struct RChunks<'a, T: 'a> {
+ v: &'a [T],
+ chunk_size: usize,
+}
+
+impl<'a, T: 'a> RChunks<'a, T> {
+ #[inline]
+ pub(super) fn new(slice: &'a [T], size: usize) -> Self {
+ Self { v: slice, chunk_size: size }
+ }
+}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[stable(feature = "rchunks", since = "1.31.0")]
+impl<T> Clone for RChunks<'_, T> {
+ fn clone(&self) -> Self {
+ RChunks { v: self.v, chunk_size: self.chunk_size }
+ }
+}
+
+#[stable(feature = "rchunks", since = "1.31.0")]
+impl<'a, T> Iterator for RChunks<'a, T> {
+ type Item = &'a [T];
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a [T]> {
+ if self.v.is_empty() {
+ None
+ } else {
+ let len = self.v.len();
+ let chunksz = cmp::min(len, self.chunk_size);
+ // SAFETY: split_at_unchecked only requires that the argument not
+ // exceed the length. That could only be violated if `len - chunksz`
+ // underflowed, which would require `chunksz > len`; that is
+ // impossible, as we initialize `chunksz` as the `min` of `len` and
+ // `self.chunk_size`.
+ let (fst, snd) = unsafe { self.v.split_at_unchecked(len - chunksz) };
+ self.v = fst;
+ Some(snd)
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.v.is_empty() {
+ (0, Some(0))
+ } else {
+ let n = self.v.len() / self.chunk_size;
+ let rem = self.v.len() % self.chunk_size;
+ let n = if rem > 0 { n + 1 } else { n };
+ (n, Some(n))
+ }
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ let (end, overflow) = n.overflowing_mul(self.chunk_size);
+ if end >= self.v.len() || overflow {
+ self.v = &[];
+ None
+ } else {
+ // Can't underflow because of the check above
+ let end = self.v.len() - end;
+ let start = match end.checked_sub(self.chunk_size) {
+ Some(sum) => sum,
+ None => 0,
+ };
+ let nth = &self.v[start..end];
+ self.v = &self.v[0..start];
+ Some(nth)
+ }
+ }
+
+ #[inline]
+ fn last(self) -> Option<Self::Item> {
+ if self.v.is_empty() {
+ None
+ } else {
+ let rem = self.v.len() % self.chunk_size;
+ let end = if rem == 0 { self.chunk_size } else { rem };
+ Some(&self.v[0..end])
+ }
+ }
+
+ unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
+ let end = self.v.len() - idx * self.chunk_size;
+ let start = match end.checked_sub(self.chunk_size) {
+ None => 0,
+ Some(start) => start,
+ };
+ // SAFETY: mostly identical to `Chunks::__iterator_get_unchecked`.
+ unsafe { from_raw_parts(self.v.as_ptr().add(start), end - start) }
+ }
+}
+
+#[stable(feature = "rchunks", since = "1.31.0")]
+impl<'a, T> DoubleEndedIterator for RChunks<'a, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a [T]> {
+ if self.v.is_empty() {
+ None
+ } else {
+ let remainder = self.v.len() % self.chunk_size;
+ let chunksz = if remainder != 0 { remainder } else { self.chunk_size };
+ // SAFETY: similar to Chunks::next_back
+ let (fst, snd) = unsafe { self.v.split_at_unchecked(chunksz) };
+ self.v = snd;
+ Some(fst)
+ }
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ let len = self.len();
+ if n >= len {
+ self.v = &[];
+ None
+ } else {
+ // can't underflow because `n < len`
+ let offset_from_end = (len - 1 - n) * self.chunk_size;
+ let end = self.v.len() - offset_from_end;
+ let start = end.saturating_sub(self.chunk_size);
+ let nth_back = &self.v[start..end];
+ self.v = &self.v[end..];
+ Some(nth_back)
+ }
+ }
+}
+
+#[stable(feature = "rchunks", since = "1.31.0")]
+impl<T> ExactSizeIterator for RChunks<'_, T> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T> TrustedLen for RChunks<'_, T> {}
+
+#[stable(feature = "rchunks", since = "1.31.0")]
+impl<T> FusedIterator for RChunks<'_, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<'a, T> TrustedRandomAccess for RChunks<'a, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<'a, T> TrustedRandomAccessNoCoerce for RChunks<'a, T> {
+ const MAY_HAVE_SIDE_EFFECT: bool = false;
+}
+
+/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size`
+/// elements at a time), starting at the end of the slice.
+///
+/// When the slice len is not evenly divided by the chunk size, the last slice
+/// of the iteration will be the remainder.
+///
+/// This struct is created by the [`rchunks_mut`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let mut slice = ['l', 'o', 'r', 'e', 'm'];
+/// let iter = slice.rchunks_mut(2);
+/// ```
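+///
+/// A mutation sketch; the chunk boundaries here are `[4, 5]`, `[2, 3]`,
+/// and `[1]`, so indices 3, 1, and 0 are doubled:
+///
+/// ```
+/// let mut slice = [1, 2, 3, 4, 5];
+/// for chunk in slice.rchunks_mut(2) {
+///     chunk[0] *= 2;
+/// }
+/// assert_eq!(slice, [2, 4, 3, 8, 5]);
+/// ```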
+///
+/// [`rchunks_mut`]: slice::rchunks_mut
+/// [slices]: slice
+#[derive(Debug)]
+#[stable(feature = "rchunks", since = "1.31.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub struct RChunksMut<'a, T: 'a> {
+ /// # Safety
+ /// This slice pointer must point at a valid region of `T` with at least length `v.len()`. Normally,
+ /// those requirements would mean that we could instead use a `&mut [T]` here, but we cannot
+ /// because `__iterator_get_unchecked` needs to return `&mut [T]`, which guarantees certain aliasing
+ /// properties that we cannot uphold if we hold on to the full original `&mut [T]`. Wrapping a raw
+ /// slice instead lets us hand out non-overlapping `&mut [T]` subslices of the slice we wrap.
+ v: *mut [T],
+ chunk_size: usize,
+ _marker: PhantomData<&'a mut T>,
+}
+
+impl<'a, T: 'a> RChunksMut<'a, T> {
+ #[inline]
+ pub(super) fn new(slice: &'a mut [T], size: usize) -> Self {
+ Self { v: slice, chunk_size: size, _marker: PhantomData }
+ }
+}
+
+#[stable(feature = "rchunks", since = "1.31.0")]
+impl<'a, T> Iterator for RChunksMut<'a, T> {
+ type Item = &'a mut [T];
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a mut [T]> {
+ if self.v.is_empty() {
+ None
+ } else {
+ let sz = cmp::min(self.v.len(), self.chunk_size);
+ let len = self.v.len();
+ // SAFETY: split_at_mut_unchecked only requires that the argument not
+ // exceed the length. That could only be violated if `len - sz`
+ // underflowed, which would require `sz > len`; that is impossible,
+ // as we initialize `sz` as the `min` of `self.v.len()` (i.e. `len`)
+ // and `self.chunk_size`.
+ let (head, tail) = unsafe { self.v.split_at_mut_unchecked(len - sz) };
+ self.v = head;
+ // SAFETY: Nothing else points to or will point to the contents of this slice.
+ Some(unsafe { &mut *tail })
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.v.is_empty() {
+ (0, Some(0))
+ } else {
+ let n = self.v.len() / self.chunk_size;
+ let rem = self.v.len() % self.chunk_size;
+ let n = if rem > 0 { n + 1 } else { n };
+ (n, Some(n))
+ }
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<&'a mut [T]> {
+ let (end, overflow) = n.overflowing_mul(self.chunk_size);
+ if end >= self.v.len() || overflow {
+ self.v = &mut [];
+ None
+ } else {
+ // Can't underflow because of the check above
+ let end = self.v.len() - end;
+ let start = match end.checked_sub(self.chunk_size) {
+ Some(sum) => sum,
+ None => 0,
+ };
+ // SAFETY: This type ensures that self.v is a valid pointer with a correct len.
+ // Therefore the bounds check in split_at_mut guarantees the split point is inbounds.
+ let (head, tail) = unsafe { self.v.split_at_mut(start) };
+ // SAFETY: This type ensures that self.v is a valid pointer with a correct len.
+ // Therefore the bounds check in split_at_mut guarantees the split point is inbounds.
+ let (nth, _) = unsafe { tail.split_at_mut(end - start) };
+ self.v = head;
+ // SAFETY: Nothing else points to or will point to the contents of this slice.
+ Some(unsafe { &mut *nth })
+ }
+ }
+
+ #[inline]
+ fn last(self) -> Option<Self::Item> {
+ if self.v.is_empty() {
+ None
+ } else {
+ let rem = self.v.len() % self.chunk_size;
+ let end = if rem == 0 { self.chunk_size } else { rem };
+ // SAFETY: Nothing else points to or will point to the contents of this slice.
+ Some(unsafe { &mut *self.v.get_unchecked_mut(0..end) })
+ }
+ }
+
+ unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
+ let end = self.v.len() - idx * self.chunk_size;
+ let start = match end.checked_sub(self.chunk_size) {
+ None => 0,
+ Some(start) => start,
+ };
+ // SAFETY: see comments for `RChunks::__iterator_get_unchecked`,
+ // `ChunksMut::__iterator_get_unchecked`, and `self.v`.
+ unsafe { from_raw_parts_mut(self.v.as_mut_ptr().add(start), end - start) }
+ }
+}
+
+#[stable(feature = "rchunks", since = "1.31.0")]
+impl<'a, T> DoubleEndedIterator for RChunksMut<'a, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a mut [T]> {
+ if self.v.is_empty() {
+ None
+ } else {
+ let remainder = self.v.len() % self.chunk_size;
+ let sz = if remainder != 0 { remainder } else { self.chunk_size };
+ // SAFETY: Similar to `Chunks::next_back`
+ let (head, tail) = unsafe { self.v.split_at_mut_unchecked(sz) };
+ self.v = tail;
+ // SAFETY: Nothing else points to or will point to the contents of this slice.
+ Some(unsafe { &mut *head })
+ }
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ let len = self.len();
+ if n >= len {
+ self.v = &mut [];
+ None
+ } else {
+ // can't underflow because `n < len`
+ let offset_from_end = (len - 1 - n) * self.chunk_size;
+ let end = self.v.len() - offset_from_end;
+ let start = end.saturating_sub(self.chunk_size);
+ // SAFETY: The self.v contract ensures that any split_at_mut is valid.
+ let (tmp, tail) = unsafe { self.v.split_at_mut(end) };
+ // SAFETY: The self.v contract ensures that any split_at_mut is valid.
+ let (_, nth_back) = unsafe { tmp.split_at_mut(start) };
+ self.v = tail;
+ // SAFETY: Nothing else points to or will point to the contents of this slice.
+ Some(unsafe { &mut *nth_back })
+ }
+ }
+}
+
+#[stable(feature = "rchunks", since = "1.31.0")]
+impl<T> ExactSizeIterator for RChunksMut<'_, T> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T> TrustedLen for RChunksMut<'_, T> {}
+
+#[stable(feature = "rchunks", since = "1.31.0")]
+impl<T> FusedIterator for RChunksMut<'_, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<'a, T> TrustedRandomAccess for RChunksMut<'a, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<'a, T> TrustedRandomAccessNoCoerce for RChunksMut<'a, T> {
+ const MAY_HAVE_SIDE_EFFECT: bool = false;
+}
+
+#[stable(feature = "rchunks", since = "1.31.0")]
+unsafe impl<T> Send for RChunksMut<'_, T> where T: Send {}
+
+#[stable(feature = "rchunks", since = "1.31.0")]
+unsafe impl<T> Sync for RChunksMut<'_, T> where T: Sync {}
+
+/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a
+/// time), starting at the end of the slice.
+///
+/// When the slice len is not evenly divided by the chunk size, the last
+/// up to `chunk_size-1` elements will be omitted but can be retrieved by
+/// calling the [`remainder`] function on the iterator.
+///
+/// This struct is created by the [`rchunks_exact`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let slice = ['l', 'o', 'r', 'e', 'm'];
+/// let iter = slice.rchunks_exact(2);
+/// ```
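+///
+/// Unlike the non-exact `rchunks`, the leftover front elements go to the
+/// remainder rather than being yielded as a final short chunk:
+///
+/// ```
+/// let slice = ['l', 'o', 'r', 'e', 'm'];
+/// let mut iter = slice.rchunks_exact(2);
+/// assert_eq!(iter.next().unwrap(), &['e', 'm']);
+/// assert_eq!(iter.next().unwrap(), &['r', 'e']);
+/// assert!(iter.next().is_none());
+/// assert_eq!(iter.remainder(), &['l']);
+/// ```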
+///
+/// [`rchunks_exact`]: slice::rchunks_exact
+/// [`remainder`]: RChunksExact::remainder
+/// [slices]: slice
+#[derive(Debug)]
+#[stable(feature = "rchunks", since = "1.31.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub struct RChunksExact<'a, T: 'a> {
+ v: &'a [T],
+ rem: &'a [T],
+ chunk_size: usize,
+}
+
+impl<'a, T> RChunksExact<'a, T> {
+ #[inline]
+ pub(super) fn new(slice: &'a [T], chunk_size: usize) -> Self {
+ let rem = slice.len() % chunk_size;
+ // SAFETY: 0 <= rem <= slice.len() by construction above
+ let (fst, snd) = unsafe { slice.split_at_unchecked(rem) };
+ Self { v: snd, rem: fst, chunk_size }
+ }
+
+ /// Returns the remainder of the original slice that is not going to be
+ /// returned by the iterator. The returned slice has at most `chunk_size-1`
+ /// elements.
+ #[must_use]
+ #[stable(feature = "rchunks", since = "1.31.0")]
+ pub fn remainder(&self) -> &'a [T] {
+ self.rem
+ }
+}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[stable(feature = "rchunks", since = "1.31.0")]
+impl<'a, T> Clone for RChunksExact<'a, T> {
+ fn clone(&self) -> RChunksExact<'a, T> {
+ RChunksExact { v: self.v, rem: self.rem, chunk_size: self.chunk_size }
+ }
+}
+
+#[stable(feature = "rchunks", since = "1.31.0")]
+impl<'a, T> Iterator for RChunksExact<'a, T> {
+ type Item = &'a [T];
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a [T]> {
+ if self.v.len() < self.chunk_size {
+ None
+ } else {
+ let (fst, snd) = self.v.split_at(self.v.len() - self.chunk_size);
+ self.v = fst;
+ Some(snd)
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let n = self.v.len() / self.chunk_size;
+ (n, Some(n))
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ let (end, overflow) = n.overflowing_mul(self.chunk_size);
+ if end >= self.v.len() || overflow {
+ self.v = &[];
+ None
+ } else {
+ let (fst, _) = self.v.split_at(self.v.len() - end);
+ self.v = fst;
+ self.next()
+ }
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<Self::Item> {
+ self.next_back()
+ }
+
+ unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
+ let end = self.v.len() - idx * self.chunk_size;
+ let start = end - self.chunk_size;
+ // SAFETY: mostly identical to `Chunks::__iterator_get_unchecked`.
+ unsafe { from_raw_parts(self.v.as_ptr().add(start), self.chunk_size) }
+ }
+}
+
+#[stable(feature = "rchunks", since = "1.31.0")]
+impl<'a, T> DoubleEndedIterator for RChunksExact<'a, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a [T]> {
+ if self.v.len() < self.chunk_size {
+ None
+ } else {
+ let (fst, snd) = self.v.split_at(self.chunk_size);
+ self.v = snd;
+ Some(fst)
+ }
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ let len = self.len();
+ if n >= len {
+ self.v = &[];
+ None
+ } else {
+ // now that we know that `n` corresponds to a chunk,
+ // none of these operations can underflow/overflow
+ let offset = (len - n) * self.chunk_size;
+ let start = self.v.len() - offset;
+ let end = start + self.chunk_size;
+ let nth_back = &self.v[start..end];
+ self.v = &self.v[end..];
+ Some(nth_back)
+ }
+ }
+}
+
+#[stable(feature = "rchunks", since = "1.31.0")]
+impl<'a, T> ExactSizeIterator for RChunksExact<'a, T> {
+ fn is_empty(&self) -> bool {
+ self.v.is_empty()
+ }
+}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T> TrustedLen for RChunksExact<'_, T> {}
+
+#[stable(feature = "rchunks", since = "1.31.0")]
+impl<T> FusedIterator for RChunksExact<'_, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<'a, T> TrustedRandomAccess for RChunksExact<'a, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<'a, T> TrustedRandomAccessNoCoerce for RChunksExact<'a, T> {
+ const MAY_HAVE_SIDE_EFFECT: bool = false;
+}
+
+/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size`
+/// elements at a time), starting at the end of the slice.
+///
+/// When the slice len is not evenly divided by the chunk size, the last up to
+/// `chunk_size-1` elements will be omitted but can be retrieved by calling the
+/// [`into_remainder`] function on the iterator.
+///
+/// This struct is created by the [`rchunks_exact_mut`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let mut slice = ['l', 'o', 'r', 'e', 'm'];
+/// let iter = slice.rchunks_exact_mut(2);
+/// ```
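+///
+/// A mutation sketch; [`into_remainder`] hands back the leftover front
+/// elements:
+///
+/// ```
+/// let mut slice = [1, 2, 3, 4, 5];
+/// let mut iter = slice.rchunks_exact_mut(2);
+/// while let Some(chunk) = iter.next() {
+///     chunk[0] = 0;
+/// }
+/// iter.into_remainder()[0] = 9;
+/// assert_eq!(slice, [9, 0, 3, 0, 5]);
+/// ```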
+///
+/// [`rchunks_exact_mut`]: slice::rchunks_exact_mut
+/// [`into_remainder`]: RChunksExactMut::into_remainder
+/// [slices]: slice
+#[derive(Debug)]
+#[stable(feature = "rchunks", since = "1.31.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub struct RChunksExactMut<'a, T: 'a> {
+ /// # Safety
+ /// This slice pointer must point at a valid region of `T` with at least length `v.len()`. Normally,
+ /// those requirements would mean that we could instead use a `&mut [T]` here, but we cannot
+ /// because `__iterator_get_unchecked` needs to return `&mut [T]`, which guarantees certain aliasing
+ /// properties that we cannot uphold if we hold on to the full original `&mut [T]`. Wrapping a raw
+ /// slice instead lets us hand out non-overlapping `&mut [T]` subslices of the slice we wrap.
+ v: *mut [T],
+ rem: &'a mut [T],
+ chunk_size: usize,
+}
+
+impl<'a, T> RChunksExactMut<'a, T> {
+ #[inline]
+ pub(super) fn new(slice: &'a mut [T], chunk_size: usize) -> Self {
+ let rem = slice.len() % chunk_size;
+ // SAFETY: 0 <= rem <= slice.len() by construction above
+ let (fst, snd) = unsafe { slice.split_at_mut_unchecked(rem) };
+ Self { v: snd, rem: fst, chunk_size }
+ }
+
+ /// Returns the remainder of the original slice that is not going to be
+ /// returned by the iterator. The returned slice has at most `chunk_size-1`
+ /// elements.
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "rchunks", since = "1.31.0")]
+ pub fn into_remainder(self) -> &'a mut [T] {
+ self.rem
+ }
+}
+
+#[stable(feature = "rchunks", since = "1.31.0")]
+impl<'a, T> Iterator for RChunksExactMut<'a, T> {
+ type Item = &'a mut [T];
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a mut [T]> {
+ if self.v.len() < self.chunk_size {
+ None
+ } else {
+ let len = self.v.len();
+ // SAFETY: The self.v contract ensures that any split_at_mut is valid.
+ let (head, tail) = unsafe { self.v.split_at_mut(len - self.chunk_size) };
+ self.v = head;
+ // SAFETY: Nothing else points to or will point to the contents of this slice.
+ Some(unsafe { &mut *tail })
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let n = self.v.len() / self.chunk_size;
+ (n, Some(n))
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<&'a mut [T]> {
+ let (end, overflow) = n.overflowing_mul(self.chunk_size);
+ if end >= self.v.len() || overflow {
+ self.v = &mut [];
+ None
+ } else {
+ let len = self.v.len();
+ // SAFETY: The self.v contract ensures that any split_at_mut is valid.
+ let (fst, _) = unsafe { self.v.split_at_mut(len - end) };
+ self.v = fst;
+ self.next()
+ }
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<Self::Item> {
+ self.next_back()
+ }
+
+ unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
+ let end = self.v.len() - idx * self.chunk_size;
+ let start = end - self.chunk_size;
+ // SAFETY: see comments for `RChunksMut::__iterator_get_unchecked` and `self.v`.
+ unsafe { from_raw_parts_mut(self.v.as_mut_ptr().add(start), self.chunk_size) }
+ }
+}
+
+#[stable(feature = "rchunks", since = "1.31.0")]
+impl<'a, T> DoubleEndedIterator for RChunksExactMut<'a, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a mut [T]> {
+ if self.v.len() < self.chunk_size {
+ None
+ } else {
+ // SAFETY: The self.v contract ensures that any split_at_mut is valid.
+ let (head, tail) = unsafe { self.v.split_at_mut(self.chunk_size) };
+ self.v = tail;
+ // SAFETY: Nothing else points to or will point to the contents of this slice.
+ Some(unsafe { &mut *head })
+ }
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ let len = self.len();
+ if n >= len {
+ self.v = &mut [];
+ None
+ } else {
+ // now that we know that `n` corresponds to a chunk,
+ // none of these operations can underflow/overflow
+ let offset = (len - n) * self.chunk_size;
+ let start = self.v.len() - offset;
+ let end = start + self.chunk_size;
+ // SAFETY: The self.v contract ensures that any split_at_mut is valid.
+ let (tmp, tail) = unsafe { self.v.split_at_mut(end) };
+ // SAFETY: The self.v contract ensures that any split_at_mut is valid.
+ let (_, nth_back) = unsafe { tmp.split_at_mut(start) };
+ self.v = tail;
+ // SAFETY: Nothing else points to or will point to the contents of this slice.
+ Some(unsafe { &mut *nth_back })
+ }
+ }
+}
+
+#[stable(feature = "rchunks", since = "1.31.0")]
+impl<T> ExactSizeIterator for RChunksExactMut<'_, T> {
+ fn is_empty(&self) -> bool {
+ self.v.is_empty()
+ }
+}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T> TrustedLen for RChunksExactMut<'_, T> {}
+
+#[stable(feature = "rchunks", since = "1.31.0")]
+impl<T> FusedIterator for RChunksExactMut<'_, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<'a, T> TrustedRandomAccess for RChunksExactMut<'a, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<'a, T> TrustedRandomAccessNoCoerce for RChunksExactMut<'a, T> {
+ const MAY_HAVE_SIDE_EFFECT: bool = false;
+}
+
+#[stable(feature = "rchunks", since = "1.31.0")]
+unsafe impl<T> Send for RChunksExactMut<'_, T> where T: Send {}
+
+#[stable(feature = "rchunks", since = "1.31.0")]
+unsafe impl<T> Sync for RChunksExactMut<'_, T> where T: Sync {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<'a, T> TrustedRandomAccess for Iter<'a, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<'a, T> TrustedRandomAccessNoCoerce for Iter<'a, T> {
+ const MAY_HAVE_SIDE_EFFECT: bool = false;
+}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<'a, T> TrustedRandomAccess for IterMut<'a, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<'a, T> TrustedRandomAccessNoCoerce for IterMut<'a, T> {
+ const MAY_HAVE_SIDE_EFFECT: bool = false;
+}
+
+/// An iterator over a slice in (non-overlapping) chunks separated by a predicate.
+///
+/// This struct is created by the [`group_by`] method on [slices].
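+///
+/// # Example
+///
+/// A sketch of how the predicate splits the slice into runs:
+///
+/// ```
+/// #![feature(slice_group_by)]
+///
+/// let slice = &[1, 1, 2, 3, 3];
+/// let mut iter = slice.group_by(|a, b| a == b);
+/// assert_eq!(iter.next(), Some(&[1, 1][..]));
+/// assert_eq!(iter.next(), Some(&[2][..]));
+/// assert_eq!(iter.next(), Some(&[3, 3][..]));
+/// assert_eq!(iter.next(), None);
+/// ```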
+///
+/// [`group_by`]: slice::group_by
+/// [slices]: slice
+#[unstable(feature = "slice_group_by", issue = "80552")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub struct GroupBy<'a, T: 'a, P> {
+ slice: &'a [T],
+ predicate: P,
+}
+
+#[unstable(feature = "slice_group_by", issue = "80552")]
+impl<'a, T: 'a, P> GroupBy<'a, T, P> {
+ pub(super) fn new(slice: &'a [T], predicate: P) -> Self {
+ GroupBy { slice, predicate }
+ }
+}
+
+#[unstable(feature = "slice_group_by", issue = "80552")]
+impl<'a, T: 'a, P> Iterator for GroupBy<'a, T, P>
+where
+ P: FnMut(&T, &T) -> bool,
+{
+ type Item = &'a [T];
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.slice.is_empty() {
+ None
+ } else {
+ let mut len = 1;
+ let mut iter = self.slice.windows(2);
+ while let Some([l, r]) = iter.next() {
+ if (self.predicate)(l, r) { len += 1 } else { break }
+ }
+ let (head, tail) = self.slice.split_at(len);
+ self.slice = tail;
+ Some(head)
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.slice.is_empty() { (0, Some(0)) } else { (1, Some(self.slice.len())) }
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<Self::Item> {
+ self.next_back()
+ }
+}
+
+#[unstable(feature = "slice_group_by", issue = "80552")]
+impl<'a, T: 'a, P> DoubleEndedIterator for GroupBy<'a, T, P>
+where
+ P: FnMut(&T, &T) -> bool,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<Self::Item> {
+ if self.slice.is_empty() {
+ None
+ } else {
+ let mut len = 1;
+ let mut iter = self.slice.windows(2);
+ while let Some([l, r]) = iter.next_back() {
+ if (self.predicate)(l, r) { len += 1 } else { break }
+ }
+ let (head, tail) = self.slice.split_at(self.slice.len() - len);
+ self.slice = head;
+ Some(tail)
+ }
+ }
+}
+
+#[unstable(feature = "slice_group_by", issue = "80552")]
+impl<'a, T: 'a, P> FusedIterator for GroupBy<'a, T, P> where P: FnMut(&T, &T) -> bool {}
+
+#[unstable(feature = "slice_group_by", issue = "80552")]
+impl<'a, T: 'a + fmt::Debug, P> fmt::Debug for GroupBy<'a, T, P> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("GroupBy").field("slice", &self.slice).finish()
+ }
+}
+
+/// An iterator over a slice in (non-overlapping) mutable chunks separated
+/// by a predicate.
+///
+/// This struct is created by the [`group_by_mut`] method on [slices].
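+///
+/// # Example
+///
+/// A mutation sketch; each run is scaled by its own length:
+///
+/// ```
+/// #![feature(slice_group_by)]
+///
+/// let mut slice = [1, 1, 2, 3, 3];
+/// for group in slice.group_by_mut(|a, b| a == b) {
+///     let n = group.len() as i32;
+///     for elem in group.iter_mut() {
+///         *elem *= n;
+///     }
+/// }
+/// assert_eq!(slice, [2, 2, 2, 6, 6]);
+/// ```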
+///
+/// [`group_by_mut`]: slice::group_by_mut
+/// [slices]: slice
+#[unstable(feature = "slice_group_by", issue = "80552")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub struct GroupByMut<'a, T: 'a, P> {
+ slice: &'a mut [T],
+ predicate: P,
+}
+
+#[unstable(feature = "slice_group_by", issue = "80552")]
+impl<'a, T: 'a, P> GroupByMut<'a, T, P> {
+ pub(super) fn new(slice: &'a mut [T], predicate: P) -> Self {
+ GroupByMut { slice, predicate }
+ }
+}
+
+#[unstable(feature = "slice_group_by", issue = "80552")]
+impl<'a, T: 'a, P> Iterator for GroupByMut<'a, T, P>
+where
+ P: FnMut(&T, &T) -> bool,
+{
+ type Item = &'a mut [T];
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.slice.is_empty() {
+ None
+ } else {
+ let mut len = 1;
+ let mut iter = self.slice.windows(2);
+ while let Some([l, r]) = iter.next() {
+ if (self.predicate)(l, r) { len += 1 } else { break }
+ }
+ let slice = mem::take(&mut self.slice);
+ let (head, tail) = slice.split_at_mut(len);
+ self.slice = tail;
+ Some(head)
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.slice.is_empty() { (0, Some(0)) } else { (1, Some(self.slice.len())) }
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<Self::Item> {
+ self.next_back()
+ }
+}
+
+#[unstable(feature = "slice_group_by", issue = "80552")]
+impl<'a, T: 'a, P> DoubleEndedIterator for GroupByMut<'a, T, P>
+where
+ P: FnMut(&T, &T) -> bool,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<Self::Item> {
+ if self.slice.is_empty() {
+ None
+ } else {
+ let mut len = 1;
+ let mut iter = self.slice.windows(2);
+ while let Some([l, r]) = iter.next_back() {
+ if (self.predicate)(l, r) { len += 1 } else { break }
+ }
+ let slice = mem::take(&mut self.slice);
+ let (head, tail) = slice.split_at_mut(slice.len() - len);
+ self.slice = head;
+ Some(tail)
+ }
+ }
+}
+
+#[unstable(feature = "slice_group_by", issue = "80552")]
+impl<'a, T: 'a, P> FusedIterator for GroupByMut<'a, T, P> where P: FnMut(&T, &T) -> bool {}
+
+#[unstable(feature = "slice_group_by", issue = "80552")]
+impl<'a, T: 'a + fmt::Debug, P> fmt::Debug for GroupByMut<'a, T, P> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("GroupByMut").field("slice", &self.slice).finish()
+ }
+}
diff --git a/library/core/src/slice/iter/macros.rs b/library/core/src/slice/iter/macros.rs
new file mode 100644
index 000000000..c05242222
--- /dev/null
+++ b/library/core/src/slice/iter/macros.rs
@@ -0,0 +1,423 @@
+//! Macros used by the slice iterators.
+
+// Inlining is_empty and len makes a huge performance difference
+macro_rules! is_empty {
+ // The way we encode the length of a ZST iterator, this works both for ZST
+ // and non-ZST.
+ ($self: ident) => {
+ $self.ptr.as_ptr() as *const T == $self.end
+ };
+}
+
+// To get rid of some bounds checks (see `position`), we compute the length in a somewhat
+// unexpected way. (Tested by `codegen/slice-position-bounds-check`.)
+macro_rules! len {
+ ($self: ident) => {{
+ #![allow(unused_unsafe)] // we're sometimes used within an unsafe block
+
+ let start = $self.ptr;
+ let size = size_from_ptr(start.as_ptr());
+ if size == 0 {
+ // This _cannot_ use `unchecked_sub` because we depend on wrapping
+ // to represent the length of long ZST slice iterators.
+ $self.end.addr().wrapping_sub(start.as_ptr().addr())
+ } else {
+ // We know that `start <= end`, so can do better than `offset_from`,
+ // which needs to deal in signed. By setting appropriate flags here
+ // we can tell LLVM this, which helps it remove bounds checks.
+ // SAFETY: By the type invariant, `start <= end`
+ let diff = unsafe { unchecked_sub($self.end.addr(), start.as_ptr().addr()) };
+ // By also telling LLVM that the pointers are apart by an exact
+ // multiple of the type size, it can optimize `len() == 0` down to
+ // `start == end` instead of `(end - start) < size`.
+ // SAFETY: By the type invariant, the pointers are aligned so the
+ // distance between them must be a multiple of pointee size
+ unsafe { exact_div(diff, size) }
+ }
+ }};
+}
+
+// The shared definition of the `Iter` and `IterMut` iterators
+macro_rules! iterator {
+ (
+ struct $name:ident -> $ptr:ty,
+ $elem:ty,
+ $raw_mut:tt,
+ {$( $mut_:tt )?},
+ {$($extra:tt)*}
+ ) => {
+ // Returns the first element and moves the start of the iterator forwards by 1.
+ // Greatly improves performance compared to an inlined function. The iterator
+ // must not be empty.
+ macro_rules! next_unchecked {
+ ($self: ident) => {& $( $mut_ )? *$self.post_inc_start(1)}
+ }
+
+ // Returns the last element and moves the end of the iterator backwards by 1.
+ // Greatly improves performance compared to an inlined function. The iterator
+ // must not be empty.
+ macro_rules! next_back_unchecked {
+ ($self: ident) => {& $( $mut_ )? *$self.pre_dec_end(1)}
+ }
+
+ // Shrinks the iterator when T is a ZST, by moving the end of the iterator
+ // backwards by `n`. `n` must not exceed `self.len()`.
+ macro_rules! zst_shrink {
+ ($self: ident, $n: ident) => {
+ $self.end = ($self.end as * $raw_mut u8).wrapping_offset(-$n) as * $raw_mut T;
+ }
+ }
+
+ impl<'a, T> $name<'a, T> {
+ // Helper function for creating a slice from the iterator.
+ #[inline(always)]
+ fn make_slice(&self) -> &'a [T] {
+ // SAFETY: the iterator was created from a slice with pointer
+ // `self.ptr` and length `len!(self)`. This guarantees that all
+ // the prerequisites for `from_raw_parts` are fulfilled.
+ unsafe { from_raw_parts(self.ptr.as_ptr(), len!(self)) }
+ }
+
+ // Helper function for moving the start of the iterator forwards by `offset` elements,
+ // returning the old start.
+ // Unsafe because the offset must not exceed `self.len()`.
+ #[inline(always)]
+ unsafe fn post_inc_start(&mut self, offset: isize) -> * $raw_mut T {
+ if mem::size_of::<T>() == 0 {
+ zst_shrink!(self, offset);
+ self.ptr.as_ptr()
+ } else {
+ let old = self.ptr.as_ptr();
+ // SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
+ // so this new pointer is inside `self` and thus guaranteed to be non-null.
+ self.ptr = unsafe { NonNull::new_unchecked(self.ptr.as_ptr().offset(offset)) };
+ old
+ }
+ }
+
+ // Helper function for moving the end of the iterator backwards by `offset` elements,
+ // returning the new end.
+ // Unsafe because the offset must not exceed `self.len()`.
+ #[inline(always)]
+ unsafe fn pre_dec_end(&mut self, offset: isize) -> * $raw_mut T {
+ if mem::size_of::<T>() == 0 {
+ zst_shrink!(self, offset);
+ self.ptr.as_ptr()
+ } else {
+ // SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
+ // which is guaranteed to not overflow an `isize`. Also, the resulting pointer
+ // is in bounds of `slice`, which fulfills the other requirements for `offset`.
+ self.end = unsafe { self.end.offset(-offset) };
+ self.end
+ }
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<T> ExactSizeIterator for $name<'_, T> {
+ #[inline(always)]
+ fn len(&self) -> usize {
+ len!(self)
+ }
+
+ #[inline(always)]
+ fn is_empty(&self) -> bool {
+ is_empty!(self)
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<'a, T> Iterator for $name<'a, T> {
+ type Item = $elem;
+
+ #[inline]
+ fn next(&mut self) -> Option<$elem> {
+ // could be implemented with slices, but this avoids bounds checks
+
+ // SAFETY: `assume` calls are safe since a slice's start pointer
+ // must be non-null, and slices over non-ZSTs must also have a
+ // non-null end pointer. The call to `next_unchecked!` is safe
+ // since we check if the iterator is empty first.
+ unsafe {
+ assume(!self.ptr.as_ptr().is_null());
+ if mem::size_of::<T>() != 0 {
+ assume(!self.end.is_null());
+ }
+ if is_empty!(self) {
+ None
+ } else {
+ Some(next_unchecked!(self))
+ }
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let exact = len!(self);
+ (exact, Some(exact))
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ len!(self)
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<$elem> {
+ if n >= len!(self) {
+ // This iterator is now empty.
+ if mem::size_of::<T>() == 0 {
+ // We have to do it this way as `ptr` may never be 0, but `end`
+ // could be (due to wrapping).
+ self.end = self.ptr.as_ptr();
+ } else {
+ // SAFETY: end can't be 0 if T isn't ZST because ptr isn't 0 and end >= ptr
+ unsafe {
+ self.ptr = NonNull::new_unchecked(self.end as *mut T);
+ }
+ }
+ return None;
+ }
+ // SAFETY: We are in bounds. `post_inc_start` does the right thing even for ZSTs.
+ unsafe {
+ self.post_inc_start(n as isize);
+ Some(next_unchecked!(self))
+ }
+ }
+
+ #[inline]
+ fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ let advance = cmp::min(len!(self), n);
+ // SAFETY: By construction, `advance` does not exceed `self.len()`.
+ unsafe { self.post_inc_start(advance as isize) };
+ if advance == n { Ok(()) } else { Err(advance) }
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<$elem> {
+ self.next_back()
+ }
+
+ // We override the default implementation, which uses `try_fold`,
+ // because this simple implementation generates less LLVM IR and is
+ // faster to compile.
+ #[inline]
+ fn for_each<F>(mut self, mut f: F)
+ where
+ Self: Sized,
+ F: FnMut(Self::Item),
+ {
+ while let Some(x) = self.next() {
+ f(x);
+ }
+ }
+
+ // We override the default implementation, which uses `try_fold`,
+ // because this simple implementation generates less LLVM IR and is
+ // faster to compile.
+ #[inline]
+ fn all<F>(&mut self, mut f: F) -> bool
+ where
+ Self: Sized,
+ F: FnMut(Self::Item) -> bool,
+ {
+ while let Some(x) = self.next() {
+ if !f(x) {
+ return false;
+ }
+ }
+ true
+ }
+
+ // We override the default implementation, which uses `try_fold`,
+ // because this simple implementation generates less LLVM IR and is
+ // faster to compile.
+ #[inline]
+ fn any<F>(&mut self, mut f: F) -> bool
+ where
+ Self: Sized,
+ F: FnMut(Self::Item) -> bool,
+ {
+ while let Some(x) = self.next() {
+ if f(x) {
+ return true;
+ }
+ }
+ false
+ }
+
+ // We override the default implementation, which uses `try_fold`,
+ // because this simple implementation generates less LLVM IR and is
+ // faster to compile.
+ #[inline]
+ fn find<P>(&mut self, mut predicate: P) -> Option<Self::Item>
+ where
+ Self: Sized,
+ P: FnMut(&Self::Item) -> bool,
+ {
+ while let Some(x) = self.next() {
+ if predicate(&x) {
+ return Some(x);
+ }
+ }
+ None
+ }
+
+ // We override the default implementation, which uses `try_fold`,
+ // because this simple implementation generates less LLVM IR and is
+ // faster to compile.
+ #[inline]
+ fn find_map<B, F>(&mut self, mut f: F) -> Option<B>
+ where
+ Self: Sized,
+ F: FnMut(Self::Item) -> Option<B>,
+ {
+ while let Some(x) = self.next() {
+ if let Some(y) = f(x) {
+ return Some(y);
+ }
+ }
+ None
+ }
+
+ // We override the default implementation, which uses `try_fold`,
+ // because this simple implementation generates less LLVM IR and is
+ // faster to compile. Also, the `assume` avoids a bounds check.
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ fn position<P>(&mut self, mut predicate: P) -> Option<usize> where
+ Self: Sized,
+ P: FnMut(Self::Item) -> bool,
+ {
+ let n = len!(self);
+ let mut i = 0;
+ while let Some(x) = self.next() {
+ if predicate(x) {
+ // SAFETY: we are guaranteed to be in bounds by the loop invariant:
+ // when `i >= n`, `self.next()` returns `None` and the loop breaks.
+ unsafe { assume(i < n) };
+ return Some(i);
+ }
+ i += 1;
+ }
+ None
+ }
+
+ // We override the default implementation, which uses `try_fold`,
+ // because this simple implementation generates less LLVM IR and is
+ // faster to compile. Also, the `assume` avoids a bounds check.
+ #[inline]
+ fn rposition<P>(&mut self, mut predicate: P) -> Option<usize> where
+ P: FnMut(Self::Item) -> bool,
+ Self: Sized + ExactSizeIterator + DoubleEndedIterator
+ {
+ let n = len!(self);
+ let mut i = n;
+ while let Some(x) = self.next_back() {
+ i -= 1;
+ if predicate(x) {
+ // SAFETY: `i` must be lower than `n` since it starts at `n`
+ // and is only decreasing.
+ unsafe { assume(i < n) };
+ return Some(i);
+ }
+ }
+ None
+ }
+
+ #[inline]
+ unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
+ // SAFETY: the caller must guarantee that `i` is in bounds of
+ // the underlying slice, so `i` cannot overflow an `isize`, and
+ // the returned reference is guaranteed to refer to an element
+ // of the slice and thus guaranteed to be valid.
+ //
+ // Note also that the caller guarantees that we're never called
+ // with the same index again, and that no other methods that will
+ // access this subslice are called, so it is valid for the
+ // returned reference to be mutable in the case of `IterMut`.
+ unsafe { & $( $mut_ )? * self.ptr.as_ptr().add(idx) }
+ }
+
+ $($extra)*
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<'a, T> DoubleEndedIterator for $name<'a, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<$elem> {
+ // could be implemented with slices, but this avoids bounds checks
+
+ // SAFETY: `assume` calls are safe since a slice's start pointer must be non-null,
+ // and slices over non-ZSTs must also have a non-null end pointer.
+ // The call to `next_back_unchecked!` is safe since we check if the iterator is
+ // empty first.
+ unsafe {
+ assume(!self.ptr.as_ptr().is_null());
+ if mem::size_of::<T>() != 0 {
+ assume(!self.end.is_null());
+ }
+ if is_empty!(self) {
+ None
+ } else {
+ Some(next_back_unchecked!(self))
+ }
+ }
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<$elem> {
+ if n >= len!(self) {
+ // This iterator is now empty.
+ self.end = self.ptr.as_ptr();
+ return None;
+ }
+ // SAFETY: We are in bounds. `pre_dec_end` does the right thing even for ZSTs.
+ unsafe {
+ self.pre_dec_end(n as isize);
+ Some(next_back_unchecked!(self))
+ }
+ }
+
+ #[inline]
+ fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ let advance = cmp::min(len!(self), n);
+ // SAFETY: By construction, `advance` does not exceed `self.len()`.
+ unsafe { self.pre_dec_end(advance as isize) };
+ if advance == n { Ok(()) } else { Err(advance) }
+ }
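+
+        // Illustrative behaviour (a note, not part of the upstream source):
+        // advancing past the end consumes the iterator and reports the
+        // distance actually covered, e.g. a 3-element iterator answers
+        // `advance_back_by(5)` with `Err(3)`.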
+ }
+
+ #[stable(feature = "fused", since = "1.26.0")]
+ impl<T> FusedIterator for $name<'_, T> {}
+
+ #[unstable(feature = "trusted_len", issue = "37572")]
+ unsafe impl<T> TrustedLen for $name<'_, T> {}
+ }
+}
+
+macro_rules! forward_iterator {
+ ($name:ident: $elem:ident, $iter_of:ty) => {
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<'a, $elem, P> Iterator for $name<'a, $elem, P>
+ where
+            P: FnMut(&$elem) -> bool,
+ {
+ type Item = $iter_of;
+
+ #[inline]
+ fn next(&mut self) -> Option<$iter_of> {
+ self.inner.next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+ }
+
+ #[stable(feature = "fused", since = "1.26.0")]
+        impl<'a, $elem, P> FusedIterator for $name<'a, $elem, P> where P: FnMut(&$elem) -> bool {}
+ };
+}
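+
+// Illustrative invocation (a sketch; the real call sites live in the iterator
+// definitions elsewhere in this module):
+//
+//     forward_iterator! { SplitN: T, &'a [T] }
+//
+// This expands to `Iterator` and `FusedIterator` impls for `SplitN<'a, T, P>`
+// that simply delegate `next` and `size_hint` to the wrapped `inner` iterator.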
diff --git a/library/core/src/slice/memchr.rs b/library/core/src/slice/memchr.rs
new file mode 100644
index 000000000..dffeaf6a8
--- /dev/null
+++ b/library/core/src/slice/memchr.rs
@@ -0,0 +1,142 @@
+// Original implementation taken from rust-memchr.
+// Copyright 2015 Andrew Gallant, bluss and Nicolas Koch
+
+use crate::cmp;
+use crate::mem;
+
+const LO_USIZE: usize = usize::repeat_u8(0x01);
+const HI_USIZE: usize = usize::repeat_u8(0x80);
+const USIZE_BYTES: usize = mem::size_of::<usize>();
+
+/// Returns `true` if `x` contains any zero byte.
+///
+/// From *Matters Computational*, J. Arndt:
+///
+/// "The idea is to subtract one from each of the bytes and then look for
+/// bytes where the borrow propagated all the way to the most significant
+/// bit."
+#[inline]
+fn contains_zero_byte(x: usize) -> bool {
+ x.wrapping_sub(LO_USIZE) & !x & HI_USIZE != 0
+}
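+
+// Illustrative check (not part of the upstream source): the subtraction
+// borrows through a byte exactly when that byte is zero, so the test has no
+// false positives.
+//
+//     assert!(contains_zero_byte(0x0012_3400_usize)); // low byte is 0x00
+//     assert!(!contains_zero_byte(usize::MAX)); // all bytes are 0xFF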
+
+#[cfg(target_pointer_width = "16")]
+#[inline]
+fn repeat_byte(b: u8) -> usize {
+ (b as usize) << 8 | b as usize
+}
+
+#[cfg(not(target_pointer_width = "16"))]
+#[inline]
+fn repeat_byte(b: u8) -> usize {
+ (b as usize) * (usize::MAX / 255)
+}
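+
+// Illustrative expansion (not part of the upstream source): on a 64-bit
+// target `usize::MAX / 255 == 0x0101_0101_0101_0101`, so
+// `repeat_byte(0x42) == 0x4242_4242_4242_4242`.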
+
+/// Returns the first index matching the byte `x` in `text`.
+#[must_use]
+#[inline]
+pub fn memchr(x: u8, text: &[u8]) -> Option<usize> {
+ // Fast path for small slices
+ if text.len() < 2 * USIZE_BYTES {
+ return text.iter().position(|elt| *elt == x);
+ }
+
+ memchr_general_case(x, text)
+}
+
+fn memchr_general_case(x: u8, text: &[u8]) -> Option<usize> {
+ // Scan for a single byte value by reading two `usize` words at a time.
+ //
+    // Split `text` into three parts:
+    // - unaligned initial part, before the first word-aligned address in `text`,
+    // - body, scanned by 2 words at a time,
+    // - the last remaining part, < 2 words in size.
+
+    // Search up to an aligned boundary.
+ let len = text.len();
+ let ptr = text.as_ptr();
+ let mut offset = ptr.align_offset(USIZE_BYTES);
+
+ if offset > 0 {
+ offset = cmp::min(offset, len);
+ if let Some(index) = text[..offset].iter().position(|elt| *elt == x) {
+ return Some(index);
+ }
+ }
+
+    // Search the body of the text.
+ let repeated_x = repeat_byte(x);
+ while offset <= len - 2 * USIZE_BYTES {
+        // SAFETY: the `while` condition guarantees a distance of at least
+        // 2 * USIZE_BYTES between `offset` and the end of the slice.
+ unsafe {
+ let u = *(ptr.add(offset) as *const usize);
+ let v = *(ptr.add(offset + USIZE_BYTES) as *const usize);
+
+            // Break if there is a matching byte.
+ let zu = contains_zero_byte(u ^ repeated_x);
+ let zv = contains_zero_byte(v ^ repeated_x);
+ if zu || zv {
+ break;
+ }
+ }
+ offset += USIZE_BYTES * 2;
+ }
+
+    // Find the byte in the tail, starting from the point where the body loop stopped.
+ text[offset..].iter().position(|elt| *elt == x).map(|i| offset + i)
+}
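+
+// A minimal usage sketch (illustrative, not part of the upstream source):
+// both paths return the index of the *first* occurrence.
+//
+//     assert_eq!(memchr(b'c', b"abcabc"), Some(2));
+//     assert_eq!(memchr(b'z', b"abcabc"), None);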
+
+/// Returns the last index matching the byte `x` in `text`.
+#[must_use]
+pub fn memrchr(x: u8, text: &[u8]) -> Option<usize> {
+ // Scan for a single byte value by reading two `usize` words at a time.
+ //
+    // Split `text` into three parts:
+ // - unaligned tail, after the last word aligned address in text,
+ // - body, scanned by 2 words at a time,
+ // - the first remaining bytes, < 2 word size.
+ let len = text.len();
+ let ptr = text.as_ptr();
+ type Chunk = usize;
+
+ let (min_aligned_offset, max_aligned_offset) = {
+ // We call this just to obtain the length of the prefix and suffix.
+ // In the middle we always process two chunks at once.
+        // SAFETY: transmuting `[u8]` to `[(usize, usize)]` is safe except for
+        // size differences, which are handled by `align_to`.
+ let (prefix, _, suffix) = unsafe { text.align_to::<(Chunk, Chunk)>() };
+ (prefix.len(), len - suffix.len())
+ };
+
+ let mut offset = max_aligned_offset;
+ if let Some(index) = text[offset..].iter().rposition(|elt| *elt == x) {
+ return Some(offset + index);
+ }
+
+    // Search the body of the text; make sure we don't cross `min_aligned_offset`.
+    // `offset` is always aligned, so just testing `>` is sufficient and avoids a
+    // possible overflow.
+ let repeated_x = repeat_byte(x);
+ let chunk_bytes = mem::size_of::<Chunk>();
+
+ while offset > min_aligned_offset {
+        // SAFETY: `offset` starts at `len - suffix.len()`; as long as it is
+        // greater than `min_aligned_offset` (`prefix.len()`), the remaining
+        // distance is at least `2 * chunk_bytes`.
+ unsafe {
+ let u = *(ptr.offset(offset as isize - 2 * chunk_bytes as isize) as *const Chunk);
+ let v = *(ptr.offset(offset as isize - chunk_bytes as isize) as *const Chunk);
+
+ // Break if there is a matching byte.
+ let zu = contains_zero_byte(u ^ repeated_x);
+ let zv = contains_zero_byte(v ^ repeated_x);
+ if zu || zv {
+ break;
+ }
+ }
+ offset -= 2 * chunk_bytes;
+ }
+
+    // Find the byte in the prefix, before the point where the body loop stopped.
+ text[..offset].iter().rposition(|elt| *elt == x)
+}
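+
+// A minimal usage sketch (illustrative, not part of the upstream source):
+// `memrchr` mirrors `memchr` but reports the *last* occurrence.
+//
+//     assert_eq!(memrchr(b'c', b"abcabc"), Some(5));
+//     assert_eq!(memrchr(b'z', b"abcabc"), None);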
diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs
new file mode 100644
index 000000000..e6ca6ef82
--- /dev/null
+++ b/library/core/src/slice/mod.rs
@@ -0,0 +1,4244 @@
+//! Slice management and manipulation.
+//!
+//! For more details see [`std::slice`].
+//!
+//! [`std::slice`]: ../../std/slice/index.html
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::cmp::Ordering::{self, Greater, Less};
+use crate::intrinsics::{assert_unsafe_precondition, exact_div};
+use crate::marker::Copy;
+use crate::mem;
+use crate::num::NonZeroUsize;
+use crate::ops::{Bound, FnMut, OneSidedRange, Range, RangeBounds};
+use crate::option::Option;
+use crate::option::Option::{None, Some};
+use crate::ptr;
+use crate::result::Result;
+use crate::result::Result::{Err, Ok};
+use crate::simd::{self, Simd};
+use crate::slice;
+
+#[unstable(
+ feature = "slice_internals",
+ issue = "none",
+ reason = "exposed from core to be reused in std; use the memchr crate"
+)]
+/// Pure Rust `memchr` implementation, taken from rust-memchr.
+pub mod memchr;
+
+mod ascii;
+mod cmp;
+mod index;
+mod iter;
+mod raw;
+mod rotate;
+mod sort;
+mod specialize;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use iter::{Chunks, ChunksMut, Windows};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use iter::{Iter, IterMut};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use iter::{RSplitN, RSplitNMut, Split, SplitMut, SplitN, SplitNMut};
+
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+pub use iter::{RSplit, RSplitMut};
+
+#[stable(feature = "chunks_exact", since = "1.31.0")]
+pub use iter::{ChunksExact, ChunksExactMut};
+
+#[stable(feature = "rchunks", since = "1.31.0")]
+pub use iter::{RChunks, RChunksExact, RChunksExactMut, RChunksMut};
+
+#[unstable(feature = "array_chunks", issue = "74985")]
+pub use iter::{ArrayChunks, ArrayChunksMut};
+
+#[unstable(feature = "array_windows", issue = "75027")]
+pub use iter::ArrayWindows;
+
+#[unstable(feature = "slice_group_by", issue = "80552")]
+pub use iter::{GroupBy, GroupByMut};
+
+#[stable(feature = "split_inclusive", since = "1.51.0")]
+pub use iter::{SplitInclusive, SplitInclusiveMut};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use raw::{from_raw_parts, from_raw_parts_mut};
+
+#[stable(feature = "from_ref", since = "1.28.0")]
+pub use raw::{from_mut, from_ref};
+
+#[unstable(feature = "slice_from_ptr_range", issue = "89792")]
+pub use raw::{from_mut_ptr_range, from_ptr_range};
+
+// This function is public only because there is no other way to unit test heapsort.
+#[unstable(feature = "sort_internals", reason = "internal to sort module", issue = "none")]
+pub use sort::heapsort;
+
+#[stable(feature = "slice_get_slice", since = "1.28.0")]
+pub use index::SliceIndex;
+
+#[unstable(feature = "slice_range", issue = "76393")]
+pub use index::range;
+
+#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
+pub use ascii::EscapeAscii;
+
+/// Calculates the direction and split point of a one-sided range.
+///
+/// This is a helper function for `take` and `take_mut` that returns
+/// the direction of the split (front or back) as well as the index at
+/// which to split. Returns `None` if the split index would overflow.
+#[inline]
+fn split_point_of(range: impl OneSidedRange<usize>) -> Option<(Direction, usize)> {
+ use Bound::*;
+
+ Some(match (range.start_bound(), range.end_bound()) {
+ (Unbounded, Excluded(i)) => (Direction::Front, *i),
+ (Unbounded, Included(i)) => (Direction::Front, i.checked_add(1)?),
+ (Excluded(i), Unbounded) => (Direction::Back, i.checked_add(1)?),
+ (Included(i), Unbounded) => (Direction::Back, *i),
+ _ => unreachable!(),
+ })
+}
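+
+// Illustrative mapping (a sketch, not part of the upstream source): each
+// one-sided range resolves to a direction plus an exclusive split index.
+//
+//     ..3            => Some((Direction::Front, 3))
+//     ..=3           => Some((Direction::Front, 4))
+//     3..            => Some((Direction::Back, 3))
+//     ..=usize::MAX  => None (the split index would overflow)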
+
+enum Direction {
+ Front,
+ Back,
+}
+
+#[cfg(not(test))]
+impl<T> [T] {
+ /// Returns the number of elements in the slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ /// assert_eq!(a.len(), 3);
+ /// ```
+ #[lang = "slice_len_fn"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_slice_len", since = "1.39.0")]
+ #[inline]
+ #[must_use]
+ // SAFETY: const sound because we transmute out the length field as a usize (which it must be)
+ pub const fn len(&self) -> usize {
+ // FIXME: Replace with `crate::ptr::metadata(self)` when that is const-stable.
+ // As of this writing this causes a "Const-stable functions can only call other
+ // const-stable functions" error.
+
+ // SAFETY: Accessing the value from the `PtrRepr` union is safe since *const T
+ // and PtrComponents<T> have the same memory layouts. Only std can make this
+ // guarantee.
+ unsafe { crate::ptr::PtrRepr { const_ptr: self }.components.metadata }
+ }
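+
+    // Illustrative note (not part of the upstream source): `&[T]` is a wide
+    // pointer, i.e. a (data pointer, length) pair, so reading the metadata
+    // component yields the length without dereferencing the data:
+    //
+    //     let s: &[u8] = &[1, 2, 3];
+    //     assert_eq!(s.len(), 3); // the metadata half of the wide pointer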
+
+ /// Returns `true` if the slice has a length of 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ /// assert!(!a.is_empty());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_slice_is_empty", since = "1.39.0")]
+ #[inline]
+ #[must_use]
+ pub const fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Returns the first element of the slice, or `None` if it is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = [10, 40, 30];
+ /// assert_eq!(Some(&10), v.first());
+ ///
+ /// let w: &[i32] = &[];
+ /// assert_eq!(None, w.first());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_slice_first_last_not_mut", since = "1.56.0")]
+ #[inline]
+ #[must_use]
+ pub const fn first(&self) -> Option<&T> {
+ if let [first, ..] = self { Some(first) } else { None }
+ }
+
+    /// Returns a mutable reference to the first element of the slice, or `None` if it is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = &mut [0, 1, 2];
+ ///
+ /// if let Some(first) = x.first_mut() {
+ /// *first = 5;
+ /// }
+ /// assert_eq!(x, &[5, 1, 2]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_slice_first_last", issue = "83570")]
+ #[inline]
+ #[must_use]
+ pub const fn first_mut(&mut self) -> Option<&mut T> {
+ if let [first, ..] = self { Some(first) } else { None }
+ }
+
+ /// Returns the first and all the rest of the elements of the slice, or `None` if it is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = &[0, 1, 2];
+ ///
+ /// if let Some((first, elements)) = x.split_first() {
+ /// assert_eq!(first, &0);
+ /// assert_eq!(elements, &[1, 2]);
+ /// }
+ /// ```
+ #[stable(feature = "slice_splits", since = "1.5.0")]
+ #[rustc_const_stable(feature = "const_slice_first_last_not_mut", since = "1.56.0")]
+ #[inline]
+ #[must_use]
+ pub const fn split_first(&self) -> Option<(&T, &[T])> {
+ if let [first, tail @ ..] = self { Some((first, tail)) } else { None }
+ }
+
+ /// Returns the first and all the rest of the elements of the slice, or `None` if it is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = &mut [0, 1, 2];
+ ///
+ /// if let Some((first, elements)) = x.split_first_mut() {
+ /// *first = 3;
+ /// elements[0] = 4;
+ /// elements[1] = 5;
+ /// }
+ /// assert_eq!(x, &[3, 4, 5]);
+ /// ```
+ #[stable(feature = "slice_splits", since = "1.5.0")]
+ #[rustc_const_unstable(feature = "const_slice_first_last", issue = "83570")]
+ #[inline]
+ #[must_use]
+ pub const fn split_first_mut(&mut self) -> Option<(&mut T, &mut [T])> {
+ if let [first, tail @ ..] = self { Some((first, tail)) } else { None }
+ }
+
+ /// Returns the last and all the rest of the elements of the slice, or `None` if it is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = &[0, 1, 2];
+ ///
+ /// if let Some((last, elements)) = x.split_last() {
+ /// assert_eq!(last, &2);
+ /// assert_eq!(elements, &[0, 1]);
+ /// }
+ /// ```
+ #[stable(feature = "slice_splits", since = "1.5.0")]
+ #[rustc_const_stable(feature = "const_slice_first_last_not_mut", since = "1.56.0")]
+ #[inline]
+ #[must_use]
+ pub const fn split_last(&self) -> Option<(&T, &[T])> {
+ if let [init @ .., last] = self { Some((last, init)) } else { None }
+ }
+
+ /// Returns the last and all the rest of the elements of the slice, or `None` if it is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = &mut [0, 1, 2];
+ ///
+ /// if let Some((last, elements)) = x.split_last_mut() {
+ /// *last = 3;
+ /// elements[0] = 4;
+ /// elements[1] = 5;
+ /// }
+ /// assert_eq!(x, &[4, 5, 3]);
+ /// ```
+ #[stable(feature = "slice_splits", since = "1.5.0")]
+ #[rustc_const_unstable(feature = "const_slice_first_last", issue = "83570")]
+ #[inline]
+ #[must_use]
+ pub const fn split_last_mut(&mut self) -> Option<(&mut T, &mut [T])> {
+ if let [init @ .., last] = self { Some((last, init)) } else { None }
+ }
+
+ /// Returns the last element of the slice, or `None` if it is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = [10, 40, 30];
+ /// assert_eq!(Some(&30), v.last());
+ ///
+ /// let w: &[i32] = &[];
+ /// assert_eq!(None, w.last());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_slice_first_last_not_mut", since = "1.56.0")]
+ #[inline]
+ #[must_use]
+ pub const fn last(&self) -> Option<&T> {
+ if let [.., last] = self { Some(last) } else { None }
+ }
+
+    /// Returns a mutable reference to the last item in the slice, or `None` if it is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = &mut [0, 1, 2];
+ ///
+ /// if let Some(last) = x.last_mut() {
+ /// *last = 10;
+ /// }
+ /// assert_eq!(x, &[0, 1, 10]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_slice_first_last", issue = "83570")]
+ #[inline]
+ #[must_use]
+ pub const fn last_mut(&mut self) -> Option<&mut T> {
+ if let [.., last] = self { Some(last) } else { None }
+ }
+
+ /// Returns a reference to an element or subslice depending on the type of
+ /// index.
+ ///
+ /// - If given a position, returns a reference to the element at that
+ /// position or `None` if out of bounds.
+ /// - If given a range, returns the subslice corresponding to that range,
+ /// or `None` if out of bounds.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = [10, 40, 30];
+ /// assert_eq!(Some(&40), v.get(1));
+ /// assert_eq!(Some(&[10, 40][..]), v.get(0..2));
+ /// assert_eq!(None, v.get(3));
+ /// assert_eq!(None, v.get(0..4));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+ #[inline]
+ #[must_use]
+ pub const fn get<I>(&self, index: I) -> Option<&I::Output>
+ where
+ I: ~const SliceIndex<Self>,
+ {
+ index.get(self)
+ }
+
+ /// Returns a mutable reference to an element or subslice depending on the
+ /// type of index (see [`get`]) or `None` if the index is out of bounds.
+ ///
+ /// [`get`]: slice::get
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = &mut [0, 1, 2];
+ ///
+ /// if let Some(elem) = x.get_mut(1) {
+ /// *elem = 42;
+ /// }
+ /// assert_eq!(x, &[0, 42, 2]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+ #[inline]
+ #[must_use]
+ pub const fn get_mut<I>(&mut self, index: I) -> Option<&mut I::Output>
+ where
+ I: ~const SliceIndex<Self>,
+ {
+ index.get_mut(self)
+ }
+
+ /// Returns a reference to an element or subslice, without doing bounds
+ /// checking.
+ ///
+ /// For a safe alternative see [`get`].
+ ///
+ /// # Safety
+ ///
+ /// Calling this method with an out-of-bounds index is *[undefined behavior]*
+ /// even if the resulting reference is not used.
+ ///
+ /// [`get`]: slice::get
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = &[1, 2, 4];
+ ///
+ /// unsafe {
+ /// assert_eq!(x.get_unchecked(1), &2);
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+ #[inline]
+ #[must_use]
+ pub const unsafe fn get_unchecked<I>(&self, index: I) -> &I::Output
+ where
+ I: ~const SliceIndex<Self>,
+ {
+ // SAFETY: the caller must uphold most of the safety requirements for `get_unchecked`;
+ // the slice is dereferenceable because `self` is a safe reference.
+ // The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
+ unsafe { &*index.get_unchecked(self) }
+ }
+
+ /// Returns a mutable reference to an element or subslice, without doing
+ /// bounds checking.
+ ///
+ /// For a safe alternative see [`get_mut`].
+ ///
+ /// # Safety
+ ///
+ /// Calling this method with an out-of-bounds index is *[undefined behavior]*
+ /// even if the resulting reference is not used.
+ ///
+ /// [`get_mut`]: slice::get_mut
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = &mut [1, 2, 4];
+ ///
+ /// unsafe {
+ /// let elem = x.get_unchecked_mut(1);
+ /// *elem = 13;
+ /// }
+ /// assert_eq!(x, &[1, 13, 4]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+ #[inline]
+ #[must_use]
+ pub const unsafe fn get_unchecked_mut<I>(&mut self, index: I) -> &mut I::Output
+ where
+ I: ~const SliceIndex<Self>,
+ {
+ // SAFETY: the caller must uphold the safety requirements for `get_unchecked_mut`;
+ // the slice is dereferenceable because `self` is a safe reference.
+ // The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
+ unsafe { &mut *index.get_unchecked_mut(self) }
+ }
+
+ /// Returns a raw pointer to the slice's buffer.
+ ///
+ /// The caller must ensure that the slice outlives the pointer this
+ /// function returns, or else it will end up pointing to garbage.
+ ///
+ /// The caller must also ensure that the memory the pointer (non-transitively) points to
+ /// is never written to (except inside an `UnsafeCell`) using this pointer or any pointer
+ /// derived from it. If you need to mutate the contents of the slice, use [`as_mut_ptr`].
+ ///
+ /// Modifying the container referenced by this slice may cause its buffer
+ /// to be reallocated, which would also make any pointers to it invalid.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = &[1, 2, 4];
+ /// let x_ptr = x.as_ptr();
+ ///
+ /// unsafe {
+ /// for i in 0..x.len() {
+ /// assert_eq!(x.get_unchecked(i), &*x_ptr.add(i));
+ /// }
+ /// }
+ /// ```
+ ///
+ /// [`as_mut_ptr`]: slice::as_mut_ptr
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_slice_as_ptr", since = "1.32.0")]
+ #[inline]
+ #[must_use]
+ pub const fn as_ptr(&self) -> *const T {
+ self as *const [T] as *const T
+ }
+
+ /// Returns an unsafe mutable pointer to the slice's buffer.
+ ///
+ /// The caller must ensure that the slice outlives the pointer this
+ /// function returns, or else it will end up pointing to garbage.
+ ///
+ /// Modifying the container referenced by this slice may cause its buffer
+ /// to be reallocated, which would also make any pointers to it invalid.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = &mut [1, 2, 4];
+ /// let x_ptr = x.as_mut_ptr();
+ ///
+ /// unsafe {
+ /// for i in 0..x.len() {
+ /// *x_ptr.add(i) += 2;
+ /// }
+ /// }
+ /// assert_eq!(x, &[3, 4, 6]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
+ #[rustc_allow_const_fn_unstable(const_mut_refs)]
+ #[inline]
+ #[must_use]
+ pub const fn as_mut_ptr(&mut self) -> *mut T {
+ self as *mut [T] as *mut T
+ }
+
+ /// Returns the two raw pointers spanning the slice.
+ ///
+ /// The returned range is half-open, which means that the end pointer
+ /// points *one past* the last element of the slice. This way, an empty
+ /// slice is represented by two equal pointers, and the difference between
+ /// the two pointers represents the size of the slice.
+ ///
+ /// See [`as_ptr`] for warnings on using these pointers. The end pointer
+ /// requires extra caution, as it does not point to a valid element in the
+ /// slice.
+ ///
+ /// This function is useful for interacting with foreign interfaces which
+ /// use two pointers to refer to a range of elements in memory, as is
+ /// common in C++.
+ ///
+ /// It can also be useful to check if a pointer to an element refers to an
+ /// element of this slice:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ /// let x = &a[1] as *const _;
+ /// let y = &5 as *const _;
+ ///
+ /// assert!(a.as_ptr_range().contains(&x));
+ /// assert!(!a.as_ptr_range().contains(&y));
+ /// ```
+ ///
+ /// [`as_ptr`]: slice::as_ptr
+ #[stable(feature = "slice_ptr_range", since = "1.48.0")]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
+ #[inline]
+ #[must_use]
+ pub const fn as_ptr_range(&self) -> Range<*const T> {
+ let start = self.as_ptr();
+ // SAFETY: The `add` here is safe, because:
+ //
+ // - Both pointers are part of the same object, as pointing directly
+ // past the object also counts.
+ //
+ // - The size of the slice is never larger than isize::MAX bytes, as
+ // noted here:
+ // - https://github.com/rust-lang/unsafe-code-guidelines/issues/102#issuecomment-473340447
+ // - https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ // - https://doc.rust-lang.org/core/slice/fn.from_raw_parts.html#safety
+ // (This doesn't seem normative yet, but the very same assumption is
+ // made in many places, including the Index implementation of slices.)
+ //
+ // - There is no wrapping around involved, as slices do not wrap past
+ // the end of the address space.
+ //
+ // See the documentation of pointer::add.
+ let end = unsafe { start.add(self.len()) };
+ start..end
+ }
+
+ /// Returns the two unsafe mutable pointers spanning the slice.
+ ///
+ /// The returned range is half-open, which means that the end pointer
+ /// points *one past* the last element of the slice. This way, an empty
+ /// slice is represented by two equal pointers, and the difference between
+ /// the two pointers represents the size of the slice.
+ ///
+ /// See [`as_mut_ptr`] for warnings on using these pointers. The end
+ /// pointer requires extra caution, as it does not point to a valid element
+ /// in the slice.
+ ///
+ /// This function is useful for interacting with foreign interfaces which
+ /// use two pointers to refer to a range of elements in memory, as is
+ /// common in C++.
+ ///
+ /// [`as_mut_ptr`]: slice::as_mut_ptr
+ #[stable(feature = "slice_ptr_range", since = "1.48.0")]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
+ #[rustc_allow_const_fn_unstable(const_mut_refs)]
+ #[inline]
+ #[must_use]
+ pub const fn as_mut_ptr_range(&mut self) -> Range<*mut T> {
+ let start = self.as_mut_ptr();
+ // SAFETY: See as_ptr_range() above for why `add` here is safe.
+ let end = unsafe { start.add(self.len()) };
+ start..end
+ }
+
+ /// Swaps two elements in the slice.
+ ///
+ /// # Arguments
+ ///
+    /// * `a` - The index of the first element
+    /// * `b` - The index of the second element
+ ///
+ /// # Panics
+ ///
+ /// Panics if `a` or `b` are out of bounds.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = ["a", "b", "c", "d", "e"];
+ /// v.swap(2, 4);
+ /// assert!(v == ["a", "b", "e", "d", "c"]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+ #[inline]
+ #[track_caller]
+ pub const fn swap(&mut self, a: usize, b: usize) {
+ // FIXME: use swap_unchecked here (https://github.com/rust-lang/rust/pull/88540#issuecomment-944344343)
+ // Can't take two mutable loans from one vector, so instead use raw pointers.
+ let pa = ptr::addr_of_mut!(self[a]);
+ let pb = ptr::addr_of_mut!(self[b]);
+ // SAFETY: `pa` and `pb` have been created from safe mutable references and refer
+ // to elements in the slice and therefore are guaranteed to be valid and aligned.
+ // Note that accessing the elements behind `a` and `b` is checked and will
+ // panic when out of bounds.
+ unsafe {
+ ptr::swap(pa, pb);
+ }
+ }
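+
+    // Illustrative contrast (a sketch, not part of the upstream source): the
+    // obvious safe form is rejected by the borrow checker because it takes
+    // two simultaneous mutable loans of `self`:
+    //
+    //     mem::swap(&mut self[a], &mut self[b]); // error[E0499]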
+
+ /// Swaps two elements in the slice, without doing bounds checking.
+ ///
+ /// For a safe alternative see [`swap`].
+ ///
+ /// # Arguments
+ ///
+    /// * `a` - The index of the first element
+    /// * `b` - The index of the second element
+ ///
+ /// # Safety
+ ///
+ /// Calling this method with an out-of-bounds index is *[undefined behavior]*.
+ /// The caller has to ensure that `a < self.len()` and `b < self.len()`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_swap_unchecked)]
+ ///
+ /// let mut v = ["a", "b", "c", "d"];
+ /// // SAFETY: we know that 1 and 3 are both indices of the slice
+ /// unsafe { v.swap_unchecked(1, 3) };
+ /// assert!(v == ["a", "d", "c", "b"]);
+ /// ```
+ ///
+ /// [`swap`]: slice::swap
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ #[unstable(feature = "slice_swap_unchecked", issue = "88539")]
+ #[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+ pub const unsafe fn swap_unchecked(&mut self, a: usize, b: usize) {
+ let ptr = self.as_mut_ptr();
+ // SAFETY: caller has to guarantee that `a < self.len()` and `b < self.len()`
+ unsafe {
+ assert_unsafe_precondition!(a < self.len() && b < self.len());
+ ptr::swap(ptr.add(a), ptr.add(b));
+ }
+ }
+
+ /// Reverses the order of elements in the slice, in place.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [1, 2, 3];
+ /// v.reverse();
+ /// assert!(v == [3, 2, 1]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn reverse(&mut self) {
+ let half_len = self.len() / 2;
+ let Range { start, end } = self.as_mut_ptr_range();
+
+ // These slices will skip the middle item for an odd length,
+ // since that one doesn't need to move.
+ let (front_half, back_half) =
+ // SAFETY: Both are subparts of the original slice, so the memory
+ // range is valid, and they don't overlap because they're each only
+ // half (or less) of the original slice.
+ unsafe {
+ (
+ slice::from_raw_parts_mut(start, half_len),
+ slice::from_raw_parts_mut(end.sub(half_len), half_len),
+ )
+ };
+
+ // Introducing a function boundary here means that the two halves
+ // get `noalias` markers, allowing better optimization as LLVM
+ // knows that they're disjoint, unlike in the original slice.
+ revswap(front_half, back_half, half_len);
+
+ #[inline]
+ fn revswap<T>(a: &mut [T], b: &mut [T], n: usize) {
+ debug_assert_eq!(a.len(), n);
+ debug_assert_eq!(b.len(), n);
+
+ // Because this function is first compiled in isolation,
+ // this check tells LLVM that the indexing below is
+ // in-bounds. Then after inlining -- once the actual
+ // lengths of the slices are known -- it's removed.
+ let (a, b) = (&mut a[..n], &mut b[..n]);
+
+ for i in 0..n {
+ mem::swap(&mut a[i], &mut b[n - 1 - i]);
+ }
+ }
+ }
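+
+    // Illustrative contrast (a sketch, not part of the upstream source): a
+    // naive in-place loop borrows the whole slice for every swap, so LLVM
+    // cannot prove the two halves disjoint and keeps its bounds checks:
+    //
+    //     for i in 0..self.len() / 2 {
+    //         let end = self.len() - 1 - i;
+    //         self.swap(i, end);
+    //     }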
+
+ /// Returns an iterator over the slice.
+ ///
+ /// The iterator yields all items from start to end.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = &[1, 2, 4];
+ /// let mut iterator = x.iter();
+ ///
+ /// assert_eq!(iterator.next(), Some(&1));
+ /// assert_eq!(iterator.next(), Some(&2));
+ /// assert_eq!(iterator.next(), Some(&4));
+ /// assert_eq!(iterator.next(), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn iter(&self) -> Iter<'_, T> {
+ Iter::new(self)
+ }
+
+ /// Returns an iterator that allows modifying each value.
+ ///
+ /// The iterator yields all items from start to end.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = &mut [1, 2, 4];
+ /// for elem in x.iter_mut() {
+ /// *elem += 2;
+ /// }
+ /// assert_eq!(x, &[3, 4, 6]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn iter_mut(&mut self) -> IterMut<'_, T> {
+ IterMut::new(self)
+ }
+
+ /// Returns an iterator over all contiguous windows of length
+ /// `size`. The windows overlap. If the slice is shorter than
+ /// `size`, the iterator returns no values.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `size` is 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let slice = ['r', 'u', 's', 't'];
+ /// let mut iter = slice.windows(2);
+ /// assert_eq!(iter.next().unwrap(), &['r', 'u']);
+ /// assert_eq!(iter.next().unwrap(), &['u', 's']);
+ /// assert_eq!(iter.next().unwrap(), &['s', 't']);
+ /// assert!(iter.next().is_none());
+ /// ```
+ ///
+ /// If the slice is shorter than `size`:
+ ///
+ /// ```
+ /// let slice = ['f', 'o', 'o'];
+ /// let mut iter = slice.windows(4);
+ /// assert!(iter.next().is_none());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn windows(&self, size: usize) -> Windows<'_, T> {
+ let size = NonZeroUsize::new(size).expect("size is zero");
+ Windows::new(self, size)
+ }
+
+ /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
+ /// beginning of the slice.
+ ///
+ /// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
+ /// slice, then the last chunk will not have length `chunk_size`.
+ ///
+ /// See [`chunks_exact`] for a variant of this iterator that returns chunks of always exactly
+ /// `chunk_size` elements, and [`rchunks`] for the same iterator but starting at the end of the
+ /// slice.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `chunk_size` is 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let slice = ['l', 'o', 'r', 'e', 'm'];
+ /// let mut iter = slice.chunks(2);
+ /// assert_eq!(iter.next().unwrap(), &['l', 'o']);
+ /// assert_eq!(iter.next().unwrap(), &['r', 'e']);
+ /// assert_eq!(iter.next().unwrap(), &['m']);
+ /// assert!(iter.next().is_none());
+ /// ```
+ ///
+ /// [`chunks_exact`]: slice::chunks_exact
+ /// [`rchunks`]: slice::rchunks
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn chunks(&self, chunk_size: usize) -> Chunks<'_, T> {
+ assert_ne!(chunk_size, 0, "chunks cannot have a size of zero");
+ Chunks::new(self, chunk_size)
+ }
+
+ /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
+ /// beginning of the slice.
+ ///
+ /// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
+ /// length of the slice, then the last chunk will not have length `chunk_size`.
+ ///
+ /// See [`chunks_exact_mut`] for a variant of this iterator that returns chunks of always
+ /// exactly `chunk_size` elements, and [`rchunks_mut`] for the same iterator but starting at
+ /// the end of the slice.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `chunk_size` is 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = &mut [0, 0, 0, 0, 0];
+ /// let mut count = 1;
+ ///
+ /// for chunk in v.chunks_mut(2) {
+ /// for elem in chunk.iter_mut() {
+ /// *elem += count;
+ /// }
+ /// count += 1;
+ /// }
+ /// assert_eq!(v, &[1, 1, 2, 2, 3]);
+ /// ```
+ ///
+ /// [`chunks_exact_mut`]: slice::chunks_exact_mut
+ /// [`rchunks_mut`]: slice::rchunks_mut
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<'_, T> {
+ assert_ne!(chunk_size, 0, "chunks cannot have a size of zero");
+ ChunksMut::new(self, chunk_size)
+ }
+
+ /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
+ /// beginning of the slice.
+ ///
+ /// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
+ /// slice, then the last up to `chunk_size-1` elements will be omitted and can be retrieved
+ /// from the `remainder` function of the iterator.
+ ///
+ /// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
+ /// resulting code better than in the case of [`chunks`].
+ ///
+ /// See [`chunks`] for a variant of this iterator that also returns the remainder as a smaller
+ /// chunk, and [`rchunks_exact`] for the same iterator but starting at the end of the slice.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `chunk_size` is 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let slice = ['l', 'o', 'r', 'e', 'm'];
+ /// let mut iter = slice.chunks_exact(2);
+ /// assert_eq!(iter.next().unwrap(), &['l', 'o']);
+ /// assert_eq!(iter.next().unwrap(), &['r', 'e']);
+ /// assert!(iter.next().is_none());
+ /// assert_eq!(iter.remainder(), &['m']);
+ /// ```
+ ///
+ /// [`chunks`]: slice::chunks
+ /// [`rchunks_exact`]: slice::rchunks_exact
+ #[stable(feature = "chunks_exact", since = "1.31.0")]
+ #[inline]
+ pub fn chunks_exact(&self, chunk_size: usize) -> ChunksExact<'_, T> {
+ assert_ne!(chunk_size, 0);
+ ChunksExact::new(self, chunk_size)
+ }
+
+ /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
+ /// beginning of the slice.
+ ///
+ /// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
+ /// length of the slice, then the last up to `chunk_size-1` elements will be omitted and can be
+ /// retrieved from the `into_remainder` function of the iterator.
+ ///
+ /// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
+ /// resulting code better than in the case of [`chunks_mut`].
+ ///
+ /// See [`chunks_mut`] for a variant of this iterator that also returns the remainder as a
+ /// smaller chunk, and [`rchunks_exact_mut`] for the same iterator but starting at the end of
+ /// the slice.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `chunk_size` is 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = &mut [0, 0, 0, 0, 0];
+ /// let mut count = 1;
+ ///
+ /// for chunk in v.chunks_exact_mut(2) {
+ /// for elem in chunk.iter_mut() {
+ /// *elem += count;
+ /// }
+ /// count += 1;
+ /// }
+ /// assert_eq!(v, &[1, 1, 2, 2, 0]);
+ /// ```
+ ///
+ /// [`chunks_mut`]: slice::chunks_mut
+ /// [`rchunks_exact_mut`]: slice::rchunks_exact_mut
+ #[stable(feature = "chunks_exact", since = "1.31.0")]
+ #[inline]
+ pub fn chunks_exact_mut(&mut self, chunk_size: usize) -> ChunksExactMut<'_, T> {
+ assert_ne!(chunk_size, 0);
+ ChunksExactMut::new(self, chunk_size)
+ }
+
+ /// Splits the slice into a slice of `N`-element arrays,
+ /// assuming that there's no remainder.
+ ///
+ /// # Safety
+ ///
+ /// This may only be called when
+    /// - The slice splits exactly into `N`-element chunks (i.e., `self.len() % N == 0`).
+ /// - `N != 0`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_as_chunks)]
+ /// let slice: &[char] = &['l', 'o', 'r', 'e', 'm', '!'];
+ /// let chunks: &[[char; 1]] =
+ /// // SAFETY: 1-element chunks never have remainder
+ /// unsafe { slice.as_chunks_unchecked() };
+ /// assert_eq!(chunks, &[['l'], ['o'], ['r'], ['e'], ['m'], ['!']]);
+ /// let chunks: &[[char; 3]] =
+ /// // SAFETY: The slice length (6) is a multiple of 3
+ /// unsafe { slice.as_chunks_unchecked() };
+ /// assert_eq!(chunks, &[['l', 'o', 'r'], ['e', 'm', '!']]);
+ ///
+ /// // These would be unsound:
+ /// // let chunks: &[[_; 5]] = slice.as_chunks_unchecked() // The slice length is not a multiple of 5
+ /// // let chunks: &[[_; 0]] = slice.as_chunks_unchecked() // Zero-length chunks are never allowed
+ /// ```
+ #[unstable(feature = "slice_as_chunks", issue = "74985")]
+ #[inline]
+ #[must_use]
+ pub unsafe fn as_chunks_unchecked<const N: usize>(&self) -> &[[T; N]] {
+ // SAFETY: Caller must guarantee that `N` is nonzero and exactly divides the slice length
+ let new_len = unsafe {
+ assert_unsafe_precondition!(N != 0 && self.len() % N == 0);
+ exact_div(self.len(), N)
+ };
+ // SAFETY: We cast a slice of `new_len * N` elements into
+ // a slice of `new_len` many `N` elements chunks.
+ unsafe { from_raw_parts(self.as_ptr().cast(), new_len) }
+ }
+
+ /// Splits the slice into a slice of `N`-element arrays,
+ /// starting at the beginning of the slice,
+ /// and a remainder slice with length strictly less than `N`.
+ ///
+ /// # Panics
+ ///
+    /// Panics if `N` is 0. This check will likely become a compile-time error
+    /// before this method is stabilized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_as_chunks)]
+ /// let slice = ['l', 'o', 'r', 'e', 'm'];
+ /// let (chunks, remainder) = slice.as_chunks();
+ /// assert_eq!(chunks, &[['l', 'o'], ['r', 'e']]);
+ /// assert_eq!(remainder, &['m']);
+ /// ```
+ #[unstable(feature = "slice_as_chunks", issue = "74985")]
+ #[inline]
+ #[must_use]
+ pub fn as_chunks<const N: usize>(&self) -> (&[[T; N]], &[T]) {
+ assert_ne!(N, 0);
+ let len = self.len() / N;
+ let (multiple_of_n, remainder) = self.split_at(len * N);
+ // SAFETY: We already panicked for zero, and ensured by construction
+ // that the length of the subslice is a multiple of N.
+ let array_slice = unsafe { multiple_of_n.as_chunks_unchecked() };
+ (array_slice, remainder)
+ }
+
+ /// Splits the slice into a slice of `N`-element arrays,
+ /// starting at the end of the slice,
+ /// and a remainder slice with length strictly less than `N`.
+ ///
+ /// # Panics
+ ///
+    /// Panics if `N` is 0. This check will likely become a compile-time error
+    /// before this method is stabilized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_as_chunks)]
+ /// let slice = ['l', 'o', 'r', 'e', 'm'];
+ /// let (remainder, chunks) = slice.as_rchunks();
+ /// assert_eq!(remainder, &['l']);
+ /// assert_eq!(chunks, &[['o', 'r'], ['e', 'm']]);
+ /// ```
+ #[unstable(feature = "slice_as_chunks", issue = "74985")]
+ #[inline]
+ #[must_use]
+ pub fn as_rchunks<const N: usize>(&self) -> (&[T], &[[T; N]]) {
+ assert_ne!(N, 0);
+ let len = self.len() / N;
+ let (remainder, multiple_of_n) = self.split_at(self.len() - len * N);
+ // SAFETY: We already panicked for zero, and ensured by construction
+ // that the length of the subslice is a multiple of N.
+ let array_slice = unsafe { multiple_of_n.as_chunks_unchecked() };
+ (remainder, array_slice)
+ }
+
+ /// Returns an iterator over `N` elements of the slice at a time, starting at the
+ /// beginning of the slice.
+ ///
+ /// The chunks are array references and do not overlap. If `N` does not divide the
+ /// length of the slice, then the last up to `N-1` elements will be omitted and can be
+ /// retrieved from the `remainder` function of the iterator.
+ ///
+ /// This method is the const generic equivalent of [`chunks_exact`].
+ ///
+ /// # Panics
+ ///
+    /// Panics if `N` is 0. This check will likely become a compile-time error
+    /// before this method is stabilized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(array_chunks)]
+ /// let slice = ['l', 'o', 'r', 'e', 'm'];
+ /// let mut iter = slice.array_chunks();
+ /// assert_eq!(iter.next().unwrap(), &['l', 'o']);
+ /// assert_eq!(iter.next().unwrap(), &['r', 'e']);
+ /// assert!(iter.next().is_none());
+ /// assert_eq!(iter.remainder(), &['m']);
+ /// ```
+ ///
+ /// [`chunks_exact`]: slice::chunks_exact
+ #[unstable(feature = "array_chunks", issue = "74985")]
+ #[inline]
+ pub fn array_chunks<const N: usize>(&self) -> ArrayChunks<'_, T, N> {
+ assert_ne!(N, 0);
+ ArrayChunks::new(self)
+ }
+
+ /// Splits the slice into a slice of `N`-element arrays,
+ /// assuming that there's no remainder.
+ ///
+ /// # Safety
+ ///
+ /// This may only be called when
+    /// - The slice splits exactly into `N`-element chunks (i.e., `self.len() % N == 0`).
+ /// - `N != 0`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_as_chunks)]
+ /// let slice: &mut [char] = &mut ['l', 'o', 'r', 'e', 'm', '!'];
+ /// let chunks: &mut [[char; 1]] =
+ /// // SAFETY: 1-element chunks never have remainder
+ /// unsafe { slice.as_chunks_unchecked_mut() };
+ /// chunks[0] = ['L'];
+ /// assert_eq!(chunks, &[['L'], ['o'], ['r'], ['e'], ['m'], ['!']]);
+ /// let chunks: &mut [[char; 3]] =
+ /// // SAFETY: The slice length (6) is a multiple of 3
+ /// unsafe { slice.as_chunks_unchecked_mut() };
+ /// chunks[1] = ['a', 'x', '?'];
+ /// assert_eq!(slice, &['L', 'o', 'r', 'a', 'x', '?']);
+ ///
+ /// // These would be unsound:
+ /// // let chunks: &[[_; 5]] = slice.as_chunks_unchecked_mut() // The slice length is not a multiple of 5
+ /// // let chunks: &[[_; 0]] = slice.as_chunks_unchecked_mut() // Zero-length chunks are never allowed
+ /// ```
+ #[unstable(feature = "slice_as_chunks", issue = "74985")]
+ #[inline]
+ #[must_use]
+ pub unsafe fn as_chunks_unchecked_mut<const N: usize>(&mut self) -> &mut [[T; N]] {
+ // SAFETY: Caller must guarantee that `N` is nonzero and exactly divides the slice length
+ let new_len = unsafe {
+ assert_unsafe_precondition!(N != 0 && self.len() % N == 0);
+ exact_div(self.len(), N)
+ };
+ // SAFETY: We cast a slice of `new_len * N` elements into
+ // a slice of `new_len` many `N` elements chunks.
+ unsafe { from_raw_parts_mut(self.as_mut_ptr().cast(), new_len) }
+ }
+
+ /// Splits the slice into a slice of `N`-element arrays,
+ /// starting at the beginning of the slice,
+ /// and a remainder slice with length strictly less than `N`.
+ ///
+ /// # Panics
+ ///
+    /// Panics if `N` is 0. This check will likely become a compile-time error
+    /// before this method is stabilized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_as_chunks)]
+ /// let v = &mut [0, 0, 0, 0, 0];
+ /// let mut count = 1;
+ ///
+ /// let (chunks, remainder) = v.as_chunks_mut();
+ /// remainder[0] = 9;
+ /// for chunk in chunks {
+ /// *chunk = [count; 2];
+ /// count += 1;
+ /// }
+ /// assert_eq!(v, &[1, 1, 2, 2, 9]);
+ /// ```
+ #[unstable(feature = "slice_as_chunks", issue = "74985")]
+ #[inline]
+ #[must_use]
+ pub fn as_chunks_mut<const N: usize>(&mut self) -> (&mut [[T; N]], &mut [T]) {
+ assert_ne!(N, 0);
+ let len = self.len() / N;
+ let (multiple_of_n, remainder) = self.split_at_mut(len * N);
+ // SAFETY: We already panicked for zero, and ensured by construction
+ // that the length of the subslice is a multiple of N.
+ let array_slice = unsafe { multiple_of_n.as_chunks_unchecked_mut() };
+ (array_slice, remainder)
+ }
+
+ /// Splits the slice into a slice of `N`-element arrays,
+ /// starting at the end of the slice,
+ /// and a remainder slice with length strictly less than `N`.
+ ///
+ /// # Panics
+ ///
+    /// Panics if `N` is 0. This check will likely become a compile-time error
+    /// before this method is stabilized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_as_chunks)]
+ /// let v = &mut [0, 0, 0, 0, 0];
+ /// let mut count = 1;
+ ///
+ /// let (remainder, chunks) = v.as_rchunks_mut();
+ /// remainder[0] = 9;
+ /// for chunk in chunks {
+ /// *chunk = [count; 2];
+ /// count += 1;
+ /// }
+ /// assert_eq!(v, &[9, 1, 1, 2, 2]);
+ /// ```
+ #[unstable(feature = "slice_as_chunks", issue = "74985")]
+ #[inline]
+ #[must_use]
+ pub fn as_rchunks_mut<const N: usize>(&mut self) -> (&mut [T], &mut [[T; N]]) {
+ assert_ne!(N, 0);
+ let len = self.len() / N;
+ let (remainder, multiple_of_n) = self.split_at_mut(self.len() - len * N);
+ // SAFETY: We already panicked for zero, and ensured by construction
+ // that the length of the subslice is a multiple of N.
+ let array_slice = unsafe { multiple_of_n.as_chunks_unchecked_mut() };
+ (remainder, array_slice)
+ }
+
+ /// Returns an iterator over `N` elements of the slice at a time, starting at the
+ /// beginning of the slice.
+ ///
+ /// The chunks are mutable array references and do not overlap. If `N` does not divide
+ /// the length of the slice, then the last up to `N-1` elements will be omitted and
+ /// can be retrieved from the `into_remainder` function of the iterator.
+ ///
+ /// This method is the const generic equivalent of [`chunks_exact_mut`].
+ ///
+ /// # Panics
+ ///
+    /// Panics if `N` is 0. This check will likely become a compile-time error
+    /// before this method is stabilized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(array_chunks)]
+ /// let v = &mut [0, 0, 0, 0, 0];
+ /// let mut count = 1;
+ ///
+ /// for chunk in v.array_chunks_mut() {
+ /// *chunk = [count; 2];
+ /// count += 1;
+ /// }
+ /// assert_eq!(v, &[1, 1, 2, 2, 0]);
+ /// ```
+ ///
+ /// [`chunks_exact_mut`]: slice::chunks_exact_mut
+ #[unstable(feature = "array_chunks", issue = "74985")]
+ #[inline]
+ pub fn array_chunks_mut<const N: usize>(&mut self) -> ArrayChunksMut<'_, T, N> {
+ assert_ne!(N, 0);
+ ArrayChunksMut::new(self)
+ }
+
+ /// Returns an iterator over overlapping windows of `N` elements of a slice,
+ /// starting at the beginning of the slice.
+ ///
+ /// This is the const generic equivalent of [`windows`].
+ ///
+ /// If `N` is greater than the size of the slice, it will return no windows.
+ ///
+ /// # Panics
+ ///
+    /// Panics if `N` is 0. This check will likely become a compile-time error
+    /// before this method is stabilized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(array_windows)]
+ /// let slice = [0, 1, 2, 3];
+ /// let mut iter = slice.array_windows();
+ /// assert_eq!(iter.next().unwrap(), &[0, 1]);
+ /// assert_eq!(iter.next().unwrap(), &[1, 2]);
+ /// assert_eq!(iter.next().unwrap(), &[2, 3]);
+ /// assert!(iter.next().is_none());
+ /// ```
+ ///
+ /// [`windows`]: slice::windows
+ #[unstable(feature = "array_windows", issue = "75027")]
+ #[inline]
+ pub fn array_windows<const N: usize>(&self) -> ArrayWindows<'_, T, N> {
+ assert_ne!(N, 0);
+ ArrayWindows::new(self)
+ }
+
+ /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
+ /// of the slice.
+ ///
+ /// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
+ /// slice, then the last chunk will not have length `chunk_size`.
+ ///
+ /// See [`rchunks_exact`] for a variant of this iterator that returns chunks of always exactly
+ /// `chunk_size` elements, and [`chunks`] for the same iterator but starting at the beginning
+ /// of the slice.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `chunk_size` is 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let slice = ['l', 'o', 'r', 'e', 'm'];
+ /// let mut iter = slice.rchunks(2);
+ /// assert_eq!(iter.next().unwrap(), &['e', 'm']);
+ /// assert_eq!(iter.next().unwrap(), &['o', 'r']);
+ /// assert_eq!(iter.next().unwrap(), &['l']);
+ /// assert!(iter.next().is_none());
+ /// ```
+ ///
+ /// [`rchunks_exact`]: slice::rchunks_exact
+ /// [`chunks`]: slice::chunks
+ #[stable(feature = "rchunks", since = "1.31.0")]
+ #[inline]
+ pub fn rchunks(&self, chunk_size: usize) -> RChunks<'_, T> {
+ assert!(chunk_size != 0);
+ RChunks::new(self, chunk_size)
+ }
+
+ /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
+ /// of the slice.
+ ///
+ /// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
+ /// length of the slice, then the last chunk will not have length `chunk_size`.
+ ///
+ /// See [`rchunks_exact_mut`] for a variant of this iterator that returns chunks of always
+ /// exactly `chunk_size` elements, and [`chunks_mut`] for the same iterator but starting at the
+ /// beginning of the slice.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `chunk_size` is 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = &mut [0, 0, 0, 0, 0];
+ /// let mut count = 1;
+ ///
+ /// for chunk in v.rchunks_mut(2) {
+ /// for elem in chunk.iter_mut() {
+ /// *elem += count;
+ /// }
+ /// count += 1;
+ /// }
+ /// assert_eq!(v, &[3, 2, 2, 1, 1]);
+ /// ```
+ ///
+ /// [`rchunks_exact_mut`]: slice::rchunks_exact_mut
+ /// [`chunks_mut`]: slice::chunks_mut
+ #[stable(feature = "rchunks", since = "1.31.0")]
+ #[inline]
+ pub fn rchunks_mut(&mut self, chunk_size: usize) -> RChunksMut<'_, T> {
+ assert!(chunk_size != 0);
+ RChunksMut::new(self, chunk_size)
+ }
+
+ /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
+ /// end of the slice.
+ ///
+ /// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
+ /// slice, then the last up to `chunk_size-1` elements will be omitted and can be retrieved
+ /// from the `remainder` function of the iterator.
+ ///
+ /// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
+ /// resulting code better than in the case of [`rchunks`].
+ ///
+ /// See [`rchunks`] for a variant of this iterator that also returns the remainder as a smaller
+ /// chunk, and [`chunks_exact`] for the same iterator but starting at the beginning of the
+ /// slice.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `chunk_size` is 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let slice = ['l', 'o', 'r', 'e', 'm'];
+ /// let mut iter = slice.rchunks_exact(2);
+ /// assert_eq!(iter.next().unwrap(), &['e', 'm']);
+ /// assert_eq!(iter.next().unwrap(), &['o', 'r']);
+ /// assert!(iter.next().is_none());
+ /// assert_eq!(iter.remainder(), &['l']);
+ /// ```
+ ///
+ /// [`chunks`]: slice::chunks
+ /// [`rchunks`]: slice::rchunks
+ /// [`chunks_exact`]: slice::chunks_exact
+ #[stable(feature = "rchunks", since = "1.31.0")]
+ #[inline]
+ pub fn rchunks_exact(&self, chunk_size: usize) -> RChunksExact<'_, T> {
+ assert!(chunk_size != 0);
+ RChunksExact::new(self, chunk_size)
+ }
+
+ /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
+ /// of the slice.
+ ///
+ /// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
+ /// length of the slice, then the last up to `chunk_size-1` elements will be omitted and can be
+ /// retrieved from the `into_remainder` function of the iterator.
+ ///
+ /// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
+ /// resulting code better than in the case of [`chunks_mut`].
+ ///
+ /// See [`rchunks_mut`] for a variant of this iterator that also returns the remainder as a
+ /// smaller chunk, and [`chunks_exact_mut`] for the same iterator but starting at the beginning
+ /// of the slice.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `chunk_size` is 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = &mut [0, 0, 0, 0, 0];
+ /// let mut count = 1;
+ ///
+ /// for chunk in v.rchunks_exact_mut(2) {
+ /// for elem in chunk.iter_mut() {
+ /// *elem += count;
+ /// }
+ /// count += 1;
+ /// }
+ /// assert_eq!(v, &[0, 2, 2, 1, 1]);
+ /// ```
+ ///
+ /// [`chunks_mut`]: slice::chunks_mut
+ /// [`rchunks_mut`]: slice::rchunks_mut
+ /// [`chunks_exact_mut`]: slice::chunks_exact_mut
+ #[stable(feature = "rchunks", since = "1.31.0")]
+ #[inline]
+ pub fn rchunks_exact_mut(&mut self, chunk_size: usize) -> RChunksExactMut<'_, T> {
+ assert!(chunk_size != 0);
+ RChunksExactMut::new(self, chunk_size)
+ }
+
+ /// Returns an iterator over the slice producing non-overlapping runs
+ /// of elements using the predicate to separate them.
+ ///
+    /// The predicate is called on pairs of consecutive elements: first on
+    /// `slice[0]` and `slice[1]`, then on `slice[1]` and `slice[2]`, and so
+    /// on.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_group_by)]
+ ///
+ /// let slice = &[1, 1, 1, 3, 3, 2, 2, 2];
+ ///
+ /// let mut iter = slice.group_by(|a, b| a == b);
+ ///
+ /// assert_eq!(iter.next(), Some(&[1, 1, 1][..]));
+ /// assert_eq!(iter.next(), Some(&[3, 3][..]));
+ /// assert_eq!(iter.next(), Some(&[2, 2, 2][..]));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// This method can be used to extract the sorted subslices:
+ ///
+ /// ```
+ /// #![feature(slice_group_by)]
+ ///
+ /// let slice = &[1, 1, 2, 3, 2, 3, 2, 3, 4];
+ ///
+ /// let mut iter = slice.group_by(|a, b| a <= b);
+ ///
+ /// assert_eq!(iter.next(), Some(&[1, 1, 2, 3][..]));
+ /// assert_eq!(iter.next(), Some(&[2, 3][..]));
+ /// assert_eq!(iter.next(), Some(&[2, 3, 4][..]));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ #[unstable(feature = "slice_group_by", issue = "80552")]
+ #[inline]
+ pub fn group_by<F>(&self, pred: F) -> GroupBy<'_, T, F>
+ where
+ F: FnMut(&T, &T) -> bool,
+ {
+ GroupBy::new(self, pred)
+ }
+
+ /// Returns an iterator over the slice producing non-overlapping mutable
+ /// runs of elements using the predicate to separate them.
+ ///
+    /// The predicate is called on pairs of consecutive elements: first on
+    /// `slice[0]` and `slice[1]`, then on `slice[1]` and `slice[2]`, and so
+    /// on.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_group_by)]
+ ///
+ /// let slice = &mut [1, 1, 1, 3, 3, 2, 2, 2];
+ ///
+ /// let mut iter = slice.group_by_mut(|a, b| a == b);
+ ///
+ /// assert_eq!(iter.next(), Some(&mut [1, 1, 1][..]));
+ /// assert_eq!(iter.next(), Some(&mut [3, 3][..]));
+ /// assert_eq!(iter.next(), Some(&mut [2, 2, 2][..]));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// This method can be used to extract the sorted subslices:
+ ///
+ /// ```
+ /// #![feature(slice_group_by)]
+ ///
+ /// let slice = &mut [1, 1, 2, 3, 2, 3, 2, 3, 4];
+ ///
+ /// let mut iter = slice.group_by_mut(|a, b| a <= b);
+ ///
+ /// assert_eq!(iter.next(), Some(&mut [1, 1, 2, 3][..]));
+ /// assert_eq!(iter.next(), Some(&mut [2, 3][..]));
+ /// assert_eq!(iter.next(), Some(&mut [2, 3, 4][..]));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ #[unstable(feature = "slice_group_by", issue = "80552")]
+ #[inline]
+ pub fn group_by_mut<F>(&mut self, pred: F) -> GroupByMut<'_, T, F>
+ where
+ F: FnMut(&T, &T) -> bool,
+ {
+ GroupByMut::new(self, pred)
+ }
+
+ /// Divides one slice into two at an index.
+ ///
+ /// The first will contain all indices from `[0, mid)` (excluding
+ /// the index `mid` itself) and the second will contain all
+ /// indices from `[mid, len)` (excluding the index `len` itself).
+ ///
+ /// # Panics
+ ///
+ /// Panics if `mid > len`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = [1, 2, 3, 4, 5, 6];
+ ///
+ /// {
+ /// let (left, right) = v.split_at(0);
+ /// assert_eq!(left, []);
+ /// assert_eq!(right, [1, 2, 3, 4, 5, 6]);
+ /// }
+ ///
+ /// {
+ /// let (left, right) = v.split_at(2);
+ /// assert_eq!(left, [1, 2]);
+ /// assert_eq!(right, [3, 4, 5, 6]);
+ /// }
+ ///
+ /// {
+ /// let (left, right) = v.split_at(6);
+ /// assert_eq!(left, [1, 2, 3, 4, 5, 6]);
+ /// assert_eq!(right, []);
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ #[track_caller]
+ #[must_use]
+ pub fn split_at(&self, mid: usize) -> (&[T], &[T]) {
+ assert!(mid <= self.len());
+        // SAFETY: `mid <= self.len()`, so the ranges `[0, mid)` and
+        // `[mid, len)` are inside `self`, which fulfills the requirements
+        // of `split_at_unchecked`.
+ unsafe { self.split_at_unchecked(mid) }
+ }
+
+ /// Divides one mutable slice into two at an index.
+ ///
+ /// The first will contain all indices from `[0, mid)` (excluding
+ /// the index `mid` itself) and the second will contain all
+ /// indices from `[mid, len)` (excluding the index `len` itself).
+ ///
+ /// # Panics
+ ///
+ /// Panics if `mid > len`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [1, 0, 3, 0, 5, 6];
+ /// let (left, right) = v.split_at_mut(2);
+ /// assert_eq!(left, [1, 0]);
+ /// assert_eq!(right, [3, 0, 5, 6]);
+ /// left[1] = 2;
+ /// right[1] = 4;
+ /// assert_eq!(v, [1, 2, 3, 4, 5, 6]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ #[track_caller]
+ #[must_use]
+ pub fn split_at_mut(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
+ assert!(mid <= self.len());
+ // SAFETY: `[ptr; mid]` and `[mid; len]` are inside `self`, which
+ // fulfills the requirements of `from_raw_parts_mut`.
+ unsafe { self.split_at_mut_unchecked(mid) }
+ }
+
+ /// Divides one slice into two at an index, without doing bounds checking.
+ ///
+ /// The first will contain all indices from `[0, mid)` (excluding
+ /// the index `mid` itself) and the second will contain all
+ /// indices from `[mid, len)` (excluding the index `len` itself).
+ ///
+ /// For a safe alternative see [`split_at`].
+ ///
+ /// # Safety
+ ///
+ /// Calling this method with an out-of-bounds index is *[undefined behavior]*
+ /// even if the resulting reference is not used. The caller has to ensure that
+ /// `0 <= mid <= self.len()`.
+ ///
+ /// [`split_at`]: slice::split_at
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_split_at_unchecked)]
+ ///
+ /// let v = [1, 2, 3, 4, 5, 6];
+ ///
+ /// unsafe {
+ /// let (left, right) = v.split_at_unchecked(0);
+ /// assert_eq!(left, []);
+ /// assert_eq!(right, [1, 2, 3, 4, 5, 6]);
+ /// }
+ ///
+ /// unsafe {
+ /// let (left, right) = v.split_at_unchecked(2);
+ /// assert_eq!(left, [1, 2]);
+ /// assert_eq!(right, [3, 4, 5, 6]);
+ /// }
+ ///
+ /// unsafe {
+ /// let (left, right) = v.split_at_unchecked(6);
+ /// assert_eq!(left, [1, 2, 3, 4, 5, 6]);
+ /// assert_eq!(right, []);
+ /// }
+ /// ```
+ #[unstable(feature = "slice_split_at_unchecked", reason = "new API", issue = "76014")]
+ #[inline]
+ #[must_use]
+ pub unsafe fn split_at_unchecked(&self, mid: usize) -> (&[T], &[T]) {
+ // SAFETY: Caller has to check that `0 <= mid <= self.len()`
+ unsafe { (self.get_unchecked(..mid), self.get_unchecked(mid..)) }
+ }
+
+ /// Divides one mutable slice into two at an index, without doing bounds checking.
+ ///
+ /// The first will contain all indices from `[0, mid)` (excluding
+ /// the index `mid` itself) and the second will contain all
+ /// indices from `[mid, len)` (excluding the index `len` itself).
+ ///
+ /// For a safe alternative see [`split_at_mut`].
+ ///
+ /// # Safety
+ ///
+ /// Calling this method with an out-of-bounds index is *[undefined behavior]*
+ /// even if the resulting reference is not used. The caller has to ensure that
+ /// `0 <= mid <= self.len()`.
+ ///
+ /// [`split_at_mut`]: slice::split_at_mut
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_split_at_unchecked)]
+ ///
+ /// let mut v = [1, 0, 3, 0, 5, 6];
+ /// // scoped to restrict the lifetime of the borrows
+ /// unsafe {
+ /// let (left, right) = v.split_at_mut_unchecked(2);
+ /// assert_eq!(left, [1, 0]);
+ /// assert_eq!(right, [3, 0, 5, 6]);
+ /// left[1] = 2;
+ /// right[1] = 4;
+ /// }
+ /// assert_eq!(v, [1, 2, 3, 4, 5, 6]);
+ /// ```
+ #[unstable(feature = "slice_split_at_unchecked", reason = "new API", issue = "76014")]
+ #[inline]
+ #[must_use]
+ pub unsafe fn split_at_mut_unchecked(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
+ let len = self.len();
+ let ptr = self.as_mut_ptr();
+
+ // SAFETY: Caller has to check that `0 <= mid <= self.len()`.
+ //
+ // `[ptr; mid]` and `[mid; len]` are not overlapping, so returning a mutable reference
+ // is fine.
+ unsafe {
+ assert_unsafe_precondition!(mid <= len);
+ (from_raw_parts_mut(ptr, mid), from_raw_parts_mut(ptr.add(mid), len - mid))
+ }
+ }
+
+ /// Divides one slice into an array and a remainder slice at an index.
+ ///
+ /// The array will contain all indices from `[0, N)` (excluding
+ /// the index `N` itself) and the slice will contain all
+ /// indices from `[N, len)` (excluding the index `len` itself).
+ ///
+ /// # Panics
+ ///
+ /// Panics if `N > len`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(split_array)]
+ ///
+ /// let v = &[1, 2, 3, 4, 5, 6][..];
+ ///
+ /// {
+ /// let (left, right) = v.split_array_ref::<0>();
+ /// assert_eq!(left, &[]);
+ /// assert_eq!(right, [1, 2, 3, 4, 5, 6]);
+ /// }
+ ///
+ /// {
+ /// let (left, right) = v.split_array_ref::<2>();
+ /// assert_eq!(left, &[1, 2]);
+ /// assert_eq!(right, [3, 4, 5, 6]);
+ /// }
+ ///
+ /// {
+ /// let (left, right) = v.split_array_ref::<6>();
+ /// assert_eq!(left, &[1, 2, 3, 4, 5, 6]);
+ /// assert_eq!(right, []);
+ /// }
+ /// ```
+ #[unstable(feature = "split_array", reason = "new API", issue = "90091")]
+ #[inline]
+ #[track_caller]
+ #[must_use]
+ pub fn split_array_ref<const N: usize>(&self) -> (&[T; N], &[T]) {
+ let (a, b) = self.split_at(N);
+ // SAFETY: `a` is a `[T]` of length `N`, as checked by `split_at`, so the cast to `&[T; N]` is valid
+ unsafe { (&*(a.as_ptr() as *const [T; N]), b) }
+ }
+
+ /// Divides one mutable slice into an array and a remainder slice at an index.
+ ///
+ /// The array will contain all indices from `[0, N)` (excluding
+ /// the index `N` itself) and the slice will contain all
+ /// indices from `[N, len)` (excluding the index `len` itself).
+ ///
+ /// # Panics
+ ///
+ /// Panics if `N > len`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(split_array)]
+ ///
+ /// let mut v = &mut [1, 0, 3, 0, 5, 6][..];
+ /// let (left, right) = v.split_array_mut::<2>();
+ /// assert_eq!(left, &mut [1, 0]);
+ /// assert_eq!(right, [3, 0, 5, 6]);
+ /// left[1] = 2;
+ /// right[1] = 4;
+ /// assert_eq!(v, [1, 2, 3, 4, 5, 6]);
+ /// ```
+ #[unstable(feature = "split_array", reason = "new API", issue = "90091")]
+ #[inline]
+ #[track_caller]
+ #[must_use]
+ pub fn split_array_mut<const N: usize>(&mut self) -> (&mut [T; N], &mut [T]) {
+ let (a, b) = self.split_at_mut(N);
+ // SAFETY: `a` is a `[T]` of length `N`, as checked by `split_at_mut`, so the cast to `&mut [T; N]` is valid
+ unsafe { (&mut *(a.as_mut_ptr() as *mut [T; N]), b) }
+ }
+
+ /// Divides one slice into an array and a remainder slice at an index from
+ /// the end.
+ ///
+ /// The slice will contain all indices from `[0, len - N)` (excluding
+ /// the index `len - N` itself) and the array will contain all
+ /// indices from `[len - N, len)` (excluding the index `len` itself).
+ ///
+ /// # Panics
+ ///
+ /// Panics if `N > len`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(split_array)]
+ ///
+ /// let v = &[1, 2, 3, 4, 5, 6][..];
+ ///
+ /// {
+ /// let (left, right) = v.rsplit_array_ref::<0>();
+ /// assert_eq!(left, [1, 2, 3, 4, 5, 6]);
+ /// assert_eq!(right, &[]);
+ /// }
+ ///
+ /// {
+ /// let (left, right) = v.rsplit_array_ref::<2>();
+ /// assert_eq!(left, [1, 2, 3, 4]);
+ /// assert_eq!(right, &[5, 6]);
+ /// }
+ ///
+ /// {
+ /// let (left, right) = v.rsplit_array_ref::<6>();
+ /// assert_eq!(left, []);
+ /// assert_eq!(right, &[1, 2, 3, 4, 5, 6]);
+ /// }
+ /// ```
+ #[unstable(feature = "split_array", reason = "new API", issue = "90091")]
+ #[inline]
+ #[must_use]
+ pub fn rsplit_array_ref<const N: usize>(&self) -> (&[T], &[T; N]) {
+ assert!(N <= self.len());
+ let (a, b) = self.split_at(self.len() - N);
+ // SAFETY: `b` is a `[T]` of length `N`, as checked by `split_at`, so the cast to `&[T; N]` is valid
+ unsafe { (a, &*(b.as_ptr() as *const [T; N])) }
+ }
+
+ /// Divides one mutable slice into an array and a remainder slice at an
+ /// index from the end.
+ ///
+ /// The slice will contain all indices from `[0, len - N)` (excluding
+ /// the index `len - N` itself) and the array will contain all
+ /// indices from `[len - N, len)` (excluding the index `len` itself).
+ ///
+ /// # Panics
+ ///
+ /// Panics if `N > len`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(split_array)]
+ ///
+ /// let mut v = &mut [1, 0, 3, 0, 5, 6][..];
+ /// let (left, right) = v.rsplit_array_mut::<4>();
+ /// assert_eq!(left, [1, 0]);
+ /// assert_eq!(right, &mut [3, 0, 5, 6]);
+ /// left[1] = 2;
+ /// right[1] = 4;
+ /// assert_eq!(v, [1, 2, 3, 4, 5, 6]);
+ /// ```
+ #[unstable(feature = "split_array", reason = "new API", issue = "90091")]
+ #[inline]
+ #[must_use]
+ pub fn rsplit_array_mut<const N: usize>(&mut self) -> (&mut [T], &mut [T; N]) {
+ assert!(N <= self.len());
+ let (a, b) = self.split_at_mut(self.len() - N);
+ // SAFETY: `b` is a `[T]` of length `N`, as checked by `split_at_mut`, so the cast to `&mut [T; N]` is valid
+ unsafe { (a, &mut *(b.as_mut_ptr() as *mut [T; N])) }
+ }
+
+ /// Returns an iterator over subslices separated by elements that match
+ /// `pred`. The matched element is not contained in the subslices.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let slice = [10, 40, 33, 20];
+ /// let mut iter = slice.split(|num| num % 3 == 0);
+ ///
+ /// assert_eq!(iter.next().unwrap(), &[10, 40]);
+ /// assert_eq!(iter.next().unwrap(), &[20]);
+ /// assert!(iter.next().is_none());
+ /// ```
+ ///
+ /// If the first element is matched, an empty slice will be the first item
+ /// returned by the iterator. Similarly, if the last element in the slice
+ /// is matched, an empty slice will be the last item returned by the
+ /// iterator:
+ ///
+ /// ```
+ /// let slice = [10, 40, 33];
+ /// let mut iter = slice.split(|num| num % 3 == 0);
+ ///
+ /// assert_eq!(iter.next().unwrap(), &[10, 40]);
+ /// assert_eq!(iter.next().unwrap(), &[]);
+ /// assert!(iter.next().is_none());
+ /// ```
+ ///
+ /// If two matched elements are directly adjacent, an empty slice will be
+ /// present between them:
+ ///
+ /// ```
+ /// let slice = [10, 6, 33, 20];
+ /// let mut iter = slice.split(|num| num % 3 == 0);
+ ///
+ /// assert_eq!(iter.next().unwrap(), &[10]);
+ /// assert_eq!(iter.next().unwrap(), &[]);
+ /// assert_eq!(iter.next().unwrap(), &[20]);
+ /// assert!(iter.next().is_none());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn split<F>(&self, pred: F) -> Split<'_, T, F>
+ where
+ F: FnMut(&T) -> bool,
+ {
+ Split::new(self, pred)
+ }
+
+ /// Returns an iterator over mutable subslices separated by elements that
+ /// match `pred`. The matched element is not contained in the subslices.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [10, 40, 30, 20, 60, 50];
+ ///
+ /// for group in v.split_mut(|num| *num % 3 == 0) {
+ /// group[0] = 1;
+ /// }
+ /// assert_eq!(v, [1, 40, 30, 1, 60, 1]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn split_mut<F>(&mut self, pred: F) -> SplitMut<'_, T, F>
+ where
+ F: FnMut(&T) -> bool,
+ {
+ SplitMut::new(self, pred)
+ }
+
+ /// Returns an iterator over subslices separated by elements that match
+ /// `pred`. The matched element is contained in the end of the previous
+ /// subslice as a terminator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let slice = [10, 40, 33, 20];
+ /// let mut iter = slice.split_inclusive(|num| num % 3 == 0);
+ ///
+ /// assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
+ /// assert_eq!(iter.next().unwrap(), &[20]);
+ /// assert!(iter.next().is_none());
+ /// ```
+ ///
+ /// If the last element of the slice is matched,
+ /// that element will be considered the terminator of the preceding slice.
+ /// That slice will be the last item returned by the iterator.
+ ///
+ /// ```
+ /// let slice = [3, 10, 40, 33];
+ /// let mut iter = slice.split_inclusive(|num| num % 3 == 0);
+ ///
+ /// assert_eq!(iter.next().unwrap(), &[3]);
+ /// assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
+ /// assert!(iter.next().is_none());
+ /// ```
+ #[stable(feature = "split_inclusive", since = "1.51.0")]
+ #[inline]
+ pub fn split_inclusive<F>(&self, pred: F) -> SplitInclusive<'_, T, F>
+ where
+ F: FnMut(&T) -> bool,
+ {
+ SplitInclusive::new(self, pred)
+ }
+
+ /// Returns an iterator over mutable subslices separated by elements that
+ /// match `pred`. The matched element is contained in the previous
+ /// subslice as a terminator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [10, 40, 30, 20, 60, 50];
+ ///
+ /// for group in v.split_inclusive_mut(|num| *num % 3 == 0) {
+ /// let terminator_idx = group.len() - 1;
+ /// group[terminator_idx] = 1;
+ /// }
+ /// assert_eq!(v, [10, 40, 1, 20, 1, 1]);
+ /// ```
+ #[stable(feature = "split_inclusive", since = "1.51.0")]
+ #[inline]
+ pub fn split_inclusive_mut<F>(&mut self, pred: F) -> SplitInclusiveMut<'_, T, F>
+ where
+ F: FnMut(&T) -> bool,
+ {
+ SplitInclusiveMut::new(self, pred)
+ }
+
+ /// Returns an iterator over subslices separated by elements that match
+ /// `pred`, starting at the end of the slice and working backwards.
+ /// The matched element is not contained in the subslices.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let slice = [11, 22, 33, 0, 44, 55];
+ /// let mut iter = slice.rsplit(|num| *num == 0);
+ ///
+ /// assert_eq!(iter.next().unwrap(), &[44, 55]);
+ /// assert_eq!(iter.next().unwrap(), &[11, 22, 33]);
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// As with `split()`, if the first or last element is matched, an empty
+ /// slice will be the first (or last) item returned by the iterator.
+ ///
+ /// ```
+ /// let v = &[0, 1, 1, 2, 3, 5, 8];
+ /// let mut it = v.rsplit(|n| *n % 2 == 0);
+ /// assert_eq!(it.next().unwrap(), &[]);
+ /// assert_eq!(it.next().unwrap(), &[3, 5]);
+ /// assert_eq!(it.next().unwrap(), &[1, 1]);
+ /// assert_eq!(it.next().unwrap(), &[]);
+ /// assert_eq!(it.next(), None);
+ /// ```
+ #[stable(feature = "slice_rsplit", since = "1.27.0")]
+ #[inline]
+ pub fn rsplit<F>(&self, pred: F) -> RSplit<'_, T, F>
+ where
+ F: FnMut(&T) -> bool,
+ {
+ RSplit::new(self, pred)
+ }
+
+ /// Returns an iterator over mutable subslices separated by elements that
+ /// match `pred`, starting at the end of the slice and working
+ /// backwards. The matched element is not contained in the subslices.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [100, 400, 300, 200, 600, 500];
+ ///
+ /// let mut count = 0;
+ /// for group in v.rsplit_mut(|num| *num % 3 == 0) {
+ /// count += 1;
+ /// group[0] = count;
+ /// }
+ /// assert_eq!(v, [3, 400, 300, 2, 600, 1]);
+ /// ```
+ #[stable(feature = "slice_rsplit", since = "1.27.0")]
+ #[inline]
+ pub fn rsplit_mut<F>(&mut self, pred: F) -> RSplitMut<'_, T, F>
+ where
+ F: FnMut(&T) -> bool,
+ {
+ RSplitMut::new(self, pred)
+ }
+
+ /// Returns an iterator over subslices separated by elements that match
+ /// `pred`, limited to returning at most `n` items. The matched element is
+ /// not contained in the subslices.
+ ///
+ /// The last element returned, if any, will contain the remainder of the
+ /// slice.
+ ///
+ /// # Examples
+ ///
+ /// Print the slice split once by numbers divisible by 3 (i.e., `[10, 40]`,
+ /// `[20, 60, 50]`):
+ ///
+ /// ```
+ /// let v = [10, 40, 30, 20, 60, 50];
+ ///
+ /// for group in v.splitn(2, |num| *num % 3 == 0) {
+ /// println!("{group:?}");
+ /// }
+ /// ```
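+ ///
+ /// The same split, checked with assertions rather than printing (a small
+ /// sketch of the remainder behavior described above):
+ ///
+ /// ```
+ /// let v = [10, 40, 30, 20, 60, 50];
+ ///
+ /// let mut iter = v.splitn(2, |num| *num % 3 == 0);
+ ///
+ /// assert_eq!(iter.next().unwrap(), &[10, 40]);
+ /// // The last item returned holds the remainder of the slice.
+ /// assert_eq!(iter.next().unwrap(), &[20, 60, 50]);
+ /// assert!(iter.next().is_none());
+ /// ```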
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn splitn<F>(&self, n: usize, pred: F) -> SplitN<'_, T, F>
+ where
+ F: FnMut(&T) -> bool,
+ {
+ SplitN::new(self.split(pred), n)
+ }
+
+ /// Returns an iterator over subslices separated by elements that match
+ /// `pred`, limited to returning at most `n` items. The matched element is
+ /// not contained in the subslices.
+ ///
+ /// The last element returned, if any, will contain the remainder of the
+ /// slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [10, 40, 30, 20, 60, 50];
+ ///
+ /// for group in v.splitn_mut(2, |num| *num % 3 == 0) {
+ /// group[0] = 1;
+ /// }
+ /// assert_eq!(v, [1, 40, 30, 1, 60, 50]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn splitn_mut<F>(&mut self, n: usize, pred: F) -> SplitNMut<'_, T, F>
+ where
+ F: FnMut(&T) -> bool,
+ {
+ SplitNMut::new(self.split_mut(pred), n)
+ }
+
+ /// Returns an iterator over subslices separated by elements that match
+ /// `pred` limited to returning at most `n` items. This starts at the end of
+ /// the slice and works backwards. The matched element is not contained in
+ /// the subslices.
+ ///
+ /// The last element returned, if any, will contain the remainder of the
+ /// slice.
+ ///
+ /// # Examples
+ ///
+ /// Print the slice split once, starting from the end, by numbers divisible
+ /// by 3 (i.e., `[50]`, `[10, 40, 30, 20]`):
+ ///
+ /// ```
+ /// let v = [10, 40, 30, 20, 60, 50];
+ ///
+ /// for group in v.rsplitn(2, |num| *num % 3 == 0) {
+ /// println!("{group:?}");
+ /// }
+ /// ```
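+ ///
+ /// The same split, checked with assertions (a small sketch of the remainder
+ /// behavior described above):
+ ///
+ /// ```
+ /// let v = [10, 40, 30, 20, 60, 50];
+ ///
+ /// let mut iter = v.rsplitn(2, |num| *num % 3 == 0);
+ ///
+ /// assert_eq!(iter.next().unwrap(), &[50]);
+ /// // The last item returned holds the remainder of the slice.
+ /// assert_eq!(iter.next().unwrap(), &[10, 40, 30, 20]);
+ /// assert!(iter.next().is_none());
+ /// ```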
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn rsplitn<F>(&self, n: usize, pred: F) -> RSplitN<'_, T, F>
+ where
+ F: FnMut(&T) -> bool,
+ {
+ RSplitN::new(self.rsplit(pred), n)
+ }
+
+ /// Returns an iterator over subslices separated by elements that match
+ /// `pred` limited to returning at most `n` items. This starts at the end of
+ /// the slice and works backwards. The matched element is not contained in
+ /// the subslices.
+ ///
+ /// The last element returned, if any, will contain the remainder of the
+ /// slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut s = [10, 40, 30, 20, 60, 50];
+ ///
+ /// for group in s.rsplitn_mut(2, |num| *num % 3 == 0) {
+ /// group[0] = 1;
+ /// }
+ /// assert_eq!(s, [1, 40, 30, 20, 60, 1]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn rsplitn_mut<F>(&mut self, n: usize, pred: F) -> RSplitNMut<'_, T, F>
+ where
+ F: FnMut(&T) -> bool,
+ {
+ RSplitNMut::new(self.rsplit_mut(pred), n)
+ }
+
+ /// Returns `true` if the slice contains an element with the given value.
+ ///
+ /// This operation is *O*(*n*).
+ ///
+ /// Note that if you have a sorted slice, [`binary_search`] may be faster.
+ ///
+ /// [`binary_search`]: slice::binary_search
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = [10, 40, 30];
+ /// assert!(v.contains(&30));
+ /// assert!(!v.contains(&50));
+ /// ```
+ ///
+ /// If you do not have a `&T`, but some other value that you can compare
+ /// with one (for example, `String` implements `PartialEq<str>`), you can
+ /// use `iter().any`:
+ ///
+ /// ```
+ /// let v = [String::from("hello"), String::from("world")]; // slice of `String`
+ /// assert!(v.iter().any(|e| e == "hello")); // search with `&str`
+ /// assert!(!v.iter().any(|e| e == "hi"));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ #[must_use]
+ pub fn contains(&self, x: &T) -> bool
+ where
+ T: PartialEq,
+ {
+ cmp::SliceContains::slice_contains(x, self)
+ }
+
+ /// Returns `true` if `needle` is a prefix of the slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = [10, 40, 30];
+ /// assert!(v.starts_with(&[10]));
+ /// assert!(v.starts_with(&[10, 40]));
+ /// assert!(!v.starts_with(&[50]));
+ /// assert!(!v.starts_with(&[10, 50]));
+ /// ```
+ ///
+ /// Always returns `true` if `needle` is an empty slice:
+ ///
+ /// ```
+ /// let v = &[10, 40, 30];
+ /// assert!(v.starts_with(&[]));
+ /// let v: &[u8] = &[];
+ /// assert!(v.starts_with(&[]));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn starts_with(&self, needle: &[T]) -> bool
+ where
+ T: PartialEq,
+ {
+ let n = needle.len();
+ self.len() >= n && needle == &self[..n]
+ }
+
+ /// Returns `true` if `needle` is a suffix of the slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = [10, 40, 30];
+ /// assert!(v.ends_with(&[30]));
+ /// assert!(v.ends_with(&[40, 30]));
+ /// assert!(!v.ends_with(&[50]));
+ /// assert!(!v.ends_with(&[50, 30]));
+ /// ```
+ ///
+ /// Always returns `true` if `needle` is an empty slice:
+ ///
+ /// ```
+ /// let v = &[10, 40, 30];
+ /// assert!(v.ends_with(&[]));
+ /// let v: &[u8] = &[];
+ /// assert!(v.ends_with(&[]));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn ends_with(&self, needle: &[T]) -> bool
+ where
+ T: PartialEq,
+ {
+ let (m, n) = (self.len(), needle.len());
+ m >= n && needle == &self[m - n..]
+ }
+
+ /// Returns a subslice with the prefix removed.
+ ///
+ /// If the slice starts with `prefix`, returns the subslice after the prefix, wrapped in `Some`.
+ /// If `prefix` is empty, simply returns the original slice.
+ ///
+ /// If the slice does not start with `prefix`, returns `None`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = &[10, 40, 30];
+ /// assert_eq!(v.strip_prefix(&[10]), Some(&[40, 30][..]));
+ /// assert_eq!(v.strip_prefix(&[10, 40]), Some(&[30][..]));
+ /// assert_eq!(v.strip_prefix(&[50]), None);
+ /// assert_eq!(v.strip_prefix(&[10, 50]), None);
+ ///
+ /// let prefix: &str = "he";
+ /// assert_eq!(b"hello".strip_prefix(prefix.as_bytes()),
+ /// Some(b"llo".as_ref()));
+ /// ```
+ #[must_use = "returns the subslice without modifying the original"]
+ #[stable(feature = "slice_strip", since = "1.51.0")]
+ pub fn strip_prefix<P: SlicePattern<Item = T> + ?Sized>(&self, prefix: &P) -> Option<&[T]>
+ where
+ T: PartialEq,
+ {
+ // This function will need rewriting if and when SlicePattern becomes more sophisticated.
+ let prefix = prefix.as_slice();
+ let n = prefix.len();
+ if n <= self.len() {
+ let (head, tail) = self.split_at(n);
+ if head == prefix {
+ return Some(tail);
+ }
+ }
+ None
+ }
+
+ /// Returns a subslice with the suffix removed.
+ ///
+ /// If the slice ends with `suffix`, returns the subslice before the suffix, wrapped in `Some`.
+ /// If `suffix` is empty, simply returns the original slice.
+ ///
+ /// If the slice does not end with `suffix`, returns `None`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = &[10, 40, 30];
+ /// assert_eq!(v.strip_suffix(&[30]), Some(&[10, 40][..]));
+ /// assert_eq!(v.strip_suffix(&[40, 30]), Some(&[10][..]));
+ /// assert_eq!(v.strip_suffix(&[50]), None);
+ /// assert_eq!(v.strip_suffix(&[50, 30]), None);
+ /// ```
+ #[must_use = "returns the subslice without modifying the original"]
+ #[stable(feature = "slice_strip", since = "1.51.0")]
+ pub fn strip_suffix<P: SlicePattern<Item = T> + ?Sized>(&self, suffix: &P) -> Option<&[T]>
+ where
+ T: PartialEq,
+ {
+ // This function will need rewriting if and when SlicePattern becomes more sophisticated.
+ let suffix = suffix.as_slice();
+ let (len, n) = (self.len(), suffix.len());
+ if n <= len {
+ let (head, tail) = self.split_at(len - n);
+ if tail == suffix {
+ return Some(head);
+ }
+ }
+ None
+ }
+
+ /// Binary searches this slice for a given element.
+ /// This behaves similarly to [`contains`] if this slice is sorted.
+ ///
+ /// If the value is found then [`Result::Ok`] is returned, containing the
+ /// index of the matching element. If there are multiple matches, then any
+ /// one of the matches could be returned. The index is chosen
+ /// deterministically, but is subject to change in future versions of Rust.
+ /// If the value is not found then [`Result::Err`] is returned, containing
+ /// the index where a matching element could be inserted while maintaining
+ /// sorted order.
+ ///
+ /// See also [`binary_search_by`], [`binary_search_by_key`], and [`partition_point`].
+ ///
+ /// [`contains`]: slice::contains
+ /// [`binary_search_by`]: slice::binary_search_by
+ /// [`binary_search_by_key`]: slice::binary_search_by_key
+ /// [`partition_point`]: slice::partition_point
+ ///
+ /// # Examples
+ ///
+ /// Looks up a series of four elements. The first is found, with a
+ /// uniquely determined position; the second and third are not
+ /// found; the fourth could match any position in `[1, 4]`.
+ ///
+ /// ```
+ /// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
+ ///
+ /// assert_eq!(s.binary_search(&13), Ok(9));
+ /// assert_eq!(s.binary_search(&4), Err(7));
+ /// assert_eq!(s.binary_search(&100), Err(13));
+ /// let r = s.binary_search(&1);
+ /// assert!(match r { Ok(1..=4) => true, _ => false, });
+ /// ```
+ ///
+ /// If you want to insert an item into a sorted vector while maintaining
+ /// sort order, consider using [`partition_point`]:
+ ///
+ /// ```
+ /// let mut s = vec![0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
+ /// let num = 42;
+ /// let idx = s.partition_point(|&x| x < num);
+ /// // The above is equivalent to `let idx = s.binary_search(&num).unwrap_or_else(|x| x);`
+ /// s.insert(idx, num);
+ /// assert_eq!(s, [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 42, 55]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn binary_search(&self, x: &T) -> Result<usize, usize>
+ where
+ T: Ord,
+ {
+ self.binary_search_by(|p| p.cmp(x))
+ }
+
+ /// Binary searches this slice with a comparator function.
+ /// This behaves similarly to [`contains`] if this slice is sorted.
+ ///
+ /// The comparator function should implement an order consistent
+ /// with the sort order of the underlying slice, returning an
+ /// order code that indicates whether its argument is `Less`,
+ /// `Equal` or `Greater` than the desired target.
+ ///
+ /// If the value is found then [`Result::Ok`] is returned, containing the
+ /// index of the matching element. If there are multiple matches, then any
+ /// one of the matches could be returned. The index is chosen
+ /// deterministically, but is subject to change in future versions of Rust.
+ /// If the value is not found then [`Result::Err`] is returned, containing
+ /// the index where a matching element could be inserted while maintaining
+ /// sorted order.
+ ///
+ /// See also [`binary_search`], [`binary_search_by_key`], and [`partition_point`].
+ ///
+ /// [`contains`]: slice::contains
+ /// [`binary_search`]: slice::binary_search
+ /// [`binary_search_by_key`]: slice::binary_search_by_key
+ /// [`partition_point`]: slice::partition_point
+ ///
+ /// # Examples
+ ///
+ /// Looks up a series of four elements. The first is found, with a
+ /// uniquely determined position; the second and third are not
+ /// found; the fourth could match any position in `[1, 4]`.
+ ///
+ /// ```
+ /// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
+ ///
+ /// let seek = 13;
+ /// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Ok(9));
+ /// let seek = 4;
+ /// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(7));
+ /// let seek = 100;
+ /// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(13));
+ /// let seek = 1;
+ /// let r = s.binary_search_by(|probe| probe.cmp(&seek));
+ /// assert!(match r { Ok(1..=4) => true, _ => false, });
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn binary_search_by<'a, F>(&'a self, mut f: F) -> Result<usize, usize>
+ where
+ F: FnMut(&'a T) -> Ordering,
+ {
+ let mut size = self.len();
+ let mut left = 0;
+ let mut right = size;
+ while left < right {
+ let mid = left + size / 2;
+
+ // SAFETY: the call is made safe by the following invariants:
+ // - `mid >= 0`
+ // - `mid < size`: `mid` is limited by `[left; right)` bound.
+ let cmp = f(unsafe { self.get_unchecked(mid) });
+
+ // We use if/else control flow rather than match because match
+ // reorders comparison operations, which is perf sensitive.
+ // This is x86 asm for u8: https://rust.godbolt.org/z/8Y8Pra.
+ if cmp == Less {
+ left = mid + 1;
+ } else if cmp == Greater {
+ right = mid;
+ } else {
+ // SAFETY: same as the `get_unchecked` above
+ unsafe { crate::intrinsics::assume(mid < self.len()) };
+ return Ok(mid);
+ }
+
+ size = right - left;
+ }
+ Err(left)
+ }
+
+ /// Binary searches this slice with a key extraction function.
+ /// This behaves similarly to [`contains`] if this slice is sorted.
+ ///
+ /// Assumes that the slice is sorted by the key, for instance with
+ /// [`sort_by_key`] using the same key extraction function.
+ ///
+ /// If the value is found then [`Result::Ok`] is returned, containing the
+ /// index of the matching element. If there are multiple matches, then any
+ /// one of the matches could be returned. The index is chosen
+ /// deterministically, but is subject to change in future versions of Rust.
+ /// If the value is not found then [`Result::Err`] is returned, containing
+ /// the index where a matching element could be inserted while maintaining
+ /// sorted order.
+ ///
+ /// See also [`binary_search`], [`binary_search_by`], and [`partition_point`].
+ ///
+ /// [`contains`]: slice::contains
+ /// [`sort_by_key`]: slice::sort_by_key
+ /// [`binary_search`]: slice::binary_search
+ /// [`binary_search_by`]: slice::binary_search_by
+ /// [`partition_point`]: slice::partition_point
+ ///
+ /// # Examples
+ ///
+ /// Looks up a series of four elements in a slice of pairs sorted by
+ /// their second elements. The first is found, with a uniquely
+ /// determined position; the second and third are not found; the
+ /// fourth could match any position in `[1, 4]`.
+ ///
+ /// ```
+ /// let s = [(0, 0), (2, 1), (4, 1), (5, 1), (3, 1),
+ /// (1, 2), (2, 3), (4, 5), (5, 8), (3, 13),
+ /// (1, 21), (2, 34), (4, 55)];
+ ///
+ /// assert_eq!(s.binary_search_by_key(&13, |&(a, b)| b), Ok(9));
+ /// assert_eq!(s.binary_search_by_key(&4, |&(a, b)| b), Err(7));
+ /// assert_eq!(s.binary_search_by_key(&100, |&(a, b)| b), Err(13));
+ /// let r = s.binary_search_by_key(&1, |&(a, b)| b);
+ /// assert!(match r { Ok(1..=4) => true, _ => false, });
+ /// ```
+ // Lint rustdoc::broken_intra_doc_links is allowed as `slice::sort_by_key` is
+ // in crate `alloc`, and as such doesn't exist yet when building `core`: #74481.
+ // This breaks links when slice is displayed in core, but changing it to use relative links
+ // would break when the item is re-exported. So allow the core links to be broken for now.
+ #[allow(rustdoc::broken_intra_doc_links)]
+ #[stable(feature = "slice_binary_search_by_key", since = "1.10.0")]
+ #[inline]
+ pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, mut f: F) -> Result<usize, usize>
+ where
+ F: FnMut(&'a T) -> B,
+ B: Ord,
+ {
+ self.binary_search_by(|k| f(k).cmp(b))
+ }
+
+ /// Sorts the slice, but might not preserve the order of equal elements.
+ ///
+ /// This sort is unstable (i.e., may reorder equal elements), in-place
+ /// (i.e., does not allocate), and *O*(*n* \* log(*n*)) worst-case.
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
+ /// which combines the fast average case of randomized quicksort with the fast worst case of
+ /// heapsort, while achieving linear time on slices with certain patterns. It uses some
+ /// randomization to avoid degenerate cases, but with a fixed seed to always provide
+ /// deterministic behavior.
+ ///
+ /// It is typically faster than stable sorting, except in a few special cases, e.g., when the
+ /// slice consists of several concatenated sorted sequences.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [-5, 4, 1, -3, 2];
+ ///
+ /// v.sort_unstable();
+ /// assert!(v == [-5, -3, 1, 2, 4]);
+ /// ```
+ ///
+ /// [pdqsort]: https://github.com/orlp/pdqsort
+ #[stable(feature = "sort_unstable", since = "1.20.0")]
+ #[inline]
+ pub fn sort_unstable(&mut self)
+ where
+ T: Ord,
+ {
+ sort::quicksort(self, |a, b| a.lt(b));
+ }
+
+ /// Sorts the slice with a comparator function, but might not preserve the order of equal
+ /// elements.
+ ///
+ /// This sort is unstable (i.e., may reorder equal elements), in-place
+ /// (i.e., does not allocate), and *O*(*n* \* log(*n*)) worst-case.
+ ///
+ /// The comparator function must define a total ordering for the elements in the slice. If
+ /// the ordering is not total, the order of the elements is unspecified. An order is a
+ /// total order if it is (for all `a`, `b` and `c`):
+ ///
+ /// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and
+ /// * transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
+ ///
+ /// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
+ /// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`.
+ ///
+ /// ```
+ /// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
+ /// floats.sort_unstable_by(|a, b| a.partial_cmp(b).unwrap());
+ /// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
+ /// ```
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
+ /// which combines the fast average case of randomized quicksort with the fast worst case of
+ /// heapsort, while achieving linear time on slices with certain patterns. It uses some
+ /// randomization to avoid degenerate cases, but with a fixed seed to always provide
+ /// deterministic behavior.
+ ///
+ /// It is typically faster than stable sorting, except in a few special cases, e.g., when the
+ /// slice consists of several concatenated sorted sequences.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [5, 4, 1, 3, 2];
+ /// v.sort_unstable_by(|a, b| a.cmp(b));
+ /// assert!(v == [1, 2, 3, 4, 5]);
+ ///
+ /// // reverse sorting
+ /// v.sort_unstable_by(|a, b| b.cmp(a));
+ /// assert!(v == [5, 4, 3, 2, 1]);
+ /// ```
+ ///
+ /// [pdqsort]: https://github.com/orlp/pdqsort
+ #[stable(feature = "sort_unstable", since = "1.20.0")]
+ #[inline]
+ pub fn sort_unstable_by<F>(&mut self, mut compare: F)
+ where
+ F: FnMut(&T, &T) -> Ordering,
+ {
+ sort::quicksort(self, |a, b| compare(a, b) == Ordering::Less);
+ }
+
+ /// Sorts the slice with a key extraction function, but might not preserve the order of equal
+ /// elements.
+ ///
+ /// This sort is unstable (i.e., may reorder equal elements), in-place
+ /// (i.e., does not allocate), and *O*(*m* \* *n* \* log(*n*)) worst-case, where the key function is
+ /// *O*(*m*).
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
+ /// which combines the fast average case of randomized quicksort with the fast worst case of
+ /// heapsort, while achieving linear time on slices with certain patterns. It uses some
+ /// randomization to avoid degenerate cases, but with a fixed seed to always provide
+ /// deterministic behavior.
+ ///
+ /// Due to its key calling strategy, [`sort_unstable_by_key`](#method.sort_unstable_by_key)
+ /// is likely to be slower than [`sort_by_cached_key`](#method.sort_by_cached_key) in
+ /// cases where the key function is expensive.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [-5i32, 4, 1, -3, 2];
+ ///
+ /// v.sort_unstable_by_key(|k| k.abs());
+ /// assert!(v == [1, 2, -3, 4, -5]);
+ /// ```
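+ ///
+ /// A minimal sketch of the caching note above: the key closure below
+ /// allocates a `String` and is re-run for every comparison, which is the
+ /// kind of workload where `sort_by_cached_key` tends to win:
+ ///
+ /// ```
+ /// let mut v = [3, 120, 47];
+ ///
+ /// // Sorts lexicographically by each number's string form; the key is
+ /// // recomputed O(n * log(n)) times rather than once per element.
+ /// v.sort_unstable_by_key(|k| k.to_string());
+ /// assert_eq!(v, [120, 3, 47]);
+ /// ```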
+ ///
+ /// [pdqsort]: https://github.com/orlp/pdqsort
+ #[stable(feature = "sort_unstable", since = "1.20.0")]
+ #[inline]
+ pub fn sort_unstable_by_key<K, F>(&mut self, mut f: F)
+ where
+ F: FnMut(&T) -> K,
+ K: Ord,
+ {
+ sort::quicksort(self, |a, b| f(a).lt(&f(b)));
+ }
+
+ /// Reorders the slice such that the element at `index` is at its final sorted position.
+ ///
+ /// This reordering has the additional property that any value at position `i < index` will be
+ /// less than or equal to any value at a position `j > index`. Additionally, this reordering is
+ /// unstable (i.e. any number of equal elements may end up at position `index`), in-place
+ /// (i.e. does not allocate), and *O*(*n*) worst-case. This function is also known as "kth
+ /// element" in other libraries. It returns a triplet of the following values: all elements less
+ /// than the one at the given index, the value at the given index, and all elements greater than
+ /// the one at the given index.
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is based on the quickselect portion of the same quicksort algorithm
+ /// used for [`sort_unstable`].
+ ///
+ /// [`sort_unstable`]: slice::sort_unstable
+ ///
+ /// # Panics
+ ///
+ /// Panics when `index >= len()`, meaning it always panics on empty slices.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [-5i32, 4, 1, -3, 2];
+ ///
+ /// // Find the median
+ /// v.select_nth_unstable(2);
+ ///
+ /// // We are only guaranteed the slice will be one of the following, based on the way we sort
+ /// // about the specified index.
+ /// assert!(v == [-3, -5, 1, 2, 4] ||
+ /// v == [-5, -3, 1, 2, 4] ||
+ /// v == [-3, -5, 1, 4, 2] ||
+ /// v == [-5, -3, 1, 4, 2]);
+ /// ```
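+ ///
+ /// A small sketch of consuming the returned triplet directly:
+ ///
+ /// ```
+ /// let mut v = [-5i32, 4, 1, -3, 2];
+ ///
+ /// let (lesser, median, greater) = v.select_nth_unstable(2);
+ ///
+ /// assert_eq!(*median, 1);
+ /// assert!(lesser.iter().all(|&x| x <= 1));
+ /// assert!(greater.iter().all(|&x| x >= 1));
+ /// ```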
+ #[stable(feature = "slice_select_nth_unstable", since = "1.49.0")]
+ #[inline]
+ pub fn select_nth_unstable(&mut self, index: usize) -> (&mut [T], &mut T, &mut [T])
+ where
+ T: Ord,
+ {
+ let mut f = |a: &T, b: &T| a.lt(b);
+ sort::partition_at_index(self, index, &mut f)
+ }
+
+ /// Reorders the slice with a comparator function such that the element at `index` is at its
+ /// final sorted position.
+ ///
+ /// This reordering has the additional property that any value at position `i < index` will be
+ /// less than or equal to any value at a position `j > index` using the comparator function.
+ /// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at
+ /// position `index`), in-place (i.e. does not allocate), and *O*(*n*) worst-case. This function
+ /// is also known as "kth element" in other libraries. It returns a triplet of the following
+ /// values: all elements less than the one at the given index, the value at the given index,
+ /// and all elements greater than the one at the given index, using the provided comparator
+ /// function.
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is based on the quickselect portion of the same quicksort algorithm
+ /// used for [`sort_unstable`].
+ ///
+ /// [`sort_unstable`]: slice::sort_unstable
+ ///
+ /// # Panics
+ ///
+ /// Panics when `index >= len()`, meaning it always panics on empty slices.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [-5i32, 4, 1, -3, 2];
+ ///
+ /// // Find the median as if the slice were sorted in descending order.
+ /// v.select_nth_unstable_by(2, |a, b| b.cmp(a));
+ ///
+ /// // We are only guaranteed the slice will be one of the following, based on the way we sort
+ /// // about the specified index.
+ /// assert!(v == [2, 4, 1, -5, -3] ||
+ /// v == [2, 4, 1, -3, -5] ||
+ /// v == [4, 2, 1, -5, -3] ||
+ /// v == [4, 2, 1, -3, -5]);
+ /// ```
+ #[stable(feature = "slice_select_nth_unstable", since = "1.49.0")]
+ #[inline]
+ pub fn select_nth_unstable_by<F>(
+ &mut self,
+ index: usize,
+ mut compare: F,
+ ) -> (&mut [T], &mut T, &mut [T])
+ where
+ F: FnMut(&T, &T) -> Ordering,
+ {
+ let mut f = |a: &T, b: &T| compare(a, b) == Less;
+ sort::partition_at_index(self, index, &mut f)
+ }
+
+ /// Reorders the slice with a key extraction function such that the element at `index` is at its
+ /// final sorted position.
+ ///
+ /// This reordering has the additional property that any value at position `i < index` will be
+ /// less than or equal to any value at a position `j > index` using the key extraction function.
+ /// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at
+ /// position `index`), in-place (i.e. does not allocate), and *O*(*n*) worst-case. This function
+ /// is also known as "kth element" in other libraries. It returns a triplet of the following
+ /// values: all elements less than the one at the given index, the value at the given index, and
+ /// all elements greater than the one at the given index, using the provided key extraction
+ /// function.
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is based on the quickselect portion of the same quicksort algorithm
+ /// used for [`sort_unstable`].
+ ///
+ /// [`sort_unstable`]: slice::sort_unstable
+ ///
+ /// # Panics
+ ///
+ /// Panics when `index >= len()`, meaning it always panics on empty slices.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [-5i32, 4, 1, -3, 2];
+ ///
+ /// // Return the median as if the array were sorted according to absolute value.
+ /// v.select_nth_unstable_by_key(2, |a| a.abs());
+ ///
+ /// // We are only guaranteed the slice will be one of the following, based on the way we sort
+ /// // about the specified index.
+ /// assert!(v == [1, 2, -3, 4, -5] ||
+ /// v == [1, 2, -3, -5, 4] ||
+ /// v == [2, 1, -3, 4, -5] ||
+ /// v == [2, 1, -3, -5, 4]);
+ /// ```
+ #[stable(feature = "slice_select_nth_unstable", since = "1.49.0")]
+ #[inline]
+ pub fn select_nth_unstable_by_key<K, F>(
+ &mut self,
+ index: usize,
+ mut f: F,
+ ) -> (&mut [T], &mut T, &mut [T])
+ where
+ F: FnMut(&T) -> K,
+ K: Ord,
+ {
+ let mut g = |a: &T, b: &T| f(a).lt(&f(b));
+ sort::partition_at_index(self, index, &mut g)
+ }
+
+ /// Moves all consecutive repeated elements to the end of the slice according to the
+ /// [`PartialEq`] trait implementation.
+ ///
+ /// Returns two slices. The first contains no consecutive repeated elements.
+ /// The second contains all the duplicates in no specified order.
+ ///
+ /// If the slice is sorted, the first returned slice contains no duplicates.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_partition_dedup)]
+ ///
+ /// let mut slice = [1, 2, 2, 3, 3, 2, 1, 1];
+ ///
+ /// let (dedup, duplicates) = slice.partition_dedup();
+ ///
+ /// assert_eq!(dedup, [1, 2, 3, 2, 1]);
+ /// assert_eq!(duplicates, [2, 3, 1]);
+ /// ```
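+ ///
+ /// On a sorted slice the first returned half is fully deduplicated; a
+ /// minimal sketch:
+ ///
+ /// ```
+ /// #![feature(slice_partition_dedup)]
+ ///
+ /// let mut sorted = [1, 1, 2, 3, 3, 3];
+ ///
+ /// let (dedup, _duplicates) = sorted.partition_dedup();
+ ///
+ /// assert_eq!(dedup, [1, 2, 3]);
+ /// ```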
+ #[unstable(feature = "slice_partition_dedup", issue = "54279")]
+ #[inline]
+ pub fn partition_dedup(&mut self) -> (&mut [T], &mut [T])
+ where
+ T: PartialEq,
+ {
+ self.partition_dedup_by(|a, b| a == b)
+ }
+
+ /// Moves all but the first of consecutive elements that satisfy a given
+ /// equality relation to the end of the slice.
+ ///
+ /// Returns two slices. The first contains no consecutive repeated elements.
+ /// The second contains all the duplicates in no specified order.
+ ///
+ /// The `same_bucket` function is passed references to two elements from the slice and
+ /// must determine if the elements compare equal. The elements are passed in opposite order
+ /// from their order in the slice, so if `same_bucket(a, b)` returns `true`, `a` is moved
+ /// to the end of the slice.
+ ///
+ /// If the slice is sorted, the first returned slice contains no duplicates.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_partition_dedup)]
+ ///
+ /// let mut slice = ["foo", "Foo", "BAZ", "Bar", "bar", "baz", "BAZ"];
+ ///
+ /// let (dedup, duplicates) = slice.partition_dedup_by(|a, b| a.eq_ignore_ascii_case(b));
+ ///
+ /// assert_eq!(dedup, ["foo", "BAZ", "Bar", "baz"]);
+ /// assert_eq!(duplicates, ["bar", "Foo", "BAZ"]);
+ /// ```
+ #[unstable(feature = "slice_partition_dedup", issue = "54279")]
+ #[inline]
+ pub fn partition_dedup_by<F>(&mut self, mut same_bucket: F) -> (&mut [T], &mut [T])
+ where
+ F: FnMut(&mut T, &mut T) -> bool,
+ {
+ // Although we have a mutable reference to `self`, we cannot make
+ // *arbitrary* changes. The `same_bucket` calls could panic, so we
+ // must ensure that the slice is in a valid state at all times.
+ //
+ // The way that we handle this is by using swaps; we iterate
+ // over all the elements, swapping as we go so that at the end
+ // the elements we wish to keep are in the front, and those we
+ // wish to reject are at the back. We can then split the slice.
+ // This operation is still `O(n)`.
+ //
+ // Example: We start in this state, where `r` represents "next
+ // read" and `w` represents "next_write`.
+ //
+ //           r
+ //     +---+---+---+---+---+---+
+ //     | 0 | 1 | 1 | 2 | 3 | 3 |
+ //     +---+---+---+---+---+---+
+ //           w
+ //
+ // Comparing self[r] against self[w-1], this is not a duplicate, so
+ // we swap self[r] and self[w] (no effect as r==w) and then increment both
+ // r and w, leaving us with:
+ //
+ //               r
+ //     +---+---+---+---+---+---+
+ //     | 0 | 1 | 1 | 2 | 3 | 3 |
+ //     +---+---+---+---+---+---+
+ //               w
+ //
+ // Comparing self[r] against self[w-1], this value is a duplicate,
+ // so we increment `r` but leave everything else unchanged:
+ //
+ //                   r
+ //     +---+---+---+---+---+---+
+ //     | 0 | 1 | 1 | 2 | 3 | 3 |
+ //     +---+---+---+---+---+---+
+ //               w
+ //
+ // Comparing self[r] against self[w-1], this is not a duplicate,
+ // so swap self[r] and self[w] and advance r and w:
+ //
+ //                       r
+ //     +---+---+---+---+---+---+
+ //     | 0 | 1 | 2 | 1 | 3 | 3 |
+ //     +---+---+---+---+---+---+
+ //                   w
+ //
+ // Not a duplicate, repeat:
+ //
+ //                           r
+ //     +---+---+---+---+---+---+
+ //     | 0 | 1 | 2 | 3 | 1 | 3 |
+ //     +---+---+---+---+---+---+
+ //                       w
+ //
+ // Duplicate, advance r. End of slice. Split at w.
+
+ let len = self.len();
+ if len <= 1 {
+ return (self, &mut []);
+ }
+
+ let ptr = self.as_mut_ptr();
+ let mut next_read: usize = 1;
+ let mut next_write: usize = 1;
+
+ // SAFETY: the `while` condition guarantees `next_read` and `next_write`
+ // are less than `len`, thus are inside `self`. `prev_ptr_write` points to
+ // one element before `ptr_write`, but `next_write` starts at 1, so
+ // `prev_ptr_write` is never less than 0 and is inside the slice.
+ // This fulfils the requirements for dereferencing `ptr_read`, `prev_ptr_write`
+ // and `ptr_write`, and for using `ptr.add(next_read)`, `ptr.add(next_write - 1)`
+ // and `prev_ptr_write.offset(1)`.
+ //
+ // `next_write` is also incremented at most once per loop, meaning
+ // no element is skipped when it may need to be swapped.
+ //
+ // `ptr_read` and `prev_ptr_write` never point to the same element. This
+ // is required for `&mut *ptr_read`, `&mut *prev_ptr_write` to be safe.
+ // The explanation is simply that `next_read >= next_write` is always true,
+ // thus `next_read > next_write - 1` is too.
+ unsafe {
+ // Avoid bounds checks by using raw pointers.
+ while next_read < len {
+ let ptr_read = ptr.add(next_read);
+ let prev_ptr_write = ptr.add(next_write - 1);
+ if !same_bucket(&mut *ptr_read, &mut *prev_ptr_write) {
+ if next_read != next_write {
+ let ptr_write = prev_ptr_write.offset(1);
+ mem::swap(&mut *ptr_read, &mut *ptr_write);
+ }
+ next_write += 1;
+ }
+ next_read += 1;
+ }
+ }
+
+ self.split_at_mut(next_write)
+ }
+
+ /// Moves all but the first of consecutive elements to the end of the slice that resolve
+ /// to the same key.
+ ///
+ /// Returns two slices. The first contains no consecutive repeated elements.
+ /// The second contains all the duplicates in no specified order.
+ ///
+ /// If the slice is sorted, the first returned slice contains no duplicates.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_partition_dedup)]
+ ///
+ /// let mut slice = [10, 20, 21, 30, 30, 20, 11, 13];
+ ///
+ /// let (dedup, duplicates) = slice.partition_dedup_by_key(|i| *i / 10);
+ ///
+ /// assert_eq!(dedup, [10, 20, 30, 20, 11]);
+ /// assert_eq!(duplicates, [21, 30, 13]);
+ /// ```
+ #[unstable(feature = "slice_partition_dedup", issue = "54279")]
+ #[inline]
+ pub fn partition_dedup_by_key<K, F>(&mut self, mut key: F) -> (&mut [T], &mut [T])
+ where
+ F: FnMut(&mut T) -> K,
+ K: PartialEq,
+ {
+ self.partition_dedup_by(|a, b| key(a) == key(b))
+ }
+
+ /// Rotates the slice in-place such that the first `mid` elements of the
+ /// slice move to the end while the last `self.len() - mid` elements move to
+ /// the front. After calling `rotate_left`, the element previously at index
+ /// `mid` will become the first element in the slice.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `mid` is greater than the length of the
+ /// slice. Note that `mid == self.len()` does _not_ panic and is a no-op
+ /// rotation.
+ ///
+ /// # Complexity
+ ///
+ /// Takes linear (in `self.len()`) time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
+ /// a.rotate_left(2);
+ /// assert_eq!(a, ['c', 'd', 'e', 'f', 'a', 'b']);
+ /// ```
+ ///
+ /// Rotating a subslice:
+ ///
+ /// ```
+ /// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
+ /// a[1..5].rotate_left(1);
+ /// assert_eq!(a, ['a', 'c', 'd', 'e', 'b', 'f']);
+ /// ```
+ #[stable(feature = "slice_rotate", since = "1.26.0")]
+ pub fn rotate_left(&mut self, mid: usize) {
+ assert!(mid <= self.len());
+ let k = self.len() - mid;
+ let p = self.as_mut_ptr();
+
+ // SAFETY: The range `[p.add(mid) - mid, p.add(mid) + k)` is trivially
+ // valid for reading and writing, as required by `ptr_rotate`.
+ unsafe {
+ rotate::ptr_rotate(mid, p.add(mid), k);
+ }
+ }
+
+ /// Rotates the slice in-place such that the first `self.len() - k`
+ /// elements of the slice move to the end while the last `k` elements move
+ /// to the front. After calling `rotate_right`, the element previously at
+ /// index `self.len() - k` will become the first element in the slice.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `k` is greater than the length of the
+ /// slice. Note that `k == self.len()` does _not_ panic and is a no-op
+ /// rotation.
+ ///
+ /// # Complexity
+ ///
+ /// Takes linear (in `self.len()`) time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
+ /// a.rotate_right(2);
+ /// assert_eq!(a, ['e', 'f', 'a', 'b', 'c', 'd']);
+ /// ```
+ ///
+ /// Rotating a subslice:
+ ///
+ /// ```
+ /// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
+ /// a[1..5].rotate_right(1);
+ /// assert_eq!(a, ['a', 'e', 'b', 'c', 'd', 'f']);
+ /// ```
+ #[stable(feature = "slice_rotate", since = "1.26.0")]
+ pub fn rotate_right(&mut self, k: usize) {
+ assert!(k <= self.len());
+ let mid = self.len() - k;
+ let p = self.as_mut_ptr();
+
+ // SAFETY: The range `[p.add(mid) - mid, p.add(mid) + k)` is trivially
+ // valid for reading and writing, as required by `ptr_rotate`.
+ unsafe {
+ rotate::ptr_rotate(mid, p.add(mid), k);
+ }
+ }
+
+ /// Fills `self` with elements by cloning `value`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut buf = vec![0; 10];
+ /// buf.fill(1);
+ /// assert_eq!(buf, vec![1; 10]);
+ /// ```
+ #[doc(alias = "memset")]
+ #[stable(feature = "slice_fill", since = "1.50.0")]
+ pub fn fill(&mut self, value: T)
+ where
+ T: Clone,
+ {
+ specialize::SpecFill::spec_fill(self, value);
+ }
+
+ /// Fills `self` with elements returned by calling a closure repeatedly.
+ ///
+ /// This method uses a closure to create new values. If you'd rather
+ /// [`Clone`] a given value, use [`fill`]. If you want to use the [`Default`]
+ /// trait to generate values, you can pass [`Default::default`] as the
+ /// argument.
+ ///
+ /// [`fill`]: slice::fill
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut buf = vec![1; 10];
+ /// buf.fill_with(Default::default);
+ /// assert_eq!(buf, vec![0; 10]);
+ /// ```
+ #[stable(feature = "slice_fill_with", since = "1.51.0")]
+ pub fn fill_with<F>(&mut self, mut f: F)
+ where
+ F: FnMut() -> T,
+ {
+ for el in self {
+ *el = f();
+ }
+ }
+
+ /// Copies the elements from `src` into `self`.
+ ///
+ /// The length of `src` must be the same as `self`.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if the two slices have different lengths.
+ ///
+ /// # Examples
+ ///
+ /// Cloning two elements from a slice into another:
+ ///
+ /// ```
+ /// let src = [1, 2, 3, 4];
+ /// let mut dst = [0, 0];
+ ///
+ /// // Because the slices have to be the same length,
+ /// // we slice the source slice from four elements
+ /// // to two. It will panic if we don't do this.
+ /// dst.clone_from_slice(&src[2..]);
+ ///
+ /// assert_eq!(src, [1, 2, 3, 4]);
+ /// assert_eq!(dst, [3, 4]);
+ /// ```
+ ///
+ /// Rust enforces that there can only be one mutable reference with no
+ /// immutable references to a particular piece of data in a particular
+ /// scope. Because of this, attempting to use `clone_from_slice` on a
+ /// single slice will result in a compile failure:
+ ///
+ /// ```compile_fail
+ /// let mut slice = [1, 2, 3, 4, 5];
+ ///
+ /// slice[..2].clone_from_slice(&slice[3..]); // compile fail!
+ /// ```
+ ///
+ /// To work around this, we can use [`split_at_mut`] to create two distinct
+ /// sub-slices from a slice:
+ ///
+ /// ```
+ /// let mut slice = [1, 2, 3, 4, 5];
+ ///
+ /// {
+ /// let (left, right) = slice.split_at_mut(2);
+ /// left.clone_from_slice(&right[1..]);
+ /// }
+ ///
+ /// assert_eq!(slice, [4, 5, 3, 4, 5]);
+ /// ```
+ ///
+ /// [`copy_from_slice`]: slice::copy_from_slice
+ /// [`split_at_mut`]: slice::split_at_mut
+ #[stable(feature = "clone_from_slice", since = "1.7.0")]
+ #[track_caller]
+ pub fn clone_from_slice(&mut self, src: &[T])
+ where
+ T: Clone,
+ {
+ self.spec_clone_from(src);
+ }
+
+ /// Copies all elements from `src` into `self`, using a memcpy.
+ ///
+ /// The length of `src` must be the same as `self`.
+ ///
+ /// If `T` does not implement `Copy`, use [`clone_from_slice`].
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if the two slices have different lengths.
+ ///
+ /// # Examples
+ ///
+ /// Copying two elements from a slice into another:
+ ///
+ /// ```
+ /// let src = [1, 2, 3, 4];
+ /// let mut dst = [0, 0];
+ ///
+ /// // Because the slices have to be the same length,
+ /// // we slice the source slice from four elements
+ /// // to two. It will panic if we don't do this.
+ /// dst.copy_from_slice(&src[2..]);
+ ///
+ /// assert_eq!(src, [1, 2, 3, 4]);
+ /// assert_eq!(dst, [3, 4]);
+ /// ```
+ ///
+ /// Rust enforces that there can only be one mutable reference with no
+ /// immutable references to a particular piece of data in a particular
+ /// scope. Because of this, attempting to use `copy_from_slice` on a
+ /// single slice will result in a compile failure:
+ ///
+ /// ```compile_fail
+ /// let mut slice = [1, 2, 3, 4, 5];
+ ///
+ /// slice[..2].copy_from_slice(&slice[3..]); // compile fail!
+ /// ```
+ ///
+ /// To work around this, we can use [`split_at_mut`] to create two distinct
+ /// sub-slices from a slice:
+ ///
+ /// ```
+ /// let mut slice = [1, 2, 3, 4, 5];
+ ///
+ /// {
+ /// let (left, right) = slice.split_at_mut(2);
+ /// left.copy_from_slice(&right[1..]);
+ /// }
+ ///
+ /// assert_eq!(slice, [4, 5, 3, 4, 5]);
+ /// ```
+ ///
+ /// [`clone_from_slice`]: slice::clone_from_slice
+ /// [`split_at_mut`]: slice::split_at_mut
+ #[doc(alias = "memcpy")]
+ #[stable(feature = "copy_from_slice", since = "1.9.0")]
+ #[track_caller]
+ pub fn copy_from_slice(&mut self, src: &[T])
+ where
+ T: Copy,
+ {
+ // The panic code path was put into a cold function to not bloat the
+ // call site.
+ #[inline(never)]
+ #[cold]
+ #[track_caller]
+ fn len_mismatch_fail(dst_len: usize, src_len: usize) -> ! {
+ panic!(
+ "source slice length ({}) does not match destination slice length ({})",
+ src_len, dst_len,
+ );
+ }
+
+ if self.len() != src.len() {
+ len_mismatch_fail(self.len(), src.len());
+ }
+
+ // SAFETY: `self` is valid for `self.len()` elements by definition, and `src` was
+ // checked to have the same length. The slices cannot overlap because
+ // mutable references are exclusive.
+ unsafe {
+ ptr::copy_nonoverlapping(src.as_ptr(), self.as_mut_ptr(), self.len());
+ }
+ }
+
+ /// Copies elements from one part of the slice to another part of itself,
+ /// using a memmove.
+ ///
+ /// `src` is the range within `self` to copy from. `dest` is the starting
+ /// index of the range within `self` to copy to, which will have the same
+ /// length as `src`. The two ranges may overlap. The ends of the two ranges
+ /// must be less than or equal to `self.len()`.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if either range exceeds the end of the slice,
+ /// or if the end of `src` is before the start.
+ ///
+ /// # Examples
+ ///
+ /// Copying four bytes within a slice:
+ ///
+ /// ```
+ /// let mut bytes = *b"Hello, World!";
+ ///
+ /// bytes.copy_within(1..5, 8);
+ ///
+ /// assert_eq!(&bytes, b"Hello, Wello!");
+ /// ```
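+ ///
+ /// The two ranges may overlap; a minimal sketch:
+ ///
+ /// ```
+ /// let mut bytes = *b"Hello, World!";
+ ///
+ /// // Copy bytes 0..10 onto positions 3..13; `memmove` semantics make the
+ /// // overlap fine.
+ /// bytes.copy_within(0..10, 3);
+ ///
+ /// assert_eq!(&bytes, b"HelHello, Wor");
+ /// ```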
+ #[stable(feature = "copy_within", since = "1.37.0")]
+ #[track_caller]
+ pub fn copy_within<R: RangeBounds<usize>>(&mut self, src: R, dest: usize)
+ where
+ T: Copy,
+ {
+ let Range { start: src_start, end: src_end } = slice::range(src, ..self.len());
+ let count = src_end - src_start;
+ assert!(dest <= self.len() - count, "dest is out of bounds");
+ // SAFETY: the conditions for `ptr::copy` have all been checked above,
+ // as have those for `ptr::add`.
+ unsafe {
+ // Derive both `src_ptr` and `dest_ptr` from the same loan
+ let ptr = self.as_mut_ptr();
+ let src_ptr = ptr.add(src_start);
+ let dest_ptr = ptr.add(dest);
+ ptr::copy(src_ptr, dest_ptr, count);
+ }
+ }
+
+ /// Swaps all elements in `self` with those in `other`.
+ ///
+ /// The length of `other` must be the same as `self`.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if the two slices have different lengths.
+ ///
+ /// # Example
+ ///
+ /// Swapping two elements across slices:
+ ///
+ /// ```
+ /// let mut slice1 = [0, 0];
+ /// let mut slice2 = [1, 2, 3, 4];
+ ///
+ /// slice1.swap_with_slice(&mut slice2[2..]);
+ ///
+ /// assert_eq!(slice1, [3, 4]);
+ /// assert_eq!(slice2, [1, 2, 0, 0]);
+ /// ```
+ ///
+ /// Rust enforces that there can only be one mutable reference to a
+ /// particular piece of data in a particular scope. Because of this,
+ /// attempting to use `swap_with_slice` on a single slice will result in
+ /// a compile failure:
+ ///
+ /// ```compile_fail
+ /// let mut slice = [1, 2, 3, 4, 5];
+ /// slice[..2].swap_with_slice(&mut slice[3..]); // compile fail!
+ /// ```
+ ///
+ /// To work around this, we can use [`split_at_mut`] to create two distinct
+ /// mutable sub-slices from a slice:
+ ///
+ /// ```
+ /// let mut slice = [1, 2, 3, 4, 5];
+ ///
+ /// {
+ /// let (left, right) = slice.split_at_mut(2);
+ /// left.swap_with_slice(&mut right[1..]);
+ /// }
+ ///
+ /// assert_eq!(slice, [4, 5, 3, 1, 2]);
+ /// ```
+ ///
+ /// [`split_at_mut`]: slice::split_at_mut
+ #[stable(feature = "swap_with_slice", since = "1.27.0")]
+ #[track_caller]
+ pub fn swap_with_slice(&mut self, other: &mut [T]) {
+ assert!(self.len() == other.len(), "destination and source slices have different lengths");
+ // SAFETY: `self` is valid for `self.len()` elements by definition, and `src` was
+ // checked to have the same length. The slices cannot overlap because
+ // mutable references are exclusive.
+ unsafe {
+ ptr::swap_nonoverlapping(self.as_mut_ptr(), other.as_mut_ptr(), self.len());
+ }
+ }
+
+    /// Calculates the lengths of the middle and trailing slices for `align_to{,_mut}`.
+ fn align_to_offsets<U>(&self) -> (usize, usize) {
+        // For `rest`, we need to figure out the smallest number of `U`s whose total size
+        // is a whole number of `T`s, and how many `T`s each such group takes.
+ //
+ // Consider for example T=u8 U=u16. Then we can put 1 U in 2 Ts. Simple. Now, consider
+ // for example a case where size_of::<T> = 16, size_of::<U> = 24. We can put 2 Us in
+ // place of every 3 Ts in the `rest` slice. A bit more complicated.
+ //
+ // Formula to calculate this is:
+ //
+ // Us = lcm(size_of::<T>, size_of::<U>) / size_of::<U>
+ // Ts = lcm(size_of::<T>, size_of::<U>) / size_of::<T>
+ //
+ // Expanded and simplified:
+ //
+ // Us = size_of::<T> / gcd(size_of::<T>, size_of::<U>)
+ // Ts = size_of::<U> / gcd(size_of::<T>, size_of::<U>)
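+        //
+        // Worked example with the sizes above: gcd(16, 24) = 8, so Ts = 24 / 8 = 3 and
+        // Us = 16 / 8 = 2, i.e. two `U`s for every three `T`s.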
+ //
+        // Luckily, since all of this is constant-evaluated, performance is not a concern here.
+ #[inline]
+ fn gcd(a: usize, b: usize) -> usize {
+ use crate::intrinsics;
+            // Iterative Stein’s algorithm (binary GCD).
+            // We should still make this a `const fn` (and revert to the recursive algorithm if we do),
+            // because relying on LLVM to const-evaluate all of this is not something we want to depend on.
+
+ // SAFETY: `a` and `b` are checked to be non-zero values.
+ let (ctz_a, mut ctz_b) = unsafe {
+ if a == 0 {
+ return b;
+ }
+ if b == 0 {
+ return a;
+ }
+ (intrinsics::cttz_nonzero(a), intrinsics::cttz_nonzero(b))
+ };
+ let k = ctz_a.min(ctz_b);
+ let mut a = a >> ctz_a;
+ let mut b = b;
+ loop {
+ // remove all factors of 2 from b
+ b >>= ctz_b;
+ if a > b {
+ mem::swap(&mut a, &mut b);
+ }
+ b = b - a;
+ // SAFETY: `b` is checked to be non-zero.
+ unsafe {
+ if b == 0 {
+ break;
+ }
+ ctz_b = intrinsics::cttz_nonzero(b);
+ }
+ }
+ a << k
+ }
+ let gcd: usize = gcd(mem::size_of::<T>(), mem::size_of::<U>());
+ let ts: usize = mem::size_of::<U>() / gcd;
+ let us: usize = mem::size_of::<T>() / gcd;
+
+ // Armed with this knowledge, we can find how many `U`s we can fit!
+ let us_len = self.len() / ts * us;
+ // And how many `T`s will be in the trailing slice!
+ let ts_len = self.len() % ts;
+ (us_len, ts_len)
+ }
+
+ /// Transmute the slice to a slice of another type, ensuring alignment of the types is
+ /// maintained.
+ ///
+ /// This method splits the slice into three distinct slices: prefix, correctly aligned middle
+ /// slice of a new type, and the suffix slice. The method may make the middle slice the greatest
+ /// length possible for a given type and input slice, but only your algorithm's performance
+ /// should depend on that, not its correctness. It is permissible for all of the input data to
+ /// be returned as the prefix or suffix slice.
+ ///
+    /// This method has no purpose when either the input element `T` or the output element `U` is
+    /// zero-sized, and it will return the original slice without splitting anything.
+ ///
+ /// # Safety
+ ///
+ /// This method is essentially a `transmute` with respect to the elements in the returned
+ /// middle slice, so all the usual caveats pertaining to `transmute::<T, U>` also apply here.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// unsafe {
+ /// let bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
+ /// let (prefix, shorts, suffix) = bytes.align_to::<u16>();
+ /// // less_efficient_algorithm_for_bytes(prefix);
+ /// // more_efficient_algorithm_for_aligned_shorts(shorts);
+ /// // less_efficient_algorithm_for_bytes(suffix);
+ /// }
+ /// ```
+ #[stable(feature = "slice_align_to", since = "1.30.0")]
+ #[must_use]
+ pub unsafe fn align_to<U>(&self) -> (&[T], &[U], &[T]) {
+        // Note that most of this function will be constant-evaluated.
+        if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 {
+            // Handle ZSTs specially, which is to say: don't handle them at all.
+ return (self, &[], &[]);
+ }
+
+        // First, find the point at which we split between the first and second slice. Easy with
+        // `ptr.align_offset`.
+ let ptr = self.as_ptr();
+ // SAFETY: See the `align_to_mut` method for the detailed safety comment.
+ let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
+ if offset > self.len() {
+ (self, &[], &[])
+ } else {
+ let (left, rest) = self.split_at(offset);
+ let (us_len, ts_len) = rest.align_to_offsets::<U>();
+ // SAFETY: now `rest` is definitely aligned, so `from_raw_parts` below is okay,
+ // since the caller guarantees that we can transmute `T` to `U` safely.
+ unsafe {
+ (
+ left,
+ from_raw_parts(rest.as_ptr() as *const U, us_len),
+ from_raw_parts(rest.as_ptr().add(rest.len() - ts_len), ts_len),
+ )
+ }
+ }
+ }
+
+ /// Transmute the slice to a slice of another type, ensuring alignment of the types is
+ /// maintained.
+ ///
+ /// This method splits the slice into three distinct slices: prefix, correctly aligned middle
+ /// slice of a new type, and the suffix slice. The method may make the middle slice the greatest
+ /// length possible for a given type and input slice, but only your algorithm's performance
+ /// should depend on that, not its correctness. It is permissible for all of the input data to
+ /// be returned as the prefix or suffix slice.
+ ///
+    /// This method has no purpose when either the input element `T` or the output element `U` is
+    /// zero-sized, and it will return the original slice without splitting anything.
+ ///
+ /// # Safety
+ ///
+ /// This method is essentially a `transmute` with respect to the elements in the returned
+ /// middle slice, so all the usual caveats pertaining to `transmute::<T, U>` also apply here.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// unsafe {
+ /// let mut bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
+ /// let (prefix, shorts, suffix) = bytes.align_to_mut::<u16>();
+ /// // less_efficient_algorithm_for_bytes(prefix);
+ /// // more_efficient_algorithm_for_aligned_shorts(shorts);
+ /// // less_efficient_algorithm_for_bytes(suffix);
+ /// }
+ /// ```
+ #[stable(feature = "slice_align_to", since = "1.30.0")]
+ #[must_use]
+ pub unsafe fn align_to_mut<U>(&mut self) -> (&mut [T], &mut [U], &mut [T]) {
+        // Note that most of this function will be constant-evaluated.
+        if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 {
+            // Handle ZSTs specially, which is to say: don't handle them at all.
+ return (self, &mut [], &mut []);
+ }
+
+        // First, find the point at which we split between the first and second slice. Easy with
+        // `ptr.align_offset`.
+ let ptr = self.as_ptr();
+ // SAFETY: Here we are ensuring we will use aligned pointers for U for the
+ // rest of the method. This is done by passing a pointer to &[T] with an
+ // alignment targeted for U.
+ // `crate::ptr::align_offset` is called with a correctly aligned and
+ // valid pointer `ptr` (it comes from a reference to `self`) and with
+        // a size that is a power of two (since it comes from the alignment of `U`),
+ // satisfying its safety constraints.
+ let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
+ if offset > self.len() {
+ (self, &mut [], &mut [])
+ } else {
+ let (left, rest) = self.split_at_mut(offset);
+ let (us_len, ts_len) = rest.align_to_offsets::<U>();
+ let rest_len = rest.len();
+ let mut_ptr = rest.as_mut_ptr();
+ // We can't use `rest` again after this, that would invalidate its alias `mut_ptr`!
+ // SAFETY: see comments for `align_to`.
+ unsafe {
+ (
+ left,
+ from_raw_parts_mut(mut_ptr as *mut U, us_len),
+ from_raw_parts_mut(mut_ptr.add(rest_len - ts_len), ts_len),
+ )
+ }
+ }
+ }
+
+ /// Split a slice into a prefix, a middle of aligned SIMD types, and a suffix.
+ ///
+ /// This is a safe wrapper around [`slice::align_to`], so has the same weak
+ /// postconditions as that method. You're only assured that
+ /// `self.len() == prefix.len() + middle.len() * LANES + suffix.len()`.
+ ///
+ /// Notably, all of the following are possible:
+ /// - `prefix.len() >= LANES`.
+ /// - `middle.is_empty()` despite `self.len() >= 3 * LANES`.
+ /// - `suffix.len() >= LANES`.
+ ///
+ /// That said, this is a safe method, so if you're only writing safe code,
+ /// then this can at most cause incorrect logic, not unsoundness.
+ ///
+ /// # Panics
+ ///
+ /// This will panic if the size of the SIMD type is different from
+ /// `LANES` times that of the scalar.
+ ///
+    /// At the time of writing, the trait restrictions on `Simd<T, LANES>` keep
+ /// that from ever happening, as only power-of-two numbers of lanes are
+ /// supported. It's possible that, in the future, those restrictions might
+ /// be lifted in a way that would make it possible to see panics from this
+ /// method for something like `LANES == 3`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(portable_simd)]
+ /// use core::simd::SimdFloat;
+ ///
+ /// let short = &[1, 2, 3];
+ /// let (prefix, middle, suffix) = short.as_simd::<4>();
+ /// assert_eq!(middle, []); // Not enough elements for anything in the middle
+ ///
+ /// // They might be split in any possible way between prefix and suffix
+ /// let it = prefix.iter().chain(suffix).copied();
+ /// assert_eq!(it.collect::<Vec<_>>(), vec![1, 2, 3]);
+ ///
+ /// fn basic_simd_sum(x: &[f32]) -> f32 {
+ /// use std::ops::Add;
+ /// use std::simd::f32x4;
+ /// let (prefix, middle, suffix) = x.as_simd();
+ /// let sums = f32x4::from_array([
+ /// prefix.iter().copied().sum(),
+ /// 0.0,
+ /// 0.0,
+ /// suffix.iter().copied().sum(),
+ /// ]);
+ /// let sums = middle.iter().copied().fold(sums, f32x4::add);
+ /// sums.reduce_sum()
+ /// }
+ ///
+ /// let numbers: Vec<f32> = (1..101).map(|x| x as _).collect();
+ /// assert_eq!(basic_simd_sum(&numbers[1..99]), 4949.0);
+ /// ```
+ #[unstable(feature = "portable_simd", issue = "86656")]
+ #[must_use]
+ pub fn as_simd<const LANES: usize>(&self) -> (&[T], &[Simd<T, LANES>], &[T])
+ where
+ Simd<T, LANES>: AsRef<[T; LANES]>,
+ T: simd::SimdElement,
+ simd::LaneCount<LANES>: simd::SupportedLaneCount,
+ {
+ // These are expected to always match, as vector types are laid out like
+ // arrays per <https://llvm.org/docs/LangRef.html#vector-type>, but we
+ // might as well double-check since it'll optimize away anyhow.
+ assert_eq!(mem::size_of::<Simd<T, LANES>>(), mem::size_of::<[T; LANES]>());
+
+ // SAFETY: The simd types have the same layout as arrays, just with
+ // potentially-higher alignment, so the de-facto transmutes are sound.
+ unsafe { self.align_to() }
+ }
+
+ /// Split a slice into a prefix, a middle of aligned SIMD types, and a suffix.
+ ///
+ /// This is a safe wrapper around [`slice::align_to_mut`], so has the same weak
+ /// postconditions as that method. You're only assured that
+ /// `self.len() == prefix.len() + middle.len() * LANES + suffix.len()`.
+ ///
+ /// Notably, all of the following are possible:
+ /// - `prefix.len() >= LANES`.
+ /// - `middle.is_empty()` despite `self.len() >= 3 * LANES`.
+ /// - `suffix.len() >= LANES`.
+ ///
+ /// That said, this is a safe method, so if you're only writing safe code,
+ /// then this can at most cause incorrect logic, not unsoundness.
+ ///
+ /// This is the mutable version of [`slice::as_simd`]; see that for examples.
+ ///
+ /// # Panics
+ ///
+ /// This will panic if the size of the SIMD type is different from
+ /// `LANES` times that of the scalar.
+ ///
+    /// At the time of writing, the trait restrictions on `Simd<T, LANES>` keep
+ /// that from ever happening, as only power-of-two numbers of lanes are
+ /// supported. It's possible that, in the future, those restrictions might
+ /// be lifted in a way that would make it possible to see panics from this
+ /// method for something like `LANES == 3`.
+ #[unstable(feature = "portable_simd", issue = "86656")]
+ #[must_use]
+ pub fn as_simd_mut<const LANES: usize>(&mut self) -> (&mut [T], &mut [Simd<T, LANES>], &mut [T])
+ where
+ Simd<T, LANES>: AsMut<[T; LANES]>,
+ T: simd::SimdElement,
+ simd::LaneCount<LANES>: simd::SupportedLaneCount,
+ {
+ // These are expected to always match, as vector types are laid out like
+ // arrays per <https://llvm.org/docs/LangRef.html#vector-type>, but we
+ // might as well double-check since it'll optimize away anyhow.
+ assert_eq!(mem::size_of::<Simd<T, LANES>>(), mem::size_of::<[T; LANES]>());
+
+ // SAFETY: The simd types have the same layout as arrays, just with
+ // potentially-higher alignment, so the de-facto transmutes are sound.
+ unsafe { self.align_to_mut() }
+ }
+
+ /// Checks if the elements of this slice are sorted.
+ ///
+ /// That is, for each element `a` and its following element `b`, `a <= b` must hold. If the
+ /// slice yields exactly zero or one element, `true` is returned.
+ ///
+    /// Note that if `T` is only `PartialOrd`, but not `Ord`, the above definition
+ /// implies that this function returns `false` if any two consecutive items are not
+ /// comparable.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(is_sorted)]
+ /// let empty: [i32; 0] = [];
+ ///
+ /// assert!([1, 2, 2, 9].is_sorted());
+ /// assert!(![1, 3, 2, 4].is_sorted());
+ /// assert!([0].is_sorted());
+ /// assert!(empty.is_sorted());
+ /// assert!(![0.0, 1.0, f32::NAN].is_sorted());
+ /// ```
+ #[inline]
+ #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
+ #[must_use]
+ pub fn is_sorted(&self) -> bool
+ where
+ T: PartialOrd,
+ {
+ self.is_sorted_by(|a, b| a.partial_cmp(b))
+ }
+
+ /// Checks if the elements of this slice are sorted using the given comparator function.
+ ///
+ /// Instead of using `PartialOrd::partial_cmp`, this function uses the given `compare`
+ /// function to determine the ordering of two elements. Apart from that, it's equivalent to
+ /// [`is_sorted`]; see its documentation for more information.
+ ///
+ /// [`is_sorted`]: slice::is_sorted
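+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch (note that in this unstable API the comparator returns
+    /// `Option<Ordering>`):
+    ///
+    /// ```
+    /// #![feature(is_sorted)]
+    ///
+    /// assert!([1, 2, 2, 9].is_sorted_by(|a, b| a.partial_cmp(b)));
+    /// assert!(![1, 3, 2].is_sorted_by(|a, b| a.partial_cmp(b)));
+    /// ```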
+ #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
+ #[must_use]
+ pub fn is_sorted_by<F>(&self, mut compare: F) -> bool
+ where
+ F: FnMut(&T, &T) -> Option<Ordering>,
+ {
+ self.iter().is_sorted_by(|a, b| compare(*a, *b))
+ }
+
+ /// Checks if the elements of this slice are sorted using the given key extraction function.
+ ///
+ /// Instead of comparing the slice's elements directly, this function compares the keys of the
+ /// elements, as determined by `f`. Apart from that, it's equivalent to [`is_sorted`]; see its
+ /// documentation for more information.
+ ///
+ /// [`is_sorted`]: slice::is_sorted
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(is_sorted)]
+ ///
+ /// assert!(["c", "bb", "aaa"].is_sorted_by_key(|s| s.len()));
+ /// assert!(![-2i32, -1, 0, 3].is_sorted_by_key(|n| n.abs()));
+ /// ```
+ #[inline]
+ #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
+ #[must_use]
+ pub fn is_sorted_by_key<F, K>(&self, f: F) -> bool
+ where
+ F: FnMut(&T) -> K,
+ K: PartialOrd,
+ {
+ self.iter().is_sorted_by_key(f)
+ }
+
+ /// Returns the index of the partition point according to the given predicate
+ /// (the index of the first element of the second partition).
+ ///
+ /// The slice is assumed to be partitioned according to the given predicate.
+ /// This means that all elements for which the predicate returns true are at the start of the slice
+ /// and all elements for which the predicate returns false are at the end.
+    /// For example, `[7, 15, 3, 5, 4, 12, 6]` is partitioned under the predicate `x % 2 != 0`
+ /// (all odd numbers are at the start, all even at the end).
+ ///
+ /// If this slice is not partitioned, the returned result is unspecified and meaningless,
+ /// as this method performs a kind of binary search.
+ ///
+ /// See also [`binary_search`], [`binary_search_by`], and [`binary_search_by_key`].
+ ///
+ /// [`binary_search`]: slice::binary_search
+ /// [`binary_search_by`]: slice::binary_search_by
+ /// [`binary_search_by_key`]: slice::binary_search_by_key
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = [1, 2, 3, 3, 5, 6, 7];
+ /// let i = v.partition_point(|&x| x < 5);
+ ///
+ /// assert_eq!(i, 4);
+ /// assert!(v[..i].iter().all(|&x| x < 5));
+ /// assert!(v[i..].iter().all(|&x| !(x < 5)));
+ /// ```
+ ///
+    /// If you want to insert an item into a sorted vector, while maintaining
+ /// sort order:
+ ///
+ /// ```
+ /// let mut s = vec![0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
+ /// let num = 42;
+ /// let idx = s.partition_point(|&x| x < num);
+ /// s.insert(idx, num);
+ /// assert_eq!(s, [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 42, 55]);
+ /// ```
+ #[stable(feature = "partition_point", since = "1.52.0")]
+ #[must_use]
+ pub fn partition_point<P>(&self, mut pred: P) -> usize
+ where
+ P: FnMut(&T) -> bool,
+ {
+ self.binary_search_by(|x| if pred(x) { Less } else { Greater }).unwrap_or_else(|i| i)
+ }
+
+ /// Removes the subslice corresponding to the given range
+ /// and returns a reference to it.
+ ///
+ /// Returns `None` and does not modify the slice if the given
+ /// range is out of bounds.
+ ///
+ /// Note that this method only accepts one-sided ranges such as
+ /// `2..` or `..6`, but not `2..6`.
+ ///
+ /// # Examples
+ ///
+ /// Taking the first three elements of a slice:
+ ///
+ /// ```
+ /// #![feature(slice_take)]
+ ///
+ /// let mut slice: &[_] = &['a', 'b', 'c', 'd'];
+ /// let mut first_three = slice.take(..3).unwrap();
+ ///
+ /// assert_eq!(slice, &['d']);
+ /// assert_eq!(first_three, &['a', 'b', 'c']);
+ /// ```
+ ///
+ /// Taking the last two elements of a slice:
+ ///
+ /// ```
+ /// #![feature(slice_take)]
+ ///
+ /// let mut slice: &[_] = &['a', 'b', 'c', 'd'];
+ /// let mut tail = slice.take(2..).unwrap();
+ ///
+ /// assert_eq!(slice, &['a', 'b']);
+ /// assert_eq!(tail, &['c', 'd']);
+ /// ```
+ ///
+ /// Getting `None` when `range` is out of bounds:
+ ///
+ /// ```
+ /// #![feature(slice_take)]
+ ///
+ /// let mut slice: &[_] = &['a', 'b', 'c', 'd'];
+ ///
+ /// assert_eq!(None, slice.take(5..));
+ /// assert_eq!(None, slice.take(..5));
+ /// assert_eq!(None, slice.take(..=4));
+ /// let expected: &[char] = &['a', 'b', 'c', 'd'];
+ /// assert_eq!(Some(expected), slice.take(..4));
+ /// ```
+ #[inline]
+ #[must_use = "method does not modify the slice if the range is out of bounds"]
+ #[unstable(feature = "slice_take", issue = "62280")]
+ pub fn take<'a, R: OneSidedRange<usize>>(self: &mut &'a Self, range: R) -> Option<&'a Self> {
+ let (direction, split_index) = split_point_of(range)?;
+ if split_index > self.len() {
+ return None;
+ }
+ let (front, back) = self.split_at(split_index);
+ match direction {
+ Direction::Front => {
+ *self = back;
+ Some(front)
+ }
+ Direction::Back => {
+ *self = front;
+ Some(back)
+ }
+ }
+ }
+
+ /// Removes the subslice corresponding to the given range
+ /// and returns a mutable reference to it.
+ ///
+ /// Returns `None` and does not modify the slice if the given
+ /// range is out of bounds.
+ ///
+ /// Note that this method only accepts one-sided ranges such as
+ /// `2..` or `..6`, but not `2..6`.
+ ///
+ /// # Examples
+ ///
+ /// Taking the first three elements of a slice:
+ ///
+ /// ```
+ /// #![feature(slice_take)]
+ ///
+ /// let mut slice: &mut [_] = &mut ['a', 'b', 'c', 'd'];
+ /// let mut first_three = slice.take_mut(..3).unwrap();
+ ///
+ /// assert_eq!(slice, &mut ['d']);
+ /// assert_eq!(first_three, &mut ['a', 'b', 'c']);
+ /// ```
+ ///
+ /// Taking the last two elements of a slice:
+ ///
+ /// ```
+ /// #![feature(slice_take)]
+ ///
+ /// let mut slice: &mut [_] = &mut ['a', 'b', 'c', 'd'];
+ /// let mut tail = slice.take_mut(2..).unwrap();
+ ///
+ /// assert_eq!(slice, &mut ['a', 'b']);
+ /// assert_eq!(tail, &mut ['c', 'd']);
+ /// ```
+ ///
+ /// Getting `None` when `range` is out of bounds:
+ ///
+ /// ```
+ /// #![feature(slice_take)]
+ ///
+ /// let mut slice: &mut [_] = &mut ['a', 'b', 'c', 'd'];
+ ///
+ /// assert_eq!(None, slice.take_mut(5..));
+ /// assert_eq!(None, slice.take_mut(..5));
+ /// assert_eq!(None, slice.take_mut(..=4));
+ /// let expected: &mut [_] = &mut ['a', 'b', 'c', 'd'];
+ /// assert_eq!(Some(expected), slice.take_mut(..4));
+ /// ```
+ #[inline]
+ #[must_use = "method does not modify the slice if the range is out of bounds"]
+ #[unstable(feature = "slice_take", issue = "62280")]
+ pub fn take_mut<'a, R: OneSidedRange<usize>>(
+ self: &mut &'a mut Self,
+ range: R,
+ ) -> Option<&'a mut Self> {
+ let (direction, split_index) = split_point_of(range)?;
+ if split_index > self.len() {
+ return None;
+ }
+ let (front, back) = mem::take(self).split_at_mut(split_index);
+ match direction {
+ Direction::Front => {
+ *self = back;
+ Some(front)
+ }
+ Direction::Back => {
+ *self = front;
+ Some(back)
+ }
+ }
+ }
+
+ /// Removes the first element of the slice and returns a reference
+ /// to it.
+ ///
+ /// Returns `None` if the slice is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_take)]
+ ///
+ /// let mut slice: &[_] = &['a', 'b', 'c'];
+ /// let first = slice.take_first().unwrap();
+ ///
+ /// assert_eq!(slice, &['b', 'c']);
+ /// assert_eq!(first, &'a');
+ /// ```
+ #[inline]
+ #[unstable(feature = "slice_take", issue = "62280")]
+ pub fn take_first<'a>(self: &mut &'a Self) -> Option<&'a T> {
+ let (first, rem) = self.split_first()?;
+ *self = rem;
+ Some(first)
+ }
+
+ /// Removes the first element of the slice and returns a mutable
+ /// reference to it.
+ ///
+ /// Returns `None` if the slice is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_take)]
+ ///
+ /// let mut slice: &mut [_] = &mut ['a', 'b', 'c'];
+ /// let first = slice.take_first_mut().unwrap();
+ /// *first = 'd';
+ ///
+ /// assert_eq!(slice, &['b', 'c']);
+ /// assert_eq!(first, &'d');
+ /// ```
+ #[inline]
+ #[unstable(feature = "slice_take", issue = "62280")]
+ pub fn take_first_mut<'a>(self: &mut &'a mut Self) -> Option<&'a mut T> {
+ let (first, rem) = mem::take(self).split_first_mut()?;
+ *self = rem;
+ Some(first)
+ }
+
+ /// Removes the last element of the slice and returns a reference
+ /// to it.
+ ///
+ /// Returns `None` if the slice is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_take)]
+ ///
+ /// let mut slice: &[_] = &['a', 'b', 'c'];
+ /// let last = slice.take_last().unwrap();
+ ///
+ /// assert_eq!(slice, &['a', 'b']);
+ /// assert_eq!(last, &'c');
+ /// ```
+ #[inline]
+ #[unstable(feature = "slice_take", issue = "62280")]
+ pub fn take_last<'a>(self: &mut &'a Self) -> Option<&'a T> {
+ let (last, rem) = self.split_last()?;
+ *self = rem;
+ Some(last)
+ }
+
+ /// Removes the last element of the slice and returns a mutable
+ /// reference to it.
+ ///
+ /// Returns `None` if the slice is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_take)]
+ ///
+ /// let mut slice: &mut [_] = &mut ['a', 'b', 'c'];
+ /// let last = slice.take_last_mut().unwrap();
+ /// *last = 'd';
+ ///
+ /// assert_eq!(slice, &['a', 'b']);
+ /// assert_eq!(last, &'d');
+ /// ```
+ #[inline]
+ #[unstable(feature = "slice_take", issue = "62280")]
+ pub fn take_last_mut<'a>(self: &mut &'a mut Self) -> Option<&'a mut T> {
+ let (last, rem) = mem::take(self).split_last_mut()?;
+ *self = rem;
+ Some(last)
+ }
+}
+
+impl<T, const N: usize> [[T; N]] {
+ /// Takes a `&[[T; N]]`, and flattens it to a `&[T]`.
+ ///
+ /// # Panics
+ ///
+ /// This panics if the length of the resulting slice would overflow a `usize`.
+ ///
+ /// This is only possible when flattening a slice of arrays of zero-sized
+ /// types, and thus tends to be irrelevant in practice. If
+ /// `size_of::<T>() > 0`, this will never panic.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_flatten)]
+ ///
+ /// assert_eq!([[1, 2, 3], [4, 5, 6]].flatten(), &[1, 2, 3, 4, 5, 6]);
+ ///
+ /// assert_eq!(
+ /// [[1, 2, 3], [4, 5, 6]].flatten(),
+ /// [[1, 2], [3, 4], [5, 6]].flatten(),
+ /// );
+ ///
+ /// let slice_of_empty_arrays: &[[i32; 0]] = &[[], [], [], [], []];
+ /// assert!(slice_of_empty_arrays.flatten().is_empty());
+ ///
+ /// let empty_slice_of_arrays: &[[u32; 10]] = &[];
+ /// assert!(empty_slice_of_arrays.flatten().is_empty());
+ /// ```
+ #[unstable(feature = "slice_flatten", issue = "95629")]
+ pub fn flatten(&self) -> &[T] {
+ let len = if crate::mem::size_of::<T>() == 0 {
+ self.len().checked_mul(N).expect("slice len overflow")
+ } else {
+ // SAFETY: `self.len() * N` cannot overflow because `self` is
+ // already in the address space.
+ unsafe { self.len().unchecked_mul(N) }
+ };
+ // SAFETY: `[T]` is layout-identical to `[T; N]`
+ unsafe { from_raw_parts(self.as_ptr().cast(), len) }
+ }
+
+ /// Takes a `&mut [[T; N]]`, and flattens it to a `&mut [T]`.
+ ///
+ /// # Panics
+ ///
+ /// This panics if the length of the resulting slice would overflow a `usize`.
+ ///
+ /// This is only possible when flattening a slice of arrays of zero-sized
+ /// types, and thus tends to be irrelevant in practice. If
+ /// `size_of::<T>() > 0`, this will never panic.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_flatten)]
+ ///
+ /// fn add_5_to_all(slice: &mut [i32]) {
+ /// for i in slice {
+ /// *i += 5;
+ /// }
+ /// }
+ ///
+ /// let mut array = [[1, 2, 3], [4, 5, 6], [7, 8, 9]];
+ /// add_5_to_all(array.flatten_mut());
+ /// assert_eq!(array, [[6, 7, 8], [9, 10, 11], [12, 13, 14]]);
+ /// ```
+ #[unstable(feature = "slice_flatten", issue = "95629")]
+ pub fn flatten_mut(&mut self) -> &mut [T] {
+ let len = if crate::mem::size_of::<T>() == 0 {
+ self.len().checked_mul(N).expect("slice len overflow")
+ } else {
+ // SAFETY: `self.len() * N` cannot overflow because `self` is
+ // already in the address space.
+ unsafe { self.len().unchecked_mul(N) }
+ };
+ // SAFETY: `[T]` is layout-identical to `[T; N]`
+ unsafe { from_raw_parts_mut(self.as_mut_ptr().cast(), len) }
+ }
+}
+
+#[cfg(not(bootstrap))]
+#[cfg(not(test))]
+impl [f32] {
+ /// Sorts the slice of floats.
+ ///
+ /// This sort is in-place (i.e. does not allocate), *O*(*n* \* log(*n*)) worst-case, and uses
+ /// the ordering defined by [`f32::total_cmp`].
+ ///
+ /// # Current implementation
+ ///
+ /// This uses the same sorting algorithm as [`sort_unstable_by`](slice::sort_unstable_by).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(sort_floats)]
+ /// let mut v = [2.6, -5e-8, f32::NAN, 8.29, f32::INFINITY, -1.0, 0.0, -f32::INFINITY, -0.0];
+ ///
+ /// v.sort_floats();
+ /// let sorted = [-f32::INFINITY, -1.0, -5e-8, -0.0, 0.0, 2.6, 8.29, f32::INFINITY, f32::NAN];
+ /// assert_eq!(&v[..8], &sorted[..8]);
+ /// assert!(v[8].is_nan());
+ /// ```
+ #[unstable(feature = "sort_floats", issue = "93396")]
+ #[inline]
+ pub fn sort_floats(&mut self) {
+ self.sort_unstable_by(f32::total_cmp);
+ }
+}
+
+#[cfg(not(bootstrap))]
+#[cfg(not(test))]
+impl [f64] {
+ /// Sorts the slice of floats.
+ ///
+ /// This sort is in-place (i.e. does not allocate), *O*(*n* \* log(*n*)) worst-case, and uses
+ /// the ordering defined by [`f64::total_cmp`].
+ ///
+ /// # Current implementation
+ ///
+ /// This uses the same sorting algorithm as [`sort_unstable_by`](slice::sort_unstable_by).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(sort_floats)]
+ /// let mut v = [2.6, -5e-8, f64::NAN, 8.29, f64::INFINITY, -1.0, 0.0, -f64::INFINITY, -0.0];
+ ///
+ /// v.sort_floats();
+ /// let sorted = [-f64::INFINITY, -1.0, -5e-8, -0.0, 0.0, 2.6, 8.29, f64::INFINITY, f64::NAN];
+ /// assert_eq!(&v[..8], &sorted[..8]);
+ /// assert!(v[8].is_nan());
+ /// ```
+ #[unstable(feature = "sort_floats", issue = "93396")]
+ #[inline]
+ pub fn sort_floats(&mut self) {
+ self.sort_unstable_by(f64::total_cmp);
+ }
+}
+
+trait CloneFromSpec<T> {
+ fn spec_clone_from(&mut self, src: &[T]);
+}
+
+impl<T> CloneFromSpec<T> for [T]
+where
+ T: Clone,
+{
+ #[track_caller]
+ default fn spec_clone_from(&mut self, src: &[T]) {
+ assert!(self.len() == src.len(), "destination and source slices have different lengths");
+ // NOTE: We need to explicitly slice them to the same length
+ // to make it easier for the optimizer to elide bounds checking.
+ // But since it can't be relied on we also have an explicit specialization for T: Copy.
+ let len = self.len();
+ let src = &src[..len];
+ for i in 0..len {
+ self[i].clone_from(&src[i]);
+ }
+ }
+}
+
+impl<T> CloneFromSpec<T> for [T]
+where
+ T: Copy,
+{
+ #[track_caller]
+ fn spec_clone_from(&mut self, src: &[T]) {
+ self.copy_from_slice(src);
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
+impl<T> const Default for &[T] {
+ /// Creates an empty slice.
+ fn default() -> Self {
+ &[]
+ }
+}
+
+#[stable(feature = "mut_slice_default", since = "1.5.0")]
+#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
+impl<T> const Default for &mut [T] {
+ /// Creates a mutable empty slice.
+ fn default() -> Self {
+ &mut []
+ }
+}
+
+#[unstable(feature = "slice_pattern", reason = "stopgap trait for slice patterns", issue = "56345")]
+/// Patterns in slices - currently, only used by `strip_prefix` and `strip_suffix`. At a future
+/// point, we hope to generalise `core::str::Pattern` (which at the time of writing is limited to
+/// `str`) to slices, and then this trait will be replaced or abolished.
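+///
+/// For instance, the stable `strip_prefix` mentioned above accepts any
+/// `SlicePattern`; a minimal sketch:
+///
+/// ```
+/// let v = [10, 40, 30];
+/// assert_eq!(v.strip_prefix(&[10]), Some(&[40, 30][..]));
+/// assert_eq!(v.strip_prefix(&[50]), None);
+/// ```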
+pub trait SlicePattern {
+ /// The element type of the slice being matched on.
+ type Item;
+
+ /// Currently, the consumers of `SlicePattern` need a slice.
+ fn as_slice(&self) -> &[Self::Item];
+}
+
+#[stable(feature = "slice_strip", since = "1.51.0")]
+impl<T> SlicePattern for [T] {
+ type Item = T;
+
+ #[inline]
+ fn as_slice(&self) -> &[Self::Item] {
+ self
+ }
+}
+
+#[stable(feature = "slice_strip", since = "1.51.0")]
+impl<T, const N: usize> SlicePattern for [T; N] {
+ type Item = T;
+
+ #[inline]
+ fn as_slice(&self) -> &[Self::Item] {
+ self
+ }
+}
diff --git a/library/core/src/slice/raw.rs b/library/core/src/slice/raw.rs
new file mode 100644
index 000000000..107e71ab6
--- /dev/null
+++ b/library/core/src/slice/raw.rs
@@ -0,0 +1,271 @@
+//! Free functions to create `&[T]` and `&mut [T]`.
+
+use crate::array;
+use crate::intrinsics::{assert_unsafe_precondition, is_aligned_and_not_null};
+use crate::ops::Range;
+use crate::ptr;
+
+/// Forms a slice from a pointer and a length.
+///
+/// The `len` argument is the number of **elements**, not the number of bytes.
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * `data` must be [valid] for reads for `len * mem::size_of::<T>()` many bytes,
+/// and it must be properly aligned. This means in particular:
+///
+/// * The entire memory range of this slice must be contained within a single allocated object!
+///   Slices can never span across multiple allocated objects. See [below](#incorrect-usage)
+///   for an example that incorrectly ignores this rule.
+/// * `data` must be non-null and aligned even for zero-length slices. One
+/// reason for this is that enum layout optimizations may rely on references
+/// (including slices of any length) being aligned and non-null to distinguish
+/// them from other data. You can obtain a pointer that is usable as `data`
+/// for zero-length slices using [`NonNull::dangling()`].
+///
+/// * `data` must point to `len` consecutive properly initialized values of type `T`.
+///
+/// * The memory referenced by the returned slice must not be mutated for the duration
+/// of lifetime `'a`, except inside an `UnsafeCell`.
+///
+/// * The total size `len * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
+/// See the safety documentation of [`pointer::offset`].
+///
+/// # Caveat
+///
+/// The lifetime for the returned slice is inferred from its usage. To
+/// prevent accidental misuse, it's suggested to tie the lifetime to whichever
+/// source lifetime is safe in the context, such as by providing a helper
+/// function taking the lifetime of a host value for the slice, or by explicit
+/// annotation.
+///
+/// # Examples
+///
+/// ```
+/// use std::slice;
+///
+/// // manifest a slice for a single element
+/// let x = 42;
+/// let ptr = &x as *const _;
+/// let slice = unsafe { slice::from_raw_parts(ptr, 1) };
+/// assert_eq!(slice[0], 42);
+/// ```
+///
+/// ### Incorrect usage
+///
+/// The following `join_slices` function is **unsound** ⚠️
+///
+/// ```rust,no_run
+/// use std::slice;
+///
+/// fn join_slices<'a, T>(fst: &'a [T], snd: &'a [T]) -> &'a [T] {
+/// let fst_end = fst.as_ptr().wrapping_add(fst.len());
+/// let snd_start = snd.as_ptr();
+/// assert_eq!(fst_end, snd_start, "Slices must be contiguous!");
+/// unsafe {
+/// // The assertion above ensures `fst` and `snd` are contiguous, but they might
+/// // still be contained within _different allocated objects_, in which case
+/// // creating this slice is undefined behavior.
+/// slice::from_raw_parts(fst.as_ptr(), fst.len() + snd.len())
+/// }
+/// }
+///
+/// fn main() {
+/// // `a` and `b` are different allocated objects...
+/// let a = 42;
+/// let b = 27;
+/// // ... which may nevertheless be laid out contiguously in memory: | a | b |
+/// let _ = join_slices(slice::from_ref(&a), slice::from_ref(&b)); // UB
+/// }
+/// ```
+///
+/// [valid]: ptr#safety
+/// [`NonNull::dangling()`]: ptr::NonNull::dangling
+#[inline]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_stable(feature = "const_slice_from_raw_parts", since = "1.64.0")]
+#[must_use]
+pub const unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T] {
+ // SAFETY: the caller must uphold the safety contract for `from_raw_parts`.
+ unsafe {
+ assert_unsafe_precondition!(
+ is_aligned_and_not_null(data)
+ && crate::mem::size_of::<T>().saturating_mul(len) <= isize::MAX as usize
+ );
+ &*ptr::slice_from_raw_parts(data, len)
+ }
+}
+
+/// Performs the same functionality as [`from_raw_parts`], except that a
+/// mutable slice is returned.
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * `data` must be [valid] for both reads and writes for `len * mem::size_of::<T>()` many bytes,
+/// and it must be properly aligned. This means in particular:
+///
+/// * The entire memory range of this slice must be contained within a single allocated object!
+/// Slices can never span across multiple allocated objects.
+/// * `data` must be non-null and aligned even for zero-length slices. One
+/// reason for this is that enum layout optimizations may rely on references
+/// (including slices of any length) being aligned and non-null to distinguish
+/// them from other data. You can obtain a pointer that is usable as `data`
+/// for zero-length slices using [`NonNull::dangling()`].
+///
+/// * `data` must point to `len` consecutive properly initialized values of type `T`.
+///
+/// * The memory referenced by the returned slice must not be accessed through any other pointer
+/// (not derived from the return value) for the duration of lifetime `'a`.
+/// Both read and write accesses are forbidden.
+///
+/// * The total size `len * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
+/// See the safety documentation of [`pointer::offset`].
+///
+/// [valid]: ptr#safety
+/// [`NonNull::dangling()`]: ptr::NonNull::dangling
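+///
+/// # Examples
+///
+/// A minimal sketch, mirroring the shared variant above:
+///
+/// ```
+/// use std::slice;
+///
+/// let mut x = [1, 2, 3];
+/// let ptr = x.as_mut_ptr();
+/// // SAFETY: `ptr` points to 3 initialized, properly aligned elements, and
+/// // nothing else accesses them while the returned slice is alive.
+/// let slice = unsafe { slice::from_raw_parts_mut(ptr, 3) };
+/// slice[0] = 7;
+/// assert_eq!(x, [7, 2, 3]);
+/// ```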
+#[inline]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_slice_from_raw_parts_mut", issue = "67456")]
+#[must_use]
+pub const unsafe fn from_raw_parts_mut<'a, T>(data: *mut T, len: usize) -> &'a mut [T] {
+ // SAFETY: the caller must uphold the safety contract for `from_raw_parts_mut`.
+ unsafe {
+ assert_unsafe_precondition!(
+ is_aligned_and_not_null(data)
+ && crate::mem::size_of::<T>().saturating_mul(len) <= isize::MAX as usize
+ );
+ &mut *ptr::slice_from_raw_parts_mut(data, len)
+ }
+}
+
+/// Converts a reference to T into a slice of length 1 (without copying).
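+///
+/// A minimal usage sketch:
+///
+/// ```
+/// use std::slice;
+///
+/// let x = 7;
+/// assert_eq!(slice::from_ref(&x), &[7]);
+/// ```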
+#[stable(feature = "from_ref", since = "1.28.0")]
+#[rustc_const_stable(feature = "const_slice_from_ref_shared", since = "1.63.0")]
+#[must_use]
+pub const fn from_ref<T>(s: &T) -> &[T] {
+ array::from_ref(s)
+}
+
+/// Converts a mutable reference to T into a mutable slice of length 1 (without copying).
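+///
+/// A minimal usage sketch:
+///
+/// ```
+/// use std::slice;
+///
+/// let mut x = 7;
+/// slice::from_mut(&mut x)[0] = 8;
+/// assert_eq!(x, 8);
+/// ```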
+#[stable(feature = "from_ref", since = "1.28.0")]
+#[rustc_const_unstable(feature = "const_slice_from_ref", issue = "90206")]
+#[must_use]
+pub const fn from_mut<T>(s: &mut T) -> &mut [T] {
+ array::from_mut(s)
+}
+
+/// Forms a slice from a pointer range.
+///
+/// This function is useful for interacting with foreign interfaces which
+/// use two pointers to refer to a range of elements in memory, as is
+/// common in C++.
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * The `start` pointer of the range must be a [valid] and properly aligned pointer
+/// to the first element of a slice.
+///
+/// * The `end` pointer must be a [valid] and properly aligned pointer to *one past*
+/// the last element, such that the offset from the end to the start pointer is
+/// the length of the slice.
+///
+/// * The range must contain properly initialized values of type `T` over its entire length:
+///
+/// * The entire memory range of this slice must be contained within a single allocated object!
+/// Slices can never span across multiple allocated objects.
+///
+/// * The memory referenced by the returned slice must not be mutated for the duration
+/// of lifetime `'a`, except inside an `UnsafeCell`.
+///
+/// * The total length of the range must be no larger than `isize::MAX`.
+/// See the safety documentation of [`pointer::offset`].
+///
+/// Note that a range created from [`slice::as_ptr_range`] fulfills these requirements.
+///
+/// # Caveat
+///
+/// The lifetime for the returned slice is inferred from its usage. To
+/// prevent accidental misuse, it's suggested to tie the lifetime to whichever
+/// source lifetime is safe in the context, such as by providing a helper
+/// function taking the lifetime of a host value for the slice, or by explicit
+/// annotation.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(slice_from_ptr_range)]
+///
+/// use core::slice;
+///
+/// let x = [1, 2, 3];
+/// let range = x.as_ptr_range();
+///
+/// unsafe {
+/// assert_eq!(slice::from_ptr_range(range), &x);
+/// }
+/// ```
+///
+/// [valid]: ptr#safety
+#[unstable(feature = "slice_from_ptr_range", issue = "89792")]
+#[rustc_const_unstable(feature = "const_slice_from_ptr_range", issue = "89792")]
+pub const unsafe fn from_ptr_range<'a, T>(range: Range<*const T>) -> &'a [T] {
+ // SAFETY: the caller must uphold the safety contract for `from_ptr_range`.
+ unsafe { from_raw_parts(range.start, range.end.sub_ptr(range.start)) }
+}
+
+/// Performs the same functionality as [`from_ptr_range`], except that a
+/// mutable slice is returned.
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * The `start` pointer of the range must be a [valid] and properly aligned pointer
+/// to the first element of a slice.
+///
+/// * The `end` pointer must be a [valid] and properly aligned pointer to *one past*
+/// the last element, such that the offset from the end to the start pointer is
+/// the length of the slice.
+///
+/// * The range must contain properly initialized values of type `T` over its entire length:
+///
+/// * The entire memory range of this slice must be contained within a single allocated object!
+/// Slices can never span across multiple allocated objects.
+///
+/// * The memory referenced by the returned slice must not be accessed through any other pointer
+/// (not derived from the return value) for the duration of lifetime `'a`.
+/// Both read and write accesses are forbidden.
+///
+/// * The total length of the range must be no larger than `isize::MAX`.
+/// See the safety documentation of [`pointer::offset`].
+///
+/// Note that a range created from [`slice::as_mut_ptr_range`] fulfills these requirements.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(slice_from_ptr_range)]
+///
+/// use core::slice;
+///
+/// let mut x = [1, 2, 3];
+/// let range = x.as_mut_ptr_range();
+///
+/// unsafe {
+/// assert_eq!(slice::from_mut_ptr_range(range), &mut [1, 2, 3]);
+/// }
+/// ```
+///
+/// [valid]: ptr#safety
+#[unstable(feature = "slice_from_ptr_range", issue = "89792")]
+#[rustc_const_unstable(feature = "const_slice_from_mut_ptr_range", issue = "89792")]
+pub const unsafe fn from_mut_ptr_range<'a, T>(range: Range<*mut T>) -> &'a mut [T] {
+ // SAFETY: the caller must uphold the safety contract for `from_mut_ptr_range`.
+ unsafe { from_raw_parts_mut(range.start, range.end.sub_ptr(range.start)) }
+}
diff --git a/library/core/src/slice/rotate.rs b/library/core/src/slice/rotate.rs
new file mode 100644
index 000000000..4589c6c0f
--- /dev/null
+++ b/library/core/src/slice/rotate.rs
@@ -0,0 +1,234 @@
+use crate::cmp;
+use crate::mem::{self, MaybeUninit};
+use crate::ptr;
+
+/// Rotates the range `[mid-left, mid+right)` such that the element at `mid` becomes the first
+/// element. Equivalently, rotates the range `left` elements to the left or `right` elements to the
+/// right.
+///
+/// # Safety
+///
+/// The specified range must be valid for reading and writing.
+///
+/// # Algorithm
+///
+/// Algorithm 1 is used for small values of `left + right` or for large `T`. The elements are moved
+/// into their final positions one at a time starting at `mid - left` and advancing by `right` steps
+/// modulo `left + right`, such that only one temporary is needed. Eventually, we arrive back at
+/// `mid - left`. However, if `gcd(left + right, right)` is not 1, the above steps skipped over
+/// elements. For example:
+/// ```text
+/// left = 10, right = 6
+/// the `^` indicates an element in its final place
+/// 6 7 8 9 10 11 12 13 14 15 . 0 1 2 3 4 5
+/// after using one step of the above algorithm (The X will be overwritten at the end of the round,
+/// and 12 is stored in a temporary):
+/// X 7 8 9 10 11 6 13 14 15 . 0 1 2 3 4 5
+/// ^
+/// after using another step (now 2 is in the temporary):
+/// X 7 8 9 10 11 6 13 14 15 . 0 1 12 3 4 5
+/// ^ ^
+/// after the third step (the steps wrap around, and 8 is in the temporary):
+/// X 7 2 9 10 11 6 13 14 15 . 0 1 12 3 4 5
+/// ^ ^ ^
+/// after 7 more steps, the round ends with the temporary 0 getting put in the X:
+/// 0 7 2 9 4 11 6 13 8 15 . 10 1 12 3 14 5
+/// ^ ^ ^ ^ ^ ^ ^ ^
+/// ```
+/// Fortunately, the number of skipped over elements between finalized elements is always equal, so
+/// we can just offset our starting position and do more rounds (the total number of rounds is the
+/// `gcd(left + right, right)` value). The end result is that all elements are finalized once and
+/// only once.
+///
+/// Algorithm 2 is used if `left + right` is large but `min(left, right)` is small enough to
+/// fit onto a stack buffer. The `min(left, right)` elements are copied onto the buffer, `memmove`
+/// is applied to the others, and the ones on the buffer are moved back into the hole on the
+/// opposite side of where they originated.
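+///
+/// For instance, with `left = 3` and `right = 4` (an illustration in the same
+/// notation as the other diagrams):
+/// ```text
+/// [a b c . d e f g]    the three left elements are copied into buf = [a b c]
+/// [d e f g e f g]      after the memmove of the right side
+/// [d e f g a b c]      after copying buf back into the hole
+/// ```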
+///
+/// Algorithms that can be vectorized outperform the above once `left + right` becomes large enough.
+/// Algorithm 1 can be vectorized by chunking and performing many rounds at once, but there are too
+/// few rounds on average until `left + right` is enormous, and the worst case of a single
+/// round is always there. Instead, algorithm 3 utilizes repeated swapping of
+/// `min(left, right)` elements until a smaller rotate problem is left.
+///
+/// ```text
+/// left = 11, right = 4
+/// [4 5 6 7 8 9 10 11 12 13 14 . 0 1 2 3]
+/// ^ ^ ^ ^ ^ ^ ^ ^ swapping the right most elements with elements to the left
+/// [4 5 6 7 8 9 10 . 0 1 2 3] 11 12 13 14
+/// ^ ^ ^ ^ ^ ^ ^ ^ swapping these
+/// [4 5 6 . 0 1 2 3] 7 8 9 10 11 12 13 14
+/// we cannot swap any more, but a smaller rotation problem is left to solve
+/// ```
+/// When `left < right`, the swapping happens from the left instead.
+pub unsafe fn ptr_rotate<T>(mut left: usize, mut mid: *mut T, mut right: usize) {
+ type BufType = [usize; 32];
+ if mem::size_of::<T>() == 0 {
+ return;
+ }
+ loop {
+ // N.B. the below algorithms can fail if these cases are not checked
+ if (right == 0) || (left == 0) {
+ return;
+ }
+ if (left + right < 24) || (mem::size_of::<T>() > mem::size_of::<[usize; 4]>()) {
+ // Algorithm 1
+ // Microbenchmarks indicate that the average performance for random shifts is better all
+ // the way until about `left + right == 32`, but the worst case performance breaks even
+ // around 16. 24 was chosen as middle ground. If the size of `T` is larger than 4
+ // `usize`s, this algorithm also outperforms other algorithms.
+ // SAFETY: callers must ensure `mid - left` is valid for reading and writing.
+ let x = unsafe { mid.sub(left) };
+ // beginning of first round
+ // SAFETY: see previous comment.
+ let mut tmp: T = unsafe { x.read() };
+ let mut i = right;
+            // `gcd` can be found beforehand by calculating `gcd(left + right, right)`,
+ // but it is faster to do one loop which calculates the gcd as a side effect, then
+ // doing the rest of the chunk
+ let mut gcd = right;
+ // benchmarks reveal that it is faster to swap temporaries all the way through instead
+ // of reading one temporary once, copying backwards, and then writing that temporary at
+ // the very end. This is possibly due to the fact that swapping or replacing temporaries
+ // uses only one memory address in the loop instead of needing to manage two.
+ loop {
+ // [long-safety-expl]
+ // SAFETY: callers must ensure `[left, left+mid+right)` are all valid for reading and
+ // writing.
+ //
+            //  - `i` starts with `right` so `mid-left <= x+i = x+right = mid-left+right < mid+right`
+ // - `i <= left+right-1` is always true
+ // - if `i < left`, `right` is added so `i < left+right` and on the next
+ // iteration `left` is removed from `i` so it doesn't go further
+ // - if `i >= left`, `left` is removed immediately and so it doesn't go further.
+            //  - overflows cannot happen for `i` since the function's safety contract asks for
+            //    `mid+right-1 = x+left+right-1` to be valid for writing
+            //  - underflows cannot happen because `i` must be greater than or equal to `left` for
+ // a subtraction of `left` to happen.
+ //
+ // So `x+i` is valid for reading and writing if the caller respected the contract
+ tmp = unsafe { x.add(i).replace(tmp) };
+ // instead of incrementing `i` and then checking if it is outside the bounds, we
+ // check if `i` will go outside the bounds on the next increment. This prevents
+ // any wrapping of pointers or `usize`.
+ if i >= left {
+ i -= left;
+ if i == 0 {
+ // end of first round
+ // SAFETY: tmp has been read from a valid source and x is valid for writing
+ // according to the caller.
+ unsafe { x.write(tmp) };
+ break;
+ }
+ // this conditional must be here if `left + right >= 15`
+ if i < gcd {
+ gcd = i;
+ }
+ } else {
+ i += right;
+ }
+ }
+ // finish the chunk with more rounds
+ for start in 1..gcd {
+ // SAFETY: `gcd` is at most equal to `right` so all values in `1..gcd` are valid for
+ // reading and writing as per the function's safety contract, see [long-safety-expl]
+ // above
+ tmp = unsafe { x.add(start).read() };
+ // [safety-expl-addition]
+ //
+            // Here `start < gcd`, and `gcd` divides both `left` and `right` (being the
+            // greatest common divisor of `(left+right, right)`, it is also that of
+            // `(left, right)`), so `start < gcd <= left` and thus `i = start+right < left+right`,
+            // meaning `x+i = mid-left+i` is always valid for reading and writing
+            // according to the function's safety contract.
+ i = start + right;
+ loop {
+ // SAFETY: see [long-safety-expl] and [safety-expl-addition]
+ tmp = unsafe { x.add(i).replace(tmp) };
+ if i >= left {
+ i -= left;
+ if i == start {
+ // SAFETY: see [long-safety-expl] and [safety-expl-addition]
+ unsafe { x.add(start).write(tmp) };
+ break;
+ }
+ } else {
+ i += right;
+ }
+ }
+ }
+ return;
+ // `T` is not a zero-sized type, so it's okay to divide by its size.
+ } else if cmp::min(left, right) <= mem::size_of::<BufType>() / mem::size_of::<T>() {
+ // Algorithm 2
+ // The `[T; 0]` here is to ensure this is appropriately aligned for T
+ let mut rawarray = MaybeUninit::<(BufType, [T; 0])>::uninit();
+ let buf = rawarray.as_mut_ptr() as *mut T;
+ // SAFETY: `mid-left <= mid-left+right < mid+right`
+ let dim = unsafe { mid.sub(left).add(right) };
+ if left <= right {
+ // SAFETY:
+ //
+ // 1) The `else if` condition about the sizes ensures `[mid-left; left]` will fit in
+ // `buf` without overflow and `buf` was created just above and so cannot be
+ // overlapped with any value of `[mid-left; left]`
+ // 2) [mid-left, mid+right) are all valid for reading and writing and we don't care
+ // about overlaps here.
+ // 3) The `if` condition about `left <= right` ensures writing `left` elements to
+ // `dim = mid-left+right` is valid because:
+ // - `buf` is valid and `left` elements were written in it in 1)
+ // - `dim+left = mid-left+right+left = mid+right` and we write `[dim, dim+left)`
+ unsafe {
+ // 1)
+ ptr::copy_nonoverlapping(mid.sub(left), buf, left);
+ // 2)
+ ptr::copy(mid, mid.sub(left), right);
+ // 3)
+ ptr::copy_nonoverlapping(buf, dim, left);
+ }
+ } else {
+ // SAFETY: same reasoning as above but with `left` and `right` reversed
+ unsafe {
+ ptr::copy_nonoverlapping(mid, buf, right);
+ ptr::copy(mid.sub(left), dim, left);
+ ptr::copy_nonoverlapping(buf, mid.sub(left), right);
+ }
+ }
+ return;
+ } else if left >= right {
+ // Algorithm 3
+ // There is an alternate way of swapping that involves finding where the last swap
+ // of this algorithm would be, and swapping using that last chunk instead of swapping
+ // adjacent chunks like this algorithm is doing, but this way is still faster.
+ loop {
+ // SAFETY:
+ // `left >= right` so `[mid-right, mid+right)` is valid for reading and writing
+ // Subtracting `right` from `mid` each turn is counterbalanced by the addition and
+ // check after it.
+ unsafe {
+ ptr::swap_nonoverlapping(mid.sub(right), mid, right);
+ mid = mid.sub(right);
+ }
+ left -= right;
+ if left < right {
+ break;
+ }
+ }
+ } else {
+ // Algorithm 3, `left < right`
+ loop {
+ // SAFETY: `[mid-left, mid+left)` is valid for reading and writing because
+ // `left < right` so `mid+left < mid+right`.
+ // Adding `left` to `mid` each turn is counterbalanced by the subtraction and check
+ // after it.
+ unsafe {
+ ptr::swap_nonoverlapping(mid.sub(left), mid, left);
+ mid = mid.add(left);
+ }
+ right -= left;
+ if right < left {
+ break;
+ }
+ }
+ }
+ }
+}
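+
+// A minimal sketch of how a safe caller might drive `ptr_rotate` (a hypothetical
+// helper for illustration only; `[T]::rotate_left` does essentially this):
+//
+// fn rotate_left_sketch<T>(v: &mut [T], mid: usize) {
+//     assert!(mid <= v.len());
+//     // SAFETY: `mid` is in bounds, so the range `[mid - left, mid + right)`
+//     // is exactly the whole slice, which is valid for reading and writing.
+//     unsafe { ptr_rotate(mid, v.as_mut_ptr().add(mid), v.len() - mid) };
+// }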
diff --git a/library/core/src/slice/sort.rs b/library/core/src/slice/sort.rs
new file mode 100644
index 000000000..6a201834b
--- /dev/null
+++ b/library/core/src/slice/sort.rs
@@ -0,0 +1,929 @@
+//! Slice sorting
+//!
+//! This module contains a sorting algorithm based on Orson Peters' pattern-defeating quicksort,
+//! published at: <https://github.com/orlp/pdqsort>
+//!
+//! Unstable sorting is compatible with libcore because it doesn't allocate memory, unlike our
+//! stable sorting implementation.
+
+use crate::cmp;
+use crate::mem::{self, MaybeUninit};
+use crate::ptr;
+
+/// When dropped, copies from `src` into `dest`.
+struct CopyOnDrop<T> {
+ src: *const T,
+ dest: *mut T,
+}
+
+impl<T> Drop for CopyOnDrop<T> {
+ fn drop(&mut self) {
+        // SAFETY: This is a helper struct.
+        //         Please refer to its usage for correctness.
+        //         Namely, one must be sure that `src` and `dest` do not overlap as required by `ptr::copy_nonoverlapping`.
+ unsafe {
+ ptr::copy_nonoverlapping(self.src, self.dest, 1);
+ }
+ }
+}
+
+/// Shifts the first element to the right until it encounters a greater or equal element.
+fn shift_head<T, F>(v: &mut [T], is_less: &mut F)
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ let len = v.len();
+    // SAFETY: The unsafe operations below involve indexing without a bounds check (by offsetting a
+ // pointer) and copying memory (`ptr::copy_nonoverlapping`).
+ //
+ // a. Indexing:
+    //  1. We checked that the length of the slice is >= 2.
+ // 2. All the indexing that we will do is always between {0 <= index < len} at most.
+ //
+ // b. Memory copying
+ // 1. We are obtaining pointers to references which are guaranteed to be valid.
+    //  2. They cannot overlap because we obtain pointers to different indices of the slice.
+ // Namely, `i` and `i-1`.
+ // 3. If the slice is properly aligned, the elements are properly aligned.
+ // It is the caller's responsibility to make sure the slice is properly aligned.
+ //
+ // See comments below for further detail.
+ unsafe {
+ // If the first two elements are out-of-order...
+ if len >= 2 && is_less(v.get_unchecked(1), v.get_unchecked(0)) {
+ // Read the first element into a stack-allocated variable. If a following comparison
+ // operation panics, `hole` will get dropped and automatically write the element back
+ // into the slice.
+ let tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(0)));
+ let v = v.as_mut_ptr();
+ let mut hole = CopyOnDrop { src: &*tmp, dest: v.add(1) };
+ ptr::copy_nonoverlapping(v.add(1), v.add(0), 1);
+
+ for i in 2..len {
+ if !is_less(&*v.add(i), &*tmp) {
+ break;
+ }
+
+ // Move `i`-th element one place to the left, thus shifting the hole to the right.
+ ptr::copy_nonoverlapping(v.add(i), v.add(i - 1), 1);
+ hole.dest = v.add(i);
+ }
+ // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
+ }
+ }
+}
+
+/// Shifts the last element to the left until it encounters a smaller or equal element.
+fn shift_tail<T, F>(v: &mut [T], is_less: &mut F)
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ let len = v.len();
+    // SAFETY: The unsafe operations below involve indexing without a bounds check (by offsetting a
+ // pointer) and copying memory (`ptr::copy_nonoverlapping`).
+ //
+ // a. Indexing:
+    //  1. We checked that the length of the slice is >= 2.
+ // 2. All the indexing that we will do is always between `0 <= index < len-1` at most.
+ //
+ // b. Memory copying
+ // 1. We are obtaining pointers to references which are guaranteed to be valid.
+    //  2. They cannot overlap because we obtain pointers to different indices of the slice.
+ // Namely, `i` and `i+1`.
+ // 3. If the slice is properly aligned, the elements are properly aligned.
+ // It is the caller's responsibility to make sure the slice is properly aligned.
+ //
+ // See comments below for further detail.
+ unsafe {
+ // If the last two elements are out-of-order...
+ if len >= 2 && is_less(v.get_unchecked(len - 1), v.get_unchecked(len - 2)) {
+ // Read the last element into a stack-allocated variable. If a following comparison
+ // operation panics, `hole` will get dropped and automatically write the element back
+ // into the slice.
+ let tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(len - 1)));
+ let v = v.as_mut_ptr();
+ let mut hole = CopyOnDrop { src: &*tmp, dest: v.add(len - 2) };
+ ptr::copy_nonoverlapping(v.add(len - 2), v.add(len - 1), 1);
+
+ for i in (0..len - 2).rev() {
+ if !is_less(&*tmp, &*v.add(i)) {
+ break;
+ }
+
+ // Move `i`-th element one place to the right, thus shifting the hole to the left.
+ ptr::copy_nonoverlapping(v.add(i), v.add(i + 1), 1);
+ hole.dest = v.add(i);
+ }
+ // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
+ }
+ }
+}
+
+/// Partially sorts a slice by shifting several out-of-order elements around.
+///
+/// Returns `true` if the slice is sorted at the end. This function is *O*(*n*) worst-case.
+#[cold]
+fn partial_insertion_sort<T, F>(v: &mut [T], is_less: &mut F) -> bool
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ // Maximum number of adjacent out-of-order pairs that will get shifted.
+ const MAX_STEPS: usize = 5;
+ // If the slice is shorter than this, don't shift any elements.
+ const SHORTEST_SHIFTING: usize = 50;
+
+ let len = v.len();
+ let mut i = 1;
+
+ for _ in 0..MAX_STEPS {
+ // SAFETY: We already explicitly did the bounds checking with `i < len`.
+ // All our subsequent indexing is only in the range `0 <= index < len`.
+ unsafe {
+ // Find the next pair of adjacent out-of-order elements.
+ while i < len && !is_less(v.get_unchecked(i), v.get_unchecked(i - 1)) {
+ i += 1;
+ }
+ }
+
+ // Are we done?
+ if i == len {
+ return true;
+ }
+
+ // Don't shift elements on short arrays; that has a performance cost.
+ if len < SHORTEST_SHIFTING {
+ return false;
+ }
+
+ // Swap the found pair of elements. This puts them in correct order.
+ v.swap(i - 1, i);
+
+ // Shift the smaller element to the left.
+ shift_tail(&mut v[..i], is_less);
+ // Shift the greater element to the right.
+ shift_head(&mut v[i..], is_less);
+ }
+
+ // Didn't manage to sort the slice in the limited number of steps.
+ false
+}
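+
+// Behavior sketch: if the slice is already sorted, the scan reaches the end
+// and returns `true`; on slices shorter than `SHORTEST_SHIFTING`, the first
+// out-of-order pair makes it return `false` immediately; otherwise up to
+// `MAX_STEPS` out-of-order pairs are swapped and shifted into place.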
+
+/// Sorts a slice using insertion sort, which is *O*(*n*^2) worst-case.
+fn insertion_sort<T, F>(v: &mut [T], is_less: &mut F)
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ for i in 1..v.len() {
+ shift_tail(&mut v[..i + 1], is_less);
+ }
+}
+
+/// Sorts `v` using heapsort, which guarantees *O*(*n* \* log(*n*)) worst-case.
+#[cold]
+#[unstable(feature = "sort_internals", reason = "internal to sort module", issue = "none")]
+pub fn heapsort<T, F>(v: &mut [T], mut is_less: F)
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ // This binary heap respects the invariant `parent >= child`.
+ let mut sift_down = |v: &mut [T], mut node| {
+ loop {
+ // Children of `node`.
+ let mut child = 2 * node + 1;
+ if child >= v.len() {
+ break;
+ }
+
+ // Choose the greater child.
+ if child + 1 < v.len() && is_less(&v[child], &v[child + 1]) {
+ child += 1;
+ }
+
+ // Stop if the invariant holds at `node`.
+ if !is_less(&v[node], &v[child]) {
+ break;
+ }
+
+ // Swap `node` with the greater child, move one step down, and continue sifting.
+ v.swap(node, child);
+ node = child;
+ }
+ };
+
+ // Build the heap in linear time.
+ for i in (0..v.len() / 2).rev() {
+ sift_down(v, i);
+ }
+
+ // Pop maximal elements from the heap.
+ for i in (1..v.len()).rev() {
+ v.swap(0, i);
+ sift_down(&mut v[..i], 0);
+ }
+}
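+
+// Illustrative usage (hypothetical caller):
+//
+//     let mut v = [3, 1, 2];
+//     heapsort(&mut v, |a: &i32, b: &i32| a < b);
+//     assert_eq!(v, [1, 2, 3]);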
+
+/// Partitions `v` into elements smaller than `pivot`, followed by elements greater than or equal
+/// to `pivot`.
+///
+/// Returns the number of elements smaller than `pivot`.
+///
+/// Partitioning is performed block-by-block in order to minimize the cost of branching operations.
+/// This idea is presented in the [BlockQuicksort][pdf] paper.
+///
+/// [pdf]: https://drops.dagstuhl.de/opus/volltexte/2016/6389/pdf/LIPIcs-ESA-2016-38.pdf
+fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &mut F) -> usize
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ // Number of elements in a typical block.
+ const BLOCK: usize = 128;
+
+ // The partitioning algorithm repeats the following steps until completion:
+ //
+ // 1. Trace a block from the left side to identify elements greater than or equal to the pivot.
+ // 2. Trace a block from the right side to identify elements smaller than the pivot.
+ // 3. Exchange the identified elements between the left and right side.
+ //
+ // We keep the following variables for a block of elements:
+ //
+ // 1. `block` - Number of elements in the block.
+ // 2. `start` - Start pointer into the `offsets` array.
+ // 3. `end` - End pointer into the `offsets` array.
+ // 4. `offsets` - Indices of out-of-order elements within the block.
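+ //
+ // For example (hypothetical data, `BLOCK` shrunk to 4): with pivot `5`,
+ // scanning the left block `[7, 2, 9, 5]` records offsets `[0, 2, 3]` for the
+ // elements `>= 5`; scanning the right block `[1, 8, 3, 4]` records offsets
+ // `[0, 1, 3]` for the elements `< 5`, counted from the right; the swap phase
+ // then exchanges those three pairs of elements.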
+
+ // The current block on the left side (from `l` to `l.add(block_l)`).
+ let mut l = v.as_mut_ptr();
+ let mut block_l = BLOCK;
+ let mut start_l = ptr::null_mut();
+ let mut end_l = ptr::null_mut();
+ let mut offsets_l = [MaybeUninit::<u8>::uninit(); BLOCK];
+
+ // The current block on the right side (from `r.sub(block_r)` to `r`).
+ // SAFETY: The documentation for `.add()` specifically mentions that `vec.as_ptr().add(vec.len())` is always safe.
+ let mut r = unsafe { l.add(v.len()) };
+ let mut block_r = BLOCK;
+ let mut start_r = ptr::null_mut();
+ let mut end_r = ptr::null_mut();
+ let mut offsets_r = [MaybeUninit::<u8>::uninit(); BLOCK];
+
+ // FIXME: When we get VLAs, try creating one array of length `min(v.len(), 2 * BLOCK)` rather
+ // than two fixed-size arrays of length `BLOCK`. VLAs might be more cache-efficient.
+
+ // Returns the number of elements between pointers `l` (inclusive) and `r` (exclusive).
+ fn width<T>(l: *mut T, r: *mut T) -> usize {
+ assert!(mem::size_of::<T>() > 0);
+ // FIXME: this should *likely* use `offset_from`, but more
+ // investigation is needed (including running tests in miri).
+ (r.addr() - l.addr()) / mem::size_of::<T>()
+ }
+
+ loop {
+ // We are done with partitioning block-by-block when `l` and `r` get very close. Then we do
+ // some patch-up work in order to partition the remaining elements in between.
+ let is_done = width(l, r) <= 2 * BLOCK;
+
+ if is_done {
+ // Number of remaining elements (still not compared to the pivot).
+ let mut rem = width(l, r);
+ if start_l < end_l || start_r < end_r {
+ rem -= BLOCK;
+ }
+
+ // Adjust block sizes so that the left and right block don't overlap, but get perfectly
+ // aligned to cover the whole remaining gap.
+ if start_l < end_l {
+ block_r = rem;
+ } else if start_r < end_r {
+ block_l = rem;
+ } else {
+ // There were the same number of elements to switch on both blocks during the last
+ // iteration, so there are no remaining elements on either block. Cover the remaining
+ // items with roughly equally-sized blocks.
+ block_l = rem / 2;
+ block_r = rem - block_l;
+ }
+ debug_assert!(block_l <= BLOCK && block_r <= BLOCK);
+ debug_assert!(width(l, r) == block_l + block_r);
+ }
+
+ if start_l == end_l {
+ // Trace `block_l` elements from the left side.
+ start_l = MaybeUninit::slice_as_mut_ptr(&mut offsets_l);
+ end_l = start_l;
+ let mut elem = l;
+
+ for i in 0..block_l {
+ // SAFETY: The unsafe operations below involve the use of `offset`.
+ // According to the conditions required by the function, we satisfy them because:
+ // 1. `offsets_l` is stack-allocated, and thus considered a separate allocated object.
+ // 2. The function `is_less` returns a `bool`.
+ // Casting a `bool` will never overflow `isize`.
+ // 3. We have guaranteed that `block_l` will be `<= BLOCK`.
+ // Plus, `end_l` was initially set to the begin pointer of `offsets_l`, which was declared on the stack.
+ // Thus, we know that even in the worst case (all invocations of `is_less` return false) we will be at most 1 byte past the end.
+ // Another unsafe operation here is dereferencing `elem`.
+ // However, `elem` was initially the begin pointer of the slice, which is always valid.
+ unsafe {
+ // Branchless comparison.
+ *end_l = i as u8;
+ end_l = end_l.offset(!is_less(&*elem, pivot) as isize);
+ elem = elem.offset(1);
+ }
+ }
+ }
+
+ if start_r == end_r {
+ // Trace `block_r` elements from the right side.
+ start_r = MaybeUninit::slice_as_mut_ptr(&mut offsets_r);
+ end_r = start_r;
+ let mut elem = r;
+
+ for i in 0..block_r {
+ // SAFETY: The unsafe operations below involve the use of `offset`.
+ // According to the conditions required by the function, we satisfy them because:
+ // 1. `offsets_r` is stack-allocated, and thus considered a separate allocated object.
+ // 2. The function `is_less` returns a `bool`.
+ // Casting a `bool` will never overflow `isize`.
+ // 3. We have guaranteed that `block_r` will be `<= BLOCK`.
+ // Plus, `end_r` was initially set to the begin pointer of `offsets_r`, which was declared on the stack.
+ // Thus, we know that even in the worst case (all invocations of `is_less` return true) we will be at most 1 byte past the end.
+ // Another unsafe operation here is dereferencing `elem`.
+ // However, `elem` was initially `1 * sizeof(T)` past the end, and we decrement it by `1 * sizeof(T)` before accessing it.
+ // Plus, `block_r` was asserted to be `<= BLOCK`, so `elem` is never decremented past the beginning of the slice.
+ unsafe {
+ // Branchless comparison.
+ elem = elem.offset(-1);
+ *end_r = i as u8;
+ end_r = end_r.offset(is_less(&*elem, pivot) as isize);
+ }
+ }
+ }
+
+ // Number of out-of-order elements to swap between the left and right side.
+ let count = cmp::min(width(start_l, end_l), width(start_r, end_r));
+
+ if count > 0 {
+ macro_rules! left {
+ () => {
+ l.offset(*start_l as isize)
+ };
+ }
+ macro_rules! right {
+ () => {
+ r.offset(-(*start_r as isize) - 1)
+ };
+ }
+
+ // Instead of swapping one pair at a time, it is more efficient to perform a cyclic
+ // permutation. This is not strictly equivalent to swapping, but produces a similar
+ // result using fewer memory operations.
+
+ // SAFETY: The use of `ptr::read` is valid because there is at least one element in
+ // both `offsets_l` and `offsets_r`, so `left!` is a valid pointer to read from.
+ //
+ // The uses of `left!` involve calls to `offset` on `l`, which points to the
+ // beginning of `v`. All the offsets pointed-to by `start_l` are at most `block_l`, so
+ // these `offset` calls are safe as all reads are within the block. The same argument
+ // applies for the uses of `right!`.
+ //
+ // The calls to `start_l.offset` are valid because there are at most `count-1` of them,
+ // plus the final one at the end of the unsafe block, where `count` is the minimum number
+ // of collected offsets in `offsets_l` and `offsets_r`, so there is no risk of there not
+ // being enough elements. The same reasoning applies to the calls to `start_r.offset`.
+ //
+ // The calls to `copy_nonoverlapping` are safe because `left!` and `right!` are guaranteed
+ // not to overlap, and are valid because of the reasoning above.
+ unsafe {
+ let tmp = ptr::read(left!());
+ ptr::copy_nonoverlapping(right!(), left!(), 1);
+
+ for _ in 1..count {
+ start_l = start_l.offset(1);
+ ptr::copy_nonoverlapping(left!(), right!(), 1);
+ start_r = start_r.offset(1);
+ ptr::copy_nonoverlapping(right!(), left!(), 1);
+ }
+
+ ptr::copy_nonoverlapping(&tmp, right!(), 1);
+ mem::forget(tmp);
+ start_l = start_l.offset(1);
+ start_r = start_r.offset(1);
+ }
+ }
+
+ if start_l == end_l {
+ // All out-of-order elements in the left block were moved. Move to the next block.
+
+ // block-width-guarantee
+ // SAFETY: if `!is_done` then the slice width is guaranteed to be at least `2*BLOCK` wide. There
+ // are at most `BLOCK` elements in `offsets_l` because of its size, so the `offset` operation is
+ // safe. Otherwise, the debug assertions in the `is_done` case guarantee that
+ // `width(l, r) == block_l + block_r`, namely, that the block sizes have been adjusted to account
+ // for the smaller number of remaining elements.
+ l = unsafe { l.offset(block_l as isize) };
+ }
+
+ if start_r == end_r {
+ // All out-of-order elements in the right block were moved. Move to the previous block.
+
+ // SAFETY: Same argument as [block-width-guarantee]. Either this is a full block `2*BLOCK`-wide,
+ // or `block_r` has been adjusted for the last handful of elements.
+ r = unsafe { r.offset(-(block_r as isize)) };
+ }
+
+ if is_done {
+ break;
+ }
+ }
+
+ // All that remains now is at most one block (either the left or the right) with out-of-order
+ // elements that need to be moved. Such remaining elements can be simply shifted to the end
+ // within their block.
+
+ if start_l < end_l {
+ // The left block remains.
+ // Move its remaining out-of-order elements to the far right.
+ debug_assert_eq!(width(l, r), block_l);
+ while start_l < end_l {
+ // remaining-elements-safety
+ // SAFETY: while the loop condition holds there are still elements in `offsets_l`, so it
+ // is safe to point `end_l` to the previous element.
+ //
+ // The `ptr::swap` is safe if both its arguments are valid for reads and writes:
+ // - Per the debug assert above, the distance between `l` and `r` is `block_l`
+ // elements, so there can be at most `block_l` remaining offsets between `start_l`
+ // and `end_l`. This means `r` will be moved at most `block_l` steps back, which
+ // makes the `r.offset` calls valid (at that point `l == r`).
+ // - `offsets_l` contains valid offsets into `v` collected during the partitioning of
+ // the last block, so the `l.offset` calls are valid.
+ unsafe {
+ end_l = end_l.offset(-1);
+ ptr::swap(l.offset(*end_l as isize), r.offset(-1));
+ r = r.offset(-1);
+ }
+ }
+ width(v.as_mut_ptr(), r)
+ } else if start_r < end_r {
+ // The right block remains.
+ // Move its remaining out-of-order elements to the far left.
+ debug_assert_eq!(width(l, r), block_r);
+ while start_r < end_r {
+ // SAFETY: See the reasoning in [remaining-elements-safety].
+ unsafe {
+ end_r = end_r.offset(-1);
+ ptr::swap(l, r.offset(-(*end_r as isize) - 1));
+ l = l.offset(1);
+ }
+ }
+ width(v.as_mut_ptr(), l)
+ } else {
+ // Nothing else to do, we're done.
+ width(v.as_mut_ptr(), l)
+ }
+}
+
+/// Partitions `v` into elements smaller than `v[pivot]`, followed by elements greater than or
+/// equal to `v[pivot]`.
+///
+/// Returns a tuple of:
+///
+/// 1. Number of elements smaller than `v[pivot]`.
+/// 2. True if `v` was already partitioned.
+fn partition<T, F>(v: &mut [T], pivot: usize, is_less: &mut F) -> (usize, bool)
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ let (mid, was_partitioned) = {
+ // Place the pivot at the beginning of slice.
+ v.swap(0, pivot);
+ let (pivot, v) = v.split_at_mut(1);
+ let pivot = &mut pivot[0];
+
+ // Read the pivot into a stack-allocated variable for efficiency. If a following comparison
+ // operation panics, the pivot will be automatically written back into the slice.
+
+ // SAFETY: `pivot` is a reference to the first element of `v`, so `ptr::read` is safe.
+ let tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
+ let _pivot_guard = CopyOnDrop { src: &*tmp, dest: pivot };
+ let pivot = &*tmp;
+
+ // Find the first pair of out-of-order elements.
+ let mut l = 0;
+ let mut r = v.len();
+
+ // SAFETY: The unsafe code below involves indexing into the slice.
+ // For the first loop: we already do the bounds checking here with `l < r`.
+ // For the second loop: we initially have `l == 0` and `r == v.len()`, and we check `l < r` before every indexing operation,
+ // so `r` can decrease at most to `l`, which the first argument showed to be a valid index.
+ unsafe {
+ // Find the first element greater than or equal to the pivot.
+ while l < r && is_less(v.get_unchecked(l), pivot) {
+ l += 1;
+ }
+
+ // Find the last element smaller than the pivot.
+ while l < r && !is_less(v.get_unchecked(r - 1), pivot) {
+ r -= 1;
+ }
+ }
+
+ (l + partition_in_blocks(&mut v[l..r], pivot, is_less), l >= r)
+
+ // `_pivot_guard` goes out of scope and writes the pivot (which is a stack-allocated
+ // variable) back into the slice where it originally was. This step is critical in ensuring
+ // safety!
+ };
+
+ // Place the pivot between the two partitions.
+ v.swap(0, mid);
+
+ (mid, was_partitioned)
+}
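+
+// Contract illustration (hypothetical input): partitioning `[3, 1, 4, 1, 5]`
+// around the pivot value `4` returns `mid == 3`, leaving `3, 1, 1` (in some
+// order) before the pivot and `5` after it.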
+
+/// Partitions `v` into elements equal to `v[pivot]` followed by elements greater than `v[pivot]`.
+///
+/// Returns the number of elements equal to the pivot. It is assumed that `v` does not contain
+/// elements smaller than the pivot.
+fn partition_equal<T, F>(v: &mut [T], pivot: usize, is_less: &mut F) -> usize
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ // Place the pivot at the beginning of slice.
+ v.swap(0, pivot);
+ let (pivot, v) = v.split_at_mut(1);
+ let pivot = &mut pivot[0];
+
+ // Read the pivot into a stack-allocated variable for efficiency. If a following comparison
+ // operation panics, the pivot will be automatically written back into the slice.
+ // SAFETY: The pointer here is valid because it is obtained from a reference to a slice.
+ let tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
+ let _pivot_guard = CopyOnDrop { src: &*tmp, dest: pivot };
+ let pivot = &*tmp;
+
+ // Now partition the slice.
+ let mut l = 0;
+ let mut r = v.len();
+ loop {
+ // SAFETY: The unsafe code below involves indexing into the slice.
+ // For the first loop: we already do the bounds checking here with `l < r`.
+ // For the second loop: we initially have `l == 0` and `r == v.len()`, and we check `l < r` before every indexing operation,
+ // so `r` can decrease at most to `l`, which the first argument showed to be a valid index.
+ unsafe {
+ // Find the first element greater than the pivot.
+ while l < r && !is_less(pivot, v.get_unchecked(l)) {
+ l += 1;
+ }
+
+ // Find the last element equal to the pivot.
+ while l < r && is_less(pivot, v.get_unchecked(r - 1)) {
+ r -= 1;
+ }
+
+ // Are we done?
+ if l >= r {
+ break;
+ }
+
+ // Swap the found pair of out-of-order elements.
+ r -= 1;
+ let ptr = v.as_mut_ptr();
+ ptr::swap(ptr.add(l), ptr.add(r));
+ l += 1;
+ }
+ }
+
+ // We found `l` elements equal to the pivot. Add 1 to account for the pivot itself.
+ l + 1
+
+ // `_pivot_guard` goes out of scope and writes the pivot (which is a stack-allocated variable)
+ // back into the slice where it originally was. This step is critical in ensuring safety!
+}
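+
+// Contract illustration (hypothetical input): for `v = [2, 5, 2, 3, 2]` with
+// the pivot value `2` (the smallest element), the three `2`s end up at the
+// front and the function returns `3`.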
+
+/// Scatters some elements around in an attempt to break patterns that might cause imbalanced
+/// partitions in quicksort.
+#[cold]
+fn break_patterns<T>(v: &mut [T]) {
+ let len = v.len();
+ if len >= 8 {
+ // Pseudorandom number generator from the "Xorshift RNGs" paper by George Marsaglia.
+ let mut random = len as u32;
+ let mut gen_u32 = || {
+ random ^= random << 13;
+ random ^= random >> 17;
+ random ^= random << 5;
+ random
+ };
+ let mut gen_usize = || {
+ if usize::BITS <= 32 {
+ gen_u32() as usize
+ } else {
+ (((gen_u32() as u64) << 32) | (gen_u32() as u64)) as usize
+ }
+ };
+
+ // Take random numbers modulo this number.
+ // The number fits into `usize` because `len` is not greater than `isize::MAX`.
+ let modulus = len.next_power_of_two();
+
+ // Some pivot candidates will be in the vicinity of this index. Let's randomize them.
+ let pos = len / 4 * 2;
+
+ for i in 0..3 {
+ // Generate a random number modulo `len`. However, in order to avoid costly operations
+ // we first take it modulo a power of two, and then decrease by `len` until it fits
+ // into the range `[0, len - 1]`.
+ let mut other = gen_usize() & (modulus - 1);
+
+ // `other` is guaranteed to be less than `2 * len`.
+ if other >= len {
+ other -= len;
+ }
+
+ v.swap(pos - 1 + i, other);
+ }
+ }
+}
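+
+// Worked example (hypothetical length): for `len = 20`, `modulus` is `32` and
+// `pos` is `10`, so the elements at indices `9`, `10` and `11` are swapped
+// with xorshift-chosen partners in `[0, len - 1]`.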
+
+/// Chooses a pivot in `v` and returns the index and `true` if the slice is likely already sorted.
+///
+/// Elements in `v` might be reordered in the process.
+fn choose_pivot<T, F>(v: &mut [T], is_less: &mut F) -> (usize, bool)
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ // Minimum length to choose the median-of-medians method.
+ // Shorter slices use the simple median-of-three method.
+ const SHORTEST_MEDIAN_OF_MEDIANS: usize = 50;
+ // Maximum number of swaps that can be performed in this function.
+ const MAX_SWAPS: usize = 4 * 3;
+
+ let len = v.len();
+
+ // Three indices near which we are going to choose a pivot.
+ let mut a = len / 4 * 1;
+ let mut b = len / 4 * 2;
+ let mut c = len / 4 * 3;
+
+ // Counts the total number of swaps we are about to perform while sorting indices.
+ let mut swaps = 0;
+
+ if len >= 8 {
+ // Swaps indices so that `v[a] <= v[b]`.
+ // SAFETY: `len >= 8` so there are at least two elements in the neighborhoods of
+ // `a`, `b` and `c`. This means the three calls to `sort_adjacent` result in
+ // corresponding calls to `sort3` with valid 3-item neighborhoods around each
+ // pointer, which in turn means the calls to `sort2` are done with valid
+ // references. Thus the `v.get_unchecked` calls are safe, as is the `ptr::swap`
+ // call.
+ let mut sort2 = |a: &mut usize, b: &mut usize| unsafe {
+ if is_less(v.get_unchecked(*b), v.get_unchecked(*a)) {
+ ptr::swap(a, b);
+ swaps += 1;
+ }
+ };
+
+ // Swaps indices so that `v[a] <= v[b] <= v[c]`.
+ let mut sort3 = |a: &mut usize, b: &mut usize, c: &mut usize| {
+ sort2(a, b);
+ sort2(b, c);
+ sort2(a, b);
+ };
+
+ if len >= SHORTEST_MEDIAN_OF_MEDIANS {
+ // Finds the median of `v[a - 1], v[a], v[a + 1]` and stores the index into `a`.
+ let mut sort_adjacent = |a: &mut usize| {
+ let tmp = *a;
+ sort3(&mut (tmp - 1), a, &mut (tmp + 1));
+ };
+
+ // Find medians in the neighborhoods of `a`, `b`, and `c`.
+ sort_adjacent(&mut a);
+ sort_adjacent(&mut b);
+ sort_adjacent(&mut c);
+ }
+
+ // Find the median among `a`, `b`, and `c`.
+ sort3(&mut a, &mut b, &mut c);
+ }
+
+ if swaps < MAX_SWAPS {
+ (b, swaps == 0)
+ } else {
+ // The maximum number of swaps was performed. Chances are the slice is descending or mostly
+ // descending, so reversing will probably help sort it faster.
+ v.reverse();
+ (len - 1 - b, true)
+ }
+}
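+
+// Worked example (hypothetical length): for `len = 100`, the candidate indices
+// start at `25`, `50` and `75`; since `len >= SHORTEST_MEDIAN_OF_MEDIANS`,
+// each is first replaced by the median of its three-element neighborhood, and
+// the median of those three medians becomes the pivot.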
+
+/// Sorts `v` recursively.
+///
+/// If the slice had a predecessor in the original array, it is specified as `pred`.
+///
+/// `limit` is the number of allowed imbalanced partitions before switching to `heapsort`. If zero,
+/// this function will immediately switch to heapsort.
+fn recurse<'a, T, F>(mut v: &'a mut [T], is_less: &mut F, mut pred: Option<&'a T>, mut limit: u32)
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ // Slices of up to this length get sorted using insertion sort.
+ const MAX_INSERTION: usize = 20;
+
+ // True if the last partitioning was reasonably balanced.
+ let mut was_balanced = true;
+ // True if the last partitioning didn't shuffle elements (the slice was already partitioned).
+ let mut was_partitioned = true;
+
+ loop {
+ let len = v.len();
+
+ // Very short slices get sorted using insertion sort.
+ if len <= MAX_INSERTION {
+ insertion_sort(v, is_less);
+ return;
+ }
+
+ // If too many bad pivot choices were made, simply fall back to heapsort in order to
+ // guarantee `O(n * log(n))` worst-case.
+ if limit == 0 {
+ heapsort(v, is_less);
+ return;
+ }
+
+ // If the last partitioning was imbalanced, try breaking patterns in the slice by shuffling
+ // some elements around. Hopefully we'll choose a better pivot this time.
+ if !was_balanced {
+ break_patterns(v);
+ limit -= 1;
+ }
+
+ // Choose a pivot and try guessing whether the slice is already sorted.
+ let (pivot, likely_sorted) = choose_pivot(v, is_less);
+
+ // If the last partitioning was decently balanced and didn't shuffle elements, and if pivot
+ // selection predicts the slice is likely already sorted...
+ if was_balanced && was_partitioned && likely_sorted {
+ // Try identifying several out-of-order elements and shifting them to correct
+ // positions. If the slice ends up being completely sorted, we're done.
+ if partial_insertion_sort(v, is_less) {
+ return;
+ }
+ }
+
+ // If the chosen pivot is equal to the predecessor, then it's the smallest element in the
+ // slice. Partition the slice into elements equal to and elements greater than the pivot.
+ // This case is usually hit when the slice contains many duplicate elements.
+ if let Some(p) = pred {
+ if !is_less(p, &v[pivot]) {
+ let mid = partition_equal(v, pivot, is_less);
+
+ // Continue sorting elements greater than the pivot.
+ v = &mut v[mid..];
+ continue;
+ }
+ }
+
+ // Partition the slice.
+ let (mid, was_p) = partition(v, pivot, is_less);
+ was_balanced = cmp::min(mid, len - mid) >= len / 8;
+ was_partitioned = was_p;
+
+ // Split the slice into `left`, `pivot`, and `right`.
+ let (left, right) = v.split_at_mut(mid);
+ let (pivot, right) = right.split_at_mut(1);
+ let pivot = &pivot[0];
+
+ // Recurse into the shorter side only in order to minimize the total number of recursive
+ // calls and consume less stack space. Then just continue with the longer side (this is
+ // akin to tail recursion).
+ if left.len() < right.len() {
+ recurse(left, is_less, pred, limit);
+ v = right;
+ pred = Some(pivot);
+ } else {
+ recurse(right, is_less, Some(pivot), limit);
+ v = left;
+ }
+ }
+}
+
+/// Sorts `v` using pattern-defeating quicksort, which is *O*(*n* \* log(*n*)) worst-case.
+pub fn quicksort<T, F>(v: &mut [T], mut is_less: F)
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ // Sorting has no meaningful behavior on zero-sized types.
+ if mem::size_of::<T>() == 0 {
+ return;
+ }
+
+ // Limit the number of imbalanced partitions to `floor(log2(len)) + 1`.
+ let limit = usize::BITS - v.len().leading_zeros();
+
+ recurse(v, &mut is_less, None, limit);
+}
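+
+// Illustrative usage (hypothetical caller):
+//
+//     let mut v = [9, 4, 7, 1];
+//     quicksort(&mut v, |a: &i32, b: &i32| a < b);
+//     assert_eq!(v, [1, 4, 7, 9]);
+//
+// For `len = 1000`, `limit` is `usize::BITS - 1000usize.leading_zeros() = 10`,
+// i.e. `floor(log2(1000)) + 1`.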
+
+fn partition_at_index_loop<'a, T, F>(
+ mut v: &'a mut [T],
+ mut index: usize,
+ is_less: &mut F,
+ mut pred: Option<&'a T>,
+) where
+ F: FnMut(&T, &T) -> bool,
+{
+ loop {
+ // For slices of up to this length it's probably faster to simply sort them.
+ const MAX_INSERTION: usize = 10;
+ if v.len() <= MAX_INSERTION {
+ insertion_sort(v, is_less);
+ return;
+ }
+
+ // Choose a pivot
+ let (pivot, _) = choose_pivot(v, is_less);
+
+ // If the chosen pivot is equal to the predecessor, then it's the smallest element in the
+ // slice. Partition the slice into elements equal to and elements greater than the pivot.
+ // This case is usually hit when the slice contains many duplicate elements.
+ if let Some(p) = pred {
+ if !is_less(p, &v[pivot]) {
+ let mid = partition_equal(v, pivot, is_less);
+
+ // If we've passed our index, then we're good.
+ if mid > index {
+ return;
+ }
+
+ // Otherwise, continue sorting elements greater than the pivot.
+ v = &mut v[mid..];
+ index = index - mid;
+ pred = None;
+ continue;
+ }
+ }
+
+ let (mid, _) = partition(v, pivot, is_less);
+
+ // Split the slice into `left`, `pivot`, and `right`.
+ let (left, right) = v.split_at_mut(mid);
+ let (pivot, right) = right.split_at_mut(1);
+ let pivot = &pivot[0];
+
+ if mid < index {
+ v = right;
+ index = index - mid - 1;
+ pred = Some(pivot);
+ } else if mid > index {
+ v = left;
+ } else {
+ // If mid == index, then we're done, since partition() guaranteed that all elements
+ // after mid are greater than or equal to the element at mid.
+ return;
+ }
+ }
+}
+
+pub fn partition_at_index<T, F>(
+ v: &mut [T],
+ index: usize,
+ mut is_less: F,
+) -> (&mut [T], &mut T, &mut [T])
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ use cmp::Ordering::Greater;
+ use cmp::Ordering::Less;
+
+ if index >= v.len() {
+ panic!("partition_at_index index {} greater than length of slice {}", index, v.len());
+ }
+
+ if mem::size_of::<T>() == 0 {
+ // Sorting has no meaningful behavior on zero-sized types. Do nothing.
+ } else if index == v.len() - 1 {
+ // Find max element and place it in the last position of the array. We're free to use
+ // `unwrap()` here because we know v must not be empty.
+ let (max_index, _) = v
+ .iter()
+ .enumerate()
+ .max_by(|&(_, x), &(_, y)| if is_less(x, y) { Less } else { Greater })
+ .unwrap();
+ v.swap(max_index, index);
+ } else if index == 0 {
+ // Find min element and place it in the first position of the array. We're free to use
+ // `unwrap()` here because we know v must not be empty.
+ let (min_index, _) = v
+ .iter()
+ .enumerate()
+ .min_by(|&(_, x), &(_, y)| if is_less(x, y) { Less } else { Greater })
+ .unwrap();
+ v.swap(min_index, index);
+ } else {
+ partition_at_index_loop(v, index, &mut is_less, None);
+ }
+
+ let (left, right) = v.split_at_mut(index);
+ let (pivot, right) = right.split_at_mut(1);
+ let pivot = &mut pivot[0];
+ (left, pivot, right)
+}
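+
+// Illustrative usage (hypothetical caller), selecting the median of five
+// elements without fully sorting:
+//
+//     let mut v = [5, 1, 4, 2, 3];
+//     let (left, median, right) = partition_at_index(&mut v, 2, |a, b| a < b);
+//     assert_eq!(*median, 3);
+//     // `left` holds two elements <= 3, `right` holds two elements >= 3.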
diff --git a/library/core/src/slice/specialize.rs b/library/core/src/slice/specialize.rs
new file mode 100644
index 000000000..80eb59058
--- /dev/null
+++ b/library/core/src/slice/specialize.rs
@@ -0,0 +1,23 @@
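+//! Private specialization of `[T]::fill`.
+//!
+//! Sketch of the dispatch: the generic impl clones `value` into every element
+//! (moving it into the last slot to avoid one extra clone), while the
+//! overlapping `T: Copy` impl is selected by specialization and writes plain
+//! copies instead.
+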
+pub(super) trait SpecFill<T> {
+ fn spec_fill(&mut self, value: T);
+}
+
+impl<T: Clone> SpecFill<T> for [T] {
+ default fn spec_fill(&mut self, value: T) {
+ if let Some((last, elems)) = self.split_last_mut() {
+ for el in elems {
+ el.clone_from(&value);
+ }
+
+ *last = value
+ }
+ }
+}
+
+impl<T: Copy> SpecFill<T> for [T] {
+ fn spec_fill(&mut self, value: T) {
+ for item in self.iter_mut() {
+ *item = value;
+ }
+ }
+}
diff --git a/library/core/src/str/converts.rs b/library/core/src/str/converts.rs
new file mode 100644
index 000000000..b0c55ca4f
--- /dev/null
+++ b/library/core/src/str/converts.rs
@@ -0,0 +1,203 @@
+//! Ways to create a `str` from a byte slice.
+
+use crate::mem;
+
+use super::validations::run_utf8_validation;
+use super::Utf8Error;
+
+/// Converts a slice of bytes to a string slice.
+///
+/// A string slice ([`&str`]) is made of bytes ([`u8`]), and a byte slice
+/// ([`&[u8]`][byteslice]) is made of bytes, so this function converts between
+/// the two. Not all byte slices are valid string slices, however: [`&str`] requires
+/// that it is valid UTF-8. `from_utf8()` checks to ensure that the bytes are valid
+/// UTF-8, and then does the conversion.
+///
+/// [`&str`]: str
+/// [byteslice]: slice
+///
+/// If you are sure that the byte slice is valid UTF-8, and you don't want to
+/// incur the overhead of the validity check, there is an unsafe version of
+/// this function, [`from_utf8_unchecked`], which has the same
+/// behavior but skips the check.
+///
+/// If you need a `String` instead of a `&str`, consider
+/// [`String::from_utf8`][string].
+///
+/// [string]: ../../std/string/struct.String.html#method.from_utf8
+///
+/// Because you can stack-allocate a `[u8; N]`, and you can take a
+/// [`&[u8]`][byteslice] of it, this function is one way to have a
+/// stack-allocated string. There is an example of this in the
+/// examples section below.
+///
+/// [byteslice]: slice
+///
+/// # Errors
+///
+/// Returns `Err` if the slice is not UTF-8 with a description as to why the
+/// provided slice is not UTF-8.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::str;
+///
+/// // some bytes, in a vector
+/// let sparkle_heart = vec![240, 159, 146, 150];
+///
+/// // We know these bytes are valid, so just use `unwrap()`.
+/// let sparkle_heart = str::from_utf8(&sparkle_heart).unwrap();
+///
+/// assert_eq!("💖", sparkle_heart);
+/// ```
+///
+/// Incorrect bytes:
+///
+/// ```
+/// use std::str;
+///
+/// // some invalid bytes, in a vector
+/// let sparkle_heart = vec![0, 159, 146, 150];
+///
+/// assert!(str::from_utf8(&sparkle_heart).is_err());
+/// ```
+///
+/// See the docs for [`Utf8Error`] for more details on the kinds of
+/// errors that can be returned.
+///
+/// A "stack allocated string":
+///
+/// ```
+/// use std::str;
+///
+/// // some bytes, in a stack-allocated array
+/// let sparkle_heart = [240, 159, 146, 150];
+///
+/// // We know these bytes are valid, so just use `unwrap()`.
+/// let sparkle_heart = str::from_utf8(&sparkle_heart).unwrap();
+///
+/// assert_eq!("💖", sparkle_heart);
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_stable(feature = "const_str_from_utf8_shared", since = "1.63.0")]
+#[rustc_allow_const_fn_unstable(str_internals)]
+pub const fn from_utf8(v: &[u8]) -> Result<&str, Utf8Error> {
+ // FIXME: This should use `?` again, once it's `const`
+ match run_utf8_validation(v) {
+ Ok(_) => {
+ // SAFETY: validation succeeded.
+ Ok(unsafe { from_utf8_unchecked(v) })
+ }
+ Err(err) => Err(err),
+ }
+}
+
+/// Converts a mutable slice of bytes to a mutable string slice.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::str;
+///
+/// // "Hello, Rust!" as a mutable vector
+/// let mut hellorust = vec![72, 101, 108, 108, 111, 44, 32, 82, 117, 115, 116, 33];
+///
+/// // As we know these bytes are valid, we can use `unwrap()`
+/// let outstr = str::from_utf8_mut(&mut hellorust).unwrap();
+///
+/// assert_eq!("Hello, Rust!", outstr);
+/// ```
+///
+/// Incorrect bytes:
+///
+/// ```
+/// use std::str;
+///
+/// // Some invalid bytes in a mutable vector
+/// let mut invalid = vec![128, 223];
+///
+/// assert!(str::from_utf8_mut(&mut invalid).is_err());
+/// ```
+///
+/// See the docs for [`Utf8Error`] for more details on the kinds of
+/// errors that can be returned.
+#[stable(feature = "str_mut_extras", since = "1.20.0")]
+#[rustc_const_unstable(feature = "const_str_from_utf8", issue = "91006")]
+pub const fn from_utf8_mut(v: &mut [u8]) -> Result<&mut str, Utf8Error> {
+ // FIXME: This should use `?` again, once it's `const`
+ match run_utf8_validation(v) {
+ Ok(_) => {
+ // SAFETY: validation succeeded.
+ Ok(unsafe { from_utf8_unchecked_mut(v) })
+ }
+ Err(err) => Err(err),
+ }
+}
+
+/// Converts a slice of bytes to a string slice without checking
+/// that the string contains valid UTF-8.
+///
+/// See the safe version, [`from_utf8`], for more information.
+///
+/// # Safety
+///
+/// The bytes passed in must be valid UTF-8.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::str;
+///
+/// // some bytes, in a vector
+/// let sparkle_heart = vec![240, 159, 146, 150];
+///
+/// let sparkle_heart = unsafe {
+/// str::from_utf8_unchecked(&sparkle_heart)
+/// };
+///
+/// assert_eq!("💖", sparkle_heart);
+/// ```
+#[inline]
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_stable(feature = "const_str_from_utf8_unchecked", since = "1.55.0")]
+pub const unsafe fn from_utf8_unchecked(v: &[u8]) -> &str {
+ // SAFETY: the caller must guarantee that the bytes `v` are valid UTF-8.
+ // Also relies on `&str` and `&[u8]` having the same layout.
+ unsafe { mem::transmute(v) }
+}
+
+/// Converts a slice of bytes to a string slice without checking
+/// that the string contains valid UTF-8; mutable version.
+///
+/// See the immutable version, [`from_utf8_unchecked()`] for more information.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::str;
+///
+/// let mut heart = vec![240, 159, 146, 150];
+/// let heart = unsafe { str::from_utf8_unchecked_mut(&mut heart) };
+///
+/// assert_eq!("💖", heart);
+/// ```
+#[inline]
+#[must_use]
+#[stable(feature = "str_mut_extras", since = "1.20.0")]
+#[rustc_const_unstable(feature = "const_str_from_utf8_unchecked_mut", issue = "91005")]
+pub const unsafe fn from_utf8_unchecked_mut(v: &mut [u8]) -> &mut str {
+ // SAFETY: the caller must guarantee that the bytes `v`
+ // are valid UTF-8, thus the cast to `*mut str` is safe.
+ // Also, the pointer dereference is safe because that pointer
+ // comes from a reference which is guaranteed to be valid for writes.
+ unsafe { &mut *(v as *mut [u8] as *mut str) }
+}
diff --git a/library/core/src/str/count.rs b/library/core/src/str/count.rs
new file mode 100644
index 000000000..28567a7e7
--- /dev/null
+++ b/library/core/src/str/count.rs
@@ -0,0 +1,136 @@
+//! Code for efficiently counting the number of `char`s in a UTF-8 encoded
+//! string.
+//!
+//! Broadly, UTF-8 encodes `char`s as a "leading" byte which begins the `char`,
+//! followed by some number (possibly 0) of continuation bytes.
+//!
+//! The leading byte can have a number of bit-patterns (with the specific
+//! pattern indicating how many continuation bytes follow), but the continuation
+//! bytes are always in the format `0b10XX_XXXX` (where the `X`s can take any
+//! value). That is, the most significant bit is set, and the second most
+//! significant bit is unset.
+//!
+//! To count the number of characters, we can just count the number of bytes in
+//! the string which are not continuation bytes, which can be done many bytes at
+//! a time fairly easily.
+//!
+//! Note: Because the term "leading byte" can sometimes be ambiguous (for
+//! example, it could also refer to the first byte of a slice), we'll often use
+//! the term "non-continuation byte" to refer to these bytes in the code.
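+//!
+//! For example, "é" is encoded as the two bytes `0xC3 0xA9`: `0xC3` is a
+//! leading byte, while `0xA9` (`0b1010_1001`) is a continuation byte, so the
+//! string contains exactly one non-continuation byte and thus one `char`.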
+use core::intrinsics::unlikely;
+
+const USIZE_SIZE: usize = core::mem::size_of::<usize>();
+const UNROLL_INNER: usize = 4;
+
+#[inline]
+pub(super) fn count_chars(s: &str) -> usize {
+ if s.len() < USIZE_SIZE * UNROLL_INNER {
+ // Avoid entering the optimized implementation for strings where the
+ // difference is not likely to matter, or where it might even be slower.
+ // That said, a ton of thought was not spent on the particular threshold
+ // here, beyond "this value seems to make sense".
+ char_count_general_case(s.as_bytes())
+ } else {
+ do_count_chars(s)
+ }
+}
+
+fn do_count_chars(s: &str) -> usize {
+ // For correctness, `CHUNK_SIZE` must be:
+ //
+ // - Less than or equal to 255, otherwise we'll overflow bytes in `counts`.
+ // - A multiple of `UNROLL_INNER`, otherwise our `break` inside the
+ // `body.chunks(CHUNK_SIZE)` loop is incorrect.
+ //
+ // For performance, `CHUNK_SIZE` should be:
+ // - Relatively cheap to `/` against (so some simple sum of powers of two).
+ // - Large enough to avoid paying for the cost of the `sum_bytes_in_usize`
+ // too often.
+ const CHUNK_SIZE: usize = 192;
+
+ // Check the properties of `CHUNK_SIZE` and `UNROLL_INNER` that are required
+ // for correctness.
+ const _: () = assert!(CHUNK_SIZE < 256);
+ const _: () = assert!(CHUNK_SIZE % UNROLL_INNER == 0);
+
+ // SAFETY: transmuting `[u8]` to `[usize]` is safe except for size
+ // differences which are handled by `align_to`.
+ let (head, body, tail) = unsafe { s.as_bytes().align_to::<usize>() };
+
+ // This should be quite rare, and basically exists to handle the degenerate
+ // cases where align_to fails (as well as miri under symbolic alignment
+ // mode).
+ //
+ // The `unlikely` helps discourage LLVM from inlining the body, which is
+ // nice, as we would rather not mark the `char_count_general_case` function
+ // as cold.
+ if unlikely(body.is_empty() || head.len() > USIZE_SIZE || tail.len() > USIZE_SIZE) {
+ return char_count_general_case(s.as_bytes());
+ }
+
+ let mut total = char_count_general_case(head) + char_count_general_case(tail);
+ // Split `body` into `CHUNK_SIZE` chunks to reduce the frequency with which
+ // we call `sum_bytes_in_usize`.
+ for chunk in body.chunks(CHUNK_SIZE) {
+ // We accumulate intermediate sums in `counts`, where each byte contains
+ // a subset of the sum of this chunk, like a `[u8; size_of::<usize>()]`.
+ let mut counts = 0;
+
+ let (unrolled_chunks, remainder) = chunk.as_chunks::<UNROLL_INNER>();
+ for unrolled in unrolled_chunks {
+ for &word in unrolled {
+ // Because `CHUNK_SIZE` is < 256, this addition can't cause the
+ // count in any of the bytes to overflow into a subsequent byte.
+ counts += contains_non_continuation_byte(word);
+ }
+ }
+
+ // Sum the values in `counts` (which, again, is conceptually a `[u8;
+ // size_of::<usize>()]`), and accumulate the result into `total`.
+ total += sum_bytes_in_usize(counts);
+
+ // If there's any data in `remainder`, then handle it. This will only
+ // happen for the last `chunk` in `body.chunks()` (because `CHUNK_SIZE`
+ // is divisible by `UNROLL_INNER`), so we explicitly break at the end
+ // (which seems to help LLVM out).
+ if !remainder.is_empty() {
+ // Accumulate all the data in the remainder.
+ let mut counts = 0;
+ for &word in remainder {
+ counts += contains_non_continuation_byte(word);
+ }
+ total += sum_bytes_in_usize(counts);
+ break;
+ }
+ }
+ total
+}
+
+// Checks each byte of `w` to see if it contains the first byte in a UTF-8
+// sequence. Bytes in `w` which are continuation bytes are left as `0x00` (i.e.
+// false), and bytes which are non-continuation bytes are left as `0x01` (i.e.
+// true).
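+//
+// For example, the leading byte `0xE9` (`0b1110_1001`) has bits 7 and 6 both
+// set, so `(!bit7) | bit6` is `1`, while the continuation byte `0xA9`
+// (`0b1010_1001`) has bit 7 set and bit 6 clear, giving `0`.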
+#[inline]
+fn contains_non_continuation_byte(w: usize) -> usize {
+ const LSB: usize = usize::repeat_u8(0x01);
+ ((!w >> 7) | (w >> 6)) & LSB
+}
+
+// Morally equivalent to `values.to_ne_bytes().into_iter().sum::<usize>()`, but
+// more efficient.
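+//
+// Worked example (64-bit): for `values = 0x0102_0304_0506_0708`, `pair_sum`
+// becomes `0x0003_0007_000b_000f`, and the multiply by `LSB_SHORTS` followed
+// by the shift adds those four `u16` lanes together, returning `36`.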
+#[inline]
+fn sum_bytes_in_usize(values: usize) -> usize {
+ const LSB_SHORTS: usize = usize::repeat_u16(0x0001);
+ const SKIP_BYTES: usize = usize::repeat_u16(0x00ff);
+
+ let pair_sum: usize = (values & SKIP_BYTES) + ((values >> 8) & SKIP_BYTES);
+ pair_sum.wrapping_mul(LSB_SHORTS) >> ((USIZE_SIZE - 2) * 8)
+}
+
+// This is the most direct implementation of the concept of "count the number of
+// bytes in the string which are not continuation bytes", and is used for the
+// head and tail of the input string (the first and last item in the tuple
+// returned by `slice::align_to`).
+fn char_count_general_case(s: &[u8]) -> usize {
+ s.iter().filter(|&&byte| !super::validations::utf8_is_cont_byte(byte)).count()
+}
diff --git a/library/core/src/str/error.rs b/library/core/src/str/error.rs
new file mode 100644
index 000000000..4e569fcc8
--- /dev/null
+++ b/library/core/src/str/error.rs
@@ -0,0 +1,138 @@
+//! Defines the UTF-8 error type.
+
+use crate::fmt;
+
+/// Errors which can occur when attempting to interpret a sequence of [`u8`]
+/// as a string.
+///
+/// For example, the `from_utf8` family of functions and methods for both
+/// [`String`]s and [`&str`]s makes use of this error.
+///
+/// [`String`]: ../../std/string/struct.String.html#method.from_utf8
+/// [`&str`]: super::from_utf8
+///
+/// # Examples
+///
+/// This error type’s methods can be used to create functionality
+/// similar to `String::from_utf8_lossy` without allocating heap memory:
+///
+/// ```
+/// fn from_utf8_lossy<F>(mut input: &[u8], mut push: F) where F: FnMut(&str) {
+/// loop {
+/// match std::str::from_utf8(input) {
+/// Ok(valid) => {
+/// push(valid);
+/// break
+/// }
+/// Err(error) => {
+/// let (valid, after_valid) = input.split_at(error.valid_up_to());
+/// unsafe {
+/// push(std::str::from_utf8_unchecked(valid))
+/// }
+/// push("\u{FFFD}");
+///
+/// if let Some(invalid_sequence_length) = error.error_len() {
+/// input = &after_valid[invalid_sequence_length..]
+/// } else {
+/// break
+/// }
+/// }
+/// }
+/// }
+/// }
+/// ```
+#[derive(Copy, Eq, PartialEq, Clone, Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Utf8Error {
+ pub(super) valid_up_to: usize,
+ pub(super) error_len: Option<u8>,
+}
+
+impl Utf8Error {
+ /// Returns the index in the given string up to which valid UTF-8 was
+ /// verified.
+ ///
+ /// It is the maximum index such that `from_utf8(&input[..index])`
+ /// would return `Ok(_)`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::str;
+ ///
+ /// // some invalid bytes, in a vector
+ /// let sparkle_heart = vec![0, 159, 146, 150];
+ ///
+ /// // std::str::from_utf8 returns a Utf8Error
+ /// let error = str::from_utf8(&sparkle_heart).unwrap_err();
+ ///
+ /// // the second byte is invalid here
+ /// assert_eq!(1, error.valid_up_to());
+ /// ```
+ #[stable(feature = "utf8_error", since = "1.5.0")]
+ #[rustc_const_stable(feature = "const_str_from_utf8_shared", since = "1.63.0")]
+ #[must_use]
+ #[inline]
+ pub const fn valid_up_to(&self) -> usize {
+ self.valid_up_to
+ }
+
+ /// Provides more information about the failure:
+ ///
+ /// * `None`: the end of the input was reached unexpectedly.
+ /// `self.valid_up_to()` is 1 to 3 bytes from the end of the input.
+ /// If a byte stream (such as a file or a network socket) is being decoded incrementally,
+ /// this could be a valid `char` whose UTF-8 byte sequence is spanning multiple chunks.
+ ///
+ /// * `Some(len)`: an unexpected byte was encountered.
+ /// The length provided is that of the invalid byte sequence
+ /// that starts at the index given by `valid_up_to()`.
+ /// Decoding should resume after that sequence
+ /// (after inserting a [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]) in case of
+ /// lossy decoding.
+ ///
+ /// [U+FFFD]: ../../std/char/constant.REPLACEMENT_CHARACTER.html
+ #[stable(feature = "utf8_error_error_len", since = "1.20.0")]
+ #[rustc_const_stable(feature = "const_str_from_utf8_shared", since = "1.63.0")]
+ #[must_use]
+ #[inline]
+ pub const fn error_len(&self) -> Option<usize> {
+ // FIXME: This should become `map` again, once it's `const`
+ match self.error_len {
+ Some(len) => Some(len as usize),
+ None => None,
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for Utf8Error {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if let Some(error_len) = self.error_len {
+ write!(
+ f,
+ "invalid utf-8 sequence of {} bytes from index {}",
+ error_len, self.valid_up_to
+ )
+ } else {
+ write!(f, "incomplete utf-8 byte sequence from index {}", self.valid_up_to)
+ }
+ }
+}
+
+/// An error returned when parsing a `bool` using [`from_str`] fails.
+///
+/// [`from_str`]: super::FromStr::from_str
+#[derive(Debug, Clone, PartialEq, Eq)]
+#[non_exhaustive]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct ParseBoolError;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for ParseBoolError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ "provided string was not `true` or `false`".fmt(f)
+ }
+}
diff --git a/library/core/src/str/iter.rs b/library/core/src/str/iter.rs
new file mode 100644
index 000000000..24083ee6a
--- /dev/null
+++ b/library/core/src/str/iter.rs
@@ -0,0 +1,1499 @@
+//! Iterators for `str` methods.
+
+use crate::char;
+use crate::fmt::{self, Write};
+use crate::iter::{Chain, FlatMap, Flatten};
+use crate::iter::{Copied, Filter, FusedIterator, Map, TrustedLen};
+use crate::iter::{TrustedRandomAccess, TrustedRandomAccessNoCoerce};
+use crate::ops::Try;
+use crate::option;
+use crate::slice::{self, Split as SliceSplit};
+
+use super::from_utf8_unchecked;
+use super::pattern::Pattern;
+use super::pattern::{DoubleEndedSearcher, ReverseSearcher, Searcher};
+use super::validations::{next_code_point, next_code_point_reverse};
+use super::LinesAnyMap;
+use super::{BytesIsNotEmpty, UnsafeBytesToStr};
+use super::{CharEscapeDebugContinue, CharEscapeDefault, CharEscapeUnicode};
+use super::{IsAsciiWhitespace, IsNotEmpty, IsWhitespace};
+
+/// An iterator over the [`char`]s of a string slice.
+///
+/// This struct is created by the [`chars`] method on [`str`].
+/// See its documentation for more.
+///
+/// [`char`]: prim@char
+/// [`chars`]: str::chars
+#[derive(Clone)]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Chars<'a> {
+ pub(super) iter: slice::Iter<'a, u8>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> Iterator for Chars<'a> {
+ type Item = char;
+
+ #[inline]
+ fn next(&mut self) -> Option<char> {
+ // SAFETY: `str` invariant says `self.iter` is a valid UTF-8 string and
+ // the resulting `ch` is a valid Unicode Scalar Value.
+ unsafe { next_code_point(&mut self.iter).map(|ch| char::from_u32_unchecked(ch)) }
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ super::count::count_chars(self.as_str())
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.iter.len();
+ // `(len + 3)` can't overflow, because we know that the `slice::Iter`
+ // belongs to a slice in memory which has a maximum length of
+ // `isize::MAX` (that's well below `usize::MAX`).
+ ((len + 3) / 4, Some(len))
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<char> {
+ // No need to go through the entire string.
+ self.next_back()
+ }
+}
+
+#[stable(feature = "chars_debug_impl", since = "1.38.0")]
+impl fmt::Debug for Chars<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "Chars(")?;
+ f.debug_list().entries(self.clone()).finish()?;
+ write!(f, ")")?;
+ Ok(())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> DoubleEndedIterator for Chars<'a> {
+ #[inline]
+ fn next_back(&mut self) -> Option<char> {
+ // SAFETY: `str` invariant says `self.iter` is a valid UTF-8 string and
+ // the resulting `ch` is a valid Unicode Scalar Value.
+ unsafe { next_code_point_reverse(&mut self.iter).map(|ch| char::from_u32_unchecked(ch)) }
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl FusedIterator for Chars<'_> {}
+
+impl<'a> Chars<'a> {
+ /// Views the underlying data as a subslice of the original data.
+ ///
+ /// This has the same lifetime as the original slice, and so the
+ /// iterator can continue to be used while this exists.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut chars = "abc".chars();
+ ///
+ /// assert_eq!(chars.as_str(), "abc");
+ /// chars.next();
+ /// assert_eq!(chars.as_str(), "bc");
+ /// chars.next();
+ /// chars.next();
+ /// assert_eq!(chars.as_str(), "");
+ /// ```
+ #[stable(feature = "iter_to_slice", since = "1.4.0")]
+ #[must_use]
+ #[inline]
+ pub fn as_str(&self) -> &'a str {
+ // SAFETY: `Chars` is only made from a str, which guarantees the iter is valid UTF-8.
+ unsafe { from_utf8_unchecked(self.iter.as_slice()) }
+ }
+}
+
+/// An iterator over the [`char`]s of a string slice, and their positions.
+///
+/// This struct is created by the [`char_indices`] method on [`str`].
+/// See its documentation for more.
+///
+/// [`char`]: prim@char
+/// [`char_indices`]: str::char_indices
+#[derive(Clone, Debug)]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct CharIndices<'a> {
+ pub(super) front_offset: usize,
+ pub(super) iter: Chars<'a>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> Iterator for CharIndices<'a> {
+ type Item = (usize, char);
+
+ #[inline]
+ fn next(&mut self) -> Option<(usize, char)> {
+ let pre_len = self.iter.iter.len();
+ match self.iter.next() {
+ None => None,
+ Some(ch) => {
+ let index = self.front_offset;
+ let len = self.iter.iter.len();
+ self.front_offset += pre_len - len;
+ Some((index, ch))
+ }
+ }
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.iter.count()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<(usize, char)> {
+ // No need to go through the entire string.
+ self.next_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> DoubleEndedIterator for CharIndices<'a> {
+ #[inline]
+ fn next_back(&mut self) -> Option<(usize, char)> {
+ self.iter.next_back().map(|ch| {
+ let index = self.front_offset + self.iter.iter.len();
+ (index, ch)
+ })
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl FusedIterator for CharIndices<'_> {}
+
+impl<'a> CharIndices<'a> {
+ /// Views the underlying data as a subslice of the original data.
+ ///
+ /// This has the same lifetime as the original slice, and so the
+ /// iterator can continue to be used while this exists.
+ #[stable(feature = "iter_to_slice", since = "1.4.0")]
+ #[must_use]
+ #[inline]
+ pub fn as_str(&self) -> &'a str {
+ self.iter.as_str()
+ }
+
+ /// Returns the byte position of the next character, or the length
+ /// of the underlying string if there are no more characters.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(char_indices_offset)]
+ /// let mut chars = "a楽".char_indices();
+ ///
+ /// assert_eq!(chars.offset(), 0);
+ /// assert_eq!(chars.next(), Some((0, 'a')));
+ ///
+ /// assert_eq!(chars.offset(), 1);
+ /// assert_eq!(chars.next(), Some((1, '楽')));
+ ///
+ /// assert_eq!(chars.offset(), 4);
+ /// assert_eq!(chars.next(), None);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "char_indices_offset", issue = "83871")]
+ pub fn offset(&self) -> usize {
+ self.front_offset
+ }
+}
+
+/// An iterator over the bytes of a string slice.
+///
+/// This struct is created by the [`bytes`] method on [`str`].
+/// See its documentation for more.
+///
+/// [`bytes`]: str::bytes
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Clone, Debug)]
+pub struct Bytes<'a>(pub(super) Copied<slice::Iter<'a, u8>>);
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Iterator for Bytes<'_> {
+ type Item = u8;
+
+ #[inline]
+ fn next(&mut self) -> Option<u8> {
+ self.0.next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.0.size_hint()
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.0.count()
+ }
+
+ #[inline]
+ fn last(self) -> Option<Self::Item> {
+ self.0.last()
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ self.0.nth(n)
+ }
+
+ #[inline]
+ fn all<F>(&mut self, f: F) -> bool
+ where
+ F: FnMut(Self::Item) -> bool,
+ {
+ self.0.all(f)
+ }
+
+ #[inline]
+ fn any<F>(&mut self, f: F) -> bool
+ where
+ F: FnMut(Self::Item) -> bool,
+ {
+ self.0.any(f)
+ }
+
+ #[inline]
+ fn find<P>(&mut self, predicate: P) -> Option<Self::Item>
+ where
+ P: FnMut(&Self::Item) -> bool,
+ {
+ self.0.find(predicate)
+ }
+
+ #[inline]
+ fn position<P>(&mut self, predicate: P) -> Option<usize>
+ where
+ P: FnMut(Self::Item) -> bool,
+ {
+ self.0.position(predicate)
+ }
+
+ #[inline]
+ fn rposition<P>(&mut self, predicate: P) -> Option<usize>
+ where
+ P: FnMut(Self::Item) -> bool,
+ {
+ self.0.rposition(predicate)
+ }
+
+ #[inline]
+ unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> u8 {
+ // SAFETY: the caller must uphold the safety contract
+ // for `Iterator::__iterator_get_unchecked`.
+ unsafe { self.0.__iterator_get_unchecked(idx) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl DoubleEndedIterator for Bytes<'_> {
+ #[inline]
+ fn next_back(&mut self) -> Option<u8> {
+ self.0.next_back()
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ self.0.nth_back(n)
+ }
+
+ #[inline]
+ fn rfind<P>(&mut self, predicate: P) -> Option<Self::Item>
+ where
+ P: FnMut(&Self::Item) -> bool,
+ {
+ self.0.rfind(predicate)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ExactSizeIterator for Bytes<'_> {
+ #[inline]
+ fn len(&self) -> usize {
+ self.0.len()
+ }
+
+ #[inline]
+ fn is_empty(&self) -> bool {
+ self.0.is_empty()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl FusedIterator for Bytes<'_> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl TrustedLen for Bytes<'_> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl TrustedRandomAccess for Bytes<'_> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl TrustedRandomAccessNoCoerce for Bytes<'_> {
+ const MAY_HAVE_SIDE_EFFECT: bool = false;
+}
+
+/// This macro generates a Clone impl for string pattern API
+/// wrapper types of the form X<'a, P>
+macro_rules! derive_pattern_clone {
+ (clone $t:ident with |$s:ident| $e:expr) => {
+ impl<'a, P> Clone for $t<'a, P>
+ where
+ P: Pattern<'a, Searcher: Clone>,
+ {
+ fn clone(&self) -> Self {
+ let $s = self;
+ $e
+ }
+ }
+ };
+}
+
+/// This macro generates two public iterator structs
+/// wrapping a private internal one that makes use of the `Pattern` API.
+///
+/// For all patterns `P: Pattern<'a>` the following items will be
+/// generated (generics omitted):
+///
+/// struct $forward_iterator($internal_iterator);
+/// struct $reverse_iterator($internal_iterator);
+///
+/// impl Iterator for $forward_iterator
+/// { /* internal ends up calling Searcher::next_match() */ }
+///
+/// impl DoubleEndedIterator for $forward_iterator
+/// where P::Searcher: DoubleEndedSearcher
+/// { /* internal ends up calling Searcher::next_match_back() */ }
+///
+/// impl Iterator for $reverse_iterator
+/// where P::Searcher: ReverseSearcher
+/// { /* internal ends up calling Searcher::next_match_back() */ }
+///
+/// impl DoubleEndedIterator for $reverse_iterator
+/// where P::Searcher: DoubleEndedSearcher
+/// { /* internal ends up calling Searcher::next_match() */ }
+///
+/// The internal one is defined outside the macro, and has almost the same
+/// semantic as a DoubleEndedIterator by delegating to `pattern::Searcher` and
+/// `pattern::ReverseSearcher` for both forward and reverse iteration.
+///
+/// "Almost", because a `Searcher` and a `ReverseSearcher` for a given
+/// `Pattern` might not return the same elements, so actually implementing
+/// `DoubleEndedIterator` for it would be incorrect.
+/// (See the docs in `str::pattern` for more details)
+///
+/// However, the internal struct still represents a single ended iterator from
+/// either end, and depending on pattern is also a valid double ended iterator,
+/// so the two wrapper structs implement `Iterator`
+/// and `DoubleEndedIterator` depending on the concrete pattern type, leading
+/// to the complex impls seen above.
+macro_rules! generate_pattern_iterators {
+ {
+ // Forward iterator
+ forward:
+ $(#[$forward_iterator_attribute:meta])*
+ struct $forward_iterator:ident;
+
+ // Reverse iterator
+ reverse:
+ $(#[$reverse_iterator_attribute:meta])*
+ struct $reverse_iterator:ident;
+
+ // Stability of all generated items
+ stability:
+ $(#[$common_stability_attribute:meta])*
+
+ // Internal almost-iterator that is being delegated to
+ internal:
+ $internal_iterator:ident yielding ($iterty:ty);
+
+ // Kind of delegation - either single ended or double ended
+ delegate $($t:tt)*
+ } => {
+ $(#[$forward_iterator_attribute])*
+ $(#[$common_stability_attribute])*
+ pub struct $forward_iterator<'a, P: Pattern<'a>>(pub(super) $internal_iterator<'a, P>);
+
+ $(#[$common_stability_attribute])*
+ impl<'a, P> fmt::Debug for $forward_iterator<'a, P>
+ where
+ P: Pattern<'a, Searcher: fmt::Debug>,
+ {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple(stringify!($forward_iterator))
+ .field(&self.0)
+ .finish()
+ }
+ }
+
+ $(#[$common_stability_attribute])*
+ impl<'a, P: Pattern<'a>> Iterator for $forward_iterator<'a, P> {
+ type Item = $iterty;
+
+ #[inline]
+ fn next(&mut self) -> Option<$iterty> {
+ self.0.next()
+ }
+ }
+
+ $(#[$common_stability_attribute])*
+ impl<'a, P> Clone for $forward_iterator<'a, P>
+ where
+ P: Pattern<'a, Searcher: Clone>,
+ {
+ fn clone(&self) -> Self {
+ $forward_iterator(self.0.clone())
+ }
+ }
+
+ $(#[$reverse_iterator_attribute])*
+ $(#[$common_stability_attribute])*
+ pub struct $reverse_iterator<'a, P: Pattern<'a>>(pub(super) $internal_iterator<'a, P>);
+
+ $(#[$common_stability_attribute])*
+ impl<'a, P> fmt::Debug for $reverse_iterator<'a, P>
+ where
+ P: Pattern<'a, Searcher: fmt::Debug>,
+ {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple(stringify!($reverse_iterator))
+ .field(&self.0)
+ .finish()
+ }
+ }
+
+ $(#[$common_stability_attribute])*
+ impl<'a, P> Iterator for $reverse_iterator<'a, P>
+ where
+ P: Pattern<'a, Searcher: ReverseSearcher<'a>>,
+ {
+ type Item = $iterty;
+
+ #[inline]
+ fn next(&mut self) -> Option<$iterty> {
+ self.0.next_back()
+ }
+ }
+
+ $(#[$common_stability_attribute])*
+ impl<'a, P> Clone for $reverse_iterator<'a, P>
+ where
+ P: Pattern<'a, Searcher: Clone>,
+ {
+ fn clone(&self) -> Self {
+ $reverse_iterator(self.0.clone())
+ }
+ }
+
+ #[stable(feature = "fused", since = "1.26.0")]
+ impl<'a, P: Pattern<'a>> FusedIterator for $forward_iterator<'a, P> {}
+
+ #[stable(feature = "fused", since = "1.26.0")]
+ impl<'a, P> FusedIterator for $reverse_iterator<'a, P>
+ where
+ P: Pattern<'a, Searcher: ReverseSearcher<'a>>,
+ {}
+
+ generate_pattern_iterators!($($t)* with $(#[$common_stability_attribute])*,
+ $forward_iterator,
+ $reverse_iterator, $iterty);
+ };
+ {
+ double ended; with $(#[$common_stability_attribute:meta])*,
+ $forward_iterator:ident,
+ $reverse_iterator:ident, $iterty:ty
+ } => {
+ $(#[$common_stability_attribute])*
+ impl<'a, P> DoubleEndedIterator for $forward_iterator<'a, P>
+ where
+ P: Pattern<'a, Searcher: DoubleEndedSearcher<'a>>,
+ {
+ #[inline]
+ fn next_back(&mut self) -> Option<$iterty> {
+ self.0.next_back()
+ }
+ }
+
+ $(#[$common_stability_attribute])*
+ impl<'a, P> DoubleEndedIterator for $reverse_iterator<'a, P>
+ where
+ P: Pattern<'a, Searcher: DoubleEndedSearcher<'a>>,
+ {
+ #[inline]
+ fn next_back(&mut self) -> Option<$iterty> {
+ self.0.next()
+ }
+ }
+ };
+ {
+ single ended; with $(#[$common_stability_attribute:meta])*,
+ $forward_iterator:ident,
+ $reverse_iterator:ident, $iterty:ty
+ } => {}
+}
+
+derive_pattern_clone! {
+ clone SplitInternal
+ with |s| SplitInternal { matcher: s.matcher.clone(), ..*s }
+}
+
+pub(super) struct SplitInternal<'a, P: Pattern<'a>> {
+ pub(super) start: usize,
+ pub(super) end: usize,
+ pub(super) matcher: P::Searcher,
+ pub(super) allow_trailing_empty: bool,
+ pub(super) finished: bool,
+}
+
+impl<'a, P> fmt::Debug for SplitInternal<'a, P>
+where
+ P: Pattern<'a, Searcher: fmt::Debug>,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SplitInternal")
+ .field("start", &self.start)
+ .field("end", &self.end)
+ .field("matcher", &self.matcher)
+ .field("allow_trailing_empty", &self.allow_trailing_empty)
+ .field("finished", &self.finished)
+ .finish()
+ }
+}
+
+impl<'a, P: Pattern<'a>> SplitInternal<'a, P> {
+ #[inline]
+ fn get_end(&mut self) -> Option<&'a str> {
+ if !self.finished && (self.allow_trailing_empty || self.end - self.start > 0) {
+ self.finished = true;
+ // SAFETY: `self.start` and `self.end` always lie on unicode boundaries.
+ unsafe {
+ let string = self.matcher.haystack().get_unchecked(self.start..self.end);
+ Some(string)
+ }
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a str> {
+ if self.finished {
+ return None;
+ }
+
+ let haystack = self.matcher.haystack();
+ match self.matcher.next_match() {
+ // SAFETY: `Searcher` guarantees that `a` and `b` lie on unicode boundaries.
+ Some((a, b)) => unsafe {
+ let elt = haystack.get_unchecked(self.start..a);
+ self.start = b;
+ Some(elt)
+ },
+ None => self.get_end(),
+ }
+ }
+
+ #[inline]
+ fn next_inclusive(&mut self) -> Option<&'a str> {
+ if self.finished {
+ return None;
+ }
+
+ let haystack = self.matcher.haystack();
+ match self.matcher.next_match() {
+ // SAFETY: `Searcher` guarantees that `b` lies on unicode boundary,
+ // and self.start is either the start of the original string,
+ // or `b` was assigned to it, so it also lies on unicode boundary.
+ Some((_, b)) => unsafe {
+ let elt = haystack.get_unchecked(self.start..b);
+ self.start = b;
+ Some(elt)
+ },
+ None => self.get_end(),
+ }
+ }
+
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a str>
+ where
+ P::Searcher: ReverseSearcher<'a>,
+ {
+ if self.finished {
+ return None;
+ }
+
+ if !self.allow_trailing_empty {
+ self.allow_trailing_empty = true;
+ match self.next_back() {
+ Some(elt) if !elt.is_empty() => return Some(elt),
+ _ => {
+ if self.finished {
+ return None;
+ }
+ }
+ }
+ }
+
+ let haystack = self.matcher.haystack();
+ match self.matcher.next_match_back() {
+ // SAFETY: `Searcher` guarantees that `a` and `b` lie on unicode boundaries.
+ Some((a, b)) => unsafe {
+ let elt = haystack.get_unchecked(b..self.end);
+ self.end = a;
+ Some(elt)
+ },
+ // SAFETY: `self.start` and `self.end` always lie on unicode boundaries.
+ None => unsafe {
+ self.finished = true;
+ Some(haystack.get_unchecked(self.start..self.end))
+ },
+ }
+ }
+
+ #[inline]
+ fn next_back_inclusive(&mut self) -> Option<&'a str>
+ where
+ P::Searcher: ReverseSearcher<'a>,
+ {
+ if self.finished {
+ return None;
+ }
+
+ if !self.allow_trailing_empty {
+ self.allow_trailing_empty = true;
+ match self.next_back_inclusive() {
+ Some(elt) if !elt.is_empty() => return Some(elt),
+ _ => {
+ if self.finished {
+ return None;
+ }
+ }
+ }
+ }
+
+ let haystack = self.matcher.haystack();
+ match self.matcher.next_match_back() {
+ // SAFETY: `Searcher` guarantees that `b` lies on unicode boundary,
+ // and self.end is either the end of the original string,
+ // or `b` was assigned to it, so it also lies on unicode boundary.
+ Some((_, b)) => unsafe {
+ let elt = haystack.get_unchecked(b..self.end);
+ self.end = b;
+ Some(elt)
+ },
+ // SAFETY: self.start is either the start of the original string,
+ // or start of a substring that represents the part of the string that hasn't
+ // iterated yet. Either way, it is guaranteed to lie on unicode boundary.
+ // self.end is either the end of the original string,
+ // or `b` was assigned to it, so it also lies on unicode boundary.
+ None => unsafe {
+ self.finished = true;
+ Some(haystack.get_unchecked(self.start..self.end))
+ },
+ }
+ }
+
+ #[inline]
+ fn as_str(&self) -> &'a str {
+ // `Self::get_end` doesn't change `self.start`
+ if self.finished {
+ return "";
+ }
+
+ // SAFETY: `self.start` and `self.end` always lie on unicode boundaries.
+ unsafe { self.matcher.haystack().get_unchecked(self.start..self.end) }
+ }
+}
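+
+// Illustration (not part of the original source): `allow_trailing_empty` is
+// what distinguishes `split` (true) from `split_terminator` (false); with it
+// disabled, the first `next_back` call consumes and discards a trailing
+// empty element before yielding anything:
+//
+//     let v: Vec<&str> = "a,b,".split(',').collect();
+//     assert_eq!(v, ["a", "b", ""]);
+//     let v: Vec<&str> = "a,b,".split_terminator(',').collect();
+//     assert_eq!(v, ["a", "b"]);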
+
+generate_pattern_iterators! {
+ forward:
+ /// Created with the method [`split`].
+ ///
+ /// [`split`]: str::split
+ struct Split;
+ reverse:
+ /// Created with the method [`rsplit`].
+ ///
+ /// [`rsplit`]: str::rsplit
+ struct RSplit;
+ stability:
+ #[stable(feature = "rust1", since = "1.0.0")]
+ internal:
+ SplitInternal yielding (&'a str);
+ delegate double ended;
+}
+
+impl<'a, P: Pattern<'a>> Split<'a, P> {
+ /// Returns the remainder of the split string.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(str_split_as_str)]
+ /// let mut split = "Mary had a little lamb".split(' ');
+ /// assert_eq!(split.as_str(), "Mary had a little lamb");
+ /// split.next();
+ /// assert_eq!(split.as_str(), "had a little lamb");
+ /// split.by_ref().for_each(drop);
+ /// assert_eq!(split.as_str(), "");
+ /// ```
+ #[inline]
+ #[unstable(feature = "str_split_as_str", issue = "77998")]
+ pub fn as_str(&self) -> &'a str {
+ self.0.as_str()
+ }
+}
+
+impl<'a, P: Pattern<'a>> RSplit<'a, P> {
+ /// Returns the remainder of the split string.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(str_split_as_str)]
+ /// let mut split = "Mary had a little lamb".rsplit(' ');
+ /// assert_eq!(split.as_str(), "Mary had a little lamb");
+ /// split.next();
+ /// assert_eq!(split.as_str(), "Mary had a little");
+ /// split.by_ref().for_each(drop);
+ /// assert_eq!(split.as_str(), "");
+ /// ```
+ #[inline]
+ #[unstable(feature = "str_split_as_str", issue = "77998")]
+ pub fn as_str(&self) -> &'a str {
+ self.0.as_str()
+ }
+}
+
+generate_pattern_iterators! {
+ forward:
+ /// Created with the method [`split_terminator`].
+ ///
+ /// [`split_terminator`]: str::split_terminator
+ struct SplitTerminator;
+ reverse:
+ /// Created with the method [`rsplit_terminator`].
+ ///
+ /// [`rsplit_terminator`]: str::rsplit_terminator
+ struct RSplitTerminator;
+ stability:
+ #[stable(feature = "rust1", since = "1.0.0")]
+ internal:
+ SplitInternal yielding (&'a str);
+ delegate double ended;
+}
+
+impl<'a, P: Pattern<'a>> SplitTerminator<'a, P> {
+ /// Returns the remainder of the split string.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(str_split_as_str)]
+ /// let mut split = "A..B..".split_terminator('.');
+ /// assert_eq!(split.as_str(), "A..B..");
+ /// split.next();
+ /// assert_eq!(split.as_str(), ".B..");
+ /// split.by_ref().for_each(drop);
+ /// assert_eq!(split.as_str(), "");
+ /// ```
+ #[inline]
+ #[unstable(feature = "str_split_as_str", issue = "77998")]
+ pub fn as_str(&self) -> &'a str {
+ self.0.as_str()
+ }
+}
+
+impl<'a, P: Pattern<'a>> RSplitTerminator<'a, P> {
+ /// Returns the remainder of the split string.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(str_split_as_str)]
+ /// let mut split = "A..B..".rsplit_terminator('.');
+ /// assert_eq!(split.as_str(), "A..B..");
+ /// split.next();
+ /// assert_eq!(split.as_str(), "A..B");
+ /// split.by_ref().for_each(drop);
+ /// assert_eq!(split.as_str(), "");
+ /// ```
+ #[inline]
+ #[unstable(feature = "str_split_as_str", issue = "77998")]
+ pub fn as_str(&self) -> &'a str {
+ self.0.as_str()
+ }
+}
+
+derive_pattern_clone! {
+ clone SplitNInternal
+ with |s| SplitNInternal { iter: s.iter.clone(), ..*s }
+}
+
+pub(super) struct SplitNInternal<'a, P: Pattern<'a>> {
+ pub(super) iter: SplitInternal<'a, P>,
+ /// The number of splits remaining
+ pub(super) count: usize,
+}
+
+impl<'a, P> fmt::Debug for SplitNInternal<'a, P>
+where
+ P: Pattern<'a, Searcher: fmt::Debug>,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SplitNInternal")
+ .field("iter", &self.iter)
+ .field("count", &self.count)
+ .finish()
+ }
+}
+
+impl<'a, P: Pattern<'a>> SplitNInternal<'a, P> {
+ #[inline]
+ fn next(&mut self) -> Option<&'a str> {
+ match self.count {
+ 0 => None,
+ 1 => {
+ self.count = 0;
+ self.iter.get_end()
+ }
+ _ => {
+ self.count -= 1;
+ self.iter.next()
+ }
+ }
+ }
+
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a str>
+ where
+ P::Searcher: ReverseSearcher<'a>,
+ {
+ match self.count {
+ 0 => None,
+ 1 => {
+ self.count = 0;
+ self.iter.get_end()
+ }
+ _ => {
+ self.count -= 1;
+ self.iter.next_back()
+ }
+ }
+ }
+
+ #[inline]
+ fn as_str(&self) -> &'a str {
+ self.iter.as_str()
+ }
+}
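+
+// Illustration (not part of the original source): `count` starts at the `n`
+// passed to `splitn`; once it reaches 1, `get_end` yields the entire
+// unsearched remainder as the final element:
+//
+//     let mut it = "a,b,c".splitn(2, ',');
+//     assert_eq!(it.next(), Some("a"));
+//     assert_eq!(it.next(), Some("b,c")); // count hit 1: rest yielded whole
+//     assert_eq!(it.next(), None);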
+
+generate_pattern_iterators! {
+ forward:
+ /// Created with the method [`splitn`].
+ ///
+ /// [`splitn`]: str::splitn
+ struct SplitN;
+ reverse:
+ /// Created with the method [`rsplitn`].
+ ///
+ /// [`rsplitn`]: str::rsplitn
+ struct RSplitN;
+ stability:
+ #[stable(feature = "rust1", since = "1.0.0")]
+ internal:
+ SplitNInternal yielding (&'a str);
+ delegate single ended;
+}
+
+impl<'a, P: Pattern<'a>> SplitN<'a, P> {
+ /// Returns the remainder of the split string.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(str_split_as_str)]
+ /// let mut split = "Mary had a little lamb".splitn(3, ' ');
+ /// assert_eq!(split.as_str(), "Mary had a little lamb");
+ /// split.next();
+ /// assert_eq!(split.as_str(), "had a little lamb");
+ /// split.by_ref().for_each(drop);
+ /// assert_eq!(split.as_str(), "");
+ /// ```
+ #[inline]
+ #[unstable(feature = "str_split_as_str", issue = "77998")]
+ pub fn as_str(&self) -> &'a str {
+ self.0.as_str()
+ }
+}
+
+impl<'a, P: Pattern<'a>> RSplitN<'a, P> {
+ /// Returns the remainder of the split string.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(str_split_as_str)]
+ /// let mut split = "Mary had a little lamb".rsplitn(3, ' ');
+ /// assert_eq!(split.as_str(), "Mary had a little lamb");
+ /// split.next();
+ /// assert_eq!(split.as_str(), "Mary had a little");
+ /// split.by_ref().for_each(drop);
+ /// assert_eq!(split.as_str(), "");
+ /// ```
+ #[inline]
+ #[unstable(feature = "str_split_as_str", issue = "77998")]
+ pub fn as_str(&self) -> &'a str {
+ self.0.as_str()
+ }
+}
+
+derive_pattern_clone! {
+ clone MatchIndicesInternal
+ with |s| MatchIndicesInternal(s.0.clone())
+}
+
+pub(super) struct MatchIndicesInternal<'a, P: Pattern<'a>>(pub(super) P::Searcher);
+
+impl<'a, P> fmt::Debug for MatchIndicesInternal<'a, P>
+where
+ P: Pattern<'a, Searcher: fmt::Debug>,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("MatchIndicesInternal").field(&self.0).finish()
+ }
+}
+
+impl<'a, P: Pattern<'a>> MatchIndicesInternal<'a, P> {
+ #[inline]
+ fn next(&mut self) -> Option<(usize, &'a str)> {
+ self.0
+ .next_match()
+ // SAFETY: `Searcher` guarantees that `start` and `end` lie on unicode boundaries.
+ .map(|(start, end)| unsafe { (start, self.0.haystack().get_unchecked(start..end)) })
+ }
+
+ #[inline]
+ fn next_back(&mut self) -> Option<(usize, &'a str)>
+ where
+ P::Searcher: ReverseSearcher<'a>,
+ {
+ self.0
+ .next_match_back()
+ // SAFETY: `Searcher` guarantees that `start` and `end` lie on unicode boundaries.
+ .map(|(start, end)| unsafe { (start, self.0.haystack().get_unchecked(start..end)) })
+ }
+}
+
+generate_pattern_iterators! {
+ forward:
+ /// Created with the method [`match_indices`].
+ ///
+ /// [`match_indices`]: str::match_indices
+ struct MatchIndices;
+ reverse:
+ /// Created with the method [`rmatch_indices`].
+ ///
+ /// [`rmatch_indices`]: str::rmatch_indices
+ struct RMatchIndices;
+ stability:
+ #[stable(feature = "str_match_indices", since = "1.5.0")]
+ internal:
+ MatchIndicesInternal yielding ((usize, &'a str));
+ delegate double ended;
+}
+
+derive_pattern_clone! {
+ clone MatchesInternal
+ with |s| MatchesInternal(s.0.clone())
+}
+
+pub(super) struct MatchesInternal<'a, P: Pattern<'a>>(pub(super) P::Searcher);
+
+impl<'a, P> fmt::Debug for MatchesInternal<'a, P>
+where
+ P: Pattern<'a, Searcher: fmt::Debug>,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("MatchesInternal").field(&self.0).finish()
+ }
+}
+
+impl<'a, P: Pattern<'a>> MatchesInternal<'a, P> {
+ #[inline]
+ fn next(&mut self) -> Option<&'a str> {
+ // SAFETY: `Searcher` guarantees that `start` and `end` lie on unicode boundaries.
+ self.0.next_match().map(|(a, b)| unsafe {
+ // Indices are known to be on utf8 boundaries
+ self.0.haystack().get_unchecked(a..b)
+ })
+ }
+
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a str>
+ where
+ P::Searcher: ReverseSearcher<'a>,
+ {
+ // SAFETY: `Searcher` guarantees that `start` and `end` lie on unicode boundaries.
+ self.0.next_match_back().map(|(a, b)| unsafe {
+ // Indices are known to be on utf8 boundaries
+ self.0.haystack().get_unchecked(a..b)
+ })
+ }
+}
+
+generate_pattern_iterators! {
+ forward:
+ /// Created with the method [`matches`].
+ ///
+ /// [`matches`]: str::matches
+ struct Matches;
+ reverse:
+ /// Created with the method [`rmatches`].
+ ///
+ /// [`rmatches`]: str::rmatches
+ struct RMatches;
+ stability:
+ #[stable(feature = "str_matches", since = "1.2.0")]
+ internal:
+ MatchesInternal yielding (&'a str);
+ delegate double ended;
+}
+
+/// An iterator over the lines of a string, as string slices.
+///
+/// This struct is created with the [`lines`] method on [`str`].
+/// See its documentation for more.
+///
+/// [`lines`]: str::lines
+#[stable(feature = "rust1", since = "1.0.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[derive(Clone, Debug)]
+pub struct Lines<'a>(pub(super) Map<SplitTerminator<'a, char>, LinesAnyMap>);
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> Iterator for Lines<'a> {
+ type Item = &'a str;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a str> {
+ self.0.next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.0.size_hint()
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<&'a str> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> DoubleEndedIterator for Lines<'a> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a str> {
+ self.0.next_back()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl FusedIterator for Lines<'_> {}
+
+/// Created with the method [`lines_any`].
+///
+/// [`lines_any`]: str::lines_any
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "1.4.0", note = "use lines()/Lines instead now")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[derive(Clone, Debug)]
+#[allow(deprecated)]
+pub struct LinesAny<'a>(pub(super) Lines<'a>);
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated)]
+impl<'a> Iterator for LinesAny<'a> {
+ type Item = &'a str;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a str> {
+ self.0.next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.0.size_hint()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated)]
+impl<'a> DoubleEndedIterator for LinesAny<'a> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a str> {
+ self.0.next_back()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+#[allow(deprecated)]
+impl FusedIterator for LinesAny<'_> {}
+
+/// An iterator over the non-whitespace substrings of a string,
+/// separated by any amount of whitespace.
+///
+/// This struct is created by the [`split_whitespace`] method on [`str`].
+/// See its documentation for more.
+///
+/// [`split_whitespace`]: str::split_whitespace
+#[stable(feature = "split_whitespace", since = "1.1.0")]
+#[derive(Clone, Debug)]
+pub struct SplitWhitespace<'a> {
+ pub(super) inner: Filter<Split<'a, IsWhitespace>, IsNotEmpty>,
+}
+
+/// An iterator over the non-ASCII-whitespace substrings of a string,
+/// separated by any amount of ASCII whitespace.
+///
+/// This struct is created by the [`split_ascii_whitespace`] method on [`str`].
+/// See its documentation for more.
+///
+/// [`split_ascii_whitespace`]: str::split_ascii_whitespace
+#[stable(feature = "split_ascii_whitespace", since = "1.34.0")]
+#[derive(Clone, Debug)]
+pub struct SplitAsciiWhitespace<'a> {
+ pub(super) inner:
+ Map<Filter<SliceSplit<'a, u8, IsAsciiWhitespace>, BytesIsNotEmpty>, UnsafeBytesToStr>,
+}
+
+/// An iterator over the substrings of a string,
+/// terminated by a substring matching a predicate function.
+/// Unlike `Split`, it contains the matched part as a terminator
+/// of the subslice.
+///
+/// This struct is created by the [`split_inclusive`] method on [`str`].
+/// See its documentation for more.
+///
+/// [`split_inclusive`]: str::split_inclusive
+#[stable(feature = "split_inclusive", since = "1.51.0")]
+pub struct SplitInclusive<'a, P: Pattern<'a>>(pub(super) SplitInternal<'a, P>);
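+
+// Illustration (not part of the original source): each element keeps the
+// match that terminates it, and no empty trailing element is produced for a
+// string that ends with a match:
+//
+//     let v: Vec<&str> = "a\nb\nc\n".split_inclusive('\n').collect();
+//     assert_eq!(v, ["a\n", "b\n", "c\n"]);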
+
+#[stable(feature = "split_whitespace", since = "1.1.0")]
+impl<'a> Iterator for SplitWhitespace<'a> {
+ type Item = &'a str;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a str> {
+ self.inner.next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<&'a str> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "split_whitespace", since = "1.1.0")]
+impl<'a> DoubleEndedIterator for SplitWhitespace<'a> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a str> {
+ self.inner.next_back()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl FusedIterator for SplitWhitespace<'_> {}
+
+impl<'a> SplitWhitespace<'a> {
+ /// Returns the remainder of the split string.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(str_split_whitespace_as_str)]
+ ///
+ /// let mut split = "Mary had a little lamb".split_whitespace();
+ /// assert_eq!(split.as_str(), "Mary had a little lamb");
+ ///
+ /// split.next();
+ /// assert_eq!(split.as_str(), "had a little lamb");
+ ///
+ /// split.by_ref().for_each(drop);
+ /// assert_eq!(split.as_str(), "");
+ /// ```
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "str_split_whitespace_as_str", issue = "77998")]
+ pub fn as_str(&self) -> &'a str {
+ self.inner.iter.as_str()
+ }
+}
+
+#[stable(feature = "split_ascii_whitespace", since = "1.34.0")]
+impl<'a> Iterator for SplitAsciiWhitespace<'a> {
+ type Item = &'a str;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a str> {
+ self.inner.next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<&'a str> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "split_ascii_whitespace", since = "1.34.0")]
+impl<'a> DoubleEndedIterator for SplitAsciiWhitespace<'a> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a str> {
+ self.inner.next_back()
+ }
+}
+
+#[stable(feature = "split_ascii_whitespace", since = "1.34.0")]
+impl FusedIterator for SplitAsciiWhitespace<'_> {}
+
+impl<'a> SplitAsciiWhitespace<'a> {
+ /// Returns the remainder of the split string.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(str_split_whitespace_as_str)]
+ ///
+ /// let mut split = "Mary had a little lamb".split_ascii_whitespace();
+ /// assert_eq!(split.as_str(), "Mary had a little lamb");
+ ///
+ /// split.next();
+ /// assert_eq!(split.as_str(), "had a little lamb");
+ ///
+ /// split.by_ref().for_each(drop);
+ /// assert_eq!(split.as_str(), "");
+ /// ```
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "str_split_whitespace_as_str", issue = "77998")]
+ pub fn as_str(&self) -> &'a str {
+ if self.inner.iter.iter.finished {
+ return "";
+ }
+
+ // SAFETY: Slice is created from str.
+ unsafe { crate::str::from_utf8_unchecked(&self.inner.iter.iter.v) }
+ }
+}
+
+#[stable(feature = "split_inclusive", since = "1.51.0")]
+impl<'a, P: Pattern<'a>> Iterator for SplitInclusive<'a, P> {
+ type Item = &'a str;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a str> {
+ self.0.next_inclusive()
+ }
+}
+
+#[stable(feature = "split_inclusive", since = "1.51.0")]
+impl<'a, P: Pattern<'a, Searcher: fmt::Debug>> fmt::Debug for SplitInclusive<'a, P> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SplitInclusive").field("0", &self.0).finish()
+ }
+}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[stable(feature = "split_inclusive", since = "1.51.0")]
+impl<'a, P: Pattern<'a, Searcher: Clone>> Clone for SplitInclusive<'a, P> {
+ fn clone(&self) -> Self {
+ SplitInclusive(self.0.clone())
+ }
+}
+
+#[stable(feature = "split_inclusive", since = "1.51.0")]
+impl<'a, P: Pattern<'a, Searcher: ReverseSearcher<'a>>> DoubleEndedIterator
+ for SplitInclusive<'a, P>
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a str> {
+ self.0.next_back_inclusive()
+ }
+}
+
+#[stable(feature = "split_inclusive", since = "1.51.0")]
+impl<'a, P: Pattern<'a>> FusedIterator for SplitInclusive<'a, P> {}
+
+impl<'a, P: Pattern<'a>> SplitInclusive<'a, P> {
+ /// Returns the remainder of the split string.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(str_split_inclusive_as_str)]
+ /// let mut split = "Mary had a little lamb".split_inclusive(' ');
+ /// assert_eq!(split.as_str(), "Mary had a little lamb");
+ /// split.next();
+ /// assert_eq!(split.as_str(), "had a little lamb");
+ /// split.by_ref().for_each(drop);
+ /// assert_eq!(split.as_str(), "");
+ /// ```
+ #[inline]
+ #[unstable(feature = "str_split_inclusive_as_str", issue = "77998")]
+ pub fn as_str(&self) -> &'a str {
+ self.0.as_str()
+ }
+}
+
+/// An iterator of [`u16`] over the string encoded as UTF-16.
+///
+/// This struct is created by the [`encode_utf16`] method on [`str`].
+/// See its documentation for more.
+///
+/// [`encode_utf16`]: str::encode_utf16
+#[derive(Clone)]
+#[stable(feature = "encode_utf16", since = "1.8.0")]
+pub struct EncodeUtf16<'a> {
+ pub(super) chars: Chars<'a>,
+ pub(super) extra: u16,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl fmt::Debug for EncodeUtf16<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("EncodeUtf16").finish_non_exhaustive()
+ }
+}
+
+#[stable(feature = "encode_utf16", since = "1.8.0")]
+impl<'a> Iterator for EncodeUtf16<'a> {
+ type Item = u16;
+
+ #[inline]
+ fn next(&mut self) -> Option<u16> {
+ if self.extra != 0 {
+ let tmp = self.extra;
+ self.extra = 0;
+ return Some(tmp);
+ }
+
+ let mut buf = [0; 2];
+ self.chars.next().map(|ch| {
+ let n = ch.encode_utf16(&mut buf).len();
+ if n == 2 {
+ self.extra = buf[1];
+ }
+ buf[0]
+ })
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (low, high) = self.chars.size_hint();
+ // Every char encodes to either one or two u16s, so this
+ // iterator is between 1 and 2 times as long as the
+ // underlying char iterator.
+ (low, high.and_then(|n| n.checked_mul(2)))
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl FusedIterator for EncodeUtf16<'_> {}
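+
+// Illustration (not part of the original source): code points above U+FFFF
+// encode to a surrogate pair, so a single `char` can produce two `u16`s; the
+// second unit is buffered in `extra` until the next call:
+//
+//     let units: Vec<u16> = "𝄞".encode_utf16().collect(); // U+1D11E
+//     assert_eq!(units, [0xD834, 0xDD1E]);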
+
+/// The return type of [`str::escape_debug`].
+#[stable(feature = "str_escape", since = "1.34.0")]
+#[derive(Clone, Debug)]
+pub struct EscapeDebug<'a> {
+ pub(super) inner: Chain<
+ Flatten<option::IntoIter<char::EscapeDebug>>,
+ FlatMap<Chars<'a>, char::EscapeDebug, CharEscapeDebugContinue>,
+ >,
+}
+
+/// The return type of [`str::escape_default`].
+#[stable(feature = "str_escape", since = "1.34.0")]
+#[derive(Clone, Debug)]
+pub struct EscapeDefault<'a> {
+ pub(super) inner: FlatMap<Chars<'a>, char::EscapeDefault, CharEscapeDefault>,
+}
+
+/// The return type of [`str::escape_unicode`].
+#[stable(feature = "str_escape", since = "1.34.0")]
+#[derive(Clone, Debug)]
+pub struct EscapeUnicode<'a> {
+ pub(super) inner: FlatMap<Chars<'a>, char::EscapeUnicode, CharEscapeUnicode>,
+}
+
+macro_rules! escape_types_impls {
+ ($( $Name: ident ),+) => {$(
+ #[stable(feature = "str_escape", since = "1.34.0")]
+ impl<'a> fmt::Display for $Name<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.clone().try_for_each(|c| f.write_char(c))
+ }
+ }
+
+ #[stable(feature = "str_escape", since = "1.34.0")]
+ impl<'a> Iterator for $Name<'a> {
+ type Item = char;
+
+ #[inline]
+ fn next(&mut self) -> Option<char> { self.inner.next() }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
+
+ #[inline]
+ fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R where
+ Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try<Output = Acc>
+ {
+ self.inner.try_fold(init, fold)
+ }
+
+ #[inline]
+ fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
+ where Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ self.inner.fold(init, fold)
+ }
+ }
+
+ #[stable(feature = "str_escape", since = "1.34.0")]
+ impl<'a> FusedIterator for $Name<'a> {}
+ )+}
+}
+
+escape_types_impls!(EscapeDebug, EscapeDefault, EscapeUnicode);
diff --git a/library/core/src/str/lossy.rs b/library/core/src/str/lossy.rs
new file mode 100644
index 000000000..6ec1c9390
--- /dev/null
+++ b/library/core/src/str/lossy.rs
@@ -0,0 +1,200 @@
+use crate::char;
+use crate::fmt::{self, Write};
+use crate::mem;
+
+use super::from_utf8_unchecked;
+use super::validations::utf8_char_width;
+
+/// Lossy UTF-8 string.
+#[unstable(feature = "str_internals", issue = "none")]
+pub struct Utf8Lossy {
+ bytes: [u8],
+}
+
+impl Utf8Lossy {
+ #[must_use]
+ pub fn from_bytes(bytes: &[u8]) -> &Utf8Lossy {
+ // SAFETY: Both use the same memory layout, and UTF-8 correctness isn't required.
+ unsafe { mem::transmute(bytes) }
+ }
+
+ pub fn chunks(&self) -> Utf8LossyChunksIter<'_> {
+ Utf8LossyChunksIter { source: &self.bytes }
+ }
+}
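+
+// Illustration (not part of the original source): chunk iteration splits the
+// input at each invalid sequence, pairing a (possibly empty) valid prefix
+// with the broken bytes that follow it; the final chunk has empty `broken`:
+//
+//     let lossy = Utf8Lossy::from_bytes(b"ok\xFFrest");
+//     // yields: { valid: "ok", broken: [0xFF] }, { valid: "rest", broken: [] }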
+
+/// An iterator over the chunks of a lossy UTF-8 string.
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[unstable(feature = "str_internals", issue = "none")]
+#[allow(missing_debug_implementations)]
+pub struct Utf8LossyChunksIter<'a> {
+ source: &'a [u8],
+}
+
+#[unstable(feature = "str_internals", issue = "none")]
+#[derive(PartialEq, Eq, Debug)]
+pub struct Utf8LossyChunk<'a> {
+ /// Sequence of valid chars.
+ /// Can be empty between broken UTF-8 chars.
+ pub valid: &'a str,
+ /// Single broken char; empty if none.
+ /// Empty iff this chunk is the last item yielded by the iterator.
+ pub broken: &'a [u8],
+}
+
+impl<'a> Iterator for Utf8LossyChunksIter<'a> {
+ type Item = Utf8LossyChunk<'a>;
+
+ fn next(&mut self) -> Option<Utf8LossyChunk<'a>> {
+ if self.source.is_empty() {
+ return None;
+ }
+
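+ // A UTF-8 continuation byte always has the bit pattern 0b10xx_xxxx, so
+ // masking with 192 (0b1100_0000) and comparing against TAG_CONT_U8
+ // (128, i.e. 0b1000_0000) below tests for exactly that pattern.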
+ const TAG_CONT_U8: u8 = 128;
+ fn safe_get(xs: &[u8], i: usize) -> u8 {
+ *xs.get(i).unwrap_or(&0)
+ }
+
+ let mut i = 0;
+ let mut valid_up_to = 0;
+ while i < self.source.len() {
+ // SAFETY: `i < self.source.len()` per previous line.
+ // For some reason the following are both significantly slower:
+ // while let Some(&byte) = self.source.get(i) {
+ // while let Some(byte) = self.source.get(i).copied() {
+ let byte = unsafe { *self.source.get_unchecked(i) };
+ i += 1;
+
+ if byte < 128 {
+ // This could be a `1 => ...` case in the match below, but for
+ // the common case of all-ASCII inputs, we bypass loading the
+ // sizeable UTF8_CHAR_WIDTH table into cache.
+ } else {
+ let w = utf8_char_width(byte);
+
+ match w {
+ 2 => {
+ if safe_get(self.source, i) & 192 != TAG_CONT_U8 {
+ break;
+ }
+ i += 1;
+ }
+ 3 => {
+ match (byte, safe_get(self.source, i)) {
+ (0xE0, 0xA0..=0xBF) => (),
+ (0xE1..=0xEC, 0x80..=0xBF) => (),
+ (0xED, 0x80..=0x9F) => (),
+ (0xEE..=0xEF, 0x80..=0xBF) => (),
+ _ => break,
+ }
+ i += 1;
+ if safe_get(self.source, i) & 192 != TAG_CONT_U8 {
+ break;
+ }
+ i += 1;
+ }
+ 4 => {
+ match (byte, safe_get(self.source, i)) {
+ (0xF0, 0x90..=0xBF) => (),
+ (0xF1..=0xF3, 0x80..=0xBF) => (),
+ (0xF4, 0x80..=0x8F) => (),
+ _ => break,
+ }
+ i += 1;
+ if safe_get(self.source, i) & 192 != TAG_CONT_U8 {
+ break;
+ }
+ i += 1;
+ if safe_get(self.source, i) & 192 != TAG_CONT_U8 {
+ break;
+ }
+ i += 1;
+ }
+ _ => break,
+ }
+ }
+
+ valid_up_to = i;
+ }
+
+ // SAFETY: `i <= self.source.len()` because it is only ever incremented
+ // via `i += 1` and in between every single one of those increments, `i`
+ // is compared against `self.source.len()`. That happens either
+ // literally by `i < self.source.len()` in the while-loop's condition,
+ // or indirectly by `safe_get(self.source, i) & 192 != TAG_CONT_U8`. The
+ // loop is terminated as soon as the latest `i += 1` has made `i` no
+ // longer less than `self.source.len()`, which means it'll be at most
+ // equal to `self.source.len()`.
+ let (inspected, remaining) = unsafe { self.source.split_at_unchecked(i) };
+ self.source = remaining;
+
+ // SAFETY: `valid_up_to <= i` because it is only ever assigned via
+ // `valid_up_to = i` and `i` only increases.
+ let (valid, broken) = unsafe { inspected.split_at_unchecked(valid_up_to) };
+
+ Some(Utf8LossyChunk {
+ // SAFETY: All bytes up to `valid_up_to` are valid UTF-8.
+ valid: unsafe { from_utf8_unchecked(valid) },
+ broken,
+ })
+ }
+}
+
+impl fmt::Display for Utf8Lossy {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // If we're the empty string then our iterator won't actually yield
+ // anything, so perform the formatting manually
+ if self.bytes.is_empty() {
+ return "".fmt(f);
+ }
+
+ for Utf8LossyChunk { valid, broken } in self.chunks() {
+ // If we successfully decoded the whole chunk as a valid string then
+ // we can return a direct formatting of the string which will also
+ // respect various formatting flags if possible.
+ if valid.len() == self.bytes.len() {
+ assert!(broken.is_empty());
+ return valid.fmt(f);
+ }
+
+ f.write_str(valid)?;
+ if !broken.is_empty() {
+ f.write_char(char::REPLACEMENT_CHARACTER)?;
+ }
+ }
+ Ok(())
+ }
+}
+
+impl fmt::Debug for Utf8Lossy {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_char('"')?;
+
+ for Utf8LossyChunk { valid, broken } in self.chunks() {
+ // Valid part.
+ // Here we partially parse UTF-8 again which is suboptimal.
+ {
+ let mut from = 0;
+ for (i, c) in valid.char_indices() {
+ let esc = c.escape_debug();
+ // If the char needs escaping, flush the backlog so far and
+ // write the escape; otherwise keep scanning.
+ if esc.len() != 1 {
+ f.write_str(&valid[from..i])?;
+ for c in esc {
+ f.write_char(c)?;
+ }
+ from = i + c.len_utf8();
+ }
+ }
+ f.write_str(&valid[from..])?;
+ }
+
+ // Broken parts of string as hex escape.
+ for &b in broken {
+ write!(f, "\\x{:02x}", b)?;
+ }
+ }
+
+ f.write_char('"')
+ }
+}
diff --git a/library/core/src/str/mod.rs b/library/core/src/str/mod.rs
new file mode 100644
index 000000000..c4f2e283e
--- /dev/null
+++ b/library/core/src/str/mod.rs
@@ -0,0 +1,2640 @@
+//! String manipulation.
+//!
+//! For more details, see the [`std::str`] module.
+//!
+//! [`std::str`]: ../../std/str/index.html
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+mod converts;
+mod count;
+mod error;
+mod iter;
+mod traits;
+mod validations;
+
+use self::pattern::Pattern;
+use self::pattern::{DoubleEndedSearcher, ReverseSearcher, Searcher};
+
+use crate::char::{self, EscapeDebugExtArgs};
+use crate::mem;
+use crate::slice::{self, SliceIndex};
+
+pub mod pattern;
+
+#[unstable(feature = "str_internals", issue = "none")]
+#[allow(missing_docs)]
+pub mod lossy;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use converts::{from_utf8, from_utf8_unchecked};
+
+#[stable(feature = "str_mut_extras", since = "1.20.0")]
+pub use converts::{from_utf8_mut, from_utf8_unchecked_mut};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use error::{ParseBoolError, Utf8Error};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use traits::FromStr;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use iter::{Bytes, CharIndices, Chars, Lines, SplitWhitespace};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated)]
+pub use iter::LinesAny;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use iter::{RSplit, RSplitTerminator, Split, SplitTerminator};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use iter::{RSplitN, SplitN};
+
+#[stable(feature = "str_matches", since = "1.2.0")]
+pub use iter::{Matches, RMatches};
+
+#[stable(feature = "str_match_indices", since = "1.5.0")]
+pub use iter::{MatchIndices, RMatchIndices};
+
+#[stable(feature = "encode_utf16", since = "1.8.0")]
+pub use iter::EncodeUtf16;
+
+#[stable(feature = "str_escape", since = "1.34.0")]
+pub use iter::{EscapeDebug, EscapeDefault, EscapeUnicode};
+
+#[stable(feature = "split_ascii_whitespace", since = "1.34.0")]
+pub use iter::SplitAsciiWhitespace;
+
+#[stable(feature = "split_inclusive", since = "1.51.0")]
+pub use iter::SplitInclusive;
+
+#[unstable(feature = "str_internals", issue = "none")]
+pub use validations::{next_code_point, utf8_char_width};
+
+use iter::MatchIndicesInternal;
+use iter::SplitInternal;
+use iter::{MatchesInternal, SplitNInternal};
+
+#[inline(never)]
+#[cold]
+#[track_caller]
+#[rustc_allow_const_fn_unstable(const_eval_select)]
+const fn slice_error_fail(s: &str, begin: usize, end: usize) -> ! {
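+ // `const_eval_select` dispatches to `slice_error_fail_ct` when evaluated
+ // in a const context (where rich formatting is unavailable) and to
+ // `slice_error_fail_rt` at runtime, which builds the detailed message.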
+ // SAFETY: panics for both branches
+ unsafe {
+ crate::intrinsics::const_eval_select(
+ (s, begin, end),
+ slice_error_fail_ct,
+ slice_error_fail_rt,
+ )
+ }
+}
+
+const fn slice_error_fail_ct(_: &str, _: usize, _: usize) -> ! {
+ panic!("failed to slice string");
+}
+
+fn slice_error_fail_rt(s: &str, begin: usize, end: usize) -> ! {
+ const MAX_DISPLAY_LENGTH: usize = 256;
+ let trunc_len = s.floor_char_boundary(MAX_DISPLAY_LENGTH);
+ let s_trunc = &s[..trunc_len];
+ let ellipsis = if trunc_len < s.len() { "[...]" } else { "" };
+
+ // 1. out of bounds
+ if begin > s.len() || end > s.len() {
+ let oob_index = if begin > s.len() { begin } else { end };
+ panic!("byte index {oob_index} is out of bounds of `{s_trunc}`{ellipsis}");
+ }
+
+ // 2. begin <= end
+ assert!(
+ begin <= end,
+ "begin <= end ({} <= {}) when slicing `{}`{}",
+ begin,
+ end,
+ s_trunc,
+ ellipsis
+ );
+
+ // 3. character boundary
+ let index = if !s.is_char_boundary(begin) { begin } else { end };
+ // find the character
+ let char_start = s.floor_char_boundary(index);
+ // `char_start` must be less than len and a char boundary
+ let ch = s[char_start..].chars().next().unwrap();
+ let char_range = char_start..char_start + ch.len_utf8();
+ panic!(
+ "byte index {} is not a char boundary; it is inside {:?} (bytes {:?}) of `{}`{}",
+ index, ch, char_range, s_trunc, ellipsis
+ );
+}
+
+#[cfg(not(test))]
+impl str {
+ /// Returns the length of `self`.
+ ///
+ /// This length is in bytes, not [`char`]s or graphemes. In other words,
+ /// it might not be what a human considers the length of the string.
+ ///
+ /// [`char`]: prim@char
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let len = "foo".len();
+ /// assert_eq!(3, len);
+ ///
+ /// assert_eq!("ƒoo".len(), 4); // fancy f!
+ /// assert_eq!("ƒoo".chars().count(), 3);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_str_len", since = "1.39.0")]
+ #[must_use]
+ #[inline]
+ pub const fn len(&self) -> usize {
+ self.as_bytes().len()
+ }
+
+ /// Returns `true` if `self` has a length of zero bytes.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = "";
+ /// assert!(s.is_empty());
+ ///
+ /// let s = "not empty";
+ /// assert!(!s.is_empty());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_str_is_empty", since = "1.39.0")]
+ #[must_use]
+ #[inline]
+ pub const fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Checks that the `index`-th byte is the first byte in a UTF-8 code point
+ /// sequence or the end of the string.
+ ///
+ /// The start and end of the string (when `index == self.len()`) are
+ /// considered to be boundaries.
+ ///
+ /// Returns `false` if `index` is greater than `self.len()`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let s = "Löwe 老虎 Léopard";
+ /// assert!(s.is_char_boundary(0));
+ /// // start of `老`
+ /// assert!(s.is_char_boundary(6));
+ /// assert!(s.is_char_boundary(s.len()));
+ ///
+ /// // second byte of `ö`
+ /// assert!(!s.is_char_boundary(2));
+ ///
+ /// // third byte of `老`
+ /// assert!(!s.is_char_boundary(8));
+ /// ```
+ #[must_use]
+ #[stable(feature = "is_char_boundary", since = "1.9.0")]
+ #[rustc_const_unstable(feature = "const_is_char_boundary", issue = "none")]
+ #[inline]
+ pub const fn is_char_boundary(&self, index: usize) -> bool {
+ // 0 is always ok.
+ // Test for 0 explicitly so that it can optimize out the check
+ // easily and skip reading string data for that case.
+ // Note that optimizing `self.get(..index)` relies on this.
+ if index == 0 {
+ return true;
+ }
+
+ match self.as_bytes().get(index) {
+ // For `None` we have two options:
+ //
+ // - index == self.len()
+ // Empty strings are valid, so return true
+ // - index > self.len()
+ // In this case return false
+ //
+ // The check is placed exactly here, because it improves generated
+ // code on higher opt-levels. See PR #84751 for more details.
+ None => index == self.len(),
+
+ Some(&b) => b.is_utf8_char_boundary(),
+ }
+ }
+
+ /// Finds the closest `x` not exceeding `index` where `is_char_boundary(x)` is `true`.
+ ///
+ /// This method can help you truncate a string so that it's still valid UTF-8, but doesn't
+ /// exceed a given number of bytes. Note that this is done purely at the character level
+ /// and can still visually split graphemes, even though the underlying characters aren't
+ /// split. For example, the emoji 🧑‍🔬 (scientist) could be split so that the string only
+ /// includes 🧑 (person) instead.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(round_char_boundary)]
+ /// let s = "❤️🧡💛💚💙💜";
+ /// assert_eq!(s.len(), 26);
+ /// assert!(!s.is_char_boundary(13));
+ ///
+ /// let closest = s.floor_char_boundary(13);
+ /// assert_eq!(closest, 10);
+ /// assert_eq!(&s[..closest], "❤️🧡");
+ /// ```
+ #[unstable(feature = "round_char_boundary", issue = "93743")]
+ #[inline]
+ pub fn floor_char_boundary(&self, index: usize) -> usize {
+ if index >= self.len() {
+ self.len()
+ } else {
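+ // A UTF-8 encoded char occupies at most 4 bytes, so the closest
+ // boundary at or before `index` must lie within `index - 3 ..= index`.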
+ let lower_bound = index.saturating_sub(3);
+ let new_index = self.as_bytes()[lower_bound..=index]
+ .iter()
+ .rposition(|b| b.is_utf8_char_boundary());
+
+ // SAFETY: we know that the character boundary will be within four bytes
+ unsafe { lower_bound + new_index.unwrap_unchecked() }
+ }
+ }
+
+ /// Finds the closest `x` not below `index` where `is_char_boundary(x)` is `true`.
+ ///
+ /// This method is the natural complement to [`floor_char_boundary`]. See that method
+ /// for more details.
+ ///
+ /// [`floor_char_boundary`]: str::floor_char_boundary
+ ///
+ /// # Panics
+ ///
+ /// Panics if `index > self.len()`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(round_char_boundary)]
+ /// let s = "❤️🧡💛💚💙💜";
+ /// assert_eq!(s.len(), 26);
+ /// assert!(!s.is_char_boundary(13));
+ ///
+ /// let closest = s.ceil_char_boundary(13);
+ /// assert_eq!(closest, 14);
+ /// assert_eq!(&s[..closest], "❤️🧡💛");
+ /// ```
+ #[unstable(feature = "round_char_boundary", issue = "93743")]
+ #[inline]
+ pub fn ceil_char_boundary(&self, index: usize) -> usize {
+ if index > self.len() {
+ slice_error_fail(self, index, index)
+ } else {
+ let upper_bound = Ord::min(index + 4, self.len());
+ self.as_bytes()[index..upper_bound]
+ .iter()
+ .position(|b| b.is_utf8_char_boundary())
+ .map_or(upper_bound, |pos| pos + index)
+ }
+ }
+
+ /// Converts a string slice to a byte slice. To convert the byte slice back
+ /// into a string slice, use the [`from_utf8`] function.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let bytes = "bors".as_bytes();
+ /// assert_eq!(b"bors", bytes);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "str_as_bytes", since = "1.39.0")]
+ #[must_use]
+ #[inline(always)]
+ #[allow(unused_attributes)]
+ pub const fn as_bytes(&self) -> &[u8] {
+ // SAFETY: const sound because we transmute two types with the same layout
+ unsafe { mem::transmute(self) }
+ }
+
+ /// Converts a mutable string slice to a mutable byte slice.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that the content of the slice is valid UTF-8
+ /// before the borrow ends and the underlying `str` is used.
+ ///
+ /// Use of a `str` whose contents are not valid UTF-8 is undefined behavior.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("Hello");
+ /// let bytes = unsafe { s.as_bytes_mut() };
+ ///
+ /// assert_eq!(b"Hello", bytes);
+ /// ```
+ ///
+ /// Mutability:
+ ///
+ /// ```
+ /// let mut s = String::from("🗻∈🌏");
+ ///
+ /// unsafe {
+ /// let bytes = s.as_bytes_mut();
+ ///
+ /// bytes[0] = 0xF0;
+ /// bytes[1] = 0x9F;
+ /// bytes[2] = 0x8D;
+ /// bytes[3] = 0x94;
+ /// }
+ ///
+ /// assert_eq!("🍔∈🌏", s);
+ /// ```
+ #[stable(feature = "str_mut_extras", since = "1.20.0")]
+ #[must_use]
+ #[inline(always)]
+ pub unsafe fn as_bytes_mut(&mut self) -> &mut [u8] {
+ // SAFETY: the cast from `&str` to `&[u8]` is safe since `str`
+ // has the same layout as `&[u8]` (only libstd can make this guarantee).
+ // The pointer dereference is safe since it comes from a mutable reference which
+ // is guaranteed to be valid for writes.
+ unsafe { &mut *(self as *mut str as *mut [u8]) }
+ }
+
+ /// Converts a string slice to a raw pointer.
+ ///
+ /// As string slices are a slice of bytes, the raw pointer points to a
+ /// [`u8`]. This pointer points to the first byte of the string
+ /// slice.
+ ///
+ /// The caller must ensure that the returned pointer is never written to.
+ /// If you need to mutate the contents of the string slice, use [`as_mut_ptr`].
+ ///
+ /// [`as_mut_ptr`]: str::as_mut_ptr
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = "Hello";
+ /// let ptr = s.as_ptr();
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "rustc_str_as_ptr", since = "1.32.0")]
+ #[must_use]
+ #[inline]
+ pub const fn as_ptr(&self) -> *const u8 {
+ self as *const str as *const u8
+ }
+
+ /// Converts a mutable string slice to a raw pointer.
+ ///
+ /// As string slices are a slice of bytes, the raw pointer points to a
+ /// [`u8`]. This pointer points to the first byte of the string
+ /// slice.
+ ///
+ /// It is your responsibility to make sure that the string slice is only
+ /// modified in ways that keep it valid UTF-8.
+ #[stable(feature = "str_as_mut_ptr", since = "1.36.0")]
+ #[must_use]
+ #[inline]
+ pub fn as_mut_ptr(&mut self) -> *mut u8 {
+ self as *mut str as *mut u8
+ }
+
+ /// Returns a subslice of `str`.
+ ///
+ /// This is the non-panicking alternative to indexing the `str`. Returns
+ /// [`None`] whenever the equivalent indexing operation would panic.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = String::from("🗻∈🌏");
+ ///
+ /// assert_eq!(Some("🗻"), v.get(0..4));
+ ///
+ /// // indices not on UTF-8 sequence boundaries
+ /// assert!(v.get(1..).is_none());
+ /// assert!(v.get(..8).is_none());
+ ///
+ /// // out of bounds
+ /// assert!(v.get(..42).is_none());
+ /// ```
+ #[stable(feature = "str_checked_slicing", since = "1.20.0")]
+ #[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+ #[inline]
+ pub const fn get<I: ~const SliceIndex<str>>(&self, i: I) -> Option<&I::Output> {
+ i.get(self)
+ }
+
+ /// Returns a mutable subslice of `str`.
+ ///
+ /// This is the non-panicking alternative to indexing the `str`. Returns
+ /// [`None`] whenever the equivalent indexing operation would panic.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = String::from("hello");
+ /// // correct length
+ /// assert!(v.get_mut(0..5).is_some());
+ /// // out of bounds
+ /// assert!(v.get_mut(..42).is_none());
+ /// assert_eq!(Some("he"), v.get_mut(0..2).map(|v| &*v));
+ ///
+ /// assert_eq!("hello", v);
+ /// {
+ /// let s = v.get_mut(0..2);
+ /// let s = s.map(|s| {
+ /// s.make_ascii_uppercase();
+ /// &*s
+ /// });
+ /// assert_eq!(Some("HE"), s);
+ /// }
+ /// assert_eq!("HEllo", v);
+ /// ```
+ #[stable(feature = "str_checked_slicing", since = "1.20.0")]
+ #[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+ #[inline]
+ pub const fn get_mut<I: ~const SliceIndex<str>>(&mut self, i: I) -> Option<&mut I::Output> {
+ i.get_mut(self)
+ }
+
+ /// Returns an unchecked subslice of `str`.
+ ///
+ /// This is the unchecked alternative to indexing the `str`.
+ ///
+ /// # Safety
+ ///
+ /// Callers of this function must ensure that these preconditions are
+ /// satisfied:
+ ///
+ /// * The starting index must not exceed the ending index;
+ /// * Indexes must be within bounds of the original slice;
+ /// * Indexes must lie on UTF-8 sequence boundaries.
+ ///
+ /// Failing that, the returned string slice may reference invalid memory or
+ /// violate the invariants communicated by the `str` type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = "🗻∈🌏";
+ /// unsafe {
+ /// assert_eq!("🗻", v.get_unchecked(0..4));
+ /// assert_eq!("∈", v.get_unchecked(4..7));
+ /// assert_eq!("🌏", v.get_unchecked(7..11));
+ /// }
+ /// ```
+ #[stable(feature = "str_checked_slicing", since = "1.20.0")]
+ #[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+ #[inline]
+ pub const unsafe fn get_unchecked<I: ~const SliceIndex<str>>(&self, i: I) -> &I::Output {
+ // SAFETY: the caller must uphold the safety contract for `get_unchecked`;
+ // the slice is dereferenceable because `self` is a safe reference.
+ // The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
+ unsafe { &*i.get_unchecked(self) }
+ }
+
+ /// Returns a mutable, unchecked subslice of `str`.
+ ///
+ /// This is the unchecked alternative to indexing the `str`.
+ ///
+ /// # Safety
+ ///
+ /// Callers of this function must ensure that these preconditions are
+ /// satisfied:
+ ///
+ /// * The starting index must not exceed the ending index;
+ /// * Indexes must be within bounds of the original slice;
+ /// * Indexes must lie on UTF-8 sequence boundaries.
+ ///
+ /// Failing that, the returned string slice may reference invalid memory or
+ /// violate the invariants communicated by the `str` type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = String::from("🗻∈🌏");
+ /// unsafe {
+ /// assert_eq!("🗻", v.get_unchecked_mut(0..4));
+ /// assert_eq!("∈", v.get_unchecked_mut(4..7));
+ /// assert_eq!("🌏", v.get_unchecked_mut(7..11));
+ /// }
+ /// ```
+ #[stable(feature = "str_checked_slicing", since = "1.20.0")]
+ #[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+ #[inline]
+ pub const unsafe fn get_unchecked_mut<I: ~const SliceIndex<str>>(
+ &mut self,
+ i: I,
+ ) -> &mut I::Output {
+ // SAFETY: the caller must uphold the safety contract for `get_unchecked_mut`;
+ // the slice is dereferenceable because `self` is a safe reference.
+ // The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
+ unsafe { &mut *i.get_unchecked_mut(self) }
+ }
+
+ /// Creates a string slice from another string slice, bypassing safety
+ /// checks.
+ ///
+ /// This is generally not recommended; use with caution! For a safe
+ /// alternative see [`str`] and [`Index`].
+ ///
+ /// [`Index`]: crate::ops::Index
+ ///
+ /// This new slice goes from `begin` to `end`, including `begin` but
+ /// excluding `end`.
+ ///
+ /// To get a mutable string slice instead, see the
+ /// [`slice_mut_unchecked`] method.
+ ///
+ /// [`slice_mut_unchecked`]: str::slice_mut_unchecked
+ ///
+ /// # Safety
+ ///
+ /// Callers of this function must ensure that three preconditions are
+ /// satisfied:
+ ///
+ /// * `begin` must not exceed `end`.
+ /// * `begin` and `end` must be byte positions within the string slice.
+ /// * `begin` and `end` must lie on UTF-8 sequence boundaries.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = "Löwe 老虎 Léopard";
+ ///
+ /// unsafe {
+ /// assert_eq!("Löwe 老虎 Léopard", s.slice_unchecked(0, 21));
+ /// }
+ ///
+ /// let s = "Hello, world!";
+ ///
+ /// unsafe {
+ /// assert_eq!("world", s.slice_unchecked(7, 12));
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[deprecated(since = "1.29.0", note = "use `get_unchecked(begin..end)` instead")]
+ #[must_use]
+ #[inline]
+ pub unsafe fn slice_unchecked(&self, begin: usize, end: usize) -> &str {
+ // SAFETY: the caller must uphold the safety contract for `get_unchecked`;
+ // the slice is dereferenceable because `self` is a safe reference.
+ // The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
+ unsafe { &*(begin..end).get_unchecked(self) }
+ }
+
+ /// Creates a string slice from another string slice, bypassing safety
+ /// checks.
+ ///
+ /// This is generally not recommended; use with caution! For a safe
+ /// alternative see [`str`] and [`IndexMut`].
+ ///
+ /// [`IndexMut`]: crate::ops::IndexMut
+ ///
+ /// This new slice goes from `begin` to `end`, including `begin` but
+ /// excluding `end`.
+ ///
+ /// To get an immutable string slice instead, see the
+ /// [`slice_unchecked`] method.
+ ///
+ /// [`slice_unchecked`]: str::slice_unchecked
+ ///
+ /// # Safety
+ ///
+ /// Callers of this function must ensure that three preconditions are
+ /// satisfied:
+ ///
+ /// * `begin` must not exceed `end`.
+ /// * `begin` and `end` must be byte positions within the string slice.
+ /// * `begin` and `end` must lie on UTF-8 sequence boundaries.
+ #[stable(feature = "str_slice_mut", since = "1.5.0")]
+ #[deprecated(since = "1.29.0", note = "use `get_unchecked_mut(begin..end)` instead")]
+ #[inline]
+ pub unsafe fn slice_mut_unchecked(&mut self, begin: usize, end: usize) -> &mut str {
+ // SAFETY: the caller must uphold the safety contract for `get_unchecked_mut`;
+ // the slice is dereferenceable because `self` is a safe reference.
+ // The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
+ unsafe { &mut *(begin..end).get_unchecked_mut(self) }
+ }
+
+ /// Divides one string slice into two at an index.
+ ///
+ /// The argument, `mid`, should be a byte offset from the start of the
+ /// string. It must also be on the boundary of a UTF-8 code point.
+ ///
+ /// The two slices returned go from the start of the string slice to `mid`,
+ /// and from `mid` to the end of the string slice.
+ ///
+ /// To get mutable string slices instead, see the [`split_at_mut`]
+ /// method.
+ ///
+ /// [`split_at_mut`]: str::split_at_mut
+ ///
+ /// # Panics
+ ///
+ /// Panics if `mid` is not on a UTF-8 code point boundary, or if it is
+ /// past the end of the last code point of the string slice.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = "Per Martin-Löf";
+ ///
+ /// let (first, last) = s.split_at(3);
+ ///
+ /// assert_eq!("Per", first);
+ /// assert_eq!(" Martin-Löf", last);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "str_split_at", since = "1.4.0")]
+ pub fn split_at(&self, mid: usize) -> (&str, &str) {
+ // is_char_boundary checks that the index is in [0, .len()]
+ if self.is_char_boundary(mid) {
+ // SAFETY: just checked that `mid` is on a char boundary.
+ unsafe { (self.get_unchecked(0..mid), self.get_unchecked(mid..self.len())) }
+ } else {
+ slice_error_fail(self, 0, mid)
+ }
+ }
+
+ /// Divides one mutable string slice into two at an index.
+ ///
+ /// The argument, `mid`, should be a byte offset from the start of the
+ /// string. It must also be on the boundary of a UTF-8 code point.
+ ///
+ /// The two slices returned go from the start of the string slice to `mid`,
+ /// and from `mid` to the end of the string slice.
+ ///
+ /// To get immutable string slices instead, see the [`split_at`] method.
+ ///
+ /// [`split_at`]: str::split_at
+ ///
+ /// # Panics
+ ///
+ /// Panics if `mid` is not on a UTF-8 code point boundary, or if it is
+ /// past the end of the last code point of the string slice.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = "Per Martin-Löf".to_string();
+ /// {
+ /// let (first, last) = s.split_at_mut(3);
+ /// first.make_ascii_uppercase();
+ /// assert_eq!("PER", first);
+ /// assert_eq!(" Martin-Löf", last);
+ /// }
+ /// assert_eq!("PER Martin-Löf", s);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "str_split_at", since = "1.4.0")]
+ pub fn split_at_mut(&mut self, mid: usize) -> (&mut str, &mut str) {
+ // is_char_boundary checks that the index is in [0, .len()]
+ if self.is_char_boundary(mid) {
+ let len = self.len();
+ let ptr = self.as_mut_ptr();
+ // SAFETY: just checked that `mid` is on a char boundary.
+ unsafe {
+ (
+ from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr, mid)),
+ from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr.add(mid), len - mid)),
+ )
+ }
+ } else {
+ slice_error_fail(self, 0, mid)
+ }
+ }
+
+ /// Returns an iterator over the [`char`]s of a string slice.
+ ///
+ /// As a string slice consists of valid UTF-8, we can iterate through a
+ /// string slice by [`char`]. This method returns such an iterator.
+ ///
+ /// It's important to remember that [`char`] represents a Unicode Scalar
+ /// Value, and might not match your idea of what a 'character' is. Iteration
+ /// over grapheme clusters may be what you actually want. This functionality
+ /// is not provided by Rust's standard library; check crates.io instead.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let word = "goodbye";
+ ///
+ /// let count = word.chars().count();
+ /// assert_eq!(7, count);
+ ///
+ /// let mut chars = word.chars();
+ ///
+ /// assert_eq!(Some('g'), chars.next());
+ /// assert_eq!(Some('o'), chars.next());
+ /// assert_eq!(Some('o'), chars.next());
+ /// assert_eq!(Some('d'), chars.next());
+ /// assert_eq!(Some('b'), chars.next());
+ /// assert_eq!(Some('y'), chars.next());
+ /// assert_eq!(Some('e'), chars.next());
+ ///
+ /// assert_eq!(None, chars.next());
+ /// ```
+ ///
+ /// Remember, [`char`]s might not match your intuition about characters:
+ ///
+ /// [`char`]: prim@char
+ ///
+ /// ```
+ /// let y = "y̆";
+ ///
+ /// let mut chars = y.chars();
+ ///
+ /// assert_eq!(Some('y'), chars.next()); // not 'y̆'
+ /// assert_eq!(Some('\u{0306}'), chars.next());
+ ///
+ /// assert_eq!(None, chars.next());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn chars(&self) -> Chars<'_> {
+ Chars { iter: self.as_bytes().iter() }
+ }
+
+ /// Returns an iterator over the [`char`]s of a string slice, and their
+ /// positions.
+ ///
+ /// As a string slice consists of valid UTF-8, we can iterate through a
+ /// string slice by [`char`]. This method returns an iterator of both
+ /// these [`char`]s, as well as their byte positions.
+ ///
+ /// The iterator yields tuples. The position is first, the [`char`] is
+ /// second.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let word = "goodbye";
+ ///
+ /// let count = word.char_indices().count();
+ /// assert_eq!(7, count);
+ ///
+ /// let mut char_indices = word.char_indices();
+ ///
+ /// assert_eq!(Some((0, 'g')), char_indices.next());
+ /// assert_eq!(Some((1, 'o')), char_indices.next());
+ /// assert_eq!(Some((2, 'o')), char_indices.next());
+ /// assert_eq!(Some((3, 'd')), char_indices.next());
+ /// assert_eq!(Some((4, 'b')), char_indices.next());
+ /// assert_eq!(Some((5, 'y')), char_indices.next());
+ /// assert_eq!(Some((6, 'e')), char_indices.next());
+ ///
+ /// assert_eq!(None, char_indices.next());
+ /// ```
+ ///
+ /// Remember, [`char`]s might not match your intuition about characters:
+ ///
+ /// [`char`]: prim@char
+ ///
+ /// ```
+ /// let yes = "y̆es";
+ ///
+ /// let mut char_indices = yes.char_indices();
+ ///
+ /// assert_eq!(Some((0, 'y')), char_indices.next()); // not (0, 'y̆')
+ /// assert_eq!(Some((1, '\u{0306}')), char_indices.next());
+ ///
+    /// // note the 3 here - the previous character took up two bytes
+ /// assert_eq!(Some((3, 'e')), char_indices.next());
+ /// assert_eq!(Some((4, 's')), char_indices.next());
+ ///
+ /// assert_eq!(None, char_indices.next());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn char_indices(&self) -> CharIndices<'_> {
+ CharIndices { front_offset: 0, iter: self.chars() }
+ }
+
+ /// An iterator over the bytes of a string slice.
+ ///
+ /// As a string slice consists of a sequence of bytes, we can iterate
+ /// through a string slice by byte. This method returns such an iterator.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut bytes = "bors".bytes();
+ ///
+ /// assert_eq!(Some(b'b'), bytes.next());
+ /// assert_eq!(Some(b'o'), bytes.next());
+ /// assert_eq!(Some(b'r'), bytes.next());
+ /// assert_eq!(Some(b's'), bytes.next());
+ ///
+ /// assert_eq!(None, bytes.next());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn bytes(&self) -> Bytes<'_> {
+ Bytes(self.as_bytes().iter().copied())
+ }
+
+ /// Splits a string slice by whitespace.
+ ///
+    /// The iterator returned yields string slices that are sub-slices of the
+    /// original string slice, separated by any amount of whitespace.
+ ///
+ /// 'Whitespace' is defined according to the terms of the Unicode Derived
+ /// Core Property `White_Space`. If you only want to split on ASCII whitespace
+ /// instead, use [`split_ascii_whitespace`].
+ ///
+ /// [`split_ascii_whitespace`]: str::split_ascii_whitespace
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut iter = "A few words".split_whitespace();
+ ///
+ /// assert_eq!(Some("A"), iter.next());
+ /// assert_eq!(Some("few"), iter.next());
+ /// assert_eq!(Some("words"), iter.next());
+ ///
+ /// assert_eq!(None, iter.next());
+ /// ```
+ ///
+ /// All kinds of whitespace are considered:
+ ///
+ /// ```
+ /// let mut iter = " Mary had\ta\u{2009}little \n\t lamb".split_whitespace();
+ /// assert_eq!(Some("Mary"), iter.next());
+ /// assert_eq!(Some("had"), iter.next());
+ /// assert_eq!(Some("a"), iter.next());
+ /// assert_eq!(Some("little"), iter.next());
+ /// assert_eq!(Some("lamb"), iter.next());
+ ///
+ /// assert_eq!(None, iter.next());
+ /// ```
+ #[must_use = "this returns the split string as an iterator, \
+ without modifying the original"]
+ #[stable(feature = "split_whitespace", since = "1.1.0")]
+ #[cfg_attr(not(test), rustc_diagnostic_item = "str_split_whitespace")]
+ #[inline]
+ pub fn split_whitespace(&self) -> SplitWhitespace<'_> {
+ SplitWhitespace { inner: self.split(IsWhitespace).filter(IsNotEmpty) }
+ }
+
+ /// Splits a string slice by ASCII whitespace.
+ ///
+    /// The iterator returned yields string slices that are sub-slices of the
+    /// original string slice, separated by any amount of ASCII whitespace.
+ ///
+ /// To split by Unicode `Whitespace` instead, use [`split_whitespace`].
+ ///
+ /// [`split_whitespace`]: str::split_whitespace
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut iter = "A few words".split_ascii_whitespace();
+ ///
+ /// assert_eq!(Some("A"), iter.next());
+ /// assert_eq!(Some("few"), iter.next());
+ /// assert_eq!(Some("words"), iter.next());
+ ///
+ /// assert_eq!(None, iter.next());
+ /// ```
+ ///
+ /// All kinds of ASCII whitespace are considered:
+ ///
+ /// ```
+ /// let mut iter = " Mary had\ta little \n\t lamb".split_ascii_whitespace();
+ /// assert_eq!(Some("Mary"), iter.next());
+ /// assert_eq!(Some("had"), iter.next());
+ /// assert_eq!(Some("a"), iter.next());
+ /// assert_eq!(Some("little"), iter.next());
+ /// assert_eq!(Some("lamb"), iter.next());
+ ///
+ /// assert_eq!(None, iter.next());
+ /// ```
+ #[must_use = "this returns the split string as an iterator, \
+ without modifying the original"]
+ #[stable(feature = "split_ascii_whitespace", since = "1.34.0")]
+ #[inline]
+ pub fn split_ascii_whitespace(&self) -> SplitAsciiWhitespace<'_> {
+ let inner =
+ self.as_bytes().split(IsAsciiWhitespace).filter(BytesIsNotEmpty).map(UnsafeBytesToStr);
+ SplitAsciiWhitespace { inner }
+ }
+
+ /// An iterator over the lines of a string, as string slices.
+ ///
+    /// Lines are split at line endings, which are either a newline (`\n`) or
+    /// a carriage return followed by a line feed (`\r\n`).
+ ///
+ /// The final line ending is optional. A string that ends with a final line
+ /// ending will return the same lines as an otherwise identical string
+ /// without a final line ending.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let text = "foo\r\nbar\n\nbaz\n";
+ /// let mut lines = text.lines();
+ ///
+ /// assert_eq!(Some("foo"), lines.next());
+ /// assert_eq!(Some("bar"), lines.next());
+ /// assert_eq!(Some(""), lines.next());
+ /// assert_eq!(Some("baz"), lines.next());
+ ///
+ /// assert_eq!(None, lines.next());
+ /// ```
+ ///
+ /// The final line ending isn't required:
+ ///
+ /// ```
+ /// let text = "foo\nbar\n\r\nbaz";
+ /// let mut lines = text.lines();
+ ///
+ /// assert_eq!(Some("foo"), lines.next());
+ /// assert_eq!(Some("bar"), lines.next());
+ /// assert_eq!(Some(""), lines.next());
+ /// assert_eq!(Some("baz"), lines.next());
+ ///
+ /// assert_eq!(None, lines.next());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn lines(&self) -> Lines<'_> {
+ Lines(self.split_terminator('\n').map(LinesAnyMap))
+ }
+
+ /// An iterator over the lines of a string.
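+    ///
+    /// `lines_any` behaves like [`lines`]; a minimal sketch:
+    ///
+    /// [`lines`]: str::lines
+    ///
+    /// ```
+    /// #![allow(deprecated)]
+    /// let mut lines = "foo\r\nbar\n".lines_any();
+    ///
+    /// assert_eq!(Some("foo"), lines.next());
+    /// assert_eq!(Some("bar"), lines.next());
+    ///
+    /// assert_eq!(None, lines.next());
+    /// ```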
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[deprecated(since = "1.4.0", note = "use lines() instead now")]
+ #[inline]
+ #[allow(deprecated)]
+ pub fn lines_any(&self) -> LinesAny<'_> {
+ LinesAny(self.lines())
+ }
+
+ /// Returns an iterator of `u16` over the string encoded as UTF-16.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let text = "Zażółć gęślą jaźń";
+ ///
+ /// let utf8_len = text.len();
+ /// let utf16_len = text.encode_utf16().count();
+ ///
+ /// assert!(utf16_len <= utf8_len);
+ /// ```
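+    ///
+    /// Characters outside the Basic Multilingual Plane are encoded as
+    /// surrogate pairs; the exact units below are shown for illustration:
+    ///
+    /// ```
+    /// let v: Vec<u16> = "𝄞mus".encode_utf16().collect();
+    ///
+    /// assert_eq!(v, [0xD834, 0xDD1E, 0x006D, 0x0075, 0x0073]);
+    /// ```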
+ #[must_use = "this returns the encoded string as an iterator, \
+ without modifying the original"]
+ #[stable(feature = "encode_utf16", since = "1.8.0")]
+ pub fn encode_utf16(&self) -> EncodeUtf16<'_> {
+ EncodeUtf16 { chars: self.chars(), extra: 0 }
+ }
+
+ /// Returns `true` if the given pattern matches a sub-slice of
+ /// this string slice.
+ ///
+ /// Returns `false` if it does not.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let bananas = "bananas";
+ ///
+ /// assert!(bananas.contains("nana"));
+ /// assert!(!bananas.contains("apples"));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn contains<'a, P: Pattern<'a>>(&'a self, pat: P) -> bool {
+ pat.is_contained_in(self)
+ }
+
+ /// Returns `true` if the given pattern matches a prefix of this
+ /// string slice.
+ ///
+ /// Returns `false` if it does not.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let bananas = "bananas";
+ ///
+ /// assert!(bananas.starts_with("bana"));
+ /// assert!(!bananas.starts_with("nana"));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn starts_with<'a, P: Pattern<'a>>(&'a self, pat: P) -> bool {
+ pat.is_prefix_of(self)
+ }
+
+ /// Returns `true` if the given pattern matches a suffix of this
+ /// string slice.
+ ///
+ /// Returns `false` if it does not.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let bananas = "bananas";
+ ///
+ /// assert!(bananas.ends_with("anas"));
+ /// assert!(!bananas.ends_with("nana"));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn ends_with<'a, P>(&'a self, pat: P) -> bool
+ where
+ P: Pattern<'a, Searcher: ReverseSearcher<'a>>,
+ {
+ pat.is_suffix_of(self)
+ }
+
+ /// Returns the byte index of the first character of this string slice that
+ /// matches the pattern.
+ ///
+ /// Returns [`None`] if the pattern doesn't match.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Examples
+ ///
+ /// Simple patterns:
+ ///
+ /// ```
+ /// let s = "Löwe 老虎 Léopard Gepardi";
+ ///
+ /// assert_eq!(s.find('L'), Some(0));
+ /// assert_eq!(s.find('é'), Some(14));
+ /// assert_eq!(s.find("pard"), Some(17));
+ /// ```
+ ///
+ /// More complex patterns using point-free style and closures:
+ ///
+ /// ```
+ /// let s = "Löwe 老虎 Léopard";
+ ///
+ /// assert_eq!(s.find(char::is_whitespace), Some(5));
+ /// assert_eq!(s.find(char::is_lowercase), Some(1));
+ /// assert_eq!(s.find(|c: char| c.is_whitespace() || c.is_lowercase()), Some(1));
+ /// assert_eq!(s.find(|c: char| (c < 'o') && (c > 'a')), Some(4));
+ /// ```
+ ///
+ /// Not finding the pattern:
+ ///
+ /// ```
+ /// let s = "Löwe 老虎 Léopard";
+ /// let x: &[_] = &['1', '2'];
+ ///
+ /// assert_eq!(s.find(x), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn find<'a, P: Pattern<'a>>(&'a self, pat: P) -> Option<usize> {
+ pat.into_searcher(self).next_match().map(|(i, _)| i)
+ }
+
+ /// Returns the byte index for the first character of the last match of the pattern in
+ /// this string slice.
+ ///
+ /// Returns [`None`] if the pattern doesn't match.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Examples
+ ///
+ /// Simple patterns:
+ ///
+ /// ```
+ /// let s = "Löwe 老虎 Léopard Gepardi";
+ ///
+ /// assert_eq!(s.rfind('L'), Some(13));
+ /// assert_eq!(s.rfind('é'), Some(14));
+ /// assert_eq!(s.rfind("pard"), Some(24));
+ /// ```
+ ///
+ /// More complex patterns with closures:
+ ///
+ /// ```
+ /// let s = "Löwe 老虎 Léopard";
+ ///
+ /// assert_eq!(s.rfind(char::is_whitespace), Some(12));
+ /// assert_eq!(s.rfind(char::is_lowercase), Some(20));
+ /// ```
+ ///
+ /// Not finding the pattern:
+ ///
+ /// ```
+ /// let s = "Löwe 老虎 Léopard";
+ /// let x: &[_] = &['1', '2'];
+ ///
+ /// assert_eq!(s.rfind(x), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn rfind<'a, P>(&'a self, pat: P) -> Option<usize>
+ where
+ P: Pattern<'a, Searcher: ReverseSearcher<'a>>,
+ {
+ pat.into_searcher(self).next_match_back().map(|(i, _)| i)
+ }
+
+ /// An iterator over substrings of this string slice, separated by
+ /// characters matched by a pattern.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Iterator behavior
+ ///
+ /// The returned iterator will be a [`DoubleEndedIterator`] if the pattern
+ /// allows a reverse search and forward/reverse search yields the same
+ /// elements. This is true for, e.g., [`char`], but not for `&str`.
+ ///
+ /// If the pattern allows a reverse search but its results might differ
+ /// from a forward search, the [`rsplit`] method can be used.
+ ///
+ /// [`rsplit`]: str::rsplit
+ ///
+ /// # Examples
+ ///
+ /// Simple patterns:
+ ///
+ /// ```
+ /// let v: Vec<&str> = "Mary had a little lamb".split(' ').collect();
+ /// assert_eq!(v, ["Mary", "had", "a", "little", "lamb"]);
+ ///
+ /// let v: Vec<&str> = "".split('X').collect();
+ /// assert_eq!(v, [""]);
+ ///
+ /// let v: Vec<&str> = "lionXXtigerXleopard".split('X').collect();
+ /// assert_eq!(v, ["lion", "", "tiger", "leopard"]);
+ ///
+ /// let v: Vec<&str> = "lion::tiger::leopard".split("::").collect();
+ /// assert_eq!(v, ["lion", "tiger", "leopard"]);
+ ///
+ /// let v: Vec<&str> = "abc1def2ghi".split(char::is_numeric).collect();
+ /// assert_eq!(v, ["abc", "def", "ghi"]);
+ ///
+ /// let v: Vec<&str> = "lionXtigerXleopard".split(char::is_uppercase).collect();
+ /// assert_eq!(v, ["lion", "tiger", "leopard"]);
+ /// ```
+ ///
+ /// If the pattern is a slice of chars, split on each occurrence of any of the characters:
+ ///
+ /// ```
+ /// let v: Vec<&str> = "2020-11-03 23:59".split(&['-', ' ', ':', '@'][..]).collect();
+ /// assert_eq!(v, ["2020", "11", "03", "23", "59"]);
+ /// ```
+ ///
+ /// A more complex pattern, using a closure:
+ ///
+ /// ```
+ /// let v: Vec<&str> = "abc1defXghi".split(|c| c == '1' || c == 'X').collect();
+ /// assert_eq!(v, ["abc", "def", "ghi"]);
+ /// ```
+ ///
+ /// If a string contains multiple contiguous separators, you will end up
+ /// with empty strings in the output:
+ ///
+ /// ```
+ /// let x = "||||a||b|c".to_string();
+ /// let d: Vec<_> = x.split('|').collect();
+ ///
+ /// assert_eq!(d, &["", "", "", "", "a", "", "b", "c"]);
+ /// ```
+ ///
+    /// Contiguous separators yield empty strings between them.
+ ///
+ /// ```
+ /// let x = "(///)".to_string();
+ /// let d: Vec<_> = x.split('/').collect();
+ ///
+ /// assert_eq!(d, &["(", "", "", ")"]);
+ /// ```
+ ///
+    /// Separators at the start or end of a string are flanked
+    /// by empty strings.
+ ///
+ /// ```
+ /// let d: Vec<_> = "010".split("0").collect();
+ /// assert_eq!(d, &["", "1", ""]);
+ /// ```
+ ///
+ /// When the empty string is used as a separator, it separates
+ /// every character in the string, along with the beginning
+ /// and end of the string.
+ ///
+ /// ```
+ /// let f: Vec<_> = "rust".split("").collect();
+ /// assert_eq!(f, &["", "r", "u", "s", "t", ""]);
+ /// ```
+ ///
+ /// Contiguous separators can lead to possibly surprising behavior
+ /// when whitespace is used as the separator. This code is correct:
+ ///
+ /// ```
+    /// let x = "    a  b c".to_string();
+ /// let d: Vec<_> = x.split(' ').collect();
+ ///
+ /// assert_eq!(d, &["", "", "", "", "a", "", "b", "c"]);
+ /// ```
+ ///
+ /// It does _not_ give you:
+ ///
+ /// ```,ignore
+ /// assert_eq!(d, &["a", "b", "c"]);
+ /// ```
+ ///
+ /// Use [`split_whitespace`] for this behavior.
+ ///
+ /// [`split_whitespace`]: str::split_whitespace
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn split<'a, P: Pattern<'a>>(&'a self, pat: P) -> Split<'a, P> {
+ Split(SplitInternal {
+ start: 0,
+ end: self.len(),
+ matcher: pat.into_searcher(self),
+ allow_trailing_empty: true,
+ finished: false,
+ })
+ }
+
+ /// An iterator over substrings of this string slice, separated by
+ /// characters matched by a pattern. Differs from the iterator produced by
+ /// `split` in that `split_inclusive` leaves the matched part as the
+ /// terminator of the substring.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v: Vec<&str> = "Mary had a little lamb\nlittle lamb\nlittle lamb."
+ /// .split_inclusive('\n').collect();
+ /// assert_eq!(v, ["Mary had a little lamb\n", "little lamb\n", "little lamb."]);
+ /// ```
+ ///
+ /// If the last element of the string is matched,
+ /// that element will be considered the terminator of the preceding substring.
+ /// That substring will be the last item returned by the iterator.
+ ///
+ /// ```
+ /// let v: Vec<&str> = "Mary had a little lamb\nlittle lamb\nlittle lamb.\n"
+ /// .split_inclusive('\n').collect();
+ /// assert_eq!(v, ["Mary had a little lamb\n", "little lamb\n", "little lamb.\n"]);
+ /// ```
+ #[stable(feature = "split_inclusive", since = "1.51.0")]
+ #[inline]
+ pub fn split_inclusive<'a, P: Pattern<'a>>(&'a self, pat: P) -> SplitInclusive<'a, P> {
+ SplitInclusive(SplitInternal {
+ start: 0,
+ end: self.len(),
+ matcher: pat.into_searcher(self),
+ allow_trailing_empty: false,
+ finished: false,
+ })
+ }
+
+ /// An iterator over substrings of the given string slice, separated by
+ /// characters matched by a pattern and yielded in reverse order.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Iterator behavior
+ ///
+ /// The returned iterator requires that the pattern supports a reverse
+ /// search, and it will be a [`DoubleEndedIterator`] if a forward/reverse
+ /// search yields the same elements.
+ ///
+ /// For iterating from the front, the [`split`] method can be used.
+ ///
+ /// [`split`]: str::split
+ ///
+ /// # Examples
+ ///
+ /// Simple patterns:
+ ///
+ /// ```
+ /// let v: Vec<&str> = "Mary had a little lamb".rsplit(' ').collect();
+ /// assert_eq!(v, ["lamb", "little", "a", "had", "Mary"]);
+ ///
+ /// let v: Vec<&str> = "".rsplit('X').collect();
+ /// assert_eq!(v, [""]);
+ ///
+ /// let v: Vec<&str> = "lionXXtigerXleopard".rsplit('X').collect();
+ /// assert_eq!(v, ["leopard", "tiger", "", "lion"]);
+ ///
+ /// let v: Vec<&str> = "lion::tiger::leopard".rsplit("::").collect();
+ /// assert_eq!(v, ["leopard", "tiger", "lion"]);
+ /// ```
+ ///
+ /// A more complex pattern, using a closure:
+ ///
+ /// ```
+ /// let v: Vec<&str> = "abc1defXghi".rsplit(|c| c == '1' || c == 'X').collect();
+ /// assert_eq!(v, ["ghi", "def", "abc"]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn rsplit<'a, P>(&'a self, pat: P) -> RSplit<'a, P>
+ where
+ P: Pattern<'a, Searcher: ReverseSearcher<'a>>,
+ {
+ RSplit(self.split(pat).0)
+ }
+
+ /// An iterator over substrings of the given string slice, separated by
+ /// characters matched by a pattern.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// Equivalent to [`split`], except that the trailing substring
+ /// is skipped if empty.
+ ///
+ /// [`split`]: str::split
+ ///
+ /// This method can be used for string data that is _terminated_,
+ /// rather than _separated_ by a pattern.
+ ///
+ /// # Iterator behavior
+ ///
+ /// The returned iterator will be a [`DoubleEndedIterator`] if the pattern
+ /// allows a reverse search and forward/reverse search yields the same
+ /// elements. This is true for, e.g., [`char`], but not for `&str`.
+ ///
+ /// If the pattern allows a reverse search but its results might differ
+ /// from a forward search, the [`rsplit_terminator`] method can be used.
+ ///
+ /// [`rsplit_terminator`]: str::rsplit_terminator
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let v: Vec<&str> = "A.B.".split_terminator('.').collect();
+ /// assert_eq!(v, ["A", "B"]);
+ ///
+ /// let v: Vec<&str> = "A..B..".split_terminator(".").collect();
+ /// assert_eq!(v, ["A", "", "B", ""]);
+ ///
+ /// let v: Vec<&str> = "A.B:C.D".split_terminator(&['.', ':'][..]).collect();
+ /// assert_eq!(v, ["A", "B", "C", "D"]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn split_terminator<'a, P: Pattern<'a>>(&'a self, pat: P) -> SplitTerminator<'a, P> {
+ SplitTerminator(SplitInternal { allow_trailing_empty: false, ..self.split(pat).0 })
+ }
+
+ /// An iterator over substrings of `self`, separated by characters
+ /// matched by a pattern and yielded in reverse order.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// Equivalent to [`split`], except that the trailing substring is
+ /// skipped if empty.
+ ///
+ /// [`split`]: str::split
+ ///
+ /// This method can be used for string data that is _terminated_,
+ /// rather than _separated_ by a pattern.
+ ///
+ /// # Iterator behavior
+ ///
+ /// The returned iterator requires that the pattern supports a
+ /// reverse search, and it will be double ended if a forward/reverse
+ /// search yields the same elements.
+ ///
+ /// For iterating from the front, the [`split_terminator`] method can be
+ /// used.
+ ///
+ /// [`split_terminator`]: str::split_terminator
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v: Vec<&str> = "A.B.".rsplit_terminator('.').collect();
+ /// assert_eq!(v, ["B", "A"]);
+ ///
+ /// let v: Vec<&str> = "A..B..".rsplit_terminator(".").collect();
+ /// assert_eq!(v, ["", "B", "", "A"]);
+ ///
+ /// let v: Vec<&str> = "A.B:C.D".rsplit_terminator(&['.', ':'][..]).collect();
+ /// assert_eq!(v, ["D", "C", "B", "A"]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn rsplit_terminator<'a, P>(&'a self, pat: P) -> RSplitTerminator<'a, P>
+ where
+ P: Pattern<'a, Searcher: ReverseSearcher<'a>>,
+ {
+ RSplitTerminator(self.split_terminator(pat).0)
+ }
+
+ /// An iterator over substrings of the given string slice, separated by a
+ /// pattern, restricted to returning at most `n` items.
+ ///
+ /// If `n` substrings are returned, the last substring (the `n`th substring)
+ /// will contain the remainder of the string.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Iterator behavior
+ ///
+ /// The returned iterator will not be double ended, because it is
+ /// not efficient to support.
+ ///
+ /// If the pattern allows a reverse search, the [`rsplitn`] method can be
+ /// used.
+ ///
+ /// [`rsplitn`]: str::rsplitn
+ ///
+ /// # Examples
+ ///
+ /// Simple patterns:
+ ///
+ /// ```
+ /// let v: Vec<&str> = "Mary had a little lambda".splitn(3, ' ').collect();
+ /// assert_eq!(v, ["Mary", "had", "a little lambda"]);
+ ///
+ /// let v: Vec<&str> = "lionXXtigerXleopard".splitn(3, "X").collect();
+ /// assert_eq!(v, ["lion", "", "tigerXleopard"]);
+ ///
+ /// let v: Vec<&str> = "abcXdef".splitn(1, 'X').collect();
+ /// assert_eq!(v, ["abcXdef"]);
+ ///
+ /// let v: Vec<&str> = "".splitn(1, 'X').collect();
+ /// assert_eq!(v, [""]);
+ /// ```
+ ///
+ /// A more complex pattern, using a closure:
+ ///
+ /// ```
+ /// let v: Vec<&str> = "abc1defXghi".splitn(2, |c| c == '1' || c == 'X').collect();
+ /// assert_eq!(v, ["abc", "defXghi"]);
+ /// ```
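+    ///
+    /// If `n` is `0`, the internal counter (the `count` field set up below)
+    /// starts exhausted, so nothing is yielded (illustrative):
+    ///
+    /// ```
+    /// let v: Vec<&str> = "abcXdef".splitn(0, 'X').collect();
+    /// assert!(v.is_empty());
+    /// ```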
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn splitn<'a, P: Pattern<'a>>(&'a self, n: usize, pat: P) -> SplitN<'a, P> {
+ SplitN(SplitNInternal { iter: self.split(pat).0, count: n })
+ }
+
+ /// An iterator over substrings of this string slice, separated by a
+ /// pattern, starting from the end of the string, restricted to returning
+ /// at most `n` items.
+ ///
+ /// If `n` substrings are returned, the last substring (the `n`th substring)
+ /// will contain the remainder of the string.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Iterator behavior
+ ///
+ /// The returned iterator will not be double ended, because it is not
+ /// efficient to support.
+ ///
+ /// For splitting from the front, the [`splitn`] method can be used.
+ ///
+ /// [`splitn`]: str::splitn
+ ///
+ /// # Examples
+ ///
+ /// Simple patterns:
+ ///
+ /// ```
+ /// let v: Vec<&str> = "Mary had a little lamb".rsplitn(3, ' ').collect();
+ /// assert_eq!(v, ["lamb", "little", "Mary had a"]);
+ ///
+ /// let v: Vec<&str> = "lionXXtigerXleopard".rsplitn(3, 'X').collect();
+ /// assert_eq!(v, ["leopard", "tiger", "lionX"]);
+ ///
+ /// let v: Vec<&str> = "lion::tiger::leopard".rsplitn(2, "::").collect();
+ /// assert_eq!(v, ["leopard", "lion::tiger"]);
+ /// ```
+ ///
+ /// A more complex pattern, using a closure:
+ ///
+ /// ```
+ /// let v: Vec<&str> = "abc1defXghi".rsplitn(2, |c| c == '1' || c == 'X').collect();
+ /// assert_eq!(v, ["ghi", "abc1def"]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn rsplitn<'a, P>(&'a self, n: usize, pat: P) -> RSplitN<'a, P>
+ where
+ P: Pattern<'a, Searcher: ReverseSearcher<'a>>,
+ {
+ RSplitN(self.splitn(n, pat).0)
+ }
+
+ /// Splits the string on the first occurrence of the specified delimiter and
+ /// returns prefix before delimiter and suffix after delimiter.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!("cfg".split_once('='), None);
+ /// assert_eq!("cfg=".split_once('='), Some(("cfg", "")));
+ /// assert_eq!("cfg=foo".split_once('='), Some(("cfg", "foo")));
+ /// assert_eq!("cfg=foo=bar".split_once('='), Some(("cfg", "foo=bar")));
+ /// ```
+ #[stable(feature = "str_split_once", since = "1.52.0")]
+ #[inline]
+ pub fn split_once<'a, P: Pattern<'a>>(&'a self, delimiter: P) -> Option<(&'a str, &'a str)> {
+ let (start, end) = delimiter.into_searcher(self).next_match()?;
+ // SAFETY: `Searcher` is known to return valid indices.
+ unsafe { Some((self.get_unchecked(..start), self.get_unchecked(end..))) }
+ }
+
+ /// Splits the string on the last occurrence of the specified delimiter and
+ /// returns prefix before delimiter and suffix after delimiter.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!("cfg".rsplit_once('='), None);
+ /// assert_eq!("cfg=foo".rsplit_once('='), Some(("cfg", "foo")));
+ /// assert_eq!("cfg=foo=bar".rsplit_once('='), Some(("cfg=foo", "bar")));
+ /// ```
+ #[stable(feature = "str_split_once", since = "1.52.0")]
+ #[inline]
+ pub fn rsplit_once<'a, P>(&'a self, delimiter: P) -> Option<(&'a str, &'a str)>
+ where
+ P: Pattern<'a, Searcher: ReverseSearcher<'a>>,
+ {
+ let (start, end) = delimiter.into_searcher(self).next_match_back()?;
+ // SAFETY: `Searcher` is known to return valid indices.
+ unsafe { Some((self.get_unchecked(..start), self.get_unchecked(end..))) }
+ }
+
+ /// An iterator over the disjoint matches of a pattern within the given string
+ /// slice.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Iterator behavior
+ ///
+ /// The returned iterator will be a [`DoubleEndedIterator`] if the pattern
+ /// allows a reverse search and forward/reverse search yields the same
+ /// elements. This is true for, e.g., [`char`], but not for `&str`.
+ ///
+ /// If the pattern allows a reverse search but its results might differ
+ /// from a forward search, the [`rmatches`] method can be used.
+ ///
+    /// [`rmatches`]: str::rmatches
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let v: Vec<&str> = "abcXXXabcYYYabc".matches("abc").collect();
+ /// assert_eq!(v, ["abc", "abc", "abc"]);
+ ///
+ /// let v: Vec<&str> = "1abc2abc3".matches(char::is_numeric).collect();
+ /// assert_eq!(v, ["1", "2", "3"]);
+ /// ```
+ #[stable(feature = "str_matches", since = "1.2.0")]
+ #[inline]
+ pub fn matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> Matches<'a, P> {
+ Matches(MatchesInternal(pat.into_searcher(self)))
+ }
+
+ /// An iterator over the disjoint matches of a pattern within this string slice,
+ /// yielded in reverse order.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Iterator behavior
+ ///
+ /// The returned iterator requires that the pattern supports a reverse
+ /// search, and it will be a [`DoubleEndedIterator`] if a forward/reverse
+ /// search yields the same elements.
+ ///
+ /// For iterating from the front, the [`matches`] method can be used.
+ ///
+ /// [`matches`]: str::matches
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let v: Vec<&str> = "abcXXXabcYYYabc".rmatches("abc").collect();
+ /// assert_eq!(v, ["abc", "abc", "abc"]);
+ ///
+ /// let v: Vec<&str> = "1abc2abc3".rmatches(char::is_numeric).collect();
+ /// assert_eq!(v, ["3", "2", "1"]);
+ /// ```
+ #[stable(feature = "str_matches", since = "1.2.0")]
+ #[inline]
+ pub fn rmatches<'a, P>(&'a self, pat: P) -> RMatches<'a, P>
+ where
+ P: Pattern<'a, Searcher: ReverseSearcher<'a>>,
+ {
+ RMatches(self.matches(pat).0)
+ }
+
+ /// An iterator over the disjoint matches of a pattern within this string
+ /// slice as well as the index that the match starts at.
+ ///
+ /// For matches of `pat` within `self` that overlap, only the indices
+ /// corresponding to the first match are returned.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Iterator behavior
+ ///
+ /// The returned iterator will be a [`DoubleEndedIterator`] if the pattern
+ /// allows a reverse search and forward/reverse search yields the same
+ /// elements. This is true for, e.g., [`char`], but not for `&str`.
+ ///
+ /// If the pattern allows a reverse search but its results might differ
+ /// from a forward search, the [`rmatch_indices`] method can be used.
+ ///
+ /// [`rmatch_indices`]: str::rmatch_indices
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let v: Vec<_> = "abcXXXabcYYYabc".match_indices("abc").collect();
+ /// assert_eq!(v, [(0, "abc"), (6, "abc"), (12, "abc")]);
+ ///
+ /// let v: Vec<_> = "1abcabc2".match_indices("abc").collect();
+ /// assert_eq!(v, [(1, "abc"), (4, "abc")]);
+ ///
+ /// let v: Vec<_> = "ababa".match_indices("aba").collect();
+ /// assert_eq!(v, [(0, "aba")]); // only the first `aba`
+ /// ```
+ #[stable(feature = "str_match_indices", since = "1.5.0")]
+ #[inline]
+ pub fn match_indices<'a, P: Pattern<'a>>(&'a self, pat: P) -> MatchIndices<'a, P> {
+ MatchIndices(MatchIndicesInternal(pat.into_searcher(self)))
+ }
+
+ /// An iterator over the disjoint matches of a pattern within `self`,
+ /// yielded in reverse order along with the index of the match.
+ ///
+ /// For matches of `pat` within `self` that overlap, only the indices
+ /// corresponding to the last match are returned.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Iterator behavior
+ ///
+ /// The returned iterator requires that the pattern supports a reverse
+ /// search, and it will be a [`DoubleEndedIterator`] if a forward/reverse
+ /// search yields the same elements.
+ ///
+ /// For iterating from the front, the [`match_indices`] method can be used.
+ ///
+ /// [`match_indices`]: str::match_indices
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let v: Vec<_> = "abcXXXabcYYYabc".rmatch_indices("abc").collect();
+ /// assert_eq!(v, [(12, "abc"), (6, "abc"), (0, "abc")]);
+ ///
+ /// let v: Vec<_> = "1abcabc2".rmatch_indices("abc").collect();
+ /// assert_eq!(v, [(4, "abc"), (1, "abc")]);
+ ///
+ /// let v: Vec<_> = "ababa".rmatch_indices("aba").collect();
+ /// assert_eq!(v, [(2, "aba")]); // only the last `aba`
+ /// ```
+ #[stable(feature = "str_match_indices", since = "1.5.0")]
+ #[inline]
+ pub fn rmatch_indices<'a, P>(&'a self, pat: P) -> RMatchIndices<'a, P>
+ where
+ P: Pattern<'a, Searcher: ReverseSearcher<'a>>,
+ {
+ RMatchIndices(self.match_indices(pat).0)
+ }
+
+ /// Returns a string slice with leading and trailing whitespace removed.
+ ///
+ /// 'Whitespace' is defined according to the terms of the Unicode Derived
+ /// Core Property `White_Space`, which includes newlines.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = "\n Hello\tworld\t\n";
+ ///
+ /// assert_eq!("Hello\tworld", s.trim());
+ /// ```
+ #[inline]
+ #[must_use = "this returns the trimmed string as a slice, \
+ without modifying the original"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg_attr(not(test), rustc_diagnostic_item = "str_trim")]
+ pub fn trim(&self) -> &str {
+ self.trim_matches(|c: char| c.is_whitespace())
+ }
+
+ /// Returns a string slice with leading whitespace removed.
+ ///
+ /// 'Whitespace' is defined according to the terms of the Unicode Derived
+ /// Core Property `White_Space`, which includes newlines.
+ ///
+ /// # Text directionality
+ ///
+ /// A string is a sequence of bytes. `start` in this context means the first
+ /// position of that byte string; for a left-to-right language like English or
+    /// Russian, this will be the left side, and for right-to-left languages like
+ /// Arabic or Hebrew, this will be the right side.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = "\n Hello\tworld\t\n";
+ /// assert_eq!("Hello\tworld\t\n", s.trim_start());
+ /// ```
+ ///
+ /// Directionality:
+ ///
+ /// ```
+ /// let s = " English ";
+ /// assert!(Some('E') == s.trim_start().chars().next());
+ ///
+ /// let s = " עברית ";
+ /// assert!(Some('ע') == s.trim_start().chars().next());
+ /// ```
+ #[inline]
+ #[must_use = "this returns the trimmed string as a new slice, \
+ without modifying the original"]
+ #[stable(feature = "trim_direction", since = "1.30.0")]
+ #[cfg_attr(not(test), rustc_diagnostic_item = "str_trim_start")]
+ pub fn trim_start(&self) -> &str {
+ self.trim_start_matches(|c: char| c.is_whitespace())
+ }
+
+ /// Returns a string slice with trailing whitespace removed.
+ ///
+ /// 'Whitespace' is defined according to the terms of the Unicode Derived
+ /// Core Property `White_Space`, which includes newlines.
+ ///
+ /// # Text directionality
+ ///
+ /// A string is a sequence of bytes. `end` in this context means the last
+ /// position of that byte string; for a left-to-right language like English or
+    /// Russian, this will be the right side, and for right-to-left languages like
+ /// Arabic or Hebrew, this will be the left side.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = "\n Hello\tworld\t\n";
+ /// assert_eq!("\n Hello\tworld", s.trim_end());
+ /// ```
+ ///
+ /// Directionality:
+ ///
+ /// ```
+ /// let s = " English ";
+ /// assert!(Some('h') == s.trim_end().chars().rev().next());
+ ///
+ /// let s = " עברית ";
+ /// assert!(Some('ת') == s.trim_end().chars().rev().next());
+ /// ```
+ #[inline]
+ #[must_use = "this returns the trimmed string as a new slice, \
+ without modifying the original"]
+ #[stable(feature = "trim_direction", since = "1.30.0")]
+ #[cfg_attr(not(test), rustc_diagnostic_item = "str_trim_end")]
+ pub fn trim_end(&self) -> &str {
+ self.trim_end_matches(|c: char| c.is_whitespace())
+ }
+
+ /// Returns a string slice with leading whitespace removed.
+ ///
+ /// 'Whitespace' is defined according to the terms of the Unicode Derived
+ /// Core Property `White_Space`.
+ ///
+ /// # Text directionality
+ ///
+ /// A string is a sequence of bytes. 'Left' in this context means the first
+    /// position of that byte string; for languages like Arabic or Hebrew,
+    /// which are 'right to left' rather than 'left to right', this will be
+ /// the _right_ side, not the left.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = " Hello\tworld\t";
+ ///
+ /// assert_eq!("Hello\tworld\t", s.trim_left());
+ /// ```
+ ///
+ /// Directionality:
+ ///
+ /// ```
+ /// let s = " English";
+ /// assert!(Some('E') == s.trim_left().chars().next());
+ ///
+ /// let s = " עברית";
+ /// assert!(Some('ע') == s.trim_left().chars().next());
+ /// ```
+ #[must_use = "this returns the trimmed string as a new slice, \
+ without modifying the original"]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[deprecated(since = "1.33.0", note = "superseded by `trim_start`", suggestion = "trim_start")]
+ pub fn trim_left(&self) -> &str {
+ self.trim_start()
+ }
+
+ /// Returns a string slice with trailing whitespace removed.
+ ///
+ /// 'Whitespace' is defined according to the terms of the Unicode Derived
+ /// Core Property `White_Space`.
+ ///
+ /// # Text directionality
+ ///
+ /// A string is a sequence of bytes. 'Right' in this context means the last
+    /// position of that byte string; for languages like Arabic or Hebrew,
+    /// which are 'right to left' rather than 'left to right', this will be
+ /// the _left_ side, not the right.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = " Hello\tworld\t";
+ ///
+ /// assert_eq!(" Hello\tworld", s.trim_right());
+ /// ```
+ ///
+ /// Directionality:
+ ///
+ /// ```
+ /// let s = "English ";
+ /// assert!(Some('h') == s.trim_right().chars().rev().next());
+ ///
+ /// let s = "עברית ";
+ /// assert!(Some('ת') == s.trim_right().chars().rev().next());
+ /// ```
+ #[must_use = "this returns the trimmed string as a new slice, \
+ without modifying the original"]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[deprecated(since = "1.33.0", note = "superseded by `trim_end`", suggestion = "trim_end")]
+ pub fn trim_right(&self) -> &str {
+ self.trim_end()
+ }
+
+ /// Returns a string slice with all prefixes and suffixes that match a
+ /// pattern repeatedly removed.
+ ///
+ /// The [pattern] can be a [`char`], a slice of [`char`]s, or a function
+ /// or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Examples
+ ///
+ /// Simple patterns:
+ ///
+ /// ```
+ /// assert_eq!("11foo1bar11".trim_matches('1'), "foo1bar");
+ /// assert_eq!("123foo1bar123".trim_matches(char::is_numeric), "foo1bar");
+ ///
+ /// let x: &[_] = &['1', '2'];
+ /// assert_eq!("12foo1bar12".trim_matches(x), "foo1bar");
+ /// ```
+ ///
+ /// A more complex pattern, using a closure:
+ ///
+ /// ```
+ /// assert_eq!("1foo1barXX".trim_matches(|c| c == '1' || c == 'X'), "foo1bar");
+ /// ```
+ #[must_use = "this returns the trimmed string as a new slice, \
+ without modifying the original"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn trim_matches<'a, P>(&'a self, pat: P) -> &'a str
+ where
+ P: Pattern<'a, Searcher: DoubleEndedSearcher<'a>>,
+ {
+ let mut i = 0;
+ let mut j = 0;
+ let mut matcher = pat.into_searcher(self);
+ if let Some((a, b)) = matcher.next_reject() {
+ i = a;
+ j = b; // Remember earliest known match, correct it below if
+ // last match is different
+ }
+ if let Some((_, b)) = matcher.next_reject_back() {
+ j = b;
+ }
+ // SAFETY: `Searcher` is known to return valid indices.
+ unsafe { self.get_unchecked(i..j) }
+ }
+
+ /// Returns a string slice with all prefixes that match a pattern
+ /// repeatedly removed.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Text directionality
+ ///
+ /// A string is a sequence of bytes. `start` in this context means the first
+ /// position of that byte string; for a left-to-right language like English or
+    /// Russian, this will be the left side, and for right-to-left languages like
+ /// Arabic or Hebrew, this will be the right side.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!("11foo1bar11".trim_start_matches('1'), "foo1bar11");
+ /// assert_eq!("123foo1bar123".trim_start_matches(char::is_numeric), "foo1bar123");
+ ///
+ /// let x: &[_] = &['1', '2'];
+ /// assert_eq!("12foo1bar12".trim_start_matches(x), "foo1bar12");
+ /// ```
+ #[must_use = "this returns the trimmed string as a new slice, \
+ without modifying the original"]
+ #[stable(feature = "trim_direction", since = "1.30.0")]
+ pub fn trim_start_matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> &'a str {
+ let mut i = self.len();
+ let mut matcher = pat.into_searcher(self);
+ if let Some((a, _)) = matcher.next_reject() {
+ i = a;
+ }
+ // SAFETY: `Searcher` is known to return valid indices.
+ unsafe { self.get_unchecked(i..self.len()) }
+ }
+
+ /// Returns a string slice with the prefix removed.
+ ///
+    /// If the string starts with the pattern `prefix`, returns the substring
+    /// after the prefix, wrapped in `Some`. Unlike `trim_start_matches`, this
+    /// method removes the prefix exactly once.
+ ///
+ /// If the string does not start with `prefix`, returns `None`.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!("foo:bar".strip_prefix("foo:"), Some("bar"));
+ /// assert_eq!("foo:bar".strip_prefix("bar"), None);
+ /// assert_eq!("foofoo".strip_prefix("foo"), Some("foo"));
+ /// ```
+ #[must_use = "this returns the remaining substring as a new slice, \
+ without modifying the original"]
+ #[stable(feature = "str_strip", since = "1.45.0")]
+ pub fn strip_prefix<'a, P: Pattern<'a>>(&'a self, prefix: P) -> Option<&'a str> {
+ prefix.strip_prefix_of(self)
+ }
+
+ /// Returns a string slice with the suffix removed.
+ ///
+ /// If the string ends with the pattern `suffix`, returns the substring before the suffix,
+ /// wrapped in `Some`. Unlike `trim_end_matches`, this method removes the suffix exactly once.
+ ///
+ /// If the string does not end with `suffix`, returns `None`.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!("bar:foo".strip_suffix(":foo"), Some("bar"));
+ /// assert_eq!("bar:foo".strip_suffix("bar"), None);
+ /// assert_eq!("foofoo".strip_suffix("foo"), Some("foo"));
+ /// ```
+ #[must_use = "this returns the remaining substring as a new slice, \
+ without modifying the original"]
+ #[stable(feature = "str_strip", since = "1.45.0")]
+ pub fn strip_suffix<'a, P>(&'a self, suffix: P) -> Option<&'a str>
+ where
+ P: Pattern<'a>,
+ <P as Pattern<'a>>::Searcher: ReverseSearcher<'a>,
+ {
+ suffix.strip_suffix_of(self)
+ }
+
+ /// Returns a string slice with all suffixes that match a pattern
+ /// repeatedly removed.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Text directionality
+ ///
+ /// A string is a sequence of bytes. `end` in this context means the last
+ /// position of that byte string; for a left-to-right language like English or
+    /// Russian, this will be the right side, and for right-to-left languages like
+ /// Arabic or Hebrew, this will be the left side.
+ ///
+ /// # Examples
+ ///
+ /// Simple patterns:
+ ///
+ /// ```
+ /// assert_eq!("11foo1bar11".trim_end_matches('1'), "11foo1bar");
+ /// assert_eq!("123foo1bar123".trim_end_matches(char::is_numeric), "123foo1bar");
+ ///
+ /// let x: &[_] = &['1', '2'];
+ /// assert_eq!("12foo1bar12".trim_end_matches(x), "12foo1bar");
+ /// ```
+ ///
+ /// A more complex pattern, using a closure:
+ ///
+ /// ```
+ /// assert_eq!("1fooX".trim_end_matches(|c| c == '1' || c == 'X'), "1foo");
+ /// ```
+ #[must_use = "this returns the trimmed string as a new slice, \
+ without modifying the original"]
+ #[stable(feature = "trim_direction", since = "1.30.0")]
+ pub fn trim_end_matches<'a, P>(&'a self, pat: P) -> &'a str
+ where
+ P: Pattern<'a, Searcher: ReverseSearcher<'a>>,
+ {
+ let mut j = 0;
+ let mut matcher = pat.into_searcher(self);
+ if let Some((_, b)) = matcher.next_reject_back() {
+ j = b;
+ }
+ // SAFETY: `Searcher` is known to return valid indices.
+ unsafe { self.get_unchecked(0..j) }
+ }
+
+ /// Returns a string slice with all prefixes that match a pattern
+ /// repeatedly removed.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Text directionality
+ ///
+ /// A string is a sequence of bytes. 'Left' in this context means the first
+    /// position of that byte string; for languages like Arabic or Hebrew,
+    /// which are 'right to left' rather than 'left to right', this will be
+ /// the _right_ side, not the left.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!("11foo1bar11".trim_left_matches('1'), "foo1bar11");
+ /// assert_eq!("123foo1bar123".trim_left_matches(char::is_numeric), "foo1bar123");
+ ///
+ /// let x: &[_] = &['1', '2'];
+ /// assert_eq!("12foo1bar12".trim_left_matches(x), "foo1bar12");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[deprecated(
+ since = "1.33.0",
+ note = "superseded by `trim_start_matches`",
+ suggestion = "trim_start_matches"
+ )]
+ pub fn trim_left_matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> &'a str {
+ self.trim_start_matches(pat)
+ }
+
+ /// Returns a string slice with all suffixes that match a pattern
+ /// repeatedly removed.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Text directionality
+ ///
+ /// A string is a sequence of bytes. 'Right' in this context means the last
+    /// position of that byte string; for languages like Arabic or Hebrew,
+    /// which are 'right to left' rather than 'left to right', this will be
+ /// the _left_ side, not the right.
+ ///
+ /// # Examples
+ ///
+ /// Simple patterns:
+ ///
+ /// ```
+ /// assert_eq!("11foo1bar11".trim_right_matches('1'), "11foo1bar");
+ /// assert_eq!("123foo1bar123".trim_right_matches(char::is_numeric), "123foo1bar");
+ ///
+ /// let x: &[_] = &['1', '2'];
+ /// assert_eq!("12foo1bar12".trim_right_matches(x), "12foo1bar");
+ /// ```
+ ///
+ /// A more complex pattern, using a closure:
+ ///
+ /// ```
+ /// assert_eq!("1fooX".trim_right_matches(|c| c == '1' || c == 'X'), "1foo");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[deprecated(
+ since = "1.33.0",
+ note = "superseded by `trim_end_matches`",
+ suggestion = "trim_end_matches"
+ )]
+ pub fn trim_right_matches<'a, P>(&'a self, pat: P) -> &'a str
+ where
+ P: Pattern<'a, Searcher: ReverseSearcher<'a>>,
+ {
+ self.trim_end_matches(pat)
+ }
+
+ /// Parses this string slice into another type.
+ ///
+ /// Because `parse` is so general, it can cause problems with type
+ /// inference. As such, `parse` is one of the few times you'll see
+ /// the syntax affectionately known as the 'turbofish': `::<>`. This
+ /// helps the inference algorithm understand specifically which type
+ /// you're trying to parse into.
+ ///
+ /// `parse` can parse into any type that implements the [`FromStr`] trait.
+ ///
+ /// # Errors
+ ///
+ /// Will return [`Err`] if it's not possible to parse this string slice into
+ /// the desired type.
+ ///
+ /// [`Err`]: FromStr::Err
+ ///
+ /// # Examples
+ ///
+    /// Basic usage:
+ ///
+ /// ```
+ /// let four: u32 = "4".parse().unwrap();
+ ///
+ /// assert_eq!(4, four);
+ /// ```
+ ///
+ /// Using the 'turbofish' instead of annotating `four`:
+ ///
+ /// ```
+ /// let four = "4".parse::<u32>();
+ ///
+ /// assert_eq!(Ok(4), four);
+ /// ```
+ ///
+ /// Failing to parse:
+ ///
+ /// ```
+ /// let nope = "j".parse::<u32>();
+ ///
+ /// assert!(nope.is_err());
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn parse<F: FromStr>(&self) -> Result<F, F::Err> {
+ FromStr::from_str(self)
+ }
+
+ /// Checks if all characters in this string are within the ASCII range.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let ascii = "hello!\n";
+ /// let non_ascii = "Grüße, Jürgen ❤";
+ ///
+ /// assert!(ascii.is_ascii());
+ /// assert!(!non_ascii.is_ascii());
+ /// ```
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[must_use]
+ #[inline]
+ pub fn is_ascii(&self) -> bool {
+        // We can treat each byte as a character here: all multibyte characters
+        // start with a byte outside the ASCII range, so any non-ASCII character
+        // is caught by this byte-level check.
+ self.as_bytes().is_ascii()
+ }
+
+ /// Checks that two strings are an ASCII case-insensitive match.
+ ///
+    /// Same as `a.to_ascii_lowercase() == b.to_ascii_lowercase()`,
+ /// but without allocating and copying temporaries.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!("Ferris".eq_ignore_ascii_case("FERRIS"));
+ /// assert!("Ferrös".eq_ignore_ascii_case("FERRöS"));
+ /// assert!(!"Ferrös".eq_ignore_ascii_case("FERRÖS"));
+ /// ```
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[must_use]
+ #[inline]
+ pub fn eq_ignore_ascii_case(&self, other: &str) -> bool {
+ self.as_bytes().eq_ignore_ascii_case(other.as_bytes())
+ }
+
+ /// Converts this string to its ASCII upper case equivalent in-place.
+ ///
+ /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To return a new uppercased value without modifying the existing one, use
+ /// [`to_ascii_uppercase()`].
+ ///
+ /// [`to_ascii_uppercase()`]: #method.to_ascii_uppercase
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut s = String::from("Grüße, Jürgen ❤");
+ ///
+ /// s.make_ascii_uppercase();
+ ///
+ /// assert_eq!("GRüßE, JüRGEN ❤", s);
+ /// ```
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn make_ascii_uppercase(&mut self) {
+ // SAFETY: changing ASCII letters only does not invalidate UTF-8.
+ let me = unsafe { self.as_bytes_mut() };
+ me.make_ascii_uppercase()
+ }
+
+ /// Converts this string to its ASCII lower case equivalent in-place.
+ ///
+ /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To return a new lowercased value without modifying the existing one, use
+ /// [`to_ascii_lowercase()`].
+ ///
+ /// [`to_ascii_lowercase()`]: #method.to_ascii_lowercase
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut s = String::from("GRÜßE, JÜRGEN ❤");
+ ///
+ /// s.make_ascii_lowercase();
+ ///
+ /// assert_eq!("grÜße, jÜrgen ❤", s);
+ /// ```
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn make_ascii_lowercase(&mut self) {
+ // SAFETY: changing ASCII letters only does not invalidate UTF-8.
+ let me = unsafe { self.as_bytes_mut() };
+ me.make_ascii_lowercase()
+ }
+
+    /// Returns an iterator that escapes each char in `self` with [`char::escape_debug`].
+ ///
+ /// Note: only extended grapheme codepoints that begin the string will be
+ /// escaped.
+ ///
+ /// # Examples
+ ///
+ /// As an iterator:
+ ///
+ /// ```
+ /// for c in "❤\n!".escape_debug() {
+ /// print!("{c}");
+ /// }
+ /// println!();
+ /// ```
+ ///
+ /// Using `println!` directly:
+ ///
+ /// ```
+ /// println!("{}", "❤\n!".escape_debug());
+ /// ```
+    ///
+ /// Both are equivalent to:
+ ///
+ /// ```
+ /// println!("❤\\n!");
+ /// ```
+ ///
+ /// Using `to_string`:
+ ///
+ /// ```
+ /// assert_eq!("❤\n!".escape_debug().to_string(), "❤\\n!");
+ /// ```
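+    ///
+    /// Per the note above, a grapheme-extending character is escaped only
+    /// when it begins the string (illustrative):
+    ///
+    /// ```
+    /// assert_eq!("\u{301}a\u{301}".escape_debug().to_string(), "\\u{301}a\u{301}");
+    /// ```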
+ #[must_use = "this returns the escaped string as an iterator, \
+ without modifying the original"]
+ #[stable(feature = "str_escape", since = "1.34.0")]
+ pub fn escape_debug(&self) -> EscapeDebug<'_> {
+ let mut chars = self.chars();
+ EscapeDebug {
+ inner: chars
+ .next()
+ .map(|first| first.escape_debug_ext(EscapeDebugExtArgs::ESCAPE_ALL))
+ .into_iter()
+ .flatten()
+ .chain(chars.flat_map(CharEscapeDebugContinue)),
+ }
+ }
+
+    /// Returns an iterator that escapes each char in `self` with [`char::escape_default`].
+ ///
+ /// # Examples
+ ///
+ /// As an iterator:
+ ///
+ /// ```
+ /// for c in "❤\n!".escape_default() {
+ /// print!("{c}");
+ /// }
+ /// println!();
+ /// ```
+ ///
+ /// Using `println!` directly:
+ ///
+ /// ```
+ /// println!("{}", "❤\n!".escape_default());
+ /// ```
+    ///
+ /// Both are equivalent to:
+ ///
+ /// ```
+ /// println!("\\u{{2764}}\\n!");
+ /// ```
+ ///
+ /// Using `to_string`:
+ ///
+ /// ```
+ /// assert_eq!("❤\n!".escape_default().to_string(), "\\u{2764}\\n!");
+ /// ```
+ #[must_use = "this returns the escaped string as an iterator, \
+ without modifying the original"]
+ #[stable(feature = "str_escape", since = "1.34.0")]
+ pub fn escape_default(&self) -> EscapeDefault<'_> {
+ EscapeDefault { inner: self.chars().flat_map(CharEscapeDefault) }
+ }
+
+    /// Returns an iterator that escapes each char in `self` with [`char::escape_unicode`].
+ ///
+ /// # Examples
+ ///
+ /// As an iterator:
+ ///
+ /// ```
+ /// for c in "❤\n!".escape_unicode() {
+ /// print!("{c}");
+ /// }
+ /// println!();
+ /// ```
+ ///
+ /// Using `println!` directly:
+ ///
+ /// ```
+ /// println!("{}", "❤\n!".escape_unicode());
+ /// ```
+    ///
+ /// Both are equivalent to:
+ ///
+ /// ```
+ /// println!("\\u{{2764}}\\u{{a}}\\u{{21}}");
+ /// ```
+ ///
+ /// Using `to_string`:
+ ///
+ /// ```
+ /// assert_eq!("❤\n!".escape_unicode().to_string(), "\\u{2764}\\u{a}\\u{21}");
+ /// ```
+ #[must_use = "this returns the escaped string as an iterator, \
+ without modifying the original"]
+ #[stable(feature = "str_escape", since = "1.34.0")]
+ pub fn escape_unicode(&self) -> EscapeUnicode<'_> {
+ EscapeUnicode { inner: self.chars().flat_map(CharEscapeUnicode) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRef<[u8]> for str {
+ #[inline]
+ fn as_ref(&self) -> &[u8] {
+ self.as_bytes()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
+impl const Default for &str {
+    /// Creates an empty str.
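+    ///
+    /// For example:
+    ///
+    /// ```
+    /// let s: &str = Default::default();
+    /// assert_eq!("", s);
+    /// ```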
+ #[inline]
+ fn default() -> Self {
+ ""
+ }
+}
+
+#[stable(feature = "default_mut_str", since = "1.28.0")]
+impl Default for &mut str {
+    /// Creates an empty mutable str.
+ #[inline]
+ fn default() -> Self {
+ // SAFETY: The empty string is valid UTF-8.
+ unsafe { from_utf8_unchecked_mut(&mut []) }
+ }
+}
+
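+// Helper closure types for the iterator adapters above. The macro turns each
+// closure into a nameable, zero-sized type implementing the `Fn*` traits, so
+// iterators like `SplitWhitespace` can name their adapter types in public
+// signatures instead of leaking opaque closure types.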
+impl_fn_for_zst! {
+ /// A nameable, cloneable fn type
+ #[derive(Clone)]
+ struct LinesAnyMap impl<'a> Fn = |line: &'a str| -> &'a str {
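+        // `lines` splits on '\n' only; strip a trailing '\r' so that "\r\n"
+        // line endings are also handled.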
+ let l = line.len();
+ if l > 0 && line.as_bytes()[l - 1] == b'\r' { &line[0 .. l - 1] }
+ else { line }
+ };
+
+ #[derive(Clone)]
+ struct CharEscapeDebugContinue impl Fn = |c: char| -> char::EscapeDebug {
+ c.escape_debug_ext(EscapeDebugExtArgs {
+ escape_grapheme_extended: false,
+ escape_single_quote: true,
+ escape_double_quote: true
+ })
+ };
+
+ #[derive(Clone)]
+ struct CharEscapeUnicode impl Fn = |c: char| -> char::EscapeUnicode {
+ c.escape_unicode()
+ };
+ #[derive(Clone)]
+ struct CharEscapeDefault impl Fn = |c: char| -> char::EscapeDefault {
+ c.escape_default()
+ };
+
+ #[derive(Clone)]
+ struct IsWhitespace impl Fn = |c: char| -> bool {
+ c.is_whitespace()
+ };
+
+ #[derive(Clone)]
+ struct IsAsciiWhitespace impl Fn = |byte: &u8| -> bool {
+ byte.is_ascii_whitespace()
+ };
+
+ #[derive(Clone)]
+ struct IsNotEmpty impl<'a, 'b> Fn = |s: &'a &'b str| -> bool {
+ !s.is_empty()
+ };
+
+ #[derive(Clone)]
+ struct BytesIsNotEmpty impl<'a, 'b> Fn = |s: &'a &'b [u8]| -> bool {
+ !s.is_empty()
+ };
+
+ #[derive(Clone)]
+ struct UnsafeBytesToStr impl<'a> Fn = |bytes: &'a [u8]| -> &'a str {
+        // SAFETY: this closure is only ever applied to subslices of a valid
+        // `&str` that were split at ASCII whitespace boundaries, so `bytes`
+        // is always valid UTF-8.
+ unsafe { from_utf8_unchecked(bytes) }
+ };
+}
diff --git a/library/core/src/str/pattern.rs b/library/core/src/str/pattern.rs
new file mode 100644
index 000000000..031fb8e8b
--- /dev/null
+++ b/library/core/src/str/pattern.rs
@@ -0,0 +1,1686 @@
+//! The string Pattern API.
+//!
+//! The Pattern API provides a generic mechanism for using different pattern
+//! types when searching through a string.
+//!
+//! For more details, see the traits [`Pattern`], [`Searcher`],
+//! [`ReverseSearcher`], and [`DoubleEndedSearcher`].
+//!
+//! Although this API is unstable, it is exposed via stable APIs on the
+//! [`str`] type.
+//!
+//! # Examples
+//!
+//! [`Pattern`] is [implemented][pattern-impls] in the stable API for
+//! [`&str`][`str`], [`char`], slices of [`char`], and functions and closures
+//! implementing `FnMut(char) -> bool`.
+//!
+//! ```
+//! let s = "Can you find a needle in a haystack?";
+//!
+//! // &str pattern
+//! assert_eq!(s.find("you"), Some(4));
+//! // char pattern
+//! assert_eq!(s.find('n'), Some(2));
+//! // array of chars pattern
+//! assert_eq!(s.find(&['a', 'e', 'i', 'o', 'u']), Some(1));
+//! // slice of chars pattern
+//! assert_eq!(s.find(&['a', 'e', 'i', 'o', 'u'][..]), Some(1));
+//! // closure pattern
+//! assert_eq!(s.find(|c: char| c.is_ascii_punctuation()), Some(35));
+//! ```
+//!
+//! [pattern-impls]: Pattern#implementors
+
+#![unstable(
+ feature = "pattern",
+ reason = "API not fully fleshed out and ready to be stabilized",
+ issue = "27721"
+)]
+
+use crate::cmp;
+use crate::fmt;
+use crate::slice::memchr;
+
+// Pattern
+
+/// A string pattern.
+///
+/// A `Pattern<'a>` expresses that the implementing type
+/// can be used as a string pattern for searching in a [`&'a str`][str].
+///
+/// For example, both `'a'` and `"aa"` are patterns that
+/// would match at index `1` in the string `"baaaab"`.
+///
+/// The trait itself acts as a builder for an associated
+/// [`Searcher`] type, which does the actual work of finding
+/// occurrences of the pattern in a string.
+///
+/// Depending on the type of the pattern, the behaviour of methods like
+/// [`str::find`] and [`str::contains`] can change. The table below describes
+/// some of those behaviours.
+///
+/// | Pattern type | Match condition |
+/// |--------------------------|-------------------------------------------|
+/// | `&str` | is substring |
+/// | `char` | is contained in string |
+/// | `&[char]` | any char in slice is contained in string |
+/// | `F: FnMut(char) -> bool` | `F` returns `true` for a char in string |
+/// | `&&str` | is substring |
+/// | `&String` | is substring |
+///
+/// # Examples
+///
+/// ```
+/// // &str
+/// assert_eq!("abaaa".find("ba"), Some(1));
+/// assert_eq!("abaaa".find("bac"), None);
+///
+/// // char
+/// assert_eq!("abaaa".find('a'), Some(0));
+/// assert_eq!("abaaa".find('b'), Some(1));
+/// assert_eq!("abaaa".find('c'), None);
+///
+/// // &[char; N]
+/// assert_eq!("ab".find(&['b', 'a']), Some(0));
+/// assert_eq!("abaaa".find(&['a', 'z']), Some(0));
+/// assert_eq!("abaaa".find(&['c', 'd']), None);
+///
+/// // &[char]
+/// assert_eq!("ab".find(&['b', 'a'][..]), Some(0));
+/// assert_eq!("abaaa".find(&['a', 'z'][..]), Some(0));
+/// assert_eq!("abaaa".find(&['c', 'd'][..]), None);
+///
+/// // FnMut(char) -> bool
+/// assert_eq!("abcdef_z".find(|ch| ch > 'd' && ch < 'y'), Some(4));
+/// assert_eq!("abcddd_z".find(|ch| ch > 'd' && ch < 'y'), None);
+/// ```
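+///
+/// A minimal sketch of driving the searcher machinery by hand (nightly-only,
+/// since this module is gated behind the `pattern` feature):
+///
+/// ```
+/// #![feature(pattern)]
+/// use core::str::pattern::{Pattern, Searcher};
+///
+/// let mut searcher = "aa".into_searcher("baaaab");
+/// // `next_match` yields non-overlapping `(start, end)` byte ranges.
+/// assert_eq!(searcher.next_match(), Some((1, 3)));
+/// assert_eq!(searcher.next_match(), Some((3, 5)));
+/// assert_eq!(searcher.next_match(), None);
+/// ```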
+pub trait Pattern<'a>: Sized {
+ /// Associated searcher for this pattern
+ type Searcher: Searcher<'a>;
+
+ /// Constructs the associated searcher from
+ /// `self` and the `haystack` to search in.
+ fn into_searcher(self, haystack: &'a str) -> Self::Searcher;
+
+ /// Checks whether the pattern matches anywhere in the haystack
+ #[inline]
+ fn is_contained_in(self, haystack: &'a str) -> bool {
+ self.into_searcher(haystack).next_match().is_some()
+ }
+
+ /// Checks whether the pattern matches at the front of the haystack
+ #[inline]
+ fn is_prefix_of(self, haystack: &'a str) -> bool {
+ matches!(self.into_searcher(haystack).next(), SearchStep::Match(0, _))
+ }
+
+ /// Checks whether the pattern matches at the back of the haystack
+ #[inline]
+ fn is_suffix_of(self, haystack: &'a str) -> bool
+ where
+ Self::Searcher: ReverseSearcher<'a>,
+ {
+ matches!(self.into_searcher(haystack).next_back(), SearchStep::Match(_, j) if haystack.len() == j)
+ }
+
+    /// Removes the pattern from the front of the haystack, if it matches.
+ #[inline]
+ fn strip_prefix_of(self, haystack: &'a str) -> Option<&'a str> {
+ if let SearchStep::Match(start, len) = self.into_searcher(haystack).next() {
+ debug_assert_eq!(
+ start, 0,
+ "The first search step from Searcher \
+ must include the first character"
+ );
+ // SAFETY: `Searcher` is known to return valid indices.
+ unsafe { Some(haystack.get_unchecked(len..)) }
+ } else {
+ None
+ }
+ }
+
+    /// Removes the pattern from the back of the haystack, if it matches.
+ #[inline]
+ fn strip_suffix_of(self, haystack: &'a str) -> Option<&'a str>
+ where
+ Self::Searcher: ReverseSearcher<'a>,
+ {
+ if let SearchStep::Match(start, end) = self.into_searcher(haystack).next_back() {
+ debug_assert_eq!(
+ end,
+ haystack.len(),
+ "The first search step from ReverseSearcher \
+ must include the last character"
+ );
+ // SAFETY: `Searcher` is known to return valid indices.
+ unsafe { Some(haystack.get_unchecked(..start)) }
+ } else {
+ None
+ }
+ }
+}
+
+// Searcher
+
+/// Result of calling [`Searcher::next()`] or [`ReverseSearcher::next_back()`].
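+///
+/// # Examples
+///
+/// A sketch of a forward step stream for a `char` pattern (nightly-only,
+/// since this API is unstable):
+///
+/// ```
+/// #![feature(pattern)]
+/// use core::str::pattern::{Pattern, SearchStep, Searcher};
+///
+/// let mut searcher = 'a'.into_searcher("ab");
+/// assert_eq!(searcher.next(), SearchStep::Match(0, 1));
+/// assert_eq!(searcher.next(), SearchStep::Reject(1, 2));
+/// assert_eq!(searcher.next(), SearchStep::Done);
+/// ```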
+#[derive(Copy, Clone, Eq, PartialEq, Debug)]
+pub enum SearchStep {
+ /// Expresses that a match of the pattern has been found at
+ /// `haystack[a..b]`.
+ Match(usize, usize),
+ /// Expresses that `haystack[a..b]` has been rejected as a possible match
+ /// of the pattern.
+ ///
+    /// Note that there might be more than one `Reject` between two `Match`es;
+    /// there is no requirement for them to be combined into one.
+ Reject(usize, usize),
+ /// Expresses that every byte of the haystack has been visited, ending
+ /// the iteration.
+ Done,
+}
+
+/// A searcher for a string pattern.
+///
+/// This trait provides methods for searching for non-overlapping
+/// matches of a pattern starting from the front (left) of a string.
+///
+/// It will be implemented by associated `Searcher`
+/// types of the [`Pattern`] trait.
+///
+/// The trait is marked unsafe because the indices returned by the
+/// [`next()`][Searcher::next] methods are required to lie on valid UTF-8
+/// boundaries in the haystack. This enables consumers of this trait to
+/// slice the haystack without additional runtime checks.
+pub unsafe trait Searcher<'a> {
+ /// Getter for the underlying string to be searched in
+ ///
+ /// Will always return the same [`&str`][str].
+ fn haystack(&self) -> &'a str;
+
+ /// Performs the next search step starting from the front.
+ ///
+ /// - Returns [`Match(a, b)`][SearchStep::Match] if `haystack[a..b]` matches
+ /// the pattern.
+    /// - Returns [`Reject(a, b)`][SearchStep::Reject] if `haystack[a..b]` cannot
+    ///   match the pattern, even partially.
+    /// - Returns [`Done`][SearchStep::Done] if every byte of the haystack has
+    ///   been visited.
+    ///
+    /// The stream of [`Match`][SearchStep::Match] and
+    /// [`Reject`][SearchStep::Reject] values up to a [`Done`][SearchStep::Done]
+    /// will contain index ranges that are adjacent, non-overlapping,
+    /// covering the whole haystack, and lying on UTF-8 boundaries.
+    ///
+    /// A [`Match`][SearchStep::Match] result needs to contain the whole matched
+    /// pattern; however, [`Reject`][SearchStep::Reject] results may be split up
+    /// into arbitrarily many adjacent fragments. Both ranges may have zero length.
+ ///
+ /// As an example, the pattern `"aaa"` and the haystack `"cbaaaaab"`
+ /// might produce the stream
+ /// `[Reject(0, 1), Reject(1, 2), Match(2, 5), Reject(5, 8)]`
+ fn next(&mut self) -> SearchStep;
+
+ /// Finds the next [`Match`][SearchStep::Match] result. See [`next()`][Searcher::next].
+ ///
+ /// Unlike [`next()`][Searcher::next], there is no guarantee that the returned ranges
+ /// of this and [`next_reject`][Searcher::next_reject] will overlap. This will return
+    /// `(start_match, end_match)`, where `start_match` is the index of where
+    /// the match begins, and `end_match` is the index after the end of the match.
+ #[inline]
+ fn next_match(&mut self) -> Option<(usize, usize)> {
+ loop {
+ match self.next() {
+ SearchStep::Match(a, b) => return Some((a, b)),
+ SearchStep::Done => return None,
+ _ => continue,
+ }
+ }
+ }
+
+ /// Finds the next [`Reject`][SearchStep::Reject] result. See [`next()`][Searcher::next]
+ /// and [`next_match()`][Searcher::next_match].
+ ///
+ /// Unlike [`next()`][Searcher::next], there is no guarantee that the returned ranges
+ /// of this and [`next_match`][Searcher::next_match] will overlap.
+ #[inline]
+ fn next_reject(&mut self) -> Option<(usize, usize)> {
+ loop {
+ match self.next() {
+ SearchStep::Reject(a, b) => return Some((a, b)),
+ SearchStep::Done => return None,
+ _ => continue,
+ }
+ }
+ }
+}
+
+/// A reverse searcher for a string pattern.
+///
+/// This trait provides methods for searching for non-overlapping
+/// matches of a pattern starting from the back (right) of a string.
+///
+/// It will be implemented by associated [`Searcher`]
+/// types of the [`Pattern`] trait if the pattern supports searching
+/// for it from the back.
+///
+/// The index ranges returned by this trait are not required
+/// to exactly match those of the forward search in reverse.
+///
+/// For the reason why this trait is marked unsafe, see the
+/// parent trait [`Searcher`].
+pub unsafe trait ReverseSearcher<'a>: Searcher<'a> {
+ /// Performs the next search step starting from the back.
+ ///
+ /// - Returns [`Match(a, b)`][SearchStep::Match] if `haystack[a..b]`
+ /// matches the pattern.
+    /// - Returns [`Reject(a, b)`][SearchStep::Reject] if `haystack[a..b]`
+    ///   cannot match the pattern, even partially.
+    /// - Returns [`Done`][SearchStep::Done] if every byte of the haystack
+    ///   has been visited.
+    ///
+    /// The stream of [`Match`][SearchStep::Match] and
+    /// [`Reject`][SearchStep::Reject] values up to a [`Done`][SearchStep::Done]
+    /// will contain index ranges that are adjacent, non-overlapping,
+    /// covering the whole haystack, and lying on UTF-8 boundaries.
+    ///
+    /// A [`Match`][SearchStep::Match] result needs to contain the whole matched
+    /// pattern; however, [`Reject`][SearchStep::Reject] results may be split up
+    /// into arbitrarily many adjacent fragments. Both ranges may have zero length.
+ ///
+ /// As an example, the pattern `"aaa"` and the haystack `"cbaaaaab"`
+ /// might produce the stream
+ /// `[Reject(7, 8), Match(4, 7), Reject(1, 4), Reject(0, 1)]`.
+ fn next_back(&mut self) -> SearchStep;
+
+ /// Finds the next [`Match`][SearchStep::Match] result.
+ /// See [`next_back()`][ReverseSearcher::next_back].
+ #[inline]
+ fn next_match_back(&mut self) -> Option<(usize, usize)> {
+ loop {
+ match self.next_back() {
+ SearchStep::Match(a, b) => return Some((a, b)),
+ SearchStep::Done => return None,
+ _ => continue,
+ }
+ }
+ }
+
+ /// Finds the next [`Reject`][SearchStep::Reject] result.
+ /// See [`next_back()`][ReverseSearcher::next_back].
+ #[inline]
+ fn next_reject_back(&mut self) -> Option<(usize, usize)> {
+ loop {
+ match self.next_back() {
+ SearchStep::Reject(a, b) => return Some((a, b)),
+ SearchStep::Done => return None,
+ _ => continue,
+ }
+ }
+ }
+}
+
+/// A marker trait to express that a [`ReverseSearcher`]
+/// can be used for a [`DoubleEndedIterator`] implementation.
+///
+/// For this, the impl of [`Searcher`] and [`ReverseSearcher`] need
+/// to follow these conditions:
+///
+/// - All results of `next()` need to be identical
+/// to the results of `next_back()` in reverse order.
+/// - `next()` and `next_back()` need to behave as
+/// the two ends of a range of values, that is they
+/// can not "walk past each other".
+///
+/// # Examples
+///
+/// `char::Searcher` is a `DoubleEndedSearcher` because searching for a
+/// [`char`] only requires looking at one character at a time, which behaves
+/// the same from both ends.
+///
+/// `(&str)::Searcher` is not a `DoubleEndedSearcher` because
+/// the pattern `"aa"` in the haystack `"aaa"` matches as either
+/// `"[aa]a"` or `"a[aa]"`, depending from which side it is searched.
+pub trait DoubleEndedSearcher<'a>: ReverseSearcher<'a> {}
+
+/////////////////////////////////////////////////////////////////////////////
+// Impl for char
+/////////////////////////////////////////////////////////////////////////////
+
+/// Associated type for `<char as Pattern<'a>>::Searcher`.
+#[derive(Clone, Debug)]
+pub struct CharSearcher<'a> {
+ haystack: &'a str,
+ // safety invariant: `finger`/`finger_back` must be a valid utf8 byte index of `haystack`
+ // This invariant can be broken *within* next_match and next_match_back, however
+ // they must exit with fingers on valid code point boundaries.
+ /// `finger` is the current byte index of the forward search.
+ /// Imagine that it exists before the byte at its index, i.e.
+ /// `haystack[finger]` is the first byte of the slice we must inspect during
+ /// forward searching
+ finger: usize,
+ /// `finger_back` is the current byte index of the reverse search.
+ /// Imagine that it exists after the byte at its index, i.e.
+    /// `haystack[finger_back - 1]` is the last byte of the slice we must inspect during
+    /// forward searching (and thus the first byte to be inspected when calling `next_back()`).
+ finger_back: usize,
+ /// The character being searched for
+ needle: char,
+
+ // safety invariant: `utf8_size` must be less than 5
+ /// The number of bytes `needle` takes up when encoded in utf8.
+ utf8_size: usize,
+ /// A utf8 encoded copy of the `needle`
+ utf8_encoded: [u8; 4],
+}
+
+unsafe impl<'a> Searcher<'a> for CharSearcher<'a> {
+ #[inline]
+ fn haystack(&self) -> &'a str {
+ self.haystack
+ }
+ #[inline]
+ fn next(&mut self) -> SearchStep {
+ let old_finger = self.finger;
+ // SAFETY: 1-4 guarantee safety of `get_unchecked`
+ // 1. `self.finger` and `self.finger_back` are kept on unicode boundaries
+ // (this is invariant)
+ // 2. `self.finger >= 0` since it starts at 0 and only increases
+ // 3. `self.finger < self.finger_back` because otherwise the char `iter`
+ // would return `SearchStep::Done`
+ // 4. `self.finger` comes before the end of the haystack because `self.finger_back`
+ // starts at the end and only decreases
+ let slice = unsafe { self.haystack.get_unchecked(old_finger..self.finger_back) };
+ let mut iter = slice.chars();
+ let old_len = iter.iter.len();
+ if let Some(ch) = iter.next() {
+ // add byte offset of current character
+ // without re-encoding as utf-8
+ self.finger += old_len - iter.iter.len();
+ if ch == self.needle {
+ SearchStep::Match(old_finger, self.finger)
+ } else {
+ SearchStep::Reject(old_finger, self.finger)
+ }
+ } else {
+ SearchStep::Done
+ }
+ }
+ #[inline]
+ fn next_match(&mut self) -> Option<(usize, usize)> {
+ loop {
+ // get the haystack after the last character found
+ let bytes = self.haystack.as_bytes().get(self.finger..self.finger_back)?;
+ // the last byte of the utf8 encoded needle
+ // SAFETY: we have an invariant that `utf8_size < 5`
+ let last_byte = unsafe { *self.utf8_encoded.get_unchecked(self.utf8_size - 1) };
+ if let Some(index) = memchr::memchr(last_byte, bytes) {
+ // The new finger is the index of the byte we found,
+ // plus one, since we memchr'd for the last byte of the character.
+ //
+ // Note that this doesn't always give us a finger on a UTF8 boundary.
+ // If we *didn't* find our character
+ // we may have indexed to the non-last byte of a 3-byte or 4-byte character.
+ // We can't just skip to the next valid starting byte because a character like
+ // ꁁ (U+A041 YI SYLLABLE PA), utf-8 `EA 81 81` will have us always find
+ // the second byte when searching for the third.
+ //
+ // However, this is totally okay. While we have the invariant that
+ // self.finger is on a UTF8 boundary, this invariant is not relied upon
+ // within this method (it is relied upon in CharSearcher::next()).
+ //
+ // We only exit this method when we reach the end of the string, or if we
+ // find something. When we find something the `finger` will be set
+ // to a UTF8 boundary.
+ self.finger += index + 1;
+ if self.finger >= self.utf8_size {
+ let found_char = self.finger - self.utf8_size;
+ if let Some(slice) = self.haystack.as_bytes().get(found_char..self.finger) {
+ if slice == &self.utf8_encoded[0..self.utf8_size] {
+ return Some((found_char, self.finger));
+ }
+ }
+ }
+ } else {
+ // found nothing, exit
+ self.finger = self.finger_back;
+ return None;
+ }
+ }
+ }
+
+ // let next_reject use the default implementation from the Searcher trait
+}
+
+unsafe impl<'a> ReverseSearcher<'a> for CharSearcher<'a> {
+ #[inline]
+ fn next_back(&mut self) -> SearchStep {
+ let old_finger = self.finger_back;
+ // SAFETY: see the comment for next() above
+ let slice = unsafe { self.haystack.get_unchecked(self.finger..old_finger) };
+ let mut iter = slice.chars();
+ let old_len = iter.iter.len();
+ if let Some(ch) = iter.next_back() {
+ // subtract byte offset of current character
+ // without re-encoding as utf-8
+ self.finger_back -= old_len - iter.iter.len();
+ if ch == self.needle {
+ SearchStep::Match(self.finger_back, old_finger)
+ } else {
+ SearchStep::Reject(self.finger_back, old_finger)
+ }
+ } else {
+ SearchStep::Done
+ }
+ }
+ #[inline]
+ fn next_match_back(&mut self) -> Option<(usize, usize)> {
+ let haystack = self.haystack.as_bytes();
+ loop {
+ // get the haystack up to but not including the last character searched
+ let bytes = haystack.get(self.finger..self.finger_back)?;
+ // the last byte of the utf8 encoded needle
+ // SAFETY: we have an invariant that `utf8_size < 5`
+ let last_byte = unsafe { *self.utf8_encoded.get_unchecked(self.utf8_size - 1) };
+ if let Some(index) = memchr::memrchr(last_byte, bytes) {
+ // we searched a slice that was offset by self.finger,
+ // add self.finger to recoup the original index
+ let index = self.finger + index;
+ // memrchr will return the index of the byte we wish to
+ // find. In case of an ASCII character, this is indeed
+                // where we wish our new finger to be ("after" the found
+ // char in the paradigm of reverse iteration). For
+ // multibyte chars we need to skip down by the number of more
+ // bytes they have than ASCII
+ let shift = self.utf8_size - 1;
+ if index >= shift {
+ let found_char = index - shift;
+ if let Some(slice) = haystack.get(found_char..(found_char + self.utf8_size)) {
+ if slice == &self.utf8_encoded[0..self.utf8_size] {
+ // move finger to before the character found (i.e., at its start index)
+ self.finger_back = found_char;
+ return Some((self.finger_back, self.finger_back + self.utf8_size));
+ }
+ }
+ }
+ // We can't use finger_back = index - size + 1 here. If we found the last char
+ // of a different-sized character (or the middle byte of a different character)
+ // we need to bump the finger_back down to `index`. This similarly makes
+ // `finger_back` have the potential to no longer be on a boundary,
+ // but this is OK since we only exit this function on a boundary
+ // or when the haystack has been searched completely.
+ //
+ // Unlike next_match this does not
+ // have the problem of repeated bytes in utf-8 because
+ // we're searching for the last byte, and we can only have
+ // found the last byte when searching in reverse.
+ self.finger_back = index;
+ } else {
+ self.finger_back = self.finger;
+ // found nothing, exit
+ return None;
+ }
+ }
+ }
+
+ // let next_reject_back use the default implementation from the Searcher trait
+}
+
+impl<'a> DoubleEndedSearcher<'a> for CharSearcher<'a> {}
+
+/// Searches for chars that are equal to a given [`char`].
+///
+/// # Examples
+///
+/// ```
+/// assert_eq!("Hello world".find('o'), Some(4));
+/// ```
+impl<'a> Pattern<'a> for char {
+ type Searcher = CharSearcher<'a>;
+
+ #[inline]
+ fn into_searcher(self, haystack: &'a str) -> Self::Searcher {
+ let mut utf8_encoded = [0; 4];
+ let utf8_size = self.encode_utf8(&mut utf8_encoded).len();
+ CharSearcher {
+ haystack,
+ finger: 0,
+ finger_back: haystack.len(),
+ needle: self,
+ utf8_size,
+ utf8_encoded,
+ }
+ }
+
+ #[inline]
+ fn is_contained_in(self, haystack: &'a str) -> bool {
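+        // Fast path: an ASCII char is a single byte, so a bytewise search
+        // suffices; otherwise, search for the char's UTF-8 encoding as a
+        // substring.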
+ if (self as u32) < 128 {
+ haystack.as_bytes().contains(&(self as u8))
+ } else {
+ let mut buffer = [0u8; 4];
+ self.encode_utf8(&mut buffer).is_contained_in(haystack)
+ }
+ }
+
+ #[inline]
+ fn is_prefix_of(self, haystack: &'a str) -> bool {
+ self.encode_utf8(&mut [0u8; 4]).is_prefix_of(haystack)
+ }
+
+ #[inline]
+ fn strip_prefix_of(self, haystack: &'a str) -> Option<&'a str> {
+ self.encode_utf8(&mut [0u8; 4]).strip_prefix_of(haystack)
+ }
+
+ #[inline]
+ fn is_suffix_of(self, haystack: &'a str) -> bool
+ where
+ Self::Searcher: ReverseSearcher<'a>,
+ {
+ self.encode_utf8(&mut [0u8; 4]).is_suffix_of(haystack)
+ }
+
+ #[inline]
+ fn strip_suffix_of(self, haystack: &'a str) -> Option<&'a str>
+ where
+ Self::Searcher: ReverseSearcher<'a>,
+ {
+ self.encode_utf8(&mut [0u8; 4]).strip_suffix_of(haystack)
+ }
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// Impl for a MultiCharEq wrapper
+/////////////////////////////////////////////////////////////////////////////
+
+#[doc(hidden)]
+trait MultiCharEq {
+ fn matches(&mut self, c: char) -> bool;
+}
+
+impl<F> MultiCharEq for F
+where
+ F: FnMut(char) -> bool,
+{
+ #[inline]
+ fn matches(&mut self, c: char) -> bool {
+ (*self)(c)
+ }
+}
+
+impl<const N: usize> MultiCharEq for [char; N] {
+ #[inline]
+ fn matches(&mut self, c: char) -> bool {
+ self.iter().any(|&m| m == c)
+ }
+}
+
+impl<const N: usize> MultiCharEq for &[char; N] {
+ #[inline]
+ fn matches(&mut self, c: char) -> bool {
+ self.iter().any(|&m| m == c)
+ }
+}
+
+impl MultiCharEq for &[char] {
+ #[inline]
+ fn matches(&mut self, c: char) -> bool {
+ self.iter().any(|&m| m == c)
+ }
+}
+
+struct MultiCharEqPattern<C: MultiCharEq>(C);
+
+#[derive(Clone, Debug)]
+struct MultiCharEqSearcher<'a, C: MultiCharEq> {
+ char_eq: C,
+ haystack: &'a str,
+ char_indices: super::CharIndices<'a>,
+}
+
+impl<'a, C: MultiCharEq> Pattern<'a> for MultiCharEqPattern<C> {
+ type Searcher = MultiCharEqSearcher<'a, C>;
+
+ #[inline]
+ fn into_searcher(self, haystack: &'a str) -> MultiCharEqSearcher<'a, C> {
+ MultiCharEqSearcher { haystack, char_eq: self.0, char_indices: haystack.char_indices() }
+ }
+}
+
+unsafe impl<'a, C: MultiCharEq> Searcher<'a> for MultiCharEqSearcher<'a, C> {
+ #[inline]
+ fn haystack(&self) -> &'a str {
+ self.haystack
+ }
+
+ #[inline]
+ fn next(&mut self) -> SearchStep {
+ let s = &mut self.char_indices;
+ // Compare lengths of the internal byte slice iterator
+ // to find length of current char
+ let pre_len = s.iter.iter.len();
+ if let Some((i, c)) = s.next() {
+ let len = s.iter.iter.len();
+ let char_len = pre_len - len;
+ if self.char_eq.matches(c) {
+ return SearchStep::Match(i, i + char_len);
+ } else {
+ return SearchStep::Reject(i, i + char_len);
+ }
+ }
+ SearchStep::Done
+ }
+}
+
+unsafe impl<'a, C: MultiCharEq> ReverseSearcher<'a> for MultiCharEqSearcher<'a, C> {
+ #[inline]
+ fn next_back(&mut self) -> SearchStep {
+ let s = &mut self.char_indices;
+ // Compare lengths of the internal byte slice iterator
+ // to find length of current char
+ let pre_len = s.iter.iter.len();
+ if let Some((i, c)) = s.next_back() {
+ let len = s.iter.iter.len();
+ let char_len = pre_len - len;
+ if self.char_eq.matches(c) {
+ return SearchStep::Match(i, i + char_len);
+ } else {
+ return SearchStep::Reject(i, i + char_len);
+ }
+ }
+ SearchStep::Done
+ }
+}
+
+impl<'a, C: MultiCharEq> DoubleEndedSearcher<'a> for MultiCharEqSearcher<'a, C> {}
+
+/////////////////////////////////////////////////////////////////////////////
+
+macro_rules! pattern_methods {
+ ($t:ty, $pmap:expr, $smap:expr) => {
+ type Searcher = $t;
+
+ #[inline]
+ fn into_searcher(self, haystack: &'a str) -> $t {
+ ($smap)(($pmap)(self).into_searcher(haystack))
+ }
+
+ #[inline]
+ fn is_contained_in(self, haystack: &'a str) -> bool {
+ ($pmap)(self).is_contained_in(haystack)
+ }
+
+ #[inline]
+ fn is_prefix_of(self, haystack: &'a str) -> bool {
+ ($pmap)(self).is_prefix_of(haystack)
+ }
+
+ #[inline]
+ fn strip_prefix_of(self, haystack: &'a str) -> Option<&'a str> {
+ ($pmap)(self).strip_prefix_of(haystack)
+ }
+
+ #[inline]
+ fn is_suffix_of(self, haystack: &'a str) -> bool
+ where
+ $t: ReverseSearcher<'a>,
+ {
+ ($pmap)(self).is_suffix_of(haystack)
+ }
+
+ #[inline]
+ fn strip_suffix_of(self, haystack: &'a str) -> Option<&'a str>
+ where
+ $t: ReverseSearcher<'a>,
+ {
+ ($pmap)(self).strip_suffix_of(haystack)
+ }
+ };
+}
+
+macro_rules! searcher_methods {
+ (forward) => {
+ #[inline]
+ fn haystack(&self) -> &'a str {
+ self.0.haystack()
+ }
+ #[inline]
+ fn next(&mut self) -> SearchStep {
+ self.0.next()
+ }
+ #[inline]
+ fn next_match(&mut self) -> Option<(usize, usize)> {
+ self.0.next_match()
+ }
+ #[inline]
+ fn next_reject(&mut self) -> Option<(usize, usize)> {
+ self.0.next_reject()
+ }
+ };
+ (reverse) => {
+ #[inline]
+ fn next_back(&mut self) -> SearchStep {
+ self.0.next_back()
+ }
+ #[inline]
+ fn next_match_back(&mut self) -> Option<(usize, usize)> {
+ self.0.next_match_back()
+ }
+ #[inline]
+ fn next_reject_back(&mut self) -> Option<(usize, usize)> {
+ self.0.next_reject_back()
+ }
+ };
+}
+
+/// Associated type for `<[char; N] as Pattern<'a>>::Searcher`.
+#[derive(Clone, Debug)]
+pub struct CharArraySearcher<'a, const N: usize>(
+ <MultiCharEqPattern<[char; N]> as Pattern<'a>>::Searcher,
+);
+
+/// Associated type for `<&[char; N] as Pattern<'a>>::Searcher`.
+#[derive(Clone, Debug)]
+pub struct CharArrayRefSearcher<'a, 'b, const N: usize>(
+ <MultiCharEqPattern<&'b [char; N]> as Pattern<'a>>::Searcher,
+);
+
+/// Searches for chars that are equal to any of the [`char`]s in the array.
+///
+/// # Examples
+///
+/// ```
+/// assert_eq!("Hello world".find(['l', 'l']), Some(2));
+/// assert_eq!("Hello world".find(['l', 'l']), Some(2));
+/// ```
+impl<'a, const N: usize> Pattern<'a> for [char; N] {
+ pattern_methods!(CharArraySearcher<'a, N>, MultiCharEqPattern, CharArraySearcher);
+}
+
+unsafe impl<'a, const N: usize> Searcher<'a> for CharArraySearcher<'a, N> {
+ searcher_methods!(forward);
+}
+
+unsafe impl<'a, const N: usize> ReverseSearcher<'a> for CharArraySearcher<'a, N> {
+ searcher_methods!(reverse);
+}
+
+/// Searches for chars that are equal to any of the [`char`]s in the array.
+///
+/// # Examples
+///
+/// ```
+/// assert_eq!("Hello world".find(&['l', 'l']), Some(2));
+/// assert_eq!("Hello world".find(&['l', 'l']), Some(2));
+/// ```
+impl<'a, 'b, const N: usize> Pattern<'a> for &'b [char; N] {
+ pattern_methods!(CharArrayRefSearcher<'a, 'b, N>, MultiCharEqPattern, CharArrayRefSearcher);
+}
+
+unsafe impl<'a, 'b, const N: usize> Searcher<'a> for CharArrayRefSearcher<'a, 'b, N> {
+ searcher_methods!(forward);
+}
+
+unsafe impl<'a, 'b, const N: usize> ReverseSearcher<'a> for CharArrayRefSearcher<'a, 'b, N> {
+ searcher_methods!(reverse);
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// Impl for &[char]
+/////////////////////////////////////////////////////////////////////////////
+
+// TODO: Change / remove due to ambiguity in meaning.
+
+/// Associated type for `<&[char] as Pattern<'a>>::Searcher`.
+#[derive(Clone, Debug)]
+pub struct CharSliceSearcher<'a, 'b>(<MultiCharEqPattern<&'b [char]> as Pattern<'a>>::Searcher);
+
+unsafe impl<'a, 'b> Searcher<'a> for CharSliceSearcher<'a, 'b> {
+ searcher_methods!(forward);
+}
+
+unsafe impl<'a, 'b> ReverseSearcher<'a> for CharSliceSearcher<'a, 'b> {
+ searcher_methods!(reverse);
+}
+
+impl<'a, 'b> DoubleEndedSearcher<'a> for CharSliceSearcher<'a, 'b> {}
+
+/// Searches for chars that are equal to any of the [`char`]s in the slice.
+///
+/// # Examples
+///
+/// ```
+/// assert_eq!("Hello world".find(&['l', 'l'] as &[_]), Some(2));
+/// assert_eq!("Hello world".find(&['l', 'l'][..]), Some(2));
+/// ```
+impl<'a, 'b> Pattern<'a> for &'b [char] {
+ pattern_methods!(CharSliceSearcher<'a, 'b>, MultiCharEqPattern, CharSliceSearcher);
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// Impl for F: FnMut(char) -> bool
+/////////////////////////////////////////////////////////////////////////////
+
+/// Associated type for `<F as Pattern<'a>>::Searcher`.
+#[derive(Clone)]
+pub struct CharPredicateSearcher<'a, F>(<MultiCharEqPattern<F> as Pattern<'a>>::Searcher)
+where
+ F: FnMut(char) -> bool;
+
+impl<F> fmt::Debug for CharPredicateSearcher<'_, F>
+where
+ F: FnMut(char) -> bool,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("CharPredicateSearcher")
+ .field("haystack", &self.0.haystack)
+ .field("char_indices", &self.0.char_indices)
+ .finish()
+ }
+}
+unsafe impl<'a, F> Searcher<'a> for CharPredicateSearcher<'a, F>
+where
+ F: FnMut(char) -> bool,
+{
+ searcher_methods!(forward);
+}
+
+unsafe impl<'a, F> ReverseSearcher<'a> for CharPredicateSearcher<'a, F>
+where
+ F: FnMut(char) -> bool,
+{
+ searcher_methods!(reverse);
+}
+
+impl<'a, F> DoubleEndedSearcher<'a> for CharPredicateSearcher<'a, F> where F: FnMut(char) -> bool {}
+
+/// Searches for [`char`]s that match the given predicate.
+///
+/// # Examples
+///
+/// ```
+/// assert_eq!("Hello world".find(char::is_uppercase), Some(0));
+/// assert_eq!("Hello world".find(|c| "aeiou".contains(c)), Some(1));
+/// ```
+impl<'a, F> Pattern<'a> for F
+where
+ F: FnMut(char) -> bool,
+{
+ pattern_methods!(CharPredicateSearcher<'a, F>, MultiCharEqPattern, CharPredicateSearcher);
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// Impl for &&str
+/////////////////////////////////////////////////////////////////////////////
+
+/// Delegates to the `&str` impl.
+impl<'a, 'b, 'c> Pattern<'a> for &'c &'b str {
+ pattern_methods!(StrSearcher<'a, 'b>, |&s| s, |s| s);
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// Impl for &str
+/////////////////////////////////////////////////////////////////////////////
+
+/// Non-allocating substring search.
+///
+/// Will handle the pattern `""` as returning empty matches at each character
+/// boundary.
+///
+/// # Examples
+///
+/// ```
+/// assert_eq!("Hello world".find("world"), Some(6));
+/// ```
+impl<'a, 'b> Pattern<'a> for &'b str {
+ type Searcher = StrSearcher<'a, 'b>;
+
+ #[inline]
+ fn into_searcher(self, haystack: &'a str) -> StrSearcher<'a, 'b> {
+ StrSearcher::new(haystack, self)
+ }
+
+ /// Checks whether the pattern matches at the front of the haystack.
+ #[inline]
+ fn is_prefix_of(self, haystack: &'a str) -> bool {
+ haystack.as_bytes().starts_with(self.as_bytes())
+ }
+
+ /// Removes the pattern from the front of haystack, if it matches.
+ #[inline]
+ fn strip_prefix_of(self, haystack: &'a str) -> Option<&'a str> {
+ if self.is_prefix_of(haystack) {
+ // SAFETY: prefix was just verified to exist.
+ unsafe { Some(haystack.get_unchecked(self.as_bytes().len()..)) }
+ } else {
+ None
+ }
+ }
+
+ /// Checks whether the pattern matches at the back of the haystack.
+ #[inline]
+ fn is_suffix_of(self, haystack: &'a str) -> bool {
+ haystack.as_bytes().ends_with(self.as_bytes())
+ }
+
+ /// Removes the pattern from the back of haystack, if it matches.
+ #[inline]
+ fn strip_suffix_of(self, haystack: &'a str) -> Option<&'a str> {
+ if self.is_suffix_of(haystack) {
+ let i = haystack.len() - self.as_bytes().len();
+ // SAFETY: suffix was just verified to exist.
+ unsafe { Some(haystack.get_unchecked(..i)) }
+ } else {
+ None
+ }
+ }
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// Two Way substring searcher
+/////////////////////////////////////////////////////////////////////////////
+
+#[derive(Clone, Debug)]
+/// Associated type for `<&str as Pattern<'a>>::Searcher`.
+pub struct StrSearcher<'a, 'b> {
+ haystack: &'a str,
+ needle: &'b str,
+
+ searcher: StrSearcherImpl,
+}
+
+#[derive(Clone, Debug)]
+enum StrSearcherImpl {
+ Empty(EmptyNeedle),
+ TwoWay(TwoWaySearcher),
+}
+
+#[derive(Clone, Debug)]
+struct EmptyNeedle {
+ position: usize,
+ end: usize,
+ is_match_fw: bool,
+ is_match_bw: bool,
+ // Needed in case of an empty haystack, see #85462
+ is_finished: bool,
+}
+
+impl<'a, 'b> StrSearcher<'a, 'b> {
+ fn new(haystack: &'a str, needle: &'b str) -> StrSearcher<'a, 'b> {
+ if needle.is_empty() {
+ StrSearcher {
+ haystack,
+ needle,
+ searcher: StrSearcherImpl::Empty(EmptyNeedle {
+ position: 0,
+ end: haystack.len(),
+ is_match_fw: true,
+ is_match_bw: true,
+ is_finished: false,
+ }),
+ }
+ } else {
+ StrSearcher {
+ haystack,
+ needle,
+ searcher: StrSearcherImpl::TwoWay(TwoWaySearcher::new(
+ needle.as_bytes(),
+ haystack.len(),
+ )),
+ }
+ }
+ }
+}
+
+unsafe impl<'a, 'b> Searcher<'a> for StrSearcher<'a, 'b> {
+ #[inline]
+ fn haystack(&self) -> &'a str {
+ self.haystack
+ }
+
+ #[inline]
+ fn next(&mut self) -> SearchStep {
+ match self.searcher {
+ StrSearcherImpl::Empty(ref mut searcher) => {
+ if searcher.is_finished {
+ return SearchStep::Done;
+ }
+ // empty needle rejects every char and matches every empty string between them
+ let is_match = searcher.is_match_fw;
+ searcher.is_match_fw = !searcher.is_match_fw;
+ let pos = searcher.position;
+ match self.haystack[pos..].chars().next() {
+ _ if is_match => SearchStep::Match(pos, pos),
+ None => {
+ searcher.is_finished = true;
+ SearchStep::Done
+ }
+ Some(ch) => {
+ searcher.position += ch.len_utf8();
+ SearchStep::Reject(pos, searcher.position)
+ }
+ }
+ }
+ StrSearcherImpl::TwoWay(ref mut searcher) => {
+                // TwoWaySearcher produces valid *Match* indices that split at char boundaries,
+                // as long as the haystack and needle are valid UTF-8.
+                // *Rejects* from the algorithm can fall on any indices, but we walk them
+                // forward to the next character boundary manually, so that they are UTF-8 safe.
+ if searcher.position == self.haystack.len() {
+ return SearchStep::Done;
+ }
+ let is_long = searcher.memory == usize::MAX;
+ match searcher.next::<RejectAndMatch>(
+ self.haystack.as_bytes(),
+ self.needle.as_bytes(),
+ is_long,
+ ) {
+ SearchStep::Reject(a, mut b) => {
+ // skip to next char boundary
+ while !self.haystack.is_char_boundary(b) {
+ b += 1;
+ }
+ searcher.position = cmp::max(b, searcher.position);
+ SearchStep::Reject(a, b)
+ }
+ otherwise => otherwise,
+ }
+ }
+ }
+ }
+
+ #[inline]
+ fn next_match(&mut self) -> Option<(usize, usize)> {
+ match self.searcher {
+ StrSearcherImpl::Empty(..) => loop {
+ match self.next() {
+ SearchStep::Match(a, b) => return Some((a, b)),
+ SearchStep::Done => return None,
+ SearchStep::Reject(..) => {}
+ }
+ },
+ StrSearcherImpl::TwoWay(ref mut searcher) => {
+ let is_long = searcher.memory == usize::MAX;
+ // write out `true` and `false` cases to encourage the compiler
+ // to specialize the two cases separately.
+ if is_long {
+ searcher.next::<MatchOnly>(
+ self.haystack.as_bytes(),
+ self.needle.as_bytes(),
+ true,
+ )
+ } else {
+ searcher.next::<MatchOnly>(
+ self.haystack.as_bytes(),
+ self.needle.as_bytes(),
+ false,
+ )
+ }
+ }
+ }
+ }
+}
+
+unsafe impl<'a, 'b> ReverseSearcher<'a> for StrSearcher<'a, 'b> {
+ #[inline]
+ fn next_back(&mut self) -> SearchStep {
+ match self.searcher {
+ StrSearcherImpl::Empty(ref mut searcher) => {
+ if searcher.is_finished {
+ return SearchStep::Done;
+ }
+ let is_match = searcher.is_match_bw;
+ searcher.is_match_bw = !searcher.is_match_bw;
+ let end = searcher.end;
+ match self.haystack[..end].chars().next_back() {
+ _ if is_match => SearchStep::Match(end, end),
+ None => {
+ searcher.is_finished = true;
+ SearchStep::Done
+ }
+ Some(ch) => {
+ searcher.end -= ch.len_utf8();
+ SearchStep::Reject(searcher.end, end)
+ }
+ }
+ }
+ StrSearcherImpl::TwoWay(ref mut searcher) => {
+ if searcher.end == 0 {
+ return SearchStep::Done;
+ }
+ let is_long = searcher.memory == usize::MAX;
+ match searcher.next_back::<RejectAndMatch>(
+ self.haystack.as_bytes(),
+ self.needle.as_bytes(),
+ is_long,
+ ) {
+ SearchStep::Reject(mut a, b) => {
+ // skip to next char boundary
+ while !self.haystack.is_char_boundary(a) {
+ a -= 1;
+ }
+ searcher.end = cmp::min(a, searcher.end);
+ SearchStep::Reject(a, b)
+ }
+ otherwise => otherwise,
+ }
+ }
+ }
+ }
+
+ #[inline]
+ fn next_match_back(&mut self) -> Option<(usize, usize)> {
+ match self.searcher {
+ StrSearcherImpl::Empty(..) => loop {
+ match self.next_back() {
+ SearchStep::Match(a, b) => return Some((a, b)),
+ SearchStep::Done => return None,
+ SearchStep::Reject(..) => {}
+ }
+ },
+ StrSearcherImpl::TwoWay(ref mut searcher) => {
+ let is_long = searcher.memory == usize::MAX;
+ // write out `true` and `false`, like `next_match`
+ if is_long {
+ searcher.next_back::<MatchOnly>(
+ self.haystack.as_bytes(),
+ self.needle.as_bytes(),
+ true,
+ )
+ } else {
+ searcher.next_back::<MatchOnly>(
+ self.haystack.as_bytes(),
+ self.needle.as_bytes(),
+ false,
+ )
+ }
+ }
+ }
+ }
+}
+
+/// The internal state of the two-way substring search algorithm.
+#[derive(Clone, Debug)]
+struct TwoWaySearcher {
+ // constants
+ /// critical factorization index
+ crit_pos: usize,
+ /// critical factorization index for reversed needle
+ crit_pos_back: usize,
+ period: usize,
+ /// `byteset` is an extension (not part of the two way algorithm);
+ /// it's a 64-bit "fingerprint" where each set bit `j` corresponds
+ /// to a (byte & 63) == j present in the needle.
+ byteset: u64,
+
+ // variables
+ position: usize,
+ end: usize,
+ /// index into needle before which we have already matched
+ memory: usize,
+ /// index into needle after which we have already matched
+ memory_back: usize,
+}
+
+/*
+ This is the Two-Way search algorithm, which was introduced in the paper:
+ Crochemore, M., Perrin, D., 1991, Two-way string-matching, Journal of the ACM 38(3):651-675.
+
+ Here's some background information.
+
+ A *word* is a string of symbols. The *length* of a word should be a familiar
+ notion, and here we denote it for any word x by |x|.
+ (We also allow for the possibility of the *empty word*, a word of length zero).
+
+ If x is any non-empty word, then an integer p with 0 < p <= |x| is said to be a
+ *period* for x iff for all i with 0 <= i <= |x| - p - 1, we have x[i] == x[i+p].
+ For example, both 1 and 2 are periods for the string "aa". As another example,
+ the only period of the string "abcd" is 4.
+
+ We denote by period(x) the *smallest* period of x (provided that x is non-empty).
+ This is always well-defined since every non-empty word x has at least one period,
+ |x|. We sometimes call this *the period* of x.
+
+ If u, v and x are words such that x = uv, where uv is the concatenation of u and
+ v, then we say that (u, v) is a *factorization* of x.
+
+ Let (u, v) be a factorization for a word x. Then if w is a non-empty word such
+ that both of the following hold
+
+ - either w is a suffix of u or u is a suffix of w
+ - either w is a prefix of v or v is a prefix of w
+
+ then w is said to be a *repetition* for the factorization (u, v).
+
+ Just to unpack this, there are four possibilities here. Let w = "abc". Then we
+ might have:
+
+ - w is a suffix of u and w is a prefix of v. ex: ("lolabc", "abcde")
+ - w is a suffix of u and v is a prefix of w. ex: ("lolabc", "ab")
+ - u is a suffix of w and w is a prefix of v. ex: ("bc", "abchi")
+ - u is a suffix of w and v is a prefix of w. ex: ("bc", "a")
+
+ Note that the word vu is a repetition for any factorization (u,v) of x = uv,
+ so every factorization has at least one repetition.
+
+ If x is a string and (u, v) is a factorization for x, then a *local period* for
+ (u, v) is an integer r such that there is some word w such that |w| = r and w is
+ a repetition for (u, v).
+
+ We denote by local_period(u, v) the smallest local period of (u, v). We sometimes
+ call this *the local period* of (u, v). Provided that x = uv is non-empty, this
+ is well-defined (because each non-empty word has at least one factorization, as
+ noted above).
+
+ It can be proven that the following is an equivalent definition of a local period
+ for a factorization (u, v): any positive integer r such that x[i] == x[i+r] for
+ all i such that |u| - r <= i <= |u| - 1 and such that both x[i] and x[i+r] are
+  defined (i.e., 0 <= i and i + r < |x|).
+
+ Using the above reformulation, it is easy to prove that
+
+ 1 <= local_period(u, v) <= period(uv)
+
+ A factorization (u, v) of x such that local_period(u,v) = period(x) is called a
+ *critical factorization*.
+
+ The algorithm hinges on the following theorem, which is stated without proof:
+
+ **Critical Factorization Theorem** Any word x has at least one critical
+ factorization (u, v) such that |u| < period(x).
+
+ The purpose of maximal_suffix is to find such a critical factorization.
+
+ If the period is short, compute another factorization x = u' v' to use
+ for reverse search, chosen instead so that |v'| < period(x).
+
+*/
+impl TwoWaySearcher {
+ fn new(needle: &[u8], end: usize) -> TwoWaySearcher {
+ let (crit_pos_false, period_false) = TwoWaySearcher::maximal_suffix(needle, false);
+ let (crit_pos_true, period_true) = TwoWaySearcher::maximal_suffix(needle, true);
+
+ let (crit_pos, period) = if crit_pos_false > crit_pos_true {
+ (crit_pos_false, period_false)
+ } else {
+ (crit_pos_true, period_true)
+ };
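+        // Worked example: for needle = b"abab", maximal_suffix returns (1, 2)
+        // under the `<` order and (0, 2) under the `>` order; the larger index
+        // wins, giving crit_pos = 1 and period = 2, i.e. the critical
+        // factorization ("a", "bab") with exact period 2.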
+
+ // A particularly readable explanation of what's going on here can be found
+ // in Crochemore and Rytter's book "Text Algorithms", ch 13. Specifically
+ // see the code for "Algorithm CP" on p. 323.
+ //
+ // What's going on is we have some critical factorization (u, v) of the
+ // needle, and we want to determine whether u is a suffix of
+ // &v[..period]. If it is, we use "Algorithm CP1". Otherwise we use
+ // "Algorithm CP2", which is optimized for when the period of the needle
+ // is large.
+ if needle[..crit_pos] == needle[period..period + crit_pos] {
+ // short period case -- the period is exact
+ // compute a separate critical factorization for the reversed needle
+ // x = u' v' where |v'| < period(x).
+ //
+ // This is sped up by the period being known already.
+ // Note that a case like x = "acba" may be factored exactly forwards
+ // (crit_pos = 1, period = 3) while being factored with approximate
+ // period in reverse (crit_pos = 2, period = 2). We use the given
+ // reverse factorization but keep the exact period.
+ let crit_pos_back = needle.len()
+ - cmp::max(
+ TwoWaySearcher::reverse_maximal_suffix(needle, period, false),
+ TwoWaySearcher::reverse_maximal_suffix(needle, period, true),
+ );
+
+ TwoWaySearcher {
+ crit_pos,
+ crit_pos_back,
+ period,
+ byteset: Self::byteset_create(&needle[..period]),
+
+ position: 0,
+ end,
+ memory: 0,
+ memory_back: needle.len(),
+ }
+ } else {
+ // long period case -- we have an approximation to the actual period,
+ // and don't use memorization.
+ //
+ // Approximate the period by lower bound max(|u|, |v|) + 1.
+ // The critical factorization is efficient to use for both forward and
+ // reverse search.
+
+ TwoWaySearcher {
+ crit_pos,
+ crit_pos_back: crit_pos,
+ period: cmp::max(crit_pos, needle.len() - crit_pos) + 1,
+ byteset: Self::byteset_create(needle),
+
+ position: 0,
+ end,
+ memory: usize::MAX, // Dummy value to signify that the period is long
+ memory_back: usize::MAX,
+ }
+ }
+ }
+
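+    // Build the 64-bit "fingerprint": for every byte b in `bytes`, set bit
+    // (b & 63). E.g. for b"ab", bits 33 ('a' & 63) and 34 ('b' & 63) are set.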
+ #[inline]
+ fn byteset_create(bytes: &[u8]) -> u64 {
+ bytes.iter().fold(0, |a, &b| (1 << (b & 0x3f)) | a)
+ }
+
+ #[inline]
+ fn byteset_contains(&self, byte: u8) -> bool {
+ (self.byteset >> ((byte & 0x3f) as usize)) & 1 != 0
+ }
+
+ // One of the main ideas of Two-Way is that we factorize the needle into
+ // two halves, (u, v), and begin trying to find v in the haystack by scanning
+ // left to right. If v matches, we try to match u by scanning right to left.
+ // How far we can jump when we encounter a mismatch is all based on the fact
+ // that (u, v) is a critical factorization for the needle.
+ #[inline]
+ fn next<S>(&mut self, haystack: &[u8], needle: &[u8], long_period: bool) -> S::Output
+ where
+ S: TwoWayStrategy,
+ {
+ // `next()` uses `self.position` as its cursor
+ let old_pos = self.position;
+ let needle_last = needle.len() - 1;
+ 'search: loop {
+ // Check that we have room to search in
+ // position + needle_last can not overflow if we assume slices
+ // are bounded by isize's range.
+ let tail_byte = match haystack.get(self.position + needle_last) {
+ Some(&b) => b,
+ None => {
+ self.position = haystack.len();
+ return S::rejecting(old_pos, self.position);
+ }
+ };
+
+ if S::use_early_reject() && old_pos != self.position {
+ return S::rejecting(old_pos, self.position);
+ }
+
+ // Quickly skip by large portions unrelated to our substring
+ if !self.byteset_contains(tail_byte) {
+ self.position += needle.len();
+ if !long_period {
+ self.memory = 0;
+ }
+ continue 'search;
+ }
+
+ // See if the right part of the needle matches
+ let start =
+ if long_period { self.crit_pos } else { cmp::max(self.crit_pos, self.memory) };
+ for i in start..needle.len() {
+ if needle[i] != haystack[self.position + i] {
+ self.position += i - self.crit_pos + 1;
+ if !long_period {
+ self.memory = 0;
+ }
+ continue 'search;
+ }
+ }
+
+ // See if the left part of the needle matches
+ let start = if long_period { 0 } else { self.memory };
+ for i in (start..self.crit_pos).rev() {
+ if needle[i] != haystack[self.position + i] {
+ self.position += self.period;
+ if !long_period {
+ self.memory = needle.len() - self.period;
+ }
+ continue 'search;
+ }
+ }
+
+ // We have found a match!
+ let match_pos = self.position;
+
+ // Note: add self.period instead of needle.len() to have overlapping matches
+ self.position += needle.len();
+ if !long_period {
+ self.memory = 0; // set to needle.len() - self.period for overlapping matches
+ }
+
+ return S::matching(match_pos, match_pos + needle.len());
+ }
+ }
+
+ // Follows the ideas in `next()`.
+ //
+ // The definitions are symmetrical, with period(x) = period(reverse(x))
+ // and local_period(u, v) = local_period(reverse(v), reverse(u)), so if (u, v)
+ // is a critical factorization, so is (reverse(v), reverse(u)).
+ //
+ // For the reverse case we have computed a critical factorization x = u' v'
+ // (field `crit_pos_back`). We need |u| < period(x) for the forward case and
+ // thus |v'| < period(x) for the reverse.
+ //
+ // To search in reverse through the haystack, we search forward through
+ // a reversed haystack with a reversed needle, matching first u' and then v'.
+ #[inline]
+ fn next_back<S>(&mut self, haystack: &[u8], needle: &[u8], long_period: bool) -> S::Output
+ where
+ S: TwoWayStrategy,
+ {
+ // `next_back()` uses `self.end` as its cursor -- so that `next()` and `next_back()`
+ // are independent.
+ let old_end = self.end;
+ 'search: loop {
+ // Check that we have room to search in
+ // end - needle.len() will wrap around when there is no more room,
+ // but due to slice length limits it can never wrap all the way back
+ // into the length of haystack.
+ let front_byte = match haystack.get(self.end.wrapping_sub(needle.len())) {
+ Some(&b) => b,
+ None => {
+ self.end = 0;
+ return S::rejecting(0, old_end);
+ }
+ };
+
+ if S::use_early_reject() && old_end != self.end {
+ return S::rejecting(self.end, old_end);
+ }
+
+ // Quickly skip by large portions unrelated to our substring
+ if !self.byteset_contains(front_byte) {
+ self.end -= needle.len();
+ if !long_period {
+ self.memory_back = needle.len();
+ }
+ continue 'search;
+ }
+
+ // See if the left part of the needle matches
+ let crit = if long_period {
+ self.crit_pos_back
+ } else {
+ cmp::min(self.crit_pos_back, self.memory_back)
+ };
+ for i in (0..crit).rev() {
+ if needle[i] != haystack[self.end - needle.len() + i] {
+ self.end -= self.crit_pos_back - i;
+ if !long_period {
+ self.memory_back = needle.len();
+ }
+ continue 'search;
+ }
+ }
+
+ // See if the right part of the needle matches
+ let needle_end = if long_period { needle.len() } else { self.memory_back };
+ for i in self.crit_pos_back..needle_end {
+ if needle[i] != haystack[self.end - needle.len() + i] {
+ self.end -= self.period;
+ if !long_period {
+ self.memory_back = self.period;
+ }
+ continue 'search;
+ }
+ }
+
+ // We have found a match!
+ let match_pos = self.end - needle.len();
+ // Note: sub self.period instead of needle.len() to have overlapping matches
+ self.end -= needle.len();
+ if !long_period {
+ self.memory_back = needle.len();
+ }
+
+ return S::matching(match_pos, match_pos + needle.len());
+ }
+ }
+
+ // Compute the maximal suffix of `arr`.
+ //
+ // The maximal suffix is a possible critical factorization (u, v) of `arr`.
+ //
+ // Returns (`i`, `p`) where `i` is the starting index of v and `p` is the
+ // period of v.
+ //
+ // `order_greater` determines if lexical order is `<` or `>`. Both
+ // orders must be computed -- the ordering with the largest `i` gives
+ // a critical factorization.
+ //
+ // For long period cases, the resulting period is not exact (it is too short).
+ #[inline]
+ fn maximal_suffix(arr: &[u8], order_greater: bool) -> (usize, usize) {
+ let mut left = 0; // Corresponds to i in the paper
+ let mut right = 1; // Corresponds to j in the paper
+ let mut offset = 0; // Corresponds to k in the paper, but starting at 0
+ // to match 0-based indexing.
+ let mut period = 1; // Corresponds to p in the paper
+
+ while let Some(&a) = arr.get(right + offset) {
+ // `left` will be inbounds when `right` is.
+ let b = arr[left + offset];
+ if (a < b && !order_greater) || (a > b && order_greater) {
+ // Suffix is smaller, period is entire prefix so far.
+ right += offset + 1;
+ offset = 0;
+ period = right - left;
+ } else if a == b {
+ // Advance through repetition of the current period.
+ if offset + 1 == period {
+ right += offset + 1;
+ offset = 0;
+ } else {
+ offset += 1;
+ }
+ } else {
+ // Suffix is larger, start over from current location.
+ left = right;
+ right += 1;
+ offset = 0;
+ period = 1;
+ }
+ }
+ (left, period)
+ }
+
+ // Compute the maximal suffix of the reverse of `arr`.
+ //
+ // The maximal suffix is a possible critical factorization (u', v') of `arr`.
+ //
+ // Returns `i` where `i` is the starting index of v', from the back;
+ // returns immediately when a period of `known_period` is reached.
+ //
+ // `order_greater` determines if lexical order is `<` or `>`. Both
+ // orders must be computed -- the ordering with the largest `i` gives
+ // a critical factorization.
+ //
+ // For long period cases, the resulting period is not exact (it is too short).
+ fn reverse_maximal_suffix(arr: &[u8], known_period: usize, order_greater: bool) -> usize {
+ let mut left = 0; // Corresponds to i in the paper
+ let mut right = 1; // Corresponds to j in the paper
+ let mut offset = 0; // Corresponds to k in the paper, but starting at 0
+ // to match 0-based indexing.
+ let mut period = 1; // Corresponds to p in the paper
+ let n = arr.len();
+
+ while right + offset < n {
+ let a = arr[n - (1 + right + offset)];
+ let b = arr[n - (1 + left + offset)];
+ if (a < b && !order_greater) || (a > b && order_greater) {
+ // Suffix is smaller, period is entire prefix so far.
+ right += offset + 1;
+ offset = 0;
+ period = right - left;
+ } else if a == b {
+ // Advance through repetition of the current period.
+ if offset + 1 == period {
+ right += offset + 1;
+ offset = 0;
+ } else {
+ offset += 1;
+ }
+ } else {
+ // Suffix is larger, start over from current location.
+ left = right;
+ right += 1;
+ offset = 0;
+ period = 1;
+ }
+ if period == known_period {
+ break;
+ }
+ }
+ debug_assert!(period <= known_period);
+ left
+ }
+}
+
+// TwoWayStrategy allows the algorithm to either skip non-matches as quickly
+// as possible, or to work in a mode where it emits Rejects relatively quickly.
+trait TwoWayStrategy {
+ type Output;
+ fn use_early_reject() -> bool;
+ fn rejecting(a: usize, b: usize) -> Self::Output;
+ fn matching(a: usize, b: usize) -> Self::Output;
+}
+
+/// Skip to match intervals as quickly as possible
+enum MatchOnly {}
+
+impl TwoWayStrategy for MatchOnly {
+ type Output = Option<(usize, usize)>;
+
+ #[inline]
+ fn use_early_reject() -> bool {
+ false
+ }
+ #[inline]
+ fn rejecting(_a: usize, _b: usize) -> Self::Output {
+ None
+ }
+ #[inline]
+ fn matching(a: usize, b: usize) -> Self::Output {
+ Some((a, b))
+ }
+}
+
+/// Emit Rejects regularly
+enum RejectAndMatch {}
+
+impl TwoWayStrategy for RejectAndMatch {
+ type Output = SearchStep;
+
+ #[inline]
+ fn use_early_reject() -> bool {
+ true
+ }
+ #[inline]
+ fn rejecting(a: usize, b: usize) -> Self::Output {
+ SearchStep::Reject(a, b)
+ }
+ #[inline]
+ fn matching(a: usize, b: usize) -> Self::Output {
+ SearchStep::Match(a, b)
+ }
+}
diff --git a/library/core/src/str/traits.rs b/library/core/src/str/traits.rs
new file mode 100644
index 000000000..e9649fc91
--- /dev/null
+++ b/library/core/src/str/traits.rs
@@ -0,0 +1,604 @@
+//! Trait implementations for `str`.
+
+use crate::cmp::Ordering;
+use crate::ops;
+use crate::ptr;
+use crate::slice::SliceIndex;
+
+use super::ParseBoolError;
+
+/// Implements ordering of strings.
+///
+/// Strings are ordered [lexicographically](Ord#lexicographical-comparison) by their byte values. This orders Unicode code
+/// points based on their positions in the code charts. This is not necessarily the same as
+/// "alphabetical" order, which varies by language and locale. Sorting strings according to
+/// culturally-accepted standards requires locale-specific data that is outside the scope of
+/// the `str` type.
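+///
+/// # Examples
+///
+/// A small illustration that byte order is not alphabetical order:
+///
+/// ```
+/// // 'Z' (0x5A) sorts before 'a' (0x61) by byte value.
+/// assert!("Zebra" < "apple");
+/// ```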
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Ord for str {
+ #[inline]
+ fn cmp(&self, other: &str) -> Ordering {
+ self.as_bytes().cmp(other.as_bytes())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl PartialEq for str {
+ #[inline]
+ fn eq(&self, other: &str) -> bool {
+ self.as_bytes() == other.as_bytes()
+ }
+ #[inline]
+ fn ne(&self, other: &str) -> bool {
+ !(*self).eq(other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Eq for str {}
+
+/// Implements comparison operations on strings.
+///
+/// Strings are compared [lexicographically](Ord#lexicographical-comparison) by their byte values. This compares Unicode code
+/// points based on their positions in the code charts. This is not necessarily the same as
+/// "alphabetical" order, which varies by language and locale. Comparing strings according to
+/// culturally-accepted standards requires locale-specific data that is outside the scope of
+/// the `str` type.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl PartialOrd for str {
+ #[inline]
+ fn partial_cmp(&self, other: &str) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+impl<I> const ops::Index<I> for str
+where
+ I: ~const SliceIndex<str>,
+{
+ type Output = I::Output;
+
+ #[inline]
+ fn index(&self, index: I) -> &I::Output {
+ index.index(self)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+impl<I> const ops::IndexMut<I> for str
+where
+ I: ~const SliceIndex<str>,
+{
+ #[inline]
+ fn index_mut(&mut self, index: I) -> &mut I::Output {
+ index.index_mut(self)
+ }
+}
+
+#[inline(never)]
+#[cold]
+#[track_caller]
+const fn str_index_overflow_fail() -> ! {
+ panic!("attempted to index str up to maximum usize");
+}
+
+/// Implements substring slicing with syntax `&self[..]` or `&mut self[..]`.
+///
+/// Returns a slice of the whole string, i.e., returns `&self` or `&mut
+/// self`. Equivalent to `&self[0 .. len]` or `&mut self[0 .. len]`. Unlike
+/// other indexing operations, this can never panic.
+///
+/// This operation is *O*(1).
+///
+/// Prior to 1.20.0, these indexing operations were still supported by
+/// direct implementation of `Index` and `IndexMut`.
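+///
+/// # Examples
+///
+/// A full-range slice is always the whole string:
+///
+/// ```
+/// let s = "Löwe";
+/// assert_eq!(&s[..], "Löwe");
+/// ```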
+#[stable(feature = "str_checked_slicing", since = "1.20.0")]
+#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+unsafe impl const SliceIndex<str> for ops::RangeFull {
+ type Output = str;
+ #[inline]
+ fn get(self, slice: &str) -> Option<&Self::Output> {
+ Some(slice)
+ }
+ #[inline]
+ fn get_mut(self, slice: &mut str) -> Option<&mut Self::Output> {
+ Some(slice)
+ }
+ #[inline]
+ unsafe fn get_unchecked(self, slice: *const str) -> *const Self::Output {
+ slice
+ }
+ #[inline]
+ unsafe fn get_unchecked_mut(self, slice: *mut str) -> *mut Self::Output {
+ slice
+ }
+ #[inline]
+ fn index(self, slice: &str) -> &Self::Output {
+ slice
+ }
+ #[inline]
+ fn index_mut(self, slice: &mut str) -> &mut Self::Output {
+ slice
+ }
+}
+
+/// Implements substring slicing with syntax `&self[begin .. end]` or `&mut
+/// self[begin .. end]`.
+///
+/// Returns a slice of the given string from the byte range
+/// \[`begin`, `end`).
+///
+/// This operation is *O*(1).
+///
+/// Prior to 1.20.0, these indexing operations were still supported by
+/// direct implementation of `Index` and `IndexMut`.
+///
+/// # Panics
+///
+/// Panics if `begin` or `end` does not point to the starting byte offset of
+/// a character (as defined by `is_char_boundary`), if `begin > end`, or if
+/// `end > len`.
+///
+/// # Examples
+///
+/// ```
+/// let s = "Löwe 老虎 Léopard";
+/// assert_eq!(&s[0 .. 1], "L");
+///
+/// assert_eq!(&s[1 .. 9], "öwe 老");
+///
+/// // these will panic:
+/// // byte 2 lies within `ö`:
+/// // &s[2 .. 3];
+///
+/// // byte 8 lies within `老`:
+/// // &s[1 .. 8];
+///
+/// // byte 100 is outside the string
+/// // &s[3 .. 100];
+/// ```
+#[stable(feature = "str_checked_slicing", since = "1.20.0")]
+#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+unsafe impl const SliceIndex<str> for ops::Range<usize> {
+ type Output = str;
+ #[inline]
+ fn get(self, slice: &str) -> Option<&Self::Output> {
+ if self.start <= self.end
+ && slice.is_char_boundary(self.start)
+ && slice.is_char_boundary(self.end)
+ {
+ // SAFETY: just checked that `start` and `end` are on a char boundary,
+ // and we are passing in a safe reference, so the return value will also be one.
+ // We also checked char boundaries, so this is valid UTF-8.
+ Some(unsafe { &*self.get_unchecked(slice) })
+ } else {
+ None
+ }
+ }
+ #[inline]
+ fn get_mut(self, slice: &mut str) -> Option<&mut Self::Output> {
+ if self.start <= self.end
+ && slice.is_char_boundary(self.start)
+ && slice.is_char_boundary(self.end)
+ {
+ // SAFETY: just checked that `start` and `end` are on a char boundary.
+ // We know the pointer is unique because we got it from `slice`.
+ Some(unsafe { &mut *self.get_unchecked_mut(slice) })
+ } else {
+ None
+ }
+ }
+ #[inline]
+ unsafe fn get_unchecked(self, slice: *const str) -> *const Self::Output {
+ let slice = slice as *const [u8];
+ // SAFETY: the caller guarantees that `self` is in bounds of `slice`
+ // which satisfies all the conditions for `add`.
+ let ptr = unsafe { slice.as_ptr().add(self.start) };
+ let len = self.end - self.start;
+ ptr::slice_from_raw_parts(ptr, len) as *const str
+ }
+ #[inline]
+ unsafe fn get_unchecked_mut(self, slice: *mut str) -> *mut Self::Output {
+ let slice = slice as *mut [u8];
+ // SAFETY: see comments for `get_unchecked`.
+ let ptr = unsafe { slice.as_mut_ptr().add(self.start) };
+ let len = self.end - self.start;
+ ptr::slice_from_raw_parts_mut(ptr, len) as *mut str
+ }
+ #[inline]
+ fn index(self, slice: &str) -> &Self::Output {
+ let (start, end) = (self.start, self.end);
+ match self.get(slice) {
+ Some(s) => s,
+ None => super::slice_error_fail(slice, start, end),
+ }
+ }
+ #[inline]
+ fn index_mut(self, slice: &mut str) -> &mut Self::Output {
+ // is_char_boundary checks that the index is in [0, .len()]
+ // cannot reuse `get` as above, because of NLL trouble
+ if self.start <= self.end
+ && slice.is_char_boundary(self.start)
+ && slice.is_char_boundary(self.end)
+ {
+ // SAFETY: just checked that `start` and `end` are on a char boundary,
+ // and we are passing in a safe reference, so the return value will also be one.
+ unsafe { &mut *self.get_unchecked_mut(slice) }
+ } else {
+ super::slice_error_fail(slice, self.start, self.end)
+ }
+ }
+}
+
+/// Implements substring slicing with syntax `&self[.. end]` or `&mut
+/// self[.. end]`.
+///
+/// Returns a slice of the given string from the byte range \[0, `end`).
+/// Equivalent to `&self[0 .. end]` or `&mut self[0 .. end]`.
+///
+/// This operation is *O*(1).
+///
+/// Prior to 1.20.0, these indexing operations were still supported by
+/// direct implementation of `Index` and `IndexMut`.
+///
+/// # Panics
+///
+/// Panics if `end` does not point to the starting byte offset of a
+/// character (as defined by `is_char_boundary`), or if `end > len`.
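+///
+/// # Examples
+///
+/// ```
+/// let s = "Löwe";
+/// assert_eq!(&s[.. 1], "L");
+///
+/// // byte 2 lies within `ö`, so the checked variant returns `None`:
+/// assert!(s.get(.. 2).is_none());
+/// ```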
+#[stable(feature = "str_checked_slicing", since = "1.20.0")]
+#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+unsafe impl const SliceIndex<str> for ops::RangeTo<usize> {
+ type Output = str;
+ #[inline]
+ fn get(self, slice: &str) -> Option<&Self::Output> {
+ if slice.is_char_boundary(self.end) {
+ // SAFETY: just checked that `end` is on a char boundary,
+ // and we are passing in a safe reference, so the return value will also be one.
+ Some(unsafe { &*self.get_unchecked(slice) })
+ } else {
+ None
+ }
+ }
+ #[inline]
+ fn get_mut(self, slice: &mut str) -> Option<&mut Self::Output> {
+ if slice.is_char_boundary(self.end) {
+ // SAFETY: just checked that `end` is on a char boundary,
+ // and we are passing in a safe reference, so the return value will also be one.
+ Some(unsafe { &mut *self.get_unchecked_mut(slice) })
+ } else {
+ None
+ }
+ }
+ #[inline]
+ unsafe fn get_unchecked(self, slice: *const str) -> *const Self::Output {
+ let slice = slice as *const [u8];
+ let ptr = slice.as_ptr();
+ ptr::slice_from_raw_parts(ptr, self.end) as *const str
+ }
+ #[inline]
+ unsafe fn get_unchecked_mut(self, slice: *mut str) -> *mut Self::Output {
+ let slice = slice as *mut [u8];
+ let ptr = slice.as_mut_ptr();
+ ptr::slice_from_raw_parts_mut(ptr, self.end) as *mut str
+ }
+ #[inline]
+ fn index(self, slice: &str) -> &Self::Output {
+ let end = self.end;
+ match self.get(slice) {
+ Some(s) => s,
+ None => super::slice_error_fail(slice, 0, end),
+ }
+ }
+ #[inline]
+ fn index_mut(self, slice: &mut str) -> &mut Self::Output {
+ if slice.is_char_boundary(self.end) {
+ // SAFETY: just checked that `end` is on a char boundary,
+ // and we are passing in a safe reference, so the return value will also be one.
+ unsafe { &mut *self.get_unchecked_mut(slice) }
+ } else {
+ super::slice_error_fail(slice, 0, self.end)
+ }
+ }
+}
+
+/// Implements substring slicing with syntax `&self[begin ..]` or `&mut
+/// self[begin ..]`.
+///
+/// Returns a slice of the given string from the byte range \[`begin`, `len`).
+/// Equivalent to `&self[begin .. len]` or `&mut self[begin .. len]`.
+///
+/// This operation is *O*(1).
+///
+/// Prior to 1.20.0, these indexing operations were still supported by
+/// direct implementation of `Index` and `IndexMut`.
+///
+/// # Panics
+///
+/// Panics if `begin` does not point to the starting byte offset of
+/// a character (as defined by `is_char_boundary`), or if `begin > len`.
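+///
+/// # Examples
+///
+/// ```
+/// let s = "Löwe";
+/// assert_eq!(&s[3 ..], "we");
+///
+/// // byte 2 lies within `ö`, so the checked variant returns `None`:
+/// assert!(s.get(2 ..).is_none());
+/// ```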
+#[stable(feature = "str_checked_slicing", since = "1.20.0")]
+#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+unsafe impl const SliceIndex<str> for ops::RangeFrom<usize> {
+ type Output = str;
+ #[inline]
+ fn get(self, slice: &str) -> Option<&Self::Output> {
+ if slice.is_char_boundary(self.start) {
+ // SAFETY: just checked that `start` is on a char boundary,
+ // and we are passing in a safe reference, so the return value will also be one.
+ Some(unsafe { &*self.get_unchecked(slice) })
+ } else {
+ None
+ }
+ }
+ #[inline]
+ fn get_mut(self, slice: &mut str) -> Option<&mut Self::Output> {
+ if slice.is_char_boundary(self.start) {
+ // SAFETY: just checked that `start` is on a char boundary,
+ // and we are passing in a safe reference, so the return value will also be one.
+ Some(unsafe { &mut *self.get_unchecked_mut(slice) })
+ } else {
+ None
+ }
+ }
+ #[inline]
+ unsafe fn get_unchecked(self, slice: *const str) -> *const Self::Output {
+ let slice = slice as *const [u8];
+ // SAFETY: the caller guarantees that `self` is in bounds of `slice`
+ // which satisfies all the conditions for `add`.
+ let ptr = unsafe { slice.as_ptr().add(self.start) };
+ let len = slice.len() - self.start;
+ ptr::slice_from_raw_parts(ptr, len) as *const str
+ }
+ #[inline]
+ unsafe fn get_unchecked_mut(self, slice: *mut str) -> *mut Self::Output {
+ let slice = slice as *mut [u8];
+ // SAFETY: identical to `get_unchecked`.
+ let ptr = unsafe { slice.as_mut_ptr().add(self.start) };
+ let len = slice.len() - self.start;
+ ptr::slice_from_raw_parts_mut(ptr, len) as *mut str
+ }
+ #[inline]
+ fn index(self, slice: &str) -> &Self::Output {
+ let (start, end) = (self.start, slice.len());
+ match self.get(slice) {
+ Some(s) => s,
+ None => super::slice_error_fail(slice, start, end),
+ }
+ }
+ #[inline]
+ fn index_mut(self, slice: &mut str) -> &mut Self::Output {
+ if slice.is_char_boundary(self.start) {
+ // SAFETY: just checked that `start` is on a char boundary,
+ // and we are passing in a safe reference, so the return value will also be one.
+ unsafe { &mut *self.get_unchecked_mut(slice) }
+ } else {
+ super::slice_error_fail(slice, self.start, slice.len())
+ }
+ }
+}
+
+/// Implements substring slicing with syntax `&self[begin ..= end]` or `&mut
+/// self[begin ..= end]`.
+///
+/// Returns a slice of the given string from the byte range
+/// \[`begin`, `end`\]. Equivalent to `&self[begin .. end + 1]` or `&mut
+/// self[begin .. end + 1]`, except if `end` has the maximum value for
+/// `usize`.
+///
+/// This operation is *O*(1).
+///
+/// # Panics
+///
+/// Panics if `begin` does not point to the starting byte offset of
+/// a character (as defined by `is_char_boundary`), if `end` does not point
+/// to the ending byte offset of a character (`end + 1` is either a starting
+/// byte offset or equal to `len`), if `begin > end`, or if `end >= len`.
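+///
+/// # Examples
+///
+/// ```
+/// let s = "abc";
+/// assert_eq!(&s[0 ..= 1], "ab");
+///
+/// // an inclusive range ending at `usize::MAX` can never be valid:
+/// assert!(s.get(0 ..= usize::MAX).is_none());
+/// ```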
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+unsafe impl const SliceIndex<str> for ops::RangeInclusive<usize> {
+ type Output = str;
+ #[inline]
+ fn get(self, slice: &str) -> Option<&Self::Output> {
+ if *self.end() == usize::MAX { None } else { self.into_slice_range().get(slice) }
+ }
+ #[inline]
+ fn get_mut(self, slice: &mut str) -> Option<&mut Self::Output> {
+ if *self.end() == usize::MAX { None } else { self.into_slice_range().get_mut(slice) }
+ }
+ #[inline]
+ unsafe fn get_unchecked(self, slice: *const str) -> *const Self::Output {
+ // SAFETY: the caller must uphold the safety contract for `get_unchecked`.
+ unsafe { self.into_slice_range().get_unchecked(slice) }
+ }
+ #[inline]
+ unsafe fn get_unchecked_mut(self, slice: *mut str) -> *mut Self::Output {
+ // SAFETY: the caller must uphold the safety contract for `get_unchecked_mut`.
+ unsafe { self.into_slice_range().get_unchecked_mut(slice) }
+ }
+ #[inline]
+ fn index(self, slice: &str) -> &Self::Output {
+ if *self.end() == usize::MAX {
+ str_index_overflow_fail();
+ }
+ self.into_slice_range().index(slice)
+ }
+ #[inline]
+ fn index_mut(self, slice: &mut str) -> &mut Self::Output {
+ if *self.end() == usize::MAX {
+ str_index_overflow_fail();
+ }
+ self.into_slice_range().index_mut(slice)
+ }
+}
+
+/// Implements substring slicing with syntax `&self[..= end]` or `&mut
+/// self[..= end]`.
+///
+/// Returns a slice of the given string from the byte range \[0, `end`\].
+/// Equivalent to `&self[0 .. end + 1]`, except if `end` has the maximum
+/// value for `usize`.
+///
+/// This operation is *O*(1).
+///
+/// # Panics
+///
+/// Panics if `end` does not point to the ending byte offset of a character
+/// (`end + 1` is either a starting byte offset as defined by
+/// `is_char_boundary`, or equal to `len`), or if `end >= len`.
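+///
+/// # Examples
+///
+/// ```
+/// let s = "abc";
+/// assert_eq!(&s[..= 1], "ab");
+///
+/// // an inclusive range ending at `usize::MAX` can never be valid:
+/// assert!(s.get(..= usize::MAX).is_none());
+/// ```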
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
+unsafe impl const SliceIndex<str> for ops::RangeToInclusive<usize> {
+ type Output = str;
+ #[inline]
+ fn get(self, slice: &str) -> Option<&Self::Output> {
+ if self.end == usize::MAX { None } else { (..self.end + 1).get(slice) }
+ }
+ #[inline]
+ fn get_mut(self, slice: &mut str) -> Option<&mut Self::Output> {
+ if self.end == usize::MAX { None } else { (..self.end + 1).get_mut(slice) }
+ }
+ #[inline]
+ unsafe fn get_unchecked(self, slice: *const str) -> *const Self::Output {
+ // SAFETY: the caller must uphold the safety contract for `get_unchecked`.
+ unsafe { (..self.end + 1).get_unchecked(slice) }
+ }
+ #[inline]
+ unsafe fn get_unchecked_mut(self, slice: *mut str) -> *mut Self::Output {
+ // SAFETY: the caller must uphold the safety contract for `get_unchecked_mut`.
+ unsafe { (..self.end + 1).get_unchecked_mut(slice) }
+ }
+ #[inline]
+ fn index(self, slice: &str) -> &Self::Output {
+ if self.end == usize::MAX {
+ str_index_overflow_fail();
+ }
+ (..self.end + 1).index(slice)
+ }
+ #[inline]
+ fn index_mut(self, slice: &mut str) -> &mut Self::Output {
+ if self.end == usize::MAX {
+ str_index_overflow_fail();
+ }
+ (..self.end + 1).index_mut(slice)
+ }
+}
+
+/// Parse a value from a string.
+///
+/// `FromStr`'s [`from_str`] method is often used implicitly, through
+/// [`str`]'s [`parse`] method. See [`parse`]'s documentation for examples.
+///
+/// [`from_str`]: FromStr::from_str
+/// [`parse`]: str::parse
+///
+/// `FromStr` does not have a lifetime parameter, and so you can only parse types
+/// that do not contain a lifetime parameter themselves. In other words, you can
+/// parse an `i32` with `FromStr`, but not a `&i32`. You can parse a struct that
+/// contains an `i32`, but not one that contains an `&i32`.
+///
+/// # Examples
+///
+/// Basic implementation of `FromStr` on an example `Point` type:
+///
+/// ```
+/// use std::str::FromStr;
+/// use std::num::ParseIntError;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct Point {
+/// x: i32,
+/// y: i32
+/// }
+///
+/// impl FromStr for Point {
+/// type Err = ParseIntError;
+///
+/// fn from_str(s: &str) -> Result<Self, Self::Err> {
+/// let (x, y) = s
+/// .strip_prefix('(')
+/// .and_then(|s| s.strip_suffix(')'))
+/// .and_then(|s| s.split_once(','))
+/// .unwrap();
+///
+/// let x_fromstr = x.parse::<i32>()?;
+/// let y_fromstr = y.parse::<i32>()?;
+///
+/// Ok(Point { x: x_fromstr, y: y_fromstr })
+/// }
+/// }
+///
+/// let expected = Ok(Point { x: 1, y: 2 });
+/// // Explicit call
+/// assert_eq!(Point::from_str("(1,2)"), expected);
+/// // Implicit calls, through parse
+/// assert_eq!("(1,2)".parse(), expected);
+/// assert_eq!("(1,2)".parse::<Point>(), expected);
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait FromStr: Sized {
+ /// The associated error which can be returned from parsing.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ type Err;
+
+ /// Parses a string `s` to return a value of this type.
+ ///
+ /// If parsing succeeds, return the value inside [`Ok`], otherwise
+/// when the string is ill-formatted, return an error inside [`Err`].
+/// The error type is specific to the implementation of the trait.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage with [`i32`], a type that implements `FromStr`:
+ ///
+ /// ```
+ /// use std::str::FromStr;
+ ///
+ /// let s = "5";
+ /// let x = i32::from_str(s).unwrap();
+ ///
+ /// assert_eq!(5, x);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn from_str(s: &str) -> Result<Self, Self::Err>;
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl FromStr for bool {
+ type Err = ParseBoolError;
+
+ /// Parse a `bool` from a string.
+ ///
+ /// Yields a `Result<bool, ParseBoolError>`, because `s` may or may not
+ /// actually be parseable.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::str::FromStr;
+ ///
+ /// assert_eq!(FromStr::from_str("true"), Ok(true));
+ /// assert_eq!(FromStr::from_str("false"), Ok(false));
+ /// assert!(<bool as FromStr>::from_str("not even a boolean").is_err());
+ /// ```
+ ///
+ /// Note that in many cases, the `.parse()` method on `str` is more convenient.
+ ///
+ /// ```
+ /// assert_eq!("true".parse(), Ok(true));
+ /// assert_eq!("false".parse(), Ok(false));
+ /// assert!("not even a boolean".parse::<bool>().is_err());
+ /// ```
+ #[inline]
+ fn from_str(s: &str) -> Result<bool, ParseBoolError> {
+ match s {
+ "true" => Ok(true),
+ "false" => Ok(false),
+ _ => Err(ParseBoolError),
+ }
+ }
+}
diff --git a/library/core/src/str/validations.rs b/library/core/src/str/validations.rs
new file mode 100644
index 000000000..04bc66523
--- /dev/null
+++ b/library/core/src/str/validations.rs
@@ -0,0 +1,274 @@
+//! Operations related to UTF-8 validation.
+
+use crate::mem;
+
+use super::Utf8Error;
+
+/// Returns the initial codepoint accumulator for the first byte.
+/// The first byte is special: we only want the bottom 5 bits for width 2, 4 bits
+/// for width 3, and 3 bits for width 4.
+#[inline]
+const fn utf8_first_byte(byte: u8, width: u32) -> u32 {
+ (byte & (0x7F >> width)) as u32
+}
+
+/// Returns the value of `ch` updated with continuation byte `byte`.
+#[inline]
+const fn utf8_acc_cont_byte(ch: u32, byte: u8) -> u32 {
+ (ch << 6) | (byte & CONT_MASK) as u32
+}
+
+/// Checks whether the byte is a UTF-8 continuation byte (i.e., starts with the
+/// bits `10`).
+#[inline]
+pub(super) const fn utf8_is_cont_byte(byte: u8) -> bool {
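+ // Continuation bytes have the form 0b10xx_xxxx, i.e. 0x80..=0xBF; cast to
+ // `i8` that range is exactly the values below -64 (-128..=-65).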
+ (byte as i8) < -64
+}
+
+/// Reads the next code point out of a byte iterator (assuming a
+/// UTF-8-like encoding).
+///
+/// # Safety
+///
+/// `bytes` must produce a valid UTF-8-like (UTF-8 or WTF-8) string.
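+///
+/// # Examples
+///
+/// A small sketch, assuming the unstable `str_internals` feature is enabled:
+///
+/// ```
+/// #![feature(str_internals)]
+/// use core::str::next_code_point;
+///
+/// let mut bytes = "ö".as_bytes().iter();
+/// // SAFETY: the iterator comes from a valid UTF-8 string.
+/// assert_eq!(unsafe { next_code_point(&mut bytes) }, Some(0xF6)); // U+00F6
+/// assert_eq!(unsafe { next_code_point(&mut bytes) }, None);
+/// ```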
+#[unstable(feature = "str_internals", issue = "none")]
+#[inline]
+pub unsafe fn next_code_point<'a, I: Iterator<Item = &'a u8>>(bytes: &mut I) -> Option<u32> {
+ // Decode UTF-8
+ let x = *bytes.next()?;
+ if x < 128 {
+ return Some(x as u32);
+ }
+
+ // Multibyte case follows
+ // Decode from a byte combination out of: [[[x y] z] w]
+ // NOTE: Performance is sensitive to the exact formulation here
+ let init = utf8_first_byte(x, 2);
+ // SAFETY: `bytes` produces a UTF-8-like string,
+ // so the iterator must produce a value here.
+ let y = unsafe { *bytes.next().unwrap_unchecked() };
+ let mut ch = utf8_acc_cont_byte(init, y);
+ if x >= 0xE0 {
+ // [[x y z] w] case
+ // 5th bit in 0xE0 .. 0xEF is always clear, so `init` is still valid
+ // SAFETY: `bytes` produces a UTF-8-like string,
+ // so the iterator must produce a value here.
+ let z = unsafe { *bytes.next().unwrap_unchecked() };
+ let y_z = utf8_acc_cont_byte((y & CONT_MASK) as u32, z);
+ ch = init << 12 | y_z;
+ if x >= 0xF0 {
+ // [x y z w] case
+ // use only the lower 3 bits of `init`
+ // SAFETY: `bytes` produces a UTF-8-like string,
+ // so the iterator must produce a value here.
+ let w = unsafe { *bytes.next().unwrap_unchecked() };
+ ch = (init & 7) << 18 | utf8_acc_cont_byte(y_z, w);
+ }
+ }
+
+ Some(ch)
+}
+
+/// Reads the last code point out of a byte iterator (assuming a
+/// UTF-8-like encoding).
+///
+/// # Safety
+///
+/// `bytes` must produce a valid UTF-8-like (UTF-8 or WTF-8) string.
+#[inline]
+pub(super) unsafe fn next_code_point_reverse<'a, I>(bytes: &mut I) -> Option<u32>
+where
+ I: DoubleEndedIterator<Item = &'a u8>,
+{
+ // Decode UTF-8
+ let w = match *bytes.next_back()? {
+ next_byte if next_byte < 128 => return Some(next_byte as u32),
+ back_byte => back_byte,
+ };
+
+ // Multibyte case follows
+ // Decode from a byte combination out of: [x [y [z w]]]
+ let mut ch;
+ // SAFETY: `bytes` produces a UTF-8-like string,
+ // so the iterator must produce a value here.
+ let z = unsafe { *bytes.next_back().unwrap_unchecked() };
+ ch = utf8_first_byte(z, 2);
+ if utf8_is_cont_byte(z) {
+ // SAFETY: `bytes` produces a UTF-8-like string,
+ // so the iterator must produce a value here.
+ let y = unsafe { *bytes.next_back().unwrap_unchecked() };
+ ch = utf8_first_byte(y, 3);
+ if utf8_is_cont_byte(y) {
+ // SAFETY: `bytes` produces a UTF-8-like string,
+ // so the iterator must produce a value here.
+ let x = unsafe { *bytes.next_back().unwrap_unchecked() };
+ ch = utf8_first_byte(x, 4);
+ ch = utf8_acc_cont_byte(ch, y);
+ }
+ ch = utf8_acc_cont_byte(ch, z);
+ }
+ ch = utf8_acc_cont_byte(ch, w);
+
+ Some(ch)
+}
+
+const NONASCII_MASK: usize = usize::repeat_u8(0x80);
+
+/// Returns `true` if any byte in the word `x` is nonascii (>= 128).
+#[inline]
+const fn contains_nonascii(x: usize) -> bool {
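+ // `NONASCII_MASK` has the high bit of every byte set (0x8080_8080_8080_8080
+ // on 64-bit targets), so the AND is nonzero exactly when some byte in `x`
+ // has its high bit set, i.e. is non-ASCII.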
+ (x & NONASCII_MASK) != 0
+}
+
+/// Walks through `v` checking that it's a valid UTF-8 sequence,
+/// returning `Ok(())` in that case, or, if it is invalid, `Err(err)`.
+#[inline(always)]
+#[rustc_const_unstable(feature = "str_internals", issue = "none")]
+pub(super) const fn run_utf8_validation(v: &[u8]) -> Result<(), Utf8Error> {
+ let mut index = 0;
+ let len = v.len();
+
+ let usize_bytes = mem::size_of::<usize>();
+ let ascii_block_size = 2 * usize_bytes;
+ let blocks_end = if len >= ascii_block_size { len - ascii_block_size + 1 } else { 0 };
+ let align = v.as_ptr().align_offset(usize_bytes);
+
+ while index < len {
+ let old_offset = index;
+ macro_rules! err {
+ ($error_len: expr) => {
+ return Err(Utf8Error { valid_up_to: old_offset, error_len: $error_len })
+ };
+ }
+
+ macro_rules! next {
+ () => {{
+ index += 1;
+ // we needed data, but there was none: error!
+ if index >= len {
+ err!(None)
+ }
+ v[index]
+ }};
+ }
+
+ let first = v[index];
+ if first >= 128 {
+ let w = utf8_char_width(first);
+ // 2-byte encoding is for codepoints \u{0080} to \u{07ff}
+ // first C2 80 last DF BF
+ // 3-byte encoding is for codepoints \u{0800} to \u{ffff}
+ // first E0 A0 80 last EF BF BF
+ // excluding surrogates codepoints \u{d800} to \u{dfff}
+ // ED A0 80 to ED BF BF
+ // 4-byte encoding is for codepoints \u{10000} to \u{10ffff}
+ // first F0 90 80 80 last F4 8F BF BF
+ //
+ // Use the UTF-8 syntax from the RFC
+ //
+ // https://tools.ietf.org/html/rfc3629
+ // UTF8-1 = %x00-7F
+ // UTF8-2 = %xC2-DF UTF8-tail
+ // UTF8-3 = %xE0 %xA0-BF UTF8-tail / %xE1-EC 2( UTF8-tail ) /
+ // %xED %x80-9F UTF8-tail / %xEE-EF 2( UTF8-tail )
+ // UTF8-4 = %xF0 %x90-BF 2( UTF8-tail ) / %xF1-F3 3( UTF8-tail ) /
+ // %xF4 %x80-8F 2( UTF8-tail )
+ match w {
+ 2 => {
+ if next!() as i8 >= -64 {
+ err!(Some(1))
+ }
+ }
+ 3 => {
+ match (first, next!()) {
+ (0xE0, 0xA0..=0xBF)
+ | (0xE1..=0xEC, 0x80..=0xBF)
+ | (0xED, 0x80..=0x9F)
+ | (0xEE..=0xEF, 0x80..=0xBF) => {}
+ _ => err!(Some(1)),
+ }
+ if next!() as i8 >= -64 {
+ err!(Some(2))
+ }
+ }
+ 4 => {
+ match (first, next!()) {
+ (0xF0, 0x90..=0xBF) | (0xF1..=0xF3, 0x80..=0xBF) | (0xF4, 0x80..=0x8F) => {}
+ _ => err!(Some(1)),
+ }
+ if next!() as i8 >= -64 {
+ err!(Some(2))
+ }
+ if next!() as i8 >= -64 {
+ err!(Some(3))
+ }
+ }
+ _ => err!(Some(1)),
+ }
+ index += 1;
+ } else {
+ // Ascii case, try to skip forward quickly.
+ // When the pointer is aligned, read 2 words of data per iteration
+ // until we find a word containing a non-ascii byte.
+ if align != usize::MAX && align.wrapping_sub(index) % usize_bytes == 0 {
+ let ptr = v.as_ptr();
+ while index < blocks_end {
+ // SAFETY: since `align - index` and `ascii_block_size` are
+ // multiples of `usize_bytes`, `block = ptr.add(index)` is
+ // always aligned with a `usize` so it's safe to dereference
+ // both `block` and `block.offset(1)`.
+ unsafe {
+ let block = ptr.add(index) as *const usize;
+ // break if there is a nonascii byte
+ let zu = contains_nonascii(*block);
+ let zv = contains_nonascii(*block.offset(1));
+ if zu || zv {
+ break;
+ }
+ }
+ index += ascii_block_size;
+ }
+ // step from the point where the wordwise loop stopped
+ while index < len && v[index] < 128 {
+ index += 1;
+ }
+ } else {
+ index += 1;
+ }
+ }
+ }
+
+ Ok(())
+}
+
+// https://tools.ietf.org/html/rfc3629
+const UTF8_CHAR_WIDTH: &[u8; 256] = &[
+ // 1 2 3 4 5 6 7 8 9 A B C D E F
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 1
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 2
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 3
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 4
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 5
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 6
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 7
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 8
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 9
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // A
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // B
+ 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // C
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // D
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // E
+ 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // F
+];
+
+/// Given a first byte, determines how many bytes are in this UTF-8 character.
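+///
+/// For example, a small sketch assuming the unstable `str_internals` feature:
+///
+/// ```
+/// #![feature(str_internals)]
+/// use core::str::utf8_char_width;
+///
+/// assert_eq!(utf8_char_width(b'a'), 1); // ASCII
+/// assert_eq!(utf8_char_width(0xC3), 2); // first byte of `ö`
+/// assert_eq!(utf8_char_width(0x80), 0); // a continuation byte is not a first byte
+/// ```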
+#[unstable(feature = "str_internals", issue = "none")]
+#[must_use]
+#[inline]
+pub const fn utf8_char_width(b: u8) -> usize {
+ UTF8_CHAR_WIDTH[b as usize] as usize
+}
+
+/// Mask of the value bits of a continuation byte.
+const CONT_MASK: u8 = 0b0011_1111;
diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
new file mode 100644
index 000000000..5e2e0c4d8
--- /dev/null
+++ b/library/core/src/sync/atomic.rs
@@ -0,0 +1,3488 @@
+//! Atomic types
+//!
+//! Atomic types provide primitive shared-memory communication between
+//! threads, and are the building blocks of other concurrent
+//! types.
+//!
+//! Rust atomics currently follow the same rules as [C++20 atomics][cpp], specifically `atomic_ref`.
+//! Basically, creating a *shared reference* to one of the Rust atomic types corresponds to creating
+//! an `atomic_ref` in C++; the `atomic_ref` is destroyed when the lifetime of the shared reference
+//! ends. (A Rust atomic type that is exclusively owned or behind a mutable reference does *not*
+//! correspond to an "atomic object" in C++, since it can be accessed via non-atomic operations.)
+//!
+//! This module defines atomic versions of a select number of primitive
+//! types, including [`AtomicBool`], [`AtomicIsize`], [`AtomicUsize`],
+//! [`AtomicI8`], [`AtomicU16`], etc.
+//! Atomic types present operations that, when used correctly, synchronize
+//! updates between threads.
+//!
+//! Each method takes an [`Ordering`] which represents the strength of
+//! the memory barrier for that operation. These orderings are the
+//! same as the [C++20 atomic orderings][1]. For more information see the [nomicon][2].
+//!
+//! [cpp]: https://en.cppreference.com/w/cpp/atomic
+//! [1]: https://en.cppreference.com/w/cpp/atomic/memory_order
+//! [2]: ../../../nomicon/atomics.html
+//!
+//! Atomic variables are safe to share between threads (they implement [`Sync`])
+//! but they do not themselves provide the mechanism for sharing; they follow the
+//! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
+//! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an
+//! atomically-reference-counted shared pointer).
+//!
+//! [arc]: ../../../std/sync/struct.Arc.html
+//!
+//! Atomic types may be stored in static variables, initialized using
+//! the constant initializers like [`AtomicBool::new`]. Atomic statics
+//! are often used for lazy global initialization.
+//!
+//! # Portability
+//!
+//! All atomic types in this module are guaranteed to be [lock-free] if they're
+//! available. This means they don't internally acquire a global mutex. Atomic
+//! types and operations are not guaranteed to be wait-free. This means that
+//! operations like `fetch_or` may be implemented with a compare-and-swap loop.
+//!
+//! Atomic operations may be implemented at the instruction layer with
+//! larger-size atomics. For example some platforms use 4-byte atomic
+//! instructions to implement `AtomicI8`. Note that this emulation should not
+//! have an impact on correctness of code, it's just something to be aware of.
+//!
+//! The atomic types in this module might not be available on all platforms. The
+//! atomic types here are all widely available, however, and can generally be
+//! relied upon to exist. Some notable exceptions are:
+//!
+//! * PowerPC and MIPS platforms with 32-bit pointers do not have `AtomicU64` or
+//! `AtomicI64` types.
+//! * ARM platforms like `armv5te` that don't target Linux only provide `load`
+//! and `store` operations, and do not support Compare and Swap (CAS)
+//! operations, such as `swap`, `fetch_add`, etc. Additionally on Linux,
+//! these CAS operations are implemented via [operating system support], which
+//! may come with a performance penalty.
+//! * ARM targets with `thumbv6m` only provide `load` and `store` operations,
+//! and do not support Compare and Swap (CAS) operations, such as `swap`,
+//! `fetch_add`, etc.
+//!
+//! [operating system support]: https://www.kernel.org/doc/Documentation/arm/kernel_user_helpers.txt
+//!
+//! Note that future platforms may be added that also do not have support for
+//! some atomic operations. Maximally portable code will want to be careful
+//! about which atomic types are used. `AtomicUsize` and `AtomicIsize` are
+//! generally the most portable, but even then they're not available everywhere.
+//! For reference, the `std` library requires `AtomicBool`s and pointer-sized atomics, although
+//! `core` does not.
+//!
+//! The `#[cfg(target_has_atomic)]` attribute can be used to conditionally
+//! compile based on the target's supported bit widths. It is a key-value
+//! option set for each supported size, with values "8", "16", "32", "64",
+//! "128", and "ptr" for pointer-sized atomics.
+//!
+//! [lock-free]: https://en.wikipedia.org/wiki/Non-blocking_algorithm
+//!
+//! # Examples
+//!
+//! A simple spinlock:
+//!
+//! ```
+//! use std::sync::Arc;
+//! use std::sync::atomic::{AtomicUsize, Ordering};
+//! use std::{hint, thread};
+//!
+//! fn main() {
+//! let spinlock = Arc::new(AtomicUsize::new(1));
+//!
+//! let spinlock_clone = Arc::clone(&spinlock);
+//! let thread = thread::spawn(move || {
+//! spinlock_clone.store(0, Ordering::SeqCst);
+//! });
+//!
+//! // Wait for the other thread to release the lock
+//! while spinlock.load(Ordering::SeqCst) != 0 {
+//! hint::spin_loop();
+//! }
+//!
+//! if let Err(panic) = thread.join() {
+//! println!("Thread had an error: {panic:?}");
+//! }
+//! }
+//! ```
+//!
+//! Keep a global count of live threads:
+//!
+//! ```
+//! use std::sync::atomic::{AtomicUsize, Ordering};
+//!
+//! static GLOBAL_THREAD_COUNT: AtomicUsize = AtomicUsize::new(0);
+//!
+//! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
+//! println!("live threads: {}", old_thread_count + 1);
+//! ```
+
+#![stable(feature = "rust1", since = "1.0.0")]
+#![cfg_attr(not(target_has_atomic_load_store = "8"), allow(dead_code))]
+#![cfg_attr(not(target_has_atomic_load_store = "8"), allow(unused_imports))]
+#![rustc_diagnostic_item = "atomic_mod"]
+
+use self::Ordering::*;
+
+use crate::cell::UnsafeCell;
+use crate::fmt;
+use crate::intrinsics;
+
+use crate::hint::spin_loop;
+
+/// A boolean type which can be safely shared between threads.
+///
+/// This type has the same in-memory representation as a [`bool`].
+///
+/// **Note**: This type is only available on platforms that support atomic
+/// loads and stores of `u8`.
+#[cfg(target_has_atomic_load_store = "8")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_diagnostic_item = "AtomicBool"]
+#[repr(C, align(1))]
+pub struct AtomicBool {
+ v: UnsafeCell<u8>,
+}
+
+#[cfg(target_has_atomic_load_store = "8")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
+impl const Default for AtomicBool {
+ /// Creates an `AtomicBool` initialized to `false`.
+ #[inline]
+ fn default() -> Self {
+ Self::new(false)
+ }
+}
+
+// Send is implicitly implemented for AtomicBool.
+#[cfg(target_has_atomic_load_store = "8")]
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl Sync for AtomicBool {}
+
+/// A raw pointer type which can be safely shared between threads.
+///
+/// This type has the same in-memory representation as a `*mut T`.
+///
+/// **Note**: This type is only available on platforms that support atomic
+/// loads and stores of pointers. Its size depends on the target pointer's size.
+#[cfg(target_has_atomic_load_store = "ptr")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "AtomicPtr")]
+#[cfg_attr(target_pointer_width = "16", repr(C, align(2)))]
+#[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
+#[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
+pub struct AtomicPtr<T> {
+ p: UnsafeCell<*mut T>,
+}
+
+#[cfg(target_has_atomic_load_store = "ptr")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
+impl<T> const Default for AtomicPtr<T> {
+ /// Creates a null `AtomicPtr<T>`.
+ fn default() -> AtomicPtr<T> {
+ AtomicPtr::new(crate::ptr::null_mut())
+ }
+}
+
+#[cfg(target_has_atomic_load_store = "ptr")]
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T> Send for AtomicPtr<T> {}
+#[cfg(target_has_atomic_load_store = "ptr")]
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T> Sync for AtomicPtr<T> {}
+
+/// Atomic memory orderings
+///
+/// Memory orderings specify the way atomic operations synchronize memory.
+/// In its weakest [`Ordering::Relaxed`], only the memory directly touched by the
+/// operation is synchronized. On the other hand, a store-load pair of [`Ordering::SeqCst`]
+/// operations synchronize other memory while additionally preserving a total order of such
+/// operations across all threads.
+///
+/// Rust's memory orderings are [the same as those of
+/// C++20](https://en.cppreference.com/w/cpp/atomic/memory_order).
+///
+/// For more information see the [nomicon].
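+///
+/// For example, a release store paired with an acquire load can publish data
+/// to another thread (a minimal single-threaded sketch of the pattern):
+///
+/// ```
+/// use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+///
+/// static DATA: AtomicUsize = AtomicUsize::new(0);
+/// static READY: AtomicBool = AtomicBool::new(false);
+///
+/// // Writer: publish the data, then signal with a `Release` store.
+/// DATA.store(42, Ordering::Relaxed);
+/// READY.store(true, Ordering::Release);
+///
+/// // Reader: an `Acquire` load that observes `true` also observes the write
+/// // to `DATA` that happened before the `Release` store.
+/// if READY.load(Ordering::Acquire) {
+/// assert_eq!(DATA.load(Ordering::Relaxed), 42);
+/// }
+/// ```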
+///
+/// [nomicon]: ../../../nomicon/atomics.html
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
+#[non_exhaustive]
+#[rustc_diagnostic_item = "Ordering"]
+pub enum Ordering {
+ /// No ordering constraints, only atomic operations.
+ ///
+ /// Corresponds to [`memory_order_relaxed`] in C++20.
+ ///
+ /// [`memory_order_relaxed`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Relaxed_ordering
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Relaxed,
+ /// When coupled with a store, all previous operations become ordered
+ /// before any load of this value with [`Acquire`] (or stronger) ordering.
+ /// In particular, all previous writes become visible to all threads
+ /// that perform an [`Acquire`] (or stronger) load of this value.
+ ///
+ /// Notice that using this ordering for an operation that combines loads
+ /// and stores leads to a [`Relaxed`] load operation!
+ ///
+ /// This ordering is only applicable for operations that can perform a store.
+ ///
+ /// Corresponds to [`memory_order_release`] in C++20.
+ ///
+ /// [`memory_order_release`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Release,
+ /// When coupled with a load, if the loaded value was written by a store operation with
+ /// [`Release`] (or stronger) ordering, then all subsequent operations
+ /// become ordered after that store. In particular, all subsequent loads will see data
+ /// written before the store.
+ ///
+ /// Notice that using this ordering for an operation that combines loads
+ /// and stores leads to a [`Relaxed`] store operation!
+ ///
+ /// This ordering is only applicable for operations that can perform a load.
+ ///
+ /// Corresponds to [`memory_order_acquire`] in C++20.
+ ///
+ /// [`memory_order_acquire`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Acquire,
+ /// Has the effects of both [`Acquire`] and [`Release`] together:
+ /// For loads it uses [`Acquire`] ordering. For stores it uses the [`Release`] ordering.
+ ///
+ /// Notice that in the case of `compare_and_swap`, it is possible that the operation ends up
+ /// not performing any store and hence it has just [`Acquire`] ordering. However,
+ /// `AcqRel` will never perform [`Relaxed`] accesses.
+ ///
+ /// This ordering is only applicable for operations that combine both loads and stores.
+ ///
+ /// Corresponds to [`memory_order_acq_rel`] in C++20.
+ ///
+ /// [`memory_order_acq_rel`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
+ #[stable(feature = "rust1", since = "1.0.0")]
+ AcqRel,
+ /// Like [`Acquire`]/[`Release`]/[`AcqRel`] (for load, store, and load-with-store
+ /// operations, respectively) with the additional guarantee that all threads see all
+ /// sequentially consistent operations in the same order.
+ ///
+ /// Corresponds to [`memory_order_seq_cst`] in C++20.
+ ///
+ /// [`memory_order_seq_cst`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Sequentially-consistent_ordering
+ #[stable(feature = "rust1", since = "1.0.0")]
+ SeqCst,
+}
+
+/// An [`AtomicBool`] initialized to `false`.
+#[cfg(target_has_atomic_load_store = "8")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(
+ since = "1.34.0",
+ note = "the `new` function is now preferred",
+ suggestion = "AtomicBool::new(false)"
+)]
+pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
+
+#[cfg(target_has_atomic_load_store = "8")]
+impl AtomicBool {
+ /// Creates a new `AtomicBool`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::AtomicBool;
+ ///
+ /// let atomic_true = AtomicBool::new(true);
+ /// let atomic_false = AtomicBool::new(false);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_atomic_new", since = "1.24.0")]
+ #[must_use]
+ pub const fn new(v: bool) -> AtomicBool {
+ AtomicBool { v: UnsafeCell::new(v as u8) }
+ }
+
+ /// Returns a mutable reference to the underlying [`bool`].
+ ///
+ /// This is safe because the mutable reference guarantees that no other threads are
+ /// concurrently accessing the atomic data.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let mut some_bool = AtomicBool::new(true);
+ /// assert_eq!(*some_bool.get_mut(), true);
+ /// *some_bool.get_mut() = false;
+ /// assert_eq!(some_bool.load(Ordering::SeqCst), false);
+ /// ```
+ #[inline]
+ #[stable(feature = "atomic_access", since = "1.15.0")]
+ pub fn get_mut(&mut self) -> &mut bool {
+ // SAFETY: the mutable reference guarantees unique ownership.
+ unsafe { &mut *(self.v.get() as *mut bool) }
+ }
+
+ /// Get atomic access to a `&mut bool`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(atomic_from_mut)]
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let mut some_bool = true;
+ /// let a = AtomicBool::from_mut(&mut some_bool);
+ /// a.store(false, Ordering::Relaxed);
+ /// assert_eq!(some_bool, false);
+ /// ```
+ #[inline]
+ #[cfg(target_has_atomic_equal_alignment = "8")]
+ #[unstable(feature = "atomic_from_mut", issue = "76314")]
+ pub fn from_mut(v: &mut bool) -> &mut Self {
+ // SAFETY: the mutable reference guarantees unique ownership, and
+ // alignment of both `bool` and `Self` is 1.
+ unsafe { &mut *(v as *mut bool as *mut Self) }
+ }
+
+ /// Get non-atomic access to a `&mut [AtomicBool]` slice.
+ ///
+ /// This is safe because the mutable reference guarantees that no other threads are
+ /// concurrently accessing the atomic data.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(atomic_from_mut, inline_const)]
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let mut some_bools = [const { AtomicBool::new(false) }; 10];
+ ///
+ /// let view: &mut [bool] = AtomicBool::get_mut_slice(&mut some_bools);
+ /// assert_eq!(view, [false; 10]);
+ /// view[..5].copy_from_slice(&[true; 5]);
+ ///
+ /// std::thread::scope(|s| {
+ /// for t in &some_bools[..5] {
+ /// s.spawn(move || assert_eq!(t.load(Ordering::Relaxed), true));
+ /// }
+ ///
+ /// for f in &some_bools[5..] {
+ /// s.spawn(move || assert_eq!(f.load(Ordering::Relaxed), false));
+ /// }
+ /// });
+ /// ```
+ #[inline]
+ #[unstable(feature = "atomic_from_mut", issue = "76314")]
+ pub fn get_mut_slice(this: &mut [Self]) -> &mut [bool] {
+ // SAFETY: the mutable reference guarantees unique ownership.
+ unsafe { &mut *(this as *mut [Self] as *mut [bool]) }
+ }
+
+ /// Get atomic access to a `&mut [bool]` slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(atomic_from_mut)]
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let mut some_bools = [false; 10];
+ /// let a = &*AtomicBool::from_mut_slice(&mut some_bools);
+ /// std::thread::scope(|s| {
+ /// for i in 0..a.len() {
+ /// s.spawn(move || a[i].store(true, Ordering::Relaxed));
+ /// }
+ /// });
+ /// assert_eq!(some_bools, [true; 10]);
+ /// ```
+ #[inline]
+ #[cfg(target_has_atomic_equal_alignment = "8")]
+ #[unstable(feature = "atomic_from_mut", issue = "76314")]
+ pub fn from_mut_slice(v: &mut [bool]) -> &mut [Self] {
+ // SAFETY: the mutable reference guarantees unique ownership, and
+ // alignment of both `bool` and `Self` is 1.
+ unsafe { &mut *(v as *mut [bool] as *mut [Self]) }
+ }
+
+ /// Consumes the atomic and returns the contained value.
+ ///
+ /// This is safe because passing `self` by value guarantees that no other threads are
+ /// concurrently accessing the atomic data.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::AtomicBool;
+ ///
+ /// let some_bool = AtomicBool::new(true);
+ /// assert_eq!(some_bool.into_inner(), true);
+ /// ```
+ #[inline]
+ #[stable(feature = "atomic_access", since = "1.15.0")]
+ #[rustc_const_unstable(feature = "const_cell_into_inner", issue = "78729")]
+ pub const fn into_inner(self) -> bool {
+ self.v.into_inner() != 0
+ }
+
+ /// Loads a value from the bool.
+ ///
+ /// `load` takes an [`Ordering`] argument which describes the memory ordering
+ /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
+ ///
+ /// # Panics
+ ///
+ /// Panics if `order` is [`Release`] or [`AcqRel`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let some_bool = AtomicBool::new(true);
+ ///
+ /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn load(&self, order: Ordering) -> bool {
+ // SAFETY: any data races are prevented by atomic intrinsics and the raw
+ // pointer passed in is valid because we got it from a reference.
+ unsafe { atomic_load(self.v.get(), order) != 0 }
+ }
+
+ /// Stores a value into the bool.
+ ///
+ /// `store` takes an [`Ordering`] argument which describes the memory ordering
+ /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
+ ///
+ /// # Panics
+ ///
+ /// Panics if `order` is [`Acquire`] or [`AcqRel`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let some_bool = AtomicBool::new(true);
+ ///
+ /// some_bool.store(false, Ordering::Relaxed);
+ /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn store(&self, val: bool, order: Ordering) {
+ // SAFETY: any data races are prevented by atomic intrinsics and the raw
+ // pointer passed in is valid because we got it from a reference.
+ unsafe {
+ atomic_store(self.v.get(), val as u8, order);
+ }
+ }
+
+ /// Stores a value into the bool, returning the previous value.
+ ///
+ /// `swap` takes an [`Ordering`] argument which describes the memory ordering
+ /// of this operation. All ordering modes are possible. Note that using
+ /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+ /// using [`Release`] makes the load part [`Relaxed`].
+ ///
+ /// **Note:** This method is only available on platforms that support atomic
+ /// operations on `u8`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let some_bool = AtomicBool::new(true);
+ ///
+ /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
+ /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg(target_has_atomic = "8")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn swap(&self, val: bool, order: Ordering) -> bool {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
+ }
+
+ /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
+ ///
+ /// The return value is always the previous value. If it is equal to `current`, then the value
+ /// was updated.
+ ///
+ /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
+ /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
+ /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
+ /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
+ /// happens, and using [`Release`] makes the load part [`Relaxed`].
+ ///
+ /// **Note:** This method is only available on platforms that support atomic
+ /// operations on `u8`.
+ ///
+ /// # Migrating to `compare_exchange` and `compare_exchange_weak`
+ ///
+ /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for
+ /// memory orderings:
+ ///
+ /// Original | Success | Failure
+ /// -------- | ------- | -------
+ /// Relaxed | Relaxed | Relaxed
+ /// Acquire | Acquire | Acquire
+ /// Release | Release | Relaxed
+ /// AcqRel | AcqRel | Acquire
+ /// SeqCst | SeqCst | SeqCst
+ ///
+ /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds,
+ /// which allows the compiler to generate better assembly code when the compare and swap
+ /// is used in a loop.
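+ ///
+ /// For example, a `SeqCst` `compare_and_swap` maps to `compare_exchange`
+ /// with `SeqCst` for both the success and the failure ordering:
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let flag = AtomicBool::new(false);
+ /// // Before: flag.compare_and_swap(false, true, Ordering::SeqCst);
+ /// // After:
+ /// let _ = flag.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst);
+ /// ```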
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let some_bool = AtomicBool::new(true);
+ ///
+ /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
+ /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
+ ///
+ /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
+ /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[deprecated(
+ since = "1.50.0",
+ note = "Use `compare_exchange` or `compare_exchange_weak` instead"
+ )]
+ #[cfg(target_has_atomic = "8")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
+ match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
+ Ok(x) => x,
+ Err(x) => x,
+ }
+ }
+
+ /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
+ ///
+ /// The return value is a result indicating whether the new value was written and containing
+ /// the previous value. On success this value is guaranteed to be equal to `current`.
+ ///
+ /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
+ /// ordering of this operation. `success` describes the required ordering for the
+ /// read-modify-write operation that takes place if the comparison with `current` succeeds.
+ /// `failure` describes the required ordering for the load operation that takes place when
+ /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
+ /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
+ /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
+ ///
+ /// **Note:** This method is only available on platforms that support atomic
+ /// operations on `u8`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let some_bool = AtomicBool::new(true);
+ ///
+ /// assert_eq!(some_bool.compare_exchange(true,
+ /// false,
+ /// Ordering::Acquire,
+ /// Ordering::Relaxed),
+ /// Ok(true));
+ /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
+ ///
+ /// assert_eq!(some_bool.compare_exchange(true, true,
+ /// Ordering::SeqCst,
+ /// Ordering::Acquire),
+ /// Err(false));
+ /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
+ /// ```
+ #[inline]
+ #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
+ #[doc(alias = "compare_and_swap")]
+ #[cfg(target_has_atomic = "8")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn compare_exchange(
+ &self,
+ current: bool,
+ new: bool,
+ success: Ordering,
+ failure: Ordering,
+ ) -> Result<bool, bool> {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ match unsafe {
+ atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
+ } {
+ Ok(x) => Ok(x != 0),
+ Err(x) => Err(x != 0),
+ }
+ }
+
+ /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
+ ///
+ /// Unlike [`AtomicBool::compare_exchange`], this function is allowed to spuriously fail even when the
+ /// comparison succeeds, which can result in more efficient code on some platforms. The
+ /// return value is a result indicating whether the new value was written and containing the
+ /// previous value.
+ ///
+ /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
+ /// ordering of this operation. `success` describes the required ordering for the
+ /// read-modify-write operation that takes place if the comparison with `current` succeeds.
+ /// `failure` describes the required ordering for the load operation that takes place when
+ /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
+ /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
+ /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
+ ///
+ /// **Note:** This method is only available on platforms that support atomic
+ /// operations on `u8`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let val = AtomicBool::new(false);
+ ///
+ /// let new = true;
+ /// let mut old = val.load(Ordering::Relaxed);
+ /// loop {
+ /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
+ /// Ok(_) => break,
+ /// Err(x) => old = x,
+ /// }
+ /// }
+ /// ```
+ #[inline]
+ #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
+ #[doc(alias = "compare_and_swap")]
+ #[cfg(target_has_atomic = "8")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn compare_exchange_weak(
+ &self,
+ current: bool,
+ new: bool,
+ success: Ordering,
+ failure: Ordering,
+ ) -> Result<bool, bool> {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ match unsafe {
+ atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
+ } {
+ Ok(x) => Ok(x != 0),
+ Err(x) => Err(x != 0),
+ }
+ }
+
+ /// Logical "and" with a boolean value.
+ ///
+ /// Performs a logical "and" operation on the current value and the argument `val`, and sets
+ /// the new value to the result.
+ ///
+ /// Returns the previous value.
+ ///
+ /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
+ /// of this operation. All ordering modes are possible. Note that using
+ /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+ /// using [`Release`] makes the load part [`Relaxed`].
+ ///
+ /// **Note:** This method is only available on platforms that support atomic
+ /// operations on `u8`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let foo = AtomicBool::new(true);
+ /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
+ /// assert_eq!(foo.load(Ordering::SeqCst), false);
+ ///
+ /// let foo = AtomicBool::new(true);
+ /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
+ /// assert_eq!(foo.load(Ordering::SeqCst), true);
+ ///
+ /// let foo = AtomicBool::new(false);
+ /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
+ /// assert_eq!(foo.load(Ordering::SeqCst), false);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg(target_has_atomic = "8")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
+ }
+
+ /// Logical "nand" with a boolean value.
+ ///
+ /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
+ /// the new value to the result.
+ ///
+ /// Returns the previous value.
+ ///
+ /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
+ /// of this operation. All ordering modes are possible. Note that using
+ /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+ /// using [`Release`] makes the load part [`Relaxed`].
+ ///
+ /// **Note:** This method is only available on platforms that support atomic
+ /// operations on `u8`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let foo = AtomicBool::new(true);
+ /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
+ /// assert_eq!(foo.load(Ordering::SeqCst), true);
+ ///
+ /// let foo = AtomicBool::new(true);
+ /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
+ /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
+ /// assert_eq!(foo.load(Ordering::SeqCst), false);
+ ///
+ /// let foo = AtomicBool::new(false);
+ /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
+ /// assert_eq!(foo.load(Ordering::SeqCst), true);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg(target_has_atomic = "8")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
+ // We can't use atomic_nand here because it can result in a bool with
+ // an invalid value. This happens because the atomic operation is done
+ // with an 8-bit integer internally, which would set the upper 7 bits.
+ // So we just use fetch_xor or swap instead.
+ if val {
+ // !(x & true) == !x
+ // We must invert the bool.
+ self.fetch_xor(true, order)
+ } else {
+ // !(x & false) == true
+ // We must set the bool to true.
+ self.swap(true, order)
+ }
+ }
+
+ /// Logical "or" with a boolean value.
+ ///
+ /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
+ /// new value to the result.
+ ///
+ /// Returns the previous value.
+ ///
+ /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
+ /// of this operation. All ordering modes are possible. Note that using
+ /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+ /// using [`Release`] makes the load part [`Relaxed`].
+ ///
+ /// **Note:** This method is only available on platforms that support atomic
+ /// operations on `u8`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let foo = AtomicBool::new(true);
+ /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
+ /// assert_eq!(foo.load(Ordering::SeqCst), true);
+ ///
+ /// let foo = AtomicBool::new(true);
+ /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
+ /// assert_eq!(foo.load(Ordering::SeqCst), true);
+ ///
+ /// let foo = AtomicBool::new(false);
+ /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
+ /// assert_eq!(foo.load(Ordering::SeqCst), false);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg(target_has_atomic = "8")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
+ }
+
+ /// Logical "xor" with a boolean value.
+ ///
+ /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
+ /// the new value to the result.
+ ///
+ /// Returns the previous value.
+ ///
+ /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
+ /// of this operation. All ordering modes are possible. Note that using
+ /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+ /// using [`Release`] makes the load part [`Relaxed`].
+ ///
+ /// **Note:** This method is only available on platforms that support atomic
+ /// operations on `u8`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let foo = AtomicBool::new(true);
+ /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
+ /// assert_eq!(foo.load(Ordering::SeqCst), true);
+ ///
+ /// let foo = AtomicBool::new(true);
+ /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
+ /// assert_eq!(foo.load(Ordering::SeqCst), false);
+ ///
+ /// let foo = AtomicBool::new(false);
+ /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
+ /// assert_eq!(foo.load(Ordering::SeqCst), false);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg(target_has_atomic = "8")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
+ }
+
+ /// Logical "not" with a boolean value.
+ ///
+ /// Performs a logical "not" operation on the current value, and sets
+ /// the new value to the result.
+ ///
+ /// Returns the previous value.
+ ///
+ /// `fetch_not` takes an [`Ordering`] argument which describes the memory ordering
+ /// of this operation. All ordering modes are possible. Note that using
+ /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+ /// using [`Release`] makes the load part [`Relaxed`].
+ ///
+ /// **Note:** This method is only available on platforms that support atomic
+ /// operations on `u8`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(atomic_bool_fetch_not)]
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let foo = AtomicBool::new(true);
+ /// assert_eq!(foo.fetch_not(Ordering::SeqCst), true);
+ /// assert_eq!(foo.load(Ordering::SeqCst), false);
+ ///
+ /// let foo = AtomicBool::new(false);
+ /// assert_eq!(foo.fetch_not(Ordering::SeqCst), false);
+ /// assert_eq!(foo.load(Ordering::SeqCst), true);
+ /// ```
+ #[inline]
+ #[unstable(feature = "atomic_bool_fetch_not", issue = "98485")]
+ #[cfg(target_has_atomic = "8")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn fetch_not(&self, order: Ordering) -> bool {
+ self.fetch_xor(true, order)
+ }
+
+ /// Returns a mutable pointer to the underlying [`bool`].
+ ///
+ /// Doing non-atomic reads and writes on the resulting boolean can be a data race.
+ /// This method is mostly useful for FFI, where the function signature may use
+ /// `*mut bool` instead of `&AtomicBool`.
+ ///
+ /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
+ /// atomic types work with interior mutability. All modifications of an atomic change the value
+ /// through a shared reference, and can do so safely as long as they use atomic operations. Any
+ /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
+ /// restriction: operations on it must be atomic.
+ ///
+ /// # Examples
+ ///
+ /// ```ignore (extern-declaration)
+ /// # fn main() {
+ /// use std::sync::atomic::AtomicBool;
+ /// extern "C" {
+ /// fn my_atomic_op(arg: *mut bool);
+ /// }
+ ///
+ /// let mut atomic = AtomicBool::new(true);
+ /// unsafe {
+ /// my_atomic_op(atomic.as_mut_ptr());
+ /// }
+ /// # }
+ /// ```
+ #[inline]
+ #[unstable(feature = "atomic_mut_ptr", reason = "recently added", issue = "66893")]
+ pub fn as_mut_ptr(&self) -> *mut bool {
+ self.v.get() as *mut bool
+ }
+
+ /// Fetches the value, and applies a function to it that returns an optional
+ /// new value. Returns a `Result` of `Ok(previous_value)` if the function
+ /// returned `Some(_)`, else `Err(previous_value)`.
+ ///
+ /// Note: This may call the function multiple times if the value has been
+ /// changed by other threads in the meantime, as long as the function
+ /// returns `Some(_)`, but the function will have been applied only once to
+ /// the stored value.
+ ///
+ /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
+ /// ordering of this operation. The first describes the required ordering for
+ /// when the operation finally succeeds while the second describes the
+ /// required ordering for loads. These correspond to the success and failure
+ /// orderings of [`AtomicBool::compare_exchange`] respectively.
+ ///
+ /// Using [`Acquire`] as success ordering makes the store part of this
+ /// operation [`Relaxed`], and using [`Release`] makes the final successful
+ /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
+ /// [`Acquire`] or [`Relaxed`].
+ ///
+ /// **Note:** This method is only available on platforms that support atomic
+ /// operations on `u8`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let x = AtomicBool::new(false);
+ /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(false));
+ /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(false));
+ /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(true));
+ /// assert_eq!(x.load(Ordering::SeqCst), false);
+ /// ```
+ #[inline]
+ #[stable(feature = "atomic_fetch_update", since = "1.53.0")]
+ #[cfg(target_has_atomic = "8")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn fetch_update<F>(
+ &self,
+ set_order: Ordering,
+ fetch_order: Ordering,
+ mut f: F,
+ ) -> Result<bool, bool>
+ where
+ F: FnMut(bool) -> Option<bool>,
+ {
+ let mut prev = self.load(fetch_order);
+ while let Some(next) = f(prev) {
+ match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
+ x @ Ok(_) => return x,
+ Err(next_prev) => prev = next_prev,
+ }
+ }
+ Err(prev)
+ }
+}
+
+#[cfg(target_has_atomic_load_store = "ptr")]
+impl<T> AtomicPtr<T> {
+ /// Creates a new `AtomicPtr`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::AtomicPtr;
+ ///
+ /// let ptr = &mut 5;
+ /// let atomic_ptr = AtomicPtr::new(ptr);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_atomic_new", since = "1.24.0")]
+ pub const fn new(p: *mut T) -> AtomicPtr<T> {
+ AtomicPtr { p: UnsafeCell::new(p) }
+ }
+
+ /// Returns a mutable reference to the underlying pointer.
+ ///
+ /// This is safe because the mutable reference guarantees that no other threads are
+ /// concurrently accessing the atomic data.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let mut data = 10;
+ /// let mut atomic_ptr = AtomicPtr::new(&mut data);
+ /// let mut other_data = 5;
+ /// *atomic_ptr.get_mut() = &mut other_data;
+ /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
+ /// ```
+ #[inline]
+ #[stable(feature = "atomic_access", since = "1.15.0")]
+ pub fn get_mut(&mut self) -> &mut *mut T {
+ self.p.get_mut()
+ }
+
+ /// Gets atomic access to a pointer.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(atomic_from_mut)]
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let mut data = 123;
+ /// let mut some_ptr = &mut data as *mut i32;
+ /// let a = AtomicPtr::from_mut(&mut some_ptr);
+ /// let mut other_data = 456;
+ /// a.store(&mut other_data, Ordering::Relaxed);
+ /// assert_eq!(unsafe { *some_ptr }, 456);
+ /// ```
+ #[inline]
+ #[cfg(target_has_atomic_equal_alignment = "ptr")]
+ #[unstable(feature = "atomic_from_mut", issue = "76314")]
+ pub fn from_mut(v: &mut *mut T) -> &mut Self {
+ use crate::mem::align_of;
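+ // Compile-time alignment check: the array length below is the difference of
+ // the two alignments, and the empty pattern only matches a zero-length
+ // array, so this fails to compile unless the alignments are equal.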
+ let [] = [(); align_of::<AtomicPtr<()>>() - align_of::<*mut ()>()];
+ // SAFETY:
+ // - the mutable reference guarantees unique ownership.
+ // - the alignment of `*mut T` and `Self` is the same on all platforms
+ // supported by Rust, as verified above.
+ unsafe { &mut *(v as *mut *mut T as *mut Self) }
+ }
+
+ /// Gets non-atomic access to a `&mut [AtomicPtr]` slice.
+ ///
+ /// This is safe because the mutable reference guarantees that no other threads are
+ /// concurrently accessing the atomic data.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(atomic_from_mut, inline_const)]
+ /// use std::ptr::null_mut;
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let mut some_ptrs = [const { AtomicPtr::new(null_mut::<String>()) }; 10];
+ ///
+ /// let view: &mut [*mut String] = AtomicPtr::get_mut_slice(&mut some_ptrs);
+ /// assert_eq!(view, [null_mut::<String>(); 10]);
+ /// view
+ /// .iter_mut()
+ /// .enumerate()
+ /// .for_each(|(i, ptr)| *ptr = Box::into_raw(Box::new(format!("iteration#{i}"))));
+ ///
+ /// std::thread::scope(|s| {
+ /// for ptr in &some_ptrs {
+ /// s.spawn(move || {
+ /// let ptr = ptr.load(Ordering::Relaxed);
+ /// assert!(!ptr.is_null());
+ ///
+ /// let name = unsafe { Box::from_raw(ptr) };
+ /// println!("Hello, {name}!");
+ /// });
+ /// }
+ /// });
+ /// ```
+ #[inline]
+ #[unstable(feature = "atomic_from_mut", issue = "76314")]
+ pub fn get_mut_slice(this: &mut [Self]) -> &mut [*mut T] {
+ // SAFETY: the mutable reference guarantees unique ownership.
+ unsafe { &mut *(this as *mut [Self] as *mut [*mut T]) }
+ }
+
+ /// Gets atomic access to a slice of pointers.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(atomic_from_mut)]
+ /// use std::ptr::null_mut;
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let mut some_ptrs = [null_mut::<String>(); 10];
+ /// let a = &*AtomicPtr::from_mut_slice(&mut some_ptrs);
+ /// std::thread::scope(|s| {
+ /// for i in 0..a.len() {
+ /// s.spawn(move || {
+ /// let name = Box::new(format!("thread{i}"));
+ /// a[i].store(Box::into_raw(name), Ordering::Relaxed);
+ /// });
+ /// }
+ /// });
+ /// for p in some_ptrs {
+ /// assert!(!p.is_null());
+ /// let name = unsafe { Box::from_raw(p) };
+ /// println!("Hello, {name}!");
+ /// }
+ /// ```
+ #[inline]
+ #[cfg(target_has_atomic_equal_alignment = "ptr")]
+ #[unstable(feature = "atomic_from_mut", issue = "76314")]
+ pub fn from_mut_slice(v: &mut [*mut T]) -> &mut [Self] {
+ // SAFETY:
+ // - the mutable reference guarantees unique ownership.
+ // - the alignment of `*mut T` and `Self` is the same on all platforms
+ // supported by Rust, as verified in `from_mut` above.
+ unsafe { &mut *(v as *mut [*mut T] as *mut [Self]) }
+ }
+
+ /// Consumes the atomic and returns the contained value.
+ ///
+ /// This is safe because passing `self` by value guarantees that no other threads are
+ /// concurrently accessing the atomic data.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::AtomicPtr;
+ ///
+ /// let mut data = 5;
+ /// let atomic_ptr = AtomicPtr::new(&mut data);
+ /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
+ /// ```
+ #[inline]
+ #[stable(feature = "atomic_access", since = "1.15.0")]
+ #[rustc_const_unstable(feature = "const_cell_into_inner", issue = "78729")]
+ pub const fn into_inner(self) -> *mut T {
+ self.p.into_inner()
+ }
+
+ /// Loads a value from the pointer.
+ ///
+ /// `load` takes an [`Ordering`] argument which describes the memory ordering
+ /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
+ ///
+ /// # Panics
+ ///
+ /// Panics if `order` is [`Release`] or [`AcqRel`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let ptr = &mut 5;
+ /// let some_ptr = AtomicPtr::new(ptr);
+ ///
+ /// let value = some_ptr.load(Ordering::Relaxed);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn load(&self, order: Ordering) -> *mut T {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { atomic_load(self.p.get(), order) }
+ }
+
+ /// Stores a value into the pointer.
+ ///
+ /// `store` takes an [`Ordering`] argument which describes the memory ordering
+ /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
+ ///
+ /// # Panics
+ ///
+ /// Panics if `order` is [`Acquire`] or [`AcqRel`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let ptr = &mut 5;
+ /// let some_ptr = AtomicPtr::new(ptr);
+ ///
+ /// let other_ptr = &mut 10;
+ ///
+ /// some_ptr.store(other_ptr, Ordering::Relaxed);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn store(&self, ptr: *mut T, order: Ordering) {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe {
+ atomic_store(self.p.get(), ptr, order);
+ }
+ }
+
+ /// Stores a value into the pointer, returning the previous value.
+ ///
+ /// `swap` takes an [`Ordering`] argument which describes the memory ordering
+ /// of this operation. All ordering modes are possible. Note that using
+ /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+ /// using [`Release`] makes the load part [`Relaxed`].
+ ///
+ /// **Note:** This method is only available on platforms that support atomic
+ /// operations on pointers.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let ptr = &mut 5;
+ /// let some_ptr = AtomicPtr::new(ptr);
+ ///
+ /// let other_ptr = &mut 10;
+ ///
+ /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg(target_has_atomic = "ptr")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { atomic_swap(self.p.get(), ptr, order) }
+ }
+
+ /// Stores a value into the pointer if the current value is the same as the `current` value.
+ ///
+ /// The return value is always the previous value. If it is equal to `current`, then the value
+ /// was updated.
+ ///
+ /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
+ /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
+ /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
+ /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
+ /// happens, and using [`Release`] makes the load part [`Relaxed`].
+ ///
+ /// **Note:** This method is only available on platforms that support atomic
+ /// operations on pointers.
+ ///
+ /// # Migrating to `compare_exchange` and `compare_exchange_weak`
+ ///
+ /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for
+ /// memory orderings:
+ ///
+ /// Original | Success | Failure
+ /// -------- | ------- | -------
+ /// Relaxed | Relaxed | Relaxed
+ /// Acquire | Acquire | Acquire
+ /// Release | Release | Relaxed
+ /// AcqRel | AcqRel | Acquire
+ /// SeqCst | SeqCst | SeqCst
+ ///
+ /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds,
+ /// which allows the compiler to generate better assembly code when the compare and swap
+ /// is used in a loop.
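+ ///
+ /// For example, per the table above, a `compare_and_swap` call with [`AcqRel`]
+ /// ordering migrates to a `compare_exchange` with [`AcqRel`] as the success
+ /// ordering and [`Acquire`] as the failure ordering:
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let ptr = &mut 5;
+ /// let some_ptr = AtomicPtr::new(ptr);
+ /// let new: *mut _ = &mut 10;
+ ///
+ /// // Before: `some_ptr.compare_and_swap(ptr, new, Ordering::AcqRel)`
+ /// let _ = some_ptr.compare_exchange(ptr, new, Ordering::AcqRel, Ordering::Acquire);
+ /// ```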
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let ptr = &mut 5;
+ /// let some_ptr = AtomicPtr::new(ptr);
+ ///
+ /// let other_ptr = &mut 10;
+ ///
+ /// let value = some_ptr.compare_and_swap(ptr, other_ptr, Ordering::Relaxed);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[deprecated(
+ since = "1.50.0",
+ note = "Use `compare_exchange` or `compare_exchange_weak` instead"
+ )]
+ #[cfg(target_has_atomic = "ptr")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
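+ // `strongest_failure_ordering` maps `order` to the failure ordering listed
+ // in the migration table above (e.g. `AcqRel` -> `Acquire`).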
+ match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
+ Ok(x) => x,
+ Err(x) => x,
+ }
+ }
+
+ /// Stores a value into the pointer if the current value is the same as the `current` value.
+ ///
+ /// The return value is a result indicating whether the new value was written and containing
+ /// the previous value. On success this value is guaranteed to be equal to `current`.
+ ///
+ /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
+ /// ordering of this operation. `success` describes the required ordering for the
+ /// read-modify-write operation that takes place if the comparison with `current` succeeds.
+ /// `failure` describes the required ordering for the load operation that takes place when
+ /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
+ /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
+ /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
+ ///
+ /// **Note:** This method is only available on platforms that support atomic
+ /// operations on pointers.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let ptr = &mut 5;
+ /// let some_ptr = AtomicPtr::new(ptr);
+ ///
+ /// let other_ptr = &mut 10;
+ ///
+ /// let value = some_ptr.compare_exchange(ptr, other_ptr,
+ /// Ordering::SeqCst, Ordering::Relaxed);
+ /// ```
+ #[inline]
+ #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
+ #[cfg(target_has_atomic = "ptr")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn compare_exchange(
+ &self,
+ current: *mut T,
+ new: *mut T,
+ success: Ordering,
+ failure: Ordering,
+ ) -> Result<*mut T, *mut T> {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { atomic_compare_exchange(self.p.get(), current, new, success, failure) }
+ }
+
+ /// Stores a value into the pointer if the current value is the same as the `current` value.
+ ///
+ /// Unlike [`AtomicPtr::compare_exchange`], this function is allowed to spuriously fail even when the
+ /// comparison succeeds, which can result in more efficient code on some platforms. The
+ /// return value is a result indicating whether the new value was written and containing the
+ /// previous value.
+ ///
+ /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
+ /// ordering of this operation. `success` describes the required ordering for the
+ /// read-modify-write operation that takes place if the comparison with `current` succeeds.
+ /// `failure` describes the required ordering for the load operation that takes place when
+ /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
+ /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
+ /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
+ ///
+ /// **Note:** This method is only available on platforms that support atomic
+ /// operations on pointers.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let some_ptr = AtomicPtr::new(&mut 5);
+ ///
+ /// let new = &mut 10;
+ /// let mut old = some_ptr.load(Ordering::Relaxed);
+ /// loop {
+ /// match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
+ /// Ok(_) => break,
+ /// Err(x) => old = x,
+ /// }
+ /// }
+ /// ```
+ #[inline]
+ #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
+ #[cfg(target_has_atomic = "ptr")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn compare_exchange_weak(
+ &self,
+ current: *mut T,
+ new: *mut T,
+ success: Ordering,
+ failure: Ordering,
+ ) -> Result<*mut T, *mut T> {
+ // SAFETY: This intrinsic is unsafe because it operates on a raw pointer
+ // but we know for sure that the pointer is valid (we just got it from
+ // an `UnsafeCell` that we have by reference) and the atomic operation
+ // itself allows us to safely mutate the `UnsafeCell` contents.
+ unsafe { atomic_compare_exchange_weak(self.p.get(), current, new, success, failure) }
+ }
+
+ /// Fetches the value, and applies a function to it that returns an optional
+ /// new value. Returns a `Result` of `Ok(previous_value)` if the function
+ /// returned `Some(_)`, else `Err(previous_value)`.
+ ///
+ /// Note: This may call the function multiple times if the value has been
+ /// changed by other threads in the meantime, as long as the function
+ /// returns `Some(_)`, but the function will have been applied only once to
+ /// the stored value.
+ ///
+ /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
+ /// ordering of this operation. The first describes the required ordering for
+ /// when the operation finally succeeds while the second describes the
+ /// required ordering for loads. These correspond to the success and failure
+ /// orderings of [`AtomicPtr::compare_exchange`] respectively.
+ ///
+ /// Using [`Acquire`] as success ordering makes the store part of this
+ /// operation [`Relaxed`], and using [`Release`] makes the final successful
+ /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
+ /// [`Acquire`] or [`Relaxed`].
+ ///
+ /// **Note:** This method is only available on platforms that support atomic
+ /// operations on pointers.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let ptr: *mut _ = &mut 5;
+ /// let some_ptr = AtomicPtr::new(ptr);
+ ///
+ /// let new: *mut _ = &mut 10;
+ /// assert_eq!(some_ptr.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(ptr));
+ /// let result = some_ptr.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| {
+ /// if x == ptr {
+ /// Some(new)
+ /// } else {
+ /// None
+ /// }
+ /// });
+ /// assert_eq!(result, Ok(ptr));
+ /// assert_eq!(some_ptr.load(Ordering::SeqCst), new);
+ /// ```
+ #[inline]
+ #[stable(feature = "atomic_fetch_update", since = "1.53.0")]
+ #[cfg(target_has_atomic = "ptr")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn fetch_update<F>(
+ &self,
+ set_order: Ordering,
+ fetch_order: Ordering,
+ mut f: F,
+ ) -> Result<*mut T, *mut T>
+ where
+ F: FnMut(*mut T) -> Option<*mut T>,
+ {
+ let mut prev = self.load(fetch_order);
+ while let Some(next) = f(prev) {
+ match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
+ x @ Ok(_) => return x,
+ Err(next_prev) => prev = next_prev,
+ }
+ }
+ Err(prev)
+ }
+
+ /// Offsets the pointer's address by adding `val` (in units of `T`),
+ /// returning the previous pointer.
+ ///
+ /// This is equivalent to using [`wrapping_add`] to atomically perform the
+ /// equivalent of `ptr = ptr.wrapping_add(val);`.
+ ///
+ /// This method operates in units of `T`, which means that it cannot be used
+ /// to offset the pointer by an amount which is not a multiple of
+ /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
+ /// work with a deliberately misaligned pointer. In such cases, you may use
+ /// the [`fetch_byte_add`](Self::fetch_byte_add) method instead.
+ ///
+ /// `fetch_ptr_add` takes an [`Ordering`] argument which describes the
+ /// memory ordering of this operation. All ordering modes are possible. Note
+ /// that using [`Acquire`] makes the store part of this operation
+ /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
+ ///
+ /// **Note**: This method is only available on platforms that support atomic
+ /// operations on [`AtomicPtr`].
+ ///
+ /// [`wrapping_add`]: pointer::wrapping_add
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
+ /// use core::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
+ /// assert_eq!(atom.fetch_ptr_add(1, Ordering::Relaxed).addr(), 0);
+ /// // Note: units of `size_of::<i64>()`.
+ /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 8);
+ /// ```
+ #[inline]
+ #[cfg(target_has_atomic = "ptr")]
+ #[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn fetch_ptr_add(&self, val: usize, order: Ordering) -> *mut T {
+ self.fetch_byte_add(val.wrapping_mul(core::mem::size_of::<T>()), order)
+ }
+
+ /// Offsets the pointer's address by subtracting `val` (in units of `T`),
+ /// returning the previous pointer.
+ ///
+ /// This is equivalent to using [`wrapping_sub`] to atomically perform the
+ /// equivalent of `ptr = ptr.wrapping_sub(val);`.
+ ///
+ /// This method operates in units of `T`, which means that it cannot be used
+ /// to offset the pointer by an amount which is not a multiple of
+ /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
+ /// work with a deliberately misaligned pointer. In such cases, you may use
+ /// the [`fetch_byte_sub`](Self::fetch_byte_sub) method instead.
+ ///
+ /// `fetch_ptr_sub` takes an [`Ordering`] argument which describes the memory
+ /// ordering of this operation. All ordering modes are possible. Note that
+ /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
+ /// and using [`Release`] makes the load part [`Relaxed`].
+ ///
+ /// **Note**: This method is only available on platforms that support atomic
+ /// operations on [`AtomicPtr`].
+ ///
+ /// [`wrapping_sub`]: pointer::wrapping_sub
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(strict_provenance_atomic_ptr)]
+ /// use core::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let array = [1i32, 2i32];
+ /// let atom = AtomicPtr::new(array.as_ptr().wrapping_add(1) as *mut _);
+ ///
+ /// assert!(core::ptr::eq(
+ /// atom.fetch_ptr_sub(1, Ordering::Relaxed),
+ /// &array[1],
+ /// ));
+ /// assert!(core::ptr::eq(atom.load(Ordering::Relaxed), &array[0]));
+ /// ```
+ #[inline]
+ #[cfg(target_has_atomic = "ptr")]
+ #[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn fetch_ptr_sub(&self, val: usize, order: Ordering) -> *mut T {
+ self.fetch_byte_sub(val.wrapping_mul(core::mem::size_of::<T>()), order)
+ }
+
+ /// Offsets the pointer's address by adding `val` *bytes*, returning the
+ /// previous pointer.
+ ///
+ /// This is equivalent to using [`wrapping_add`] and [`cast`] to atomically
+ /// perform `ptr = ptr.cast::<u8>().wrapping_add(val).cast::<T>()`.
+ ///
+ /// `fetch_byte_add` takes an [`Ordering`] argument which describes the
+ /// memory ordering of this operation. All ordering modes are possible. Note
+ /// that using [`Acquire`] makes the store part of this operation
+ /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
+ ///
+ /// **Note**: This method is only available on platforms that support atomic
+ /// operations on [`AtomicPtr`].
+ ///
+ /// [`wrapping_add`]: pointer::wrapping_add
+ /// [`cast`]: pointer::cast
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
+ /// use core::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
+ /// assert_eq!(atom.fetch_byte_add(1, Ordering::Relaxed).addr(), 0);
+ /// // Note: in units of bytes, not `size_of::<i64>()`.
+ /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 1);
+ /// ```
+ #[inline]
+ #[cfg(target_has_atomic = "ptr")]
+ #[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn fetch_byte_add(&self, val: usize, order: Ordering) -> *mut T {
+ #[cfg(not(bootstrap))]
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe {
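+ // `invalid_mut(val)` wraps the byte offset in a pointer with address
+ // `val` and no provenance, since this intrinsic takes a pointer operand.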
+ atomic_add(self.p.get(), core::ptr::invalid_mut(val), order).cast()
+ }
+ #[cfg(bootstrap)]
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe {
+ atomic_add(self.p.get().cast::<usize>(), val, order) as *mut T
+ }
+ }
+
+ /// Offsets the pointer's address by subtracting `val` *bytes*, returning the
+ /// previous pointer.
+ ///
+ /// This is equivalent to using [`wrapping_sub`] and [`cast`] to atomically
+ /// perform `ptr = ptr.cast::<u8>().wrapping_sub(val).cast::<T>()`.
+ ///
+ /// `fetch_byte_sub` takes an [`Ordering`] argument which describes the
+ /// memory ordering of this operation. All ordering modes are possible. Note
+ /// that using [`Acquire`] makes the store part of this operation
+ /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
+ ///
+ /// **Note**: This method is only available on platforms that support atomic
+ /// operations on [`AtomicPtr`].
+ ///
+ /// [`wrapping_sub`]: pointer::wrapping_sub
+ /// [`cast`]: pointer::cast
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
+ /// use core::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let atom = AtomicPtr::<i64>::new(core::ptr::invalid_mut(1));
+ /// assert_eq!(atom.fetch_byte_sub(1, Ordering::Relaxed).addr(), 1);
+ /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 0);
+ /// ```
+ #[inline]
+ #[cfg(target_has_atomic = "ptr")]
+ #[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn fetch_byte_sub(&self, val: usize, order: Ordering) -> *mut T {
+ #[cfg(not(bootstrap))]
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe {
+ atomic_sub(self.p.get(), core::ptr::invalid_mut(val), order).cast()
+ }
+ #[cfg(bootstrap)]
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe {
+ atomic_sub(self.p.get().cast::<usize>(), val, order) as *mut T
+ }
+ }
+
+ /// Performs a bitwise "or" operation on the address of the current pointer,
+ /// and the argument `val`, and stores a pointer with provenance of the
+ /// current pointer and the resulting address.
+ ///
+ /// This is equivalent to using [`map_addr`] to atomically
+ /// perform `ptr = ptr.map_addr(|a| a | val)`. This can be used in tagged
+ /// pointer schemes to atomically set tag bits.
+ ///
+ /// **Caveat**: This operation returns the previous value. To compute the
+ /// stored value without losing provenance, you may use [`map_addr`]. For
+ /// example: `a.fetch_or(val).map_addr(|a| a | val)`.
+ ///
+ /// `fetch_or` takes an [`Ordering`] argument which describes the memory
+ /// ordering of this operation. All ordering modes are possible. Note that
+ /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
+ /// and using [`Release`] makes the load part [`Relaxed`].
+ ///
+ /// **Note**: This method is only available on platforms that support atomic
+ /// operations on [`AtomicPtr`].
+ ///
+ /// This API and its claimed semantics are part of the Strict Provenance
+ /// experiment, see the [module documentation for `ptr`][crate::ptr] for
+ /// details.
+ ///
+ /// [`map_addr`]: pointer::map_addr
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
+ /// use core::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let pointer = &mut 3i64 as *mut i64;
+ ///
+ /// let atom = AtomicPtr::<i64>::new(pointer);
+ /// // Tag the bottom bit of the pointer.
+ /// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).addr() & 1, 0);
+ /// // Extract and untag.
+ /// let tagged = atom.load(Ordering::Relaxed);
+ /// assert_eq!(tagged.addr() & 1, 1);
+ /// assert_eq!(tagged.map_addr(|p| p & !1), pointer);
+ /// ```
+ #[inline]
+ #[cfg(target_has_atomic = "ptr")]
+ #[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn fetch_or(&self, val: usize, order: Ordering) -> *mut T {
+ #[cfg(not(bootstrap))]
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe {
+ atomic_or(self.p.get(), core::ptr::invalid_mut(val), order).cast()
+ }
+ #[cfg(bootstrap)]
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe {
+ atomic_or(self.p.get().cast::<usize>(), val, order) as *mut T
+ }
+ }
+
+ /// Performs a bitwise "and" operation on the address of the current
+ /// pointer, and the argument `val`, and stores a pointer with provenance of
+ /// the current pointer and the resulting address.
+ ///
+ /// This is equivalent to using [`map_addr`] to atomically
+ /// perform `ptr = ptr.map_addr(|a| a & val)`. This can be used in tagged
+ /// pointer schemes to atomically unset tag bits.
+ ///
+ /// **Caveat**: This operation returns the previous value. To compute the
+ /// stored value without losing provenance, you may use [`map_addr`]. For
+ /// example: `a.fetch_and(val).map_addr(|a| a & val)`.
+ ///
+ /// `fetch_and` takes an [`Ordering`] argument which describes the memory
+ /// ordering of this operation. All ordering modes are possible. Note that
+ /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
+ /// and using [`Release`] makes the load part [`Relaxed`].
+ ///
+ /// **Note**: This method is only available on platforms that support atomic
+ /// operations on [`AtomicPtr`].
+ ///
+ /// This API and its claimed semantics are part of the Strict Provenance
+ /// experiment, see the [module documentation for `ptr`][crate::ptr] for
+ /// details.
+ ///
+ /// [`map_addr`]: pointer::map_addr
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
+ /// use core::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let pointer = &mut 3i64 as *mut i64;
+ /// // A tagged pointer
+ /// let atom = AtomicPtr::<i64>::new(pointer.map_addr(|a| a | 1));
+ /// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).addr() & 1, 1);
+ /// // Untag, and extract the previously tagged pointer.
+ /// let untagged = atom.fetch_and(!1, Ordering::Relaxed)
+ /// .map_addr(|a| a & !1);
+ /// assert_eq!(untagged, pointer);
+ /// ```
+ #[inline]
+ #[cfg(target_has_atomic = "ptr")]
+ #[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn fetch_and(&self, val: usize, order: Ordering) -> *mut T {
+ #[cfg(not(bootstrap))]
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe {
+ atomic_and(self.p.get(), core::ptr::invalid_mut(val), order).cast()
+ }
+ #[cfg(bootstrap)]
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe {
+ atomic_and(self.p.get().cast::<usize>(), val, order) as *mut T
+ }
+ }
+
+ /// Performs a bitwise "xor" operation on the address of the current
+ /// pointer, and the argument `val`, and stores a pointer with provenance of
+ /// the current pointer and the resulting address.
+ ///
+ /// This is equivalent to using [`map_addr`] to atomically
+ /// perform `ptr = ptr.map_addr(|a| a ^ val)`. This can be used in tagged
+ /// pointer schemes to atomically toggle tag bits.
+ ///
+ /// **Caveat**: This operation returns the previous value. To compute the
+ /// stored value without losing provenance, you may use [`map_addr`]. For
+ /// example: `a.fetch_xor(val).map_addr(|a| a ^ val)`.
+ ///
+ /// `fetch_xor` takes an [`Ordering`] argument which describes the memory
+ /// ordering of this operation. All ordering modes are possible. Note that
+ /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
+ /// and using [`Release`] makes the load part [`Relaxed`].
+ ///
+ /// **Note**: This method is only available on platforms that support atomic
+ /// operations on [`AtomicPtr`].
+ ///
+ /// This API and its claimed semantics are part of the Strict Provenance
+ /// experiment, see the [module documentation for `ptr`][crate::ptr] for
+ /// details.
+ ///
+ /// [`map_addr`]: pointer::map_addr
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
+ /// use core::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let pointer = &mut 3i64 as *mut i64;
+ /// let atom = AtomicPtr::<i64>::new(pointer);
+ ///
+ /// // Toggle a tag bit on the pointer.
+ /// atom.fetch_xor(1, Ordering::Relaxed);
+ /// assert_eq!(atom.load(Ordering::Relaxed).addr() & 1, 1);
+ /// ```
+ #[inline]
+ #[cfg(target_has_atomic = "ptr")]
+ #[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn fetch_xor(&self, val: usize, order: Ordering) -> *mut T {
+ #[cfg(not(bootstrap))]
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe {
+ atomic_xor(self.p.get(), core::ptr::invalid_mut(val), order).cast()
+ }
+ #[cfg(bootstrap)]
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe {
+ atomic_xor(self.p.get().cast::<usize>(), val, order) as *mut T
+ }
+ }
+}
+
+#[cfg(target_has_atomic_load_store = "8")]
+#[stable(feature = "atomic_bool_from", since = "1.24.0")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl const From<bool> for AtomicBool {
+ /// Converts a `bool` into an `AtomicBool`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::AtomicBool;
+ /// let atomic_bool = AtomicBool::from(true);
+ /// assert_eq!(format!("{atomic_bool:?}"), "true")
+ /// ```
+ #[inline]
+ fn from(b: bool) -> Self {
+ Self::new(b)
+ }
+}
+
+#[cfg(target_has_atomic_load_store = "ptr")]
+#[stable(feature = "atomic_from", since = "1.23.0")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl<T> const From<*mut T> for AtomicPtr<T> {
+ /// Converts a `*mut T` into an `AtomicPtr<T>`.
+ #[inline]
+ fn from(p: *mut T) -> Self {
+ Self::new(p)
+ }
+}
+
+#[allow(unused_macros)] // This macro ends up being unused on some architectures.
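+ // Expands to nothing for `u8`/`i8` and to the given tokens for wider types;
+ // used to omit doc notes that are vacuous for the always-aligned 8-bit types.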
+macro_rules! if_not_8_bit {
+ (u8, $($tt:tt)*) => { "" };
+ (i8, $($tt:tt)*) => { "" };
+ ($_:ident, $($tt:tt)*) => { $($tt)* };
+}
+
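+ // Generates an atomic integer type (`$atomic_type`) wrapping `$int_type`,
+ // together with its deprecated `$atomic_init` constant and trait impls; the
+ // meta parameters supply the stability and `cfg` attributes for each item.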
+#[cfg(target_has_atomic_load_store = "8")]
+macro_rules! atomic_int {
+ ($cfg_cas:meta,
+ $cfg_align:meta,
+ $stable:meta,
+ $stable_cxchg:meta,
+ $stable_debug:meta,
+ $stable_access:meta,
+ $stable_from:meta,
+ $stable_nand:meta,
+ $const_stable:meta,
+ $stable_init_const:meta,
+ $diagnostic_item:meta,
+ $s_int_type:literal,
+ $extra_feature:expr,
+ $min_fn:ident, $max_fn:ident,
+ $align:expr,
+ $atomic_new:expr,
+ $int_type:ident $atomic_type:ident $atomic_init:ident) => {
+ /// An integer type which can be safely shared between threads.
+ ///
+ /// This type has the same in-memory representation as the underlying
+ /// integer type, [`
+ #[doc = $s_int_type]
+ /// `]. For more about the differences between atomic types and
+ /// non-atomic types as well as information about the portability of
+ /// this type, please see the [module-level documentation].
+ ///
+ /// **Note:** This type is only available on platforms that support
+ /// atomic loads and stores of [`
+ #[doc = $s_int_type]
+ /// `].
+ ///
+ /// [module-level documentation]: crate::sync::atomic
+ #[$stable]
+ #[$diagnostic_item]
+ #[repr(C, align($align))]
+ pub struct $atomic_type {
+ v: UnsafeCell<$int_type>,
+ }
+
+ /// An atomic integer initialized to `0`.
+ #[$stable_init_const]
+ #[deprecated(
+ since = "1.34.0",
+ note = "the `new` function is now preferred",
+ suggestion = $atomic_new,
+ )]
+ pub const $atomic_init: $atomic_type = $atomic_type::new(0);
+
+ #[$stable]
+ #[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
+ impl const Default for $atomic_type {
+ #[inline]
+ fn default() -> Self {
+ Self::new(Default::default())
+ }
+ }
+
+ #[$stable_from]
+ #[rustc_const_unstable(feature = "const_num_from_num", issue = "87852")]
+ impl const From<$int_type> for $atomic_type {
+ #[doc = concat!("Converts an `", stringify!($int_type), "` into an `", stringify!($atomic_type), "`.")]
+ #[inline]
+ fn from(v: $int_type) -> Self { Self::new(v) }
+ }
+
+ #[$stable_debug]
+ impl fmt::Debug for $atomic_type {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&self.load(Ordering::Relaxed), f)
+ }
+ }
+
+ // Send is implicitly implemented.
+ #[$stable]
+ unsafe impl Sync for $atomic_type {}
+
+ impl $atomic_type {
+ /// Creates a new atomic integer.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!($extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";")]
+ ///
+ #[doc = concat!("let atomic_forty_two = ", stringify!($atomic_type), "::new(42);")]
+ /// ```
+ #[inline]
+ #[$stable]
+ #[$const_stable]
+ #[must_use]
+ pub const fn new(v: $int_type) -> Self {
+ Self {v: UnsafeCell::new(v)}
+ }
+
+ /// Returns a mutable reference to the underlying integer.
+ ///
+ /// This is safe because the mutable reference guarantees that no other threads are
+ /// concurrently accessing the atomic data.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
+ ///
+ #[doc = concat!("let mut some_var = ", stringify!($atomic_type), "::new(10);")]
+ /// assert_eq!(*some_var.get_mut(), 10);
+ /// *some_var.get_mut() = 5;
+ /// assert_eq!(some_var.load(Ordering::SeqCst), 5);
+ /// ```
+ #[inline]
+ #[$stable_access]
+ pub fn get_mut(&mut self) -> &mut $int_type {
+ self.v.get_mut()
+ }
+
+ #[doc = concat!("Get atomic access to a `&mut ", stringify!($int_type), "`.")]
+ ///
+ #[doc = if_not_8_bit! {
+ $int_type,
+ concat!(
+ "**Note:** This function is only available on targets where `",
+ stringify!($int_type), "` has an alignment of ", $align, " bytes."
+ )
+ }]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(atomic_from_mut)]
+ #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
+ ///
+ /// let mut some_int = 123;
+ #[doc = concat!("let a = ", stringify!($atomic_type), "::from_mut(&mut some_int);")]
+ /// a.store(100, Ordering::Relaxed);
+ /// assert_eq!(some_int, 100);
+ /// ```
+ #[inline]
+ #[$cfg_align]
+ #[unstable(feature = "atomic_from_mut", issue = "76314")]
+ pub fn from_mut(v: &mut $int_type) -> &mut Self {
+ use crate::mem::align_of;
+ let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
+ // SAFETY:
+ // - the mutable reference guarantees unique ownership.
+ // - the alignment of `$int_type` and `Self` is the
+ // same, as promised by $cfg_align and verified above.
+ unsafe { &mut *(v as *mut $int_type as *mut Self) }
+ }
+
+ #[doc = concat!("Get non-atomic access to a `&mut [", stringify!($atomic_type), "]` slice")]
+ ///
+ /// This is safe because the mutable reference guarantees that no other threads are
+ /// concurrently accessing the atomic data.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(atomic_from_mut, inline_const)]
+ #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
+ ///
+ #[doc = concat!("let mut some_ints = [const { ", stringify!($atomic_type), "::new(0) }; 10];")]
+ ///
+ #[doc = concat!("let view: &mut [", stringify!($int_type), "] = ", stringify!($atomic_type), "::get_mut_slice(&mut some_ints);")]
+ /// assert_eq!(view, [0; 10]);
+ /// view
+ /// .iter_mut()
+ /// .enumerate()
+ /// .for_each(|(idx, int)| *int = idx as _);
+ ///
+ /// std::thread::scope(|s| {
+ /// some_ints
+ /// .iter()
+ /// .enumerate()
+ /// .for_each(|(idx, int)| {
+ /// s.spawn(move || assert_eq!(int.load(Ordering::Relaxed), idx as _));
+ /// })
+ /// });
+ /// ```
+ #[inline]
+ #[unstable(feature = "atomic_from_mut", issue = "76314")]
+ pub fn get_mut_slice(this: &mut [Self]) -> &mut [$int_type] {
+ // SAFETY: the mutable reference guarantees unique ownership.
+ unsafe { &mut *(this as *mut [Self] as *mut [$int_type]) }
+ }
+
+ #[doc = concat!("Get atomic access to a `&mut [", stringify!($int_type), "]` slice.")]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(atomic_from_mut)]
+ #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
+ ///
+ /// let mut some_ints = [0; 10];
+ #[doc = concat!("let a = &*", stringify!($atomic_type), "::from_mut_slice(&mut some_ints);")]
+ /// std::thread::scope(|s| {
+ /// for i in 0..a.len() {
+ /// s.spawn(move || a[i].store(i as _, Ordering::Relaxed));
+ /// }
+ /// });
+ /// for (i, n) in some_ints.into_iter().enumerate() {
+ /// assert_eq!(i, n as usize);
+ /// }
+ /// ```
+ #[inline]
+ #[$cfg_align]
+ #[unstable(feature = "atomic_from_mut", issue = "76314")]
+ pub fn from_mut_slice(v: &mut [$int_type]) -> &mut [Self] {
+ use crate::mem::align_of;
+ let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
+ // SAFETY:
+ // - the mutable reference guarantees unique ownership.
+ // - the alignment of `$int_type` and `Self` is the
+ // same, as promised by $cfg_align and verified above.
+ unsafe { &mut *(v as *mut [$int_type] as *mut [Self]) }
+ }
+
+ /// Consumes the atomic and returns the contained value.
+ ///
+ /// This is safe because passing `self` by value guarantees that no other threads are
+ /// concurrently accessing the atomic data.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!($extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";")]
+ ///
+ #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
+ /// assert_eq!(some_var.into_inner(), 5);
+ /// ```
+ #[inline]
+ #[$stable_access]
+ #[rustc_const_unstable(feature = "const_cell_into_inner", issue = "78729")]
+ pub const fn into_inner(self) -> $int_type {
+ self.v.into_inner()
+ }
+
+ /// Loads a value from the atomic integer.
+ ///
+ /// `load` takes an [`Ordering`] argument which describes the memory ordering of this operation.
+ /// Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
+ ///
+ /// # Panics
+ ///
+ /// Panics if `order` is [`Release`] or [`AcqRel`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
+ ///
+ #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
+ ///
+ /// assert_eq!(some_var.load(Ordering::Relaxed), 5);
+ /// ```
+ #[inline]
+ #[$stable]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn load(&self, order: Ordering) -> $int_type {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { atomic_load(self.v.get(), order) }
+ }
+
+ /// Stores a value into the atomic integer.
+ ///
+ /// `store` takes an [`Ordering`] argument which describes the memory ordering of this operation.
+ /// Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
+ ///
+ /// # Panics
+ ///
+ /// Panics if `order` is [`Acquire`] or [`AcqRel`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
+ ///
+ #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
+ ///
+ /// some_var.store(10, Ordering::Relaxed);
+ /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
+ /// ```
+ #[inline]
+ #[$stable]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn store(&self, val: $int_type, order: Ordering) {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { atomic_store(self.v.get(), val, order); }
+ }
+
+ /// Stores a value into the atomic integer, returning the previous value.
+ ///
+ /// `swap` takes an [`Ordering`] argument which describes the memory ordering
+ /// of this operation. All ordering modes are possible. Note that using
+ /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+ /// using [`Release`] makes the load part [`Relaxed`].
+ ///
+ /// **Note**: This method is only available on platforms that support atomic operations on
+ #[doc = concat!("[`", $s_int_type, "`].")]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
+ ///
+ #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
+ ///
+ /// assert_eq!(some_var.swap(10, Ordering::Relaxed), 5);
+ /// ```
+ #[inline]
+ #[$stable]
+ #[$cfg_cas]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { atomic_swap(self.v.get(), val, order) }
+ }
+
+ /// Stores a value into the atomic integer if the current value is the same as
+ /// the `current` value.
+ ///
+ /// The return value is always the previous value. If it is equal to `current`, then the
+ /// value was updated.
+ ///
+ /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
+ /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
+ /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
+ /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
+ /// happens, and using [`Release`] makes the load part [`Relaxed`].
+ ///
+ /// **Note**: This method is only available on platforms that support atomic operations on
+ #[doc = concat!("[`", $s_int_type, "`].")]
+ ///
+ /// # Migrating to `compare_exchange` and `compare_exchange_weak`
+ ///
+ /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for
+ /// memory orderings:
+ ///
+ /// Original | Success | Failure
+ /// -------- | ------- | -------
+ /// Relaxed | Relaxed | Relaxed
+ /// Acquire | Acquire | Acquire
+ /// Release | Release | Relaxed
+ /// AcqRel | AcqRel | Acquire
+ /// SeqCst | SeqCst | SeqCst
+ ///
+ /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds,
+ /// which allows the compiler to generate better assembly code when the compare and swap
+ /// is used in a loop.
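+ ///
+ /// For example, per the table above, a `compare_and_swap` call with [`SeqCst`]
+ /// ordering migrates to a `compare_exchange` with [`SeqCst`] for both the
+ /// success and the failure ordering:
+ ///
+ /// ```
+ #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
+ ///
+ #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
+ ///
+ /// // Before: `some_var.compare_and_swap(5, 10, Ordering::SeqCst)`
+ /// let _ = some_var.compare_exchange(5, 10, Ordering::SeqCst, Ordering::SeqCst);
+ /// ```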
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
+ ///
+ #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
+ ///
+ /// assert_eq!(some_var.compare_and_swap(5, 10, Ordering::Relaxed), 5);
+ /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
+ ///
+ /// assert_eq!(some_var.compare_and_swap(6, 12, Ordering::Relaxed), 10);
+ /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
+ /// ```
+ #[inline]
+ #[$stable]
+ #[deprecated(
+ since = "1.50.0",
+ note = "Use `compare_exchange` or `compare_exchange_weak` instead")
+ ]
+ #[$cfg_cas]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn compare_and_swap(&self,
+ current: $int_type,
+ new: $int_type,
+ order: Ordering) -> $int_type {
+ match self.compare_exchange(current,
+ new,
+ order,
+ strongest_failure_ordering(order)) {
+ Ok(x) => x,
+ Err(x) => x,
+ }
+ }
+
+ /// Stores a value into the atomic integer if the current value is the same as
+ /// the `current` value.
+ ///
+ /// The return value is a result indicating whether the new value was written and
+ /// containing the previous value. On success this value is guaranteed to be equal to
+ /// `current`.
+ ///
+ /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
+ /// ordering of this operation. `success` describes the required ordering for the
+ /// read-modify-write operation that takes place if the comparison with `current` succeeds.
+ /// `failure` describes the required ordering for the load operation that takes place when
+ /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
+ /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
+ /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
+ ///
+ /// **Note**: This method is only available on platforms that support atomic operations on
+ #[doc = concat!("[`", $s_int_type, "`].")]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
+ ///
+ #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
+ ///
+ /// assert_eq!(some_var.compare_exchange(5, 10,
+ /// Ordering::Acquire,
+ /// Ordering::Relaxed),
+ /// Ok(5));
+ /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
+ ///
+ /// assert_eq!(some_var.compare_exchange(6, 12,
+ /// Ordering::SeqCst,
+ /// Ordering::Acquire),
+ /// Err(10));
+ /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
+ /// ```
+ #[inline]
+ #[$stable_cxchg]
+ #[$cfg_cas]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn compare_exchange(&self,
+ current: $int_type,
+ new: $int_type,
+ success: Ordering,
+ failure: Ordering) -> Result<$int_type, $int_type> {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
+ }
+
+ /// Stores a value into the atomic integer if the current value is the same as
+ /// the `current` value.
+ ///
+ #[doc = concat!("Unlike [`", stringify!($atomic_type), "::compare_exchange`],")]
+ /// this function is allowed to spuriously fail even
+ /// when the comparison succeeds, which can result in more efficient code on some
+ /// platforms. The return value is a result indicating whether the new value was
+ /// written and containing the previous value.
+ ///
+ /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
+ /// ordering of this operation. `success` describes the required ordering for the
+ /// read-modify-write operation that takes place if the comparison with `current` succeeds.
+ /// `failure` describes the required ordering for the load operation that takes place when
+ /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
+ /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
+ /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
+ ///
+ /// **Note**: This method is only available on platforms that support atomic operations on
+ #[doc = concat!("[`", $s_int_type, "`].")]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
+ ///
+ #[doc = concat!("let val = ", stringify!($atomic_type), "::new(4);")]
+ ///
+ /// let mut old = val.load(Ordering::Relaxed);
+ /// loop {
+ /// let new = old * 2;
+ /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
+ /// Ok(_) => break,
+ /// Err(x) => old = x,
+ /// }
+ /// }
+ /// ```
+ #[inline]
+ #[$stable_cxchg]
+ #[$cfg_cas]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn compare_exchange_weak(&self,
+ current: $int_type,
+ new: $int_type,
+ success: Ordering,
+ failure: Ordering) -> Result<$int_type, $int_type> {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe {
+ atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
+ }
+ }
+
+ /// Adds to the current value, returning the previous value.
+ ///
+ /// This operation wraps around on overflow.
+ ///
+ /// `fetch_add` takes an [`Ordering`] argument which describes the memory ordering
+ /// of this operation. All ordering modes are possible. Note that using
+ /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+ /// using [`Release`] makes the load part [`Relaxed`].
+ ///
+ /// **Note**: This method is only available on platforms that support atomic operations on
+ #[doc = concat!("[`", $s_int_type, "`].")]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
+ ///
+ #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0);")]
+ /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
+ /// assert_eq!(foo.load(Ordering::SeqCst), 10);
+ /// ```
+ #[inline]
+ #[$stable]
+ #[$cfg_cas]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { atomic_add(self.v.get(), val, order) }
+ }
+
+ /// Subtracts from the current value, returning the previous value.
+ ///
+ /// This operation wraps around on overflow.
+ ///
+ /// `fetch_sub` takes an [`Ordering`] argument which describes the memory ordering
+ /// of this operation. All ordering modes are possible. Note that using
+ /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+ /// using [`Release`] makes the load part [`Relaxed`].
+ ///
+ /// **Note**: This method is only available on platforms that support atomic operations on
+ #[doc = concat!("[`", $s_int_type, "`].")]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
+ ///
+ #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(20);")]
+ /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20);
+ /// assert_eq!(foo.load(Ordering::SeqCst), 10);
+ /// ```
+ #[inline]
+ #[$stable]
+ #[$cfg_cas]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { atomic_sub(self.v.get(), val, order) }
+ }
+
+ /// Bitwise "and" with the current value.
+ ///
+ /// Performs a bitwise "and" operation on the current value and the argument `val`, and
+ /// sets the new value to the result.
+ ///
+ /// Returns the previous value.
+ ///
+ /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
+ /// of this operation. All ordering modes are possible. Note that using
+ /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+ /// using [`Release`] makes the load part [`Relaxed`].
+ ///
+ /// **Note**: This method is only available on platforms that support atomic operations on
+ #[doc = concat!("[`", $s_int_type, "`].")]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
+ ///
+ #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0b101101);")]
+ /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
+ /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
+ /// ```
+ #[inline]
+ #[$stable]
+ #[$cfg_cas]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { atomic_and(self.v.get(), val, order) }
+ }
+
+ /// Bitwise "nand" with the current value.
+ ///
+ /// Performs a bitwise "nand" operation on the current value and the argument `val`, and
+ /// sets the new value to the result.
+ ///
+ /// Returns the previous value.
+ ///
+ /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
+ /// of this operation. All ordering modes are possible. Note that using
+ /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+ /// using [`Release`] makes the load part [`Relaxed`].
+ ///
+ /// **Note**: This method is only available on platforms that support atomic operations on
+ #[doc = concat!("[`", $s_int_type, "`].")]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
+ ///
+ #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0x13);")]
+ /// assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13);
+ /// assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31));
+ /// ```
+ #[inline]
+ #[$stable_nand]
+ #[$cfg_cas]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { atomic_nand(self.v.get(), val, order) }
+ }
+
+ /// Bitwise "or" with the current value.
+ ///
+ /// Performs a bitwise "or" operation on the current value and the argument `val`, and
+ /// sets the new value to the result.
+ ///
+ /// Returns the previous value.
+ ///
+ /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
+ /// of this operation. All ordering modes are possible. Note that using
+ /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+ /// using [`Release`] makes the load part [`Relaxed`].
+ ///
+ /// **Note**: This method is only available on platforms that support atomic operations on
+ #[doc = concat!("[`", $s_int_type, "`].")]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
+ ///
+ #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0b101101);")]
+ /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
+ /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
+ /// ```
+ #[inline]
+ #[$stable]
+ #[$cfg_cas]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { atomic_or(self.v.get(), val, order) }
+ }
+
+ /// Bitwise "xor" with the current value.
+ ///
+ /// Performs a bitwise "xor" operation on the current value and the argument `val`, and
+ /// sets the new value to the result.
+ ///
+ /// Returns the previous value.
+ ///
+ /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
+ /// of this operation. All ordering modes are possible. Note that using
+ /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+ /// using [`Release`] makes the load part [`Relaxed`].
+ ///
+ /// **Note**: This method is only available on platforms that support atomic operations on
+ #[doc = concat!("[`", $s_int_type, "`].")]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
+ ///
+ #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0b101101);")]
+ /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
+ /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
+ /// ```
+ #[inline]
+ #[$stable]
+ #[$cfg_cas]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { atomic_xor(self.v.get(), val, order) }
+ }
+
+ /// Fetches the value, and applies a function to it that returns an optional
+ /// new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
+ /// `Err(previous_value)`.
+ ///
+ /// Note: This may call the function multiple times if the value has been changed by other threads in
+ /// the meantime, as long as the function returns `Some(_)`, but the function will have been applied
+ /// only once to the stored value.
+ ///
+ /// `fetch_update` takes two [`Ordering`] arguments to describe the memory ordering of this operation.
+ /// The first describes the required ordering for when the operation finally succeeds while the second
+ /// describes the required ordering for loads. These correspond to the success and failure orderings of
+ #[doc = concat!("[`", stringify!($atomic_type), "::compare_exchange`]")]
+ /// respectively.
+ ///
+ /// Using [`Acquire`] as success ordering makes the store part
+ /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load
+ /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
+ ///
+ /// **Note**: This method is only available on platforms that support atomic operations on
+ #[doc = concat!("[`", $s_int_type, "`].")]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
+ ///
+ #[doc = concat!("let x = ", stringify!($atomic_type), "::new(7);")]
+ /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(7));
+ /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(7));
+ /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(8));
+ /// assert_eq!(x.load(Ordering::SeqCst), 9);
+ /// ```
+ #[inline]
+ #[stable(feature = "no_more_cas", since = "1.45.0")]
+ #[$cfg_cas]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn fetch_update<F>(&self,
+ set_order: Ordering,
+ fetch_order: Ordering,
+ mut f: F) -> Result<$int_type, $int_type>
+ where F: FnMut($int_type) -> Option<$int_type> {
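+ // Retry the (possibly spuriously failing) weak compare-exchange until it
+ // succeeds or `f` declines to produce a new value.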
+ let mut prev = self.load(fetch_order);
+ while let Some(next) = f(prev) {
+ match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
+ x @ Ok(_) => return x,
+ Err(next_prev) => prev = next_prev
+ }
+ }
+ Err(prev)
+ }
+
+ /// Maximum with the current value.
+ ///
+ /// Finds the maximum of the current value and the argument `val`, and
+ /// sets the new value to the result.
+ ///
+ /// Returns the previous value.
+ ///
+ /// `fetch_max` takes an [`Ordering`] argument which describes the memory ordering
+ /// of this operation. All ordering modes are possible. Note that using
+ /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+ /// using [`Release`] makes the load part [`Relaxed`].
+ ///
+ /// **Note**: This method is only available on platforms that support atomic operations on
+ #[doc = concat!("[`", $s_int_type, "`].")]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
+ ///
+ #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
+ /// assert_eq!(foo.fetch_max(42, Ordering::SeqCst), 23);
+ /// assert_eq!(foo.load(Ordering::SeqCst), 42);
+ /// ```
+ ///
+ /// If you want to obtain the maximum value in one step, you can use the following:
+ ///
+ /// ```
+ #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
+ ///
+ #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
+ /// let bar = 42;
+ /// let max_foo = foo.fetch_max(bar, Ordering::SeqCst).max(bar);
+ /// assert_eq!(max_foo, 42);
+ /// ```
+ #[inline]
+ #[stable(feature = "atomic_min_max", since = "1.45.0")]
+ #[$cfg_cas]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { $max_fn(self.v.get(), val, order) }
+ }
+
+ /// Minimum with the current value.
+ ///
+ /// Finds the minimum of the current value and the argument `val`, and
+ /// sets the new value to the result.
+ ///
+ /// Returns the previous value.
+ ///
+ /// `fetch_min` takes an [`Ordering`] argument which describes the memory ordering
+ /// of this operation. All ordering modes are possible. Note that using
+ /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+ /// using [`Release`] makes the load part [`Relaxed`].
+ ///
+ /// **Note**: This method is only available on platforms that support atomic operations on
+ #[doc = concat!("[`", $s_int_type, "`].")]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
+ ///
+ #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
+ /// assert_eq!(foo.fetch_min(42, Ordering::Relaxed), 23);
+ /// assert_eq!(foo.load(Ordering::Relaxed), 23);
+ /// assert_eq!(foo.fetch_min(22, Ordering::Relaxed), 23);
+ /// assert_eq!(foo.load(Ordering::Relaxed), 22);
+ /// ```
+ ///
+ /// If you want to obtain the minimum value in one step, you can use the following:
+ ///
+ /// ```
+ #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
+ ///
+ #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
+ /// let bar = 12;
+ /// let min_foo = foo.fetch_min(bar, Ordering::SeqCst).min(bar);
+ /// assert_eq!(min_foo, 12);
+ /// ```
+ #[inline]
+ #[stable(feature = "atomic_min_max", since = "1.45.0")]
+ #[$cfg_cas]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { $min_fn(self.v.get(), val, order) }
+ }
+
+ /// Returns a mutable pointer to the underlying integer.
+ ///
+ /// Doing non-atomic reads and writes on the resulting integer can be a data race.
+ /// This method is mostly useful for FFI, where the function signature may use
+ #[doc = concat!("`*mut ", stringify!($int_type), "` instead of `&", stringify!($atomic_type), "`.")]
+ ///
+ /// Returning a `*mut` pointer from a shared reference to this atomic is safe because the
+ /// atomic types work with interior mutability. All modifications of an atomic change the value
+ /// through a shared reference, and can do so safely as long as they use atomic operations. Any
+ /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
+ /// restriction: operations on it must be atomic.
+ ///
+ /// # Examples
+ ///
+ /// ```ignore (extern-declaration)
+ /// # fn main() {
+ #[doc = concat!($extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";")]
+ ///
+ /// extern "C" {
+ #[doc = concat!(" fn my_atomic_op(arg: *mut ", stringify!($int_type), ");")]
+ /// }
+ ///
+ #[doc = concat!("let mut atomic = ", stringify!($atomic_type), "::new(1);")]
+ ///
+ // SAFETY: Safe as long as `my_atomic_op` is atomic.
+ /// unsafe {
+ /// my_atomic_op(atomic.as_mut_ptr());
+ /// }
+ /// # }
+ /// ```
+ #[inline]
+ #[unstable(feature = "atomic_mut_ptr",
+ reason = "recently added",
+ issue = "66893")]
+ pub fn as_mut_ptr(&self) -> *mut $int_type {
+ self.v.get()
+ }
+ }
+ }
+}
+
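+// Instantiate the fixed-size integer atomics. The positional arguments supply
+// the `cfg` gates, the stability attributes for the various method groups, the
+// diagnostic item, doc strings, the signed/unsigned min/max helpers, the
+// alignment in bytes, and the type names.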
+#[cfg(target_has_atomic_load_store = "8")]
+atomic_int! {
+ cfg(target_has_atomic = "8"),
+ cfg(target_has_atomic_equal_alignment = "8"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
+ unstable(feature = "integer_atomics", issue = "99069"),
+ cfg_attr(not(test), rustc_diagnostic_item = "AtomicI8"),
+ "i8",
+ "",
+ atomic_min, atomic_max,
+ 1,
+ "AtomicI8::new(0)",
+ i8 AtomicI8 ATOMIC_I8_INIT
+}
+#[cfg(target_has_atomic_load_store = "8")]
+atomic_int! {
+ cfg(target_has_atomic = "8"),
+ cfg(target_has_atomic_equal_alignment = "8"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
+ unstable(feature = "integer_atomics", issue = "99069"),
+ cfg_attr(not(test), rustc_diagnostic_item = "AtomicU8"),
+ "u8",
+ "",
+ atomic_umin, atomic_umax,
+ 1,
+ "AtomicU8::new(0)",
+ u8 AtomicU8 ATOMIC_U8_INIT
+}
+#[cfg(target_has_atomic_load_store = "16")]
+atomic_int! {
+ cfg(target_has_atomic = "16"),
+ cfg(target_has_atomic_equal_alignment = "16"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
+ unstable(feature = "integer_atomics", issue = "99069"),
+ cfg_attr(not(test), rustc_diagnostic_item = "AtomicI16"),
+ "i16",
+ "",
+ atomic_min, atomic_max,
+ 2,
+ "AtomicI16::new(0)",
+ i16 AtomicI16 ATOMIC_I16_INIT
+}
+#[cfg(target_has_atomic_load_store = "16")]
+atomic_int! {
+ cfg(target_has_atomic = "16"),
+ cfg(target_has_atomic_equal_alignment = "16"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
+ unstable(feature = "integer_atomics", issue = "99069"),
+ cfg_attr(not(test), rustc_diagnostic_item = "AtomicU16"),
+ "u16",
+ "",
+ atomic_umin, atomic_umax,
+ 2,
+ "AtomicU16::new(0)",
+ u16 AtomicU16 ATOMIC_U16_INIT
+}
+#[cfg(target_has_atomic_load_store = "32")]
+atomic_int! {
+ cfg(target_has_atomic = "32"),
+ cfg(target_has_atomic_equal_alignment = "32"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
+ unstable(feature = "integer_atomics", issue = "99069"),
+ cfg_attr(not(test), rustc_diagnostic_item = "AtomicI32"),
+ "i32",
+ "",
+ atomic_min, atomic_max,
+ 4,
+ "AtomicI32::new(0)",
+ i32 AtomicI32 ATOMIC_I32_INIT
+}
+#[cfg(target_has_atomic_load_store = "32")]
+atomic_int! {
+ cfg(target_has_atomic = "32"),
+ cfg(target_has_atomic_equal_alignment = "32"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
+ unstable(feature = "integer_atomics", issue = "99069"),
+ cfg_attr(not(test), rustc_diagnostic_item = "AtomicU32"),
+ "u32",
+ "",
+ atomic_umin, atomic_umax,
+ 4,
+ "AtomicU32::new(0)",
+ u32 AtomicU32 ATOMIC_U32_INIT
+}
+#[cfg(target_has_atomic_load_store = "64")]
+atomic_int! {
+ cfg(target_has_atomic = "64"),
+ cfg(target_has_atomic_equal_alignment = "64"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
+ unstable(feature = "integer_atomics", issue = "99069"),
+ cfg_attr(not(test), rustc_diagnostic_item = "AtomicI64"),
+ "i64",
+ "",
+ atomic_min, atomic_max,
+ 8,
+ "AtomicI64::new(0)",
+ i64 AtomicI64 ATOMIC_I64_INIT
+}
+#[cfg(target_has_atomic_load_store = "64")]
+atomic_int! {
+ cfg(target_has_atomic = "64"),
+ cfg(target_has_atomic_equal_alignment = "64"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
+ unstable(feature = "integer_atomics", issue = "99069"),
+ cfg_attr(not(test), rustc_diagnostic_item = "AtomicU64"),
+ "u64",
+ "",
+ atomic_umin, atomic_umax,
+ 8,
+ "AtomicU64::new(0)",
+ u64 AtomicU64 ATOMIC_U64_INIT
+}
+#[cfg(target_has_atomic_load_store = "128")]
+atomic_int! {
+ cfg(target_has_atomic = "128"),
+ cfg(target_has_atomic_equal_alignment = "128"),
+ unstable(feature = "integer_atomics", issue = "99069"),
+ unstable(feature = "integer_atomics", issue = "99069"),
+ unstable(feature = "integer_atomics", issue = "99069"),
+ unstable(feature = "integer_atomics", issue = "99069"),
+ unstable(feature = "integer_atomics", issue = "99069"),
+ unstable(feature = "integer_atomics", issue = "99069"),
+ rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
+ unstable(feature = "integer_atomics", issue = "99069"),
+ cfg_attr(not(test), rustc_diagnostic_item = "AtomicI128"),
+ "i128",
+ "#![feature(integer_atomics)]\n\n",
+ atomic_min, atomic_max,
+ 16,
+ "AtomicI128::new(0)",
+ i128 AtomicI128 ATOMIC_I128_INIT
+}
+#[cfg(target_has_atomic_load_store = "128")]
+atomic_int! {
+ cfg(target_has_atomic = "128"),
+ cfg(target_has_atomic_equal_alignment = "128"),
+ unstable(feature = "integer_atomics", issue = "99069"),
+ unstable(feature = "integer_atomics", issue = "99069"),
+ unstable(feature = "integer_atomics", issue = "99069"),
+ unstable(feature = "integer_atomics", issue = "99069"),
+ unstable(feature = "integer_atomics", issue = "99069"),
+ unstable(feature = "integer_atomics", issue = "99069"),
+ rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
+ unstable(feature = "integer_atomics", issue = "99069"),
+ cfg_attr(not(test), rustc_diagnostic_item = "AtomicU128"),
+ "u128",
+ "#![feature(integer_atomics)]\n\n",
+ atomic_umin, atomic_umax,
+ 16,
+ "AtomicU128::new(0)",
+ u128 AtomicU128 ATOMIC_U128_INIT
+}
+
+macro_rules! atomic_int_ptr_sized {
+ ( $($target_pointer_width:literal $align:literal)* ) => { $(
+ #[cfg(target_has_atomic_load_store = "ptr")]
+ #[cfg(target_pointer_width = $target_pointer_width)]
+ atomic_int! {
+ cfg(target_has_atomic = "ptr"),
+ cfg(target_has_atomic_equal_alignment = "ptr"),
+ stable(feature = "rust1", since = "1.0.0"),
+ stable(feature = "extended_compare_and_swap", since = "1.10.0"),
+ stable(feature = "atomic_debug", since = "1.3.0"),
+ stable(feature = "atomic_access", since = "1.15.0"),
+ stable(feature = "atomic_from", since = "1.23.0"),
+ stable(feature = "atomic_nand", since = "1.27.0"),
+ rustc_const_stable(feature = "const_ptr_sized_atomics", since = "1.24.0"),
+ stable(feature = "rust1", since = "1.0.0"),
+ cfg_attr(not(test), rustc_diagnostic_item = "AtomicIsize"),
+ "isize",
+ "",
+ atomic_min, atomic_max,
+ $align,
+ "AtomicIsize::new(0)",
+ isize AtomicIsize ATOMIC_ISIZE_INIT
+ }
+ #[cfg(target_has_atomic_load_store = "ptr")]
+ #[cfg(target_pointer_width = $target_pointer_width)]
+ atomic_int! {
+ cfg(target_has_atomic = "ptr"),
+ cfg(target_has_atomic_equal_alignment = "ptr"),
+ stable(feature = "rust1", since = "1.0.0"),
+ stable(feature = "extended_compare_and_swap", since = "1.10.0"),
+ stable(feature = "atomic_debug", since = "1.3.0"),
+ stable(feature = "atomic_access", since = "1.15.0"),
+ stable(feature = "atomic_from", since = "1.23.0"),
+ stable(feature = "atomic_nand", since = "1.27.0"),
+ rustc_const_stable(feature = "const_ptr_sized_atomics", since = "1.24.0"),
+ stable(feature = "rust1", since = "1.0.0"),
+ cfg_attr(not(test), rustc_diagnostic_item = "AtomicUsize"),
+ "usize",
+ "",
+ atomic_umin, atomic_umax,
+ $align,
+ "AtomicUsize::new(0)",
+ usize AtomicUsize ATOMIC_USIZE_INIT
+ }
+ )* };
+}
+
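+// Instantiate `AtomicIsize` and `AtomicUsize` once per supported pointer
+// width, pairing each `target_pointer_width` string with its alignment in bytes.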
+atomic_int_ptr_sized! {
+ "16" 2
+ "32" 4
+ "64" 8
+}
+
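+/// Maps a success ordering to the strongest failure (load) ordering that is
+/// valid alongside it: `AcqRel` and `Acquire` map to `Acquire`, `Release` and
+/// `Relaxed` map to `Relaxed`, and `SeqCst` maps to `SeqCst`.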
+#[inline]
+#[cfg(target_has_atomic = "8")]
+fn strongest_failure_ordering(order: Ordering) -> Ordering {
+ match order {
+ Release => Relaxed,
+ Relaxed => Relaxed,
+ SeqCst => SeqCst,
+ Acquire => Acquire,
+ AcqRel => Acquire,
+ }
+}
+
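+/// Dispatches a plain atomic store to the matching intrinsic. `Acquire` and
+/// `AcqRel` are rejected because acquire semantics only apply to loads.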
+#[inline]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+unsafe fn atomic_store<T: Copy>(dst: *mut T, val: T, order: Ordering) {
+ // SAFETY: the caller must uphold the safety contract for `atomic_store`.
+ unsafe {
+ match order {
+ Relaxed => intrinsics::atomic_store_relaxed(dst, val),
+ Release => intrinsics::atomic_store_release(dst, val),
+ SeqCst => intrinsics::atomic_store_seqcst(dst, val),
+ Acquire => panic!("there is no such thing as an acquire store"),
+ AcqRel => panic!("there is no such thing as an acquire-release store"),
+ }
+ }
+}
+
+#[inline]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+unsafe fn atomic_load<T: Copy>(dst: *const T, order: Ordering) -> T {
+ // SAFETY: the caller must uphold the safety contract for `atomic_load`.
+ unsafe {
+ match order {
+ Relaxed => intrinsics::atomic_load_relaxed(dst),
+ Acquire => intrinsics::atomic_load_acquire(dst),
+ SeqCst => intrinsics::atomic_load_seqcst(dst),
+ Release => panic!("there is no such thing as a release load"),
+ AcqRel => panic!("there is no such thing as an acquire-release load"),
+ }
+ }
+}
+
+#[inline]
+#[cfg(target_has_atomic = "8")]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+unsafe fn atomic_swap<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
+ // SAFETY: the caller must uphold the safety contract for `atomic_swap`.
+ unsafe {
+ match order {
+ Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
+ Acquire => intrinsics::atomic_xchg_acquire(dst, val),
+ Release => intrinsics::atomic_xchg_release(dst, val),
+ AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
+ SeqCst => intrinsics::atomic_xchg_seqcst(dst, val),
+ }
+ }
+}
+
+/// Returns the previous value (like __sync_fetch_and_add).
+#[inline]
+#[cfg(target_has_atomic = "8")]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
+ // SAFETY: the caller must uphold the safety contract for `atomic_add`.
+ unsafe {
+ match order {
+ Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
+ Acquire => intrinsics::atomic_xadd_acquire(dst, val),
+ Release => intrinsics::atomic_xadd_release(dst, val),
+ AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
+ SeqCst => intrinsics::atomic_xadd_seqcst(dst, val),
+ }
+ }
+}
+
+/// Returns the previous value (like __sync_fetch_and_sub).
+#[inline]
+#[cfg(target_has_atomic = "8")]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
+ // SAFETY: the caller must uphold the safety contract for `atomic_sub`.
+ unsafe {
+ match order {
+ Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
+ Acquire => intrinsics::atomic_xsub_acquire(dst, val),
+ Release => intrinsics::atomic_xsub_release(dst, val),
+ AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
+ SeqCst => intrinsics::atomic_xsub_seqcst(dst, val),
+ }
+ }
+}
+
+#[inline]
+#[cfg(target_has_atomic = "8")]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+unsafe fn atomic_compare_exchange<T: Copy>(
+ dst: *mut T,
+ old: T,
+ new: T,
+ success: Ordering,
+ failure: Ordering,
+) -> Result<T, T> {
+ // SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange`.
+ let (val, ok) = unsafe {
+ match (success, failure) {
+ (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed_relaxed(dst, old, new),
+ #[cfg(not(bootstrap))]
+ (Relaxed, Acquire) => intrinsics::atomic_cxchg_relaxed_acquire(dst, old, new),
+ #[cfg(not(bootstrap))]
+ (Relaxed, SeqCst) => intrinsics::atomic_cxchg_relaxed_seqcst(dst, old, new),
+ (Acquire, Relaxed) => intrinsics::atomic_cxchg_acquire_relaxed(dst, old, new),
+ (Acquire, Acquire) => intrinsics::atomic_cxchg_acquire_acquire(dst, old, new),
+ #[cfg(not(bootstrap))]
+ (Acquire, SeqCst) => intrinsics::atomic_cxchg_acquire_seqcst(dst, old, new),
+ (Release, Relaxed) => intrinsics::atomic_cxchg_release_relaxed(dst, old, new),
+ #[cfg(not(bootstrap))]
+ (Release, Acquire) => intrinsics::atomic_cxchg_release_acquire(dst, old, new),
+ #[cfg(not(bootstrap))]
+ (Release, SeqCst) => intrinsics::atomic_cxchg_release_seqcst(dst, old, new),
+ (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_relaxed(dst, old, new),
+ (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel_acquire(dst, old, new),
+ #[cfg(not(bootstrap))]
+ (AcqRel, SeqCst) => intrinsics::atomic_cxchg_acqrel_seqcst(dst, old, new),
+ (SeqCst, Relaxed) => intrinsics::atomic_cxchg_seqcst_relaxed(dst, old, new),
+ (SeqCst, Acquire) => intrinsics::atomic_cxchg_seqcst_acquire(dst, old, new),
+ (SeqCst, SeqCst) => intrinsics::atomic_cxchg_seqcst_seqcst(dst, old, new),
+ (_, AcqRel) => panic!("there is no such thing as an acquire-release failure ordering"),
+ (_, Release) => panic!("there is no such thing as a release failure ordering"),
+ #[cfg(bootstrap)]
+ _ => panic!("a failure ordering can't be stronger than a success ordering"),
+ }
+ };
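+ // `ok` reports whether the exchange took place; `val` is the value that was
+ // observed either way.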
+ if ok { Ok(val) } else { Err(val) }
+}
+
+#[inline]
+#[cfg(target_has_atomic = "8")]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+unsafe fn atomic_compare_exchange_weak<T: Copy>(
+ dst: *mut T,
+ old: T,
+ new: T,
+ success: Ordering,
+ failure: Ordering,
+) -> Result<T, T> {
+ // SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange_weak`.
+ let (val, ok) = unsafe {
+ match (success, failure) {
+ (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed_relaxed(dst, old, new),
+ #[cfg(not(bootstrap))]
+ (Relaxed, Acquire) => intrinsics::atomic_cxchgweak_relaxed_acquire(dst, old, new),
+ #[cfg(not(bootstrap))]
+ (Relaxed, SeqCst) => intrinsics::atomic_cxchgweak_relaxed_seqcst(dst, old, new),
+ (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acquire_relaxed(dst, old, new),
+ (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acquire_acquire(dst, old, new),
+ #[cfg(not(bootstrap))]
+ (Acquire, SeqCst) => intrinsics::atomic_cxchgweak_acquire_seqcst(dst, old, new),
+ (Release, Relaxed) => intrinsics::atomic_cxchgweak_release_relaxed(dst, old, new),
+ #[cfg(not(bootstrap))]
+ (Release, Acquire) => intrinsics::atomic_cxchgweak_release_acquire(dst, old, new),
+ #[cfg(not(bootstrap))]
+ (Release, SeqCst) => intrinsics::atomic_cxchgweak_release_seqcst(dst, old, new),
+ (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_relaxed(dst, old, new),
+ (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel_acquire(dst, old, new),
+ #[cfg(not(bootstrap))]
+ (AcqRel, SeqCst) => intrinsics::atomic_cxchgweak_acqrel_seqcst(dst, old, new),
+ (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_seqcst_relaxed(dst, old, new),
+ (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_seqcst_acquire(dst, old, new),
+ (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak_seqcst_seqcst(dst, old, new),
+ (_, AcqRel) => panic!("there is no such thing as an acquire-release failure ordering"),
+ (_, Release) => panic!("there is no such thing as a release failure ordering"),
+ #[cfg(bootstrap)]
+ _ => panic!("a failure ordering can't be stronger than a success ordering"),
+ }
+ };
+ if ok { Ok(val) } else { Err(val) }
+}
+
+#[inline]
+#[cfg(target_has_atomic = "8")]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+unsafe fn atomic_and<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
+ // SAFETY: the caller must uphold the safety contract for `atomic_and`
+ unsafe {
+ match order {
+ Relaxed => intrinsics::atomic_and_relaxed(dst, val),
+ Acquire => intrinsics::atomic_and_acquire(dst, val),
+ Release => intrinsics::atomic_and_release(dst, val),
+ AcqRel => intrinsics::atomic_and_acqrel(dst, val),
+ SeqCst => intrinsics::atomic_and_seqcst(dst, val),
+ }
+ }
+}
+
+#[inline]
+#[cfg(target_has_atomic = "8")]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+unsafe fn atomic_nand<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
+ // SAFETY: the caller must uphold the safety contract for `atomic_nand`
+ unsafe {
+ match order {
+ Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
+ Acquire => intrinsics::atomic_nand_acquire(dst, val),
+ Release => intrinsics::atomic_nand_release(dst, val),
+ AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
+ SeqCst => intrinsics::atomic_nand_seqcst(dst, val),
+ }
+ }
+}
+
+#[inline]
+#[cfg(target_has_atomic = "8")]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+unsafe fn atomic_or<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
+ // SAFETY: the caller must uphold the safety contract for `atomic_or`
+ unsafe {
+ match order {
+ SeqCst => intrinsics::atomic_or_seqcst(dst, val),
+ Acquire => intrinsics::atomic_or_acquire(dst, val),
+ Release => intrinsics::atomic_or_release(dst, val),
+ AcqRel => intrinsics::atomic_or_acqrel(dst, val),
+ Relaxed => intrinsics::atomic_or_relaxed(dst, val),
+ }
+ }
+}
+
+#[inline]
+#[cfg(target_has_atomic = "8")]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+unsafe fn atomic_xor<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
+ // SAFETY: the caller must uphold the safety contract for `atomic_xor`
+ unsafe {
+ match order {
+ SeqCst => intrinsics::atomic_xor_seqcst(dst, val),
+ Acquire => intrinsics::atomic_xor_acquire(dst, val),
+ Release => intrinsics::atomic_xor_release(dst, val),
+ AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
+ Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
+ }
+ }
+}
+
+/// Returns the max value (signed comparison).
+#[inline]
+#[cfg(target_has_atomic = "8")]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+unsafe fn atomic_max<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
+ // SAFETY: the caller must uphold the safety contract for `atomic_max`
+ unsafe {
+ match order {
+ Relaxed => intrinsics::atomic_max_relaxed(dst, val),
+ Acquire => intrinsics::atomic_max_acquire(dst, val),
+ Release => intrinsics::atomic_max_release(dst, val),
+ AcqRel => intrinsics::atomic_max_acqrel(dst, val),
+ SeqCst => intrinsics::atomic_max_seqcst(dst, val),
+ }
+ }
+}
+
+/// Returns the min value (signed comparison).
+#[inline]
+#[cfg(target_has_atomic = "8")]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+unsafe fn atomic_min<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
+ // SAFETY: the caller must uphold the safety contract for `atomic_min`
+ unsafe {
+ match order {
+ Relaxed => intrinsics::atomic_min_relaxed(dst, val),
+ Acquire => intrinsics::atomic_min_acquire(dst, val),
+ Release => intrinsics::atomic_min_release(dst, val),
+ AcqRel => intrinsics::atomic_min_acqrel(dst, val),
+ SeqCst => intrinsics::atomic_min_seqcst(dst, val),
+ }
+ }
+}
+
+/// Returns the max value (unsigned comparison).
+#[inline]
+#[cfg(target_has_atomic = "8")]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+unsafe fn atomic_umax<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
+ // SAFETY: the caller must uphold the safety contract for `atomic_umax`
+ unsafe {
+ match order {
+ Relaxed => intrinsics::atomic_umax_relaxed(dst, val),
+ Acquire => intrinsics::atomic_umax_acquire(dst, val),
+ Release => intrinsics::atomic_umax_release(dst, val),
+ AcqRel => intrinsics::atomic_umax_acqrel(dst, val),
+ SeqCst => intrinsics::atomic_umax_seqcst(dst, val),
+ }
+ }
+}
+
+/// Returns the min value (unsigned comparison).
+#[inline]
+#[cfg(target_has_atomic = "8")]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+unsafe fn atomic_umin<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
+ // SAFETY: the caller must uphold the safety contract for `atomic_umin`
+ unsafe {
+ match order {
+ Relaxed => intrinsics::atomic_umin_relaxed(dst, val),
+ Acquire => intrinsics::atomic_umin_acquire(dst, val),
+ Release => intrinsics::atomic_umin_release(dst, val),
+ AcqRel => intrinsics::atomic_umin_acqrel(dst, val),
+ SeqCst => intrinsics::atomic_umin_seqcst(dst, val),
+ }
+ }
+}
+
+/// An atomic fence.
+///
+/// Depending on the specified order, a fence prevents the compiler and CPU from
+/// reordering certain types of memory operations around it.
+/// That creates synchronizes-with relationships between it and atomic operations
+/// or fences in other threads.
+///
+/// A fence 'A' which has (at least) [`Release`] ordering semantics synchronizes
+/// with a fence 'B' with (at least) [`Acquire`] semantics if and only if there
+/// exist operations X and Y, both operating on some atomic object 'M' such
+/// that A is sequenced before X, Y is sequenced before B and Y observes
+/// the change to M. This provides a happens-before dependence between A and B.
+///
+/// ```text
+/// Thread 1 Thread 2
+///
+/// fence(Release); A --------------
+/// x.store(3, Relaxed); X --------- |
+/// | |
+/// | |
+/// -------------> Y if x.load(Relaxed) == 3 {
+/// |-------> B fence(Acquire);
+/// ...
+/// }
+/// ```
+///
+/// Atomic operations with [`Release`] or [`Acquire`] semantics can also synchronize
+/// with a fence.
+///
+/// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`]
+/// and [`Release`] semantics, participates in the global program order of the
+/// other [`SeqCst`] operations and/or fences.
+///
+/// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings.
+///
+/// # Panics
+///
+/// Panics if `order` is [`Relaxed`].
+///
+/// # Examples
+///
+/// ```
+/// use std::sync::atomic::AtomicBool;
+/// use std::sync::atomic::fence;
+/// use std::sync::atomic::Ordering;
+///
+/// // A mutual exclusion primitive based on a spinlock.
+/// pub struct Mutex {
+/// flag: AtomicBool,
+/// }
+///
+/// impl Mutex {
+/// pub fn new() -> Mutex {
+/// Mutex {
+/// flag: AtomicBool::new(false),
+/// }
+/// }
+///
+/// pub fn lock(&self) {
+/// // Wait until the old value is `false`.
+/// while self
+/// .flag
+/// .compare_exchange_weak(false, true, Ordering::Relaxed, Ordering::Relaxed)
+/// .is_err()
+/// {}
+/// // This fence synchronizes-with the store in `unlock`.
+/// fence(Ordering::Acquire);
+/// }
+///
+/// pub fn unlock(&self) {
+/// self.flag.store(false, Ordering::Release);
+/// }
+/// }
+/// ```
+#[inline]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_diagnostic_item = "fence"]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+pub fn fence(order: Ordering) {
+ // SAFETY: using an atomic fence is safe.
+ unsafe {
+ match order {
+ Acquire => intrinsics::atomic_fence_acquire(),
+ Release => intrinsics::atomic_fence_release(),
+ AcqRel => intrinsics::atomic_fence_acqrel(),
+ SeqCst => intrinsics::atomic_fence_seqcst(),
+ Relaxed => panic!("there is no such thing as a relaxed fence"),
+ }
+ }
+}
+
+/// A compiler memory fence.
+///
+/// `compiler_fence` does not emit any machine code, but restricts the kinds
+/// of memory re-ordering the compiler is allowed to do. Specifically, depending on
+/// the given [`Ordering`] semantics, the compiler may be disallowed from moving reads
+/// or writes from before or after the call to the other side of the call to
+/// `compiler_fence`. Note that it does **not** prevent the *hardware*
+/// from doing such re-ordering. This is not a problem in a single-threaded
+/// execution context, but when other threads may modify memory at the same
+/// time, stronger synchronization primitives such as [`fence`] are required.
+///
+/// The re-orderings prevented by the different ordering semantics are:
+///
+/// - with [`SeqCst`], no re-ordering of reads and writes across this point is allowed.
+/// - with [`Release`], preceding reads and writes cannot be moved past subsequent writes.
+/// - with [`Acquire`], subsequent reads and writes cannot be moved ahead of preceding reads.
+/// - with [`AcqRel`], both of the above rules are enforced.
+///
+/// `compiler_fence` is generally only useful for preventing a thread from
+/// racing *with itself*: a given thread executes one piece of code, is then
+/// interrupted, and starts executing code elsewhere (while still in the same
+/// thread, and conceptually still on the same core). In traditional
+/// programs, this can only occur when a signal
+/// handler is registered. In more low-level code, such situations can also
+/// arise when handling interrupts, when implementing green threads with
+/// pre-emption, etc. Curious readers are encouraged to read the Linux kernel's
+/// discussion of [memory barriers].
+///
+/// # Panics
+///
+/// Panics if `order` is [`Relaxed`].
+///
+/// # Examples
+///
+/// Without `compiler_fence`, the `assert_eq!` in the following code
+/// is *not* guaranteed to succeed, despite everything happening in a single thread.
+/// To see why, remember that the compiler is free to swap the stores to
+/// `IMPORTANT_VARIABLE` and `IS_READY` since they are both
+/// `Ordering::Relaxed`. If it does, and the signal handler is invoked right
+/// after `IS_READY` is updated, then the signal handler will see
+/// `IS_READY=1`, but `IMPORTANT_VARIABLE=0`.
+/// Using a `compiler_fence` remedies this situation.
+///
+/// ```
+/// use std::sync::atomic::{AtomicBool, AtomicUsize};
+/// use std::sync::atomic::Ordering;
+/// use std::sync::atomic::compiler_fence;
+///
+/// static IMPORTANT_VARIABLE: AtomicUsize = AtomicUsize::new(0);
+/// static IS_READY: AtomicBool = AtomicBool::new(false);
+///
+/// fn main() {
+/// IMPORTANT_VARIABLE.store(42, Ordering::Relaxed);
+/// // prevent earlier writes from being moved beyond this point
+/// compiler_fence(Ordering::Release);
+/// IS_READY.store(true, Ordering::Relaxed);
+/// }
+///
+/// fn signal_handler() {
+/// if IS_READY.load(Ordering::Relaxed) {
+/// assert_eq!(IMPORTANT_VARIABLE.load(Ordering::Relaxed), 42);
+/// }
+/// }
+/// ```
+///
+/// [memory barriers]: https://www.kernel.org/doc/Documentation/memory-barriers.txt
+#[inline]
+#[stable(feature = "compiler_fences", since = "1.21.0")]
+#[rustc_diagnostic_item = "compiler_fence"]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+pub fn compiler_fence(order: Ordering) {
+ // SAFETY: using an atomic fence is safe.
+ unsafe {
+ match order {
+ Acquire => intrinsics::atomic_singlethreadfence_acquire(),
+ Release => intrinsics::atomic_singlethreadfence_release(),
+ AcqRel => intrinsics::atomic_singlethreadfence_acqrel(),
+ SeqCst => intrinsics::atomic_singlethreadfence_seqcst(),
+ Relaxed => panic!("there is no such thing as a relaxed compiler fence"),
+ }
+ }
+}
+
+#[cfg(target_has_atomic_load_store = "8")]
+#[stable(feature = "atomic_debug", since = "1.3.0")]
+impl fmt::Debug for AtomicBool {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&self.load(Ordering::Relaxed), f)
+ }
+}
+
+#[cfg(target_has_atomic_load_store = "ptr")]
+#[stable(feature = "atomic_debug", since = "1.3.0")]
+impl<T> fmt::Debug for AtomicPtr<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&self.load(Ordering::Relaxed), f)
+ }
+}
+
+#[cfg(target_has_atomic_load_store = "ptr")]
+#[stable(feature = "atomic_pointer", since = "1.24.0")]
+impl<T> fmt::Pointer for AtomicPtr<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Pointer::fmt(&self.load(Ordering::SeqCst), f)
+ }
+}
+
+/// Signals the processor that it is inside a busy-wait spin-loop ("spin lock").
+///
+/// This function is deprecated in favor of [`hint::spin_loop`].
+///
+/// [`hint::spin_loop`]: crate::hint::spin_loop
+#[inline]
+#[stable(feature = "spin_loop_hint", since = "1.24.0")]
+#[deprecated(since = "1.51.0", note = "use hint::spin_loop instead")]
+pub fn spin_loop_hint() {
+ spin_loop()
+}
diff --git a/library/core/src/sync/exclusive.rs b/library/core/src/sync/exclusive.rs
new file mode 100644
index 000000000..a7519ab5a
--- /dev/null
+++ b/library/core/src/sync/exclusive.rs
@@ -0,0 +1,173 @@
+//! Defines [`Exclusive`].
+
+use core::fmt;
+use core::future::Future;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+
+/// `Exclusive` provides only _mutable_ access, also referred to as _exclusive_
+/// access, to the underlying value. It provides no _immutable_ or _shared_
+/// access to the underlying value.
+///
+/// While this may not seem very useful, it allows `Exclusive` to _unconditionally_
+/// implement [`Sync`]. Indeed, the safety requirements of `Sync` state that for `Exclusive`
+/// to be `Sync`, it must be sound to _share_ across threads, that is, it must be sound
+/// for `&Exclusive` to cross thread boundaries. By design, a `&Exclusive` has no API
+/// whatsoever, making it useless, thus harmless, thus memory safe.
+///
+/// Certain constructs like [`Future`]s can only be used with _exclusive_ access,
+/// and are often `Send` but not `Sync`, so `Exclusive` can be used as a hint to the
+/// Rust compiler that something is `Sync` in practice.
+///
+/// ## Examples
+/// Using a non-`Sync` future prevents the wrapping struct from being `Sync`
+/// ```compile_fail
+/// use core::cell::Cell;
+///
+/// async fn other() {}
+/// fn assert_sync<T: Sync>(t: T) {}
+/// struct State<F> {
+/// future: F
+/// }
+///
+/// assert_sync(State {
+/// future: async {
+/// let cell = Cell::new(1);
+/// let cell_ref = &cell;
+/// other().await;
+/// let value = cell_ref.get();
+/// }
+/// });
+/// ```
+///
+/// `Exclusive` ensures the struct is `Sync` without stripping the future of its
+/// functionality.
+/// ```
+/// #![feature(exclusive_wrapper)]
+/// use core::cell::Cell;
+/// use core::sync::Exclusive;
+///
+/// async fn other() {}
+/// fn assert_sync<T: Sync>(t: T) {}
+/// struct State<F> {
+/// future: Exclusive<F>
+/// }
+///
+/// assert_sync(State {
+/// future: Exclusive::new(async {
+/// let cell = Cell::new(1);
+/// let cell_ref = &cell;
+/// other().await;
+/// let value = cell_ref.get();
+/// })
+/// });
+/// ```
+///
+/// ## Parallels with a mutex
+/// In some sense, `Exclusive` can be thought of as a _compile-time_ version of
+/// a mutex, as the borrow-checker guarantees that only one `&mut` can exist
+/// for any value. This parallels the way `&` and `&mut` references together
+/// can be thought of as a _compile-time_ version of a read-write lock.
+///
+/// [`Sync`]: core::marker::Sync
+#[unstable(feature = "exclusive_wrapper", issue = "98407")]
+#[doc(alias = "SyncWrapper")]
+#[doc(alias = "SyncCell")]
+#[doc(alias = "Unique")]
+// `Exclusive` can't have `PartialOrd`, `Clone`, etc. impls as they would
+// use `&` access to the inner value, violating the `Sync` impl's safety
+// requirements.
+#[derive(Default)]
+#[repr(transparent)]
+pub struct Exclusive<T: ?Sized> {
+ inner: T,
+}
+
+// See `Exclusive`'s docs for justification.
+#[unstable(feature = "exclusive_wrapper", issue = "98407")]
+unsafe impl<T: ?Sized> Sync for Exclusive<T> {}
+
+#[unstable(feature = "exclusive_wrapper", issue = "98407")]
+impl<T: ?Sized> fmt::Debug for Exclusive<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
+ f.debug_struct("Exclusive").finish_non_exhaustive()
+ }
+}
+
+impl<T: Sized> Exclusive<T> {
+ /// Wrap a value in an `Exclusive`.
+ #[unstable(feature = "exclusive_wrapper", issue = "98407")]
+ #[must_use]
+ pub const fn new(t: T) -> Self {
+ Self { inner: t }
+ }
+
+ /// Unwrap the value contained in the `Exclusive`.
+ #[unstable(feature = "exclusive_wrapper", issue = "98407")]
+ #[must_use]
+ pub const fn into_inner(self) -> T {
+ self.inner
+ }
+}
+
+impl<T: ?Sized> Exclusive<T> {
+ /// Get exclusive access to the underlying value.
+ #[unstable(feature = "exclusive_wrapper", issue = "98407")]
+ #[must_use]
+ pub const fn get_mut(&mut self) -> &mut T {
+ &mut self.inner
+ }
+
+ /// Get pinned exclusive access to the underlying value.
+ ///
+ /// `Exclusive` is considered to _structurally pin_ the underlying
+ /// value, which means _unpinned_ `Exclusive`s can produce _unpinned_
+ /// access to the underlying value, but _pinned_ `Exclusive`s only
+ /// produce _pinned_ access to the underlying value.
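+ ///
+ /// For instance, a minimal sketch with an [`Unpin`] payload, so the pin
+ /// itself can be created safely:
+ ///
+ /// ```
+ /// #![feature(exclusive_wrapper)]
+ /// use core::pin::Pin;
+ /// use core::sync::Exclusive;
+ ///
+ /// let mut excl = Exclusive::new(5);
+ /// let mut pinned = Pin::new(&mut excl);
+ /// *pinned.as_mut().get_pin_mut() = 6;
+ /// assert_eq!(*excl.get_mut(), 6);
+ /// ```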
+ #[unstable(feature = "exclusive_wrapper", issue = "98407")]
+ #[must_use]
+ pub const fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut T> {
+ // SAFETY: `Exclusive` can only produce `&mut T` if itself is unpinned
+ // `Pin::map_unchecked_mut` is not const, so we do this conversion manually
+ unsafe { Pin::new_unchecked(&mut self.get_unchecked_mut().inner) }
+ }
+
+ /// Build a _mutable_ reference to an `Exclusive<T>` from
+ /// a _mutable_ reference to a `T`. This allows you to skip
+ /// building an `Exclusive` with [`Exclusive::new`].
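+ ///
+ /// A minimal sketch of the round trip (the names here are illustrative):
+ ///
+ /// ```
+ /// #![feature(exclusive_wrapper)]
+ /// use core::sync::Exclusive;
+ ///
+ /// let mut value = 0;
+ /// let excl = Exclusive::from_mut(&mut value);
+ /// *excl.get_mut() += 1;
+ /// assert_eq!(value, 1);
+ /// ```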
+ #[unstable(feature = "exclusive_wrapper", issue = "98407")]
+ #[must_use]
+ pub const fn from_mut(r: &'_ mut T) -> &'_ mut Exclusive<T> {
+ // SAFETY: `Exclusive<T>` is `#[repr(transparent)]`, so the references have the same layout; and `Exclusive` properties are `&mut`-agnostic
+ unsafe { &mut *(r as *mut T as *mut Exclusive<T>) }
+ }
+
+ /// Build a _pinned mutable_ reference to an `Exclusive<T>` from
+ /// a _pinned mutable_ reference to a `T`. This allows you to skip
+ /// building an `Exclusive` with [`Exclusive::new`].
+ #[unstable(feature = "exclusive_wrapper", issue = "98407")]
+ #[must_use]
+ pub const fn from_pin_mut(r: Pin<&'_ mut T>) -> Pin<&'_ mut Exclusive<T>> {
+ // SAFETY: `Exclusive` can only produce `&mut T` if itself is unpinned
+ // `Pin::map_unchecked_mut` is not const, so we do this conversion manually
+ unsafe { Pin::new_unchecked(Self::from_mut(r.get_unchecked_mut())) }
+ }
+}
+
+#[unstable(feature = "exclusive_wrapper", issue = "98407")]
+impl<T> From<T> for Exclusive<T> {
+ fn from(t: T) -> Self {
+ Self::new(t)
+ }
+}
+
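+// Forwarding `Future` is sound because polling only requires `Pin<&mut Self>`,
+// i.e. exclusive access, which `Exclusive` always hands out.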
+#[unstable(feature = "exclusive_wrapper", issue = "98407")]
+impl<T: Future + ?Sized> Future for Exclusive<T> {
+ type Output = T::Output;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ self.get_pin_mut().poll(cx)
+ }
+}
diff --git a/library/core/src/sync/mod.rs b/library/core/src/sync/mod.rs
new file mode 100644
index 000000000..4365e4cb2
--- /dev/null
+++ b/library/core/src/sync/mod.rs
@@ -0,0 +1,8 @@
+//! Synchronization primitives
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+pub mod atomic;
+mod exclusive;
+#[unstable(feature = "exclusive_wrapper", issue = "98407")]
+pub use exclusive::Exclusive;
diff --git a/library/core/src/task/mod.rs b/library/core/src/task/mod.rs
new file mode 100644
index 000000000..c5f89b9a2
--- /dev/null
+++ b/library/core/src/task/mod.rs
@@ -0,0 +1,17 @@
+#![stable(feature = "futures_api", since = "1.36.0")]
+
+//! Types and Traits for working with asynchronous tasks.
+
+mod poll;
+#[stable(feature = "futures_api", since = "1.36.0")]
+pub use self::poll::Poll;
+
+mod wake;
+#[stable(feature = "futures_api", since = "1.36.0")]
+pub use self::wake::{Context, RawWaker, RawWakerVTable, Waker};
+
+mod ready;
+#[stable(feature = "ready_macro", since = "1.64.0")]
+pub use ready::ready;
+#[unstable(feature = "poll_ready", issue = "89780")]
+pub use ready::Ready;
diff --git a/library/core/src/task/poll.rs b/library/core/src/task/poll.rs
new file mode 100644
index 000000000..41f0a25db
--- /dev/null
+++ b/library/core/src/task/poll.rs
@@ -0,0 +1,320 @@
+#![stable(feature = "futures_api", since = "1.36.0")]
+
+use crate::convert;
+use crate::ops::{self, ControlFlow};
+use crate::result::Result;
+use crate::task::Ready;
+
+/// Indicates whether a value is available or if the current task has been
+/// scheduled to receive a wakeup instead.
+#[must_use = "this `Poll` may be a `Pending` variant, which should be handled"]
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
+#[stable(feature = "futures_api", since = "1.36.0")]
+pub enum Poll<T> {
+ /// Represents that a value is immediately ready.
+ #[lang = "Ready"]
+ #[stable(feature = "futures_api", since = "1.36.0")]
+ Ready(#[stable(feature = "futures_api", since = "1.36.0")] T),
+
+ /// Represents that a value is not ready yet.
+ ///
+ /// When a function returns `Pending`, the function *must* also
+ /// ensure that the current task is scheduled to be awoken when
+ /// progress can be made.
+ #[lang = "Pending"]
+ #[stable(feature = "futures_api", since = "1.36.0")]
+ Pending,
+}
+
+impl<T> Poll<T> {
+ /// Maps a `Poll<T>` to `Poll<U>` by applying a function to a contained value.
+ ///
+ /// # Examples
+ ///
+ /// Converts a <code>Poll<[String]></code> into a <code>Poll<[usize]></code>, consuming
+ /// the original:
+ ///
+ /// [String]: ../../std/string/struct.String.html "String"
+ /// ```
+ /// # use core::task::Poll;
+ /// let poll_some_string = Poll::Ready(String::from("Hello, World!"));
+ /// // `Poll::map` takes self *by value*, consuming `poll_some_string`
+ /// let poll_some_len = poll_some_string.map(|s| s.len());
+ ///
+ /// assert_eq!(poll_some_len, Poll::Ready(13));
+ /// ```
+ #[stable(feature = "futures_api", since = "1.36.0")]
+ pub fn map<U, F>(self, f: F) -> Poll<U>
+ where
+ F: FnOnce(T) -> U,
+ {
+ match self {
+ Poll::Ready(t) => Poll::Ready(f(t)),
+ Poll::Pending => Poll::Pending,
+ }
+ }
+
+ /// Returns `true` if the poll is a [`Poll::Ready`] value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use core::task::Poll;
+ /// let x: Poll<u32> = Poll::Ready(2);
+ /// assert_eq!(x.is_ready(), true);
+ ///
+ /// let x: Poll<u32> = Poll::Pending;
+ /// assert_eq!(x.is_ready(), false);
+ /// ```
+ #[inline]
+ #[rustc_const_stable(feature = "const_poll", since = "1.49.0")]
+ #[stable(feature = "futures_api", since = "1.36.0")]
+ pub const fn is_ready(&self) -> bool {
+ matches!(*self, Poll::Ready(_))
+ }
+
+ /// Returns `true` if the poll is a [`Pending`] value.
+ ///
+ /// [`Pending`]: Poll::Pending
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use core::task::Poll;
+ /// let x: Poll<u32> = Poll::Ready(2);
+ /// assert_eq!(x.is_pending(), false);
+ ///
+ /// let x: Poll<u32> = Poll::Pending;
+ /// assert_eq!(x.is_pending(), true);
+ /// ```
+ #[inline]
+ #[rustc_const_stable(feature = "const_poll", since = "1.49.0")]
+ #[stable(feature = "futures_api", since = "1.36.0")]
+ pub const fn is_pending(&self) -> bool {
+ !self.is_ready()
+ }
+
+ /// Extracts the successful type of a [`Poll<T>`].
+ ///
+ /// When combined with the `?` operator, this function will
+ /// propagate any [`Poll::Pending`] values to the caller, and
+ /// extract the `T` from [`Poll::Ready`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(poll_ready)]
+ ///
+ /// use std::task::{Context, Poll};
+ /// use std::future::{self, Future};
+ /// use std::pin::Pin;
+ ///
+ /// pub fn do_poll(cx: &mut Context<'_>) -> Poll<()> {
+ /// let mut fut = future::ready(42);
+ /// let fut = Pin::new(&mut fut);
+ ///
+ /// let num = fut.poll(cx).ready()?;
+ /// # drop(num);
+ /// // ... use num
+ ///
+ /// Poll::Ready(())
+ /// }
+ /// ```
+ #[inline]
+ #[unstable(feature = "poll_ready", issue = "89780")]
+ pub fn ready(self) -> Ready<T> {
+ Ready(self)
+ }
+}
+
+impl<T, E> Poll<Result<T, E>> {
+ /// Maps a `Poll<Result<T, E>>` to `Poll<Result<U, E>>` by applying a
+ /// function to a contained `Poll::Ready(Ok)` value, leaving all other
+ /// variants untouched.
+ ///
+ /// This function can be used to compose the results of two functions.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use core::task::Poll;
+ /// let res: Poll<Result<u8, _>> = Poll::Ready("12".parse());
+ /// let squared = res.map_ok(|n| n * n);
+ /// assert_eq!(squared, Poll::Ready(Ok(144)));
+ /// ```
+ #[stable(feature = "futures_api", since = "1.36.0")]
+ pub fn map_ok<U, F>(self, f: F) -> Poll<Result<U, E>>
+ where
+ F: FnOnce(T) -> U,
+ {
+ match self {
+ Poll::Ready(Ok(t)) => Poll::Ready(Ok(f(t))),
+ Poll::Ready(Err(e)) => Poll::Ready(Err(e)),
+ Poll::Pending => Poll::Pending,
+ }
+ }
+
+ /// Maps a `Poll::Ready<Result<T, E>>` to `Poll::Ready<Result<T, F>>` by
+ /// applying a function to a contained `Poll::Ready(Err)` value, leaving all other
+ /// variants untouched.
+ ///
+ /// This function can be used to pass through a successful result while handling
+ /// an error.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use core::task::Poll;
+ /// let res: Poll<Result<u8, _>> = Poll::Ready("oops".parse());
+ /// let res = res.map_err(|_| 0_u8);
+ /// assert_eq!(res, Poll::Ready(Err(0)));
+ /// ```
+ #[stable(feature = "futures_api", since = "1.36.0")]
+ pub fn map_err<U, F>(self, f: F) -> Poll<Result<T, U>>
+ where
+ F: FnOnce(E) -> U,
+ {
+ match self {
+ Poll::Ready(Ok(t)) => Poll::Ready(Ok(t)),
+ Poll::Ready(Err(e)) => Poll::Ready(Err(f(e))),
+ Poll::Pending => Poll::Pending,
+ }
+ }
+}
+
+impl<T, E> Poll<Option<Result<T, E>>> {
+ /// Maps a `Poll<Option<Result<T, E>>>` to `Poll<Option<Result<U, E>>>` by
+ /// applying a function to a contained `Poll::Ready(Some(Ok))` value,
+ /// leaving all other variants untouched.
+ ///
+ /// This function can be used to compose the results of two functions.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use core::task::Poll;
+ /// let res: Poll<Option<Result<u8, _>>> = Poll::Ready(Some("12".parse()));
+ /// let squared = res.map_ok(|n| n * n);
+ /// assert_eq!(squared, Poll::Ready(Some(Ok(144))));
+ /// ```
+ #[stable(feature = "poll_map", since = "1.51.0")]
+ pub fn map_ok<U, F>(self, f: F) -> Poll<Option<Result<U, E>>>
+ where
+ F: FnOnce(T) -> U,
+ {
+ match self {
+ Poll::Ready(Some(Ok(t))) => Poll::Ready(Some(Ok(f(t)))),
+ Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))),
+ Poll::Ready(None) => Poll::Ready(None),
+ Poll::Pending => Poll::Pending,
+ }
+ }
+
+    /// Maps a `Poll<Option<Result<T, E>>>` to `Poll<Option<Result<T, U>>>`
+    /// by applying a function to a contained `Poll::Ready(Some(Err))` value,
+    /// leaving all other variants untouched.
+ ///
+ /// This function can be used to pass through a successful result while handling
+ /// an error.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use core::task::Poll;
+ /// let res: Poll<Option<Result<u8, _>>> = Poll::Ready(Some("oops".parse()));
+ /// let res = res.map_err(|_| 0_u8);
+ /// assert_eq!(res, Poll::Ready(Some(Err(0))));
+ /// ```
+ #[stable(feature = "poll_map", since = "1.51.0")]
+ pub fn map_err<U, F>(self, f: F) -> Poll<Option<Result<T, U>>>
+ where
+ F: FnOnce(E) -> U,
+ {
+ match self {
+ Poll::Ready(Some(Ok(t))) => Poll::Ready(Some(Ok(t))),
+ Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(f(e)))),
+ Poll::Ready(None) => Poll::Ready(None),
+ Poll::Pending => Poll::Pending,
+ }
+ }
+}
+
+#[stable(feature = "futures_api", since = "1.36.0")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
+impl<T> const From<T> for Poll<T> {
+ /// Moves the value into a [`Poll::Ready`] to make a `Poll<T>`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use core::task::Poll;
+ /// assert_eq!(Poll::from(true), Poll::Ready(true));
+ /// ```
+ fn from(t: T) -> Poll<T> {
+ Poll::Ready(t)
+ }
+}
+
+#[unstable(feature = "try_trait_v2", issue = "84277")]
+impl<T, E> ops::Try for Poll<Result<T, E>> {
+ type Output = Poll<T>;
+ type Residual = Result<convert::Infallible, E>;
+
+ #[inline]
+ fn from_output(c: Self::Output) -> Self {
+ c.map(Ok)
+ }
+
+ #[inline]
+ fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
+ match self {
+ Poll::Ready(Ok(x)) => ControlFlow::Continue(Poll::Ready(x)),
+ Poll::Ready(Err(e)) => ControlFlow::Break(Err(e)),
+ Poll::Pending => ControlFlow::Continue(Poll::Pending),
+ }
+ }
+}
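+
+// A note on the `Try` impl above (an added sketch, not part of the original
+// sources): `?` applied to a `Poll<Result<T, E>>` yields a `Poll<T>`,
+// returning `Poll::Ready(Err(..))` to the caller early on error. The names
+// below are illustrative only:
+//
+//     fn poll_len<E>(p: Poll<Result<&str, E>>) -> Poll<Result<usize, E>> {
+//         let s = match p? {
+//             Poll::Ready(s) => s,
+//             Poll::Pending => return Poll::Pending,
+//         };
+//         Poll::Ready(Ok(s.len()))
+//     }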
+
+#[unstable(feature = "try_trait_v2", issue = "84277")]
+impl<T, E, F: From<E>> ops::FromResidual<Result<convert::Infallible, E>> for Poll<Result<T, F>> {
+ #[inline]
+ fn from_residual(x: Result<convert::Infallible, E>) -> Self {
+ match x {
+ Err(e) => Poll::Ready(Err(From::from(e))),
+ }
+ }
+}
+
+#[unstable(feature = "try_trait_v2", issue = "84277")]
+impl<T, E> ops::Try for Poll<Option<Result<T, E>>> {
+ type Output = Poll<Option<T>>;
+ type Residual = Result<convert::Infallible, E>;
+
+ #[inline]
+ fn from_output(c: Self::Output) -> Self {
+ c.map(|x| x.map(Ok))
+ }
+
+ #[inline]
+ fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
+ match self {
+ Poll::Ready(Some(Ok(x))) => ControlFlow::Continue(Poll::Ready(Some(x))),
+ Poll::Ready(Some(Err(e))) => ControlFlow::Break(Err(e)),
+ Poll::Ready(None) => ControlFlow::Continue(Poll::Ready(None)),
+ Poll::Pending => ControlFlow::Continue(Poll::Pending),
+ }
+ }
+}
+
+#[unstable(feature = "try_trait_v2", issue = "84277")]
+impl<T, E, F: From<E>> ops::FromResidual<Result<convert::Infallible, E>>
+ for Poll<Option<Result<T, F>>>
+{
+ #[inline]
+ fn from_residual(x: Result<convert::Infallible, E>) -> Self {
+ match x {
+ Err(e) => Poll::Ready(Some(Err(From::from(e)))),
+ }
+ }
+}
diff --git a/library/core/src/task/ready.rs b/library/core/src/task/ready.rs
new file mode 100644
index 000000000..b1daf545f
--- /dev/null
+++ b/library/core/src/task/ready.rs
@@ -0,0 +1,114 @@
+use core::convert;
+use core::fmt;
+use core::ops::{ControlFlow, FromResidual, Try};
+use core::task::Poll;
+
+/// Extracts the successful type of a [`Poll<T>`].
+///
+/// This macro bakes in propagation of [`Pending`] signals by returning early.
+///
+/// [`Poll<T>`]: crate::task::Poll
+/// [`Pending`]: crate::task::Poll::Pending
+///
+/// # Examples
+///
+/// ```
+/// use std::task::{ready, Context, Poll};
+/// use std::future::{self, Future};
+/// use std::pin::Pin;
+///
+/// pub fn do_poll(cx: &mut Context<'_>) -> Poll<()> {
+/// let mut fut = future::ready(42);
+/// let fut = Pin::new(&mut fut);
+///
+/// let num = ready!(fut.poll(cx));
+/// # drop(num);
+/// // ... use num
+///
+/// Poll::Ready(())
+/// }
+/// ```
+///
+/// The `ready!` call expands to:
+///
+/// ```
+/// # use std::task::{Context, Poll};
+/// # use std::future::{self, Future};
+/// # use std::pin::Pin;
+/// #
+/// # pub fn do_poll(cx: &mut Context<'_>) -> Poll<()> {
+/// # let mut fut = future::ready(42);
+/// # let fut = Pin::new(&mut fut);
+/// #
+/// let num = match fut.poll(cx) {
+/// Poll::Ready(t) => t,
+/// Poll::Pending => return Poll::Pending,
+/// };
+/// # drop(num);
+/// # // ... use num
+/// #
+/// # Poll::Ready(())
+/// # }
+/// ```
+#[stable(feature = "ready_macro", since = "1.64.0")]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro ready($e:expr) {
+ match $e {
+ $crate::task::Poll::Ready(t) => t,
+ $crate::task::Poll::Pending => {
+ return $crate::task::Poll::Pending;
+ }
+ }
+}
+
+/// Extracts the successful type of a [`Poll<T>`].
+///
+/// See [`Poll::ready`] for details.
+#[unstable(feature = "poll_ready", issue = "89780")]
+pub struct Ready<T>(pub(crate) Poll<T>);
+
+#[unstable(feature = "poll_ready", issue = "89780")]
+impl<T> Try for Ready<T> {
+ type Output = T;
+ type Residual = Ready<convert::Infallible>;
+
+ #[inline]
+ fn from_output(output: Self::Output) -> Self {
+ Ready(Poll::Ready(output))
+ }
+
+ #[inline]
+ fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
+ match self.0 {
+ Poll::Ready(v) => ControlFlow::Continue(v),
+ Poll::Pending => ControlFlow::Break(Ready(Poll::Pending)),
+ }
+ }
+}
+
+#[unstable(feature = "poll_ready", issue = "89780")]
+impl<T> FromResidual for Ready<T> {
+ #[inline]
+ fn from_residual(residual: Ready<convert::Infallible>) -> Self {
+ match residual.0 {
+ Poll::Pending => Ready(Poll::Pending),
+ }
+ }
+}
+
+#[unstable(feature = "poll_ready", issue = "89780")]
+impl<T> FromResidual<Ready<convert::Infallible>> for Poll<T> {
+ #[inline]
+ fn from_residual(residual: Ready<convert::Infallible>) -> Self {
+ match residual.0 {
+ Poll::Pending => Poll::Pending,
+ }
+ }
+}
+
+#[unstable(feature = "poll_ready", issue = "89780")]
+impl<T> fmt::Debug for Ready<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Ready").finish()
+ }
+}
diff --git a/library/core/src/task/wake.rs b/library/core/src/task/wake.rs
new file mode 100644
index 000000000..87d4a25af
--- /dev/null
+++ b/library/core/src/task/wake.rs
@@ -0,0 +1,334 @@
+#![stable(feature = "futures_api", since = "1.36.0")]
+
+use crate::fmt;
+use crate::marker::{PhantomData, Unpin};
+
+/// A `RawWaker` allows the implementor of a task executor to create a [`Waker`]
+/// which provides customized wakeup behavior.
+///
+/// [vtable]: https://en.wikipedia.org/wiki/Virtual_method_table
+///
+/// It consists of a data pointer and a [virtual function pointer table (vtable)][vtable]
+/// that customizes the behavior of the `RawWaker`.
+#[derive(PartialEq, Debug)]
+#[stable(feature = "futures_api", since = "1.36.0")]
+pub struct RawWaker {
+ /// A data pointer, which can be used to store arbitrary data as required
+ /// by the executor. This could be e.g. a type-erased pointer to an `Arc`
+ /// that is associated with the task.
+ /// The value of this field gets passed to all functions that are part of
+ /// the vtable as the first parameter.
+ data: *const (),
+ /// Virtual function pointer table that customizes the behavior of this waker.
+ vtable: &'static RawWakerVTable,
+}
+
+impl RawWaker {
+ /// Creates a new `RawWaker` from the provided `data` pointer and `vtable`.
+ ///
+ /// The `data` pointer can be used to store arbitrary data as required
+ /// by the executor. This could be e.g. a type-erased pointer to an `Arc`
+ /// that is associated with the task.
+ /// The value of this pointer will get passed to all functions that are part
+ /// of the `vtable` as the first parameter.
+ ///
+ /// The `vtable` customizes the behavior of a `Waker` which gets created
+ /// from a `RawWaker`. For each operation on the `Waker`, the associated
+ /// function in the `vtable` of the underlying `RawWaker` will be called.
+ #[inline]
+ #[rustc_promotable]
+ #[stable(feature = "futures_api", since = "1.36.0")]
+ #[rustc_const_stable(feature = "futures_api", since = "1.36.0")]
+ #[must_use]
+ pub const fn new(data: *const (), vtable: &'static RawWakerVTable) -> RawWaker {
+ RawWaker { data, vtable }
+ }
+
+    /// Gets the `data` pointer used to create this `RawWaker`.
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "waker_getters", issue = "87021")]
+ pub fn data(&self) -> *const () {
+ self.data
+ }
+
+    /// Gets the `vtable` pointer used to create this `RawWaker`.
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "waker_getters", issue = "87021")]
+ pub fn vtable(&self) -> &'static RawWakerVTable {
+ self.vtable
+ }
+}
+
+/// A virtual function pointer table (vtable) that specifies the behavior
+/// of a [`RawWaker`].
+///
+/// The pointer passed to all functions inside the vtable is the `data` pointer
+/// from the enclosing [`RawWaker`] object.
+///
+/// The functions inside this struct are only intended to be called on the `data`
+/// pointer of a properly constructed [`RawWaker`] object from inside the
+/// [`RawWaker`] implementation. Calling one of the contained functions using
+/// any other `data` pointer will cause undefined behavior.
+#[stable(feature = "futures_api", since = "1.36.0")]
+#[derive(PartialEq, Copy, Clone, Debug)]
+pub struct RawWakerVTable {
+ /// This function will be called when the [`RawWaker`] gets cloned, e.g. when
+ /// the [`Waker`] in which the [`RawWaker`] is stored gets cloned.
+ ///
+ /// The implementation of this function must retain all resources that are
+ /// required for this additional instance of a [`RawWaker`] and associated
+ /// task. Calling `wake` on the resulting [`RawWaker`] should result in a wakeup
+ /// of the same task that would have been awoken by the original [`RawWaker`].
+ clone: unsafe fn(*const ()) -> RawWaker,
+
+ /// This function will be called when `wake` is called on the [`Waker`].
+ /// It must wake up the task associated with this [`RawWaker`].
+ ///
+ /// The implementation of this function must make sure to release any
+ /// resources that are associated with this instance of a [`RawWaker`] and
+ /// associated task.
+ wake: unsafe fn(*const ()),
+
+ /// This function will be called when `wake_by_ref` is called on the [`Waker`].
+ /// It must wake up the task associated with this [`RawWaker`].
+ ///
+ /// This function is similar to `wake`, but must not consume the provided data
+ /// pointer.
+ wake_by_ref: unsafe fn(*const ()),
+
+ /// This function gets called when a [`RawWaker`] gets dropped.
+ ///
+ /// The implementation of this function must make sure to release any
+ /// resources that are associated with this instance of a [`RawWaker`] and
+ /// associated task.
+ drop: unsafe fn(*const ()),
+}
+
+impl RawWakerVTable {
+ /// Creates a new `RawWakerVTable` from the provided `clone`, `wake`,
+ /// `wake_by_ref`, and `drop` functions.
+ ///
+ /// # `clone`
+ ///
+ /// This function will be called when the [`RawWaker`] gets cloned, e.g. when
+ /// the [`Waker`] in which the [`RawWaker`] is stored gets cloned.
+ ///
+ /// The implementation of this function must retain all resources that are
+ /// required for this additional instance of a [`RawWaker`] and associated
+ /// task. Calling `wake` on the resulting [`RawWaker`] should result in a wakeup
+ /// of the same task that would have been awoken by the original [`RawWaker`].
+ ///
+ /// # `wake`
+ ///
+ /// This function will be called when `wake` is called on the [`Waker`].
+ /// It must wake up the task associated with this [`RawWaker`].
+ ///
+ /// The implementation of this function must make sure to release any
+ /// resources that are associated with this instance of a [`RawWaker`] and
+ /// associated task.
+ ///
+ /// # `wake_by_ref`
+ ///
+ /// This function will be called when `wake_by_ref` is called on the [`Waker`].
+ /// It must wake up the task associated with this [`RawWaker`].
+ ///
+ /// This function is similar to `wake`, but must not consume the provided data
+ /// pointer.
+ ///
+ /// # `drop`
+ ///
+ /// This function gets called when a [`RawWaker`] gets dropped.
+ ///
+ /// The implementation of this function must make sure to release any
+ /// resources that are associated with this instance of a [`RawWaker`] and
+ /// associated task.
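+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch (added for illustration; `no_op` and `VTABLE` are
+    /// illustrative names): a vtable for a waker whose wakeups are no-ops and
+    /// which owns no resources, so `clone` and `drop` have nothing to manage.
+    ///
+    /// ```
+    /// use std::task::{RawWaker, RawWakerVTable};
+    ///
+    /// // The data pointer is unused, so every function can ignore it.
+    /// fn no_op(_data: *const ()) {}
+    /// fn clone(_data: *const ()) -> RawWaker {
+    ///     RawWaker::new(std::ptr::null(), &VTABLE)
+    /// }
+    ///
+    /// static VTABLE: RawWakerVTable = RawWakerVTable::new(clone, no_op, no_op, no_op);
+    ///
+    /// let raw_waker = RawWaker::new(std::ptr::null(), &VTABLE);
+    /// // `raw_waker` could now be passed to `Waker::from_raw`.
+    /// # drop(raw_waker);
+    /// ```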
+ #[rustc_promotable]
+ #[stable(feature = "futures_api", since = "1.36.0")]
+ #[rustc_const_stable(feature = "futures_api", since = "1.36.0")]
+ pub const fn new(
+ clone: unsafe fn(*const ()) -> RawWaker,
+ wake: unsafe fn(*const ()),
+ wake_by_ref: unsafe fn(*const ()),
+ drop: unsafe fn(*const ()),
+ ) -> Self {
+ Self { clone, wake, wake_by_ref, drop }
+ }
+}
+
+/// The `Context` of an asynchronous task.
+///
+/// Currently, `Context` only serves to provide access to a `&Waker`
+/// which can be used to wake the current task.
+#[stable(feature = "futures_api", since = "1.36.0")]
+pub struct Context<'a> {
+ waker: &'a Waker,
+ // Ensure we future-proof against variance changes by forcing
+ // the lifetime to be invariant (argument-position lifetimes
+ // are contravariant while return-position lifetimes are
+ // covariant).
+ _marker: PhantomData<fn(&'a ()) -> &'a ()>,
+}
+
+impl<'a> Context<'a> {
+    /// Creates a new `Context` from a `&Waker`.
+ #[stable(feature = "futures_api", since = "1.36.0")]
+ #[must_use]
+ #[inline]
+ pub fn from_waker(waker: &'a Waker) -> Self {
+ Context { waker, _marker: PhantomData }
+ }
+
+ /// Returns a reference to the `Waker` for the current task.
+ #[stable(feature = "futures_api", since = "1.36.0")]
+ #[must_use]
+ #[inline]
+ pub fn waker(&self) -> &'a Waker {
+ &self.waker
+ }
+}
+
+#[stable(feature = "futures_api", since = "1.36.0")]
+impl fmt::Debug for Context<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Context").field("waker", &self.waker).finish()
+ }
+}
+
+/// A `Waker` is a handle for waking up a task by notifying its executor that it
+/// is ready to be run.
+///
+/// This handle encapsulates a [`RawWaker`] instance, which defines the
+/// executor-specific wakeup behavior.
+///
+/// Implements [`Clone`], [`Send`], and [`Sync`].
+#[repr(transparent)]
+#[stable(feature = "futures_api", since = "1.36.0")]
+pub struct Waker {
+ waker: RawWaker,
+}
+
+#[stable(feature = "futures_api", since = "1.36.0")]
+impl Unpin for Waker {}
+#[stable(feature = "futures_api", since = "1.36.0")]
+unsafe impl Send for Waker {}
+#[stable(feature = "futures_api", since = "1.36.0")]
+unsafe impl Sync for Waker {}
+
+impl Waker {
+    /// Wakes up the task associated with this `Waker`.
+ ///
+ /// As long as the runtime keeps running and the task is not finished, it is
+ /// guaranteed that each invocation of `wake` (or `wake_by_ref`) will be followed
+ /// by at least one `poll` of the task to which this `Waker` belongs. This makes
+ /// it possible to temporarily yield to other tasks while running potentially
+ /// unbounded processing loops.
+ ///
+ /// Note that the above implies that multiple wake-ups may be coalesced into a
+ /// single `poll` invocation by the runtime.
+ ///
+ /// Also note that yielding to competing tasks is not guaranteed: it is the
+ /// executor’s choice which task to run and the executor may choose to run the
+ /// current task again.
+ #[inline]
+ #[stable(feature = "futures_api", since = "1.36.0")]
+ pub fn wake(self) {
+ // The actual wakeup call is delegated through a virtual function call
+ // to the implementation which is defined by the executor.
+ let wake = self.waker.vtable.wake;
+ let data = self.waker.data;
+
+ // Don't call `drop` -- the waker will be consumed by `wake`.
+ crate::mem::forget(self);
+
+ // SAFETY: This is safe because `Waker::from_raw` is the only way
+ // to initialize `wake` and `data` requiring the user to acknowledge
+ // that the contract of `RawWaker` is upheld.
+ unsafe { (wake)(data) };
+ }
+
+    /// Wakes up the task associated with this `Waker` without consuming the `Waker`.
+ ///
+ /// This is similar to `wake`, but may be slightly less efficient in the case
+ /// where an owned `Waker` is available. This method should be preferred to
+ /// calling `waker.clone().wake()`.
+ #[inline]
+ #[stable(feature = "futures_api", since = "1.36.0")]
+ pub fn wake_by_ref(&self) {
+ // The actual wakeup call is delegated through a virtual function call
+ // to the implementation which is defined by the executor.
+
+ // SAFETY: see `wake`
+ unsafe { (self.waker.vtable.wake_by_ref)(self.waker.data) }
+ }
+
+ /// Returns `true` if this `Waker` and another `Waker` have awoken the same task.
+ ///
+ /// This function works on a best-effort basis, and may return false even
+ /// when the `Waker`s would awaken the same task. However, if this function
+ /// returns `true`, it is guaranteed that the `Waker`s will awaken the same task.
+ ///
+ /// This function is primarily used for optimization purposes.
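+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch (not from the original documentation; `no_op`
+    /// and `VTABLE` are illustrative names), using a no-op waker so that a
+    /// clone shares the same data pointer and vtable:
+    ///
+    /// ```
+    /// use std::task::{RawWaker, RawWakerVTable, Waker};
+    ///
+    /// fn no_op(_: *const ()) {}
+    /// fn clone(_: *const ()) -> RawWaker { RawWaker::new(std::ptr::null(), &VTABLE) }
+    /// static VTABLE: RawWakerVTable = RawWakerVTable::new(clone, no_op, no_op, no_op);
+    ///
+    /// // SAFETY: the vtable above upholds the `RawWaker` contract trivially.
+    /// let waker = unsafe { Waker::from_raw(RawWaker::new(std::ptr::null(), &VTABLE)) };
+    /// let other = waker.clone();
+    /// assert!(waker.will_wake(&other));
+    /// ```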
+ #[inline]
+ #[must_use]
+ #[stable(feature = "futures_api", since = "1.36.0")]
+ pub fn will_wake(&self, other: &Waker) -> bool {
+ self.waker == other.waker
+ }
+
+ /// Creates a new `Waker` from [`RawWaker`].
+ ///
+ /// The behavior of the returned `Waker` is undefined if the contract defined
+ /// in [`RawWaker`]'s and [`RawWakerVTable`]'s documentation is not upheld.
+ /// Therefore this method is unsafe.
+ #[inline]
+ #[must_use]
+ #[stable(feature = "futures_api", since = "1.36.0")]
+ pub unsafe fn from_raw(waker: RawWaker) -> Waker {
+ Waker { waker }
+ }
+
+    /// Gets a reference to the underlying [`RawWaker`].
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "waker_getters", issue = "87021")]
+ pub fn as_raw(&self) -> &RawWaker {
+ &self.waker
+ }
+}
+
+#[stable(feature = "futures_api", since = "1.36.0")]
+impl Clone for Waker {
+ #[inline]
+ fn clone(&self) -> Self {
+ Waker {
+ // SAFETY: This is safe because `Waker::from_raw` is the only way
+ // to initialize `clone` and `data` requiring the user to acknowledge
+ // that the contract of [`RawWaker`] is upheld.
+ waker: unsafe { (self.waker.vtable.clone)(self.waker.data) },
+ }
+ }
+}
+
+#[stable(feature = "futures_api", since = "1.36.0")]
+impl Drop for Waker {
+ #[inline]
+ fn drop(&mut self) {
+ // SAFETY: This is safe because `Waker::from_raw` is the only way
+ // to initialize `drop` and `data` requiring the user to acknowledge
+ // that the contract of `RawWaker` is upheld.
+ unsafe { (self.waker.vtable.drop)(self.waker.data) }
+ }
+}
+
+#[stable(feature = "futures_api", since = "1.36.0")]
+impl fmt::Debug for Waker {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let vtable_ptr = self.waker.vtable as *const RawWakerVTable;
+ f.debug_struct("Waker")
+ .field("data", &self.waker.data)
+ .field("vtable", &vtable_ptr)
+ .finish()
+ }
+}
diff --git a/library/core/src/time.rs b/library/core/src/time.rs
new file mode 100644
index 000000000..756f1a166
--- /dev/null
+++ b/library/core/src/time.rs
@@ -0,0 +1,1480 @@
+#![stable(feature = "duration_core", since = "1.25.0")]
+
+//! Temporal quantification.
+//!
+//! # Examples
+//!
+//! There are multiple ways to create a new [`Duration`]:
+//!
+//! ```
+//! # use std::time::Duration;
+//! let five_seconds = Duration::from_secs(5);
+//! assert_eq!(five_seconds, Duration::from_millis(5_000));
+//! assert_eq!(five_seconds, Duration::from_micros(5_000_000));
+//! assert_eq!(five_seconds, Duration::from_nanos(5_000_000_000));
+//!
+//! let ten_seconds = Duration::from_secs(10);
+//! let seven_nanos = Duration::from_nanos(7);
+//! let total = ten_seconds + seven_nanos;
+//! assert_eq!(total, Duration::new(10, 7));
+//! ```
+
+use crate::fmt;
+use crate::iter::Sum;
+use crate::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Sub, SubAssign};
+
+const NANOS_PER_SEC: u32 = 1_000_000_000;
+const NANOS_PER_MILLI: u32 = 1_000_000;
+const NANOS_PER_MICRO: u32 = 1_000;
+const MILLIS_PER_SEC: u64 = 1_000;
+const MICROS_PER_SEC: u64 = 1_000_000;
+
+/// A `Duration` type to represent a span of time, typically used for system
+/// timeouts.
+///
+/// Each `Duration` is composed of a whole number of seconds and a fractional part
+/// represented in nanoseconds. If the underlying system does not support
+/// nanosecond-level precision, APIs binding a system timeout will typically round up
+/// the number of nanoseconds.
+///
+/// [`Duration`]s implement many common traits, including [`Add`], [`Sub`], and other
+/// [`ops`] traits. It implements [`Default`] by returning a zero-length `Duration`.
+///
+/// [`ops`]: crate::ops
+///
+/// # Examples
+///
+/// ```
+/// use std::time::Duration;
+///
+/// let five_seconds = Duration::new(5, 0);
+/// let five_seconds_and_five_nanos = five_seconds + Duration::new(0, 5);
+///
+/// assert_eq!(five_seconds_and_five_nanos.as_secs(), 5);
+/// assert_eq!(five_seconds_and_five_nanos.subsec_nanos(), 5);
+///
+/// let ten_millis = Duration::from_millis(10);
+/// ```
+///
+/// # Formatting `Duration` values
+///
+/// `Duration` intentionally does not have a `Display` impl, as there are a
+/// variety of ways to format spans of time for human readability. `Duration`
+/// provides a `Debug` impl that shows the full precision of the value.
+///
+/// The `Debug` output uses the non-ASCII "µs" suffix for microseconds. If your
+/// program output may appear in contexts that cannot rely on full Unicode
+/// compatibility, you may wish to format `Duration` objects yourself or use a
+/// crate to do so.
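+///
+/// For illustration (an added sketch matching the `Debug` implementation
+/// further below in this file):
+///
+/// ```
+/// use std::time::Duration;
+///
+/// assert_eq!(format!("{:?}", Duration::from_micros(5)), "5µs");
+/// assert_eq!(format!("{:.3?}", Duration::from_nanos(1_500_000)), "1.500ms");
+/// ```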
+#[stable(feature = "duration", since = "1.3.0")]
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
+#[cfg_attr(not(test), rustc_diagnostic_item = "Duration")]
+pub struct Duration {
+ secs: u64,
+ nanos: u32, // Always 0 <= nanos < NANOS_PER_SEC
+}
+
+impl Duration {
+ /// The duration of one second.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(duration_constants)]
+ /// use std::time::Duration;
+ ///
+ /// assert_eq!(Duration::SECOND, Duration::from_secs(1));
+ /// ```
+ #[unstable(feature = "duration_constants", issue = "57391")]
+ pub const SECOND: Duration = Duration::from_secs(1);
+
+ /// The duration of one millisecond.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(duration_constants)]
+ /// use std::time::Duration;
+ ///
+ /// assert_eq!(Duration::MILLISECOND, Duration::from_millis(1));
+ /// ```
+ #[unstable(feature = "duration_constants", issue = "57391")]
+ pub const MILLISECOND: Duration = Duration::from_millis(1);
+
+ /// The duration of one microsecond.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(duration_constants)]
+ /// use std::time::Duration;
+ ///
+ /// assert_eq!(Duration::MICROSECOND, Duration::from_micros(1));
+ /// ```
+ #[unstable(feature = "duration_constants", issue = "57391")]
+ pub const MICROSECOND: Duration = Duration::from_micros(1);
+
+ /// The duration of one nanosecond.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(duration_constants)]
+ /// use std::time::Duration;
+ ///
+ /// assert_eq!(Duration::NANOSECOND, Duration::from_nanos(1));
+ /// ```
+ #[unstable(feature = "duration_constants", issue = "57391")]
+ pub const NANOSECOND: Duration = Duration::from_nanos(1);
+
+ /// A duration of zero time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let duration = Duration::ZERO;
+ /// assert!(duration.is_zero());
+ /// assert_eq!(duration.as_nanos(), 0);
+ /// ```
+ #[stable(feature = "duration_zero", since = "1.53.0")]
+ pub const ZERO: Duration = Duration::from_nanos(0);
+
+ /// The maximum duration.
+ ///
+ /// May vary by platform as necessary. Must be able to contain the difference between
+ /// two instances of [`Instant`] or two instances of [`SystemTime`].
+ /// This constraint gives it a value of about 584,942,417,355 years in practice,
+ /// which is currently used on all platforms.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// assert_eq!(Duration::MAX, Duration::new(u64::MAX, 1_000_000_000 - 1));
+ /// ```
+ /// [`Instant`]: ../../std/time/struct.Instant.html
+ /// [`SystemTime`]: ../../std/time/struct.SystemTime.html
+ #[stable(feature = "duration_saturating_ops", since = "1.53.0")]
+ pub const MAX: Duration = Duration::new(u64::MAX, NANOS_PER_SEC - 1);
+
+ /// Creates a new `Duration` from the specified number of whole seconds and
+ /// additional nanoseconds.
+ ///
+    /// If the number of nanoseconds is one billion or more (the number of
+    /// nanoseconds in a second), then the excess will carry over into the
+    /// seconds provided.
+ ///
+ /// # Panics
+ ///
+ /// This constructor will panic if the carry from the nanoseconds overflows
+ /// the seconds counter.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let five_seconds = Duration::new(5, 0);
+ /// ```
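+    ///
+    /// The nanosecond carry described above can be observed directly (an
+    /// illustrative assertion, not from the original examples):
+    ///
+    /// ```
+    /// use std::time::Duration;
+    ///
+    /// // 1.5 billion nanoseconds carry one whole second.
+    /// assert_eq!(Duration::new(1, 1_500_000_000), Duration::new(2, 500_000_000));
+    /// ```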
+ #[stable(feature = "duration", since = "1.3.0")]
+ #[inline]
+ #[must_use]
+ #[rustc_const_stable(feature = "duration_consts_2", since = "1.58.0")]
+ pub const fn new(secs: u64, nanos: u32) -> Duration {
+ let secs = match secs.checked_add((nanos / NANOS_PER_SEC) as u64) {
+ Some(secs) => secs,
+ None => panic!("overflow in Duration::new"),
+ };
+ let nanos = nanos % NANOS_PER_SEC;
+ Duration { secs, nanos }
+ }
+
+ /// Creates a new `Duration` from the specified number of whole seconds.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let duration = Duration::from_secs(5);
+ ///
+ /// assert_eq!(5, duration.as_secs());
+ /// assert_eq!(0, duration.subsec_nanos());
+ /// ```
+ #[stable(feature = "duration", since = "1.3.0")]
+ #[must_use]
+ #[inline]
+ #[rustc_const_stable(feature = "duration_consts", since = "1.32.0")]
+ pub const fn from_secs(secs: u64) -> Duration {
+ Duration { secs, nanos: 0 }
+ }
+
+ /// Creates a new `Duration` from the specified number of milliseconds.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let duration = Duration::from_millis(2569);
+ ///
+ /// assert_eq!(2, duration.as_secs());
+ /// assert_eq!(569_000_000, duration.subsec_nanos());
+ /// ```
+ #[stable(feature = "duration", since = "1.3.0")]
+ #[must_use]
+ #[inline]
+ #[rustc_const_stable(feature = "duration_consts", since = "1.32.0")]
+ pub const fn from_millis(millis: u64) -> Duration {
+ Duration {
+ secs: millis / MILLIS_PER_SEC,
+ nanos: ((millis % MILLIS_PER_SEC) as u32) * NANOS_PER_MILLI,
+ }
+ }
+
+ /// Creates a new `Duration` from the specified number of microseconds.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let duration = Duration::from_micros(1_000_002);
+ ///
+ /// assert_eq!(1, duration.as_secs());
+ /// assert_eq!(2000, duration.subsec_nanos());
+ /// ```
+ #[stable(feature = "duration_from_micros", since = "1.27.0")]
+ #[must_use]
+ #[inline]
+ #[rustc_const_stable(feature = "duration_consts", since = "1.32.0")]
+ pub const fn from_micros(micros: u64) -> Duration {
+ Duration {
+ secs: micros / MICROS_PER_SEC,
+ nanos: ((micros % MICROS_PER_SEC) as u32) * NANOS_PER_MICRO,
+ }
+ }
+
+ /// Creates a new `Duration` from the specified number of nanoseconds.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let duration = Duration::from_nanos(1_000_000_123);
+ ///
+ /// assert_eq!(1, duration.as_secs());
+ /// assert_eq!(123, duration.subsec_nanos());
+ /// ```
+ #[stable(feature = "duration_extras", since = "1.27.0")]
+ #[must_use]
+ #[inline]
+ #[rustc_const_stable(feature = "duration_consts", since = "1.32.0")]
+ pub const fn from_nanos(nanos: u64) -> Duration {
+ Duration {
+ secs: nanos / (NANOS_PER_SEC as u64),
+ nanos: (nanos % (NANOS_PER_SEC as u64)) as u32,
+ }
+ }
+
+    /// Returns `true` if this `Duration` spans no time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// assert!(Duration::ZERO.is_zero());
+ /// assert!(Duration::new(0, 0).is_zero());
+ /// assert!(Duration::from_nanos(0).is_zero());
+ /// assert!(Duration::from_secs(0).is_zero());
+ ///
+ /// assert!(!Duration::new(1, 1).is_zero());
+ /// assert!(!Duration::from_nanos(1).is_zero());
+ /// assert!(!Duration::from_secs(1).is_zero());
+ /// ```
+ #[must_use]
+ #[stable(feature = "duration_zero", since = "1.53.0")]
+ #[rustc_const_stable(feature = "duration_zero", since = "1.53.0")]
+ #[inline]
+ pub const fn is_zero(&self) -> bool {
+ self.secs == 0 && self.nanos == 0
+ }
+
+ /// Returns the number of _whole_ seconds contained by this `Duration`.
+ ///
+ /// The returned value does not include the fractional (nanosecond) part of the
+ /// duration, which can be obtained using [`subsec_nanos`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let duration = Duration::new(5, 730023852);
+ /// assert_eq!(duration.as_secs(), 5);
+ /// ```
+ ///
+ /// To determine the total number of seconds represented by the `Duration`,
+ /// use `as_secs` in combination with [`subsec_nanos`]:
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let duration = Duration::new(5, 730023852);
+ ///
+ /// assert_eq!(5.730023852,
+ /// duration.as_secs() as f64
+ /// + duration.subsec_nanos() as f64 * 1e-9);
+ /// ```
+ ///
+ /// [`subsec_nanos`]: Duration::subsec_nanos
+ #[stable(feature = "duration", since = "1.3.0")]
+ #[rustc_const_stable(feature = "duration_consts", since = "1.32.0")]
+ #[must_use]
+ #[inline]
+ pub const fn as_secs(&self) -> u64 {
+ self.secs
+ }
+
+ /// Returns the fractional part of this `Duration`, in whole milliseconds.
+ ///
+ /// This method does **not** return the length of the duration when
+ /// represented by milliseconds. The returned number always represents a
+ /// fractional portion of a second (i.e., it is less than one thousand).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let duration = Duration::from_millis(5432);
+ /// assert_eq!(duration.as_secs(), 5);
+ /// assert_eq!(duration.subsec_millis(), 432);
+ /// ```
+ #[stable(feature = "duration_extras", since = "1.27.0")]
+ #[rustc_const_stable(feature = "duration_consts", since = "1.32.0")]
+ #[must_use]
+ #[inline]
+ pub const fn subsec_millis(&self) -> u32 {
+ self.nanos / NANOS_PER_MILLI
+ }
+
+ /// Returns the fractional part of this `Duration`, in whole microseconds.
+ ///
+ /// This method does **not** return the length of the duration when
+ /// represented by microseconds. The returned number always represents a
+ /// fractional portion of a second (i.e., it is less than one million).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let duration = Duration::from_micros(1_234_567);
+ /// assert_eq!(duration.as_secs(), 1);
+ /// assert_eq!(duration.subsec_micros(), 234_567);
+ /// ```
+ #[stable(feature = "duration_extras", since = "1.27.0")]
+ #[rustc_const_stable(feature = "duration_consts", since = "1.32.0")]
+ #[must_use]
+ #[inline]
+ pub const fn subsec_micros(&self) -> u32 {
+ self.nanos / NANOS_PER_MICRO
+ }
+
+ /// Returns the fractional part of this `Duration`, in nanoseconds.
+ ///
+ /// This method does **not** return the length of the duration when
+ /// represented by nanoseconds. The returned number always represents a
+ /// fractional portion of a second (i.e., it is less than one billion).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let duration = Duration::from_millis(5010);
+ /// assert_eq!(duration.as_secs(), 5);
+ /// assert_eq!(duration.subsec_nanos(), 10_000_000);
+ /// ```
+ #[stable(feature = "duration", since = "1.3.0")]
+ #[rustc_const_stable(feature = "duration_consts", since = "1.32.0")]
+ #[must_use]
+ #[inline]
+ pub const fn subsec_nanos(&self) -> u32 {
+ self.nanos
+ }
+
+ /// Returns the total number of whole milliseconds contained by this `Duration`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let duration = Duration::new(5, 730023852);
+ /// assert_eq!(duration.as_millis(), 5730);
+ /// ```
+ #[stable(feature = "duration_as_u128", since = "1.33.0")]
+ #[rustc_const_stable(feature = "duration_as_u128", since = "1.33.0")]
+ #[must_use]
+ #[inline]
+ pub const fn as_millis(&self) -> u128 {
+ self.secs as u128 * MILLIS_PER_SEC as u128 + (self.nanos / NANOS_PER_MILLI) as u128
+ }
+
+ /// Returns the total number of whole microseconds contained by this `Duration`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let duration = Duration::new(5, 730023852);
+ /// assert_eq!(duration.as_micros(), 5730023);
+ /// ```
+ #[stable(feature = "duration_as_u128", since = "1.33.0")]
+ #[rustc_const_stable(feature = "duration_as_u128", since = "1.33.0")]
+ #[must_use]
+ #[inline]
+ pub const fn as_micros(&self) -> u128 {
+ self.secs as u128 * MICROS_PER_SEC as u128 + (self.nanos / NANOS_PER_MICRO) as u128
+ }
+
+ /// Returns the total number of nanoseconds contained by this `Duration`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let duration = Duration::new(5, 730023852);
+ /// assert_eq!(duration.as_nanos(), 5730023852);
+ /// ```
+ #[stable(feature = "duration_as_u128", since = "1.33.0")]
+ #[rustc_const_stable(feature = "duration_as_u128", since = "1.33.0")]
+ #[must_use]
+ #[inline]
+ pub const fn as_nanos(&self) -> u128 {
+ self.secs as u128 * NANOS_PER_SEC as u128 + self.nanos as u128
+ }
+
+ /// Checked `Duration` addition. Computes `self + other`, returning [`None`]
+ /// if overflow occurred.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// assert_eq!(Duration::new(0, 0).checked_add(Duration::new(0, 1)), Some(Duration::new(0, 1)));
+ /// assert_eq!(Duration::new(1, 0).checked_add(Duration::new(u64::MAX, 0)), None);
+ /// ```
+ #[stable(feature = "duration_checked_ops", since = "1.16.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_const_stable(feature = "duration_consts_2", since = "1.58.0")]
+ pub const fn checked_add(self, rhs: Duration) -> Option<Duration> {
+ if let Some(mut secs) = self.secs.checked_add(rhs.secs) {
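+            // Both nanosecond parts are below `NANOS_PER_SEC`, so their sum
+            // fits in a `u32` and carries at most one extra second.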
+ let mut nanos = self.nanos + rhs.nanos;
+ if nanos >= NANOS_PER_SEC {
+ nanos -= NANOS_PER_SEC;
+ if let Some(new_secs) = secs.checked_add(1) {
+ secs = new_secs;
+ } else {
+ return None;
+ }
+ }
+ debug_assert!(nanos < NANOS_PER_SEC);
+ Some(Duration { secs, nanos })
+ } else {
+ None
+ }
+ }
+
+ /// Saturating `Duration` addition. Computes `self + other`, returning [`Duration::MAX`]
+ /// if overflow occurred.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(duration_constants)]
+ /// use std::time::Duration;
+ ///
+ /// assert_eq!(Duration::new(0, 0).saturating_add(Duration::new(0, 1)), Duration::new(0, 1));
+ /// assert_eq!(Duration::new(1, 0).saturating_add(Duration::new(u64::MAX, 0)), Duration::MAX);
+ /// ```
+ #[stable(feature = "duration_saturating_ops", since = "1.53.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_const_stable(feature = "duration_consts_2", since = "1.58.0")]
+ pub const fn saturating_add(self, rhs: Duration) -> Duration {
+ match self.checked_add(rhs) {
+ Some(res) => res,
+ None => Duration::MAX,
+ }
+ }
+
+ /// Checked `Duration` subtraction. Computes `self - other`, returning [`None`]
+ /// if the result would be negative or if overflow occurred.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// assert_eq!(Duration::new(0, 1).checked_sub(Duration::new(0, 0)), Some(Duration::new(0, 1)));
+ /// assert_eq!(Duration::new(0, 0).checked_sub(Duration::new(0, 1)), None);
+ /// ```
+ #[stable(feature = "duration_checked_ops", since = "1.16.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_const_stable(feature = "duration_consts_2", since = "1.58.0")]
+ pub const fn checked_sub(self, rhs: Duration) -> Option<Duration> {
+ if let Some(mut secs) = self.secs.checked_sub(rhs.secs) {
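+            // If the nanosecond part would go negative, borrow one second.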
+ let nanos = if self.nanos >= rhs.nanos {
+ self.nanos - rhs.nanos
+ } else if let Some(sub_secs) = secs.checked_sub(1) {
+ secs = sub_secs;
+ self.nanos + NANOS_PER_SEC - rhs.nanos
+ } else {
+ return None;
+ };
+ debug_assert!(nanos < NANOS_PER_SEC);
+ Some(Duration { secs, nanos })
+ } else {
+ None
+ }
+ }
+
+ /// Saturating `Duration` subtraction. Computes `self - other`, returning [`Duration::ZERO`]
+ /// if the result would be negative or if overflow occurred.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// assert_eq!(Duration::new(0, 1).saturating_sub(Duration::new(0, 0)), Duration::new(0, 1));
+ /// assert_eq!(Duration::new(0, 0).saturating_sub(Duration::new(0, 1)), Duration::ZERO);
+ /// ```
+ #[stable(feature = "duration_saturating_ops", since = "1.53.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_const_stable(feature = "duration_consts_2", since = "1.58.0")]
+ pub const fn saturating_sub(self, rhs: Duration) -> Duration {
+ match self.checked_sub(rhs) {
+ Some(res) => res,
+ None => Duration::ZERO,
+ }
+ }
+
+ /// Checked `Duration` multiplication. Computes `self * other`, returning
+ /// [`None`] if overflow occurred.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// assert_eq!(Duration::new(0, 500_000_001).checked_mul(2), Some(Duration::new(1, 2)));
+ /// assert_eq!(Duration::new(u64::MAX - 1, 0).checked_mul(2), None);
+ /// ```
+ #[stable(feature = "duration_checked_ops", since = "1.16.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_const_stable(feature = "duration_consts_2", since = "1.58.0")]
+ pub const fn checked_mul(self, rhs: u32) -> Option<Duration> {
+ // Multiply nanoseconds as u64, because it cannot overflow that way.
+ let total_nanos = self.nanos as u64 * rhs as u64;
+ let extra_secs = total_nanos / (NANOS_PER_SEC as u64);
+ let nanos = (total_nanos % (NANOS_PER_SEC as u64)) as u32;
+ if let Some(s) = self.secs.checked_mul(rhs as u64) {
+ if let Some(secs) = s.checked_add(extra_secs) {
+ debug_assert!(nanos < NANOS_PER_SEC);
+ return Some(Duration { secs, nanos });
+ }
+ }
+ None
+ }
+
+ /// Saturating `Duration` multiplication. Computes `self * other`, returning
+ /// [`Duration::MAX`] if overflow occurred.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(duration_constants)]
+ /// use std::time::Duration;
+ ///
+ /// assert_eq!(Duration::new(0, 500_000_001).saturating_mul(2), Duration::new(1, 2));
+ /// assert_eq!(Duration::new(u64::MAX - 1, 0).saturating_mul(2), Duration::MAX);
+ /// ```
+ #[stable(feature = "duration_saturating_ops", since = "1.53.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_const_stable(feature = "duration_consts_2", since = "1.58.0")]
+ pub const fn saturating_mul(self, rhs: u32) -> Duration {
+ match self.checked_mul(rhs) {
+ Some(res) => res,
+ None => Duration::MAX,
+ }
+ }
+
+ /// Checked `Duration` division. Computes `self / other`, returning [`None`]
+ /// if `other == 0`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// assert_eq!(Duration::new(2, 0).checked_div(2), Some(Duration::new(1, 0)));
+ /// assert_eq!(Duration::new(1, 0).checked_div(2), Some(Duration::new(0, 500_000_000)));
+ /// assert_eq!(Duration::new(2, 0).checked_div(0), None);
+ /// ```
+ #[stable(feature = "duration_checked_ops", since = "1.16.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_const_stable(feature = "duration_consts_2", since = "1.58.0")]
+ pub const fn checked_div(self, rhs: u32) -> Option<Duration> {
+ if rhs != 0 {
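+            // Split off the whole seconds, then convert the sub-second
+            // remainder (`carry`) into extra nanoseconds to keep precision.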
+ let secs = self.secs / (rhs as u64);
+ let carry = self.secs - secs * (rhs as u64);
+ let extra_nanos = carry * (NANOS_PER_SEC as u64) / (rhs as u64);
+ let nanos = self.nanos / rhs + (extra_nanos as u32);
+ debug_assert!(nanos < NANOS_PER_SEC);
+ Some(Duration { secs, nanos })
+ } else {
+ None
+ }
+ }
+
+ /// Returns the number of seconds contained by this `Duration` as `f64`.
+ ///
+ /// The returned value does include the fractional (nanosecond) part of the duration.
+ ///
+ /// # Examples
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let dur = Duration::new(2, 700_000_000);
+ /// assert_eq!(dur.as_secs_f64(), 2.7);
+ /// ```
+ #[stable(feature = "duration_float", since = "1.38.0")]
+ #[must_use]
+ #[inline]
+ #[rustc_const_unstable(feature = "duration_consts_float", issue = "72440")]
+ pub const fn as_secs_f64(&self) -> f64 {
+ (self.secs as f64) + (self.nanos as f64) / (NANOS_PER_SEC as f64)
+ }
+
+ /// Returns the number of seconds contained by this `Duration` as `f32`.
+ ///
+ /// The returned value does include the fractional (nanosecond) part of the duration.
+ ///
+ /// # Examples
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let dur = Duration::new(2, 700_000_000);
+ /// assert_eq!(dur.as_secs_f32(), 2.7);
+ /// ```
+ #[stable(feature = "duration_float", since = "1.38.0")]
+ #[must_use]
+ #[inline]
+ #[rustc_const_unstable(feature = "duration_consts_float", issue = "72440")]
+ pub const fn as_secs_f32(&self) -> f32 {
+ (self.secs as f32) + (self.nanos as f32) / (NANOS_PER_SEC as f32)
+ }
+
+ /// Creates a new `Duration` from the specified number of seconds represented
+ /// as `f64`.
+ ///
+ /// # Panics
+    /// This constructor will panic if `secs` is negative, overflows `Duration`, or is not finite.
+ ///
+ /// # Examples
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let res = Duration::from_secs_f64(0.0);
+ /// assert_eq!(res, Duration::new(0, 0));
+ /// let res = Duration::from_secs_f64(1e-20);
+ /// assert_eq!(res, Duration::new(0, 0));
+ /// let res = Duration::from_secs_f64(4.2e-7);
+ /// assert_eq!(res, Duration::new(0, 420));
+ /// let res = Duration::from_secs_f64(2.7);
+ /// assert_eq!(res, Duration::new(2, 700_000_000));
+ /// let res = Duration::from_secs_f64(3e10);
+ /// assert_eq!(res, Duration::new(30_000_000_000, 0));
+ /// // subnormal float
+ /// let res = Duration::from_secs_f64(f64::from_bits(1));
+ /// assert_eq!(res, Duration::new(0, 0));
+ /// // conversion uses rounding
+ /// let res = Duration::from_secs_f64(0.999e-9);
+ /// assert_eq!(res, Duration::new(0, 1));
+ /// ```
+ #[stable(feature = "duration_float", since = "1.38.0")]
+ #[must_use]
+ #[inline]
+ #[rustc_const_unstable(feature = "duration_consts_float", issue = "72440")]
+ pub const fn from_secs_f64(secs: f64) -> Duration {
+ match Duration::try_from_secs_f64(secs) {
+ Ok(v) => v,
+ Err(e) => panic!("{}", e.description()),
+ }
+ }
+
+ /// Creates a new `Duration` from the specified number of seconds represented
+ /// as `f32`.
+ ///
+ /// # Panics
+    /// This constructor will panic if `secs` is negative, overflows `Duration`, or is not finite.
+ ///
+ /// # Examples
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let res = Duration::from_secs_f32(0.0);
+ /// assert_eq!(res, Duration::new(0, 0));
+ /// let res = Duration::from_secs_f32(1e-20);
+ /// assert_eq!(res, Duration::new(0, 0));
+ /// let res = Duration::from_secs_f32(4.2e-7);
+ /// assert_eq!(res, Duration::new(0, 420));
+ /// let res = Duration::from_secs_f32(2.7);
+ /// assert_eq!(res, Duration::new(2, 700_000_048));
+ /// let res = Duration::from_secs_f32(3e10);
+ /// assert_eq!(res, Duration::new(30_000_001_024, 0));
+ /// // subnormal float
+ /// let res = Duration::from_secs_f32(f32::from_bits(1));
+ /// assert_eq!(res, Duration::new(0, 0));
+ /// // conversion uses rounding
+ /// let res = Duration::from_secs_f32(0.999e-9);
+ /// assert_eq!(res, Duration::new(0, 1));
+ /// ```
+ #[stable(feature = "duration_float", since = "1.38.0")]
+ #[must_use]
+ #[inline]
+ #[rustc_const_unstable(feature = "duration_consts_float", issue = "72440")]
+ pub const fn from_secs_f32(secs: f32) -> Duration {
+ match Duration::try_from_secs_f32(secs) {
+ Ok(v) => v,
+ Err(e) => panic!("{}", e.description()),
+ }
+ }
+
+ /// Multiplies `Duration` by `f64`.
+ ///
+ /// # Panics
+    /// This method will panic if the result is negative, overflows `Duration`, or is not finite.
+ ///
+ /// # Examples
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let dur = Duration::new(2, 700_000_000);
+ /// assert_eq!(dur.mul_f64(3.14), Duration::new(8, 478_000_000));
+ /// assert_eq!(dur.mul_f64(3.14e5), Duration::new(847_800, 0));
+ /// ```
+ #[stable(feature = "duration_float", since = "1.38.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_const_unstable(feature = "duration_consts_float", issue = "72440")]
+ pub const fn mul_f64(self, rhs: f64) -> Duration {
+ Duration::from_secs_f64(rhs * self.as_secs_f64())
+ }
+
+ /// Multiplies `Duration` by `f32`.
+ ///
+ /// # Panics
+    /// This method will panic if the result is negative, overflows `Duration`, or is not finite.
+ ///
+ /// # Examples
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let dur = Duration::new(2, 700_000_000);
+ /// assert_eq!(dur.mul_f32(3.14), Duration::new(8, 478_000_641));
+ /// assert_eq!(dur.mul_f32(3.14e5), Duration::new(847800, 0));
+ /// ```
+ #[stable(feature = "duration_float", since = "1.38.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_const_unstable(feature = "duration_consts_float", issue = "72440")]
+ pub const fn mul_f32(self, rhs: f32) -> Duration {
+ Duration::from_secs_f32(rhs * self.as_secs_f32())
+ }
+
+    /// Divides `Duration` by `f64`.
+ ///
+ /// # Panics
+    /// This method will panic if the result is negative, overflows `Duration`, or is not finite.
+ ///
+ /// # Examples
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let dur = Duration::new(2, 700_000_000);
+ /// assert_eq!(dur.div_f64(3.14), Duration::new(0, 859_872_611));
+ /// assert_eq!(dur.div_f64(3.14e5), Duration::new(0, 8_599));
+ /// ```
+ #[stable(feature = "duration_float", since = "1.38.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_const_unstable(feature = "duration_consts_float", issue = "72440")]
+ pub const fn div_f64(self, rhs: f64) -> Duration {
+ Duration::from_secs_f64(self.as_secs_f64() / rhs)
+ }
+
+    /// Divides `Duration` by `f32`.
+ ///
+ /// # Panics
+    /// This method will panic if the result is negative, overflows `Duration`, or is not finite.
+ ///
+ /// # Examples
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let dur = Duration::new(2, 700_000_000);
+    /// // note that due to rounding errors the result is slightly
+ /// // different from 0.859_872_611
+ /// assert_eq!(dur.div_f32(3.14), Duration::new(0, 859_872_580));
+ /// assert_eq!(dur.div_f32(3.14e5), Duration::new(0, 8_599));
+ /// ```
+ #[stable(feature = "duration_float", since = "1.38.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_const_unstable(feature = "duration_consts_float", issue = "72440")]
+ pub const fn div_f32(self, rhs: f32) -> Duration {
+ Duration::from_secs_f32(self.as_secs_f32() / rhs)
+ }
+
+    /// Divides `Duration` by `Duration` and returns `f64`.
+ ///
+ /// # Examples
+ /// ```
+ /// #![feature(div_duration)]
+ /// use std::time::Duration;
+ ///
+ /// let dur1 = Duration::new(2, 700_000_000);
+ /// let dur2 = Duration::new(5, 400_000_000);
+ /// assert_eq!(dur1.div_duration_f64(dur2), 0.5);
+ /// ```
+ #[unstable(feature = "div_duration", issue = "63139")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_const_unstable(feature = "duration_consts_float", issue = "72440")]
+ pub const fn div_duration_f64(self, rhs: Duration) -> f64 {
+ self.as_secs_f64() / rhs.as_secs_f64()
+ }
+
+    /// Divides `Duration` by `Duration` and returns `f32`.
+ ///
+ /// # Examples
+ /// ```
+ /// #![feature(div_duration)]
+ /// use std::time::Duration;
+ ///
+ /// let dur1 = Duration::new(2, 700_000_000);
+ /// let dur2 = Duration::new(5, 400_000_000);
+ /// assert_eq!(dur1.div_duration_f32(dur2), 0.5);
+ /// ```
+ #[unstable(feature = "div_duration", issue = "63139")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_const_unstable(feature = "duration_consts_float", issue = "72440")]
+ pub const fn div_duration_f32(self, rhs: Duration) -> f32 {
+ self.as_secs_f32() / rhs.as_secs_f32()
+ }
+}
+
+#[stable(feature = "duration", since = "1.3.0")]
+impl Add for Duration {
+ type Output = Duration;
+
+ fn add(self, rhs: Duration) -> Duration {
+ self.checked_add(rhs).expect("overflow when adding durations")
+ }
+}
+
+#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
+impl AddAssign for Duration {
+ fn add_assign(&mut self, rhs: Duration) {
+ *self = *self + rhs;
+ }
+}
+
+#[stable(feature = "duration", since = "1.3.0")]
+impl Sub for Duration {
+ type Output = Duration;
+
+ fn sub(self, rhs: Duration) -> Duration {
+ self.checked_sub(rhs).expect("overflow when subtracting durations")
+ }
+}
+
+#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
+impl SubAssign for Duration {
+ fn sub_assign(&mut self, rhs: Duration) {
+ *self = *self - rhs;
+ }
+}
+
+#[stable(feature = "duration", since = "1.3.0")]
+impl Mul<u32> for Duration {
+ type Output = Duration;
+
+ fn mul(self, rhs: u32) -> Duration {
+ self.checked_mul(rhs).expect("overflow when multiplying duration by scalar")
+ }
+}
+
+#[stable(feature = "symmetric_u32_duration_mul", since = "1.31.0")]
+impl Mul<Duration> for u32 {
+ type Output = Duration;
+
+ fn mul(self, rhs: Duration) -> Duration {
+ rhs * self
+ }
+}
+
+#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
+impl MulAssign<u32> for Duration {
+ fn mul_assign(&mut self, rhs: u32) {
+ *self = *self * rhs;
+ }
+}
+
+#[stable(feature = "duration", since = "1.3.0")]
+impl Div<u32> for Duration {
+ type Output = Duration;
+
+ fn div(self, rhs: u32) -> Duration {
+ self.checked_div(rhs).expect("divide by zero error when dividing duration by scalar")
+ }
+}
+
+#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
+impl DivAssign<u32> for Duration {
+ fn div_assign(&mut self, rhs: u32) {
+ *self = *self / rhs;
+ }
+}
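+
+// Illustrative operator usage (an added sketch, not part of the original
+// sources): each impl above delegates to the corresponding checked method
+// and panics on overflow or on division by zero.
+//
+//     let d = Duration::from_millis(1_500);
+//     assert_eq!(d + Duration::from_millis(500), Duration::from_secs(2));
+//     assert_eq!(d * 2, Duration::from_secs(3));
+//     assert_eq!(d / 3, Duration::from_millis(500));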
+
+macro_rules! sum_durations {
+ ($iter:expr) => {{
+ let mut total_secs: u64 = 0;
+ let mut total_nanos: u64 = 0;
+
+ for entry in $iter {
+ total_secs =
+ total_secs.checked_add(entry.secs).expect("overflow in iter::sum over durations");
+ total_nanos = match total_nanos.checked_add(entry.nanos as u64) {
+ Some(n) => n,
+ None => {
+ total_secs = total_secs
+ .checked_add(total_nanos / NANOS_PER_SEC as u64)
+ .expect("overflow in iter::sum over durations");
+ (total_nanos % NANOS_PER_SEC as u64) + entry.nanos as u64
+ }
+ };
+ }
+ total_secs = total_secs
+ .checked_add(total_nanos / NANOS_PER_SEC as u64)
+ .expect("overflow in iter::sum over durations");
+ total_nanos = total_nanos % NANOS_PER_SEC as u64;
+ Duration { secs: total_secs, nanos: total_nanos as u32 }
+ }};
+}
+
+#[stable(feature = "duration_sum", since = "1.16.0")]
+impl Sum for Duration {
+ fn sum<I: Iterator<Item = Duration>>(iter: I) -> Duration {
+ sum_durations!(iter)
+ }
+}
+
+#[stable(feature = "duration_sum", since = "1.16.0")]
+impl<'a> Sum<&'a Duration> for Duration {
+ fn sum<I: Iterator<Item = &'a Duration>>(iter: I) -> Duration {
+ sum_durations!(iter)
+ }
+}
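+
+// The `Sum` impls let `Iterator::sum` total an iterator of durations (or of
+// `&Duration`), panicking on overflow; e.g. (an added sketch):
+//
+//     let total: Duration = [Duration::from_secs(1), Duration::from_millis(500)]
+//         .iter()
+//         .sum();
+//     assert_eq!(total, Duration::new(1, 500_000_000));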
+
+#[stable(feature = "duration_debug_impl", since = "1.27.0")]
+impl fmt::Debug for Duration {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// Formats a floating point number in decimal notation.
+ ///
+ /// The number is given as the `integer_part` and a fractional part.
+        /// The value of the fractional part is `fractional_part / (10 * divisor)`.
+        /// So `integer_part` = 3, `fractional_part` = 12 and `divisor` = 100
+        /// represent the number `3.012`. Trailing zeros are omitted.
+        ///
+        /// `divisor` must not be above 100_000_000 and should be a power of 10;
+        /// other values do not make sense. `fractional_part` must be less than
+        /// `10 * divisor`.
+ ///
+ /// A prefix and postfix may be added. The whole thing is padded
+ /// to the formatter's `width`, if specified.
+ fn fmt_decimal(
+ f: &mut fmt::Formatter<'_>,
+ mut integer_part: u64,
+ mut fractional_part: u32,
+ mut divisor: u32,
+ prefix: &str,
+ postfix: &str,
+ ) -> fmt::Result {
+ // Encode the fractional part into a temporary buffer. The buffer
+            // only needs to hold 9 elements, because `fractional_part` has to
+ // be smaller than 10^9. The buffer is prefilled with '0' digits
+ // to simplify the code below.
+ let mut buf = [b'0'; 9];
+
+ // The next digit is written at this position
+ let mut pos = 0;
+
+ // We keep writing digits into the buffer while there are non-zero
+ // digits left and we haven't written enough digits yet.
+ while fractional_part > 0 && pos < f.precision().unwrap_or(9) {
+ // Write new digit into the buffer
+ buf[pos] = b'0' + (fractional_part / divisor) as u8;
+
+ fractional_part %= divisor;
+ divisor /= 10;
+ pos += 1;
+ }
+
+ // If a precision < 9 was specified, there may be some non-zero
+ // digits left that weren't written into the buffer. In that case we
+ // need to perform rounding to match the semantics of printing
+ // normal floating point numbers. However, we only need to do work
+ // when rounding up. This happens if the first digit of the
+ // remaining ones is >= 5.
+ if fractional_part > 0 && fractional_part >= divisor * 5 {
+ // Round up the number contained in the buffer. We go through
+ // the buffer backwards and keep track of the carry.
+ let mut rev_pos = pos;
+ let mut carry = true;
+ while carry && rev_pos > 0 {
+ rev_pos -= 1;
+
+ // If the digit in the buffer is not '9', we just need to
+ // increment it and can stop then (since we don't have a
+ // carry anymore). Otherwise, we set it to '0' (overflow)
+ // and continue.
+ if buf[rev_pos] < b'9' {
+ buf[rev_pos] += 1;
+ carry = false;
+ } else {
+ buf[rev_pos] = b'0';
+ }
+ }
+
+ // If we still have the carry bit set, that means that we set
+ // the whole buffer to '0's and need to increment the integer
+ // part.
+ if carry {
+ integer_part += 1;
+ }
+ }
+
+ // Determine the end of the buffer: if precision is set, we just
+ // use as many digits from the buffer (capped to 9). If it isn't
+ // set, we only use all digits up to the last non-zero one.
+ let end = f.precision().map(|p| crate::cmp::min(p, 9)).unwrap_or(pos);
+
+ // This closure emits the formatted duration without emitting any
+ // padding (padding is calculated below).
+ let emit_without_padding = |f: &mut fmt::Formatter<'_>| {
+ write!(f, "{}{}", prefix, integer_part)?;
+
+ // Write the decimal point and the fractional part (if any).
+ if end > 0 {
+ // SAFETY: We are only writing ASCII digits into the buffer and
+ // it was initialized with '0's, so it contains valid UTF8.
+ let s = unsafe { crate::str::from_utf8_unchecked(&buf[..end]) };
+
+                // If the user requests a precision > 9, we pad '0's at the end.
+ let w = f.precision().unwrap_or(pos);
+ write!(f, ".{:0<width$}", s, width = w)?;
+ }
+
+ write!(f, "{}", postfix)
+ };
+
+ match f.width() {
+ None => {
+ // No `width` specified. There's no need to calculate the
+ // length of the output in this case, just emit it.
+ emit_without_padding(f)
+ }
+ Some(requested_w) => {
+ // A `width` was specified. Calculate the actual width of
+ // the output in order to calculate the required padding.
+ // It consists of 4 parts:
+ // 1. The prefix: is either "+" or "", so we can just use len().
+ // 2. The postfix: can be "µs" so we have to count UTF8 characters.
+ let mut actual_w = prefix.len() + postfix.chars().count();
+ // 3. The integer part:
+ if let Some(log) = integer_part.checked_log10() {
+ // integer_part is > 0, so has length log10(x)+1
+ actual_w += 1 + log as usize;
+ } else {
+ // integer_part is 0, so has length 1.
+ actual_w += 1;
+ }
+ // 4. The fractional part (if any):
+ if end > 0 {
+ let frac_part_w = f.precision().unwrap_or(pos);
+ actual_w += 1 + frac_part_w;
+ }
+
+ if requested_w <= actual_w {
+ // Output is already longer than `width`, so don't pad.
+ emit_without_padding(f)
+ } else {
+ // We need to add padding. Use the `Formatter::padding` helper function.
+ let default_align = crate::fmt::rt::v1::Alignment::Left;
+ let post_padding = f.padding(requested_w - actual_w, default_align)?;
+ emit_without_padding(f)?;
+ post_padding.write(f)
+ }
+ }
+ }
+ }
+
+ // Print leading '+' sign if requested
+ let prefix = if f.sign_plus() { "+" } else { "" };
+
+ if self.secs > 0 {
+ fmt_decimal(f, self.secs, self.nanos, NANOS_PER_SEC / 10, prefix, "s")
+ } else if self.nanos >= NANOS_PER_MILLI {
+ fmt_decimal(
+ f,
+ (self.nanos / NANOS_PER_MILLI) as u64,
+ self.nanos % NANOS_PER_MILLI,
+ NANOS_PER_MILLI / 10,
+ prefix,
+ "ms",
+ )
+ } else if self.nanos >= NANOS_PER_MICRO {
+ fmt_decimal(
+ f,
+ (self.nanos / NANOS_PER_MICRO) as u64,
+ self.nanos % NANOS_PER_MICRO,
+ NANOS_PER_MICRO / 10,
+ prefix,
+ "µs",
+ )
+ } else {
+ fmt_decimal(f, self.nanos as u64, 0, 1, prefix, "ns")
+ }
+ }
+}
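+
+// A sketch of the resulting `Debug` output (derived from `fmt_decimal` above):
+//
+//     assert_eq!(format!("{:?}", Duration::new(2, 700_000_000)), "2.7s");
+//     assert_eq!(format!("{:?}", Duration::from_millis(100)), "100ms");
+//     // a requested precision pads the fractional part with '0's:
+//     assert_eq!(format!("{:.3?}", Duration::from_micros(1)), "1.000µs");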
+
+/// An error which can be returned when converting a floating-point value of seconds
+/// into a [`Duration`].
+///
+/// This error is used as the error type for [`Duration::try_from_secs_f32`] and
+/// [`Duration::try_from_secs_f64`].
+///
+/// # Example
+///
+/// ```
+/// #![feature(duration_checked_float)]
+/// use std::time::Duration;
+///
+/// if let Err(e) = Duration::try_from_secs_f32(-1.0) {
+/// println!("Failed conversion to Duration: {e}");
+/// }
+/// ```
+#[derive(Debug, Clone, PartialEq, Eq)]
+#[unstable(feature = "duration_checked_float", issue = "83400")]
+pub struct FromFloatSecsError {
+ kind: FromFloatSecsErrorKind,
+}
+
+impl FromFloatSecsError {
+ const fn description(&self) -> &'static str {
+ match self.kind {
+ FromFloatSecsErrorKind::Negative => {
+ "can not convert float seconds to Duration: value is negative"
+ }
+ FromFloatSecsErrorKind::OverflowOrNan => {
+ "can not convert float seconds to Duration: value is either too big or NaN"
+ }
+ }
+ }
+}
+
+#[unstable(feature = "duration_checked_float", issue = "83400")]
+impl fmt::Display for FromFloatSecsError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.description().fmt(f)
+ }
+}
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+enum FromFloatSecsErrorKind {
+ // Value is negative.
+ Negative,
+ // Value is either too big to be represented as `Duration` or `NaN`.
+ OverflowOrNan,
+}
+
+macro_rules! try_from_secs {
+ (
+ secs = $secs: expr,
+ mantissa_bits = $mant_bits: literal,
+ exponent_bits = $exp_bits: literal,
+ offset = $offset: literal,
+ bits_ty = $bits_ty:ty,
+ double_ty = $double_ty:ty,
+ ) => {{
+ const MIN_EXP: i16 = 1 - (1i16 << $exp_bits) / 2;
+ const MANT_MASK: $bits_ty = (1 << $mant_bits) - 1;
+ const EXP_MASK: $bits_ty = (1 << $exp_bits) - 1;
+
+ if $secs.is_sign_negative() {
+ return Err(FromFloatSecsError { kind: FromFloatSecsErrorKind::Negative });
+ }
+
+ let bits = $secs.to_bits();
+ let mant = (bits & MANT_MASK) | (MANT_MASK + 1);
+ let exp = ((bits >> $mant_bits) & EXP_MASK) as i16 + MIN_EXP;
+
+ let (secs, nanos) = if exp < -31 {
+            // the input represents less than 1ns and cannot be rounded to it
+ (0u64, 0u32)
+ } else if exp < 0 {
+ // the input is less than 1 second
+ let t = <$double_ty>::from(mant) << ($offset + exp);
+ let nanos_offset = $mant_bits + $offset;
+ let nanos_tmp = u128::from(NANOS_PER_SEC) * u128::from(t);
+ let nanos = (nanos_tmp >> nanos_offset) as u32;
+
+ let rem_mask = (1 << nanos_offset) - 1;
+ let rem_msb_mask = 1 << (nanos_offset - 1);
+ let rem = nanos_tmp & rem_mask;
+ let is_tie = rem == rem_msb_mask;
+ let is_even = (nanos & 1) == 0;
+ let rem_msb = nanos_tmp & rem_msb_mask == 0;
+ let add_ns = !(rem_msb || (is_even && is_tie));
+
+            // f32 does not have enough precision to trigger the second branch,
+            // since it cannot represent numbers between 0.999_999_940_395 and 1.0.
+ let nanos = nanos + add_ns as u32;
+ if ($mant_bits == 23) || (nanos != NANOS_PER_SEC) { (0, nanos) } else { (1, 0) }
+ } else if exp < $mant_bits {
+ let secs = u64::from(mant >> ($mant_bits - exp));
+ let t = <$double_ty>::from((mant << exp) & MANT_MASK);
+ let nanos_offset = $mant_bits;
+ let nanos_tmp = <$double_ty>::from(NANOS_PER_SEC) * t;
+ let nanos = (nanos_tmp >> nanos_offset) as u32;
+
+ let rem_mask = (1 << nanos_offset) - 1;
+ let rem_msb_mask = 1 << (nanos_offset - 1);
+ let rem = nanos_tmp & rem_mask;
+ let is_tie = rem == rem_msb_mask;
+ let is_even = (nanos & 1) == 0;
+ let rem_msb = nanos_tmp & rem_msb_mask == 0;
+ let add_ns = !(rem_msb || (is_even && is_tie));
+
+            // f32 does not have enough precision to trigger the second branch.
+            // For example, it cannot represent numbers between 1.999_999_880...
+            // and 2.0. Larger values leave even less precision for the
+            // fractional part.
+ let nanos = nanos + add_ns as u32;
+ if ($mant_bits == 23) || (nanos != NANOS_PER_SEC) {
+ (secs, nanos)
+ } else {
+ (secs + 1, 0)
+ }
+ } else if exp < 64 {
+ // the input has no fractional part
+ let secs = u64::from(mant) << (exp - $mant_bits);
+ (secs, 0)
+ } else {
+ return Err(FromFloatSecsError { kind: FromFloatSecsErrorKind::OverflowOrNan });
+ };
+
+ Ok(Duration { secs, nanos })
+ }};
+}
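+
+// A worked example of the decomposition above for `f32`
+// (`mantissa_bits = 23`, `exponent_bits = 8`), as a sketch:
+//
+//     let bits = 1.5f32.to_bits();                  // 0x3FC0_0000
+//     let mant = (bits & 0x7F_FFFF) | 0x80_0000;    // 0xC0_0000 = 1.5 * 2^23
+//     let exp = ((bits >> 23) & 0xFF) as i16 - 127; // 0
+//
+// With `exp = 0` the `exp < $mant_bits` branch applies: `secs = mant >> 23 = 1`
+// and the remaining mantissa bits yield a 500_000_000ns fractional part.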
+
+impl Duration {
+ /// The checked version of [`from_secs_f32`].
+ ///
+ /// [`from_secs_f32`]: Duration::from_secs_f32
+ ///
+    /// This constructor will return an `Err` if `secs` is negative, overflows `Duration`, or is not finite.
+ ///
+ /// # Examples
+ /// ```
+ /// #![feature(duration_checked_float)]
+ ///
+ /// use std::time::Duration;
+ ///
+ /// let res = Duration::try_from_secs_f32(0.0);
+ /// assert_eq!(res, Ok(Duration::new(0, 0)));
+ /// let res = Duration::try_from_secs_f32(1e-20);
+ /// assert_eq!(res, Ok(Duration::new(0, 0)));
+ /// let res = Duration::try_from_secs_f32(4.2e-7);
+ /// assert_eq!(res, Ok(Duration::new(0, 420)));
+ /// let res = Duration::try_from_secs_f32(2.7);
+ /// assert_eq!(res, Ok(Duration::new(2, 700_000_048)));
+ /// let res = Duration::try_from_secs_f32(3e10);
+ /// assert_eq!(res, Ok(Duration::new(30_000_001_024, 0)));
+ /// // subnormal float:
+ /// let res = Duration::try_from_secs_f32(f32::from_bits(1));
+ /// assert_eq!(res, Ok(Duration::new(0, 0)));
+ ///
+ /// let res = Duration::try_from_secs_f32(-5.0);
+ /// assert!(res.is_err());
+ /// let res = Duration::try_from_secs_f32(f32::NAN);
+ /// assert!(res.is_err());
+ /// let res = Duration::try_from_secs_f32(2e19);
+ /// assert!(res.is_err());
+ ///
+ /// // the conversion uses rounding with tie resolution to even
+ /// let res = Duration::try_from_secs_f32(0.999e-9);
+ /// assert_eq!(res, Ok(Duration::new(0, 1)));
+ ///
+ /// // this float represents exactly 976562.5e-9
+ /// let val = f32::from_bits(0x3A80_0000);
+ /// let res = Duration::try_from_secs_f32(val);
+ /// assert_eq!(res, Ok(Duration::new(0, 976_562)));
+ ///
+ /// // this float represents exactly 2929687.5e-9
+ /// let val = f32::from_bits(0x3B40_0000);
+ /// let res = Duration::try_from_secs_f32(val);
+ /// assert_eq!(res, Ok(Duration::new(0, 2_929_688)));
+ ///
+ /// // this float represents exactly 1.000_976_562_5
+ /// let val = f32::from_bits(0x3F802000);
+ /// let res = Duration::try_from_secs_f32(val);
+ /// assert_eq!(res, Ok(Duration::new(1, 976_562)));
+ ///
+ /// // this float represents exactly 1.002_929_687_5
+ /// let val = f32::from_bits(0x3F806000);
+ /// let res = Duration::try_from_secs_f32(val);
+ /// assert_eq!(res, Ok(Duration::new(1, 2_929_688)));
+ /// ```
+ #[unstable(feature = "duration_checked_float", issue = "83400")]
+ #[inline]
+ pub const fn try_from_secs_f32(secs: f32) -> Result<Duration, FromFloatSecsError> {
+ try_from_secs!(
+ secs = secs,
+ mantissa_bits = 23,
+ exponent_bits = 8,
+ offset = 41,
+ bits_ty = u32,
+ double_ty = u64,
+ )
+ }
+
+ /// The checked version of [`from_secs_f64`].
+ ///
+ /// [`from_secs_f64`]: Duration::from_secs_f64
+ ///
+    /// This constructor will return an `Err` if `secs` is negative, overflows `Duration`, or is not finite.
+ ///
+ /// # Examples
+ /// ```
+ /// #![feature(duration_checked_float)]
+ ///
+ /// use std::time::Duration;
+ ///
+ /// let res = Duration::try_from_secs_f64(0.0);
+ /// assert_eq!(res, Ok(Duration::new(0, 0)));
+ /// let res = Duration::try_from_secs_f64(1e-20);
+ /// assert_eq!(res, Ok(Duration::new(0, 0)));
+ /// let res = Duration::try_from_secs_f64(4.2e-7);
+ /// assert_eq!(res, Ok(Duration::new(0, 420)));
+ /// let res = Duration::try_from_secs_f64(2.7);
+ /// assert_eq!(res, Ok(Duration::new(2, 700_000_000)));
+ /// let res = Duration::try_from_secs_f64(3e10);
+ /// assert_eq!(res, Ok(Duration::new(30_000_000_000, 0)));
+ /// // subnormal float
+ /// let res = Duration::try_from_secs_f64(f64::from_bits(1));
+ /// assert_eq!(res, Ok(Duration::new(0, 0)));
+ ///
+ /// let res = Duration::try_from_secs_f64(-5.0);
+ /// assert!(res.is_err());
+ /// let res = Duration::try_from_secs_f64(f64::NAN);
+ /// assert!(res.is_err());
+ /// let res = Duration::try_from_secs_f64(2e19);
+ /// assert!(res.is_err());
+ ///
+ /// // the conversion uses rounding with tie resolution to even
+ /// let res = Duration::try_from_secs_f64(0.999e-9);
+ /// assert_eq!(res, Ok(Duration::new(0, 1)));
+ /// let res = Duration::try_from_secs_f64(0.999_999_999_499);
+ /// assert_eq!(res, Ok(Duration::new(0, 999_999_999)));
+ /// let res = Duration::try_from_secs_f64(0.999_999_999_501);
+ /// assert_eq!(res, Ok(Duration::new(1, 0)));
+ /// let res = Duration::try_from_secs_f64(42.999_999_999_499);
+ /// assert_eq!(res, Ok(Duration::new(42, 999_999_999)));
+ /// let res = Duration::try_from_secs_f64(42.999_999_999_501);
+ /// assert_eq!(res, Ok(Duration::new(43, 0)));
+ ///
+ /// // this float represents exactly 976562.5e-9
+ /// let val = f64::from_bits(0x3F50_0000_0000_0000);
+ /// let res = Duration::try_from_secs_f64(val);
+ /// assert_eq!(res, Ok(Duration::new(0, 976_562)));
+ ///
+ /// // this float represents exactly 2929687.5e-9
+ /// let val = f64::from_bits(0x3F68_0000_0000_0000);
+ /// let res = Duration::try_from_secs_f64(val);
+ /// assert_eq!(res, Ok(Duration::new(0, 2_929_688)));
+ ///
+ /// // this float represents exactly 1.000_976_562_5
+ /// let val = f64::from_bits(0x3FF0_0400_0000_0000);
+ /// let res = Duration::try_from_secs_f64(val);
+ /// assert_eq!(res, Ok(Duration::new(1, 976_562)));
+ ///
+ /// // this float represents exactly 1.002_929_687_5
+    /// let val = f64::from_bits(0x3FF0_0C00_0000_0000);
+ /// let res = Duration::try_from_secs_f64(val);
+ /// assert_eq!(res, Ok(Duration::new(1, 2_929_688)));
+ /// ```
+ #[unstable(feature = "duration_checked_float", issue = "83400")]
+ #[inline]
+ pub const fn try_from_secs_f64(secs: f64) -> Result<Duration, FromFloatSecsError> {
+ try_from_secs!(
+ secs = secs,
+ mantissa_bits = 52,
+ exponent_bits = 11,
+ offset = 44,
+ bits_ty = u64,
+ double_ty = u128,
+ )
+ }
+}
diff --git a/library/core/src/tuple.rs b/library/core/src/tuple.rs
new file mode 100644
index 000000000..d189e6400
--- /dev/null
+++ b/library/core/src/tuple.rs
@@ -0,0 +1,159 @@
+// See src/libstd/primitive_docs.rs for documentation.
+
+use crate::cmp::Ordering::*;
+use crate::cmp::*;
+
+// Recursive macro for implementing n-ary tuple functions and operations
+//
+// Also provides implementations for tuples with smaller arity. For example, tuple_impls!(A B C)
+// will implement everything for (A, B, C), (A, B) and (A,).
+macro_rules! tuple_impls {
+ // Stopping criteria (1-ary tuple)
+ ($T:ident) => {
+ tuple_impls!(@impl $T);
+ };
+ // Running criteria (n-ary tuple, with n >= 2)
+ ($T:ident $( $U:ident )+) => {
+ tuple_impls!($( $U )+);
+ tuple_impls!(@impl $T $( $U )+);
+ };
+ // "Private" internal implementation
+ (@impl $( $T:ident )+) => {
+ maybe_tuple_doc! {
+ $($T)+ @
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<$($T:PartialEq),+> PartialEq for ($($T,)+)
+ where
+ last_type!($($T,)+): ?Sized
+ {
+ #[inline]
+ fn eq(&self, other: &($($T,)+)) -> bool {
+ $( ${ignore(T)} self.${index()} == other.${index()} )&&+
+ }
+ #[inline]
+ fn ne(&self, other: &($($T,)+)) -> bool {
+ $( ${ignore(T)} self.${index()} != other.${index()} )||+
+ }
+ }
+ }
+
+ maybe_tuple_doc! {
+ $($T)+ @
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<$($T:Eq),+> Eq for ($($T,)+)
+ where
+ last_type!($($T,)+): ?Sized
+ {}
+ }
+
+ maybe_tuple_doc! {
+ $($T)+ @
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<$($T:PartialOrd + PartialEq),+> PartialOrd for ($($T,)+)
+ where
+ last_type!($($T,)+): ?Sized
+ {
+ #[inline]
+ fn partial_cmp(&self, other: &($($T,)+)) -> Option<Ordering> {
+ lexical_partial_cmp!($( ${ignore(T)} self.${index()}, other.${index()} ),+)
+ }
+ #[inline]
+ fn lt(&self, other: &($($T,)+)) -> bool {
+ lexical_ord!(lt, $( ${ignore(T)} self.${index()}, other.${index()} ),+)
+ }
+ #[inline]
+ fn le(&self, other: &($($T,)+)) -> bool {
+ lexical_ord!(le, $( ${ignore(T)} self.${index()}, other.${index()} ),+)
+ }
+ #[inline]
+ fn ge(&self, other: &($($T,)+)) -> bool {
+ lexical_ord!(ge, $( ${ignore(T)} self.${index()}, other.${index()} ),+)
+ }
+ #[inline]
+ fn gt(&self, other: &($($T,)+)) -> bool {
+ lexical_ord!(gt, $( ${ignore(T)} self.${index()}, other.${index()} ),+)
+ }
+ }
+ }
+
+ maybe_tuple_doc! {
+ $($T)+ @
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<$($T:Ord),+> Ord for ($($T,)+)
+ where
+ last_type!($($T,)+): ?Sized
+ {
+ #[inline]
+ fn cmp(&self, other: &($($T,)+)) -> Ordering {
+ lexical_cmp!($( ${ignore(T)} self.${index()}, other.${index()} ),+)
+ }
+ }
+ }
+
+ maybe_tuple_doc! {
+ $($T)+ @
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<$($T:Default),+> Default for ($($T,)+) {
+ #[inline]
+ fn default() -> ($($T,)+) {
+ ($({ let x: $T = Default::default(); x},)+)
+ }
+ }
+ }
+ }
+}
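+
+// For instance, `tuple_impls!(B A)` recurses as
+//
+//     tuple_impls!(B A) => tuple_impls!(A); tuple_impls!(@impl B A);
+//     tuple_impls!(A)   => tuple_impls!(@impl A);
+//
+// so both `(B, A)` and `(A,)` receive the impls above.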
+
+// If this is a unary tuple, it adds a doc comment.
+// Otherwise, it hides the docs entirely.
+macro_rules! maybe_tuple_doc {
+ ($a:ident @ #[$meta:meta] $item:item) => {
+ #[cfg_attr(not(bootstrap), doc(fake_variadic))]
+ #[doc = "This trait is implemented for tuples up to twelve items long."]
+ #[$meta]
+ $item
+ };
+ ($a:ident $($rest_a:ident)+ @ #[$meta:meta] $item:item) => {
+ #[doc(hidden)]
+ #[$meta]
+ $item
+ };
+}
+
+// Constructs an expression that performs a lexical ordering using method $rel.
+// The values are interleaved, so the macro invocation for
+// `(a1, a2, a3) < (b1, b2, b3)` would be `lexical_ord!(lt, a1, b1, a2, b2,
+// a3, b3)` (and similarly for `lexical_cmp`)
+macro_rules! lexical_ord {
+ ($rel: ident, $a:expr, $b:expr, $($rest_a:expr, $rest_b:expr),+) => {
+ if $a != $b { lexical_ord!($rel, $a, $b) }
+ else { lexical_ord!($rel, $($rest_a, $rest_b),+) }
+ };
+ ($rel: ident, $a:expr, $b:expr) => { ($a) . $rel (& $b) };
+}
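+
+// e.g. `lexical_ord!(lt, a1, b1, a2, b2)` expands to
+// `if a1 != b1 { (a1).lt(&b1) } else { (a2).lt(&b2) }`.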
+
+macro_rules! lexical_partial_cmp {
+ ($a:expr, $b:expr, $($rest_a:expr, $rest_b:expr),+) => {
+ match ($a).partial_cmp(&$b) {
+ Some(Equal) => lexical_partial_cmp!($($rest_a, $rest_b),+),
+ ordering => ordering
+ }
+ };
+ ($a:expr, $b:expr) => { ($a).partial_cmp(&$b) };
+}
+
+macro_rules! lexical_cmp {
+ ($a:expr, $b:expr, $($rest_a:expr, $rest_b:expr),+) => {
+ match ($a).cmp(&$b) {
+ Equal => lexical_cmp!($($rest_a, $rest_b),+),
+ ordering => ordering
+ }
+ };
+ ($a:expr, $b:expr) => { ($a).cmp(&$b) };
+}
+
+macro_rules! last_type {
+ ($a:ident,) => { $a };
+ ($a:ident, $($rest_a:ident,)+) => { last_type!($($rest_a,)+) };
+}
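+
+// e.g. `last_type!(A, B, C,)` reduces to `C`; this is how the impls above
+// relax the `Sized` bound only for the final tuple element.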
+
+tuple_impls!(E D C B A Z Y X W V U T);
diff --git a/library/core/src/unicode/mod.rs b/library/core/src/unicode/mod.rs
new file mode 100644
index 000000000..72fa059b7
--- /dev/null
+++ b/library/core/src/unicode/mod.rs
@@ -0,0 +1,31 @@
+#![unstable(feature = "unicode_internals", issue = "none")]
+#![allow(missing_docs)]
+
+pub(crate) mod printable;
+mod unicode_data;
+
+/// The version of [Unicode](https://www.unicode.org/) that the Unicode parts of
+/// `char` and `str` methods are based on.
+///
+/// New versions of Unicode are released regularly and subsequently all methods
+/// in the standard library depending on Unicode are updated. Therefore the
+/// behavior of some `char` and `str` methods and the value of this constant
+/// changes over time. This is *not* considered to be a breaking change.
+///
+/// The version numbering scheme is explained in
+/// [Unicode 11.0 or later, Section 3.1 Versions of the Unicode Standard](https://www.unicode.org/versions/Unicode11.0.0/ch03.pdf#page=4).
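+///
+/// # Examples
+///
+/// ```
+/// use std::char;
+///
+/// // A `(major, minor, update)` triple; (14, 0, 0) in this snapshot.
+/// let (major, _minor, _update) = char::UNICODE_VERSION;
+/// assert!(major >= 10);
+/// ```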
+#[stable(feature = "unicode_version", since = "1.45.0")]
+pub const UNICODE_VERSION: (u8, u8, u8) = unicode_data::UNICODE_VERSION;
+
+// For use in liballoc, not re-exported in libstd.
+pub use unicode_data::{
+ case_ignorable::lookup as Case_Ignorable, cased::lookup as Cased, conversions,
+};
+
+pub(crate) use unicode_data::alphabetic::lookup as Alphabetic;
+pub(crate) use unicode_data::cc::lookup as Cc;
+pub(crate) use unicode_data::grapheme_extend::lookup as Grapheme_Extend;
+pub(crate) use unicode_data::lowercase::lookup as Lowercase;
+pub(crate) use unicode_data::n::lookup as N;
+pub(crate) use unicode_data::uppercase::lookup as Uppercase;
+pub(crate) use unicode_data::white_space::lookup as White_Space;
diff --git a/library/core/src/unicode/printable.py b/library/core/src/unicode/printable.py
new file mode 100755
index 000000000..7c37f5f09
--- /dev/null
+++ b/library/core/src/unicode/printable.py
@@ -0,0 +1,243 @@
+#!/usr/bin/env python
+
+# This script uses the following Unicode tables:
+# - UnicodeData.txt
+
+
+from collections import namedtuple
+import csv
+import os
+import subprocess
+
+NUM_CODEPOINTS=0x110000
+
+def to_ranges(iter):
+ current = None
+ for i in iter:
+ if current is None or i != current[1] or i in (0x10000, 0x20000):
+ if current is not None:
+ yield tuple(current)
+ current = [i, i + 1]
+ else:
+ current[1] += 1
+ if current is not None:
+ yield tuple(current)
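+
+# e.g. to_ranges([1, 2, 3, 7]) yields (1, 4) and then (7, 8); runs are also
+# split at the 0x10000 and 0x20000 boundaries used below.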
+
+def get_escaped(codepoints):
+ for c in codepoints:
+ if (c.class_ or "Cn") in "Cc Cf Cs Co Cn Zl Zp Zs".split() and c.value != ord(' '):
+ yield c.value
+
+def get_file(f):
+ try:
+ return open(os.path.basename(f))
+ except FileNotFoundError:
+ subprocess.run(["curl", "-O", f], check=True)
+ return open(os.path.basename(f))
+
+Codepoint = namedtuple('Codepoint', 'value class_')
+
+def get_codepoints(f):
+ r = csv.reader(f, delimiter=";")
+ prev_codepoint = 0
+ class_first = None
+ for row in r:
+ codepoint = int(row[0], 16)
+ name = row[1]
+ class_ = row[2]
+
+ if class_first is not None:
+ if not name.endswith("Last>"):
+ raise ValueError("Missing Last after First")
+
+ for c in range(prev_codepoint + 1, codepoint):
+ yield Codepoint(c, class_first)
+
+ class_first = None
+ if name.endswith("First>"):
+ class_first = class_
+
+ yield Codepoint(codepoint, class_)
+ prev_codepoint = codepoint
+
+ if class_first is not None:
+ raise ValueError("Missing Last after First")
+
+ for c in range(prev_codepoint + 1, NUM_CODEPOINTS):
+ yield Codepoint(c, None)
+
+def compress_singletons(singletons):
+ uppers = [] # (upper, # items in lowers)
+ lowers = []
+
+ for i in singletons:
+ upper = i >> 8
+ lower = i & 0xff
+ if len(uppers) == 0 or uppers[-1][0] != upper:
+ uppers.append((upper, 1))
+ else:
+ upper, count = uppers[-1]
+ uppers[-1] = upper, count + 1
+ lowers.append(lower)
+
+ return uppers, lowers
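+
+# e.g. compress_singletons([0x0100, 0x0105, 0x0233]) returns
+# uppers = [(0x01, 2), (0x02, 1)] and lowers = [0x00, 0x05, 0x33].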
+
+def compress_normal(normal):
+ # lengths 0x00..0x7f are encoded as 00, 01, ..., 7e, 7f
+ # lengths 0x80..0x7fff are encoded as 80 80, 80 81, ..., ff fe, ff ff
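+    # e.g. truelen = 0x123 is emitted as the two bytes 0x81, 0x23
+    # (0x80 | (0x123 >> 8), then 0x123 & 0xff), while truelen = 0x23
+    # fits into the single byte 0x23.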
+ compressed = [] # [truelen, (truelenaux), falselen, (falselenaux)]
+
+ prev_start = 0
+ for start, count in normal:
+ truelen = start - prev_start
+ falselen = count
+ prev_start = start + count
+
+ assert truelen < 0x8000 and falselen < 0x8000
+ entry = []
+ if truelen > 0x7f:
+ entry.append(0x80 | (truelen >> 8))
+ entry.append(truelen & 0xff)
+ else:
+ entry.append(truelen & 0x7f)
+ if falselen > 0x7f:
+ entry.append(0x80 | (falselen >> 8))
+ entry.append(falselen & 0xff)
+ else:
+ entry.append(falselen & 0x7f)
+
+ compressed.append(entry)
+
+ return compressed
+
+def print_singletons(uppers, lowers, uppersname, lowersname):
+ print("#[rustfmt::skip]")
+ print("const {}: &[(u8, u8)] = &[".format(uppersname))
+ for u, c in uppers:
+ print(" ({:#04x}, {}),".format(u, c))
+ print("];")
+ print("#[rustfmt::skip]")
+ print("const {}: &[u8] = &[".format(lowersname))
+ for i in range(0, len(lowers), 8):
+ print(" {}".format(" ".join("{:#04x},".format(l) for l in lowers[i:i+8])))
+ print("];")
+
+def print_normal(normal, normalname):
+ print("#[rustfmt::skip]")
+ print("const {}: &[u8] = &[".format(normalname))
+ for v in normal:
+ print(" {}".format(" ".join("{:#04x},".format(i) for i in v)))
+ print("];")
+
+def main():
+ file = get_file("https://www.unicode.org/Public/UNIDATA/UnicodeData.txt")
+
+ codepoints = get_codepoints(file)
+
+ CUTOFF=0x10000
+ singletons0 = []
+ singletons1 = []
+ normal0 = []
+ normal1 = []
+ extra = []
+
+ for a, b in to_ranges(get_escaped(codepoints)):
+ if a > 2 * CUTOFF:
+ extra.append((a, b - a))
+ elif a == b - 1:
+ if a & CUTOFF:
+ singletons1.append(a & ~CUTOFF)
+ else:
+ singletons0.append(a)
+ elif a == b - 2:
+ if a & CUTOFF:
+ singletons1.append(a & ~CUTOFF)
+ singletons1.append((a + 1) & ~CUTOFF)
+ else:
+ singletons0.append(a)
+ singletons0.append(a + 1)
+ else:
+ if a >= 2 * CUTOFF:
+ extra.append((a, b - a))
+ elif a & CUTOFF:
+ normal1.append((a & ~CUTOFF, b - a))
+ else:
+ normal0.append((a, b - a))
+
+ singletons0u, singletons0l = compress_singletons(singletons0)
+ singletons1u, singletons1l = compress_singletons(singletons1)
+ normal0 = compress_normal(normal0)
+ normal1 = compress_normal(normal1)
+
+ print("""\
+// NOTE: The following code was generated by "library/core/src/unicode/printable.py",
+// do not edit directly!
+
+fn check(x: u16, singletonuppers: &[(u8, u8)], singletonlowers: &[u8], normal: &[u8]) -> bool {
+ let xupper = (x >> 8) as u8;
+ let mut lowerstart = 0;
+ for &(upper, lowercount) in singletonuppers {
+ let lowerend = lowerstart + lowercount as usize;
+ if xupper == upper {
+ for &lower in &singletonlowers[lowerstart..lowerend] {
+ if lower == x as u8 {
+ return false;
+ }
+ }
+ } else if xupper < upper {
+ break;
+ }
+ lowerstart = lowerend;
+ }
+
+ let mut x = x as i32;
+ let mut normal = normal.iter().cloned();
+ let mut current = true;
+ while let Some(v) = normal.next() {
+ let len = if v & 0x80 != 0 {
+ ((v & 0x7f) as i32) << 8 | normal.next().unwrap() as i32
+ } else {
+ v as i32
+ };
+ x -= len;
+ if x < 0 {
+ break;
+ }
+ current = !current;
+ }
+ current
+}
+
+pub(crate) fn is_printable(x: char) -> bool {
+ let x = x as u32;
+ let lower = x as u16;
+
+ if x < 32 {
+ // ASCII fast path
+ false
+ } else if x < 127 {
+ // ASCII fast path
+ true
+ } else if x < 0x10000 {
+ check(lower, SINGLETONS0U, SINGLETONS0L, NORMAL0)
+ } else if x < 0x20000 {
+ check(lower, SINGLETONS1U, SINGLETONS1L, NORMAL1)
+ } else {\
+""")
+ for a, b in extra:
+ print(" if 0x{:x} <= x && x < 0x{:x} {{".format(a, a + b))
+ print(" return false;")
+ print(" }")
+ print("""\
+ true
+ }
+}\
+""")
+ print()
+ print_singletons(singletons0u, singletons0l, 'SINGLETONS0U', 'SINGLETONS0L')
+ print_singletons(singletons1u, singletons1l, 'SINGLETONS1U', 'SINGLETONS1L')
+ print_normal(normal0, 'NORMAL0')
+ print_normal(normal1, 'NORMAL1')
+
+if __name__ == '__main__':
+ main()
diff --git a/library/core/src/unicode/printable.rs b/library/core/src/unicode/printable.rs
new file mode 100644
index 000000000..31cf88a41
--- /dev/null
+++ b/library/core/src/unicode/printable.rs
@@ -0,0 +1,573 @@
+// NOTE: The following code was generated by "library/core/src/unicode/printable.py",
+// do not edit directly!
+
+fn check(x: u16, singletonuppers: &[(u8, u8)], singletonlowers: &[u8], normal: &[u8]) -> bool {
+ let xupper = (x >> 8) as u8;
+ let mut lowerstart = 0;
+ for &(upper, lowercount) in singletonuppers {
+ let lowerend = lowerstart + lowercount as usize;
+ if xupper == upper {
+ for &lower in &singletonlowers[lowerstart..lowerend] {
+ if lower == x as u8 {
+ return false;
+ }
+ }
+ } else if xupper < upper {
+ break;
+ }
+ lowerstart = lowerend;
+ }
+
+ let mut x = x as i32;
+ let mut normal = normal.iter().cloned();
+ let mut current = true;
+ while let Some(v) = normal.next() {
+ let len = if v & 0x80 != 0 {
+ ((v & 0x7f) as i32) << 8 | normal.next().unwrap() as i32
+ } else {
+ v as i32
+ };
+ x -= len;
+ if x < 0 {
+ break;
+ }
+ current = !current;
+ }
+ current
+}
+
+pub(crate) fn is_printable(x: char) -> bool {
+ let x = x as u32;
+ let lower = x as u16;
+
+ if x < 32 {
+ // ASCII fast path
+ false
+ } else if x < 127 {
+ // ASCII fast path
+ true
+ } else if x < 0x10000 {
+ check(lower, SINGLETONS0U, SINGLETONS0L, NORMAL0)
+ } else if x < 0x20000 {
+ check(lower, SINGLETONS1U, SINGLETONS1L, NORMAL1)
+ } else {
+ if 0x2a6e0 <= x && x < 0x2a700 {
+ return false;
+ }
+ if 0x2b739 <= x && x < 0x2b740 {
+ return false;
+ }
+ if 0x2b81e <= x && x < 0x2b820 {
+ return false;
+ }
+ if 0x2cea2 <= x && x < 0x2ceb0 {
+ return false;
+ }
+ if 0x2ebe1 <= x && x < 0x2f800 {
+ return false;
+ }
+ if 0x2fa1e <= x && x < 0x30000 {
+ return false;
+ }
+ if 0x3134b <= x && x < 0xe0100 {
+ return false;
+ }
+ if 0xe01f0 <= x && x < 0x110000 {
+ return false;
+ }
+ true
+ }
+}
+
+#[rustfmt::skip]
+const SINGLETONS0U: &[(u8, u8)] = &[
+ (0x00, 1),
+ (0x03, 5),
+ (0x05, 6),
+ (0x06, 2),
+ (0x07, 6),
+ (0x08, 7),
+ (0x09, 17),
+ (0x0a, 28),
+ (0x0b, 25),
+ (0x0c, 26),
+ (0x0d, 16),
+ (0x0e, 13),
+ (0x0f, 4),
+ (0x10, 3),
+ (0x12, 18),
+ (0x13, 9),
+ (0x16, 1),
+ (0x17, 4),
+ (0x18, 1),
+ (0x19, 3),
+ (0x1a, 7),
+ (0x1b, 1),
+ (0x1c, 2),
+ (0x1f, 22),
+ (0x20, 3),
+ (0x2b, 3),
+ (0x2d, 11),
+ (0x2e, 1),
+ (0x30, 3),
+ (0x31, 2),
+ (0x32, 1),
+ (0xa7, 2),
+ (0xa9, 2),
+ (0xaa, 4),
+ (0xab, 8),
+ (0xfa, 2),
+ (0xfb, 5),
+ (0xfd, 2),
+ (0xfe, 3),
+ (0xff, 9),
+];
+#[rustfmt::skip]
+const SINGLETONS0L: &[u8] = &[
+ 0xad, 0x78, 0x79, 0x8b, 0x8d, 0xa2, 0x30, 0x57,
+ 0x58, 0x8b, 0x8c, 0x90, 0x1c, 0xdd, 0x0e, 0x0f,
+ 0x4b, 0x4c, 0xfb, 0xfc, 0x2e, 0x2f, 0x3f, 0x5c,
+ 0x5d, 0x5f, 0xe2, 0x84, 0x8d, 0x8e, 0x91, 0x92,
+ 0xa9, 0xb1, 0xba, 0xbb, 0xc5, 0xc6, 0xc9, 0xca,
+ 0xde, 0xe4, 0xe5, 0xff, 0x00, 0x04, 0x11, 0x12,
+ 0x29, 0x31, 0x34, 0x37, 0x3a, 0x3b, 0x3d, 0x49,
+ 0x4a, 0x5d, 0x84, 0x8e, 0x92, 0xa9, 0xb1, 0xb4,
+ 0xba, 0xbb, 0xc6, 0xca, 0xce, 0xcf, 0xe4, 0xe5,
+ 0x00, 0x04, 0x0d, 0x0e, 0x11, 0x12, 0x29, 0x31,
+ 0x34, 0x3a, 0x3b, 0x45, 0x46, 0x49, 0x4a, 0x5e,
+ 0x64, 0x65, 0x84, 0x91, 0x9b, 0x9d, 0xc9, 0xce,
+ 0xcf, 0x0d, 0x11, 0x29, 0x3a, 0x3b, 0x45, 0x49,
+ 0x57, 0x5b, 0x5c, 0x5e, 0x5f, 0x64, 0x65, 0x8d,
+ 0x91, 0xa9, 0xb4, 0xba, 0xbb, 0xc5, 0xc9, 0xdf,
+ 0xe4, 0xe5, 0xf0, 0x0d, 0x11, 0x45, 0x49, 0x64,
+ 0x65, 0x80, 0x84, 0xb2, 0xbc, 0xbe, 0xbf, 0xd5,
+ 0xd7, 0xf0, 0xf1, 0x83, 0x85, 0x8b, 0xa4, 0xa6,
+ 0xbe, 0xbf, 0xc5, 0xc7, 0xce, 0xcf, 0xda, 0xdb,
+ 0x48, 0x98, 0xbd, 0xcd, 0xc6, 0xce, 0xcf, 0x49,
+ 0x4e, 0x4f, 0x57, 0x59, 0x5e, 0x5f, 0x89, 0x8e,
+ 0x8f, 0xb1, 0xb6, 0xb7, 0xbf, 0xc1, 0xc6, 0xc7,
+ 0xd7, 0x11, 0x16, 0x17, 0x5b, 0x5c, 0xf6, 0xf7,
+ 0xfe, 0xff, 0x80, 0x6d, 0x71, 0xde, 0xdf, 0x0e,
+ 0x1f, 0x6e, 0x6f, 0x1c, 0x1d, 0x5f, 0x7d, 0x7e,
+ 0xae, 0xaf, 0x7f, 0xbb, 0xbc, 0x16, 0x17, 0x1e,
+ 0x1f, 0x46, 0x47, 0x4e, 0x4f, 0x58, 0x5a, 0x5c,
+ 0x5e, 0x7e, 0x7f, 0xb5, 0xc5, 0xd4, 0xd5, 0xdc,
+ 0xf0, 0xf1, 0xf5, 0x72, 0x73, 0x8f, 0x74, 0x75,
+ 0x96, 0x26, 0x2e, 0x2f, 0xa7, 0xaf, 0xb7, 0xbf,
+ 0xc7, 0xcf, 0xd7, 0xdf, 0x9a, 0x40, 0x97, 0x98,
+ 0x30, 0x8f, 0x1f, 0xd2, 0xd4, 0xce, 0xff, 0x4e,
+ 0x4f, 0x5a, 0x5b, 0x07, 0x08, 0x0f, 0x10, 0x27,
+ 0x2f, 0xee, 0xef, 0x6e, 0x6f, 0x37, 0x3d, 0x3f,
+ 0x42, 0x45, 0x90, 0x91, 0x53, 0x67, 0x75, 0xc8,
+ 0xc9, 0xd0, 0xd1, 0xd8, 0xd9, 0xe7, 0xfe, 0xff,
+];
+#[rustfmt::skip]
+const SINGLETONS1U: &[(u8, u8)] = &[
+ (0x00, 6),
+ (0x01, 1),
+ (0x03, 1),
+ (0x04, 2),
+ (0x05, 7),
+ (0x07, 2),
+ (0x08, 8),
+ (0x09, 2),
+ (0x0a, 5),
+ (0x0b, 2),
+ (0x0e, 4),
+ (0x10, 1),
+ (0x11, 2),
+ (0x12, 5),
+ (0x13, 17),
+ (0x14, 1),
+ (0x15, 2),
+ (0x17, 2),
+ (0x19, 13),
+ (0x1c, 5),
+ (0x1d, 8),
+ (0x24, 1),
+ (0x6a, 4),
+ (0x6b, 2),
+ (0xaf, 3),
+ (0xbc, 2),
+ (0xcf, 2),
+ (0xd1, 2),
+ (0xd4, 12),
+ (0xd5, 9),
+ (0xd6, 2),
+ (0xd7, 2),
+ (0xda, 1),
+ (0xe0, 5),
+ (0xe1, 2),
+ (0xe7, 4),
+ (0xe8, 2),
+ (0xee, 32),
+ (0xf0, 4),
+ (0xf8, 2),
+ (0xfa, 2),
+ (0xfb, 1),
+];
+#[rustfmt::skip]
+const SINGLETONS1L: &[u8] = &[
+ 0x0c, 0x27, 0x3b, 0x3e, 0x4e, 0x4f, 0x8f, 0x9e,
+ 0x9e, 0x9f, 0x7b, 0x8b, 0x93, 0x96, 0xa2, 0xb2,
+ 0xba, 0x86, 0xb1, 0x06, 0x07, 0x09, 0x36, 0x3d,
+ 0x3e, 0x56, 0xf3, 0xd0, 0xd1, 0x04, 0x14, 0x18,
+ 0x36, 0x37, 0x56, 0x57, 0x7f, 0xaa, 0xae, 0xaf,
+ 0xbd, 0x35, 0xe0, 0x12, 0x87, 0x89, 0x8e, 0x9e,
+ 0x04, 0x0d, 0x0e, 0x11, 0x12, 0x29, 0x31, 0x34,
+ 0x3a, 0x45, 0x46, 0x49, 0x4a, 0x4e, 0x4f, 0x64,
+ 0x65, 0x5c, 0xb6, 0xb7, 0x1b, 0x1c, 0x07, 0x08,
+ 0x0a, 0x0b, 0x14, 0x17, 0x36, 0x39, 0x3a, 0xa8,
+ 0xa9, 0xd8, 0xd9, 0x09, 0x37, 0x90, 0x91, 0xa8,
+ 0x07, 0x0a, 0x3b, 0x3e, 0x66, 0x69, 0x8f, 0x92,
+ 0x6f, 0x5f, 0xbf, 0xee, 0xef, 0x5a, 0x62, 0xf4,
+ 0xfc, 0xff, 0x9a, 0x9b, 0x2e, 0x2f, 0x27, 0x28,
+ 0x55, 0x9d, 0xa0, 0xa1, 0xa3, 0xa4, 0xa7, 0xa8,
+ 0xad, 0xba, 0xbc, 0xc4, 0x06, 0x0b, 0x0c, 0x15,
+ 0x1d, 0x3a, 0x3f, 0x45, 0x51, 0xa6, 0xa7, 0xcc,
+ 0xcd, 0xa0, 0x07, 0x19, 0x1a, 0x22, 0x25, 0x3e,
+ 0x3f, 0xe7, 0xec, 0xef, 0xff, 0xc5, 0xc6, 0x04,
+ 0x20, 0x23, 0x25, 0x26, 0x28, 0x33, 0x38, 0x3a,
+ 0x48, 0x4a, 0x4c, 0x50, 0x53, 0x55, 0x56, 0x58,
+ 0x5a, 0x5c, 0x5e, 0x60, 0x63, 0x65, 0x66, 0x6b,
+ 0x73, 0x78, 0x7d, 0x7f, 0x8a, 0xa4, 0xaa, 0xaf,
+ 0xb0, 0xc0, 0xd0, 0xae, 0xaf, 0x6e, 0x6f, 0x93,
+];
+#[rustfmt::skip]
+const NORMAL0: &[u8] = &[
+ 0x00, 0x20,
+ 0x5f, 0x22,
+ 0x82, 0xdf, 0x04,
+ 0x82, 0x44, 0x08,
+ 0x1b, 0x04,
+ 0x06, 0x11,
+ 0x81, 0xac, 0x0e,
+ 0x80, 0xab, 0x05,
+ 0x1f, 0x09,
+ 0x81, 0x1b, 0x03,
+ 0x19, 0x08,
+ 0x01, 0x04,
+ 0x2f, 0x04,
+ 0x34, 0x04,
+ 0x07, 0x03,
+ 0x01, 0x07,
+ 0x06, 0x07,
+ 0x11, 0x0a,
+ 0x50, 0x0f,
+ 0x12, 0x07,
+ 0x55, 0x07,
+ 0x03, 0x04,
+ 0x1c, 0x0a,
+ 0x09, 0x03,
+ 0x08, 0x03,
+ 0x07, 0x03,
+ 0x02, 0x03,
+ 0x03, 0x03,
+ 0x0c, 0x04,
+ 0x05, 0x03,
+ 0x0b, 0x06,
+ 0x01, 0x0e,
+ 0x15, 0x05,
+ 0x4e, 0x07,
+ 0x1b, 0x07,
+ 0x57, 0x07,
+ 0x02, 0x06,
+ 0x16, 0x0d,
+ 0x50, 0x04,
+ 0x43, 0x03,
+ 0x2d, 0x03,
+ 0x01, 0x04,
+ 0x11, 0x06,
+ 0x0f, 0x0c,
+ 0x3a, 0x04,
+ 0x1d, 0x25,
+ 0x5f, 0x20,
+ 0x6d, 0x04,
+ 0x6a, 0x25,
+ 0x80, 0xc8, 0x05,
+ 0x82, 0xb0, 0x03,
+ 0x1a, 0x06,
+ 0x82, 0xfd, 0x03,
+ 0x59, 0x07,
+ 0x16, 0x09,
+ 0x18, 0x09,
+ 0x14, 0x0c,
+ 0x14, 0x0c,
+ 0x6a, 0x06,
+ 0x0a, 0x06,
+ 0x1a, 0x06,
+ 0x59, 0x07,
+ 0x2b, 0x05,
+ 0x46, 0x0a,
+ 0x2c, 0x04,
+ 0x0c, 0x04,
+ 0x01, 0x03,
+ 0x31, 0x0b,
+ 0x2c, 0x04,
+ 0x1a, 0x06,
+ 0x0b, 0x03,
+ 0x80, 0xac, 0x06,
+ 0x0a, 0x06,
+ 0x2f, 0x31,
+ 0x4d, 0x03,
+ 0x80, 0xa4, 0x08,
+ 0x3c, 0x03,
+ 0x0f, 0x03,
+ 0x3c, 0x07,
+ 0x38, 0x08,
+ 0x2b, 0x05,
+ 0x82, 0xff, 0x11,
+ 0x18, 0x08,
+ 0x2f, 0x11,
+ 0x2d, 0x03,
+ 0x21, 0x0f,
+ 0x21, 0x0f,
+ 0x80, 0x8c, 0x04,
+ 0x82, 0x97, 0x19,
+ 0x0b, 0x15,
+ 0x88, 0x94, 0x05,
+ 0x2f, 0x05,
+ 0x3b, 0x07,
+ 0x02, 0x0e,
+ 0x18, 0x09,
+ 0x80, 0xbe, 0x22,
+ 0x74, 0x0c,
+ 0x80, 0xd6, 0x1a,
+ 0x0c, 0x05,
+ 0x80, 0xff, 0x05,
+ 0x80, 0xdf, 0x0c,
+ 0xf2, 0x9d, 0x03,
+ 0x37, 0x09,
+ 0x81, 0x5c, 0x14,
+ 0x80, 0xb8, 0x08,
+ 0x80, 0xcb, 0x05,
+ 0x0a, 0x18,
+ 0x3b, 0x03,
+ 0x0a, 0x06,
+ 0x38, 0x08,
+ 0x46, 0x08,
+ 0x0c, 0x06,
+ 0x74, 0x0b,
+ 0x1e, 0x03,
+ 0x5a, 0x04,
+ 0x59, 0x09,
+ 0x80, 0x83, 0x18,
+ 0x1c, 0x0a,
+ 0x16, 0x09,
+ 0x4c, 0x04,
+ 0x80, 0x8a, 0x06,
+ 0xab, 0xa4, 0x0c,
+ 0x17, 0x04,
+ 0x31, 0xa1, 0x04,
+ 0x81, 0xda, 0x26,
+ 0x07, 0x0c,
+ 0x05, 0x05,
+ 0x80, 0xa6, 0x10,
+ 0x81, 0xf5, 0x07,
+ 0x01, 0x20,
+ 0x2a, 0x06,
+ 0x4c, 0x04,
+ 0x80, 0x8d, 0x04,
+ 0x80, 0xbe, 0x03,
+ 0x1b, 0x03,
+ 0x0f, 0x0d,
+];
+#[rustfmt::skip]
+const NORMAL1: &[u8] = &[
+ 0x5e, 0x22,
+ 0x7b, 0x05,
+ 0x03, 0x04,
+ 0x2d, 0x03,
+ 0x66, 0x03,
+ 0x01, 0x2f,
+ 0x2e, 0x80, 0x82,
+ 0x1d, 0x03,
+ 0x31, 0x0f,
+ 0x1c, 0x04,
+ 0x24, 0x09,
+ 0x1e, 0x05,
+ 0x2b, 0x05,
+ 0x44, 0x04,
+ 0x0e, 0x2a,
+ 0x80, 0xaa, 0x06,
+ 0x24, 0x04,
+ 0x24, 0x04,
+ 0x28, 0x08,
+ 0x34, 0x0b,
+ 0x4e, 0x43,
+ 0x81, 0x37, 0x09,
+ 0x16, 0x0a,
+ 0x08, 0x18,
+ 0x3b, 0x45,
+ 0x39, 0x03,
+ 0x63, 0x08,
+ 0x09, 0x30,
+ 0x16, 0x05,
+ 0x21, 0x03,
+ 0x1b, 0x05,
+ 0x01, 0x40,
+ 0x38, 0x04,
+ 0x4b, 0x05,
+ 0x2f, 0x04,
+ 0x0a, 0x07,
+ 0x09, 0x07,
+ 0x40, 0x20,
+ 0x27, 0x04,
+ 0x0c, 0x09,
+ 0x36, 0x03,
+ 0x3a, 0x05,
+ 0x1a, 0x07,
+ 0x04, 0x0c,
+ 0x07, 0x50,
+ 0x49, 0x37,
+ 0x33, 0x0d,
+ 0x33, 0x07,
+ 0x2e, 0x08,
+ 0x0a, 0x81, 0x26,
+ 0x52, 0x4e,
+ 0x28, 0x08,
+ 0x2a, 0x16,
+ 0x1a, 0x26,
+ 0x1c, 0x14,
+ 0x17, 0x09,
+ 0x4e, 0x04,
+ 0x24, 0x09,
+ 0x44, 0x0d,
+ 0x19, 0x07,
+ 0x0a, 0x06,
+ 0x48, 0x08,
+ 0x27, 0x09,
+ 0x75, 0x0b,
+ 0x3f, 0x41,
+ 0x2a, 0x06,
+ 0x3b, 0x05,
+ 0x0a, 0x06,
+ 0x51, 0x06,
+ 0x01, 0x05,
+ 0x10, 0x03,
+ 0x05, 0x80, 0x8b,
+ 0x62, 0x1e,
+ 0x48, 0x08,
+ 0x0a, 0x80, 0xa6,
+ 0x5e, 0x22,
+ 0x45, 0x0b,
+ 0x0a, 0x06,
+ 0x0d, 0x13,
+ 0x3a, 0x06,
+ 0x0a, 0x36,
+ 0x2c, 0x04,
+ 0x17, 0x80, 0xb9,
+ 0x3c, 0x64,
+ 0x53, 0x0c,
+ 0x48, 0x09,
+ 0x0a, 0x46,
+ 0x45, 0x1b,
+ 0x48, 0x08,
+ 0x53, 0x0d,
+ 0x49, 0x81, 0x07,
+ 0x46, 0x0a,
+ 0x1d, 0x03,
+ 0x47, 0x49,
+ 0x37, 0x03,
+ 0x0e, 0x08,
+ 0x0a, 0x06,
+ 0x39, 0x07,
+ 0x0a, 0x81, 0x36,
+ 0x19, 0x80, 0xb7,
+ 0x01, 0x0f,
+ 0x32, 0x0d,
+ 0x83, 0x9b, 0x66,
+ 0x75, 0x0b,
+ 0x80, 0xc4, 0x8a, 0x4c,
+ 0x63, 0x0d,
+ 0x84, 0x2f, 0x8f, 0xd1,
+ 0x82, 0x47, 0xa1, 0xb9,
+ 0x82, 0x39, 0x07,
+ 0x2a, 0x04,
+ 0x5c, 0x06,
+ 0x26, 0x0a,
+ 0x46, 0x0a,
+ 0x28, 0x05,
+ 0x13, 0x82, 0xb0,
+ 0x5b, 0x65,
+ 0x4b, 0x04,
+ 0x39, 0x07,
+ 0x11, 0x40,
+ 0x05, 0x0b,
+ 0x02, 0x0e,
+ 0x97, 0xf8, 0x08,
+ 0x84, 0xd6, 0x2a,
+ 0x09, 0xa2, 0xe7,
+ 0x81, 0x33, 0x2d,
+ 0x03, 0x11,
+ 0x04, 0x08,
+ 0x81, 0x8c, 0x89, 0x04,
+ 0x6b, 0x05,
+ 0x0d, 0x03,
+ 0x09, 0x07,
+ 0x10, 0x92, 0x60,
+ 0x47, 0x09,
+ 0x74, 0x3c,
+ 0x80, 0xf6, 0x0a,
+ 0x73, 0x08,
+ 0x70, 0x15,
+ 0x46, 0x80, 0x9a,
+ 0x14, 0x0c,
+ 0x57, 0x09,
+ 0x19, 0x80, 0x87,
+ 0x81, 0x47, 0x03,
+ 0x85, 0x42, 0x0f,
+ 0x15, 0x84, 0x50,
+ 0x1f, 0x80, 0xe1,
+ 0x2b, 0x80, 0xd5,
+ 0x2d, 0x03,
+ 0x1a, 0x04,
+ 0x02, 0x81, 0x40,
+ 0x1f, 0x11,
+ 0x3a, 0x05,
+ 0x01, 0x84, 0xe0,
+ 0x80, 0xf7, 0x29,
+ 0x4c, 0x04,
+ 0x0a, 0x04,
+ 0x02, 0x83, 0x11,
+ 0x44, 0x4c,
+ 0x3d, 0x80, 0xc2,
+ 0x3c, 0x06,
+ 0x01, 0x04,
+ 0x55, 0x05,
+ 0x1b, 0x34,
+ 0x02, 0x81, 0x0e,
+ 0x2c, 0x04,
+ 0x64, 0x0c,
+ 0x56, 0x0a,
+ 0x80, 0xae, 0x38,
+ 0x1d, 0x0d,
+ 0x2c, 0x04,
+ 0x09, 0x07,
+ 0x02, 0x0e,
+ 0x06, 0x80, 0x9a,
+ 0x83, 0xd8, 0x05,
+ 0x10, 0x03,
+ 0x0d, 0x03,
+ 0x74, 0x0c,
+ 0x59, 0x07,
+ 0x0c, 0x04,
+ 0x01, 0x0f,
+ 0x0c, 0x04,
+ 0x38, 0x08,
+ 0x0a, 0x06,
+ 0x28, 0x08,
+ 0x22, 0x4e,
+ 0x81, 0x54, 0x0c,
+ 0x15, 0x03,
+ 0x05, 0x03,
+ 0x07, 0x09,
+ 0x1d, 0x03,
+ 0x0b, 0x05,
+ 0x06, 0x0a,
+ 0x0a, 0x06,
+ 0x08, 0x08,
+ 0x07, 0x09,
+ 0x80, 0xcb, 0x25,
+ 0x0a, 0x84, 0x06,
+];
diff --git a/library/core/src/unicode/unicode_data.rs b/library/core/src/unicode/unicode_data.rs
new file mode 100644
index 000000000..d2073f86c
--- /dev/null
+++ b/library/core/src/unicode/unicode_data.rs
@@ -0,0 +1,2375 @@
+//! This file is generated by src/tools/unicode-table-generator; do not edit manually!
+
+#[inline(always)]
+fn bitset_search<
+ const N: usize,
+ const CHUNK_SIZE: usize,
+ const N1: usize,
+ const CANONICAL: usize,
+ const CANONICALIZED: usize,
+>(
+ needle: u32,
+ chunk_idx_map: &[u8; N],
+ bitset_chunk_idx: &[[u8; CHUNK_SIZE]; N1],
+ bitset_canonical: &[u64; CANONICAL],
+ bitset_canonicalized: &[(u8, u8); CANONICALIZED],
+) -> bool {
+ let bucket_idx = (needle / 64) as usize;
+ let chunk_map_idx = bucket_idx / CHUNK_SIZE;
+ let chunk_piece = bucket_idx % CHUNK_SIZE;
+ let chunk_idx = if let Some(&v) = chunk_idx_map.get(chunk_map_idx) {
+ v
+ } else {
+ return false;
+ };
+ let idx = bitset_chunk_idx[chunk_idx as usize][chunk_piece] as usize;
+ let word = if let Some(word) = bitset_canonical.get(idx) {
+ *word
+ } else {
+ let (real_idx, mapping) = bitset_canonicalized[idx - bitset_canonical.len()];
+ let mut word = bitset_canonical[real_idx as usize];
+ let should_invert = mapping & (1 << 6) != 0;
+ if should_invert {
+ word = !word;
+ }
+ // Lower 6 bits
+ let quantity = mapping & ((1 << 6) - 1);
+ if mapping & (1 << 7) != 0 {
+ // shift
+ word >>= quantity as u64;
+ } else {
+ word = word.rotate_left(quantity as u32);
+ }
+ word
+ };
+ (word & (1 << (needle % 64) as u64)) != 0
+}
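+
+// Each `(u8, u8)` entry in `bitset_canonicalized` above is a canonical word
+// index plus a transform byte: bit 7 selects shift-right over rotate-left,
+// bit 6 requests inversion, and the low six bits give the amount.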
+
+fn decode_prefix_sum(short_offset_run_header: u32) -> u32 {
+ short_offset_run_header & ((1 << 21) - 1)
+}
+
+fn decode_length(short_offset_run_header: u32) -> usize {
+ (short_offset_run_header >> 21) as usize
+}
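+
+// i.e. a header packs `(length << 21) | prefix_sum` into one `u32`; for
+// example, 33559113 = (16 << 21) | 4681 decodes to length 16 and prefix
+// sum 4681.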
+
+#[inline(always)]
+fn skip_search<const SOR: usize, const OFFSETS: usize>(
+ needle: u32,
+ short_offset_runs: &[u32; SOR],
+ offsets: &[u8; OFFSETS],
+) -> bool {
+    // Note that the resulting index *cannot* be past the end of the array, as
+    // the last element is greater than std::char::MAX (the largest possible needle).
+ //
+ // So, we cannot have found it (i.e. Ok(idx) + 1 != length) and the correct
+ // location cannot be past it, so Err(idx) != length either.
+ //
+ // This means that we can avoid bounds checking for the accesses below, too.
+ let last_idx =
+ match short_offset_runs.binary_search_by_key(&(needle << 11), |header| header << 11) {
+ Ok(idx) => idx + 1,
+ Err(idx) => idx,
+ };
+
+ let mut offset_idx = decode_length(short_offset_runs[last_idx]);
+ let length = if let Some(next) = short_offset_runs.get(last_idx + 1) {
+ decode_length(*next) - offset_idx
+ } else {
+ offsets.len() - offset_idx
+ };
+ let prev =
+ last_idx.checked_sub(1).map(|prev| decode_prefix_sum(short_offset_runs[prev])).unwrap_or(0);
+
+ let total = needle - prev;
+ let mut prefix_sum = 0;
+ for _ in 0..(length - 1) {
+ let offset = offsets[offset_idx];
+ prefix_sum += offset as u32;
+ if prefix_sum > total {
+ break;
+ }
+ offset_idx += 1;
+ }
+ offset_idx % 2 == 1
+}
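+
+// In other words: `OFFSETS` holds run lengths that alternate between
+// "not in set" and "in set" runs. Walking the prefix sums until they exceed
+// the needle leaves `offset_idx` on the containing run, and odd-indexed runs
+// are the "in set" ones.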
+
+pub const UNICODE_VERSION: (u8, u8, u8) = (14, 0, 0);
+
+#[rustfmt::skip]
+pub mod alphabetic {
+ static SHORT_OFFSET_RUNS: [u32; 51] = [
+ 706, 33559113, 876615277, 956309270, 1166025910, 1314925568, 1319120901, 1398813696,
+ 1449151936, 1451271309, 1455465997, 1463867300, 1652619520, 1663105646, 1665203518,
+ 1711342208, 1797326647, 1891700352, 2044795904, 2397118176, 2485199770, 2495688592,
+ 2506175535, 2512471040, 2514568775, 2516674560, 2518772281, 2520870464, 2552334328,
+ 2583792854, 2587996144, 2594287907, 2608968444, 2621553664, 2623656960, 2644629158,
+ 2722225920, 2770461328, 2808211424, 2816601600, 2850156848, 2988572672, 3001198304,
+ 3003299641, 3007499938, 3015896033, 3020093440, 3022191134, 3024289792, 3026391883,
+ 3029603147,
+ ];
+ static OFFSETS: [u8; 1445] = [
+ 65, 26, 6, 26, 47, 1, 10, 1, 4, 1, 5, 23, 1, 31, 1, 0, 4, 12, 14, 5, 7, 1, 1, 1, 86, 1, 42,
+ 5, 1, 2, 2, 4, 1, 1, 6, 1, 1, 3, 1, 1, 1, 20, 1, 83, 1, 139, 8, 166, 1, 38, 2, 1, 6, 41, 39,
+ 14, 1, 1, 1, 2, 1, 2, 1, 1, 8, 27, 4, 4, 29, 11, 5, 56, 1, 7, 14, 102, 1, 8, 4, 8, 4, 3, 10,
+ 3, 2, 1, 16, 48, 13, 101, 24, 33, 9, 2, 4, 1, 5, 24, 2, 19, 19, 25, 7, 11, 5, 24, 1, 6, 17,
+ 42, 10, 12, 3, 7, 6, 76, 1, 16, 1, 3, 4, 15, 13, 19, 1, 8, 2, 2, 2, 22, 1, 7, 1, 1, 3, 4, 3,
+ 8, 2, 2, 2, 2, 1, 1, 8, 1, 4, 2, 1, 5, 12, 2, 10, 1, 4, 3, 1, 6, 4, 2, 2, 22, 1, 7, 1, 2, 1,
+ 2, 1, 2, 4, 5, 4, 2, 2, 2, 4, 1, 7, 4, 1, 1, 17, 6, 11, 3, 1, 9, 1, 3, 1, 22, 1, 7, 1, 2, 1,
+ 5, 3, 9, 1, 3, 1, 2, 3, 1, 15, 4, 21, 4, 4, 3, 1, 8, 2, 2, 2, 22, 1, 7, 1, 2, 1, 5, 3, 8, 2,
+ 2, 2, 2, 9, 2, 4, 2, 1, 5, 13, 1, 16, 2, 1, 6, 3, 3, 1, 4, 3, 2, 1, 1, 1, 2, 3, 2, 3, 3, 3,
+ 12, 4, 5, 3, 3, 1, 3, 3, 1, 6, 1, 40, 4, 1, 8, 1, 3, 1, 23, 1, 16, 3, 8, 1, 3, 1, 3, 8, 2,
+ 1, 3, 2, 1, 2, 4, 28, 4, 1, 8, 1, 3, 1, 23, 1, 10, 1, 5, 3, 8, 1, 3, 1, 3, 8, 2, 6, 2, 1, 4,
+ 13, 2, 13, 13, 1, 3, 1, 41, 2, 8, 1, 3, 1, 3, 1, 1, 5, 4, 7, 5, 22, 6, 1, 3, 1, 18, 3, 24,
+ 1, 9, 1, 1, 2, 7, 8, 6, 1, 1, 1, 8, 18, 2, 13, 58, 5, 7, 6, 1, 51, 2, 1, 1, 1, 5, 1, 24, 1,
+ 1, 1, 19, 1, 3, 2, 5, 1, 1, 6, 1, 14, 4, 32, 1, 63, 8, 1, 36, 4, 17, 6, 16, 1, 36, 67, 55,
+ 1, 1, 2, 5, 16, 64, 10, 4, 2, 38, 1, 1, 5, 1, 2, 43, 1, 0, 1, 4, 2, 7, 1, 1, 1, 4, 2, 41, 1,
+ 4, 2, 33, 1, 4, 2, 7, 1, 1, 1, 4, 2, 15, 1, 57, 1, 4, 2, 67, 37, 16, 16, 86, 2, 6, 3, 0, 2,
+ 17, 1, 26, 5, 75, 3, 11, 7, 20, 11, 21, 12, 20, 12, 13, 1, 3, 1, 2, 12, 52, 2, 19, 14, 1, 4,
+ 1, 67, 89, 7, 43, 5, 70, 10, 31, 1, 12, 4, 9, 23, 30, 2, 5, 11, 44, 4, 26, 54, 28, 4, 63, 2,
+ 20, 50, 1, 23, 2, 11, 3, 49, 52, 1, 15, 1, 8, 51, 42, 2, 4, 10, 44, 1, 11, 14, 55, 22, 3,
+ 10, 36, 2, 9, 7, 43, 2, 3, 41, 4, 1, 6, 1, 2, 3, 1, 5, 192, 39, 14, 11, 0, 2, 6, 2, 38, 2,
+ 6, 2, 8, 1, 1, 1, 1, 1, 1, 1, 31, 2, 53, 1, 7, 1, 1, 3, 3, 1, 7, 3, 4, 2, 6, 4, 13, 5, 3, 1,
+ 7, 116, 1, 13, 1, 16, 13, 101, 1, 4, 1, 2, 10, 1, 1, 3, 5, 6, 1, 1, 1, 1, 1, 1, 4, 1, 11, 2,
+ 4, 5, 5, 4, 1, 17, 41, 0, 52, 0, 229, 6, 4, 3, 2, 12, 38, 1, 1, 5, 1, 2, 56, 7, 1, 16, 23,
+ 9, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 32, 47, 1, 0, 3, 25, 9, 7, 5, 2, 5, 4,
+ 86, 6, 3, 1, 90, 1, 4, 5, 43, 1, 94, 17, 32, 48, 16, 0, 0, 64, 0, 67, 46, 2, 0, 3, 16, 10,
+ 2, 20, 47, 5, 8, 3, 113, 39, 9, 2, 103, 2, 64, 5, 2, 1, 1, 1, 5, 24, 20, 1, 33, 24, 52, 12,
+ 68, 1, 1, 44, 6, 3, 1, 1, 3, 10, 33, 5, 35, 13, 29, 3, 51, 1, 12, 15, 1, 16, 16, 10, 5, 1,
+ 55, 9, 14, 18, 23, 3, 69, 1, 1, 1, 1, 24, 3, 2, 16, 2, 4, 11, 6, 2, 6, 2, 6, 9, 7, 1, 7, 1,
+ 43, 1, 14, 6, 123, 21, 0, 12, 23, 4, 49, 0, 0, 2, 106, 38, 7, 12, 5, 5, 12, 1, 13, 1, 5, 1,
+ 1, 1, 2, 1, 2, 1, 108, 33, 0, 18, 64, 2, 54, 40, 12, 116, 5, 1, 135, 36, 26, 6, 26, 11, 89,
+ 3, 6, 2, 6, 2, 6, 2, 3, 35, 12, 1, 26, 1, 19, 1, 2, 1, 15, 2, 14, 34, 123, 69, 53, 0, 29, 3,
+ 49, 47, 32, 13, 30, 5, 43, 5, 30, 2, 36, 4, 8, 1, 5, 42, 158, 18, 36, 4, 36, 4, 40, 8, 52,
+ 12, 11, 1, 15, 1, 7, 1, 2, 1, 11, 1, 15, 1, 7, 1, 2, 67, 0, 9, 22, 10, 8, 24, 6, 1, 42, 1,
+ 9, 69, 6, 2, 1, 1, 44, 1, 2, 3, 1, 2, 23, 10, 23, 9, 31, 65, 19, 1, 2, 10, 22, 10, 26, 70,
+ 56, 6, 2, 64, 4, 1, 2, 5, 8, 1, 3, 1, 29, 42, 29, 3, 29, 35, 8, 1, 28, 27, 54, 10, 22, 10,
+ 19, 13, 18, 110, 73, 55, 51, 13, 51, 13, 40, 0, 42, 1, 2, 3, 2, 78, 29, 10, 1, 8, 22, 42,
+ 18, 46, 21, 27, 23, 9, 70, 43, 5, 12, 55, 9, 1, 13, 25, 23, 51, 17, 4, 8, 35, 3, 1, 9, 64,
+ 1, 4, 9, 2, 10, 1, 1, 1, 35, 18, 1, 34, 2, 1, 6, 1, 65, 7, 1, 1, 1, 4, 1, 15, 1, 10, 7, 57,
+ 23, 4, 1, 8, 2, 2, 2, 22, 1, 7, 1, 2, 1, 5, 3, 8, 2, 2, 2, 2, 3, 1, 6, 1, 5, 7, 156, 66, 1,
+ 3, 1, 4, 20, 3, 30, 66, 2, 2, 1, 1, 184, 54, 2, 7, 25, 6, 34, 63, 1, 1, 3, 1, 59, 54, 2, 1,
+ 71, 27, 2, 14, 21, 7, 185, 57, 103, 64, 31, 8, 2, 1, 2, 8, 1, 2, 1, 30, 1, 2, 2, 2, 2, 4,
+ 93, 8, 2, 46, 2, 6, 1, 1, 1, 2, 27, 51, 2, 10, 17, 72, 5, 1, 18, 73, 0, 9, 1, 45, 1, 7, 1,
+ 1, 49, 30, 2, 22, 1, 14, 73, 7, 1, 2, 1, 44, 3, 1, 1, 2, 1, 3, 1, 1, 2, 2, 24, 6, 1, 2, 1,
+ 37, 1, 2, 1, 4, 1, 1, 0, 23, 185, 1, 79, 0, 102, 111, 17, 196, 0, 97, 15, 0, 0, 0, 0, 0, 7,
+ 31, 17, 79, 17, 30, 18, 48, 16, 4, 31, 21, 5, 19, 0, 64, 128, 75, 4, 57, 7, 17, 64, 2, 1, 1,
+ 12, 2, 14, 0, 8, 0, 42, 9, 0, 4, 1, 7, 1, 2, 1, 0, 45, 3, 17, 4, 8, 0, 0, 107, 5, 13, 3, 9,
+ 7, 10, 4, 1, 0, 85, 1, 71, 1, 2, 2, 1, 2, 2, 2, 4, 1, 12, 1, 1, 1, 7, 1, 65, 1, 4, 2, 8, 1,
+ 7, 1, 28, 1, 4, 1, 5, 1, 1, 3, 7, 1, 0, 2, 25, 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 31, 1,
+ 25, 1, 31, 1, 25, 1, 8, 0, 31, 225, 7, 1, 17, 2, 7, 1, 2, 1, 5, 213, 45, 10, 7, 16, 1, 0,
+ 30, 18, 44, 0, 7, 1, 4, 1, 2, 1, 15, 1, 197, 59, 68, 3, 1, 3, 1, 0, 4, 1, 27, 1, 2, 1, 1, 2,
+ 1, 1, 10, 1, 4, 1, 1, 1, 1, 6, 1, 4, 1, 1, 1, 1, 1, 1, 3, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 2, 1, 1, 2, 4, 1, 7, 1, 4, 1, 4, 1, 1, 1, 10, 1, 17, 5, 3, 1, 5, 1, 17, 0, 26,
+ 6, 26, 6, 26, 0, 0, 32, 0, 7, 222, 2, 0, 14, 0, 0, 0, 0, 0, 0,
+ ];
+ pub fn lookup(c: char) -> bool {
+ super::skip_search(
+ c as u32,
+ &SHORT_OFFSET_RUNS,
+ &OFFSETS,
+ )
+ }
+}
+
+#[rustfmt::skip]
+pub mod case_ignorable {
+ static SHORT_OFFSET_RUNS: [u32; 35] = [
+ 688, 44045149, 572528402, 576724925, 807414908, 878718981, 903913493, 929080568, 933275148,
+ 937491230, 1138818560, 1147208189, 1210124160, 1222707713, 1235291428, 1260457643,
+ 1264654383, 1491147067, 1499536432, 1558257395, 1621177392, 1625385712, 1629581135,
+ 1642180592, 1658961053, 1671548672, 1679937895, 1688328704, 1709301760, 1734467888,
+ 1755439790, 1759635664, 1768027131, 1777205249, 1782514160,
+ ];
+ static OFFSETS: [u8; 855] = [
+ 39, 1, 6, 1, 11, 1, 35, 1, 1, 1, 71, 1, 4, 1, 1, 1, 4, 1, 2, 2, 0, 192, 4, 2, 4, 1, 9, 2,
+ 1, 1, 251, 7, 207, 1, 5, 1, 49, 45, 1, 1, 1, 2, 1, 2, 1, 1, 44, 1, 11, 6, 10, 11, 1, 1, 35,
+ 1, 10, 21, 16, 1, 101, 8, 1, 10, 1, 4, 33, 1, 1, 1, 30, 27, 91, 11, 58, 11, 4, 1, 2, 1, 24,
+ 24, 43, 3, 44, 1, 7, 2, 6, 8, 41, 58, 55, 1, 1, 1, 4, 8, 4, 1, 3, 7, 10, 2, 13, 1, 15, 1,
+ 58, 1, 4, 4, 8, 1, 20, 2, 26, 1, 2, 2, 57, 1, 4, 2, 4, 2, 2, 3, 3, 1, 30, 2, 3, 1, 11, 2,
+ 57, 1, 4, 5, 1, 2, 4, 1, 20, 2, 22, 6, 1, 1, 58, 1, 2, 1, 1, 4, 8, 1, 7, 2, 11, 2, 30, 1,
+ 61, 1, 12, 1, 50, 1, 3, 1, 55, 1, 1, 3, 5, 3, 1, 4, 7, 2, 11, 2, 29, 1, 58, 1, 2, 1, 6, 1,
+ 5, 2, 20, 2, 28, 2, 57, 2, 4, 4, 8, 1, 20, 2, 29, 1, 72, 1, 7, 3, 1, 1, 90, 1, 2, 7, 11, 9,
+ 98, 1, 2, 9, 9, 1, 1, 6, 74, 2, 27, 1, 1, 1, 1, 1, 55, 14, 1, 5, 1, 2, 5, 11, 1, 36, 9, 1,
+ 102, 4, 1, 6, 1, 2, 2, 2, 25, 2, 4, 3, 16, 4, 13, 1, 2, 2, 6, 1, 15, 1, 94, 1, 0, 3, 0, 3,
+ 29, 2, 30, 2, 30, 2, 64, 2, 1, 7, 8, 1, 2, 11, 3, 1, 5, 1, 45, 5, 51, 1, 65, 2, 34, 1, 118,
+ 3, 4, 2, 9, 1, 6, 3, 219, 2, 2, 1, 58, 1, 1, 7, 1, 1, 1, 1, 2, 8, 6, 10, 2, 1, 39, 1, 8, 31,
+ 49, 4, 48, 1, 1, 5, 1, 1, 5, 1, 40, 9, 12, 2, 32, 4, 2, 2, 1, 3, 56, 1, 1, 2, 3, 1, 1, 3,
+ 58, 8, 2, 2, 64, 6, 82, 3, 1, 13, 1, 7, 4, 1, 6, 1, 3, 2, 50, 63, 13, 1, 34, 101, 0, 1, 1,
+ 3, 11, 3, 13, 3, 13, 3, 13, 2, 12, 5, 8, 2, 10, 1, 2, 1, 2, 5, 49, 5, 1, 10, 1, 1, 13, 1,
+ 16, 13, 51, 33, 0, 2, 113, 3, 125, 1, 15, 1, 96, 32, 47, 1, 0, 1, 36, 4, 3, 5, 5, 1, 93, 6,
+ 93, 3, 0, 1, 0, 6, 0, 1, 98, 4, 1, 10, 1, 1, 28, 4, 80, 2, 14, 34, 78, 1, 23, 3, 103, 3, 3,
+ 2, 8, 1, 3, 1, 4, 1, 25, 2, 5, 1, 151, 2, 26, 18, 13, 1, 38, 8, 25, 11, 46, 3, 48, 1, 2, 4,
+ 2, 2, 17, 1, 21, 2, 66, 6, 2, 2, 2, 2, 12, 1, 8, 1, 35, 1, 11, 1, 51, 1, 1, 3, 2, 2, 5, 2,
+ 1, 1, 27, 1, 14, 2, 5, 2, 1, 1, 100, 5, 9, 3, 121, 1, 2, 1, 4, 1, 0, 1, 147, 17, 0, 16, 3,
+ 1, 12, 16, 34, 1, 2, 1, 169, 1, 7, 1, 6, 1, 11, 1, 35, 1, 1, 1, 47, 1, 45, 2, 67, 1, 21, 3,
+ 0, 1, 226, 1, 149, 5, 0, 6, 1, 42, 1, 9, 0, 3, 1, 2, 5, 4, 40, 3, 4, 1, 165, 2, 0, 4, 0, 2,
+ 153, 11, 49, 4, 123, 1, 54, 15, 41, 1, 2, 2, 10, 3, 49, 4, 2, 2, 2, 1, 4, 1, 10, 1, 50, 3,
+ 36, 5, 1, 8, 62, 1, 12, 2, 52, 9, 10, 4, 2, 1, 95, 3, 2, 1, 1, 2, 6, 1, 160, 1, 3, 8, 21, 2,
+ 57, 2, 3, 1, 37, 7, 3, 5, 195, 8, 2, 3, 1, 1, 23, 1, 84, 6, 1, 1, 4, 2, 1, 2, 238, 4, 6, 2,
+ 1, 2, 27, 2, 85, 8, 2, 1, 1, 2, 106, 1, 1, 1, 2, 6, 1, 1, 101, 3, 2, 4, 1, 5, 0, 9, 1, 2, 0,
+ 2, 1, 1, 4, 1, 144, 4, 2, 2, 4, 1, 32, 10, 40, 6, 2, 4, 8, 1, 9, 6, 2, 3, 46, 13, 1, 2, 0,
+ 7, 1, 6, 1, 1, 82, 22, 2, 7, 1, 2, 1, 2, 122, 6, 3, 1, 1, 2, 1, 7, 1, 1, 72, 2, 3, 1, 1, 1,
+ 0, 2, 0, 9, 0, 5, 59, 7, 9, 4, 0, 1, 63, 17, 64, 2, 1, 2, 0, 4, 1, 7, 1, 2, 0, 2, 1, 4, 0,
+ 46, 2, 23, 0, 3, 9, 16, 2, 7, 30, 4, 148, 3, 0, 55, 4, 50, 8, 1, 14, 1, 22, 5, 1, 15, 0, 7,
+ 1, 17, 2, 7, 1, 2, 1, 5, 0, 14, 0, 1, 61, 4, 0, 7, 109, 8, 0, 5, 0, 1, 30, 96, 128, 240, 0,
+ ];
+ pub fn lookup(c: char) -> bool {
+ super::skip_search(
+ c as u32,
+ &SHORT_OFFSET_RUNS,
+ &OFFSETS,
+ )
+ }
+}
+
+#[rustfmt::skip]
+pub mod cased {
+ static SHORT_OFFSET_RUNS: [u32; 21] = [
+ 4256, 115348384, 136322176, 144711446, 163587254, 320875520, 325101120, 350268208,
+ 392231680, 404815649, 413205504, 421595008, 467733632, 484513952, 492924480, 497144832,
+ 501339814, 578936576, 627173632, 635564336, 640872842,
+ ];
+ static OFFSETS: [u8; 311] = [
+ 65, 26, 6, 26, 47, 1, 10, 1, 4, 1, 5, 23, 1, 31, 1, 195, 1, 4, 4, 208, 1, 36, 7, 2, 30, 5,
+ 96, 1, 42, 4, 2, 2, 2, 4, 1, 1, 6, 1, 1, 3, 1, 1, 1, 20, 1, 83, 1, 139, 8, 166, 1, 38, 9,
+ 41, 0, 38, 1, 1, 5, 1, 2, 43, 2, 3, 0, 86, 2, 6, 0, 9, 7, 43, 2, 3, 64, 192, 64, 0, 2, 6, 2,
+ 38, 2, 6, 2, 8, 1, 1, 1, 1, 1, 1, 1, 31, 2, 53, 1, 7, 1, 1, 3, 3, 1, 7, 3, 4, 2, 6, 4, 13,
+ 5, 3, 1, 7, 116, 1, 13, 1, 16, 13, 101, 1, 4, 1, 2, 10, 1, 1, 3, 5, 6, 1, 1, 1, 1, 1, 1, 4,
+ 1, 6, 4, 1, 2, 4, 5, 5, 4, 1, 17, 32, 3, 2, 0, 52, 0, 229, 6, 4, 3, 2, 12, 38, 1, 1, 5, 1,
+ 0, 46, 18, 30, 132, 102, 3, 4, 1, 59, 5, 2, 1, 1, 1, 5, 27, 2, 1, 3, 0, 43, 1, 13, 7, 80, 0,
+ 7, 12, 5, 0, 26, 6, 26, 0, 80, 96, 36, 4, 36, 116, 11, 1, 15, 1, 7, 1, 2, 1, 11, 1, 15, 1,
+ 7, 1, 2, 0, 1, 2, 3, 1, 42, 1, 9, 0, 51, 13, 51, 0, 64, 0, 64, 0, 85, 1, 71, 1, 2, 2, 1, 2,
+ 2, 2, 4, 1, 12, 1, 1, 1, 7, 1, 65, 1, 4, 2, 8, 1, 7, 1, 28, 1, 4, 1, 5, 1, 1, 3, 7, 1, 0, 2,
+ 25, 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 8, 0, 10, 1, 20, 0,
+ 68, 0, 26, 6, 26, 6, 26, 0,
+ ];
+ pub fn lookup(c: char) -> bool {
+ super::skip_search(
+ c as u32,
+ &SHORT_OFFSET_RUNS,
+ &OFFSETS,
+ )
+ }
+}
+
+#[rustfmt::skip]
+pub mod cc {
+ static SHORT_OFFSET_RUNS: [u32; 1] = [
+ 1114272,
+ ];
+ static OFFSETS: [u8; 5] = [
+ 0, 32, 95, 33, 0,
+ ];
+ pub fn lookup(c: char) -> bool {
+ super::skip_search(
+ c as u32,
+ &SHORT_OFFSET_RUNS,
+ &OFFSETS,
+ )
+ }
+}
+
+#[rustfmt::skip]
+pub mod grapheme_extend {
+ static SHORT_OFFSET_RUNS: [u32; 32] = [
+ 768, 2098307, 6292881, 10490717, 522196754, 526393356, 731917551, 740306986, 752920175,
+ 761309186, 778107678, 908131840, 912326558, 920715773, 924912129, 937495844, 962662059,
+ 966858799, 1205935152, 1277239027, 1340173040, 1344368463, 1352776861, 1365364480,
+ 1369559397, 1377950208, 1407311872, 1432478000, 1453449902, 1457645776, 1466826784,
+ 1476329968,
+ ];
+ static OFFSETS: [u8; 707] = [
+ 0, 112, 0, 7, 0, 45, 1, 1, 1, 2, 1, 2, 1, 1, 72, 11, 48, 21, 16, 1, 101, 7, 2, 6, 2, 2, 1,
+ 4, 35, 1, 30, 27, 91, 11, 58, 9, 9, 1, 24, 4, 1, 9, 1, 3, 1, 5, 43, 3, 60, 8, 42, 24, 1, 32,
+ 55, 1, 1, 1, 4, 8, 4, 1, 3, 7, 10, 2, 29, 1, 58, 1, 1, 1, 2, 4, 8, 1, 9, 1, 10, 2, 26, 1, 2,
+ 2, 57, 1, 4, 2, 4, 2, 2, 3, 3, 1, 30, 2, 3, 1, 11, 2, 57, 1, 4, 5, 1, 2, 4, 1, 20, 2, 22, 6,
+ 1, 1, 58, 1, 1, 2, 1, 4, 8, 1, 7, 3, 10, 2, 30, 1, 59, 1, 1, 1, 12, 1, 9, 1, 40, 1, 3, 1,
+ 55, 1, 1, 3, 5, 3, 1, 4, 7, 2, 11, 2, 29, 1, 58, 1, 2, 1, 2, 1, 3, 1, 5, 2, 7, 2, 11, 2, 28,
+ 2, 57, 2, 1, 1, 2, 4, 8, 1, 9, 1, 10, 2, 29, 1, 72, 1, 4, 1, 2, 3, 1, 1, 8, 1, 81, 1, 2, 7,
+ 12, 8, 98, 1, 2, 9, 11, 6, 74, 2, 27, 1, 1, 1, 1, 1, 55, 14, 1, 5, 1, 2, 5, 11, 1, 36, 9, 1,
+ 102, 4, 1, 6, 1, 2, 2, 2, 25, 2, 4, 3, 16, 4, 13, 1, 2, 2, 6, 1, 15, 1, 0, 3, 0, 3, 29, 2,
+ 30, 2, 30, 2, 64, 2, 1, 7, 8, 1, 2, 11, 9, 1, 45, 3, 1, 1, 117, 2, 34, 1, 118, 3, 4, 2, 9,
+ 1, 6, 3, 219, 2, 2, 1, 58, 1, 1, 7, 1, 1, 1, 1, 2, 8, 6, 10, 2, 1, 48, 31, 49, 4, 48, 7, 1,
+ 1, 5, 1, 40, 9, 12, 2, 32, 4, 2, 2, 1, 3, 56, 1, 1, 2, 3, 1, 1, 3, 58, 8, 2, 2, 152, 3, 1,
+ 13, 1, 7, 4, 1, 6, 1, 3, 2, 198, 64, 0, 1, 195, 33, 0, 3, 141, 1, 96, 32, 0, 6, 105, 2, 0,
+ 4, 1, 10, 32, 2, 80, 2, 0, 1, 3, 1, 4, 1, 25, 2, 5, 1, 151, 2, 26, 18, 13, 1, 38, 8, 25, 11,
+ 46, 3, 48, 1, 2, 4, 2, 2, 39, 1, 67, 6, 2, 2, 2, 2, 12, 1, 8, 1, 47, 1, 51, 1, 1, 3, 2, 2,
+ 5, 2, 1, 1, 42, 2, 8, 1, 238, 1, 2, 1, 4, 1, 0, 1, 0, 16, 16, 16, 0, 2, 0, 1, 226, 1, 149,
+ 5, 0, 3, 1, 2, 5, 4, 40, 3, 4, 1, 165, 2, 0, 4, 0, 2, 153, 11, 49, 4, 123, 1, 54, 15, 41, 1,
+ 2, 2, 10, 3, 49, 4, 2, 2, 7, 1, 61, 3, 36, 5, 1, 8, 62, 1, 12, 2, 52, 9, 10, 4, 2, 1, 95, 3,
+ 2, 1, 1, 2, 6, 1, 160, 1, 3, 8, 21, 2, 57, 2, 1, 1, 1, 1, 22, 1, 14, 7, 3, 5, 195, 8, 2, 3,
+ 1, 1, 23, 1, 81, 1, 2, 6, 1, 1, 2, 1, 1, 2, 1, 2, 235, 1, 2, 4, 6, 2, 1, 2, 27, 2, 85, 8, 2,
+ 1, 1, 2, 106, 1, 1, 1, 2, 6, 1, 1, 101, 3, 2, 4, 1, 5, 0, 9, 1, 2, 245, 1, 10, 2, 1, 1, 4,
+ 1, 144, 4, 2, 2, 4, 1, 32, 10, 40, 6, 2, 4, 8, 1, 9, 6, 2, 3, 46, 13, 1, 2, 0, 7, 1, 6, 1,
+ 1, 82, 22, 2, 7, 1, 2, 1, 2, 122, 6, 3, 1, 1, 2, 1, 7, 1, 1, 72, 2, 3, 1, 1, 1, 0, 2, 0, 5,
+ 59, 7, 0, 1, 63, 4, 81, 1, 0, 2, 0, 46, 2, 23, 0, 1, 1, 3, 4, 5, 8, 8, 2, 7, 30, 4, 148, 3,
+ 0, 55, 4, 50, 8, 1, 14, 1, 22, 5, 1, 15, 0, 7, 1, 17, 2, 7, 1, 2, 1, 5, 0, 7, 0, 1, 61, 4,
+ 0, 7, 109, 7, 0, 96, 128, 240, 0,
+ ];
+ pub fn lookup(c: char) -> bool {
+ super::skip_search(
+ c as u32,
+ &SHORT_OFFSET_RUNS,
+ &OFFSETS,
+ )
+ }
+}
+
+#[rustfmt::skip]
+pub mod lowercase {
+ static BITSET_CHUNKS_MAP: [u8; 123] = [
+ 14, 17, 0, 0, 9, 0, 0, 12, 13, 10, 0, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 4, 1, 0, 15, 0, 8, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0,
+ 3, 0, 0, 7,
+ ];
+ static BITSET_INDEX_CHUNKS: [[u8; 16]; 19] = [
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 59, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 14, 55, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 41, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 43, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 65, 42, 0, 50, 46, 48, 32],
+ [0, 0, 0, 0, 10, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 53, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 26],
+ [0, 0, 0, 60, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 57, 0, 55, 55, 55, 0, 21, 21, 67, 21, 35, 24, 23, 36],
+ [0, 5, 74, 0, 28, 15, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 64, 33, 17, 22, 51, 52, 47, 45, 8, 34, 40, 0, 27, 13, 30],
+ [11, 58, 0, 4, 0, 0, 29, 0, 0, 0, 0, 0, 0, 0, 31, 0],
+ [16, 25, 21, 37, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [16, 49, 2, 20, 66, 9, 57, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [63, 39, 54, 12, 73, 61, 18, 1, 6, 62, 71, 19, 68, 69, 3, 44],
+ ];
+ static BITSET_CANONICAL: [u64; 55] = [
+ 0b0000000000000000000000000000000000000000000000000000000000000000,
+ 0b1111111111111111110000000000000000000000000011111111111111111111,
+ 0b1010101010101010101010101010101010101010101010101010100000000010,
+ 0b1111111111111111111111000000000000000000000000001111110111111111,
+ 0b0000111111111111111111111111111111111111000000000000000000000000,
+ 0b1000000000000010000000000000000000000000000000000000000000000000,
+ 0b0000111111111111111111111111110000000000000000000000000011111111,
+ 0b0000000000000111111111111111111111111111111111111111111111111111,
+ 0b1111111111111111111111111111111111111111111111111010101010000101,
+ 0b1111111111111111111111111111111100000000000000000000000000000000,
+ 0b1111111111111111111111111111110000000000000000000000000000000000,
+ 0b1111111111111111111111110000000000000000000000000000000000000000,
+ 0b1111111111111111111111000000000000000000000000001111111111101111,
+ 0b1111111111111111111100000000000000000000000000010000000000000000,
+ 0b1111111111111111000000011111111111110111111111111111111111111111,
+ 0b1111111111111111000000000000000000000000000000000100001111000000,
+ 0b1111111111111111000000000000000000000000000000000000000000000000,
+ 0b1111111101111111111111111111111110000000000000000000000000000000,
+ 0b1111110000000000000000000000000011111111111111111111111111000000,
+ 0b1111000000000000000000000000001111110111111111111111111111111100,
+ 0b1010101010101010101010101010101010101010101010101101010101010100,
+ 0b1010101010101010101010101010101010101010101010101010101010101010,
+ 0b0101010110101010101010101010101010101010101010101010101010101010,
+ 0b0100000011011111000000001111111100000000111111110000000011111111,
+ 0b0011111111111111000000001111111100000000111111110000000000111111,
+ 0b0011111111011010000101010110001011111111111111111111111111111111,
+ 0b0011111100000000000000000000000000000000000000000000000000000000,
+ 0b0011110010001010000000000000000000000000000000000000000000100000,
+ 0b0011001000010000100000000000000000000000000010001100010000000000,
+ 0b0001101111111011111111111111101111111111100000000000000000000000,
+ 0b0001100100101111101010101010101010101010111000110111111111111111,
+ 0b0000011111111101111111111111111111111111111111111111111110111001,
+ 0b0000011101000000000000000000000000000010101010100000010100001010,
+ 0b0000010000100000000001000000000000000000000000000000000000000000,
+ 0b0000000111111111111111111111111111111111111011111111111111111111,
+ 0b0000000011111111000000001111111100000000001111110000000011111111,
+ 0b0000000011011100000000001111111100000000110011110000000011011100,
+ 0b0000000000001000010100000001101010101010101010101010101010101010,
+ 0b0000000000000000001000001011111111111111111111111111111111111111,
+ 0b0000000000000000000000001111111111111111110111111100000000000000,
+ 0b0000000000000000000000000001111100000000000000000000000000000011,
+ 0b0000000000000000000000000000000001111111111111111111101111111111,
+ 0b0000000000000000000000000000000000111010101010101010101010101010,
+ 0b0000000000000000000000000000000000000000111110000000000001111111,
+ 0b0000000000000000000000000000000000000000000000000000101111110111,
+ 0b1001001111111010101010101010101010101010101010101010101010101010,
+ 0b1001010111111111101010101010101010101010101010101010101010101010,
+ 0b1010101000101001101010101010101010110101010101010101001001000000,
+ 0b1010101010100000100000101010101010101010101110100101000010101010,
+ 0b1010101010101010101010101010101011111111111111111111111111111111,
+ 0b1010101010101011101010101010100000000000000000000000000000000000,
+ 0b1101010010101010101010101010101010101010101010101010101101010101,
+ 0b1110011001010001001011010010101001001110001001000011000100101001,
+ 0b1110011111111111111111111111111111111111111111110000000000000000,
+ 0b1110101111000000000000000000000000001111111111111111111111111100,
+ ];
+ static BITSET_MAPPING: [(u8, u8); 20] = [
+ (0, 64), (1, 188), (1, 183), (1, 176), (1, 109), (1, 124), (1, 126), (1, 66), (1, 70),
+ (1, 77), (2, 146), (2, 144), (2, 83), (3, 12), (3, 6), (4, 156), (4, 78), (5, 187),
+ (6, 132), (7, 93),
+ ];
+
+    /// Whether `c` is in this module's character set; see the
+    /// `bitset_search` sketch after this module for how the tables
+    /// are consumed.
+    pub fn lookup(c: char) -> bool {
+ super::bitset_search(
+ c as u32,
+ &BITSET_CHUNKS_MAP,
+ &BITSET_INDEX_CHUNKS,
+ &BITSET_CANONICAL,
+ &BITSET_MAPPING,
+ )
+ }
+}
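+
+// A sketch (not itself part of the upstream file) of how `bitset_search`
+// plausibly consumes the tables above: a codepoint selects one 64-bit
+// word through two levels of indirection (1024-codepoint chunk -> row of
+// 16 word indices -> word), words are deduplicated in BITSET_CANONICAL,
+// and an index past its end names a BITSET_MAPPING entry that rebuilds a
+// word from a canonical one. The mapping-byte layout assumed below (low
+// 6 bits = amount, bit 6 = invert, bit 7 = shift rather than rotate) is
+// not shown by this hunk, but it reproduces the data: in the `uppercase`
+// module further down, entry (0, 187) shifts canonical word 0 right by
+// 59, yielding the all-empty filler word 43 used throughout its rows.
+#[allow(dead_code)]
+fn bitset_contains(
+    needle: u32,
+    chunks_map: &[u8],         // 1024-codepoint chunk -> row index
+    index_chunks: &[[u8; 16]], // row of 16 leaf-word indices
+    canonical: &[u64],         // deduplicated leaf words
+    mapping: &[(u8, u8)],      // (canonical word index, transform byte)
+) -> bool {
+    let word_idx = (needle / 64) as usize;
+    let row = match chunks_map.get(word_idx / 16) {
+        Some(&r) => r as usize,
+        None => return false, // past the table: not in the set
+    };
+    let leaf = index_chunks[row][word_idx % 16] as usize;
+    let word = match canonical.get(leaf) {
+        Some(&w) => w,
+        None => {
+            // Rebuild a word that was canonicalized away (assumed encoding).
+            let (real, m) = mapping[leaf - canonical.len()];
+            let mut w = canonical[real as usize];
+            if m & (1 << 6) != 0 {
+                w = !w;
+            }
+            let amount = (m & 0x3f) as u32;
+            if m & (1 << 7) != 0 { w >> amount } else { w.rotate_left(amount) }
+        }
+    };
+    word & (1 << (needle % 64)) != 0
+}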
+
+#[rustfmt::skip]
+pub mod n {
+ static SHORT_OFFSET_RUNS: [u32; 38] = [
+ 1632, 18876774, 31461440, 102765417, 111154926, 115349830, 132128880, 165684320, 186656630,
+ 195046653, 199241735, 203436434, 216049184, 241215536, 249605104, 274792208, 278987015,
+ 283181793, 295766104, 320933114, 383848032, 392238160, 434181712, 442570976, 455154768,
+ 463544256, 476128256, 480340576, 484535936, 501338848, 505534414, 513925440, 518120176,
+ 522315975, 526511217, 534900992, 555875312, 561183738,
+ ];
+ static OFFSETS: [u8; 269] = [
+ 48, 10, 120, 2, 5, 1, 2, 3, 0, 10, 134, 10, 198, 10, 0, 10, 118, 10, 4, 6, 108, 10, 118,
+ 10, 118, 10, 2, 6, 110, 13, 115, 10, 8, 7, 103, 10, 104, 7, 7, 19, 109, 10, 96, 10, 118, 10,
+ 70, 20, 0, 10, 70, 10, 0, 20, 0, 3, 239, 10, 6, 10, 22, 10, 0, 10, 128, 11, 165, 10, 6, 10,
+ 182, 10, 86, 10, 134, 10, 6, 10, 0, 1, 3, 6, 6, 10, 198, 51, 2, 5, 0, 60, 78, 22, 0, 30, 0,
+ 1, 0, 1, 25, 9, 14, 3, 0, 4, 138, 10, 30, 8, 1, 15, 32, 10, 39, 15, 0, 10, 188, 10, 0, 6,
+ 154, 10, 38, 10, 198, 10, 22, 10, 86, 10, 0, 10, 0, 10, 0, 45, 12, 57, 17, 2, 0, 27, 36, 4,
+ 29, 1, 8, 1, 134, 5, 202, 10, 0, 8, 25, 7, 39, 9, 75, 5, 22, 6, 160, 2, 2, 16, 2, 46, 64, 9,
+ 52, 2, 30, 3, 75, 5, 104, 8, 24, 8, 41, 7, 0, 6, 48, 10, 0, 31, 158, 10, 42, 4, 112, 7, 134,
+ 30, 128, 10, 60, 10, 144, 10, 7, 20, 251, 10, 0, 10, 118, 10, 0, 10, 102, 10, 102, 12, 0,
+ 19, 93, 10, 0, 29, 227, 10, 70, 10, 0, 21, 0, 111, 0, 10, 86, 10, 134, 10, 1, 7, 0, 23, 0,
+ 20, 108, 25, 0, 50, 0, 10, 0, 10, 0, 9, 128, 10, 0, 59, 1, 3, 1, 4, 76, 45, 1, 15, 0, 13, 0,
+ 10, 0,
+ ];
+    /// Whether `c` is in this module's character set; see the
+    /// `skip_search` sketch after this module.
+    pub fn lookup(c: char) -> bool {
+ super::skip_search(
+ c as u32,
+ &SHORT_OFFSET_RUNS,
+ &OFFSETS,
+ )
+ }
+}
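+
+// A sketch of the companion `skip_search` strategy (the helper is also
+// outside this hunk). OFFSETS run-length encodes the codepoint line as
+// alternating runs outside (even absolute index) and inside (odd) the
+// set, with a 0 as padding where a gap is too large for a u8; each
+// SHORT_OFFSET_RUNS header packs, under the assumption checked against
+// the `white_space` tables below, the OFFSETS index where its span starts
+// (high 11 bits) and the codepoint where the *next* span starts (low 21
+// bits), so a lookup only ever walks one short span of runs.
+#[allow(dead_code)]
+fn skip_contains(needle: u32, headers: &[u32], offsets: &[u8]) -> bool {
+    let codepoint = |h: u32| h & ((1 << 21) - 1);
+    let index = |h: u32| (h >> 21) as usize;
+    // Span k starts at offsets[index(headers[k])] and at the codepoint in
+    // headers[k - 1] (zero for the first span). The final header sits past
+    // char::MAX, so every valid `needle` falls inside some span.
+    let k = headers.partition_point(|&h| codepoint(h) <= needle);
+    if k == headers.len() {
+        return false;
+    }
+    let mut pos = if k == 0 { 0 } else { codepoint(headers[k - 1]) };
+    let end = headers.get(k + 1).map_or(offsets.len(), |&h| index(h));
+    for j in index(headers[k])..end {
+        pos += offsets[j] as u32;
+        if needle < pos {
+            return j % 2 == 1; // landed in a run: odd index = in the set
+        }
+    }
+    false // ran off the span into the gap before the next header
+}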
+
+#[rustfmt::skip]
+pub mod uppercase {
+ static BITSET_CHUNKS_MAP: [u8; 125] = [
+ 12, 15, 6, 6, 0, 6, 6, 2, 4, 11, 6, 16, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 8, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 5, 6, 14, 6, 10, 6, 6, 1, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 7, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 13, 6, 6,
+ 6, 6, 9, 6, 3,
+ ];
+ static BITSET_INDEX_CHUNKS: [[u8; 16]; 17] = [
+ [43, 43, 5, 34, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 5, 1],
+ [43, 43, 5, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43],
+ [43, 43, 39, 43, 43, 43, 43, 43, 17, 17, 62, 17, 42, 29, 24, 23],
+ [43, 43, 43, 43, 9, 8, 44, 43, 43, 43, 43, 43, 43, 43, 43, 43],
+ [43, 43, 43, 43, 36, 28, 66, 43, 43, 43, 43, 43, 43, 43, 43, 43],
+ [43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 0, 43, 43, 43],
+ [43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43],
+ [43, 43, 43, 43, 43, 43, 43, 43, 43, 54, 43, 43, 43, 43, 43, 43],
+ [43, 43, 43, 43, 43, 43, 43, 43, 43, 61, 60, 43, 20, 14, 16, 4],
+ [43, 43, 43, 43, 55, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43],
+ [43, 43, 58, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43],
+ [43, 43, 59, 45, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43],
+ [43, 48, 43, 31, 35, 21, 22, 15, 13, 33, 43, 43, 43, 11, 30, 38],
+ [51, 53, 26, 49, 12, 7, 25, 50, 40, 52, 6, 3, 65, 64, 63, 67],
+ [56, 43, 9, 46, 43, 41, 32, 43, 43, 43, 43, 43, 43, 43, 43, 43],
+ [57, 19, 2, 18, 10, 47, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43],
+ [57, 37, 17, 27, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43],
+ ];
+ static BITSET_CANONICAL: [u64; 43] = [
+ 0b0000011111111111111111111111111000000000000000000000000000000000,
+ 0b0000000000111111111111111111111111111111111111111111111111111111,
+ 0b0101010101010101010101010101010101010101010101010101010000000001,
+ 0b0000011111111111111111111111110000000000000000000000000000000001,
+ 0b0000000000100000000000000000000000000001010000010000001011110101,
+ 0b1111111111111111111111111111111100000000000000000000000000000000,
+ 0b1111111111111111111111110000000000000000000000000000001111111111,
+ 0b1111111111111111111100000000000000000000000000011111110001011111,
+ 0b1111111111111111000000111111111111111111111111110000001111111111,
+ 0b1111111111111111000000000000000000000000000000000000000000000000,
+ 0b1111111111111110010101010101010101010101010101010101010101010101,
+ 0b1000000001000101000000000000000000000000000000000000000000000000,
+ 0b0111101100000000000000000000000000011111110111111110011110110000,
+ 0b0110110000000101010101010101010101010101010101010101010101010101,
+ 0b0110101000000000010101010101010101010101010101010101010101010101,
+ 0b0101010111010010010101010101010101001010101010101010010010010000,
+ 0b0101010101011111011111010101010101010101010001010010100001010101,
+ 0b0101010101010101010101010101010101010101010101010101010101010101,
+ 0b0101010101010101010101010101010101010101010101010010101010101011,
+ 0b0101010101010101010101010101010100000000000000000000000000000000,
+ 0b0101010101010100010101010101010000000000000000000000000000000000,
+ 0b0010101101010101010101010101010101010101010101010101010010101010,
+ 0b0001000110101110110100101101010110110001110110111100111011010110,
+ 0b0000111100000000000111110000000000001111000000000000111100000000,
+ 0b0000111100000000000000000000000000000000000000000000000000000000,
+ 0b0000001111111111111111111111111100000000000000000000000000111111,
+ 0b0000000000111111110111100110010011010000000000000000000000000011,
+ 0b0000000000000100001010000000010101010101010101010101010101010101,
+ 0b0000000000000000111111111111111100000000000000000000000000100000,
+ 0b0000000000000000111111110000000010101010000000000011111100000000,
+ 0b0000000000000000000011111111101111111111111111101101011101000000,
+ 0b0000000000000000000000000000000001111111011111111111111111111111,
+ 0b0000000000000000000000000000000000000000001101111111011111111111,
+ 0b0000000000000000000000000000000000000000000000000101010101111010,
+ 0b0000000000000000000000000000000000000000000000000010000010111111,
+ 0b1010101001010101010101010101010101010101010101010101010101010101,
+ 0b1100000000001111001111010101000000111110001001110011100010000100,
+ 0b1100000000100101111010101001110100000000000000000000000000000000,
+ 0b1110011010010000010101010101010101010101000111001000000000000000,
+ 0b1110011111111111111111111111111111111111111111110000000000000000,
+ 0b1111000000000000000000000000001111111111111111111111111100000000,
+ 0b1111011111111111000000000000000000000000000000000000000000000000,
+ 0b1111111100000000111111110000000000111111000000001111111100000000,
+ ];
+ static BITSET_MAPPING: [(u8, u8); 25] = [
+ (0, 187), (0, 177), (0, 171), (0, 167), (0, 164), (0, 32), (0, 47), (0, 51), (0, 121),
+ (0, 117), (0, 109), (1, 150), (1, 148), (1, 142), (1, 134), (1, 131), (1, 64), (2, 164),
+ (2, 146), (2, 20), (3, 146), (3, 140), (3, 134), (4, 178), (4, 171),
+ ];
+
+ pub fn lookup(c: char) -> bool {
+ super::bitset_search(
+ c as u32,
+ &BITSET_CHUNKS_MAP,
+ &BITSET_INDEX_CHUNKS,
+ &BITSET_CANONICAL,
+ &BITSET_MAPPING,
+ )
+ }
+}
+
+#[rustfmt::skip]
+pub mod white_space {
+ static SHORT_OFFSET_RUNS: [u32; 4] = [
+ 5760, 18882560, 23080960, 40972289,
+ ];
+ static OFFSETS: [u8; 21] = [
+ 9, 5, 18, 1, 100, 1, 26, 1, 0, 1, 0, 11, 29, 2, 5, 1, 47, 1, 0, 1, 0,
+ ];
+ pub fn lookup(c: char) -> bool {
+ super::skip_search(
+ c as u32,
+ &SHORT_OFFSET_RUNS,
+ &OFFSETS,
+ )
+ }
+}
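+
+// Worked example, decoding the `white_space` tables with the sketch
+// above: runs 9/5 put U+0009..U+000D in the set, 18/1 adds U+0020, 100/1
+// adds U+0085, and 26/1 adds U+00A0; the three remaining headers restart
+// the walk at U+1680, at U+2000 (runs 11/29/2/5/1/47/1, covering
+// U+2000..U+200A, U+2028..U+2029, U+202F, and U+205F), and at U+3000.
+// A hypothetical smoke test, not part of the upstream file:
+//
+//     assert!(white_space::lookup('\u{1680}'));
+//     assert!(!white_space::lookup('\u{200B}'));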
+
+#[rustfmt::skip]
+pub mod conversions {
+    /// Full lowercase mapping of `c`: up to three chars, '\0'-padded.
+    /// ASCII is answered without touching the table.
+    pub fn to_lower(c: char) -> [char; 3] {
+ if c.is_ascii() {
+ [(c as u8).to_ascii_lowercase() as char, '\0', '\0']
+ } else {
+ match bsearch_case_table(c, LOWERCASE_TABLE) {
+ None => [c, '\0', '\0'],
+ Some(index) => LOWERCASE_TABLE[index].1,
+ }
+ }
+ }
+
+    /// Full uppercase mapping of `c`, with the same '\0'-padded
+    /// convention as `to_lower`.
+    pub fn to_upper(c: char) -> [char; 3] {
+ if c.is_ascii() {
+ [(c as u8).to_ascii_uppercase() as char, '\0', '\0']
+ } else {
+ match bsearch_case_table(c, UPPERCASE_TABLE) {
+ None => [c, '\0', '\0'],
+ Some(index) => UPPERCASE_TABLE[index].1,
+ }
+ }
+ }
+
+    /// The case tables are sorted by their key `char`, which is what
+    /// makes the binary search valid.
+    fn bsearch_case_table(c: char, table: &[(char, [char; 3])]) -> Option<usize> {
+ table.binary_search_by(|&(key, _)| key.cmp(&c)).ok()
+ }
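+
+    // The mapping arrays are '\0'-padded because a few case mappings
+    // expand to more than one char: in the lowercase table the expanding
+    // entry visible here is U+0130 (LATIN CAPITAL LETTER I WITH DOT
+    // ABOVE), which lowers to 'i' plus U+0307 COMBINING DOT ABOVE. A
+    // hypothetical check, not part of the upstream file:
+    //
+    //     assert_eq!(to_lower('\u{130}'), ['i', '\u{307}', '\u{0}']);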
+ static LOWERCASE_TABLE: &[(char, [char; 3])] = &[
+ ('A', ['a', '\u{0}', '\u{0}']), ('B', ['b', '\u{0}', '\u{0}']),
+ ('C', ['c', '\u{0}', '\u{0}']), ('D', ['d', '\u{0}', '\u{0}']),
+ ('E', ['e', '\u{0}', '\u{0}']), ('F', ['f', '\u{0}', '\u{0}']),
+ ('G', ['g', '\u{0}', '\u{0}']), ('H', ['h', '\u{0}', '\u{0}']),
+ ('I', ['i', '\u{0}', '\u{0}']), ('J', ['j', '\u{0}', '\u{0}']),
+ ('K', ['k', '\u{0}', '\u{0}']), ('L', ['l', '\u{0}', '\u{0}']),
+ ('M', ['m', '\u{0}', '\u{0}']), ('N', ['n', '\u{0}', '\u{0}']),
+ ('O', ['o', '\u{0}', '\u{0}']), ('P', ['p', '\u{0}', '\u{0}']),
+ ('Q', ['q', '\u{0}', '\u{0}']), ('R', ['r', '\u{0}', '\u{0}']),
+ ('S', ['s', '\u{0}', '\u{0}']), ('T', ['t', '\u{0}', '\u{0}']),
+ ('U', ['u', '\u{0}', '\u{0}']), ('V', ['v', '\u{0}', '\u{0}']),
+ ('W', ['w', '\u{0}', '\u{0}']), ('X', ['x', '\u{0}', '\u{0}']),
+ ('Y', ['y', '\u{0}', '\u{0}']), ('Z', ['z', '\u{0}', '\u{0}']),
+ ('\u{c0}', ['\u{e0}', '\u{0}', '\u{0}']), ('\u{c1}', ['\u{e1}', '\u{0}', '\u{0}']),
+ ('\u{c2}', ['\u{e2}', '\u{0}', '\u{0}']), ('\u{c3}', ['\u{e3}', '\u{0}', '\u{0}']),
+ ('\u{c4}', ['\u{e4}', '\u{0}', '\u{0}']), ('\u{c5}', ['\u{e5}', '\u{0}', '\u{0}']),
+ ('\u{c6}', ['\u{e6}', '\u{0}', '\u{0}']), ('\u{c7}', ['\u{e7}', '\u{0}', '\u{0}']),
+ ('\u{c8}', ['\u{e8}', '\u{0}', '\u{0}']), ('\u{c9}', ['\u{e9}', '\u{0}', '\u{0}']),
+ ('\u{ca}', ['\u{ea}', '\u{0}', '\u{0}']), ('\u{cb}', ['\u{eb}', '\u{0}', '\u{0}']),
+ ('\u{cc}', ['\u{ec}', '\u{0}', '\u{0}']), ('\u{cd}', ['\u{ed}', '\u{0}', '\u{0}']),
+ ('\u{ce}', ['\u{ee}', '\u{0}', '\u{0}']), ('\u{cf}', ['\u{ef}', '\u{0}', '\u{0}']),
+ ('\u{d0}', ['\u{f0}', '\u{0}', '\u{0}']), ('\u{d1}', ['\u{f1}', '\u{0}', '\u{0}']),
+ ('\u{d2}', ['\u{f2}', '\u{0}', '\u{0}']), ('\u{d3}', ['\u{f3}', '\u{0}', '\u{0}']),
+ ('\u{d4}', ['\u{f4}', '\u{0}', '\u{0}']), ('\u{d5}', ['\u{f5}', '\u{0}', '\u{0}']),
+ ('\u{d6}', ['\u{f6}', '\u{0}', '\u{0}']), ('\u{d8}', ['\u{f8}', '\u{0}', '\u{0}']),
+ ('\u{d9}', ['\u{f9}', '\u{0}', '\u{0}']), ('\u{da}', ['\u{fa}', '\u{0}', '\u{0}']),
+ ('\u{db}', ['\u{fb}', '\u{0}', '\u{0}']), ('\u{dc}', ['\u{fc}', '\u{0}', '\u{0}']),
+ ('\u{dd}', ['\u{fd}', '\u{0}', '\u{0}']), ('\u{de}', ['\u{fe}', '\u{0}', '\u{0}']),
+ ('\u{100}', ['\u{101}', '\u{0}', '\u{0}']), ('\u{102}', ['\u{103}', '\u{0}', '\u{0}']),
+ ('\u{104}', ['\u{105}', '\u{0}', '\u{0}']), ('\u{106}', ['\u{107}', '\u{0}', '\u{0}']),
+ ('\u{108}', ['\u{109}', '\u{0}', '\u{0}']), ('\u{10a}', ['\u{10b}', '\u{0}', '\u{0}']),
+ ('\u{10c}', ['\u{10d}', '\u{0}', '\u{0}']), ('\u{10e}', ['\u{10f}', '\u{0}', '\u{0}']),
+ ('\u{110}', ['\u{111}', '\u{0}', '\u{0}']), ('\u{112}', ['\u{113}', '\u{0}', '\u{0}']),
+ ('\u{114}', ['\u{115}', '\u{0}', '\u{0}']), ('\u{116}', ['\u{117}', '\u{0}', '\u{0}']),
+ ('\u{118}', ['\u{119}', '\u{0}', '\u{0}']), ('\u{11a}', ['\u{11b}', '\u{0}', '\u{0}']),
+ ('\u{11c}', ['\u{11d}', '\u{0}', '\u{0}']), ('\u{11e}', ['\u{11f}', '\u{0}', '\u{0}']),
+ ('\u{120}', ['\u{121}', '\u{0}', '\u{0}']), ('\u{122}', ['\u{123}', '\u{0}', '\u{0}']),
+ ('\u{124}', ['\u{125}', '\u{0}', '\u{0}']), ('\u{126}', ['\u{127}', '\u{0}', '\u{0}']),
+ ('\u{128}', ['\u{129}', '\u{0}', '\u{0}']), ('\u{12a}', ['\u{12b}', '\u{0}', '\u{0}']),
+ ('\u{12c}', ['\u{12d}', '\u{0}', '\u{0}']), ('\u{12e}', ['\u{12f}', '\u{0}', '\u{0}']),
+ ('\u{130}', ['i', '\u{307}', '\u{0}']), ('\u{132}', ['\u{133}', '\u{0}', '\u{0}']),
+ ('\u{134}', ['\u{135}', '\u{0}', '\u{0}']), ('\u{136}', ['\u{137}', '\u{0}', '\u{0}']),
+ ('\u{139}', ['\u{13a}', '\u{0}', '\u{0}']), ('\u{13b}', ['\u{13c}', '\u{0}', '\u{0}']),
+ ('\u{13d}', ['\u{13e}', '\u{0}', '\u{0}']), ('\u{13f}', ['\u{140}', '\u{0}', '\u{0}']),
+ ('\u{141}', ['\u{142}', '\u{0}', '\u{0}']), ('\u{143}', ['\u{144}', '\u{0}', '\u{0}']),
+ ('\u{145}', ['\u{146}', '\u{0}', '\u{0}']), ('\u{147}', ['\u{148}', '\u{0}', '\u{0}']),
+ ('\u{14a}', ['\u{14b}', '\u{0}', '\u{0}']), ('\u{14c}', ['\u{14d}', '\u{0}', '\u{0}']),
+ ('\u{14e}', ['\u{14f}', '\u{0}', '\u{0}']), ('\u{150}', ['\u{151}', '\u{0}', '\u{0}']),
+ ('\u{152}', ['\u{153}', '\u{0}', '\u{0}']), ('\u{154}', ['\u{155}', '\u{0}', '\u{0}']),
+ ('\u{156}', ['\u{157}', '\u{0}', '\u{0}']), ('\u{158}', ['\u{159}', '\u{0}', '\u{0}']),
+ ('\u{15a}', ['\u{15b}', '\u{0}', '\u{0}']), ('\u{15c}', ['\u{15d}', '\u{0}', '\u{0}']),
+ ('\u{15e}', ['\u{15f}', '\u{0}', '\u{0}']), ('\u{160}', ['\u{161}', '\u{0}', '\u{0}']),
+ ('\u{162}', ['\u{163}', '\u{0}', '\u{0}']), ('\u{164}', ['\u{165}', '\u{0}', '\u{0}']),
+ ('\u{166}', ['\u{167}', '\u{0}', '\u{0}']), ('\u{168}', ['\u{169}', '\u{0}', '\u{0}']),
+ ('\u{16a}', ['\u{16b}', '\u{0}', '\u{0}']), ('\u{16c}', ['\u{16d}', '\u{0}', '\u{0}']),
+ ('\u{16e}', ['\u{16f}', '\u{0}', '\u{0}']), ('\u{170}', ['\u{171}', '\u{0}', '\u{0}']),
+ ('\u{172}', ['\u{173}', '\u{0}', '\u{0}']), ('\u{174}', ['\u{175}', '\u{0}', '\u{0}']),
+ ('\u{176}', ['\u{177}', '\u{0}', '\u{0}']), ('\u{178}', ['\u{ff}', '\u{0}', '\u{0}']),
+ ('\u{179}', ['\u{17a}', '\u{0}', '\u{0}']), ('\u{17b}', ['\u{17c}', '\u{0}', '\u{0}']),
+ ('\u{17d}', ['\u{17e}', '\u{0}', '\u{0}']), ('\u{181}', ['\u{253}', '\u{0}', '\u{0}']),
+ ('\u{182}', ['\u{183}', '\u{0}', '\u{0}']), ('\u{184}', ['\u{185}', '\u{0}', '\u{0}']),
+ ('\u{186}', ['\u{254}', '\u{0}', '\u{0}']), ('\u{187}', ['\u{188}', '\u{0}', '\u{0}']),
+ ('\u{189}', ['\u{256}', '\u{0}', '\u{0}']), ('\u{18a}', ['\u{257}', '\u{0}', '\u{0}']),
+ ('\u{18b}', ['\u{18c}', '\u{0}', '\u{0}']), ('\u{18e}', ['\u{1dd}', '\u{0}', '\u{0}']),
+ ('\u{18f}', ['\u{259}', '\u{0}', '\u{0}']), ('\u{190}', ['\u{25b}', '\u{0}', '\u{0}']),
+ ('\u{191}', ['\u{192}', '\u{0}', '\u{0}']), ('\u{193}', ['\u{260}', '\u{0}', '\u{0}']),
+ ('\u{194}', ['\u{263}', '\u{0}', '\u{0}']), ('\u{196}', ['\u{269}', '\u{0}', '\u{0}']),
+ ('\u{197}', ['\u{268}', '\u{0}', '\u{0}']), ('\u{198}', ['\u{199}', '\u{0}', '\u{0}']),
+ ('\u{19c}', ['\u{26f}', '\u{0}', '\u{0}']), ('\u{19d}', ['\u{272}', '\u{0}', '\u{0}']),
+ ('\u{19f}', ['\u{275}', '\u{0}', '\u{0}']), ('\u{1a0}', ['\u{1a1}', '\u{0}', '\u{0}']),
+ ('\u{1a2}', ['\u{1a3}', '\u{0}', '\u{0}']), ('\u{1a4}', ['\u{1a5}', '\u{0}', '\u{0}']),
+ ('\u{1a6}', ['\u{280}', '\u{0}', '\u{0}']), ('\u{1a7}', ['\u{1a8}', '\u{0}', '\u{0}']),
+ ('\u{1a9}', ['\u{283}', '\u{0}', '\u{0}']), ('\u{1ac}', ['\u{1ad}', '\u{0}', '\u{0}']),
+ ('\u{1ae}', ['\u{288}', '\u{0}', '\u{0}']), ('\u{1af}', ['\u{1b0}', '\u{0}', '\u{0}']),
+ ('\u{1b1}', ['\u{28a}', '\u{0}', '\u{0}']), ('\u{1b2}', ['\u{28b}', '\u{0}', '\u{0}']),
+ ('\u{1b3}', ['\u{1b4}', '\u{0}', '\u{0}']), ('\u{1b5}', ['\u{1b6}', '\u{0}', '\u{0}']),
+ ('\u{1b7}', ['\u{292}', '\u{0}', '\u{0}']), ('\u{1b8}', ['\u{1b9}', '\u{0}', '\u{0}']),
+ ('\u{1bc}', ['\u{1bd}', '\u{0}', '\u{0}']), ('\u{1c4}', ['\u{1c6}', '\u{0}', '\u{0}']),
+ ('\u{1c5}', ['\u{1c6}', '\u{0}', '\u{0}']), ('\u{1c7}', ['\u{1c9}', '\u{0}', '\u{0}']),
+ ('\u{1c8}', ['\u{1c9}', '\u{0}', '\u{0}']), ('\u{1ca}', ['\u{1cc}', '\u{0}', '\u{0}']),
+ ('\u{1cb}', ['\u{1cc}', '\u{0}', '\u{0}']), ('\u{1cd}', ['\u{1ce}', '\u{0}', '\u{0}']),
+ ('\u{1cf}', ['\u{1d0}', '\u{0}', '\u{0}']), ('\u{1d1}', ['\u{1d2}', '\u{0}', '\u{0}']),
+ ('\u{1d3}', ['\u{1d4}', '\u{0}', '\u{0}']), ('\u{1d5}', ['\u{1d6}', '\u{0}', '\u{0}']),
+ ('\u{1d7}', ['\u{1d8}', '\u{0}', '\u{0}']), ('\u{1d9}', ['\u{1da}', '\u{0}', '\u{0}']),
+ ('\u{1db}', ['\u{1dc}', '\u{0}', '\u{0}']), ('\u{1de}', ['\u{1df}', '\u{0}', '\u{0}']),
+ ('\u{1e0}', ['\u{1e1}', '\u{0}', '\u{0}']), ('\u{1e2}', ['\u{1e3}', '\u{0}', '\u{0}']),
+ ('\u{1e4}', ['\u{1e5}', '\u{0}', '\u{0}']), ('\u{1e6}', ['\u{1e7}', '\u{0}', '\u{0}']),
+ ('\u{1e8}', ['\u{1e9}', '\u{0}', '\u{0}']), ('\u{1ea}', ['\u{1eb}', '\u{0}', '\u{0}']),
+ ('\u{1ec}', ['\u{1ed}', '\u{0}', '\u{0}']), ('\u{1ee}', ['\u{1ef}', '\u{0}', '\u{0}']),
+ ('\u{1f1}', ['\u{1f3}', '\u{0}', '\u{0}']), ('\u{1f2}', ['\u{1f3}', '\u{0}', '\u{0}']),
+ ('\u{1f4}', ['\u{1f5}', '\u{0}', '\u{0}']), ('\u{1f6}', ['\u{195}', '\u{0}', '\u{0}']),
+ ('\u{1f7}', ['\u{1bf}', '\u{0}', '\u{0}']), ('\u{1f8}', ['\u{1f9}', '\u{0}', '\u{0}']),
+ ('\u{1fa}', ['\u{1fb}', '\u{0}', '\u{0}']), ('\u{1fc}', ['\u{1fd}', '\u{0}', '\u{0}']),
+ ('\u{1fe}', ['\u{1ff}', '\u{0}', '\u{0}']), ('\u{200}', ['\u{201}', '\u{0}', '\u{0}']),
+ ('\u{202}', ['\u{203}', '\u{0}', '\u{0}']), ('\u{204}', ['\u{205}', '\u{0}', '\u{0}']),
+ ('\u{206}', ['\u{207}', '\u{0}', '\u{0}']), ('\u{208}', ['\u{209}', '\u{0}', '\u{0}']),
+ ('\u{20a}', ['\u{20b}', '\u{0}', '\u{0}']), ('\u{20c}', ['\u{20d}', '\u{0}', '\u{0}']),
+ ('\u{20e}', ['\u{20f}', '\u{0}', '\u{0}']), ('\u{210}', ['\u{211}', '\u{0}', '\u{0}']),
+ ('\u{212}', ['\u{213}', '\u{0}', '\u{0}']), ('\u{214}', ['\u{215}', '\u{0}', '\u{0}']),
+ ('\u{216}', ['\u{217}', '\u{0}', '\u{0}']), ('\u{218}', ['\u{219}', '\u{0}', '\u{0}']),
+ ('\u{21a}', ['\u{21b}', '\u{0}', '\u{0}']), ('\u{21c}', ['\u{21d}', '\u{0}', '\u{0}']),
+ ('\u{21e}', ['\u{21f}', '\u{0}', '\u{0}']), ('\u{220}', ['\u{19e}', '\u{0}', '\u{0}']),
+ ('\u{222}', ['\u{223}', '\u{0}', '\u{0}']), ('\u{224}', ['\u{225}', '\u{0}', '\u{0}']),
+ ('\u{226}', ['\u{227}', '\u{0}', '\u{0}']), ('\u{228}', ['\u{229}', '\u{0}', '\u{0}']),
+ ('\u{22a}', ['\u{22b}', '\u{0}', '\u{0}']), ('\u{22c}', ['\u{22d}', '\u{0}', '\u{0}']),
+ ('\u{22e}', ['\u{22f}', '\u{0}', '\u{0}']), ('\u{230}', ['\u{231}', '\u{0}', '\u{0}']),
+ ('\u{232}', ['\u{233}', '\u{0}', '\u{0}']), ('\u{23a}', ['\u{2c65}', '\u{0}', '\u{0}']),
+ ('\u{23b}', ['\u{23c}', '\u{0}', '\u{0}']), ('\u{23d}', ['\u{19a}', '\u{0}', '\u{0}']),
+ ('\u{23e}', ['\u{2c66}', '\u{0}', '\u{0}']), ('\u{241}', ['\u{242}', '\u{0}', '\u{0}']),
+ ('\u{243}', ['\u{180}', '\u{0}', '\u{0}']), ('\u{244}', ['\u{289}', '\u{0}', '\u{0}']),
+ ('\u{245}', ['\u{28c}', '\u{0}', '\u{0}']), ('\u{246}', ['\u{247}', '\u{0}', '\u{0}']),
+ ('\u{248}', ['\u{249}', '\u{0}', '\u{0}']), ('\u{24a}', ['\u{24b}', '\u{0}', '\u{0}']),
+ ('\u{24c}', ['\u{24d}', '\u{0}', '\u{0}']), ('\u{24e}', ['\u{24f}', '\u{0}', '\u{0}']),
+ ('\u{370}', ['\u{371}', '\u{0}', '\u{0}']), ('\u{372}', ['\u{373}', '\u{0}', '\u{0}']),
+ ('\u{376}', ['\u{377}', '\u{0}', '\u{0}']), ('\u{37f}', ['\u{3f3}', '\u{0}', '\u{0}']),
+ ('\u{386}', ['\u{3ac}', '\u{0}', '\u{0}']), ('\u{388}', ['\u{3ad}', '\u{0}', '\u{0}']),
+ ('\u{389}', ['\u{3ae}', '\u{0}', '\u{0}']), ('\u{38a}', ['\u{3af}', '\u{0}', '\u{0}']),
+ ('\u{38c}', ['\u{3cc}', '\u{0}', '\u{0}']), ('\u{38e}', ['\u{3cd}', '\u{0}', '\u{0}']),
+ ('\u{38f}', ['\u{3ce}', '\u{0}', '\u{0}']), ('\u{391}', ['\u{3b1}', '\u{0}', '\u{0}']),
+ ('\u{392}', ['\u{3b2}', '\u{0}', '\u{0}']), ('\u{393}', ['\u{3b3}', '\u{0}', '\u{0}']),
+ ('\u{394}', ['\u{3b4}', '\u{0}', '\u{0}']), ('\u{395}', ['\u{3b5}', '\u{0}', '\u{0}']),
+ ('\u{396}', ['\u{3b6}', '\u{0}', '\u{0}']), ('\u{397}', ['\u{3b7}', '\u{0}', '\u{0}']),
+ ('\u{398}', ['\u{3b8}', '\u{0}', '\u{0}']), ('\u{399}', ['\u{3b9}', '\u{0}', '\u{0}']),
+ ('\u{39a}', ['\u{3ba}', '\u{0}', '\u{0}']), ('\u{39b}', ['\u{3bb}', '\u{0}', '\u{0}']),
+ ('\u{39c}', ['\u{3bc}', '\u{0}', '\u{0}']), ('\u{39d}', ['\u{3bd}', '\u{0}', '\u{0}']),
+ ('\u{39e}', ['\u{3be}', '\u{0}', '\u{0}']), ('\u{39f}', ['\u{3bf}', '\u{0}', '\u{0}']),
+ ('\u{3a0}', ['\u{3c0}', '\u{0}', '\u{0}']), ('\u{3a1}', ['\u{3c1}', '\u{0}', '\u{0}']),
+ ('\u{3a3}', ['\u{3c3}', '\u{0}', '\u{0}']), ('\u{3a4}', ['\u{3c4}', '\u{0}', '\u{0}']),
+ ('\u{3a5}', ['\u{3c5}', '\u{0}', '\u{0}']), ('\u{3a6}', ['\u{3c6}', '\u{0}', '\u{0}']),
+ ('\u{3a7}', ['\u{3c7}', '\u{0}', '\u{0}']), ('\u{3a8}', ['\u{3c8}', '\u{0}', '\u{0}']),
+ ('\u{3a9}', ['\u{3c9}', '\u{0}', '\u{0}']), ('\u{3aa}', ['\u{3ca}', '\u{0}', '\u{0}']),
+ ('\u{3ab}', ['\u{3cb}', '\u{0}', '\u{0}']), ('\u{3cf}', ['\u{3d7}', '\u{0}', '\u{0}']),
+ ('\u{3d8}', ['\u{3d9}', '\u{0}', '\u{0}']), ('\u{3da}', ['\u{3db}', '\u{0}', '\u{0}']),
+ ('\u{3dc}', ['\u{3dd}', '\u{0}', '\u{0}']), ('\u{3de}', ['\u{3df}', '\u{0}', '\u{0}']),
+ ('\u{3e0}', ['\u{3e1}', '\u{0}', '\u{0}']), ('\u{3e2}', ['\u{3e3}', '\u{0}', '\u{0}']),
+ ('\u{3e4}', ['\u{3e5}', '\u{0}', '\u{0}']), ('\u{3e6}', ['\u{3e7}', '\u{0}', '\u{0}']),
+ ('\u{3e8}', ['\u{3e9}', '\u{0}', '\u{0}']), ('\u{3ea}', ['\u{3eb}', '\u{0}', '\u{0}']),
+ ('\u{3ec}', ['\u{3ed}', '\u{0}', '\u{0}']), ('\u{3ee}', ['\u{3ef}', '\u{0}', '\u{0}']),
+ ('\u{3f4}', ['\u{3b8}', '\u{0}', '\u{0}']), ('\u{3f7}', ['\u{3f8}', '\u{0}', '\u{0}']),
+ ('\u{3f9}', ['\u{3f2}', '\u{0}', '\u{0}']), ('\u{3fa}', ['\u{3fb}', '\u{0}', '\u{0}']),
+ ('\u{3fd}', ['\u{37b}', '\u{0}', '\u{0}']), ('\u{3fe}', ['\u{37c}', '\u{0}', '\u{0}']),
+ ('\u{3ff}', ['\u{37d}', '\u{0}', '\u{0}']), ('\u{400}', ['\u{450}', '\u{0}', '\u{0}']),
+ ('\u{401}', ['\u{451}', '\u{0}', '\u{0}']), ('\u{402}', ['\u{452}', '\u{0}', '\u{0}']),
+ ('\u{403}', ['\u{453}', '\u{0}', '\u{0}']), ('\u{404}', ['\u{454}', '\u{0}', '\u{0}']),
+ ('\u{405}', ['\u{455}', '\u{0}', '\u{0}']), ('\u{406}', ['\u{456}', '\u{0}', '\u{0}']),
+ ('\u{407}', ['\u{457}', '\u{0}', '\u{0}']), ('\u{408}', ['\u{458}', '\u{0}', '\u{0}']),
+ ('\u{409}', ['\u{459}', '\u{0}', '\u{0}']), ('\u{40a}', ['\u{45a}', '\u{0}', '\u{0}']),
+ ('\u{40b}', ['\u{45b}', '\u{0}', '\u{0}']), ('\u{40c}', ['\u{45c}', '\u{0}', '\u{0}']),
+ ('\u{40d}', ['\u{45d}', '\u{0}', '\u{0}']), ('\u{40e}', ['\u{45e}', '\u{0}', '\u{0}']),
+ ('\u{40f}', ['\u{45f}', '\u{0}', '\u{0}']), ('\u{410}', ['\u{430}', '\u{0}', '\u{0}']),
+ ('\u{411}', ['\u{431}', '\u{0}', '\u{0}']), ('\u{412}', ['\u{432}', '\u{0}', '\u{0}']),
+ ('\u{413}', ['\u{433}', '\u{0}', '\u{0}']), ('\u{414}', ['\u{434}', '\u{0}', '\u{0}']),
+ ('\u{415}', ['\u{435}', '\u{0}', '\u{0}']), ('\u{416}', ['\u{436}', '\u{0}', '\u{0}']),
+ ('\u{417}', ['\u{437}', '\u{0}', '\u{0}']), ('\u{418}', ['\u{438}', '\u{0}', '\u{0}']),
+ ('\u{419}', ['\u{439}', '\u{0}', '\u{0}']), ('\u{41a}', ['\u{43a}', '\u{0}', '\u{0}']),
+ ('\u{41b}', ['\u{43b}', '\u{0}', '\u{0}']), ('\u{41c}', ['\u{43c}', '\u{0}', '\u{0}']),
+ ('\u{41d}', ['\u{43d}', '\u{0}', '\u{0}']), ('\u{41e}', ['\u{43e}', '\u{0}', '\u{0}']),
+ ('\u{41f}', ['\u{43f}', '\u{0}', '\u{0}']), ('\u{420}', ['\u{440}', '\u{0}', '\u{0}']),
+ ('\u{421}', ['\u{441}', '\u{0}', '\u{0}']), ('\u{422}', ['\u{442}', '\u{0}', '\u{0}']),
+ ('\u{423}', ['\u{443}', '\u{0}', '\u{0}']), ('\u{424}', ['\u{444}', '\u{0}', '\u{0}']),
+ ('\u{425}', ['\u{445}', '\u{0}', '\u{0}']), ('\u{426}', ['\u{446}', '\u{0}', '\u{0}']),
+ ('\u{427}', ['\u{447}', '\u{0}', '\u{0}']), ('\u{428}', ['\u{448}', '\u{0}', '\u{0}']),
+ ('\u{429}', ['\u{449}', '\u{0}', '\u{0}']), ('\u{42a}', ['\u{44a}', '\u{0}', '\u{0}']),
+ ('\u{42b}', ['\u{44b}', '\u{0}', '\u{0}']), ('\u{42c}', ['\u{44c}', '\u{0}', '\u{0}']),
+ ('\u{42d}', ['\u{44d}', '\u{0}', '\u{0}']), ('\u{42e}', ['\u{44e}', '\u{0}', '\u{0}']),
+ ('\u{42f}', ['\u{44f}', '\u{0}', '\u{0}']), ('\u{460}', ['\u{461}', '\u{0}', '\u{0}']),
+ ('\u{462}', ['\u{463}', '\u{0}', '\u{0}']), ('\u{464}', ['\u{465}', '\u{0}', '\u{0}']),
+ ('\u{466}', ['\u{467}', '\u{0}', '\u{0}']), ('\u{468}', ['\u{469}', '\u{0}', '\u{0}']),
+ ('\u{46a}', ['\u{46b}', '\u{0}', '\u{0}']), ('\u{46c}', ['\u{46d}', '\u{0}', '\u{0}']),
+ ('\u{46e}', ['\u{46f}', '\u{0}', '\u{0}']), ('\u{470}', ['\u{471}', '\u{0}', '\u{0}']),
+ ('\u{472}', ['\u{473}', '\u{0}', '\u{0}']), ('\u{474}', ['\u{475}', '\u{0}', '\u{0}']),
+ ('\u{476}', ['\u{477}', '\u{0}', '\u{0}']), ('\u{478}', ['\u{479}', '\u{0}', '\u{0}']),
+ ('\u{47a}', ['\u{47b}', '\u{0}', '\u{0}']), ('\u{47c}', ['\u{47d}', '\u{0}', '\u{0}']),
+ ('\u{47e}', ['\u{47f}', '\u{0}', '\u{0}']), ('\u{480}', ['\u{481}', '\u{0}', '\u{0}']),
+ ('\u{48a}', ['\u{48b}', '\u{0}', '\u{0}']), ('\u{48c}', ['\u{48d}', '\u{0}', '\u{0}']),
+ ('\u{48e}', ['\u{48f}', '\u{0}', '\u{0}']), ('\u{490}', ['\u{491}', '\u{0}', '\u{0}']),
+ ('\u{492}', ['\u{493}', '\u{0}', '\u{0}']), ('\u{494}', ['\u{495}', '\u{0}', '\u{0}']),
+ ('\u{496}', ['\u{497}', '\u{0}', '\u{0}']), ('\u{498}', ['\u{499}', '\u{0}', '\u{0}']),
+ ('\u{49a}', ['\u{49b}', '\u{0}', '\u{0}']), ('\u{49c}', ['\u{49d}', '\u{0}', '\u{0}']),
+ ('\u{49e}', ['\u{49f}', '\u{0}', '\u{0}']), ('\u{4a0}', ['\u{4a1}', '\u{0}', '\u{0}']),
+ ('\u{4a2}', ['\u{4a3}', '\u{0}', '\u{0}']), ('\u{4a4}', ['\u{4a5}', '\u{0}', '\u{0}']),
+ ('\u{4a6}', ['\u{4a7}', '\u{0}', '\u{0}']), ('\u{4a8}', ['\u{4a9}', '\u{0}', '\u{0}']),
+ ('\u{4aa}', ['\u{4ab}', '\u{0}', '\u{0}']), ('\u{4ac}', ['\u{4ad}', '\u{0}', '\u{0}']),
+ ('\u{4ae}', ['\u{4af}', '\u{0}', '\u{0}']), ('\u{4b0}', ['\u{4b1}', '\u{0}', '\u{0}']),
+ ('\u{4b2}', ['\u{4b3}', '\u{0}', '\u{0}']), ('\u{4b4}', ['\u{4b5}', '\u{0}', '\u{0}']),
+ ('\u{4b6}', ['\u{4b7}', '\u{0}', '\u{0}']), ('\u{4b8}', ['\u{4b9}', '\u{0}', '\u{0}']),
+ ('\u{4ba}', ['\u{4bb}', '\u{0}', '\u{0}']), ('\u{4bc}', ['\u{4bd}', '\u{0}', '\u{0}']),
+ ('\u{4be}', ['\u{4bf}', '\u{0}', '\u{0}']), ('\u{4c0}', ['\u{4cf}', '\u{0}', '\u{0}']),
+ ('\u{4c1}', ['\u{4c2}', '\u{0}', '\u{0}']), ('\u{4c3}', ['\u{4c4}', '\u{0}', '\u{0}']),
+ ('\u{4c5}', ['\u{4c6}', '\u{0}', '\u{0}']), ('\u{4c7}', ['\u{4c8}', '\u{0}', '\u{0}']),
+ ('\u{4c9}', ['\u{4ca}', '\u{0}', '\u{0}']), ('\u{4cb}', ['\u{4cc}', '\u{0}', '\u{0}']),
+ ('\u{4cd}', ['\u{4ce}', '\u{0}', '\u{0}']), ('\u{4d0}', ['\u{4d1}', '\u{0}', '\u{0}']),
+ ('\u{4d2}', ['\u{4d3}', '\u{0}', '\u{0}']), ('\u{4d4}', ['\u{4d5}', '\u{0}', '\u{0}']),
+ ('\u{4d6}', ['\u{4d7}', '\u{0}', '\u{0}']), ('\u{4d8}', ['\u{4d9}', '\u{0}', '\u{0}']),
+ ('\u{4da}', ['\u{4db}', '\u{0}', '\u{0}']), ('\u{4dc}', ['\u{4dd}', '\u{0}', '\u{0}']),
+ ('\u{4de}', ['\u{4df}', '\u{0}', '\u{0}']), ('\u{4e0}', ['\u{4e1}', '\u{0}', '\u{0}']),
+ ('\u{4e2}', ['\u{4e3}', '\u{0}', '\u{0}']), ('\u{4e4}', ['\u{4e5}', '\u{0}', '\u{0}']),
+ ('\u{4e6}', ['\u{4e7}', '\u{0}', '\u{0}']), ('\u{4e8}', ['\u{4e9}', '\u{0}', '\u{0}']),
+ ('\u{4ea}', ['\u{4eb}', '\u{0}', '\u{0}']), ('\u{4ec}', ['\u{4ed}', '\u{0}', '\u{0}']),
+ ('\u{4ee}', ['\u{4ef}', '\u{0}', '\u{0}']), ('\u{4f0}', ['\u{4f1}', '\u{0}', '\u{0}']),
+ ('\u{4f2}', ['\u{4f3}', '\u{0}', '\u{0}']), ('\u{4f4}', ['\u{4f5}', '\u{0}', '\u{0}']),
+ ('\u{4f6}', ['\u{4f7}', '\u{0}', '\u{0}']), ('\u{4f8}', ['\u{4f9}', '\u{0}', '\u{0}']),
+ ('\u{4fa}', ['\u{4fb}', '\u{0}', '\u{0}']), ('\u{4fc}', ['\u{4fd}', '\u{0}', '\u{0}']),
+ ('\u{4fe}', ['\u{4ff}', '\u{0}', '\u{0}']), ('\u{500}', ['\u{501}', '\u{0}', '\u{0}']),
+ ('\u{502}', ['\u{503}', '\u{0}', '\u{0}']), ('\u{504}', ['\u{505}', '\u{0}', '\u{0}']),
+ ('\u{506}', ['\u{507}', '\u{0}', '\u{0}']), ('\u{508}', ['\u{509}', '\u{0}', '\u{0}']),
+ ('\u{50a}', ['\u{50b}', '\u{0}', '\u{0}']), ('\u{50c}', ['\u{50d}', '\u{0}', '\u{0}']),
+ ('\u{50e}', ['\u{50f}', '\u{0}', '\u{0}']), ('\u{510}', ['\u{511}', '\u{0}', '\u{0}']),
+ ('\u{512}', ['\u{513}', '\u{0}', '\u{0}']), ('\u{514}', ['\u{515}', '\u{0}', '\u{0}']),
+ ('\u{516}', ['\u{517}', '\u{0}', '\u{0}']), ('\u{518}', ['\u{519}', '\u{0}', '\u{0}']),
+ ('\u{51a}', ['\u{51b}', '\u{0}', '\u{0}']), ('\u{51c}', ['\u{51d}', '\u{0}', '\u{0}']),
+ ('\u{51e}', ['\u{51f}', '\u{0}', '\u{0}']), ('\u{520}', ['\u{521}', '\u{0}', '\u{0}']),
+ ('\u{522}', ['\u{523}', '\u{0}', '\u{0}']), ('\u{524}', ['\u{525}', '\u{0}', '\u{0}']),
+ ('\u{526}', ['\u{527}', '\u{0}', '\u{0}']), ('\u{528}', ['\u{529}', '\u{0}', '\u{0}']),
+ ('\u{52a}', ['\u{52b}', '\u{0}', '\u{0}']), ('\u{52c}', ['\u{52d}', '\u{0}', '\u{0}']),
+ ('\u{52e}', ['\u{52f}', '\u{0}', '\u{0}']), ('\u{531}', ['\u{561}', '\u{0}', '\u{0}']),
+ ('\u{532}', ['\u{562}', '\u{0}', '\u{0}']), ('\u{533}', ['\u{563}', '\u{0}', '\u{0}']),
+ ('\u{534}', ['\u{564}', '\u{0}', '\u{0}']), ('\u{535}', ['\u{565}', '\u{0}', '\u{0}']),
+ ('\u{536}', ['\u{566}', '\u{0}', '\u{0}']), ('\u{537}', ['\u{567}', '\u{0}', '\u{0}']),
+ ('\u{538}', ['\u{568}', '\u{0}', '\u{0}']), ('\u{539}', ['\u{569}', '\u{0}', '\u{0}']),
+ ('\u{53a}', ['\u{56a}', '\u{0}', '\u{0}']), ('\u{53b}', ['\u{56b}', '\u{0}', '\u{0}']),
+ ('\u{53c}', ['\u{56c}', '\u{0}', '\u{0}']), ('\u{53d}', ['\u{56d}', '\u{0}', '\u{0}']),
+ ('\u{53e}', ['\u{56e}', '\u{0}', '\u{0}']), ('\u{53f}', ['\u{56f}', '\u{0}', '\u{0}']),
+ ('\u{540}', ['\u{570}', '\u{0}', '\u{0}']), ('\u{541}', ['\u{571}', '\u{0}', '\u{0}']),
+ ('\u{542}', ['\u{572}', '\u{0}', '\u{0}']), ('\u{543}', ['\u{573}', '\u{0}', '\u{0}']),
+ ('\u{544}', ['\u{574}', '\u{0}', '\u{0}']), ('\u{545}', ['\u{575}', '\u{0}', '\u{0}']),
+ ('\u{546}', ['\u{576}', '\u{0}', '\u{0}']), ('\u{547}', ['\u{577}', '\u{0}', '\u{0}']),
+ ('\u{548}', ['\u{578}', '\u{0}', '\u{0}']), ('\u{549}', ['\u{579}', '\u{0}', '\u{0}']),
+ ('\u{54a}', ['\u{57a}', '\u{0}', '\u{0}']), ('\u{54b}', ['\u{57b}', '\u{0}', '\u{0}']),
+ ('\u{54c}', ['\u{57c}', '\u{0}', '\u{0}']), ('\u{54d}', ['\u{57d}', '\u{0}', '\u{0}']),
+ ('\u{54e}', ['\u{57e}', '\u{0}', '\u{0}']), ('\u{54f}', ['\u{57f}', '\u{0}', '\u{0}']),
+ ('\u{550}', ['\u{580}', '\u{0}', '\u{0}']), ('\u{551}', ['\u{581}', '\u{0}', '\u{0}']),
+ ('\u{552}', ['\u{582}', '\u{0}', '\u{0}']), ('\u{553}', ['\u{583}', '\u{0}', '\u{0}']),
+ ('\u{554}', ['\u{584}', '\u{0}', '\u{0}']), ('\u{555}', ['\u{585}', '\u{0}', '\u{0}']),
+ ('\u{556}', ['\u{586}', '\u{0}', '\u{0}']), ('\u{10a0}', ['\u{2d00}', '\u{0}', '\u{0}']),
+ ('\u{10a1}', ['\u{2d01}', '\u{0}', '\u{0}']), ('\u{10a2}', ['\u{2d02}', '\u{0}', '\u{0}']),
+ ('\u{10a3}', ['\u{2d03}', '\u{0}', '\u{0}']), ('\u{10a4}', ['\u{2d04}', '\u{0}', '\u{0}']),
+ ('\u{10a5}', ['\u{2d05}', '\u{0}', '\u{0}']), ('\u{10a6}', ['\u{2d06}', '\u{0}', '\u{0}']),
+ ('\u{10a7}', ['\u{2d07}', '\u{0}', '\u{0}']), ('\u{10a8}', ['\u{2d08}', '\u{0}', '\u{0}']),
+ ('\u{10a9}', ['\u{2d09}', '\u{0}', '\u{0}']), ('\u{10aa}', ['\u{2d0a}', '\u{0}', '\u{0}']),
+ ('\u{10ab}', ['\u{2d0b}', '\u{0}', '\u{0}']), ('\u{10ac}', ['\u{2d0c}', '\u{0}', '\u{0}']),
+ ('\u{10ad}', ['\u{2d0d}', '\u{0}', '\u{0}']), ('\u{10ae}', ['\u{2d0e}', '\u{0}', '\u{0}']),
+ ('\u{10af}', ['\u{2d0f}', '\u{0}', '\u{0}']), ('\u{10b0}', ['\u{2d10}', '\u{0}', '\u{0}']),
+ ('\u{10b1}', ['\u{2d11}', '\u{0}', '\u{0}']), ('\u{10b2}', ['\u{2d12}', '\u{0}', '\u{0}']),
+ ('\u{10b3}', ['\u{2d13}', '\u{0}', '\u{0}']), ('\u{10b4}', ['\u{2d14}', '\u{0}', '\u{0}']),
+ ('\u{10b5}', ['\u{2d15}', '\u{0}', '\u{0}']), ('\u{10b6}', ['\u{2d16}', '\u{0}', '\u{0}']),
+ ('\u{10b7}', ['\u{2d17}', '\u{0}', '\u{0}']), ('\u{10b8}', ['\u{2d18}', '\u{0}', '\u{0}']),
+ ('\u{10b9}', ['\u{2d19}', '\u{0}', '\u{0}']), ('\u{10ba}', ['\u{2d1a}', '\u{0}', '\u{0}']),
+ ('\u{10bb}', ['\u{2d1b}', '\u{0}', '\u{0}']), ('\u{10bc}', ['\u{2d1c}', '\u{0}', '\u{0}']),
+ ('\u{10bd}', ['\u{2d1d}', '\u{0}', '\u{0}']), ('\u{10be}', ['\u{2d1e}', '\u{0}', '\u{0}']),
+ ('\u{10bf}', ['\u{2d1f}', '\u{0}', '\u{0}']), ('\u{10c0}', ['\u{2d20}', '\u{0}', '\u{0}']),
+ ('\u{10c1}', ['\u{2d21}', '\u{0}', '\u{0}']), ('\u{10c2}', ['\u{2d22}', '\u{0}', '\u{0}']),
+ ('\u{10c3}', ['\u{2d23}', '\u{0}', '\u{0}']), ('\u{10c4}', ['\u{2d24}', '\u{0}', '\u{0}']),
+ ('\u{10c5}', ['\u{2d25}', '\u{0}', '\u{0}']), ('\u{10c7}', ['\u{2d27}', '\u{0}', '\u{0}']),
+ ('\u{10cd}', ['\u{2d2d}', '\u{0}', '\u{0}']), ('\u{13a0}', ['\u{ab70}', '\u{0}', '\u{0}']),
+ ('\u{13a1}', ['\u{ab71}', '\u{0}', '\u{0}']), ('\u{13a2}', ['\u{ab72}', '\u{0}', '\u{0}']),
+ ('\u{13a3}', ['\u{ab73}', '\u{0}', '\u{0}']), ('\u{13a4}', ['\u{ab74}', '\u{0}', '\u{0}']),
+ ('\u{13a5}', ['\u{ab75}', '\u{0}', '\u{0}']), ('\u{13a6}', ['\u{ab76}', '\u{0}', '\u{0}']),
+ ('\u{13a7}', ['\u{ab77}', '\u{0}', '\u{0}']), ('\u{13a8}', ['\u{ab78}', '\u{0}', '\u{0}']),
+ ('\u{13a9}', ['\u{ab79}', '\u{0}', '\u{0}']), ('\u{13aa}', ['\u{ab7a}', '\u{0}', '\u{0}']),
+ ('\u{13ab}', ['\u{ab7b}', '\u{0}', '\u{0}']), ('\u{13ac}', ['\u{ab7c}', '\u{0}', '\u{0}']),
+ ('\u{13ad}', ['\u{ab7d}', '\u{0}', '\u{0}']), ('\u{13ae}', ['\u{ab7e}', '\u{0}', '\u{0}']),
+ ('\u{13af}', ['\u{ab7f}', '\u{0}', '\u{0}']), ('\u{13b0}', ['\u{ab80}', '\u{0}', '\u{0}']),
+ ('\u{13b1}', ['\u{ab81}', '\u{0}', '\u{0}']), ('\u{13b2}', ['\u{ab82}', '\u{0}', '\u{0}']),
+ ('\u{13b3}', ['\u{ab83}', '\u{0}', '\u{0}']), ('\u{13b4}', ['\u{ab84}', '\u{0}', '\u{0}']),
+ ('\u{13b5}', ['\u{ab85}', '\u{0}', '\u{0}']), ('\u{13b6}', ['\u{ab86}', '\u{0}', '\u{0}']),
+ ('\u{13b7}', ['\u{ab87}', '\u{0}', '\u{0}']), ('\u{13b8}', ['\u{ab88}', '\u{0}', '\u{0}']),
+ ('\u{13b9}', ['\u{ab89}', '\u{0}', '\u{0}']), ('\u{13ba}', ['\u{ab8a}', '\u{0}', '\u{0}']),
+ ('\u{13bb}', ['\u{ab8b}', '\u{0}', '\u{0}']), ('\u{13bc}', ['\u{ab8c}', '\u{0}', '\u{0}']),
+ ('\u{13bd}', ['\u{ab8d}', '\u{0}', '\u{0}']), ('\u{13be}', ['\u{ab8e}', '\u{0}', '\u{0}']),
+ ('\u{13bf}', ['\u{ab8f}', '\u{0}', '\u{0}']), ('\u{13c0}', ['\u{ab90}', '\u{0}', '\u{0}']),
+ ('\u{13c1}', ['\u{ab91}', '\u{0}', '\u{0}']), ('\u{13c2}', ['\u{ab92}', '\u{0}', '\u{0}']),
+ ('\u{13c3}', ['\u{ab93}', '\u{0}', '\u{0}']), ('\u{13c4}', ['\u{ab94}', '\u{0}', '\u{0}']),
+ ('\u{13c5}', ['\u{ab95}', '\u{0}', '\u{0}']), ('\u{13c6}', ['\u{ab96}', '\u{0}', '\u{0}']),
+ ('\u{13c7}', ['\u{ab97}', '\u{0}', '\u{0}']), ('\u{13c8}', ['\u{ab98}', '\u{0}', '\u{0}']),
+ ('\u{13c9}', ['\u{ab99}', '\u{0}', '\u{0}']), ('\u{13ca}', ['\u{ab9a}', '\u{0}', '\u{0}']),
+ ('\u{13cb}', ['\u{ab9b}', '\u{0}', '\u{0}']), ('\u{13cc}', ['\u{ab9c}', '\u{0}', '\u{0}']),
+ ('\u{13cd}', ['\u{ab9d}', '\u{0}', '\u{0}']), ('\u{13ce}', ['\u{ab9e}', '\u{0}', '\u{0}']),
+ ('\u{13cf}', ['\u{ab9f}', '\u{0}', '\u{0}']), ('\u{13d0}', ['\u{aba0}', '\u{0}', '\u{0}']),
+ ('\u{13d1}', ['\u{aba1}', '\u{0}', '\u{0}']), ('\u{13d2}', ['\u{aba2}', '\u{0}', '\u{0}']),
+ ('\u{13d3}', ['\u{aba3}', '\u{0}', '\u{0}']), ('\u{13d4}', ['\u{aba4}', '\u{0}', '\u{0}']),
+ ('\u{13d5}', ['\u{aba5}', '\u{0}', '\u{0}']), ('\u{13d6}', ['\u{aba6}', '\u{0}', '\u{0}']),
+ ('\u{13d7}', ['\u{aba7}', '\u{0}', '\u{0}']), ('\u{13d8}', ['\u{aba8}', '\u{0}', '\u{0}']),
+ ('\u{13d9}', ['\u{aba9}', '\u{0}', '\u{0}']), ('\u{13da}', ['\u{abaa}', '\u{0}', '\u{0}']),
+ ('\u{13db}', ['\u{abab}', '\u{0}', '\u{0}']), ('\u{13dc}', ['\u{abac}', '\u{0}', '\u{0}']),
+ ('\u{13dd}', ['\u{abad}', '\u{0}', '\u{0}']), ('\u{13de}', ['\u{abae}', '\u{0}', '\u{0}']),
+ ('\u{13df}', ['\u{abaf}', '\u{0}', '\u{0}']), ('\u{13e0}', ['\u{abb0}', '\u{0}', '\u{0}']),
+ ('\u{13e1}', ['\u{abb1}', '\u{0}', '\u{0}']), ('\u{13e2}', ['\u{abb2}', '\u{0}', '\u{0}']),
+ ('\u{13e3}', ['\u{abb3}', '\u{0}', '\u{0}']), ('\u{13e4}', ['\u{abb4}', '\u{0}', '\u{0}']),
+ ('\u{13e5}', ['\u{abb5}', '\u{0}', '\u{0}']), ('\u{13e6}', ['\u{abb6}', '\u{0}', '\u{0}']),
+ ('\u{13e7}', ['\u{abb7}', '\u{0}', '\u{0}']), ('\u{13e8}', ['\u{abb8}', '\u{0}', '\u{0}']),
+ ('\u{13e9}', ['\u{abb9}', '\u{0}', '\u{0}']), ('\u{13ea}', ['\u{abba}', '\u{0}', '\u{0}']),
+ ('\u{13eb}', ['\u{abbb}', '\u{0}', '\u{0}']), ('\u{13ec}', ['\u{abbc}', '\u{0}', '\u{0}']),
+ ('\u{13ed}', ['\u{abbd}', '\u{0}', '\u{0}']), ('\u{13ee}', ['\u{abbe}', '\u{0}', '\u{0}']),
+ ('\u{13ef}', ['\u{abbf}', '\u{0}', '\u{0}']), ('\u{13f0}', ['\u{13f8}', '\u{0}', '\u{0}']),
+ ('\u{13f1}', ['\u{13f9}', '\u{0}', '\u{0}']), ('\u{13f2}', ['\u{13fa}', '\u{0}', '\u{0}']),
+ ('\u{13f3}', ['\u{13fb}', '\u{0}', '\u{0}']), ('\u{13f4}', ['\u{13fc}', '\u{0}', '\u{0}']),
+ ('\u{13f5}', ['\u{13fd}', '\u{0}', '\u{0}']), ('\u{1c90}', ['\u{10d0}', '\u{0}', '\u{0}']),
+ ('\u{1c91}', ['\u{10d1}', '\u{0}', '\u{0}']), ('\u{1c92}', ['\u{10d2}', '\u{0}', '\u{0}']),
+ ('\u{1c93}', ['\u{10d3}', '\u{0}', '\u{0}']), ('\u{1c94}', ['\u{10d4}', '\u{0}', '\u{0}']),
+ ('\u{1c95}', ['\u{10d5}', '\u{0}', '\u{0}']), ('\u{1c96}', ['\u{10d6}', '\u{0}', '\u{0}']),
+ ('\u{1c97}', ['\u{10d7}', '\u{0}', '\u{0}']), ('\u{1c98}', ['\u{10d8}', '\u{0}', '\u{0}']),
+ ('\u{1c99}', ['\u{10d9}', '\u{0}', '\u{0}']), ('\u{1c9a}', ['\u{10da}', '\u{0}', '\u{0}']),
+ ('\u{1c9b}', ['\u{10db}', '\u{0}', '\u{0}']), ('\u{1c9c}', ['\u{10dc}', '\u{0}', '\u{0}']),
+ ('\u{1c9d}', ['\u{10dd}', '\u{0}', '\u{0}']), ('\u{1c9e}', ['\u{10de}', '\u{0}', '\u{0}']),
+ ('\u{1c9f}', ['\u{10df}', '\u{0}', '\u{0}']), ('\u{1ca0}', ['\u{10e0}', '\u{0}', '\u{0}']),
+ ('\u{1ca1}', ['\u{10e1}', '\u{0}', '\u{0}']), ('\u{1ca2}', ['\u{10e2}', '\u{0}', '\u{0}']),
+ ('\u{1ca3}', ['\u{10e3}', '\u{0}', '\u{0}']), ('\u{1ca4}', ['\u{10e4}', '\u{0}', '\u{0}']),
+ ('\u{1ca5}', ['\u{10e5}', '\u{0}', '\u{0}']), ('\u{1ca6}', ['\u{10e6}', '\u{0}', '\u{0}']),
+ ('\u{1ca7}', ['\u{10e7}', '\u{0}', '\u{0}']), ('\u{1ca8}', ['\u{10e8}', '\u{0}', '\u{0}']),
+ ('\u{1ca9}', ['\u{10e9}', '\u{0}', '\u{0}']), ('\u{1caa}', ['\u{10ea}', '\u{0}', '\u{0}']),
+ ('\u{1cab}', ['\u{10eb}', '\u{0}', '\u{0}']), ('\u{1cac}', ['\u{10ec}', '\u{0}', '\u{0}']),
+ ('\u{1cad}', ['\u{10ed}', '\u{0}', '\u{0}']), ('\u{1cae}', ['\u{10ee}', '\u{0}', '\u{0}']),
+ ('\u{1caf}', ['\u{10ef}', '\u{0}', '\u{0}']), ('\u{1cb0}', ['\u{10f0}', '\u{0}', '\u{0}']),
+ ('\u{1cb1}', ['\u{10f1}', '\u{0}', '\u{0}']), ('\u{1cb2}', ['\u{10f2}', '\u{0}', '\u{0}']),
+ ('\u{1cb3}', ['\u{10f3}', '\u{0}', '\u{0}']), ('\u{1cb4}', ['\u{10f4}', '\u{0}', '\u{0}']),
+ ('\u{1cb5}', ['\u{10f5}', '\u{0}', '\u{0}']), ('\u{1cb6}', ['\u{10f6}', '\u{0}', '\u{0}']),
+ ('\u{1cb7}', ['\u{10f7}', '\u{0}', '\u{0}']), ('\u{1cb8}', ['\u{10f8}', '\u{0}', '\u{0}']),
+ ('\u{1cb9}', ['\u{10f9}', '\u{0}', '\u{0}']), ('\u{1cba}', ['\u{10fa}', '\u{0}', '\u{0}']),
+ ('\u{1cbd}', ['\u{10fd}', '\u{0}', '\u{0}']), ('\u{1cbe}', ['\u{10fe}', '\u{0}', '\u{0}']),
+ ('\u{1cbf}', ['\u{10ff}', '\u{0}', '\u{0}']), ('\u{1e00}', ['\u{1e01}', '\u{0}', '\u{0}']),
+ ('\u{1e02}', ['\u{1e03}', '\u{0}', '\u{0}']), ('\u{1e04}', ['\u{1e05}', '\u{0}', '\u{0}']),
+ ('\u{1e06}', ['\u{1e07}', '\u{0}', '\u{0}']), ('\u{1e08}', ['\u{1e09}', '\u{0}', '\u{0}']),
+ ('\u{1e0a}', ['\u{1e0b}', '\u{0}', '\u{0}']), ('\u{1e0c}', ['\u{1e0d}', '\u{0}', '\u{0}']),
+ ('\u{1e0e}', ['\u{1e0f}', '\u{0}', '\u{0}']), ('\u{1e10}', ['\u{1e11}', '\u{0}', '\u{0}']),
+ ('\u{1e12}', ['\u{1e13}', '\u{0}', '\u{0}']), ('\u{1e14}', ['\u{1e15}', '\u{0}', '\u{0}']),
+ ('\u{1e16}', ['\u{1e17}', '\u{0}', '\u{0}']), ('\u{1e18}', ['\u{1e19}', '\u{0}', '\u{0}']),
+ ('\u{1e1a}', ['\u{1e1b}', '\u{0}', '\u{0}']), ('\u{1e1c}', ['\u{1e1d}', '\u{0}', '\u{0}']),
+ ('\u{1e1e}', ['\u{1e1f}', '\u{0}', '\u{0}']), ('\u{1e20}', ['\u{1e21}', '\u{0}', '\u{0}']),
+ ('\u{1e22}', ['\u{1e23}', '\u{0}', '\u{0}']), ('\u{1e24}', ['\u{1e25}', '\u{0}', '\u{0}']),
+ ('\u{1e26}', ['\u{1e27}', '\u{0}', '\u{0}']), ('\u{1e28}', ['\u{1e29}', '\u{0}', '\u{0}']),
+ ('\u{1e2a}', ['\u{1e2b}', '\u{0}', '\u{0}']), ('\u{1e2c}', ['\u{1e2d}', '\u{0}', '\u{0}']),
+ ('\u{1e2e}', ['\u{1e2f}', '\u{0}', '\u{0}']), ('\u{1e30}', ['\u{1e31}', '\u{0}', '\u{0}']),
+ ('\u{1e32}', ['\u{1e33}', '\u{0}', '\u{0}']), ('\u{1e34}', ['\u{1e35}', '\u{0}', '\u{0}']),
+ ('\u{1e36}', ['\u{1e37}', '\u{0}', '\u{0}']), ('\u{1e38}', ['\u{1e39}', '\u{0}', '\u{0}']),
+ ('\u{1e3a}', ['\u{1e3b}', '\u{0}', '\u{0}']), ('\u{1e3c}', ['\u{1e3d}', '\u{0}', '\u{0}']),
+ ('\u{1e3e}', ['\u{1e3f}', '\u{0}', '\u{0}']), ('\u{1e40}', ['\u{1e41}', '\u{0}', '\u{0}']),
+ ('\u{1e42}', ['\u{1e43}', '\u{0}', '\u{0}']), ('\u{1e44}', ['\u{1e45}', '\u{0}', '\u{0}']),
+ ('\u{1e46}', ['\u{1e47}', '\u{0}', '\u{0}']), ('\u{1e48}', ['\u{1e49}', '\u{0}', '\u{0}']),
+ ('\u{1e4a}', ['\u{1e4b}', '\u{0}', '\u{0}']), ('\u{1e4c}', ['\u{1e4d}', '\u{0}', '\u{0}']),
+ ('\u{1e4e}', ['\u{1e4f}', '\u{0}', '\u{0}']), ('\u{1e50}', ['\u{1e51}', '\u{0}', '\u{0}']),
+ ('\u{1e52}', ['\u{1e53}', '\u{0}', '\u{0}']), ('\u{1e54}', ['\u{1e55}', '\u{0}', '\u{0}']),
+ ('\u{1e56}', ['\u{1e57}', '\u{0}', '\u{0}']), ('\u{1e58}', ['\u{1e59}', '\u{0}', '\u{0}']),
+ ('\u{1e5a}', ['\u{1e5b}', '\u{0}', '\u{0}']), ('\u{1e5c}', ['\u{1e5d}', '\u{0}', '\u{0}']),
+ ('\u{1e5e}', ['\u{1e5f}', '\u{0}', '\u{0}']), ('\u{1e60}', ['\u{1e61}', '\u{0}', '\u{0}']),
+ ('\u{1e62}', ['\u{1e63}', '\u{0}', '\u{0}']), ('\u{1e64}', ['\u{1e65}', '\u{0}', '\u{0}']),
+ ('\u{1e66}', ['\u{1e67}', '\u{0}', '\u{0}']), ('\u{1e68}', ['\u{1e69}', '\u{0}', '\u{0}']),
+ ('\u{1e6a}', ['\u{1e6b}', '\u{0}', '\u{0}']), ('\u{1e6c}', ['\u{1e6d}', '\u{0}', '\u{0}']),
+ ('\u{1e6e}', ['\u{1e6f}', '\u{0}', '\u{0}']), ('\u{1e70}', ['\u{1e71}', '\u{0}', '\u{0}']),
+ ('\u{1e72}', ['\u{1e73}', '\u{0}', '\u{0}']), ('\u{1e74}', ['\u{1e75}', '\u{0}', '\u{0}']),
+ ('\u{1e76}', ['\u{1e77}', '\u{0}', '\u{0}']), ('\u{1e78}', ['\u{1e79}', '\u{0}', '\u{0}']),
+ ('\u{1e7a}', ['\u{1e7b}', '\u{0}', '\u{0}']), ('\u{1e7c}', ['\u{1e7d}', '\u{0}', '\u{0}']),
+ ('\u{1e7e}', ['\u{1e7f}', '\u{0}', '\u{0}']), ('\u{1e80}', ['\u{1e81}', '\u{0}', '\u{0}']),
+ ('\u{1e82}', ['\u{1e83}', '\u{0}', '\u{0}']), ('\u{1e84}', ['\u{1e85}', '\u{0}', '\u{0}']),
+ ('\u{1e86}', ['\u{1e87}', '\u{0}', '\u{0}']), ('\u{1e88}', ['\u{1e89}', '\u{0}', '\u{0}']),
+ ('\u{1e8a}', ['\u{1e8b}', '\u{0}', '\u{0}']), ('\u{1e8c}', ['\u{1e8d}', '\u{0}', '\u{0}']),
+ ('\u{1e8e}', ['\u{1e8f}', '\u{0}', '\u{0}']), ('\u{1e90}', ['\u{1e91}', '\u{0}', '\u{0}']),
+ ('\u{1e92}', ['\u{1e93}', '\u{0}', '\u{0}']), ('\u{1e94}', ['\u{1e95}', '\u{0}', '\u{0}']),
+ ('\u{1e9e}', ['\u{df}', '\u{0}', '\u{0}']), ('\u{1ea0}', ['\u{1ea1}', '\u{0}', '\u{0}']),
+ ('\u{1ea2}', ['\u{1ea3}', '\u{0}', '\u{0}']), ('\u{1ea4}', ['\u{1ea5}', '\u{0}', '\u{0}']),
+ ('\u{1ea6}', ['\u{1ea7}', '\u{0}', '\u{0}']), ('\u{1ea8}', ['\u{1ea9}', '\u{0}', '\u{0}']),
+ ('\u{1eaa}', ['\u{1eab}', '\u{0}', '\u{0}']), ('\u{1eac}', ['\u{1ead}', '\u{0}', '\u{0}']),
+ ('\u{1eae}', ['\u{1eaf}', '\u{0}', '\u{0}']), ('\u{1eb0}', ['\u{1eb1}', '\u{0}', '\u{0}']),
+ ('\u{1eb2}', ['\u{1eb3}', '\u{0}', '\u{0}']), ('\u{1eb4}', ['\u{1eb5}', '\u{0}', '\u{0}']),
+ ('\u{1eb6}', ['\u{1eb7}', '\u{0}', '\u{0}']), ('\u{1eb8}', ['\u{1eb9}', '\u{0}', '\u{0}']),
+ ('\u{1eba}', ['\u{1ebb}', '\u{0}', '\u{0}']), ('\u{1ebc}', ['\u{1ebd}', '\u{0}', '\u{0}']),
+ ('\u{1ebe}', ['\u{1ebf}', '\u{0}', '\u{0}']), ('\u{1ec0}', ['\u{1ec1}', '\u{0}', '\u{0}']),
+ ('\u{1ec2}', ['\u{1ec3}', '\u{0}', '\u{0}']), ('\u{1ec4}', ['\u{1ec5}', '\u{0}', '\u{0}']),
+ ('\u{1ec6}', ['\u{1ec7}', '\u{0}', '\u{0}']), ('\u{1ec8}', ['\u{1ec9}', '\u{0}', '\u{0}']),
+ ('\u{1eca}', ['\u{1ecb}', '\u{0}', '\u{0}']), ('\u{1ecc}', ['\u{1ecd}', '\u{0}', '\u{0}']),
+ ('\u{1ece}', ['\u{1ecf}', '\u{0}', '\u{0}']), ('\u{1ed0}', ['\u{1ed1}', '\u{0}', '\u{0}']),
+ ('\u{1ed2}', ['\u{1ed3}', '\u{0}', '\u{0}']), ('\u{1ed4}', ['\u{1ed5}', '\u{0}', '\u{0}']),
+ ('\u{1ed6}', ['\u{1ed7}', '\u{0}', '\u{0}']), ('\u{1ed8}', ['\u{1ed9}', '\u{0}', '\u{0}']),
+ ('\u{1eda}', ['\u{1edb}', '\u{0}', '\u{0}']), ('\u{1edc}', ['\u{1edd}', '\u{0}', '\u{0}']),
+ ('\u{1ede}', ['\u{1edf}', '\u{0}', '\u{0}']), ('\u{1ee0}', ['\u{1ee1}', '\u{0}', '\u{0}']),
+ ('\u{1ee2}', ['\u{1ee3}', '\u{0}', '\u{0}']), ('\u{1ee4}', ['\u{1ee5}', '\u{0}', '\u{0}']),
+ ('\u{1ee6}', ['\u{1ee7}', '\u{0}', '\u{0}']), ('\u{1ee8}', ['\u{1ee9}', '\u{0}', '\u{0}']),
+ ('\u{1eea}', ['\u{1eeb}', '\u{0}', '\u{0}']), ('\u{1eec}', ['\u{1eed}', '\u{0}', '\u{0}']),
+ ('\u{1eee}', ['\u{1eef}', '\u{0}', '\u{0}']), ('\u{1ef0}', ['\u{1ef1}', '\u{0}', '\u{0}']),
+ ('\u{1ef2}', ['\u{1ef3}', '\u{0}', '\u{0}']), ('\u{1ef4}', ['\u{1ef5}', '\u{0}', '\u{0}']),
+ ('\u{1ef6}', ['\u{1ef7}', '\u{0}', '\u{0}']), ('\u{1ef8}', ['\u{1ef9}', '\u{0}', '\u{0}']),
+ ('\u{1efa}', ['\u{1efb}', '\u{0}', '\u{0}']), ('\u{1efc}', ['\u{1efd}', '\u{0}', '\u{0}']),
+ ('\u{1efe}', ['\u{1eff}', '\u{0}', '\u{0}']), ('\u{1f08}', ['\u{1f00}', '\u{0}', '\u{0}']),
+ ('\u{1f09}', ['\u{1f01}', '\u{0}', '\u{0}']), ('\u{1f0a}', ['\u{1f02}', '\u{0}', '\u{0}']),
+ ('\u{1f0b}', ['\u{1f03}', '\u{0}', '\u{0}']), ('\u{1f0c}', ['\u{1f04}', '\u{0}', '\u{0}']),
+ ('\u{1f0d}', ['\u{1f05}', '\u{0}', '\u{0}']), ('\u{1f0e}', ['\u{1f06}', '\u{0}', '\u{0}']),
+ ('\u{1f0f}', ['\u{1f07}', '\u{0}', '\u{0}']), ('\u{1f18}', ['\u{1f10}', '\u{0}', '\u{0}']),
+ ('\u{1f19}', ['\u{1f11}', '\u{0}', '\u{0}']), ('\u{1f1a}', ['\u{1f12}', '\u{0}', '\u{0}']),
+ ('\u{1f1b}', ['\u{1f13}', '\u{0}', '\u{0}']), ('\u{1f1c}', ['\u{1f14}', '\u{0}', '\u{0}']),
+ ('\u{1f1d}', ['\u{1f15}', '\u{0}', '\u{0}']), ('\u{1f28}', ['\u{1f20}', '\u{0}', '\u{0}']),
+ ('\u{1f29}', ['\u{1f21}', '\u{0}', '\u{0}']), ('\u{1f2a}', ['\u{1f22}', '\u{0}', '\u{0}']),
+ ('\u{1f2b}', ['\u{1f23}', '\u{0}', '\u{0}']), ('\u{1f2c}', ['\u{1f24}', '\u{0}', '\u{0}']),
+ ('\u{1f2d}', ['\u{1f25}', '\u{0}', '\u{0}']), ('\u{1f2e}', ['\u{1f26}', '\u{0}', '\u{0}']),
+ ('\u{1f2f}', ['\u{1f27}', '\u{0}', '\u{0}']), ('\u{1f38}', ['\u{1f30}', '\u{0}', '\u{0}']),
+ ('\u{1f39}', ['\u{1f31}', '\u{0}', '\u{0}']), ('\u{1f3a}', ['\u{1f32}', '\u{0}', '\u{0}']),
+ ('\u{1f3b}', ['\u{1f33}', '\u{0}', '\u{0}']), ('\u{1f3c}', ['\u{1f34}', '\u{0}', '\u{0}']),
+ ('\u{1f3d}', ['\u{1f35}', '\u{0}', '\u{0}']), ('\u{1f3e}', ['\u{1f36}', '\u{0}', '\u{0}']),
+ ('\u{1f3f}', ['\u{1f37}', '\u{0}', '\u{0}']), ('\u{1f48}', ['\u{1f40}', '\u{0}', '\u{0}']),
+ ('\u{1f49}', ['\u{1f41}', '\u{0}', '\u{0}']), ('\u{1f4a}', ['\u{1f42}', '\u{0}', '\u{0}']),
+ ('\u{1f4b}', ['\u{1f43}', '\u{0}', '\u{0}']), ('\u{1f4c}', ['\u{1f44}', '\u{0}', '\u{0}']),
+ ('\u{1f4d}', ['\u{1f45}', '\u{0}', '\u{0}']), ('\u{1f59}', ['\u{1f51}', '\u{0}', '\u{0}']),
+ ('\u{1f5b}', ['\u{1f53}', '\u{0}', '\u{0}']), ('\u{1f5d}', ['\u{1f55}', '\u{0}', '\u{0}']),
+ ('\u{1f5f}', ['\u{1f57}', '\u{0}', '\u{0}']), ('\u{1f68}', ['\u{1f60}', '\u{0}', '\u{0}']),
+ ('\u{1f69}', ['\u{1f61}', '\u{0}', '\u{0}']), ('\u{1f6a}', ['\u{1f62}', '\u{0}', '\u{0}']),
+ ('\u{1f6b}', ['\u{1f63}', '\u{0}', '\u{0}']), ('\u{1f6c}', ['\u{1f64}', '\u{0}', '\u{0}']),
+ ('\u{1f6d}', ['\u{1f65}', '\u{0}', '\u{0}']), ('\u{1f6e}', ['\u{1f66}', '\u{0}', '\u{0}']),
+ ('\u{1f6f}', ['\u{1f67}', '\u{0}', '\u{0}']), ('\u{1f88}', ['\u{1f80}', '\u{0}', '\u{0}']),
+ ('\u{1f89}', ['\u{1f81}', '\u{0}', '\u{0}']), ('\u{1f8a}', ['\u{1f82}', '\u{0}', '\u{0}']),
+ ('\u{1f8b}', ['\u{1f83}', '\u{0}', '\u{0}']), ('\u{1f8c}', ['\u{1f84}', '\u{0}', '\u{0}']),
+ ('\u{1f8d}', ['\u{1f85}', '\u{0}', '\u{0}']), ('\u{1f8e}', ['\u{1f86}', '\u{0}', '\u{0}']),
+ ('\u{1f8f}', ['\u{1f87}', '\u{0}', '\u{0}']), ('\u{1f98}', ['\u{1f90}', '\u{0}', '\u{0}']),
+ ('\u{1f99}', ['\u{1f91}', '\u{0}', '\u{0}']), ('\u{1f9a}', ['\u{1f92}', '\u{0}', '\u{0}']),
+ ('\u{1f9b}', ['\u{1f93}', '\u{0}', '\u{0}']), ('\u{1f9c}', ['\u{1f94}', '\u{0}', '\u{0}']),
+ ('\u{1f9d}', ['\u{1f95}', '\u{0}', '\u{0}']), ('\u{1f9e}', ['\u{1f96}', '\u{0}', '\u{0}']),
+ ('\u{1f9f}', ['\u{1f97}', '\u{0}', '\u{0}']), ('\u{1fa8}', ['\u{1fa0}', '\u{0}', '\u{0}']),
+ ('\u{1fa9}', ['\u{1fa1}', '\u{0}', '\u{0}']), ('\u{1faa}', ['\u{1fa2}', '\u{0}', '\u{0}']),
+ ('\u{1fab}', ['\u{1fa3}', '\u{0}', '\u{0}']), ('\u{1fac}', ['\u{1fa4}', '\u{0}', '\u{0}']),
+ ('\u{1fad}', ['\u{1fa5}', '\u{0}', '\u{0}']), ('\u{1fae}', ['\u{1fa6}', '\u{0}', '\u{0}']),
+ ('\u{1faf}', ['\u{1fa7}', '\u{0}', '\u{0}']), ('\u{1fb8}', ['\u{1fb0}', '\u{0}', '\u{0}']),
+ ('\u{1fb9}', ['\u{1fb1}', '\u{0}', '\u{0}']), ('\u{1fba}', ['\u{1f70}', '\u{0}', '\u{0}']),
+ ('\u{1fbb}', ['\u{1f71}', '\u{0}', '\u{0}']), ('\u{1fbc}', ['\u{1fb3}', '\u{0}', '\u{0}']),
+ ('\u{1fc8}', ['\u{1f72}', '\u{0}', '\u{0}']), ('\u{1fc9}', ['\u{1f73}', '\u{0}', '\u{0}']),
+ ('\u{1fca}', ['\u{1f74}', '\u{0}', '\u{0}']), ('\u{1fcb}', ['\u{1f75}', '\u{0}', '\u{0}']),
+ ('\u{1fcc}', ['\u{1fc3}', '\u{0}', '\u{0}']), ('\u{1fd8}', ['\u{1fd0}', '\u{0}', '\u{0}']),
+ ('\u{1fd9}', ['\u{1fd1}', '\u{0}', '\u{0}']), ('\u{1fda}', ['\u{1f76}', '\u{0}', '\u{0}']),
+ ('\u{1fdb}', ['\u{1f77}', '\u{0}', '\u{0}']), ('\u{1fe8}', ['\u{1fe0}', '\u{0}', '\u{0}']),
+ ('\u{1fe9}', ['\u{1fe1}', '\u{0}', '\u{0}']), ('\u{1fea}', ['\u{1f7a}', '\u{0}', '\u{0}']),
+ ('\u{1feb}', ['\u{1f7b}', '\u{0}', '\u{0}']), ('\u{1fec}', ['\u{1fe5}', '\u{0}', '\u{0}']),
+ ('\u{1ff8}', ['\u{1f78}', '\u{0}', '\u{0}']), ('\u{1ff9}', ['\u{1f79}', '\u{0}', '\u{0}']),
+ ('\u{1ffa}', ['\u{1f7c}', '\u{0}', '\u{0}']), ('\u{1ffb}', ['\u{1f7d}', '\u{0}', '\u{0}']),
+ ('\u{1ffc}', ['\u{1ff3}', '\u{0}', '\u{0}']), ('\u{2126}', ['\u{3c9}', '\u{0}', '\u{0}']),
+ ('\u{212a}', ['k', '\u{0}', '\u{0}']), ('\u{212b}', ['\u{e5}', '\u{0}', '\u{0}']),
+ ('\u{2132}', ['\u{214e}', '\u{0}', '\u{0}']), ('\u{2160}', ['\u{2170}', '\u{0}', '\u{0}']),
+ ('\u{2161}', ['\u{2171}', '\u{0}', '\u{0}']), ('\u{2162}', ['\u{2172}', '\u{0}', '\u{0}']),
+ ('\u{2163}', ['\u{2173}', '\u{0}', '\u{0}']), ('\u{2164}', ['\u{2174}', '\u{0}', '\u{0}']),
+ ('\u{2165}', ['\u{2175}', '\u{0}', '\u{0}']), ('\u{2166}', ['\u{2176}', '\u{0}', '\u{0}']),
+ ('\u{2167}', ['\u{2177}', '\u{0}', '\u{0}']), ('\u{2168}', ['\u{2178}', '\u{0}', '\u{0}']),
+ ('\u{2169}', ['\u{2179}', '\u{0}', '\u{0}']), ('\u{216a}', ['\u{217a}', '\u{0}', '\u{0}']),
+ ('\u{216b}', ['\u{217b}', '\u{0}', '\u{0}']), ('\u{216c}', ['\u{217c}', '\u{0}', '\u{0}']),
+ ('\u{216d}', ['\u{217d}', '\u{0}', '\u{0}']), ('\u{216e}', ['\u{217e}', '\u{0}', '\u{0}']),
+ ('\u{216f}', ['\u{217f}', '\u{0}', '\u{0}']), ('\u{2183}', ['\u{2184}', '\u{0}', '\u{0}']),
+ ('\u{24b6}', ['\u{24d0}', '\u{0}', '\u{0}']), ('\u{24b7}', ['\u{24d1}', '\u{0}', '\u{0}']),
+ ('\u{24b8}', ['\u{24d2}', '\u{0}', '\u{0}']), ('\u{24b9}', ['\u{24d3}', '\u{0}', '\u{0}']),
+ ('\u{24ba}', ['\u{24d4}', '\u{0}', '\u{0}']), ('\u{24bb}', ['\u{24d5}', '\u{0}', '\u{0}']),
+ ('\u{24bc}', ['\u{24d6}', '\u{0}', '\u{0}']), ('\u{24bd}', ['\u{24d7}', '\u{0}', '\u{0}']),
+ ('\u{24be}', ['\u{24d8}', '\u{0}', '\u{0}']), ('\u{24bf}', ['\u{24d9}', '\u{0}', '\u{0}']),
+ ('\u{24c0}', ['\u{24da}', '\u{0}', '\u{0}']), ('\u{24c1}', ['\u{24db}', '\u{0}', '\u{0}']),
+ ('\u{24c2}', ['\u{24dc}', '\u{0}', '\u{0}']), ('\u{24c3}', ['\u{24dd}', '\u{0}', '\u{0}']),
+ ('\u{24c4}', ['\u{24de}', '\u{0}', '\u{0}']), ('\u{24c5}', ['\u{24df}', '\u{0}', '\u{0}']),
+ ('\u{24c6}', ['\u{24e0}', '\u{0}', '\u{0}']), ('\u{24c7}', ['\u{24e1}', '\u{0}', '\u{0}']),
+ ('\u{24c8}', ['\u{24e2}', '\u{0}', '\u{0}']), ('\u{24c9}', ['\u{24e3}', '\u{0}', '\u{0}']),
+ ('\u{24ca}', ['\u{24e4}', '\u{0}', '\u{0}']), ('\u{24cb}', ['\u{24e5}', '\u{0}', '\u{0}']),
+ ('\u{24cc}', ['\u{24e6}', '\u{0}', '\u{0}']), ('\u{24cd}', ['\u{24e7}', '\u{0}', '\u{0}']),
+ ('\u{24ce}', ['\u{24e8}', '\u{0}', '\u{0}']), ('\u{24cf}', ['\u{24e9}', '\u{0}', '\u{0}']),
+ ('\u{2c00}', ['\u{2c30}', '\u{0}', '\u{0}']), ('\u{2c01}', ['\u{2c31}', '\u{0}', '\u{0}']),
+ ('\u{2c02}', ['\u{2c32}', '\u{0}', '\u{0}']), ('\u{2c03}', ['\u{2c33}', '\u{0}', '\u{0}']),
+ ('\u{2c04}', ['\u{2c34}', '\u{0}', '\u{0}']), ('\u{2c05}', ['\u{2c35}', '\u{0}', '\u{0}']),
+ ('\u{2c06}', ['\u{2c36}', '\u{0}', '\u{0}']), ('\u{2c07}', ['\u{2c37}', '\u{0}', '\u{0}']),
+ ('\u{2c08}', ['\u{2c38}', '\u{0}', '\u{0}']), ('\u{2c09}', ['\u{2c39}', '\u{0}', '\u{0}']),
+ ('\u{2c0a}', ['\u{2c3a}', '\u{0}', '\u{0}']), ('\u{2c0b}', ['\u{2c3b}', '\u{0}', '\u{0}']),
+ ('\u{2c0c}', ['\u{2c3c}', '\u{0}', '\u{0}']), ('\u{2c0d}', ['\u{2c3d}', '\u{0}', '\u{0}']),
+ ('\u{2c0e}', ['\u{2c3e}', '\u{0}', '\u{0}']), ('\u{2c0f}', ['\u{2c3f}', '\u{0}', '\u{0}']),
+ ('\u{2c10}', ['\u{2c40}', '\u{0}', '\u{0}']), ('\u{2c11}', ['\u{2c41}', '\u{0}', '\u{0}']),
+ ('\u{2c12}', ['\u{2c42}', '\u{0}', '\u{0}']), ('\u{2c13}', ['\u{2c43}', '\u{0}', '\u{0}']),
+ ('\u{2c14}', ['\u{2c44}', '\u{0}', '\u{0}']), ('\u{2c15}', ['\u{2c45}', '\u{0}', '\u{0}']),
+ ('\u{2c16}', ['\u{2c46}', '\u{0}', '\u{0}']), ('\u{2c17}', ['\u{2c47}', '\u{0}', '\u{0}']),
+ ('\u{2c18}', ['\u{2c48}', '\u{0}', '\u{0}']), ('\u{2c19}', ['\u{2c49}', '\u{0}', '\u{0}']),
+ ('\u{2c1a}', ['\u{2c4a}', '\u{0}', '\u{0}']), ('\u{2c1b}', ['\u{2c4b}', '\u{0}', '\u{0}']),
+ ('\u{2c1c}', ['\u{2c4c}', '\u{0}', '\u{0}']), ('\u{2c1d}', ['\u{2c4d}', '\u{0}', '\u{0}']),
+ ('\u{2c1e}', ['\u{2c4e}', '\u{0}', '\u{0}']), ('\u{2c1f}', ['\u{2c4f}', '\u{0}', '\u{0}']),
+ ('\u{2c20}', ['\u{2c50}', '\u{0}', '\u{0}']), ('\u{2c21}', ['\u{2c51}', '\u{0}', '\u{0}']),
+ ('\u{2c22}', ['\u{2c52}', '\u{0}', '\u{0}']), ('\u{2c23}', ['\u{2c53}', '\u{0}', '\u{0}']),
+ ('\u{2c24}', ['\u{2c54}', '\u{0}', '\u{0}']), ('\u{2c25}', ['\u{2c55}', '\u{0}', '\u{0}']),
+ ('\u{2c26}', ['\u{2c56}', '\u{0}', '\u{0}']), ('\u{2c27}', ['\u{2c57}', '\u{0}', '\u{0}']),
+ ('\u{2c28}', ['\u{2c58}', '\u{0}', '\u{0}']), ('\u{2c29}', ['\u{2c59}', '\u{0}', '\u{0}']),
+ ('\u{2c2a}', ['\u{2c5a}', '\u{0}', '\u{0}']), ('\u{2c2b}', ['\u{2c5b}', '\u{0}', '\u{0}']),
+ ('\u{2c2c}', ['\u{2c5c}', '\u{0}', '\u{0}']), ('\u{2c2d}', ['\u{2c5d}', '\u{0}', '\u{0}']),
+ ('\u{2c2e}', ['\u{2c5e}', '\u{0}', '\u{0}']), ('\u{2c2f}', ['\u{2c5f}', '\u{0}', '\u{0}']),
+ ('\u{2c60}', ['\u{2c61}', '\u{0}', '\u{0}']), ('\u{2c62}', ['\u{26b}', '\u{0}', '\u{0}']),
+ ('\u{2c63}', ['\u{1d7d}', '\u{0}', '\u{0}']), ('\u{2c64}', ['\u{27d}', '\u{0}', '\u{0}']),
+ ('\u{2c67}', ['\u{2c68}', '\u{0}', '\u{0}']), ('\u{2c69}', ['\u{2c6a}', '\u{0}', '\u{0}']),
+ ('\u{2c6b}', ['\u{2c6c}', '\u{0}', '\u{0}']), ('\u{2c6d}', ['\u{251}', '\u{0}', '\u{0}']),
+ ('\u{2c6e}', ['\u{271}', '\u{0}', '\u{0}']), ('\u{2c6f}', ['\u{250}', '\u{0}', '\u{0}']),
+ ('\u{2c70}', ['\u{252}', '\u{0}', '\u{0}']), ('\u{2c72}', ['\u{2c73}', '\u{0}', '\u{0}']),
+ ('\u{2c75}', ['\u{2c76}', '\u{0}', '\u{0}']), ('\u{2c7e}', ['\u{23f}', '\u{0}', '\u{0}']),
+ ('\u{2c7f}', ['\u{240}', '\u{0}', '\u{0}']), ('\u{2c80}', ['\u{2c81}', '\u{0}', '\u{0}']),
+ ('\u{2c82}', ['\u{2c83}', '\u{0}', '\u{0}']), ('\u{2c84}', ['\u{2c85}', '\u{0}', '\u{0}']),
+ ('\u{2c86}', ['\u{2c87}', '\u{0}', '\u{0}']), ('\u{2c88}', ['\u{2c89}', '\u{0}', '\u{0}']),
+ ('\u{2c8a}', ['\u{2c8b}', '\u{0}', '\u{0}']), ('\u{2c8c}', ['\u{2c8d}', '\u{0}', '\u{0}']),
+ ('\u{2c8e}', ['\u{2c8f}', '\u{0}', '\u{0}']), ('\u{2c90}', ['\u{2c91}', '\u{0}', '\u{0}']),
+ ('\u{2c92}', ['\u{2c93}', '\u{0}', '\u{0}']), ('\u{2c94}', ['\u{2c95}', '\u{0}', '\u{0}']),
+ ('\u{2c96}', ['\u{2c97}', '\u{0}', '\u{0}']), ('\u{2c98}', ['\u{2c99}', '\u{0}', '\u{0}']),
+ ('\u{2c9a}', ['\u{2c9b}', '\u{0}', '\u{0}']), ('\u{2c9c}', ['\u{2c9d}', '\u{0}', '\u{0}']),
+ ('\u{2c9e}', ['\u{2c9f}', '\u{0}', '\u{0}']), ('\u{2ca0}', ['\u{2ca1}', '\u{0}', '\u{0}']),
+ ('\u{2ca2}', ['\u{2ca3}', '\u{0}', '\u{0}']), ('\u{2ca4}', ['\u{2ca5}', '\u{0}', '\u{0}']),
+ ('\u{2ca6}', ['\u{2ca7}', '\u{0}', '\u{0}']), ('\u{2ca8}', ['\u{2ca9}', '\u{0}', '\u{0}']),
+ ('\u{2caa}', ['\u{2cab}', '\u{0}', '\u{0}']), ('\u{2cac}', ['\u{2cad}', '\u{0}', '\u{0}']),
+ ('\u{2cae}', ['\u{2caf}', '\u{0}', '\u{0}']), ('\u{2cb0}', ['\u{2cb1}', '\u{0}', '\u{0}']),
+ ('\u{2cb2}', ['\u{2cb3}', '\u{0}', '\u{0}']), ('\u{2cb4}', ['\u{2cb5}', '\u{0}', '\u{0}']),
+ ('\u{2cb6}', ['\u{2cb7}', '\u{0}', '\u{0}']), ('\u{2cb8}', ['\u{2cb9}', '\u{0}', '\u{0}']),
+ ('\u{2cba}', ['\u{2cbb}', '\u{0}', '\u{0}']), ('\u{2cbc}', ['\u{2cbd}', '\u{0}', '\u{0}']),
+ ('\u{2cbe}', ['\u{2cbf}', '\u{0}', '\u{0}']), ('\u{2cc0}', ['\u{2cc1}', '\u{0}', '\u{0}']),
+ ('\u{2cc2}', ['\u{2cc3}', '\u{0}', '\u{0}']), ('\u{2cc4}', ['\u{2cc5}', '\u{0}', '\u{0}']),
+ ('\u{2cc6}', ['\u{2cc7}', '\u{0}', '\u{0}']), ('\u{2cc8}', ['\u{2cc9}', '\u{0}', '\u{0}']),
+ ('\u{2cca}', ['\u{2ccb}', '\u{0}', '\u{0}']), ('\u{2ccc}', ['\u{2ccd}', '\u{0}', '\u{0}']),
+ ('\u{2cce}', ['\u{2ccf}', '\u{0}', '\u{0}']), ('\u{2cd0}', ['\u{2cd1}', '\u{0}', '\u{0}']),
+ ('\u{2cd2}', ['\u{2cd3}', '\u{0}', '\u{0}']), ('\u{2cd4}', ['\u{2cd5}', '\u{0}', '\u{0}']),
+ ('\u{2cd6}', ['\u{2cd7}', '\u{0}', '\u{0}']), ('\u{2cd8}', ['\u{2cd9}', '\u{0}', '\u{0}']),
+ ('\u{2cda}', ['\u{2cdb}', '\u{0}', '\u{0}']), ('\u{2cdc}', ['\u{2cdd}', '\u{0}', '\u{0}']),
+ ('\u{2cde}', ['\u{2cdf}', '\u{0}', '\u{0}']), ('\u{2ce0}', ['\u{2ce1}', '\u{0}', '\u{0}']),
+ ('\u{2ce2}', ['\u{2ce3}', '\u{0}', '\u{0}']), ('\u{2ceb}', ['\u{2cec}', '\u{0}', '\u{0}']),
+ ('\u{2ced}', ['\u{2cee}', '\u{0}', '\u{0}']), ('\u{2cf2}', ['\u{2cf3}', '\u{0}', '\u{0}']),
+ ('\u{a640}', ['\u{a641}', '\u{0}', '\u{0}']), ('\u{a642}', ['\u{a643}', '\u{0}', '\u{0}']),
+ ('\u{a644}', ['\u{a645}', '\u{0}', '\u{0}']), ('\u{a646}', ['\u{a647}', '\u{0}', '\u{0}']),
+ ('\u{a648}', ['\u{a649}', '\u{0}', '\u{0}']), ('\u{a64a}', ['\u{a64b}', '\u{0}', '\u{0}']),
+ ('\u{a64c}', ['\u{a64d}', '\u{0}', '\u{0}']), ('\u{a64e}', ['\u{a64f}', '\u{0}', '\u{0}']),
+ ('\u{a650}', ['\u{a651}', '\u{0}', '\u{0}']), ('\u{a652}', ['\u{a653}', '\u{0}', '\u{0}']),
+ ('\u{a654}', ['\u{a655}', '\u{0}', '\u{0}']), ('\u{a656}', ['\u{a657}', '\u{0}', '\u{0}']),
+ ('\u{a658}', ['\u{a659}', '\u{0}', '\u{0}']), ('\u{a65a}', ['\u{a65b}', '\u{0}', '\u{0}']),
+ ('\u{a65c}', ['\u{a65d}', '\u{0}', '\u{0}']), ('\u{a65e}', ['\u{a65f}', '\u{0}', '\u{0}']),
+ ('\u{a660}', ['\u{a661}', '\u{0}', '\u{0}']), ('\u{a662}', ['\u{a663}', '\u{0}', '\u{0}']),
+ ('\u{a664}', ['\u{a665}', '\u{0}', '\u{0}']), ('\u{a666}', ['\u{a667}', '\u{0}', '\u{0}']),
+ ('\u{a668}', ['\u{a669}', '\u{0}', '\u{0}']), ('\u{a66a}', ['\u{a66b}', '\u{0}', '\u{0}']),
+ ('\u{a66c}', ['\u{a66d}', '\u{0}', '\u{0}']), ('\u{a680}', ['\u{a681}', '\u{0}', '\u{0}']),
+ ('\u{a682}', ['\u{a683}', '\u{0}', '\u{0}']), ('\u{a684}', ['\u{a685}', '\u{0}', '\u{0}']),
+ ('\u{a686}', ['\u{a687}', '\u{0}', '\u{0}']), ('\u{a688}', ['\u{a689}', '\u{0}', '\u{0}']),
+ ('\u{a68a}', ['\u{a68b}', '\u{0}', '\u{0}']), ('\u{a68c}', ['\u{a68d}', '\u{0}', '\u{0}']),
+ ('\u{a68e}', ['\u{a68f}', '\u{0}', '\u{0}']), ('\u{a690}', ['\u{a691}', '\u{0}', '\u{0}']),
+ ('\u{a692}', ['\u{a693}', '\u{0}', '\u{0}']), ('\u{a694}', ['\u{a695}', '\u{0}', '\u{0}']),
+ ('\u{a696}', ['\u{a697}', '\u{0}', '\u{0}']), ('\u{a698}', ['\u{a699}', '\u{0}', '\u{0}']),
+ ('\u{a69a}', ['\u{a69b}', '\u{0}', '\u{0}']), ('\u{a722}', ['\u{a723}', '\u{0}', '\u{0}']),
+ ('\u{a724}', ['\u{a725}', '\u{0}', '\u{0}']), ('\u{a726}', ['\u{a727}', '\u{0}', '\u{0}']),
+ ('\u{a728}', ['\u{a729}', '\u{0}', '\u{0}']), ('\u{a72a}', ['\u{a72b}', '\u{0}', '\u{0}']),
+ ('\u{a72c}', ['\u{a72d}', '\u{0}', '\u{0}']), ('\u{a72e}', ['\u{a72f}', '\u{0}', '\u{0}']),
+ ('\u{a732}', ['\u{a733}', '\u{0}', '\u{0}']), ('\u{a734}', ['\u{a735}', '\u{0}', '\u{0}']),
+ ('\u{a736}', ['\u{a737}', '\u{0}', '\u{0}']), ('\u{a738}', ['\u{a739}', '\u{0}', '\u{0}']),
+ ('\u{a73a}', ['\u{a73b}', '\u{0}', '\u{0}']), ('\u{a73c}', ['\u{a73d}', '\u{0}', '\u{0}']),
+ ('\u{a73e}', ['\u{a73f}', '\u{0}', '\u{0}']), ('\u{a740}', ['\u{a741}', '\u{0}', '\u{0}']),
+ ('\u{a742}', ['\u{a743}', '\u{0}', '\u{0}']), ('\u{a744}', ['\u{a745}', '\u{0}', '\u{0}']),
+ ('\u{a746}', ['\u{a747}', '\u{0}', '\u{0}']), ('\u{a748}', ['\u{a749}', '\u{0}', '\u{0}']),
+ ('\u{a74a}', ['\u{a74b}', '\u{0}', '\u{0}']), ('\u{a74c}', ['\u{a74d}', '\u{0}', '\u{0}']),
+ ('\u{a74e}', ['\u{a74f}', '\u{0}', '\u{0}']), ('\u{a750}', ['\u{a751}', '\u{0}', '\u{0}']),
+ ('\u{a752}', ['\u{a753}', '\u{0}', '\u{0}']), ('\u{a754}', ['\u{a755}', '\u{0}', '\u{0}']),
+ ('\u{a756}', ['\u{a757}', '\u{0}', '\u{0}']), ('\u{a758}', ['\u{a759}', '\u{0}', '\u{0}']),
+ ('\u{a75a}', ['\u{a75b}', '\u{0}', '\u{0}']), ('\u{a75c}', ['\u{a75d}', '\u{0}', '\u{0}']),
+ ('\u{a75e}', ['\u{a75f}', '\u{0}', '\u{0}']), ('\u{a760}', ['\u{a761}', '\u{0}', '\u{0}']),
+ ('\u{a762}', ['\u{a763}', '\u{0}', '\u{0}']), ('\u{a764}', ['\u{a765}', '\u{0}', '\u{0}']),
+ ('\u{a766}', ['\u{a767}', '\u{0}', '\u{0}']), ('\u{a768}', ['\u{a769}', '\u{0}', '\u{0}']),
+ ('\u{a76a}', ['\u{a76b}', '\u{0}', '\u{0}']), ('\u{a76c}', ['\u{a76d}', '\u{0}', '\u{0}']),
+ ('\u{a76e}', ['\u{a76f}', '\u{0}', '\u{0}']), ('\u{a779}', ['\u{a77a}', '\u{0}', '\u{0}']),
+ ('\u{a77b}', ['\u{a77c}', '\u{0}', '\u{0}']), ('\u{a77d}', ['\u{1d79}', '\u{0}', '\u{0}']),
+ ('\u{a77e}', ['\u{a77f}', '\u{0}', '\u{0}']), ('\u{a780}', ['\u{a781}', '\u{0}', '\u{0}']),
+ ('\u{a782}', ['\u{a783}', '\u{0}', '\u{0}']), ('\u{a784}', ['\u{a785}', '\u{0}', '\u{0}']),
+ ('\u{a786}', ['\u{a787}', '\u{0}', '\u{0}']), ('\u{a78b}', ['\u{a78c}', '\u{0}', '\u{0}']),
+ ('\u{a78d}', ['\u{265}', '\u{0}', '\u{0}']), ('\u{a790}', ['\u{a791}', '\u{0}', '\u{0}']),
+ ('\u{a792}', ['\u{a793}', '\u{0}', '\u{0}']), ('\u{a796}', ['\u{a797}', '\u{0}', '\u{0}']),
+ ('\u{a798}', ['\u{a799}', '\u{0}', '\u{0}']), ('\u{a79a}', ['\u{a79b}', '\u{0}', '\u{0}']),
+ ('\u{a79c}', ['\u{a79d}', '\u{0}', '\u{0}']), ('\u{a79e}', ['\u{a79f}', '\u{0}', '\u{0}']),
+ ('\u{a7a0}', ['\u{a7a1}', '\u{0}', '\u{0}']), ('\u{a7a2}', ['\u{a7a3}', '\u{0}', '\u{0}']),
+ ('\u{a7a4}', ['\u{a7a5}', '\u{0}', '\u{0}']), ('\u{a7a6}', ['\u{a7a7}', '\u{0}', '\u{0}']),
+ ('\u{a7a8}', ['\u{a7a9}', '\u{0}', '\u{0}']), ('\u{a7aa}', ['\u{266}', '\u{0}', '\u{0}']),
+ ('\u{a7ab}', ['\u{25c}', '\u{0}', '\u{0}']), ('\u{a7ac}', ['\u{261}', '\u{0}', '\u{0}']),
+ ('\u{a7ad}', ['\u{26c}', '\u{0}', '\u{0}']), ('\u{a7ae}', ['\u{26a}', '\u{0}', '\u{0}']),
+ ('\u{a7b0}', ['\u{29e}', '\u{0}', '\u{0}']), ('\u{a7b1}', ['\u{287}', '\u{0}', '\u{0}']),
+ ('\u{a7b2}', ['\u{29d}', '\u{0}', '\u{0}']), ('\u{a7b3}', ['\u{ab53}', '\u{0}', '\u{0}']),
+ ('\u{a7b4}', ['\u{a7b5}', '\u{0}', '\u{0}']), ('\u{a7b6}', ['\u{a7b7}', '\u{0}', '\u{0}']),
+ ('\u{a7b8}', ['\u{a7b9}', '\u{0}', '\u{0}']), ('\u{a7ba}', ['\u{a7bb}', '\u{0}', '\u{0}']),
+ ('\u{a7bc}', ['\u{a7bd}', '\u{0}', '\u{0}']), ('\u{a7be}', ['\u{a7bf}', '\u{0}', '\u{0}']),
+ ('\u{a7c0}', ['\u{a7c1}', '\u{0}', '\u{0}']), ('\u{a7c2}', ['\u{a7c3}', '\u{0}', '\u{0}']),
+ ('\u{a7c4}', ['\u{a794}', '\u{0}', '\u{0}']), ('\u{a7c5}', ['\u{282}', '\u{0}', '\u{0}']),
+ ('\u{a7c6}', ['\u{1d8e}', '\u{0}', '\u{0}']), ('\u{a7c7}', ['\u{a7c8}', '\u{0}', '\u{0}']),
+ ('\u{a7c9}', ['\u{a7ca}', '\u{0}', '\u{0}']), ('\u{a7d0}', ['\u{a7d1}', '\u{0}', '\u{0}']),
+ ('\u{a7d6}', ['\u{a7d7}', '\u{0}', '\u{0}']), ('\u{a7d8}', ['\u{a7d9}', '\u{0}', '\u{0}']),
+ ('\u{a7f5}', ['\u{a7f6}', '\u{0}', '\u{0}']), ('\u{ff21}', ['\u{ff41}', '\u{0}', '\u{0}']),
+ ('\u{ff22}', ['\u{ff42}', '\u{0}', '\u{0}']), ('\u{ff23}', ['\u{ff43}', '\u{0}', '\u{0}']),
+ ('\u{ff24}', ['\u{ff44}', '\u{0}', '\u{0}']), ('\u{ff25}', ['\u{ff45}', '\u{0}', '\u{0}']),
+ ('\u{ff26}', ['\u{ff46}', '\u{0}', '\u{0}']), ('\u{ff27}', ['\u{ff47}', '\u{0}', '\u{0}']),
+ ('\u{ff28}', ['\u{ff48}', '\u{0}', '\u{0}']), ('\u{ff29}', ['\u{ff49}', '\u{0}', '\u{0}']),
+ ('\u{ff2a}', ['\u{ff4a}', '\u{0}', '\u{0}']), ('\u{ff2b}', ['\u{ff4b}', '\u{0}', '\u{0}']),
+ ('\u{ff2c}', ['\u{ff4c}', '\u{0}', '\u{0}']), ('\u{ff2d}', ['\u{ff4d}', '\u{0}', '\u{0}']),
+ ('\u{ff2e}', ['\u{ff4e}', '\u{0}', '\u{0}']), ('\u{ff2f}', ['\u{ff4f}', '\u{0}', '\u{0}']),
+ ('\u{ff30}', ['\u{ff50}', '\u{0}', '\u{0}']), ('\u{ff31}', ['\u{ff51}', '\u{0}', '\u{0}']),
+ ('\u{ff32}', ['\u{ff52}', '\u{0}', '\u{0}']), ('\u{ff33}', ['\u{ff53}', '\u{0}', '\u{0}']),
+ ('\u{ff34}', ['\u{ff54}', '\u{0}', '\u{0}']), ('\u{ff35}', ['\u{ff55}', '\u{0}', '\u{0}']),
+ ('\u{ff36}', ['\u{ff56}', '\u{0}', '\u{0}']), ('\u{ff37}', ['\u{ff57}', '\u{0}', '\u{0}']),
+ ('\u{ff38}', ['\u{ff58}', '\u{0}', '\u{0}']), ('\u{ff39}', ['\u{ff59}', '\u{0}', '\u{0}']),
+ ('\u{ff3a}', ['\u{ff5a}', '\u{0}', '\u{0}']),
+ ('\u{10400}', ['\u{10428}', '\u{0}', '\u{0}']),
+ ('\u{10401}', ['\u{10429}', '\u{0}', '\u{0}']),
+ ('\u{10402}', ['\u{1042a}', '\u{0}', '\u{0}']),
+ ('\u{10403}', ['\u{1042b}', '\u{0}', '\u{0}']),
+ ('\u{10404}', ['\u{1042c}', '\u{0}', '\u{0}']),
+ ('\u{10405}', ['\u{1042d}', '\u{0}', '\u{0}']),
+ ('\u{10406}', ['\u{1042e}', '\u{0}', '\u{0}']),
+ ('\u{10407}', ['\u{1042f}', '\u{0}', '\u{0}']),
+ ('\u{10408}', ['\u{10430}', '\u{0}', '\u{0}']),
+ ('\u{10409}', ['\u{10431}', '\u{0}', '\u{0}']),
+ ('\u{1040a}', ['\u{10432}', '\u{0}', '\u{0}']),
+ ('\u{1040b}', ['\u{10433}', '\u{0}', '\u{0}']),
+ ('\u{1040c}', ['\u{10434}', '\u{0}', '\u{0}']),
+ ('\u{1040d}', ['\u{10435}', '\u{0}', '\u{0}']),
+ ('\u{1040e}', ['\u{10436}', '\u{0}', '\u{0}']),
+ ('\u{1040f}', ['\u{10437}', '\u{0}', '\u{0}']),
+ ('\u{10410}', ['\u{10438}', '\u{0}', '\u{0}']),
+ ('\u{10411}', ['\u{10439}', '\u{0}', '\u{0}']),
+ ('\u{10412}', ['\u{1043a}', '\u{0}', '\u{0}']),
+ ('\u{10413}', ['\u{1043b}', '\u{0}', '\u{0}']),
+ ('\u{10414}', ['\u{1043c}', '\u{0}', '\u{0}']),
+ ('\u{10415}', ['\u{1043d}', '\u{0}', '\u{0}']),
+ ('\u{10416}', ['\u{1043e}', '\u{0}', '\u{0}']),
+ ('\u{10417}', ['\u{1043f}', '\u{0}', '\u{0}']),
+ ('\u{10418}', ['\u{10440}', '\u{0}', '\u{0}']),
+ ('\u{10419}', ['\u{10441}', '\u{0}', '\u{0}']),
+ ('\u{1041a}', ['\u{10442}', '\u{0}', '\u{0}']),
+ ('\u{1041b}', ['\u{10443}', '\u{0}', '\u{0}']),
+ ('\u{1041c}', ['\u{10444}', '\u{0}', '\u{0}']),
+ ('\u{1041d}', ['\u{10445}', '\u{0}', '\u{0}']),
+ ('\u{1041e}', ['\u{10446}', '\u{0}', '\u{0}']),
+ ('\u{1041f}', ['\u{10447}', '\u{0}', '\u{0}']),
+ ('\u{10420}', ['\u{10448}', '\u{0}', '\u{0}']),
+ ('\u{10421}', ['\u{10449}', '\u{0}', '\u{0}']),
+ ('\u{10422}', ['\u{1044a}', '\u{0}', '\u{0}']),
+ ('\u{10423}', ['\u{1044b}', '\u{0}', '\u{0}']),
+ ('\u{10424}', ['\u{1044c}', '\u{0}', '\u{0}']),
+ ('\u{10425}', ['\u{1044d}', '\u{0}', '\u{0}']),
+ ('\u{10426}', ['\u{1044e}', '\u{0}', '\u{0}']),
+ ('\u{10427}', ['\u{1044f}', '\u{0}', '\u{0}']),
+ ('\u{104b0}', ['\u{104d8}', '\u{0}', '\u{0}']),
+ ('\u{104b1}', ['\u{104d9}', '\u{0}', '\u{0}']),
+ ('\u{104b2}', ['\u{104da}', '\u{0}', '\u{0}']),
+ ('\u{104b3}', ['\u{104db}', '\u{0}', '\u{0}']),
+ ('\u{104b4}', ['\u{104dc}', '\u{0}', '\u{0}']),
+ ('\u{104b5}', ['\u{104dd}', '\u{0}', '\u{0}']),
+ ('\u{104b6}', ['\u{104de}', '\u{0}', '\u{0}']),
+ ('\u{104b7}', ['\u{104df}', '\u{0}', '\u{0}']),
+ ('\u{104b8}', ['\u{104e0}', '\u{0}', '\u{0}']),
+ ('\u{104b9}', ['\u{104e1}', '\u{0}', '\u{0}']),
+ ('\u{104ba}', ['\u{104e2}', '\u{0}', '\u{0}']),
+ ('\u{104bb}', ['\u{104e3}', '\u{0}', '\u{0}']),
+ ('\u{104bc}', ['\u{104e4}', '\u{0}', '\u{0}']),
+ ('\u{104bd}', ['\u{104e5}', '\u{0}', '\u{0}']),
+ ('\u{104be}', ['\u{104e6}', '\u{0}', '\u{0}']),
+ ('\u{104bf}', ['\u{104e7}', '\u{0}', '\u{0}']),
+ ('\u{104c0}', ['\u{104e8}', '\u{0}', '\u{0}']),
+ ('\u{104c1}', ['\u{104e9}', '\u{0}', '\u{0}']),
+ ('\u{104c2}', ['\u{104ea}', '\u{0}', '\u{0}']),
+ ('\u{104c3}', ['\u{104eb}', '\u{0}', '\u{0}']),
+ ('\u{104c4}', ['\u{104ec}', '\u{0}', '\u{0}']),
+ ('\u{104c5}', ['\u{104ed}', '\u{0}', '\u{0}']),
+ ('\u{104c6}', ['\u{104ee}', '\u{0}', '\u{0}']),
+ ('\u{104c7}', ['\u{104ef}', '\u{0}', '\u{0}']),
+ ('\u{104c8}', ['\u{104f0}', '\u{0}', '\u{0}']),
+ ('\u{104c9}', ['\u{104f1}', '\u{0}', '\u{0}']),
+ ('\u{104ca}', ['\u{104f2}', '\u{0}', '\u{0}']),
+ ('\u{104cb}', ['\u{104f3}', '\u{0}', '\u{0}']),
+ ('\u{104cc}', ['\u{104f4}', '\u{0}', '\u{0}']),
+ ('\u{104cd}', ['\u{104f5}', '\u{0}', '\u{0}']),
+ ('\u{104ce}', ['\u{104f6}', '\u{0}', '\u{0}']),
+ ('\u{104cf}', ['\u{104f7}', '\u{0}', '\u{0}']),
+ ('\u{104d0}', ['\u{104f8}', '\u{0}', '\u{0}']),
+ ('\u{104d1}', ['\u{104f9}', '\u{0}', '\u{0}']),
+ ('\u{104d2}', ['\u{104fa}', '\u{0}', '\u{0}']),
+ ('\u{104d3}', ['\u{104fb}', '\u{0}', '\u{0}']),
+ ('\u{10570}', ['\u{10597}', '\u{0}', '\u{0}']),
+ ('\u{10571}', ['\u{10598}', '\u{0}', '\u{0}']),
+ ('\u{10572}', ['\u{10599}', '\u{0}', '\u{0}']),
+ ('\u{10573}', ['\u{1059a}', '\u{0}', '\u{0}']),
+ ('\u{10574}', ['\u{1059b}', '\u{0}', '\u{0}']),
+ ('\u{10575}', ['\u{1059c}', '\u{0}', '\u{0}']),
+ ('\u{10576}', ['\u{1059d}', '\u{0}', '\u{0}']),
+ ('\u{10577}', ['\u{1059e}', '\u{0}', '\u{0}']),
+ ('\u{10578}', ['\u{1059f}', '\u{0}', '\u{0}']),
+ ('\u{10579}', ['\u{105a0}', '\u{0}', '\u{0}']),
+ ('\u{1057a}', ['\u{105a1}', '\u{0}', '\u{0}']),
+ ('\u{1057c}', ['\u{105a3}', '\u{0}', '\u{0}']),
+ ('\u{1057d}', ['\u{105a4}', '\u{0}', '\u{0}']),
+ ('\u{1057e}', ['\u{105a5}', '\u{0}', '\u{0}']),
+ ('\u{1057f}', ['\u{105a6}', '\u{0}', '\u{0}']),
+ ('\u{10580}', ['\u{105a7}', '\u{0}', '\u{0}']),
+ ('\u{10581}', ['\u{105a8}', '\u{0}', '\u{0}']),
+ ('\u{10582}', ['\u{105a9}', '\u{0}', '\u{0}']),
+ ('\u{10583}', ['\u{105aa}', '\u{0}', '\u{0}']),
+ ('\u{10584}', ['\u{105ab}', '\u{0}', '\u{0}']),
+ ('\u{10585}', ['\u{105ac}', '\u{0}', '\u{0}']),
+ ('\u{10586}', ['\u{105ad}', '\u{0}', '\u{0}']),
+ ('\u{10587}', ['\u{105ae}', '\u{0}', '\u{0}']),
+ ('\u{10588}', ['\u{105af}', '\u{0}', '\u{0}']),
+ ('\u{10589}', ['\u{105b0}', '\u{0}', '\u{0}']),
+ ('\u{1058a}', ['\u{105b1}', '\u{0}', '\u{0}']),
+ ('\u{1058c}', ['\u{105b3}', '\u{0}', '\u{0}']),
+ ('\u{1058d}', ['\u{105b4}', '\u{0}', '\u{0}']),
+ ('\u{1058e}', ['\u{105b5}', '\u{0}', '\u{0}']),
+ ('\u{1058f}', ['\u{105b6}', '\u{0}', '\u{0}']),
+ ('\u{10590}', ['\u{105b7}', '\u{0}', '\u{0}']),
+ ('\u{10591}', ['\u{105b8}', '\u{0}', '\u{0}']),
+ ('\u{10592}', ['\u{105b9}', '\u{0}', '\u{0}']),
+ ('\u{10594}', ['\u{105bb}', '\u{0}', '\u{0}']),
+ ('\u{10595}', ['\u{105bc}', '\u{0}', '\u{0}']),
+ ('\u{10c80}', ['\u{10cc0}', '\u{0}', '\u{0}']),
+ ('\u{10c81}', ['\u{10cc1}', '\u{0}', '\u{0}']),
+ ('\u{10c82}', ['\u{10cc2}', '\u{0}', '\u{0}']),
+ ('\u{10c83}', ['\u{10cc3}', '\u{0}', '\u{0}']),
+ ('\u{10c84}', ['\u{10cc4}', '\u{0}', '\u{0}']),
+ ('\u{10c85}', ['\u{10cc5}', '\u{0}', '\u{0}']),
+ ('\u{10c86}', ['\u{10cc6}', '\u{0}', '\u{0}']),
+ ('\u{10c87}', ['\u{10cc7}', '\u{0}', '\u{0}']),
+ ('\u{10c88}', ['\u{10cc8}', '\u{0}', '\u{0}']),
+ ('\u{10c89}', ['\u{10cc9}', '\u{0}', '\u{0}']),
+ ('\u{10c8a}', ['\u{10cca}', '\u{0}', '\u{0}']),
+ ('\u{10c8b}', ['\u{10ccb}', '\u{0}', '\u{0}']),
+ ('\u{10c8c}', ['\u{10ccc}', '\u{0}', '\u{0}']),
+ ('\u{10c8d}', ['\u{10ccd}', '\u{0}', '\u{0}']),
+ ('\u{10c8e}', ['\u{10cce}', '\u{0}', '\u{0}']),
+ ('\u{10c8f}', ['\u{10ccf}', '\u{0}', '\u{0}']),
+ ('\u{10c90}', ['\u{10cd0}', '\u{0}', '\u{0}']),
+ ('\u{10c91}', ['\u{10cd1}', '\u{0}', '\u{0}']),
+ ('\u{10c92}', ['\u{10cd2}', '\u{0}', '\u{0}']),
+ ('\u{10c93}', ['\u{10cd3}', '\u{0}', '\u{0}']),
+ ('\u{10c94}', ['\u{10cd4}', '\u{0}', '\u{0}']),
+ ('\u{10c95}', ['\u{10cd5}', '\u{0}', '\u{0}']),
+ ('\u{10c96}', ['\u{10cd6}', '\u{0}', '\u{0}']),
+ ('\u{10c97}', ['\u{10cd7}', '\u{0}', '\u{0}']),
+ ('\u{10c98}', ['\u{10cd8}', '\u{0}', '\u{0}']),
+ ('\u{10c99}', ['\u{10cd9}', '\u{0}', '\u{0}']),
+ ('\u{10c9a}', ['\u{10cda}', '\u{0}', '\u{0}']),
+ ('\u{10c9b}', ['\u{10cdb}', '\u{0}', '\u{0}']),
+ ('\u{10c9c}', ['\u{10cdc}', '\u{0}', '\u{0}']),
+ ('\u{10c9d}', ['\u{10cdd}', '\u{0}', '\u{0}']),
+ ('\u{10c9e}', ['\u{10cde}', '\u{0}', '\u{0}']),
+ ('\u{10c9f}', ['\u{10cdf}', '\u{0}', '\u{0}']),
+ ('\u{10ca0}', ['\u{10ce0}', '\u{0}', '\u{0}']),
+ ('\u{10ca1}', ['\u{10ce1}', '\u{0}', '\u{0}']),
+ ('\u{10ca2}', ['\u{10ce2}', '\u{0}', '\u{0}']),
+ ('\u{10ca3}', ['\u{10ce3}', '\u{0}', '\u{0}']),
+ ('\u{10ca4}', ['\u{10ce4}', '\u{0}', '\u{0}']),
+ ('\u{10ca5}', ['\u{10ce5}', '\u{0}', '\u{0}']),
+ ('\u{10ca6}', ['\u{10ce6}', '\u{0}', '\u{0}']),
+ ('\u{10ca7}', ['\u{10ce7}', '\u{0}', '\u{0}']),
+ ('\u{10ca8}', ['\u{10ce8}', '\u{0}', '\u{0}']),
+ ('\u{10ca9}', ['\u{10ce9}', '\u{0}', '\u{0}']),
+ ('\u{10caa}', ['\u{10cea}', '\u{0}', '\u{0}']),
+ ('\u{10cab}', ['\u{10ceb}', '\u{0}', '\u{0}']),
+ ('\u{10cac}', ['\u{10cec}', '\u{0}', '\u{0}']),
+ ('\u{10cad}', ['\u{10ced}', '\u{0}', '\u{0}']),
+ ('\u{10cae}', ['\u{10cee}', '\u{0}', '\u{0}']),
+ ('\u{10caf}', ['\u{10cef}', '\u{0}', '\u{0}']),
+ ('\u{10cb0}', ['\u{10cf0}', '\u{0}', '\u{0}']),
+ ('\u{10cb1}', ['\u{10cf1}', '\u{0}', '\u{0}']),
+ ('\u{10cb2}', ['\u{10cf2}', '\u{0}', '\u{0}']),
+ ('\u{118a0}', ['\u{118c0}', '\u{0}', '\u{0}']),
+ ('\u{118a1}', ['\u{118c1}', '\u{0}', '\u{0}']),
+ ('\u{118a2}', ['\u{118c2}', '\u{0}', '\u{0}']),
+ ('\u{118a3}', ['\u{118c3}', '\u{0}', '\u{0}']),
+ ('\u{118a4}', ['\u{118c4}', '\u{0}', '\u{0}']),
+ ('\u{118a5}', ['\u{118c5}', '\u{0}', '\u{0}']),
+ ('\u{118a6}', ['\u{118c6}', '\u{0}', '\u{0}']),
+ ('\u{118a7}', ['\u{118c7}', '\u{0}', '\u{0}']),
+ ('\u{118a8}', ['\u{118c8}', '\u{0}', '\u{0}']),
+ ('\u{118a9}', ['\u{118c9}', '\u{0}', '\u{0}']),
+ ('\u{118aa}', ['\u{118ca}', '\u{0}', '\u{0}']),
+ ('\u{118ab}', ['\u{118cb}', '\u{0}', '\u{0}']),
+ ('\u{118ac}', ['\u{118cc}', '\u{0}', '\u{0}']),
+ ('\u{118ad}', ['\u{118cd}', '\u{0}', '\u{0}']),
+ ('\u{118ae}', ['\u{118ce}', '\u{0}', '\u{0}']),
+ ('\u{118af}', ['\u{118cf}', '\u{0}', '\u{0}']),
+ ('\u{118b0}', ['\u{118d0}', '\u{0}', '\u{0}']),
+ ('\u{118b1}', ['\u{118d1}', '\u{0}', '\u{0}']),
+ ('\u{118b2}', ['\u{118d2}', '\u{0}', '\u{0}']),
+ ('\u{118b3}', ['\u{118d3}', '\u{0}', '\u{0}']),
+ ('\u{118b4}', ['\u{118d4}', '\u{0}', '\u{0}']),
+ ('\u{118b5}', ['\u{118d5}', '\u{0}', '\u{0}']),
+ ('\u{118b6}', ['\u{118d6}', '\u{0}', '\u{0}']),
+ ('\u{118b7}', ['\u{118d7}', '\u{0}', '\u{0}']),
+ ('\u{118b8}', ['\u{118d8}', '\u{0}', '\u{0}']),
+ ('\u{118b9}', ['\u{118d9}', '\u{0}', '\u{0}']),
+ ('\u{118ba}', ['\u{118da}', '\u{0}', '\u{0}']),
+ ('\u{118bb}', ['\u{118db}', '\u{0}', '\u{0}']),
+ ('\u{118bc}', ['\u{118dc}', '\u{0}', '\u{0}']),
+ ('\u{118bd}', ['\u{118dd}', '\u{0}', '\u{0}']),
+ ('\u{118be}', ['\u{118de}', '\u{0}', '\u{0}']),
+ ('\u{118bf}', ['\u{118df}', '\u{0}', '\u{0}']),
+ ('\u{16e40}', ['\u{16e60}', '\u{0}', '\u{0}']),
+ ('\u{16e41}', ['\u{16e61}', '\u{0}', '\u{0}']),
+ ('\u{16e42}', ['\u{16e62}', '\u{0}', '\u{0}']),
+ ('\u{16e43}', ['\u{16e63}', '\u{0}', '\u{0}']),
+ ('\u{16e44}', ['\u{16e64}', '\u{0}', '\u{0}']),
+ ('\u{16e45}', ['\u{16e65}', '\u{0}', '\u{0}']),
+ ('\u{16e46}', ['\u{16e66}', '\u{0}', '\u{0}']),
+ ('\u{16e47}', ['\u{16e67}', '\u{0}', '\u{0}']),
+ ('\u{16e48}', ['\u{16e68}', '\u{0}', '\u{0}']),
+ ('\u{16e49}', ['\u{16e69}', '\u{0}', '\u{0}']),
+ ('\u{16e4a}', ['\u{16e6a}', '\u{0}', '\u{0}']),
+ ('\u{16e4b}', ['\u{16e6b}', '\u{0}', '\u{0}']),
+ ('\u{16e4c}', ['\u{16e6c}', '\u{0}', '\u{0}']),
+ ('\u{16e4d}', ['\u{16e6d}', '\u{0}', '\u{0}']),
+ ('\u{16e4e}', ['\u{16e6e}', '\u{0}', '\u{0}']),
+ ('\u{16e4f}', ['\u{16e6f}', '\u{0}', '\u{0}']),
+ ('\u{16e50}', ['\u{16e70}', '\u{0}', '\u{0}']),
+ ('\u{16e51}', ['\u{16e71}', '\u{0}', '\u{0}']),
+ ('\u{16e52}', ['\u{16e72}', '\u{0}', '\u{0}']),
+ ('\u{16e53}', ['\u{16e73}', '\u{0}', '\u{0}']),
+ ('\u{16e54}', ['\u{16e74}', '\u{0}', '\u{0}']),
+ ('\u{16e55}', ['\u{16e75}', '\u{0}', '\u{0}']),
+ ('\u{16e56}', ['\u{16e76}', '\u{0}', '\u{0}']),
+ ('\u{16e57}', ['\u{16e77}', '\u{0}', '\u{0}']),
+ ('\u{16e58}', ['\u{16e78}', '\u{0}', '\u{0}']),
+ ('\u{16e59}', ['\u{16e79}', '\u{0}', '\u{0}']),
+ ('\u{16e5a}', ['\u{16e7a}', '\u{0}', '\u{0}']),
+ ('\u{16e5b}', ['\u{16e7b}', '\u{0}', '\u{0}']),
+ ('\u{16e5c}', ['\u{16e7c}', '\u{0}', '\u{0}']),
+ ('\u{16e5d}', ['\u{16e7d}', '\u{0}', '\u{0}']),
+ ('\u{16e5e}', ['\u{16e7e}', '\u{0}', '\u{0}']),
+ ('\u{16e5f}', ['\u{16e7f}', '\u{0}', '\u{0}']),
+ ('\u{1e900}', ['\u{1e922}', '\u{0}', '\u{0}']),
+ ('\u{1e901}', ['\u{1e923}', '\u{0}', '\u{0}']),
+ ('\u{1e902}', ['\u{1e924}', '\u{0}', '\u{0}']),
+ ('\u{1e903}', ['\u{1e925}', '\u{0}', '\u{0}']),
+ ('\u{1e904}', ['\u{1e926}', '\u{0}', '\u{0}']),
+ ('\u{1e905}', ['\u{1e927}', '\u{0}', '\u{0}']),
+ ('\u{1e906}', ['\u{1e928}', '\u{0}', '\u{0}']),
+ ('\u{1e907}', ['\u{1e929}', '\u{0}', '\u{0}']),
+ ('\u{1e908}', ['\u{1e92a}', '\u{0}', '\u{0}']),
+ ('\u{1e909}', ['\u{1e92b}', '\u{0}', '\u{0}']),
+ ('\u{1e90a}', ['\u{1e92c}', '\u{0}', '\u{0}']),
+ ('\u{1e90b}', ['\u{1e92d}', '\u{0}', '\u{0}']),
+ ('\u{1e90c}', ['\u{1e92e}', '\u{0}', '\u{0}']),
+ ('\u{1e90d}', ['\u{1e92f}', '\u{0}', '\u{0}']),
+ ('\u{1e90e}', ['\u{1e930}', '\u{0}', '\u{0}']),
+ ('\u{1e90f}', ['\u{1e931}', '\u{0}', '\u{0}']),
+ ('\u{1e910}', ['\u{1e932}', '\u{0}', '\u{0}']),
+ ('\u{1e911}', ['\u{1e933}', '\u{0}', '\u{0}']),
+ ('\u{1e912}', ['\u{1e934}', '\u{0}', '\u{0}']),
+ ('\u{1e913}', ['\u{1e935}', '\u{0}', '\u{0}']),
+ ('\u{1e914}', ['\u{1e936}', '\u{0}', '\u{0}']),
+ ('\u{1e915}', ['\u{1e937}', '\u{0}', '\u{0}']),
+ ('\u{1e916}', ['\u{1e938}', '\u{0}', '\u{0}']),
+ ('\u{1e917}', ['\u{1e939}', '\u{0}', '\u{0}']),
+ ('\u{1e918}', ['\u{1e93a}', '\u{0}', '\u{0}']),
+ ('\u{1e919}', ['\u{1e93b}', '\u{0}', '\u{0}']),
+ ('\u{1e91a}', ['\u{1e93c}', '\u{0}', '\u{0}']),
+ ('\u{1e91b}', ['\u{1e93d}', '\u{0}', '\u{0}']),
+ ('\u{1e91c}', ['\u{1e93e}', '\u{0}', '\u{0}']),
+ ('\u{1e91d}', ['\u{1e93f}', '\u{0}', '\u{0}']),
+ ('\u{1e91e}', ['\u{1e940}', '\u{0}', '\u{0}']),
+ ('\u{1e91f}', ['\u{1e941}', '\u{0}', '\u{0}']),
+ ('\u{1e920}', ['\u{1e942}', '\u{0}', '\u{0}']),
+ ('\u{1e921}', ['\u{1e943}', '\u{0}', '\u{0}']),
+ ];
+
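+ // Entries in these tables are sorted by the first field, and expansion
+ // slots beyond the mapping's length are padded with '\u{0}'. A minimal
+ // sketch of the lookup such a table supports (hypothetical helper shown
+ // for illustration, assuming a binary search over the sorted keys; not
+ // part of the upstream commit):
+ //
+ //     fn lookup(table: &[(char, [char; 3])], c: char) -> [char; 3] {
+ //         table
+ //             .binary_search_by(|&(key, _)| key.cmp(&c))
+ //             .map(|i| table[i].1)
+ //             .unwrap_or([c, '\u{0}', '\u{0}'])
+ //     }
+ //
+ // A caller would then emit chars from the returned array until it hits
+ // the first '\u{0}' padding slot.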
+ static UPPERCASE_TABLE: &[(char, [char; 3])] = &[
+ ('a', ['A', '\u{0}', '\u{0}']), ('b', ['B', '\u{0}', '\u{0}']),
+ ('c', ['C', '\u{0}', '\u{0}']), ('d', ['D', '\u{0}', '\u{0}']),
+ ('e', ['E', '\u{0}', '\u{0}']), ('f', ['F', '\u{0}', '\u{0}']),
+ ('g', ['G', '\u{0}', '\u{0}']), ('h', ['H', '\u{0}', '\u{0}']),
+ ('i', ['I', '\u{0}', '\u{0}']), ('j', ['J', '\u{0}', '\u{0}']),
+ ('k', ['K', '\u{0}', '\u{0}']), ('l', ['L', '\u{0}', '\u{0}']),
+ ('m', ['M', '\u{0}', '\u{0}']), ('n', ['N', '\u{0}', '\u{0}']),
+ ('o', ['O', '\u{0}', '\u{0}']), ('p', ['P', '\u{0}', '\u{0}']),
+ ('q', ['Q', '\u{0}', '\u{0}']), ('r', ['R', '\u{0}', '\u{0}']),
+ ('s', ['S', '\u{0}', '\u{0}']), ('t', ['T', '\u{0}', '\u{0}']),
+ ('u', ['U', '\u{0}', '\u{0}']), ('v', ['V', '\u{0}', '\u{0}']),
+ ('w', ['W', '\u{0}', '\u{0}']), ('x', ['X', '\u{0}', '\u{0}']),
+ ('y', ['Y', '\u{0}', '\u{0}']), ('z', ['Z', '\u{0}', '\u{0}']),
+ ('\u{b5}', ['\u{39c}', '\u{0}', '\u{0}']), ('\u{df}', ['S', 'S', '\u{0}']),
+ ('\u{e0}', ['\u{c0}', '\u{0}', '\u{0}']), ('\u{e1}', ['\u{c1}', '\u{0}', '\u{0}']),
+ ('\u{e2}', ['\u{c2}', '\u{0}', '\u{0}']), ('\u{e3}', ['\u{c3}', '\u{0}', '\u{0}']),
+ ('\u{e4}', ['\u{c4}', '\u{0}', '\u{0}']), ('\u{e5}', ['\u{c5}', '\u{0}', '\u{0}']),
+ ('\u{e6}', ['\u{c6}', '\u{0}', '\u{0}']), ('\u{e7}', ['\u{c7}', '\u{0}', '\u{0}']),
+ ('\u{e8}', ['\u{c8}', '\u{0}', '\u{0}']), ('\u{e9}', ['\u{c9}', '\u{0}', '\u{0}']),
+ ('\u{ea}', ['\u{ca}', '\u{0}', '\u{0}']), ('\u{eb}', ['\u{cb}', '\u{0}', '\u{0}']),
+ ('\u{ec}', ['\u{cc}', '\u{0}', '\u{0}']), ('\u{ed}', ['\u{cd}', '\u{0}', '\u{0}']),
+ ('\u{ee}', ['\u{ce}', '\u{0}', '\u{0}']), ('\u{ef}', ['\u{cf}', '\u{0}', '\u{0}']),
+ ('\u{f0}', ['\u{d0}', '\u{0}', '\u{0}']), ('\u{f1}', ['\u{d1}', '\u{0}', '\u{0}']),
+ ('\u{f2}', ['\u{d2}', '\u{0}', '\u{0}']), ('\u{f3}', ['\u{d3}', '\u{0}', '\u{0}']),
+ ('\u{f4}', ['\u{d4}', '\u{0}', '\u{0}']), ('\u{f5}', ['\u{d5}', '\u{0}', '\u{0}']),
+ ('\u{f6}', ['\u{d6}', '\u{0}', '\u{0}']), ('\u{f8}', ['\u{d8}', '\u{0}', '\u{0}']),
+ ('\u{f9}', ['\u{d9}', '\u{0}', '\u{0}']), ('\u{fa}', ['\u{da}', '\u{0}', '\u{0}']),
+ ('\u{fb}', ['\u{db}', '\u{0}', '\u{0}']), ('\u{fc}', ['\u{dc}', '\u{0}', '\u{0}']),
+ ('\u{fd}', ['\u{dd}', '\u{0}', '\u{0}']), ('\u{fe}', ['\u{de}', '\u{0}', '\u{0}']),
+ ('\u{ff}', ['\u{178}', '\u{0}', '\u{0}']), ('\u{101}', ['\u{100}', '\u{0}', '\u{0}']),
+ ('\u{103}', ['\u{102}', '\u{0}', '\u{0}']), ('\u{105}', ['\u{104}', '\u{0}', '\u{0}']),
+ ('\u{107}', ['\u{106}', '\u{0}', '\u{0}']), ('\u{109}', ['\u{108}', '\u{0}', '\u{0}']),
+ ('\u{10b}', ['\u{10a}', '\u{0}', '\u{0}']), ('\u{10d}', ['\u{10c}', '\u{0}', '\u{0}']),
+ ('\u{10f}', ['\u{10e}', '\u{0}', '\u{0}']), ('\u{111}', ['\u{110}', '\u{0}', '\u{0}']),
+ ('\u{113}', ['\u{112}', '\u{0}', '\u{0}']), ('\u{115}', ['\u{114}', '\u{0}', '\u{0}']),
+ ('\u{117}', ['\u{116}', '\u{0}', '\u{0}']), ('\u{119}', ['\u{118}', '\u{0}', '\u{0}']),
+ ('\u{11b}', ['\u{11a}', '\u{0}', '\u{0}']), ('\u{11d}', ['\u{11c}', '\u{0}', '\u{0}']),
+ ('\u{11f}', ['\u{11e}', '\u{0}', '\u{0}']), ('\u{121}', ['\u{120}', '\u{0}', '\u{0}']),
+ ('\u{123}', ['\u{122}', '\u{0}', '\u{0}']), ('\u{125}', ['\u{124}', '\u{0}', '\u{0}']),
+ ('\u{127}', ['\u{126}', '\u{0}', '\u{0}']), ('\u{129}', ['\u{128}', '\u{0}', '\u{0}']),
+ ('\u{12b}', ['\u{12a}', '\u{0}', '\u{0}']), ('\u{12d}', ['\u{12c}', '\u{0}', '\u{0}']),
+ ('\u{12f}', ['\u{12e}', '\u{0}', '\u{0}']), ('\u{131}', ['I', '\u{0}', '\u{0}']),
+ ('\u{133}', ['\u{132}', '\u{0}', '\u{0}']), ('\u{135}', ['\u{134}', '\u{0}', '\u{0}']),
+ ('\u{137}', ['\u{136}', '\u{0}', '\u{0}']), ('\u{13a}', ['\u{139}', '\u{0}', '\u{0}']),
+ ('\u{13c}', ['\u{13b}', '\u{0}', '\u{0}']), ('\u{13e}', ['\u{13d}', '\u{0}', '\u{0}']),
+ ('\u{140}', ['\u{13f}', '\u{0}', '\u{0}']), ('\u{142}', ['\u{141}', '\u{0}', '\u{0}']),
+ ('\u{144}', ['\u{143}', '\u{0}', '\u{0}']), ('\u{146}', ['\u{145}', '\u{0}', '\u{0}']),
+ ('\u{148}', ['\u{147}', '\u{0}', '\u{0}']), ('\u{149}', ['\u{2bc}', 'N', '\u{0}']),
+ ('\u{14b}', ['\u{14a}', '\u{0}', '\u{0}']), ('\u{14d}', ['\u{14c}', '\u{0}', '\u{0}']),
+ ('\u{14f}', ['\u{14e}', '\u{0}', '\u{0}']), ('\u{151}', ['\u{150}', '\u{0}', '\u{0}']),
+ ('\u{153}', ['\u{152}', '\u{0}', '\u{0}']), ('\u{155}', ['\u{154}', '\u{0}', '\u{0}']),
+ ('\u{157}', ['\u{156}', '\u{0}', '\u{0}']), ('\u{159}', ['\u{158}', '\u{0}', '\u{0}']),
+ ('\u{15b}', ['\u{15a}', '\u{0}', '\u{0}']), ('\u{15d}', ['\u{15c}', '\u{0}', '\u{0}']),
+ ('\u{15f}', ['\u{15e}', '\u{0}', '\u{0}']), ('\u{161}', ['\u{160}', '\u{0}', '\u{0}']),
+ ('\u{163}', ['\u{162}', '\u{0}', '\u{0}']), ('\u{165}', ['\u{164}', '\u{0}', '\u{0}']),
+ ('\u{167}', ['\u{166}', '\u{0}', '\u{0}']), ('\u{169}', ['\u{168}', '\u{0}', '\u{0}']),
+ ('\u{16b}', ['\u{16a}', '\u{0}', '\u{0}']), ('\u{16d}', ['\u{16c}', '\u{0}', '\u{0}']),
+ ('\u{16f}', ['\u{16e}', '\u{0}', '\u{0}']), ('\u{171}', ['\u{170}', '\u{0}', '\u{0}']),
+ ('\u{173}', ['\u{172}', '\u{0}', '\u{0}']), ('\u{175}', ['\u{174}', '\u{0}', '\u{0}']),
+ ('\u{177}', ['\u{176}', '\u{0}', '\u{0}']), ('\u{17a}', ['\u{179}', '\u{0}', '\u{0}']),
+ ('\u{17c}', ['\u{17b}', '\u{0}', '\u{0}']), ('\u{17e}', ['\u{17d}', '\u{0}', '\u{0}']),
+ ('\u{17f}', ['S', '\u{0}', '\u{0}']), ('\u{180}', ['\u{243}', '\u{0}', '\u{0}']),
+ ('\u{183}', ['\u{182}', '\u{0}', '\u{0}']), ('\u{185}', ['\u{184}', '\u{0}', '\u{0}']),
+ ('\u{188}', ['\u{187}', '\u{0}', '\u{0}']), ('\u{18c}', ['\u{18b}', '\u{0}', '\u{0}']),
+ ('\u{192}', ['\u{191}', '\u{0}', '\u{0}']), ('\u{195}', ['\u{1f6}', '\u{0}', '\u{0}']),
+ ('\u{199}', ['\u{198}', '\u{0}', '\u{0}']), ('\u{19a}', ['\u{23d}', '\u{0}', '\u{0}']),
+ ('\u{19e}', ['\u{220}', '\u{0}', '\u{0}']), ('\u{1a1}', ['\u{1a0}', '\u{0}', '\u{0}']),
+ ('\u{1a3}', ['\u{1a2}', '\u{0}', '\u{0}']), ('\u{1a5}', ['\u{1a4}', '\u{0}', '\u{0}']),
+ ('\u{1a8}', ['\u{1a7}', '\u{0}', '\u{0}']), ('\u{1ad}', ['\u{1ac}', '\u{0}', '\u{0}']),
+ ('\u{1b0}', ['\u{1af}', '\u{0}', '\u{0}']), ('\u{1b4}', ['\u{1b3}', '\u{0}', '\u{0}']),
+ ('\u{1b6}', ['\u{1b5}', '\u{0}', '\u{0}']), ('\u{1b9}', ['\u{1b8}', '\u{0}', '\u{0}']),
+ ('\u{1bd}', ['\u{1bc}', '\u{0}', '\u{0}']), ('\u{1bf}', ['\u{1f7}', '\u{0}', '\u{0}']),
+ ('\u{1c5}', ['\u{1c4}', '\u{0}', '\u{0}']), ('\u{1c6}', ['\u{1c4}', '\u{0}', '\u{0}']),
+ ('\u{1c8}', ['\u{1c7}', '\u{0}', '\u{0}']), ('\u{1c9}', ['\u{1c7}', '\u{0}', '\u{0}']),
+ ('\u{1cb}', ['\u{1ca}', '\u{0}', '\u{0}']), ('\u{1cc}', ['\u{1ca}', '\u{0}', '\u{0}']),
+ ('\u{1ce}', ['\u{1cd}', '\u{0}', '\u{0}']), ('\u{1d0}', ['\u{1cf}', '\u{0}', '\u{0}']),
+ ('\u{1d2}', ['\u{1d1}', '\u{0}', '\u{0}']), ('\u{1d4}', ['\u{1d3}', '\u{0}', '\u{0}']),
+ ('\u{1d6}', ['\u{1d5}', '\u{0}', '\u{0}']), ('\u{1d8}', ['\u{1d7}', '\u{0}', '\u{0}']),
+ ('\u{1da}', ['\u{1d9}', '\u{0}', '\u{0}']), ('\u{1dc}', ['\u{1db}', '\u{0}', '\u{0}']),
+ ('\u{1dd}', ['\u{18e}', '\u{0}', '\u{0}']), ('\u{1df}', ['\u{1de}', '\u{0}', '\u{0}']),
+ ('\u{1e1}', ['\u{1e0}', '\u{0}', '\u{0}']), ('\u{1e3}', ['\u{1e2}', '\u{0}', '\u{0}']),
+ ('\u{1e5}', ['\u{1e4}', '\u{0}', '\u{0}']), ('\u{1e7}', ['\u{1e6}', '\u{0}', '\u{0}']),
+ ('\u{1e9}', ['\u{1e8}', '\u{0}', '\u{0}']), ('\u{1eb}', ['\u{1ea}', '\u{0}', '\u{0}']),
+ ('\u{1ed}', ['\u{1ec}', '\u{0}', '\u{0}']), ('\u{1ef}', ['\u{1ee}', '\u{0}', '\u{0}']),
+ ('\u{1f0}', ['J', '\u{30c}', '\u{0}']), ('\u{1f2}', ['\u{1f1}', '\u{0}', '\u{0}']),
+ ('\u{1f3}', ['\u{1f1}', '\u{0}', '\u{0}']), ('\u{1f5}', ['\u{1f4}', '\u{0}', '\u{0}']),
+ ('\u{1f9}', ['\u{1f8}', '\u{0}', '\u{0}']), ('\u{1fb}', ['\u{1fa}', '\u{0}', '\u{0}']),
+ ('\u{1fd}', ['\u{1fc}', '\u{0}', '\u{0}']), ('\u{1ff}', ['\u{1fe}', '\u{0}', '\u{0}']),
+ ('\u{201}', ['\u{200}', '\u{0}', '\u{0}']), ('\u{203}', ['\u{202}', '\u{0}', '\u{0}']),
+ ('\u{205}', ['\u{204}', '\u{0}', '\u{0}']), ('\u{207}', ['\u{206}', '\u{0}', '\u{0}']),
+ ('\u{209}', ['\u{208}', '\u{0}', '\u{0}']), ('\u{20b}', ['\u{20a}', '\u{0}', '\u{0}']),
+ ('\u{20d}', ['\u{20c}', '\u{0}', '\u{0}']), ('\u{20f}', ['\u{20e}', '\u{0}', '\u{0}']),
+ ('\u{211}', ['\u{210}', '\u{0}', '\u{0}']), ('\u{213}', ['\u{212}', '\u{0}', '\u{0}']),
+ ('\u{215}', ['\u{214}', '\u{0}', '\u{0}']), ('\u{217}', ['\u{216}', '\u{0}', '\u{0}']),
+ ('\u{219}', ['\u{218}', '\u{0}', '\u{0}']), ('\u{21b}', ['\u{21a}', '\u{0}', '\u{0}']),
+ ('\u{21d}', ['\u{21c}', '\u{0}', '\u{0}']), ('\u{21f}', ['\u{21e}', '\u{0}', '\u{0}']),
+ ('\u{223}', ['\u{222}', '\u{0}', '\u{0}']), ('\u{225}', ['\u{224}', '\u{0}', '\u{0}']),
+ ('\u{227}', ['\u{226}', '\u{0}', '\u{0}']), ('\u{229}', ['\u{228}', '\u{0}', '\u{0}']),
+ ('\u{22b}', ['\u{22a}', '\u{0}', '\u{0}']), ('\u{22d}', ['\u{22c}', '\u{0}', '\u{0}']),
+ ('\u{22f}', ['\u{22e}', '\u{0}', '\u{0}']), ('\u{231}', ['\u{230}', '\u{0}', '\u{0}']),
+ ('\u{233}', ['\u{232}', '\u{0}', '\u{0}']), ('\u{23c}', ['\u{23b}', '\u{0}', '\u{0}']),
+ ('\u{23f}', ['\u{2c7e}', '\u{0}', '\u{0}']), ('\u{240}', ['\u{2c7f}', '\u{0}', '\u{0}']),
+ ('\u{242}', ['\u{241}', '\u{0}', '\u{0}']), ('\u{247}', ['\u{246}', '\u{0}', '\u{0}']),
+ ('\u{249}', ['\u{248}', '\u{0}', '\u{0}']), ('\u{24b}', ['\u{24a}', '\u{0}', '\u{0}']),
+ ('\u{24d}', ['\u{24c}', '\u{0}', '\u{0}']), ('\u{24f}', ['\u{24e}', '\u{0}', '\u{0}']),
+ ('\u{250}', ['\u{2c6f}', '\u{0}', '\u{0}']), ('\u{251}', ['\u{2c6d}', '\u{0}', '\u{0}']),
+ ('\u{252}', ['\u{2c70}', '\u{0}', '\u{0}']), ('\u{253}', ['\u{181}', '\u{0}', '\u{0}']),
+ ('\u{254}', ['\u{186}', '\u{0}', '\u{0}']), ('\u{256}', ['\u{189}', '\u{0}', '\u{0}']),
+ ('\u{257}', ['\u{18a}', '\u{0}', '\u{0}']), ('\u{259}', ['\u{18f}', '\u{0}', '\u{0}']),
+ ('\u{25b}', ['\u{190}', '\u{0}', '\u{0}']), ('\u{25c}', ['\u{a7ab}', '\u{0}', '\u{0}']),
+ ('\u{260}', ['\u{193}', '\u{0}', '\u{0}']), ('\u{261}', ['\u{a7ac}', '\u{0}', '\u{0}']),
+ ('\u{263}', ['\u{194}', '\u{0}', '\u{0}']), ('\u{265}', ['\u{a78d}', '\u{0}', '\u{0}']),
+ ('\u{266}', ['\u{a7aa}', '\u{0}', '\u{0}']), ('\u{268}', ['\u{197}', '\u{0}', '\u{0}']),
+ ('\u{269}', ['\u{196}', '\u{0}', '\u{0}']), ('\u{26a}', ['\u{a7ae}', '\u{0}', '\u{0}']),
+ ('\u{26b}', ['\u{2c62}', '\u{0}', '\u{0}']), ('\u{26c}', ['\u{a7ad}', '\u{0}', '\u{0}']),
+ ('\u{26f}', ['\u{19c}', '\u{0}', '\u{0}']), ('\u{271}', ['\u{2c6e}', '\u{0}', '\u{0}']),
+ ('\u{272}', ['\u{19d}', '\u{0}', '\u{0}']), ('\u{275}', ['\u{19f}', '\u{0}', '\u{0}']),
+ ('\u{27d}', ['\u{2c64}', '\u{0}', '\u{0}']), ('\u{280}', ['\u{1a6}', '\u{0}', '\u{0}']),
+ ('\u{282}', ['\u{a7c5}', '\u{0}', '\u{0}']), ('\u{283}', ['\u{1a9}', '\u{0}', '\u{0}']),
+ ('\u{287}', ['\u{a7b1}', '\u{0}', '\u{0}']), ('\u{288}', ['\u{1ae}', '\u{0}', '\u{0}']),
+ ('\u{289}', ['\u{244}', '\u{0}', '\u{0}']), ('\u{28a}', ['\u{1b1}', '\u{0}', '\u{0}']),
+ ('\u{28b}', ['\u{1b2}', '\u{0}', '\u{0}']), ('\u{28c}', ['\u{245}', '\u{0}', '\u{0}']),
+ ('\u{292}', ['\u{1b7}', '\u{0}', '\u{0}']), ('\u{29d}', ['\u{a7b2}', '\u{0}', '\u{0}']),
+ ('\u{29e}', ['\u{a7b0}', '\u{0}', '\u{0}']), ('\u{345}', ['\u{399}', '\u{0}', '\u{0}']),
+ ('\u{371}', ['\u{370}', '\u{0}', '\u{0}']), ('\u{373}', ['\u{372}', '\u{0}', '\u{0}']),
+ ('\u{377}', ['\u{376}', '\u{0}', '\u{0}']), ('\u{37b}', ['\u{3fd}', '\u{0}', '\u{0}']),
+ ('\u{37c}', ['\u{3fe}', '\u{0}', '\u{0}']), ('\u{37d}', ['\u{3ff}', '\u{0}', '\u{0}']),
+ ('\u{390}', ['\u{399}', '\u{308}', '\u{301}']), ('\u{3ac}', ['\u{386}', '\u{0}', '\u{0}']),
+ ('\u{3ad}', ['\u{388}', '\u{0}', '\u{0}']), ('\u{3ae}', ['\u{389}', '\u{0}', '\u{0}']),
+ ('\u{3af}', ['\u{38a}', '\u{0}', '\u{0}']), ('\u{3b0}', ['\u{3a5}', '\u{308}', '\u{301}']),
+ ('\u{3b1}', ['\u{391}', '\u{0}', '\u{0}']), ('\u{3b2}', ['\u{392}', '\u{0}', '\u{0}']),
+ ('\u{3b3}', ['\u{393}', '\u{0}', '\u{0}']), ('\u{3b4}', ['\u{394}', '\u{0}', '\u{0}']),
+ ('\u{3b5}', ['\u{395}', '\u{0}', '\u{0}']), ('\u{3b6}', ['\u{396}', '\u{0}', '\u{0}']),
+ ('\u{3b7}', ['\u{397}', '\u{0}', '\u{0}']), ('\u{3b8}', ['\u{398}', '\u{0}', '\u{0}']),
+ ('\u{3b9}', ['\u{399}', '\u{0}', '\u{0}']), ('\u{3ba}', ['\u{39a}', '\u{0}', '\u{0}']),
+ ('\u{3bb}', ['\u{39b}', '\u{0}', '\u{0}']), ('\u{3bc}', ['\u{39c}', '\u{0}', '\u{0}']),
+ ('\u{3bd}', ['\u{39d}', '\u{0}', '\u{0}']), ('\u{3be}', ['\u{39e}', '\u{0}', '\u{0}']),
+ ('\u{3bf}', ['\u{39f}', '\u{0}', '\u{0}']), ('\u{3c0}', ['\u{3a0}', '\u{0}', '\u{0}']),
+ ('\u{3c1}', ['\u{3a1}', '\u{0}', '\u{0}']), ('\u{3c2}', ['\u{3a3}', '\u{0}', '\u{0}']),
+ ('\u{3c3}', ['\u{3a3}', '\u{0}', '\u{0}']), ('\u{3c4}', ['\u{3a4}', '\u{0}', '\u{0}']),
+ ('\u{3c5}', ['\u{3a5}', '\u{0}', '\u{0}']), ('\u{3c6}', ['\u{3a6}', '\u{0}', '\u{0}']),
+ ('\u{3c7}', ['\u{3a7}', '\u{0}', '\u{0}']), ('\u{3c8}', ['\u{3a8}', '\u{0}', '\u{0}']),
+ ('\u{3c9}', ['\u{3a9}', '\u{0}', '\u{0}']), ('\u{3ca}', ['\u{3aa}', '\u{0}', '\u{0}']),
+ ('\u{3cb}', ['\u{3ab}', '\u{0}', '\u{0}']), ('\u{3cc}', ['\u{38c}', '\u{0}', '\u{0}']),
+ ('\u{3cd}', ['\u{38e}', '\u{0}', '\u{0}']), ('\u{3ce}', ['\u{38f}', '\u{0}', '\u{0}']),
+ ('\u{3d0}', ['\u{392}', '\u{0}', '\u{0}']), ('\u{3d1}', ['\u{398}', '\u{0}', '\u{0}']),
+ ('\u{3d5}', ['\u{3a6}', '\u{0}', '\u{0}']), ('\u{3d6}', ['\u{3a0}', '\u{0}', '\u{0}']),
+ ('\u{3d7}', ['\u{3cf}', '\u{0}', '\u{0}']), ('\u{3d9}', ['\u{3d8}', '\u{0}', '\u{0}']),
+ ('\u{3db}', ['\u{3da}', '\u{0}', '\u{0}']), ('\u{3dd}', ['\u{3dc}', '\u{0}', '\u{0}']),
+ ('\u{3df}', ['\u{3de}', '\u{0}', '\u{0}']), ('\u{3e1}', ['\u{3e0}', '\u{0}', '\u{0}']),
+ ('\u{3e3}', ['\u{3e2}', '\u{0}', '\u{0}']), ('\u{3e5}', ['\u{3e4}', '\u{0}', '\u{0}']),
+ ('\u{3e7}', ['\u{3e6}', '\u{0}', '\u{0}']), ('\u{3e9}', ['\u{3e8}', '\u{0}', '\u{0}']),
+ ('\u{3eb}', ['\u{3ea}', '\u{0}', '\u{0}']), ('\u{3ed}', ['\u{3ec}', '\u{0}', '\u{0}']),
+ ('\u{3ef}', ['\u{3ee}', '\u{0}', '\u{0}']), ('\u{3f0}', ['\u{39a}', '\u{0}', '\u{0}']),
+ ('\u{3f1}', ['\u{3a1}', '\u{0}', '\u{0}']), ('\u{3f2}', ['\u{3f9}', '\u{0}', '\u{0}']),
+ ('\u{3f3}', ['\u{37f}', '\u{0}', '\u{0}']), ('\u{3f5}', ['\u{395}', '\u{0}', '\u{0}']),
+ ('\u{3f8}', ['\u{3f7}', '\u{0}', '\u{0}']), ('\u{3fb}', ['\u{3fa}', '\u{0}', '\u{0}']),
+ ('\u{430}', ['\u{410}', '\u{0}', '\u{0}']), ('\u{431}', ['\u{411}', '\u{0}', '\u{0}']),
+ ('\u{432}', ['\u{412}', '\u{0}', '\u{0}']), ('\u{433}', ['\u{413}', '\u{0}', '\u{0}']),
+ ('\u{434}', ['\u{414}', '\u{0}', '\u{0}']), ('\u{435}', ['\u{415}', '\u{0}', '\u{0}']),
+ ('\u{436}', ['\u{416}', '\u{0}', '\u{0}']), ('\u{437}', ['\u{417}', '\u{0}', '\u{0}']),
+ ('\u{438}', ['\u{418}', '\u{0}', '\u{0}']), ('\u{439}', ['\u{419}', '\u{0}', '\u{0}']),
+ ('\u{43a}', ['\u{41a}', '\u{0}', '\u{0}']), ('\u{43b}', ['\u{41b}', '\u{0}', '\u{0}']),
+ ('\u{43c}', ['\u{41c}', '\u{0}', '\u{0}']), ('\u{43d}', ['\u{41d}', '\u{0}', '\u{0}']),
+ ('\u{43e}', ['\u{41e}', '\u{0}', '\u{0}']), ('\u{43f}', ['\u{41f}', '\u{0}', '\u{0}']),
+ ('\u{440}', ['\u{420}', '\u{0}', '\u{0}']), ('\u{441}', ['\u{421}', '\u{0}', '\u{0}']),
+ ('\u{442}', ['\u{422}', '\u{0}', '\u{0}']), ('\u{443}', ['\u{423}', '\u{0}', '\u{0}']),
+ ('\u{444}', ['\u{424}', '\u{0}', '\u{0}']), ('\u{445}', ['\u{425}', '\u{0}', '\u{0}']),
+ ('\u{446}', ['\u{426}', '\u{0}', '\u{0}']), ('\u{447}', ['\u{427}', '\u{0}', '\u{0}']),
+ ('\u{448}', ['\u{428}', '\u{0}', '\u{0}']), ('\u{449}', ['\u{429}', '\u{0}', '\u{0}']),
+ ('\u{44a}', ['\u{42a}', '\u{0}', '\u{0}']), ('\u{44b}', ['\u{42b}', '\u{0}', '\u{0}']),
+ ('\u{44c}', ['\u{42c}', '\u{0}', '\u{0}']), ('\u{44d}', ['\u{42d}', '\u{0}', '\u{0}']),
+ ('\u{44e}', ['\u{42e}', '\u{0}', '\u{0}']), ('\u{44f}', ['\u{42f}', '\u{0}', '\u{0}']),
+ ('\u{450}', ['\u{400}', '\u{0}', '\u{0}']), ('\u{451}', ['\u{401}', '\u{0}', '\u{0}']),
+ ('\u{452}', ['\u{402}', '\u{0}', '\u{0}']), ('\u{453}', ['\u{403}', '\u{0}', '\u{0}']),
+ ('\u{454}', ['\u{404}', '\u{0}', '\u{0}']), ('\u{455}', ['\u{405}', '\u{0}', '\u{0}']),
+ ('\u{456}', ['\u{406}', '\u{0}', '\u{0}']), ('\u{457}', ['\u{407}', '\u{0}', '\u{0}']),
+ ('\u{458}', ['\u{408}', '\u{0}', '\u{0}']), ('\u{459}', ['\u{409}', '\u{0}', '\u{0}']),
+ ('\u{45a}', ['\u{40a}', '\u{0}', '\u{0}']), ('\u{45b}', ['\u{40b}', '\u{0}', '\u{0}']),
+ ('\u{45c}', ['\u{40c}', '\u{0}', '\u{0}']), ('\u{45d}', ['\u{40d}', '\u{0}', '\u{0}']),
+ ('\u{45e}', ['\u{40e}', '\u{0}', '\u{0}']), ('\u{45f}', ['\u{40f}', '\u{0}', '\u{0}']),
+ ('\u{461}', ['\u{460}', '\u{0}', '\u{0}']), ('\u{463}', ['\u{462}', '\u{0}', '\u{0}']),
+ ('\u{465}', ['\u{464}', '\u{0}', '\u{0}']), ('\u{467}', ['\u{466}', '\u{0}', '\u{0}']),
+ ('\u{469}', ['\u{468}', '\u{0}', '\u{0}']), ('\u{46b}', ['\u{46a}', '\u{0}', '\u{0}']),
+ ('\u{46d}', ['\u{46c}', '\u{0}', '\u{0}']), ('\u{46f}', ['\u{46e}', '\u{0}', '\u{0}']),
+ ('\u{471}', ['\u{470}', '\u{0}', '\u{0}']), ('\u{473}', ['\u{472}', '\u{0}', '\u{0}']),
+ ('\u{475}', ['\u{474}', '\u{0}', '\u{0}']), ('\u{477}', ['\u{476}', '\u{0}', '\u{0}']),
+ ('\u{479}', ['\u{478}', '\u{0}', '\u{0}']), ('\u{47b}', ['\u{47a}', '\u{0}', '\u{0}']),
+ ('\u{47d}', ['\u{47c}', '\u{0}', '\u{0}']), ('\u{47f}', ['\u{47e}', '\u{0}', '\u{0}']),
+ ('\u{481}', ['\u{480}', '\u{0}', '\u{0}']), ('\u{48b}', ['\u{48a}', '\u{0}', '\u{0}']),
+ ('\u{48d}', ['\u{48c}', '\u{0}', '\u{0}']), ('\u{48f}', ['\u{48e}', '\u{0}', '\u{0}']),
+ ('\u{491}', ['\u{490}', '\u{0}', '\u{0}']), ('\u{493}', ['\u{492}', '\u{0}', '\u{0}']),
+ ('\u{495}', ['\u{494}', '\u{0}', '\u{0}']), ('\u{497}', ['\u{496}', '\u{0}', '\u{0}']),
+ ('\u{499}', ['\u{498}', '\u{0}', '\u{0}']), ('\u{49b}', ['\u{49a}', '\u{0}', '\u{0}']),
+ ('\u{49d}', ['\u{49c}', '\u{0}', '\u{0}']), ('\u{49f}', ['\u{49e}', '\u{0}', '\u{0}']),
+ ('\u{4a1}', ['\u{4a0}', '\u{0}', '\u{0}']), ('\u{4a3}', ['\u{4a2}', '\u{0}', '\u{0}']),
+ ('\u{4a5}', ['\u{4a4}', '\u{0}', '\u{0}']), ('\u{4a7}', ['\u{4a6}', '\u{0}', '\u{0}']),
+ ('\u{4a9}', ['\u{4a8}', '\u{0}', '\u{0}']), ('\u{4ab}', ['\u{4aa}', '\u{0}', '\u{0}']),
+ ('\u{4ad}', ['\u{4ac}', '\u{0}', '\u{0}']), ('\u{4af}', ['\u{4ae}', '\u{0}', '\u{0}']),
+ ('\u{4b1}', ['\u{4b0}', '\u{0}', '\u{0}']), ('\u{4b3}', ['\u{4b2}', '\u{0}', '\u{0}']),
+ ('\u{4b5}', ['\u{4b4}', '\u{0}', '\u{0}']), ('\u{4b7}', ['\u{4b6}', '\u{0}', '\u{0}']),
+ ('\u{4b9}', ['\u{4b8}', '\u{0}', '\u{0}']), ('\u{4bb}', ['\u{4ba}', '\u{0}', '\u{0}']),
+ ('\u{4bd}', ['\u{4bc}', '\u{0}', '\u{0}']), ('\u{4bf}', ['\u{4be}', '\u{0}', '\u{0}']),
+ ('\u{4c2}', ['\u{4c1}', '\u{0}', '\u{0}']), ('\u{4c4}', ['\u{4c3}', '\u{0}', '\u{0}']),
+ ('\u{4c6}', ['\u{4c5}', '\u{0}', '\u{0}']), ('\u{4c8}', ['\u{4c7}', '\u{0}', '\u{0}']),
+ ('\u{4ca}', ['\u{4c9}', '\u{0}', '\u{0}']), ('\u{4cc}', ['\u{4cb}', '\u{0}', '\u{0}']),
+ ('\u{4ce}', ['\u{4cd}', '\u{0}', '\u{0}']), ('\u{4cf}', ['\u{4c0}', '\u{0}', '\u{0}']),
+ ('\u{4d1}', ['\u{4d0}', '\u{0}', '\u{0}']), ('\u{4d3}', ['\u{4d2}', '\u{0}', '\u{0}']),
+ ('\u{4d5}', ['\u{4d4}', '\u{0}', '\u{0}']), ('\u{4d7}', ['\u{4d6}', '\u{0}', '\u{0}']),
+ ('\u{4d9}', ['\u{4d8}', '\u{0}', '\u{0}']), ('\u{4db}', ['\u{4da}', '\u{0}', '\u{0}']),
+ ('\u{4dd}', ['\u{4dc}', '\u{0}', '\u{0}']), ('\u{4df}', ['\u{4de}', '\u{0}', '\u{0}']),
+ ('\u{4e1}', ['\u{4e0}', '\u{0}', '\u{0}']), ('\u{4e3}', ['\u{4e2}', '\u{0}', '\u{0}']),
+ ('\u{4e5}', ['\u{4e4}', '\u{0}', '\u{0}']), ('\u{4e7}', ['\u{4e6}', '\u{0}', '\u{0}']),
+ ('\u{4e9}', ['\u{4e8}', '\u{0}', '\u{0}']), ('\u{4eb}', ['\u{4ea}', '\u{0}', '\u{0}']),
+ ('\u{4ed}', ['\u{4ec}', '\u{0}', '\u{0}']), ('\u{4ef}', ['\u{4ee}', '\u{0}', '\u{0}']),
+ ('\u{4f1}', ['\u{4f0}', '\u{0}', '\u{0}']), ('\u{4f3}', ['\u{4f2}', '\u{0}', '\u{0}']),
+ ('\u{4f5}', ['\u{4f4}', '\u{0}', '\u{0}']), ('\u{4f7}', ['\u{4f6}', '\u{0}', '\u{0}']),
+ ('\u{4f9}', ['\u{4f8}', '\u{0}', '\u{0}']), ('\u{4fb}', ['\u{4fa}', '\u{0}', '\u{0}']),
+ ('\u{4fd}', ['\u{4fc}', '\u{0}', '\u{0}']), ('\u{4ff}', ['\u{4fe}', '\u{0}', '\u{0}']),
+ ('\u{501}', ['\u{500}', '\u{0}', '\u{0}']), ('\u{503}', ['\u{502}', '\u{0}', '\u{0}']),
+ ('\u{505}', ['\u{504}', '\u{0}', '\u{0}']), ('\u{507}', ['\u{506}', '\u{0}', '\u{0}']),
+ ('\u{509}', ['\u{508}', '\u{0}', '\u{0}']), ('\u{50b}', ['\u{50a}', '\u{0}', '\u{0}']),
+ ('\u{50d}', ['\u{50c}', '\u{0}', '\u{0}']), ('\u{50f}', ['\u{50e}', '\u{0}', '\u{0}']),
+ ('\u{511}', ['\u{510}', '\u{0}', '\u{0}']), ('\u{513}', ['\u{512}', '\u{0}', '\u{0}']),
+ ('\u{515}', ['\u{514}', '\u{0}', '\u{0}']), ('\u{517}', ['\u{516}', '\u{0}', '\u{0}']),
+ ('\u{519}', ['\u{518}', '\u{0}', '\u{0}']), ('\u{51b}', ['\u{51a}', '\u{0}', '\u{0}']),
+ ('\u{51d}', ['\u{51c}', '\u{0}', '\u{0}']), ('\u{51f}', ['\u{51e}', '\u{0}', '\u{0}']),
+ ('\u{521}', ['\u{520}', '\u{0}', '\u{0}']), ('\u{523}', ['\u{522}', '\u{0}', '\u{0}']),
+ ('\u{525}', ['\u{524}', '\u{0}', '\u{0}']), ('\u{527}', ['\u{526}', '\u{0}', '\u{0}']),
+ ('\u{529}', ['\u{528}', '\u{0}', '\u{0}']), ('\u{52b}', ['\u{52a}', '\u{0}', '\u{0}']),
+ ('\u{52d}', ['\u{52c}', '\u{0}', '\u{0}']), ('\u{52f}', ['\u{52e}', '\u{0}', '\u{0}']),
+ ('\u{561}', ['\u{531}', '\u{0}', '\u{0}']), ('\u{562}', ['\u{532}', '\u{0}', '\u{0}']),
+ ('\u{563}', ['\u{533}', '\u{0}', '\u{0}']), ('\u{564}', ['\u{534}', '\u{0}', '\u{0}']),
+ ('\u{565}', ['\u{535}', '\u{0}', '\u{0}']), ('\u{566}', ['\u{536}', '\u{0}', '\u{0}']),
+ ('\u{567}', ['\u{537}', '\u{0}', '\u{0}']), ('\u{568}', ['\u{538}', '\u{0}', '\u{0}']),
+ ('\u{569}', ['\u{539}', '\u{0}', '\u{0}']), ('\u{56a}', ['\u{53a}', '\u{0}', '\u{0}']),
+ ('\u{56b}', ['\u{53b}', '\u{0}', '\u{0}']), ('\u{56c}', ['\u{53c}', '\u{0}', '\u{0}']),
+ ('\u{56d}', ['\u{53d}', '\u{0}', '\u{0}']), ('\u{56e}', ['\u{53e}', '\u{0}', '\u{0}']),
+ ('\u{56f}', ['\u{53f}', '\u{0}', '\u{0}']), ('\u{570}', ['\u{540}', '\u{0}', '\u{0}']),
+ ('\u{571}', ['\u{541}', '\u{0}', '\u{0}']), ('\u{572}', ['\u{542}', '\u{0}', '\u{0}']),
+ ('\u{573}', ['\u{543}', '\u{0}', '\u{0}']), ('\u{574}', ['\u{544}', '\u{0}', '\u{0}']),
+ ('\u{575}', ['\u{545}', '\u{0}', '\u{0}']), ('\u{576}', ['\u{546}', '\u{0}', '\u{0}']),
+ ('\u{577}', ['\u{547}', '\u{0}', '\u{0}']), ('\u{578}', ['\u{548}', '\u{0}', '\u{0}']),
+ ('\u{579}', ['\u{549}', '\u{0}', '\u{0}']), ('\u{57a}', ['\u{54a}', '\u{0}', '\u{0}']),
+ ('\u{57b}', ['\u{54b}', '\u{0}', '\u{0}']), ('\u{57c}', ['\u{54c}', '\u{0}', '\u{0}']),
+ ('\u{57d}', ['\u{54d}', '\u{0}', '\u{0}']), ('\u{57e}', ['\u{54e}', '\u{0}', '\u{0}']),
+ ('\u{57f}', ['\u{54f}', '\u{0}', '\u{0}']), ('\u{580}', ['\u{550}', '\u{0}', '\u{0}']),
+ ('\u{581}', ['\u{551}', '\u{0}', '\u{0}']), ('\u{582}', ['\u{552}', '\u{0}', '\u{0}']),
+ ('\u{583}', ['\u{553}', '\u{0}', '\u{0}']), ('\u{584}', ['\u{554}', '\u{0}', '\u{0}']),
+ ('\u{585}', ['\u{555}', '\u{0}', '\u{0}']), ('\u{586}', ['\u{556}', '\u{0}', '\u{0}']),
+ ('\u{587}', ['\u{535}', '\u{552}', '\u{0}']), ('\u{10d0}', ['\u{1c90}', '\u{0}', '\u{0}']),
+ ('\u{10d1}', ['\u{1c91}', '\u{0}', '\u{0}']), ('\u{10d2}', ['\u{1c92}', '\u{0}', '\u{0}']),
+ ('\u{10d3}', ['\u{1c93}', '\u{0}', '\u{0}']), ('\u{10d4}', ['\u{1c94}', '\u{0}', '\u{0}']),
+ ('\u{10d5}', ['\u{1c95}', '\u{0}', '\u{0}']), ('\u{10d6}', ['\u{1c96}', '\u{0}', '\u{0}']),
+ ('\u{10d7}', ['\u{1c97}', '\u{0}', '\u{0}']), ('\u{10d8}', ['\u{1c98}', '\u{0}', '\u{0}']),
+ ('\u{10d9}', ['\u{1c99}', '\u{0}', '\u{0}']), ('\u{10da}', ['\u{1c9a}', '\u{0}', '\u{0}']),
+ ('\u{10db}', ['\u{1c9b}', '\u{0}', '\u{0}']), ('\u{10dc}', ['\u{1c9c}', '\u{0}', '\u{0}']),
+ ('\u{10dd}', ['\u{1c9d}', '\u{0}', '\u{0}']), ('\u{10de}', ['\u{1c9e}', '\u{0}', '\u{0}']),
+ ('\u{10df}', ['\u{1c9f}', '\u{0}', '\u{0}']), ('\u{10e0}', ['\u{1ca0}', '\u{0}', '\u{0}']),
+ ('\u{10e1}', ['\u{1ca1}', '\u{0}', '\u{0}']), ('\u{10e2}', ['\u{1ca2}', '\u{0}', '\u{0}']),
+ ('\u{10e3}', ['\u{1ca3}', '\u{0}', '\u{0}']), ('\u{10e4}', ['\u{1ca4}', '\u{0}', '\u{0}']),
+ ('\u{10e5}', ['\u{1ca5}', '\u{0}', '\u{0}']), ('\u{10e6}', ['\u{1ca6}', '\u{0}', '\u{0}']),
+ ('\u{10e7}', ['\u{1ca7}', '\u{0}', '\u{0}']), ('\u{10e8}', ['\u{1ca8}', '\u{0}', '\u{0}']),
+ ('\u{10e9}', ['\u{1ca9}', '\u{0}', '\u{0}']), ('\u{10ea}', ['\u{1caa}', '\u{0}', '\u{0}']),
+ ('\u{10eb}', ['\u{1cab}', '\u{0}', '\u{0}']), ('\u{10ec}', ['\u{1cac}', '\u{0}', '\u{0}']),
+ ('\u{10ed}', ['\u{1cad}', '\u{0}', '\u{0}']), ('\u{10ee}', ['\u{1cae}', '\u{0}', '\u{0}']),
+ ('\u{10ef}', ['\u{1caf}', '\u{0}', '\u{0}']), ('\u{10f0}', ['\u{1cb0}', '\u{0}', '\u{0}']),
+ ('\u{10f1}', ['\u{1cb1}', '\u{0}', '\u{0}']), ('\u{10f2}', ['\u{1cb2}', '\u{0}', '\u{0}']),
+ ('\u{10f3}', ['\u{1cb3}', '\u{0}', '\u{0}']), ('\u{10f4}', ['\u{1cb4}', '\u{0}', '\u{0}']),
+ ('\u{10f5}', ['\u{1cb5}', '\u{0}', '\u{0}']), ('\u{10f6}', ['\u{1cb6}', '\u{0}', '\u{0}']),
+ ('\u{10f7}', ['\u{1cb7}', '\u{0}', '\u{0}']), ('\u{10f8}', ['\u{1cb8}', '\u{0}', '\u{0}']),
+ ('\u{10f9}', ['\u{1cb9}', '\u{0}', '\u{0}']), ('\u{10fa}', ['\u{1cba}', '\u{0}', '\u{0}']),
+ ('\u{10fd}', ['\u{1cbd}', '\u{0}', '\u{0}']), ('\u{10fe}', ['\u{1cbe}', '\u{0}', '\u{0}']),
+ ('\u{10ff}', ['\u{1cbf}', '\u{0}', '\u{0}']), ('\u{13f8}', ['\u{13f0}', '\u{0}', '\u{0}']),
+ ('\u{13f9}', ['\u{13f1}', '\u{0}', '\u{0}']), ('\u{13fa}', ['\u{13f2}', '\u{0}', '\u{0}']),
+ ('\u{13fb}', ['\u{13f3}', '\u{0}', '\u{0}']), ('\u{13fc}', ['\u{13f4}', '\u{0}', '\u{0}']),
+ ('\u{13fd}', ['\u{13f5}', '\u{0}', '\u{0}']), ('\u{1c80}', ['\u{412}', '\u{0}', '\u{0}']),
+ ('\u{1c81}', ['\u{414}', '\u{0}', '\u{0}']), ('\u{1c82}', ['\u{41e}', '\u{0}', '\u{0}']),
+ ('\u{1c83}', ['\u{421}', '\u{0}', '\u{0}']), ('\u{1c84}', ['\u{422}', '\u{0}', '\u{0}']),
+ ('\u{1c85}', ['\u{422}', '\u{0}', '\u{0}']), ('\u{1c86}', ['\u{42a}', '\u{0}', '\u{0}']),
+ ('\u{1c87}', ['\u{462}', '\u{0}', '\u{0}']), ('\u{1c88}', ['\u{a64a}', '\u{0}', '\u{0}']),
+ ('\u{1d79}', ['\u{a77d}', '\u{0}', '\u{0}']), ('\u{1d7d}', ['\u{2c63}', '\u{0}', '\u{0}']),
+ ('\u{1d8e}', ['\u{a7c6}', '\u{0}', '\u{0}']), ('\u{1e01}', ['\u{1e00}', '\u{0}', '\u{0}']),
+ ('\u{1e03}', ['\u{1e02}', '\u{0}', '\u{0}']), ('\u{1e05}', ['\u{1e04}', '\u{0}', '\u{0}']),
+ ('\u{1e07}', ['\u{1e06}', '\u{0}', '\u{0}']), ('\u{1e09}', ['\u{1e08}', '\u{0}', '\u{0}']),
+ ('\u{1e0b}', ['\u{1e0a}', '\u{0}', '\u{0}']), ('\u{1e0d}', ['\u{1e0c}', '\u{0}', '\u{0}']),
+ ('\u{1e0f}', ['\u{1e0e}', '\u{0}', '\u{0}']), ('\u{1e11}', ['\u{1e10}', '\u{0}', '\u{0}']),
+ ('\u{1e13}', ['\u{1e12}', '\u{0}', '\u{0}']), ('\u{1e15}', ['\u{1e14}', '\u{0}', '\u{0}']),
+ ('\u{1e17}', ['\u{1e16}', '\u{0}', '\u{0}']), ('\u{1e19}', ['\u{1e18}', '\u{0}', '\u{0}']),
+ ('\u{1e1b}', ['\u{1e1a}', '\u{0}', '\u{0}']), ('\u{1e1d}', ['\u{1e1c}', '\u{0}', '\u{0}']),
+ ('\u{1e1f}', ['\u{1e1e}', '\u{0}', '\u{0}']), ('\u{1e21}', ['\u{1e20}', '\u{0}', '\u{0}']),
+ ('\u{1e23}', ['\u{1e22}', '\u{0}', '\u{0}']), ('\u{1e25}', ['\u{1e24}', '\u{0}', '\u{0}']),
+ ('\u{1e27}', ['\u{1e26}', '\u{0}', '\u{0}']), ('\u{1e29}', ['\u{1e28}', '\u{0}', '\u{0}']),
+ ('\u{1e2b}', ['\u{1e2a}', '\u{0}', '\u{0}']), ('\u{1e2d}', ['\u{1e2c}', '\u{0}', '\u{0}']),
+ ('\u{1e2f}', ['\u{1e2e}', '\u{0}', '\u{0}']), ('\u{1e31}', ['\u{1e30}', '\u{0}', '\u{0}']),
+ ('\u{1e33}', ['\u{1e32}', '\u{0}', '\u{0}']), ('\u{1e35}', ['\u{1e34}', '\u{0}', '\u{0}']),
+ ('\u{1e37}', ['\u{1e36}', '\u{0}', '\u{0}']), ('\u{1e39}', ['\u{1e38}', '\u{0}', '\u{0}']),
+ ('\u{1e3b}', ['\u{1e3a}', '\u{0}', '\u{0}']), ('\u{1e3d}', ['\u{1e3c}', '\u{0}', '\u{0}']),
+ ('\u{1e3f}', ['\u{1e3e}', '\u{0}', '\u{0}']), ('\u{1e41}', ['\u{1e40}', '\u{0}', '\u{0}']),
+ ('\u{1e43}', ['\u{1e42}', '\u{0}', '\u{0}']), ('\u{1e45}', ['\u{1e44}', '\u{0}', '\u{0}']),
+ ('\u{1e47}', ['\u{1e46}', '\u{0}', '\u{0}']), ('\u{1e49}', ['\u{1e48}', '\u{0}', '\u{0}']),
+ ('\u{1e4b}', ['\u{1e4a}', '\u{0}', '\u{0}']), ('\u{1e4d}', ['\u{1e4c}', '\u{0}', '\u{0}']),
+ ('\u{1e4f}', ['\u{1e4e}', '\u{0}', '\u{0}']), ('\u{1e51}', ['\u{1e50}', '\u{0}', '\u{0}']),
+ ('\u{1e53}', ['\u{1e52}', '\u{0}', '\u{0}']), ('\u{1e55}', ['\u{1e54}', '\u{0}', '\u{0}']),
+ ('\u{1e57}', ['\u{1e56}', '\u{0}', '\u{0}']), ('\u{1e59}', ['\u{1e58}', '\u{0}', '\u{0}']),
+ ('\u{1e5b}', ['\u{1e5a}', '\u{0}', '\u{0}']), ('\u{1e5d}', ['\u{1e5c}', '\u{0}', '\u{0}']),
+ ('\u{1e5f}', ['\u{1e5e}', '\u{0}', '\u{0}']), ('\u{1e61}', ['\u{1e60}', '\u{0}', '\u{0}']),
+ ('\u{1e63}', ['\u{1e62}', '\u{0}', '\u{0}']), ('\u{1e65}', ['\u{1e64}', '\u{0}', '\u{0}']),
+ ('\u{1e67}', ['\u{1e66}', '\u{0}', '\u{0}']), ('\u{1e69}', ['\u{1e68}', '\u{0}', '\u{0}']),
+ ('\u{1e6b}', ['\u{1e6a}', '\u{0}', '\u{0}']), ('\u{1e6d}', ['\u{1e6c}', '\u{0}', '\u{0}']),
+ ('\u{1e6f}', ['\u{1e6e}', '\u{0}', '\u{0}']), ('\u{1e71}', ['\u{1e70}', '\u{0}', '\u{0}']),
+ ('\u{1e73}', ['\u{1e72}', '\u{0}', '\u{0}']), ('\u{1e75}', ['\u{1e74}', '\u{0}', '\u{0}']),
+ ('\u{1e77}', ['\u{1e76}', '\u{0}', '\u{0}']), ('\u{1e79}', ['\u{1e78}', '\u{0}', '\u{0}']),
+ ('\u{1e7b}', ['\u{1e7a}', '\u{0}', '\u{0}']), ('\u{1e7d}', ['\u{1e7c}', '\u{0}', '\u{0}']),
+ ('\u{1e7f}', ['\u{1e7e}', '\u{0}', '\u{0}']), ('\u{1e81}', ['\u{1e80}', '\u{0}', '\u{0}']),
+ ('\u{1e83}', ['\u{1e82}', '\u{0}', '\u{0}']), ('\u{1e85}', ['\u{1e84}', '\u{0}', '\u{0}']),
+ ('\u{1e87}', ['\u{1e86}', '\u{0}', '\u{0}']), ('\u{1e89}', ['\u{1e88}', '\u{0}', '\u{0}']),
+ ('\u{1e8b}', ['\u{1e8a}', '\u{0}', '\u{0}']), ('\u{1e8d}', ['\u{1e8c}', '\u{0}', '\u{0}']),
+ ('\u{1e8f}', ['\u{1e8e}', '\u{0}', '\u{0}']), ('\u{1e91}', ['\u{1e90}', '\u{0}', '\u{0}']),
+ ('\u{1e93}', ['\u{1e92}', '\u{0}', '\u{0}']), ('\u{1e95}', ['\u{1e94}', '\u{0}', '\u{0}']),
+ ('\u{1e96}', ['H', '\u{331}', '\u{0}']), ('\u{1e97}', ['T', '\u{308}', '\u{0}']),
+ ('\u{1e98}', ['W', '\u{30a}', '\u{0}']), ('\u{1e99}', ['Y', '\u{30a}', '\u{0}']),
+ ('\u{1e9a}', ['A', '\u{2be}', '\u{0}']), ('\u{1e9b}', ['\u{1e60}', '\u{0}', '\u{0}']),
+ ('\u{1ea1}', ['\u{1ea0}', '\u{0}', '\u{0}']), ('\u{1ea3}', ['\u{1ea2}', '\u{0}', '\u{0}']),
+ ('\u{1ea5}', ['\u{1ea4}', '\u{0}', '\u{0}']), ('\u{1ea7}', ['\u{1ea6}', '\u{0}', '\u{0}']),
+ ('\u{1ea9}', ['\u{1ea8}', '\u{0}', '\u{0}']), ('\u{1eab}', ['\u{1eaa}', '\u{0}', '\u{0}']),
+ ('\u{1ead}', ['\u{1eac}', '\u{0}', '\u{0}']), ('\u{1eaf}', ['\u{1eae}', '\u{0}', '\u{0}']),
+ ('\u{1eb1}', ['\u{1eb0}', '\u{0}', '\u{0}']), ('\u{1eb3}', ['\u{1eb2}', '\u{0}', '\u{0}']),
+ ('\u{1eb5}', ['\u{1eb4}', '\u{0}', '\u{0}']), ('\u{1eb7}', ['\u{1eb6}', '\u{0}', '\u{0}']),
+ ('\u{1eb9}', ['\u{1eb8}', '\u{0}', '\u{0}']), ('\u{1ebb}', ['\u{1eba}', '\u{0}', '\u{0}']),
+ ('\u{1ebd}', ['\u{1ebc}', '\u{0}', '\u{0}']), ('\u{1ebf}', ['\u{1ebe}', '\u{0}', '\u{0}']),
+ ('\u{1ec1}', ['\u{1ec0}', '\u{0}', '\u{0}']), ('\u{1ec3}', ['\u{1ec2}', '\u{0}', '\u{0}']),
+ ('\u{1ec5}', ['\u{1ec4}', '\u{0}', '\u{0}']), ('\u{1ec7}', ['\u{1ec6}', '\u{0}', '\u{0}']),
+ ('\u{1ec9}', ['\u{1ec8}', '\u{0}', '\u{0}']), ('\u{1ecb}', ['\u{1eca}', '\u{0}', '\u{0}']),
+ ('\u{1ecd}', ['\u{1ecc}', '\u{0}', '\u{0}']), ('\u{1ecf}', ['\u{1ece}', '\u{0}', '\u{0}']),
+ ('\u{1ed1}', ['\u{1ed0}', '\u{0}', '\u{0}']), ('\u{1ed3}', ['\u{1ed2}', '\u{0}', '\u{0}']),
+ ('\u{1ed5}', ['\u{1ed4}', '\u{0}', '\u{0}']), ('\u{1ed7}', ['\u{1ed6}', '\u{0}', '\u{0}']),
+ ('\u{1ed9}', ['\u{1ed8}', '\u{0}', '\u{0}']), ('\u{1edb}', ['\u{1eda}', '\u{0}', '\u{0}']),
+ ('\u{1edd}', ['\u{1edc}', '\u{0}', '\u{0}']), ('\u{1edf}', ['\u{1ede}', '\u{0}', '\u{0}']),
+ ('\u{1ee1}', ['\u{1ee0}', '\u{0}', '\u{0}']), ('\u{1ee3}', ['\u{1ee2}', '\u{0}', '\u{0}']),
+ ('\u{1ee5}', ['\u{1ee4}', '\u{0}', '\u{0}']), ('\u{1ee7}', ['\u{1ee6}', '\u{0}', '\u{0}']),
+ ('\u{1ee9}', ['\u{1ee8}', '\u{0}', '\u{0}']), ('\u{1eeb}', ['\u{1eea}', '\u{0}', '\u{0}']),
+ ('\u{1eed}', ['\u{1eec}', '\u{0}', '\u{0}']), ('\u{1eef}', ['\u{1eee}', '\u{0}', '\u{0}']),
+ ('\u{1ef1}', ['\u{1ef0}', '\u{0}', '\u{0}']), ('\u{1ef3}', ['\u{1ef2}', '\u{0}', '\u{0}']),
+ ('\u{1ef5}', ['\u{1ef4}', '\u{0}', '\u{0}']), ('\u{1ef7}', ['\u{1ef6}', '\u{0}', '\u{0}']),
+ ('\u{1ef9}', ['\u{1ef8}', '\u{0}', '\u{0}']), ('\u{1efb}', ['\u{1efa}', '\u{0}', '\u{0}']),
+ ('\u{1efd}', ['\u{1efc}', '\u{0}', '\u{0}']), ('\u{1eff}', ['\u{1efe}', '\u{0}', '\u{0}']),
+ ('\u{1f00}', ['\u{1f08}', '\u{0}', '\u{0}']), ('\u{1f01}', ['\u{1f09}', '\u{0}', '\u{0}']),
+ ('\u{1f02}', ['\u{1f0a}', '\u{0}', '\u{0}']), ('\u{1f03}', ['\u{1f0b}', '\u{0}', '\u{0}']),
+ ('\u{1f04}', ['\u{1f0c}', '\u{0}', '\u{0}']), ('\u{1f05}', ['\u{1f0d}', '\u{0}', '\u{0}']),
+ ('\u{1f06}', ['\u{1f0e}', '\u{0}', '\u{0}']), ('\u{1f07}', ['\u{1f0f}', '\u{0}', '\u{0}']),
+ ('\u{1f10}', ['\u{1f18}', '\u{0}', '\u{0}']), ('\u{1f11}', ['\u{1f19}', '\u{0}', '\u{0}']),
+ ('\u{1f12}', ['\u{1f1a}', '\u{0}', '\u{0}']), ('\u{1f13}', ['\u{1f1b}', '\u{0}', '\u{0}']),
+ ('\u{1f14}', ['\u{1f1c}', '\u{0}', '\u{0}']), ('\u{1f15}', ['\u{1f1d}', '\u{0}', '\u{0}']),
+ ('\u{1f20}', ['\u{1f28}', '\u{0}', '\u{0}']), ('\u{1f21}', ['\u{1f29}', '\u{0}', '\u{0}']),
+ ('\u{1f22}', ['\u{1f2a}', '\u{0}', '\u{0}']), ('\u{1f23}', ['\u{1f2b}', '\u{0}', '\u{0}']),
+ ('\u{1f24}', ['\u{1f2c}', '\u{0}', '\u{0}']), ('\u{1f25}', ['\u{1f2d}', '\u{0}', '\u{0}']),
+ ('\u{1f26}', ['\u{1f2e}', '\u{0}', '\u{0}']), ('\u{1f27}', ['\u{1f2f}', '\u{0}', '\u{0}']),
+ ('\u{1f30}', ['\u{1f38}', '\u{0}', '\u{0}']), ('\u{1f31}', ['\u{1f39}', '\u{0}', '\u{0}']),
+ ('\u{1f32}', ['\u{1f3a}', '\u{0}', '\u{0}']), ('\u{1f33}', ['\u{1f3b}', '\u{0}', '\u{0}']),
+ ('\u{1f34}', ['\u{1f3c}', '\u{0}', '\u{0}']), ('\u{1f35}', ['\u{1f3d}', '\u{0}', '\u{0}']),
+ ('\u{1f36}', ['\u{1f3e}', '\u{0}', '\u{0}']), ('\u{1f37}', ['\u{1f3f}', '\u{0}', '\u{0}']),
+ ('\u{1f40}', ['\u{1f48}', '\u{0}', '\u{0}']), ('\u{1f41}', ['\u{1f49}', '\u{0}', '\u{0}']),
+ ('\u{1f42}', ['\u{1f4a}', '\u{0}', '\u{0}']), ('\u{1f43}', ['\u{1f4b}', '\u{0}', '\u{0}']),
+ ('\u{1f44}', ['\u{1f4c}', '\u{0}', '\u{0}']), ('\u{1f45}', ['\u{1f4d}', '\u{0}', '\u{0}']),
+ ('\u{1f50}', ['\u{3a5}', '\u{313}', '\u{0}']), ('\u{1f51}', ['\u{1f59}', '\u{0}', '\u{0}']),
+ ('\u{1f52}', ['\u{3a5}', '\u{313}', '\u{300}']),
+ ('\u{1f53}', ['\u{1f5b}', '\u{0}', '\u{0}']),
+ ('\u{1f54}', ['\u{3a5}', '\u{313}', '\u{301}']),
+ ('\u{1f55}', ['\u{1f5d}', '\u{0}', '\u{0}']),
+ ('\u{1f56}', ['\u{3a5}', '\u{313}', '\u{342}']),
+ ('\u{1f57}', ['\u{1f5f}', '\u{0}', '\u{0}']), ('\u{1f60}', ['\u{1f68}', '\u{0}', '\u{0}']),
+ ('\u{1f61}', ['\u{1f69}', '\u{0}', '\u{0}']), ('\u{1f62}', ['\u{1f6a}', '\u{0}', '\u{0}']),
+ ('\u{1f63}', ['\u{1f6b}', '\u{0}', '\u{0}']), ('\u{1f64}', ['\u{1f6c}', '\u{0}', '\u{0}']),
+ ('\u{1f65}', ['\u{1f6d}', '\u{0}', '\u{0}']), ('\u{1f66}', ['\u{1f6e}', '\u{0}', '\u{0}']),
+ ('\u{1f67}', ['\u{1f6f}', '\u{0}', '\u{0}']), ('\u{1f70}', ['\u{1fba}', '\u{0}', '\u{0}']),
+ ('\u{1f71}', ['\u{1fbb}', '\u{0}', '\u{0}']), ('\u{1f72}', ['\u{1fc8}', '\u{0}', '\u{0}']),
+ ('\u{1f73}', ['\u{1fc9}', '\u{0}', '\u{0}']), ('\u{1f74}', ['\u{1fca}', '\u{0}', '\u{0}']),
+ ('\u{1f75}', ['\u{1fcb}', '\u{0}', '\u{0}']), ('\u{1f76}', ['\u{1fda}', '\u{0}', '\u{0}']),
+ ('\u{1f77}', ['\u{1fdb}', '\u{0}', '\u{0}']), ('\u{1f78}', ['\u{1ff8}', '\u{0}', '\u{0}']),
+ ('\u{1f79}', ['\u{1ff9}', '\u{0}', '\u{0}']), ('\u{1f7a}', ['\u{1fea}', '\u{0}', '\u{0}']),
+ ('\u{1f7b}', ['\u{1feb}', '\u{0}', '\u{0}']), ('\u{1f7c}', ['\u{1ffa}', '\u{0}', '\u{0}']),
+ ('\u{1f7d}', ['\u{1ffb}', '\u{0}', '\u{0}']),
+ ('\u{1f80}', ['\u{1f08}', '\u{399}', '\u{0}']),
+ ('\u{1f81}', ['\u{1f09}', '\u{399}', '\u{0}']),
+ ('\u{1f82}', ['\u{1f0a}', '\u{399}', '\u{0}']),
+ ('\u{1f83}', ['\u{1f0b}', '\u{399}', '\u{0}']),
+ ('\u{1f84}', ['\u{1f0c}', '\u{399}', '\u{0}']),
+ ('\u{1f85}', ['\u{1f0d}', '\u{399}', '\u{0}']),
+ ('\u{1f86}', ['\u{1f0e}', '\u{399}', '\u{0}']),
+ ('\u{1f87}', ['\u{1f0f}', '\u{399}', '\u{0}']),
+ ('\u{1f88}', ['\u{1f08}', '\u{399}', '\u{0}']),
+ ('\u{1f89}', ['\u{1f09}', '\u{399}', '\u{0}']),
+ ('\u{1f8a}', ['\u{1f0a}', '\u{399}', '\u{0}']),
+ ('\u{1f8b}', ['\u{1f0b}', '\u{399}', '\u{0}']),
+ ('\u{1f8c}', ['\u{1f0c}', '\u{399}', '\u{0}']),
+ ('\u{1f8d}', ['\u{1f0d}', '\u{399}', '\u{0}']),
+ ('\u{1f8e}', ['\u{1f0e}', '\u{399}', '\u{0}']),
+ ('\u{1f8f}', ['\u{1f0f}', '\u{399}', '\u{0}']),
+ ('\u{1f90}', ['\u{1f28}', '\u{399}', '\u{0}']),
+ ('\u{1f91}', ['\u{1f29}', '\u{399}', '\u{0}']),
+ ('\u{1f92}', ['\u{1f2a}', '\u{399}', '\u{0}']),
+ ('\u{1f93}', ['\u{1f2b}', '\u{399}', '\u{0}']),
+ ('\u{1f94}', ['\u{1f2c}', '\u{399}', '\u{0}']),
+ ('\u{1f95}', ['\u{1f2d}', '\u{399}', '\u{0}']),
+ ('\u{1f96}', ['\u{1f2e}', '\u{399}', '\u{0}']),
+ ('\u{1f97}', ['\u{1f2f}', '\u{399}', '\u{0}']),
+ ('\u{1f98}', ['\u{1f28}', '\u{399}', '\u{0}']),
+ ('\u{1f99}', ['\u{1f29}', '\u{399}', '\u{0}']),
+ ('\u{1f9a}', ['\u{1f2a}', '\u{399}', '\u{0}']),
+ ('\u{1f9b}', ['\u{1f2b}', '\u{399}', '\u{0}']),
+ ('\u{1f9c}', ['\u{1f2c}', '\u{399}', '\u{0}']),
+ ('\u{1f9d}', ['\u{1f2d}', '\u{399}', '\u{0}']),
+ ('\u{1f9e}', ['\u{1f2e}', '\u{399}', '\u{0}']),
+ ('\u{1f9f}', ['\u{1f2f}', '\u{399}', '\u{0}']),
+ ('\u{1fa0}', ['\u{1f68}', '\u{399}', '\u{0}']),
+ ('\u{1fa1}', ['\u{1f69}', '\u{399}', '\u{0}']),
+ ('\u{1fa2}', ['\u{1f6a}', '\u{399}', '\u{0}']),
+ ('\u{1fa3}', ['\u{1f6b}', '\u{399}', '\u{0}']),
+ ('\u{1fa4}', ['\u{1f6c}', '\u{399}', '\u{0}']),
+ ('\u{1fa5}', ['\u{1f6d}', '\u{399}', '\u{0}']),
+ ('\u{1fa6}', ['\u{1f6e}', '\u{399}', '\u{0}']),
+ ('\u{1fa7}', ['\u{1f6f}', '\u{399}', '\u{0}']),
+ ('\u{1fa8}', ['\u{1f68}', '\u{399}', '\u{0}']),
+ ('\u{1fa9}', ['\u{1f69}', '\u{399}', '\u{0}']),
+ ('\u{1faa}', ['\u{1f6a}', '\u{399}', '\u{0}']),
+ ('\u{1fab}', ['\u{1f6b}', '\u{399}', '\u{0}']),
+ ('\u{1fac}', ['\u{1f6c}', '\u{399}', '\u{0}']),
+ ('\u{1fad}', ['\u{1f6d}', '\u{399}', '\u{0}']),
+ ('\u{1fae}', ['\u{1f6e}', '\u{399}', '\u{0}']),
+ ('\u{1faf}', ['\u{1f6f}', '\u{399}', '\u{0}']),
+ ('\u{1fb0}', ['\u{1fb8}', '\u{0}', '\u{0}']), ('\u{1fb1}', ['\u{1fb9}', '\u{0}', '\u{0}']),
+ ('\u{1fb2}', ['\u{1fba}', '\u{399}', '\u{0}']),
+ ('\u{1fb3}', ['\u{391}', '\u{399}', '\u{0}']),
+ ('\u{1fb4}', ['\u{386}', '\u{399}', '\u{0}']),
+ ('\u{1fb6}', ['\u{391}', '\u{342}', '\u{0}']),
+ ('\u{1fb7}', ['\u{391}', '\u{342}', '\u{399}']),
+ ('\u{1fbc}', ['\u{391}', '\u{399}', '\u{0}']), ('\u{1fbe}', ['\u{399}', '\u{0}', '\u{0}']),
+ ('\u{1fc2}', ['\u{1fca}', '\u{399}', '\u{0}']),
+ ('\u{1fc3}', ['\u{397}', '\u{399}', '\u{0}']),
+ ('\u{1fc4}', ['\u{389}', '\u{399}', '\u{0}']),
+ ('\u{1fc6}', ['\u{397}', '\u{342}', '\u{0}']),
+ ('\u{1fc7}', ['\u{397}', '\u{342}', '\u{399}']),
+ ('\u{1fcc}', ['\u{397}', '\u{399}', '\u{0}']), ('\u{1fd0}', ['\u{1fd8}', '\u{0}', '\u{0}']),
+ ('\u{1fd1}', ['\u{1fd9}', '\u{0}', '\u{0}']),
+ ('\u{1fd2}', ['\u{399}', '\u{308}', '\u{300}']),
+ ('\u{1fd3}', ['\u{399}', '\u{308}', '\u{301}']),
+ ('\u{1fd6}', ['\u{399}', '\u{342}', '\u{0}']),
+ ('\u{1fd7}', ['\u{399}', '\u{308}', '\u{342}']),
+ ('\u{1fe0}', ['\u{1fe8}', '\u{0}', '\u{0}']), ('\u{1fe1}', ['\u{1fe9}', '\u{0}', '\u{0}']),
+ ('\u{1fe2}', ['\u{3a5}', '\u{308}', '\u{300}']),
+ ('\u{1fe3}', ['\u{3a5}', '\u{308}', '\u{301}']),
+ ('\u{1fe4}', ['\u{3a1}', '\u{313}', '\u{0}']), ('\u{1fe5}', ['\u{1fec}', '\u{0}', '\u{0}']),
+ ('\u{1fe6}', ['\u{3a5}', '\u{342}', '\u{0}']),
+ ('\u{1fe7}', ['\u{3a5}', '\u{308}', '\u{342}']),
+ ('\u{1ff2}', ['\u{1ffa}', '\u{399}', '\u{0}']),
+ ('\u{1ff3}', ['\u{3a9}', '\u{399}', '\u{0}']),
+ ('\u{1ff4}', ['\u{38f}', '\u{399}', '\u{0}']),
+ ('\u{1ff6}', ['\u{3a9}', '\u{342}', '\u{0}']),
+ ('\u{1ff7}', ['\u{3a9}', '\u{342}', '\u{399}']),
+ ('\u{1ffc}', ['\u{3a9}', '\u{399}', '\u{0}']), ('\u{214e}', ['\u{2132}', '\u{0}', '\u{0}']),
+ ('\u{2170}', ['\u{2160}', '\u{0}', '\u{0}']), ('\u{2171}', ['\u{2161}', '\u{0}', '\u{0}']),
+ ('\u{2172}', ['\u{2162}', '\u{0}', '\u{0}']), ('\u{2173}', ['\u{2163}', '\u{0}', '\u{0}']),
+ ('\u{2174}', ['\u{2164}', '\u{0}', '\u{0}']), ('\u{2175}', ['\u{2165}', '\u{0}', '\u{0}']),
+ ('\u{2176}', ['\u{2166}', '\u{0}', '\u{0}']), ('\u{2177}', ['\u{2167}', '\u{0}', '\u{0}']),
+ ('\u{2178}', ['\u{2168}', '\u{0}', '\u{0}']), ('\u{2179}', ['\u{2169}', '\u{0}', '\u{0}']),
+ ('\u{217a}', ['\u{216a}', '\u{0}', '\u{0}']), ('\u{217b}', ['\u{216b}', '\u{0}', '\u{0}']),
+ ('\u{217c}', ['\u{216c}', '\u{0}', '\u{0}']), ('\u{217d}', ['\u{216d}', '\u{0}', '\u{0}']),
+ ('\u{217e}', ['\u{216e}', '\u{0}', '\u{0}']), ('\u{217f}', ['\u{216f}', '\u{0}', '\u{0}']),
+ ('\u{2184}', ['\u{2183}', '\u{0}', '\u{0}']), ('\u{24d0}', ['\u{24b6}', '\u{0}', '\u{0}']),
+ ('\u{24d1}', ['\u{24b7}', '\u{0}', '\u{0}']), ('\u{24d2}', ['\u{24b8}', '\u{0}', '\u{0}']),
+ ('\u{24d3}', ['\u{24b9}', '\u{0}', '\u{0}']), ('\u{24d4}', ['\u{24ba}', '\u{0}', '\u{0}']),
+ ('\u{24d5}', ['\u{24bb}', '\u{0}', '\u{0}']), ('\u{24d6}', ['\u{24bc}', '\u{0}', '\u{0}']),
+ ('\u{24d7}', ['\u{24bd}', '\u{0}', '\u{0}']), ('\u{24d8}', ['\u{24be}', '\u{0}', '\u{0}']),
+ ('\u{24d9}', ['\u{24bf}', '\u{0}', '\u{0}']), ('\u{24da}', ['\u{24c0}', '\u{0}', '\u{0}']),
+ ('\u{24db}', ['\u{24c1}', '\u{0}', '\u{0}']), ('\u{24dc}', ['\u{24c2}', '\u{0}', '\u{0}']),
+ ('\u{24dd}', ['\u{24c3}', '\u{0}', '\u{0}']), ('\u{24de}', ['\u{24c4}', '\u{0}', '\u{0}']),
+ ('\u{24df}', ['\u{24c5}', '\u{0}', '\u{0}']), ('\u{24e0}', ['\u{24c6}', '\u{0}', '\u{0}']),
+ ('\u{24e1}', ['\u{24c7}', '\u{0}', '\u{0}']), ('\u{24e2}', ['\u{24c8}', '\u{0}', '\u{0}']),
+ ('\u{24e3}', ['\u{24c9}', '\u{0}', '\u{0}']), ('\u{24e4}', ['\u{24ca}', '\u{0}', '\u{0}']),
+ ('\u{24e5}', ['\u{24cb}', '\u{0}', '\u{0}']), ('\u{24e6}', ['\u{24cc}', '\u{0}', '\u{0}']),
+ ('\u{24e7}', ['\u{24cd}', '\u{0}', '\u{0}']), ('\u{24e8}', ['\u{24ce}', '\u{0}', '\u{0}']),
+ ('\u{24e9}', ['\u{24cf}', '\u{0}', '\u{0}']), ('\u{2c30}', ['\u{2c00}', '\u{0}', '\u{0}']),
+ ('\u{2c31}', ['\u{2c01}', '\u{0}', '\u{0}']), ('\u{2c32}', ['\u{2c02}', '\u{0}', '\u{0}']),
+ ('\u{2c33}', ['\u{2c03}', '\u{0}', '\u{0}']), ('\u{2c34}', ['\u{2c04}', '\u{0}', '\u{0}']),
+ ('\u{2c35}', ['\u{2c05}', '\u{0}', '\u{0}']), ('\u{2c36}', ['\u{2c06}', '\u{0}', '\u{0}']),
+ ('\u{2c37}', ['\u{2c07}', '\u{0}', '\u{0}']), ('\u{2c38}', ['\u{2c08}', '\u{0}', '\u{0}']),
+ ('\u{2c39}', ['\u{2c09}', '\u{0}', '\u{0}']), ('\u{2c3a}', ['\u{2c0a}', '\u{0}', '\u{0}']),
+ ('\u{2c3b}', ['\u{2c0b}', '\u{0}', '\u{0}']), ('\u{2c3c}', ['\u{2c0c}', '\u{0}', '\u{0}']),
+ ('\u{2c3d}', ['\u{2c0d}', '\u{0}', '\u{0}']), ('\u{2c3e}', ['\u{2c0e}', '\u{0}', '\u{0}']),
+ ('\u{2c3f}', ['\u{2c0f}', '\u{0}', '\u{0}']), ('\u{2c40}', ['\u{2c10}', '\u{0}', '\u{0}']),
+ ('\u{2c41}', ['\u{2c11}', '\u{0}', '\u{0}']), ('\u{2c42}', ['\u{2c12}', '\u{0}', '\u{0}']),
+ ('\u{2c43}', ['\u{2c13}', '\u{0}', '\u{0}']), ('\u{2c44}', ['\u{2c14}', '\u{0}', '\u{0}']),
+ ('\u{2c45}', ['\u{2c15}', '\u{0}', '\u{0}']), ('\u{2c46}', ['\u{2c16}', '\u{0}', '\u{0}']),
+ ('\u{2c47}', ['\u{2c17}', '\u{0}', '\u{0}']), ('\u{2c48}', ['\u{2c18}', '\u{0}', '\u{0}']),
+ ('\u{2c49}', ['\u{2c19}', '\u{0}', '\u{0}']), ('\u{2c4a}', ['\u{2c1a}', '\u{0}', '\u{0}']),
+ ('\u{2c4b}', ['\u{2c1b}', '\u{0}', '\u{0}']), ('\u{2c4c}', ['\u{2c1c}', '\u{0}', '\u{0}']),
+ ('\u{2c4d}', ['\u{2c1d}', '\u{0}', '\u{0}']), ('\u{2c4e}', ['\u{2c1e}', '\u{0}', '\u{0}']),
+ ('\u{2c4f}', ['\u{2c1f}', '\u{0}', '\u{0}']), ('\u{2c50}', ['\u{2c20}', '\u{0}', '\u{0}']),
+ ('\u{2c51}', ['\u{2c21}', '\u{0}', '\u{0}']), ('\u{2c52}', ['\u{2c22}', '\u{0}', '\u{0}']),
+ ('\u{2c53}', ['\u{2c23}', '\u{0}', '\u{0}']), ('\u{2c54}', ['\u{2c24}', '\u{0}', '\u{0}']),
+ ('\u{2c55}', ['\u{2c25}', '\u{0}', '\u{0}']), ('\u{2c56}', ['\u{2c26}', '\u{0}', '\u{0}']),
+ ('\u{2c57}', ['\u{2c27}', '\u{0}', '\u{0}']), ('\u{2c58}', ['\u{2c28}', '\u{0}', '\u{0}']),
+ ('\u{2c59}', ['\u{2c29}', '\u{0}', '\u{0}']), ('\u{2c5a}', ['\u{2c2a}', '\u{0}', '\u{0}']),
+ ('\u{2c5b}', ['\u{2c2b}', '\u{0}', '\u{0}']), ('\u{2c5c}', ['\u{2c2c}', '\u{0}', '\u{0}']),
+ ('\u{2c5d}', ['\u{2c2d}', '\u{0}', '\u{0}']), ('\u{2c5e}', ['\u{2c2e}', '\u{0}', '\u{0}']),
+ ('\u{2c5f}', ['\u{2c2f}', '\u{0}', '\u{0}']), ('\u{2c61}', ['\u{2c60}', '\u{0}', '\u{0}']),
+ ('\u{2c65}', ['\u{23a}', '\u{0}', '\u{0}']), ('\u{2c66}', ['\u{23e}', '\u{0}', '\u{0}']),
+ ('\u{2c68}', ['\u{2c67}', '\u{0}', '\u{0}']), ('\u{2c6a}', ['\u{2c69}', '\u{0}', '\u{0}']),
+ ('\u{2c6c}', ['\u{2c6b}', '\u{0}', '\u{0}']), ('\u{2c73}', ['\u{2c72}', '\u{0}', '\u{0}']),
+ ('\u{2c76}', ['\u{2c75}', '\u{0}', '\u{0}']), ('\u{2c81}', ['\u{2c80}', '\u{0}', '\u{0}']),
+ ('\u{2c83}', ['\u{2c82}', '\u{0}', '\u{0}']), ('\u{2c85}', ['\u{2c84}', '\u{0}', '\u{0}']),
+ ('\u{2c87}', ['\u{2c86}', '\u{0}', '\u{0}']), ('\u{2c89}', ['\u{2c88}', '\u{0}', '\u{0}']),
+ ('\u{2c8b}', ['\u{2c8a}', '\u{0}', '\u{0}']), ('\u{2c8d}', ['\u{2c8c}', '\u{0}', '\u{0}']),
+ ('\u{2c8f}', ['\u{2c8e}', '\u{0}', '\u{0}']), ('\u{2c91}', ['\u{2c90}', '\u{0}', '\u{0}']),
+ ('\u{2c93}', ['\u{2c92}', '\u{0}', '\u{0}']), ('\u{2c95}', ['\u{2c94}', '\u{0}', '\u{0}']),
+ ('\u{2c97}', ['\u{2c96}', '\u{0}', '\u{0}']), ('\u{2c99}', ['\u{2c98}', '\u{0}', '\u{0}']),
+ ('\u{2c9b}', ['\u{2c9a}', '\u{0}', '\u{0}']), ('\u{2c9d}', ['\u{2c9c}', '\u{0}', '\u{0}']),
+ ('\u{2c9f}', ['\u{2c9e}', '\u{0}', '\u{0}']), ('\u{2ca1}', ['\u{2ca0}', '\u{0}', '\u{0}']),
+ ('\u{2ca3}', ['\u{2ca2}', '\u{0}', '\u{0}']), ('\u{2ca5}', ['\u{2ca4}', '\u{0}', '\u{0}']),
+ ('\u{2ca7}', ['\u{2ca6}', '\u{0}', '\u{0}']), ('\u{2ca9}', ['\u{2ca8}', '\u{0}', '\u{0}']),
+ ('\u{2cab}', ['\u{2caa}', '\u{0}', '\u{0}']), ('\u{2cad}', ['\u{2cac}', '\u{0}', '\u{0}']),
+ ('\u{2caf}', ['\u{2cae}', '\u{0}', '\u{0}']), ('\u{2cb1}', ['\u{2cb0}', '\u{0}', '\u{0}']),
+ ('\u{2cb3}', ['\u{2cb2}', '\u{0}', '\u{0}']), ('\u{2cb5}', ['\u{2cb4}', '\u{0}', '\u{0}']),
+ ('\u{2cb7}', ['\u{2cb6}', '\u{0}', '\u{0}']), ('\u{2cb9}', ['\u{2cb8}', '\u{0}', '\u{0}']),
+ ('\u{2cbb}', ['\u{2cba}', '\u{0}', '\u{0}']), ('\u{2cbd}', ['\u{2cbc}', '\u{0}', '\u{0}']),
+ ('\u{2cbf}', ['\u{2cbe}', '\u{0}', '\u{0}']), ('\u{2cc1}', ['\u{2cc0}', '\u{0}', '\u{0}']),
+ ('\u{2cc3}', ['\u{2cc2}', '\u{0}', '\u{0}']), ('\u{2cc5}', ['\u{2cc4}', '\u{0}', '\u{0}']),
+ ('\u{2cc7}', ['\u{2cc6}', '\u{0}', '\u{0}']), ('\u{2cc9}', ['\u{2cc8}', '\u{0}', '\u{0}']),
+ ('\u{2ccb}', ['\u{2cca}', '\u{0}', '\u{0}']), ('\u{2ccd}', ['\u{2ccc}', '\u{0}', '\u{0}']),
+ ('\u{2ccf}', ['\u{2cce}', '\u{0}', '\u{0}']), ('\u{2cd1}', ['\u{2cd0}', '\u{0}', '\u{0}']),
+ ('\u{2cd3}', ['\u{2cd2}', '\u{0}', '\u{0}']), ('\u{2cd5}', ['\u{2cd4}', '\u{0}', '\u{0}']),
+ ('\u{2cd7}', ['\u{2cd6}', '\u{0}', '\u{0}']), ('\u{2cd9}', ['\u{2cd8}', '\u{0}', '\u{0}']),
+ ('\u{2cdb}', ['\u{2cda}', '\u{0}', '\u{0}']), ('\u{2cdd}', ['\u{2cdc}', '\u{0}', '\u{0}']),
+ ('\u{2cdf}', ['\u{2cde}', '\u{0}', '\u{0}']), ('\u{2ce1}', ['\u{2ce0}', '\u{0}', '\u{0}']),
+ ('\u{2ce3}', ['\u{2ce2}', '\u{0}', '\u{0}']), ('\u{2cec}', ['\u{2ceb}', '\u{0}', '\u{0}']),
+ ('\u{2cee}', ['\u{2ced}', '\u{0}', '\u{0}']), ('\u{2cf3}', ['\u{2cf2}', '\u{0}', '\u{0}']),
+ ('\u{2d00}', ['\u{10a0}', '\u{0}', '\u{0}']), ('\u{2d01}', ['\u{10a1}', '\u{0}', '\u{0}']),
+ ('\u{2d02}', ['\u{10a2}', '\u{0}', '\u{0}']), ('\u{2d03}', ['\u{10a3}', '\u{0}', '\u{0}']),
+ ('\u{2d04}', ['\u{10a4}', '\u{0}', '\u{0}']), ('\u{2d05}', ['\u{10a5}', '\u{0}', '\u{0}']),
+ ('\u{2d06}', ['\u{10a6}', '\u{0}', '\u{0}']), ('\u{2d07}', ['\u{10a7}', '\u{0}', '\u{0}']),
+ ('\u{2d08}', ['\u{10a8}', '\u{0}', '\u{0}']), ('\u{2d09}', ['\u{10a9}', '\u{0}', '\u{0}']),
+ ('\u{2d0a}', ['\u{10aa}', '\u{0}', '\u{0}']), ('\u{2d0b}', ['\u{10ab}', '\u{0}', '\u{0}']),
+ ('\u{2d0c}', ['\u{10ac}', '\u{0}', '\u{0}']), ('\u{2d0d}', ['\u{10ad}', '\u{0}', '\u{0}']),
+ ('\u{2d0e}', ['\u{10ae}', '\u{0}', '\u{0}']), ('\u{2d0f}', ['\u{10af}', '\u{0}', '\u{0}']),
+ ('\u{2d10}', ['\u{10b0}', '\u{0}', '\u{0}']), ('\u{2d11}', ['\u{10b1}', '\u{0}', '\u{0}']),
+ ('\u{2d12}', ['\u{10b2}', '\u{0}', '\u{0}']), ('\u{2d13}', ['\u{10b3}', '\u{0}', '\u{0}']),
+ ('\u{2d14}', ['\u{10b4}', '\u{0}', '\u{0}']), ('\u{2d15}', ['\u{10b5}', '\u{0}', '\u{0}']),
+ ('\u{2d16}', ['\u{10b6}', '\u{0}', '\u{0}']), ('\u{2d17}', ['\u{10b7}', '\u{0}', '\u{0}']),
+ ('\u{2d18}', ['\u{10b8}', '\u{0}', '\u{0}']), ('\u{2d19}', ['\u{10b9}', '\u{0}', '\u{0}']),
+ ('\u{2d1a}', ['\u{10ba}', '\u{0}', '\u{0}']), ('\u{2d1b}', ['\u{10bb}', '\u{0}', '\u{0}']),
+ ('\u{2d1c}', ['\u{10bc}', '\u{0}', '\u{0}']), ('\u{2d1d}', ['\u{10bd}', '\u{0}', '\u{0}']),
+ ('\u{2d1e}', ['\u{10be}', '\u{0}', '\u{0}']), ('\u{2d1f}', ['\u{10bf}', '\u{0}', '\u{0}']),
+ ('\u{2d20}', ['\u{10c0}', '\u{0}', '\u{0}']), ('\u{2d21}', ['\u{10c1}', '\u{0}', '\u{0}']),
+ ('\u{2d22}', ['\u{10c2}', '\u{0}', '\u{0}']), ('\u{2d23}', ['\u{10c3}', '\u{0}', '\u{0}']),
+ ('\u{2d24}', ['\u{10c4}', '\u{0}', '\u{0}']), ('\u{2d25}', ['\u{10c5}', '\u{0}', '\u{0}']),
+ ('\u{2d27}', ['\u{10c7}', '\u{0}', '\u{0}']), ('\u{2d2d}', ['\u{10cd}', '\u{0}', '\u{0}']),
+ ('\u{a641}', ['\u{a640}', '\u{0}', '\u{0}']), ('\u{a643}', ['\u{a642}', '\u{0}', '\u{0}']),
+ ('\u{a645}', ['\u{a644}', '\u{0}', '\u{0}']), ('\u{a647}', ['\u{a646}', '\u{0}', '\u{0}']),
+ ('\u{a649}', ['\u{a648}', '\u{0}', '\u{0}']), ('\u{a64b}', ['\u{a64a}', '\u{0}', '\u{0}']),
+ ('\u{a64d}', ['\u{a64c}', '\u{0}', '\u{0}']), ('\u{a64f}', ['\u{a64e}', '\u{0}', '\u{0}']),
+ ('\u{a651}', ['\u{a650}', '\u{0}', '\u{0}']), ('\u{a653}', ['\u{a652}', '\u{0}', '\u{0}']),
+ ('\u{a655}', ['\u{a654}', '\u{0}', '\u{0}']), ('\u{a657}', ['\u{a656}', '\u{0}', '\u{0}']),
+ ('\u{a659}', ['\u{a658}', '\u{0}', '\u{0}']), ('\u{a65b}', ['\u{a65a}', '\u{0}', '\u{0}']),
+ ('\u{a65d}', ['\u{a65c}', '\u{0}', '\u{0}']), ('\u{a65f}', ['\u{a65e}', '\u{0}', '\u{0}']),
+ ('\u{a661}', ['\u{a660}', '\u{0}', '\u{0}']), ('\u{a663}', ['\u{a662}', '\u{0}', '\u{0}']),
+ ('\u{a665}', ['\u{a664}', '\u{0}', '\u{0}']), ('\u{a667}', ['\u{a666}', '\u{0}', '\u{0}']),
+ ('\u{a669}', ['\u{a668}', '\u{0}', '\u{0}']), ('\u{a66b}', ['\u{a66a}', '\u{0}', '\u{0}']),
+ ('\u{a66d}', ['\u{a66c}', '\u{0}', '\u{0}']), ('\u{a681}', ['\u{a680}', '\u{0}', '\u{0}']),
+ ('\u{a683}', ['\u{a682}', '\u{0}', '\u{0}']), ('\u{a685}', ['\u{a684}', '\u{0}', '\u{0}']),
+ ('\u{a687}', ['\u{a686}', '\u{0}', '\u{0}']), ('\u{a689}', ['\u{a688}', '\u{0}', '\u{0}']),
+ ('\u{a68b}', ['\u{a68a}', '\u{0}', '\u{0}']), ('\u{a68d}', ['\u{a68c}', '\u{0}', '\u{0}']),
+ ('\u{a68f}', ['\u{a68e}', '\u{0}', '\u{0}']), ('\u{a691}', ['\u{a690}', '\u{0}', '\u{0}']),
+ ('\u{a693}', ['\u{a692}', '\u{0}', '\u{0}']), ('\u{a695}', ['\u{a694}', '\u{0}', '\u{0}']),
+ ('\u{a697}', ['\u{a696}', '\u{0}', '\u{0}']), ('\u{a699}', ['\u{a698}', '\u{0}', '\u{0}']),
+ ('\u{a69b}', ['\u{a69a}', '\u{0}', '\u{0}']), ('\u{a723}', ['\u{a722}', '\u{0}', '\u{0}']),
+ ('\u{a725}', ['\u{a724}', '\u{0}', '\u{0}']), ('\u{a727}', ['\u{a726}', '\u{0}', '\u{0}']),
+ ('\u{a729}', ['\u{a728}', '\u{0}', '\u{0}']), ('\u{a72b}', ['\u{a72a}', '\u{0}', '\u{0}']),
+ ('\u{a72d}', ['\u{a72c}', '\u{0}', '\u{0}']), ('\u{a72f}', ['\u{a72e}', '\u{0}', '\u{0}']),
+ ('\u{a733}', ['\u{a732}', '\u{0}', '\u{0}']), ('\u{a735}', ['\u{a734}', '\u{0}', '\u{0}']),
+ ('\u{a737}', ['\u{a736}', '\u{0}', '\u{0}']), ('\u{a739}', ['\u{a738}', '\u{0}', '\u{0}']),
+ ('\u{a73b}', ['\u{a73a}', '\u{0}', '\u{0}']), ('\u{a73d}', ['\u{a73c}', '\u{0}', '\u{0}']),
+ ('\u{a73f}', ['\u{a73e}', '\u{0}', '\u{0}']), ('\u{a741}', ['\u{a740}', '\u{0}', '\u{0}']),
+ ('\u{a743}', ['\u{a742}', '\u{0}', '\u{0}']), ('\u{a745}', ['\u{a744}', '\u{0}', '\u{0}']),
+ ('\u{a747}', ['\u{a746}', '\u{0}', '\u{0}']), ('\u{a749}', ['\u{a748}', '\u{0}', '\u{0}']),
+ ('\u{a74b}', ['\u{a74a}', '\u{0}', '\u{0}']), ('\u{a74d}', ['\u{a74c}', '\u{0}', '\u{0}']),
+ ('\u{a74f}', ['\u{a74e}', '\u{0}', '\u{0}']), ('\u{a751}', ['\u{a750}', '\u{0}', '\u{0}']),
+ ('\u{a753}', ['\u{a752}', '\u{0}', '\u{0}']), ('\u{a755}', ['\u{a754}', '\u{0}', '\u{0}']),
+ ('\u{a757}', ['\u{a756}', '\u{0}', '\u{0}']), ('\u{a759}', ['\u{a758}', '\u{0}', '\u{0}']),
+ ('\u{a75b}', ['\u{a75a}', '\u{0}', '\u{0}']), ('\u{a75d}', ['\u{a75c}', '\u{0}', '\u{0}']),
+ ('\u{a75f}', ['\u{a75e}', '\u{0}', '\u{0}']), ('\u{a761}', ['\u{a760}', '\u{0}', '\u{0}']),
+ ('\u{a763}', ['\u{a762}', '\u{0}', '\u{0}']), ('\u{a765}', ['\u{a764}', '\u{0}', '\u{0}']),
+ ('\u{a767}', ['\u{a766}', '\u{0}', '\u{0}']), ('\u{a769}', ['\u{a768}', '\u{0}', '\u{0}']),
+ ('\u{a76b}', ['\u{a76a}', '\u{0}', '\u{0}']), ('\u{a76d}', ['\u{a76c}', '\u{0}', '\u{0}']),
+ ('\u{a76f}', ['\u{a76e}', '\u{0}', '\u{0}']), ('\u{a77a}', ['\u{a779}', '\u{0}', '\u{0}']),
+ ('\u{a77c}', ['\u{a77b}', '\u{0}', '\u{0}']), ('\u{a77f}', ['\u{a77e}', '\u{0}', '\u{0}']),
+ ('\u{a781}', ['\u{a780}', '\u{0}', '\u{0}']), ('\u{a783}', ['\u{a782}', '\u{0}', '\u{0}']),
+ ('\u{a785}', ['\u{a784}', '\u{0}', '\u{0}']), ('\u{a787}', ['\u{a786}', '\u{0}', '\u{0}']),
+ ('\u{a78c}', ['\u{a78b}', '\u{0}', '\u{0}']), ('\u{a791}', ['\u{a790}', '\u{0}', '\u{0}']),
+ ('\u{a793}', ['\u{a792}', '\u{0}', '\u{0}']), ('\u{a794}', ['\u{a7c4}', '\u{0}', '\u{0}']),
+ ('\u{a797}', ['\u{a796}', '\u{0}', '\u{0}']), ('\u{a799}', ['\u{a798}', '\u{0}', '\u{0}']),
+ ('\u{a79b}', ['\u{a79a}', '\u{0}', '\u{0}']), ('\u{a79d}', ['\u{a79c}', '\u{0}', '\u{0}']),
+ ('\u{a79f}', ['\u{a79e}', '\u{0}', '\u{0}']), ('\u{a7a1}', ['\u{a7a0}', '\u{0}', '\u{0}']),
+ ('\u{a7a3}', ['\u{a7a2}', '\u{0}', '\u{0}']), ('\u{a7a5}', ['\u{a7a4}', '\u{0}', '\u{0}']),
+ ('\u{a7a7}', ['\u{a7a6}', '\u{0}', '\u{0}']), ('\u{a7a9}', ['\u{a7a8}', '\u{0}', '\u{0}']),
+ ('\u{a7b5}', ['\u{a7b4}', '\u{0}', '\u{0}']), ('\u{a7b7}', ['\u{a7b6}', '\u{0}', '\u{0}']),
+ ('\u{a7b9}', ['\u{a7b8}', '\u{0}', '\u{0}']), ('\u{a7bb}', ['\u{a7ba}', '\u{0}', '\u{0}']),
+ ('\u{a7bd}', ['\u{a7bc}', '\u{0}', '\u{0}']), ('\u{a7bf}', ['\u{a7be}', '\u{0}', '\u{0}']),
+ ('\u{a7c1}', ['\u{a7c0}', '\u{0}', '\u{0}']), ('\u{a7c3}', ['\u{a7c2}', '\u{0}', '\u{0}']),
+ ('\u{a7c8}', ['\u{a7c7}', '\u{0}', '\u{0}']), ('\u{a7ca}', ['\u{a7c9}', '\u{0}', '\u{0}']),
+ ('\u{a7d1}', ['\u{a7d0}', '\u{0}', '\u{0}']), ('\u{a7d7}', ['\u{a7d6}', '\u{0}', '\u{0}']),
+ ('\u{a7d9}', ['\u{a7d8}', '\u{0}', '\u{0}']), ('\u{a7f6}', ['\u{a7f5}', '\u{0}', '\u{0}']),
+ ('\u{ab53}', ['\u{a7b3}', '\u{0}', '\u{0}']), ('\u{ab70}', ['\u{13a0}', '\u{0}', '\u{0}']),
+ ('\u{ab71}', ['\u{13a1}', '\u{0}', '\u{0}']), ('\u{ab72}', ['\u{13a2}', '\u{0}', '\u{0}']),
+ ('\u{ab73}', ['\u{13a3}', '\u{0}', '\u{0}']), ('\u{ab74}', ['\u{13a4}', '\u{0}', '\u{0}']),
+ ('\u{ab75}', ['\u{13a5}', '\u{0}', '\u{0}']), ('\u{ab76}', ['\u{13a6}', '\u{0}', '\u{0}']),
+ ('\u{ab77}', ['\u{13a7}', '\u{0}', '\u{0}']), ('\u{ab78}', ['\u{13a8}', '\u{0}', '\u{0}']),
+ ('\u{ab79}', ['\u{13a9}', '\u{0}', '\u{0}']), ('\u{ab7a}', ['\u{13aa}', '\u{0}', '\u{0}']),
+ ('\u{ab7b}', ['\u{13ab}', '\u{0}', '\u{0}']), ('\u{ab7c}', ['\u{13ac}', '\u{0}', '\u{0}']),
+ ('\u{ab7d}', ['\u{13ad}', '\u{0}', '\u{0}']), ('\u{ab7e}', ['\u{13ae}', '\u{0}', '\u{0}']),
+ ('\u{ab7f}', ['\u{13af}', '\u{0}', '\u{0}']), ('\u{ab80}', ['\u{13b0}', '\u{0}', '\u{0}']),
+ ('\u{ab81}', ['\u{13b1}', '\u{0}', '\u{0}']), ('\u{ab82}', ['\u{13b2}', '\u{0}', '\u{0}']),
+ ('\u{ab83}', ['\u{13b3}', '\u{0}', '\u{0}']), ('\u{ab84}', ['\u{13b4}', '\u{0}', '\u{0}']),
+ ('\u{ab85}', ['\u{13b5}', '\u{0}', '\u{0}']), ('\u{ab86}', ['\u{13b6}', '\u{0}', '\u{0}']),
+ ('\u{ab87}', ['\u{13b7}', '\u{0}', '\u{0}']), ('\u{ab88}', ['\u{13b8}', '\u{0}', '\u{0}']),
+ ('\u{ab89}', ['\u{13b9}', '\u{0}', '\u{0}']), ('\u{ab8a}', ['\u{13ba}', '\u{0}', '\u{0}']),
+ ('\u{ab8b}', ['\u{13bb}', '\u{0}', '\u{0}']), ('\u{ab8c}', ['\u{13bc}', '\u{0}', '\u{0}']),
+ ('\u{ab8d}', ['\u{13bd}', '\u{0}', '\u{0}']), ('\u{ab8e}', ['\u{13be}', '\u{0}', '\u{0}']),
+ ('\u{ab8f}', ['\u{13bf}', '\u{0}', '\u{0}']), ('\u{ab90}', ['\u{13c0}', '\u{0}', '\u{0}']),
+ ('\u{ab91}', ['\u{13c1}', '\u{0}', '\u{0}']), ('\u{ab92}', ['\u{13c2}', '\u{0}', '\u{0}']),
+ ('\u{ab93}', ['\u{13c3}', '\u{0}', '\u{0}']), ('\u{ab94}', ['\u{13c4}', '\u{0}', '\u{0}']),
+ ('\u{ab95}', ['\u{13c5}', '\u{0}', '\u{0}']), ('\u{ab96}', ['\u{13c6}', '\u{0}', '\u{0}']),
+ ('\u{ab97}', ['\u{13c7}', '\u{0}', '\u{0}']), ('\u{ab98}', ['\u{13c8}', '\u{0}', '\u{0}']),
+ ('\u{ab99}', ['\u{13c9}', '\u{0}', '\u{0}']), ('\u{ab9a}', ['\u{13ca}', '\u{0}', '\u{0}']),
+ ('\u{ab9b}', ['\u{13cb}', '\u{0}', '\u{0}']), ('\u{ab9c}', ['\u{13cc}', '\u{0}', '\u{0}']),
+ ('\u{ab9d}', ['\u{13cd}', '\u{0}', '\u{0}']), ('\u{ab9e}', ['\u{13ce}', '\u{0}', '\u{0}']),
+ ('\u{ab9f}', ['\u{13cf}', '\u{0}', '\u{0}']), ('\u{aba0}', ['\u{13d0}', '\u{0}', '\u{0}']),
+ ('\u{aba1}', ['\u{13d1}', '\u{0}', '\u{0}']), ('\u{aba2}', ['\u{13d2}', '\u{0}', '\u{0}']),
+ ('\u{aba3}', ['\u{13d3}', '\u{0}', '\u{0}']), ('\u{aba4}', ['\u{13d4}', '\u{0}', '\u{0}']),
+ ('\u{aba5}', ['\u{13d5}', '\u{0}', '\u{0}']), ('\u{aba6}', ['\u{13d6}', '\u{0}', '\u{0}']),
+ ('\u{aba7}', ['\u{13d7}', '\u{0}', '\u{0}']), ('\u{aba8}', ['\u{13d8}', '\u{0}', '\u{0}']),
+ ('\u{aba9}', ['\u{13d9}', '\u{0}', '\u{0}']), ('\u{abaa}', ['\u{13da}', '\u{0}', '\u{0}']),
+ ('\u{abab}', ['\u{13db}', '\u{0}', '\u{0}']), ('\u{abac}', ['\u{13dc}', '\u{0}', '\u{0}']),
+ ('\u{abad}', ['\u{13dd}', '\u{0}', '\u{0}']), ('\u{abae}', ['\u{13de}', '\u{0}', '\u{0}']),
+ ('\u{abaf}', ['\u{13df}', '\u{0}', '\u{0}']), ('\u{abb0}', ['\u{13e0}', '\u{0}', '\u{0}']),
+ ('\u{abb1}', ['\u{13e1}', '\u{0}', '\u{0}']), ('\u{abb2}', ['\u{13e2}', '\u{0}', '\u{0}']),
+ ('\u{abb3}', ['\u{13e3}', '\u{0}', '\u{0}']), ('\u{abb4}', ['\u{13e4}', '\u{0}', '\u{0}']),
+ ('\u{abb5}', ['\u{13e5}', '\u{0}', '\u{0}']), ('\u{abb6}', ['\u{13e6}', '\u{0}', '\u{0}']),
+ ('\u{abb7}', ['\u{13e7}', '\u{0}', '\u{0}']), ('\u{abb8}', ['\u{13e8}', '\u{0}', '\u{0}']),
+ ('\u{abb9}', ['\u{13e9}', '\u{0}', '\u{0}']), ('\u{abba}', ['\u{13ea}', '\u{0}', '\u{0}']),
+ ('\u{abbb}', ['\u{13eb}', '\u{0}', '\u{0}']), ('\u{abbc}', ['\u{13ec}', '\u{0}', '\u{0}']),
+ ('\u{abbd}', ['\u{13ed}', '\u{0}', '\u{0}']), ('\u{abbe}', ['\u{13ee}', '\u{0}', '\u{0}']),
+ ('\u{abbf}', ['\u{13ef}', '\u{0}', '\u{0}']), ('\u{fb00}', ['F', 'F', '\u{0}']),
+ ('\u{fb01}', ['F', 'I', '\u{0}']), ('\u{fb02}', ['F', 'L', '\u{0}']),
+ ('\u{fb03}', ['F', 'F', 'I']), ('\u{fb04}', ['F', 'F', 'L']),
+ ('\u{fb05}', ['S', 'T', '\u{0}']), ('\u{fb06}', ['S', 'T', '\u{0}']),
+ ('\u{fb13}', ['\u{544}', '\u{546}', '\u{0}']),
+ ('\u{fb14}', ['\u{544}', '\u{535}', '\u{0}']),
+ ('\u{fb15}', ['\u{544}', '\u{53b}', '\u{0}']),
+ ('\u{fb16}', ['\u{54e}', '\u{546}', '\u{0}']),
+ ('\u{fb17}', ['\u{544}', '\u{53d}', '\u{0}']), ('\u{ff41}', ['\u{ff21}', '\u{0}', '\u{0}']),
+ ('\u{ff42}', ['\u{ff22}', '\u{0}', '\u{0}']), ('\u{ff43}', ['\u{ff23}', '\u{0}', '\u{0}']),
+ ('\u{ff44}', ['\u{ff24}', '\u{0}', '\u{0}']), ('\u{ff45}', ['\u{ff25}', '\u{0}', '\u{0}']),
+ ('\u{ff46}', ['\u{ff26}', '\u{0}', '\u{0}']), ('\u{ff47}', ['\u{ff27}', '\u{0}', '\u{0}']),
+ ('\u{ff48}', ['\u{ff28}', '\u{0}', '\u{0}']), ('\u{ff49}', ['\u{ff29}', '\u{0}', '\u{0}']),
+ ('\u{ff4a}', ['\u{ff2a}', '\u{0}', '\u{0}']), ('\u{ff4b}', ['\u{ff2b}', '\u{0}', '\u{0}']),
+ ('\u{ff4c}', ['\u{ff2c}', '\u{0}', '\u{0}']), ('\u{ff4d}', ['\u{ff2d}', '\u{0}', '\u{0}']),
+ ('\u{ff4e}', ['\u{ff2e}', '\u{0}', '\u{0}']), ('\u{ff4f}', ['\u{ff2f}', '\u{0}', '\u{0}']),
+ ('\u{ff50}', ['\u{ff30}', '\u{0}', '\u{0}']), ('\u{ff51}', ['\u{ff31}', '\u{0}', '\u{0}']),
+ ('\u{ff52}', ['\u{ff32}', '\u{0}', '\u{0}']), ('\u{ff53}', ['\u{ff33}', '\u{0}', '\u{0}']),
+ ('\u{ff54}', ['\u{ff34}', '\u{0}', '\u{0}']), ('\u{ff55}', ['\u{ff35}', '\u{0}', '\u{0}']),
+ ('\u{ff56}', ['\u{ff36}', '\u{0}', '\u{0}']), ('\u{ff57}', ['\u{ff37}', '\u{0}', '\u{0}']),
+ ('\u{ff58}', ['\u{ff38}', '\u{0}', '\u{0}']), ('\u{ff59}', ['\u{ff39}', '\u{0}', '\u{0}']),
+ ('\u{ff5a}', ['\u{ff3a}', '\u{0}', '\u{0}']),
+ ('\u{10428}', ['\u{10400}', '\u{0}', '\u{0}']),
+ ('\u{10429}', ['\u{10401}', '\u{0}', '\u{0}']),
+ ('\u{1042a}', ['\u{10402}', '\u{0}', '\u{0}']),
+ ('\u{1042b}', ['\u{10403}', '\u{0}', '\u{0}']),
+ ('\u{1042c}', ['\u{10404}', '\u{0}', '\u{0}']),
+ ('\u{1042d}', ['\u{10405}', '\u{0}', '\u{0}']),
+ ('\u{1042e}', ['\u{10406}', '\u{0}', '\u{0}']),
+ ('\u{1042f}', ['\u{10407}', '\u{0}', '\u{0}']),
+ ('\u{10430}', ['\u{10408}', '\u{0}', '\u{0}']),
+ ('\u{10431}', ['\u{10409}', '\u{0}', '\u{0}']),
+ ('\u{10432}', ['\u{1040a}', '\u{0}', '\u{0}']),
+ ('\u{10433}', ['\u{1040b}', '\u{0}', '\u{0}']),
+ ('\u{10434}', ['\u{1040c}', '\u{0}', '\u{0}']),
+ ('\u{10435}', ['\u{1040d}', '\u{0}', '\u{0}']),
+ ('\u{10436}', ['\u{1040e}', '\u{0}', '\u{0}']),
+ ('\u{10437}', ['\u{1040f}', '\u{0}', '\u{0}']),
+ ('\u{10438}', ['\u{10410}', '\u{0}', '\u{0}']),
+ ('\u{10439}', ['\u{10411}', '\u{0}', '\u{0}']),
+ ('\u{1043a}', ['\u{10412}', '\u{0}', '\u{0}']),
+ ('\u{1043b}', ['\u{10413}', '\u{0}', '\u{0}']),
+ ('\u{1043c}', ['\u{10414}', '\u{0}', '\u{0}']),
+ ('\u{1043d}', ['\u{10415}', '\u{0}', '\u{0}']),
+ ('\u{1043e}', ['\u{10416}', '\u{0}', '\u{0}']),
+ ('\u{1043f}', ['\u{10417}', '\u{0}', '\u{0}']),
+ ('\u{10440}', ['\u{10418}', '\u{0}', '\u{0}']),
+ ('\u{10441}', ['\u{10419}', '\u{0}', '\u{0}']),
+ ('\u{10442}', ['\u{1041a}', '\u{0}', '\u{0}']),
+ ('\u{10443}', ['\u{1041b}', '\u{0}', '\u{0}']),
+ ('\u{10444}', ['\u{1041c}', '\u{0}', '\u{0}']),
+ ('\u{10445}', ['\u{1041d}', '\u{0}', '\u{0}']),
+ ('\u{10446}', ['\u{1041e}', '\u{0}', '\u{0}']),
+ ('\u{10447}', ['\u{1041f}', '\u{0}', '\u{0}']),
+ ('\u{10448}', ['\u{10420}', '\u{0}', '\u{0}']),
+ ('\u{10449}', ['\u{10421}', '\u{0}', '\u{0}']),
+ ('\u{1044a}', ['\u{10422}', '\u{0}', '\u{0}']),
+ ('\u{1044b}', ['\u{10423}', '\u{0}', '\u{0}']),
+ ('\u{1044c}', ['\u{10424}', '\u{0}', '\u{0}']),
+ ('\u{1044d}', ['\u{10425}', '\u{0}', '\u{0}']),
+ ('\u{1044e}', ['\u{10426}', '\u{0}', '\u{0}']),
+ ('\u{1044f}', ['\u{10427}', '\u{0}', '\u{0}']),
+ ('\u{104d8}', ['\u{104b0}', '\u{0}', '\u{0}']),
+ ('\u{104d9}', ['\u{104b1}', '\u{0}', '\u{0}']),
+ ('\u{104da}', ['\u{104b2}', '\u{0}', '\u{0}']),
+ ('\u{104db}', ['\u{104b3}', '\u{0}', '\u{0}']),
+ ('\u{104dc}', ['\u{104b4}', '\u{0}', '\u{0}']),
+ ('\u{104dd}', ['\u{104b5}', '\u{0}', '\u{0}']),
+ ('\u{104de}', ['\u{104b6}', '\u{0}', '\u{0}']),
+ ('\u{104df}', ['\u{104b7}', '\u{0}', '\u{0}']),
+ ('\u{104e0}', ['\u{104b8}', '\u{0}', '\u{0}']),
+ ('\u{104e1}', ['\u{104b9}', '\u{0}', '\u{0}']),
+ ('\u{104e2}', ['\u{104ba}', '\u{0}', '\u{0}']),
+ ('\u{104e3}', ['\u{104bb}', '\u{0}', '\u{0}']),
+ ('\u{104e4}', ['\u{104bc}', '\u{0}', '\u{0}']),
+ ('\u{104e5}', ['\u{104bd}', '\u{0}', '\u{0}']),
+ ('\u{104e6}', ['\u{104be}', '\u{0}', '\u{0}']),
+ ('\u{104e7}', ['\u{104bf}', '\u{0}', '\u{0}']),
+ ('\u{104e8}', ['\u{104c0}', '\u{0}', '\u{0}']),
+ ('\u{104e9}', ['\u{104c1}', '\u{0}', '\u{0}']),
+ ('\u{104ea}', ['\u{104c2}', '\u{0}', '\u{0}']),
+ ('\u{104eb}', ['\u{104c3}', '\u{0}', '\u{0}']),
+ ('\u{104ec}', ['\u{104c4}', '\u{0}', '\u{0}']),
+ ('\u{104ed}', ['\u{104c5}', '\u{0}', '\u{0}']),
+ ('\u{104ee}', ['\u{104c6}', '\u{0}', '\u{0}']),
+ ('\u{104ef}', ['\u{104c7}', '\u{0}', '\u{0}']),
+ ('\u{104f0}', ['\u{104c8}', '\u{0}', '\u{0}']),
+ ('\u{104f1}', ['\u{104c9}', '\u{0}', '\u{0}']),
+ ('\u{104f2}', ['\u{104ca}', '\u{0}', '\u{0}']),
+ ('\u{104f3}', ['\u{104cb}', '\u{0}', '\u{0}']),
+ ('\u{104f4}', ['\u{104cc}', '\u{0}', '\u{0}']),
+ ('\u{104f5}', ['\u{104cd}', '\u{0}', '\u{0}']),
+ ('\u{104f6}', ['\u{104ce}', '\u{0}', '\u{0}']),
+ ('\u{104f7}', ['\u{104cf}', '\u{0}', '\u{0}']),
+ ('\u{104f8}', ['\u{104d0}', '\u{0}', '\u{0}']),
+ ('\u{104f9}', ['\u{104d1}', '\u{0}', '\u{0}']),
+ ('\u{104fa}', ['\u{104d2}', '\u{0}', '\u{0}']),
+ ('\u{104fb}', ['\u{104d3}', '\u{0}', '\u{0}']),
+ ('\u{10597}', ['\u{10570}', '\u{0}', '\u{0}']),
+ ('\u{10598}', ['\u{10571}', '\u{0}', '\u{0}']),
+ ('\u{10599}', ['\u{10572}', '\u{0}', '\u{0}']),
+ ('\u{1059a}', ['\u{10573}', '\u{0}', '\u{0}']),
+ ('\u{1059b}', ['\u{10574}', '\u{0}', '\u{0}']),
+ ('\u{1059c}', ['\u{10575}', '\u{0}', '\u{0}']),
+ ('\u{1059d}', ['\u{10576}', '\u{0}', '\u{0}']),
+ ('\u{1059e}', ['\u{10577}', '\u{0}', '\u{0}']),
+ ('\u{1059f}', ['\u{10578}', '\u{0}', '\u{0}']),
+ ('\u{105a0}', ['\u{10579}', '\u{0}', '\u{0}']),
+ ('\u{105a1}', ['\u{1057a}', '\u{0}', '\u{0}']),
+ ('\u{105a3}', ['\u{1057c}', '\u{0}', '\u{0}']),
+ ('\u{105a4}', ['\u{1057d}', '\u{0}', '\u{0}']),
+ ('\u{105a5}', ['\u{1057e}', '\u{0}', '\u{0}']),
+ ('\u{105a6}', ['\u{1057f}', '\u{0}', '\u{0}']),
+ ('\u{105a7}', ['\u{10580}', '\u{0}', '\u{0}']),
+ ('\u{105a8}', ['\u{10581}', '\u{0}', '\u{0}']),
+ ('\u{105a9}', ['\u{10582}', '\u{0}', '\u{0}']),
+ ('\u{105aa}', ['\u{10583}', '\u{0}', '\u{0}']),
+ ('\u{105ab}', ['\u{10584}', '\u{0}', '\u{0}']),
+ ('\u{105ac}', ['\u{10585}', '\u{0}', '\u{0}']),
+ ('\u{105ad}', ['\u{10586}', '\u{0}', '\u{0}']),
+ ('\u{105ae}', ['\u{10587}', '\u{0}', '\u{0}']),
+ ('\u{105af}', ['\u{10588}', '\u{0}', '\u{0}']),
+ ('\u{105b0}', ['\u{10589}', '\u{0}', '\u{0}']),
+ ('\u{105b1}', ['\u{1058a}', '\u{0}', '\u{0}']),
+ ('\u{105b3}', ['\u{1058c}', '\u{0}', '\u{0}']),
+ ('\u{105b4}', ['\u{1058d}', '\u{0}', '\u{0}']),
+ ('\u{105b5}', ['\u{1058e}', '\u{0}', '\u{0}']),
+ ('\u{105b6}', ['\u{1058f}', '\u{0}', '\u{0}']),
+ ('\u{105b7}', ['\u{10590}', '\u{0}', '\u{0}']),
+ ('\u{105b8}', ['\u{10591}', '\u{0}', '\u{0}']),
+ ('\u{105b9}', ['\u{10592}', '\u{0}', '\u{0}']),
+ ('\u{105bb}', ['\u{10594}', '\u{0}', '\u{0}']),
+ ('\u{105bc}', ['\u{10595}', '\u{0}', '\u{0}']),
+ ('\u{10cc0}', ['\u{10c80}', '\u{0}', '\u{0}']),
+ ('\u{10cc1}', ['\u{10c81}', '\u{0}', '\u{0}']),
+ ('\u{10cc2}', ['\u{10c82}', '\u{0}', '\u{0}']),
+ ('\u{10cc3}', ['\u{10c83}', '\u{0}', '\u{0}']),
+ ('\u{10cc4}', ['\u{10c84}', '\u{0}', '\u{0}']),
+ ('\u{10cc5}', ['\u{10c85}', '\u{0}', '\u{0}']),
+ ('\u{10cc6}', ['\u{10c86}', '\u{0}', '\u{0}']),
+ ('\u{10cc7}', ['\u{10c87}', '\u{0}', '\u{0}']),
+ ('\u{10cc8}', ['\u{10c88}', '\u{0}', '\u{0}']),
+ ('\u{10cc9}', ['\u{10c89}', '\u{0}', '\u{0}']),
+ ('\u{10cca}', ['\u{10c8a}', '\u{0}', '\u{0}']),
+ ('\u{10ccb}', ['\u{10c8b}', '\u{0}', '\u{0}']),
+ ('\u{10ccc}', ['\u{10c8c}', '\u{0}', '\u{0}']),
+ ('\u{10ccd}', ['\u{10c8d}', '\u{0}', '\u{0}']),
+ ('\u{10cce}', ['\u{10c8e}', '\u{0}', '\u{0}']),
+ ('\u{10ccf}', ['\u{10c8f}', '\u{0}', '\u{0}']),
+ ('\u{10cd0}', ['\u{10c90}', '\u{0}', '\u{0}']),
+ ('\u{10cd1}', ['\u{10c91}', '\u{0}', '\u{0}']),
+ ('\u{10cd2}', ['\u{10c92}', '\u{0}', '\u{0}']),
+ ('\u{10cd3}', ['\u{10c93}', '\u{0}', '\u{0}']),
+ ('\u{10cd4}', ['\u{10c94}', '\u{0}', '\u{0}']),
+ ('\u{10cd5}', ['\u{10c95}', '\u{0}', '\u{0}']),
+ ('\u{10cd6}', ['\u{10c96}', '\u{0}', '\u{0}']),
+ ('\u{10cd7}', ['\u{10c97}', '\u{0}', '\u{0}']),
+ ('\u{10cd8}', ['\u{10c98}', '\u{0}', '\u{0}']),
+ ('\u{10cd9}', ['\u{10c99}', '\u{0}', '\u{0}']),
+ ('\u{10cda}', ['\u{10c9a}', '\u{0}', '\u{0}']),
+ ('\u{10cdb}', ['\u{10c9b}', '\u{0}', '\u{0}']),
+ ('\u{10cdc}', ['\u{10c9c}', '\u{0}', '\u{0}']),
+ ('\u{10cdd}', ['\u{10c9d}', '\u{0}', '\u{0}']),
+ ('\u{10cde}', ['\u{10c9e}', '\u{0}', '\u{0}']),
+ ('\u{10cdf}', ['\u{10c9f}', '\u{0}', '\u{0}']),
+ ('\u{10ce0}', ['\u{10ca0}', '\u{0}', '\u{0}']),
+ ('\u{10ce1}', ['\u{10ca1}', '\u{0}', '\u{0}']),
+ ('\u{10ce2}', ['\u{10ca2}', '\u{0}', '\u{0}']),
+ ('\u{10ce3}', ['\u{10ca3}', '\u{0}', '\u{0}']),
+ ('\u{10ce4}', ['\u{10ca4}', '\u{0}', '\u{0}']),
+ ('\u{10ce5}', ['\u{10ca5}', '\u{0}', '\u{0}']),
+ ('\u{10ce6}', ['\u{10ca6}', '\u{0}', '\u{0}']),
+ ('\u{10ce7}', ['\u{10ca7}', '\u{0}', '\u{0}']),
+ ('\u{10ce8}', ['\u{10ca8}', '\u{0}', '\u{0}']),
+ ('\u{10ce9}', ['\u{10ca9}', '\u{0}', '\u{0}']),
+ ('\u{10cea}', ['\u{10caa}', '\u{0}', '\u{0}']),
+ ('\u{10ceb}', ['\u{10cab}', '\u{0}', '\u{0}']),
+ ('\u{10cec}', ['\u{10cac}', '\u{0}', '\u{0}']),
+ ('\u{10ced}', ['\u{10cad}', '\u{0}', '\u{0}']),
+ ('\u{10cee}', ['\u{10cae}', '\u{0}', '\u{0}']),
+ ('\u{10cef}', ['\u{10caf}', '\u{0}', '\u{0}']),
+ ('\u{10cf0}', ['\u{10cb0}', '\u{0}', '\u{0}']),
+ ('\u{10cf1}', ['\u{10cb1}', '\u{0}', '\u{0}']),
+ ('\u{10cf2}', ['\u{10cb2}', '\u{0}', '\u{0}']),
+ ('\u{118c0}', ['\u{118a0}', '\u{0}', '\u{0}']),
+ ('\u{118c1}', ['\u{118a1}', '\u{0}', '\u{0}']),
+ ('\u{118c2}', ['\u{118a2}', '\u{0}', '\u{0}']),
+ ('\u{118c3}', ['\u{118a3}', '\u{0}', '\u{0}']),
+ ('\u{118c4}', ['\u{118a4}', '\u{0}', '\u{0}']),
+ ('\u{118c5}', ['\u{118a5}', '\u{0}', '\u{0}']),
+ ('\u{118c6}', ['\u{118a6}', '\u{0}', '\u{0}']),
+ ('\u{118c7}', ['\u{118a7}', '\u{0}', '\u{0}']),
+ ('\u{118c8}', ['\u{118a8}', '\u{0}', '\u{0}']),
+ ('\u{118c9}', ['\u{118a9}', '\u{0}', '\u{0}']),
+ ('\u{118ca}', ['\u{118aa}', '\u{0}', '\u{0}']),
+ ('\u{118cb}', ['\u{118ab}', '\u{0}', '\u{0}']),
+ ('\u{118cc}', ['\u{118ac}', '\u{0}', '\u{0}']),
+ ('\u{118cd}', ['\u{118ad}', '\u{0}', '\u{0}']),
+ ('\u{118ce}', ['\u{118ae}', '\u{0}', '\u{0}']),
+ ('\u{118cf}', ['\u{118af}', '\u{0}', '\u{0}']),
+ ('\u{118d0}', ['\u{118b0}', '\u{0}', '\u{0}']),
+ ('\u{118d1}', ['\u{118b1}', '\u{0}', '\u{0}']),
+ ('\u{118d2}', ['\u{118b2}', '\u{0}', '\u{0}']),
+ ('\u{118d3}', ['\u{118b3}', '\u{0}', '\u{0}']),
+ ('\u{118d4}', ['\u{118b4}', '\u{0}', '\u{0}']),
+ ('\u{118d5}', ['\u{118b5}', '\u{0}', '\u{0}']),
+ ('\u{118d6}', ['\u{118b6}', '\u{0}', '\u{0}']),
+ ('\u{118d7}', ['\u{118b7}', '\u{0}', '\u{0}']),
+ ('\u{118d8}', ['\u{118b8}', '\u{0}', '\u{0}']),
+ ('\u{118d9}', ['\u{118b9}', '\u{0}', '\u{0}']),
+ ('\u{118da}', ['\u{118ba}', '\u{0}', '\u{0}']),
+ ('\u{118db}', ['\u{118bb}', '\u{0}', '\u{0}']),
+ ('\u{118dc}', ['\u{118bc}', '\u{0}', '\u{0}']),
+ ('\u{118dd}', ['\u{118bd}', '\u{0}', '\u{0}']),
+ ('\u{118de}', ['\u{118be}', '\u{0}', '\u{0}']),
+ ('\u{118df}', ['\u{118bf}', '\u{0}', '\u{0}']),
+ ('\u{16e60}', ['\u{16e40}', '\u{0}', '\u{0}']),
+ ('\u{16e61}', ['\u{16e41}', '\u{0}', '\u{0}']),
+ ('\u{16e62}', ['\u{16e42}', '\u{0}', '\u{0}']),
+ ('\u{16e63}', ['\u{16e43}', '\u{0}', '\u{0}']),
+ ('\u{16e64}', ['\u{16e44}', '\u{0}', '\u{0}']),
+ ('\u{16e65}', ['\u{16e45}', '\u{0}', '\u{0}']),
+ ('\u{16e66}', ['\u{16e46}', '\u{0}', '\u{0}']),
+ ('\u{16e67}', ['\u{16e47}', '\u{0}', '\u{0}']),
+ ('\u{16e68}', ['\u{16e48}', '\u{0}', '\u{0}']),
+ ('\u{16e69}', ['\u{16e49}', '\u{0}', '\u{0}']),
+ ('\u{16e6a}', ['\u{16e4a}', '\u{0}', '\u{0}']),
+ ('\u{16e6b}', ['\u{16e4b}', '\u{0}', '\u{0}']),
+ ('\u{16e6c}', ['\u{16e4c}', '\u{0}', '\u{0}']),
+ ('\u{16e6d}', ['\u{16e4d}', '\u{0}', '\u{0}']),
+ ('\u{16e6e}', ['\u{16e4e}', '\u{0}', '\u{0}']),
+ ('\u{16e6f}', ['\u{16e4f}', '\u{0}', '\u{0}']),
+ ('\u{16e70}', ['\u{16e50}', '\u{0}', '\u{0}']),
+ ('\u{16e71}', ['\u{16e51}', '\u{0}', '\u{0}']),
+ ('\u{16e72}', ['\u{16e52}', '\u{0}', '\u{0}']),
+ ('\u{16e73}', ['\u{16e53}', '\u{0}', '\u{0}']),
+ ('\u{16e74}', ['\u{16e54}', '\u{0}', '\u{0}']),
+ ('\u{16e75}', ['\u{16e55}', '\u{0}', '\u{0}']),
+ ('\u{16e76}', ['\u{16e56}', '\u{0}', '\u{0}']),
+ ('\u{16e77}', ['\u{16e57}', '\u{0}', '\u{0}']),
+ ('\u{16e78}', ['\u{16e58}', '\u{0}', '\u{0}']),
+ ('\u{16e79}', ['\u{16e59}', '\u{0}', '\u{0}']),
+ ('\u{16e7a}', ['\u{16e5a}', '\u{0}', '\u{0}']),
+ ('\u{16e7b}', ['\u{16e5b}', '\u{0}', '\u{0}']),
+ ('\u{16e7c}', ['\u{16e5c}', '\u{0}', '\u{0}']),
+ ('\u{16e7d}', ['\u{16e5d}', '\u{0}', '\u{0}']),
+ ('\u{16e7e}', ['\u{16e5e}', '\u{0}', '\u{0}']),
+ ('\u{16e7f}', ['\u{16e5f}', '\u{0}', '\u{0}']),
+ ('\u{1e922}', ['\u{1e900}', '\u{0}', '\u{0}']),
+ ('\u{1e923}', ['\u{1e901}', '\u{0}', '\u{0}']),
+ ('\u{1e924}', ['\u{1e902}', '\u{0}', '\u{0}']),
+ ('\u{1e925}', ['\u{1e903}', '\u{0}', '\u{0}']),
+ ('\u{1e926}', ['\u{1e904}', '\u{0}', '\u{0}']),
+ ('\u{1e927}', ['\u{1e905}', '\u{0}', '\u{0}']),
+ ('\u{1e928}', ['\u{1e906}', '\u{0}', '\u{0}']),
+ ('\u{1e929}', ['\u{1e907}', '\u{0}', '\u{0}']),
+ ('\u{1e92a}', ['\u{1e908}', '\u{0}', '\u{0}']),
+ ('\u{1e92b}', ['\u{1e909}', '\u{0}', '\u{0}']),
+ ('\u{1e92c}', ['\u{1e90a}', '\u{0}', '\u{0}']),
+ ('\u{1e92d}', ['\u{1e90b}', '\u{0}', '\u{0}']),
+ ('\u{1e92e}', ['\u{1e90c}', '\u{0}', '\u{0}']),
+ ('\u{1e92f}', ['\u{1e90d}', '\u{0}', '\u{0}']),
+ ('\u{1e930}', ['\u{1e90e}', '\u{0}', '\u{0}']),
+ ('\u{1e931}', ['\u{1e90f}', '\u{0}', '\u{0}']),
+ ('\u{1e932}', ['\u{1e910}', '\u{0}', '\u{0}']),
+ ('\u{1e933}', ['\u{1e911}', '\u{0}', '\u{0}']),
+ ('\u{1e934}', ['\u{1e912}', '\u{0}', '\u{0}']),
+ ('\u{1e935}', ['\u{1e913}', '\u{0}', '\u{0}']),
+ ('\u{1e936}', ['\u{1e914}', '\u{0}', '\u{0}']),
+ ('\u{1e937}', ['\u{1e915}', '\u{0}', '\u{0}']),
+ ('\u{1e938}', ['\u{1e916}', '\u{0}', '\u{0}']),
+ ('\u{1e939}', ['\u{1e917}', '\u{0}', '\u{0}']),
+ ('\u{1e93a}', ['\u{1e918}', '\u{0}', '\u{0}']),
+ ('\u{1e93b}', ['\u{1e919}', '\u{0}', '\u{0}']),
+ ('\u{1e93c}', ['\u{1e91a}', '\u{0}', '\u{0}']),
+ ('\u{1e93d}', ['\u{1e91b}', '\u{0}', '\u{0}']),
+ ('\u{1e93e}', ['\u{1e91c}', '\u{0}', '\u{0}']),
+ ('\u{1e93f}', ['\u{1e91d}', '\u{0}', '\u{0}']),
+ ('\u{1e940}', ['\u{1e91e}', '\u{0}', '\u{0}']),
+ ('\u{1e941}', ['\u{1e91f}', '\u{0}', '\u{0}']),
+ ('\u{1e942}', ['\u{1e920}', '\u{0}', '\u{0}']),
+ ('\u{1e943}', ['\u{1e921}', '\u{0}', '\u{0}']),
+ ];
+}
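
Review note: each entry in the table above maps one lowercase character to at most three uppercase characters, with '\u{0}' padding the unused slots (e.g. most one-to-one mappings, versus ligatures like '\u{fb00}' -> "FF"). A minimal sketch of how a sorted (char, [char; 3]) table like this can be consulted — the names TO_UPPERCASE_TABLE and lookup_upper are illustrative only, not the actual core internals, and the two sample rows are taken from the table above:

// Illustrative only: core's real conversion logic differs in detail.
const TO_UPPERCASE_TABLE: &[(char, [char; 3])] = &[
    ('\u{df}', ['S', 'S', '\u{0}']),   // ß uppercases to "SS"
    ('\u{fb00}', ['F', 'F', '\u{0}']), // ﬀ ligature uppercases to "FF"
];

// Binary search works because the table is sorted by the source char.
fn lookup_upper(c: char) -> Option<[char; 3]> {
    TO_UPPERCASE_TABLE
        .binary_search_by(|&(key, _)| key.cmp(&c))
        .ok()
        .map(|i| TO_UPPERCASE_TABLE[i].1)
}

fn main() {
    assert_eq!(lookup_upper('\u{df}'), Some(['S', 'S', '\u{0}']));
    // ASCII never hits the table; it is handled by a separate fast path.
    assert_eq!(lookup_upper('a'), None);
}
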
diff --git a/library/core/src/unit.rs b/library/core/src/unit.rs
new file mode 100644
index 000000000..6656dd5c4
--- /dev/null
+++ b/library/core/src/unit.rs
@@ -0,0 +1,21 @@
+use crate::iter::FromIterator;
+
+/// Collapses all unit items from an iterator into one.
+///
+/// This is more useful when combined with higher-level abstractions, like
+/// collecting to a `Result<(), E>` where you only care about errors:
+///
+/// ```
+/// use std::io::*;
+/// let data = vec![1, 2, 3, 4, 5];
+/// let res: Result<()> = data.iter()
+/// .map(|x| writeln!(stdout(), "{x}"))
+/// .collect();
+/// assert!(res.is_ok());
+/// ```
+#[stable(feature = "unit_from_iter", since = "1.23.0")]
+impl FromIterator<()> for () {
+ fn from_iter<I: IntoIterator<Item = ()>>(iter: I) -> Self {
+ iter.into_iter().for_each(|()| {})
+ }
+}
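
Review note: beyond the I/O example in the doc comment, the main payoff of this impl is short-circuiting — collecting an iterator of Result<(), E> into Result<(), E> stops at the first error, with the unit successes simply discarded. A quick sketch; the validate function below is hypothetical:

fn validate(x: i32) -> Result<(), String> {
    if x >= 0 { Ok(()) } else { Err(format!("negative value: {x}")) }
}

fn main() {
    // All Ok: the unit items collapse into a single Ok(()).
    let ok: Result<(), String> = [1, 2, 3].iter().copied().map(validate).collect();
    assert!(ok.is_ok());

    // The first Err short-circuits; later items are not validated.
    let err: Result<(), String> = [1, -2, 3].iter().copied().map(validate).collect();
    assert_eq!(err, Err("negative value: -2".to_string()));
}
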
diff --git a/library/core/tests/alloc.rs b/library/core/tests/alloc.rs
new file mode 100644
index 000000000..8a5a06b34
--- /dev/null
+++ b/library/core/tests/alloc.rs
@@ -0,0 +1,31 @@
+use core::alloc::Layout;
+use core::ptr::{self, NonNull};
+
+#[test]
+fn const_unchecked_layout() {
+ const SIZE: usize = 0x2000;
+ const ALIGN: usize = 0x1000;
+ const LAYOUT: Layout = unsafe { Layout::from_size_align_unchecked(SIZE, ALIGN) };
+ const DANGLING: NonNull<u8> = LAYOUT.dangling();
+ assert_eq!(LAYOUT.size(), SIZE);
+ assert_eq!(LAYOUT.align(), ALIGN);
+ assert_eq!(Some(DANGLING), NonNull::new(ptr::invalid_mut(ALIGN)));
+}
+
+#[test]
+fn layout_debug_shows_log2_of_alignment() {
+ // `Debug` is not stable, but here's what it does right now
+ let layout = Layout::from_size_align(24576, 8192).unwrap();
+ let s = format!("{:?}", layout);
+ assert_eq!(s, "Layout { size: 24576, align: 8192 (1 << 13) }");
+}
+
+// Running this normally doesn't do much, but it's also run in Miri, which
+// will double-check that these are allowed by the validity invariants.
+#[test]
+fn layout_accepts_all_valid_alignments() {
+ for align in 0..usize::BITS {
+ let layout = Layout::from_size_align(0, 1_usize << align).unwrap();
+ assert_eq!(layout.align(), 1_usize << align);
+ }
+}
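
Review note: the loop above covers every valid (power-of-two) alignment; the flip side, shown here as a complementary illustration rather than as part of the patch, is that Layout::from_size_align rejects alignments that are zero or not a power of two, and sizes that would overflow when rounded up to the alignment:

use core::alloc::Layout;

fn main() {
    // Alignment must be a nonzero power of two.
    assert!(Layout::from_size_align(8, 0).is_err());
    assert!(Layout::from_size_align(8, 3).is_err());

    // Size must not overflow when rounded up to a multiple of the alignment.
    assert!(Layout::from_size_align(usize::MAX, 16).is_err());

    // A well-formed request succeeds.
    assert!(Layout::from_size_align(24, 8).is_ok());
}
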
diff --git a/library/core/tests/any.rs b/library/core/tests/any.rs
new file mode 100644
index 000000000..8ed0c8880
--- /dev/null
+++ b/library/core/tests/any.rs
@@ -0,0 +1,194 @@
+use core::any::*;
+
+#[derive(PartialEq, Debug)]
+struct Test;
+
+static TEST: &'static str = "Test";
+
+#[test]
+fn any_referenced() {
+ let (a, b, c) = (&5 as &dyn Any, &TEST as &dyn Any, &Test as &dyn Any);
+
+ assert!(a.is::<i32>());
+ assert!(!b.is::<i32>());
+ assert!(!c.is::<i32>());
+
+ assert!(!a.is::<&'static str>());
+ assert!(b.is::<&'static str>());
+ assert!(!c.is::<&'static str>());
+
+ assert!(!a.is::<Test>());
+ assert!(!b.is::<Test>());
+ assert!(c.is::<Test>());
+}
+
+#[test]
+fn any_owning() {
+ let (a, b, c) = (
+ Box::new(5_usize) as Box<dyn Any>,
+ Box::new(TEST) as Box<dyn Any>,
+ Box::new(Test) as Box<dyn Any>,
+ );
+
+ assert!(a.is::<usize>());
+ assert!(!b.is::<usize>());
+ assert!(!c.is::<usize>());
+
+ assert!(!a.is::<&'static str>());
+ assert!(b.is::<&'static str>());
+ assert!(!c.is::<&'static str>());
+
+ assert!(!a.is::<Test>());
+ assert!(!b.is::<Test>());
+ assert!(c.is::<Test>());
+}
+
+#[test]
+fn any_downcast_ref() {
+ let a = &5_usize as &dyn Any;
+
+ match a.downcast_ref::<usize>() {
+ Some(&5) => {}
+ x => panic!("Unexpected value {x:?}"),
+ }
+
+ match a.downcast_ref::<Test>() {
+ None => {}
+ x => panic!("Unexpected value {x:?}"),
+ }
+}
+
+#[test]
+fn any_downcast_mut() {
+ let mut a = 5_usize;
+ let mut b: Box<_> = Box::new(7_usize);
+
+ let a_r = &mut a as &mut dyn Any;
+ let tmp: &mut usize = &mut *b;
+ let b_r = tmp as &mut dyn Any;
+
+ match a_r.downcast_mut::<usize>() {
+ Some(x) => {
+ assert_eq!(*x, 5);
+ *x = 612;
+ }
+ x => panic!("Unexpected value {x:?}"),
+ }
+
+ match b_r.downcast_mut::<usize>() {
+ Some(x) => {
+ assert_eq!(*x, 7);
+ *x = 413;
+ }
+ x => panic!("Unexpected value {x:?}"),
+ }
+
+ match a_r.downcast_mut::<Test>() {
+ None => (),
+ x => panic!("Unexpected value {x:?}"),
+ }
+
+ match b_r.downcast_mut::<Test>() {
+ None => (),
+ x => panic!("Unexpected value {x:?}"),
+ }
+
+ match a_r.downcast_mut::<usize>() {
+ Some(&mut 612) => {}
+ x => panic!("Unexpected value {x:?}"),
+ }
+
+ match b_r.downcast_mut::<usize>() {
+ Some(&mut 413) => {}
+ x => panic!("Unexpected value {x:?}"),
+ }
+}
+
+#[test]
+fn any_fixed_vec() {
+ let test = [0_usize; 8];
+ let test = &test as &dyn Any;
+ assert!(test.is::<[usize; 8]>());
+ assert!(!test.is::<[usize; 10]>());
+}
+
+#[test]
+fn any_unsized() {
+ fn is_any<T: Any + ?Sized>() {}
+ is_any::<[i32]>();
+}
+
+#[test]
+fn distinct_type_names() {
+ // https://github.com/rust-lang/rust/issues/84666
+
+ struct Velocity(f32, f32);
+
+ fn type_name_of_val<T>(_: T) -> &'static str {
+ type_name::<T>()
+ }
+
+ assert_ne!(type_name_of_val(Velocity), type_name_of_val(Velocity(0.0, -9.8)),);
+}
+
+// Test the `Provider` API.
+
+struct SomeConcreteType {
+ some_string: String,
+}
+
+impl Provider for SomeConcreteType {
+ fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
+ demand
+ .provide_ref::<String>(&self.some_string)
+ .provide_ref::<str>(&self.some_string)
+ .provide_value::<String>(|| "bye".to_owned());
+ }
+}
+
+// Test the provide and request mechanisms with a by-reference trait object.
+#[test]
+fn test_provider() {
+ let obj: &dyn Provider = &SomeConcreteType { some_string: "hello".to_owned() };
+
+ assert_eq!(&**request_ref::<String>(obj).unwrap(), "hello");
+ assert_eq!(&*request_value::<String>(obj).unwrap(), "bye");
+ assert_eq!(request_value::<u8>(obj), None);
+}
+
+// Test the provide and request mechanisms with a boxed trait object.
+#[test]
+fn test_provider_boxed() {
+ let obj: Box<dyn Provider> = Box::new(SomeConcreteType { some_string: "hello".to_owned() });
+
+ assert_eq!(&**request_ref::<String>(&*obj).unwrap(), "hello");
+ assert_eq!(&*request_value::<String>(&*obj).unwrap(), "bye");
+ assert_eq!(request_value::<u8>(&*obj), None);
+}
+
+// Test the provide and request mechanisms with a concrete object.
+#[test]
+fn test_provider_concrete() {
+ let obj = SomeConcreteType { some_string: "hello".to_owned() };
+
+ assert_eq!(&**request_ref::<String>(&obj).unwrap(), "hello");
+ assert_eq!(&*request_value::<String>(&obj).unwrap(), "bye");
+ assert_eq!(request_value::<u8>(&obj), None);
+}
+
+trait OtherTrait: Provider {}
+
+impl OtherTrait for SomeConcreteType {}
+
+impl dyn OtherTrait {
+ fn get_ref<T: 'static + ?Sized>(&self) -> Option<&T> {
+ request_ref::<T>(self)
+ }
+}
+
+// Test the provide and request mechanisms via an intermediate trait.
+#[test]
+fn test_provider_intermediate() {
+ let obj: &dyn OtherTrait = &SomeConcreteType { some_string: "hello".to_owned() };
+ assert_eq!(obj.get_ref::<str>().unwrap(), "hello");
+}
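
Review note: one design point these tests exercise only implicitly is that provide_value takes a closure, so the owned value ("bye" above) is constructed only if a caller actually requests a value of that type; requests for other types cost nothing. A minimal sketch under the same unstable provide_any API used in this test file (requires a nightly toolchain matching this version; the Expensive type is hypothetical):

#![feature(provide_any)]
use std::any::{request_value, Demand, Provider};

struct Expensive;

impl Provider for Expensive {
    fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
        // This closure runs only when a String value is actually requested,
        // so unrelated lookups never pay for the large allocation.
        demand.provide_value::<String>(|| "x".repeat(1_000_000));
    }
}

fn main() {
    let obj: &dyn Provider = &Expensive;
    // A request for a different type never invokes the closure.
    assert_eq!(request_value::<u8>(obj), None);
    // A matching request does.
    assert_eq!(request_value::<String>(obj).map(|s| s.len()), Some(1_000_000));
}
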
diff --git a/library/core/tests/array.rs b/library/core/tests/array.rs
new file mode 100644
index 000000000..f268fe3ae
--- /dev/null
+++ b/library/core/tests/array.rs
@@ -0,0 +1,702 @@
+use core::array;
+use core::convert::TryFrom;
+use core::sync::atomic::{AtomicUsize, Ordering};
+
+#[test]
+fn array_from_ref() {
+ let value: String = "Hello World!".into();
+ let arr: &[String; 1] = array::from_ref(&value);
+ assert_eq!(&[value.clone()], arr);
+
+ const VALUE: &&str = &"Hello World!";
+ const ARR: &[&str; 1] = array::from_ref(VALUE);
+ assert_eq!(&[*VALUE], ARR);
+ assert!(core::ptr::eq(VALUE, &ARR[0]));
+}
+
+#[test]
+fn array_from_mut() {
+ let mut value: String = "Hello World".into();
+ let arr: &mut [String; 1] = array::from_mut(&mut value);
+ arr[0].push_str("!");
+ assert_eq!(&value, "Hello World!");
+}
+
+#[test]
+fn array_try_from() {
+ macro_rules! test {
+ ($($N:expr)+) => {
+ $({
+ type Array = [u8; $N];
+ let mut array: Array = [0; $N];
+ let slice: &[u8] = &array[..];
+
+ let result = <&Array>::try_from(slice);
+ assert_eq!(&array, result.unwrap());
+
+ let result = <Array>::try_from(slice);
+ assert_eq!(&array, &result.unwrap());
+
+ let mut_slice: &mut [u8] = &mut array[..];
+ let result = <&mut Array>::try_from(mut_slice);
+ assert_eq!(&[0; $N], result.unwrap());
+
+ let mut_slice: &mut [u8] = &mut array[..];
+ let result = <Array>::try_from(mut_slice);
+ assert_eq!(&array, &result.unwrap());
+ })+
+ }
+ }
+ test! {
+ 0 1 2 3 4 5 6 7 8 9
+ 10 11 12 13 14 15 16 17 18 19
+ 20 21 22 23 24 25 26 27 28 29
+ 30 31 32
+ }
+}
+
+#[test]
+fn iterator_collect() {
+ let arr = [0, 1, 2, 5, 9];
+ let v: Vec<_> = IntoIterator::into_iter(arr.clone()).collect();
+ assert_eq!(&arr[..], &v[..]);
+}
+
+#[test]
+fn iterator_rev_collect() {
+ let arr = [0, 1, 2, 5, 9];
+ let v: Vec<_> = IntoIterator::into_iter(arr.clone()).rev().collect();
+ assert_eq!(&v[..], &[9, 5, 2, 1, 0]);
+}
+
+#[test]
+fn iterator_nth() {
+ let v = [0, 1, 2, 3, 4];
+ for i in 0..v.len() {
+ assert_eq!(IntoIterator::into_iter(v.clone()).nth(i).unwrap(), v[i]);
+ }
+ assert_eq!(IntoIterator::into_iter(v.clone()).nth(v.len()), None);
+
+ let mut iter = IntoIterator::into_iter(v);
+ assert_eq!(iter.nth(2).unwrap(), v[2]);
+ assert_eq!(iter.nth(1).unwrap(), v[4]);
+}
+
+#[test]
+fn iterator_last() {
+ let v = [0, 1, 2, 3, 4];
+ assert_eq!(IntoIterator::into_iter(v).last().unwrap(), 4);
+ assert_eq!(IntoIterator::into_iter([0]).last().unwrap(), 0);
+
+ let mut it = IntoIterator::into_iter([0, 9, 2, 4]);
+ assert_eq!(it.next_back(), Some(4));
+ assert_eq!(it.last(), Some(2));
+}
+
+#[test]
+fn iterator_clone() {
+ let mut it = IntoIterator::into_iter([0, 2, 4, 6, 8]);
+ assert_eq!(it.next(), Some(0));
+ assert_eq!(it.next_back(), Some(8));
+ let mut clone = it.clone();
+ assert_eq!(it.next_back(), Some(6));
+ assert_eq!(clone.next_back(), Some(6));
+ assert_eq!(it.next_back(), Some(4));
+ assert_eq!(clone.next_back(), Some(4));
+ assert_eq!(it.next(), Some(2));
+ assert_eq!(clone.next(), Some(2));
+}
+
+#[test]
+fn iterator_fused() {
+ let mut it = IntoIterator::into_iter([0, 9, 2]);
+ assert_eq!(it.next(), Some(0));
+ assert_eq!(it.next(), Some(9));
+ assert_eq!(it.next(), Some(2));
+ assert_eq!(it.next(), None);
+ assert_eq!(it.next(), None);
+ assert_eq!(it.next(), None);
+ assert_eq!(it.next(), None);
+ assert_eq!(it.next(), None);
+}
+
+#[test]
+fn iterator_len() {
+ let mut it = IntoIterator::into_iter([0, 1, 2, 5, 9]);
+ assert_eq!(it.size_hint(), (5, Some(5)));
+ assert_eq!(it.len(), 5);
+ assert_eq!(it.is_empty(), false);
+
+ assert_eq!(it.next(), Some(0));
+ assert_eq!(it.size_hint(), (4, Some(4)));
+ assert_eq!(it.len(), 4);
+ assert_eq!(it.is_empty(), false);
+
+ assert_eq!(it.next_back(), Some(9));
+ assert_eq!(it.size_hint(), (3, Some(3)));
+ assert_eq!(it.len(), 3);
+ assert_eq!(it.is_empty(), false);
+
+ // Empty
+ let it = IntoIterator::into_iter([] as [String; 0]);
+ assert_eq!(it.size_hint(), (0, Some(0)));
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.is_empty(), true);
+}
+
+#[test]
+fn iterator_count() {
+ let v = [0, 1, 2, 3, 4];
+ assert_eq!(IntoIterator::into_iter(v.clone()).count(), 5);
+
+ let mut iter2 = IntoIterator::into_iter(v);
+ iter2.next();
+ iter2.next();
+ assert_eq!(iter2.count(), 3);
+}
+
+#[test]
+fn iterator_flat_map() {
+ assert!((0..5).flat_map(|i| IntoIterator::into_iter([2 * i, 2 * i + 1])).eq(0..10));
+}
+
+#[test]
+fn iterator_debug() {
+ let arr = [0, 1, 2, 5, 9];
+ assert_eq!(format!("{:?}", IntoIterator::into_iter(arr)), "IntoIter([0, 1, 2, 5, 9])",);
+}
+
+#[test]
+fn iterator_drops() {
+ use core::cell::Cell;
+
+    // This test makes sure the correct number of elements are dropped. The
+    // `Foo` type holds a reference to a `Cell` that is incremented whenever a
+    // `Foo` is dropped.
+
+ #[derive(Clone)]
+ struct Foo<'a>(&'a Cell<usize>);
+
+ impl Drop for Foo<'_> {
+ fn drop(&mut self) {
+ self.0.set(self.0.get() + 1);
+ }
+ }
+
+ fn five(i: &Cell<usize>) -> [Foo<'_>; 5] {
+ // This is somewhat verbose because `Foo` does not implement `Copy`
+ // since it implements `Drop`. Consequently, we cannot write
+ // `[Foo(i); 5]`.
+ [Foo(i), Foo(i), Foo(i), Foo(i), Foo(i)]
+ }
+
+ // Simple: drop new iterator.
+ let i = Cell::new(0);
+ {
+ IntoIterator::into_iter(five(&i));
+ }
+ assert_eq!(i.get(), 5);
+
+ // Call `next()` once.
+ let i = Cell::new(0);
+ {
+ let mut iter = IntoIterator::into_iter(five(&i));
+ let _x = iter.next();
+ assert_eq!(i.get(), 0);
+ assert_eq!(iter.count(), 4);
+ assert_eq!(i.get(), 4);
+ }
+ assert_eq!(i.get(), 5);
+
+ // Check `clone` and calling `next`/`next_back`.
+ let i = Cell::new(0);
+ {
+ let mut iter = IntoIterator::into_iter(five(&i));
+ iter.next();
+ assert_eq!(i.get(), 1);
+ iter.next_back();
+ assert_eq!(i.get(), 2);
+
+ let mut clone = iter.clone();
+ assert_eq!(i.get(), 2);
+
+ iter.next();
+ assert_eq!(i.get(), 3);
+
+ clone.next();
+ assert_eq!(i.get(), 4);
+
+ assert_eq!(clone.count(), 2);
+ assert_eq!(i.get(), 6);
+ }
+ assert_eq!(i.get(), 8);
+
+ // Check via `nth`.
+ let i = Cell::new(0);
+ {
+ let mut iter = IntoIterator::into_iter(five(&i));
+ let _x = iter.nth(2);
+ assert_eq!(i.get(), 2);
+ let _y = iter.last();
+ assert_eq!(i.get(), 3);
+ }
+ assert_eq!(i.get(), 5);
+
+ // Check every element.
+ let i = Cell::new(0);
+ for (index, _x) in IntoIterator::into_iter(five(&i)).enumerate() {
+ assert_eq!(i.get(), index);
+ }
+ assert_eq!(i.get(), 5);
+
+ let i = Cell::new(0);
+ for (index, _x) in IntoIterator::into_iter(five(&i)).rev().enumerate() {
+ assert_eq!(i.get(), index);
+ }
+ assert_eq!(i.get(), 5);
+}
+
+// This test does not work on targets without panic=unwind support.
+// To work around this problem, the test is marked as should_panic, so it
+// will be automatically skipped on unsuitable targets, such as
+// wasm32-unknown-unknown.
+//
+// This means that a panic is used to indicate success.
+#[test]
+#[should_panic(expected = "test succeeded")]
+fn array_default_impl_avoids_leaks_on_panic() {
+ use core::sync::atomic::{AtomicUsize, Ordering::Relaxed};
+ static COUNTER: AtomicUsize = AtomicUsize::new(0);
+ #[derive(Debug)]
+ struct Bomb(usize);
+
+ impl Default for Bomb {
+ fn default() -> Bomb {
+ if COUNTER.load(Relaxed) == 3 {
+ panic!("bomb limit exceeded");
+ }
+
+ COUNTER.fetch_add(1, Relaxed);
+ Bomb(COUNTER.load(Relaxed))
+ }
+ }
+
+ impl Drop for Bomb {
+ fn drop(&mut self) {
+ COUNTER.fetch_sub(1, Relaxed);
+ }
+ }
+
+ let res = std::panic::catch_unwind(|| <[Bomb; 5]>::default());
+ let panic_msg = match res {
+ Ok(_) => unreachable!(),
+ Err(p) => p.downcast::<&'static str>().unwrap(),
+ };
+ assert_eq!(*panic_msg, "bomb limit exceeded");
+ // check that all bombs are successfully dropped
+ assert_eq!(COUNTER.load(Relaxed), 0);
+ panic!("test succeeded")
+}
+
+#[test]
+fn empty_array_is_always_default() {
+ struct DoesNotImplDefault;
+
+ let _arr = <[DoesNotImplDefault; 0]>::default();
+}
+
+#[test]
+fn array_map() {
+ let a = [1, 2, 3];
+ let b = a.map(|v| v + 1);
+ assert_eq!(b, [2, 3, 4]);
+
+ let a = [1u8, 2, 3];
+ let b = a.map(|v| v as u64);
+ assert_eq!(b, [1, 2, 3]);
+}
+
+// See note on above test for why `should_panic` is used.
+#[test]
+#[should_panic(expected = "test succeeded")]
+fn array_map_drop_safety() {
+ static DROPPED: AtomicUsize = AtomicUsize::new(0);
+ struct DropCounter;
+ impl Drop for DropCounter {
+ fn drop(&mut self) {
+ DROPPED.fetch_add(1, Ordering::SeqCst);
+ }
+ }
+
+ let num_to_create = 5;
+ let success = std::panic::catch_unwind(|| {
+ let items = [0; 10];
+ let mut nth = 0;
+ items.map(|_| {
+ assert!(nth < num_to_create);
+ nth += 1;
+ DropCounter
+ });
+ });
+ assert!(success.is_err());
+ assert_eq!(DROPPED.load(Ordering::SeqCst), num_to_create);
+ panic!("test succeeded")
+}
+
+#[test]
+fn cell_allows_array_cycle() {
+ use core::cell::Cell;
+
+ #[derive(Debug)]
+ struct B<'a> {
+ a: [Cell<Option<&'a B<'a>>>; 2],
+ }
+
+ impl<'a> B<'a> {
+ fn new() -> B<'a> {
+ B { a: [Cell::new(None), Cell::new(None)] }
+ }
+ }
+
+ let b1 = B::new();
+ let b2 = B::new();
+ let b3 = B::new();
+
+ b1.a[0].set(Some(&b2));
+ b1.a[1].set(Some(&b3));
+
+ b2.a[0].set(Some(&b2));
+ b2.a[1].set(Some(&b3));
+
+ b3.a[0].set(Some(&b1));
+ b3.a[1].set(Some(&b2));
+}
+
+#[test]
+fn array_from_fn() {
+ let array = core::array::from_fn(|idx| idx);
+ assert_eq!(array, [0, 1, 2, 3, 4]);
+}
+
+#[test]
+fn array_try_from_fn() {
+ #[derive(Debug, PartialEq)]
+ enum SomeError {
+ Foo,
+ }
+
+ let array = core::array::try_from_fn(|i| Ok::<_, SomeError>(i));
+ assert_eq!(array, Ok([0, 1, 2, 3, 4]));
+
+ let another_array = core::array::try_from_fn::<Result<(), _>, 2, _>(|_| Err(SomeError::Foo));
+ assert_eq!(another_array, Err(SomeError::Foo));
+}
+
+#[cfg(not(panic = "abort"))]
+#[test]
+fn array_try_from_fn_drops_inserted_elements_on_err() {
+ static DROP_COUNTER: AtomicUsize = AtomicUsize::new(0);
+
+ struct CountDrop;
+ impl Drop for CountDrop {
+ fn drop(&mut self) {
+ DROP_COUNTER.fetch_add(1, Ordering::SeqCst);
+ }
+ }
+
+ let _ = catch_unwind_silent(move || {
+ let _: Result<[CountDrop; 4], ()> = core::array::try_from_fn(|idx| {
+ if idx == 2 {
+ return Err(());
+ }
+ Ok(CountDrop)
+ });
+ });
+
+ assert_eq!(DROP_COUNTER.load(Ordering::SeqCst), 2);
+}
+
+#[cfg(not(panic = "abort"))]
+#[test]
+fn array_try_from_fn_drops_inserted_elements_on_panic() {
+ static DROP_COUNTER: AtomicUsize = AtomicUsize::new(0);
+
+ struct CountDrop;
+ impl Drop for CountDrop {
+ fn drop(&mut self) {
+ DROP_COUNTER.fetch_add(1, Ordering::SeqCst);
+ }
+ }
+
+ let _ = catch_unwind_silent(move || {
+ let _: Result<[CountDrop; 4], ()> = core::array::try_from_fn(|idx| {
+ if idx == 2 {
+ panic!("peek a boo");
+ }
+ Ok(CountDrop)
+ });
+ });
+
+ assert_eq!(DROP_COUNTER.load(Ordering::SeqCst), 2);
+}
+
+#[cfg(not(panic = "abort"))]
+// https://stackoverflow.com/a/59211505
+fn catch_unwind_silent<F, R>(f: F) -> std::thread::Result<R>
+where
+ F: FnOnce() -> R + core::panic::UnwindSafe,
+{
+ let prev_hook = std::panic::take_hook();
+ std::panic::set_hook(Box::new(|_| {}));
+ let result = std::panic::catch_unwind(f);
+ std::panic::set_hook(prev_hook);
+ result
+}
+
+#[test]
+fn array_split_array_mut() {
+ let mut v = [1, 2, 3, 4, 5, 6];
+
+ {
+ let (left, right) = v.split_array_mut::<0>();
+ assert_eq!(left, &mut []);
+ assert_eq!(right, &mut [1, 2, 3, 4, 5, 6]);
+ }
+
+ {
+ let (left, right) = v.split_array_mut::<6>();
+ assert_eq!(left, &mut [1, 2, 3, 4, 5, 6]);
+ assert_eq!(right, &mut []);
+ }
+}
+
+#[test]
+fn array_rsplit_array_mut() {
+ let mut v = [1, 2, 3, 4, 5, 6];
+
+ {
+ let (left, right) = v.rsplit_array_mut::<0>();
+ assert_eq!(left, &mut [1, 2, 3, 4, 5, 6]);
+ assert_eq!(right, &mut []);
+ }
+
+ {
+ let (left, right) = v.rsplit_array_mut::<6>();
+ assert_eq!(left, &mut []);
+ assert_eq!(right, &mut [1, 2, 3, 4, 5, 6]);
+ }
+}
+
+#[should_panic]
+#[test]
+fn array_split_array_ref_out_of_bounds() {
+ let v = [1, 2, 3, 4, 5, 6];
+
+ v.split_array_ref::<7>();
+}
+
+#[should_panic]
+#[test]
+fn array_split_array_mut_out_of_bounds() {
+ let mut v = [1, 2, 3, 4, 5, 6];
+
+ v.split_array_mut::<7>();
+}
+
+#[should_panic]
+#[test]
+fn array_rsplit_array_ref_out_of_bounds() {
+ let v = [1, 2, 3, 4, 5, 6];
+
+ v.rsplit_array_ref::<7>();
+}
+
+#[should_panic]
+#[test]
+fn array_rsplit_array_mut_out_of_bounds() {
+ let mut v = [1, 2, 3, 4, 5, 6];
+
+ v.rsplit_array_mut::<7>();
+}
+
+#[test]
+fn array_intoiter_advance_by() {
+ use std::cell::Cell;
+ struct DropCounter<'a>(usize, &'a Cell<usize>);
+ impl Drop for DropCounter<'_> {
+ fn drop(&mut self) {
+ let x = self.1.get();
+ self.1.set(x + 1);
+ }
+ }
+
+ let counter = Cell::new(0);
+ let a: [_; 100] = std::array::from_fn(|i| DropCounter(i, &counter));
+ let mut it = IntoIterator::into_iter(a);
+
+ let r = it.advance_by(1);
+ assert_eq!(r, Ok(()));
+ assert_eq!(it.len(), 99);
+ assert_eq!(counter.get(), 1);
+
+ let r = it.advance_by(0);
+ assert_eq!(r, Ok(()));
+ assert_eq!(it.len(), 99);
+ assert_eq!(counter.get(), 1);
+
+ let r = it.advance_by(11);
+ assert_eq!(r, Ok(()));
+ assert_eq!(it.len(), 88);
+ assert_eq!(counter.get(), 12);
+
+ let x = it.next();
+ assert_eq!(x.as_ref().map(|x| x.0), Some(12));
+ assert_eq!(it.len(), 87);
+ assert_eq!(counter.get(), 12);
+ drop(x);
+ assert_eq!(counter.get(), 13);
+
+ let r = it.advance_by(123456);
+ assert_eq!(r, Err(87));
+ assert_eq!(it.len(), 0);
+ assert_eq!(counter.get(), 100);
+
+ let r = it.advance_by(0);
+ assert_eq!(r, Ok(()));
+ assert_eq!(it.len(), 0);
+ assert_eq!(counter.get(), 100);
+
+ let r = it.advance_by(10);
+ assert_eq!(r, Err(0));
+ assert_eq!(it.len(), 0);
+ assert_eq!(counter.get(), 100);
+}
+
+#[test]
+fn array_intoiter_advance_back_by() {
+ use std::cell::Cell;
+ struct DropCounter<'a>(usize, &'a Cell<usize>);
+ impl Drop for DropCounter<'_> {
+ fn drop(&mut self) {
+ let x = self.1.get();
+ self.1.set(x + 1);
+ }
+ }
+
+ let counter = Cell::new(0);
+ let a: [_; 100] = std::array::from_fn(|i| DropCounter(i, &counter));
+ let mut it = IntoIterator::into_iter(a);
+
+ let r = it.advance_back_by(1);
+ assert_eq!(r, Ok(()));
+ assert_eq!(it.len(), 99);
+ assert_eq!(counter.get(), 1);
+
+ let r = it.advance_back_by(0);
+ assert_eq!(r, Ok(()));
+ assert_eq!(it.len(), 99);
+ assert_eq!(counter.get(), 1);
+
+ let r = it.advance_back_by(11);
+ assert_eq!(r, Ok(()));
+ assert_eq!(it.len(), 88);
+ assert_eq!(counter.get(), 12);
+
+ let x = it.next_back();
+ assert_eq!(x.as_ref().map(|x| x.0), Some(87));
+ assert_eq!(it.len(), 87);
+ assert_eq!(counter.get(), 12);
+ drop(x);
+ assert_eq!(counter.get(), 13);
+
+ let r = it.advance_back_by(123456);
+ assert_eq!(r, Err(87));
+ assert_eq!(it.len(), 0);
+ assert_eq!(counter.get(), 100);
+
+ let r = it.advance_back_by(0);
+ assert_eq!(r, Ok(()));
+ assert_eq!(it.len(), 0);
+ assert_eq!(counter.get(), 100);
+
+ let r = it.advance_back_by(10);
+ assert_eq!(r, Err(0));
+ assert_eq!(it.len(), 0);
+ assert_eq!(counter.get(), 100);
+}
+
+#[test]
+fn array_mixed_equality_integers() {
+ let array3: [i32; 3] = [1, 2, 3];
+ let array3b: [i32; 3] = [3, 2, 1];
+ let array4: [i32; 4] = [1, 2, 3, 4];
+
+ let slice3: &[i32] = &{ array3 };
+ let slice3b: &[i32] = &{ array3b };
+ let slice4: &[i32] = &{ array4 };
+ assert!(array3 == slice3);
+ assert!(array3 != slice3b);
+ assert!(array3 != slice4);
+ assert!(slice3 == array3);
+ assert!(slice3b != array3);
+ assert!(slice4 != array3);
+
+ let mut3: &mut [i32] = &mut { array3 };
+ let mut3b: &mut [i32] = &mut { array3b };
+ let mut4: &mut [i32] = &mut { array4 };
+ assert!(array3 == mut3);
+ assert!(array3 != mut3b);
+ assert!(array3 != mut4);
+ assert!(mut3 == array3);
+ assert!(mut3b != array3);
+ assert!(mut4 != array3);
+}
+
+#[test]
+fn array_mixed_equality_nans() {
+ let array3: [f32; 3] = [1.0, std::f32::NAN, 3.0];
+
+ let slice3: &[f32] = &{ array3 };
+ assert!(!(array3 == slice3));
+ assert!(array3 != slice3);
+ assert!(!(slice3 == array3));
+ assert!(slice3 != array3);
+
+ let mut3: &mut [f32] = &mut { array3 };
+ assert!(!(array3 == mut3));
+ assert!(array3 != mut3);
+ assert!(!(mut3 == array3));
+ assert!(mut3 != array3);
+}
+
+#[test]
+fn array_into_iter_fold() {
+ // Strings to help MIRI catch if we double-free or something
+ let a = ["Aa".to_string(), "Bb".to_string(), "Cc".to_string()];
+ let mut s = "s".to_string();
+ a.into_iter().for_each(|b| s += &b);
+ assert_eq!(s, "sAaBbCc");
+
+ let a = [1, 2, 3, 4, 5, 6];
+ let mut it = a.into_iter();
+ it.advance_by(1).unwrap();
+ it.advance_back_by(2).unwrap();
+ let s = it.fold(10, |a, b| 10 * a + b);
+ assert_eq!(s, 10234);
+}
+
+#[test]
+fn array_into_iter_rfold() {
+ // Strings to help MIRI catch if we double-free or something
+ let a = ["Aa".to_string(), "Bb".to_string(), "Cc".to_string()];
+ let mut s = "s".to_string();
+ a.into_iter().rev().for_each(|b| s += &b);
+ assert_eq!(s, "sCcBbAa");
+
+ let a = [1, 2, 3, 4, 5, 6];
+ let mut it = a.into_iter();
+ it.advance_by(1).unwrap();
+ it.advance_back_by(2).unwrap();
+ let s = it.rfold(10, |a, b| 10 * a + b);
+ assert_eq!(s, 10432);
+}
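+
+// Worked arithmetic for the two digit-folding checks above: after
+// `advance_by(1)` and `advance_back_by(2)` the remaining elements are
+// [2, 3, 4], so:
+//
+// fold: ((10 * 10 + 2) * 10 + 3) * 10 + 4 = 10234
+// rfold: ((10 * 10 + 4) * 10 + 3) * 10 + 2 = 10432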
diff --git a/library/core/tests/ascii.rs b/library/core/tests/ascii.rs
new file mode 100644
index 000000000..6d2cf3e83
--- /dev/null
+++ b/library/core/tests/ascii.rs
@@ -0,0 +1,463 @@
+use core::char::from_u32;
+
+#[test]
+fn test_is_ascii() {
+ assert!(b"".is_ascii());
+ assert!(b"banana\0\x7F".is_ascii());
+ assert!(b"banana\0\x7F".iter().all(|b| b.is_ascii()));
+ assert!(!b"Vi\xe1\xbb\x87t Nam".is_ascii());
+ assert!(!b"Vi\xe1\xbb\x87t Nam".iter().all(|b| b.is_ascii()));
+ assert!(!b"\xe1\xbb\x87".iter().any(|b| b.is_ascii()));
+
+ assert!("".is_ascii());
+ assert!("banana\0\u{7F}".is_ascii());
+ assert!("banana\0\u{7F}".chars().all(|c| c.is_ascii()));
+ assert!(!"ประเทศไทย中华Việt Nam".chars().all(|c| c.is_ascii()));
+ assert!(!"ประเทศไทย中华ệ ".chars().any(|c| c.is_ascii()));
+}
+
+#[test]
+fn test_to_ascii_uppercase() {
+ assert_eq!("url()URL()uRl()ürl".to_ascii_uppercase(), "URL()URL()URL()üRL");
+ assert_eq!("hıKß".to_ascii_uppercase(), "HıKß");
+
+ for i in 0..501 {
+ let upper =
+ if 'a' as u32 <= i && i <= 'z' as u32 { i + 'A' as u32 - 'a' as u32 } else { i };
+ assert_eq!(
+ (from_u32(i).unwrap()).to_string().to_ascii_uppercase(),
+ (from_u32(upper).unwrap()).to_string()
+ );
+ }
+}
+
+#[test]
+fn test_to_ascii_lowercase() {
+ assert_eq!("url()URL()uRl()Ürl".to_ascii_lowercase(), "url()url()url()Ürl");
+ // Dotted capital I, Kelvin sign, Sharp S.
+ assert_eq!("HİKß".to_ascii_lowercase(), "hİKß");
+
+ for i in 0..501 {
+ let lower =
+ if 'A' as u32 <= i && i <= 'Z' as u32 { i + 'a' as u32 - 'A' as u32 } else { i };
+ assert_eq!(
+ (from_u32(i).unwrap()).to_string().to_ascii_lowercase(),
+ (from_u32(lower).unwrap()).to_string()
+ );
+ }
+}
+
+#[test]
+fn test_make_ascii_lower_case() {
+ macro_rules! test {
+ ($from: expr, $to: expr) => {{
+ let mut x = $from;
+ x.make_ascii_lowercase();
+ assert_eq!(x, $to);
+ }};
+ }
+ test!(b'A', b'a');
+ test!(b'a', b'a');
+ test!(b'!', b'!');
+ test!('A', 'a');
+ test!('À', 'À');
+ test!('a', 'a');
+ test!('!', '!');
+ test!(b"H\xc3\x89".to_vec(), b"h\xc3\x89");
+ test!("HİKß".to_string(), "hİKß");
+}
+
+#[test]
+fn test_make_ascii_upper_case() {
+ macro_rules! test {
+ ($from: expr, $to: expr) => {{
+ let mut x = $from;
+ x.make_ascii_uppercase();
+ assert_eq!(x, $to);
+ }};
+ }
+ test!(b'a', b'A');
+ test!(b'A', b'A');
+ test!(b'!', b'!');
+ test!('a', 'A');
+ test!('à', 'à');
+ test!('A', 'A');
+ test!('!', '!');
+ test!(b"h\xc3\xa9".to_vec(), b"H\xc3\xa9");
+ test!("hıKß".to_string(), "HıKß");
+
+ let mut x = "Hello".to_string();
+ x[..3].make_ascii_uppercase(); // Test IndexMut on String.
+ assert_eq!(x, "HELlo")
+}
+
+#[test]
+fn test_eq_ignore_ascii_case() {
+ assert!("url()URL()uRl()Ürl".eq_ignore_ascii_case("url()url()url()Ürl"));
+ assert!(!"Ürl".eq_ignore_ascii_case("ürl"));
+ // Dotted capital I, Kelvin sign, Sharp S.
+ assert!("HİKß".eq_ignore_ascii_case("hİKß"));
+ assert!(!"İ".eq_ignore_ascii_case("i"));
+ assert!(!"K".eq_ignore_ascii_case("k"));
+ assert!(!"ß".eq_ignore_ascii_case("s"));
+
+ for i in 0..501 {
+ let lower =
+ if 'A' as u32 <= i && i <= 'Z' as u32 { i + 'a' as u32 - 'A' as u32 } else { i };
+ assert!(
+ (from_u32(i).unwrap())
+ .to_string()
+ .eq_ignore_ascii_case(&from_u32(lower).unwrap().to_string())
+ );
+ }
+}
+
+#[test]
+fn inference_works() {
+ let x = "a".to_string();
+ let _ = x.eq_ignore_ascii_case("A");
+}
+
+// Shorthands used by the is_ascii_* tests.
+macro_rules! assert_all {
+ ($what:ident, $($str:tt),+) => {{
+ $(
+ for b in $str.chars() {
+ if !b.$what() {
+ panic!("expected {}({}) but it isn't",
+ stringify!($what), b);
+ }
+ }
+ for b in $str.as_bytes().iter() {
+ if !b.$what() {
+ panic!("expected {}(0x{:02x})) but it isn't",
+ stringify!($what), b);
+ }
+ }
+ )+
+ }};
+ ($what:ident, $($str:tt),+,) => (assert_all!($what,$($str),+))
+}
+macro_rules! assert_none {
+ ($what:ident, $($str:tt),+) => {{
+ $(
+ for b in $str.chars() {
+ if b.$what() {
+ panic!("expected not-{}({}) but it is",
+ stringify!($what), b);
+ }
+ }
+ for b in $str.as_bytes().iter() {
+ if b.$what() {
+ panic!("expected not-{}(0x{:02x})) but it is",
+ stringify!($what), b);
+ }
+ }
+ )+
+ }};
+ ($what:ident, $($str:tt),+,) => (assert_none!($what,$($str),+))
+}
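+
+// Note that each macro runs the predicate twice per input: once over
+// `chars()` and once over the raw bytes, so the `char` and `u8` inherent
+// methods are checked against each other on every (ASCII-only) test string.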
+
+#[test]
+fn test_is_ascii_alphabetic() {
+ assert_all!(
+ is_ascii_alphabetic,
+ "",
+ "abcdefghijklmnopqrstuvwxyz",
+ "ABCDEFGHIJKLMNOQPRSTUVWXYZ",
+ );
+ assert_none!(
+ is_ascii_alphabetic,
+ "0123456789",
+ "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~",
+ " \t\n\x0c\r",
+ "\x00\x01\x02\x03\x04\x05\x06\x07",
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ "\x10\x11\x12\x13\x14\x15\x16\x17",
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ "\x7f",
+ );
+}
+
+#[test]
+fn test_is_ascii_uppercase() {
+ assert_all!(is_ascii_uppercase, "", "ABCDEFGHIJKLMNOPQRSTUVWXYZ",);
+ assert_none!(
+ is_ascii_uppercase,
+ "abcdefghijklmnopqrstuvwxyz",
+ "0123456789",
+ "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~",
+ " \t\n\x0c\r",
+ "\x00\x01\x02\x03\x04\x05\x06\x07",
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ "\x10\x11\x12\x13\x14\x15\x16\x17",
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ "\x7f",
+ );
+}
+
+#[test]
+fn test_is_ascii_lowercase() {
+ assert_all!(is_ascii_lowercase, "", "abcdefghijklmnopqrstuvwxyz",);
+ assert_none!(
+ is_ascii_lowercase,
+ "ABCDEFGHIJKLMNOQPRSTUVWXYZ",
+ "0123456789",
+ "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~",
+ " \t\n\x0c\r",
+ "\x00\x01\x02\x03\x04\x05\x06\x07",
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ "\x10\x11\x12\x13\x14\x15\x16\x17",
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ "\x7f",
+ );
+}
+
+#[test]
+fn test_is_ascii_alphanumeric() {
+ assert_all!(
+ is_ascii_alphanumeric,
+ "",
+ "abcdefghijklmnopqrstuvwxyz",
+ "ABCDEFGHIJKLMNOQPRSTUVWXYZ",
+ "0123456789",
+ );
+ assert_none!(
+ is_ascii_alphanumeric,
+ "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~",
+ " \t\n\x0c\r",
+ "\x00\x01\x02\x03\x04\x05\x06\x07",
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ "\x10\x11\x12\x13\x14\x15\x16\x17",
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ "\x7f",
+ );
+}
+
+#[test]
+fn test_is_ascii_digit() {
+ assert_all!(is_ascii_digit, "", "0123456789",);
+ assert_none!(
+ is_ascii_digit,
+ "abcdefghijklmnopqrstuvwxyz",
+ "ABCDEFGHIJKLMNOQPRSTUVWXYZ",
+ "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~",
+ " \t\n\x0c\r",
+ "\x00\x01\x02\x03\x04\x05\x06\x07",
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ "\x10\x11\x12\x13\x14\x15\x16\x17",
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ "\x7f",
+ );
+}
+
+#[test]
+fn test_is_ascii_hexdigit() {
+ assert_all!(is_ascii_hexdigit, "", "0123456789", "abcdefABCDEF",);
+ assert_none!(
+ is_ascii_hexdigit,
+ "ghijklmnopqrstuvwxyz",
+ "GHIJKLMNOQPRSTUVWXYZ",
+ "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~",
+ " \t\n\x0c\r",
+ "\x00\x01\x02\x03\x04\x05\x06\x07",
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ "\x10\x11\x12\x13\x14\x15\x16\x17",
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ "\x7f",
+ );
+}
+
+#[test]
+fn test_is_ascii_punctuation() {
+ assert_all!(is_ascii_punctuation, "", "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~",);
+ assert_none!(
+ is_ascii_punctuation,
+ "abcdefghijklmnopqrstuvwxyz",
+ "ABCDEFGHIJKLMNOQPRSTUVWXYZ",
+ "0123456789",
+ " \t\n\x0c\r",
+ "\x00\x01\x02\x03\x04\x05\x06\x07",
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ "\x10\x11\x12\x13\x14\x15\x16\x17",
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ "\x7f",
+ );
+}
+
+#[test]
+fn test_is_ascii_graphic() {
+ assert_all!(
+ is_ascii_graphic,
+ "",
+ "abcdefghijklmnopqrstuvwxyz",
+ "ABCDEFGHIJKLMNOQPRSTUVWXYZ",
+ "0123456789",
+ "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~",
+ );
+ assert_none!(
+ is_ascii_graphic,
+ " \t\n\x0c\r",
+ "\x00\x01\x02\x03\x04\x05\x06\x07",
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ "\x10\x11\x12\x13\x14\x15\x16\x17",
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ "\x7f",
+ );
+}
+
+#[test]
+fn test_is_ascii_whitespace() {
+ assert_all!(is_ascii_whitespace, "", " \t\n\x0c\r",);
+ assert_none!(
+ is_ascii_whitespace,
+ "abcdefghijklmnopqrstuvwxyz",
+ "ABCDEFGHIJKLMNOQPRSTUVWXYZ",
+ "0123456789",
+ "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~",
+ "\x00\x01\x02\x03\x04\x05\x06\x07",
+ "\x08\x0b\x0e\x0f",
+ "\x10\x11\x12\x13\x14\x15\x16\x17",
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ "\x7f",
+ );
+}
+
+#[test]
+fn test_is_ascii_control() {
+ assert_all!(
+ is_ascii_control,
+ "",
+ "\x00\x01\x02\x03\x04\x05\x06\x07",
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ "\x10\x11\x12\x13\x14\x15\x16\x17",
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ "\x7f",
+ );
+ assert_none!(
+ is_ascii_control,
+ "abcdefghijklmnopqrstuvwxyz",
+ "ABCDEFGHIJKLMNOQPRSTUVWXYZ",
+ "0123456789",
+ "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~",
+ " ",
+ );
+}
+
+// `is_ascii` does a good amount of pointer manipulation and has
+// alignment-dependent computation. This is all sanity-checked via
+// `debug_assert!`s, so we test various sizes/alignments thoroughly versus an
+// "obviously correct" baseline function.
+#[test]
+fn test_is_ascii_align_size_thoroughly() {
+ // The "obviously-correct" baseline mentioned above.
+ fn is_ascii_baseline(s: &[u8]) -> bool {
+ s.iter().all(|b| b.is_ascii())
+ }
+
+ // Helper to repeat `l` copies of `b0` followed by `l` copies of `b1`.
+ fn repeat_concat(b0: u8, b1: u8, l: usize) -> Vec<u8> {
+ use core::iter::repeat;
+ repeat(b0).take(l).chain(repeat(b1).take(l)).collect()
+ }
+
+ // Miri is too slow
+ let iter = if cfg!(miri) { 0..20 } else { 0..100 };
+
+ for i in iter {
+ #[cfg(not(miri))]
+ let cases = &[
+ b"a".repeat(i),
+ b"\0".repeat(i),
+ b"\x7f".repeat(i),
+ b"\x80".repeat(i),
+ b"\xff".repeat(i),
+ repeat_concat(b'a', 0x80u8, i),
+ repeat_concat(0x80u8, b'a', i),
+ ];
+
+ #[cfg(miri)]
+ let cases = &[b"a".repeat(i), b"\x80".repeat(i), repeat_concat(b'a', 0x80u8, i)];
+
+ for case in cases {
+ for pos in 0..=case.len() {
+ // Potentially misaligned head
+ let prefix = &case[pos..];
+ assert_eq!(is_ascii_baseline(prefix), prefix.is_ascii(),);
+
+ // Potentially misaligned tail
+ let suffix = &case[..case.len() - pos];
+
+ assert_eq!(is_ascii_baseline(suffix), suffix.is_ascii(),);
+
+ // Both head and tail are potentially misaligned
+ let mid = &case[(pos / 2)..(case.len() - (pos / 2))];
+ assert_eq!(is_ascii_baseline(mid), mid.is_ascii(),);
+ }
+ }
+ }
+}
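+
+// For context, a minimal sketch (not the actual libcore implementation) of
+// the word-at-a-time check the test above validates: once the input is
+// aligned, whole `usize` words can be scanned for any byte with the high bit
+// set, since a byte is ASCII exactly when it is <= 0x7f.
+//
+// fn word_is_ascii(word: usize) -> bool {
+// const HIGH_BITS: usize = usize::MAX / 255 * 0x80; // 0x80 in every byte
+// word & HIGH_BITS == 0
+// }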
+
+#[test]
+fn ascii_const() {
+ // test that the `is_ascii` methods of `char` and `u8` are usable in a const context
+
+ const CHAR_IS_ASCII: bool = 'a'.is_ascii();
+ assert!(CHAR_IS_ASCII);
+
+ const BYTE_IS_ASCII: bool = 97u8.is_ascii();
+ assert!(BYTE_IS_ASCII);
+}
+
+#[test]
+fn ascii_ctype_const() {
+ macro_rules! suite {
+ ( $( $fn:ident => [$a:ident, $A:ident, $nine:ident, $dot:ident, $space:ident]; )* ) => {
+ $(
+ mod $fn {
+ const CHAR_A_LOWER: bool = 'a'.$fn();
+ const CHAR_A_UPPER: bool = 'A'.$fn();
+ const CHAR_NINE: bool = '9'.$fn();
+ const CHAR_DOT: bool = '.'.$fn();
+ const CHAR_SPACE: bool = ' '.$fn();
+
+ const U8_A_LOWER: bool = b'a'.$fn();
+ const U8_A_UPPER: bool = b'A'.$fn();
+ const U8_NINE: bool = b'9'.$fn();
+ const U8_DOT: bool = b'.'.$fn();
+ const U8_SPACE: bool = b' '.$fn();
+
+ pub fn run() {
+ assert_eq!(CHAR_A_LOWER, $a);
+ assert_eq!(CHAR_A_UPPER, $A);
+ assert_eq!(CHAR_NINE, $nine);
+ assert_eq!(CHAR_DOT, $dot);
+ assert_eq!(CHAR_SPACE, $space);
+
+ assert_eq!(U8_A_LOWER, $a);
+ assert_eq!(U8_A_UPPER, $A);
+ assert_eq!(U8_NINE, $nine);
+ assert_eq!(U8_DOT, $dot);
+ assert_eq!(U8_SPACE, $space);
+ }
+ }
+ )*
+
+ $( $fn::run(); )*
+ }
+ }
+
+ suite! {
+ // 'a' 'A' '9' '.' ' '
+ is_ascii_alphabetic => [true, true, false, false, false];
+ is_ascii_uppercase => [false, true, false, false, false];
+ is_ascii_lowercase => [true, false, false, false, false];
+ is_ascii_alphanumeric => [true, true, true, false, false];
+ is_ascii_digit => [false, false, true, false, false];
+ is_ascii_hexdigit => [true, true, true, false, false];
+ is_ascii_punctuation => [false, false, false, true, false];
+ is_ascii_graphic => [true, true, true, true, false];
+ is_ascii_whitespace => [false, false, false, false, true];
+ is_ascii_control => [false, false, false, false, false];
+ }
+}
diff --git a/library/core/tests/asserting.rs b/library/core/tests/asserting.rs
new file mode 100644
index 000000000..4b626ba6f
--- /dev/null
+++ b/library/core/tests/asserting.rs
@@ -0,0 +1,37 @@
+use core::asserting::{Capture, TryCaptureGeneric, TryCapturePrintable, Wrapper};
+
+macro_rules! test {
+ ($test_name:ident, $elem:expr, $captured_elem:expr, $output:literal) => {
+ #[test]
+ fn $test_name() {
+ let elem = $elem;
+ let mut capture = Capture::new();
+ assert!(capture.elem == None);
+ (&Wrapper(&elem)).try_capture(&mut capture);
+ assert!(capture.elem == $captured_elem);
+ assert_eq!(format!("{:?}", capture), $output);
+ }
+ };
+}
+
+#[derive(Debug, PartialEq)]
+struct NoCopy;
+
+#[derive(PartialEq)]
+struct NoCopyNoDebug;
+
+#[derive(Clone, Copy, PartialEq)]
+struct NoDebug;
+
+test!(
+ capture_with_non_copyable_and_non_debuggable_elem_has_correct_params,
+ NoCopyNoDebug,
+ None,
+ "N/A"
+);
+
+test!(capture_with_non_copyable_elem_has_correct_params, NoCopy, None, "N/A");
+
+test!(capture_with_non_debuggable_elem_has_correct_params, NoDebug, None, "N/A");
+
+test!(capture_with_copyable_and_debuggable_elem_has_correct_params, 1i32, Some(1i32), "1");
diff --git a/library/core/tests/atomic.rs b/library/core/tests/atomic.rs
new file mode 100644
index 000000000..13b12db20
--- /dev/null
+++ b/library/core/tests/atomic.rs
@@ -0,0 +1,314 @@
+use core::sync::atomic::Ordering::SeqCst;
+use core::sync::atomic::*;
+
+#[test]
+fn bool_() {
+ let a = AtomicBool::new(false);
+ assert_eq!(a.compare_exchange(false, true, SeqCst, SeqCst), Ok(false));
+ assert_eq!(a.compare_exchange(false, true, SeqCst, SeqCst), Err(true));
+
+ a.store(false, SeqCst);
+ assert_eq!(a.compare_exchange(false, true, SeqCst, SeqCst), Ok(false));
+}
+
+#[test]
+fn bool_and() {
+ let a = AtomicBool::new(true);
+ assert_eq!(a.fetch_and(false, SeqCst), true);
+ assert_eq!(a.load(SeqCst), false);
+}
+
+#[test]
+fn bool_nand() {
+ let a = AtomicBool::new(false);
+ assert_eq!(a.fetch_nand(false, SeqCst), false);
+ assert_eq!(a.load(SeqCst), true);
+ assert_eq!(a.fetch_nand(false, SeqCst), true);
+ assert_eq!(a.load(SeqCst), true);
+ assert_eq!(a.fetch_nand(true, SeqCst), true);
+ assert_eq!(a.load(SeqCst), false);
+ assert_eq!(a.fetch_nand(true, SeqCst), false);
+ assert_eq!(a.load(SeqCst), true);
+}
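+
+// The sequence above follows from `fetch_nand` storing `!(old & val)` and
+// returning `old`: starting from `false`, the stored values are
+// !(false & false) = true, !(true & false) = true, !(true & true) = false,
+// and !(false & true) = true.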
+
+#[test]
+fn uint_and() {
+ let x = AtomicUsize::new(0xf731);
+ assert_eq!(x.fetch_and(0x137f, SeqCst), 0xf731);
+ assert_eq!(x.load(SeqCst), 0xf731 & 0x137f);
+}
+
+#[test]
+fn uint_nand() {
+ let x = AtomicUsize::new(0xf731);
+ assert_eq!(x.fetch_nand(0x137f, SeqCst), 0xf731);
+ assert_eq!(x.load(SeqCst), !(0xf731 & 0x137f));
+}
+
+#[test]
+fn uint_or() {
+ let x = AtomicUsize::new(0xf731);
+ assert_eq!(x.fetch_or(0x137f, SeqCst), 0xf731);
+ assert_eq!(x.load(SeqCst), 0xf731 | 0x137f);
+}
+
+#[test]
+fn uint_xor() {
+ let x = AtomicUsize::new(0xf731);
+ assert_eq!(x.fetch_xor(0x137f, SeqCst), 0xf731);
+ assert_eq!(x.load(SeqCst), 0xf731 ^ 0x137f);
+}
+
+#[test]
+#[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
+fn uint_min() {
+ let x = AtomicUsize::new(0xf731);
+ assert_eq!(x.fetch_min(0x137f, SeqCst), 0xf731);
+ assert_eq!(x.load(SeqCst), 0x137f);
+ assert_eq!(x.fetch_min(0xf731, SeqCst), 0x137f);
+ assert_eq!(x.load(SeqCst), 0x137f);
+}
+
+#[test]
+#[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
+fn uint_max() {
+ let x = AtomicUsize::new(0x137f);
+ assert_eq!(x.fetch_max(0xf731, SeqCst), 0x137f);
+ assert_eq!(x.load(SeqCst), 0xf731);
+ assert_eq!(x.fetch_max(0x137f, SeqCst), 0xf731);
+ assert_eq!(x.load(SeqCst), 0xf731);
+}
+
+#[test]
+fn int_and() {
+ let x = AtomicIsize::new(0xf731);
+ assert_eq!(x.fetch_and(0x137f, SeqCst), 0xf731);
+ assert_eq!(x.load(SeqCst), 0xf731 & 0x137f);
+}
+
+#[test]
+fn int_nand() {
+ let x = AtomicIsize::new(0xf731);
+ assert_eq!(x.fetch_nand(0x137f, SeqCst), 0xf731);
+ assert_eq!(x.load(SeqCst), !(0xf731 & 0x137f));
+}
+
+#[test]
+fn int_or() {
+ let x = AtomicIsize::new(0xf731);
+ assert_eq!(x.fetch_or(0x137f, SeqCst), 0xf731);
+ assert_eq!(x.load(SeqCst), 0xf731 | 0x137f);
+}
+
+#[test]
+fn int_xor() {
+ let x = AtomicIsize::new(0xf731);
+ assert_eq!(x.fetch_xor(0x137f, SeqCst), 0xf731);
+ assert_eq!(x.load(SeqCst), 0xf731 ^ 0x137f);
+}
+
+#[test]
+#[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
+fn int_min() {
+ let x = AtomicIsize::new(0xf731);
+ assert_eq!(x.fetch_min(0x137f, SeqCst), 0xf731);
+ assert_eq!(x.load(SeqCst), 0x137f);
+ assert_eq!(x.fetch_min(0xf731, SeqCst), 0x137f);
+ assert_eq!(x.load(SeqCst), 0x137f);
+}
+
+#[test]
+#[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
+fn int_max() {
+ let x = AtomicIsize::new(0x137f);
+ assert_eq!(x.fetch_max(0xf731, SeqCst), 0x137f);
+ assert_eq!(x.load(SeqCst), 0xf731);
+ assert_eq!(x.fetch_max(0x137f, SeqCst), 0xf731);
+ assert_eq!(x.load(SeqCst), 0xf731);
+}
+
+#[test]
+#[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
+fn ptr_add_null() {
+ let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
+ assert_eq!(atom.fetch_ptr_add(1, SeqCst).addr(), 0);
+ assert_eq!(atom.load(SeqCst).addr(), 8);
+
+ assert_eq!(atom.fetch_byte_add(1, SeqCst).addr(), 8);
+ assert_eq!(atom.load(SeqCst).addr(), 9);
+
+ assert_eq!(atom.fetch_ptr_sub(1, SeqCst).addr(), 9);
+ assert_eq!(atom.load(SeqCst).addr(), 1);
+
+ assert_eq!(atom.fetch_byte_sub(1, SeqCst).addr(), 1);
+ assert_eq!(atom.load(SeqCst).addr(), 0);
+}
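+
+// Note the scaling at work above: `fetch_ptr_add`/`fetch_ptr_sub` count in
+// units of the pointee (here `size_of::<i64>() == 8` bytes), while
+// `fetch_byte_add`/`fetch_byte_sub` count in single bytes. That is why one
+// element moves the address 0 -> 8, but one byte moves it only 8 -> 9.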
+
+#[test]
+#[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
+fn ptr_add_data() {
+ let num = 0i64;
+ let n = &num as *const i64 as *mut _;
+ let atom = AtomicPtr::<i64>::new(n);
+ assert_eq!(atom.fetch_ptr_add(1, SeqCst), n);
+ assert_eq!(atom.load(SeqCst), n.wrapping_add(1));
+
+ assert_eq!(atom.fetch_ptr_sub(1, SeqCst), n.wrapping_add(1));
+ assert_eq!(atom.load(SeqCst), n);
+ let bytes_from_n = |b| n.cast::<u8>().wrapping_add(b).cast::<i64>();
+
+ assert_eq!(atom.fetch_byte_add(1, SeqCst), n);
+ assert_eq!(atom.load(SeqCst), bytes_from_n(1));
+
+ assert_eq!(atom.fetch_byte_add(5, SeqCst), bytes_from_n(1));
+ assert_eq!(atom.load(SeqCst), bytes_from_n(6));
+
+ assert_eq!(atom.fetch_byte_sub(1, SeqCst), bytes_from_n(6));
+ assert_eq!(atom.load(SeqCst), bytes_from_n(5));
+
+ assert_eq!(atom.fetch_byte_sub(5, SeqCst), bytes_from_n(5));
+ assert_eq!(atom.load(SeqCst), n);
+}
+
+#[test]
+#[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
+fn ptr_bitops() {
+ let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
+ assert_eq!(atom.fetch_or(0b0111, SeqCst).addr(), 0);
+ assert_eq!(atom.load(SeqCst).addr(), 0b0111);
+
+ assert_eq!(atom.fetch_and(0b1101, SeqCst).addr(), 0b0111);
+ assert_eq!(atom.load(SeqCst).addr(), 0b0101);
+
+ assert_eq!(atom.fetch_xor(0b1111, SeqCst).addr(), 0b0101);
+ assert_eq!(atom.load(SeqCst).addr(), 0b1010);
+}
+
+#[test]
+#[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
+fn ptr_bitops_tagging() {
+ #[repr(align(16))]
+ struct Tagme(u128);
+
+ let tagme = Tagme(1000);
+ let ptr = &tagme as *const Tagme as *mut Tagme;
+ let atom: AtomicPtr<Tagme> = AtomicPtr::new(ptr);
+
+ const MASK_TAG: usize = 0b1111;
+ const MASK_PTR: usize = !MASK_TAG;
+
+ assert_eq!(ptr.addr() & MASK_TAG, 0);
+
+ assert_eq!(atom.fetch_or(0b0111, SeqCst), ptr);
+ assert_eq!(atom.load(SeqCst), ptr.map_addr(|a| a | 0b111));
+
+ assert_eq!(atom.fetch_and(MASK_PTR | 0b0010, SeqCst), ptr.map_addr(|a| a | 0b111));
+ assert_eq!(atom.load(SeqCst), ptr.map_addr(|a| a | 0b0010));
+
+ assert_eq!(atom.fetch_xor(0b1011, SeqCst), ptr.map_addr(|a| a | 0b0010));
+ assert_eq!(atom.load(SeqCst), ptr.map_addr(|a| a | 0b1001));
+
+ assert_eq!(atom.fetch_and(MASK_PTR, SeqCst), ptr.map_addr(|a| a | 0b1001));
+ assert_eq!(atom.load(SeqCst), ptr);
+}
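+
+// The test above is the classic pointer-tagging pattern: `Tagme` is aligned
+// to 16 bytes, so the low four address bits of any valid `*mut Tagme` are
+// zero and can carry a tag. A rough sketch of the round trip:
+//
+// let tagged = ptr.map_addr(|a| a | tag); // pack the tag into the low bits
+// let tag = tagged.addr() & MASK_TAG; // recover the tag
+// let untagged = tagged.map_addr(|a| a & MASK_PTR); // recover the pointer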
+
+static S_FALSE: AtomicBool = AtomicBool::new(false);
+static S_TRUE: AtomicBool = AtomicBool::new(true);
+static S_INT: AtomicIsize = AtomicIsize::new(0);
+static S_UINT: AtomicUsize = AtomicUsize::new(0);
+
+#[test]
+fn static_init() {
+ // Note that we're not really testing mutability here, but it's important
+ // on Android at the moment (#49775).
+ assert!(!S_FALSE.swap(true, SeqCst));
+ assert!(S_TRUE.swap(false, SeqCst));
+ assert!(S_INT.fetch_add(1, SeqCst) == 0);
+ assert!(S_UINT.fetch_add(1, SeqCst) == 0);
+}
+
+#[test]
+fn atomic_access_bool() {
+ static mut ATOMIC: AtomicBool = AtomicBool::new(false);
+
+ unsafe {
+ assert_eq!(*ATOMIC.get_mut(), false);
+ ATOMIC.store(true, SeqCst);
+ assert_eq!(*ATOMIC.get_mut(), true);
+ ATOMIC.fetch_or(false, SeqCst);
+ assert_eq!(*ATOMIC.get_mut(), true);
+ ATOMIC.fetch_and(false, SeqCst);
+ assert_eq!(*ATOMIC.get_mut(), false);
+ ATOMIC.fetch_nand(true, SeqCst);
+ assert_eq!(*ATOMIC.get_mut(), true);
+ ATOMIC.fetch_xor(true, SeqCst);
+ assert_eq!(*ATOMIC.get_mut(), false);
+ }
+}
+
+#[test]
+fn atomic_alignment() {
+ use std::mem::{align_of, size_of};
+
+ #[cfg(target_has_atomic = "8")]
+ assert_eq!(align_of::<AtomicBool>(), size_of::<AtomicBool>());
+ #[cfg(target_has_atomic = "ptr")]
+ assert_eq!(align_of::<AtomicPtr<u8>>(), size_of::<AtomicPtr<u8>>());
+ #[cfg(target_has_atomic = "8")]
+ assert_eq!(align_of::<AtomicU8>(), size_of::<AtomicU8>());
+ #[cfg(target_has_atomic = "8")]
+ assert_eq!(align_of::<AtomicI8>(), size_of::<AtomicI8>());
+ #[cfg(target_has_atomic = "16")]
+ assert_eq!(align_of::<AtomicU16>(), size_of::<AtomicU16>());
+ #[cfg(target_has_atomic = "16")]
+ assert_eq!(align_of::<AtomicI16>(), size_of::<AtomicI16>());
+ #[cfg(target_has_atomic = "32")]
+ assert_eq!(align_of::<AtomicU32>(), size_of::<AtomicU32>());
+ #[cfg(target_has_atomic = "32")]
+ assert_eq!(align_of::<AtomicI32>(), size_of::<AtomicI32>());
+ #[cfg(target_has_atomic = "64")]
+ assert_eq!(align_of::<AtomicU64>(), size_of::<AtomicU64>());
+ #[cfg(target_has_atomic = "64")]
+ assert_eq!(align_of::<AtomicI64>(), size_of::<AtomicI64>());
+ #[cfg(target_has_atomic = "128")]
+ assert_eq!(align_of::<AtomicU128>(), size_of::<AtomicU128>());
+ #[cfg(target_has_atomic = "128")]
+ assert_eq!(align_of::<AtomicI128>(), size_of::<AtomicI128>());
+ #[cfg(target_has_atomic = "ptr")]
+ assert_eq!(align_of::<AtomicUsize>(), size_of::<AtomicUsize>());
+ #[cfg(target_has_atomic = "ptr")]
+ assert_eq!(align_of::<AtomicIsize>(), size_of::<AtomicIsize>());
+}
+
+#[test]
+fn atomic_compare_exchange() {
+ use Ordering::*;
+
+ static ATOMIC: AtomicIsize = AtomicIsize::new(0);
+
+ ATOMIC.compare_exchange(0, 1, Relaxed, Relaxed).ok();
+ ATOMIC.compare_exchange(0, 1, Acquire, Relaxed).ok();
+ ATOMIC.compare_exchange(0, 1, Release, Relaxed).ok();
+ ATOMIC.compare_exchange(0, 1, AcqRel, Relaxed).ok();
+ ATOMIC.compare_exchange(0, 1, SeqCst, Relaxed).ok();
+ ATOMIC.compare_exchange(0, 1, Acquire, Acquire).ok();
+ ATOMIC.compare_exchange(0, 1, AcqRel, Acquire).ok();
+ ATOMIC.compare_exchange(0, 1, SeqCst, Acquire).ok();
+ ATOMIC.compare_exchange(0, 1, SeqCst, SeqCst).ok();
+ ATOMIC.compare_exchange_weak(0, 1, Relaxed, Relaxed).ok();
+ ATOMIC.compare_exchange_weak(0, 1, Acquire, Relaxed).ok();
+ ATOMIC.compare_exchange_weak(0, 1, Release, Relaxed).ok();
+ ATOMIC.compare_exchange_weak(0, 1, AcqRel, Relaxed).ok();
+ ATOMIC.compare_exchange_weak(0, 1, SeqCst, Relaxed).ok();
+ ATOMIC.compare_exchange_weak(0, 1, Acquire, Acquire).ok();
+ ATOMIC.compare_exchange_weak(0, 1, AcqRel, Acquire).ok();
+ ATOMIC.compare_exchange_weak(0, 1, SeqCst, Acquire).ok();
+ ATOMIC.compare_exchange_weak(0, 1, SeqCst, SeqCst).ok();
+}
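+
+// The list above exercises the classically-allowed (success, failure)
+// ordering pairs; in particular, the failure ordering can never be `Release`
+// or `AcqRel`, since a failed exchange performs no store.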
+
+#[test]
+fn atomic_const_from() {
+ const _ATOMIC_U8: AtomicU8 = AtomicU8::from(1);
+ const _ATOMIC_BOOL: AtomicBool = AtomicBool::from(true);
+ const _ATOMIC_PTR: AtomicPtr<u32> = AtomicPtr::from(core::ptr::null_mut());
+}
diff --git a/library/core/tests/bool.rs b/library/core/tests/bool.rs
new file mode 100644
index 000000000..4819ce911
--- /dev/null
+++ b/library/core/tests/bool.rs
@@ -0,0 +1,105 @@
+use core::cmp::Ordering::{Equal, Greater, Less};
+use core::ops::{BitAnd, BitOr, BitXor};
+
+#[test]
+fn test_bool() {
+ assert_eq!(false.eq(&true), false);
+ assert_eq!(false == false, true);
+ assert_eq!(false != true, true);
+ assert_eq!(false.ne(&false), false);
+
+ assert_eq!(false.bitand(false), false);
+ assert_eq!(true.bitand(false), false);
+ assert_eq!(false.bitand(true), false);
+ assert_eq!(true.bitand(true), true);
+
+ assert_eq!(false & false, false);
+ assert_eq!(true & false, false);
+ assert_eq!(false & true, false);
+ assert_eq!(true & true, true);
+
+ assert_eq!(false.bitor(false), false);
+ assert_eq!(true.bitor(false), true);
+ assert_eq!(false.bitor(true), true);
+ assert_eq!(true.bitor(true), true);
+
+ assert_eq!(false | false, false);
+ assert_eq!(true | false, true);
+ assert_eq!(false | true, true);
+ assert_eq!(true | true, true);
+
+ assert_eq!(false.bitxor(false), false);
+ assert_eq!(true.bitxor(false), true);
+ assert_eq!(false.bitxor(true), true);
+ assert_eq!(true.bitxor(true), false);
+
+ assert_eq!(false ^ false, false);
+ assert_eq!(true ^ false, true);
+ assert_eq!(false ^ true, true);
+ assert_eq!(true ^ true, false);
+
+ assert_eq!(!true, false);
+ assert_eq!(!false, true);
+
+ let s = false.to_string();
+ assert_eq!(s, "false");
+ let s = true.to_string();
+ assert_eq!(s, "true");
+
+ assert!(true > false);
+ assert!(!(false > true));
+
+ assert!(false < true);
+ assert!(!(true < false));
+
+ assert!(false <= false);
+ assert!(false >= false);
+ assert!(true <= true);
+ assert!(true >= true);
+
+ assert!(false <= true);
+ assert!(!(false >= true));
+ assert!(true >= false);
+ assert!(!(true <= false));
+
+ assert_eq!(true.cmp(&true), Equal);
+ assert_eq!(false.cmp(&false), Equal);
+ assert_eq!(true.cmp(&false), Greater);
+ assert_eq!(false.cmp(&true), Less);
+}
+
+#[test]
+pub fn test_bool_not() {
+ if !false {
+ assert!(true);
+ } else {
+ assert!(false);
+ }
+ if !true {
+ assert!(false);
+ } else {
+ assert!(true);
+ }
+}
+
+#[test]
+fn test_bool_to_option() {
+ assert_eq!(false.then_some(0), None);
+ assert_eq!(true.then_some(0), Some(0));
+ assert_eq!(false.then(|| 0), None);
+ assert_eq!(true.then(|| 0), Some(0));
+
+ const fn zero() -> i32 {
+ 0
+ }
+
+ const A: Option<i32> = false.then_some(0);
+ const B: Option<i32> = true.then_some(0);
+ const C: Option<i32> = false.then(zero);
+ const D: Option<i32> = true.then(zero);
+
+ assert_eq!(A, None);
+ assert_eq!(B, Some(0));
+ assert_eq!(C, None);
+ assert_eq!(D, Some(0));
+}
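+
+// The practical difference pinned down above: `then_some(t)` takes an
+// already-computed value, while `then(f)` only runs the closure when the
+// bool is true. A sketch of where that matters (`expensive()` is a
+// hypothetical costly or side-effecting call):
+//
+// let _ = false.then(|| expensive()); // expensive() is never invoked
+// let _ = false.then_some(expensive()); // expensive() runs regardless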
diff --git a/library/core/tests/cell.rs b/library/core/tests/cell.rs
new file mode 100644
index 000000000..7b77b2134
--- /dev/null
+++ b/library/core/tests/cell.rs
@@ -0,0 +1,479 @@
+use core::cell::*;
+use core::default::Default;
+use std::mem::drop;
+
+#[test]
+fn smoketest_unsafe_cell() {
+ let mut x = UnsafeCell::new(10);
+ let ref_mut = &mut x;
+ unsafe {
+ // The asserts are repeated in order to ensure that `get()`
+ // is non-mutating.
+ assert_eq!(*ref_mut.get(), 10);
+ assert_eq!(*ref_mut.get(), 10);
+ *ref_mut.get_mut() += 5;
+ assert_eq!(*ref_mut.get(), 15);
+ assert_eq!(*ref_mut.get(), 15);
+ assert_eq!(x.into_inner(), 15);
+ }
+}
+
+#[test]
+fn unsafe_cell_raw_get() {
+ let x = UnsafeCell::new(10);
+ let ptr = &x as *const UnsafeCell<i32>;
+ unsafe {
+ // The asserts are repeated in order to ensure that `raw_get()`
+ // is non-mutating.
+ assert_eq!(*UnsafeCell::raw_get(ptr), 10);
+ assert_eq!(*UnsafeCell::raw_get(ptr), 10);
+ *UnsafeCell::raw_get(ptr) += 5;
+ assert_eq!(*UnsafeCell::raw_get(ptr), 15);
+ assert_eq!(*UnsafeCell::raw_get(ptr), 15);
+ assert_eq!(x.into_inner(), 15);
+ }
+}
+
+#[test]
+fn smoketest_cell() {
+ let x = Cell::new(10);
+ assert_eq!(x, Cell::new(10));
+ assert_eq!(x.get(), 10);
+ x.set(20);
+ assert_eq!(x, Cell::new(20));
+ assert_eq!(x.get(), 20);
+
+ let y = Cell::new((30, 40));
+ assert_eq!(y, Cell::new((30, 40)));
+ assert_eq!(y.get(), (30, 40));
+}
+
+#[test]
+fn cell_update() {
+ let x = Cell::new(10);
+
+ assert_eq!(x.update(|x| x + 5), 15);
+ assert_eq!(x.get(), 15);
+
+ assert_eq!(x.update(|x| x / 3), 5);
+ assert_eq!(x.get(), 5);
+}
+
+#[test]
+fn cell_has_sensible_show() {
+ let x = Cell::new("foo bar");
+ assert!(format!("{x:?}").contains(x.get()));
+
+ x.set("baz qux");
+ assert!(format!("{x:?}").contains(x.get()));
+}
+
+#[test]
+fn ref_and_refmut_have_sensible_show() {
+ let refcell = RefCell::new("foo");
+
+ let refcell_refmut = refcell.borrow_mut();
+ assert_eq!(format!("{refcell_refmut}"), "foo"); // Display
+ assert!(format!("{refcell_refmut:?}").contains("foo")); // Debug
+ drop(refcell_refmut);
+
+ let refcell_ref = refcell.borrow();
+ assert_eq!(format!("{refcell_ref}"), "foo"); // Display
+ assert!(format!("{refcell_ref:?}").contains("foo")); // Debug
+ drop(refcell_ref);
+}
+
+#[test]
+fn double_imm_borrow() {
+ let x = RefCell::new(0);
+ let _b1 = x.borrow();
+ x.borrow();
+}
+
+#[test]
+fn no_mut_then_imm_borrow() {
+ let x = RefCell::new(0);
+ let _b1 = x.borrow_mut();
+ assert!(x.try_borrow().is_err());
+}
+
+#[test]
+fn no_imm_then_borrow_mut() {
+ let x = RefCell::new(0);
+ let _b1 = x.borrow();
+ assert!(x.try_borrow_mut().is_err());
+}
+
+#[test]
+fn no_double_borrow_mut() {
+ let x = RefCell::new(0);
+ assert!(x.try_borrow().is_ok());
+ let _b1 = x.borrow_mut();
+ assert!(x.try_borrow().is_err());
+}
+
+#[test]
+fn imm_release_borrow_mut() {
+ let x = RefCell::new(0);
+ {
+ let _b1 = x.borrow();
+ }
+ x.borrow_mut();
+}
+
+#[test]
+fn mut_release_borrow_mut() {
+ let x = RefCell::new(0);
+ {
+ let _b1 = x.borrow_mut();
+ }
+ x.borrow();
+}
+
+#[test]
+fn double_borrow_single_release_no_borrow_mut() {
+ let x = RefCell::new(0);
+ let _b1 = x.borrow();
+ {
+ let _b2 = x.borrow();
+ }
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_err());
+}
+
+#[test]
+#[should_panic]
+fn discard_doesnt_unborrow() {
+ let x = RefCell::new(0);
+ let _b = x.borrow();
+ let _ = _b;
+ let _b = x.borrow_mut();
+}
+
+#[test]
+fn ref_clone_updates_flag() {
+ let x = RefCell::new(0);
+ {
+ let b1 = x.borrow();
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_err());
+ {
+ let _b2 = Ref::clone(&b1);
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_err());
+ }
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_err());
+ }
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_ok());
+}
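+
+// In other words, `Ref::clone` bumps the shared-borrow count just like a
+// fresh `borrow()` would, so while the clone is alive there are two
+// outstanding shared borrows and `try_borrow_mut` keeps failing until both
+// have been dropped.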
+
+#[test]
+fn ref_map_does_not_update_flag() {
+ let x = RefCell::new(Some(5));
+ {
+ let b1: Ref<'_, Option<u32>> = x.borrow();
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_err());
+ {
+ let b2: Ref<'_, u32> = Ref::map(b1, |o| o.as_ref().unwrap());
+ assert_eq!(*b2, 5);
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_err());
+ }
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_ok());
+ }
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_ok());
+}
+
+#[test]
+fn ref_map_split_updates_flag() {
+ let x = RefCell::new([1, 2]);
+ {
+ let b1 = x.borrow();
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_err());
+ {
+ let (_b2, _b3) = Ref::map_split(b1, |slc| slc.split_at(1));
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_err());
+ }
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_ok());
+ }
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_ok());
+
+ {
+ let b1 = x.borrow_mut();
+ assert!(x.try_borrow().is_err());
+ assert!(x.try_borrow_mut().is_err());
+ {
+ let (_b2, _b3) = RefMut::map_split(b1, |slc| slc.split_at_mut(1));
+ assert!(x.try_borrow().is_err());
+ assert!(x.try_borrow_mut().is_err());
+ drop(_b2);
+ assert!(x.try_borrow().is_err());
+ assert!(x.try_borrow_mut().is_err());
+ }
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_ok());
+ }
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_ok());
+}
+
+#[test]
+fn ref_map_split() {
+ let x = RefCell::new([1, 2]);
+ let (b1, b2) = Ref::map_split(x.borrow(), |slc| slc.split_at(1));
+ assert_eq!(*b1, [1]);
+ assert_eq!(*b2, [2]);
+}
+
+#[test]
+fn ref_mut_map_split() {
+ let x = RefCell::new([1, 2]);
+ {
+ let (mut b1, mut b2) = RefMut::map_split(x.borrow_mut(), |slc| slc.split_at_mut(1));
+ assert_eq!(*b1, [1]);
+ assert_eq!(*b2, [2]);
+ b1[0] = 2;
+ b2[0] = 1;
+ }
+ assert_eq!(*x.borrow(), [2, 1]);
+}
+
+#[test]
+fn ref_map_accessor() {
+ struct X(RefCell<(u32, char)>);
+ impl X {
+ fn accessor(&self) -> Ref<'_, u32> {
+ Ref::map(self.0.borrow(), |tuple| &tuple.0)
+ }
+ }
+ let x = X(RefCell::new((7, 'z')));
+ let d: Ref<'_, u32> = x.accessor();
+ assert_eq!(*d, 7);
+}
+
+#[test]
+fn ref_mut_map_accessor() {
+ struct X(RefCell<(u32, char)>);
+ impl X {
+ fn accessor(&self) -> RefMut<'_, u32> {
+ RefMut::map(self.0.borrow_mut(), |tuple| &mut tuple.0)
+ }
+ }
+ let x = X(RefCell::new((7, 'z')));
+ {
+ let mut d: RefMut<'_, u32> = x.accessor();
+ assert_eq!(*d, 7);
+ *d += 1;
+ }
+ assert_eq!(*x.0.borrow(), (8, 'z'));
+}
+
+#[test]
+fn as_ptr() {
+ let c1: Cell<usize> = Cell::new(0);
+ c1.set(1);
+ assert_eq!(1, unsafe { *c1.as_ptr() });
+
+ let c2: Cell<usize> = Cell::new(0);
+ unsafe {
+ *c2.as_ptr() = 1;
+ }
+ assert_eq!(1, c2.get());
+
+ let r1: RefCell<usize> = RefCell::new(0);
+ *r1.borrow_mut() = 1;
+ assert_eq!(1, unsafe { *r1.as_ptr() });
+
+ let r2: RefCell<usize> = RefCell::new(0);
+ unsafe {
+ *r2.as_ptr() = 1;
+ }
+ assert_eq!(1, *r2.borrow());
+}
+
+#[test]
+fn cell_default() {
+ let cell: Cell<u32> = Default::default();
+ assert_eq!(0, cell.get());
+}
+
+#[test]
+fn cell_set() {
+ let cell = Cell::new(10);
+ cell.set(20);
+ assert_eq!(20, cell.get());
+
+ let cell = Cell::new("Hello".to_owned());
+ cell.set("World".to_owned());
+ assert_eq!("World".to_owned(), cell.into_inner());
+}
+
+#[test]
+fn cell_replace() {
+ let cell = Cell::new(10);
+ assert_eq!(10, cell.replace(20));
+ assert_eq!(20, cell.get());
+
+ let cell = Cell::new("Hello".to_owned());
+ assert_eq!("Hello".to_owned(), cell.replace("World".to_owned()));
+ assert_eq!("World".to_owned(), cell.into_inner());
+}
+
+#[test]
+fn cell_into_inner() {
+ let cell = Cell::new(10);
+ assert_eq!(10, cell.into_inner());
+
+ let cell = Cell::new("Hello world".to_owned());
+ assert_eq!("Hello world".to_owned(), cell.into_inner());
+}
+
+#[test]
+fn cell_exterior() {
+ #[derive(Copy, Clone)]
+ #[allow(dead_code)]
+ struct Point {
+ x: isize,
+ y: isize,
+ z: isize,
+ }
+
+ fn f(p: &Cell<Point>) {
+ assert_eq!(p.get().z, 12);
+ p.set(Point { x: 10, y: 11, z: 13 });
+ assert_eq!(p.get().z, 13);
+ }
+
+ let a = Point { x: 10, y: 11, z: 12 };
+ let b = &Cell::new(a);
+ assert_eq!(b.get().z, 12);
+ f(b);
+ assert_eq!(a.z, 12);
+ assert_eq!(b.get().z, 13);
+}
+
+#[test]
+fn cell_does_not_clone() {
+ #[derive(Copy)]
+ #[allow(dead_code)]
+ struct Foo {
+ x: isize,
+ }
+
+ impl Clone for Foo {
+ fn clone(&self) -> Foo {
+ // Using Cell in any way should never cause clone() to be
+ // invoked -- after all, that would permit evil user code to
+ // abuse `Cell` and trigger crashes.
+
+ panic!();
+ }
+ }
+
+ let x = Cell::new(Foo { x: 22 });
+ let _y = x.get();
+ let _z = x.clone();
+}
+
+#[test]
+fn refcell_default() {
+ let cell: RefCell<u64> = Default::default();
+ assert_eq!(0, *cell.borrow());
+}
+
+#[test]
+fn unsafe_cell_unsized() {
+ let cell: &UnsafeCell<[i32]> = &UnsafeCell::new([1, 2, 3]);
+ {
+ let val: &mut [i32] = unsafe { &mut *cell.get() };
+ val[0] = 4;
+ val[2] = 5;
+ }
+ let comp: &mut [i32] = &mut [4, 2, 5];
+ assert_eq!(unsafe { &mut *cell.get() }, comp);
+}
+
+#[test]
+fn refcell_unsized() {
+ let cell: &RefCell<[i32]> = &RefCell::new([1, 2, 3]);
+ {
+ let b = &mut *cell.borrow_mut();
+ b[0] = 4;
+ b[2] = 5;
+ }
+ let comp: &mut [i32] = &mut [4, 2, 5];
+ assert_eq!(&*cell.borrow(), comp);
+}
+
+#[test]
+fn refcell_ref_coercion() {
+ let cell: RefCell<[i32; 3]> = RefCell::new([1, 2, 3]);
+ {
+ let mut cellref: RefMut<'_, [i32; 3]> = cell.borrow_mut();
+ cellref[0] = 4;
+ let mut coerced: RefMut<'_, [i32]> = cellref;
+ coerced[2] = 5;
+ }
+ {
+ let comp: &mut [i32] = &mut [4, 2, 5];
+ let cellref: Ref<'_, [i32; 3]> = cell.borrow();
+ assert_eq!(&*cellref, comp);
+ let coerced: Ref<'_, [i32]> = cellref;
+ assert_eq!(&*coerced, comp);
+ }
+}
+
+#[test]
+#[should_panic]
+fn refcell_swap_borrows() {
+ let x = RefCell::new(0);
+ let _b = x.borrow();
+ let y = RefCell::new(1);
+ x.swap(&y);
+}
+
+#[test]
+#[should_panic]
+fn refcell_replace_borrows() {
+ let x = RefCell::new(0);
+ let _b = x.borrow();
+ x.replace(1);
+}
+
+#[test]
+fn refcell_format() {
+ let name = RefCell::new("rust");
+ let what = RefCell::new("rocks");
+ let msg = format!("{name} {}", &*what.borrow(), name = &*name.borrow());
+ assert_eq!(msg, "rust rocks".to_string());
+}
+
+#[allow(dead_code)]
+fn const_cells() {
+ const UNSAFE_CELL: UnsafeCell<i32> = UnsafeCell::new(3);
+ const _: i32 = UNSAFE_CELL.into_inner();
+
+ const REF_CELL: RefCell<i32> = RefCell::new(3);
+ const _: i32 = REF_CELL.into_inner();
+
+ const CELL: Cell<i32> = Cell::new(3);
+ const _: i32 = CELL.into_inner();
+
+ const UNSAFE_CELL_FROM: UnsafeCell<i32> = UnsafeCell::from(3);
+ const _: i32 = UNSAFE_CELL_FROM.into_inner();
+
+ const REF_CELL_FROM: RefCell<i32> = RefCell::from(3);
+ const _: i32 = REF_CELL_FROM.into_inner();
+
+ const CELL_FROM: Cell<i32> = Cell::from(3);
+ const _: i32 = CELL_FROM.into_inner();
+}
diff --git a/library/core/tests/char.rs b/library/core/tests/char.rs
new file mode 100644
index 000000000..8542e5c70
--- /dev/null
+++ b/library/core/tests/char.rs
@@ -0,0 +1,415 @@
+use std::convert::TryFrom;
+use std::str::FromStr;
+use std::{char, str};
+
+#[test]
+fn test_convert() {
+ assert_eq!(u32::from('a'), 0x61);
+ assert_eq!(u64::from('b'), 0x62);
+ assert_eq!(u128::from('c'), 0x63);
+ assert_eq!(char::from(b'\0'), '\0');
+ assert_eq!(char::from(b'a'), 'a');
+ assert_eq!(char::from(b'\xFF'), '\u{FF}');
+ assert_eq!(char::try_from(0_u32), Ok('\0'));
+ assert_eq!(char::try_from(0x61_u32), Ok('a'));
+ assert_eq!(char::try_from(0xD7FF_u32), Ok('\u{D7FF}'));
+ assert!(char::try_from(0xD800_u32).is_err());
+ assert!(char::try_from(0xDFFF_u32).is_err());
+ assert_eq!(char::try_from(0xE000_u32), Ok('\u{E000}'));
+ assert_eq!(char::try_from(0x10FFFF_u32), Ok('\u{10FFFF}'));
+ assert!(char::try_from(0x110000_u32).is_err());
+ assert!(char::try_from(0xFFFF_FFFF_u32).is_err());
+}
+
+#[test]
+const fn test_convert_const() {
+ assert!(u32::from('a') == 0x61);
+ assert!(u64::from('b') == 0x62);
+ assert!(u128::from('c') == 0x63);
+ assert!(char::from(b'\0') == '\0');
+ assert!(char::from(b'a') == 'a');
+ assert!(char::from(b'\xFF') == '\u{FF}');
+}
+
+#[test]
+fn test_from_str() {
+ assert_eq!(char::from_str("a").unwrap(), 'a');
+ assert_eq!(char::from_str("\0").unwrap(), '\0');
+ assert_eq!(char::from_str("\u{D7FF}").unwrap(), '\u{d7FF}');
+ assert!(char::from_str("").is_err());
+ assert!(char::from_str("abc").is_err());
+}
+
+#[test]
+fn test_is_lowercase() {
+ assert!('a'.is_lowercase());
+ assert!('ö'.is_lowercase());
+ assert!('ß'.is_lowercase());
+ assert!(!'Ü'.is_lowercase());
+ assert!(!'P'.is_lowercase());
+}
+
+#[test]
+fn test_is_uppercase() {
+ assert!(!'h'.is_uppercase());
+ assert!(!'ä'.is_uppercase());
+ assert!(!'ß'.is_uppercase());
+ assert!('Ö'.is_uppercase());
+ assert!('T'.is_uppercase());
+}
+
+#[test]
+fn test_is_whitespace() {
+ assert!(' '.is_whitespace());
+ assert!('\u{2007}'.is_whitespace());
+ assert!('\t'.is_whitespace());
+ assert!('\n'.is_whitespace());
+ assert!(!'a'.is_whitespace());
+ assert!(!'_'.is_whitespace());
+ assert!(!'\u{0}'.is_whitespace());
+}
+
+#[test]
+fn test_to_digit() {
+ assert_eq!('0'.to_digit(10), Some(0));
+ assert_eq!('1'.to_digit(2), Some(1));
+ assert_eq!('2'.to_digit(3), Some(2));
+ assert_eq!('9'.to_digit(10), Some(9));
+ assert_eq!('a'.to_digit(16), Some(10));
+ assert_eq!('A'.to_digit(16), Some(10));
+ assert_eq!('b'.to_digit(16), Some(11));
+ assert_eq!('B'.to_digit(16), Some(11));
+ assert_eq!('A'.to_digit(36), Some(10));
+ assert_eq!('z'.to_digit(36), Some(35));
+ assert_eq!('Z'.to_digit(36), Some(35));
+ assert_eq!('['.to_digit(36), None);
+ assert_eq!('`'.to_digit(36), None);
+ assert_eq!('{'.to_digit(36), None);
+ assert_eq!('$'.to_digit(36), None);
+ assert_eq!('@'.to_digit(16), None);
+ assert_eq!('G'.to_digit(16), None);
+ assert_eq!('g'.to_digit(16), None);
+ assert_eq!(' '.to_digit(10), None);
+ assert_eq!('/'.to_digit(10), None);
+ assert_eq!(':'.to_digit(10), None);
+ assert_eq!(':'.to_digit(11), None);
+}
+
+#[test]
+fn test_to_lowercase() {
+ fn lower(c: char) -> String {
+ let to_lowercase = c.to_lowercase();
+ assert_eq!(to_lowercase.len(), to_lowercase.count());
+ let iter: String = c.to_lowercase().collect();
+ let disp: String = c.to_lowercase().to_string();
+ assert_eq!(iter, disp);
+ let iter_rev: String = c.to_lowercase().rev().collect();
+ let disp_rev: String = disp.chars().rev().collect();
+ assert_eq!(iter_rev, disp_rev);
+ iter
+ }
+ assert_eq!(lower('A'), "a");
+ assert_eq!(lower('Ö'), "ö");
+ assert_eq!(lower('ß'), "ß");
+ assert_eq!(lower('Ü'), "ü");
+ assert_eq!(lower('💩'), "💩");
+ assert_eq!(lower('Σ'), "σ");
+ assert_eq!(lower('Τ'), "τ");
+ assert_eq!(lower('Ι'), "ι");
+ assert_eq!(lower('Γ'), "γ");
+ assert_eq!(lower('Μ'), "μ");
+ assert_eq!(lower('Α'), "α");
+ assert_eq!(lower('Σ'), "σ");
+ assert_eq!(lower('Dž'), "dž");
+ assert_eq!(lower('fi'), "fi");
+ assert_eq!(lower('İ'), "i\u{307}");
+}
+
+#[test]
+fn test_to_uppercase() {
+ fn upper(c: char) -> String {
+ let to_uppercase = c.to_uppercase();
+ assert_eq!(to_uppercase.len(), to_uppercase.count());
+ let iter: String = c.to_uppercase().collect();
+ let disp: String = c.to_uppercase().to_string();
+ assert_eq!(iter, disp);
+ let iter_rev: String = c.to_uppercase().rev().collect();
+ let disp_rev: String = disp.chars().rev().collect();
+ assert_eq!(iter_rev, disp_rev);
+ iter
+ }
+ assert_eq!(upper('a'), "A");
+ assert_eq!(upper('ö'), "Ö");
+ assert_eq!(upper('ß'), "SS"); // not ẞ: Latin capital letter sharp s
+ assert_eq!(upper('ü'), "Ü");
+ assert_eq!(upper('💩'), "💩");
+
+ assert_eq!(upper('σ'), "Σ");
+ assert_eq!(upper('τ'), "Τ");
+ assert_eq!(upper('ι'), "Ι");
+ assert_eq!(upper('γ'), "Γ");
+ assert_eq!(upper('μ'), "Μ");
+ assert_eq!(upper('α'), "Α");
+ assert_eq!(upper('ς'), "Σ");
+ assert_eq!(upper('Dž'), "DŽ");
+ assert_eq!(upper('fi'), "FI");
+ assert_eq!(upper('ᾀ'), "ἈΙ");
+}
+
+#[test]
+fn test_is_control() {
+ assert!('\u{0}'.is_control());
+ assert!('\u{3}'.is_control());
+ assert!('\u{6}'.is_control());
+ assert!('\u{9}'.is_control());
+ assert!('\u{7f}'.is_control());
+ assert!('\u{92}'.is_control());
+ assert!(!'\u{20}'.is_control());
+ assert!(!'\u{55}'.is_control());
+ assert!(!'\u{68}'.is_control());
+}
+
+#[test]
+fn test_is_numeric() {
+ assert!('2'.is_numeric());
+ assert!('7'.is_numeric());
+ assert!('¾'.is_numeric());
+ assert!(!'c'.is_numeric());
+ assert!(!'i'.is_numeric());
+ assert!(!'z'.is_numeric());
+ assert!(!'Q'.is_numeric());
+}
+
+#[test]
+fn test_escape_debug() {
+ fn string(c: char) -> String {
+ let iter: String = c.escape_debug().collect();
+ let disp: String = c.escape_debug().to_string();
+ assert_eq!(iter, disp);
+ iter
+ }
+ assert_eq!(string('\n'), "\\n");
+ assert_eq!(string('\r'), "\\r");
+ assert_eq!(string('\''), "\\'");
+ assert_eq!(string('"'), "\\\"");
+ assert_eq!(string(' '), " ");
+ assert_eq!(string('a'), "a");
+ assert_eq!(string('~'), "~");
+ assert_eq!(string('é'), "é");
+ assert_eq!(string('文'), "文");
+ assert_eq!(string('\x00'), "\\0");
+ assert_eq!(string('\x1f'), "\\u{1f}");
+ assert_eq!(string('\x7f'), "\\u{7f}");
+ assert_eq!(string('\u{80}'), "\\u{80}");
+ assert_eq!(string('\u{ff}'), "\u{ff}");
+ assert_eq!(string('\u{11b}'), "\u{11b}");
+ assert_eq!(string('\u{1d4b6}'), "\u{1d4b6}");
+ assert_eq!(string('\u{301}'), "\\u{301}"); // combining character
+ assert_eq!(string('\u{200b}'), "\\u{200b}"); // zero width space
+ assert_eq!(string('\u{e000}'), "\\u{e000}"); // private use 1
+ assert_eq!(string('\u{100000}'), "\\u{100000}"); // private use 2
+}
+
+#[test]
+fn test_escape_default() {
+ fn string(c: char) -> String {
+ let iter: String = c.escape_default().collect();
+ let disp: String = c.escape_default().to_string();
+ assert_eq!(iter, disp);
+ iter
+ }
+ assert_eq!(string('\n'), "\\n");
+ assert_eq!(string('\r'), "\\r");
+ assert_eq!(string('\''), "\\'");
+ assert_eq!(string('"'), "\\\"");
+ assert_eq!(string(' '), " ");
+ assert_eq!(string('a'), "a");
+ assert_eq!(string('~'), "~");
+ assert_eq!(string('é'), "\\u{e9}");
+ assert_eq!(string('\x00'), "\\u{0}");
+ assert_eq!(string('\x1f'), "\\u{1f}");
+ assert_eq!(string('\x7f'), "\\u{7f}");
+ assert_eq!(string('\u{80}'), "\\u{80}");
+ assert_eq!(string('\u{ff}'), "\\u{ff}");
+ assert_eq!(string('\u{11b}'), "\\u{11b}");
+ assert_eq!(string('\u{1d4b6}'), "\\u{1d4b6}");
+ assert_eq!(string('\u{200b}'), "\\u{200b}"); // zero width space
+ assert_eq!(string('\u{e000}'), "\\u{e000}"); // private use 1
+ assert_eq!(string('\u{100000}'), "\\u{100000}"); // private use 2
+}
+
+#[test]
+fn test_escape_unicode() {
+ fn string(c: char) -> String {
+ let iter: String = c.escape_unicode().collect();
+ let disp: String = c.escape_unicode().to_string();
+ assert_eq!(iter, disp);
+ iter
+ }
+
+ assert_eq!(string('\x00'), "\\u{0}");
+ assert_eq!(string('\n'), "\\u{a}");
+ assert_eq!(string(' '), "\\u{20}");
+ assert_eq!(string('a'), "\\u{61}");
+ assert_eq!(string('\u{11b}'), "\\u{11b}");
+ assert_eq!(string('\u{1d4b6}'), "\\u{1d4b6}");
+}
+
+#[test]
+fn test_encode_utf8() {
+ fn check(input: char, expect: &[u8]) {
+ let mut buf = [0; 4];
+ let ptr = buf.as_ptr();
+ let s = input.encode_utf8(&mut buf);
+ assert_eq!(s.as_ptr() as usize, ptr as usize);
+ assert!(str::from_utf8(s.as_bytes()).is_ok());
+ assert_eq!(s.as_bytes(), expect);
+ }
+
+ check('x', &[0x78]);
+ check('\u{e9}', &[0xc3, 0xa9]);
+ check('\u{a66e}', &[0xea, 0x99, 0xae]);
+ check('\u{1f4a9}', &[0xf0, 0x9f, 0x92, 0xa9]);
+}
+
+#[test]
+fn test_encode_utf16() {
+ fn check(input: char, expect: &[u16]) {
+ let mut buf = [0; 2];
+ let ptr = buf.as_mut_ptr();
+ let b = input.encode_utf16(&mut buf);
+ assert_eq!(b.as_mut_ptr() as usize, ptr as usize);
+ assert_eq!(b, expect);
+ }
+
+ check('x', &[0x0078]);
+ check('\u{e9}', &[0x00e9]);
+ check('\u{a66e}', &[0xa66e]);
+ check('\u{1f4a9}', &[0xd83d, 0xdca9]);
+}
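+
+// Worked surrogate-pair arithmetic for the last case above (standard UTF-16
+// encoding of U+1F4A9):
+//
+// 0x1F4A9 - 0x10000 = 0x0F4A9
+// 0xD800 + (0x0F4A9 >> 10) = 0xD83D (high surrogate)
+// 0xDC00 + (0x0F4A9 & 0x3FF) = 0xDCA9 (low surrogate)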
+
+#[test]
+fn test_len_utf16() {
+ assert!('x'.len_utf16() == 1);
+ assert!('\u{e9}'.len_utf16() == 1);
+ assert!('\u{a66e}'.len_utf16() == 1);
+ assert!('\u{1f4a9}'.len_utf16() == 2);
+}
+
+#[test]
+fn test_decode_utf16() {
+ fn check(s: &[u16], expected: &[Result<char, u16>]) {
+ let v = char::decode_utf16(s.iter().cloned())
+ .map(|r| r.map_err(|e| e.unpaired_surrogate()))
+ .collect::<Vec<_>>();
+ assert_eq!(v, expected);
+ }
+ check(&[0xD800, 0x41, 0x42], &[Err(0xD800), Ok('A'), Ok('B')]);
+ check(&[0xD800, 0], &[Err(0xD800), Ok('\0')]);
+}
+
+#[test]
+fn test_decode_utf16_size_hint() {
+ fn check(s: &[u16]) {
+ let mut iter = char::decode_utf16(s.iter().cloned());
+
+ loop {
+ let count = iter.clone().count();
+ let (lower, upper) = iter.size_hint();
+
+ assert!(
+ lower <= count && count <= upper.unwrap(),
+ "lower = {lower}, count = {count}, upper = {upper:?}"
+ );
+
+ if iter.next().is_none() {
+ break;
+ }
+ }
+ }
+
+ check(&[0xD800, 0xD800, 0xDC00]);
+ check(&[0xD800, 0xD800, 0x0]);
+ check(&[0xD800, 0x41, 0x42]);
+ check(&[0xD800, 0]);
+ check(&[0xD834, 0x006d]);
+}
+
+#[test]
+fn ed_iterator_specializations() {
+ // Check counting
+ assert_eq!('\n'.escape_default().count(), 2);
+ assert_eq!('c'.escape_default().count(), 1);
+ assert_eq!(' '.escape_default().count(), 1);
+ assert_eq!('\\'.escape_default().count(), 2);
+ assert_eq!('\''.escape_default().count(), 2);
+
+ // Check nth
+
+ // Check that OoB is handled correctly
+ assert_eq!('\n'.escape_default().nth(2), None);
+ assert_eq!('c'.escape_default().nth(1), None);
+ assert_eq!(' '.escape_default().nth(1), None);
+ assert_eq!('\\'.escape_default().nth(2), None);
+ assert_eq!('\''.escape_default().nth(2), None);
+
+ // Check the first char
+ assert_eq!('\n'.escape_default().nth(0), Some('\\'));
+ assert_eq!('c'.escape_default().nth(0), Some('c'));
+ assert_eq!(' '.escape_default().nth(0), Some(' '));
+ assert_eq!('\\'.escape_default().nth(0), Some('\\'));
+ assert_eq!('\''.escape_default().nth(0), Some('\\'));
+
+ // Check the second char
+ assert_eq!('\n'.escape_default().nth(1), Some('n'));
+ assert_eq!('\\'.escape_default().nth(1), Some('\\'));
+ assert_eq!('\''.escape_default().nth(1), Some('\''));
+
+ // Check the last char
+ assert_eq!('\n'.escape_default().last(), Some('n'));
+ assert_eq!('c'.escape_default().last(), Some('c'));
+ assert_eq!(' '.escape_default().last(), Some(' '));
+ assert_eq!('\\'.escape_default().last(), Some('\\'));
+ assert_eq!('\''.escape_default().last(), Some('\''));
+}
+
+#[test]
+fn eu_iterator_specializations() {
+ fn check(c: char) {
+ let len = c.escape_unicode().count();
+
+ // Check OoB
+ assert_eq!(c.escape_unicode().nth(len), None);
+
+ // For all possible in-bound offsets
+ let mut iter = c.escape_unicode();
+ for offset in 0..len {
+ // Check last
+ assert_eq!(iter.clone().last(), Some('}'));
+
+ // Check len
+ assert_eq!(iter.len(), len - offset);
+
+ // Check size_hint (= len in ExactSizeIterator)
+ assert_eq!(iter.size_hint(), (iter.len(), Some(iter.len())));
+
+ // Check counting
+ assert_eq!(iter.clone().count(), len - offset);
+
+ // Check nth
+ assert_eq!(c.escape_unicode().nth(offset), iter.next());
+ }
+
+ // Check post-last
+ assert_eq!(iter.clone().last(), None);
+ assert_eq!(iter.clone().count(), 0);
+ }
+
+ check('\u{0}');
+ check('\u{1}');
+ check('\u{12}');
+ check('\u{123}');
+ check('\u{1234}');
+ check('\u{12340}');
+ check('\u{10FFFF}');
+}
diff --git a/library/core/tests/clone.rs b/library/core/tests/clone.rs
new file mode 100644
index 000000000..33ca9f2c6
--- /dev/null
+++ b/library/core/tests/clone.rs
@@ -0,0 +1,15 @@
+#[test]
+fn test_borrowed_clone() {
+ let x = 5;
+ let y: &i32 = &x;
+ let z: &i32 = (&y).clone();
+ assert_eq!(*z, 5);
+}
+
+#[test]
+fn test_clone_from() {
+ let a = Box::new(5);
+ let mut b = Box::new(10);
+ b.clone_from(&a);
+ assert_eq!(*b, 5);
+}
diff --git a/library/core/tests/cmp.rs b/library/core/tests/cmp.rs
new file mode 100644
index 000000000..8d0e59d5a
--- /dev/null
+++ b/library/core/tests/cmp.rs
@@ -0,0 +1,250 @@
+use core::cmp::{
+ self,
+ Ordering::{self, *},
+};
+
+#[test]
+fn test_int_totalord() {
+ assert_eq!(5.cmp(&10), Less);
+ assert_eq!(10.cmp(&5), Greater);
+ assert_eq!(5.cmp(&5), Equal);
+ assert_eq!((-5).cmp(&12), Less);
+ assert_eq!(12.cmp(&-5), Greater);
+}
+
+#[test]
+fn test_bool_totalord() {
+ assert_eq!(true.cmp(&false), Greater);
+ assert_eq!(false.cmp(&true), Less);
+ assert_eq!(true.cmp(&true), Equal);
+ assert_eq!(false.cmp(&false), Equal);
+}
+
+#[test]
+fn test_mut_int_totalord() {
+ assert_eq!((&mut 5).cmp(&&mut 10), Less);
+ assert_eq!((&mut 10).cmp(&&mut 5), Greater);
+ assert_eq!((&mut 5).cmp(&&mut 5), Equal);
+ assert_eq!((&mut -5).cmp(&&mut 12), Less);
+ assert_eq!((&mut 12).cmp(&&mut -5), Greater);
+}
+
+#[test]
+fn test_ord_max_min() {
+ assert_eq!(1.max(2), 2);
+ assert_eq!(2.max(1), 2);
+ assert_eq!(1.min(2), 1);
+ assert_eq!(2.min(1), 1);
+ assert_eq!(1.max(1), 1);
+ assert_eq!(1.min(1), 1);
+}
+
+#[test]
+fn test_ord_min_max_by() {
+ let f = |x: &i32, y: &i32| x.abs().cmp(&y.abs());
+ assert_eq!(cmp::min_by(1, -1, f), 1);
+ assert_eq!(cmp::min_by(1, -2, f), 1);
+ assert_eq!(cmp::min_by(2, -1, f), -1);
+ assert_eq!(cmp::max_by(1, -1, f), -1);
+ assert_eq!(cmp::max_by(1, -2, f), -2);
+ assert_eq!(cmp::max_by(2, -1, f), 2);
+}
+
+#[test]
+fn test_ord_min_max_by_key() {
+ let f = |x: &i32| x.abs();
+ assert_eq!(cmp::min_by_key(1, -1, f), 1);
+ assert_eq!(cmp::min_by_key(1, -2, f), 1);
+ assert_eq!(cmp::min_by_key(2, -1, f), -1);
+ assert_eq!(cmp::max_by_key(1, -1, f), -1);
+ assert_eq!(cmp::max_by_key(1, -2, f), -2);
+ assert_eq!(cmp::max_by_key(2, -1, f), 2);
+}
+
+#[test]
+fn test_ordering_reverse() {
+ assert_eq!(Less.reverse(), Greater);
+ assert_eq!(Equal.reverse(), Equal);
+ assert_eq!(Greater.reverse(), Less);
+}
+
+#[test]
+fn test_ordering_order() {
+ assert!(Less < Equal);
+ assert_eq!(Greater.cmp(&Less), Greater);
+}
+
+#[test]
+fn test_ordering_then() {
+ assert_eq!(Equal.then(Less), Less);
+ assert_eq!(Equal.then(Equal), Equal);
+ assert_eq!(Equal.then(Greater), Greater);
+ assert_eq!(Less.then(Less), Less);
+ assert_eq!(Less.then(Equal), Less);
+ assert_eq!(Less.then(Greater), Less);
+ assert_eq!(Greater.then(Less), Greater);
+ assert_eq!(Greater.then(Equal), Greater);
+ assert_eq!(Greater.then(Greater), Greater);
+}
+
+#[test]
+fn test_ordering_then_with() {
+ assert_eq!(Equal.then_with(|| Less), Less);
+ assert_eq!(Equal.then_with(|| Equal), Equal);
+ assert_eq!(Equal.then_with(|| Greater), Greater);
+ assert_eq!(Less.then_with(|| Less), Less);
+ assert_eq!(Less.then_with(|| Equal), Less);
+ assert_eq!(Less.then_with(|| Greater), Less);
+ assert_eq!(Greater.then_with(|| Less), Greater);
+ assert_eq!(Greater.then_with(|| Equal), Greater);
+ assert_eq!(Greater.then_with(|| Greater), Greater);
+}
+
+#[test]
+fn test_user_defined_eq() {
+ // Our type.
+ struct SketchyNum {
+ num: isize,
+ }
+
+ // Our implementation of `PartialEq` to support `==` and `!=`.
+ impl PartialEq for SketchyNum {
+ // Our custom eq allows numbers which are near each other to be equal! :D
+ fn eq(&self, other: &SketchyNum) -> bool {
+ (self.num - other.num).abs() < 5
+ }
+ }
+
+ // Now these binary operators will work when applied!
+ assert!(SketchyNum { num: 37 } == SketchyNum { num: 34 });
+ assert!(SketchyNum { num: 25 } != SketchyNum { num: 57 });
+}
+
+#[test]
+fn ordering_const() {
+ // test that the methods of `Ordering` are usable in a const context
+
+ const ORDERING: Ordering = Greater;
+
+ const REVERSE: Ordering = ORDERING.reverse();
+ assert_eq!(REVERSE, Less);
+
+ const THEN: Ordering = Equal.then(ORDERING);
+ assert_eq!(THEN, Greater);
+}
+
+#[test]
+fn ordering_structural_eq() {
+ // test that consts of type `Ordering` are usable in patterns
+
+ const ORDERING: Ordering = Greater;
+
+ const REVERSE: Ordering = ORDERING.reverse();
+ match Ordering::Less {
+ REVERSE => {}
+ _ => unreachable!(),
+ };
+}
+
+#[test]
+fn cmp_default() {
+ // Test default methods in PartialOrd and PartialEq
+
+ #[derive(Debug)]
+ struct Fool(bool);
+
+ impl PartialEq for Fool {
+ fn eq(&self, other: &Fool) -> bool {
+ let Fool(this) = *self;
+ let Fool(other) = *other;
+ this != other
+ }
+ }
+
+ struct Int(isize);
+
+ impl PartialEq for Int {
+ fn eq(&self, other: &Int) -> bool {
+ let Int(this) = *self;
+ let Int(other) = *other;
+ this == other
+ }
+ }
+
+ impl PartialOrd for Int {
+ fn partial_cmp(&self, other: &Int) -> Option<Ordering> {
+ let Int(this) = *self;
+ let Int(other) = *other;
+ this.partial_cmp(&other)
+ }
+ }
+
+ struct RevInt(isize);
+
+ impl PartialEq for RevInt {
+ fn eq(&self, other: &RevInt) -> bool {
+ let RevInt(this) = *self;
+ let RevInt(other) = *other;
+ this == other
+ }
+ }
+
+ impl PartialOrd for RevInt {
+ fn partial_cmp(&self, other: &RevInt) -> Option<Ordering> {
+ let RevInt(this) = *self;
+ let RevInt(other) = *other;
+ other.partial_cmp(&this)
+ }
+ }
+
+ assert!(Int(2) > Int(1));
+ assert!(Int(2) >= Int(1));
+ assert!(Int(1) >= Int(1));
+ assert!(Int(1) < Int(2));
+ assert!(Int(1) <= Int(2));
+ assert!(Int(1) <= Int(1));
+
+ assert!(RevInt(2) < RevInt(1));
+ assert!(RevInt(2) <= RevInt(1));
+ assert!(RevInt(1) <= RevInt(1));
+ assert!(RevInt(1) > RevInt(2));
+ assert!(RevInt(1) >= RevInt(2));
+ assert!(RevInt(1) >= RevInt(1));
+
+ assert_eq!(Fool(true), Fool(false));
+ assert!(Fool(true) != Fool(true));
+ assert!(Fool(false) != Fool(false));
+ assert_eq!(Fool(false), Fool(true));
+}
+
+mod const_cmp {
+ use super::*;
+
+ struct S(i32);
+
+ impl const PartialEq for S {
+ fn eq(&self, other: &Self) -> bool {
+ self.0 == other.0
+ }
+ }
+
+ impl const PartialOrd for S {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ let ret = match (self.0, other.0) {
+ (a, b) if a > b => Ordering::Greater,
+ (a, b) if a < b => Ordering::Less,
+ _ => Ordering::Equal,
+ };
+
+ Some(ret)
+ }
+ }
+
+ const _: () = assert!(S(1) == S(1));
+ const _: () = assert!(S(0) != S(1));
+
+ const _: () = assert!(S(1) <= S(1));
+ const _: () = assert!(S(1) >= S(1));
+ const _: () = assert!(S(0) < S(1));
+ const _: () = assert!(S(1) > S(0));
+}
diff --git a/library/core/tests/const_ptr.rs b/library/core/tests/const_ptr.rs
new file mode 100644
index 000000000..152fed803
--- /dev/null
+++ b/library/core/tests/const_ptr.rs
@@ -0,0 +1,101 @@
+// Aligned to two bytes
+const DATA: [u16; 2] = [u16::from_ne_bytes([0x01, 0x23]), u16::from_ne_bytes([0x45, 0x67])];
+
+const fn unaligned_ptr() -> *const u16 {
+ // Since DATA.as_ptr() is aligned to two bytes, adding 1 byte to that produces an unaligned *const u16
+ unsafe { (DATA.as_ptr() as *const u8).add(1) as *const u16 }
+}
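+
+// Worked example: regardless of endianness, DATA's in-memory bytes are
+// [0x01, 0x23, 0x45, 0x67], so a u16 loaded at byte offset 1 sees the two
+// bytes [0x23, 0x45], which is exactly what the `read_unaligned` assertions
+// below reconstruct with `u16::from_ne_bytes`.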
+
+#[test]
+fn read() {
+ use core::ptr;
+
+ const FOO: i32 = unsafe { ptr::read(&42 as *const i32) };
+ assert_eq!(FOO, 42);
+
+ const ALIGNED: i32 = unsafe { ptr::read_unaligned(&42 as *const i32) };
+ assert_eq!(ALIGNED, 42);
+
+ const UNALIGNED_PTR: *const u16 = unaligned_ptr();
+
+ const UNALIGNED: u16 = unsafe { ptr::read_unaligned(UNALIGNED_PTR) };
+ assert_eq!(UNALIGNED, u16::from_ne_bytes([0x23, 0x45]));
+}
+
+#[test]
+fn const_ptr_read() {
+ const FOO: i32 = unsafe { (&42 as *const i32).read() };
+ assert_eq!(FOO, 42);
+
+ const ALIGNED: i32 = unsafe { (&42 as *const i32).read_unaligned() };
+ assert_eq!(ALIGNED, 42);
+
+ const UNALIGNED_PTR: *const u16 = unaligned_ptr();
+
+ const UNALIGNED: u16 = unsafe { UNALIGNED_PTR.read_unaligned() };
+ assert_eq!(UNALIGNED, u16::from_ne_bytes([0x23, 0x45]));
+}
+
+#[test]
+fn mut_ptr_read() {
+ const FOO: i32 = unsafe { (&42 as *const i32 as *mut i32).read() };
+ assert_eq!(FOO, 42);
+
+ const ALIGNED: i32 = unsafe { (&42 as *const i32 as *mut i32).read_unaligned() };
+ assert_eq!(ALIGNED, 42);
+
+ const UNALIGNED_PTR: *mut u16 = unaligned_ptr() as *mut u16;
+
+ const UNALIGNED: u16 = unsafe { UNALIGNED_PTR.read_unaligned() };
+ assert_eq!(UNALIGNED, u16::from_ne_bytes([0x23, 0x45]));
+}
+
+#[test]
+fn write() {
+ use core::ptr;
+
+ const fn write_aligned() -> i32 {
+ let mut res = 0;
+ unsafe {
+ ptr::write(&mut res as *mut _, 42);
+ }
+ res
+ }
+ const ALIGNED: i32 = write_aligned();
+ assert_eq!(ALIGNED, 42);
+
+ const fn write_unaligned() -> [u16; 2] {
+ let mut two_aligned = [0u16; 2];
+ unsafe {
+ let unaligned_ptr = (two_aligned.as_mut_ptr() as *mut u8).add(1) as *mut u16;
+ ptr::write_unaligned(unaligned_ptr, u16::from_ne_bytes([0x23, 0x45]));
+ }
+ two_aligned
+ }
+ const UNALIGNED: [u16; 2] = write_unaligned();
+ assert_eq!(UNALIGNED, [u16::from_ne_bytes([0x00, 0x23]), u16::from_ne_bytes([0x45, 0x00])]);
+}
+
+#[test]
+fn mut_ptr_write() {
+ const fn aligned() -> i32 {
+ let mut res = 0;
+ unsafe {
+ (&mut res as *mut i32).write(42);
+ }
+ res
+ }
+ const ALIGNED: i32 = aligned();
+ assert_eq!(ALIGNED, 42);
+
+ const fn write_unaligned() -> [u16; 2] {
+ let mut two_aligned = [0u16; 2];
+ unsafe {
+ let unaligned_ptr = (two_aligned.as_mut_ptr() as *mut u8).add(1) as *mut u16;
+ unaligned_ptr.write_unaligned(u16::from_ne_bytes([0x23, 0x45]));
+ }
+ two_aligned
+ }
+ const UNALIGNED: [u16; 2] = write_unaligned();
+ assert_eq!(UNALIGNED, [u16::from_ne_bytes([0x00, 0x23]), u16::from_ne_bytes([0x45, 0x00])]);
+}
diff --git a/library/core/tests/convert.rs b/library/core/tests/convert.rs
new file mode 100644
index 000000000..f1048f4cf
--- /dev/null
+++ b/library/core/tests/convert.rs
@@ -0,0 +1,16 @@
+#[test]
+fn convert() {
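+ // Why this compiles (an assumption about this toolchain): the identity
+ // `From`/`Into` impls are `const` under an unstable feature, presumably
+ // enabled at this test crate's root, so they are callable from `const fn`.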
+ const fn from(x: i32) -> i32 {
+ i32::from(x)
+ }
+
+ const FOO: i32 = from(42);
+ assert_eq!(FOO, 42);
+
+ const fn into(x: Vec<String>) -> Vec<String> {
+ x.into()
+ }
+
+ const BAR: Vec<String> = into(Vec::new());
+ assert_eq!(BAR, Vec::<String>::new());
+}
diff --git a/library/core/tests/fmt/builders.rs b/library/core/tests/fmt/builders.rs
new file mode 100644
index 000000000..487ce46be
--- /dev/null
+++ b/library/core/tests/fmt/builders.rs
@@ -0,0 +1,726 @@
+mod debug_struct {
+ use std::fmt;
+
+ #[test]
+ fn test_empty() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Foo").finish()
+ }
+ }
+
+ assert_eq!("Foo", format!("{Foo:?}"));
+ assert_eq!("Foo", format!("{Foo:#?}"));
+ }
+
+ #[test]
+ fn test_single() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Foo").field("bar", &true).finish()
+ }
+ }
+
+ assert_eq!("Foo { bar: true }", format!("{Foo:?}"));
+ assert_eq!(
+ "Foo {
+    bar: true,
+}",
+ format!("{Foo:#?}")
+ );
+ }
+
+ #[test]
+ fn test_multiple() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Foo")
+ .field("bar", &true)
+ .field("baz", &format_args!("{}/{}", 10, 20))
+ .finish()
+ }
+ }
+
+ assert_eq!("Foo { bar: true, baz: 10/20 }", format!("{Foo:?}"));
+ assert_eq!(
+ "Foo {
+    bar: true,
+    baz: 10/20,
+}",
+ format!("{Foo:#?}")
+ );
+ }
+
+ #[test]
+ fn test_nested() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Foo")
+ .field("bar", &true)
+ .field("baz", &format_args!("{}/{}", 10, 20))
+ .finish()
+ }
+ }
+
+ struct Bar;
+
+ impl fmt::Debug for Bar {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Bar").field("foo", &Foo).field("hello", &"world").finish()
+ }
+ }
+
+ assert_eq!(
+ "Bar { foo: Foo { bar: true, baz: 10/20 }, hello: \"world\" }",
+ format!("{Bar:?}")
+ );
+ assert_eq!(
+ "Bar {
+    foo: Foo {
+        bar: true,
+        baz: 10/20,
+    },
+    hello: \"world\",
+}",
+ format!("{Bar:#?}")
+ );
+ }
+
+ #[test]
+ fn test_only_non_exhaustive() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Foo").finish_non_exhaustive()
+ }
+ }
+
+ assert_eq!("Foo { .. }", format!("{Foo:?}"));
+ assert_eq!("Foo { .. }", format!("{Foo:#?}"));
+ }
+
+ #[test]
+ fn test_multiple_and_non_exhaustive() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Foo")
+ .field("bar", &true)
+ .field("baz", &format_args!("{}/{}", 10, 20))
+ .finish_non_exhaustive()
+ }
+ }
+
+ assert_eq!("Foo { bar: true, baz: 10/20, .. }", format!("{Foo:?}"));
+ assert_eq!(
+ "Foo {
+    bar: true,
+    baz: 10/20,
+    ..
+}",
+ format!("{Foo:#?}")
+ );
+ }
+
+ #[test]
+ fn test_nested_non_exhaustive() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Foo")
+ .field("bar", &true)
+ .field("baz", &format_args!("{}/{}", 10, 20))
+ .finish_non_exhaustive()
+ }
+ }
+
+ struct Bar;
+
+ impl fmt::Debug for Bar {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Bar")
+ .field("foo", &Foo)
+ .field("hello", &"world")
+ .finish_non_exhaustive()
+ }
+ }
+
+ assert_eq!(
+ "Bar { foo: Foo { bar: true, baz: 10/20, .. }, hello: \"world\", .. }",
+ format!("{Bar:?}")
+ );
+ assert_eq!(
+ "Bar {
+    foo: Foo {
+        bar: true,
+        baz: 10/20,
+        ..
+    },
+    hello: \"world\",
+    ..
+}",
+ format!("{Bar:#?}")
+ );
+ }
+}
+
+mod debug_tuple {
+ use std::fmt;
+
+ #[test]
+ fn test_empty() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_tuple("Foo").finish()
+ }
+ }
+
+ assert_eq!("Foo", format!("{Foo:?}"));
+ assert_eq!("Foo", format!("{Foo:#?}"));
+ }
+
+ #[test]
+ fn test_single() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_tuple("Foo").field(&true).finish()
+ }
+ }
+
+ assert_eq!("Foo(true)", format!("{Foo:?}"));
+ assert_eq!(
+ "Foo(
+    true,
+)",
+ format!("{Foo:#?}")
+ );
+ }
+
+ #[test]
+ fn test_multiple() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_tuple("Foo").field(&true).field(&format_args!("{}/{}", 10, 20)).finish()
+ }
+ }
+
+ assert_eq!("Foo(true, 10/20)", format!("{Foo:?}"));
+ assert_eq!(
+ "Foo(
+    true,
+    10/20,
+)",
+ format!("{Foo:#?}")
+ );
+ }
+
+ #[test]
+ fn test_nested() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_tuple("Foo").field(&true).field(&format_args!("{}/{}", 10, 20)).finish()
+ }
+ }
+
+ struct Bar;
+
+ impl fmt::Debug for Bar {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_tuple("Bar").field(&Foo).field(&"world").finish()
+ }
+ }
+
+ assert_eq!("Bar(Foo(true, 10/20), \"world\")", format!("{Bar:?}"));
+ assert_eq!(
+ "Bar(
+    Foo(
+        true,
+        10/20,
+    ),
+    \"world\",
+)",
+ format!("{Bar:#?}")
+ );
+ }
+}
+
+mod debug_map {
+ use std::fmt;
+
+ #[test]
+ fn test_empty() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_map().finish()
+ }
+ }
+
+ assert_eq!("{}", format!("{Foo:?}"));
+ assert_eq!("{}", format!("{Foo:#?}"));
+ }
+
+ #[test]
+ fn test_single() {
+ struct Entry;
+
+ impl fmt::Debug for Entry {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_map().entry(&"bar", &true).finish()
+ }
+ }
+
+ struct KeyValue;
+
+ impl fmt::Debug for KeyValue {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_map().key(&"bar").value(&true).finish()
+ }
+ }
+
+ assert_eq!(format!("{Entry:?}"), format!("{KeyValue:?}"));
+ assert_eq!(format!("{Entry:#?}"), format!("{KeyValue:#?}"));
+
+ assert_eq!("{\"bar\": true}", format!("{Entry:?}"));
+ assert_eq!(
+ "{
+ \"bar\": true,
+}",
+ format!("{Entry:#?}")
+ );
+ }
+
+ #[test]
+ fn test_multiple() {
+ struct Entry;
+
+ impl fmt::Debug for Entry {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_map()
+ .entry(&"bar", &true)
+ .entry(&10, &format_args!("{}/{}", 10, 20))
+ .finish()
+ }
+ }
+
+ struct KeyValue;
+
+ impl fmt::Debug for KeyValue {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_map()
+ .key(&"bar")
+ .value(&true)
+ .key(&10)
+ .value(&format_args!("{}/{}", 10, 20))
+ .finish()
+ }
+ }
+
+ assert_eq!(format!("{Entry:?}"), format!("{KeyValue:?}"));
+ assert_eq!(format!("{Entry:#?}"), format!("{KeyValue:#?}"));
+
+ assert_eq!("{\"bar\": true, 10: 10/20}", format!("{Entry:?}"));
+ assert_eq!(
+ "{
+ \"bar\": true,
+ 10: 10/20,
+}",
+ format!("{Entry:#?}")
+ );
+ }
+
+ #[test]
+ fn test_nested() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_map()
+ .entry(&"bar", &true)
+ .entry(&10, &format_args!("{}/{}", 10, 20))
+ .finish()
+ }
+ }
+
+ struct Bar;
+
+ impl fmt::Debug for Bar {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_map().entry(&"foo", &Foo).entry(&Foo, &"world").finish()
+ }
+ }
+
+ assert_eq!(
+ "{\"foo\": {\"bar\": true, 10: 10/20}, \
+ {\"bar\": true, 10: 10/20}: \"world\"}",
+ format!("{Bar:?}")
+ );
+ assert_eq!(
+ "{
+ \"foo\": {
+ \"bar\": true,
+ 10: 10/20,
+ },
+ {
+ \"bar\": true,
+ 10: 10/20,
+ }: \"world\",
+}",
+ format!("{Bar:#?}")
+ );
+ }
+
+ #[test]
+ fn test_entry_err() {
+ // Ensure errors in a map entry don't trigger panics (#65231)
+ use std::fmt::Write;
+
+ struct ErrorFmt;
+
+ impl fmt::Debug for ErrorFmt {
+ fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
+ Err(fmt::Error)
+ }
+ }
+
+ struct KeyValue<K, V>(usize, K, V);
+
+ impl<K, V> fmt::Debug for KeyValue<K, V>
+ where
+ K: fmt::Debug,
+ V: fmt::Debug,
+ {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut map = fmt.debug_map();
+
+ for _ in 0..self.0 {
+ map.entry(&self.1, &self.2);
+ }
+
+ map.finish()
+ }
+ }
+
+ let mut buf = String::new();
+
+ assert!(write!(&mut buf, "{:?}", KeyValue(1, ErrorFmt, "bar")).is_err());
+ assert!(write!(&mut buf, "{:?}", KeyValue(1, "foo", ErrorFmt)).is_err());
+
+ assert!(write!(&mut buf, "{:?}", KeyValue(2, ErrorFmt, "bar")).is_err());
+ assert!(write!(&mut buf, "{:?}", KeyValue(2, "foo", ErrorFmt)).is_err());
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_invalid_key_when_entry_is_incomplete() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_map().key(&"bar").key(&"invalid").finish()
+ }
+ }
+
+ format!("{Foo:?}");
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_invalid_finish_incomplete_entry() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_map().key(&"bar").finish()
+ }
+ }
+
+ format!("{Foo:?}");
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_invalid_value_before_key() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_map().value(&"invalid").key(&"bar").finish()
+ }
+ }
+
+ format!("{Foo:?}");
+ }
+}
+
+mod debug_set {
+ use std::fmt;
+
+ #[test]
+ fn test_empty() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_set().finish()
+ }
+ }
+
+ assert_eq!("{}", format!("{Foo:?}"));
+ assert_eq!("{}", format!("{Foo:#?}"));
+ }
+
+ #[test]
+ fn test_single() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_set().entry(&true).finish()
+ }
+ }
+
+ assert_eq!("{true}", format!("{Foo:?}"));
+ assert_eq!(
+ "{
+    true,
+}",
+ format!("{Foo:#?}")
+ );
+ }
+
+ #[test]
+ fn test_multiple() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_set().entry(&true).entry(&format_args!("{}/{}", 10, 20)).finish()
+ }
+ }
+
+ assert_eq!("{true, 10/20}", format!("{Foo:?}"));
+ assert_eq!(
+ "{
+    true,
+    10/20,
+}",
+ format!("{Foo:#?}")
+ );
+ }
+
+ #[test]
+ fn test_nested() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_set().entry(&true).entry(&format_args!("{}/{}", 10, 20)).finish()
+ }
+ }
+
+ struct Bar;
+
+ impl fmt::Debug for Bar {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_set().entry(&Foo).entry(&"world").finish()
+ }
+ }
+
+ assert_eq!("{{true, 10/20}, \"world\"}", format!("{Bar:?}"));
+ assert_eq!(
+ "{
+    {
+        true,
+        10/20,
+    },
+    \"world\",
+}",
+ format!("{Bar:#?}")
+ );
+ }
+}
+
+mod debug_list {
+ use std::fmt;
+
+ #[test]
+ fn test_empty() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_list().finish()
+ }
+ }
+
+ assert_eq!("[]", format!("{Foo:?}"));
+ assert_eq!("[]", format!("{Foo:#?}"));
+ }
+
+ #[test]
+ fn test_single() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_list().entry(&true).finish()
+ }
+ }
+
+ assert_eq!("[true]", format!("{Foo:?}"));
+ assert_eq!(
+ "[
+    true,
+]",
+ format!("{Foo:#?}")
+ );
+ }
+
+ #[test]
+ fn test_multiple() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_list().entry(&true).entry(&format_args!("{}/{}", 10, 20)).finish()
+ }
+ }
+
+ assert_eq!("[true, 10/20]", format!("{Foo:?}"));
+ assert_eq!(
+ "[
+    true,
+    10/20,
+]",
+ format!("{Foo:#?}")
+ );
+ }
+
+ #[test]
+ fn test_nested() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_list().entry(&true).entry(&format_args!("{}/{}", 10, 20)).finish()
+ }
+ }
+
+ struct Bar;
+
+ impl fmt::Debug for Bar {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_list().entry(&Foo).entry(&"world").finish()
+ }
+ }
+
+ assert_eq!("[[true, 10/20], \"world\"]", format!("{Bar:?}"));
+ assert_eq!(
+ "[
+    [
+        true,
+        10/20,
+    ],
+    \"world\",
+]",
+ format!("{Bar:#?}")
+ );
+ }
+}
+
+#[test]
+fn test_formatting_parameters_are_forwarded() {
+ use std::collections::{BTreeMap, BTreeSet};
+ #[derive(Debug)]
+ #[allow(dead_code)]
+ struct Foo {
+ bar: u32,
+ baz: u32,
+ }
+ let struct_ = Foo { bar: 1024, baz: 7 };
+ let tuple = (1024, 7);
+ let list = [1024, 7];
+ let mut map = BTreeMap::new();
+ map.insert("bar", 1024);
+ map.insert("baz", 7);
+ let mut set = BTreeSet::new();
+ set.insert(1024);
+ set.insert(7);
+
+ assert_eq!(format!("{struct_:03?}"), "Foo { bar: 1024, baz: 007 }");
+ assert_eq!(format!("{tuple:03?}"), "(1024, 007)");
+ assert_eq!(format!("{list:03?}"), "[1024, 007]");
+ assert_eq!(format!("{map:03?}"), r#"{"bar": 1024, "baz": 007}"#);
+ assert_eq!(format!("{set:03?}"), "{007, 1024}");
+ assert_eq!(
+ format!("{struct_:#03?}"),
+ "
+Foo {
+    bar: 1024,
+    baz: 007,
+}
+ "
+ .trim()
+ );
+ assert_eq!(
+ format!("{tuple:#03?}"),
+ "
+(
+    1024,
+    007,
+)
+ "
+ .trim()
+ );
+ assert_eq!(
+ format!("{list:#03?}"),
+ "
+[
+    1024,
+    007,
+]
+ "
+ .trim()
+ );
+ assert_eq!(
+ format!("{map:#03?}"),
+ r#"
+{
+ "bar": 1024,
+ "baz": 007,
+}
+ "#
+ .trim()
+ );
+ assert_eq!(
+ format!("{set:#03?}"),
+ "
+{
+    007,
+    1024,
+}
+ "
+ .trim()
+ );
+}
diff --git a/library/core/tests/fmt/float.rs b/library/core/tests/fmt/float.rs
new file mode 100644
index 000000000..47a7400f7
--- /dev/null
+++ b/library/core/tests/fmt/float.rs
@@ -0,0 +1,55 @@
+#[test]
+fn test_format_f64() {
+ assert_eq!("1", format!("{:.0}", 1.0f64));
+ assert_eq!("9", format!("{:.0}", 9.4f64));
+ assert_eq!("10", format!("{:.0}", 9.9f64));
+ assert_eq!("9.8", format!("{:.1}", 9.849f64));
+ assert_eq!("9.9", format!("{:.1}", 9.851f64));
+ assert_eq!("1", format!("{:.0}", 0.5f64));
+ assert_eq!("1.23456789e6", format!("{:e}", 1234567.89f64));
+ assert_eq!("1.23456789e3", format!("{:e}", 1234.56789f64));
+ assert_eq!("1.23456789E6", format!("{:E}", 1234567.89f64));
+ assert_eq!("1.23456789E3", format!("{:E}", 1234.56789f64));
+ assert_eq!("0.0", format!("{:?}", 0.0f64));
+ assert_eq!("1.01", format!("{:?}", 1.01f64));
+
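+ // Debug output switches to exponential notation at 1e16; a value just
+ // below the cutoff must still print positionally.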
+ let high_cutoff = 1e16_f64;
+ assert_eq!("1e16", format!("{:?}", high_cutoff));
+ assert_eq!("-1e16", format!("{:?}", -high_cutoff));
+ assert!(!is_exponential(&format!("{:?}", high_cutoff * (1.0 - 2.0 * f64::EPSILON))));
+ assert_eq!("-3.0", format!("{:?}", -3f64));
+ assert_eq!("0.0001", format!("{:?}", 0.0001f64));
+ assert_eq!("9e-5", format!("{:?}", 0.00009f64));
+ assert_eq!("1234567.9", format!("{:.1?}", 1234567.89f64));
+ assert_eq!("1234.6", format!("{:.1?}", 1234.56789f64));
+}
+
+#[test]
+fn test_format_f32() {
+ assert_eq!("1", format!("{:.0}", 1.0f32));
+ assert_eq!("9", format!("{:.0}", 9.4f32));
+ assert_eq!("10", format!("{:.0}", 9.9f32));
+ assert_eq!("9.8", format!("{:.1}", 9.849f32));
+ assert_eq!("9.9", format!("{:.1}", 9.851f32));
+ assert_eq!("1", format!("{:.0}", 0.5f32));
+ assert_eq!("1.2345679e6", format!("{:e}", 1234567.89f32));
+ assert_eq!("1.2345679e3", format!("{:e}", 1234.56789f32));
+ assert_eq!("1.2345679E6", format!("{:E}", 1234567.89f32));
+ assert_eq!("1.2345679E3", format!("{:E}", 1234.56789f32));
+ assert_eq!("0.0", format!("{:?}", 0.0f32));
+ assert_eq!("1.01", format!("{:?}", 1.01f32));
+
+ let high_cutoff = 1e16_f32;
+ assert_eq!("1e16", format!("{:?}", high_cutoff));
+ assert_eq!("-1e16", format!("{:?}", -high_cutoff));
+ assert!(!is_exponential(&format!("{:?}", high_cutoff * (1.0 - 2.0 * f32::EPSILON))));
+ assert_eq!("-3.0", format!("{:?}", -3f32));
+ assert_eq!("0.0001", format!("{:?}", 0.0001f32));
+ assert_eq!("9e-5", format!("{:?}", 0.00009f32));
+ assert_eq!("1234567.9", format!("{:.1?}", 1234567.89f32));
+ assert_eq!("1234.6", format!("{:.1?}", 1234.56789f32));
+}
+
+fn is_exponential(s: &str) -> bool {
+ s.contains("e") || s.contains("E")
+}
diff --git a/library/core/tests/fmt/mod.rs b/library/core/tests/fmt/mod.rs
new file mode 100644
index 000000000..618076358
--- /dev/null
+++ b/library/core/tests/fmt/mod.rs
@@ -0,0 +1,45 @@
+mod builders;
+mod float;
+mod num;
+
+#[test]
+fn test_format_flags() {
+ // No residual flags left by pointer formatting
+ let p = "".as_ptr();
+ assert_eq!(format!("{:p} {:x}", p, 16), format!("{p:p} 10"));
+
+ assert_eq!(format!("{: >3}", 'a'), " a");
+}
+
+#[test]
+fn test_pointer_formats_data_pointer() {
+ let b: &[u8] = b"";
+ let s: &str = "";
+ assert_eq!(format!("{s:p}"), format!("{:p}", s.as_ptr()));
+ assert_eq!(format!("{b:p}"), format!("{:p}", b.as_ptr()));
+}
+
+#[test]
+fn test_estimated_capacity() {
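+ // A sketch of the heuristic exercised below (inferred from these
+ // assertions, not a guaranteed contract): with no arguments the literal
+ // length is used as-is; a format string that *starts* with an argument and
+ // has little literal text estimates 0; otherwise the literal length is
+ // doubled to leave room for the formatted arguments.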
+ assert_eq!(format_args!("").estimated_capacity(), 0);
+ assert_eq!(format_args!("{}", "").estimated_capacity(), 0);
+ assert_eq!(format_args!("Hello").estimated_capacity(), 5);
+ assert_eq!(format_args!("Hello, {}!", "").estimated_capacity(), 16);
+ assert_eq!(format_args!("{}, hello!", "World").estimated_capacity(), 0);
+ assert_eq!(format_args!("{}. 16-bytes piece", "World").estimated_capacity(), 32);
+}
+
+#[test]
+fn pad_integral_resets() {
+ struct Bar;
+
+ impl core::fmt::Display for Bar {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ "1".fmt(f)?;
+ f.pad_integral(true, "", "5")?;
+ "1".fmt(f)
+ }
+ }
+
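+ // The `0` flag only takes effect inside `pad_integral` (zero-padding "5"
+ // to "005"); the surrounding `str` writes are space-padded to width 3
+ // ("1  "), so `pad_integral` must restore the formatter's fill and
+ // alignment instead of leaking them into later writes.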
+ assert_eq!(format!("{Bar:<03}"), "1 0051 ");
+}
diff --git a/library/core/tests/fmt/num.rs b/library/core/tests/fmt/num.rs
new file mode 100644
index 000000000..b9ede65c9
--- /dev/null
+++ b/library/core/tests/fmt/num.rs
@@ -0,0 +1,225 @@
+#[test]
+fn test_format_int() {
+// Formatting integers should select the right implementation based on
+ // the type of the argument. Also, hex/octal/binary should be defined
+ // for integers, but they shouldn't emit the negative sign.
+ assert_eq!(format!("{}", 1isize), "1");
+ assert_eq!(format!("{}", 1i8), "1");
+ assert_eq!(format!("{}", 1i16), "1");
+ assert_eq!(format!("{}", 1i32), "1");
+ assert_eq!(format!("{}", 1i64), "1");
+ assert_eq!(format!("{}", -1isize), "-1");
+ assert_eq!(format!("{}", -1i8), "-1");
+ assert_eq!(format!("{}", -1i16), "-1");
+ assert_eq!(format!("{}", -1i32), "-1");
+ assert_eq!(format!("{}", -1i64), "-1");
+ assert_eq!(format!("{:?}", 1isize), "1");
+ assert_eq!(format!("{:?}", 1i8), "1");
+ assert_eq!(format!("{:?}", 1i16), "1");
+ assert_eq!(format!("{:?}", 1i32), "1");
+ assert_eq!(format!("{:?}", 1i64), "1");
+ assert_eq!(format!("{:b}", 1isize), "1");
+ assert_eq!(format!("{:b}", 1i8), "1");
+ assert_eq!(format!("{:b}", 1i16), "1");
+ assert_eq!(format!("{:b}", 1i32), "1");
+ assert_eq!(format!("{:b}", 1i64), "1");
+ assert_eq!(format!("{:x}", 1isize), "1");
+ assert_eq!(format!("{:x}", 1i8), "1");
+ assert_eq!(format!("{:x}", 1i16), "1");
+ assert_eq!(format!("{:x}", 1i32), "1");
+ assert_eq!(format!("{:x}", 1i64), "1");
+ assert_eq!(format!("{:X}", 1isize), "1");
+ assert_eq!(format!("{:X}", 1i8), "1");
+ assert_eq!(format!("{:X}", 1i16), "1");
+ assert_eq!(format!("{:X}", 1i32), "1");
+ assert_eq!(format!("{:X}", 1i64), "1");
+ assert_eq!(format!("{:o}", 1isize), "1");
+ assert_eq!(format!("{:o}", 1i8), "1");
+ assert_eq!(format!("{:o}", 1i16), "1");
+ assert_eq!(format!("{:o}", 1i32), "1");
+ assert_eq!(format!("{:o}", 1i64), "1");
+ assert_eq!(format!("{:e}", 1isize), "1e0");
+ assert_eq!(format!("{:e}", 1i8), "1e0");
+ assert_eq!(format!("{:e}", 1i16), "1e0");
+ assert_eq!(format!("{:e}", 1i32), "1e0");
+ assert_eq!(format!("{:e}", 1i64), "1e0");
+ assert_eq!(format!("{:E}", 1isize), "1E0");
+ assert_eq!(format!("{:E}", 1i8), "1E0");
+ assert_eq!(format!("{:E}", 1i16), "1E0");
+ assert_eq!(format!("{:E}", 1i32), "1E0");
+ assert_eq!(format!("{:E}", 1i64), "1E0");
+
+ assert_eq!(format!("{}", 1usize), "1");
+ assert_eq!(format!("{}", 1u8), "1");
+ assert_eq!(format!("{}", 1u16), "1");
+ assert_eq!(format!("{}", 1u32), "1");
+ assert_eq!(format!("{}", 1u64), "1");
+ assert_eq!(format!("{:?}", 1usize), "1");
+ assert_eq!(format!("{:?}", 1u8), "1");
+ assert_eq!(format!("{:?}", 1u16), "1");
+ assert_eq!(format!("{:?}", 1u32), "1");
+ assert_eq!(format!("{:?}", 1u64), "1");
+ assert_eq!(format!("{:b}", 1usize), "1");
+ assert_eq!(format!("{:b}", 1u8), "1");
+ assert_eq!(format!("{:b}", 1u16), "1");
+ assert_eq!(format!("{:b}", 1u32), "1");
+ assert_eq!(format!("{:b}", 1u64), "1");
+ assert_eq!(format!("{:x}", 1usize), "1");
+ assert_eq!(format!("{:x}", 1u8), "1");
+ assert_eq!(format!("{:x}", 1u16), "1");
+ assert_eq!(format!("{:x}", 1u32), "1");
+ assert_eq!(format!("{:x}", 1u64), "1");
+ assert_eq!(format!("{:X}", 1usize), "1");
+ assert_eq!(format!("{:X}", 1u8), "1");
+ assert_eq!(format!("{:X}", 1u16), "1");
+ assert_eq!(format!("{:X}", 1u32), "1");
+ assert_eq!(format!("{:X}", 1u64), "1");
+ assert_eq!(format!("{:o}", 1usize), "1");
+ assert_eq!(format!("{:o}", 1u8), "1");
+ assert_eq!(format!("{:o}", 1u16), "1");
+ assert_eq!(format!("{:o}", 1u32), "1");
+ assert_eq!(format!("{:o}", 1u64), "1");
+ assert_eq!(format!("{:e}", 1u8), "1e0");
+ assert_eq!(format!("{:e}", 1u16), "1e0");
+ assert_eq!(format!("{:e}", 1u32), "1e0");
+ assert_eq!(format!("{:e}", 1u64), "1e0");
+ assert_eq!(format!("{:E}", 1u8), "1E0");
+ assert_eq!(format!("{:E}", 1u16), "1E0");
+ assert_eq!(format!("{:E}", 1u32), "1E0");
+ assert_eq!(format!("{:E}", 1u64), "1E0");
+
+ // Test a larger number
+ assert_eq!(format!("{:b}", 55), "110111");
+ assert_eq!(format!("{:o}", 55), "67");
+ assert_eq!(format!("{}", 55), "55");
+ assert_eq!(format!("{:x}", 55), "37");
+ assert_eq!(format!("{:X}", 55), "37");
+ assert_eq!(format!("{:e}", 55), "5.5e1");
+ assert_eq!(format!("{:E}", 55), "5.5E1");
+ assert_eq!(format!("{:e}", 10000000000u64), "1e10");
+ assert_eq!(format!("{:E}", 10000000000u64), "1E10");
+ assert_eq!(format!("{:e}", 10000000001u64), "1.0000000001e10");
+ assert_eq!(format!("{:E}", 10000000001u64), "1.0000000001E10");
+}
+
+#[test]
+fn test_format_int_exp_limits() {
+ assert_eq!(format!("{:e}", i8::MIN), "-1.28e2");
+ assert_eq!(format!("{:e}", i8::MAX), "1.27e2");
+ assert_eq!(format!("{:e}", i16::MIN), "-3.2768e4");
+ assert_eq!(format!("{:e}", i16::MAX), "3.2767e4");
+ assert_eq!(format!("{:e}", i32::MIN), "-2.147483648e9");
+ assert_eq!(format!("{:e}", i32::MAX), "2.147483647e9");
+ assert_eq!(format!("{:e}", i64::MIN), "-9.223372036854775808e18");
+ assert_eq!(format!("{:e}", i64::MAX), "9.223372036854775807e18");
+ assert_eq!(format!("{:e}", i128::MIN), "-1.70141183460469231731687303715884105728e38");
+ assert_eq!(format!("{:e}", i128::MAX), "1.70141183460469231731687303715884105727e38");
+
+ assert_eq!(format!("{:e}", u8::MAX), "2.55e2");
+ assert_eq!(format!("{:e}", u16::MAX), "6.5535e4");
+ assert_eq!(format!("{:e}", u32::MAX), "4.294967295e9");
+ assert_eq!(format!("{:e}", u64::MAX), "1.8446744073709551615e19");
+ assert_eq!(format!("{:e}", u128::MAX), "3.40282366920938463463374607431768211455e38");
+}
+
+#[test]
+fn test_format_int_exp_precision() {
+ // test that float and integer formatting match
+ let big_int: u32 = 314_159_265;
+ assert_eq!(format!("{big_int:.1e}"), format!("{:.1e}", f64::from(big_int)));
+
+ // test adding precision
+ assert_eq!(format!("{:.10e}", i8::MIN), "-1.2800000000e2");
+ assert_eq!(format!("{:.10e}", i16::MIN), "-3.2768000000e4");
+ assert_eq!(format!("{:.10e}", i32::MIN), "-2.1474836480e9");
+ assert_eq!(format!("{:.20e}", i64::MIN), "-9.22337203685477580800e18");
+ assert_eq!(format!("{:.40e}", i128::MIN), "-1.7014118346046923173168730371588410572800e38");
+
+ // test rounding
+ assert_eq!(format!("{:.1e}", i8::MIN), "-1.3e2");
+ assert_eq!(format!("{:.1e}", i16::MIN), "-3.3e4");
+ assert_eq!(format!("{:.1e}", i32::MIN), "-2.1e9");
+ assert_eq!(format!("{:.1e}", i64::MIN), "-9.2e18");
+ assert_eq!(format!("{:.1e}", i128::MIN), "-1.7e38");
+
+ // test huge precision
+ assert_eq!(format!("{:.1000e}", 1), format!("1.{}e0", "0".repeat(1000)));
+ // test zero precision
+ assert_eq!(format!("{:.0e}", 1), format!("1e0",));
+ assert_eq!(format!("{:.0e}", 35), format!("4e1",));
+
+ // test padding with precision (and sign)
+ assert_eq!(format!("{:+10.3e}", 1), " +1.000e0");
+}
+
+#[test]
+fn test_format_int_zero() {
+ assert_eq!(format!("{}", 0), "0");
+ assert_eq!(format!("{:?}", 0), "0");
+ assert_eq!(format!("{:b}", 0), "0");
+ assert_eq!(format!("{:o}", 0), "0");
+ assert_eq!(format!("{:x}", 0), "0");
+ assert_eq!(format!("{:X}", 0), "0");
+ assert_eq!(format!("{:e}", 0), "0e0");
+ assert_eq!(format!("{:E}", 0), "0E0");
+
+ assert_eq!(format!("{}", 0u32), "0");
+ assert_eq!(format!("{:?}", 0u32), "0");
+ assert_eq!(format!("{:b}", 0u32), "0");
+ assert_eq!(format!("{:o}", 0u32), "0");
+ assert_eq!(format!("{:x}", 0u32), "0");
+ assert_eq!(format!("{:X}", 0u32), "0");
+ assert_eq!(format!("{:e}", 0u32), "0e0");
+ assert_eq!(format!("{:E}", 0u32), "0E0");
+}
+
+#[test]
+fn test_format_int_flags() {
+ assert_eq!(format!("{:3}", 1), " 1");
+ assert_eq!(format!("{:>3}", 1), " 1");
+ assert_eq!(format!("{:>+3}", 1), " +1");
+ assert_eq!(format!("{:<3}", 1), "1 ");
+ assert_eq!(format!("{:#}", 1), "1");
+ assert_eq!(format!("{:#x}", 10), "0xa");
+ assert_eq!(format!("{:#X}", 10), "0xA");
+ assert_eq!(format!("{:#5x}", 10), " 0xa");
+ assert_eq!(format!("{:#o}", 10), "0o12");
+ assert_eq!(format!("{:08x}", 10), "0000000a");
+ assert_eq!(format!("{:8x}", 10), " a");
+ assert_eq!(format!("{:<8x}", 10), "a ");
+ assert_eq!(format!("{:>8x}", 10), " a");
+ assert_eq!(format!("{:#08x}", 10), "0x00000a");
+ assert_eq!(format!("{:08}", -10), "-0000010");
+ assert_eq!(format!("{:x}", !0u8), "ff");
+ assert_eq!(format!("{:X}", !0u8), "FF");
+ assert_eq!(format!("{:b}", !0u8), "11111111");
+ assert_eq!(format!("{:o}", !0u8), "377");
+ assert_eq!(format!("{:#x}", !0u8), "0xff");
+ assert_eq!(format!("{:#X}", !0u8), "0xFF");
+ assert_eq!(format!("{:#b}", !0u8), "0b11111111");
+ assert_eq!(format!("{:#o}", !0u8), "0o377");
+}
+
+#[test]
+fn test_format_int_sign_padding() {
+ assert_eq!(format!("{:+5}", 1), " +1");
+ assert_eq!(format!("{:+5}", -1), " -1");
+ assert_eq!(format!("{:05}", 1), "00001");
+ assert_eq!(format!("{:05}", -1), "-0001");
+ assert_eq!(format!("{:+05}", 1), "+0001");
+ assert_eq!(format!("{:+05}", -1), "-0001");
+}
+
+#[test]
+fn test_format_int_twos_complement() {
+ assert_eq!(format!("{}", i8::MIN), "-128");
+ assert_eq!(format!("{}", i16::MIN), "-32768");
+ assert_eq!(format!("{}", i32::MIN), "-2147483648");
+ assert_eq!(format!("{}", i64::MIN), "-9223372036854775808");
+}
+
+#[test]
+fn test_format_debug_hex() {
+ assert_eq!(format!("{:02x?}", b"Foo\0"), "[46, 6f, 6f, 00]");
+ assert_eq!(format!("{:02X?}", b"Foo\0"), "[46, 6F, 6F, 00]");
+}
diff --git a/library/core/tests/future.rs b/library/core/tests/future.rs
new file mode 100644
index 000000000..74b6f74e4
--- /dev/null
+++ b/library/core/tests/future.rs
@@ -0,0 +1,128 @@
+use std::future::{join, Future};
+use std::pin::Pin;
+use std::sync::Arc;
+use std::task::{Context, Poll, Wake};
+use std::thread;
+
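+// A future that completes with `val` on its `num`-th poll; every earlier
+// poll schedules a wake-up and returns `Pending`.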
+struct PollN {
+ val: usize,
+ polled: usize,
+ num: usize,
+}
+
+impl Future for PollN {
+ type Output = usize;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ self.polled += 1;
+
+ if self.polled == self.num {
+ return Poll::Ready(self.val);
+ }
+
+ cx.waker().wake_by_ref();
+ Poll::Pending
+ }
+}
+
+fn poll_n(val: usize, num: usize) -> PollN {
+ PollN { val, num, polled: 0 }
+}
+
+#[test]
+#[cfg_attr(miri, ignore)] // self-referential generators do not work with Miri's aliasing checks
+fn test_join() {
+ block_on(async move {
+ let x = join!(async { 0 }).await;
+ assert_eq!(x, 0);
+
+ let x = join!(async { 0 }, async { 1 }).await;
+ assert_eq!(x, (0, 1));
+
+ let x = join!(async { 0 }, async { 1 }, async { 2 }).await;
+ assert_eq!(x, (0, 1, 2));
+
+ let x = join!(
+ poll_n(0, 1),
+ poll_n(1, 5),
+ poll_n(2, 2),
+ poll_n(3, 1),
+ poll_n(4, 2),
+ poll_n(5, 3),
+ poll_n(6, 4),
+ poll_n(7, 1)
+ )
+ .await;
+ assert_eq!(x, (0, 1, 2, 3, 4, 5, 6, 7));
+
+ let y = String::new();
+ let x = join!(async {
+ println!("{}", &y);
+ 1
+ })
+ .await;
+ assert_eq!(x, 1);
+ });
+}
+
+/// Tests that `join!(…)` behaves "like a function": evaluating its arguments
+/// before applying any of its own logic.
+///
+/// _e.g._, `join!(async_fn(&borrowed), …)` does not consume `borrowed`;
+/// and `join!(opt_fut?, …)` does let that `?` refer to the callsite scope.
+mod test_join_function_like_value_arg_semantics {
+ use super::*;
+
+ async fn async_fn(_: impl Sized) {}
+
+ // no need to _run_ this test, just to compile it.
+ fn _join_does_not_unnecessarily_move_mentioned_bindings() {
+ let not_copy = vec![()];
+ let _ = join!(async_fn(&not_copy)); // should not move `not_copy`
+ let _ = &not_copy; // OK
+ }
+
+ #[test]
+ fn join_lets_control_flow_effects_such_as_try_flow_through() {
+ let maybe_fut = None;
+ if false {
+ *&mut { maybe_fut } = Some(async {});
+ loop {}
+ }
+ assert!(Option::is_none(&try { join!(maybe_fut?, async { unreachable!() }) }));
+ }
+
+ #[test]
+ fn join_is_able_to_handle_temporaries() {
+ let _ = join!(async_fn(&String::from("temporary")));
+ let () = block_on(join!(async_fn(&String::from("temporary"))));
+ }
+}
+
+fn block_on(fut: impl Future) {
+ struct Waker;
+ impl Wake for Waker {
+ fn wake(self: Arc<Self>) {
+ thread::current().unpark()
+ }
+ }
+
+ let waker = Arc::new(Waker).into();
+ let mut cx = Context::from_waker(&waker);
+ let mut fut = Box::pin(fut);
+
+ loop {
+ match fut.as_mut().poll(&mut cx) {
+ Poll::Ready(_) => break,
+ Poll::Pending => thread::park(),
+ }
+ }
+}
+
+// just tests whether or not this compiles
+fn _pending_impl_all_auto_traits<T>() {
+ use std::panic::{RefUnwindSafe, UnwindSafe};
+ fn all_auto_traits<T: Send + Sync + Unpin + UnwindSafe + RefUnwindSafe>() {}
+
+ all_auto_traits::<std::future::Pending<T>>();
+}
diff --git a/library/core/tests/hash/mod.rs b/library/core/tests/hash/mod.rs
new file mode 100644
index 000000000..f7934d062
--- /dev/null
+++ b/library/core/tests/hash/mod.rs
@@ -0,0 +1,161 @@
+mod sip;
+
+use std::default::Default;
+use std::hash::{BuildHasher, Hash, Hasher};
+use std::ptr;
+use std::rc::Rc;
+
+struct MyHasher {
+ hash: u64,
+}
+
+impl Default for MyHasher {
+ fn default() -> MyHasher {
+ MyHasher { hash: 0 }
+ }
+}
+
+impl Hasher for MyHasher {
+ fn write(&mut self, buf: &[u8]) {
+ for byte in buf {
+ self.hash += *byte as u64;
+ }
+ }
+ fn write_str(&mut self, s: &str) {
+ self.write(s.as_bytes());
+ self.write_u8(0xFF);
+ }
+ fn finish(&self) -> u64 {
+ self.hash
+ }
+}
+
+#[test]
+fn test_writer_hasher() {
+ fn hash<T: Hash>(t: &T) -> u64 {
+ let mut s = MyHasher { hash: 0 };
+ t.hash(&mut s);
+ s.finish()
+ }
+
+ assert_eq!(hash(&()), 0);
+
+ assert_eq!(hash(&5_u8), 5);
+ assert_eq!(hash(&5_u16), 5);
+ assert_eq!(hash(&5_u32), 5);
+ assert_eq!(hash(&5_u64), 5);
+ assert_eq!(hash(&5_usize), 5);
+
+ assert_eq!(hash(&5_i8), 5);
+ assert_eq!(hash(&5_i16), 5);
+ assert_eq!(hash(&5_i32), 5);
+ assert_eq!(hash(&5_i64), 5);
+ assert_eq!(hash(&5_isize), 5);
+
+ assert_eq!(hash(&false), 0);
+ assert_eq!(hash(&true), 1);
+
+ assert_eq!(hash(&'a'), 97);
+
+ let s: &str = "a";
+ assert_eq!(hash(&s), 97 + 0xFF);
+ let s: Box<str> = String::from("a").into_boxed_str();
+ assert_eq!(hash(&s), 97 + 0xFF);
+ let s: Rc<&str> = Rc::new("a");
+ assert_eq!(hash(&s), 97 + 0xFF);
+ let cs: &[u8] = &[1, 2, 3];
+ assert_eq!(hash(&cs), 9);
+ let cs: Box<[u8]> = Box::new([1, 2, 3]);
+ assert_eq!(hash(&cs), 9);
+ let cs: Rc<[u8]> = Rc::new([1, 2, 3]);
+ assert_eq!(hash(&cs), 9);
+
+ let ptr = ptr::invalid::<i32>(5_usize);
+ assert_eq!(hash(&ptr), 5);
+
+ let ptr = ptr::invalid_mut::<i32>(5_usize);
+ assert_eq!(hash(&ptr), 5);
+
+ if cfg!(miri) {
+ // Miri cannot hash pointers
+ return;
+ }
+
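+ // `MyHasher` simply sums bytes, so hashing a fat slice pointer (data
+ // pointer plus length) should equal hashing the thin data pointer plus
+ // the small length value.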
+ let cs: &mut [u8] = &mut [1, 2, 3];
+ let ptr = cs.as_ptr();
+ let slice_ptr = cs as *const [u8];
+ assert_eq!(hash(&slice_ptr), hash(&ptr) + cs.len() as u64);
+
+ let slice_ptr = cs as *mut [u8];
+ assert_eq!(hash(&slice_ptr), hash(&ptr) + cs.len() as u64);
+}
+
+struct Custom {
+ hash: u64,
+}
+struct CustomHasher {
+ output: u64,
+}
+
+impl Hasher for CustomHasher {
+ fn finish(&self) -> u64 {
+ self.output
+ }
+ fn write(&mut self, _: &[u8]) {
+ panic!()
+ }
+ fn write_u64(&mut self, data: u64) {
+ self.output = data;
+ }
+}
+
+impl Default for CustomHasher {
+ fn default() -> CustomHasher {
+ CustomHasher { output: 0 }
+ }
+}
+
+impl Hash for Custom {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ state.write_u64(self.hash);
+ }
+}
+
+#[test]
+fn test_custom_state() {
+ fn hash<T: Hash>(t: &T) -> u64 {
+ let mut c = CustomHasher { output: 0 };
+ t.hash(&mut c);
+ c.finish()
+ }
+
+ assert_eq!(hash(&Custom { hash: 5 }), 5);
+}
+
+// FIXME: Instantiated functions with i128 in the signature are not supported in Emscripten.
+// See https://github.com/kripken/emscripten-fastcomp/issues/169
+#[cfg(not(target_os = "emscripten"))]
+#[test]
+fn test_indirect_hasher() {
+ let mut hasher = MyHasher { hash: 0 };
+ {
+ let mut indirect_hasher: &mut dyn Hasher = &mut hasher;
+ 5u32.hash(&mut indirect_hasher);
+ }
+ assert_eq!(hasher.hash, 5);
+}
+
+#[test]
+fn test_build_hasher_object_safe() {
+ use std::collections::hash_map::{DefaultHasher, RandomState};
+
+ let _: &dyn BuildHasher<Hasher = DefaultHasher> = &RandomState::new();
+}
+
+// just tests whether or not this compiles
+fn _build_hasher_default_impl_all_auto_traits<T>() {
+ use std::panic::{RefUnwindSafe, UnwindSafe};
+ fn all_auto_traits<T: Send + Sync + Unpin + UnwindSafe + RefUnwindSafe>() {}
+
+ all_auto_traits::<std::hash::BuildHasherDefault<T>>();
+}
diff --git a/library/core/tests/hash/sip.rs b/library/core/tests/hash/sip.rs
new file mode 100644
index 000000000..877d08418
--- /dev/null
+++ b/library/core/tests/hash/sip.rs
@@ -0,0 +1,309 @@
+#![allow(deprecated)]
+
+use core::hash::{Hash, Hasher};
+use core::hash::{SipHasher, SipHasher13};
+use core::{mem, slice};
+
+// Hash just the bytes of the slice, without length prefix
+struct Bytes<'a>(&'a [u8]);
+
+impl<'a> Hash for Bytes<'a> {
+ #[allow(unused_must_use)]
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ let Bytes(v) = *self;
+ state.write(v);
+ }
+}
+
+fn hash_with<H: Hasher, T: Hash>(mut st: H, x: &T) -> u64 {
+ x.hash(&mut st);
+ st.finish()
+}
+
+fn hash<T: Hash>(x: &T) -> u64 {
+ hash_with(SipHasher::new(), x)
+}
+
+#[test]
+#[allow(unused_must_use)]
+fn test_siphash_1_3() {
+ let vecs: [[u8; 8]; 64] = [
+ [0xdc, 0xc4, 0x0f, 0x05, 0x58, 0x01, 0xac, 0xab],
+ [0x93, 0xca, 0x57, 0x7d, 0xf3, 0x9b, 0xf4, 0xc9],
+ [0x4d, 0xd4, 0xc7, 0x4d, 0x02, 0x9b, 0xcb, 0x82],
+ [0xfb, 0xf7, 0xdd, 0xe7, 0xb8, 0x0a, 0xf8, 0x8b],
+ [0x28, 0x83, 0xd3, 0x88, 0x60, 0x57, 0x75, 0xcf],
+ [0x67, 0x3b, 0x53, 0x49, 0x2f, 0xd5, 0xf9, 0xde],
+ [0xa7, 0x22, 0x9f, 0xc5, 0x50, 0x2b, 0x0d, 0xc5],
+ [0x40, 0x11, 0xb1, 0x9b, 0x98, 0x7d, 0x92, 0xd3],
+ [0x8e, 0x9a, 0x29, 0x8d, 0x11, 0x95, 0x90, 0x36],
+ [0xe4, 0x3d, 0x06, 0x6c, 0xb3, 0x8e, 0xa4, 0x25],
+ [0x7f, 0x09, 0xff, 0x92, 0xee, 0x85, 0xde, 0x79],
+ [0x52, 0xc3, 0x4d, 0xf9, 0xc1, 0x18, 0xc1, 0x70],
+ [0xa2, 0xd9, 0xb4, 0x57, 0xb1, 0x84, 0xa3, 0x78],
+ [0xa7, 0xff, 0x29, 0x12, 0x0c, 0x76, 0x6f, 0x30],
+ [0x34, 0x5d, 0xf9, 0xc0, 0x11, 0xa1, 0x5a, 0x60],
+ [0x56, 0x99, 0x51, 0x2a, 0x6d, 0xd8, 0x20, 0xd3],
+ [0x66, 0x8b, 0x90, 0x7d, 0x1a, 0xdd, 0x4f, 0xcc],
+ [0x0c, 0xd8, 0xdb, 0x63, 0x90, 0x68, 0xf2, 0x9c],
+ [0x3e, 0xe6, 0x73, 0xb4, 0x9c, 0x38, 0xfc, 0x8f],
+ [0x1c, 0x7d, 0x29, 0x8d, 0xe5, 0x9d, 0x1f, 0xf2],
+ [0x40, 0xe0, 0xcc, 0xa6, 0x46, 0x2f, 0xdc, 0xc0],
+ [0x44, 0xf8, 0x45, 0x2b, 0xfe, 0xab, 0x92, 0xb9],
+ [0x2e, 0x87, 0x20, 0xa3, 0x9b, 0x7b, 0xfe, 0x7f],
+ [0x23, 0xc1, 0xe6, 0xda, 0x7f, 0x0e, 0x5a, 0x52],
+ [0x8c, 0x9c, 0x34, 0x67, 0xb2, 0xae, 0x64, 0xf4],
+ [0x79, 0x09, 0x5b, 0x70, 0x28, 0x59, 0xcd, 0x45],
+ [0xa5, 0x13, 0x99, 0xca, 0xe3, 0x35, 0x3e, 0x3a],
+ [0x35, 0x3b, 0xde, 0x4a, 0x4e, 0xc7, 0x1d, 0xa9],
+ [0x0d, 0xd0, 0x6c, 0xef, 0x02, 0xed, 0x0b, 0xfb],
+ [0xf4, 0xe1, 0xb1, 0x4a, 0xb4, 0x3c, 0xd9, 0x88],
+ [0x63, 0xe6, 0xc5, 0x43, 0xd6, 0x11, 0x0f, 0x54],
+ [0xbc, 0xd1, 0x21, 0x8c, 0x1f, 0xdd, 0x70, 0x23],
+ [0x0d, 0xb6, 0xa7, 0x16, 0x6c, 0x7b, 0x15, 0x81],
+ [0xbf, 0xf9, 0x8f, 0x7a, 0xe5, 0xb9, 0x54, 0x4d],
+ [0x3e, 0x75, 0x2a, 0x1f, 0x78, 0x12, 0x9f, 0x75],
+ [0x91, 0x6b, 0x18, 0xbf, 0xbe, 0xa3, 0xa1, 0xce],
+ [0x06, 0x62, 0xa2, 0xad, 0xd3, 0x08, 0xf5, 0x2c],
+ [0x57, 0x30, 0xc3, 0xa3, 0x2d, 0x1c, 0x10, 0xb6],
+ [0xa1, 0x36, 0x3a, 0xae, 0x96, 0x74, 0xf4, 0xb3],
+ [0x92, 0x83, 0x10, 0x7b, 0x54, 0x57, 0x6b, 0x62],
+ [0x31, 0x15, 0xe4, 0x99, 0x32, 0x36, 0xd2, 0xc1],
+ [0x44, 0xd9, 0x1a, 0x3f, 0x92, 0xc1, 0x7c, 0x66],
+ [0x25, 0x88, 0x13, 0xc8, 0xfe, 0x4f, 0x70, 0x65],
+ [0xa6, 0x49, 0x89, 0xc2, 0xd1, 0x80, 0xf2, 0x24],
+ [0x6b, 0x87, 0xf8, 0xfa, 0xed, 0x1c, 0xca, 0xc2],
+ [0x96, 0x21, 0x04, 0x9f, 0xfc, 0x4b, 0x16, 0xc2],
+ [0x23, 0xd6, 0xb1, 0x68, 0x93, 0x9c, 0x6e, 0xa1],
+ [0xfd, 0x14, 0x51, 0x8b, 0x9c, 0x16, 0xfb, 0x49],
+ [0x46, 0x4c, 0x07, 0xdf, 0xf8, 0x43, 0x31, 0x9f],
+ [0xb3, 0x86, 0xcc, 0x12, 0x24, 0xaf, 0xfd, 0xc6],
+ [0x8f, 0x09, 0x52, 0x0a, 0xd1, 0x49, 0xaf, 0x7e],
+ [0x9a, 0x2f, 0x29, 0x9d, 0x55, 0x13, 0xf3, 0x1c],
+ [0x12, 0x1f, 0xf4, 0xa2, 0xdd, 0x30, 0x4a, 0xc4],
+ [0xd0, 0x1e, 0xa7, 0x43, 0x89, 0xe9, 0xfa, 0x36],
+ [0xe6, 0xbc, 0xf0, 0x73, 0x4c, 0xb3, 0x8f, 0x31],
+ [0x80, 0xe9, 0xa7, 0x70, 0x36, 0xbf, 0x7a, 0xa2],
+ [0x75, 0x6d, 0x3c, 0x24, 0xdb, 0xc0, 0xbc, 0xb4],
+ [0x13, 0x15, 0xb7, 0xfd, 0x52, 0xd8, 0xf8, 0x23],
+ [0x08, 0x8a, 0x7d, 0xa6, 0x4d, 0x5f, 0x03, 0x8f],
+ [0x48, 0xf1, 0xe8, 0xb7, 0xe5, 0xd0, 0x9c, 0xd8],
+ [0xee, 0x44, 0xa6, 0xf7, 0xbc, 0xe6, 0xf4, 0xf6],
+ [0xf2, 0x37, 0x18, 0x0f, 0xd8, 0x9a, 0xc5, 0xae],
+ [0xe0, 0x94, 0x66, 0x4b, 0x15, 0xf6, 0xb2, 0xc3],
+ [0xa8, 0xb3, 0xbb, 0xb7, 0x62, 0x90, 0x19, 0x9d],
+ ];
+
+ let k0 = 0x_07_06_05_04_03_02_01_00;
+ let k1 = 0x_0f_0e_0d_0c_0b_0a_09_08;
+ let mut buf = Vec::new();
+ let mut t = 0;
+ let mut state_inc = SipHasher13::new_with_keys(k0, k1);
+
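+ // Each pass checks the reference vector for a message of length `t` two
+ // ways: fresh one-shot hashers over the whole buffer, and `state_inc`,
+ // which is fed one byte per iteration, so incremental and batch hashing
+ // must agree. (The SipHash-2-4 test below uses the same structure.)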
+ while t < 64 {
+ let vec = u64::from_le_bytes(vecs[t]);
+ let out = hash_with(SipHasher13::new_with_keys(k0, k1), &Bytes(&buf));
+ assert_eq!(vec, out);
+
+ let full = hash_with(SipHasher13::new_with_keys(k0, k1), &Bytes(&buf));
+ let i = state_inc.finish();
+
+ assert_eq!(full, i);
+ assert_eq!(full, vec);
+
+ buf.push(t as u8);
+ Hasher::write(&mut state_inc, &[t as u8]);
+
+ t += 1;
+ }
+}
+
+#[test]
+#[allow(unused_must_use)]
+fn test_siphash_2_4() {
+ let vecs: [[u8; 8]; 64] = [
+ [0x31, 0x0e, 0x0e, 0xdd, 0x47, 0xdb, 0x6f, 0x72],
+ [0xfd, 0x67, 0xdc, 0x93, 0xc5, 0x39, 0xf8, 0x74],
+ [0x5a, 0x4f, 0xa9, 0xd9, 0x09, 0x80, 0x6c, 0x0d],
+ [0x2d, 0x7e, 0xfb, 0xd7, 0x96, 0x66, 0x67, 0x85],
+ [0xb7, 0x87, 0x71, 0x27, 0xe0, 0x94, 0x27, 0xcf],
+ [0x8d, 0xa6, 0x99, 0xcd, 0x64, 0x55, 0x76, 0x18],
+ [0xce, 0xe3, 0xfe, 0x58, 0x6e, 0x46, 0xc9, 0xcb],
+ [0x37, 0xd1, 0x01, 0x8b, 0xf5, 0x00, 0x02, 0xab],
+ [0x62, 0x24, 0x93, 0x9a, 0x79, 0xf5, 0xf5, 0x93],
+ [0xb0, 0xe4, 0xa9, 0x0b, 0xdf, 0x82, 0x00, 0x9e],
+ [0xf3, 0xb9, 0xdd, 0x94, 0xc5, 0xbb, 0x5d, 0x7a],
+ [0xa7, 0xad, 0x6b, 0x22, 0x46, 0x2f, 0xb3, 0xf4],
+ [0xfb, 0xe5, 0x0e, 0x86, 0xbc, 0x8f, 0x1e, 0x75],
+ [0x90, 0x3d, 0x84, 0xc0, 0x27, 0x56, 0xea, 0x14],
+ [0xee, 0xf2, 0x7a, 0x8e, 0x90, 0xca, 0x23, 0xf7],
+ [0xe5, 0x45, 0xbe, 0x49, 0x61, 0xca, 0x29, 0xa1],
+ [0xdb, 0x9b, 0xc2, 0x57, 0x7f, 0xcc, 0x2a, 0x3f],
+ [0x94, 0x47, 0xbe, 0x2c, 0xf5, 0xe9, 0x9a, 0x69],
+ [0x9c, 0xd3, 0x8d, 0x96, 0xf0, 0xb3, 0xc1, 0x4b],
+ [0xbd, 0x61, 0x79, 0xa7, 0x1d, 0xc9, 0x6d, 0xbb],
+ [0x98, 0xee, 0xa2, 0x1a, 0xf2, 0x5c, 0xd6, 0xbe],
+ [0xc7, 0x67, 0x3b, 0x2e, 0xb0, 0xcb, 0xf2, 0xd0],
+ [0x88, 0x3e, 0xa3, 0xe3, 0x95, 0x67, 0x53, 0x93],
+ [0xc8, 0xce, 0x5c, 0xcd, 0x8c, 0x03, 0x0c, 0xa8],
+ [0x94, 0xaf, 0x49, 0xf6, 0xc6, 0x50, 0xad, 0xb8],
+ [0xea, 0xb8, 0x85, 0x8a, 0xde, 0x92, 0xe1, 0xbc],
+ [0xf3, 0x15, 0xbb, 0x5b, 0xb8, 0x35, 0xd8, 0x17],
+ [0xad, 0xcf, 0x6b, 0x07, 0x63, 0x61, 0x2e, 0x2f],
+ [0xa5, 0xc9, 0x1d, 0xa7, 0xac, 0xaa, 0x4d, 0xde],
+ [0x71, 0x65, 0x95, 0x87, 0x66, 0x50, 0xa2, 0xa6],
+ [0x28, 0xef, 0x49, 0x5c, 0x53, 0xa3, 0x87, 0xad],
+ [0x42, 0xc3, 0x41, 0xd8, 0xfa, 0x92, 0xd8, 0x32],
+ [0xce, 0x7c, 0xf2, 0x72, 0x2f, 0x51, 0x27, 0x71],
+ [0xe3, 0x78, 0x59, 0xf9, 0x46, 0x23, 0xf3, 0xa7],
+ [0x38, 0x12, 0x05, 0xbb, 0x1a, 0xb0, 0xe0, 0x12],
+ [0xae, 0x97, 0xa1, 0x0f, 0xd4, 0x34, 0xe0, 0x15],
+ [0xb4, 0xa3, 0x15, 0x08, 0xbe, 0xff, 0x4d, 0x31],
+ [0x81, 0x39, 0x62, 0x29, 0xf0, 0x90, 0x79, 0x02],
+ [0x4d, 0x0c, 0xf4, 0x9e, 0xe5, 0xd4, 0xdc, 0xca],
+ [0x5c, 0x73, 0x33, 0x6a, 0x76, 0xd8, 0xbf, 0x9a],
+ [0xd0, 0xa7, 0x04, 0x53, 0x6b, 0xa9, 0x3e, 0x0e],
+ [0x92, 0x59, 0x58, 0xfc, 0xd6, 0x42, 0x0c, 0xad],
+ [0xa9, 0x15, 0xc2, 0x9b, 0xc8, 0x06, 0x73, 0x18],
+ [0x95, 0x2b, 0x79, 0xf3, 0xbc, 0x0a, 0xa6, 0xd4],
+ [0xf2, 0x1d, 0xf2, 0xe4, 0x1d, 0x45, 0x35, 0xf9],
+ [0x87, 0x57, 0x75, 0x19, 0x04, 0x8f, 0x53, 0xa9],
+ [0x10, 0xa5, 0x6c, 0xf5, 0xdf, 0xcd, 0x9a, 0xdb],
+ [0xeb, 0x75, 0x09, 0x5c, 0xcd, 0x98, 0x6c, 0xd0],
+ [0x51, 0xa9, 0xcb, 0x9e, 0xcb, 0xa3, 0x12, 0xe6],
+ [0x96, 0xaf, 0xad, 0xfc, 0x2c, 0xe6, 0x66, 0xc7],
+ [0x72, 0xfe, 0x52, 0x97, 0x5a, 0x43, 0x64, 0xee],
+ [0x5a, 0x16, 0x45, 0xb2, 0x76, 0xd5, 0x92, 0xa1],
+ [0xb2, 0x74, 0xcb, 0x8e, 0xbf, 0x87, 0x87, 0x0a],
+ [0x6f, 0x9b, 0xb4, 0x20, 0x3d, 0xe7, 0xb3, 0x81],
+ [0xea, 0xec, 0xb2, 0xa3, 0x0b, 0x22, 0xa8, 0x7f],
+ [0x99, 0x24, 0xa4, 0x3c, 0xc1, 0x31, 0x57, 0x24],
+ [0xbd, 0x83, 0x8d, 0x3a, 0xaf, 0xbf, 0x8d, 0xb7],
+ [0x0b, 0x1a, 0x2a, 0x32, 0x65, 0xd5, 0x1a, 0xea],
+ [0x13, 0x50, 0x79, 0xa3, 0x23, 0x1c, 0xe6, 0x60],
+ [0x93, 0x2b, 0x28, 0x46, 0xe4, 0xd7, 0x06, 0x66],
+ [0xe1, 0x91, 0x5f, 0x5c, 0xb1, 0xec, 0xa4, 0x6c],
+ [0xf3, 0x25, 0x96, 0x5c, 0xa1, 0x6d, 0x62, 0x9f],
+ [0x57, 0x5f, 0xf2, 0x8e, 0x60, 0x38, 0x1b, 0xe5],
+ [0x72, 0x45, 0x06, 0xeb, 0x4c, 0x32, 0x8a, 0x95],
+ ];
+
+ let k0 = 0x_07_06_05_04_03_02_01_00;
+ let k1 = 0x_0f_0e_0d_0c_0b_0a_09_08;
+ let mut buf = Vec::new();
+ let mut t = 0;
+ let mut state_inc = SipHasher::new_with_keys(k0, k1);
+
+ while t < 64 {
+ let vec = u64::from_le_bytes(vecs[t]);
+ let out = hash_with(SipHasher::new_with_keys(k0, k1), &Bytes(&buf));
+ assert_eq!(vec, out);
+
+ let full = hash_with(SipHasher::new_with_keys(k0, k1), &Bytes(&buf));
+ let i = state_inc.finish();
+
+ assert_eq!(full, i);
+ assert_eq!(full, vec);
+
+ buf.push(t as u8);
+ Hasher::write(&mut state_inc, &[t as u8]);
+
+ t += 1;
+ }
+}
+
+#[test]
+#[cfg(target_pointer_width = "32")]
+fn test_hash_usize() {
+ let val = 0xdeadbeef_deadbeef_u64;
+ assert_ne!(hash(&(val as u64)), hash(&(val as usize)));
+ assert_eq!(hash(&(val as u32)), hash(&(val as usize)));
+}
+
+#[test]
+#[cfg(target_pointer_width = "64")]
+fn test_hash_usize() {
+ let val = 0xdeadbeef_deadbeef_u64;
+ assert_eq!(hash(&(val as u64)), hash(&(val as usize)));
+ assert_ne!(hash(&(val as u32)), hash(&(val as usize)));
+}
+
+#[test]
+fn test_hash_idempotent() {
+ let val64 = 0xdeadbeef_deadbeef_u64;
+ assert_eq!(hash(&val64), hash(&val64));
+ let val32 = 0xdeadbeef_u32;
+ assert_eq!(hash(&val32), hash(&val32));
+}
+
+#[test]
+fn test_hash_no_bytes_dropped_64() {
+ let val = 0xdeadbeef_deadbeef_u64;
+
+ assert_ne!(hash(&val), hash(&zero_byte(val, 0)));
+ assert_ne!(hash(&val), hash(&zero_byte(val, 1)));
+ assert_ne!(hash(&val), hash(&zero_byte(val, 2)));
+ assert_ne!(hash(&val), hash(&zero_byte(val, 3)));
+ assert_ne!(hash(&val), hash(&zero_byte(val, 4)));
+ assert_ne!(hash(&val), hash(&zero_byte(val, 5)));
+ assert_ne!(hash(&val), hash(&zero_byte(val, 6)));
+ assert_ne!(hash(&val), hash(&zero_byte(val, 7)));
+
+ fn zero_byte(val: u64, byte: usize) -> u64 {
+ assert!(byte < 8);
+ val & !(0xff << (byte * 8))
+ }
+}
+
+#[test]
+fn test_hash_no_bytes_dropped_32() {
+ let val = 0xdeadbeef_u32;
+
+ assert_ne!(hash(&val), hash(&zero_byte(val, 0)));
+ assert_ne!(hash(&val), hash(&zero_byte(val, 1)));
+ assert_ne!(hash(&val), hash(&zero_byte(val, 2)));
+ assert_ne!(hash(&val), hash(&zero_byte(val, 3)));
+
+ fn zero_byte(val: u32, byte: usize) -> u32 {
+ assert!(byte < 4);
+ val & !(0xff << (byte * 8))
+ }
+}
+
+#[test]
+fn test_hash_no_concat_alias() {
+ let s = ("aa", "bb");
+ let t = ("aabb", "");
+ let u = ("a", "abb");
+
+ assert_ne!(s, t);
+ assert_ne!(t, u);
+ assert_ne!(hash(&s), hash(&t));
+ assert_ne!(hash(&s), hash(&u));
+
+ let u = [1, 0, 0, 0];
+ let v = (&u[..1], &u[1..3], &u[3..]);
+ let w = (&u[..], &u[4..4], &u[4..4]);
+
+ assert_ne!(v, w);
+ assert_ne!(hash(&v), hash(&w));
+}
+
+#[test]
+fn test_write_short_works() {
+ let test_usize = 0xd0c0b0a0usize;
+ let mut h1 = SipHasher::new();
+ h1.write_usize(test_usize);
+ h1.write(b"bytes");
+ h1.write(b"string");
+ h1.write_u8(0xFFu8);
+ h1.write_u8(0x01u8);
+ let mut h2 = SipHasher::new();
+ h2.write(unsafe {
+ slice::from_raw_parts(&test_usize as *const _ as *const u8, mem::size_of::<usize>())
+ });
+ h2.write(b"bytes");
+ h2.write(b"string");
+ h2.write(&[0xFFu8, 0x01u8]);
+ assert_eq!(h1.finish(), h2.finish());
+}
diff --git a/library/core/tests/intrinsics.rs b/library/core/tests/intrinsics.rs
new file mode 100644
index 000000000..06870c6d0
--- /dev/null
+++ b/library/core/tests/intrinsics.rs
@@ -0,0 +1,101 @@
+use core::any::TypeId;
+use core::intrinsics::assume;
+
+#[test]
+fn test_typeid_sized_types() {
+ struct X;
+ struct Y(u32);
+
+ assert_eq!(TypeId::of::<X>(), TypeId::of::<X>());
+ assert_eq!(TypeId::of::<Y>(), TypeId::of::<Y>());
+ assert!(TypeId::of::<X>() != TypeId::of::<Y>());
+}
+
+#[test]
+fn test_typeid_unsized_types() {
+ trait Z {}
+ struct X(str);
+ struct Y(dyn Z + 'static);
+
+ assert_eq!(TypeId::of::<X>(), TypeId::of::<X>());
+ assert_eq!(TypeId::of::<Y>(), TypeId::of::<Y>());
+ assert!(TypeId::of::<X>() != TypeId::of::<Y>());
+}
+
+// Check that the `const_assume` feature allows the `assume` intrinsic
+// to be used in const contexts.
+#[test]
+fn test_assume_can_be_in_const_contexts() {
+ const unsafe fn foo(x: usize, y: usize) -> usize {
+ // SAFETY: the entire function is unsafe, but it is just an example
+ // that is not used elsewhere.
+ unsafe { assume(y != 0) };
+ x / y
+ }
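+ // Integer division truncates toward zero, so 42 / 97 == 0.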
+ let rs = unsafe { foo(42, 97) };
+ assert_eq!(rs, 0);
+}
+
+#[test]
+const fn test_write_bytes_in_const_contexts() {
+ use core::intrinsics::write_bytes;
+
+ const TEST: [u32; 3] = {
+ let mut arr = [1u32, 2, 3];
+ unsafe {
+ write_bytes(arr.as_mut_ptr(), 0, 2);
+ }
+ arr
+ };
+
+ assert!(TEST[0] == 0);
+ assert!(TEST[1] == 0);
+ assert!(TEST[2] == 3);
+
+ const TEST2: [u32; 3] = {
+ let mut arr = [1u32, 2, 3];
+ unsafe {
+ write_bytes(arr.as_mut_ptr(), 1, 2);
+ }
+ arr
+ };
+
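+ // write_bytes(ptr, 1, 2) sets every byte of the first two u32 elements
+ // to 0x01, so each reads back as 0x0101_0101 == 16843009.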
+ assert!(TEST2[0] == 16843009);
+ assert!(TEST2[1] == 16843009);
+ assert!(TEST2[2] == 3);
+}
+
+#[test]
+fn test_hints_in_const_contexts() {
+ use core::intrinsics::{likely, unlikely};
+
+ // In const contexts, they just return their argument.
+ const {
+ assert!(true == likely(true));
+ assert!(false == likely(false));
+ assert!(true == unlikely(true));
+ assert!(false == unlikely(false));
+ assert!(42u32 == core::intrinsics::black_box(42u32));
+ assert!(42u32 == core::hint::black_box(42u32));
+ }
+}
+
+#[test]
+fn test_const_allocate_at_runtime() {
+ use core::intrinsics::const_allocate;
+ unsafe {
+ assert!(const_allocate(4, 4).is_null());
+ }
+}
+
+#[test]
+fn test_const_deallocate_at_runtime() {
+ use core::intrinsics::const_deallocate;
+ const X: &u32 = &42u32;
+ let x = &0u32;
+ unsafe {
+ const_deallocate(X as *const _ as *mut u8, 4, 4); // nop
+ const_deallocate(x as *const _ as *mut u8, 4, 4); // nop
+ const_deallocate(core::ptr::null_mut(), 1, 1); // nop
+ }
+}
diff --git a/library/core/tests/iter/adapters/chain.rs b/library/core/tests/iter/adapters/chain.rs
new file mode 100644
index 000000000..f419f9cec
--- /dev/null
+++ b/library/core/tests/iter/adapters/chain.rs
@@ -0,0 +1,280 @@
+use super::*;
+use core::iter::*;
+
+#[test]
+fn test_iterator_chain() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let ys = [30, 40, 50, 60];
+ let expected = [0, 1, 2, 3, 4, 5, 30, 40, 50, 60];
+ let it = xs.iter().chain(&ys);
+ let mut i = 0;
+ for &x in it {
+ assert_eq!(x, expected[i]);
+ i += 1;
+ }
+ assert_eq!(i, expected.len());
+
+ let ys = (30..).step_by(10).take(4);
+ let it = xs.iter().cloned().chain(ys);
+ let mut i = 0;
+ for x in it {
+ assert_eq!(x, expected[i]);
+ i += 1;
+ }
+ assert_eq!(i, expected.len());
+}
+
+#[test]
+fn test_iterator_chain_advance_by() {
+ fn test_chain(xs: &[i32], ys: &[i32]) {
+ let len = xs.len() + ys.len();
+
+ for i in 0..xs.len() {
+ let mut iter = Unfuse::new(xs).chain(Unfuse::new(ys));
+ iter.advance_by(i).unwrap();
+ assert_eq!(iter.next(), Some(&xs[i]));
+ assert_eq!(iter.advance_by(100), Err(len - i - 1));
+ iter.advance_by(0).unwrap();
+ }
+
+ for i in 0..ys.len() {
+ let mut iter = Unfuse::new(xs).chain(Unfuse::new(ys));
+ iter.advance_by(xs.len() + i).unwrap();
+ assert_eq!(iter.next(), Some(&ys[i]));
+ assert_eq!(iter.advance_by(100), Err(ys.len() - i - 1));
+ iter.advance_by(0).unwrap();
+ }
+
+ let mut iter = xs.iter().chain(ys);
+ iter.advance_by(len).unwrap();
+ assert_eq!(iter.next(), None);
+ iter.advance_by(0).unwrap();
+
+ let mut iter = xs.iter().chain(ys);
+ assert_eq!(iter.advance_by(len + 1), Err(len));
+ iter.advance_by(0).unwrap();
+ }
+
+ test_chain(&[], &[]);
+ test_chain(&[], &[0, 1, 2, 3, 4, 5]);
+ test_chain(&[0, 1, 2, 3, 4, 5], &[]);
+ test_chain(&[0, 1, 2, 3, 4, 5], &[30, 40, 50, 60]);
+}
+
+#[test]
+fn test_iterator_chain_advance_back_by() {
+ fn test_chain(xs: &[i32], ys: &[i32]) {
+ let len = xs.len() + ys.len();
+
+ for i in 0..ys.len() {
+ let mut iter = Unfuse::new(xs).chain(Unfuse::new(ys));
+ iter.advance_back_by(i).unwrap();
+ assert_eq!(iter.next_back(), Some(&ys[ys.len() - i - 1]));
+ assert_eq!(iter.advance_back_by(100), Err(len - i - 1));
+ iter.advance_back_by(0).unwrap();
+ }
+
+ for i in 0..xs.len() {
+ let mut iter = Unfuse::new(xs).chain(Unfuse::new(ys));
+ iter.advance_back_by(ys.len() + i).unwrap();
+ assert_eq!(iter.next_back(), Some(&xs[xs.len() - i - 1]));
+ assert_eq!(iter.advance_back_by(100), Err(xs.len() - i - 1));
+ iter.advance_back_by(0).unwrap();
+ }
+
+ let mut iter = xs.iter().chain(ys);
+ iter.advance_back_by(len).unwrap();
+ assert_eq!(iter.next_back(), None);
+ iter.advance_back_by(0).unwrap();
+
+ let mut iter = xs.iter().chain(ys);
+ assert_eq!(iter.advance_back_by(len + 1), Err(len));
+ iter.advance_back_by(0).unwrap();
+ }
+
+ test_chain(&[], &[]);
+ test_chain(&[], &[0, 1, 2, 3, 4, 5]);
+ test_chain(&[0, 1, 2, 3, 4, 5], &[]);
+ test_chain(&[0, 1, 2, 3, 4, 5], &[30, 40, 50, 60]);
+}
+
+#[test]
+fn test_iterator_chain_nth() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let ys = [30, 40, 50, 60];
+ let zs = [];
+ let expected = [0, 1, 2, 3, 4, 5, 30, 40, 50, 60];
+ for (i, x) in expected.iter().enumerate() {
+ assert_eq!(Some(x), xs.iter().chain(&ys).nth(i));
+ }
+ assert_eq!(zs.iter().chain(&xs).nth(0), Some(&0));
+
+ let mut it = xs.iter().chain(&zs);
+ assert_eq!(it.nth(5), Some(&5));
+ assert_eq!(it.next(), None);
+}
+
+#[test]
+fn test_iterator_chain_nth_back() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let ys = [30, 40, 50, 60];
+ let zs = [];
+ let expected = [0, 1, 2, 3, 4, 5, 30, 40, 50, 60];
+ for (i, x) in expected.iter().rev().enumerate() {
+ assert_eq!(Some(x), xs.iter().chain(&ys).nth_back(i));
+ }
+ assert_eq!(zs.iter().chain(&xs).nth_back(0), Some(&5));
+
+ let mut it = xs.iter().chain(&zs);
+ assert_eq!(it.nth_back(5), Some(&0));
+ assert_eq!(it.next(), None);
+}
+
+#[test]
+fn test_iterator_chain_last() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let ys = [30, 40, 50, 60];
+ let zs = [];
+ assert_eq!(xs.iter().chain(&ys).last(), Some(&60));
+ assert_eq!(zs.iter().chain(&ys).last(), Some(&60));
+ assert_eq!(ys.iter().chain(&zs).last(), Some(&60));
+ assert_eq!(zs.iter().chain(&zs).last(), None);
+}
+
+#[test]
+fn test_iterator_chain_count() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let ys = [30, 40, 50, 60];
+ let zs = [];
+ assert_eq!(xs.iter().chain(&ys).count(), 10);
+ assert_eq!(zs.iter().chain(&ys).count(), 4);
+}
+
+#[test]
+fn test_iterator_chain_find() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let ys = [30, 40, 50, 60];
+ let mut iter = xs.iter().chain(&ys);
+ assert_eq!(iter.find(|&&i| i == 4), Some(&4));
+ assert_eq!(iter.next(), Some(&5));
+ assert_eq!(iter.find(|&&i| i == 40), Some(&40));
+ assert_eq!(iter.next(), Some(&50));
+ assert_eq!(iter.find(|&&i| i == 100), None);
+ assert_eq!(iter.next(), None);
+}
+
+#[test]
+fn test_iterator_chain_size_hint() {
+ // this chains an iterator of length 0 with an iterator of length 1,
+ // so after calling `.next()` once, the iterator is empty and the
+ // state is `ChainState::Back`. `.size_hint()` should now disregard
+ // the size hint of the left iterator
+ let mut iter = Toggle { is_empty: true }.chain(once(()));
+ assert_eq!(iter.next(), Some(()));
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+
+ let mut iter = once(()).chain(Toggle { is_empty: true });
+ assert_eq!(iter.next_back(), Some(()));
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+}
+
+#[test]
+fn test_iterator_chain_unfused() {
+ // Chain shouldn't fuse its second iterator when iterating forwards, nor its first when iterating backwards
+ let mut iter = NonFused::new(empty()).chain(Toggle { is_empty: true });
+ assert!(iter.next().is_none());
+ assert!(iter.next().is_some());
+ assert!(iter.next().is_none());
+
+ let mut iter = Toggle { is_empty: true }.chain(NonFused::new(empty()));
+ assert!(iter.next_back().is_none());
+ assert!(iter.next_back().is_some());
+ assert!(iter.next_back().is_none());
+}
+
+#[test]
+fn test_chain_fold() {
+ let xs = [1, 2, 3];
+ let ys = [1, 2, 0];
+
+ let mut iter = xs.iter().chain(&ys);
+ iter.next();
+ let mut result = Vec::new();
+ iter.fold((), |(), &elt| result.push(elt));
+ assert_eq!(&[2, 3, 1, 2, 0], &result[..]);
+}
+
+#[test]
+fn test_chain_try_folds() {
+ let c = || (0..10).chain(10..20);
+
+ let f = &|acc, x| i32::checked_add(2 * acc, x);
+ assert_eq!(c().try_fold(7, f), (0..20).try_fold(7, f));
+ assert_eq!(c().try_rfold(7, f), (0..20).rev().try_fold(7, f));
+
+ let mut iter = c();
+ assert_eq!(iter.position(|x| x == 5), Some(5));
+ assert_eq!(iter.next(), Some(6), "stopped in front, state Both");
+ assert_eq!(iter.position(|x| x == 13), Some(6));
+ assert_eq!(iter.next(), Some(14), "stopped in back, state Back");
+ assert_eq!(iter.try_fold(0, |acc, x| Some(acc + x)), Some((15..20).sum()));
+
+ let mut iter = c().rev(); // use rev to access try_rfold
+ assert_eq!(iter.position(|x| x == 15), Some(4));
+ assert_eq!(iter.next(), Some(14), "stopped in back, state Both");
+ assert_eq!(iter.position(|x| x == 5), Some(8));
+ assert_eq!(iter.next(), Some(4), "stopped in front, state Front");
+ assert_eq!(iter.try_fold(0, |acc, x| Some(acc + x)), Some((0..4).sum()));
+
+ let mut iter = c();
+ iter.by_ref().rev().nth(14); // skip the last 15, ending in state Front
+ assert_eq!(iter.try_fold(7, f), (0..5).try_fold(7, f));
+
+ let mut iter = c();
+ iter.nth(14); // skip the first 15, ending in state Back
+ assert_eq!(iter.try_rfold(7, f), (15..20).try_rfold(7, f));
+}
+
+#[test]
+fn test_double_ended_chain() {
+ let xs = [1, 2, 3, 4, 5];
+ let ys = [7, 9, 11];
+ let mut it = xs.iter().chain(&ys).rev();
+ assert_eq!(it.next().unwrap(), &11);
+ assert_eq!(it.next().unwrap(), &9);
+ assert_eq!(it.next_back().unwrap(), &1);
+ assert_eq!(it.next_back().unwrap(), &2);
+ assert_eq!(it.next_back().unwrap(), &3);
+ assert_eq!(it.next_back().unwrap(), &4);
+ assert_eq!(it.next_back().unwrap(), &5);
+ assert_eq!(it.next_back().unwrap(), &7);
+ assert_eq!(it.next_back(), None);
+
+    // test that .chain() is well behaved with an unfused iterator (see the note after this test)
+ struct CrazyIterator(bool);
+ impl CrazyIterator {
+ fn new() -> CrazyIterator {
+ CrazyIterator(false)
+ }
+ }
+ impl Iterator for CrazyIterator {
+ type Item = i32;
+ fn next(&mut self) -> Option<i32> {
+ if self.0 {
+ Some(99)
+ } else {
+ self.0 = true;
+ None
+ }
+ }
+ }
+
+ impl DoubleEndedIterator for CrazyIterator {
+ fn next_back(&mut self) -> Option<i32> {
+ self.next()
+ }
+ }
+
+ assert_eq!(CrazyIterator::new().chain(0..10).rev().last(), Some(0));
+ assert!((0..10).chain(CrazyIterator::new()).rev().any(|i| i == 0));
+}
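+// Illustrative note (not part of the upstream file): `CrazyIterator` yields
+// `None` first and `Some(99)` forever after, so a correct `Chain` must stop
+// polling a side once it has reported exhaustion; otherwise `last()` above
+// would chase an endless stream of 99s and never terminate.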
diff --git a/library/core/tests/iter/adapters/cloned.rs b/library/core/tests/iter/adapters/cloned.rs
new file mode 100644
index 000000000..78babb7fe
--- /dev/null
+++ b/library/core/tests/iter/adapters/cloned.rs
@@ -0,0 +1,52 @@
+use core::iter::*;
+
+#[test]
+fn test_cloned() {
+ let xs = [2, 4, 6, 8];
+
+ let mut it = xs.iter().cloned();
+ assert_eq!(it.len(), 4);
+ assert_eq!(it.next(), Some(2));
+ assert_eq!(it.len(), 3);
+ assert_eq!(it.next(), Some(4));
+ assert_eq!(it.len(), 2);
+ assert_eq!(it.next_back(), Some(8));
+ assert_eq!(it.len(), 1);
+ assert_eq!(it.next_back(), Some(6));
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next_back(), None);
+}
+
+#[test]
+fn test_cloned_side_effects() {
+ let mut count = 0;
+ {
+ let iter = [1, 2, 3]
+ .iter()
+ .map(|x| {
+ count += 1;
+ x
+ })
+ .cloned()
+ .zip(&[1]);
+ for _ in iter {}
+ }
+ assert_eq!(count, 2);
+}
+
+#[test]
+fn test_cloned_try_folds() {
+ let a = [1, 2, 3, 4, 5, 6, 7, 8, 9];
+ let f = &|acc, x| i32::checked_add(2 * acc, x);
+ let f_ref = &|acc, &x| i32::checked_add(2 * acc, x);
+ assert_eq!(a.iter().cloned().try_fold(7, f), a.iter().try_fold(7, f_ref));
+ assert_eq!(a.iter().cloned().try_rfold(7, f), a.iter().try_rfold(7, f_ref));
+
+ let a = [10, 20, 30, 40, 100, 60, 70, 80, 90];
+ let mut iter = a.iter().cloned();
+ assert_eq!(iter.try_fold(0_i8, |acc, x| acc.checked_add(x)), None);
+ assert_eq!(iter.next(), Some(60));
+ let mut iter = a.iter().cloned();
+ assert_eq!(iter.try_rfold(0_i8, |acc, x| acc.checked_add(x)), None);
+ assert_eq!(iter.next_back(), Some(70));
+}
diff --git a/library/core/tests/iter/adapters/copied.rs b/library/core/tests/iter/adapters/copied.rs
new file mode 100644
index 000000000..b12f2035d
--- /dev/null
+++ b/library/core/tests/iter/adapters/copied.rs
@@ -0,0 +1,18 @@
+use core::iter::*;
+
+#[test]
+fn test_copied() {
+ let xs = [2, 4, 6, 8];
+
+ let mut it = xs.iter().copied();
+ assert_eq!(it.len(), 4);
+ assert_eq!(it.next(), Some(2));
+ assert_eq!(it.len(), 3);
+ assert_eq!(it.next(), Some(4));
+ assert_eq!(it.len(), 2);
+ assert_eq!(it.next_back(), Some(8));
+ assert_eq!(it.len(), 1);
+ assert_eq!(it.next_back(), Some(6));
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next_back(), None);
+}
diff --git a/library/core/tests/iter/adapters/cycle.rs b/library/core/tests/iter/adapters/cycle.rs
new file mode 100644
index 000000000..8831c09b4
--- /dev/null
+++ b/library/core/tests/iter/adapters/cycle.rs
@@ -0,0 +1,31 @@
+use core::iter::*;
+
+#[test]
+fn test_cycle() {
+ let cycle_len = 3;
+ let it = (0..).step_by(1).take(cycle_len).cycle();
+ assert_eq!(it.size_hint(), (usize::MAX, None));
+ for (i, x) in it.take(100).enumerate() {
+ assert_eq!(i % cycle_len, x);
+ }
+
+ let mut it = (0..).step_by(1).take(0).cycle();
+ assert_eq!(it.size_hint(), (0, Some(0)));
+ assert_eq!(it.next(), None);
+
+ assert_eq!(empty::<i32>().cycle().fold(0, |acc, x| acc + x), 0);
+
+ assert_eq!(once(1).cycle().skip(1).take(4).fold(0, |acc, x| acc + x), 4);
+
+ assert_eq!((0..10).cycle().take(5).sum::<i32>(), 10);
+ assert_eq!((0..10).cycle().take(15).sum::<i32>(), 55);
+ assert_eq!((0..10).cycle().take(25).sum::<i32>(), 100);
+
+ let mut iter = (0..10).cycle();
+ iter.nth(14);
+ assert_eq!(iter.take(8).sum::<i32>(), 38);
+
+ let mut iter = (0..10).cycle();
+ iter.nth(9);
+ assert_eq!(iter.take(3).sum::<i32>(), 3);
+}
diff --git a/library/core/tests/iter/adapters/enumerate.rs b/library/core/tests/iter/adapters/enumerate.rs
new file mode 100644
index 000000000..0e6033878
--- /dev/null
+++ b/library/core/tests/iter/adapters/enumerate.rs
@@ -0,0 +1,107 @@
+use core::iter::*;
+
+#[test]
+fn test_iterator_enumerate() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let it = xs.iter().enumerate();
+ for (i, &x) in it {
+ assert_eq!(i, x);
+ }
+}
+
+#[test]
+fn test_iterator_enumerate_nth() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ for (i, &x) in xs.iter().enumerate() {
+ assert_eq!(i, x);
+ }
+
+ let mut it = xs.iter().enumerate();
+ while let Some((i, &x)) = it.nth(0) {
+ assert_eq!(i, x);
+ }
+
+ let mut it = xs.iter().enumerate();
+ while let Some((i, &x)) = it.nth(1) {
+ assert_eq!(i, x);
+ }
+
+ let (i, &x) = xs.iter().enumerate().nth(3).unwrap();
+ assert_eq!(i, x);
+ assert_eq!(i, 3);
+}
+
+#[test]
+fn test_iterator_enumerate_nth_back() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let mut it = xs.iter().enumerate();
+ while let Some((i, &x)) = it.nth_back(0) {
+ assert_eq!(i, x);
+ }
+
+ let mut it = xs.iter().enumerate();
+ while let Some((i, &x)) = it.nth_back(1) {
+ assert_eq!(i, x);
+ }
+
+ let (i, &x) = xs.iter().enumerate().nth_back(3).unwrap();
+ assert_eq!(i, x);
+ assert_eq!(i, 2);
+}
+
+#[test]
+fn test_iterator_enumerate_count() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ assert_eq!(xs.iter().enumerate().count(), 6);
+}
+
+#[test]
+fn test_iterator_enumerate_fold() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let mut it = xs.iter().enumerate();
+ // steal a couple to get an interesting offset
+ assert_eq!(it.next(), Some((0, &0)));
+ assert_eq!(it.next(), Some((1, &1)));
+ let i = it.fold(2, |i, (j, &x)| {
+ assert_eq!(i, j);
+ assert_eq!(x, xs[j]);
+ i + 1
+ });
+ assert_eq!(i, xs.len());
+
+ let mut it = xs.iter().enumerate();
+ assert_eq!(it.next(), Some((0, &0)));
+ let i = it.rfold(xs.len() - 1, |i, (j, &x)| {
+ assert_eq!(i, j);
+ assert_eq!(x, xs[j]);
+ i - 1
+ });
+ assert_eq!(i, 0);
+}
+
+#[test]
+fn test_enumerate_try_folds() {
+ let f = &|acc, (i, x)| usize::checked_add(2 * acc, x / (i + 1) + i);
+ assert_eq!((9..18).enumerate().try_fold(7, f), (0..9).map(|i| (i, i + 9)).try_fold(7, f));
+ assert_eq!((9..18).enumerate().try_rfold(7, f), (0..9).map(|i| (i, i + 9)).try_rfold(7, f));
+
+ let mut iter = (100..200).enumerate();
+ let f = &|acc, (i, x)| u8::checked_add(acc, u8::checked_div(x, i as u8 + 1)?);
+ assert_eq!(iter.try_fold(0, f), None);
+ assert_eq!(iter.next(), Some((7, 107)));
+ assert_eq!(iter.try_rfold(0, f), None);
+ assert_eq!(iter.next_back(), Some((11, 111)));
+}
+
+#[test]
+fn test_double_ended_enumerate() {
+ let xs = [1, 2, 3, 4, 5, 6];
+ let mut it = xs.iter().cloned().enumerate();
+ assert_eq!(it.next(), Some((0, 1)));
+ assert_eq!(it.next(), Some((1, 2)));
+ assert_eq!(it.next_back(), Some((5, 6)));
+ assert_eq!(it.next_back(), Some((4, 5)));
+ assert_eq!(it.next_back(), Some((3, 4)));
+ assert_eq!(it.next_back(), Some((2, 3)));
+ assert_eq!(it.next(), None);
+}
diff --git a/library/core/tests/iter/adapters/filter.rs b/library/core/tests/iter/adapters/filter.rs
new file mode 100644
index 000000000..a2050d89d
--- /dev/null
+++ b/library/core/tests/iter/adapters/filter.rs
@@ -0,0 +1,52 @@
+use core::iter::*;
+
+#[test]
+fn test_iterator_filter_count() {
+ let xs = [0, 1, 2, 3, 4, 5, 6, 7, 8];
+ assert_eq!(xs.iter().filter(|&&x| x % 2 == 0).count(), 5);
+}
+
+#[test]
+fn test_iterator_filter_fold() {
+ let xs = [0, 1, 2, 3, 4, 5, 6, 7, 8];
+ let ys = [0, 2, 4, 6, 8];
+ let it = xs.iter().filter(|&&x| x % 2 == 0);
+ let i = it.fold(0, |i, &x| {
+ assert_eq!(x, ys[i]);
+ i + 1
+ });
+ assert_eq!(i, ys.len());
+
+ let it = xs.iter().filter(|&&x| x % 2 == 0);
+ let i = it.rfold(ys.len(), |i, &x| {
+ assert_eq!(x, ys[i - 1]);
+ i - 1
+ });
+ assert_eq!(i, 0);
+}
+
+#[test]
+fn test_filter_try_folds() {
+ fn p(&x: &i32) -> bool {
+ 0 <= x && x < 10
+ }
+ let f = &|acc, x| i32::checked_add(2 * acc, x);
+ assert_eq!((-10..20).filter(p).try_fold(7, f), (0..10).try_fold(7, f));
+ assert_eq!((-10..20).filter(p).try_rfold(7, f), (0..10).try_rfold(7, f));
+
+ let mut iter = (0..40).filter(|&x| x % 2 == 1);
+ assert_eq!(iter.try_fold(0, i8::checked_add), None);
+ assert_eq!(iter.next(), Some(25));
+ assert_eq!(iter.try_rfold(0, i8::checked_add), None);
+ assert_eq!(iter.next_back(), Some(31));
+}
+
+#[test]
+fn test_double_ended_filter() {
+ let xs = [1, 2, 3, 4, 5, 6];
+ let mut it = xs.iter().filter(|&x| *x & 1 == 0);
+ assert_eq!(it.next_back().unwrap(), &6);
+ assert_eq!(it.next_back().unwrap(), &4);
+ assert_eq!(it.next().unwrap(), &2);
+ assert_eq!(it.next_back(), None);
+}
diff --git a/library/core/tests/iter/adapters/filter_map.rs b/library/core/tests/iter/adapters/filter_map.rs
new file mode 100644
index 000000000..46738eda6
--- /dev/null
+++ b/library/core/tests/iter/adapters/filter_map.rs
@@ -0,0 +1,50 @@
+use core::iter::*;
+
+#[test]
+fn test_filter_map() {
+ let it = (0..).step_by(1).take(10).filter_map(|x| if x % 2 == 0 { Some(x * x) } else { None });
+ assert_eq!(it.collect::<Vec<usize>>(), [0 * 0, 2 * 2, 4 * 4, 6 * 6, 8 * 8]);
+}
+
+#[test]
+fn test_filter_map_fold() {
+ let xs = [0, 1, 2, 3, 4, 5, 6, 7, 8];
+ let ys = [0 * 0, 2 * 2, 4 * 4, 6 * 6, 8 * 8];
+ let it = xs.iter().filter_map(|&x| if x % 2 == 0 { Some(x * x) } else { None });
+ let i = it.fold(0, |i, x| {
+ assert_eq!(x, ys[i]);
+ i + 1
+ });
+ assert_eq!(i, ys.len());
+
+ let it = xs.iter().filter_map(|&x| if x % 2 == 0 { Some(x * x) } else { None });
+ let i = it.rfold(ys.len(), |i, x| {
+ assert_eq!(x, ys[i - 1]);
+ i - 1
+ });
+ assert_eq!(i, 0);
+}
+
+#[test]
+fn test_filter_map_try_folds() {
+ let mp = &|x| if 0 <= x && x < 10 { Some(x * 2) } else { None };
+ let f = &|acc, x| i32::checked_add(2 * acc, x);
+ assert_eq!((-9..20).filter_map(mp).try_fold(7, f), (0..10).map(|x| 2 * x).try_fold(7, f));
+ assert_eq!((-9..20).filter_map(mp).try_rfold(7, f), (0..10).map(|x| 2 * x).try_rfold(7, f));
+
+ let mut iter = (0..40).filter_map(|x| if x % 2 == 1 { None } else { Some(x * 2 + 10) });
+ assert_eq!(iter.try_fold(0, i8::checked_add), None);
+ assert_eq!(iter.next(), Some(38));
+ assert_eq!(iter.try_rfold(0, i8::checked_add), None);
+ assert_eq!(iter.next_back(), Some(78));
+}
+
+#[test]
+fn test_double_ended_filter_map() {
+ let xs = [1, 2, 3, 4, 5, 6];
+ let mut it = xs.iter().filter_map(|&x| if x & 1 == 0 { Some(x * 2) } else { None });
+ assert_eq!(it.next_back().unwrap(), 12);
+ assert_eq!(it.next_back().unwrap(), 8);
+ assert_eq!(it.next().unwrap(), 4);
+ assert_eq!(it.next_back(), None);
+}
diff --git a/library/core/tests/iter/adapters/flat_map.rs b/library/core/tests/iter/adapters/flat_map.rs
new file mode 100644
index 000000000..ee945e698
--- /dev/null
+++ b/library/core/tests/iter/adapters/flat_map.rs
@@ -0,0 +1,74 @@
+use core::iter::*;
+
+#[test]
+fn test_iterator_flat_map() {
+ let xs = [0, 3, 6];
+ let ys = [0, 1, 2, 3, 4, 5, 6, 7, 8];
+ let it = xs.iter().flat_map(|&x| (x..).step_by(1).take(3));
+ let mut i = 0;
+ for x in it {
+ assert_eq!(x, ys[i]);
+ i += 1;
+ }
+ assert_eq!(i, ys.len());
+}
+
+/// Tests `FlatMap::fold` with items already picked off the front and back,
+/// to make sure all parts of the `FlatMap` are folded correctly.
+#[test]
+fn test_iterator_flat_map_fold() {
+ let xs = [0, 3, 6];
+ let ys = [1, 2, 3, 4, 5, 6, 7];
+ let mut it = xs.iter().flat_map(|&x| x..x + 3);
+ assert_eq!(it.next(), Some(0));
+ assert_eq!(it.next_back(), Some(8));
+ let i = it.fold(0, |i, x| {
+ assert_eq!(x, ys[i]);
+ i + 1
+ });
+ assert_eq!(i, ys.len());
+
+ let mut it = xs.iter().flat_map(|&x| x..x + 3);
+ assert_eq!(it.next(), Some(0));
+ assert_eq!(it.next_back(), Some(8));
+ let i = it.rfold(ys.len(), |i, x| {
+ assert_eq!(x, ys[i - 1]);
+ i - 1
+ });
+ assert_eq!(i, 0);
+}
+
+#[test]
+fn test_flat_map_try_folds() {
+ let f = &|acc, x| i32::checked_add(acc * 2 / 3, x);
+ let mr = &|x| (5 * x)..(5 * x + 5);
+ assert_eq!((0..10).flat_map(mr).try_fold(7, f), (0..50).try_fold(7, f));
+ assert_eq!((0..10).flat_map(mr).try_rfold(7, f), (0..50).try_rfold(7, f));
+ let mut iter = (0..10).flat_map(mr);
+ iter.next();
+ iter.next_back(); // have front and back iters in progress
+ assert_eq!(iter.try_rfold(7, f), (1..49).try_rfold(7, f));
+
+ let mut iter = (0..10).flat_map(|x| (4 * x)..(4 * x + 4));
+ assert_eq!(iter.try_fold(0, i8::checked_add), None);
+ assert_eq!(iter.next(), Some(17));
+ assert_eq!(iter.try_rfold(0, i8::checked_add), None);
+ assert_eq!(iter.next_back(), Some(35));
+}
+
+#[test]
+fn test_double_ended_flat_map() {
+ let u = [0, 1];
+ let v = [5, 6, 7, 8];
+ let mut it = u.iter().flat_map(|x| &v[*x..v.len()]);
+ assert_eq!(it.next_back().unwrap(), &8);
+ assert_eq!(it.next().unwrap(), &5);
+ assert_eq!(it.next_back().unwrap(), &7);
+ assert_eq!(it.next_back().unwrap(), &6);
+ assert_eq!(it.next_back().unwrap(), &8);
+ assert_eq!(it.next().unwrap(), &6);
+ assert_eq!(it.next_back().unwrap(), &7);
+ assert_eq!(it.next_back(), None);
+ assert_eq!(it.next(), None);
+ assert_eq!(it.next_back(), None);
+}
diff --git a/library/core/tests/iter/adapters/flatten.rs b/library/core/tests/iter/adapters/flatten.rs
new file mode 100644
index 000000000..f8ab8c9d4
--- /dev/null
+++ b/library/core/tests/iter/adapters/flatten.rs
@@ -0,0 +1,170 @@
+use super::*;
+use core::iter::*;
+
+#[test]
+fn test_iterator_flatten() {
+ let xs = [0, 3, 6];
+ let ys = [0, 1, 2, 3, 4, 5, 6, 7, 8];
+ let it = xs.iter().map(|&x| (x..).step_by(1).take(3)).flatten();
+ let mut i = 0;
+ for x in it {
+ assert_eq!(x, ys[i]);
+ i += 1;
+ }
+ assert_eq!(i, ys.len());
+}
+
+/// Tests `Flatten::fold` with items already picked off the front and back,
+/// to make sure all parts of the `Flatten` are folded correctly.
+#[test]
+fn test_iterator_flatten_fold() {
+ let xs = [0, 3, 6];
+ let ys = [1, 2, 3, 4, 5, 6, 7];
+ let mut it = xs.iter().map(|&x| x..x + 3).flatten();
+ assert_eq!(it.next(), Some(0));
+ assert_eq!(it.next_back(), Some(8));
+ let i = it.fold(0, |i, x| {
+ assert_eq!(x, ys[i]);
+ i + 1
+ });
+ assert_eq!(i, ys.len());
+
+ let mut it = xs.iter().map(|&x| x..x + 3).flatten();
+ assert_eq!(it.next(), Some(0));
+ assert_eq!(it.next_back(), Some(8));
+ let i = it.rfold(ys.len(), |i, x| {
+ assert_eq!(x, ys[i - 1]);
+ i - 1
+ });
+ assert_eq!(i, 0);
+}
+
+#[test]
+fn test_flatten_try_folds() {
+ let f = &|acc, x| i32::checked_add(acc * 2 / 3, x);
+ let mr = &|x| (5 * x)..(5 * x + 5);
+ assert_eq!((0..10).map(mr).flatten().try_fold(7, f), (0..50).try_fold(7, f));
+ assert_eq!((0..10).map(mr).flatten().try_rfold(7, f), (0..50).try_rfold(7, f));
+ let mut iter = (0..10).map(mr).flatten();
+ iter.next();
+ iter.next_back(); // have front and back iters in progress
+ assert_eq!(iter.try_rfold(7, f), (1..49).try_rfold(7, f));
+
+ let mut iter = (0..10).map(|x| (4 * x)..(4 * x + 4)).flatten();
+ assert_eq!(iter.try_fold(0, i8::checked_add), None);
+ assert_eq!(iter.next(), Some(17));
+ assert_eq!(iter.try_rfold(0, i8::checked_add), None);
+ assert_eq!(iter.next_back(), Some(35));
+}
+
+#[test]
+fn test_flatten_advance_by() {
+ let mut it = once(0..10).chain(once(10..30)).chain(once(30..40)).flatten();
+
+ it.advance_by(5).unwrap();
+ assert_eq!(it.next(), Some(5));
+ it.advance_by(9).unwrap();
+ assert_eq!(it.next(), Some(15));
+ it.advance_back_by(4).unwrap();
+ assert_eq!(it.next_back(), Some(35));
+ it.advance_back_by(9).unwrap();
+ assert_eq!(it.next_back(), Some(25));
+
+ assert_eq!(it.advance_by(usize::MAX), Err(9));
+ assert_eq!(it.advance_back_by(usize::MAX), Err(0));
+ it.advance_by(0).unwrap();
+ it.advance_back_by(0).unwrap();
+ assert_eq!(it.size_hint(), (0, Some(0)));
+}
+
+#[test]
+fn test_flatten_non_fused_outer() {
+ let mut iter = NonFused::new(once(0..2)).flatten();
+
+ assert_eq!(iter.next_back(), Some(1));
+ assert_eq!(iter.next(), Some(0));
+ assert_eq!(iter.next(), None);
+ assert_eq!(iter.next(), None);
+
+ let mut iter = NonFused::new(once(0..2)).flatten();
+
+ assert_eq!(iter.next(), Some(0));
+ assert_eq!(iter.next_back(), Some(1));
+ assert_eq!(iter.next_back(), None);
+ assert_eq!(iter.next_back(), None);
+}
+
+#[test]
+fn test_flatten_non_fused_inner() {
+ let mut iter = once(0..1).chain(once(1..3)).flat_map(NonFused::new);
+
+ assert_eq!(iter.next_back(), Some(2));
+ assert_eq!(iter.next(), Some(0));
+ assert_eq!(iter.next(), Some(1));
+ assert_eq!(iter.next(), None);
+ assert_eq!(iter.next(), None);
+
+ let mut iter = once(0..1).chain(once(1..3)).flat_map(NonFused::new);
+
+ assert_eq!(iter.next(), Some(0));
+ assert_eq!(iter.next_back(), Some(2));
+ assert_eq!(iter.next_back(), Some(1));
+ assert_eq!(iter.next_back(), None);
+ assert_eq!(iter.next_back(), None);
+}
+
+#[test]
+fn test_double_ended_flatten() {
+ let u = [0, 1];
+ let v = [5, 6, 7, 8];
+ let mut it = u.iter().map(|x| &v[*x..v.len()]).flatten();
+ assert_eq!(it.next_back().unwrap(), &8);
+ assert_eq!(it.next().unwrap(), &5);
+ assert_eq!(it.next_back().unwrap(), &7);
+ assert_eq!(it.next_back().unwrap(), &6);
+ assert_eq!(it.next_back().unwrap(), &8);
+ assert_eq!(it.next().unwrap(), &6);
+ assert_eq!(it.next_back().unwrap(), &7);
+ assert_eq!(it.next_back(), None);
+ assert_eq!(it.next(), None);
+ assert_eq!(it.next_back(), None);
+}
+
+#[test]
+fn test_trusted_len_flatten() {
+ fn assert_trusted_len<T: TrustedLen>(_: &T) {}
+ let mut iter = IntoIterator::into_iter([[0; 3]; 4]).flatten();
+ assert_trusted_len(&iter);
+
+ assert_eq!(iter.size_hint(), (12, Some(12)));
+ iter.next();
+ assert_eq!(iter.size_hint(), (11, Some(11)));
+ iter.next_back();
+ assert_eq!(iter.size_hint(), (10, Some(10)));
+
+ let iter = IntoIterator::into_iter([[(); usize::MAX]; 1]).flatten();
+ assert_eq!(iter.size_hint(), (usize::MAX, Some(usize::MAX)));
+
+ let iter = IntoIterator::into_iter([[(); usize::MAX]; 2]).flatten();
+ assert_eq!(iter.size_hint(), (usize::MAX, None));
+
+ let mut a = [(); 10];
+ let mut b = [(); 10];
+
+ let iter = IntoIterator::into_iter([&mut a, &mut b]).flatten();
+ assert_trusted_len(&iter);
+ assert_eq!(iter.size_hint(), (20, Some(20)));
+ core::mem::drop(iter);
+
+ let iter = IntoIterator::into_iter([&a, &b]).flatten();
+ assert_trusted_len(&iter);
+ assert_eq!(iter.size_hint(), (20, Some(20)));
+
+ let iter = [(), (), ()].iter().flat_map(|_| [(); 1000]);
+ assert_trusted_len(&iter);
+ assert_eq!(iter.size_hint(), (3000, Some(3000)));
+
+ let iter = [(), ()].iter().flat_map(|_| &a);
+ assert_trusted_len(&iter);
+ assert_eq!(iter.size_hint(), (20, Some(20)));
+}
diff --git a/library/core/tests/iter/adapters/fuse.rs b/library/core/tests/iter/adapters/fuse.rs
new file mode 100644
index 000000000..f41b379b3
--- /dev/null
+++ b/library/core/tests/iter/adapters/fuse.rs
@@ -0,0 +1,75 @@
+use core::iter::*;
+
+#[test]
+fn test_fuse_nth() {
+ let xs = [0, 1, 2];
+ let mut it = xs.iter();
+
+ assert_eq!(it.len(), 3);
+ assert_eq!(it.nth(2), Some(&2));
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.nth(2), None);
+ assert_eq!(it.len(), 0);
+}
+
+#[test]
+fn test_fuse_last() {
+ let xs = [0, 1, 2];
+ let it = xs.iter();
+
+ assert_eq!(it.len(), 3);
+ assert_eq!(it.last(), Some(&2));
+}
+
+#[test]
+fn test_fuse_count() {
+ let xs = [0, 1, 2];
+ let it = xs.iter();
+
+ assert_eq!(it.len(), 3);
+ assert_eq!(it.count(), 3);
+ // Can't check len now because count consumes.
+}
+
+#[test]
+fn test_fuse_fold() {
+ let xs = [0, 1, 2];
+ let it = xs.iter(); // `FusedIterator`
+ let i = it.fuse().fold(0, |i, &x| {
+ assert_eq!(x, xs[i]);
+ i + 1
+ });
+ assert_eq!(i, xs.len());
+
+ let it = xs.iter(); // `FusedIterator`
+ let i = it.fuse().rfold(xs.len(), |i, &x| {
+ assert_eq!(x, xs[i - 1]);
+ i - 1
+ });
+ assert_eq!(i, 0);
+
+ let it = xs.iter().scan((), |_, &x| Some(x)); // `!FusedIterator`
+ let i = it.fuse().fold(0, |i, x| {
+ assert_eq!(x, xs[i]);
+ i + 1
+ });
+ assert_eq!(i, xs.len());
+}
+
+#[test]
+fn test_fuse() {
+ let mut it = 0..3;
+ assert_eq!(it.len(), 3);
+ assert_eq!(it.next(), Some(0));
+ assert_eq!(it.len(), 2);
+ assert_eq!(it.next(), Some(1));
+ assert_eq!(it.len(), 1);
+ assert_eq!(it.next(), Some(2));
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+ assert_eq!(it.len(), 0);
+}
diff --git a/library/core/tests/iter/adapters/inspect.rs b/library/core/tests/iter/adapters/inspect.rs
new file mode 100644
index 000000000..939e3a28a
--- /dev/null
+++ b/library/core/tests/iter/adapters/inspect.rs
@@ -0,0 +1,38 @@
+use core::iter::*;
+
+#[test]
+fn test_inspect() {
+ let xs = [1, 2, 3, 4];
+ let mut n = 0;
+
+ let ys = xs.iter().cloned().inspect(|_| n += 1).collect::<Vec<usize>>();
+
+ assert_eq!(n, xs.len());
+ assert_eq!(&xs[..], &ys[..]);
+}
+
+#[test]
+fn test_inspect_fold() {
+ let xs = [1, 2, 3, 4];
+ let mut n = 0;
+ {
+ let it = xs.iter().inspect(|_| n += 1);
+ let i = it.fold(0, |i, &x| {
+ assert_eq!(x, xs[i]);
+ i + 1
+ });
+ assert_eq!(i, xs.len());
+ }
+ assert_eq!(n, xs.len());
+
+ let mut n = 0;
+ {
+ let it = xs.iter().inspect(|_| n += 1);
+ let i = it.rfold(xs.len(), |i, &x| {
+ assert_eq!(x, xs[i - 1]);
+ i - 1
+ });
+ assert_eq!(i, 0);
+ }
+ assert_eq!(n, xs.len());
+}
diff --git a/library/core/tests/iter/adapters/intersperse.rs b/library/core/tests/iter/adapters/intersperse.rs
new file mode 100644
index 000000000..72ae59b6b
--- /dev/null
+++ b/library/core/tests/iter/adapters/intersperse.rs
@@ -0,0 +1,154 @@
+use core::iter::*;
+
+#[test]
+fn test_intersperse() {
+ let v = std::iter::empty().intersperse(0u32).collect::<Vec<_>>();
+ assert_eq!(v, vec![]);
+
+ let v = std::iter::once(1).intersperse(0).collect::<Vec<_>>();
+ assert_eq!(v, vec![1]);
+
+ let xs = ["a", "", "b", "c"];
+ let v: Vec<&str> = xs.iter().map(|x| *x).intersperse(", ").collect();
+ let text: String = v.concat();
+ assert_eq!(text, "a, , b, c".to_string());
+
+ let ys = [0, 1, 2, 3];
+ let mut it = ys[..0].iter().map(|x| *x).intersperse(1);
+ assert!(it.next() == None);
+}
+
+#[test]
+fn test_intersperse_size_hint() {
+ let iter = std::iter::empty::<i32>().intersperse(0);
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+
+ let xs = ["a", "", "b", "c"];
+ let mut iter = xs.iter().map(|x| *x).intersperse(", ");
+ assert_eq!(iter.size_hint(), (7, Some(7)));
+
+ assert_eq!(iter.next(), Some("a"));
+ assert_eq!(iter.size_hint(), (6, Some(6)));
+ assert_eq!(iter.next(), Some(", "));
+ assert_eq!(iter.size_hint(), (5, Some(5)));
+
+ assert_eq!([].iter().intersperse(&()).size_hint(), (0, Some(0)));
+}
+
+#[test]
+fn test_fold_specialization_intersperse() {
+ let mut iter = (1..2).intersperse(0);
+ iter.clone().for_each(|x| assert_eq!(Some(x), iter.next()));
+
+ let mut iter = (1..3).intersperse(0);
+ iter.clone().for_each(|x| assert_eq!(Some(x), iter.next()));
+
+ let mut iter = (1..4).intersperse(0);
+ iter.clone().for_each(|x| assert_eq!(Some(x), iter.next()));
+}
+
+#[test]
+fn test_try_fold_specialization_intersperse_ok() {
+ let mut iter = (1..2).intersperse(0);
+ iter.clone().try_for_each(|x| {
+ assert_eq!(Some(x), iter.next());
+ Some(())
+ });
+
+ let mut iter = (1..3).intersperse(0);
+ iter.clone().try_for_each(|x| {
+ assert_eq!(Some(x), iter.next());
+ Some(())
+ });
+
+ let mut iter = (1..4).intersperse(0);
+ iter.clone().try_for_each(|x| {
+ assert_eq!(Some(x), iter.next());
+ Some(())
+ });
+}
+
+#[test]
+fn test_intersperse_with() {
+ #[derive(PartialEq, Debug)]
+ struct NotClone {
+ u: u32,
+ }
+ let r = [NotClone { u: 0 }, NotClone { u: 1 }]
+ .into_iter()
+ .intersperse_with(|| NotClone { u: 2 })
+ .collect::<Vec<_>>();
+ assert_eq!(r, vec![NotClone { u: 0 }, NotClone { u: 2 }, NotClone { u: 1 }]);
+
+ let mut ctr = 100;
+ let separator = || {
+ ctr *= 2;
+ ctr
+ };
+ let r = (0..3).intersperse_with(separator).collect::<Vec<_>>();
+ assert_eq!(r, vec![0, 200, 1, 400, 2]);
+}
+
+#[test]
+fn test_intersperse_fold() {
+ let v = (1..4).intersperse(9).fold(Vec::new(), |mut acc, x| {
+ acc.push(x);
+ acc
+ });
+ assert_eq!(v.as_slice(), [1, 9, 2, 9, 3]);
+
+ let mut iter = (1..4).intersperse(9);
+ assert_eq!(iter.next(), Some(1));
+ let v = iter.fold(Vec::new(), |mut acc, x| {
+ acc.push(x);
+ acc
+ });
+ assert_eq!(v.as_slice(), [9, 2, 9, 3]);
+
+ struct NoneAtStart(i32); // Produces: None, Some(2), Some(3), None, ...
+ impl Iterator for NoneAtStart {
+ type Item = i32;
+ fn next(&mut self) -> Option<i32> {
+ self.0 += 1;
+ Some(self.0).filter(|i| i % 3 != 1)
+ }
+ }
+
+ let v = NoneAtStart(0).intersperse(1000).fold(0, |a, b| a + b);
+ assert_eq!(v, 0);
+}
+
+#[test]
+fn test_intersperse_collect_string() {
+ let contents = [1, 2, 3];
+
+ let contents_string = contents
+ .into_iter()
+ .map(|id| id.to_string())
+ .intersperse(", ".to_owned())
+ .collect::<String>();
+ assert_eq!(contents_string, "1, 2, 3");
+}
+
+#[test]
+fn test_try_fold_specialization_intersperse_err() {
+ let orig_iter = ["a", "b"].iter().copied().intersperse("-");
+
+ // Abort after the first item.
+ let mut iter = orig_iter.clone();
+ iter.try_for_each(|_| None::<()>);
+ assert_eq!(iter.next(), Some("-"));
+ assert_eq!(iter.next(), Some("b"));
+ assert_eq!(iter.next(), None);
+
+ // Abort after the second item.
+ let mut iter = orig_iter.clone();
+ iter.try_for_each(|item| if item == "-" { None } else { Some(()) });
+ assert_eq!(iter.next(), Some("b"));
+ assert_eq!(iter.next(), None);
+
+ // Abort after the third item.
+ let mut iter = orig_iter.clone();
+ iter.try_for_each(|item| if item == "b" { None } else { Some(()) });
+ assert_eq!(iter.next(), None);
+}
diff --git a/library/core/tests/iter/adapters/map.rs b/library/core/tests/iter/adapters/map.rs
new file mode 100644
index 000000000..77ce3819b
--- /dev/null
+++ b/library/core/tests/iter/adapters/map.rs
@@ -0,0 +1,27 @@
+use core::iter::*;
+
+#[test]
+fn test_map_try_folds() {
+ let f = &|acc, x| i32::checked_add(2 * acc, x);
+ assert_eq!((0..10).map(|x| x + 3).try_fold(7, f), (3..13).try_fold(7, f));
+ assert_eq!((0..10).map(|x| x + 3).try_rfold(7, f), (3..13).try_rfold(7, f));
+
+ let mut iter = (0..40).map(|x| x + 10);
+ assert_eq!(iter.try_fold(0, i8::checked_add), None);
+ assert_eq!(iter.next(), Some(20));
+ assert_eq!(iter.try_rfold(0, i8::checked_add), None);
+ assert_eq!(iter.next_back(), Some(46));
+}
+
+#[test]
+fn test_double_ended_map() {
+ let xs = [1, 2, 3, 4, 5, 6];
+ let mut it = xs.iter().map(|&x| x * -1);
+ assert_eq!(it.next(), Some(-1));
+ assert_eq!(it.next(), Some(-2));
+ assert_eq!(it.next_back(), Some(-6));
+ assert_eq!(it.next_back(), Some(-5));
+ assert_eq!(it.next(), Some(-3));
+ assert_eq!(it.next_back(), Some(-4));
+ assert_eq!(it.next(), None);
+}
diff --git a/library/core/tests/iter/adapters/mod.rs b/library/core/tests/iter/adapters/mod.rs
new file mode 100644
index 000000000..567d9fe49
--- /dev/null
+++ b/library/core/tests/iter/adapters/mod.rs
@@ -0,0 +1,185 @@
+mod chain;
+mod cloned;
+mod copied;
+mod cycle;
+mod enumerate;
+mod filter;
+mod filter_map;
+mod flat_map;
+mod flatten;
+mod fuse;
+mod inspect;
+mod intersperse;
+mod map;
+mod peekable;
+mod scan;
+mod skip;
+mod skip_while;
+mod step_by;
+mod take;
+mod take_while;
+mod zip;
+
+use core::cell::Cell;
+
+/// An iterator that panics whenever `next` or `next_back` is called
+/// after `None` has already been returned. This does not violate
+/// `Iterator`'s contract. Used to test that iterator adapters don't
+/// poll their inner iterators after exhausting them (see the usage
+/// sketch after the `DoubleEndedIterator` impl below).
+pub struct NonFused<I> {
+ iter: I,
+ done: bool,
+}
+
+impl<I> NonFused<I> {
+ pub fn new(iter: I) -> Self {
+ Self { iter, done: false }
+ }
+}
+
+impl<I> Iterator for NonFused<I>
+where
+ I: Iterator,
+{
+ type Item = I::Item;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ assert!(!self.done, "this iterator has already returned None");
+ self.iter.next().or_else(|| {
+ self.done = true;
+ None
+ })
+ }
+}
+
+impl<I> DoubleEndedIterator for NonFused<I>
+where
+ I: DoubleEndedIterator,
+{
+ fn next_back(&mut self) -> Option<Self::Item> {
+ assert!(!self.done, "this iterator has already returned None");
+ self.iter.next_back().or_else(|| {
+ self.done = true;
+ None
+ })
+ }
+}
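+// Usage sketch (illustrative, not part of the upstream file): wrap the inner
+// iterator in `NonFused`, apply the adapter under test, and drive it past
+// exhaustion. A well-behaved adapter never polls the inner iterator a second
+// time after `None`, so nothing panics:
+//
+//     let mut it = NonFused::new(core::iter::empty::<i32>()).fuse();
+//     assert_eq!(it.next(), None); // inner returns `None`, `done` is set
+//     assert_eq!(it.next(), None); // `Fuse` short-circuits; no second poll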
+
+/// An iterator wrapper that panics whenever `next` or `next_back` is called
+/// after `None` has been returned.
+pub struct Unfuse<I> {
+ iter: I,
+ exhausted: bool,
+}
+
+impl<I> Unfuse<I> {
+ pub fn new<T>(iter: T) -> Self
+ where
+ T: IntoIterator<IntoIter = I>,
+ {
+ Self { iter: iter.into_iter(), exhausted: false }
+ }
+}
+
+impl<I> Iterator for Unfuse<I>
+where
+ I: Iterator,
+{
+ type Item = I::Item;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ assert!(!self.exhausted);
+ let next = self.iter.next();
+ self.exhausted = next.is_none();
+ next
+ }
+}
+
+impl<I> DoubleEndedIterator for Unfuse<I>
+where
+ I: DoubleEndedIterator,
+{
+ fn next_back(&mut self) -> Option<Self::Item> {
+ assert!(!self.exhausted);
+ let next = self.iter.next_back();
+ self.exhausted = next.is_none();
+ next
+ }
+}
+
+pub struct Toggle {
+ is_empty: bool,
+}
+
+impl Iterator for Toggle {
+ type Item = ();
+
+ // alternates between `None` and `Some(())`
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.is_empty {
+ self.is_empty = false;
+ None
+ } else {
+ self.is_empty = true;
+ Some(())
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.is_empty { (0, Some(0)) } else { (1, Some(1)) }
+ }
+}
+
+impl DoubleEndedIterator for Toggle {
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.next()
+ }
+}
+
+/// An iterator that follows the `Iterator` contract but is not fused:
+/// after returning `None` once, it starts producing elements again if
+/// `.next()` keeps being called (see the note after the impl below).
+pub struct CycleIter<'a, T> {
+ index: usize,
+ data: &'a [T],
+}
+
+impl<'a, T> CycleIter<'a, T> {
+ pub fn new(data: &'a [T]) -> Self {
+ Self { index: 0, data }
+ }
+}
+
+impl<'a, T> Iterator for CycleIter<'a, T> {
+ type Item = &'a T;
+ fn next(&mut self) -> Option<Self::Item> {
+ let elt = self.data.get(self.index);
+ self.index += 1;
+ self.index %= 1 + self.data.len();
+ elt
+ }
+}
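+// Illustrative note (not part of the upstream file): for
+// `CycleIter::new(&[1, 2])` the modulo wrap-around yields
+//     Some(&1), Some(&2), None, Some(&1), Some(&2), None, ...
+// i.e. one `None` per revolution, which is exactly the non-fused behavior
+// the `Peekable` "remember peek none" tests rely on.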
+
+#[derive(Debug)]
+struct CountClone(Cell<i32>);
+
+impl CountClone {
+ pub fn new() -> Self {
+ Self(Cell::new(0))
+ }
+}
+
+impl PartialEq<i32> for CountClone {
+ fn eq(&self, rhs: &i32) -> bool {
+ self.0.get() == *rhs
+ }
+}
+
+impl Clone for CountClone {
+ fn clone(&self) -> Self {
+ let ret = CountClone(self.0.clone());
+ let n = self.0.get();
+ self.0.set(n + 1);
+ ret
+ }
+}
diff --git a/library/core/tests/iter/adapters/peekable.rs b/library/core/tests/iter/adapters/peekable.rs
new file mode 100644
index 000000000..c1a1c29b6
--- /dev/null
+++ b/library/core/tests/iter/adapters/peekable.rs
@@ -0,0 +1,272 @@
+use super::*;
+use core::iter::*;
+
+#[test]
+fn test_iterator_peekable() {
+ let xs = vec![0, 1, 2, 3, 4, 5];
+
+ let mut it = xs.iter().cloned().peekable();
+ assert_eq!(it.len(), 6);
+ assert_eq!(it.peek().unwrap(), &0);
+ assert_eq!(it.len(), 6);
+ assert_eq!(it.next().unwrap(), 0);
+ assert_eq!(it.len(), 5);
+ assert_eq!(it.next().unwrap(), 1);
+ assert_eq!(it.len(), 4);
+ assert_eq!(it.next().unwrap(), 2);
+ assert_eq!(it.len(), 3);
+ assert_eq!(it.peek().unwrap(), &3);
+ assert_eq!(it.len(), 3);
+ assert_eq!(it.peek().unwrap(), &3);
+ assert_eq!(it.len(), 3);
+ assert_eq!(it.next().unwrap(), 3);
+ assert_eq!(it.len(), 2);
+ assert_eq!(it.next().unwrap(), 4);
+ assert_eq!(it.len(), 1);
+ assert_eq!(it.peek().unwrap(), &5);
+ assert_eq!(it.len(), 1);
+ assert_eq!(it.next().unwrap(), 5);
+ assert_eq!(it.len(), 0);
+ assert!(it.peek().is_none());
+ assert_eq!(it.len(), 0);
+ assert!(it.next().is_none());
+ assert_eq!(it.len(), 0);
+
+ let mut it = xs.iter().cloned().peekable();
+ assert_eq!(it.len(), 6);
+ assert_eq!(it.peek().unwrap(), &0);
+ assert_eq!(it.len(), 6);
+ assert_eq!(it.next_back().unwrap(), 5);
+ assert_eq!(it.len(), 5);
+ assert_eq!(it.next_back().unwrap(), 4);
+ assert_eq!(it.len(), 4);
+ assert_eq!(it.next_back().unwrap(), 3);
+ assert_eq!(it.len(), 3);
+ assert_eq!(it.peek().unwrap(), &0);
+ assert_eq!(it.len(), 3);
+ assert_eq!(it.peek().unwrap(), &0);
+ assert_eq!(it.len(), 3);
+ assert_eq!(it.next_back().unwrap(), 2);
+ assert_eq!(it.len(), 2);
+ assert_eq!(it.next_back().unwrap(), 1);
+ assert_eq!(it.len(), 1);
+ assert_eq!(it.peek().unwrap(), &0);
+ assert_eq!(it.len(), 1);
+ assert_eq!(it.next_back().unwrap(), 0);
+ assert_eq!(it.len(), 0);
+ assert!(it.peek().is_none());
+ assert_eq!(it.len(), 0);
+ assert!(it.next_back().is_none());
+ assert_eq!(it.len(), 0);
+}
+
+#[test]
+fn test_iterator_peekable_count() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let ys = [10];
+ let zs: [i32; 0] = [];
+
+ assert_eq!(xs.iter().peekable().count(), 6);
+
+ let mut it = xs.iter().peekable();
+ assert_eq!(it.peek(), Some(&&0));
+ assert_eq!(it.count(), 6);
+
+ assert_eq!(ys.iter().peekable().count(), 1);
+
+ let mut it = ys.iter().peekable();
+ assert_eq!(it.peek(), Some(&&10));
+ assert_eq!(it.count(), 1);
+
+ assert_eq!(zs.iter().peekable().count(), 0);
+
+ let mut it = zs.iter().peekable();
+ assert_eq!(it.peek(), None);
+}
+
+#[test]
+fn test_iterator_peekable_nth() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let mut it = xs.iter().peekable();
+
+ assert_eq!(it.peek(), Some(&&0));
+ assert_eq!(it.nth(0), Some(&0));
+ assert_eq!(it.peek(), Some(&&1));
+ assert_eq!(it.nth(1), Some(&2));
+ assert_eq!(it.peek(), Some(&&3));
+ assert_eq!(it.nth(2), Some(&5));
+ assert_eq!(it.next(), None);
+}
+
+#[test]
+fn test_iterator_peekable_last() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let ys = [0];
+
+ let mut it = xs.iter().peekable();
+ assert_eq!(it.peek(), Some(&&0));
+ assert_eq!(it.last(), Some(&5));
+
+ let mut it = ys.iter().peekable();
+ assert_eq!(it.peek(), Some(&&0));
+ assert_eq!(it.last(), Some(&0));
+
+ let mut it = ys.iter().peekable();
+ assert_eq!(it.next(), Some(&0));
+ assert_eq!(it.peek(), None);
+ assert_eq!(it.last(), None);
+}
+
+#[test]
+fn test_iterator_peekable_fold() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let mut it = xs.iter().peekable();
+ assert_eq!(it.peek(), Some(&&0));
+ let i = it.fold(0, |i, &x| {
+ assert_eq!(x, xs[i]);
+ i + 1
+ });
+ assert_eq!(i, xs.len());
+}
+
+#[test]
+fn test_iterator_peekable_rfold() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let mut it = xs.iter().peekable();
+ assert_eq!(it.peek(), Some(&&0));
+ let i = it.rfold(0, |i, &x| {
+ assert_eq!(x, xs[xs.len() - 1 - i]);
+ i + 1
+ });
+ assert_eq!(i, xs.len());
+}
+
+#[test]
+fn test_iterator_peekable_next_if_eq() {
+ // first, try on references
+ let xs = ["Heart", "of", "Gold"];
+ let mut it = xs.into_iter().peekable();
+ // try before `peek()`
+ assert_eq!(it.next_if_eq(&"trillian"), None);
+ assert_eq!(it.next_if_eq(&"Heart"), Some("Heart"));
+    // try after `peek()`
+ assert_eq!(it.peek(), Some(&"of"));
+ assert_eq!(it.next_if_eq(&"of"), Some("of"));
+ assert_eq!(it.next_if_eq(&"zaphod"), None);
+ // make sure `next()` still behaves
+ assert_eq!(it.next(), Some("Gold"));
+
+ // make sure comparison works for owned values
+ let xs = [String::from("Ludicrous"), "speed".into()];
+ let mut it = xs.into_iter().peekable();
+ // make sure basic functionality works
+ assert_eq!(it.next_if_eq("Ludicrous"), Some("Ludicrous".into()));
+ assert_eq!(it.next_if_eq("speed"), Some("speed".into()));
+ assert_eq!(it.next_if_eq(""), None);
+}
+
+#[test]
+fn test_iterator_peekable_mut() {
+ let mut it = [1, 2, 3].into_iter().peekable();
+ if let Some(p) = it.peek_mut() {
+ if *p == 1 {
+ *p = 5;
+ }
+ }
+ assert_eq!(it.collect::<Vec<_>>(), vec![5, 2, 3]);
+}
+
+#[test]
+fn test_iterator_peekable_remember_peek_none_1() {
+ // Check that the loop using .peek() terminates
+ let data = [1, 2, 3];
+ let mut iter = CycleIter::new(&data).peekable();
+
+ let mut n = 0;
+ while let Some(_) = iter.next() {
+ let is_the_last = iter.peek().is_none();
+ assert_eq!(is_the_last, n == data.len() - 1);
+ n += 1;
+ if n > data.len() {
+ break;
+ }
+ }
+ assert_eq!(n, data.len());
+}
+
+#[test]
+fn test_iterator_peekable_remember_peek_none_2() {
+ let data = [0];
+ let mut iter = CycleIter::new(&data).peekable();
+ iter.next();
+ assert_eq!(iter.peek(), None);
+ assert_eq!(iter.last(), None);
+}
+
+#[test]
+fn test_iterator_peekable_remember_peek_none_3() {
+ let data = [0];
+ let mut iter = CycleIter::new(&data).peekable();
+ iter.peek();
+ assert_eq!(iter.nth(0), Some(&0));
+
+ let mut iter = CycleIter::new(&data).peekable();
+ iter.next();
+ assert_eq!(iter.peek(), None);
+ assert_eq!(iter.nth(0), None);
+}
+
+#[test]
+fn test_peek_try_folds() {
+ let f = &|acc, x| i32::checked_add(2 * acc, x);
+
+ assert_eq!((1..20).peekable().try_fold(7, f), (1..20).try_fold(7, f));
+ assert_eq!((1..20).peekable().try_rfold(7, f), (1..20).try_rfold(7, f));
+
+ let mut iter = (1..20).peekable();
+ assert_eq!(iter.peek(), Some(&1));
+ assert_eq!(iter.try_fold(7, f), (1..20).try_fold(7, f));
+
+ let mut iter = (1..20).peekable();
+ assert_eq!(iter.peek(), Some(&1));
+ assert_eq!(iter.try_rfold(7, f), (1..20).try_rfold(7, f));
+
+ let mut iter = [100, 20, 30, 40, 50, 60, 70].iter().cloned().peekable();
+ assert_eq!(iter.peek(), Some(&100));
+ assert_eq!(iter.try_fold(0, i8::checked_add), None);
+ assert_eq!(iter.peek(), Some(&40));
+
+ let mut iter = [100, 20, 30, 40, 50, 60, 70].iter().cloned().peekable();
+ assert_eq!(iter.peek(), Some(&100));
+ assert_eq!(iter.try_rfold(0, i8::checked_add), None);
+ assert_eq!(iter.peek(), Some(&100));
+ assert_eq!(iter.next_back(), Some(50));
+
+ let mut iter = (2..5).peekable();
+ assert_eq!(iter.peek(), Some(&2));
+ assert_eq!(iter.try_for_each(Err), Err(2));
+ assert_eq!(iter.peek(), Some(&3));
+ assert_eq!(iter.try_for_each(Err), Err(3));
+ assert_eq!(iter.peek(), Some(&4));
+ assert_eq!(iter.try_for_each(Err), Err(4));
+ assert_eq!(iter.peek(), None);
+ assert_eq!(iter.try_for_each(Err), Ok(()));
+
+ let mut iter = (2..5).peekable();
+ assert_eq!(iter.peek(), Some(&2));
+ assert_eq!(iter.try_rfold((), |(), x| Err(x)), Err(4));
+ assert_eq!(iter.peek(), Some(&2));
+ assert_eq!(iter.try_rfold((), |(), x| Err(x)), Err(3));
+ assert_eq!(iter.peek(), Some(&2));
+ assert_eq!(iter.try_rfold((), |(), x| Err(x)), Err(2));
+ assert_eq!(iter.peek(), None);
+ assert_eq!(iter.try_rfold((), |(), x| Err(x)), Ok(()));
+}
+
+#[test]
+fn test_peekable_non_fused() {
+ let mut iter = NonFused::new(empty::<i32>()).peekable();
+
+ assert_eq!(iter.peek(), None);
+ assert_eq!(iter.next_back(), None);
+}
diff --git a/library/core/tests/iter/adapters/scan.rs b/library/core/tests/iter/adapters/scan.rs
new file mode 100644
index 000000000..1d28ca6b7
--- /dev/null
+++ b/library/core/tests/iter/adapters/scan.rs
@@ -0,0 +1,20 @@
+use core::iter::*;
+
+#[test]
+fn test_iterator_scan() {
+ // test the type inference
+ fn add(old: &mut isize, new: &usize) -> Option<f64> {
+ *old += *new as isize;
+ Some(*old as f64)
+ }
+ let xs = [0, 1, 2, 3, 4];
+ let ys = [0f64, 1.0, 3.0, 6.0, 10.0];
+
+ let it = xs.iter().scan(0, add);
+ let mut i = 0;
+ for x in it {
+ assert_eq!(x, ys[i]);
+ i += 1;
+ }
+ assert_eq!(i, ys.len());
+}
diff --git a/library/core/tests/iter/adapters/skip.rs b/library/core/tests/iter/adapters/skip.rs
new file mode 100644
index 000000000..65f235e86
--- /dev/null
+++ b/library/core/tests/iter/adapters/skip.rs
@@ -0,0 +1,203 @@
+use core::iter::*;
+
+use super::Unfuse;
+
+#[test]
+fn test_iterator_skip() {
+ let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19, 20, 30];
+ let ys = [13, 15, 16, 17, 19, 20, 30];
+ let mut it = xs.iter().skip(5);
+ let mut i = 0;
+ while let Some(&x) = it.next() {
+ assert_eq!(x, ys[i]);
+ i += 1;
+ assert_eq!(it.len(), xs.len() - 5 - i);
+ }
+ assert_eq!(i, ys.len());
+ assert_eq!(it.len(), 0);
+}
+
+#[test]
+fn test_iterator_skip_doubleended() {
+ let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19, 20, 30];
+ let mut it = xs.iter().rev().skip(5);
+ assert_eq!(it.next(), Some(&15));
+ assert_eq!(it.by_ref().rev().next(), Some(&0));
+ assert_eq!(it.next(), Some(&13));
+ assert_eq!(it.by_ref().rev().next(), Some(&1));
+ assert_eq!(it.next(), Some(&5));
+ assert_eq!(it.by_ref().rev().next(), Some(&2));
+ assert_eq!(it.next(), Some(&3));
+ assert_eq!(it.next(), None);
+ let mut it = xs.iter().rev().skip(5).rev();
+ assert_eq!(it.next(), Some(&0));
+ assert_eq!(it.rev().next(), Some(&15));
+ let mut it_base = xs.iter();
+ {
+ let mut it = it_base.by_ref().skip(5).rev();
+ assert_eq!(it.next(), Some(&30));
+ assert_eq!(it.next(), Some(&20));
+ assert_eq!(it.next(), Some(&19));
+ assert_eq!(it.next(), Some(&17));
+ assert_eq!(it.next(), Some(&16));
+ assert_eq!(it.next(), Some(&15));
+ assert_eq!(it.next(), Some(&13));
+ assert_eq!(it.next(), None);
+ }
+ // make sure the skipped parts have not been consumed
+ assert_eq!(it_base.next(), Some(&0));
+ assert_eq!(it_base.next(), Some(&1));
+ assert_eq!(it_base.next(), Some(&2));
+ assert_eq!(it_base.next(), Some(&3));
+ assert_eq!(it_base.next(), Some(&5));
+ assert_eq!(it_base.next(), None);
+ let it = xs.iter().skip(5).rev();
+ assert_eq!(it.last(), Some(&13));
+}
+
+#[test]
+fn test_iterator_skip_nth() {
+ let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19, 20, 30];
+
+ let mut it = xs.iter().skip(0);
+ assert_eq!(it.nth(0), Some(&0));
+ assert_eq!(it.nth(1), Some(&2));
+
+ let mut it = xs.iter().skip(5);
+ assert_eq!(it.nth(0), Some(&13));
+ assert_eq!(it.nth(1), Some(&16));
+
+ let mut it = xs.iter().skip(12);
+ assert_eq!(it.nth(0), None);
+}
+
+#[test]
+fn test_skip_advance_by() {
+ assert_eq!((0..0).skip(10).advance_by(0), Ok(()));
+ assert_eq!((0..0).skip(10).advance_by(1), Err(0));
+ assert_eq!((0u128..(usize::MAX as u128) + 1).skip(usize::MAX).advance_by(usize::MAX), Err(1));
+ assert_eq!((0u128..u128::MAX).skip(usize::MAX).advance_by(1), Ok(()));
+
+ assert_eq!((0..2).skip(1).advance_back_by(10), Err(1));
+ assert_eq!((0..0).skip(1).advance_back_by(0), Ok(()));
+}
+
+#[test]
+fn test_iterator_skip_count() {
+ let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19, 20, 30];
+
+ assert_eq!(xs.iter().skip(0).count(), 12);
+ assert_eq!(xs.iter().skip(1).count(), 11);
+ assert_eq!(xs.iter().skip(11).count(), 1);
+ assert_eq!(xs.iter().skip(12).count(), 0);
+ assert_eq!(xs.iter().skip(13).count(), 0);
+}
+
+#[test]
+fn test_iterator_skip_last() {
+ let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19, 20, 30];
+
+ assert_eq!(xs.iter().skip(0).last(), Some(&30));
+ assert_eq!(xs.iter().skip(1).last(), Some(&30));
+ assert_eq!(xs.iter().skip(11).last(), Some(&30));
+ assert_eq!(xs.iter().skip(12).last(), None);
+ assert_eq!(xs.iter().skip(13).last(), None);
+
+ let mut it = xs.iter().skip(5);
+ assert_eq!(it.next(), Some(&13));
+ assert_eq!(it.last(), Some(&30));
+}
+
+#[test]
+fn test_iterator_skip_fold() {
+ let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19, 20, 30];
+ let ys = [13, 15, 16, 17, 19, 20, 30];
+
+ let it = xs.iter().skip(5);
+ let i = it.fold(0, |i, &x| {
+ assert_eq!(x, ys[i]);
+ i + 1
+ });
+ assert_eq!(i, ys.len());
+
+ let mut it = xs.iter().skip(5);
+ assert_eq!(it.next(), Some(&ys[0])); // process skips before folding
+ let i = it.fold(1, |i, &x| {
+ assert_eq!(x, ys[i]);
+ i + 1
+ });
+ assert_eq!(i, ys.len());
+
+ let it = xs.iter().skip(5);
+ let i = it.rfold(ys.len(), |i, &x| {
+ let i = i - 1;
+ assert_eq!(x, ys[i]);
+ i
+ });
+ assert_eq!(i, 0);
+
+ let mut it = xs.iter().skip(5);
+ assert_eq!(it.next(), Some(&ys[0])); // process skips before folding
+ let i = it.rfold(ys.len(), |i, &x| {
+ let i = i - 1;
+ assert_eq!(x, ys[i]);
+ i
+ });
+ assert_eq!(i, 1);
+}
+
+#[test]
+fn test_skip_try_folds() {
+ let f = &|acc, x| i32::checked_add(2 * acc, x);
+ assert_eq!((1..20).skip(9).try_fold(7, f), (10..20).try_fold(7, f));
+ assert_eq!((1..20).skip(9).try_rfold(7, f), (10..20).try_rfold(7, f));
+
+ let mut iter = (0..30).skip(10);
+ assert_eq!(iter.try_fold(0, i8::checked_add), None);
+ assert_eq!(iter.next(), Some(20));
+ assert_eq!(iter.try_rfold(0, i8::checked_add), None);
+ assert_eq!(iter.next_back(), Some(24));
+}
+
+#[test]
+fn test_skip_nth_back() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let mut it = xs.iter().skip(2);
+ assert_eq!(it.nth_back(0), Some(&5));
+ assert_eq!(it.nth_back(1), Some(&3));
+ assert_eq!(it.nth_back(0), Some(&2));
+ assert_eq!(it.nth_back(0), None);
+
+ let ys = [2, 3, 4, 5];
+ let mut ity = ys.iter();
+ let mut it = xs.iter().skip(2);
+ assert_eq!(it.nth_back(1), ity.nth_back(1));
+ assert_eq!(it.clone().nth(0), ity.clone().nth(0));
+ assert_eq!(it.nth_back(0), ity.nth_back(0));
+ assert_eq!(it.clone().nth(0), ity.clone().nth(0));
+ assert_eq!(it.nth_back(0), ity.nth_back(0));
+ assert_eq!(it.clone().nth(0), ity.clone().nth(0));
+ assert_eq!(it.nth_back(0), ity.nth_back(0));
+ assert_eq!(it.clone().nth(0), ity.clone().nth(0));
+
+ let mut it = xs.iter().skip(2);
+ assert_eq!(it.nth_back(4), None);
+ assert_eq!(it.nth_back(0), None);
+
+ let mut it = xs.iter();
+ it.by_ref().skip(2).nth_back(3);
+ assert_eq!(it.next_back(), Some(&1));
+
+ let mut it = xs.iter();
+ it.by_ref().skip(2).nth_back(10);
+ assert_eq!(it.next_back(), Some(&1));
+}
+
+#[test]
+fn test_skip_non_fused() {
+ let non_fused = Unfuse::new(0..10);
+
+    // `Skip` would previously exhaust the iterator in this `next` call and then erroneously try
+    // to advance it further. `Unfuse` tests that this doesn't happen by panicking in that
+    // scenario (see the sketch after this test).
+ let _ = non_fused.skip(20).next();
+}
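+// Illustrative sketch (assumption, not part of the upstream file) of the
+// failure mode guarded against here: a naive `Skip::next` along the lines of
+//     if self.n > 0 { self.iter.nth(self.n - 1); }
+//     self.iter.next()
+// polls the inner iterator again even when `nth` has already returned `None`;
+// `Unfuse` turns that second poll into a panic.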
diff --git a/library/core/tests/iter/adapters/skip_while.rs b/library/core/tests/iter/adapters/skip_while.rs
new file mode 100644
index 000000000..929d4f6e6
--- /dev/null
+++ b/library/core/tests/iter/adapters/skip_while.rs
@@ -0,0 +1,50 @@
+use core::iter::*;
+
+#[test]
+fn test_iterator_skip_while() {
+ let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19];
+ let ys = [15, 16, 17, 19];
+ let it = xs.iter().skip_while(|&x| *x < 15);
+ let mut i = 0;
+ for x in it {
+ assert_eq!(*x, ys[i]);
+ i += 1;
+ }
+ assert_eq!(i, ys.len());
+}
+
+#[test]
+fn test_iterator_skip_while_fold() {
+ let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19];
+ let ys = [15, 16, 17, 19];
+ let it = xs.iter().skip_while(|&x| *x < 15);
+ let i = it.fold(0, |i, &x| {
+ assert_eq!(x, ys[i]);
+ i + 1
+ });
+ assert_eq!(i, ys.len());
+
+ let mut it = xs.iter().skip_while(|&x| *x < 15);
+ assert_eq!(it.next(), Some(&ys[0])); // process skips before folding
+ let i = it.fold(1, |i, &x| {
+ assert_eq!(x, ys[i]);
+ i + 1
+ });
+ assert_eq!(i, ys.len());
+}
+
+#[test]
+fn test_skip_while_try_fold() {
+ let f = &|acc, x| i32::checked_add(2 * acc, x);
+ fn p(&x: &i32) -> bool {
+ (x % 10) <= 5
+ }
+ assert_eq!((1..20).skip_while(p).try_fold(7, f), (6..20).try_fold(7, f));
+ let mut iter = (1..20).skip_while(p);
+ assert_eq!(iter.nth(5), Some(11));
+ assert_eq!(iter.try_fold(7, f), (12..20).try_fold(7, f));
+
+ let mut iter = (0..50).skip_while(|&x| (x % 20) < 15);
+ assert_eq!(iter.try_fold(0, i8::checked_add), None);
+ assert_eq!(iter.next(), Some(23));
+}
diff --git a/library/core/tests/iter/adapters/step_by.rs b/library/core/tests/iter/adapters/step_by.rs
new file mode 100644
index 000000000..94f2fa8c2
--- /dev/null
+++ b/library/core/tests/iter/adapters/step_by.rs
@@ -0,0 +1,246 @@
+use core::iter::*;
+
+#[test]
+fn test_iterator_step_by() {
+ // Identity
+ let mut it = (0..).step_by(1).take(3);
+ assert_eq!(it.next(), Some(0));
+ assert_eq!(it.next(), Some(1));
+ assert_eq!(it.next(), Some(2));
+ assert_eq!(it.next(), None);
+
+ let mut it = (0..).step_by(3).take(4);
+ assert_eq!(it.next(), Some(0));
+ assert_eq!(it.next(), Some(3));
+ assert_eq!(it.next(), Some(6));
+ assert_eq!(it.next(), Some(9));
+ assert_eq!(it.next(), None);
+
+ let mut it = (0..3).step_by(1);
+ assert_eq!(it.next_back(), Some(2));
+ assert_eq!(it.next_back(), Some(1));
+ assert_eq!(it.next_back(), Some(0));
+ assert_eq!(it.next_back(), None);
+
+ let mut it = (0..11).step_by(3);
+ assert_eq!(it.next_back(), Some(9));
+ assert_eq!(it.next_back(), Some(6));
+ assert_eq!(it.next_back(), Some(3));
+ assert_eq!(it.next_back(), Some(0));
+ assert_eq!(it.next_back(), None);
+}
+
+#[test]
+fn test_iterator_step_by_nth() {
+ let mut it = (0..16).step_by(5);
+ assert_eq!(it.nth(0), Some(0));
+ assert_eq!(it.nth(0), Some(5));
+ assert_eq!(it.nth(0), Some(10));
+ assert_eq!(it.nth(0), Some(15));
+ assert_eq!(it.nth(0), None);
+
+ let it = (0..18).step_by(5);
+ assert_eq!(it.clone().nth(0), Some(0));
+ assert_eq!(it.clone().nth(1), Some(5));
+ assert_eq!(it.clone().nth(2), Some(10));
+ assert_eq!(it.clone().nth(3), Some(15));
+ assert_eq!(it.clone().nth(4), None);
+ assert_eq!(it.clone().nth(42), None);
+}
+
+#[test]
+fn test_iterator_step_by_nth_overflow() {
+ #[cfg(target_pointer_width = "16")]
+ type Bigger = u32;
+ #[cfg(target_pointer_width = "32")]
+ type Bigger = u64;
+ #[cfg(target_pointer_width = "64")]
+ type Bigger = u128;
+
+ #[derive(Clone)]
+ struct Test(Bigger);
+ impl Iterator for &mut Test {
+ type Item = i32;
+ fn next(&mut self) -> Option<Self::Item> {
+ Some(21)
+ }
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ self.0 += n as Bigger + 1;
+ Some(42)
+ }
+ }
+
+ let mut it = Test(0);
+ let root = usize::MAX >> (usize::BITS / 2);
+ let n = root + 20;
+ (&mut it).step_by(n).nth(n);
+ assert_eq!(it.0, n as Bigger * n as Bigger);
+
+ // large step
+ let mut it = Test(0);
+ (&mut it).step_by(usize::MAX).nth(5);
+ assert_eq!(it.0, (usize::MAX as Bigger) * 5);
+
+ // n + 1 overflows
+ let mut it = Test(0);
+ (&mut it).step_by(2).nth(usize::MAX);
+ assert_eq!(it.0, (usize::MAX as Bigger) * 2);
+
+ // n + 1 overflows
+ let mut it = Test(0);
+ (&mut it).step_by(1).nth(usize::MAX);
+ assert_eq!(it.0, (usize::MAX as Bigger) * 1);
+}
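+// Illustrative note (not part of the upstream file): `root` is roughly
+// sqrt(usize::MAX), so `step_by(n).nth(n)` advances about n * n elements,
+// well past usize::MAX. The widened `Bigger` counter verifies that `StepBy`
+// computed the combined offset without wrapping (the assert checks exactly
+// n * n).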
+
+#[test]
+fn test_iterator_step_by_nth_try_fold() {
+ let mut it = (0..).step_by(10);
+ assert_eq!(it.try_fold(0, i8::checked_add), None);
+ assert_eq!(it.next(), Some(60));
+ assert_eq!(it.try_fold(0, i8::checked_add), None);
+ assert_eq!(it.next(), Some(90));
+
+ let mut it = (100..).step_by(10);
+ assert_eq!(it.try_fold(50, i8::checked_add), None);
+ assert_eq!(it.next(), Some(110));
+
+ let mut it = (100..=100).step_by(10);
+ assert_eq!(it.next(), Some(100));
+ assert_eq!(it.try_fold(0, i8::checked_add), Some(0));
+}
+
+#[test]
+fn test_iterator_step_by_nth_back() {
+ let mut it = (0..16).step_by(5);
+ assert_eq!(it.nth_back(0), Some(15));
+ assert_eq!(it.nth_back(0), Some(10));
+ assert_eq!(it.nth_back(0), Some(5));
+ assert_eq!(it.nth_back(0), Some(0));
+ assert_eq!(it.nth_back(0), None);
+
+ let mut it = (0..16).step_by(5);
+ assert_eq!(it.next(), Some(0)); // to set `first_take` to `false`
+ assert_eq!(it.nth_back(0), Some(15));
+ assert_eq!(it.nth_back(0), Some(10));
+ assert_eq!(it.nth_back(0), Some(5));
+ assert_eq!(it.nth_back(0), None);
+
+ let it = || (0..18).step_by(5);
+ assert_eq!(it().nth_back(0), Some(15));
+ assert_eq!(it().nth_back(1), Some(10));
+ assert_eq!(it().nth_back(2), Some(5));
+ assert_eq!(it().nth_back(3), Some(0));
+ assert_eq!(it().nth_back(4), None);
+ assert_eq!(it().nth_back(42), None);
+}
+
+#[test]
+fn test_iterator_step_by_nth_try_rfold() {
+ let mut it = (0..100).step_by(10);
+ assert_eq!(it.try_rfold(0, i8::checked_add), None);
+ assert_eq!(it.next_back(), Some(70));
+ assert_eq!(it.next(), Some(0));
+ assert_eq!(it.try_rfold(0, i8::checked_add), None);
+ assert_eq!(it.next_back(), Some(30));
+
+ let mut it = (0..100).step_by(10);
+ assert_eq!(it.try_rfold(50, i8::checked_add), None);
+ assert_eq!(it.next_back(), Some(80));
+
+ let mut it = (100..=100).step_by(10);
+ assert_eq!(it.next_back(), Some(100));
+ assert_eq!(it.try_fold(0, i8::checked_add), Some(0));
+}
+
+#[test]
+#[should_panic]
+fn test_iterator_step_by_zero() {
+ let mut it = (0..).step_by(0);
+ it.next();
+}
+
+#[test]
+fn test_iterator_step_by_size_hint() {
+ struct StubSizeHint(usize, Option<usize>);
+ impl Iterator for StubSizeHint {
+ type Item = ();
+ fn next(&mut self) -> Option<()> {
+ self.0 -= 1;
+ if let Some(ref mut upper) = self.1 {
+ *upper -= 1;
+ }
+ Some(())
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.0, self.1)
+ }
+ }
+
+ // The two checks in each case are needed because the logic
+ // is different before the first call to `next()`.
+
+ let mut it = StubSizeHint(10, Some(10)).step_by(1);
+ assert_eq!(it.size_hint(), (10, Some(10)));
+ it.next();
+ assert_eq!(it.size_hint(), (9, Some(9)));
+
+ // exact multiple
+ let mut it = StubSizeHint(10, Some(10)).step_by(3);
+ assert_eq!(it.size_hint(), (4, Some(4)));
+ it.next();
+ assert_eq!(it.size_hint(), (3, Some(3)));
+
+ // larger base range, but not enough to get another element
+ let mut it = StubSizeHint(12, Some(12)).step_by(3);
+ assert_eq!(it.size_hint(), (4, Some(4)));
+ it.next();
+ assert_eq!(it.size_hint(), (3, Some(3)));
+
+ // smaller base range, so fewer resulting elements
+ let mut it = StubSizeHint(9, Some(9)).step_by(3);
+ assert_eq!(it.size_hint(), (3, Some(3)));
+ it.next();
+ assert_eq!(it.size_hint(), (2, Some(2)));
+
+ // infinite upper bound
+ let mut it = StubSizeHint(usize::MAX, None).step_by(1);
+ assert_eq!(it.size_hint(), (usize::MAX, None));
+ it.next();
+ assert_eq!(it.size_hint(), (usize::MAX - 1, None));
+
+ // still infinite with larger step
+ let mut it = StubSizeHint(7, None).step_by(3);
+ assert_eq!(it.size_hint(), (3, None));
+ it.next();
+ assert_eq!(it.size_hint(), (2, None));
+
+ // propagates ExactSizeIterator
+ let a = [1, 2, 3, 4, 5];
+ let it = a.iter().step_by(2);
+ assert_eq!(it.len(), 3);
+
+    // Cannot be TrustedLen: with a step greater than one, an iterator
+    // reporting (usize::MAX, None) no longer meets TrustedLen's safety
+    // requirements (see the worked note after this test)
+ trait TrustedLenCheck {
+ fn test(self) -> bool;
+ }
+ impl<T: Iterator> TrustedLenCheck for T {
+ default fn test(self) -> bool {
+ false
+ }
+ }
+ impl<T: TrustedLen> TrustedLenCheck for T {
+ fn test(self) -> bool {
+ true
+ }
+ }
+ assert!(TrustedLenCheck::test(a.iter()));
+ assert!(!TrustedLenCheck::test(a.iter().step_by(1)));
+}
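+// Worked note (illustrative, not part of the upstream file): under
+// `TrustedLen`, a hint of (usize::MAX, None) promises *more than* usize::MAX
+// items. After `.step_by(2)` the true length is only known to exceed
+// usize::MAX / 2; it may or may not fit in usize, so neither an exact hint
+// nor (usize::MAX, None) can be guaranteed and the contract cannot be met.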
+
+#[test]
+fn test_step_by_skip() {
+ assert_eq!((0..640).step_by(128).skip(1).collect::<Vec<_>>(), [128, 256, 384, 512]);
+ assert_eq!((0..=50).step_by(10).nth(3), Some(30));
+ assert_eq!((200..=255u8).step_by(10).nth(3), Some(230));
+}
diff --git a/library/core/tests/iter/adapters/take.rs b/library/core/tests/iter/adapters/take.rs
new file mode 100644
index 000000000..bfb659f0a
--- /dev/null
+++ b/library/core/tests/iter/adapters/take.rs
@@ -0,0 +1,148 @@
+use core::iter::*;
+
+#[test]
+fn test_iterator_take() {
+ let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19];
+ let ys = [0, 1, 2, 3, 5];
+
+ let mut it = xs.iter().take(ys.len());
+ let mut i = 0;
+ assert_eq!(it.len(), ys.len());
+ while let Some(&x) = it.next() {
+ assert_eq!(x, ys[i]);
+ i += 1;
+ assert_eq!(it.len(), ys.len() - i);
+ }
+ assert_eq!(i, ys.len());
+ assert_eq!(it.len(), 0);
+
+ let mut it = xs.iter().take(ys.len());
+ let mut i = 0;
+ assert_eq!(it.len(), ys.len());
+ while let Some(&x) = it.next_back() {
+ i += 1;
+ assert_eq!(x, ys[ys.len() - i]);
+ assert_eq!(it.len(), ys.len() - i);
+ }
+ assert_eq!(i, ys.len());
+ assert_eq!(it.len(), 0);
+}
+
+#[test]
+fn test_iterator_take_nth() {
+ let xs = [0, 1, 2, 4, 5];
+ let mut it = xs.iter();
+ {
+ let mut take = it.by_ref().take(3);
+ let mut i = 0;
+ while let Some(&x) = take.nth(0) {
+ assert_eq!(x, i);
+ i += 1;
+ }
+ }
+ assert_eq!(it.nth(1), Some(&5));
+ assert_eq!(it.nth(0), None);
+
+ let xs = [0, 1, 2, 3, 4];
+ let mut it = xs.iter().take(7);
+ let mut i = 1;
+ while let Some(&x) = it.nth(1) {
+ assert_eq!(x, i);
+ i += 2;
+ }
+}
+
+#[test]
+fn test_iterator_take_nth_back() {
+ let xs = [0, 1, 2, 4, 5];
+ let mut it = xs.iter();
+ {
+ let mut take = it.by_ref().take(3);
+ let mut i = 0;
+ while let Some(&x) = take.nth_back(0) {
+ i += 1;
+ assert_eq!(x, 3 - i);
+ }
+ }
+ assert_eq!(it.nth_back(0), None);
+
+ let xs = [0, 1, 2, 3, 4];
+ let mut it = xs.iter().take(7);
+ assert_eq!(it.nth_back(1), Some(&3));
+ assert_eq!(it.nth_back(1), Some(&1));
+ assert_eq!(it.nth_back(1), None);
+}
+
+#[test]
+fn test_take_advance_by() {
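+ // In this version's unstable API, `advance_by(n)` returns `Ok(())` when all
+ // `n` steps were taken, and `Err(k)` with the number of steps that did
+ // succeed when the iterator ran out early.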
+ let mut take = (0..10).take(3);
+ assert_eq!(take.advance_by(2), Ok(()));
+ assert_eq!(take.next(), Some(2));
+ assert_eq!(take.advance_by(1), Err(0));
+
+ assert_eq!((0..0).take(10).advance_by(0), Ok(()));
+ assert_eq!((0..0).take(10).advance_by(1), Err(0));
+ assert_eq!((0..10).take(4).advance_by(5), Err(4));
+
+ let mut take = (0..10).take(3);
+ assert_eq!(take.advance_back_by(2), Ok(()));
+ assert_eq!(take.next(), Some(0));
+ assert_eq!(take.advance_back_by(1), Err(0));
+
+ assert_eq!((0..2).take(1).advance_back_by(10), Err(1));
+ assert_eq!((0..0).take(1).advance_back_by(1), Err(0));
+ assert_eq!((0..0).take(1).advance_back_by(0), Ok(()));
+ assert_eq!((0..usize::MAX).take(100).advance_back_by(usize::MAX), Err(100));
+}
+
+#[test]
+fn test_iterator_take_short() {
+ let xs = [0, 1, 2, 3];
+
+ let mut it = xs.iter().take(5);
+ let mut i = 0;
+ assert_eq!(it.len(), xs.len());
+ while let Some(&x) = it.next() {
+ assert_eq!(x, xs[i]);
+ i += 1;
+ assert_eq!(it.len(), xs.len() - i);
+ }
+ assert_eq!(i, xs.len());
+ assert_eq!(it.len(), 0);
+
+ let mut it = xs.iter().take(5);
+ let mut i = 0;
+ assert_eq!(it.len(), xs.len());
+ while let Some(&x) = it.next_back() {
+ i += 1;
+ assert_eq!(x, xs[xs.len() - i]);
+ assert_eq!(it.len(), xs.len() - i);
+ }
+ assert_eq!(i, xs.len());
+ assert_eq!(it.len(), 0);
+}
+
+#[test]
+fn test_take_try_folds() {
+ let f = &|acc, x| i32::checked_add(2 * acc, x);
+ assert_eq!((10..30).take(10).try_fold(7, f), (10..20).try_fold(7, f));
+ assert_eq!((10..30).take(10).try_rfold(7, f), (10..20).try_rfold(7, f));
+
+ let mut iter = (10..30).take(20);
+ assert_eq!(iter.try_fold(0, i8::checked_add), None);
+ assert_eq!(iter.next(), Some(20));
+ assert_eq!(iter.try_rfold(0, i8::checked_add), None);
+ assert_eq!(iter.next_back(), Some(24));
+
+ let mut iter = (2..20).take(3);
+ assert_eq!(iter.try_for_each(Err), Err(2));
+ assert_eq!(iter.try_for_each(Err), Err(3));
+ assert_eq!(iter.try_for_each(Err), Err(4));
+ assert_eq!(iter.try_for_each(Err), Ok(()));
+
+ let mut iter = (2..20).take(3).rev();
+ assert_eq!(iter.try_for_each(Err), Err(4));
+ assert_eq!(iter.try_for_each(Err), Err(3));
+ assert_eq!(iter.try_for_each(Err), Err(2));
+ assert_eq!(iter.try_for_each(Err), Ok(()));
+}
diff --git a/library/core/tests/iter/adapters/take_while.rs b/library/core/tests/iter/adapters/take_while.rs
new file mode 100644
index 000000000..6f1ebab29
--- /dev/null
+++ b/library/core/tests/iter/adapters/take_while.rs
@@ -0,0 +1,29 @@
+use core::iter::*;
+
+#[test]
+fn test_iterator_take_while() {
+ let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19];
+ let ys = [0, 1, 2, 3, 5, 13];
+ let it = xs.iter().take_while(|&x| *x < 15);
+ let mut i = 0;
+ for x in it {
+ assert_eq!(*x, ys[i]);
+ i += 1;
+ }
+ assert_eq!(i, ys.len());
+}
+
+#[test]
+fn test_take_while_folds() {
+ let f = &|acc, x| i32::checked_add(2 * acc, x);
+ assert_eq!((1..20).take_while(|&x| x != 10).try_fold(7, f), (1..10).try_fold(7, f));
+ let mut iter = (1..20).take_while(|&x| x != 10);
+ assert_eq!(iter.try_fold(0, |x, y| Some(x + y)), Some((1..10).sum()));
+ assert_eq!(iter.next(), None, "flag should be set");
+ let iter = (1..20).take_while(|&x| x != 10);
+ assert_eq!(iter.fold(0, |x, y| x + y), (1..10).sum());
+
+ let mut iter = (10..50).take_while(|&x| x != 40);
+ assert_eq!(iter.try_fold(0, i8::checked_add), None);
+ assert_eq!(iter.next(), Some(20));
+}
diff --git a/library/core/tests/iter/adapters/zip.rs b/library/core/tests/iter/adapters/zip.rs
new file mode 100644
index 000000000..585cfbb90
--- /dev/null
+++ b/library/core/tests/iter/adapters/zip.rs
@@ -0,0 +1,315 @@
+use super::*;
+use core::iter::*;
+
+#[test]
+fn test_zip_nth() {
+ let xs = [0, 1, 2, 4, 5];
+ let ys = [10, 11, 12];
+
+ let mut it = xs.iter().zip(&ys);
+ assert_eq!(it.nth(0), Some((&0, &10)));
+ assert_eq!(it.nth(1), Some((&2, &12)));
+ assert_eq!(it.nth(0), None);
+
+ let mut it = xs.iter().zip(&ys);
+ assert_eq!(it.nth(3), None);
+
+ let mut it = ys.iter().zip(&xs);
+ assert_eq!(it.nth(3), None);
+}
+
+#[test]
+fn test_zip_nth_side_effects() {
+ let mut a = Vec::new();
+ let mut b = Vec::new();
+ let value = [1, 2, 3, 4, 5, 6]
+ .iter()
+ .cloned()
+ .map(|n| {
+ a.push(n);
+ n * 10
+ })
+ .zip([2, 3, 4, 5, 6, 7, 8].iter().cloned().map(|n| {
+ b.push(n * 100);
+ n * 1000
+ }))
+ .skip(1)
+ .nth(3);
+ assert_eq!(value, Some((50, 6000)));
+ assert_eq!(a, vec![1, 2, 3, 4, 5]);
+ assert_eq!(b, vec![200, 300, 400, 500, 600]);
+}
+
+#[test]
+fn test_zip_next_back_side_effects() {
+ let mut a = Vec::new();
+ let mut b = Vec::new();
+ let mut iter = [1, 2, 3, 4, 5, 6]
+ .iter()
+ .cloned()
+ .map(|n| {
+ a.push(n);
+ n * 10
+ })
+ .zip([2, 3, 4, 5, 6, 7, 8].iter().cloned().map(|n| {
+ b.push(n * 100);
+ n * 1000
+ }));
+
+ // The second iterator is one item longer, so `next_back` is called on it
+ // one more time.
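+ // That extra call is why `b` records five pushes below while `a` records
+ // only four.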
+ assert_eq!(iter.next_back(), Some((60, 7000)));
+ assert_eq!(iter.next_back(), Some((50, 6000)));
+ assert_eq!(iter.next_back(), Some((40, 5000)));
+ assert_eq!(iter.next_back(), Some((30, 4000)));
+ assert_eq!(a, vec![6, 5, 4, 3]);
+ assert_eq!(b, vec![800, 700, 600, 500, 400]);
+}
+
+#[test]
+fn test_zip_nth_back_side_effects() {
+ let mut a = Vec::new();
+ let mut b = Vec::new();
+ let value = [1, 2, 3, 4, 5, 6]
+ .iter()
+ .cloned()
+ .map(|n| {
+ a.push(n);
+ n * 10
+ })
+ .zip([2, 3, 4, 5, 6, 7, 8].iter().cloned().map(|n| {
+ b.push(n * 100);
+ n * 1000
+ }))
+ .nth_back(3);
+ assert_eq!(value, Some((30, 4000)));
+ assert_eq!(a, vec![6, 5, 4, 3]);
+ assert_eq!(b, vec![800, 700, 600, 500, 400]);
+}
+
+#[test]
+fn test_zip_next_back_side_effects_exhausted() {
+ let mut a = Vec::new();
+ let mut b = Vec::new();
+ let mut iter = [1, 2, 3, 4, 5, 6]
+ .iter()
+ .cloned()
+ .map(|n| {
+ a.push(n);
+ n * 10
+ })
+ .zip([2, 3, 4].iter().cloned().map(|n| {
+ b.push(n * 100);
+ n * 1000
+ }));
+
+ iter.next();
+ iter.next();
+ iter.next();
+ iter.next();
+ assert_eq!(iter.next_back(), None);
+ assert_eq!(a, vec![1, 2, 3, 4, 6, 5]);
+ assert_eq!(b, vec![200, 300, 400]);
+}
+
+#[test]
+fn test_zip_cloned_sideffectful() {
+ let xs = [CountClone::new(), CountClone::new(), CountClone::new(), CountClone::new()];
+ let ys = [CountClone::new(), CountClone::new()];
+
+ for _ in xs.iter().cloned().zip(ys.iter().cloned()) {}
+
+ assert_eq!(&xs, &[1, 1, 1, 0][..]);
+ assert_eq!(&ys, &[1, 1][..]);
+
+ let xs = [CountClone::new(), CountClone::new()];
+ let ys = [CountClone::new(), CountClone::new(), CountClone::new(), CountClone::new()];
+
+ for _ in xs.iter().cloned().zip(ys.iter().cloned()) {}
+
+ assert_eq!(&xs, &[1, 1][..]);
+ assert_eq!(&ys, &[1, 1, 0, 0][..]);
+}
+
+#[test]
+fn test_zip_map_sideffectful() {
+ let mut xs = [0; 6];
+ let mut ys = [0; 4];
+
+ for _ in xs.iter_mut().map(|x| *x += 1).zip(ys.iter_mut().map(|y| *y += 1)) {}
+
+ assert_eq!(&xs, &[1, 1, 1, 1, 1, 0]);
+ assert_eq!(&ys, &[1, 1, 1, 1]);
+
+ let mut xs = [0; 4];
+ let mut ys = [0; 6];
+
+ for _ in xs.iter_mut().map(|x| *x += 1).zip(ys.iter_mut().map(|y| *y += 1)) {}
+
+ assert_eq!(&xs, &[1, 1, 1, 1]);
+ assert_eq!(&ys, &[1, 1, 1, 1, 0, 0]);
+}
+
+#[test]
+fn test_zip_map_rev_sideffectful() {
+ let mut xs = [0; 6];
+ let mut ys = [0; 4];
+
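+ // `next_back` on a zip first trims the longer side down to the common
+ // length (running its `map` side effects along the way) and then yields the
+ // last pair, so three `xs` slots but only one `ys` slot change below.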
+ {
+ let mut it = xs.iter_mut().map(|x| *x += 1).zip(ys.iter_mut().map(|y| *y += 1));
+ it.next_back();
+ }
+ assert_eq!(&xs, &[0, 0, 0, 1, 1, 1]);
+ assert_eq!(&ys, &[0, 0, 0, 1]);
+
+ let mut xs = [0; 6];
+ let mut ys = [0; 4];
+
+ {
+ let mut it = xs.iter_mut().map(|x| *x += 1).zip(ys.iter_mut().map(|y| *y += 1));
+ (&mut it).take(5).count();
+ it.next_back();
+ }
+ assert_eq!(&xs, &[1, 1, 1, 1, 1, 1]);
+ assert_eq!(&ys, &[1, 1, 1, 1]);
+}
+
+#[test]
+fn test_zip_nested_sideffectful() {
+ let mut xs = [0; 6];
+ let ys = [0; 4];
+
+ {
+ // Test that the `map` side effect still runs when nested inside `enumerate`.
+ let it = xs.iter_mut().map(|x| *x = 1).enumerate().zip(&ys);
+ it.count();
+ }
+ assert_eq!(&xs, &[1, 1, 1, 1, 1, 0]);
+}
+
+#[test]
+fn test_zip_nth_back_side_effects_exhausted() {
+ let mut a = Vec::new();
+ let mut b = Vec::new();
+ let mut iter = [1, 2, 3, 4, 5, 6]
+ .iter()
+ .cloned()
+ .map(|n| {
+ a.push(n);
+ n * 10
+ })
+ .zip([2, 3, 4].iter().cloned().map(|n| {
+ b.push(n * 100);
+ n * 1000
+ }));
+
+ iter.next();
+ iter.next();
+ iter.next();
+ iter.next();
+ assert_eq!(iter.nth_back(0), None);
+ assert_eq!(a, vec![1, 2, 3, 4, 6, 5]);
+ assert_eq!(b, vec![200, 300, 400]);
+}
+
+#[test]
+fn test_zip_trusted_random_access_composition() {
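+ // `Zip` propagates `TrustedRandomAccess`, so a zip of zips (even after one
+ // side has been advanced) should still implement it; the helper function
+ // below only compiles if that holds.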
+ let a = [0, 1, 2, 3, 4];
+ let b = a;
+ let c = a;
+
+ let a = a.iter().copied();
+ let b = b.iter().copied();
+ let mut c = c.iter().copied();
+ c.next();
+
+ let mut z1 = a.zip(b);
+ assert_eq!(z1.next().unwrap(), (0, 0));
+
+ let mut z2 = z1.zip(c);
+ fn assert_trusted_random_access<T: TrustedRandomAccess>(_a: &T) {}
+ assert_trusted_random_access(&z2);
+ assert_eq!(z2.next().unwrap(), ((1, 1), 1));
+}
+
+#[test]
+#[cfg(panic = "unwind")]
+fn test_zip_trusted_random_access_next_back_drop() {
+ use std::panic::catch_unwind;
+ use std::panic::AssertUnwindSafe;
+
+ let mut counter = 0;
+
+ let it = [42].iter().map(|e| {
+ let c = counter;
+ counter += 1;
+ if c == 0 {
+ panic!("bomb");
+ }
+
+ e
+ });
+ let it2 = [(); 0].iter();
+ let mut zip = it.zip(it2);
+ catch_unwind(AssertUnwindSafe(|| {
+ zip.next_back();
+ }))
+ .unwrap_err();
+ assert!(zip.next().is_none());
+ assert_eq!(counter, 1);
+}
+
+#[test]
+fn test_double_ended_zip() {
+ let xs = [1, 2, 3, 4, 5, 6];
+ let ys = [1, 2, 3, 7];
+ let mut it = xs.iter().cloned().zip(ys);
+ assert_eq!(it.next(), Some((1, 1)));
+ assert_eq!(it.next(), Some((2, 2)));
+ assert_eq!(it.next_back(), Some((4, 7)));
+ assert_eq!(it.next_back(), Some((3, 3)));
+ assert_eq!(it.next(), None);
+}
+
+#[test]
+fn test_issue_82282() {
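+ // Regression test for rust-lang/rust#82282: composing `zip`s whose inner
+ // index had already overflowed must not report a bogus size hint or yield
+ // phantom items.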
+ fn overflowed_zip(arr: &[i32]) -> impl Iterator<Item = (i32, &())> {
+ static UNIT_EMPTY_ARR: [(); 0] = [];
+
+ let mapped = arr.into_iter().map(|i| *i);
+ let mut zipped = mapped.zip(UNIT_EMPTY_ARR.iter());
+ zipped.next();
+ zipped
+ }
+
+ let arr = [1, 2, 3];
+ let zip = overflowed_zip(&arr).zip(overflowed_zip(&arr));
+
+ assert_eq!(zip.size_hint(), (0, Some(0)));
+ for _ in zip {
+ panic!();
+ }
+}
+
+#[test]
+fn test_issue_82291() {
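+ // Regression test for rust-lang/rust#82291: once `next_back` has consumed
+ // the only pair, a following `next` must not run the `map` closure again.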
+ use std::cell::Cell;
+
+ let mut v1 = [()];
+ let v2 = [()];
+
+ let called = Cell::new(0);
+
+ let mut zip = v1
+ .iter_mut()
+ .map(|r| {
+ called.set(called.get() + 1);
+ r
+ })
+ .zip(&v2);
+
+ zip.next_back();
+ assert_eq!(called.get(), 1);
+ zip.next();
+ assert_eq!(called.get(), 1);
+}
diff --git a/library/core/tests/iter/mod.rs b/library/core/tests/iter/mod.rs
new file mode 100644
index 000000000..770b6f760
--- /dev/null
+++ b/library/core/tests/iter/mod.rs
@@ -0,0 +1,102 @@
+//! Note
+//! ----
+//! You're probably viewing this file because you're adding a test (or you might
+//! just be browsing; in that case, hey there!).
+//!
+//! The iter test suite is split into two big modules, and some miscellaneous
+//! smaller modules. The two big modules are `adapters` and `traits`.
+//!
+//! `adapters` is for methods on `Iterator` that adapt the data inside the
+//! iterator, whether by emitting another iterator or by returning an item
+//! from inside the iterator after running a closure on each item.
+//!
+//! `traits` is for traits that extend `Iterator` (and for the `Iterator`
+//! trait itself, mostly its miscellaneous methods). For the most part,
+//! if a test in `traits` uses a specific adapter, then it should be moved to
+//! that adapter's test file in `adapters`.
+
+mod adapters;
+mod range;
+mod sources;
+mod traits;
+
+use core::cell::Cell;
+use core::convert::TryFrom;
+use core::iter::*;
+
+pub fn is_trusted_len<I: TrustedLen>(_: I) {}
+
+#[test]
+fn test_multi_iter() {
+ let xs = [1, 2, 3, 4];
+ let ys = [4, 3, 2, 1];
+ assert!(xs.iter().eq(ys.iter().rev()));
+ assert!(xs.iter().lt(xs.iter().skip(2)));
+}
+
+#[test]
+fn test_counter_from_iter() {
+ let it = (0..).step_by(5).take(10);
+ let xs: Vec<isize> = FromIterator::from_iter(it);
+ assert_eq!(xs, [0, 5, 10, 15, 20, 25, 30, 35, 40, 45]);
+}
+
+#[test]
+fn test_functor_laws() {
+ // identity:
+ fn identity<T>(x: T) -> T {
+ x
+ }
+ assert_eq!((0..10).map(identity).sum::<usize>(), (0..10).sum());
+
+ // composition:
+ fn f(x: usize) -> usize {
+ x + 3
+ }
+ fn g(x: usize) -> usize {
+ x * 2
+ }
+ fn h(x: usize) -> usize {
+ g(f(x))
+ }
+ assert_eq!((0..10).map(f).map(g).sum::<usize>(), (0..10).map(h).sum());
+}
+
+#[test]
+fn test_monad_laws_left_identity() {
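+ // Left identity: `once(a).flat_map(f)` should behave like `f(a)`.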
+ fn f(x: usize) -> impl Iterator<Item = usize> {
+ (0..10).map(move |y| x * y)
+ }
+ assert_eq!(once(42).flat_map(f).sum::<usize>(), f(42).sum());
+}
+
+#[test]
+fn test_monad_laws_right_identity() {
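+ // Right identity: `it.flat_map(once)` should behave like `it`.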
+ assert_eq!((0..10).flat_map(|x| once(x)).sum::<usize>(), (0..10).sum());
+}
+
+#[test]
+fn test_monad_laws_associativity() {
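+ // Associativity: flat-mapping `f` and then `g` should equal flat-mapping
+ // `|x| f(x).flat_map(g)` in a single pass.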
+ fn f(x: usize) -> impl Iterator<Item = usize> {
+ 0..x
+ }
+ fn g(x: usize) -> impl Iterator<Item = usize> {
+ (0..x).rev()
+ }
+ assert_eq!(
+ (0..10).flat_map(f).flat_map(g).sum::<usize>(),
+ (0..10).flat_map(|x| f(x).flat_map(g)).sum::<usize>()
+ );
+}
+
+#[test]
+pub fn extend_for_unit() {
+ let mut x = 0;
+ {
+ let iter = (0..5).map(|_| {
+ x += 1;
+ });
+ ().extend(iter);
+ }
+ assert_eq!(x, 5);
+}
diff --git a/library/core/tests/iter/range.rs b/library/core/tests/iter/range.rs
new file mode 100644
index 000000000..84498a8ea
--- /dev/null
+++ b/library/core/tests/iter/range.rs
@@ -0,0 +1,472 @@
+use super::*;
+
+#[test]
+fn test_range() {
+ assert_eq!((0..5).collect::<Vec<_>>(), [0, 1, 2, 3, 4]);
+ assert_eq!((-10..-1).collect::<Vec<_>>(), [-10, -9, -8, -7, -6, -5, -4, -3, -2]);
+ assert_eq!((0..5).rev().collect::<Vec<_>>(), [4, 3, 2, 1, 0]);
+ assert_eq!((200..-5).count(), 0);
+ assert_eq!((200..-5).rev().count(), 0);
+ assert_eq!((200..200).count(), 0);
+ assert_eq!((200..200).rev().count(), 0);
+
+ assert_eq!((0..100).size_hint(), (100, Some(100)));
+ // this check is only meaningful when `usize` is smaller than `u64`
+ assert_eq!((usize::MAX - 1..usize::MAX).size_hint(), (1, Some(1)));
+ assert_eq!((-10..-1).size_hint(), (9, Some(9)));
+ assert_eq!((-1..-10).size_hint(), (0, Some(0)));
+
+ assert_eq!((-70..58).size_hint(), (128, Some(128)));
+ assert_eq!((-128..127).size_hint(), (255, Some(255)));
+ assert_eq!(
+ (-2..isize::MAX).size_hint(),
+ (isize::MAX as usize + 2, Some(isize::MAX as usize + 2))
+ );
+}
+
+#[test]
+fn test_char_range() {
+ use std::char;
+ // Miri is too slow
+ let from = if cfg!(miri) { char::from_u32(0xD800 - 10).unwrap() } else { '\0' };
+ let to = if cfg!(miri) { char::from_u32(0xDFFF + 10).unwrap() } else { char::MAX };
+ assert!((from..=to).eq((from as u32..=to as u32).filter_map(char::from_u32)));
+ assert!((from..=to).rev().eq((from as u32..=to as u32).filter_map(char::from_u32).rev()));
+
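+ // `char` ranges skip the surrogate gap U+D800..=U+DFFF, so stepping from
+ // U+D7FF lands directly on U+E000.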
+ assert_eq!(('\u{D7FF}'..='\u{E000}').count(), 2);
+ assert_eq!(('\u{D7FF}'..='\u{E000}').size_hint(), (2, Some(2)));
+ assert_eq!(('\u{D7FF}'..'\u{E000}').count(), 1);
+ assert_eq!(('\u{D7FF}'..'\u{E000}').size_hint(), (1, Some(1)));
+}
+
+#[test]
+fn test_range_exhaustion() {
+ let mut r = 10..10;
+ assert!(r.is_empty());
+ assert_eq!(r.next(), None);
+ assert_eq!(r.next_back(), None);
+ assert_eq!(r, 10..10);
+
+ let mut r = 10..12;
+ assert_eq!(r.next(), Some(10));
+ assert_eq!(r.next(), Some(11));
+ assert!(r.is_empty());
+ assert_eq!(r, 12..12);
+ assert_eq!(r.next(), None);
+
+ let mut r = 10..12;
+ assert_eq!(r.next_back(), Some(11));
+ assert_eq!(r.next_back(), Some(10));
+ assert!(r.is_empty());
+ assert_eq!(r, 10..10);
+ assert_eq!(r.next_back(), None);
+
+ let mut r = 100..10;
+ assert!(r.is_empty());
+ assert_eq!(r.next(), None);
+ assert_eq!(r.next_back(), None);
+ assert_eq!(r, 100..10);
+}
+
+#[test]
+fn test_range_inclusive_exhaustion() {
+ let mut r = 10..=10;
+ assert_eq!(r.next(), Some(10));
+ assert!(r.is_empty());
+ assert_eq!(r.next(), None);
+ assert_eq!(r.next(), None);
+
+ assert_eq!(*r.start(), 10);
+ assert_eq!(*r.end(), 10);
+ assert_ne!(r, 10..=10);
+
+ let mut r = 10..=10;
+ assert_eq!(r.next_back(), Some(10));
+ assert!(r.is_empty());
+ assert_eq!(r.next_back(), None);
+
+ assert_eq!(*r.start(), 10);
+ assert_eq!(*r.end(), 10);
+ assert_ne!(r, 10..=10);
+
+ let mut r = 10..=12;
+ assert_eq!(r.next(), Some(10));
+ assert_eq!(r.next(), Some(11));
+ assert_eq!(r.next(), Some(12));
+ assert!(r.is_empty());
+ assert_eq!(r.next(), None);
+
+ let mut r = 10..=12;
+ assert_eq!(r.next_back(), Some(12));
+ assert_eq!(r.next_back(), Some(11));
+ assert_eq!(r.next_back(), Some(10));
+ assert!(r.is_empty());
+ assert_eq!(r.next_back(), None);
+
+ let mut r = 10..=12;
+ assert_eq!(r.nth(2), Some(12));
+ assert!(r.is_empty());
+ assert_eq!(r.next(), None);
+
+ let mut r = 10..=12;
+ assert_eq!(r.nth(5), None);
+ assert!(r.is_empty());
+ assert_eq!(r.next(), None);
+
+ let mut r = 100..=10;
+ assert_eq!(r.next(), None);
+ assert!(r.is_empty());
+ assert_eq!(r.next(), None);
+ assert_eq!(r.next(), None);
+ assert_eq!(r, 100..=10);
+
+ let mut r = 100..=10;
+ assert_eq!(r.next_back(), None);
+ assert!(r.is_empty());
+ assert_eq!(r.next_back(), None);
+ assert_eq!(r.next_back(), None);
+ assert_eq!(r, 100..=10);
+}
+
+#[test]
+fn test_range_nth() {
+ assert_eq!((10..15).nth(0), Some(10));
+ assert_eq!((10..15).nth(1), Some(11));
+ assert_eq!((10..15).nth(4), Some(14));
+ assert_eq!((10..15).nth(5), None);
+
+ let mut r = 10..20;
+ assert_eq!(r.nth(2), Some(12));
+ assert_eq!(r, 13..20);
+ assert_eq!(r.nth(2), Some(15));
+ assert_eq!(r, 16..20);
+ assert_eq!(r.nth(10), None);
+ assert_eq!(r, 20..20);
+}
+
+#[test]
+fn test_range_nth_back() {
+ assert_eq!((10..15).nth_back(0), Some(14));
+ assert_eq!((10..15).nth_back(1), Some(13));
+ assert_eq!((10..15).nth_back(4), Some(10));
+ assert_eq!((10..15).nth_back(5), None);
+ assert_eq!((-120..80_i8).nth_back(199), Some(-120));
+
+ let mut r = 10..20;
+ assert_eq!(r.nth_back(2), Some(17));
+ assert_eq!(r, 10..17);
+ assert_eq!(r.nth_back(2), Some(14));
+ assert_eq!(r, 10..14);
+ assert_eq!(r.nth_back(10), None);
+ assert_eq!(r, 10..10);
+}
+
+#[test]
+fn test_range_from_nth() {
+ assert_eq!((10..).nth(0), Some(10));
+ assert_eq!((10..).nth(1), Some(11));
+ assert_eq!((10..).nth(4), Some(14));
+
+ let mut r = 10..;
+ assert_eq!(r.nth(2), Some(12));
+ assert_eq!(r, 13..);
+ assert_eq!(r.nth(2), Some(15));
+ assert_eq!(r, 16..);
+ assert_eq!(r.nth(10), Some(26));
+ assert_eq!(r, 27..);
+
+ assert_eq!((0..).size_hint(), (usize::MAX, None));
+}
+
+#[test]
+fn test_range_from_take() {
+ let mut it = (0..).take(3);
+ assert_eq!(it.next(), Some(0));
+ assert_eq!(it.next(), Some(1));
+ assert_eq!(it.next(), Some(2));
+ assert_eq!(it.next(), None);
+ is_trusted_len((0..).take(3));
+ assert_eq!((0..).take(3).size_hint(), (3, Some(3)));
+ assert_eq!((0..).take(0).size_hint(), (0, Some(0)));
+ assert_eq!((0..).take(usize::MAX).size_hint(), (usize::MAX, Some(usize::MAX)));
+}
+
+#[test]
+fn test_range_from_take_collect() {
+ let v: Vec<_> = (0..).take(3).collect();
+ assert_eq!(v, vec![0, 1, 2]);
+}
+
+#[test]
+fn test_range_inclusive_nth() {
+ assert_eq!((10..=15).nth(0), Some(10));
+ assert_eq!((10..=15).nth(1), Some(11));
+ assert_eq!((10..=15).nth(5), Some(15));
+ assert_eq!((10..=15).nth(6), None);
+
+ let mut exhausted_via_next = 10_u8..=20;
+ while exhausted_via_next.next().is_some() {}
+
+ let mut r = 10_u8..=20;
+ assert_eq!(r.nth(2), Some(12));
+ assert_eq!(r, 13..=20);
+ assert_eq!(r.nth(2), Some(15));
+ assert_eq!(r, 16..=20);
+ assert_eq!(r.is_empty(), false);
+ assert_eq!(ExactSizeIterator::is_empty(&r), false);
+ assert_eq!(r.nth(10), None);
+ assert_eq!(r.is_empty(), true);
+ assert_eq!(r, exhausted_via_next);
+ assert_eq!(ExactSizeIterator::is_empty(&r), true);
+}
+
+#[test]
+fn test_range_inclusive_nth_back() {
+ assert_eq!((10..=15).nth_back(0), Some(15));
+ assert_eq!((10..=15).nth_back(1), Some(14));
+ assert_eq!((10..=15).nth_back(5), Some(10));
+ assert_eq!((10..=15).nth_back(6), None);
+ assert_eq!((-120..=80_i8).nth_back(200), Some(-120));
+
+ let mut exhausted_via_next_back = 10_u8..=20;
+ while exhausted_via_next_back.next_back().is_some() {}
+
+ let mut r = 10_u8..=20;
+ assert_eq!(r.nth_back(2), Some(18));
+ assert_eq!(r, 10..=17);
+ assert_eq!(r.nth_back(2), Some(15));
+ assert_eq!(r, 10..=14);
+ assert_eq!(r.is_empty(), false);
+ assert_eq!(ExactSizeIterator::is_empty(&r), false);
+ assert_eq!(r.nth_back(10), None);
+ assert_eq!(r.is_empty(), true);
+ assert_eq!(r, exhausted_via_next_back);
+ assert_eq!(ExactSizeIterator::is_empty(&r), true);
+}
+
+#[test]
+fn test_range_len() {
+ assert_eq!((0..10_u8).len(), 10);
+ assert_eq!((9..10_u8).len(), 1);
+ assert_eq!((10..10_u8).len(), 0);
+ assert_eq!((11..10_u8).len(), 0);
+ assert_eq!((100..10_u8).len(), 0);
+}
+
+#[test]
+fn test_range_inclusive_len() {
+ assert_eq!((0..=10_u8).len(), 11);
+ assert_eq!((9..=10_u8).len(), 2);
+ assert_eq!((10..=10_u8).len(), 1);
+ assert_eq!((11..=10_u8).len(), 0);
+ assert_eq!((100..=10_u8).len(), 0);
+}
+
+#[test]
+fn test_range_step() {
+ #![allow(deprecated)]
+
+ assert_eq!((0..20).step_by(5).collect::<Vec<isize>>(), [0, 5, 10, 15]);
+ assert_eq!((1..21).rev().step_by(5).collect::<Vec<isize>>(), [20, 15, 10, 5]);
+ assert_eq!((1..21).rev().step_by(6).collect::<Vec<isize>>(), [20, 14, 8, 2]);
+ assert_eq!((200..255).step_by(50).collect::<Vec<u8>>(), [200, 250]);
+ assert_eq!((200..-5).step_by(1).collect::<Vec<isize>>(), []);
+ assert_eq!((200..200).step_by(1).collect::<Vec<isize>>(), []);
+
+ assert_eq!((0..20).step_by(1).size_hint(), (20, Some(20)));
+ assert_eq!((0..20).step_by(21).size_hint(), (1, Some(1)));
+ assert_eq!((0..20).step_by(5).size_hint(), (4, Some(4)));
+ assert_eq!((1..21).rev().step_by(5).size_hint(), (4, Some(4)));
+ assert_eq!((1..21).rev().step_by(6).size_hint(), (4, Some(4)));
+ assert_eq!((20..-5).step_by(1).size_hint(), (0, Some(0)));
+ assert_eq!((20..20).step_by(1).size_hint(), (0, Some(0)));
+ assert_eq!((i8::MIN..i8::MAX).step_by(-(i8::MIN as i32) as usize).size_hint(), (2, Some(2)));
+ assert_eq!((i16::MIN..i16::MAX).step_by(i16::MAX as usize).size_hint(), (3, Some(3)));
+ assert_eq!((isize::MIN..isize::MAX).step_by(1).size_hint(), (usize::MAX, Some(usize::MAX)));
+}
+
+#[test]
+fn test_range_advance_by() {
+ let mut r = 0..usize::MAX;
+ r.advance_by(0).unwrap();
+ r.advance_back_by(0).unwrap();
+
+ assert_eq!(r.len(), usize::MAX);
+
+ r.advance_by(1).unwrap();
+ r.advance_back_by(1).unwrap();
+
+ assert_eq!((r.start, r.end), (1, usize::MAX - 1));
+
+ assert_eq!(r.advance_by(usize::MAX), Err(usize::MAX - 2));
+
+ r.advance_by(0).unwrap();
+ r.advance_back_by(0).unwrap();
+
+ let mut r = 0u128..u128::MAX;
+
+ r.advance_by(usize::MAX).unwrap();
+ r.advance_back_by(usize::MAX).unwrap();
+
+ assert_eq!((r.start, r.end), (0u128 + usize::MAX as u128, u128::MAX - usize::MAX as u128));
+}
+
+#[test]
+fn test_range_inclusive_step() {
+ assert_eq!((0..=50).step_by(10).collect::<Vec<_>>(), [0, 10, 20, 30, 40, 50]);
+ assert_eq!((0..=5).step_by(1).collect::<Vec<_>>(), [0, 1, 2, 3, 4, 5]);
+ assert_eq!((200..=255u8).step_by(10).collect::<Vec<_>>(), [200, 210, 220, 230, 240, 250]);
+ assert_eq!((250..=255u8).step_by(1).collect::<Vec<_>>(), [250, 251, 252, 253, 254, 255]);
+}
+
+#[test]
+fn test_range_last_max() {
+ assert_eq!((0..20).last(), Some(19));
+ assert_eq!((-20..0).last(), Some(-1));
+ assert_eq!((5..5).last(), None);
+
+ assert_eq!((0..20).max(), Some(19));
+ assert_eq!((-20..0).max(), Some(-1));
+ assert_eq!((5..5).max(), None);
+}
+
+#[test]
+fn test_range_inclusive_last_max() {
+ assert_eq!((0..=20).last(), Some(20));
+ assert_eq!((-20..=0).last(), Some(0));
+ assert_eq!((5..=5).last(), Some(5));
+ let mut r = 10..=10;
+ r.next();
+ assert_eq!(r.last(), None);
+
+ assert_eq!((0..=20).max(), Some(20));
+ assert_eq!((-20..=0).max(), Some(0));
+ assert_eq!((5..=5).max(), Some(5));
+ let mut r = 10..=10;
+ r.next();
+ assert_eq!(r.max(), None);
+}
+
+#[test]
+fn test_range_min() {
+ assert_eq!((0..20).min(), Some(0));
+ assert_eq!((-20..0).min(), Some(-20));
+ assert_eq!((5..5).min(), None);
+}
+
+#[test]
+fn test_range_inclusive_min() {
+ assert_eq!((0..=20).min(), Some(0));
+ assert_eq!((-20..=0).min(), Some(-20));
+ assert_eq!((5..=5).min(), Some(5));
+ let mut r = 10..=10;
+ r.next();
+ assert_eq!(r.min(), None);
+}
+
+#[test]
+fn test_range_inclusive_folds() {
+ assert_eq!((1..=10).sum::<i32>(), 55);
+ assert_eq!((1..=10).rev().sum::<i32>(), 55);
+
+ let mut it = 44..=50;
+ assert_eq!(it.try_fold(0, i8::checked_add), None);
+ assert_eq!(it, 47..=50);
+ assert_eq!(it.try_fold(0, i8::checked_add), None);
+ assert_eq!(it, 50..=50);
+ assert_eq!(it.try_fold(0, i8::checked_add), Some(50));
+ assert!(it.is_empty());
+ assert_eq!(it.try_fold(0, i8::checked_add), Some(0));
+ assert!(it.is_empty());
+
+ let mut it = 40..=47;
+ assert_eq!(it.try_rfold(0, i8::checked_add), None);
+ assert_eq!(it, 40..=44);
+ assert_eq!(it.try_rfold(0, i8::checked_add), None);
+ assert_eq!(it, 40..=41);
+ assert_eq!(it.try_rfold(0, i8::checked_add), Some(81));
+ assert!(it.is_empty());
+ assert_eq!(it.try_rfold(0, i8::checked_add), Some(0));
+ assert!(it.is_empty());
+
+ let mut it = 10..=20;
+ assert_eq!(it.try_fold(0, |a, b| Some(a + b)), Some(165));
+ assert!(it.is_empty());
+ assert_eq!(it.try_fold(0, |a, b| Some(a + b)), Some(0));
+ assert!(it.is_empty());
+
+ let mut it = 10..=20;
+ assert_eq!(it.try_rfold(0, |a, b| Some(a + b)), Some(165));
+ assert!(it.is_empty());
+ assert_eq!(it.try_rfold(0, |a, b| Some(a + b)), Some(0));
+ assert!(it.is_empty());
+}
+
+#[test]
+fn test_range_size_hint() {
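+ // When a range's exact length doesn't fit in `usize`, the hint saturates:
+ // the lower bound clamps to `usize::MAX` and the upper bound becomes `None`.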
+ assert_eq!((0..0usize).size_hint(), (0, Some(0)));
+ assert_eq!((0..100usize).size_hint(), (100, Some(100)));
+ assert_eq!((0..usize::MAX).size_hint(), (usize::MAX, Some(usize::MAX)));
+
+ let umax = u128::try_from(usize::MAX).unwrap();
+ assert_eq!((0..0u128).size_hint(), (0, Some(0)));
+ assert_eq!((0..100u128).size_hint(), (100, Some(100)));
+ assert_eq!((0..umax).size_hint(), (usize::MAX, Some(usize::MAX)));
+ assert_eq!((0..umax + 1).size_hint(), (usize::MAX, None));
+
+ assert_eq!((0..0isize).size_hint(), (0, Some(0)));
+ assert_eq!((-100..100isize).size_hint(), (200, Some(200)));
+ assert_eq!((isize::MIN..isize::MAX).size_hint(), (usize::MAX, Some(usize::MAX)));
+
+ let imin = i128::try_from(isize::MIN).unwrap();
+ let imax = i128::try_from(isize::MAX).unwrap();
+ assert_eq!((0..0i128).size_hint(), (0, Some(0)));
+ assert_eq!((-100..100i128).size_hint(), (200, Some(200)));
+ assert_eq!((imin..imax).size_hint(), (usize::MAX, Some(usize::MAX)));
+ assert_eq!((imin..imax + 1).size_hint(), (usize::MAX, None));
+}
+
+#[test]
+fn test_range_inclusive_size_hint() {
+ assert_eq!((1..=0usize).size_hint(), (0, Some(0)));
+ assert_eq!((0..=0usize).size_hint(), (1, Some(1)));
+ assert_eq!((0..=100usize).size_hint(), (101, Some(101)));
+ assert_eq!((0..=usize::MAX - 1).size_hint(), (usize::MAX, Some(usize::MAX)));
+ assert_eq!((0..=usize::MAX).size_hint(), (usize::MAX, None));
+
+ let umax = u128::try_from(usize::MAX).unwrap();
+ assert_eq!((1..=0u128).size_hint(), (0, Some(0)));
+ assert_eq!((0..=0u128).size_hint(), (1, Some(1)));
+ assert_eq!((0..=100u128).size_hint(), (101, Some(101)));
+ assert_eq!((0..=umax - 1).size_hint(), (usize::MAX, Some(usize::MAX)));
+ assert_eq!((0..=umax).size_hint(), (usize::MAX, None));
+ assert_eq!((0..=umax + 1).size_hint(), (usize::MAX, None));
+
+ assert_eq!((0..=-1isize).size_hint(), (0, Some(0)));
+ assert_eq!((0..=0isize).size_hint(), (1, Some(1)));
+ assert_eq!((-100..=100isize).size_hint(), (201, Some(201)));
+ assert_eq!((isize::MIN..=isize::MAX - 1).size_hint(), (usize::MAX, Some(usize::MAX)));
+ assert_eq!((isize::MIN..=isize::MAX).size_hint(), (usize::MAX, None));
+
+ let imin = i128::try_from(isize::MIN).unwrap();
+ let imax = i128::try_from(isize::MAX).unwrap();
+ assert_eq!((0..=-1i128).size_hint(), (0, Some(0)));
+ assert_eq!((0..=0i128).size_hint(), (1, Some(1)));
+ assert_eq!((-100..=100i128).size_hint(), (201, Some(201)));
+ assert_eq!((imin..=imax - 1).size_hint(), (usize::MAX, Some(usize::MAX)));
+ assert_eq!((imin..=imax).size_hint(), (usize::MAX, None));
+ assert_eq!((imin..=imax + 1).size_hint(), (usize::MAX, None));
+}
+
+#[test]
+fn test_double_ended_range() {
+ assert_eq!((11..14).rev().collect::<Vec<_>>(), [13, 12, 11]);
+ for _ in (10..0).rev() {
+ panic!("unreachable");
+ }
+
+ assert_eq!((11..14).rev().collect::<Vec<_>>(), [13, 12, 11]);
+ for _ in (10..0).rev() {
+ panic!("unreachable");
+ }
+}
diff --git a/library/core/tests/iter/sources.rs b/library/core/tests/iter/sources.rs
new file mode 100644
index 000000000..d0114ade6
--- /dev/null
+++ b/library/core/tests/iter/sources.rs
@@ -0,0 +1,108 @@
+use super::*;
+use core::iter::*;
+
+#[test]
+fn test_repeat() {
+ let mut it = repeat(42);
+ assert_eq!(it.next(), Some(42));
+ assert_eq!(it.next(), Some(42));
+ assert_eq!(it.next(), Some(42));
+ assert_eq!(repeat(42).size_hint(), (usize::MAX, None));
+}
+
+#[test]
+fn test_repeat_take() {
+ let mut it = repeat(42).take(3);
+ assert_eq!(it.next(), Some(42));
+ assert_eq!(it.next(), Some(42));
+ assert_eq!(it.next(), Some(42));
+ assert_eq!(it.next(), None);
+ is_trusted_len(repeat(42).take(3));
+ assert_eq!(repeat(42).take(3).size_hint(), (3, Some(3)));
+ assert_eq!(repeat(42).take(0).size_hint(), (0, Some(0)));
+ assert_eq!(repeat(42).take(usize::MAX).size_hint(), (usize::MAX, Some(usize::MAX)));
+}
+
+#[test]
+fn test_repeat_take_collect() {
+ let v: Vec<_> = repeat(42).take(3).collect();
+ assert_eq!(v, vec![42, 42, 42]);
+}
+
+#[test]
+fn test_repeat_with() {
+ #[derive(PartialEq, Debug)]
+ struct NotClone(usize);
+ let mut it = repeat_with(|| NotClone(42));
+ assert_eq!(it.next(), Some(NotClone(42)));
+ assert_eq!(it.next(), Some(NotClone(42)));
+ assert_eq!(it.next(), Some(NotClone(42)));
+ assert_eq!(repeat_with(|| NotClone(42)).size_hint(), (usize::MAX, None));
+}
+
+#[test]
+fn test_repeat_with_take() {
+ let mut it = repeat_with(|| 42).take(3);
+ assert_eq!(it.next(), Some(42));
+ assert_eq!(it.next(), Some(42));
+ assert_eq!(it.next(), Some(42));
+ assert_eq!(it.next(), None);
+ is_trusted_len(repeat_with(|| 42).take(3));
+ assert_eq!(repeat_with(|| 42).take(3).size_hint(), (3, Some(3)));
+ assert_eq!(repeat_with(|| 42).take(0).size_hint(), (0, Some(0)));
+ assert_eq!(repeat_with(|| 42).take(usize::MAX).size_hint(), (usize::MAX, Some(usize::MAX)));
+}
+
+#[test]
+fn test_repeat_with_take_collect() {
+ let mut curr = 1;
+ let v: Vec<_> = repeat_with(|| {
+ let tmp = curr;
+ curr *= 2;
+ tmp
+ })
+ .take(5)
+ .collect();
+ assert_eq!(v, vec![1, 2, 4, 8, 16]);
+}
+
+#[test]
+fn test_successors() {
+ let mut powers_of_10 = successors(Some(1_u16), |n| n.checked_mul(10));
+ assert_eq!(powers_of_10.by_ref().collect::<Vec<_>>(), &[1, 10, 100, 1_000, 10_000]);
+ assert_eq!(powers_of_10.next(), None);
+
+ let mut empty = successors(None::<u32>, |_| unimplemented!());
+ assert_eq!(empty.next(), None);
+ assert_eq!(empty.next(), None);
+}
+
+#[test]
+fn test_once() {
+ let mut it = once(42);
+ assert_eq!(it.next(), Some(42));
+ assert_eq!(it.next(), None);
+}
+
+#[test]
+fn test_once_with() {
+ let count = Cell::new(0);
+ let mut it = once_with(|| {
+ count.set(count.get() + 1);
+ 42
+ });
+
+ assert_eq!(count.get(), 0);
+ assert_eq!(it.next(), Some(42));
+ assert_eq!(count.get(), 1);
+ assert_eq!(it.next(), None);
+ assert_eq!(count.get(), 1);
+ assert_eq!(it.next(), None);
+ assert_eq!(count.get(), 1);
+}
+
+#[test]
+fn test_empty() {
+ let mut it = empty::<i32>();
+ assert_eq!(it.next(), None);
+}
diff --git a/library/core/tests/iter/traits/accum.rs b/library/core/tests/iter/traits/accum.rs
new file mode 100644
index 000000000..f3eeb31fe
--- /dev/null
+++ b/library/core/tests/iter/traits/accum.rs
@@ -0,0 +1,66 @@
+use core::iter::*;
+
+#[test]
+fn test_iterator_sum() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+ assert_eq!(v[..4].iter().cloned().sum::<i32>(), 6);
+ assert_eq!(v.iter().cloned().sum::<i32>(), 55);
+ assert_eq!(v[..0].iter().cloned().sum::<i32>(), 0);
+}
+
+#[test]
+fn test_iterator_sum_result() {
+ let v: &[Result<i32, ()>] = &[Ok(1), Ok(2), Ok(3), Ok(4)];
+ assert_eq!(v.iter().cloned().sum::<Result<i32, _>>(), Ok(10));
+ let v: &[Result<i32, ()>] = &[Ok(1), Err(()), Ok(3), Ok(4)];
+ assert_eq!(v.iter().cloned().sum::<Result<i32, _>>(), Err(()));
+
+ #[derive(PartialEq, Debug)]
+ struct S(Result<i32, ()>);
+
+ impl Sum<Result<i32, ()>> for S {
+ fn sum<I: Iterator<Item = Result<i32, ()>>>(mut iter: I) -> Self {
+ // takes the sum by repeatedly calling `next` on `iter`,
+ // thus testing that repeated calls to `ResultShunt::try_fold`
+ // produce the expected results
+ Self(iter.by_ref().sum())
+ }
+ }
+
+ let v: &[Result<i32, ()>] = &[Ok(1), Ok(2), Ok(3), Ok(4)];
+ assert_eq!(v.iter().cloned().sum::<S>(), S(Ok(10)));
+ let v: &[Result<i32, ()>] = &[Ok(1), Err(()), Ok(3), Ok(4)];
+ assert_eq!(v.iter().cloned().sum::<S>(), S(Err(())));
+}
+
+#[test]
+fn test_iterator_sum_option() {
+ let v: &[Option<i32>] = &[Some(1), Some(2), Some(3), Some(4)];
+ assert_eq!(v.iter().cloned().sum::<Option<i32>>(), Some(10));
+ let v: &[Option<i32>] = &[Some(1), None, Some(3), Some(4)];
+ assert_eq!(v.iter().cloned().sum::<Option<i32>>(), None);
+}
+
+#[test]
+fn test_iterator_product() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+ assert_eq!(v[..4].iter().cloned().product::<i32>(), 0);
+ assert_eq!(v[1..5].iter().cloned().product::<i32>(), 24);
+ assert_eq!(v[..0].iter().cloned().product::<i32>(), 1);
+}
+
+#[test]
+fn test_iterator_product_result() {
+ let v: &[Result<i32, ()>] = &[Ok(1), Ok(2), Ok(3), Ok(4)];
+ assert_eq!(v.iter().cloned().product::<Result<i32, _>>(), Ok(24));
+ let v: &[Result<i32, ()>] = &[Ok(1), Err(()), Ok(3), Ok(4)];
+ assert_eq!(v.iter().cloned().product::<Result<i32, _>>(), Err(()));
+}
+
+#[test]
+fn test_iterator_product_option() {
+ let v: &[Option<i32>] = &[Some(1), Some(2), Some(3), Some(4)];
+ assert_eq!(v.iter().cloned().product::<Option<i32>>(), Some(24));
+ let v: &[Option<i32>] = &[Some(1), None, Some(3), Some(4)];
+ assert_eq!(v.iter().cloned().product::<Option<i32>>(), None);
+}
diff --git a/library/core/tests/iter/traits/double_ended.rs b/library/core/tests/iter/traits/double_ended.rs
new file mode 100644
index 000000000..00ef4a6e6
--- /dev/null
+++ b/library/core/tests/iter/traits/double_ended.rs
@@ -0,0 +1,91 @@
+//! Note
+//! ----
+//! You're probably viewing this file because you're adding a test (or you might
+//! just be browsing; in that case, hey there!).
+//!
+//! If you've made a test that happens to use one of `DoubleEndedIterator`'s
+//! methods, but it tests another adapter or trait, you should *add it to the
+//! adapter or trait's test file*.
+//!
+//! Some examples would be `adapters::cloned::test_cloned_try_folds` or
+//! `adapters::flat_map::test_double_ended_flat_map`, which use `try_fold` and
+//! `next_back`, but test their own adapter.
+
+#[test]
+fn test_iterator_rev_nth_back() {
+ let v: &[_] = &[0, 1, 2, 3, 4];
+ for i in 0..v.len() {
+ assert_eq!(v.iter().rev().nth_back(i).unwrap(), &v[i]);
+ }
+ assert_eq!(v.iter().rev().nth_back(v.len()), None);
+}
+
+#[test]
+fn test_iterator_rev_nth() {
+ let v: &[_] = &[0, 1, 2, 3, 4];
+ for i in 0..v.len() {
+ assert_eq!(v.iter().rev().nth(i).unwrap(), &v[v.len() - 1 - i]);
+ }
+ assert_eq!(v.iter().rev().nth(v.len()), None);
+}
+
+#[test]
+fn test_rev() {
+ let xs = [2, 4, 6, 8, 10, 12, 14, 16];
+ let mut it = xs.iter();
+ it.next();
+ it.next();
+ assert!(it.rev().cloned().collect::<Vec<isize>>() == vec![16, 14, 12, 10, 8, 6]);
+}
+
+#[test]
+fn test_rev_try_folds() {
+ let f = &|acc, x| i32::checked_add(2 * acc, x);
+ assert_eq!((1..10).rev().try_fold(7, f), (1..10).try_rfold(7, f));
+ assert_eq!((1..10).rev().try_rfold(7, f), (1..10).try_fold(7, f));
+
+ let a = [10, 20, 30, 40, 100, 60, 70, 80, 90];
+ let mut iter = a.iter().rev();
+ assert_eq!(iter.try_fold(0_i8, |acc, &x| acc.checked_add(x)), None);
+ assert_eq!(iter.next(), Some(&70));
+ let mut iter = a.iter().rev();
+ assert_eq!(iter.try_rfold(0_i8, |acc, &x| acc.checked_add(x)), None);
+ assert_eq!(iter.next_back(), Some(&60));
+}
+
+#[test]
+fn test_rposition() {
+ fn f(xy: &(isize, char)) -> bool {
+ let (_x, y) = *xy;
+ y == 'b'
+ }
+ fn g(xy: &(isize, char)) -> bool {
+ let (_x, y) = *xy;
+ y == 'd'
+ }
+ let v = [(0, 'a'), (1, 'b'), (2, 'c'), (3, 'b')];
+
+ assert_eq!(v.iter().rposition(f), Some(3));
+ assert!(v.iter().rposition(g).is_none());
+}
+
+#[test]
+fn test_rev_rposition() {
+ let v = [0, 0, 1, 1];
+ assert_eq!(v.iter().rev().rposition(|&x| x == 1), Some(1));
+}
+
+#[test]
+#[should_panic]
+fn test_rposition_panic() {
+ let u = (Box::new(0), Box::new(0));
+ let v: [(Box<_>, Box<_>); 4] = [u.clone(), u.clone(), u.clone(), u];
+ let mut i = 0;
+ v.iter().rposition(|_elt| {
+ if i == 2 {
+ panic!()
+ }
+ i += 1;
+ false
+ });
+}
diff --git a/library/core/tests/iter/traits/iterator.rs b/library/core/tests/iter/traits/iterator.rs
new file mode 100644
index 000000000..37345c1d3
--- /dev/null
+++ b/library/core/tests/iter/traits/iterator.rs
@@ -0,0 +1,593 @@
+/// A wrapper struct that implements `Eq` and `Ord` based on the wrapped
+/// integer modulo 3. Used to test that `Iterator::max` and `Iterator::min`
+/// return the correct element (the last maximum, the first minimum) when
+/// several elements compare equal.
+#[derive(Debug)]
+struct Mod3(i32);
+
+impl PartialEq for Mod3 {
+ fn eq(&self, other: &Self) -> bool {
+ self.0 % 3 == other.0 % 3
+ }
+}
+
+impl Eq for Mod3 {}
+
+impl PartialOrd for Mod3 {
+ fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl Ord for Mod3 {
+ fn cmp(&self, other: &Self) -> core::cmp::Ordering {
+ (self.0 % 3).cmp(&(other.0 % 3))
+ }
+}
+
+#[test]
+fn test_lt() {
+ let empty: [isize; 0] = [];
+ let xs = [1, 2, 3];
+ let ys = [1, 2, 0];
+
+ assert!(!xs.iter().lt(ys.iter()));
+ assert!(!xs.iter().le(ys.iter()));
+ assert!(xs.iter().gt(ys.iter()));
+ assert!(xs.iter().ge(ys.iter()));
+
+ assert!(ys.iter().lt(xs.iter()));
+ assert!(ys.iter().le(xs.iter()));
+ assert!(!ys.iter().gt(xs.iter()));
+ assert!(!ys.iter().ge(xs.iter()));
+
+ assert!(empty.iter().lt(xs.iter()));
+ assert!(empty.iter().le(xs.iter()));
+ assert!(!empty.iter().gt(xs.iter()));
+ assert!(!empty.iter().ge(xs.iter()));
+
+ // Sequence with NaN
+ let u = [1.0f64, 2.0];
+ let v = [0.0f64 / 0.0, 3.0];
+
+ assert!(!u.iter().lt(v.iter()));
+ assert!(!u.iter().le(v.iter()));
+ assert!(!u.iter().gt(v.iter()));
+ assert!(!u.iter().ge(v.iter()));
+
+ let a = [0.0f64 / 0.0];
+ let b = [1.0f64];
+ let c = [2.0f64];
+
+ assert!(a.iter().lt(b.iter()) == (a[0] < b[0]));
+ assert!(a.iter().le(b.iter()) == (a[0] <= b[0]));
+ assert!(a.iter().gt(b.iter()) == (a[0] > b[0]));
+ assert!(a.iter().ge(b.iter()) == (a[0] >= b[0]));
+
+ assert!(c.iter().lt(b.iter()) == (c[0] < b[0]));
+ assert!(c.iter().le(b.iter()) == (c[0] <= b[0]));
+ assert!(c.iter().gt(b.iter()) == (c[0] > b[0]));
+ assert!(c.iter().ge(b.iter()) == (c[0] >= b[0]));
+}
+
+#[test]
+fn test_cmp_by() {
+ use core::cmp::Ordering;
+
+ let f = |x: i32, y: i32| (x * x).cmp(&y);
+ let xs = || [1, 2, 3, 4].iter().copied();
+ let ys = || [1, 4, 16].iter().copied();
+
+ assert_eq!(xs().cmp_by(ys(), f), Ordering::Less);
+ assert_eq!(ys().cmp_by(xs(), f), Ordering::Greater);
+ assert_eq!(xs().cmp_by(xs().map(|x| x * x), f), Ordering::Equal);
+ assert_eq!(xs().rev().cmp_by(ys().rev(), f), Ordering::Greater);
+ assert_eq!(xs().cmp_by(ys().rev(), f), Ordering::Less);
+ assert_eq!(xs().cmp_by(ys().take(2), f), Ordering::Greater);
+}
+
+#[test]
+fn test_partial_cmp_by() {
+ use core::cmp::Ordering;
+
+ let f = |x: i32, y: i32| (x * x).partial_cmp(&y);
+ let xs = || [1, 2, 3, 4].iter().copied();
+ let ys = || [1, 4, 16].iter().copied();
+
+ assert_eq!(xs().partial_cmp_by(ys(), f), Some(Ordering::Less));
+ assert_eq!(ys().partial_cmp_by(xs(), f), Some(Ordering::Greater));
+ assert_eq!(xs().partial_cmp_by(xs().map(|x| x * x), f), Some(Ordering::Equal));
+ assert_eq!(xs().rev().partial_cmp_by(ys().rev(), f), Some(Ordering::Greater));
+ assert_eq!(xs().partial_cmp_by(xs().rev(), f), Some(Ordering::Less));
+ assert_eq!(xs().partial_cmp_by(ys().take(2), f), Some(Ordering::Greater));
+
+ let f = |x: f64, y: f64| (x * x).partial_cmp(&y);
+ let xs = || [1.0, 2.0, 3.0, 4.0].iter().copied();
+ let ys = || [1.0, 4.0, f64::NAN, 16.0].iter().copied();
+
+ assert_eq!(xs().partial_cmp_by(ys(), f), None);
+ assert_eq!(ys().partial_cmp_by(xs(), f), Some(Ordering::Greater));
+}
+
+#[test]
+fn test_eq_by() {
+ let f = |x: i32, y: i32| x * x == y;
+ let xs = || [1, 2, 3, 4].iter().copied();
+ let ys = || [1, 4, 9, 16].iter().copied();
+
+ assert!(xs().eq_by(ys(), f));
+ assert!(!ys().eq_by(xs(), f));
+ assert!(!xs().eq_by(xs(), f));
+ assert!(!ys().eq_by(ys(), f));
+
+ assert!(!xs().take(3).eq_by(ys(), f));
+ assert!(!xs().eq_by(ys().take(3), f));
+ assert!(xs().take(3).eq_by(ys().take(3), f));
+}
+
+#[test]
+fn test_iterator_nth() {
+ let v: &[_] = &[0, 1, 2, 3, 4];
+ for i in 0..v.len() {
+ assert_eq!(v.iter().nth(i).unwrap(), &v[i]);
+ }
+ assert_eq!(v.iter().nth(v.len()), None);
+}
+
+#[test]
+fn test_iterator_nth_back() {
+ let v: &[_] = &[0, 1, 2, 3, 4];
+ for i in 0..v.len() {
+ assert_eq!(v.iter().nth_back(i).unwrap(), &v[v.len() - 1 - i]);
+ }
+ assert_eq!(v.iter().nth_back(v.len()), None);
+}
+
+#[test]
+fn test_iterator_advance_by() {
+ let v: &[_] = &[0, 1, 2, 3, 4];
+
+ for i in 0..v.len() {
+ let mut iter = v.iter();
+ assert_eq!(iter.advance_by(i), Ok(()));
+ assert_eq!(iter.next().unwrap(), &v[i]);
+ assert_eq!(iter.advance_by(100), Err(v.len() - 1 - i));
+ }
+
+ assert_eq!(v.iter().advance_by(v.len()), Ok(()));
+ assert_eq!(v.iter().advance_by(100), Err(v.len()));
+}
+
+#[test]
+fn test_iterator_advance_back_by() {
+ let v: &[_] = &[0, 1, 2, 3, 4];
+
+ for i in 0..v.len() {
+ let mut iter = v.iter();
+ assert_eq!(iter.advance_back_by(i), Ok(()));
+ assert_eq!(iter.next_back().unwrap(), &v[v.len() - 1 - i]);
+ assert_eq!(iter.advance_back_by(100), Err(v.len() - 1 - i));
+ }
+
+ assert_eq!(v.iter().advance_back_by(v.len()), Ok(()));
+ assert_eq!(v.iter().advance_back_by(100), Err(v.len()));
+}
+
+#[test]
+fn test_iterator_rev_advance_back_by() {
+ let v: &[_] = &[0, 1, 2, 3, 4];
+
+ for i in 0..v.len() {
+ let mut iter = v.iter().rev();
+ assert_eq!(iter.advance_back_by(i), Ok(()));
+ assert_eq!(iter.next_back().unwrap(), &v[i]);
+ assert_eq!(iter.advance_back_by(100), Err(v.len() - 1 - i));
+ }
+
+ assert_eq!(v.iter().rev().advance_back_by(v.len()), Ok(()));
+ assert_eq!(v.iter().rev().advance_back_by(100), Err(v.len()));
+}
+
+#[test]
+fn test_iterator_last() {
+ let v: &[_] = &[0, 1, 2, 3, 4];
+ assert_eq!(v.iter().last().unwrap(), &4);
+ assert_eq!(v[..1].iter().last().unwrap(), &0);
+}
+
+#[test]
+fn test_iterator_max() {
+ let v: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+ assert_eq!(v[..4].iter().cloned().max(), Some(3));
+ assert_eq!(v.iter().cloned().max(), Some(10));
+ assert_eq!(v[..0].iter().cloned().max(), None);
+ assert_eq!(v.iter().cloned().map(Mod3).max().map(|x| x.0), Some(8));
+}
+
+#[test]
+fn test_iterator_min() {
+ let v: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+ assert_eq!(v[..4].iter().cloned().min(), Some(0));
+ assert_eq!(v.iter().cloned().min(), Some(0));
+ assert_eq!(v[..0].iter().cloned().min(), None);
+ assert_eq!(v.iter().cloned().map(Mod3).min().map(|x| x.0), Some(0));
+}
+
+#[test]
+fn test_iterator_size_hint() {
+ let c = (0..).step_by(1);
+ let v: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
+ let v2 = &[10, 11, 12];
+ let vi = v.iter();
+
+ assert_eq!((0..).size_hint(), (usize::MAX, None));
+ assert_eq!(c.size_hint(), (usize::MAX, None));
+ assert_eq!(vi.clone().size_hint(), (10, Some(10)));
+
+ assert_eq!(c.clone().take(5).size_hint(), (5, Some(5)));
+ assert_eq!(c.clone().skip(5).size_hint().1, None);
+ assert_eq!(c.clone().take_while(|_| false).size_hint(), (0, None));
+ assert_eq!(c.clone().map_while(|_| None::<()>).size_hint(), (0, None));
+ assert_eq!(c.clone().skip_while(|_| false).size_hint(), (0, None));
+ assert_eq!(c.clone().enumerate().size_hint(), (usize::MAX, None));
+ assert_eq!(c.clone().chain(vi.clone().cloned()).size_hint(), (usize::MAX, None));
+ assert_eq!(c.clone().zip(vi.clone()).size_hint(), (10, Some(10)));
+ assert_eq!(c.clone().scan(0, |_, _| Some(0)).size_hint(), (0, None));
+ assert_eq!(c.clone().filter(|_| false).size_hint(), (0, None));
+ assert_eq!(c.clone().map(|_| 0).size_hint(), (usize::MAX, None));
+ assert_eq!(c.filter_map(|_| Some(0)).size_hint(), (0, None));
+
+ assert_eq!(vi.clone().take(5).size_hint(), (5, Some(5)));
+ assert_eq!(vi.clone().take(12).size_hint(), (10, Some(10)));
+ assert_eq!(vi.clone().skip(3).size_hint(), (7, Some(7)));
+ assert_eq!(vi.clone().skip(12).size_hint(), (0, Some(0)));
+ assert_eq!(vi.clone().take_while(|_| false).size_hint(), (0, Some(10)));
+ assert_eq!(vi.clone().map_while(|_| None::<()>).size_hint(), (0, Some(10)));
+ assert_eq!(vi.clone().skip_while(|_| false).size_hint(), (0, Some(10)));
+ assert_eq!(vi.clone().enumerate().size_hint(), (10, Some(10)));
+ assert_eq!(vi.clone().chain(v2).size_hint(), (13, Some(13)));
+ assert_eq!(vi.clone().zip(v2).size_hint(), (3, Some(3)));
+ assert_eq!(vi.clone().scan(0, |_, _| Some(0)).size_hint(), (0, Some(10)));
+ assert_eq!(vi.clone().filter(|_| false).size_hint(), (0, Some(10)));
+ assert_eq!(vi.clone().map(|&i| i + 1).size_hint(), (10, Some(10)));
+ assert_eq!(vi.filter_map(|_| Some(0)).size_hint(), (0, Some(10)));
+}
+
+#[test]
+fn test_all() {
+ let v: Box<[isize]> = Box::new([1, 2, 3, 4, 5]);
+ assert!(v.iter().all(|&x| x < 10));
+ assert!(!v.iter().all(|&x| x % 2 == 0));
+ assert!(!v.iter().all(|&x| x > 100));
+ assert!(v[..0].iter().all(|_| panic!()));
+}
+
+#[test]
+fn test_any() {
+ let v: Box<[isize]> = Box::new([1, 2, 3, 4, 5]);
+ assert!(v.iter().any(|&x| x < 10));
+ assert!(v.iter().any(|&x| x % 2 == 0));
+ assert!(!v.iter().any(|&x| x > 100));
+ assert!(!v[..0].iter().any(|_| panic!()));
+}
+
+#[test]
+fn test_find() {
+ let v: &[isize] = &[1, 3, 9, 27, 103, 14, 11];
+ assert_eq!(*v.iter().find(|&&x| x & 1 == 0).unwrap(), 14);
+ assert_eq!(*v.iter().find(|&&x| x % 3 == 0).unwrap(), 3);
+ assert!(v.iter().find(|&&x| x % 12 == 0).is_none());
+}
+
+#[test]
+fn test_try_find() {
+ let xs: &[isize] = &[];
+ assert_eq!(xs.iter().try_find(testfn), Ok(None));
+ let xs: &[isize] = &[1, 2, 3, 4];
+ assert_eq!(xs.iter().try_find(testfn), Ok(Some(&2)));
+ let xs: &[isize] = &[1, 3, 4];
+ assert_eq!(xs.iter().try_find(testfn), Err(()));
+
+ let xs: &[isize] = &[1, 2, 3, 4, 5, 6, 7];
+ let mut iter = xs.iter();
+ assert_eq!(iter.try_find(testfn), Ok(Some(&2)));
+ assert_eq!(iter.try_find(testfn), Err(()));
+ assert_eq!(iter.next(), Some(&5));
+
+ fn testfn(x: &&isize) -> Result<bool, ()> {
+ if **x == 2 {
+ return Ok(true);
+ }
+ if **x == 4 {
+ return Err(());
+ }
+ Ok(false)
+ }
+}
+
+#[test]
+fn test_try_find_api_usability() -> Result<(), Box<dyn std::error::Error>> {
+ let a = ["1", "2"];
+
+ let is_my_num = |s: &str, search: i32| -> Result<bool, std::num::ParseIntError> {
+ Ok(s.parse::<i32>()? == search)
+ };
+
+ let val = a.iter().try_find(|&&s| is_my_num(s, 2))?;
+ assert_eq!(val, Some(&"2"));
+
+ Ok(())
+}
+
+#[test]
+fn test_position() {
+ let v = &[1, 3, 9, 27, 103, 14, 11];
+ assert_eq!(v.iter().position(|x| *x & 1 == 0).unwrap(), 5);
+ assert_eq!(v.iter().position(|x| *x % 3 == 0).unwrap(), 1);
+ assert!(v.iter().position(|x| *x % 12 == 0).is_none());
+}
+
+#[test]
+fn test_count() {
+ let xs = &[1, 2, 2, 1, 5, 9, 0, 2];
+ assert_eq!(xs.iter().filter(|x| **x == 2).count(), 3);
+ assert_eq!(xs.iter().filter(|x| **x == 5).count(), 1);
+ assert_eq!(xs.iter().filter(|x| **x == 95).count(), 0);
+}
+
+#[test]
+fn test_max_by_key() {
+ let xs: &[isize] = &[-3, 0, 1, 5, -10];
+ assert_eq!(*xs.iter().max_by_key(|x| x.abs()).unwrap(), -10);
+}
+
+#[test]
+fn test_max_by() {
+ let xs: &[isize] = &[-3, 0, 1, 5, -10];
+ assert_eq!(*xs.iter().max_by(|x, y| x.abs().cmp(&y.abs())).unwrap(), -10);
+}
+
+#[test]
+fn test_min_by_key() {
+ let xs: &[isize] = &[-3, 0, 1, 5, -10];
+ assert_eq!(*xs.iter().min_by_key(|x| x.abs()).unwrap(), 0);
+}
+
+#[test]
+fn test_min_by() {
+ let xs: &[isize] = &[-3, 0, 1, 5, -10];
+ assert_eq!(*xs.iter().min_by(|x, y| x.abs().cmp(&y.abs())).unwrap(), 0);
+}
+
+#[test]
+fn test_by_ref() {
+ let mut xs = 0..10;
+ // sum the first five values
+ let partial_sum = xs.by_ref().take(5).fold(0, |a, b| a + b);
+ assert_eq!(partial_sum, 10);
+ assert_eq!(xs.next(), Some(5));
+}
+
+#[test]
+fn test_is_sorted() {
+ assert!([1, 2, 2, 9].iter().is_sorted());
+ assert!(![1, 3, 2].iter().is_sorted());
+ assert!([0].iter().is_sorted());
+ assert!(std::iter::empty::<i32>().is_sorted());
+ assert!(![0.0, 1.0, f32::NAN].iter().is_sorted());
+ assert!([-2, -1, 0, 3].iter().is_sorted());
+ assert!(![-2i32, -1, 0, 3].iter().is_sorted_by_key(|n| n.abs()));
+ assert!(!["c", "bb", "aaa"].iter().is_sorted());
+ assert!(["c", "bb", "aaa"].iter().is_sorted_by_key(|s| s.len()));
+}
+
+#[test]
+fn test_partition() {
+ fn check(xs: &mut [i32], ref p: impl Fn(&i32) -> bool, expected: usize) {
+ let i = xs.iter_mut().partition_in_place(p);
+ assert_eq!(expected, i);
+ assert!(xs[..i].iter().all(p));
+ assert!(!xs[i..].iter().any(p));
+ assert!(xs.iter().is_partitioned(p));
+ if i == 0 || i == xs.len() {
+ assert!(xs.iter().rev().is_partitioned(p));
+ } else {
+ assert!(!xs.iter().rev().is_partitioned(p));
+ }
+ }
+
+ check(&mut [], |_| true, 0);
+ check(&mut [], |_| false, 0);
+
+ check(&mut [0], |_| true, 1);
+ check(&mut [0], |_| false, 0);
+
+ check(&mut [-1, 1], |&x| x > 0, 1);
+ check(&mut [-1, 1], |&x| x < 0, 1);
+
+ let ref mut xs = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
+ check(xs, |_| true, 10);
+ check(xs, |_| false, 0);
+ check(xs, |&x| x % 2 == 0, 5); // evens
+ check(xs, |&x| x % 2 == 1, 5); // odds
+ check(xs, |&x| x % 3 == 0, 4); // multiple of 3
+ check(xs, |&x| x % 4 == 0, 3); // multiple of 4
+ check(xs, |&x| x % 5 == 0, 2); // multiple of 5
+ check(xs, |&x| x < 3, 3); // small
+ check(xs, |&x| x > 6, 3); // large
+}
+
+#[test]
+fn test_iterator_rev_advance_by() {
+ let v: &[_] = &[0, 1, 2, 3, 4];
+
+ for i in 0..v.len() {
+ let mut iter = v.iter().rev();
+ assert_eq!(iter.advance_by(i), Ok(()));
+ assert_eq!(iter.next().unwrap(), &v[v.len() - 1 - i]);
+ assert_eq!(iter.advance_by(100), Err(v.len() - 1 - i));
+ }
+
+ assert_eq!(v.iter().rev().advance_by(v.len()), Ok(()));
+ assert_eq!(v.iter().rev().advance_by(100), Err(v.len()));
+}
+
+#[test]
+fn test_find_map() {
+ let xs: &[isize] = &[];
+ assert_eq!(xs.iter().find_map(half_if_even), None);
+ let xs: &[isize] = &[3, 5];
+ assert_eq!(xs.iter().find_map(half_if_even), None);
+ let xs: &[isize] = &[4, 5];
+ assert_eq!(xs.iter().find_map(half_if_even), Some(2));
+ let xs: &[isize] = &[3, 6];
+ assert_eq!(xs.iter().find_map(half_if_even), Some(3));
+
+ let xs: &[isize] = &[1, 2, 3, 4, 5, 6, 7];
+ let mut iter = xs.iter();
+ assert_eq!(iter.find_map(half_if_even), Some(1));
+ assert_eq!(iter.find_map(half_if_even), Some(2));
+ assert_eq!(iter.find_map(half_if_even), Some(3));
+ assert_eq!(iter.next(), Some(&7));
+
+ fn half_if_even(x: &isize) -> Option<isize> {
+ if x % 2 == 0 { Some(x / 2) } else { None }
+ }
+}
+
+#[test]
+fn test_try_reduce() {
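+ // The outer layer reports whether the closure ever short-circuited; the
+ // inner `Option` is `None` only when the iterator was empty.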
+ let v = [1usize, 2, 3, 4, 5];
+ let sum = v.into_iter().try_reduce(|x, y| x.checked_add(y));
+ assert_eq!(sum, Some(Some(15)));
+
+ let v = [1, 2, 3, 4, 5, usize::MAX];
+ let sum = v.into_iter().try_reduce(|x, y| x.checked_add(y));
+ assert_eq!(sum, None);
+
+ let v: [usize; 0] = [];
+ let sum = v.into_iter().try_reduce(|x, y| x.checked_add(y));
+ assert_eq!(sum, Some(None));
+
+ let v = ["1", "2", "3", "4", "5"];
+ let max = v.into_iter().try_reduce(|x, y| {
+ if x.parse::<usize>().ok()? > y.parse::<usize>().ok()? { Some(x) } else { Some(y) }
+ });
+ assert_eq!(max, Some(Some("5")));
+
+ let v = ["1", "2", "3", "4", "5"];
+ let max: Result<Option<_>, <usize as std::str::FromStr>::Err> =
+ v.into_iter().try_reduce(|x, y| {
+ if x.parse::<usize>()? > y.parse::<usize>()? { Ok(x) } else { Ok(y) }
+ });
+ assert_eq!(max, Ok(Some("5")));
+}
+
+#[test]
+fn test_iterator_len() {
+ let v: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+ assert_eq!(v[..4].iter().count(), 4);
+ assert_eq!(v[..10].iter().count(), 10);
+ assert_eq!(v[..0].iter().count(), 0);
+}
+
+#[test]
+fn test_collect() {
+ let a = vec![1, 2, 3, 4, 5];
+ let b: Vec<isize> = a.iter().cloned().collect();
+ assert!(a == b);
+}
+
+#[test]
+fn test_try_collect() {
+ use core::ops::ControlFlow::{Break, Continue};
+
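+ // `try_collect` stops at the first short-circuiting value (`None`, `Err`,
+ // or `Break`) and leaves the iterator just past it, so a later call can
+ // collect the remaining items.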
+ let u = vec![Some(1), Some(2), Some(3)];
+ let v = u.into_iter().try_collect::<Vec<i32>>();
+ assert_eq!(v, Some(vec![1, 2, 3]));
+
+ let u = vec![Some(1), Some(2), None, Some(3)];
+ let mut it = u.into_iter();
+ let v = it.try_collect::<Vec<i32>>();
+ assert_eq!(v, None);
+ let v = it.try_collect::<Vec<i32>>();
+ assert_eq!(v, Some(vec![3]));
+
+ let u: Vec<Result<i32, ()>> = vec![Ok(1), Ok(2), Ok(3)];
+ let v = u.into_iter().try_collect::<Vec<i32>>();
+ assert_eq!(v, Ok(vec![1, 2, 3]));
+
+ let u = vec![Ok(1), Ok(2), Err(()), Ok(3)];
+ let v = u.into_iter().try_collect::<Vec<i32>>();
+ assert_eq!(v, Err(()));
+
+ let numbers = vec![1, 2, 3, 4, 5];
+ let all_positive = numbers
+ .iter()
+ .cloned()
+ .map(|n| if n > 0 { Some(n) } else { None })
+ .try_collect::<Vec<i32>>();
+ assert_eq!(all_positive, Some(numbers));
+
+ let numbers = vec![-2, -1, 0, 1, 2];
+ let all_positive =
+ numbers.into_iter().map(|n| if n > 0 { Some(n) } else { None }).try_collect::<Vec<i32>>();
+ assert_eq!(all_positive, None);
+
+ let u = [Continue(1), Continue(2), Break(3), Continue(4), Continue(5)];
+ let mut it = u.into_iter();
+
+ let v = it.try_collect::<Vec<_>>();
+ assert_eq!(v, Break(3));
+
+ let v = it.try_collect::<Vec<_>>();
+ assert_eq!(v, Continue(vec![4, 5]));
+}
+
+#[test]
+fn test_collect_into() {
+ let a = vec![1, 2, 3, 4, 5];
+ let mut b = Vec::new();
+ a.iter().cloned().collect_into(&mut b);
+ assert!(a == b);
+}
+
+#[test]
+fn iter_try_collect_uses_try_fold_not_next() {
+ // This makes sure it picks up optimizations, and doesn't use the `&mut I` impl.
+ struct PanicOnNext<I>(I);
+ impl<I: Iterator> Iterator for PanicOnNext<I> {
+ type Item = I::Item;
+ fn next(&mut self) -> Option<Self::Item> {
+ panic!("Iterator::next should not be called!")
+ }
+ fn try_fold<B, F, R>(&mut self, init: B, f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> R,
+ R: std::ops::Try<Output = B>,
+ {
+ self.0.try_fold(init, f)
+ }
+ }
+
+ let it = (0..10).map(Some);
+ let _ = PanicOnNext(it).try_collect::<Vec<_>>();
+ // validation is just that it didn't panic.
+}
+
+#[test]
+fn test_next_chunk() {
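+ // The chunk length is a const generic inferred from the expected array, so
+ // the `[]` line really requests (and gets) a zero-length chunk; on failure,
+ // `unwrap_err` exposes whatever items were left over.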
+ let mut it = 0..12;
+ assert_eq!(it.next_chunk().unwrap(), [0, 1, 2, 3]);
+ assert_eq!(it.next_chunk().unwrap(), []);
+ assert_eq!(it.next_chunk().unwrap(), [4, 5, 6, 7, 8, 9]);
+ assert_eq!(it.next_chunk::<4>().unwrap_err().as_slice(), &[10, 11]);
+}
+
+// just tests whether or not this compiles
+fn _empty_impl_all_auto_traits<T>() {
+ use std::panic::{RefUnwindSafe, UnwindSafe};
+ fn all_auto_traits<T: Send + Sync + Unpin + UnwindSafe + RefUnwindSafe>() {}
+
+ all_auto_traits::<std::iter::Empty<T>>();
+}
diff --git a/library/core/tests/iter/traits/mod.rs b/library/core/tests/iter/traits/mod.rs
new file mode 100644
index 000000000..80619f53f
--- /dev/null
+++ b/library/core/tests/iter/traits/mod.rs
@@ -0,0 +1,4 @@
+mod accum;
+mod double_ended;
+mod iterator;
+mod step;
diff --git a/library/core/tests/iter/traits/step.rs b/library/core/tests/iter/traits/step.rs
new file mode 100644
index 000000000..3d82a40cd
--- /dev/null
+++ b/library/core/tests/iter/traits/step.rs
@@ -0,0 +1,89 @@
+use core::iter::*;
+
+#[test]
+fn test_steps_between() {
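+ // `Step::steps_between(&a, &b)` counts the `forward` steps from `a` to `b`,
+ // returning `None` when `b` is unreachable or the count overflows `usize`.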
+ assert_eq!(Step::steps_between(&20_u8, &200_u8), Some(180_usize));
+ assert_eq!(Step::steps_between(&-20_i8, &80_i8), Some(100_usize));
+ assert_eq!(Step::steps_between(&-120_i8, &80_i8), Some(200_usize));
+ assert_eq!(Step::steps_between(&20_u32, &4_000_100_u32), Some(4_000_080_usize));
+ assert_eq!(Step::steps_between(&-20_i32, &80_i32), Some(100_usize));
+ assert_eq!(Step::steps_between(&-2_000_030_i32, &2_000_050_i32), Some(4_000_080_usize));
+
+ // Skip u64/i64 to avoid differences with 32-bit vs 64-bit platforms
+
+ assert_eq!(Step::steps_between(&20_u128, &200_u128), Some(180_usize));
+ assert_eq!(Step::steps_between(&-20_i128, &80_i128), Some(100_usize));
+ if cfg!(target_pointer_width = "64") {
+ assert_eq!(Step::steps_between(&10_u128, &0x1_0000_0000_0000_0009_u128), Some(usize::MAX));
+ }
+ assert_eq!(Step::steps_between(&10_u128, &0x1_0000_0000_0000_000a_u128), None);
+ assert_eq!(Step::steps_between(&10_i128, &0x1_0000_0000_0000_000a_i128), None);
+ assert_eq!(
+ Step::steps_between(&-0x1_0000_0000_0000_0000_i128, &0x1_0000_0000_0000_0000_i128,),
+ None,
+ );
+}
+
+#[test]
+fn test_step_forward() {
+ assert_eq!(Step::forward_checked(55_u8, 200_usize), Some(255_u8));
+ assert_eq!(Step::forward_checked(252_u8, 200_usize), None);
+ assert_eq!(Step::forward_checked(0_u8, 256_usize), None);
+ assert_eq!(Step::forward_checked(-110_i8, 200_usize), Some(90_i8));
+ assert_eq!(Step::forward_checked(-110_i8, 248_usize), None);
+ assert_eq!(Step::forward_checked(-126_i8, 256_usize), None);
+
+ assert_eq!(Step::forward_checked(35_u16, 100_usize), Some(135_u16));
+ assert_eq!(Step::forward_checked(35_u16, 65500_usize), Some(u16::MAX));
+ assert_eq!(Step::forward_checked(36_u16, 65500_usize), None);
+ assert_eq!(Step::forward_checked(-110_i16, 200_usize), Some(90_i16));
+ assert_eq!(Step::forward_checked(-20_030_i16, 50_050_usize), Some(30_020_i16));
+ assert_eq!(Step::forward_checked(-10_i16, 40_000_usize), None);
+ assert_eq!(Step::forward_checked(-10_i16, 70_000_usize), None);
+
+ assert_eq!(Step::forward_checked(10_u128, 70_000_usize), Some(70_010_u128));
+ assert_eq!(Step::forward_checked(10_i128, 70_030_usize), Some(70_040_i128));
+ assert_eq!(
+ Step::forward_checked(0xffff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_u128, 0xff_usize),
+ Some(u128::MAX),
+ );
+ assert_eq!(
+ Step::forward_checked(0xffff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_u128, 0x100_usize),
+ None
+ );
+ assert_eq!(
+ Step::forward_checked(0x7fff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_i128, 0xff_usize),
+ Some(i128::MAX),
+ );
+ assert_eq!(
+ Step::forward_checked(0x7fff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_i128, 0x100_usize),
+ None
+ );
+}
+
+#[test]
+fn test_step_backward() {
+ assert_eq!(Step::backward_checked(255_u8, 200_usize), Some(55_u8));
+ assert_eq!(Step::backward_checked(100_u8, 200_usize), None);
+ assert_eq!(Step::backward_checked(255_u8, 256_usize), None);
+ assert_eq!(Step::backward_checked(90_i8, 200_usize), Some(-110_i8));
+ assert_eq!(Step::backward_checked(110_i8, 248_usize), None);
+ assert_eq!(Step::backward_checked(127_i8, 256_usize), None);
+
+ assert_eq!(Step::backward_checked(135_u16, 100_usize), Some(35_u16));
+ assert_eq!(Step::backward_checked(u16::MAX, 65500_usize), Some(35_u16));
+ assert_eq!(Step::backward_checked(10_u16, 11_usize), None);
+ assert_eq!(Step::backward_checked(90_i16, 200_usize), Some(-110_i16));
+ assert_eq!(Step::backward_checked(30_020_i16, 50_050_usize), Some(-20_030_i16));
+ assert_eq!(Step::backward_checked(-10_i16, 40_000_usize), None);
+ assert_eq!(Step::backward_checked(-10_i16, 70_000_usize), None);
+
+ assert_eq!(Step::backward_checked(70_010_u128, 70_000_usize), Some(10_u128));
+ assert_eq!(Step::backward_checked(70_020_i128, 70_030_usize), Some(-10_i128));
+ assert_eq!(Step::backward_checked(10_u128, 7_usize), Some(3_u128));
+ assert_eq!(Step::backward_checked(10_u128, 11_usize), None);
+ assert_eq!(
+ Step::backward_checked(-0x7fff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_i128, 0x100_usize),
+ Some(i128::MIN)
+ );
+}
diff --git a/library/core/tests/lazy.rs b/library/core/tests/lazy.rs
new file mode 100644
index 000000000..70fcc6d2d
--- /dev/null
+++ b/library/core/tests/lazy.rs
@@ -0,0 +1,138 @@
+use core::{
+ cell::{Cell, LazyCell, OnceCell},
+ sync::atomic::{AtomicUsize, Ordering::SeqCst},
+};
+
+#[test]
+fn once_cell() {
+ let c = OnceCell::new();
+ assert!(c.get().is_none());
+ c.get_or_init(|| 92);
+ assert_eq!(c.get(), Some(&92));
+
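+    // The closure is not run here: the cell is already initialized.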
+ c.get_or_init(|| panic!("Kabom!"));
+ assert_eq!(c.get(), Some(&92));
+}
+
+#[test]
+fn once_cell_get_mut() {
+ let mut c = OnceCell::new();
+ assert!(c.get_mut().is_none());
+ c.set(90).unwrap();
+ *c.get_mut().unwrap() += 2;
+ assert_eq!(c.get_mut(), Some(&mut 92));
+}
+
+#[test]
+fn once_cell_drop() {
+ static DROP_CNT: AtomicUsize = AtomicUsize::new(0);
+ struct Dropper;
+ impl Drop for Dropper {
+ fn drop(&mut self) {
+ DROP_CNT.fetch_add(1, SeqCst);
+ }
+ }
+
+ let x = OnceCell::new();
+ x.get_or_init(|| Dropper);
+ assert_eq!(DROP_CNT.load(SeqCst), 0);
+ drop(x);
+ assert_eq!(DROP_CNT.load(SeqCst), 1);
+}
+
+#[test]
+fn unsync_once_cell_drop_empty() {
+ let x = OnceCell::<&'static str>::new();
+ drop(x);
+}
+
+#[test]
+const fn once_cell_const() {
+ let _once_cell: OnceCell<u32> = OnceCell::new();
+ let _once_cell: OnceCell<u32> = OnceCell::from(32);
+}
+
+#[test]
+fn clone() {
+ let s = OnceCell::new();
+ let c = s.clone();
+ assert!(c.get().is_none());
+
+ s.set("hello").unwrap();
+ let c = s.clone();
+ assert_eq!(c.get().map(|c| *c), Some("hello"));
+}
+
+#[test]
+fn from_impl() {
+ assert_eq!(OnceCell::from("value").get(), Some(&"value"));
+ assert_ne!(OnceCell::from("foo").get(), Some(&"bar"));
+}
+
+#[test]
+fn partialeq_impl() {
+ assert!(OnceCell::from("value") == OnceCell::from("value"));
+ assert!(OnceCell::from("foo") != OnceCell::from("bar"));
+
+ assert!(OnceCell::<&'static str>::new() == OnceCell::new());
+ assert!(OnceCell::<&'static str>::new() != OnceCell::from("value"));
+}
+
+#[test]
+fn into_inner() {
+ let cell: OnceCell<&'static str> = OnceCell::new();
+ assert_eq!(cell.into_inner(), None);
+ let cell = OnceCell::new();
+ cell.set("hello").unwrap();
+ assert_eq!(cell.into_inner(), Some("hello"));
+}
+
+#[test]
+fn lazy_new() {
+ let called = Cell::new(0);
+ let x = LazyCell::new(|| {
+ called.set(called.get() + 1);
+ 92
+ });
+
+ assert_eq!(called.get(), 0);
+
+ let y = *x - 30;
+ assert_eq!(y, 62);
+ assert_eq!(called.get(), 1);
+
+ let y = *x - 30;
+ assert_eq!(y, 62);
+ assert_eq!(called.get(), 1);
+}
+
+#[test]
+fn aliasing_in_get() {
+ let x = OnceCell::new();
+ x.set(42).unwrap();
+ let at_x = x.get().unwrap(); // --- (shared) borrow of inner `Option<T>` --+
+ let _ = x.set(27); // <-- temporary (unique) borrow of inner `Option<T>` |
+ println!("{at_x}"); // <------- up until here ---------------------------+
+}
+
+#[test]
+#[should_panic(expected = "reentrant init")]
+fn reentrant_init() {
+ let x: OnceCell<Box<i32>> = OnceCell::new();
+ let dangling_ref: Cell<Option<&i32>> = Cell::new(None);
+ x.get_or_init(|| {
+ let r = x.get_or_init(|| Box::new(92));
+ dangling_ref.set(Some(r));
+ Box::new(62)
+ });
+ eprintln!("use after free: {:?}", dangling_ref.get().unwrap());
+}
+
+#[test]
+fn dropck() {
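+    // The stored `&s` dangles by the time `cell` is dropped; this compiles
+    // because dropping a shared reference runs no drop glue.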
+ let cell = OnceCell::new();
+ {
+ let s = String::new();
+ cell.set(&s).unwrap();
+ }
+}
diff --git a/library/core/tests/lib.rs b/library/core/tests/lib.rs
new file mode 100644
index 000000000..db94368f6
--- /dev/null
+++ b/library/core/tests/lib.rs
@@ -0,0 +1,142 @@
+#![feature(alloc_layout_extra)]
+#![feature(array_chunks)]
+#![feature(array_methods)]
+#![feature(array_windows)]
+#![feature(bench_black_box)]
+#![feature(cell_update)]
+#![feature(const_assume)]
+#![feature(const_black_box)]
+#![feature(const_bool_to_option)]
+#![feature(const_cell_into_inner)]
+#![feature(const_convert)]
+#![feature(const_heap)]
+#![feature(const_maybe_uninit_as_mut_ptr)]
+#![feature(const_maybe_uninit_assume_init_read)]
+#![feature(const_nonnull_new)]
+#![feature(const_num_from_num)]
+#![feature(const_ptr_as_ref)]
+#![feature(const_ptr_read)]
+#![feature(const_ptr_write)]
+#![feature(const_trait_impl)]
+#![feature(const_likely)]
+#![feature(core_intrinsics)]
+#![feature(core_private_bignum)]
+#![feature(core_private_diy_float)]
+#![feature(dec2flt)]
+#![feature(div_duration)]
+#![feature(duration_consts_float)]
+#![feature(duration_constants)]
+#![feature(exact_size_is_empty)]
+#![feature(extern_types)]
+#![feature(flt2dec)]
+#![feature(fmt_internals)]
+#![feature(float_minimum_maximum)]
+#![feature(future_join)]
+#![feature(generic_assert_internals)]
+#![feature(array_try_from_fn)]
+#![feature(hasher_prefixfree_extras)]
+#![feature(hashmap_internals)]
+#![feature(try_find)]
+#![feature(inline_const)]
+#![feature(is_sorted)]
+#![feature(pattern)]
+#![feature(pin_macro)]
+#![feature(sort_internals)]
+#![feature(slice_take)]
+#![feature(slice_from_ptr_range)]
+#![feature(split_as_slice)]
+#![feature(maybe_uninit_uninit_array)]
+#![feature(maybe_uninit_array_assume_init)]
+#![feature(maybe_uninit_write_slice)]
+#![feature(min_specialization)]
+#![feature(numfmt)]
+#![feature(step_trait)]
+#![feature(str_internals)]
+#![feature(std_internals)]
+#![feature(test)]
+#![feature(trusted_len)]
+#![feature(try_blocks)]
+#![feature(try_trait_v2)]
+#![feature(slice_internals)]
+#![feature(slice_partition_dedup)]
+#![feature(int_log)]
+#![feature(iter_advance_by)]
+#![feature(iter_collect_into)]
+#![feature(iter_partition_in_place)]
+#![feature(iter_intersperse)]
+#![feature(iter_is_partitioned)]
+#![feature(iter_next_chunk)]
+#![feature(iter_order_by)]
+#![feature(iterator_try_collect)]
+#![feature(iterator_try_reduce)]
+#![feature(const_mut_refs)]
+#![feature(const_pin)]
+#![feature(never_type)]
+#![feature(unwrap_infallible)]
+#![feature(result_into_ok_or_err)]
+#![feature(portable_simd)]
+#![feature(ptr_metadata)]
+#![feature(once_cell)]
+#![feature(option_result_contains)]
+#![feature(unsized_tuple_coercion)]
+#![feature(const_option)]
+#![feature(const_option_ext)]
+#![feature(const_result)]
+#![feature(integer_atomics)]
+#![feature(int_roundings)]
+#![feature(slice_group_by)]
+#![feature(split_array)]
+#![feature(strict_provenance)]
+#![feature(strict_provenance_atomic_ptr)]
+#![feature(trusted_random_access)]
+#![feature(unsize)]
+#![feature(unzip_option)]
+#![feature(const_array_from_ref)]
+#![feature(const_slice_from_ref)]
+#![feature(waker_getters)]
+#![feature(slice_flatten)]
+#![feature(provide_any)]
+#![deny(unsafe_op_in_unsafe_fn)]
+
+extern crate test;
+
+mod alloc;
+mod any;
+mod array;
+mod ascii;
+mod asserting;
+mod atomic;
+mod bool;
+mod cell;
+mod char;
+mod clone;
+mod cmp;
+mod const_ptr;
+mod convert;
+mod fmt;
+mod future;
+mod hash;
+mod intrinsics;
+mod iter;
+mod lazy;
+mod macros;
+mod manually_drop;
+mod mem;
+mod nonzero;
+mod num;
+mod ops;
+mod option;
+mod pattern;
+mod pin;
+mod pin_macro;
+mod ptr;
+mod result;
+mod simd;
+mod slice;
+mod str;
+mod str_lossy;
+mod task;
+mod time;
+mod tuple;
+mod unicode;
+mod waker;
diff --git a/library/core/tests/macros.rs b/library/core/tests/macros.rs
new file mode 100644
index 000000000..ff3632e35
--- /dev/null
+++ b/library/core/tests/macros.rs
@@ -0,0 +1,20 @@
+#[test]
+fn assert_eq_trailing_comma() {
+ assert_eq!(1, 1,);
+}
+
+#[test]
+fn assert_escape() {
+ assert!(r#"☃\backslash"#.contains("\\"));
+}
+
+#[test]
+fn assert_ne_trailing_comma() {
+ assert_ne!(1, 2,);
+}
+
+#[rustfmt::skip]
+#[test]
+fn matches_leading_pipe() {
+ matches!(1, | 1 | 2 | 3);
+}
diff --git a/library/core/tests/manually_drop.rs b/library/core/tests/manually_drop.rs
new file mode 100644
index 000000000..9eac27973
--- /dev/null
+++ b/library/core/tests/manually_drop.rs
@@ -0,0 +1,27 @@
+use core::mem::ManuallyDrop;
+
+#[test]
+fn smoke() {
+ #[derive(Clone)]
+ struct TypeWithDrop;
+ impl Drop for TypeWithDrop {
+ fn drop(&mut self) {
+ unreachable!("Should not get dropped");
+ }
+ }
+
+ let x = ManuallyDrop::new(TypeWithDrop);
+ drop(x);
+
+ // also test unsizing
+ let x: Box<ManuallyDrop<[TypeWithDrop]>> =
+ Box::new(ManuallyDrop::new([TypeWithDrop, TypeWithDrop]));
+ drop(x);
+
+ // test clone and clone_from implementations
+ let mut x = ManuallyDrop::new(TypeWithDrop);
+ let y = x.clone();
+ x.clone_from(&y);
+ drop(x);
+ drop(y);
+}
diff --git a/library/core/tests/mem.rs b/library/core/tests/mem.rs
new file mode 100644
index 000000000..6856d1a1f
--- /dev/null
+++ b/library/core/tests/mem.rs
@@ -0,0 +1,343 @@
+use core::mem::*;
+
+#[cfg(panic = "unwind")]
+use std::rc::Rc;
+
+#[test]
+fn size_of_basic() {
+ assert_eq!(size_of::<u8>(), 1);
+ assert_eq!(size_of::<u16>(), 2);
+ assert_eq!(size_of::<u32>(), 4);
+ assert_eq!(size_of::<u64>(), 8);
+}
+
+#[test]
+#[cfg(target_pointer_width = "16")]
+fn size_of_16() {
+ assert_eq!(size_of::<usize>(), 2);
+ assert_eq!(size_of::<*const usize>(), 2);
+}
+
+#[test]
+#[cfg(target_pointer_width = "32")]
+fn size_of_32() {
+ assert_eq!(size_of::<usize>(), 4);
+ assert_eq!(size_of::<*const usize>(), 4);
+}
+
+#[test]
+#[cfg(target_pointer_width = "64")]
+fn size_of_64() {
+ assert_eq!(size_of::<usize>(), 8);
+ assert_eq!(size_of::<*const usize>(), 8);
+}
+
+#[test]
+fn size_of_val_basic() {
+ assert_eq!(size_of_val(&1u8), 1);
+ assert_eq!(size_of_val(&1u16), 2);
+ assert_eq!(size_of_val(&1u32), 4);
+ assert_eq!(size_of_val(&1u64), 8);
+}
+
+#[test]
+fn align_of_basic() {
+ assert_eq!(align_of::<u8>(), 1);
+ assert_eq!(align_of::<u16>(), 2);
+ assert_eq!(align_of::<u32>(), 4);
+}
+
+#[test]
+#[cfg(target_pointer_width = "16")]
+fn align_of_16() {
+ assert_eq!(align_of::<usize>(), 2);
+ assert_eq!(align_of::<*const usize>(), 2);
+}
+
+#[test]
+#[cfg(target_pointer_width = "32")]
+fn align_of_32() {
+ assert_eq!(align_of::<usize>(), 4);
+ assert_eq!(align_of::<*const usize>(), 4);
+}
+
+#[test]
+#[cfg(target_pointer_width = "64")]
+fn align_of_64() {
+ assert_eq!(align_of::<usize>(), 8);
+ assert_eq!(align_of::<*const usize>(), 8);
+}
+
+#[test]
+fn align_of_val_basic() {
+ assert_eq!(align_of_val(&1u8), 1);
+ assert_eq!(align_of_val(&1u16), 2);
+ assert_eq!(align_of_val(&1u32), 4);
+}
+
+#[test]
+fn test_swap() {
+ let mut x = 31337;
+ let mut y = 42;
+ swap(&mut x, &mut y);
+ assert_eq!(x, 42);
+ assert_eq!(y, 31337);
+}
+
+#[test]
+fn test_replace() {
+ let mut x = Some("test".to_string());
+ let y = replace(&mut x, None);
+ assert!(x.is_none());
+ assert!(y.is_some());
+}
+
+#[test]
+fn test_transmute_copy() {
+ assert_eq!(1, unsafe { transmute_copy(&1) });
+}
+
+#[test]
+fn test_transmute_copy_shrink() {
+ assert_eq!(0_u8, unsafe { transmute_copy(&0_u64) });
+}
+
+#[test]
+fn test_transmute_copy_unaligned() {
+ #[repr(C)]
+ #[derive(Default)]
+ struct Unaligned {
+ a: u8,
+ b: [u8; 8],
+ }
+
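+    // With `repr(C)`, `b` sits at offset 1 and has alignment 1, so reading it
+    // as a `u64` exercises the unaligned case of `transmute_copy`.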
+ let u = Unaligned::default();
+ assert_eq!(0_u64, unsafe { transmute_copy(&u.b) });
+}
+
+#[test]
+#[cfg(panic = "unwind")]
+fn test_transmute_copy_grow_panics() {
+ use std::panic;
+
+ let err = panic::catch_unwind(panic::AssertUnwindSafe(|| unsafe {
+ let _unused: u64 = transmute_copy(&1_u8);
+ }));
+
+ match err {
+ Ok(_) => unreachable!(),
+ Err(payload) => {
+ payload
+ .downcast::<&'static str>()
+ .and_then(|s| {
+ if *s == "cannot transmute_copy if U is larger than T" { Ok(s) } else { Err(s) }
+ })
+ .unwrap_or_else(|p| panic::resume_unwind(p));
+ }
+ }
+}
+
+#[test]
+#[allow(dead_code)]
+fn test_discriminant_send_sync() {
+ enum Regular {
+ A,
+ B(i32),
+ }
+ enum NotSendSync {
+ A(*const i32),
+ }
+
+ fn is_send_sync<T: Send + Sync>() {}
+
+ is_send_sync::<Discriminant<Regular>>();
+ is_send_sync::<Discriminant<NotSendSync>>();
+}
+
+#[test]
+fn assume_init_good() {
+ const TRUE: bool = unsafe { MaybeUninit::<bool>::new(true).assume_init() };
+
+ assert!(TRUE);
+}
+
+#[test]
+fn uninit_array_assume_init() {
+ let mut array: [MaybeUninit<i16>; 5] = MaybeUninit::uninit_array();
+ array[0].write(3);
+ array[1].write(1);
+ array[2].write(4);
+ array[3].write(1);
+ array[4].write(5);
+
+ let array = unsafe { MaybeUninit::array_assume_init(array) };
+
+ assert_eq!(array, [3, 1, 4, 1, 5]);
+
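+    // A zero-length array has no elements to initialize, so this is sound even
+    // for the never type.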
+ let [] = unsafe { MaybeUninit::<!>::array_assume_init([]) };
+}
+
+#[test]
+fn uninit_write_slice() {
+ let mut dst = [MaybeUninit::new(255); 64];
+ let src = [0; 64];
+
+ assert_eq!(MaybeUninit::write_slice(&mut dst, &src), &src);
+}
+
+#[test]
+#[should_panic(expected = "source slice length (32) does not match destination slice length (64)")]
+fn uninit_write_slice_panic_lt() {
+ let mut dst = [MaybeUninit::uninit(); 64];
+ let src = [0; 32];
+
+ MaybeUninit::write_slice(&mut dst, &src);
+}
+
+#[test]
+#[should_panic(expected = "source slice length (128) does not match destination slice length (64)")]
+fn uninit_write_slice_panic_gt() {
+ let mut dst = [MaybeUninit::uninit(); 64];
+ let src = [0; 128];
+
+ MaybeUninit::write_slice(&mut dst, &src);
+}
+
+#[test]
+fn uninit_clone_from_slice() {
+ let mut dst = [MaybeUninit::new(255); 64];
+ let src = [0; 64];
+
+ assert_eq!(MaybeUninit::write_slice_cloned(&mut dst, &src), &src);
+}
+
+#[test]
+#[should_panic(expected = "destination and source slices have different lengths")]
+fn uninit_write_slice_cloned_panic_lt() {
+ let mut dst = [MaybeUninit::uninit(); 64];
+ let src = [0; 32];
+
+ MaybeUninit::write_slice_cloned(&mut dst, &src);
+}
+
+#[test]
+#[should_panic(expected = "destination and source slices have different lengths")]
+fn uninit_write_slice_cloned_panic_gt() {
+ let mut dst = [MaybeUninit::uninit(); 64];
+ let src = [0; 128];
+
+ MaybeUninit::write_slice_cloned(&mut dst, &src);
+}
+
+#[test]
+#[cfg(panic = "unwind")]
+fn uninit_write_slice_cloned_mid_panic() {
+ use std::panic;
+
+ enum IncrementOrPanic {
+ Increment(Rc<()>),
+ ExpectedPanic,
+ UnexpectedPanic,
+ }
+
+ impl Clone for IncrementOrPanic {
+ fn clone(&self) -> Self {
+ match self {
+ Self::Increment(rc) => Self::Increment(rc.clone()),
+ Self::ExpectedPanic => panic!("expected panic on clone"),
+ Self::UnexpectedPanic => panic!("unexpected panic on clone"),
+ }
+ }
+ }
+
+ let rc = Rc::new(());
+
+ let mut dst = [
+ MaybeUninit::uninit(),
+ MaybeUninit::uninit(),
+ MaybeUninit::uninit(),
+ MaybeUninit::uninit(),
+ ];
+
+ let src = [
+ IncrementOrPanic::Increment(rc.clone()),
+ IncrementOrPanic::Increment(rc.clone()),
+ IncrementOrPanic::ExpectedPanic,
+ IncrementOrPanic::UnexpectedPanic,
+ ];
+
+ let err = panic::catch_unwind(panic::AssertUnwindSafe(|| {
+ MaybeUninit::write_slice_cloned(&mut dst, &src);
+ }));
+
+ drop(src);
+
+ match err {
+ Ok(_) => unreachable!(),
+ Err(payload) => {
+ payload
+ .downcast::<&'static str>()
+ .and_then(|s| if *s == "expected panic on clone" { Ok(s) } else { Err(s) })
+ .unwrap_or_else(|p| panic::resume_unwind(p));
+
+ assert_eq!(Rc::strong_count(&rc), 1)
+ }
+ }
+}
+
+#[test]
+fn uninit_write_slice_cloned_no_drop() {
+ #[derive(Clone)]
+ struct Bomb;
+
+ impl Drop for Bomb {
+ fn drop(&mut self) {
+ panic!("dropped a bomb! kaboom")
+ }
+ }
+
+ let mut dst = [MaybeUninit::uninit()];
+ let src = [Bomb];
+
+ MaybeUninit::write_slice_cloned(&mut dst, &src);
+
+ forget(src);
+}
+
+#[test]
+fn uninit_const_assume_init_read() {
+ const FOO: u32 = unsafe { MaybeUninit::new(42).assume_init_read() };
+ assert_eq!(FOO, 42);
+}
+
+#[test]
+fn const_maybe_uninit() {
+ use std::ptr;
+
+ #[derive(Debug, PartialEq)]
+ struct Foo {
+ x: u8,
+ y: u8,
+ }
+
+ const FIELD_BY_FIELD: Foo = unsafe {
+ let mut val = MaybeUninit::uninit();
+ init_y(&mut val); // order shouldn't matter
+ init_x(&mut val);
+ val.assume_init()
+ };
+
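+    // `addr_of_mut!` writes through a raw pointer, so no reference to the
+    // still-uninitialized `Foo` is ever created.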
+ const fn init_x(foo: &mut MaybeUninit<Foo>) {
+ unsafe {
+ *ptr::addr_of_mut!((*foo.as_mut_ptr()).x) = 1;
+ }
+ }
+
+ const fn init_y(foo: &mut MaybeUninit<Foo>) {
+ unsafe {
+ *ptr::addr_of_mut!((*foo.as_mut_ptr()).y) = 2;
+ }
+ }
+
+ assert_eq!(FIELD_BY_FIELD, Foo { x: 1, y: 2 });
+}
diff --git a/library/core/tests/nonzero.rs b/library/core/tests/nonzero.rs
new file mode 100644
index 000000000..a0ca919a8
--- /dev/null
+++ b/library/core/tests/nonzero.rs
@@ -0,0 +1,336 @@
+use core::convert::TryFrom;
+use core::num::{
+ IntErrorKind, NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize,
+ NonZeroU128, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize,
+};
+use core::option::Option::{self, None, Some};
+use std::mem::size_of;
+
+#[test]
+fn test_create_nonzero_instance() {
+ let _a = unsafe { NonZeroU32::new_unchecked(21) };
+}
+
+#[test]
+fn test_size_nonzero_in_option() {
+ assert_eq!(size_of::<NonZeroU32>(), size_of::<Option<NonZeroU32>>());
+ assert_eq!(size_of::<NonZeroI32>(), size_of::<Option<NonZeroI32>>());
+}
+
+#[test]
+fn test_match_on_nonzero_option() {
+ let a = Some(unsafe { NonZeroU32::new_unchecked(42) });
+ match a {
+ Some(val) => assert_eq!(val.get(), 42),
+ None => panic!("unexpected None while matching on Some(NonZeroU32(_))"),
+ }
+
+ match unsafe { Some(NonZeroU32::new_unchecked(43)) } {
+ Some(val) => assert_eq!(val.get(), 43),
+ None => panic!("unexpected None while matching on Some(NonZeroU32(_))"),
+ }
+}
+
+#[test]
+fn test_match_option_empty_vec() {
+ let a: Option<Vec<isize>> = Some(vec![]);
+ match a {
+ None => panic!("unexpected None while matching on Some(vec![])"),
+ _ => {}
+ }
+}
+
+#[test]
+fn test_match_option_vec() {
+ let a = Some(vec![1, 2, 3, 4]);
+ match a {
+ Some(v) => assert_eq!(v, [1, 2, 3, 4]),
+ None => panic!("unexpected None while matching on Some(vec![1, 2, 3, 4])"),
+ }
+}
+
+#[test]
+fn test_match_option_rc() {
+ use std::rc::Rc;
+
+ let five = Rc::new(5);
+ match Some(five) {
+ Some(r) => assert_eq!(*r, 5),
+ None => panic!("unexpected None while matching on Some(Rc::new(5))"),
+ }
+}
+
+#[test]
+fn test_match_option_arc() {
+ use std::sync::Arc;
+
+ let five = Arc::new(5);
+ match Some(five) {
+ Some(a) => assert_eq!(*a, 5),
+ None => panic!("unexpected None while matching on Some(Arc::new(5))"),
+ }
+}
+
+#[test]
+fn test_match_option_empty_string() {
+ let a = Some(String::new());
+ match a {
+ None => panic!("unexpected None while matching on Some(String::new())"),
+ _ => {}
+ }
+}
+
+#[test]
+fn test_match_option_string() {
+ let five = "Five".to_string();
+ match Some(five) {
+ Some(s) => assert_eq!(s, "Five"),
+ None => panic!("{}", "unexpected None while matching on Some(String { ... })"),
+ }
+}
+
+mod atom {
+ use core::num::NonZeroU32;
+
+ #[derive(PartialEq, Eq)]
+ pub struct Atom {
+ index: NonZeroU32, // private
+ }
+ pub const FOO_ATOM: Atom = Atom { index: unsafe { NonZeroU32::new_unchecked(7) } };
+}
+
+macro_rules! atom {
+ ("foo") => {
+ atom::FOO_ATOM
+ };
+}
+
+#[test]
+fn test_match_nonzero_const_pattern() {
+ match atom!("foo") {
+        // Using the const item as a pattern is supported by the compiler:
+ atom!("foo") => {}
+ _ => panic!("Expected the const item as a pattern to match."),
+ }
+}
+
+#[test]
+fn test_from_nonzero() {
+ let nz = NonZeroU32::new(1).unwrap();
+ let num: u32 = nz.into();
+ assert_eq!(num, 1u32);
+}
+
+#[test]
+fn test_from_signed_nonzero() {
+ let nz = NonZeroI32::new(1).unwrap();
+ let num: i32 = nz.into();
+ assert_eq!(num, 1i32);
+}
+
+#[test]
+fn test_from_str() {
+ assert_eq!("123".parse::<NonZeroU8>(), Ok(NonZeroU8::new(123).unwrap()));
+ assert_eq!("0".parse::<NonZeroU8>().err().map(|e| e.kind().clone()), Some(IntErrorKind::Zero));
+ assert_eq!(
+ "-1".parse::<NonZeroU8>().err().map(|e| e.kind().clone()),
+ Some(IntErrorKind::InvalidDigit)
+ );
+ assert_eq!(
+ "-129".parse::<NonZeroI8>().err().map(|e| e.kind().clone()),
+ Some(IntErrorKind::NegOverflow)
+ );
+ assert_eq!(
+ "257".parse::<NonZeroU8>().err().map(|e| e.kind().clone()),
+ Some(IntErrorKind::PosOverflow)
+ );
+}
+
+#[test]
+fn test_nonzero_bitor() {
+ let nz_alt = NonZeroU8::new(0b1010_1010).unwrap();
+ let nz_low = NonZeroU8::new(0b0000_1111).unwrap();
+
+ let both_nz: NonZeroU8 = nz_alt | nz_low;
+ assert_eq!(both_nz.get(), 0b1010_1111);
+
+ let rhs_int: NonZeroU8 = nz_low | 0b1100_0000u8;
+ assert_eq!(rhs_int.get(), 0b1100_1111);
+
+ let rhs_zero: NonZeroU8 = nz_alt | 0u8;
+ assert_eq!(rhs_zero.get(), 0b1010_1010);
+
+ let lhs_int: NonZeroU8 = 0b0110_0110u8 | nz_alt;
+ assert_eq!(lhs_int.get(), 0b1110_1110);
+
+ let lhs_zero: NonZeroU8 = 0u8 | nz_low;
+ assert_eq!(lhs_zero.get(), 0b0000_1111);
+}
+
+#[test]
+fn test_nonzero_bitor_assign() {
+ let mut target = NonZeroU8::new(0b1010_1010).unwrap();
+
+ target |= NonZeroU8::new(0b0000_1111).unwrap();
+ assert_eq!(target.get(), 0b1010_1111);
+
+ target |= 0b0001_0000;
+ assert_eq!(target.get(), 0b1011_1111);
+
+ target |= 0;
+ assert_eq!(target.get(), 0b1011_1111);
+}
+
+#[test]
+fn test_nonzero_from_int_on_success() {
+ assert_eq!(NonZeroU8::try_from(5), Ok(NonZeroU8::new(5).unwrap()));
+ assert_eq!(NonZeroU32::try_from(5), Ok(NonZeroU32::new(5).unwrap()));
+
+ assert_eq!(NonZeroI8::try_from(-5), Ok(NonZeroI8::new(-5).unwrap()));
+ assert_eq!(NonZeroI32::try_from(-5), Ok(NonZeroI32::new(-5).unwrap()));
+}
+
+#[test]
+fn test_nonzero_from_int_on_err() {
+ assert!(NonZeroU8::try_from(0).is_err());
+ assert!(NonZeroU32::try_from(0).is_err());
+
+ assert!(NonZeroI8::try_from(0).is_err());
+ assert!(NonZeroI32::try_from(0).is_err());
+}
+
+#[test]
+fn nonzero_const() {
+    // Test that the methods of the `NonZero*` types are usable in a const context.
+    // Note: this only tests `NonZeroU8`.
+
+ const NONZERO_U8: NonZeroU8 = unsafe { NonZeroU8::new_unchecked(5) };
+
+ const GET: u8 = NONZERO_U8.get();
+ assert_eq!(GET, 5);
+
+ const ZERO: Option<NonZeroU8> = NonZeroU8::new(0);
+ assert!(ZERO.is_none());
+
+ const ONE: Option<NonZeroU8> = NonZeroU8::new(1);
+ assert!(ONE.is_some());
+
+ const FROM_NONZERO_U8: u8 = u8::from(NONZERO_U8);
+ assert_eq!(FROM_NONZERO_U8, 5);
+
+ const NONZERO_CONVERT: NonZeroU32 = NonZeroU32::from(NONZERO_U8);
+ assert_eq!(NONZERO_CONVERT.get(), 5);
+}
+
+#[test]
+fn nonzero_leading_zeros() {
+ assert_eq!(NonZeroU8::new(1).unwrap().leading_zeros(), 7);
+ assert_eq!(NonZeroI8::new(1).unwrap().leading_zeros(), 7);
+ assert_eq!(NonZeroU16::new(1).unwrap().leading_zeros(), 15);
+ assert_eq!(NonZeroI16::new(1).unwrap().leading_zeros(), 15);
+ assert_eq!(NonZeroU32::new(1).unwrap().leading_zeros(), 31);
+ assert_eq!(NonZeroI32::new(1).unwrap().leading_zeros(), 31);
+ assert_eq!(NonZeroU64::new(1).unwrap().leading_zeros(), 63);
+ assert_eq!(NonZeroI64::new(1).unwrap().leading_zeros(), 63);
+ assert_eq!(NonZeroU128::new(1).unwrap().leading_zeros(), 127);
+ assert_eq!(NonZeroI128::new(1).unwrap().leading_zeros(), 127);
+ assert_eq!(NonZeroUsize::new(1).unwrap().leading_zeros(), usize::BITS - 1);
+ assert_eq!(NonZeroIsize::new(1).unwrap().leading_zeros(), usize::BITS - 1);
+
+ assert_eq!(NonZeroU8::new(u8::MAX >> 2).unwrap().leading_zeros(), 2);
+ assert_eq!(NonZeroI8::new((u8::MAX >> 2) as i8).unwrap().leading_zeros(), 2);
+ assert_eq!(NonZeroU16::new(u16::MAX >> 2).unwrap().leading_zeros(), 2);
+ assert_eq!(NonZeroI16::new((u16::MAX >> 2) as i16).unwrap().leading_zeros(), 2);
+ assert_eq!(NonZeroU32::new(u32::MAX >> 2).unwrap().leading_zeros(), 2);
+ assert_eq!(NonZeroI32::new((u32::MAX >> 2) as i32).unwrap().leading_zeros(), 2);
+ assert_eq!(NonZeroU64::new(u64::MAX >> 2).unwrap().leading_zeros(), 2);
+ assert_eq!(NonZeroI64::new((u64::MAX >> 2) as i64).unwrap().leading_zeros(), 2);
+ assert_eq!(NonZeroU128::new(u128::MAX >> 2).unwrap().leading_zeros(), 2);
+ assert_eq!(NonZeroI128::new((u128::MAX >> 2) as i128).unwrap().leading_zeros(), 2);
+ assert_eq!(NonZeroUsize::new(usize::MAX >> 2).unwrap().leading_zeros(), 2);
+ assert_eq!(NonZeroIsize::new((usize::MAX >> 2) as isize).unwrap().leading_zeros(), 2);
+
+ assert_eq!(NonZeroU8::new(u8::MAX).unwrap().leading_zeros(), 0);
+ assert_eq!(NonZeroI8::new(-1i8).unwrap().leading_zeros(), 0);
+ assert_eq!(NonZeroU16::new(u16::MAX).unwrap().leading_zeros(), 0);
+ assert_eq!(NonZeroI16::new(-1i16).unwrap().leading_zeros(), 0);
+ assert_eq!(NonZeroU32::new(u32::MAX).unwrap().leading_zeros(), 0);
+ assert_eq!(NonZeroI32::new(-1i32).unwrap().leading_zeros(), 0);
+ assert_eq!(NonZeroU64::new(u64::MAX).unwrap().leading_zeros(), 0);
+ assert_eq!(NonZeroI64::new(-1i64).unwrap().leading_zeros(), 0);
+ assert_eq!(NonZeroU128::new(u128::MAX).unwrap().leading_zeros(), 0);
+ assert_eq!(NonZeroI128::new(-1i128).unwrap().leading_zeros(), 0);
+ assert_eq!(NonZeroUsize::new(usize::MAX).unwrap().leading_zeros(), 0);
+ assert_eq!(NonZeroIsize::new(-1isize).unwrap().leading_zeros(), 0);
+
+ const LEADING_ZEROS: u32 = NonZeroU16::new(1).unwrap().leading_zeros();
+ assert_eq!(LEADING_ZEROS, 15);
+}
+
+#[test]
+fn nonzero_trailing_zeros() {
+ assert_eq!(NonZeroU8::new(1).unwrap().trailing_zeros(), 0);
+ assert_eq!(NonZeroI8::new(1).unwrap().trailing_zeros(), 0);
+ assert_eq!(NonZeroU16::new(1).unwrap().trailing_zeros(), 0);
+ assert_eq!(NonZeroI16::new(1).unwrap().trailing_zeros(), 0);
+ assert_eq!(NonZeroU32::new(1).unwrap().trailing_zeros(), 0);
+ assert_eq!(NonZeroI32::new(1).unwrap().trailing_zeros(), 0);
+ assert_eq!(NonZeroU64::new(1).unwrap().trailing_zeros(), 0);
+ assert_eq!(NonZeroI64::new(1).unwrap().trailing_zeros(), 0);
+ assert_eq!(NonZeroU128::new(1).unwrap().trailing_zeros(), 0);
+ assert_eq!(NonZeroI128::new(1).unwrap().trailing_zeros(), 0);
+ assert_eq!(NonZeroUsize::new(1).unwrap().trailing_zeros(), 0);
+ assert_eq!(NonZeroIsize::new(1).unwrap().trailing_zeros(), 0);
+
+ assert_eq!(NonZeroU8::new(1 << 2).unwrap().trailing_zeros(), 2);
+ assert_eq!(NonZeroI8::new(1 << 2).unwrap().trailing_zeros(), 2);
+ assert_eq!(NonZeroU16::new(1 << 2).unwrap().trailing_zeros(), 2);
+ assert_eq!(NonZeroI16::new(1 << 2).unwrap().trailing_zeros(), 2);
+ assert_eq!(NonZeroU32::new(1 << 2).unwrap().trailing_zeros(), 2);
+ assert_eq!(NonZeroI32::new(1 << 2).unwrap().trailing_zeros(), 2);
+ assert_eq!(NonZeroU64::new(1 << 2).unwrap().trailing_zeros(), 2);
+ assert_eq!(NonZeroI64::new(1 << 2).unwrap().trailing_zeros(), 2);
+ assert_eq!(NonZeroU128::new(1 << 2).unwrap().trailing_zeros(), 2);
+ assert_eq!(NonZeroI128::new(1 << 2).unwrap().trailing_zeros(), 2);
+ assert_eq!(NonZeroUsize::new(1 << 2).unwrap().trailing_zeros(), 2);
+ assert_eq!(NonZeroIsize::new(1 << 2).unwrap().trailing_zeros(), 2);
+
+ assert_eq!(NonZeroU8::new(1 << 7).unwrap().trailing_zeros(), 7);
+ assert_eq!(NonZeroI8::new(1 << 7).unwrap().trailing_zeros(), 7);
+ assert_eq!(NonZeroU16::new(1 << 15).unwrap().trailing_zeros(), 15);
+ assert_eq!(NonZeroI16::new(1 << 15).unwrap().trailing_zeros(), 15);
+ assert_eq!(NonZeroU32::new(1 << 31).unwrap().trailing_zeros(), 31);
+ assert_eq!(NonZeroI32::new(1 << 31).unwrap().trailing_zeros(), 31);
+ assert_eq!(NonZeroU64::new(1 << 63).unwrap().trailing_zeros(), 63);
+ assert_eq!(NonZeroI64::new(1 << 63).unwrap().trailing_zeros(), 63);
+ assert_eq!(NonZeroU128::new(1 << 127).unwrap().trailing_zeros(), 127);
+ assert_eq!(NonZeroI128::new(1 << 127).unwrap().trailing_zeros(), 127);
+
+ assert_eq!(
+ NonZeroUsize::new(1 << (usize::BITS - 1)).unwrap().trailing_zeros(),
+ usize::BITS - 1
+ );
+ assert_eq!(
+ NonZeroIsize::new(1 << (usize::BITS - 1)).unwrap().trailing_zeros(),
+ usize::BITS - 1
+ );
+
+ const TRAILING_ZEROS: u32 = NonZeroU16::new(1 << 2).unwrap().trailing_zeros();
+ assert_eq!(TRAILING_ZEROS, 2);
+}
+
+#[test]
+fn test_nonzero_uint_div() {
+ let nz = NonZeroU32::new(1).unwrap();
+
+ let x: u32 = 42u32 / nz;
+ assert_eq!(x, 42u32);
+}
+
+#[test]
+fn test_nonzero_uint_rem() {
+ let nz = NonZeroU32::new(10).unwrap();
+
+ let x: u32 = 42u32 % nz;
+ assert_eq!(x, 2u32);
+}
diff --git a/library/core/tests/num/bignum.rs b/library/core/tests/num/bignum.rs
new file mode 100644
index 000000000..416e7cea7
--- /dev/null
+++ b/library/core/tests/num/bignum.rs
@@ -0,0 +1,276 @@
+use core::num::bignum::tests::Big8x3 as Big;
+use core::num::bignum::Big32x40;
+
+#[test]
+#[should_panic]
+fn test_from_u64_overflow() {
+ Big::from_u64(0x1000000);
+}
+
+#[test]
+fn test_add() {
+ assert_eq!(*Big::from_small(3).add(&Big::from_small(4)), Big::from_small(7));
+ assert_eq!(*Big::from_small(3).add(&Big::from_small(0)), Big::from_small(3));
+ assert_eq!(*Big::from_small(0).add(&Big::from_small(3)), Big::from_small(3));
+ assert_eq!(*Big::from_small(3).add(&Big::from_u64(0xfffe)), Big::from_u64(0x10001));
+ assert_eq!(*Big::from_u64(0xfedc).add(&Big::from_u64(0x789)), Big::from_u64(0x10665));
+ assert_eq!(*Big::from_u64(0x789).add(&Big::from_u64(0xfedc)), Big::from_u64(0x10665));
+}
+
+#[test]
+#[should_panic]
+fn test_add_overflow_1() {
+ Big::from_small(1).add(&Big::from_u64(0xffffff));
+}
+
+#[test]
+#[should_panic]
+fn test_add_overflow_2() {
+ Big::from_u64(0xffffff).add(&Big::from_small(1));
+}
+
+#[test]
+fn test_add_small() {
+ assert_eq!(*Big::from_small(3).add_small(4), Big::from_small(7));
+ assert_eq!(*Big::from_small(3).add_small(0), Big::from_small(3));
+ assert_eq!(*Big::from_small(0).add_small(3), Big::from_small(3));
+ assert_eq!(*Big::from_small(7).add_small(250), Big::from_u64(257));
+ assert_eq!(*Big::from_u64(0x7fff).add_small(1), Big::from_u64(0x8000));
+ assert_eq!(*Big::from_u64(0x2ffe).add_small(0x35), Big::from_u64(0x3033));
+ assert_eq!(*Big::from_small(0xdc).add_small(0x89), Big::from_u64(0x165));
+}
+
+#[test]
+#[should_panic]
+fn test_add_small_overflow() {
+ Big::from_u64(0xffffff).add_small(1);
+}
+
+#[test]
+fn test_sub() {
+ assert_eq!(*Big::from_small(7).sub(&Big::from_small(4)), Big::from_small(3));
+ assert_eq!(*Big::from_u64(0x10665).sub(&Big::from_u64(0x789)), Big::from_u64(0xfedc));
+ assert_eq!(*Big::from_u64(0x10665).sub(&Big::from_u64(0xfedc)), Big::from_u64(0x789));
+ assert_eq!(*Big::from_u64(0x10665).sub(&Big::from_u64(0x10664)), Big::from_small(1));
+ assert_eq!(*Big::from_u64(0x10665).sub(&Big::from_u64(0x10665)), Big::from_small(0));
+}
+
+#[test]
+#[should_panic]
+fn test_sub_underflow_1() {
+ Big::from_u64(0x10665).sub(&Big::from_u64(0x10666));
+}
+
+#[test]
+#[should_panic]
+fn test_sub_underflow_2() {
+ Big::from_small(0).sub(&Big::from_u64(0x123456));
+}
+
+#[test]
+fn test_mul_small() {
+ assert_eq!(*Big::from_small(7).mul_small(5), Big::from_small(35));
+ assert_eq!(*Big::from_small(0xff).mul_small(0xff), Big::from_u64(0xfe01));
+ assert_eq!(*Big::from_u64(0xffffff / 13).mul_small(13), Big::from_u64(0xffffff));
+}
+
+#[test]
+#[should_panic]
+fn test_mul_small_overflow() {
+ Big::from_u64(0x800000).mul_small(2);
+}
+
+#[test]
+fn test_mul_pow2() {
+ assert_eq!(*Big::from_small(0x7).mul_pow2(4), Big::from_small(0x70));
+ assert_eq!(*Big::from_small(0xff).mul_pow2(1), Big::from_u64(0x1fe));
+ assert_eq!(*Big::from_small(0xff).mul_pow2(12), Big::from_u64(0xff000));
+ assert_eq!(*Big::from_small(0x1).mul_pow2(23), Big::from_u64(0x800000));
+ assert_eq!(*Big::from_u64(0x123).mul_pow2(0), Big::from_u64(0x123));
+ assert_eq!(*Big::from_u64(0x123).mul_pow2(7), Big::from_u64(0x9180));
+ assert_eq!(*Big::from_u64(0x123).mul_pow2(15), Big::from_u64(0x918000));
+ assert_eq!(*Big::from_small(0).mul_pow2(23), Big::from_small(0));
+}
+
+#[test]
+#[should_panic]
+fn test_mul_pow2_overflow_1() {
+ Big::from_u64(0x1).mul_pow2(24);
+}
+
+#[test]
+#[should_panic]
+fn test_mul_pow2_overflow_2() {
+ Big::from_u64(0x123).mul_pow2(16);
+}
+
+#[test]
+fn test_mul_pow5() {
+ assert_eq!(*Big::from_small(42).mul_pow5(0), Big::from_small(42));
+ assert_eq!(*Big::from_small(1).mul_pow5(2), Big::from_small(25));
+ assert_eq!(*Big::from_small(1).mul_pow5(4), Big::from_u64(25 * 25));
+ assert_eq!(*Big::from_small(4).mul_pow5(3), Big::from_u64(500));
+ assert_eq!(*Big::from_small(140).mul_pow5(2), Big::from_u64(25 * 140));
+ assert_eq!(*Big::from_small(25).mul_pow5(1), Big::from_small(125));
+ assert_eq!(*Big::from_small(125).mul_pow5(7), Big::from_u64(9765625));
+ assert_eq!(*Big::from_small(0).mul_pow5(127), Big::from_small(0));
+}
+
+#[test]
+#[should_panic]
+fn test_mul_pow5_overflow_1() {
+ Big::from_small(1).mul_pow5(12);
+}
+
+#[test]
+#[should_panic]
+fn test_mul_pow5_overflow_2() {
+ Big::from_small(230).mul_pow5(8);
+}
+
+#[test]
+fn test_mul_digits() {
+ assert_eq!(*Big::from_small(3).mul_digits(&[5]), Big::from_small(15));
+ assert_eq!(*Big::from_small(0xff).mul_digits(&[0xff]), Big::from_u64(0xfe01));
+ assert_eq!(*Big::from_u64(0x123).mul_digits(&[0x56, 0x4]), Big::from_u64(0x4edc2));
+ assert_eq!(*Big::from_u64(0x12345).mul_digits(&[0x67]), Big::from_u64(0x7530c3));
+ assert_eq!(*Big::from_small(0x12).mul_digits(&[0x67, 0x45, 0x3]), Big::from_u64(0x3ae13e));
+ assert_eq!(*Big::from_u64(0xffffff / 13).mul_digits(&[13]), Big::from_u64(0xffffff));
+ assert_eq!(*Big::from_small(13).mul_digits(&[0x3b, 0xb1, 0x13]), Big::from_u64(0xffffff));
+}
+
+#[test]
+#[should_panic]
+fn test_mul_digits_overflow_1() {
+ Big::from_u64(0x800000).mul_digits(&[2]);
+}
+
+#[test]
+#[should_panic]
+fn test_mul_digits_overflow_2() {
+ Big::from_u64(0x1000).mul_digits(&[0, 0x10]);
+}
+
+#[test]
+fn test_div_rem_small() {
+ let as_val = |(q, r): (&mut Big, u8)| (q.clone(), r);
+ assert_eq!(as_val(Big::from_small(0xff).div_rem_small(15)), (Big::from_small(17), 0));
+ assert_eq!(as_val(Big::from_small(0xff).div_rem_small(16)), (Big::from_small(15), 15));
+ assert_eq!(as_val(Big::from_small(3).div_rem_small(40)), (Big::from_small(0), 3));
+ assert_eq!(
+ as_val(Big::from_u64(0xffffff).div_rem_small(123)),
+ (Big::from_u64(0xffffff / 123), (0xffffffu64 % 123) as u8)
+ );
+ assert_eq!(
+ as_val(Big::from_u64(0x10000).div_rem_small(123)),
+ (Big::from_u64(0x10000 / 123), (0x10000u64 % 123) as u8)
+ );
+}
+
+#[test]
+fn test_div_rem() {
+ fn div_rem(n: u64, d: u64) -> (Big, Big) {
+ let mut q = Big::from_small(42);
+ let mut r = Big::from_small(42);
+ Big::from_u64(n).div_rem(&Big::from_u64(d), &mut q, &mut r);
+ (q, r)
+ }
+ assert_eq!(div_rem(1, 1), (Big::from_small(1), Big::from_small(0)));
+ assert_eq!(div_rem(4, 3), (Big::from_small(1), Big::from_small(1)));
+ assert_eq!(div_rem(1, 7), (Big::from_small(0), Big::from_small(1)));
+ assert_eq!(div_rem(45, 9), (Big::from_small(5), Big::from_small(0)));
+ assert_eq!(div_rem(103, 9), (Big::from_small(11), Big::from_small(4)));
+ assert_eq!(div_rem(123456, 77), (Big::from_u64(1603), Big::from_small(25)));
+ assert_eq!(div_rem(0xffff, 1), (Big::from_u64(0xffff), Big::from_small(0)));
+ assert_eq!(div_rem(0xeeee, 0xffff), (Big::from_small(0), Big::from_u64(0xeeee)));
+ assert_eq!(div_rem(2_000_000, 2), (Big::from_u64(1_000_000), Big::from_u64(0)));
+}
+
+#[test]
+fn test_is_zero() {
+ assert!(Big::from_small(0).is_zero());
+ assert!(!Big::from_small(3).is_zero());
+ assert!(!Big::from_u64(0x123).is_zero());
+ assert!(!Big::from_u64(0xffffff).sub(&Big::from_u64(0xfffffe)).is_zero());
+ assert!(Big::from_u64(0xffffff).sub(&Big::from_u64(0xffffff)).is_zero());
+}
+
+#[test]
+fn test_get_bit() {
+ let x = Big::from_small(0b1101);
+ assert_eq!(x.get_bit(0), 1);
+ assert_eq!(x.get_bit(1), 0);
+ assert_eq!(x.get_bit(2), 1);
+ assert_eq!(x.get_bit(3), 1);
+ let y = Big::from_u64(1 << 15);
+ assert_eq!(y.get_bit(14), 0);
+ assert_eq!(y.get_bit(15), 1);
+ assert_eq!(y.get_bit(16), 0);
+}
+
+#[test]
+#[should_panic]
+fn test_get_bit_out_of_range() {
+ Big::from_small(42).get_bit(24);
+}
+
+#[test]
+fn test_bit_length() {
+ for i in 0..8 * 3 {
+ // 010000...000
+ assert_eq!(Big::from_small(1).mul_pow2(i).bit_length(), i + 1);
+ }
+ for i in 1..8 * 3 - 1 {
+ // 010000...001
+ assert_eq!(Big::from_small(1).mul_pow2(i).add(&Big::from_small(1)).bit_length(), i + 1);
+ // 110000...000
+ assert_eq!(Big::from_small(3).mul_pow2(i).bit_length(), i + 2);
+ }
+ assert_eq!(Big::from_small(0).bit_length(), 0);
+ assert_eq!(Big::from_small(1).bit_length(), 1);
+ assert_eq!(Big::from_small(5).bit_length(), 3);
+ assert_eq!(Big::from_small(0x18).bit_length(), 5);
+ assert_eq!(Big::from_u64(0x4073).bit_length(), 15);
+ assert_eq!(Big::from_u64(0xffffff).bit_length(), 24);
+}
+
+#[test]
+fn test_bit_length_32x40() {
+ for i in 0..32 * 40 {
+ // 010000...000
+ assert_eq!(Big32x40::from_small(1).mul_pow2(i).bit_length(), i + 1);
+ }
+ for i in 1..32 * 40 - 1 {
+ // 010000...001
+ assert_eq!(
+ Big32x40::from_small(1).mul_pow2(i).add(&Big32x40::from_small(1)).bit_length(),
+ i + 1
+ );
+ // 110000...000
+ assert_eq!(Big32x40::from_small(3).mul_pow2(i).bit_length(), i + 2);
+ }
+ assert_eq!(Big32x40::from_small(0).bit_length(), 0);
+ assert_eq!(Big32x40::from_small(1).bit_length(), 1);
+ assert_eq!(Big32x40::from_small(5).bit_length(), 3);
+ assert_eq!(Big32x40::from_small(0x18).bit_length(), 5);
+ assert_eq!(Big32x40::from_u64(0x4073).bit_length(), 15);
+ assert_eq!(Big32x40::from_u64(0xffffff).bit_length(), 24);
+ assert_eq!(Big32x40::from_u64(0xffffffffffffffff).bit_length(), 64);
+}
+
+#[test]
+fn test_ord() {
+ assert!(Big::from_u64(0) < Big::from_u64(0xffffff));
+ assert!(Big::from_u64(0x102) < Big::from_u64(0x201));
+}
+
+#[test]
+fn test_fmt() {
+ assert_eq!(format!("{:?}", Big::from_u64(0)), "0x0");
+ assert_eq!(format!("{:?}", Big::from_u64(0x1)), "0x1");
+ assert_eq!(format!("{:?}", Big::from_u64(0x12)), "0x12");
+ assert_eq!(format!("{:?}", Big::from_u64(0x123)), "0x1_23");
+ assert_eq!(format!("{:?}", Big::from_u64(0x1234)), "0x12_34");
+ assert_eq!(format!("{:?}", Big::from_u64(0x12345)), "0x1_23_45");
+ assert_eq!(format!("{:?}", Big::from_u64(0x123456)), "0x12_34_56");
+}
diff --git a/library/core/tests/num/const_from.rs b/library/core/tests/num/const_from.rs
new file mode 100644
index 000000000..aca18ef39
--- /dev/null
+++ b/library/core/tests/num/const_from.rs
@@ -0,0 +1,25 @@
+#[test]
+fn from() {
+ use core::convert::TryFrom;
+ use core::num::TryFromIntError;
+
+ // From
+ const FROM: i64 = i64::from(1i32);
+ assert_eq!(FROM, 1i64);
+
+ // From int to float
+ const FROM_F64: f64 = f64::from(42u8);
+ assert_eq!(FROM_F64, 42f64);
+
+ // Upper bounded
+ const U8_FROM_U16: Result<u8, TryFromIntError> = u8::try_from(1u16);
+ assert_eq!(U8_FROM_U16, Ok(1u8));
+
+ // Both bounded
+ const I8_FROM_I16: Result<i8, TryFromIntError> = i8::try_from(1i16);
+ assert_eq!(I8_FROM_I16, Ok(1i8));
+
+ // Lower bounded
+ const I16_FROM_U16: Result<i16, TryFromIntError> = i16::try_from(1u16);
+ assert_eq!(I16_FROM_U16, Ok(1i16));
+}
diff --git a/library/core/tests/num/dec2flt/float.rs b/library/core/tests/num/dec2flt/float.rs
new file mode 100644
index 000000000..7a9587a18
--- /dev/null
+++ b/library/core/tests/num/dec2flt/float.rs
@@ -0,0 +1,33 @@
+use core::num::dec2flt::float::RawFloat;
+
+#[test]
+fn test_f32_integer_decode() {
+ assert_eq!(3.14159265359f32.integer_decode(), (13176795, -22, 1));
+ assert_eq!((-8573.5918555f32).integer_decode(), (8779358, -10, -1));
+ assert_eq!(2f32.powf(100.0).integer_decode(), (8388608, 77, 1));
+ assert_eq!(0f32.integer_decode(), (0, -150, 1));
+ assert_eq!((-0f32).integer_decode(), (0, -150, -1));
+ assert_eq!(f32::INFINITY.integer_decode(), (8388608, 105, 1));
+ assert_eq!(f32::NEG_INFINITY.integer_decode(), (8388608, 105, -1));
+
+ // Ignore the "sign" (quiet / signalling flag) of NAN.
+ // It can vary between runtime operations and LLVM folding.
+ let (nan_m, nan_e, _nan_s) = f32::NAN.integer_decode();
+ assert_eq!((nan_m, nan_e), (12582912, 105));
+}
+
+#[test]
+fn test_f64_integer_decode() {
+ assert_eq!(3.14159265359f64.integer_decode(), (7074237752028906, -51, 1));
+ assert_eq!((-8573.5918555f64).integer_decode(), (4713381968463931, -39, -1));
+ assert_eq!(2f64.powf(100.0).integer_decode(), (4503599627370496, 48, 1));
+ assert_eq!(0f64.integer_decode(), (0, -1075, 1));
+ assert_eq!((-0f64).integer_decode(), (0, -1075, -1));
+ assert_eq!(f64::INFINITY.integer_decode(), (4503599627370496, 972, 1));
+ assert_eq!(f64::NEG_INFINITY.integer_decode(), (4503599627370496, 972, -1));
+
+ // Ignore the "sign" (quiet / signalling flag) of NAN.
+ // It can vary between runtime operations and LLVM folding.
+ let (nan_m, nan_e, _nan_s) = f64::NAN.integer_decode();
+ assert_eq!((nan_m, nan_e), (6755399441055744, 972));
+}
diff --git a/library/core/tests/num/dec2flt/lemire.rs b/library/core/tests/num/dec2flt/lemire.rs
new file mode 100644
index 000000000..f71bbb7c7
--- /dev/null
+++ b/library/core/tests/num/dec2flt/lemire.rs
@@ -0,0 +1,53 @@
+use core::num::dec2flt::lemire::compute_float;
+
+fn compute_float32(q: i64, w: u64) -> (i32, u64) {
+ let fp = compute_float::<f32>(q, w);
+ (fp.e, fp.f)
+}
+
+fn compute_float64(q: i64, w: u64) -> (i32, u64) {
+ let fp = compute_float::<f64>(q, w);
+ (fp.e, fp.f)
+}
+
+#[test]
+fn compute_float_f32_rounding() {
+ // These test near-halfway cases for single-precision floats.
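+    // 16777216 == 2^24 is the first integer beyond the 24-bit f32 significand;
+    // 2^24 + 1 lies exactly halfway between 2^24 and 2^24 + 2, and ties round
+    // to the even significand, i.e. back down.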
+ assert_eq!(compute_float32(0, 16777216), (151, 0));
+ assert_eq!(compute_float32(0, 16777217), (151, 0));
+ assert_eq!(compute_float32(0, 16777218), (151, 1));
+ assert_eq!(compute_float32(0, 16777219), (151, 2));
+ assert_eq!(compute_float32(0, 16777220), (151, 2));
+
+    // These repeat the cases above, with digits shifted
+    // from the exponent into the mantissa.
+ assert_eq!(compute_float32(-10, 167772160000000000), (151, 0));
+ assert_eq!(compute_float32(-10, 167772170000000000), (151, 0));
+ assert_eq!(compute_float32(-10, 167772180000000000), (151, 1));
+    // Also check the neighboring values, in case the relevant table entry differs here.
+ assert_eq!(compute_float32(-10, 167772190000000000), (151, 2));
+ assert_eq!(compute_float32(-10, 167772200000000000), (151, 2));
+}
+
+#[test]
+fn compute_float_f64_rounding() {
+ // These test near-halfway cases for double-precision floats.
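+    // 9007199254740992 == 2^53 plays the same role for the 53-bit f64
+    // significand.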
+ assert_eq!(compute_float64(0, 9007199254740992), (1076, 0));
+ assert_eq!(compute_float64(0, 9007199254740993), (1076, 0));
+ assert_eq!(compute_float64(0, 9007199254740994), (1076, 1));
+ assert_eq!(compute_float64(0, 9007199254740995), (1076, 2));
+ assert_eq!(compute_float64(0, 9007199254740996), (1076, 2));
+ assert_eq!(compute_float64(0, 18014398509481984), (1077, 0));
+ assert_eq!(compute_float64(0, 18014398509481986), (1077, 0));
+ assert_eq!(compute_float64(0, 18014398509481988), (1077, 1));
+ assert_eq!(compute_float64(0, 18014398509481990), (1077, 2));
+ assert_eq!(compute_float64(0, 18014398509481992), (1077, 2));
+
+    // These repeat the cases above, with digits shifted
+    // from the exponent into the mantissa.
+ assert_eq!(compute_float64(-3, 9007199254740992000), (1076, 0));
+ assert_eq!(compute_float64(-3, 9007199254740993000), (1076, 0));
+ assert_eq!(compute_float64(-3, 9007199254740994000), (1076, 1));
+ assert_eq!(compute_float64(-3, 9007199254740995000), (1076, 2));
+ assert_eq!(compute_float64(-3, 9007199254740996000), (1076, 2));
+}
diff --git a/library/core/tests/num/dec2flt/mod.rs b/library/core/tests/num/dec2flt/mod.rs
new file mode 100644
index 000000000..c4e105cba
--- /dev/null
+++ b/library/core/tests/num/dec2flt/mod.rs
@@ -0,0 +1,140 @@
+#![allow(overflowing_literals)]
+
+mod float;
+mod lemire;
+mod parse;
+
+// Take a float literal, turn it into a string in various ways (that are all trusted
+// to be correct) and see if those strings are parsed back to the value of the literal.
+// Requires a *polymorphic literal*, i.e., one that can serve as f64 as well as f32.
+macro_rules! test_literal {
+ ($x: expr) => {{
+ let x32: f32 = $x;
+ let x64: f64 = $x;
+ let inputs = &[stringify!($x).into(), format!("{:?}", x64), format!("{:e}", x64)];
+ for input in inputs {
+ assert_eq!(input.parse(), Ok(x64));
+ assert_eq!(input.parse(), Ok(x32));
+ let neg_input = &format!("-{input}");
+ assert_eq!(neg_input.parse(), Ok(-x64));
+ assert_eq!(neg_input.parse(), Ok(-x32));
+ }
+ }};
+}
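+// For example, `test_literal!(0.1)` parses "0.1" (both stringified and via
+// `Debug`) and "1e-1" (via `LowerExp`), plus the negated forms, as f32 and f64.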
+
+#[test]
+fn ordinary() {
+ test_literal!(1.0);
+ test_literal!(3e-5);
+ test_literal!(0.1);
+ test_literal!(12345.);
+ test_literal!(0.9999999);
+ test_literal!(2.2250738585072014e-308);
+}
+
+#[test]
+fn special_code_paths() {
+ test_literal!(36893488147419103229.0); // 2^65 - 3, triggers half-to-even with even significand
+ test_literal!(101e-33); // Triggers the tricky underflow case in AlgorithmM (for f32)
+ test_literal!(1e23); // Triggers AlgorithmR
+ test_literal!(2075e23); // Triggers another path through AlgorithmR
+ test_literal!(8713e-23); // ... and yet another.
+}
+
+#[test]
+fn large() {
+ test_literal!(1e300);
+ test_literal!(123456789.34567e250);
+ test_literal!(943794359898089732078308743689303290943794359843568973207830874368930329.);
+}
+
+#[test]
+fn subnormals() {
+ test_literal!(5e-324);
+ test_literal!(91e-324);
+ test_literal!(1e-322);
+ test_literal!(13245643e-320);
+ test_literal!(2.22507385851e-308);
+ test_literal!(2.1e-308);
+ test_literal!(4.9406564584124654e-324);
+}
+
+#[test]
+fn infinity() {
+ test_literal!(1e400);
+ test_literal!(1e309);
+ test_literal!(2e308);
+ test_literal!(1.7976931348624e308);
+}
+
+#[test]
+fn zero() {
+ test_literal!(0.0);
+ test_literal!(1e-325);
+ test_literal!(1e-326);
+ test_literal!(1e-500);
+}
+
+#[test]
+fn fast_path_correct() {
+ // This number triggers the fast path and is handled incorrectly when compiling on
+ // x86 without SSE2 (i.e., using the x87 FPU stack).
+ test_literal!(1.448997445238699);
+}
+
+#[test]
+fn lonely_dot() {
+ assert!(".".parse::<f32>().is_err());
+ assert!(".".parse::<f64>().is_err());
+}
+
+#[test]
+fn exponentiated_dot() {
+ assert!(".e0".parse::<f32>().is_err());
+ assert!(".e0".parse::<f64>().is_err());
+}
+
+#[test]
+fn lonely_sign() {
+ assert!("+".parse::<f32>().is_err());
+ assert!("-".parse::<f64>().is_err());
+}
+
+#[test]
+fn whitespace() {
+ assert!(" 1.0".parse::<f32>().is_err());
+ assert!("1.0 ".parse::<f64>().is_err());
+}
+
+#[test]
+fn nan() {
+ assert!("NaN".parse::<f32>().unwrap().is_nan());
+ assert!("NaN".parse::<f64>().unwrap().is_nan());
+}
+
+#[test]
+fn inf() {
+ assert_eq!("inf".parse(), Ok(f64::INFINITY));
+ assert_eq!("-inf".parse(), Ok(f64::NEG_INFINITY));
+ assert_eq!("inf".parse(), Ok(f32::INFINITY));
+ assert_eq!("-inf".parse(), Ok(f32::NEG_INFINITY));
+}
+
+#[test]
+fn massive_exponent() {
+ let max = i64::MAX;
+ assert_eq!(format!("1e{max}000").parse(), Ok(f64::INFINITY));
+ assert_eq!(format!("1e-{max}000").parse(), Ok(0.0));
+ assert_eq!(format!("1e{max}000").parse(), Ok(f64::INFINITY));
+}
+
+#[test]
+fn borderline_overflow() {
+ let mut s = "0.".to_string();
+ for _ in 0..375 {
+ s.push('3');
+ }
+    // At the time of this writing, this returns Err(..), but that is a bug that should be fixed.
+    // It makes no sense to enshrine that in a test; the important part is that it doesn't panic.
+ let _ = s.parse::<f64>();
+}
diff --git a/library/core/tests/num/dec2flt/parse.rs b/library/core/tests/num/dec2flt/parse.rs
new file mode 100644
index 000000000..edc77377d
--- /dev/null
+++ b/library/core/tests/num/dec2flt/parse.rs
@@ -0,0 +1,177 @@
+use core::num::dec2flt::number::Number;
+use core::num::dec2flt::parse::parse_number;
+use core::num::dec2flt::{dec2flt, pfe_invalid};
+
+fn new_number(e: i64, m: u64) -> Number {
+ Number { exponent: e, mantissa: m, negative: false, many_digits: false }
+}
+
+#[test]
+fn missing_pieces() {
+ let permutations = &[".e", "1e", "e4", "e", ".12e", "321.e", "32.12e+", "12.32e-"];
+ for &s in permutations {
+ assert_eq!(dec2flt::<f64>(s), Err(pfe_invalid()));
+ }
+}
+
+#[test]
+fn invalid_chars() {
+ let invalid = "r,?<j";
+ let error = Err(pfe_invalid());
+ let valid_strings = &["123", "666.", ".1", "5e1", "7e-3", "0.0e+1"];
+ for c in invalid.chars() {
+ for s in valid_strings {
+ for i in 0..s.len() {
+ let mut input = String::new();
+ input.push_str(s);
+ input.insert(i, c);
+ assert!(dec2flt::<f64>(&input) == error, "did not reject invalid {:?}", input);
+ }
+ }
+ }
+}
+
+fn parse_positive(s: &[u8]) -> Option<Number> {
+ parse_number(s, false)
+}
+
+#[test]
+fn valid() {
+ assert_eq!(parse_positive(b"123.456e789"), Some(new_number(786, 123456)));
+ assert_eq!(parse_positive(b"123.456e+789"), Some(new_number(786, 123456)));
+ assert_eq!(parse_positive(b"123.456e-789"), Some(new_number(-792, 123456)));
+ assert_eq!(parse_positive(b".050"), Some(new_number(-3, 50)));
+ assert_eq!(parse_positive(b"999"), Some(new_number(0, 999)));
+ assert_eq!(parse_positive(b"1.e300"), Some(new_number(300, 1)));
+ assert_eq!(parse_positive(b".1e300"), Some(new_number(299, 1)));
+ assert_eq!(parse_positive(b"101e-33"), Some(new_number(-33, 101)));
+ let zeros = "0".repeat(25);
+ let s = format!("1.5e{zeros}");
+ assert_eq!(parse_positive(s.as_bytes()), Some(new_number(-1, 15)));
+}
+
+macro_rules! assert_float_result_bits_eq {
+ ($bits:literal, $ty:ty, $str:literal) => {{
+ let p = dec2flt::<$ty>($str);
+ assert_eq!(p.map(|x| x.to_bits()), Ok($bits));
+ }};
+}
+
+#[test]
+fn issue31109() {
+ // Regression test for #31109.
+ // Ensure the test produces a valid float with the expected bit pattern.
+ assert_float_result_bits_eq!(
+ 0x3fd5555555555555,
+ f64,
+ "0.3333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333"
+ );
+}
+
+#[test]
+fn issue31407() {
+ // Regression test for #31407.
+ // Ensure the test produces a valid float with the expected bit pattern.
+ assert_float_result_bits_eq!(
+ 0x1752a64e34ba0d3,
+ f64,
+ "1234567890123456789012345678901234567890e-340"
+ );
+ assert_float_result_bits_eq!(
+ 0xfffffffffffff,
+ f64,
+ "2.225073858507201136057409796709131975934819546351645648023426109724822222021076945516529523908135087914149158913039621106870086438694594645527657207407820621743379988141063267329253552286881372149012981122451451889849057222307285255133155755015914397476397983411801999323962548289017107081850690630666655994938275772572015763062690663332647565300009245888316433037779791869612049497390377829704905051080609940730262937128958950003583799967207254304360284078895771796150945516748243471030702609144621572289880258182545180325707018860872113128079512233426288368622321503775666622503982534335974568884423900265498198385487948292206894721689831099698365846814022854243330660339850886445804001034933970427567186443383770486037861622771738545623065874679014086723327636718749999999999999999999999999999999999999e-308"
+ );
+ assert_float_result_bits_eq!(
+ 0x10000000000000,
+ f64,
+ "2.22507385850720113605740979670913197593481954635164564802342610972482222202107694551652952390813508791414915891303962110687008643869459464552765720740782062174337998814106326732925355228688137214901298112245145188984905722230728525513315575501591439747639798341180199932396254828901710708185069063066665599493827577257201576306269066333264756530000924588831643303777979186961204949739037782970490505108060994073026293712895895000358379996720725430436028407889577179615094551674824347103070260914462157228988025818254518032570701886087211312807951223342628836862232150377566662250398253433597456888442390026549819838548794829220689472168983109969836584681402285424333066033985088644580400103493397042756718644338377048603786162277173854562306587467901408672332763671875e-308"
+ );
+ assert_float_result_bits_eq!(
+ 0x10000000000000,
+ f64,
+ "0.0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000222507385850720138309023271733240406421921598046233183055332741688720443481391819585428315901251102056406733973103581100515243416155346010885601238537771882113077799353200233047961014744258363607192156504694250373420837525080665061665815894872049117996859163964850063590877011830487479978088775374994945158045160505091539985658247081864511353793580499211598108576605199243335211435239014879569960959128889160299264151106346631339366347758651302937176204732563178148566435087212282863764204484681140761391147706280168985324411002416144742161856716615054015428508471675290190316132277889672970737312333408698898317506783884692609277397797285865965494109136909540613646756870239867831529068098461721092462539672851562500000000000000001"
+ );
+ assert_float_result_bits_eq!(
+ 0x7fefffffffffffff,
+ f64,
+ "179769313486231580793728971405303415079934132710037826936173778980444968292764750946649017977587207096330286416692887910946555547851940402630657488671505820681908902000708383676273854845817711531764475730270069855571366959622842914819860834936475292719074168444365510704342711559699508093042880177904174497791.9999999999999999999999999999999999999999999999999999999999999999999999"
+ );
+ assert_float_result_bits_eq!(0x0, f64, "2.47032822920623272e-324");
+ assert_float_result_bits_eq!(
+ 0x8000000,
+ f64,
+ "6.631236871469758276785396630275967243399099947355303144249971758736286630139265439618068200788048744105960420552601852889715006376325666595539603330361800519107591783233358492337208057849499360899425128640718856616503093444922854759159988160304439909868291973931426625698663157749836252274523485312442358651207051292453083278116143932569727918709786004497872322193856150225415211997283078496319412124640111777216148110752815101775295719811974338451936095907419622417538473679495148632480391435931767981122396703443803335529756003353209830071832230689201383015598792184172909927924176339315507402234836120730914783168400715462440053817592702766213559042115986763819482654128770595766806872783349146967171293949598850675682115696218943412532098591327667236328125E-316"
+ );
+ assert_float_result_bits_eq!(
+ 0x10000,
+ f64,
+ "3.237883913302901289588352412501532174863037669423108059901297049552301970670676565786835742587799557860615776559838283435514391084153169252689190564396459577394618038928365305143463955100356696665629202017331344031730044369360205258345803431471660032699580731300954848363975548690010751530018881758184174569652173110473696022749934638425380623369774736560008997404060967498028389191878963968575439222206416981462690113342524002724385941651051293552601421155333430225237291523843322331326138431477823591142408800030775170625915670728657003151953664260769822494937951845801530895238439819708403389937873241463484205608000027270531106827387907791444918534771598750162812548862768493201518991668028251730299953143924168545708663913273994694463908672332763671875E-319"
+ );
+ assert_float_result_bits_eq!(
+ 0x800000000100,
+ f64,
+ "6.953355807847677105972805215521891690222119817145950754416205607980030131549636688806115726399441880065386399864028691275539539414652831584795668560082999889551357784961446896042113198284213107935110217162654939802416034676213829409720583759540476786936413816541621287843248433202369209916612249676005573022703244799714622116542188837770376022371172079559125853382801396219552418839469770514904192657627060319372847562301074140442660237844114174497210955449896389180395827191602886654488182452409583981389442783377001505462015745017848754574668342161759496661766020028752888783387074850773192997102997936619876226688096314989645766000479009083731736585750335262099860150896718774401964796827166283225641992040747894382698751809812609536720628966577351093292236328125E-310"
+ );
+ assert_float_result_bits_eq!(
+ 0x10800,
+ f64,
+ "3.339068557571188581835713701280943911923401916998521771655656997328440314559615318168849149074662609099998113009465566426808170378434065722991659642619467706034884424989741080790766778456332168200464651593995817371782125010668346652995912233993254584461125868481633343674905074271064409763090708017856584019776878812425312008812326260363035474811532236853359905334625575404216060622858633280744301892470300555678734689978476870369853549413277156622170245846166991655321535529623870646888786637528995592800436177901746286272273374471701452991433047257863864601424252024791567368195056077320885329384322332391564645264143400798619665040608077549162173963649264049738362290606875883456826586710961041737908872035803481241600376705491726170293986797332763671875E-319"
+ );
+ assert_float_result_bits_eq!(
+ 0x0,
+ f64,
+ "2.4703282292062327208828439643411068618252990130716238221279284125033775363510437593264991818081799618989828234772285886546332835517796989819938739800539093906315035659515570226392290858392449105184435931802849936536152500319370457678249219365623669863658480757001585769269903706311928279558551332927834338409351978015531246597263579574622766465272827220056374006485499977096599470454020828166226237857393450736339007967761930577506740176324673600968951340535537458516661134223766678604162159680461914467291840300530057530849048765391711386591646239524912623653881879636239373280423891018672348497668235089863388587925628302755995657524455507255189313690836254779186948667994968324049705821028513185451396213837722826145437693412532098591327667236328124999e-324"
+ );
+ assert_float_result_bits_eq!(
+ 0x0,
+ f64,
+ "2.4703282292062327208828439643411068618252990130716238221279284125033775363510437593264991818081799618989828234772285886546332835517796989819938739800539093906315035659515570226392290858392449105184435931802849936536152500319370457678249219365623669863658480757001585769269903706311928279558551332927834338409351978015531246597263579574622766465272827220056374006485499977096599470454020828166226237857393450736339007967761930577506740176324673600968951340535537458516661134223766678604162159680461914467291840300530057530849048765391711386591646239524912623653881879636239373280423891018672348497668235089863388587925628302755995657524455507255189313690836254779186948667994968324049705821028513185451396213837722826145437693412532098591327667236328125e-324"
+ );
+ assert_float_result_bits_eq!(
+ 0x1,
+ f64,
+ "2.4703282292062327208828439643411068618252990130716238221279284125033775363510437593264991818081799618989828234772285886546332835517796989819938739800539093906315035659515570226392290858392449105184435931802849936536152500319370457678249219365623669863658480757001585769269903706311928279558551332927834338409351978015531246597263579574622766465272827220056374006485499977096599470454020828166226237857393450736339007967761930577506740176324673600968951340535537458516661134223766678604162159680461914467291840300530057530849048765391711386591646239524912623653881879636239373280423891018672348497668235089863388587925628302755995657524455507255189313690836254779186948667994968324049705821028513185451396213837722826145437693412532098591327667236328125001e-324"
+ );
+ assert_float_result_bits_eq!(
+ 0x1,
+ f64,
+ "7.4109846876186981626485318930233205854758970392148714663837852375101326090531312779794975454245398856969484704316857659638998506553390969459816219401617281718945106978546710679176872575177347315553307795408549809608457500958111373034747658096871009590975442271004757307809711118935784838675653998783503015228055934046593739791790738723868299395818481660169122019456499931289798411362062484498678713572180352209017023903285791732520220528974020802906854021606612375549983402671300035812486479041385743401875520901590172592547146296175134159774938718574737870961645638908718119841271673056017045493004705269590165763776884908267986972573366521765567941072508764337560846003984904972149117463085539556354188641513168478436313080237596295773983001708984374999e-324"
+ );
+ assert_float_result_bits_eq!(
+ 0x2,
+ f64,
+ "7.4109846876186981626485318930233205854758970392148714663837852375101326090531312779794975454245398856969484704316857659638998506553390969459816219401617281718945106978546710679176872575177347315553307795408549809608457500958111373034747658096871009590975442271004757307809711118935784838675653998783503015228055934046593739791790738723868299395818481660169122019456499931289798411362062484498678713572180352209017023903285791732520220528974020802906854021606612375549983402671300035812486479041385743401875520901590172592547146296175134159774938718574737870961645638908718119841271673056017045493004705269590165763776884908267986972573366521765567941072508764337560846003984904972149117463085539556354188641513168478436313080237596295773983001708984375e-324"
+ );
+ assert_float_result_bits_eq!(
+ 0x2,
+ f64,
+ "7.4109846876186981626485318930233205854758970392148714663837852375101326090531312779794975454245398856969484704316857659638998506553390969459816219401617281718945106978546710679176872575177347315553307795408549809608457500958111373034747658096871009590975442271004757307809711118935784838675653998783503015228055934046593739791790738723868299395818481660169122019456499931289798411362062484498678713572180352209017023903285791732520220528974020802906854021606612375549983402671300035812486479041385743401875520901590172592547146296175134159774938718574737870961645638908718119841271673056017045493004705269590165763776884908267986972573366521765567941072508764337560846003984904972149117463085539556354188641513168478436313080237596295773983001708984375001e-324"
+ );
+ assert_float_result_bits_eq!(
+ 0x6c9a143590c14,
+ f64,
+ "94393431193180696942841837085033647913224148539854e-358"
+ );
+ assert_float_result_bits_eq!(
+ 0x7802665fd9600,
+ f64,
+ "104308485241983990666713401708072175773165034278685682646111762292409330928739751702404658197872319129036519947435319418387839758990478549477777586673075945844895981012024387992135617064532141489278815239849108105951619997829153633535314849999674266169258928940692239684771590065027025835804863585454872499320500023126142553932654370362024104462255244034053203998964360882487378334860197725139151265590832887433736189468858614521708567646743455601905935595381852723723645799866672558576993978025033590728687206296379801363024094048327273913079612469982585674824156000783167963081616214710691759864332339239688734656548790656486646106983450809073750535624894296242072010195710276073042036425579852459556183541199012652571123898996574563824424330960027873516082763671875e-1075"
+ );
+}
+
+#[test]
+fn many_digits() {
+    // Check large numbers of digits to ensure we have cases where the number of
+    // significant digits exceeds `Decimal::MAX_DIGITS`.
+ assert_float_result_bits_eq!(
+ 0x7ffffe,
+ f32,
+ "1.175494140627517859246175898662808184331245864732796240031385942718174675986064769972472277004271745681762695312500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e-38"
+ );
+ assert_float_result_bits_eq!(
+ 0x7ffffe,
+ f32,
+ "1.175494140627517859246175898662808184331245864732796240031385942718174675986064769972472277004271745681762695312500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e-38"
+ );
+}
diff --git a/library/core/tests/num/flt2dec/estimator.rs b/library/core/tests/num/flt2dec/estimator.rs
new file mode 100644
index 000000000..da203b5f3
--- /dev/null
+++ b/library/core/tests/num/flt2dec/estimator.rs
@@ -0,0 +1,62 @@
+use core::num::flt2dec::estimator::*;
+
+#[test]
+fn test_estimate_scaling_factor() {
+ macro_rules! assert_almost_eq {
+ ($actual:expr, $expected:expr) => {{
+ let actual = $actual;
+ let expected = $expected;
+ println!(
+ "{} - {} = {} - {} = {}",
+ stringify!($expected),
+ stringify!($actual),
+ expected,
+ actual,
+ expected - actual
+ );
+ assert!(
+ expected == actual || expected == actual + 1,
+ "expected {}, actual {}",
+ expected,
+ actual
+ );
+ }};
+ }
+
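+    // `estimate_scaling_factor(mant, exp)` estimates `k = ceil(log10(mant * 2^exp))`;
+    // judging from the tolerance above, the estimate may come out one short of the
+    // true `k`, which `assert_almost_eq!` deliberately accepts.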
+ assert_almost_eq!(estimate_scaling_factor(1, 0), 0);
+ assert_almost_eq!(estimate_scaling_factor(2, 0), 1);
+ assert_almost_eq!(estimate_scaling_factor(10, 0), 1);
+ assert_almost_eq!(estimate_scaling_factor(11, 0), 2);
+ assert_almost_eq!(estimate_scaling_factor(100, 0), 2);
+ assert_almost_eq!(estimate_scaling_factor(101, 0), 3);
+ assert_almost_eq!(estimate_scaling_factor(10000000000000000000, 0), 19);
+ assert_almost_eq!(estimate_scaling_factor(10000000000000000001, 0), 20);
+
+ // 1/2^20 = 0.00000095367...
+ assert_almost_eq!(estimate_scaling_factor(1 * 1048576 / 1000000, -20), -6);
+ assert_almost_eq!(estimate_scaling_factor(1 * 1048576 / 1000000 + 1, -20), -5);
+ assert_almost_eq!(estimate_scaling_factor(10 * 1048576 / 1000000, -20), -5);
+ assert_almost_eq!(estimate_scaling_factor(10 * 1048576 / 1000000 + 1, -20), -4);
+ assert_almost_eq!(estimate_scaling_factor(100 * 1048576 / 1000000, -20), -4);
+ assert_almost_eq!(estimate_scaling_factor(100 * 1048576 / 1000000 + 1, -20), -3);
+ assert_almost_eq!(estimate_scaling_factor(1048575, -20), 0);
+ assert_almost_eq!(estimate_scaling_factor(1048576, -20), 0);
+ assert_almost_eq!(estimate_scaling_factor(1048577, -20), 1);
+ assert_almost_eq!(estimate_scaling_factor(10485759999999999999, -20), 13);
+ assert_almost_eq!(estimate_scaling_factor(10485760000000000000, -20), 13);
+ assert_almost_eq!(estimate_scaling_factor(10485760000000000001, -20), 14);
+
+ // extreme values:
+ // 2^-1074 = 4.94065... * 10^-324
+ // (2^53-1) * 2^971 = 1.79763... * 10^308
+ assert_almost_eq!(estimate_scaling_factor(1, -1074), -323);
+ assert_almost_eq!(estimate_scaling_factor(0x1fffffffffffff, 971), 309);
+
+ // Miri is too slow
+ let step = if cfg!(miri) { 37 } else { 1 };
+
+ for i in (-1074..972).step_by(step) {
+ let expected = super::ldexp_f64(1.0, i).log10().ceil();
+ assert_almost_eq!(estimate_scaling_factor(1, i as i16), expected as i16);
+ }
+}
diff --git a/library/core/tests/num/flt2dec/mod.rs b/library/core/tests/num/flt2dec/mod.rs
new file mode 100644
index 000000000..798473bbd
--- /dev/null
+++ b/library/core/tests/num/flt2dec/mod.rs
@@ -0,0 +1,1172 @@
+use std::mem::MaybeUninit;
+use std::{fmt, str};
+
+use core::num::flt2dec::{decode, DecodableFloat, Decoded, FullDecoded};
+use core::num::flt2dec::{round_up, Sign, MAX_SIG_DIGITS};
+use core::num::flt2dec::{
+ to_exact_exp_str, to_exact_fixed_str, to_shortest_exp_str, to_shortest_str,
+};
+use core::num::fmt::{Formatted, Part};
+
+pub use test::Bencher;
+
+mod estimator;
+mod strategy {
+ mod dragon;
+ mod grisu;
+}
+mod random;
+
+pub fn decode_finite<T: DecodableFloat>(v: T) -> Decoded {
+ match decode(v).1 {
+ FullDecoded::Finite(decoded) => decoded,
+ full_decoded => panic!("expected finite, got {full_decoded:?} instead"),
+ }
+}
+
+macro_rules! check_shortest {
+ ($f:ident($v:expr) => $buf:expr, $exp:expr) => (
+ check_shortest!($f($v) => $buf, $exp;
+ "shortest mismatch for v={v}: actual {actual:?}, expected {expected:?}",
+ v = stringify!($v))
+ );
+
+ ($f:ident{$($k:ident: $v:expr),+} => $buf:expr, $exp:expr) => (
+ check_shortest!($f{$($k: $v),+} => $buf, $exp;
+ "shortest mismatch for {v:?}: actual {actual:?}, expected {expected:?}",
+ v = Decoded { $($k: $v),+ })
+ );
+
+ ($f:ident($v:expr) => $buf:expr, $exp:expr; $fmt:expr, $($key:ident = $val:expr),*) => ({
+ let mut buf = [MaybeUninit::new(b'_'); MAX_SIG_DIGITS];
+ let (buf, k) = $f(&decode_finite($v), &mut buf);
+ assert!((buf, k) == ($buf, $exp),
+ $fmt, actual = (str::from_utf8(buf).unwrap(), k),
+ expected = (str::from_utf8($buf).unwrap(), $exp),
+ $($key = $val),*);
+ });
+
+ ($f:ident{$($k:ident: $v:expr),+} => $buf:expr, $exp:expr;
+ $fmt:expr, $($key:ident = $val:expr),*) => ({
+ let mut buf = [MaybeUninit::new(b'_'); MAX_SIG_DIGITS];
+ let (buf, k) = $f(&Decoded { $($k: $v),+ }, &mut buf);
+ assert!((buf, k) == ($buf, $exp),
+ $fmt, actual = (str::from_utf8(buf).unwrap(), k),
+ expected = (str::from_utf8($buf).unwrap(), $exp),
+ $($key = $val),*);
+ })
+}
+
+macro_rules! try_exact {
+ ($f:ident($decoded:expr) => $buf:expr, $expected:expr, $expectedk:expr;
+ $fmt:expr, $($key:ident = $val:expr),*) => ({
+ let (buf, k) = $f($decoded, &mut $buf[..$expected.len()], i16::MIN);
+ assert!((buf, k) == ($expected, $expectedk),
+ $fmt, actual = (str::from_utf8(buf).unwrap(), k),
+ expected = (str::from_utf8($expected).unwrap(), $expectedk),
+ $($key = $val),*);
+ })
+}
+
+macro_rules! try_fixed {
+ ($f:ident($decoded:expr) => $buf:expr, $request:expr, $expected:expr, $expectedk:expr;
+ $fmt:expr, $($key:ident = $val:expr),*) => ({
+ let (buf, k) = $f($decoded, &mut $buf[..], $request);
+ assert!((buf, k) == ($expected, $expectedk),
+ $fmt, actual = (str::from_utf8(buf).unwrap(), k),
+ expected = (str::from_utf8($expected).unwrap(), $expectedk),
+ $($key = $val),*);
+ })
+}
+
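+// Computes `a * 2^b` (exactly, when the result is representable) by deferring to
+// the platform C library's `ldexp`.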
+fn ldexp_f32(a: f32, b: i32) -> f32 {
+ ldexp_f64(a as f64, b) as f32
+}
+
+fn ldexp_f64(a: f64, b: i32) -> f64 {
+ extern "C" {
+ fn ldexp(x: f64, n: i32) -> f64;
+ }
+ // SAFETY: assuming a correct `ldexp` has been supplied, the given arguments cannot possibly
+ // cause undefined behavior
+ unsafe { ldexp(a, b) }
+}
+
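+// Checks exact-mode formatting of `v` against `expected`, its full decimal
+// expansion (a trailing space, if present, marks the point past which only zeros
+// follow): every truncated prefix must come out correctly rounded, whether the
+// caller asks for a digit count (`try_exact!`) or for a position (`try_fixed!`).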
+fn check_exact<F, T>(mut f: F, v: T, vstr: &str, expected: &[u8], expectedk: i16)
+where
+ T: DecodableFloat,
+ F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>], i16) -> (&'a [u8], i16),
+{
+ // use a large enough buffer
+ let mut buf = [MaybeUninit::new(b'_'); 1024];
+ let mut expected_ = [b'_'; 1024];
+
+ let decoded = decode_finite(v);
+ let cut = expected.iter().position(|&c| c == b' ');
+
+ // check significant digits
+ for i in 1..cut.unwrap_or(expected.len() - 1) {
+ expected_[..i].copy_from_slice(&expected[..i]);
+ let mut expectedk_ = expectedk;
+ if expected[i] >= b'5' {
+ // check if this is a rounding-to-even case.
+ // we avoid rounding ...x5000... (with infinite zeroes) to ...(x+1) when x is even.
+ if !(i + 1 < expected.len()
+ && expected[i - 1] & 1 == 0
+ && expected[i] == b'5'
+ && expected[i + 1] == b' ')
+ {
+                // if this returns `Some(..)`, expected_[..i] was all `9`s and has been
+                // rounded up to `100..00` (still `i` digits), which is the best we can
+                // come up with in `i` digits anyway. `round_up` assumes that the
+                // adjustment to the length is done by the caller, which we deliberately
+                // skip here and bump `expectedk_` instead.
+ if let Some(_) = round_up(&mut expected_[..i]) {
+ expectedk_ += 1;
+ }
+ }
+ }
+
+ try_exact!(f(&decoded) => &mut buf, &expected_[..i], expectedk_;
+ "exact sigdigit mismatch for v={v}, i={i}: \
+ actual {actual:?}, expected {expected:?}",
+ v = vstr, i = i);
+ try_fixed!(f(&decoded) => &mut buf, expectedk_ - i as i16, &expected_[..i], expectedk_;
+ "fixed sigdigit mismatch for v={v}, i={i}: \
+ actual {actual:?}, expected {expected:?}",
+ v = vstr, i = i);
+ }
+
+ // check exact rounding for zero- and negative-width cases
+ let start;
+ if expected[0] >= b'5' {
+ try_fixed!(f(&decoded) => &mut buf, expectedk, b"1", expectedk + 1;
+ "zero-width rounding-up mismatch for v={v}: \
+ actual {actual:?}, expected {expected:?}",
+ v = vstr);
+ start = 1;
+ } else {
+ start = 0;
+ }
+    // negative-width requests (`i <= 0`): the limit sits at or past `expectedk`,
+    // so the formatter must produce no digits at all
+    for i in -10..=-start {
+ try_fixed!(f(&decoded) => &mut buf, expectedk - i, b"", expectedk;
+ "rounding-down mismatch for v={v}, i={i}: \
+ actual {actual:?}, expected {expected:?}",
+ v = vstr, i = -i);
+ }
+
+ // check infinite zero digits
+ if let Some(cut) = cut {
+ for i in cut..expected.len() - 1 {
+ expected_[..cut].copy_from_slice(&expected[..cut]);
+ for c in &mut expected_[cut..i] {
+ *c = b'0';
+ }
+
+ try_exact!(f(&decoded) => &mut buf, &expected_[..i], expectedk;
+ "exact infzero mismatch for v={v}, i={i}: \
+ actual {actual:?}, expected {expected:?}",
+ v = vstr, i = i);
+ try_fixed!(f(&decoded) => &mut buf, expectedk - i as i16, &expected_[..i], expectedk;
+ "fixed infzero mismatch for v={v}, i={i}: \
+ actual {actual:?}, expected {expected:?}",
+ v = vstr, i = i);
+ }
+ }
+}
+
+trait TestableFloat: DecodableFloat + fmt::Display {
+    /// Returns `f * 2^exp`. Almost the same as `std::{f32,f64}::ldexp`.
+ /// This is used for testing.
+ fn ldexpi(f: i64, exp: isize) -> Self;
+}
+
+impl TestableFloat for f32 {
+ fn ldexpi(f: i64, exp: isize) -> Self {
+ f as Self * (exp as Self).exp2()
+ }
+}
+
+impl TestableFloat for f64 {
+ fn ldexpi(f: i64, exp: isize) -> Self {
+ f as Self * (exp as Self).exp2()
+ }
+}
+
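+// Like `check_exact`, but the input is given as `x * 2^e`; only the leading
+// `expected.len()` digits and the decimal exponent `expectedk` are checked.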
+fn check_exact_one<F, T>(mut f: F, x: i64, e: isize, tstr: &str, expected: &[u8], expectedk: i16)
+where
+ T: TestableFloat,
+ F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>], i16) -> (&'a [u8], i16),
+{
+ // use a large enough buffer
+ let mut buf = [MaybeUninit::new(b'_'); 1024];
+ let v: T = TestableFloat::ldexpi(x, e);
+ let decoded = decode_finite(v);
+
+ try_exact!(f(&decoded) => &mut buf, &expected, expectedk;
+ "exact mismatch for v={x}p{e}{t}: actual {actual:?}, expected {expected:?}",
+ x = x, e = e, t = tstr);
+ try_fixed!(f(&decoded) => &mut buf, expectedk - expected.len() as i16, &expected, expectedk;
+ "fixed mismatch for v={x}p{e}{t}: actual {actual:?}, expected {expected:?}",
+ x = x, e = e, t = tstr);
+}
+
+macro_rules! check_exact {
+ ($f:ident($v:expr) => $buf:expr, $exp:expr) => {
+ check_exact(|d, b, k| $f(d, b, k), $v, stringify!($v), $buf, $exp)
+ };
+}
+
+macro_rules! check_exact_one {
+ ($f:ident($x:expr, $e:expr; $t:ty) => $buf:expr, $exp:expr) => {
+ check_exact_one::<_, $t>(|d, b, k| $f(d, b, k), $x, $e, stringify!($t), $buf, $exp)
+ };
+}
+
+// in the following comments, three numbers are spaced 1 ulp apart,
+// and the second one is being formatted.
+//
+// some tests are derived from [1].
+//
+// [1] Vern Paxson, A Program for Testing IEEE Decimal-Binary Conversion
+// ftp://ftp.ee.lbl.gov/testbase-report.ps.Z
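+//
+// listing the neighbors makes each expected result checkable by hand: the
+// shortest output must identify the middle value uniquely, i.e. it cannot be
+// mistaken for either neighbor.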
+
+pub fn f32_shortest_sanity_test<F>(mut f: F)
+where
+ F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> (&'a [u8], i16),
+{
+ // 0.0999999940395355224609375
+ // 0.100000001490116119384765625
+ // 0.10000000894069671630859375
+ check_shortest!(f(0.1f32) => b"1", 0);
+
+ // 0.333333313465118408203125
+ // 0.3333333432674407958984375 (1/3 in the default rounding)
+ // 0.33333337306976318359375
+ check_shortest!(f(1.0f32/3.0) => b"33333334", 0);
+
+ // 10^1 * 0.31415917873382568359375
+ // 10^1 * 0.31415920257568359375
+ // 10^1 * 0.31415922641754150390625
+ check_shortest!(f(3.141592f32) => b"3141592", 1);
+
+ // 10^18 * 0.31415916243714048
+ // 10^18 * 0.314159196796878848
+ // 10^18 * 0.314159231156617216
+ check_shortest!(f(3.141592e17f32) => b"3141592", 18);
+
+ // regression test for decoders
+ // 10^8 * 0.3355443
+ // 10^8 * 0.33554432
+ // 10^8 * 0.33554436
+ check_shortest!(f(ldexp_f32(1.0, 25)) => b"33554432", 8);
+
+ // 10^39 * 0.340282326356119256160033759537265639424
+ // 10^39 * 0.34028234663852885981170418348451692544
+ // 10^39 * 0.340282366920938463463374607431768211456
+ check_shortest!(f(f32::MAX) => b"34028235", 39);
+
+ // 10^-37 * 0.1175494210692441075487029444849287348827...
+ // 10^-37 * 0.1175494350822287507968736537222245677818...
+ // 10^-37 * 0.1175494490952133940450443629595204006810...
+ check_shortest!(f(f32::MIN_POSITIVE) => b"11754944", -37);
+
+ // 10^-44 * 0
+ // 10^-44 * 0.1401298464324817070923729583289916131280...
+ // 10^-44 * 0.2802596928649634141847459166579832262560...
+ let minf32 = ldexp_f32(1.0, -149);
+ check_shortest!(f(minf32) => b"1", -44);
+}
+
+pub fn f32_exact_sanity_test<F>(mut f: F)
+where
+ F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>], i16) -> (&'a [u8], i16),
+{
+ let minf32 = ldexp_f32(1.0, -149);
+
+ check_exact!(f(0.1f32) => b"100000001490116119384765625 ", 0);
+ check_exact!(f(0.5f32) => b"5 ", 0);
+ check_exact!(f(1.0f32/3.0) => b"3333333432674407958984375 ", 0);
+ check_exact!(f(3.141592f32) => b"31415920257568359375 ", 1);
+ check_exact!(f(3.141592e17f32) => b"314159196796878848 ", 18);
+ check_exact!(f(f32::MAX) => b"34028234663852885981170418348451692544 ", 39);
+ check_exact!(f(f32::MIN_POSITIVE) => b"1175494350822287507968736537222245677818", -37);
+ check_exact!(f(minf32) => b"1401298464324817070923729583289916131280", -44);
+
+ // [1], Table 16: Stress Inputs for Converting 24-bit Binary to Decimal, < 1/2 ULP
+ check_exact_one!(f(12676506, -102; f32) => b"2", -23);
+ check_exact_one!(f(12676506, -103; f32) => b"12", -23);
+ check_exact_one!(f(15445013, 86; f32) => b"119", 34);
+ check_exact_one!(f(13734123, -138; f32) => b"3941", -34);
+ check_exact_one!(f(12428269, -130; f32) => b"91308", -32);
+ check_exact_one!(f(15334037, -146; f32) => b"171900", -36);
+ check_exact_one!(f(11518287, -41; f32) => b"5237910", -5);
+ check_exact_one!(f(12584953, -145; f32) => b"28216440", -36);
+ check_exact_one!(f(15961084, -125; f32) => b"375243281", -30);
+ check_exact_one!(f(14915817, -146; f32) => b"1672120916", -36);
+ check_exact_one!(f(10845484, -102; f32) => b"21388945814", -23);
+ check_exact_one!(f(16431059, -61; f32) => b"712583594561", -11);
+
+ // [1], Table 17: Stress Inputs for Converting 24-bit Binary to Decimal, > 1/2 ULP
+ check_exact_one!(f(16093626, 69; f32) => b"1", 29);
+ check_exact_one!(f( 9983778, 25; f32) => b"34", 15);
+ check_exact_one!(f(12745034, 104; f32) => b"259", 39);
+ check_exact_one!(f(12706553, 72; f32) => b"6001", 29);
+ check_exact_one!(f(11005028, 45; f32) => b"38721", 21);
+ check_exact_one!(f(15059547, 71; f32) => b"355584", 29);
+ check_exact_one!(f(16015691, -99; f32) => b"2526831", -22);
+ check_exact_one!(f( 8667859, 56; f32) => b"62458507", 24);
+ check_exact_one!(f(14855922, -82; f32) => b"307213267", -17);
+ check_exact_one!(f(14855922, -83; f32) => b"1536066333", -17);
+ check_exact_one!(f(10144164, -110; f32) => b"78147796834", -26);
+ check_exact_one!(f(13248074, 95; f32) => b"524810279937", 36);
+}
+
+pub fn f64_shortest_sanity_test<F>(mut f: F)
+where
+ F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> (&'a [u8], i16),
+{
+ // 0.0999999999999999777955395074968691915273...
+ // 0.1000000000000000055511151231257827021181...
+ // 0.1000000000000000333066907387546962127089...
+ check_shortest!(f(0.1f64) => b"1", 0);
+
+ // this example is explicitly mentioned in the paper.
+ // 10^3 * 0.0999999999999999857891452847979962825775...
+ // 10^3 * 0.1 (exact)
+ // 10^3 * 0.1000000000000000142108547152020037174224...
+ check_shortest!(f(100.0f64) => b"1", 3);
+
+ // 0.3333333333333332593184650249895639717578...
+ // 0.3333333333333333148296162562473909929394... (1/3 in the default rounding)
+ // 0.3333333333333333703407674875052180141210...
+ check_shortest!(f(1.0f64/3.0) => b"3333333333333333", 0);
+
+ // explicit test case for equally closest representations.
+ // Dragon has its own tie-breaking rule; Grisu should fall back.
+ // 10^1 * 0.1000007629394531027955395074968691915273...
+ // 10^1 * 0.100000762939453125 (exact)
+ // 10^1 * 0.1000007629394531472044604925031308084726...
+ check_shortest!(f(1.00000762939453125f64) => b"10000076293945313", 1);
+
+ // 10^1 * 0.3141591999999999718085064159822650253772...
+ // 10^1 * 0.3141592000000000162174274009885266423225...
+ // 10^1 * 0.3141592000000000606263483859947882592678...
+ check_shortest!(f(3.141592f64) => b"3141592", 1);
+
+ // 10^18 * 0.314159199999999936
+ // 10^18 * 0.3141592 (exact)
+ // 10^18 * 0.314159200000000064
+ check_shortest!(f(3.141592e17f64) => b"3141592", 18);
+
+ // regression test for decoders
+ // 10^20 * 0.18446744073709549568
+ // 10^20 * 0.18446744073709551616
+ // 10^20 * 0.18446744073709555712
+ check_shortest!(f(ldexp_f64(1.0, 64)) => b"18446744073709552", 20);
+
+ // pathological case: high = 10^23 (exact). tie breaking should always prefer that.
+ // 10^24 * 0.099999999999999974834176
+ // 10^24 * 0.099999999999999991611392
+ // 10^24 * 0.100000000000000008388608
+ check_shortest!(f(1.0e23f64) => b"1", 24);
+
+ // 10^309 * 0.1797693134862315508561243283845062402343...
+ // 10^309 * 0.1797693134862315708145274237317043567980...
+ // 10^309 * 0.1797693134862315907729305190789024733617...
+ check_shortest!(f(f64::MAX) => b"17976931348623157", 309);
+
+ // 10^-307 * 0.2225073858507200889024586876085859887650...
+ // 10^-307 * 0.2225073858507201383090232717332404064219...
+ // 10^-307 * 0.2225073858507201877155878558578948240788...
+ check_shortest!(f(f64::MIN_POSITIVE) => b"22250738585072014", -307);
+
+ // 10^-323 * 0
+ // 10^-323 * 0.4940656458412465441765687928682213723650...
+ // 10^-323 * 0.9881312916824930883531375857364427447301...
+ let minf64 = ldexp_f64(1.0, -1074);
+ check_shortest!(f(minf64) => b"5", -323);
+}
+
+pub fn f64_exact_sanity_test<F>(mut f: F)
+where
+ F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>], i16) -> (&'a [u8], i16),
+{
+ let minf64 = ldexp_f64(1.0, -1074);
+
+ check_exact!(f(0.1f64) => b"1000000000000000055511151231257827021181", 0);
+ check_exact!(f(0.45f64) => b"4500000000000000111022302462515654042363", 0);
+ check_exact!(f(0.5f64) => b"5 ", 0);
+ check_exact!(f(0.95f64) => b"9499999999999999555910790149937383830547", 0);
+ check_exact!(f(100.0f64) => b"1 ", 3);
+ check_exact!(f(999.5f64) => b"9995000000000000000000000000000000000000", 3);
+ check_exact!(f(1.0f64/3.0) => b"3333333333333333148296162562473909929394", 0);
+ check_exact!(f(3.141592f64) => b"3141592000000000162174274009885266423225", 1);
+ check_exact!(f(3.141592e17f64) => b"3141592 ", 18);
+ check_exact!(f(1.0e23f64) => b"99999999999999991611392 ", 23);
+ check_exact!(f(f64::MAX) => b"1797693134862315708145274237317043567980", 309);
+ check_exact!(f(f64::MIN_POSITIVE) => b"2225073858507201383090232717332404064219", -307);
+ check_exact!(f(minf64) => b"4940656458412465441765687928682213723650\
+ 5980261432476442558568250067550727020875\
+ 1865299836361635992379796564695445717730\
+ 9266567103559397963987747960107818781263\
+ 0071319031140452784581716784898210368871\
+ 8636056998730723050006387409153564984387\
+ 3124733972731696151400317153853980741262\
+ 3856559117102665855668676818703956031062\
+ 4931945271591492455329305456544401127480\
+ 1297099995419319894090804165633245247571\
+ 4786901472678015935523861155013480352649\
+ 3472019379026810710749170333222684475333\
+ 5720832431936092382893458368060106011506\
+ 1698097530783422773183292479049825247307\
+ 7637592724787465608477820373446969953364\
+ 7017972677717585125660551199131504891101\
+ 4510378627381672509558373897335989936648\
+ 0994116420570263709027924276754456522908\
+ 7538682506419718265533447265625 ", -323);
+
+ // [1], Table 3: Stress Inputs for Converting 53-bit Binary to Decimal, < 1/2 ULP
+ check_exact_one!(f(8511030020275656, -342; f64) => b"9", -87);
+ check_exact_one!(f(5201988407066741, -824; f64) => b"46", -232);
+ check_exact_one!(f(6406892948269899, 237; f64) => b"141", 88);
+ check_exact_one!(f(8431154198732492, 72; f64) => b"3981", 38);
+ check_exact_one!(f(6475049196144587, 99; f64) => b"41040", 46);
+ check_exact_one!(f(8274307542972842, 726; f64) => b"292084", 235);
+ check_exact_one!(f(5381065484265332, -456; f64) => b"2891946", -121);
+ check_exact_one!(f(6761728585499734, -1057; f64) => b"43787718", -302);
+ check_exact_one!(f(7976538478610756, 376; f64) => b"122770163", 130);
+ check_exact_one!(f(5982403858958067, 377; f64) => b"1841552452", 130);
+ check_exact_one!(f(5536995190630837, 93; f64) => b"54835744350", 44);
+ check_exact_one!(f(7225450889282194, 710; f64) => b"389190181146", 230);
+ check_exact_one!(f(7225450889282194, 709; f64) => b"1945950905732", 230);
+ check_exact_one!(f(8703372741147379, 117; f64) => b"14460958381605", 52);
+ check_exact_one!(f(8944262675275217, -1001; f64) => b"417367747458531", -285);
+ check_exact_one!(f(7459803696087692, -707; f64) => b"1107950772878888", -196);
+ check_exact_one!(f(6080469016670379, -381; f64) => b"12345501366327440", -98);
+ check_exact_one!(f(8385515147034757, 721; f64) => b"925031711960365024", 233);
+ check_exact_one!(f(7514216811389786, -828; f64) => b"4198047150284889840", -233);
+ check_exact_one!(f(8397297803260511, -345; f64) => b"11716315319786511046", -87);
+ check_exact_one!(f(6733459239310543, 202; f64) => b"432810072844612493629", 77);
+ check_exact_one!(f(8091450587292794, -473; f64) => b"3317710118160031081518", -126);
+
+ // [1], Table 4: Stress Inputs for Converting 53-bit Binary to Decimal, > 1/2 ULP
+ check_exact_one!(f(6567258882077402, 952; f64) => b"3", 303);
+ check_exact_one!(f(6712731423444934, 535; f64) => b"76", 177);
+ check_exact_one!(f(6712731423444934, 534; f64) => b"378", 177);
+ check_exact_one!(f(5298405411573037, -957; f64) => b"4350", -272);
+ check_exact_one!(f(5137311167659507, -144; f64) => b"23037", -27);
+ check_exact_one!(f(6722280709661868, 363; f64) => b"126301", 126);
+ check_exact_one!(f(5344436398034927, -169; f64) => b"7142211", -35);
+ check_exact_one!(f(8369123604277281, -853; f64) => b"13934574", -240);
+ check_exact_one!(f(8995822108487663, -780; f64) => b"141463449", -218);
+ check_exact_one!(f(8942832835564782, -383; f64) => b"4539277920", -99);
+ check_exact_one!(f(8942832835564782, -384; f64) => b"22696389598", -99);
+ check_exact_one!(f(8942832835564782, -385; f64) => b"113481947988", -99);
+ check_exact_one!(f(6965949469487146, -249; f64) => b"7700366561890", -59);
+ check_exact_one!(f(6965949469487146, -250; f64) => b"38501832809448", -59);
+ check_exact_one!(f(6965949469487146, -251; f64) => b"192509164047238", -59);
+ check_exact_one!(f(7487252720986826, 548; f64) => b"6898586531774201", 181);
+ check_exact_one!(f(5592117679628511, 164; f64) => b"13076622631878654", 66);
+ check_exact_one!(f(8887055249355788, 665; f64) => b"136052020756121240", 217);
+ check_exact_one!(f(6994187472632449, 690; f64) => b"3592810217475959676", 224);
+ check_exact_one!(f(8797576579012143, 588; f64) => b"89125197712484551899", 193);
+ check_exact_one!(f(7363326733505337, 272; f64) => b"558769757362301140950", 98);
+ check_exact_one!(f(8549497411294502, -448; f64) => b"1176257830728540379990", -118);
+}
+
+pub fn more_shortest_sanity_test<F>(mut f: F)
+where
+ F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> (&'a [u8], i16),
+{
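+    // `mant` is 10^17 - 1, so `plus: 1` puts the upper boundary at exactly 10^17.
+    // When that boundary is inclusive, the shortest admissible output is the single
+    // digit `1` (at exponent 18, i.e. 10^17); when exclusive, all 17 nines are needed.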
+ check_shortest!(f{mant: 99_999_999_999_999_999, minus: 1, plus: 1,
+ exp: 0, inclusive: true} => b"1", 18);
+ check_shortest!(f{mant: 99_999_999_999_999_999, minus: 1, plus: 1,
+ exp: 0, inclusive: false} => b"99999999999999999", 17);
+}
+
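+// Collects the `Formatted` output of `f` (a digit buffer plus a list of `Part`s)
+// into an owned `String`, checking that `write` fills exactly `len` bytes.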
+fn to_string_with_parts<F>(mut f: F) -> String
+where
+ F: for<'a> FnMut(&'a mut [MaybeUninit<u8>], &'a mut [MaybeUninit<Part<'a>>]) -> Formatted<'a>,
+{
+ let mut buf = [MaybeUninit::new(0); 1024];
+ let mut parts = [MaybeUninit::new(Part::Zero(0)); 16];
+ let formatted = f(&mut buf, &mut parts);
+ let mut ret = vec![0; formatted.len()];
+ assert_eq!(formatted.write(&mut ret), Some(ret.len()));
+ String::from_utf8(ret).unwrap()
+}
+
+pub fn to_shortest_str_test<F>(mut f_: F)
+where
+ F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> (&'a [u8], i16),
+{
+ use core::num::flt2dec::Sign::*;
+
+ fn to_string<T, F>(f: &mut F, v: T, sign: Sign, frac_digits: usize) -> String
+ where
+ T: DecodableFloat,
+ F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> (&'a [u8], i16),
+ {
+ to_string_with_parts(|buf, parts| {
+ to_shortest_str(|d, b| f(d, b), v, sign, frac_digits, buf, parts)
+ })
+ }
+
+ let f = &mut f_;
+
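+    // here `frac_digits` only pads with zeros; the shortest digits are never
+    // truncated (see the 7.5e-11 cases below, which keep all digits even at 0 or 3).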
+ assert_eq!(to_string(f, 0.0, Minus, 0), "0");
+ assert_eq!(to_string(f, 0.0, Minus, 0), "0");
+ assert_eq!(to_string(f, 0.0, MinusPlus, 0), "+0");
+ assert_eq!(to_string(f, -0.0, Minus, 0), "-0");
+ assert_eq!(to_string(f, -0.0, MinusPlus, 0), "-0");
+ assert_eq!(to_string(f, 0.0, Minus, 1), "0.0");
+ assert_eq!(to_string(f, 0.0, Minus, 1), "0.0");
+ assert_eq!(to_string(f, 0.0, MinusPlus, 1), "+0.0");
+ assert_eq!(to_string(f, -0.0, Minus, 8), "-0.00000000");
+ assert_eq!(to_string(f, -0.0, MinusPlus, 8), "-0.00000000");
+
+ assert_eq!(to_string(f, 1.0 / 0.0, Minus, 0), "inf");
+ assert_eq!(to_string(f, 1.0 / 0.0, Minus, 0), "inf");
+ assert_eq!(to_string(f, 1.0 / 0.0, MinusPlus, 0), "+inf");
+ assert_eq!(to_string(f, 0.0 / 0.0, Minus, 0), "NaN");
+ assert_eq!(to_string(f, 0.0 / 0.0, Minus, 1), "NaN");
+ assert_eq!(to_string(f, 0.0 / 0.0, MinusPlus, 64), "NaN");
+ assert_eq!(to_string(f, -1.0 / 0.0, Minus, 0), "-inf");
+ assert_eq!(to_string(f, -1.0 / 0.0, Minus, 1), "-inf");
+ assert_eq!(to_string(f, -1.0 / 0.0, MinusPlus, 64), "-inf");
+
+ assert_eq!(to_string(f, 3.14, Minus, 0), "3.14");
+ assert_eq!(to_string(f, 3.14, Minus, 0), "3.14");
+ assert_eq!(to_string(f, 3.14, MinusPlus, 0), "+3.14");
+ assert_eq!(to_string(f, -3.14, Minus, 0), "-3.14");
+ assert_eq!(to_string(f, -3.14, Minus, 0), "-3.14");
+ assert_eq!(to_string(f, -3.14, MinusPlus, 0), "-3.14");
+ assert_eq!(to_string(f, 3.14, Minus, 1), "3.14");
+ assert_eq!(to_string(f, 3.14, Minus, 2), "3.14");
+ assert_eq!(to_string(f, 3.14, MinusPlus, 4), "+3.1400");
+ assert_eq!(to_string(f, -3.14, Minus, 8), "-3.14000000");
+ assert_eq!(to_string(f, -3.14, Minus, 8), "-3.14000000");
+ assert_eq!(to_string(f, -3.14, MinusPlus, 8), "-3.14000000");
+
+ assert_eq!(to_string(f, 7.5e-11, Minus, 0), "0.000000000075");
+ assert_eq!(to_string(f, 7.5e-11, Minus, 3), "0.000000000075");
+ assert_eq!(to_string(f, 7.5e-11, Minus, 12), "0.000000000075");
+ assert_eq!(to_string(f, 7.5e-11, Minus, 13), "0.0000000000750");
+
+ assert_eq!(to_string(f, 1.9971e20, Minus, 0), "199710000000000000000");
+ assert_eq!(to_string(f, 1.9971e20, Minus, 1), "199710000000000000000.0");
+ assert_eq!(to_string(f, 1.9971e20, Minus, 8), "199710000000000000000.00000000");
+
+ assert_eq!(to_string(f, f32::MAX, Minus, 0), format!("34028235{:0>31}", ""));
+ assert_eq!(to_string(f, f32::MAX, Minus, 1), format!("34028235{:0>31}.0", ""));
+ assert_eq!(to_string(f, f32::MAX, Minus, 8), format!("34028235{:0>31}.00000000", ""));
+
+ let minf32 = ldexp_f32(1.0, -149);
+ assert_eq!(to_string(f, minf32, Minus, 0), format!("0.{:0>44}1", ""));
+ assert_eq!(to_string(f, minf32, Minus, 45), format!("0.{:0>44}1", ""));
+ assert_eq!(to_string(f, minf32, Minus, 46), format!("0.{:0>44}10", ""));
+
+ assert_eq!(to_string(f, f64::MAX, Minus, 0), format!("17976931348623157{:0>292}", ""));
+ assert_eq!(to_string(f, f64::MAX, Minus, 1), format!("17976931348623157{:0>292}.0", ""));
+ assert_eq!(to_string(f, f64::MAX, Minus, 8), format!("17976931348623157{:0>292}.00000000", ""));
+
+ let minf64 = ldexp_f64(1.0, -1074);
+ assert_eq!(to_string(f, minf64, Minus, 0), format!("0.{:0>323}5", ""));
+ assert_eq!(to_string(f, minf64, Minus, 324), format!("0.{:0>323}5", ""));
+ assert_eq!(to_string(f, minf64, Minus, 325), format!("0.{:0>323}50", ""));
+
+ if cfg!(miri) {
+ // Miri is too slow
+ return;
+ }
+
+ // very large output
+ assert_eq!(to_string(f, 1.1, Minus, 80000), format!("1.1{:0>79999}", ""));
+}
+
+pub fn to_shortest_exp_str_test<F>(mut f_: F)
+where
+ F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> (&'a [u8], i16),
+{
+ use core::num::flt2dec::Sign::*;
+
+ fn to_string<T, F>(f: &mut F, v: T, sign: Sign, exp_bounds: (i16, i16), upper: bool) -> String
+ where
+ T: DecodableFloat,
+ F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> (&'a [u8], i16),
+ {
+ to_string_with_parts(|buf, parts| {
+ to_shortest_exp_str(|d, b| f(d, b), v, sign, exp_bounds, upper, buf, parts)
+ })
+ }
+
+ let f = &mut f_;
+
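+    // `exp_bounds = (lo, hi)` picks the notation; judging from the cases below,
+    // fixed notation is used while the decimal exponent `k` (the position of the
+    // point) satisfies `lo < k <= hi`, and exponential notation otherwise.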
+ assert_eq!(to_string(f, 0.0, Minus, (-4, 16), false), "0");
+ assert_eq!(to_string(f, 0.0, Minus, (-4, 16), false), "0");
+ assert_eq!(to_string(f, 0.0, MinusPlus, (-4, 16), false), "+0");
+ assert_eq!(to_string(f, -0.0, Minus, (-4, 16), false), "-0");
+ assert_eq!(to_string(f, -0.0, MinusPlus, (-4, 16), false), "-0");
+ assert_eq!(to_string(f, 0.0, Minus, (0, 0), true), "0E0");
+ assert_eq!(to_string(f, 0.0, Minus, (0, 0), false), "0e0");
+ assert_eq!(to_string(f, 0.0, MinusPlus, (5, 9), false), "+0e0");
+ assert_eq!(to_string(f, -0.0, Minus, (0, 0), true), "-0E0");
+ assert_eq!(to_string(f, -0.0, MinusPlus, (5, 9), false), "-0e0");
+
+ assert_eq!(to_string(f, 1.0 / 0.0, Minus, (-4, 16), false), "inf");
+ assert_eq!(to_string(f, 1.0 / 0.0, Minus, (-4, 16), true), "inf");
+ assert_eq!(to_string(f, 1.0 / 0.0, MinusPlus, (-4, 16), true), "+inf");
+ assert_eq!(to_string(f, 0.0 / 0.0, Minus, (0, 0), false), "NaN");
+ assert_eq!(to_string(f, 0.0 / 0.0, Minus, (0, 0), true), "NaN");
+ assert_eq!(to_string(f, 0.0 / 0.0, MinusPlus, (5, 9), true), "NaN");
+ assert_eq!(to_string(f, -1.0 / 0.0, Minus, (0, 0), false), "-inf");
+ assert_eq!(to_string(f, -1.0 / 0.0, Minus, (0, 0), true), "-inf");
+ assert_eq!(to_string(f, -1.0 / 0.0, MinusPlus, (5, 9), true), "-inf");
+
+ assert_eq!(to_string(f, 3.14, Minus, (-4, 16), false), "3.14");
+ assert_eq!(to_string(f, 3.14, MinusPlus, (-4, 16), false), "+3.14");
+ assert_eq!(to_string(f, -3.14, Minus, (-4, 16), false), "-3.14");
+ assert_eq!(to_string(f, -3.14, MinusPlus, (-4, 16), false), "-3.14");
+ assert_eq!(to_string(f, 3.14, Minus, (0, 0), true), "3.14E0");
+ assert_eq!(to_string(f, 3.14, Minus, (0, 0), false), "3.14e0");
+ assert_eq!(to_string(f, 3.14, MinusPlus, (5, 9), false), "+3.14e0");
+ assert_eq!(to_string(f, -3.14, Minus, (0, 0), true), "-3.14E0");
+ assert_eq!(to_string(f, -3.14, Minus, (0, 0), false), "-3.14e0");
+ assert_eq!(to_string(f, -3.14, MinusPlus, (5, 9), false), "-3.14e0");
+
+ assert_eq!(to_string(f, 0.1, Minus, (-4, 16), false), "0.1");
+ assert_eq!(to_string(f, 0.1, Minus, (-4, 16), false), "0.1");
+ assert_eq!(to_string(f, 0.1, MinusPlus, (-4, 16), false), "+0.1");
+ assert_eq!(to_string(f, -0.1, Minus, (-4, 16), false), "-0.1");
+ assert_eq!(to_string(f, -0.1, MinusPlus, (-4, 16), false), "-0.1");
+ assert_eq!(to_string(f, 0.1, Minus, (0, 0), true), "1E-1");
+ assert_eq!(to_string(f, 0.1, Minus, (0, 0), false), "1e-1");
+ assert_eq!(to_string(f, 0.1, MinusPlus, (5, 9), false), "+1e-1");
+ assert_eq!(to_string(f, -0.1, Minus, (0, 0), true), "-1E-1");
+ assert_eq!(to_string(f, -0.1, Minus, (0, 0), false), "-1e-1");
+ assert_eq!(to_string(f, -0.1, MinusPlus, (5, 9), false), "-1e-1");
+
+ assert_eq!(to_string(f, 7.5e-11, Minus, (-4, 16), false), "7.5e-11");
+ assert_eq!(to_string(f, 7.5e-11, Minus, (-11, 10), false), "0.000000000075");
+ assert_eq!(to_string(f, 7.5e-11, Minus, (-10, 11), false), "7.5e-11");
+
+ assert_eq!(to_string(f, 1.9971e20, Minus, (-4, 16), false), "1.9971e20");
+ assert_eq!(to_string(f, 1.9971e20, Minus, (-20, 21), false), "199710000000000000000");
+ assert_eq!(to_string(f, 1.9971e20, Minus, (-21, 20), false), "1.9971e20");
+
+ // the true value of 1.0e23f64 is less than 10^23, but that shouldn't matter here
+ assert_eq!(to_string(f, 1.0e23, Minus, (22, 23), false), "1e23");
+ assert_eq!(to_string(f, 1.0e23, Minus, (23, 24), false), "100000000000000000000000");
+ assert_eq!(to_string(f, 1.0e23, Minus, (24, 25), false), "1e23");
+
+ assert_eq!(to_string(f, f32::MAX, Minus, (-4, 16), false), "3.4028235e38");
+ assert_eq!(to_string(f, f32::MAX, Minus, (-39, 38), false), "3.4028235e38");
+ assert_eq!(to_string(f, f32::MAX, Minus, (-38, 39), false), format!("34028235{:0>31}", ""));
+
+ let minf32 = ldexp_f32(1.0, -149);
+ assert_eq!(to_string(f, minf32, Minus, (-4, 16), false), "1e-45");
+ assert_eq!(to_string(f, minf32, Minus, (-44, 45), false), "1e-45");
+ assert_eq!(to_string(f, minf32, Minus, (-45, 44), false), format!("0.{:0>44}1", ""));
+
+ assert_eq!(to_string(f, f64::MAX, Minus, (-4, 16), false), "1.7976931348623157e308");
+ assert_eq!(
+ to_string(f, f64::MAX, Minus, (-308, 309), false),
+ format!("17976931348623157{:0>292}", "")
+ );
+ assert_eq!(to_string(f, f64::MAX, Minus, (-309, 308), false), "1.7976931348623157e308");
+
+ let minf64 = ldexp_f64(1.0, -1074);
+ assert_eq!(to_string(f, minf64, Minus, (-4, 16), false), "5e-324");
+ assert_eq!(to_string(f, minf64, Minus, (-324, 323), false), format!("0.{:0>323}5", ""));
+ assert_eq!(to_string(f, minf64, Minus, (-323, 324), false), "5e-324");
+
+ assert_eq!(to_string(f, 1.1, Minus, (i16::MIN, i16::MAX), false), "1.1");
+}
+
+pub fn to_exact_exp_str_test<F>(mut f_: F)
+where
+ F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>], i16) -> (&'a [u8], i16),
+{
+ use core::num::flt2dec::Sign::*;
+
+ fn to_string<T, F>(f: &mut F, v: T, sign: Sign, ndigits: usize, upper: bool) -> String
+ where
+ T: DecodableFloat,
+ F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>], i16) -> (&'a [u8], i16),
+ {
+ to_string_with_parts(|buf, parts| {
+ to_exact_exp_str(|d, b, l| f(d, b, l), v, sign, ndigits, upper, buf, parts)
+ })
+ }
+
+ let f = &mut f_;
+
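+    // `ndigits` is the total number of significant digits: the value is rounded
+    // to that many digits and always rendered in exponential notation.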
+ assert_eq!(to_string(f, 0.0, Minus, 1, true), "0E0");
+ assert_eq!(to_string(f, 0.0, Minus, 1, false), "0e0");
+ assert_eq!(to_string(f, 0.0, MinusPlus, 1, false), "+0e0");
+ assert_eq!(to_string(f, -0.0, Minus, 1, true), "-0E0");
+ assert_eq!(to_string(f, -0.0, MinusPlus, 1, false), "-0e0");
+ assert_eq!(to_string(f, 0.0, Minus, 2, true), "0.0E0");
+ assert_eq!(to_string(f, 0.0, Minus, 2, false), "0.0e0");
+ assert_eq!(to_string(f, 0.0, MinusPlus, 2, false), "+0.0e0");
+ assert_eq!(to_string(f, -0.0, Minus, 8, false), "-0.0000000e0");
+ assert_eq!(to_string(f, -0.0, MinusPlus, 8, false), "-0.0000000e0");
+
+ assert_eq!(to_string(f, 1.0 / 0.0, Minus, 1, false), "inf");
+ assert_eq!(to_string(f, 1.0 / 0.0, Minus, 1, true), "inf");
+ assert_eq!(to_string(f, 1.0 / 0.0, MinusPlus, 1, true), "+inf");
+ assert_eq!(to_string(f, 0.0 / 0.0, Minus, 8, false), "NaN");
+ assert_eq!(to_string(f, 0.0 / 0.0, Minus, 8, true), "NaN");
+ assert_eq!(to_string(f, 0.0 / 0.0, MinusPlus, 8, true), "NaN");
+ assert_eq!(to_string(f, -1.0 / 0.0, Minus, 64, false), "-inf");
+ assert_eq!(to_string(f, -1.0 / 0.0, Minus, 64, true), "-inf");
+ assert_eq!(to_string(f, -1.0 / 0.0, MinusPlus, 64, true), "-inf");
+
+ assert_eq!(to_string(f, 3.14, Minus, 1, true), "3E0");
+ assert_eq!(to_string(f, 3.14, Minus, 1, false), "3e0");
+ assert_eq!(to_string(f, 3.14, MinusPlus, 1, false), "+3e0");
+ assert_eq!(to_string(f, -3.14, Minus, 2, true), "-3.1E0");
+ assert_eq!(to_string(f, -3.14, Minus, 2, false), "-3.1e0");
+ assert_eq!(to_string(f, -3.14, MinusPlus, 2, false), "-3.1e0");
+ assert_eq!(to_string(f, 3.14, Minus, 3, true), "3.14E0");
+ assert_eq!(to_string(f, 3.14, Minus, 3, false), "3.14e0");
+ assert_eq!(to_string(f, 3.14, MinusPlus, 3, false), "+3.14e0");
+ assert_eq!(to_string(f, -3.14, Minus, 4, true), "-3.140E0");
+ assert_eq!(to_string(f, -3.14, Minus, 4, false), "-3.140e0");
+ assert_eq!(to_string(f, -3.14, MinusPlus, 4, false), "-3.140e0");
+
+ assert_eq!(to_string(f, 0.195, Minus, 1, false), "2e-1");
+ assert_eq!(to_string(f, 0.195, Minus, 1, true), "2E-1");
+ assert_eq!(to_string(f, 0.195, MinusPlus, 1, true), "+2E-1");
+ assert_eq!(to_string(f, -0.195, Minus, 2, false), "-2.0e-1");
+ assert_eq!(to_string(f, -0.195, Minus, 2, true), "-2.0E-1");
+ assert_eq!(to_string(f, -0.195, MinusPlus, 2, true), "-2.0E-1");
+ assert_eq!(to_string(f, 0.195, Minus, 3, false), "1.95e-1");
+ assert_eq!(to_string(f, 0.195, Minus, 3, true), "1.95E-1");
+ assert_eq!(to_string(f, 0.195, MinusPlus, 3, true), "+1.95E-1");
+ assert_eq!(to_string(f, -0.195, Minus, 4, false), "-1.950e-1");
+ assert_eq!(to_string(f, -0.195, Minus, 4, true), "-1.950E-1");
+ assert_eq!(to_string(f, -0.195, MinusPlus, 4, true), "-1.950E-1");
+
+ assert_eq!(to_string(f, 9.5, Minus, 1, false), "1e1");
+ assert_eq!(to_string(f, 9.5, Minus, 2, false), "9.5e0");
+ assert_eq!(to_string(f, 9.5, Minus, 3, false), "9.50e0");
+ assert_eq!(to_string(f, 9.5, Minus, 30, false), "9.50000000000000000000000000000e0");
+
+ assert_eq!(to_string(f, 1.0e25, Minus, 1, false), "1e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 2, false), "1.0e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 15, false), "1.00000000000000e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 16, false), "1.000000000000000e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 17, false), "1.0000000000000001e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 18, false), "1.00000000000000009e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 19, false), "1.000000000000000091e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 20, false), "1.0000000000000000906e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 21, false), "1.00000000000000009060e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 22, false), "1.000000000000000090597e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 23, false), "1.0000000000000000905970e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 24, false), "1.00000000000000009059697e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 25, false), "1.000000000000000090596966e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 26, false), "1.0000000000000000905969664e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 27, false), "1.00000000000000009059696640e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 30, false), "1.00000000000000009059696640000e25");
+
+ assert_eq!(to_string(f, 1.0e-6, Minus, 1, false), "1e-6");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 2, false), "1.0e-6");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 16, false), "1.000000000000000e-6");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 17, false), "9.9999999999999995e-7");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 18, false), "9.99999999999999955e-7");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 19, false), "9.999999999999999547e-7");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 20, false), "9.9999999999999995475e-7");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 30, false), "9.99999999999999954748111825886e-7");
+ assert_eq!(
+ to_string(f, 1.0e-6, Minus, 40, false),
+ "9.999999999999999547481118258862586856139e-7"
+ );
+ assert_eq!(
+ to_string(f, 1.0e-6, Minus, 50, false),
+ "9.9999999999999995474811182588625868561393872369081e-7"
+ );
+ assert_eq!(
+ to_string(f, 1.0e-6, Minus, 60, false),
+ "9.99999999999999954748111825886258685613938723690807819366455e-7"
+ );
+ assert_eq!(
+ to_string(f, 1.0e-6, Minus, 70, false),
+ "9.999999999999999547481118258862586856139387236908078193664550781250000e-7"
+ );
+
+ assert_eq!(to_string(f, f32::MAX, Minus, 1, false), "3e38");
+ assert_eq!(to_string(f, f32::MAX, Minus, 2, false), "3.4e38");
+ assert_eq!(to_string(f, f32::MAX, Minus, 4, false), "3.403e38");
+ assert_eq!(to_string(f, f32::MAX, Minus, 8, false), "3.4028235e38");
+ assert_eq!(to_string(f, f32::MAX, Minus, 16, false), "3.402823466385289e38");
+ assert_eq!(to_string(f, f32::MAX, Minus, 32, false), "3.4028234663852885981170418348452e38");
+ assert_eq!(
+ to_string(f, f32::MAX, Minus, 64, false),
+ "3.402823466385288598117041834845169254400000000000000000000000000e38"
+ );
+
+ let minf32 = ldexp_f32(1.0, -149);
+ assert_eq!(to_string(f, minf32, Minus, 1, false), "1e-45");
+ assert_eq!(to_string(f, minf32, Minus, 2, false), "1.4e-45");
+ assert_eq!(to_string(f, minf32, Minus, 4, false), "1.401e-45");
+ assert_eq!(to_string(f, minf32, Minus, 8, false), "1.4012985e-45");
+ assert_eq!(to_string(f, minf32, Minus, 16, false), "1.401298464324817e-45");
+ assert_eq!(to_string(f, minf32, Minus, 32, false), "1.4012984643248170709237295832899e-45");
+ assert_eq!(
+ to_string(f, minf32, Minus, 64, false),
+ "1.401298464324817070923729583289916131280261941876515771757068284e-45"
+ );
+ assert_eq!(
+ to_string(f, minf32, Minus, 128, false),
+ "1.401298464324817070923729583289916131280261941876515771757068283\
+ 8897910826858606014866381883621215820312500000000000000000000000e-45"
+ );
+
+ if cfg!(miri) {
+ // Miri is too slow
+ return;
+ }
+
+ assert_eq!(to_string(f, f64::MAX, Minus, 1, false), "2e308");
+ assert_eq!(to_string(f, f64::MAX, Minus, 2, false), "1.8e308");
+ assert_eq!(to_string(f, f64::MAX, Minus, 4, false), "1.798e308");
+ assert_eq!(to_string(f, f64::MAX, Minus, 8, false), "1.7976931e308");
+ assert_eq!(to_string(f, f64::MAX, Minus, 16, false), "1.797693134862316e308");
+ assert_eq!(to_string(f, f64::MAX, Minus, 32, false), "1.7976931348623157081452742373170e308");
+ assert_eq!(
+ to_string(f, f64::MAX, Minus, 64, false),
+ "1.797693134862315708145274237317043567980705675258449965989174768e308"
+ );
+ assert_eq!(
+ to_string(f, f64::MAX, Minus, 128, false),
+ "1.797693134862315708145274237317043567980705675258449965989174768\
+ 0315726078002853876058955863276687817154045895351438246423432133e308"
+ );
+ assert_eq!(
+ to_string(f, f64::MAX, Minus, 256, false),
+ "1.797693134862315708145274237317043567980705675258449965989174768\
+ 0315726078002853876058955863276687817154045895351438246423432132\
+ 6889464182768467546703537516986049910576551282076245490090389328\
+ 9440758685084551339423045832369032229481658085593321233482747978e308"
+ );
+ assert_eq!(
+ to_string(f, f64::MAX, Minus, 512, false),
+ "1.797693134862315708145274237317043567980705675258449965989174768\
+ 0315726078002853876058955863276687817154045895351438246423432132\
+ 6889464182768467546703537516986049910576551282076245490090389328\
+ 9440758685084551339423045832369032229481658085593321233482747978\
+ 2620414472316873817718091929988125040402618412485836800000000000\
+ 0000000000000000000000000000000000000000000000000000000000000000\
+ 0000000000000000000000000000000000000000000000000000000000000000\
+ 0000000000000000000000000000000000000000000000000000000000000000e308"
+ );
+
+ // okay, this is becoming tough. fortunately for us, this is almost the worst case.
+ let minf64 = ldexp_f64(1.0, -1074);
+ assert_eq!(to_string(f, minf64, Minus, 1, false), "5e-324");
+ assert_eq!(to_string(f, minf64, Minus, 2, false), "4.9e-324");
+ assert_eq!(to_string(f, minf64, Minus, 4, false), "4.941e-324");
+ assert_eq!(to_string(f, minf64, Minus, 8, false), "4.9406565e-324");
+ assert_eq!(to_string(f, minf64, Minus, 16, false), "4.940656458412465e-324");
+ assert_eq!(to_string(f, minf64, Minus, 32, false), "4.9406564584124654417656879286822e-324");
+ assert_eq!(
+ to_string(f, minf64, Minus, 64, false),
+ "4.940656458412465441765687928682213723650598026143247644255856825e-324"
+ );
+ assert_eq!(
+ to_string(f, minf64, Minus, 128, false),
+ "4.940656458412465441765687928682213723650598026143247644255856825\
+ 0067550727020875186529983636163599237979656469544571773092665671e-324"
+ );
+ assert_eq!(
+ to_string(f, minf64, Minus, 256, false),
+ "4.940656458412465441765687928682213723650598026143247644255856825\
+ 0067550727020875186529983636163599237979656469544571773092665671\
+ 0355939796398774796010781878126300713190311404527845817167848982\
+ 1036887186360569987307230500063874091535649843873124733972731696e-324"
+ );
+ assert_eq!(
+ to_string(f, minf64, Minus, 512, false),
+ "4.940656458412465441765687928682213723650598026143247644255856825\
+ 0067550727020875186529983636163599237979656469544571773092665671\
+ 0355939796398774796010781878126300713190311404527845817167848982\
+ 1036887186360569987307230500063874091535649843873124733972731696\
+ 1514003171538539807412623856559117102665855668676818703956031062\
+ 4931945271591492455329305456544401127480129709999541931989409080\
+ 4165633245247571478690147267801593552386115501348035264934720193\
+ 7902681071074917033322268447533357208324319360923828934583680601e-324"
+ );
+ assert_eq!(
+ to_string(f, minf64, Minus, 1024, false),
+ "4.940656458412465441765687928682213723650598026143247644255856825\
+ 0067550727020875186529983636163599237979656469544571773092665671\
+ 0355939796398774796010781878126300713190311404527845817167848982\
+ 1036887186360569987307230500063874091535649843873124733972731696\
+ 1514003171538539807412623856559117102665855668676818703956031062\
+ 4931945271591492455329305456544401127480129709999541931989409080\
+ 4165633245247571478690147267801593552386115501348035264934720193\
+ 7902681071074917033322268447533357208324319360923828934583680601\
+ 0601150616980975307834227731832924790498252473077637592724787465\
+ 6084778203734469699533647017972677717585125660551199131504891101\
+ 4510378627381672509558373897335989936648099411642057026370902792\
+ 4276754456522908753868250641971826553344726562500000000000000000\
+ 0000000000000000000000000000000000000000000000000000000000000000\
+ 0000000000000000000000000000000000000000000000000000000000000000\
+ 0000000000000000000000000000000000000000000000000000000000000000\
+ 0000000000000000000000000000000000000000000000000000000000000000e-324"
+ );
+
+ // very large output
+ assert_eq!(to_string(f, 0.0, Minus, 80000, false), format!("0.{:0>79999}e0", ""));
+ assert_eq!(to_string(f, 1.0e1, Minus, 80000, false), format!("1.{:0>79999}e1", ""));
+ assert_eq!(to_string(f, 1.0e0, Minus, 80000, false), format!("1.{:0>79999}e0", ""));
+ assert_eq!(
+ to_string(f, 1.0e-1, Minus, 80000, false),
+ format!(
+ "1.000000000000000055511151231257827021181583404541015625{:0>79945}\
+ e-1",
+ ""
+ )
+ );
+ assert_eq!(
+ to_string(f, 1.0e-20, Minus, 80000, false),
+ format!(
+ "9.999999999999999451532714542095716517295037027873924471077157760\
+ 66783064379706047475337982177734375{:0>79901}e-21",
+ ""
+ )
+ );
+}
+
+pub fn to_exact_fixed_str_test<F>(mut f_: F)
+where
+ F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>], i16) -> (&'a [u8], i16),
+{
+ use core::num::flt2dec::Sign::*;
+
+ fn to_string<T, F>(f: &mut F, v: T, sign: Sign, frac_digits: usize) -> String
+ where
+ T: DecodableFloat,
+ F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>], i16) -> (&'a [u8], i16),
+ {
+ to_string_with_parts(|buf, parts| {
+ to_exact_fixed_str(|d, b, l| f(d, b, l), v, sign, frac_digits, buf, parts)
+ })
+ }
+
+ let f = &mut f_;
+
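+    // `frac_digits` both rounds and pads: the value is rounded at `frac_digits`
+    // places after the point (so 0 rounds to an integer), then rendered with
+    // exactly that many fractional digits.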
+ assert_eq!(to_string(f, 0.0, Minus, 0), "0");
+ assert_eq!(to_string(f, 0.0, MinusPlus, 0), "+0");
+ assert_eq!(to_string(f, -0.0, Minus, 0), "-0");
+ assert_eq!(to_string(f, -0.0, MinusPlus, 0), "-0");
+ assert_eq!(to_string(f, 0.0, Minus, 1), "0.0");
+ assert_eq!(to_string(f, 0.0, MinusPlus, 1), "+0.0");
+ assert_eq!(to_string(f, -0.0, Minus, 8), "-0.00000000");
+ assert_eq!(to_string(f, -0.0, MinusPlus, 8), "-0.00000000");
+
+ assert_eq!(to_string(f, 1.0 / 0.0, Minus, 0), "inf");
+ assert_eq!(to_string(f, 1.0 / 0.0, Minus, 1), "inf");
+ assert_eq!(to_string(f, 1.0 / 0.0, MinusPlus, 64), "+inf");
+ assert_eq!(to_string(f, 0.0 / 0.0, Minus, 0), "NaN");
+ assert_eq!(to_string(f, 0.0 / 0.0, Minus, 1), "NaN");
+ assert_eq!(to_string(f, 0.0 / 0.0, MinusPlus, 64), "NaN");
+ assert_eq!(to_string(f, -1.0 / 0.0, Minus, 0), "-inf");
+ assert_eq!(to_string(f, -1.0 / 0.0, Minus, 1), "-inf");
+ assert_eq!(to_string(f, -1.0 / 0.0, MinusPlus, 64), "-inf");
+
+ assert_eq!(to_string(f, 3.14, Minus, 0), "3");
+ assert_eq!(to_string(f, 3.14, Minus, 0), "3");
+ assert_eq!(to_string(f, 3.14, MinusPlus, 0), "+3");
+ assert_eq!(to_string(f, -3.14, Minus, 0), "-3");
+ assert_eq!(to_string(f, -3.14, Minus, 0), "-3");
+ assert_eq!(to_string(f, -3.14, MinusPlus, 0), "-3");
+ assert_eq!(to_string(f, 3.14, Minus, 1), "3.1");
+ assert_eq!(to_string(f, 3.14, Minus, 2), "3.14");
+ assert_eq!(to_string(f, 3.14, MinusPlus, 4), "+3.1400");
+ assert_eq!(to_string(f, -3.14, Minus, 8), "-3.14000000");
+ assert_eq!(to_string(f, -3.14, Minus, 8), "-3.14000000");
+ assert_eq!(to_string(f, -3.14, MinusPlus, 8), "-3.14000000");
+
+ assert_eq!(to_string(f, 0.195, Minus, 0), "0");
+ assert_eq!(to_string(f, 0.195, MinusPlus, 0), "+0");
+ assert_eq!(to_string(f, -0.195, Minus, 0), "-0");
+ assert_eq!(to_string(f, -0.195, Minus, 0), "-0");
+ assert_eq!(to_string(f, -0.195, MinusPlus, 0), "-0");
+ assert_eq!(to_string(f, 0.195, Minus, 1), "0.2");
+ assert_eq!(to_string(f, 0.195, Minus, 2), "0.20");
+ assert_eq!(to_string(f, 0.195, MinusPlus, 4), "+0.1950");
+ assert_eq!(to_string(f, -0.195, Minus, 5), "-0.19500");
+ assert_eq!(to_string(f, -0.195, Minus, 6), "-0.195000");
+ assert_eq!(to_string(f, -0.195, MinusPlus, 8), "-0.19500000");
+
+ assert_eq!(to_string(f, 999.5, Minus, 0), "1000");
+ assert_eq!(to_string(f, 999.5, Minus, 1), "999.5");
+ assert_eq!(to_string(f, 999.5, Minus, 2), "999.50");
+ assert_eq!(to_string(f, 999.5, Minus, 3), "999.500");
+ assert_eq!(to_string(f, 999.5, Minus, 30), "999.500000000000000000000000000000");
+
+ assert_eq!(to_string(f, 0.5, Minus, 0), "1");
+ assert_eq!(to_string(f, 0.5, Minus, 1), "0.5");
+ assert_eq!(to_string(f, 0.5, Minus, 2), "0.50");
+ assert_eq!(to_string(f, 0.5, Minus, 3), "0.500");
+
+ assert_eq!(to_string(f, 0.95, Minus, 0), "1");
+ assert_eq!(to_string(f, 0.95, Minus, 1), "0.9"); // because it really is less than 0.95
+ assert_eq!(to_string(f, 0.95, Minus, 2), "0.95");
+ assert_eq!(to_string(f, 0.95, Minus, 3), "0.950");
+ assert_eq!(to_string(f, 0.95, Minus, 10), "0.9500000000");
+ assert_eq!(to_string(f, 0.95, Minus, 30), "0.949999999999999955591079014994");
+
+ assert_eq!(to_string(f, 0.095, Minus, 0), "0");
+ assert_eq!(to_string(f, 0.095, Minus, 1), "0.1");
+ assert_eq!(to_string(f, 0.095, Minus, 2), "0.10");
+ assert_eq!(to_string(f, 0.095, Minus, 3), "0.095");
+ assert_eq!(to_string(f, 0.095, Minus, 4), "0.0950");
+ assert_eq!(to_string(f, 0.095, Minus, 10), "0.0950000000");
+ assert_eq!(to_string(f, 0.095, Minus, 30), "0.095000000000000001110223024625");
+
+ assert_eq!(to_string(f, 0.0095, Minus, 0), "0");
+ assert_eq!(to_string(f, 0.0095, Minus, 1), "0.0");
+ assert_eq!(to_string(f, 0.0095, Minus, 2), "0.01");
+ assert_eq!(to_string(f, 0.0095, Minus, 3), "0.009"); // really is less than 0.0095
+ assert_eq!(to_string(f, 0.0095, Minus, 4), "0.0095");
+ assert_eq!(to_string(f, 0.0095, Minus, 5), "0.00950");
+ assert_eq!(to_string(f, 0.0095, Minus, 10), "0.0095000000");
+ assert_eq!(to_string(f, 0.0095, Minus, 30), "0.009499999999999999764077607267");
+
+ assert_eq!(to_string(f, 7.5e-11, Minus, 0), "0");
+ assert_eq!(to_string(f, 7.5e-11, Minus, 3), "0.000");
+ assert_eq!(to_string(f, 7.5e-11, Minus, 10), "0.0000000001");
+ assert_eq!(to_string(f, 7.5e-11, Minus, 11), "0.00000000007"); // ditto
+ assert_eq!(to_string(f, 7.5e-11, Minus, 12), "0.000000000075");
+ assert_eq!(to_string(f, 7.5e-11, Minus, 13), "0.0000000000750");
+ assert_eq!(to_string(f, 7.5e-11, Minus, 20), "0.00000000007500000000");
+ assert_eq!(to_string(f, 7.5e-11, Minus, 30), "0.000000000074999999999999999501");
+
+ assert_eq!(to_string(f, 1.0e25, Minus, 0), "10000000000000000905969664");
+ assert_eq!(to_string(f, 1.0e25, Minus, 1), "10000000000000000905969664.0");
+ assert_eq!(to_string(f, 1.0e25, Minus, 3), "10000000000000000905969664.000");
+
+ assert_eq!(to_string(f, 1.0e-6, Minus, 0), "0");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 3), "0.000");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 6), "0.000001");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 9), "0.000001000");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 12), "0.000001000000");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 22), "0.0000010000000000000000");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 23), "0.00000099999999999999995");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 24), "0.000000999999999999999955");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 25), "0.0000009999999999999999547");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 35), "0.00000099999999999999995474811182589");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 45), "0.000000999999999999999954748111825886258685614");
+ assert_eq!(
+ to_string(f, 1.0e-6, Minus, 55),
+ "0.0000009999999999999999547481118258862586856139387236908"
+ );
+ assert_eq!(
+ to_string(f, 1.0e-6, Minus, 65),
+ "0.00000099999999999999995474811182588625868561393872369080781936646"
+ );
+ assert_eq!(
+ to_string(f, 1.0e-6, Minus, 75),
+ "0.000000999999999999999954748111825886258685613938723690807819366455078125000"
+ );
+
+ assert_eq!(to_string(f, f32::MAX, Minus, 0), "340282346638528859811704183484516925440");
+ assert_eq!(to_string(f, f32::MAX, Minus, 1), "340282346638528859811704183484516925440.0");
+ assert_eq!(to_string(f, f32::MAX, Minus, 2), "340282346638528859811704183484516925440.00");
+
+ if cfg!(miri) {
+ // Miri is too slow
+ return;
+ }
+
+ let minf32 = ldexp_f32(1.0, -149);
+ assert_eq!(to_string(f, minf32, Minus, 0), "0");
+ assert_eq!(to_string(f, minf32, Minus, 1), "0.0");
+ assert_eq!(to_string(f, minf32, Minus, 2), "0.00");
+ assert_eq!(to_string(f, minf32, Minus, 4), "0.0000");
+ assert_eq!(to_string(f, minf32, Minus, 8), "0.00000000");
+ assert_eq!(to_string(f, minf32, Minus, 16), "0.0000000000000000");
+ assert_eq!(to_string(f, minf32, Minus, 32), "0.00000000000000000000000000000000");
+ assert_eq!(
+ to_string(f, minf32, Minus, 64),
+ "0.0000000000000000000000000000000000000000000014012984643248170709"
+ );
+ assert_eq!(
+ to_string(f, minf32, Minus, 128),
+ "0.0000000000000000000000000000000000000000000014012984643248170709\
+ 2372958328991613128026194187651577175706828388979108268586060149"
+ );
+ assert_eq!(
+ to_string(f, minf32, Minus, 256),
+ "0.0000000000000000000000000000000000000000000014012984643248170709\
+ 2372958328991613128026194187651577175706828388979108268586060148\
+ 6638188362121582031250000000000000000000000000000000000000000000\
+ 0000000000000000000000000000000000000000000000000000000000000000"
+ );
+
+ assert_eq!(
+ to_string(f, f64::MAX, Minus, 0),
+ "1797693134862315708145274237317043567980705675258449965989174768\
+ 0315726078002853876058955863276687817154045895351438246423432132\
+ 6889464182768467546703537516986049910576551282076245490090389328\
+ 9440758685084551339423045832369032229481658085593321233482747978\
+ 26204144723168738177180919299881250404026184124858368"
+ );
+ assert_eq!(
+ to_string(f, f64::MAX, Minus, 10),
+ "1797693134862315708145274237317043567980705675258449965989174768\
+ 0315726078002853876058955863276687817154045895351438246423432132\
+ 6889464182768467546703537516986049910576551282076245490090389328\
+ 9440758685084551339423045832369032229481658085593321233482747978\
+ 26204144723168738177180919299881250404026184124858368.0000000000"
+ );
+
+ let minf64 = ldexp_f64(1.0, -1074);
+ assert_eq!(to_string(f, minf64, Minus, 0), "0");
+ assert_eq!(to_string(f, minf64, Minus, 1), "0.0");
+ assert_eq!(to_string(f, minf64, Minus, 10), "0.0000000000");
+ assert_eq!(
+ to_string(f, minf64, Minus, 100),
+ "0.0000000000000000000000000000000000000000000000000000000000000000\
+ 000000000000000000000000000000000000"
+ );
+ assert_eq!(
+ to_string(f, minf64, Minus, 1000),
+ "0.0000000000000000000000000000000000000000000000000000000000000000\
+ 0000000000000000000000000000000000000000000000000000000000000000\
+ 0000000000000000000000000000000000000000000000000000000000000000\
+ 0000000000000000000000000000000000000000000000000000000000000000\
+ 0000000000000000000000000000000000000000000000000000000000000000\
+ 0004940656458412465441765687928682213723650598026143247644255856\
+ 8250067550727020875186529983636163599237979656469544571773092665\
+ 6710355939796398774796010781878126300713190311404527845817167848\
+ 9821036887186360569987307230500063874091535649843873124733972731\
+ 6961514003171538539807412623856559117102665855668676818703956031\
+ 0624931945271591492455329305456544401127480129709999541931989409\
+ 0804165633245247571478690147267801593552386115501348035264934720\
+ 1937902681071074917033322268447533357208324319360923828934583680\
+ 6010601150616980975307834227731832924790498252473077637592724787\
+ 4656084778203734469699533647017972677717585125660551199131504891\
+ 1014510378627381672509558373897335989937"
+ );
+
+ // very large output
+ assert_eq!(to_string(f, 0.0, Minus, 80000), format!("0.{:0>80000}", ""));
+ assert_eq!(to_string(f, 1.0e1, Minus, 80000), format!("10.{:0>80000}", ""));
+ assert_eq!(to_string(f, 1.0e0, Minus, 80000), format!("1.{:0>80000}", ""));
+ assert_eq!(
+ to_string(f, 1.0e-1, Minus, 80000),
+ format!("0.1000000000000000055511151231257827021181583404541015625{:0>79945}", "")
+ );
+ assert_eq!(
+ to_string(f, 1.0e-20, Minus, 80000),
+ format!(
+ "0.0000000000000000000099999999999999994515327145420957165172950370\
+ 2787392447107715776066783064379706047475337982177734375{:0>79881}",
+ ""
+ )
+ );
+}
diff --git a/library/core/tests/num/flt2dec/random.rs b/library/core/tests/num/flt2dec/random.rs
new file mode 100644
index 000000000..d09500393
--- /dev/null
+++ b/library/core/tests/num/flt2dec/random.rs
@@ -0,0 +1,202 @@
+#![cfg(not(target_arch = "wasm32"))]
+
+use std::mem::MaybeUninit;
+use std::str;
+
+use core::num::flt2dec::strategy::grisu::format_exact_opt;
+use core::num::flt2dec::strategy::grisu::format_shortest_opt;
+use core::num::flt2dec::MAX_SIG_DIGITS;
+use core::num::flt2dec::{decode, DecodableFloat, Decoded, FullDecoded};
+
+use rand::distributions::{Distribution, Uniform};
+use rand::rngs::StdRng;
+use rand::SeedableRng;
+
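+/// Decodes `v` and returns the `Decoded` payload, panicking unless `v` is finite.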
+pub fn decode_finite<T: DecodableFloat>(v: T) -> Decoded {
+ match decode(v).1 {
+ FullDecoded::Finite(decoded) => decoded,
+ full_decoded => panic!("expected finite, got {full_decoded:?} instead"),
+ }
+}
+
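+// Runs the optimized formatter `f` against the reference formatter `g` on `n` decoded
+// values produced by `v`; whenever `f` yields digits, they must match `g` exactly.
+// Returns `(npassed, nignored)`.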
+fn iterate<F, G, V>(func: &str, k: usize, n: usize, mut f: F, mut g: G, mut v: V) -> (usize, usize)
+where
+ F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> Option<(&'a [u8], i16)>,
+ G: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> (&'a [u8], i16),
+ V: FnMut(usize) -> Decoded,
+{
+ assert!(k <= 1024);
+
+ let mut npassed = 0; // f(x) = Some(g(x))
+ let mut nignored = 0; // f(x) = None
+
+ for i in 0..n {
+ if (i & 0xfffff) == 0 {
+ println!(
+ "in progress, {:x}/{:x} (ignored={} passed={} failed={})",
+ i,
+ n,
+ nignored,
+ npassed,
+ i - nignored - npassed
+ );
+ }
+
+ let decoded = v(i);
+ let mut buf1 = [MaybeUninit::new(0); 1024];
+ if let Some((buf1, e1)) = f(&decoded, &mut buf1[..k]) {
+ let mut buf2 = [MaybeUninit::new(0); 1024];
+ let (buf2, e2) = g(&decoded, &mut buf2[..k]);
+ if e1 == e2 && buf1 == buf2 {
+ npassed += 1;
+ } else {
+ println!(
+ "equivalence test failed, {:x}/{:x}: {:?} f(i)={}e{} g(i)={}e{}",
+ i,
+ n,
+ decoded,
+ str::from_utf8(buf1).unwrap(),
+ e1,
+ str::from_utf8(buf2).unwrap(),
+ e2
+ );
+ }
+ } else {
+ nignored += 1;
+ }
+ }
+ println!(
+ "{}({}): done, ignored={} passed={} failed={}",
+ func,
+ k,
+ nignored,
+ npassed,
+ n - nignored - npassed
+ );
+ assert!(
+ nignored + npassed == n,
+ "{}({}): {} out of {} values return an incorrect value!",
+ func,
+ k,
+ n - nignored - npassed,
+ n
+ );
+ (npassed, nignored)
+}
+
+pub fn f32_random_equivalence_test<F, G>(f: F, g: G, k: usize, n: usize)
+where
+ F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> Option<(&'a [u8], i16)>,
+ G: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> (&'a [u8], i16),
+{
+ if cfg!(target_os = "emscripten") {
+ return; // using rng pulls in i128 support, which doesn't work
+ }
+ let mut rng = StdRng::from_entropy();
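+ // Half-open range over every positive finite nonzero f32 bit pattern (0x7f80_0000 is +inf).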
+ let f32_range = Uniform::new(0x0000_0001u32, 0x7f80_0000);
+ iterate("f32_random_equivalence_test", k, n, f, g, |_| {
+ let x = f32::from_bits(f32_range.sample(&mut rng));
+ decode_finite(x)
+ });
+}
+
+pub fn f64_random_equivalence_test<F, G>(f: F, g: G, k: usize, n: usize)
+where
+ F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> Option<(&'a [u8], i16)>,
+ G: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> (&'a [u8], i16),
+{
+ if cfg!(target_os = "emscripten") {
+ return; // using rng pulls in i128 support, which doesn't work
+ }
+ let mut rng = StdRng::from_entropy();
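+ // Half-open range over every positive finite nonzero f64 bit pattern (0x7ff0_0000_0000_0000 is +inf).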
+ let f64_range = Uniform::new(0x0000_0000_0000_0001u64, 0x7ff0_0000_0000_0000);
+ iterate("f64_random_equivalence_test", k, n, f, g, |_| {
+ let x = f64::from_bits(f64_range.sample(&mut rng));
+ decode_finite(x)
+ });
+}
+
+pub fn f32_exhaustive_equivalence_test<F, G>(f: F, g: G, k: usize)
+where
+ F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> Option<(&'a [u8], i16)>,
+ G: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> (&'a [u8], i16),
+{
+ // we have only 2^23 * (2^8 - 1) - 1 = 2,139,095,039 positive finite f32 values,
+ // so why not simply test all of them?
+ //
+ // this is of course very stressful (and thus should be behind an `#[ignore]` attribute),
+ // but with `-C opt-level=3 -C lto` this only takes about an hour or so.
+
+ // iterate from 0x0000_0001 to 0x7f7f_ffff, i.e. over every positive finite f32 bit pattern
+ let (npassed, nignored) =
+ iterate("f32_exhaustive_equivalence_test", k, 0x7f7f_ffff, f, g, |i: usize| {
+ let x = f32::from_bits(i as u32 + 1);
+ decode_finite(x)
+ });
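+ // 2,121,451,881 passed + 17,643,158 ignored = 2,139,095,039, so every positive finite f32 is accounted for.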
+ assert_eq!((npassed, nignored), (2121451881, 17643158));
+}
+
+#[test]
+fn shortest_random_equivalence_test() {
+ use core::num::flt2dec::strategy::dragon::format_shortest as fallback;
+ // Miri is too slow
+ let n = if cfg!(miri) { 10 } else { 10_000 };
+
+ f64_random_equivalence_test(format_shortest_opt, fallback, MAX_SIG_DIGITS, n);
+ f32_random_equivalence_test(format_shortest_opt, fallback, MAX_SIG_DIGITS, n);
+}
+
+#[test]
+#[ignore] // it is too expensive
+fn shortest_f32_exhaustive_equivalence_test() {
+ // it is hard to test the optimality of the output directly, but we can at least check
+ // that two different algorithms agree with each other.
+ //
+ // this reports the progress and the number of f32 values for which `f` returned `None`.
+ // with `--nocapture` (and plenty of time and appropriate rustc flags), this should print:
+ // `done, ignored=17643158 passed=2121451881 failed=0`.
+
+ use core::num::flt2dec::strategy::dragon::format_shortest as fallback;
+ f32_exhaustive_equivalence_test(format_shortest_opt, fallback, MAX_SIG_DIGITS);
+}
+
+#[test]
+#[ignore] // it is too expensive
+fn shortest_f64_hard_random_equivalence_test() {
+ // as with the exhaustive test above, this probably needs appropriate rustc flags (and time).
+
+ use core::num::flt2dec::strategy::dragon::format_shortest as fallback;
+ f64_random_equivalence_test(format_shortest_opt, fallback, MAX_SIG_DIGITS, 100_000_000);
+}
+
+#[test]
+fn exact_f32_random_equivalence_test() {
+ use core::num::flt2dec::strategy::dragon::format_exact as fallback;
+ // Miri is too slow
+ let n = if cfg!(miri) { 3 } else { 1_000 };
+
+ for k in 1..21 {
+ f32_random_equivalence_test(
+ |d, buf| format_exact_opt(d, buf, i16::MIN),
+ |d, buf| fallback(d, buf, i16::MIN),
+ k,
+ n,
+ );
+ }
+}
+
+#[test]
+fn exact_f64_random_equivalence_test() {
+ use core::num::flt2dec::strategy::dragon::format_exact as fallback;
+ // Miri is too slow
+ let n = if cfg!(miri) { 2 } else { 1_000 };
+
+ for k in 1..21 {
+ f64_random_equivalence_test(
+ |d, buf| format_exact_opt(d, buf, i16::MIN),
+ |d, buf| fallback(d, buf, i16::MIN),
+ k,
+ n,
+ );
+ }
+}
diff --git a/library/core/tests/num/flt2dec/strategy/dragon.rs b/library/core/tests/num/flt2dec/strategy/dragon.rs
new file mode 100644
index 000000000..fc2e724a2
--- /dev/null
+++ b/library/core/tests/num/flt2dec/strategy/dragon.rs
@@ -0,0 +1,63 @@
+use super::super::*;
+use core::num::bignum::Big32x40 as Big;
+use core::num::flt2dec::strategy::dragon::*;
+
+#[test]
+fn test_mul_pow10() {
+ let mut prevpow10 = Big::from_small(1);
+ for i in 1..340 {
+ let mut curpow10 = Big::from_small(1);
+ mul_pow10(&mut curpow10, i);
+ assert_eq!(curpow10, *prevpow10.clone().mul_small(10));
+ prevpow10 = curpow10;
+ }
+}
+
+#[test]
+fn shortest_sanity_test() {
+ f64_shortest_sanity_test(format_shortest);
+ f32_shortest_sanity_test(format_shortest);
+ more_shortest_sanity_test(format_shortest);
+}
+
+#[test]
+#[cfg_attr(miri, ignore)] // Miri is too slow
+fn exact_sanity_test() {
+ // This test ends up running what I can only assume is some corner-ish case
+ // of the `exp2` library function, defined in whatever C runtime we're
+ // using. In VS 2013 this function apparently had a bug as this test fails
+ // when linked, but with VS 2015 the bug appears fixed as the test runs just
+ // fine.
+ //
+ // The bug seems to be a difference in return value of `exp2(-1057)`, where
+ // in VS 2013 it returns a double with the bit pattern 0x2 and in VS 2015 it
+ // returns 0x20000.
+ //
+ // For now just ignore this test entirely on MSVC as it's tested elsewhere
+ // anyway and we're not super interested in testing each platform's exp2
+ // implementation.
+ if !cfg!(target_env = "msvc") {
+ f64_exact_sanity_test(format_exact);
+ }
+ f32_exact_sanity_test(format_exact);
+}
+
+#[test]
+fn test_to_shortest_str() {
+ to_shortest_str_test(format_shortest);
+}
+
+#[test]
+fn test_to_shortest_exp_str() {
+ to_shortest_exp_str_test(format_shortest);
+}
+
+#[test]
+fn test_to_exact_exp_str() {
+ to_exact_exp_str_test(format_exact);
+}
+
+#[test]
+fn test_to_exact_fixed_str() {
+ to_exact_fixed_str_test(format_exact);
+}
diff --git a/library/core/tests/num/flt2dec/strategy/grisu.rs b/library/core/tests/num/flt2dec/strategy/grisu.rs
new file mode 100644
index 000000000..b59a3b9b7
--- /dev/null
+++ b/library/core/tests/num/flt2dec/strategy/grisu.rs
@@ -0,0 +1,72 @@
+use super::super::*;
+use core::num::flt2dec::strategy::grisu::*;
+
+#[test]
+#[cfg_attr(miri, ignore)] // Miri is too slow
+fn test_cached_power() {
+ assert_eq!(CACHED_POW10.first().unwrap().1, CACHED_POW10_FIRST_E);
+ assert_eq!(CACHED_POW10.last().unwrap().1, CACHED_POW10_LAST_E);
+
+ for e in -1137..961 {
+ // full range for f64
+ let low = ALPHA - e - 64;
+ let high = GAMMA - e - 64;
+ let (_k, cached) = cached_power(low, high);
+ assert!(
+ low <= cached.e && cached.e <= high,
+ "cached_power({}, {}) = {:?} is incorrect",
+ low,
+ high,
+ cached
+ );
+ }
+}
+
+#[test]
+fn test_max_pow10_no_more_than() {
+ let mut prevtenk = 1;
+ for k in 1..10 {
+ let tenk = prevtenk * 10;
+ assert_eq!(max_pow10_no_more_than(tenk - 1), (k - 1, prevtenk));
+ assert_eq!(max_pow10_no_more_than(tenk), (k, tenk));
+ prevtenk = tenk;
+ }
+}
+
+#[test]
+fn shortest_sanity_test() {
+ f64_shortest_sanity_test(format_shortest);
+ f32_shortest_sanity_test(format_shortest);
+ more_shortest_sanity_test(format_shortest);
+}
+
+#[test]
+#[cfg_attr(miri, ignore)] // Miri is too slow
+fn exact_sanity_test() {
+ // See comments in dragon.rs's exact_sanity_test for why this test is
+ // ignored on MSVC
+ if !cfg!(target_env = "msvc") {
+ f64_exact_sanity_test(format_exact);
+ }
+ f32_exact_sanity_test(format_exact);
+}
+
+#[test]
+fn test_to_shortest_str() {
+ to_shortest_str_test(format_shortest);
+}
+
+#[test]
+fn test_to_shortest_exp_str() {
+ to_shortest_exp_str_test(format_shortest);
+}
+
+#[test]
+fn test_to_exact_exp_str() {
+ to_exact_exp_str_test(format_exact);
+}
+
+#[test]
+fn test_to_exact_fixed_str() {
+ to_exact_fixed_str_test(format_exact);
+}
diff --git a/library/core/tests/num/i128.rs b/library/core/tests/num/i128.rs
new file mode 100644
index 000000000..1ddd20f33
--- /dev/null
+++ b/library/core/tests/num/i128.rs
@@ -0,0 +1 @@
+int_module!(i128);
diff --git a/library/core/tests/num/i16.rs b/library/core/tests/num/i16.rs
new file mode 100644
index 000000000..c7aa9fff9
--- /dev/null
+++ b/library/core/tests/num/i16.rs
@@ -0,0 +1 @@
+int_module!(i16);
diff --git a/library/core/tests/num/i32.rs b/library/core/tests/num/i32.rs
new file mode 100644
index 000000000..efd5b1596
--- /dev/null
+++ b/library/core/tests/num/i32.rs
@@ -0,0 +1,30 @@
+int_module!(i32);
+
+#[test]
+fn test_arith_operation() {
+ let a: isize = 10;
+ assert_eq!(a * (a - 1), 90);
+ let i32_a: isize = 10;
+ assert_eq!(i32_a, 10);
+ assert_eq!(i32_a - 10, 0);
+ assert_eq!(i32_a / 10, 1);
+ assert_eq!(i32_a - 20, -10);
+ assert_eq!(i32_a << 10, 10240);
+ assert_eq!(i32_a << 16, 655360);
+ assert_eq!(i32_a * 16, 160);
+ assert_eq!(i32_a * i32_a * i32_a, 1000);
+ assert_eq!(i32_a * i32_a * i32_a * i32_a, 10000);
+ assert_eq!(i32_a * i32_a / i32_a * i32_a, 100);
+ assert_eq!(i32_a * (i32_a - 1) << (2 + i32_a as usize), 368640);
+ let i32_b: isize = 0x10101010;
+ assert_eq!(i32_b + 1 - 1, i32_b);
+ assert_eq!(i32_b << 1, i32_b << 1);
+ assert_eq!(i32_b >> 1, i32_b >> 1);
+ assert_eq!(i32_b & i32_b << 1, 0);
+ assert_eq!(i32_b | i32_b << 1, 0x30303030);
+ let i32_c: isize = 0x10101010;
+ assert_eq!(
+ i32_c + i32_c * 2 / 3 * 2 + (i32_c - 7 % 3),
+ i32_c + i32_c * 2 / 3 * 2 + (i32_c - 7 % 3)
+ );
+}
diff --git a/library/core/tests/num/i64.rs b/library/core/tests/num/i64.rs
new file mode 100644
index 000000000..93d23c10a
--- /dev/null
+++ b/library/core/tests/num/i64.rs
@@ -0,0 +1 @@
+int_module!(i64);
diff --git a/library/core/tests/num/i8.rs b/library/core/tests/num/i8.rs
new file mode 100644
index 000000000..887d4f17d
--- /dev/null
+++ b/library/core/tests/num/i8.rs
@@ -0,0 +1 @@
+int_module!(i8);
diff --git a/library/core/tests/num/ieee754.rs b/library/core/tests/num/ieee754.rs
new file mode 100644
index 000000000..f6e5dfc98
--- /dev/null
+++ b/library/core/tests/num/ieee754.rs
@@ -0,0 +1,158 @@
+//! IEEE 754 floating point compliance tests
+//!
+//! To understand IEEE 754's requirements on a programming language, one must understand that the
+//! requirements of IEEE 754 rest on the total programming environment, and not entirely on any
+//! one component. That means the hardware, language, and even libraries are considered part of
+//! conforming floating point support in a programming environment.
+//!
+//! A programming language's duty, accordingly, is:
+//! 1. offer access to the hardware where the hardware offers support
+//! 2. provide operations that fulfill the remaining requirements of the standard
+//! 3. provide the ability to write additional software that can fulfill those requirements
+//!
+//! This may be fulfilled in any combination that the language sees fit. However, to claim that
+//! a language supports IEEE 754 is to suggest that it has fulfilled requirements 1 and 2, without
+//! deferring minimum requirements to libraries. This is because support for IEEE 754 is defined
+//! as complete support for at least one specified floating point type as an "arithmetic" and
+//! "interchange" format, plus specified type conversions to "external character sequences" and
+//! integer types.
+//!
+//! For our purposes,
+//! "interchange format" => f32, f64
+//! "arithmetic format" => f32, f64, and any "soft floats"
+//! "external character sequence" => str from any float
+//! "integer format" => {i,u}{8,16,32,64,128}
+//!
+//! None of these tests are against Rust's own implementation. They are only tests against the
+//! standard. That is why they accept wildly diverse inputs or may seem to duplicate other tests.
+//! Please consider this carefully when adding, removing, or reorganizing these tests. They are
+//! here so that it is clear what tests are required by the standard and what can be changed.
+use ::core::str::FromStr;
+
+// IEEE 754 for many tests is applied to specific bit patterns.
+// These generally are not applicable to NaN, however.
+macro_rules! assert_biteq {
+ ($lhs:expr, $rhs:expr) => {
+ assert_eq!($lhs.to_bits(), $rhs.to_bits())
+ };
+}
+
+// ToString uses the default fmt::Display impl without special concerns, and bypasses other parts
+// of the formatting infrastructure, which makes it ideal for testing here.
+#[allow(unused_macros)]
+macro_rules! roundtrip {
+ ($f:expr => $t:ty) => {
+ ($f).to_string().parse::<$t>().unwrap()
+ };
+}
+
+macro_rules! assert_floats_roundtrip {
+ ($f:ident) => {
+ assert_biteq!(f32::$f, roundtrip!(f32::$f => f32));
+ assert_biteq!(f64::$f, roundtrip!(f64::$f => f64));
+ };
+ ($f:expr) => {
+ assert_biteq!($f as f32, roundtrip!($f => f32));
+ assert_biteq!($f as f64, roundtrip!($f => f64));
+ }
+}
+
+macro_rules! assert_floats_bitne {
+ ($lhs:ident, $rhs:ident) => {
+ assert_ne!(f32::$lhs.to_bits(), f32::$rhs.to_bits());
+ assert_ne!(f64::$lhs.to_bits(), f64::$rhs.to_bits());
+ };
+ ($lhs:expr, $rhs:expr) => {
+ assert_ne!(f32::to_bits($lhs), f32::to_bits($rhs));
+ assert_ne!(f64::to_bits($lhs), f64::to_bits($rhs));
+ };
+}
+
+// We must preserve signs on all numbers. That includes zero.
+// -0 and 0 are == normally, so test bit equality.
+#[test]
+fn preserve_signed_zero() {
+ assert_floats_roundtrip!(-0.0);
+ assert_floats_roundtrip!(0.0);
+ assert_floats_bitne!(0.0, -0.0);
+}
+
+#[test]
+fn preserve_signed_infinity() {
+ assert_floats_roundtrip!(INFINITY);
+ assert_floats_roundtrip!(NEG_INFINITY);
+ assert_floats_bitne!(INFINITY, NEG_INFINITY);
+}
+
+#[test]
+fn infinity_to_str() {
+ assert!(match f32::INFINITY.to_string().to_lowercase().as_str() {
+ "+infinity" | "infinity" => true,
+ "+inf" | "inf" => true,
+ _ => false,
+ });
+ assert!(
+ match f64::INFINITY.to_string().to_lowercase().as_str() {
+ "+infinity" | "infinity" => true,
+ "+inf" | "inf" => true,
+ _ => false,
+ },
+ "Infinity must write to a string as some casing of inf or infinity, with an optional +."
+ );
+}
+
+#[test]
+fn neg_infinity_to_str() {
+ assert!(match f32::NEG_INFINITY.to_string().to_lowercase().as_str() {
+ "-infinity" | "-inf" => true,
+ _ => false,
+ });
+ assert!(
+ match f64::NEG_INFINITY.to_string().to_lowercase().as_str() {
+ "-infinity" | "-inf" => true,
+ _ => false,
+ },
+ "Negative Infinity must write to a string as some casing of -inf or -infinity"
+ )
+}
+
+#[test]
+fn nan_to_str() {
+ assert!(
+ match f32::NAN.to_string().to_lowercase().as_str() {
+ "nan" | "+nan" | "-nan" => true,
+ _ => false,
+ },
+ "NaNs must write to a string as some casing of nan."
+ )
+}
+
+// "+"?("inf"|"infinity") in any case => Infinity
+#[test]
+fn infinity_from_str() {
+ assert_biteq!(f32::INFINITY, f32::from_str("infinity").unwrap());
+ assert_biteq!(f32::INFINITY, f32::from_str("inf").unwrap());
+ assert_biteq!(f32::INFINITY, f32::from_str("+infinity").unwrap());
+ assert_biteq!(f32::INFINITY, f32::from_str("+inf").unwrap());
+ // yes, arbitrarily mixed case is accepted as well
+ assert_biteq!(f32::INFINITY, f32::from_str("+iNfInItY").unwrap());
+}
+
+// "-inf"|"-infinity" in any case => Negative Infinity
+#[test]
+fn neg_infinity_from_str() {
+ assert_biteq!(f32::NEG_INFINITY, f32::from_str("-infinity").unwrap());
+ assert_biteq!(f32::NEG_INFINITY, f32::from_str("-inf").unwrap());
+ assert_biteq!(f32::NEG_INFINITY, f32::from_str("-INF").unwrap());
+ assert_biteq!(f32::NEG_INFINITY, f32::from_str("-INFinity").unwrap());
+}
+
+// ("+"|"-"")?"s"?"nan" in any case => qNaN
+#[test]
+fn qnan_from_str() {
+ assert!("nan".parse::<f32>().unwrap().is_nan());
+ assert!("-nan".parse::<f32>().unwrap().is_nan());
+ assert!("+nan".parse::<f32>().unwrap().is_nan());
+ assert!("+NAN".parse::<f32>().unwrap().is_nan());
+ assert!("-NaN".parse::<f32>().unwrap().is_nan());
+}
diff --git a/library/core/tests/num/int_log.rs b/library/core/tests/num/int_log.rs
new file mode 100644
index 000000000..dc3092e14
--- /dev/null
+++ b/library/core/tests/num/int_log.rs
@@ -0,0 +1,166 @@
+//! This tests the `Integer::{log,log2,log10}` methods. These tests live in a
+//! separate file because there are a large number of them, and not all of them
+//! can be run on Android, where `log2` uses an imprecise approximation:
+//! https://github.com/rust-lang/rust/blob/4825e12fc9c79954aa0fe18f5521efa6c19c7539/src/libstd/sys/unix/android.rs#L27-L53
+
+#[test]
+fn checked_log() {
+ assert_eq!(999u32.checked_log(10), Some(2));
+ assert_eq!(1000u32.checked_log(10), Some(3));
+ assert_eq!(555u32.checked_log(13), Some(2));
+ assert_eq!(63u32.checked_log(4), Some(2));
+ assert_eq!(64u32.checked_log(4), Some(3));
+ assert_eq!(10460353203u64.checked_log(3), Some(21));
+ assert_eq!(10460353202u64.checked_log(3), Some(20));
+ assert_eq!(147808829414345923316083210206383297601u128.checked_log(3), Some(80));
+ assert_eq!(147808829414345923316083210206383297600u128.checked_log(3), Some(79));
+ assert_eq!(22528399544939174411840147874772641u128.checked_log(19683), Some(8));
+ assert_eq!(22528399544939174411840147874772631i128.checked_log(19683), Some(7));
+
+ assert_eq!(0u8.checked_log(4), None);
+ assert_eq!(0u16.checked_log(4), None);
+ assert_eq!(0i8.checked_log(4), None);
+ assert_eq!(0i16.checked_log(4), None);
+
+ #[cfg(not(miri))] // Miri is too slow
+ for i in i16::MIN..=0 {
+ assert_eq!(i.checked_log(4), None);
+ }
+ #[cfg(not(miri))] // Miri is too slow
+ for i in 1..=i16::MAX {
+ assert_eq!(i.checked_log(13), Some((i as f32).log(13.0) as u32));
+ }
+ #[cfg(not(miri))] // Miri is too slow
+ for i in 1..=u16::MAX {
+ assert_eq!(i.checked_log(13), Some((i as f32).log(13.0) as u32));
+ }
+}
+
+#[test]
+fn checked_log2() {
+ assert_eq!(5u32.checked_log2(), Some(2));
+ assert_eq!(0u64.checked_log2(), None);
+ assert_eq!(128i32.checked_log2(), Some(7));
+ assert_eq!((-55i16).checked_log2(), None);
+
+ assert_eq!(0u8.checked_log2(), None);
+ assert_eq!(0u16.checked_log2(), None);
+ assert_eq!(0i8.checked_log2(), None);
+ assert_eq!(0i16.checked_log2(), None);
+
+ for i in 1..=u8::MAX {
+ assert_eq!(i.checked_log2(), Some((i as f32).log2() as u32));
+ }
+ #[cfg(not(miri))] // Miri is too slow
+ for i in 1..=u16::MAX {
+ // Guard against Android's imprecise f32::log2 implementation.
+ if i != 8192 && i != 32768 {
+ assert_eq!(i.checked_log2(), Some((i as f32).log2() as u32));
+ }
+ }
+ for i in i8::MIN..=0 {
+ assert_eq!(i.checked_log2(), None);
+ }
+ for i in 1..=i8::MAX {
+ assert_eq!(i.checked_log2(), Some((i as f32).log2() as u32));
+ }
+ #[cfg(not(miri))] // Miri is too slow
+ for i in i16::MIN..=0 {
+ assert_eq!(i.checked_log2(), None);
+ }
+ #[cfg(not(miri))] // Miri is too slow
+ for i in 1..=i16::MAX {
+ // Guard against Android's imprecise f32::log2 implementation.
+ if i != 8192 {
+ assert_eq!(i.checked_log2(), Some((i as f32).log2() as u32));
+ }
+ }
+}
+
+// Validate cases that fail on Android's imprecise float log2 implementation.
+#[test]
+#[cfg(not(target_os = "android"))]
+fn checked_log2_not_android() {
+ assert_eq!(8192u16.checked_log2(), Some((8192f32).log2() as u32));
+ assert_eq!(32768u16.checked_log2(), Some((32768f32).log2() as u32));
+ assert_eq!(8192i16.checked_log2(), Some((8192f32).log2() as u32));
+}
+
+#[test]
+fn checked_log10() {
+ assert_eq!(0u8.checked_log10(), None);
+ assert_eq!(0u16.checked_log10(), None);
+ assert_eq!(0i8.checked_log10(), None);
+ assert_eq!(0i16.checked_log10(), None);
+
+ #[cfg(not(miri))] // Miri is too slow
+ for i in i16::MIN..=0 {
+ assert_eq!(i.checked_log10(), None);
+ }
+ #[cfg(not(miri))] // Miri is too slow
+ for i in 1..=i16::MAX {
+ assert_eq!(i.checked_log10(), Some((i as f32).log10() as u32));
+ }
+ #[cfg(not(miri))] // Miri is too slow
+ for i in 1..=u16::MAX {
+ assert_eq!(i.checked_log10(), Some((i as f32).log10() as u32));
+ }
+ #[cfg(not(miri))] // Miri is too slow
+ for i in 1..=100_000u32 {
+ assert_eq!(i.checked_log10(), Some((i as f32).log10() as u32));
+ }
+}
+
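+// Checks `log10` (and the equivalent `log(10)`) at every power of 10 representable in
+// `$T`, plus nearby values (p ± 1 and, where p >= 10, p ± 9) around each power p.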
+macro_rules! log10_loop {
+ ($T:ty, $log10_max:expr) => {
+ assert_eq!(<$T>::MAX.log10(), $log10_max);
+ for i in 0..=$log10_max {
+ let p = (10 as $T).pow(i as u32);
+ if p >= 10 {
+ assert_eq!((p - 9).log10(), i - 1);
+ assert_eq!((p - 1).log10(), i - 1);
+ }
+ assert_eq!(p.log10(), i);
+ assert_eq!((p + 1).log10(), i);
+ if p >= 10 {
+ assert_eq!((p + 9).log10(), i);
+ }
+
+ // also check `x.log(10)`
+ if p >= 10 {
+ assert_eq!((p - 9).log(10), i - 1);
+ assert_eq!((p - 1).log(10), i - 1);
+ }
+ assert_eq!(p.log(10), i);
+ assert_eq!((p + 1).log(10), i);
+ if p >= 10 {
+ assert_eq!((p + 9).log(10), i);
+ }
+ }
+ };
+}
+
+#[test]
+fn log10_u8() {
+ log10_loop! { u8, 2 }
+}
+
+#[test]
+fn log10_u16() {
+ log10_loop! { u16, 4 }
+}
+
+#[test]
+fn log10_u32() {
+ log10_loop! { u32, 9 }
+}
+
+#[test]
+fn log10_u64() {
+ log10_loop! { u64, 19 }
+}
+
+#[test]
+fn log10_u128() {
+ log10_loop! { u128, 38 }
+}
diff --git a/library/core/tests/num/int_macros.rs b/library/core/tests/num/int_macros.rs
new file mode 100644
index 000000000..8b84a78e6
--- /dev/null
+++ b/library/core/tests/num/int_macros.rs
@@ -0,0 +1,343 @@
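+// Shared test suite for the signed integer types; the per-type files (i8.rs, i16.rs, ...)
+// instantiate it via `int_module!($T)`.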
+macro_rules! int_module {
+ ($T:ident) => {
+ #[cfg(test)]
+ mod tests {
+ use core::ops::{BitAnd, BitOr, BitXor, Not, Shl, Shr};
+ use core::$T::*;
+
+ use crate::num;
+
+ #[test]
+ fn test_overflows() {
+ assert!(MAX > 0);
+ assert!(MIN <= 0);
+ assert_eq!(MIN + MAX + 1, 0);
+ }
+
+ #[test]
+ fn test_num() {
+ num::test_num(10 as $T, 2 as $T);
+ }
+
+ #[test]
+ fn test_rem_euclid() {
+ assert_eq!((-1 as $T).rem_euclid(MIN), MAX);
+ }
+
+ #[test]
+ pub fn test_abs() {
+ assert_eq!((1 as $T).abs(), 1 as $T);
+ assert_eq!((0 as $T).abs(), 0 as $T);
+ assert_eq!((-1 as $T).abs(), 1 as $T);
+ }
+
+ #[test]
+ fn test_signum() {
+ assert_eq!((1 as $T).signum(), 1 as $T);
+ assert_eq!((0 as $T).signum(), 0 as $T);
+ assert_eq!((-0 as $T).signum(), 0 as $T);
+ assert_eq!((-1 as $T).signum(), -1 as $T);
+ }
+
+ #[test]
+ fn test_is_positive() {
+ assert!((1 as $T).is_positive());
+ assert!(!(0 as $T).is_positive());
+ assert!(!(-0 as $T).is_positive());
+ assert!(!(-1 as $T).is_positive());
+ }
+
+ #[test]
+ fn test_is_negative() {
+ assert!(!(1 as $T).is_negative());
+ assert!(!(0 as $T).is_negative());
+ assert!(!(-0 as $T).is_negative());
+ assert!((-1 as $T).is_negative());
+ }
+
+ #[test]
+ fn test_bitwise_operators() {
+ assert_eq!(0b1110 as $T, (0b1100 as $T).bitor(0b1010 as $T));
+ assert_eq!(0b1000 as $T, (0b1100 as $T).bitand(0b1010 as $T));
+ assert_eq!(0b0110 as $T, (0b1100 as $T).bitxor(0b1010 as $T));
+ assert_eq!(0b1110 as $T, (0b0111 as $T).shl(1));
+ assert_eq!(0b0111 as $T, (0b1110 as $T).shr(1));
+ assert_eq!(-(0b11 as $T) - (1 as $T), (0b11 as $T).not());
+ }
+
+ const A: $T = 0b0101100;
+ const B: $T = 0b0100001;
+ const C: $T = 0b1111001;
+
+ const _0: $T = 0;
+ const _1: $T = !0;
+
+ #[test]
+ fn test_count_ones() {
+ assert_eq!(A.count_ones(), 3);
+ assert_eq!(B.count_ones(), 2);
+ assert_eq!(C.count_ones(), 5);
+ }
+
+ #[test]
+ fn test_count_zeros() {
+ assert_eq!(A.count_zeros(), $T::BITS - 3);
+ assert_eq!(B.count_zeros(), $T::BITS - 2);
+ assert_eq!(C.count_zeros(), $T::BITS - 5);
+ }
+
+ #[test]
+ fn test_leading_trailing_ones() {
+ let a: $T = 0b0101_1111;
+ assert_eq!(a.trailing_ones(), 5);
+ assert_eq!((!a).leading_ones(), $T::BITS - 7);
+
+ assert_eq!(a.reverse_bits().leading_ones(), 5);
+
+ assert_eq!(_1.leading_ones(), $T::BITS);
+ assert_eq!(_1.trailing_ones(), $T::BITS);
+
+ assert_eq!((_1 << 1).trailing_ones(), 0);
+ assert_eq!(MAX.leading_ones(), 0);
+
+ assert_eq!((_1 << 1).leading_ones(), $T::BITS - 1);
+ assert_eq!(MAX.trailing_ones(), $T::BITS - 1);
+
+ assert_eq!(_0.leading_ones(), 0);
+ assert_eq!(_0.trailing_ones(), 0);
+
+ let x: $T = 0b0010_1100;
+ assert_eq!(x.leading_ones(), 0);
+ assert_eq!(x.trailing_ones(), 0);
+ }
+
+ #[test]
+ fn test_rotate() {
+ assert_eq!(A.rotate_left(6).rotate_right(2).rotate_right(4), A);
+ assert_eq!(B.rotate_left(3).rotate_left(2).rotate_right(5), B);
+ assert_eq!(C.rotate_left(6).rotate_right(2).rotate_right(4), C);
+
+ // Rotating these should make no difference
+ //
+ // We test with 124 bits to ensure that overlong bit shifts do
+ // not cause undefined behaviour. See #10183.
+ assert_eq!(_0.rotate_left(124), _0);
+ assert_eq!(_1.rotate_left(124), _1);
+ assert_eq!(_0.rotate_right(124), _0);
+ assert_eq!(_1.rotate_right(124), _1);
+
+ // Rotating by 0 should have no effect
+ assert_eq!(A.rotate_left(0), A);
+ assert_eq!(B.rotate_left(0), B);
+ assert_eq!(C.rotate_left(0), C);
+ // Rotating by a multiple of word size should also have no effect
+ assert_eq!(A.rotate_left(128), A);
+ assert_eq!(B.rotate_left(128), B);
+ assert_eq!(C.rotate_left(128), C);
+ }
+
+ #[test]
+ fn test_swap_bytes() {
+ assert_eq!(A.swap_bytes().swap_bytes(), A);
+ assert_eq!(B.swap_bytes().swap_bytes(), B);
+ assert_eq!(C.swap_bytes().swap_bytes(), C);
+
+ // Swapping these should make no difference
+ assert_eq!(_0.swap_bytes(), _0);
+ assert_eq!(_1.swap_bytes(), _1);
+ }
+
+ #[test]
+ fn test_le() {
+ assert_eq!($T::from_le(A.to_le()), A);
+ assert_eq!($T::from_le(B.to_le()), B);
+ assert_eq!($T::from_le(C.to_le()), C);
+ assert_eq!($T::from_le(_0), _0);
+ assert_eq!($T::from_le(_1), _1);
+ assert_eq!(_0.to_le(), _0);
+ assert_eq!(_1.to_le(), _1);
+ }
+
+ #[test]
+ fn test_be() {
+ assert_eq!($T::from_be(A.to_be()), A);
+ assert_eq!($T::from_be(B.to_be()), B);
+ assert_eq!($T::from_be(C.to_be()), C);
+ assert_eq!($T::from_be(_0), _0);
+ assert_eq!($T::from_be(_1), _1);
+ assert_eq!(_0.to_be(), _0);
+ assert_eq!(_1.to_be(), _1);
+ }
+
+ #[test]
+ fn test_signed_checked_div() {
+ assert_eq!((10 as $T).checked_div(2), Some(5));
+ assert_eq!((5 as $T).checked_div(0), None);
+ assert_eq!(MIN.checked_div(-1), None); // MIN / -1 overflows
+ }
+
+ #[test]
+ fn test_saturating_abs() {
+ assert_eq!((0 as $T).saturating_abs(), 0);
+ assert_eq!((123 as $T).saturating_abs(), 123);
+ assert_eq!((-123 as $T).saturating_abs(), 123);
+ assert_eq!((MAX - 2).saturating_abs(), MAX - 2);
+ assert_eq!((MAX - 1).saturating_abs(), MAX - 1);
+ assert_eq!(MAX.saturating_abs(), MAX);
+ assert_eq!((MIN + 2).saturating_abs(), MAX - 1);
+ assert_eq!((MIN + 1).saturating_abs(), MAX);
+ assert_eq!(MIN.saturating_abs(), MAX);
+ }
+
+ #[test]
+ fn test_saturating_neg() {
+ assert_eq!((0 as $T).saturating_neg(), 0);
+ assert_eq!((123 as $T).saturating_neg(), -123);
+ assert_eq!((-123 as $T).saturating_neg(), 123);
+ assert_eq!((MAX - 2).saturating_neg(), MIN + 3);
+ assert_eq!((MAX - 1).saturating_neg(), MIN + 2);
+ assert_eq!(MAX.saturating_neg(), MIN + 1);
+ assert_eq!((MIN + 2).saturating_neg(), MAX - 1);
+ assert_eq!((MIN + 1).saturating_neg(), MAX);
+ assert_eq!(MIN.saturating_neg(), MAX);
+ }
+
+ #[test]
+ fn test_from_str() {
+ fn from_str<T: std::str::FromStr>(t: &str) -> Option<T> {
+ std::str::FromStr::from_str(t).ok()
+ }
+ assert_eq!(from_str::<$T>("0"), Some(0 as $T));
+ assert_eq!(from_str::<$T>("3"), Some(3 as $T));
+ assert_eq!(from_str::<$T>("10"), Some(10 as $T));
+ assert_eq!(from_str::<i32>("123456789"), Some(123456789 as i32));
+ assert_eq!(from_str::<$T>("00100"), Some(100 as $T));
+
+ assert_eq!(from_str::<$T>("-1"), Some(-1 as $T));
+ assert_eq!(from_str::<$T>("-3"), Some(-3 as $T));
+ assert_eq!(from_str::<$T>("-10"), Some(-10 as $T));
+ assert_eq!(from_str::<i32>("-123456789"), Some(-123456789 as i32));
+ assert_eq!(from_str::<$T>("-00100"), Some(-100 as $T));
+
+ assert_eq!(from_str::<$T>(""), None);
+ assert_eq!(from_str::<$T>(" "), None);
+ assert_eq!(from_str::<$T>("x"), None);
+ }
+
+ #[test]
+ fn test_from_str_radix() {
+ assert_eq!($T::from_str_radix("123", 10), Ok(123 as $T));
+ assert_eq!($T::from_str_radix("1001", 2), Ok(9 as $T));
+ assert_eq!($T::from_str_radix("123", 8), Ok(83 as $T));
+ assert_eq!(i32::from_str_radix("123", 16), Ok(291 as i32));
+ assert_eq!(i32::from_str_radix("ffff", 16), Ok(65535 as i32));
+ assert_eq!(i32::from_str_radix("FFFF", 16), Ok(65535 as i32));
+ assert_eq!($T::from_str_radix("z", 36), Ok(35 as $T));
+ assert_eq!($T::from_str_radix("Z", 36), Ok(35 as $T));
+
+ assert_eq!($T::from_str_radix("-123", 10), Ok(-123 as $T));
+ assert_eq!($T::from_str_radix("-1001", 2), Ok(-9 as $T));
+ assert_eq!($T::from_str_radix("-123", 8), Ok(-83 as $T));
+ assert_eq!(i32::from_str_radix("-123", 16), Ok(-291 as i32));
+ assert_eq!(i32::from_str_radix("-ffff", 16), Ok(-65535 as i32));
+ assert_eq!(i32::from_str_radix("-FFFF", 16), Ok(-65535 as i32));
+ assert_eq!($T::from_str_radix("-z", 36), Ok(-35 as $T));
+ assert_eq!($T::from_str_radix("-Z", 36), Ok(-35 as $T));
+
+ assert_eq!($T::from_str_radix("Z", 35).ok(), None::<$T>);
+ assert_eq!($T::from_str_radix("-9", 2).ok(), None::<$T>);
+ }
+
+ #[test]
+ fn test_pow() {
+ let mut r = 2 as $T;
+ assert_eq!(r.pow(2), 4 as $T);
+ assert_eq!(r.pow(0), 1 as $T);
+ assert_eq!(r.wrapping_pow(2), 4 as $T);
+ assert_eq!(r.wrapping_pow(0), 1 as $T);
+ assert_eq!(r.checked_pow(2), Some(4 as $T));
+ assert_eq!(r.checked_pow(0), Some(1 as $T));
+ assert_eq!(r.overflowing_pow(2), (4 as $T, false));
+ assert_eq!(r.overflowing_pow(0), (1 as $T, false));
+ assert_eq!(r.saturating_pow(2), 4 as $T);
+ assert_eq!(r.saturating_pow(0), 1 as $T);
+
+ r = MAX;
+ // use `^` to represent .pow() with no overflow.
+ // if itest::MAX == 2^j-1, then itest is a `j` bit int,
+ // so that `itest::MAX*itest::MAX == 2^(2*j)-2^(j+1)+1`,
+ // thus the overflowing result is exactly 1.
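+ // e.g. for i8: MAX = 127 = 2^7 - 1, and 127 * 127 = 16129 = 63 * 256 + 1, which wraps to 1.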
+ assert_eq!(r.wrapping_pow(2), 1 as $T);
+ assert_eq!(r.checked_pow(2), None);
+ assert_eq!(r.overflowing_pow(2), (1 as $T, true));
+ assert_eq!(r.saturating_pow(2), MAX);
+ // test with a negative base.
+ r = -2 as $T;
+ assert_eq!(r.pow(2), 4 as $T);
+ assert_eq!(r.pow(3), -8 as $T);
+ assert_eq!(r.pow(0), 1 as $T);
+ assert_eq!(r.wrapping_pow(2), 4 as $T);
+ assert_eq!(r.wrapping_pow(3), -8 as $T);
+ assert_eq!(r.wrapping_pow(0), 1 as $T);
+ assert_eq!(r.checked_pow(2), Some(4 as $T));
+ assert_eq!(r.checked_pow(3), Some(-8 as $T));
+ assert_eq!(r.checked_pow(0), Some(1 as $T));
+ assert_eq!(r.overflowing_pow(2), (4 as $T, false));
+ assert_eq!(r.overflowing_pow(3), (-8 as $T, false));
+ assert_eq!(r.overflowing_pow(0), (1 as $T, false));
+ assert_eq!(r.saturating_pow(2), 4 as $T);
+ assert_eq!(r.saturating_pow(3), -8 as $T);
+ assert_eq!(r.saturating_pow(0), 1 as $T);
+ }
+
+ #[test]
+ fn test_div_floor() {
+ let a: $T = 8;
+ let b = 3;
+ assert_eq!(a.div_floor(b), 2);
+ assert_eq!(a.div_floor(-b), -3);
+ assert_eq!((-a).div_floor(b), -3);
+ assert_eq!((-a).div_floor(-b), 2);
+ }
+
+ #[test]
+ fn test_div_ceil() {
+ let a: $T = 8;
+ let b = 3;
+ assert_eq!(a.div_ceil(b), 3);
+ assert_eq!(a.div_ceil(-b), -2);
+ assert_eq!((-a).div_ceil(b), -2);
+ assert_eq!((-a).div_ceil(-b), 3);
+ }
+
+ #[test]
+ fn test_next_multiple_of() {
+ assert_eq!((16 as $T).next_multiple_of(8), 16);
+ assert_eq!((23 as $T).next_multiple_of(8), 24);
+ assert_eq!((16 as $T).next_multiple_of(-8), 16);
+ assert_eq!((23 as $T).next_multiple_of(-8), 16);
+ assert_eq!((-16 as $T).next_multiple_of(8), -16);
+ assert_eq!((-23 as $T).next_multiple_of(8), -16);
+ assert_eq!((-16 as $T).next_multiple_of(-8), -16);
+ assert_eq!((-23 as $T).next_multiple_of(-8), -24);
+ assert_eq!(MIN.next_multiple_of(-1), MIN);
+ }
+
+ #[test]
+ fn test_checked_next_multiple_of() {
+ assert_eq!((16 as $T).checked_next_multiple_of(8), Some(16));
+ assert_eq!((23 as $T).checked_next_multiple_of(8), Some(24));
+ assert_eq!((16 as $T).checked_next_multiple_of(-8), Some(16));
+ assert_eq!((23 as $T).checked_next_multiple_of(-8), Some(16));
+ assert_eq!((-16 as $T).checked_next_multiple_of(8), Some(-16));
+ assert_eq!((-23 as $T).checked_next_multiple_of(8), Some(-16));
+ assert_eq!((-16 as $T).checked_next_multiple_of(-8), Some(-16));
+ assert_eq!((-23 as $T).checked_next_multiple_of(-8), Some(-24));
+ assert_eq!((1 as $T).checked_next_multiple_of(0), None);
+ assert_eq!(MAX.checked_next_multiple_of(2), None);
+ assert_eq!(MIN.checked_next_multiple_of(-3), None);
+ assert_eq!(MIN.checked_next_multiple_of(-1), Some(MIN));
+ }
+ }
+ };
+}
diff --git a/library/core/tests/num/mod.rs b/library/core/tests/num/mod.rs
new file mode 100644
index 000000000..49580cdcc
--- /dev/null
+++ b/library/core/tests/num/mod.rs
@@ -0,0 +1,871 @@
+use core::cmp::PartialEq;
+use core::convert::{TryFrom, TryInto};
+use core::fmt::Debug;
+use core::marker::Copy;
+use core::num::{can_not_overflow, IntErrorKind, ParseIntError, TryFromIntError};
+use core::ops::{Add, Div, Mul, Rem, Sub};
+use core::option::Option;
+use core::option::Option::None;
+use core::str::FromStr;
+
+#[macro_use]
+mod int_macros;
+
+mod i128;
+mod i16;
+mod i32;
+mod i64;
+mod i8;
+
+#[macro_use]
+mod uint_macros;
+
+mod u128;
+mod u16;
+mod u32;
+mod u64;
+mod u8;
+
+mod bignum;
+
+mod const_from;
+mod dec2flt;
+mod flt2dec;
+mod int_log;
+mod ops;
+mod wrapping;
+
+mod ieee754;
+mod nan;
+
+/// Adds the attribute to all items in the block.
+macro_rules! cfg_block {
+ ($(#[$attr:meta]{$($it:item)*})*) => {$($(
+ #[$attr]
+ $it
+ )*)*}
+}
+
+/// Groups items that assume the pointer width is either 16, 32, or 64, and that must be
+/// altered if support for larger or smaller pointer widths is added in the future.
+macro_rules! assume_usize_width {
+ {$($it:item)*} => {#[cfg(not(any(
+ target_pointer_width = "16", target_pointer_width = "32", target_pointer_width = "64")))]
+ compile_error!("The current tests of try_from on usize/isize assume that \
+ the pointer width is either 16, 32, or 64");
+ $($it)*
+ }
+}
+
+/// Helper function for testing numeric operations
+pub fn test_num<T>(ten: T, two: T)
+where
+ T: PartialEq
+ + Add<Output = T>
+ + Sub<Output = T>
+ + Mul<Output = T>
+ + Div<Output = T>
+ + Rem<Output = T>
+ + Debug
+ + Copy,
+{
+ assert_eq!(ten.add(two), ten + two);
+ assert_eq!(ten.sub(two), ten - two);
+ assert_eq!(ten.mul(two), ten * two);
+ assert_eq!(ten.div(two), ten / two);
+ assert_eq!(ten.rem(two), ten % two);
+}
+
+/// Helper function for asserting number parsing returns a specific error
+fn test_parse<T>(num_str: &str, expected: Result<T, IntErrorKind>)
+where
+ T: FromStr<Err = ParseIntError>,
+ Result<T, IntErrorKind>: PartialEq + Debug,
+{
+ assert_eq!(num_str.parse::<T>().map_err(|e| e.kind().clone()), expected)
+}
+
+#[test]
+fn from_str_issue7588() {
+ let u: Option<u8> = u8::from_str_radix("1000", 10).ok();
+ assert_eq!(u, None);
+ let s: Option<i16> = i16::from_str_radix("80000", 10).ok();
+ assert_eq!(s, None);
+}
+
+#[test]
+fn test_int_from_str_overflow() {
+ test_parse::<i8>("127", Ok(127));
+ test_parse::<i8>("128", Err(IntErrorKind::PosOverflow));
+
+ test_parse::<i8>("-128", Ok(-128));
+ test_parse::<i8>("-129", Err(IntErrorKind::NegOverflow));
+
+ test_parse::<i16>("32767", Ok(32_767));
+ test_parse::<i16>("32768", Err(IntErrorKind::PosOverflow));
+
+ test_parse::<i16>("-32768", Ok(-32_768));
+ test_parse::<i16>("-32769", Err(IntErrorKind::NegOverflow));
+
+ test_parse::<i32>("2147483647", Ok(2_147_483_647));
+ test_parse::<i32>("2147483648", Err(IntErrorKind::PosOverflow));
+
+ test_parse::<i32>("-2147483648", Ok(-2_147_483_648));
+ test_parse::<i32>("-2147483649", Err(IntErrorKind::NegOverflow));
+
+ test_parse::<i64>("9223372036854775807", Ok(9_223_372_036_854_775_807));
+ test_parse::<i64>("9223372036854775808", Err(IntErrorKind::PosOverflow));
+
+ test_parse::<i64>("-9223372036854775808", Ok(-9_223_372_036_854_775_808));
+ test_parse::<i64>("-9223372036854775809", Err(IntErrorKind::NegOverflow));
+}
+
+#[test]
+fn test_can_not_overflow() {
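+ // Inverts `can_not_overflow`: true when parsing `input` in the given radix might overflow `T`.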
+ fn can_overflow<T>(radix: u32, input: &str) -> bool
+ where
+ T: std::convert::TryFrom<i8>,
+ {
+ !can_not_overflow::<T>(radix, T::try_from(-1_i8).is_ok(), input.as_bytes())
+ }
+
+ // Positive tests:
+ assert!(!can_overflow::<i8>(16, "F"));
+ assert!(!can_overflow::<u8>(16, "FF"));
+
+ assert!(!can_overflow::<i8>(10, "9"));
+ assert!(!can_overflow::<u8>(10, "99"));
+
+ // Negative tests:
+
+ // Not currently in std lib (issue: #27728)
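+ // Formats `x` in the given radix by repeated division, collecting digits least-significant first.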
+ fn format_radix<T>(mut x: T, radix: T) -> String
+ where
+ T: std::ops::Rem<Output = T>,
+ T: std::ops::Div<Output = T>,
+ T: std::cmp::PartialEq,
+ T: std::default::Default,
+ T: Copy,
+ T: Default,
+ u32: TryFrom<T>,
+ {
+ let mut result = vec![];
+
+ loop {
+ let m = x % radix;
+ x = x / radix;
+ result.push(
+ std::char::from_digit(m.try_into().ok().unwrap(), radix.try_into().ok().unwrap())
+ .unwrap(),
+ );
+ if x == T::default() {
+ break;
+ }
+ }
+ result.into_iter().rev().collect()
+ }
+
+ macro_rules! check {
+ ($($t:ty)*) => ($(
+ for base in 2..=36 {
+ let num = (<$t>::MAX as u128) + 1;
+
+ // Calculate the string length for the smallest overflowing number:
+ let max_len_string = format_radix(num, base as u128);
+ // Ensure that a string of that length is deemed to potentially overflow:
+ assert!(can_overflow::<$t>(base, &max_len_string));
+ }
+ )*)
+ }
+
+ check! { i8 i16 i32 i64 i128 isize usize u8 u16 u32 u64 }
+
+ // Check u128 separately:
+ for base in 2..=36 {
+ let num = u128::MAX as u128;
+ let max_len_string = format_radix(num, base as u128);
+ // base 16 fits perfectly for u128 and won't overflow:
+ assert_eq!(can_overflow::<u128>(base, &max_len_string), base != 16);
+ }
+}
+
+#[test]
+fn test_leading_plus() {
+ test_parse::<u8>("+127", Ok(127));
+ test_parse::<i64>("+9223372036854775807", Ok(9223372036854775807));
+}
+
+#[test]
+fn test_invalid() {
+ test_parse::<i8>("--129", Err(IntErrorKind::InvalidDigit));
+ test_parse::<i8>("++129", Err(IntErrorKind::InvalidDigit));
+ test_parse::<u8>("Съешь", Err(IntErrorKind::InvalidDigit));
+ test_parse::<u8>("123Hello", Err(IntErrorKind::InvalidDigit));
+ test_parse::<i8>("--", Err(IntErrorKind::InvalidDigit));
+ test_parse::<i8>("-", Err(IntErrorKind::InvalidDigit));
+ test_parse::<i8>("+", Err(IntErrorKind::InvalidDigit));
+ test_parse::<u8>("-1", Err(IntErrorKind::InvalidDigit));
+}
+
+#[test]
+fn test_empty() {
+ test_parse::<u8>("", Err(IntErrorKind::Empty));
+}
+
+#[test]
+fn test_infallible_try_from_int_error() {
+ let func = |x: i8| -> Result<i32, TryFromIntError> { Ok(x.try_into()?) };
+
+ assert!(func(0).is_ok());
+}
+
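+/// Tests infallible `From` conversions: from `bool`, or from a smaller `$Small` into a wider `$Large`.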
+macro_rules! test_impl_from {
+ ($fn_name:ident, bool, $target: ty) => {
+ #[test]
+ fn $fn_name() {
+ let one: $target = 1;
+ let zero: $target = 0;
+ assert_eq!(one, <$target>::from(true));
+ assert_eq!(zero, <$target>::from(false));
+ }
+ };
+ ($fn_name: ident, $Small: ty, $Large: ty) => {
+ #[test]
+ fn $fn_name() {
+ let small_max = <$Small>::MAX;
+ let small_min = <$Small>::MIN;
+ let large_max: $Large = small_max.into();
+ let large_min: $Large = small_min.into();
+ assert_eq!(large_max as $Small, small_max);
+ assert_eq!(large_min as $Small, small_min);
+ }
+ };
+}
+
+// Unsigned -> Unsigned
+test_impl_from! { test_u8u16, u8, u16 }
+test_impl_from! { test_u8u32, u8, u32 }
+test_impl_from! { test_u8u64, u8, u64 }
+test_impl_from! { test_u8usize, u8, usize }
+test_impl_from! { test_u16u32, u16, u32 }
+test_impl_from! { test_u16u64, u16, u64 }
+test_impl_from! { test_u32u64, u32, u64 }
+
+// Signed -> Signed
+test_impl_from! { test_i8i16, i8, i16 }
+test_impl_from! { test_i8i32, i8, i32 }
+test_impl_from! { test_i8i64, i8, i64 }
+test_impl_from! { test_i8isize, i8, isize }
+test_impl_from! { test_i16i32, i16, i32 }
+test_impl_from! { test_i16i64, i16, i64 }
+test_impl_from! { test_i32i64, i32, i64 }
+
+// Unsigned -> Signed
+test_impl_from! { test_u8i16, u8, i16 }
+test_impl_from! { test_u8i32, u8, i32 }
+test_impl_from! { test_u8i64, u8, i64 }
+test_impl_from! { test_u16i32, u16, i32 }
+test_impl_from! { test_u16i64, u16, i64 }
+test_impl_from! { test_u32i64, u32, i64 }
+
+// Bool -> Integer
+test_impl_from! { test_boolu8, bool, u8 }
+test_impl_from! { test_boolu16, bool, u16 }
+test_impl_from! { test_boolu32, bool, u32 }
+test_impl_from! { test_boolu64, bool, u64 }
+test_impl_from! { test_boolu128, bool, u128 }
+test_impl_from! { test_booli8, bool, i8 }
+test_impl_from! { test_booli16, bool, i16 }
+test_impl_from! { test_booli32, bool, i32 }
+test_impl_from! { test_booli64, bool, i64 }
+test_impl_from! { test_booli128, bool, i128 }
+
+// Signed -> Float
+test_impl_from! { test_i8f32, i8, f32 }
+test_impl_from! { test_i8f64, i8, f64 }
+test_impl_from! { test_i16f32, i16, f32 }
+test_impl_from! { test_i16f64, i16, f64 }
+test_impl_from! { test_i32f64, i32, f64 }
+
+// Unsigned -> Float
+test_impl_from! { test_u8f32, u8, f32 }
+test_impl_from! { test_u8f64, u8, f64 }
+test_impl_from! { test_u16f32, u16, f32 }
+test_impl_from! { test_u16f64, u16, f64 }
+test_impl_from! { test_u32f64, u32, f64 }
+
+// Float -> Float
+#[test]
+fn test_f32f64() {
+ let max: f64 = f32::MAX.into();
+ assert_eq!(max as f32, f32::MAX);
+ assert!(max.is_normal());
+
+ let min: f64 = f32::MIN.into();
+ assert_eq!(min as f32, f32::MIN);
+ assert!(min.is_normal());
+
+ let min_positive: f64 = f32::MIN_POSITIVE.into();
+ assert_eq!(min_positive as f32, f32::MIN_POSITIVE);
+ assert!(min_positive.is_normal());
+
+ let epsilon: f64 = f32::EPSILON.into();
+ assert_eq!(epsilon as f32, f32::EPSILON);
+ assert!(epsilon.is_normal());
+
+ let zero: f64 = (0.0f32).into();
+ assert_eq!(zero as f32, 0.0f32);
+ assert!(zero.is_sign_positive());
+
+ let neg_zero: f64 = (-0.0f32).into();
+ assert_eq!(neg_zero as f32, -0.0f32);
+ assert!(neg_zero.is_sign_negative());
+
+ let infinity: f64 = f32::INFINITY.into();
+ assert_eq!(infinity as f32, f32::INFINITY);
+ assert!(infinity.is_infinite());
+ assert!(infinity.is_sign_positive());
+
+ let neg_infinity: f64 = f32::NEG_INFINITY.into();
+ assert_eq!(neg_infinity as f32, f32::NEG_INFINITY);
+ assert!(neg_infinity.is_infinite());
+ assert!(neg_infinity.is_sign_negative());
+
+ let nan: f64 = f32::NAN.into();
+ assert!(nan.is_nan());
+}
+
+/// Conversions where the full width of $source can be represented as $target
+macro_rules! test_impl_try_from_always_ok {
+ ($fn_name:ident, $source:ty, $target: ty) => {
+ #[test]
+ fn $fn_name() {
+ let max = <$source>::MAX;
+ let min = <$source>::MIN;
+ let zero: $source = 0;
+ assert_eq!(<$target as TryFrom<$source>>::try_from(max).unwrap(), max as $target);
+ assert_eq!(<$target as TryFrom<$source>>::try_from(min).unwrap(), min as $target);
+ assert_eq!(<$target as TryFrom<$source>>::try_from(zero).unwrap(), zero as $target);
+ }
+ };
+}
+
+test_impl_try_from_always_ok! { test_try_u8u8, u8, u8 }
+test_impl_try_from_always_ok! { test_try_u8u16, u8, u16 }
+test_impl_try_from_always_ok! { test_try_u8u32, u8, u32 }
+test_impl_try_from_always_ok! { test_try_u8u64, u8, u64 }
+test_impl_try_from_always_ok! { test_try_u8u128, u8, u128 }
+test_impl_try_from_always_ok! { test_try_u8i16, u8, i16 }
+test_impl_try_from_always_ok! { test_try_u8i32, u8, i32 }
+test_impl_try_from_always_ok! { test_try_u8i64, u8, i64 }
+test_impl_try_from_always_ok! { test_try_u8i128, u8, i128 }
+
+test_impl_try_from_always_ok! { test_try_u16u16, u16, u16 }
+test_impl_try_from_always_ok! { test_try_u16u32, u16, u32 }
+test_impl_try_from_always_ok! { test_try_u16u64, u16, u64 }
+test_impl_try_from_always_ok! { test_try_u16u128, u16, u128 }
+test_impl_try_from_always_ok! { test_try_u16i32, u16, i32 }
+test_impl_try_from_always_ok! { test_try_u16i64, u16, i64 }
+test_impl_try_from_always_ok! { test_try_u16i128, u16, i128 }
+
+test_impl_try_from_always_ok! { test_try_u32u32, u32, u32 }
+test_impl_try_from_always_ok! { test_try_u32u64, u32, u64 }
+test_impl_try_from_always_ok! { test_try_u32u128, u32, u128 }
+test_impl_try_from_always_ok! { test_try_u32i64, u32, i64 }
+test_impl_try_from_always_ok! { test_try_u32i128, u32, i128 }
+
+test_impl_try_from_always_ok! { test_try_u64u64, u64, u64 }
+test_impl_try_from_always_ok! { test_try_u64u128, u64, u128 }
+test_impl_try_from_always_ok! { test_try_u64i128, u64, i128 }
+
+test_impl_try_from_always_ok! { test_try_u128u128, u128, u128 }
+
+test_impl_try_from_always_ok! { test_try_i8i8, i8, i8 }
+test_impl_try_from_always_ok! { test_try_i8i16, i8, i16 }
+test_impl_try_from_always_ok! { test_try_i8i32, i8, i32 }
+test_impl_try_from_always_ok! { test_try_i8i64, i8, i64 }
+test_impl_try_from_always_ok! { test_try_i8i128, i8, i128 }
+
+test_impl_try_from_always_ok! { test_try_i16i16, i16, i16 }
+test_impl_try_from_always_ok! { test_try_i16i32, i16, i32 }
+test_impl_try_from_always_ok! { test_try_i16i64, i16, i64 }
+test_impl_try_from_always_ok! { test_try_i16i128, i16, i128 }
+
+test_impl_try_from_always_ok! { test_try_i32i32, i32, i32 }
+test_impl_try_from_always_ok! { test_try_i32i64, i32, i64 }
+test_impl_try_from_always_ok! { test_try_i32i128, i32, i128 }
+
+test_impl_try_from_always_ok! { test_try_i64i64, i64, i64 }
+test_impl_try_from_always_ok! { test_try_i64i128, i64, i128 }
+
+test_impl_try_from_always_ok! { test_try_i128i128, i128, i128 }
+
+test_impl_try_from_always_ok! { test_try_usizeusize, usize, usize }
+test_impl_try_from_always_ok! { test_try_isizeisize, isize, isize }
+
+assume_usize_width! {
+ test_impl_try_from_always_ok! { test_try_u8usize, u8, usize }
+ test_impl_try_from_always_ok! { test_try_u8isize, u8, isize }
+ test_impl_try_from_always_ok! { test_try_i8isize, i8, isize }
+
+ test_impl_try_from_always_ok! { test_try_u16usize, u16, usize }
+ test_impl_try_from_always_ok! { test_try_i16isize, i16, isize }
+
+ test_impl_try_from_always_ok! { test_try_usizeu64, usize, u64 }
+ test_impl_try_from_always_ok! { test_try_usizeu128, usize, u128 }
+ test_impl_try_from_always_ok! { test_try_usizei128, usize, i128 }
+
+ test_impl_try_from_always_ok! { test_try_isizei64, isize, i64 }
+ test_impl_try_from_always_ok! { test_try_isizei128, isize, i128 }
+
+ cfg_block!(
+ #[cfg(target_pointer_width = "16")] {
+ test_impl_try_from_always_ok! { test_try_usizeu16, usize, u16 }
+ test_impl_try_from_always_ok! { test_try_isizei16, isize, i16 }
+ test_impl_try_from_always_ok! { test_try_usizeu32, usize, u32 }
+ test_impl_try_from_always_ok! { test_try_usizei32, usize, i32 }
+ test_impl_try_from_always_ok! { test_try_isizei32, isize, i32 }
+ test_impl_try_from_always_ok! { test_try_usizei64, usize, i64 }
+ }
+
+ #[cfg(target_pointer_width = "32")] {
+ test_impl_try_from_always_ok! { test_try_u16isize, u16, isize }
+ test_impl_try_from_always_ok! { test_try_usizeu32, usize, u32 }
+ test_impl_try_from_always_ok! { test_try_isizei32, isize, i32 }
+ test_impl_try_from_always_ok! { test_try_u32usize, u32, usize }
+ test_impl_try_from_always_ok! { test_try_i32isize, i32, isize }
+ test_impl_try_from_always_ok! { test_try_usizei64, usize, i64 }
+ }
+
+ #[cfg(target_pointer_width = "64")] {
+ test_impl_try_from_always_ok! { test_try_u16isize, u16, isize }
+ test_impl_try_from_always_ok! { test_try_u32usize, u32, usize }
+ test_impl_try_from_always_ok! { test_try_u32isize, u32, isize }
+ test_impl_try_from_always_ok! { test_try_i32isize, i32, isize }
+ test_impl_try_from_always_ok! { test_try_u64usize, u64, usize }
+ test_impl_try_from_always_ok! { test_try_i64isize, i64, isize }
+ }
+ );
+}
+
+/// Conversions where the max of $source can be represented as $target,
+/// but the min cannot.
+macro_rules! test_impl_try_from_signed_to_unsigned_upper_ok {
+ ($fn_name:ident, $source:ty, $target:ty) => {
+ #[test]
+ fn $fn_name() {
+ let max = <$source>::MAX;
+ let min = <$source>::MIN;
+ let zero: $source = 0;
+ let neg_one: $source = -1;
+ assert_eq!(<$target as TryFrom<$source>>::try_from(max).unwrap(), max as $target);
+ assert!(<$target as TryFrom<$source>>::try_from(min).is_err());
+ assert_eq!(<$target as TryFrom<$source>>::try_from(zero).unwrap(), zero as $target);
+ assert!(<$target as TryFrom<$source>>::try_from(neg_one).is_err());
+ }
+ };
+}
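+
+// A minimal illustrative sketch (hypothetical test name, not from the suite
+// above): the i8 -> u8 instantiation below is equivalent to this hand-written
+// test.
+#[test]
+fn test_try_i8u8_expanded_sketch() {
+    assert_eq!(<u8 as TryFrom<i8>>::try_from(i8::MAX).unwrap(), 127u8); // 127 fits
+    assert!(<u8 as TryFrom<i8>>::try_from(i8::MIN).is_err()); // -128 does not
+    assert_eq!(<u8 as TryFrom<i8>>::try_from(0i8).unwrap(), 0u8);
+    assert!(<u8 as TryFrom<i8>>::try_from(-1i8).is_err()); // negatives never fit
+}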
+
+test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i8u8, i8, u8 }
+test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i8u16, i8, u16 }
+test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i8u32, i8, u32 }
+test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i8u64, i8, u64 }
+test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i8u128, i8, u128 }
+
+test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i16u16, i16, u16 }
+test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i16u32, i16, u32 }
+test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i16u64, i16, u64 }
+test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i16u128, i16, u128 }
+
+test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i32u32, i32, u32 }
+test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i32u64, i32, u64 }
+test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i32u128, i32, u128 }
+
+test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i64u64, i64, u64 }
+test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i64u128, i64, u128 }
+
+test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i128u128, i128, u128 }
+
+assume_usize_width! {
+ test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i8usize, i8, usize }
+ test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i16usize, i16, usize }
+
+ test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_isizeu64, isize, u64 }
+ test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_isizeu128, isize, u128 }
+ test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_isizeusize, isize, usize }
+
+ cfg_block!(
+ #[cfg(target_pointer_width = "16")] {
+ test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_isizeu16, isize, u16 }
+ test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_isizeu32, isize, u32 }
+ }
+
+ #[cfg(target_pointer_width = "32")] {
+ test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_isizeu32, isize, u32 }
+
+ test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i32usize, i32, usize }
+ }
+
+ #[cfg(target_pointer_width = "64")] {
+ test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i32usize, i32, usize }
+ test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i64usize, i64, usize }
+ }
+ );
+}
+
+/// Conversions where the max of $source cannot be represented as $target,
+/// but the min can.
+macro_rules! test_impl_try_from_unsigned_to_signed_upper_err {
+ ($fn_name:ident, $source:ty, $target:ty) => {
+ #[test]
+ fn $fn_name() {
+ let max = <$source>::MAX;
+ let min = <$source>::MIN;
+ let zero: $source = 0;
+ assert!(<$target as TryFrom<$source>>::try_from(max).is_err());
+ assert_eq!(<$target as TryFrom<$source>>::try_from(min).unwrap(), min as $target);
+ assert_eq!(<$target as TryFrom<$source>>::try_from(zero).unwrap(), zero as $target);
+ }
+ };
+}
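+
+// A minimal sketch (hypothetical test name): the u8 -> i8 case below reduces
+// to these checks; only the unsigned max overflows the signed target.
+#[test]
+fn test_try_u8i8_expanded_sketch() {
+    assert!(<i8 as TryFrom<u8>>::try_from(u8::MAX).is_err()); // 255 > i8::MAX
+    assert_eq!(<i8 as TryFrom<u8>>::try_from(u8::MIN).unwrap(), 0i8);
+    assert_eq!(<i8 as TryFrom<u8>>::try_from(127u8).unwrap(), i8::MAX);
+}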
+
+test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u8i8, u8, i8 }
+
+test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u16i8, u16, i8 }
+test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u16i16, u16, i16 }
+
+test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u32i8, u32, i8 }
+test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u32i16, u32, i16 }
+test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u32i32, u32, i32 }
+
+test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u64i8, u64, i8 }
+test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u64i16, u64, i16 }
+test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u64i32, u64, i32 }
+test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u64i64, u64, i64 }
+
+test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u128i8, u128, i8 }
+test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u128i16, u128, i16 }
+test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u128i32, u128, i32 }
+test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u128i64, u128, i64 }
+test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u128i128, u128, i128 }
+
+assume_usize_width! {
+ test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u64isize, u64, isize }
+ test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u128isize, u128, isize }
+
+ test_impl_try_from_unsigned_to_signed_upper_err! { test_try_usizei8, usize, i8 }
+ test_impl_try_from_unsigned_to_signed_upper_err! { test_try_usizei16, usize, i16 }
+ test_impl_try_from_unsigned_to_signed_upper_err! { test_try_usizeisize, usize, isize }
+
+ cfg_block!(
+ #[cfg(target_pointer_width = "16")] {
+ test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u16isize, u16, isize }
+ test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u32isize, u32, isize }
+ }
+
+ #[cfg(target_pointer_width = "32")] {
+ test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u32isize, u32, isize }
+ test_impl_try_from_unsigned_to_signed_upper_err! { test_try_usizei32, usize, i32 }
+ }
+
+ #[cfg(target_pointer_width = "64")] {
+ test_impl_try_from_unsigned_to_signed_upper_err! { test_try_usizei32, usize, i32 }
+ test_impl_try_from_unsigned_to_signed_upper_err! { test_try_usizei64, usize, i64 }
+ }
+ );
+}
+
+/// Conversions where the min/max of $source cannot be represented as $target
+/// (for unsigned sources the min is 0 and always fits, so only the max errors).
+macro_rules! test_impl_try_from_same_sign_err {
+ ($fn_name:ident, $source:ty, $target:ty) => {
+ #[test]
+ fn $fn_name() {
+ let max = <$source>::MAX;
+ let min = <$source>::MIN;
+ let zero: $source = 0;
+ let t_max = <$target>::MAX;
+ let t_min = <$target>::MIN;
+ assert!(<$target as TryFrom<$source>>::try_from(max).is_err());
+ if min != 0 {
+ assert!(<$target as TryFrom<$source>>::try_from(min).is_err());
+ }
+ assert_eq!(<$target as TryFrom<$source>>::try_from(zero).unwrap(), zero as $target);
+ assert_eq!(
+ <$target as TryFrom<$source>>::try_from(t_max as $source).unwrap(),
+ t_max as $target
+ );
+ assert_eq!(
+ <$target as TryFrom<$source>>::try_from(t_min as $source).unwrap(),
+ t_min as $target
+ );
+ }
+ };
+}
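+
+// A minimal sketch (hypothetical test name): for the u16 -> u8 case below,
+// only the top of the wider range overflows, while the narrower type's own
+// extremes round-trip exactly.
+#[test]
+fn test_try_u16u8_expanded_sketch() {
+    assert!(<u8 as TryFrom<u16>>::try_from(u16::MAX).is_err()); // 65535 > 255
+    assert_eq!(<u8 as TryFrom<u16>>::try_from(255u16).unwrap(), u8::MAX);
+    assert_eq!(<u8 as TryFrom<u16>>::try_from(0u16).unwrap(), u8::MIN);
+}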
+
+test_impl_try_from_same_sign_err! { test_try_u16u8, u16, u8 }
+
+test_impl_try_from_same_sign_err! { test_try_u32u8, u32, u8 }
+test_impl_try_from_same_sign_err! { test_try_u32u16, u32, u16 }
+
+test_impl_try_from_same_sign_err! { test_try_u64u8, u64, u8 }
+test_impl_try_from_same_sign_err! { test_try_u64u16, u64, u16 }
+test_impl_try_from_same_sign_err! { test_try_u64u32, u64, u32 }
+
+test_impl_try_from_same_sign_err! { test_try_u128u8, u128, u8 }
+test_impl_try_from_same_sign_err! { test_try_u128u16, u128, u16 }
+test_impl_try_from_same_sign_err! { test_try_u128u32, u128, u32 }
+test_impl_try_from_same_sign_err! { test_try_u128u64, u128, u64 }
+
+test_impl_try_from_same_sign_err! { test_try_i16i8, i16, i8 }
+test_impl_try_from_same_sign_err! { test_try_isizei8, isize, i8 }
+
+test_impl_try_from_same_sign_err! { test_try_i32i8, i32, i8 }
+test_impl_try_from_same_sign_err! { test_try_i32i16, i32, i16 }
+
+test_impl_try_from_same_sign_err! { test_try_i64i8, i64, i8 }
+test_impl_try_from_same_sign_err! { test_try_i64i16, i64, i16 }
+test_impl_try_from_same_sign_err! { test_try_i64i32, i64, i32 }
+
+test_impl_try_from_same_sign_err! { test_try_i128i8, i128, i8 }
+test_impl_try_from_same_sign_err! { test_try_i128i16, i128, i16 }
+test_impl_try_from_same_sign_err! { test_try_i128i32, i128, i32 }
+test_impl_try_from_same_sign_err! { test_try_i128i64, i128, i64 }
+
+assume_usize_width! {
+ test_impl_try_from_same_sign_err! { test_try_usizeu8, usize, u8 }
+ test_impl_try_from_same_sign_err! { test_try_u128usize, u128, usize }
+ test_impl_try_from_same_sign_err! { test_try_i128isize, i128, isize }
+
+ cfg_block!(
+ #[cfg(target_pointer_width = "16")] {
+ test_impl_try_from_same_sign_err! { test_try_u32usize, u32, usize }
+ test_impl_try_from_same_sign_err! { test_try_u64usize, u64, usize }
+
+ test_impl_try_from_same_sign_err! { test_try_i32isize, i32, isize }
+ test_impl_try_from_same_sign_err! { test_try_i64isize, i64, isize }
+ }
+
+ #[cfg(target_pointer_width = "32")] {
+ test_impl_try_from_same_sign_err! { test_try_u64usize, u64, usize }
+ test_impl_try_from_same_sign_err! { test_try_usizeu16, usize, u16 }
+
+ test_impl_try_from_same_sign_err! { test_try_i64isize, i64, isize }
+ test_impl_try_from_same_sign_err! { test_try_isizei16, isize, i16 }
+ }
+
+ #[cfg(target_pointer_width = "64")] {
+ test_impl_try_from_same_sign_err! { test_try_usizeu16, usize, u16 }
+ test_impl_try_from_same_sign_err! { test_try_usizeu32, usize, u32 }
+
+ test_impl_try_from_same_sign_err! { test_try_isizei16, isize, i16 }
+ test_impl_try_from_same_sign_err! { test_try_isizei32, isize, i32 }
+ }
+ );
+}
+
+/// Conversions where neither the min nor the max of $source can be represented
+/// as $target, but both the max and the min of $target can be represented by
+/// $source.
+macro_rules! test_impl_try_from_signed_to_unsigned_err {
+ ($fn_name:ident, $source:ty, $target:ty) => {
+ #[test]
+ fn $fn_name() {
+ let max = <$source>::MAX;
+ let min = <$source>::MIN;
+ let zero: $source = 0;
+ let t_max = <$target>::MAX;
+ let t_min = <$target>::MIN;
+ assert!(<$target as TryFrom<$source>>::try_from(max).is_err());
+ assert!(<$target as TryFrom<$source>>::try_from(min).is_err());
+ assert_eq!(<$target as TryFrom<$source>>::try_from(zero).unwrap(), zero as $target);
+ assert_eq!(
+ <$target as TryFrom<$source>>::try_from(t_max as $source).unwrap(),
+ t_max as $target
+ );
+ assert_eq!(
+ <$target as TryFrom<$source>>::try_from(t_min as $source).unwrap(),
+ t_min as $target
+ );
+ }
+ };
+}
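+
+// A minimal sketch (hypothetical test name): i16 -> u8 overflows at both
+// source extremes, yet every u8 value is reachable from an i16.
+#[test]
+fn test_try_i16u8_expanded_sketch() {
+    assert!(<u8 as TryFrom<i16>>::try_from(i16::MAX).is_err()); // 32767 > 255
+    assert!(<u8 as TryFrom<i16>>::try_from(i16::MIN).is_err()); // negative
+    assert_eq!(<u8 as TryFrom<i16>>::try_from(255i16).unwrap(), u8::MAX);
+    assert_eq!(<u8 as TryFrom<i16>>::try_from(0i16).unwrap(), u8::MIN);
+}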
+
+test_impl_try_from_signed_to_unsigned_err! { test_try_i16u8, i16, u8 }
+
+test_impl_try_from_signed_to_unsigned_err! { test_try_i32u8, i32, u8 }
+test_impl_try_from_signed_to_unsigned_err! { test_try_i32u16, i32, u16 }
+
+test_impl_try_from_signed_to_unsigned_err! { test_try_i64u8, i64, u8 }
+test_impl_try_from_signed_to_unsigned_err! { test_try_i64u16, i64, u16 }
+test_impl_try_from_signed_to_unsigned_err! { test_try_i64u32, i64, u32 }
+
+test_impl_try_from_signed_to_unsigned_err! { test_try_i128u8, i128, u8 }
+test_impl_try_from_signed_to_unsigned_err! { test_try_i128u16, i128, u16 }
+test_impl_try_from_signed_to_unsigned_err! { test_try_i128u32, i128, u32 }
+test_impl_try_from_signed_to_unsigned_err! { test_try_i128u64, i128, u64 }
+
+assume_usize_width! {
+ test_impl_try_from_signed_to_unsigned_err! { test_try_isizeu8, isize, u8 }
+ test_impl_try_from_signed_to_unsigned_err! { test_try_i128usize, i128, usize }
+
+ cfg_block! {
+ #[cfg(target_pointer_width = "16")] {
+ test_impl_try_from_signed_to_unsigned_err! { test_try_i32usize, i32, usize }
+ test_impl_try_from_signed_to_unsigned_err! { test_try_i64usize, i64, usize }
+ }
+ #[cfg(target_pointer_width = "32")] {
+ test_impl_try_from_signed_to_unsigned_err! { test_try_i64usize, i64, usize }
+
+ test_impl_try_from_signed_to_unsigned_err! { test_try_isizeu16, isize, u16 }
+ }
+ #[cfg(target_pointer_width = "64")] {
+ test_impl_try_from_signed_to_unsigned_err! { test_try_isizeu16, isize, u16 }
+ test_impl_try_from_signed_to_unsigned_err! { test_try_isizeu32, isize, u32 }
+ }
+ }
+}
+
+macro_rules! test_float {
+ ($modname: ident, $fty: ty, $inf: expr, $neginf: expr, $nan: expr) => {
+ mod $modname {
+ #[test]
+ fn min() {
+ assert_eq!((0.0 as $fty).min(0.0), 0.0);
+ assert!((0.0 as $fty).min(0.0).is_sign_positive());
+ assert_eq!((-0.0 as $fty).min(-0.0), -0.0);
+ assert!((-0.0 as $fty).min(-0.0).is_sign_negative());
+ assert_eq!((9.0 as $fty).min(9.0), 9.0);
+ assert_eq!((-9.0 as $fty).min(0.0), -9.0);
+ assert_eq!((0.0 as $fty).min(9.0), 0.0);
+ assert!((0.0 as $fty).min(9.0).is_sign_positive());
+ assert_eq!((-0.0 as $fty).min(9.0), -0.0);
+ assert!((-0.0 as $fty).min(9.0).is_sign_negative());
+ assert_eq!((-0.0 as $fty).min(-9.0), -9.0);
+ assert_eq!(($inf as $fty).min(9.0), 9.0);
+ assert_eq!((9.0 as $fty).min($inf), 9.0);
+ assert_eq!(($inf as $fty).min(-9.0), -9.0);
+ assert_eq!((-9.0 as $fty).min($inf), -9.0);
+ assert_eq!(($neginf as $fty).min(9.0), $neginf);
+ assert_eq!((9.0 as $fty).min($neginf), $neginf);
+ assert_eq!(($neginf as $fty).min(-9.0), $neginf);
+ assert_eq!((-9.0 as $fty).min($neginf), $neginf);
+ assert_eq!(($nan as $fty).min(9.0), 9.0);
+ assert_eq!(($nan as $fty).min(-9.0), -9.0);
+ assert_eq!((9.0 as $fty).min($nan), 9.0);
+ assert_eq!((-9.0 as $fty).min($nan), -9.0);
+ assert!(($nan as $fty).min($nan).is_nan());
+ }
+ #[test]
+ fn max() {
+ assert_eq!((0.0 as $fty).max(0.0), 0.0);
+ assert!((0.0 as $fty).max(0.0).is_sign_positive());
+ assert_eq!((-0.0 as $fty).max(-0.0), -0.0);
+ assert!((-0.0 as $fty).max(-0.0).is_sign_negative());
+ assert_eq!((9.0 as $fty).max(9.0), 9.0);
+ assert_eq!((-9.0 as $fty).max(0.0), 0.0);
+ assert!((-9.0 as $fty).max(0.0).is_sign_positive());
+ assert_eq!((-9.0 as $fty).max(-0.0), -0.0);
+ assert!((-9.0 as $fty).max(-0.0).is_sign_negative());
+ assert_eq!((0.0 as $fty).max(9.0), 9.0);
+ assert_eq!((0.0 as $fty).max(-9.0), 0.0);
+ assert!((0.0 as $fty).max(-9.0).is_sign_positive());
+ assert_eq!((-0.0 as $fty).max(-9.0), -0.0);
+ assert!((-0.0 as $fty).max(-9.0).is_sign_negative());
+ assert_eq!(($inf as $fty).max(9.0), $inf);
+ assert_eq!((9.0 as $fty).max($inf), $inf);
+ assert_eq!(($inf as $fty).max(-9.0), $inf);
+ assert_eq!((-9.0 as $fty).max($inf), $inf);
+ assert_eq!(($neginf as $fty).max(9.0), 9.0);
+ assert_eq!((9.0 as $fty).max($neginf), 9.0);
+ assert_eq!(($neginf as $fty).max(-9.0), -9.0);
+ assert_eq!((-9.0 as $fty).max($neginf), -9.0);
+ assert_eq!(($nan as $fty).max(9.0), 9.0);
+ assert_eq!(($nan as $fty).max(-9.0), -9.0);
+ assert_eq!((9.0 as $fty).max($nan), 9.0);
+ assert_eq!((-9.0 as $fty).max($nan), -9.0);
+ assert!(($nan as $fty).max($nan).is_nan());
+ }
+ #[test]
+ fn minimum() {
+ assert_eq!((0.0 as $fty).minimum(0.0), 0.0);
+ assert!((0.0 as $fty).minimum(0.0).is_sign_positive());
+ assert_eq!((-0.0 as $fty).minimum(0.0), -0.0);
+ assert!((-0.0 as $fty).minimum(0.0).is_sign_negative());
+ assert_eq!((-0.0 as $fty).minimum(-0.0), -0.0);
+ assert!((-0.0 as $fty).minimum(-0.0).is_sign_negative());
+ assert_eq!((9.0 as $fty).minimum(9.0), 9.0);
+ assert_eq!((-9.0 as $fty).minimum(0.0), -9.0);
+ assert_eq!((0.0 as $fty).minimum(9.0), 0.0);
+ assert!((0.0 as $fty).minimum(9.0).is_sign_positive());
+ assert_eq!((-0.0 as $fty).minimum(9.0), -0.0);
+ assert!((-0.0 as $fty).minimum(9.0).is_sign_negative());
+ assert_eq!((-0.0 as $fty).minimum(-9.0), -9.0);
+ assert_eq!(($inf as $fty).minimum(9.0), 9.0);
+ assert_eq!((9.0 as $fty).minimum($inf), 9.0);
+ assert_eq!(($inf as $fty).minimum(-9.0), -9.0);
+ assert_eq!((-9.0 as $fty).minimum($inf), -9.0);
+ assert_eq!(($neginf as $fty).minimum(9.0), $neginf);
+ assert_eq!((9.0 as $fty).minimum($neginf), $neginf);
+ assert_eq!(($neginf as $fty).minimum(-9.0), $neginf);
+ assert_eq!((-9.0 as $fty).minimum($neginf), $neginf);
+ assert!(($nan as $fty).minimum(9.0).is_nan());
+ assert!(($nan as $fty).minimum(-9.0).is_nan());
+ assert!((9.0 as $fty).minimum($nan).is_nan());
+ assert!((-9.0 as $fty).minimum($nan).is_nan());
+ assert!(($nan as $fty).minimum($nan).is_nan());
+ }
+ #[test]
+ fn maximum() {
+ assert_eq!((0.0 as $fty).maximum(0.0), 0.0);
+ assert!((0.0 as $fty).maximum(0.0).is_sign_positive());
+ assert_eq!((-0.0 as $fty).maximum(0.0), 0.0);
+ assert!((-0.0 as $fty).maximum(0.0).is_sign_positive());
+ assert_eq!((-0.0 as $fty).maximum(-0.0), -0.0);
+ assert!((-0.0 as $fty).maximum(-0.0).is_sign_negative());
+ assert_eq!((9.0 as $fty).maximum(9.0), 9.0);
+ assert_eq!((-9.0 as $fty).maximum(0.0), 0.0);
+ assert!((-9.0 as $fty).maximum(0.0).is_sign_positive());
+ assert_eq!((-9.0 as $fty).maximum(-0.0), -0.0);
+ assert!((-9.0 as $fty).maximum(-0.0).is_sign_negative());
+ assert_eq!((0.0 as $fty).maximum(9.0), 9.0);
+ assert_eq!((0.0 as $fty).maximum(-9.0), 0.0);
+ assert!((0.0 as $fty).maximum(-9.0).is_sign_positive());
+ assert_eq!((-0.0 as $fty).maximum(-9.0), -0.0);
+ assert!((-0.0 as $fty).maximum(-9.0).is_sign_negative());
+ assert_eq!(($inf as $fty).maximum(9.0), $inf);
+ assert_eq!((9.0 as $fty).maximum($inf), $inf);
+ assert_eq!(($inf as $fty).maximum(-9.0), $inf);
+ assert_eq!((-9.0 as $fty).maximum($inf), $inf);
+ assert_eq!(($neginf as $fty).maximum(9.0), 9.0);
+ assert_eq!((9.0 as $fty).maximum($neginf), 9.0);
+ assert_eq!(($neginf as $fty).maximum(-9.0), -9.0);
+ assert_eq!((-9.0 as $fty).maximum($neginf), -9.0);
+ assert!(($nan as $fty).maximum(9.0).is_nan());
+ assert!(($nan as $fty).maximum(-9.0).is_nan());
+ assert!((9.0 as $fty).maximum($nan).is_nan());
+ assert!((-9.0 as $fty).maximum($nan).is_nan());
+ assert!(($nan as $fty).maximum($nan).is_nan());
+ }
+ #[test]
+ fn rem_euclid() {
+ let a: $fty = 42.0;
+ assert!($inf.rem_euclid(a).is_nan());
+ assert_eq!(a.rem_euclid($inf), a);
+ assert!(a.rem_euclid($nan).is_nan());
+ assert!($inf.rem_euclid($inf).is_nan());
+ assert!($inf.rem_euclid($nan).is_nan());
+ assert!($nan.rem_euclid($inf).is_nan());
+ }
+ #[test]
+ fn div_euclid() {
+ let a: $fty = 42.0;
+ assert_eq!(a.div_euclid($inf), 0.0);
+ assert!(a.div_euclid($nan).is_nan());
+ assert!($inf.div_euclid($inf).is_nan());
+ assert!($inf.div_euclid($nan).is_nan());
+ assert!($nan.div_euclid($inf).is_nan());
+ }
+ }
+ };
+}
+
+test_float!(f32, f32, f32::INFINITY, f32::NEG_INFINITY, f32::NAN);
+test_float!(f64, f64, f64::INFINITY, f64::NEG_INFINITY, f64::NAN);
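+
+// An illustrative sketch (hypothetical test name; assumes the unstable
+// `float_minimum_maximum` feature is enabled, as the tests above already
+// require): the key contrast pinned down above is NaN handling. `min`/`max`
+// ignore a NaN operand, while `minimum`/`maximum` propagate it.
+#[test]
+fn min_vs_minimum_nan_sketch() {
+    assert_eq!(f64::NAN.min(1.0), 1.0); // IEEE 754 minNum: NaN is ignored
+    assert!(f64::NAN.minimum(1.0).is_nan()); // IEEE 754-2019 minimum: NaN propagates
+}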
diff --git a/library/core/tests/num/nan.rs b/library/core/tests/num/nan.rs
new file mode 100644
index 000000000..ef81988c9
--- /dev/null
+++ b/library/core/tests/num/nan.rs
@@ -0,0 +1,7 @@
+#[test]
+fn test_nan() {
+ let x = "NaN".to_string();
+ assert_eq!(format!("{}", f64::NAN), x);
+ assert_eq!(format!("{:e}", f64::NAN), x);
+ assert_eq!(format!("{:E}", f64::NAN), x);
+}
diff --git a/library/core/tests/num/ops.rs b/library/core/tests/num/ops.rs
new file mode 100644
index 000000000..ae8b93825
--- /dev/null
+++ b/library/core/tests/num/ops.rs
@@ -0,0 +1,232 @@
+use core::ops::*;
+
+// For types L and R, checks that a trait implementation exists for
+// * binary ops: L op R, L op &R, &L op R and &L op &R
+// * assign ops: &mut L op R, &mut L op &R
+macro_rules! impl_defined {
+ ($op:ident, $method:ident($lhs:literal, $rhs:literal), $result:literal, $lt:ty, $rt:ty) => {
+ let lhs = $lhs as $lt;
+ let rhs = $rhs as $rt;
+ assert_eq!($result as $lt, $op::$method(lhs, rhs));
+ assert_eq!($result as $lt, $op::$method(lhs, &rhs));
+ assert_eq!($result as $lt, $op::$method(&lhs, rhs));
+ assert_eq!($result as $lt, $op::$method(&lhs, &rhs));
+ };
+ ($op:ident, $method:ident(&mut $lhs:literal, $rhs:literal), $result:literal, $lt:ty, $rt:ty) => {
+ let rhs = $rhs as $rt;
+ let mut lhs = $lhs as $lt;
+ $op::$method(&mut lhs, rhs);
+ assert_eq!($result as $lt, lhs);
+
+ let mut lhs = $lhs as $lt;
+ $op::$method(&mut lhs, &rhs);
+ assert_eq!($result as $lt, lhs);
+ };
+}
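+
+// An illustrative sketch (hypothetical test name): the four reference
+// combinations that the binary-op arm of `impl_defined!` exercises, written
+// out for `Add` on i32.
+#[test]
+fn impl_defined_add_i32_sketch() {
+    let (lhs, rhs) = (1i32, 2i32);
+    assert_eq!(3, Add::add(lhs, rhs)); // i32 + i32
+    assert_eq!(3, Add::add(lhs, &rhs)); // i32 + &i32
+    assert_eq!(3, Add::add(&lhs, rhs)); // &i32 + i32
+    assert_eq!(3, Add::add(&lhs, &rhs)); // &i32 + &i32
+}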
+
+// For all specified types T, checks that a trait implementation exists for
+// * binary ops: T op T, T op &T, &T op T and &T op &T
+// * assign ops: &mut T op T, &mut T op &T
+// * unary ops: op T and op &T
+macro_rules! impls_defined {
+ ($op:ident, $method:ident($lhs:literal, $rhs:literal), $result:literal, $($t:ty),+) => {$(
+ impl_defined!($op, $method($lhs, $rhs), $result, $t, $t);
+ )+};
+ ($op:ident, $method:ident(&mut $lhs:literal, $rhs:literal), $result:literal, $($t:ty),+) => {$(
+ impl_defined!($op, $method(&mut $lhs, $rhs), $result, $t, $t);
+ )+};
+ ($op:ident, $method:ident($operand:literal), $result:literal, $($t:ty),+) => {$(
+ let operand = $operand as $t;
+ assert_eq!($result as $t, $op::$method(operand));
+ assert_eq!($result as $t, $op::$method(&operand));
+ )+};
+}
+
+macro_rules! test_op {
+ ($fn_name:ident, $op:ident::$method:ident($lhs:literal), $result:literal, $($t:ty),+) => {
+ #[test]
+ fn $fn_name() {
+ impls_defined!($op, $method($lhs), $result, $($t),+);
+ }
+ };
+}
+
+test_op!(test_neg_defined, Neg::neg(0), 0, i8, i16, i32, i64, f32, f64);
+#[cfg(not(target_os = "emscripten"))]
+test_op!(test_neg_defined_128, Neg::neg(0), 0, i128);
+
+test_op!(test_not_defined_bool, Not::not(true), false, bool);
+
+macro_rules! test_arith_op {
+ ($fn_name:ident, $op:ident::$method:ident($lhs:literal, $rhs:literal)) => {
+ #[test]
+ fn $fn_name() {
+ impls_defined!(
+ $op,
+ $method($lhs, $rhs),
+ 0,
+ i8,
+ i16,
+ i32,
+ i64,
+ isize,
+ u8,
+ u16,
+ u32,
+ u64,
+ usize,
+ f32,
+ f64
+ );
+ #[cfg(not(target_os = "emscripten"))]
+ impls_defined!($op, $method($lhs, $rhs), 0, i128, u128);
+ }
+ };
+ ($fn_name:ident, $op:ident::$method:ident(&mut $lhs:literal, $rhs:literal)) => {
+ #[test]
+ fn $fn_name() {
+ impls_defined!(
+ $op,
+ $method(&mut $lhs, $rhs),
+ 0,
+ i8,
+ i16,
+ i32,
+ i64,
+ isize,
+ u8,
+ u16,
+ u32,
+ u64,
+ usize,
+ f32,
+ f64
+ );
+ #[cfg(not(target_os = "emscripten"))]
+ impls_defined!($op, $method(&mut $lhs, $rhs), 0, i128, u128);
+ }
+ };
+}
+
+test_arith_op!(test_add_defined, Add::add(0, 0));
+test_arith_op!(test_add_assign_defined, AddAssign::add_assign(&mut 0, 0));
+test_arith_op!(test_sub_defined, Sub::sub(0, 0));
+test_arith_op!(test_sub_assign_defined, SubAssign::sub_assign(&mut 0, 0));
+test_arith_op!(test_mul_defined, Mul::mul(0, 0));
+test_arith_op!(test_mul_assign_defined, MulAssign::mul_assign(&mut 0, 0));
+test_arith_op!(test_div_defined, Div::div(0, 1));
+test_arith_op!(test_div_assign_defined, DivAssign::div_assign(&mut 0, 1));
+test_arith_op!(test_rem_defined, Rem::rem(0, 1));
+test_arith_op!(test_rem_assign_defined, RemAssign::rem_assign(&mut 0, 1));
+
+macro_rules! test_bitop {
+ ($test_name:ident, $op:ident::$method:ident) => {
+ #[test]
+ fn $test_name() {
+ impls_defined!(
+ $op,
+ $method(0, 0),
+ 0,
+ i8,
+ i16,
+ i32,
+ i64,
+ isize,
+ u8,
+ u16,
+ u32,
+ u64,
+ usize
+ );
+ #[cfg(not(target_os = "emscripten"))]
+ impls_defined!($op, $method(0, 0), 0, i128, u128);
+ impls_defined!($op, $method(false, false), false, bool);
+ }
+ };
+}
+macro_rules! test_bitop_assign {
+ ($test_name:ident, $op:ident::$method:ident) => {
+ #[test]
+ fn $test_name() {
+ impls_defined!(
+ $op,
+ $method(&mut 0, 0),
+ 0,
+ i8,
+ i16,
+ i32,
+ i64,
+ isize,
+ u8,
+ u16,
+ u32,
+ u64,
+ usize
+ );
+ #[cfg(not(target_os = "emscripten"))]
+ impls_defined!($op, $method(&mut 0, 0), 0, i128, u128);
+ impls_defined!($op, $method(&mut false, false), false, bool);
+ }
+ };
+}
+
+test_bitop!(test_bitand_defined, BitAnd::bitand);
+test_bitop_assign!(test_bitand_assign_defined, BitAndAssign::bitand_assign);
+test_bitop!(test_bitor_defined, BitOr::bitor);
+test_bitop_assign!(test_bitor_assign_defined, BitOrAssign::bitor_assign);
+test_bitop!(test_bitxor_defined, BitXor::bitxor);
+test_bitop_assign!(test_bitxor_assign_defined, BitXorAssign::bitxor_assign);
+
+macro_rules! test_shift_inner {
+ ($op:ident::$method:ident, $lt:ty, $($rt:ty),+) => {
+ $(impl_defined!($op, $method(0,0), 0, $lt, $rt);)+
+ };
+ ($op:ident::$method:ident, $lt:ty) => {
+ test_shift_inner!($op::$method, $lt, i8, i16, i32, i64, isize, u8, u16, u32, u64, usize);
+ #[cfg(not(target_os = "emscripten"))]
+ test_shift_inner!($op::$method, $lt, i128, u128);
+ };
+}
+
+macro_rules! test_shift {
+ ($op:ident::$method:ident, $($lt:ty),+) => {
+ $(test_shift_inner!($op::$method, $lt);)+
+ };
+ ($test_name:ident, $op:ident::$method:ident) => {
+ #[test]
+ fn $test_name() {
+ test_shift!($op::$method, i8, i16, i32, i64, isize, u8, u16, u32, u64, usize);
+ #[cfg(not(target_os = "emscripten"))]
+ test_shift!($op::$method, i128, u128);
+ }
+ };
+}
+
+macro_rules! test_shift_assign_inner {
+ ($op:ident::$method:ident, $lt:ty, $($rt:ty),+) => {
+ $(impl_defined!($op, $method(&mut 0,0), 0, $lt, $rt);)+
+ };
+ ($op:ident::$method:ident, $lt:ty) => {
+ test_shift_assign_inner!($op::$method, $lt, i8, i16, i32, i64, isize, u8, u16, u32, u64, usize);
+ #[cfg(not(target_os = "emscripten"))]
+ test_shift_assign_inner!($op::$method, $lt, i128, u128);
+ };
+}
+
+macro_rules! test_shift_assign {
+ ($op:ident::$method:ident, $($lt:ty),+) => {
+ $(test_shift_assign_inner!($op::$method, $lt);)+
+ };
+ ($test_name:ident, $op:ident::$method:ident) => {
+ #[test]
+ fn $test_name() {
+ test_shift_assign!($op::$method, i8, i16, i32, i64, isize, u8, u16, u32, u64, usize);
+ #[cfg(not(target_os = "emscripten"))]
+ test_shift_assign!($op::$method, i128, u128);
+ }
+ };
+}
+test_shift!(test_shl_defined, Shl::shl);
+test_shift_assign!(test_shl_assign_defined, ShlAssign::shl_assign);
+test_shift!(test_shr_defined, Shr::shr);
+test_shift_assign!(test_shr_assign_defined, ShrAssign::shr_assign);
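+
+// An illustrative sketch (hypothetical test name): unlike the arithmetic ops
+// above, the shift traits are heterogeneous (the RHS type may differ from the
+// LHS), which is why the inner macros cross every LHS type with every RHS
+// type.
+#[test]
+fn shift_mixed_types_sketch() {
+    assert_eq!(Shl::shl(1u8, 3i64), 8u8); // u8 << i64 is defined
+    assert_eq!(Shr::shr(8u8, 2u32), 2u8); // u8 >> u32 as well
+}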
diff --git a/library/core/tests/num/u128.rs b/library/core/tests/num/u128.rs
new file mode 100644
index 000000000..a7b0f9eff
--- /dev/null
+++ b/library/core/tests/num/u128.rs
@@ -0,0 +1 @@
+uint_module!(u128);
diff --git a/library/core/tests/num/u16.rs b/library/core/tests/num/u16.rs
new file mode 100644
index 000000000..010596a34
--- /dev/null
+++ b/library/core/tests/num/u16.rs
@@ -0,0 +1 @@
+uint_module!(u16);
diff --git a/library/core/tests/num/u32.rs b/library/core/tests/num/u32.rs
new file mode 100644
index 000000000..687d3bbaa
--- /dev/null
+++ b/library/core/tests/num/u32.rs
@@ -0,0 +1 @@
+uint_module!(u32);
diff --git a/library/core/tests/num/u64.rs b/library/core/tests/num/u64.rs
new file mode 100644
index 000000000..ee55071e9
--- /dev/null
+++ b/library/core/tests/num/u64.rs
@@ -0,0 +1 @@
+uint_module!(u64);
diff --git a/library/core/tests/num/u8.rs b/library/core/tests/num/u8.rs
new file mode 100644
index 000000000..12b038ce0
--- /dev/null
+++ b/library/core/tests/num/u8.rs
@@ -0,0 +1 @@
+uint_module!(u8);
diff --git a/library/core/tests/num/uint_macros.rs b/library/core/tests/num/uint_macros.rs
new file mode 100644
index 000000000..93ae620c2
--- /dev/null
+++ b/library/core/tests/num/uint_macros.rs
@@ -0,0 +1,235 @@
+macro_rules! uint_module {
+ ($T:ident) => {
+ #[cfg(test)]
+ mod tests {
+ use core::ops::{BitAnd, BitOr, BitXor, Not, Shl, Shr};
+ use core::$T::*;
+ use std::str::FromStr;
+
+ use crate::num;
+
+ #[test]
+ fn test_overflows() {
+ assert!(MAX > 0);
+ assert!(MIN <= 0);
+ assert!((MIN + MAX).wrapping_add(1) == 0);
+ }
+
+ #[test]
+ fn test_num() {
+ num::test_num(10 as $T, 2 as $T);
+ }
+
+ #[test]
+ fn test_bitwise_operators() {
+ assert!(0b1110 as $T == (0b1100 as $T).bitor(0b1010 as $T));
+ assert!(0b1000 as $T == (0b1100 as $T).bitand(0b1010 as $T));
+ assert!(0b0110 as $T == (0b1100 as $T).bitxor(0b1010 as $T));
+ assert!(0b1110 as $T == (0b0111 as $T).shl(1));
+ assert!(0b0111 as $T == (0b1110 as $T).shr(1));
+ assert!(MAX - (0b1011 as $T) == (0b1011 as $T).not());
+ }
+
+ const A: $T = 0b0101100;
+ const B: $T = 0b0100001;
+ const C: $T = 0b1111001;
+
+ const _0: $T = 0;
+ const _1: $T = !0;
+
+ #[test]
+ fn test_count_ones() {
+ assert!(A.count_ones() == 3);
+ assert!(B.count_ones() == 2);
+ assert!(C.count_ones() == 5);
+ }
+
+ #[test]
+ fn test_count_zeros() {
+ assert!(A.count_zeros() == $T::BITS - 3);
+ assert!(B.count_zeros() == $T::BITS - 2);
+ assert!(C.count_zeros() == $T::BITS - 5);
+ }
+
+ #[test]
+ fn test_leading_trailing_ones() {
+ let a: $T = 0b0101_1111;
+ assert_eq!(a.trailing_ones(), 5);
+ assert_eq!((!a).leading_ones(), $T::BITS - 7);
+
+ assert_eq!(a.reverse_bits().leading_ones(), 5);
+
+ assert_eq!(_1.leading_ones(), $T::BITS);
+ assert_eq!(_1.trailing_ones(), $T::BITS);
+
+ assert_eq!((_1 << 1).trailing_ones(), 0);
+ assert_eq!((_1 >> 1).leading_ones(), 0);
+
+ assert_eq!((_1 << 1).leading_ones(), $T::BITS - 1);
+ assert_eq!((_1 >> 1).trailing_ones(), $T::BITS - 1);
+
+ assert_eq!(_0.leading_ones(), 0);
+ assert_eq!(_0.trailing_ones(), 0);
+
+ let x: $T = 0b0010_1100;
+ assert_eq!(x.leading_ones(), 0);
+ assert_eq!(x.trailing_ones(), 0);
+ }
+
+ #[test]
+ fn test_rotate() {
+ assert_eq!(A.rotate_left(6).rotate_right(2).rotate_right(4), A);
+ assert_eq!(B.rotate_left(3).rotate_left(2).rotate_right(5), B);
+ assert_eq!(C.rotate_left(6).rotate_right(2).rotate_right(4), C);
+
+ // Rotating these should make no difference
+ //
+            // We rotate by 124 bits to ensure that overlong rotations (larger
+            // than the type's bit width) do not cause undefined behaviour.
+            // See #10183.
+ assert_eq!(_0.rotate_left(124), _0);
+ assert_eq!(_1.rotate_left(124), _1);
+ assert_eq!(_0.rotate_right(124), _0);
+ assert_eq!(_1.rotate_right(124), _1);
+
+ // Rotating by 0 should have no effect
+ assert_eq!(A.rotate_left(0), A);
+ assert_eq!(B.rotate_left(0), B);
+ assert_eq!(C.rotate_left(0), C);
+ // Rotating by a multiple of word size should also have no effect
+ assert_eq!(A.rotate_left(128), A);
+ assert_eq!(B.rotate_left(128), B);
+ assert_eq!(C.rotate_left(128), C);
+ }
+
+ #[test]
+ fn test_swap_bytes() {
+ assert_eq!(A.swap_bytes().swap_bytes(), A);
+ assert_eq!(B.swap_bytes().swap_bytes(), B);
+ assert_eq!(C.swap_bytes().swap_bytes(), C);
+
+ // Swapping these should make no difference
+ assert_eq!(_0.swap_bytes(), _0);
+ assert_eq!(_1.swap_bytes(), _1);
+ }
+
+ #[test]
+ fn test_reverse_bits() {
+ assert_eq!(A.reverse_bits().reverse_bits(), A);
+ assert_eq!(B.reverse_bits().reverse_bits(), B);
+ assert_eq!(C.reverse_bits().reverse_bits(), C);
+
+            // Reversing these should make no difference
+ assert_eq!(_0.reverse_bits(), _0);
+ assert_eq!(_1.reverse_bits(), _1);
+ }
+
+ #[test]
+ fn test_le() {
+ assert_eq!($T::from_le(A.to_le()), A);
+ assert_eq!($T::from_le(B.to_le()), B);
+ assert_eq!($T::from_le(C.to_le()), C);
+ assert_eq!($T::from_le(_0), _0);
+ assert_eq!($T::from_le(_1), _1);
+ assert_eq!(_0.to_le(), _0);
+ assert_eq!(_1.to_le(), _1);
+ }
+
+ #[test]
+ fn test_be() {
+ assert_eq!($T::from_be(A.to_be()), A);
+ assert_eq!($T::from_be(B.to_be()), B);
+ assert_eq!($T::from_be(C.to_be()), C);
+ assert_eq!($T::from_be(_0), _0);
+ assert_eq!($T::from_be(_1), _1);
+ assert_eq!(_0.to_be(), _0);
+ assert_eq!(_1.to_be(), _1);
+ }
+
+ #[test]
+ fn test_unsigned_checked_div() {
+ assert!((10 as $T).checked_div(2) == Some(5));
+ assert!((5 as $T).checked_div(0) == None);
+ }
+
+ fn from_str<T: FromStr>(t: &str) -> Option<T> {
+ FromStr::from_str(t).ok()
+ }
+
+ #[test]
+ pub fn test_from_str() {
+ assert_eq!(from_str::<$T>("0"), Some(0 as $T));
+ assert_eq!(from_str::<$T>("3"), Some(3 as $T));
+ assert_eq!(from_str::<$T>("10"), Some(10 as $T));
+ assert_eq!(from_str::<u32>("123456789"), Some(123456789 as u32));
+ assert_eq!(from_str::<$T>("00100"), Some(100 as $T));
+
+ assert_eq!(from_str::<$T>(""), None);
+ assert_eq!(from_str::<$T>(" "), None);
+ assert_eq!(from_str::<$T>("x"), None);
+ }
+
+ #[test]
+ pub fn test_parse_bytes() {
+ assert_eq!($T::from_str_radix("123", 10), Ok(123 as $T));
+ assert_eq!($T::from_str_radix("1001", 2), Ok(9 as $T));
+ assert_eq!($T::from_str_radix("123", 8), Ok(83 as $T));
+ assert_eq!(u16::from_str_radix("123", 16), Ok(291 as u16));
+ assert_eq!(u16::from_str_radix("ffff", 16), Ok(65535 as u16));
+ assert_eq!($T::from_str_radix("z", 36), Ok(35 as $T));
+
+ assert_eq!($T::from_str_radix("Z", 10).ok(), None::<$T>);
+ assert_eq!($T::from_str_radix("_", 2).ok(), None::<$T>);
+ }
+
+ #[test]
+ fn test_pow() {
+ let mut r = 2 as $T;
+ assert_eq!(r.pow(2), 4 as $T);
+ assert_eq!(r.pow(0), 1 as $T);
+ assert_eq!(r.wrapping_pow(2), 4 as $T);
+ assert_eq!(r.wrapping_pow(0), 1 as $T);
+ assert_eq!(r.checked_pow(2), Some(4 as $T));
+ assert_eq!(r.checked_pow(0), Some(1 as $T));
+ assert_eq!(r.overflowing_pow(2), (4 as $T, false));
+ assert_eq!(r.overflowing_pow(0), (1 as $T, false));
+ assert_eq!(r.saturating_pow(2), 4 as $T);
+ assert_eq!(r.saturating_pow(0), 1 as $T);
+
+ r = MAX;
+            // Use `^` below to denote exact exponentiation without overflow:
+            // if $T::MAX == 2^j - 1, then $T is a j-bit integer, so
+            // $T::MAX * $T::MAX == 2^(2*j) - 2^(j+1) + 1, and thus the
+            // wrapped (overflowing) result is exactly 1.
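+            // Worked example: for u8, MAX * MAX == 255 * 255 == 65025
+            // == 254 * 256 + 1, so modulo 2^8 the wrapped product is exactly
+            // 1, matching the assertions below.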
+ assert_eq!(r.wrapping_pow(2), 1 as $T);
+ assert_eq!(r.checked_pow(2), None);
+ assert_eq!(r.overflowing_pow(2), (1 as $T, true));
+ assert_eq!(r.saturating_pow(2), MAX);
+ }
+
+ #[test]
+ fn test_div_floor() {
+ assert_eq!((8 as $T).div_floor(3), 2);
+ }
+
+ #[test]
+ fn test_div_ceil() {
+ assert_eq!((8 as $T).div_ceil(3), 3);
+ }
+
+ #[test]
+ fn test_next_multiple_of() {
+ assert_eq!((16 as $T).next_multiple_of(8), 16);
+ assert_eq!((23 as $T).next_multiple_of(8), 24);
+ assert_eq!(MAX.next_multiple_of(1), MAX);
+ }
+
+ #[test]
+ fn test_checked_next_multiple_of() {
+ assert_eq!((16 as $T).checked_next_multiple_of(8), Some(16));
+ assert_eq!((23 as $T).checked_next_multiple_of(8), Some(24));
+ assert_eq!((1 as $T).checked_next_multiple_of(0), None);
+ assert_eq!(MAX.checked_next_multiple_of(2), None);
+ }
+ }
+ };
+}
diff --git a/library/core/tests/num/wrapping.rs b/library/core/tests/num/wrapping.rs
new file mode 100644
index 000000000..8ded139a1
--- /dev/null
+++ b/library/core/tests/num/wrapping.rs
@@ -0,0 +1,320 @@
+use core::num::Wrapping;
+
+macro_rules! wrapping_operation {
+ ($result:expr, $lhs:ident $op:tt $rhs:expr) => {
+ assert_eq!($result, $lhs $op $rhs);
+ assert_eq!($result, &$lhs $op $rhs);
+ assert_eq!($result, $lhs $op &$rhs);
+ assert_eq!($result, &$lhs $op &$rhs);
+ };
+ ($result:expr, $op:tt $expr:expr) => {
+ assert_eq!($result, $op $expr);
+ assert_eq!($result, $op &$expr);
+ };
+}
+
+macro_rules! wrapping_assignment {
+ ($result:expr, $lhs:ident $op:tt $rhs:expr) => {
+ let mut lhs1 = $lhs;
+ lhs1 $op $rhs;
+ assert_eq!($result, lhs1);
+
+ let mut lhs2 = $lhs;
+ lhs2 $op &$rhs;
+ assert_eq!($result, lhs2);
+ };
+}
+
+macro_rules! wrapping_test {
+ ($fn_name:ident, $type:ty, $min:expr, $max:expr) => {
+ #[test]
+ fn $fn_name() {
+ let zero: Wrapping<$type> = Wrapping(0);
+ let one: Wrapping<$type> = Wrapping(1);
+ let min: Wrapping<$type> = Wrapping($min);
+ let max: Wrapping<$type> = Wrapping($max);
+
+ wrapping_operation!(min, max + one);
+ wrapping_assignment!(min, max += one);
+ wrapping_operation!(max, min - one);
+ wrapping_assignment!(max, min -= one);
+ wrapping_operation!(max, max * one);
+ wrapping_assignment!(max, max *= one);
+ wrapping_operation!(max, max / one);
+ wrapping_assignment!(max, max /= one);
+ wrapping_operation!(zero, max % one);
+ wrapping_assignment!(zero, max %= one);
+ wrapping_operation!(zero, zero & max);
+ wrapping_assignment!(zero, zero &= max);
+ wrapping_operation!(max, zero | max);
+ wrapping_assignment!(max, zero |= max);
+ wrapping_operation!(zero, max ^ max);
+ wrapping_assignment!(zero, max ^= max);
+ wrapping_operation!(zero, zero << 1usize);
+ wrapping_assignment!(zero, zero <<= 1usize);
+ wrapping_operation!(zero, zero >> 1usize);
+ wrapping_assignment!(zero, zero >>= 1usize);
+ wrapping_operation!(zero, -zero);
+ wrapping_operation!(max, !min);
+ }
+ };
+}
+
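+// A minimal sketch (hypothetical test name): the first two operation lines of
+// the macro body, written out for i8; `Wrapping` arithmetic wraps at the type
+// boundaries instead of panicking.
+#[test]
+fn wrapping_i8_sketch() {
+    assert_eq!(Wrapping(i8::MAX) + Wrapping(1), Wrapping(i8::MIN)); // 127 + 1 wraps
+    assert_eq!(Wrapping(i8::MIN) - Wrapping(1), Wrapping(i8::MAX)); // -128 - 1 wraps
+}
+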
+wrapping_test!(test_wrapping_i8, i8, i8::MIN, i8::MAX);
+wrapping_test!(test_wrapping_i16, i16, i16::MIN, i16::MAX);
+wrapping_test!(test_wrapping_i32, i32, i32::MIN, i32::MAX);
+wrapping_test!(test_wrapping_i64, i64, i64::MIN, i64::MAX);
+#[cfg(not(target_os = "emscripten"))]
+wrapping_test!(test_wrapping_i128, i128, i128::MIN, i128::MAX);
+wrapping_test!(test_wrapping_isize, isize, isize::MIN, isize::MAX);
+wrapping_test!(test_wrapping_u8, u8, u8::MIN, u8::MAX);
+wrapping_test!(test_wrapping_u16, u16, u16::MIN, u16::MAX);
+wrapping_test!(test_wrapping_u32, u32, u32::MIN, u32::MAX);
+wrapping_test!(test_wrapping_u64, u64, u64::MIN, u64::MAX);
+#[cfg(not(target_os = "emscripten"))]
+wrapping_test!(test_wrapping_u128, u128, u128::MIN, u128::MAX);
+wrapping_test!(test_wrapping_usize, usize, usize::MIN, usize::MAX);
+
+// Don't warn about overflowing ops on 32-bit platforms
+#[cfg_attr(target_pointer_width = "32", allow(const_err))]
+#[test]
+fn wrapping_int_api() {
+ assert_eq!(i8::MAX.wrapping_add(1), i8::MIN);
+ assert_eq!(i16::MAX.wrapping_add(1), i16::MIN);
+ assert_eq!(i32::MAX.wrapping_add(1), i32::MIN);
+ assert_eq!(i64::MAX.wrapping_add(1), i64::MIN);
+ assert_eq!(isize::MAX.wrapping_add(1), isize::MIN);
+
+ assert_eq!(i8::MIN.wrapping_sub(1), i8::MAX);
+ assert_eq!(i16::MIN.wrapping_sub(1), i16::MAX);
+ assert_eq!(i32::MIN.wrapping_sub(1), i32::MAX);
+ assert_eq!(i64::MIN.wrapping_sub(1), i64::MAX);
+ assert_eq!(isize::MIN.wrapping_sub(1), isize::MAX);
+
+ assert_eq!(u8::MAX.wrapping_add(1), u8::MIN);
+ assert_eq!(u16::MAX.wrapping_add(1), u16::MIN);
+ assert_eq!(u32::MAX.wrapping_add(1), u32::MIN);
+ assert_eq!(u64::MAX.wrapping_add(1), u64::MIN);
+ assert_eq!(usize::MAX.wrapping_add(1), usize::MIN);
+
+ assert_eq!(u8::MIN.wrapping_sub(1), u8::MAX);
+ assert_eq!(u16::MIN.wrapping_sub(1), u16::MAX);
+ assert_eq!(u32::MIN.wrapping_sub(1), u32::MAX);
+ assert_eq!(u64::MIN.wrapping_sub(1), u64::MAX);
+ assert_eq!(usize::MIN.wrapping_sub(1), usize::MAX);
+
+ assert_eq!((0xfe_u8 as i8).wrapping_mul(16), (0xe0_u8 as i8));
+ assert_eq!((0xfedc_u16 as i16).wrapping_mul(16), (0xedc0_u16 as i16));
+ assert_eq!((0xfedc_ba98_u32 as i32).wrapping_mul(16), (0xedcb_a980_u32 as i32));
+ assert_eq!(
+ (0xfedc_ba98_7654_3217_u64 as i64).wrapping_mul(16),
+ (0xedcb_a987_6543_2170_u64 as i64)
+ );
+
+ match () {
+ #[cfg(target_pointer_width = "32")]
+ () => {
+ assert_eq!((0xfedc_ba98_u32 as isize).wrapping_mul(16), (0xedcb_a980_u32 as isize));
+ }
+ #[cfg(target_pointer_width = "64")]
+ () => {
+ assert_eq!(
+ (0xfedc_ba98_7654_3217_u64 as isize).wrapping_mul(16),
+ (0xedcb_a987_6543_2170_u64 as isize)
+ );
+ }
+ }
+
+ assert_eq!((0xfe as u8).wrapping_mul(16), (0xe0 as u8));
+ assert_eq!((0xfedc as u16).wrapping_mul(16), (0xedc0 as u16));
+ assert_eq!((0xfedc_ba98 as u32).wrapping_mul(16), (0xedcb_a980 as u32));
+ assert_eq!((0xfedc_ba98_7654_3217 as u64).wrapping_mul(16), (0xedcb_a987_6543_2170 as u64));
+
+ match () {
+ #[cfg(target_pointer_width = "32")]
+ () => {
+ assert_eq!((0xfedc_ba98 as usize).wrapping_mul(16), (0xedcb_a980 as usize));
+ }
+ #[cfg(target_pointer_width = "64")]
+ () => {
+ assert_eq!(
+ (0xfedc_ba98_7654_3217 as usize).wrapping_mul(16),
+ (0xedcb_a987_6543_2170 as usize)
+ );
+ }
+ }
+
+ macro_rules! check_mul_no_wrap {
+ ($e:expr, $f:expr) => {
+ assert_eq!(($e).wrapping_mul($f), ($e) * $f);
+ };
+ }
+ macro_rules! check_mul_wraps {
+ ($e:expr, $f:expr) => {
+ assert_eq!(($e).wrapping_mul($f), $e);
+ };
+ }
+
+ check_mul_no_wrap!(0xfe_u8 as i8, -1);
+ check_mul_no_wrap!(0xfedc_u16 as i16, -1);
+ check_mul_no_wrap!(0xfedc_ba98_u32 as i32, -1);
+ check_mul_no_wrap!(0xfedc_ba98_7654_3217_u64 as i64, -1);
+ check_mul_no_wrap!(0xfedc_ba98_7654_3217_u64 as u64 as isize, -1);
+
+ check_mul_no_wrap!(0xfe_u8 as i8, -2);
+ check_mul_no_wrap!(0xfedc_u16 as i16, -2);
+ check_mul_no_wrap!(0xfedc_ba98_u32 as i32, -2);
+ check_mul_no_wrap!(0xfedc_ba98_7654_3217_u64 as i64, -2);
+ check_mul_no_wrap!(0xfedc_ba98_fedc_ba98_u64 as u64 as isize, -2);
+
+ check_mul_no_wrap!(0xfe_u8 as i8, 2);
+ check_mul_no_wrap!(0xfedc_u16 as i16, 2);
+ check_mul_no_wrap!(0xfedc_ba98_u32 as i32, 2);
+ check_mul_no_wrap!(0xfedc_ba98_7654_3217_u64 as i64, 2);
+ check_mul_no_wrap!(0xfedc_ba98_fedc_ba98_u64 as u64 as isize, 2);
+
+ check_mul_wraps!(0x80_u8 as i8, -1);
+ check_mul_wraps!(0x8000_u16 as i16, -1);
+ check_mul_wraps!(0x8000_0000_u32 as i32, -1);
+ check_mul_wraps!(0x8000_0000_0000_0000_u64 as i64, -1);
+ match () {
+ #[cfg(target_pointer_width = "32")]
+ () => {
+ check_mul_wraps!(0x8000_0000_u32 as isize, -1);
+ }
+ #[cfg(target_pointer_width = "64")]
+ () => {
+ check_mul_wraps!(0x8000_0000_0000_0000_u64 as isize, -1);
+ }
+ }
+
+ macro_rules! check_div_no_wrap {
+ ($e:expr, $f:expr) => {
+ assert_eq!(($e).wrapping_div($f), ($e) / $f);
+ };
+ }
+ macro_rules! check_div_wraps {
+ ($e:expr, $f:expr) => {
+ assert_eq!(($e).wrapping_div($f), $e);
+ };
+ }
+
+ check_div_no_wrap!(0xfe_u8 as i8, -1);
+ check_div_no_wrap!(0xfedc_u16 as i16, -1);
+ check_div_no_wrap!(0xfedc_ba98_u32 as i32, -1);
+ check_div_no_wrap!(0xfedc_ba98_7654_3217_u64 as i64, -1);
+ check_div_no_wrap!(0xfedc_ba98_7654_3217_u64 as u64 as isize, -1);
+
+ check_div_no_wrap!(0xfe_u8 as i8, -2);
+ check_div_no_wrap!(0xfedc_u16 as i16, -2);
+ check_div_no_wrap!(0xfedc_ba98_u32 as i32, -2);
+ check_div_no_wrap!(0xfedc_ba98_7654_3217_u64 as i64, -2);
+ check_div_no_wrap!(0xfedc_ba98_7654_3217_u64 as u64 as isize, -2);
+
+ check_div_no_wrap!(0xfe_u8 as i8, 2);
+ check_div_no_wrap!(0xfedc_u16 as i16, 2);
+ check_div_no_wrap!(0xfedc_ba98_u32 as i32, 2);
+ check_div_no_wrap!(0xfedc_ba98_7654_3217_u64 as i64, 2);
+ check_div_no_wrap!(0xfedc_ba98_7654_3217_u64 as u64 as isize, 2);
+
+ check_div_wraps!(-128 as i8, -1);
+ check_div_wraps!(0x8000_u16 as i16, -1);
+ check_div_wraps!(0x8000_0000_u32 as i32, -1);
+ check_div_wraps!(0x8000_0000_0000_0000_u64 as i64, -1);
+ match () {
+ #[cfg(target_pointer_width = "32")]
+ () => {
+ check_div_wraps!(0x8000_0000_u32 as isize, -1);
+ }
+ #[cfg(target_pointer_width = "64")]
+ () => {
+ check_div_wraps!(0x8000_0000_0000_0000_u64 as isize, -1);
+ }
+ }
+
+ macro_rules! check_rem_no_wrap {
+ ($e:expr, $f:expr) => {
+ assert_eq!(($e).wrapping_rem($f), ($e) % $f);
+ };
+ }
+ macro_rules! check_rem_wraps {
+ ($e:expr, $f:expr) => {
+ assert_eq!(($e).wrapping_rem($f), 0);
+ };
+ }
+
+ check_rem_no_wrap!(0xfe_u8 as i8, -1);
+ check_rem_no_wrap!(0xfedc_u16 as i16, -1);
+ check_rem_no_wrap!(0xfedc_ba98_u32 as i32, -1);
+ check_rem_no_wrap!(0xfedc_ba98_7654_3217_u64 as i64, -1);
+ check_rem_no_wrap!(0xfedc_ba98_7654_3217_u64 as u64 as isize, -1);
+
+ check_rem_no_wrap!(0xfe_u8 as i8, -2);
+ check_rem_no_wrap!(0xfedc_u16 as i16, -2);
+ check_rem_no_wrap!(0xfedc_ba98_u32 as i32, -2);
+ check_rem_no_wrap!(0xfedc_ba98_7654_3217_u64 as i64, -2);
+ check_rem_no_wrap!(0xfedc_ba98_7654_3217_u64 as u64 as isize, -2);
+
+ check_rem_no_wrap!(0xfe_u8 as i8, 2);
+ check_rem_no_wrap!(0xfedc_u16 as i16, 2);
+ check_rem_no_wrap!(0xfedc_ba98_u32 as i32, 2);
+ check_rem_no_wrap!(0xfedc_ba98_7654_3217_u64 as i64, 2);
+ check_rem_no_wrap!(0xfedc_ba98_7654_3217_u64 as u64 as isize, 2);
+
+ check_rem_wraps!(0x80_u8 as i8, -1);
+ check_rem_wraps!(0x8000_u16 as i16, -1);
+ check_rem_wraps!(0x8000_0000_u32 as i32, -1);
+ check_rem_wraps!(0x8000_0000_0000_0000_u64 as i64, -1);
+ match () {
+ #[cfg(target_pointer_width = "32")]
+ () => {
+ check_rem_wraps!(0x8000_0000_u32 as isize, -1);
+ }
+ #[cfg(target_pointer_width = "64")]
+ () => {
+ check_rem_wraps!(0x8000_0000_0000_0000_u64 as isize, -1);
+ }
+ }
+
+ macro_rules! check_neg_no_wrap {
+ ($e:expr) => {
+ assert_eq!(($e).wrapping_neg(), -($e));
+ };
+ }
+ macro_rules! check_neg_wraps {
+ ($e:expr) => {
+ assert_eq!(($e).wrapping_neg(), ($e));
+ };
+ }
+
+ check_neg_no_wrap!(0xfe_u8 as i8);
+ check_neg_no_wrap!(0xfedc_u16 as i16);
+ check_neg_no_wrap!(0xfedc_ba98_u32 as i32);
+ check_neg_no_wrap!(0xfedc_ba98_7654_3217_u64 as i64);
+ check_neg_no_wrap!(0xfedc_ba98_7654_3217_u64 as u64 as isize);
+
+ check_neg_wraps!(0x80_u8 as i8);
+ check_neg_wraps!(0x8000_u16 as i16);
+ check_neg_wraps!(0x8000_0000_u32 as i32);
+ check_neg_wraps!(0x8000_0000_0000_0000_u64 as i64);
+ match () {
+ #[cfg(target_pointer_width = "32")]
+ () => {
+ check_neg_wraps!(0x8000_0000_u32 as isize);
+ }
+ #[cfg(target_pointer_width = "64")]
+ () => {
+ check_neg_wraps!(0x8000_0000_0000_0000_u64 as isize);
+ }
+ }
+}
+
+#[test]
+fn wrapping_const() {
+ // Specifically the wrapping behavior of division and remainder is subtle,
+ // see https://github.com/rust-lang/rust/pull/94512.
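+    // (i32::MIN / -1 is mathematically 2^31, one past i32::MAX, so the
+    // wrapping quotient comes back around to i32::MIN itself, and the
+    // corresponding remainder is 0.)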
+ const _: () = {
+ assert!(i32::MIN.wrapping_div(-1) == i32::MIN);
+ assert!(i32::MIN.wrapping_rem(-1) == 0);
+ };
+}
diff --git a/library/core/tests/ops.rs b/library/core/tests/ops.rs
new file mode 100644
index 000000000..0c81cba35
--- /dev/null
+++ b/library/core/tests/ops.rs
@@ -0,0 +1,240 @@
+mod control_flow;
+
+use core::ops::{Bound, Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, RangeToInclusive};
+use core::ops::{Deref, DerefMut};
+
+// Test the Range structs and syntax.
+
+#[test]
+fn test_range() {
+ let r = Range { start: 2, end: 10 };
+ let mut count = 0;
+ for (i, ri) in r.enumerate() {
+ assert_eq!(ri, i + 2);
+ assert!(ri >= 2 && ri < 10);
+ count += 1;
+ }
+ assert_eq!(count, 8);
+}
+
+#[test]
+fn test_range_from() {
+ let r = RangeFrom { start: 2 };
+ let mut count = 0;
+ for (i, ri) in r.take(10).enumerate() {
+ assert_eq!(ri, i + 2);
+ assert!(ri >= 2 && ri < 12);
+ count += 1;
+ }
+ assert_eq!(count, 10);
+}
+
+#[test]
+fn test_range_to() {
+ // Not much to test.
+ let _ = RangeTo { end: 42 };
+}
+
+#[test]
+fn test_full_range() {
+ // Not much to test.
+ let _ = RangeFull;
+}
+
+#[test]
+fn test_range_inclusive() {
+ let mut r = RangeInclusive::new(1i8, 2);
+ assert_eq!(r.next(), Some(1));
+ assert_eq!(r.next(), Some(2));
+ assert_eq!(r.next(), None);
+
+ r = RangeInclusive::new(127i8, 127);
+ assert_eq!(r.next(), Some(127));
+ assert_eq!(r.next(), None);
+
+ r = RangeInclusive::new(-128i8, -128);
+ assert_eq!(r.next_back(), Some(-128));
+ assert_eq!(r.next_back(), None);
+
+ // degenerate
+ r = RangeInclusive::new(1, -1);
+ assert_eq!(r.size_hint(), (0, Some(0)));
+ assert_eq!(r.next(), None);
+}
+
+#[test]
+fn test_range_to_inclusive() {
+ // Not much to test.
+ let _ = RangeToInclusive { end: 42 };
+}
+
+#[test]
+fn test_range_is_empty() {
+ assert!(!(0.0..10.0).is_empty());
+ assert!((-0.0..0.0).is_empty());
+ assert!((10.0..0.0).is_empty());
+
+ assert!(!(f32::NEG_INFINITY..f32::INFINITY).is_empty());
+ assert!((f32::EPSILON..f32::NAN).is_empty());
+ assert!((f32::NAN..f32::EPSILON).is_empty());
+ assert!((f32::NAN..f32::NAN).is_empty());
+
+ assert!(!(0.0..=10.0).is_empty());
+ assert!(!(-0.0..=0.0).is_empty());
+ assert!((10.0..=0.0).is_empty());
+
+ assert!(!(f32::NEG_INFINITY..=f32::INFINITY).is_empty());
+ assert!((f32::EPSILON..=f32::NAN).is_empty());
+ assert!((f32::NAN..=f32::EPSILON).is_empty());
+ assert!((f32::NAN..=f32::NAN).is_empty());
+}
+
+#[test]
+fn test_bound_cloned_unbounded() {
+ assert_eq!(Bound::<&u32>::Unbounded.cloned(), Bound::Unbounded);
+}
+
+#[test]
+fn test_bound_cloned_included() {
+ assert_eq!(Bound::Included(&3).cloned(), Bound::Included(3));
+}
+
+#[test]
+fn test_bound_cloned_excluded() {
+ assert_eq!(Bound::Excluded(&3).cloned(), Bound::Excluded(3));
+}
+
+#[test]
+#[allow(unused_comparisons)]
+#[allow(unused_mut)]
+fn test_range_syntax() {
+ let mut count = 0;
+ for i in 0_usize..10 {
+ assert!(i >= 0 && i < 10);
+ count += i;
+ }
+ assert_eq!(count, 45);
+
+ let mut count = 0;
+ let mut range = 0_usize..10;
+ for i in range {
+ assert!(i >= 0 && i < 10);
+ count += i;
+ }
+ assert_eq!(count, 45);
+
+ let mut count = 0;
+ let mut rf = 3_usize..;
+ for i in rf.take(10) {
+ assert!(i >= 3 && i < 13);
+ count += i;
+ }
+ assert_eq!(count, 75);
+
+ let _ = 0_usize..4 + 4 - 3;
+
+ fn foo() -> isize {
+ 42
+ }
+ let _ = 0..foo();
+
+ let _ = { &42..&100 }; // references to literals are OK
+ let _ = ..42_usize;
+
+ // Test we can use two different types with a common supertype.
+ let x = &42;
+ {
+ let y = 42;
+ let _ = x..&y;
+ }
+}
+
+#[test]
+#[allow(dead_code)]
+fn test_range_syntax_in_return_statement() {
+ fn return_range_to() -> RangeTo<i32> {
+ return ..1;
+ }
+ fn return_full_range() -> RangeFull {
+ return ..;
+ }
+ // Not much to test.
+}
+
+#[test]
+fn range_structural_match() {
+ // test that all range types can be structurally matched upon
+
+ const RANGE: Range<usize> = 0..1000;
+ match RANGE {
+ RANGE => {}
+ _ => unreachable!(),
+ }
+
+ const RANGE_FROM: RangeFrom<usize> = 0..;
+ match RANGE_FROM {
+ RANGE_FROM => {}
+ _ => unreachable!(),
+ }
+
+ const RANGE_FULL: RangeFull = ..;
+ match RANGE_FULL {
+ RANGE_FULL => {}
+ }
+
+ const RANGE_INCLUSIVE: RangeInclusive<usize> = 0..=999;
+ match RANGE_INCLUSIVE {
+ RANGE_INCLUSIVE => {}
+ _ => unreachable!(),
+ }
+
+ const RANGE_TO: RangeTo<usize> = ..1000;
+ match RANGE_TO {
+ RANGE_TO => {}
+ _ => unreachable!(),
+ }
+
+ const RANGE_TO_INCLUSIVE: RangeToInclusive<usize> = ..=999;
+ match RANGE_TO_INCLUSIVE {
+ RANGE_TO_INCLUSIVE => {}
+ _ => unreachable!(),
+ }
+}
+
+// Test Deref implementations
+
+#[test]
+fn deref_mut_on_ref() {
+ // Test that `&mut T` implements `DerefMut<T>`
+
+ fn inc<T: Deref<Target = isize> + DerefMut>(mut t: T) {
+ *t += 1;
+ }
+
+ let mut x: isize = 5;
+ inc(&mut x);
+ assert_eq!(x, 6);
+}
+
+#[test]
+fn deref_on_ref() {
+ // Test that `&T` and `&mut T` implement `Deref<T>`
+
+ fn deref<U: Copy, T: Deref<Target = U>>(t: T) -> U {
+ *t
+ }
+
+ let x: isize = 3;
+ let y = deref(&x);
+ assert_eq!(y, 3);
+
+ let mut x: isize = 4;
+ let y = deref(&mut x);
+ assert_eq!(y, 4);
+}
+
+#[test]
+#[allow(unreachable_code)]
+fn test_not_never() {
+ if !return () {}
+}
diff --git a/library/core/tests/ops/control_flow.rs b/library/core/tests/ops/control_flow.rs
new file mode 100644
index 000000000..eacfd63a6
--- /dev/null
+++ b/library/core/tests/ops/control_flow.rs
@@ -0,0 +1,18 @@
+use core::intrinsics::discriminant_value;
+use core::ops::ControlFlow;
+
+#[test]
+fn control_flow_discriminants_match_result() {
+ // This isn't stable surface area, but helps keep `?` cheap between them,
+ // even if LLVM can't always take advantage of it right now.
+ // (Sadly Result and Option are inconsistent, so ControlFlow can't match both.)
+
+ assert_eq!(
+ discriminant_value(&ControlFlow::<i32, i32>::Break(3)),
+ discriminant_value(&Result::<i32, i32>::Err(3)),
+ );
+ assert_eq!(
+ discriminant_value(&ControlFlow::<i32, i32>::Continue(3)),
+ discriminant_value(&Result::<i32, i32>::Ok(3)),
+ );
+}
diff --git a/library/core/tests/option.rs b/library/core/tests/option.rs
new file mode 100644
index 000000000..9f5e537dc
--- /dev/null
+++ b/library/core/tests/option.rs
@@ -0,0 +1,555 @@
+use core::cell::Cell;
+use core::clone::Clone;
+use core::mem;
+use core::ops::DerefMut;
+use core::option::*;
+
+#[test]
+fn test_get_ptr() {
+ unsafe {
+ let x: Box<_> = Box::new(0);
+ let addr_x: *const isize = mem::transmute(&*x);
+ let opt = Some(x);
+ let y = opt.unwrap();
+ let addr_y: *const isize = mem::transmute(&*y);
+ assert_eq!(addr_x, addr_y);
+ }
+}
+
+#[test]
+fn test_get_str() {
+ let x = "test".to_string();
+ let addr_x = x.as_ptr();
+ let opt = Some(x);
+ let y = opt.unwrap();
+ let addr_y = y.as_ptr();
+ assert_eq!(addr_x, addr_y);
+}
+
+#[test]
+fn test_get_resource() {
+ use core::cell::RefCell;
+ use std::rc::Rc;
+
+ struct R {
+ i: Rc<RefCell<isize>>,
+ }
+
+ impl Drop for R {
+ fn drop(&mut self) {
+ let ii = &*self.i;
+ let i = *ii.borrow();
+ *ii.borrow_mut() = i + 1;
+ }
+ }
+
+ fn r(i: Rc<RefCell<isize>>) -> R {
+ R { i }
+ }
+
+ let i = Rc::new(RefCell::new(0));
+ {
+ let x = r(i.clone());
+ let opt = Some(x);
+ let _y = opt.unwrap();
+ }
+ assert_eq!(*i.borrow(), 1);
+}
+
+#[test]
+fn test_option_dance() {
+ let x = Some(());
+ let mut y = Some(5);
+ let mut y2 = 0;
+ for _x in x {
+ y2 = y.take().unwrap();
+ }
+ assert_eq!(y2, 5);
+ assert!(y.is_none());
+}
+
+#[test]
+#[should_panic]
+fn test_option_too_much_dance() {
+ struct A;
+ let mut y = Some(A);
+ let _y2 = y.take().unwrap();
+ let _y3 = y.take().unwrap();
+}
+
+#[test]
+fn test_and() {
+ let x: Option<isize> = Some(1);
+ assert_eq!(x.and(Some(2)), Some(2));
+ assert_eq!(x.and(None::<isize>), None);
+
+ let x: Option<isize> = None;
+ assert_eq!(x.and(Some(2)), None);
+ assert_eq!(x.and(None::<isize>), None);
+
+ const FOO: Option<isize> = Some(1);
+ const A: Option<isize> = FOO.and(Some(2));
+ const B: Option<isize> = FOO.and(None);
+ assert_eq!(A, Some(2));
+ assert_eq!(B, None);
+
+ const BAR: Option<isize> = None;
+ const C: Option<isize> = BAR.and(Some(2));
+ const D: Option<isize> = BAR.and(None);
+ assert_eq!(C, None);
+ assert_eq!(D, None);
+}
+
+#[test]
+fn test_and_then() {
+ const fn plus_one(x: isize) -> Option<isize> {
+ Some(x + 1)
+ }
+
+ const fn none(_: isize) -> Option<isize> {
+ None
+ }
+
+ let x: Option<isize> = Some(1);
+ assert_eq!(x.and_then(plus_one), Some(2));
+ assert_eq!(x.and_then(none), None);
+
+ let x: Option<isize> = None;
+ assert_eq!(x.and_then(plus_one), None);
+ assert_eq!(x.and_then(none), None);
+
+ const FOO: Option<isize> = Some(1);
+ const A: Option<isize> = FOO.and_then(plus_one);
+ const B: Option<isize> = FOO.and_then(none);
+ assert_eq!(A, Some(2));
+ assert_eq!(B, None);
+
+ const BAR: Option<isize> = None;
+ const C: Option<isize> = BAR.and_then(plus_one);
+ const D: Option<isize> = BAR.and_then(none);
+ assert_eq!(C, None);
+ assert_eq!(D, None);
+}
+
+#[test]
+fn test_or() {
+ let x: Option<isize> = Some(1);
+ assert_eq!(x.or(Some(2)), Some(1));
+ assert_eq!(x.or(None), Some(1));
+
+ let x: Option<isize> = None;
+ assert_eq!(x.or(Some(2)), Some(2));
+ assert_eq!(x.or(None), None);
+
+ const FOO: Option<isize> = Some(1);
+ const A: Option<isize> = FOO.or(Some(2));
+ const B: Option<isize> = FOO.or(None);
+ assert_eq!(A, Some(1));
+ assert_eq!(B, Some(1));
+
+ const BAR: Option<isize> = None;
+ const C: Option<isize> = BAR.or(Some(2));
+ const D: Option<isize> = BAR.or(None);
+ assert_eq!(C, Some(2));
+ assert_eq!(D, None);
+}
+
+#[test]
+fn test_or_else() {
+ const fn two() -> Option<isize> {
+ Some(2)
+ }
+
+ const fn none() -> Option<isize> {
+ None
+ }
+
+ let x: Option<isize> = Some(1);
+ assert_eq!(x.or_else(two), Some(1));
+ assert_eq!(x.or_else(none), Some(1));
+
+ let x: Option<isize> = None;
+ assert_eq!(x.or_else(two), Some(2));
+ assert_eq!(x.or_else(none), None);
+
+ const FOO: Option<isize> = Some(1);
+ const A: Option<isize> = FOO.or_else(two);
+ const B: Option<isize> = FOO.or_else(none);
+ assert_eq!(A, Some(1));
+ assert_eq!(B, Some(1));
+
+ const BAR: Option<isize> = None;
+ const C: Option<isize> = BAR.or_else(two);
+ const D: Option<isize> = BAR.or_else(none);
+ assert_eq!(C, Some(2));
+ assert_eq!(D, None);
+}
+
+#[test]
+fn test_unwrap() {
+ assert_eq!(Some(1).unwrap(), 1);
+ let s = Some("hello".to_string()).unwrap();
+ assert_eq!(s, "hello");
+}
+
+#[test]
+#[should_panic]
+fn test_unwrap_panic1() {
+ let x: Option<isize> = None;
+ x.unwrap();
+}
+
+#[test]
+#[should_panic]
+fn test_unwrap_panic2() {
+ let x: Option<String> = None;
+ x.unwrap();
+}
+
+#[test]
+fn test_unwrap_or() {
+ let x: Option<isize> = Some(1);
+ assert_eq!(x.unwrap_or(2), 1);
+
+ let x: Option<isize> = None;
+ assert_eq!(x.unwrap_or(2), 2);
+
+ const A: isize = Some(1).unwrap_or(2);
+ const B: isize = None.unwrap_or(2);
+ assert_eq!(A, 1);
+ assert_eq!(B, 2);
+}
+
+#[test]
+fn test_unwrap_or_else() {
+ const fn two() -> isize {
+ 2
+ }
+
+ let x: Option<isize> = Some(1);
+ assert_eq!(x.unwrap_or_else(two), 1);
+
+ let x: Option<isize> = None;
+ assert_eq!(x.unwrap_or_else(two), 2);
+
+ const A: isize = Some(1).unwrap_or_else(two);
+ const B: isize = None.unwrap_or_else(two);
+ assert_eq!(A, 1);
+ assert_eq!(B, 2);
+}
+
+#[test]
+fn test_unwrap_unchecked() {
+ assert_eq!(unsafe { Some(1).unwrap_unchecked() }, 1);
+ let s = unsafe { Some("hello".to_string()).unwrap_unchecked() };
+ assert_eq!(s, "hello");
+}
+
+#[test]
+fn test_iter() {
+ let val = 5;
+
+ let x = Some(val);
+ let mut it = x.iter();
+
+ assert_eq!(it.size_hint(), (1, Some(1)));
+ assert_eq!(it.next(), Some(&val));
+ assert_eq!(it.size_hint(), (0, Some(0)));
+ assert!(it.next().is_none());
+
+ let mut it = (&x).into_iter();
+ assert_eq!(it.next(), Some(&val));
+}
+
+#[test]
+fn test_mut_iter() {
+ let mut val = 5;
+ let new_val = 11;
+
+ let mut x = Some(val);
+ {
+ let mut it = x.iter_mut();
+
+ assert_eq!(it.size_hint(), (1, Some(1)));
+
+ match it.next() {
+ Some(interior) => {
+ assert_eq!(*interior, val);
+ *interior = new_val;
+ }
+ None => assert!(false),
+ }
+
+ assert_eq!(it.size_hint(), (0, Some(0)));
+ assert!(it.next().is_none());
+ }
+ assert_eq!(x, Some(new_val));
+
+ let mut y = Some(val);
+ let mut it = (&mut y).into_iter();
+ assert_eq!(it.next(), Some(&mut val));
+}
+
+#[test]
+fn test_ord() {
+ let small = Some(1.0f64);
+ let big = Some(5.0f64);
+ let nan = Some(0.0f64 / 0.0);
+ assert!(!(nan < big));
+ assert!(!(nan > big));
+ assert!(small < big);
+ assert!(None < big);
+ assert!(big > None);
+}
+
+#[test]
+fn test_collect() {
+ let v: Option<Vec<isize>> = (0..0).map(|_| Some(0)).collect();
+ assert!(v == Some(vec![]));
+
+ let v: Option<Vec<isize>> = (0..3).map(|x| Some(x)).collect();
+ assert!(v == Some(vec![0, 1, 2]));
+
+ let v: Option<Vec<isize>> = (0..3).map(|x| if x > 1 { None } else { Some(x) }).collect();
+ assert!(v == None);
+
+ // test that it does not take more elements than it needs
+ let mut functions: [Box<dyn Fn() -> Option<()>>; 3] =
+ [Box::new(|| Some(())), Box::new(|| None), Box::new(|| panic!())];
+
+ let v: Option<Vec<()>> = functions.iter_mut().map(|f| (*f)()).collect();
+
+ assert!(v == None);
+}
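The short-circuiting behavior noted in the comment above can also be made visible with a call counter; a minimal sketch (hypothetical test, not part of this patch):

```rust
#[test]
fn collect_short_circuits_sketch() {
    // Collecting into `Option` stops at the first `None`.
    let mut calls = 0;
    let v: Option<Vec<i32>> = (0..10)
        .map(|x| {
            calls += 1;
            if x == 2 { None } else { Some(x) }
        })
        .collect();
    assert_eq!(v, None);
    assert_eq!(calls, 3); // saw 0, 1, then the failing 2 -- nothing after
}
```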
+
+#[test]
+fn test_copied() {
+ let val = 1;
+ let val_ref = &val;
+ let opt_none: Option<&'static u32> = None;
+ let opt_ref = Some(&val);
+ let opt_ref_ref = Some(&val_ref);
+
+ // None works
+ assert_eq!(opt_none.clone(), None);
+ assert_eq!(opt_none.copied(), None);
+
+ // Immutable ref works
+ assert_eq!(opt_ref.clone(), Some(&val));
+ assert_eq!(opt_ref.copied(), Some(1));
+
+ // Double Immutable ref works
+ assert_eq!(opt_ref_ref.clone(), Some(&val_ref));
+ assert_eq!(opt_ref_ref.clone().copied(), Some(&val));
+ assert_eq!(opt_ref_ref.copied().copied(), Some(1));
+}
+
+#[test]
+fn test_cloned() {
+ let val = 1;
+ let val_ref = &val;
+ let opt_none: Option<&'static u32> = None;
+ let opt_ref = Some(&val);
+ let opt_ref_ref = Some(&val_ref);
+
+ // None works
+ assert_eq!(opt_none.clone(), None);
+ assert_eq!(opt_none.cloned(), None);
+
+ // Immutable ref works
+ assert_eq!(opt_ref.clone(), Some(&val));
+ assert_eq!(opt_ref.cloned(), Some(1));
+
+ // Double Immutable ref works
+ assert_eq!(opt_ref_ref.clone(), Some(&val_ref));
+ assert_eq!(opt_ref_ref.clone().cloned(), Some(&val));
+ assert_eq!(opt_ref_ref.cloned().cloned(), Some(1));
+}
+
+#[test]
+fn test_try() {
+ fn try_option_some() -> Option<u8> {
+ let val = Some(1)?;
+ Some(val)
+ }
+ assert_eq!(try_option_some(), Some(1));
+
+ fn try_option_none() -> Option<u8> {
+ let val = None?;
+ Some(val)
+ }
+ assert_eq!(try_option_none(), None);
+}
+
+#[test]
+fn test_option_as_deref() {
+ // Some: &Option<T: Deref>::Some(T) -> Option<&T::Deref::Target>::Some(&*T)
+ let ref_option = &Some(&42);
+ assert_eq!(ref_option.as_deref(), Some(&42));
+
+ let ref_option = &Some(String::from("a result"));
+ assert_eq!(ref_option.as_deref(), Some("a result"));
+
+ let ref_option = &Some(vec![1, 2, 3, 4, 5]);
+ assert_eq!(ref_option.as_deref(), Some([1, 2, 3, 4, 5].as_slice()));
+
+ // None: &Option<T: Deref>::None -> None
+ let ref_option: &Option<&i32> = &None;
+ assert_eq!(ref_option.as_deref(), None);
+}
+
+#[test]
+fn test_option_as_deref_mut() {
+ // Some: &mut Option<T: Deref>::Some(T) -> Option<&mut T::Deref::Target>::Some(&mut *T)
+ let mut val = 42;
+ let ref_option = &mut Some(&mut val);
+ assert_eq!(ref_option.as_deref_mut(), Some(&mut 42));
+
+ let ref_option = &mut Some(String::from("a result"));
+ assert_eq!(ref_option.as_deref_mut(), Some(String::from("a result").deref_mut()));
+
+ let ref_option = &mut Some(vec![1, 2, 3, 4, 5]);
+ assert_eq!(ref_option.as_deref_mut(), Some([1, 2, 3, 4, 5].as_mut_slice()));
+
+ // None: &mut Option<T: Deref>::None -> None
+ let ref_option: &mut Option<&mut i32> = &mut None;
+ assert_eq!(ref_option.as_deref_mut(), None);
+}
+
+#[test]
+fn test_replace() {
+ let mut x = Some(2);
+ let old = x.replace(5);
+
+ assert_eq!(x, Some(5));
+ assert_eq!(old, Some(2));
+
+ let mut x = None;
+ let old = x.replace(3);
+
+ assert_eq!(x, Some(3));
+ assert_eq!(old, None);
+}
+
+#[test]
+fn option_const() {
+ // test that the methods of `Option` are usable in a const context
+
+ const OPTION: Option<usize> = Some(32);
+ assert_eq!(OPTION, Some(32));
+
+ const OPTION_FROM: Option<usize> = Option::from(32);
+ assert_eq!(OPTION_FROM, Some(32));
+
+ const REF: Option<&usize> = OPTION.as_ref();
+ assert_eq!(REF, Some(&32));
+
+ const REF_FROM: Option<&usize> = Option::from(&OPTION);
+ assert_eq!(REF_FROM, Some(&32));
+
+ const IS_SOME: bool = OPTION.is_some();
+ assert!(IS_SOME);
+
+ const IS_NONE: bool = OPTION.is_none();
+ assert!(!IS_NONE);
+
+ const COPIED: Option<usize> = OPTION.as_ref().copied();
+ assert_eq!(COPIED, OPTION);
+}
+
+#[test]
+const fn option_const_mut() {
+ // test that the methods of `Option` that take mutable references are usable in a const context
+
+ let mut option: Option<usize> = Some(32);
+
+ let _take = option.take();
+ let _replace = option.replace(42);
+
+ {
+ let as_mut = option.as_mut();
+ match as_mut {
+ Some(v) => *v = 32,
+ None => unreachable!(),
+ }
+ }
+
+ {
+ let as_mut: Option<&mut usize> = Option::from(&mut option);
+ match as_mut {
+ Some(v) => *v = 42,
+ None => unreachable!(),
+ }
+ }
+}
+
+#[test]
+fn test_unwrap_drop() {
+ struct Dtor<'a> {
+ x: &'a Cell<isize>,
+ }
+
+ impl<'a> std::ops::Drop for Dtor<'a> {
+ fn drop(&mut self) {
+ self.x.set(self.x.get() - 1);
+ }
+ }
+
+ fn unwrap<T>(o: Option<T>) -> T {
+ match o {
+ Some(v) => v,
+ None => panic!(),
+ }
+ }
+
+ let x = &Cell::new(1);
+
+ {
+ let b = Some(Dtor { x });
+ let _c = unwrap(b);
+ }
+
+ assert_eq!(x.get(), 0);
+}
+
+#[test]
+fn option_ext() {
+ let thing = "{{ f }}";
+ let f = thing.find("{{");
+
+ if f.is_none() {
+ println!("None!");
+ }
+}
+
+#[test]
+fn zip_options() {
+ let x = Some(10);
+ let y = Some("foo");
+ let z: Option<usize> = None;
+
+ assert_eq!(x.zip(y), Some((10, "foo")));
+ assert_eq!(x.zip(z), None);
+ assert_eq!(z.zip(x), None);
+}
+
+#[test]
+fn unzip_options() {
+ let x = Some((10, "foo"));
+ let y = None::<(bool, i32)>;
+
+ assert_eq!(x.unzip(), (Some(10), Some("foo")));
+ assert_eq!(y.unzip(), (None, None));
+}
+
+#[test]
+fn zip_unzip_roundtrip() {
+ let x = Some(10);
+ let y = Some("foo");
+
+ let z = x.zip(y);
+ assert_eq!(z, Some((10, "foo")));
+
+ let a = z.unzip();
+ assert_eq!(a, (x, y));
+}
diff --git a/library/core/tests/pattern.rs b/library/core/tests/pattern.rs
new file mode 100644
index 000000000..d4bec996d
--- /dev/null
+++ b/library/core/tests/pattern.rs
@@ -0,0 +1,503 @@
+use std::str::pattern::*;
+
+// This macro makes it easier to write
+// tests that do a series of iterations
+macro_rules! search_asserts {
+ ($haystack:expr, $needle:expr, $testname:expr, [$($func:ident),*], $result:expr) => {
+ let mut searcher = $needle.into_searcher($haystack);
+ let arr = [$( Step::from(searcher.$func()) ),*];
+ assert_eq!(&arr[..], &$result, $testname);
+ }
+}
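For readers unfamiliar with `macro_rules!`, here is roughly what one invocation expands to, hand-expanded as a sketch (hypothetical test name; relies on the unstable `pattern` API imported at the top of this file and the `Step` type defined just below):

```rust
#[test]
fn search_asserts_expansion_sketch() {
    // Hand-expanded equivalent of:
    // search_asserts!("abcdeabcd", 'a', "demo", [next, next],
    //                 [Matches(0, 1), Rejects(1, 2)]);
    let mut searcher = 'a'.into_searcher("abcdeabcd");
    let arr = [Step::from(searcher.next()), Step::from(searcher.next())];
    assert_eq!(&arr[..], &[Matches(0, 1), Rejects(1, 2)], "demo");
}
```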
+
+/// Combined enum for the results of next() and next_match()/next_reject()
+#[derive(Debug, PartialEq, Eq)]
+enum Step {
+ // variant names purposely chosen to
+ // be the same length for easy alignment
+ Matches(usize, usize),
+ Rejects(usize, usize),
+ InRange(usize, usize),
+ Done,
+}
+
+use self::Step::*;
+
+impl From<SearchStep> for Step {
+ fn from(x: SearchStep) -> Self {
+ match x {
+ SearchStep::Match(a, b) => Matches(a, b),
+ SearchStep::Reject(a, b) => Rejects(a, b),
+ SearchStep::Done => Done,
+ }
+ }
+}
+
+impl From<Option<(usize, usize)>> for Step {
+ fn from(x: Option<(usize, usize)>) -> Self {
+ match x {
+ Some((a, b)) => InRange(a, b),
+ None => Done,
+ }
+ }
+}
+
+// FIXME(Manishearth) these tests focus on single-character searching (CharSearcher)
+// and on next()/next_match(), not next_reject(). This is because
+// the memchr changes make next_match() for single chars complex, while next_reject()
+// continues to use next() under the hood. We should add more test cases for all
+// of these, as well as tests for StrSearcher and higher-level tests for str::find() etc.
+
+#[test]
+fn test_simple_iteration() {
+ search_asserts!(
+ "abcdeabcd",
+ 'a',
+ "forward iteration for ASCII string",
+ // a b c d e a b c d EOF
+ [next, next, next, next, next, next, next, next, next, next],
+ [
+ Matches(0, 1),
+ Rejects(1, 2),
+ Rejects(2, 3),
+ Rejects(3, 4),
+ Rejects(4, 5),
+ Matches(5, 6),
+ Rejects(6, 7),
+ Rejects(7, 8),
+ Rejects(8, 9),
+ Done
+ ]
+ );
+
+ search_asserts!(
+ "abcdeabcd",
+ 'a',
+ "reverse iteration for ASCII string",
+ // d c b a e d c b a EOF
+ [
+ next_back, next_back, next_back, next_back, next_back, next_back, next_back, next_back,
+ next_back, next_back
+ ],
+ [
+ Rejects(8, 9),
+ Rejects(7, 8),
+ Rejects(6, 7),
+ Matches(5, 6),
+ Rejects(4, 5),
+ Rejects(3, 4),
+ Rejects(2, 3),
+ Rejects(1, 2),
+ Matches(0, 1),
+ Done
+ ]
+ );
+
+ search_asserts!(
+ "我爱我的猫",
+ '我',
+ "forward iteration for Chinese string",
+ // 我 爱 我 的 猫 EOF
+ [next, next, next, next, next, next],
+ [Matches(0, 3), Rejects(3, 6), Matches(6, 9), Rejects(9, 12), Rejects(12, 15), Done]
+ );
+
+ search_asserts!(
+ "我的猫说meow",
+ 'm',
+ "forward iteration for mixed string",
+ // 我 的 猫 说 m e o w EOF
+ [next, next, next, next, next, next, next, next, next],
+ [
+ Rejects(0, 3),
+ Rejects(3, 6),
+ Rejects(6, 9),
+ Rejects(9, 12),
+ Matches(12, 13),
+ Rejects(13, 14),
+ Rejects(14, 15),
+ Rejects(15, 16),
+ Done
+ ]
+ );
+
+ search_asserts!(
+ "我的猫说meow",
+ '猫',
+ "reverse iteration for mixed string",
+ // w o e m 说 猫 的 我 EOF
+ [
+ next_back, next_back, next_back, next_back, next_back, next_back, next_back, next_back,
+ next_back
+ ],
+ [
+ Rejects(15, 16),
+ Rejects(14, 15),
+ Rejects(13, 14),
+ Rejects(12, 13),
+ Rejects(9, 12),
+ Matches(6, 9),
+ Rejects(3, 6),
+ Rejects(0, 3),
+ Done
+ ]
+ );
+}
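These iteration cases implicitly exercise the `Searcher` contract: the `Match`/`Reject` ranges yielded by `next()` are adjacent, non-overlapping, and cover the whole haystack. A sketch of that invariant as a standalone check (hypothetical test, not part of this patch):

```rust
#[test]
fn searcher_steps_tile_haystack_sketch() {
    let haystack = "abcdeabcd";
    let mut searcher = 'a'.into_searcher(haystack);
    let mut pos = 0;
    loop {
        match searcher.next() {
            SearchStep::Match(a, b) | SearchStep::Reject(a, b) => {
                assert_eq!(a, pos); // no gap, no overlap
                pos = b;
            }
            SearchStep::Done => break,
        }
    }
    assert_eq!(pos, haystack.len()); // the ranges tile the haystack
}
```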
+
+#[test]
+fn test_simple_search() {
+ search_asserts!(
+ "abcdeabcdeabcde",
+ 'a',
+ "next_match for ASCII string",
+ [next_match, next_match, next_match, next_match],
+ [InRange(0, 1), InRange(5, 6), InRange(10, 11), Done]
+ );
+
+ search_asserts!(
+ "abcdeabcdeabcde",
+ 'a',
+ "next_match_back for ASCII string",
+ [next_match_back, next_match_back, next_match_back, next_match_back],
+ [InRange(10, 11), InRange(5, 6), InRange(0, 1), Done]
+ );
+
+ search_asserts!(
+ "abcdeab",
+ 'a',
+ "next_reject for ASCII string",
+ [next_reject, next_reject, next_match, next_reject, next_reject],
+ [InRange(1, 2), InRange(2, 3), InRange(5, 6), InRange(6, 7), Done]
+ );
+
+ search_asserts!(
+ "abcdeabcdeabcde",
+ 'a',
+ "next_reject_back for ASCII string",
+ [
+ next_reject_back,
+ next_reject_back,
+ next_match_back,
+ next_reject_back,
+ next_reject_back,
+ next_reject_back
+ ],
+ [
+ InRange(14, 15),
+ InRange(13, 14),
+ InRange(10, 11),
+ InRange(9, 10),
+ InRange(8, 9),
+ InRange(7, 8)
+ ]
+ );
+}
+
+// Á, 각, ก, 😁 all end in 0x81
+// 🁀, ᘀ do not end in 0x81 but contain the byte
+// ꁁ has 0x81 as its second and third bytes.
+//
+// The memchr-using implementation of next_match
+// and next_match_back temporarily violate
+// the property that the search is always on a unicode boundary,
+// which is fine as long as this never reaches next() or next_back().
+// So we test if next() is correct after each next_match() as well.
+const STRESS: &str = "Áa🁀bÁꁁfg😁각กᘀ각aÁ각ꁁก😁a";
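The byte ranges asserted in the stress tests below follow directly from each character's UTF-8 length and can be recomputed on stable Rust; a small sketch (the helper name is illustrative):

```rust
#[test]
fn stress_indices_recompute_sketch() {
    fn char_ranges(s: &str) -> Vec<(usize, usize)> {
        s.char_indices().map(|(i, c)| (i, i + c.len_utf8())).collect()
    }
    // 2-byte, 1-byte, and 4-byte characters:
    assert_eq!(char_ranges("Áa🁀"), [(0, 2), (2, 3), (3, 7)]);
}
```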
+
+#[test]
+fn test_stress_indices() {
+ // This isn't really a test; it's documentation of the byte indices of each character in the stress-test string.
+
+ search_asserts!(
+ STRESS,
+ 'x',
+ "Indices of characters in stress test",
+ [
+ next, next, next, next, next, next, next, next, next, next, next, next, next, next,
+ next, next, next, next, next, next, next
+ ],
+ [
+ Rejects(0, 2), // Á
+ Rejects(2, 3), // a
+ Rejects(3, 7), // 🁀
+ Rejects(7, 8), // b
+ Rejects(8, 10), // Á
+ Rejects(10, 13), // ꁁ
+ Rejects(13, 14), // f
+ Rejects(14, 15), // g
+ Rejects(15, 19), // 😁
+ Rejects(19, 22), // 각
+ Rejects(22, 25), // ก
+ Rejects(25, 28), // ᘀ
+ Rejects(28, 31), // 각
+ Rejects(31, 32), // a
+ Rejects(32, 34), // Á
+ Rejects(34, 37), // 각
+ Rejects(37, 40), // ꁁ
+ Rejects(40, 43), // ก
+ Rejects(43, 47), // 😁
+ Rejects(47, 48), // a
+ Done
+ ]
+ );
+}
+
+#[test]
+fn test_forward_search_shared_bytes() {
+ search_asserts!(
+ STRESS,
+ 'Á',
+ "Forward search for two-byte Latin character",
+ [next_match, next_match, next_match, next_match],
+ [InRange(0, 2), InRange(8, 10), InRange(32, 34), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ 'Á',
+ "Forward search for two-byte Latin character; check if next() still works",
+ [next_match, next, next_match, next, next_match, next, next_match],
+ [
+ InRange(0, 2),
+ Rejects(2, 3),
+ InRange(8, 10),
+ Rejects(10, 13),
+ InRange(32, 34),
+ Rejects(34, 37),
+ Done
+ ]
+ );
+
+ search_asserts!(
+ STRESS,
+ '각',
+ "Forward search for three-byte Hangul character",
+ [next_match, next, next_match, next_match, next_match],
+ [InRange(19, 22), Rejects(22, 25), InRange(28, 31), InRange(34, 37), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ '각',
+ "Forward search for three-byte Hangul character; check if next() still works",
+ [next_match, next, next_match, next, next_match, next, next_match],
+ [
+ InRange(19, 22),
+ Rejects(22, 25),
+ InRange(28, 31),
+ Rejects(31, 32),
+ InRange(34, 37),
+ Rejects(37, 40),
+ Done
+ ]
+ );
+
+ search_asserts!(
+ STRESS,
+ 'ก',
+ "Forward search for three-byte Thai character",
+ [next_match, next, next_match, next, next_match],
+ [InRange(22, 25), Rejects(25, 28), InRange(40, 43), Rejects(43, 47), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ 'ก',
+ "Forward search for three-byte Thai character; check if next() still works",
+ [next_match, next, next_match, next, next_match],
+ [InRange(22, 25), Rejects(25, 28), InRange(40, 43), Rejects(43, 47), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ '😁',
+ "Forward search for four-byte emoji",
+ [next_match, next, next_match, next, next_match],
+ [InRange(15, 19), Rejects(19, 22), InRange(43, 47), Rejects(47, 48), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ '😁',
+ "Forward search for four-byte emoji; check if next() still works",
+ [next_match, next, next_match, next, next_match],
+ [InRange(15, 19), Rejects(19, 22), InRange(43, 47), Rejects(47, 48), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ 'ꁁ',
+ "Forward search for three-byte Yi character with repeated bytes",
+ [next_match, next, next_match, next, next_match],
+ [InRange(10, 13), Rejects(13, 14), InRange(37, 40), Rejects(40, 43), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ 'ꁁ',
+ "Forward search for three-byte Yi character with repeated bytes; check if next() still works",
+ [next_match, next, next_match, next, next_match],
+ [InRange(10, 13), Rejects(13, 14), InRange(37, 40), Rejects(40, 43), Done]
+ );
+}
+
+#[test]
+fn test_reverse_search_shared_bytes() {
+ search_asserts!(
+ STRESS,
+ 'Á',
+ "Reverse search for two-byte Latin character",
+ [next_match_back, next_match_back, next_match_back, next_match_back],
+ [InRange(32, 34), InRange(8, 10), InRange(0, 2), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ 'Á',
+ "Reverse search for two-byte Latin character; check if next_back() still works",
+ [next_match_back, next_back, next_match_back, next_back, next_match_back, next_back],
+ [InRange(32, 34), Rejects(31, 32), InRange(8, 10), Rejects(7, 8), InRange(0, 2), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ '각',
+ "Reverse search for three-byte Hangul character",
+ [next_match_back, next_back, next_match_back, next_match_back, next_match_back],
+ [InRange(34, 37), Rejects(32, 34), InRange(28, 31), InRange(19, 22), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ '각',
+ "Reverse search for three-byte Hangul character; check if next_back() still works",
+ [
+ next_match_back,
+ next_back,
+ next_match_back,
+ next_back,
+ next_match_back,
+ next_back,
+ next_match_back
+ ],
+ [
+ InRange(34, 37),
+ Rejects(32, 34),
+ InRange(28, 31),
+ Rejects(25, 28),
+ InRange(19, 22),
+ Rejects(15, 19),
+ Done
+ ]
+ );
+
+ search_asserts!(
+ STRESS,
+ 'ก',
+ "Reverse search for three-byte Thai character",
+ [next_match_back, next_back, next_match_back, next_back, next_match_back],
+ [InRange(40, 43), Rejects(37, 40), InRange(22, 25), Rejects(19, 22), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ 'ก',
+ "Reverse search for three-byte Thai character; check if next_back() still works",
+ [next_match_back, next_back, next_match_back, next_back, next_match_back],
+ [InRange(40, 43), Rejects(37, 40), InRange(22, 25), Rejects(19, 22), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ '😁',
+ "Reverse search for four-byte emoji",
+ [next_match_back, next_back, next_match_back, next_back, next_match_back],
+ [InRange(43, 47), Rejects(40, 43), InRange(15, 19), Rejects(14, 15), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ '😁',
+ "Reverse search for four-byte emoji; check if next_back() still works",
+ [next_match_back, next_back, next_match_back, next_back, next_match_back],
+ [InRange(43, 47), Rejects(40, 43), InRange(15, 19), Rejects(14, 15), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ 'ꁁ',
+ "Reverse search for three-byte Yi character with repeated bytes",
+ [next_match_back, next_back, next_match_back, next_back, next_match_back],
+ [InRange(37, 40), Rejects(34, 37), InRange(10, 13), Rejects(8, 10), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ 'ꁁ',
+ "Reverse search for three-byte Yi character with repeated bytes; check if next_back() still works",
+ [next_match_back, next_back, next_match_back, next_back, next_match_back],
+ [InRange(37, 40), Rejects(34, 37), InRange(10, 13), Rejects(8, 10), Done]
+ );
+}
+
+#[test]
+fn double_ended_regression_test() {
+ // https://github.com/rust-lang/rust/issues/47175
+ // Ensures that double ended searching comes to a convergence
+ search_asserts!(
+ "abcdeabcdeabcde",
+ 'a',
+ "alternating double ended search",
+ [next_match, next_match_back, next_match, next_match_back],
+ [InRange(0, 1), InRange(10, 11), InRange(5, 6), Done]
+ );
+ search_asserts!(
+ "abcdeabcdeabcde",
+ 'a',
+ "triple double ended search for a",
+ [next_match, next_match_back, next_match_back, next_match_back],
+ [InRange(0, 1), InRange(10, 11), InRange(5, 6), Done]
+ );
+ search_asserts!(
+ "abcdeabcdeabcde",
+ 'd',
+ "triple double ended search for d",
+ [next_match, next_match_back, next_match_back, next_match_back],
+ [InRange(3, 4), InRange(13, 14), InRange(8, 9), Done]
+ );
+ search_asserts!(
+ STRESS,
+ 'Á',
+ "Double ended search for two-byte Latin character",
+ [next_match, next_match_back, next_match, next_match_back],
+ [InRange(0, 2), InRange(32, 34), InRange(8, 10), Done]
+ );
+ search_asserts!(
+ STRESS,
+ '각',
+ "Reverse double ended search for three-byte Hangul character",
+ [next_match_back, next_back, next_match, next, next_match_back, next_match],
+ [InRange(34, 37), Rejects(32, 34), InRange(19, 22), Rejects(22, 25), InRange(28, 31), Done]
+ );
+ search_asserts!(
+ STRESS,
+ 'ก',
+ "Double ended search for three-byte Thai character",
+ [next_match, next_back, next, next_match_back, next_match],
+ [InRange(22, 25), Rejects(47, 48), Rejects(25, 28), InRange(40, 43), Done]
+ );
+ search_asserts!(
+ STRESS,
+ '😁',
+ "Double ended search for four-byte emoji",
+ [next_match_back, next, next_match, next_back, next_match],
+ [InRange(43, 47), Rejects(0, 2), InRange(15, 19), Rejects(40, 43), Done]
+ );
+ search_asserts!(
+ STRESS,
+ 'ꁁ',
+ "Double ended search for three-byte Yi character with repeated bytes",
+ [next_match, next, next_match_back, next_back, next_match],
+ [InRange(10, 13), Rejects(13, 14), InRange(37, 40), Rejects(34, 37), Done]
+ );
+}
diff --git a/library/core/tests/pin.rs b/library/core/tests/pin.rs
new file mode 100644
index 000000000..6f617c8d0
--- /dev/null
+++ b/library/core/tests/pin.rs
@@ -0,0 +1,31 @@
+use core::pin::Pin;
+
+#[test]
+fn pin_const() {
+ // test that the methods of `Pin` are usable in a const context
+
+ const POINTER: &'static usize = &2;
+
+ const PINNED: Pin<&'static usize> = Pin::new(POINTER);
+ const PINNED_UNCHECKED: Pin<&'static usize> = unsafe { Pin::new_unchecked(POINTER) };
+ assert_eq!(PINNED_UNCHECKED, PINNED);
+
+ const INNER: &'static usize = Pin::into_inner(PINNED);
+ assert_eq!(INNER, POINTER);
+
+ const INNER_UNCHECKED: &'static usize = unsafe { Pin::into_inner_unchecked(PINNED) };
+ assert_eq!(INNER_UNCHECKED, POINTER);
+
+ const REF: &'static usize = PINNED.get_ref();
+ assert_eq!(REF, POINTER);
+
+ // Note: `pin_mut_const` tests that the methods of `Pin<&mut T>` are usable in a const context.
+ // A const fn is used because `&mut` is not (yet) usable in constants.
+ const fn pin_mut_const() {
+ let _ = Pin::new(&mut 2).into_ref();
+ let _ = Pin::new(&mut 2).get_mut();
+ let _ = unsafe { Pin::new(&mut 2).get_unchecked_mut() };
+ }
+
+ pin_mut_const();
+}
diff --git a/library/core/tests/pin_macro.rs b/library/core/tests/pin_macro.rs
new file mode 100644
index 000000000..79c8c166c
--- /dev/null
+++ b/library/core/tests/pin_macro.rs
@@ -0,0 +1,33 @@
+// edition:2021
+use core::{
+ marker::PhantomPinned,
+ mem::{drop as stuff, transmute},
+ pin::{pin, Pin},
+};
+
+#[test]
+fn basic() {
+ let it: Pin<&mut PhantomPinned> = pin!(PhantomPinned);
+ stuff(it);
+}
+
+#[test]
+fn extension_works_through_block() {
+ let it: Pin<&mut PhantomPinned> = { pin!(PhantomPinned) };
+ stuff(it);
+}
+
+#[test]
+fn extension_works_through_unsafe_block() {
+ // "retro-type-inference" works as well.
+ let it: Pin<&mut PhantomPinned> = unsafe { pin!(transmute(())) };
+ stuff(it);
+}
+
+#[test]
+fn unsize_coercion() {
+ let slice: Pin<&mut [PhantomPinned]> = pin!([PhantomPinned; 2]);
+ stuff(slice);
+ let dyn_obj: Pin<&mut dyn Send> = pin!([PhantomPinned; 2]);
+ stuff(dyn_obj);
+}
diff --git a/library/core/tests/ptr.rs b/library/core/tests/ptr.rs
new file mode 100644
index 000000000..12861794c
--- /dev/null
+++ b/library/core/tests/ptr.rs
@@ -0,0 +1,855 @@
+use core::cell::RefCell;
+use core::mem::{self, MaybeUninit};
+use core::num::NonZeroUsize;
+use core::ptr;
+use core::ptr::*;
+use std::fmt::{Debug, Display};
+
+#[test]
+fn test_const_from_raw_parts() {
+ const SLICE: &[u8] = &[1, 2, 3, 4];
+ const FROM_RAW: &[u8] = unsafe { &*slice_from_raw_parts(SLICE.as_ptr(), SLICE.len()) };
+ assert_eq!(SLICE, FROM_RAW);
+
+ let slice = &[1, 2, 3, 4, 5];
+ let from_raw = unsafe { &*slice_from_raw_parts(slice.as_ptr(), 2) };
+ assert_eq!(&slice[..2], from_raw);
+}
+
+#[test]
+fn test() {
+ unsafe {
+ #[repr(C)]
+ struct Pair {
+ fst: isize,
+ snd: isize,
+ }
+ let mut p = Pair { fst: 10, snd: 20 };
+ let pptr: *mut Pair = &mut p;
+ let iptr: *mut isize = pptr as *mut isize;
+ assert_eq!(*iptr, 10);
+ *iptr = 30;
+ assert_eq!(*iptr, 30);
+ assert_eq!(p.fst, 30);
+
+ *pptr = Pair { fst: 50, snd: 60 };
+ assert_eq!(*iptr, 50);
+ assert_eq!(p.fst, 50);
+ assert_eq!(p.snd, 60);
+
+ let v0 = vec![32000u16, 32001u16, 32002u16];
+ let mut v1 = vec![0u16, 0u16, 0u16];
+
+ copy(v0.as_ptr().offset(1), v1.as_mut_ptr().offset(1), 1);
+ assert!((v1[0] == 0u16 && v1[1] == 32001u16 && v1[2] == 0u16));
+ copy(v0.as_ptr().offset(2), v1.as_mut_ptr(), 1);
+ assert!((v1[0] == 32002u16 && v1[1] == 32001u16 && v1[2] == 0u16));
+ copy(v0.as_ptr(), v1.as_mut_ptr().offset(2), 1);
+ assert!((v1[0] == 32002u16 && v1[1] == 32001u16 && v1[2] == 32000u16));
+ }
+}
+
+#[test]
+fn test_is_null() {
+ let p: *const isize = null();
+ assert!(p.is_null());
+
+ let q = p.wrapping_offset(1);
+ assert!(!q.is_null());
+
+ let mp: *mut isize = null_mut();
+ assert!(mp.is_null());
+
+ let mq = mp.wrapping_offset(1);
+ assert!(!mq.is_null());
+
+ // Pointers to unsized types -- slices
+ let s: &mut [u8] = &mut [1, 2, 3];
+ let cs: *const [u8] = s;
+ assert!(!cs.is_null());
+
+ let ms: *mut [u8] = s;
+ assert!(!ms.is_null());
+
+ let cz: *const [u8] = &[];
+ assert!(!cz.is_null());
+
+ let mz: *mut [u8] = &mut [];
+ assert!(!mz.is_null());
+
+ let ncs: *const [u8] = null::<[u8; 3]>();
+ assert!(ncs.is_null());
+
+ let nms: *mut [u8] = null_mut::<[u8; 3]>();
+ assert!(nms.is_null());
+
+ // Pointers to unsized types -- trait objects
+ let ci: *const dyn ToString = &3;
+ assert!(!ci.is_null());
+
+ let mi: *mut dyn ToString = &mut 3;
+ assert!(!mi.is_null());
+
+ let nci: *const dyn ToString = null::<isize>();
+ assert!(nci.is_null());
+
+ let nmi: *mut dyn ToString = null_mut::<isize>();
+ assert!(nmi.is_null());
+
+ extern "C" {
+ type Extern;
+ }
+ let ec: *const Extern = null::<Extern>();
+ assert!(ec.is_null());
+
+ let em: *mut Extern = null_mut::<Extern>();
+ assert!(em.is_null());
+}
+
+#[test]
+fn test_as_ref() {
+ unsafe {
+ let p: *const isize = null();
+ assert_eq!(p.as_ref(), None);
+
+ let q: *const isize = &2;
+ assert_eq!(q.as_ref().unwrap(), &2);
+
+ let p: *mut isize = null_mut();
+ assert_eq!(p.as_ref(), None);
+
+ let q: *mut isize = &mut 2;
+ assert_eq!(q.as_ref().unwrap(), &2);
+
+ // Lifetime inference
+ let u = 2isize;
+ {
+ let p = &u as *const isize;
+ assert_eq!(p.as_ref().unwrap(), &2);
+ }
+
+ // Pointers to unsized types -- slices
+ let s: &mut [u8] = &mut [1, 2, 3];
+ let cs: *const [u8] = s;
+ assert_eq!(cs.as_ref(), Some(&*s));
+
+ let ms: *mut [u8] = s;
+ assert_eq!(ms.as_ref(), Some(&*s));
+
+ let cz: *const [u8] = &[];
+ assert_eq!(cz.as_ref(), Some(&[][..]));
+
+ let mz: *mut [u8] = &mut [];
+ assert_eq!(mz.as_ref(), Some(&[][..]));
+
+ let ncs: *const [u8] = null::<[u8; 3]>();
+ assert_eq!(ncs.as_ref(), None);
+
+ let nms: *mut [u8] = null_mut::<[u8; 3]>();
+ assert_eq!(nms.as_ref(), None);
+
+ // Pointers to unsized types -- trait objects
+ let ci: *const dyn ToString = &3;
+ assert!(ci.as_ref().is_some());
+
+ let mi: *mut dyn ToString = &mut 3;
+ assert!(mi.as_ref().is_some());
+
+ let nci: *const dyn ToString = null::<isize>();
+ assert!(nci.as_ref().is_none());
+
+ let nmi: *mut dyn ToString = null_mut::<isize>();
+ assert!(nmi.as_ref().is_none());
+ }
+}
+
+#[test]
+fn test_as_mut() {
+ unsafe {
+ let p: *mut isize = null_mut();
+ assert!(p.as_mut() == None);
+
+ let q: *mut isize = &mut 2;
+ assert!(q.as_mut().unwrap() == &mut 2);
+
+ // Lifetime inference
+ let mut u = 2isize;
+ {
+ let p = &mut u as *mut isize;
+ assert!(p.as_mut().unwrap() == &mut 2);
+ }
+
+ // Pointers to unsized types -- slices
+ let s: &mut [u8] = &mut [1, 2, 3];
+ let ms: *mut [u8] = s;
+ assert_eq!(ms.as_mut(), Some(&mut [1, 2, 3][..]));
+
+ let mz: *mut [u8] = &mut [];
+ assert_eq!(mz.as_mut(), Some(&mut [][..]));
+
+ let nms: *mut [u8] = null_mut::<[u8; 3]>();
+ assert_eq!(nms.as_mut(), None);
+
+ // Pointers to unsized types -- trait objects
+ let mi: *mut dyn ToString = &mut 3;
+ assert!(mi.as_mut().is_some());
+
+ let nmi: *mut dyn ToString = null_mut::<isize>();
+ assert!(nmi.as_mut().is_none());
+ }
+}
+
+#[test]
+fn test_ptr_addition() {
+ unsafe {
+ let xs = vec![5; 16];
+ let mut ptr = xs.as_ptr();
+ let end = ptr.offset(16);
+
+ while ptr < end {
+ assert_eq!(*ptr, 5);
+ ptr = ptr.offset(1);
+ }
+
+ let mut xs_mut = xs;
+ let mut m_ptr = xs_mut.as_mut_ptr();
+ let m_end = m_ptr.offset(16);
+
+ while m_ptr < m_end {
+ *m_ptr += 5;
+ m_ptr = m_ptr.offset(1);
+ }
+
+ assert!(xs_mut == vec![10; 16]);
+ }
+}
+
+#[test]
+fn test_ptr_subtraction() {
+ unsafe {
+ let xs = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
+ let mut idx = 9;
+ let ptr = xs.as_ptr();
+
+ while idx >= 0 {
+ assert_eq!(*(ptr.offset(idx as isize)), idx as isize);
+ idx = idx - 1;
+ }
+
+ let mut xs_mut = xs;
+ let m_start = xs_mut.as_mut_ptr();
+ let mut m_ptr = m_start.offset(9);
+
+ loop {
+ *m_ptr += *m_ptr;
+ if m_ptr == m_start {
+ break;
+ }
+ m_ptr = m_ptr.offset(-1);
+ }
+
+ assert_eq!(xs_mut, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]);
+ }
+}
+
+#[test]
+fn test_set_memory() {
+ let mut xs = [0u8; 20];
+ let ptr = xs.as_mut_ptr();
+ unsafe {
+ write_bytes(ptr, 5u8, xs.len());
+ }
+ assert!(xs == [5u8; 20]);
+}
+
+#[test]
+fn test_set_memory_const() {
+ const XS: [u8; 20] = {
+ let mut xs = [0u8; 20];
+ let ptr = xs.as_mut_ptr();
+ unsafe {
+ ptr.write_bytes(5u8, xs.len());
+ }
+ xs
+ };
+
+ assert!(XS == [5u8; 20]);
+}
+
+#[test]
+fn test_unsized_nonnull() {
+ let xs: &[i32] = &[1, 2, 3];
+ let ptr = unsafe { NonNull::new_unchecked(xs as *const [i32] as *mut [i32]) };
+ let ys = unsafe { ptr.as_ref() };
+ let zs: &[i32] = &[1, 2, 3];
+ assert!(ys == zs);
+}
+
+#[test]
+fn test_const_nonnull_new() {
+ const {
+ assert!(NonNull::new(core::ptr::null_mut::<()>()).is_none());
+
+ let value = &mut 0u32;
+ let mut ptr = NonNull::new(value).unwrap();
+ unsafe { *ptr.as_mut() = 42 };
+
+ let reference = unsafe { &*ptr.as_ref() };
+ assert!(*reference == *value);
+ assert!(*reference == 42);
+ };
+}
+
+#[test]
+#[cfg(unix)] // printf may not be available on other platforms
+#[allow(deprecated)] // For SipHasher
+pub fn test_variadic_fnptr() {
+ use core::ffi;
+ use core::hash::{Hash, SipHasher};
+ extern "C" {
+ // This needs to use the correct function signature even though it isn't called:
+ // some codegen backends (like LLVM) make it UB to declare a function with multiple
+ // conflicting signatures, while others (like Cranelift) outright reject it.
+ fn printf(_: *const ffi::c_char, ...) -> ffi::c_int;
+ }
+ let p: unsafe extern "C" fn(*const ffi::c_char, ...) -> ffi::c_int = printf;
+ let q = p.clone();
+ assert_eq!(p, q);
+ assert!(!(p < q));
+ let mut s = SipHasher::new();
+ assert_eq!(p.hash(&mut s), q.hash(&mut s));
+}
+
+#[test]
+fn write_unaligned_drop() {
+ thread_local! {
+ static DROPS: RefCell<Vec<u32>> = RefCell::new(Vec::new());
+ }
+
+ struct Dropper(u32);
+
+ impl Drop for Dropper {
+ fn drop(&mut self) {
+ DROPS.with(|d| d.borrow_mut().push(self.0));
+ }
+ }
+
+ {
+ let c = Dropper(0);
+ let mut t = Dropper(1);
+ unsafe {
+ write_unaligned(&mut t, c);
+ }
+ }
+ DROPS.with(|d| assert_eq!(*d.borrow(), [0]));
+}
+
+#[test]
+fn align_offset_zst() {
+ // For pointers of stride = 0, the pointer is either already aligned or it cannot be
+ // aligned at all, because advancing by any number of elements never changes the address.
+ let mut p = 1;
+ while p < 1024 {
+ assert_eq!(ptr::invalid::<()>(p).align_offset(p), 0);
+ if p != 1 {
+ assert_eq!(ptr::invalid::<()>(p + 1).align_offset(p), !0);
+ }
+ p = (p + 1).next_power_of_two();
+ }
+}
+
+#[test]
+fn align_offset_stride_one() {
+ // For pointers of stride = 1, the pointer can always be aligned. The offset is equal to
+ // the number of bytes needed to reach the next aligned address.
+ let mut align = 1;
+ while align < 1024 {
+ for ptr in 1..2 * align {
+ let expected = ptr % align;
+ let offset = if expected == 0 { 0 } else { align - expected };
+ assert_eq!(
+ ptr::invalid::<u8>(ptr).align_offset(align),
+ offset,
+ "ptr = {}, align = {}, size = 1",
+ ptr,
+ align
+ );
+ }
+ align = (align + 1).next_power_of_two();
+ }
+}
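Since `align` is always a power of two here, the `expected`/`offset` computation in the loop above has a closed form; a sketch (not the libcore implementation):

```rust
#[test]
fn align_offset_stride_one_closed_form_sketch() {
    // (align - addr % align) % align, written as a mask;
    // `align` must be a power of two.
    fn offset(addr: usize, align: usize) -> usize {
        addr.wrapping_neg() & (align - 1)
    }
    assert_eq!(offset(5, 4), 3); // 5 + 3 == 8, a multiple of 4
    assert_eq!(offset(8, 4), 0); // already aligned
    assert_eq!(offset(7, 8), 1);
}
```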
+
+#[test]
+fn align_offset_various_strides() {
+ unsafe fn test_stride<T>(ptr: *const T, align: usize) -> bool {
+ let numptr = ptr as usize;
+ let mut expected = usize::MAX;
+ // Naive but definitely correct way to find the *first* aligned element for a pointer of type *const T.
+ for el in 0..align {
+ if (numptr + el * ::std::mem::size_of::<T>()) % align == 0 {
+ expected = el;
+ break;
+ }
+ }
+ let got = ptr.align_offset(align);
+ if got != expected {
+ eprintln!(
+ "aligning {:p} (with stride of {}) to {}, expected {}, got {}",
+ ptr,
+ ::std::mem::size_of::<T>(),
+ align,
+ expected,
+ got
+ );
+ return true;
+ }
+ return false;
+ }
+
+ // For pointers of stride != 1, we verify the algorithm against the most naive possible
+ // implementation
+ let mut align = 1;
+ let mut x = false;
+ // Miri is too slow
+ let limit = if cfg!(miri) { 32 } else { 1024 };
+ while align < limit {
+ for ptr in 1usize..4 * align {
+ unsafe {
+ #[repr(packed)]
+ struct A3(u16, u8);
+ x |= test_stride::<A3>(ptr::invalid::<A3>(ptr), align);
+
+ struct A4(u32);
+ x |= test_stride::<A4>(ptr::invalid::<A4>(ptr), align);
+
+ #[repr(packed)]
+ struct A5(u32, u8);
+ x |= test_stride::<A5>(ptr::invalid::<A5>(ptr), align);
+
+ #[repr(packed)]
+ struct A6(u32, u16);
+ x |= test_stride::<A6>(ptr::invalid::<A6>(ptr), align);
+
+ #[repr(packed)]
+ struct A7(u32, u16, u8);
+ x |= test_stride::<A7>(ptr::invalid::<A7>(ptr), align);
+
+ #[repr(packed)]
+ struct A8(u32, u32);
+ x |= test_stride::<A8>(ptr::invalid::<A8>(ptr), align);
+
+ #[repr(packed)]
+ struct A9(u32, u32, u8);
+ x |= test_stride::<A9>(ptr::invalid::<A9>(ptr), align);
+
+ #[repr(packed)]
+ struct A10(u32, u32, u16);
+ x |= test_stride::<A10>(ptr::invalid::<A10>(ptr), align);
+
+ x |= test_stride::<u32>(ptr::invalid::<u32>(ptr), align);
+ x |= test_stride::<u128>(ptr::invalid::<u128>(ptr), align);
+ }
+ }
+ align = (align + 1).next_power_of_two();
+ }
+ assert!(!x);
+}
+
+#[test]
+fn offset_from() {
+ let mut a = [0; 5];
+ let ptr1: *mut i32 = &mut a[1];
+ let ptr2: *mut i32 = &mut a[3];
+ unsafe {
+ assert_eq!(ptr2.offset_from(ptr1), 2);
+ assert_eq!(ptr1.offset_from(ptr2), -2);
+ assert_eq!(ptr1.offset(2), ptr2);
+ assert_eq!(ptr2.offset(-2), ptr1);
+ }
+}
+
+#[test]
+fn ptr_metadata() {
+ struct Unit;
+ struct Pair<A, B: ?Sized>(A, B);
+ extern "C" {
+ type Extern;
+ }
+ let () = metadata(&());
+ let () = metadata(&Unit);
+ let () = metadata(&4_u32);
+ let () = metadata(&String::new());
+ let () = metadata(&Some(4_u32));
+ let () = metadata(&ptr_metadata);
+ let () = metadata(&|| {});
+ let () = metadata(&[4, 7]);
+ let () = metadata(&(4, String::new()));
+ let () = metadata(&Pair(4, String::new()));
+ let () = metadata(ptr::null::<()>() as *const Extern);
+ let () = metadata(ptr::null::<()>() as *const <&u32 as std::ops::Deref>::Target);
+
+ assert_eq!(metadata("foo"), 3_usize);
+ assert_eq!(metadata(&[4, 7][..]), 2_usize);
+
+ let dst_tuple: &(bool, [u8]) = &(true, [0x66, 0x6F, 0x6F]);
+ let dst_struct: &Pair<bool, [u8]> = &Pair(true, [0x66, 0x6F, 0x6F]);
+ assert_eq!(metadata(dst_tuple), 3_usize);
+ assert_eq!(metadata(dst_struct), 3_usize);
+ unsafe {
+ let dst_tuple: &(bool, str) = std::mem::transmute(dst_tuple);
+ let dst_struct: &Pair<bool, str> = std::mem::transmute(dst_struct);
+ assert_eq!(&dst_tuple.1, "foo");
+ assert_eq!(&dst_struct.1, "foo");
+ assert_eq!(metadata(dst_tuple), 3_usize);
+ assert_eq!(metadata(dst_struct), 3_usize);
+ }
+
+ let vtable_1: DynMetadata<dyn Debug> = metadata(&4_u16 as &dyn Debug);
+ let vtable_2: DynMetadata<dyn Display> = metadata(&4_u16 as &dyn Display);
+ let vtable_3: DynMetadata<dyn Display> = metadata(&4_u32 as &dyn Display);
+ let vtable_4: DynMetadata<dyn Display> = metadata(&(true, 7_u32) as &(bool, dyn Display));
+ let vtable_5: DynMetadata<dyn Display> =
+ metadata(&Pair(true, 7_u32) as &Pair<bool, dyn Display>);
+ unsafe {
+ let address_1: *const () = std::mem::transmute(vtable_1);
+ let address_2: *const () = std::mem::transmute(vtable_2);
+ let address_3: *const () = std::mem::transmute(vtable_3);
+ let address_4: *const () = std::mem::transmute(vtable_4);
+ let address_5: *const () = std::mem::transmute(vtable_5);
+ // Different trait => different vtable pointer
+ assert_ne!(address_1, address_2);
+ // Different erased type => different vtable pointer
+ assert_ne!(address_2, address_3);
+ // Same erased type and same trait => same vtable pointer
+ assert_eq!(address_3, address_4);
+ assert_eq!(address_3, address_5);
+ }
+}
+
+#[test]
+fn ptr_metadata_bounds() {
+ fn metadata_eq_method_address<T: ?Sized>() -> usize {
+ // The `Metadata` associated type has an `Ord` bound, so this is valid:
+ <<T as Pointee>::Metadata as PartialEq>::eq as usize
+ }
+ // "Synthetic" trait impls generated by the compiler like those of `Pointee`
+ // are not checked for bounds of associated type.
+ // So with a buggy libcore we could have both:
+ // * `<dyn Display as Pointee>::Metadata == DynMetadata`
+ // * `DynMetadata: !PartialEq`
+ // … and cause an ICE here:
+ metadata_eq_method_address::<dyn Display>();
+
+ // For this reason, let’s check here that bounds are satisfied:
+
+ let _ = static_assert_expected_bounds_for_metadata::<()>;
+ let _ = static_assert_expected_bounds_for_metadata::<usize>;
+ let _ = static_assert_expected_bounds_for_metadata::<DynMetadata<dyn Display>>;
+ fn _static_assert_associated_type<T: ?Sized>() {
+ let _ = static_assert_expected_bounds_for_metadata::<<T as Pointee>::Metadata>;
+ }
+
+ fn static_assert_expected_bounds_for_metadata<Meta>()
+ where
+ // Keep this in sync with the associated type in `library/core/src/ptr/metadata.rs`
+ Meta: Copy + Send + Sync + Ord + std::hash::Hash + Unpin,
+ {
+ }
+}
+
+#[test]
+fn dyn_metadata() {
+ #[derive(Debug)]
+ #[repr(align(32))]
+ struct Something([u8; 47]);
+
+ let value = Something([0; 47]);
+ let trait_object: &dyn Debug = &value;
+ let meta = metadata(trait_object);
+
+ assert_eq!(meta.size_of(), 64);
+ assert_eq!(meta.size_of(), std::mem::size_of::<Something>());
+ assert_eq!(meta.align_of(), 32);
+ assert_eq!(meta.align_of(), std::mem::align_of::<Something>());
+ assert_eq!(meta.layout(), std::alloc::Layout::new::<Something>());
+
+ assert!(format!("{meta:?}").starts_with("DynMetadata(0x"));
+}
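The `size_of()` value checked above is the struct's 47 bytes rounded up to its 32-byte alignment; a sketch of that rounding (hypothetical helper):

```rust
#[test]
fn dyn_metadata_size_rounding_sketch() {
    // Size is padded up to the next multiple of the alignment
    // (`align` must be a power of two).
    fn round_up(size: usize, align: usize) -> usize {
        (size + align - 1) & !(align - 1)
    }
    assert_eq!(round_up(47, 32), 64);
}
```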
+
+#[test]
+fn from_raw_parts() {
+ let mut value = 5_u32;
+ let address = &mut value as *mut _ as *mut ();
+ let trait_object: &dyn Display = &mut value;
+ let vtable = metadata(trait_object);
+ let trait_object = NonNull::from(trait_object);
+
+ assert_eq!(ptr::from_raw_parts(address, vtable), trait_object.as_ptr());
+ assert_eq!(ptr::from_raw_parts_mut(address, vtable), trait_object.as_ptr());
+ assert_eq!(NonNull::from_raw_parts(NonNull::new(address).unwrap(), vtable), trait_object);
+
+ let mut array = [5_u32, 5, 5, 5, 5];
+ let address = &mut array as *mut _ as *mut ();
+ let array_ptr = NonNull::from(&mut array);
+ let slice_ptr = NonNull::from(&mut array[..]);
+
+ assert_eq!(ptr::from_raw_parts(address, ()), array_ptr.as_ptr());
+ assert_eq!(ptr::from_raw_parts_mut(address, ()), array_ptr.as_ptr());
+ assert_eq!(NonNull::from_raw_parts(NonNull::new(address).unwrap(), ()), array_ptr);
+
+ assert_eq!(ptr::from_raw_parts(address, 5), slice_ptr.as_ptr());
+ assert_eq!(ptr::from_raw_parts_mut(address, 5), slice_ptr.as_ptr());
+ assert_eq!(NonNull::from_raw_parts(NonNull::new(address).unwrap(), 5), slice_ptr);
+}
+
+#[test]
+fn thin_box() {
+ let foo = ThinBox::<dyn Display>::new(4);
+ assert_eq!(foo.to_string(), "4");
+ drop(foo);
+ let bar = ThinBox::<dyn Display>::new(7);
+ assert_eq!(bar.to_string(), "7");
+
+ // A slightly more interesting library that could be built on top of metadata APIs.
+ //
+ // * It could be generalized to any `T: ?Sized` (not just trait objects)
+ // if `{size,align}_of_for_meta<T: ?Sized>(T::Metadata)` are added.
+ // * Constructing a `ThinBox` without consuming and deallocating a `Box`
+ // requires either the unstable `Unsize` marker trait,
+ // or the unstable `unsized_locals` language feature,
+ // or taking `&dyn T` and restricting to `T: Copy`.
+
+ use std::alloc::*;
+ use std::marker::PhantomData;
+
+ struct ThinBox<T>
+ where
+ T: ?Sized + Pointee<Metadata = DynMetadata<T>>,
+ {
+ ptr: NonNull<DynMetadata<T>>,
+ phantom: PhantomData<T>,
+ }
+
+ impl<T> ThinBox<T>
+ where
+ T: ?Sized + Pointee<Metadata = DynMetadata<T>>,
+ {
+ pub fn new<Value: std::marker::Unsize<T>>(value: Value) -> Self {
+ let unsized_: &T = &value;
+ let meta = metadata(unsized_);
+ let meta_layout = Layout::for_value(&meta);
+ let value_layout = Layout::for_value(&value);
+ let (layout, offset) = meta_layout.extend(value_layout).unwrap();
+ // `DynMetadata` is pointer-sized:
+ assert!(layout.size() > 0);
+ // If `ThinBox<T>` is generalized to any `T: ?Sized`,
+ // handle ZSTs with a dangling pointer without going through `alloc()`,
+ // like `Box<T>` does.
+ unsafe {
+ let ptr = NonNull::new(alloc(layout))
+ .unwrap_or_else(|| handle_alloc_error(layout))
+ .cast::<DynMetadata<T>>();
+ ptr.as_ptr().write(meta);
+ ptr.cast::<u8>().as_ptr().add(offset).cast::<Value>().write(value);
+ Self { ptr, phantom: PhantomData }
+ }
+ }
+
+ fn meta(&self) -> DynMetadata<T> {
+ unsafe { *self.ptr.as_ref() }
+ }
+
+ fn layout(&self) -> (Layout, usize) {
+ let meta = self.meta();
+ Layout::for_value(&meta).extend(meta.layout()).unwrap()
+ }
+
+ fn value_ptr(&self) -> *const T {
+ let (_, offset) = self.layout();
+ let data_ptr = unsafe { self.ptr.cast::<u8>().as_ptr().add(offset) };
+ ptr::from_raw_parts(data_ptr.cast(), self.meta())
+ }
+
+ fn value_mut_ptr(&mut self) -> *mut T {
+ let (_, offset) = self.layout();
+ // FIXME: can this line be shared with the identical one in `value_ptr()`
+ // without upsetting Stacked Borrows?
+ let data_ptr = unsafe { self.ptr.cast::<u8>().as_ptr().add(offset) };
+ from_raw_parts_mut(data_ptr.cast(), self.meta())
+ }
+ }
+
+ impl<T> std::ops::Deref for ThinBox<T>
+ where
+ T: ?Sized + Pointee<Metadata = DynMetadata<T>>,
+ {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ unsafe { &*self.value_ptr() }
+ }
+ }
+
+ impl<T> std::ops::DerefMut for ThinBox<T>
+ where
+ T: ?Sized + Pointee<Metadata = DynMetadata<T>>,
+ {
+ fn deref_mut(&mut self) -> &mut T {
+ unsafe { &mut *self.value_mut_ptr() }
+ }
+ }
+
+ impl<T> std::ops::Drop for ThinBox<T>
+ where
+ T: ?Sized + Pointee<Metadata = DynMetadata<T>>,
+ {
+ fn drop(&mut self) {
+ let (layout, _) = self.layout();
+ unsafe {
+ drop_in_place::<T>(&mut **self);
+ dealloc(self.ptr.cast().as_ptr(), layout);
+ }
+ }
+ }
+}
+
+#[test]
+fn nonnull_tagged_pointer_with_provenance() {
+ let raw_pointer = Box::into_raw(Box::new(10));
+
+ let mut p = TaggedPointer::new(raw_pointer).unwrap();
+ assert_eq!(p.tag(), 0);
+
+ p.set_tag(1);
+ assert_eq!(p.tag(), 1);
+ assert_eq!(unsafe { *p.pointer().as_ptr() }, 10);
+
+ p.set_tag(3);
+ assert_eq!(p.tag(), 3);
+ assert_eq!(unsafe { *p.pointer().as_ptr() }, 10);
+
+ unsafe { Box::from_raw(p.pointer().as_ptr()) };
+
+ /// A non-null pointer type which carries several bits of metadata and maintains provenance.
+ #[repr(transparent)]
+ pub struct TaggedPointer<T>(NonNull<T>);
+
+ impl<T> Clone for TaggedPointer<T> {
+ fn clone(&self) -> Self {
+ Self(self.0)
+ }
+ }
+
+ impl<T> Copy for TaggedPointer<T> {}
+
+ impl<T> TaggedPointer<T> {
+ /// The ABI-required minimum alignment of the `T` type.
+ pub const ALIGNMENT: usize = core::mem::align_of::<T>();
+ /// A mask for data-carrying bits of the address.
+ pub const DATA_MASK: usize = !Self::ADDRESS_MASK;
+ /// Number of available bits of storage in the address.
+ pub const NUM_BITS: u32 = Self::ALIGNMENT.trailing_zeros();
+ /// A mask for the non-data-carrying bits of the address.
+ pub const ADDRESS_MASK: usize = usize::MAX << Self::NUM_BITS;
+
+ /// Create a new tagged pointer from a possibly null pointer.
+ pub fn new(pointer: *mut T) -> Option<TaggedPointer<T>> {
+ Some(TaggedPointer(NonNull::new(pointer)?))
+ }
+
+ /// Consume this tagged pointer and produce the untagged, non-null pointer to
+ /// the memory location.
+ pub fn pointer(self) -> NonNull<T> {
+ // SAFETY: `addr` is guaranteed to have bits set in `Self::ADDRESS_MASK`, so the result will be non-null.
+ self.0.map_addr(|addr| unsafe {
+ NonZeroUsize::new_unchecked(addr.get() & Self::ADDRESS_MASK)
+ })
+ }
+
+ /// Read the tag data this tagged pointer carries.
+ pub fn tag(&self) -> usize {
+ self.0.addr().get() & Self::DATA_MASK
+ }
+
+ /// Update the data this tagged pointer carries to a new value.
+ pub fn set_tag(&mut self, data: usize) {
+ assert_eq!(
+ data & Self::ADDRESS_MASK,
+ 0,
+ "cannot set more data beyond the lowest NUM_BITS"
+ );
+ let data = data & Self::DATA_MASK;
+
+ // SAFETY: This value will always be non-zero because the upper bits (from
+ // ADDRESS_MASK) will always be non-zero. This is a property of the type and its
+ // construction.
+ self.0 = self.0.map_addr(|addr| unsafe {
+ NonZeroUsize::new_unchecked((addr.get() & Self::ADDRESS_MASK) | data)
+ })
+ }
+ }
+}
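For the boxed `i32` used in this test (alignment 4), the mask constants in `TaggedPointer` leave exactly two tag bits; a worked sketch:

```rust
#[test]
fn tagged_pointer_mask_layout_sketch() {
    const ALIGNMENT: usize = core::mem::align_of::<i32>(); // 4
    const NUM_BITS: u32 = ALIGNMENT.trailing_zeros(); // 2, so tags 0..=3 fit
    const ADDRESS_MASK: usize = usize::MAX << NUM_BITS; // ...1111_1100
    const DATA_MASK: usize = !ADDRESS_MASK; // ...0000_0011
    assert_eq!(DATA_MASK, 0b11);
}
```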
+
+#[test]
+fn swap_copy_untyped() {
+ // We call `{swap,copy}{,_nonoverlapping}` at `bool` type on data that is not a valid bool.
+ // These should all do untyped copies, so this should work fine.
+ let mut x = 5u8;
+ let mut y = 6u8;
+
+ let ptr1 = &mut x as *mut u8 as *mut bool;
+ let ptr2 = &mut y as *mut u8 as *mut bool;
+
+ unsafe {
+ ptr::swap(ptr1, ptr2);
+ ptr::swap_nonoverlapping(ptr1, ptr2, 1);
+ }
+ assert_eq!(x, 5);
+ assert_eq!(y, 6);
+
+ unsafe {
+ ptr::copy(ptr1, ptr2, 1);
+ ptr::copy_nonoverlapping(ptr1, ptr2, 1);
+ }
+ assert_eq!(x, 5);
+ assert_eq!(y, 5);
+}
+
+#[test]
+fn test_const_copy() {
+ const {
+ let ptr1 = &1;
+ let mut ptr2 = &666;
+
+ // Copy ptr1 to ptr2, bytewise.
+ unsafe {
+ ptr::copy(
+ &ptr1 as *const _ as *const MaybeUninit<u8>,
+ &mut ptr2 as *mut _ as *mut MaybeUninit<u8>,
+ mem::size_of::<&i32>(),
+ );
+ }
+
+ // Make sure they still work.
+ assert!(*ptr1 == 1);
+ assert!(*ptr2 == 1);
+ };
+
+ const {
+ let ptr1 = &1;
+ let mut ptr2 = &666;
+
+ // Copy ptr1 to ptr2, bytewise.
+ unsafe {
+ ptr::copy_nonoverlapping(
+ &ptr1 as *const _ as *const MaybeUninit<u8>,
+ &mut ptr2 as *mut _ as *mut MaybeUninit<u8>,
+ mem::size_of::<&i32>(),
+ );
+ }
+
+ // Make sure they still work.
+ assert!(*ptr1 == 1);
+ assert!(*ptr2 == 1);
+ };
+}
diff --git a/library/core/tests/result.rs b/library/core/tests/result.rs
new file mode 100644
index 000000000..103e8cc3a
--- /dev/null
+++ b/library/core/tests/result.rs
@@ -0,0 +1,427 @@
+use core::ops::DerefMut;
+use core::option::*;
+
+fn op1() -> Result<isize, &'static str> {
+ Ok(666)
+}
+fn op2() -> Result<isize, &'static str> {
+ Err("sadface")
+}
+
+#[test]
+fn test_and() {
+ assert_eq!(op1().and(Ok(667)).unwrap(), 667);
+ assert_eq!(op1().and(Err::<i32, &'static str>("bad")).unwrap_err(), "bad");
+
+ assert_eq!(op2().and(Ok(667)).unwrap_err(), "sadface");
+ assert_eq!(op2().and(Err::<i32, &'static str>("bad")).unwrap_err(), "sadface");
+}
+
+#[test]
+fn test_and_then() {
+ assert_eq!(op1().and_then(|i| Ok::<isize, &'static str>(i + 1)).unwrap(), 667);
+ assert_eq!(op1().and_then(|_| Err::<isize, &'static str>("bad")).unwrap_err(), "bad");
+
+ assert_eq!(op2().and_then(|i| Ok::<isize, &'static str>(i + 1)).unwrap_err(), "sadface");
+ assert_eq!(op2().and_then(|_| Err::<isize, &'static str>("bad")).unwrap_err(), "sadface");
+}
+
+#[test]
+fn test_or() {
+ assert_eq!(op1().or(Ok::<_, &'static str>(667)).unwrap(), 666);
+ assert_eq!(op1().or(Err("bad")).unwrap(), 666);
+
+ assert_eq!(op2().or(Ok::<_, &'static str>(667)).unwrap(), 667);
+ assert_eq!(op2().or(Err("bad")).unwrap_err(), "bad");
+}
+
+#[test]
+fn test_or_else() {
+ assert_eq!(op1().or_else(|_| Ok::<isize, &'static str>(667)).unwrap(), 666);
+ assert_eq!(op1().or_else(|e| Err::<isize, &'static str>(e)).unwrap(), 666);
+
+ assert_eq!(op2().or_else(|_| Ok::<isize, &'static str>(667)).unwrap(), 667);
+ assert_eq!(op2().or_else(|e| Err::<isize, &'static str>(e)).unwrap_err(), "sadface");
+}
+
+#[test]
+fn test_impl_map() {
+ assert!(Ok::<isize, isize>(1).map(|x| x + 1) == Ok(2));
+ assert!(Err::<isize, isize>(1).map(|x| x + 1) == Err(1));
+}
+
+#[test]
+fn test_impl_map_err() {
+ assert!(Ok::<isize, isize>(1).map_err(|x| x + 1) == Ok(1));
+ assert!(Err::<isize, isize>(1).map_err(|x| x + 1) == Err(2));
+}
+
+#[test]
+fn test_collect() {
+ let v: Result<Vec<isize>, ()> = (0..0).map(|_| Ok::<isize, ()>(0)).collect();
+ assert!(v == Ok(vec![]));
+
+ let v: Result<Vec<isize>, ()> = (0..3).map(|x| Ok::<isize, ()>(x)).collect();
+ assert!(v == Ok(vec![0, 1, 2]));
+
+ let v: Result<Vec<isize>, isize> = (0..3).map(|x| if x > 1 { Err(x) } else { Ok(x) }).collect();
+ assert!(v == Err(2));
+
+ // test that it does not take more elements than it needs
+ let mut functions: [Box<dyn Fn() -> Result<(), isize>>; 3] =
+ [Box::new(|| Ok(())), Box::new(|| Err(1)), Box::new(|| panic!())];
+
+ let v: Result<Vec<()>, isize> = functions.iter_mut().map(|f| (*f)()).collect();
+ assert!(v == Err(1));
+}
+
+#[test]
+fn test_fmt_default() {
+ let ok: Result<isize, &'static str> = Ok(100);
+ let err: Result<isize, &'static str> = Err("Err");
+
+ let s = format!("{ok:?}");
+ assert_eq!(s, "Ok(100)");
+ let s = format!("{err:?}");
+ assert_eq!(s, "Err(\"Err\")");
+}
+
+#[test]
+fn test_unwrap_or() {
+ let ok: Result<isize, &'static str> = Ok(100);
+ let ok_err: Result<isize, &'static str> = Err("Err");
+
+ assert_eq!(ok.unwrap_or(50), 100);
+ assert_eq!(ok_err.unwrap_or(50), 50);
+}
+
+#[test]
+fn test_ok_or_err() {
+ let ok: Result<isize, isize> = Ok(100);
+ let err: Result<isize, isize> = Err(200);
+
+ assert_eq!(ok.into_ok_or_err(), 100);
+ assert_eq!(err.into_ok_or_err(), 200);
+}
+
+#[test]
+fn test_unwrap_or_else() {
+ fn handler(msg: &'static str) -> isize {
+ if msg == "I got this." { 50 } else { panic!("BadBad") }
+ }
+
+ let ok: Result<isize, &'static str> = Ok(100);
+ let ok_err: Result<isize, &'static str> = Err("I got this.");
+
+ assert_eq!(ok.unwrap_or_else(handler), 100);
+ assert_eq!(ok_err.unwrap_or_else(handler), 50);
+}
+
+#[test]
+#[should_panic]
+pub fn test_unwrap_or_else_panic() {
+ fn handler(msg: &'static str) -> isize {
+ if msg == "I got this." { 50 } else { panic!("BadBad") }
+ }
+
+ let bad_err: Result<isize, &'static str> = Err("Unrecoverable mess.");
+ let _: isize = bad_err.unwrap_or_else(handler);
+}
+
+#[test]
+fn test_unwrap_unchecked() {
+ let ok: Result<isize, &'static str> = Ok(100);
+ assert_eq!(unsafe { ok.unwrap_unchecked() }, 100);
+}
+
+#[test]
+fn test_unwrap_err_unchecked() {
+ let ok_err: Result<isize, &'static str> = Err("Err");
+ assert_eq!(unsafe { ok_err.unwrap_err_unchecked() }, "Err");
+}
+
+#[test]
+pub fn test_expect_ok() {
+ let ok: Result<isize, &'static str> = Ok(100);
+ assert_eq!(ok.expect("Unexpected error"), 100);
+}
+#[test]
+#[should_panic(expected = "Got expected error: \"All good\"")]
+pub fn test_expect_err() {
+ let err: Result<isize, &'static str> = Err("All good");
+ err.expect("Got expected error");
+}
+
+#[test]
+pub fn test_expect_err_err() {
+ let ok: Result<&'static str, isize> = Err(100);
+ assert_eq!(ok.expect_err("Unexpected ok"), 100);
+}
+#[test]
+#[should_panic(expected = "Got expected ok: \"All good\"")]
+pub fn test_expect_err_ok() {
+ let err: Result<&'static str, isize> = Ok("All good");
+ err.expect_err("Got expected ok");
+}
+
+#[test]
+pub fn test_iter() {
+ let ok: Result<isize, &'static str> = Ok(100);
+ let mut it = ok.iter();
+ assert_eq!(it.size_hint(), (1, Some(1)));
+ assert_eq!(it.next(), Some(&100));
+ assert_eq!(it.size_hint(), (0, Some(0)));
+ assert!(it.next().is_none());
+ assert_eq!((&ok).into_iter().next(), Some(&100));
+
+ let err: Result<isize, &'static str> = Err("error");
+ assert_eq!(err.iter().next(), None);
+}
+
+#[test]
+pub fn test_iter_mut() {
+ let mut ok: Result<isize, &'static str> = Ok(100);
+ for loc in ok.iter_mut() {
+ *loc = 200;
+ }
+ assert_eq!(ok, Ok(200));
+ for loc in &mut ok {
+ *loc = 300;
+ }
+ assert_eq!(ok, Ok(300));
+
+ let mut err: Result<isize, &'static str> = Err("error");
+ for loc in err.iter_mut() {
+ *loc = 200;
+ }
+ assert_eq!(err, Err("error"));
+}
+
+#[test]
+pub fn test_unwrap_or_default() {
+ assert_eq!(op1().unwrap_or_default(), 666);
+ assert_eq!(op2().unwrap_or_default(), 0);
+}
+
+#[test]
+pub fn test_into_ok() {
+ fn infallible_op() -> Result<isize, !> {
+ Ok(666)
+ }
+
+ assert_eq!(infallible_op().into_ok(), 666);
+
+ enum MyNeverToken {}
+ impl From<MyNeverToken> for ! {
+ fn from(never: MyNeverToken) -> ! {
+ match never {}
+ }
+ }
+
+ fn infallible_op2() -> Result<isize, MyNeverToken> {
+ Ok(667)
+ }
+
+ assert_eq!(infallible_op2().into_ok(), 667);
+}
+
+#[test]
+pub fn test_into_err() {
+ fn until_error_op() -> Result<!, isize> {
+ Err(666)
+ }
+
+ assert_eq!(until_error_op().into_err(), 666);
+
+ enum MyNeverToken {}
+ impl From<MyNeverToken> for ! {
+ fn from(never: MyNeverToken) -> ! {
+ match never {}
+ }
+ }
+
+ fn until_error_op2() -> Result<MyNeverToken, isize> {
+ Err(667)
+ }
+
+ assert_eq!(until_error_op2().into_err(), 667);
+}
+
+#[test]
+fn test_try() {
+ fn try_result_ok() -> Result<u8, u32> {
+ let result: Result<u8, u8> = Ok(1);
+ let val = result?;
+ Ok(val)
+ }
+ assert_eq!(try_result_ok(), Ok(1));
+
+ fn try_result_err() -> Result<u8, u32> {
+ let result: Result<u8, u8> = Err(1);
+ let val = result?;
+ Ok(val)
+ }
+ assert_eq!(try_result_err(), Err(1));
+}
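`test_try` also shows that `?` converts the error type (`u8` into `u32`) on the way out. A sketch of the error-path behavior (hypothetical helper; the real desugaring goes through `Try`/`FromResidual`):

```rust
#[test]
fn try_error_conversion_sketch() {
    fn desugared(result: Result<u8, u8>) -> Result<u8, u32> {
        match result {
            Ok(v) => Ok(v),
            Err(e) => Err(u32::from(e)), // what `result?` does with the error
        }
    }
    assert_eq!(desugared(Ok(1)), Ok(1));
    assert_eq!(desugared(Err(1)), Err(1u32));
}
```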
+
+#[test]
+fn test_result_as_deref() {
+ // &Result<T: Deref, E>::Ok(T).as_deref() ->
+ // Result<&T::Deref::Target, &E>::Ok(&*T)
+ let ref_ok = &Result::Ok::<&i32, u8>(&42);
+ let expected_result = Result::Ok::<&i32, &u8>(&42);
+ assert_eq!(ref_ok.as_deref(), expected_result);
+
+ let ref_ok = &Result::Ok::<String, u32>(String::from("a result"));
+ let expected_result = Result::Ok::<&str, &u32>("a result");
+ assert_eq!(ref_ok.as_deref(), expected_result);
+
+ let ref_ok = &Result::Ok::<Vec<i32>, u32>(vec![1, 2, 3, 4, 5]);
+ let expected_result = Result::Ok::<&[i32], &u32>([1, 2, 3, 4, 5].as_slice());
+ assert_eq!(ref_ok.as_deref(), expected_result);
+
+ // &Result<T: Deref, E>::Err(T).as_deref() ->
+ // Result<&T::Deref::Target, &E>::Err(&*E)
+ let val = 41;
+ let ref_err = &Result::Err::<&u8, i32>(val);
+ let expected_result = Result::Err::<&u8, &i32>(&val);
+ assert_eq!(ref_err.as_deref(), expected_result);
+
+ let s = String::from("an error");
+ let ref_err = &Result::Err::<&u32, String>(s.clone());
+ let expected_result = Result::Err::<&u32, &String>(&s);
+ assert_eq!(ref_err.as_deref(), expected_result);
+
+ let v = vec![5, 4, 3, 2, 1];
+ let ref_err = &Result::Err::<&u32, Vec<i32>>(v.clone());
+ let expected_result = Result::Err::<&u32, &Vec<i32>>(&v);
+ assert_eq!(ref_err.as_deref(), expected_result);
+}
+
+#[test]
+fn test_result_as_deref_mut() {
+ // &mut Result<T: DerefMut, E>::Ok(T).as_deref_mut() ->
+ // Result<&mut T::DerefMut::Target, &mut E>::Ok(&mut *T)
+ let mut val = 42;
+ let mut expected_val = 42;
+ let mut_ok = &mut Result::Ok::<&mut i32, u8>(&mut val);
+ let expected_result = Result::Ok::<&mut i32, &mut u8>(&mut expected_val);
+ assert_eq!(mut_ok.as_deref_mut(), expected_result);
+
+ let mut expected_string = String::from("a result");
+ let mut_ok = &mut Result::Ok::<String, u32>(expected_string.clone());
+ let expected_result = Result::Ok::<&mut str, &mut u32>(expected_string.deref_mut());
+ assert_eq!(mut_ok.as_deref_mut(), expected_result);
+
+ let mut expected_vec = vec![1, 2, 3, 4, 5];
+ let mut_ok = &mut Result::Ok::<Vec<i32>, u32>(expected_vec.clone());
+ let expected_result = Result::Ok::<&mut [i32], &mut u32>(expected_vec.as_mut_slice());
+ assert_eq!(mut_ok.as_deref_mut(), expected_result);
+
+ // &mut Result<T: DerefMut, E>::Err(T).as_deref_mut() ->
+ // Result<&mut T, &mut E>::Err(&mut *E)
+ let mut val = 41;
+ let mut_err = &mut Result::Err::<&mut u8, i32>(val);
+ let expected_result = Result::Err::<&mut u8, &mut i32>(&mut val);
+ assert_eq!(mut_err.as_deref_mut(), expected_result);
+
+ let mut expected_string = String::from("an error");
+ let mut_err = &mut Result::Err::<&mut u32, String>(expected_string.clone());
+ let expected_result = Result::Err::<&mut u32, &mut String>(&mut expected_string);
+ assert_eq!(mut_err.as_deref_mut(), expected_result);
+
+ let mut expected_vec = vec![5, 4, 3, 2, 1];
+ let mut_err = &mut Result::Err::<&mut u32, Vec<i32>>(expected_vec.clone());
+ let expected_result = Result::Err::<&mut u32, &mut Vec<i32>>(&mut expected_vec);
+ assert_eq!(mut_err.as_deref_mut(), expected_result);
+}
+
+#[test]
+fn result_const() {
+ // test that the methods of `Result` are usable in a const context
+
+ const RESULT: Result<usize, bool> = Ok(32);
+
+ const REF: Result<&usize, &bool> = RESULT.as_ref();
+ assert_eq!(REF, Ok(&32));
+
+ const IS_OK: bool = RESULT.is_ok();
+ assert!(IS_OK);
+
+ const IS_ERR: bool = RESULT.is_err();
+ assert!(!IS_ERR)
+}
+
+#[test]
+const fn result_const_mut() {
+ let mut result: Result<usize, bool> = Ok(32);
+
+ {
+ let as_mut = result.as_mut();
+ match as_mut {
+ Ok(v) => *v = 42,
+ Err(_) => unreachable!(),
+ }
+ }
+
+ let mut result_err: Result<usize, bool> = Err(false);
+
+ {
+ let as_mut = result_err.as_mut();
+ match as_mut {
+ Ok(_) => unreachable!(),
+ Err(v) => *v = true,
+ }
+ }
+}
+
+#[test]
+fn result_opt_conversions() {
+ #[derive(Copy, Clone, Debug, PartialEq)]
+ struct BadNumErr;
+
+ fn try_num(x: i32) -> Result<i32, BadNumErr> {
+ if x <= 5 { Ok(x + 1) } else { Err(BadNumErr) }
+ }
+
+ type ResOpt = Result<Option<i32>, BadNumErr>;
+ type OptRes = Option<Result<i32, BadNumErr>>;
+
+ let mut x: ResOpt = Ok(Some(5));
+ let mut y: OptRes = Some(Ok(5));
+ assert_eq!(x, y.transpose());
+ assert_eq!(x.transpose(), y);
+
+ x = Ok(None);
+ y = None;
+ assert_eq!(x, y.transpose());
+ assert_eq!(x.transpose(), y);
+
+ x = Err(BadNumErr);
+ y = Some(Err(BadNumErr));
+ assert_eq!(x, y.transpose());
+ assert_eq!(x.transpose(), y);
+
+ let res: Result<Vec<i32>, BadNumErr> = (0..10)
+ .map(|x| {
+ let y = try_num(x)?;
+ Ok(if y % 2 == 0 { Some(y - 1) } else { None })
+ })
+ .filter_map(Result::transpose)
+ .collect();
+
+ assert_eq!(res, Err(BadNumErr))
+}
+
+#[test]
+fn result_try_trait_v2_branch() {
+ use core::num::NonZeroU32;
+ use core::ops::{ControlFlow::*, Try};
+ assert_eq!(Ok::<i32, i32>(4).branch(), Continue(4));
+ assert_eq!(Err::<i32, i32>(4).branch(), Break(Err(4)));
+ let one = NonZeroU32::new(1).unwrap();
+ assert_eq!(Ok::<(), NonZeroU32>(()).branch(), Continue(()));
+ assert_eq!(Err::<(), NonZeroU32>(one).branch(), Break(Err(one)));
+ assert_eq!(Ok::<NonZeroU32, ()>(one).branch(), Continue(one));
+ assert_eq!(Err::<NonZeroU32, ()>(()).branch(), Break(Err(())));
+}
diff --git a/library/core/tests/simd.rs b/library/core/tests/simd.rs
new file mode 100644
index 000000000..565c8975e
--- /dev/null
+++ b/library/core/tests/simd.rs
@@ -0,0 +1,14 @@
+use core::simd::f32x4;
+use core::simd::SimdFloat;
+
+#[test]
+fn testing() {
+ let x = f32x4::from_array([1.0, 1.0, 1.0, 1.0]);
+ let y = -x;
+
+ let h = x * f32x4::splat(0.5);
+
+ let r = y.abs();
+ assert_eq!(x, r);
+ assert_eq!(h, f32x4::splat(0.5));
+}
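+
+// A second minimal sketch using only lane-wise arithmetic (no reduction APIs
+// assumed): `splat` broadcasts a scalar and `+` applies independently per lane.
+#[test]
+fn splat_add_sketch() {
+    let x = f32x4::from_array([1.0, 2.0, 3.0, 4.0]);
+    let y = x + f32x4::splat(1.0);
+    assert_eq!(y, f32x4::from_array([2.0, 3.0, 4.0, 5.0]));
+}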
diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs
new file mode 100644
index 000000000..0656109e9
--- /dev/null
+++ b/library/core/tests/slice.rs
@@ -0,0 +1,2597 @@
+use core::cell::Cell;
+use core::cmp::Ordering;
+use core::mem::MaybeUninit;
+use core::result::Result::{Err, Ok};
+use core::slice;
+
+#[test]
+fn test_position() {
+ let b = [1, 2, 3, 5, 5];
+ assert_eq!(b.iter().position(|&v| v == 9), None);
+ assert_eq!(b.iter().position(|&v| v == 5), Some(3));
+ assert_eq!(b.iter().position(|&v| v == 3), Some(2));
+ assert_eq!(b.iter().position(|&v| v == 0), None);
+}
+
+#[test]
+fn test_rposition() {
+ let b = [1, 2, 3, 5, 5];
+ assert_eq!(b.iter().rposition(|&v| v == 9), None);
+ assert_eq!(b.iter().rposition(|&v| v == 5), Some(4));
+ assert_eq!(b.iter().rposition(|&v| v == 3), Some(2));
+ assert_eq!(b.iter().rposition(|&v| v == 0), None);
+}
+
+#[test]
+fn test_binary_search() {
+ let b: [i32; 0] = [];
+ assert_eq!(b.binary_search(&5), Err(0));
+
+ let b = [4];
+ assert_eq!(b.binary_search(&3), Err(0));
+ assert_eq!(b.binary_search(&4), Ok(0));
+ assert_eq!(b.binary_search(&5), Err(1));
+
+ let b = [1, 2, 4, 6, 8, 9];
+ assert_eq!(b.binary_search(&5), Err(3));
+ assert_eq!(b.binary_search(&6), Ok(3));
+ assert_eq!(b.binary_search(&7), Err(4));
+ assert_eq!(b.binary_search(&8), Ok(4));
+
+ let b = [1, 2, 4, 5, 6, 8];
+ assert_eq!(b.binary_search(&9), Err(6));
+
+ let b = [1, 2, 4, 6, 7, 8, 9];
+ assert_eq!(b.binary_search(&6), Ok(3));
+ assert_eq!(b.binary_search(&5), Err(3));
+ assert_eq!(b.binary_search(&8), Ok(5));
+
+ let b = [1, 2, 4, 5, 6, 8, 9];
+ assert_eq!(b.binary_search(&7), Err(5));
+ assert_eq!(b.binary_search(&0), Err(0));
+
+ let b = [1, 3, 3, 3, 7];
+ assert_eq!(b.binary_search(&0), Err(0));
+ assert_eq!(b.binary_search(&1), Ok(0));
+ assert_eq!(b.binary_search(&2), Err(1));
+ assert!(match b.binary_search(&3) {
+ Ok(1..=3) => true,
+ _ => false,
+ });
+ assert_eq!(b.binary_search(&4), Err(4));
+ assert_eq!(b.binary_search(&5), Err(4));
+ assert_eq!(b.binary_search(&6), Err(4));
+ assert_eq!(b.binary_search(&7), Ok(4));
+ assert_eq!(b.binary_search(&8), Err(5));
+
+ let b = [(); usize::MAX];
+ assert_eq!(b.binary_search(&()), Ok(usize::MAX / 2));
+}
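+
+// A minimal companion sketch: the index inside `Err` is the insertion point
+// that keeps the slice sorted, so it can feed `Vec::insert` directly.
+// (`Vec` is used here purely as a convenient growable buffer.)
+#[test]
+fn test_binary_search_insertion_point() {
+    let mut v = vec![1, 3, 5, 9];
+    let idx = v.binary_search(&7).unwrap_or_else(|e| e);
+    v.insert(idx, 7);
+    assert_eq!(v, [1, 3, 5, 7, 9]);
+}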
+
+#[test]
+fn test_binary_search_by_overflow() {
+ let b = [(); usize::MAX];
+ assert_eq!(b.binary_search_by(|_| Ordering::Equal), Ok(usize::MAX / 2));
+ assert_eq!(b.binary_search_by(|_| Ordering::Greater), Err(0));
+ assert_eq!(b.binary_search_by(|_| Ordering::Less), Err(usize::MAX));
+}
+
+#[test]
+// Test implementation-specific behavior when finding equivalent elements.
+// It is OK to break this test, but if you do, a crater run is highly advisable.
+fn test_binary_search_implementation_details() {
+ let b = [1, 1, 2, 2, 3, 3, 3];
+ assert_eq!(b.binary_search(&1), Ok(1));
+ assert_eq!(b.binary_search(&2), Ok(3));
+ assert_eq!(b.binary_search(&3), Ok(5));
+ let b = [1, 1, 1, 1, 1, 3, 3, 3, 3];
+ assert_eq!(b.binary_search(&1), Ok(4));
+ assert_eq!(b.binary_search(&3), Ok(7));
+ let b = [1, 1, 1, 1, 3, 3, 3, 3, 3];
+ assert_eq!(b.binary_search(&1), Ok(2));
+ assert_eq!(b.binary_search(&3), Ok(4));
+}
+
+#[test]
+fn test_partition_point() {
+ let b: [i32; 0] = [];
+ assert_eq!(b.partition_point(|&x| x < 5), 0);
+
+ let b = [4];
+ assert_eq!(b.partition_point(|&x| x < 3), 0);
+ assert_eq!(b.partition_point(|&x| x < 4), 0);
+ assert_eq!(b.partition_point(|&x| x < 5), 1);
+
+ let b = [1, 2, 4, 6, 8, 9];
+ assert_eq!(b.partition_point(|&x| x < 5), 3);
+ assert_eq!(b.partition_point(|&x| x < 6), 3);
+ assert_eq!(b.partition_point(|&x| x < 7), 4);
+ assert_eq!(b.partition_point(|&x| x < 8), 4);
+
+ let b = [1, 2, 4, 5, 6, 8];
+ assert_eq!(b.partition_point(|&x| x < 9), 6);
+
+ let b = [1, 2, 4, 6, 7, 8, 9];
+ assert_eq!(b.partition_point(|&x| x < 6), 3);
+ assert_eq!(b.partition_point(|&x| x < 5), 3);
+ assert_eq!(b.partition_point(|&x| x < 8), 5);
+
+ let b = [1, 2, 4, 5, 6, 8, 9];
+ assert_eq!(b.partition_point(|&x| x < 7), 5);
+ assert_eq!(b.partition_point(|&x| x < 0), 0);
+
+ let b = [1, 3, 3, 3, 7];
+ assert_eq!(b.partition_point(|&x| x < 0), 0);
+ assert_eq!(b.partition_point(|&x| x < 1), 0);
+ assert_eq!(b.partition_point(|&x| x < 2), 1);
+ assert_eq!(b.partition_point(|&x| x < 3), 1);
+ assert_eq!(b.partition_point(|&x| x < 4), 4);
+ assert_eq!(b.partition_point(|&x| x < 5), 4);
+ assert_eq!(b.partition_point(|&x| x < 6), 4);
+ assert_eq!(b.partition_point(|&x| x < 7), 4);
+ assert_eq!(b.partition_point(|&x| x < 8), 5);
+}
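+
+// Cross-check sketch: `partition_point(pred)` should agree with the `Err`
+// position of a `binary_search_by` whose comparator never returns `Equal`.
+#[test]
+fn test_partition_point_matches_binary_search_by() {
+    let b = [1, 2, 4, 6, 8, 9];
+    for needle in 0..10 {
+        let pp = b.partition_point(|&x| x < needle);
+        let bs = b
+            .binary_search_by(|&x| if x < needle { Ordering::Less } else { Ordering::Greater });
+        assert_eq!(bs, Err(pp));
+    }
+}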
+
+#[test]
+fn test_iterator_advance_by() {
+ let v = &[0, 1, 2, 3, 4];
+
+ for i in 0..=v.len() {
+ let mut iter = v.iter();
+ iter.advance_by(i).unwrap();
+ assert_eq!(iter.as_slice(), &v[i..]);
+ }
+
+ let mut iter = v.iter();
+ assert_eq!(iter.advance_by(v.len() + 1), Err(v.len()));
+ assert_eq!(iter.as_slice(), &[]);
+
+ let mut iter = v.iter();
+ iter.advance_by(3).unwrap();
+ assert_eq!(iter.as_slice(), &v[3..]);
+ iter.advance_by(2).unwrap();
+ assert_eq!(iter.as_slice(), &[]);
+ iter.advance_by(0).unwrap();
+}
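+
+// Equivalence sketch: a successful `advance_by(n)` should leave the iterator
+// in the same state as `n` discarded calls to `next()`.
+#[test]
+fn test_iterator_advance_by_matches_next() {
+    let v = &[0, 1, 2, 3, 4];
+    let mut a = v.iter();
+    let mut b = v.iter();
+    a.advance_by(2).unwrap();
+    b.next();
+    b.next();
+    assert_eq!(a.as_slice(), b.as_slice());
+}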
+
+#[test]
+fn test_iterator_advance_back_by() {
+ let v = &[0, 1, 2, 3, 4];
+
+ for i in 0..=v.len() {
+ let mut iter = v.iter();
+ iter.advance_back_by(i).unwrap();
+ assert_eq!(iter.as_slice(), &v[..v.len() - i]);
+ }
+
+ let mut iter = v.iter();
+ assert_eq!(iter.advance_back_by(v.len() + 1), Err(v.len()));
+ assert_eq!(iter.as_slice(), &[]);
+
+ let mut iter = v.iter();
+ iter.advance_back_by(3).unwrap();
+ assert_eq!(iter.as_slice(), &v[..v.len() - 3]);
+ iter.advance_back_by(2).unwrap();
+ assert_eq!(iter.as_slice(), &[]);
+ iter.advance_back_by(0).unwrap();
+}
+
+#[test]
+fn test_iterator_nth() {
+ let v: &[_] = &[0, 1, 2, 3, 4];
+ for i in 0..v.len() {
+ assert_eq!(v.iter().nth(i).unwrap(), &v[i]);
+ }
+ assert_eq!(v.iter().nth(v.len()), None);
+
+ let mut iter = v.iter();
+ assert_eq!(iter.nth(2).unwrap(), &v[2]);
+ assert_eq!(iter.nth(1).unwrap(), &v[4]);
+}
+
+#[test]
+fn test_iterator_nth_back() {
+ let v: &[_] = &[0, 1, 2, 3, 4];
+ for i in 0..v.len() {
+ assert_eq!(v.iter().nth_back(i).unwrap(), &v[v.len() - i - 1]);
+ }
+ assert_eq!(v.iter().nth_back(v.len()), None);
+
+ let mut iter = v.iter();
+ assert_eq!(iter.nth_back(2).unwrap(), &v[2]);
+ assert_eq!(iter.nth_back(1).unwrap(), &v[0]);
+}
+
+#[test]
+fn test_iterator_last() {
+ let v: &[_] = &[0, 1, 2, 3, 4];
+ assert_eq!(v.iter().last().unwrap(), &4);
+ assert_eq!(v[..1].iter().last().unwrap(), &0);
+}
+
+#[test]
+fn test_iterator_count() {
+ let v: &[_] = &[0, 1, 2, 3, 4];
+ assert_eq!(v.iter().count(), 5);
+
+ let mut iter2 = v.iter();
+ iter2.next();
+ iter2.next();
+ assert_eq!(iter2.count(), 3);
+}
+
+#[test]
+fn test_chunks_count() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let c = v.chunks(3);
+ assert_eq!(c.count(), 2);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let c2 = v2.chunks(2);
+ assert_eq!(c2.count(), 3);
+
+ let v3: &[i32] = &[];
+ let c3 = v3.chunks(2);
+ assert_eq!(c3.count(), 0);
+}
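+
+// Arithmetic sketch for the counts above: for `n > 0`, `chunks(n).count()`
+// is the ceiling of `len / n`, spelled here as `(len + n - 1) / n`.
+#[test]
+fn test_chunks_count_formula() {
+    let v: &[i32] = &[0, 1, 2, 3, 4, 5, 6];
+    for n in 1..=8 {
+        assert_eq!(v.chunks(n).count(), (v.len() + n - 1) / n);
+    }
+}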
+
+#[test]
+fn test_chunks_nth() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let mut c = v.chunks(2);
+ assert_eq!(c.nth(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[4, 5]);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let mut c2 = v2.chunks(3);
+ assert_eq!(c2.nth(1).unwrap(), &[3, 4]);
+ assert_eq!(c2.next(), None);
+}
+
+#[test]
+fn test_chunks_next() {
+ let v = [0, 1, 2, 3, 4, 5];
+ let mut c = v.chunks(2);
+ assert_eq!(c.next().unwrap(), &[0, 1]);
+ assert_eq!(c.next().unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[4, 5]);
+ assert_eq!(c.next(), None);
+
+ let v = [0, 1, 2, 3, 4, 5, 6, 7];
+ let mut c = v.chunks(3);
+ assert_eq!(c.next().unwrap(), &[0, 1, 2]);
+ assert_eq!(c.next().unwrap(), &[3, 4, 5]);
+ assert_eq!(c.next().unwrap(), &[6, 7]);
+ assert_eq!(c.next(), None);
+}
+
+#[test]
+fn test_chunks_next_back() {
+ let v = [0, 1, 2, 3, 4, 5];
+ let mut c = v.chunks(2);
+ assert_eq!(c.next_back().unwrap(), &[4, 5]);
+ assert_eq!(c.next_back().unwrap(), &[2, 3]);
+ assert_eq!(c.next_back().unwrap(), &[0, 1]);
+ assert_eq!(c.next_back(), None);
+
+ let v = [0, 1, 2, 3, 4, 5, 6, 7];
+ let mut c = v.chunks(3);
+ assert_eq!(c.next_back().unwrap(), &[6, 7]);
+ assert_eq!(c.next_back().unwrap(), &[3, 4, 5]);
+ assert_eq!(c.next_back().unwrap(), &[0, 1, 2]);
+ assert_eq!(c.next_back(), None);
+}
+
+#[test]
+fn test_chunks_nth_back() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let mut c = v.chunks(2);
+ assert_eq!(c.nth_back(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[0, 1]);
+ assert_eq!(c.next(), None);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let mut c2 = v2.chunks(3);
+ assert_eq!(c2.nth_back(1).unwrap(), &[0, 1, 2]);
+ assert_eq!(c2.next(), None);
+ assert_eq!(c2.next_back(), None);
+
+ let v3: &[i32] = &[0, 1, 2, 3, 4];
+ let mut c3 = v3.chunks(10);
+ assert_eq!(c3.nth_back(0).unwrap(), &[0, 1, 2, 3, 4]);
+ assert_eq!(c3.next(), None);
+
+ let v4: &[i32] = &[0, 1, 2];
+ let mut c4 = v4.chunks(10);
+ assert_eq!(c4.nth_back(1_000_000_000usize), None);
+}
+
+#[test]
+fn test_chunks_last() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let c = v.chunks(2);
+ assert_eq!(c.last().unwrap()[1], 5);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let c2 = v2.chunks(2);
+ assert_eq!(c2.last().unwrap()[0], 4);
+}
+
+#[test]
+fn test_chunks_zip() {
+ let v1: &[i32] = &[0, 1, 2, 3, 4];
+ let v2: &[i32] = &[6, 7, 8, 9, 10];
+
+ let res = v1
+ .chunks(2)
+ .zip(v2.chunks(2))
+ .map(|(a, b)| a.iter().sum::<i32>() + b.iter().sum::<i32>())
+ .collect::<Vec<_>>();
+ assert_eq!(res, vec![14, 22, 14]);
+}
+
+#[test]
+fn test_chunks_mut_count() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let c = v.chunks_mut(3);
+ assert_eq!(c.count(), 2);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let c2 = v2.chunks_mut(2);
+ assert_eq!(c2.count(), 3);
+
+ let v3: &mut [i32] = &mut [];
+ let c3 = v3.chunks_mut(2);
+ assert_eq!(c3.count(), 0);
+}
+
+#[test]
+fn test_chunks_mut_nth() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let mut c = v.chunks_mut(2);
+ assert_eq!(c.nth(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[4, 5]);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let mut c2 = v2.chunks_mut(3);
+ assert_eq!(c2.nth(1).unwrap(), &[3, 4]);
+ assert_eq!(c2.next(), None);
+}
+
+#[test]
+fn test_chunks_mut_nth_back() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let mut c = v.chunks_mut(2);
+ assert_eq!(c.nth_back(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[0, 1]);
+
+ let v1: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let mut c1 = v1.chunks_mut(3);
+ assert_eq!(c1.nth_back(1).unwrap(), &[0, 1, 2]);
+ assert_eq!(c1.next(), None);
+
+ let v3: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let mut c3 = v3.chunks_mut(10);
+ assert_eq!(c3.nth_back(0).unwrap(), &[0, 1, 2, 3, 4]);
+ assert_eq!(c3.next(), None);
+
+ let v4: &mut [i32] = &mut [0, 1, 2];
+ let mut c4 = v4.chunks_mut(10);
+ assert_eq!(c4.nth_back(1_000_000_000usize), None);
+}
+
+#[test]
+fn test_chunks_mut_last() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let c = v.chunks_mut(2);
+ assert_eq!(c.last().unwrap(), &[4, 5]);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let c2 = v2.chunks_mut(2);
+ assert_eq!(c2.last().unwrap(), &[4]);
+}
+
+#[test]
+fn test_chunks_mut_zip() {
+ let v1: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let v2: &[i32] = &[6, 7, 8, 9, 10];
+
+ for (a, b) in v1.chunks_mut(2).zip(v2.chunks(2)) {
+ let sum = b.iter().sum::<i32>();
+ for v in a {
+ *v += sum;
+ }
+ }
+ assert_eq!(v1, [13, 14, 19, 20, 14]);
+}
+
+#[test]
+fn test_chunks_mut_zip_aliasing() {
+ let v1: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let v2: &[i32] = &[6, 7, 8, 9, 10];
+
+ let mut it = v1.chunks_mut(2).zip(v2.chunks(2));
+ let first = it.next().unwrap();
+ let _ = it.next().unwrap();
+ assert_eq!(first, (&mut [0, 1][..], &[6, 7][..]));
+}
+
+#[test]
+fn test_chunks_exact_mut_zip_aliasing() {
+ let v1: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let v2: &[i32] = &[6, 7, 8, 9, 10];
+
+ let mut it = v1.chunks_exact_mut(2).zip(v2.chunks(2));
+ let first = it.next().unwrap();
+ let _ = it.next().unwrap();
+ assert_eq!(first, (&mut [0, 1][..], &[6, 7][..]));
+}
+
+#[test]
+fn test_rchunks_mut_zip_aliasing() {
+ let v1: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let v2: &[i32] = &[6, 7, 8, 9, 10];
+
+ let mut it = v1.rchunks_mut(2).zip(v2.chunks(2));
+ let first = it.next().unwrap();
+ let _ = it.next().unwrap();
+ assert_eq!(first, (&mut [3, 4][..], &[6, 7][..]));
+}
+
+#[test]
+fn test_rchunks_exact_mut_zip_aliasing() {
+ let v1: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let v2: &[i32] = &[6, 7, 8, 9, 10];
+
+ let mut it = v1.rchunks_exact_mut(2).zip(v2.chunks(2));
+ let first = it.next().unwrap();
+ let _ = it.next().unwrap();
+ assert_eq!(first, (&mut [3, 4][..], &[6, 7][..]));
+}
+
+#[test]
+fn test_chunks_exact_count() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let c = v.chunks_exact(3);
+ assert_eq!(c.count(), 2);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let c2 = v2.chunks_exact(2);
+ assert_eq!(c2.count(), 2);
+
+ let v3: &[i32] = &[];
+ let c3 = v3.chunks_exact(2);
+ assert_eq!(c3.count(), 0);
+}
+
+#[test]
+fn test_chunks_exact_nth() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let mut c = v.chunks_exact(2);
+ assert_eq!(c.nth(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[4, 5]);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4, 5, 6];
+ let mut c2 = v2.chunks_exact(3);
+ assert_eq!(c2.nth(1).unwrap(), &[3, 4, 5]);
+ assert_eq!(c2.next(), None);
+}
+
+#[test]
+fn test_chunks_exact_nth_back() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let mut c = v.chunks_exact(2);
+ assert_eq!(c.nth_back(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[0, 1]);
+ assert_eq!(c.next(), None);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let mut c2 = v2.chunks_exact(3);
+ assert_eq!(c2.nth_back(0).unwrap(), &[0, 1, 2]);
+ assert_eq!(c2.next(), None);
+ assert_eq!(c2.next_back(), None);
+
+ let v3: &[i32] = &[0, 1, 2, 3, 4];
+ let mut c3 = v3.chunks_exact(10);
+ assert_eq!(c3.nth_back(0), None);
+}
+
+#[test]
+fn test_chunks_exact_last() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let c = v.chunks_exact(2);
+ assert_eq!(c.last().unwrap(), &[4, 5]);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let c2 = v2.chunks_exact(2);
+ assert_eq!(c2.last().unwrap(), &[2, 3]);
+}
+
+#[test]
+fn test_chunks_exact_remainder() {
+ let v: &[i32] = &[0, 1, 2, 3, 4];
+ let c = v.chunks_exact(2);
+ assert_eq!(c.remainder(), &[4]);
+}
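+
+// Reconstruction sketch: the exact chunks plus `remainder()` partition the
+// original slice, so flattening them back together should recover it.
+#[test]
+fn test_chunks_exact_reconstruct() {
+    let v: &[i32] = &[0, 1, 2, 3, 4];
+    let mut flat: Vec<i32> = v.chunks_exact(2).flatten().copied().collect();
+    flat.extend_from_slice(v.chunks_exact(2).remainder());
+    assert_eq!(flat, v);
+}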
+
+#[test]
+fn test_chunks_exact_zip() {
+ let v1: &[i32] = &[0, 1, 2, 3, 4];
+ let v2: &[i32] = &[6, 7, 8, 9, 10];
+
+ let res = v1
+ .chunks_exact(2)
+ .zip(v2.chunks_exact(2))
+ .map(|(a, b)| a.iter().sum::<i32>() + b.iter().sum::<i32>())
+ .collect::<Vec<_>>();
+ assert_eq!(res, vec![14, 22]);
+}
+
+#[test]
+fn test_chunks_exact_mut_count() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let c = v.chunks_exact_mut(3);
+ assert_eq!(c.count(), 2);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let c2 = v2.chunks_exact_mut(2);
+ assert_eq!(c2.count(), 2);
+
+ let v3: &mut [i32] = &mut [];
+ let c3 = v3.chunks_exact_mut(2);
+ assert_eq!(c3.count(), 0);
+}
+
+#[test]
+fn test_chunks_exact_mut_nth() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let mut c = v.chunks_exact_mut(2);
+ assert_eq!(c.nth(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[4, 5]);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4, 5, 6];
+ let mut c2 = v2.chunks_exact_mut(3);
+ assert_eq!(c2.nth(1).unwrap(), &[3, 4, 5]);
+ assert_eq!(c2.next(), None);
+}
+
+#[test]
+fn test_chunks_exact_mut_nth_back() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let mut c = v.chunks_exact_mut(2);
+ assert_eq!(c.nth_back(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[0, 1]);
+ assert_eq!(c.next(), None);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let mut c2 = v2.chunks_exact_mut(3);
+ assert_eq!(c2.nth_back(0).unwrap(), &[0, 1, 2]);
+ assert_eq!(c2.next(), None);
+ assert_eq!(c2.next_back(), None);
+
+ let v3: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let mut c3 = v3.chunks_exact_mut(10);
+ assert_eq!(c3.nth_back(0), None);
+}
+
+#[test]
+fn test_chunks_exact_mut_last() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let c = v.chunks_exact_mut(2);
+ assert_eq!(c.last().unwrap(), &[4, 5]);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let c2 = v2.chunks_exact_mut(2);
+ assert_eq!(c2.last().unwrap(), &[2, 3]);
+}
+
+#[test]
+fn test_chunks_exact_mut_remainder() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let c = v.chunks_exact_mut(2);
+ assert_eq!(c.into_remainder(), &[4]);
+}
+
+#[test]
+fn test_chunks_exact_mut_zip() {
+ let v1: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let v2: &[i32] = &[6, 7, 8, 9, 10];
+
+ for (a, b) in v1.chunks_exact_mut(2).zip(v2.chunks_exact(2)) {
+ let sum = b.iter().sum::<i32>();
+ for v in a {
+ *v += sum;
+ }
+ }
+ assert_eq!(v1, [13, 14, 19, 20, 4]);
+}
+
+#[test]
+fn test_array_chunks_infer() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, -4];
+ let c = v.array_chunks();
+ for &[a, b, c] in c {
+ assert_eq!(a + b + c, 3);
+ }
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4, 5, 6];
+ let total = v2.array_chunks().map(|&[a, b]| a * b).sum::<i32>();
+ assert_eq!(total, 2 * 3 + 4 * 5);
+}
+
+#[test]
+fn test_array_chunks_count() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let c = v.array_chunks::<3>();
+ assert_eq!(c.count(), 2);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let c2 = v2.array_chunks::<2>();
+ assert_eq!(c2.count(), 2);
+
+ let v3: &[i32] = &[];
+ let c3 = v3.array_chunks::<2>();
+ assert_eq!(c3.count(), 0);
+}
+
+#[test]
+fn test_array_chunks_nth() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let mut c = v.array_chunks::<2>();
+ assert_eq!(c.nth(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[4, 5]);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4, 5, 6];
+ let mut c2 = v2.array_chunks::<3>();
+ assert_eq!(c2.nth(1).unwrap(), &[3, 4, 5]);
+ assert_eq!(c2.next(), None);
+}
+
+#[test]
+fn test_array_chunks_nth_back() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let mut c = v.array_chunks::<2>();
+ assert_eq!(c.nth_back(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[0, 1]);
+ assert_eq!(c.next(), None);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let mut c2 = v2.array_chunks::<3>();
+ assert_eq!(c2.nth_back(0).unwrap(), &[0, 1, 2]);
+ assert_eq!(c2.next(), None);
+ assert_eq!(c2.next_back(), None);
+
+ let v3: &[i32] = &[0, 1, 2, 3, 4];
+ let mut c3 = v3.array_chunks::<10>();
+ assert_eq!(c3.nth_back(0), None);
+}
+
+#[test]
+fn test_array_chunks_last() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let c = v.array_chunks::<2>();
+ assert_eq!(c.last().unwrap(), &[4, 5]);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let c2 = v2.array_chunks::<2>();
+ assert_eq!(c2.last().unwrap(), &[2, 3]);
+}
+
+#[test]
+fn test_array_chunks_remainder() {
+ let v: &[i32] = &[0, 1, 2, 3, 4];
+ let c = v.array_chunks::<2>();
+ assert_eq!(c.remainder(), &[4]);
+}
+
+#[test]
+fn test_array_chunks_zip() {
+ let v1: &[i32] = &[0, 1, 2, 3, 4];
+ let v2: &[i32] = &[6, 7, 8, 9, 10];
+
+ let res = v1
+ .array_chunks::<2>()
+ .zip(v2.array_chunks::<2>())
+ .map(|(a, b)| a.iter().sum::<i32>() + b.iter().sum::<i32>())
+ .collect::<Vec<_>>();
+ assert_eq!(res, vec![14, 22]);
+}
+
+#[test]
+fn test_array_chunks_mut_infer() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5, 6];
+ for a in v.array_chunks_mut() {
+ let sum = a.iter().sum::<i32>();
+ *a = [sum; 3];
+ }
+ assert_eq!(v, &[3, 3, 3, 12, 12, 12, 6]);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4, 5, 6];
+ v2.array_chunks_mut().for_each(|[a, b]| core::mem::swap(a, b));
+ assert_eq!(v2, &[1, 0, 3, 2, 5, 4, 6]);
+}
+
+#[test]
+fn test_array_chunks_mut_count() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let c = v.array_chunks_mut::<3>();
+ assert_eq!(c.count(), 2);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let c2 = v2.array_chunks_mut::<2>();
+ assert_eq!(c2.count(), 2);
+
+ let v3: &mut [i32] = &mut [];
+ let c3 = v3.array_chunks_mut::<2>();
+ assert_eq!(c3.count(), 0);
+}
+
+#[test]
+fn test_array_chunks_mut_nth() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let mut c = v.array_chunks_mut::<2>();
+ assert_eq!(c.nth(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[4, 5]);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4, 5, 6];
+ let mut c2 = v2.array_chunks_mut::<3>();
+ assert_eq!(c2.nth(1).unwrap(), &[3, 4, 5]);
+ assert_eq!(c2.next(), None);
+}
+
+#[test]
+fn test_array_chunks_mut_nth_back() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let mut c = v.array_chunks_mut::<2>();
+ assert_eq!(c.nth_back(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[0, 1]);
+ assert_eq!(c.next(), None);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let mut c2 = v2.array_chunks_mut::<3>();
+ assert_eq!(c2.nth_back(0).unwrap(), &[0, 1, 2]);
+ assert_eq!(c2.next(), None);
+ assert_eq!(c2.next_back(), None);
+
+ let v3: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let mut c3 = v3.array_chunks_mut::<10>();
+ assert_eq!(c3.nth_back(0), None);
+}
+
+#[test]
+fn test_array_chunks_mut_last() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let c = v.array_chunks_mut::<2>();
+ assert_eq!(c.last().unwrap(), &[4, 5]);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let c2 = v2.array_chunks_mut::<2>();
+ assert_eq!(c2.last().unwrap(), &[2, 3]);
+}
+
+#[test]
+fn test_array_chunks_mut_remainder() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let c = v.array_chunks_mut::<2>();
+ assert_eq!(c.into_remainder(), &[4]);
+}
+
+#[test]
+fn test_array_chunks_mut_zip() {
+ let v1: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let v2: &[i32] = &[6, 7, 8, 9, 10];
+
+ for (a, b) in v1.array_chunks_mut::<2>().zip(v2.array_chunks::<2>()) {
+ let sum = b.iter().sum::<i32>();
+ for v in a {
+ *v += sum;
+ }
+ }
+ assert_eq!(v1, [13, 14, 19, 20, 4]);
+}
+
+#[test]
+fn test_array_windows_infer() {
+ let v: &[i32] = &[0, 1, 0, 1];
+ assert_eq!(v.array_windows::<2>().count(), 3);
+ let c = v.array_windows();
+ for &[a, b] in c {
+ assert_eq!(a + b, 1);
+ }
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4, 5, 6];
+ let total = v2.array_windows().map(|&[a, b, c]| a + b + c).sum::<i32>();
+ assert_eq!(total, 3 + 6 + 9 + 12 + 15);
+}
+
+#[test]
+fn test_array_windows_count() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let c = v.array_windows::<3>();
+ assert_eq!(c.count(), 4);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let c2 = v2.array_windows::<6>();
+ assert_eq!(c2.count(), 0);
+
+ let v3: &[i32] = &[];
+ let c3 = v3.array_windows::<2>();
+ assert_eq!(c3.count(), 0);
+
+ let v4: &[()] = &[(); usize::MAX];
+ let c4 = v4.array_windows::<1>();
+ assert_eq!(c4.count(), usize::MAX);
+}
+
+#[test]
+fn test_array_windows_nth() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let snd = v.array_windows::<4>().nth(1);
+ assert_eq!(snd, Some(&[1, 2, 3, 4]));
+ let mut arr_windows = v.array_windows::<2>();
+ assert_ne!(arr_windows.nth(0), arr_windows.nth(0));
+ let last = v.array_windows::<3>().last();
+ assert_eq!(last, Some(&[3, 4, 5]));
+}
+
+#[test]
+fn test_array_windows_nth_back() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let snd = v.array_windows::<4>().nth_back(1);
+ assert_eq!(snd, Some(&[1, 2, 3, 4]));
+ let mut arr_windows = v.array_windows::<2>();
+ assert_ne!(arr_windows.nth_back(0), arr_windows.nth_back(0));
+}
+
+#[test]
+fn test_rchunks_count() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let c = v.rchunks(3);
+ assert_eq!(c.count(), 2);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let c2 = v2.rchunks(2);
+ assert_eq!(c2.count(), 3);
+
+ let v3: &[i32] = &[];
+ let c3 = v3.rchunks(2);
+ assert_eq!(c3.count(), 0);
+}
+
+#[test]
+fn test_rchunks_nth() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let mut c = v.rchunks(2);
+ assert_eq!(c.nth(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[0, 1]);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let mut c2 = v2.rchunks(3);
+ assert_eq!(c2.nth(1).unwrap(), &[0, 1]);
+ assert_eq!(c2.next(), None);
+}
+
+#[test]
+fn test_rchunks_nth_back() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let mut c = v.rchunks(2);
+ assert_eq!(c.nth_back(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next_back().unwrap(), &[4, 5]);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let mut c2 = v2.rchunks(3);
+ assert_eq!(c2.nth_back(1).unwrap(), &[2, 3, 4]);
+ assert_eq!(c2.next_back(), None);
+}
+
+#[test]
+fn test_rchunks_next() {
+ let v = [0, 1, 2, 3, 4, 5];
+ let mut c = v.rchunks(2);
+ assert_eq!(c.next().unwrap(), &[4, 5]);
+ assert_eq!(c.next().unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[0, 1]);
+ assert_eq!(c.next(), None);
+
+ let v = [0, 1, 2, 3, 4, 5, 6, 7];
+ let mut c = v.rchunks(3);
+ assert_eq!(c.next().unwrap(), &[5, 6, 7]);
+ assert_eq!(c.next().unwrap(), &[2, 3, 4]);
+ assert_eq!(c.next().unwrap(), &[0, 1]);
+ assert_eq!(c.next(), None);
+}
+
+#[test]
+fn test_rchunks_next_back() {
+ let v = [0, 1, 2, 3, 4, 5];
+ let mut c = v.rchunks(2);
+ assert_eq!(c.next_back().unwrap(), &[0, 1]);
+ assert_eq!(c.next_back().unwrap(), &[2, 3]);
+ assert_eq!(c.next_back().unwrap(), &[4, 5]);
+ assert_eq!(c.next_back(), None);
+
+ let v = [0, 1, 2, 3, 4, 5, 6, 7];
+ let mut c = v.rchunks(3);
+ assert_eq!(c.next_back().unwrap(), &[0, 1]);
+ assert_eq!(c.next_back().unwrap(), &[2, 3, 4]);
+ assert_eq!(c.next_back().unwrap(), &[5, 6, 7]);
+ assert_eq!(c.next_back(), None);
+}
+
+#[test]
+fn test_rchunks_last() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let c = v.rchunks(2);
+ assert_eq!(c.last().unwrap()[1], 1);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let c2 = v2.rchunks(2);
+ assert_eq!(c2.last().unwrap()[0], 0);
+}
+
+#[test]
+fn test_rchunks_zip() {
+ let v1: &[i32] = &[0, 1, 2, 3, 4];
+ let v2: &[i32] = &[6, 7, 8, 9, 10];
+
+ let res = v1
+ .rchunks(2)
+ .zip(v2.rchunks(2))
+ .map(|(a, b)| a.iter().sum::<i32>() + b.iter().sum::<i32>())
+ .collect::<Vec<_>>();
+ assert_eq!(res, vec![26, 18, 6]);
+}
+
+#[test]
+fn test_rchunks_mut_count() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let c = v.rchunks_mut(3);
+ assert_eq!(c.count(), 2);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let c2 = v2.rchunks_mut(2);
+ assert_eq!(c2.count(), 3);
+
+ let v3: &mut [i32] = &mut [];
+ let c3 = v3.rchunks_mut(2);
+ assert_eq!(c3.count(), 0);
+}
+
+#[test]
+fn test_rchunks_mut_nth() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let mut c = v.rchunks_mut(2);
+ assert_eq!(c.nth(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[0, 1]);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let mut c2 = v2.rchunks_mut(3);
+ assert_eq!(c2.nth(1).unwrap(), &[0, 1]);
+ assert_eq!(c2.next(), None);
+}
+
+#[test]
+fn test_rchunks_mut_nth_back() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let mut c = v.rchunks_mut(2);
+ assert_eq!(c.nth_back(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next_back().unwrap(), &[4, 5]);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let mut c2 = v2.rchunks_mut(3);
+ assert_eq!(c2.nth_back(1).unwrap(), &[2, 3, 4]);
+ assert_eq!(c2.next_back(), None);
+}
+
+#[test]
+fn test_rchunks_mut_next() {
+ let mut v = [0, 1, 2, 3, 4, 5];
+ let mut c = v.rchunks_mut(2);
+ assert_eq!(c.next().unwrap(), &mut [4, 5]);
+ assert_eq!(c.next().unwrap(), &mut [2, 3]);
+ assert_eq!(c.next().unwrap(), &mut [0, 1]);
+ assert_eq!(c.next(), None);
+
+ let mut v = [0, 1, 2, 3, 4, 5, 6, 7];
+ let mut c = v.rchunks_mut(3);
+ assert_eq!(c.next().unwrap(), &mut [5, 6, 7]);
+ assert_eq!(c.next().unwrap(), &mut [2, 3, 4]);
+ assert_eq!(c.next().unwrap(), &mut [0, 1]);
+ assert_eq!(c.next(), None);
+}
+
+#[test]
+fn test_rchunks_mut_next_back() {
+ let mut v = [0, 1, 2, 3, 4, 5];
+ let mut c = v.rchunks_mut(2);
+ assert_eq!(c.next_back().unwrap(), &mut [0, 1]);
+ assert_eq!(c.next_back().unwrap(), &mut [2, 3]);
+ assert_eq!(c.next_back().unwrap(), &mut [4, 5]);
+ assert_eq!(c.next_back(), None);
+
+ let mut v = [0, 1, 2, 3, 4, 5, 6, 7];
+ let mut c = v.rchunks_mut(3);
+ assert_eq!(c.next_back().unwrap(), &mut [0, 1]);
+ assert_eq!(c.next_back().unwrap(), &mut [2, 3, 4]);
+ assert_eq!(c.next_back().unwrap(), &mut [5, 6, 7]);
+ assert_eq!(c.next_back(), None);
+}
+
+#[test]
+fn test_rchunks_mut_last() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let c = v.rchunks_mut(2);
+ assert_eq!(c.last().unwrap(), &[0, 1]);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let c2 = v2.rchunks_mut(2);
+ assert_eq!(c2.last().unwrap(), &[0]);
+}
+
+#[test]
+fn test_rchunks_mut_zip() {
+ let v1: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let v2: &[i32] = &[6, 7, 8, 9, 10];
+
+ for (a, b) in v1.rchunks_mut(2).zip(v2.rchunks(2)) {
+ let sum = b.iter().sum::<i32>();
+ for v in a {
+ *v += sum;
+ }
+ }
+ assert_eq!(v1, [6, 16, 17, 22, 23]);
+}
+
+#[test]
+fn test_rchunks_exact_count() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let c = v.rchunks_exact(3);
+ assert_eq!(c.count(), 2);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let c2 = v2.rchunks_exact(2);
+ assert_eq!(c2.count(), 2);
+
+ let v3: &[i32] = &[];
+ let c3 = v3.rchunks_exact(2);
+ assert_eq!(c3.count(), 0);
+}
+
+#[test]
+fn test_rchunks_exact_nth() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let mut c = v.rchunks_exact(2);
+ assert_eq!(c.nth(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[0, 1]);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4, 5, 6];
+ let mut c2 = v2.rchunks_exact(3);
+ assert_eq!(c2.nth(1).unwrap(), &[1, 2, 3]);
+ assert_eq!(c2.next(), None);
+}
+
+#[test]
+fn test_rchunks_exact_nth_back() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let mut c = v.rchunks_exact(2);
+ assert_eq!(c.nth_back(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next_back().unwrap(), &[4, 5]);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4, 5, 6];
+ let mut c2 = v2.rchunks_exact(3);
+ assert_eq!(c2.nth_back(1).unwrap(), &[4, 5, 6]);
+ assert_eq!(c2.next(), None);
+}
+
+#[test]
+fn test_rchunks_exact_last() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let c = v.rchunks_exact(2);
+ assert_eq!(c.last().unwrap(), &[0, 1]);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let c2 = v2.rchunks_exact(2);
+ assert_eq!(c2.last().unwrap(), &[1, 2]);
+}
+
+#[test]
+fn test_rchunks_exact_remainder() {
+ let v: &[i32] = &[0, 1, 2, 3, 4];
+ let c = v.rchunks_exact(2);
+ assert_eq!(c.remainder(), &[0]);
+}
+
+#[test]
+fn test_rchunks_exact_zip() {
+ let v1: &[i32] = &[0, 1, 2, 3, 4];
+ let v2: &[i32] = &[6, 7, 8, 9, 10];
+
+ let res = v1
+ .rchunks_exact(2)
+ .zip(v2.rchunks_exact(2))
+ .map(|(a, b)| a.iter().sum::<i32>() + b.iter().sum::<i32>())
+ .collect::<Vec<_>>();
+ assert_eq!(res, vec![26, 18]);
+}
+
+#[test]
+fn test_rchunks_exact_mut_count() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let c = v.rchunks_exact_mut(3);
+ assert_eq!(c.count(), 2);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let c2 = v2.rchunks_exact_mut(2);
+ assert_eq!(c2.count(), 2);
+
+ let v3: &mut [i32] = &mut [];
+ let c3 = v3.rchunks_exact_mut(2);
+ assert_eq!(c3.count(), 0);
+}
+
+#[test]
+fn test_rchunks_exact_mut_nth() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let mut c = v.rchunks_exact_mut(2);
+ assert_eq!(c.nth(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[0, 1]);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4, 5, 6];
+ let mut c2 = v2.rchunks_exact_mut(3);
+ assert_eq!(c2.nth(1).unwrap(), &[1, 2, 3]);
+ assert_eq!(c2.next(), None);
+}
+
+#[test]
+fn test_rchunks_exact_mut_nth_back() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let mut c = v.rchunks_exact_mut(2);
+ assert_eq!(c.nth_back(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next_back().unwrap(), &[4, 5]);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4, 5, 6];
+ let mut c2 = v2.rchunks_exact_mut(3);
+ assert_eq!(c2.nth_back(1).unwrap(), &[4, 5, 6]);
+ assert_eq!(c2.next(), None);
+}
+
+#[test]
+fn test_rchunks_exact_mut_last() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let c = v.rchunks_exact_mut(2);
+ assert_eq!(c.last().unwrap(), &[0, 1]);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let c2 = v2.rchunks_exact_mut(2);
+ assert_eq!(c2.last().unwrap(), &[1, 2]);
+}
+
+#[test]
+fn test_rchunks_exact_mut_remainder() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let c = v.rchunks_exact_mut(2);
+ assert_eq!(c.into_remainder(), &[0]);
+}
+
+#[test]
+fn test_rchunks_exact_mut_zip() {
+ let v1: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let v2: &[i32] = &[6, 7, 8, 9, 10];
+
+ for (a, b) in v1.rchunks_exact_mut(2).zip(v2.rchunks_exact(2)) {
+ let sum = b.iter().sum::<i32>();
+ for v in a {
+ *v += sum;
+ }
+ }
+ assert_eq!(v1, [0, 16, 17, 22, 23]);
+}
+
+#[test]
+fn chunks_mut_are_send_and_sync() {
+ use std::cell::Cell;
+ use std::slice::{ChunksExactMut, ChunksMut, RChunksExactMut, RChunksMut};
+ use std::sync::MutexGuard;
+
+ #[allow(unused)]
+ fn assert_send_and_sync()
+ where
+ ChunksMut<'static, Cell<i32>>: Send,
+ ChunksMut<'static, MutexGuard<'static, u32>>: Sync,
+ ChunksExactMut<'static, Cell<i32>>: Send,
+ ChunksExactMut<'static, MutexGuard<'static, u32>>: Sync,
+ RChunksMut<'static, Cell<i32>>: Send,
+ RChunksMut<'static, MutexGuard<'static, u32>>: Sync,
+ RChunksExactMut<'static, Cell<i32>>: Send,
+ RChunksExactMut<'static, MutexGuard<'static, u32>>: Sync,
+ {
+ }
+}
+
+#[test]
+fn test_windows_count() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let c = v.windows(3);
+ assert_eq!(c.count(), 4);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let c2 = v2.windows(6);
+ assert_eq!(c2.count(), 0);
+
+ let v3: &[i32] = &[];
+ let c3 = v3.windows(2);
+ assert_eq!(c3.count(), 0);
+
+ let v4 = &[(); usize::MAX];
+ let c4 = v4.windows(1);
+ assert_eq!(c4.count(), usize::MAX);
+}
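+
+// Arithmetic sketch for the counts above: when `size <= len`,
+// `windows(size)` yields exactly `len - size + 1` windows.
+#[test]
+fn test_windows_count_formula() {
+    let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+    for size in 1..=v.len() {
+        assert_eq!(v.windows(size).count(), v.len() - size + 1);
+    }
+}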
+
+#[test]
+fn test_windows_nth() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let mut c = v.windows(2);
+ assert_eq!(c.nth(2).unwrap()[1], 3);
+ assert_eq!(c.next().unwrap()[0], 3);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let mut c2 = v2.windows(4);
+ assert_eq!(c2.nth(1).unwrap()[1], 2);
+ assert_eq!(c2.next(), None);
+}
+
+#[test]
+fn test_windows_nth_back() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let mut c = v.windows(2);
+ assert_eq!(c.nth_back(2).unwrap()[0], 2);
+ assert_eq!(c.next_back().unwrap()[1], 2);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let mut c2 = v2.windows(4);
+ assert_eq!(c2.nth_back(1).unwrap()[1], 1);
+ assert_eq!(c2.next_back(), None);
+}
+
+#[test]
+fn test_windows_last() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let c = v.windows(2);
+ assert_eq!(c.last().unwrap()[1], 5);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let c2 = v2.windows(2);
+ assert_eq!(c2.last().unwrap()[0], 3);
+}
+
+#[test]
+fn test_windows_zip() {
+ let v1: &[i32] = &[0, 1, 2, 3, 4];
+ let v2: &[i32] = &[6, 7, 8, 9, 10];
+
+ let res = v1
+ .windows(2)
+ .zip(v2.windows(2))
+ .map(|(a, b)| a.iter().sum::<i32>() + b.iter().sum::<i32>())
+ .collect::<Vec<_>>();
+
+ assert_eq!(res, [14, 18, 22, 26]);
+}
+
+#[test]
+#[allow(const_err)]
+fn test_iter_ref_consistency() {
+ use std::fmt::Debug;
+
+ fn test<T: Copy + Debug + PartialEq>(x: T) {
+ let v: &[T] = &[x, x, x];
+ let v_ptrs: [*const T; 3] = match v {
+ [ref v1, ref v2, ref v3] => [v1 as *const _, v2 as *const _, v3 as *const _],
+ _ => unreachable!(),
+ };
+ let len = v.len();
+
+ // nth(i)
+ for i in 0..len {
+ assert_eq!(&v[i] as *const _, v_ptrs[i]); // check the v_ptrs array, just to be sure
+ let nth = v.iter().nth(i).unwrap();
+ assert_eq!(nth as *const _, v_ptrs[i]);
+ }
+ assert_eq!(v.iter().nth(len), None, "nth(len) should return None");
+
+ // stepping through with nth(0)
+ {
+ let mut it = v.iter();
+ for i in 0..len {
+ let next = it.nth(0).unwrap();
+ assert_eq!(next as *const _, v_ptrs[i]);
+ }
+ assert_eq!(it.nth(0), None);
+ }
+
+ // next()
+ {
+ let mut it = v.iter();
+ for i in 0..len {
+ let remaining = len - i;
+ assert_eq!(it.size_hint(), (remaining, Some(remaining)));
+
+ let next = it.next().unwrap();
+ assert_eq!(next as *const _, v_ptrs[i]);
+ }
+ assert_eq!(it.size_hint(), (0, Some(0)));
+ assert_eq!(it.next(), None, "The final call to next() should return None");
+ }
+
+ // next_back()
+ {
+ let mut it = v.iter();
+ for i in 0..len {
+ let remaining = len - i;
+ assert_eq!(it.size_hint(), (remaining, Some(remaining)));
+
+ let prev = it.next_back().unwrap();
+ assert_eq!(prev as *const _, v_ptrs[remaining - 1]);
+ }
+ assert_eq!(it.size_hint(), (0, Some(0)));
+ assert_eq!(it.next_back(), None, "The final call to next_back() should return None");
+ }
+ }
+
+ fn test_mut<T: Copy + Debug + PartialEq>(x: T) {
+ let v: &mut [T] = &mut [x, x, x];
+ let v_ptrs: [*mut T; 3] = match v {
+ [ref v1, ref v2, ref v3] => {
+ [v1 as *const _ as *mut _, v2 as *const _ as *mut _, v3 as *const _ as *mut _]
+ }
+ _ => unreachable!(),
+ };
+ let len = v.len();
+
+ // nth(i)
+ for i in 0..len {
+ assert_eq!(&mut v[i] as *mut _, v_ptrs[i]); // check the v_ptrs array, just to be sure
+ let nth = v.iter_mut().nth(i).unwrap();
+ assert_eq!(nth as *mut _, v_ptrs[i]);
+ }
+        assert_eq!(v.iter_mut().nth(len), None, "nth(len) should return None");
+
+        // stepping through with nth(0)
+        {
+            let mut it = v.iter_mut();
+            for i in 0..len {
+                let next = it.nth(0).unwrap();
+                assert_eq!(next as *mut _, v_ptrs[i]);
+            }
+            assert_eq!(it.nth(0), None);
+        }
+
+ // next()
+ {
+ let mut it = v.iter_mut();
+ for i in 0..len {
+ let remaining = len - i;
+ assert_eq!(it.size_hint(), (remaining, Some(remaining)));
+
+ let next = it.next().unwrap();
+ assert_eq!(next as *mut _, v_ptrs[i]);
+ }
+ assert_eq!(it.size_hint(), (0, Some(0)));
+ assert_eq!(it.next(), None, "The final call to next() should return None");
+ }
+
+ // next_back()
+ {
+ let mut it = v.iter_mut();
+ for i in 0..len {
+ let remaining = len - i;
+ assert_eq!(it.size_hint(), (remaining, Some(remaining)));
+
+ let prev = it.next_back().unwrap();
+ assert_eq!(prev as *mut _, v_ptrs[remaining - 1]);
+ }
+ assert_eq!(it.size_hint(), (0, Some(0)));
+ assert_eq!(it.next_back(), None, "The final call to next_back() should return None");
+ }
+ }
+
+ // Make sure iterators and slice patterns yield consistent addresses for various types,
+ // including ZSTs.
+ test(0u32);
+ test(());
+    test([0u32; 0]); // ZST with alignment > 1
+ test_mut(0u32);
+ test_mut(());
+    test_mut([0u32; 0]); // ZST with alignment > 1
+}
+
+// The current implementation of SliceIndex does not handle the indexing
+// methods orthogonally to the range types; therefore, it is worth testing
+// every indexing operation on each kind of input.
+mod slice_index {
+    // This checks all six indexing methods, given an input range that
+    // should succeed. (It is NOT suitable for testing invalid inputs.)
+ macro_rules! assert_range_eq {
+ ($arr:expr, $range:expr, $expected:expr) => {
+ let mut arr = $arr;
+ let mut expected = $expected;
+ {
+ let s: &[_] = &arr;
+ let expected: &[_] = &expected;
+
+ assert_eq!(&s[$range], expected, "(in assertion for: index)");
+ assert_eq!(s.get($range), Some(expected), "(in assertion for: get)");
+ unsafe {
+ assert_eq!(
+ s.get_unchecked($range),
+ expected,
+ "(in assertion for: get_unchecked)",
+ );
+ }
+ }
+ {
+ let s: &mut [_] = &mut arr;
+ let expected: &mut [_] = &mut expected;
+
+ assert_eq!(&mut s[$range], expected, "(in assertion for: index_mut)",);
+ assert_eq!(
+ s.get_mut($range),
+ Some(&mut expected[..]),
+ "(in assertion for: get_mut)",
+ );
+ unsafe {
+ assert_eq!(
+ s.get_unchecked_mut($range),
+ expected,
+ "(in assertion for: get_unchecked_mut)",
+ );
+ }
+ }
+ };
+ }
+
+ // Make sure the macro can actually detect bugs,
+ // because if it can't, then what are we even doing here?
+ //
+ // (Be aware this only demonstrates the ability to detect bugs
+ // in the FIRST method that panics, as the macro is not designed
+ // to be used in `should_panic`)
+ #[test]
+ #[should_panic(expected = "out of range")]
+ fn assert_range_eq_can_fail_by_panic() {
+ assert_range_eq!([0, 1, 2], 0..5, [0, 1, 2]);
+ }
+
+ // (Be aware this only demonstrates the ability to detect bugs
+ // in the FIRST method it calls, as the macro is not designed
+ // to be used in `should_panic`)
+ #[test]
+ #[should_panic(expected = "==")]
+ fn assert_range_eq_can_fail_by_inequality() {
+ assert_range_eq!([0, 1, 2], 0..2, [0, 1, 2]);
+ }
+
+ // Test cases for bad index operations.
+ //
+ // This generates `should_panic` test cases for Index/IndexMut
+ // and `None` test cases for get/get_mut.
+ macro_rules! panic_cases {
+ ($(
+ // each test case needs a unique name to namespace the tests
+ in mod $case_name:ident {
+ data: $data:expr;
+
+ // optional:
+ //
+ // one or more similar inputs for which data[input] succeeds,
+ // and the corresponding output as an array. This helps validate
+ // "critical points" where an input range straddles the boundary
+ // between valid and invalid.
+ // (such as the input `len..len`, which is just barely valid)
+ $(
+ good: data[$good:expr] == $output:expr;
+ )*
+
+ bad: data[$bad:expr];
+ message: $expect_msg:expr;
+ }
+ )*) => {$(
+ mod $case_name {
+ #[allow(unused_imports)]
+ use core::ops::Bound;
+
+ #[test]
+ fn pass() {
+ let mut v = $data;
+
+ $( assert_range_eq!($data, $good, $output); )*
+
+ {
+ let v: &[_] = &v;
+ assert_eq!(v.get($bad), None, "(in None assertion for get)");
+ }
+
+ {
+ let v: &mut [_] = &mut v;
+ assert_eq!(v.get_mut($bad), None, "(in None assertion for get_mut)");
+ }
+ }
+
+ #[test]
+ #[should_panic(expected = $expect_msg)]
+ fn index_fail() {
+ let v = $data;
+ let v: &[_] = &v;
+ let _v = &v[$bad];
+ }
+
+ #[test]
+ #[should_panic(expected = $expect_msg)]
+ fn index_mut_fail() {
+ let mut v = $data;
+ let v: &mut [_] = &mut v;
+ let _v = &mut v[$bad];
+ }
+ }
+ )*};
+ }
+
+ #[test]
+ fn simple() {
+ let v = [0, 1, 2, 3, 4, 5];
+
+ assert_range_eq!(v, .., [0, 1, 2, 3, 4, 5]);
+ assert_range_eq!(v, ..2, [0, 1]);
+ assert_range_eq!(v, ..=1, [0, 1]);
+ assert_range_eq!(v, 2.., [2, 3, 4, 5]);
+ assert_range_eq!(v, 1..4, [1, 2, 3]);
+ assert_range_eq!(v, 1..=3, [1, 2, 3]);
+ }
+
+ panic_cases! {
+ in mod rangefrom_len {
+ data: [0, 1, 2, 3, 4, 5];
+
+ good: data[6..] == [];
+ bad: data[7..];
+ message: "out of range";
+ }
+
+ in mod rangeto_len {
+ data: [0, 1, 2, 3, 4, 5];
+
+ good: data[..6] == [0, 1, 2, 3, 4, 5];
+ bad: data[..7];
+ message: "out of range";
+ }
+
+ in mod rangetoinclusive_len {
+ data: [0, 1, 2, 3, 4, 5];
+
+ good: data[..=5] == [0, 1, 2, 3, 4, 5];
+ bad: data[..=6];
+ message: "out of range";
+ }
+
+ in mod rangeinclusive_len {
+ data: [0, 1, 2, 3, 4, 5];
+
+ good: data[0..=5] == [0, 1, 2, 3, 4, 5];
+ bad: data[0..=6];
+ message: "out of range";
+ }
+
+ in mod range_len_len {
+ data: [0, 1, 2, 3, 4, 5];
+
+ good: data[6..6] == [];
+ bad: data[7..7];
+ message: "out of range";
+ }
+
+ in mod rangeinclusive_len_len {
+ data: [0, 1, 2, 3, 4, 5];
+
+ good: data[6..=5] == [];
+ bad: data[7..=6];
+ message: "out of range";
+ }
+
+ in mod boundpair_len {
+ data: [0, 1, 2, 3, 4, 5];
+
+ good: data[(Bound::Included(6), Bound::Unbounded)] == [];
+ good: data[(Bound::Unbounded, Bound::Included(5))] == [0, 1, 2, 3, 4, 5];
+ good: data[(Bound::Unbounded, Bound::Excluded(6))] == [0, 1, 2, 3, 4, 5];
+ good: data[(Bound::Included(0), Bound::Included(5))] == [0, 1, 2, 3, 4, 5];
+ good: data[(Bound::Included(0), Bound::Excluded(6))] == [0, 1, 2, 3, 4, 5];
+ good: data[(Bound::Included(2), Bound::Excluded(4))] == [2, 3];
+ good: data[(Bound::Excluded(1), Bound::Included(4))] == [2, 3, 4];
+ good: data[(Bound::Excluded(5), Bound::Excluded(6))] == [];
+ good: data[(Bound::Included(6), Bound::Excluded(6))] == [];
+ good: data[(Bound::Excluded(5), Bound::Included(5))] == [];
+ good: data[(Bound::Included(6), Bound::Included(5))] == [];
+ bad: data[(Bound::Unbounded, Bound::Included(6))];
+ message: "out of range";
+ }
+ }
+
+ panic_cases! {
+ in mod rangeinclusive_exhausted {
+ data: [0, 1, 2, 3, 4, 5];
+
+ good: data[0..=5] == [0, 1, 2, 3, 4, 5];
+ good: data[{
+ let mut iter = 0..=5;
+ iter.by_ref().count(); // exhaust it
+ iter
+ }] == [];
+
+ // 0..=6 is out of range before exhaustion, so it
+ // stands to reason that it still would be after.
+ bad: data[{
+ let mut iter = 0..=6;
+ iter.by_ref().count(); // exhaust it
+ iter
+ }];
+ message: "out of range";
+ }
+ }
+
+ panic_cases! {
+ in mod range_neg_width {
+ data: [0, 1, 2, 3, 4, 5];
+
+ good: data[4..4] == [];
+ bad: data[4..3];
+ message: "but ends at";
+ }
+
+ in mod rangeinclusive_neg_width {
+ data: [0, 1, 2, 3, 4, 5];
+
+ good: data[4..=3] == [];
+ bad: data[4..=2];
+ message: "but ends at";
+ }
+
+ in mod boundpair_neg_width {
+ data: [0, 1, 2, 3, 4, 5];
+
+ good: data[(Bound::Included(4), Bound::Excluded(4))] == [];
+ bad: data[(Bound::Included(4), Bound::Excluded(3))];
+ message: "but ends at";
+ }
+ }
+
+ panic_cases! {
+ in mod rangeinclusive_overflow {
+ data: [0, 1];
+
+ // note: using 0 specifically ensures that the result of overflowing is 0..0,
+ // so that `get` doesn't simply return None for the wrong reason.
+ bad: data[0 ..= usize::MAX];
+ message: "maximum usize";
+ }
+
+ in mod rangetoinclusive_overflow {
+ data: [0, 1];
+
+ bad: data[..= usize::MAX];
+ message: "maximum usize";
+ }
+
+ in mod boundpair_overflow_end {
+ data: [0; 1];
+
+ bad: data[(Bound::Unbounded, Bound::Included(usize::MAX))];
+ message: "maximum usize";
+ }
+
+ in mod boundpair_overflow_start {
+ data: [0; 1];
+
+ bad: data[(Bound::Excluded(usize::MAX), Bound::Unbounded)];
+ message: "maximum usize";
+ }
+ } // panic_cases!
+}
+
+#[test]
+fn test_find_rfind() {
+ let v = [0, 1, 2, 3, 4, 5];
+ let mut iter = v.iter();
+ let mut i = v.len();
+ while let Some(&elt) = iter.rfind(|_| true) {
+ i -= 1;
+ assert_eq!(elt, v[i]);
+ }
+ assert_eq!(i, 0);
+ assert_eq!(v.iter().rfind(|&&x| x <= 3), Some(&3));
+}
+
+#[test]
+fn test_iter_folds() {
+ let a = [1, 2, 3, 4, 5]; // len>4 so the unroll is used
+ assert_eq!(a.iter().fold(0, |acc, &x| 2 * acc + x), 57);
+ assert_eq!(a.iter().rfold(0, |acc, &x| 2 * acc + x), 129);
+ let fold = |acc: i32, &x| acc.checked_mul(2)?.checked_add(x);
+ assert_eq!(a.iter().try_fold(0, &fold), Some(57));
+ assert_eq!(a.iter().try_rfold(0, &fold), Some(129));
+
+ // short-circuiting try_fold, through other methods
+ let a = [0, 1, 2, 3, 5, 5, 5, 7, 8, 9];
+ let mut iter = a.iter();
+ assert_eq!(iter.position(|&x| x == 3), Some(3));
+ assert_eq!(iter.rfind(|&&x| x == 5), Some(&5));
+ assert_eq!(iter.len(), 2);
+}
+
+#[test]
+fn test_rotate_left() {
+ const N: usize = 600;
+ let a: &mut [_] = &mut [0; N];
+ for i in 0..N {
+ a[i] = i;
+ }
+
+ a.rotate_left(42);
+ let k = N - 42;
+
+ for i in 0..N {
+ assert_eq!(a[(i + k) % N], i);
+ }
+}
+
+#[test]
+fn test_rotate_right() {
+ const N: usize = 600;
+ let a: &mut [_] = &mut [0; N];
+ for i in 0..N {
+ a[i] = i;
+ }
+
+ a.rotate_right(42);
+
+ for i in 0..N {
+ assert_eq!(a[(i + 42) % N], i);
+ }
+}
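+
+// Inverse sketch: `rotate_left(k)` followed by `rotate_right(k)` with the
+// same in-bounds `k` should restore the original contents.
+#[test]
+fn test_rotate_inverse() {
+    let mut a = [0, 1, 2, 3, 4, 5, 6];
+    let orig = a;
+    a.rotate_left(3);
+    assert_eq!(a, [3, 4, 5, 6, 0, 1, 2]);
+    a.rotate_right(3);
+    assert_eq!(a, orig);
+}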
+
+#[test]
+#[cfg_attr(miri, ignore)] // Miri is too slow
+fn brute_force_rotate_test_0() {
+    // Exhaustively check small lengths and shifts to catch edge cases at the
+    // boundaries between the multiple rotate algorithms.
+ let n = 300;
+ for len in 0..n {
+ for s in 0..len {
+ let mut v = Vec::with_capacity(len);
+ for i in 0..len {
+ v.push(i);
+ }
+ v[..].rotate_right(s);
+ for i in 0..v.len() {
+ assert_eq!(v[i], v.len().wrapping_add(i.wrapping_sub(s)) % v.len());
+ }
+ }
+ }
+}
+
+#[test]
+fn brute_force_rotate_test_1() {
+    // `ptr_rotate` covers so many kinds of pointer usage that this is simply a good test for
+    // pointers in general. `[usize; 4]` elements hit all algorithms without overwhelming Miri.
+ let n = 30;
+ for len in 0..n {
+ for s in 0..len {
+ let mut v: Vec<[usize; 4]> = Vec::with_capacity(len);
+ for i in 0..len {
+ v.push([i, 0, 0, 0]);
+ }
+ v[..].rotate_right(s);
+ for i in 0..v.len() {
+ assert_eq!(v[i][0], v.len().wrapping_add(i.wrapping_sub(s)) % v.len());
+ }
+ }
+ }
+}
+
+#[test]
+#[cfg(not(target_arch = "wasm32"))]
+fn sort_unstable() {
+ use core::cmp::Ordering::{Equal, Greater, Less};
+ use core::slice::heapsort;
+ use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng};
+
+    // Miri is too slow (but we still need the `chain` to make the types match)
+ let lens = if cfg!(miri) { (2..20).chain(0..0) } else { (2..25).chain(500..510) };
+ let rounds = if cfg!(miri) { 1 } else { 100 };
+
+ let mut v = [0; 600];
+ let mut tmp = [0; 600];
+ let mut rng = StdRng::from_entropy();
+
+ for len in lens {
+ let v = &mut v[0..len];
+ let tmp = &mut tmp[0..len];
+
+ for &modulus in &[5, 10, 100, 1000] {
+ for _ in 0..rounds {
+ for i in 0..len {
+ v[i] = rng.gen::<i32>() % modulus;
+ }
+
+ // Sort in default order.
+ tmp.copy_from_slice(v);
+ tmp.sort_unstable();
+ assert!(tmp.windows(2).all(|w| w[0] <= w[1]));
+
+ // Sort in ascending order.
+ tmp.copy_from_slice(v);
+ tmp.sort_unstable_by(|a, b| a.cmp(b));
+ assert!(tmp.windows(2).all(|w| w[0] <= w[1]));
+
+ // Sort in descending order.
+ tmp.copy_from_slice(v);
+ tmp.sort_unstable_by(|a, b| b.cmp(a));
+ assert!(tmp.windows(2).all(|w| w[0] >= w[1]));
+
+ // Test heapsort using `<` operator.
+ tmp.copy_from_slice(v);
+ heapsort(tmp, |a, b| a < b);
+ assert!(tmp.windows(2).all(|w| w[0] <= w[1]));
+
+ // Test heapsort using `>` operator.
+ tmp.copy_from_slice(v);
+ heapsort(tmp, |a, b| a > b);
+ assert!(tmp.windows(2).all(|w| w[0] >= w[1]));
+ }
+ }
+ }
+
+ // Sort using a completely random comparison function.
+ // This will reorder the elements *somehow*, but won't panic.
+ for i in 0..v.len() {
+ v[i] = i as i32;
+ }
+ v.sort_unstable_by(|_, _| *[Less, Equal, Greater].choose(&mut rng).unwrap());
+ v.sort_unstable();
+ for i in 0..v.len() {
+ assert_eq!(v[i], i as i32);
+ }
+
+ // Should not panic.
+ [0i32; 0].sort_unstable();
+ [(); 10].sort_unstable();
+ [(); 100].sort_unstable();
+
+ let mut v = [0xDEADBEEFu64];
+ v.sort_unstable();
+ assert!(v == [0xDEADBEEF]);
+}
+
+#[test]
+#[cfg(not(target_arch = "wasm32"))]
+#[cfg_attr(miri, ignore)] // Miri is too slow
+fn select_nth_unstable() {
+ use core::cmp::Ordering::{Equal, Greater, Less};
+ use rand::rngs::StdRng;
+ use rand::seq::SliceRandom;
+ use rand::{Rng, SeedableRng};
+
+ let mut rng = StdRng::from_entropy();
+
+ for len in (2..21).chain(500..501) {
+ let mut orig = vec![0; len];
+
+ for &modulus in &[5, 10, 1000] {
+ for _ in 0..10 {
+ for i in 0..len {
+ orig[i] = rng.gen::<i32>() % modulus;
+ }
+
+ let v_sorted = {
+ let mut v = orig.clone();
+ v.sort();
+ v
+ };
+
+ // Sort in default order.
+ for pivot in 0..len {
+ let mut v = orig.clone();
+ v.select_nth_unstable(pivot);
+
+ assert_eq!(v_sorted[pivot], v[pivot]);
+ for i in 0..pivot {
+ for j in pivot..len {
+ assert!(v[i] <= v[j]);
+ }
+ }
+ }
+
+ // Sort in ascending order.
+ for pivot in 0..len {
+ let mut v = orig.clone();
+ let (left, pivot, right) = v.select_nth_unstable_by(pivot, |a, b| a.cmp(b));
+
+ assert_eq!(left.len() + right.len(), len - 1);
+
+ for l in left {
+ assert!(l <= pivot);
+ for r in right.iter_mut() {
+ assert!(l <= r);
+ assert!(pivot <= r);
+ }
+ }
+ }
+
+ // Sort in descending order.
+ let sort_descending_comparator = |a: &i32, b: &i32| b.cmp(a);
+ let v_sorted_descending = {
+ let mut v = orig.clone();
+ v.sort_by(sort_descending_comparator);
+ v
+ };
+
+ for pivot in 0..len {
+ let mut v = orig.clone();
+ v.select_nth_unstable_by(pivot, sort_descending_comparator);
+
+ assert_eq!(v_sorted_descending[pivot], v[pivot]);
+ for i in 0..pivot {
+ for j in pivot..len {
+ assert!(v[j] <= v[i]);
+ }
+ }
+ }
+ }
+ }
+ }
+
+    // Select at every index using a completely random comparison function.
+ // This will reorder the elements *somehow*, but won't panic.
+ let mut v = [0; 500];
+ for i in 0..v.len() {
+ v[i] = i as i32;
+ }
+
+ for pivot in 0..v.len() {
+ v.select_nth_unstable_by(pivot, |_, _| *[Less, Equal, Greater].choose(&mut rng).unwrap());
+ v.sort();
+ for i in 0..v.len() {
+ assert_eq!(v[i], i as i32);
+ }
+ }
+
+ // Should not panic.
+ [(); 10].select_nth_unstable(0);
+ [(); 10].select_nth_unstable(5);
+ [(); 10].select_nth_unstable(9);
+ [(); 100].select_nth_unstable(0);
+ [(); 100].select_nth_unstable(50);
+ [(); 100].select_nth_unstable(99);
+
+ let mut v = [0xDEADBEEFu64];
+ v.select_nth_unstable(0);
+ assert!(v == [0xDEADBEEF]);
+}
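+
+// Minimal usage sketch: after `select_nth_unstable(k)`, index `k` holds the
+// value that would sit there if the slice were fully sorted (the median, for
+// `k = len / 2` on an odd-length slice).
+#[test]
+fn select_nth_unstable_median() {
+    let mut v = [9, 1, 7, 3, 5];
+    let (_, median, _) = v.select_nth_unstable(2);
+    assert_eq!(*median, 5);
+}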
+
+#[test]
+#[should_panic(expected = "index 0 greater than length of slice")]
+fn select_nth_unstable_zero_length() {
+ [0i32; 0].select_nth_unstable(0);
+}
+
+#[test]
+#[should_panic(expected = "index 20 greater than length of slice")]
+fn select_nth_unstable_past_length() {
+ [0i32; 10].select_nth_unstable(20);
+}
+
+pub mod memchr {
+ use core::slice::memchr::{memchr, memrchr};
+
+ // test fallback implementations on all platforms
+ #[test]
+ fn matches_one() {
+ assert_eq!(Some(0), memchr(b'a', b"a"));
+ }
+
+ #[test]
+ fn matches_begin() {
+ assert_eq!(Some(0), memchr(b'a', b"aaaa"));
+ }
+
+ #[test]
+ fn matches_end() {
+ assert_eq!(Some(4), memchr(b'z', b"aaaaz"));
+ }
+
+ #[test]
+ fn matches_nul() {
+ assert_eq!(Some(4), memchr(b'\x00', b"aaaa\x00"));
+ }
+
+ #[test]
+ fn matches_past_nul() {
+ assert_eq!(Some(5), memchr(b'z', b"aaaa\x00z"));
+ }
+
+ #[test]
+ fn no_match_empty() {
+ assert_eq!(None, memchr(b'a', b""));
+ }
+
+ #[test]
+ fn no_match() {
+ assert_eq!(None, memchr(b'a', b"xyz"));
+ }
+
+ #[test]
+ fn matches_one_reversed() {
+ assert_eq!(Some(0), memrchr(b'a', b"a"));
+ }
+
+ #[test]
+ fn matches_begin_reversed() {
+ assert_eq!(Some(3), memrchr(b'a', b"aaaa"));
+ }
+
+ #[test]
+ fn matches_end_reversed() {
+ assert_eq!(Some(0), memrchr(b'z', b"zaaaa"));
+ }
+
+ #[test]
+ fn matches_nul_reversed() {
+ assert_eq!(Some(4), memrchr(b'\x00', b"aaaa\x00"));
+ }
+
+ #[test]
+ fn matches_past_nul_reversed() {
+ assert_eq!(Some(0), memrchr(b'z', b"z\x00aaaa"));
+ }
+
+ #[test]
+ fn no_match_empty_reversed() {
+ assert_eq!(None, memrchr(b'a', b""));
+ }
+
+ #[test]
+ fn no_match_reversed() {
+ assert_eq!(None, memrchr(b'a', b"xyz"));
+ }
+
+ #[test]
+ fn each_alignment_reversed() {
+ let mut data = [1u8; 64];
+ let needle = 2;
+ let pos = 40;
+ data[pos] = needle;
+ for start in 0..16 {
+ assert_eq!(Some(pos - start), memrchr(needle, &data[start..]));
+ }
+ }
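+
+    // Cross-check sketch: on a small haystack, `memchr`/`memrchr` should
+    // agree with the naive `position`/`rposition` searches.
+    #[test]
+    fn matches_naive_search() {
+        let haystack = b"abcabc";
+        assert_eq!(memchr(b'c', haystack), haystack.iter().position(|&b| b == b'c'));
+        assert_eq!(memrchr(b'c', haystack), haystack.iter().rposition(|&b| b == b'c'));
+    }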
+}
+
+#[test]
+fn test_align_to_simple() {
+ let bytes = [1u8, 2, 3, 4, 5, 6, 7];
+ let (prefix, aligned, suffix) = unsafe { bytes.align_to::<u16>() };
+ assert_eq!(aligned.len(), 3);
+ assert!(prefix == [1] || suffix == [7]);
+ let expect1 = [1 << 8 | 2, 3 << 8 | 4, 5 << 8 | 6];
+ let expect2 = [1 | 2 << 8, 3 | 4 << 8, 5 | 6 << 8];
+ let expect3 = [2 << 8 | 3, 4 << 8 | 5, 6 << 8 | 7];
+ let expect4 = [2 | 3 << 8, 4 | 5 << 8, 6 | 7 << 8];
+ assert!(
+ aligned == expect1 || aligned == expect2 || aligned == expect3 || aligned == expect4,
+ "aligned={:?} expected={:?} || {:?} || {:?} || {:?}",
+ aligned,
+ expect1,
+ expect2,
+ expect3,
+ expect4
+ );
+}
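+
+// Length-accounting sketch: the three pieces returned by `align_to` always
+// partition the original slice, so their byte counts must add back up.
+#[test]
+fn test_align_to_lengths() {
+    let bytes = [1u8, 2, 3, 4, 5, 6, 7];
+    let (prefix, aligned, suffix) = unsafe { bytes.align_to::<u16>() };
+    assert_eq!(prefix.len() + 2 * aligned.len() + suffix.len(), bytes.len());
+}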
+
+#[test]
+fn test_align_to_zst() {
+ let bytes = [1, 2, 3, 4, 5, 6, 7];
+ let (prefix, aligned, suffix) = unsafe { bytes.align_to::<()>() };
+ assert_eq!(aligned.len(), 0);
+ assert!(prefix == [1, 2, 3, 4, 5, 6, 7] || suffix == [1, 2, 3, 4, 5, 6, 7]);
+}
+
+#[test]
+fn test_align_to_non_trivial() {
+ #[repr(align(8))]
+ struct U64(u64, u64);
+ #[repr(align(8))]
+ struct U64U64U32(u64, u64, u32);
+ let data = [
+ U64(1, 2),
+ U64(3, 4),
+ U64(5, 6),
+ U64(7, 8),
+ U64(9, 10),
+ U64(11, 12),
+ U64(13, 14),
+ U64(15, 16),
+ ];
+ let (prefix, aligned, suffix) = unsafe { data.align_to::<U64U64U32>() };
+ assert_eq!(aligned.len(), 4);
+ assert_eq!(prefix.len() + suffix.len(), 2);
+}
+
+#[test]
+fn test_align_to_empty_mid() {
+ use core::mem;
+
+ // Make sure that we do not create empty unaligned slices for the mid part, even when the
+ // overall slice is too short to contain an aligned address.
+ let bytes = [1, 2, 3, 4, 5, 6, 7];
+ type Chunk = u32;
+ for offset in 0..4 {
+ let (_, mid, _) = unsafe { bytes[offset..offset + 1].align_to::<Chunk>() };
+ assert_eq!(mid.as_ptr() as usize % mem::align_of::<Chunk>(), 0);
+ }
+}
+
+#[test]
+fn test_align_to_mut_aliasing() {
+ let mut val = [1u8, 2, 3, 4, 5];
+ // `align_to_mut` used to create `mid` in a way that there was some intermediate
+ // incorrect aliasing, invalidating the resulting `mid` slice.
+ let (begin, mid, end) = unsafe { val.align_to_mut::<[u8; 2]>() };
+ assert!(begin.len() == 0);
+ assert!(end.len() == 1);
+ mid[0] = mid[1];
+ assert_eq!(val, [3, 4, 3, 4, 5])
+}
+
+#[test]
+fn test_slice_partition_dedup_by() {
+ let mut slice: [i32; 9] = [1, -1, 2, 3, 1, -5, 5, -2, 2];
+
+ let (dedup, duplicates) = slice.partition_dedup_by(|a, b| a.abs() == b.abs());
+
+ assert_eq!(dedup, [1, 2, 3, 1, -5, -2]);
+ assert_eq!(duplicates, [5, -1, 2]);
+}
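+
+// Cross-check sketch: the deduplicated front half returned by
+// `partition_dedup` should match what `Vec::dedup` keeps for the same input.
+#[test]
+fn test_slice_partition_dedup_matches_vec_dedup() {
+    let mut slice = [1, 1, 2, 3, 3, 3, 4];
+    let mut expected = slice.to_vec();
+    expected.dedup();
+
+    let (dedup, _) = slice.partition_dedup();
+
+    assert_eq!(dedup, expected.as_slice());
+}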
+
+#[test]
+fn test_slice_partition_dedup_empty() {
+ let mut slice: [i32; 0] = [];
+
+ let (dedup, duplicates) = slice.partition_dedup();
+
+ assert_eq!(dedup, []);
+ assert_eq!(duplicates, []);
+}
+
+#[test]
+fn test_slice_partition_dedup_one() {
+ let mut slice = [12];
+
+ let (dedup, duplicates) = slice.partition_dedup();
+
+ assert_eq!(dedup, [12]);
+ assert_eq!(duplicates, []);
+}
+
+#[test]
+fn test_slice_partition_dedup_multiple_ident() {
+ let mut slice = [12, 12, 12, 12, 12, 11, 11, 11, 11, 11, 11];
+
+ let (dedup, duplicates) = slice.partition_dedup();
+
+ assert_eq!(dedup, [12, 11]);
+ assert_eq!(duplicates, [12, 12, 12, 12, 11, 11, 11, 11, 11]);
+}
+
+#[test]
+fn test_slice_partition_dedup_partialeq() {
+ #[derive(Debug)]
+ struct Foo(i32, i32);
+
+ impl PartialEq for Foo {
+ fn eq(&self, other: &Foo) -> bool {
+ self.0 == other.0
+ }
+ }
+
+ let mut slice = [Foo(0, 1), Foo(0, 5), Foo(1, 7), Foo(1, 9)];
+
+ let (dedup, duplicates) = slice.partition_dedup();
+
+ assert_eq!(dedup, [Foo(0, 1), Foo(1, 7)]);
+ assert_eq!(duplicates, [Foo(0, 5), Foo(1, 9)]);
+}
+
+#[test]
+fn test_copy_within() {
+ // Start to end, with a RangeTo.
+ let mut bytes = *b"Hello, World!";
+ bytes.copy_within(..3, 10);
+ assert_eq!(&bytes, b"Hello, WorHel");
+
+ // End to start, with a RangeFrom.
+ let mut bytes = *b"Hello, World!";
+ bytes.copy_within(10.., 0);
+ assert_eq!(&bytes, b"ld!lo, World!");
+
+ // Overlapping, with a RangeInclusive.
+ let mut bytes = *b"Hello, World!";
+ bytes.copy_within(0..=11, 1);
+ assert_eq!(&bytes, b"HHello, World");
+
+ // Whole slice, with a RangeFull.
+ let mut bytes = *b"Hello, World!";
+ bytes.copy_within(.., 0);
+ assert_eq!(&bytes, b"Hello, World!");
+
+ // Ensure that copying at the end of the slice won't cause UB.
+ let mut bytes = *b"Hello, World!";
+ bytes.copy_within(13..13, 5);
+ assert_eq!(&bytes, b"Hello, World!");
+ bytes.copy_within(5..5, 13);
+ assert_eq!(&bytes, b"Hello, World!");
+}
+
+#[test]
+#[should_panic(expected = "range end index 14 out of range for slice of length 13")]
+fn test_copy_within_panics_src_too_long() {
+ let mut bytes = *b"Hello, World!";
+ // The length is only 13, so 14 is out of bounds.
+ bytes.copy_within(10..14, 0);
+}
+
+#[test]
+#[should_panic(expected = "dest is out of bounds")]
+fn test_copy_within_panics_dest_too_long() {
+ let mut bytes = *b"Hello, World!";
+ // The length is only 13, so a slice of length 4 starting at index 10 is out of bounds.
+ bytes.copy_within(0..4, 10);
+}
+
+#[test]
+#[should_panic(expected = "slice index starts at 2 but ends at 1")]
+fn test_copy_within_panics_src_inverted() {
+ let mut bytes = *b"Hello, World!";
+ // 2 is greater than 1, so this range is invalid.
+ bytes.copy_within(2..1, 0);
+}
+
+#[test]
+#[should_panic(expected = "attempted to index slice up to maximum usize")]
+fn test_copy_within_panics_src_out_of_bounds() {
+ let mut bytes = *b"Hello, World!";
+ // An inclusive range ending at `usize::MAX` would make `src_end` overflow.
+ bytes.copy_within(usize::MAX..=usize::MAX, 0);
+}
+
+#[test]
+fn test_is_sorted() {
+ let empty: [i32; 0] = [];
+
+ assert!([1, 2, 2, 9].is_sorted());
+ assert!(![1, 3, 2].is_sorted());
+ assert!([0].is_sorted());
+ assert!(empty.is_sorted());
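+ // `partial_cmp` returns `None` for NaN, so any slice containing one is not
+ // considered sorted.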
+ assert!(![0.0, 1.0, f32::NAN].is_sorted());
+ assert!([-2, -1, 0, 3].is_sorted());
+ assert!(![-2i32, -1, 0, 3].is_sorted_by_key(|n| n.abs()));
+ assert!(!["c", "bb", "aaa"].is_sorted());
+ assert!(["c", "bb", "aaa"].is_sorted_by_key(|s| s.len()));
+}
+
+#[test]
+fn test_slice_run_destructors() {
+ // Make sure that destructors get run on slice literals
+ struct Foo<'a> {
+ x: &'a Cell<isize>,
+ }
+
+ impl<'a> Drop for Foo<'a> {
+ fn drop(&mut self) {
+ self.x.set(self.x.get() + 1);
+ }
+ }
+
+ fn foo(x: &Cell<isize>) -> Foo<'_> {
+ Foo { x }
+ }
+
+ let x = &Cell::new(0);
+
+ {
+ let l = &[foo(x)];
+ assert_eq!(l[0].x.get(), 0);
+ }
+
+ assert_eq!(x.get(), 1);
+}
+
+#[test]
+fn test_const_from_ref() {
+ const VALUE: &i32 = &1;
+ const SLICE: &[i32] = core::slice::from_ref(VALUE);
+
+ assert!(core::ptr::eq(VALUE, &SLICE[0]))
+}
+
+#[test]
+fn test_slice_fill_with_uninit() {
+ // This should not be UB. See #87891
+ let mut a = [MaybeUninit::<u8>::uninit(); 10];
+ a.fill(MaybeUninit::uninit());
+}
+
+#[test]
+fn test_swap() {
+ let mut x = ["a", "b", "c", "d"];
+ x.swap(1, 3);
+ assert_eq!(x, ["a", "d", "c", "b"]);
+ x.swap(0, 3);
+ assert_eq!(x, ["b", "d", "c", "a"]);
+}
+
+mod swap_panics {
+ #[test]
+ #[should_panic(expected = "index out of bounds: the len is 4 but the index is 4")]
+ fn index_a_equals_len() {
+ let mut x = ["a", "b", "c", "d"];
+ x.swap(4, 2);
+ }
+
+ #[test]
+ #[should_panic(expected = "index out of bounds: the len is 4 but the index is 4")]
+ fn index_b_equals_len() {
+ let mut x = ["a", "b", "c", "d"];
+ x.swap(2, 4);
+ }
+
+ #[test]
+ #[should_panic(expected = "index out of bounds: the len is 4 but the index is 5")]
+ fn index_a_greater_than_len() {
+ let mut x = ["a", "b", "c", "d"];
+ x.swap(5, 2);
+ }
+
+ #[test]
+ #[should_panic(expected = "index out of bounds: the len is 4 but the index is 5")]
+ fn index_b_greater_than_len() {
+ let mut x = ["a", "b", "c", "d"];
+ x.swap(2, 5);
+ }
+}
+
+#[test]
+fn slice_split_array_mut() {
+ let v = &mut [1, 2, 3, 4, 5, 6][..];
+
+ {
+ let (left, right) = v.split_array_mut::<0>();
+ assert_eq!(left, &mut []);
+ assert_eq!(right, [1, 2, 3, 4, 5, 6]);
+ }
+
+ {
+ let (left, right) = v.split_array_mut::<6>();
+ assert_eq!(left, &mut [1, 2, 3, 4, 5, 6]);
+ assert_eq!(right, []);
+ }
+}
+
+#[test]
+fn slice_rsplit_array_mut() {
+ let v = &mut [1, 2, 3, 4, 5, 6][..];
+
+ {
+ let (left, right) = v.rsplit_array_mut::<0>();
+ assert_eq!(left, [1, 2, 3, 4, 5, 6]);
+ assert_eq!(right, &mut []);
+ }
+
+ {
+ let (left, right) = v.rsplit_array_mut::<6>();
+ assert_eq!(left, []);
+ assert_eq!(right, &mut [1, 2, 3, 4, 5, 6]);
+ }
+}
+
+#[test]
+fn split_as_slice() {
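+ // `as_slice` returns the not-yet-iterated remainder; each matched
+ // separator (the even elements) advances it past the consumed prefix.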
+ let arr = [1, 2, 3, 4, 5, 6];
+ let mut split = arr.split(|v| v % 2 == 0);
+ assert_eq!(split.as_slice(), &[1, 2, 3, 4, 5, 6]);
+ assert!(split.next().is_some());
+ assert_eq!(split.as_slice(), &[3, 4, 5, 6]);
+ assert!(split.next().is_some());
+ assert!(split.next().is_some());
+ assert_eq!(split.as_slice(), &[]);
+}
+
+#[should_panic]
+#[test]
+fn slice_split_array_ref_out_of_bounds() {
+ let v = &[1, 2, 3, 4, 5, 6][..];
+
+ let _ = v.split_array_ref::<7>();
+}
+
+#[should_panic]
+#[test]
+fn slice_split_array_mut_out_of_bounds() {
+ let v = &mut [1, 2, 3, 4, 5, 6][..];
+
+ let _ = v.split_array_mut::<7>();
+}
+
+#[should_panic]
+#[test]
+fn slice_rsplit_array_ref_out_of_bounds() {
+ let v = &[1, 2, 3, 4, 5, 6][..];
+
+ let _ = v.rsplit_array_ref::<7>();
+}
+
+#[should_panic]
+#[test]
+fn slice_rsplit_array_mut_out_of_bounds() {
+ let v = &mut [1, 2, 3, 4, 5, 6][..];
+
+ let _ = v.rsplit_array_mut::<7>();
+}
+
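+// Generates one `#[test]` per tuple below: each test calls `$method` on a
+// fresh copy of `$slice`, asserting both the returned value and the slice
+// that remains afterwards. The leading rules only pin down the slice type.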
+macro_rules! take_tests {
+ (slice: &[], $($tts:tt)*) => {
+ take_tests!(ty: &[()], slice: &[], $($tts)*);
+ };
+ (slice: &mut [], $($tts:tt)*) => {
+ take_tests!(ty: &mut [()], slice: &mut [], $($tts)*);
+ };
+ (slice: &$slice:expr, $($tts:tt)*) => {
+ take_tests!(ty: &[_], slice: &$slice, $($tts)*);
+ };
+ (slice: &mut $slice:expr, $($tts:tt)*) => {
+ take_tests!(ty: &mut [_], slice: &mut $slice, $($tts)*);
+ };
+ (ty: $ty:ty, slice: $slice:expr, method: $method:ident, $(($test_name:ident, ($($args:expr),*), $output:expr, $remaining:expr),)*) => {
+ $(
+ #[test]
+ fn $test_name() {
+ let mut slice: $ty = $slice;
+ assert_eq!($output, slice.$method($($args)*));
+ let remaining: $ty = $remaining;
+ assert_eq!(remaining, slice);
+ }
+ )*
+ };
+}
+
+take_tests! {
+ slice: &[0, 1, 2, 3], method: take,
+ (take_in_bounds_range_to, (..1), Some(&[0] as _), &[1, 2, 3]),
+ (take_in_bounds_range_to_inclusive, (..=0), Some(&[0] as _), &[1, 2, 3]),
+ (take_in_bounds_range_from, (2..), Some(&[2, 3] as _), &[0, 1]),
+ (take_oob_range_to, (..5), None, &[0, 1, 2, 3]),
+ (take_oob_range_to_inclusive, (..=4), None, &[0, 1, 2, 3]),
+ (take_oob_range_from, (5..), None, &[0, 1, 2, 3]),
+}
+
+take_tests! {
+ slice: &mut [0, 1, 2, 3], method: take_mut,
+ (take_mut_in_bounds_range_to, (..1), Some(&mut [0] as _), &mut [1, 2, 3]),
+ (take_mut_in_bounds_range_to_inclusive, (..=0), Some(&mut [0] as _), &mut [1, 2, 3]),
+ (take_mut_in_bounds_range_from, (2..), Some(&mut [2, 3] as _), &mut [0, 1]),
+ (take_mut_oob_range_to, (..5), None, &mut [0, 1, 2, 3]),
+ (take_mut_oob_range_to_inclusive, (..=4), None, &mut [0, 1, 2, 3]),
+ (take_mut_oob_range_from, (5..), None, &mut [0, 1, 2, 3]),
+}
+
+take_tests! {
+ slice: &[1, 2], method: take_first,
+ (take_first_nonempty, (), Some(&1), &[2]),
+}
+
+take_tests! {
+ slice: &mut [1, 2], method: take_first_mut,
+ (take_first_mut_nonempty, (), Some(&mut 1), &mut [2]),
+}
+
+take_tests! {
+ slice: &[1, 2], method: take_last,
+ (take_last_nonempty, (), Some(&2), &[1]),
+}
+
+take_tests! {
+ slice: &mut [1, 2], method: take_last_mut,
+ (take_last_mut_nonempty, (), Some(&mut 2), &mut [1]),
+}
+
+take_tests! {
+ slice: &[], method: take_first,
+ (take_first_empty, (), None, &[]),
+}
+
+take_tests! {
+ slice: &mut [], method: take_first_mut,
+ (take_first_mut_empty, (), None, &mut []),
+}
+
+take_tests! {
+ slice: &[], method: take_last,
+ (take_last_empty, (), None, &[]),
+}
+
+take_tests! {
+ slice: &mut [], method: take_last_mut,
+ (take_last_mut_empty, (), None, &mut []),
+}
+
+#[cfg(not(miri))] // unused in Miri
+const EMPTY_MAX: &'static [()] = &[(); usize::MAX];
+
+// can't be a constant due to const mutability rules
+#[cfg(not(miri))] // unused in Miri
+macro_rules! empty_max_mut {
+ () => {
+ &mut [(); usize::MAX] as _
+ };
+}
+
+#[cfg(not(miri))] // Comparing usize::MAX many elements takes forever in Miri (and in rustc without optimizations)
+take_tests! {
+ slice: &[(); usize::MAX], method: take,
+ (take_in_bounds_max_range_to, (..usize::MAX), Some(EMPTY_MAX), &[(); 0]),
+ (take_oob_max_range_to_inclusive, (..=usize::MAX), None, EMPTY_MAX),
+ (take_in_bounds_max_range_from, (usize::MAX..), Some(&[] as _), EMPTY_MAX),
+}
+
+#[cfg(not(miri))] // Comparing usize::MAX many elements takes forever in Miri (and in rustc without optimizations)
+take_tests! {
+ slice: &mut [(); usize::MAX], method: take_mut,
+ (take_mut_in_bounds_max_range_to, (..usize::MAX), Some(empty_max_mut!()), &mut [(); 0]),
+ (take_mut_oob_max_range_to_inclusive, (..=usize::MAX), None, empty_max_mut!()),
+ (take_mut_in_bounds_max_range_from, (usize::MAX..), Some(&mut [] as _), empty_max_mut!()),
+}
+
+#[test]
+fn test_slice_from_ptr_range() {
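+ // Safety: each range below comes from `as_ptr_range`/`as_mut_ptr_range` on
+ // a live slice, so both pointers refer to the same allocation, as
+ // `from_ptr_range` requires.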
+ let arr = ["foo".to_owned(), "bar".to_owned()];
+ let range = arr.as_ptr_range();
+ unsafe {
+ assert_eq!(slice::from_ptr_range(range), &arr);
+ }
+
+ let mut arr = [1, 2, 3];
+ let range = arr.as_mut_ptr_range();
+ unsafe {
+ assert_eq!(slice::from_mut_ptr_range(range), &mut [1, 2, 3]);
+ }
+
+ let arr: [Vec<String>; 0] = [];
+ let range = arr.as_ptr_range();
+ unsafe {
+ assert_eq!(slice::from_ptr_range(range), &arr);
+ }
+}
+
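+// Even with zero-sized elements, a flattened length of 2 * usize::MAX cannot
+// be represented in a `usize`, so `flatten` must panic instead of wrapping.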
+#[test]
+#[should_panic = "slice len overflow"]
+fn test_flatten_size_overflow() {
+ let x = &[[(); usize::MAX]; 2][..];
+ let _ = x.flatten();
+}
+
+#[test]
+#[should_panic = "slice len overflow"]
+fn test_flatten_mut_size_overflow() {
+ let x = &mut [[(); usize::MAX]; 2][..];
+ let _ = x.flatten_mut();
+}
diff --git a/library/core/tests/str.rs b/library/core/tests/str.rs
new file mode 100644
index 000000000..ed939ca71
--- /dev/null
+++ b/library/core/tests/str.rs
@@ -0,0 +1 @@
+// All `str` tests live in liballoc/tests
diff --git a/library/core/tests/str_lossy.rs b/library/core/tests/str_lossy.rs
new file mode 100644
index 000000000..d4b47a470
--- /dev/null
+++ b/library/core/tests/str_lossy.rs
@@ -0,0 +1,85 @@
+use core::str::lossy::*;
+
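+// Each `Utf8LossyChunk` pairs a (possibly empty) valid UTF-8 run with the
+// invalid byte sequence that follows it, split along the lines of Unicode's
+// "substitution of maximal subparts"; a trailing valid run ends with an
+// empty `broken` part.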
+#[test]
+fn chunks() {
+ let mut iter = Utf8Lossy::from_bytes(b"hello").chunks();
+ assert_eq!(Some(Utf8LossyChunk { valid: "hello", broken: b"" }), iter.next());
+ assert_eq!(None, iter.next());
+
+ let mut iter = Utf8Lossy::from_bytes("ศไทย中华Việt Nam".as_bytes()).chunks();
+ assert_eq!(Some(Utf8LossyChunk { valid: "ศไทย中华Việt Nam", broken: b"" }), iter.next());
+ assert_eq!(None, iter.next());
+
+ let mut iter = Utf8Lossy::from_bytes(b"Hello\xC2 There\xFF Goodbye").chunks();
+ assert_eq!(Some(Utf8LossyChunk { valid: "Hello", broken: b"\xC2" }), iter.next());
+ assert_eq!(Some(Utf8LossyChunk { valid: " There", broken: b"\xFF" }), iter.next());
+ assert_eq!(Some(Utf8LossyChunk { valid: " Goodbye", broken: b"" }), iter.next());
+ assert_eq!(None, iter.next());
+
+ let mut iter = Utf8Lossy::from_bytes(b"Hello\xC0\x80 There\xE6\x83 Goodbye").chunks();
+ assert_eq!(Some(Utf8LossyChunk { valid: "Hello", broken: b"\xC0" }), iter.next());
+ assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\x80" }), iter.next());
+ assert_eq!(Some(Utf8LossyChunk { valid: " There", broken: b"\xE6\x83" }), iter.next());
+ assert_eq!(Some(Utf8LossyChunk { valid: " Goodbye", broken: b"" }), iter.next());
+ assert_eq!(None, iter.next());
+
+ let mut iter = Utf8Lossy::from_bytes(b"\xF5foo\xF5\x80bar").chunks();
+ assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xF5" }), iter.next());
+ assert_eq!(Some(Utf8LossyChunk { valid: "foo", broken: b"\xF5" }), iter.next());
+ assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\x80" }), iter.next());
+ assert_eq!(Some(Utf8LossyChunk { valid: "bar", broken: b"" }), iter.next());
+ assert_eq!(None, iter.next());
+
+ let mut iter = Utf8Lossy::from_bytes(b"\xF1foo\xF1\x80bar\xF1\x80\x80baz").chunks();
+ assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xF1" }), iter.next());
+ assert_eq!(Some(Utf8LossyChunk { valid: "foo", broken: b"\xF1\x80" }), iter.next());
+ assert_eq!(Some(Utf8LossyChunk { valid: "bar", broken: b"\xF1\x80\x80" }), iter.next());
+ assert_eq!(Some(Utf8LossyChunk { valid: "baz", broken: b"" }), iter.next());
+ assert_eq!(None, iter.next());
+
+ let mut iter = Utf8Lossy::from_bytes(b"\xF4foo\xF4\x80bar\xF4\xBFbaz").chunks();
+ assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xF4" }), iter.next());
+ assert_eq!(Some(Utf8LossyChunk { valid: "foo", broken: b"\xF4\x80" }), iter.next());
+ assert_eq!(Some(Utf8LossyChunk { valid: "bar", broken: b"\xF4" }), iter.next());
+ assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xBF" }), iter.next());
+ assert_eq!(Some(Utf8LossyChunk { valid: "baz", broken: b"" }), iter.next());
+ assert_eq!(None, iter.next());
+
+ let mut iter = Utf8Lossy::from_bytes(b"\xF0\x80\x80\x80foo\xF0\x90\x80\x80bar").chunks();
+ assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xF0" }), iter.next());
+ assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\x80" }), iter.next());
+ assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\x80" }), iter.next());
+ assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\x80" }), iter.next());
+ assert_eq!(Some(Utf8LossyChunk { valid: "foo\u{10000}bar", broken: b"" }), iter.next());
+ assert_eq!(None, iter.next());
+
+ // surrogates
+ let mut iter = Utf8Lossy::from_bytes(b"\xED\xA0\x80foo\xED\xBF\xBFbar").chunks();
+ assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xED" }), iter.next());
+ assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xA0" }), iter.next());
+ assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\x80" }), iter.next());
+ assert_eq!(Some(Utf8LossyChunk { valid: "foo", broken: b"\xED" }), iter.next());
+ assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xBF" }), iter.next());
+ assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xBF" }), iter.next());
+ assert_eq!(Some(Utf8LossyChunk { valid: "bar", broken: b"" }), iter.next());
+ assert_eq!(None, iter.next());
+}
+
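+// `Display` replaces each non-empty `broken` chunk above with a single
+// U+FFFD, matching the behavior of `String::from_utf8_lossy`.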
+#[test]
+fn display() {
+ assert_eq!(
+ "Hello\u{FFFD}\u{FFFD} There\u{FFFD} Goodbye",
+ &Utf8Lossy::from_bytes(b"Hello\xC0\x80 There\xE6\x83 Goodbye").to_string()
+ );
+}
+
+#[test]
+fn debug() {
+ assert_eq!(
+ "\"Hello\\xc0\\x80 There\\xe6\\x83 Goodbye\\u{10d4ea}\"",
+ &format!(
+ "{:?}",
+ Utf8Lossy::from_bytes(b"Hello\xC0\x80 There\xE6\x83 Goodbye\xf4\x8d\x93\xaa")
+ )
+ );
+}
diff --git a/library/core/tests/task.rs b/library/core/tests/task.rs
new file mode 100644
index 000000000..d71fef9e5
--- /dev/null
+++ b/library/core/tests/task.rs
@@ -0,0 +1,14 @@
+use core::task::Poll;
+
+#[test]
+fn poll_const() {
+ // test that the methods of `Poll` are usable in a const context
+
+ const POLL: Poll<usize> = Poll::Pending;
+
+ const IS_READY: bool = POLL.is_ready();
+ assert!(!IS_READY);
+
+ const IS_PENDING: bool = POLL.is_pending();
+ assert!(IS_PENDING);
+}
diff --git a/library/core/tests/time.rs b/library/core/tests/time.rs
new file mode 100644
index 000000000..fe2d2f241
--- /dev/null
+++ b/library/core/tests/time.rs
@@ -0,0 +1,447 @@
+use core::time::Duration;
+
+#[test]
+fn creation() {
+ assert_ne!(Duration::from_secs(1), Duration::from_secs(0));
+ assert_eq!(Duration::from_secs(1) + Duration::from_secs(2), Duration::from_secs(3));
+ assert_eq!(
+ Duration::from_millis(10) + Duration::from_secs(4),
+ Duration::new(4, 10 * 1_000_000)
+ );
+ assert_eq!(Duration::from_millis(4000), Duration::new(4, 0));
+}
+
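+// `Duration::new` carries nanoseconds >= 1_000_000_000 over into the seconds
+// field and panics only if that carry overflows the `u64` seconds, as below.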
+#[test]
+#[should_panic]
+fn new_overflow() {
+ let _ = Duration::new(u64::MAX, 1_000_000_000);
+}
+
+#[test]
+fn secs() {
+ assert_eq!(Duration::new(0, 0).as_secs(), 0);
+ assert_eq!(Duration::new(0, 500_000_005).as_secs(), 0);
+ assert_eq!(Duration::new(0, 1_050_000_001).as_secs(), 1);
+ assert_eq!(Duration::from_secs(1).as_secs(), 1);
+ assert_eq!(Duration::from_millis(999).as_secs(), 0);
+ assert_eq!(Duration::from_millis(1001).as_secs(), 1);
+ assert_eq!(Duration::from_micros(999_999).as_secs(), 0);
+ assert_eq!(Duration::from_micros(1_000_001).as_secs(), 1);
+ assert_eq!(Duration::from_nanos(999_999_999).as_secs(), 0);
+ assert_eq!(Duration::from_nanos(1_000_000_001).as_secs(), 1);
+}
+
+#[test]
+fn millis() {
+ assert_eq!(Duration::new(0, 0).subsec_millis(), 0);
+ assert_eq!(Duration::new(0, 500_000_005).subsec_millis(), 500);
+ assert_eq!(Duration::new(0, 1_050_000_001).subsec_millis(), 50);
+ assert_eq!(Duration::from_secs(1).subsec_millis(), 0);
+ assert_eq!(Duration::from_millis(999).subsec_millis(), 999);
+ assert_eq!(Duration::from_millis(1001).subsec_millis(), 1);
+ assert_eq!(Duration::from_micros(999_999).subsec_millis(), 999);
+ assert_eq!(Duration::from_micros(1_001_000).subsec_millis(), 1);
+ assert_eq!(Duration::from_nanos(999_999_999).subsec_millis(), 999);
+ assert_eq!(Duration::from_nanos(1_001_000_000).subsec_millis(), 1);
+}
+
+#[test]
+fn micros() {
+ assert_eq!(Duration::new(0, 0).subsec_micros(), 0);
+ assert_eq!(Duration::new(0, 500_000_005).subsec_micros(), 500_000);
+ assert_eq!(Duration::new(0, 1_050_000_001).subsec_micros(), 50_000);
+ assert_eq!(Duration::from_secs(1).subsec_micros(), 0);
+ assert_eq!(Duration::from_millis(999).subsec_micros(), 999_000);
+ assert_eq!(Duration::from_millis(1001).subsec_micros(), 1_000);
+ assert_eq!(Duration::from_micros(999_999).subsec_micros(), 999_999);
+ assert_eq!(Duration::from_micros(1_000_001).subsec_micros(), 1);
+ assert_eq!(Duration::from_nanos(999_999_999).subsec_micros(), 999_999);
+ assert_eq!(Duration::from_nanos(1_000_001_000).subsec_micros(), 1);
+}
+
+#[test]
+fn nanos() {
+ assert_eq!(Duration::new(0, 0).subsec_nanos(), 0);
+ assert_eq!(Duration::new(0, 5).subsec_nanos(), 5);
+ assert_eq!(Duration::new(0, 1_000_000_001).subsec_nanos(), 1);
+ assert_eq!(Duration::from_secs(1).subsec_nanos(), 0);
+ assert_eq!(Duration::from_millis(999).subsec_nanos(), 999_000_000);
+ assert_eq!(Duration::from_millis(1001).subsec_nanos(), 1_000_000);
+ assert_eq!(Duration::from_micros(999_999).subsec_nanos(), 999_999_000);
+ assert_eq!(Duration::from_micros(1_000_001).subsec_nanos(), 1000);
+ assert_eq!(Duration::from_nanos(999_999_999).subsec_nanos(), 999_999_999);
+ assert_eq!(Duration::from_nanos(1_000_000_001).subsec_nanos(), 1);
+}
+
+#[test]
+fn add() {
+ assert_eq!(Duration::new(0, 0) + Duration::new(0, 1), Duration::new(0, 1));
+ assert_eq!(Duration::new(0, 500_000_000) + Duration::new(0, 500_000_001), Duration::new(1, 1));
+}
+
+#[test]
+fn checked_add() {
+ assert_eq!(Duration::new(0, 0).checked_add(Duration::new(0, 1)), Some(Duration::new(0, 1)));
+ assert_eq!(
+ Duration::new(0, 500_000_000).checked_add(Duration::new(0, 500_000_001)),
+ Some(Duration::new(1, 1))
+ );
+ assert_eq!(Duration::new(1, 0).checked_add(Duration::new(u64::MAX, 0)), None);
+}
+
+#[test]
+fn saturating_add() {
+ assert_eq!(Duration::new(0, 0).saturating_add(Duration::new(0, 1)), Duration::new(0, 1));
+ assert_eq!(
+ Duration::new(0, 500_000_000).saturating_add(Duration::new(0, 500_000_001)),
+ Duration::new(1, 1)
+ );
+ assert_eq!(Duration::new(1, 0).saturating_add(Duration::new(u64::MAX, 0)), Duration::MAX);
+}
+
+#[test]
+fn sub() {
+ assert_eq!(Duration::new(0, 1) - Duration::new(0, 0), Duration::new(0, 1));
+ assert_eq!(Duration::new(0, 500_000_001) - Duration::new(0, 500_000_000), Duration::new(0, 1));
+ assert_eq!(Duration::new(1, 0) - Duration::new(0, 1), Duration::new(0, 999_999_999));
+}
+
+#[test]
+fn checked_sub() {
+ assert_eq!(Duration::NANOSECOND.checked_sub(Duration::ZERO), Some(Duration::NANOSECOND));
+ assert_eq!(
+ Duration::SECOND.checked_sub(Duration::NANOSECOND),
+ Some(Duration::new(0, 999_999_999))
+ );
+ assert_eq!(Duration::ZERO.checked_sub(Duration::NANOSECOND), None);
+ assert_eq!(Duration::ZERO.checked_sub(Duration::SECOND), None);
+}
+
+#[test]
+fn saturating_sub() {
+ assert_eq!(Duration::NANOSECOND.saturating_sub(Duration::ZERO), Duration::NANOSECOND);
+ assert_eq!(
+ Duration::SECOND.saturating_sub(Duration::NANOSECOND),
+ Duration::new(0, 999_999_999)
+ );
+ assert_eq!(Duration::ZERO.saturating_sub(Duration::NANOSECOND), Duration::ZERO);
+ assert_eq!(Duration::ZERO.saturating_sub(Duration::SECOND), Duration::ZERO);
+}
+
+#[test]
+#[should_panic]
+fn sub_bad1() {
+ let _ = Duration::new(0, 0) - Duration::new(0, 1);
+}
+
+#[test]
+#[should_panic]
+fn sub_bad2() {
+ let _ = Duration::new(0, 0) - Duration::new(1, 0);
+}
+
+#[test]
+fn mul() {
+ assert_eq!(Duration::new(0, 1) * 2, Duration::new(0, 2));
+ assert_eq!(Duration::new(1, 1) * 3, Duration::new(3, 3));
+ assert_eq!(Duration::new(0, 500_000_001) * 4, Duration::new(2, 4));
+ assert_eq!(Duration::new(0, 500_000_001) * 4000, Duration::new(2000, 4000));
+}
+
+#[test]
+fn checked_mul() {
+ assert_eq!(Duration::new(0, 1).checked_mul(2), Some(Duration::new(0, 2)));
+ assert_eq!(Duration::new(1, 1).checked_mul(3), Some(Duration::new(3, 3)));
+ assert_eq!(Duration::new(0, 500_000_001).checked_mul(4), Some(Duration::new(2, 4)));
+ assert_eq!(Duration::new(0, 500_000_001).checked_mul(4000), Some(Duration::new(2000, 4000)));
+ assert_eq!(Duration::new(u64::MAX - 1, 0).checked_mul(2), None);
+}
+
+#[test]
+fn saturating_mul() {
+ assert_eq!(Duration::new(0, 1).saturating_mul(2), Duration::new(0, 2));
+ assert_eq!(Duration::new(1, 1).saturating_mul(3), Duration::new(3, 3));
+ assert_eq!(Duration::new(0, 500_000_001).saturating_mul(4), Duration::new(2, 4));
+ assert_eq!(Duration::new(0, 500_000_001).saturating_mul(4000), Duration::new(2000, 4000));
+ assert_eq!(Duration::new(u64::MAX - 1, 0).saturating_mul(2), Duration::MAX);
+}
+
+#[test]
+fn div() {
+ assert_eq!(Duration::new(0, 1) / 2, Duration::new(0, 0));
+ assert_eq!(Duration::new(1, 1) / 3, Duration::new(0, 333_333_333));
+ assert_eq!(Duration::new(99, 999_999_000) / 100, Duration::new(0, 999_999_990));
+}
+
+#[test]
+fn checked_div() {
+ assert_eq!(Duration::new(2, 0).checked_div(2), Some(Duration::new(1, 0)));
+ assert_eq!(Duration::new(1, 0).checked_div(2), Some(Duration::new(0, 500_000_000)));
+ assert_eq!(Duration::new(2, 0).checked_div(0), None);
+}
+
+#[test]
+fn correct_sum() {
+ let durations = [
+ Duration::new(1, 999_999_999),
+ Duration::new(2, 999_999_999),
+ Duration::new(0, 999_999_999),
+ Duration::new(0, 999_999_999),
+ Duration::new(0, 999_999_999),
+ Duration::new(5, 0),
+ ];
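+ // 5 × 999_999_999 ns = 4_999_999_995 ns = 4 s + 999_999_995 ns, hence
+ // seconds 1 + 2 + 5 + 4 and nanoseconds 1_000_000_000 - 5.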
+ let sum = durations.iter().sum::<Duration>();
+ assert_eq!(sum, Duration::new(1 + 2 + 5 + 4, 1_000_000_000 - 5));
+}
+
+#[test]
+fn debug_formatting_extreme_values() {
+ assert_eq!(
+ format!("{:?}", Duration::new(18_446_744_073_709_551_615, 123_456_789)),
+ "18446744073709551615.123456789s"
+ );
+}
+
+#[test]
+fn debug_formatting_secs() {
+ assert_eq!(format!("{:?}", Duration::new(7, 000_000_000)), "7s");
+ assert_eq!(format!("{:?}", Duration::new(7, 100_000_000)), "7.1s");
+ assert_eq!(format!("{:?}", Duration::new(7, 000_010_000)), "7.00001s");
+ assert_eq!(format!("{:?}", Duration::new(7, 000_000_001)), "7.000000001s");
+ assert_eq!(format!("{:?}", Duration::new(7, 123_456_789)), "7.123456789s");
+
+ assert_eq!(format!("{:?}", Duration::new(88, 000_000_000)), "88s");
+ assert_eq!(format!("{:?}", Duration::new(88, 100_000_000)), "88.1s");
+ assert_eq!(format!("{:?}", Duration::new(88, 000_010_000)), "88.00001s");
+ assert_eq!(format!("{:?}", Duration::new(88, 000_000_001)), "88.000000001s");
+ assert_eq!(format!("{:?}", Duration::new(88, 123_456_789)), "88.123456789s");
+
+ assert_eq!(format!("{:?}", Duration::new(999, 000_000_000)), "999s");
+ assert_eq!(format!("{:?}", Duration::new(999, 100_000_000)), "999.1s");
+ assert_eq!(format!("{:?}", Duration::new(999, 000_010_000)), "999.00001s");
+ assert_eq!(format!("{:?}", Duration::new(999, 000_000_001)), "999.000000001s");
+ assert_eq!(format!("{:?}", Duration::new(999, 123_456_789)), "999.123456789s");
+}
+
+#[test]
+fn debug_formatting_millis() {
+ assert_eq!(format!("{:?}", Duration::new(0, 7_000_000)), "7ms");
+ assert_eq!(format!("{:?}", Duration::new(0, 7_100_000)), "7.1ms");
+ assert_eq!(format!("{:?}", Duration::new(0, 7_000_001)), "7.000001ms");
+ assert_eq!(format!("{:?}", Duration::new(0, 7_123_456)), "7.123456ms");
+
+ assert_eq!(format!("{:?}", Duration::new(0, 88_000_000)), "88ms");
+ assert_eq!(format!("{:?}", Duration::new(0, 88_100_000)), "88.1ms");
+ assert_eq!(format!("{:?}", Duration::new(0, 88_000_001)), "88.000001ms");
+ assert_eq!(format!("{:?}", Duration::new(0, 88_123_456)), "88.123456ms");
+
+ assert_eq!(format!("{:?}", Duration::new(0, 999_000_000)), "999ms");
+ assert_eq!(format!("{:?}", Duration::new(0, 999_100_000)), "999.1ms");
+ assert_eq!(format!("{:?}", Duration::new(0, 999_000_001)), "999.000001ms");
+ assert_eq!(format!("{:?}", Duration::new(0, 999_123_456)), "999.123456ms");
+}
+
+#[test]
+fn debug_formatting_micros() {
+ assert_eq!(format!("{:?}", Duration::new(0, 7_000)), "7µs");
+ assert_eq!(format!("{:?}", Duration::new(0, 7_100)), "7.1µs");
+ assert_eq!(format!("{:?}", Duration::new(0, 7_001)), "7.001µs");
+ assert_eq!(format!("{:?}", Duration::new(0, 7_123)), "7.123µs");
+
+ assert_eq!(format!("{:?}", Duration::new(0, 88_000)), "88µs");
+ assert_eq!(format!("{:?}", Duration::new(0, 88_100)), "88.1µs");
+ assert_eq!(format!("{:?}", Duration::new(0, 88_001)), "88.001µs");
+ assert_eq!(format!("{:?}", Duration::new(0, 88_123)), "88.123µs");
+
+ assert_eq!(format!("{:?}", Duration::new(0, 999_000)), "999µs");
+ assert_eq!(format!("{:?}", Duration::new(0, 999_100)), "999.1µs");
+ assert_eq!(format!("{:?}", Duration::new(0, 999_001)), "999.001µs");
+ assert_eq!(format!("{:?}", Duration::new(0, 999_123)), "999.123µs");
+}
+
+#[test]
+fn debug_formatting_nanos() {
+ assert_eq!(format!("{:?}", Duration::new(0, 0)), "0ns");
+ assert_eq!(format!("{:?}", Duration::new(0, 1)), "1ns");
+ assert_eq!(format!("{:?}", Duration::new(0, 88)), "88ns");
+ assert_eq!(format!("{:?}", Duration::new(0, 999)), "999ns");
+}
+
+#[test]
+fn debug_formatting_precision_zero() {
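+ // With precision 0 the fractional part is rounded to the nearest whole
+ // unit, ties rounding up (e.g. 1_500 ns -> 2µs).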
+ assert_eq!(format!("{:.0?}", Duration::new(0, 0)), "0ns");
+ assert_eq!(format!("{:.0?}", Duration::new(0, 123)), "123ns");
+
+ assert_eq!(format!("{:.0?}", Duration::new(0, 1_001)), "1µs");
+ assert_eq!(format!("{:.0?}", Duration::new(0, 1_499)), "1µs");
+ assert_eq!(format!("{:.0?}", Duration::new(0, 1_500)), "2µs");
+ assert_eq!(format!("{:.0?}", Duration::new(0, 1_999)), "2µs");
+
+ assert_eq!(format!("{:.0?}", Duration::new(0, 1_000_001)), "1ms");
+ assert_eq!(format!("{:.0?}", Duration::new(0, 1_499_999)), "1ms");
+ assert_eq!(format!("{:.0?}", Duration::new(0, 1_500_000)), "2ms");
+ assert_eq!(format!("{:.0?}", Duration::new(0, 1_999_999)), "2ms");
+
+ assert_eq!(format!("{:.0?}", Duration::new(1, 000_000_001)), "1s");
+ assert_eq!(format!("{:.0?}", Duration::new(1, 499_999_999)), "1s");
+ assert_eq!(format!("{:.0?}", Duration::new(1, 500_000_000)), "2s");
+ assert_eq!(format!("{:.0?}", Duration::new(1, 999_999_999)), "2s");
+}
+
+#[test]
+fn debug_formatting_precision_two() {
+ assert_eq!(format!("{:.2?}", Duration::new(0, 0)), "0.00ns");
+ assert_eq!(format!("{:.2?}", Duration::new(0, 123)), "123.00ns");
+
+ assert_eq!(format!("{:.2?}", Duration::new(0, 1_000)), "1.00µs");
+ assert_eq!(format!("{:.2?}", Duration::new(0, 7_001)), "7.00µs");
+ assert_eq!(format!("{:.2?}", Duration::new(0, 7_100)), "7.10µs");
+ assert_eq!(format!("{:.2?}", Duration::new(0, 7_109)), "7.11µs");
+ assert_eq!(format!("{:.2?}", Duration::new(0, 7_199)), "7.20µs");
+ assert_eq!(format!("{:.2?}", Duration::new(0, 1_999)), "2.00µs");
+
+ assert_eq!(format!("{:.2?}", Duration::new(0, 1_000_000)), "1.00ms");
+ assert_eq!(format!("{:.2?}", Duration::new(0, 3_001_000)), "3.00ms");
+ assert_eq!(format!("{:.2?}", Duration::new(0, 3_100_000)), "3.10ms");
+ assert_eq!(format!("{:.2?}", Duration::new(0, 1_999_999)), "2.00ms");
+
+ assert_eq!(format!("{:.2?}", Duration::new(1, 000_000_000)), "1.00s");
+ assert_eq!(format!("{:.2?}", Duration::new(4, 001_000_000)), "4.00s");
+ assert_eq!(format!("{:.2?}", Duration::new(2, 100_000_000)), "2.10s");
+ assert_eq!(format!("{:.2?}", Duration::new(2, 104_990_000)), "2.10s");
+ assert_eq!(format!("{:.2?}", Duration::new(2, 105_000_000)), "2.11s");
+ assert_eq!(format!("{:.2?}", Duration::new(8, 999_999_999)), "9.00s");
+}
+
+#[test]
+fn debug_formatting_padding() {
+ assert_eq!("0ns ", format!("{:<9?}", Duration::new(0, 0)));
+ assert_eq!(" 0ns", format!("{:>9?}", Duration::new(0, 0)));
+ assert_eq!(" 0ns ", format!("{:^9?}", Duration::new(0, 0)));
+ assert_eq!("123ns ", format!("{:<9.0?}", Duration::new(0, 123)));
+ assert_eq!(" 123ns", format!("{:>9.0?}", Duration::new(0, 123)));
+ assert_eq!(" 123ns ", format!("{:^9.0?}", Duration::new(0, 123)));
+ assert_eq!("123.0ns ", format!("{:<9.1?}", Duration::new(0, 123)));
+ assert_eq!(" 123.0ns", format!("{:>9.1?}", Duration::new(0, 123)));
+ assert_eq!(" 123.0ns ", format!("{:^9.1?}", Duration::new(0, 123)));
+ assert_eq!("7.1µs ", format!("{:<9?}", Duration::new(0, 7_100)));
+ assert_eq!(" 7.1µs", format!("{:>9?}", Duration::new(0, 7_100)));
+ assert_eq!(" 7.1µs ", format!("{:^9?}", Duration::new(0, 7_100)));
+ assert_eq!("999.123456ms", format!("{:<9?}", Duration::new(0, 999_123_456)));
+ assert_eq!("999.123456ms", format!("{:>9?}", Duration::new(0, 999_123_456)));
+ assert_eq!("999.123456ms", format!("{:^9?}", Duration::new(0, 999_123_456)));
+ assert_eq!("5s ", format!("{:<9?}", Duration::new(5, 0)));
+ assert_eq!(" 5s", format!("{:>9?}", Duration::new(5, 0)));
+ assert_eq!(" 5s ", format!("{:^9?}", Duration::new(5, 0)));
+ assert_eq!("5.000000000000s", format!("{:<9.12?}", Duration::new(5, 0)));
+ assert_eq!("5.000000000000s", format!("{:>9.12?}", Duration::new(5, 0)));
+ assert_eq!("5.000000000000s", format!("{:^9.12?}", Duration::new(5, 0)));
+
+ // default alignment is left:
+ assert_eq!("5s ", format!("{:9?}", Duration::new(5, 0)));
+}
+
+#[test]
+fn debug_formatting_precision_high() {
+ assert_eq!(format!("{:.5?}", Duration::new(0, 23_678)), "23.67800µs");
+
+ assert_eq!(format!("{:.9?}", Duration::new(1, 000_000_000)), "1.000000000s");
+ assert_eq!(format!("{:.10?}", Duration::new(4, 001_000_000)), "4.0010000000s");
+ assert_eq!(format!("{:.20?}", Duration::new(4, 001_000_000)), "4.00100000000000000000s");
+}
+
+#[test]
+fn duration_const() {
+ // test that the methods of `Duration` are usable in a const context
+
+ const DURATION: Duration = Duration::new(0, 123_456_789);
+
+ const SUB_SEC_MILLIS: u32 = DURATION.subsec_millis();
+ assert_eq!(SUB_SEC_MILLIS, 123);
+
+ const SUB_SEC_MICROS: u32 = DURATION.subsec_micros();
+ assert_eq!(SUB_SEC_MICROS, 123_456);
+
+ const SUB_SEC_NANOS: u32 = DURATION.subsec_nanos();
+ assert_eq!(SUB_SEC_NANOS, 123_456_789);
+
+ const IS_ZERO: bool = Duration::ZERO.is_zero();
+ assert!(IS_ZERO);
+
+ const SECONDS: u64 = Duration::SECOND.as_secs();
+ assert_eq!(SECONDS, 1);
+
+ const FROM_SECONDS: Duration = Duration::from_secs(1);
+ assert_eq!(FROM_SECONDS, Duration::SECOND);
+
+ const SECONDS_F32: f32 = Duration::SECOND.as_secs_f32();
+ assert_eq!(SECONDS_F32, 1.0);
+
+ const FROM_SECONDS_F32: Duration = Duration::from_secs_f32(1.0);
+ assert_eq!(FROM_SECONDS_F32, Duration::SECOND);
+
+ const SECONDS_F64: f64 = Duration::SECOND.as_secs_f64();
+ assert_eq!(SECONDS_F64, 1.0);
+
+ const FROM_SECONDS_F64: Duration = Duration::from_secs_f64(1.0);
+ assert_eq!(FROM_SECONDS_F64, Duration::SECOND);
+
+ const MILLIS: u128 = Duration::SECOND.as_millis();
+ assert_eq!(MILLIS, 1_000);
+
+ const FROM_MILLIS: Duration = Duration::from_millis(1_000);
+ assert_eq!(FROM_MILLIS, Duration::SECOND);
+
+ const MICROS: u128 = Duration::SECOND.as_micros();
+ assert_eq!(MICROS, 1_000_000);
+
+ const FROM_MICROS: Duration = Duration::from_micros(1_000_000);
+ assert_eq!(FROM_MICROS, Duration::SECOND);
+
+ const NANOS: u128 = Duration::SECOND.as_nanos();
+ assert_eq!(NANOS, 1_000_000_000);
+
+ const FROM_NANOS: Duration = Duration::from_nanos(1_000_000_000);
+ assert_eq!(FROM_NANOS, Duration::SECOND);
+
+ const MAX: Duration = Duration::new(u64::MAX, 999_999_999);
+
+ const CHECKED_ADD: Option<Duration> = MAX.checked_add(Duration::SECOND);
+ assert_eq!(CHECKED_ADD, None);
+
+ const CHECKED_SUB: Option<Duration> = Duration::ZERO.checked_sub(Duration::SECOND);
+ assert_eq!(CHECKED_SUB, None);
+
+ const CHECKED_MUL: Option<Duration> = Duration::SECOND.checked_mul(1);
+ assert_eq!(CHECKED_MUL, Some(Duration::SECOND));
+
+ const MUL_F32: Duration = Duration::SECOND.mul_f32(1.0);
+ assert_eq!(MUL_F32, Duration::SECOND);
+
+ const MUL_F64: Duration = Duration::SECOND.mul_f64(1.0);
+ assert_eq!(MUL_F64, Duration::SECOND);
+
+ const CHECKED_DIV: Option<Duration> = Duration::SECOND.checked_div(1);
+ assert_eq!(CHECKED_DIV, Some(Duration::SECOND));
+
+ const DIV_F32: Duration = Duration::SECOND.div_f32(1.0);
+ assert_eq!(DIV_F32, Duration::SECOND);
+
+ const DIV_F64: Duration = Duration::SECOND.div_f64(1.0);
+ assert_eq!(DIV_F64, Duration::SECOND);
+
+ const DIV_DURATION_F32: f32 = Duration::SECOND.div_duration_f32(Duration::SECOND);
+ assert_eq!(DIV_DURATION_F32, 1.0);
+
+ const DIV_DURATION_F64: f64 = Duration::SECOND.div_duration_f64(Duration::SECOND);
+ assert_eq!(DIV_DURATION_F64, 1.0);
+
+ const SATURATING_ADD: Duration = MAX.saturating_add(Duration::SECOND);
+ assert_eq!(SATURATING_ADD, MAX);
+
+ const SATURATING_SUB: Duration = Duration::ZERO.saturating_sub(Duration::SECOND);
+ assert_eq!(SATURATING_SUB, Duration::ZERO);
+
+ const SATURATING_MUL: Duration = MAX.saturating_mul(2);
+ assert_eq!(SATURATING_MUL, MAX);
+}
diff --git a/library/core/tests/tuple.rs b/library/core/tests/tuple.rs
new file mode 100644
index 000000000..ea1e28142
--- /dev/null
+++ b/library/core/tests/tuple.rs
@@ -0,0 +1,61 @@
+use std::cmp::Ordering::{Equal, Greater, Less};
+
+#[test]
+fn test_clone() {
+ let a = (1, "2");
+ let b = a.clone();
+ assert_eq!(a, b);
+}
+
+#[test]
+fn test_partial_eq() {
+ let (small, big) = ((1, 2, 3), (3, 2, 1));
+ assert_eq!(small, small);
+ assert_eq!(big, big);
+ assert_ne!(small, big);
+ assert_ne!(big, small);
+}
+
+#[test]
+fn test_partial_ord() {
+ let (small, big) = ((1, 2, 3), (3, 2, 1));
+
+ assert!(small < big);
+ assert!(!(small < small));
+ assert!(!(big < small));
+ assert!(!(big < big));
+
+ assert!(small <= small);
+ assert!(big <= big);
+
+ assert!(big > small);
+ assert!(small >= small);
+ assert!(big >= small);
+ assert!(big >= big);
+
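+ // Tuple ordering is lexicographic: a NaN in the first differing position
+ // makes every comparison below false, while an earlier decisive element
+ // (1.0 < 2.0) means a trailing NaN is never consulted.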
+ assert!(!((1.0f64, 2.0f64) < (f64::NAN, 3.0)));
+ assert!(!((1.0f64, 2.0f64) <= (f64::NAN, 3.0)));
+ assert!(!((1.0f64, 2.0f64) > (f64::NAN, 3.0)));
+ assert!(!((1.0f64, 2.0f64) >= (f64::NAN, 3.0)));
+ assert!(((1.0f64, 2.0f64) < (2.0, f64::NAN)));
+ assert!(!((2.0f64, 2.0f64) < (2.0, f64::NAN)));
+}
+
+#[test]
+fn test_ord() {
+ let (small, big) = ((1, 2, 3), (3, 2, 1));
+ assert_eq!(small.cmp(&small), Equal);
+ assert_eq!(big.cmp(&big), Equal);
+ assert_eq!(small.cmp(&big), Less);
+ assert_eq!(big.cmp(&small), Greater);
+}
+
+#[test]
+fn test_show() {
+ let s = format!("{:?}", (1,));
+ assert_eq!(s, "(1,)");
+ let s = format!("{:?}", (1, true));
+ assert_eq!(s, "(1, true)");
+ let s = format!("{:?}", (1, "hi", true));
+ assert_eq!(s, "(1, \"hi\", true)");
+}
diff --git a/library/core/tests/unicode.rs b/library/core/tests/unicode.rs
new file mode 100644
index 000000000..bbace0ef6
--- /dev/null
+++ b/library/core/tests/unicode.rs
@@ -0,0 +1,5 @@
+#[test]
+pub fn version() {
+ let (major, _minor, _update) = core::char::UNICODE_VERSION;
+ assert!(major >= 10);
+}
diff --git a/library/core/tests/waker.rs b/library/core/tests/waker.rs
new file mode 100644
index 000000000..38a3a0ada
--- /dev/null
+++ b/library/core/tests/waker.rs
@@ -0,0 +1,22 @@
+use std::ptr;
+use std::task::{RawWaker, RawWakerVTable, Waker};
+
+#[test]
+fn test_waker_getters() {
+ let raw_waker = RawWaker::new(ptr::invalid_mut(42usize), &WAKER_VTABLE);
+ assert_eq!(raw_waker.data() as usize, 42);
+ assert!(ptr::eq(raw_waker.vtable(), &WAKER_VTABLE));
+
+ let waker = unsafe { Waker::from_raw(raw_waker) };
+ let waker2 = waker.clone();
+ let raw_waker2 = waker2.as_raw();
+ assert_eq!(raw_waker2.data() as usize, 43);
+ assert!(ptr::eq(raw_waker2.vtable(), &WAKER_VTABLE));
+}
+
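+// The clone entry of the vtable bumps the data pointer by one, which is how
+// the test above observes that `Waker::clone` went through it (42 -> 43).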
+static WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
+ |data| RawWaker::new(ptr::invalid_mut(data as usize + 1), &WAKER_VTABLE),
+ |_| {},
+ |_| {},
+ |_| {},
+);